content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
############################### full dataset for export #############################
# Build the export table: take Year/Month from usertypedata_mean, attach them
# to the calendar-adjusted data, and write the combined table to full_data.csv.
# Columns 1 and 2 of usertypedata_mean are assumed to be Year and Month
# (TODO confirm against the upstream script that builds usertypedata_mean).
user_year_month <- usertypedata_mean[, c(1,2)]
tryial <- dataframe_calendAdjust%>%
mutate("Year" = user_year_month[,1], "Month" = user_year_month[,2])
# BUG FIX: the original `colnames(tryial[, 19]) <- "Year"` assigned names to a
# temporary copy produced by `[`, so the data frame itself was never renamed.
# Index into the colnames vector instead so the rename actually sticks.
colnames(tryial)[19] <- "Year"
colnames(tryial)[20] <- "Month"
# Keep columns 3:20 and move Year/Month to the front of the exported table.
full_data <- tryial[, c(3:20)]%>%
relocate(Year, .before = ..Total_Users)%>%
relocate(Month, .after = Year)
write.csv(full_data, file = "full_data.csv")
###################################################################################
| /data cleaning.R | no_license | agbleze/R_shiny_app_GoogleTrends | R | false | false | 551 | r | ############################### full dataset for export #############################
# Build the export table: take Year/Month from usertypedata_mean, attach them
# to the calendar-adjusted data, and write the combined table to full_data.csv.
# Columns 1 and 2 of usertypedata_mean are assumed to be Year and Month
# (TODO confirm against the upstream script that builds usertypedata_mean).
user_year_month <- usertypedata_mean[, c(1,2)]
tryial <- dataframe_calendAdjust%>%
mutate("Year" = user_year_month[,1], "Month" = user_year_month[,2])
# BUG FIX: the original `colnames(tryial[, 19]) <- "Year"` assigned names to a
# temporary copy produced by `[`, so the data frame itself was never renamed.
# Index into the colnames vector instead so the rename actually sticks.
colnames(tryial)[19] <- "Year"
colnames(tryial)[20] <- "Month"
# Keep columns 3:20 and move Year/Month to the front of the exported table.
full_data <- tryial[, c(3:20)]%>%
relocate(Year, .before = ..Total_Users)%>%
relocate(Month, .after = Year)
write.csv(full_data, file = "full_data.csv")
###################################################################################
|
library(ftnonpar)
### Name: pmden
### Title: Piecewise monotone density estimation with taut strings
### Aliases: pmden
### Keywords: smooth nonparametric models
### ** Examples
# Draw 500 observations from the "claw" benchmark density (rclaw comes from
# ftnonpar), then fit a piecewise monotone taut-string density estimate.
# `$n` extracts a count from the returned fit -- presumably the number of
# local extrema/pieces; TODO confirm against ftnonpar's pmden documentation.
# verb=TRUE makes pmden print progress output.
aaa <- rclaw(500)
pmden(aaa,verb=TRUE)$n
| /data/genthat_extracted_code/ftnonpar/examples/pmden.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 226 | r | library(ftnonpar)
### Name: pmden
### Title: Piecewise monotone density estimation with taut strings
### Aliases: pmden
### Keywords: smooth nonparametric models
### ** Examples
aaa <- rclaw(500)
pmden(aaa,verb=TRUE)$n
|
/src/maps/maps_jofre_forCAD.R | no_license | bonaventura-p/geoa_project | R | false | false | 6,579 | r | ||
#############
#apc models
# Age-period-cohort simulation evaluation: for each simulated dataset, fit a
# hierarchical APC (HAPC) mixed model, recover the "true" effects from the
# generating model, and score interval coverage / precision into simtable.
# Depends on the apcwin package for window() and scopedummy(), and on objects
# created by config~.R (notably `outdir`) -- TODO confirm config~.R contents.
rm(list=ls())
library(apcwin)
library(lme4)
source('config~.R')
# directory holding the simulated datasets (one simN.RData per simulation)
simdir = paste0(outdir,'simdata_hapc/')
#simdir = paste0(outdir,'simdata1/')
#collect overview of models
simtable = read.csv(
paste0(simdir,'simtable.csv')
)
# loop over every simulation listed in the overview table
for(lnum in simtable$simnumber){
#for(lnum in 4:4){
rownum = which(simtable$simnumber == lnum)
####load
# each .RData file defines `simulated`, a list carrying $simdat plus the true
# period/cohort break points ($p.breaks, $c.breaks) of the generating model
load(paste0(simdir,'sim',lnum,'.RData'))
tdat = simulated$simdat
tdat$y = as.numeric(tdat$y)
library(lme4)
library(dplyr)
#center age
# ca/ca2: centered age and its square; pf/cf bin period/cohort on the TRUE
# breaks, while p.hf/c.hf use fixed 5-year period / 3-year cohort windows as
# a typical HAPC specification (window() comes from apcwin).
tdat = tdat %>%
mutate(ca = a-mean(a,na.rm=TRUE),
ca2 = ca^2,
#real breaks
pf = window(p,breaks=simulated$p.breaks),
cf = window(c,breaks=simulated$c.breaks),
#hapc breaks
p.hf = window(p,winlength=5),
c.hf = window(c,winlength=3))
# HAPC model: quadratic fixed effect of centered age plus period/cohort terms
# entered both as fixed effects and as random intercepts (p.hf/c.hf appear in
# both parts -- this mirrors the model as originally specified).
hapc = lmer(y~ca + I(ca^2) +
p.hf + c.hf +
(1|p.hf) + (1|c.hf),
data = tdat)
#####################3
#predict
# Simulate from the fitted mixed model at new covariate values and summarize
# the draws into a point estimate and a 95% simulation interval.
# NOTE(review): simulate.merMod documents `newdata`, not `newdat`, and has no
# `se.fit` argument; unrecognized arguments may be silently absorbed, so
# verify the predictions really reflect `newdat`.
hapc.effs = function(mermod,newdat){
#mermod is fitted model, newdat is new data
effs=simulate(mermod,
nsim=100,
use.u = TRUE,
newdat=newdat,
allow.new.levels=TRUE,
se.fit=TRUE)
# row-wise mean and 2.5%/97.5% quantiles across the 100 simulated draws
pred = data.frame(fit=apply(effs,1,mean),
ll = apply(effs,1,quantile,prob=0.025),
ul = apply(effs,1,quantile,prob=0.975))
return(pred)
}
# reference values used to hold the non-swept dimensions at their means
mna = mean(tdat$ca)
mnp = window(mean(tdat$p),breaks=attr(tdat$p.hf,'breaks'))
mnc = window(mean(tdat$c),breaks=attr(tdat$c.hf,'breaks'))
# age profile: sweep age with period/cohort fixed at their mean windows
xm.a = data.frame(ca = min(tdat$ca):max(tdat$ca),
p.hf=mnp,
c.hf=mnc
)
pred = hapc.effs(hapc,xm.a) %>%
mutate(x = unique(tdat$a),
dim= 'a')
# period profile: scopedummy() (apcwin) expands the factor over its levels
xm.p = data.frame(ca = mna,
p.hf=scopedummy(tdat$p.hf,unique.vals=unique(tdat$p)),
c.hf=mnc)
pred = rbind(pred,hapc.effs(hapc,xm.p) %>%
mutate(x=unique(tdat$p),
dim='p'))
# cohort profile
xm.c = data.frame(ca = mna,
p.hf=mnp,
c.hf=scopedummy(tdat$c.hf))
pred = rbind(pred,hapc.effs(hapc,xm.c) %>%
mutate(x=unique(tdat$c),
dim='c'))
##########3
#real effects
#model is identifiable!! (can check directly from betas..., but using scopedummy and predict)
# Benchmark: an OLS fit that uses the TRUE break points of the generating
# model; its predictions serve as the "true" age/period/cohort effect curves.
tdat$a2 = tdat$a^2
tt = lm(y~a+a2+pf+cf,data=tdat)
true.b =list()
###a effects
xh= data.frame(a=min(tdat$a):max(tdat$a)); xh$a2 = xh$a^2
xh$pf = window(mean(tdat$p),breaks=attr(tdat$pf,'breaks'))
xh$cf = window(mean(tdat$c),breaks=attr(tdat$cf,'breaks'))
true.b[['a']] = data.frame(
x=xh$a,
m.eff = predict(tt,newdata=xh))
true.b[['a']]$dim='a'
###p effects
xh= data.frame(pf=window(unique(tdat$p),
breaks=attr(tdat$pf,'breaks')))
xh$a = mean(tdat$a); xh$a2=mean(tdat$a2)
xh$cf = window(mean(tdat$c),breaks=attr(tdat$cf,'breaks'))
true.b[['p']] = data.frame(
x=unique(tdat$p),
m.eff = predict(tt,newdata=xh))
true.b[['p']]$dim='p'
###c effects
xh= data.frame(cf=scopedummy(tdat$cf))
xh$a = mean(tdat$a); xh$a2=mean(tdat$a2)
xh$pf = window(mean(tdat$p),breaks=attr(tdat$pf,'breaks'))
true.b[['c']] = data.frame(
x=min(tdat$c):max(tdat$c),
m.eff = predict(tt,newdata=xh))
true.b[['c']]$dim='c'
#####
#plot
# stack the per-dimension true-effect tables into one data frame
r.effs = do.call(rbind,true.b)
############
#plot (and calculate overlaps)
# NOTE(review): `effs` is not defined at this scope in the visible code -- it
# is local to hapc.effs(). Presumably this should plot an apcwin fit object
# created elsewhere in the workflow; verify before relying on pp$data below.
pp = plot(effs)
#plot comparisons
#pp +
# geom_line(data=r.effs,
# aes(x=x,y=m.eff),linetype=2) +
# geom_line(data=pred,
# aes(x=x,y=fit),linetype=3) +
# geom_ribbon(data = pred,
# aes(x=x,y=fit,ymax=ul,ymin=ll),alpha=0.1) +
# theme_classic()
##########3
#calculate differences
# ol: does the true effect fall inside the plotted interval?
# NOTE(review): the comparison reads `>ul & <ll`, i.e. the bounds appear
# swapped here, and the rename() just below swaps ll/ul back. That is only
# correct if pp$data labels its interval columns in the opposite sense --
# confirm against apcwin's plot method.
mm = merge(r.effs,pp$data,by=c('dim','x')) %>%
mutate(ol=ifelse(m.eff>ul & m.eff<ll,TRUE,FALSE))
mm = mm %>%
rename(ll = ul,
ul = ll)
# suffix the HAPC predictions with .h so they can be merged alongside
pred = pred %>%
rename(Fit.h = fit,
ll.h = ll,
ul.h = ul)
mm = merge(mm,pred,by=c('dim','x')) %>%
mutate(ol.h = ifelse(m.eff>ll.h & m.eff<ul.h,TRUE,FALSE),
rng.h = abs(ul.h-ll.h),
rng = abs(ul-ll))
#hapc results
# hol: HAPC interval coverage of the true effects
# hrmse: residual standard error of the HAPC fit (sigma; not strictly an RMSE)
# efficiency: mean interval-width ratio (reference model vs HAPC)
simtable[rownum,'hol'] = mean(mm$ol.h)
simtable[rownum,'hrmse'] = summary(hapc)$sigma
simtable[rownum,'efficiency'] = mean(mm$rng/mm$rng.h)
#save simtable results
write.csv(simtable,
file=paste0(simdir,'fullsimtable.csv'),
row.names=FALSE)
}
| /code/hapc.R | no_license | bjb40/apc | R | false | false | 4,367 | r | #############
#apc models
# Age-period-cohort simulation evaluation: for each simulated dataset, fit a
# hierarchical APC (HAPC) mixed model, recover the "true" effects from the
# generating model, and score interval coverage / precision into simtable.
# Depends on the apcwin package for window() and scopedummy(), and on objects
# created by config~.R (notably `outdir`) -- TODO confirm config~.R contents.
rm(list=ls())
library(apcwin)
library(lme4)
source('config~.R')
# directory holding the simulated datasets (one simN.RData per simulation)
simdir = paste0(outdir,'simdata_hapc/')
#simdir = paste0(outdir,'simdata1/')
#collect overview of models
simtable = read.csv(
paste0(simdir,'simtable.csv')
)
# loop over every simulation listed in the overview table
for(lnum in simtable$simnumber){
#for(lnum in 4:4){
rownum = which(simtable$simnumber == lnum)
####load
# each .RData file defines `simulated`, a list carrying $simdat plus the true
# period/cohort break points ($p.breaks, $c.breaks) of the generating model
load(paste0(simdir,'sim',lnum,'.RData'))
tdat = simulated$simdat
tdat$y = as.numeric(tdat$y)
library(lme4)
library(dplyr)
#center age
# ca/ca2: centered age and its square; pf/cf bin period/cohort on the TRUE
# breaks, while p.hf/c.hf use fixed 5-year period / 3-year cohort windows as
# a typical HAPC specification (window() comes from apcwin).
tdat = tdat %>%
mutate(ca = a-mean(a,na.rm=TRUE),
ca2 = ca^2,
#real breaks
pf = window(p,breaks=simulated$p.breaks),
cf = window(c,breaks=simulated$c.breaks),
#hapc breaks
p.hf = window(p,winlength=5),
c.hf = window(c,winlength=3))
# HAPC model: quadratic fixed effect of centered age plus period/cohort terms
# entered both as fixed effects and as random intercepts (p.hf/c.hf appear in
# both parts -- this mirrors the model as originally specified).
hapc = lmer(y~ca + I(ca^2) +
p.hf + c.hf +
(1|p.hf) + (1|c.hf),
data = tdat)
#####################3
#predict
# Simulate from the fitted mixed model at new covariate values and summarize
# the draws into a point estimate and a 95% simulation interval.
# NOTE(review): simulate.merMod documents `newdata`, not `newdat`, and has no
# `se.fit` argument; unrecognized arguments may be silently absorbed, so
# verify the predictions really reflect `newdat`.
hapc.effs = function(mermod,newdat){
#mermod is fitted model, newdat is new data
effs=simulate(mermod,
nsim=100,
use.u = TRUE,
newdat=newdat,
allow.new.levels=TRUE,
se.fit=TRUE)
# row-wise mean and 2.5%/97.5% quantiles across the 100 simulated draws
pred = data.frame(fit=apply(effs,1,mean),
ll = apply(effs,1,quantile,prob=0.025),
ul = apply(effs,1,quantile,prob=0.975))
return(pred)
}
# reference values used to hold the non-swept dimensions at their means
mna = mean(tdat$ca)
mnp = window(mean(tdat$p),breaks=attr(tdat$p.hf,'breaks'))
mnc = window(mean(tdat$c),breaks=attr(tdat$c.hf,'breaks'))
# age profile: sweep age with period/cohort fixed at their mean windows
xm.a = data.frame(ca = min(tdat$ca):max(tdat$ca),
p.hf=mnp,
c.hf=mnc
)
pred = hapc.effs(hapc,xm.a) %>%
mutate(x = unique(tdat$a),
dim= 'a')
# period profile: scopedummy() (apcwin) expands the factor over its levels
xm.p = data.frame(ca = mna,
p.hf=scopedummy(tdat$p.hf,unique.vals=unique(tdat$p)),
c.hf=mnc)
pred = rbind(pred,hapc.effs(hapc,xm.p) %>%
mutate(x=unique(tdat$p),
dim='p'))
# cohort profile
xm.c = data.frame(ca = mna,
p.hf=mnp,
c.hf=scopedummy(tdat$c.hf))
pred = rbind(pred,hapc.effs(hapc,xm.c) %>%
mutate(x=unique(tdat$c),
dim='c'))
##########3
#real effects
#model is identifiable!! (can check directly from betas..., but using scopedummy and predict)
# Benchmark: an OLS fit that uses the TRUE break points of the generating
# model; its predictions serve as the "true" age/period/cohort effect curves.
tdat$a2 = tdat$a^2
tt = lm(y~a+a2+pf+cf,data=tdat)
true.b =list()
###a effects
xh= data.frame(a=min(tdat$a):max(tdat$a)); xh$a2 = xh$a^2
xh$pf = window(mean(tdat$p),breaks=attr(tdat$pf,'breaks'))
xh$cf = window(mean(tdat$c),breaks=attr(tdat$cf,'breaks'))
true.b[['a']] = data.frame(
x=xh$a,
m.eff = predict(tt,newdata=xh))
true.b[['a']]$dim='a'
###p effects
xh= data.frame(pf=window(unique(tdat$p),
breaks=attr(tdat$pf,'breaks')))
xh$a = mean(tdat$a); xh$a2=mean(tdat$a2)
xh$cf = window(mean(tdat$c),breaks=attr(tdat$cf,'breaks'))
true.b[['p']] = data.frame(
x=unique(tdat$p),
m.eff = predict(tt,newdata=xh))
true.b[['p']]$dim='p'
###c effects
xh= data.frame(cf=scopedummy(tdat$cf))
xh$a = mean(tdat$a); xh$a2=mean(tdat$a2)
xh$pf = window(mean(tdat$p),breaks=attr(tdat$pf,'breaks'))
true.b[['c']] = data.frame(
x=min(tdat$c):max(tdat$c),
m.eff = predict(tt,newdata=xh))
true.b[['c']]$dim='c'
#####
#plot
# stack the per-dimension true-effect tables into one data frame
r.effs = do.call(rbind,true.b)
############
#plot (and calculate overlaps)
# NOTE(review): `effs` is not defined at this scope in the visible code -- it
# is local to hapc.effs(). Presumably this should plot an apcwin fit object
# created elsewhere in the workflow; verify before relying on pp$data below.
pp = plot(effs)
#plot comparisons
#pp +
# geom_line(data=r.effs,
# aes(x=x,y=m.eff),linetype=2) +
# geom_line(data=pred,
# aes(x=x,y=fit),linetype=3) +
# geom_ribbon(data = pred,
# aes(x=x,y=fit,ymax=ul,ymin=ll),alpha=0.1) +
# theme_classic()
##########3
#calculate differences
# ol: does the true effect fall inside the plotted interval?
# NOTE(review): the comparison reads `>ul & <ll`, i.e. the bounds appear
# swapped here, and the rename() just below swaps ll/ul back. That is only
# correct if pp$data labels its interval columns in the opposite sense --
# confirm against apcwin's plot method.
mm = merge(r.effs,pp$data,by=c('dim','x')) %>%
mutate(ol=ifelse(m.eff>ul & m.eff<ll,TRUE,FALSE))
mm = mm %>%
rename(ll = ul,
ul = ll)
# suffix the HAPC predictions with .h so they can be merged alongside
pred = pred %>%
rename(Fit.h = fit,
ll.h = ll,
ul.h = ul)
mm = merge(mm,pred,by=c('dim','x')) %>%
mutate(ol.h = ifelse(m.eff>ll.h & m.eff<ul.h,TRUE,FALSE),
rng.h = abs(ul.h-ll.h),
rng = abs(ul-ll))
#hapc results
# hol: HAPC interval coverage of the true effects
# hrmse: residual standard error of the HAPC fit (sigma; not strictly an RMSE)
# efficiency: mean interval-width ratio (reference model vs HAPC)
simtable[rownum,'hol'] = mean(mm$ol.h)
simtable[rownum,'hrmse'] = summary(hapc)$sigma
simtable[rownum,'efficiency'] = mean(mm$rng/mm$rng.h)
#save simtable results
write.csv(simtable,
file=paste0(simdir,'fullsimtable.csv'),
row.names=FALSE)
}
|
with(a65b4aaae4b9947348d5374e8a00b668d, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';rm(list=ls())}); | /80bb2a25-ac5d-47d0-abfc-b3f3811f0936/R/Temp/aAaAYxtEqVjqI.R | no_license | ayanmanna8/test | R | false | false | 212 | r | with(a65b4aaae4b9947348d5374e8a00b668d, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';rm(list=ls())}); |
# Setup for a SEM/learning analysis: load the dataset and define scalar
# inputs used by later (not shown) computations.
# NOTE(review): setwd() with an absolute user path makes the script
# non-portable -- prefer project-relative paths.
setwd("/Users/arindambose/Documents")
mydatasl <- read.csv("semlearn1.csv", header = TRUE)
# ry12, b1, b2 are hand-entered constants; presumably a correlation and two
# standardized path coefficients -- TODO confirm their meaning with the author.
ry12 <- 0.801
b1 <- 0.32
b2 <- 0.599
| /semlearning.R | no_license | arinbasu/arinsRcodes | R | false | false | 129 | r | setwd("/Users/arindambose/Documents")
# Load the SEM/learning dataset and define scalar inputs used by later
# (not shown) computations.
mydatasl <- read.csv("semlearn1.csv", header = TRUE)
# ry12, b1, b2 are hand-entered constants; presumably a correlation and two
# standardized path coefficients -- TODO confirm their meaning with the author.
ry12 <- 0.801
b1 <- 0.32
b2 <- 0.599
|
# install
# Install only the dependencies that are missing, then attach them.
# Guarding with requireNamespace() avoids re-downloading and re-installing
# the packages on every run (the original called install.packages()
# unconditionally, which hits the network each time the script runs).
for (pkg in c("readxl", "writexl", "dplyr", "ggplot2")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}
# load packages
library("readxl")
library("writexl")
library("dplyr")
library("ggplot2")
# ======================================== S C R U B B I N G ========================================
# Clean the raw RTN (rate-to-needed premium) extract: join industry names via
# SIC code, keep only usable quote rows, normalize renewal / office / region /
# coverage labels, bucket group sizes, and derive the RTN ratio.
# NOTE(review): input/output are hard-coded local paths -- parameterize before
# reusing this script elsewhere.
# set file names
input <- "/home/carl/Desktop/RtnData.xlsx"
output <- "/home/carl/Desktop/RtnDataClean.xlsx"
# read in the data
data <- read_excel(input, sheet = "Data")
sic <- read_excel(input, sheet = "SIC")
# left outer-join on industry code
rtnData <- merge(x = data, y = sic, by.x = "INDUSTRY_CODE", by.y = "SIC_CODE", all.x = TRUE)
# drop VAL_DATE column
rtnData <- rtnData %>% select(-c("VAL_DATE"))
# remove NBOC and Amendment rows
# (rows where EVENT_TYPE is NA are dropped too: NA != "NBOC" evaluates to NA,
# and filter() treats NA conditions as FALSE)
rtnData <- rtnData %>% filter(EVENT_TYPE != "NBOC")
rtnData <- rtnData %>% filter(EVENT_TYPE != "Amendment")
# use only positive premiums
rtnData <- rtnData %>% filter(NEEDED_PREMIUM > 0 & FINAL_QUOTED_PREMIUM > 0)
# use plans with positive enrolled
rtnData <- rtnData %>% filter(ENROLLED_LIVES > 0)
# standardize renewals as strings "0".."3"; case_when() evaluates top-down,
# so New Business maps to "0" regardless of the RENEWAL_INSTANCE text
rtnData <- rtnData %>% mutate(RENEWAL_INSTANCE = case_when(EVENT_TYPE == "New Business" ~ "0",
                                                           RENEWAL_INSTANCE == "1st Renewal" ~ "1",
                                                           RENEWAL_INSTANCE == "2nd Renewal" ~ "2",
                                                           RENEWAL_INSTANCE == "2nd+" ~ "3",
                                                           RENEWAL_INSTANCE == "3rd Renewal +" ~ "3",
                                                           TRUE ~ as.character(RENEWAL_INSTANCE)))
rtnData <- rtnData %>% filter(!is.na(RENEWAL_INSTANCE))
# convert renewals to numeric
rtnData$RENEWAL_INSTANCE <- as.numeric(rtnData$RENEWAL_INSTANCE)
# format date timestamps
# NOTE(review): "%y-%m-%d" parses TWO-digit years; if these columns carry
# four-digit years the format should be "%Y-%m-%d" (and if read_excel already
# returned POSIXct the format string is ignored) -- confirm against the workbook.
rtnData$RAE_EFF_DATE <- as.Date(rtnData$RAE_EFF_DATE, "%y-%m-%d")
rtnData$COV_EFF_DATE <- as.Date(rtnData$COV_EFF_DATE, "%y-%m-%d")
# set date range >= 2017
rtnData <- rtnData %>% filter(RAE_EFF_DATE >= as.Date("2017-01-01"))
rtnData <- rtnData %>% filter(COV_EFF_DATE >= as.Date("2017-01-01"))
# clean up sales office names (lower-case, then collapse known aliases)
rtnData <- rtnData %>% mutate(SALES_OFFICE = tolower(SALES_OFFICE))
rtnData <- rtnData %>% mutate(SALES_OFFICE = case_when(SALES_OFFICE == "central philadelphia" ~ "philadelphia",
                                                       SALES_OFFICE == "chicago/indy/milwaukee" ~ "chicago",
                                                       SALES_OFFICE == "home" ~ "ft wayne",
                                                       SALES_OFFICE == "miami/orlando/tampa" ~ "miami",
                                                       SALES_OFFICE == "washington dc" ~ "washington d.c.",
                                                       TRUE ~ as.character(SALES_OFFICE)))
# map sales office to sales region; offices not listed keep their original
# (lower-cased) UW_REGION value
rtnData <- rtnData %>% mutate(UW_REGION = tolower(UW_REGION))
regions <- list("home" = c("ft wayne"),
                "central" = c("chicago","cincinnati","cleveland","detroit","ft lauderdale","indianapolis","miami","milwaukee","minneapolis","omaha","orlando","pittsburgh","st. louis","tampa"),
                "east" = c("atlanta","boston","charlotte","long island","nashville","new jersey","new york","parsnippany","philadelphia","portland, me","rochester","washington d.c.","white plains"),
                "west" = c("dallas","denver","houston","kansas city","los angeles","orange county","phoenix","portland, or","sacramento","san diego","san francisco","seattle"))
rtnData <- rtnData %>% mutate(UW_REGION = case_when(SALES_OFFICE %in% regions$home ~ "home",
                                                    SALES_OFFICE %in% regions$central ~ "central",
                                                    SALES_OFFICE %in% regions$east ~ "east",
                                                    SALES_OFFICE %in% regions$west ~ "west",
                                                    TRUE ~ as.character(UW_REGION)))
# define coverage types (coverage-name -> product-family lookup)
coverageType <- list("Life" = c("Basic Life","Dependent Life","Life","Life EE Paid","Life ER Paid","Optional Child Life","Optional Life","Optional Spouse Life","Voluntary Child Life","Voluntary Life","Voluntary Life - Unismoke","Voluntary Life - Unismoke ALT Plan","Voluntary Spouse Life"),
                     "STD" = c("NY DBL","State Disability - DBL","State Disability - TDB","STD","STD ASO","STD ATP","STD Core/Buy Up","STD EE Paid","STD ER Paid","STD Spec Worksite","STD True Zero","SW STD Alternate","Voluntary STD"),
                     "LTD" = c("LTD","LTD Core/Buy Up","LTD EE Paid","LTD ER Paid","LTD Spec Worksite","LTD True Zero","Optional LTD","Voluntary LTD"),
                     "AD&D" = c("AD&D","AD&D EE Paid","AD&D ER Paid","Basic AD&D","Optional AD&D","Voluntary AD&D","Voluntary Spouse AD&D"),
                     "Dental" = c("Dental","Dental HMO","Self-Funded Dental","Voluntary Dental","Voluntary Dental HMO"))
# aggregate coverage names into coverage types; anything unrecognized becomes
# "Misc" and is removed just below
rtnData <- rtnData %>% mutate(COVERAGE_TYPE = case_when(COVERAGE_NAME %in% coverageType$Life ~ "Life",
                                                        COVERAGE_NAME %in% coverageType$STD ~ "STD",
                                                        COVERAGE_NAME %in% coverageType$LTD ~ "LTD",
                                                        COVERAGE_NAME %in% coverageType$`AD&D` ~ "AD&D",
                                                        COVERAGE_NAME %in% coverageType$Dental ~ "Dental",
                                                        TRUE ~ "Misc"))
# remove misc coverage types (i.e. Implementation Credit, Vision, Voluntary Vision)
rtnData <- rtnData %>% filter(COVERAGE_TYPE != "Misc")
# classify groups into size by eligible lives
# (NA eligible lives fall through to the literal string "NA", not a real NA)
rtnData <- rtnData %>% mutate(GROUP_SIZE = case_when(ELIGIBLE_LIVES < 100 ~ "S",
                                                     ELIGIBLE_LIVES >= 100 & ELIGIBLE_LIVES < 1000 ~ "M",
                                                     ELIGIBLE_LIVES >= 1000 & ELIGIBLE_LIVES < 10000 ~ "L",
                                                     ELIGIBLE_LIVES >= 10000 ~ "XL",
                                                     TRUE ~ "NA"))
# add rtn column: ratio of final quoted premium to actuarially needed premium
rtnData <- rtnData %>% mutate(RTN = FINAL_QUOTED_PREMIUM / NEEDED_PREMIUM)
# write rtn data to Excel
#write_xlsx(rtnData, path = output, col_names = TRUE)
# ======================================== M O D E L I N G ========================================
# shift renewal instance for logarithmic model because log(0) = -inf
rtnData <- rtnData %>% mutate(RENEWAL_INSTANCE = RENEWAL_INSTANCE + 1.0)
# create modeling factors based on coverage type, region & industry.
# BUG FIX: the original wrapped this in as.data.table(), but data.table is
# never loaded by this script (only readxl/writexl/dplyr/ggplot2 are), so the
# call would fail. distinct() + arrange() yields the same unique, sorted
# combinations using dplyr, which is already attached.
model.factors <- rtnData %>%
  distinct(COVERAGE_TYPE, UW_REGION, INDUSTRY) %>%
  arrange(COVERAGE_TYPE, UW_REGION, INDUSTRY)
# initialize model list (one premium-weighted log-linear fit per combination)
models <- list()
# build out one model per observed (coverage type) x (region) x (industry) combination
for (i in seq_len(nrow(model.factors))) {
  # pull in relevant data for each factor
  model.data <- rtnData %>% filter(COVERAGE_TYPE == model.factors[i, "COVERAGE_TYPE"][[1]],
                                   UW_REGION == model.factors[i, "UW_REGION"][[1]],
                                   INDUSTRY == model.factors[i, "INDUSTRY"][[1]]) %>% select(RENEWAL_INSTANCE, RTN, NEEDED_PREMIUM)
  # save regression to model list; weighting by needed premium so large cases
  # dominate the fit
  models[[i]] <- lm(RTN ~ log(RENEWAL_INSTANCE), data = model.data, weights = NEEDED_PREMIUM)
}
# example using short-term disability in the service industry from the east region
std.east.service <- rtnData %>% filter(COVERAGE_TYPE == "STD", UW_REGION == "east", INDUSTRY == "Services") %>%
  select(RENEWAL_INSTANCE, RTN, NEEDED_PREMIUM)
# create a logarithmic model weighted by needed premium
std.east.service.fit <- lm(RTN ~ log(RENEWAL_INSTANCE), data = std.east.service, weights = NEEDED_PREMIUM)
# generate sampling points for model
x <- data.frame(RENEWAL_INSTANCE = seq(from = range(std.east.service$RENEWAL_INSTANCE)[1],
                                       to = range(std.east.service$RENEWAL_INSTANCE)[2],
                                       length.out = 100))
# get prediction errors
errors <- predict(std.east.service.fit, newdata = x, se.fit = TRUE)
# create alpha confidence interval.
# BUG FIX: the original used qnorm(alpha) directly, which for alpha = 0.99
# gives a two-sided 98% band; for a two-sided alpha-level interval the
# critical value is qnorm(1 - (1 - alpha) / 2).
alpha <- 0.99
z.crit <- qnorm(1 - (1 - alpha) / 2)
x$lci <- errors$fit - z.crit * errors$se.fit
x$fit <- errors$fit
x$uci <- errors$fit + z.crit * errors$se.fit
# plot points, model and confidence interval
ggplot(x, aes(x = RENEWAL_INSTANCE, y = fit)) +
  xlab("Renewal") + ylab("RTN") +
  theme_bw() + geom_line() + geom_smooth(aes(ymin = lci, ymax = uci), stat = "identity") +
  geom_point(data = std.east.service, aes(x = RENEWAL_INSTANCE, y = RTN, size = NEEDED_PREMIUM))
| /scripts/logarithmic-model/clean_log_model.r | no_license | Infinite-Actuary/ACTS-475 | R | false | false | 8,794 | r | # install
# Install only the dependencies that are missing, then attach them.
# Guarding with requireNamespace() avoids re-downloading and re-installing
# the packages on every run (the original called install.packages()
# unconditionally, which hits the network each time the script runs).
for (pkg in c("readxl", "writexl", "dplyr", "ggplot2")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}
# load packages
library("readxl")
library("writexl")
library("dplyr")
library("ggplot2")
# ======================================== S C R U B B I N G ========================================
# Clean the raw RTN (rate-to-needed premium) extract: join industry names via
# SIC code, keep only usable quote rows, normalize renewal / office / region /
# coverage labels, bucket group sizes, and derive the RTN ratio.
# NOTE(review): input/output are hard-coded local paths -- parameterize before
# reusing this script elsewhere.
# set file names
input <- "/home/carl/Desktop/RtnData.xlsx"
output <- "/home/carl/Desktop/RtnDataClean.xlsx"
# read in the data
data <- read_excel(input, sheet = "Data")
sic <- read_excel(input, sheet = "SIC")
# left outer-join on industry code
rtnData <- merge(x = data, y = sic, by.x = "INDUSTRY_CODE", by.y = "SIC_CODE", all.x = TRUE)
# drop VAL_DATE column
rtnData <- rtnData %>% select(-c("VAL_DATE"))
# remove NBOC and Amendment rows
# (rows where EVENT_TYPE is NA are dropped too: NA != "NBOC" evaluates to NA,
# and filter() treats NA conditions as FALSE)
rtnData <- rtnData %>% filter(EVENT_TYPE != "NBOC")
rtnData <- rtnData %>% filter(EVENT_TYPE != "Amendment")
# use only positive premiums
rtnData <- rtnData %>% filter(NEEDED_PREMIUM > 0 & FINAL_QUOTED_PREMIUM > 0)
# use plans with positive enrolled
rtnData <- rtnData %>% filter(ENROLLED_LIVES > 0)
# standardize renewals as strings "0".."3"; case_when() evaluates top-down,
# so New Business maps to "0" regardless of the RENEWAL_INSTANCE text
rtnData <- rtnData %>% mutate(RENEWAL_INSTANCE = case_when(EVENT_TYPE == "New Business" ~ "0",
                                                           RENEWAL_INSTANCE == "1st Renewal" ~ "1",
                                                           RENEWAL_INSTANCE == "2nd Renewal" ~ "2",
                                                           RENEWAL_INSTANCE == "2nd+" ~ "3",
                                                           RENEWAL_INSTANCE == "3rd Renewal +" ~ "3",
                                                           TRUE ~ as.character(RENEWAL_INSTANCE)))
rtnData <- rtnData %>% filter(!is.na(RENEWAL_INSTANCE))
# convert renewals to numeric
rtnData$RENEWAL_INSTANCE <- as.numeric(rtnData$RENEWAL_INSTANCE)
# format date timestamps
# NOTE(review): "%y-%m-%d" parses TWO-digit years; if these columns carry
# four-digit years the format should be "%Y-%m-%d" (and if read_excel already
# returned POSIXct the format string is ignored) -- confirm against the workbook.
rtnData$RAE_EFF_DATE <- as.Date(rtnData$RAE_EFF_DATE, "%y-%m-%d")
rtnData$COV_EFF_DATE <- as.Date(rtnData$COV_EFF_DATE, "%y-%m-%d")
# set date range >= 2017
rtnData <- rtnData %>% filter(RAE_EFF_DATE >= as.Date("2017-01-01"))
rtnData <- rtnData %>% filter(COV_EFF_DATE >= as.Date("2017-01-01"))
# clean up sales office names (lower-case, then collapse known aliases)
rtnData <- rtnData %>% mutate(SALES_OFFICE = tolower(SALES_OFFICE))
rtnData <- rtnData %>% mutate(SALES_OFFICE = case_when(SALES_OFFICE == "central philadelphia" ~ "philadelphia",
                                                       SALES_OFFICE == "chicago/indy/milwaukee" ~ "chicago",
                                                       SALES_OFFICE == "home" ~ "ft wayne",
                                                       SALES_OFFICE == "miami/orlando/tampa" ~ "miami",
                                                       SALES_OFFICE == "washington dc" ~ "washington d.c.",
                                                       TRUE ~ as.character(SALES_OFFICE)))
# map sales office to sales region; offices not listed keep their original
# (lower-cased) UW_REGION value
rtnData <- rtnData %>% mutate(UW_REGION = tolower(UW_REGION))
regions <- list("home" = c("ft wayne"),
                "central" = c("chicago","cincinnati","cleveland","detroit","ft lauderdale","indianapolis","miami","milwaukee","minneapolis","omaha","orlando","pittsburgh","st. louis","tampa"),
                "east" = c("atlanta","boston","charlotte","long island","nashville","new jersey","new york","parsnippany","philadelphia","portland, me","rochester","washington d.c.","white plains"),
                "west" = c("dallas","denver","houston","kansas city","los angeles","orange county","phoenix","portland, or","sacramento","san diego","san francisco","seattle"))
rtnData <- rtnData %>% mutate(UW_REGION = case_when(SALES_OFFICE %in% regions$home ~ "home",
                                                    SALES_OFFICE %in% regions$central ~ "central",
                                                    SALES_OFFICE %in% regions$east ~ "east",
                                                    SALES_OFFICE %in% regions$west ~ "west",
                                                    TRUE ~ as.character(UW_REGION)))
# define coverage types (coverage-name -> product-family lookup)
coverageType <- list("Life" = c("Basic Life","Dependent Life","Life","Life EE Paid","Life ER Paid","Optional Child Life","Optional Life","Optional Spouse Life","Voluntary Child Life","Voluntary Life","Voluntary Life - Unismoke","Voluntary Life - Unismoke ALT Plan","Voluntary Spouse Life"),
                     "STD" = c("NY DBL","State Disability - DBL","State Disability - TDB","STD","STD ASO","STD ATP","STD Core/Buy Up","STD EE Paid","STD ER Paid","STD Spec Worksite","STD True Zero","SW STD Alternate","Voluntary STD"),
                     "LTD" = c("LTD","LTD Core/Buy Up","LTD EE Paid","LTD ER Paid","LTD Spec Worksite","LTD True Zero","Optional LTD","Voluntary LTD"),
                     "AD&D" = c("AD&D","AD&D EE Paid","AD&D ER Paid","Basic AD&D","Optional AD&D","Voluntary AD&D","Voluntary Spouse AD&D"),
                     "Dental" = c("Dental","Dental HMO","Self-Funded Dental","Voluntary Dental","Voluntary Dental HMO"))
# aggregate coverage names into coverage types; anything unrecognized becomes
# "Misc" and is removed just below
rtnData <- rtnData %>% mutate(COVERAGE_TYPE = case_when(COVERAGE_NAME %in% coverageType$Life ~ "Life",
                                                        COVERAGE_NAME %in% coverageType$STD ~ "STD",
                                                        COVERAGE_NAME %in% coverageType$LTD ~ "LTD",
                                                        COVERAGE_NAME %in% coverageType$`AD&D` ~ "AD&D",
                                                        COVERAGE_NAME %in% coverageType$Dental ~ "Dental",
                                                        TRUE ~ "Misc"))
# remove misc coverage types (i.e. Implementation Credit, Vision, Voluntary Vision)
rtnData <- rtnData %>% filter(COVERAGE_TYPE != "Misc")
# classify groups into size by eligible lives
# (NA eligible lives fall through to the literal string "NA", not a real NA)
rtnData <- rtnData %>% mutate(GROUP_SIZE = case_when(ELIGIBLE_LIVES < 100 ~ "S",
                                                     ELIGIBLE_LIVES >= 100 & ELIGIBLE_LIVES < 1000 ~ "M",
                                                     ELIGIBLE_LIVES >= 1000 & ELIGIBLE_LIVES < 10000 ~ "L",
                                                     ELIGIBLE_LIVES >= 10000 ~ "XL",
                                                     TRUE ~ "NA"))
# add rtn column: ratio of final quoted premium to actuarially needed premium
rtnData <- rtnData %>% mutate(RTN = FINAL_QUOTED_PREMIUM / NEEDED_PREMIUM)
# write rtn data to Excel
#write_xlsx(rtnData, path = output, col_names = TRUE)
# ======================================== M O D E L I N G ========================================
# shift renewal instance for logarithmic model because log(0) = -inf
rtnData <- rtnData %>% mutate(RENEWAL_INSTANCE = RENEWAL_INSTANCE + 1.0)
# create modeling factors based on coverage type, region & industry.
# BUG FIX: the original wrapped this in as.data.table(), but data.table is
# never loaded by this script (only readxl/writexl/dplyr/ggplot2 are), so the
# call would fail. distinct() + arrange() yields the same unique, sorted
# combinations using dplyr, which is already attached.
model.factors <- rtnData %>%
  distinct(COVERAGE_TYPE, UW_REGION, INDUSTRY) %>%
  arrange(COVERAGE_TYPE, UW_REGION, INDUSTRY)
# initialize model list (one premium-weighted log-linear fit per combination)
models <- list()
# build out one model per observed (coverage type) x (region) x (industry) combination
for (i in seq_len(nrow(model.factors))) {
  # pull in relevant data for each factor
  model.data <- rtnData %>% filter(COVERAGE_TYPE == model.factors[i, "COVERAGE_TYPE"][[1]],
                                   UW_REGION == model.factors[i, "UW_REGION"][[1]],
                                   INDUSTRY == model.factors[i, "INDUSTRY"][[1]]) %>% select(RENEWAL_INSTANCE, RTN, NEEDED_PREMIUM)
  # save regression to model list; weighting by needed premium so large cases
  # dominate the fit
  models[[i]] <- lm(RTN ~ log(RENEWAL_INSTANCE), data = model.data, weights = NEEDED_PREMIUM)
}
# example using short-term disability in the service industry from the east region
std.east.service <- rtnData %>% filter(COVERAGE_TYPE == "STD", UW_REGION == "east", INDUSTRY == "Services") %>%
  select(RENEWAL_INSTANCE, RTN, NEEDED_PREMIUM)
# create a logarithmic model weighted by needed premium
std.east.service.fit <- lm(RTN ~ log(RENEWAL_INSTANCE), data = std.east.service, weights = NEEDED_PREMIUM)
# generate sampling points for model
x <- data.frame(RENEWAL_INSTANCE = seq(from = range(std.east.service$RENEWAL_INSTANCE)[1],
                                       to = range(std.east.service$RENEWAL_INSTANCE)[2],
                                       length.out = 100))
# get prediction errors
errors <- predict(std.east.service.fit, newdata = x, se.fit = TRUE)
# create alpha confidence interval.
# BUG FIX: the original used qnorm(alpha) directly, which for alpha = 0.99
# gives a two-sided 98% band; for a two-sided alpha-level interval the
# critical value is qnorm(1 - (1 - alpha) / 2).
alpha <- 0.99
z.crit <- qnorm(1 - (1 - alpha) / 2)
x$lci <- errors$fit - z.crit * errors$se.fit
x$fit <- errors$fit
x$uci <- errors$fit + z.crit * errors$se.fit
# plot points, model and confidence interval
ggplot(x, aes(x = RENEWAL_INSTANCE, y = fit)) +
  xlab("Renewal") + ylab("RTN") +
  theme_bw() + geom_line() + geom_smooth(aes(ymin = lci, ymax = uci), stat = "identity") +
  geom_point(data = std.east.service, aes(x = RENEWAL_INSTANCE, y = RTN, size = NEEDED_PREMIUM))
|
# Moral Foundations Dictionary
# Build the MFD quanteda dictionary object from the distributed .dic file and
# store it as package data for this package.
# NOTE(review): devtools::use_data() was deprecated and later removed in
# devtools 2.x in favor of usethis::use_data(); confirm the devtools version
# this package pins before running.
library("quanteda")
data_dictionary_MFD <- dictionary(file = "sources/MFD/moral_foundations_dictionary.dic")
devtools::use_data(data_dictionary_MFD, overwrite = TRUE)
| /sources/MFD/create-data_dictionary_MFD.R | no_license | evanodell/quanteda.dictionaries | R | false | false | 201 | r | # Moral Foundations Dictionary
# Build the MFD quanteda dictionary object from the distributed .dic file and
# store it as package data for this package.
# NOTE(review): devtools::use_data() was deprecated and later removed in
# devtools 2.x in favor of usethis::use_data(); confirm the devtools version
# this package pins before running.
library("quanteda")
data_dictionary_MFD <- dictionary(file = "sources/MFD/moral_foundations_dictionary.dic")
devtools::use_data(data_dictionary_MFD, overwrite = TRUE)
|
\name{timeZoneList}
\alias{timeZoneList}
\title{
Time Zone List
}
\description{
Returns or modifies the time zone list.
}
\usage{
timeZoneList(...)
}
\arguments{
\item{\dots}{
(see below)
}
}
\value{
returns the value of the time zone list as it was before the function call.
If arguments are given, the value is returned invisibly.
}
\section{Side Effects}{
If arguments are given, they are used to modify the current value
of \code{.time.zone.list}, which is assigned in the \code{splusTimeDate} package environment.
As with \code{timeDateOptions}, if you want your entries in the time zone list to persist in
subsequent sessions, you should call \code{timeZoneList} in \code{.First}.
}
\details{
The time zone list is a named list whose names are the character strings that
are recognized when you convert strings to time objects, and
whose elements are the corresponding time zone objects. (See the
documentation for class \code{timeZone}.) The
\code{timeZoneList} function provides an easy way for the user to define
the character strings to use for input of given time zones, and
to define user-defined time zones for use in \code{timeDate} objects.
For example, a North American user would
probably want to recognize "EST" as the US or Canadian time zone
known as Eastern Standard Time, whereas an Australian user might prefer to
have "EST" refer to Eastern Australian time.
The \code{timeZoneList} function has the following behavior:
\itemize{
\item If no arguments are given, the current time zone list
is returned.
\item If a single list is given as the argument,
its named components are added to the time zone list.
\item If multiple named arguments are given, they are added to the list.
}
In either of the two latter
cases, the elements to be added to the list must be time zone objects.
The default time zone list has the following built-in zone components.
(See documentation on the \code{timeZone} class for more information.)
\describe{
\item{Atlantic}{
can/atlantic (Standard/daylight Canadian Atlantic time)
}
\item{ADT}{
can/atlantic
}
\item{AST}{
can/atlantic
}
\item{Halifax}{
can/atlantic
}
\item{PuertoRico}{
st/atlantic (Atlantic Standard Time, Puerto Rico and Virgin Islands)
}
\item{Eastern}{
us/eastern (Standard/daylight US Eastern time)
}
\item{EST}{
us/eastern
}
\item{EDT}{
us/eastern
}
\item{EST5EDT}{
us/eastern
}
\item{EST/EDT}{
us/eastern
}
\item{Indiana}{
st/eastern (Standard only US/Canadian Eastern time)
}
\item{Toronto}{
can/eastern (Standard/daylight Canadian Eastern time)
}
\item{Central}{
us/central (Standard/daylight US Central time)
}
\item{CST}{
us/central
}
\item{CDT}{
us/central
}
\item{CST6CDT}{
us/central
}
\item{CST/CDT}{
us/central
}
\item{Chicago}{
us/central
}
\item{Winnipeg}{
can/central (Standard/daylight Canadian Central time)
}
\item{Mountain}{
us/mountain (Standard/daylight US Mountain time)
}
\item{MST}{
us/mountain
}
\item{MDT}{
us/mountain
}
\item{MST7MDT}{
us/mountain
}
\item{MST/MDT}{
us/mountain
}
\item{Denver}{
us/mountain
}
\item{Arizona}{
st/mountain (Standard only US/Canadian Mountain time)
}
\item{Edmonton}{
can/mountain (Standard/daylight Canadian Mountain time)
}
\item{Pacific}{
us/pacific (Standard/daylight US Pacific time)
}
\item{PST}{
us/pacific
}
\item{PDT}{
us/pacific
}
\item{PST8PDT}{
us/pacific
}
\item{PST/PDT}{
us/pacific
}
\item{Vancouver}{
can/pacific (Standard/daylight Canadian Pacific time)
}
\item{Alaska}{
us/alaska (Standard/daylight US Alaska time)
}
\item{AKST}{
us/alaska
}
\item{AKDT}{
us/alaska
}
\item{AKST/AKDT}{
us/alaska
}
\item{Aleutian}{
us/hawaii (Standard/daylight US Hawaii/Aleutian time)
}
\item{HST}{
st/hawaii (Standard only US Hawaii/Aleutian time)
}
\item{Hawaii}{
st/hawaii
}
\item{Midway}{
st/samoa (Standard time for Samoa)
}
\item{Samoa}{
st/samoa
}
\item{SST}{
st/samoa
}
\item{Japan}{
st/japan (Standard time for Japan)
}
\item{Tokyo}{
st/japan
}
\item{JST}{
st/japan
}
\item{China}{
st/china (Standard time for China and Western Australia)
}
\item{HongKong}{
hongkong (Standard/daylight time for Hong Kong)
}
\item{Singapore}{
singapore (Standard time for Singapore, reflecting changed zones in 1982)
}
\item{Sydney}{
aust/nsw (Standard/summer time for New South Wales, Australia)
}
\item{Hobart}{
aust/tasmania (Standard/summer time for Tasmania, Australia)
}
\item{Melbourne}{
aust/victoria (Standard/summer time for Victoria, Australia)
}
\item{Adelaide}{
aust/south (Standard/summer time for South Australia)
}
\item{Darwin}{
st/caustralia (Standard only time for Central Australia)
}
\item{Perth}{
aust/western (Standard/daylight time for Western Australia)
}
\item{Auckland}{
newzealand (Standard time for New Zealand)
}
\item{NZST}{
newzealand
}
\item{NZDT}{
newzealand
}
\item{Marshall}{
st/newzealand (Marshall Islands Standard Time)
}
\item{Wake}{
st/newzealand (Wake Islands Standard Time)
}
\item{IDLE}{
st/newzealand (International Date Line East)
}
\item{Chamorro}{
st/eaustralia (Chamorro Standard Time - Guam and Northern Mariana Islands)
}
\item{ChST}{
st/eaustralia (Chamorro Standard Time - Guam and Northern Mariana Islands)
}
\item{Yap}{
st/eaustralia (Yap Time)
}
\item{YAPT}{
st/eaustralia (Yap Time)
}
\item{Caroline}{
st/caroline (Line Islands Time - Caroline and other Line Islands)
}
\item{LINT}{
st/caroline (Line Islands Time - Caroline and other Line Islands)
}
\item{UTC}{
utc (Greenwich Mean Time/Universal Coordinated Time)
}
\item{GMT}{
utc
}
\item{GDT}{
britain (Standard time for Great Britain)
}
\item{London}{
britain (Standard time for Great Britain)
}
\item{BST}{
britain
}
\item{WET}{
europe/west (Standard/summer time for EU members, Western zone)
}
\item{Wes}{
europe/west (Standard/summer time for EU members, Western zone)
}
\item{WEST}{
europe/west
}
\item{WET/WEST}{
europe/west
}
\item{WED}{
europe/west
}
\item{WEDT}{
europe/west
}
\item{CET}{
europe/central (Standard/summer time for EU members, Central zone)
}
\item{CEST}{
europe/central
}
\item{MET}{
europe/central
}
\item{MEST}{
europe/central
}
\item{MET/MEST}{
europe/central
}
\item{EET}{
europe/east (Standard/summer time for EU members, Eastern zone)
}
\item{EEST}{
europe/east
}
\item{EET/EEST}{
europe/east
}
}
}
\seealso{
\code{\link{timeZoneC}}, \code{\link{timeZoneR}}, \code{\linkS4class{timeZone}} class.
}
\examples{
# return the entire time zone list
timeZoneList()
# define the string "PDT8PST" to mean US Pacific time
timeZoneList(PDT8PST = timeZoneC("us/pacific"))
# define a time zone for a small island 1/2 hour east of GMT
timeZoneList(small.island = timeZoneR(offset=1800))
}
\keyword{chron}
% docclass is function
% Converted by mySd2Rd version 47442.
| /man/timznlst.Rd | no_license | cran/splusTimeDate | R | false | false | 6,745 | rd | \name{timeZoneList}
\alias{timeZoneList}
\title{
Time Zone List
}
\description{
Returns or modifies the time zone list.
}
\usage{
timeZoneList(...)
}
\arguments{
\item{\dots}{
(see below)
}
}
\value{
returns the value of the time zone list as it was before the function
call. If arguments are given, the value is returned invisibly.
}
\section{Side Effects}{
If arguments are given, they are used to modify the current value
of \code{.time.zone.list}, which is assigned in the \code{splusTimeDate} package environment.
It is like \code{timeDateOptions}: if you want your additions to the time zone list
to persist in subsequent sessions, call \code{timeZoneList} in \code{.First}.
}
\details{
The time zone list is a named list whose names are the character strings that
are recognized when you convert strings to time objects, and
whose elements are the corresponding time zone objects. (See the
documentation for class \code{timeZone}.) The
\code{timeZoneList} function provides an easy way for the user to define
the character strings to use for input of given time zones, and
to define user-defined time zones for use in \code{timeDate} objects.
For example, a North American user would
probably want to recognize "EST" as the US or Canadian time zone
known as Eastern Standard Time, whereas an Australian user might prefer to
have "EST" refer to Eastern Australian time.
The \code{timeZoneList} function has the following behavior:
\itemize{
\item If no arguments are given, the current time zone list
is returned.
\item If a single list is given as the argument,
its named components are added to the time zone list.
\item If multiple named arguments are given, they are added to the list.
}
In either of the two latter
cases, the elements to be added to the list must be time zone objects.
The default time zone list has the following built-in zone components.
(See documentation on the \code{timeZone} class for more information.)
\describe{
\item{Atlantic}{
can/atlantic (Standard/daylight Canadian Atlantic time)
}
\item{ADT}{
can/atlantic
}
\item{AST}{
can/atlantic
}
\item{Halifax}{
can/atlantic
}
\item{PuertoRico}{
st/atlantic (Atlantic Standard Time, Puerto Rico and Virgin Islands)
}
\item{Eastern}{
us/eastern (Standard/daylight US Eastern time)
}
\item{EST}{
us/eastern
}
\item{EDT}{
us/eastern
}
\item{EST5EDT}{
us/eastern
}
\item{EST/EDT}{
us/eastern
}
\item{Indiana}{
st/eastern (Standard only US/Canadian Eastern time)
}
\item{Toronto}{
can/eastern (Standard/daylight Canadian Eastern time)
}
\item{Central}{
us/central (Standard/daylight US Central time)
}
\item{CST}{
us/central
}
\item{CDT}{
us/central
}
\item{CST6CDT}{
us/central
}
\item{CST/CDT}{
us/central
}
\item{Chicago}{
us/central
}
\item{Winnipeg}{
can/central (Standard/daylight Canadian Central time)
}
\item{Mountain}{
us/mountain (Standard/daylight US Mountain time)
}
\item{MST}{
us/mountain
}
\item{MDT}{
us/mountain
}
\item{MST7MDT}{
us/mountain
}
\item{MST/MDT}{
us/mountain
}
\item{Denver}{
us/mountain
}
\item{Arizona}{
st/mountain (Standard only US/Canadian Mountain time)
}
\item{Edmonton}{
can/mountain (Standard/daylight Canadian Mountain time)
}
\item{Pacific}{
us/pacific (Standard/daylight US Pacific time)
}
\item{PST}{
us/pacific
}
\item{PDT}{
us/pacific
}
\item{PST8PDT}{
us/pacific
}
\item{PST/PDT}{
us/pacific
}
\item{Vancouver}{
can/pacific (Standard/daylight Canadian Pacific time)
}
\item{Alaska}{
us/alaska (Standard/daylight US Alaska time)
}
\item{AKST}{
us/alaska
}
\item{AKDT}{
us/alaska
}
\item{AKST/AKDT}{
us/alaska
}
\item{Aleutian}{
us/hawaii (Standard/daylight US Hawaii/Aleutian time)
}
\item{HST}{
st/hawaii (Standard only US Hawaii/Aleutian time)
}
\item{Hawaii}{
st/hawaii
}
\item{Midway}{
st/samoa (Standard time for Samoa)
}
\item{Samoa}{
st/samoa
}
\item{SST}{
st/samoa
}
\item{Japan}{
st/japan (Standard time for Japan)
}
\item{Tokyo}{
st/japan
}
\item{JST}{
st/japan
}
\item{China}{
st/china (Standard time for China and Western Australia)
}
\item{HongKong}{
hongkong (Standard/daylight time for Hong Kong)
}
\item{Singapore}{
singapore (Standard time for Singapore, reflecting changed zones in 1982)
}
\item{Sydney}{
aust/nsw (Standard/summer time for New South Wales, Australia)
}
\item{Hobart}{
aust/tasmania (Standard/summer time for Tasmania, Australia)
}
\item{Melbourne}{
aust/victoria (Standard/summer time for Victoria, Australia)
}
\item{Adelaide}{
aust/south (Standard/summer time for South Australia)
}
\item{Darwin}{
st/caustralia (Standard only time for Central Australia)
}
\item{Perth}{
aust/western (Standard/daylight time for Western Australia)
}
\item{Auckland}{
newzealand (Standard time for New Zealand)
}
\item{NZST}{
newzealand
}
\item{NZDT}{
newzealand
}
\item{Marshall}{
st/newzealand (Marshall Islands Standard Time)
}
\item{Wake}{
st/newzealand (Wake Islands Standard Time)
}
\item{IDLE}{
st/newzealand (International Date Line East)
}
\item{Chamorro}{
st/eaustralia (Chamorro Standard Time - Guam and Northern Mariana Islands)
}
\item{ChST}{
st/eaustralia (Chamorro Standard Time - Guam and Northern Mariana Islands)
}
\item{Yap}{
st/eaustralia (Yap Time)
}
\item{YAPT}{
st/eaustralia (Yap Time)
}
\item{Caroline}{
st/caroline (Line Islands Time - Caroline and other Line Islands)
}
\item{LINT}{
st/caroline (Line Islands Time - Caroline and other Line Islands)
}
\item{UTC}{
utc (Greenwich Mean Time/Universal Coordinated Time)
}
\item{GMT}{
utc
}
\item{GDT}{
britain (Standard time for Great Britain)
}
\item{London}{
britain (Standard time for Great Britain)
}
\item{BST}{
britain
}
\item{WET}{
europe/west (Standard/summer time for EU members, Western zone)
}
\item{Wes}{
europe/west (Standard/summer time for EU members, Western zone)
}
\item{WEST}{
europe/west
}
\item{WET/WEST}{
europe/west
}
\item{WED}{
europe/west
}
\item{WEDT}{
europe/west
}
\item{CET}{
europe/central (Standard/summer time for EU members, Central zone)
}
\item{CEST}{
europe/central
}
\item{MET}{
europe/central
}
\item{MEST}{
europe/central
}
\item{MET/MEST}{
europe/central
}
\item{EET}{
europe/east (Standard/summer time for EU members, Eastern zone)
}
\item{EEST}{
europe/east
}
\item{EET/EEST}{
europe/east
}
}
}
\seealso{
\code{\link{timeZoneC}}, \code{\link{timeZoneR}}, \code{\linkS4class{timeZone}} class.
}
\examples{
# return the entire time zone list
timeZoneList()
# define the string "PDT8PST" to mean US Pacific time
timeZoneList(PDT8PST = timeZoneC("us/pacific"))
# define a time zone for a small island 1/2 hour east of GMT
timeZoneList(small.island = timeZoneR(offset=1800))
}
\keyword{chron}
% docclass is function
% Converted by mySd2Rd version 47442.
|
\name{multispecies}
\alias{multispecies}
\title{
Summarizes data from multiple species.
}
\description{
This function may be used to summarize data from different species or
similar (e.g. different lineages, etc). By default, it summarizes with
the mean and standard deviation, but different functions may be used.
}
\usage{
multispecies(..., FUN=list(mean=mean, sd=sd), na.rm=FALSE)
}
\arguments{
\item{...}{
Input data to be summarized. It must be numeric and it can be multiple
vectors of the same size and order, or more simply a matrix with all
data to be summarised. The matrix must have the different species/data in
columns.
}
\item{FUN}{
This is a list of functions to be applied to summarize the data. By default
it uses the mean and sd, but it can be any other function that returns a
number from a vector (e.g. max, min) or a user-defined function. If the
objects are named in the FUN list, then those names will be given to the
resulting columns. Otherwise, functions are applied in the same order as
given.
}
\item{na.rm}{
A logical indicating whether missing values should be removed. Will only
work if the functions in FUN accept it.
}
}
\details{
This function is a simple wrapper with some error checking for the native
R function 'apply'.
}
\value{
Returns a matrix with functions applied in the same order as FUN.
}
\author{
Pedro Tarroso <ptarroso@cibio.up.pt>
}
\seealso{
\code{\link{apply}}
\code{\link{princomp}}
\code{\link{prcomp}}
}
\examples{
data(vipers)
data(d.gen)
data(grid)
# create a matrix of distances from sample points (columns) to all
# grid pixels
rd <- geo.dist(grid, vipers[,1:2])
#interpolate with idw
result <- intgen.idw(rd, d.gen)
ms <- multispecies(result)
# plot the mean
grid.image(ms, grid, main = "Mean")
# plot the standard deviation
grid.image(ms, grid, ic=2, main = "Standard Deviation")
}
\keyword{ idw }
\keyword{ interpolation }
| /source/man/multispecies.Rd | no_license | ptarroso/phylin | R | false | false | 2,000 | rd | \name{multispecies}
\alias{multispecies}
\title{
Summarizes data from multiple species.
}
\description{
This function may be used to summarize data from different species or
similar (e.g. different lineages, etc). By default, it summarizes with
the mean and standard deviation, but different functions may be used.
}
\usage{
multispecies(..., FUN=list(mean=mean, sd=sd), na.rm=FALSE)
}
\arguments{
\item{...}{
Input data to be summarized. It must be numeric and it can be multiple
vectors of the same size and order, or more simply a matrix with all
data to be summarised. The matrix must have the different species/data in
columns.
}
\item{FUN}{
This is a list of functions to be applied to summarize the data. By default
it uses the mean and sd, but it can be any other function that returns a
number from a vector (e.g. max, min) or a user-defined function. If the
objects are named in the FUN list, then those names will be given to the
resulting columns. Otherwise, functions are applied in the same order as
given.
}
\item{na.rm}{
A logical indicating whether missing values should be removed. Will only
work if the functions in FUN accept it.
}
}
\details{
This function is a simple wrapper with some error checking for the native
R function 'apply'.
}
\value{
Returns a matrix with functions applied in the same order as FUN.
}
\author{
Pedro Tarroso <ptarroso@cibio.up.pt>
}
\seealso{
\code{\link{apply}}
\code{\link{princomp}}
\code{\link{prcomp}}
}
\examples{
data(vipers)
data(d.gen)
data(grid)
# create a matrix of distances from sample points (columns) to all
# grid pixels
rd <- geo.dist(grid, vipers[,1:2])
#interpolate with idw
result <- intgen.idw(rd, d.gen)
ms <- multispecies(result)
# plot the mean
grid.image(ms, grid, main = "Mean")
# plot the standard deviation
grid.image(ms, grid, ic=2, main = "Standard Deviation")
}
\keyword{ idw }
\keyword{ interpolation }
|
source("expr_functions.R")
# Prepare a design matrix X for the CPT experiments and run a two-stage
# genetic-algorithm search for orderings.  gen_X() and find_eta_GA() are
# defined in expr_functions.R (not visible here).
# Fixed experiment sizes: n rows of X; m is forwarded to find_eta_GA
# (its exact role is defined in expr_functions.R -- not visible here).
n <- 1000
m <- 19
# GA budget: a short first run (rounds1) whose state is resumed by a
# longer second run (rounds2).
rounds1 <- 100
rounds2 <- 900
# Experiment configuration comes from environment variables so the script
# can be driven by a job scheduler.
X_dist <- Sys.getenv("Xdist")
ratio <- floor(as.numeric(Sys.getenv("ratio")))
ninds <- floor(as.numeric(Sys.getenv("ninds")))
seed <- as.numeric(Sys.getenv("seed"))
set.seed(seed)
# Output file name encodes the full configuration for reproducibility.
X_file <- paste0("../data/mat_", X_dist, "_ratio", ratio,
                 "_ninds", ninds, "_seed", seed, ".RData")
# Number of columns of X: n / ratio, rounded up.
p <- ceiling(n / ratio)
X <- gen_X(n, p, X_dist)  # defined in expr_functions.R
if (ninds == 1){
    # A single index: no weighting matrix is needed.
    M <- NULL
} else {
    # ninds x ninds matrix: diag(1/ninds, 2/ninds, ..., 1) with 1 added to
    # every entry (so M is dense, diagonal entries 1 + k/ninds).
    M <- diag((1:ninds) / ninds) + 1
}
#### Genetic Algorithm
# First GA run from scratch.
res1 <- find_eta_GA(X, m, testinds = 1:ninds,
                    popSize = 10, rounds = rounds1,
                    M = M)
# Second run resumes from the first run's GA object for the remaining
# rounds.
res2 <- find_eta_GA(X, m, ga_obj = res1$ga_obj,
                    testinds = 1:ninds,
                    popSize = 10, rounds = rounds2,
                    M = M)
# Persist the design matrix together with both orderings.
result <- list(X = X,
               ordering1 = res1$ordering,
               ordering2 = res2$ordering)
save(file = X_file, result)
| /code/CPT_expr_prepareX.R | no_license | lihualei71/CPT | R | false | false | 953 | r | source("expr_functions.R")
n <- 1000
m <- 19
rounds1 <- 100
rounds2 <- 900
X_dist <- Sys.getenv("Xdist")
ratio <- floor(as.numeric(Sys.getenv("ratio")))
ninds <- floor(as.numeric(Sys.getenv("ninds")))
seed <- as.numeric(Sys.getenv("seed"))
set.seed(seed)
X_file <- paste0("../data/mat_", X_dist, "_ratio", ratio,
"_ninds", ninds, "_seed", seed, ".RData")
p <- ceiling(n / ratio)
X <- gen_X(n, p, X_dist)
if (ninds == 1){
M <- NULL
} else {
M <- diag((1:ninds) / ninds) + 1
}
#### Genetic Algorithm
res1 <- find_eta_GA(X, m, testinds = 1:ninds,
popSize = 10, rounds = rounds1,
M = M)
res2 <- find_eta_GA(X, m, ga_obj = res1$ga_obj,
testinds = 1:ninds,
popSize = 10, rounds = rounds2,
M = M)
result <- list(X = X,
ordering1 = res1$ordering,
ordering2 = res2$ordering)
save(file = X_file, result)
|
lnl.binomial <- function(param, y, X, id, model, link, rn, start.sigma = FALSE){
    ## Log-likelihood of a binomial panel model ("pooling" or "random"
    ## effects) with a probit or logit link, evaluated at `param`.
    ##
    ## param       coefficient vector: the first ncol(X) entries are beta;
    ##             for model == "random" the next entry is sigma, the
    ##             standard deviation of the individual random effect.
    ## y           binary response (0/1).
    ## X           regressor matrix, one row per observation.
    ## id          individual identifier, one entry per observation.
    ## model       "pooling" or "random".
    ## link        "probit" or "logit".
    ## rn          quadrature rule with components $nodes and $weights; the
    ##             division by sqrt(pi) below suggests Gauss-Hermite
    ##             nodes/weights -- TODO confirm against the caller.
    ## start.sigma if TRUE, return a starting value for sigma instead of
    ##             the log-likelihood.
    ##
    ## Returns the scalar log-likelihood with the observation-level
    ## gradient in attr(., "gradient") and the hessian matrix in
    ## attr(., "hessian").
    if (link == 'probit'){
        # F: cdf, f: density, e: first derivative of the density.
        F <- pnorm
        f <- dnorm
        e <- function(x) - x * f(x)
    }
    if (link == 'logit'){
        F <- function(x) exp(x) / (1 + exp(x))
        f <- function(x) F(x) * (1 - F(x))
        e <- function(x) (exp(x) - exp(3*x))/(1+exp(x))^4
    }
    # Inverse Mills ratio f/F and its derivative.
    mills <- function(x) f(x) / F(x)
    millsp <- function(x) e(x)/F(x) - (f(x)/F(x))^2
    K <- ncol(X)             # number of regressors
    n <- length(unique(id))  # number of individuals (computed but unused below)
    N <- length(y)           # number of observations
    q <- 2 * y - 1           # sign transform: y in {0, 1} -> q in {-1, +1}
    beta <- param[1:K]
    bX <- as.numeric(crossprod(t(X), beta))  # linear predictor X %*% beta
    if (start.sigma){
        # One Newton-type step per individual (score over curvature of an
        # individual intercept); the spread of these effects yields a
        # starting value for sigma.
        mu <- - tapply(q * mills(q * bX), id, sum) / tapply(millsp(q * bX), id, sum)
        return(sqrt(2) * sd(mu))
    }
    if (model == "random"){
        sigma <- param[K + 1L]
        # Per-observation success probability at each quadrature node ...
        Pitr <- lapply(rn$nodes, function(x) F( q * (bX + sigma * x)))
        # ... multiplied within each individual ...
        Pir <- lapply(Pitr, function(x) tapply(x, id, prod))
        # ... then integrated over the random effect by the weighted sum.
        Li <- Reduce("+", mapply("*", Pir, rn$weights, SIMPLIFY = FALSE)) / sqrt(pi)
    }
    if (model == "pooling") Li <- F(q * bX)
    lnL <- sum(log(Li))
    if (model == "random"){
        # Gradient: per-node score contributions; column 1 multiplies X
        # (beta part) and column 2 is the node value (sigma part).  Pir is
        # expanded back to observation level via the id name lookup.
        gitr <- mapply(function(w, x, p)
            q * w * cbind(rep(1, N), x) *
                as.numeric(p[as.character(id)]) *
                    mills( q * (bX + sigma * x)),
                       rn$weights, rn$nodes, Pir, SIMPLIFY = FALSE)
        g <- Reduce("+", gitr)
        gradi <- cbind(g[, 1] * X, g[, 2]) /
            as.numeric(Li[as.character(id)])/ sqrt(pi)
    }
    if (model == "pooling") gradi <- q * mills(q * bX) * X
    if (model == "pooling")
        H <- crossprod(millsp(q * bX) * X, X)
    if (model == "random"){
        # Hessian: each per-node term combines the outer product of the
        # per-individual scores (H1) with a curvature term (H2); the outer
        # product of the summed observation gradients is subtracted after
        # the quadrature sum.
        # NOTE(review): the sign convention (hessian vs. negative hessian)
        # is not established here; confirm against the optimiser that
        # consumes attr(., "hessian").
        Hr <- mapply(
            function(w, v, p){
                p <- p / Li
                P <- p[as.character(id)]
                p <- as.numeric(p)
                P <- as.numeric(P)
                z <- q * (bX + sigma * v)
                gi <- q * mills(z) * cbind(X, v)
                gi <- apply(gi, 2, tapply, id, sum)
                H1 <- crossprod(p * gi, gi)
                H2 <- crossprod(P * millsp(z) * cbind(X, v), cbind(X, v))
                (H1 + H2) * w
            },
            rn$weights, rn$nodes, Pir, SIMPLIFY = FALSE)
        H <- Reduce("+", Hr) / sqrt(pi) - crossprod(apply(gradi, 2, tapply, id, sum))
    }
    # Attach derivatives for gradient-based optimisers.
    attr(lnL, 'gradient') <- gradi
    attr(lnL, 'hessian') <- H
    lnL
}
| /pglm/R/lnl.binomial.R | no_license | ingted/R-Examples | R | false | false | 2,351 | r | lnl.binomial <- function(param, y, X, id, model, link, rn, start.sigma = FALSE){
if (link == 'probit'){
F <- pnorm
f <- dnorm
e <- function(x) - x * f(x)
}
if (link == 'logit'){
F <- function(x) exp(x) / (1 + exp(x))
f <- function(x) F(x) * (1 - F(x))
e <- function(x) (exp(x) - exp(3*x))/(1+exp(x))^4
}
mills <- function(x) f(x) / F(x)
millsp <- function(x) e(x)/F(x) - (f(x)/F(x))^2
K <- ncol(X)
n <- length(unique(id))
N <- length(y)
q <- 2 * y - 1
beta <- param[1:K]
bX <- as.numeric(crossprod(t(X), beta))
if (start.sigma){
mu <- - tapply(q * mills(q * bX), id, sum) / tapply(millsp(q * bX), id, sum)
return(sqrt(2) * sd(mu))
}
if (model == "random"){
sigma <- param[K + 1L]
Pitr <- lapply(rn$nodes, function(x) F( q * (bX + sigma * x)))
Pir <- lapply(Pitr, function(x) tapply(x, id, prod))
Li <- Reduce("+", mapply("*", Pir, rn$weights, SIMPLIFY = FALSE)) / sqrt(pi)
}
if (model == "pooling") Li <- F(q * bX)
lnL <- sum(log(Li))
if (model == "random"){
gitr <- mapply(function(w, x, p)
q * w * cbind(rep(1, N), x) *
as.numeric(p[as.character(id)]) *
mills( q * (bX + sigma * x)),
rn$weights, rn$nodes, Pir, SIMPLIFY = FALSE)
g <- Reduce("+", gitr)
gradi <- cbind(g[, 1] * X, g[, 2]) /
as.numeric(Li[as.character(id)])/ sqrt(pi)
}
if (model == "pooling") gradi <- q * mills(q * bX) * X
if (model == "pooling")
H <- crossprod(millsp(q * bX) * X, X)
if (model == "random"){
Hr <- mapply(
function(w, v, p){
p <- p / Li
P <- p[as.character(id)]
p <- as.numeric(p)
P <- as.numeric(P)
z <- q * (bX + sigma * v)
gi <- q * mills(z) * cbind(X, v)
gi <- apply(gi, 2, tapply, id, sum)
H1 <- crossprod(p * gi, gi)
H2 <- crossprod(P * millsp(z) * cbind(X, v), cbind(X, v))
(H1 + H2) * w
},
rn$weights, rn$nodes, Pir, SIMPLIFY = FALSE)
H <- Reduce("+", Hr) / sqrt(pi) - crossprod(apply(gradi, 2, tapply, id, sum))
}
attr(lnL, 'gradient') <- gradi
attr(lnL, 'hessian') <- H
lnL
}
|
# Take input from the user and display the values.
# NOTE: the original file contained stray console-transcript lines ("Bav"
# and "40") between the readline() calls; a bare `Bav` is an undefined
# symbol and errors when the script is sourced, so they are removed.
name <- readline("Enter your name: ")
age <- readline("Enter your age: ")
hello <- paste("Thank you! You have told me your name is:", name,
               "and you are:", age, "years old.")
print(hello)
# List the objects currently in memory.
ls()
# Memory used by each object, sorted ascending.
# vapply() instead of sapply() so the result type is always numeric.
sort(vapply(ls(), function(x) as.numeric(object.size(get(x))), numeric(1)))
# Total memory used by the objects in this R session.
total_size <- object.size(x = lapply(ls(), get))
print(total_size)
print(total_size, units = "Mb")
# Create a sequence of numbers from 20 to 50.
print("Sequence of numbers from 20 to 50:")
print(seq(20, 50))
# Find the mean of the numbers 20 to 60.
print("Mean of numbers from 20 to 60:")
print(mean(20:60))
# Find the sum of the numbers 51 to 91.
print("Sum of numbers from 51 to 91:")
print(sum(51:91))
# Vector of 10 random integers between -50 and +50 (with replacement).
# Renamed from `vector` to avoid shadowing base::vector().
random_values <- sample(-50:50, 10, replace = TRUE)
print("Content of the vector:")
print("10 random integer values between -50 and +50:")
print(random_values)
| /files/R Tasks - Session 1 - 17.11.2020.R | no_license | bavn/ttax | R | false | false | 992 | r | # take input from user and display values
name <- readline("Enter your name")
Bav
age <- readline("Enter your age")
40
hello <- paste("Thank you! You have told me your name is:", name, "and you are:", age, "years old.")
print(hello)
# get details of objects in the memory
ls()
# how much memory is being used by each object
sort( sapply(ls(),function(x){object.size(get(x))}))
# to find our total memory used by R session
object.size(x=lapply(ls(), get))
print(object.size(x=lapply(ls(), get)), units="Mb")
# create sequ of numbers from 20-50
print("Sequence of numbers from 20 to 50:")
print(seq(20,50))
# find the mean of number 20-60
print("Mean of numbers from 20 to 60:")
print(mean(20:60))
# find sum of numbers 51-91
print("Sum of numbers from 51 to 91:")
print(sum(51:91))
#vector which creates 10 random integer no. between -50 and 50
vector = sample(-50:50, 10, replace=TRUE)
print("Content of the vector:")
print("10 random integer values between -50 and +50:")
print(vector)
|
\name{MacroBiblio}
\alias{MacroBiblio}
\title{Analysis of Bibliography (MacroBiblio)}
\description{
Macro function for the analysis of a bibliographic database.
}
\usage{
MacroBiblio(base, num.text = "Abstract", num.agg = "Year", idiom = "en",
lminword = 3, Fmin = 10, Dmin = 5, Fmax = NULL, equivalence = NULL,
stop.word.user = NULL, lmd = 3, lmk = 3, ncp = 10, row.sup = NULL,
col.sup = NULL, graph = TRUE, axes = c(1, 2), proba = 0.01)
}
\arguments{
\item{base}{data frame with I rows (abstracts/articles) and J columns. The names of the main columns must be: Title, Year, Abstract, Journal; in addition, the database may have other quantitative or categorical variables such as: "Author", "Year_class", etc.}
\item{num.text}{column index(es) or name(s) of the textual column(s) (by default "Abstract" )}
\item{num.agg}{column index or name of the aggregation column (by default "Year")}
\item{idiom}{language of the textual column(s) (by default English "en")}
\item{lminword}{minimum threshold on the word length (by default 3)}
\item{Fmin}{minimum threshold on the word frequency (by default 10)}
\item{Dmin}{minimum threshold on the number of documents using the word (by default 5)}
\item{Fmax}{maximum threshold on the word frequency}
\item{equivalence}{data frame with n rows and two columns (original word and new word)}
\item{stop.word.user}{vector indicating the stopwords chosen by the user }
\item{lmk}{minimum threshold on the contribution for selecting the metakeys (by default 3, which mean contribution 3 times greater than the mean contribution)}
\item{lmd}{minimum threshold on the contribution for selecting the metadocs (by default 3, which mean contribution 3 times greater than the mean contribution)}
\item{ncp}{number of dimensions stored in the results (by default 10)}
\item{row.sup}{vector with the index(es) or name(s) of the supplementary row(s) }
\item{col.sup}{vector with the index(es) or name(s) of the supplementary frequency column(s)}
\item{graph}{boolean, if TRUE graphs are displayed}
\item{axes}{a length 2 vector specifying the dimensions to plot}
\item{proba}{significance threshold used to select the characteristic words in each category (by default 0.01)}
}
\value{
Returns a list including:
\item{Corpus}{summary of the information about the corpus}
\item{Glossary}{glossary of the selected words in frequency order}
\item{DocTermR}{documents by words (all documents, selected words)}
\item{Tagreg}{lexical aggregated table}
\item{Metakeys.Metadocs}{graphical representation of metakeys and metadocs}
\item{res.CA}{results of direct correspondence analysis}
\item{res.CA.Agreg}{results of aggregate correspondence analysis by year}
\item{CharWord}{characteristic words of each category of the aggregation variable}
\item{res.CHCPC}{results of constrained hierarchical clustering}
\item{res.MFACT}{result of multiple factor analysis for contingency tables}
  \item{OrdWord}{words ordered by their coordinates on the first dimension}
\item{pioneers}{pioneer articles}
}
\references{
Kerbaol, M. Bansard, JY.;Coatrieux, JL. ( 2006) An analysis of IEEE publications in biomedical engineering. \emph{IEEE Engineering in Medicine and Biology. Magazine}.
Morin, A.( 2006 )Intensive Use of Factorial Correspondence Analysis for Text Mining:\emph{ Application with Statistical Education Publications}.
Becue-Bertaut, M. (2014). Tracking verbal-based methods beyond conventional descriptive analysis in food science bibliography. A statistical approach. \emph{Food Quality and Preference},32, 2-15.
Lebart, L., Salem, A., & Berry, L. (1998). Exploring textual data. (D. Kluwer, Ed.).
}
\author{Daria M. Hernandez \email{daria.micaela.hernandez@upc.edu}, Monica Becue-Bertaut, Belchin Kostov}
\seealso{ \code{\link{print.MacroBiblio}},\code{\link{summary.MacroBiblio}}}
\examples{
\dontrun{
data(dataBiblio)
res.dataBiblio<-MacroBiblio(dataBiblio, lmd = 6, lmk =6)
print(res.dataBiblio)
summary(res.dataBiblio, nword=20)
}
}
\keyword{multivariate}
| /man/MacroBiblio.Rd | no_license | cran/TextoMineR | R | false | false | 4,128 | rd | \name{MacroBiblio}
\alias{MacroBiblio}
\title{Analysis of Bibliography (MacroBiblio)}
\description{
Macro function for the analysis of a bibliographic database.
}
\usage{
MacroBiblio(base, num.text = "Abstract", num.agg = "Year", idiom = "en",
lminword = 3, Fmin = 10, Dmin = 5, Fmax = NULL, equivalence = NULL,
stop.word.user = NULL, lmd = 3, lmk = 3, ncp = 10, row.sup = NULL,
col.sup = NULL, graph = TRUE, axes = c(1, 2), proba = 0.01)
}
\arguments{
\item{base}{data frame with I rows (abstracts/articles) and J columns. The names of the main columns must be: Title, Year, Abstract, Journal; in addition, the database may have other quantitative or categorical variables such as: "Author", "Year_class", etc.}
\item{num.text}{column index(es) or name(s) of the textual column(s) (by default "Abstract" )}
\item{num.agg}{column index or name of the aggregation column (by default "Year")}
\item{idiom}{language of the textual column(s) (by default English "en")}
\item{lminword}{minimum threshold on the word length (by default 3)}
\item{Fmin}{minimum threshold on the word frequency (by default 10)}
\item{Dmin}{minimum threshold on the number of documents using the word (by default 5)}
\item{Fmax}{maximum threshold on the word frequency}
\item{equivalence}{data frame with n rows and two columns (original word and new word)}
\item{stop.word.user}{vector indicating the stopwords chosen by the user }
\item{lmk}{minimum threshold on the contribution for selecting the metakeys (by default 3, which mean contribution 3 times greater than the mean contribution)}
\item{lmd}{minimum threshold on the contribution for selecting the metadocs (by default 3, which mean contribution 3 times greater than the mean contribution)}
\item{ncp}{number of dimensions stored in the results (by default 10)}
\item{row.sup}{vector with the index(es) or name(s) of the supplementary row(s) }
\item{col.sup}{vector with the index(es) or name(s) of the supplementary frequency column(s)}
\item{graph}{boolean, if TRUE graphs are displayed}
\item{axes}{a length 2 vector specifying the dimensions to plot}
\item{proba}{significance threshold used to select the characteristic words in each category (by default 0.01)}
}
\value{
Returns a list including:
\item{Corpus}{summary of the information about the corpus}
\item{Glossary}{glossary of the selected words in frequency order}
\item{DocTermR}{documents by words (all documents, selected words)}
\item{Tagreg}{lexical aggregated table}
\item{Metakeys.Metadocs}{graphical representation of metakeys and metadocs}
\item{res.CA}{results of direct correspondence analysis}
\item{res.CA.Agreg}{results of aggregate correspondence analysis by year}
\item{CharWord}{characteristic words of each category of the aggregation variable}
\item{res.CHCPC}{results of constrained hierarchical clustering}
\item{res.MFACT}{result of multiple factor analysis for contingency tables}
  \item{OrdWord}{words ordered by their coordinates on the first dimension}
\item{pioneers}{pioneer articles}
}
\references{
Kerbaol, M. Bansard, JY.;Coatrieux, JL. ( 2006) An analysis of IEEE publications in biomedical engineering. \emph{IEEE Engineering in Medicine and Biology. Magazine}.
Morin, A.( 2006 )Intensive Use of Factorial Correspondence Analysis for Text Mining:\emph{ Application with Statistical Education Publications}.
Becue-Bertaut, M. (2014). Tracking verbal-based methods beyond conventional descriptive analysis in food science bibliography. A statistical approach. \emph{Food Quality and Preference},32, 2-15.
Lebart, L., Salem, A., & Berry, L. (1998). Exploring textual data. (D. Kluwer, Ed.).
}
\author{Daria M. Hernandez \email{daria.micaela.hernandez@upc.edu}, Monica Becue-Bertaut, Belchin Kostov}
\seealso{ \code{\link{print.MacroBiblio}},\code{\link{summary.MacroBiblio}}}
\examples{
\dontrun{
data(dataBiblio)
res.dataBiblio<-MacroBiblio(dataBiblio, lmd = 6, lmk =6)
print(res.dataBiblio)
summary(res.dataBiblio, nword=20)
}
}
\keyword{multivariate}
|
# global.R -- package loading and authentication for the tweet-analyzer app.

library(shiny)
library(DT)

##########################################################################
# PACKAGE LOADING
##########################################################################

# Attach `pkg`, installing it first when it is not yet available.
# `repos` optionally overrides the default CRAN mirror.
# This replaces fourteen near-identical if(!require(...)) blocks.
use_pkg <- function(pkg, repos = NULL, dependencies = TRUE) {
  if (!require(pkg, character.only = TRUE)) {
    if (is.null(repos)) {
      install.packages(pkg, dependencies = dependencies)
    } else {
      install.packages(pkg, repos = repos, dependencies = dependencies)
    }
    library(pkg, character.only = TRUE)
  }
}

# Twitter client -- see https://rtweet.info/ for how the tweets are
# retrieved.
use_pkg("rtweet")
# Maps and geolocation.
use_pkg("tmap")
use_pkg("ggplot2")
use_pkg("googleway")
use_pkg("stringi", repos = "http://cran.rstudio.com/")
use_pkg("yaml", repos = "http://cran.rstudio.com/")
use_pkg("mapsapi")
# leaflet is used to plot the map.
library("leaflet")
use_pkg("devtools", repos = "http://cran.rstudio.com/")

# Packages for the wordcloud.
use_pkg("tidytext")
use_pkg("dplyr")
use_pkg("stringr")
use_pkg("wordcloud")
library(stopwords)

############################ PART 1 - PLOTTING THE MAP ###################

##########################################################################
# AUTHENTICATION
##########################################################################

# keyring hides the user names and passwords.  The original code called
# install.packages("keyring") unconditionally on every run; it is now
# installed only when missing.
use_pkg("keyring")

# TWITTER AUTHENTICATION
# The credentials were stored once with:
#   key_set("app"); key_set("consumer_key"); key_set("consumer_secret")
create_token(
  app = key_get("app"),
  consumer_key = key_get("consumer_key"),
  consumer_secret = key_get("consumer_secret"))

# GOOGLE AUTHENTICATION
# API key for geocoding, stored once with key_set("key_google").
# To obtain an API key see:
# https://cloud.google.com/maps-platform/
key_google <- key_get("key_google")

#### SHINYAPPS ####
# Deployment credentials for shinyapps.io (kept for reference, not run):
# install.packages('rsconnect')
# key_set("name"); key_set("token"); key_set("secret")
# rsconnect::setAccountInfo(name = key_get("name"),
#                           token = key_get("token"),
#                           secret = key_get("secret"))
# library(rsconnect)
# rsconnect::deployApp('.TFM-R\App-1\ui.R')
| /App-1/global.R | no_license | paquinho89/Tweets-analyzer | R | false | false | 5,722 | r | #UPLOADING PACKAGES
library(shiny)
library(DT)
##########################################################################################
# install rtweet from CRAN
# Here we are checking if the package is installed
if(!require("rtweet")){
# If the package is not in the system then it will be install
install.packages("rtweet", dependencies = TRUE)
# Here we are loading the package
library("rtweet")
}
#isntall tmap from the maps
if(!require("tmap")){
# If the package is not in the system then it will be install
install.packages("tmap", dependencies = TRUE)
# Here we are loading the package
library("tmap")
}
#This is for the maps and geolocations
# Here we are checking if the package is installed
if(!require("ggplot2")){
# If the package is not in the system then it will be install
install.packages("ggplot2", dependencies = TRUE)
# Here we are loading the package
library("ggplot2")
}
#This is for the maps and geolocations
# Here we are checking if the package is installed
if(!require("googleway")){
# If the package is not in the system then it will be install
install.packages("googleway", dependencies = TRUE)
# Here we are loading the package
library("googleway")
}
#install stringi
# Here we are checking if the package is installed
if(!require("stringi")){
# If the package is not in the system then it will be install
install.packages("stringi", repos="http://cran.rstudio.com/", dependencies=TRUE)
# Here we are loading the package
library("stringi")
}
#install yaml
# Here we are checking if the package is installed
if(!require("yaml")){
# If the package is not in the system then it will be install
install.packages("yaml", repos="http://cran.rstudio.com/", dependencies=TRUE)
# Here we are loading the package
library("yaml")
}
#install mapsapi
# Here we are checking if the package is installed
if(!require("mapsapi")){
# If the package is not in the system then it will be install
install.packages("mapsapi")
# Here we are loading the package
library("mapsapi")
}
#Colling the library leaflet to plot the map
library("leaflet")
#install devtools
# Here we are checking if the package is installed
if(!require("devtools")){
# If the package is not in the system then it will be install
install.packages("devtools", repos="http://cran.rstudio.com/", dependencies=TRUE)
# Here we are loading the package
library("devtools")
}
##############################################################################
#INSTALLING THE PACKAGES FOR THE WORDCLOUD
#################################################################################
#install tidytext
# Here we are checking if the package is installed
if(!require("tidytext")){
# If the package is not in the system then it will be install
install.packages("tidytext", dependencies = TRUE)
# Here we are loading the package
library("tidytext")
}
# install dplyr
# Here we are checking if the package is installed
if(!require("dplyr")){
# If the package is not in the system then it will be install
install.packages("dplyr", dependencies = TRUE)
# Here we are loading the package
library("dplyr")
}
# install stringr
# Here we are checking if the package is installed
if(!require("stringr")){
# If the package is not in the system then it will be install
install.packages("stringr", dependencies = TRUE)
# Here we are loading the package
library("stringr")
}
#require(devtools)
# install wordcloud
# Here we are checking if the package is installed
if(!require("wordcloud")){
# If the package is not in the system then it will be install
install.packages("wordcloud", dependencies = TRUE)
# Here we are loading the package
library("wordcloud")
}
library(stopwords)
############################################### PART 1 - PLOTTING THE MAP ###############################
#############################################################################
#AUTHENTICATION
############################################################################
#TWITTER AUTHENTICATION
#For more information regarding the tweets, see the below link:
#https://rtweet.info/ para quitar os tweets
#Installing the KeyRing package to hide the passwords and user names
install.packages("keyring")
library(keyring)
#Authentification on Twitter accounts
#Hiding my Twitters credentials
#key_set("app")
#key_set("consumer_key")
#key_set("consumer_secret")
#Authentification on Twitter accounts
create_token(
app = key_get("app"),
consumer_key = key_get("consumer_key"),
consumer_secret = key_get("consumer_secret"))
#GOOGLE AUTHENTICATION
#Authentification on my google account to get the geocOde
#This is my API from my API key. To get the API see this site:
#https://cloud.google.com/maps-platform/?__utma=102347093.739445211.1529438971.1543151047.1543151047.1&__utmb=102347093.0.10.1543151047&__utmc=102347093&__utmx=-&__utmz=102347093.1543151047.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)&__utmv=-&__utmk=222020888&_ga=2.179297060.1418589899.1543143627-739445211.1529438971#get-started
#Hiding my google credentials
#key_set("key_google")
key_google<-key_get("key_google")
####SHINYAPPS#####
#Packages for the shinyapps.io
#install.packages('rsconnect')
#Authentification
#Hidden my credentials
#key_set("name")
#key_set("token")
#key_set("secret")
#rsconnect::setAccountInfo(name=key_get("name"),
#token= key_get("token"),
#secret= key_get("secret"))
#library(rsconnect)
#rsconnect::deployApp('.TFM-R\App-1\ui.R')
|
#' Bernstein polynomials
#'
#' Bernstein polynomials
#'
#' @param k Bernstein polynomial k
#' @param n Bernstein polynomial degree
#' @param indeterminate indeterminate
#' @return a mpoly object
#' @author David Kahle
#' @export
#' @examples
#'
#' bernstein(0, 0)
#'
#' bernstein(0, 1)
#' bernstein(1, 1)
#'
#' bernstein(0, 1, "t")
#'
#' bernstein(0:2, 2)
#' bernstein(0:3, 3)
#' bernstein(0:3, 3, "t")
#'
#'
#' bernstein(0:4, 4)
#' bernstein(0:10, 10)
#' bernstein(0:10, 10, "t")
#' bernstein(0:20, 20, "t")
#'
#' \dontrun{ # visualize the bernstein polynomials
#'
#' library(ggplot2); theme_set(theme_classic())
#' library(tidyr)
#'
#' s <- seq(0, 1, length.out = 101)
#' N <- 10 # number of bernstein polynomials to plot
#' (bernPolys <- bernstein(0:N, N))
#'
#' df <- data.frame(s, as.function(bernPolys)(s))
#' names(df) <- c("x", paste0("B_", 0:N))
#' head(df)
#'
#' mdf <- gather(df, degree, value, -x)
#' head(mdf)
#'
#' qplot(x, value, data = mdf, geom = "line", color = degree)
#'
#' }
#'
bernstein <- function(k, n, indeterminate = "x"){
## make it possible for vector k args
if(length(k) > 1){
listOPolys <- lapply(k, function(.) bernstein(., n, indeterminate))
class(listOPolys) <- "mpolyList"
return(listOPolys)
}
## construct coefficients and degrees of terms
m <- n - k
coefs <- choose(n, k) * (-1)^(0:m) * choose(m, 0:m)
degs <- k:n
## construct polynomial as list
p <- Map(function(deg, coef) c(x = deg, coef = coef), degs, coefs)
## wipe out zeros
p <- lapply(p, function(v) v[v != 0])
## class list
class(p) <- c("bernstein", "mpoly")
attr(p, "bernstein") <- list(k = k, n = n, indeterminate = indeterminate)
## swap and return
swap(p, "x", indeterminate)
}
#' Bernstein polynomial approximation
#'
#' Bernstein polynomial approximation
#'
#' @param f the function to approximate
#' @param n Bernstein polynomial degree
#' @param lower lower bound for approximation
#' @param upper upper bound for approximation
#' @param indeterminate indeterminate
#' @return a mpoly object
#' @author David Kahle
#' @export
#' @examples
#'
#'
#'
#'
#'
#' \dontrun{ # visualize the bernstein polynomials
#'
#' library(ggplot2); theme_set(theme_bw())
#' library(reshape2)
#'
#'
#'
#'
#' f <- function(x) sin(2*pi*x)
#' p <- bernsteinApprox(f, 20)
#' round(p, 3)
#'
#' x <- seq(0, 1, length.out = 101)
#' df <- data.frame(
#' x = rep(x, 2),
#' y = c(f(x), as.function(p)(x)),
#' which = rep(c("actual", "approx"), each = 101)
#' )
#' qplot(x, y, data = df, geom = "line", color = which)
#'
#'
#'
#'
#'
#'
#' p <- bernsteinApprox(sin, 20, pi/2, 1.5*pi)
#' round(p, 4)
#'
#' x <- seq(0, 2*pi, length.out = 101)
#' df <- data.frame(
#' x = rep(x, 2),
#' y = c(sin(x), as.function(p)(x)),
#' which = rep(c("actual", "approx"), each = 101)
#' )
#' qplot(x, y, data = df, geom = "line", color = which)
#'
#'
#'
#'
#'
#'
#'
#'
#' p <- bernsteinApprox(dnorm, 15, -1.25, 1.25)
#' round(p, 4)
#'
#' x <- seq(-3, 3, length.out = 101)
#' df <- data.frame(
#' x = rep(x, 2),
#' y = c(dnorm(x), as.function(p)(x)),
#' which = rep(c("actual", "approx"), each = 101)
#' )
#' qplot(x, y, data = df, geom = "line", color = which)
#'
#'
#'
#'
#'
#'
#' }
#'
bernsteinApprox <- function(f, n, lower = 0, upper = 1, indeterminate = "x"){
## compute support and determine weights
s <- (0:n)/n
fscaled <- function(.) f( (upper-lower)*. + lower )
weights <- as.list(fscaled(s))
## convert weights to mpolyList
weights <- lapply(weights, function(x) mpoly(list(c(coef = x))))
class(weights) <- "mpolyList"
## multiply weights by basis
approxPoly <- Reduce(`+`, weights * bernstein(0:n, n, "temp"))
## compute plugin and plug in
pluginPoly <- (upper-lower)^-1 * (mp(indeterminate) + -1*lower)
plug(approxPoly, "temp", pluginPoly)
}
| /R/bernstein.R | no_license | GrantInnerst/mpoly | R | false | false | 3,922 | r | #' Bernstein polynomials
#'
#' Bernstein polynomials
#'
#' @param k Bernstein polynomial k
#' @param n Bernstein polynomial degree
#' @param indeterminate indeterminate
#' @return a mpoly object
#' @author David Kahle
#' @export
#' @examples
#'
#' bernstein(0, 0)
#'
#' bernstein(0, 1)
#' bernstein(1, 1)
#'
#' bernstein(0, 1, "t")
#'
#' bernstein(0:2, 2)
#' bernstein(0:3, 3)
#' bernstein(0:3, 3, "t")
#'
#'
#' bernstein(0:4, 4)
#' bernstein(0:10, 10)
#' bernstein(0:10, 10, "t")
#' bernstein(0:20, 20, "t")
#'
#' \dontrun{ # visualize the bernstein polynomials
#'
#' library(ggplot2); theme_set(theme_classic())
#' library(tidyr)
#'
#' s <- seq(0, 1, length.out = 101)
#' N <- 10 # number of bernstein polynomials to plot
#' (bernPolys <- bernstein(0:N, N))
#'
#' df <- data.frame(s, as.function(bernPolys)(s))
#' names(df) <- c("x", paste0("B_", 0:N))
#' head(df)
#'
#' mdf <- gather(df, degree, value, -x)
#' head(mdf)
#'
#' qplot(x, value, data = mdf, geom = "line", color = degree)
#'
#' }
#'
bernstein <- function(k, n, indeterminate = "x"){
## make it possible for vector k args
if(length(k) > 1){
listOPolys <- lapply(k, function(.) bernstein(., n, indeterminate))
class(listOPolys) <- "mpolyList"
return(listOPolys)
}
## construct coefficients and degrees of terms
m <- n - k
coefs <- choose(n, k) * (-1)^(0:m) * choose(m, 0:m)
degs <- k:n
## construct polynomial as list
p <- Map(function(deg, coef) c(x = deg, coef = coef), degs, coefs)
## wipe out zeros
p <- lapply(p, function(v) v[v != 0])
## class list
class(p) <- c("bernstein", "mpoly")
attr(p, "bernstein") <- list(k = k, n = n, indeterminate = indeterminate)
## swap and return
swap(p, "x", indeterminate)
}
#' Bernstein polynomial approximation
#'
#' Bernstein polynomial approximation
#'
#' @param f the function to approximate
#' @param n Bernstein polynomial degree
#' @param lower lower bound for approximation
#' @param upper upper bound for approximation
#' @param indeterminate indeterminate
#' @return a mpoly object
#' @author David Kahle
#' @export
#' @examples
#'
#'
#'
#'
#'
#' \dontrun{ # visualize the bernstein polynomials
#'
#' library(ggplot2); theme_set(theme_bw())
#' library(reshape2)
#'
#'
#'
#'
#' f <- function(x) sin(2*pi*x)
#' p <- bernsteinApprox(f, 20)
#' round(p, 3)
#'
#' x <- seq(0, 1, length.out = 101)
#' df <- data.frame(
#' x = rep(x, 2),
#' y = c(f(x), as.function(p)(x)),
#' which = rep(c("actual", "approx"), each = 101)
#' )
#' qplot(x, y, data = df, geom = "line", color = which)
#'
#'
#'
#'
#'
#'
#' p <- bernsteinApprox(sin, 20, pi/2, 1.5*pi)
#' round(p, 4)
#'
#' x <- seq(0, 2*pi, length.out = 101)
#' df <- data.frame(
#' x = rep(x, 2),
#' y = c(sin(x), as.function(p)(x)),
#' which = rep(c("actual", "approx"), each = 101)
#' )
#' qplot(x, y, data = df, geom = "line", color = which)
#'
#'
#'
#'
#'
#'
#'
#'
#' p <- bernsteinApprox(dnorm, 15, -1.25, 1.25)
#' round(p, 4)
#'
#' x <- seq(-3, 3, length.out = 101)
#' df <- data.frame(
#' x = rep(x, 2),
#' y = c(dnorm(x), as.function(p)(x)),
#' which = rep(c("actual", "approx"), each = 101)
#' )
#' qplot(x, y, data = df, geom = "line", color = which)
#'
#'
#'
#'
#'
#'
#' }
#'
bernsteinApprox <- function(f, n, lower = 0, upper = 1, indeterminate = "x"){
## compute support and determine weights
s <- (0:n)/n
fscaled <- function(.) f( (upper-lower)*. + lower )
weights <- as.list(fscaled(s))
## convert weights to mpolyList
weights <- lapply(weights, function(x) mpoly(list(c(coef = x))))
class(weights) <- "mpolyList"
## multiply weights by basis
approxPoly <- Reduce(`+`, weights * bernstein(0:n, n, "temp"))
## compute plugin and plug in
pluginPoly <- (upper-lower)^-1 * (mp(indeterminate) + -1*lower)
plug(approxPoly, "temp", pluginPoly)
}
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157156e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615831851-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,048 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157156e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
library("data.table", lib.loc="C:/Users/Hendrik/Documents/R/win-library/3.1")
myFile <- "~/Computational Scientist/Studiemateriaal/Johns Hopkins University Data Science Course Track/4 Exploratory Data Analysis/CourseProject1/household_power_consumption.txt"
#I liked 'fread' best. The file needs to go fully into RAM but it was possible here.
#When using GB's of data other methods must be used.
power_raw <- fread(myFile, sep=";", sep2="auto", nrows=-1L, header=TRUE, na.strings="?",
stringsAsFactors=FALSE, verbose=FALSE, autostart=30L, skip=-1L, select=NULL, drop=NULL,
colClasses=NULL,integer64=getOption("datatable.integer64"), # default: "integer64"
showProgress=getOption("datatable.showProgress") # default: TRUE
)
#subset the 2880 observations during 1st and 2nd Feb 2007 we'll use for the project.
#Cast as data.frame because else we'll have trouble with the datetime.
power <- as.data.frame(power_raw[power_raw$Date == "1/2/2007" | power_raw$Date == "2/2/2007",])
print("1st and 2nd Feb 2007 data read into a data.frame")
#fix up the dates and times
datetimes <- paste(power$Date,power$Time)
datetimes2 <- strptime(datetimes, "%d/%m/%Y%H:%M:%S",tz="") #gives us a List
power2 <- cbind(datetimes2,power) #add this list as a column in the power2 data.frame
#close and reset any open gfx devices
#dev.off() <= this was causing trouble: the PNG file turned out white and blank.
#tell R we're plotting to a .PNG file with specified height and width.
dev.new(png(filename = "plot3.png",width = 480, height = 480, units = "px",bg = "transparent"))
#Mimicking the plot layout of the assignment.
#ATTENTION: "do","vr","za" is "Fri","Sat","Sun" in my native language. As my Windows and RStudio are both set in English,
# I don't quite find why this is in Dutch and how I can change it. Please don't deduct points for this :)
with(power2,plot(Sub_metering_1 ~ datetimes2,col="black",xlab = "",
ylab = "Energy sub metering", yaxp = c(0,30,3), type='l', lty=1,lwd=1))
with(power2,points(Sub_metering_2 ~ datetimes2,col="red",xlab = "",
ylab = "", yaxp = c(0,30,3), type='l', lty=1,lwd=1))
with(power2,points(Sub_metering_3 ~ datetimes2,col="blue",xlab = "",
ylab = "", yaxp = c(0,30,3), type='l', lty=1,lwd=1))
#Mimicking the legend.
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,col=c("black","red","blue"))
dev.off()
print("png made")
#to go back to RStudio graphics device ('plots' tab)
#dev.new(noRStudioGD = FALSE) | /plot3.R | no_license | Hdooster/ExData_Plotting1 | R | false | false | 2,684 | r | library("data.table", lib.loc="C:/Users/Hendrik/Documents/R/win-library/3.1")
myFile <- "~/Computational Scientist/Studiemateriaal/Johns Hopkins University Data Science Course Track/4 Exploratory Data Analysis/CourseProject1/household_power_consumption.txt"
#I liked 'fread' best. The file needs to go fully into RAM but it was possible here.
#When using GB's of data other methods must be used.
power_raw <- fread(myFile, sep=";", sep2="auto", nrows=-1L, header=TRUE, na.strings="?",
stringsAsFactors=FALSE, verbose=FALSE, autostart=30L, skip=-1L, select=NULL, drop=NULL,
colClasses=NULL,integer64=getOption("datatable.integer64"), # default: "integer64"
showProgress=getOption("datatable.showProgress") # default: TRUE
)
#subset the 2880 observations during 1st and 2nd Feb 2007 we'll use for the project.
#Cast as data.frame because else we'll have trouble with the datetime.
power <- as.data.frame(power_raw[power_raw$Date == "1/2/2007" | power_raw$Date == "2/2/2007",])
print("1st and 2nd Feb 2007 data read into a data.frame")
#fix up the dates and times
datetimes <- paste(power$Date,power$Time)
datetimes2 <- strptime(datetimes, "%d/%m/%Y%H:%M:%S",tz="") #gives us a List
power2 <- cbind(datetimes2,power) #add this list as a column in the power2 data.frame
#close and reset any open gfx devices
#dev.off() <= this was causing trouble: the PNG file turned out white and blank.
#tell R we're plotting to a .PNG file with specified height and width.
dev.new(png(filename = "plot3.png",width = 480, height = 480, units = "px",bg = "transparent"))
#Mimicking the plot layout of the assignment.
#ATTENTION: "do","vr","za" is "Fri","Sat","Sun" in my native language. As my Windows and RStudio are both set in English,
# I don't quite find why this is in Dutch and how I can change it. Please don't deduct points for this :)
with(power2,plot(Sub_metering_1 ~ datetimes2,col="black",xlab = "",
ylab = "Energy sub metering", yaxp = c(0,30,3), type='l', lty=1,lwd=1))
with(power2,points(Sub_metering_2 ~ datetimes2,col="red",xlab = "",
ylab = "", yaxp = c(0,30,3), type='l', lty=1,lwd=1))
with(power2,points(Sub_metering_3 ~ datetimes2,col="blue",xlab = "",
ylab = "", yaxp = c(0,30,3), type='l', lty=1,lwd=1))
#Mimicking the legend.
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,col=c("black","red","blue"))
dev.off()
print("png made")
#to go back to RStudio graphics device ('plots' tab)
#dev.new(noRStudioGD = FALSE) |
##
## Load only the required data fields -> where features contain mean/std
##
features <- read.table("./UCI HAR Dataset/features.txt")
features_need <- features[grep("mean|std", features[,2]), ]
# grep returns the postions and the actual values are returned as the positions are encapsulated (nested) again in features
# select only the needed features in test and train data
# col 1 of features provides the col number holding the std/mean values, so we grab all rows of these cols
# This is requirement #2
#combine test and train data with labels
# each of the dfs have the same no. of cols. 3 dfs as the data has been normalised
# subject_train indicates which subject (1-32) the corresponding data_train is for
# label_train indicates which of the 6 activity was done for the data_train
# data_train contains the recordings (data) of each activity labelled in label_train
# repeat the same operations twice - once for training, one for test
## 1.
data_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
data_train_need <- data_train[, as.numeric(features_need[,1])]
label_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
train <- cbind(subject_train, label_train, data_train_need)
##2.
data_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
data_test_need <- data_test[, as.numeric(features_need[,1])]
label_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
test <- cbind(subject_test, label_test, data_test_need)
# merge the two datasets (test and train) into one
# appends one dataset after the other
# requirement #1
mergeddata <- rbind(train, test)
## Rename merged data with descriptive variable names
# This is requirement #4
# use RE - removes the chars '(' , ')' as it affects the operation in the subsequent line
features_need[,2]<- gsub("[()]","", features_need[,2])
# col 1 is from subject file, col 2 is from label file, the remaining cols are from the features file (see the cbind above)
colnames(mergeddata) <- c("subject", "activity", features_need[,2])
## Put a text description instead of a number to describe the activities in the data set
# - breaks normalisation rules (number takes less space than descriptive text)
# This is requirement #3
# Load activity labels with descriptive text and put the descriptions into the mergeddata
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
mergeddata$activity <- factor(mergeddata$activity, levels = activity_labels[, 1], labels = activity_labels[, 2])
## create average of each variable for each activity and each subject.
# This is requirement #5
# cols 1 & 2 do not need to be aggreated
summary_data <-aggregate(mergeddata[, 3:ncol(mergeddata)], by=list(mergeddata$subject,mergeddata$activity), FUN=mean, na.rm=TRUE)
# change 1st 2 colnames from Group1 & Group2 to something more meaningful
colnames(summary_data) [1:2]<- c("subject", "activity")
write.table(summary_data, file="submit_tidy_data.txt", row.names = FALSE) | /run_analysis.R | no_license | AlvinToh2018/getting-and-cleaning-data | R | false | false | 3,182 | r | ##
## Load only the required data fields -> where features contain mean/std
##
features <- read.table("./UCI HAR Dataset/features.txt")
features_need <- features[grep("mean|std", features[,2]), ]
# grep returns the postions and the actual values are returned as the positions are encapsulated (nested) again in features
# select only the needed features in test and train data
# col 1 of features provides the col number holding the std/mean values, so we grab all rows of these cols
# This is requirement #2
#combine test and train data with labels
# each of the dfs have the same no. of cols. 3 dfs as the data has been normalised
# subject_train indicates which subject (1-32) the corresponding data_train is for
# label_train indicates which of the 6 activity was done for the data_train
# data_train contains the recordings (data) of each activity labelled in label_train
# repeat the same operations twice - once for training, one for test
## 1.
data_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
data_train_need <- data_train[, as.numeric(features_need[,1])]
label_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
train <- cbind(subject_train, label_train, data_train_need)
##2.
data_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
data_test_need <- data_test[, as.numeric(features_need[,1])]
label_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
test <- cbind(subject_test, label_test, data_test_need)
# merge the two datasets (test and train) into one
# appends one dataset after the other
# requirement #1
mergeddata <- rbind(train, test)
## Rename merged data with descriptive variable names
# This is requirement #4
# use RE - removes the chars '(' , ')' as it affects the operation in the subsequent line
features_need[,2]<- gsub("[()]","", features_need[,2])
# col 1 is from subject file, col 2 is from label file, the remaining cols are from the features file (see the cbind above)
colnames(mergeddata) <- c("subject", "activity", features_need[,2])
## Put a text description instead of a number to describe the activities in the data set
# - breaks normalisation rules (number takes less space than descriptive text)
# This is requirement #3
# Load activity labels with descriptive text and put the descriptions into the mergeddata
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
mergeddata$activity <- factor(mergeddata$activity, levels = activity_labels[, 1], labels = activity_labels[, 2])
## create average of each variable for each activity and each subject.
# This is requirement #5
# cols 1 & 2 do not need to be aggreated
summary_data <-aggregate(mergeddata[, 3:ncol(mergeddata)], by=list(mergeddata$subject,mergeddata$activity), FUN=mean, na.rm=TRUE)
# change 1st 2 colnames from Group1 & Group2 to something more meaningful
colnames(summary_data) [1:2]<- c("subject", "activity")
write.table(summary_data, file="submit_tidy_data.txt", row.names = FALSE) |
# Assume data files are in working dir
# Since loading takes a while, check if items are already in environment
library(ggplot2)
if(!exists("NEI")){
NEI <- readRDS("summarySCC_PM25.rds")
}
if(!exists("SCC")){
SCC <- readRDS("Source_Classification_Code.rds")
}
# Create subset for Baltimore
NEI_baltimore <- subset(NEI, fips=="24510")
# Aggregate by Year and Type
aggregatedBaltimoreByYearAndType <- aggregate(Emissions ~ year+type, NEI_baltimore, sum)
png("plot3.png",width=640,height=480)
g <- ggplot(aggregatedBaltimoreByYearAndType, aes(year, Emissions, color = type))
g <- g + geom_line() +
xlab("Years") +
ylab("Total PM2.5 Emissions") +
ggtitle("Total Emissions in Baltimore City, Maryland from 1999 to 2008")
print(g)
dev.off() | /plot3.R | no_license | awgroeneveld/ExplDataAnalysisProject2 | R | false | false | 764 | r | # Assume data files are in working dir
# Since loading takes a while, check if items are already in environment
library(ggplot2)
if(!exists("NEI")){
NEI <- readRDS("summarySCC_PM25.rds")
}
if(!exists("SCC")){
SCC <- readRDS("Source_Classification_Code.rds")
}
# Create subset for Baltimore
NEI_baltimore <- subset(NEI, fips=="24510")
# Aggregate by Year and Type
aggregatedBaltimoreByYearAndType <- aggregate(Emissions ~ year+type, NEI_baltimore, sum)
png("plot3.png",width=640,height=480)
g <- ggplot(aggregatedBaltimoreByYearAndType, aes(year, Emissions, color = type))
g <- g + geom_line() +
xlab("Years") +
ylab("Total PM2.5 Emissions") +
ggtitle("Total Emissions in Baltimore City, Maryland from 1999 to 2008")
print(g)
dev.off() |
## ----load-libraries------------------------------------------------------
# load libraries
library(raster) # work with raster files
library(rgdal) # work with raster files
# set working directory to ensure R can find the file we wish to import
# setwd("working-dir-path-here")
## ----open-DTMs-----------------------------------------------------------
# Load DTMs into R
DTM_pre <- raster("lidar/pre-flood/preDTM3.tif")
DTM_post <- raster("lidar/post-flood/postDTM3.tif")
# View raster structure
DTM_pre
DTM_post
## ----open-hillshade------------------------------------------------------
# import DSM hillshade
DTMpre_hill <- raster("lidar/pre-flood/preDTMhill3.tif")
DTMpost_hill <- raster("lidar/post-flood/postDTMhill3.tif")
## ----plot-rasters--------------------------------------------------------
# plot Pre-flood w/ hillshade
plot(DTMpre_hill,
col=grey(1:100/100), # create a color ramp of grey colors for hillshade
legend=FALSE, # no legend, we don't care about the grey of the hillshade
main="Four Mile Canyon Creek, Boulder County\nPre-Flood",
axes=FALSE) # makes for a cleaner plot, if the coordinates aren't necessary
plot(DTM_pre,
axes=FALSE,
alpha=0.5, # sets how transparent the object will be (0=transparent, 1=not transparent)
add=T) # add=TRUE (or T), add plot to the previous plotting frame
# plot Post-flood w/ hillshade
# note, no add=T in this code, so new plotting frame.
plot(DTMpost_hill,
col=grey(1:100/100),
legend=FALSE,
main="Four Mile Canyon Creek, Boulder County\nPost-Flood",
axes=FALSE)
plot(DTM_post,
axes=FALSE,
alpha=0.5,
add=T)
## ----create-difference-model---------------------------------------------
# DoD: erosion to be neg, deposition to be positive, therefore post - pre
DoD <- DTM_post-DTM_pre
plot(DoD,
main="Digital Elevation Model of Difference (DoD)",
axes=FALSE)
## ----hist-DoD------------------------------------------------------------
# histogram of values in DoD
hist(DoD)
## ----pretty-diff-model---------------------------------------------------
# Color palette for 5 categories
difCol5 = c("#d7191c","#fdae61","#ffffbf","#abd9e9","#2c7bb6")
# Alternate palette for 7 categories
#difCol7 = c("#d73027","#fc8d59","#fee090","#ffffbf","#e0f3f8","#91bfdb","#4575b4")
# plot hillshade first
plot(DTMpost_hill,
col=grey(1:100/100), # create a color ramp of grey colors
legend=FALSE,
main="Elevation Change Post Flood\nFour Mile Canyon Creek, Boulder County",
axes=FALSE)
# add the DoD to it with specified breaks & colors
plot(DoD,
breaks = c(-5,-1,-0.5,0.5,1,10),
col= difCol5,
axes=FALSE,
alpha=0.4,
add =T)
## ----crop-raster-man-----------------------------------------------------
# plot the rasters you want to crop from
plot(DTMpost_hill,
col=grey(1:100/100), # create a color ramp of grey colors
legend=FALSE,
main="Four Mile Canyon Creek, Boulder County\nPre-Flood",
axes=FALSE)
plot(DoD,
breaks = c(-5,-1,-0.5,0.5,1,10),
col= difCol5,
axes=FALSE,
alpha=0.4,
add =T)
# crop by designating two opposite corners
cropbox1<-drawExtent()
## ----crop-raster-man-view------------------------------------------------
# view the extent of the cropbox1
cropbox1
## ----crop-raster-coords--------------------------------------------------
# desired coordinates of the box
cropbox2<-c(473792.6,474999,4434526,4435453)
## ----plot-crop-raster----------------------------------------------------
# crop desired layers to this cropbox
DTM_pre_crop <- crop(DTM_pre, cropbox2)
DTM_post_crop <- crop(DTM_post, cropbox2)
DTMpre_hill_crop <- crop(DTMpre_hill,cropbox2)
DTMpost_hill_crop <- crop(DTMpost_hill,cropbox2)
DoD_crop <- crop(DoD, cropbox2)
# plot all again using the cropped layers
# PRE
plot(DTMpre_hill_crop,
col=grey(1:100/100), # create a color ramp of grey colors
legend=FALSE,
main="Four Mile Canyon Creek, Boulder County\nPre-Flood",
axes=FALSE)
# note \n in the title forces a line break in the title
plot(DTM_pre_crop,
axes=FALSE,
alpha=0.5,
add=T)
# POST
# plot Post-flood w/ hillshade
plot(DTMpost_hill_crop,
col=grey(1:100/100), # create a color ramp of grey colors
legend=FALSE,
main="Four Mile Canyon Creek, Boulder County\nPost-Flood",
axes=FALSE)
plot(DTM_post_crop,
axes=FALSE,
alpha=0.5,
add=T)
# CHANGE - DoD
plot(DTMpost_hill_crop,
col=grey(1:100/100), # create a color ramp of grey colors
legend=FALSE,
main="Elevation Change Post Flood\nFour Mile Canyon Creek, Boulder County",
axes=FALSE)
plot(DoD_crop,
breaks = c(-5,-1,-0.5,0.5,1,10),
col= difCol5,
axes=FALSE,
alpha=0.4,
add =T)
| /code/disturb-events-co13/NEON-Boulder-Flood-LiDAR-in-R.R | no_license | mperignon/NEON-Lesson-Building-Data-Skills | R | false | false | 4,722 | r | ## ----load-libraries------------------------------------------------------
# load libraries
library(raster) # work with raster files
library(rgdal) # work with raster files
# set working directory to ensure R can find the file we wish to import
# setwd("working-dir-path-here")
## ----open-DTMs-----------------------------------------------------------
# Load DTMs into R
# DTM = digital terrain model; paths are relative to the working directory.
DTM_pre <- raster("lidar/pre-flood/preDTM3.tif")
DTM_post <- raster("lidar/post-flood/postDTM3.tif")
# View raster structure
# (typing the object name prints extent, resolution and CRS metadata)
DTM_pre
DTM_post
## ----open-hillshade------------------------------------------------------
# import DSM hillshade
# The hillshades serve as a grey relief backdrop under the semi-transparent
# elevation layers plotted below.
DTMpre_hill <- raster("lidar/pre-flood/preDTMhill3.tif")
DTMpost_hill <- raster("lidar/post-flood/postDTMhill3.tif")
## ----plot-rasters--------------------------------------------------------
# plot Pre-flood w/ hillshade
plot(DTMpre_hill,
     col=grey(1:100/100), # create a color ramp of grey colors for hillshade
     legend=FALSE, # no legend, we don't care about the grey of the hillshade
     main="Four Mile Canyon Creek, Boulder County\nPre-Flood",
     axes=FALSE) # makes for a cleaner plot, if the coordinates aren't necessary
plot(DTM_pre,
     axes=FALSE,
     alpha=0.5, # sets how transparent the object will be (0=transparent, 1=not transparent)
     add=T) # add=TRUE (or T), add plot to the previous plotting frame
# plot Post-flood w/ hillshade
# note, no add=T in this code, so new plotting frame.
plot(DTMpost_hill,
     col=grey(1:100/100),
     legend=FALSE,
     main="Four Mile Canyon Creek, Boulder County\nPost-Flood",
     axes=FALSE)
plot(DTM_post,
     axes=FALSE,
     alpha=0.5,
     add=T)
## ----create-difference-model---------------------------------------------
# DoD: erosion to be neg, deposition to be positive, therefore post - pre
# (DoD = DEM of Difference; the subtraction is applied cell-wise)
DoD <- DTM_post-DTM_pre
plot(DoD,
     main="Digital Elevation Model of Difference (DoD)",
     axes=FALSE)
## ----hist-DoD------------------------------------------------------------
# histogram of values in DoD
# (most cells are near zero; the tails correspond to erosion/deposition)
hist(DoD)
## ----pretty-diff-model---------------------------------------------------
# Color palette for 5 categories
difCol5 = c("#d7191c","#fdae61","#ffffbf","#abd9e9","#2c7bb6")
# Alternate palette for 7 categories
#difCol7 = c("#d73027","#fc8d59","#fee090","#ffffbf","#e0f3f8","#91bfdb","#4575b4")
# plot hillshade first
plot(DTMpost_hill,
     col=grey(1:100/100), # create a color ramp of grey colors
     legend=FALSE,
     main="Elevation Change Post Flood\nFour Mile Canyon Creek, Boulder County",
     axes=FALSE)
# add the DoD to it with specified breaks & colors
# breaks bin the elevation-change values into the 5 palette classes above
plot(DoD,
     breaks = c(-5,-1,-0.5,0.5,1,10),
     col= difCol5,
     axes=FALSE,
     alpha=0.4,
     add =T)
## ----crop-raster-man-----------------------------------------------------
# plot the rasters you want to crop from
# NOTE(review): the title says "Pre-Flood" but the post-flood hillshade is plotted.
plot(DTMpost_hill,
     col=grey(1:100/100), # create a color ramp of grey colors
     legend=FALSE,
     main="Four Mile Canyon Creek, Boulder County\nPre-Flood",
     axes=FALSE)
plot(DoD,
     breaks = c(-5,-1,-0.5,0.5,1,10),
     col= difCol5,
     axes=FALSE,
     alpha=0.4,
     add =T)
# crop by designating two opposite corners
# NOTE: drawExtent() is interactive -- it waits for two clicks on the open plot.
cropbox1<-drawExtent()
## ----crop-raster-man-view------------------------------------------------
# view the extent of the cropbox1
cropbox1
## ----crop-raster-coords--------------------------------------------------
# desired coordinates of the box
# (xmin, xmax, ymin, ymax, in the raster's coordinate reference system)
cropbox2<-c(473792.6,474999,4434526,4435453)
## ----plot-crop-raster----------------------------------------------------
# crop desired layers to this cropbox
DTM_pre_crop <- crop(DTM_pre, cropbox2)
DTM_post_crop <- crop(DTM_post, cropbox2)
DTMpre_hill_crop <- crop(DTMpre_hill,cropbox2)
DTMpost_hill_crop <- crop(DTMpost_hill,cropbox2)
DoD_crop <- crop(DoD, cropbox2)
# plot all again using the cropped layers
# PRE
plot(DTMpre_hill_crop,
     col=grey(1:100/100), # create a color ramp of grey colors
     legend=FALSE,
     main="Four Mile Canyon Creek, Boulder County\nPre-Flood",
     axes=FALSE)
# note \n in the title forces a line break in the title
plot(DTM_pre_crop,
     axes=FALSE,
     alpha=0.5,
     add=T)
# POST
# plot Post-flood w/ hillshade
plot(DTMpost_hill_crop,
     col=grey(1:100/100), # create a color ramp of grey colors
     legend=FALSE,
     main="Four Mile Canyon Creek, Boulder County\nPost-Flood",
     axes=FALSE)
plot(DTM_post_crop,
     axes=FALSE,
     alpha=0.5,
     add=T)
# CHANGE - DoD
plot(DTMpost_hill_crop,
     col=grey(1:100/100), # create a color ramp of grey colors
     legend=FALSE,
     main="Elevation Change Post Flood\nFour Mile Canyon Creek, Boulder County",
     axes=FALSE)
plot(DoD_crop,
     breaks = c(-5,-1,-0.5,0.5,1,10),
     col= difCol5,
     axes=FALSE,
     alpha=0.4,
     add =T)
|
# ------------------------------------------------------------------------------
# Libraries
library(dplyr)
library(readr)
library(stringr)
library(caret)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Read, convert and subset the data
# load() puts data_train / data_test into the global environment.
load("../Data/data_train.rda")
load("../Data/data_test.rda")
# Keep only the pixel columns as predictors.
# NOTE(review): "pix*" is a regex here (matches "pi" then zero or more "x");
# "pix" was probably intended, though both select the pixel columns.
X_train <- data_train %>% dplyr::select(dplyr::matches("pix*"))
X_test <- data_test %>% dplyr::select(dplyr::matches("pix*"))
# caret needs syntactically valid factor levels, so make.names() prefixes
# numeric labels with "X" (e.g. "0" -> "X0").
data_train[["class"]] <- as.factor(make.names(data_train[["class"]]))
y_train <- data_train[["class"]]
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Control using cross-validation
# 3-fold CV; verboseIter prints progress for each resample.
fitControl <- trainControl(method="cv",
                           number=3,
                           verboseIter=TRUE)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Hyperparameter tuning
# "ORFlog" is caret's oblique random forest (logistic node model);
# tuneLength = 10 controls how many candidate tuning values caret tries.
modelFit <- caret::train(y=y_train,
                         x=X_train,
                         method="ORFlog",
                         trControl=fitControl,
                         tuneLength = 10)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Accuracy check on train sample
# NOTE(review): accuracy is computed on the training data, so it is optimistic.
pred.test <- predict(modelFit,newdata=subset(data_train, select = -c(class)))
# NOTE(review): `matrix` shadows base::matrix for the rest of this script.
matrix <- confusionMatrix(data_train[["class"]], pred.test)
# Append timestamp, method name and overall accuracy to the shared log file.
cat(c(format(Sys.time(), "%d/%m/%Y %H:%M"),
      modelFit[["method"]],
      round(matrix[["overall"]][1], 5),"\n"),
    file="../Log/log.txt",
    sep = "\t",
    append = TRUE)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Predict test sample
# str_remove strips the first "X" from each label, undoing the make.names() prefix.
pred <- predict(modelFit,newdata=X_test) %>% str_remove("[X]")
write.table(pred,
            paste0("../Pred/pred_",modelFit[["method"]],"_",format(Sys.time(),
                                                                   "%y%m%d%H%M"),".csv"),
            col.names=FALSE,
            row.names=FALSE,
            quote = FALSE)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Save model
# Timestamped filename so repeated runs do not overwrite earlier models.
saveRDS(modelFit,
        paste0("../Model/",modelFit[["method"]],"_",format(Sys.time(),
                                                           "%y%m%d%H%M"),".rds"))
# ------------------------------------------------------------------------------
| /Caret/Script/Script_ORFlog.R | no_license | bemayer/DigitRecognizer | R | false | false | 2,721 | r | # ------------------------------------------------------------------------------
# Libraries
library(dplyr)
library(readr)
library(stringr)
library(caret)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Read, convert and subset the data
# load() puts data_train / data_test into the global environment.
load("../Data/data_train.rda")
load("../Data/data_test.rda")
# Keep only the pixel columns as predictors.
# NOTE(review): "pix*" is a regex here (matches "pi" then zero or more "x");
# "pix" was probably intended, though both select the pixel columns.
X_train <- data_train %>% dplyr::select(dplyr::matches("pix*"))
X_test <- data_test %>% dplyr::select(dplyr::matches("pix*"))
# caret needs syntactically valid factor levels, so make.names() prefixes
# numeric labels with "X" (e.g. "0" -> "X0").
data_train[["class"]] <- as.factor(make.names(data_train[["class"]]))
y_train <- data_train[["class"]]
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Control using cross-validation
# 3-fold CV; verboseIter prints progress for each resample.
fitControl <- trainControl(method="cv",
                           number=3,
                           verboseIter=TRUE)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Hyperparameter tuning
# "ORFlog" is caret's oblique random forest (logistic node model);
# tuneLength = 10 controls how many candidate tuning values caret tries.
modelFit <- caret::train(y=y_train,
                         x=X_train,
                         method="ORFlog",
                         trControl=fitControl,
                         tuneLength = 10)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Accuracy check on train sample
# NOTE(review): accuracy is computed on the training data, so it is optimistic.
pred.test <- predict(modelFit,newdata=subset(data_train, select = -c(class)))
# NOTE(review): `matrix` shadows base::matrix for the rest of this script.
matrix <- confusionMatrix(data_train[["class"]], pred.test)
# Append timestamp, method name and overall accuracy to the shared log file.
cat(c(format(Sys.time(), "%d/%m/%Y %H:%M"),
      modelFit[["method"]],
      round(matrix[["overall"]][1], 5),"\n"),
    file="../Log/log.txt",
    sep = "\t",
    append = TRUE)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Predict test sample
# str_remove strips the first "X" from each label, undoing the make.names() prefix.
pred <- predict(modelFit,newdata=X_test) %>% str_remove("[X]")
write.table(pred,
            paste0("../Pred/pred_",modelFit[["method"]],"_",format(Sys.time(),
                                                                   "%y%m%d%H%M"),".csv"),
            col.names=FALSE,
            row.names=FALSE,
            quote = FALSE)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Save model
# Timestamped filename so repeated runs do not overwrite earlier models.
saveRDS(modelFit,
        paste0("../Model/",modelFit[["method"]],"_",format(Sys.time(),
                                                           "%y%m%d%H%M"),".rds"))
# ------------------------------------------------------------------------------
|
# Copyright 2017 Observational Health Data Sciences and Informatics
#
# This file is part of EvaluatingCaseControl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Create the exposure and outcome cohorts
#'
#' @details
#' This function will create the exposure and outcome cohorts following the definitions included in
#' this package.
#'
#' @param connectionDetails An object of type \code{connectionDetails} as created using the
#' \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#' DatabaseConnector package.
#' @param cdmDatabaseSchema Schema name where your patient-level data in OMOP CDM format resides.
#' Note that for SQL Server, this should include both the database and
#' schema name, for example 'cdm_data.dbo'.
#' @param workDatabaseSchema Schema name where intermediate data can be stored. You will need to have
#' write priviliges in this schema. Note that for SQL Server, this should
#' include both the database and schema name, for example 'cdm_data.dbo'.
#' @param studyCohortTable The name of the table that will be created in the work database schema.
#' This table will hold the exposure and outcome cohorts used in this
#' study.
#' @param oracleTempSchema Should be used in Oracle to specify a schema where the user has write
#' priviliges for storing temporary tables.
#' @param workFolder Name of local folder to place results; make sure to use forward slashes
#' (/)
#'
#' @export
createCohorts <- function(connectionDetails,
                          cdmDatabaseSchema,
                          workDatabaseSchema,
                          studyCohortTable = "ohdsi_case_control",
                          oracleTempSchema,
                          workFolder) {
  conn <- DatabaseConnector::connect(connectionDetails)
  # Release the connection even if any of the SQL steps below fails.
  # (Previously the connection was only closed on the happy path and
  # leaked whenever an error occurred mid-way.)
  on.exit(RJDBC::dbDisconnect(conn), add = TRUE)
  # Instantiate the study's exposure and outcome cohorts (definitions
  # shipped with this package).
  .createCohorts(connection = conn,
                 cdmDatabaseSchema = cdmDatabaseSchema,
                 cohortDatabaseSchema = workDatabaseSchema,
                 cohortTable = studyCohortTable,
                 oracleTempSchema = oracleTempSchema,
                 outputFolder = workFolder)
  # Negative control definitions shipped with the package.
  pathToCsv <- system.file("settings", "NegativeControls.csv", package = "EvaluatingCaseControl")
  negativeControls <- read.csv(pathToCsv)
  writeLines("- Creating exposure cohorts for negative controls")
  sql <- SqlRender::loadRenderTranslateSql("ExposureCohorts.sql",
                                           "EvaluatingCaseControl",
                                           dbms = connectionDetails$dbms,
                                           oracleTempSchema = oracleTempSchema,
                                           cdm_database_schema = cdmDatabaseSchema,
                                           target_database_schema = workDatabaseSchema,
                                           target_cohort_table = studyCohortTable,
                                           exposure_ids = c(negativeControls$targetId, negativeControls$comparatorId))
  DatabaseConnector::executeSql(conn, sql)
  writeLines("- Creating nesting cohorts for negative controls")
  sql <- SqlRender::loadRenderTranslateSql("NestingCohorts.sql",
                                           "EvaluatingCaseControl",
                                           dbms = connectionDetails$dbms,
                                           oracleTempSchema = oracleTempSchema,
                                           cdm_database_schema = cdmDatabaseSchema,
                                           target_database_schema = workDatabaseSchema,
                                           target_cohort_table = studyCohortTable,
                                           nesting_ids = negativeControls$nestingId)
  DatabaseConnector::executeSql(conn, sql)
  # Write per-cohort record counts to the work folder for diagnostics.
  sql <- "SELECT cohort_definition_id, COUNT(*) AS cohort_count FROM @target_database_schema.@target_cohort_table GROUP BY cohort_definition_id"
  sql <- SqlRender::renderSql(sql,
                              target_database_schema = workDatabaseSchema,
                              target_cohort_table = studyCohortTable)$sql
  sql <- SqlRender::translateSql(sql, targetDialect = connectionDetails$dbms)$sql
  counts <- DatabaseConnector::querySql(conn, sql)
  write.csv(counts, file.path(workFolder, "CohortCounts.csv"))
}
# Add human-readable cohort names to a data frame containing cohort IDs.
#
# data           - data frame with a cohort ID column
# IdColumnName   - name of the column holding the cohort IDs
# nameColumnName - name of the name column to add
#
# Returns the input data frame with the name column placed directly after
# the ID column. IDs without a match are kept with an NA name (all.x = TRUE).
addCohortNames <- function(data, IdColumnName = "cohortDefinitionId", nameColumnName = "cohortName") {
  # Read the ID-to-name lookups from this package's settings files.
  # (Was reading from "AlendronateVsRaloxifene" -- a copy-paste error: every
  # other lookup in this file uses the EvaluatingCaseControl package.)
  pathToCsv <- system.file("settings", "CohortsToCreate.csv", package = "EvaluatingCaseControl")
  cohortsToCreate <- read.csv(pathToCsv)
  pathToCsv <- system.file("settings", "NegativeControls.csv", package = "EvaluatingCaseControl")
  negativeControls <- read.csv(pathToCsv)
  idToName <- data.frame(cohortId = c(cohortsToCreate$cohortId, negativeControls$conceptId),
                         cohortName = c(as.character(cohortsToCreate$name), as.character(negativeControls$name)))
  names(idToName)[1] <- IdColumnName
  names(idToName)[2] <- nameColumnName
  data <- merge(data, idToName, all.x = TRUE)
  # Change order of columns: move the merged-in name column (appended last by
  # merge) so it sits right after the ID column.
  idCol <- which(colnames(data) == IdColumnName)
  if (idCol < ncol(data) - 1) {
    data <- data[, c(1:idCol, ncol(data) , (idCol+1):(ncol(data)-1))]
  }
  return(data)
}
| /EvaluatingCaseControl/R/CreateAllCohorts.R | no_license | rossdwilliams/StudyProtocolSandbox | R | false | false | 6,004 | r | # Copyright 2017 Observational Health Data Sciences and Informatics
#
# This file is part of EvaluatingCaseControl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Create the exposure and outcome cohorts
#'
#' @details
#' This function will create the exposure and outcome cohorts following the definitions included in
#' this package.
#'
#' @param connectionDetails An object of type \code{connectionDetails} as created using the
#' \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#' DatabaseConnector package.
#' @param cdmDatabaseSchema Schema name where your patient-level data in OMOP CDM format resides.
#' Note that for SQL Server, this should include both the database and
#' schema name, for example 'cdm_data.dbo'.
#' @param workDatabaseSchema Schema name where intermediate data can be stored. You will need to have
#' write priviliges in this schema. Note that for SQL Server, this should
#' include both the database and schema name, for example 'cdm_data.dbo'.
#' @param studyCohortTable The name of the table that will be created in the work database schema.
#' This table will hold the exposure and outcome cohorts used in this
#' study.
#' @param oracleTempSchema Should be used in Oracle to specify a schema where the user has write
#' priviliges for storing temporary tables.
#' @param workFolder Name of local folder to place results; make sure to use forward slashes
#' (/)
#'
#' @export
createCohorts <- function(connectionDetails,
                          cdmDatabaseSchema,
                          workDatabaseSchema,
                          studyCohortTable = "ohdsi_case_control",
                          oracleTempSchema,
                          workFolder) {
  conn <- DatabaseConnector::connect(connectionDetails)
  # Release the connection even if any of the SQL steps below fails.
  # (Previously the connection was only closed on the happy path and
  # leaked whenever an error occurred mid-way.)
  on.exit(RJDBC::dbDisconnect(conn), add = TRUE)
  # Instantiate the study's exposure and outcome cohorts (definitions
  # shipped with this package).
  .createCohorts(connection = conn,
                 cdmDatabaseSchema = cdmDatabaseSchema,
                 cohortDatabaseSchema = workDatabaseSchema,
                 cohortTable = studyCohortTable,
                 oracleTempSchema = oracleTempSchema,
                 outputFolder = workFolder)
  # Negative control definitions shipped with the package.
  pathToCsv <- system.file("settings", "NegativeControls.csv", package = "EvaluatingCaseControl")
  negativeControls <- read.csv(pathToCsv)
  writeLines("- Creating exposure cohorts for negative controls")
  sql <- SqlRender::loadRenderTranslateSql("ExposureCohorts.sql",
                                           "EvaluatingCaseControl",
                                           dbms = connectionDetails$dbms,
                                           oracleTempSchema = oracleTempSchema,
                                           cdm_database_schema = cdmDatabaseSchema,
                                           target_database_schema = workDatabaseSchema,
                                           target_cohort_table = studyCohortTable,
                                           exposure_ids = c(negativeControls$targetId, negativeControls$comparatorId))
  DatabaseConnector::executeSql(conn, sql)
  writeLines("- Creating nesting cohorts for negative controls")
  sql <- SqlRender::loadRenderTranslateSql("NestingCohorts.sql",
                                           "EvaluatingCaseControl",
                                           dbms = connectionDetails$dbms,
                                           oracleTempSchema = oracleTempSchema,
                                           cdm_database_schema = cdmDatabaseSchema,
                                           target_database_schema = workDatabaseSchema,
                                           target_cohort_table = studyCohortTable,
                                           nesting_ids = negativeControls$nestingId)
  DatabaseConnector::executeSql(conn, sql)
  # Write per-cohort record counts to the work folder for diagnostics.
  sql <- "SELECT cohort_definition_id, COUNT(*) AS cohort_count FROM @target_database_schema.@target_cohort_table GROUP BY cohort_definition_id"
  sql <- SqlRender::renderSql(sql,
                              target_database_schema = workDatabaseSchema,
                              target_cohort_table = studyCohortTable)$sql
  sql <- SqlRender::translateSql(sql, targetDialect = connectionDetails$dbms)$sql
  counts <- DatabaseConnector::querySql(conn, sql)
  write.csv(counts, file.path(workFolder, "CohortCounts.csv"))
}
# Add human-readable cohort names to a data frame containing cohort IDs.
#
# data           - data frame with a cohort ID column
# IdColumnName   - name of the column holding the cohort IDs
# nameColumnName - name of the name column to add
#
# Returns the input data frame with the name column placed directly after
# the ID column. IDs without a match are kept with an NA name (all.x = TRUE).
addCohortNames <- function(data, IdColumnName = "cohortDefinitionId", nameColumnName = "cohortName") {
  # Read the ID-to-name lookups from this package's settings files.
  # (Was reading from "AlendronateVsRaloxifene" -- a copy-paste error: every
  # other lookup in this file uses the EvaluatingCaseControl package.)
  pathToCsv <- system.file("settings", "CohortsToCreate.csv", package = "EvaluatingCaseControl")
  cohortsToCreate <- read.csv(pathToCsv)
  pathToCsv <- system.file("settings", "NegativeControls.csv", package = "EvaluatingCaseControl")
  negativeControls <- read.csv(pathToCsv)
  idToName <- data.frame(cohortId = c(cohortsToCreate$cohortId, negativeControls$conceptId),
                         cohortName = c(as.character(cohortsToCreate$name), as.character(negativeControls$name)))
  names(idToName)[1] <- IdColumnName
  names(idToName)[2] <- nameColumnName
  data <- merge(data, idToName, all.x = TRUE)
  # Change order of columns: move the merged-in name column (appended last by
  # merge) so it sits right after the ID column.
  idCol <- which(colnames(data) == IdColumnName)
  if (idCol < ncol(data) - 1) {
    data <- data[, c(1:idCol, ncol(data) , (idCol+1):(ncol(data)-1))]
  }
  return(data)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WUAction.R
\name{r2hpcc.UnprotectWsWorkunits}
\alias{r2hpcc.UnprotectWsWorkunits}
\title{Set HPCC workunits to unprotected}
\usage{
r2hpcc.UnprotectWsWorkunits(conn, workunits)
}
\arguments{
\item{conn}{- HPCC connection information}
\item{workunits}{- list of workunits to set to unprotected}
}
\value{
- status of processed operation
}
\description{
Sets the given workunits on the HPCC cluster to the unprotected state, using
the supplied connection, and returns the status of the operation.
}
| /man/r2hpcc.UnprotectWsWorkunits.Rd | no_license | chuajoey/r2hpcc | R | false | true | 430 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WUAction.R
\name{r2hpcc.UnprotectWsWorkunits}
\alias{r2hpcc.UnprotectWsWorkunits}
\title{Set HPCC workunits to unprotected}
\usage{
r2hpcc.UnprotectWsWorkunits(conn, workunits)
}
\arguments{
\item{conn}{- HPCC connection information}
\item{workunits}{- list of workunits to set to unprotected}
}
\value{
- status of processed operation
}
\description{
Sets the given workunits on the HPCC cluster to the unprotected state, using
the supplied connection, and returns the status of the operation.
}
|
# Number formatters shared by the tables/figures below. All of them add a
# thousands separator, disable scientific notation, and trim the padding
# whitespace that format() inserts.
# nice.num(): rounds to one decimal place before formatting.
nice.num <- function(x) {
  rounded <- round(x, 1)
  trimws(format(rounded,
                big.mark = ",", nsmall = 1, digits = 1, scientific = FALSE))
}
# nice.num2(): rounds to two decimal places before formatting.
nice.num2 <- function(x) {
  rounded <- round(x, 2)
  trimws(format(rounded,
                big.mark = ",", nsmall = 2, digits = 2, scientific = FALSE))
}
# nice.num.count(): for counts -- no decimal places are requested.
nice.num.count <- function(x) {
  trimws(format(x,
                big.mark = ",", nsmall = 0, digits = 0, scientific = FALSE))
}
# tidy up tableone output
# Clean up the character matrix produced by tableone for export: re-formats
# every number with thousands separators and prettifies the row labels.
# NB: `{{working.table}}` is not tidy evaluation here -- inside a plain
# function, `{{x}}` is just a doubly braced expression that evaluates to `x`.
TidyTableOne<-function(working.table){
  # format
  # For each column: pull the first number out of each cell, re-format it with
  # nice.num.count(), then substitute it back (str_replace only replaces the
  # first match per cell).
  for(i in 1:ncol({{working.table}})) {
    cur_column <- working.table[, i]
    cur_column <- str_extract(cur_column, '[0-9.]+\\b') %>%
      as.numeric()
    cur_column <-nice.num.count(cur_column)
    # add back in
    working.table[, i] <- str_replace(string={{working.table}}[, i],
                                      pattern='[0-9.]+\\b',
                                      replacement=cur_column)
  }
  # Prettify row labels. Order matters: specific variable renames run before
  # the generic underscore/level clean-ups at the bottom.
  rownames(working.table)<-str_to_sentence(rownames(working.table))
  rownames(working.table)<-str_replace(rownames(working.table),
                                       "Prior_obs_years", "Years of prior observation time")
  rownames(working.table)<-str_replace(rownames(working.table),
                                       "Age_gr", "Age group")
  rownames(working.table)<-str_replace(rownames(working.table),
                                       "Gender", "Sex")
  rownames(working.table)<-str_replace(rownames(working.table),
                                       "Hrfs_gr", "Hospital Frailty Risk Score")
  rownames(working.table)<-str_replace(rownames(working.table),
                                       "Copd", "COPD")
  # str_replace swaps only the first match, hence the repeated call to strip
  # up to two underscores per label.
  rownames(working.table)<-str_replace(rownames(working.table) , "_", " ")
  rownames(working.table)<-str_replace(rownames(working.table) , "_", " ")
  rownames(working.table)<-str_replace(rownames(working.table) , " = 1 ", " ")
  rownames(working.table)<-str_replace(rownames(working.table) , "iqr", "IQR")
  #return
  working.table}
# get tableone
# Build a formatted "Table 1" of patient characteristics for a cohort.
# All variables are passed as non-normal (medians/IQRs reported) and the
# resulting character matrix is cleaned up for export via TidyTableOne().
get.patient.characteristics<-function(Population, vars, factor.vars){
  # Summarise the cohort; test = FALSE because no group comparison is wanted.
  tab.one <- CreateTableOne(
    vars = vars,
    factorVars=factor.vars,
    includeNA=T,
    data = Population,
    test = F)
  # print() with printToggle = FALSE returns the character matrix without
  # writing anything to the console.
  printed <- print(tab.one,
    showAllLevels=F,smd=F,
    nonnormal = vars,
    noSpaces = TRUE,
    contDigits = 1,
    printToggle=FALSE)
  TidyTableOne(data.frame(printed))
}
# general formatting for figures
# Apply the project's standard figure styling to a ggplot object:
# black-and-white theme, no legend title, enlarged axis and legend text.
gg.general.format<-function(plot){
  base.text <- element_text(size=12)
  plot +
    theme_bw() +
    theme(legend.title = element_blank(),
          axis.text = base.text,
          axis.title = element_text(size=12,face="bold"),
          legend.text = base.text)
}
# Standard styling for faceted figures: comma-formatted y axis on the right
# starting at zero, horizontal left facet labels, legend on top.
gg.general.format.facet<-function(plot){
  facet.theme <- theme(panel.spacing = unit(0.6, "lines"),
                       legend.title = element_blank(),
                       axis.text=element_text(size=14),
                       axis.title=element_text(size=14,face="bold"),
                       strip.text = element_text(size=14, face="bold"),
                       strip.text.y.left = element_text(angle = 0),
                       strip.background = element_rect( fill="#f7f7f7"),
                       # axis.title.y.right = element_text(angle = 0),
                       legend.text=element_text(size=14),
                       legend.position = "top")
  plot +
    theme_bw() +
    scale_y_continuous(label=label_comma(accuracy= 1), position = "right", limits=c(0,NA)) +
    facet.theme
}
| /Functions/Functions.R | no_license | Xintong-Li-ZnCu/CovCoagBackgroundIncidence | R | false | false | 3,280 | r | # printing numbers with 1 decimal place and commas
# Number formatters shared by the tables/figures below. All of them add a
# thousands separator, disable scientific notation, and trim the padding
# whitespace that format() inserts.
# nice.num(): rounds to one decimal place before formatting.
nice.num <- function(x) {
  rounded <- round(x, 1)
  trimws(format(rounded,
                big.mark = ",", nsmall = 1, digits = 1, scientific = FALSE))
}
# nice.num2(): rounds to two decimal places before formatting.
nice.num2 <- function(x) {
  rounded <- round(x, 2)
  trimws(format(rounded,
                big.mark = ",", nsmall = 2, digits = 2, scientific = FALSE))
}
# nice.num.count(): for counts -- no decimal places are requested.
nice.num.count <- function(x) {
  trimws(format(x,
                big.mark = ",", nsmall = 0, digits = 0, scientific = FALSE))
}
# tidy up tableone output
# Clean up the character matrix produced by tableone for export: re-formats
# every number with thousands separators and prettifies the row labels.
# NB: `{{working.table}}` is not tidy evaluation here -- inside a plain
# function, `{{x}}` is just a doubly braced expression that evaluates to `x`.
TidyTableOne<-function(working.table){
  # format
  # For each column: pull the first number out of each cell, re-format it with
  # nice.num.count(), then substitute it back (str_replace only replaces the
  # first match per cell).
  for(i in 1:ncol({{working.table}})) {
    cur_column <- working.table[, i]
    cur_column <- str_extract(cur_column, '[0-9.]+\\b') %>%
      as.numeric()
    cur_column <-nice.num.count(cur_column)
    # add back in
    working.table[, i] <- str_replace(string={{working.table}}[, i],
                                      pattern='[0-9.]+\\b',
                                      replacement=cur_column)
  }
  # Prettify row labels. Order matters: specific variable renames run before
  # the generic underscore/level clean-ups at the bottom.
  rownames(working.table)<-str_to_sentence(rownames(working.table))
  rownames(working.table)<-str_replace(rownames(working.table),
                                       "Prior_obs_years", "Years of prior observation time")
  rownames(working.table)<-str_replace(rownames(working.table),
                                       "Age_gr", "Age group")
  rownames(working.table)<-str_replace(rownames(working.table),
                                       "Gender", "Sex")
  rownames(working.table)<-str_replace(rownames(working.table),
                                       "Hrfs_gr", "Hospital Frailty Risk Score")
  rownames(working.table)<-str_replace(rownames(working.table),
                                       "Copd", "COPD")
  # str_replace swaps only the first match, hence the repeated call to strip
  # up to two underscores per label.
  rownames(working.table)<-str_replace(rownames(working.table) , "_", " ")
  rownames(working.table)<-str_replace(rownames(working.table) , "_", " ")
  rownames(working.table)<-str_replace(rownames(working.table) , " = 1 ", " ")
  rownames(working.table)<-str_replace(rownames(working.table) , "iqr", "IQR")
  #return
  working.table}
# Build a formatted "Table 1" of patient characteristics for the given
# variables in `Population` (medians/IQRs for all variables, NA levels
# included), then tidy it for reporting via TidyTableOne().
get.patient.characteristics <- function(Population, vars, factor.vars){
  tab.one <- CreateTableOne(
    vars = vars,
    factorVars = factor.vars,
    includeNA = TRUE,
    data = Population,
    test = FALSE)
  printed <- print(tab.one,
    showAllLevels = FALSE, smd = FALSE,
    nonnormal = vars,
    noSpaces = TRUE,
    contDigits = 1,
    printToggle = FALSE)
  TidyTableOne(data.frame(printed))
}
# General formatting for (non-faceted) figures: black-and-white theme,
# larger fonts, and no legend title.
gg.general.format <- function(plot){
  base.text <- element_text(size = 12)
  bold.text <- element_text(size = 12, face = "bold")
  plot +
    theme_bw() +
    theme(legend.title = element_blank(),
          axis.text = base.text,
          axis.title = bold.text,
          legend.text = base.text)
}
# Shared ggplot formatting for faceted figures: black-and-white theme, a
# comma-formatted right-hand y axis starting at zero, bold facet strips,
# larger fonts, and the legend on top.
gg.general.format.facet <- function(plot){
  plot +
    theme_bw() +
    # `labels` spelled out in full (the original relied on partial matching
    # of `label=`); y axis starts at 0 and the upper limit is left to ggplot
    scale_y_continuous(labels = label_comma(accuracy = 1),
                       position = "right", limits = c(0, NA)) +
    theme(panel.spacing = unit(0.6, "lines"),
          legend.title = element_blank(),
          axis.text = element_text(size = 14),
          axis.title = element_text(size = 14, face = "bold"),
          strip.text = element_text(size = 14, face = "bold"),
          strip.text.y.left = element_text(angle = 0),
          strip.background = element_rect(fill = "#f7f7f7"),
          # axis.title.y.right = element_text(angle = 0),
          legend.text = element_text(size = 14),
          legend.position = "top")
}
|
# Compute mean expression correlation between genes inside each TAD and genes
# sampled in regions flanking the TAD, for each dataset of the pipeline.
#
# Usage:
# Rscript coexpr_around_TADs.R fixKb 1000000
# Rscript coexpr_around_TADs.R sameKb
# Rscript coexpr_around_TADs.R sameNbr
script_name <- "coexpr_around_TADs.R"
startTime <- Sys.time()
cat("> START coexpr_around_TADs.R \n")
SSHFS <- FALSE
buildData <- TRUE
setDir <- ifelse(SSHFS, "/media/electron", "") # needed to load the setting file...
require(foreach)
require(reshape2)
require(doMC)
# 2 workers when running locally over SSHFS, 90 on the compute server
registerDoMC(ifelse(SSHFS, 2, 90))
source("utils_fct.R") # provides get_meanCorr_value() used below
# CREATE_SAMPLE_AROUND_TADS_FIXKB CREATE_SAMPLE_AROUND_TADS_SAMEKB CREATE_SAMPLE_AROUND_TADS_SAMENBR
# args <- c("fixKb", "1000000")
# arg 1: flanking-region sampling strategy; arg 2 (fixKb only): window size in bp
args <- commandArgs(trailingOnly = TRUE)
stopifnot(args[1] %in% c("fixKb", "sameKb", "sameNbr"))
if(args[1] == "fixKb") {
stopifnot(length(args) == 2)
stopifnot(!is.na(as.numeric(as.character(args[2]))))
windowSizeBp <- args[2]
} else {
args[2] <- ""
}
options(scipen=100) # avoid scientific notation in folder names built from bp sizes
outFolder <- file.path("COEXPR_AROUND_TADS", args[1], args[2])
dir.create(outFolder, recursive=TRUE)
# input: sampled flanking-region genes produced by the CREATE_SAMPLE_* scripts
inFold <- file.path(paste0("CREATE_SAMPLE_AROUND_TADS_", toupper(args[1]), "_TWOSIDED"), args[2])
stopifnot(dir.exists(inFold))
corrMeth <- "pearson"
script0_name <- "0_prepGeneData"
pipFolder <- file.path("..", "Yuanlong_Cancer_HiC_data_TAD_DA")
stopifnot(dir.exists(pipFolder))
pipOutFolder <- file.path(pipFolder, "PIPELINE", "OUTPUT_FOLDER")
stopifnot(dir.exists(pipOutFolder))
all_ds_sample_data_file <- file.path(inFold, paste0("all_ds_sample_around_TADs_", args[1], ".Rdata") )
stopifnot(file.exists(all_ds_sample_data_file))
# eval(parse(text = load(...))) loads the .Rdata file and returns the object
# under whatever name it was saved with
all_ds_sample_data <- eval(parse(text = load(all_ds_sample_data_file)))
all_ds <- names(all_ds_sample_data)
nDS <- length(all_ds)
cat("... found nDS = ", nDS, "\n")
ds = all_ds[2] # leftover interactive-debugging assignment; overwritten by the foreach below
# Main loop: for each dataset (Hi-C dataset / expression dataset pair), compute
# for every TAD the mean expression correlation between TAD genes and the
# genes sampled around the TAD (both sides, right side only, left side only),
# each for all samples together and for the two conditions separately.
all_ds_around_TADs_corr <- foreach(ds = all_ds ) %do% {
hicds <- dirname(ds)
exprds <- basename(ds)
stopifnot(dir.exists(file.path(pipFolder, hicds)))
# gene and region lists prepared by pipeline step 0
geneList_file <- file.path(pipOutFolder, hicds, exprds, script0_name, "pipeline_geneList.Rdata")
stopifnot(file.exists(geneList_file))
geneList <- eval(parse(text = load(geneList_file)))
regionList_file <- file.path(pipOutFolder, hicds, exprds, script0_name, "pipeline_regionList.Rdata")
stopifnot(file.exists(regionList_file))
regionList <- eval(parse(text = load(regionList_file)))
# the sourced settings file defines sample1_file/sample2_file (the two cohorts)
settingFile <- file.path(pipFolder, "PIPELINE", "INPUT_FILES", hicds, paste0("run_settings_", exprds, ".R"))
stopifnot(file.exists(settingFile))
source(settingFile)
sample1_file <- file.path(setDir, sample1_file)
sample2_file <- file.path(setDir, sample2_file)
stopifnot(file.exists(sample1_file))
stopifnot(file.exists(sample2_file))
cond1_ID <- eval(parse(text = load(sample1_file)))
cond2_ID <- eval(parse(text = load(sample2_file)))
# qqnorm-transformed expression table (genes x samples)
qqnormDTfile <- file.path(pipOutFolder, hicds, exprds,script0_name, "rna_qqnorm_rnaseqDT.Rdata")
stopifnot(file.exists(qqnormDTfile))
qqnormDT <- eval(parse(text = load(qqnormDTfile)))
stopifnot(names(geneList) %in% rownames(qqnormDT))
stopifnot(setequal(colnames(qqnormDT), c(cond1_ID, cond2_ID)))
norm_rnaseqDT <- qqnormDT[names(geneList),] # !!! ENSURE THAT THE QQNORM IN THE SAME ORDER AS THE GENELIST !!!
stopifnot(rownames(norm_rnaseqDT) == names(geneList))
stopifnot(!duplicated(names(geneList)))
ds_sample_data <- all_ds_sample_data[[paste0(ds)]]
all_regs <- names(ds_sample_data)
stopifnot(setequal(all_regs, regionList))
reg = all_regs[7] # leftover interactive-debugging assignment; overwritten below
# parallel loop over all TADs of the current dataset
ds_all_corr_data <- foreach(reg = all_regs) %dopar% {
tad_data <- ds_sample_data[[paste0(reg)]]
if(tad_data$nGenes > 0) {
########## => TAKING THE TADs AND SAMPLING ON BOTH SIDES OF THE TADs
sample_genes <- tad_data$genes
tad_genes <- tad_data$tad_genes
stopifnot(! sample_genes %in% tad_genes)
stopifnot(sample_genes %in% geneList)
inTAD_genes <- names(geneList)[geneList %in% tad_genes] # the names used in the norm_rnaseqDT
outTAD_genes <- names(geneList)[geneList %in% sample_genes] # the names used in the norm_rnaseqDT
nTotGenes <- length(inTAD_genes) + length(outTAD_genes)
stopifnot(! inTAD_genes %in% outTAD_genes)
stopifnot(! outTAD_genes %in% inTAD_genes)
stopifnot(inTAD_genes %in% rownames(norm_rnaseqDT))
stopifnot(outTAD_genes %in% rownames(norm_rnaseqDT))
# expression sub-matrix restricted to TAD genes + sampled flanking genes
sub_normDT <- norm_rnaseqDT[c(inTAD_genes, outTAD_genes),]
stopifnot(nrow(sub_normDT) == nTotGenes)
stopifnot(rownames(sub_normDT) == c(inTAD_genes, outTAD_genes))
stopifnot(cond1_ID %in% colnames(sub_normDT))
stopifnot(cond2_ID %in% colnames(sub_normDT))
sub_normDT_cond1 <- sub_normDT[,cond1_ID]
sub_normDT_cond2 <- sub_normDT[,cond2_ID]
stopifnot(nrow(sub_normDT) == nrow(sub_normDT_cond1))
stopifnot(nrow(sub_normDT) == nrow(sub_normDT_cond2))
stopifnot( ncol(sub_normDT_cond1) + ncol(sub_normDT_cond2) == ncol(sub_normDT))
stopifnot( ncol(sub_normDT_cond1) == length(cond1_ID))
stopifnot(ncol(sub_normDT_cond2) == length(cond2_ID))
# mean TAD-vs-flanking correlation: all samples, then per condition
meanCorr_all <- get_meanCorr_value(
exprMatrix = sub_normDT,
inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
outside_genes = outTAD_genes,
cormet = corrMeth
)
meanCorr_cond1 <- get_meanCorr_value(
exprMatrix = sub_normDT_cond1,
inside_genes = inTAD_genes,
outside_genes = outTAD_genes,
cormet = corrMeth
)
meanCorr_cond2 <- get_meanCorr_value(
exprMatrix = sub_normDT_cond2,
inside_genes = inTAD_genes,
outside_genes = outTAD_genes,
cormet = corrMeth
)
########## => TAKING THE TADs AND SAMPLING ON THE RIGHT ONLY
if(tad_data$nGenes_right > 0) {
sample_genes_right <- tad_data$genes_right
outTAD_genes_right <- names(geneList)[geneList %in% sample_genes_right] # the names used in the norm_rnaseqDT
stopifnot(! sample_genes_right %in% tad_genes)
nTotGenes_right <- length(inTAD_genes) + length(outTAD_genes_right)
stopifnot(! inTAD_genes %in% outTAD_genes_right)
stopifnot(! outTAD_genes_right %in% inTAD_genes)
stopifnot(outTAD_genes_right %in% rownames(norm_rnaseqDT))
sub_normDT_right <- norm_rnaseqDT[c(inTAD_genes, outTAD_genes_right),]
stopifnot(nrow(sub_normDT_right) == nTotGenes_right)
stopifnot(rownames(sub_normDT_right) == c(inTAD_genes, outTAD_genes_right))
stopifnot(cond1_ID %in% colnames(sub_normDT_right))
stopifnot(cond2_ID %in% colnames(sub_normDT_right))
sub_normDT_cond1_right <- sub_normDT_right[,cond1_ID]
sub_normDT_cond2_right <- sub_normDT_right[,cond2_ID]
stopifnot(nrow(sub_normDT_right) == nrow(sub_normDT_cond1_right))
stopifnot(nrow(sub_normDT_right) == nrow(sub_normDT_cond2_right))
stopifnot( ncol(sub_normDT_cond1_right) + ncol(sub_normDT_cond2_right) == ncol(sub_normDT_right))
stopifnot( ncol(sub_normDT_cond1_right) == length(cond1_ID))
stopifnot(ncol(sub_normDT_cond2_right) == length(cond2_ID))
meanCorrRight_all <- get_meanCorr_value(
exprMatrix = sub_normDT_right,
inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
outside_genes = outTAD_genes_right,
cormet = corrMeth
)
meanCorrRight_cond1 <- get_meanCorr_value(
exprMatrix = sub_normDT_cond1_right,
inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
outside_genes = outTAD_genes_right,
cormet = corrMeth
)
meanCorrRight_cond2 <- get_meanCorr_value(
exprMatrix = sub_normDT_cond2_right,
inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
outside_genes = outTAD_genes_right,
cormet = corrMeth
)
} else {
# no genes sampled on the right side of this TAD
meanCorrRight_all <- NA
meanCorrRight_cond1 <- NA
meanCorrRight_cond2 <- NA
}
########## => TAKING THE TADs AND SAMPLING ON THE LEFT ONLY
if(tad_data$nGenes_left > 0) {
sample_genes_left <- tad_data$genes_left
stopifnot(! sample_genes_left %in% tad_genes)
outTAD_genes_left <- names(geneList)[geneList %in% sample_genes_left] # the names used in the norm_rnaseqDT
stopifnot(! sample_genes_left %in% tad_genes)
nTotGenes_left <- length(inTAD_genes) + length(outTAD_genes_left)
stopifnot(! inTAD_genes %in% outTAD_genes_left)
stopifnot(! outTAD_genes_left %in% inTAD_genes)
stopifnot(outTAD_genes_left %in% rownames(norm_rnaseqDT))
sub_normDT_left <- norm_rnaseqDT[c(inTAD_genes, outTAD_genes_left),]
stopifnot(nrow(sub_normDT_left) == nTotGenes_left)
stopifnot(rownames(sub_normDT_left) == c(inTAD_genes, outTAD_genes_left))
stopifnot(cond1_ID %in% colnames(sub_normDT_left))
stopifnot(cond2_ID %in% colnames(sub_normDT_left))
sub_normDT_cond1_left <- sub_normDT_left[,cond1_ID]
sub_normDT_cond2_left <- sub_normDT_left[,cond2_ID]
stopifnot(nrow(sub_normDT_left) == nrow(sub_normDT_cond1_left))
stopifnot(nrow(sub_normDT_left) == nrow(sub_normDT_cond2_left))
stopifnot( ncol(sub_normDT_cond1_left) + ncol(sub_normDT_cond2_left) == ncol(sub_normDT_left))
stopifnot( ncol(sub_normDT_cond1_left) == length(cond1_ID))
stopifnot(ncol(sub_normDT_cond2_left) == length(cond2_ID))
meanCorrLeft_all <- get_meanCorr_value(
exprMatrix = sub_normDT_left,
inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
outside_genes = outTAD_genes_left,
cormet = corrMeth
)
meanCorrLeft_cond1 <- get_meanCorr_value(
exprMatrix = sub_normDT_cond1_left,
inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
outside_genes = outTAD_genes_left,
cormet = corrMeth
)
meanCorrLeft_cond2 <- get_meanCorr_value(
exprMatrix = sub_normDT_cond2_left,
inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
outside_genes = outTAD_genes_left,
cormet = corrMeth
)
} else {
# no genes sampled on the left side of this TAD
meanCorrLeft_all <- NA
meanCorrLeft_cond1 <- NA
meanCorrLeft_cond2 <- NA
}
} else {
# TAD without any mapped genes: all correlations undefined
meanCorr_all <- NA
meanCorr_cond1 <- NA
meanCorr_cond2 <- NA
meanCorrRight_all <- NA
meanCorrRight_cond1 <- NA
meanCorrRight_cond2 <- NA
meanCorrLeft_all <- NA
meanCorrLeft_cond1 <- NA
meanCorrLeft_cond2 <- NA
}
# per-TAD result: gene counts and mean correlations (both sides / right / left)
list(
nGenes = tad_data$nGenes,
meanCorr = meanCorr_all,
meanCorr_cond1 = meanCorr_cond1,
meanCorr_cond2 = meanCorr_cond2,
nGenes_right = tad_data$nGenes_right,
meanCorr_right = meanCorrRight_all,
meanCorr_cond1_right = meanCorrRight_cond1,
meanCorr_cond2_right = meanCorrRight_cond2,
nGenes_left = tad_data$nGenes_left,
meanCorr_left = meanCorrLeft_all,
meanCorr_cond1_left = meanCorrLeft_cond1,
meanCorr_cond2_left = meanCorrLeft_cond2
)
} # end iterating over all TADs for the current dataset
names(ds_all_corr_data) <- all_regs
ds_all_corr_data
} # end iterating over all DS
# Collect per-dataset results and save them to disk.
names(all_ds_around_TADs_corr) <- all_ds
outFile <- file.path(outFolder, "all_ds_around_TADs_corr.Rdata")
save(all_ds_around_TADs_corr, file = outFile)
cat(paste0("... written: ", outFile, "\n"))
####################################################################################
####################################################################################3
####################################################################################3
# report start/end times and signal completion
txt <- paste0(startTime, "\n", Sys.time(), "\n")
cat(paste0(txt))
cat(paste0("*** DONE: ", script_name, "\n"))
| /2_Yuanlong_Cancer_HiC_data_TAD_DA/coexpr_around_TADs.R | no_license | marzuf/2Yuanlong_Cancer_HiC_data_TAD_DA | R | false | false | 12,634 | r |
# Rscript coexpr_around_TADs.R fixKb 1000000
# Rscript coexpr_around_TADs.R sameKb
# Rscript coexpr_around_TADs.R sameNbr
script_name <- "coexpr_around_TADs.R"
startTime <- Sys.time()
cat("> START coexpr_around_TADs.R \n")
SSHFS <- FALSE
buildData <- TRUE
setDir <- ifelse(SSHFS, "/media/electron", "") # needed to load the setting file...
require(foreach)
require(reshape2)
require(doMC)
registerDoMC(ifelse(SSHFS, 2, 90))
source("utils_fct.R")
# CREATE_SAMPLE_AROUND_TADS_FIXKB CREATE_SAMPLE_AROUND_TADS_SAMEKB CREATE_SAMPLE_AROUND_TADS_SAMENBR
# args <- c("fixKb", "1000000")
args <- commandArgs(trailingOnly = TRUE)
stopifnot(args[1] %in% c("fixKb", "sameKb", "sameNbr"))
if(args[1] == "fixKb") {
stopifnot(length(args) == 2)
stopifnot(!is.na(as.numeric(as.character(args[2]))))
windowSizeBp <- args[2]
} else {
args[2] <- ""
}
options(scipen=100)
outFolder <- file.path("COEXPR_AROUND_TADS", args[1], args[2])
dir.create(outFolder, recursive=TRUE)
inFold <- file.path(paste0("CREATE_SAMPLE_AROUND_TADS_", toupper(args[1]), "_TWOSIDED"), args[2])
stopifnot(dir.exists(inFold))
corrMeth <- "pearson"
script0_name <- "0_prepGeneData"
pipFolder <- file.path("..", "Yuanlong_Cancer_HiC_data_TAD_DA")
stopifnot(dir.exists(pipFolder))
pipOutFolder <- file.path(pipFolder, "PIPELINE", "OUTPUT_FOLDER")
stopifnot(dir.exists(pipOutFolder))
all_ds_sample_data_file <- file.path(inFold, paste0("all_ds_sample_around_TADs_", args[1], ".Rdata") )
stopifnot(file.exists(all_ds_sample_data_file))
all_ds_sample_data <- eval(parse(text = load(all_ds_sample_data_file)))
all_ds <- names(all_ds_sample_data)
nDS <- length(all_ds)
cat("... found nDS = ", nDS, "\n")
ds = all_ds[2]
all_ds_around_TADs_corr <- foreach(ds = all_ds ) %do% {
hicds <- dirname(ds)
exprds <- basename(ds)
stopifnot(dir.exists(file.path(pipFolder, hicds)))
geneList_file <- file.path(pipOutFolder, hicds, exprds, script0_name, "pipeline_geneList.Rdata")
stopifnot(file.exists(geneList_file))
geneList <- eval(parse(text = load(geneList_file)))
regionList_file <- file.path(pipOutFolder, hicds, exprds, script0_name, "pipeline_regionList.Rdata")
stopifnot(file.exists(regionList_file))
regionList <- eval(parse(text = load(regionList_file)))
settingFile <- file.path(pipFolder, "PIPELINE", "INPUT_FILES", hicds, paste0("run_settings_", exprds, ".R"))
stopifnot(file.exists(settingFile))
source(settingFile)
sample1_file <- file.path(setDir, sample1_file)
sample2_file <- file.path(setDir, sample2_file)
stopifnot(file.exists(sample1_file))
stopifnot(file.exists(sample2_file))
cond1_ID <- eval(parse(text = load(sample1_file)))
cond2_ID <- eval(parse(text = load(sample2_file)))
qqnormDTfile <- file.path(pipOutFolder, hicds, exprds,script0_name, "rna_qqnorm_rnaseqDT.Rdata")
stopifnot(file.exists(qqnormDTfile))
qqnormDT <- eval(parse(text = load(qqnormDTfile)))
stopifnot(names(geneList) %in% rownames(qqnormDT))
stopifnot(setequal(colnames(qqnormDT), c(cond1_ID, cond2_ID)))
norm_rnaseqDT <- qqnormDT[names(geneList),] # !!! ENSURE THAT THE QQNORM IN THE SAME ORDER AS THE GENELIST !!!
stopifnot(rownames(norm_rnaseqDT) == names(geneList))
stopifnot(!duplicated(names(geneList)))
ds_sample_data <- all_ds_sample_data[[paste0(ds)]]
all_regs <- names(ds_sample_data)
stopifnot(setequal(all_regs, regionList))
reg = all_regs[7]
ds_all_corr_data <- foreach(reg = all_regs) %dopar% {
tad_data <- ds_sample_data[[paste0(reg)]]
if(tad_data$nGenes > 0) {
########## => TAKING THE TADs AND SAMPLING ON BOTH SIDES OF THE TADs
sample_genes <- tad_data$genes
tad_genes <- tad_data$tad_genes
stopifnot(! sample_genes %in% tad_genes)
stopifnot(sample_genes %in% geneList)
inTAD_genes <- names(geneList)[geneList %in% tad_genes] # the names used in the nrom_rnaseqDT
outTAD_genes <- names(geneList)[geneList %in% sample_genes] # the names used in the nrom_rnaseqDT
nTotGenes <- length(inTAD_genes) + length(outTAD_genes)
stopifnot(! inTAD_genes %in% outTAD_genes)
stopifnot(! outTAD_genes %in% inTAD_genes)
stopifnot(inTAD_genes %in% rownames(norm_rnaseqDT))
stopifnot(outTAD_genes %in% rownames(norm_rnaseqDT))
sub_normDT <- norm_rnaseqDT[c(inTAD_genes, outTAD_genes),]
stopifnot(nrow(sub_normDT) == nTotGenes)
stopifnot(rownames(sub_normDT) == c(inTAD_genes, outTAD_genes))
stopifnot(cond1_ID %in% colnames(sub_normDT))
stopifnot(cond2_ID %in% colnames(sub_normDT))
sub_normDT_cond1 <- sub_normDT[,cond1_ID]
sub_normDT_cond2 <- sub_normDT[,cond2_ID]
stopifnot(nrow(sub_normDT) == nrow(sub_normDT_cond1))
stopifnot(nrow(sub_normDT) == nrow(sub_normDT_cond2))
stopifnot( ncol(sub_normDT_cond1) + ncol(sub_normDT_cond2) == ncol(sub_normDT))
stopifnot( ncol(sub_normDT_cond1) == length(cond1_ID))
stopifnot(ncol(sub_normDT_cond2) == length(cond2_ID))
meanCorr_all <- get_meanCorr_value(
exprMatrix = sub_normDT,
inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
outside_genes = outTAD_genes,
cormet = corrMeth
)
meanCorr_cond1 <- get_meanCorr_value(
exprMatrix = sub_normDT_cond1,
inside_genes = inTAD_genes,
outside_genes = outTAD_genes,
cormet = corrMeth
)
meanCorr_cond2 <- get_meanCorr_value(
exprMatrix = sub_normDT_cond2,
inside_genes = inTAD_genes,
outside_genes = outTAD_genes,
cormet = corrMeth
)
########## => TAKING THE TADs AND SAMPLING ON THE RIGHT ONLY
if(tad_data$nGenes_right > 0) {
sample_genes_right <- tad_data$genes_right
outTAD_genes_right <- names(geneList)[geneList %in% sample_genes_right] # the names used in the nrom_rnaseqDT
stopifnot(! sample_genes_right %in% tad_genes)
nTotGenes_right <- length(inTAD_genes) + length(outTAD_genes_right)
stopifnot(! inTAD_genes %in% outTAD_genes_right)
stopifnot(! outTAD_genes_right %in% inTAD_genes)
stopifnot(outTAD_genes_right %in% rownames(norm_rnaseqDT))
sub_normDT_right <- norm_rnaseqDT[c(inTAD_genes, outTAD_genes_right),]
stopifnot(nrow(sub_normDT_right) == nTotGenes_right)
stopifnot(rownames(sub_normDT_right) == c(inTAD_genes, outTAD_genes_right))
stopifnot(cond1_ID %in% colnames(sub_normDT_right))
stopifnot(cond2_ID %in% colnames(sub_normDT_right))
sub_normDT_cond1_right <- sub_normDT_right[,cond1_ID]
sub_normDT_cond2_right <- sub_normDT_right[,cond2_ID]
stopifnot(nrow(sub_normDT_right) == nrow(sub_normDT_cond1_right))
stopifnot(nrow(sub_normDT_right) == nrow(sub_normDT_cond2_right))
stopifnot( ncol(sub_normDT_cond1_right) + ncol(sub_normDT_cond2_right) == ncol(sub_normDT_right))
stopifnot( ncol(sub_normDT_cond1_right) == length(cond1_ID))
stopifnot(ncol(sub_normDT_cond2_right) == length(cond2_ID))
meanCorrRight_all <- get_meanCorr_value(
exprMatrix = sub_normDT_right,
inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
outside_genes = outTAD_genes_right,
cormet = corrMeth
)
meanCorrRight_cond1 <- get_meanCorr_value(
exprMatrix = sub_normDT_cond1_right,
inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
outside_genes = outTAD_genes_right,
cormet = corrMeth
)
meanCorrRight_cond2 <- get_meanCorr_value(
exprMatrix = sub_normDT_cond2_right,
inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
outside_genes = outTAD_genes_right,
cormet = corrMeth
)
} else {
meanCorrRight_all <- NA
meanCorrRight_cond1 <- NA
meanCorrRight_cond2 <- NA
}
if(tad_data$nGenes_left > 0) {
sample_genes_left <- tad_data$genes_left
stopifnot(! sample_genes_left %in% tad_genes)
outTAD_genes_left <- names(geneList)[geneList %in% sample_genes_left] # the names used in the nrom_rnaseqDT
stopifnot(! sample_genes_left %in% tad_genes)
nTotGenes_left <- length(inTAD_genes) + length(outTAD_genes_left)
stopifnot(! inTAD_genes %in% outTAD_genes_left)
stopifnot(! outTAD_genes_left %in% inTAD_genes)
stopifnot(outTAD_genes_left %in% rownames(norm_rnaseqDT))
sub_normDT_left <- norm_rnaseqDT[c(inTAD_genes, outTAD_genes_left),]
stopifnot(nrow(sub_normDT_left) == nTotGenes_left)
stopifnot(rownames(sub_normDT_left) == c(inTAD_genes, outTAD_genes_left))
stopifnot(cond1_ID %in% colnames(sub_normDT_left))
stopifnot(cond2_ID %in% colnames(sub_normDT_left))
sub_normDT_cond1_left <- sub_normDT_left[,cond1_ID]
sub_normDT_cond2_left <- sub_normDT_left[,cond2_ID]
stopifnot(nrow(sub_normDT_left) == nrow(sub_normDT_cond1_left))
stopifnot(nrow(sub_normDT_left) == nrow(sub_normDT_cond2_left))
stopifnot( ncol(sub_normDT_cond1_left) + ncol(sub_normDT_cond2_left) == ncol(sub_normDT_left))
stopifnot( ncol(sub_normDT_cond1_left) == length(cond1_ID))
stopifnot(ncol(sub_normDT_cond2_left) == length(cond2_ID))
meanCorrLeft_all <- get_meanCorr_value(
exprMatrix = sub_normDT_left,
inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
outside_genes = outTAD_genes_left,
cormet = corrMeth
)
meanCorrLeft_cond1 <- get_meanCorr_value(
exprMatrix = sub_normDT_cond1_left,
inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
outside_genes = outTAD_genes_left,
cormet = corrMeth
)
meanCorrLeft_cond2 <- get_meanCorr_value(
exprMatrix = sub_normDT_cond2_left,
inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
outside_genes = outTAD_genes_left,
cormet = corrMeth
)
} else {
meanCorrLeft_all <- NA
meanCorrLeft_cond1 <- NA
meanCorrLeft_cond2 <- NA
}
} else {
meanCorr_all <- NA
meanCorr_cond1 <- NA
meanCorr_cond2 <- NA
meanCorrRight_all <- NA
meanCorrRight_cond1 <- NA
meanCorrRight_cond2 <- NA
meanCorrLeft_all <- NA
meanCorrLeft_cond1 <- NA
meanCorrLeft_cond2 <- NA
}
list(
nGenes = tad_data$nGenes,
meanCorr = meanCorr_all,
meanCorr_cond1 = meanCorr_cond1,
meanCorr_cond2 = meanCorr_cond2,
nGenes_right = tad_data$nGenes_right,
meanCorr_right = meanCorrRight_all,
meanCorr_cond1_right = meanCorrRight_cond1,
meanCorr_cond2_right = meanCorrRight_cond2,
nGenes_left = tad_data$nGenes_left,
meanCorr_left = meanCorrLeft_all,
meanCorr_cond1_left = meanCorrLeft_cond1,
meanCorr_cond2_left = meanCorrLeft_cond2
)
} # end iterating over all TADs for the current dataset
names(ds_all_corr_data) <- all_regs
ds_all_corr_data
} # end iterating over all DS
names(all_ds_around_TADs_corr) <- all_ds
outFile <- file.path(outFolder, "all_ds_around_TADs_corr.Rdata")
save(all_ds_around_TADs_corr, file = outFile)
cat(paste0("... written: ", outFile, "\n"))
####################################################################################
####################################################################################3
####################################################################################3
txt <- paste0(startTime, "\n", Sys.time(), "\n")
cat(paste0(txt))
cat(paste0("*** DONE: ", script_name, "\n"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simplereg.R
\name{simplereg}
\alias{simplereg}
\title{Simple linear and nonparametric regression}
\usage{
simplereg(x, y, type = "lin", sp = NULL)
}
\arguments{
\item{x}{numerical vector, input x values}
\item{y}{numerical vector, input y values}
\item{type}{character, type of regression; available options are: \code{lin} (linear regression, the default), \code{pol} (local polynomial regression of degree 2), \code{ks} (nonparametric kernel smoothing)}
\item{sp}{numeric, parameter to control the degree of smoothing; span for local polynomial regression and bandwidth for ksmooth}
}
\value{
An object of class \code{simplereg}, i.e. a list with the following objects:
\itemize{
\item \code{Model}, the output model (linear regression, local polynomial regression, or kernel smoothing)
\item \code{R2}, (in-sample) coefficient of determination
\item \code{x}, input x values
\item \code{y}, input y values
\item \code{type}, type of regression
}
}
\description{
Simple linear and nonparametric regression
}
\examples{
Pbox.sel <- subset(Pbox, MIN >= 500)
X <- Pbox.sel$AST/Pbox.sel$MIN
Y <- Pbox.sel$TOV/Pbox.sel$MIN
Pl <- Pbox.sel$Player
mod <- simplereg(x=X, y=Y, type="lin")
}
\references{
P. Zuccolotto and M. Manisera (2020) Basketball Data Science: With Applications in R. CRC Press.
}
\seealso{
\code{\link{loess}}, \code{\link{ksmooth}}
}
\author{
Marco Sandri, Paola Zuccolotto, Marica Manisera (\email{basketball.analyzer.help@gmail.com})
}
| /man/simplereg.Rd | no_license | CheThanos/BasketballAnalyzeR | R | false | true | 1,588 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simplereg.R
\name{simplereg}
\alias{simplereg}
\title{Simple linear and nonparametric regression}
\usage{
simplereg(x, y, type = "lin", sp = NULL)
}
\arguments{
\item{x}{numerical vector, input x values}
\item{y}{numerical vector, input y values}
\item{type}{character, type of regression; available options are: \code{lin} (linear regression, the default), \code{pol} (local polynomial regression of degree 2), \code{ks} (nonparametric kernel smoothing)}
\item{sp}{numeric, parameter to control the degree of smoothing; span for local polynomial regression and bandwidth for ksmooth}
}
\value{
An object of class \code{simplereg}, i.e. a list with the following objects:
\itemize{
\item \code{Model}, the output model (linear regression, local polynomial regression, or kernel smoothing)
}
\itemize{
\item \code{R2}, (in-sample) coefficient of determination
}
\itemize{
\item \code{x}, input x values
}
\itemize{
\item \code{y}, input y values
}
\itemize{
\item \code{type}, type of regression
}
}
\description{
Simple linear and nonparametric regression
}
\examples{
Pbox.sel <- subset(Pbox, MIN >= 500)
X <- Pbox.sel$AST/Pbox.sel$MIN
Y <- Pbox.sel$TOV/Pbox.sel$MIN
Pl <- Pbox.sel$Player
mod <- simplereg(x=X, y=Y, type="lin")
}
\references{
P. Zuccolotto and M. Manisera (2020) Basketball Data Science: With Applications in R. CRC Press.
}
\seealso{
\code{\link{loess}}, \code{\link{ksmooth}}
}
\author{
Marco Sandri, Paola Zuccolotto, Marica Manisera (\email{basketball.analyzer.help@gmail.com})
}
|
# Differential expression (DESeq2) and variance-partition (PVCA) analysis of
# RNA-seq gene counts, plus PCA on immune- and epithelial-specific gene sets
# tested for separation by biopsy location.
library("DESeq2")
library(SummarizedExperiment)
library(pvca)
library(lme4)
# Gene-level count matrix (genes x samples) and matching sample metadata.
# BUG FIX: the metadata was originally read into `mycoldata` but every later
# use referred to an undefined `sampleinfo`; a single consistent name is used.
genecounts <- as.matrix(read.table("~/genecounts.csv", sep=",", header=TRUE, row.names=1))
sampleinfo <- read.csv("~/sampleinfo.csv", sep=",", row.names = 1, header=TRUE)
# Model ancestry, disease, location, and the disease:location interaction.
mydds <- DESeqDataSetFromMatrix(countData = genecounts, colData = sampleinfo, design = ~ Ancestry + Disease + Location + Disease:Location)
# Drop genes with low average counts before transformation/testing.
mydds <- mydds[ rowMeans(counts(mydds)) > 5, ]
vsd <- vst(mydds, blind=TRUE)  # variance-stabilized counts for PCA/PVCA
mydds <- DESeq(mydds)
###PCA/PVCA###
gene.pca <- prcomp(t(assay(vsd)))
gene.pca.proportionvariances <- ((gene.pca$sdev^2) / (sum(gene.pca$sdev^2)))*100
pct_threshold <- 0.75  # variance fraction the PVCA principal components must explain
batch.factors <- c("Ancestry", "Location", "Disease")
geneeset <- ExpressionSet(assay(vsd), as(sampleinfo, "AnnotatedDataFrame"))
pvcaobj <- pvcaBatchAssess(geneeset, batch.factors, pct_threshold)
pvca.res <- data.frame(label=as.character(pvcaobj$label), wmpv=round(as.numeric(pvcaobj$dat),3))
# PCA restricted to immune-cell marker genes.
immgenes <- read.table("~/immunegenes.txt", sep="\t", header=FALSE, stringsAsFactors = FALSE, row.names = 1)
immvsd <- vsd[ rownames(vsd) %in% rownames(immgenes), ]
immgenespc <- plotPCA(immvsd, intgroup=c("Location"), returnData=TRUE)
# PCA restricted to epithelial marker genes.
epigenes <- read.table("~/epithelialgenes.txt", sep="\t", header=FALSE, stringsAsFactors = FALSE, row.names = 1)
epivsd <- vsd[ rownames(vsd) %in% rownames(epigenes), ]
epigenespc <- plotPCA(epivsd, intgroup=c("Location"), returnData=TRUE)
# Test whether PC1 of each signature separates the two biopsy locations.
immpca.sig <- t.test(immgenespc$PC1~immgenespc$Location)
epipca.sig <- t.test(epigenespc$PC1~epigenespc$Location)
epipca.sig <- t.test(epigenespc$PC1~epigenespc$Location) | /AnalyzeGeneExpression.R | no_license | kiera-gt/RectumIleumIBD-RNAseq | R | false | false | 1,516 | r | library("DESeq2")
library(SummarizedExperiment)
library(pvca)
library(lme4)
genecounts <- as.matrix(read.table("~/genecounts.csv", sep=",", header=TRUE, row.names=1))
mycoldata <- read.csv("~/sampleinfo.csv", sep=",", row.names = 1, header=TRUE)
mydds = DESeqDataSetFromMatrix(countData = genecounts, colData = sampleinfo, design = ~ Ancestry + Disease + Location + Disease:Location)
mydds <- mydds[ rowMeans(counts(mydds)) > 5, ]
vsd <- vst(mydds, blind=TRUE)
mydds <- DESeq(mydds)
###PCA/PVCA###
gene.pca <- prcomp(t(assay(vsd)))
gene.pca.proportionvariances <- ((gene.pca$sdev^2) / (sum(gene.pca$sdev^2)))*100
pct_threshold <- 0.75
batch.factors <- c("Ancestry", "Location", "Disease")
geneeset <- ExpressionSet(assay(vsd),as(sampleinfo,"AnnotatedDataFrame"))
pvcaobj <- pvcaBatchAssess(geneeset, batch.factors, pct_threshold)
pvca.res <- data.frame(label=as.character(pvcaobj$label),wmpv=round(as.numeric(pvcaobj$dat),3))
immgenes <- read.table("~/immunegenes.txt", sep="\t", header=FALSE, stringsAsFactors = FALSE, row.names = 1)
immvsd <- vsd[ rownames(vsd) %in% rownames(immgenes), ]
immgenespc <- plotPCA(immvsd, intgroup=c("Location"), returnData=TRUE)
epigenes <- read.table("~/epithelialgenes.txt", sep="\t", header=FALSE, stringsAsFactors = FALSE, row.names = 1)
epivsd <- vsd[ rownames(vsd) %in% rownames(epigenes), ]
epigenespc <- plotPCA(epivsd, intgroup=c("Location"), returnData=TRUE)
immpca.sig <- t.test(immgenespc$PC1~immgenespc$Location)
epipca.sig <- t.test(epigenespc$PC1~epigenespc$Location) |
\name{KNN_SUM}
\alias{KNN_SUM}
\title{Sum of distance to k-nearest neighbors}
\description{Function to calculate sum of distance to k-nearest neighbors as an outlier score, based on a kd-tree}
\usage{
KNN_SUM(dataset, k=5)
}
\arguments{
\item{dataset}{The dataset for which observations have a summed k-nearest neighbors distance returned}
\item{k}{The number of k-nearest neighbors. k has to be smaller than the number of observations in dataset}
}
\details{KNN_SUM computes the sum of distances to the k-nearest neighboring observations. A kd-tree is used for kNN computation, using the kNN() function from the 'dbscan' package.
The KNN_SUM function is useful for outlier detection in clustering and other multidimensional domains.}
\value{A vector of summed distances to the k-nearest neighbors, one per observation. The greater the distance, the greater the outlierness}
\author{Jacob H. Madsen}
\examples{
# Create dataset and set an optional k
X <- iris[,1:4]
K <- 5
# Find outliers
outlier_score <- KNN_SUM(dataset=X, k=K)
# Sort and find index for most outlying observations
names(outlier_score) <- 1:nrow(X)
sort(outlier_score, decreasing = TRUE)
# Inspect the distribution of outlier scores
hist(outlier_score)
}
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }
| /man/KNN_SUM.Rd | no_license | yimengweimaxu/DDoutlier | R | false | false | 1,243 | rd | \name{KNN_SUM}
\alias{KNN_SUM}
\title{Sum of distance to k-nearest neighbors}
\description{Function to calculate sum of distance to k-nearest neighbors as an outlier score, based on a kd-tree}
\usage{
KNN_SUM(dataset, k=5)
}
\arguments{
\item{dataset}{The dataset for which observations have a summed k-nearest neighbors distance returned}
\item{k}{The number of k-nearest neighbors. k has to be smaller than the number of observations in dataset}
}
\details{KNN_SUM computes the sum of distance to neighboring observations. A kd-tree is used for kNN computation, using the kNN() function from the 'dbscan' package.
The KNN_SUM function is useful for outlier detection in clustering and other multidimensional domains.}
\value{A vector of summed distance for observations. The greater distance, the greater outlierness}
\author{Jacob H. Madsen}
\examples{
# Create dataset and set an optional k
X <- iris[,1:4]
K <- 5
# Find outliers
outlier_score <- KNN_SUM(dataset=X, k=K)
# Sort and find index for most outlying observations
names(outlier_score) <- 1:nrow(X)
sort(outlier_score, decreasing = TRUE)
# Inspect the distribution of outlier scores
hist(outlier_score)
}
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }
|
context('utils')

# Unpack the bundled hmmstat output into the session temp dir so that
# getIdsFromStats() can be exercised on a real file.
stat_file <- system.file('testdata', 'hmmstat.tar.gz', package = 'phylen')
untar(stat_file, files = 'hmmstat.txt', exdir = tempdir())
stat <- list.files(path = tempdir(),
                   pattern = '^hmmstat.txt$',
                   full.names = TRUE)

# Profile identifiers expected from the bundled hmmstat.txt fixture.
xpec <- c("thaNOG.ENOG411CCBB.meta_raw", "thaNOG.ENOG411CCBA.meta_raw",
          "thaNOG.ENOG411CCB9.meta_raw", "thaNOG.ENOG411CCB8.meta_raw")

test_that('getIdsFromStats works', {
  ids <- phylen:::getIdsFromStats(stats = stat)
  expect_is(ids, 'character')
  expect_length(ids, 4L)
  expect_identical(ids, xpec)
})

# Clean up the extracted fixture so repeated runs start fresh.
file.remove(stat)
| /tests/testthat/test_utils.R | permissive | iferres/phylen | R | false | false | 602 | r | context('utils')
stat_file <- system.file('testdata', 'hmmstat.tar.gz', package = 'phylen')
untar(stat_file, files = 'hmmstat.txt', exdir = tempdir())
stat <- list.files(path = tempdir(),
pattern = '^hmmstat.txt$',
full.names = TRUE)
xpec <- c("thaNOG.ENOG411CCBB.meta_raw", "thaNOG.ENOG411CCBA.meta_raw",
"thaNOG.ENOG411CCB9.meta_raw", "thaNOG.ENOG411CCB8.meta_raw")
test_that('getIdsFromStats works',{
x <- phylen:::getIdsFromStats(stats = stat)
expect_is(x, 'character')
expect_length(x, 4L)
expect_identical(x, xpec)
})
file.remove(stat)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seasonal_dot.R
\name{seasonal_dot}
\alias{seasonal_dot}
\alias{seasonal_dot.swmpr}
\title{Seasonal Dot Plot}
\usage{
seasonal_dot(swmpr_in, ...)
\method{seasonal_dot}{swmpr}(
swmpr_in,
param = NULL,
lm_trend = FALSE,
lm_lab = FALSE,
free_y = FALSE,
log_trans = FALSE,
converted = FALSE,
plot_title = FALSE,
plot = TRUE,
...
)
}
\arguments{
\item{swmpr_in}{input swmpr object}
\item{...}{additional arguments passed to other methods. See \code{\link{assign_season}}}
\item{param}{chr string of variable to plot}
\item{lm_trend}{logical, add linear trend line?}
\item{lm_lab}{logical, add significance label? Statistically significant results will appear in bold.}
\item{free_y}{logical, should the y-axis be free? Defaults to \code{FALSE}. If \code{FALSE}, defaults to zero, unless negative values are present. If \code{TRUE}, y-axis limits are selected by \code{ggplot}}
\item{log_trans}{logical, should y-axis be log? Defaults to \code{FALSE}}
\item{converted}{logical, were the units converted from the original units used by CDMO? Defaults to \code{FALSE}. See \code{y_labeler} for details.}
\item{plot_title}{logical, should the station name be included as the plot title? Defaults to \code{FALSE}}
\item{plot}{logical, should a plot be returned? Defaults to \code{TRUE}}
}
\value{
Returns a \code{\link[ggplot2]{ggplot}} object
}
\description{
Plot average/min/max seasonal values faceted by season
}
\details{
This function summarizes minimum, mean, and maximum values calculated on a seasonal basis to allow for easier intra-season comparisons over time.
\code{lm_trend = TRUE} adds a linear regression to the plot, and \code{lm_lab = TRUE} will add p-values from the linear regression to the plot. If the p-values are significant (p < 0.05) then the text will appear in bold. \code{lm_lab} text is color coded to match with the corresponding dots.
}
\examples{
dat_wq <- elksmwq
#dat_wq <- subset(dat_wq, subset = c('2010-01-01 0:00', '2017-01-01 0:00'))
dat_wq <- qaqc(dat_wq, qaqc_keep = c(0, 3, 5))
x <-
seasonal_dot(dat_wq, param = 'do_mgl'
, lm_trend = TRUE
, lm_lab = TRUE
, plot_title = TRUE)
\donttest{
y <-
seasonal_dot(dat_wq, param = 'do_mgl'
, lm_trend = FALSE
, lm_lab = FALSE
, plot_title = TRUE)
z <-
seasonal_dot(dat_wq, param = 'do_mgl'
, lm_trend = TRUE
, lm_lab = FALSE
, plot_title = TRUE)
dat_nut <- elknmnut
dat_nut <- subset(dat_nut, subset = c('2007-01-01 0:00', '2017-01-01 0:00'))
dat_nut <- qaqc(dat_nut, qaqc_keep = c(0, 3, 5))
x1 <-
seasonal_dot(dat_nut
, param = 'chla_n'
, season_grps = list(c(1,2,3), c(4,5,6), c(7,8,9), c(10, 11, 12))
, season_names = c('Winter', 'Spring', 'Summer', 'Fall')
, season_start = 'Spring'
, lm_trend = FALSE
, lm_lab = FALSE
, plot_title = TRUE)
y1 <-
seasonal_dot(dat_nut, param = 'chla_n'
, lm_trend = TRUE
, lm_lab = FALSE
, plot_title = TRUE)
z1 <-
seasonal_dot(dat_nut, param = 'chla_n'
, lm_trend = TRUE
, lm_lab = TRUE
, plot_title = TRUE)
}
}
\seealso{
\code{\link[ggplot2]{ggplot}}, \code{\link{assign_season}}, \code{\link{y_labeler}}
}
\author{
Julie Padilla, Dave Eslinger
}
\concept{analyze}
| /man/seasonal_dot.Rd | no_license | cran/SWMPrExtension | R | false | true | 3,647 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seasonal_dot.R
\name{seasonal_dot}
\alias{seasonal_dot}
\alias{seasonal_dot.swmpr}
\title{Seasonal Dot Plot}
\usage{
seasonal_dot(swmpr_in, ...)
\method{seasonal_dot}{swmpr}(
swmpr_in,
param = NULL,
lm_trend = FALSE,
lm_lab = FALSE,
free_y = FALSE,
log_trans = FALSE,
converted = FALSE,
plot_title = FALSE,
plot = TRUE,
...
)
}
\arguments{
\item{swmpr_in}{input swmpr object}
\item{...}{additional arguments passed to other methods. See \code{\link{assign_season}}}
\item{param}{chr string of variable to plot}
\item{lm_trend}{logical, add linear trend line?}
\item{lm_lab}{logical, add significance label? Statistically significant results will appear in bold.}
\item{free_y}{logical, should the y-axis be free? Defaults to \code{FALSE}. If \code{FALSE}, defaults to zero, unless negative values are present. If \code{TRUE}, y-axis limits are selected by \code{ggplot}}
\item{log_trans}{logical, should y-axis be log? Defaults to \code{FALSE}}
\item{converted}{logical, were the units converted from the original units used by CDMO? Defaults to \code{FALSE}. See \code{y_labeler} for details.}
\item{plot_title}{logical, should the station name be included as the plot title? Defaults to \code{FALSE}}
\item{plot}{logical, should a plot be returned? Defaults to \code{TRUE}}
}
\value{
Returns a \code{\link[ggplot2]{ggplot}} object
}
\description{
Plot average/min/max seasonal values faceted by season
}
\details{
This function summarizes minimum, mean, and maximum values calculated on a seasonal basis to allow for easier intra-season comparisons over time.
\code{lm_trend = TRUE} adds a linear regression to the plot, and \code{lm_lab = TRUE} will add p-values from the linear regression to the plot. If the p-values are significant (p < 0.05) then the text will appear in bold. \code{lm_lab} text is color coded to match with the corresponding dots.
}
\examples{
dat_wq <- elksmwq
#dat_wq <- subset(dat_wq, subset = c('2010-01-01 0:00', '2017-01-01 0:00'))
dat_wq <- qaqc(dat_wq, qaqc_keep = c(0, 3, 5))
x <-
seasonal_dot(dat_wq, param = 'do_mgl'
, lm_trend = TRUE
, lm_lab = TRUE
, plot_title = TRUE)
\donttest{
y <-
seasonal_dot(dat_wq, param = 'do_mgl'
, lm_trend = FALSE
, lm_lab = FALSE
, plot_title = TRUE)
z <-
seasonal_dot(dat_wq, param = 'do_mgl'
, lm_trend = TRUE
, lm_lab = FALSE
, plot_title = TRUE)
dat_nut <- elknmnut
dat_nut <- subset(dat_nut, subset = c('2007-01-01 0:00', '2017-01-01 0:00'))
dat_nut <- qaqc(dat_nut, qaqc_keep = c(0, 3, 5))
x1 <-
seasonal_dot(dat_nut
, param = 'chla_n'
, season_grps = list(c(1,2,3), c(4,5,6), c(7,8,9), c(10, 11, 12))
, season_names = c('Winter', 'Spring', 'Summer', 'Fall')
, season_start = 'Spring'
, lm_trend = FALSE
, lm_lab = FALSE
, plot_title = TRUE)
y1 <-
seasonal_dot(dat_nut, param = 'chla_n'
, lm_trend = TRUE
, lm_lab = FALSE
, plot_title = TRUE)
z1 <-
seasonal_dot(dat_nut, param = 'chla_n'
, lm_trend = TRUE
, lm_lab = TRUE
, plot_title = TRUE)
}
}
\seealso{
\code{\link[ggplot2]{ggplot}}, \code{\link{assign_season}}, \code{\link{y_labeler}}
}
\author{
Julie Padilla, Dave Eslinger
}
\concept{analyze}
|
# function to compute multivariate normal density, utilize the fact that we have both Sigma and SigmaInverse matrix
# input: xx: a vector, i.e. a point in R^d
# mu: mean \mu of the multivariate normal distribution
# SigmaList: a list object, SigmaList[[1]] is the covariance matrix \Sigma, and SigmaList[[2]] is the inverse of covariance matrix
# output: a real number that is the value of the density function at xx
mydmvnorm = function(xx, mu, SigmaList){
Sigma = SigmaList[[1]]
SigmaInverse = SigmaList[[2]]
D = dim(Sigma)[1]
xxTilde = xx - mu
prob = exp((-0.5) * t(xxTilde) %*% SigmaInverse %*% xxTilde )
prob = prob/((2 * pi)^(D/2) * sqrt(abs(det(Sigma))))
return(prob)
} | /mydmvnorm.R | no_license | xinwangmath/EM_categorization | R | false | false | 729 | r | # function to compute multivariate normal density, utilize the fact that we have both Sigma and SigmaInverse matrix
# input: xx: a vector, i.e. a point in R^d
# mu: mean \mu of the multivariate normal distribution
# SigmaList: a list object, SigmaList[[1]] is the covariance matrix \Sigma, and SigmaList[[2]] is the inverse of covariance matrix
# output: a real number that is the value of the density function at xx
mydmvnorm <- function(xx, mu, SigmaList) {
  # Multivariate normal density evaluated at a single point.
  #
  # Args:
  #   xx:        numeric vector, the point in R^d at which to evaluate.
  #   mu:        numeric vector, mean of the distribution.
  #   SigmaList: list; [[1]] is the covariance matrix Sigma and [[2]] is its
  #              precomputed inverse (so no solve() is needed per call).
  #
  # Returns: the density value as a plain numeric scalar (the original
  # returned a 1x1 matrix from the %*% chain).
  Sigma <- SigmaList[[1]]
  SigmaInverse <- SigmaList[[2]]
  D <- nrow(Sigma)
  xxTilde <- xx - mu
  # Quadratic form t(x - mu) %*% SigmaInv %*% (x - mu), computed as a scalar
  # sum instead of a 1x1 matrix product.
  quadForm <- sum(xxTilde * (SigmaInverse %*% xxTilde))
  # abs() guards against a tiny negative determinant caused by round-off,
  # mirroring the original code.
  normConst <- (2 * pi)^(D / 2) * sqrt(abs(det(Sigma)))
  exp(-0.5 * quadForm) / normConst
}
#' Stratified Standard Error calculator for SSD
#'
#' @param Size a numerical vector giving the size of each individual as a scalar quantity
#' @param Sex a character or factor vector recording sex for each individual as 'm' or 'f', individuals must be in the same order as the Size vector
#' @param Strata a character or factor vector giving the factor by which we wish to divide individuals into groups before calculating SSD, individuals must be in the same order as the Size vector
#' @param rep number of times to perform the resampling procedure
#'
#' @export
StratSSDBoot <- function(Size, Sex, Strata, rep = 1000, log = FALSE, ...) {
  ## Does stratified bootstraps for SSD: bootSSD() is run separately within
  ## each level of Strata, and the bootstrap mean and standard error are
  ## collected per stratum.
  ##
  ## Fixes vs. original: `log = F` -> `log = FALSE` (F is a reassignable
  ## binding, not a keyword) and `1:n` -> `seq_len(n)` (1:n yields c(1, 0)
  ## when there are zero strata).
  ##
  ## NOTE(review): the argument name `rep` shadows base::rep inside this
  ## function; calls like rep(NA_real_, n) still resolve to the base function
  ## because R skips non-function values during function lookup, but the
  ## name is a footgun.
  Strata <- as.factor(Strata)
  Levels <- levels(Strata)
  n <- nlevels(Strata)
  SE <- rep(NA_real_, n)  # preallocated per-stratum standard errors
  M <- rep(NA_real_, n)   # preallocated per-stratum bootstrap means
  for (i in seq_len(n)) {
    level <- Levels[i]
    SSDBooted <- bootSSD(Size = Size[Strata == level], Sex = Sex[Strata == level], rep, log = log, ...)
    SE[i] <- sd(SSDBooted$t)
    M[i] <- mean(SSDBooted$t)
  }
  Results <- data.frame(Factors = Levels, SSD.Mean = M, SSD.SE = SE)
  Results
}
| /R/StratSSDBoot.R | no_license | TWilliamBell/angler | R | false | false | 1,098 | r | #' Stratified Standard Error calculator for SSD
#'
#' @param Size a numerical vector giving the size of each individual as a scalar quantity
#' @param Sex a character or factor vector recording sex for each individual as 'm' or 'f', individuals must be in the same order as the Size vector
#' @param Strata a character or factor vector giving the factor by which we wish to divide individuals into groups before calculating SSD, individuals must be in the same order as the Size vector
#' @param rep number of times to perform the resampling procedure
#'
#' @export
StratSSDBoot <- function(Size, Sex, Strata, rep = 1000, log = F, ...) {
  ## Stratified bootstrap of SSD: run bootSSD() inside every level of Strata
  ## and report the bootstrap mean and standard error for each stratum.
  strata_fac <- as.factor(Strata)
  strata_levels <- levels(strata_fac)
  n_strata <- nlevels(strata_fac)
  boot_mean <- rep(NA_real_, n_strata)  # one slot per stratum
  boot_se <- rep(NA_real_, n_strata)
  idx <- 0
  for (lv in strata_levels) {
    idx <- idx + 1
    in_stratum <- strata_fac == lv
    booted <- bootSSD(Size = Size[in_stratum], Sex = Sex[in_stratum], rep, log = log, ...)
    boot_se[idx] <- sd(booted$t)
    boot_mean[idx] <- mean(booted$t)
  }
  data.frame(Factors = strata_levels, SSD.Mean = boot_mean, SSD.SE = boot_se)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_generic.R, R/bids.R
\name{preproc_scans}
\alias{preproc_scans}
\alias{preproc_scans.bids_project}
\title{Get pre-processed fmri scans}
\usage{
preproc_scans(x, ...)
\method{preproc_scans}{bids_project}(
x,
subid = ".*",
task = ".*",
run = ".*",
variant = "a^",
space = ".*",
session = ".*",
modality = "bold",
full_path = FALSE,
...
)
}
\arguments{
\item{x}{the \code{bids_project} object}
\item{...}{extra args}
\item{subid}{regular expression matching 'task'}
\item{task}{regular expression matching 'task'}
\item{run}{regular expression matching 'run'}
\item{session}{regular expression matching 'session'}
\item{modality}{regular expression matching 'modality'}
\item{full_path}{return full file path?}
}
\description{
extract fmriprep-created \code{preproc} scans from bids project
}
\section{Methods (by class)}{
\itemize{
\item \code{bids_project}:
}}
\examples{
proj <- bids_project(system.file("extdata/phoneme_stripped", package="bidser"), fmriprep=TRUE)
preproc_scans(proj)
}
| /man/preproc_scans.Rd | no_license | bbuchsbaum/bidser | R | false | true | 1,104 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_generic.R, R/bids.R
\name{preproc_scans}
\alias{preproc_scans}
\alias{preproc_scans.bids_project}
\title{Get pre-processed fmri scans}
\usage{
preproc_scans(x, ...)
\method{preproc_scans}{bids_project}(
x,
subid = ".*",
task = ".*",
run = ".*",
variant = "a^",
space = ".*",
session = ".*",
modality = "bold",
full_path = FALSE,
...
)
}
\arguments{
\item{x}{the \code{bids_project} object}
\item{...}{extra args}
\item{subid}{regular expression matching 'task'}
\item{task}{regular expression matching 'task'}
\item{run}{regular expression matching 'run'}
\item{session}{regular expression matching 'session'}
\item{modality}{regular expression matching 'modality'}
\item{full_path}{return full file path?}
}
\description{
extract fmriprep-created \code{preproc} scans from bids project
}
\section{Methods (by class)}{
\itemize{
\item \code{bids_project}:
}}
\examples{
proj <- bids_project(system.file("extdata/phoneme_stripped", package="bidser"), fmriprep=TRUE)
preproc_scans(proj)
}
|
# Tests for xltabr's automatic table-styling helpers. These depend on example
# CSV fixtures shipped with the xltabr package and on several of its internal
# (:::) functions, so they can only run with xltabr installed.
context("Test autodetection")
test_that("Test meta columns are populated", {
  # Fixture: a small table whose first three columns act as left headers and
  # whose remaining columns exercise integer/number/date format detection.
  path <- system.file("extdata", "test_autodetect.csv", package="xltabr")
  df <- utils::read.csv(path, stringsAsFactors = FALSE)
  df$f <- as.Date(df$f)
  tab <- xltabr::initialise() %>%
    xltabr::add_body(df)
  tab <- xltabr:::auto_detect_left_headers(tab)
  t1 <- all(tab$body$left_header_colnames == c("a", "b", "c"))
  expect_true(t1)
  # Title levels should be assigned from how many left-header cells are filled.
  tab <- xltabr:::auto_detect_body_title_level(tab)
  t1 = all(tab$body$body_df$meta_row_ ==c("body|title_3", "body|title_4", "body|title_5", "body"))
  t2 = all(tab$body$body_df$meta_left_header_row_ == c("body|left_header|title_3", "body|left_header|title_4", "body|left_header|title_5",
                                                       "body|left_header"))
  expect_true(t1)
  expect_true(t2)
  # Column styles: three text columns, then integer, number and date columns.
  tab <- auto_style_number_formatting(tab)
  t1 <- all(tab$body$meta_col_ == c("text1", "text1", "text1", "integer1",
                                    "number1", "date1"))
  expect_true(t1)
  # Smoke test: the whole auto-styling pipeline should run without error.
  path <- system.file("extdata", "test_autodetect.csv", package="xltabr")
  df <- read.csv(path, stringsAsFactors = FALSE)
  tab <- xltabr::initialise() %>%
    xltabr::add_body(df) %>%
    xltabr:::auto_detect_left_headers() %>%
    xltabr:::auto_style_body_rows() %>%
    xltabr:::auto_style_indent() %>%
    xltabr::auto_style_number_formatting()
  xltabr:::body_get_cell_styles_table(tab)
})
test_that("Test that indent/coalesce works correctly", {
  path <- system.file("extdata", "test_autodetect.csv", package="xltabr")
  df <- read.csv(path, stringsAsFactors = FALSE)
  df$f <- as.Date(df$f)
  tab <- xltabr::initialise() %>%
    xltabr::add_body(df)
  tab <- xltabr:::auto_detect_left_headers(tab)
  t1 = all(tab$body$left_header_colnames == c("a", "b", "c"))
  testthat::expect_true(t1)
  tab <- xltabr:::auto_detect_body_title_level(tab)
  tab <- xltabr:::auto_style_indent(tab)
  # Check that left_header_colnames was autodetected correctly
  t1 = tab$body$left_header_colnames == tab$misc$coalesce_left_header_colname
  testthat::expect_true(t1)
  # Indent level should increase as the title level decreases.
  t1 = all(tab$body$body_df$meta_left_header_row_ == c("body|left_header|title_3", "body|left_header|title_4|indent_1",
                                                       "body|left_header|title_5|indent_2", "body|left_header|indent_3"))
  testthat::expect_true(t1)
  t1 = all(tab$body$body_df_to_write$new_left_headers == c("Grand Total", "a", "b", "c"))
  testthat::expect_true(t1)
})
test_that("Test second autodetect dataset", {
  # Second fixture: four left-header columns plus explicit top headers.
  path <- system.file("extdata", "test_autodetect_2.csv", package="xltabr")
  df <- read.csv(path, stringsAsFactors = FALSE)
  df$g <- as.Date(df$g)
  cols <- colnames(df)
  tab <- xltabr::initialise() %>%
    xltabr::add_top_headers(cols) %>%
    xltabr::add_body(df)
  tab <- xltabr:::auto_detect_left_headers(tab)
  t1 <- all(tab$body$left_header_colnames == c("a", "b", "c", "d"))
  testthat::expect_true(t1)
  tab <- xltabr:::auto_detect_body_title_level(tab)
  tab <- xltabr:::auto_style_indent(tab)
  t1 = all(tab$body$body_df$meta_left_header_row_ == c("body|left_header|title_2", "body|left_header|title_3|indent_1",
                                                       "body|left_header|title_4|indent_2", "body|left_header|title_5|indent_3",
                                                       "body|left_header|indent_4"))
  testthat::expect_true(t1)
  # After coalescing, the left header collapses to a single blank-named column
  # that is also reflected in the first top-header cell.
  t1 = (tab$body$left_header_colnames == " ")
  testthat::expect_true(t1)
  t1 = all(tab$top_headers$top_headers_list[[1]] == c(" ", "e", "f", "g"))
  testthat::expect_true(t1)
})
| /data/genthat_extracted_code/xltabr/tests/test_autodetect.R | no_license | surayaaramli/typeRrh | R | false | false | 3,555 | r | context("Test autodetection")
test_that("Test meta columns are populated", {
path <- system.file("extdata", "test_autodetect.csv", package="xltabr")
df <- utils::read.csv(path, stringsAsFactors = FALSE)
df$f <- as.Date(df$f)
tab <- xltabr::initialise() %>%
xltabr::add_body(df)
tab <- xltabr:::auto_detect_left_headers(tab)
t1 <- all(tab$body$left_header_colnames == c("a", "b", "c"))
expect_true(t1)
tab <- xltabr:::auto_detect_body_title_level(tab)
t1 = all(tab$body$body_df$meta_row_ ==c("body|title_3", "body|title_4", "body|title_5", "body"))
t2 = all(tab$body$body_df$meta_left_header_row_ == c("body|left_header|title_3", "body|left_header|title_4", "body|left_header|title_5",
"body|left_header"))
expect_true(t1)
expect_true(t2)
tab <- auto_style_number_formatting(tab)
t1 <- all(tab$body$meta_col_ == c("text1", "text1", "text1", "integer1",
"number1", "date1"))
expect_true(t1)
path <- system.file("extdata", "test_autodetect.csv", package="xltabr")
df <- read.csv(path, stringsAsFactors = FALSE)
tab <- xltabr::initialise() %>%
xltabr::add_body(df) %>%
xltabr:::auto_detect_left_headers() %>%
xltabr:::auto_style_body_rows() %>%
xltabr:::auto_style_indent() %>%
xltabr::auto_style_number_formatting()
xltabr:::body_get_cell_styles_table(tab)
})
test_that("Test that indent/coalesce works correctly", {
path <- system.file("extdata", "test_autodetect.csv", package="xltabr")
df <- read.csv(path, stringsAsFactors = FALSE)
df$f <- as.Date(df$f)
tab <- xltabr::initialise() %>%
xltabr::add_body(df)
tab <- xltabr:::auto_detect_left_headers(tab)
t1 = all(tab$body$left_header_colnames == c("a", "b", "c"))
testthat::expect_true(t1)
tab <- xltabr:::auto_detect_body_title_level(tab)
tab <- xltabr:::auto_style_indent(tab)
#Check it's autodetected left_header_coluns correctly
t1 = tab$body$left_header_colnames == tab$misc$coalesce_left_header_colname
testthat::expect_true(t1)
t1 = all(tab$body$body_df$meta_left_header_row_ == c("body|left_header|title_3", "body|left_header|title_4|indent_1",
"body|left_header|title_5|indent_2", "body|left_header|indent_3"))
testthat::expect_true(t1)
t1 = all(tab$body$body_df_to_write$new_left_headers == c("Grand Total", "a", "b", "c"))
testthat::expect_true(t1)
})
test_that("Test second autodetect dataset", {
path <- system.file("extdata", "test_autodetect_2.csv", package="xltabr")
df <- read.csv(path, stringsAsFactors = FALSE)
df$g <- as.Date(df$g)
cols <- colnames(df)
tab <- xltabr::initialise() %>%
xltabr::add_top_headers(cols) %>%
xltabr::add_body(df)
tab <- xltabr:::auto_detect_left_headers(tab)
t1 <- all(tab$body$left_header_colnames == c("a", "b", "c", "d"))
testthat::expect_true(t1)
tab <- xltabr:::auto_detect_body_title_level(tab)
tab <- xltabr:::auto_style_indent(tab)
t1 = all(tab$body$body_df$meta_left_header_row_ == c("body|left_header|title_2", "body|left_header|title_3|indent_1",
"body|left_header|title_4|indent_2", "body|left_header|title_5|indent_3",
"body|left_header|indent_4"))
testthat::expect_true(t1)
t1 = (tab$body$left_header_colnames == " ")
testthat::expect_true(t1)
t1 = all(tab$top_headers$top_headers_list[[1]] == c(" ", "e", "f", "g"))
testthat::expect_true(t1)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lm_nested_concord.R
\name{CEL_lm_nested}
\alias{CEL_lm_nested}
\title{Run the nested algorithm for linear regression composite tests.}
\usage{
CEL_lm_nested(beta_test, X, y, F_reparam, s_hat_reparam, gamma_init = NULL,
outer_eps = 1e-08, outer_maxit = 1000, inner_opt_type = c("owen",
"LBFGS"), inner_owen_arg = optim_owen_inner_control(),
inner_lbfgs_arg = list(invisible = 1),
beta_newton_arg = optim_newton_control(), outer_tol_type = c("fval",
"gval"), verbose = F)
}
\arguments{
\item{beta_test}{a vector of candidate test values.}
\item{X}{a design matrix with observations in rows and variables in columns.}
\item{y}{a vector of the responses.}
\item{F_reparam}{the \code{F} matrix for the affine reparameterization.}
\item{s_hat_reparam}{the \code{s} vector for the affine reparameterization.}
\item{gamma_init}{a vector of initial values for the dual, or a \code{"random"} to specify sampling from \code{\link[stats]{rnorm}}. Default is zero vector.}
\item{outer_eps}{absolute tolerance required for outer loop convergence.}
\item{outer_maxit}{maximum number of outer loop iterations.}
\item{inner_opt_type}{optimization type for the inner loop.}
\item{inner_owen_arg}{control arguments passed to the dual formulation (\code{owen}) optimization for the inner loop; see \code{\link{optim_owen_inner_control}}.}
\item{inner_lbfgs_arg}{a list of arguments passed to \code{\link[lbfgs]{lbfgs}}.}
\item{beta_newton_arg}{control arguments passed to the damped Newton optimization for the inner loop; see \code{\link{optim_newton_control}}.}
\item{outer_tol_type}{the type of tolerance checking for the outer loop.}
\item{verbose}{a boolean to allow console output.}
}
\description{
Run the nested algorithm for linear regression composite tests.
}
| /man/CEL_lm_nested.Rd | no_license | hoangtt1989/BICEP | R | false | true | 1,852 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lm_nested_concord.R
\name{CEL_lm_nested}
\alias{CEL_lm_nested}
\title{Run the nested algorithm for linear regression composite tests.}
\usage{
CEL_lm_nested(beta_test, X, y, F_reparam, s_hat_reparam, gamma_init = NULL,
outer_eps = 1e-08, outer_maxit = 1000, inner_opt_type = c("owen",
"LBFGS"), inner_owen_arg = optim_owen_inner_control(),
inner_lbfgs_arg = list(invisible = 1),
beta_newton_arg = optim_newton_control(), outer_tol_type = c("fval",
"gval"), verbose = F)
}
\arguments{
\item{beta_test}{a vector of candidate test values.}
\item{X}{a design matrix with observations in rows and variables in columns.}
\item{y}{a vector of the responses.}
\item{F_reparam}{the \code{F} matrix for the affine reparameterization.}
\item{s_hat_reparam}{the \code{s} vector for the affine reparameterization.}
\item{gamma_init}{a vector of initial values for the dual, or a \code{"random"} to specify sampling from \code{\link[stats]{rnorm}}. Default is zero vector.}
\item{outer_eps}{absolute tolerance required for outer loop convergence.}
\item{outer_maxit}{maximum number of outer loop iterations.}
\item{inner_opt_type}{optimization type for the inner loop.}
\item{inner_owen_arg}{control arguments passed to the dual formulation (\code{owen}) optimization for the inner loop; see \code{\link{optim_owen_inner_control}}.}
\item{inner_lbfgs_arg}{a list of arguments passed to \code{\link[lbfgs]{lbfgs}}.}
\item{beta_newton_arg}{control arguments passed to the damped Newton optimization for the inner loop; see \code{\link{optim_newton_control}}.}
\item{outer_tol_type}{the type of tolerance checking for the outer loop.}
\item{verbose}{a boolean to allow console output.}
}
\description{
Run the nested algorithm for linear regression composite tests.
}
|
DownloadKGML <-
function(){
  # Download the KGML (KEGG XML) file for every pathway listed in the TPEA
  # package's `pathway_names` dataset into <TPEA>/data/DownloadKGMLfiles/.
  #
  # NOTE(review): setwd() is called twice and the previous working directory
  # is never restored, so the caller's working directory is left changed.
  # NOTE(review): files are created and removed inside the installed package
  # directory, which may not be writable on shared installations -- confirm.
  #
  # Load the pathway table into a private environment so the caller's
  # workspace is not polluted by data().
  pkgEnv <- new.env(parent=emptyenv())
  if(!exists("pathway_names", pkgEnv)) {
    data("pathway_names", package="TPEA", envir=pkgEnv)
    da2<-pkgEnv[["pathway_names"]]
  }
  path<-paste(system.file(package="TPEA"),"/data/",sep="")
  setwd(path)
  # Remove any previous download/derived-data directories before refetching.
  if(file.exists("DownloadKGMLfiles")==TRUE){
    unlink("DownloadKGMLfiles", recursive=TRUE)
  }
  if(file.exists("network")==TRUE){
    unlink("network", recursive=TRUE)
  }
  if(file.exists("updateData")==TRUE){
    unlink("updateData", recursive=TRUE)
  }
  if(file.exists("NodeGeneRelationship")==TRUE){
    unlink("NodeGeneRelationship", recursive=TRUE)
  }
  dir.create(paste(path,"DownloadKGMLfiles/",sep=""))
  path1<-paste(system.file(package="TPEA"),"/data/DownloadKGMLfiles/",sep="")
  setwd(path1)
  pathway<-as.matrix(da2)
  # Fetch each pathway's KGML from the KEGG download endpoint; the destination
  # file is named after the pathway identifier (first column of the table).
  for(i in 1:nrow(pathway)){
    website1<-c("http://www.kegg.jp/kegg-bin/download?entry=")
    website2<-pathway[i,1]
    website3<-c("&format=kgml")
    website<-paste(website1,website2,website3,sep="")
    download.file(website,destfile=pathway[i,1])
    print(i)
  }
}
| /R/DownloadKGML.R | no_license | cran/TPEA | R | false | false | 1,042 | r | DownloadKGML <-
function(){
pkgEnv <- new.env(parent=emptyenv())
if(!exists("pathway_names", pkgEnv)) {
data("pathway_names", package="TPEA", envir=pkgEnv)
da2<-pkgEnv[["pathway_names"]]
}
path<-paste(system.file(package="TPEA"),"/data/",sep="")
setwd(path)
if(file.exists("DownloadKGMLfiles")==TRUE){
unlink("DownloadKGMLfiles", recursive=TRUE)
}
if(file.exists("network")==TRUE){
unlink("network", recursive=TRUE)
}
if(file.exists("updateData")==TRUE){
unlink("updateData", recursive=TRUE)
}
if(file.exists("NodeGeneRelationship")==TRUE){
unlink("NodeGeneRelationship", recursive=TRUE)
}
dir.create(paste(path,"DownloadKGMLfiles/",sep=""))
path1<-paste(system.file(package="TPEA"),"/data/DownloadKGMLfiles/",sep="")
setwd(path1)
pathway<-as.matrix(da2)
for(i in 1:nrow(pathway)){
website1<-c("http://www.kegg.jp/kegg-bin/download?entry=")
website2<-pathway[i,1]
website3<-c("&format=kgml")
website<-paste(website1,website2,website3,sep="")
download.file(website,destfile=pathway[i,1])
print(i)
}
}
|
#! /usr/bin/Rscript --vanilla
# --default-packages=utils,stats,lattice,grid,getopts
# need to check if the line above works on the web deployment machine.
# Copyright 2010 Randall Pruim, Ryan Welch
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Attach the packages this script depends on.
# NOTE(review): require() only returns FALSE when a package is missing,
# whereas library() would fail fast -- confirm that the soft failure is
# intentional for this command-line script.
require(stats);
require(utils);
require(grid);
require(lattice);
# Script-level accumulators; they are re-assigned into the global environment
# elsewhere in the script (see assign(..., globalenv())).
omittedGenes <- character(0); # will be set in globalenv()
warningMessages <- character(0); # will be set in globalenv()
################################################################################################
# function definitions
################################################################################################
################################################################################
#
# Normalize a user-supplied filename argument: returns NULL for anything that
# means "no file" (non-character input, the empty string, or the literal
# string 'null' in any case); otherwise returns the filename unchanged.
#
as.filename <- function(x) {
  # identical() keeps the comparisons scalar-safe: the original `||` chain
  # would raise an error on character vectors of length > 1 in modern R.
  if (!is.character(x) || identical(toupper(x), "NULL") || identical(x, "")) {
    return(NULL)
  }
  x
}
################################################################################
#
# Convert a column name to the form R's read.* functions produce: each of the
# characters '-', '=' and ' ' becomes '.'.
#
char2Rname <- function(x) {
  # One regex character class replaces the original three sequential gsub()
  # calls, one of which relied on the non-standard string escape '\ '.
  gsub("[-= ]", ".", x)
}
################################################################################
#
# Build a factor restricted to `levels`; values that do not match (including
# NAs) are collected into the extra level `na.level` when any are present.
#
MakeFactor <- function(x,levels,na.level=NA) {
  result <- factor(x, levels = levels)
  unmatched <- is.na(result)
  if (any(unmatched)) {
    levels(result) <- c(levels(result), na.level)
    result[unmatched] <- na.level
  }
  result
}
################################################################################
#
# Format a log10-scale value in scientific notation, e.g. 3.39794 -> "2.5E3".
#
log2sci <- function(x) {
  # Split x into an integer exponent and a fractional remainder, so the
  # underlying value is 10^frac * 10^exponent with mantissa 10^frac.
  exponent <- floor(x)
  mantissa <- 10^(x - exponent)
  paste(format(mantissa, digits = 3), "E", exponent, sep = "")
}
################################################################################
#
# Heuristically decide whether a character vector looks like SNP identifiers
# by testing its first n entries against rs-number and chr:position patterns.
#
Sniff <- function(vec,type=c('snp'),n=10) {
  n <- min(n, length(vec))
  type <- match.arg(type)
  if (type == 'snp') {
    # seq_len() fixes the original vec[1:n] indexing, which misbehaved when
    # n == 0 (1:0 yields c(1, 0) and still selects the first element).
    head_vals <- vec[seq_len(n)]
    looks_like_snp <- grepl('rs', head_vals) |
      grepl('chr[[:digit:]]+:[[:digit:]]', head_vals) |
      grepl('chr[[:alpha:]]+:[[:digit:]]', head_vals)
    # TRUE only if every inspected entry matches at least one pattern.
    return(all(looks_like_snp))
  }
  return(FALSE)
}
################################################################################
#
# Decide which side of the plot the legend should occupy: the side away from
# the cluster of strongest p-value signals.
#
AutoLegendSide <- function(pval,pos,posRange = range(pos)) {
  threshold <- 0.5 * max(pval)
  tall_positions <- pos[pval > threshold]   # where the strongest signals sit
  leftmost <- min(tall_positions)
  rightmost <- max(tall_positions)
  center <- mean(posRange)
  # Strong signals left of center -> legend goes right, and vice versa.
  if ((center - leftmost) > (rightmost - center)) 'right' else 'left'
}
################################################################################
#
# Choose how to map a p-value column onto the -log10 scale, given its observed
# range [mn, mx]:
#   * already transformed, or values > 1 -> identity (treat as -log10 p)
#   * negative values present            -> negate   (treat as log10 p)
#   * otherwise                          -> -log10   (raw p-values)
#
SetTransformation <- function(mn,mx,alreadyTransformed=FALSE) {
  identity_fn <- function(x) { x }
  if (alreadyTransformed || mx > 1) {
    return(identity_fn)
  }
  if (mn < 0) {
    return(function(x) { -x })
  }
  function(x) { -log10(x) }
}
################################################################################
#
# Pick the legend title for the LD column: an explicit title always wins;
# otherwise 'dprime' and 'rsquare' get conventional labels, and anything else
# gets an empty title.
#
SetLDTitle <- function(col,title) {
  if (!is.null(title)) {
    return(title)
  }
  if (col == 'dprime') {
    return("D'")
  }
  if (col == 'rsquare') {
    return(expression(r^2))
  }
  ""
}
################################################################################
#
# Recursive variant of utils::modifyList. Nested lists are merged rather than
# replaced, and NULL entries in `val` are ignored unless replaceWithNull=TRUE
# (in which case assigning NULL deletes the corresponding element of x).
#
ModifyList <- function (x, val, replaceWithNull = FALSE)
{
  stopifnot(is.list(x), is.list(val))
  base_names <- names(x)   # snapshot before any keys are added
  for (key in names(val)) {
    replacement <- val[[key]]
    if (key %in% base_names && is.list(x[[key]]) && is.list(replacement)) {
      # Both sides hold sub-lists: merge them recursively.
      x[[key]] <- ModifyList(x[[key]], replacement, replaceWithNull = replaceWithNull)
    } else if (!is.null(replacement) || replaceWithNull) {
      x[[key]] <- replacement
    }
  }
  x
}
################################################################################
#
# Apply `fun` to the named elements of `x` listed in `names`, leaving every
# other element (and any NULL element) untouched.
#
sublapply <- function(x,names=names(x),fun) {
  fun <- as.function(fun)
  for (nm in names) {
    current <- x[[nm]]
    if (!is.null(current)) {
      x[[nm]] <- fun(current)
    }
  }
  x
}
################################################################################
#
# Canonicalize the names of list x: each name that is a unique (optionally
# case-insensitive) prefix of an entry in `names` is replaced by that full
# entry. Unmatched names are kept as-is and optionally reported.
#
ConformList <- function(x,names,case.sensitive=FALSE,message=FALSE) {
  own.ind <- 0
  for (name in names(x)) {
    own.ind <- own.ind + 1
    if (case.sensitive) {
      match.ind <- pmatch( name, names )
    } else {
      match.ind <- pmatch( toupper(name), toupper(names) )
    }
    if (! is.na(match.ind)) {
      names(x)[own.ind] <- names[match.ind]
    } else if (!is.null(message) && !identical(message, FALSE)) {
      # BUG FIX: the old guard was `!is.null(message)`, which is TRUE even for
      # the default message = FALSE, so the note was emitted unconditionally.
      message(paste("No unique match for ",name,"=",x[[own.ind]],sep=""))
    }
  }
  return(x)
}
################################################################################
#
# Prefix-aware ModifyList: names in `val` may be unique prefixes of names in
# `x`; they are expanded via ConformList() before the merge.
#
PModifyList <- function(x,val,...) {
  conformed <- ConformList(val, names(x), ...)
  ModifyList(x, conformed)
}
################################################################################
#
# Apply a single named theme to the argument list. A character `theme` such as
# "giant" is dispatched to the function "giant.theme"; any non-character value
# leaves `args` unmodified.
#
ProcessOneTheme <- function(args,theme) {
  if (!is.character(theme)) {
    return(args)
  }
  theme_fn <- paste(theme, '.theme', sep = '')
  do.call(theme_fn, list(args = args))
}
################################################################################
#
# Apply a comma-separated list of theme names (e.g. "pub,black") to `args`,
# in the order given; non-character input is returned untouched.
#
ProcessThemes <- function(args,themeString) {
  if (!is.character(themeString)) {
    return(args)
  }
  theme_names <- unlist(strsplit(themeString, ","))
  for (one_theme in theme_names) {
    args <- ProcessOneTheme(args, one_theme)
  }
  args
}
################################################################################
#
# Some themes. A theme is a function mapping the full argument list to an
# updated argument list, applying a batch of presentation defaults through
# ModifyList() (so later themes can still override earlier ones).
#
# Publication-style theme: larger fonts and layout tweaks for print figures.
# NOTE(review): ModifyList() skips NULL values unless replaceWithNull = TRUE,
# so the snpset = NULL and refDot = NULL entries below are no-ops as written
# -- confirm whether clearing those settings was intended.
ryan.theme <- function(args) {
  argUpdates <- list(
    snpset=NULL,
    format="pdf",
    refDot=NULL,
    geneFontSize=1.1,
    refsnpTextSize=1.5,
    axisTextSize=1.45,
    legendSize=1,
    legendFrameAlpha=0,
    legendAlpha=0,
    axisSize=1.45,
    recombPos=3,
    xlabPos=-2.75,
    height=9,
    rfrows=4
  )
  return(ModifyList(args,argUpdates));
}
# Aliases: "publication" and "pub" select the same theme as "ryan".
publication.theme <- ryan.theme;
pub.theme <- ryan.theme;
# Theme forcing axis text, rug and frame to plain black.
black.theme <- function(args) {
  argUpdates <- list(
    axisTextColor='black',
    rugColor='black',
    frameColor='black'
  )
  return(ModifyList(args,argUpdates));
}
# Theme for very large ("GIANT"-style) plots: builds on ryan.theme and
# black.theme, then layers its own overrides on top.
giant.theme <- function(args) {
  argUpdates <- list(
    rfrows=10,
    recombOver=TRUE,
    recombAxisColor='black',
    recombAxisAlpha=1,
    legend='auto',
    showAnnot=TRUE,
    showRefsnpAnnot=FALSE,
    annotPch='25,21,21,21,21,21,24,24,24',
    recombColor='cyan',
    ldColors='gray50,blue,green,yellow,orange,red,purple3'
  )
  args <- ryan.theme(args);
  args <- black.theme(args);
  args <- ModifyList(args,argUpdates);
  return(args);
}
#############################################################
#
# Remove temporary files (used in final clean-up). Each argument is an object
# carrying "method" and "file" attributes (as set by GetDataFromFileOrCommand);
# files fetched via pquery are deleted. Returns the files actually removed.
#
RemoveTempFiles <- function (...) {
  objs <- list(...)
  removedFiles <- list()
  if (length(objs) < 1) { return(removedFiles); }
  method <- unlist(lapply(objs, function(x) { attr(x, "method") }))
  file <- unlist(lapply(objs, function(x) { attr(x, "file") }))
  for (i in seq_along(method)) {
    if (method[i] == 'pquery') {
      file.remove(file[i])
      # BUG FIX: record only files that were actually deleted; the original
      # appended every file to removedFiles regardless of its method.
      removedFiles <- c(removedFiles, file[i])
    }
  }
  return(removedFiles)
}
#############################################################
#
# Final clean-up: when args$clean is TRUE, delete the temporary files behind
# the supplied data objects and report which ones were removed.
#
CleanUp <- function(args,...) {
  if (!args[['clean']]) {
    return(invisible(NULL))
  }
  message("\nCleaning up. The following files are begin removed: ")
  removed <- RemoveTempFiles(...)
  message(paste("\t", removed, "\n"))
  invisible(removed)
}
#############################################################
#
# Obtain data.  Data can be specified using a file or
# a pquery command.  Pquery is a tool that simplifies
# querying SQL databases.  Most users will simply pass in files
# Much of this is handled by the Python wrapper anyway.
#
# When `file` is missing (or clobber = TRUE), the pquery `command` is run via
# system() with output redirected into `file`, and the file is then read back
# with read.file() (defined elsewhere in this script).
# Side effects: executes a shell command, and appends to the global
# `warningMessages` vector when the file had to be fetched.
# NOTE(review): the `default` argument is never used in this function --
# presumably intended as a fallback when reading fails; compare
# GetDataFromFileIgnoreCommand, which does use it.
GetDataFromFileOrCommand <- function(file, command, default=data.frame(), clobber=FALSE, verbose=TRUE,...) {
  method <- "file";
  if ( !file.exists(file) || clobber ) {
    # Fetch the data: redirect the pquery command's output into `file`.
    command <- paste(command,">",file);
    if (verbose) { cat(paste("Getting data with",command,sep="\n")); }
    system(command);
    method <- 'pquery';  # marks the file as temporary so CleanUp() removes it
    if (! clobber) {
      # Only a genuinely missing file (not an explicit clobber) is recorded.
      assign("warningMessages",c(warningMessages,paste("Missing file:",file)), globalenv());
    }
  }
  results <- read.file(file,...) ;
  # Record provenance so RemoveTempFiles()/CleanUp() can decide what to delete.
  attr(results, "file") <- file;
  attr(results, "command") <- command;
  attr(results, "method") <- method;
  return(results);
}
#############################################################
#
# Obtain data.  Data can be specified using a file.  When
# pquery is not available, this version ignores the command
# and returns `default` when the file is NULL or missing.
#
GetDataFromFileIgnoreCommand <- function(file, command, default=data.frame(), clobber=FALSE, verbose=TRUE,...) {
  # Reads `file` via read.file() (defined elsewhere in this script) and tags
  # the result with the same provenance attributes used by
  # GetDataFromFileOrCommand, for consistency. `clobber` and `verbose` are
  # accepted only for signature compatibility with that function.
  method <- "file"
  if (is.null(file)) {
    return(default)
  }
  if (!file.exists(file)) {
    # A missing file is tolerated here: fall back to the supplied default.
    # warning(paste("Missing file:",file))
    return(default)
  }
  results <- read.file(file, ...)
  attr(results, "file") <- file
  attr(results, "command") <- command
  attr(results, "method") <- method
  return(results)
}
#############################################################
#
# Return an empty data frame with some additional attributes
#
# Build an empty data frame tagged with "file", "command" and "method"
# attributes, matching the shape of GetDataFromFileOrCommand()'s result.
empty.data.frame <- function(
    file="none",command="none", method="empty.data.frame") {
  result <- data.frame();
  attr(result, "file") <- file;
  attr(result, "command") <- command;
  attr(result, "method") <- method;
  # Explicit return: without it the function's value is the last
  # assignment's value (the 'method' string), not the data frame.
  return(result);
}
#############################################################
#
# This is used to clone values from user-specified arguments
# to other arguments that the user did not set.
#
# Copy args[[updatewith]] into args[[toupdate]], but only when the target
# entry has not been set yet (is NULL).  Returns the (possibly updated)
# argument list.
MatchIfNull <- function(args,toupdate,updatewith) {
  if (!is.null(args[[toupdate]])) {
    return(args)
  }
  args[[toupdate]] <- args[[updatewith]]
  args
}
#############################################################
#
# All arguments are passed in with mode character (i.e., as strings)
# This functions converts arguments to the correct mode for
# internal use.
#
# Convert command-line arguments from character strings to the modes used
# internally (numeric, integer, logical, filename, or comma-separated
# vectors of those).
#
# args: named list in which every value arrived as a string.
# Returns the same list with the named entries coerced.
#
# NOTE(review): sublapply() and as.filename() are project helpers defined
# elsewhere; sublapply() appears to apply the given function to the named
# subset of the list -- confirm against its definition.
AdjustModesOfArgs <- function(args) {
  # plain numeric options (sizes, alpha levels, axis limits)
  args <- sublapply(args,
    c('legendAlpha', 'width','height',
    'frameAlpha','hiAlpha','rugAlpha',
    'refsnpLineAlpha', 'recombFillAlpha','recombLineAlpha', 'refsnpTextAlpha',
    'ymin','ymax','legendSize','refsnpTextSize','axisSize','axisTextSize','geneFontSize','smallDot',
    'largeDot','refDot'),
    as.numeric);
  # options that name input files
  args <- sublapply(args,
    c('metal','recomb','ld','refSnpPosFile','snpsetFile','annot','refFlat'),
    as.filename);
  # integer-valued options
  args <- sublapply(args,
    c('chr','unit','xnsmall'),
    as.integer);
  # boolean flags
  args <- sublapply(args,
    c('experimental','clobber','recombOver','recombFill','pquery',
    'showRecomb','showAnnot','showRefsnpAnnot','bigDiamond','showPartialGenes','shiftGeneNames',
    'clean', 'dryRun','legendMissing'),
    as.logical);
  # comma-separated lists of numbers
  args <- sublapply( args,
    c('ldCuts','xat','yat','annotPch'),
    function(x) { as.numeric(unlist(strsplit(x,","))) } );
  # comma-separated lists of integers
  args <- sublapply( args,
    c('rfrows'),
    function(x) { as.integer(unlist(strsplit(x,","))) } );
  # comma-separated lists of strings
  args <- sublapply( args,
    c('ldColors', 'format', 'annotOrder'),
    function(x) { unlist(strsplit(x,",")) } );
  return(args);
}
#############################################################
#
# Returns text description of unit along chromosome depending
# on value of unit where unit is a number of base pairs
#
# Text label for a position unit expressed in base pairs:
# 1e6 -> "(Mb)", 1000 -> "(Kb)", anything else -> "".
unit2char <- function(unit) {
  if (unit == 1000) { return("(Kb)") }
  if (unit == 1e6) { return("(Mb)") }
  ""
}
#############################################################
#
# convert position that might include mb, kb into a base pair position
#
# Convert a position that may carry a "kb"/"mb" suffix (any case) into a
# plain base-pair count.  Bare numbers pass through unchanged.
pos2bp <- function(pos) {
  txt <- as.character(pos)
  multiplier <- 1
  if (grepl("kb", txt, ignore.case = TRUE)) {
    multiplier <- 1000
    txt <- sub("kb", "", txt, ignore.case = TRUE)
  }
  if (grepl("mb", txt, ignore.case = TRUE)) {
    multiplier <- 1000000
    txt <- sub("mb", "", txt, ignore.case = TRUE)
  }
  as.numeric(txt) * multiplier
}
#############################################################
#
# read file, using filename to determine method.
#
# Read 'file', choosing the reader from the file name:
#   *.csv   -> read.csv
#   *.Rdata -> load() and return the first restored object
#   other   -> read.table
# Returns NULL (with a message) when the file does not exist.
read.file <- function(file,header=TRUE,na.strings=c('NA','','.','na'),...) {
  if (! file.exists(file) ) {
    # message now precedes the return; it was unreachable before
    message(paste("Missing file: ", file));
    return(NULL);
  }
  # if file ends .csv, then read.csv
  if ( regexpr("\\.csv",file) > 0 ) {
    return(read.csv(file,header=header,na.strings=na.strings,...));
  }
  # if file ends .Rdata, then load
  if ( regexpr("\\.Rdata",file) > 0 ) {
    varName <- load(file);
    # load() returns the *names* of the restored objects; return the first
    # object itself rather than its name (the old code returned the name).
    return(get(varName[1]));
  }
  # default is read.table
  return(read.table(file,header=header,na.strings=na.strings,...));
}
#############################################################
#
# write file, using filename to determine method.
#
# Write 'x' to 'file', choosing the writer from the file name:
#   *.csv   -> write.csv (write.table with sep="," when appending)
#   *.Rdata -> save
#   other   -> write.table
# Refuses to overwrite an existing file (with a message) when clobber=FALSE
# and append=FALSE.  'na' is the missing-value string for text formats.
write.file <- function(x, file, append=FALSE, clobber=TRUE, na='NA') {
  if (file.exists(file) && ! clobber && !append ) {
    # message now precedes the return; it was unreachable before
    message(paste("File already exists: ", file));
    return(NULL);
  }
  # if file ends .csv, then write.csv
  if ( regexpr("\\.csv",file) > 0 ) {
    if (append) {
      # write.csv() forbids setting 'append'; use write.table with a
      # comma separator and no repeated header instead
      return(write.table(x,file,append=TRUE,sep=",",col.names=FALSE,na=na));
    }
    return(write.csv(x,file,na=na));
  }
  # if file ends .Rdata, then save (was save.csv, an undefined function)
  if ( regexpr("\\.Rdata",file) > 0 ) {
    return(save(x,file=file));
  }
  # default is write.table
  return(write.table(x,file,append=append,na=na));
}
#############################################################
#
# Converter from chrom (e.g., chr13) to chr (e.g., 13) format
#
# Converter from chrom (e.g., "chr13") to numeric chr (e.g., 13) format.
# Vectorized; X/Y/mito/XY map to 23/24/25/26.
chrom2chr <- function (x) {
  y <- substring(x,first=4);
  y[y=='X'] = '23';
  y[y=='Y'] = '24';
  # 'mito' carries no 'chr' prefix, so test the original string; the old
  # test (y=='mito') compared against the 4th-character substring and
  # could never match.
  y[x=='mito'] = '25';
  y[y=='XY'] = '26';
  as.numeric(y);
}
#############################################################
#
# Converter from chr (e.g., 13) to chrom (e.g., chr13) format
#
# Converter from a single numeric chr code (e.g., 13) to chrom (e.g.,
# "chr13") format; 23/24/25/26 map to chrX/chrY/mito/chrXY.
chr2chrom <- function (x) {
  special <- c("23" = "chrX", "24" = "chrY", "25" = "mito", "26" = "chrXY")
  key <- as.character(x)
  if (key %in% names(special)) {
    return(unname(special[key]))
  }
  paste("chr", as.numeric(x), sep = "")
}
#############################################################
#
# Linearly rescale values to fit in interval
# If all values are equal, then return a vector the same length as x
# with all values set to constant (by default the larger end of the interval).
#
# Linearly rescale values from the 'original' interval onto the
# 'transformed' interval, clamping results to the target interval.
# If either interval specification is malformed, x is returned unchanged.
# If the original range is degenerate (all values equal), every value maps
# to the upper end of the target interval, as documented above.
rescale <- function(x, original=c(min(x),max(x)), transformed = c(0,1) ){
  if ( length(transformed) != 2 || ! is.numeric(transformed) ||
       length(original) != 2 || ! is.numeric(original) )
    { return (x); }
  a <- original[1]; b <- original[2];
  u <- transformed[1]; v <- transformed[2];
  if (b == a) {
    # Degenerate source range: the old formula produced NaN here and the
    # subsequent clamping failed with an NA-subscript error.
    return(rep(v, length(x)));
  }
  r <- v - (b-x)/(b-a) * (v-u);
  r[r < u] <- u;
  r[r > v] <- v;
  return(r);
}
#############################################################
#
# Flatten information originally in UCSC bed format.
# Tailored to fit the nomenclature and formatting used in files
# generated by Peter Chines.
#
# Flatten UCSC refFlat-style gene records into one long data frame with one
# row per feature:
#   type 0 = transcript span (txStart..txEnd)
#   type 2 = coding region   (cdsStart..cdsEnd)
#   type 1 = individual exons (exonStarts/exonEnds, comma-separated)
# Positions are scaled by 'multiplier' (default bp -> kb).
flatten.bed <- function(x,multiplier=.001) {
  # Empty-input guard: the old code tested dim(flatten.bed) -- the function
  # object itself -- so this branch never fired; it now tests the data.
  if (prod(dim(x)) == 0) {
    # placeholder rows so downstream plotting has a well-formed frame
    df <- data.frame(
      chrom = c("chr0","chr0","chr0"),
      chr = c(0,0,0),
      start= c(0,0,0),
      stop = c(2,2,2),
      type = c(0,2,1),
      name = c('none','none','none'),
      nmName = c('none','none','none'),
      strand = c('+','+','+')
    );
    return(df);
  }
  x$geneName <- as.character(x$geneName);
  x$name <- as.character(x$name);
  x$strand <- as.character(x$strand);
  lx <- dim(x)[1];
  # per-exon coordinates, one number per exon across all transcripts
  blockStart <- unlist(lapply(
    strsplit(as.character(x$exonStarts),split=','),
    as.numeric));
  blockEnd <- unlist(lapply(
    strsplit(as.character(x$exonEnds),split=','),
    as.numeric));
  # replicate per-transcript columns once per exon
  nameDup = rep(x$geneName,times=x$exonCount);
  nmNameDup = rep(x$name,times=x$exonCount);
  startDup = rep(x$txStart,times=x$exonCount);
  stopDup = rep(x$txEnd,times=x$exonCount);
  chromDup = rep(x$chrom,times=x$exonCount);
  strandDup = rep(x$strand,times=x$exonCount);
  # types:
  # 0 = txStart to txEnd (transcription region)
  # 1 = exonStart to exonEnd (exons)
  # 2 = cdsStart to cdsEnd (coding region)
  df <- data.frame(
    chrom = c(x$chrom, x$chrom, chromDup),
    chr = chrom2chr(c(x$chrom, x$chrom, chromDup)),
    start= c(x$txStart, x$cdsStart, blockStart),
    stop = c(x$txEnd, x$cdsEnd, blockEnd ),
    type = c(rep(0,lx),rep(2,lx), rep(1,length(startDup))),
    name = c(x$geneName, x$geneName, nameDup),
    # exon rows now carry the transcript (NM) name; nmNameDup was computed
    # for exactly this purpose but the old code reused nameDup here, which
    # broke per-isoform grouping in panel.flatbed when showIso is set.
    nmName = c(x$name, x$name, nmNameDup),
    strand = c(x$strand, x$strand, strandDup)
  );
  df$start <- df$start * multiplier;
  df$stop <- df$stop * multiplier;
  invisible(df);
}
#############################################################
#
# display reference SNP name and vertical line
#
# Display the reference SNP's name (near the top of the panel) and a
# vertical guide line at its position.  Styling is taken from the global
# 'args' option list.
grid.refsnp <- function(name,pos) {
  labelStyle <- gpar(cex=args[['refsnpTextSize']],
                     col=args[['refsnpTextColor']],
                     alpha=args[['refsnpTextAlpha']])
  grid.text(as.character(name),
            x=unit(pos,"native"),
            y=unit(.95,'npc'),
            just=c("center","top"),
            gp=labelStyle)
  # vertical line from the axis up to just below the label
  lineStyle <- gpar(col=args[['refsnpLineColor']],
                    lwd=2,
                    alpha=args[['refsnpLineAlpha']])
  grid.segments(x0=unit(pos,"native"),
                x1=unit(pos,"native"),
                y0=unit(0,"npc"),
                y1=unit(1,'npc') - unit(1.5,"lines"),
                gp=lineStyle)
}
#############################################################
#
# Calculate the width of a piece of text (as a grid unit)
#
# Width of 'text' rendered with graphical parameters 'gp', as a grid unit.
textWidth <- function(text="",gp=gpar()) {
  grobWidth(textGrob(text, gp = gp))
}
#############################################################
#
# generate text with arrow (or just compute width of same)
# this is a bit crude and clunky
#
# Generate a gTree containing 'text' followed by a strand-direction arrow
# (used for gene labels), or just compute its width.
#
# direction: '+', 'forward', '->', '>' or 'right' draw a rightward arrow;
#            anything else draws leftward.
# widthOnly: when TRUE, return only the total width in inches (requires an
#            open graphics device for the unit conversion).
# The returned gTree carries width-related attributes ('width', 'twidth',
# 'awidth', ...) used by panel.flatbed's layout pass.
arrowText <- function(text,x=unit(.5,'npc'), y=unit(.5,'npc'), direction='+',name=NULL,gp=gpar(),
    check.overlap=TRUE, widthOnly=FALSE) {
  tWidth <- textWidth(text,gp)     # width of the label itself
  aWidth <- textWidth('xx,',gp)    # room reserved for the arrow
  if (widthOnly) {
    # The conversion options belong inside convertWidth(); the old code
    # passed them to return() itself, which errors at run time.
    return( convertWidth(tWidth + aWidth, unitTo='inches', valueOnly=TRUE) )
  }
  cWidth <- .1 * textWidth(',',gp)
  if ( direction %in% c('+','forward','->','>','right') ) {
    mult = 1
  } else {
    mult = -1
  }
  # label shifted half an arrow-width opposite the arrow direction
  tg <- textGrob(text,
    x=x - mult * .5 * aWidth, y=y,
    check.overlap=check.overlap,
    gp=gp,
    name="label")
  # arrow segment placed just past the end of the label
  ag <- linesGrob(
    x = unit.c( x - mult * .5 * aWidth + .5 * mult * tWidth + mult * unit(.005,'npc'),
      x + mult * .5 * aWidth + .5 * mult * tWidth ) ,
    y=unit.c(y,y),
    name="arrow",
    #gp=gp,
    arrow=arrow(type='open',
      angle=20,
      length=.75 * textWidth('x')
    )
  )
  result <- gTree(children=gList(tg,ag),name=name)
  attr(result,'width') <- convertX(tWidth + aWidth,'inches')
  attr(result,'twidth') <- convertX(grobWidth(tg),'inches')
  attr(result,'awidth') <- convertX(grobWidth(ag),'inches')
  attr(result,'cWidth') <- cWidth
  attr(result,'tWidth') <- tWidth
  attr(result,'aWidth') <- aWidth
  return(result)
}
#############################################################
#
# hilite a particular region on the plot
#
# Hilite a particular x-interval of the current panel with a translucent
# rectangle spanning the full panel height.
panel.hilite <- function(range=c(lo,hi),lo,hi,col="transparent",fill="blue",alpha=.1){
  xStart <- unit(range[1], "native")
  xWidth <- unit(range[2] - range[1], "native")
  grid.rect(x = xStart,
            width = xWidth,
            hjust = 0,
            gp = gpar(fill = fill, col = col, alpha = alpha))
}
#############################################################
#
# ribbonLegend from RGraphics example
#
# Build a colour-ribbon legend as a gTree (taken from the RGraphics
# example noted above).
#
# nlevels/breaks: either a number of equal levels or explicit break points
#   (see calcBreaks); cols: one fill colour per ribbon segment;
# scale: data range spanned by the ribbon; margin: padding around it.
# Returns a gTree of class "ribbonLegend", drawable with grid.draw().
ribbonLegend <- function (nlevels = NULL, breaks = NULL, cols,
    scale = range(breaks),
    margin = unit(0.5, "lines"), gp = NULL, vp = NULL, name = NULL)
{
    gTree(nlevels = nlevels, breaks = breaks, cols = cols, scale = scale,
        children = ribbonKids(nlevels, breaks, cols, scale),
        childrenvp = ribbonVps(nlevels, breaks, margin, scale),
        gp = gp, vp = vp, name = name, cl = "ribbonLegend")
}
# grid calls widthDetails() when sizing a ribbonLegend: its natural width
# is the sum of the widths of the columns in its internal layout.
widthDetails.ribbonLegend <- function (x)
{
    sum(layout.widths(viewport.layout(x$childrenvp[[1]])))
}
# Break points for the ribbon legend: explicit 'breaks' win; otherwise
# divide 'scale' into 'nlevels' equal steps.
calcBreaks <- function (nlevels, breaks, scale)
{
    if (!is.null(breaks)) {
        return(breaks)
    }
    seq(min(scale), max(scale), diff(scale)/nlevels)
}
# Viewport tree for a ribbonLegend: a 3x4 layout whose middle row holds
# the colour ribbon (column 2) and its tick labels (column 3), surrounded
# by 'margin'-sized padding; the label column is sized to the formatted
# break values.
ribbonVps <- function (nlevels, breaks, margin, scale)
{
    # format break values (3 significant digits) to size the label column
    breaks <- format(signif(calcBreaks(nlevels, breaks, scale),
        3))
    vpTree(viewport(name = "layout", layout = grid.layout(3,
        4, widths = unit.c(margin, unit(1, "lines"), max(unit(0.8,
            "lines") + stringWidth(breaks)), margin), heights = unit.c(margin,
        unit(1, "null"), margin))), vpList(viewport(layout.pos.col = 2,
        layout.pos.row = 2, yscale = scale, name = "ribbon"),
        viewport(layout.pos.col = 3, layout.pos.row = 2, yscale = scale,
            name = "labels")))
}
# Child grobs for a ribbonLegend: one rectangle per colour band, tick-mark
# segments, and tick labels.  Only the interior breaks get ticks/labels
# (the two endpoints are dropped).
ribbonKids <- function (nlevels, breaks, cols, scale)
{
    breaks <- calcBreaks(nlevels, breaks, scale)
    nb <- length(breaks)
    tickloc <- breaks[-c(1, nb)]   # interior break positions only
    gList(rectGrob(y = unit(breaks[-1], "native"), height = unit(diff(breaks),
        "native"), just = "top", gp = gpar(fill = cols), vp = vpPath("layout",
        "ribbon")), segmentsGrob(x1 = unit(0.5, "lines"), y0 = unit(tickloc,
        "native"), y1 = unit(tickloc, "native"), vp = vpPath("layout",
        "labels")), textGrob(x = unit(0.8, "lines"), y = unit(tickloc,
        "native"), just = "left", label = format(signif(tickloc,
        3)), vp = vpPath("layout", "labels")))
}
#############################################################
#
# make a "list" of genes in flat. returns a data frame
#
# Make a "list" of the genes present in 'flat' (the output of
# flatten.bed).  Returns a data frame with one row per transcript span
# (type-0 row): id, gene, chrom, start/stop (scaled) and startbp/stopbp
# (rescaled to base pairs via 'unit'), or NULL when there is no data.
make.gene.list <- function (flat, showIso=TRUE, subset, unit, ...)
{
    if ( prod(dim(flat)) <= 0 ) { return(NULL); }
    df <- flat;
    if (!missing(subset)) { df <- df[subset, ] }
    # Re-check after subsetting; the old code re-tested 'flat' here, so an
    # empty subset slipped through to the code below.
    if ( prod(dim(df)) <= 0 ) { return(NULL); }
    # NOTE(review): like panel.flatbed(), this consults the global
    # args[['showIso']] rather than the 'showIso' parameter; kept as-is to
    # preserve existing behaviour -- confirm whether the parameter should
    # take precedence.
    if (args[['showIso']]) {
      df$idnum <- match(df$nmName,unique(df$nmName));
    } else {
      df$idnum <- match(df$name,unique(df$name));
    }
    # type-0 rows are the transcript spans; other types are not needed here
    df0 <- df[df$type == 0, ];
    return( data.frame(id=df0$idnum, gene=df0$name, chrom=df0$chrom, start=df0$start, stop=df0$stop,
      startbp=df0$start * unit, stopbp=df0$stop*unit ) );
}
#############################################################
#
# display genes taking data from flattened bed format
#
# Display genes in the current grid viewport, taking data from flattened
# bed format (see flatten.bed).
#
# flat:   flattened gene table (type 0 = transcript, 1 = exon, 2 = cds).
# rows:   maximum number of stacked gene rows to draw.
# computeOptimalRows: when TRUE, perform only the layout pass and return
#         the number of rows needed to show every gene.
# showPartialGenes:   also place genes whose label/graphic extends past
#         the panel edge (otherwise they are hidden via clipping).
# shiftGeneNames:     slide labels inward so they stay inside the panel.
# buffer: horizontal padding (npc) added around each gene's extent.
# height: exon-box height as a fraction of a gene row; multiplier shifts
#         segment/box starts slightly -- NOTE(review): adding 'multiplier'
#         (default 0.001) to npc/native x positions looks suspicious here;
#         confirm intent.
# Also reads globals: args (showIso, requiredGene, axis styling) and
# opts (warnMissingGenes); assigns 'omittedGenes' into the global env.
panel.flatbed <- function (x=NULL, y=NULL, flat, fill = "navy", col = "navy", alpha = 1, textcol='black',
    multiplier = 0.001, height = 2/14, buffer=0.003, subset, cex=.9, rows=2, showPartialGenes=FALSE,
    shiftGeneNames=TRUE,
    computeOptimalRows=FALSE, ...)
{
    if ( prod(dim(flat)) <= 0 ) { return(1); }
    df <- flat;
    if (!missing(subset)) { df <- df[subset, ] }
    df$width <- df$stop - df$start;
    # group features per isoform (nmName) or per gene (name)
    if (args[['showIso']]) {
      df$idnum <- match(df$nmName,unique(df$nmName));
    } else {
      df$idnum <- match(df$name,unique(df$name));
    }
    df0 <- df[df$type == 0, ];
    df1 <- df[df$type == 1, ];
    df2 <- df[df$type == 2, ]; # unused?
    # optional per-row colour override carried in the data
    if ( "col" %in% names(df0) ) {
      col = df0$col
      fill = df0$col
    }
    # removed duplicate idnums from df0
    df0 <- df0[order(df0$idnum),] # sort to make sure repeated ids are adjacent
    df0$new <- c(1,diff(df0$idnum)) # identify new (1) vs. repeated (0)
    df0 <- df0[order(df0$idnum),] # put back into original order
    df0uniq <- df0[df0$new == 1,]
    # determine the row to use
    maxIdnum <- max(c(0,df$idnum))
    rowUse <- rep(-Inf,1+maxIdnum)   # rightmost occupied x per row
    id2row <- rep(0,1+maxIdnum) # keep track of locations for each gene
    # conversion to 'native' isn't working, so we convert everything via inches to npc below.
    # conversion utility
    native2npc <- function(x) {
      w <- diff(current.viewport()$xscale)
      a <- current.viewport()$xscale[1]
      return( (x-a) / w )
    }
    # Layout pass: for each gene, measure its label+arrow, clamp the label
    # into the panel if requested, then greedily assign the first row whose
    # occupied extent ends left of this gene.
    for (i in 1:dim(df0uniq)[1]) {
      # NOTE(review): debug trace left in; consider removing or gating on a
      # verbose flag.
      cat(paste(i,": ",df0uniq$name[i],"\n"));
      leftGraphic <- native2npc(min(df$start[df$idnum == df0uniq$idnum[i]]))
      rightGraphic <- native2npc(max(df$stop[df$idnum == df0uniq$idnum[i]]))
      centerGraphic<- mean(c(leftGraphic,rightGraphic))
      at <- arrowText(df0uniq$name[i],
        x = unit((df0uniq$start[i] + df0uniq$stop[i])/2, 'native'),
        y = unit(0,'npc'),
        direction = df0uniq$strand[i],
        check.overlap = TRUE,
        gp = gpar(cex = cex, fontface='italic',col=textcol,lwd=1.5),
        widthOnly=FALSE);
      # label width in npc (with 10% slack)
      w <- 1.1 * convertX(attr(at,'width'),'inches',valueOnly=TRUE)
      viewportWidth <- convertX(unit(1,'npc'),'inches',valueOnly=TRUE);
      w <- w / viewportWidth
      leftName <- centerGraphic - .5 * w
      rightName <- centerGraphic + .5 * w
      if (shiftGeneNames) {
        # keep the label inside the [0,1] npc range
        if (leftName < 0) {
          leftName <- 0; rightName <- w
        }
        if (rightName > 1) {
          rightName <- 1; leftName <- 1-w
        }
      }
      # total extent this gene occupies (graphic plus label, padded)
      left <- min(c(leftGraphic,leftName)) - buffer
      right <- max(c(rightGraphic,rightName)) + buffer
      # note: start/stop are repurposed to hold the label position (npc)
      df0uniq$start[i] <- leftName
      df0uniq$stop[i] <- rightName
      df0uniq$left[i] <- left
      df0uniq$right[i] <- right
      rowToUse <- min(which(rowUse < left))
      if ( showPartialGenes || (left >= 0 && right <= 1) ) {
        id2row[df0uniq$idnum[i]] <- rowToUse
        rowUse[rowToUse] <- right
      } else {
        id2row[df0uniq$idnum[i]] <- -2 # clipping will hide this
      }
    }
    requestedRows <- rows;
    optRows <- max(c(0,which(rowUse > 0)));
    if (computeOptimalRows) { return (optRows) }
    # NOTE(review): debug dump left in; writes debug.Rdata into the working
    # directory on every call.
    save(df,flat,df0,df1,df2,df0uniq,id2row,file="debug.Rdata");
    rows <- min(requestedRows,optRows);
    # If a specific gene must be visible, promote it to row 1 and demote
    # any row-1 genes it overlaps (they fall below 'rows' and get clipped).
    if (is.character(args[['requiredGene']]) ) {
      requiredGeneIdx <- min(which(df0uniq$name==args[['requiredGene']]) )
      requiredGeneIdnum <- df0uniq$idnum[requiredGeneIdx]
      if (id2row[requiredGeneIdnum] > rows) {
        for (id in which(id2row == 1)){
          if (df0uniq$left[id] < df0uniq$right[requiredGeneIdx] &&
              df0uniq$right[id] > df0uniq$left[requiredGeneIdx] ) {
            id2row[id] = rows+2
          }
        }
        id2row[requiredGeneIdnum] <- 1
      }
    }
    # Warn (in the margin) when genes had to be omitted for lack of rows.
    if (optRows > requestedRows && opts[['warnMissingGenes']]) {
      omitIdx <- which(id2row > rows)
      assign("omittedGenes",as.character(df0uniq$name[omitIdx]),globalenv())
      numberOfMissingGenes <- length(omittedGenes);
      message <- paste(numberOfMissingGenes," gene",if(numberOfMissingGenes > 1) "s" else "", "\nomitted",sep="")
      pushViewport(viewport(clip='off'));
      grid.text(message ,x=unit(1,'npc') + unit(1,'lines'), y=.5, just=c('left','center'),
        gp=gpar(cex=args[['axisTextSize']], col=args[['axisTextColor']], alpha=args[['frameAlpha']]));
      upViewport(1);
    }
    increment <- 1.0/rows;
    # y position (npc) for a gene's row; text=TRUE gives the label line,
    # otherwise the segment/exon line within the same row
    yPos <- function(id,text=FALSE) {
      if (text) {
        return( unit((rows-id2row[id]) * increment + 4.4/7*increment, "npc") )
      } else {
        return( unit( (rows-id2row[id]) * increment + 2/7*increment, "npc") )
      }
    }
    # transcript spans as thin horizontal segments
    grid.segments(x0 = multiplier + df0$start, x1 = df0$stop,
        y0 = yPos(df0$idnum),
        y1 = yPos(df0$idnum),
        default.units = "native",
        gp = gpar(col = col, alpha = alpha));
    if ( "col" %in% names(df1) ) {
      col = df1$col
      fill = df1$col
    }
    # exons as filled boxes on top of the transcript segments
    grid.rect(x = multiplier + df1$start, width = df1$width,
        just = "left",
        y = yPos(df1$idnum),
        height = unit(height * increment, "npc"),
        default.units = "native",
        gp = gpar(fill = fill, col = col, alpha = alpha));
    if ( "textcol" %in% names(df0uniq) ) {
      textcol = df0uniq$textcol
      fill = df0uniq$textcol
    }
    # gene labels with strand arrows (positions computed in the layout pass)
    for (i in 1:dim(df0uniq)[1]) {
      at <- arrowText(df0uniq$name[i],
        x = unit((df0uniq$start[i] + df0uniq$stop[i])/2, 'npc'),
        y = yPos(df0uniq$idnum[i], text=TRUE),
        direction = df0uniq$strand[i],
        check.overlap = TRUE,
        gp = gpar(cex = cex, fontface='italic',col=textcol,lwd=1.5));
      grid.draw(at);
    }
}
#############################################################
#
# Assemble a plot zooming in on a region from various pieces
# including metal output (with positions added), ld (ala newfugue), recombination rate data,
# genes data (refFlat), etc.
#
# NB: *** passing in entire args list ***
#
zplot <- function(metal,ld=NULL,recrate=NULL,refidx=NULL,nrugs=0,postlude=NULL,args=NULL,...){
  # Assemble the full regional-association ("zoom") plot.
  #
  # metal:    association results; uses MarkerName, P.value, pos, group and
  #           pch columns (pos/group/pch appear to be added upstream --
  #           confirm against the caller).
  # ld:       LD data -- NOTE(review): this parameter appears unused here;
  #           colouring is driven entirely by metal$group.
  # recrate:  recombination-rate track with 'pos' and 'recomb' columns.
  # refidx:   row index of the reference SNP in 'metal' (NULL for none).
  # nrugs:    number of SNP-set rug strips drawn above the main panel.
  # postlude: optional R script source()d after drawing.
  # args:     the list of plotting options used throughout.
  #
  # Also reads globals set up by the surrounding script: xRange, yRange,
  # recrateRange, refFlat, rug and transformation().
  refSnp <- metal$MarkerName[refidx];
  metal$P.value <- as.numeric(metal$P.value);
  # Dot sizes: scale by the Weight column when present, else a constant.
  if ( char2Rname(args[['weightCol']]) %in% names(metal) ){
    metal$Weight <- metal[ ,char2Rname(args[['weightCol']]) ];
    dotSizes <- rescale( log(pmax(1,metal$Weight)), c(log(1000), log(100000)),
      c(args[['smallDot']],args[['largeDot']] ) ) ;
  } else {
    dotSizes <- rep(args[['largeDot']], dim(metal)[1] );
    if (! is.null(args[['refDot']]) ) {
      dotSizes[refidx] <- args[['refDot']];
    }
  }
  if ( is.null(args[['refDot']]) ) {
    # this avoids problems downstream, but dotSize[refidx] has already been set in most cases.
    args[['refDot']] <- args[['largeDot']];
  }
  grid.newpage();
  # push viewports just to calculate optimal number of rows for refFlat
  pushViewport(viewport(
    layout=grid.layout(2+3+4,1+2,
      widths=unit(c(5,1,5),c('lines','null','lines')),
      heights=unit(c(.5, 3,nrugs, 1, 1, 1, 2*args[['rfrows']], 4,.5),
        c('lines','lines', 'lines','lines','null','lines', 'lines', 'lines','lines'))
      )
    ));
  pvalVp=dataViewport(
    xRange,yRange,
    extension=c(0,.05),
    layout.pos.row=5,layout.pos.col=2,
    name="pvals",
    clip="off");
  pushViewport(
    viewport(xscale=pvalVp$xscale,
      layout.pos.row=7,
      layout.pos.col=2,
      name="refFlatOuter")
    );
  # dry-run layout pass: how many gene rows would be needed?
  optRows <- panel.flatbed(
    flat=refFlat,
    rows=NULL,
    computeOptimalRows=TRUE,
    showPartialGenes = args[['showPartialGenes']],
    shiftGeneNames = args[['shiftGeneNames']],
    cex=args[['geneFontSize']],
    col=args[['geneColor']],
    fill=args[['geneColor']],
    multiplier=1/args[['unit']]
    );
  # NOTE(review): the condition below computes length(rfrows < 2) -- the
  # length of a comparison vector -- which is nonzero whenever rfrows is
  # non-empty, so the else branch is unreachable.  It was presumably meant
  # to be length(args[['rfrows']]) < 2; confirm before changing.
  if ( length( args[['rfrows']] < 2 ) ) { # use value as upper bound
    args[['rfrows']] <- min(args[['rfrows']], optRows)
  } else { # use smallest two values as lower and upper bounds
    args[['rfrows']] <- sort(args[['rfrows']])
    rows <- min( args[['rfrows']][2], optRows )
    args[['rfrows']] <- max( args[['rfrows']][1], rows )
  }
  popViewport(2);
  # OK. Now we know how many rows to use and we can set up the layout we will actually use.
  pushViewport(viewport(
    layout=grid.layout(2+3+4,1+2,
      widths=unit(c(args[['axisTextSize']]*args[['leftMarginLines']],1,args[['axisTextSize']]*args[['rightMarginLines']]),c('lines','null','lines')),
      heights=unit(c(.5, 3,nrugs, 1, 1, 1, 2*args[['geneFontSize']]*args[['rfrows']], 4,.5),
        c('lines','lines', 'lines','lines','null','lines', 'lines', 'lines','lines'))
      )
    ));
  ##
  # layout (top to bottom)
  # ----------------------
  # spacer
  # title text
  # rugs
  # separation
  # pvals
  # separation
  # genes
  # subtitle text
  # spacer
  #
  #
  # layout (left to right)
  # ----------------------
  # vertical axes and labeling
  # main data panels, titles, horizontal axes, etc.
  # vertical axes and labeling
  #
  ########## title text
  titleVp=viewport(
    layout.pos.row=2,layout.pos.col=2,
    name="title",
    clip="off");
  pushViewport(titleVp);
  grid.text(args[['title']],gp=gpar(cex=2,col=args[['titleColor']]));
  upViewport(1);
  ########## pvals
  # this viewport is defined above
  # pvalVp=dataViewport(
  #    xRange,yRange,
  #    extension=c(0,.05),
  #    layout.pos.row=5,layout.pos.col=2,
  #    name="pvals",
  #    clip="off");
  pushViewport(pvalVp);
  grid.yaxis(at=args[['yat']],gp=gpar(cex=args[['axisSize']],col=args[['frameColor']],alpha=args[['frameAlpha']]));
  # grid.xaxis(at=args[['xat']],gp=gpar(cex=args[['axisSize']],col=args[['frameColor']],alpha=args[['frameAlpha']]));
  # y-axis label: custom text when supplied, else -log10(p-value)
  if (length(args[['ylab']]) > 0) {
    grid.text(x=unit(args[['ylabPos']],'lines'),label=args[['ylab']],rot=90,
      gp=gpar(cex=args[['axisTextSize']], col=args[['axisTextColor']], alpha=args[['frameAlpha']])
      );
  } else {
    grid.text(x=unit(args[['ylabPos']],'lines'),label=expression(paste(-log[10] ,"(p-value)")),rot=90,
      gp=gpar(cex=args[['axisTextSize']], col=args[['axisTextColor']], alpha=args[['frameAlpha']])
      );
  }
  # second y scale (right side) for the recombination-rate track
  pushViewport(dataViewport(extension=c(0,.05),xRange,recrateRange,name='recrate',clip="off"));
  if ( args[['showRecomb']] ) {
    grid.yaxis(main=F,gp=gpar(cex=args[['axisSize']],col=args[['recombAxisColor']],alpha=args[['recombAxisAlpha']]));
    grid.text(x=unit(1,'npc')+unit(args[['recombPos']],'lines'),
      label="Recombination rate (cM/Mb)",rot=270,
      gp=gpar(cex=args[['axisTextSize']],col=args[['recombAxisColor']],alpha=args[['recombAxisAlpha']]));
  }
  # recombination track drawn UNDER the points (recombOver=FALSE)
  if ( args[['showRecomb']] && !args[['recombOver']]) {
    pushViewport(dataViewport(extension=c(0,.05),xRange,recrateRange,name='recrateClipped',
      clip="on"));
    if (args[['recombFill']]) {
      grid.polygon(x=recrate$pos,y=recrate$recomb,
        gp=gpar(alpha=args[['recombFillAlpha']],col=args[['recombColor']],fill=args[['recombColor']]),
        default.units='native'
        );
    } else {
      panel.xyplot(recrate$pos,recrate$recomb,type='l',lwd=2,alpha=args[['recombLineAlpha']],col=args[['recombColor']]);
    }
    upViewport(1)
  }
  pushViewport(viewport(clip="on",xscale=pvalVp$xscale,yscale=pvalVp$yscale,name='pvalsClipped'));
  grid.rect(gp=gpar(col=args[['frameColor']],alpha=args[['frameAlpha']]));
  # reference-SNP name and vertical marker line
  if (! is.null(refidx)) {
    grid.refsnp(name=refSnp,pos=metal$pos[refidx]);
  }
  groupIds <- sort(unique(metal$group))
  # NOTE(review): debug output left in; prints the group table on each call.
  print(table(metal$group));
  # translucent diamond highlighting the reference SNP, drawn first so the
  # real point lands on top of it
  if (args[['bigDiamond']] && args[['showRefsnpAnnot']]) {
    grid.points(x=metal$pos[refidx],y=transformation(metal$P.value[refidx]),
      gp=gpar(col=args[['refsnpColor']],fill=args[['refsnpColor']],cex=1.6*args[['refDot']],alpha=.2),
      pch=23,
      default.units='native'
      );
  }
  # scatter points, one pass per LD colour group
  for (i in groupIds) {
    idx <- which(metal$group == i);
    gmetal <- metal[idx,];
    colors <- args[['ldColors']][gmetal$group];
    # filled plotting symbols (pch 21-25) get a fixed dark outline
    colors[which(gmetal$pch %in% 21:25)] <- 'gray20';
    grid.points(x=gmetal$pos,y=transformation(gmetal$P.value),
      pch=gmetal$pch,
      gp=gpar(
        cex=dotSizes[idx],
        col=colors,
        fill=args[['ldColors']][gmetal$group]
        ));
  }
  # disabled alternative rendering of the reference SNP, kept for reference
  if (FALSE) {
    grid.points(x=metal$pos[refidx],y=transformation(metal$P.value[refidx]),
      gp=gpar(col=args[['refsnpColor']],fill=args[['refsnpColor']],
        cex= if (args[['bigDiamond']] & args[['showRefsnpAnnot']]) 1.6*args[['refDot']] else args[['refDot']]),
      pch= if (args[['bigDiamond']] & args[['showRefsnpAnnot']]) 5 else metal$pch[refidx],
      default.units='native'
      );
  }
  # recombination track drawn OVER the points (recombOver=TRUE)
  if ( args[['showRecomb']] && args[['recombOver']]) {
    pushViewport(dataViewport(extension=c(0,.05),xRange,recrateRange,name='recrateClipped',
      clip="on"));
    if (args[['recombFill']]) {
      grid.polygon(x=recrate$pos,y=recrate$recomb,
        gp=gpar(alpha=args[['recombFillAlpha']],col=args[['recombColor']],fill=args[['recombColor']]),
        default.units='native'
        );
    } else {
      panel.xyplot(recrate$pos,recrate$recomb,type='l',lwd=2,alpha=args[['recombLineAlpha']],col=args[['recombColor']]);
    }
    upViewport(1);
  }
  grid.rect(gp=gpar(col=args[['frameColor']],alpha=args[['frameAlpha']]));
  ########## LD colour legend
  pushViewport(viewport(clip="on",name='legend'));
  breaks <- union(args[['ldCuts']],c(0,1));
  breaks <- sort(unique(breaks));
  nb <- length(breaks);
  cols <- args[['ldColors']]
  cols <- rep(cols, length=nb+2);
  rl <- ribbonLegend(
    breaks=breaks,
    cols=cols[2:(1+nb)],
    # NOTE(review): 'frameAlapha' is a misspelling of 'frameAlpha', so this
    # alpha lookup yields NULL; confirm and fix the key.
    gp=gpar(cex=args[['legendSize']],col=args[['frameColor']],alpha=args[['frameAlapha']])
    );
  if (args[['legend']] == 'auto') {
    args[['legend']] = AutoLegendSide(transformation(metal$P.value),metal$pos,xRange);
  }
  if (tolower(args[['legend']]) %in% c('left','right')) {
    pushViewport(viewport(name='legendVp',
      x=if (args[['legend']] == 'left') unit(2.5,"char") else unit(1,'npc') - unit(2.5,'char'),
      y=unit(1,'npc') - unit(.5,'char'),
      just=c('center','top'),
      width=unit(4,'char'),
      height=unit(8,'lines')
      ));
    # white backdrop so the legend stays readable over the data
    grid.rect(gp=gpar(col='transparent',fill='white',alpha=args[['legendAlpha']]));
    grid.rect(gp=gpar(col=args[['frameColor']],alpha=args[['frameAlpha']]));
    pushViewport(viewport(name='ribbonLegend',
      y=0,
      just=c('center','bottom'),
      width=unit(4,'char'),
      height=unit(7,'lines')
      ))
    grid.draw(rl);
    upViewport(1);
    pushViewport(viewport(name='LDTitle',
      clip="off",
      #x=unit(2.5,"char"),
      width=unit(4,"char"),
      y=unit(1,'npc') - unit(.25,'char'),
      just=c('center','top'),
      height=unit(1,'lines')
      ))
    grid.text(args[['LDTitle']], gp=gpar(col=args[['frameColor']],alpha=args[['frameAlpha']]));
    upViewport(1);
    upViewport(1);
  } # end if show legend on left or right
  upViewport(4);
  ######### subtitle space; place holder for now
  pushViewport(viewport(layout.pos.row=8,layout.pos.col=2,name="subtitle"));
  if(FALSE) {
    grid.rect(gp=gpar(col='red'));
    grid.xaxis(gp=gpar(col=args[['frameColor']],alpha=args[['frameAlpha']]));
    grid.text(paste('Position on',chr2chrom(args[['chr']]),"(Mb)"),
      gp=gpar(col="red"));
  }
  upViewport(1);
  ########## annotation (genes)
  if(args[['rfrows']] > 0) {
    pushViewport(
      viewport(xscale=pvalVp$xscale,
        layout.pos.row=7,
        layout.pos.col=2,
        name="refFlatOuter")
      );
    pushViewport(
      viewport(xscale=pvalVp$xscale,
        name="refFlatInner",
        clip="on")
      );
    # real drawing pass for the gene annotation panel
    panel.flatbed(
      flat=refFlat,
      showPartialGenes = args[['showPartialGenes']],
      shiftGeneNames = args[['shiftGeneNames']],
      rows=args[['rfrows']],
      cex=args[['geneFontSize']],
      col=args[['geneColor']],
      fill=args[['geneColor']],
      multiplier=1/args[['unit']]);
    upViewport(1);
    #grid.rect(gp=gpar(col='white'));
    grid.rect(gp=gpar(col=args[['frameColor']],alpha=args[['frameAlpha']]));
    # x axis lives under the gene panel; optionally format tick labels with
    # a fixed number of decimal places
    if ( !is.null(args[['xnsmall']]) && !is.null(args[['xat']]) ) {
      grid.xaxis(at=args[['xat']], label=format(args[['xat']], nsmall=args[['xnsmall']]),
        gp=gpar(cex=args[['axisSize']],col=args[['frameColor']],alpha=args[['frameAlpha']]));
    } else {
      grid.xaxis(at=args[['xat']],
        gp=gpar(cex=args[['axisSize']],col=args[['frameColor']],alpha=args[['frameAlpha']]));
    }
    grid.text(paste('Position on',chr2chrom(args[['chr']]),unit2char(args[['unit']])),
      y=unit(args[['xlabPos']],'lines'),just=c('center',"bottom"),
      gp=gpar(cex=args[['axisTextSize']], col=args[['axisTextColor']], alpha=args[['frameAlpha']])
      );
    # optional highlighted sub-region (hiStartBP..hiEndBP)
    panel.hilite(
      range=c(args[['hiStartBP']]/args[['unit']],args[['hiEndBP']]/args[['unit']]),
      fill=args[['hiColor']],
      alpha=args[['hiAlpha']]
      );
    upViewport(1);
  }
  ########## rugs for snpsets
  pushViewport(viewport(xscale=pvalVp$xscale,layout.pos.row=3,
    layout.pos.col=2,name="rugs",clip="off"));
  # labels to the left of each rug strip (top strip first)
  i <- nrugs;
  for (snpset in levels(rug$snp_set)) {
    grid.text(as.character(snpset),x=unit(-.25,"lines"),
      y=(i-.5)/nrugs, just="right",
      gp=gpar(col=args[['rugColor']], alpha=args[['rugAlpha']],cex=.90*args[['axisTextSize']])
      );
    i <- i-1;
  }
  pushViewport(viewport(xscale=pvalVp$xscale,layout.pos.row=3,
    layout.pos.col=2,name="rugsClipped",clip="on"));
  # tick marks for each SNP set, one horizontal strip per set
  i <- nrugs;
  for (snpset in levels(rug$snp_set)) {
    panel.rug( rug[ which(rug$snp_set==snpset), "pos" ] ,
      start = (i-1)/(nrugs) + (.15/nrugs),
      end = (i)/(nrugs) - (.15/nrugs),
      y.units=rep("native",2),
      col=args[['rugColor']],
      alpha=args[['rugAlpha']]
      );
    i <- i-1;
  }
  upViewport(2);
  # user-supplied postlude script for custom additions to the page
  if (is.character(postlude) && file.exists(postlude)) {
    source(postlude);
  }
} ## end zplot
#
# grid.log: produce a run-summary "log page".  With ascii=TRUE the summary is
# printed to the active sink/console; otherwise it is drawn as an extra page
# on the current grid graphics device together with the annotation key and
# the LD colour legend.
#
# args        named option list (see default.args)
# metal       association results (needs P.value and MarkerName columns)
# linespacing spacing between summary lines, in 'lines' units (grid page only)
# ascii       print text instead of drawing a grid page?
# debug       also report which .Rdata file the data were loaded from
#
# Reads the globals 'omittedGenes', 'warningMessages' and 'transformation';
# ribbonLegend() is a project helper defined elsewhere in this file.
#
grid.log <- function(args,metal,linespacing=1.5,ascii=FALSE,debug=FALSE){
  # Build parallel label/value vectors; each pair becomes one summary line.
  labels=c("date");
  values=c(date());
  # labels=c(labels,"working directory");
  # values=c(values,getwd());
  # labels=c(labels,"unit");
  # values=c(values,args[['unit']]);
  labels=c(labels,"build");
  values=c(values,args[['build']]);
  labels=c(labels,"display range");
  values=c(values,paste( 'chr',args[['chr']],":",args[['start']], "-", args[['end']], " [",args[['startBP']],"-",args[['endBP']], "]",sep=""));
  labels=c(labels,"hilite range");
  values=c(values,paste( args[['hiStart']], "-", args[['hiEnd']], " [",args[['hiStartBP']],"-",args[['hiEndBP']], "]"));
  labels=c(labels,"reference SNP");
  values=c(values,args[['refsnp']]);
  # labels=c(labels,"prefix");
  # values=c(values,args[['prefix']]);
  # labels=c(labels,"log");
  # values=c(values,args[['log']]);
  if (!is.null(args[['reload']])) {
    labels=c(labels,"reload");
    values=c(values,args[['reload']]);
  }
  if(! is.null(args[['reload']]) || debug){
    labels=c(labels,"data reloaded from");
    values=c(values,args[['rdata']]);
  }
  labels=c(labels,"number of SNPs plotted");
  values=c(values,as.character(dim(metal)[1]));
  # Report the extreme p-values on the plotted (-log10) scale, each with the
  # marker attaining it, formatted back to scientific notation via log2sci().
  labels=c(labels,paste("max",args[['pvalCol']]));
  maxIdx <- which.max(transformation(metal$P.value));
  maxName <- as.character(metal$MarkerName[maxIdx]);
  maxNegLogP <- transformation(metal$P.value[maxIdx]);
  maxPSci <- log2sci(-maxNegLogP)
  values=c(values,paste(maxPSci," [", maxName ,"]",sep=""));
  labels=c(labels,paste("min",args[['pvalCol']]));
  minIdx <- which.min(transformation(metal$P.value));
  minName <- as.character(metal$MarkerName[minIdx]);
  minNegLogP <- transformation(metal$P.value[minIdx]);
  minPSci <- log2sci(-minNegLogP)
  values=c(values,paste(minPSci," [", minName ,"]",sep=""));
  # Omitted genes are reported three to a line.
  if (TRUE) {
    oG <- omittedGenes;
    while (length(oG) > 0) {
      labels=c(labels,"omitted Genes");
      values=c(values,paste(oG[1:min(length(oG),3)],collapse=", "));
      oG <- oG[-(1:3)]
    }
  }
  if (TRUE) {
    w <- warningMessages;
    while (length(w) > 0) {
      labels=c(labels,"Warning");
      values=c(values,w[1]);
      w <- w[-1]
    }
  }
  labels=paste(labels, ": ",sep='');
  if (ascii) {
    cat(paste(format(labels,width=20,justify="right"),values,sep=" ",collapse="\n"));
    cat('\n');
    cat('\nMake more plots at http://csg.sph.umich.edu/locuszoom/');
    cat('\n');
  } else {
    # Grid page: labels right-justified against x=.3, values left-justified.
    grid.text(labels,x=.3,y=unit(1,'npc') - unit(linespacing *(1:length(labels)),'lines'), just='right');
    grid.text(values,x=.3,y=unit(1,'npc') - unit(linespacing *(1:length(values)),'lines'), just='left');
    # (A disabled "if (FALSE && args[['showAnnot']]) {...}" duplicate of the
    # annotation-key drawing below was removed here as unreachable code.)
    if ( 'annot' %in% names(metal) && args[['showAnnot']] ) {
      # Key built from the annotation classes actually present in the data.
      annotlabels <- levels(as.factor(metal$annot))
      pch <- rep(args[['annotPch']],length=length(annotlabels));
      key <- simpleKey(text=annotlabels);
      key$points$pch=pch;
      key$points$col="navy";
      key$points$fill="lightskyblue";
      keyGrob <- draw.key(key,draw=FALSE);
      annotationBoxTop <- unit(0.95,'npc');
      annotationBoxHeight <- unit(3,"lines") + grobHeight(keyGrob);
      pushViewport(viewport(x=.90,y=annotationBoxTop,width=grobWidth(keyGrob),
                            height=annotationBoxHeight,just=c('right','top')));
      pushViewport(viewport(y=unit(.75,'lines'),height = grobHeight(keyGrob),just=c('center','bottom')));
      draw.key(key,draw=TRUE);
      grid.rect();
      popViewport();
      grid.text('annotation key',x=.5,y=unit(1,'npc') - unit(1,'lines'),just=c('center','top'))
      popViewport();
    } else { if (args[['showAnnot']]) {
      # No per-SNP annotation column: fall back to the fixed class list.
      # NOTE(review): this branch sizes and pushes the key box but never
      # draws the key itself -- presumably intentional placeholder; confirm.
      annotlabels <- c('no annotation','framestop','splice','nonsyn','coding','utr','tfbscons','mcs44placental');
      pch <- args[['annotPch']];
      annotlabels <- c(annotlabels[-1],annotlabels[1])
      pch <- c(pch[-1],pch[1])
      key <- simpleKey(text=annotlabels);
      key$points$pch=pch;
      key$points$col="navy";
      key$points$fill="lightskyblue";
      keyGrob <- draw.key(key,draw=FALSE);
      annotationBoxTop <- unit(0.95,'npc');
      annotationBoxHeight <- unit(3,"lines") + grobHeight(keyGrob);
      pushViewport(viewport(x=.90,y=annotationBoxTop,width=grobWidth(keyGrob),
                            height=annotationBoxHeight,just=c('right','top')));
      popViewport();
    } }
    # LD colour ribbon legend, mirroring the one drawn on the main plot.
    breaks <- union(args[['ldCuts']],c(0,1));
    breaks <- sort(unique(breaks));
    nb <- length(breaks);
    cols <- args[['ldColors']]
    cols <- rep(cols, length=nb+2);
    rl <- ribbonLegend(
      breaks=breaks,
      cols=cols[2:(1+nb)],
      # Fixed: 'frameAlapha' (typo) indexed a nonexistent args element, so the
      # legend frame was always drawn with alpha=NULL instead of frameAlpha.
      gp=gpar(cex=args[['legendSize']],col=args[['frameColor']],alpha=args[['frameAlpha']])
    );
    if ( args[['legend']] %in% c('left','right') ) {
      # NOTE(review): the annotlabels/pch/key objects built here are never
      # drawn in this branch (only the box geometry is reused) -- confirm.
      annotlabels <- c('no annotation','framestop','splice','nonsyn','coding','utr','tfbscons','mcs44placental');
      pch <- args[['annotPch']];
      annotlabels <- c(annotlabels[-1],annotlabels[1])
      pch <- c(pch[-1],pch[1])
      key <- simpleKey(text=annotlabels);
      key$points$pch=pch;
      key$points$col="navy";
      key$points$fill="lightskyblue";
      keyGrob <- draw.key(key,draw=FALSE);
      annotationBoxTop <- unit(0.95,'npc');
      annotationBoxHeight <- unit(3,"lines") + grobHeight(keyGrob);
      pushViewport(viewport(name='legendVpPage2',
                            x=unit(.9,'npc'),
                            y=annotationBoxTop - annotationBoxHeight - unit(2,'lines'),
                            just=c('right','top'),
                            width=unit(4,'char'),
                            height=unit(8,'lines')
      ));
      grid.rect(gp=gpar(col='transparent',fill='white',alpha=args[['legendAlpha']]));
      grid.rect(gp=gpar(col=args[['frameColor']],alpha=args[['frameAlpha']]));
      pushViewport(viewport(name='ribbonLegendPage2',
                            y=0,
                            just=c('center','bottom'),
                            width=unit(4,'char'),
                            height=unit(7,'lines')
      ))
      grid.draw(rl);
      upViewport(1);
      pushViewport(viewport(name='LDTitlePage2',
                            clip="off",
                            width=unit(4,"char"),
                            y=unit(1,'npc') - unit(.25,'char'),
                            just=c('center','top'),
                            height=unit(1,'lines')
      ))
      grid.text(args[['LDTitle']], gp=gpar(col=args[['frameColor']],alpha=args[['frameAlpha']]));
      upViewport(1);
      upViewport(1);
    }
    grid.text('Make more plots at http://csg.sph.umich.edu/locuszoom/', y=unit(1,'lines'), just=c('center','bottom'));
  }
}
#############################################################
#
# process argument list, splitting the key=value pairs
#
#
# argv: parse trailing command-line "key=value" arguments into a named list
# (value strings, keyed by the text left of the first '=').
# Returns an empty list when no arguments were supplied.
#
argv <- function(){
  args <- commandArgs(TRUE);
  newl <- list()
  # seq_along() instead of 1:length(args): with zero arguments the old
  # 1:length() form yielded c(1, 0) and the loop crashed on args[[1]].
  for ( i in seq_along(args) ) {
    keyval <- strsplit(args[[i]],"=")[[1]];
    # NOTE: only the first two '='-separated fields are kept, so a value
    # containing '=' is silently truncated.
    key <- keyval[1]; val <- keyval[2];
    newl[[ key ]] <- val;
  }
  return(newl)
}
#################################################################################
# #
# MAIN PROGRAM BEGINS HERE #
# #
#################################################################################
# Run-wide state.  'flags' records how the plotting region was chosen
# (flank = derived from a window around the reference SNP) and whether data
# were restored from a saved .Rdata image instead of read fresh.
flags <- list(flank=FALSE,reloaded=FALSE);
createdFiles <- list(); # NOTE(review): initialized but never appended to in this file -- confirm before relying on it
refSnpPos <- empty.data.frame(); # empty.data.frame() is a project helper -- presumably defined earlier in the file; verify
#
# set program defaults -- may be overridden with command line arguments
#
# Program defaults.  Every entry may be overridden by a key=value command-line
# argument of the same name (see argv() and the ConformList/ModifyList merge
# below); ProcessThemes() may also replace defaults before user values win.
default.args <- list(
theme = NULL, # select a theme (collection of settings) for plot
experimental = FALSE, # try some experimental features?
pquery = FALSE, # is pquery available?
format = "pdf", # file format (pdf or png or both)
recombTable = "results.recomb_rate", # Recomb Rate Table (for SQL)
clean=TRUE, # remove temp files?
build = "hg18", # build to use for position information
metal = "metal.tbl", # metal output file
alreadyTransformed=FALSE, # are metal p-values already -log10() -transformed?
pvalCol="P.value", # name for p-value column in metal file
posCol="pos", # name for positions column in metal file
markerCol="MarkerName", # name for MarkerName column in metal file
weightCol="Weight", # name for weights column in metal file
ymin=0, # min for p-value range (expanded to fit all p-vals if needed)
ymax=10, # max for p-value range (expanded to fit all p-vals if needed)
yat=NULL, # values for y-axis ticks
xat=NULL, # values for x-axis ticks
xnsmall=NULL, # number of digits after decimal point on x-axis labels
chr = NULL, # chromosome
start = NULL, # start of region (string, may include Mb, kb, etc.)
end = NULL, # end of region (string, may include Mb, kb, etc.)
flank = "300kb", # surround refsnp by this much
xlabPos = -3.0, # position of xaxis label (in lines relative to bottom panel)
ylabPos = -3.0, # position of yaxis label (in lines relative to left edge of panel)
ylab = "", # override default label for y-axis
recombPos = 3.0, # position of recomb label (in lines relative to right edge of panel)
axisSize = 1, # scaling factor for axes
axisTextSize = 1, # scaling factor for axis labels
axisTextColor = "gray30", # color of axis labels
requiredGene = NULL, # gene name (string)
refsnp = NULL, # snp name (string)
refsnpTextColor = "black", # color for ref snp label
refsnpTextSize = 1, # scaling factor for text size
refsnpTextAlpha = 1, # alpha for ref snp label
refsnpLineColor = "transparent", # color for ref snp line (invisible by default)
refsnpLineAlpha = .5, # alpha for ref snp line
title = "", # title for plot
titleColor = "black", # color for title
thresh = 1, # only get pvalues <= thresh # this is now ignored.
width = 10, # width of pdf (inches)
height = 7, # height of pdf (inches)
leftMarginLines = 5, # margin (in lines) on left
rightMarginLines = 5, # margin (in lines) on right
unit=1000000, # bp per unit displayed in plot
ldTable = "results.ld_point6", # LD Table (for SQL)
annot=NULL, # file for annotation
showAnnot=TRUE, # show annotation for each snp?
showGenes=TRUE, # show genes?
annotCol='annotation', # column to use for annotation, if it exists
annotPch='24,24,25,22,22,8,7,21,1', # plot symbols for annotation
annotOrder=NULL, # ordering of annotation classes
showRefsnpAnnot=TRUE, # show annotation for reference snp too?
bigDiamond=FALSE, # put big diamond around refsnp?
ld=NULL, # file for LD information
ldCuts = "0,.2,.4,.6,.8,1", # cut points for LD coloring
ldColors = "gray50,navy,lightskyblue,green,orange,red,purple3", # colors for LD
ldCol='rsquare', # name for LD column
LDTitle=NULL, # title for LD legend
smallDot = .4, # smallest p-value cex
largeDot = .8, # largest p-value cex
refDot = NULL, # largest p-value cex
rfrows = '4', # max number of rows for reflat genes
warnMissingGenes = FALSE, # should we warn about missing genes on the plot?
showPartialGenes = TRUE, # should genes that don't fit completely be displayed?
shiftGeneNames = TRUE, # should names of partially-shown genes be shifted into view?
geneFontSize = .8, # size for gene names
geneColor = "navy", # color for genes
snpset = "Affy500,Illu318,HapMap", # SNP sets to show
snpsetFile = NULL, # use this file for SNPset data (instead of pquery)
rugColor = "gray30", # color for snpset rugs
rugAlpha = 1, # alpha for snpset rugs
metalRug = NULL, # if not null, use as label for rug of metal positions
refFlat = NULL, # use this file with refFlat info (instead of pquery)
showIso=FALSE, # show each isoform of gene separately
showRecomb = TRUE, # show recombination rate?
recomb=NULL, # recombination rate file
recombAxisColor=NULL, # color for recomb rate axis labeling
recombAxisAlpha=NULL, # alpha for recomb rate axis labeling
recombColor='blue', # color for recomb rate on plot
recombOver = FALSE, # overlay recombination rate? (else underlay it)
recombFill = FALSE, # fill recombination rate? (else line only)
recombFillAlpha=0.2, # recomb fill alpha
recombLineAlpha=0.8, # recomb line/text alpha
frameColor='gray30', # frame color for plots
frameAlpha=1, # frame alpha for plots
legendSize=.8, # scaling factor of legend
legendAlpha=1, # transparency of legend background
legendMissing=TRUE, # show 'missing' as category in legend?
legend='auto', # legend? (auto, left, right, or none)
hiStart=0, # start of hilite region
hiEnd=0, # end of hilite region
hiColor="blue", # hilite color
hiAlpha=0.1, # hilite alpha
clobber=TRUE, # overwrite files?
reload=NULL, # .Rdata file to reload data from
prelude=NULL, # code to execute after data is read but before plot is made (allows data modification)
postlude=NULL, # code to execute after plot is made (allows annotation)
prefix=NULL, # prefix for output files
dryRun=FALSE # show a list of the arguments and then halt
)
### default data
# Empty "schema" data frames: used as GetData() fallbacks when a pquery
# command is unavailable or returns nothing, so downstream code can rely on
# the column names existing.
refSnpPos <- data.frame()
# NOTE(review): recrate.default repeats the chr/pos columns; data.frame()
# dedupes the names to chr.1/pos.1, which is probably unintended -- verify.
recrate.default <- data.frame(chr=NA, pos=NA, recomb=NA, chr=NA, pos=NA)[c(),,drop=FALSE]
rug.default <- data.frame(snp=NA, chr=NA, pos=NA, snp_set=NA)[c(),,drop=FALSE]
annot.default <- data.frame(snp=NA,annot_rank=NA) # [c(),,drop=FALSE]
ld.default <- data.frame(snp1='rs0000', snp2='rs0001', build=NA,
chr=0, pos1=0, pos2=2, midpoint=1, distance=2,
rsquare=0, dprime=0, r2dp=0) # [c(),,drop=FALSE]
refFlatRaw.default <- data.frame(geneName=NA, name=NA, chrom=NA, strand=NA, txStart=NA, txEnd=NA,
cdsStart=NA, cdsEnd=NA, exonCount=NA, exonStarts=NA, exonEnds=NA, status=NA)[c(),,drop=FALSE]
# NOTE(review): a 'refSnpPos.default' object is referenced later (refsnp
# position lookup) but is never defined here -- verify it exists elsewhere.
#
# read and process command line arguments
#
# Merge user command-line arguments over the defaults.  ConformList(),
# ProcessThemes(), MatchIfNull(), AdjustModesOfArgs() and the GetData*
# functions are project helpers defined earlier in this file.
user.args <- ConformList(argv(),names(default.args),message=TRUE)
default.args <- ProcessThemes(default.args,user.args[['theme']])
args <- ModifyList(default.args,user.args);
# Record which data files were supplied explicitly by the user (before the
# prefix-derived default names are filled in later): user-supplied files
# should not be clobbered by regenerated pquery output.
userFile <- list(
recomb = !is.null(args[['recomb']]),
snpsetFile = !is.null(args[['snpsetFile']]),
refFlat = !is.null(args[['refFlat']]),
ld = !is.null(args[['ld']]),
annot = !is.null(args[['annot']])
);
args <- MatchIfNull(args,'recombAxisAlpha','recombLineAlpha')
args <- MatchIfNull(args,'recombAxisColor','recombColor')
args <- AdjustModesOfArgs(args);
# Choose the data-access strategy: with pquery available, missing data files
# can be (re)generated by running the recorded command.
if ( args[['pquery']] ){
GetData <- GetDataFromFileOrCommand
} else {
GetData <- GetDataFromFileIgnoreCommand
}
args[['showRefsnpAnnot']] <- args[['showAnnot']] & args[['showRefsnpAnnot']];
# The reference SNP is always drawn in the strongest (last) LD colour.
args[['refsnpColor']] <- args[['ldColors']][length(args[['ldColors']])];
if ( args[['dryRun']] ) {
message("Argument list:");
message(paste("\t",names(args),'=', args, "\n"));
q();
}
#
# read metal data or reload all.
#
# Load the association results: read the metal file fresh, or restore the
# previously saved .Rdata image when running in reload mode.
if ( is.null(args[['reload']]) ) {
if ( file.exists( args[['metal']]) ) {
metal <- read.file(args[['metal']]); # read.file: project helper -- presumably sniffs the delimiter; verify
} else {
stop(paste('No such file: ', args[['metal']]));
}
} else {
if ( file.exists(args[['reload']]) ) {
load( args[['reload']] ); # restores 'metal' (and possibly more) into this environment
flags[['reloaded']] <- TRUE;
} else {
stop(paste("Stopping: Can't reload from", args[['reload']]));
}
}
#
# column renaming in metal data.frame
#
# Map the user-named columns onto the canonical names (P.value, pos,
# MarkerName) the rest of the script expects.  char2Rname() mirrors the
# name munging read.table applies to column headers.
if ( char2Rname(args[['pvalCol']]) %in% names(metal) ) {
metal$P.value <- metal[ ,char2Rname(args[['pvalCol']]) ];
} else {
stop(paste('No column named',args[['pvalCol']]));
}
# Choose the p-value -> plotted-scale transformation from the observed range.
transformation <- SetTransformation( min(metal$P.value,na.rm=TRUE), max(metal$P.value,na.rm=TRUE),
args[['alreadyTransformed']] );
args[['LDTitle']] <- SetLDTitle( args[['ldCol']],args[['LDTitle']] )
if ( args[['posCol']] %in% names(metal) ) {
metal$pos <- metal[ ,args[['posCol']] ];
} else {
stop(paste('No column named',args[['posCol']]));
}
if ( char2Rname(args[['markerCol']]) %in% names(metal) ) {
metal$MarkerName <- metal[ ,char2Rname(args[['markerCol']]) ];
} else {
stop(paste('No column named',args[['markerCol']]));
}
#
# if no region and no refsnp specified, choose best snp and range of data set:
#
# Determine the plotting region and reference SNP.  Four cases:
#   1. neither region nor refsnp given -> full data range, most significant
#      SNP becomes refsnp;
#   2. region given, refsnp not       -> most significant SNP becomes refsnp;
#   3. refsnp given, region not       -> look up the SNP's position and take
#      a window of +/- flank around it;
#   4. both given                     -> use them as-is.
if ( (is.null(args[['start']]) || is.null(args[['end']]) || is.null(args[['chr']]) ) && ( is.null(args[['refsnp']]) ) )
{
args[['start']] <- min(metal$pos);
args[['end']] <- max(metal$pos);
args[['chr']] <- min(metal$chr);
args[['refsnp']] <- as.character( metal$MarkerName[ order(metal$P.value)[1] ] );
args <- ModifyList(list(prefix=paste('chr',
args[['chr']],"_",args[['start']],"-",args[['end']],sep='')),
args);
# NOTE(review): the next line looks like leftover debug code; it is harmless
# only because 'prefix' was just set above, so that value wins the merge.
args <- ModifyList(list(prefix='foo'),args);
flags[['flank']] <- FALSE;
# if region but not refsnp, choose best snp as refsnp
} else if ( !is.null(args[['start']]) && !is.null(args[['end']]) && !is.null(args[['chr']]) && is.null(args[['refsnp']] ) )
{
args <- ModifyList(
list( refsnp = as.character( metal$MarkerName[ order(metal$P.value)[1] ] ) ),
args
);
flags[['flank']] <- FALSE;
# if refsnp specifed but no region, select region flanking refsnp
} else if ( ( is.null(args[['start']]) || is.null(args[['end']]) || is.null(args[['chr']]) ) && (!is.null(args[['refsnp']]) ) )
{
args <- ModifyList( args, list( flankBP=pos2bp(args[['flank']]) ) );
refSnpPosFile <- paste(args[['refsnp']],"_pos.tbl",sep="");
command <- paste("pquery snp_pos",
" -defaults",
" -sql",
" Snp=", args[["refsnp"]],
" Build=",args[["build"]],
sep="");
# NOTE(review): refSnpPos was initialized to empty.data.frame() above and is
# never NULL here, so this showRug guard appears to be dead code -- verify.
if ( is.null(refSnpPos) ) { args[['showRug']] = FALSE }
# NOTE(review): 'refSnpPos.default' is not defined in this file portion; if
# this branch runs, R stops with "object not found".  The intended default is
# probably the empty refSnpPos data frame -- verify.
refSnpPos <- GetData( refSnpPosFile, default=refSnpPos.default, command=command, clobber=TRUE);
args[['refSnpPos']] <- as.character(refSnpPos$chrpos[1]);
args[['refSnpBP']] <- pos2bp(refSnpPos$chrpos[1]);
args <- ModifyList( args, list( start=args[['refSnpBP']] - args[['flankBP']] ) ) ;
args <- ModifyList( args, list( end=args[['refSnpBP']] + args[['flankBP']] ) );
args <- ModifyList( args, list( chr=refSnpPos$chr[1] ) );
flags[['flank']] <- TRUE;
# else refsnp and region specified
} else {
flags[['flank']] <- FALSE;
}
# change refsnp to "none" if it was null, else leave as is
args <- ModifyList( list( refsnp = "none"), args);
args <- ModifyList( args, list( start=as.character(args[['start']]) ) );
args <- ModifyList( args, list( end=as.character(args[['end']]) ) );
# Output-file prefix: "<refsnp>_<flank>" in flank mode, else "chr<c>_<s>-<e>".
if (flags[['flank']]) {
args <- ModifyList(
list( prefix = paste( # #1
args[['refsnp']],
"_", args[['flank']],
sep="")
),
args
);
} else {
args <- ModifyList(
list( prefix = paste( # #2
"chr", args[['chr']],
"_", args[['start']],
"-", args[['end']],
sep="")
),
args
);
}
# Derive default output/intermediate file names from the prefix.  Each
# ModifyList(list(name=X), args) only installs X when the user did not supply
# 'name' on the command line (values already in args win the merge).
#log
args <- ModifyList(
list( log = paste(args[['prefix']], ".log", sep="") ),
args
);
#recomb
args <- ModifyList(
list( recomb = paste(args[['prefix']], "_recomb", ".tbl", sep="") ),
args
);
# annot
args <- ModifyList(
list( annot = paste(args[['prefix']], "_annot", ".tbl", sep="") ),
args
);
# ld
args <- ModifyList(
list( ld = paste(args[['prefix']], "_ld", ".tbl", sep="") ),
args
);
# snpsets
args <- ModifyList(
list( snpsetFile = paste(args[['prefix']], "_snpsets", ".tbl", sep="") ),
args
);
# pdf
args <- ModifyList(
list( pdf = paste(args[['prefix']], ".pdf", sep="") ),
args
);
args <- ModifyList(
list( png = paste(args[['prefix']], ".png", sep="") ),
args
);
args <- ModifyList(
list( tiff = paste(args[['prefix']], ".tiff", sep="") ),
args
);
# rdata
args <- ModifyList(
list( rdata = paste(args[['prefix']], ".Rdata", sep="") ),
args
);
# refFlat
args <- ModifyList(
list( refFlat = paste(args[['prefix']], "_refFlat.txt", sep="") ),
args
);
# Cache the numeric (base-pair) versions of the display and hilite ranges.
args <- ModifyList(args, list( startBP=pos2bp(args[['start']]), endBP=pos2bp(args[['end']]) ));
args <- ModifyList(args, list( hiStartBP=pos2bp(args[['hiStart']]), hiEndBP=pos2bp(args[['hiEnd']]) ));
#######################################################
#
# now read other (non-metal) data
#
# All console output from the data-loading phase is diverted to the log file.
sink(args[['log']]);
if ( is.null(args[['reload']]) ) {
# recombination rate
command <- paste("pquery recomb_in_region",
" -defaults",
" -sql",
" RecombTable=", args[["recombTable"]],
" Chr=",args[["chr"]],
" Start=",args[["start"]],
" End=",args[["end"]],
sep="");
if ( is.null(args[['recomb']]) && ! args[['pquery']] ) { args[['showRecomb']] <- FALSE }
tryCatch(
recrate <- GetData( args[['recomb']], default=recrate.default,
command=command, clobber=!userFile[['recomb']] || args[['clobber']] ),
error = function(e) { warning(e) }
)
if ( prod(dim(recrate)) == 0 ) { args[['showRecomb']] <- FALSE }
cat("\n\n");
# snpset positions
command <- paste("pquery snpset_in_region",
" -defaults",
" -sql",
' "SnpSet=',args[["snpset"]],'"',
" Chr=",args[["chr"]],
" ChrStart=",args[["start"]],
" ChrEnd=",args[["end"]],
sep="");
rug <- GetData( args[['snpsetFile']], default=rug.default, command=command,
clobber=!userFile[['snpsetFile']] || args[['clobber']] );
cat("\n\nsnpset summary:\n");
print(summary(rug));
cat("\n\n");
# Annotation: prefer an annotation column already present in the metal file;
# otherwise fetch per-SNP annotation ranks and map them to class names.
if ( char2Rname(args[['annotCol']]) %in% names(metal) ) {
if (is.null(args[['annotOrder']])) {
args[['annotOrder']] <-
sort( unique( metal[,char2Rname(args[['annotCol']])] ) );
}
metal$annot <- MakeFactor(metal[,char2Rname(args[['annotCol']]) ], levels=args[['annotOrder']],
na.level='none')
pchVals <- rep(args[['annotPch']], length=length(levels(metal$annot)));
metal$pch <- pchVals[ as.numeric(metal$annot) ]
annot <- metal$annot
}
cat("\nR-DEBUG: Loading annotation data...\n");
if( args[['showAnnot']] && ! 'pch' %in% names(metal) ) {
command <- paste("pquery snp_annot_in_region",
" -defaults",
" -sql",
" Chr=",args[["chr"]],
" Start=",args[["startBP"]],
" End=",args[["endBP"]],
sep="");
if ( is.null(args[['annot']]) && !args[['pquery']] ) { args[['showAnnot']] <- FALSE }
annot <- GetData( args[['annot']], annot.default, command=command,
clobber=!userFile[['annot']] || args[['clobber']] )
if (prod(dim(annot)) == 0) { args[['showAnnot']] <- FALSE }
cat("\nR-DEBUG: Merging in annotation data...");
metal <- merge(metal, annot,
by.x='MarkerName', by.y="snp",
all.x=TRUE, all.y=FALSE);
cat(" Done.\n");
print(head(metal));
# annot_rank 0..7 indexes this fixed class list (1-based after the +1).
metal$annot <-
c('no annotation','framestop','splice','nonsyn','coding','utr','tfbscons','mcs44placental')[1+metal$annot_rank];
if ( is.null(args[['annotOrder']]) ) {
args[['annotOrder']] <-
c('framestop','splice','nonsyn','coding','utr','tfbscons','mcs44placental','no annotation')
}
metal$annot <- MakeFactor(metal$annot, levels=args[['annotOrder']],na.level='none')
pchVals <- rep(args[['annotPch']], length=length(levels(metal$annot)));
metal$pch <- pchVals[ as.numeric(metal$annot) ]
} else {
# No annotation available/wanted: plot everything as plain circles.
if (! 'pch' %in% names(metal)) {
metal$pch <- 21;
}
if (! 'annot' %in% names(metal) ) {
metal$annot <- "none"
metal$annot <- factor(metal$annot)
}
annot <- data.frame();
}
if (FALSE) { # scraps from above -- dead code, kept for reference
cat('else: ');
pchVals <- rep(args[['annotPch']], length=length(levels(metal$annot)));
metal$pch <- pchVals[ as.numeric(metal$annot) ]
annot <- metal$annot
print(xtabs(~annot+pch,metal));
print(metal[1:4,])
}
# Debug dump of the annotation class/symbol assignment.
sink('annotationTally.txt')
print( args[['annotOrder']] )
print(args[['annotPch']])
print(args[['annotOrder']])
print(table(metal$annot))
print(table(metal$pch))
print(xtabs(~annot+pch,metal))
sink()
# ld
command <- paste("pquery ld_in_region",
" -defaults",
" -sql",
" LDTable=", args[["ldTable"]],
" Chr=",args[["chr"]],
" Start=",args[["startBP"]],
" End=",args[["endBP"]],
sep="");
if ( is.null(args[['ld']]) && ! args[['pquery']] ) { args[['legend']] = 'none' }
ld <- GetData( args[['ld']], ld.default, command=command,
clobber=!userFile[['ld']] || args[['clobber']] )
cat("\n\n");
# Optionally add all metal positions as an extra rug track.
if (! is.null(args[['metalRug']]) ) {
metalRug <- data.frame(pos=metal$pos, snp_set=args[['metalRug']]);
origRug <- data.frame(pos=rug$pos,snp_set=rug$snp_set)
rug <- rbind(origRug,metalRug)
print(levels(rug)) # NOTE(review): levels() of a data.frame is NULL; probably meant levels(rug$snp_set)
}
save(metal,annot,recrate,ld,args,rug,file='loaded.Rdata'); # debug snapshot (hard-coded name)
if ( prod(dim(metal) ) < 1) { stop("No data read.\n"); }
# subset the data to the displayed chromosome/region
s <- metal$pos >= args[['startBP']] &
metal$pos <= args[['endBP']] &
metal$chr == args[['chr']] ;
# & metal$P.value <= args[['thresh']];
metal <- subset(metal, s);
# Merge LD-with-refsnp info into metal; 'group' indexes into ldColors
# (1 = missing LD, highest = the reference SNP itself).
refSnp <- as.character(args[['refsnp']]);
metal$group <- 1;
metal$LD <- NA;
metal$ldcut <- NA;
metal$group[metal$MarkerName == refSnp] <- length(args[['ldColors']]);
if (! is.null(ld)) {
# Find the two SNP-name columns by sniffing their contents.
snpCols <- which(apply(ld,2,Sniff,type="snp"))
if (length(snpCols) != 2) {
warning(paste("LD file doesn't smell right. (",
length(snpCols)," SNP cols)",sep=""))
assign("warningMessages",
c(warningMessages,"LD file doesn't smell right."),
globalenv());
# NOTE(review): 'break' here is not inside a loop; if this path is taken R
# raises "no loop for break/next" instead of skipping the LD merge -- verify.
break;
}
# The refsnp may appear in either SNP column; collect both orientations.
w1 <- which ( ld[,snpCols[1]] == refSnp );
w2 <- which ( ld[,snpCols[2]] == refSnp );
c1 <- c(names(ld)[snpCols[1]],names(ld)[snpCols[2]],args[['ldCol']]); # "rsquare","dprime");
c2 <- c(names(ld)[snpCols[2]],names(ld)[snpCols[1]],args[['ldCol']]); # "rsquare","dprime");
ld1 <- ld[ w1, c1, drop=FALSE ]
ld2 <- ld[ w2, c2, drop=FALSE ]
names(ld1)[1:2] <- c("refSNP","otherSNP")
names(ld2)[1:2] <- c("refSNP","otherSNP")
lld <- rbind( ld1, ld2);
if (prod(dim(lld)) > 0) {
metal <- merge(metal, lld,
by.x='MarkerName', by.y="otherSNP",
all.x=TRUE, all.y=FALSE);
if ( args[['ldCol']] %in% names(metal) ) {
metal$LD <- metal[ ,args[['ldCol']] ];
} else {
stop(paste('No column named',args[['ldCol']]));
}
metal$ldcut <- cut(metal$LD,breaks=args[['ldCuts']],include.lowest=TRUE);
metal$group <- 1 + as.numeric(metal$ldcut);
metal$group[is.na(metal$group)] <- 1;
metal$group[metal$MarkerName == refSnp] <- length(args[['ldColors']])
} else {
assign("warningMessages",c(warningMessages,'No usable LD information for reference SNP.'), globalenv());
warning("No usable LD information.");
args[['legend']] <- 'none';
}
}
save(metal,refSnp,args,file='temp.Rdata'); # debug snapshot (hard-coded name)
# Gene annotation (refFlat) for the gene panel.
command <- paste("pquery refFlat_in_region",
" -defaults",
" -sql",
" Chrom=", chr2chrom(args[["chr"]]),
" Start=",args[["start"]],
" End=",args[["end"]],
" Build=",args[["build"]],
sep="");
if (is.null(args[['refFlat']]) && ! args[['pquery']]) { args[['showGenes']] <- FALSE }
refFlatRaw <- GetData( args[['refFlat']], refFlatRaw.default, command=command,
clobber = !userFile[['refFlat']] || args[['clobber']] );
summary(refFlatRaw);
# subset the refFlatdata to transcripts overlapping the displayed region
s <- refFlatRaw$txEnd >= args[['startBP']] &
refFlatRaw$txStart <= args[['endBP']] &
refFlatRaw$chrom == chr2chrom(args[['chr']]) ;
refFlatRaw <- subset(refFlatRaw, s);
save(refFlatRaw,args,file="refFlatRaw.Rdata");
flatten.bed(refFlatRaw,multiplier=1/args[['unit']]) -> refFlat;
summary(refFlat);
# Convert all positions from base pairs to display units (default: Mb).
metal$pos <- metal$pos / args[['unit']];
recrate$pos <- recrate$pos / args[['unit']];
rug$pos <- rug$pos / args[['unit']];
cat("recrate summary:\n");
print(summary(recrate));
cat("\n\n");
cat("LD summary:\n");
print(summary(ld));
cat("\n\n");
cat("metal summary:\n");
print(summary(metal));
cat("\n\n");
# Persist the processed data so a later run can use reload=<prefix>.Rdata.
save(metal,annot,recrate,refFlatRaw,refFlat,rug,file=args[['rdata']]);
} else {
load(args[['rdata']]);
}
# Optional user hook: run arbitrary R after loading, before plotting.
if (is.character(args[['prelude']]) && file.exists(args[['prelude']])) {
source(args[['prelude']]);
}
# Count the rug tracks to be drawn above the main panel.
if ( prod(dim(rug)) == 0 || !("snp_set" %in% names(rug)) ) {
nrugs <- 0;
} else {
nrugs <- length(levels(rug$snp_set));
}
# NOTE(review): the first xRange assignment is dead -- it is immediately
# overwritten by the requested display range on the next line.
xRange <- range(metal$pos,na.rm=T);
xRange <- as.numeric(c(args[['start']],args[['end']])) / args[['unit']];
refFlat <- refFlat[ which( (refFlat$start <= xRange[2]) & (refFlat$stop >= xRange[1]) ), ]
# NOTE(review): in the min() call, na.rm=T sits INSIDE c() (becoming a data
# element equal to 1) rather than being an argument of min(); the max() call
# below passes it correctly.  Probably a bug -- verify intended yRange.
yRange <- c(min(c(args[['ymin']],transformation(metal$P.value),na.rm=T)),
max(c(args[['ymax']],transformation(metal$P.value)*1.1),na.rm=T));
recrateRange <- c(0,max(c(100,recrate$recomb),na.rm=T));
if (args[['experimental']]) {
# Experimental: flip the recombination curve upside down.
recrate$recomb <- max(c(100,recrate$recomb),na.rm=T) - recrate$recomb;
recrateRange <- c(0,max(c(100,recrate$recomb),na.rm=T));
}
recrateRange <- rev(recrateRange);
print("recrateRange: ");
print(recrateRange);
refSnp <- as.character(args[['refsnp']]);
refidx <- match(refSnp, metal$MarkerName);
if (!args[['showRefsnpAnnot']]) {
metal$pch[refidx] <- 23; # use a diamond for ref snp
}
# Render one file per requested output format; zplot() draws the main page
# and grid.log() appends the run-summary page.
if ('pdf' %in% args[['format']]) {
pdf(file=args[['pdf']],width=args[['width']],height=args[['height']],version='1.4');
if ( prod(dim(metal)) == 0 ) {
message ('No data to plot.');
} else {
zplot(metal,ld,recrate,refidx,nrugs=nrugs,args=args,postlude=args[['postlude']]);
grid.newpage();
}
grid.log(args,metal);
dev.off();
}
#
# N.B. *** old png and tiff code no longer being maintained. No guarantees that this works anymore. ***
#
if ('png' %in% args[['format']]) {
# Raster devices here do not support alpha, so force full opacity.
args[['recombLineAlpha']] = 1;
args[['recombFillAlpha']] = 1;
args[['hiliteAlpha']] = 1;
args[['frameAlpha']]=1;
args[['hiAlpha']]=1;
args[['rugAlpha']] = 1;
args[['refsnpLineAlpha']] = 1;
args[['refsnpTextAlpha']]=1;
png(file=args[['png']],
width=args[['width']]*100,
height=args[['height']]*100);
if ( prod(dim(metal)) == 0 ) {
message ('No data to plot.');
} else {
assign("args",args,globalenv());
zplot(metal,ld,recrate,refidx,nrugs=nrugs,args=args,postlude=args[['postlude']]);
}
dev.off();
}
#
# N.B. *** old png and tiff code no longer being maintained. No guarantees that this works anymore. ***
#
if ('tiff' %in% args[['format']]) {
args[['recombLineAlpha']] = 1;
args[['recombFillAlpha']] = 1;
args[['hiliteAlpha']] = 1;
args[['frameAlpha']]=1;
args[['hiAlpha']]=1;
args[['rugAlpha']] = 1;
args[['refsnpLineAlpha']] = 1;
args[['refsnpTextAlpha']]=1;
tiff(file=args[['tiff']],
width=args[['width']]*100,
height=args[['height']]*100);
if ( prod(dim(metal)) == 0 ) {
message ('No data to plot.');
} else {
assign("args",args,globalenv());
zplot(metal,ld,recrate,refidx,nrugs=nrugs,args=args,postlude=args[['postlude']]);
}
dev.off();
}
# Append the ascii run summary and the gene list to the log file.
sink(args[['log']], append=TRUE);
grid.log(args,metal,ascii=TRUE);
cat('\n\n\n');
cat("List of genes in region\n");
cat("#######################\n");
geneList <- make.gene.list(refFlat,unit=args[['unit']]);
if (! is.null(geneList)) {
digits <- 7 + ceiling(log10(max(geneList$stop)));
print(geneList,digits=digits);
}
cat('\n\n\n');
sink();
save(metal,refFlat,ld,recrate,refSnpPos,args,file='end.Rdata') # final debug snapshot (hard-coded name)
CleanUp(args,refSnpPos,recrate,rug,ld,refFlatRaw);
date();
| /locuszoom.R | no_license | freestatman/LocusZoom | R | false | false | 73,588 | r | #! /usr/bin/Rscript --vanilla
# --default-packages=utils,stats,lattice,grid,getopts
# need to check if the line above works on the web deployment machine.
# Copyright 2010 Randall Pruim, Ryan Welch
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Attach the packages the script depends on ('grid' for the drawing engine,
# 'lattice' for the legend key helpers).
# NOTE(review): require() only warns and returns FALSE when a package is
# missing, so a failed load surfaces as a confusing error much later;
# library() would stop immediately and is generally preferred here.
require(stats);
require(utils);
require(grid);
require(lattice);
# Script-wide accumulators, appended to via assign(..., globalenv()) as genes
# are omitted from the gene panel or warnings are raised during processing.
omittedGenes <- character(0); # will be set in globalenv()
warningMessages <- character(0); # will be set in globalenv()
################################################################################################
# function definitions
################################################################################################
################################################################################
#
# takes string and converts '' and 'null' (case insensitive) to NULL, else unchanged.
#
#
# as.filename: normalize a command-line string into a filename or NULL.
# Returns NULL for anything that is not a single character string, for the
# empty string, and for the literal word "null" in any case; otherwise the
# string is returned unchanged.
#
# Hardened: a character vector of length != 1 previously fed a multi-element
# comparison into '||' (historically a warning, an error in R >= 4.3); such
# input is now treated as "no filename" and yields NULL.
#
as.filename <- function(x) {
  if (!is.character(x) || length(x) != 1) {
    return(NULL)
  }
  if (x == '' || toupper(x) == 'NULL') {
    return(NULL)
  }
  return(x)
}
################################################################################
#
# modify column names the way R does
#
#
# char2Rname: mimic R's munging of column names, mapping each occurrence of
# '-', '=' and ' ' to '.' (as read.table/make.names would for a header).
# Vectorized over x.
#
char2Rname <- function(x) {
  # One character-class substitution covers all three characters that the
  # previous per-character gsub() calls handled.
  gsub("[-= ]", ".", x)
}
################################################################################
#
# build a factor
#
#
# MakeFactor: coerce x to a factor with exactly the requested levels.  Any
# value not found in 'levels' (which factor() turns into NA) is recoded to
# the catch-all level 'na.level', which is appended to the level set only
# when such values are present.
#
MakeFactor <- function(x, levels, na.level = NA) {
  out <- factor(x, levels = levels)
  if (anyNA(out)) {
    levels(out) <- c(levels(out), na.level)
    out[is.na(out)] <- na.level
  }
  out
}
################################################################################
#
# pretty print scientific notation from log10(x)
#
#
# log2sci: pretty-print a base-10 logarithm in scientific notation.
# Given x = log10(v), decompose x into an integer exponent e = floor(x) and
# a mantissa m = 10^(x - e) in [1, 10), and format the result as "<m>E<e>",
# e.g. log2sci(-7.3) -> "5.01E-8".
#
log2sci <- function(x) {
  exponent <- floor(x)
  mantissa <- 10^(x - exponent)
  paste0(format(mantissa, digits = 3), "E", exponent)
}
################################################################################
#
# Sniff a vector to see if it smells
#
#
# Sniff: heuristically decide whether the first n entries of 'vec' all look
# like values of the given type.  Only type "snp" is currently supported: an
# entry "smells like" a SNP when it contains "rs", or a "chr<digits>:<digit>"
# or "chr<letters>:<digit>" pattern.  Returns TRUE only when every inspected
# entry matches.
#
# Fixed: an empty 'vec' previously indexed vec[1:0] (an NA element), and the
# resulting length(yes) == n == 0 made empty input incorrectly "smell like"
# SNPs.  Empty input (or n < 1) now returns FALSE.
#
Sniff <- function(vec, type = c('snp'), n = 10) {
  type <- match.arg(type)
  n <- min(n, length(vec))
  if (n < 1) {
    return(FALSE)
  }
  if (type == 'snp') {
    head.vec <- vec[seq_len(n)]
    yes <- union(union(
      grep('rs', head.vec),
      grep('chr[[:digit:]]+:[[:digit:]]', head.vec)),
      grep('chr[[:alpha:]]+:[[:digit:]]', head.vec)
    )
    return(length(yes) == n)
  }
  FALSE
}
################################################################################
#
# Which side should legend go on?
#
# Pick the side of the plot for the LD legend: the side farther from the
# bulk of the strong signals (p-values above half the maximum).
AutoLegendSide <- function(pval, pos, posRange = range(pos)) {
  threshold <- .5 * max(pval)
  strong <- pos[pval > threshold]
  center <- mean(posRange)
  leftGap <- center - min(strong)
  rightGap <- max(strong) - center
  if (leftGap > rightGap) 'right' else 'left'
}
################################################################################
#
# choose a transformation for p-values
#
# Choose the p-value transformation from the observed range (mn, mx):
# already-transformed data (or max > 1, i.e. already on the -log10
# scale) passes through; min < 0 implies a log10 scale, so negate;
# otherwise apply -log10 to raw p-values.
SetTransformation <- function(mn, mx, alreadyTransformed = FALSE) {
  if (alreadyTransformed || mx > 1) {
    return(identity)
  }
  if (mn < 0) {
    return(function(x) -x)
  }
  function(x) -log10(x)
}
################################################################################
#
# Set titles correctly for D' and r^2
#
# Default legend title for an LD column when the caller supplied none:
# D' for 'dprime', a plotmath r^2 expression for 'rsquare', "" otherwise.
# An explicit `title` always wins.
SetLDTitle <- function(col, title) {
  if (is.null(title)) {
    if (col == 'dprime') return("D'")
    if (col == 'rsquare') return(expression(r^2))
    return("")
  }
  title
}
################################################################################
#
# extends default modifyList so that it handles NULL values in list differently.
#
# Recursive variant of utils::modifyList: entries of `val` overwrite the
# matching entries of `x`, and nested lists are merged recursively. A
# NULL value in `val` is ignored unless replaceWithNull = TRUE, in which
# case standard [[<- NULL semantics delete the entry from x.
ModifyList <- function(x, val, replaceWithNull = FALSE) {
  stopifnot(is.list(x), is.list(val))
  keys <- names(x)
  for (key in names(val)) {
    new_value <- val[[key]]
    if (key %in% keys && is.list(x[[key]]) && is.list(new_value)) {
      x[[key]] <- ModifyList(x[[key]], new_value, replaceWithNull = replaceWithNull)
    } else if (!is.null(new_value) || replaceWithNull) {
      x[[key]] <- new_value
    }
  }
  x
}
################################################################################
#
# like lapply but on a subset of a list; rest of list unchanged.
#
# Apply `fun` to the elements of list `x` whose names appear in `names`,
# leaving every other element -- and any NULL/absent entry -- untouched.
sublapply <- function(x, names = names(x), fun) {
  fun <- as.function(fun)
  for (nm in names) {
    if (!is.null(x[[nm]])) {
      x[[nm]] <- fun(x[[nm]])
    }
  }
  x
}
################################################################################
#
# like modifyList, but works when names of val are unique prefixes of names of x
#
# Rename entries of list `x` so their names match the canonical names in
# `names`, allowing unique (case-insensitive by default) prefixes.
# Entries without a unique match keep their original name; pass
# message = TRUE to be notified about them (FALSE/NULL suppresses).
ConformList <- function(x, names, case.sensitive = FALSE, message = FALSE) {
  own.ind <- 0
  for (name in names(x)) {
    own.ind <- own.ind + 1
    if (case.sensitive) {
      match.ind <- pmatch(name, names)
    } else {
      match.ind <- pmatch(toupper(name), toupper(names))
    }
    if (!is.na(match.ind)) {
      names(x)[own.ind] <- names[match.ind]
    } else if (!is.null(message) && !identical(message, FALSE)) {
      # BUG FIX: the guard used to be `!is.null(message)`, so the default
      # message = FALSE still emitted the notice; only NULL suppressed it.
      base::message(paste("No unique match for ", name, "=", x[[own.ind]], sep = ""))
    }
  }
  x
}
################################################################################
#
# like modifyList, but works when names of val are unique prefixes of names of x
#
# modifyList-style update where the names of `val` may be unique
# prefixes of the names of `x`; extra arguments go to ConformList.
PModifyList <- function(x, val, ...) {
  conformed <- ConformList(val, names(x), ...)
  ModifyList(x, conformed)
}
################################################################################
#
# Modify the list args according to the value of theme
#
# Apply a single named theme to `args`: a character `theme` "foo" is
# dispatched to the function foo.theme(args); any non-character value
# is a no-op and `args` is returned unchanged.
ProcessOneTheme <- function(args, theme) {
  if (!is.character(theme)) {
    return(args)
  }
  handler <- paste(theme, '.theme', sep = '')
  do.call(handler, list(args = args))
}
################################################################################
#
# process a list of themes in order
#
# Apply a comma-separated list of theme names to `args`, left to right;
# a non-character `themeString` leaves `args` untouched.
ProcessThemes <- function(args, themeString) {
  if (!is.character(themeString)) {
    return(args)
  }
  for (theme in unlist(strsplit(themeString, ","))) {
    args <- ProcessOneTheme(args, theme)
  }
  args
}
################################################################################
#
# Some themes
#
# "Publication" theme contributed by Ryan: PDF output, larger fonts and
# axes, a frameless transparent legend, and four gene rows.
ryan.theme <- function(args) {
  ModifyList(args, list(
    snpset = NULL,
    format = "pdf",
    refDot = NULL,
    geneFontSize = 1.1,
    refsnpTextSize = 1.5,
    axisTextSize = 1.45,
    legendSize = 1,
    legendFrameAlpha = 0,
    legendAlpha = 0,
    axisSize = 1.45,
    recombPos = 3,
    xlabPos = -2.75,
    height = 9,
    rfrows = 4
  ))
}
# Theme aliases: "publication" and "pub" resolve to the ryan theme via
# ProcessOneTheme's "<name>.theme" lookup.
publication.theme <- ryan.theme;
pub.theme <- ryan.theme;
# Theme forcing axis text, rug ticks and the plot frame to solid black.
black.theme <- function(args) {
  ModifyList(args, list(
    axisTextColor = 'black',
    rugColor = 'black',
    frameColor = 'black'
  ))
}
# GIANT-consortium theme: builds on the ryan and black themes, then adds
# many gene rows, an overlaid cyan recombination track, an automatic
# legend position, and a custom annotation glyph/color scheme.
giant.theme <- function(args) {
  overrides <- list(
    rfrows = 10,
    recombOver = TRUE,
    recombAxisColor = 'black',
    recombAxisAlpha = 1,
    legend = 'auto',
    showAnnot = TRUE,
    showRefsnpAnnot = FALSE,
    annotPch = '25,21,21,21,21,21,24,24,24',
    recombColor = 'cyan',
    ldColors = 'gray50,blue,green,yellow,orange,red,purple3'
  )
  ModifyList(black.theme(ryan.theme(args)), overrides)
}
#############################################################
#
# Remove temporary files (used in final clean-up)
#
# Delete the files backing objects fetched via pquery (those whose
# "method" attribute is 'pquery'). Returns a list of every file path
# considered, whether or not it was actually deleted.
RemoveTempFiles <- function(...) {
  objects <- list(...)
  removed <- list()
  if (length(objects) < 1) {
    return(removed)
  }
  methods <- unlist(lapply(objects, attr, "method"))
  files <- unlist(lapply(objects, attr, "file"))
  for (i in seq_along(methods)) {
    if (methods[i] == 'pquery') {
      file.remove(files[i])
    }
    removed <- c(removed, files[i])
  }
  removed
}
#############################################################
#
# Cleaning up at the end
#
# Final clean-up hook: when args[['clean']] is set, remove the temporary
# files attached to the objects in `...` (see RemoveTempFiles) and tell
# the user which files were removed. Returns the removed paths invisibly.
CleanUp <- function(args, ...) {
  if (args[['clean']]) {
    # BUG FIX: user-facing typo ("begin removed" -> "being removed").
    message("\nCleaning up. The following files are being removed: ")
    files <- RemoveTempFiles(...)
    message(paste("\t", files, "\n"))
    invisible(files)
  }
}
#############################################################
#
# Obtain data. Data can be specified using a file or
# a pquery command. Pquery is a tool that simplifies
# querying SQL databases. Most users will simply pass in files
# Much of this is handled by the Python wrapper anyway.
#
# Load a table from `file`; when the file is missing (or clobber = TRUE),
# first run the pquery `command` with output redirected into `file`, then
# read it. Extra arguments are forwarded to read.file(). The result is
# tagged with provenance attributes:
#   "file"    - path the data came from
#   "command" - shell command (possibly never executed)
#   "method"  - 'file' if read directly, 'pquery' if fetched by command
# NOTE(review): `default` is accepted but never used on failure -- confirm.
GetDataFromFileOrCommand <- function(file, command, default=data.frame(), clobber=FALSE, verbose=TRUE,...) {
method <- "file";
if ( !file.exists(file) || clobber ) {
command <- paste(command,">",file);
if (verbose) { cat(paste("Getting data with",command,sep="\n")); }
# side effect: shell out to pquery, writing `file`
system(command);
method <- 'pquery';
if (! clobber) {
# record the missing file; warningMessages lives in globalenv()
assign("warningMessages",c(warningMessages,paste("Missing file:",file)), globalenv());
}
}
results <- read.file(file,...) ;
attr(results, "file") <- file;
attr(results, "command") <- command;
attr(results, "method") <- method;
return(results);
}
#############################################################
#
# Obtain data. Data can be specified using a file. When
# pquery is not available, this version will ignore the command
# and return default data if the file is missing.
#
# File-only variant of GetDataFromFileOrCommand for installations that
# lack pquery: `command` (and `clobber`/`verbose`) are accepted only for
# interface compatibility. Returns `default` when `file` is NULL or does
# not exist; otherwise reads the file (extra args go to read.file) and
# tags the result with "file" and "method" attributes.
GetDataFromFileIgnoreCommand <- function(file, command, default = data.frame(),
                                         clobber = FALSE, verbose = TRUE, ...) {
  if (is.null(file) || !file.exists(file)) {
    return(default)
  }
  data <- read.file(file, ...)
  attr(data, "file") <- file
  attr(data, "method") <- "file"
  data
}
#############################################################
#
# return an empty data from with some additonal attributes
#
# Build an empty data.frame carrying the provenance attributes used by
# the data loaders ("file", "command", "method").
# BUG FIX: the original never returned `result` -- its last expression
# was an attr<-() replacement call, so callers received the *method
# string* (the RHS of the assignment) instead of the annotated frame.
empty.data.frame <- function(
    file = "none", command = "none", method = "empty.data.frame") {
  result <- data.frame()
  attr(result, "file") <- file
  attr(result, "command") <- command
  attr(result, "method") <- method
  result
}
#############################################################
#
# This is used to clone values from user specified arguemnts
# to other arguments that the user did not set.
#
# If the option args[[toupdate]] is unset (NULL), copy the value from
# args[[updatewith]]; lets one user option default to another.
MatchIfNull <- function(args, toupdate, updatewith) {
  if (is.null(args[[toupdate]])) {
    args[[toupdate]] <- args[[updatewith]]
  }
  args
}
#############################################################
#
# All arguments are passed in with mode character (i.e., as strings)
# This functions converts arguments to the correct mode for
# internal use.
#
# All command-line options arrive as strings; coerce each known option
# to its working type. Conversions are grouped by target type and applied
# via sublapply, which skips absent/NULL options.
AdjustModesOfArgs <- function(args) {
  split_as <- function(coerce) {
    function(x) coerce(unlist(strsplit(x, ",")))
  }
  conversions <- list(
    # fractional settings: alphas, sizes, positions, dot scales
    list(c('legendAlpha', 'width', 'height',
           'frameAlpha', 'hiAlpha', 'rugAlpha',
           'refsnpLineAlpha', 'recombFillAlpha', 'recombLineAlpha', 'refsnpTextAlpha',
           'ymin', 'ymax', 'legendSize', 'refsnpTextSize', 'axisSize', 'axisTextSize',
           'geneFontSize', 'smallDot', 'largeDot', 'refDot'),
         as.numeric),
    # file-name options ('' and 'null' collapse to NULL)
    list(c('metal', 'recomb', 'ld', 'refSnpPosFile', 'snpsetFile', 'annot', 'refFlat'),
         as.filename),
    # whole-number scalars
    list(c('chr', 'unit', 'xnsmall'), as.integer),
    # on/off switches
    list(c('experimental', 'clobber', 'recombOver', 'recombFill', 'pquery',
           'showRecomb', 'showAnnot', 'showRefsnpAnnot', 'bigDiamond',
           'showPartialGenes', 'shiftGeneNames',
           'clean', 'dryRun', 'legendMissing'),
         as.logical),
    # comma-separated numeric vectors
    list(c('ldCuts', 'xat', 'yat', 'annotPch'), split_as(as.numeric)),
    # comma-separated integer vectors
    list(c('rfrows'), split_as(as.integer)),
    # comma-separated character vectors
    list(c('ldColors', 'format', 'annotOrder'), split_as(identity))
  )
  for (conv in conversions) {
    args <- sublapply(args, conv[[1]], conv[[2]])
  }
  args
}
#############################################################
#
# Returns text description of unit along chromosome depending
# on value of unit where unit is a number of base pairs
#
# Human-readable axis label for a base-pair unit multiplier:
# 1e6 -> "(Mb)", 1e3 -> "(Kb)", anything else -> "".
unit2char <- function(unit) {
  if (unit == 1000000) return("(Mb)")
  if (unit == 1000) return("(Kb)")
  ""
}
#############################################################
#
# convert position that might include mb, kb into a base pair position
#
# Convert a chromosomal position that may carry a "kb" or "Mb" suffix
# (case insensitive) into a numeric base-pair position.
pos2bp <- function(pos) {
  text <- as.character(pos)
  scale <- 1
  if (grepl("kb", text, ignore.case = TRUE)) {
    scale <- 1000
    text <- sub("kb", "", text, ignore.case = TRUE)
  }
  if (grepl("mb", text, ignore.case = TRUE)) {
    scale <- 1000000
    text <- sub("mb", "", text, ignore.case = TRUE)
  }
  as.numeric(text) * scale
}
#############################################################
#
# read file, using filename to determine method.
#
# Read a data file, dispatching on its name: .csv -> read.csv,
# .Rdata -> load, anything else -> read.table. Returns NULL (after a
# message) when the file does not exist.
read.file <- function(file, header = TRUE, na.strings = c('NA', '', '.', 'na'), ...) {
  if (!file.exists(file)) {
    # BUG FIX: this message used to sit *after* return(NULL) and so was
    # unreachable; emit it before returning.
    message(paste("Missing file: ", file))
    return(NULL)
  }
  if (regexpr("\\.csv", file) > 0) {
    return(read.csv(file, header = header, na.strings = na.strings, ...))
  }
  if (regexpr("\\.Rdata", file) > 0) {
    # BUG FIX: load() returns the *names* of the restored objects, not
    # their values; load into a private environment and fetch the first
    # restored object.
    env <- new.env(parent = emptyenv())
    varName <- load(file, envir = env)
    return(get(varName[1], envir = env))
  }
  return(read.table(file, header = header, na.strings = na.strings, ...))
}
#############################################################
#
# write file, using filename to determine method.
#
# Write `x` to a file, dispatching on its name: .csv -> CSV output,
# .Rdata -> save(), anything else -> write.table. Refuses to overwrite
# an existing file unless `clobber` or `append` is set.
write.file <- function(x, file, append = FALSE, clobber = TRUE, na = 'NA') {
  if (file.exists(file) && !clobber && !append) {
    # BUG FIX: this message used to come after return(NULL), unreachable.
    message(paste("File already exists: ", file))
    return(NULL)
  }
  if (regexpr("\\.csv", file) > 0) {
    if (append) {
      # BUG FIX: write.csv() rejects/ignores append=; use write.table
      # with CSV conventions (no header repeat) when appending.
      return(write.table(x, file, append = TRUE, sep = ",",
                         qmethod = "double", col.names = FALSE, na = na))
    }
    return(write.csv(x, file, na = na))
  }
  if (regexpr("\\.Rdata", file) > 0) {
    # BUG FIX: the original called the nonexistent save.csv(); use save().
    return(save(x, file = file))
  }
  return(write.table(x, file, append = append, na = na))
}
#############################################################
#
# Converter from chrom (e.g., chr13) to chr (e.g., 13) format
#
# Convert UCSC-style chromosome names ("chr13", "chrX", ...) to numeric
# codes (X=23, Y=24, mito=25, XY=26). Vectorized.
# BUG FIX: chr2chrom(25) emits "mito" (no "chr" prefix), but this reverse
# mapping compared substring(x, 4) -- i.e. "o" -- against "mito", so
# mitochondrial input never matched and became NA. Detect "mito" on the
# full input string instead (the old substring check is kept for inputs
# spelled "chrmito").
chrom2chr <- function(x) {
  y <- substring(x, first = 4)
  y[x == 'mito'] <- '25'
  y[y == 'X'] <- '23'
  y[y == 'Y'] <- '24'
  y[y == 'mito'] <- '25'
  y[y == 'XY'] <- '26'
  as.numeric(y)
}
#############################################################
#
# Converter from chr (e.g., 13) to chrom (e.g., chr13) format
#
# Convert a numeric chromosome code to UCSC-style naming; inverse of
# chrom2chr for autosomes and the special codes 23-26. Scalar input only.
chr2chrom <- function(x) {
  if (x == 23) return("chrX")
  if (x == 24) return("chrY")
  if (x == 25) return("mito")
  if (x == 26) return("chrXY")
  paste0("chr", as.numeric(x))
}
#############################################################
#
# Linearly rescale values to fit in interval
# If all values are equal, then return a vector the same length as x
# with all values set to constant (by default the larger end of the interval).
#
# Linearly map `x` from the interval `original` onto `transformed`,
# clamping results to the target interval. If either interval is not a
# numeric pair, `x` is returned unchanged.
rescale <- function(x, original = c(min(x), max(x)), transformed = c(0, 1)) {
  valid_pair <- function(iv) length(iv) == 2 && is.numeric(iv)
  if (!valid_pair(transformed) || !valid_pair(original)) {
    return(x)
  }
  lo <- original[1]; hi <- original[2]
  out_lo <- transformed[1]; out_hi <- transformed[2]
  scaled <- out_hi - (hi - x) / (hi - lo) * (out_hi - out_lo)
  scaled[scaled < out_lo] <- out_lo
  scaled[scaled > out_hi] <- out_hi
  scaled
}
#############################################################
#
# Flatten information originally in UCSC bed format.
# Tailored to fit nominclature and formatting used in files
# generated by Peter Chines.
#
# Flatten UCSC refFlat/bed gene records (one row per transcript, with
# comma-packed exon lists) into one row per feature. Output `type` codes:
#   0 = transcription region (txStart..txEnd)
#   1 = individual exon (exonStarts..exonEnds)
#   2 = coding region (cdsStart..cdsEnd)
# Positions are scaled by `multiplier` (default .001, i.e. kb).
flatten.bed <- function(x, multiplier = .001) {
  # BUG FIX: the empty-input guard used to test dim(flatten.bed) -- the
  # function object itself, for which prod(NULL) == 1 -- so it never
  # fired. Test the argument and return the harmless placeholder frame.
  if (prod(dim(x)) == 0) {
    df <- data.frame(
      chrom = c("chr0", "chr0", "chr0"),
      chr = c(0, 0, 0),
      start = c(0, 0, 0),
      stop = c(2, 2, 2),
      type = c(0, 2, 1),
      name = c('none', 'none', 'none'),
      nmName = c('none', 'none', 'none'),
      strand = c('+', '+', '+')
    )
    return(df)
  }
  x$geneName <- as.character(x$geneName)
  x$name <- as.character(x$name)
  x$strand <- as.character(x$strand)
  lx <- dim(x)[1]
  # Unpack the comma-separated per-exon start/stop lists.
  blockStart <- unlist(lapply(
    strsplit(as.character(x$exonStarts), split = ','), as.numeric))
  blockEnd <- unlist(lapply(
    strsplit(as.character(x$exonEnds), split = ','), as.numeric))
  # Replicate per-transcript fields once per exon.
  nameDup <- rep(x$geneName, times = x$exonCount)
  nmNameDup <- rep(x$name, times = x$exonCount)
  startDup <- rep(x$txStart, times = x$exonCount)
  chromDup <- rep(x$chrom, times = x$exonCount)
  strandDup <- rep(x$strand, times = x$exonCount)
  df <- data.frame(
    chrom = c(x$chrom, x$chrom, chromDup),
    chr = chrom2chr(c(x$chrom, x$chrom, chromDup)),
    start = c(x$txStart, x$cdsStart, blockStart),
    stop = c(x$txEnd, x$cdsEnd, blockEnd),
    type = c(rep(0, lx), rep(2, lx), rep(1, length(startDup))),
    name = c(x$geneName, x$geneName, nameDup),
    # BUG FIX: exon rows used to repeat nameDup (gene symbols) here;
    # use the accession duplicates (nmNameDup), which were computed but
    # never used in the original.
    nmName = c(x$name, x$name, nmNameDup),
    strand = c(x$strand, x$strand, strandDup)
  )
  df$start <- df$start * multiplier
  df$stop <- df$stop * multiplier
  invisible(df)
}
#############################################################
#
# display reference SNP name and vertical line
#
# Draw the reference SNP label at the top of the current panel and a
# vertical marker line at its position (native x units). Colors, alpha
# and text size come from the global `args` option list.
grid.refsnp <- function(name,pos) {
# SNP name, centered over the marker position just below the panel top
grid.text(as.character(name),x=unit(pos,"native"), y=unit(.95,'npc'), just=c("center","top"),
gp=gpar(cex=args[['refsnpTextSize']],col=args[['refsnpTextColor']],alpha=args[['refsnpTextAlpha']])
);
# vertical guide from the panel bottom up to just below the label
grid.segments(
x0=unit(pos,"native"),
x1=unit(pos,"native"),
y0=unit(0,"npc"),
y1=unit(1,'npc') - unit(1.5,"lines"),
gp=gpar(
col=args[['refsnpLineColor']],
lwd=2,
alpha=args[['refsnpLineAlpha']])
);
}
#############################################################
#
# calculte width of text
#
# Width of `text` as a grid unit, under the graphical parameters `gp`.
textWidth <- function(text = "", gp = gpar()) {
  grobWidth(textGrob(text, gp = gp))
}
#############################################################
#
# generate text with arrow (or just compute width of same)
# this is a bit crude and clunky
#
# Build (without drawing) a gTree of `text` plus a strand-direction
# arrow, centered at (x, y). `direction` in '+','forward','->','>',
# 'right' points the arrow right; anything else points left. With
# widthOnly = TRUE, only the total width (text + arrow allowance) in
# inches is returned. Width attributes are stashed on the result for
# callers (panel.flatbed reads attr(., 'width')).
arrowText <- function(text, x=unit(.5,'npc'), y=unit(.5,'npc'), direction='+', name=NULL, gp=gpar(),
                      check.overlap=TRUE, widthOnly=FALSE) {
  tWidth <- textWidth(text, gp)
  aWidth <- textWidth('xx,', gp)
  if (widthOnly) {
    # BUG FIX: this used to be
    #   return( convertWidth(tWidth + aWidth),unitTo='inches',valueOnly=TRUE )
    # -- a malformed multi-argument return() that raised an error
    # whenever widthOnly was requested.
    return(convertWidth(tWidth + aWidth, unitTo = 'inches', valueOnly = TRUE))
  }
  cWidth <- .1 * textWidth(',', gp)
  # mult encodes the arrow direction: +1 forward/right, -1 otherwise.
  if (direction %in% c('+', 'forward', '->', '>', 'right')) {
    mult <- 1
  } else {
    mult <- -1
  }
  # Text grob, shifted half an arrow-width opposite the arrow.
  tg <- textGrob(text,
                 x = x - mult * .5 * aWidth, y = y,
                 check.overlap = check.overlap,
                 gp = gp,
                 name = "label")
  # Arrow grob placed just past the end of the text.
  ag <- linesGrob(
    x = unit.c(x - mult * .5 * aWidth + .5 * mult * tWidth + mult * unit(.005, 'npc'),
               x + mult * .5 * aWidth + .5 * mult * tWidth),
    y = unit.c(y, y),
    name = "arrow",
    arrow = arrow(type = 'open',
                  angle = 20,
                  length = .75 * textWidth('x'))
  )
  # Debug rectangles from the original implementation: built but never
  # drawn; kept for parity while this layout code remains "crude".
  rect1 <- rectGrob(x = x - .5 * mult * aWidth, y = y, width = tWidth, height = .1)
  rect2 <- rectGrob(x = x + .50 * mult * tWidth, y = y, width = aWidth, height = .1, gp = gpar(col = "red"))
  result <- gTree(children = gList(tg, ag), name = name)
  attr(result, 'width') <- convertX(tWidth + aWidth, 'inches')
  attr(result, 'twidth') <- convertX(grobWidth(tg), 'inches')
  attr(result, 'awidth') <- convertX(grobWidth(ag), 'inches')
  attr(result, 'cWidth') <- cWidth
  attr(result, 'tWidth') <- tWidth
  attr(result, 'aWidth') <- aWidth
  return(result)
}
#############################################################
#
# hilite a particular region on the plot
#
# Shade a horizontal interval of the current panel (native x units) with
# a translucent rectangle; used to highlight a genomic region of interest.
panel.hilite <- function(range = c(lo, hi), lo, hi, col = "transparent",
                         fill = "blue", alpha = .1) {
  grid.rect(x = unit(range[1], "native"),
            width = unit(range[2] - range[1], "native"),
            hjust = 0,
            gp = gpar(fill = fill, col = col, alpha = alpha))
}
#############################################################
#
# ribbonLegend from RGraphics example
#
# Construct (without drawing) a vertical color-ribbon legend gTree,
# following the ribbonLegend example from Murrell's "R Graphics".
# Children (rects, ticks, labels) come from ribbonKids(); the layout
# viewports come from ribbonVps().
ribbonLegend <- function (nlevels = NULL, breaks = NULL, cols,
scale = range(breaks),
margin = unit(0.5, "lines"), gp = NULL, vp = NULL, name = NULL)
{
gTree(nlevels = nlevels, breaks = breaks, cols = cols, scale = scale,
children = ribbonKids(nlevels, breaks, cols, scale),
childrenvp = ribbonVps(nlevels, breaks, margin, scale),
gp = gp, vp = vp, name = name, cl = "ribbonLegend")
}
# grid widthDetails S3 method for ribbonLegend gTrees: the legend's total
# width is the sum of its layout's column widths.
widthDetails.ribbonLegend <- function (x)
{
sum(layout.widths(viewport.layout(x$childrenvp[[1]])))
}
# Break points for the ribbon legend: the caller's `breaks` when given,
# otherwise nlevels+1 evenly spaced values spanning `scale`.
calcBreaks <- function(nlevels, breaks, scale) {
  if (!is.null(breaks)) {
    return(breaks)
  }
  seq(min(scale), max(scale), diff(scale) / nlevels)
}
# Viewport tree for the ribbon legend: a 3x4 layout with margins around
# a one-line-wide ribbon column and a labels column sized to the widest
# formatted break value; both inner viewports share the data yscale.
ribbonVps <- function (nlevels, breaks, margin, scale)
{
breaks <- format(signif(calcBreaks(nlevels, breaks, scale),
3))
vpTree(viewport(name = "layout", layout = grid.layout(3,
4, widths = unit.c(margin, unit(1, "lines"), max(unit(0.8,
"lines") + stringWidth(breaks)), margin), heights = unit.c(margin,
unit(1, "null"), margin))), vpList(viewport(layout.pos.col = 2,
layout.pos.row = 2, yscale = scale, name = "ribbon"),
viewport(layout.pos.col = 3, layout.pos.row = 2, yscale = scale,
name = "labels")))
}
# Child grobs for the ribbon legend: one filled rectangle per color band
# (anchored at the band's upper break), tick marks at the interior
# breaks, and left-justified labels showing the tick values.
ribbonKids <- function (nlevels, breaks, cols, scale)
{
breaks <- calcBreaks(nlevels, breaks, scale)
nb <- length(breaks)
# interior breaks only: no ticks at the extreme ends of the ribbon
tickloc <- breaks[-c(1, nb)]
gList(rectGrob(y = unit(breaks[-1], "native"), height = unit(diff(breaks),
"native"), just = "top", gp = gpar(fill = cols), vp = vpPath("layout",
"ribbon")), segmentsGrob(x1 = unit(0.5, "lines"), y0 = unit(tickloc,
"native"), y1 = unit(tickloc, "native"), vp = vpPath("layout",
"labels")), textGrob(x = unit(0.8, "lines"), y = unit(tickloc,
"native"), just = "left", label = format(signif(tickloc,
3)), vp = vpPath("layout", "labels")))
}
#############################################################
#
# make a "list" of genes in flat. returns a data frame
#
# Summarize a flattened gene table (flatten.bed output) into one row per
# transcript/gene: id, name, chromosome, and start/stop both in plot
# units and in base pairs (plot units * `unit`). Returns NULL when there
# is nothing to list.
# NOTE(review): the `showIso` parameter is shadowed by the global
# args[['showIso']] below -- confirm which is intended before relying on it.
make.gene.list <- function (flat, showIso = TRUE, subset, unit, ...)
{
  if (prod(dim(flat)) <= 0) { return(NULL) }
  df <- flat
  if (!missing(subset)) { df <- df[subset, ] }
  # BUG FIX: this emptiness check used to re-test `flat`, so an empty
  # *subset* slipped through; test the subsetted frame instead.
  if (prod(dim(df)) <= 0) { return(NULL) }
  if (args[['showIso']]) {
    # one id per transcript accession
    df$idnum <- match(df$nmName, unique(df$nmName))
  } else {
    # one id per gene symbol
    df$idnum <- match(df$name, unique(df$name))
  }
  df0 <- df[df$type == 0, ]   # transcription-region rows only
  return(data.frame(id = df0$idnum, gene = df0$name, chrom = df0$chrom,
                    start = df0$start, stop = df0$stop,
                    startbp = df0$start * unit, stopbp = df0$stop * unit))
}
#############################################################
#
# display genes taking data from flattened bed format
#
# Draw the gene panel from a flattened bed frame (flatten.bed output)
# inside the current grid viewport. Genes are packed greedily into up to
# `rows` label rows so names do not collide; with computeOptimalRows =
# TRUE nothing is drawn and the number of rows needed to show every gene
# is returned instead. Reads layout options from the global `args` list.
panel.flatbed <- function (x=NULL, y=NULL, flat, fill = "navy", col = "navy", alpha = 1, textcol='black',
    multiplier = 0.001, height = 2/14, buffer=0.003, subset, cex=.9, rows=2, showPartialGenes=FALSE,
    shiftGeneNames=TRUE,
    computeOptimalRows=FALSE, ...)
{
  if (prod(dim(flat)) <= 0) { return(1) }
  df <- flat
  if (!missing(subset)) { df <- df[subset, ] }
  df$width <- df$stop - df$start
  if (args[['showIso']]) {
    df$idnum <- match(df$nmName, unique(df$nmName))   # one id per transcript
  } else {
    df$idnum <- match(df$name, unique(df$name))       # one id per gene symbol
  }
  df0 <- df[df$type == 0, ]   # transcription regions
  df1 <- df[df$type == 1, ]   # exons
  if ("col" %in% names(df0)) {
    col <- df0$col
    fill <- df0$col
  }
  # Collapse df0 to one row per id (repeated ids are adjacent after sorting).
  df0 <- df0[order(df0$idnum), ]
  df0$new <- c(1, diff(df0$idnum))
  df0 <- df0[order(df0$idnum), ]
  df0uniq <- df0[df0$new == 1, ]
  # Greedy row assignment: rowUse[r] holds the right edge (npc) of the
  # last gene placed in row r; id2row maps gene id -> assigned row.
  maxIdnum <- max(c(0, df$idnum))
  rowUse <- rep(-Inf, 1 + maxIdnum)
  id2row <- rep(0, 1 + maxIdnum)
  # Conversion to 'native' isn't working for these grobs, so convert via
  # the viewport's xscale by hand.
  native2npc <- function(x) {
    w <- diff(current.viewport()$xscale)
    a <- current.viewport()$xscale[1]
    (x - a) / w
  }
  # NOTE: the original emitted a cat() progress line and dumped a
  # debug.Rdata file in this loop; both debug leftovers were removed.
  for (i in seq_len(dim(df0uniq)[1])) {
    leftGraphic <- native2npc(min(df$start[df$idnum == df0uniq$idnum[i]]))
    rightGraphic <- native2npc(max(df$stop[df$idnum == df0uniq$idnum[i]]))
    centerGraphic <- mean(c(leftGraphic, rightGraphic))
    at <- arrowText(df0uniq$name[i],
                    x = unit((df0uniq$start[i] + df0uniq$stop[i])/2, 'native'),
                    y = unit(0, 'npc'),
                    direction = df0uniq$strand[i],
                    check.overlap = TRUE,
                    gp = gpar(cex = cex, fontface = 'italic', col = textcol, lwd = 1.5),
                    widthOnly = FALSE)
    # Label width in npc, padded 10%.
    w <- 1.1 * convertX(attr(at, 'width'), 'inches', valueOnly = TRUE)
    viewportWidth <- convertX(unit(1, 'npc'), 'inches', valueOnly = TRUE)
    w <- w / viewportWidth
    leftName <- centerGraphic - .5 * w
    rightName <- centerGraphic + .5 * w
    if (shiftGeneNames) {
      # Slide labels that stick out past the panel edges back inside.
      if (leftName < 0) {
        leftName <- 0; rightName <- w
      }
      if (rightName > 1) {
        rightName <- 1; leftName <- 1 - w
      }
    }
    left <- min(c(leftGraphic, leftName)) - buffer
    right <- max(c(rightGraphic, rightName)) + buffer
    df0uniq$start[i] <- leftName
    df0uniq$stop[i] <- rightName
    df0uniq$left[i] <- left
    df0uniq$right[i] <- right
    rowToUse <- min(which(rowUse < left))
    if (showPartialGenes || (left >= 0 && right <= 1)) {
      id2row[df0uniq$idnum[i]] <- rowToUse
      rowUse[rowToUse] <- right
    } else {
      id2row[df0uniq$idnum[i]] <- -2   # clipping will hide this gene
    }
  }
  requestedRows <- rows
  optRows <- max(c(0, which(rowUse > 0)))
  if (computeOptimalRows) { return(optRows) }
  rows <- min(requestedRows, optRows)
  # Force args[['requiredGene']] onto row 1, bumping whatever overlaps it
  # off the visible rows.
  if (is.character(args[['requiredGene']])) {
    requiredGeneIdx <- min(which(df0uniq$name == args[['requiredGene']]))
    requiredGeneIdnum <- df0uniq$idnum[requiredGeneIdx]
    if (id2row[requiredGeneIdnum] > rows) {
      for (id in which(id2row == 1)) {
        if (df0uniq$left[id] < df0uniq$right[requiredGeneIdx] &&
            df0uniq$right[id] > df0uniq$left[requiredGeneIdx]) {
          id2row[id] <- rows + 2
        }
      }
      id2row[requiredGeneIdnum] <- 1
    }
  }
  # BUG FIX: this condition used to read opts[['warnMissingGenes']], but
  # no `opts` list exists in this script (every other option lives in
  # `args`), so omitting genes crashed with "object 'opts' not found".
  if (optRows > requestedRows && isTRUE(args[['warnMissingGenes']])) {
    omitIdx <- which(id2row > rows)
    assign("omittedGenes", as.character(df0uniq$name[omitIdx]), globalenv())
    numberOfMissingGenes <- length(omittedGenes)
    message <- paste(numberOfMissingGenes, " gene",
                     if (numberOfMissingGenes > 1) "s" else "", "\nomitted", sep = "")
    pushViewport(viewport(clip = 'off'))
    grid.text(message, x = unit(1, 'npc') + unit(1, 'lines'), y = .5, just = c('left', 'center'),
              gp = gpar(cex = args[['axisTextSize']], col = args[['axisTextColor']], alpha = args[['frameAlpha']]))
    upViewport(1)
  }
  increment <- 1.0 / rows
  # Vertical center for a gene's row; text=TRUE gives the label baseline.
  yPos <- function(id, text = FALSE) {
    if (text) {
      unit((rows - id2row[id]) * increment + 4.4/7 * increment, "npc")
    } else {
      unit((rows - id2row[id]) * increment + 2/7 * increment, "npc")
    }
  }
  # Transcript span lines.
  grid.segments(x0 = multiplier + df0$start, x1 = df0$stop,
                y0 = yPos(df0$idnum),
                y1 = yPos(df0$idnum),
                default.units = "native",
                gp = gpar(col = col, alpha = alpha))
  if ("col" %in% names(df1)) {
    col <- df1$col
    fill <- df1$col
  }
  # Exon boxes.
  grid.rect(x = multiplier + df1$start, width = df1$width,
            just = "left",
            y = yPos(df1$idnum),
            height = unit(height * increment, "npc"),
            default.units = "native",
            gp = gpar(fill = fill, col = col, alpha = alpha))
  if ("textcol" %in% names(df0uniq)) {
    textcol <- df0uniq$textcol
    fill <- df0uniq$textcol
  }
  # Gene name labels with strand arrows.
  for (i in seq_len(dim(df0uniq)[1])) {
    at <- arrowText(df0uniq$name[i],
                    x = unit((df0uniq$start[i] + df0uniq$stop[i])/2, 'npc'),
                    y = yPos(df0uniq$idnum[i], text = TRUE),
                    direction = df0uniq$strand[i],
                    check.overlap = TRUE,
                    gp = gpar(cex = cex, fontface = 'italic', col = textcol, lwd = 1.5))
    grid.draw(at)
  }
}
#############################################################
#
# Assemble a plot zooming in on a region from various pieces
# including metal output (with positions added), ld (ala newfugue), recombination rate data,
# genes data (refFlat), etc.
#
# NB: *** passing in entire args list ***
#
zplot <- function(metal,ld=NULL,recrate=NULL,refidx=NULL,nrugs=0,postlude=NULL,args=NULL,...){
refSnp <- metal$MarkerName[refidx];
metal$P.value <- as.numeric(metal$P.value);
if ( char2Rname(args[['weightCol']]) %in% names(metal) ){
metal$Weight <- metal[ ,char2Rname(args[['weightCol']]) ];
dotSizes <- rescale( log(pmax(1,metal$Weight)), c(log(1000), log(100000)),
c(args[['smallDot']],args[['largeDot']] ) ) ;
} else {
dotSizes <- rep(args[['largeDot']], dim(metal)[1] );
if (! is.null(args[['refDot']]) ) {
dotSizes[refidx] <- args[['refDot']];
}
}
if ( is.null(args[['refDot']]) ) {
# this avoids problems downstream, but dotSize[refidx] has already been set in most cases.
args[['refDot']] <- args[['largeDot']];
}
grid.newpage();
# push viewports just to calculate optimal number of rows for refFlat
pushViewport(viewport(
layout=grid.layout(2+3+4,1+2,
widths=unit(c(5,1,5),c('lines','null','lines')),
heights=unit(c(.5, 3,nrugs, 1, 1, 1, 2*args[['rfrows']], 4,.5),
c('lines','lines', 'lines','lines','null','lines', 'lines', 'lines','lines'))
)
));
pvalVp=dataViewport(
xRange,yRange,
extension=c(0,.05),
layout.pos.row=5,layout.pos.col=2,
name="pvals",
clip="off");
pushViewport(
viewport(xscale=pvalVp$xscale,
layout.pos.row=7,
layout.pos.col=2,
name="refFlatOuter")
);
optRows <- panel.flatbed(
flat=refFlat,
rows=NULL,
computeOptimalRows=TRUE,
showPartialGenes = args[['showPartialGenes']],
shiftGeneNames = args[['shiftGeneNames']],
cex=args[['geneFontSize']],
col=args[['geneColor']],
fill=args[['geneColor']],
multiplier=1/args[['unit']]
);
if ( length( args[['rfrows']] < 2 ) ) { # use value as upper bound
args[['rfrows']] <- min(args[['rfrows']], optRows)
} else { # use smallest two values as lower and upper bounds
args[['rfrows']] <- sort(args[['rfrows']])
rows <- min( args[['rfrows']][2], optRows )
args[['rfrows']] <- max( args[['rfrows']][1], rows )
}
popViewport(2);
# OK. Now we know how many rows to use and we can set up the layout we will actually use.
pushViewport(viewport(
layout=grid.layout(2+3+4,1+2,
widths=unit(c(args[['axisTextSize']]*args[['leftMarginLines']],1,args[['axisTextSize']]*args[['rightMarginLines']]),c('lines','null','lines')),
heights=unit(c(.5, 3,nrugs, 1, 1, 1, 2*args[['geneFontSize']]*args[['rfrows']], 4,.5),
c('lines','lines', 'lines','lines','null','lines', 'lines', 'lines','lines'))
)
));
##
# layout (top to bottom)
# ----------------------
# spacer
# title text
# rugs
# separation
# pvals
# separation
# genes
# subtitle text
# spacer
#
#
# layout (left to right)
# ----------------------
# vertical axes and labeling
# main data panels, titles, horizontal axes, etc.
# vertical axes and labeling
#
########## title text
titleVp=viewport(
layout.pos.row=2,layout.pos.col=2,
name="title",
clip="off");
pushViewport(titleVp);
grid.text(args[['title']],gp=gpar(cex=2,col=args[['titleColor']]));
upViewport(1);
########## pvals
# this viewport is defined above
# pvalVp=dataViewport(
# xRange,yRange,
# extension=c(0,.05),
# layout.pos.row=5,layout.pos.col=2,
# name="pvals",
# clip="off");
pushViewport(pvalVp);
grid.yaxis(at=args[['yat']],gp=gpar(cex=args[['axisSize']],col=args[['frameColor']],alpha=args[['frameAlpha']]));
# grid.xaxis(at=args[['xat']],gp=gpar(cex=args[['axisSize']],col=args[['frameColor']],alpha=args[['frameAlpha']]));
if (length(args[['ylab']]) > 0) {
grid.text(x=unit(args[['ylabPos']],'lines'),label=args[['ylab']],rot=90,
gp=gpar(cex=args[['axisTextSize']], col=args[['axisTextColor']], alpha=args[['frameAlpha']])
);
} else {
grid.text(x=unit(args[['ylabPos']],'lines'),label=expression(paste(-log[10] ,"(p-value)")),rot=90,
gp=gpar(cex=args[['axisTextSize']], col=args[['axisTextColor']], alpha=args[['frameAlpha']])
);
}
pushViewport(dataViewport(extension=c(0,.05),xRange,recrateRange,name='recrate',clip="off"));
if ( args[['showRecomb']] ) {
grid.yaxis(main=F,gp=gpar(cex=args[['axisSize']],col=args[['recombAxisColor']],alpha=args[['recombAxisAlpha']]));
grid.text(x=unit(1,'npc')+unit(args[['recombPos']],'lines'),
label="Recombination rate (cM/Mb)",rot=270,
gp=gpar(cex=args[['axisTextSize']],col=args[['recombAxisColor']],alpha=args[['recombAxisAlpha']]));
}
if ( args[['showRecomb']] && !args[['recombOver']]) {
pushViewport(dataViewport(extension=c(0,.05),xRange,recrateRange,name='recrateClipped',
clip="on"));
if (args[['recombFill']]) {
grid.polygon(x=recrate$pos,y=recrate$recomb,
gp=gpar(alpha=args[['recombFillAlpha']],col=args[['recombColor']],fill=args[['recombColor']]),
default.units='native'
);
} else {
panel.xyplot(recrate$pos,recrate$recomb,type='l',lwd=2,alpha=args[['recombLineAlpha']],col=args[['recombColor']]);
}
upViewport(1)
}
pushViewport(viewport(clip="on",xscale=pvalVp$xscale,yscale=pvalVp$yscale,name='pvalsClipped'));
grid.rect(gp=gpar(col=args[['frameColor']],alpha=args[['frameAlpha']]));
if (! is.null(refidx)) {
grid.refsnp(name=refSnp,pos=metal$pos[refidx]);
}
groupIds <- sort(unique(metal$group))
print(table(metal$group));
if (args[['bigDiamond']] && args[['showRefsnpAnnot']]) {
grid.points(x=metal$pos[refidx],y=transformation(metal$P.value[refidx]),
gp=gpar(col=args[['refsnpColor']],fill=args[['refsnpColor']],cex=1.6*args[['refDot']],alpha=.2),
pch=23,
default.units='native'
);
}
for (i in groupIds) {
idx <- which(metal$group == i);
gmetal <- metal[idx,];
colors <- args[['ldColors']][gmetal$group];
colors[which(gmetal$pch %in% 21:25)] <- 'gray20';
grid.points(x=gmetal$pos,y=transformation(gmetal$P.value),
pch=gmetal$pch,
gp=gpar(
cex=dotSizes[idx],
col=colors,
fill=args[['ldColors']][gmetal$group]
));
}
if (FALSE) {
grid.points(x=metal$pos[refidx],y=transformation(metal$P.value[refidx]),
gp=gpar(col=args[['refsnpColor']],fill=args[['refsnpColor']],
cex= if (args[['bigDiamond']] & args[['showRefsnpAnnot']]) 1.6*args[['refDot']] else args[['refDot']]),
pch= if (args[['bigDiamond']] & args[['showRefsnpAnnot']]) 5 else metal$pch[refidx],
default.units='native'
);
}
if ( args[['showRecomb']] && args[['recombOver']]) {
pushViewport(dataViewport(extension=c(0,.05),xRange,recrateRange,name='recrateClipped',
clip="on"));
if (args[['recombFill']]) {
grid.polygon(x=recrate$pos,y=recrate$recomb,
gp=gpar(alpha=args[['recombFillAlpha']],col=args[['recombColor']],fill=args[['recombColor']]),
default.units='native'
);
} else {
panel.xyplot(recrate$pos,recrate$recomb,type='l',lwd=2,alpha=args[['recombLineAlpha']],col=args[['recombColor']]);
}
upViewport(1);
}
grid.rect(gp=gpar(col=args[['frameColor']],alpha=args[['frameAlpha']]));
pushViewport(viewport(clip="on",name='legend'));
breaks <- union(args[['ldCuts']],c(0,1));
breaks <- sort(unique(breaks));
nb <- length(breaks);
cols <- args[['ldColors']]
cols <- rep(cols, length=nb+2);
rl <- ribbonLegend(
breaks=breaks,
cols=cols[2:(1+nb)],
gp=gpar(cex=args[['legendSize']],col=args[['frameColor']],alpha=args[['frameAlapha']])
);
if (args[['legend']] == 'auto') {
args[['legend']] = AutoLegendSide(transformation(metal$P.value),metal$pos,xRange);
}
if (tolower(args[['legend']]) %in% c('left','right')) {
pushViewport(viewport(name='legendVp',
x=if (args[['legend']] == 'left') unit(2.5,"char") else unit(1,'npc') - unit(2.5,'char'),
y=unit(1,'npc') - unit(.5,'char'),
just=c('center','top'),
width=unit(4,'char'),
height=unit(8,'lines')
));
grid.rect(gp=gpar(col='transparent',fill='white',alpha=args[['legendAlpha']]));
grid.rect(gp=gpar(col=args[['frameColor']],alpha=args[['frameAlpha']]));
pushViewport(viewport(name='ribbonLegend',
y=0,
just=c('center','bottom'),
width=unit(4,'char'),
height=unit(7,'lines')
))
grid.draw(rl);
upViewport(1);
pushViewport(viewport(name='LDTitle',
clip="off",
#x=unit(2.5,"char"),
width=unit(4,"char"),
y=unit(1,'npc') - unit(.25,'char'),
just=c('center','top'),
height=unit(1,'lines')
))
grid.text(args[['LDTitle']], gp=gpar(col=args[['frameColor']],alpha=args[['frameAlpha']]));
upViewport(1);
upViewport(1);
} # end if show legend on left or right
upViewport(4);
######### subtitle space; place holder for now
pushViewport(viewport(layout.pos.row=8,layout.pos.col=2,name="subtitle"));
if(FALSE) {
grid.rect(gp=gpar(col='red'));
grid.xaxis(gp=gpar(col=args[['frameColor']],alpha=args[['frameAlpha']]));
grid.text(paste('Position on',chr2chrom(args[['chr']]),"(Mb)"),
gp=gpar(col="red"));
}
upViewport(1);
########## annotation (genes)
if(args[['rfrows']] > 0) {
pushViewport(
viewport(xscale=pvalVp$xscale,
layout.pos.row=7,
layout.pos.col=2,
name="refFlatOuter")
);
pushViewport(
viewport(xscale=pvalVp$xscale,
name="refFlatInner",
clip="on")
);
panel.flatbed(
flat=refFlat,
showPartialGenes = args[['showPartialGenes']],
shiftGeneNames = args[['shiftGeneNames']],
rows=args[['rfrows']],
cex=args[['geneFontSize']],
col=args[['geneColor']],
fill=args[['geneColor']],
multiplier=1/args[['unit']]);
upViewport(1);
#grid.rect(gp=gpar(col='white'));
grid.rect(gp=gpar(col=args[['frameColor']],alpha=args[['frameAlpha']]));
if ( !is.null(args[['xnsmall']]) && !is.null(args[['xat']]) ) {
grid.xaxis(at=args[['xat']], label=format(args[['xat']], nsmall=args[['xnsmall']]),
gp=gpar(cex=args[['axisSize']],col=args[['frameColor']],alpha=args[['frameAlpha']]));
} else {
grid.xaxis(at=args[['xat']],
gp=gpar(cex=args[['axisSize']],col=args[['frameColor']],alpha=args[['frameAlpha']]));
}
grid.text(paste('Position on',chr2chrom(args[['chr']]),unit2char(args[['unit']])),
y=unit(args[['xlabPos']],'lines'),just=c('center',"bottom"),
gp=gpar(cex=args[['axisTextSize']], col=args[['axisTextColor']], alpha=args[['frameAlpha']])
);
panel.hilite(
range=c(args[['hiStartBP']]/args[['unit']],args[['hiEndBP']]/args[['unit']]),
fill=args[['hiColor']],
alpha=args[['hiAlpha']]
);
upViewport(1);
}
########## rugs for snpsets
pushViewport(viewport(xscale=pvalVp$xscale,layout.pos.row=3,
layout.pos.col=2,name="rugs",clip="off"));
i <- nrugs;
for (snpset in levels(rug$snp_set)) {
grid.text(as.character(snpset),x=unit(-.25,"lines"),
y=(i-.5)/nrugs, just="right",
gp=gpar(col=args[['rugColor']], alpha=args[['rugAlpha']],cex=.90*args[['axisTextSize']])
);
i <- i-1;
}
pushViewport(viewport(xscale=pvalVp$xscale,layout.pos.row=3,
layout.pos.col=2,name="rugsClipped",clip="on"));
i <- nrugs;
for (snpset in levels(rug$snp_set)) {
panel.rug( rug[ which(rug$snp_set==snpset), "pos" ] ,
start = (i-1)/(nrugs) + (.15/nrugs),
end = (i)/(nrugs) - (.15/nrugs),
y.units=rep("native",2),
col=args[['rugColor']],
alpha=args[['rugAlpha']]
);
i <- i-1;
}
upViewport(2);
if (is.character(postlude) && file.exists(postlude)) {
source(postlude);
}
} ## end zplot
# grid.log : emit a summary "log page" for the current LocusZoom run.
#
# Builds two parallel character vectors (labels, values) covering the run
# date, genome build, display/hilite ranges, reference SNP, SNP count,
# extreme p-values, omitted genes and accumulated warnings, then either
# prints them as plain text (ascii=TRUE) or draws them on the current grid
# page together with the annotation key and the LD colour-ribbon legend.
#
# Relies on objects from the enclosing script / global environment:
# transformation(), log2sci(), ribbonLegend(), omittedGenes, warningMessages.
#
# args        : processed argument list for this run
# metal       : data.frame of plotted SNPs (P.value, MarkerName columns)
# linespacing : vertical spacing, in lines, between summary rows
# ascii       : TRUE = write plain text to the console, FALSE = draw with grid
# debug       : if TRUE, also report the .Rdata file the data came from
#
# FIX: the ribbon-legend gpar below previously read args[['frameAlapha']]
# (a typo for 'frameAlpha'); the misspelled element is never set, so the
# configured frame alpha was silently ignored for the legend.
grid.log <- function(args,metal,linespacing=1.5,ascii=FALSE,debug=FALSE){
# ---- assemble the label/value pairs ----
labels=c("date");
values=c(date());
# labels=c(labels,"working directory");
# values=c(values,getwd());
# labels=c(labels,"unit");
# values=c(values,args[['unit']]);
labels=c(labels,"build");
values=c(values,args[['build']]);
labels=c(labels,"display range");
values=c(values,paste( 'chr',args[['chr']],":",args[['start']], "-", args[['end']], " [",args[['startBP']],"-",args[['endBP']], "]",sep=""));
labels=c(labels,"hilite range");
values=c(values,paste( args[['hiStart']], "-", args[['hiEnd']], " [",args[['hiStartBP']],"-",args[['hiEndBP']], "]"));
labels=c(labels,"reference SNP");
values=c(values,args[['refsnp']]);
# labels=c(labels,"prefix");
# values=c(values,args[['prefix']]);
# labels=c(labels,"log");
# values=c(values,args[['log']]);
if (!is.null(args[['reload']])) {
labels=c(labels,"reload");
values=c(values,args[['reload']]);
}
if(! is.null(args[['reload']]) || debug){
labels=c(labels,"data reloaded from");
values=c(values,args[['rdata']]);
}
labels=c(labels,"number of SNPs plotted");
values=c(values,as.character(dim(metal)[1]));
# Most/least significant SNPs (transformation() is the -log-type transform
# configured earlier; log2sci() renders the p-value in scientific notation).
labels=c(labels,paste("max",args[['pvalCol']]));
maxIdx <- which.max(transformation(metal$P.value));
maxName <- as.character(metal$MarkerName[maxIdx]);
maxNegLogP <- transformation(metal$P.value[maxIdx]);
maxPSci <- log2sci(-maxNegLogP)
values=c(values,paste(maxPSci," [", maxName ,"]",sep=""));
labels=c(labels,paste("min",args[['pvalCol']]));
minIdx <- which.min(transformation(metal$P.value));
minName <- as.character(metal$MarkerName[minIdx]);
minNegLogP <- transformation(metal$P.value[minIdx]);
minPSci <- log2sci(-minNegLogP)
values=c(values,paste(minPSci," [", minName ,"]",sep=""));
# Omitted genes are reported three per line.
if (TRUE) {
oG <- omittedGenes;
while (length(oG) > 0) {
labels=c(labels,"omitted Genes");
values=c(values,paste(oG[1:min(length(oG),3)],collapse=", "));
oG <- oG[-(1:3)]
}
}
# Accumulated warnings, one per line.
if (TRUE) {
w <- warningMessages;
while (length(w) > 0) {
labels=c(labels,"Warning");
values=c(values,w[1]);
w <- w[-1]
}
}
labels=paste(labels, ": ",sep='');
if (ascii) {
# ---- plain-text rendering (used when appending to the .log file) ----
cat(paste(format(labels,width=20,justify="right"),values,sep=" ",collapse="\n"));
cat('\n');
cat('\nMake more plots at http://csg.sph.umich.edu/locuszoom/');
cat('\n');
} else {
# ---- graphical rendering on the current grid page ----
grid.text(labels,x=.3,y=unit(1,'npc') - unit(linespacing *(1:length(labels)),'lines'), just='right');
grid.text(values,x=.3,y=unit(1,'npc') - unit(linespacing *(1:length(values)),'lines'), just='left');
# Disabled legacy annotation-key block, kept for reference.
if (FALSE && args[['showAnnot']]) {
annotlabels <- c('no annotation','framestop','splice','nonsyn','coding','utr','tfbscons','mcs44placental');
pch <- args[['annotPch']];
annotlabels <- c(annotlabels[-1],annotlabels[1])
pch <- c(pch[-1],pch[1])
key <- simpleKey(text=annotlabels);
key$points$pch=pch;
key$points$col="navy";
key$points$fill="lightskyblue";
keyGrob <- draw.key(key,draw=FALSE);
annotationBoxTop <- unit(0.95,'npc');
annotationBoxHeight <- unit(3,"lines") + grobHeight(keyGrob);
pushViewport(viewport(x=.90,y=annotationBoxTop,width=grobWidth(keyGrob),
height=annotationBoxHeight,just=c('right','top')));
grid.rect();
pushViewport(viewport(y=unit(.75,'lines'),height = grobHeight(keyGrob),just=c('center','bottom')));
draw.key(key,draw=TRUE);
popViewport();
grid.text('Annotation key',x=.5,y=unit(1,'npc') - unit(1,'lines'),just=c('center','top'))
popViewport();
}
# Annotation key: use the factor levels actually present when available,
# otherwise fall back to the fixed annotation class list.
if ( 'annot' %in% names(metal) && args[['showAnnot']] ) {
annotlabels <- levels(as.factor(metal$annot))
pch <- rep(args[['annotPch']],length=length(annotlabels));
key <- simpleKey(text=annotlabels);
key$points$pch=pch;
key$points$col="navy";
key$points$fill="lightskyblue";
keyGrob <- draw.key(key,draw=FALSE);
annotationBoxTop <- unit(0.95,'npc');
annotationBoxHeight <- unit(3,"lines") + grobHeight(keyGrob);
pushViewport(viewport(x=.90,y=annotationBoxTop,width=grobWidth(keyGrob),
height=annotationBoxHeight,just=c('right','top')));
pushViewport(viewport(y=unit(.75,'lines'),height = grobHeight(keyGrob),just=c('center','bottom')));
draw.key(key,draw=TRUE);
grid.rect();
popViewport();
grid.text('annotation key',x=.5,y=unit(1,'npc') - unit(1,'lines'),just=c('center','top'))
popViewport();
} else { if (args[['showAnnot']]) {
annotlabels <- c('no annotation','framestop','splice','nonsyn','coding','utr','tfbscons','mcs44placental');
pch <- args[['annotPch']];
annotlabels <- c(annotlabels[-1],annotlabels[1])
pch <- c(pch[-1],pch[1])
key <- simpleKey(text=annotlabels);
key$points$pch=pch;
key$points$col="navy";
key$points$fill="lightskyblue";
keyGrob <- draw.key(key,draw=FALSE);
annotationBoxTop <- unit(0.95,'npc');
annotationBoxHeight <- unit(3,"lines") + grobHeight(keyGrob);
pushViewport(viewport(x=.90,y=annotationBoxTop,width=grobWidth(keyGrob),
height=annotationBoxHeight,just=c('right','top')));
popViewport();
} }
# ---- LD colour-ribbon legend ----
breaks <- union(args[['ldCuts']],c(0,1));
breaks <- sort(unique(breaks));
nb <- length(breaks);
cols <- args[['ldColors']]
cols <- rep(cols, length=nb+2);
rl <- ribbonLegend(
breaks=breaks,
cols=cols[2:(1+nb)],
gp=gpar(cex=args[['legendSize']],col=args[['frameColor']],alpha=args[['frameAlpha']])  # typo fix: was 'frameAlapha'
);
# NOTE(review): annotationBoxTop/annotationBoxHeight are only defined when
# one of the showAnnot branches above ran; if showAnnot is FALSE and
# legend is 'left'/'right', the viewport below would fail -- TODO confirm.
if ( args[['legend']] %in% c('left','right') ) {
annotlabels <- c('no annotation','framestop','splice','nonsyn','coding','utr','tfbscons','mcs44placental');
pch <- args[['annotPch']];
annotlabels <- c(annotlabels[-1],annotlabels[1])
pch <- c(pch[-1],pch[1])
key <- simpleKey(text=annotlabels);
key$points$pch=pch;
key$points$col="navy";
key$points$fill="lightskyblue";
keyGrob <- draw.key(key,draw=FALSE);
annotationBoxTop <- unit(0.95,'npc');
annotationBoxHeight <- unit(3,"lines") + grobHeight(keyGrob);
pushViewport(viewport(name='legendVpPage2',
x=unit(.9,'npc'),
y=annotationBoxTop - annotationBoxHeight - unit(2,'lines'),
just=c('right','top'),
width=unit(4,'char'),
height=unit(8,'lines')
));
grid.rect(gp=gpar(col='transparent',fill='white',alpha=args[['legendAlpha']]));
grid.rect(gp=gpar(col=args[['frameColor']],alpha=args[['frameAlpha']]));
pushViewport(viewport(name='ribbonLegendPage2',
y=0,
just=c('center','bottom'),
width=unit(4,'char'),
height=unit(7,'lines')
))
grid.draw(rl);
upViewport(1);
pushViewport(viewport(name='LDTitlePage2',
clip="off",
width=unit(4,"char"),
y=unit(1,'npc') - unit(.25,'char'),
just=c('center','top'),
height=unit(1,'lines')
))
grid.text(args[['LDTitle']], gp=gpar(col=args[['frameColor']],alpha=args[['frameAlpha']]));
upViewport(1);
upViewport(1);
}
grid.text('Make more plots at http://csg.sph.umich.edu/locuszoom/', y=unit(1,'lines'), just=c('center','bottom'));
}
}
#############################################################
#
# process argument list, splitting the key=value pairs
#
# argv : parse "key=value" command-line arguments into a named list.
#
# Fixes over the original:
#  * an empty argument list no longer errors (the old `1:length(args)`
#    produced c(1, 0) when no arguments were given, so args[[1]] failed);
#  * values are split on the FIRST '=' only, so "title=a=b" yields "a=b"
#    instead of silently dropping everything after the second '='.
# An argument without '=' keeps the old behaviour: its value is NA.
#
# args : character vector of raw "key=value" strings; defaults to the
#        trailing command-line arguments, so argv() is backward compatible.
argv <- function(args = commandArgs(TRUE)){
newl <- list()
for ( raw in args ) {
key <- sub("=.*$", "", raw)
val <- if (grepl("=", raw, fixed = TRUE)) sub("^[^=]*=", "", raw) else NA_character_
newl[[ key ]] <- val
}
return(newl)
}
#################################################################################
# #
# MAIN PROGRAM BEGINS HERE #
# #
#################################################################################
# Script-level bookkeeping.
# flags$flank    : TRUE when the region was derived by flanking a refsnp.
# flags$reloaded : TRUE when data was restored from a saved .Rdata session.
flags <- list(flank=FALSE,reloaded=FALSE);
createdFiles <- list();
# Placeholder for the reference-SNP position table (empty.data.frame() is a
# project helper).  NOTE(review): re-initialised again below with
# data.frame() -- one of the two initialisations is redundant.
refSnpPos <- empty.data.frame();
#
# set program defaults -- may be overridden with command line arguments
#
default.args <- list(
theme = NULL, # select a theme (collection of settings) for plot
experimental = FALSE, # try some experimental features?
pquery = FALSE, # is pquery available?
format = "pdf", # file format (pdf or png or both)
recombTable = "results.recomb_rate", # Recomb Rate Table (for SQL)
clean=TRUE, # remove temp files?
build = "hg18", # build to use for position information
metal = "metal.tbl", # metal output file
alreadyTransformed=FALSE, # are metal p-values already -log10() -transformed?
pvalCol="P.value", # name for p-value column in metal file
posCol="pos", # name for positions column in metal file
markerCol="MarkerName", # name for MarkerName column in metal file
weightCol="Weight", # name for weights column in metal file
ymin=0, # min for p-value range (expanded to fit all p-vals if needed)
ymax=10, # max for p-value range (expanded to fit all p-vals if needed)
yat=NULL, # values for y-axis ticks
xat=NULL, # values for x-axis ticks
xnsmall=NULL, # number of digits after decimal point on x-axis labels
chr = NULL, # chromosome
start = NULL, # start of region (string, may include Mb, kb, etc.)
end = NULL, # end of region (string, may include Mb, kb, etc.)
flank = "300kb", # surround refsnp by this much
xlabPos = -3.0, # position of xaxis label (in lines relative to bottom panel)
ylabPos = -3.0, # position of yaxis label (in lines relative to left edge of panel)
ylab = "", # override default label for y-axis
recombPos = 3.0, # position of recomb label (in lines relative to right edge of panel)
axisSize = 1, # scaling factor for axes
axisTextSize = 1, # scaling factor for axis labels
axisTextColor = "gray30", # color of axis labels
requiredGene = NULL, # gene name (string)
refsnp = NULL, # snp name (string)
refsnpTextColor = "black", # color for ref snp label
refsnpTextSize = 1, # scaling factor for text size
refsnpTextAlpha = 1, # alpha for ref snp label
refsnpLineColor = "transparent", # color for ref snp line (invisible by default)
refsnpLineAlpha = .5, # alpha for ref snp line
title = "", # title for plot
titleColor = "black", # color for title
thresh = 1, # only get pvalues <= thresh # this is now ignored.
width = 10, # width of pdf (inches)
height = 7, # height of pdf (inches)
leftMarginLines = 5, # margin (in lines) on left
rightMarginLines = 5, # margin (in lines) on right
unit=1000000, # bp per unit displayed in plot
ldTable = "results.ld_point6", # LD Table (for SQL)
annot=NULL, # file for annotation
showAnnot=TRUE, # show annotation for each snp?
showGenes=TRUE, # show genes?
annotCol='annotation', # column to use for annotation, if it exists
annotPch='24,24,25,22,22,8,7,21,1', # plot symbols for annotation
annotOrder=NULL, # ordering of annotation classes
showRefsnpAnnot=TRUE, # show annotation for reference snp too?
bigDiamond=FALSE, # put big diamond around refsnp?
ld=NULL, # file for LD information
ldCuts = "0,.2,.4,.6,.8,1", # cut points for LD coloring
ldColors = "gray50,navy,lightskyblue,green,orange,red,purple3", # colors for LD
ldCol='rsquare', # name for LD column
LDTitle=NULL, # title for LD legend
smallDot = .4, # smallest p-value cex
largeDot = .8, # largest p-value cex
refDot = NULL, # cex for the reference snp point
rfrows = '4', # max number of rows for reflat genes
warnMissingGenes = FALSE, # should we warn about missing genes on the plot?
showPartialGenes = TRUE, # should genes that don't fit completely be displayed?
shiftGeneNames = TRUE, # shift gene names? (original comment was a copy of the previous line -- TODO confirm semantics)
geneFontSize = .8, # size for gene names
geneColor = "navy", # color for genes
snpset = "Affy500,Illu318,HapMap", # SNP sets to show
snpsetFile = NULL, # use this file for SNPset data (instead of pquery)
rugColor = "gray30", # color for snpset rugs
rugAlpha = 1, # alpha for snpset rugs
metalRug = NULL, # if not null, use as label for rug of metal positions
refFlat = NULL, # use this file with refFlat info (instead of pquery)
showIso=FALSE, # show each isoform of gene separately
showRecomb = TRUE, # show recombination rate?
recomb=NULL, # recombination rate file
recombAxisColor=NULL, # color for recomb rate axis labeling (defaults to recombColor)
recombAxisAlpha=NULL, # alpha for recomb rate axis labeling (defaults to recombLineAlpha)
recombColor='blue', # color for recomb rate on plot
recombOver = FALSE, # overlay recombination rate? (else underlay it)
recombFill = FALSE, # fill recombination rate? (else line only)
recombFillAlpha=0.2, # recomb fill alpha
recombLineAlpha=0.8, # recomb line/text alpha
frameColor='gray30', # frame color for plots
frameAlpha=1, # frame alpha for plots
legendSize=.8, # scaling factor of legend
legendAlpha=1, # transparency of legend background
legendMissing=TRUE, # show 'missing' as category in legend?
legend='auto', # legend? (auto, left, right, or none)
hiStart=0, # start of hilite region
hiEnd=0, # end of hilite region
hiColor="blue", # hilite color
hiAlpha=0.1, # hilite alpha
clobber=TRUE, # overwrite files?
reload=NULL, # .Rdata file to reload data from
prelude=NULL, # code to execute after data is read but before plot is made (allows data modification)
postlude=NULL, # code to execute after plot is made (allows annotation)
prefix=NULL, # prefix for output files
dryRun=FALSE # show a list of the arguments and then halt
)
### default data
# Zero-row (or placeholder) data frames returned by GetData() when a data
# file is missing and pquery is unavailable; they pin the expected column
# layout so downstream code can run against empty inputs.
refSnpPos <- data.frame()
# FIX: refSnpPos.default was referenced below (GetData for the reference-SNP
# position) but never defined, so the refsnp-flanking code path failed with
# "object 'refSnpPos.default' not found".  Columns chosen to match usage
# (refSnpPos$chrpos[1], refSnpPos$chr[1]) -- confirm against pquery output.
refSnpPos.default <- data.frame(snp=NA, chr=NA, chrpos=NA)[c(),,drop=FALSE]
# NOTE: the duplicated chr/pos names below become chr.1/pos.1 in the result.
recrate.default <- data.frame(chr=NA, pos=NA, recomb=NA, chr=NA, pos=NA)[c(),,drop=FALSE]
rug.default <- data.frame(snp=NA, chr=NA, pos=NA, snp_set=NA)[c(),,drop=FALSE]
annot.default <- data.frame(snp=NA,annot_rank=NA) # [c(),,drop=FALSE]
ld.default <- data.frame(snp1='rs0000', snp2='rs0001', build=NA,
chr=0, pos1=0, pos2=2, midpoint=1, distance=2,
rsquare=0, dprime=0, r2dp=0) # [c(),,drop=FALSE]
refFlatRaw.default <- data.frame(geneName=NA, name=NA, chrom=NA, strand=NA, txStart=NA, txEnd=NA,
cdsStart=NA, cdsEnd=NA, exonCount=NA, exonStarts=NA, exonEnds=NA, status=NA)[c(),,drop=FALSE]
#
# read and process command line arguments
#
# Merge user command-line arguments over the defaults.
# ConformList/ProcessThemes/ModifyList/MatchIfNull/AdjustModesOfArgs are
# project helpers; presumably ConformList restricts user args to known
# names and ModifyList(defaults, overrides) lets the overrides win -- TODO
# confirm against their definitions earlier in the file.
user.args <- ConformList(argv(),names(default.args),message=TRUE)
default.args <- ProcessThemes(default.args,user.args[['theme']])
args <- ModifyList(default.args,user.args);
# Record which data files the USER supplied explicitly, so later code can
# decide whether regenerating ("clobbering") them is safe.
userFile <- list(
recomb = !is.null(args[['recomb']]),
snpsetFile = !is.null(args[['snpsetFile']]),
refFlat = !is.null(args[['refFlat']]),
ld = !is.null(args[['ld']]),
annot = !is.null(args[['annot']])
);
# Recomb-axis colour/alpha fall back to the line colour/alpha when unset.
args <- MatchIfNull(args,'recombAxisAlpha','recombLineAlpha')
args <- MatchIfNull(args,'recombAxisColor','recombColor')
args <- AdjustModesOfArgs(args);
# GetData either regenerates missing files via pquery (when available) or
# reads existing files only.
if ( args[['pquery']] ){
GetData <- GetDataFromFileOrCommand
} else {
GetData <- GetDataFromFileIgnoreCommand
}
# Refsnp annotation only applies when annotation is shown at all.
args[['showRefsnpAnnot']] <- args[['showAnnot']] & args[['showRefsnpAnnot']];
# The reference SNP is drawn in the last (highest-LD) colour.
args[['refsnpColor']] <- args[['ldColors']][length(args[['ldColors']])];
# dryRun: print the resolved argument list and quit without plotting.
if ( args[['dryRun']] ) {
message("Argument list:");
message(paste("\t",names(args),'=', args, "\n"));
q();
}
#
# read metal data or reload all.
#
# Load the association results: read the metal file fresh, or restore a
# previously saved session (load() brings back whatever objects were saved,
# including `metal`) when --reload was given.
if ( is.null(args[['reload']]) ) {
if ( file.exists( args[['metal']]) ) {
metal <- read.file(args[['metal']]);
} else {
stop(paste('No such file: ', args[['metal']]));
}
} else {
if ( file.exists(args[['reload']]) ) {
load( args[['reload']] );
flags[['reloaded']] <- TRUE;
} else {
stop(paste("Stopping: Can't reload from", args[['reload']]));
}
}
#
# column renaming in metal data.frame
#
# Copy the user-named p-value/position/marker columns into the canonical
# names (P.value, pos, MarkerName) the rest of the script uses.
# char2Rname presumably maps a raw header to R's make.names() form -- TODO
# confirm; note posCol is NOT passed through char2Rname like the others.
if ( char2Rname(args[['pvalCol']]) %in% names(metal) ) {
metal$P.value <- metal[ ,char2Rname(args[['pvalCol']]) ];
} else {
stop(paste('No column named',args[['pvalCol']]));
}
# transformation() becomes the (global) p-value transform used everywhere
# below (e.g. -log10 unless the values are already transformed).
transformation <- SetTransformation( min(metal$P.value,na.rm=TRUE), max(metal$P.value,na.rm=TRUE),
args[['alreadyTransformed']] );
args[['LDTitle']] <- SetLDTitle( args[['ldCol']],args[['LDTitle']] )
if ( args[['posCol']] %in% names(metal) ) {
metal$pos <- metal[ ,args[['posCol']] ];
} else {
stop(paste('No column named',args[['posCol']]));
}
if ( char2Rname(args[['markerCol']]) %in% names(metal) ) {
metal$MarkerName <- metal[ ,char2Rname(args[['markerCol']]) ];
} else {
stop(paste('No column named',args[['markerCol']]));
}
#
# if no region and no refsnp specified, choose best snp and range of data set:
#
# Resolve the plotting region and reference SNP from whatever combination
# of --chr/--start/--end/--refsnp the user supplied.
# Case 1: neither region nor refsnp -> use the full data range and the most
# significant SNP.
if ( (is.null(args[['start']]) || is.null(args[['end']]) || is.null(args[['chr']]) ) && ( is.null(args[['refsnp']]) ) )
{
args[['start']] <- min(metal$pos);
args[['end']] <- max(metal$pos);
args[['chr']] <- min(metal$chr);
args[['refsnp']] <- as.character( metal$MarkerName[ order(metal$P.value)[1] ] );
args <- ModifyList(list(prefix=paste('chr',
args[['chr']],"_",args[['start']],"-",args[['end']],sep='')),
args);
# NOTE(review): this line looks like leftover debugging -- the 'foo'
# default is a no-op because args already contains a prefix at this point
# (assuming ModifyList(defaults, overrides) semantics); verify and remove.
args <- ModifyList(list(prefix='foo'),args);
flags[['flank']] <- FALSE;
# Case 2: region given but no refsnp -> choose the most significant SNP.
} else if ( !is.null(args[['start']]) && !is.null(args[['end']]) && !is.null(args[['chr']]) && is.null(args[['refsnp']] ) )
{
args <- ModifyList(
list( refsnp = as.character( metal$MarkerName[ order(metal$P.value)[1] ] ) ),
args
);
flags[['flank']] <- FALSE;
# Case 3: refsnp given but no (complete) region -> look up the SNP position
# and flank it by args$flank on each side.
} else if ( ( is.null(args[['start']]) || is.null(args[['end']]) || is.null(args[['chr']]) ) && (!is.null(args[['refsnp']]) ) )
{
args <- ModifyList( args, list( flankBP=pos2bp(args[['flank']]) ) );
refSnpPosFile <- paste(args[['refsnp']],"_pos.tbl",sep="");
command <- paste("pquery snp_pos",
" -defaults",
" -sql",
" Snp=", args[["refsnp"]],
" Build=",args[["build"]],
sep="");
# NOTE(review): this check is dead code -- refSnpPos is initialised to a
# data.frame above and is never NULL here; 'showRug' also never appears in
# default.args.  Verify intent.
if ( is.null(refSnpPos) ) { args[['showRug']] = FALSE }
refSnpPos <- GetData( refSnpPosFile, default=refSnpPos.default, command=command, clobber=TRUE);
args[['refSnpPos']] <- as.character(refSnpPos$chrpos[1]);
args[['refSnpBP']] <- pos2bp(refSnpPos$chrpos[1]);
args <- ModifyList( args, list( start=args[['refSnpBP']] - args[['flankBP']] ) ) ;
args <- ModifyList( args, list( end=args[['refSnpBP']] + args[['flankBP']] ) );
args <- ModifyList( args, list( chr=refSnpPos$chr[1] ) );
flags[['flank']] <- TRUE;
# Case 4: both refsnp and region were specified.
} else {
flags[['flank']] <- FALSE;
}
# If refsnp is still unset, record "none"; normalise start/end to strings.
args <- ModifyList( list( refsnp = "none"), args);
args <- ModifyList( args, list( start=as.character(args[['start']]) ) );
args <- ModifyList( args, list( end=as.character(args[['end']]) ) );
# Output-file prefix: "<refsnp>_<flank>" for flanked runs, otherwise
# "chr<chr>_<start>-<end>".
if (flags[['flank']]) {
args <- ModifyList(
list( prefix = paste( # #1
args[['refsnp']],
"_", args[['flank']],
sep="")
),
args
);
} else {
args <- ModifyList(
list( prefix = paste( # #2
"chr", args[['chr']],
"_", args[['start']],
"-", args[['end']],
sep="")
),
args
);
}
# Derive per-run file names from the prefix.  These ModifyList calls put the
# derived name FIRST, so a user-supplied value in args takes precedence
# (assuming ModifyList(defaults, overrides) semantics).
#log
args <- ModifyList(
list( log = paste(args[['prefix']], ".log", sep="") ),
args
);
#recomb
args <- ModifyList(
list( recomb = paste(args[['prefix']], "_recomb", ".tbl", sep="") ),
args
);
# annot
args <- ModifyList(
list( annot = paste(args[['prefix']], "_annot", ".tbl", sep="") ),
args
);
# ld
args <- ModifyList(
list( ld = paste(args[['prefix']], "_ld", ".tbl", sep="") ),
args
);
# snpsets
args <- ModifyList(
list( snpsetFile = paste(args[['prefix']], "_snpsets", ".tbl", sep="") ),
args
);
# pdf
args <- ModifyList(
list( pdf = paste(args[['prefix']], ".pdf", sep="") ),
args
);
args <- ModifyList(
list( png = paste(args[['prefix']], ".png", sep="") ),
args
);
args <- ModifyList(
list( tiff = paste(args[['prefix']], ".tiff", sep="") ),
args
);
# rdata
args <- ModifyList(
list( rdata = paste(args[['prefix']], ".Rdata", sep="") ),
args
);
# refFlat
args <- ModifyList(
list( refFlat = paste(args[['prefix']], "_refFlat.txt", sep="") ),
args
);
# Base-pair equivalents of the (string) region/hilite bounds.  Argument
# order is reversed here, so these ALWAYS overwrite any existing values.
args <- ModifyList(args, list( startBP=pos2bp(args[['start']]), endBP=pos2bp(args[['end']]) ));
args <- ModifyList(args, list( hiStartBP=pos2bp(args[['hiStart']]), hiEndBP=pos2bp(args[['hiEnd']]) ));
#######################################################
#
# now read other (non-metal) data
#
# Divert console output to the run's .log file while loading data.
sink(args[['log']]);
# (This `if` is closed ~200 lines below by the matching `} else { load(...) }`.)
if ( is.null(args[['reload']]) ) {
# recombination rate
command <- paste("pquery recomb_in_region",
" -defaults",
" -sql",
" RecombTable=", args[["recombTable"]],
" Chr=",args[["chr"]],
" Start=",args[["start"]],
" End=",args[["end"]],
sep="");
if ( is.null(args[['recomb']]) && ! args[['pquery']] ) { args[['showRecomb']] <- FALSE }
# NOTE(review): if GetData errors here, the warning is issued but `recrate`
# is left undefined, so the dim(recrate) check below would fail -- verify.
tryCatch(
recrate <- GetData( args[['recomb']], default=recrate.default,
command=command, clobber=!userFile[['recomb']] || args[['clobber']] ),
error = function(e) { warning(e) }
)
if ( prod(dim(recrate)) == 0 ) { args[['showRecomb']] <- FALSE }
cat("\n\n");
# snpset positions (rug tracks along the bottom of the plot)
command <- paste("pquery snpset_in_region",
" -defaults",
" -sql",
' "SnpSet=',args[["snpset"]],'"',
" Chr=",args[["chr"]],
" ChrStart=",args[["start"]],
" ChrEnd=",args[["end"]],
sep="");
rug <- GetData( args[['snpsetFile']], default=rug.default, command=command,
clobber=!userFile[['snpsetFile']] || args[['clobber']] );
cat("\n\nsnpset summary:\n");
print(summary(rug));
cat("\n\n");
# annotation
# SNP annotation.  Two sources, in order of precedence:
# (a) an annotation column already present in the metal file;
# (b) a pquery/annotation-file lookup, merged in by marker name.
if ( char2Rname(args[['annotCol']]) %in% names(metal) ) {
if (is.null(args[['annotOrder']])) {
args[['annotOrder']] <-
sort( unique( metal[,char2Rname(args[['annotCol']])] ) );
}
metal$annot <- MakeFactor(metal[,char2Rname(args[['annotCol']]) ], levels=args[['annotOrder']],
na.level='none')
# Map each annotation level to a plotting symbol (annotPch recycled).
pchVals <- rep(args[['annotPch']], length=length(levels(metal$annot)));
metal$pch <- pchVals[ as.numeric(metal$annot) ]
annot <- metal$annot
}
cat("\nR-DEBUG: Loading annotation data...\n");
# (b): only when annotation is wanted and (a) did not already set pch.
if( args[['showAnnot']] && ! 'pch' %in% names(metal) ) {
command <- paste("pquery snp_annot_in_region",
" -defaults",
" -sql",
" Chr=",args[["chr"]],
" Start=",args[["startBP"]],
" End=",args[["endBP"]],
sep="");
if ( is.null(args[['annot']]) && !args[['pquery']] ) { args[['showAnnot']] <- FALSE }
annot <- GetData( args[['annot']], annot.default, command=command,
clobber=!userFile[['annot']] || args[['clobber']] )
if (prod(dim(annot)) == 0) { args[['showAnnot']] <- FALSE }
cat("\nR-DEBUG: Merging in annotation data...");
metal <- merge(metal, annot,
by.x='MarkerName', by.y="snp",
all.x=TRUE, all.y=FALSE);
cat(" Done.\n");
print(head(metal));
# annot_rank 0..7 indexes this fixed class list (offset by 1).
metal$annot <-
c('no annotation','framestop','splice','nonsyn','coding','utr','tfbscons','mcs44placental')[1+metal$annot_rank];
if ( is.null(args[['annotOrder']]) ) {
args[['annotOrder']] <-
c('framestop','splice','nonsyn','coding','utr','tfbscons','mcs44placental','no annotation')
}
metal$annot <- MakeFactor(metal$annot, levels=args[['annotOrder']],na.level='none')
pchVals <- rep(args[['annotPch']], length=length(levels(metal$annot)));
metal$pch <- pchVals[ as.numeric(metal$annot) ]
} else {
# No annotation available/wanted: plain circles, single "none" class.
if (! 'pch' %in% names(metal)) {
metal$pch <- 21;
}
if (! 'annot' %in% names(metal) ) {
metal$annot <- "none"
metal$annot <- factor(metal$annot)
}
annot <- data.frame();
}
# Disabled leftover code, kept for reference.
if (FALSE) { # scraps from above
cat('else: ');
pchVals <- rep(args[['annotPch']], length=length(levels(metal$annot)));
metal$pch <- pchVals[ as.numeric(metal$annot) ]
annot <- metal$annot
print(xtabs(~annot+pch,metal));
print(metal[1:4,])
}
# Write an annotation tally to its own file; sink() pops back to the log
# sink opened above (sinks are stacked).  NOTE(review): annotOrder is
# printed twice here -- probably one was meant to be something else.
sink('annotationTally.txt')
print( args[['annotOrder']] )
print(args[['annotPch']])
print(args[['annotOrder']])
print(table(metal$annot))
print(table(metal$pch))
print(xtabs(~annot+pch,metal))
sink()
# ld
# LD relative to the reference SNP.
command <- paste("pquery ld_in_region",
" -defaults",
" -sql",
" LDTable=", args[["ldTable"]],
" Chr=",args[["chr"]],
" Start=",args[["startBP"]],
" End=",args[["endBP"]],
sep="");
# Without LD data there is nothing for the colour legend to describe.
if ( is.null(args[['ld']]) && ! args[['pquery']] ) { args[['legend']] = 'none' }
ld <- GetData( args[['ld']], ld.default, command=command,
clobber=!userFile[['ld']] || args[['clobber']] )
cat("\n\n");
# Optionally add a rug track of every metal SNP position, labelled by
# args$metalRug; the combined rug keeps only pos + snp_set.
if (! is.null(args[['metalRug']]) ) {
metalRug <- data.frame(pos=metal$pos, snp_set=args[['metalRug']]);
origRug <- data.frame(pos=rug$pos,snp_set=rug$snp_set)
rug <- rbind(origRug,metalRug)
# NOTE(review): levels() of a data.frame is NULL; this was probably meant
# to be levels(rug$snp_set).  Harmless (log output only), but verify.
print(levels(rug))
}
if ( prod(dim(metal) ) < 1) { stop("No data read.\n"); }
# Restrict the metal results to the requested chromosome/region.
s <- metal$pos >= args[['startBP']] &
metal$pos <= args[['endBP']] &
metal$chr == args[['chr']] ;
# & metal$P.value <= args[['thresh']];
metal <- subset(metal, s);
# Merge LD (relative to the reference SNP) into the metal data frame.
# Every SNP starts in colour group 1 ("no LD info"); the reference SNP
# always gets the last colour.
refSnp <- as.character(args[['refsnp']]);
metal$group <- 1;
metal$LD <- NA;
metal$ldcut <- NA;
metal$group[metal$MarkerName == refSnp] <- length(args[['ldColors']]);
if (! is.null(ld)) {
# Locate the two columns of the LD table that hold SNP names.
snpCols <- which(apply(ld,2,Sniff,type="snp"))
if (length(snpCols) != 2) {
# FIX: the original executed `break` here, but this code runs at the top
# level of the script (outside any loop), so `break` raised
# "no loop for break/next" and aborted the run.  Record the warning and
# simply skip the LD merge instead.
warning(paste("LD file doesn't smell right. (",
length(snpCols)," SNP cols)",sep=""))
assign("warningMessages",
c(warningMessages,"LD file doesn't smell right."),
globalenv());
} else {
# Gather every pair involving the reference SNP, from either column,
# normalised to (refSNP, otherSNP, <ldCol>).
w1 <- which ( ld[,snpCols[1]] == refSnp );
w2 <- which ( ld[,snpCols[2]] == refSnp );
c1 <- c(names(ld)[snpCols[1]],names(ld)[snpCols[2]],args[['ldCol']]); # "rsquare","dprime");
c2 <- c(names(ld)[snpCols[2]],names(ld)[snpCols[1]],args[['ldCol']]); # "rsquare","dprime");
ld1 <- ld[ w1, c1, drop=FALSE ]
ld2 <- ld[ w2, c2, drop=FALSE ]
names(ld1)[1:2] <- c("refSNP","otherSNP")
names(ld2)[1:2] <- c("refSNP","otherSNP")
lld <- rbind( ld1, ld2);
if (prod(dim(lld)) > 0) {
metal <- merge(metal, lld,
by.x='MarkerName', by.y="otherSNP",
all.x=TRUE, all.y=FALSE);
if ( args[['ldCol']] %in% names(metal) ) {
metal$LD <- metal[ ,args[['ldCol']] ];
} else {
stop(paste('No column named',args[['ldCol']]));
}
# Bin LD into colour groups; SNPs without LD info stay in group 1 and
# the reference SNP keeps the last colour.
metal$ldcut <- cut(metal$LD,breaks=args[['ldCuts']],include.lowest=TRUE);
metal$group <- 1 + as.numeric(metal$ldcut);
metal$group[is.na(metal$group)] <- 1;
metal$group[metal$MarkerName == refSnp] <- length(args[['ldColors']])
} else {
assign("warningMessages",c(warningMessages,'No usable LD information for reference SNP.'), globalenv());
warning("No usable LD information.");
args[['legend']] <- 'none';
}
}
}
# Checkpoint for debugging.
save(metal,refSnp,args,file='temp.Rdata');
# Gene annotation (refFlat) for the region.
command <- paste("pquery refFlat_in_region",
" -defaults",
" -sql",
" Chrom=", chr2chrom(args[["chr"]]),
" Start=",args[["start"]],
" End=",args[["end"]],
" Build=",args[["build"]],
sep="");
if (is.null(args[['refFlat']]) && ! args[['pquery']]) { args[['showGenes']] <- FALSE }
refFlatRaw <- GetData( args[['refFlat']], refFlatRaw.default, command=command,
clobber = !userFile[['refFlat']] || args[['clobber']] );
# NOTE(review): this summary() result is discarded (no print() inside a
# braced block) -- a no-op.
summary(refFlatRaw);
# Keep only transcripts overlapping the region on the right chromosome.
s <- refFlatRaw$txEnd >= args[['startBP']] &
refFlatRaw$txStart <= args[['endBP']] &
refFlatRaw$chrom == chr2chrom(args[['chr']]) ;
refFlatRaw <- subset(refFlatRaw, s);
save(refFlatRaw,args,file="refFlatRaw.Rdata");
# flatten.bed (project helper) collapses transcripts for drawing; note the
# right-assignment arrow.
flatten.bed(refFlatRaw,multiplier=1/args[['unit']]) -> refFlat;
summary(refFlat);
# Convert base-pair positions to plotting units (e.g. Mb when unit=1e6).
metal$pos <- metal$pos / args[['unit']];
recrate$pos <- recrate$pos / args[['unit']];
rug$pos <- rug$pos / args[['unit']];
cat("recrate summary:\n");
print(summary(recrate));
cat("\n\n");
cat("LD summary:\n");
print(summary(ld));
cat("\n\n");
cat("metal summary:\n");
print(summary(metal));
cat("\n\n");
# Save everything needed to redraw without re-querying (used by --reload).
save(metal,annot,recrate,refFlatRaw,refFlat,rug,file=args[['rdata']]);
} else {
# --reload path: restore the objects saved by a previous run.
load(args[['rdata']]);
}
# Optional user hook: run arbitrary R after data load, before plotting.
if (is.character(args[['prelude']]) && file.exists(args[['prelude']])) {
source(args[['prelude']]);
}
# Number of rug rows: one per SNP-set level (0 when no rug data is present).
if ( prod(dim(rug)) == 0 || !("snp_set" %in% names(rug)) ) {
nrugs <- 0;
} else {
nrugs <- length(levels(rug$snp_set));
}
# X range comes from the requested region (in plot units), not the data.
# (A dead assignment from range(metal$pos) that was immediately overwritten
# has been removed.)
xRange <- as.numeric(c(args[['start']],args[['end']])) / args[['unit']];
refFlat <- refFlat[ which( (refFlat$start <= xRange[2]) & (refFlat$stop >= xRange[1]) ), ]
# Y range: at least [ymin, ymax], expanded to cover the transformed
# p-values with 10% headroom at the top.
# FIX: na.rm=TRUE was previously INSIDE c() for the min() call, so min()
# received an extra element (TRUE == 1) and NAs in the p-values propagated
# into the result.
yRange <- c(min(c(args[['ymin']],transformation(metal$P.value)),na.rm=TRUE),
max(c(args[['ymax']],transformation(metal$P.value)*1.1),na.rm=TRUE));
# Recombination-rate axis spans at least 0..100 (cM/Mb scale).
recrateRange <- c(0,max(c(100,recrate$recomb),na.rm=TRUE));
if (args[['experimental']]) {
# Experimental mode flips the recombination track upside down.
recrate$recomb <- max(c(100,recrate$recomb),na.rm=TRUE) - recrate$recomb;
recrateRange <- c(0,max(c(100,recrate$recomb),na.rm=TRUE));
}
recrateRange <- rev(recrateRange);
print("recrateRange: ");
print(recrateRange);
# Index of the reference SNP within the (already subset) metal data.
refSnp <- as.character(args[['refsnp']]);
refidx <- match(refSnp, metal$MarkerName);
if (!args[['showRefsnpAnnot']]) {
metal$pch[refidx] <- 23; # use a diamond for ref snp
}
# Render the plot to each requested output format.  Page 1 is the zoom plot
# (zplot), page 2 the run-summary log page (grid.log); pdf is the primary,
# maintained path.
if ('pdf' %in% args[['format']]) {
pdf(file=args[['pdf']],width=args[['width']],height=args[['height']],version='1.4');
if ( prod(dim(metal)) == 0 ) {
message ('No data to plot.');
} else {
zplot(metal,ld,recrate,refidx,nrugs=nrugs,args=args,postlude=args[['postlude']]);
grid.newpage();
}
grid.log(args,metal);
dev.off();
}
#
# N.B. *** old png and tiff code no longer being maintained. No guarantees that this works anymore. ***
#
if ('png' %in% args[['format']]) {
# Raster devices here do not support partial transparency, so force every
# alpha setting to fully opaque before drawing.
args[['recombLineAlpha']] = 1;
args[['recombFillAlpha']] = 1;
args[['hiliteAlpha']] = 1;
args[['frameAlpha']]=1;
args[['hiAlpha']]=1;
args[['rugAlpha']] = 1;
args[['refsnpLineAlpha']] = 1;
args[['refsnpTextAlpha']]=1;
png(file=args[['png']],
width=args[['width']]*100,
height=args[['height']]*100);
if ( prod(dim(metal)) == 0 ) {
message ('No data to plot.');
} else {
# Export the alpha-adjusted args so later code sees the same settings.
assign("args",args,globalenv());
zplot(metal,ld,recrate,refidx,nrugs=nrugs,args=args,postlude=args[['postlude']]);
}
dev.off();
}
#
# N.B. *** old png and tiff code no longer being maintained. No guarantees that this works anymore. ***
#
if ('tiff' %in% args[['format']]) {
# Same opaque-alpha adjustments as the png branch.
args[['recombLineAlpha']] = 1;
args[['recombFillAlpha']] = 1;
args[['hiliteAlpha']] = 1;
args[['frameAlpha']]=1;
args[['hiAlpha']]=1;
args[['rugAlpha']] = 1;
args[['refsnpLineAlpha']] = 1;
args[['refsnpTextAlpha']]=1;
tiff(file=args[['tiff']],
width=args[['width']]*100,
height=args[['height']]*100);
if ( prod(dim(metal)) == 0 ) {
message ('No data to plot.');
} else {
assign("args",args,globalenv());
zplot(metal,ld,recrate,refidx,nrugs=nrugs,args=args,postlude=args[['postlude']]);
}
dev.off();
}
# Append the plain-text run summary and the in-region gene list to the log.
sink(args[['log']], append=TRUE);
grid.log(args,metal,ascii=TRUE);
cat('\n\n\n');
cat("List of genes in region\n");
cat("#######################\n");
geneList <- make.gene.list(refFlat,unit=args[['unit']]);
if (! is.null(geneList)) {
# Enough significant digits to show full-resolution stop positions.
digits <- 7 + ceiling(log10(max(geneList$stop)));
print(geneList,digits=digits);
}
cat('\n\n\n');
sink();
# Final checkpoint, then remove temporary files (CleanUp is a project
# helper; honours args$clean -- TODO confirm).
save(metal,refFlat,ld,recrate,refSnpPos,args,file='end.Rdata')
CleanUp(args,refSnpPos,recrate,rug,ld,refFlatRaw);
date();
|
# BSgenome package for representing full genomes in BioConductor
# (teaching/demo script; requires the Bioconductor BSgenome data packages
# below to be installed)
#biocLite("BSgenome", lib=bio)
library(BSgenome)
??BSgenome
available.genomes() # lists all the downloadable genomes in BioConductor website
installed.genomes() # list of available genomes in your computer
#biocLite("BSgenome.Scerevisiae.UCSC.sacCer1", lib=bio) # installing the genomes
library("BSgenome.Scerevisiae.UCSC.sacCer1") # getting the yeast genome --> we get an object type Scerevisiae
# object loaded in library -> view the sequence data
# nothing is loaded in library
seqnames(Scerevisiae) # get names of sequence
seqlengths(Scerevisiae) # get lengths of sequence
Scerevisiae$chr1 # loads a specific chromosome sequence into memory
class(Scerevisiae$chr1) # biostrings class
y1=Scerevisiae$chr1
length(y1) # chromosome length in bases
###### human
library("BSgenome.Hsapiens.UCSC.hg19")
seqnames(Hsapiens)
seqlengths(Hsapiens)
hs17=Hsapiens$chr17 # load human chromosome 17
length(hs17) | /exercises/Rcode-Day1/04 db3 - GenomesDB.r | no_license | tertiarycourses/BioConductorTraining | R | false | false | 989 | r | # BSgenome package for representing full genomes in BioConductor
#biocLite("BSgenome", lib=bio)
library(BSgenome)
??BSgenome
available.genomes() # lists all the downloadable genomes in BioConductor website
installed.genomes() # list of available genomes in your computer
#biocLite("BSgenome.Scerevisiae.UCSC.sacCer1", lib=bio) # installing the genomes
library("BSgenome.Scerevisiae.UCSC.sacCer1") # getting the yeast genome --> we get an object type Scerevisiae
# object loaded in library -> view the sequence data
# nothing is loaded in library
seqnames(Scerevisiae) # get names of sequence
seqlengths(Scerevisiae) # get lengths of sequence
Scerevisiae$chr1 # loaded in specific chrmosome sequence into memory
class(Scerevisiae$chr1) # biostrings class
y1=Scerevisiae$chr1
length(y1)
###### human
library("BSgenome.Hsapiens.UCSC.hg19")
seqnames(Hsapiens)
seqlengths(Hsapiens)
hs17=Hsapiens$chr17
length(hs17) |
# Exploratory plots for the differenced series.
# NOTE(review): `diffx`, `final_data` and `set_trend()` are not defined in
# this file -- they must come from an earlier script or the workspace;
# confirm they are in scope before sourcing this file.
library('ggplot2')
# Quick base-graphics look at the differenced series.
plot(diffx)
# Linear trend index; 212 is presumably the series length -- TODO confirm.
trend1 <- seq(1,212)
# Bind the trend index and the differenced series side by side.
plotdata <- cbind(trend1, diffx)
# Mortgages series plotted against the trend-transformed system series.
ggplot(data.frame(final_data), aes(x = set_trend(final_data$yt4_system), y = final_data$yt3_mortgages)) + geom_line()
| /plots.R | no_license | nikolakojadinovic/SUR_Models | R | false | false | 215 | r | library('ggplot2')
plot(diffx)
trend1 <- seq(1,212)
plotdata <- cbind(trend1, diffx)
ggplot(data.frame(final_data), aes(x = set_trend(final_data$yt4_system), y = final_data$yt3_mortgages)) + geom_line()
|
## Graphs the results of a run of stable_fit.
##
## l_in:    list with a $parameters component -- a data.frame of fitted
##          stable-distribution parameters, one row per fitting method.
## subhead: optional extra line appended to the plot title.
##
## Side effect: draws a ggplot of McCulloch's quantile statistics (contours)
## with the fitted (alpha, beta) points and a mono-spaced parameter legend.
##
## Changes vs the original: removed the unused `fit_mle` local; replaced a
## scalar ifelse() with if/else; renamed the local `type` (it shadowed the
## `type` column of df_gph used in aes()) and `title` (it shadowed
## graphics::title()); braced the multi-line if/else.
graph_stable_fit<-function(l_in,subhead=""){
  require(ggplot2)
  parameters<-l_in$parameters
  label_names<-c("alpha","beta","gamma","delta","two_ll_n","pm","n")
  # Coordinates and values for the parameter annotations drawn on the plot.
  df_label<-data.frame(x=rep(.2,7),
                       y=c(.05+(9:3)/10),
                       value=as.double(parameters[1,label_names]),
                       confint=rep(NA,7))
  label_var<-c("alpha","beta","gamma","delta","2*loglik/n","pm","n")
  df_label$label<-c(sprintf("%11s =%8.4f",label_var[1:5],df_label$value[1:5]),
                    sprintf("%11s =%8.0f",label_var[6:7],df_label$value[6:7]))
  # q_skew is contoured on its natural scale; all other statistics on log scale.
  df_gph<-cbind(RcppStable:::stable_table,
                ghp_value=with(RcppStable:::stable_table,ifelse(type=="q_skew", value,log(value))))
  # Two parameter rows means McCulloch's fit was followed by an MLE fit.
  fit_type <- if (nrow(parameters)==2) "mle" else "q"
  if (fit_type=="mle") {
    plot_title<-paste("Fit Using McCulloch's Method for Initial Fit",
                      "and then Maximum Likelihoods",subhead, sep="\n")
  } else {
    plot_title<-paste("Fit Using McCulloch's Method",subhead,sep="\n")
  }
  gph<-ggplot(data=df_gph)+
    labs(title=plot_title)+
    geom_contour(aes(x=alpha,y=beta,z=ghp_value,color=type))
  gph<-gph+
    ## geom_polygon(mapping=aes(x=alpha,y=beta),data=conf_ellipse,fill="grey")+
    geom_point(mapping=aes(x=alpha,y=beta,shape=method),data=parameters,
               color=I("black"))+
    geom_text(data=df_label,mapping=aes(x=x,y=y,label=label),
              color=I("blue"),hjust=0,family="mono")
  show(gph)
}
# Exercise stable_fit() on simulated stable samples and print the fitted
# parameters next to the true simulation inputs.
# library() (not require()) so a missing RcppStable fails loudly here.
library(RcppStable)
# Simulation grid: one stability index, two skewness values.
alphas <- c(1.5)
betas<-c(0,.5)
for (alpha in alphas){
  for (beta in betas){
    print(c(alpha,beta))
    n <- 10000
    # Fixed seed: every (alpha, beta) cell sees a reproducible sample.
    set.seed(100)
    xtst<-rstable(n,alpha,beta)
    # McCulloch-style quantile summary statistics of the sample.
    q<-quantile(xtst,p=c(.05,.25,.5,.75,.95))
    q_kurt<-(q[5]-q[1])/(q[4]-q[2])
    q_skew<-(q[5]+q[1]-2*q[3])/(q[5]-q[1])
    q_scale<-q[4]-q[2]
    q_location<-q[3]
    convergence <- NA
    # Reference row holding the true simulation parameters.
    # TRUE (not the reassignable alias T) for the log-likelihood flag.
    input_parameters<-data.frame(alpha=alpha,beta=beta,gamma=1,delta=0,pm=0,
               two_ll_n=2*sum(dstable.quick(xtst,alpha,beta,log=TRUE))/n,
               n=n,method="input",q_kurt,q_skew,q_scale,q_location,
               convergence,iterations=NA,cpu_time=NA)
    row.names(input_parameters)<-NULL
    sf_out<-stable_fit(xtst,type="q_mle")
    # True parameters stacked on top of the fitted ones.
    print(rbind(input_parameters,sf_out))
  }
}
| /tests/stable_fit-ex.R | permissive | JoeDunnStable/RcppStable | R | false | false | 2,337 | r | ## Graphs the results of a run of stable_fit
graph_stable_fit<-function(l_in,subhead=""){
require(ggplot2)
parameters<-l_in$parameters
fit_mle<-l_in$fit_mle
label_names<-c("alpha","beta","gamma","delta","two_ll_n","pm","n")
df_label<-data.frame(x=rep(.2,7),
y=c(.05+(9:3)/10),
value=as.double(parameters[1,label_names]),
confint=rep(NA,7))
label_var<-c("alpha","beta","gamma","delta","2*loglik/n","pm","n")
df_label$label<-c(sprintf("%11s =%8.4f",label_var[1:5],df_label$value[1:5]),
sprintf("%11s =%8.0f",label_var[6:7],df_label$value[6:7]))
df_gph<-cbind(RcppStable:::stable_table,
ghp_value=with(RcppStable:::stable_table,ifelse(type=="q_skew", value,log(value))))
type=ifelse(nrow(parameters)==2,"mle","q")
if (type=="mle")
title<-paste("Fit Using McCulloch's Method for Initial Fit",
"and then Maximum Likelihoods",subhead, sep="\n")
else
title<-paste("Fit Using McCulloch's Method",subhead,sep="\n")
gph<-ggplot(data=df_gph)+
labs(title=title)+
geom_contour(aes(x=alpha,y=beta,z=ghp_value,color=type))
gph<-gph+
## geom_polygon(mapping=aes(x=alpha,y=beta),data=conf_ellipse,fill="grey")+
geom_point(mapping=aes(x=alpha,y=beta,shape=method),data=parameters,
color=I("black"))+
geom_text(data=df_label,mapping=aes(x=x,y=y,label=label),
color=I("blue"),hjust=0,family="mono")
show(gph)
}
require(RcppStable)
alphas=c(1.5)
betas<-c(0,.5)
for (alpha in alphas){
for (beta in betas){
print(c(alpha,beta))
n=10000
set.seed(100)
xtst<-rstable(n,alpha,beta)
q<-quantile(xtst,p=c(.05,.25,.5,.75,.95))
q_kurt<-(q[5]-q[1])/(q[4]-q[2])
q_skew<-(q[5]+q[1]-2*q[3])/(q[5]-q[1])
q_scale<-q[4]-q[2]
q_location<-q[3]
convergence=NA
input_parameters<-data.frame(alpha=alpha,beta=beta,gamma=1,delta=0,pm=0,
two_ll_n=2*sum(dstable.quick(xtst,alpha,beta,log=T))/n,
n=n,method="input",q_kurt,q_skew,q_scale,q_location,
convergence,iterations=NA,cpu_time=NA)
row.names(input_parameters)<-NULL
sf_out<-stable_fit(xtst,type="q_mle")
print(rbind(input_parameters,sf_out))
}
}
|
# Getting & Cleaning Data course project: download the UCI HAR dataset,
# merge the train/test partitions, keep the mean/std measurements, attach
# readable labels, and write a tidy per-subject/per-activity summary.
#
# dplyr was used below (%>%, select, group_by, summarise_all) but never
# loaded -- the script only worked if something else had attached it.
library(dplyr)
# One relative path for both steps fixes the original mismatch where the
# zip was downloaded to an absolute path while unzip() looked in getwd().
zipfile <- "File.zip"
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", destfile = zipfile, method = "curl")
unzip(zipfile)
# Feature names and activity labels shared by both partitions.
Features <- read.table("UCI HAR Dataset/features.txt", col.names = c("N","Functions"))
Activities <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("Code", "Activity"))
Subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "Subject")
X_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = Features$Functions)
Y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "Code")
Subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "Subject")
X_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = Features$Functions)
Y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "Code")
# Stack train on top of test, then bind subject, activity code and features.
x <- rbind(X_train, X_test)
y <- rbind(Y_train, Y_test)
subject <- rbind(Subject_train, Subject_test)
MergedData <- cbind(subject, y, x)
# Keep only mean/std measurements, translate activity codes to names.
CleanData <- MergedData %>% select(Subject, Code, contains("mean"), contains("std"))
CleanData$Code <- Activities[CleanData$Code, 2]
names(CleanData)[2] <- "activity"
# Expand terse feature-name abbreviations into readable labels.
names(CleanData)<-gsub("Acc", "Accelerometer", names(CleanData))
names(CleanData)<-gsub("Gyro", "Gyroscope", names(CleanData))
names(CleanData)<-gsub("BodyBody", "Body", names(CleanData))
names(CleanData)<-gsub("Mag", "Magnitude", names(CleanData))
names(CleanData)<-gsub("^t", "Time", names(CleanData))
names(CleanData)<-gsub("^f", "Frequency", names(CleanData))
names(CleanData)<-gsub("tBody", "TimeBody", names(CleanData))
names(CleanData)<-gsub("-mean()", "Mean", names(CleanData), ignore.case = TRUE)
names(CleanData)<-gsub("-std()", "STD", names(CleanData), ignore.case = TRUE)
names(CleanData)<-gsub("-freq()", "Frequency", names(CleanData), ignore.case = TRUE)
names(CleanData)<-gsub("angle", "Angle", names(CleanData))
names(CleanData)<-gsub("gravity", "Gravity", names(CleanData))
# Tidy output: mean of every variable per subject/activity pair.
# summarise_all(mean) replaces the deprecated funs(mean) wrapper.
Final <- CleanData %>% group_by(Subject, activity) %>% summarise_all(mean)
# Full argument name (the original `row.name` relied on partial matching).
write.table(Final, "Final.txt", row.names = FALSE)
| /run_analysis.R | no_license | Nikola-web/Getting-and-Cleaning-Data-Course-Project | R | false | false | 2,159 | r | download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", destfile = "/Users/iakobnikoleishvili/Desktop/Data Science/R/Directory for Class/File.zip", method = "curl")
unzip("File.zip")
Features <- read.table("UCI HAR Dataset/features.txt", col.names = c("N","Functions"))
Activities <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("Code", "Activity"))
Subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "Subject")
X_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = Features$Functions)
Y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "Code")
Subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "Subject")
X_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = Features$Functions)
Y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "Code")
x <- rbind(X_train, X_test)
y <- rbind(Y_train, Y_test)
subject <- rbind(Subject_train, Subject_test)
MergedData <- cbind(subject, y, x)
CleanData <- MergedData %>% select(Subject, Code, contains("mean"), contains("std"))
CleanData$Code <- Activities[CleanData$Code, 2]
names(CleanData)[2] = "activity"
names(CleanData)<-gsub("Acc", "Accelerometer", names(CleanData))
names(CleanData)<-gsub("Gyro", "Gyroscope", names(CleanData))
names(CleanData)<-gsub("BodyBody", "Body", names(CleanData))
names(CleanData)<-gsub("Mag", "Magnitude", names(CleanData))
names(CleanData)<-gsub("^t", "Time", names(CleanData))
names(CleanData)<-gsub("^f", "Frequency", names(CleanData))
names(CleanData)<-gsub("tBody", "TimeBody", names(CleanData))
names(CleanData)<-gsub("-mean()", "Mean", names(CleanData), ignore.case = TRUE)
names(CleanData)<-gsub("-std()", "STD", names(CleanData), ignore.case = TRUE)
names(CleanData)<-gsub("-freq()", "Frequency", names(CleanData), ignore.case = TRUE)
names(CleanData)<-gsub("angle", "Angle", names(CleanData))
names(CleanData)<-gsub("gravity", "Gravity", names(CleanData))
Final <- CleanData %>% group_by(Subject, activity) %>% summarise_all(funs(mean))
write.table(Final, "Final.txt", row.name=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RenderSql.R
\name{splitSql}
\alias{splitSql}
\title{Split a single SQL string into one or more SQL statements}
\usage{
splitSql(sql)
}
\arguments{
\item{sql}{The SQL string to split into separate statements}
}
\value{
A vector of strings, one for each SQL statement
}
\description{
\code{splitSql} splits a string containing multiple SQL statements into a vector of SQL statements
}
\details{
This function is needed because some DBMSs (like ORACLE) do not accept multiple SQL statements
being sent as one execution.
}
\examples{
splitSql("SELECT * INTO a FROM b; USE x; DROP TABLE c;")
}
| /man/splitSql.Rd | permissive | OHDSI/SqlRender | R | false | true | 668 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RenderSql.R
\name{splitSql}
\alias{splitSql}
\title{Split a single SQL string into one or more SQL statements}
\usage{
splitSql(sql)
}
\arguments{
\item{sql}{The SQL string to split into separate statements}
}
\value{
A vector of strings, one for each SQL statement
}
\description{
\code{splitSql} splits a string containing multiple SQL statements into a vector of SQL statements
}
\details{
This function is needed because some DBMSs (like ORACLE) do not accept multiple SQL statements
being sent as one execution.
}
\examples{
splitSql("SELECT * INTO a FROM b; USE x; DROP TABLE c;")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/schemas_operations.R
\name{schemas_update_schema}
\alias{schemas_update_schema}
\title{Updates the schema definition Inactive schemas will be deleted after two
years}
\usage{
schemas_update_schema(ClientTokenId, Content, Description, RegistryName,
SchemaName, Type)
}
\arguments{
\item{ClientTokenId}{The ID of the client token.}
\item{Content}{The source of the schema definition.}
\item{Description}{The description of the schema.}
\item{RegistryName}{[required] The name of the registry.}
\item{SchemaName}{[required] The name of the schema.}
\item{Type}{The schema type for the events schema.}
}
\description{
Updates the schema definition
Inactive schemas will be deleted after two years.
}
\section{Request syntax}{
\preformatted{svc$update_schema(
ClientTokenId = "string",
Content = "string",
Description = "string",
RegistryName = "string",
SchemaName = "string",
Type = "OpenApi3"|"JSONSchemaDraft4"
)
}
}
\keyword{internal}
| /paws/man/schemas_update_schema.Rd | permissive | sanchezvivi/paws | R | false | true | 1,034 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/schemas_operations.R
\name{schemas_update_schema}
\alias{schemas_update_schema}
\title{Updates the schema definition Inactive schemas will be deleted after two
years}
\usage{
schemas_update_schema(ClientTokenId, Content, Description, RegistryName,
SchemaName, Type)
}
\arguments{
\item{ClientTokenId}{The ID of the client token.}
\item{Content}{The source of the schema definition.}
\item{Description}{The description of the schema.}
\item{RegistryName}{[required] The name of the registry.}
\item{SchemaName}{[required] The name of the schema.}
\item{Type}{The schema type for the events schema.}
}
\description{
Updates the schema definition
Inactive schemas will be deleted after two years.
}
\section{Request syntax}{
\preformatted{svc$update_schema(
ClientTokenId = "string",
Content = "string",
Description = "string",
RegistryName = "string",
SchemaName = "string",
Type = "OpenApi3"|"JSONSchemaDraft4"
)
}
}
\keyword{internal}
|
postscript("HV.Boxplot.eps", horizontal=FALSE, onefile=FALSE, height=8, width=12, pointsize=10)
resultDirectory<-"C:\\Users\\bruno\\git\\ES1-2017-METIA1-47\\experimentBaseDirectory\\AntiSpamStudy\\data";
"
# Read the quality-indicator values computed for one algorithm/problem pair
# and draw a (notch-free) boxplot of them, titled "<indicator>:<problem>".
#
# indicator: quality-indicator name (e.g. "HV"); also the file name read.
# problem:   problem name; also a subdirectory name.
#
# Depends on the global `resultDirectory`; the file read is
# <resultDirectory>/NSGAII/<problem>/<indicator>.
qIndicator <- function(indicator, problem)
{
  # file.path() joins with "/" exactly as the original three chained
  # paste(sep="/") calls did, in one readable step.
  indicatorFile <- file.path(resultDirectory, "NSGAII", problem, indicator)
  values <- scan(indicatorFile)
  algs <- c("NSGAII")
  boxplot(values, names = algs, notch = FALSE)
  plotTitle <- paste(indicator, problem, sep = ":")
  title(main = plotTitle)
}
par(mfrow=c(1,1))
indicator<-"HV"
qIndicator(indicator, "AntiSpamFilterProblem")
| /experimentBaseDirectory/AntiSpamStudy/R/HV.Boxplot.R | no_license | bmgmo-iscteiulpt/ES1-2017-METIA1-47 | R | false | false | 638 | r | postscript("HV.Boxplot.eps", horizontal=FALSE, onefile=FALSE, height=8, width=12, pointsize=10)
resultDirectory<-"C:\\Users\\bruno\\git\\ES1-2017-METIA1-47\\experimentBaseDirectory\\AntiSpamStudy\\data";
"
qIndicator <- function(indicator, problem)
{
fileNSGAII<-paste(resultDirectory, "NSGAII", sep="/")
fileNSGAII<-paste(fileNSGAII, problem, sep="/")
fileNSGAII<-paste(fileNSGAII, indicator, sep="/")
NSGAII<-scan(fileNSGAII)
algs<-c("NSGAII")
boxplot(NSGAII,names=algs, notch = FALSE)
titulo <-paste(indicator, problem, sep=":")
title(main=titulo)
}
par(mfrow=c(1,1))
indicator<-"HV"
qIndicator(indicator, "AntiSpamFilterProblem")
|
#' @include generics.R
#'
NULL
#' @param object A Seurat object, ChromatinAssay object, Fragment object, or the
#' path to fragment file/s.
#' @param assay Name of assay to use
#' @param group.by Grouping variable to use. If set, peaks will be called
#' independently on each group of cells and then combined. Note that to call
#' peaks using subsets of cells we first split the fragment file/s used, so
#' using a grouping variable will require extra time to split the files and
#' perform multiple MACS peak calls, and will store additional files on-disk
#' that may be large. Note that we store split fragment files in the temp
#' directory, and if the program is interrupted before completing these
#' temporary files will not be removed. If NULL, peaks are called using all
#' cells together (pseudobulk).
#' @param idents List of identities to include if grouping cells (only valid if
#' also setting the \code{group.by} parameter). If NULL, peaks will be called
#' for all cell identities.
#' @param macs2.path Path to MACS program. If NULL, try to find MACS
#' automatically.
#' @param combine.peaks Controls whether peak calls from different groups of
#' cells are combined using \code{GenomicRanges::reduce} when calling peaks for
#' different groups of cells (\code{group.by} parameter). If FALSE, a list of
#' \code{GRanges} object will be returned. Note that metadata fields such as the
#' p-value, q-value, and fold-change information for each peak will be lost if
#' combining peaks.
#' @param broad Call broad peaks (\code{--broad} parameter for MACS)
#' @param outdir Path for output files
#' @param effective.genome.size Effective genome size parameter for MACS
#' (\code{-g}). Default is the human effective genome size (2.7e9).
#' @param extsize \code{extsize} parameter for MACS.
#' @param shift \code{shift} parameter for MACS.
#' @param additional.args Additional arguments passed to MACS. This should be a
#' single character string
#' @param name Name for output MACS files. This will also be placed in the
#' \code{name} field in the GRanges output.
#' @param cleanup Remove MACS output files
#' @param verbose Display messages
#' @param ... Arguments passed to other methods
#'
#' @method CallPeaks Seurat
#' @rdname CallPeaks
#'
#' @concept quantification
#'
#' @importFrom Seurat DefaultAssay
#' @importFrom Seurat Project
#' @importFrom GenomicRanges reduce
#'
#' @export
CallPeaks.Seurat <- function(
  object,
  assay = NULL,
  group.by = NULL,
  idents = NULL,
  macs2.path = NULL,
  broad = FALSE,
  outdir = tempdir(),
  combine.peaks = TRUE,
  effective.genome.size = 2.7e9,
  extsize = 200,
  shift = -extsize/2,
  additional.args = NULL,
  name = Project(object),
  cleanup = TRUE,
  verbose = TRUE,
  ...
) {
  # Fall back to the object's default assay when none is supplied.
  assay <- SetIfNull(x = assay, y = DefaultAssay(object = object))
  if (!is.null(x = group.by)) {
    # Grouped mode: split the fragment file(s) per cell group, call peaks on
    # each split file, then (optionally) merge the per-group peak sets.
    # first check macs2 path before we spend time splitting the files
    macs2.path <- SetIfNull(
      x = macs2.path,
      y = unname(obj = Sys.which(names = "macs2"))
    )
    if (nchar(x = macs2.path) == 0) {
      # NOTE(review): these two strings concatenate without a space, so the
      # message renders as "MACS:https://..." -- consider adding a space.
      stop("MACS2 not found. Please install MACS:",
           "https://macs3-project.github.io/MACS/")
    }
    # split fragment files
    SplitFragments(
      object = object,
      assay = assay,
      group.by = group.by,
      idents = idents,
      outdir = tempdir(),
      verbose = verbose
    )
    # work out what all the file paths to use are
    groups <- GetGroups(
      object = object,
      group.by = group.by,
      idents = idents
    )
    # Spaces are replaced with underscores to match the file names written
    # by SplitFragments above.
    groups <- gsub(pattern = " ", replacement = "_", x = groups)
    unique.groups <- unique(x = groups)
    # call peaks on each split fragment file separately
    grlist <- list()
    for (i in seq_along(along.with = unique.groups)) {
      # assumes SplitFragments wrote one <group>.bed per group into
      # tempdir() -- TODO confirm against SplitFragments' naming scheme
      fragpath <- paste0(
        tempdir(),
        .Platform$file.sep,
        unique.groups[[i]],
        ".bed"
      )
      # Delegate the actual peak call to the character-path method.
      gr <- CallPeaks(
        object = fragpath,
        macs2.path = macs2.path,
        outdir = outdir,
        broad = broad,
        effective.genome.size = effective.genome.size,
        extsize = extsize,
        shift = shift,
        additional.args = additional.args,
        name = name,
        cleanup = cleanup,
        verbose = verbose
      )
      # remove split fragment file from temp dir
      file.remove(fragpath)
      # add ident
      if (length(x = gr) > 0) {
        gr$ident <- unique.groups[[i]]
        grlist[[i]] <- gr
      } else {
        message("No peaks found for ", unique.groups[[i]])
      }
    }
    if (combine.peaks) {
      # combine peaks and reduce, maintaining ident information
      gr.combined <- Reduce(f = c, x = grlist)
      # with.revmap records, for each reduced range, which input ranges it
      # came from; used below to record which group(s) called each peak.
      gr <- reduce(x = gr.combined, with.revmap = TRUE)
      dset.vec <- vector(mode = "character", length = length(x = gr))
      ident.vec <- gr.combined$ident
      revmap <- gr$revmap
      for (i in seq_len(length.out = length(x = gr))) {
        datasets <- ident.vec[revmap[[i]]]
        dset.vec[[i]] <- paste(unique(x = datasets), collapse = ",")
      }
      gr$peak_called_in <- dset.vec
      gr$revmap <- NULL
    } else {
      # Return the per-group peak sets unmerged, as a list of GRanges.
      gr <- grlist
    }
  } else {
    # Pseudobulk mode: one peak call over all cells, delegated to the
    # ChromatinAssay method.
    gr <- CallPeaks(
      object = object[[assay]],
      macs2.path = macs2.path,
      outdir = outdir,
      broad = broad,
      effective.genome.size = effective.genome.size,
      extsize = extsize,
      shift = shift,
      additional.args = additional.args,
      name = name,
      cleanup = cleanup,
      verbose = verbose,
      ...
    )
  }
  return(gr)
}
#' @method CallPeaks ChromatinAssay
#' @rdname CallPeaks
#' @concept quantification
#' @export
CallPeaks.ChromatinAssay <- function(
  object,
  macs2.path = NULL,
  outdir = tempdir(),
  broad = FALSE,
  effective.genome.size = 2.7e9,
  extsize = 200,
  shift = -extsize/2,
  additional.args = NULL,
  name = "macs2",
  cleanup = TRUE,
  verbose = TRUE,
  ...
) {
  # get fragment files
  frags <- Fragments(object = object)
  # get all fragment file paths; vapply() guarantees a character vector even
  # for zero fragment objects, where sapply() would silently return a list
  allfragpaths <- vapply(
    X = frags,
    FUN = GetFragmentData,
    FUN.VALUE = character(1),
    slot = "path"
  )
  # collapse to a single space-separated string (the default method passes
  # it straight to MACS's -t option, which accepts multiple files)
  allfragpaths <- Reduce(f = paste, x = allfragpaths)
  gr <- CallPeaks(
    object = allfragpaths,
    macs2.path = macs2.path,
    outdir = outdir,
    broad = broad,
    effective.genome.size = effective.genome.size,
    extsize = extsize,
    shift = shift,
    additional.args = additional.args,
    name = name,
    cleanup = cleanup,
    verbose = verbose,
    ...
  )
  return(gr)
}
#' @method CallPeaks Fragment
#' @rdname CallPeaks
#' @concept quantification
#' @export
CallPeaks.Fragment <- function(
  object,
  macs2.path = NULL,
  outdir = tempdir(),
  broad = FALSE,
  effective.genome.size = 2.7e9,
  extsize = 200,
  shift = -extsize/2,
  additional.args = NULL,
  name = "macs2",
  cleanup = TRUE,
  verbose = TRUE,
  ...
) {
  # Look up the on-disk fragment file path stored in the Fragment object and
  # delegate the peak call to the character-path (default) method, forwarding
  # every option unchanged.
  return(CallPeaks(
    object = GetFragmentData(object = object, slot = "path"),
    macs2.path = macs2.path,
    outdir = outdir,
    broad = broad,
    effective.genome.size = effective.genome.size,
    extsize = extsize,
    shift = shift,
    additional.args = additional.args,
    name = name,
    cleanup = cleanup,
    verbose = verbose,
    ...
  ))
}
#' @importFrom GenomicRanges makeGRangesFromDataFrame
#' @method CallPeaks default
#' @rdname CallPeaks
#' @concept quantification
#' @export
CallPeaks.default <- function(
  object,
  macs2.path = NULL,
  outdir = tempdir(),
  broad = FALSE,
  effective.genome.size = 2.7e9,
  extsize = 200,
  shift = -extsize/2,
  additional.args = NULL,
  name = "macs2",
  cleanup = TRUE,
  verbose = TRUE,
  ...
) {
  # find macs2 on the PATH if no explicit path was supplied
  macs2.path <- SetIfNull(
    x = macs2.path,
    y = unname(obj = Sys.which(names = "macs2"))
  )
  if (nchar(x = macs2.path) == 0) {
    # trailing space added: stop() concatenates its arguments directly, so
    # the original message rendered as "MACS:https://..."
    stop("MACS2 not found. Please install MACS: ",
         "https://macs3-project.github.io/MACS/")
  }
  # if list of paths given, collapse to a single space-separated string
  if (length(x = object) > 1) {
    object <- Reduce(f = paste, x = object)
  }
  # plain if/else rather than ifelse(): `broad` is a scalar flag
  broadstring <- if (broad) " --broad " else ""
  cmd <- paste0(
    macs2.path,
    " callpeak -t ",
    object,
    " -g ",
    as.character(x = effective.genome.size),
    broadstring,
    " -f BED --nomodel --extsize ",
    as.character(x = extsize),
    " --shift ",
    as.character(x = shift),
    " -n ",
    as.character(x = name),
    " --outdir ",
    outdir,
    " ",
    additional.args
  )
  # call macs2. NOTE(review): paths are not shell-quoted because `object`
  # may deliberately hold several space-separated paths; paths containing
  # spaces will therefore break -- confirm upstream guarantees.
  system(
    command = cmd,
    wait = TRUE,
    ignore.stderr = !verbose,
    ignore.stdout = !verbose
  )
  if (broad) {
    # read in broadpeak
    df <- read.table(
      file = paste0(outdir, .Platform$file.sep, name, "_peaks.broadPeak"),
      col.names = c("chr", "start", "end", "name",
                    "score", "strand", "fold_change",
                    "neg_log10pvalue_summit", "neg_log10qvalue_summit")
    )
    files.to.remove <- paste0(
      name,
      c("_peaks.broadPeak", "_peaks.xls", "_peaks.gappedPeak")
    )
  } else {
    # read in narrowpeak file
    df <- read.table(
      file = paste0(outdir, .Platform$file.sep, name, "_peaks.narrowPeak"),
      col.names = c("chr", "start", "end", "name",
                    "score", "strand", "fold_change",
                    "neg_log10pvalue_summit", "neg_log10qvalue_summit",
                    "relative_summit_position")
    )
    files.to.remove <- paste0(
      name,
      c("_peaks.narrowPeak", "_peaks.xls", "_summits.bed")
    )
  }
  gr <- makeGRangesFromDataFrame(df = df, keep.extra.columns = TRUE)
  if (cleanup) {
    # vectorised cleanup of whichever intermediate files actually exist,
    # replacing the original element-by-element loop
    files.to.remove <- paste0(outdir, .Platform$file.sep, files.to.remove)
    existing <- files.to.remove[file.exists(files.to.remove)]
    if (length(x = existing) > 0) {
      file.remove(existing)
    }
  }
  return(gr)
}
| /fuzzedpackages/Signac/R/peaks.R | no_license | akhikolla/testpackages | R | false | false | 9,740 | r | #' @include generics.R
#'
NULL
#' @param object A Seurat object, ChromatinAssay object, Fragment object, or the
#' path to fragment file/s.
#' @param assay Name of assay to use
#' @param group.by Grouping variable to use. If set, peaks will be called
#' independently on each group of cells and then combined. Note that to call
#' peaks using subsets of cells we first split the fragment file/s used, so
#' using a grouping variable will require extra time to split the files and
#' perform multiple MACS peak calls, and will store additional files on-disk
#' that may be large. Note that we store split fragment files in the temp
#' directory, and if the program is interrupted before completing these
#' temporary files will not be removed. If NULL, peaks are called using all
#' cells together (pseudobulk).
#' @param idents List of identities to include if grouping cells (only valid if
#' also setting the \code{group.by} parameter). If NULL, peaks will be called
#' for all cell identities.
#' @param macs2.path Path to MACS program. If NULL, try to find MACS
#' automatically.
#' @param combine.peaks Controls whether peak calls from different groups of
#' cells are combined using \code{GenomicRanges::reduce} when calling peaks for
#' different groups of cells (\code{group.by} parameter). If FALSE, a list of
#' \code{GRanges} object will be returned. Note that metadata fields such as the
#' p-value, q-value, and fold-change information for each peak will be lost if
#' combining peaks.
#' @param broad Call broad peaks (\code{--broad} parameter for MACS)
#' @param outdir Path for output files
#' @param effective.genome.size Effective genome size parameter for MACS
#' (\code{-g}). Default is the human effective genome size (2.7e9).
#' @param extsize \code{extsize} parameter for MACS.
#' @param shift \code{shift} parameter for MACS.
#' @param additional.args Additional arguments passed to MACS. This should be a
#' single character string
#' @param name Name for output MACS files. This will also be placed in the
#' \code{name} field in the GRanges output.
#' @param cleanup Remove MACS output files
#' @param verbose Display messages
#' @param ... Arguments passed to other methods
#'
#' @method CallPeaks Seurat
#' @rdname CallPeaks
#'
#' @concept quantification
#'
#' @importFrom Seurat DefaultAssay
#' @importFrom Seurat Project
#' @importFrom GenomicRanges reduce
#'
#' @export
CallPeaks.Seurat <- function(
object,
assay = NULL,
group.by = NULL,
idents = NULL,
macs2.path = NULL,
broad = FALSE,
outdir = tempdir(),
combine.peaks = TRUE,
effective.genome.size = 2.7e9,
extsize = 200,
shift = -extsize/2,
additional.args = NULL,
name = Project(object),
cleanup = TRUE,
verbose = TRUE,
...
) {
assay <- SetIfNull(x = assay, y = DefaultAssay(object = object))
if (!is.null(x = group.by)) {
# first check macs2 path before we spend time splitting the files
macs2.path <- SetIfNull(
x = macs2.path,
y = unname(obj = Sys.which(names = "macs2"))
)
if (nchar(x = macs2.path) == 0) {
stop("MACS2 not found. Please install MACS:",
"https://macs3-project.github.io/MACS/")
}
# split fragment files
SplitFragments(
object = object,
assay = assay,
group.by = group.by,
idents = idents,
outdir = tempdir(),
verbose = verbose
)
# work out what all the file paths to use are
groups <- GetGroups(
object = object,
group.by = group.by,
idents = idents
)
groups <- gsub(pattern = " ", replacement = "_", x = groups)
unique.groups <- unique(x = groups)
# call peaks on each split fragment file separately
grlist <- list()
for (i in seq_along(along.with = unique.groups)) {
fragpath <- paste0(
tempdir(),
.Platform$file.sep,
unique.groups[[i]],
".bed"
)
gr <- CallPeaks(
object = fragpath,
macs2.path = macs2.path,
outdir = outdir,
broad = broad,
effective.genome.size = effective.genome.size,
extsize = extsize,
shift = shift,
additional.args = additional.args,
name = name,
cleanup = cleanup,
verbose = verbose
)
# remove split fragment file from temp dir
file.remove(fragpath)
# add ident
if (length(x = gr) > 0) {
gr$ident <- unique.groups[[i]]
grlist[[i]] <- gr
} else {
message("No peaks found for ", unique.groups[[i]])
}
}
if (combine.peaks) {
# combine peaks and reduce, maintaining ident information
gr.combined <- Reduce(f = c, x = grlist)
gr <- reduce(x = gr.combined, with.revmap = TRUE)
dset.vec <- vector(mode = "character", length = length(x = gr))
ident.vec <- gr.combined$ident
revmap <- gr$revmap
for (i in seq_len(length.out = length(x = gr))) {
datasets <- ident.vec[revmap[[i]]]
dset.vec[[i]] <- paste(unique(x = datasets), collapse = ",")
}
gr$peak_called_in <- dset.vec
gr$revmap <- NULL
} else {
gr <- grlist
}
} else {
gr <- CallPeaks(
object = object[[assay]],
macs2.path = macs2.path,
outdir = outdir,
broad = broad,
effective.genome.size = effective.genome.size,
extsize = extsize,
shift = shift,
additional.args = additional.args,
name = name,
cleanup = cleanup,
verbose = verbose,
...
)
}
return(gr)
}
#' @method CallPeaks ChromatinAssay
#' @rdname CallPeaks
#' @concept quantification
#' @export
CallPeaks.ChromatinAssay <- function(
object,
macs2.path = NULL,
outdir = tempdir(),
broad = FALSE,
effective.genome.size = 2.7e9,
extsize = 200,
shift = -extsize/2,
additional.args = NULL,
name = "macs2",
cleanup = TRUE,
verbose = TRUE,
...
) {
# get fragment files
frags <- Fragments(object = object)
# get all fragment file paths
allfragpaths <- sapply(X = frags, FUN = GetFragmentData, slot = "path")
allfragpaths <- Reduce(f = paste, x = allfragpaths)
gr <- CallPeaks(
object = allfragpaths,
macs2.path = macs2.path,
outdir = outdir,
broad = broad,
effective.genome.size = effective.genome.size,
extsize = extsize,
shift = shift,
additional.args = additional.args,
name = name,
cleanup = cleanup,
verbose = verbose,
...
)
return(gr)
}
#' @method CallPeaks Fragment
#' @rdname CallPeaks
#' @concept quantification
#' @export
CallPeaks.Fragment <- function(
object,
macs2.path = NULL,
outdir = tempdir(),
broad = FALSE,
effective.genome.size = 2.7e9,
extsize = 200,
shift = -extsize/2,
additional.args = NULL,
name = "macs2",
cleanup = TRUE,
verbose = TRUE,
...
) {
fragpath <- GetFragmentData(object = object, slot = "path")
gr <- CallPeaks(
object = fragpath,
macs2.path = macs2.path,
outdir = outdir,
broad = broad,
effective.genome.size = effective.genome.size,
extsize = extsize,
shift = shift,
additional.args = additional.args,
name = name,
cleanup = cleanup,
verbose = verbose,
...
)
return(gr)
}
#' @importFrom GenomicRanges makeGRangesFromDataFrame
#' @method CallPeaks default
#' @rdname CallPeaks
#' @concept quantification
#' @export
CallPeaks.default <- function(
  object,
  macs2.path = NULL,
  outdir = tempdir(),
  broad = FALSE,
  effective.genome.size = 2.7e9,
  extsize = 200,
  shift = -extsize/2,
  additional.args = NULL,
  name = "macs2",
  cleanup = TRUE,
  verbose = TRUE,
  ...
) {
  # Locate the MACS2 executable: use the supplied path, otherwise search PATH.
  macs2.path <- SetIfNull(
    x = macs2.path,
    y = unname(obj = Sys.which(names = "macs2"))
  )
  if (nchar(x = macs2.path) == 0) {
    stop("MACS2 not found. Please install MACS:",
         "https://macs3-project.github.io/MACS/")
  }
  # If a vector of fragment-file paths is given, collapse to one
  # space-separated string (MACS2 accepts multiple inputs after -t).
  if (length(x = object) > 1) {
    object <- paste(object, collapse = " ")
  }
  # Scalar condition: plain if/else, not vectorized ifelse().
  broadstring <- if (broad) " --broad " else ""
  cmd <- paste0(
    macs2.path,
    " callpeak -t ",
    object,
    " -g ",
    as.character(x = effective.genome.size),
    broadstring,
    " -f BED --nomodel --extsize ",
    as.character(x = extsize),
    " --shift ",
    as.character(x = shift),
    " -n ",
    as.character(x = name),
    " --outdir ",
    outdir,
    " ",
    additional.args
  )
  # Run MACS2.  NOTE(review): arguments are interpolated into the shell
  # command unquoted; paths containing spaces or shell metacharacters
  # will break or be misinterpreted.
  system(
    command = cmd,
    wait = TRUE,
    ignore.stderr = !verbose,
    ignore.stdout = !verbose
  )
  # MACS2 output file name and column layout depend on broad/narrow mode.
  if (broad) {
    peak.file <- file.path(outdir, paste0(name, "_peaks.broadPeak"))
    peak.cols <- c("chr", "start", "end", "name",
                   "score", "strand", "fold_change",
                   "neg_log10pvalue_summit", "neg_log10qvalue_summit")
    files.to.remove <- paste0(
      name,
      c("_peaks.broadPeak", "_peaks.xls", "_peaks.gappedPeak")
    )
  } else {
    peak.file <- file.path(outdir, paste0(name, "_peaks.narrowPeak"))
    peak.cols <- c("chr", "start", "end", "name",
                   "score", "strand", "fold_change",
                   "neg_log10pvalue_summit", "neg_log10qvalue_summit",
                   "relative_summit_position")
    files.to.remove <- paste0(
      name,
      c("_peaks.narrowPeak", "_peaks.xls", "_summits.bed")
    )
  }
  # Fail with a clear message if MACS2 produced no output (previously
  # read.table() would raise a confusing "cannot open file" error).
  if (!file.exists(peak.file)) {
    stop("MACS2 output file not found: ", peak.file,
         ". Check the MACS2 log for errors.", call. = FALSE)
  }
  df <- read.table(file = peak.file, col.names = peak.cols)
  gr <- makeGRangesFromDataFrame(df = df, keep.extra.columns = TRUE)
  if (cleanup) {
    # Remove the intermediate MACS2 output files that were read in above.
    files.to.remove <- file.path(outdir, files.to.remove)
    for (i in files.to.remove) {
      if (file.exists(i)) {
        file.remove(i)
      }
    }
  }
  return(gr)
}
|
## Functions ***************************** ####
# Return all unordered pairs of the unique values of `x`.
#
# Args:
#   x: atomic vector or factor; values are compared after coercion to
#      character (matching the original behavior).
# Returns:
#   A list of choose(k, 2) character vectors of length 2, where k is the
#   number of unique values; an empty list when k < 2.  Pairs are
#   enumerated in the same order as the original nested loops:
#   (1,2), (1,3), ..., (2,3), ...
get_pairs <- function(x) {
  unique_x <- as.character(unique(x))
  # Guard: the old `1:(k - 1)` loop counted DOWN when k < 2 and produced
  # errors / bogus pairs; fewer than two distinct values means no pairs.
  if (length(unique_x) < 2) {
    return(list())
  }
  # combn() replaces the hand-rolled double loop (and the unused
  # `save_class` variable has been dropped).
  combn(unique_x, 2, simplify = FALSE)
}
# Function to get pairwise starting points and abundances.
# For a pair of taxa, pull the interval start abundances and the
# per-unit-time changes out of the long-format change table and lay them
# out as x/y columns of one data.frame.  Rows of the two taxa are
# assumed to line up pairwise (same subjects, same intervals).
get_pairwise_df <- function(delta_df, otu1, otu2) {
  # Subset once per taxon instead of once per column.
  rows_x <- delta_df %>% filter(otu == otu1)
  rows_y <- delta_df %>% filter(otu == otu2)
  data.frame(
    x_start  = rows_x %>% pull(start),
    x_change = rows_x %>% pull(unit_rate),
    y_start  = rows_y %>% pull(start),
    y_change = rows_y %>% pull(unit_rate),
    id       = 1:nrow(rows_x)
  )
}
# Set up grid: a thin wrapper around expand.grid.
# Build a rectangular grid of (x, y) points covering the given ranges.
#
# Args:
#   x_range, y_range: length-2 numeric vectors (min, max).
#   resolution: number of grid points per axis (resolution^2 rows total).
#   padding: widen both ranges by this amount on each side.
# Returns:
#   Plain data.frame with columns x and y; x varies fastest, matching
#   the original rep()-based construction.
make_grid <- function(x_range, y_range,
                      resolution = 10, padding = 0) {
  x_grid <- seq(from = x_range[1] - padding, to = x_range[2] + padding, length.out = resolution)
  y_grid <- seq(from = y_range[1] - padding, to = y_range[2] + padding, length.out = resolution)
  # expand.grid enumerates x fastest, exactly like the former
  # rep(x, n) / rep(y, each = n) pair; KEEP.OUT.ATTRS = FALSE keeps the
  # result a plain data.frame without the out.attrs attribute.
  expand.grid(x = x_grid, y = y_grid, KEEP.OUT.ATTRS = FALSE)
}
# Get grid averages for a single variable z.
#
# Average z over cells of a regular (x, y) grid.  Grid points are
# visited in make_grid() order (x varies fastest); at each grid point
# all REMAINING observations with obs_x < grid_x and obs_y < grid_y are
# averaged and then removed, so every observation contributes to exactly
# one grid point: the first one (in iteration order) that dominates it.
# The result therefore depends on the grid iteration order.
#
# Args:
#   x, y: observation coordinates.
#   z: values to average, same length as x and y.
#   resolution, padding: forwarded to make_grid().
# Returns:
#   The make_grid() data.frame with an added z column; NA for grid
#   points that captured no observations.
grid_average <- function(x, y, z, resolution = 10, padding = 0) {
  mydf <- data.frame(x, y, z)
  # Row index, used below to delete observations once consumed.
  mydf <- mydf %>% mutate(index = 1:nrow(mydf))
  # Get xy ranges
  x_range <- x %>% range
  y_range <- y %>% range
  xy_grid <- make_grid(x_range, y_range, resolution, padding)
  # Add column for cell averages
  xy_grid$z <- NA
  for(i in 1:nrow(xy_grid)) {
    # observations not yet consumed that lie below/left of this grid point
    cell_df <- mydf %>%
      filter(x < xy_grid[i, 1] & y < xy_grid[i, 2])
    # cell average (mean() of an empty vector is NaN; converted to NA below)
    xy_grid[i, "z"] <- mean(cell_df$z)
    # Remove used observations from data
    mydf <- mydf %>% filter(!(index %in% cell_df$index))
  }
  # Replace NaN --> NA
  xy_grid[is.nan(xy_grid$z), "z"] <- NA
  return(xy_grid)
}
# Fit an independent 2-D loess surface to each column of z over (x, y)
# and predict, with standard errors, at a set of new points.
#
# Args:
#   x, y: numeric predictor vectors.
#   z: data.frame (or coercible) of response columns; one loess fit per
#      column.
#   newdata: data.frame with columns x and y at which to predict; if
#            NULL, an evenly spaced resolution x resolution grid over
#            the observed ranges is used.
#   resolution: grid size per axis when newdata is NULL.
#   span: loess smoothing span.
# Returns:
#   data.frame: the newdata columns followed by, for each column `v` of
#   z, predictions `v` and standard errors `v_se`.
multiple_2d_loess <- function(x, y, z, newdata = NULL, resolution = 10, span = .1) {
  z <- data.frame(z)
  # If no new data is specified, use an even grid over the (x, y) range.
  if (is.null(newdata)) {
    x_range <- range(x)
    y_range <- range(y)
    newdata <- expand.grid(
      x = seq(from = x_range[1], to = x_range[2], length.out = resolution),
      y = seq(from = y_range[1], to = y_range[2], length.out = resolution)
    )
  }
  # One loess fit per response column (= vector component).
  zs <- lapply(seq_len(ncol(z)), function(k) {
    # BUG FIX: span must be passed by NAME.  Passed positionally (as
    # before) it lands in loess()'s `data` argument, so the requested
    # span was silently ignored and the default 0.75 used instead.
    fit <- loess(z[, k] ~ x + y, span = span)
    prediction <- predict(fit, newdata, se = TRUE)
    # With a data.frame `newdata`, predict.loess returns plain vectors
    # in $fit and $se.fit (the old code used `$se`, which worked only
    # via partial matching of `se.fit`).
    data.frame(predicted_z = as.numeric(prediction$fit),
               predicted_se = as.numeric(prediction$se.fit))
  })
  zs <- do.call(cbind, zs)
  # Name the output columns v, v_se per response column v.  (The old
  # rep(colnames(z), each = ncol(z)) was only correct when z had exactly
  # two columns; each value/SE pair is always 2 columns wide.)
  colnames(zs) <- paste0(rep(colnames(z), each = 2), c("", "_se"))
  cbind(newdata, zs)
}
# Clamp a length-2 point component-wise: element 1 to x_range, element 2
# to y_range.  One-dimensional clamping is delegated to to_range()
# (defined elsewhere in this project).
to_range2d <- function(vec, x_range, y_range) {
  ranges <- list(x_range, y_range)
  for (k in 1:2) {
    vec[k] <- to_range(vec[k], ranges[[k]])
  }
  vec
}
## HitChip Atlas data ********************* ####
# Load the HITChip Atlas dataset (shipped with the `microbiome` package)
# and log10-transform the abundances.
data("atlas1006")
# Log 10 transform
atlas1006_log <- atlas1006 %>% transform("log10")
# Subjects with multiple samples: subjects that appear with a nonzero
# time index (presumably follow-up samples -- confirm the metadata
# convention) have more than one sample.
multiple_id <- meta(atlas1006_log) %>%
  filter(time != 0) %>%
  pull(subject) %>%
  unique()
# Restrict to those longitudinal subjects.
longitudinal_atlas1006_log <- atlas1006_log %>%
  subset_samples(subject %in% multiple_id)
# Samples-by-taxa abundance matrix (abundances() returns taxa-by-samples).
longitudinal_atlas1006_log_abundances <- t(abundances(longitudinal_atlas1006_log))
## Bimodal taxa *************************** ####
# Bimodality coefficients
# atlas_1006_bimodality <- bimodality(longitudinal_atlas1006_log)
# Bimodal taxa
# bimodal_taxa <- names(which(atlas_1006_bimodality > 0.9))
# Hard-coded result of the commented-out bimodality screen above
# (taxa with bimodality coefficient > 0.9).
bimodal_taxa <- c("Bacteroides fragilis et rel.",
                  "Dialister",
                  "Prevotella melaninogenica et rel.",
                  "Prevotella oralis et rel.",
                  "Uncultured Clostridiales II")
# Manually chosen tipping-point abundances per taxon.  NOTE(review):
# this vector is not referenced anywhere in this script chunk --
# confirm it is still needed.
change_points <- c("Anaerofustis" = 0.4,
                   "Aneurinibacillus" = 0.3,
                   "Aquabacterium" = 0.5,
                   "Bacteroides intestinalis et rel." = .5,
                   "Bacteroides fragilis et rel." = 3.1,
                   "Burkholderia" = 0.5,
                   "Dialister" = 3.25,
                   "Leminorella" = 0.5,
                   "Prevotella melaninogenica et rel." = 4.5,
                   "Prevotella oralis et rel." = 4,
                   "Serratia" = 0.5,
                   "Uncultured Bacteroidetes" = 0.5,
                   "Uncultured Clostridiales II" = 3.4,
                   "Uncultured Selenomonadaceae" = 0.5,
                   "Wissella et rel." = 0.45)
## Rates of change ************************ ####
# Build `delta_df`: for every longitudinal subject and every bimodal
# taxon, the abundance change between consecutive sampling times.
# Resulting columns: delta_t, delta_abundance, start (abundance at the
# interval start), otu, time_id (interval index within subject),
# subject, and (added below) unit_rate = delta_abundance / delta_t.
delta_df <- lapply(meta(longitudinal_atlas1006_log) %>% pull(subject) %>% unique, function(i) {
  # Time index and sample name for this subject.
  sample_info <- meta(longitudinal_atlas1006_log) %>%
    filter(subject == i) %>%
    select(time, sample)
  sample_abundances <- cbind(sample_info, longitudinal_atlas1006_log_abundances[sample_info$sample,
                                                                                bimodal_taxa])
  # Average duplicated time index observations.  NOTE(review): only the
  # SECOND duplicate row is blanked and dropped, so three or more
  # samples at the same time index would leave extra rows behind --
  # confirm duplicates come at most in pairs in this dataset.
  duplicated_time <- sample_abundances$time[which(duplicated(sample_abundances$time))]
  if(length(duplicated_time) != 0) {
    for(d in duplicated_time) {
      mean_abundances <- sample_abundances[sample_abundances$time == d, bimodal_taxa] %>%
        colMeans()
      sample_abundances[sample_abundances$time == d, bimodal_taxa][1, ] <- mean_abundances
      sample_abundances[sample_abundances$time == d, bimodal_taxa][2, ] <- NA
      sample_abundances <- sample_abundances %>% drop_na()
    }
  }
  # Per-taxon first differences over consecutive time points.
  deltas <- lapply(bimodal_taxa, function(otu) {
    otu_info <- sample_abundances[, c("time", otu)]
    # Row r+1 minus row r gives (delta_t, delta_abundance); `start` is
    # the abundance at the interval start.
    delta <- lapply(1:(nrow(otu_info) - 1), function(r) {
      cbind(otu_info[r+1, ] - otu_info[r, ], start = otu_info[r, 2])
    }) %>% do.call(rbind, .)
    delta <- delta %>%
      cbind(., otu) %>%
      set_colnames(c("delta_t", "delta_abundance", "start", "otu"))
    # Add time step ID
    delta <- delta %>%
      mutate(time_id = 1:nrow(delta))
    return(delta)
  }) %>%
    do.call(rbind, .) %>%
    cbind(subject = i)
  return(deltas)
}) %>% do.call(rbind, .)
# Add average change velocity (change per unit time over each interval).
delta_df <- delta_df %>%
  mutate(unit_rate = delta_abundance/delta_t)
pairwise_df <- get_pairwise_df(delta_df, bimodal_taxa[1], bimodal_taxa[2])
# Plot observations
p <- pairwise_df %>%
ggplot() +
geom_point(aes(x = x_start, y = y_start), color = "red", size = 1) +
geom_segment(aes(x = x_start, y = y_start,
xend = x_start + x_change, yend = y_start + y_change, group = id),
arrow = arrow(length = unit(0.2, "cm"), ends="last"), size = .5)
## What is this *************************** ####
gl <- 40
x_fit <- interp.loess(pairwise_df$x_start, pairwise_df$y_start, pairwise_df$x_change, gridlen = c(gl, gl))
y_fit <- interp.loess(pairwise_df$x_start, pairwise_df$y_start, pairwise_df$y_change, gridlen = c(gl, gl))
# data.frame(x_start = x_fit$x, y_start = x_fit$y,
# xend = x_fit$z, yend = y_fit$z) %>% head
df <- data.frame(x_change = (data.frame(x_fit$z) %>% melt)[, 2],
y_change = (data.frame(y_fit$z) %>% melt)[, 2],
x = rep(x_fit$x, gl),
y = rep(x_fit$y, each = gl))
q <- df %>%
ggplot() +
geom_point(aes(x = x, y = y), color = "red", size = .25) +
geom_segment(aes(x = x, y =y,
xend = x + x_change/50, yend = y + y_change/50),
arrow = arrow(length = unit(0.1, "cm"), ends="last"), size = .5) +
coord_cartesian(xlim = 2.5:3.5)
plot_grid(p, q)
## Grid averages ************************** ####
## Empirical
p_empirical_changes <- ggplot() +
geom_density_2d(data = otu_abundance %>%
as.data.frame() %>%
set_colnames(paste0("V", 1:5)), aes(x = V1, y = V2), color = "grey") +
geom_segment(data = pairwise_df, aes(x = x_start, y = y_start,
xend = x_start + x_change/5, yend = y_start + y_change/5),
arrow = arrow(length = unit(0.15, "cm"), ends="last"), size = .5) +
scale_color_gradient2(low = "blue", mid = "white", high = "red") +
labs(subtitle = "Empirical changes")+
coord_cartesian(ylim = 2:5)
##### Average rate in a grid
mean_x <- grid_average(pairwise_df$x_start, pairwise_df$y_start, pairwise_df$x_change, resolution = 15, padding = 0)
mean_y <- grid_average(pairwise_df$x_start, pairwise_df$y_start, pairwise_df$y_change, resolution = 15, padding = 0)
mean_xy <- full_join(mean_y, mean_x, by = c("x", "y")) %>%
set_colnames(c("x", "y", "x_change", "y_change"))
# Plot
p_emricial_grid_avegare <- ggplot() +
geom_density_2d(data = otu_abundance %>%
as.data.frame() %>%
set_colnames(paste0("V", 1:5)), aes(x = V1, y = V2), color = "grey") +
geom_segment(data =mean_xy, aes(x = x, y = y,
xend = x + x_change/5, yend = y + y_change/5),
arrow = arrow(length = unit(0.15, "cm"), ends="last"), size = .5) +
scale_color_gradient2(low = "blue", mid = "white", high = "red") +
labs(subtitle = "Average change per time unit, 15x15 grid")+
coord_cartesian(ylim = 2:5)
taxon1_drift <- learn_drift(delta_df, taxon = bimodal_taxa[1], grid_by = .1)
taxon2_drift <- learn_drift(delta_df, taxon = bimodal_taxa[2], grid_by = .1)
# taxon1_drift[, c("x", "drift")] %>% set_colnames(c("x", "x_drift"))
# taxon2_drift[, c("x", "drift")] %>% set_colnames(c("y", "y_drift"))
xy_grid <- expand.grid(x = taxon1_drift[, "x"], y = taxon2_drift[, "x"])
xy_grid$x_drift <- NA
xy_grid$y_drift <- NA
# Get drifts
for(i in 1:nrow(xy_grid)) {
print(i)
xy_grid[i, "x_drift"] <- taxon1_drift %>% filter(x == xy_grid[i, "x"]) %>% pull(drift)
xy_grid[i, "y_drift"] <- taxon2_drift %>% filter(x == xy_grid[i, "y"]) %>% pull(drift)
}
p_independent_drifts_learned <- ggplot() +
geom_density_2d(data = otu_abundance %>%
as.data.frame() %>%
set_colnames(paste0("V", 1:5)), aes(x = V1, y = V2), color = "grey") +
geom_segment(data = xy_grid, aes(x = x, y = y,
xend = x + x_drift/3, yend = y + y_drift/3),
arrow = arrow(length = unit(0.1, "cm"), ends="last"), size = .5) +
labs(subtitle = "Individual learned drifts") +
coord_cartesian(ylim = 2:5)
plot_grid(p_empirical_changes, p_emricial_grid_avegare, p_independent_drifts_learned, p_2d_drifts_learned, nrow = 1)
## Interpolate 1D valued function ********* ####
x_fit <- interp.loess(mean_xy$x, mean_xy$y, mean_xy$x_change, gridlen = c(gl, gl))
y_fit <- interp.loess(mean_xy$x, mean_xy$y, mean_xy$y_change, gridlen = c(gl, gl))
df <- data.frame(x_change = (data.frame(x_fit$z) %>% melt)[, 2],
y_change = (data.frame(y_fit$z) %>% melt)[, 2],
x = rep(x_fit$x, gl),
y = rep(x_fit$y, each = gl))
ggplot() +
geom_density_2d(data = otu_abundance %>%
as.data.frame() %>%
set_colnames(paste0("V", 1:5)), aes(x = V2, y = V3)) +
geom_segment(data = df, aes(x = x, y =y,
xend = x + x_change/50, yend = y + y_change/50),
arrow = arrow(length = unit(0.1, "cm"), ends="last"), size = .5)
ggplot() +
geom_density_2d(data = otu_abundance %>%
as.data.frame() %>%
set_colnames(paste0("V", 1:5)), aes(x = V2, y = V3)) +
geom_segment(data = mean_xy, aes(x = x, y =y,
xend = x + x_change, yend = y + y_change),
arrow = arrow(length = unit(0.1, "cm"), ends="last"), size = .5)
## 2D loess drift ************************* ####
# Fit loess and predict on new data
mfit <- multiple_2d_loess(x = pairwise_df$x_start, y = pairwise_df$y_start, z = pairwise_df[, c(2,4)],
resolution = 30, span = .5,
newdata = make_grid(range(otu_abundance[, bimodal_taxa[1]]),
range(otu_abundance[, bimodal_taxa[2]]), resolution = 20))
# Combine standard errors. This is probably not an ok way
# mfit <- mfit %>%
# mutate(se = x_change_se + y_change_se)
# PLOT
p_2d_drifts_learned <- ggplot() +
geom_density_2d(data = otu_abundance %>%
as.data.frame() %>%
set_colnames(paste0("V", 1:5)), aes(x = V1, y = V2), color = "grey") +
# geom_point(data = otu_abundance %>%
# as.data.frame() %>%
# set_colnames(paste0("V", 1:5)), aes(x = V1, y = V2), size = .2) +
# geom_point(data = mfit, aes(x = x, y= y), color = "red", size = .2) +
geom_segment(data = mfit, aes(x = x, y =y,
xend = x + x_change/2.5, yend = y + y_change/2.5),
arrow = arrow(length = unit(0.1, "cm"), ends="last"), size = .5) +
labs(subtitle = "2D learned drifts") +
coord_cartesian(ylim = 2:5)
## 2D loess dispersion ******************** ####
# Estimate a diffusion surface for each column of z over (x, y) via 2-D
# loess on the SQUARED displacements, predicting at a set of new points.
#
# Args:
#   x, y: numeric predictor vectors.
#   z: data.frame (or coercible) of per-unit-time changes; squared
#      internally before smoothing.
#   newdata: data.frame with columns x and y at which to predict; if
#            NULL, an evenly spaced resolution x resolution grid over
#            the observed ranges is used.
#   resolution: grid size per axis when newdata is NULL.
#   span: loess smoothing span.
# Returns:
#   data.frame: newdata columns plus x_diffusion, x_diffusion_se,
#   y_diffusion, y_diffusion_se.  NOTE: the hard-coded output names
#   assume z has exactly two columns.
diffusion_2d_loess <- function(x, y, z, newdata = NULL, resolution = 10, span = .1) {
  z <- data.frame(z)
  # Square the differences: the smoothed mean squared displacement is
  # the diffusion estimate.
  z <- apply(z, 2, FUN = function(i) (i^2))
  # If no new data is specified, use an even grid over the (x, y) range.
  if (is.null(newdata)) {
    x_range <- range(x)
    y_range <- range(y)
    newdata <- expand.grid(
      x = seq(from = x_range[1], to = x_range[2], length.out = resolution),
      y = seq(from = y_range[1], to = y_range[2], length.out = resolution)
    )
  }
  # One loess fit per response column (= vector component).
  zs <- lapply(seq_len(ncol(z)), function(k) {
    # BUG FIX: span must be passed by NAME.  Passed positionally (as
    # before) it lands in loess()'s `data` argument, so the requested
    # span was silently ignored and the default 0.75 used instead.
    fit <- loess(z[, k] ~ x + y, span = span)
    prediction <- predict(fit, newdata, se = TRUE)
    predicted_z <- as.numeric(prediction$fit)
    # A diffusion coefficient cannot be negative; clamp smoothing
    # artefacts to zero.
    predicted_z[predicted_z < 0] <- 0
    data.frame(predicted_z,
               predicted_se = as.numeric(prediction$se.fit))
  })
  zs <- do.call(cbind, zs)
  colnames(zs) <- c("x_diffusion", "x_diffusion_se",
                    "y_diffusion", "y_diffusion_se")
  cbind(newdata, zs)
}
# Overlay the dispersion grid points on the empirical abundance density.
# NOTE(review): `dispersion` (and `otu_abundance`) are not defined in
# this script chunk -- presumably created interactively; confirm before
# running non-interactively.
ggplot() +
  geom_density_2d(data = otu_abundance %>%
                    as.data.frame() %>%
                    set_colnames(paste0("V", 1:5)), aes(x = V1, y = V2)) +
  # geom_point(data = otu_abundance %>%
  #              as.data.frame() %>%
  #              set_colnames(paste0("V", 1:5)), aes(x = V1, y = V2), size = .2) +
  # geom_point(data = mfit, aes(x = x, y= y), color = "red", size = .2) +
  geom_point(data = dispersion, aes(x = x, y =y))
## Diffusion mime 2 *********************** ####
# Simulate a 2-D stochastic trajectory (Euler-Maruyama scheme) for a
# pair of taxa, with drift and diffusion surfaces learned from the
# observed abundance changes via 2-D loess.
#
# Args:
#   data: long-format change table (like `delta_df`) with columns
#         otu, start, unit_rate.
#   time: output time grid for the simulated trajectory.
#   initial_value: optional length-2 start point (x, y); random if NULL.
#   taxon1, taxon2: taxa mapped to the x and y coordinates.
#   detail: number of Euler substeps per output time step.
#   resolution, span: grid size and loess span for the surface fits.
# Returns:
#   data.frame with columns x, y, time.
#
# NOTE(review): reads the global `otu_abundance` to set the prediction
# grid ranges, and refits both loess surfaces from scratch at EVERY
# Euler substep -- that refitting dominates the run time.
diffusion_mime_2d <- function(data,
                              time = 1:90/30,
                              initial_value = NULL,
                              taxon1 = "Bacteroides fragilis et rel.",
                              taxon2 = "Dialister",
                              detail = 10,
                              resolution = 10,
                              span = .5) {
  # mydata <- delta_df
  mydata <- data
  pairwise_df <- get_pairwise_df(mydata, taxon1, taxon2)
  #####
  # For some reason this doesn't respect the x,y ranges. Could be an extrapolation issue
  #####
  # Fit the drift surfaces once, only to derive the admissible (x, y)
  # box from the non-NA region of the fit.
  drift_fit <- multiple_2d_loess(x = pairwise_df$x_start, y = pairwise_df$y_start,
                                 z = pairwise_df[, c("x_change", "y_change")],
                                 resolution, span,
                                 newdata = make_grid(range(otu_abundance[, taxon1]),
                                                     range(otu_abundance[, taxon2]), resolution))
  # Remove NA
  drift_fit <- drift_fit %>% drop_na
  taxon_range <- data.frame(x = range(drift_fit$x), y = range(drift_fit$y))
  # Initialize observation matrix (one row per output time point).
  obs <- matrix(NA, length(time), 2)
  # Random initial value. If out of range --> clamped to the range end point.
  if(is.null(initial_value)) {
    obs[1, ] <- to_range2d(c(abs(rnorm(1, mean(taxon_range$x), 1)),
                             abs(rnorm(1, mean(taxon_range$y), 1))),
                           taxon_range$x, taxon_range$y)
  } else {
    obs[1, ] <- to_range2d(initial_value, taxon_range$x, taxon_range$y)
  }
  # Next values *************************
  for(i in 2:length(time)) {
    print(i)  # progress indicator
    # Divide each time step in to a finer grid to get more stable Euler approximation
    finer_x_grid <- seq(from = time[i-1], to = time[i], length.out = detail)
    temp_y <- obs[i-1, ]
    # Finer grid values
    for(j in 2:length(finer_x_grid)) {
      # Time step
      dt <- finer_x_grid[j] - finer_x_grid[j-1]
      # Learn drift at this point (full loess refit per substep; see NOTE above)
      point_drift <- multiple_2d_loess(x = pairwise_df$x_start, y = pairwise_df$y_start,
                                       z = pairwise_df[, c(2,4)],
                                       resolution = resolution, span = span,
                                       newdata = data.frame(x = temp_y[1], y = temp_y[2]) %>%
                                         set_colnames(c("x", "y")))
      point_diffusion <- diffusion_2d_loess(x = pairwise_df$x_start, y = pairwise_df$y_start,
                                            z = pairwise_df[, c(2,4)],
                                            resolution = resolution, span = span,
                                            newdata = data.frame(x = temp_y[1], y = temp_y[2]) %>%
                                              set_colnames(c("x", "y")))
      # Euler-Maruyama update.  NOTE(review): a single rnorm(1) draw
      # multiplies BOTH the x and y diffusion terms, so the two noise
      # components are perfectly correlated -- confirm this is intended.
      temp_y <- temp_y +
        point_drift[, c("x_change", "y_change")]*dt +
        point_diffusion[, c("x_diffusion", "y_diffusion")]*rnorm(1, 0, sqrt(dt))
      # Keep the state inside the admissible box.
      temp_y <- to_range2d(temp_y, taxon_range$x, taxon_range$y)
    }
    obs[i, ] <- temp_y %>% as.numeric
  }
  res <- data.frame(x = obs[, 1], y = obs[, 2], time = time)
  return(res)
}
# Simulate one trajectory and inspect it from several angles.
xx <- diffusion_mime_2d(delta_df, time = 1:90/30, initial_value = c(3.5, 2.5), resolution = 20, span = .1)
# Trajectory in the (taxon1, taxon2) plane, coloured by time.
ggplot() +
  geom_path(data = xx, aes(x = x, y = y, color = time))
# Each coordinate against time.
ggplot() +
  geom_path(data = xx, aes(x = time, y= y))
ggplot() +
  geom_path(data = xx, aes(x = time, y= x))
# Trajectory overlaid on the empirical abundance density.
ggplot() +
  geom_density_2d(data = otu_abundance %>%
                    as.data.frame() %>%
                    set_colnames(paste0("V", 1:5)), aes(x = V1, y = V2), color = "grey") +
  geom_path(data = xx, aes(x = x, y = y, color = time*30)) +
  scale_color_gradient(low = "blue", high = "red") +
  theme_classic(15) +
  labs(x = "Bacteroides fragilis et rel.", y = "Dialister", title = "Simulation") +
  guides(color = guide_legend(title = "Time"))
# Diffusion surface on a fine grid.  NOTE(review): `resolution`,
# `taxon1` and `taxon2` are not defined at the top level of this script
# (they only exist as arguments inside diffusion_mime_2d) -- this call
# appears to rely on leftovers in the interactive workspace; confirm
# before running non-interactively.
diffusion_fit <- diffusion_2d_loess(x = pairwise_df$x_start, y = pairwise_df$y_start,
                                    z = pairwise_df[, c("x_change", "y_change")],
                                    resolution, span = 1.5,
                                    newdata = make_grid(range(otu_abundance[, taxon1]),
                                                        range(otu_abundance[, taxon2]), resolution = 50))
| /HitChip_tests/multivariate_premonitions.R | no_license | velait/OUP | R | false | false | 20,082 | r | ## Functions ***************************** ####
# Function to get all unique pairs of vector elements
get_pairs <- function(x) {
save_class <- class(x)
unique_x <- unique(x) %>% as.character
x_pairs <- list()
row_ind <- 1
for(i in 1:(length(unique_x) - 1)) {
for(j in (i+1):length(unique_x)) {
x_pairs[[row_ind]] <- c(unique_x[i], unique_x[j])
row_ind <- row_ind + 1
}
}
return(x_pairs)
}
# Function to get pairwise starting points and abundances.
# For a pair of taxa, pull the interval start abundances and the
# per-unit-time changes out of the long-format change table and lay them
# out as x/y columns of one data.frame.  Rows of the two taxa are
# assumed to line up pairwise (same subjects, same intervals).
get_pairwise_df <- function(delta_df, otu1, otu2) {
  # Subset once per taxon instead of once per column.
  rows_x <- delta_df %>% filter(otu == otu1)
  rows_y <- delta_df %>% filter(otu == otu2)
  data.frame(
    x_start  = rows_x %>% pull(start),
    x_change = rows_x %>% pull(unit_rate),
    y_start  = rows_y %>% pull(start),
    y_change = rows_y %>% pull(unit_rate),
    id       = 1:nrow(rows_x)
  )
}
# Set up grid: a thin wrapper around expand.grid.
# Build a rectangular grid of (x, y) points covering the given ranges.
#
# Args:
#   x_range, y_range: length-2 numeric vectors (min, max).
#   resolution: number of grid points per axis (resolution^2 rows total).
#   padding: widen both ranges by this amount on each side.
# Returns:
#   Plain data.frame with columns x and y; x varies fastest, matching
#   the original rep()-based construction.
make_grid <- function(x_range, y_range,
                      resolution = 10, padding = 0) {
  x_grid <- seq(from = x_range[1] - padding, to = x_range[2] + padding, length.out = resolution)
  y_grid <- seq(from = y_range[1] - padding, to = y_range[2] + padding, length.out = resolution)
  # expand.grid enumerates x fastest, exactly like the former
  # rep(x, n) / rep(y, each = n) pair; KEEP.OUT.ATTRS = FALSE keeps the
  # result a plain data.frame without the out.attrs attribute.
  expand.grid(x = x_grid, y = y_grid, KEEP.OUT.ATTRS = FALSE)
}
# Get grid averages for a single variable z.
#
# Average z over cells of a regular (x, y) grid.  Grid points are
# visited in make_grid() order (x varies fastest); at each grid point
# all REMAINING observations with obs_x < grid_x and obs_y < grid_y are
# averaged and then removed, so every observation contributes to exactly
# one grid point: the first one (in iteration order) that dominates it.
# The result therefore depends on the grid iteration order.
# Returns the make_grid() data.frame with an added z column; NA for
# grid points that captured no observations.
grid_average <- function(x, y, z, resolution = 10, padding = 0) {
  mydf <- data.frame(x, y, z)
  # Row index, used below to delete observations once consumed.
  mydf <- mydf %>% mutate(index = 1:nrow(mydf))
  # Get xy ranges
  x_range <- x %>% range
  y_range <- y %>% range
  xy_grid <- make_grid(x_range, y_range, resolution, padding)
  # Add column for cell averages
  xy_grid$z <- NA
  for(i in 1:nrow(xy_grid)) {
    # observations not yet consumed that lie below/left of this grid point
    cell_df <- mydf %>%
      filter(x < xy_grid[i, 1] & y < xy_grid[i, 2])
    # cell average (mean() of an empty vector is NaN; converted to NA below)
    xy_grid[i, "z"] <- mean(cell_df$z)
    # Remove used observations from data
    mydf <- mydf %>% filter(!(index %in% cell_df$index))
  }
  # Replace NaN --> NA
  xy_grid[is.nan(xy_grid$z), "z"] <- NA
  return(xy_grid)
}
# Fit an independent 2-D loess surface to each column of z over (x, y)
# and predict, with standard errors, at a set of new points.
#
# Args:
#   x, y: numeric predictor vectors.
#   z: data.frame (or coercible) of response columns; one loess fit per
#      column.
#   newdata: data.frame with columns x and y at which to predict; if
#            NULL, an evenly spaced resolution x resolution grid over
#            the observed ranges is used.
#   resolution: grid size per axis when newdata is NULL.
#   span: loess smoothing span.
# Returns:
#   data.frame: the newdata columns followed by, for each column `v` of
#   z, predictions `v` and standard errors `v_se`.
multiple_2d_loess <- function(x, y, z, newdata = NULL, resolution = 10, span = .1) {
  z <- data.frame(z)
  # If no new data is specified, use an even grid over the (x, y) range.
  if (is.null(newdata)) {
    x_range <- range(x)
    y_range <- range(y)
    newdata <- expand.grid(
      x = seq(from = x_range[1], to = x_range[2], length.out = resolution),
      y = seq(from = y_range[1], to = y_range[2], length.out = resolution)
    )
  }
  # One loess fit per response column (= vector component).
  zs <- lapply(seq_len(ncol(z)), function(k) {
    # BUG FIX: span must be passed by NAME.  Passed positionally (as
    # before) it lands in loess()'s `data` argument, so the requested
    # span was silently ignored and the default 0.75 used instead.
    fit <- loess(z[, k] ~ x + y, span = span)
    prediction <- predict(fit, newdata, se = TRUE)
    # With a data.frame `newdata`, predict.loess returns plain vectors
    # in $fit and $se.fit (the old code used `$se`, which worked only
    # via partial matching of `se.fit`).
    data.frame(predicted_z = as.numeric(prediction$fit),
               predicted_se = as.numeric(prediction$se.fit))
  })
  zs <- do.call(cbind, zs)
  # Name the output columns v, v_se per response column v.  (The old
  # rep(colnames(z), each = ncol(z)) was only correct when z had exactly
  # two columns; each value/SE pair is always 2 columns wide.)
  colnames(zs) <- paste0(rep(colnames(z), each = 2), c("", "_se"))
  cbind(newdata, zs)
}
# Clamp a length-2 point component-wise: element 1 to x_range, element 2
# to y_range.  One-dimensional clamping is delegated to to_range()
# (defined elsewhere in this project).
to_range2d <- function(vec, x_range, y_range) {
  ranges <- list(x_range, y_range)
  for (k in 1:2) {
    vec[k] <- to_range(vec[k], ranges[[k]])
  }
  vec
}
## HitChip Atlas data ********************* ####
data("atlas1006")
# Log 10 transform
atlas1006_log <- atlas1006 %>% transform("log10")
# Subjects with multiple samples
multiple_id <- meta(atlas1006_log) %>%
filter(time != 0) %>%
pull(subject) %>%
unique()
longitudinal_atlas1006_log <- atlas1006_log %>%
subset_samples(subject %in% multiple_id)
longitudinal_atlas1006_log_abundances <- t(abundances(longitudinal_atlas1006_log))
## Bimodal taxa *************************** ####
# Bimodality coefficients
# atlas_1006_bimodality <- bimodality(longitudinal_atlas1006_log)
# Bimodal taxa
# bimodal_taxa <- names(which(atlas_1006_bimodality > 0.9))
bimodal_taxa <- c("Bacteroides fragilis et rel.",
"Dialister",
"Prevotella melaninogenica et rel.",
"Prevotella oralis et rel.",
"Uncultured Clostridiales II")
change_points <- c("Anaerofustis" = 0.4,
"Aneurinibacillus" = 0.3,
"Aquabacterium" = 0.5,
"Bacteroides intestinalis et rel." = .5,
"Bacteroides fragilis et rel." = 3.1,
"Burkholderia" = 0.5,
"Dialister" = 3.25,
"Leminorella" = 0.5,
"Prevotella melaninogenica et rel." = 4.5,
"Prevotella oralis et rel." = 4,
"Serratia" = 0.5,
"Uncultured Bacteroidetes" = 0.5,
"Uncultured Clostridiales II" = 3.4,
"Uncultured Selenomonadaceae" = 0.5,
"Wissella et rel." = 0.45)
## Rates of change ************************ ####
# Get table with abundance changes
delta_df <- lapply(meta(longitudinal_atlas1006_log) %>% pull(subject) %>% unique, function(i) {
sample_info <- meta(longitudinal_atlas1006_log) %>%
filter(subject == i) %>%
select(time, sample)
sample_abundances <- cbind(sample_info, longitudinal_atlas1006_log_abundances[sample_info$sample,
bimodal_taxa])
# Average duplicated time index observations
duplicated_time <- sample_abundances$time[which(duplicated(sample_abundances$time))]
if(length(duplicated_time) != 0) {
for(d in duplicated_time) {
mean_abundances <- sample_abundances[sample_abundances$time == d, bimodal_taxa] %>%
colMeans()
sample_abundances[sample_abundances$time == d, bimodal_taxa][1, ] <- mean_abundances
sample_abundances[sample_abundances$time == d, bimodal_taxa][2, ] <- NA
sample_abundances <- sample_abundances %>% drop_na()
}
}
deltas <- lapply(bimodal_taxa, function(otu) {
otu_info <- sample_abundances[, c("time", otu)]
delta <- lapply(1:(nrow(otu_info) - 1), function(r) {
cbind(otu_info[r+1, ] - otu_info[r, ], start = otu_info[r, 2])
}) %>% do.call(rbind, .)
delta <- delta %>%
cbind(., otu) %>%
set_colnames(c("delta_t", "delta_abundance", "start", "otu"))
# Add time step ID
delta <- delta %>%
mutate(time_id = 1:nrow(delta))
return(delta)
}) %>%
do.call(rbind, .) %>%
cbind(subject = i)
return(deltas)
}) %>% do.call(rbind, .)
# Add average change velocity
delta_df <- delta_df %>%
mutate(unit_rate = delta_abundance/delta_t)
pairwise_df <- get_pairwise_df(delta_df, bimodal_taxa[1], bimodal_taxa[2])
# Plot observations
p <- pairwise_df %>%
ggplot() +
geom_point(aes(x = x_start, y = y_start), color = "red", size = 1) +
geom_segment(aes(x = x_start, y = y_start,
xend = x_start + x_change, yend = y_start + y_change, group = id),
arrow = arrow(length = unit(0.2, "cm"), ends="last"), size = .5)
## What is this *************************** ####
gl <- 40
x_fit <- interp.loess(pairwise_df$x_start, pairwise_df$y_start, pairwise_df$x_change, gridlen = c(gl, gl))
y_fit <- interp.loess(pairwise_df$x_start, pairwise_df$y_start, pairwise_df$y_change, gridlen = c(gl, gl))
# data.frame(x_start = x_fit$x, y_start = x_fit$y,
# xend = x_fit$z, yend = y_fit$z) %>% head
df <- data.frame(x_change = (data.frame(x_fit$z) %>% melt)[, 2],
y_change = (data.frame(y_fit$z) %>% melt)[, 2],
x = rep(x_fit$x, gl),
y = rep(x_fit$y, each = gl))
q <- df %>%
ggplot() +
geom_point(aes(x = x, y = y), color = "red", size = .25) +
geom_segment(aes(x = x, y =y,
xend = x + x_change/50, yend = y + y_change/50),
arrow = arrow(length = unit(0.1, "cm"), ends="last"), size = .5) +
coord_cartesian(xlim = 2.5:3.5)
plot_grid(p, q)
## Grid averages ************************** ####
## Empirical
p_empirical_changes <- ggplot() +
geom_density_2d(data = otu_abundance %>%
as.data.frame() %>%
set_colnames(paste0("V", 1:5)), aes(x = V1, y = V2), color = "grey") +
geom_segment(data = pairwise_df, aes(x = x_start, y = y_start,
xend = x_start + x_change/5, yend = y_start + y_change/5),
arrow = arrow(length = unit(0.15, "cm"), ends="last"), size = .5) +
scale_color_gradient2(low = "blue", mid = "white", high = "red") +
labs(subtitle = "Empirical changes")+
coord_cartesian(ylim = 2:5)
##### Average rate in a grid
mean_x <- grid_average(pairwise_df$x_start, pairwise_df$y_start, pairwise_df$x_change, resolution = 15, padding = 0)
mean_y <- grid_average(pairwise_df$x_start, pairwise_df$y_start, pairwise_df$y_change, resolution = 15, padding = 0)
mean_xy <- full_join(mean_y, mean_x, by = c("x", "y")) %>%
set_colnames(c("x", "y", "x_change", "y_change"))
# Plot
p_emricial_grid_avegare <- ggplot() +
geom_density_2d(data = otu_abundance %>%
as.data.frame() %>%
set_colnames(paste0("V", 1:5)), aes(x = V1, y = V2), color = "grey") +
geom_segment(data =mean_xy, aes(x = x, y = y,
xend = x + x_change/5, yend = y + y_change/5),
arrow = arrow(length = unit(0.15, "cm"), ends="last"), size = .5) +
scale_color_gradient2(low = "blue", mid = "white", high = "red") +
labs(subtitle = "Average change per time unit, 15x15 grid")+
coord_cartesian(ylim = 2:5)
taxon1_drift <- learn_drift(delta_df, taxon = bimodal_taxa[1], grid_by = .1)
taxon2_drift <- learn_drift(delta_df, taxon = bimodal_taxa[2], grid_by = .1)
# taxon1_drift[, c("x", "drift")] %>% set_colnames(c("x", "x_drift"))
# taxon2_drift[, c("x", "drift")] %>% set_colnames(c("y", "y_drift"))
xy_grid <- expand.grid(x = taxon1_drift[, "x"], y = taxon2_drift[, "x"])
xy_grid$x_drift <- NA
xy_grid$y_drift <- NA
# Get drifts
for(i in 1:nrow(xy_grid)) {
print(i)
xy_grid[i, "x_drift"] <- taxon1_drift %>% filter(x == xy_grid[i, "x"]) %>% pull(drift)
xy_grid[i, "y_drift"] <- taxon2_drift %>% filter(x == xy_grid[i, "y"]) %>% pull(drift)
}
p_independent_drifts_learned <- ggplot() +
geom_density_2d(data = otu_abundance %>%
as.data.frame() %>%
set_colnames(paste0("V", 1:5)), aes(x = V1, y = V2), color = "grey") +
geom_segment(data = xy_grid, aes(x = x, y = y,
xend = x + x_drift/3, yend = y + y_drift/3),
arrow = arrow(length = unit(0.1, "cm"), ends="last"), size = .5) +
labs(subtitle = "Individual learned drifts") +
coord_cartesian(ylim = 2:5)
plot_grid(p_empirical_changes, p_emricial_grid_avegare, p_independent_drifts_learned, p_2d_drifts_learned, nrow = 1)
## Interpolate 1D valued function ********* ####
x_fit <- interp.loess(mean_xy$x, mean_xy$y, mean_xy$x_change, gridlen = c(gl, gl))
y_fit <- interp.loess(mean_xy$x, mean_xy$y, mean_xy$y_change, gridlen = c(gl, gl))
df <- data.frame(x_change = (data.frame(x_fit$z) %>% melt)[, 2],
y_change = (data.frame(y_fit$z) %>% melt)[, 2],
x = rep(x_fit$x, gl),
y = rep(x_fit$y, each = gl))
ggplot() +
geom_density_2d(data = otu_abundance %>%
as.data.frame() %>%
set_colnames(paste0("V", 1:5)), aes(x = V2, y = V3)) +
geom_segment(data = df, aes(x = x, y =y,
xend = x + x_change/50, yend = y + y_change/50),
arrow = arrow(length = unit(0.1, "cm"), ends="last"), size = .5)
ggplot() +
geom_density_2d(data = otu_abundance %>%
as.data.frame() %>%
set_colnames(paste0("V", 1:5)), aes(x = V2, y = V3)) +
geom_segment(data = mean_xy, aes(x = x, y =y,
xend = x + x_change, yend = y + y_change),
arrow = arrow(length = unit(0.1, "cm"), ends="last"), size = .5)
## 2D loess drift ************************* ####
# Fit loess and predict on new data
mfit <- multiple_2d_loess(x = pairwise_df$x_start, y = pairwise_df$y_start, z = pairwise_df[, c(2,4)],
resolution = 30, span = .5,
newdata = make_grid(range(otu_abundance[, bimodal_taxa[1]]),
range(otu_abundance[, bimodal_taxa[2]]), resolution = 20))
# Combine standard errors. This is probably not an ok way
# mfit <- mfit %>%
# mutate(se = x_change_se + y_change_se)
# PLOT
p_2d_drifts_learned <- ggplot() +
geom_density_2d(data = otu_abundance %>%
as.data.frame() %>%
set_colnames(paste0("V", 1:5)), aes(x = V1, y = V2), color = "grey") +
# geom_point(data = otu_abundance %>%
# as.data.frame() %>%
# set_colnames(paste0("V", 1:5)), aes(x = V1, y = V2), size = .2) +
# geom_point(data = mfit, aes(x = x, y= y), color = "red", size = .2) +
geom_segment(data = mfit, aes(x = x, y =y,
xend = x + x_change/2.5, yend = y + y_change/2.5),
arrow = arrow(length = unit(0.1, "cm"), ends="last"), size = .5) +
labs(subtitle = "2D learned drifts") +
coord_cartesian(ylim = 2:5)
## 2D loess dispersion ******************** ####
# Estimate a diffusion surface for each column of z over (x, y) via 2-D
# loess on the SQUARED displacements, predicting at a set of new points.
#
# Args:
#   x, y: numeric predictor vectors.
#   z: data.frame (or coercible) of per-unit-time changes; squared
#      internally before smoothing.
#   newdata: data.frame with columns x and y at which to predict; if
#            NULL, an evenly spaced resolution x resolution grid over
#            the observed ranges is used.
#   resolution: grid size per axis when newdata is NULL.
#   span: loess smoothing span.
# Returns:
#   data.frame: newdata columns plus x_diffusion, x_diffusion_se,
#   y_diffusion, y_diffusion_se.  NOTE: the hard-coded output names
#   assume z has exactly two columns.
diffusion_2d_loess <- function(x, y, z, newdata = NULL, resolution = 10, span = .1) {
  z <- data.frame(z)
  # Square the differences: the smoothed mean squared displacement is
  # the diffusion estimate.
  z <- apply(z, 2, FUN = function(i) (i^2))
  # If no new data is specified, use an even grid over the (x, y) range.
  if (is.null(newdata)) {
    x_range <- range(x)
    y_range <- range(y)
    newdata <- expand.grid(
      x = seq(from = x_range[1], to = x_range[2], length.out = resolution),
      y = seq(from = y_range[1], to = y_range[2], length.out = resolution)
    )
  }
  # One loess fit per response column (= vector component).
  zs <- lapply(seq_len(ncol(z)), function(k) {
    # BUG FIX: span must be passed by NAME.  Passed positionally (as
    # before) it lands in loess()'s `data` argument, so the requested
    # span was silently ignored and the default 0.75 used instead.
    fit <- loess(z[, k] ~ x + y, span = span)
    prediction <- predict(fit, newdata, se = TRUE)
    predicted_z <- as.numeric(prediction$fit)
    # A diffusion coefficient cannot be negative; clamp smoothing
    # artefacts to zero.
    predicted_z[predicted_z < 0] <- 0
    data.frame(predicted_z,
               predicted_se = as.numeric(prediction$se.fit))
  })
  zs <- do.call(cbind, zs)
  colnames(zs) <- c("x_diffusion", "x_diffusion_se",
                    "y_diffusion", "y_diffusion_se")
  cbind(newdata, zs)
}
ggplot() +
geom_density_2d(data = otu_abundance %>%
as.data.frame() %>%
set_colnames(paste0("V", 1:5)), aes(x = V1, y = V2)) +
# geom_point(data = otu_abundance %>%
# as.data.frame() %>%
# set_colnames(paste0("V", 1:5)), aes(x = V1, y = V2), size = .2) +
# geom_point(data = mfit, aes(x = x, y= y), color = "red", size = .2) +
geom_point(data = dispersion, aes(x = x, y =y))
## Diffusion mime 2 *********************** ####
diffusion_mime_2d <- function(data,
time = 1:90/30,
initial_value = NULL,
taxon1 = "Bacteroides fragilis et rel.",
taxon2 = "Dialister",
detail = 10,
resolution = 10,
span = .5) {
# mydata <- delta_df
mydata <- data
pairwise_df <- get_pairwise_df(mydata, taxon1, taxon2)
#####
# For some reason this doesn't respect the x,y ranges. Could be an extrapolation issue
#####
drift_fit <- multiple_2d_loess(x = pairwise_df$x_start, y = pairwise_df$y_start,
z = pairwise_df[, c("x_change", "y_change")],
resolution, span,
newdata = make_grid(range(otu_abundance[, taxon1]),
range(otu_abundance[, taxon2]), resolution))
# Remove NA
drift_fit <- drift_fit %>% drop_na
taxon_range <- data.frame(x = range(drift_fit$x), y = range(drift_fit$y))
# Initialize observation matrix
obs <- matrix(NA, length(time), 2)
# Random initial value. If out of range --> other end point
if(is.null(initial_value)) {
obs[1, ] <- to_range2d(c(abs(rnorm(1, mean(taxon_range$x), 1)),
abs(rnorm(1, mean(taxon_range$y), 1))),
taxon_range$x, taxon_range$y)
} else {
obs[1, ] <- to_range2d(initial_value, taxon_range$x, taxon_range$y)
}
# Next values *************************
for(i in 2:length(time)) {
print(i)
# Divide each time step in to a finer grid to get more stable Euler approximation
finer_x_grid <- seq(from = time[i-1], to = time[i], length.out = detail)
temp_y <- obs[i-1, ]
# Finer grid values
for(j in 2:length(finer_x_grid)) {
# Time step
dt <- finer_x_grid[j] - finer_x_grid[j-1]
# Learn drift at this point
point_drift <- multiple_2d_loess(x = pairwise_df$x_start, y = pairwise_df$y_start,
z = pairwise_df[, c(2,4)],
resolution = resolution, span = span,
newdata = data.frame(x = temp_y[1], y = temp_y[2]) %>%
set_colnames(c("x", "y")))
point_diffusion <- diffusion_2d_loess(x = pairwise_df$x_start, y = pairwise_df$y_start,
z = pairwise_df[, c(2,4)],
resolution = resolution, span = span,
newdata = data.frame(x = temp_y[1], y = temp_y[2]) %>%
set_colnames(c("x", "y")))
# New value
temp_y <- temp_y +
point_drift[, c("x_change", "y_change")]*dt +
point_diffusion[, c("x_diffusion", "y_diffusion")]*rnorm(1, 0, sqrt(dt))
temp_y <- to_range2d(temp_y, taxon_range$x, taxon_range$y)
}
obs[i, ] <- temp_y %>% as.numeric
}
res <- data.frame(x = obs[, 1], y = obs[, 2], time = time)
return(res)
}
xx <- diffusion_mime_2d(delta_df, time = 1:90/30, initial_value = c(3.5, 2.5), resolution = 20, span = .1)
ggplot() +
geom_path(data = xx, aes(x = x, y = y, color = time))
ggplot() +
geom_path(data = xx, aes(x = time, y= y))
ggplot() +
geom_path(data = xx, aes(x = time, y= x))
ggplot() +
geom_density_2d(data = otu_abundance %>%
as.data.frame() %>%
set_colnames(paste0("V", 1:5)), aes(x = V1, y = V2), color = "grey") +
geom_path(data = xx, aes(x = x, y = y, color = time*30)) +
scale_color_gradient(low = "blue", high = "red") +
theme_classic(15) +
labs(x = "Bacteroides fragilis et rel.", y = "Dialister", title = "Simulation") +
guides(color = guide_legend(title = "Time"))
diffusion_fit <- diffusion_2d_loess(x = pairwise_df$x_start, y = pairwise_df$y_start,
z = pairwise_df[, c("x_change", "y_change")],
resolution, span = 1.5,
newdata = make_grid(range(otu_abundance[, taxon1]),
range(otu_abundance[, taxon2]), resolution = 50))
|
#' Single Cell Cluster Tab UI
#'
#' Builds the clustering tab: a bar plot comparing clustering solutions plus
#' five UMAP panels, one per downsampled fraction (20%-100%) of the dataset.
#' The output ids ("barplot", "barplotLegend", "UMAP1".."UMAP5") are rendered
#' by the matching server function scClustering().
#'
#' @param id Module namespace id.
#' @export
#' @return None
scClusteringUI <- function(id) {
  #namespaces make it so that ids only need to be unique within a namespace and not across the app
  ns <- NS(id)
  tagList(
    # Top row: projection-quality bar plot with its textual legend.
    fluidRow(column(
      8,
      wellPanel(
        style = "background-color: #fff; border-color: #2c3e50; height: 515px;",
        p(
          tags$b('Comparing clustering solutions', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
        ),
        hr(),
        plotOutput(ns("barplot"), height = "360px") %>% withSpinner(color =
                                                                      "#0dc5c1"),
        textOutput(ns("barplotLegend"))
      )
    )),
    # UMAP panels in pairs: 20%/40%, then 60%/80%, then 100% alone.
    fluidRow(column(
      6,
      wellPanel(
        style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
        p(
          tags$b('Projecting 20% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
        ),
        hr(),
        plotOutput(ns("UMAP1"), height = "360px") %>% withSpinner(color =
                                                                    "#0dc5c1"),
      )
    ),
    column(6, fluidRow(
      column(
        12,
        wellPanel(
          style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
          p(
            tags$b('Projecting 40% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
          ),
          hr(),
          plotOutput(ns("UMAP2"), height = "360px") %>% withSpinner(color =
                                                                      "#0dc5c1")
        )
      )
    ))),
    fluidRow(column(
      6,
      wellPanel(
        style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
        p(
          tags$b('Projecting 60% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
        ),
        hr(),
        plotOutput(ns("UMAP3"), height = "360px") %>% withSpinner(color =
                                                                    "#0dc5c1")
      )
    ),
    column(6, fluidRow(
      column(
        12,
        wellPanel(
          style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
          p(
            tags$b('Projecting 80% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
          ),
          hr(),
          plotOutput(ns("UMAP4"), height = "360px") %>% withSpinner(color =
                                                                      "#0dc5c1")
        )
      )
    ))),
    fluidRow(column(
      6,
      wellPanel(
        style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
        p(
          tags$b('Projecting 100% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
        ),
        hr(),
        plotOutput(ns("UMAP5"), height = "360px") %>% withSpinner(color =
                                                                    "#0dc5c1")
      )
    ))
  )
}
#' Single Cell Variable Genes Tab UI
#'
#' Builds the variable-genes tab: an informational banner plus five panels,
#' one variable-feature plot per downsampled fraction (20%-100%).
#' The output ids ("variableGenes1".."variableGenes5") are rendered by the
#' matching server function scVarGenes().
#'
#' @param id Module namespace id.
#' @export
#' @return None
varGenesUI <- function(id) {
  ns <- NS(id)
  tagList(
    # Informational banner (variable genes are not computed for merged data).
    fluidRow(column(
      12,
      wellPanel(style = "background-color: #fff; border-color: #2c3e50; height: 50px;",
                p(
                  tags$b('Variable genes are only computed for single datasets', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
                ), )
    )),
    # Variable-gene panels in pairs: 20%/40%, then 60%/80%, then 100% alone.
    fluidRow(column(
      6,
      wellPanel(
        style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
        p(
          tags$b('Variable genes in 20% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
        ),
        hr(),
        plotOutput(ns("variableGenes1"), height = "360px") %>% withSpinner(color =
                                                                             "#0dc5c1")
      )
    ),
    column(6, fluidRow(
      column(
        12,
        wellPanel(
          style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
          p(
            tags$b('Variable genes in 40% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
          ),
          hr(),
          plotOutput(ns("variableGenes2"), height = "360px") %>% withSpinner(color =
                                                                               "#0dc5c1")
        )
      )
    ))),
    fluidRow(column(
      6,
      wellPanel(
        style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
        p(
          tags$b('Variable genes in 60% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
        ),
        hr(),
        plotOutput(ns("variableGenes3"), height = "360px") %>% withSpinner(color =
                                                                             "#0dc5c1")
      )
    ),
    column(6, fluidRow(
      column(
        12,
        wellPanel(
          style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
          p(
            tags$b('Variable genes in 80% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
          ),
          hr(),
          plotOutput(ns("variableGenes4"), height = "360px") %>% withSpinner(color =
                                                                               "#0dc5c1")
        )
      )
    ))),
    fluidRow(column(
      6,
      wellPanel(
        style = "background-color: #fff; border-color: #2c3e50; height: 500px; display:center-align;",
        p(
          tags$b('Variable genes in 100% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
        ),
        hr(),
        plotOutput(ns("variableGenes5"), height = "360px") %>% withSpinner(color =
                                                                             "#0dc5c1")
      )
    ))
  )
}
#' Single Cell Cluster Tab Server
#'
#' Runs QC, normalisation and scaling on the input Seurat object, then builds
#' five downsampled subsets (roughly 20%, 40%, 60%, 80% and 100% of the cells),
#' clusters each one and renders one UMAP per subset plus a bar plot scoring
#' how well each subset's clustering matches the full dataset.
#'
#' @param seurat Reactive value containing seurat object
#' @param mito Reactive value containing user input for mitochondrial genes pattern
#' @param res Reactive value containing user input for clustering resolution
#' @param dataset1_name Reactive value containing user input of uploaded dataset name
#'
#' @export
#' @return Returns a Reactive value containing list of downsampled seurat objects
#' with reduced dimensions (PCA data) and scaled counts
#'
scClustering <-
  function (input,
            output,
            session,
            seurat,
            mito,
            res,
            dataset1_name) {
    # Number of principal components used for neighbour graph and UMAP.
    dim <- 15
    # QC: flag mitochondrial reads and drop low-quality cells / likely doublets.
    seurat[["percent.mt"]] <- PercentageFeatureSet(seurat, pattern = mito)
    seurat <- subset(seurat, subset = nFeature_RNA > 200 &
                       nFeature_RNA < 8000 & percent.mt < 14)
    # Normalization. BUG FIX: the original called NormalizeData() a second time
    # with default arguments, which are identical to the explicit ones below
    # (LogNormalize, scale.factor = 10000) - the redundant call was removed.
    seurat <- NormalizeData(seurat,
                            normalization.method = "LogNormalize",
                            scale.factor = 10000)
    # Scale all genes so PCA is not dominated by highly expressed genes.
    all.genes <- rownames(seurat)
    seurat <- ScaleData(seurat, features = all.genes)
    # SUBSETTING OF CELLS
    # get number of cells in dataset
    numCells <- nrow(seurat@meta.data)
    # Smallest subset (~20% of the cells); also used as the step size, which
    # yields five subsets ending with (approximately) the full dataset.
    minSubset <- round(numCells * 0.2 - 1)
    incrementation <- minSubset
    # Accumulator for the generated subsets. (The original also created an
    # unused reactiveValues() here that was immediately overwritten - removed.)
    seuratObjectsList <- c()
    x <- 0
    for (i in seq(from = minSubset, to = numCells, by = incrementation)) {
      print(i)
      x <- x + 1
      # update progress bar
      # NOTE(review): only five iterations happen, so x/10 stops the bar at
      # 50%; presumably the remaining progress is advanced elsewhere - confirm.
      update_modal_progress(x / 10)
      print(x)
      # Random downsample of i cells.
      subset <- subset(seurat, cells = sample(Cells(seurat), i))
      # Variable features, PCA, clustering and UMAP for this subset.
      subset <- FindVariableFeatures(subset,
                                     selection.method = "vst",
                                     nfeatures = 2000)
      subset <- RunPCA(subset, features = VariableFeatures(object = subset))
      subset <- FindNeighbors(subset, dims = 1:dim)
      subset <- FindClusters(subset, resolution = res)
      subset <- RunUMAP(subset, dims = 1:dim)
      # Stash the UMAP plot as UMAP1..UMAP5 in this function's environment;
      # the renderPlot expressions below pick them up lazily.
      p2 <- DimPlot(subset, reduction = "umap", label = TRUE)
      assign(paste("UMAP", x, sep = ""), p2)
      # append subset to list
      seuratObjectsList <- c(seuratObjectsList, subset)
    }
    # Harmonise cluster labels across subsets, then score the projections.
    seuratObjectsList <- renameClusters(seuratObjectsList)
    combinedBarplot <- projectClusters(seuratObjectsList)
    output$barplot <- renderPlot({
      combinedBarplot
    })
    output$barplotLegend <- renderText({
      "Bar plot showing similarity score of subset and full dataset. Here, the cell labels
        from each subset are compared against the cell labels of
        the reference dataset (full dataset); values closest to 1 are most similar to the
        reference. ARI: Adjusted Rand Index; NMI: Normalized Mutual Information."
    })
    output$UMAP1 <- renderPlot({
      UMAP1
    })
    output$UMAP2 <- renderPlot({
      UMAP2
    })
    output$UMAP3 <- renderPlot({
      UMAP3
    })
    output$UMAP4 <- renderPlot({
      UMAP4
    })
    output$UMAP5 <- renderPlot({
      UMAP5
    })
    # return renamed clusters
    return(seuratObjectsList)
  }
#' Single Cell Variable genes Tab Server
#'
#' Renders, for each of the five downsampled subsets, a variable-feature plot
#' with the ten most variable genes labelled.
#'
#' @param seuratObjectsList Reactive value containing list of downsampled seurat objects
#' with reduced dimensions (PCA data) and variable features computed
#'
#' @export
#'
scVarGenes <- function(input, output, session, seuratObjectsList) {
  # Build one labelled plot per subset and stash it as variableGenes<idx> in
  # this function's environment; the renderPlot expressions below capture
  # those names lazily.
  for (idx in seq_len(5)) {
    top_genes <- head(VariableFeatures(seuratObjectsList[[idx]]), 10)
    base_plot <- VariableFeaturePlot(seuratObjectsList[[idx]])
    labelled_plot <- LabelPoints(plot = base_plot,
                                 points = top_genes,
                                 repel = TRUE)
    assign(paste0("variableGenes", idx), labelled_plot)
  }
  output$variableGenes1 <- renderPlot({
    variableGenes1
  })
  output$variableGenes2 <- renderPlot({
    variableGenes2
  })
  output$variableGenes3 <- renderPlot({
    variableGenes3
  })
  output$variableGenes4 <- renderPlot({
    variableGenes4
  })
  output$variableGenes5 <- renderPlot({
    variableGenes5
  })
}
#' Function to find Mode
#'
#' Returns the most frequent value in `x`. On ties, the value that occurs
#' first in `x` wins.
#'
#' @param x a vector of integer values
#'
#' @export
Mode <- function(x) {
  distinct_vals <- unique(x)
  freq <- tabulate(match(x, distinct_vals))
  distinct_vals[which.max(freq)]
}
#RENAMING CLUSTERS
#function to rename clusters after subsetting
#this program:
#selects cells that belong to a cluster, x, from the reference dataset,
#finds these cells in the query dataset,
#finds which cluster the majority of these cells belong to in the query dataset,
#renames that cluster to x, and
#repeats the process for the other clusters in the reference until all clusters in the query
#dataset are renamed.
#this ensures clusters are not mislabelled and allows projecting query and reference clusters
#while avoiding mislabelling errors
#' Function to reassign cluster labels across downsampled subsets
#'
#' Walks the subsets from the largest (the reference, i.e. the last element of
#' the list) down to the smallest, and relabels each subset's clusters with
#' the reference cluster label that the majority of its cells carry.
#'
#' @param seuratObjectsList Reactive value containing list of downsampled seurat objects
#' with reduced dimensions (PCA data) and scaled counts
#'
#' @export
#' @return Returns a Reactive value containing list of downsampled seurat objects
#' with reassigned cluster labels that correspond across downsampled subsets
renameClusters <- function(seuratObjectsList) {
  # The last element is the largest subset and acts as the reference.
  ref_subset=length(seuratObjectsList)
  #function to rename clusters after subsetting
  #for each object
  for (i in (length(seuratObjectsList)):1) {
    #make an empty list of size [max cluster#] to store new names of query subset
    newNames_list <-
      vector(mode = "character", length = max(as.numeric(seuratObjectsList[[i]]@active.ident)))
    #for each cluster in reference
    for (j in 0:max(as.numeric(as.character(seuratObjectsList[[i]]@active.ident)))) {
      #select all cell_ids where seurat_cluster = j from query subset
      clusterTable <-
        filter(
          seuratObjectsList[[i]]@meta.data,
          seuratObjectsList[[i]]@meta.data$seurat_clusters == j
        )
      #find corresponding cell ids in the reference dataset via merging
      #(by = 0 merges on row names, i.e. the cell barcodes)
      overlappingCells = merge(clusterTable, seuratObjectsList[[ref_subset]]@meta.data, by = 0)
      #Which clusters do most of these cells belong to in reference?
      predominantCluster = as.numeric(as.character(Mode(
        overlappingCells$seurat_clusters.y
      )))
      #In position of predominantcluster, store cluster label, j (as per reference dataset)
      newNames_list[[j + 1]] = predominantCluster
      #repeat for next cluster in first table
    }
    #Rename all clusters in first subset as per reference subset
    names(newNames_list) = levels(seuratObjectsList[[i]])
    seuratObjectsList[[i]] = RenameIdents(seuratObjectsList[[i]], newNames_list)
    levels(x = seuratObjectsList[[i]])
  }
  # Drop the loop-scoped helper table before returning.
  rm(clusterTable)
  return(seuratObjectsList)
}
#' Function to project down sampled subsets against the parent dataset and score projection quality
#'
#' Compares each subset's cluster assignments with those of the reference
#' (the last, largest subset) on the cells they share, and summarises the
#' agreement with ARI and NMI scores in a grouped bar plot.
#'
#' @param seuratObjectsList Reactive value containing list of downsampled seurat objects
#' with reduced dimensions (PCA data), scaled counts, and cluster labels that correspond across subsets
#'
#' @export
#' @return Returns a bar plot showing projection quality scores across subsets
#'
#'
projectClusters <- function(seuratObjectsList) {
  #CALCULATE PROJECTION QUALITY************************************
  # The last element is the full dataset and serves as the reference.
  ref_subset=length(seuratObjectsList)
  projectionQualityTable = c()
  for (i in 1:(length(seuratObjectsList))) {
    # Align cluster labels by cell id (by = 0 merges on row names).
    overlappingCells = merge(seuratObjectsList[[i]]@active.ident,
                             seuratObjectsList[[ref_subset]]@active.ident,
                             by =
                               0)
    overlappingCells$x<-as.numeric(as.character(overlappingCells$x))
    overlappingCells$y<-as.numeric(as.character(overlappingCells$y))
    #compute ARI and NMI
    ARI = ARI(overlappingCells$x, overlappingCells$y)
    NMI = NMI(overlappingCells$x, overlappingCells$y)
    # Subset size label in thousands of cells, e.g. "3.2K".
    subsetSize = paste(nrow(seuratObjectsList[[i]]@meta.data) / 1000, "K", sep =
                         "")
    projectionQualityTable = rbind(projectionQualityTable, c(i, subsetSize, ARI, NMI))
  }
  #num column is used to specify levels
  colnames(projectionQualityTable) = c("num","Subset", "ARI", "NMI")
  projectionQualityTable = data.frame(projectionQualityTable)
  # rbind() built a character matrix, so scores must be converted back.
  projectionQualityTable$ARI = as.numeric(as.character(projectionQualityTable$ARI))
  projectionQualityTable$NMI = as.numeric(as.character(projectionQualityTable$NMI))
  #to reorder the table by increasing size of subset
  projectionQualityTable$Subset <-
    factor(projectionQualityTable$Subset, levels = projectionQualityTable$Subset[order(projectionQualityTable$num)])
  #melting to plot multiple y values (one bar per metric per subset)
  projectionQualityTable = melt(projectionQualityTable[,2:4], id.vars = 'Subset')
  colnames(projectionQualityTable)[2] = "Key"
  combinedBarplot = ggplot(projectionQualityTable, aes(x = Subset, y = value, fill =
                                                         Key)) +
    geom_bar(stat = 'identity', position = 'dodge') +
    theme_minimal() + scale_fill_brewer(palette = "BuPu")
  return(combinedBarplot)
}
| /R/autoClustering.R | permissive | rzaied/scSubset | R | false | false | 15,638 | r | #' Single Cell Cluster Tab UI
#'
#' Builds the clustering tab: a bar plot comparing clustering solutions plus
#' five UMAP panels, one per downsampled fraction (20%-100%) of the dataset.
#' The output ids ("barplot", "barplotLegend", "UMAP1".."UMAP5") are rendered
#' by the matching server function scClustering().
#'
#' @param id Module namespace id.
#' @export
#' @return None
scClusteringUI <- function(id) {
  #namespaces make it so that ids only need to be unique within a namespace and not across the app
  ns <- NS(id)
  tagList(
    # Top row: projection-quality bar plot with its textual legend.
    fluidRow(column(
      8,
      wellPanel(
        style = "background-color: #fff; border-color: #2c3e50; height: 515px;",
        p(
          tags$b('Comparing clustering solutions', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
        ),
        hr(),
        plotOutput(ns("barplot"), height = "360px") %>% withSpinner(color =
                                                                      "#0dc5c1"),
        textOutput(ns("barplotLegend"))
      )
    )),
    # UMAP panels in pairs: 20%/40%, then 60%/80%, then 100% alone.
    fluidRow(column(
      6,
      wellPanel(
        style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
        p(
          tags$b('Projecting 20% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
        ),
        hr(),
        plotOutput(ns("UMAP1"), height = "360px") %>% withSpinner(color =
                                                                    "#0dc5c1"),
      )
    ),
    column(6, fluidRow(
      column(
        12,
        wellPanel(
          style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
          p(
            tags$b('Projecting 40% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
          ),
          hr(),
          plotOutput(ns("UMAP2"), height = "360px") %>% withSpinner(color =
                                                                      "#0dc5c1")
        )
      )
    ))),
    fluidRow(column(
      6,
      wellPanel(
        style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
        p(
          tags$b('Projecting 60% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
        ),
        hr(),
        plotOutput(ns("UMAP3"), height = "360px") %>% withSpinner(color =
                                                                    "#0dc5c1")
      )
    ),
    column(6, fluidRow(
      column(
        12,
        wellPanel(
          style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
          p(
            tags$b('Projecting 80% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
          ),
          hr(),
          plotOutput(ns("UMAP4"), height = "360px") %>% withSpinner(color =
                                                                      "#0dc5c1")
        )
      )
    ))),
    fluidRow(column(
      6,
      wellPanel(
        style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
        p(
          tags$b('Projecting 100% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
        ),
        hr(),
        plotOutput(ns("UMAP5"), height = "360px") %>% withSpinner(color =
                                                                    "#0dc5c1")
      )
    ))
  )
}
#' Single Cell Variable Genes Tab UI
#'
#' Builds the variable-genes tab: an informational banner plus five panels,
#' one variable-feature plot per downsampled fraction (20%-100%).
#' The output ids ("variableGenes1".."variableGenes5") are rendered by the
#' matching server function scVarGenes().
#'
#' @param id Module namespace id.
#' @export
#' @return None
varGenesUI <- function(id) {
  ns <- NS(id)
  tagList(
    # Informational banner (variable genes are not computed for merged data).
    fluidRow(column(
      12,
      wellPanel(style = "background-color: #fff; border-color: #2c3e50; height: 50px;",
                p(
                  tags$b('Variable genes are only computed for single datasets', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
                ), )
    )),
    # Variable-gene panels in pairs: 20%/40%, then 60%/80%, then 100% alone.
    fluidRow(column(
      6,
      wellPanel(
        style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
        p(
          tags$b('Variable genes in 20% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
        ),
        hr(),
        plotOutput(ns("variableGenes1"), height = "360px") %>% withSpinner(color =
                                                                             "#0dc5c1")
      )
    ),
    column(6, fluidRow(
      column(
        12,
        wellPanel(
          style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
          p(
            tags$b('Variable genes in 40% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
          ),
          hr(),
          plotOutput(ns("variableGenes2"), height = "360px") %>% withSpinner(color =
                                                                               "#0dc5c1")
        )
      )
    ))),
    fluidRow(column(
      6,
      wellPanel(
        style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
        p(
          tags$b('Variable genes in 60% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
        ),
        hr(),
        plotOutput(ns("variableGenes3"), height = "360px") %>% withSpinner(color =
                                                                             "#0dc5c1")
      )
    ),
    column(6, fluidRow(
      column(
        12,
        wellPanel(
          style = "background-color: #fff; border-color: #2c3e50; height: 500px;",
          p(
            tags$b('Variable genes in 80% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
          ),
          hr(),
          plotOutput(ns("variableGenes4"), height = "360px") %>% withSpinner(color =
                                                                               "#0dc5c1")
        )
      )
    ))),
    fluidRow(column(
      6,
      wellPanel(
        style = "background-color: #fff; border-color: #2c3e50; height: 500px; display:center-align;",
        p(
          tags$b('Variable genes in 100% of the dataset', style = "font-size: 100%; font-family:Helvetica; color:#4c4c4c; text-align:left;")
        ),
        hr(),
        plotOutput(ns("variableGenes5"), height = "360px") %>% withSpinner(color =
                                                                             "#0dc5c1")
      )
    ))
  )
}
#' Single Cell Cluster Tab Server
#'
#' Runs QC, normalisation and scaling on the input Seurat object, then builds
#' five downsampled subsets (roughly 20%, 40%, 60%, 80% and 100% of the cells),
#' clusters each one and renders one UMAP per subset plus a bar plot scoring
#' how well each subset's clustering matches the full dataset.
#'
#' @param seurat Reactive value containing seurat object
#' @param mito Reactive value containing user input for mitochondrial genes pattern
#' @param res Reactive value containing user input for clustering resolution
#' @param dataset1_name Reactive value containing user input of uploaded dataset name
#'
#' @export
#' @return Returns a Reactive value containing list of downsampled seurat objects
#' with reduced dimensions (PCA data) and scaled counts
#'
scClustering <-
  function (input,
            output,
            session,
            seurat,
            mito,
            res,
            dataset1_name) {
    # Number of principal components used for neighbour graph and UMAP.
    dim <- 15
    # QC: flag mitochondrial reads and drop low-quality cells / likely doublets.
    seurat[["percent.mt"]] <- PercentageFeatureSet(seurat, pattern = mito)
    seurat <- subset(seurat, subset = nFeature_RNA > 200 &
                       nFeature_RNA < 8000 & percent.mt < 14)
    # Normalization. BUG FIX: the original called NormalizeData() a second time
    # with default arguments, which are identical to the explicit ones below
    # (LogNormalize, scale.factor = 10000) - the redundant call was removed.
    seurat <- NormalizeData(seurat,
                            normalization.method = "LogNormalize",
                            scale.factor = 10000)
    # Scale all genes so PCA is not dominated by highly expressed genes.
    all.genes <- rownames(seurat)
    seurat <- ScaleData(seurat, features = all.genes)
    # SUBSETTING OF CELLS
    # get number of cells in dataset
    numCells <- nrow(seurat@meta.data)
    # Smallest subset (~20% of the cells); also used as the step size, which
    # yields five subsets ending with (approximately) the full dataset.
    minSubset <- round(numCells * 0.2 - 1)
    incrementation <- minSubset
    # Accumulator for the generated subsets. (The original also created an
    # unused reactiveValues() here that was immediately overwritten - removed.)
    seuratObjectsList <- c()
    x <- 0
    for (i in seq(from = minSubset, to = numCells, by = incrementation)) {
      print(i)
      x <- x + 1
      # update progress bar
      # NOTE(review): only five iterations happen, so x/10 stops the bar at
      # 50%; presumably the remaining progress is advanced elsewhere - confirm.
      update_modal_progress(x / 10)
      print(x)
      # Random downsample of i cells.
      subset <- subset(seurat, cells = sample(Cells(seurat), i))
      # Variable features, PCA, clustering and UMAP for this subset.
      subset <- FindVariableFeatures(subset,
                                     selection.method = "vst",
                                     nfeatures = 2000)
      subset <- RunPCA(subset, features = VariableFeatures(object = subset))
      subset <- FindNeighbors(subset, dims = 1:dim)
      subset <- FindClusters(subset, resolution = res)
      subset <- RunUMAP(subset, dims = 1:dim)
      # Stash the UMAP plot as UMAP1..UMAP5 in this function's environment;
      # the renderPlot expressions below pick them up lazily.
      p2 <- DimPlot(subset, reduction = "umap", label = TRUE)
      assign(paste("UMAP", x, sep = ""), p2)
      # append subset to list
      seuratObjectsList <- c(seuratObjectsList, subset)
    }
    # Harmonise cluster labels across subsets, then score the projections.
    seuratObjectsList <- renameClusters(seuratObjectsList)
    combinedBarplot <- projectClusters(seuratObjectsList)
    output$barplot <- renderPlot({
      combinedBarplot
    })
    output$barplotLegend <- renderText({
      "Bar plot showing similarity score of subset and full dataset. Here, the cell labels
        from each subset are compared against the cell labels of
        the reference dataset (full dataset); values closest to 1 are most similar to the
        reference. ARI: Adjusted Rand Index; NMI: Normalized Mutual Information."
    })
    output$UMAP1 <- renderPlot({
      UMAP1
    })
    output$UMAP2 <- renderPlot({
      UMAP2
    })
    output$UMAP3 <- renderPlot({
      UMAP3
    })
    output$UMAP4 <- renderPlot({
      UMAP4
    })
    output$UMAP5 <- renderPlot({
      UMAP5
    })
    # return renamed clusters
    return(seuratObjectsList)
  }
#' Single Cell Variable genes Tab Server
#'
#' Renders, for each of the five downsampled subsets, a variable-feature plot
#' with the ten most variable genes labelled.
#'
#' @param seuratObjectsList Reactive value containing list of downsampled seurat objects
#' with reduced dimensions (PCA data) and variable features computed
#'
#' @export
#'
scVarGenes <- function(input, output, session, seuratObjectsList) {
  # Build one labelled plot per subset and stash it as variableGenes<idx> in
  # this function's environment; the renderPlot expressions below capture
  # those names lazily.
  for (idx in seq_len(5)) {
    top_genes <- head(VariableFeatures(seuratObjectsList[[idx]]), 10)
    base_plot <- VariableFeaturePlot(seuratObjectsList[[idx]])
    labelled_plot <- LabelPoints(plot = base_plot,
                                 points = top_genes,
                                 repel = TRUE)
    assign(paste0("variableGenes", idx), labelled_plot)
  }
  output$variableGenes1 <- renderPlot({
    variableGenes1
  })
  output$variableGenes2 <- renderPlot({
    variableGenes2
  })
  output$variableGenes3 <- renderPlot({
    variableGenes3
  })
  output$variableGenes4 <- renderPlot({
    variableGenes4
  })
  output$variableGenes5 <- renderPlot({
    variableGenes5
  })
}
#' Function to find Mode
#'
#' Returns the most frequent value in `x`. On ties, the value that occurs
#' first in `x` wins.
#'
#' @param x a vector of integer values
#'
#' @export
Mode <- function(x) {
  distinct_vals <- unique(x)
  freq <- tabulate(match(x, distinct_vals))
  distinct_vals[which.max(freq)]
}
#RENAMING CLUSTERS
#function to rename clusters after subsetting
#this program:
#selects cells that belong to a cluster, x, from the reference dataset,
#finds these cells in the query dataset,
#finds which cluster the majority of these cells belong to in the query dataset,
#renames that cluster to x, and
#repeats the process for the other clusters in the reference until all clusters in the query
#dataset are renamed.
#this ensures clusters are not mislabelled and allows projecting query and reference clusters
#while avoiding mislabelling errors
#' Function to reassign cluster labels across downsampled subsets
#'
#' Walks the subsets from the largest (the reference, i.e. the last element of
#' the list) down to the smallest, and relabels each subset's clusters with
#' the reference cluster label that the majority of its cells carry.
#'
#' @param seuratObjectsList Reactive value containing list of downsampled seurat objects
#' with reduced dimensions (PCA data) and scaled counts
#'
#' @export
#' @return Returns a Reactive value containing list of downsampled seurat objects
#' with reassigned cluster labels that correspond across downsampled subsets
renameClusters <- function(seuratObjectsList) {
  # The last element is the largest subset and acts as the reference.
  ref_subset=length(seuratObjectsList)
  #function to rename clusters after subsetting
  #for each object
  for (i in (length(seuratObjectsList)):1) {
    #make an empty list of size [max cluster#] to store new names of query subset
    newNames_list <-
      vector(mode = "character", length = max(as.numeric(seuratObjectsList[[i]]@active.ident)))
    #for each cluster in reference
    for (j in 0:max(as.numeric(as.character(seuratObjectsList[[i]]@active.ident)))) {
      #select all cell_ids where seurat_cluster = j from query subset
      clusterTable <-
        filter(
          seuratObjectsList[[i]]@meta.data,
          seuratObjectsList[[i]]@meta.data$seurat_clusters == j
        )
      #find corresponding cell ids in the reference dataset via merging
      #(by = 0 merges on row names, i.e. the cell barcodes)
      overlappingCells = merge(clusterTable, seuratObjectsList[[ref_subset]]@meta.data, by = 0)
      #Which clusters do most of these cells belong to in reference?
      predominantCluster = as.numeric(as.character(Mode(
        overlappingCells$seurat_clusters.y
      )))
      #In position of predominantcluster, store cluster label, j (as per reference dataset)
      newNames_list[[j + 1]] = predominantCluster
      #repeat for next cluster in first table
    }
    #Rename all clusters in first subset as per reference subset
    names(newNames_list) = levels(seuratObjectsList[[i]])
    seuratObjectsList[[i]] = RenameIdents(seuratObjectsList[[i]], newNames_list)
    levels(x = seuratObjectsList[[i]])
  }
  # Drop the loop-scoped helper table before returning.
  rm(clusterTable)
  return(seuratObjectsList)
}
#' Function to project down sampled subsets against the parent dataset and score projection quality
#'
#' Compares each subset's cluster assignments with those of the reference
#' (the last, largest subset) on the cells they share, and summarises the
#' agreement with ARI and NMI scores in a grouped bar plot.
#'
#' @param seuratObjectsList Reactive value containing list of downsampled seurat objects
#' with reduced dimensions (PCA data), scaled counts, and cluster labels that correspond across subsets
#'
#' @export
#' @return Returns a bar plot showing projection quality scores across subsets
#'
#'
projectClusters <- function(seuratObjectsList) {
  #CALCULATE PROJECTION QUALITY************************************
  # The last element is the full dataset and serves as the reference.
  ref_subset=length(seuratObjectsList)
  projectionQualityTable = c()
  for (i in 1:(length(seuratObjectsList))) {
    # Align cluster labels by cell id (by = 0 merges on row names).
    overlappingCells = merge(seuratObjectsList[[i]]@active.ident,
                             seuratObjectsList[[ref_subset]]@active.ident,
                             by =
                               0)
    overlappingCells$x<-as.numeric(as.character(overlappingCells$x))
    overlappingCells$y<-as.numeric(as.character(overlappingCells$y))
    #compute ARI and NMI
    ARI = ARI(overlappingCells$x, overlappingCells$y)
    NMI = NMI(overlappingCells$x, overlappingCells$y)
    # Subset size label in thousands of cells, e.g. "3.2K".
    subsetSize = paste(nrow(seuratObjectsList[[i]]@meta.data) / 1000, "K", sep =
                         "")
    projectionQualityTable = rbind(projectionQualityTable, c(i, subsetSize, ARI, NMI))
  }
  #num column is used to specify levels
  colnames(projectionQualityTable) = c("num","Subset", "ARI", "NMI")
  projectionQualityTable = data.frame(projectionQualityTable)
  # rbind() built a character matrix, so scores must be converted back.
  projectionQualityTable$ARI = as.numeric(as.character(projectionQualityTable$ARI))
  projectionQualityTable$NMI = as.numeric(as.character(projectionQualityTable$NMI))
  #to reorder the table by increasing size of subset
  projectionQualityTable$Subset <-
    factor(projectionQualityTable$Subset, levels = projectionQualityTable$Subset[order(projectionQualityTable$num)])
  #melting to plot multiple y values (one bar per metric per subset)
  projectionQualityTable = melt(projectionQualityTable[,2:4], id.vars = 'Subset')
  colnames(projectionQualityTable)[2] = "Key"
  combinedBarplot = ggplot(projectionQualityTable, aes(x = Subset, y = value, fill =
                                                         Key)) +
    geom_bar(stat = 'identity', position = 'dodge') +
    theme_minimal() + scale_fill_brewer(palette = "BuPu")
  return(combinedBarplot)
}
|
# Return the name of the hospital in `state` holding the given rank (`num`,
# or "best"/"worst") for 30-day mortality of the given `outcome`
# ("heart attack", "heart failure" or "pneumonia").
# Returns NA when a numeric rank exceeds the number of hospitals.
rankhospital <- function(state, outcome, num = "best") {
  # Read the outcome file; every column as character so "Not Available"
  # entries do not corrupt parsing.
  outcomeDF <- read.csv('outcome-of-care-measures.csv', colClasses = "character")
  # Valid state codes are those present in column 7 of the data.
  statelist <- unique(outcomeDF[,7])
  # Validations
  if (!(state %in% statelist)) stop("invalid state")
  if (is.numeric(num) == FALSE) {
    # BUG FIX: the original raised "invalid outcome" for a bad `num`.
    if (!(num %in% c("best", "worst"))) stop("invalid num")
  }
  # Map the requested outcome to its 30-day mortality column.
  coltoread <- if (outcome == "heart attack") 11
  else if (outcome == "heart failure") 17
  else if (outcome == "pneumonia") 23
  else stop("invalid outcome")
  # Hospital name (col 2) and outcome value, filtered by state, dropping
  # "Not Available" outcomes.
  result <- subset(outcomeDF[, c(2,coltoread)], outcomeDF[coltoread] != "Not Available" & outcomeDF[7] == state)
  # Numeric outcome values so the ordering is numeric, not lexicographic.
  result[,2] <- as.numeric(result[,2])
  # Sort by outcome (ascending); ties broken alphabetically by hospital name.
  final <- result[order (result[2], result[1]), ]
  rowcnt <- nrow(final)
  # Resolve "best"/"worst" to a concrete rank.
  rank <- if (num == "best") 1
  else if (num == "worst") rowcnt
  else num
  # A numeric rank beyond the available hospitals yields NA.
  # (Also switched `&` to the scalar, short-circuiting `&&`.)
  if (is.numeric(num) && num > rowcnt) return (NA)
  # Hospital name at the requested rank.
  final[rank,1]
}
| /rankhospital.R | no_license | promisinganuj/DataScience | R | false | false | 1,720 | r | rankhospital <- function(state, outcome, num = "best") {
  # initialising a new data frame
  result <- data.frame()
  # Reading the outcome file, treating column's class as character
  outcomeDF <- read.csv('outcome-of-care-measures.csv', colClasses = "character")
  # Getting state list (valid codes are those present in column 7)
  statelist <- unique(outcomeDF[,7])
  # Validations
  if (!(state %in% statelist)) stop("invalid state")
  if (is.numeric(num) == FALSE) {
    # NOTE(review): this message should read "invalid num" - it fires for a
    # bad `num`, not a bad `outcome`.
    if (!(num %in% c("best", "worst"))) stop("invalid outcome")
  }
  # Getting the column to be read (30-day mortality for the chosen outcome)
  coltoread <- if (outcome == "heart attack") 11
  else if (outcome == "heart failure") 17
  else if (outcome == "pneumonia") 23
  else stop("invalid outcome")
  # Getting the state and corresponding outcome value, data filtered by state and not NA outcome
  result <- subset(outcomeDF[, c(2,coltoread)], outcomeDF[coltoread] != "Not Available" & outcomeDF[7] == state)
  # Converting outcome values to numeric for sorting purpose
  result[,2] <- as.numeric(result[,2])
  # Getting the sorted result based on outcome and then on hospital
  final <- result[order (result[2], result[1]), ]
  # Getting row count in sorted dataframe
  rowcnt <- nrow(final)
  # Defining the rank to be fetched based on the parameter passed
  rank <- if (num == "best") 1
  else if (num == "worst") rowcnt
  else num
  # If the rank passed is more than the number of records in the frame, return NA
  # (note: `&` works here but the scalar `&&` would be more idiomatic)
  if (is.numeric(num) & num > rowcnt) return (NA)
  # Print the hospital name (rank, first column)
  final[rank,1]
}
|
# Script setup: load libraries, fix the RNG seed, clear the workspace and
# load the raw dataset from the cloud bucket.
library( "data.table")
library(ggplot2)
library(devtools)
library(Rcpp)
library(randomForest)
set.seed(1)
# clear the workspace
rm( list=ls() )
gc()
setwd("/home/fjf_arg_gcloud/buckets/b1/datasetsOri/")
kcampos_separador <- "\t"
karchivo_entrada_full <- "paquete_premium.txt.gz"
ds <- fread(karchivo_entrada_full, header=TRUE, sep=kcampos_separador)
# Binarise the target: BAJA+2 -> POS, everything else -> NEG.
ds[ , clase_binaria := as.factor(ifelse( clase_ternaria=="BAJA+2", "POS", "NEG" )) ]
ds[ , clase_ternaria := NULL ] # drop clase_ternaria, no longer needed
# Impute remaining NAs (randomForest's median/mode imputation).
ds <- na.roughfix( ds )
# Keep only the most recent months.
ds = ds[foto_mes>201811]
# Compile the rolling-history kernel fhistC(pcolumna, pdesde).
# For each row i it looks at the window of rows [pdesde[i], i] of pcolumna,
# skips NAs, and fits an ordinary-least-squares line of value vs. position in
# the window. It returns one numeric vector of length 4*n laid out as four
# consecutive segments: slope, window minimum, window maximum and window mean
# (all NA when the window holds fewer than two non-NA values).
cppFunction('NumericVector fhistC(NumericVector pcolumna, IntegerVector pdesde )
{
  /* Aqui se cargan los valores para la regresion */
  double x[100] ;
  double y[100] ;
  int n = pcolumna.size();
  NumericVector out( 4*n );
  for(int i = 0; i < n; i++)
  {
    int libre = 0 ;
    int xvalor = 1 ;
    for( int j= pdesde[i]-1; j<=i; j++ )
    {
      double a = pcolumna[j] ;
      if( !R_IsNA( a ) )
      {
        y[ libre ]= a ;
        x[ libre ]= xvalor ;
        libre++ ;
      }
      xvalor++ ;
    }
    /* Si hay al menos dos valores */
    if( libre > 1 )
    {
      double xsum = x[0] ;
      double ysum = y[0] ;
      double xysum = xsum * ysum ;
      double xxsum = xsum * xsum ;
      double vmin = y[0] ;
      double vmax = y[0] ;
      for( int h=1; h<libre; h++)
      {
        xsum += x[h] ;
        ysum += y[h] ;
        xysum += x[h]*y[h] ;
        xxsum += x[h]*x[h] ;
        if( y[h] < vmin ) vmin = y[h] ;
        if( y[h] > vmax ) vmax = y[h] ;
      }
      out[ i ] = (libre*xysum - xsum*ysum)/(libre*xxsum -xsum*xsum) ;
      out[ i + n ] = vmin ;
      out[ i + 2*n ] = vmax ;
      out[ i + 3*n ] = ysum / libre ;
    }
    else
    {
      out[ i ] = NA_REAL ;
      out[ i + n ] = NA_REAL ;
      out[ i + 2*n ] = NA_REAL ;
      out[ i + 3*n ] = NA_REAL ;
    }
  }
  return out;
}')
# Snapshot of the raw column set so engineered columns can be identified later.
columnas_originales <- copy(colnames( ds ))
# Combined Master+Visa card-status features (9 encodes a missing status).
ds[ , mv_status01 := pmax( Master_status, Visa_status, na.rm = TRUE) ]
ds[ , mv_status02 := Master_status + Visa_status ]
ds[ , mv_status03 := pmax( ifelse( is.na(Master_status), 9, Master_status) , ifelse( is.na(Visa_status), 9, Visa_status) ) ]
ds[ , mv_status04 := ifelse( is.na(Master_status), 9, Master_status)  +  ifelse( is.na(Visa_status), 9, Visa_status) ]
ds[ , mv_status05 := ifelse( is.na(Master_status), 9, Master_status)  +  10*ifelse( is.na(Visa_status), 9, Visa_status) ]
ds[ , mv_status06 := ifelse( is.na(Visa_status),
                             ifelse( is.na(Master_status), 9, Master_status),
                             Visa_status)  ]
ds[ , mv_status07 := ifelse( is.na(Master_status),
                             ifelse( is.na(Visa_status), 9, Visa_status),
                             Master_status)  ]
# Combined Master+Visa amounts/dates: sums of monetary columns, earliest
# due/arrears dates, latest closing/opening dates.
ds[ , mv_mfinanciacion_limite := rowSums( cbind( Master_mfinanciacion_limite,  Visa_mfinanciacion_limite) , na.rm=TRUE ) ]
ds[ , mv_Fvencimiento         := pmin( Master_Fvencimiento, Visa_Fvencimiento, na.rm = TRUE) ]
ds[ , mv_Finiciomora          := pmin( Master_Finiciomora, Visa_Finiciomora, na.rm = TRUE) ]
ds[ , mv_msaldototal          := rowSums( cbind( Master_msaldototal,  Visa_msaldototal) , na.rm=TRUE ) ]
ds[ , mv_msaldopesos          := rowSums( cbind( Master_msaldopesos,  Visa_msaldopesos) , na.rm=TRUE ) ]
ds[ , mv_msaldodolares        := rowSums( cbind( Master_msaldodolares,  Visa_msaldodolares) , na.rm=TRUE ) ]
ds[ , mv_mconsumospesos       := rowSums( cbind( Master_mconsumospesos,  Visa_mconsumospesos) , na.rm=TRUE ) ]
ds[ , mv_mconsumosdolares     := rowSums( cbind( Master_mconsumosdolares,  Visa_mconsumosdolares) , na.rm=TRUE ) ]
ds[ , mv_mlimitecompra        := rowSums( cbind( Master_mlimitecompra,  Visa_mlimitecompra) , na.rm=TRUE ) ]
ds[ , mv_madelantopesos       := rowSums( cbind( Master_madelantopesos,  Visa_madelantopesos) , na.rm=TRUE ) ]
ds[ , mv_madelantodolares     := rowSums( cbind( Master_madelantodolares,  Visa_madelantodolares) , na.rm=TRUE ) ]
ds[ , mv_fultimo_cierre       := pmax( Master_fultimo_cierre, Visa_fultimo_cierre, na.rm = TRUE) ]
ds[ , mv_mpagado              := rowSums( cbind( Master_mpagado,  Visa_mpagado) , na.rm=TRUE ) ]
ds[ , mv_mpagospesos          := rowSums( cbind( Master_mpagospesos,  Visa_mpagospesos) , na.rm=TRUE ) ]
ds[ , mv_mpagosdolares        := rowSums( cbind( Master_mpagosdolares,  Visa_mpagosdolares) , na.rm=TRUE ) ]
ds[ , mv_fechaalta            := pmax( Master_fechaalta, Visa_fechaalta, na.rm = TRUE) ]
ds[ , mv_mconsumototal        := rowSums( cbind( Master_mconsumototal,  Visa_mconsumototal) , na.rm=TRUE ) ]
ds[ , mv_cconsumos            := rowSums( cbind( Master_cconsumos,  Visa_cconsumos) , na.rm=TRUE ) ]
ds[ , mv_cadelantosefectivo   := rowSums( cbind( Master_cadelantosefectivo,  Visa_cadelantosefectivo) , na.rm=TRUE ) ]
ds[ , mv_mpagominimo          := rowSums( cbind( Master_mpagominimo,  Visa_mpagominimo) , na.rm=TRUE ) ]
# Ratio features, mostly relative to the combined credit limit.
ds[ , mvr_Master_mlimitecompra:= Master_mlimitecompra / mv_mlimitecompra ]
ds[ , mvr_Visa_mlimitecompra  := Visa_mlimitecompra / mv_mlimitecompra ]
ds[ , mvr_msaldototal         := mv_msaldototal / mv_mlimitecompra ]
ds[ , mvr_msaldopesos         := mv_msaldopesos / mv_mlimitecompra ]
ds[ , mvr_msaldopesos2        := mv_msaldopesos / mv_msaldototal ]
ds[ , mvr_msaldodolares       := mv_msaldodolares / mv_mlimitecompra ]
ds[ , mvr_msaldodolares2      := mv_msaldodolares / mv_msaldototal ]
ds[ , mvr_mconsumospesos      := mv_mconsumospesos / mv_mlimitecompra ]
ds[ , mvr_mconsumosdolares    := mv_mconsumosdolares / mv_mlimitecompra ]
ds[ , mvr_madelantopesos      := mv_madelantopesos / mv_mlimitecompra ]
ds[ , mvr_madelantodolares    := mv_madelantodolares / mv_mlimitecompra ]
ds[ , mvr_mpagado             := mv_mpagado / mv_mlimitecompra ]
ds[ , mvr_mpagospesos         := mv_mpagospesos / mv_mlimitecompra ]
ds[ , mvr_mpagosdolares       := mv_mpagosdolares / mv_mlimitecompra ]
ds[ , mvr_mconsumototal       := mv_mconsumototal / mv_mlimitecompra ]
ds[ , mvr_mpagominimo         := mv_mpagominimo / mv_mlimitecompra ]
# Impute NAs introduced by the ratios (e.g. division by NA limits).
ds <- na.roughfix( ds )
# Names of the engineered columns added above.
columnas_extendidas <- copy( setdiff( colnames(ds), columnas_originales ) )
# sort by numero_de_cliente and foto_mes so each client's history is contiguous
setorder( ds, numero_de_cliente, foto_mes )
# Number of months used as the rolling-history window.
ventana_regresion <- 6
last <- nrow( ds )
kcampo_id_idx <- match( "numero_de_cliente", names(ds) )
vector_ids <- ds[[ kcampo_id_idx ]]
# vector_desde[i] = index of the first row of the window ending at row i,
# clamped at 1 for the first rows.
vector_desde <- seq( -ventana_regresion+2, nrow(ds)-ventana_regresion+1 )
vector_desde[ 1:ventana_regresion ] <- 1
# Restart the window whenever the client id changes, then make the window
# start monotone so it never reaches back into a previous client.
for( i in 2:last )  if( vector_ids[ i-1 ] !=  vector_ids[ i ] ) {  vector_desde[i] <-  i }
for( i in 2:last )  if( vector_desde[i] < vector_desde[i-1] )  {  vector_desde[i] <-  vector_desde[i-1] }
columnas_no_procesar <- c( "numero_de_cliente", "foto_mes", "clase_binaria" )
columnas_originales_a_procesar <- setdiff( columnas_originales, columnas_no_procesar )
# Rolling trend/min/max features for every original column (fhistC returns
# four segments of length `last`: slope, min, max, mean).
# NOTE(review): the fourth segment (the rolling mean, positions
# 3*last+1 .. 4*last) is computed but never stored - confirm whether an
# "__avg" feature was intended.
for( campo in columnas_originales_a_procesar )
{
  campo_idx <- match( campo, names(ds) )
  col_original <- ds[[ campo_idx ]]
  nueva_col <- fhistC( col_original, vector_desde )
  ds[ , paste( campo, "__tend", sep="" ):= nueva_col[ (0*last +1):(1*last) ] ]
  ds[ , paste( campo, "__min" , sep="" ):= nueva_col[ (1*last +1):(2*last) ] ]
  ds[ , paste( campo, "__max" , sep="" ):= nueva_col[ (2*last +1):(3*last) ] ]
}
# Keep the class label as the last column.
nuevo_orden <- c( setdiff( colnames( ds ) , "clase_binaria" ) , "clase_binaria" )
setcolorder( ds, nuevo_orden )
# Same rolling features for the engineered (mv_/mvr_) columns.
# NOTE(review): this loop duplicates the one above verbatim; the two could be
# merged by iterating over the union of both column sets.
columnas_extendidas_a_procesar <- setdiff( columnas_extendidas, columnas_no_procesar )
for( campo in columnas_extendidas_a_procesar )
{
  campo_idx <- match( campo, names(ds) )
  col_original <- ds[[ campo_idx ]]
  nueva_col <- fhistC( col_original, vector_desde )
  ds[ , paste( campo, "__tend", sep="" ):= nueva_col[ (0*last +1):(1*last) ] ]
  ds[ , paste( campo, "__min" , sep="" ):= nueva_col[ (1*last +1):(2*last) ] ]
  ds[ , paste( campo, "__max" , sep="" ):= nueva_col[ (2*last +1):(3*last) ] ]
}
nuevo_orden <- c( setdiff( colnames( ds ) , "clase_binaria" ) , "clase_binaria" )
setcolorder( ds, nuevo_orden )
library(ranger)
# Fit a Random Forest with the ranger package.
params <- list( "num.trees"= 500, # number of trees
"mtry"= 3,
"min.node.size"= 1, # smallest allowed leaf
"max.depth"= 0 # 0 means unlimited depth
)
# The history features just created contain NAs (first months of each
# client); impute again before fitting, since ranger does not accept NA.
ds <- na.roughfix( ds )
modelo <- ranger( formula= "clase_binaria ~ .",
data= ds[foto_mes %in% c(201906, 201907, 201908, 201909), ], # train on the four months 201906-201909 (the original comment claimed six months up to 201912, which did not match this filter)
probability= TRUE, # return class probabilities
num.trees= params$num.trees,
mtry= params$mtry,
min.node.size= params$min.node.size,
max.depth= params$max.depth
)
# Score the unlabeled month, 202001.
prediccion_202001 <- predict( modelo, ds[foto_mes==202001, ] )
# Build the submission table: client id plus P(BAJA+2).
entrega <- as.data.table(cbind( "numero_de_cliente"= ds[ foto_mes==202001, numero_de_cliente],
"prob"= prediccion_202001$predictions[, "POS"]) )
# Stimulate every client whose predicted probability exceeds 2.5%.
entrega[ , estimulo := as.integer( prob > 0.025)]
# Write the output file.
fwrite( entrega[ , c("numero_de_cliente", "estimulo"), with=FALSE],
sep= ",",
file= "entrega_2010_2.csv" )
| /scripts/20.10 - completo cátedra - 10.04MM.R | no_license | fedefliguer/maestria_DMEyF_TP01 | R | false | false | 9,650 | r | library( "data.table")
library(ggplot2)
library(devtools)
library(Rcpp)
library(randomForest)
set.seed(1)
#limpio la memoria
rm( list=ls() )
gc()
setwd("/home/fjf_arg_gcloud/buckets/b1/datasetsOri/")
kcampos_separador <- "\t"
karchivo_entrada_full <- "paquete_premium.txt.gz"
ds <- fread(karchivo_entrada_full, header=TRUE, sep=kcampos_separador)
ds[ , clase_binaria := as.factor(ifelse( clase_ternaria=="BAJA+2", "POS", "NEG" )) ]
ds[ , clase_ternaria := NULL ] #elimino la clase_ternaria, ya no la necesito
ds <- na.roughfix( ds )
ds = ds[foto_mes>201811]
cppFunction('NumericVector fhistC(NumericVector pcolumna, IntegerVector pdesde )
{
/* Aqui se cargan los valores para la regresion */
double x[100] ;
double y[100] ;
int n = pcolumna.size();
NumericVector out( 4*n );
for(int i = 0; i < n; i++)
{
int libre = 0 ;
int xvalor = 1 ;
for( int j= pdesde[i]-1; j<=i; j++ )
{
double a = pcolumna[j] ;
if( !R_IsNA( a ) )
{
y[ libre ]= a ;
x[ libre ]= xvalor ;
libre++ ;
}
xvalor++ ;
}
/* Si hay al menos dos valores */
if( libre > 1 )
{
double xsum = x[0] ;
double ysum = y[0] ;
double xysum = xsum * ysum ;
double xxsum = xsum * xsum ;
double vmin = y[0] ;
double vmax = y[0] ;
for( int h=1; h<libre; h++)
{
xsum += x[h] ;
ysum += y[h] ;
xysum += x[h]*y[h] ;
xxsum += x[h]*x[h] ;
if( y[h] < vmin ) vmin = y[h] ;
if( y[h] > vmax ) vmax = y[h] ;
}
out[ i ] = (libre*xysum - xsum*ysum)/(libre*xxsum -xsum*xsum) ;
out[ i + n ] = vmin ;
out[ i + 2*n ] = vmax ;
out[ i + 3*n ] = ysum / libre ;
}
else
{
out[ i ] = NA_REAL ;
out[ i + n ] = NA_REAL ;
out[ i + 2*n ] = NA_REAL ;
out[ i + 3*n ] = NA_REAL ;
}
}
return out;
}')
columnas_originales <- copy(colnames( ds ))
ds[ , mv_status01 := pmax( Master_status, Visa_status, na.rm = TRUE) ]
ds[ , mv_status02 := Master_status + Visa_status ]
ds[ , mv_status03 := pmax( ifelse( is.na(Master_status), 9, Master_status) , ifelse( is.na(Visa_status), 9, Visa_status) ) ]
ds[ , mv_status04 := ifelse( is.na(Master_status), 9, Master_status) + ifelse( is.na(Visa_status), 9, Visa_status) ]
ds[ , mv_status05 := ifelse( is.na(Master_status), 9, Master_status) + 10*ifelse( is.na(Visa_status), 9, Visa_status) ]
ds[ , mv_status06 := ifelse( is.na(Visa_status),
ifelse( is.na(Master_status), 9, Master_status),
Visa_status) ]
ds[ , mv_status07 := ifelse( is.na(Master_status),
ifelse( is.na(Visa_status), 9, Visa_status),
Master_status) ]
ds[ , mv_mfinanciacion_limite := rowSums( cbind( Master_mfinanciacion_limite, Visa_mfinanciacion_limite) , na.rm=TRUE ) ]
ds[ , mv_Fvencimiento := pmin( Master_Fvencimiento, Visa_Fvencimiento, na.rm = TRUE) ]
ds[ , mv_Finiciomora := pmin( Master_Finiciomora, Visa_Finiciomora, na.rm = TRUE) ]
ds[ , mv_msaldototal := rowSums( cbind( Master_msaldototal, Visa_msaldototal) , na.rm=TRUE ) ]
ds[ , mv_msaldopesos := rowSums( cbind( Master_msaldopesos, Visa_msaldopesos) , na.rm=TRUE ) ]
ds[ , mv_msaldodolares := rowSums( cbind( Master_msaldodolares, Visa_msaldodolares) , na.rm=TRUE ) ]
ds[ , mv_mconsumospesos := rowSums( cbind( Master_mconsumospesos, Visa_mconsumospesos) , na.rm=TRUE ) ]
ds[ , mv_mconsumosdolares := rowSums( cbind( Master_mconsumosdolares, Visa_mconsumosdolares) , na.rm=TRUE ) ]
ds[ , mv_mlimitecompra := rowSums( cbind( Master_mlimitecompra, Visa_mlimitecompra) , na.rm=TRUE ) ]
ds[ , mv_madelantopesos := rowSums( cbind( Master_madelantopesos, Visa_madelantopesos) , na.rm=TRUE ) ]
ds[ , mv_madelantodolares := rowSums( cbind( Master_madelantodolares, Visa_madelantodolares) , na.rm=TRUE ) ]
ds[ , mv_fultimo_cierre := pmax( Master_fultimo_cierre, Visa_fultimo_cierre, na.rm = TRUE) ]
ds[ , mv_mpagado := rowSums( cbind( Master_mpagado, Visa_mpagado) , na.rm=TRUE ) ]
ds[ , mv_mpagospesos := rowSums( cbind( Master_mpagospesos, Visa_mpagospesos) , na.rm=TRUE ) ]
ds[ , mv_mpagosdolares := rowSums( cbind( Master_mpagosdolares, Visa_mpagosdolares) , na.rm=TRUE ) ]
ds[ , mv_fechaalta := pmax( Master_fechaalta, Visa_fechaalta, na.rm = TRUE) ]
ds[ , mv_mconsumototal := rowSums( cbind( Master_mconsumototal, Visa_mconsumototal) , na.rm=TRUE ) ]
ds[ , mv_cconsumos := rowSums( cbind( Master_cconsumos, Visa_cconsumos) , na.rm=TRUE ) ]
ds[ , mv_cadelantosefectivo := rowSums( cbind( Master_cadelantosefectivo, Visa_cadelantosefectivo) , na.rm=TRUE ) ]
ds[ , mv_mpagominimo := rowSums( cbind( Master_mpagominimo, Visa_mpagominimo) , na.rm=TRUE ) ]
ds[ , mvr_Master_mlimitecompra:= Master_mlimitecompra / mv_mlimitecompra ]
ds[ , mvr_Visa_mlimitecompra := Visa_mlimitecompra / mv_mlimitecompra ]
ds[ , mvr_msaldototal := mv_msaldototal / mv_mlimitecompra ]
ds[ , mvr_msaldopesos := mv_msaldopesos / mv_mlimitecompra ]
ds[ , mvr_msaldopesos2 := mv_msaldopesos / mv_msaldototal ]
ds[ , mvr_msaldodolares := mv_msaldodolares / mv_mlimitecompra ]
ds[ , mvr_msaldodolares2 := mv_msaldodolares / mv_msaldototal ]
ds[ , mvr_mconsumospesos := mv_mconsumospesos / mv_mlimitecompra ]
ds[ , mvr_mconsumosdolares := mv_mconsumosdolares / mv_mlimitecompra ]
ds[ , mvr_madelantopesos := mv_madelantopesos / mv_mlimitecompra ]
ds[ , mvr_madelantodolares := mv_madelantodolares / mv_mlimitecompra ]
ds[ , mvr_mpagado := mv_mpagado / mv_mlimitecompra ]
ds[ , mvr_mpagospesos := mv_mpagospesos / mv_mlimitecompra ]
ds[ , mvr_mpagosdolares := mv_mpagosdolares / mv_mlimitecompra ]
ds[ , mvr_mconsumototal := mv_mconsumototal / mv_mlimitecompra ]
ds[ , mvr_mpagominimo := mv_mpagominimo / mv_mlimitecompra ]
ds <- na.roughfix( ds )
columnas_extendidas <- copy( setdiff( colnames(ds), columnas_originales ) )
#ordeno por numero_de_cliente y foto_mes
setorder( ds, numero_de_cliente, foto_mes )
#Esta es la cantidad de meses que utilizo para la historia
ventana_regresion <- 6
last <- nrow( ds )
kcampo_id_idx <- match( "numero_de_cliente", names(ds) )
vector_ids <- ds[[ kcampo_id_idx ]]
vector_desde <- seq( -ventana_regresion+2, nrow(ds)-ventana_regresion+1 )
vector_desde[ 1:ventana_regresion ] <- 1
for( i in 2:last ) if( vector_ids[ i-1 ] != vector_ids[ i ] ) { vector_desde[i] <- i }
for( i in 2:last ) if( vector_desde[i] < vector_desde[i-1] ) { vector_desde[i] <- vector_desde[i-1] }
columnas_no_procesar <- c( "numero_de_cliente", "foto_mes", "clase_binaria" )
columnas_originales_a_procesar <- setdiff( columnas_originales, columnas_no_procesar )
for( campo in columnas_originales_a_procesar )
{
campo_idx <- match( campo, names(ds) )
col_original <- ds[[ campo_idx ]]
nueva_col <- fhistC( col_original, vector_desde )
ds[ , paste( campo, "__tend", sep="" ):= nueva_col[ (0*last +1):(1*last) ] ]
ds[ , paste( campo, "__min" , sep="" ):= nueva_col[ (1*last +1):(2*last) ] ]
ds[ , paste( campo, "__max" , sep="" ):= nueva_col[ (2*last +1):(3*last) ] ]
}
nuevo_orden <- c( setdiff( colnames( ds ) , "clase_binaria" ) , "clase_binaria" )
setcolorder( ds, nuevo_orden )
columnas_extendidas_a_procesar <- setdiff( columnas_extendidas, columnas_no_procesar )
for( campo in columnas_extendidas_a_procesar )
{
campo_idx <- match( campo, names(ds) )
col_original <- ds[[ campo_idx ]]
nueva_col <- fhistC( col_original, vector_desde )
ds[ , paste( campo, "__tend", sep="" ):= nueva_col[ (0*last +1):(1*last) ] ]
ds[ , paste( campo, "__min" , sep="" ):= nueva_col[ (1*last +1):(2*last) ] ]
ds[ , paste( campo, "__max" , sep="" ):= nueva_col[ (2*last +1):(3*last) ] ]
}
nuevo_orden <- c( setdiff( colnames( ds ) , "clase_binaria" ) , "clase_binaria" )
setcolorder( ds, nuevo_orden )
library(ranger)
#genero el modelo de Random Forest con la libreria ranger
params <- list( "num.trees"= 500, #cantidad de arboles
"mtry"= 3,
"min.node.size"= 1, # hoja mas chica
"max.depth"= 0 # 0 significa profundidad infinita
)
ds <- na.roughfix( ds )
modelo <- ranger( formula= "clase_binaria ~ .",
data= ds[foto_mes %in% c(201906, 201907, 201908, 201909), ], #aqui considero los 6 meses de 201906 a 201912
probability= TRUE, #para que devuelva las probabilidades
num.trees= params$num.trees,
mtry= params$mtry,
min.node.size= params$min.node.size,
max.depth= params$max.depth
)
#aplico el modelo a los datos sin clase, 202001
prediccion_202001 <- predict( modelo, ds[foto_mes==202001, ] )
#genero el dataset de entrega
entrega <- as.data.table(cbind( "numero_de_cliente"= ds[ foto_mes==202001, numero_de_cliente],
"prob"= prediccion_202001$predictions[, "POS"]) )
entrega[ , estimulo := as.integer( prob > 0.025)]
#genero el archivo de salida
fwrite( entrega[ , c("numero_de_cliente", "estimulo"), with=FALSE],
sep= ",",
file= "entrega_2010_2.csv" )
|
# Line plot of daily COVID-19 cases from the ECDC open-data feed.
library(tidyverse)
library(ggplot2)

# Download the ECDC case-distribution CSV; empty strings become NA and the
# UTF-8 byte-order mark is stripped.
data <- read.csv("https://opendata.ecdc.europa.eu/covid19/casedistribution/csv",
                 na.strings = "", fileEncoding = "UTF-8-BOM")

# dateRep arrives as text; parse it so the x-axis is ordered chronologically
# rather than lexically.
# NOTE(review): dd/mm/yyyy format assumed from the ECDC feed -- confirm.
data$dateRep <- as.Date(data$dateRep, format = "%d/%m/%Y")

# Build the plot and print it explicitly so it renders when the script is
# sourced non-interactively.  (The original ended with plt.show(), a
# Python/matplotlib leftover that is not an R function, and created an
# unused one-row data.frame of the literal strings 'dateRep'/'cases'.)
p <- ggplot(data, aes(x = dateRep, y = cases, group = 1)) +
  geom_line(color = "red") +
  geom_point()
print(p)
| /Line Plot.R | no_license | nkeeley/MSDS-Hackathon-Covid | R | false | false | 304 | r | library(tidyverse)
library(ggplot2)
data <- read.csv("https://opendata.ecdc.europa.eu/covid19/casedistribution/csv", na.strings = "", fileEncoding = "UTF-8-BOM")
df <- data.frame('dateRep','cases')
ggplot(data, aes(x=dateRep, y=cases, group=1)) +
geom_line(color="red")+
geom_point()
plt.show()
|
library(ape)
testtree <- read.tree("2041_41.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="2041_41_unrooted.txt") | /codeml_files/newick_trees_processed/2041_41/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
# Read the phylogenetic tree for alignment 2041_41, remove its root, and
# write the unrooted topology back out (presumably as input for codeml,
# judging by the file path -- confirm).
testtree <- read.tree("2041_41.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="2041_41_unrooted.txt")
plot3 <- function() {
# Import data from csv. Expects the csv to be in the working directory.
HHdata <- read.csv("./household_power_consumption.txt",na.strings="?",header=TRUE,sep=";")
# Combine the separate Date and Time columns into one POSIXlt timestamp.
HHdata$DateTime <- paste(HHdata$Date,HHdata$Time,sep=" ")
HHdata$DateTime <- strptime(HHdata$DateTime,format="%d/%m/%Y %H:%M:%S")
# Keep only the two days of interest: 2007-02-01 and 2007-02-02.
DS <- subset(HHdata,as.Date(HHdata$DateTime) > as.Date("2007-01-31"))
DS <- subset(DS,as.Date(DS$DateTime)<as.Date("2007-02-03"))
##plot3
par(mfrow=c(1,1))
png(filename = "plot3.png")
# Empty frame first, then one line per sub-metering series.
plot(DS$DateTime,DS$Sub_metering_1,type="n",ylab="Energy sub metering",xlab="")
lines(DS$DateTime,DS$Sub_metering_1)
lines(DS$DateTime,DS$Sub_metering_2,col="red")
lines(DS$DateTime,DS$Sub_metering_3, col="blue")
# Legend colors must match the series drawn above: 1 = black, 2 = red,
# 3 = blue.  (Was black/blue/red, mislabelling series 2 and 3.)
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=c(1,1,1), lwd=c(2.5,2.5,2.5),col=c("black","red","blue"))
dev.off()
} | /plot3.R | no_license | sptigelaar/exploratory_data_analysis_cp1 | R | false | false | 962 | r | plot3 <- function() {
# Import data from csv. Expects the csv to be in the working directory.
HHdata <- read.csv("./household_power_consumption.txt",na.strings="?",header=TRUE,sep=";")
# Combine the separate Date and Time columns into one POSIXlt timestamp.
HHdata$DateTime <- paste(HHdata$Date,HHdata$Time,sep=" ")
HHdata$DateTime <- strptime(HHdata$DateTime,format="%d/%m/%Y %H:%M:%S")
# Keep only the two days of interest: 2007-02-01 and 2007-02-02.
DS <- subset(HHdata,as.Date(HHdata$DateTime) > as.Date("2007-01-31"))
DS <- subset(DS,as.Date(DS$DateTime)<as.Date("2007-02-03"))
##plot3
par(mfrow=c(1,1))
png(filename = "plot3.png")
# Empty frame first, then one line per sub-metering series.
plot(DS$DateTime,DS$Sub_metering_1,type="n",ylab="Energy sub metering",xlab="")
lines(DS$DateTime,DS$Sub_metering_1)
lines(DS$DateTime,DS$Sub_metering_2,col="red")
lines(DS$DateTime,DS$Sub_metering_3, col="blue")
# Legend colors must match the series drawn above: 1 = black, 2 = red,
# 3 = blue.  (Was black/blue/red, mislabelling series 2 and 3.)
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=c(1,1,1), lwd=c(2.5,2.5,2.5),col=c("black","red","blue"))
dev.off()
} |
context("Tokenizer")
test_that("Operators are tokenized correctly", {
  # Every operator the tokenizer should recognize, including multi-character
  # operators, user-defined infix operators (%for%) and the native pipe (|>).
  operators <- c(
    "::", ":::", "$", "@", "[", "[[", "^", "-", "+", ":",
    "*", "/", "+", "-", "<", ">", "<=", ">=", "==", "!=",
    "!", "&", "&&", "|", "|>", "||", "~", "->", "->>", "<-", "<<-",
    "=", "?", "**", "%%", "%for%"
  )
  # Smoke test: tokenizing all operators in one string must not error.
  # (Previously the result was bound to an unused variable.)
  tokenize_string(paste(operators, collapse = " "))
  for (operator in operators) {
    tokens <- tokenize_string(operator)
    # paste0 keeps the quotes tight around the operator in the failure
    # message, matching the style of the sibling tests in this file.
    expect_true(nrow(tokens) == 1,
                paste0("expected a single token ('", operator, "')"))
  }
})
test_that("Numbers are tokenized correctly", {
  # Numeric literals covering plain doubles, leading/trailing dots, the
  # integer suffix 'L' and scientific notation; each must lex as exactly
  # one token of type "number".
  numbers <- c("1", "1.0", "0.1", ".1", "0.1E1", "1L", "1.0L", "1.5L",
               "1E1", "1E-1", "1E-1L", ".100E-105L", "0.", "100.",
               "1e+09", "1e+90", "1e-90", "1e-00000000000000009")
  for (literal in numbers) {
    result <- tokenize_string(literal)
    expect_true(nrow(result) == 1,
                paste0("expected a single token ('", literal, "')"))
    first <- as.list(result[1, ])
    expect_true(first$type == "number",
                paste0("expected a number ('", first$type, "')"))
  }
})
test_that("The tokenizer accepts UTF-8 symbols", {
  # A multi-byte identifier must come back as a single token.
  tokens <- tokenize_string("鬼門")
  expect_true(nrow(tokens) == 1)
})
test_that("The tokenizer works correctly", {
# TODO: Should newlines be absorbed as part of the comment string?
tokens <- tokenize_string("# A Comment\n")
expected <- "# A Comment\n"
compare_tokens(tokens, expected)
# Whitespace and newlines are tokens in their own right.
tokens <- tokenize_string("a <- 1 + 2\n")
compare_tokens(
tokens,
c("a", " ", "<-", " ", "1", " ", "+", " ", "2", "\n")
)
compare_tokens(
tokenize_string("a<-1"),
c("a", "<-", "1")
)
# NOTE: '-' sign tokenized separately from number
compare_tokens(
tokenize_string("a< -1"),
c("a", "<", " ", "-", "1")
)
# compare_tokens also accepts a plain string as its first argument; these
# check literals that must survive as a single token.
compare_tokens("1.0E5L", "1.0E5L")
compare_tokens(".1", ".1")
compare_tokens("'\\''", "'\\''")
compare_tokens(".a", ".a")
compare_tokens("...", "...")
compare_tokens(":=", ":=")
compare_tokens("x ** 2", c("x", " ", "**", " ", "2"))
})
test_that("`[[` and `[` are tokenized correctly", {
# '[[' must be preferred over two single '[' tokens where possible.
compare_tokens("x[[1]]", c("x", "[[", "1", "]]"))
# not really valid R code, but the tokenizer should still
# get it right
compare_tokens("[[[]]]", c("[[", "[", "]", "]]"))
# Deeply nested mix of '[[' and '[' with interleaved closers.
compare_tokens(
"x[[a[b[[c[1]]]]]]",
c("x", "[[", "a", "[", "b", "[[", "c", "[", "1",
"]", "]]", "]", "]]")
)
})
test_that("Failures during number tokenization is detected", {
  # A malformed exponent must produce an 'invalid' token, not a crash.
  bad <- tokenize_string("1.5E---")
  expect_true(bad$type[[1]] == "invalid")
})
test_that("invalid number e.g. 1E1.5 tokenized as single entity", {
  # The whole malformed literal is consumed as one 'invalid' token rather
  # than being split into valid pieces.
  bad <- tokenize_string("1E1.5")
  expect_true(nrow(bad) == 1)
  expect_true(bad$type[[1]] == "invalid")
})
test_that("keywords are tokenized as keywords", {
  # Reserved words and the built-in missing-value constants must all carry
  # the 'keyword' token type.
  keywords <- c("if", "else", "repeat", "while", "function",
                "for", "in", "next", "break",
                "TRUE", "FALSE", "NULL", "Inf", "NaN", "NA",
                "NA_integer_", "NA_real_", "NA_complex_", "NA_character_")
  types <- vapply(
    keywords,
    function(keyword) tokenize_string(keyword)[1, ]$type,
    character(1)
  )
  expect_true(all(types == "keyword"))
})
test_that("comments without a trailing newline are tokenized", {
  # A comment at end-of-input (no '\n') must still yield a comment token.
  result <- tokenize_string("# abc")
  expect_identical(result$type, "comment")
})
test_that("tokenization errors handled correctly", {
  # Regression test: unterminated string / infix-operator fragments used to
  # leak a NUL byte into the reported token value.  Tokenizing them just has
  # to complete without crashing.
  for (fragment in c("'abc", "\"abc", "%abc")) {
    tokenize_string(fragment)
  }
  expect_true(TRUE, "we didn't segfault")
})
test_that("raw tokens are tokenized correctly", {
# Build every combination of R's raw-string syntax:
#   <r|R> <quote> <0-3 dashes> <( { [> ... <) } ]> <same dashes> <same quote>
# e.g. r"---( )---", R'{ }', etc.  Each must lex as a single string token.
prefixes <- c("r", "R")
quotes <- c("'", '"')
dashes <- c("", "-", "--", "---")
lhs <- c("(", "{", "[")
all <- expand.grid(prefixes, quotes, dashes, lhs, stringsAsFactors = FALSE)
# Var5: closing delimiter matching the opening one in Var4.
all$Var5 <- ""
all$Var5[all$Var4 == "("] <- ")"
all$Var5[all$Var4 == "{"] <- "}"
all$Var5[all$Var4 == "["] <- "]"
# Var6/Var7: trailing dashes and quote must mirror the leading ones.
all$Var6 <- all$Var3
all$Var7 <- all$Var2
# Concatenate the columns row-wise into the full raw-string literals.
strings <- do.call(paste0, all)
for (string in strings) {
token <- tokenize_string(string)
expect_true(nrow(token) == 1L)
expect_true(token$type == "string")
}
})
| /tests/testthat/test-tokenize.R | permissive | kevinushey/sourcetools | R | false | false | 4,257 | r | context("Tokenizer")
test_that("Operators are tokenized correctly", {
operators <- c(
"::", ":::", "$", "@", "[", "[[", "^", "-", "+", ":",
"*", "/", "+", "-", "<", ">", "<=", ">=", "==", "!=",
"!", "&", "&&", "|", "|>", "||", "~", "->", "->>", "<-", "<<-",
"=", "?", "**", "%%", "%for%"
)
tokenized <- tokenize_string(paste(operators, collapse = " "))
for (operator in operators) {
tokens <- tokenize_string(operator)
expect_true(nrow(tokens) == 1, paste("expected a single token ('", operator, "')"))
}
})
test_that("Numbers are tokenized correctly", {
numbers <- c("1", "1.0", "0.1", ".1", "0.1E1", "1L", "1.0L", "1.5L",
"1E1", "1E-1", "1E-1L", ".100E-105L", "0.", "100.",
"1e+09", "1e+90", "1e-90", "1e-00000000000000009")
for (number in numbers) {
tokens <- tokenize_string(number)
expect_true(nrow(tokens) == 1, paste("expected a single token ('", number, "')", sep = ""))
token <- as.list(tokens[1, ])
expect_true(token$type == "number", paste("expected a number ('", token$type, "')", sep = ""))
}
})
test_that("The tokenizer accepts UTF-8 symbols", {
expect_true(nrow(tokenize_string("鬼門")) == 1)
})
test_that("The tokenizer works correctly", {
# TODO: Should newlines be absorbed as part of the comment string?
tokens <- tokenize_string("# A Comment\n")
expected <- "# A Comment\n"
compare_tokens(tokens, expected)
tokens <- tokenize_string("a <- 1 + 2\n")
compare_tokens(
tokens,
c("a", " ", "<-", " ", "1", " ", "+", " ", "2", "\n")
)
compare_tokens(
tokenize_string("a<-1"),
c("a", "<-", "1")
)
# NOTE: '-' sign tokenized separately from number
compare_tokens(
tokenize_string("a< -1"),
c("a", "<", " ", "-", "1")
)
compare_tokens("1.0E5L", "1.0E5L")
compare_tokens(".1", ".1")
compare_tokens("'\\''", "'\\''")
compare_tokens(".a", ".a")
compare_tokens("...", "...")
compare_tokens(":=", ":=")
compare_tokens("x ** 2", c("x", " ", "**", " ", "2"))
})
test_that("`[[` and `[` are tokenized correctly", {
compare_tokens("x[[1]]", c("x", "[[", "1", "]]"))
# not really valid R code, but the tokenizer should still
# get it right
compare_tokens("[[[]]]", c("[[", "[", "]", "]]"))
compare_tokens(
"x[[a[b[[c[1]]]]]]",
c("x", "[[", "a", "[", "b", "[[", "c", "[", "1",
"]", "]]", "]", "]]")
)
})
test_that("Failures during number tokenization is detected", {
tokens <- tokenize_string("1.5E---")
expect_true(tokens$type[[1]] == "invalid")
})
test_that("invalid number e.g. 1E1.5 tokenized as single entity", {
tokens <- tokenize_string("1E1.5")
expect_true(nrow(tokens) == 1)
expect_true(tokens$type[[1]] == "invalid")
})
test_that("keywords are tokenized as keywords", {
keywords <- c("if", "else", "repeat", "while", "function",
"for", "in", "next", "break",
"TRUE", "FALSE", "NULL", "Inf", "NaN", "NA",
"NA_integer_", "NA_real_", "NA_complex_", "NA_character_")
tokens <- lapply(keywords, function(keyword) {
tokenize_string(keyword)[1, ]
})
types <- unlist(lapply(tokens, `[[`, "type"))
expect_true(all(types == "keyword"))
})
test_that("comments without a trailing newline are tokenized", {
tokens <- tokenize_string("# abc")
expect_identical(tokens$type, "comment")
})
test_that("tokenization errors handled correctly", {
# previously, these reported an error where a NUL
# byte was accidentally included as part of the
# token value
tokenize_string("'abc")
tokenize_string("\"abc")
tokenize_string("%abc")
expect_true(TRUE, "we didn't segfault")
})
test_that("raw tokens are tokenized correctly", {
prefixes <- c("r", "R")
quotes <- c("'", '"')
dashes <- c("", "-", "--", "---")
lhs <- c("(", "{", "[")
all <- expand.grid(prefixes, quotes, dashes, lhs, stringsAsFactors = FALSE)
all$Var5 <- ""
all$Var5[all$Var4 == "("] <- ")"
all$Var5[all$Var4 == "{"] <- "}"
all$Var5[all$Var4 == "["] <- "]"
all$Var6 <- all$Var3
all$Var7 <- all$Var2
strings <- do.call(paste0, all)
for (string in strings) {
token <- tokenize_string(string)
expect_true(nrow(token) == 1L)
expect_true(token$type == "string")
}
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prediction_functions.R
\docType{package}
\name{prediction_googleAuthR}
\alias{prediction_googleAuthR}
\alias{prediction_googleAuthR-package}
\title{Prediction API
Lets you access a cloud hosted machine learning service that makes it easy to build smart apps}
\description{
Auto-generated code by googleAuthR::gar_create_api_skeleton
at 2016-09-03 23:26:28
filename: /Users/mark/dev/R/autoGoogleAPI/googlepredictionv15.auto/R/prediction_functions.R
api_json: api_json
}
\details{
Authentication scopes used are:
\itemize{
\item https://www.googleapis.com/auth/devstorage.full_control
\item https://www.googleapis.com/auth/devstorage.read_only
\item https://www.googleapis.com/auth/devstorage.read_write
\item https://www.googleapis.com/auth/prediction
}
}
| /googlepredictionv15.auto/man/prediction_googleAuthR.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 837 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prediction_functions.R
\docType{package}
\name{prediction_googleAuthR}
\alias{prediction_googleAuthR}
\alias{prediction_googleAuthR-package}
\title{Prediction API
Lets you access a cloud hosted machine learning service that makes it easy to build smart apps}
\description{
Auto-generated code by googleAuthR::gar_create_api_skeleton
at 2016-09-03 23:26:28
filename: /Users/mark/dev/R/autoGoogleAPI/googlepredictionv15.auto/R/prediction_functions.R
api_json: api_json
}
\details{
Authentication scopes used are:
\itemize{
\item https://www.googleapis.com/auth/devstorage.full_control
\item https://www.googleapis.com/auth/devstorage.read_only
\item https://www.googleapis.com/auth/devstorage.read_write
\item https://www.googleapis.com/auth/prediction
}
}
|
#' Interpret the model specified by user
#'
#' \code{assign_model} translates options specified by a user (e.g., in
#' \code{model_options}) into information that can be understood by \code{baker}.
#'
#' @details \code{assign_model} will be modified to check if data are conformable
#' to specified model.
#'
#' @param data_nplcm Data. See \code{\link{nplcm}} function for data structure.
#' @param model_options See \code{\link{nplcm}} function.
#' @param silent Default is \code{TRUE} for no messages; \code{FALSE} otherwise.
#' @return A list of model specifications:
#' \itemize{
#' \item \code{num_slice} A vector counting the No. of measurement slices for each
#' level of measurement quality (e.g., MBS, MSS, MGS representing
#' Bronze-Standard Measurements - case-control,
#' Silver-Standard Measurements and Gold-Standard
#' Measurements - case-only);
#' \item \code{nested} Local dependence specification for modeling bronze-standard
#' data. \code{TRUE} for nested models (conditional dependence given disease class);
#' \code{FALSE} for non-nested models (conditional independence given disease class).
#' One for each BrS slice.
#' \item \code{regression}
#' \itemize{
#' \item \code{do_reg_Eti} \code{TRUE} for doing etiology regression.
#' It means let the etiology fractions vary with explanatory variables.
#' \code{FALSE} otherwise;
#' \item \code{do_reg_FPR} A vector whose names represent the slices
#' of bronze-standard data. For each slice of BrS measurements,
#' \code{TRUE} does false positive rate regression. It means the false
#' positive rates, estimatable from controls, can vary with
#' covariates; \code{FALSE} otherwise.
#'      \item \code{is_discrete_predictor} A list with elements \code{Eti} and
#'      \code{FPR} (the latter with one entry per slice of bronze-standard
#'      data). \code{TRUE} if all predictors are discrete; \code{FALSE} otherwise.
#' }
#' }
#'
#'
#' @family specification checking functions
#' @export
assign_model <- function(model_options,data_nplcm, silent=TRUE){
# load options:
likelihood <- model_options$likelihood
use_measurements <- model_options$use_measurements
prior <- model_options$prior
# load data:
Mobs <- data_nplcm$Mobs
Y <- data_nplcm$Y
X <- data_nplcm$X
# One indicator per BrS slice: k_subclass > 1 means a nested
# (conditional-dependence) measurement model for that slice.
nested <- likelihood$k_subclass > 1
# test the match between actual data and model_options:
use_data_sources <- c("MBS","MSS","MGS")[lookup_quality(use_measurements)]
input_data_sources <- names(Mobs)
if (!all(use_data_sources%in%input_data_sources)){
stop("==[baker] Please supply actual datasets as specified by 'use_measurements' in 'model_options'.\n==")
}
# get the length of each measurement quality:
num_slice <- rep(0,3)
names(num_slice) <- c("MBS","MSS","MGS")
for (i in seq_along(use_data_sources)){
num_slice[use_data_sources[i]] <- length(Mobs[[use_data_sources[i]]])
}
# specify regression for FPR: (only available for bronze-standard data. Silver-standard data automatically have FPR==0.)
do_reg_FPR <- is_discrete_FPR <- rep(NA,length(likelihood$FPR_formula)) # <---- a regression for each measurement slice?
names(do_reg_FPR) <- names(is_discrete_FPR) <- names(likelihood$FPR_formula)
for (i in seq_along(Mobs$MBS)){
# Match this BrS slice to its FPR formula by name:
ind_tmp <-
which(names(likelihood$FPR_formula) == names(Mobs$MBS)[i])
# NOTE(review): this runs before the length(ind_tmp) guard below; when no
# formula name matches the slice, `[[integer(0)]]` errors here first and
# the FALSE branch is unreachable -- consider moving it into the else.
form_tmp <- stats::as.formula(likelihood$FPR_formula[[ind_tmp]])
if (!length(ind_tmp)) { # don't do regression if no regression formula is found:
do_reg_FPR[i] <- FALSE
} else{ # do regression if there is matched regression formula:
do_reg_FPR[i] <-
parse_nplcm_reg(form_tmp,data_nplcm,silent=silent)
}
# A slice's FPR design counts as discrete only when covariates exist and
# the formula is neither intercept-only nor empty and every predictor used
# by the formula is discrete:
is_discrete_FPR[i] <- FALSE
if (!is.null(X)){
is_discrete_FPR[i] <- (!is_intercept_only(form_tmp) &
!stats::is.empty.model(form_tmp) &
is_discrete(X, form_tmp))
}
}
#
# specify regression for TPR: (every measurement slice has it.)
#
# do_reg_TPR <- list() # <---- a regression for each measurement slice?
# for (i in seq_along(Mobs$MBS)){
# ind_tmp <-
# which(names(likelihood$TPR_formula) == names(Mobs$MBS)[i])
# if (!length(ind_tmp)) { # don't do regression if no regression formula is found:
# do_reg_TPR[[i]] <- FALSE
# } else{ # do regression if there is matched regression formula:
# do_reg_TPR[[i]] <-
# parse_nplcm_reg(stats::as.formula(likelihood$TPR_formula[[ind_tmp]]),data_nplcm,silent=silent)
# }
# }
#
# names(do_reg_TPR) <- names(Mobs$MBS)
#
# # if using silver-standard data:
# if ("MSS"%in% use_data_sources){
# for (i in length(Mobs$MBS)+seq_along(Mobs$MSS)){
# ind_tmp <-
# which(names(likelihood$TPR_formula) == names(Mobs$MSS)[i])
# if (!length(ind_tmp)) { # don't do regression if no regression formula is found:
# do_reg_TPR[[i]] <- FALSE
# } else{ # do regression if there is matched regression formula:
# do_reg_TPR[[i]] <-
# parse_nplcm_reg(stats::as.formula(likelihood$TPR_formula[[ind_tmp]]),data_nplcm,silent=silent)
# }
# }
# names(do_reg_TPR) <- c(names(Mobs$MBS),names(Mobs$MSS))
# }
# specify regression for etiology:
form_tmp <- stats::as.formula(likelihood$Eti_formula)
do_reg_Eti <- parse_nplcm_reg(form_tmp,data_nplcm,silent=silent)
is_discrete_Eti <- FALSE
if (!is.null(X)){ # <--- potential problem if a user input more data than needed. need fixing.
# Discreteness of the etiology design is judged on cases only (Y==1):
is_discrete_Eti <- (!stats::is.empty.model(form_tmp) &
!is_intercept_only(form_tmp) &
is_discrete(data.frame(X,Y)[Y==1,,drop=FALSE], form_tmp))
}
# Bundle the discreteness indicators; FPR keeps one entry per BrS slice.
is_discrete_predictor <- list(is_discrete_Eti, is_discrete_FPR)
names(is_discrete_predictor)[1] <- "Eti"
names(is_discrete_predictor)[2] <- "FPR"
regression <- make_list(do_reg_Eti, do_reg_FPR,is_discrete_predictor)#, do_reg_TPR)
# check BrS group: stratified TPR priors for bronze-standard data.
BrS_grp <- FALSE
prior_BrS <- model_options$prior$TPR_prior$BrS
GBrS_TPR <- length(unique(prior_BrS$grp))
grp_spec <- (!is.null(prior_BrS$grp) && GBrS_TPR >1 )
if (grp_spec) {
for (s in seq_along(prior_BrS$val)){
# "match_range" priors must supply one (low, up) pair per TPR group.
# NOTE(review): the paste0() below closes early after `[s])`; the
# remaining pieces become extra arguments to stop(), which still
# concatenates them, so the message comes out intact.
if (prior_BrS$input=="match_range" &&
(length(prior_BrS$val[[s]]$up)!=GBrS_TPR |
length(prior_BrS$val[[s]]$low)!=GBrS_TPR) ){
stop(paste0("==[baker] ",names(prior_BrS$val)[s])," needs ", GBrS_TPR,
" sets of sensitivity ranges.==")
}
}
BrS_grp <- TRUE
}
# check SS group: stratified TPR priors for silver-standard data.
SS_grp <- FALSE
prior_SS <- model_options$prior$TPR_prior$SS
grp_spec <- (!is.null(prior_SS$grp) && length(unique(prior_SS$grp)) >1 )
if (grp_spec) {SS_grp <- TRUE}
## <-------- the following are more strict grp specifications (may cause error when running old folders):
# val_spec <- (num_slice["MSS"]>0 && any(lapply(prior_SS$val,length)>1))
#
# if (grp_spec && val_spec){SS_grp <- TRUE}
# if (grp_spec && !val_spec){stop("==Specified TPR group in 'grp' of 'model_options$prior$TPR_prior$SS',
# but either there is no SS data or the length of 'val' does not match the no. of TPR groups. ==")}
# if (!grp_spec && val_spec){stop("==No 'grp' specified in 'model_options$prior$TPR_prior$SS',
# but we have >1 sets of TPRs. ==")}
# return results:
make_list(num_slice, nested, regression,BrS_grp,SS_grp)
}
| /R/assign-model.R | no_license | oslerinhealth-releases/baker | R | false | false | 7,622 | r | #' Interpret the model specified by user
#'
#' \code{assign_model} translates options specified by a user (e.g., in
#' \code{model_options}) into information that can be understood by \code{baker}.
#'
#' @details \code{assign_model} will be modified to check if data are conformable
#' to specified model.
#'
#' @param data_nplcm Data. See \code{\link{nplcm}} function for data structure.
#' @param model_options See \code{\link{nplcm}} function.
#' @param silent Default is \code{TRUE} for no messages; \code{FALSE} otherwise.
#' @return A list of model specifications:
#' \itemize{
#' \item \code{num_slice} A vector counting the No. of measurement slices for each
#' level of measurement quality (e.g., MBS, MSS, MGS representing
#' Bronze-Standard Measurements - case-control,
#' Silver-Standard Measurements and Gold-Standard
#' Measurements - case-only);
#' \item \code{nested} Local dependence specification for modeling bronze-standard
#' data. \code{TRUE} for nested models (conditional dependence given disease class);
#' \code{FALSE} for non-nested models (conditional independence given disease class).
#' One for each BrS slice.
#' \item \code{regression}
#' \itemize{
#' \item \code{do_reg_Eti} \code{TRUE} for doing etiology regression.
#' It means let the etiology fractions vary with explanatory variables.
#' \code{FALSE} otherwise;
#' \item \code{do_reg_FPR} A vector whose names represent the slices
#' of bronze-standard data. For each slice of BrS measurements,
#' \code{TRUE} does false positive rate regression. It means the false
#' positive rates, estimatable from controls, can vary with
#' covariates; \code{FALSE} otherwise.
#' \item code{is_discrete_predictor} A list of names "Eti", and
#' the names for every slice of bronze-standard data. \code{TRUE}
#' if all predictors are discrete; \code{FALSE} otherwise.
#' }
#' }
#'
#'
#' @family specification checking functions
#' @export
assign_model <- function(model_options, data_nplcm, silent = TRUE){
  # Unpack user-specified fitting options:
  likelihood       <- model_options$likelihood
  use_measurements <- model_options$use_measurements
  prior            <- model_options$prior
  # Unpack data:
  Mobs <- data_nplcm$Mobs
  Y    <- data_nplcm$Y
  X    <- data_nplcm$X

  # Nested (conditionally-dependent) model iff >1 subclass is requested:
  nested <- likelihood$k_subclass > 1

  # Check that every measurement quality requested in 'use_measurements'
  # has an actual dataset supplied in 'Mobs':
  use_data_sources   <- c("MBS", "MSS", "MGS")[lookup_quality(use_measurements)]
  input_data_sources <- names(Mobs)
  if (!all(use_data_sources %in% input_data_sources)){
    stop("==[baker] Please supply actual datasets as specified by 'use_measurements' in 'model_options'.\n==")
  }

  # Count the number of measurement slices at each quality level:
  num_slice        <- rep(0, 3)
  names(num_slice) <- c("MBS", "MSS", "MGS")
  for (i in seq_along(use_data_sources)){
    num_slice[use_data_sources[i]] <- length(Mobs[[use_data_sources[i]]])
  }

  # FPR regression specification. Only available for bronze-standard data;
  # silver-standard data automatically have FPR == 0.
  # Entries with no matching BrS slice remain NA (as initialized).
  do_reg_FPR <- is_discrete_FPR <- rep(NA, length(likelihood$FPR_formula))
  names(do_reg_FPR) <- names(is_discrete_FPR) <- names(likelihood$FPR_formula)
  for (i in seq_along(Mobs$MBS)){
    ind_tmp <- which(names(likelihood$FPR_formula) == names(Mobs$MBS)[i])
    if (!length(ind_tmp)) {
      # No formula matched to this slice: no regression, no discrete predictors.
      # (Bug fix: the formula must only be parsed in the 'else' branch below;
      # previously 'FPR_formula[[ind_tmp]]' was evaluated first, so an empty
      # 'ind_tmp' raised a subscript error and this branch was unreachable.)
      do_reg_FPR[i]      <- FALSE
      is_discrete_FPR[i] <- FALSE
    } else {
      # Do regression if there is a matched regression formula:
      form_tmp <- stats::as.formula(likelihood$FPR_formula[[ind_tmp]])
      do_reg_FPR[i] <- parse_nplcm_reg(form_tmp, data_nplcm, silent = silent)
      # A slice has discrete FPR predictors only when covariates exist and the
      # formula is neither intercept-only nor empty:
      is_discrete_FPR[i] <- FALSE
      if (!is.null(X)){
        is_discrete_FPR[i] <- (!is_intercept_only(form_tmp) &
                                 !stats::is.empty.model(form_tmp) &
                                 is_discrete(X, form_tmp))
      }
    }
  }
  #
  # Legacy (disabled) code that parsed a TPR regression per measurement slice;
  # retained for reference:
  #
  # do_reg_TPR <- list() # <---- a regression for each measurement slice?
  # for (i in seq_along(Mobs$MBS)){
  #   ind_tmp <- which(names(likelihood$TPR_formula) == names(Mobs$MBS)[i])
  #   if (!length(ind_tmp)) { # don't do regression if no regression formula is found:
  #     do_reg_TPR[[i]] <- FALSE
  #   } else{ # do regression if there is matched regression formula:
  #     do_reg_TPR[[i]] <-
  #       parse_nplcm_reg(stats::as.formula(likelihood$TPR_formula[[ind_tmp]]),data_nplcm,silent=silent)
  #   }
  # }
  # names(do_reg_TPR) <- names(Mobs$MBS)
  #
  # # if using silver-standard data:
  # if ("MSS"%in% use_data_sources){
  #   for (i in length(Mobs$MBS)+seq_along(Mobs$MSS)){
  #     ind_tmp <- which(names(likelihood$TPR_formula) == names(Mobs$MSS)[i])
  #     if (!length(ind_tmp)) {
  #       do_reg_TPR[[i]] <- FALSE
  #     } else{
  #       do_reg_TPR[[i]] <-
  #         parse_nplcm_reg(stats::as.formula(likelihood$TPR_formula[[ind_tmp]]),data_nplcm,silent=silent)
  #     }
  #   }
  #   names(do_reg_TPR) <- c(names(Mobs$MBS),names(Mobs$MSS))
  # }

  # Etiology regression specification:
  form_tmp   <- stats::as.formula(likelihood$Eti_formula)
  do_reg_Eti <- parse_nplcm_reg(form_tmp, data_nplcm, silent = silent)
  is_discrete_Eti <- FALSE
  if (!is.null(X)){ # <--- potential problem if a user input more data than needed. need fixing.
    # Discreteness is judged on case rows only (Y == 1):
    is_discrete_Eti <- (!stats::is.empty.model(form_tmp) &
                          !is_intercept_only(form_tmp) &
                          is_discrete(data.frame(X, Y)[Y == 1, , drop = FALSE], form_tmp))
  }
  is_discrete_predictor <- list(is_discrete_Eti, is_discrete_FPR)
  names(is_discrete_predictor) <- c("Eti", "FPR")
  regression <- make_list(do_reg_Eti, do_reg_FPR, is_discrete_predictor)

  # Check BrS TPR prior groups; with "match_range" input each named element of
  # 'val' must supply one upper and one lower sensitivity bound per group:
  BrS_grp   <- FALSE
  prior_BrS <- model_options$prior$TPR_prior$BrS
  GBrS_TPR  <- length(unique(prior_BrS$grp))
  grp_spec  <- (!is.null(prior_BrS$grp) && GBrS_TPR > 1)
  if (grp_spec) {
    for (s in seq_along(prior_BrS$val)){
      if (prior_BrS$input == "match_range" &&
          (length(prior_BrS$val[[s]]$up) != GBrS_TPR |
             length(prior_BrS$val[[s]]$low) != GBrS_TPR)){
        stop(paste0("==[baker] ", names(prior_BrS$val)[s]), " needs ", GBrS_TPR,
             " sets of sensitivity ranges.==")
      }
    }
    BrS_grp <- TRUE
  }

  # Check SS TPR groups (no value-length validation here; see the stricter
  # checks kept below for reference):
  SS_grp   <- FALSE
  prior_SS <- model_options$prior$TPR_prior$SS
  grp_spec <- (!is.null(prior_SS$grp) && length(unique(prior_SS$grp)) > 1)
  if (grp_spec) {SS_grp <- TRUE}
  ## <-------- the following are more strict grp specifications (may cause error when running old folders):
  # val_spec <- (num_slice["MSS"]>0 && any(lapply(prior_SS$val,length)>1))
  #
  # if (grp_spec && val_spec){SS_grp <- TRUE}
  # if (grp_spec && !val_spec){stop("==Specified TPR group in 'grp' of 'model_options$prior$TPR_prior$SS',
  #                                 but either there is no SS data or the length of 'val' does not match the no. of TPR groups. ==")}
  # if (!grp_spec && val_spec){stop("==No 'grp' specified in 'model_options$prior$TPR_prior$SS',
  #                                 but we have >1 sets of TPRs. ==")}

  # Return the assembled model specification:
  make_list(num_slice, nested, regression, BrS_grp, SS_grp)
}
|
#' @export hmoment
#' @importFrom stats embed
#' @importFrom utils data
#' @title Compute the hydrophobic moment of a protein sequence
#' @description This function compute the hmoment based on Eisenberg, D., Weiss, R. M., & Terwilliger, T. C. (1984). Hydriphobic moment is a quantitative measure of the amphiphilicity perpendicular to the axis of any periodic peptide structure, such as the a-helix or b-sheet. It can be calculated for an amino acid sequence of N residues and their associated hydrophobicities Hn.
#' @param seq An amino-acids sequence
#' @param angle A protein rotational angle (Suggested: a-helix = 100, b-sheet=160)
#' @param window A sequence fraction length
#' @return The computed maximal hydrophobic moment (uH) for a given amino-acids sequence
#' @references Eisenberg, D., Weiss, R. M., & Terwilliger, T. C. (1984). The hydrophobic moment detects periodicity in protein hydrophobicity. Proceedings of the National Academy of Sciences, 81(1), 140-144.
#' @details The hydrophobic moment was proposed by Eisenberg et al. (1982), as a quantitative measure of the amphiphilicity perpendicular to the axis of any periodic peptide structure. It is computed using the standardized Eisenberg (1984) scale, windows (fragment of sequence) of eleven amino acids (by default) and specifying the rotational angle at which it should be calculated.
#' @examples # COMPARED TO EMBOSS:HMOMENT
#' # http://emboss.bioinformatics.nl/cgi-bin/emboss/hmoment
#' # SEQUENCE: FLPVLAGLTPSIVPKLVCLLTKKC
#' # ALPHA-HELIX ANGLE=100 : 0.52
#' # BETA-SHEET ANGLE=160 : 0.271
#'
#' # ALPHA HELIX VALUE
#' hmoment(seq = "FLPVLAGLTPSIVPKLVCLLTKKC", angle = 100, window = 11)
#' # [1] 0.5199226
#'
#' # BETA SHEET VALUE
#' hmoment(seq = "FLPVLAGLTPSIVPKLVCLLTKKC", angle = 160, window = 11)
#' # [1] 0.2705906
#' @note This function was written by an anonymous reviewer of the RJournal
hmoment <- function(seq, angle = 100, window = 11) {
  # Standardized Eisenberg (1984) hydrophobicity scale, indexed by residue letter.
  h_scale <- AAdata$Hydrophobicity$Eisenberg
  # Validate the input and split each sequence into single amino-acid letters.
  residues <- aaCheck(seq)
  # Slide a window over each sequence; a sequence shorter than `window`
  # falls back to a single full-length window.
  frames <- lapply(residues, function(res) {
    stats::embed(res, min(c(length(res), window)))
  })
  # For each windowed sequence, place the residue hydrophobicities around a
  # wheel rotated by `angle` degrees per residue and keep the largest
  # resultant vector length over all windows.
  uH <- lapply(frames, function(frame) {
    win_n <- min(c(length(frame), window))
    theta <- angle * (pi / 180) * seq_len(win_n)
    hvals <- h_scale[t(frame)]
    comp_cos <- hvals * cos(theta)
    comp_sin <- hvals * sin(theta)
    dim(comp_cos) <- dim(comp_sin) <- dim(t(frame))
    sum_cos <- colSums(comp_cos, na.rm = TRUE)
    sum_sin <- colSums(comp_sin, na.rm = TRUE)
    # Maximal hydrophobic moment, normalized by the window length.
    max(sqrt(sum_sin * sum_sin + sum_cos * sum_cos) / win_n)
  })
  return(unlist(uH))
}
| /fuzzedpackages/Peptides/R/hmoment.R | no_license | akhikolla/testpackages | R | false | false | 2,732 | r | #' @export hmoment
#' @importFrom stats embed
#' @importFrom utils data
#' @title Compute the hydrophobic moment of a protein sequence
#' @description This function compute the hmoment based on Eisenberg, D., Weiss, R. M., & Terwilliger, T. C. (1984). Hydriphobic moment is a quantitative measure of the amphiphilicity perpendicular to the axis of any periodic peptide structure, such as the a-helix or b-sheet. It can be calculated for an amino acid sequence of N residues and their associated hydrophobicities Hn.
#' @param seq An amino-acids sequence
#' @param angle A protein rotational angle (Suggested: a-helix = 100, b-sheet=160)
#' @param window A sequence fraction length
#' @return The computed maximal hydrophobic moment (uH) for a given amino-acids sequence
#' @references Eisenberg, D., Weiss, R. M., & Terwilliger, T. C. (1984). The hydrophobic moment detects periodicity in protein hydrophobicity. Proceedings of the National Academy of Sciences, 81(1), 140-144.
#' @details The hydrophobic moment was proposed by Eisenberg et al. (1982), as a quantitative measure of the amphiphilicity perpendicular to the axis of any periodic peptide structure. It is computed using the standardized Eisenberg (1984) scale, windows (fragment of sequence) of eleven amino acids (by default) and specifying the rotational angle at which it should be calculated.
#' @examples # COMPARED TO EMBOSS:HMOMENT
#' # http://emboss.bioinformatics.nl/cgi-bin/emboss/hmoment
#' # SEQUENCE: FLPVLAGLTPSIVPKLVCLLTKKC
#' # ALPHA-HELIX ANGLE=100 : 0.52
#' # BETA-SHEET ANGLE=160 : 0.271
#'
#' # ALPHA HELIX VALUE
#' hmoment(seq = "FLPVLAGLTPSIVPKLVCLLTKKC", angle = 100, window = 11)
#' # [1] 0.5199226
#'
#' # BETA SHEET VALUE
#' hmoment(seq = "FLPVLAGLTPSIVPKLVCLLTKKC", angle = 160, window = 11)
#' # [1] 0.2705906
#' @note This function was written by an anonymous reviewer of the RJournal
hmoment <- function(seq, angle = 100, window = 11) {
  # Per-residue hydrophobicity values (standardized Eisenberg 1984 scale),
  # looked up by one-letter amino-acid code.
  eisenberg <- AAdata$Hydrophobicity$Eisenberg
  # Validate the input and split each sequence into single-letter residues.
  chains <- aaCheck(seq)
  # One maximal hydrophobic moment per input sequence.
  vapply(chains, function(chain) {
    # All sliding windows of the chain (a chain shorter than `window` yields
    # one full-length window); each row holds one window of residues.
    win <- stats::embed(chain, min(c(length(chain), window)))
    n_res <- min(c(length(win), window))
    # Helical-wheel rotation (radians) applied at each window position.
    rot <- angle * (pi / 180) * seq_len(n_res)
    hv <- eisenberg[t(win)]
    cos_part <- hv * cos(rot)
    sin_part <- hv * sin(rot)
    dim(cos_part) <- dim(sin_part) <- dim(t(win))
    tot_cos <- colSums(cos_part, na.rm = TRUE)
    tot_sin <- colSums(sin_part, na.rm = TRUE)
    # Largest resultant vector length over all windows, length-normalized.
    max(sqrt(tot_sin * tot_sin + tot_cos * tot_cos) / n_res)
  }, numeric(1))
}
|
GEE.var.wl <- function(formula, id, family = gaussian, data, corstr = "independence"){
  #########################################################################
  # Wang-Long (WL) bias-corrected sandwich covariance estimator for GEE.
  # Arguments:
  #   formula  model formula of interest
  #   id       name of the cluster-id column in 'data'
  #   family   "gaussian", "binomial" or "poisson" (string or family function)
  #   data     data frame with outcome, covariates and cluster id
  #   corstr   working correlation structure: "independence", "AR-M",
  #            "exchangeable" or "unstructured"
  # Value: a list containing
  #   cov.beta  diagonal of the bias-corrected robust covariance of hat(beta)
  #   cov.var   estimated variance-covariance matrix of that robust variance
  # NOTE: the pooled residual cross-product matrix assumes balanced clusters
  # (equal cluster sizes) -- TODO confirm before use on unbalanced data.
  #########################################################################
  # Ensure the cluster id is available under the canonical column name 'id':
  if (is.null(data$id)){
    index   <- which(names(data) == id)
    data$id <- data[, index]
  }
  ### na.action: only na.omit is used for gee; drop records with missing
  ### values in outcome or predictors (row counter recovers complete cases).
  init     <- model.frame(formula, data)
  init$num <- seq_len(length(init[, 1]))
  if (any(is.na(init))){
    index <- na.omit(init)$num
    data  <- data[index, ]
  }
  ### Get the design matrix and numeric response (the original duplicated
  ### this extraction in both branches; it is needed exactly once):
  m             <- model.frame(formula, data)
  data$response <- model.response(m, "numeric")
  mat           <- as.data.frame(model.matrix(formula, m))
  ### Fit the GEE model to get the estimate of parameters \hat{\beta}:
  gee.fit  <- gee(formula, data = data, id = id, family = family, corstr = corstr)
  beta_est <- gee.fit$coefficient
  alpha    <- gee.fit$working.correlation[1, 2]
  len      <- length(beta_est)
  len_vec  <- len^2
  ### Estimate the robust variance for \hat{\beta}:
  data$id  <- gee.fit$id
  cluster  <- cluster.size(data$id)
  ncluster <- max(cluster$n)   # maximal cluster size
  size     <- cluster$m        # number of clusters
  mat$subj <- rep(unique(data$id), cluster$n)
  # Working correlation matrix at the maximal cluster size. (This local
  # 'var' shadows stats::var; kept for continuity with the package code.)
  if (is.character(corstr)){
    var <- switch(corstr,
                  "independence" = cormax.ind(ncluster),
                  "exchangeable" = cormax.exch(ncluster, alpha),
                  "AR-M"         = cormax.ar1(ncluster, alpha),
                  "unstructured" = summary(gee.fit)$working.correlation)
  }else{
    print(corstr)
    stop("'working correlation structure' not recognized")
  }
  # Normalize 'family' to one of the three supported strings:
  if (is.character(family)){
    family <- switch(family,
                     "gaussian" = "gaussian",
                     "binomial" = "binomial",
                     "poisson"  = "poisson")
  }else{
    if (is.function(family)){
      family <- family()[[1]]
    }else{
      print(family)
      stop("'family' not recognized")
    }
  }
  ### Re-extract the design matrix after gee() (ids may have been reordered):
  m        <- model.frame(formula, data)
  mat      <- as.data.frame(model.matrix(formula, m))
  mat$subj <- rep(unique(data$id), cluster$n)
  cov.beta <- unstr <- matrix(0, nrow = len, ncol = len)
  # --- Step 1: model-based information, sum_i D_i' V_i^{-1} D_i -----------
  step01 <- matrix(0, nrow = len, ncol = len)
  for (i in 1:size){
    covariate <- as.matrix(subset(mat[, -length(mat[1, ])], mat$subj == unique(data$id)[i]))
    var_i <- var[1:cluster$n[i], 1:cluster$n[i]]
    if (family == "gaussian"){
      # Identity link: D_i = X_i, V_i = working correlation block.
      step01 <- step01 + t(covariate) %*% solve(var_i) %*% covariate
    }else if (family == "poisson"){
      # Log link: mu = exp(X beta), Var(y) = mu.
      D  <- mat.prod(covariate, exp(covariate %*% beta_est))
      Vi <- diag(sqrt(c(exp(covariate %*% beta_est))), cluster$n[i]) %*% var_i %*%
        diag(sqrt(c(exp(covariate %*% beta_est))), cluster$n[i])
      step01 <- step01 + t(D) %*% solve(Vi) %*% D
    }else if (family == "binomial"){
      # Logit link: Var(y) = mu (1 - mu) = exp(eta) / (1 + exp(eta))^2.
      D  <- mat.prod(covariate, exp(covariate %*% beta_est) / ((1 + exp(covariate %*% beta_est))^2))
      Vi <- diag(sqrt(c(exp(covariate %*% beta_est) / (1 + exp(covariate %*% beta_est))^2)), cluster$n[i]) %*% var_i %*%
        diag(sqrt(c(exp(covariate %*% beta_est) / (1 + exp(covariate %*% beta_est))^2)), cluster$n[i])
      step01 <- step01 + t(D) %*% solve(Vi) %*% D
    }
  }
  # --- Step 2: pooled, leverage-corrected residual cross-products ---------
  # (Fix: sized by the maximal cluster size 'ncluster'; the original relied
  # on the stale loop index 'i' leaked from the loop above, i.e. the LAST
  # cluster's size. Identical for balanced clusters.)
  step <- matrix(0, nrow = ncluster, ncol = ncluster)
  for (i in 1:size){
    y <- as.matrix(data$response[data$id == unique(data$id)[i]])
    covariate <- as.matrix(subset(mat[, -length(mat[1, ])], mat$subj == unique(data$id)[i]))
    var_i <- var[1:cluster$n[i], 1:cluster$n[i]]
    if (family == "gaussian"){
      # WL correction: (I - H_i)^{-1} applied to the raw residual.
      resid <- solve(cormax.ind(cluster$n[i]) - covariate %*% solve(step01) %*% t(covariate) %*% solve(var_i)) %*% (y - covariate %*% beta_est)
      step  <- step + resid %*% t(resid)
    }else if (family == "poisson"){
      B <- matrix(0, nrow = cluster$n[i], ncol = cluster$n[i])
      diag(B) <- 1 / sqrt(exp(covariate %*% beta_est))
      D  <- mat.prod(covariate, exp(covariate %*% beta_est))
      Vi <- diag(sqrt(c(exp(covariate %*% beta_est))), cluster$n[i]) %*% var_i %*%
        diag(sqrt(c(exp(covariate %*% beta_est))), cluster$n[i])
      resid <- B %*% solve(cormax.ind(cluster$n[i]) - D %*% solve(step01) %*% t(D) %*% solve(Vi)) %*% (y - exp(covariate %*% beta_est))
      step  <- step + resid %*% t(resid)
    }else if (family == "binomial"){
      B <- matrix(0, nrow = cluster$n[i], ncol = cluster$n[i])
      diag(B) <- 1 / sqrt(exp(covariate %*% beta_est) / (1 + exp(covariate %*% beta_est))^2)
      D  <- mat.prod(covariate, exp(covariate %*% beta_est) / ((1 + exp(covariate %*% beta_est))^2))
      Vi <- diag(sqrt(c(exp(covariate %*% beta_est) / (1 + exp(covariate %*% beta_est))^2)), cluster$n[i]) %*% var_i %*%
        diag(sqrt(c(exp(covariate %*% beta_est) / (1 + exp(covariate %*% beta_est))^2)), cluster$n[i])
      resid <- B %*% solve(cormax.ind(cluster$n[i]) - D %*% solve(step01) %*% t(D) %*% solve(Vi)) %*% (y - exp(covariate %*% beta_est) / (1 + exp(covariate %*% beta_est)))
      step  <- step + resid %*% t(resid)
    }
  }
  unstr <- step / size
  #diag(unstr)<-rep(1, cluster$n[i])
  # --- Step 3: bread (step11) and meat (step12) of the sandwich, plus the
  # per-cluster meat vectors (p) used to estimate the variance of the
  # variance estimator itself --------------------------------------------
  step11 <- matrix(0, nrow = len, ncol = len)
  step12 <- matrix(0, nrow = len, ncol = len)
  step13 <- matrix(0, nrow = len_vec, ncol = 1)
  step14 <- matrix(0, nrow = len_vec, ncol = len_vec)
  p <- matrix(0, nrow = len_vec, ncol = size)
  for (i in 1:size){
    covariate <- as.matrix(subset(mat[, -length(mat[1, ])], mat$subj == unique(data$id)[i]))
    var_i <- var[1:cluster$n[i], 1:cluster$n[i]]
    if (family == "gaussian"){
      # Fix: use the cluster-specific block 'var_i' on BOTH sides of the
      # meat matrix (the original used solve(var), the full maximal-size
      # matrix, which is non-conformable when a cluster is smaller than the
      # maximum; identical for balanced clusters).
      xy <- t(covariate) %*% solve(var_i) %*% unstr %*% solve(var_i) %*% covariate
      xx <- t(covariate) %*% solve(var_i) %*% covariate
      step11 <- step11 + xx
      step12 <- step12 + xy
      step13 <- step13 + vec(xy)
      p[, i] <- vec(xy)
    }else if (family == "poisson"){
      B <- matrix(0, nrow = cluster$n[i], ncol = cluster$n[i])
      diag(B) <- exp(covariate %*% beta_est)
      D  <- mat.prod(covariate, exp(covariate %*% beta_est))
      Vi <- diag(sqrt(c(exp(covariate %*% beta_est))), cluster$n[i]) %*% var_i %*%
        diag(sqrt(c(exp(covariate %*% beta_est))), cluster$n[i])
      xy <- t(D) %*% solve(Vi) %*% sqrt(B) %*% unstr %*% sqrt(B) %*% solve(Vi) %*% D
      xx <- t(D) %*% solve(Vi) %*% D
      step11 <- step11 + xx
      step12 <- step12 + xy
      step13 <- step13 + vec(xy)
      p[, i] <- vec(xy)
    }else if (family == "binomial"){
      B <- matrix(0, nrow = cluster$n[i], ncol = cluster$n[i])
      diag(B) <- exp(covariate %*% beta_est) / (1 + exp(covariate %*% beta_est))^2
      D  <- mat.prod(covariate, exp(covariate %*% beta_est) / ((1 + exp(covariate %*% beta_est))^2))
      Vi <- diag(sqrt(c(exp(covariate %*% beta_est) / (1 + exp(covariate %*% beta_est))^2)), cluster$n[i]) %*% var_i %*%
        diag(sqrt(c(exp(covariate %*% beta_est) / (1 + exp(covariate %*% beta_est))^2)), cluster$n[i])
      xy <- t(D) %*% solve(Vi) %*% sqrt(B) %*% unstr %*% sqrt(B) %*% solve(Vi) %*% D
      xx <- t(D) %*% solve(Vi) %*% D
      step12 <- step12 + xy
      step11 <- step11 + xx
      step13 <- step13 + vec(xy)
      p[, i] <- vec(xy)
    }
  }
  # --- Step 4: empirical covariance of the vectorized meat contributions --
  for (i in 1:size){
    dif <- (p[, i] - step13 / size) %*% t(p[, i] - step13 / size)
    step14 <- step14 + dif
  }
  # Sandwich: bread^{-1} %*% meat %*% bread^{-1}; cov.var is its estimated
  # sampling variability (with the usual size/(size-1) small-sample factor).
  cov.beta <- solve(step11) %*% (step12) %*% solve(step11)
  cov.var  <- size / (size - 1) * kronecker(solve(step11), solve(step11)) %*% step14 %*% kronecker(solve(step11), solve(step11))
  return(list(cov.beta = diag(cov.beta), cov.var = cov.var))
}
| /R/GEE.var.wl.R | no_license | cran/geesmv | R | false | false | 7,759 | r | GEE.var.wl <-
function(formula,id,family=gaussian,data,corstr="independence"){
#########################################################################
# Wang-Long (WL) bias-corrected sandwich covariance estimator for GEE.
# Arguments:
#      formula     specify the model of interest
#      family      "gaussian", "binomial" or "poisson"
#      data        data frame
#      corstr      Working correlation structure: "independence", "AR-M", "exchangeable", "unstructured".
# value:  GEE returns a list containing the following elements
#      cov.beta    estimate of robust variance for \hat{\beta}
#      cov.var     estimate of the variance-covariance matrix for robust variance.
# NOTE(review): the accumulation of residual cross-products below assumes
# balanced clusters (all clusters the same size) -- confirm before use on
# unbalanced data.
#########################################################################
# Delete the records with missing data in predictors or outcomes;
# First make the cluster id available under the canonical column name 'id'.
    if (is.null(data$id)){
       index <- which(names(data)==id)
       data$id <- data[,index]}
### na.action: only na.omit is used for gee;
    # Row counter used to recover the positions of complete cases below.
    init <- model.frame(formula, data)
    init$num <- 1:length(init[,1])
    if(any(is.na(init))){
       index <- na.omit(init)$num
       data <- data[index,]
    ### Get the design matrix;
       m <- model.frame(formula, data)
       mt <- attr(m, "terms")
       data$response <- model.response(m, "numeric")
       mat <- as.data.frame(model.matrix(formula, m))
    }else{
    ### Get the design matrix;
      m <- model.frame(formula, data)
      mt <- attr(m, "terms")
      data$response <- model.response(m, "numeric")
      mat <- as.data.frame(model.matrix(formula, m))
    }
   ### Fit the GEE model to get the estimate of parameters \hat{\beta};
    gee.fit <- gee(formula,data=data,id=id,family=family,corstr=corstr)
    beta_est <- gee.fit$coefficient
    # First off-diagonal of the working correlation: the single association
    # parameter used by the exchangeable / AR-1 constructors below.
    alpha <- gee.fit$working.correlation[1,2]
    len <- length(beta_est)
    len_vec <- len^2
   ### Estimate the robust variance for \hat{\beta}
    data$id <- gee.fit$id
    cluster<-cluster.size(data$id)
    ncluster<-max(cluster$n)   # maximal cluster size
    size<-cluster$m            # number of clusters
    mat$subj <- rep(unique(data$id), cluster$n)
    # Working correlation matrix at the maximal cluster size.
    # (NOTE: this local 'var' shadows stats::var for the rest of the body.)
    if(is.character(corstr)){
       var <- switch(corstr,
                     "independence"=cormax.ind(ncluster),
                     "exchangeable"=cormax.exch(ncluster, alpha),
                     "AR-M"=cormax.ar1(ncluster, alpha),
                     "unstructured"=summary(gee.fit)$working.correlation,)
    }else{
       print(corstr)
       stop("'working correlation structure' not recognized")
    }
    # Normalize 'family' to one of the three supported strings (a family
    # function such as gaussian is reduced to its name via family()[[1]]).
    if(is.character(family)){
       family <- switch(family,
                        "gaussian"="gaussian",
                        "binomial"="binomial",
                        "poisson"="poisson")
    }else{
       if(is.function(family)){
          family <- family()[[1]]
       }else{
          print(family)
          stop("'family' not recognized")
       }
    }
    ### Get the design matrix (re-extracted after gee());
    m <- model.frame(formula, data)
    mat <- as.data.frame(model.matrix(formula, m))
    mat$subj <- rep(unique(data$id), cluster$n)
    cov.beta<-unstr<-matrix(0,nrow=len,ncol=len)
    # Loop 1: model-based information matrix, sum_i D_i' V_i^{-1} D_i.
    step01<-matrix(0, nrow=len, ncol=len)
    for (i in 1:size){
       y<-as.matrix(data$response[data$id==unique(data$id)[i]])
       # Per-cluster design matrix (drop the 'subj' bookkeeping column).
       covariate<-as.matrix(subset(mat[,-length(mat[1,])], mat$subj==unique(data$id)[i]))
       var_i=var[1:cluster$n[i],1:cluster$n[i]]
       if (family=="gaussian"){
          xx<-t(covariate)%*%solve(var_i)%*%covariate
          step01<-step01+xx
       }else if (family=="poisson"){
          # Log link: mu = exp(X beta); Vi = A^{1/2} R A^{1/2} with A = diag(mu).
          D<-mat.prod(covariate, exp(covariate%*%beta_est))
          Vi <- diag(sqrt(c(exp(covariate%*%beta_est))),cluster$n[i])%*%var_i%*%diag(sqrt(c(exp(covariate%*%beta_est))),cluster$n[i])
          xx<-t(D)%*%solve(Vi)%*%D
          step01<-step01+xx
       }else if (family=="binomial"){
          # Logit link: variance function mu(1-mu) = exp(eta)/(1+exp(eta))^2.
          D<-mat.prod(covariate, exp(covariate%*%beta_est)/((1+exp(covariate%*%beta_est))^2))
          Vi <- diag(sqrt(c(exp(covariate%*%beta_est)/(1+exp(covariate%*%beta_est))^2)),cluster$n[i])%*%var_i%*%diag(sqrt(c(exp(covariate%*%beta_est)/(1+exp(covariate%*%beta_est))^2)),cluster$n[i])
          xx<-t(D)%*%solve(Vi)%*%D
          step01<-step01+xx
       }
    }
    # NOTE(review): 'i' here is the stale index leaked from the loop above,
    # so 'step' is sized by the LAST cluster's size; safe only when all
    # clusters share one size -- confirm, or size by 'ncluster'.
    step<-matrix(0, nrow=cluster$n[i], ncol=cluster$n[i])
    # Loop 2: pooled, leverage-corrected residual cross-products (WL correction).
    for (i in 1:size){
       y<-as.matrix(data$response[data$id==unique(data$id)[i]])
       covariate<-as.matrix(subset(mat[,-length(mat[1,])], mat$subj==unique(data$id)[i]))
       var_i=var[1:cluster$n[i],1:cluster$n[i]]
       if (family=="gaussian"){
          # (I - H_i)^{-1} applied to the raw residual.
          resid<-solve(cormax.ind(cluster$n[i])-covariate%*%solve(step01)%*%t(covariate)%*%solve(var_i))%*%(y-covariate%*%beta_est)
          step<-step+resid%*%t(resid)
       }else if (family=="poisson"){
          # B = A^{-1/2} standardizes the residual to the correlation scale.
          B<-matrix(0,nrow=cluster$n[i],ncol=cluster$n[i])
          diag(B)<-1/sqrt(exp(covariate%*%beta_est))
          D<-mat.prod(covariate, exp(covariate%*%beta_est))
          Vi <- diag(sqrt(c(exp(covariate%*%beta_est))),cluster$n[i])%*%var_i%*%diag(sqrt(c(exp(covariate%*%beta_est))),cluster$n[i])
          resid<-B%*%solve(cormax.ind(cluster$n[i])-D%*%solve(step01)%*%t(D)%*%solve(Vi))%*%(y-exp(covariate%*%beta_est))
          step<-step+resid%*%t(resid)
       }else if (family=="binomial"){
          B<-matrix(0,nrow=cluster$n[i],ncol=cluster$n[i])
          diag(B)<-1/sqrt(exp(covariate%*%beta_est)/(1+exp(covariate%*%beta_est))^2)
          D<-mat.prod(covariate, exp(covariate%*%beta_est)/((1+exp(covariate%*%beta_est))^2))
          Vi <- diag(sqrt(c(exp(covariate%*%beta_est)/(1+exp(covariate%*%beta_est))^2)),cluster$n[i])%*%var_i%*%diag(sqrt(c(exp(covariate%*%beta_est)/(1+exp(covariate%*%beta_est))^2)),cluster$n[i])
          resid<-B%*%solve(cormax.ind(cluster$n[i])-D%*%solve(step01)%*%t(D)%*%solve(Vi))%*%(y-exp(covariate%*%beta_est)/(1+exp(covariate%*%beta_est)))
          step<-step+resid%*%t(resid)
       }
    }
    # Empirical (pooled) covariance of the corrected residuals.
    unstr<-step/size
    #diag(unstr)<-rep(1, cluster$n[i])
    # Loop 3: bread (step11) and meat (step12) of the sandwich; p holds the
    # vectorized per-cluster meat contributions for the variance-of-variance.
    step11<-matrix(0,nrow=len,ncol=len)
    step12<-matrix(0,nrow=len,ncol=len)
    step13<-matrix(0,nrow=len_vec,ncol=1)
    step14<-matrix(0,nrow=len_vec,ncol=len_vec)
    p<-matrix(0,nrow=len_vec,ncol=size)
    for (i in 1:size){
       y<-as.matrix(data$response[data$id==unique(data$id)[i]])
       covariate<-as.matrix(subset(mat[,-length(mat[1,])], mat$subj==unique(data$id)[i]))
       var_i=var[1:cluster$n[i],1:cluster$n[i]]
       if (family=="gaussian"){
          # NOTE(review): solve(var) uses the full maximal-size working matrix
          # while every other branch uses the cluster-specific var_i/Vi; this
          # is non-conformable for clusters smaller than the maximum -- likely
          # intended to be solve(var_i). Confirm against the WL derivation.
          xy<-t(covariate)%*%solve(var_i)%*%unstr%*%solve(var)%*%covariate
          xx<-t(covariate)%*%solve(var_i)%*%covariate
          step11<-step11+xx
          step12<-step12+xy
          step13<-step13+vec(xy)
          p[,i]<-vec(xy)
       }else if (family=="poisson"){
          B<-matrix(0,nrow=cluster$n[i],ncol=cluster$n[i])
          diag(B)<-exp(covariate%*%beta_est)
          D<-mat.prod(covariate, exp(covariate%*%beta_est))
          Vi <- diag(sqrt(c(exp(covariate%*%beta_est))),cluster$n[i])%*%var_i%*%diag(sqrt(c(exp(covariate%*%beta_est))),cluster$n[i])
          xy<-t(D)%*%solve(Vi)%*%sqrt(B)%*%unstr%*%sqrt(B)%*%solve(Vi)%*%D
          xx<-t(D)%*%solve(Vi)%*%D
          step11<-step11+xx
          step12<-step12+xy
          step13<-step13+vec(xy)
          p[,i]<-vec(xy)
       }else if (family=="binomial"){
          B<-matrix(0,nrow=cluster$n[i],ncol=cluster$n[i])
          diag(B)<-exp(covariate%*%beta_est)/(1+exp(covariate%*%beta_est))^2
          D<-mat.prod(covariate, exp(covariate%*%beta_est)/((1+exp(covariate%*%beta_est))^2))
          Vi <- diag(sqrt(c(exp(covariate%*%beta_est)/(1+exp(covariate%*%beta_est))^2)),cluster$n[i])%*%var_i%*%diag(sqrt(c(exp(covariate%*%beta_est)/(1+exp(covariate%*%beta_est))^2)),cluster$n[i])
          xy<-t(D)%*%solve(Vi)%*%sqrt(B)%*%unstr%*%sqrt(B)%*%solve(Vi)%*%D
          xx<-t(D)%*%solve(Vi)%*%D
          step12<-step12+xy
          step11<-step11+xx
          step13<-step13+vec(xy)
          p[,i]<-vec(xy)
       }
    }
    # Loop 4: empirical covariance of the vectorized meat contributions.
    for (i in 1:size){
       dif<-(p[,i]-step13/size)%*%t(p[,i]-step13/size)
       step14<-step14+dif
    }
    # Sandwich: bread^{-1} %*% meat %*% bread^{-1}; cov.var estimates the
    # sampling variability of that robust variance (size/(size-1) factor).
    cov.beta<-solve(step11)%*%(step12)%*%solve(step11)
    cov.var<-size/(size-1)*kronecker(solve(step11), solve(step11))%*%step14%*%kronecker(solve(step11), solve(step11))
    return(list(cov.beta=diag(cov.beta), cov.var=cov.var))
    }
|
#' Function focuses on calculating and plotting the moving average of an observed time series.
#'
#'
#' Function returns either an overlapped or multi-paneled plot of the observed time series,
#' along with the plot for the moving average. The actual moving average values are also returned in a data.frame.
#'
#' The function uses the \href{https://github.com/deandevl/RplotterPkg}{RplotterPkg::multi_panel_grid}
#' package to draw the multi-paneled plot.
#'
#' If \code{display_plot} is TRUE then the plots will be displayed. If \code{display_plot} is FALSE then
#' the function returns a named list that includes a plot object which can be displayed from the console by entering:
#' \enumerate{
#' \item \code{grid::grid.newpage()}
#' \item \code{grid::grid.draw(plot object)}
#' }
#'
#' @param df A data frame with variables for times and corresponding values.
#' @param time_col Names the column from \code{df} for the time values. Values can
#' be numeric or Date/POSIXct.
#' @param value_col Names the value column from \code{df}.
#' @param window_n An integer that controls the backward window length of the moving average.
#' @param ma_type A string that sets the type of moving average. Accepted values are "sma" simple, "tri" triangular,
#' "wma" weighted, "exp" exponential, "mod" modified, and "spe" Spencer weighted 15 point average.
#' @param overlap A logical which if \code{TRUE} overlaps both the observed and the moving average series'. If \code{FALSE} the
#' plots are in separate panels.
#' @param title A string that sets the overall title to the plots.
#' @param ma_caption A string that sets the caption for the ma plot.
#' @param ob_caption A string that sets the caption for the observed plot.
#' @param x_title A string that defines the x axis title.
#' @param y_title A string that defines the y axis title.
#' @param x_limits A Date/POSIXct 2 element vector that sets the minimum and maximum for the x axis.
#' Use NA to refer to the existing minimum and maximum.
#' @param x_major_breaks A Date/POSIXct vector or function that defines the exact major tic locations along the x axis.
#' @param x_major_date_breaks For Date/POSIXct, a string containing the number and date unit for major breaks.
#' Examples: \code{"1 year"}, \code{"4 sec"}, \code{"3 month"}, \code{"2 week"}.
#' @param x_date_labels For Date/POSIXct, a string containing the format codes for the x axis date format.
#' This can be a strftime format for each x axis tic date label.
#' Examples: \code{"\%Y-\%m"}, \code{"\%Y/\%b/\%d"}, \code{"\%H-\%M-\%S"}.
#' @param y_limits A numeric 2 element vector that sets the minimum and maximum for the y axis.
#' The default is \code{c(1,10)}.
#' @param y_major_breaks A numeric vector or function that defines the exact major tic locations for the moving average y axis'.
#' @param show_pts A logical which if FALSE will plot only the lines.
#' @param show_major_grids A logical that controls the appearance of major grids.
#' @param show_minor_grids A logical that controls the appearance of minor grids.
#' @param show_observe A logical that controls the appearance of the observed time series.
#' @param col_width An numeric that sets the width of each plot in centimeters.
#' @param row_height A numeric that sets the height of each plot in centimeters.
#' @param display_plot A logical that if TRUE displays the plot.
#' @param png_file_path A character string with the directory and file name to produce
#' a png image of the plot.
#'
#' @return Returning a named list with:
#' \enumerate{
#' \item "ma_df" -- A data.frame/data.table with column variables for time "DateTime" and the moving average values "Value" and
#' source of the values "Source".
#' \item "plots" -- A multi-panelled/overlapped TableGrob object plotting the observed series and the moving averages.
#' Use \code{grid::grid.draw(plots)} to display the plots.
#' }
#'
#' @author Rick Dean
#'
#' @importFrom data.table data.table
#' @importFrom grid gpar
#' @importFrom grid unit
#' @importFrom grid grid.newpage
#' @importFrom grid grid.draw
#' @importFrom gtable gtable
#' @importFrom gtable gtable_add_grob
#' @importFrom rlang sym
#' @importFrom RplotterPkg create_scatter_plot
#' @importFrom RplotterPkg multi_panel_grid
#' @import ggplot2
#' @importFrom ggplot2 ggsave
#'
#' @export
graph_ma <- function(
df = NULL,
time_col = NULL,
value_col = NULL,
window_n = 4,
ma_type = "sma",
overlap = TRUE,
title = NULL,
ma_caption = NULL,
ob_caption = NULL,
x_title = NULL,
y_title = NULL,
x_limits = NULL,
x_major_breaks = waiver(),
x_major_date_breaks = waiver(),
x_date_labels = waiver(),
y_limits = NULL,
y_major_breaks = waiver(),
show_major_grids = TRUE,
show_minor_grids = TRUE,
show_pts = TRUE,
show_observe = TRUE,
col_width = 18,
row_height = 5,
display_plot = TRUE,
png_file_path = NULL
){
if(is.null(time_col) | is.null(value_col)) {
stop("Both time_col and value_col are required")
}
if(is.null(x_title)){
x_title <- time_col
}
if(is.null(y_title)){
y_title <- value_col
}
dates <- df[[time_col]]
values <- df[[value_col]]
row_heights = NULL
if(is.null(y_limits)){
y_limits <- c(min(values), max(values))
}
get_simple_ma <- function(values, window_n){
values_n <- length(values)
simple_ma <- numeric(values_n)
for (k in 1:(window_n-1)) {
simple_ma[k] <- mean(values[1:k])
}
for (k in window_n:values_n){
simple_ma[k] <- mean(values[(k - window_n + 1):k])
}
return(simple_ma)
}
if(ma_type == "sma"){
ma_name <- "Simple Moving Average"
ma = get_simple_ma(values = values, window_n = window_n)
}else if(ma_type == "tri"){
ma_name <- "Triangular Moving Average"
win_n <- ceiling((window_n + 1)/2)
ma_1 <- get_simple_ma(values = values, window_n = win_n)
ma <- get_simple_ma(values = ma_1, window_n = win_n)
}else if(ma_type == "wma"){
ma_name <- "Weighted Moving Average"
values_n <- length(values)
ma <- numeric(values_n)
for(k in 1:(window_n-1)) {
divisor <- (k * (k + 1)) / 2
ma[k] <- sum((k:1) * values[k:1]) / divisor
}
divisor <- (window_n * (window_n + 1)) / 2
for(k in window_n:values_n){
vec <- (window_n:1) * values[k:(k - window_n + 1)]
ma[k] <- sum(vec) / divisor
}
}else if(ma_type == "exp"){
ma_name <- "Exponential Moving Average"
values_n <- length(values)
ma <- numeric(values_n)
wt <- 2 / (window_n + 1)
ma[1] <- values[1]
for(k in 2:values_n) ma[k] <- wt * values[k] + (1 - wt) * ma[k-1]
}else if(ma_type == "mod"){
ma_name <- "Modified Moving Average"
values_n <- length(values)
ma <- numeric(values_n)
ma[1] <- values[1]
for(k in 2:values_n) ma[k] <- ma[k-1] + (values[k] - ma[k-1])/window_n
}else if(ma_type == "spe"){
ma_name <- "Spencer Moving Average"
values_n <- length(values) - 15
dates <- dates[1:values_n]
ma <- numeric(values_n)
weights <- c(-3, -6, -5, 3, 21, 46, 67, 74, 67, 46, 21, 3, -5, -6, -3)/320
ma <- numeric(values_n)
for(k in 1:values_n){
vals <- values[k:(k + 14)]
ma[k] <- sum(vals * weights, na.rm = TRUE)
}
}
ma_dt <- data.table(
datetime = dates,
value = ma
)
if(!show_observe){
row_heights <- row_height
a_plot <- RplotterPkg::create_scatter_plot(
df = ma_dt,
aes_x = "datetime",
aes_y = "value",
caption = ma_caption,
x_title = x_title,
y_title = y_title,
x_limits = x_limits,
x_major_breaks = x_major_breaks,
x_major_date_breaks = x_major_date_breaks,
x_date_labels = x_date_labels,
y_limits = y_limits,
y_major_breaks = y_major_breaks,
show_pts = show_pts,
show_major_grids = show_major_grids,
show_minor_grids = show_minor_grids,
connect = TRUE
)
if(!is.null(png_file_path)){
n_columns <- 1
n_rows <- 1
ggplot2::ggsave(
filename = png_file_path,
plot = a_plot,
device = "png",
width = col_width * n_columns * 1700,
height = row_height[1] * n_rows * 1700,
units = "px",
scale = .05,
dpi = 72
)
}
if(display_plot){
grid::grid.newpage()
grid::grid.draw(a_plot)
}else{
return(list(
ma_df = ma_dt,
plots = a_plot
))
}
}else if(!overlap){
row_heights <- c(row_height, row_height + .5)
#create a line plot of the observed series
obsv_plot <- RplotterPkg::create_scatter_plot(
df = df,
aes_x = time_col,
aes_y = value_col,
rot_y_tic_label = TRUE,
caption = ob_caption,
hide_x_tics = TRUE,
y_title = y_title,
x_limits = x_limits,
x_major_breaks = x_major_breaks,
x_major_date_breaks = x_major_date_breaks,
x_date_labels = x_date_labels,
y_limits = y_limits,
y_major_breaks = y_major_breaks,
show_pts = show_pts,
show_major_grids = show_major_grids,
show_minor_grids = show_minor_grids,
connect = TRUE
)
# create a line plot of the ma series
ma_plot <- RplotterPkg::create_scatter_plot(
df = ma_dt,
aes_x = "datetime",
aes_y = "value",
rot_y_tic_label = TRUE,
caption = ma_caption,
x_title = x_title,
y_title = y_title,
x_limits = x_limits,
x_major_breaks = x_major_breaks,
x_major_date_breaks = x_major_date_breaks,
x_date_labels = x_date_labels,
y_limits = y_limits,
y_major_breaks = y_major_breaks,
show_pts = show_pts,
show_major_grids = show_major_grids,
show_minor_grids = show_minor_grids,
connect = TRUE
)
# put both plots in a list to display them in separate panels
plots <- list(obsv_plot,ma_plot)
# display the plots in a multipanel
n_columns <- 1
cols <- c()
for(i in seq_along(plots)){
val <- i %% n_columns
if(val == 0){
cols <- c(cols, n_columns)
}else {
cols <- c(cols,val)
}
}
n_rows <- ceiling(length(plots)/n_columns)
rows <- c()
for(i in seq(1, n_rows, by = 1)){
for(ii in seq(1, n_columns, by = 1)){
rows <- c(rows, i)
}
}
multi_layout <- list(
plots = plots,
rows = rows,
cols = cols
)
multi_plot <- RplotterPkg::multi_panel_grid(
layout = multi_layout,
col_widths = rep(col_width, n_columns),
row_heights = row_heights,
title = title,
display_plot = FALSE
)
if(!is.null(png_file_path)){
ggplot2::ggsave(
filename = png_file_path,
plot = multi_plot,
device = "png",
width = col_width * n_columns * 1700,
height = row_height * n_rows * 1700,
units = "px",
scale = .05,
dpi = 72
)
}
if(display_plot){
grid::grid.newpage()
grid::grid.draw(multi_plot)
}else{
return(list(
ma_df = ma_dt,
plots = multi_plot
))
}
}else{
observe_dt <- data.table(
datetime = df[[time_col]],
value = df[[value_col]],
source = "Observed"
)
ma_dt[, source := ma_name]
plot_df <- rbind(observe_dt, ma_dt)
a_plot <- RplotterPkg::create_scatter_plot(
df = plot_df,
aes_x = "datetime",
aes_y = "value",
aes_color = "source",
subtitle = title,
x_title = x_title,
y_title = y_title,
x_limits = x_limits,
x_major_breaks = x_major_breaks,
x_major_date_breaks = x_major_date_breaks,
x_date_labels = x_date_labels,
y_limits = y_limits,
y_major_breaks = y_major_breaks,
show_pts = show_pts,
show_major_grids = show_major_grids,
show_minor_grids = show_minor_grids,
connect = TRUE
)
if(!is.null(png_file_path)){
n_columns <- 1
n_rows <- 1
ggplot2::ggsave(
filename = png_file_path,
plot = a_plot,
device = "png",
width = col_width * n_columns * 1700,
height = row_height * n_rows * 1700,
units = "px",
scale = .05,
dpi = 72
)
}
if(display_plot){
grid::grid.draw(a_plot)
}else{
return(list(
ma_df = ma_dt,
plots = a_plot
))
}
}
}
| /R/graph_ma.R | permissive | deandevl/RtsaPkg | R | false | false | 12,330 | r | #' Function focuses on calculating and plotting the moving average of an observed time series.
#'
#'
#' Function returns either an overlapped or multi-paneled plot of the observed time series,
#' along with the plot for the moving average. The actual moving average values are also returned in a data.frame.
#'
#' The function uses the \href{https://github.com/deandevl/RplotterPkg}{RplotterPkg::multi_panel_grid}
#' package to draw the multi-paneled plot.
#'
#' If \code{display_plot} is TRUE then the plots will be displayed. If \code{display_plot} is FALSE then
#' the function returns a named list that includes a plot object which can be displayed from the console by entering:
#' \enumerate{
#' \item \code{grid::grid.newpage()}
#' \item \code{grid::grid.draw(plot object)}
#' }
#'
#' @param df A data frame with variables for times and corresponding values.
#' @param time_col Names the column from \code{df} for the time values. Values can
#' be numeric or Date/POSIXct.
#' @param value_col Names the value column from \code{df}.
#' @param window_n An integer that controls the backward window length of the moving average.
#' @param ma_type A string that sets the type of moving average. Accepted values are "sma" simple, "tri" triangular,
#' "wma" weighted, "exp" exponential, "mod" modified, and "spe" Spencer weighted 15 point average.
#' @param overlap A logical which if \code{TRUE} overlaps both the observed and the moving average series'. If \code{FALSE} the
#' plots are in separate panels.
#' @param title A string that sets the overall title to the plots.
#' @param ma_caption A string that sets the caption for the ma plot.
#' @param ob_caption A string that sets the caption for the observed plot.
#' @param x_title A string that defines the x axis title.
#' @param y_title A string that defines the y axis title.
#' @param x_limits A Date/POSIXct 2 element vector that sets the minimum and maximum for the x axis.
#' Use NA to refer to the existing minimum and maximum.
#' @param x_major_breaks A Date/POSIXct vector or function that defines the exact major tic locations along the x axis.
#' @param x_major_date_breaks For Date/POSIXct, a string containing the number and date unit for major breaks.
#' Examples: \code{"1 year"}, \code{"4 sec"}, \code{"3 month"}, \code{"2 week"}.
#' @param x_date_labels For Date/POSIXct, a string containing the format codes for the x axis date format.
#' This can be a strftime format for each x axis tic date label.
#' Examples: \code{"\%Y-\%m"}, \code{"\%Y/\%b/\%d"}, \code{"\%H-\%M-\%S"}.
#' @param y_limits A numeric 2 element vector that sets the minimum and maximum for the y axis.
#' The default is \code{c(1,10)}.
#' @param y_major_breaks A numeric vector or function that defines the exact major tic locations for the moving average y axis'.
#' @param show_pts A logical which if FALSE will plot only the lines.
#' @param show_major_grids A logical that controls the appearance of major grids.
#' @param show_minor_grids A logical that controls the appearance of minor grids.
#' @param show_observe A logical that controls the appearance of the observed time series.
#' @param col_width An numeric that sets the width of each plot in centimeters.
#' @param row_height A numeric that sets the height of each plot in centimeters.
#' @param display_plot A logical that if TRUE displays the plot.
#' @param png_file_path A character string with the directory and file name to produce
#' a png image of the plot.
#'
#' @return Returning a named list with:
#' \enumerate{
#'   \item "ma_df" -- A data.frame/data.table with a "datetime" column for time, a "value" column holding the
#'   moving average, and (when \code{overlap = TRUE}) a "source" column naming the series.
#' \item "plots" -- A multi-panelled/overlapped TableGrob object plotting the observed series and the moving averages.
#' Use \code{grid::grid.draw(plots)} to display the plots.
#' }
#'
#' @author Rick Dean
#'
#' @importFrom data.table data.table
#' @importFrom grid gpar
#' @importFrom grid unit
#' @importFrom grid grid.newpage
#' @importFrom grid grid.draw
#' @importFrom gtable gtable
#' @importFrom gtable gtable_add_grob
#' @importFrom rlang sym
#' @importFrom RplotterPkg create_scatter_plot
#' @importFrom RplotterPkg multi_panel_grid
#' @import ggplot2
#' @importFrom ggplot2 ggsave
#'
#' @export
graph_ma <- function(
  df = NULL,
  time_col = NULL,
  value_col = NULL,
  window_n = 4,
  ma_type = "sma",
  overlap = TRUE,
  title = NULL,
  ma_caption = NULL,
  ob_caption = NULL,
  x_title = NULL,
  y_title = NULL,
  x_limits = NULL,
  x_major_breaks = waiver(),
  x_major_date_breaks = waiver(),
  x_date_labels = waiver(),
  y_limits = NULL,
  y_major_breaks = waiver(),
  show_major_grids = TRUE,
  show_minor_grids = TRUE,
  show_pts = TRUE,
  show_observe = TRUE,
  col_width = 18,
  row_height = 5,
  display_plot = TRUE,
  png_file_path = NULL
){
  if(is.null(time_col) || is.null(value_col)) {
    stop("Both time_col and value_col are required")
  }
  # Default the axis titles to the source column names.
  if(is.null(x_title)){
    x_title <- time_col
  }
  if(is.null(y_title)){
    y_title <- value_col
  }
  dates <- df[[time_col]]
  values <- df[[value_col]]
  if(is.null(y_limits)){
    y_limits <- c(min(values), max(values))
  }

  # Trailing simple moving average with a warm-up: the first
  # window_n - 1 points are averaged over the shorter available history
  # so the output has the same length as the input. seq_len() keeps the
  # warm-up loop empty when window_n == 1 (the former 1:(window_n - 1)
  # ran k = 1, 0).
  simple_ma <- function(values, window_n){
    values_n <- length(values)
    out <- numeric(values_n)
    for (k in seq_len(window_n - 1)) {
      out[k] <- mean(values[1:k])
    }
    for (k in window_n:values_n){
      out[k] <- mean(values[(k - window_n + 1):k])
    }
    out
  }

  # Save a plot to png_file_path (no-op when the path is NULL). The
  # width/height arithmetic reproduces the fixed 72 dpi px conversion
  # that was previously repeated in every branch; the layout is always
  # one column wide.
  save_png <- function(a_plot, n_rows){
    if(!is.null(png_file_path)){
      ggplot2::ggsave(
        filename = png_file_path,
        plot = a_plot,
        device = "png",
        width = col_width * 1700,
        height = row_height * n_rows * 1700,
        units = "px",
        scale = .05,
        dpi = 72
      )
    }
  }

  # --- compute the requested moving average -------------------------
  if(ma_type == "sma"){
    ma_name <- "Simple Moving Average"
    ma <- simple_ma(values, window_n)
  }else if(ma_type == "tri"){
    # Triangular MA: a simple MA applied twice with a half-width window.
    ma_name <- "Triangular Moving Average"
    win_n <- ceiling((window_n + 1)/2)
    ma <- simple_ma(simple_ma(values, win_n), win_n)
  }else if(ma_type == "wma"){
    # Linearly weighted MA: the most recent value in the window gets the
    # largest weight, the oldest gets weight 1.
    ma_name <- "Weighted Moving Average"
    values_n <- length(values)
    ma <- numeric(values_n)
    for(k in seq_len(window_n - 1)) {
      divisor <- (k * (k + 1)) / 2
      ma[k] <- sum((k:1) * values[k:1]) / divisor
    }
    divisor <- (window_n * (window_n + 1)) / 2
    for(k in window_n:values_n){
      ma[k] <- sum((window_n:1) * values[k:(k - window_n + 1)]) / divisor
    }
  }else if(ma_type == "exp"){
    # Exponential MA with smoothing factor 2 / (window_n + 1).
    ma_name <- "Exponential Moving Average"
    values_n <- length(values)
    ma <- numeric(values_n)
    wt <- 2 / (window_n + 1)
    ma[1] <- values[1]
    for(k in 2:values_n) ma[k] <- wt * values[k] + (1 - wt) * ma[k-1]
  }else if(ma_type == "mod"){
    # Modified MA: previous average adjusted by 1/window_n of the new
    # deviation.
    ma_name <- "Modified Moving Average"
    values_n <- length(values)
    ma <- numeric(values_n)
    ma[1] <- values[1]
    for(k in 2:values_n) ma[k] <- ma[k-1] + (values[k] - ma[k-1])/window_n
  }else if(ma_type == "spe"){
    # Spencer's 15-point weighted MA. The series is shortened because a
    # full 15-point forward window is taken at each point.
    # NOTE(review): values_n <- length(values) - 15 discards one more
    # point than the 15-point window strictly requires (k + 14 only
    # reaches length(values) - 1) -- kept as-is to preserve output
    # length; confirm intent.
    ma_name <- "Spencer Moving Average"
    values_n <- length(values) - 15
    dates <- dates[1:values_n]
    weights <- c(-3, -6, -5, 3, 21, 46, 67, 74, 67, 46, 21, 3, -5, -6, -3)/320
    ma <- numeric(values_n)
    for(k in seq_len(values_n)){
      ma[k] <- sum(values[k:(k + 14)] * weights, na.rm = TRUE)
    }
  }

  ma_dt <- data.table(
    datetime = dates,
    value = ma
  )

  if(!show_observe){
    # Single panel: only the moving average series.
    a_plot <- RplotterPkg::create_scatter_plot(
      df = ma_dt,
      aes_x = "datetime",
      aes_y = "value",
      caption = ma_caption,
      x_title = x_title,
      y_title = y_title,
      x_limits = x_limits,
      x_major_breaks = x_major_breaks,
      x_major_date_breaks = x_major_date_breaks,
      x_date_labels = x_date_labels,
      y_limits = y_limits,
      y_major_breaks = y_major_breaks,
      show_pts = show_pts,
      show_major_grids = show_major_grids,
      show_minor_grids = show_minor_grids,
      connect = TRUE
    )
    save_png(a_plot, n_rows = 1)
    if(display_plot){
      grid::grid.newpage()
      grid::grid.draw(a_plot)
    }else{
      return(list(
        ma_df = ma_dt,
        plots = a_plot
      ))
    }
  }else if(!overlap){
    # Two stacked panels: observed series on top, moving average below.
    obsv_plot <- RplotterPkg::create_scatter_plot(
      df = df,
      aes_x = time_col,
      aes_y = value_col,
      rot_y_tic_label = TRUE,
      caption = ob_caption,
      hide_x_tics = TRUE,
      y_title = y_title,
      x_limits = x_limits,
      x_major_breaks = x_major_breaks,
      x_major_date_breaks = x_major_date_breaks,
      x_date_labels = x_date_labels,
      y_limits = y_limits,
      y_major_breaks = y_major_breaks,
      show_pts = show_pts,
      show_major_grids = show_major_grids,
      show_minor_grids = show_minor_grids,
      connect = TRUE
    )
    ma_plot <- RplotterPkg::create_scatter_plot(
      df = ma_dt,
      aes_x = "datetime",
      aes_y = "value",
      rot_y_tic_label = TRUE,
      caption = ma_caption,
      x_title = x_title,
      y_title = y_title,
      x_limits = x_limits,
      x_major_breaks = x_major_breaks,
      x_major_date_breaks = x_major_date_breaks,
      x_date_labels = x_date_labels,
      y_limits = y_limits,
      y_major_breaks = y_major_breaks,
      show_pts = show_pts,
      show_major_grids = show_major_grids,
      show_minor_grids = show_minor_grids,
      connect = TRUE
    )
    # Fixed one-column, two-row layout (the general row/column loop the
    # function previously carried always produced exactly this). The
    # bottom panel is slightly taller to make room for the x axis.
    multi_layout <- list(
      plots = list(obsv_plot, ma_plot),
      rows = c(1, 2),
      cols = c(1, 1)
    )
    multi_plot <- RplotterPkg::multi_panel_grid(
      layout = multi_layout,
      col_widths = col_width,
      row_heights = c(row_height, row_height + .5),
      title = title,
      display_plot = FALSE
    )
    save_png(multi_plot, n_rows = 2)
    if(display_plot){
      grid::grid.newpage()
      grid::grid.draw(multi_plot)
    }else{
      return(list(
        ma_df = ma_dt,
        plots = multi_plot
      ))
    }
  }else{
    # Single panel with both series overlapped, colored by source.
    observe_dt <- data.table(
      datetime = df[[time_col]],
      value = df[[value_col]],
      source = "Observed"
    )
    ma_dt[, source := ma_name]
    plot_df <- rbind(observe_dt, ma_dt)
    a_plot <- RplotterPkg::create_scatter_plot(
      df = plot_df,
      aes_x = "datetime",
      aes_y = "value",
      aes_color = "source",
      subtitle = title,
      x_title = x_title,
      y_title = y_title,
      x_limits = x_limits,
      x_major_breaks = x_major_breaks,
      x_major_date_breaks = x_major_date_breaks,
      x_date_labels = x_date_labels,
      y_limits = y_limits,
      y_major_breaks = y_major_breaks,
      show_pts = show_pts,
      show_major_grids = show_major_grids,
      show_minor_grids = show_minor_grids,
      connect = TRUE
    )
    save_png(a_plot, n_rows = 1)
    if(display_plot){
      # Clear the page before drawing, matching the other branches
      # (previously missing here).
      grid::grid.newpage()
      grid::grid.draw(a_plot)
    }else{
      return(list(
        ma_df = ma_dt,
        plots = a_plot
      ))
    }
  }
}
# Zero out plot margins for all subsequent plots (updates the global
# systemic.par list via super-assignment).
systemic.par$mar <<- c(0, 0, 0, 0)

# Compute the periodogram of one system file and save it as a PNG.
#
# f: path to a systemic system file; the image is written to
#    ../app/www/img/periodograms/<basename(f)>.png.
# Plotting is skipped (but the periodogram still returned) when column 2
# of the periodogram contains NaN values.
# Returns the object produced by kperiodogram().
plotPeriodogram <- function(f) {
  fn <- basename(f)
  # paste0() (base R) replaces the stringr::str_c() call; identical
  # output for these non-NA scalar inputs.
  plotfn <- paste0('../app/www/img/periodograms/', fn, '.png')
  k <- knew()
  kload.system(k, f)
  d <- kperiodogram(k)
  if (!any(is.nan(d[, 2]))) {
    plot(d)
    quartz.save(plotfn, dpi = 200,
                width = 6, height = 4)
  }
  return(d)
}
| /R/periodogram.r | permissive | stefano-meschiari/rvdb | R | false | false | 344 | r | systemic.par$mar <<- c(0, 0, 0, 0)
plotPeriodogram <- function(f) {
fn <- basename(f)
plotfn <- str_c('../app/www/img/periodograms/', fn, '.png')
k <- knew()
kload.system(k, f)
d <- kperiodogram(k)
if (! any(is.nan(d[,2]))) {
plot(d)
quartz.save(plotfn, dpi=200,
width=6, height=4)
}
return(d)
}
|
#######################################
#
#Scripts to generate length and age comps for the lingcod_2021 stock assessment from
#the hook and line survey.
#
#Generates two comps: 1) females then males, and 2) unsexed fish
#
#Author: Brian Langseth
#Created: March 2, 2021
#
#######################################
##There are a few remaining tasks
# (These task lines were previously bare text, which made the script
# fail to parse when sourced -- now commented out.)
# 1. Need to define length bins (currently set to min/max of each dataset with bin size of 2)
# 2. Will need to specify survey timing, sex (currently 3), fleet number, partition, etc....
#devtools::install_github("nwfsc-assess/nwfscSurvey", build_vignettes = TRUE)
library(nwfscSurvey)
#vignette("nwfscSurvey")
##------------------------------------Scripts----------------------------------------##
#Working directory where files will be saved
# NOTE(review): setwd() to an absolute U:/ path makes the script
# machine-specific; every later relative path depends on this directory.
setwd("U:/Stock assessments/lingcod_2021/surveys")
#John Harms provided data in email on Feb 9, 2021.
hnl_full = read.csv("qryGrandUnifiedThru2019_For2021Assessments_DWarehouse version_01072021.csv", header = TRUE)
# Keep only the lingcod records from the full survey table.
hnl = hnl_full[hnl_full$common_name == "Lingcod",]
#No ages in this dataset
# Printed to the console as a quick check of the age_years column.
table(hnl$age_years, useNA = "always")
#Laurel Lam provided ages on March 22, 2021 from 2017-2019 for hook and line survey
hnl_ages = read.csv("H&L_Lingcod_ages.csv", header = TRUE)
#Create subfolder in length and age directory
if(!dir.exists(file.path("lengths","HooknLine"))) dir.create(file.path("lengths","HooknLine"))
if(!dir.exists(file.path("ages","HooknLine"))) dir.create(file.path("ages","HooknLine"))
############################################################################################
# Comps - all for southern model
############################################################################################
#------------------------Length-----------------------------#
# Length bins: 2-cm bins spanning the observed min/max (data-driven, not
# fixed -- see the TODO list in the header).
lrange = range(hnl$length_cm, na.rm = TRUE)
lbins = seq(from = lrange[1], to = lrange[2], by = 2)
#Rename fields so they work with UnexpandedLFs.fn
hnl$Length_cm = hnl$length_cm
hnl$Sex = hnl$sex
hnl$Year = hnl$year
#Generate Comps
# Unexpanded (raw) length comps; per the header, sex = 3 yields
# females-then-males comps plus a separate unsexed set (lfs$comps_u).
# fleet/partition/month are placeholders (see header TODO).
lfs = UnexpandedLFs.fn(dir = file.path("lengths"), #puts into "forSS" folder in this location
                       datL = hnl, lgthBins = lbins, printfolder = "HooknLine",
                       sex = 3, partition = 0, fleet = 1, month = 1)
# Rename the generic output files to HNL-specific names
# (Sex_3 = female/male comps, Sex_0 = unsexed comps).
file.rename(from = file.path("lengths", "HooknLine", paste0("Survey_notExpanded_Length_comp_Sex_3_bin=", min(lbins), "-", max(lbins), ".csv")),
            to = file.path("lengths", "HooknLine", paste0("HNL_notExpanded_Length_comp_Sex_3_bin=", min(lbins), "-", max(lbins), ".csv")))
file.rename(from = file.path("lengths", "HooknLine", paste0("Survey_notExpanded_Length_comp_Sex_0_bin=", min(lbins), "-", max(lbins), ".csv")),
            to = file.path("lengths", "HooknLine", paste0("HNL_notExpanded_Length_comp_Sex_0_bin=", min(lbins), "-", max(lbins), ".csv")))
#Visualize
PlotFreqData.fn(dir = file.path("lengths", "HooknLine"),
                dat = lfs$comps, ylim=c(0, max(lbins)+4),
                main = "HNL lengths Male-Female", yaxs="i", ylab="Length (cm)", dopng = TRUE)
PlotFreqData.fn(dir = file.path("lengths", "HooknLine"),
                dat = lfs$comps_u, ylim=c(0, max(lbins)+4),
                main = "HNL lengths Unsexed", yaxs="i", ylab="Length (cm)", dopng = TRUE)
PlotSexRatio.fn(dir = file.path("lengths", "HooknLine"),
                dat = hnl, ylim = c(-0.1, 1.1), main = "HNL Sex Ratio", yaxs="i", dopng = TRUE)
# NOTE(review): age_representativeness_plot() is not defined in this
# script -- presumably sourced from elsewhere in the project; confirm.
age_representativeness_plot(hnl)
#------------------------Age-----------------------------#
#Marginal comps
# Age bins: 1-year bins spanning the observed min/max ages.
arange = range(hnl_ages$Final.Age, na.rm = TRUE)
abins = seq(from = arange[1], to = arange[2], by = 1)
#Set up required variable names
# Rename fields so they work with UnexpandedAFs.fn.
hnl_ages$Age = hnl_ages$Final.Age
hnl_ages$Length_cm = hnl_ages$FishLength
hnl_ages$Sex = hnl_ages$FishGenderCode
hnl_ages$Year = hnl_ages$SurveyYear
afs = UnexpandedAFs.fn(dir = file.path("ages"), #Somehow stills prints to "forSS"
                       datA = hnl_ages, ageBins = abins, printfolder = "HooknLine",
                       sex = 3, partition = 0, fleet = 1, month = 1, ageErr = 1)
# NOTE(review): the "bin=1-12" / "Bins_1_12" names below are hard-coded
# while abins is data-driven; if the observed age range is not 1-12 the
# rename will miss the generated file -- confirm.
file.rename(from = file.path("ages", "forSS", "Survey_notExpanded_Age_comp_Sex_3_bin=1-12.csv"),
            to= file.path("ages", "HooknLine", "south_Survey_Sex3_Bins_1_12_AgeComps.csv"))
if(dir.exists(file.path("ages","forSS"))) unlink(file.path("ages","forSS"),recursive = TRUE) #remove forSS file
PlotFreqData.fn(dir = file.path(getwd(), "ages", "HooknLine"), dat = afs$comps, ylim=c(0, max(abins) + 1), inch = 0.10, main = "Hook and Line", yaxs="i", ylab="Age", dopng = TRUE)
PlotSexRatio.fn(dir = file.path(getwd(), "ages", "HooknLine"), dat = hnl_ages, data.type = "age", dopng = TRUE, main = "Hook and Line")
#Currently not doing CAAL
| /data-raw/survey_comps_HNL.R | no_license | aliwhitman/Lingcod_2021 | R | false | false | 4,714 | r | #######################################
#
#Scripts to generate length and age comps for the lingcod_2021 stock assessment from
#the hook and line survey.
#
#Generates two comps: 1) females then males, and 2) unsexed fish
#
#Author: Brian Langseth
#Created: March 2, 2021
#
#######################################
##There are a few remaining tasks
1. Need to define length bins (currently set to min/max of each dataset with bin size of 2)
2. Will need to specify survey timing, sex (currently 3), fleet number, partition, etc....
#devtools::install_github("nwfsc-assess/nwfscSurvey", build_vignettes = TRUE)
library(nwfscSurvey)
#vignette("nwfscSurvey")
##------------------------------------Scripts----------------------------------------##
#Working directory where files will be saved
setwd("U:/Stock assessments/lingcod_2021/surveys")
#John Harms provided data in email on Feb 9, 2021.
hnl_full = read.csv("qryGrandUnifiedThru2019_For2021Assessments_DWarehouse version_01072021.csv", header = TRUE)
hnl = hnl_full[hnl_full$common_name == "Lingcod",]
#No ages in this dataset
table(hnl$age_years, useNA = "always")
#Laurel Lam provided ages on March 22, 2021 from 2017-2019 for hook and line survey
hnl_ages = read.csv("H&L_Lingcod_ages.csv", header = TRUE)
#Create subfolder in length and age directory
if(!dir.exists(file.path("lengths","HooknLine"))) dir.create(file.path("lengths","HooknLine"))
if(!dir.exists(file.path("ages","HooknLine"))) dir.create(file.path("ages","HooknLine"))
############################################################################################
# Comps - all for southern model
############################################################################################
#------------------------Length-----------------------------#
lrange = range(hnl$length_cm, na.rm = TRUE)
lbins = seq(from = lrange[1], to = lrange[2], by = 2)
#Rename fields so they work with UnexpandedLFs.fn
hnl$Length_cm = hnl$length_cm
hnl$Sex = hnl$sex
hnl$Year = hnl$year
#Generate Comps
lfs = UnexpandedLFs.fn(dir = file.path("lengths"), #puts into "forSS" folder in this location
datL = hnl, lgthBins = lbins, printfolder = "HooknLine",
sex = 3, partition = 0, fleet = 1, month = 1)
file.rename(from = file.path("lengths", "HooknLine", paste0("Survey_notExpanded_Length_comp_Sex_3_bin=", min(lbins), "-", max(lbins), ".csv")),
to = file.path("lengths", "HooknLine", paste0("HNL_notExpanded_Length_comp_Sex_3_bin=", min(lbins), "-", max(lbins), ".csv")))
file.rename(from = file.path("lengths", "HooknLine", paste0("Survey_notExpanded_Length_comp_Sex_0_bin=", min(lbins), "-", max(lbins), ".csv")),
to = file.path("lengths", "HooknLine", paste0("HNL_notExpanded_Length_comp_Sex_0_bin=", min(lbins), "-", max(lbins), ".csv")))
#Visualize
PlotFreqData.fn(dir = file.path("lengths", "HooknLine"),
dat = lfs$comps, ylim=c(0, max(lbins)+4),
main = "HNL lengths Male-Female", yaxs="i", ylab="Length (cm)", dopng = TRUE)
PlotFreqData.fn(dir = file.path("lengths", "HooknLine"),
dat = lfs$comps_u, ylim=c(0, max(lbins)+4),
main = "HNL lengths Unsexed", yaxs="i", ylab="Length (cm)", dopng = TRUE)
PlotSexRatio.fn(dir = file.path("lengths", "HooknLine"),
dat = hnl, ylim = c(-0.1, 1.1), main = "HNL Sex Ratio", yaxs="i", dopng = TRUE)
age_representativeness_plot(hnl)
#------------------------Age-----------------------------#
#Marginal comps
arange = range(hnl_ages$Final.Age, na.rm = TRUE)
abins = seq(from = arange[1], to = arange[2], by = 1)
#Set up required variable names
hnl_ages$Age = hnl_ages$Final.Age
hnl_ages$Length_cm = hnl_ages$FishLength
hnl_ages$Sex = hnl_ages$FishGenderCode
hnl_ages$Year = hnl_ages$SurveyYear
afs = UnexpandedAFs.fn(dir = file.path("ages"), #Somehow stills prints to "forSS"
datA = hnl_ages, ageBins = abins, printfolder = "HooknLine",
sex = 3, partition = 0, fleet = 1, month = 1, ageErr = 1)
file.rename(from = file.path("ages", "forSS", "Survey_notExpanded_Age_comp_Sex_3_bin=1-12.csv"),
to= file.path("ages", "HooknLine", "south_Survey_Sex3_Bins_1_12_AgeComps.csv"))
if(dir.exists(file.path("ages","forSS"))) unlink(file.path("ages","forSS"),recursive = TRUE) #remove forSS file
PlotFreqData.fn(dir = file.path(getwd(), "ages", "HooknLine"), dat = afs$comps, ylim=c(0, max(abins) + 1), inch = 0.10, main = "Hook and Line", yaxs="i", ylab="Age", dopng = TRUE)
PlotSexRatio.fn(dir = file.path(getwd(), "ages", "HooknLine"), dat = hnl_ages, data.type = "age", dopng = TRUE, main = "Hook and Line")
#Currently not doing CAAL
|
# Read the target window of the power-consumption file: skip the first
# 66637 data rows and read 2880 rows (presumably two days of 1-minute
# samples -- confirm against the source file). Fields are ';'-separated.
data <- read.table("household_power_consumption.txt", sep = ";", skip = 66637, nrows = 2880)
# Merge the date (V1) and time (V2) fields, then parse into POSIXlt so
# the x axis is a proper time scale.
data$V1 <- paste(data$V1, data$V2)
data$V1 <- strptime(data$V1, format = "%d/%m/%Y %H:%M:%S")
# Render the three sub-metering series (V7/V8/V9) to a 480x480 PNG.
png(filename = "plot3.png", width = 480, height = 480, units = "px")
with(data, {
  plot(V1, V7, type = "l", xlab = "", ylab = "Energy sub metering")
  lines(V1, V8, col = "red")
  lines(V1, V9, col = "blue")
})
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"))
dev.off() | /plot3.R | no_license | redrinkwater/ExData_Plotting1 | R | false | false | 515 | r | data<- read.table("household_power_consumption.txt", sep=";", skip=66637, nrows = 2880)
# Merge the date (V1) and time (V2) fields of the previously-read data
# frame, then parse into POSIXlt so the x axis is a proper time scale.
data$V1 <- paste(data$V1, data$V2)
data$V1 <- strptime(data$V1, format = "%d/%m/%Y %H:%M:%S")
# Render the three sub-metering series (V7/V8/V9) to a 480x480 PNG.
png(filename = "plot3.png", width = 480, height = 480, units = "px")
with(data, {
  plot(V1, V7, type = "l", xlab = "", ylab = "Energy sub metering")
  lines(V1, V8, col = "red")
  lines(V1, V9, col = "blue")
})
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"))
dev.off()
# Read the raw huddling matrix; the first column supplies the
# row/column labels (animal IDs).
NC13_RU3_Huddling_Matrix <- read.csv("~/Dropbox/Research/SNH_health profile data for Fushing-selected/NC13_RU3_Huddling_Matrix.csv")
NC13HuddleR3=as.matrix(NC13_RU3_Huddling_Matrix[,-1])
colnames(NC13HuddleR3)=NC13_RU3_Huddling_Matrix[,1]
rownames(NC13HuddleR3)=NC13_RU3_Huddling_Matrix[,1]
##############
# Symmetrize the square matrix: entry [i, j] becomes the total count
# between i and j regardless of direction. M + t(M) replaces the
# element-wise double loop and -- unlike the loop, which copied into a
# dimnameless matrix(0, ...) -- keeps the row/column IDs just assigned
# above attached to the result.
NC13HuddleR3 <- NC13HuddleR3 + t(NC13HuddleR3)
# DCG clustering of the symmetrized huddling matrix over a ladder of
# temperature parameters passed to Eigen.plot2()/DCGtree.plot().
temp=c(0.2,0.3,3,100)
Ens.huddle3=Eigen.plot2(temp, selected.id=c(1,2,3,4),NC13HuddleR3)
DCG.huddle3=DCGtree.plot(num.clusters.selected=c(2,2,16,18),
                         "NC13HuddleR3 tree",Ens.huddle3,temp)
########
# Heatmaps of the huddling matrix: first ordered by the DCG dendrogram,
# then with heatmap.2's default clustering for comparison.
heatmap.2(NC13HuddleR3,Rowv=as.dendrogram(DCG.huddle3),Colv=as.dendrogram(DCG.huddle3),
          trace="none",col =colorRampPalette(c("white","green","green4","violet","purple"))(100))
heatmap.2(NC13HuddleR3,col =colorRampPalette(c("white","green","green4","violet","purple"))(100),
          trace="none")
#########
#double check the temperature selection
# NOTE(review): this block uses Eheat3, which is only defined further
# down in the script -- it fails unless the later section ran first;
# confirm the intended execution order.
temp=c(0.1,0.2,0.3,0.5,0.6,0.8,1,100)
Ens.h3=Eigen.plot2(temp, selected.id=c(1,2,3,4,5,6,7,8),Eheat3)
DCG.h3=DCGtree.plot(num.clusters.selected=c(2,5,10,10,19,20,22,24),
                    "NC13HuddleR1 tree",Ens.h3,temp)
########
#############
library(sparcl)
# colors the leaves of a dendrogram
# Cut the huddle tree at 28 and at 3 clusters; plot leaves colored by
# the 28-cluster assignment.
y3 = cutree(DCG.huddle3, 28)
y32=cutree(DCG.huddle3,3)
ColorDendrogram(DCG.huddle3, y = y3+1, main = "NC13HuddleR3 tree", xlab="",
                branchlength = 5)
#####################
#####################
# Rescale counts into [0, 1]: counts below 5 are scaled by 1/5, counts
# of 5 or more saturate at 1. pmin(x / 5, 1) is the vectorized
# equivalent of the original pair of which(..., arr.ind = TRUE)
# assignment passes and preserves the matrix's dim attributes.
Eheat3 <- pmin(NC13HuddleR3 / 5, 1)
#######
# Cluster the rescaled/saturated matrix (Eheat3) with its own
# temperature ladder.
# NOTE(review): the title "NC13HuddleR1 tree" in an R3 script looks
# like a copy/paste from the RU1 version -- confirm.
temp=c(0.2,0.3,2,1000)
Ens.heat3=Eigen.plot2(temp, selected.id=c(1,2,3,4),Eheat3)
DCG.heat3=DCGtree.plot(num.clusters.selected=c(2,2,20,23),
                       "NC13HuddleR1 tree",Ens.heat3,temp)
########
# Cut the heat tree at 24 and at 3 clusters; plot leaves colored by the
# 24-cluster assignment.
y3 = cutree(DCG.heat3, 24)
y32=cutree(DCG.heat3,3)
ColorDendrogram(DCG.heat3, y = y3+1, main = "NC13HuddleR3 tree", xlab="",
                branchlength = 5)
#####################
#visualize it against baseline
ColorDendrogram(DCG.huddle3, y = y1[-c(37,90)]+1, main = "NC13HuddleR3 tree", xlab="",
branchlength = 5) | /code/NC13HuddleR3.r | no_license | guanjiahui/Social-Network_rhesus-macaques | R | false | false | 2,323 | r | NC13_RU3_Huddling_Matrix <- read.csv("~/Dropbox/Research/SNH_health profile data for Fushing-selected/NC13_RU3_Huddling_Matrix.csv")
NC13HuddleR3=as.matrix(NC13_RU3_Huddling_Matrix[,-1])
colnames(NC13HuddleR3)=NC13_RU3_Huddling_Matrix[,1]
rownames(NC13HuddleR3)=NC13_RU3_Huddling_Matrix[,1]
##############
HuddleR3=matrix(0,nrow(NC13HuddleR3),ncol(NC13HuddleR3))
for (i in 1:ncol(NC13HuddleR3)){
for (j in 1:ncol(NC13HuddleR3)){
HuddleR3[i,j]=NC13HuddleR3[i,j]+NC13HuddleR3[j,i]
}
}
NC13HuddleR3=HuddleR3
temp=c(0.2,0.3,3,100)
Ens.huddle3=Eigen.plot2(temp, selected.id=c(1,2,3,4),NC13HuddleR3)
DCG.huddle3=DCGtree.plot(num.clusters.selected=c(2,2,16,18),
"NC13HuddleR3 tree",Ens.huddle3,temp)
########
heatmap.2(NC13HuddleR3,Rowv=as.dendrogram(DCG.huddle3),Colv=as.dendrogram(DCG.huddle3),
trace="none",col =colorRampPalette(c("white","green","green4","violet","purple"))(100))
heatmap.2(NC13HuddleR3,col =colorRampPalette(c("white","green","green4","violet","purple"))(100),
trace="none")
#########
#double check the temperature selection
temp=c(0.1,0.2,0.3,0.5,0.6,0.8,1,100)
Ens.h3=Eigen.plot2(temp, selected.id=c(1,2,3,4,5,6,7,8),Eheat3)
DCG.h3=DCGtree.plot(num.clusters.selected=c(2,5,10,10,19,20,22,24),
"NC13HuddleR1 tree",Ens.h3,temp)
########
#############
library(sparcl)
# colors the leaves of a dendrogram
y3 = cutree(DCG.huddle3, 28)
y32=cutree(DCG.huddle3,3)
ColorDendrogram(DCG.huddle3, y = y3+1, main = "NC13HuddleR3 tree", xlab="",
branchlength = 5)
#####################
#####################
Eheat3=NC13HuddleR3
small3=which(NC13HuddleR3<5,arr.ind=TRUE)
Eheat3[small3]=NC13HuddleR3[small3]/5
Eheat3[which(NC13HuddleR3>=5,arr.ind=TRUE)]=1
#######
temp=c(0.2,0.3,2,1000)
Ens.heat3=Eigen.plot2(temp, selected.id=c(1,2,3,4),Eheat3)
DCG.heat3=DCGtree.plot(num.clusters.selected=c(2,2,20,23),
"NC13HuddleR1 tree",Ens.heat3,temp)
########
y3 = cutree(DCG.heat3, 24)
y32=cutree(DCG.heat3,3)
ColorDendrogram(DCG.heat3, y = y3+1, main = "NC13HuddleR3 tree", xlab="",
branchlength = 5)
#####################
#visualize it against baseline
ColorDendrogram(DCG.huddle3, y = y1[-c(37,90)]+1, main = "NC13HuddleR3 tree", xlab="",
branchlength = 5) |
\name{GenomeAxisTrack-class}
\Rdversion{1.1}
\docType{class}
\alias{GenomeAxisTrack-class}
\alias{GenomeAxisTrack}
\alias{[,GenomeAxisTrack-method}
\alias{[,GenomeAxisTrack,ANY,ANY-method}
\alias{collapseTrack,GenomeAxisTrack-method}
\alias{drawGD,GenomeAxisTrack-method}
\alias{end,GenomeAxisTrack-method}
\alias{end<-,GenomeAxisTrack-method}
\alias{initialize,GenomeAxisTrack-method}
\alias{length,GenomeAxisTrack-method}
\alias{range,GenomeAxisTrack-method}
\alias{ranges,GenomeAxisTrack-method}
\alias{ranges<-,GenomeAxisTrack-method}
\alias{show,GenomeAxisTrack-method}
\alias{start,GenomeAxisTrack-method}
\alias{start<-,GenomeAxisTrack-method}
\alias{strand,GenomeAxisTrack-method}
\alias{subset,GenomeAxisTrack-method}
\alias{values,GenomeAxisTrack-method}
\alias{width,GenomeAxisTrack-method}
\title{GenomeAxisTrack class and methods}
\description{A class representing a customizable genomic axis.}
\section{Objects from the class}{
Objects can be created using the constructor function
\code{GenomeAxisTrack}.
}
\usage{
GenomeAxisTrack(range=NULL, name="Axis", id, ...)
}
\arguments{
\item{range}{Optional \code{\linkS4class{GRanges}} or
\code{\linkS4class{IRanges}} object to highlight certain regions on
the axis.}
\item{name}{Character scalar of the track's name used in the title
panel when plotting.}
\item{id}{A character vector of the same length as \code{range}
containing identifiers for the ranges. If missing, the constructor
will try to extract the ids from \code{names(range)}.}
\item{\dots}{Additional items which will all be interpreted as further
display parameters. See \code{\link{settings}} and the "Display
Parameters" section below for details.}
}
\details{
A \code{GenomeAxisTrack} can be customized using the familiar display
parameters. By providing a \code{GRanges} or \code{IRanges} object to
the constructor, ranges on the axis can be further highlighted.\\
With the \code{scale} display parameter, a small scale indicator can
be shown instead of the entire genomic axis. The scale can either be
provided as a fraction of the plotting region (it will be rounded to
the nearest human readable absolute value) or as an absolute value and
is always displayed in bp, kb, mb or gb units. Note that most display
parameters for the \code{GenomeAxisTrack} are ignored when a scale is
used insterad of the full axis. In particular, only the parameters
\code{exponent}, \code{alpha}, \code{lwd}, \code{col}, \code{cex},
\code{distFromAxis} and \code{labelPos} are used.
}
\value{
The return value of the constructor function is a new object of class
\code{GenomeAxisTrack}.
}
\section{Slots}{
\describe{
\item{\code{range}:}{Object of class \code{\linkS4class{GRanges}},
highlighted on the axis. }
\item{\code{dp}:}{Object of class
\code{\linkS4class{DisplayPars}}, inherited from class
\code{\linkS4class{GdObject}}}
\item{\code{name}:}{Object of class \code{"character"}, inherited
from class \code{\linkS4class{GdObject}}}
\item{\code{imageMap}:}{Object of class
\code{\linkS4class{ImageMap}}, inherited from class
\code{\linkS4class{GdObject}}}
}
}
\section{Extends}{
Class \code{"\linkS4class{GdObject}"}, directly.
}
\section{Methods}{
In the following code chunks, \code{obj} is considered to be an object of class \code{GenomeAxisTrack}.
\bold{\emph{Exported in the name space:}}
\describe{
\item{[}{\code{signature(x="GenomeAxisTrack")}: subset the
\code{GRanges} object in the \code{range} slot. For most
applications, the \code{subset} method may be more appropriate.
\emph{Additional Arguments:}
\describe{
	\item{}{\code{i}: subsetting indices.}
}
\emph{Examples:}
\describe{
\item{}{\code{obj[1:5]}}
}
}
\item{start, end, width}{\code{signature(x="GenomeAxisTrack")}: the
start or end coordinates of the track items, or their width in
genomic coordinates.
\emph{Usage:}
\code{start(x)}
\code{end(x)}
\code{width(x)}
\emph{Examples:}
\describe{
\item{}{\code{start(obj)}}
\item{}{\code{end(obj)}}
\item{}{\code{width(obj)}}
}
}
\item{range}{\code{signature(x="GenomeAxisTrack")}: return the genomic
coordinates for the track as an object of class
\code{\linkS4class{IRanges}}.
\emph{Usage:}
\code{range(x)}
\emph{Examples:}
\describe{
\item{}{\code{range(obj)}}
}
}
\item{ranges}{\code{signature(x="GenomeAxisTrack")}: return the genomic
coordinates for the track along with all additional annotation
information as an object of class \code{\linkS4class{GRanges}}.
\emph{Usage:}
\code{ranges(x)}
\emph{Examples:}
\describe{
\item{}{\code{ranges(obj)}}
}
}
\item{strand}{\code{signature(x="GenomeAxisTrack")}: return a vector of
strand specifiers for all track items, in the form '+' for the
Watson strand, '-' for the Crick strand or '*' for either of the
two.
\emph{Usage:}
\code{strand(x)}
\emph{Examples:}
\describe{
\item{}{\code{strand(obj)}}
}
}
\item{values}{\code{signature(x="GenomeAxisTrack")}: return all
additional annotation information except for the genomic coordinates
for the track items.
\emph{Usage:}
\code{values(x)}
\emph{Examples:}
\describe{
\item{}{\code{values(obj)}}
}
}
\item{subset}{\code{signature(x="GenomeAxisTrack")}: subset a
\code{GenomeAxisTrack} by coordinates and sort if necessary.
\emph{Usage:}
\code{subset(x, from, to, sort=FALSE, ...)}
\emph{Additional Arguments:}
\describe{
\item{}{\code{from}, \code{to}: the coordinates range to subset
to.}
\item{}{\code{sort}: sort the object after subsetting. Usually
not necessary.}
\item{}{\code{\dots}: additional arguments are ignored.}
}
\emph{Examples:}
\describe{
\item{}{\code{subset(obj, from=10, to=20, sort=TRUE)}}
}
}
\item{length}{\code{signature(x="GenomeAxisTrack")}: return the
number of items stored in the \code{ranges} slot.
\emph{Usage:}
\code{length(x)}
\emph{Examples:}
\describe{
\item{}{\code{length(obj)}}
}
}
}
\emph{Internal methods:}
\describe{
\item{drawGD}{\code{signature(GdObject="GenomeAxisTrack")}: the
workhorse function to plot the object.
\emph{Usage:}
\code{drawGD(GdObject, minBase, maxBase, prepare=FALSE,
subset=TRUE, ...)}
\emph{Additional Arguments:}
\describe{
\item{}{\code{minBase}, \code{maxBase}: the coordinate range to
plot.}
\item{}{\code{prepare}: run method in preparation or in
production mode.}
\item{}{\code{subset}: subset the object to the visible region
or skip the potentially expensive subsetting operation.}
\item{}{\code{\dots}: all further arguments are ignored.}
}
\emph{Examples:}
\describe{
\item{}{\code{Gviz:::drawGD(obj)}}
\item{}{\code{Gviz:::drawGD(obj, minBase=1, maxBase=100)}}
\item{}{\code{Gviz:::drawGD(obj, prepare=TRUE,
subset=FALSE)}}
}
}
\item{collapseTrack}{\code{signature(GdObject="GenomeAxisTrack")}:
preprocess the track before plotting. This will collapse
overlapping track items based on the available resolution and
increase the width and height of all track objects to a minimum
value to avoid rendering issues. See \code{\link{collapsing}} for
details.
\emph{Usage:}
\code{collapseTrack(GdObject, diff=.pxResolution(coord="x"))}
\emph{Additional Arguments:}
\describe{
\item{}{\code{diff}: the minimum pixel width to display,
everything below that will be inflated to a width of
\code{diff}.}
}
\emph{Examples:}
\describe{
\item{}{\code{Gviz:::collapseTrack(obj)}}
}
}
\item{initialize}{\code{signature(.Object="GenomeAxisTrack")}:
initialize the object }
\item{show}{\code{signature(object="GenomeAxisTrack")}: show a
human-readable summary of the object }
}
\bold{\emph{Inherited:}}
\describe{
\item{displayPars}{\code{signature(x="GenomeAxisTrack", name="character")}:
list the value of the display parameter \code{name}. See
\code{\link{settings}} for details on display parameters and
customization.
\emph{Usage:}
\code{displayPars(x, name)}
\emph{Examples:}
\describe{
\item{}{\code{displayPars(obj, "col")}}
}
}
\item{displayPars}{\code{signature(x="GenomeAxisTrack", name="missing")}:
list the value of all available display parameters. See
\code{\link{settings}} for details on display parameters and
customization.
\emph{Examples:}
\describe{
\item{}{\code{displayPars(obj)}}
}
}
\item{getPar}{\code{signature(x="GenomeAxisTrack", name="character")}:
alias for the \code{displayPars} method. See
\code{\link{settings}} for details on display parameters and
customization.
\emph{Usage:}
\code{getPar(x, name)}
\emph{Examples:}
\describe{
\item{}{\code{getPar(obj, "col")}}
}
}
\item{getPar}{\code{signature(x="GenomeAxisTrack", name="missing")}: alias
for the \code{displayPars} method. See \code{\link{settings}} for
details on display parameters and customization.
\emph{Examples:}
\describe{
\item{}{\code{getPar(obj)}}
}
}
\item{displayPars<-}{\code{signature(x="GenomeAxisTrack", value="list")}:
set display parameters using the values of the named list in
\code{value}. See \code{\link{settings}} for details on display
parameters and customization.
\emph{Usage:}
\code{displayPars<-(x, value)}
\emph{Examples:}
\describe{
\item{}{\code{displayPars(obj) <- list(col="red", lwd=2)}}
}
}
\item{setPar}{\code{signature(x="GenomeAxisTrack", value="character")}: set
the single display parameter \code{name} to \code{value}. Note
that display parameters in the \code{GenomeAxisTrack} class are
      pass-by-reference, so no re-assignment to the symbol \code{obj} is
necessary. See \code{\link{settings}} for details on display
parameters and customization.
\emph{Usage:}
\code{setPar(x, name, value)}
\emph{Additional Arguments:}
\describe{
\item{}{\code{name}: the name of the display parameter to set.}
}
\emph{Examples:}
\describe{
\item{}{\code{setPar(obj, "col", "red")}}
}
}
\item{setPar}{\code{signature(x="GenomeAxisTrack", value="list")}: set
display parameters by the values of the named list in
\code{value}. Note that display parameters in the \code{GenomeAxisTrack}
      class are pass-by-reference, so no re-assignment to the symbol
\code{obj} is necessary. See \code{\link{settings}} for details on
display parameters and customization.
\emph{Examples:}
\describe{
\item{}{\code{setPar(obj, list(col="red", lwd=2))}}
}
}
\item{group}{\code{signature(GdObject="GenomeAxisTrack")}: return
grouping information for the individual items in the track. Unless
      overwritten in one of the sub-classes, this usually returns
\code{NULL}.
\emph{Usage:}
\code{group(GdObject)}
\emph{Examples:}
\describe{
\item{}{\code{group(obj)}}
}
}
\item{names}{\code{signature(x="GenomeAxisTrack")}: return the value of
the \code{name} slot.
\emph{Usage:}
\code{names(x)}
\emph{Examples:}
\describe{
\item{}{\code{names(obj)}}
}
}
\item{names<-}{\code{signature(x="GenomeAxisTrack", value="character")}:
set the value of the \code{name} slot.
\emph{Usage:}
\code{names<-(x, value)}
\emph{Examples:}
\describe{
\item{}{\code{names(obj) <- "foo"}}
}
}
\item{coords}{\code{signature(ImageMap="GenomeAxisTrack")}: return the
coordinates from the internal image map.
\emph{Usage:}
\code{coords(ImageMap)}
\emph{Examples:}
\describe{
\item{}{\code{coords(obj)}}
}
}
\item{tags}{\code{signature(x="GenomeAxisTrack")}: return the tags from the
internal image map.
\emph{Usage:}
\code{tags(x)}
\emph{Examples:}
\describe{
\item{}{\code{tags(obj)}}
}
}
\item{drawAxis}{\code{signature(GdObject="GenomeAxisTrack")}: add a
y-axis to the title panel of a track if necessary. Unless
      overwritten in one of the sub-classes this usually does not plot
anything and returns \code{NULL}.
\emph{Usage:}
\code{drawAxis(x, ...)}
\emph{Additional Arguments:}
\describe{
\item{}{\code{\dots}: all further arguments are ignored.}
}
\emph{Examples:}
\describe{
\item{}{\code{Gviz:::drawAxis(obj)}}
}
}
\item{drawGrid}{\code{signature(GdObject="GenomeAxisTrack")}: superpose a
grid on top of a track if necessary. Unless overwritten in one of
      the sub-classes this usually does not plot anything and returns
\code{NULL}.
\emph{Usage:}
\code{drawGrid(GdObject, ...)}
\emph{Additional Arguments:}
\describe{
\item{}{\code{\dots}: additional arguments are ignored.}
}
\emph{Examples:}
\describe{
\item{}{\code{Gviz:::drawGrid(obj)}}
}
}
}
}
\section{Display Parameters}{
The following display parameters are set for objects of class
\code{GenomeAxisTrack} upon instantiation, unless one or more of them
have already been set by one of the optional sub-class initializers,
which always get precedence over these global defaults. See
\code{\link{settings}} for details on setting graphical parameters
for tracks. \describe{
\item{}{\code{add35=FALSE}: Logical scalar. Add 3' to 5' direction
indicators.}
\item{}{\code{add53=FALSE}: Logical scalar. Add 5' to 3' direction
indicators.}
\item{}{\code{background.title="transparent"}: Character scalar.
The background color for the title panel. Defaults to omit the
background.}
\item{}{\code{cex=0.8}: Numeric scalar. The overall font expansion
factor for the axis annotation text.}
\item{}{\code{cex.id=0.7}: Numeric scalar. The text size for the
optional range annotation.}
\item{}{\code{col="darkgray"}: Character scalar. The color for the
axis lines and tickmarks.}
\item{}{\code{col.id="white"}: Character scalar. The text color
for the optional range annotation.}
\item{}{\code{col.range="cornsilk4"}: Character scalar. The border
color for highlighted regions on the axis.}
\item{}{\code{distFromAxis=1}: Numeric scalar. Control the distance
of the axis annotation from the tick marks.}
\item{}{\code{exponent=NULL}: Numeric scalar. The exponent for the
    axis coordinates, e.g., 3 means kb (10^3), 6 means mb (10^6),
    9 means gb (10^9), etc. The default
    is to automatically determine the optimal exponent.}
\item{}{\code{fill.range="cornsilk3"}: Character scalar. The fill
color for highlighted regions on the axis.}
\item{}{\code{fontcolor="#808080"}: Character scalar. The font
color for the axis annotation text.}
\item{}{\code{fontsize=10}: Numeric scalar. Font size for the axis
annotation text in points.}
\item{}{\code{labelPos="alternating"}: Character vector, one in
"alternating", "revAlternating", "above" or "below". The vertical
positioning of the axis labels. If \code{scale} is not \code{NULL},
the possible values are "above", "below" and "beside". }
\item{}{\code{littleTicks=FALSE}: Logical scalar. Add more fine-grained
tick marks.}
\item{}{\code{lwd=2}: Numeric scalar. The line width for the axis
    elements.}
\item{}{\code{showId=FALSE}: Logical scalar. Show the optional
range highlighting annotation.}
\item{}{\code{showTitle=FALSE}: Logical scalar. Plot a title panel.
Defaults to omit the title panel.}
\item{}{\code{size=NULL}: Numeric scalar. The relative size of the
track. Can be overridden in the \code{\link{plotTracks}} function.
Defaults to the ideal size based on the other track settings.}
\item{}{\code{scale=NULL}: Numeric scalar. If not \code{NULL} a
small scale is drawn instead of the full axis, if the value is
between 0 and 1 it is interpreted as a fraction of the current
plotting region, otherwise as an absolute length value in genomic
coordinates.}
}
Additional display parameters are being inherited from the respective
parent classes. Note that not all of them may have an effect on the
plotting of \code{GenomeAxisTrack} objects.
\describe{
\item{}{\code{\linkS4class{GdObject}}:
\describe{
\item{}{\code{alpha=1}: Numeric scalar. The transparency for
all track items.}
\item{}{\code{background.panel="transparent"}: Integer or
character scalar. The background color of the content panel.}
\item{}{\code{cex.axis=NULL}: Numeric scalar. The expansion
factor for the axis annotation. Defaults to \code{NULL}, in
which case it is computed based on the available space.}
\item{}{\code{cex.title=NULL}: Numeric scalar. The expansion
      factor for the title panel. This affects the fontsize of both
the title and the axis, if any. Defaults to \code{NULL},
which means that the text size is automatically adjusted to
the available space.}
\item{}{\code{col.axis="white"}: Integer or character scalar.
The font and line color for the y axis, if any.}
\item{}{\code{col.frame="lightgray"}: Integer or character
scalar. The line color used for the panel frame, if
\code{frame==TRUE}}
\item{}{\code{col.grid="#808080"}: Integer or character scalar.
Default line color for grid lines, both when \code{type=="g"}
in \code{\link{DataTrack}}s and when display parameter
\code{grid==TRUE}.}
\item{}{\code{col.line=NULL}: Integer or character scalar.
Default colors for plot lines. Usually the same as the global
\code{col} parameter.}
\item{}{\code{col.symbol=NULL}: Integer or character scalar.
Default colors for plot symbols. Usually the same as the
global \code{col} parameter.}
\item{}{\code{col.title="white"}: Integer or character scalar.
The font color for the title panels.}
      \item{}{\code{collapse=TRUE}: Boolean controlling whether to
	  collapse the content of the track to accommodate the minimum
current device resolution. See \code{\link{collapsing}} for
details.}
\item{}{\code{fill="lightgray"}: Integer or character scalar.
Default fill color setting for all plotting elements, unless
there is a more specific control defined elsewhere.}
\item{}{\code{fontface=1}: Integer or character scalar. The
font face for all text.}
\item{}{\code{fontface.title=2}: Integer or character scalar.
The font face for the title panels.}
\item{}{\code{fontfamily="sans"}: Integer or character scalar.
The font family for all text.}
\item{}{\code{fontfamily.title="sans"}: Integer or character
scalar. The font family for the title panels.}
\item{}{\code{frame=FALSE}: Boolean. Draw a frame around the
track when plotting.}
\item{}{\code{grid=FALSE}: Boolean, switching on/off the plotting
of a grid.}
\item{}{\code{h=-1}: Integer scalar. Parameter controlling the
number of horizontal grid lines, see \code{\link{panel.grid}}
for details.}
\item{}{\code{lineheight=1}: Numeric scalar. The font line
height for all text.}
\item{}{\code{lty="solid"}: Numeric scalar. Default line type
setting for all plotting elements, unless there is a more
specific control defined elsewhere.}
\item{}{\code{lty.grid="solid"}: Integer or character scalar.
Default line type for grid lines, both when \code{type=="g"}
in \code{\link{DataTrack}}s and when display parameter
\code{grid==TRUE}.}
\item{}{\code{lwd.grid=1}: Numeric scalar. Default line width
for grid lines, both when \code{type=="g"} in \code{\link{DataTrack}}s
and when display parameter \code{grid==TRUE}.}
\item{}{\code{min.distance=1}: Numeric scalar. The minimum
pixel distance before collapsing range items, only if
\code{collapse==TRUE}. See \code{\link{collapsing}} for details.}
\item{}{\code{min.height=3}: Numeric scalar. The minimum range
height in pixels to display. All ranges are expanded to this
size in order to avoid rendering issues. See \code{\link{collapsing}}
for details.}
\item{}{\code{min.width=1}: Numeric scalar. The minimum range
width in pixels to display. All ranges are expanded to this
size in order to avoid rendering issues. See \code{\link{collapsing}}
for details.}
\item{}{\code{showAxis=TRUE}: Boolean controlling whether to
plot a y axis (only applies to track types where axes are
implemented).}
\item{}{\code{v=-1}: Integer scalar. Parameter controlling the
number of vertical grid lines, see \code{\link{panel.grid}}
for details.}
}
}
}
}
\author{Florian Hahne}
\seealso{
\code{\linkS4class{AnnotationTrack}}
\code{\linkS4class{DisplayPars}}
\code{\linkS4class{GdObject}}
\code{\linkS4class{GRanges}}
\code{\linkS4class{ImageMap}}
\code{\linkS4class{IRanges}}
\code{\linkS4class{RangeTrack}}
\code{\linkS4class{StackedTrack}}
\code{\link{collapsing}}
\code{\link{DataTrack}}
\code{\link{grouping}}
\code{\link{panel.grid}}
\code{\link{plotTracks}}
\code{\link{settings}}
}
\examples{
## Construct object
axTrack <- GenomeAxisTrack(name="Axis",
range=IRanges::IRanges(start=c(100, 300, 800), end=c(150, 400, 1000)))
\dontshow{
## For some annoying reason the postscript device does not know about
## the sans font
if(!interactive())
{
font <- ps.options()$family
displayPars(axTrack) <- list(fontfamily=font, fontfamily.title=font)
}
}
## Plotting
plotTracks(axTrack, from=0, to=1100)
## Track names
names(axTrack)
names(axTrack) <- "foo"
## Subsetting and splitting
subTrack <- subset(axTrack, from=0, to=500)
length(subTrack)
subTrack[1]
split(axTrack, c(1,1,2))
## Accessors
start(axTrack)
end(axTrack)
width(axTrack)
strand(axTrack)
range(axTrack)
ranges(axTrack)
## Annotation
values(axTrack)
## Grouping
group(axTrack)
## HTML image map
coords(axTrack)
tags(axTrack)
axTrack <- plotTracks(axTrack)$foo
coords(axTrack)
tags(axTrack)
## adding an axis to another track
data(cyp2b10)
grTrack <- GeneRegionTrack(start=26682683, end=26711643,
rstart=cyp2b10$start, rends=cyp2b10$end, chromosome=7, genome="mm9",
transcript=cyp2b10$transcript, gene=cyp2b10$gene, symbol=cyp2b10$symbol,
name="Cyp2b10", strand=cyp2b10$strand)
plotTracks(list(grTrack, GenomeAxisTrack()))
plotTracks(list(grTrack, GenomeAxisTrack(scale=0.1)))
plotTracks(list(grTrack, GenomeAxisTrack(scale=5000)))
plotTracks(list(grTrack, GenomeAxisTrack(scale=0.5, labelPos="below")))
}
\keyword{classes}
| /inst/Rpkgs/Gviz/man/GenomeAxisTrack-class.Rd | no_license | lianos/GvizX | R | false | false | 24,666 | rd | \name{GenomeAxisTrack-class}
\Rdversion{1.1}
\docType{class}
\alias{GenomeAxisTrack-class}
\alias{GenomeAxisTrack}
\alias{[,GenomeAxisTrack-method}
\alias{[,GenomeAxisTrack,ANY,ANY-method}
\alias{collapseTrack,GenomeAxisTrack-method}
\alias{drawGD,GenomeAxisTrack-method}
\alias{end,GenomeAxisTrack-method}
\alias{end<-,GenomeAxisTrack-method}
\alias{initialize,GenomeAxisTrack-method}
\alias{length,GenomeAxisTrack-method}
\alias{range,GenomeAxisTrack-method}
\alias{ranges,GenomeAxisTrack-method}
\alias{ranges<-,GenomeAxisTrack-method}
\alias{show,GenomeAxisTrack-method}
\alias{start,GenomeAxisTrack-method}
\alias{start<-,GenomeAxisTrack-method}
\alias{strand,GenomeAxisTrack-method}
\alias{subset,GenomeAxisTrack-method}
\alias{values,GenomeAxisTrack-method}
\alias{width,GenomeAxisTrack-method}
\title{GenomeAxisTrack class and methods}
\description{A class representing a customizable genomic axis.}
\section{Objects from the class}{
Objects can be created using the constructor function
\code{GenomeAxisTrack}.
}
\usage{
GenomeAxisTrack(range=NULL, name="Axis", id, ...)
}
\arguments{
\item{range}{Optional \code{\linkS4class{GRanges}} or
\code{\linkS4class{IRanges}} object to highlight certain regions on
the axis.}
\item{name}{Character scalar of the track's name used in the title
panel when plotting.}
\item{id}{A character vector of the same length as \code{range}
containing identifiers for the ranges. If missing, the constructor
will try to extract the ids from \code{names(range)}.}
\item{\dots}{Additional items which will all be interpreted as further
display parameters. See \code{\link{settings}} and the "Display
Parameters" section below for details.}
}
\details{
A \code{GenomeAxisTrack} can be customized using the familiar display
parameters. By providing a \code{GRanges} or \code{IRanges} object to
the constructor, ranges on the axis can be further highlighted.\\
With the \code{scale} display parameter, a small scale indicator can
be shown instead of the entire genomic axis. The scale can either be
provided as a fraction of the plotting region (it will be rounded to
the nearest human readable absolute value) or as an absolute value and
is always displayed in bp, kb, mb or gb units. Note that most display
parameters for the \code{GenomeAxisTrack} are ignored when a scale is
used insterad of the full axis. In particular, only the parameters
\code{exponent}, \code{alpha}, \code{lwd}, \code{col}, \code{cex},
\code{distFromAxis} and \code{labelPos} are used.
}
\value{
The return value of the constructor function is a new object of class
\code{GenomeAxisTrack}.
}
\section{Slots}{
\describe{
\item{\code{range}:}{Object of class \code{\linkS4class{GRanges}},
highlighted on the axis. }
\item{\code{dp}:}{Object of class
\code{\linkS4class{DisplayPars}}, inherited from class
\code{\linkS4class{GdObject}}}
\item{\code{name}:}{Object of class \code{"character"}, inherited
from class \code{\linkS4class{GdObject}}}
\item{\code{imageMap}:}{Object of class
\code{\linkS4class{ImageMap}}, inherited from class
\code{\linkS4class{GdObject}}}
}
}
\section{Extends}{
Class \code{"\linkS4class{GdObject}"}, directly.
}
\section{Methods}{
In the following code chunks, \code{obj} is considered to be an object of class \code{GenomeAxisTrack}.
\bold{\emph{Exported in the name space:}}
\describe{
\item{[}{\code{signature(x="GenomeAxisTrack")}: subset the
\code{GRanges} object in the \code{range} slot. For most
applications, the \code{subset} method may be more appropriate.
\emph{Additional Arguments:}
\describe{
\item{}{\code{i}: subsetting incides.}
}
\emph{Examples:}
\describe{
\item{}{\code{obj[1:5]}}
}
}
\item{start, end, width}{\code{signature(x="GenomeAxisTrack")}: the
start or end coordinates of the track items, or their width in
genomic coordinates.
\emph{Usage:}
\code{start(x)}
\code{end(x)}
\code{width(x)}
\emph{Examples:}
\describe{
\item{}{\code{start(obj)}}
\item{}{\code{end(obj)}}
\item{}{\code{width(obj)}}
}
}
\item{range}{\code{signature(x="GenomeAxisTrack")}: return the genomic
coordinates for the track as an object of class
\code{\linkS4class{IRanges}}.
\emph{Usage:}
\code{range(x)}
\emph{Examples:}
\describe{
\item{}{\code{range(obj)}}
}
}
\item{ranges}{\code{signature(x="GenomeAxisTrack")}: return the genomic
coordinates for the track along with all additional annotation
information as an object of class \code{\linkS4class{GRanges}}.
\emph{Usage:}
\code{ranges(x)}
\emph{Examples:}
\describe{
\item{}{\code{ranges(obj)}}
}
}
\item{strand}{\code{signature(x="GenomeAxisTrack")}: return a vector of
strand specifiers for all track items, in the form '+' for the
Watson strand, '-' for the Crick strand or '*' for either of the
two.
\emph{Usage:}
\code{strand(x)}
\emph{Examples:}
\describe{
\item{}{\code{strand(obj)}}
}
}
\item{values}{\code{signature(x="GenomeAxisTrack")}: return all
additional annotation information except for the genomic coordinates
for the track items.
\emph{Usage:}
\code{values(x)}
\emph{Examples:}
\describe{
\item{}{\code{values(obj)}}
}
}
\item{subset}{\code{signature(x="GenomeAxisTrack")}: subset a
\code{GenomeAxisTrack} by coordinates and sort if necessary.
\emph{Usage:}
\code{subset(x, from, to, sort=FALSE, ...)}
\emph{Additional Arguments:}
\describe{
\item{}{\code{from}, \code{to}: the coordinates range to subset
to.}
\item{}{\code{sort}: sort the object after subsetting. Usually
not necessary.}
\item{}{\code{\dots}: additional arguments are ignored.}
}
\emph{Examples:}
\describe{
\item{}{\code{subset(obj, from=10, to=20, sort=TRUE)}}
}
}
\item{length}{\code{signature(x="GenomeAxisTrack")}: return the
number of items stored in the \code{ranges} slot.
\emph{Usage:}
\code{length(x)}
\emph{Examples:}
\describe{
\item{}{\code{length(obj)}}
}
}
}
\emph{Internal methods:}
\describe{
\item{drawGD}{\code{signature(GdObject="GenomeAxisTrack")}: the
workhorse function to plot the object.
\emph{Usage:}
\code{drawGD(GdObject, minBase, maxBase, prepare=FALSE,
subset=TRUE, ...)}
\emph{Additional Arguments:}
\describe{
\item{}{\code{minBase}, \code{maxBase}: the coordinate range to
plot.}
\item{}{\code{prepare}: run method in preparation or in
production mode.}
\item{}{\code{subset}: subset the object to the visible region
or skip the potentially expensive subsetting operation.}
\item{}{\code{\dots}: all further arguments are ignored.}
}
\emph{Examples:}
\describe{
\item{}{\code{Gviz:::drawGD(obj)}}
\item{}{\code{Gviz:::drawGD(obj, minBase=1, maxBase=100)}}
\item{}{\code{Gviz:::drawGD(obj, prepare=TRUE,
subset=FALSE)}}
}
}
\item{collapseTrack}{\code{signature(GdObject="GenomeAxisTrack")}:
preprocess the track before plotting. This will collapse
overlapping track items based on the available resolution and
increase the width and height of all track objects to a minimum
value to avoid rendering issues. See \code{\link{collapsing}} for
details.
\emph{Usage:}
\code{collapseTrack(GdObject, diff=.pxResolution(coord="x"))}
\emph{Additional Arguments:}
\describe{
\item{}{\code{diff}: the minimum pixel width to display,
everything below that will be inflated to a width of
\code{diff}.}
}
\emph{Examples:}
\describe{
\item{}{\code{Gviz:::collapseTrack(obj)}}
}
}
\item{initialize}{\code{signature(.Object="GenomeAxisTrack")}:
initialize the object }
\item{show}{\code{signature(object="GenomeAxisTrack")}: show a
human-readable summary of the object }
}
\bold{\emph{Inherited:}}
\describe{
\item{displayPars}{\code{signature(x="GenomeAxisTrack", name="character")}:
list the value of the display parameter \code{name}. See
\code{\link{settings}} for details on display parameters and
customization.
\emph{Usage:}
\code{displayPars(x, name)}
\emph{Examples:}
\describe{
\item{}{\code{displayPars(obj, "col")}}
}
}
\item{displayPars}{\code{signature(x="GenomeAxisTrack", name="missing")}:
list the value of all available display parameters. See
\code{\link{settings}} for details on display parameters and
customization.
\emph{Examples:}
\describe{
\item{}{\code{displayPars(obj)}}
}
}
\item{getPar}{\code{signature(x="GenomeAxisTrack", name="character")}:
alias for the \code{displayPars} method. See
\code{\link{settings}} for details on display parameters and
customization.
\emph{Usage:}
\code{getPar(x, name)}
\emph{Examples:}
\describe{
\item{}{\code{getPar(obj, "col")}}
}
}
\item{getPar}{\code{signature(x="GenomeAxisTrack", name="missing")}: alias
for the \code{displayPars} method. See \code{\link{settings}} for
details on display parameters and customization.
\emph{Examples:}
\describe{
\item{}{\code{getPar(obj)}}
}
}
\item{displayPars<-}{\code{signature(x="GenomeAxisTrack", value="list")}:
set display parameters using the values of the named list in
\code{value}. See \code{\link{settings}} for details on display
parameters and customization.
\emph{Usage:}
\code{displayPars<-(x, value)}
\emph{Examples:}
\describe{
\item{}{\code{displayPars(obj) <- list(col="red", lwd=2)}}
}
}
\item{setPar}{\code{signature(x="GenomeAxisTrack", value="character")}: set
the single display parameter \code{name} to \code{value}. Note
that display parameters in the \code{GenomeAxisTrack} class are
pass-by-reference, so no re-assignmnet to the symbol \code{obj} is
necessary. See \code{\link{settings}} for details on display
parameters and customization.
\emph{Usage:}
\code{setPar(x, name, value)}
\emph{Additional Arguments:}
\describe{
\item{}{\code{name}: the name of the display parameter to set.}
}
\emph{Examples:}
\describe{
\item{}{\code{setPar(obj, "col", "red")}}
}
}
\item{setPar}{\code{signature(x="GenomeAxisTrack", value="list")}: set
display parameters by the values of the named list in
\code{value}. Note that display parameters in the \code{GenomeAxisTrack}
      class are pass-by-reference, so no re-assignment to the symbol
\code{obj} is necessary. See \code{\link{settings}} for details on
display parameters and customization.
\emph{Examples:}
\describe{
\item{}{\code{setPar(obj, list(col="red", lwd=2))}}
}
}
\item{group}{\code{signature(GdObject="GenomeAxisTrack")}: return
grouping information for the individual items in the track. Unless
overwritten in one of the sub-classes, this usualy returns
\code{NULL}.
\emph{Usage:}
\code{group(GdObject)}
\emph{Examples:}
\describe{
\item{}{\code{group(obj)}}
}
}
\item{names}{\code{signature(x="GenomeAxisTrack")}: return the value of
the \code{name} slot.
\emph{Usage:}
\code{names(x)}
\emph{Examples:}
\describe{
\item{}{\code{names(obj)}}
}
}
\item{names<-}{\code{signature(x="GenomeAxisTrack", value="character")}:
set the value of the \code{name} slot.
\emph{Usage:}
\code{names<-(x, value)}
\emph{Examples:}
\describe{
\item{}{\code{names(obj) <- "foo"}}
}
}
\item{coords}{\code{signature(ImageMap="GenomeAxisTrack")}: return the
coordinates from the internal image map.
\emph{Usage:}
\code{coords(ImageMap)}
\emph{Examples:}
\describe{
\item{}{\code{coords(obj)}}
}
}
\item{tags}{\code{signature(x="GenomeAxisTrack")}: return the tags from the
internal image map.
\emph{Usage:}
\code{tags(x)}
\emph{Examples:}
\describe{
\item{}{\code{tags(obj)}}
}
}
\item{drawAxis}{\code{signature(GdObject="GenomeAxisTrack")}: add a
y-axis to the title panel of a track if necessary. Unless
      overwritten in one of the sub-classes this usually does not plot
anything and returns \code{NULL}.
\emph{Usage:}
\code{drawAxis(x, ...)}
\emph{Additional Arguments:}
\describe{
\item{}{\code{\dots}: all further arguments are ignored.}
}
\emph{Examples:}
\describe{
\item{}{\code{Gviz:::drawAxis(obj)}}
}
}
\item{drawGrid}{\code{signature(GdObject="GenomeAxisTrack")}: superpose a
grid on top of a track if necessary. Unless overwritten in one of
      the sub-classes this usually does not plot anything and returns
\code{NULL}.
\emph{Usage:}
\code{drawGrid(GdObject, ...)}
\emph{Additional Arguments:}
\describe{
\item{}{\code{\dots}: additional arguments are ignored.}
}
\emph{Examples:}
\describe{
\item{}{\code{Gviz:::drawGrid(obj)}}
}
}
}
}
\section{Display Parameters}{
The following display parameters are set for objects of class
\code{GenomeAxisTrack} upon instantiation, unless one or more of them
have already been set by one of the optional sub-class initializers,
which always get precedence over these global defaults. See
\code{\link{settings}} for details on setting graphical parameters
for tracks. \describe{
\item{}{\code{add35=FALSE}: Logical scalar. Add 3' to 5' direction
indicators.}
\item{}{\code{add53=FALSE}: Logical scalar. Add 5' to 3' direction
indicators.}
\item{}{\code{background.title="transparent"}: Character scalar.
The background color for the title panel. Defaults to omit the
background.}
\item{}{\code{cex=0.8}: Numeric scalar. The overall font expansion
factor for the axis annotation text.}
\item{}{\code{cex.id=0.7}: Numeric scalar. The text size for the
optional range annotation.}
\item{}{\code{col="darkgray"}: Character scalar. The color for the
axis lines and tickmarks.}
\item{}{\code{col.id="white"}: Character scalar. The text color
for the optional range annotation.}
\item{}{\code{col.range="cornsilk4"}: Character scalar. The border
color for highlighted regions on the axis.}
\item{}{\code{distFromAxis=1}: Numeric scalar. Control the distance
of the axis annotation from the tick marks.}
\item{}{\code{exponent=NULL}: Numeric scalar. The exponent for the
axis coordinates, e.g., 3 means mb, 6 means gb, etc. The default
is to automatically determine the optimal exponent.}
\item{}{\code{fill.range="cornsilk3"}: Character scalar. The fill
color for highlighted regions on the axis.}
\item{}{\code{fontcolor="#808080"}: Character scalar. The font
color for the axis annotation text.}
\item{}{\code{fontsize=10}: Numeric scalar. Font size for the axis
annotation text in points.}
\item{}{\code{labelPos="alternating"}: Character vector, one in
"alternating", "revAlternating", "above" or "below". The vertical
positioning of the axis labels. If \code{scale} is not \code{NULL},
the possible values are "above", "below" and "beside". }
\item{}{\code{littleTicks=FALSE}: Logical scalar. Add more fine-grained
tick marks.}
\item{}{\code{lwd=2}: Numeric scalar. The line width for the axis
    elements.}
\item{}{\code{showId=FALSE}: Logical scalar. Show the optional
range highlighting annotation.}
\item{}{\code{showTitle=FALSE}: Logical scalar. Plot a title panel.
Defaults to omit the title panel.}
\item{}{\code{size=NULL}: Numeric scalar. The relative size of the
track. Can be overridden in the \code{\link{plotTracks}} function.
Defaults to the ideal size based on the other track settings.}
\item{}{\code{scale=NULL}: Numeric scalar. If not \code{NULL} a
small scale is drawn instead of the full axis, if the value is
between 0 and 1 it is interpreted as a fraction of the current
plotting region, otherwise as an absolute length value in genomic
coordinates.}
}
Additional display parameters are being inherited from the respective
parent classes. Note that not all of them may have an effect on the
plotting of \code{GenomeAxisTrack} objects.
\describe{
\item{}{\code{\linkS4class{GdObject}}:
\describe{
\item{}{\code{alpha=1}: Numeric scalar. The transparency for
all track items.}
\item{}{\code{background.panel="transparent"}: Integer or
character scalar. The background color of the content panel.}
\item{}{\code{cex.axis=NULL}: Numeric scalar. The expansion
factor for the axis annotation. Defaults to \code{NULL}, in
which case it is computed based on the available space.}
\item{}{\code{cex.title=NULL}: Numeric scalar. The expansion
        factor for the title panel. This affects the fontsize of both
the title and the axis, if any. Defaults to \code{NULL},
which means that the text size is automatically adjusted to
the available space.}
\item{}{\code{col.axis="white"}: Integer or character scalar.
The font and line color for the y axis, if any.}
\item{}{\code{col.frame="lightgray"}: Integer or character
scalar. The line color used for the panel frame, if
\code{frame==TRUE}}
\item{}{\code{col.grid="#808080"}: Integer or character scalar.
Default line color for grid lines, both when \code{type=="g"}
in \code{\link{DataTrack}}s and when display parameter
\code{grid==TRUE}.}
\item{}{\code{col.line=NULL}: Integer or character scalar.
Default colors for plot lines. Usually the same as the global
\code{col} parameter.}
\item{}{\code{col.symbol=NULL}: Integer or character scalar.
Default colors for plot symbols. Usually the same as the
global \code{col} parameter.}
\item{}{\code{col.title="white"}: Integer or character scalar.
The font color for the title panels.}
      \item{}{\code{collapse=TRUE}: Boolean controlling whether to
        collapse the content of the track to accommodate the minimum
current device resolution. See \code{\link{collapsing}} for
details.}
\item{}{\code{fill="lightgray"}: Integer or character scalar.
Default fill color setting for all plotting elements, unless
there is a more specific control defined elsewhere.}
\item{}{\code{fontface=1}: Integer or character scalar. The
font face for all text.}
\item{}{\code{fontface.title=2}: Integer or character scalar.
The font face for the title panels.}
\item{}{\code{fontfamily="sans"}: Integer or character scalar.
The font family for all text.}
\item{}{\code{fontfamily.title="sans"}: Integer or character
scalar. The font family for the title panels.}
\item{}{\code{frame=FALSE}: Boolean. Draw a frame around the
track when plotting.}
\item{}{\code{grid=FALSE}: Boolean, switching on/off the plotting
of a grid.}
\item{}{\code{h=-1}: Integer scalar. Parameter controlling the
number of horizontal grid lines, see \code{\link{panel.grid}}
for details.}
\item{}{\code{lineheight=1}: Numeric scalar. The font line
height for all text.}
\item{}{\code{lty="solid"}: Numeric scalar. Default line type
setting for all plotting elements, unless there is a more
specific control defined elsewhere.}
\item{}{\code{lty.grid="solid"}: Integer or character scalar.
Default line type for grid lines, both when \code{type=="g"}
in \code{\link{DataTrack}}s and when display parameter
\code{grid==TRUE}.}
\item{}{\code{lwd.grid=1}: Numeric scalar. Default line width
for grid lines, both when \code{type=="g"} in \code{\link{DataTrack}}s
and when display parameter \code{grid==TRUE}.}
\item{}{\code{min.distance=1}: Numeric scalar. The minimum
pixel distance before collapsing range items, only if
\code{collapse==TRUE}. See \code{\link{collapsing}} for details.}
\item{}{\code{min.height=3}: Numeric scalar. The minimum range
height in pixels to display. All ranges are expanded to this
size in order to avoid rendering issues. See \code{\link{collapsing}}
for details.}
\item{}{\code{min.width=1}: Numeric scalar. The minimum range
width in pixels to display. All ranges are expanded to this
size in order to avoid rendering issues. See \code{\link{collapsing}}
for details.}
\item{}{\code{showAxis=TRUE}: Boolean controlling whether to
plot a y axis (only applies to track types where axes are
implemented).}
\item{}{\code{v=-1}: Integer scalar. Parameter controlling the
number of vertical grid lines, see \code{\link{panel.grid}}
for details.}
}
}
}
}
\author{Florian Hahne}
\seealso{
\code{\linkS4class{AnnotationTrack}}
\code{\linkS4class{DisplayPars}}
\code{\linkS4class{GdObject}}
\code{\linkS4class{GRanges}}
\code{\linkS4class{ImageMap}}
\code{\linkS4class{IRanges}}
\code{\linkS4class{RangeTrack}}
\code{\linkS4class{StackedTrack}}
\code{\link{collapsing}}
\code{\link{DataTrack}}
\code{\link{grouping}}
\code{\link{panel.grid}}
\code{\link{plotTracks}}
\code{\link{settings}}
}
\examples{
## Construct object
axTrack <- GenomeAxisTrack(name="Axis",
range=IRanges::IRanges(start=c(100, 300, 800), end=c(150, 400, 1000)))
\dontshow{
## For some annoying reason the postscript device does not know about
## the sans font
if(!interactive())
{
font <- ps.options()$family
displayPars(axTrack) <- list(fontfamily=font, fontfamily.title=font)
}
}
## Plotting
plotTracks(axTrack, from=0, to=1100)
## Track names
names(axTrack)
names(axTrack) <- "foo"
## Subsetting and splitting
subTrack <- subset(axTrack, from=0, to=500)
length(subTrack)
subTrack[1]
split(axTrack, c(1,1,2))
## Accessors
start(axTrack)
end(axTrack)
width(axTrack)
strand(axTrack)
range(axTrack)
ranges(axTrack)
## Annotation
values(axTrack)
## Grouping
group(axTrack)
## HTML image map
coords(axTrack)
tags(axTrack)
axTrack <- plotTracks(axTrack)$foo
coords(axTrack)
tags(axTrack)
## adding an axis to another track
data(cyp2b10)
grTrack <- GeneRegionTrack(start=26682683, end=26711643,
rstart=cyp2b10$start, rends=cyp2b10$end, chromosome=7, genome="mm9",
transcript=cyp2b10$transcript, gene=cyp2b10$gene, symbol=cyp2b10$symbol,
name="Cyp2b10", strand=cyp2b10$strand)
plotTracks(list(grTrack, GenomeAxisTrack()))
plotTracks(list(grTrack, GenomeAxisTrack(scale=0.1)))
plotTracks(list(grTrack, GenomeAxisTrack(scale=5000)))
plotTracks(list(grTrack, GenomeAxisTrack(scale=0.5, labelPos="below")))
}
\keyword{classes}
|
# Naive Bayes classification of adult income (<=50K vs >50K).
# Reads the train/test splits, converts the categorical columns to
# factors, visualises age by salary class, fits a naive Bayes model on
# the training split and evaluates it on the held-out test split.
library(naivebayes)
library(ggplot2)
library(caret)
library(psych)
library(e1071)
# Categorical columns shared by both splits; they are read as character
# (stringsAsFactors = FALSE) and coerced to factors in one pass instead
# of one as.factor() line per column.
factor_cols <- c("workclass", "education", "educationno", "maritalstatus",
                 "occupation", "relationship", "race", "sex", "native", "Salary")
# Read one CSV split and coerce the categorical columns to factors.
read_salary <- function(path) {
  dat <- read.csv(path, stringsAsFactors = FALSE)
  dat[factor_cols] <- lapply(dat[factor_cols], as.factor)
  dat
}
salary_test <- read_salary("C://Users//Gany//Desktop//Naive Bayes//SalaryData_Test.csv")
View(salary_test)
str(salary_test)
class(salary_test)
salary_train <- read_salary("C://Users//Gany//Desktop//Naive Bayes//SalaryData_Train.csv")
View(salary_train)
str(salary_train)
class(salary_train)
#Visualization
# Boxplot of age by salary class. Bare column names (not salary_train$col)
# are used inside aes() so the mapping is resolved against `data`.
ggplot(data = salary_train, aes(x = Salary, y = age, fill = Salary)) +
  geom_boxplot() +
  ggtitle("Box Plot")
# Naive Bayes Model
# Using the bare column name on the LHS: the original formula
# salary_train$Salary ~ . expanded "." to every column of the data,
# including Salary itself, leaking the response into the predictors.
model <- naiveBayes(Salary ~ ., data = salary_train)
model
# Predict on the held-out test split and report accuracy.
model_pred <- predict(model, salary_test)
mean(model_pred == salary_test$Salary)
confusionMatrix(model_pred, salary_test$Salary)
# Here <=50K is misclassified some 1919 are misclassified as >50k and
#811 of >50K are misclassified as <=50K and the positive class is <=50k
#Accuracy of the model is 81.87%
#so majority salary is <=50K | /salary data.R | no_license | Amirthavarsinipriya/Assignments | R | false | false | 2,231 | r | salary_test<-read.csv("C://Users//Gany//Desktop//Naive Bayes//SalaryData_Test.csv" ,stringsAsFactors = F)
# Inspect the test split (salary_test was read from SalaryData_Test.csv above).
View(salary_test)
str(salary_test)
# Convert the categorical test-set columns (read in as character because
# stringsAsFactors = F) to factors so naiveBayes() treats them as
# discrete predictors.
salary_test$educationno <- as.factor(salary_test$educationno)
salary_test$ workclass<-as.factor(salary_test$ workclass)
salary_test$education<-as.factor(salary_test$education)
salary_test$maritalstatus<-as.factor(salary_test$maritalstatus)
salary_test$occupation<-as.factor(salary_test$occupation)
salary_test$relationship<-as.factor(salary_test$relationship)
salary_test$race<-as.factor(salary_test$race)
salary_test$sex<-as.factor(salary_test$sex)
salary_test$native<-as.factor(salary_test$native)
salary_test$Salary<-as.factor(salary_test$Salary)
class(salary_test)
str(salary_test)
# Read the training split; strings kept as character and converted below.
salary_train<-read.csv("C://Users//Gany//Desktop//Naive Bayes//SalaryData_Train.csv",stringsAsFactors = F)
View(salary_train)
str(salary_train)
# Same factor conversion for the training columns.
salary_train$educationno <- as.factor(salary_train$educationno)
salary_train$workclass<-as.factor(salary_train$workclass)
salary_train$education<-as.factor(salary_train$education)
salary_train$maritalstatus<-as.factor(salary_train$maritalstatus)
salary_train$occupation<-as.factor(salary_train$occupation)
salary_train$relationship<-as.factor(salary_train$relationship)
salary_train$race<-as.factor(salary_train$race)
salary_train$sex<-as.factor(salary_train$sex)
salary_train$native<-as.factor(salary_train$native)
salary_train$Salary <-as.factor(salary_train$Salary)
str(salary_train)
class(salary_train)
# Packages for modelling (naivebayes/e1071), plotting (ggplot2) and
# evaluation (caret::confusionMatrix).
library(naivebayes)
library(ggplot2)
library(caret)
library(psych)
library(e1071)
#Visualization
# Plot and ggplot
# Boxplot of age by salary class for the training data.
ggplot(data=salary_train,aes(x=salary_train$Salary, y = salary_train$age, fill = salary_train$Salary)) +
  geom_boxplot() +
  ggtitle("Box Plot")
# Naive Bayes Model
library(e1071)
# NOTE(review): the LHS uses salary_train$Salary rather than the bare
# column name, so "." on the RHS also expands to include Salary itself
# as a predictor -- confirm this is intended.
model <-naiveBayes(salary_train$Salary~.,data=salary_train)
model
# Predict on the held-out test split and report accuracy.
model_pred <- predict(model,salary_test)
mean(model_pred==salary_test$Salary)
confusionMatrix(model_pred,salary_test$Salary)
# Here <=50K is misclassified some 1919 are misclassified as >50k and
#811 of >50K are misclassified as <=50K and the positive class is <=50k
#Accuracy of the model is 81.87%
#so majority salary is <=50K |
# Test runner for the grattan package: attach the suite's dependencies,
# then execute every test under tests/testthat/.
library(testthat)
library(grattan)
library(data.table)
# taxstats is an optional data dependency: attach it only when it is
# installed so the suite still runs without it.
if (requireNamespace("taxstats", quietly = TRUE)){
  library(taxstats)
}
library(dplyr)
library(dtplyr)
library(magrittr)
library(survey)
library(zoo)
# Run the full testthat suite for the package.
test_check("grattan")
| /timings/dc9fc0498de0dfc0227e3741bb85ebfd27ef7601/grattan/tests/testthat.R | no_license | HughParsonage/grattan | R | false | false | 230 | r | library(testthat)
# Dependencies used across the grattan test suite (testthat is attached
# on the preceding line of this file).
library(grattan)
library(data.table)
# Optional data package: only attach when available.
if (requireNamespace("taxstats", quietly = TRUE)){
  library(taxstats)
}
library(dplyr)
library(dtplyr)
library(magrittr)
library(survey)
library(zoo)
# Run the full testthat suite for the package.
test_check("grattan")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prey_survival_simulation.R
\name{survival_simulation_driver}
\alias{survival_simulation_driver}
\title{Run a Full Simulation}
\source{
defaults based on Steel et al. 2020. "Applying the mean free-path length model to juvenile Chinook salmon migrating in the Sacramento River, California"
and Michel et al. 2018. "Non-native fish predator density and molecular-based diet estimates suggest differing effects of predator species on Juvenile Salmon in the San Joaquin River, California"
}
\usage{
survival_simulation_driver(
number_of_fish,
mean_length,
n_transects,
sd_length = (1.7/14) * mean_length,
transect_length = 1000,
lit_zone_size = 5,
channel_width = 100,
grid_size = 15,
reaction_dis = 0.5
)
}
\arguments{
\item{number_of_fish}{number (pos integer) of prey fish desired}
\item{mean_length}{mean length of fish in cm}
\item{n_transects}{integer of transects in the model}
\item{sd_length}{std dev of fish length in cm; default is 1.7 and scales with mean}
\item{transect_length}{length of each transect in meters; default is 1000}
\item{lit_zone_size}{the size of the littoral zone (i.e., nearshore area) in meters; default is 5}
\item{channel_width}{width of the channel in meters; default is 100}
\item{grid_size}{length of side of raster grid in meters; default is 15}
\item{reaction_dis}{maximum distance (in m) away from a predator that can trigger an encounter; default is 0.50}
}
\value{
the proportion of surviving fish
}
\description{
Runs a full simulation with a user-specified number of fish. Users can also adjust fish
mean length and sd, environment size, grid size, and predator reaction distance.
The model runs through the following:
calculates predators and their positions,
calculates grid cells and their encounter probabilities,
calculates a unique path for each fish,
simulates and resolves encounters for each fish in each cell,
determines survival after all fish have gone through each cell.
}
\details{
The return value is the proportion of survivors.
}
\note{
this function can be parallelized; e.g., by setting plan(multisession)
}
\examples{
survival_simulation_driver (number_of_fish = 20, mean_length = 10, n_transects = 5)
}
| /man/survival_simulation_driver.Rd | permissive | mrguyperson/predpackplus | R | false | true | 2,268 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prey_survival_simulation.R
\name{survival_simulation_driver}
\alias{survival_simulation_driver}
\title{Run a Full Simulation}
\source{
defaults based on Steel et al. 2020. "Applying the mean free-path length model to juvenile Chinook salmon migrating in the Sacramento River, California"
and Michel et al. 2018. "Non-native fish predator density and molecular-based diet estimates suggest differing effects of predator species on Juvenile Salmon in the San Joaquin River, California"
}
\usage{
survival_simulation_driver(
number_of_fish,
mean_length,
n_transects,
sd_length = (1.7/14) * mean_length,
transect_length = 1000,
lit_zone_size = 5,
channel_width = 100,
grid_size = 15,
reaction_dis = 0.5
)
}
\arguments{
\item{number_of_fish}{number (pos integer) of prey fish desired}
\item{mean_length}{mean length of fish in cm}
\item{n_transects}{integer of transects in the model}
\item{sd_length}{std dev of fish length in cm; default is 1.7 and scales with mean}
\item{transect_length}{length of each transect in meters; default is 1000}
\item{lit_zone_size}{the size of the littoral zone (i.e., nearshore area) in meters; default is 5}
\item{channel_width}{width of the channel in meters; default is 100}
\item{grid_size}{length of side of raster grid in meters; default is 15}
\item{reaction_dis}{maximum distance (in m) away from a predator that can trigger an encounter; default is 0.50}
}
\value{
the proportion of surviving fish
}
\description{
Runs a full simulation with a user-specified number of fish. Users can also adjust fish
mean length and sd, environment size, grid size, and predator reaction distance.
The model runs through the following:
calculates predators and their positions,
calculates grid cells and their encounter probabilities,
calculates a unique path for each fish,
simulates and resolves encounters for each fish in each cell,
determines survival after all fish have gone through each cell.
}
\details{
The return value is the proportion of survivors.
}
\note{
this function can be parallelized; e.g., by setting plan(multisession)
}
\examples{
survival_simulation_driver (number_of_fish = 20, mean_length = 10, n_transects = 5)
}
|
library(AppliedPredictiveModeling)
library(lmridge)
library(elasticnet)
library(pls)
data("solubility")
| /ICA/HW2/scripts/header.r | no_license | AlefCS/University-Courses | R | false | false | 105 | r | library(AppliedPredictiveModeling)
library(lmridge)
library(elasticnet)
library(pls)
data("solubility")
|
# Not all species were counted in all counts; here we summarize the species present in each count
# read in aerial count data from Stalmans et al Plos One paper
aerial_data <- read_csv(here::here("data", "aerial-count", "stalmans-plosone-data.csv"))
unique(aerial_data$Species)
# change the names to match mine: a single fct_recode() call handles every
# rename, including the two capitalisation variants of "Blue wildebeest",
# instead of one mutate() per species.
aerial_data <- aerial_data %>%
    mutate(Species = fct_recode(Species,
                                "Wildebeest" = "Blue wildebeest",
                                "Wildebeest" = "Blue Wildebeest",
                                "Reedbuck" = "Common reedbuck",
                                "Duiker_common" = "Duiker grey",
                                "Duiker_red" = "Duiker red",
                                "Sable_antelope" = "Sable",
                                "Oribi" = "oribi"))
unique(aerial_data$Species)
# Count records per species in each count and spread to one column per
# count. Shared helper avoids repeating the pipeline for the hex-grid
# subset; the original arrange() with no columns was a no-op and is dropped.
count_by_year <- function(dat) {
  dat %>%
    group_by(Count, Species) %>%
    summarise(Number = n()) %>%
    pivot_wider(names_from = Count, values_from = Number)
}
# count records per year
aerial_summary <- count_by_year(aerial_data)
aerial_summary
write.csv(aerial_summary, "data/species-counts-by-year.csv")
# Only within hex grid cells ----------------------------------------------
hex_summary <- count_by_year(read_csv(here::here("data", "hex-summary.csv")))
| /summarize-species-in-counts.R | no_license | kaitlyngaynor/gorongosa-aerial-comparison | R | false | false | 1,449 | r | # Not all species were counted in all counts; here we summarize the species present in each count
# read in aerial count data from Stalmans et al Plos One paper
aerial_data <- read_csv(here::here("data", "aerial-count", "stalmans-plosone-data.csv"))
unique(aerial_data$Species)
# change the names to match mine: one fct_recode() call performs every
# rename, including both capitalisation variants of "Blue wildebeest".
aerial_data <- aerial_data %>%
    mutate(Species = fct_recode(Species,
                                "Wildebeest" = "Blue wildebeest",
                                "Wildebeest" = "Blue Wildebeest",
                                "Reedbuck" = "Common reedbuck",
                                "Duiker_common" = "Duiker grey",
                                "Duiker_red" = "Duiker red",
                                "Sable_antelope" = "Sable",
                                "Oribi" = "oribi"))
unique(aerial_data$Species)
# Count records per species in each count and spread to one column per
# count; the no-op arrange() calls from the original pipelines are dropped.
count_by_year <- function(dat) {
  dat %>%
    group_by(Count, Species) %>%
    summarise(Number = n()) %>%
    pivot_wider(names_from = Count, values_from = Number)
}
# count records per year
aerial_summary <- count_by_year(aerial_data)
aerial_summary
write.csv(aerial_summary, "data/species-counts-by-year.csv")
# Only within hex grid cells ----------------------------------------------
hex_summary <- count_by_year(read_csv(here::here("data", "hex-summary.csv")))
|
## ======================================================================
## These functions are slightly adapted versions of the function published in Ruscio, J., & Kaczetow, W. (2008). Simulating Multivariate Nonnormal Data Using an Iterative Algorithm. Multivariate Behavioral Research, 43(3), 355–381. doi:10.1080/00273170802285693
## ======================================================================
################################################################################################################
GenData <- function(Pop, rho, N=min(sapply(Pop, length)), N.Factors = 0, Max.Trials = 10, Initial.Multiplier = 1, seed = NA)
{
# Initialize variables and (if applicable) set random number seed (step 1) -------------------------------------
k <- length(Pop)
Data <- matrix(0, nrow = N, ncol = k) # Matrix to store the simulated data
Iteration <- 0 # Iteration counter
Best.RMSR <- 1 # Lowest RMSR correlation
Trials.Without.Improvement <- 0 # Trial counter
if (!is.na(seed)) set.seed(seed) # If user specified a nonzero seed, set it
Distributions <- matrix(NA, nrow=N, ncol=k)
Target.Corr <- matrix(c(1, rho, rho, 1), nrow=2)
# Generate distribution for each variable (step 2) -------------------------------------------------------------
for (i in 1:k) {
Distributions[, i] <- sort(sample(Pop[[i]], N, replace = TRUE))
}
# This implementation of GenData bootstraps each variable's score distribution from a supplied data set.
# Users should modify this block of the program, as needed, to generate the desired distribution(s).
#
# For example, to sample from chi-square distributions with 2 df, replace the 2nd line in this block with:
# Distributions[,i] <- sort(rchisq(N, df = 2))
#
# Or, one can drop the loop and use a series of commands that samples variables from specified populations:
# Distributions[,1] <- sort(rnorm(N, 0, 1)) # Standard normal distribution
# Distributions[,2] <- sort(runif(N, 0, 1)) # Uniform distribution ranging from 0 - 1
# Distributions[,3] <- sort(rlnorm(N, 0, 1)) # Log-normal distribution, log scale M = 0, SD = 1
# Distributions[,4] <- sort(rexp(N, rate = 1)) # Exponential distribution with rate = 1
# Distributions[,5] <- sort(rpois(N, lambda = 4)) # Poisson distribution with lambda = 4
# Distributions[,6] <- sort(rbinom(N, 10, .25) # Binominal distribution, size = 10 and p = .25
# Distributions[,7] <- sort(rbinom(N, 2, .25) # Binary distribution with p = .25
#
# All of the commands shown above draw random samples from specified population distributions. As an
# alternative, one can reproduce distributions without sampling error. For example, working with a
# supplied data set, one can replace the 2nd line in this block with:
# Disrributions[,i] <- Supplied.Data[,i]
# Alternatively, idealized distributions can be reproduced. For example, uniform quantiles can be
# created and used to generate data from common distributions:
# Uniform.Quantiles <- seq(from = 0, to = 1, length = (N + 2))[2:(N + 1)] # quantiles 0, 1 dropped
# Distributions[,1] <- qnorm(Uniform.Quantiles, 0, 1) # Standard normal distribution
# Distributions[,2] <- qunif(Uniform.Quantiles, 0, 1) # Uniform distribution ranging from 0 to 1
# Distributions[,3] <- qchisq(Uniform.Quantiles, df = 2) # Chi-square distribution with 2 df
#
# Note that when score distributions are generated from specified populations rather than bootstrapped from
# a supplied data set, the user must provide the target correlation matrix (see the next block). This is
# true regardless of whether the distributions incorporate sampling error.
# Calculate and store a copy of the target correlation matrix (step 3) -----------------------------------------
#Target.Corr <- cor(Supplied.Data)
Intermediate.Corr <- Target.Corr
# This implementation of GenData calculates the target correlation matrix from a supplied data set.
# Alternatively, the user can modify the program to generate data with user-defined sample size, number of
# variables, and target correlation matrix by redefining the function as follows:
# GenData <- function(N, k, Target.Corr, N.Factors = 0, Max.Trials = 5, Initial.Multiplier = 1, seed = 0)
# In this case, one would also remove the program lines that calculate N, k, and Target.Corr.
# To generate data in which variables are uncorrelated, one would remove the “sort” function from step 2
# and terminate the program before step 3 begins by returning the Distributions object as the data set.
# If number of latent factors was not specified, determine it through parallel analysis (step 4) ---------------
if (N.Factors == 0)
{
Eigenvalues.Observed <- eigen(Intermediate.Corr)$values
Eigenvalues.Random <- matrix(0, nrow = 100, ncol = k)
Random.Data <- matrix(0, nrow = N, ncol = k)
for (i in 1:100)
{
for (j in 1:k)
Random.Data[,j] <- sample(Distributions[,j], size = N, replace = TRUE)
Eigenvalues.Random[i,] <- eigen(cor(Random.Data))$values
}
Eigenvalues.Random <- apply(Eigenvalues.Random, 2, mean) # calculate mean eigenvalue for each factor
N.Factors <- max(1, sum(Eigenvalues.Observed > Eigenvalues.Random))
}
# Generate random normal data for shared and unique components, initialize factor loadings (steps 5, 6) --------
Shared.Comp <- matrix(rnorm(N * N.Factors, 0, 1), nrow = N, ncol = N.Factors)
Unique.Comp <- matrix(rnorm(N * k, 0, 1), nrow = N, ncol = k)
Shared.Load <- matrix(0, nrow = k, ncol = N.Factors)
Unique.Load <- matrix(0, nrow = k, ncol = 1)
# Begin loop that ends when specified number of iterations pass without improvement in RMSR correlation --------
while (Trials.Without.Improvement < Max.Trials)
{
Iteration <- Iteration + 1
# Calculate factor loadings and apply to reproduce desired correlations (steps 7, 8) ---------------------------
Fact.Anal <- Factor.Analysis(Intermediate.Corr, Corr.Matrix = TRUE, N.Factors = N.Factors)
if (N.Factors == 1) {
Shared.Load[,1] <- Fact.Anal$loadings
} else {
Shared.Load <- Fact.Anal$loadings
}
Shared.Load[Shared.Load > 1] <- 1
Shared.Load[Shared.Load < -1] <- -1
if (Shared.Load[1,1] < 0) Shared.Load <- Shared.Load * -1
Shared.Load.sq <- Shared.Load * Shared.Load
for (i in 1:k)
if (sum(Shared.Load.sq[i,]) < 1) {
Unique.Load[i,1] <- (1 - sum(Shared.Load.sq[i,]))
} else {
Unique.Load[i,1] <- 0
}
Unique.Load <- sqrt(Unique.Load)
for (i in 1:k) {
Data[,i] <- (Shared.Comp %*% t(Shared.Load))[,i] + Unique.Comp[,i] * Unique.Load[i,1]
}
# the %*% operator = matrix multiplication, and the t() function = transpose (both used again in step 13)
# Replace normal with nonnormal distributions (step 9) ---------------------------------------------------------
for (i in 1:k)
{
Data <- Data[sort.list(Data[,i]),]
Data[,i] <- Distributions[,i]
}
# Calculate RMSR correlation, compare to lowest value, take appropriate action (steps 10, 11, 12) --------------
Reproduced.Corr <- cor(Data)
Residual.Corr <- Target.Corr - Reproduced.Corr
RMSR <- sqrt(sum(Residual.Corr[lower.tri(Residual.Corr)] * Residual.Corr[lower.tri(Residual.Corr)]) /
(.5 * (k * k - k)))
if (RMSR < Best.RMSR) {
Best.RMSR <- RMSR
Best.Corr <- Intermediate.Corr
Best.Res <- Residual.Corr
Intermediate.Corr <- Intermediate.Corr + Initial.Multiplier * Residual.Corr
Trials.Without.Improvement <- 0
} else {
Trials.Without.Improvement <- Trials.Without.Improvement + 1
Current.Multiplier <- Initial.Multiplier * .5 ^ Trials.Without.Improvement
Intermediate.Corr <- Best.Corr + Current.Multiplier * Best.Res
}
} # end of the while loop
# Construct the data set with the lowest RMSR correlation (step 13) --------------------------------------------
Fact.Anal <- Factor.Analysis(Best.Corr, Corr.Matrix = TRUE, N.Factors = N.Factors)
if (N.Factors == 1) {
Shared.Load[,1] <- Fact.Anal$loadings
} else {
Shared.Load <- Fact.Anal$loadings
}
Shared.Load[Shared.Load > 1] <- 1
Shared.Load[Shared.Load < -1] <- -1
if (Shared.Load[1,1] < 0) {Shared.Load <- Shared.Load * -1}
Shared.Load.sq <- Shared.Load * Shared.Load
for (i in 1:k) {
if (sum(Shared.Load.sq[i,]) < 1) {
Unique.Load[i,1] <- (1 - sum(Shared.Load.sq[i,]))
} else {
Unique.Load[i,1] <- 0
}
}
Unique.Load <- sqrt(Unique.Load)
for (i in 1:k) {
Data[,i] <- (Shared.Comp %*% t(Shared.Load))[,i] + Unique.Comp[,i] * Unique.Load[i,1]
}
Data <- apply(Data, 2, scale) # standardizes each variable in the matrix
for (i in 1:k)
{
Data <- Data[sort.list(Data[,i]),]
Data[,i] <- Distributions[,i]
}
Data <- Data[sample(1:N, N, replace = FALSE), ] # randomize order of cases
# Report the results and return the simulated data set (step 14) -----------------------------------------------
Iteration <- Iteration - Max.Trials
cat("\nN =",N,", k =",k,",",Iteration,"Iterations,",N.Factors,"Factors, RMSR r =",round(Best.RMSR,3),"\n")
cat("Target correlation rho =", rho, "; obtained correlation =", round(cor(Data)[1, 2], 5))
return(Data)
}
################################################################################################################
# Principal-axis factor analysis with iterated communality estimates.
# Data: a raw data matrix, or a correlation matrix when Corr.Matrix = TRUE.
# Max.Iter caps the refinement iterations; N.Factors = 0 extracts all k
# factors and then keeps only those with eigenvalue > 1 on return.
# Returns list(loadings, factors); with a single retained factor the
# loadings come back as a plain vector (callers handle both shapes).
Factor.Analysis <- function(Data, Corr.Matrix = FALSE, Max.Iter = 50, N.Factors = 0) {
  Data <- as.matrix(Data)
  k <- ncol(Data)
  if (N.Factors == 0) N.Factors <- k
  # Work on a copy of the correlation matrix whose diagonal will be
  # overwritten with communality estimates each pass.
  work.corr <- if (Corr.Matrix) Data else cor(Data)
  criterion <- 0.001
  prev.comm <- rep(99, k)
  comm <- rep(0, k)
  delta <- 1
  iter <- 0
  loadings <- matrix(nrow = k, ncol = N.Factors)
  while (delta >= criterion && iter < Max.Iter) {
    iter <- iter + 1
    eig <- eigen(work.corr)
    # Loadings = eigenvectors scaled by sqrt of their eigenvalues.
    scale.fac <- sqrt(eig$values[1:N.Factors])
    for (j in 1:N.Factors) loadings[, j] <- eig$vectors[, j] * scale.fac[j]
    # Communalities are the row sums of squared loadings.
    comm <- rowSums(loadings * loadings)
    delta <- max(abs(prev.comm - comm))
    prev.comm <- comm
    diag(work.corr) <- comm
  }
  # When all factors were extracted, retain only those passing the
  # eigenvalue-greater-than-one rule (evaluated on the last decomposition).
  if (N.Factors == k) N.Factors <- sum(eig$values > 1)
  list(loadings = loadings[, 1:N.Factors], factors = N.Factors)
}
| /00-Ruscio_GenData.R | permissive | johannes-titz/corEvol | R | false | false | 10,754 | r | ## ======================================================================
## These functions are slightly adapted versions of the functions published in Ruscio, J., & Kaczetow, W. (2008). Simulating Multivariate Nonnormal Data Using an Iterative Algorithm. Multivariate Behavioral Research, 43(3), 355–381. doi:10.1080/00273170802285693
## ======================================================================
################################################################################################################
#' Simulate multivariate nonnormal data (Ruscio & Kaczetow, 2008).
#'
#' Bootstraps each variable's marginal distribution from the population
#' score vectors in `Pop`, then iteratively adjusts an intermediate
#' correlation matrix until simulated data reproduce Target.Corr with
#' minimum RMSR.
#'
#' NOTE(review): Target.Corr is hard-coded as a 2 x 2 matrix built from
#' `rho`, so this version assumes length(Pop) == 2 -- confirm before
#' using it with more than two variables.
#'
#' Pop: list of population score vectors (one per variable).
#' rho: target correlation between the two variables.
#' N: sample size (defaults to the shortest population vector).
#' N.Factors: latent factors; 0 = choose via parallel analysis.
#' Max.Trials: iterations without RMSR improvement before stopping.
#' Initial.Multiplier: step size applied to the residual correlations.
#' seed: optional RNG seed (NA leaves the RNG state untouched).
#' Returns an N x k data matrix.
GenData <- function(Pop, rho, N=min(sapply(Pop, length)), N.Factors = 0, Max.Trials = 10, Initial.Multiplier = 1, seed = NA)
{
# Initialize variables and (if applicable) set random number seed (step 1) -------------------------------------
k <- length(Pop)
Data <- matrix(0, nrow = N, ncol = k) # Matrix to store the simulated data
Iteration <- 0 # Iteration counter
Best.RMSR <- 1 # Lowest RMSR correlation
Trials.Without.Improvement <- 0 # Trial counter
if (!is.na(seed)) set.seed(seed) # If user specified a non-NA seed, set it
Distributions <- matrix(NA, nrow=N, ncol=k)
Target.Corr <- matrix(c(1, rho, rho, 1), nrow=2)
# Generate distribution for each variable (step 2) -------------------------------------------------------------
for (i in 1:k) {
Distributions[, i] <- sort(sample(Pop[[i]], N, replace = TRUE))
}
# This implementation of GenData bootstraps each variable's score distribution from a supplied data set.
# Users should modify this block of the program, as needed, to generate the desired distribution(s).
#
# For example, to sample from chi-square distributions with 2 df, replace the 2nd line in this block with:
# Distributions[,i] <- sort(rchisq(N, df = 2))
#
# Or, one can drop the loop and use a series of commands that samples variables from specified populations:
# Distributions[,1] <- sort(rnorm(N, 0, 1)) # Standard normal distribution
# Distributions[,2] <- sort(runif(N, 0, 1)) # Uniform distribution ranging from 0 - 1
# Distributions[,3] <- sort(rlnorm(N, 0, 1)) # Log-normal distribution, log scale M = 0, SD = 1
# Distributions[,4] <- sort(rexp(N, rate = 1)) # Exponential distribution with rate = 1
# Distributions[,5] <- sort(rpois(N, lambda = 4)) # Poisson distribution with lambda = 4
# Distributions[,6] <- sort(rbinom(N, 10, .25)) # Binomial distribution, size = 10 and p = .25
# Distributions[,7] <- sort(rbinom(N, 2, .25)) # Binary distribution with p = .25
#
# All of the commands shown above draw random samples from specified population distributions. As an
# alternative, one can reproduce distributions without sampling error. For example, working with a
# supplied data set, one can replace the 2nd line in this block with:
# Distributions[,i] <- Supplied.Data[,i]
# Alternatively, idealized distributions can be reproduced. For example, uniform quantiles can be
# created and used to generate data from common distributions:
# Uniform.Quantiles <- seq(from = 0, to = 1, length = (N + 2))[2:(N + 1)] # quantiles 0, 1 dropped
# Distributions[,1] <- qnorm(Uniform.Quantiles, 0, 1) # Standard normal distribution
# Distributions[,2] <- qunif(Uniform.Quantiles, 0, 1) # Uniform distribution ranging from 0 to 1
# Distributions[,3] <- qchisq(Uniform.Quantiles, df = 2) # Chi-square distribution with 2 df
#
# Note that when score distributions are generated from specified populations rather than bootstrapped from
# a supplied data set, the user must provide the target correlation matrix (see the next block). This is
# true regardless of whether the distributions incorporate sampling error.
# Calculate and store a copy of the target correlation matrix (step 3) -----------------------------------------
#Target.Corr <- cor(Supplied.Data)
Intermediate.Corr <- Target.Corr
# This implementation of GenData calculates the target correlation matrix from a supplied data set.
# Alternatively, the user can modify the program to generate data with user-defined sample size, number of
# variables, and target correlation matrix by redefining the function as follows:
# GenData <- function(N, k, Target.Corr, N.Factors = 0, Max.Trials = 5, Initial.Multiplier = 1, seed = 0)
# In this case, one would also remove the program lines that calculate N, k, and Target.Corr.
# To generate data in which variables are uncorrelated, one would remove the “sort” function from step 2
# and terminate the program before step 3 begins by returning the Distributions object as the data set.
# If number of latent factors was not specified, determine it through parallel analysis (step 4) ---------------
if (N.Factors == 0)
{
Eigenvalues.Observed <- eigen(Intermediate.Corr)$values
Eigenvalues.Random <- matrix(0, nrow = 100, ncol = k)
Random.Data <- matrix(0, nrow = N, ncol = k)
for (i in 1:100)
{
for (j in 1:k)
Random.Data[,j] <- sample(Distributions[,j], size = N, replace = TRUE)
Eigenvalues.Random[i,] <- eigen(cor(Random.Data))$values
}
Eigenvalues.Random <- apply(Eigenvalues.Random, 2, mean) # calculate mean eigenvalue for each factor
# Keep factors whose observed eigenvalue beats the mean random one (at least 1 factor).
N.Factors <- max(1, sum(Eigenvalues.Observed > Eigenvalues.Random))
}
# Generate random normal data for shared and unique components, initialize factor loadings (steps 5, 6) --------
Shared.Comp <- matrix(rnorm(N * N.Factors, 0, 1), nrow = N, ncol = N.Factors)
Unique.Comp <- matrix(rnorm(N * k, 0, 1), nrow = N, ncol = k)
Shared.Load <- matrix(0, nrow = k, ncol = N.Factors)
Unique.Load <- matrix(0, nrow = k, ncol = 1)
# Begin loop that ends when specified number of iterations pass without improvement in RMSR correlation --------
while (Trials.Without.Improvement < Max.Trials)
{
Iteration <- Iteration + 1
# Calculate factor loadings and apply to reproduce desired correlations (steps 7, 8) ---------------------------
Fact.Anal <- Factor.Analysis(Intermediate.Corr, Corr.Matrix = TRUE, N.Factors = N.Factors)
if (N.Factors == 1) {
Shared.Load[,1] <- Fact.Anal$loadings
} else {
Shared.Load <- Fact.Anal$loadings
}
# Clamp loadings to [-1, 1] and fix the sign so the first loading is positive.
Shared.Load[Shared.Load > 1] <- 1
Shared.Load[Shared.Load < -1] <- -1
if (Shared.Load[1,1] < 0) Shared.Load <- Shared.Load * -1
Shared.Load.sq <- Shared.Load * Shared.Load
# Unique variance = 1 - communality (floored at 0).
for (i in 1:k)
if (sum(Shared.Load.sq[i,]) < 1) {
Unique.Load[i,1] <- (1 - sum(Shared.Load.sq[i,]))
} else {
Unique.Load[i,1] <- 0
}
Unique.Load <- sqrt(Unique.Load)
for (i in 1:k) {
Data[,i] <- (Shared.Comp %*% t(Shared.Load))[,i] + Unique.Comp[,i] * Unique.Load[i,1]
}
# the %*% operator = matrix multiplication, and the t() function = transpose (both used again in step 13)
# Replace normal with nonnormal distributions (step 9) ---------------------------------------------------------
for (i in 1:k)
{
Data <- Data[sort.list(Data[,i]),]
Data[,i] <- Distributions[,i]
}
# Calculate RMSR correlation, compare to lowest value, take appropriate action (steps 10, 11, 12) --------------
Reproduced.Corr <- cor(Data)
Residual.Corr <- Target.Corr - Reproduced.Corr
RMSR <- sqrt(sum(Residual.Corr[lower.tri(Residual.Corr)] * Residual.Corr[lower.tri(Residual.Corr)]) /
(.5 * (k * k - k)))
if (RMSR < Best.RMSR) {
# Improvement: remember the best state and step the intermediate matrix
# toward the target along the residuals.
Best.RMSR <- RMSR
Best.Corr <- Intermediate.Corr
Best.Res <- Residual.Corr
Intermediate.Corr <- Intermediate.Corr + Initial.Multiplier * Residual.Corr
Trials.Without.Improvement <- 0
} else {
# No improvement: restart from the best state with a halved step size.
Trials.Without.Improvement <- Trials.Without.Improvement + 1
Current.Multiplier <- Initial.Multiplier * .5 ^ Trials.Without.Improvement
Intermediate.Corr <- Best.Corr + Current.Multiplier * Best.Res
}
} # end of the while loop
# Construct the data set with the lowest RMSR correlation (step 13) --------------------------------------------
Fact.Anal <- Factor.Analysis(Best.Corr, Corr.Matrix = TRUE, N.Factors = N.Factors)
if (N.Factors == 1) {
Shared.Load[,1] <- Fact.Anal$loadings
} else {
Shared.Load <- Fact.Anal$loadings
}
Shared.Load[Shared.Load > 1] <- 1
Shared.Load[Shared.Load < -1] <- -1
if (Shared.Load[1,1] < 0) {Shared.Load <- Shared.Load * -1}
Shared.Load.sq <- Shared.Load * Shared.Load
for (i in 1:k) {
if (sum(Shared.Load.sq[i,]) < 1) {
Unique.Load[i,1] <- (1 - sum(Shared.Load.sq[i,]))
} else {
Unique.Load[i,1] <- 0
}
}
Unique.Load <- sqrt(Unique.Load)
for (i in 1:k) {
Data[,i] <- (Shared.Comp %*% t(Shared.Load))[,i] + Unique.Comp[,i] * Unique.Load[i,1]
}
Data <- apply(Data, 2, scale) # standardizes each variable in the matrix
for (i in 1:k)
{
Data <- Data[sort.list(Data[,i]),]
Data[,i] <- Distributions[,i]
}
Data <- Data[sample(1:N, N, replace = FALSE), ] # randomize order of cases
# Report the results and return the simulated data set (step 14) -----------------------------------------------
Iteration <- Iteration - Max.Trials
cat("\nN =",N,", k =",k,",",Iteration,"Iterations,",N.Factors,"Factors, RMSR r =",round(Best.RMSR,3),"\n")
cat("Target correlation rho =", rho, "; obtained correlation =", round(cor(Data)[1, 2], 5))
return(Data)
}
################################################################################################################
# Principal-axis factor analysis via iterated eigendecomposition.
# Data: raw data matrix, or a correlation matrix if Corr.Matrix = TRUE.
# Max.Iter: cap on communality-refinement iterations.
# N.Factors: number of factors to extract; 0 = extract all k, then keep
#   only factors with eigenvalue > 1 on return.
# Returns list(loadings, factors). Note: when a single factor is kept,
# `loadings` is returned as a plain vector (callers such as GenData
# handle both shapes).
Factor.Analysis <- function(Data, Corr.Matrix = FALSE, Max.Iter = 50, N.Factors = 0)
{
Data <- as.matrix(Data)
k <- dim(Data)[2]
if (N.Factors == 0) N.Factors <- k
if (!Corr.Matrix) Cor.Matrix <- cor(Data)
else Cor.Matrix <- Data
# Convergence tolerance on the largest communality change.
Criterion <- .001
Old.H2 <- rep(99, k)
H2 <- rep(0, k)
Change <- 1
Iter <- 0
Factor.Loadings <- matrix(nrow = k, ncol = N.Factors)
while ((Change >= Criterion) & (Iter < Max.Iter))
{
Iter <- Iter + 1
Eig <- eigen(Cor.Matrix)
# Loadings = eigenvectors scaled by sqrt of their eigenvalues.
L <- sqrt(Eig$values[1:N.Factors])
for (i in 1:N.Factors)
Factor.Loadings[,i] <- Eig$vectors[,i] * L[i]
# Communalities = row sums of squared loadings.
for (i in 1:k)
H2[i] <- sum(Factor.Loadings[i,] * Factor.Loadings[i,])
Change <- max(abs(Old.H2 - H2))
Old.H2 <- H2
# Replace the diagonal with the current communality estimates.
diag(Cor.Matrix) <- H2
}
if (N.Factors == k) N.Factors <- sum(Eig$values > 1)
return(list(loadings = Factor.Loadings[,1:N.Factors], factors = N.Factors))
}
|
## Following is a pair of functions that cache the inverse of a matrix
## To create a special "matrix" object that can cache its inverse
## Create a special "matrix" object that can cache its inverse.
## Bug fix: the original stored/returned `m`, which was never initialized
## inside the closure (the constructor argument is `x`), so getmatrix()
## failed with "object 'm' not found" unless setmatrix() had been called
## first -- and setmatrix's `<<-` leaked `m` into the global environment.
## The constructor argument `x` is now used consistently as the store.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until computed, reset whenever the matrix changes.
  i <- NULL
  # Replace the stored matrix and invalidate the cached inverse.
  setmatrix <- function(matrix) {
    x <<- matrix
    i <<- NULL
  }
  # Return the stored matrix.
  getmatrix <- function() {
    x
  }
  # Store a computed inverse in the cache.
  setinverse <- function(inverse) {
    i <<- inverse
  }
  # Return the cached inverse (NULL if not computed yet).
  getinverse <- function() {
    i
  }
  # Expose the four accessors as a named list.
  list(setmatrix = setmatrix, getmatrix = getmatrix, setinverse = setinverse, getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned by 'makeCacheMatrix' above.
## If the inverse has already been calculated (and the matrix has not changed),
## 'cacheSolve' would retrieve the inverse from the cache.
## Return the inverse of the special "matrix" object `x` (as built by
## makeCacheMatrix): serve the cached inverse when available, otherwise
## invert the stored matrix with solve() and remember the result.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, store, and return the fresh inverse.
    fresh <- solve(x$getmatrix())
    x$setinverse(fresh)
    return(fresh)
  }
  message("getting cached data")
  cached
}
| /cachematrix.R | no_license | krisqianzhu/rprogramming | R | false | false | 1,629 | r | ## Following is a pair of functions that cache the inverse of a matrix
## To create a special "matrix" object that can cache its inverse
## Create a special "matrix" object that can cache its inverse.
## Bug fix: the original stored/returned `m`, which was never initialized
## inside the closure (the constructor argument is `x`), so getmatrix()
## failed with "object 'm' not found" unless setmatrix() had been called
## first -- and setmatrix's `<<-` leaked `m` into the global environment.
## The constructor argument `x` is now used consistently as the store.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until computed, reset whenever the matrix changes.
  i <- NULL
  # Replace the stored matrix and invalidate the cached inverse.
  setmatrix <- function(matrix) {
    x <<- matrix
    i <<- NULL
  }
  # Return the stored matrix.
  getmatrix <- function() {
    x
  }
  # Store a computed inverse in the cache.
  setinverse <- function(inverse) {
    i <<- inverse
  }
  # Return the cached inverse (NULL if not computed yet).
  getinverse <- function() {
    i
  }
  # Expose the four accessors as a named list.
  list(setmatrix = setmatrix, getmatrix = getmatrix, setinverse = setinverse, getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned by 'makeCacheMatrix' above.
## If the inverse has already been calculated (and the matrix has not changed),
## 'cacheSolve' would retrieve the inverse from the cache.
## Return the inverse of the special "matrix" object `x` (as built by
## makeCacheMatrix): serve the cached inverse when available, otherwise
## invert the stored matrix with solve() and remember the result.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, store, and return the fresh inverse.
    fresh <- solve(x$getmatrix())
    x$setinverse(fresh)
    return(fresh)
  }
  message("getting cached data")
  cached
}
|
# Title: DAL6 (In-Class Exercise)
# Author: Jacob A. Bose
# Author's Email: jabose@clemson.edu
# Date Created: 2021-03-28
# Purpose: regression practice with the AUTM licensing data and the
#          Global Terrorism Database yearly attack counts.
#
# Set Up ####
# Libraries
library(tidyverse)
# Data (both .Rdata files must exist relative to the working directory)
load("DALs/DAL6/autm_example_df.Rdata")
load("DALs/DAL6/gtd_raw.Rdata")
# Make analysis df (gtd_raw_1_ is the object restored by the load above)
gtd_df <- gtd_raw_1_
# Regression Practice - AUTM Dataset ####
# Scatterplot of licensing income vs. research expenditure, by institution
autm_example_df %>%
  ggplot(aes(x = totResExp, y = grossLicInc, color = institution)) +
  geom_point() +
  theme(legend.position = "bottom") +
  facet_wrap(~institution, scales = "free")
# Per-institution subsets
nu_df <- autm_example_df %>%
  filter(institution == "Northwestern Univ.")
warf_df <- autm_example_df %>%
  filter(institution == "W.A.R.F./University of Wisconsin Madison")
# Simple regressions within each institution
summary(lm(data = nu_df, grossLicInc ~ totResExp))
summary(lm(data = warf_df, grossLicInc ~ totResExp))
# Scatterplot with a fitted line per institution
autm_example_df %>%
  ggplot(aes(x = totResExp, y = grossLicInc, color = institution)) +
  geom_point() +
  theme(legend.position = "bottom") +
  geom_smooth(method = "lm", se = FALSE) +
  facet_wrap(~institution, scales = "free")
# Pooled regression with institution as a categorical predictor
summary(lm(data = autm_example_df, grossLicInc ~ totResExp + institution))
# GTD Data ####
# Does the number of attacks increase over time?
gtd_df %>%
  group_by(iyear) %>%
  summarize(attacks = n()) %>%
  arrange(iyear) %>%
  ggplot(aes(x = iyear, y = attacks)) +
  geom_point() +
  geom_smooth(method = "lm")
# By how much? Build the per-year count table first.
gtd_by_year_df <- gtd_df %>%
  group_by(iyear) %>%
  summarize(attacks = n())
summary(lm(data = gtd_by_year_df, attacks ~ iyear))
# How do we better fit the line? Split the years into three periods.
first_data <- gtd_by_year_df %>%
  filter(iyear < 1995)
middle_data <- gtd_by_year_df %>%
  filter(iyear >= 1995 & iyear < 2014)
late_data <- gtd_by_year_df %>%
  filter(iyear >= 2014)
# Period-specific regressions
summary(lm(data = first_data, attacks ~ iyear))
summary(lm(data = middle_data, attacks ~ iyear))
summary(lm(data = late_data, attacks ~ iyear))
# Same comparison in a single readout: tag each period, then recombine.
first_data <- gtd_by_year_df %>%
  filter(iyear < 1995) %>%
  mutate(period = "first")
middle_data <- gtd_by_year_df %>%
  filter(iyear >= 1995 & iyear < 2014) %>%
  mutate(period = "middle")
late_data <- gtd_by_year_df %>%
  filter(iyear >= 2014) %>%
  mutate(period = "late")
# Bind rows back together
gtd_by_year_df <- bind_rows(first_data, middle_data, late_data)
gtd_by_year_df
# Regression with period as a factor
summary(lm(data = gtd_by_year_df, attacks ~ iyear + period))
gtd_by_year_df %>%
  ggplot(aes(x = iyear, y = attacks, color = period)) +
  geom_point() +
  geom_smooth(method = "lm")
# Copyright (c) Jacob Bose, 2021
| /Bose - DAL6.R | no_license | jabose/bose-posc-3410 | R | false | false | 2,870 | r | # Title: DAL6 (In-Class Exercise)
# Author: Jacob A. Bose
# Author's Email: jabose@clemson.edu
# Date Created: 2021-03-28
# Purpose:
#
# Set Up####
# Libraries
library(tidyverse)
# Data
load("DALs/DAL6/autm_example_df.Rdata")
load("DALs/DAL6/gtd_raw.Rdata")
# Make analysis df
gtd_df <- gtd_raw_1_
# Regression Practice - AUTM Dataset ####
# scatterplot
autm_example_df %>%
ggplot(aes(x = totResExp, y = grossLicInc, color = institution)) +
geom_point() +
theme(legend.position = "bottom")+
facet_wrap(~institution, scales = "free")
# Create Northwestern Data
nu_df <- autm_example_df %>%
filter(institution == "Northwestern Univ.")
warf_df <- autm_example_df %>%
filter(institution == "W.A.R.F./University of Wisconsin Madison")
#
# lm NU
summary(lm(data = nu_df, grossLicInc ~ totResExp))
# lm Warf
summary(lm(data=warf_df, grossLicInc ~ totResExp ))
# scatterplot with categorical variable
autm_example_df %>%
ggplot(aes(x = totResExp, y = grossLicInc, color = institution)) +
geom_point() +
theme(legend.position = "bottom") +
geom_smooth(method="lm", se=FALSE)+
facet_wrap(~institution, scales= "free")
#geom_smooth(method="lm", se=FALSE) +
#facet_wrap(~institution, scales="free")
# lm autm_example_df + institution variable
summary(lm(data=autm_example_df, grossLicInc ~ totResExp + institution))
# GTD Data ####
# Does the number of attacks increase over time?
gtd_df %>%
group_by(iyear) %>%
summarize(attacks = n()) %>%
arrange(iyear) %>%
ggplot(aes(x=iyear, y=attacks)) +
geom_point() +
geom_smooth(method="lm")
# By how much?
# Create gtd_by_year_df
gtd_by_year_df <- gtd_df %>%
group_by(iyear) %>%
summarize(attacks = n())
# lm gtd_by_year_df
summary(lm(data = gtd_by_year_df, attacks ~ iyear))
# How do we better fit the line?
# Divide data into three separate periods
first_data <- gtd_by_year_df %>%
filter(iyear<1995)
middle_data <- gtd_by_year_df %>%
filter(iyear>=1995 & iyear<2014)
late_data <- gtd_by_year_df %>%
filter(iyear>=2014)
# Regressions
summary(lm(data=first_data, attacks ~ iyear))
summary(lm(data=middle_data, attacks ~ iyear))
summary(lm(data=late_data, attacks ~ iyear))
# Want to see this in the same readout?
first_data <- gtd_by_year_df %>%
filter(iyear<1995) %>%
mutate(period="first")
middle_data <- gtd_by_year_df %>%
filter(iyear>=1995 & iyear<2014) %>%
mutate(period="middle")
late_data <- gtd_by_year_df %>%
filter(iyear>=2014)%>%
mutate(period="late")
# Bind Rows
gtd_by_year_df <- bind_rows(first_data, middle_data, late_data)
gtd_by_year_df
#lm gtd_by_year_df with peirod as factor
summary(lm(data=gtd_by_year_df, attacks ~ iyear + period))
gtd_by_year_df %>%
ggplot(aes(x=iyear, y=attacks, color = period)) +
geom_point() +
geom_smooth(method="lm")
# Copyright (c) Jacob Bose, 2021
|
## Laplace-distribution exploration plus a toy "avoid some losing periods"
## compounding experiment.
## Fix applied: the final plot() line had dataset-export residue fused onto
## it, which made the script unparsable; the residue has been removed and
## the `library(rmutil)` call belonging to the following duplicated copy
## has been restored on its own line.
library(rmutil)
# Quick calls to the Laplace d/p/q/r functions (location 2, scale 1).
dlaplace(10, 2, 1)
plaplace(10, 2, 1)
# NOTE(review): qlaplace expects a probability in [0, 1]; 10 is out of
# range -- confirm the intended quantile.
qlaplace(10, 2, 1)
rlaplace(10, 2, 1)
# NOTE(review): `r` and `D` are not defined in this file; `r(D)(100)` only
# works if a distribution object / generator factory exists elsewhere.
rndm <- r(D)(100)
rndm <- as.data.frame(rndm + 1)
# Attach an index column and two placeholder columns
# (kierunek = direction, inwestycja = investment value).
rndm <- cbind(c(1:nrow(rndm)),
rndm,
c(rep(0, nrow(rndm))),
c(rep(0, nrow(rndm))))
names(rndm)[4] <- "inwestycja"
names(rndm)[3] <- "kierunek"
names(rndm)[1] <- "index"
names(rndm)[2] <- "zmiana"
# Label each period as up ("1") or down ("-1") relative to no change.
rndm$kierunek[rndm$zmiana > 1] <- "1"
rndm$kierunek[rndm$zmiana < 1] <- "-1"
# Randomly drop a fraction of the losing periods, mimicking a strategy
# that sidesteps some losses.
usun <- rndm$index[rndm$kierunek == "-1"]
usun <- usun[-sample(1:length(usun), size = (length(usun) * 0.3255))]
rndm <- rndm[-c(usun), ]
table(rndm$kierunek)[2] / sum(table(rndm$kierunek)) # hit rate ~75% for the 0.3255 drop fraction
rndm$inwestycja <- 100
# Compound the surviving returns with a 0.4% per-period cost factor.
rndm$inwestycja <- rndm$inwestycja *
cumprod(rndm$zmiana) *
(cumprod(rep(0.996, nrow(rndm))))
length(rndm$inwestycja)
plot(rndm$inwestycja, type = "l")
# First statement of the duplicated copy that follows in this file.
library(rmutil)
## Duplicated copy of the Laplace simulation sketch (its library(rmutil)
## call sits on the corrupted line above). Fix applied: the trailing
## plot() line carried dataset-export residue ("|"), now removed.
# Quick calls to the Laplace d/p/q/r functions (location 2, scale 1).
dlaplace(10, 2, 1)
plaplace(10, 2, 1)
# NOTE(review): qlaplace expects a probability in [0, 1]; 10 is out of
# range -- confirm the intended quantile.
qlaplace(10, 2, 1)
rlaplace(10, 2, 1)
# NOTE(review): `r` and `D` are not defined in this file; `r(D)(100)` only
# works if a distribution object / generator factory exists elsewhere.
rndm <- r(D)(100)
rndm <- as.data.frame(rndm + 1)
# Attach an index column and two placeholder columns
# (kierunek = direction, inwestycja = investment value).
rndm <- cbind(c(1:nrow(rndm)),
rndm,
c(rep(0, nrow(rndm))),
c(rep(0, nrow(rndm))))
names(rndm)[4] <- "inwestycja"
names(rndm)[3] <- "kierunek"
names(rndm)[1] <- "index"
names(rndm)[2] <- "zmiana"
# Label each period as up ("1") or down ("-1") relative to no change.
rndm$kierunek[rndm$zmiana > 1] <- "1"
rndm$kierunek[rndm$zmiana < 1] <- "-1"
# Randomly drop a fraction of the losing periods, mimicking a strategy
# that sidesteps some losses.
usun <- rndm$index[rndm$kierunek == "-1"]
usun <- usun[-sample(1:length(usun), size = (length(usun) * 0.3255))]
rndm <- rndm[-c(usun), ]
table(rndm$kierunek)[2] / sum(table(rndm$kierunek)) # hit rate ~75% for the 0.3255 drop fraction
rndm$inwestycja <- 100
# Compound the surviving returns with a 0.4% per-period cost factor.
rndm$inwestycja <- rndm$inwestycja *
cumprod(rndm$zmiana) *
(cumprod(rep(0.996, nrow(rndm))))
length(rndm$inwestycja)
plot(rndm$inwestycja, type = "l")
## basicEnsembles.R -- fit one of several regression models chosen on the
## command line, then write cross-validation/test predictions and the
## cross-validated error to files.
##
## Usage: Rscript basicEnsembles.R <train> <cv> <test> <predCV> <predTest>
##        <rmseOut> <model.type> [input1]
## Fix applied: the BMAR branch assigned its test predictions to a
## misspelled variable ("testPreidctions"), so TestPredictions was never
## defined and the final write() call failed for that model type. The
## trailing write() line was also corrupted by dataset-export residue.
args <- commandArgs(TRUE)
trainPath <- args[1]
CVPath <- args[2]
testPath <- args[3]
predCV <- args[4]
predTest <- args[5]
RMSEPath <- args[6]
model.type <- args[7]
input1 <- args[8]  # model-specific tuning value (nPCs, mstop, ...)
# All three data sets are tab-separated; the response column is "y".
dataTrain <- read.csv(trainPath, sep = "\t")
dataCV <- read.csv(CVPath, sep = "\t")
dataTest <- read.csv(testPath, sep = "\t")
if (model.type == "OLS") {
  ## Ordinary Least Squares (no intercept)
  library(ipred)
  fit <- lm(y ~ 0 + ., data = dataTrain)
  errFit <- errorest(y ~ 0 + ., data = dataTrain, model = lm)
  summary(fit)
  print(errFit)
  error <- errFit$error
  CVPredictions <- predict(fit, dataCV)
  TestPredictions <- predict(fit, dataTest)
}
if (model.type == "OLSI") {
  ## Ordinary Least Squares with interaction terms
  library(ipred)
  # NOTE(review): `formula` is built from input1 but the fits below
  # hard-code second-order interactions; confirm which was intended.
  formula <- paste("y~0 + (.)^", input1, sep = "")
  fit <- lm(y ~ 0 + (.)^2, data = dataTrain)
  errFit <- errorest(y ~ 0 + (.)^2, data = dataTrain, model = lm)
  summary(fit)
  print(errFit)
  error <- errFit$error
  CVPredictions <- predict(fit, dataCV)
  TestPredictions <- predict(fit, dataTest)
}
if (model.type == "RR") {
  ## Ridge Regression; input1 = number of principal components
  library(ipred)
  library(ridge)
  input1 <- as.numeric(input1)
  fit <- linearRidge(y ~ 0 + ., data = dataTrain, nPCs = input1)
  # Wrapper so errorest() can refit the ridge model on each fold.
  ridgeModel <- function(formula, data) {
    mod <- linearRidge(formula, data = data, nPCs = input1)
    function(newdata) predict(mod, newdata)
  }
  errFit <- errorest(y ~ 0 + ., data = dataTrain, model = ridgeModel)
  print(fit)
  print("Ridge lambdas")
  print(fit$lambda)
  print(errFit)
  error <- errFit$error
  CVPredictions <- predict(fit, dataCV)
  TestPredictions <- predict(fit, dataTest)
}
if (model.type == "Lasso") {
  ## Lasso Regression (glmnet)
  library(glmnet)
  y <- data.matrix(dataTrain$y)
  drops <- c("y")
  x <- data.matrix(dataTrain[, !(names(dataTrain) %in% drops)])
  errFit <- cv.glmnet(x, y)
  print(errFit)
  error <- sqrt(mean(errFit$cvm))  # RMSE averaged over the lambda path
  fit <- glmnet(x, y)
  dataCVMat <- data.matrix(dataCV[, !(names(dataCV) %in% drops)])
  # NOTE(review): predict.glmnet returns one column per lambda here.
  CVPredictions <- predict(fit, dataCVMat)
  TestPredictions <- predict(fit, as.matrix(dataTest))
}
if (model.type == "BRT") {
  library(ipred)
  ## Bagged Regression Trees
  fit <- bagging(y ~ 0 + ., data = dataTrain)
  errFit <- errorest(y ~ 0 + ., data = dataTrain, model = bagging)
  print(fit)
  print(errFit)
  error <- errFit$error
  CVPredictions <- predict(fit, dataCV)
  TestPredictions <- predict(fit, dataTest)
}
if (model.type == "BMAR") {
  library(BMA)
  library(ipred)
  ## Bayesian Model Averaging Regression
  y <- dataTrain$y
  drops <- c("y")
  x <- dataTrain[, !(names(dataTrain) %in% drops)]
  fit <- bicreg(x, y)
  # Adapter giving errorest() the formula/data interface it expects.
  errBicReg <- function(formula, data) {
    y <- data$y
    drops <- c("y")
    x <- data[, !(names(data) %in% drops)]
    bicreg(x, y)
  }
  summary(fit)
  errFit <- errorest(y ~ 0 + ., data = dataTrain, model = errBicReg)
  print(errFit)
  error <- errFit$error
  cvp <- predict(fit, dataCV)
  tp <- predict(fit, dataTest)
  CVPredictions <- unlist(cvp[1])
  # Bug fix: this was assigned to a misspelled "testPreidctions" variable.
  TestPredictions <- unlist(tp[1])
}
if (model.type == "RFR") {
  library(randomForest)
  library(ipred)
  ## Random Forest regression
  fit <- randomForest(y ~ 0 + ., data = dataTrain, importance = TRUE, sampsize = 1000, ntree = 100)
  # Wrapper with fixed tuning values for errorest()'s per-fold refits.
  randFor <- function(formula, data) {
    randomForest(y ~ 0 + ., data = data, importance = TRUE, sampsize = 1000, ntree = 100)
  }
  errFit <- errorest(y ~ 0 + ., data = dataTrain, model = randFor)
  print(errFit)
  error <- errFit$error
  CVPredictions <- predict(fit, dataCV)
  TestPredictions <- predict(fit, dataTest)
}
if (model.type == "CIRF") {
  ## Conditional Inference Random Forest
  ## NOTE(review): unfinished upstream ("Not Working") -- this branch fits
  ## the forest but never sets error/CVPredictions/TestPredictions, so the
  ## write() calls at the bottom will fail for this model type.
  library(party)
  library(languageR)
  fit <- cforest(y ~ 0 + ., data = dataTrain)
}
if (model.type == "GBRT") {
  ## Gradient Boosted Regression Trees; input1 = initial mstop
  library(mboost)
  input1 <- as.numeric(input1)
  fit <- blackboost(y ~ 0 + ., data = dataTrain, control = boost_control(mstop = input1))
  # Result unused, but kept: dropping this call would change the RNG
  # stream consumed by cvrisk() below and therefore the chosen mstop.
  cv10f <- cv(model.weights(fit), type = "kfold")
  cvm <- cvrisk(fit)
  print(cvm)
  mstop(cvm)
  # Refit at the cross-validation-optimal number of boosting iterations.
  fit <- blackboost(y ~ 0 + ., data = dataTrain, control = boost_control(mstop = mstop(cvm)))
  error <- min(cvm)
  CVPredictions <- predict(fit, dataCV)
  TestPredictions <- predict(fit, dataTest)
}
# Persist predictions (one value per line) and the CV error estimate.
write(CVPredictions, file = predCV, ncolumns = 1)
write(TestPredictions, file = predTest, ncolumns = 1)
write(error, file = RMSEPath, ncolumns = 1)
# First statement of the duplicated copy that follows in this file,
# recovered from the corrupted dataset-residue row.
args <- commandArgs(TRUE)
## Duplicated copy of basicEnsembles.R (its commandArgs() call sits on the
## corrupted residue row above, so `args` must already be populated).
## Fix applied: the BMAR branch assigned its test predictions to a
## misspelled variable ("testPreidctions"), so TestPredictions was never
## defined and the final write() call failed for that model type. The
## trailing write() line also carried dataset-export residue, now removed.
trainPath <- args[1]
CVPath <- args[2]
testPath <- args[3]
predCV <- args[4]
predTest <- args[5]
RMSEPath <- args[6]
model.type <- args[7]
input1 <- args[8]  # model-specific tuning value (nPCs, mstop, ...)
# All three data sets are tab-separated; the response column is "y".
dataTrain <- read.csv(trainPath, sep = "\t")
dataCV <- read.csv(CVPath, sep = "\t")
dataTest <- read.csv(testPath, sep = "\t")
if (model.type == "OLS") {
  ## Ordinary Least Squares (no intercept)
  library(ipred)
  fit <- lm(y ~ 0 + ., data = dataTrain)
  errFit <- errorest(y ~ 0 + ., data = dataTrain, model = lm)
  summary(fit)
  print(errFit)
  error <- errFit$error
  CVPredictions <- predict(fit, dataCV)
  TestPredictions <- predict(fit, dataTest)
}
if (model.type == "OLSI") {
  ## Ordinary Least Squares with interaction terms
  library(ipred)
  # NOTE(review): `formula` is built from input1 but the fits below
  # hard-code second-order interactions; confirm which was intended.
  formula <- paste("y~0 + (.)^", input1, sep = "")
  fit <- lm(y ~ 0 + (.)^2, data = dataTrain)
  errFit <- errorest(y ~ 0 + (.)^2, data = dataTrain, model = lm)
  summary(fit)
  print(errFit)
  error <- errFit$error
  CVPredictions <- predict(fit, dataCV)
  TestPredictions <- predict(fit, dataTest)
}
if (model.type == "RR") {
  ## Ridge Regression; input1 = number of principal components
  library(ipred)
  library(ridge)
  input1 <- as.numeric(input1)
  fit <- linearRidge(y ~ 0 + ., data = dataTrain, nPCs = input1)
  # Wrapper so errorest() can refit the ridge model on each fold.
  ridgeModel <- function(formula, data) {
    mod <- linearRidge(formula, data = data, nPCs = input1)
    function(newdata) predict(mod, newdata)
  }
  errFit <- errorest(y ~ 0 + ., data = dataTrain, model = ridgeModel)
  print(fit)
  print("Ridge lambdas")
  print(fit$lambda)
  print(errFit)
  error <- errFit$error
  CVPredictions <- predict(fit, dataCV)
  TestPredictions <- predict(fit, dataTest)
}
if (model.type == "Lasso") {
  ## Lasso Regression (glmnet)
  library(glmnet)
  y <- data.matrix(dataTrain$y)
  drops <- c("y")
  x <- data.matrix(dataTrain[, !(names(dataTrain) %in% drops)])
  errFit <- cv.glmnet(x, y)
  print(errFit)
  error <- sqrt(mean(errFit$cvm))  # RMSE averaged over the lambda path
  fit <- glmnet(x, y)
  dataCVMat <- data.matrix(dataCV[, !(names(dataCV) %in% drops)])
  # NOTE(review): predict.glmnet returns one column per lambda here.
  CVPredictions <- predict(fit, dataCVMat)
  TestPredictions <- predict(fit, as.matrix(dataTest))
}
if (model.type == "BRT") {
  library(ipred)
  ## Bagged Regression Trees
  fit <- bagging(y ~ 0 + ., data = dataTrain)
  errFit <- errorest(y ~ 0 + ., data = dataTrain, model = bagging)
  print(fit)
  print(errFit)
  error <- errFit$error
  CVPredictions <- predict(fit, dataCV)
  TestPredictions <- predict(fit, dataTest)
}
if (model.type == "BMAR") {
  library(BMA)
  library(ipred)
  ## Bayesian Model Averaging Regression
  y <- dataTrain$y
  drops <- c("y")
  x <- dataTrain[, !(names(dataTrain) %in% drops)]
  fit <- bicreg(x, y)
  # Adapter giving errorest() the formula/data interface it expects.
  errBicReg <- function(formula, data) {
    y <- data$y
    drops <- c("y")
    x <- data[, !(names(data) %in% drops)]
    bicreg(x, y)
  }
  summary(fit)
  errFit <- errorest(y ~ 0 + ., data = dataTrain, model = errBicReg)
  print(errFit)
  error <- errFit$error
  cvp <- predict(fit, dataCV)
  tp <- predict(fit, dataTest)
  CVPredictions <- unlist(cvp[1])
  # Bug fix: this was assigned to a misspelled "testPreidctions" variable.
  TestPredictions <- unlist(tp[1])
}
if (model.type == "RFR") {
  library(randomForest)
  library(ipred)
  ## Random Forest regression
  fit <- randomForest(y ~ 0 + ., data = dataTrain, importance = TRUE, sampsize = 1000, ntree = 100)
  # Wrapper with fixed tuning values for errorest()'s per-fold refits.
  randFor <- function(formula, data) {
    randomForest(y ~ 0 + ., data = data, importance = TRUE, sampsize = 1000, ntree = 100)
  }
  errFit <- errorest(y ~ 0 + ., data = dataTrain, model = randFor)
  print(errFit)
  error <- errFit$error
  CVPredictions <- predict(fit, dataCV)
  TestPredictions <- predict(fit, dataTest)
}
if (model.type == "CIRF") {
  ## Conditional Inference Random Forest
  ## NOTE(review): unfinished upstream ("Not Working") -- this branch fits
  ## the forest but never sets error/CVPredictions/TestPredictions, so the
  ## write() calls at the bottom will fail for this model type.
  library(party)
  library(languageR)
  fit <- cforest(y ~ 0 + ., data = dataTrain)
}
if (model.type == "GBRT") {
  ## Gradient Boosted Regression Trees; input1 = initial mstop
  library(mboost)
  input1 <- as.numeric(input1)
  fit <- blackboost(y ~ 0 + ., data = dataTrain, control = boost_control(mstop = input1))
  # Result unused, but kept: dropping this call would change the RNG
  # stream consumed by cvrisk() below and therefore the chosen mstop.
  cv10f <- cv(model.weights(fit), type = "kfold")
  cvm <- cvrisk(fit)
  print(cvm)
  mstop(cvm)
  # Refit at the cross-validation-optimal number of boosting iterations.
  fit <- blackboost(y ~ 0 + ., data = dataTrain, control = boost_control(mstop = mstop(cvm)))
  error <- min(cvm)
  CVPredictions <- predict(fit, dataCV)
  TestPredictions <- predict(fit, dataTest)
}
# Persist predictions (one value per line) and the CV error estimate.
write(CVPredictions, file = predCV, ncolumns = 1)
write(TestPredictions, file = predTest, ncolumns = 1)
write(error, file = RMSEPath, ncolumns = 1)
# n-point Gauss-Hermite quadrature for the weight function exp(-x^2):
# returns list(x, w) with nodes x and weights w such that
#   integral f(t) exp(-t^2) dt  ~=  sum(w * f(x)).
# Port of the Numerical Recipes routine "gauher": for each of the first
# trunc((n+1)/2) roots, start from an asymptotic initial guess and polish
# with Newton's method on the recurrence-evaluated Hermite polynomial;
# the remaining roots follow by symmetry (x -> -x with the same weight).
gauher <- function (n) {
    EPS <- 3e-14                     # Newton convergence tolerance
    PIM4 <- 0.751125544464943        # pi^(-1/4): normalizes the recurrence
    MAXIT <- 10                      # max Newton iterations per root
    m <- trunc((n + 1) / 2)          # number of non-negative roots
    x <- w <- rep(-1, n)
    for (i in 1:m) {
        # Initial guess for the i-th root, from largest inward.
        if (i == 1) {
            z <- sqrt(2 * n + 1) - 1.85575 * (2 * n + 1)^(-0.16667)
        }
        else if (i == 2) {
            z <- z - 1.14 * n^0.426 / z
        }
        else if (i == 3) {
            z <- 1.86 * z - 0.86 * x[1]
        }
        else if (i == 4) {
            z <- 1.91 * z - 0.91 * x[2]
        }
        else {
            # Linear extrapolation from the two previous roots.
            z <- 2 * z - x[i - 2]
        }
        for (its in 1:MAXIT) {
            # Evaluate the (orthonormal) Hermite polynomial at z via the
            # three-term recurrence; p1 holds H_n, p2 holds H_{n-1}.
            p1 <- PIM4
            p2 <- 0
            for (j in 1:n) {
                p3 <- p2
                p2 <- p1
                p1 <- z * sqrt(2/j) * p2 - sqrt((j - 1)/j) * p3
            }
            # Derivative of H_n and one Newton step.
            pp <- sqrt(2 * n) * p2
            z1 <- z
            z <- z1 - p1/pp
            if (abs(z - z1) <= EPS)
                break
        }
        # Store the root and its mirror image, with the shared weight.
        x[i] <- z
        x[n + 1 - i] <- -z
        w[i] <- 2 / (pp * pp)
        w[n + 1 - i] <- w[i]
    }
    list(x = x, w = w)
}
# Multivariate normal density evaluated at the rows of `x`.
# x: numeric vector (one point) or matrix (one point per row).
# mu: mean vector; Sigma: covariance matrix; log: return log-density?
# Returns one (log-)density value per row of `x`.
dmvnorm <- function (x, mu, Sigma, log = FALSE) {
    if (!is.matrix(x)) x <- rbind(x)
    p <- length(mu)
    ed <- eigen(Sigma, symmetric = TRUE)
    # Reject non-positive-definite Sigma (tolerating tiny negative
    # eigenvalues from rounding).
    if (!all(ed$values >= -1e-06 * abs(ed$values[1])))
        stop("'Sigma' is not positive definite")
    centered <- x - rep(mu, each = nrow(x))
    # Sigma^{-1} from the spectral decomposition: V diag(1/lambda) V'.
    Sigma.inv <- ed$vectors %*% (t(ed$vectors) / ed$values)
    # Half the Mahalanobis distance, one value per row.
    mahal <- 0.5 * rowSums((centered %*% Sigma.inv) * centered)
    log.norm.const <- - 0.5 * (p * log(2 * pi) + determinant(Sigma, logarithm = TRUE)$modulus)
    out <- log.norm.const - mahal
    if (!log) out <- exp(out)
    as.vector(out)
}
# Marginal log-likelihood of a pairwise (two-outcome) mixed-effects
# logistic model with random intercept + random slope per outcome
# (4 random effects, so ncol(Z) is expected to be 4), integrated out by
# Gauss-Hermite quadrature with GHk points per dimension.
#
# thetas: c(4 fixed effects, then the 6 free variance parameters:
#   RI var, RS var, RI-RS cov for outcome i, then the same for outcome j).
# extraParam: the full 4x4 VarCorr(Models[[p]])$id matrix from the glmer
#   fit; its cross-outcome blocks are kept while the 6 within-outcome
#   entries are overwritten from `thetas`, so numerical derivatives with
#   respect to `thetas` can be formed.
# Returns the log-likelihood with POSITIVE orientation (a maximum, as
# needed by the CLIC heuristic) -- see the last two lines.
logLik.bin <- function (thetas, id, y, X, Z, GHk = 5, extraParam) {
#thetas <- relist(thetas, lis.thetas)
#betas <- thetas$betas
#ncz <- ncol(Z)
#D <- matrix(0, ncz, ncz)
#D[lower.tri(D, TRUE)] <- thetas$D
#D <- D + t(D)
#diag(D) <- diag(D) / 2
#
# Unpack the parameter vector.
betas<-thetas[1:4]
Sigma2YiRI<-thetas[5]
Sigma2YiRS<-thetas[6]
Sigma2YiRIRS<-thetas[7]
Sigma2YjRI<-thetas[8]
Sigma2YjRS<-thetas[9]
Sigma2YjRIRS<-thetas[10]
Dold <- matrix(0,ncol=4,nrow=4)
Dnew <- matrix(0,ncol=4,nrow=4)
Dold <- extraParam # extraParam is VarCorr(Models[[i]])$id
#re-align columns, rows: glmer orders the random effects 1,2,3,4 but this
#code uses the ordering 1,3,2,4 (intercept-i, slope-i, intercept-j, slope-j).
#old: glmer, 1,2,3,4
#new: Florios, 1,3,2,4
Dnew[1,1] = Dold[1,1]
Dnew[2,2] = Dold[3,3]
Dnew[3,3] = Dold[2,2]
Dnew[4,4] = Dold[4,4]
Dnew[2,1] = Dold[3,1]
Dnew[3,2] = Dold[2,3]
Dnew[4,3] = Dold[4,2]
Dnew[3,1] = Dold[2,1]
Dnew[4,2] = Dold[4,3]
Dnew[4,1] = Dold[4,1]
#now: fill in upper triangular: D new is symmetric
for (ii in 1:4) {
for (jj in ii:4) {
Dnew[ii,jj] <- Dnew[jj,ii]
}
}
###D<- cbind(c(Sigma2YiRI,Sigma2YiRIRS,0,0),c(Sigma2YiRIRS,Sigma2YiRS,0,0),c(0,0,Sigma2YjRI,Sigma2YjRIRS),c(0,0,Sigma2YjRIRS,Sigma2YjRS))
#instead: put full D matrix, as obtained from glmer() run
#now plug-in the thetas, in order to perform numerical derivatives wrt theta (scores, hessians)
Dnew[1,1] = Sigma2YiRI
Dnew[2,2] = Sigma2YiRS
Dnew[2,1] = Sigma2YiRIRS
Dnew[1,2] = Dnew[2,1]
Dnew[3,3] = Sigma2YjRI
Dnew[4,4] = Sigma2YjRS
Dnew[4,3] = Sigma2YjRIRS
Dnew[3,4] = Dnew[4,3]
#simplify notation: D=Dnew, and proceed
# Project onto the nearest PD matrix so dmvnorm() below cannot fail for
# perturbed thetas (numerical differentiation can make Dnew indefinite).
D <-nearPD(Dnew)
ncz <- ncol(Z)
# Build the multidimensional Gauss-Hermite grid: k = GHk^ncz abscissae
# b with weights wGH, rescaled for integration against a N(0, D) density.
GH <- gauher(GHk)
b <- as.matrix(expand.grid(rep(list(GH$x), ncz)))
dimnames(b) <- NULL
k <- nrow(b)
wGH <- as.matrix(expand.grid(rep(list(GH$w), ncz)))
wGH <- 2^(ncz/2) * apply(wGH, 1, prod) * exp(rowSums(b * b))
b <- sqrt(2) * b
Ztb <- Z %*% t(b)
#
# Bernoulli log-density on the logit scale at every quadrature point,
# summed within subject (rowsum over id).
mu.y <- plogis(as.vector(X %*% betas) + Ztb)
logBinom <- dbinom(y, 1, mu.y, TRUE)
log.p.yb <- rowsum(logBinom, id)
log.p.b <- dmvnorm(b, rep(0, ncol(Z)), D, TRUE)
# p(y) = integral of p(y | b) p(b) db, approximated by the GH sum.
p.yb <- exp(log.p.yb + rep(log.p.b, each = nrow(log.p.yb)))
p.y <- c(p.yb %*% wGH)
#-sum(log(p.y), na.rm = TRUE) #logLik as min, original
sum(log(p.y), na.rm = TRUE) #logLik as max, CLIC heuristic
}
# Numerical score (gradient) of the pairwise log-likelihood at `thetas`,
# by forward differences.  NOTE: `extraParam` is deliberately passed
# positionally -- it travels through fd()'s `...` and lands in
# logLik.bin's `extraParam` formal by positional matching.
score.bin <- function (thetas, id, y, X, Z, GHk = 5, extraParam) {
fd (thetas, logLik.bin, id = id, y = y, X = X, Z = Z, GHk = GHk, extraParam)
}
cd <- function (x, f, ..., eps = 0.001) {
  # Central-difference approximation to the gradient of f at x.  The
  # step for coordinate i is eps * max(|x[i]|, 1).
  #
  # Args:
  #   x:   numeric vector, point of evaluation.
  #   f:   scalar-valued function of x; extra args forwarded via `...`.
  #   eps: relative step size.
  # Returns:
  #   numeric vector of partial derivatives, same length as x.
  k <- length(x)
  grad <- numeric(k)
  step.scale <- pmax(abs(x), 1)
  for (j in seq_len(k)) {
    up <- dn <- x
    up[j] <- x[j] + eps * step.scale[j]
    dn[j] <- x[j] - eps * step.scale[j]
    grad[j] <- c(f(up, ...) - f(dn, ...)) / (up[j] - dn[j])
  }
  grad
}
fd <- function (x, f, ..., eps = 1e-04) {
  # Forward-difference approximation to the gradient of f at x: one
  # baseline evaluation f(x) plus one perturbed evaluation per
  # coordinate.  The step for coordinate i is eps * max(|x[i]|, 1).
  #
  # Args:
  #   x:   numeric vector, point of evaluation.
  #   f:   scalar-valued function of x; extra args forwarded via `...`.
  #   eps: relative step size.
  # Returns:
  #   numeric vector of partial derivatives, same length as x.
  base <- f(x, ...)
  k <- length(x)
  grad <- numeric(k)
  step.scale <- pmax(abs(x), 1)
  for (j in seq_len(k)) {
    shifted <- x
    shifted[j] <- x[j] + eps * step.scale[j]
    grad[j] <- c(f(shifted, ...) - base) / (shifted[j] - x[j])
  }
  grad
}
hess.bin <- function (thetas, id, y, X, Z, GHk = 5, extraParam) {
  # Numerical Hessian of the pairwise log-likelihood at `thetas`,
  # element-wise by central second differences (see cdd()).
  #
  # Fixes relative to the previous version:
  #   * the cdd() call hard-coded GHk = 5, silently ignoring the GHk
  #     argument supplied by the caller; it now forwards GHk.
  #   * a dead `res` preallocation (immediately overwritten) and the
  #     commented-out element loops were removed.
  # NOTE: `extraParam` is passed positionally through cdd()'s `...` and
  # reaches logLik.bin's `extraParam` formal by positional matching.
  cdd(thetas, logLik.bin, id = id, y = y, X = X, Z = Z, GHk = GHk, extraParam)
}
cdd <- function (x, f, ..., eps = 0.0005) {
  # Numerical Hessian of the scalar function f at x by central second
  # differences.  Only the upper triangle is computed; the lower
  # triangle is filled in by symmetry at the end.
  #
  # Args:
  #   x:   numeric vector, point of evaluation.
  #   f:   scalar-valued function of x; extra args forwarded via `...`.
  #   eps: relative step size (step for coordinate i is eps * max(|x[i]|, 1)).
  # Returns:
  #   length(x) x length(x) symmetric matrix of second derivatives.
  #
  # Fix relative to the previous version: for the mixed partials the
  # four function evaluations and the result assignment sat INSIDE the
  # `for (k in 1:n)` loop that builds the perturbed points, so each
  # element was evaluated 4*n times with the first n-1 results thrown
  # away.  The evaluation is now done once per element, after the
  # perturbed points are fully built; the numerics are unchanged.
  n <- length(x)
  res <- matrix(nrow = n, ncol = n)
  ex <- pmax(abs(x), 1)
  celem <- 0
  for (i in 1:n) {
    for (j in i:n) {
      celem <- celem + 1
      cat("Computing Element No...:", celem, "of hessian\n")
      x1 <- x2 <- x3 <- x4 <- x
      h <- eps * ex[i]
      if (i != j) {
        # Mixed partial d2f/dxi dxj:
        #   [f(+,+) - f(+,-) - f(-,+) + f(-,-)] / (2h)^2.
        # NOTE(review): both coordinate steps use ex[i] (not ex[j]), as
        # in the original; kept to preserve the numerics -- step and
        # denominator are consistent, so the estimate is still valid.
        x1[i] <- x[i] + h; x1[j] <- x[j] + h
        x2[i] <- x[i] + h; x2[j] <- x[j] - h
        x3[i] <- x[i] - h; x3[j] <- x[j] + h
        x4[i] <- x[i] - h; x4[j] <- x[j] - h
        diff.f <- c(f(x1, ...) - f(x2, ...) - f(x3, ...) + f(x4, ...))
        res[i, j] <- diff.f / (2 * h * 2 * h)
      } else {
        # Pure second derivative d2f/dxi^2:
        #   [f(x+h) - 2 f(x) + f(x-h)] / h^2    (x2 stays at x).
        x1[i] <- x[i] + h
        x3[i] <- x[i] - h
        diff.f <- c(f(x1, ...) - 2 * f(x2, ...) + f(x3, ...))
        res[i, i] <- diff.f / (h * h)
      }
    }
  }
  # Mirror the upper triangle onto the lower one.
  for (i in 1:n) {
    for (j in 1:i) {
      res[i, j] <- res[j, i]
    }
  }
  res # return Hessian in res matrix
}
nearPD <- function (M, eig.tol = 1e-06, conv.tol = 1e-07, posd.tol = 1e-08, maxits = 100) {
  # Nearest positive-(semi)definite matrix to the symmetric matrix M,
  # by alternating projections (Higham-style, with Dykstra correction)
  # followed by a final eigenvalue floor.  Based on nearcor() submitted
  # to R-help by Jens Oehlschlagel (2007-07-13) and posdefify() from
  # package 'sfsmisc'.
  if (!(is.numeric(M) && is.matrix(M) && identical(M, t(M))))
    stop("Input matrix M must be square and symmetric.\n")
  inf.norm <- function (A) max(rowSums(abs(A)))   # matrix infinity norm
  n <- ncol(M)
  correction <- matrix(0, n, n)   # Dykstra's correction term
  X <- M
  it <- 0
  done <- FALSE
  while (it < maxits && !done) {
    Y <- X
    R <- Y - correction
    eg <- eigen(Y, symmetric = TRUE)
    vals <- eg$values
    vecs <- eg$vectors
    Dm <- if (length(vals) > 1) diag(vals) else as.matrix(vals)
    keep <- (vals > eig.tol * vals[1])
    Vk <- vecs[, keep, drop = FALSE]
    # Project onto the PSD cone by dropping the small/negative part of
    # the spectrum, then resymmetrise.
    X <- Vk %*% Dm[keep, keep, drop = FALSE] %*% t(Vk)
    correction <- X - R
    X <- (X + t(X)) / 2
    it <- it + 1
    done <- (inf.norm(Y - X) / inf.norm(Y)) <= conv.tol
  }
  X <- (X + t(X)) / 2
  # Final posdefify step: floor tiny/negative eigenvalues and rescale
  # so the original diagonal magnitudes are (approximately) preserved.
  eg <- eigen(X, symmetric = TRUE)
  vals <- eg$values
  floor.val <- posd.tol * abs(vals[1])
  if (vals[n] < floor.val) {
    vals[vals < floor.val] <- floor.val
    vecs <- eg$vectors
    old.diag <- diag(X)
    X <- vecs %*% (vals * t(vecs))
    rescale <- sqrt(pmax(floor.val, old.diag) / diag(X))
    X[] <- rescale * X * rep(rescale, each = n)
  }
  (X + t(X)) / 2
}
bdiag <- function (...) {
  # Assemble a block-diagonal matrix from matrices given either as
  # separate arguments or as a single list of matrices.
  blocks <- list(...)
  ## handle case in which a single list of matrices is given
  if (length(blocks) == 1)
    blocks <- unlist(blocks, recursive = FALSE)
  # offsets[r, ] is the (zero-based) corner at which block r starts;
  # the last row gives the dimensions of the full result.
  offsets <- rbind(c(0, 0), apply(sapply(blocks, dim), 1, cumsum))
  out <- array(0, dim = offsets[length(blocks) + 1, ])
  corner <- matrix(rep(1:0, 2), ncol = 2)
  for (b in seq_along(blocks)) {
    span <- apply(offsets[b:(b + 1), ] + corner, 2, function(z) z[1]:z[2])
    if (is.null(dim(span))) {
      ## ragged / length-1 spans: apply() returns a list or plain vector
      out[span[[1]], span[[2]]] <- blocks[[b]]
    } else {
      ## equal-length spans: apply() returns a matrix of indices
      out[span[, 1], span[, 2]] <- blocks[[b]]
    }
  }
  out
}
#' Computes the AVE, DWAVE, WAVE, CH-EXP and CH-ECDF methods for multivariate GLMMs (m-GLMMs)
#'
#' It acts on the Models structure for all pairs of items and returns the estimates for the m-GLMM parameters with 5 different methods
#'@param Models A list which contains the lme4 model objects taken from the pairwise separate estimations (list of size Q*(Q-1)/2)
#'@param ModelsOne A list which contains the lme4 model objects taken from the univariate separate estimations (list of size Q)
#'@param Data a data.frame with the data. 1st column id, 2nd column time, remaining Q columns are the y 0/1 values (Q items)
#'@param GHk Number of Gauss-Hermite points per dimension of integration
#'@param n number of individuals
#'@param Q the number of items. Set this to four.
#'@param extraParam a helper list which depends on pairwise estimates
#'@param extraParamOne a helper list which depends on univariate estimates
#'@param m an integer which is useful for subsequent runs
#'@return The estimated parameters of the model with methods AVE, DWAVE, WAVE, CH-EXP and CH-ECDF
#'@export
# Implementation notes (see roxygen header above):
# Builds the sandwich-estimator ingredients for all Q*(Q-1)/2 = 6
# pairwise fits -- K = sum over subjects of outer products of scores,
# J = block-diagonal of per-pair Hessians -- and then combines the
# stacked estimates with five weighting schemes: AVE, CLIC-heuristic
# (exp), DWAVE, WAVE and CLIC-heuristic (ecdf).  Intermediate matrices
# and estimates are written to "<m>_*.txt" files as a side effect.
# NOTE(review): the code hard-wires Q = 4 throughout (6 pairs, 20
# global parameters, 120 = 6*20 stacked "BIG" parameters); confirm
# before calling with any other Q.
aveThetas2 <- function (Models,ModelsOne, Data, GHk = 5, n, Q, extraParam, extraParamOne, m=1) {
# Compute K matrix
#environment(logLik.bin) <- environment(score.bin) <- environment()
#environment(logLik.bin.BIG) <- environment(score.bin.BIG) <- environment()
#environment(logLik.bin.BIG) <- environment(score.bin.BIG) <- environment(estimateModelFit2)
#environment(logLik.bin.BIG) <- environment(score.bin.BIG) <- environment(estimateModelFit2) <- environment()
# Re-home the helper functions into this call frame so they can see
# local variables (relies on lexical-scoping surgery; fragile).
environment(logLik.bin.BIG) <- environment(hess.bin.BIG.Vect) <- environment(score.bin.BIG) <-
environment(logLik.bin) <- environment(hess.bin.Vect) <- environment(score.bin) <-
environment(estimateModelFit2) <- environment()
DataRaw <- Data
# Univariate (one-outcome) variance components, one triple per item.
Sigma2YiRIone <- vector("list", Q)
Sigma2YiRSone <- vector("list", Q)
Sigma2YiRIRSone <- vector("list", Q)
for (ii in 1:Q) {
Sigma2YiRIone[[ii]] <- VarCorr(ModelsOne[[ii]])$id[1,1]
Sigma2YiRSone[[ii]] <- VarCorr(ModelsOne[[ii]])$id[2,2]
Sigma2YiRIRSone[[ii]] <- VarCorr(ModelsOne[[ii]])$id[1,2]
}
# Per-subject outer products of "BIG" scores (pair + two univariate
# margins); summed below into the K matrix.
Klis <- vector("list", n)
for (i in 1:n) {
cat("Individual No...:", i, "for scores computation\n")
pairs <- combn(Q, 2)
P <- ncol(pairs)
Scores <- vector("list", P)
Sigma2YiRI <- vector("list", P)
Sigma2YiRS <- vector("list", P)
Sigma2YiRIRS <- vector("list", P)
Sigma2YjRI <- vector("list", P)
Sigma2YjRS <- vector("list", P)
Sigma2YjRIRS <- vector("list", P)
for (p in 1:P) {
cp <-p
# Within-pair variance components, in the 1,3,2,4 ordering expected
# by logLik.bin (see the Dold -> Dnew remap there).
Sigma2YiRI[[cp]] <- VarCorr(Models[[p]])$id[1,1]
Sigma2YiRS[[cp]] <- VarCorr(Models[[p]])$id[3,3]
Sigma2YiRIRS[[cp]] <- VarCorr(Models[[p]])$id[3,1]
Sigma2YjRI[[cp]] <- VarCorr(Models[[p]])$id[2,2]
Sigma2YjRS[[cp]] <- VarCorr(Models[[p]])$id[4,4]
Sigma2YjRIRS[[cp]] <- VarCorr(Models[[p]])$id[4,2]
prs <- pairs[, p]
# Stack the two outcomes of pair p into the long data frame DD.
yi <- Data[[paste("y", prs[1], sep = "")]]
yj <- Data[[paste("y", prs[2], sep = "")]]
DD <- do.call(rbind, list(Data[1:3], Data[1:3]))
DD$outcome <- gl(2, nrow(Data))
DD$y <- c(yi, yj)
DD.i <- DD[(ind.i <- DD$id == i), ]
D <- nearPD(VarCorr(Models[[p]])$id)
indices <- 1:Q
indic <- indices[c(-prs[1],-prs[2])]   # the two items NOT in pair p
#Scores[[p]] <- score.bin(fixef(Models[[p]]), id = DD.i$id, y = DD.i$y,
# X = model.matrix(Models[[p]])[ind.i, ],
# Z = model.matrix(Models[[p]])[ind.i, seq_len(2*ncz)],
# GHk = GHk)
###Scores[[p]] <- score.bin(c(fixef(Models[[p]]),SigmaYi[[p]],SigmaYj[[p]],rhoYiYj[[p]]), id = DD.i$id, y = DD.i$y,
### X = model.matrix(Models[[p]])[ind.i, ],
### Z = cbind(model.matrix(Models[[p]])[ind.i, seq_len(2*ncz)]),
### GHk = GHk)
# "BIG" score for subject i and pair p: 20 parameters = 10 pairwise
# + 2 x 5 univariate (for the two left-out items).
Scores[[p]] <- #score.bin(c(fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]]),
#score.bin.BIG(c(fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]]),
##score.bin.BIG(c(fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]]),
score.bin.BIG(c(fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]],
fixef(ModelsOne[[indic[1]]]),Sigma2YiRIone[[indic[1]]],Sigma2YiRSone[[indic[1]]],Sigma2YiRIRSone[[indic[1]]],
fixef(ModelsOne[[indic[2]]]),Sigma2YiRIone[[indic[2]]],Sigma2YiRSone[[indic[2]]],Sigma2YiRIRSone[[indic[2]]]),
id = DD.i$id, y = DD.i$y,
X = model.matrix(Models[[p]])[ind.i, ],
Z = cbind(model.matrix(Models[[p]])[ind.i, ]),
GHk = GHk,
extraParam = extraParam[[p]],
Data = DD.i,
r = p,
i = i)
#thetas <- c(fixef(Models[[p]]),SigmaYi[[p]],SigmaYj[[p]],rhoYiYj[[p]])
#ii<- prs[1]
#jj<- prs[2]
#ic[ii] <- thetas[1]
#ic[jj] <- thetas[2]
#sl[ii] <- thetas[3]
#sl[jj] <- thetas[4]
#sig[ii] <- thetas[5]
#sig[jj] <- thetas[6]
#rh[p] <- thetas[7]
}
#kkk <- P * length( c( fixef(Models[[1]]) , SigmaYi[[1]], SigmaYj[[1]], rhoYiYj[[1]] ))
kkk <- P * length( c( fixef(Models[[1]]) , Sigma2YiRI[[1]], Sigma2YiRS[[1]], Sigma2YiRIRS[[1]], Sigma2YjRI[[1]], Sigma2YjRS[[1]], Sigma2YjRIRS[[1]],
rep(c(fixef(ModelsOne[[1]]), Sigma2YiRIone[[1]], Sigma2YiRSone[[1]], Sigma2YiRIRSone[[1]]),2)))
# K (for subject i): all pairwise outer products of the P score blocks.
K <- matrix(0, kkk, kkk)
ee <- expand.grid(1:P, 1:P)
ss <- sapply(Scores, length)
ss2 <- cumsum(ss)
ss1 <- c(1, ss2[-P] + 1)
for (ii in 1:nrow(ee)) {
k <- ee$Var1[ii]
j <- ee$Var2[ii]
row.ind <- seq(ss1[k], ss2[k])
col.ind <- seq(ss1[j], ss2[j])
K[row.ind, col.ind] <- Scores[[k]] %o% Scores[[j]]
# NOTE(review): the next line is an exact duplicate of the previous
# one (harmless reassignment); kept as-is.
K[row.ind, col.ind] <- Scores[[k]] %o% Scores[[j]]
}
Klis[[i]] <- K
}
# Sum the per-subject contributions into the final K.
K <- Reduce("+", Klis)
# Extract J matrix
###J <- lapply(Models, vcov)
##J<-matrix(0,18,18)
# NOTE(review): `pairs`, `P` and the Sigma2Y* lists below are the values
# leaked from the last iteration of the subject loop above (identical in
# every iteration, so this works, but is fragile).
Hessians<- vector("list", P)
for (p in 1:P) {
cat("Hessian No: ...",p,"\n")
prs <- pairs[, p]
yi <- Data[[paste("y", prs[1], sep = "")]]
yj <- Data[[paste("y", prs[2], sep = "")]]
DD <- do.call(rbind, list(Data[1:3], Data[1:3]))
DD$outcome <- gl(2, nrow(Data))
DD$y <- c(yi, yj)
#DD.i <- DD[(ind.i <- DD$id == i), ] #take all observations now, from all i=1:n
D <- nearPD(VarCorr(Models[[p]])$id)
indices <- 1:Q
indic <- indices[c(-prs[1],-prs[2])]
#Hessians[[p]] <- hess.bin(c( fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]] ),
# id = DD$id, y = DD$y,
# X = model.matrix(Models[[p]])[, ],
# Z = cbind(model.matrix(Models[[p]])[, seq_len(2*ncz)]),
# GHk = GHk,
# extraParam = extraParam[[p]])
# "BIG" Hessian over all subjects for pair p (i = n+1 flags "all").
Hessians[[p]] <- #hess.bin.Vect(c( fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]] ),
hess.bin.BIG.Vect(c( fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]],
fixef(ModelsOne[[indic[1]]]),Sigma2YiRIone[[indic[1]]],Sigma2YiRSone[[indic[1]]],Sigma2YiRIRSone[[indic[1]]],
fixef(ModelsOne[[indic[2]]]),Sigma2YiRIone[[indic[2]]],Sigma2YiRSone[[indic[2]]],Sigma2YiRIRSone[[indic[2]]]),
id = DD$id, y = DD$y,
X = model.matrix(Models[[p]])[, ],
Z = cbind(model.matrix(Models[[p]])[, ]),
GHk = GHk,
extraParam = extraParam[[p]],
Data = DD,
r = p,
i = n+1)
#thetas <- c(fixef(Models[[p]]),SigmaYi[[p]],SigmaYj[[p]],rhoYiYj[[p]])
#ii<- prs[1]
#jj<- prs[2]
#ic[ii] <- thetas[1]
#ic[jj] <- thetas[2]
#sl[ii] <- thetas[3]
#sl[jj] <- thetas[4]
#sig[ii] <- thetas[5]
#sig[jj] <- thetas[6]
#rh[p] <- thetas[7]
}
# J: block-diagonal of the P per-pair Hessians (120 x 120 here).
J <- do.call(bdiag, Hessians)
## Now J is redefined as the block matrix 108x108, or (6x18) x (6x18)
###J <- bdiag(lapply(J, function (x) solve(matrix(x@x, x@Dim[1], x@Dim[2]))))
# Compute A matrix
nbetas <- length( c(fixef(Models[[1]]) , Sigma2YiRI[[1]], Sigma2YiRS[[1]], Sigma2YiRIRS[[1]], Sigma2YjRI[[1]], Sigma2YjRS[[1]], Sigma2YjRIRS[[1]],
rep(c(fixef(ModelsOne[[1]]), Sigma2YiRIone[[1]], Sigma2YiRSone[[1]], Sigma2YiRIRSone[[1]]),2) ) )
#thetas <- c( sapply(Models, fixef) )
# Stack the 20 "BIG" parameters of every pair into one 120-vector.
thetas <- numeric(0)
# NOTE(review): "1:20*P" parses as (1:20)*P, so this zeroes only indices
# 6, 12, ..., 120; harmless because every entry is assigned below, but
# the intent was almost certainly 1:(20*P).
for (ii in 1:20*P) {
thetas[ii]=0
}
for (ii in 1:P) {
thetas[(ii-1)*20+1]=fixef(Models[[ii]])[1]
thetas[(ii-1)*20+2]=fixef(Models[[ii]])[2]
thetas[(ii-1)*20+3]=fixef(Models[[ii]])[3]
thetas[(ii-1)*20+4]=fixef(Models[[ii]])[4]
thetas[(ii-1)*20+5]=Sigma2YiRI[[ii]]
thetas[(ii-1)*20+6]=Sigma2YiRS[[ii]]
thetas[(ii-1)*20+7]=Sigma2YiRIRS[[ii]]
thetas[(ii-1)*20+8]=Sigma2YjRI[[ii]]
thetas[(ii-1)*20+9]=Sigma2YjRS[[ii]]
thetas[(ii-1)*20+10]=Sigma2YjRIRS[[ii]]
p<-ii
prs <- pairs[, p]
indices <- 1:Q
indic <- indices[c(-prs[1],-prs[2])]
thetas[(ii-1)*20+11]=fixef(ModelsOne[[indic[1]]])[1]
thetas[(ii-1)*20+12]=fixef(ModelsOne[[indic[1]]])[2]
thetas[(ii-1)*20+13]=Sigma2YiRIone[[indic[1]]]
thetas[(ii-1)*20+14]=Sigma2YiRSone[[indic[1]]]
thetas[(ii-1)*20+15]=Sigma2YiRIRSone[[indic[1]]]
thetas[(ii-1)*20+16]=fixef(ModelsOne[[indic[2]]])[1]
thetas[(ii-1)*20+17]=fixef(ModelsOne[[indic[2]]])[2]
thetas[(ii-1)*20+18]=Sigma2YiRIone[[indic[2]]]
thetas[(ii-1)*20+19]=Sigma2YiRSone[[indic[2]]]
thetas[(ii-1)*20+20]=Sigma2YiRIRSone[[indic[2]]]
}
# CLIC per pair: composite log-likelihood + trace(K_p H_p^{-1}).
# NOTE(review): the "1:6" loops below hard-code P = 6 pairs (Q = 4).
Kone <- vector("list", P)
HelpMat <- vector("list", P)
CLIC <- vector("list", P)
wtCLIC <- vector("list", P)
wtCLIC_B <- vector("list", P)
CLIC_stdz <- vector("list", P)
for (p in 1:6) {
Kone[[p]] <- K[((p-1)*nbetas+1):((p-1)*nbetas+nbetas),((p-1)*nbetas+1):((p-1)*nbetas+nbetas)]
}
for (p in 1:6) {
HelpMat[[p]] <- Kone[[p]] %*% solve(Hessians[[p]])
}
for (p in 1:6) {
#CLIC[[p]] <- logLik(Models[[p]]) + sum(diag(HelpMat[[p]])) #sum(diag(A)) is trace(A) in R language for A square matrix
prs <- pairs[, p]
yi <- Data[[paste("y", prs[1], sep = "")]]
yj <- Data[[paste("y", prs[2], sep = "")]]
# NOTE(review): DD here is the data frame left over from the Hessians
# loop above; only its y column is refreshed for the current pair.
DD$y <- c(yi, yj)
#DD.i <- DD[(ind.i <- DD$id == i), ] #take all observations now, from all i=1:n
D <- nearPD(VarCorr(Models[[p]])$id)
indices <- 1:Q
indic <- indices[c(-prs[1],-prs[2])]
CLIC[[p]] <- logLik.bin.BIG(c( fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]],
fixef(ModelsOne[[indic[1]]]),Sigma2YiRIone[[indic[1]]],Sigma2YiRSone[[indic[1]]],Sigma2YiRIRSone[[indic[1]]],
fixef(ModelsOne[[indic[2]]]),Sigma2YiRIone[[indic[2]]],Sigma2YiRSone[[indic[2]]],Sigma2YiRIRSone[[indic[2]]]),
id = DD$id, y = DD$y,
X = model.matrix(Models[[p]])[, ],
Z = cbind(model.matrix(Models[[p]])[, ]),
GHk = GHk,
extraParam = extraParam[[p]],
Data = DD,
r = p,
i = n+1) + sum(diag(HelpMat[[p]])) #sum(diag(A)) is trace(A) in R language for A square matrix
}
#logLik_Models <- c(logLik(Models[[1]]),logLik(Models[[2]]),logLik(Models[[3]]),
# logLik(Models[[4]]),logLik(Models[[5]]),logLik(Models[[6]]))
trace_Models <- c(sum(diag(HelpMat[[1]])),sum(diag(HelpMat[[2]])),sum(diag(HelpMat[[3]])),
sum(diag(HelpMat[[4]])),sum(diag(HelpMat[[5]])),sum(diag(HelpMat[[6]])))
CLIC_Models <- c(CLIC[[1]],CLIC[[2]],CLIC[[3]],CLIC[[4]],CLIC[[5]],CLIC[[6]])
## 7.9.2015: KJF. Standardize the CLIC values to mean 0 and sd 1
mu_CLIC <-mean(CLIC_Models)
sigma_CLIC <- sd(CLIC_Models)
CLIC_Models_stdz <- (CLIC_Models - mu_CLIC) / sigma_CLIC
logLik_Models <- CLIC_Models - trace_Models #simplify, not call again logLik.bin
write.table(logLik_Models,paste(m,"_logLik_Models.txt",sep=""),row.names=F,col.names=F)
write.table(trace_Models,paste(m,"_trace_Models.txt",sep=""),row.names=F,col.names=F)
write.table(CLIC_Models,paste(m,"_CLIC_Models.txt",sep=""),row.names=F,col.names=F)
#weights <- exp(CLIC)
#A2 <- weights/sum(weights)
#7.9.2015. KJF. standardization in CLIC values for weights
for (p in 1:6) {
CLIC_stdz[[p]] <- CLIC_Models_stdz[p]
}
# CLIC-heuristic weights, variant A: softmax of standardized CLICs.
for (p in 1:6) {
#wtCLIC[[p]] <- exp(CLIC[[p]]) #VV idea
##wtCLIC[[p]] <- exp(mpfr(CLIC[[p]],80)) #KF idea
wtCLIC[[p]] <- exp(CLIC_stdz[[p]]) #KF idea, standardization 7.9.2015
#wtCLIC[[p]] <- as.numeric(exp(as.brob(CLIC[[p]]))) #KF idea
###wtCLIC[[p]] <- - 1 / CLIC[[p]] #KF idea
}
# 4.10.2015, KJF add.
# CLIC ecdf way: B' way -- weights from the empirical CDF of the CLICs.
a=CLIC_Models_stdz
f=ecdf(a)
wtCLIC_B[[1]] = f(a)[1]
wtCLIC_B[[2]] = f(a)[2]
wtCLIC_B[[3]] = f(a)[3]
wtCLIC_B[[4]] = f(a)[4]
wtCLIC_B[[5]] = f(a)[5]
wtCLIC_B[[6]] = f(a)[6]
# 4.10.2015, end KJF add.
OmegaVec <- wtCLIC
OmegaVec_B <- wtCLIC_B
# Build the CLIC-weighted 20x120 combination matrices A2 / A2_B on the
# sparsity pattern of the simple-average matrix A.
A<- computeWeightMatrixAVE_by6() # to re-write for 20x120 case KF 25.6.2015, todo:
#Omega <- matrix(0,P*nbetas,P*nbetas)
#A2 <- matrix(0, Q*(nbetas)/2, length(thetas))
A2 <- matrix(0, 20, 120)
A2_B <- matrix(0, 20, 120)
##A2 <- new("mpfrMatrix", mpfr(rep(0,20*120),80),Dim = c(20L, 120L))
##validObject(A2)
for (i in seq_len(Q*nbetas/4)) {
#ii <- inter == levels(inter)[i] just take all 6 which are intcpt, so odd
###ii <- rep(c(TRUE,FALSE,FALSE),choose(length(times),2))
ii <- A[i,]==(1/6) #adjacency matrix for global parameter i according to A = computeWeightMatrixAVE_by6()
i2 <- which(ii==T)
# Map each of the 6 stacked positions back to its pair index.
jj <- c(intervalBIG(i2[1]),intervalBIG(i2[2]),intervalBIG(i2[3]),intervalBIG(i2[4]),intervalBIG(i2[5]),intervalBIG(i2[6]))
#weights <- OmegaVec[jj]
weights <- c(OmegaVec[[jj[1]]], OmegaVec[[jj[2]]], OmegaVec[[jj[3]]], OmegaVec[[jj[4]]], OmegaVec[[jj[5]]], OmegaVec[[jj[6]]])
weights_B <- c(OmegaVec_B[[jj[1]]],OmegaVec_B[[jj[2]]],OmegaVec_B[[jj[3]]],OmegaVec_B[[jj[4]]],OmegaVec_B[[jj[5]]],OmegaVec_B[[jj[6]]])
A2[i, ii] <- weights / sum(weights)
A2_B[i, ii] <- weights_B / sum(weights_B)
}
#ind.outcome <- c(apply(pairs, 2, rep, length.out = nbetas))
#ind.param <- rep(rep(seq_len(nbetas/2), each = 2), length.out = length(thetas))
#inter <- interaction(ind.outcome, ind.param)
#inter <- factor(inter, levels = sort(levels(inter)))
#A <- matrix(0, Q*nbetas/2, length(thetas))
#for (i in seq_len(Q*nbetas/2)) {
# A[i, inter == levels(inter)[i]] <- 1 / sum(pairs == 1)
#}
#Compute A to be 16 x 48 ad hoc (by pencil)
A<- computeWeightMatrixAVE_by6()
write.table(A,paste(m,"_A.txt",sep=""),row.names=F,col.names=F)
#A2 <- asNumeric(A2)
#A2 <- matrix(A2,nrow=20,ncol=120)
write.table(A2,paste(m,"_A2.txt",sep=""),row.names=F,col.names=F)
write.table(A2_B,paste(m,"_A2_B.txt",sep=""),row.names=F,col.names=F)
# pairwise average betas
##ave.betas <- c(A %*% thetas)
# standard errors for betas
##se.betas <- sqrt(diag(A %*% solve(J, K) %*% solve(J) %*% t(A)))
# CLIC heuristic (exp weights): estimates and sandwich standard errors.
ave.betas2 <- c(A2 %*% thetas)
# standard errors for betas
se.betas2 <- sqrt(diag(A2 %*% solve(J, K) %*% solve(J) %*% t(A2)))
# 4.10.2015. KJF add, B' way for CLIC heuristic with ecdf()
# CLIC heuristic (ecdf weights)
ave.betas5 <- c(A2_B %*% thetas)
# standard errors for betas
se.betas5 <- sqrt(diag(A2_B %*% solve(J, K) %*% solve(J) %*% t(A2_B)))
# 4.10.2015. end KJF add, B' way for CLIC heuristic with ecdf()
#now regular methods: recompute K (KK) and J (JJ) for the plain
#pairwise (10-parameter) formulation.
#JJ,KK
#KK
KKlis <- vector("list", n)
for (i in 1:n) {
cat("Individual No...:", i, "for sscores computation\n")
SScores <- vector("list", P)
ic <- vector("list",Q)
sl <- vector("list",Q)
sigz <- vector("list",Q)
sigw <- vector("list",Q)
###rh <- vector("list",P)
for (p in 1:P) {
prs <- pairs[, p]
yi <- Data[[paste("y", prs[1], sep = "")]]
yj <- Data[[paste("y", prs[2], sep = "")]]
DD$y <- c(yi, yj)
DD.i <- DD[(ind.i <- DD$id == i), ]
D <- nearPD(VarCorr(Models[[p]])$id)
#Scores[[p]] <- score.bin(fixef(Models[[p]]), id = DD.i$id, y = DD.i$y,
# X = model.matrix(Models[[p]])[ind.i, ],
# Z = model.matrix(Models[[p]])[ind.i, seq_len(2*ncz)],
# GHk = GHk)
###Scores[[p]] <- score.bin(c(fixef(Models[[p]]),SigmaYi[[p]],SigmaYj[[p]],rhoYiYj[[p]]), id = DD.i$id, y = DD.i$y,
### X = model.matrix(Models[[p]])[ind.i, ],
### Z = cbind(model.matrix(Models[[p]])[ind.i, seq_len(2*ncz)]),
### GHk = GHk)
SScores[[p]] <- score.bin(c(fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]]),
#score.bin.BIG(c(fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]]),
#score.bin.BIG(c(fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]]),
id = DD.i$id, y = DD.i$y,
X = model.matrix(Models[[p]])[ind.i, ],
Z = cbind(model.matrix(Models[[p]])[ind.i, ]),
GHk = GHk,
extraParam = extraParam[[p]])
#thetas <- c(fixef(Models[[p]]),SigmaYi[[p]],SigmaYj[[p]],rhoYiYj[[p]])
#ii<- prs[1]
#jj<- prs[2]
#ic[ii] <- thetas[1]
#ic[jj] <- thetas[2]
#sl[ii] <- thetas[3]
#sl[jj] <- thetas[4]
#sig[ii] <- thetas[5]
#sig[jj] <- thetas[6]
#rh[p] <- thetas[7]
}
#kkk <- P * length( c( fixef(Models[[1]]) , SigmaYi[[1]], SigmaYj[[1]], rhoYiYj[[1]] ))
kkkk <- P * length( c( fixef(Models[[1]]) , Sigma2YiRI[[1]], Sigma2YiRS[[1]], Sigma2YiRIRS[[1]], Sigma2YjRI[[1]], Sigma2YjRS[[1]], Sigma2YjRIRS[[1]] ))
KK <- matrix(0, kkkk, kkkk)
ee <- expand.grid(1:P, 1:P)
ss <- sapply(SScores, length)
ss2 <- cumsum(ss)
ss1 <- c(1, ss2[-P] + 1)
for (ii in 1:nrow(ee)) {
k <- ee$Var1[ii]
j <- ee$Var2[ii]
row.ind <- seq(ss1[k], ss2[k])
col.ind <- seq(ss1[j], ss2[j])
KK[row.ind, col.ind] <- SScores[[k]] %o% SScores[[j]]
# NOTE(review): duplicate assignment, kept as-is.
KK[row.ind, col.ind] <- SScores[[k]] %o% SScores[[j]]
}
KKlis[[i]] <- KK
}
KK <- Reduce("+", KKlis)
# Extract J matrix (pairwise 10-parameter Hessians)
HHessians<- vector("list", P)
for (p in 1:P) {
cat("HHessian No: ...",p,"\n")
prs <- pairs[, p]
yi <- Data[[paste("y", prs[1], sep = "")]]
yj <- Data[[paste("y", prs[2], sep = "")]]
DD$y <- c(yi, yj)
#DD.i <- DD[(ind.i <- DD$id == i), ] #take all observations now, from all i=1:n
D <- nearPD(VarCorr(Models[[p]])$id)
#Hessians[[p]] <- hess.bin(c( fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]] ),
# id = DD$id, y = DD$y,
# X = model.matrix(Models[[p]])[, ],
# Z = cbind(model.matrix(Models[[p]])[, seq_len(2*ncz)]),
# GHk = GHk,
# extraParam = extraParam[[p]])
HHessians[[p]] <- hess.bin.Vect(c( fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]] ),
#hess.bin.BIG.Vect(c( fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]] ),
id = DD$id, y = DD$y,
X = model.matrix(Models[[p]])[, ],
Z = cbind(model.matrix(Models[[p]])[, ]),
GHk = GHk,
extraParam = extraParam[[p]])
}
JJ <- do.call(bdiag, HHessians)
# Compute A matrix
nbetasS <- length( c(fixef(Models[[1]]) , Sigma2YiRI[[1]], Sigma2YiRS[[1]], Sigma2YiRIRS[[1]], Sigma2YjRI[[1]], Sigma2YjRS[[1]], Sigma2YjRIRS[[1]]) )
#thetas <- c( sapply(Models, fixef) )
# Stack the 10 plain pairwise parameters of every pair (60-vector).
thetasS <- numeric(0)
# NOTE(review): same parsing issue as above -- "1:10*P" is (1:10)*P.
for (ii in 1:10*P) {
thetasS[ii]=0
}
for (ii in 1:P) {
thetasS[(ii-1)*10+1]=fixef(Models[[ii]])[1]
thetasS[(ii-1)*10+2]=fixef(Models[[ii]])[2]
thetasS[(ii-1)*10+3]=fixef(Models[[ii]])[3]
thetasS[(ii-1)*10+4]=fixef(Models[[ii]])[4]
thetasS[(ii-1)*10+5]=Sigma2YiRI[[ii]]
thetasS[(ii-1)*10+6]=Sigma2YiRS[[ii]]
thetasS[(ii-1)*10+7]=Sigma2YiRIRS[[ii]]
thetasS[(ii-1)*10+8]=Sigma2YjRI[[ii]]
thetasS[(ii-1)*10+9]=Sigma2YjRS[[ii]]
thetasS[(ii-1)*10+10]=Sigma2YjRIRS[[ii]]
}
AA <- computeWeightMatrixAVE()
#Compute JJ,KK,thetasS,AA READY
# AVE: plain pairwise average and its sandwich standard errors.
ave.betas <- c(AA %*% thetasS)
# standard errors for betas
se.betas <- sqrt(diag(AA %*% solve(JJ, KK) %*% solve(JJ) %*% t(AA)))
# DWAVE algorithm is method no. 3:
#Step 1: Vi computation (sandwich covariance of the stacked estimates)
Sigma <- solve(JJ, KK) %*% solve(JJ)
Vi <- Sigma
#Step 2: Omega computation with filter (mask): keep only the entries
#that link the SAME parameter across pairs.
Omega<-matrix(0,P*nbetasS,P*nbetasS)
crow=0
ccol=0
for (ii in 1:P) {
crow= (ii-1)*nbetasS
for (jj in 1:P) {
ccol = (jj-1)*nbetasS
for (kk in 1:nbetasS) {
Omega[crow+kk, ccol+kk] <- Vi[crow+kk, ccol+kk]
}
}
}
#Step 3: Omega inversion in new Omega (generalized inverse if singular)
if ( abs(det(Omega)) > 10^(-10) )
{
Omega <- solve(Omega)
}
if ( abs(det(Omega)) < 10^(-10) )
{
Omega <- ginv(Omega)
}
#Step 4: Formulas for A -- optimal weights nom/denom per parameter kk
A<- matrix(0,nbetasS,P*nbetasS)
crow=0
ccol=0
crow2=0
ccol2=0
for (kk in 1:nbetasS) {
#compute denom, which does not depend on r
denom <-0
for (ii in 1:P) {
for (jj in 1:P) {
crow2 <- (ii-1)*nbetasS
ccol2 <- (jj-1)*nbetasS
denom <- denom + Omega[crow2+kk,ccol2+kk]
}
}
#compute nom, which depends on r
for (r in 1:P) {
nom <-0
for (l in 1:P) {
crow <- (r-1)*nbetasS
ccol <- (l-1)*nbetasS
nom <- nom + Omega[crow+kk,ccol+kk]
}
# nom is ready
# for every kk (outer loop) and r (inner loop) compute nom/denom
A[kk,(r-1)*nbetasS+kk] <- nom/denom
}
}
# DWAVE estimates and standard errors
A5<-A #for now
A3 <- computeWeightMatrixWAVE(A5)
ave.betas3 <- c(A3 %*% thetasS)
# standard errors for betas
se.betas3 <- sqrt(diag(A3 %*% solve(JJ, KK) %*% solve(JJ) %*% t(A3)))
# WAVE algorithm is method no. 4:
###Omega <- solve(Sigma * JJ)
if (det(Sigma) > 10^(-10)) {
#Sigma <- solve(J, K) %*% solve(J)
Omega <- solve(Sigma * JJ)
}
if (det(Sigma) < 10^(-10)) {
# Sigma <- solve(J, K) %*% solve(J)
Omega <- ginv(Sigma * JJ) # use ginv() function from MASS, generalized inverse, seems robust to det -->0
}
npairs <- sum(pairs == 1)
A4 <- matrix(0, Q*nbetasS/2, length(thetasS))
for (i in seq_len(Q*nbetasS/2)) {
#ii <- inter == levels(inter)[i]
ii <- AA[i,]==(1/3) #adjacency matrix for global parameter i according to AA = computeWeightMatrixAVE()
weights <- diag(Omega[ii, ii])
A4[i, ii] <- weights / sum(weights)
}
ave.betas4 <- c(A4 %*% thetasS)
# standard errors for betas
###se.betas2 <- sqrt(diag(A2 %*% solve(J, K) %*% solve(J) %*% t(A2)))
###se.betas3 <- sqrt(diag(A3 %*% solve(J, K) %*% solve(J) %*% t(A3)))
se.betas4 <- sqrt(diag(A4 %*% solve(JJ, KK) %*% solve(JJ) %*% t(A4)))
# Persist all weight matrices and results for offline inspection.
write.table(AA,paste(m,"_AA.txt",sep=""),row.names=F,col.names=F)
write.table(A2,paste(m,"_A2.txt",sep=""),row.names=F,col.names=F)
write.table(A3,paste(m,"_A3.txt",sep=""),row.names=F,col.names=F)
write.table(A4,paste(m,"_A4.txt",sep=""),row.names=F,col.names=F)
write.table(A2_B,paste(m,"_A2_B.txt",sep=""),row.names=F,col.names=F)
write.table(ave.betas,paste(m,"_ave.betas.txt",sep=""),row.names=F,col.names=F)
write.table(ave.betas2,paste(m,"_ave.betas2.txt",sep=""),row.names=F,col.names=F)
write.table(ave.betas3,paste(m,"_ave.betas3.txt",sep=""),row.names=F,col.names=F)
write.table(ave.betas4,paste(m,"_ave.betas4.txt",sep=""),row.names=F,col.names=F)
write.table(ave.betas5,paste(m,"_ave.betas5.txt",sep=""),row.names=F,col.names=F)
write.table(se.betas,paste(m,"_se.betas.txt",sep=""),row.names=F,col.names=F)
write.table(se.betas2,paste(m,"_se.betas2.txt",sep=""),row.names=F,col.names=F)
write.table(se.betas3,paste(m,"_se.betas3.txt",sep=""),row.names=F,col.names=F)
write.table(se.betas4,paste(m,"_se.betas4.txt",sep=""),row.names=F,col.names=F)
write.table(se.betas5,paste(m,"_se.betas5.txt",sep=""),row.names=F,col.names=F)
# results: one column pair (estimate, SE) per combination method
cbind("Value(ave)" = ave.betas, "SE(ave)" = se.betas,
"Value(clic-h-exp)" = ave.betas2, "SE(clic-h-exp)" = se.betas2,
"Value(dwave)" = ave.betas3, "SE(dwave)" = se.betas3,
"Value(wave)" = ave.betas4, "SE(wave)" = se.betas4,
"Value(clic-h-ecdf)" = ave.betas5, "SE(clic-h-ecdf)" = se.betas5)
###cbind("Value(ave)" = ave.betas.Florios) #, "SE(ave)" = se.betas,
# "Value(wave)" = ave.betas2, "SE(wave)" = se.betas2)
}
interval <- function(x) {
  # Map a 1-based flat parameter index to its block-of-10 number:
  # 1..10 -> 1, 11..20 -> 2, ...  Exact multiples of 10 belong to the
  # block they close, not the next one.
  if (x %% 10 == 0) {
    return((x - 1) %/% 10 + 1)
  }
  x %/% 10 + 1
}
intervalBIG <- function(x) {
  # Map a 1-based flat parameter index to its block-of-20 number
  # (i.e. the pair a stacked "BIG" parameter belongs to):
  # 1..20 -> 1, 21..40 -> 2, ...  Exact multiples of 20 belong to the
  # block they close, not the next one.
  if (x %% 20 == 0) {
    return((x - 1) %/% 20 + 1)
  }
  x %/% 20 + 1
}
computeWeightMatrixAVE_by6 <- function() {
  # Weight matrix A (20 x 120) for the simple-average (AVE) combination
  # over all 6 pairwise "BIG" fits: row g marks, with weight 1/6 each,
  # the six positions in the stacked 120-vector that carry estimates of
  # global parameter g.
  slots <- list(
    c(1, 21, 41, 71, 91, 111),
    c(3, 23, 43, 72, 92, 112),
    c(5, 25, 45, 73, 93, 113),
    c(6, 26, 46, 74, 94, 114),
    c(7, 27, 47, 75, 95, 115),
    c(2, 31, 51, 61, 81, 116),
    c(4, 32, 52, 63, 83, 117),
    c(8, 33, 53, 65, 85, 118),
    c(9, 34, 54, 66, 86, 119),
    c(10, 35, 55, 67, 87, 120),
    c(11, 22, 56, 62, 96, 101),
    c(12, 24, 57, 64, 97, 103),
    c(13, 28, 58, 68, 98, 105),
    c(14, 29, 59, 69, 99, 106),
    c(15, 30, 60, 70, 100, 107),
    c(16, 36, 42, 76, 82, 102),
    c(17, 37, 44, 77, 84, 104),
    c(18, 38, 48, 78, 88, 108),
    c(19, 39, 49, 79, 89, 109),
    c(20, 40, 50, 80, 90, 110)
  )
  A <- matrix(0, 20, 120)
  for (g in seq_along(slots)) {
    A[g, slots[[g]]] <- 1
  }
  ## rho's are skipped; finalize A with equal weights
  (1 / 6) * A
}
computeWeightMatrixAVE <- function() {
  # Weight matrix A (20 x 60) for the simple-average (AVE) combination
  # of the plain pairwise fits: each global parameter is estimated by 3
  # of the 6 pairs, so row g marks its three positions in the stacked
  # 60-vector with weight 1/3 each.
  slots <- list(
    c(1, 11, 21),
    c(3, 13, 23),
    c(5, 15, 25),
    c(6, 16, 26),
    c(7, 17, 27),
    c(2, 31, 41),
    c(4, 33, 43),
    c(8, 35, 45),
    c(9, 36, 46),
    c(10, 37, 47),
    c(12, 32, 51),
    c(14, 34, 53),
    c(18, 38, 55),
    c(19, 39, 56),
    c(20, 40, 57),
    c(22, 42, 52),
    c(24, 44, 54),
    c(28, 48, 58),
    c(29, 49, 59),
    c(30, 50, 60)
  )
  A <- matrix(0, 20, 60)
  for (g in seq_along(slots)) {
    A[g, slots[[g]]] <- 1
  }
  ## rho's are skipped; finalize A with equal weights
  (1 / 3) * A
}
computeWeightMatrixWAVE <- function(A2) {
  # Redistribute the per-parameter weights carried in A2 (one nonzero
  # entry per column) onto the sparsity pattern of the AVE matrix, then
  # normalise each row to sum to one (WAVE-style weighting).
  #
  # Args:
  #   A2: 20 x 60 matrix with exactly one nonzero per column (so
  #       colSums(A2) recovers that entry).
  # Returns:
  #   20 x 60 matrix with rows summing to 1 on the AVE pattern.
  #
  # Fix: colSums(A2) was recomputed inside the 20 x 60 double loop
  # (1200 times); it is loop-invariant and is now computed once.  The
  # row normalisation is vectorised (matrix / rowSums recycles down
  # columns, i.e. divides row i by its own sum), same result as the
  # original per-row loop.
  nzPattern <- computeWeightMatrixAVE()
  col.weight <- colSums(A2)  # the unique nonzero entry of each column
  A3 <- matrix(0, 20, 60)
  for (j in 1:60) {
    for (i in 1:20) {
      if (nzPattern[i, j] != 0) {
        A3[i, j] <- col.weight[j]
      }
    }
  }
  ## finalize A3, so that rows add up to 1
  A3 / rowSums(A3)
}
hess.bin.Vect <- function (thetas, id, y, X, Z, GHk = 5, extraParam) {
  # Numerical Hessian of the pairwise log-likelihood at `thetas`,
  # delegating to the vectorised forward-difference routine cddVect().
  #
  # Fixes relative to the previous version:
  #   * the default `GHk = GHk` was self-referential (a recursive
  #     promise that errors whenever GHk is omitted); replaced with 5,
  #     the default used by the sibling functions.  All existing
  #     callers pass GHk explicitly, so behaviour is unchanged.
  #   * a dead `res` preallocation (immediately overwritten) was
  #     removed.
  # NOTE: `extraParam` travels positionally through cddVect()'s `...`
  # and reaches logLik.bin's `extraParam` formal by positional matching.
  cddVect(thetas, logLik.bin, id = id, y = y, X = X, Z = Z, GHk = GHk, extraParam)
}
cddVect <- function (x0, f, ..., eps = 0.0005) {
  # Numerical Hessian of a real-valued function by forward second
  # differences: one baseline evaluation, k single forward steps and
  # all pairwise "double forward" steps.  Translation of the Gauss
  # command hessp(fun, x0); Matlab original by Sung Jae Jun, R port by
  # Kostas Florios (http://grizzly.la.psu.edu/~suj14/programs.html).
  #
  # Args:
  #   x0:  numeric vector of length k, point of evaluation (coerced to
  #        a k x 1 matrix internally, as in the Matlab original).
  #   f:   scalar-valued function; extra args forwarded via `...`.
  #   eps: relative step size.
  # Returns:
  #   k x k symmetric Hessian matrix.
  #
  # Fix: the previous version reassigned `eps <- 0.0005` internally,
  # silently ignoring any caller-supplied value; the hard-coded
  # reassignment has been removed.  The default is unchanged, so the
  # default behaviour is byte-identical.
  xx0 <- x0
  k <- length(x0)
  x0 <- matrix(0, k, 1)
  x0[, 1] <- xx0
  dax0 <- matrix(0, k, 1)
  hessian <- matrix(0, nrow = k, ncol = k)
  grdd <- matrix(0, nrow = k, ncol = 1)
  # Computation of stepsize (dh): step in the direction of sign(x0[i]),
  # with dh refined so that x0 + dh is exactly representable.
  ax0 = abs(x0)
  for (i in 1:k) {
    if (x0[i, 1] != 0) {
      dax0[i, 1] <- x0[i, 1] / ax0[i, 1]   # sign of x0[i]
    }
    else {
      dax0[i, 1] <- 1
    }
  }
  #dh <- eps*max(ax0, (1e-2)*matrix(1,k,1))*dax0
  dh <- eps * dax0
  xdh = x0 + dh
  dh = xdh - x0; # This increases precision slightly
  ee <- matrix(0, nrow = k, ncol = k)
  I <- diag(1, k)
  for (i in 1:k) {
    ee[, i] <- I[, i] * dh   # column i = step along coordinate i
  }
  # Computation of f0 = f(x0)
  f0 <- f(x0, ...)
  # Compute forward step for each coordinate
  for (i in 1:k) {
    grdd[i, 1] <- f(x0 + ee[, i], ...)
  }
  # Compute 'double' forward step (upper triangle, mirrored by symmetry)
  for (i in 1:k) {
    cat("Computing Row No...:", i, "of hessian\n")
    for (j in i:k) {
      hessian[i, j] <- f(x0 + (ee[, i] + ee[, j]), ...)
      if (i != j) {
        hessian[j, i] <- hessian[i, j]
      }
    }
  }
  l <- t(matrix(1, k, 1))
  grdd <- kronecker(l, grdd)
  # H[i,j] = [f(x+di+dj) - f(x+di) - f(x+dj) + f(x)] / (di * dj)
  H <- (((hessian - grdd) - t(grdd)) + f0 * matrix(1, nrow = k, ncol = k)) / kronecker(dh, t(dh))
  return(H)
}
score.bin.BIG <- function (thetas, id, y, X, Z, GHk = 5, extraParam, Data, r, i) {
    # Score (numerical gradient) of the composite log-likelihood
    # logLik.bin.BIG at 'thetas', by forward finite differences (fd).
    #
    # Fixed: the self-referential defaults Data = Data, r = r, i = i could
    # never be evaluated (promise recursion) and only masked a
    # missing-argument error; they are dropped, so the arguments are simply
    # required, as every caller already supplies them.
    fd(thetas, logLik.bin.BIG, id = id, y = y, X = X, Z = Z, GHk = GHk,
       extraParam = extraParam, Data = Data, r = r, i = i)
}
hess.bin.BIG.Vect <- function (thetas, id, y, X, Z, GHk = 5, extraParam, Data, r, i) {
    # Numerical Hessian of the composite log-likelihood logLik.bin.BIG at
    # 'thetas', delegated to the 'double forward' difference routine cddVect.
    #
    # Fixed: the default GHk = GHk was self-referential and could never be
    # evaluated (now 5, matching the other likelihood helpers); the dead
    # preallocation of the result matrix was removed.
    cddVect(thetas, logLik.bin.BIG, id = id, y = y, X = X, Z = Z, GHk = GHk,
            extraParam = extraParam, Data = Data, r = r, i = i)
}
logLik.bin.BIG <- function (thetas, id, y, X, Z, GHk = 5, extraParam, Data, r, i) {
# Composite log-likelihood used by the CLIC heuristic: the pairwise
# log-likelihood of item pair 'r' plus the univariate log-likelihoods of the
# two items NOT in that pair.
#
# thetas[1:10]  : parameters of the pairwise model (see logLik.bin)
# thetas[11:15] : univariate parameters of the first left-out item
# thetas[16:20] : univariate parameters of the second left-out item
# i             : subject index; a value > n (callers pass n + 1) selects all
#                 subjects
#
# NOTE(review): this function resolves pairs, Models, ModelsOne, n, times, Q,
# DataRaw and extraParamOne from an environment installed by aveThetas2 --
# confirm it is only ever called from within aveThetas2.
#environment(aveThetas)<- environment(logLik.bin) <- environment(score.bin) <- environment()
#environment(aveThetas)<- environment()
# environment juggling mirrors aveThetas2 (which rebinds this function's
# enclosure); presumably needed so the shared objects resolve -- TODO confirm
environment(aveThetas2)<- environment()
res<-0
cp<-0
##thetasP <- vector("list", P)
#1: pair 1-2
###thetasP[[1]] = c( thetas[c(1,2,5,6,9,13)], thetas[c(17)], thetas[c(10,14,18)] )
#2: pair 1-3
###thetasP[[2]] = c( thetas[c(1,3,5,7,9,13)], thetas[c(17)], thetas[c(11,15,19)] )
#3: pair 1-4
###thetasP[[3]] = c( thetas[c(1,4,5,8,9,13)], thetas[c(17)], thetas[c(12,16,20)] )
#4: pair 2-3
###thetasP[[4]] = c( thetas[c(2,3,6,7,10,14)], thetas[c(18)], thetas[c(11,15,19)] )
#5: pair 2-4
###thetasP[[5]] = c( thetas[c(2,4,6,8,10,14)], thetas[c(18)], thetas[c(12,16,20)] )
#6: pair 3-4
###thetasP[[6]] = c( thetas[c(3,4,7,8,11,15)], thetas[c(19)], thetas[c(12,16,20)] )
##thetasP <- thetas
##thetasP <- as.vector(thetasP)
# split the 20-vector into the pairwise part and the univariate part
thetasP <- thetas[1:10]
thetasP <- as.vector(thetasP)
thQ <- thetas[11:20]
thQ <- as.vector(thQ)
p <- r
#DD <- do.call(rbind, list(Data[1:3], Data[1:3]))
#DD$outcome <- gl(2, nrow(Data))
p<-r
prs <- pairs[, p]
DD <- Data
# pairwise contribution for pair r; the design matrix rows are stacked as
# (item 1 block, item 2 block), hence the two rbind slices
res <- logLik.bin(thetasP,
id = DD$id, y = DD$y,
X = rbind(model.matrix(Models[[p]])[1:(dim(Data)[1]/2),],
model.matrix(Models[[p]])[(n*length(times)+1):(n*length(times)+(dim(Data)[1])/2),]),
Z = rbind(model.matrix(Models[[p]])[1:(dim(Data)[1]/2),],
model.matrix(Models[[p]])[(n*length(times)+1):(n*length(times)+(dim(Data)[1]/2)),]),
GHk = GHk,
extraParam = extraParam)
# indices of the two items not in pair r
indices <- 1:Q
indic <- indices[c(-prs[1],-prs[2])]
##thetasQ <- vector("list", Q)
#1: item 1
##thetasQ[[1]] = c(fixef(ModelsOne[[1]]),Sigma2YiRIone[[1]],Sigma2YiRSone[[1]],Sigma2YiRIRSone[[1]])
#2: item 2
##thetasQ[[2]] = c(fixef(ModelsOne[[2]]),Sigma2YiRIone[[2]],Sigma2YiRSone[[2]],Sigma2YiRIRSone[[2]])
#3: item 3
##thetasQ[[3]] = c(fixef(ModelsOne[[3]]),Sigma2YiRIone[[3]],Sigma2YiRSone[[3]],Sigma2YiRIRSone[[3]])
#4: item 4
##thetasQ[[4]] = c(fixef(ModelsOne[[4]]),Sigma2YiRIone[[4]],Sigma2YiRSone[[4]],Sigma2YiRIRSone[[4]])
# univariate parameter vectors for the two left-out items
thetasQ <- vector("list", 2)
thetasQ[[1]] <- thetas[11:15]
thetasQ[[2]] <- thetas[16:20]
DDD <- do.call(rbind, list(Data[1:3]))
DDD$outcome <- gl(1, nrow(Data))
# add the univariate log-likelihood of each left-out item
for (k in 1:length(indic) ) {
ii <- indic[k]
# subject selection: one subject when i <= n, all subjects otherwise
if (i <= n) {
ind.i <- DataRaw$id == i
}
else {
ind.i <- rep(T,n*length(times))
}
yi <- DataRaw[[paste("y", indic[k], sep = "")]][ind.i]
DDD <- DataRaw[ind.i,]
DDD$y <- c(yi)
res <- res + logLik.bin.One(thetasQ[[k]],
id = DDD$id, y=DDD$y,
X = model.matrix(ModelsOne[[ii]])[1:(dim(Data)[1]/2),],
Z = model.matrix(ModelsOne[[ii]])[1:(dim(Data)[1]/2),],
GHk = GHk,
extraParamOne = extraParamOne[[ii]])
}
res
}
logLik.bin.One <- function (thetas, id, y, X, Z, GHk = 5, extraParamOne) {
    # Marginal log-likelihood of a univariate random-intercept/random-slope
    # logistic mixed model, integrated by multidimensional Gauss-Hermite
    # quadrature. Returned with a positive sign ("logLik as max"), as used by
    # the CLIC heuristic.
    #
    # thetas       : c(beta1, beta2, var(RI), var(RS), cov(RI, RS))
    # id, y, X, Z  : grouping factor, 0/1 response, fixed and random design
    # GHk          : Gauss-Hermite points per dimension
    # extraParamOne: kept for interface compatibility (VarCorr of the fitted
    #                univariate model); it is no longer read because the whole
    #                2 x 2 random-effects covariance is rebuilt from 'thetas'
    #                (the original assigned it to a dead variable).
    betas <- thetas[1:2]
    Sigma2YiRI <- thetas[3]
    Sigma2YiRS <- thetas[4]
    Sigma2YiRIRS <- thetas[5]
    # plug the thetas into the 2 x 2 covariance so numerical derivatives with
    # respect to theta (scores, hessians) see the parameters
    Dnew <- matrix(0, ncol = 2, nrow = 2)
    Dnew[1, 1] <- Sigma2YiRI
    Dnew[2, 2] <- Sigma2YiRS
    Dnew[2, 1] <- Sigma2YiRIRS
    Dnew[1, 2] <- Dnew[2, 1]
    # project onto the nearest positive-definite matrix before use
    D <- nearPD(Dnew)
    ncz <- ncol(Z)
    # tensor-product Gauss-Hermite rule, rescaled for a standard-normal kernel
    GH <- gauher(GHk)
    b <- as.matrix(expand.grid(rep(list(GH$x), ncz)))
    dimnames(b) <- NULL
    wGH <- as.matrix(expand.grid(rep(list(GH$w), ncz)))
    wGH <- 2^(ncz/2) * apply(wGH, 1, prod) * exp(rowSums(b * b))
    b <- sqrt(2) * b
    Ztb <- Z %*% t(b)
    # conditional Bernoulli log-density at every quadrature node
    mu.y <- plogis(as.vector(X %*% betas) + Ztb)
    logBinom <- dbinom(y, 1, mu.y, TRUE)
    log.p.yb <- rowsum(logBinom, id)
    log.p.b <- dmvnorm(b, rep(0, ncol(Z)), D, TRUE)
    p.yb <- exp(log.p.yb + rep(log.p.b, each = nrow(log.p.yb)))
    p.y <- c(p.yb %*% wGH)
    # logLik as max (the minimised form would be -sum(...)), CLIC heuristic
    sum(log(p.y), na.rm = TRUE)
}
gc()  # trigger a garbage collection to release memory before the heavy runs
gauher <- function (n) {
    # Nodes and weights of the n-point Gauss-Hermite quadrature rule
    # (physicists' convention, weight function exp(-x^2)), computed by Newton
    # iteration on the orthonormal Hermite recurrence (the classic Numerical
    # Recipes 'gauher' routine). Returns list(x, w) with the nodes in
    # decreasing order; the weights sum to sqrt(pi).
    #
    # Fixed: the definition line had been corrupted by a fused dataset
    # metadata row; the routine itself is restored unchanged.
    EPS <- 3e-14                  # convergence tolerance of the Newton step
    PIM4 <- 0.751125544464943     # pi^(-1/4), Hermite normalisation constant
    MAXIT <- 10
    m <- trunc((n + 1) / 2)       # roots are symmetric: compute only half
    x <- w <- rep(-1, n)
    for (i in 1:m) {
        # initial guess for the i-th largest root
        if (i == 1) {
            z <- sqrt(2 * n + 1) - 1.85575 * (2 * n + 1)^(-0.16667)
        }
        else if (i == 2) {
            z <- z - 1.14 * n^0.426 / z
        }
        else if (i == 3) {
            z <- 1.86 * z - 0.86 * x[1]
        }
        else if (i == 4) {
            z <- 1.91 * z - 0.91 * x[2]
        }
        else {
            z <- 2 * z - x[i - 2]
        }
        for (its in 1:MAXIT) {
            # evaluate the orthonormal Hermite polynomial at z by the
            # three-term recurrence; p2 retains the previous-order value
            p1 <- PIM4
            p2 <- 0
            for (j in 1:n) {
                p3 <- p2
                p2 <- p1
                p1 <- z * sqrt(2/j) * p2 - sqrt((j - 1)/j) * p3
            }
            pp <- sqrt(2 * n) * p2   # derivative of the polynomial at z
            z1 <- z
            z <- z1 - p1/pp          # Newton refinement
            if (abs(z - z1) <= EPS)
                break
        }
        x[i] <- z
        x[n + 1 - i] <- -z           # mirror root
        w[i] <- 2 / (pp * pp)
        w[n + 1 - i] <- w[i]
    }
    list(x = x, w = w)
}
dmvnorm <- function (x, mu, Sigma, log = FALSE) {
    # Multivariate normal density at the rows of x, evaluated through the
    # eigendecomposition of Sigma (mean mu, covariance Sigma). Returns the
    # log-density when log = TRUE.
    if (!is.matrix(x))
        x <- rbind(x)
    p <- length(mu)
    eig <- eigen(Sigma, symmetric = TRUE)
    lambda <- eig$values
    vecs <- eig$vectors
    # reject covariance matrices with meaningfully negative eigenvalues
    if (!all(lambda >= -1e-06 * abs(lambda[1])))
        stop("'Sigma' is not positive definite")
    centred <- x - rep(mu, each = nrow(x))
    # Sigma^{-1} = V diag(1/lambda) V'
    Sigma.inv <- vecs %*% (t(vecs) / lambda)
    quad.form <- 0.5 * rowSums((centred %*% Sigma.inv) * centred)
    # log normalising constant: -0.5 * (p log(2 pi) + log det(Sigma))
    log.const <- - 0.5 * (p * log(2 * pi) +
                          determinant(Sigma, logarithm = TRUE)$modulus)
    out <- if (log) log.const - quad.form else exp(log.const - quad.form)
    as.vector(out)
}
logLik.bin <- function (thetas, id, y, X, Z, GHk = 5, extraParam) {
# Marginal log-likelihood of a bivariate (pairwise) logistic mixed model with
# a random intercept and slope per item, integrated by multidimensional
# Gauss-Hermite quadrature. Returned with a positive sign ("logLik as max"),
# as used by the CLIC heuristic.
#
# thetas[1:4]  : fixed effects (both items)
# thetas[5:7]  : item i var(RI), var(RS), cov(RI, RS)
# thetas[8:10] : item j var(RI), var(RS), cov(RI, RS)
# extraParam   : VarCorr(Models[[p]])$id of the fitted pairwise glmer model;
#                its cross-item covariances are kept, while the within-item
#                blocks are overwritten with 'thetas' below so numerical
#                derivatives with respect to theta are possible.
#thetas <- relist(thetas, lis.thetas)
#betas <- thetas$betas
#ncz <- ncol(Z)
#D <- matrix(0, ncz, ncz)
#D[lower.tri(D, TRUE)] <- thetas$D
#D <- D + t(D)
#diag(D) <- diag(D) / 2
#
betas<-thetas[1:4]
Sigma2YiRI<-thetas[5]
Sigma2YiRS<-thetas[6]
Sigma2YiRIRS<-thetas[7]
Sigma2YjRI<-thetas[8]
Sigma2YjRS<-thetas[9]
Sigma2YjRIRS<-thetas[10]
Dold <- matrix(0,ncol=4,nrow=4)
Dnew <- matrix(0,ncol=4,nrow=4)
Dold <- extraParam # extraParam is VarCorr(Models[[i]])$id
#re-align columns, rows
#old: glmer, 1,2,3,4
#new: Florios, 1,3,2,4
# permute the glmer ordering (RI_i, RI_j, RS_i, RS_j) into the local ordering
# (RI_i, RS_i, RI_j, RS_j) -- lower triangle first
Dnew[1,1] = Dold[1,1]
Dnew[2,2] = Dold[3,3]
Dnew[3,3] = Dold[2,2]
Dnew[4,4] = Dold[4,4]
Dnew[2,1] = Dold[3,1]
Dnew[3,2] = Dold[2,3]
Dnew[4,3] = Dold[4,2]
Dnew[3,1] = Dold[2,1]
Dnew[4,2] = Dold[4,3]
Dnew[4,1] = Dold[4,1]
#now: fill in upper triangular: D new is symmetric
for (ii in 1:4) {
for (jj in ii:4) {
Dnew[ii,jj] <- Dnew[jj,ii]
}
}
###D<- cbind(c(Sigma2YiRI,Sigma2YiRIRS,0,0),c(Sigma2YiRIRS,Sigma2YiRS,0,0),c(0,0,Sigma2YjRI,Sigma2YjRIRS),c(0,0,Sigma2YjRIRS,Sigma2YjRS))
#instead: put full D matrix, as obtained from glmer() run
#now plug-in the thetas, in order to perform numerical derivatives wrt theta (scores, hessians)
# overwrite the two within-item 2x2 blocks with the theta values; the
# cross-item covariances from the glmer fit remain in place
Dnew[1,1] = Sigma2YiRI
Dnew[2,2] = Sigma2YiRS
Dnew[2,1] = Sigma2YiRIRS
Dnew[1,2] = Dnew[2,1]
Dnew[3,3] = Sigma2YjRI
Dnew[4,4] = Sigma2YjRS
Dnew[4,3] = Sigma2YjRIRS
Dnew[3,4] = Dnew[4,3]
#simplify notation: D=Dnew, and proceed
# project onto the nearest positive-definite matrix before use
D <-nearPD(Dnew)
ncz <- ncol(Z)
# tensor-product Gauss-Hermite rule, rescaled for a standard-normal kernel
GH <- gauher(GHk)
b <- as.matrix(expand.grid(rep(list(GH$x), ncz)))
dimnames(b) <- NULL
k <- nrow(b)
wGH <- as.matrix(expand.grid(rep(list(GH$w), ncz)))
wGH <- 2^(ncz/2) * apply(wGH, 1, prod) * exp(rowSums(b * b))
b <- sqrt(2) * b
Ztb <- Z %*% t(b)
#
# conditional Bernoulli log-density at every quadrature node, summed per id
mu.y <- plogis(as.vector(X %*% betas) + Ztb)
logBinom <- dbinom(y, 1, mu.y, TRUE)
log.p.yb <- rowsum(logBinom, id)
log.p.b <- dmvnorm(b, rep(0, ncol(Z)), D, TRUE)
p.yb <- exp(log.p.yb + rep(log.p.b, each = nrow(log.p.yb)))
p.y <- c(p.yb %*% wGH)
#-sum(log(p.y), na.rm = TRUE) #logLik as min, original
sum(log(p.y), na.rm = TRUE) #logLik as max, CLIC heuristic
}
score.bin <- function (thetas, id, y, X, Z, GHk = 5, extraParam) {
    # Score (numerical gradient) of the pairwise log-likelihood logLik.bin at
    # 'thetas', computed by forward finite differences (fd).
    fd(thetas, logLik.bin,
       id = id, y = y, X = X, Z = Z, GHk = GHk, extraParam = extraParam)
}
cd <- function (x, f, ..., eps = 0.001) {
    # Central-difference gradient of the scalar function f at x.
    # The step for coordinate i is eps * max(|x[i]|, 1), so it stays
    # meaningful near zero.
    #
    # Fixed: the loop used 1:length(x), which iterates over c(1, 0) for
    # empty input; seq_len(n) correctly yields an empty iteration and the
    # function now returns numeric(0) for zero-length x.
    n <- length(x)
    res <- numeric(n)
    ex <- pmax(abs(x), 1)
    for (i in seq_len(n)) {
        x1 <- x2 <- x
        x1[i] <- x[i] + eps * ex[i]
        x2[i] <- x[i] - eps * ex[i]
        diff.f <- c(f(x1, ...) - f(x2, ...))
        diff.x <- x1[i] - x2[i]
        res[i] <- diff.f/diff.x
    }
    res
}
fd <- function (x, f, ..., eps = 1e-04) {
    # Forward-difference gradient of the scalar function f at x.
    # The step for coordinate i is eps * max(|x[i]|, 1); f is evaluated once
    # at x and once per coordinate.
    #
    # Fixed: the loop used 1:length(x), which iterates over c(1, 0) for
    # empty input; seq_len(n) correctly yields an empty iteration and the
    # function now returns numeric(0) for zero-length x.
    f0 <- f(x, ...)
    n <- length(x)
    res <- numeric(n)
    ex <- pmax(abs(x), 1)
    for (i in seq_len(n)) {
        x1 <- x
        x1[i] <- x[i] + eps * ex[i]
        diff.f <- c(f(x1, ...) - f0)
        diff.x <- x1[i] - x[i]
        res[i] <- diff.f/diff.x
    }
    res
}
hess.bin <- function (thetas, id, y, X, Z, GHk = 5, extraParam) {
    # Numerical Hessian of the pairwise log-likelihood logLik.bin at 'thetas',
    # computed by central differences (cdd).
    #
    # Fixed: the GHk argument was silently ignored (a literal 5 was forwarded
    # to the likelihood); it is now passed through. The dead preallocation of
    # the result matrix was removed.
    cdd(thetas, logLik.bin, id = id, y = y, X = X, Z = Z, GHk = GHk,
        extraParam = extraParam)
}
cdd <- function (x, f, ..., eps = 0.0005) {
    # Numerical Hessian of the scalar function f at x by central differences.
    # Only the upper triangle is evaluated (the Hessian is symmetric); the
    # lower triangle is mirrored afterwards.
    #
    # Fixed: the mixed-derivative stencil was evaluated INSIDE the coordinate
    # loop that builds the four stencil points, so f was called 4*n times per
    # element instead of 4 (the early calls used half-built points and were
    # simply overwritten). The stencil is now built first and f evaluated
    # once; the final values are unchanged.
    #
    # NOTE(review): both directions of a mixed derivative use the step
    # eps * ex[i] (not ex[j]), exactly as in the original; the divisor
    # matches the step, so the result is still a valid finite difference.
    n <- length(x)
    res <- matrix(nrow = n, ncol = n)
    ex <- pmax(abs(x), 1)   # relative step scaling, at least 1 near zero
    celem <- 0
    for (i in seq_len(n)) {
        for (j in i:n) {
            celem <- celem + 1
            cat("Computing Element No...:", celem, "of hessian\n")
            h <- eps * ex[i]
            if (i != j) {
                # mixed second derivative: 4-point central stencil
                x1 <- x2 <- x3 <- x4 <- x
                x1[i] <- x[i] + h; x1[j] <- x[j] + h
                x2[i] <- x[i] + h; x2[j] <- x[j] - h
                x3[i] <- x[i] - h; x3[j] <- x[j] + h
                x4[i] <- x[i] - h; x4[j] <- x[j] - h
                diff.f <- c(f(x1, ...) - f(x2, ...) - f(x3, ...) + f(x4, ...))
                res[i, j] <- diff.f / (4 * h * h)
            }
            else {
                # pure second derivative: 3-point central stencil
                xp <- xm <- x
                xp[i] <- x[i] + h
                xm[i] <- x[i] - h
                diff.f <- c(f(xp, ...) - 2 * f(x, ...) + f(xm, ...))
                res[i, i] <- diff.f / (h * h)
            }
        }
    }
    # mirror the upper triangle into the lower one
    for (i in seq_len(n)) {
        for (j in seq_len(i)) {
            res[i, j] <- res[j, i]
        }
    }
    res # return Hessian in res matrix
}
nearPD <- function (M, eig.tol = 1e-06, conv.tol = 1e-07, posd.tol = 1e-08, maxits = 100) {
# based on function nearcor() submitted to R-help by Jens Oehlschlagel on 2007-07-13, and
# function posdefify() from package `sfsmisc'
#
# Project a symmetric matrix M onto (approximately) the nearest positive
# definite matrix: alternating projections with a Dykstra-style correction U,
# followed by an eigenvalue floor if the result is still not sufficiently
# positive definite.
#
# M        : numeric, square, exactly symmetric matrix (checked below).
# eig.tol  : relative cutoff for eigenvalues kept in the PSD projection.
# conv.tol : relative convergence tolerance of the iteration.
# posd.tol : relative floor applied to eigenvalues in the final fix-up.
# maxits   : maximum number of projection iterations.
if (!(is.numeric(M) && is.matrix(M) && identical(M, t(M))))
stop("Input matrix M must be square and symmetric.\n")
# infinity (maximum absolute row sum) norm, used for the convergence test
inorm <- function (x) max(rowSums(abs(x)))
n <- ncol(M)
U <- matrix(0, n, n)
X <- M
iter <- 0
converged <- FALSE
while (iter < maxits && !converged) {
Y <- X
# subtract the Dykstra correction before projecting onto the PSD cone
T <- Y - U
e <- eigen(Y, symmetric = TRUE)
Q <- e$vectors
d <- e$values
D <- if (length(d) > 1) diag(d) else as.matrix(d)
# keep only eigenvalues above the relative tolerance
p <- (d > eig.tol * d[1])
QQ <- Q[, p, drop = FALSE]
X <- QQ %*% D[p, p, drop = FALSE] %*% t(QQ)
U <- X - T
# re-symmetrise to cancel numerical asymmetry
X <- (X + t(X)) / 2
conv <- inorm(Y - X) / inorm(Y)
iter <- iter + 1
converged <- conv <= conv.tol
}
X <- (X + t(X)) / 2
# posdefify-style fix-up: floor the eigenvalues and rescale the diagonal
# towards its pre-fix values
e <- eigen(X, symmetric = TRUE)
d <- e$values
Eps <- posd.tol * abs(d[1])
if (d[n] < Eps) {
d[d < Eps] <- Eps
Q <- e$vectors
o.diag <- diag(X)
X <- Q %*% (d * t(Q))
D <- sqrt(pmax(Eps, o.diag) / diag(X))
X[] <- D * X * rep(D, each = n)
}
(X + t(X)) / 2
}
bdiag <- function (...) {
    # Build a block-diagonal matrix from the matrices supplied either as
    # separate arguments or as a single list of matrices. Off-block entries
    # are 0; non-square blocks are supported.
    #
    # Fixed: partially matched argument names are now spelled out
    # (nc= -> ncol=, rec= -> recursive=) and seq(along=) replaced by
    # seq_along(); behaviour is unchanged.
    mlist <- list(...)
    ## handle case in which list of matrices is given
    if (length(mlist) == 1)
        mlist <- unlist(mlist, recursive = FALSE)
    # cumulative (rows, cols) offsets of the blocks, with a leading (0, 0) row
    csdim <- rbind(c(0, 0), apply(sapply(mlist, dim), 1, cumsum))
    ret <- array(0, dim = csdim[length(mlist) + 1, ])
    add1 <- matrix(rep(1:0, 2), ncol = 2)
    for (i in seq_along(mlist)) {
        # row/column index ranges occupied by block i
        indx <- apply(csdim[i:(i + 1), ] + add1, 2, function(x) x[1]:x[2])
        ## non-square matrix: apply() returns a list of ragged index vectors
        if (is.null(dim(indx))) ret[indx[[1]], indx[[2]]] <- mlist[[i]]
        ## square matrix: apply() simplifies to an index matrix
        else ret[indx[, 1], indx[, 2]] <- mlist[[i]]
    }
    ret
}
#' Computes the AVE, DWAVE, WAVE, CH-EXP and CH-ECDF methods for multivariate GLMMs (m-GLMMs)
#'
#' It acts on the Models structure for all pairs of items and returns the estimates for the m-GLMM parameters with 5 different methods.
#' @param Models A list which contains the lme4 model objects taken from the pairwise separate estimations (list of size Q*(Q-1)/2)
#' @param ModelsOne A list which contains the lme4 model objects taken from the univariate separate estimations (list of size Q)
#' @param Data a data.frame with the data: 1st column id, 2nd column time, remaining Q columns are the y 0/1 values (Q items)
#' @param GHk Number of Gauss-Hermite points per dimension of integration
#' @param n number of individuals
#' @param Q the number of items. Set this to four.
#' @param extraParam a helper list which depends on the pairwise estimates
#' @param extraParamOne a helper list which depends on the univariate estimates
#' @param m an integer which is useful for subsequent runs
#' @return The estimated parameters of the model with methods AVE, DWAVE, WAVE, CH-EXP and CH-ECDF
#' @export
aveThetas2 <- function (Models,ModelsOne, Data, GHk = 5, n, Q, extraParam, extraParamOne, m=1) {
# Compute K matrix
#environment(logLik.bin) <- environment(score.bin) <- environment()
#environment(logLik.bin.BIG) <- environment(score.bin.BIG) <- environment()
#environment(logLik.bin.BIG) <- environment(score.bin.BIG) <- environment(estimateModelFit2)
#environment(logLik.bin.BIG) <- environment(score.bin.BIG) <- environment(estimateModelFit2) <- environment()
environment(logLik.bin.BIG) <- environment(hess.bin.BIG.Vect) <- environment(score.bin.BIG) <-
environment(logLik.bin) <- environment(hess.bin.Vect) <- environment(score.bin) <-
environment(estimateModelFit2) <- environment()
DataRaw <- Data
Sigma2YiRIone <- vector("list", Q)
Sigma2YiRSone <- vector("list", Q)
Sigma2YiRIRSone <- vector("list", Q)
for (ii in 1:Q) {
Sigma2YiRIone[[ii]] <- VarCorr(ModelsOne[[ii]])$id[1,1]
Sigma2YiRSone[[ii]] <- VarCorr(ModelsOne[[ii]])$id[2,2]
Sigma2YiRIRSone[[ii]] <- VarCorr(ModelsOne[[ii]])$id[1,2]
}
Klis <- vector("list", n)
for (i in 1:n) {
cat("Individual No...:", i, "for scores computation\n")
pairs <- combn(Q, 2)
P <- ncol(pairs)
Scores <- vector("list", P)
Sigma2YiRI <- vector("list", P)
Sigma2YiRS <- vector("list", P)
Sigma2YiRIRS <- vector("list", P)
Sigma2YjRI <- vector("list", P)
Sigma2YjRS <- vector("list", P)
Sigma2YjRIRS <- vector("list", P)
for (p in 1:P) {
cp <-p
Sigma2YiRI[[cp]] <- VarCorr(Models[[p]])$id[1,1]
Sigma2YiRS[[cp]] <- VarCorr(Models[[p]])$id[3,3]
Sigma2YiRIRS[[cp]] <- VarCorr(Models[[p]])$id[3,1]
Sigma2YjRI[[cp]] <- VarCorr(Models[[p]])$id[2,2]
Sigma2YjRS[[cp]] <- VarCorr(Models[[p]])$id[4,4]
Sigma2YjRIRS[[cp]] <- VarCorr(Models[[p]])$id[4,2]
prs <- pairs[, p]
yi <- Data[[paste("y", prs[1], sep = "")]]
yj <- Data[[paste("y", prs[2], sep = "")]]
DD <- do.call(rbind, list(Data[1:3], Data[1:3]))
DD$outcome <- gl(2, nrow(Data))
DD$y <- c(yi, yj)
DD.i <- DD[(ind.i <- DD$id == i), ]
D <- nearPD(VarCorr(Models[[p]])$id)
indices <- 1:Q
indic <- indices[c(-prs[1],-prs[2])]
#Scores[[p]] <- score.bin(fixef(Models[[p]]), id = DD.i$id, y = DD.i$y,
# X = model.matrix(Models[[p]])[ind.i, ],
# Z = model.matrix(Models[[p]])[ind.i, seq_len(2*ncz)],
# GHk = GHk)
###Scores[[p]] <- score.bin(c(fixef(Models[[p]]),SigmaYi[[p]],SigmaYj[[p]],rhoYiYj[[p]]), id = DD.i$id, y = DD.i$y,
### X = model.matrix(Models[[p]])[ind.i, ],
### Z = cbind(model.matrix(Models[[p]])[ind.i, seq_len(2*ncz)]),
### GHk = GHk)
Scores[[p]] <- #score.bin(c(fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]]),
#score.bin.BIG(c(fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]]),
##score.bin.BIG(c(fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]]),
score.bin.BIG(c(fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]],
fixef(ModelsOne[[indic[1]]]),Sigma2YiRIone[[indic[1]]],Sigma2YiRSone[[indic[1]]],Sigma2YiRIRSone[[indic[1]]],
fixef(ModelsOne[[indic[2]]]),Sigma2YiRIone[[indic[2]]],Sigma2YiRSone[[indic[2]]],Sigma2YiRIRSone[[indic[2]]]),
id = DD.i$id, y = DD.i$y,
X = model.matrix(Models[[p]])[ind.i, ],
Z = cbind(model.matrix(Models[[p]])[ind.i, ]),
GHk = GHk,
extraParam = extraParam[[p]],
Data = DD.i,
r = p,
i = i)
#thetas <- c(fixef(Models[[p]]),SigmaYi[[p]],SigmaYj[[p]],rhoYiYj[[p]])
#ii<- prs[1]
#jj<- prs[2]
#ic[ii] <- thetas[1]
#ic[jj] <- thetas[2]
#sl[ii] <- thetas[3]
#sl[jj] <- thetas[4]
#sig[ii] <- thetas[5]
#sig[jj] <- thetas[6]
#rh[p] <- thetas[7]
}
#kkk <- P * length( c( fixef(Models[[1]]) , SigmaYi[[1]], SigmaYj[[1]], rhoYiYj[[1]] ))
kkk <- P * length( c( fixef(Models[[1]]) , Sigma2YiRI[[1]], Sigma2YiRS[[1]], Sigma2YiRIRS[[1]], Sigma2YjRI[[1]], Sigma2YjRS[[1]], Sigma2YjRIRS[[1]],
rep(c(fixef(ModelsOne[[1]]), Sigma2YiRIone[[1]], Sigma2YiRSone[[1]], Sigma2YiRIRSone[[1]]),2)))
K <- matrix(0, kkk, kkk)
ee <- expand.grid(1:P, 1:P)
ss <- sapply(Scores, length)
ss2 <- cumsum(ss)
ss1 <- c(1, ss2[-P] + 1)
for (ii in 1:nrow(ee)) {
k <- ee$Var1[ii]
j <- ee$Var2[ii]
row.ind <- seq(ss1[k], ss2[k])
col.ind <- seq(ss1[j], ss2[j])
K[row.ind, col.ind] <- Scores[[k]] %o% Scores[[j]]
K[row.ind, col.ind] <- Scores[[k]] %o% Scores[[j]]
}
Klis[[i]] <- K
}
K <- Reduce("+", Klis)
# Extract J matrix
###J <- lapply(Models, vcov)
##J<-matrix(0,18,18)
Hessians<- vector("list", P)
for (p in 1:P) {
cat("Hessian No: ...",p,"\n")
prs <- pairs[, p]
yi <- Data[[paste("y", prs[1], sep = "")]]
yj <- Data[[paste("y", prs[2], sep = "")]]
DD <- do.call(rbind, list(Data[1:3], Data[1:3]))
DD$outcome <- gl(2, nrow(Data))
DD$y <- c(yi, yj)
#DD.i <- DD[(ind.i <- DD$id == i), ] #take all observations now, from all i=1:n
D <- nearPD(VarCorr(Models[[p]])$id)
indices <- 1:Q
indic <- indices[c(-prs[1],-prs[2])]
#Hessians[[p]] <- hess.bin(c( fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]] ),
# id = DD$id, y = DD$y,
# X = model.matrix(Models[[p]])[, ],
# Z = cbind(model.matrix(Models[[p]])[, seq_len(2*ncz)]),
# GHk = GHk,
# extraParam = extraParam[[p]])
Hessians[[p]] <- #hess.bin.Vect(c( fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]] ),
hess.bin.BIG.Vect(c( fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]],
fixef(ModelsOne[[indic[1]]]),Sigma2YiRIone[[indic[1]]],Sigma2YiRSone[[indic[1]]],Sigma2YiRIRSone[[indic[1]]],
fixef(ModelsOne[[indic[2]]]),Sigma2YiRIone[[indic[2]]],Sigma2YiRSone[[indic[2]]],Sigma2YiRIRSone[[indic[2]]]),
id = DD$id, y = DD$y,
X = model.matrix(Models[[p]])[, ],
Z = cbind(model.matrix(Models[[p]])[, ]),
GHk = GHk,
extraParam = extraParam[[p]],
Data = DD,
r = p,
i = n+1)
#thetas <- c(fixef(Models[[p]]),SigmaYi[[p]],SigmaYj[[p]],rhoYiYj[[p]])
#ii<- prs[1]
#jj<- prs[2]
#ic[ii] <- thetas[1]
#ic[jj] <- thetas[2]
#sl[ii] <- thetas[3]
#sl[jj] <- thetas[4]
#sig[ii] <- thetas[5]
#sig[jj] <- thetas[6]
#rh[p] <- thetas[7]
}
J <- do.call(bdiag, Hessians)
## Now J is redefined as the block matrix 108x108, or (6x18) x (6x18)
###J <- bdiag(lapply(J, function (x) solve(matrix(x@x, x@Dim[1], x@Dim[2]))))
# Compute A matrix
nbetas <- length( c(fixef(Models[[1]]) , Sigma2YiRI[[1]], Sigma2YiRS[[1]], Sigma2YiRIRS[[1]], Sigma2YjRI[[1]], Sigma2YjRS[[1]], Sigma2YjRIRS[[1]],
rep(c(fixef(ModelsOne[[1]]), Sigma2YiRIone[[1]], Sigma2YiRSone[[1]], Sigma2YiRIRSone[[1]]),2) ) )
#thetas <- c( sapply(Models, fixef) )
thetas <- numeric(0)
for (ii in 1:20*P) {
thetas[ii]=0
}
for (ii in 1:P) {
thetas[(ii-1)*20+1]=fixef(Models[[ii]])[1]
thetas[(ii-1)*20+2]=fixef(Models[[ii]])[2]
thetas[(ii-1)*20+3]=fixef(Models[[ii]])[3]
thetas[(ii-1)*20+4]=fixef(Models[[ii]])[4]
thetas[(ii-1)*20+5]=Sigma2YiRI[[ii]]
thetas[(ii-1)*20+6]=Sigma2YiRS[[ii]]
thetas[(ii-1)*20+7]=Sigma2YiRIRS[[ii]]
thetas[(ii-1)*20+8]=Sigma2YjRI[[ii]]
thetas[(ii-1)*20+9]=Sigma2YjRS[[ii]]
thetas[(ii-1)*20+10]=Sigma2YjRIRS[[ii]]
p<-ii
prs <- pairs[, p]
indices <- 1:Q
indic <- indices[c(-prs[1],-prs[2])]
thetas[(ii-1)*20+11]=fixef(ModelsOne[[indic[1]]])[1]
thetas[(ii-1)*20+12]=fixef(ModelsOne[[indic[1]]])[2]
thetas[(ii-1)*20+13]=Sigma2YiRIone[[indic[1]]]
thetas[(ii-1)*20+14]=Sigma2YiRSone[[indic[1]]]
thetas[(ii-1)*20+15]=Sigma2YiRIRSone[[indic[1]]]
thetas[(ii-1)*20+16]=fixef(ModelsOne[[indic[2]]])[1]
thetas[(ii-1)*20+17]=fixef(ModelsOne[[indic[2]]])[2]
thetas[(ii-1)*20+18]=Sigma2YiRIone[[indic[2]]]
thetas[(ii-1)*20+19]=Sigma2YiRSone[[indic[2]]]
thetas[(ii-1)*20+20]=Sigma2YiRIRSone[[indic[2]]]
}
Kone <- vector("list", P)
HelpMat <- vector("list", P)
CLIC <- vector("list", P)
wtCLIC <- vector("list", P)
wtCLIC_B <- vector("list", P)
CLIC_stdz <- vector("list", P)
for (p in 1:6) {
Kone[[p]] <- K[((p-1)*nbetas+1):((p-1)*nbetas+nbetas),((p-1)*nbetas+1):((p-1)*nbetas+nbetas)]
}
for (p in 1:6) {
HelpMat[[p]] <- Kone[[p]] %*% solve(Hessians[[p]])
}
for (p in 1:6) {
#CLIC[[p]] <- logLik(Models[[p]]) + sum(diag(HelpMat[[p]])) #sum(diag(A)) is trace(A) in R language for A square matrix
prs <- pairs[, p]
yi <- Data[[paste("y", prs[1], sep = "")]]
yj <- Data[[paste("y", prs[2], sep = "")]]
DD$y <- c(yi, yj)
#DD.i <- DD[(ind.i <- DD$id == i), ] #take all observations now, from all i=1:n
D <- nearPD(VarCorr(Models[[p]])$id)
indices <- 1:Q
indic <- indices[c(-prs[1],-prs[2])]
CLIC[[p]] <- logLik.bin.BIG(c( fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]],
fixef(ModelsOne[[indic[1]]]),Sigma2YiRIone[[indic[1]]],Sigma2YiRSone[[indic[1]]],Sigma2YiRIRSone[[indic[1]]],
fixef(ModelsOne[[indic[2]]]),Sigma2YiRIone[[indic[2]]],Sigma2YiRSone[[indic[2]]],Sigma2YiRIRSone[[indic[2]]]),
id = DD$id, y = DD$y,
X = model.matrix(Models[[p]])[, ],
Z = cbind(model.matrix(Models[[p]])[, ]),
GHk = GHk,
extraParam = extraParam[[p]],
Data = DD,
r = p,
i = n+1) + sum(diag(HelpMat[[p]])) #sum(diag(A)) is trace(A) in R language for A square matrix
}
#logLik_Models <- c(logLik(Models[[1]]),logLik(Models[[2]]),logLik(Models[[3]]),
# logLik(Models[[4]]),logLik(Models[[5]]),logLik(Models[[6]]))
trace_Models <- c(sum(diag(HelpMat[[1]])),sum(diag(HelpMat[[2]])),sum(diag(HelpMat[[3]])),
sum(diag(HelpMat[[4]])),sum(diag(HelpMat[[5]])),sum(diag(HelpMat[[6]])))
CLIC_Models <- c(CLIC[[1]],CLIC[[2]],CLIC[[3]],CLIC[[4]],CLIC[[5]],CLIC[[6]])
## 7.9.2015: KJF. Standardize the CLIC values to mean 0 and sd 1
mu_CLIC <-mean(CLIC_Models)
sigma_CLIC <- sd(CLIC_Models)
CLIC_Models_stdz <- (CLIC_Models - mu_CLIC) / sigma_CLIC
logLik_Models <- CLIC_Models - trace_Models #simplify, not call again logLik.bin
write.table(logLik_Models,paste(m,"_logLik_Models.txt",sep=""),row.names=F,col.names=F)
write.table(trace_Models,paste(m,"_trace_Models.txt",sep=""),row.names=F,col.names=F)
write.table(CLIC_Models,paste(m,"_CLIC_Models.txt",sep=""),row.names=F,col.names=F)
#weights <- exp(CLIC)
#A2 <- weights/sum(weights)
#7.9.2015. KJF. standardization in CLIC values for weights
for (p in 1:6) {
CLIC_stdz[[p]] <- CLIC_Models_stdz[p]
}
for (p in 1:6) {
#wtCLIC[[p]] <- exp(CLIC[[p]]) #VV idea
##wtCLIC[[p]] <- exp(mpfr(CLIC[[p]],80)) #KF idea
wtCLIC[[p]] <- exp(CLIC_stdz[[p]]) #KF idea, standardization 7.9.2015
#wtCLIC[[p]] <- as.numeric(exp(as.brob(CLIC[[p]]))) #KF idea
###wtCLIC[[p]] <- - 1 / CLIC[[p]] #KF idea
}
# 4.10.2015, KJF add.
# CLIC ecdf way: B' way
a=CLIC_Models_stdz
f=ecdf(a)
wtCLIC_B[[1]] = f(a)[1]
wtCLIC_B[[2]] = f(a)[2]
wtCLIC_B[[3]] = f(a)[3]
wtCLIC_B[[4]] = f(a)[4]
wtCLIC_B[[5]] = f(a)[5]
wtCLIC_B[[6]] = f(a)[6]
# 4.10.2015, end KJF add.
OmegaVec <- wtCLIC
OmegaVec_B <- wtCLIC_B
A<- computeWeightMatrixAVE_by6() # to re-write for 20x120 case KF 25.6.2015, todo:
#Omega <- matrix(0,P*nbetas,P*nbetas)
#A2 <- matrix(0, Q*(nbetas)/2, length(thetas))
A2 <- matrix(0, 20, 120)
A2_B <- matrix(0, 20, 120)
##A2 <- new("mpfrMatrix", mpfr(rep(0,20*120),80),Dim = c(20L, 120L))
##validObject(A2)
for (i in seq_len(Q*nbetas/4)) {
#ii <- inter == levels(inter)[i] just take all 6 which are intcpt, so odd
###ii <- rep(c(TRUE,FALSE,FALSE),choose(length(times),2))
ii <- A[i,]==(1/6) #adjacency matrix for global parameter i according to A = computeWeightMatrixAVE_by6()
i2 <- which(ii==T)
jj <- c(intervalBIG(i2[1]),intervalBIG(i2[2]),intervalBIG(i2[3]),intervalBIG(i2[4]),intervalBIG(i2[5]),intervalBIG(i2[6]))
#weights <- OmegaVec[jj]
weights <- c(OmegaVec[[jj[1]]], OmegaVec[[jj[2]]], OmegaVec[[jj[3]]], OmegaVec[[jj[4]]], OmegaVec[[jj[5]]], OmegaVec[[jj[6]]])
weights_B <- c(OmegaVec_B[[jj[1]]],OmegaVec_B[[jj[2]]],OmegaVec_B[[jj[3]]],OmegaVec_B[[jj[4]]],OmegaVec_B[[jj[5]]],OmegaVec_B[[jj[6]]])
A2[i, ii] <- weights / sum(weights)
A2_B[i, ii] <- weights_B / sum(weights_B)
}
#ind.outcome <- c(apply(pairs, 2, rep, length.out = nbetas))
#ind.param <- rep(rep(seq_len(nbetas/2), each = 2), length.out = length(thetas))
#inter <- interaction(ind.outcome, ind.param)
#inter <- factor(inter, levels = sort(levels(inter)))
#A <- matrix(0, Q*nbetas/2, length(thetas))
#for (i in seq_len(Q*nbetas/2)) {
# A[i, inter == levels(inter)[i]] <- 1 / sum(pairs == 1)
#}
#Compute A to be 16 x 48 ad hoc (by pencil)
A<- computeWeightMatrixAVE_by6()
write.table(A,paste(m,"_A.txt",sep=""),row.names=F,col.names=F)
#A2 <- asNumeric(A2)
#A2 <- matrix(A2,nrow=20,ncol=120)
write.table(A2,paste(m,"_A2.txt",sep=""),row.names=F,col.names=F)
write.table(A2_B,paste(m,"_A2_B.txt",sep=""),row.names=F,col.names=F)
# pairwise average betas
##ave.betas <- c(A %*% thetas)
# standrd errors for betas
##se.betas <- sqrt(diag(A %*% solve(J, K) %*% solve(J) %*% t(A)))
# CLIC heuristic
ave.betas2 <- c(A2 %*% thetas)
# standrd errors for betas
se.betas2 <- sqrt(diag(A2 %*% solve(J, K) %*% solve(J) %*% t(A2)))
# 4.10.2015. KJF add, B' way for CLIC heuristic with ecdf()
# CLIC heuristic
ave.betas5 <- c(A2_B %*% thetas)
# standrd errors for betas
se.betas5 <- sqrt(diag(A2_B %*% solve(J, K) %*% solve(J) %*% t(A2_B)))
# 4.10.2015. end KJF add, B' way for CLIC heuristic with ecdf()
#now regular methods
#JJ,KK
#KK
KKlis <- vector("list", n)
for (i in 1:n) {
cat("Individual No...:", i, "for sscores computation\n")
SScores <- vector("list", P)
ic <- vector("list",Q)
sl <- vector("list",Q)
sigz <- vector("list",Q)
sigw <- vector("list",Q)
###rh <- vector("list",P)
for (p in 1:P) {
prs <- pairs[, p]
yi <- Data[[paste("y", prs[1], sep = "")]]
yj <- Data[[paste("y", prs[2], sep = "")]]
DD$y <- c(yi, yj)
DD.i <- DD[(ind.i <- DD$id == i), ]
D <- nearPD(VarCorr(Models[[p]])$id)
#Scores[[p]] <- score.bin(fixef(Models[[p]]), id = DD.i$id, y = DD.i$y,
# X = model.matrix(Models[[p]])[ind.i, ],
# Z = model.matrix(Models[[p]])[ind.i, seq_len(2*ncz)],
# GHk = GHk)
###Scores[[p]] <- score.bin(c(fixef(Models[[p]]),SigmaYi[[p]],SigmaYj[[p]],rhoYiYj[[p]]), id = DD.i$id, y = DD.i$y,
### X = model.matrix(Models[[p]])[ind.i, ],
### Z = cbind(model.matrix(Models[[p]])[ind.i, seq_len(2*ncz)]),
### GHk = GHk)
SScores[[p]] <- score.bin(c(fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]]),
#score.bin.BIG(c(fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]]),
#score.bin.BIG(c(fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]]),
id = DD.i$id, y = DD.i$y,
X = model.matrix(Models[[p]])[ind.i, ],
Z = cbind(model.matrix(Models[[p]])[ind.i, ]),
GHk = GHk,
extraParam = extraParam[[p]])
#thetas <- c(fixef(Models[[p]]),SigmaYi[[p]],SigmaYj[[p]],rhoYiYj[[p]])
#ii<- prs[1]
#jj<- prs[2]
#ic[ii] <- thetas[1]
#ic[jj] <- thetas[2]
#sl[ii] <- thetas[3]
#sl[jj] <- thetas[4]
#sig[ii] <- thetas[5]
#sig[jj] <- thetas[6]
#rh[p] <- thetas[7]
}
#kkk <- P * length( c( fixef(Models[[1]]) , SigmaYi[[1]], SigmaYj[[1]], rhoYiYj[[1]] ))
kkkk <- P * length( c( fixef(Models[[1]]) , Sigma2YiRI[[1]], Sigma2YiRS[[1]], Sigma2YiRIRS[[1]], Sigma2YjRI[[1]], Sigma2YjRS[[1]], Sigma2YjRIRS[[1]] ))
KK <- matrix(0, kkkk, kkkk)
ee <- expand.grid(1:P, 1:P)
ss <- sapply(SScores, length)
ss2 <- cumsum(ss)
ss1 <- c(1, ss2[-P] + 1)
for (ii in 1:nrow(ee)) {
k <- ee$Var1[ii]
j <- ee$Var2[ii]
row.ind <- seq(ss1[k], ss2[k])
col.ind <- seq(ss1[j], ss2[j])
KK[row.ind, col.ind] <- SScores[[k]] %o% SScores[[j]]
KK[row.ind, col.ind] <- SScores[[k]] %o% SScores[[j]]
}
KKlis[[i]] <- KK
}
KK <- Reduce("+", KKlis)
# Extract J matrix
HHessians<- vector("list", P)
for (p in 1:P) {
cat("HHessian No: ...",p,"\n")
prs <- pairs[, p]
yi <- Data[[paste("y", prs[1], sep = "")]]
yj <- Data[[paste("y", prs[2], sep = "")]]
DD$y <- c(yi, yj)
#DD.i <- DD[(ind.i <- DD$id == i), ] #take all observations now, from all i=1:n
D <- nearPD(VarCorr(Models[[p]])$id)
#Hessians[[p]] <- hess.bin(c( fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]] ),
# id = DD$id, y = DD$y,
# X = model.matrix(Models[[p]])[, ],
# Z = cbind(model.matrix(Models[[p]])[, seq_len(2*ncz)]),
# GHk = GHk,
# extraParam = extraParam[[p]])
HHessians[[p]] <- hess.bin.Vect(c( fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]] ),
#hess.bin.BIG.Vect(c( fixef(Models[[p]]),Sigma2YiRI[[p]],Sigma2YiRS[[p]],Sigma2YiRIRS[[p]],Sigma2YjRI[[p]],Sigma2YjRS[[p]],Sigma2YjRIRS[[p]] ),
id = DD$id, y = DD$y,
X = model.matrix(Models[[p]])[, ],
Z = cbind(model.matrix(Models[[p]])[, ]),
GHk = GHk,
extraParam = extraParam[[p]])
}
JJ <- do.call(bdiag, HHessians)
# Compute A matrix
nbetasS <- length( c(fixef(Models[[1]]) , Sigma2YiRI[[1]], Sigma2YiRS[[1]], Sigma2YiRIRS[[1]], Sigma2YjRI[[1]], Sigma2YjRS[[1]], Sigma2YjRIRS[[1]]) )
#thetas <- c( sapply(Models, fixef) )
thetasS <- numeric(0)
for (ii in 1:10*P) {
thetasS[ii]=0
}
for (ii in 1:P) {
thetasS[(ii-1)*10+1]=fixef(Models[[ii]])[1]
thetasS[(ii-1)*10+2]=fixef(Models[[ii]])[2]
thetasS[(ii-1)*10+3]=fixef(Models[[ii]])[3]
thetasS[(ii-1)*10+4]=fixef(Models[[ii]])[4]
thetasS[(ii-1)*10+5]=Sigma2YiRI[[ii]]
thetasS[(ii-1)*10+6]=Sigma2YiRS[[ii]]
thetasS[(ii-1)*10+7]=Sigma2YiRIRS[[ii]]
thetasS[(ii-1)*10+8]=Sigma2YjRI[[ii]]
thetasS[(ii-1)*10+9]=Sigma2YjRS[[ii]]
thetasS[(ii-1)*10+10]=Sigma2YjRIRS[[ii]]
}
AA <- computeWeightMatrixAVE()
#Compute JJ,KK,thetasS,AA READY
# pairwise average betas
ave.betas <- c(AA %*% thetasS)
# standrd errors for betas
se.betas <- sqrt(diag(AA %*% solve(JJ, KK) %*% solve(JJ) %*% t(AA)))
# DWAVE algorithm is method no. 3:
#Step 1: Vi computation
Sigma <- solve(JJ, KK) %*% solve(JJ)
Vi <- Sigma
#Step 2: Omega computation with filter (mask)
Omega<-matrix(0,P*nbetasS,P*nbetasS)
crow=0
ccol=0
for (ii in 1:P) {
crow= (ii-1)*nbetasS
for (jj in 1:P) {
ccol = (jj-1)*nbetasS
for (kk in 1:nbetasS) {
Omega[crow+kk, ccol+kk] <- Vi[crow+kk, ccol+kk]
}
}
}
#Step 3: Omega inversion in new Omega
if ( abs(det(Omega)) > 10^(-10) )
{
Omega <- solve(Omega)
}
if ( abs(det(Omega)) < 10^(-10) )
{
Omega <- ginv(Omega)
}
#Step 4: Formulas for A
A<- matrix(0,nbetasS,P*nbetasS)
crow=0
ccol=0
crow2=0
ccol2=0
for (kk in 1:nbetasS) {
#compute denom, which does not depend on r
denom <-0
for (ii in 1:P) {
for (jj in 1:P) {
crow2 <- (ii-1)*nbetasS
ccol2 <- (jj-1)*nbetasS
denom <- denom + Omega[crow2+kk,ccol2+kk]
}
}
#compute nom, which depends on r
for (r in 1:P) {
nom <-0
for (l in 1:P) {
crow <- (r-1)*nbetasS
ccol <- (l-1)*nbetasS
nom <- nom + Omega[crow+kk,ccol+kk]
}
# nom is ready
# for everyy kk (outer loop) and r (inner loop) compute nom/denom
A[kk,(r-1)*nbetasS+kk] <- nom/denom
}
}
# pairwise average betas
A5<-A #for now
A3 <- computeWeightMatrixWAVE(A5)
ave.betas3 <- c(A3 %*% thetasS)
# standrd errors for betas
se.betas3 <- sqrt(diag(A3 %*% solve(JJ, KK) %*% solve(JJ) %*% t(A3)))
# WAVE algorithm is method no. 4:
###Omega <- solve(Sigma * JJ)
if (det(Sigma) > 10^(-10)) {
#Sigma <- solve(J, K) %*% solve(J)
Omega <- solve(Sigma * JJ)
}
if (det(Sigma) < 10^(-10)) {
# Sigma <- solve(J, K) %*% solve(J)
Omega <- ginv(Sigma * JJ) # use ginv() function from MASS, generalized inverse, seems robust to det -->0
}
npairs <- sum(pairs == 1)
A4 <- matrix(0, Q*nbetasS/2, length(thetasS))
for (i in seq_len(Q*nbetasS/2)) {
#ii <- inter == levels(inter)[i]
ii <- AA[i,]==(1/3) #adjacency matrix for global parameter i according to AA = computeWeightMatrixAVE()
weights <- diag(Omega[ii, ii])
A4[i, ii] <- weights / sum(weights)
}
ave.betas4 <- c(A4 %*% thetasS)
# standrd errors for betas
###se.betas2 <- sqrt(diag(A2 %*% solve(J, K) %*% solve(J) %*% t(A2)))
###se.betas3 <- sqrt(diag(A3 %*% solve(J, K) %*% solve(J) %*% t(A3)))
se.betas4 <- sqrt(diag(A4 %*% solve(JJ, KK) %*% solve(JJ) %*% t(A4)))
write.table(AA,paste(m,"_AA.txt",sep=""),row.names=F,col.names=F)
write.table(A2,paste(m,"_A2.txt",sep=""),row.names=F,col.names=F)
write.table(A3,paste(m,"_A3.txt",sep=""),row.names=F,col.names=F)
write.table(A4,paste(m,"_A4.txt",sep=""),row.names=F,col.names=F)
write.table(A2_B,paste(m,"_A2_B.txt",sep=""),row.names=F,col.names=F)
write.table(ave.betas,paste(m,"_ave.betas.txt",sep=""),row.names=F,col.names=F)
write.table(ave.betas2,paste(m,"_ave.betas2.txt",sep=""),row.names=F,col.names=F)
write.table(ave.betas3,paste(m,"_ave.betas3.txt",sep=""),row.names=F,col.names=F)
write.table(ave.betas4,paste(m,"_ave.betas4.txt",sep=""),row.names=F,col.names=F)
write.table(ave.betas5,paste(m,"_ave.betas5.txt",sep=""),row.names=F,col.names=F)
write.table(se.betas,paste(m,"_se.betas.txt",sep=""),row.names=F,col.names=F)
write.table(se.betas2,paste(m,"_se.betas2.txt",sep=""),row.names=F,col.names=F)
write.table(se.betas3,paste(m,"_se.betas3.txt",sep=""),row.names=F,col.names=F)
write.table(se.betas4,paste(m,"_se.betas4.txt",sep=""),row.names=F,col.names=F)
write.table(se.betas5,paste(m,"_se.betas5.txt",sep=""),row.names=F,col.names=F)
# results
cbind("Value(ave)" = ave.betas, "SE(ave)" = se.betas,
"Value(clic-h-exp)" = ave.betas2, "SE(clic-h-exp)" = se.betas2,
"Value(dwave)" = ave.betas3, "SE(dwave)" = se.betas3,
"Value(wave)" = ave.betas4, "SE(wave)" = se.betas4,
"Value(clic-h-ecdf)" = ave.betas5, "SE(clic-h-ecdf)" = se.betas5)
###cbind("Value(ave)" = ave.betas.Florios) #, "SE(ave)" = se.betas,
# "Value(wave)" = ave.betas2, "SE(wave)" = se.betas2)
}
# Map a value x to its 1-based bin of width 10 (1..10 -> 1, 11..20 -> 2, ...).
# For integer x this is equivalent to ceiling(x / 10).
interval <- function(x) {
  if (x %% 10 == 0) {
    (x - 1) %/% 10 + 1
  } else {
    x %/% 10 + 1
  }
}
# Map a value x to its 1-based bin of width 20 (1..20 -> 1, 21..40 -> 2, ...).
# For integer x this is equivalent to ceiling(x / 20).
intervalBIG <- function(x) {
  bin <- if (x %% 20 == 0) (x - 1) %/% 20 else x %/% 20
  bin + 1
}
computeWeightMatrixAVE_by6 <- function() {
  # Build the 20 x 120 simple-averaging weight matrix: row k carries weight
  # 1/6 at the six column positions holding global parameter k (one per
  # pairwise model), so A %*% theta averages each parameter over its six
  # pairwise estimates.  Each row sums to 1.  Rho parameters are not handled.
  positions <- list(
    c(1, 21, 41, 71, 91, 111),
    c(3, 23, 43, 72, 92, 112),
    c(5, 25, 45, 73, 93, 113),
    c(6, 26, 46, 74, 94, 114),
    c(7, 27, 47, 75, 95, 115),
    c(2, 31, 51, 61, 81, 116),
    c(4, 32, 52, 63, 83, 117),
    c(8, 33, 53, 65, 85, 118),
    c(9, 34, 54, 66, 86, 119),
    c(10, 35, 55, 67, 87, 120),
    c(11, 22, 56, 62, 96, 101),
    c(12, 24, 57, 64, 97, 103),
    c(13, 28, 58, 68, 98, 105),
    c(14, 29, 59, 69, 99, 106),
    c(15, 30, 60, 70, 100, 107),
    c(16, 36, 42, 76, 82, 102),
    c(17, 37, 44, 77, 84, 104),
    c(18, 38, 48, 78, 88, 108),
    c(19, 39, 49, 79, 89, 109),
    c(20, 40, 50, 80, 90, 110)
  )
  A <- matrix(0, nrow = 20, ncol = 120)
  for (k in seq_along(positions)) {
    A[k, positions[[k]]] <- 1 / 6
  }
  A
}
computeWeightMatrixAVE <- function() {
  # Build the 20 x 60 simple-averaging weight matrix: row k carries weight
  # 1/3 at the three column positions holding global parameter k (one per
  # pairwise model), so A %*% theta is the plain average of the three
  # pairwise estimates.  Each row sums to 1.  Rho parameters are not handled.
  positions <- matrix(c(
     1, 11, 21,
     3, 13, 23,
     5, 15, 25,
     6, 16, 26,
     7, 17, 27,
     2, 31, 41,
     4, 33, 43,
     8, 35, 45,
     9, 36, 46,
    10, 37, 47,
    12, 32, 51,
    14, 34, 53,
    18, 38, 55,
    19, 39, 56,
    20, 40, 57,
    22, 42, 52,
    24, 44, 54,
    28, 48, 58,
    29, 49, 59,
    30, 50, 60
  ), nrow = 20, byrow = TRUE)
  A <- matrix(0, nrow = 20, ncol = 60)
  for (k in seq_len(nrow(positions))) {
    A[k, positions[k, ]] <- 1 / 3
  }
  A
}
computeWeightMatrixWAVE <- function(A2) {
  # Weighted-average (WAVE) counterpart of computeWeightMatrixAVE(): it uses
  # the same 20 x 60 non-zero pattern, but each non-zero cell takes the
  # column total of A2 as its raw weight, and every row is then renormalised
  # so its weights sum to 1.
  #
  # @param A2 numeric matrix (60 columns) whose column sums supply the raw
  #   per-estimate weights (e.g. the DWAVE "A" matrix built upstream).
  # @return 20 x 60 weight matrix with unit row sums.
  #
  # Fix: colSums(A2) was previously recomputed inside a 20 x 60 double loop
  # (1200 full passes over A2); it is now computed once and the loops are
  # replaced by vectorised sweeps with identical results.
  nzPattern <- computeWeightMatrixAVE()
  colTotals <- colSums(A2)
  # Spread each column total over that column's non-zero pattern cells.
  A3 <- sweep((nzPattern != 0) * 1, 2, colTotals, "*")
  # Renormalise each row to sum to 1 (0/0 rows yield NaN, as before).
  sweep(A3, 1, rowSums(A3), "/")
}
hess.bin.Vect <- function (thetas, id, y, X, Z, GHk = GHk, extraParam) {
  # Numerical Hessian of the pairwise binary log-likelihood logLik.bin,
  # evaluated at `thetas`, computed by the finite-difference routine
  # cddVect(), which already returns the full
  # length(thetas) x length(thetas) matrix.
  #
  # Fix: dropped the dead pre-allocation of `res` (it was immediately
  # overwritten) and the commented-out element-wise loops.
  # NOTE(review): the default GHk = GHk is self-referential and errors if
  # GHk is not supplied by the caller — confirm this is intended.
  cddVect(thetas, logLik.bin, id = id, y = y, X = X, Z = Z, GHk = GHk, extraParam)
}
cddVect <- function (x0, f, ..., eps = 0.0005) {
  # Numerical Hessian of a real-valued function by "double forward step"
  # finite differences.  Translation of the Gauss command hessp(fun, x0);
  # Matlab code: SUNG Jae Jun, PhD; R code: Kostas Florios, PhD
  # (original source: http://grizzly.la.psu.edu/~suj14/programs.html).
  #
  # @param x0  numeric vector (length k) at which the Hessian is taken;
  #            it is passed to `f` as a k x 1 matrix.
  # @param f   real-valued function f(x, ...) returning a scalar.
  # @param ... extra arguments forwarded to `f`.
  # @param eps step size used for the forward differences.
  # @return k x k symmetric matrix: the Hessian of f at x0.
  #
  # Bug fix: the body previously reassigned `eps <- 0.0005` unconditionally,
  # so a caller-supplied `eps` was silently ignored; the argument is now
  # honoured (default behaviour is unchanged).
  xx0 <- x0
  k <- length(x0)
  x0 <- matrix(0, k, 1)
  x0[, 1] <- xx0
  dax0 <- matrix(0, k, 1)
  hessian <- matrix(0, nrow = k, ncol = k)
  grdd <- matrix(0, nrow = k, ncol = 1)
  H <- matrix(0, nrow = k, ncol = k)
  # Step-size computation: dh = eps * sign(x0), with sign taken as +1 at 0.
  ax0 <- abs(x0)
  for (i in 1:k) {
    if (x0[i, 1] != 0) {
      dax0[i, 1] <- x0[i, 1] / ax0[i, 1]
    } else {
      dax0[i, 1] <- 1
    }
  }
  # dh <- eps*max(ax0, (1e-2)*matrix(1,k,1))*dax0   # Gauss original scaling
  dh <- eps * dax0
  xdh <- x0 + dh
  dh <- xdh - x0 # This increases precision slightly
  ee <- matrix(0, nrow = k, ncol = k)
  I <- diag(1, k)
  for (i in 1:k) {
    ee[, i] <- I[, i] * dh
  }
  # Base point f0 = f(x0)
  f0 <- f(x0, ...)
  # Single forward steps f(x0 + e_i)
  for (i in 1:k) {
    grdd[i, 1] <- f(x0 + ee[, i], ...)
  }
  # Double forward steps f(x0 + e_i + e_j); symmetric, so only j >= i is
  # evaluated and mirrored.
  for (i in 1:k) {
    cat("Computing Row No...:", i, "of hessian\n")
    for (j in i:k) {
      hessian[i, j] <- f(x0 + (ee[, i] + ee[, j]), ...)
      if (i != j) {
        hessian[j, i] <- hessian[i, j]
      }
    }
  }
  l <- t(matrix(1, k, 1))
  grdd <- kronecker(l, grdd)
  # H[i,j] = (f(x+ei+ej) - f(x+ei) - f(x+ej) + f(x)) / (dh_i * dh_j)
  H <- (((hessian - grdd) - t(grdd)) + f0 * matrix(1, nrow = k, ncol = k)) / kronecker(dh, t(dh))
  return(H)
}
# Numerical score (gradient) of logLik.bin.BIG at `thetas`, obtained via the
# finite-difference routine fd().  `extraParam` is deliberately passed
# positionally between named arguments — do not reorder without checking
# fd()'s signature.
# NOTE(review): the defaults Data=Data, r=r, i=i are self-referential and
# will error if those arguments are not supplied by the caller — confirm
# this is intended.
score.bin.BIG <- function (thetas, id, y, X, Z, GHk = 5, extraParam, Data=Data, r=r, i=i) {
fd (thetas, logLik.bin.BIG, id = id, y = y, X = X, Z = Z, GHk = GHk, extraParam, Data=Data, r=r, i=i)
}
hess.bin.BIG.Vect <- function (thetas, id, y, X, Z, GHk = GHk, extraParam, Data, r, i) {
  # Numerical Hessian of the hybrid pairwise/univariate log-likelihood
  # logLik.bin.BIG at `thetas`, via the finite-difference routine cddVect().
  # `extraParam`, `Data`, `r` and `i` are forwarded positionally through
  # cddVect()'s `...` to logLik.bin.BIG.
  #
  # Fix: dropped the dead pre-allocation of `res` (it was immediately
  # overwritten) and the commented-out element-wise loops.
  # NOTE(review): the default GHk = GHk is self-referential and errors if
  # GHk is not supplied by the caller — confirm this is intended.
  cddVect(thetas, logLik.bin.BIG, id = id, y = y, X = X, Z = Z, GHk = GHk, extraParam, Data, r, i)
}
# Hybrid "BIG" log-likelihood: the pairwise binary log-likelihood of pair
# r (via logLik.bin, on thetas[1:10]) plus the univariate log-likelihoods
# (via logLik.bin.One, on thetas[11:15] and thetas[16:20]) of the outcomes
# NOT in pair r.
# Relies on objects from the calling/global environment: pairs, Models,
# ModelsOne, DataRaw, n, times, Q, extraParamOne, aveThetas2 — these must
# exist when the function is called (TODO confirm they are always in scope).
logLik.bin.BIG <- function (thetas, id, y, X, Z, GHk = 5, extraParam, Data, r, i) {
#environment(aveThetas)<- environment(logLik.bin) <- environment(score.bin) <- environment()
#environment(aveThetas)<- environment()
# Re-point aveThetas2's enclosing environment at this call frame so it can
# see the local variables defined below.
environment(aveThetas2)<- environment()
res<-0
cp<-0
# NOTE(review): `cp` is assigned but never used in this function.
##thetasP <- vector("list", P)
#1: pair 1-2
###thetasP[[1]] = c( thetas[c(1,2,5,6,9,13)], thetas[c(17)], thetas[c(10,14,18)] )
#2: pair 1-3
###thetasP[[2]] = c( thetas[c(1,3,5,7,9,13)], thetas[c(17)], thetas[c(11,15,19)] )
#3: pair 1-4
###thetasP[[3]] = c( thetas[c(1,4,5,8,9,13)], thetas[c(17)], thetas[c(12,16,20)] )
#4: pair 2-3
###thetasP[[4]] = c( thetas[c(2,3,6,7,10,14)], thetas[c(18)], thetas[c(11,15,19)] )
#5: pair 2-4
###thetasP[[5]] = c( thetas[c(2,4,6,8,10,14)], thetas[c(18)], thetas[c(12,16,20)] )
#6: pair 3-4
###thetasP[[6]] = c( thetas[c(3,4,7,8,11,15)], thetas[c(19)], thetas[c(12,16,20)] )
##thetasP <- thetas
##thetasP <- as.vector(thetasP)
# Parameter layout: first 10 entries feed the pairwise likelihood,
# entries 11:20 feed the two univariate likelihoods (5 each).
thetasP <- thetas[1:10]
thetasP <- as.vector(thetasP)
thQ <- thetas[11:20]
thQ <- as.vector(thQ)
p <- r
#DD <- do.call(rbind, list(Data[1:3], Data[1:3]))
#DD$outcome <- gl(2, nrow(Data))
# NOTE(review): p <- r is assigned twice; the second assignment is redundant.
p<-r
prs <- pairs[, p]
DD <- Data
# Pairwise contribution for pair p: the design matrices stack the first
# half of the rows for each of the two stacked outcomes.
res <- logLik.bin(thetasP,
id = DD$id, y = DD$y,
X = rbind(model.matrix(Models[[p]])[1:(dim(Data)[1]/2),],
model.matrix(Models[[p]])[(n*length(times)+1):(n*length(times)+(dim(Data)[1])/2),]),
Z = rbind(model.matrix(Models[[p]])[1:(dim(Data)[1]/2),],
model.matrix(Models[[p]])[(n*length(times)+1):(n*length(times)+(dim(Data)[1]/2)),]),
GHk = GHk,
extraParam = extraParam)
# Outcomes not in pair p get univariate contributions below.
indices <- 1:Q
indic <- indices[c(-prs[1],-prs[2])]
##thetasQ <- vector("list", Q)
#1: item 1
##thetasQ[[1]] = c(fixef(ModelsOne[[1]]),Sigma2YiRIone[[1]],Sigma2YiRSone[[1]],Sigma2YiRIRSone[[1]])
#2: item 2
##thetasQ[[2]] = c(fixef(ModelsOne[[2]]),Sigma2YiRIone[[2]],Sigma2YiRSone[[2]],Sigma2YiRIRSone[[2]])
#3: item 3
##thetasQ[[3]] = c(fixef(ModelsOne[[3]]),Sigma2YiRIone[[3]],Sigma2YiRSone[[3]],Sigma2YiRIRSone[[3]])
#4: item 4
##thetasQ[[4]] = c(fixef(ModelsOne[[4]]),Sigma2YiRIone[[4]],Sigma2YiRSone[[4]],Sigma2YiRIRSone[[4]])
thetasQ <- vector("list", 2)
thetasQ[[1]] <- thetas[11:15]
thetasQ[[2]] <- thetas[16:20]
DDD <- do.call(rbind, list(Data[1:3]))
DDD$outcome <- gl(1, nrow(Data))
for (k in 1:length(indic) ) {
ii <- indic[k]
# i <= n selects a single subject's rows; otherwise use all rows.
if (i <= n) {
ind.i <- DataRaw$id == i
}
else {
ind.i <- rep(T,n*length(times))
}
yi <- DataRaw[[paste("y", indic[k], sep = "")]][ind.i]
DDD <- DataRaw[ind.i,]
DDD$y <- c(yi)
# Univariate contribution of outcome ii, added to the running total.
res <- res + logLik.bin.One(thetasQ[[k]],
id = DDD$id, y=DDD$y,
X = model.matrix(ModelsOne[[ii]])[1:(dim(Data)[1]/2),],
Z = model.matrix(ModelsOne[[ii]])[1:(dim(Data)[1]/2),],
GHk = GHk,
extraParamOne = extraParamOne[[ii]])
}
res
}
# Univariate mixed-effects logistic log-likelihood, integrated over a
# bivariate random effect (random intercept + slope) by Gauss-Hermite
# quadrature with GHk nodes per dimension.
# thetas layout: [1:2] fixed effects, [3] RI variance, [4] RS variance,
# [5] RI-RS covariance.  Returns the log-likelihood as a MAXIMISATION
# objective (sign flipped relative to the original minimisation form).
# Depends on gauher() (Gauss-Hermite nodes/weights, defined elsewhere),
# dmvnorm() and nearPD().
logLik.bin.One <- function (thetas, id, y, X, Z, GHk = 5, extraParamOne) {
#thetas <- relist(thetas, lis.thetas)
#betas <- thetas$betas
#ncz <- ncol(Z)
#D <- matrix(0, ncz, ncz)
#D[lower.tri(D, TRUE)] <- thetas$D
#D <- D + t(D)
#diag(D) <- diag(D) / 2
#
betas<-thetas[1:2]
Sigma2YiRI<-thetas[3]
Sigma2YiRS<-thetas[4]
Sigma2YiRIRS<-thetas[5]
Dold <- matrix(0,ncol=2,nrow=2)
Dnew <- matrix(0,ncol=2,nrow=2)
Dold <- extraParamOne # extraParam is VarCorr(Models[[i]])$id
#re-align columns, rows
#old: glmer, 1,2,3,4
#new: Florios, 1,3,2,4
#now plug-in the thetas, in order to perform numerical derivatives wrt theta (scores, hessians)
# Rebuild the symmetric 2x2 random-effects covariance matrix from thetas.
Dnew[1,1] = Sigma2YiRI
Dnew[2,2] = Sigma2YiRS
Dnew[2,1] = Sigma2YiRIRS
Dnew[1,2] = Dnew[2,1]
#simplify notation: D=Dnew, and proceed
# Force D to the nearest positive-definite matrix.
# NOTE(review): if nearPD here is Matrix::nearPD it returns a list (the
# matrix is in $mat); presumably a project helper returning a matrix is in
# scope instead — verify.
D <-nearPD(Dnew)
ncz <- ncol(Z)
# Gauss-Hermite grid: ncz-dimensional tensor product of GHk nodes, with
# weights rescaled for integration against a Gaussian density.
GH <- gauher(GHk)
b <- as.matrix(expand.grid(rep(list(GH$x), ncz)))
dimnames(b) <- NULL
k <- nrow(b)
wGH <- as.matrix(expand.grid(rep(list(GH$w), ncz)))
wGH <- 2^(ncz/2) * apply(wGH, 1, prod) * exp(rowSums(b * b))
b <- sqrt(2) * b
Ztb <- Z %*% t(b)
#
# Bernoulli log-density at every quadrature node, summed per subject (id),
# then combined with the random-effects log-density and the GH weights.
mu.y <- plogis(as.vector(X %*% betas) + Ztb)
logBinom <- dbinom(y, 1, mu.y, TRUE)
log.p.yb <- rowsum(logBinom, id)
log.p.b <- dmvnorm(b, rep(0, ncol(Z)), D, TRUE)
p.yb <- exp(log.p.yb + rep(log.p.b, each = nrow(log.p.yb)))
p.y <- c(p.yb %*% wGH)
#-sum(log(p.y), na.rm = TRUE) #logLik as min, original
sum(log(p.y), na.rm = TRUE) #logLik as max, CLIC heuristic
}
gc() # trigger garbage collection to release memory at the end of the run
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sqlanalogs.R
\name{right}
\alias{right}
\alias{left}
\title{SQL Analog to left & right substring functions}
\usage{
right(vec, n, trimws = TRUE, sameclass = FALSE)
left(vec, n, trimws = TRUE, sameclass = FALSE)
}
\arguments{
\item{vec}{A vector of any datatype: \code{character, numeric, logical, factor}, etc.}
\item{n}{Integer scalar. The number of characters you want to keep.}
\item{trimws}{Should the applicable leading/trailing whitespace be removed first? Default is \code{TRUE}.}
\item{sameclass}{Should the output be the same class as the input? Defaults to \code{FALSE} \emph{(returns a character vector no matter the input)}. Generally, you don't want this \emph{(especially for POSIX classes, for which it is sketchy at best)}.}
}
\description{
Vectorized wrapper for taking the left/right N characters from a string, number, factor, etc., just like in SQL. I find myself constantly trimming strings and grabbing only the first/last few characters so this saves some typing.
}
\section{right}{
R Analog to SQL's \code{RIGHT()} function. Defaults to trim leading whitespace.
}
\section{left}{
R Analog to SQL's \code{LEFT()} function. Defaults to trim trailing whitespace.
}
\examples{
right(vec = 'SomethingLong', n = 4)
right(vec = 425575.4, n = 5)
right(vec = 'AnotherThing ', n = 7, trimws = FALSE)
right(vec = 401.98, 4, sameclass = TRUE)
left(vec = 'SomethingLong', n = 4)
left(vec = 40000.00, n = 2)
left(vec = ' AnotherThing', n = 4, trimws = FALSE)
left(vec = 400, 1, sameclass = TRUE)
}
\keyword{analog}
\keyword{left}
\keyword{right}
\keyword{sql}
| /man/sqlanalogs.Rd | permissive | Paul-James/pjames | R | false | true | 1,653 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sqlanalogs.R
\name{right}
\alias{right}
\alias{left}
\title{SQL Analog to left & right substring functions}
\usage{
right(vec, n, trimws = TRUE, sameclass = FALSE)
left(vec, n, trimws = TRUE, sameclass = FALSE)
}
\arguments{
\item{vec}{A vector of any datatype: \code{character, numeric, logical, factor}, etc.}
\item{n}{Integer scalar. The number of characters you want to keep.}
\item{trimws}{Should the applicable leading/trailing whitespace be removed first? Default is \code{TRUE}.}
\item{sameclass}{Should the output be the same class as the input? Defaults to \code{FALSE} \emph{(returns a character vector no matter the input)}. Generally, you don't want this \emph{(especially for POSIX classes, for which it is sketchy at best)}.}
}
\description{
Vectorized wrapper for taking the left/right N characters from a string, number, factor, etc., just like in SQL. I find myself constantly trimming strings and grabbing only the first/last few characters so this saves some typing.
}
\section{right}{
R Analog to SQL's \code{RIGHT()} function. Defaults to trim leading whitespace.
}
\section{left}{
R Analog to SQL's \code{LEFT()} function. Defaults to trim trailing whitespace.
}
\examples{
right(vec = 'SomethingLong', n = 4)
right(vec = 425575.4, n = 5)
right(vec = 'AnotherThing ', n = 7, trimws = FALSE)
right(vec = 401.98, 4, sameclass = TRUE)
left(vec = 'SomethingLong', n = 4)
left(vec = 40000.00, n = 2)
left(vec = ' AnotherThing', n = 4, trimws = FALSE)
left(vec = 400, 1, sameclass = TRUE)
}
\keyword{analog}
\keyword{left}
\keyword{right}
\keyword{sql}
|
# Jupyter/IRkernel display options: 10 x 10 inch figure on a slategray canvas.
options(repr.plot.width=10,repr.plot.height=10,
repr.plot.bg='slategray')
# Palette: c0 = labels/frames, c1 = default edges/vertices, c2 = highlighted path.
c0<-'white'; c1<-'#3333ff'; c2<-'#ff3333'
# Undirected weighted graph on vertices A-F; edges listed as consecutive pairs.
edges<-c('A','B', 'A','C', 'A','F', 'B','C', 'B','D',
'C','D', 'C','E', 'D','E', 'D','F', 'E','F')
weights<-c(3,8,16,4,7,2,6,5,4,2)
# Requires igraph (graph, set_edge_attr, ...) and a pipe operator to be loaded.
g<-graph(edges,directed=FALSE)%>%
set_edge_attr('weight',value=weights)
# Shortest path from A to F (per igraph docs, the 'weight' edge attribute is
# used by default when present).
shortest_path<-get.shortest.paths(g,'A','F')
# Colour all edges with the base colour, then recolour the edges along the path.
E(g)$color<-c1
E(g,path=unlist(shortest_path$vpath))$color<-c2
# Draw the graph: edge widths and labels show weights; the A-F path stands out.
plot(g,layout=layout_nicely(g),vertex.label.cex=2.5,
vertex.color=c1,vertex.size=25,
vertex.label.color=c0,vertex.frame.color=c0,
edge.label.color=c0,edge.label=E(g)$weight,
edge.label.cex=2.5,edge.width=E(g)$weight)
| /r_recipes/graph_plot.R | no_license | OlgaBelitskaya/cookbooks | R | false | false | 688 | r |
# Jupyter/IRkernel display options: 10 x 10 inch figure on a slategray canvas.
options(repr.plot.width=10,repr.plot.height=10,
repr.plot.bg='slategray')
# Palette: c0 = labels/frames, c1 = default edges/vertices, c2 = highlighted path.
c0<-'white'; c1<-'#3333ff'; c2<-'#ff3333'
# Undirected weighted graph on vertices A-F; edges listed as consecutive pairs.
edges<-c('A','B', 'A','C', 'A','F', 'B','C', 'B','D',
'C','D', 'C','E', 'D','E', 'D','F', 'E','F')
weights<-c(3,8,16,4,7,2,6,5,4,2)
# Requires igraph (graph, set_edge_attr, ...) and a pipe operator to be loaded.
g<-graph(edges,directed=FALSE)%>%
set_edge_attr('weight',value=weights)
# Shortest path from A to F (per igraph docs, the 'weight' edge attribute is
# used by default when present).
shortest_path<-get.shortest.paths(g,'A','F')
# Colour all edges with the base colour, then recolour the edges along the path.
E(g)$color<-c1
E(g,path=unlist(shortest_path$vpath))$color<-c2
# Draw the graph: edge widths and labels show weights; the A-F path stands out.
plot(g,layout=layout_nicely(g),vertex.label.cex=2.5,
vertex.color=c1,vertex.size=25,
vertex.label.color=c0,vertex.frame.color=c0,
edge.label.color=c0,edge.label=E(g)$weight,
edge.label.cex=2.5,edge.width=E(g)$weight)
# Build an M5 Kaggle submission from combined forecast output.
#
# Input : <BASE_DIR>/<forecasts_path> — headerless table, one row per series,
#         columns 1-28 = validation horizon, columns 29-56 = evaluation horizon.
# Output: <BASE_DIR>/<output_path>   — submission CSV with columns id, F1..F28.
BASE_DIR <- "C:/Projects/M5Comp/"
forecasts_path <- "results/combined_forecasts/intermittent_sbj_non_intermittent_rnn_forecasts.txt"
output_path <- "results/submissions/intermittent_sbj_non_intermittent_rnn_forecasts.csv"

results <- read.csv(paste0(BASE_DIR, forecasts_path), header = FALSE)
sales_dataset <- read.csv(paste0(BASE_DIR, "dataset/comp_data/comp_data/sales_train_validation.csv"))

# Split the 56 forecast columns into the two 28-day horizons.
validation_results <- results[, 1:28]
evaluation_results <- results[, 29:56]

# Series ids follow the competition convention: <item>_<store>_<suffix>.
# Row order of `results` is assumed to match `sales_dataset` — TODO confirm.
validation_results$id <- paste0(sales_dataset$item_id, "_", sales_dataset$store_id, "_validation")
evaluation_results$id <- paste0(sales_dataset$item_id, "_", sales_dataset$store_id, "_evaluation")

# Move id to the front and name the forecast columns F1..F28.
final_validation_results <- validation_results[, c(29, 1:28)]
final_evaluation_results <- evaluation_results[, c(29, 1:28)]
submission_cols <- c("id", paste0("F", seq_len(28)))  # was sprintf("F%s", seq(1:28)); also avoids shadowing base::names
colnames(final_validation_results) <- submission_cols
colnames(final_evaluation_results) <- submission_cols

final <- rbind(final_validation_results, final_evaluation_results)
write.csv(final, paste0(BASE_DIR, output_path), row.names = FALSE)
| /scripts/make_submission.R | no_license | HansikaPH/M5Comp | R | false | false | 1,058 | r | BASE_DIR <- "C:/Projects/M5Comp/"
forecasts_path <- "results/combined_forecasts/intermittent_sbj_non_intermittent_rnn_forecasts.txt"
output_path <- "results/submissions/intermittent_sbj_non_intermittent_rnn_forecasts.csv"
results <- read.csv(paste0(BASE_DIR, forecasts_path), header=FALSE)
sales_dataset <- read.csv(paste0(BASE_DIR,"dataset/comp_data/comp_data/sales_train_validation.csv"))
validation_results <- results[,c(1:28)]
evaluation_results <- results[,c(29:56)]
validation_results$id = paste0(sales_dataset$item_id,"_",sales_dataset$store_id,"_validation")
evaluation_results$id = paste0(sales_dataset$item_id,"_",sales_dataset$store_id,"_evaluation")
final_validation_results <- validation_results[,c(29,1:28)]
final_evaluation_results <- evaluation_results[,c(29,1:28)]
names <- c("id", sprintf("F%s",seq(1:28)))
colnames(final_validation_results) <- names
colnames(final_evaluation_results) <- names
final <- rbind(final_validation_results, final_evaluation_results)
write.csv(final, paste0(BASE_DIR, output_path), row.names = FALSE)
|
#===============================================================================
# Title : Summarising NMFI-AMR systematic literature review database
# Data version : Data received from Poojan Shrestha on 15-07-2019
# Manuscript : NMFI-Antimicrobial resistance analysis
# URL : https://www.iddo.org/non-malarial-febrile-illness-map
# Script date : 14-04-2021
# Script : Prabin Dahal; prabin.dahal@iddo.org
# R Version : R version 4.0.4 (2021-02-15)
#===============================================================================
#rm(list=ls())
require(tableone)
require(dplyr)
require(plyr) # for revalue
# meta-analysis packages
require(meta)
require(metafor)
require(metasens)
#-----------------------
# Read analysis dataset
#-----------------------
# Read the pooled NMFI-AMR analysis dataset (path is machine-specific).
dat0<-read.csv("D:/_IDDO/_nmfi_amr/NMFI AMR/Data/nmfi_amr_analysis_set_final.csv")
dat<- dat0
#-----------------------------------
# Look at pathogen-drug combination
#-----------------------------------
# Restrict to the Klebsiella pneumoniae / Ceftriaxone combination and drop
# factor levels that no longer occur in the subset.
drugbug <- dat[which(dat$pathogen_2=="Klebsiella pneumoniae" & dat$antibiotic %in% c("Ceftriaxone")),]
drugbug <- droplevels(drugbug)
# number of unique articles
# NOTE(review): this assigns to `unique`, shadowing base::unique for the rest
# of the session — consider a different name.
nrow(unique<-drugbug[which(!duplicated(drugbug$article_id)),])
# Region-specific subsets.
drugbug_africa<- drugbug[which(drugbug$region=="Africa"),]
drugbug_asia<- drugbug[which(drugbug$region=="Asia"),]
#===========================================================================================
# Part I: Summarise the number of events and total isolates tested by region and time-period
#===========================================================================================
#--------------------------------------------------------------
# Exclude studies with either missing numerator or denominator
#--------------------------------------------------------------
drugbug1 <- drugbug[which(!is.na(drugbug$number_resistant) & !is.na(drugbug$total_invasive_isolates)),]
drugbug_africa1 <- drugbug_africa[which(!is.na(drugbug_africa$number_resistant) & !is.na(drugbug_africa$total_invasive_isolates)),]
drugbug_asia1 <- drugbug_asia[which(!is.na(drugbug_asia$number_resistant) & !is.na(drugbug_asia$total_invasive_isolates)),]
# number of unique articles (after the missing-data exclusion)
nrow(unique<-drugbug1[which(!duplicated(drugbug1$article_id)),])
#----------------------------------
# Total isolates and total events
#----------------------------------
# Overall counts: publications, sites, resistant isolates and total isolates
# (results are printed, not stored; uncomment group_by for a per-period view).
# NOTE(review): prefer na.rm = TRUE over the abbreviation T, which can be
# reassigned.
drugbug1 %>%
#dplyr::group_by(year_cat) %>%
dplyr::summarise(
n_pub= length(unique(article_id)),
n_site= length(unique(site_code)),
n_resis = sum(number_resistant, na.rm=T),
n_total= sum(total_invasive_isolates, na.rm=T),
paste = paste(sum(number_resistant, na.rm=T), sum(total_invasive_isolates, na.rm=T), sep="/")
)
#-------------------------------------------
# Total isolates and total events for Africa
#-------------------------------------------
# Africa, overall.
drugbug_africa1 %>%
#dplyr::group_by(year_cat) %>%
dplyr::summarise(
n_site= length(unique(site_code)),
n_resis = sum(number_resistant, na.rm=T),
n_total= sum(total_invasive_isolates, na.rm=T),
paste = paste(sum(number_resistant, na.rm=T), sum(total_invasive_isolates, na.rm=T), sep="/")
)
# Africa, by time period.
drugbug_africa1 %>%
dplyr::group_by(year_cat) %>%
dplyr::summarise(
n_site= length(unique(site_code)),
n_resis = sum(number_resistant, na.rm=T),
n_total= sum(total_invasive_isolates, na.rm=T),
paste = paste(sum(number_resistant, na.rm=T), sum(total_invasive_isolates, na.rm=T), sep="/")
)
#-------------------------------------------
# Total isolates and total events for Asia
#-------------------------------------------
# Asia, overall.
drugbug_asia1 %>%
#dplyr::group_by(year_cat) %>%
dplyr::summarise(
n_site= length(unique(site_code)),
n_resis = sum(number_resistant, na.rm=T),
n_total= sum(total_invasive_isolates, na.rm=T),
paste = paste(sum(number_resistant, na.rm=T), sum(total_invasive_isolates, na.rm=T), sep="/")
)
# Asia, by time period.
drugbug_asia1 %>%
dplyr::group_by(year_cat) %>%
dplyr::summarise(
n_site= length(unique(site_code)),
n_resis = sum(number_resistant, na.rm=T),
n_total= sum(total_invasive_isolates, na.rm=T),
paste = paste(sum(number_resistant, na.rm=T), sum(total_invasive_isolates, na.rm=T), sep="/")
)
#=======================================================
# Part II: Carry out meta-analysis of single proportions
#=======================================================
#--------------------------------------------------
# Some studies have split data into multiple rows
# Aggregate by study sites for meta-analysis
#--------------------------------------------------
# Collapse multi-row study sites to one row per site (and period/region) so
# each site contributes a single proportion to the meta-analysis.
drugbug1 <- drugbug1 %>%
dplyr::group_by(site_code,year_cat,region) %>%
dplyr::summarise(
number_resistant = sum(number_resistant),
total_invasive_isolates =sum(total_invasive_isolates)
)
drugbug_africa1 <- drugbug_africa1 %>%
dplyr::group_by(site_code,year_cat) %>%
dplyr::summarise(
number_resistant = sum(number_resistant),
total_invasive_isolates =sum(total_invasive_isolates)
)
drugbug_asia1 <- drugbug_asia1 %>%
dplyr::group_by(site_code,year_cat) %>%
dplyr::summarise(
number_resistant = sum(number_resistant),
total_invasive_isolates =sum(total_invasive_isolates)
)
#----------------
# Meta-analysis
#----------------
# Pooled proportion resistant across all sites (meta::metaprop); the outer
# parentheses print the fitted object.
(ma1 <- metaprop(
data = drugbug1,
number_resistant,
total_invasive_isolates,
studlab = site_code
)
)
# Subgroup analysis by region, random-effects model only.
update.meta(ma1, byvar=region, comb.random = TRUE, comb.fixed = F)
# Examine the influence of any single study (leave-one-out)
metainf(ma1, pooled = "random")
# Sensitivity analysis: repeat restricting to studies that tested more than
# 1, 2, 5 and 10 isolates respectively.
update(ma1, subset = total_invasive_isolates > 1)
update(ma1, subset = total_invasive_isolates > 2)
update(ma1, subset = total_invasive_isolates > 5)
update(ma1, subset = total_invasive_isolates > 10)
#--------------------------------------------------------------------------------------
# Assess funnel-plot asymmetry and derive adjusted estimates using the trim-and-fill method
#--------------------------------------------------------------------------------------
metabias(ma1)
trimfill(ma1)
#----------------------
# Look at Africa only
#----------------------
# Carry out overall meta-analysis of proportion for the African sites.
(afr1 <- metaprop(
data = drugbug_africa1,
number_resistant,
total_invasive_isolates,
studlab = site_code
)
)
# Subgroup analysis by time period, random-effects model only.
update.meta(afr1, byvar=year_cat, comb.random = TRUE, comb.fixed = F)
# Warning of potential unstable estimates were returned
# Time period sub-group (1980-1989) from 1 study site had no events
# Wilson confidence interval for that 0/8 cell, computed directly.
require(binom)
binom.confint(0,8, method="wilson")
# Re-run the Africa analysis without the zero-event 1980-1989 period.
drugbug_africa2 <- drugbug_africa1[which(drugbug_africa1$year_cat!="1980-1989"),]
# Carry out overall meta-analysis of proportion
(afr2 <- metaprop(
data = drugbug_africa2,
number_resistant,
total_invasive_isolates
)
)
update.meta(afr2, byvar=year_cat, comb.random = TRUE, comb.fixed = F)
#----------------------
# Look at Asia only
#----------------------
# Carry out overall meta-analysis of proportion for the Asian sites.
(asia1 <- metaprop(
data = drugbug_asia1 ,
number_resistant,
total_invasive_isolates,
studlab = site_code
)
)
update.meta(asia1, byvar=year_cat, comb.random = TRUE, comb.fixed = F)
# Compare estimates for 2000s against 2010s (drop the 1990-1999 period).
drugbug_asia2<- drugbug_asia1[which(drugbug_asia1$year_cat!="1990-1999"),]
# Carry out overall meta-analysis of proportion
(asia2 <- metaprop(
data = drugbug_asia2,
number_resistant,
total_invasive_isolates,
studlab = site_code
)
)
update.meta(asia2, byvar=year_cat, comb.random = TRUE, comb.fixed = F)
## End Code
## End Code | /8_Klebsiella pneumoniae_Cephalosporin_Ceftriaxone.R | no_license | PrabinDahal/IDDO_NMFI_Antimicrobial-Resistance | R | false | false | 7,727 | r | #===============================================================================
# Title : Summarising NMFI-AMR systematic literature review database
# Data version : Data received from Poojan Shrestha on 15-07-2019
# Manuscript : NMFI-Antimicrobial resistance analysis
# URL : https://www.iddo.org/non-malarial-febrile-illness-map
# Script date : 14-04-2021
# Script : Prabin Dahal; prabin.dahal@iddo.org
# R Version : R version 4.0.4 (2021-02-15)
#===============================================================================
#rm(list=ls())
require(tableone)
require(dplyr)
require(plyr) # for revalue
# meta-analysis packages
require(meta)
require(metafor)
require(metasens)
#-----------------------
# Read analysis dataset
#-----------------------
dat0<-read.csv("D:/_IDDO/_nmfi_amr/NMFI AMR/Data/nmfi_amr_analysis_set_final.csv")
dat<- dat0
#-----------------------------------
# Look at pathogen-drug combination
#-----------------------------------
drugbug <- dat[which(dat$pathogen_2=="Klebsiella pneumoniae" & dat$antibiotic %in% c("Ceftriaxone")),]
drugbug <- droplevels(drugbug)
# number of unique articles
nrow(unique<-drugbug[which(!duplicated(drugbug$article_id)),])
drugbug_africa<- drugbug[which(drugbug$region=="Africa"),]
drugbug_asia<- drugbug[which(drugbug$region=="Asia"),]
#===========================================================================================
# Part I: Summarise the number of events and total isolates tested by region and time-period
#===========================================================================================
#--------------------------------------------------------------
# Exclude studies with either missing numerator or denominator
#--------------------------------------------------------------
# Keep only rows where both the resistance count (numerator) and the number of
# invasive isolates tested (denominator) are recorded, overall and per region.
drugbug1 <- drugbug[which(!is.na(drugbug$number_resistant) & !is.na(drugbug$total_invasive_isolates)), ]
drugbug_africa1 <- drugbug_africa[which(!is.na(drugbug_africa$number_resistant) & !is.na(drugbug_africa$total_invasive_isolates)), ]
drugbug_asia1 <- drugbug_asia[which(!is.na(drugbug_asia$number_resistant) & !is.na(drugbug_asia$total_invasive_isolates)), ]
# number of unique articles
# (previously this subset was assigned to a variable literally named `unique`,
# masking base::unique for the rest of the session; use a descriptive name)
unique_articles <- drugbug1[which(!duplicated(drugbug1$article_id)), ]
nrow(unique_articles)
#----------------------------------
# Total isolates and total events
#----------------------------------
# Overall counts: publications, sites, resistant isolates / total isolates.
drugbug1 %>%
  #dplyr::group_by(year_cat) %>%
  dplyr::summarise(
    n_pub = length(unique(article_id)),
    n_site = length(unique(site_code)),
    n_resis = sum(number_resistant, na.rm = TRUE),
    n_total = sum(total_invasive_isolates, na.rm = TRUE),
    paste = paste(sum(number_resistant, na.rm = TRUE), sum(total_invasive_isolates, na.rm = TRUE), sep = "/")
  )
#-------------------------------------------
# Total isolates and total events for Africa
#-------------------------------------------
drugbug_africa1 %>%
  #dplyr::group_by(year_cat) %>%
  dplyr::summarise(
    n_site = length(unique(site_code)),
    n_resis = sum(number_resistant, na.rm = TRUE),
    n_total = sum(total_invasive_isolates, na.rm = TRUE),
    paste = paste(sum(number_resistant, na.rm = TRUE), sum(total_invasive_isolates, na.rm = TRUE), sep = "/")
  )
# Same Africa counts broken down by time period.
drugbug_africa1 %>%
  dplyr::group_by(year_cat) %>%
  dplyr::summarise(
    n_site = length(unique(site_code)),
    n_resis = sum(number_resistant, na.rm = TRUE),
    n_total = sum(total_invasive_isolates, na.rm = TRUE),
    paste = paste(sum(number_resistant, na.rm = TRUE), sum(total_invasive_isolates, na.rm = TRUE), sep = "/")
  )
#-------------------------------------------
# Total isolates and total events for Asia
#-------------------------------------------
drugbug_asia1 %>%
  #dplyr::group_by(year_cat) %>%
  dplyr::summarise(
    n_site = length(unique(site_code)),
    n_resis = sum(number_resistant, na.rm = TRUE),
    n_total = sum(total_invasive_isolates, na.rm = TRUE),
    paste = paste(sum(number_resistant, na.rm = TRUE), sum(total_invasive_isolates, na.rm = TRUE), sep = "/")
  )
# Same Asia counts broken down by time period.
drugbug_asia1 %>%
  dplyr::group_by(year_cat) %>%
  dplyr::summarise(
    n_site = length(unique(site_code)),
    n_resis = sum(number_resistant, na.rm = TRUE),
    n_total = sum(total_invasive_isolates, na.rm = TRUE),
    paste = paste(sum(number_resistant, na.rm = TRUE), sum(total_invasive_isolates, na.rm = TRUE), sep = "/")
  )
#=======================================================
# Part II: Carry out meta-analysis of single proportions
#=======================================================
#--------------------------------------------------
# Some studies have split data into multiple rows
# Aggregate by study sites for meta-analysis
#--------------------------------------------------
# Collapse each study site (within time period / region) to a single
# numerator/denominator pair before pooling.
drugbug1 <- drugbug1 %>%
  dplyr::group_by(site_code,year_cat,region) %>%
  dplyr::summarise(
    number_resistant = sum(number_resistant),
    total_invasive_isolates =sum(total_invasive_isolates)
  )
drugbug_africa1 <- drugbug_africa1 %>%
  dplyr::group_by(site_code,year_cat) %>%
  dplyr::summarise(
    number_resistant = sum(number_resistant),
    total_invasive_isolates =sum(total_invasive_isolates)
  )
drugbug_asia1 <- drugbug_asia1 %>%
  dplyr::group_by(site_code,year_cat) %>%
  dplyr::summarise(
    number_resistant = sum(number_resistant),
    total_invasive_isolates =sum(total_invasive_isolates)
  )
#----------------
# Meta-analysis
#----------------
# Pool the single proportions across all sites; the outer parentheses make
# the fitted object print immediately.
(ma1 <- metaprop(
  data = drugbug1,
  number_resistant,
  total_invasive_isolates,
  studlab = site_code
)
)
# Subgroup analysis by region, reporting the random-effects model only.
update.meta(ma1, byvar=region, comb.random = TRUE, comb.fixed = F)
# Examine the influence of any single study (leave-one-out)
metainf(ma1, pooled = "random")
# Sensitivity analyses: repeat after excluding studies testing few isolates
# (thresholds of >1, >2, >5 and >10 isolates)
update(ma1, subset = total_invasive_isolates > 1)
update(ma1, subset = total_invasive_isolates > 2)
update(ma1, subset = total_invasive_isolates > 5)
update(ma1, subset = total_invasive_isolates > 10)
#--------------------------------------------------------------------------------------
# Assess funnel plot asymmetry and derive adjusted estimates using trim-and-fill method
#--------------------------------------------------------------------------------------
metabias(ma1)
trimfill(ma1)
#----------------------
# Look at Africa only
#----------------------
# Carry out overall meta-analysis of proportion
(afr1 <- metaprop(
  data = drugbug_africa1,
  number_resistant,
  total_invasive_isolates,
  studlab = site_code
)
)
update.meta(afr1, byvar=year_cat, comb.random = TRUE, comb.fixed = F)
# Warnings of potentially unstable estimates were returned:
# the 1980-1989 sub-group came from a single site with zero events.
# Check that zero-event proportion directly with a Wilson interval.
require(binom)
binom.confint(0,8, method="wilson")
# Re-run Africa without the unstable 1980-1989 period.
drugbug_africa2 <- drugbug_africa1[which(drugbug_africa1$year_cat!="1980-1989"),]
# Carry out overall meta-analysis of proportion
(afr2 <- metaprop(
  data = drugbug_africa2,
  number_resistant,
  total_invasive_isolates
)
)
update.meta(afr2, byvar=year_cat, comb.random = TRUE, comb.fixed = F)
#----------------------
# Look at Asia only
#----------------------
# Carry out overall meta-analysis of proportion
(asia1 <- metaprop(
  data = drugbug_asia1 ,
  number_resistant,
  total_invasive_isolates,
  studlab = site_code
)
)
update.meta(asia1, byvar=year_cat, comb.random = TRUE, comb.fixed = F)
# Compare estimates for 2000s against 2010s (drop the 1990-1999 period)
drugbug_asia2<- drugbug_asia1[which(drugbug_asia1$year_cat!="1990-1999"),]
# Carry out overall meta-analysis of proportion
(asia2 <- metaprop(
  data = drugbug_asia2,
  number_resistant,
  total_invasive_isolates,
  studlab = site_code
)
)
update.meta(asia2, byvar=year_cat, comb.random = TRUE, comb.fixed = F)
## End Code |
#######################
###load data
#######################
# Car-listing shortlist script: loads a saved scraped search ("res") and
# filters candidate cars located in Bogota / Cundinamarca.
rm(list=ls())
options(max.print=999999)
options(stringsAsFactors = FALSE)
library(tidyverse)
library(xml2)
library(rvest)
library(stringr)
list.files()%>%str_subset(".RData")
load("search_05_04_2019.RData")
summary(res)
# Even-plate flag: take the last digit of the plate; !(digit %% 2) is TRUE
# when the digit is even.
res$Placa%>%str_sub(start=-1)%>%as.numeric()%>%{.%%2}%>%{!.}->res$placa_par
res[,c("Placa","placa_par")]
# Inspect distinct values of every column except free-text/contact fields.
res%>%select(setdiff(colnames(res),c("http","descr","phone")))%>% map(unique)
# Exclude petrol ("Gasolina") variants, 4x2 drivetrains, automatic
# transmissions and ambulances.
res%>%filter(!Combustible %in% c("Gasolina", "Gasolina y gas"))%>%
  filter(!Versión %in% (res$Versión%>%str_subset("4x2")))%>%
  filter(!Tracción %in% (res$Tracción%>%str_subset("4x2")))%>%
  filter(!Transmisión %in% (res$Transmisión%>%str_subset("Automática")))%>%
  filter(!Tipo %in% (res$Tipo%>%str_subset("Ambulancias")))->res_f1
res_f1%>%select(setdiff(colnames(res),c("http","descr","phone")))%>% map(unique)
# Locations mentioning Bogota or Cundinamarca (case-insensitive substrings).
res_f1$location%>%str_subset(fixed("bog", ignore_case=TRUE))%>%unique()->loc_bog
res_f1$location%>%str_subset(fixed("cund", ignore_case=TRUE))%>%unique()->loc_cun
loc_bog_cun<-union(loc_bog,loc_cun)
# Shortlist: single owner, under 60M COP, even plate, local location, minus a
# few manually excluded plates; sorted by mileage, year and price (descending).
res_f1%>%filter(Único.dueño %in% "Sí")%>%
  filter(precio <60000000)%>%
  filter(placa_par)%>%
  filter(location %in% loc_bog_cun)%>%
  filter(!Placa %in% c("RHU-514","COC-844","QIA010","OBI-928"))%>%
  {.[order(.$Recorrido,.$Año,.$precio,decreasing = T),]}->res_f1_1
# Empty columns for manual follow-up notes.
# NOTE(review): these are added to res_f1, not to the shortlist res_f1_1 -
# confirm which table they were meant for.
res_f1$available<-NA
res_f1$dir_meeting<-NA
res_f1$contact_name<-NA
| /load_search_05_04_2019.R | no_license | nmolanog/web_scrapping_tucarro | R | false | false | 1,433 | r | #######################
###load data
#######################
# Car-listing shortlist script: loads a saved scraped search ("res") and
# filters candidate cars located in Bogota / Cundinamarca.
rm(list=ls())
options(max.print=999999)
options(stringsAsFactors = FALSE)
library(tidyverse)
library(xml2)
library(rvest)
library(stringr)
list.files()%>%str_subset(".RData")
load("search_05_04_2019.RData")
summary(res)
# Even-plate flag: take the last digit of the plate; !(digit %% 2) is TRUE
# when the digit is even.
res$Placa%>%str_sub(start=-1)%>%as.numeric()%>%{.%%2}%>%{!.}->res$placa_par
res[,c("Placa","placa_par")]
# Inspect distinct values of every column except free-text/contact fields.
res%>%select(setdiff(colnames(res),c("http","descr","phone")))%>% map(unique)
# Exclude petrol ("Gasolina") variants, 4x2 drivetrains, automatic
# transmissions and ambulances.
res%>%filter(!Combustible %in% c("Gasolina", "Gasolina y gas"))%>%
  filter(!Versión %in% (res$Versión%>%str_subset("4x2")))%>%
  filter(!Tracción %in% (res$Tracción%>%str_subset("4x2")))%>%
  filter(!Transmisión %in% (res$Transmisión%>%str_subset("Automática")))%>%
  filter(!Tipo %in% (res$Tipo%>%str_subset("Ambulancias")))->res_f1
res_f1%>%select(setdiff(colnames(res),c("http","descr","phone")))%>% map(unique)
# Locations mentioning Bogota or Cundinamarca (case-insensitive substrings).
res_f1$location%>%str_subset(fixed("bog", ignore_case=TRUE))%>%unique()->loc_bog
res_f1$location%>%str_subset(fixed("cund", ignore_case=TRUE))%>%unique()->loc_cun
loc_bog_cun<-union(loc_bog,loc_cun)
# Shortlist: single owner, under 60M COP, even plate, local location, minus a
# few manually excluded plates; sorted by mileage, year and price (descending).
res_f1%>%filter(Único.dueño %in% "Sí")%>%
  filter(precio <60000000)%>%
  filter(placa_par)%>%
  filter(location %in% loc_bog_cun)%>%
  filter(!Placa %in% c("RHU-514","COC-844","QIA010","OBI-928"))%>%
  {.[order(.$Recorrido,.$Año,.$precio,decreasing = T),]}->res_f1_1
# Empty columns for manual follow-up notes.
# NOTE(review): these are added to res_f1, not to the shortlist res_f1_1 -
# confirm which table they were meant for.
res_f1$available<-NA
res_f1$dir_meeting<-NA
res_f1$contact_name<-NA
|
# Cross-validated ridge regression (alpha = 0, despite the "Lasso" path) for
# the haematopoietic training set; appends the fitted path to a log file.
library(glmnet)
# Column 1 is the response; predictors start at column 4.
mydata <- read.table("../../../../TrainingSet/FullSet/Lasso/haematopoietic.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible 10-fold CV split
# Renamed from `glm`, which masked stats::glm.
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0, family = "gaussian", standardize = FALSE)
sink('./haematopoietic_004.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
| /Model/EN/Lasso/haematopoietic/haematopoietic_004.R | no_license | esbgkannan/QSMART | R | false | false | 358 | r | library(glmnet)
# Column 1 is the response; predictors start at column 4.
mydata = read.table("../../../../TrainingSet/FullSet/Lasso/haematopoietic.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fix the RNG so the 10 cross-validation folds are reproducible.
set.seed(123)
# NOTE: alpha = 0 is a ridge penalty (despite the "Lasso" directory name),
# and the local name `glm` masks stats::glm for the rest of the session.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0,family="gaussian",standardize=FALSE)
# Append the fitted coefficient-path summary to the per-run log file.
sink('./haematopoietic_004.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
#' @export
#'
#' @title Generate information table
#' @param data.in Peak data table within a list.
#' @param bn Character vector containing batch names as found within sample names.
#' @param gn Character vector containing group names as found within sample names
#' (determined in BCT call).
#' @param p.id Column index of first sample data column (usually named 'Raw abundance').
#' @description Function to produce the required information table from original input
#' table.
#' @return Returns a list containing the original input along with an information table
#' element.
bct.infogen <- function(data.in, bn, gn, p.id) {
    # Unwrap a bare list around the table (data.frames are lists too, so keep those).
    if (is.list(data.in) & !is.data.frame(data.in)) {
        data.in <- data.in[[1]]
    }
    # Coerce the table to character columns via the package's table wrapper.
    data.in <- bct.tabwrap(data.in, c("character", "data.frame"))
    gn <- as.character(gn)
    # get group name indices
    # Row 1: offset (within the sample columns) where each group label first
    # appears in header row 1; row 2: the group name itself.
    # NOTE(review): data.frame(ncol = ..., nrow = ...) does NOT set dimensions;
    # it builds a 1-row frame with columns named "ncol"/"nrow" that the loop
    # below overwrites and grows. It works, but a matrix would be clearer.
    id.g <- data.frame(ncol = length(gn), nrow = 2)
    for (i in 1:length(gn)) {
        id.g[1, i] <- as.numeric(which(data.in[1, p.id:ncol(data.in)] == gn[i]))
        id.g[2, i] <- gn[i]
    }
    # Order groups by their first occurrence among the sample columns.
    id.g <- id.g[, order(as.numeric(id.g[1, ]))]
    # create grouping vector according to id.g
    s.id <- as.vector(t(data.in[1, p.id:ncol(data.in)]))
    for (i in 1:length(gn)) {
        if (i < length(gn)) {
            s.id[as.numeric(id.g[1, i]):as.numeric(id.g[1, (i + 1)])] <- as.character(id.g[2, i])
        } else {
            # The last group label runs to the final sample column.
            s.id[as.numeric(id.g[1, i]):length(s.id)] <- as.character(id.g[2, i])
        }
    }
    # Sample names sit in the second header row of the sample columns.
    s.n <- as.vector(t(data.in[2, p.id:ncol(data.in)]))
    # Assign each sample to the batch whose name pattern matches its name.
    b.id <- list()
    for (i in 1:length(bn)) {
        b.id[[i]] <- grep(bn[i], s.n)
    }
    B <- s.n
    for (i in 1:length(b.id)) {
        B[b.id[[i]]] <- bn[i]
    }
    info <- data.frame('Names' = s.n, 'SCode' = s.id, 'Batch' = B)
    info <- bct.tabwrap(info, c('factor', 'data.frame'))
    return(list(P = data.in, I = info))
}
| /Package/R/bct_infogen.R | no_license | ntorbica/bct | R | false | false | 1,880 | r | #' @export
#'
#' @title Generate information table
#' @param data.in Peak data table within a list.
#' @param bn Character vector containing batch names as found within sample names.
#' @param gn Character vector containing group names as found within sample names
#' (determined in BCT call).
#' @param p.id Column index of first sample data column (usually named 'Raw abundance').
#' @description Function to produce the required information table from original input
#' table.
#' @return Returns a list containing the original input along with an information table
#' element.
bct.infogen <- function(data.in, bn, gn, p.id) {
    # Unwrap a bare list around the table (data.frames are lists too, so keep those).
    if (is.list(data.in) & !is.data.frame(data.in)) {
        data.in <- data.in[[1]]
    }
    # Coerce the table to character columns via the package's table wrapper.
    data.in <- bct.tabwrap(data.in, c("character", "data.frame"))
    gn <- as.character(gn)
    # get group name indices
    # Row 1: offset (within the sample columns) where each group label first
    # appears in header row 1; row 2: the group name itself.
    # NOTE(review): data.frame(ncol = ..., nrow = ...) does NOT set dimensions;
    # it builds a 1-row frame with columns named "ncol"/"nrow" that the loop
    # below overwrites and grows. It works, but a matrix would be clearer.
    id.g <- data.frame(ncol = length(gn), nrow = 2)
    for (i in 1:length(gn)) {
        id.g[1, i] <- as.numeric(which(data.in[1, p.id:ncol(data.in)] == gn[i]))
        id.g[2, i] <- gn[i]
    }
    # Order groups by their first occurrence among the sample columns.
    id.g <- id.g[, order(as.numeric(id.g[1, ]))]
    # create grouping vector according to id.g
    s.id <- as.vector(t(data.in[1, p.id:ncol(data.in)]))
    for (i in 1:length(gn)) {
        if (i < length(gn)) {
            s.id[as.numeric(id.g[1, i]):as.numeric(id.g[1, (i + 1)])] <- as.character(id.g[2, i])
        } else {
            # The last group label runs to the final sample column.
            s.id[as.numeric(id.g[1, i]):length(s.id)] <- as.character(id.g[2, i])
        }
    }
    # Sample names sit in the second header row of the sample columns.
    s.n <- as.vector(t(data.in[2, p.id:ncol(data.in)]))
    # Assign each sample to the batch whose name pattern matches its name.
    b.id <- list()
    for (i in 1:length(bn)) {
        b.id[[i]] <- grep(bn[i], s.n)
    }
    B <- s.n
    for (i in 1:length(b.id)) {
        B[b.id[[i]]] <- bn[i]
    }
    info <- data.frame('Names' = s.n, 'SCode' = s.id, 'Batch' = B)
    info <- bct.tabwrap(info, c('factor', 'data.frame'))
    return(list(P = data.in, I = info))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taxamodel_cor.R
\name{taxamodel_corr}
\alias{taxamodel_corr}
\title{Report the correlation coefficient of selected model}
\usage{
taxamodel_corr(taxa, rank, method)
}
\arguments{
\item{taxa}{A string.}
\item{rank}{A string.}
\item{method}{A string.}
}
\value{
the correlation coefficient of the \code{taxa} ~ \code{rank} \code{method} model.
}
\description{
Report the correlation coefficient of selected model
}
\examples{
\dontrun{
taxamodel_corr("Animalia", "Phylum", "logistic")
}
}
| /man/taxamodel_corr.Rd | no_license | hhsieh/biotaxa_Rpackage | R | false | true | 566 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taxamodel_cor.R
\name{taxamodel_corr}
\alias{taxamodel_corr}
\title{Report the correlation coefficient of selected model}
\usage{
taxamodel_corr(taxa, rank, method)
}
\arguments{
\item{taxa}{A string.}
\item{rank}{A string.}
\item{method}{A string.}
}
\value{
the correlation coefficient of the \code{taxa} ~ \code{rank} \code{method} model.
}
\description{
Report the correlation coefficient of selected model
}
\examples{
\dontrun{
taxamodel_corr("Animalia", "Phylum", "logistic")
}
}
|
# Shiny app: CDC mortality rates by state (DATA 608, module 3).
library(tidyr)
library(shiny)
library(rsconnect)
library(dplyr)
library(plotly)
library(shinythemes)
library(openintro)

# SECURITY WARNING: hard-coded rsconnect credentials. These should be revoked
# and loaded from environment variables instead of living in source control.
rsconnect::setAccountInfo(name = 'anthony-m',
                          token = 'E05F98E148F47EA3F9CD950CE9235DB9',
                          secret = 'MvbJwHFNGN27ixLX/kB0zbqDLh2Monn/SbOzGKFg')

# Cleaned CDC mortality data, 1999-2010.
df <- read.csv('https://raw.githubusercontent.com/Anth350z/Data-608-module-3/master/cleaned-cdc-mortality-1999-2010-2.csv', header = TRUE)

# Question 1 uses 2010 only, ordered by crude rate.
data <- df %>% filter(df$Year == 2010)
data <- data %>% arrange(data$Crude.Rate)

# Question 2 needs every cause/state/year combination present (explicit NAs);
# full_seq(..., 1) expands the year range in steps of one year.
data2 <- df %>% complete(ICD.Chapter, State, Year = full_seq(1999:2010, 1)) %>%
  fill(0)  # NOTE(review): fill() expects column selections; `0` selects nothing - confirm intent

ui <- navbarPage('DATA-608', theme = shinytheme("united"),
                 tabPanel('Question 1',
                          headerPanel('(2010) Mortality rates by State'),
                          sidebarPanel(
                            selectInput('cause', 'Cause', data$ICD.Chapter, selected = TRUE)
                          ),
                          mainPanel(
                            plotlyOutput('plot')
                          )
                 ),
                 tabPanel('Question 2',
                          headerPanel('Mortality rates by State vs Population Average'),
                          sidebarPanel(
                            selectInput('cause2', 'Cause', df$ICD.Chapter, selected = TRUE),
                            selectInput('state', 'State', df$State, selected = TRUE)
                          ),
                          mainPanel(
                            textOutput('text'),
                            plotlyOutput('plot2')
                          )
                 )
)

server <- function(input, output) {
  # National average crude rate per year for the chosen cause, column-bound to
  # the chosen state's yearly rates.
  # NOTE: keep `na.rm=T` spelled exactly as-is in the summarise() call - the
  # unnamed summarise column is auto-named from the deparsed expression, and
  # the select()/rename() strings below must match that generated name.
  data.2 <- reactive(data2 %>% group_by(Year) %>% filter(ICD.Chapter == input$cause2) %>% summarise(sum(Crude.Rate ,na.rm=T) / sum(complete.cases(Crude.Rate))) %>%
                       select('sum(Crude.Rate, na.rm = T)/sum(complete.cases(Crude.Rate))') %>%
                       rename('population.avg' = 'sum(Crude.Rate, na.rm = T)/sum(complete.cases(Crude.Rate))' ) %>%
                       cbind(filter(data2, ICD.Chapter == input$cause2, State == input$state)))

  # Q1: one point per state for the selected cause; x-axis follows the
  # rate-sorted order of `data`.
  output$plot <- renderPlotly({
    plot_ly(subset(data, data$ICD.Chapter == input$cause), x = ~abbr2state(State), y = ~Crude.Rate, type = 'scatter', mode = 'lines+markers',
            line = list(color = 'orange', width = 2)) %>% layout(xaxis = list(categoryorder = "array",
                                                                              categoryarray = data$State, tickangle = 60, title = "States"))
  })

  # Q2: selected state's crude rate vs the national average over time.
  output$plot2 <- renderPlotly({
    plot_ly(data.2(), x = ~Year, y = ~Crude.Rate, type = 'scatter', name = paste(abbr2state(input$state), '-Crude Rates'), mode = 'lines+markers', line = list(color = 'orange', width = 2)) %>%
      add_trace(y = ~population.avg, name = 'Population AVG Crude Rates', mode = 'lines+markers')
  })

  # Show the full name of the selected state.
  output$text <- renderText({
    abbr2state(input$state)
  })
}

shinyApp(ui = ui, server = server)
| /shiny_3.R | no_license | Anth350z/Data-608-module-3 | R | false | false | 3,312 | r | library(tidyr)
# Shiny app: CDC mortality rates by state (DATA 608, module 3).
library(tidyr)
library(shiny)
library(rsconnect)
library(dplyr)
library(plotly)
library(shinythemes)
library(openintro)

# SECURITY WARNING: hard-coded rsconnect credentials. These should be revoked
# and loaded from environment variables instead of living in source control.
rsconnect::setAccountInfo(name = 'anthony-m',
                          token = 'E05F98E148F47EA3F9CD950CE9235DB9',
                          secret = 'MvbJwHFNGN27ixLX/kB0zbqDLh2Monn/SbOzGKFg')

# Cleaned CDC mortality data, 1999-2010.
df <- read.csv('https://raw.githubusercontent.com/Anth350z/Data-608-module-3/master/cleaned-cdc-mortality-1999-2010-2.csv', header = TRUE)

# Question 1 uses 2010 only, ordered by crude rate.
data <- df %>% filter(df$Year == 2010)
data <- data %>% arrange(data$Crude.Rate)

# Question 2 needs every cause/state/year combination present (explicit NAs);
# full_seq(..., 1) expands the year range in steps of one year.
data2 <- df %>% complete(ICD.Chapter, State, Year = full_seq(1999:2010, 1)) %>%
  fill(0)  # NOTE(review): fill() expects column selections; `0` selects nothing - confirm intent

ui <- navbarPage('DATA-608', theme = shinytheme("united"),
                 tabPanel('Question 1',
                          headerPanel('(2010) Mortality rates by State'),
                          sidebarPanel(
                            selectInput('cause', 'Cause', data$ICD.Chapter, selected = TRUE)
                          ),
                          mainPanel(
                            plotlyOutput('plot')
                          )
                 ),
                 tabPanel('Question 2',
                          headerPanel('Mortality rates by State vs Population Average'),
                          sidebarPanel(
                            selectInput('cause2', 'Cause', df$ICD.Chapter, selected = TRUE),
                            selectInput('state', 'State', df$State, selected = TRUE)
                          ),
                          mainPanel(
                            textOutput('text'),
                            plotlyOutput('plot2')
                          )
                 )
)

server <- function(input, output) {
  # National average crude rate per year for the chosen cause, column-bound to
  # the chosen state's yearly rates.
  # NOTE: keep `na.rm=T` spelled exactly as-is in the summarise() call - the
  # unnamed summarise column is auto-named from the deparsed expression, and
  # the select()/rename() strings below must match that generated name.
  data.2 <- reactive(data2 %>% group_by(Year) %>% filter(ICD.Chapter == input$cause2) %>% summarise(sum(Crude.Rate ,na.rm=T) / sum(complete.cases(Crude.Rate))) %>%
                       select('sum(Crude.Rate, na.rm = T)/sum(complete.cases(Crude.Rate))') %>%
                       rename('population.avg' = 'sum(Crude.Rate, na.rm = T)/sum(complete.cases(Crude.Rate))' ) %>%
                       cbind(filter(data2, ICD.Chapter == input$cause2, State == input$state)))

  # Q1: one point per state for the selected cause; x-axis follows the
  # rate-sorted order of `data`.
  output$plot <- renderPlotly({
    plot_ly(subset(data, data$ICD.Chapter == input$cause), x = ~abbr2state(State), y = ~Crude.Rate, type = 'scatter', mode = 'lines+markers',
            line = list(color = 'orange', width = 2)) %>% layout(xaxis = list(categoryorder = "array",
                                                                              categoryarray = data$State, tickangle = 60, title = "States"))
  })

  # Q2: selected state's crude rate vs the national average over time.
  output$plot2 <- renderPlotly({
    plot_ly(data.2(), x = ~Year, y = ~Crude.Rate, type = 'scatter', name = paste(abbr2state(input$state), '-Crude Rates'), mode = 'lines+markers', line = list(color = 'orange', width = 2)) %>%
      add_trace(y = ~population.avg, name = 'Population AVG Crude Rates', mode = 'lines+markers')
  })

  # Show the full name of the selected state.
  output$text <- renderText({
    abbr2state(input$state)
  })
}

shinyApp(ui = ui, server = server)
|
#Reset R
rm(list=ls())
#Tell R where to go
setwd("/Users/jtomkins/data/new_chimp_analysis/mummer/chimp_on_human/")
#Confirm location
getwd()
#List files in directory
dir(path = ".")
dir()
# Per-chromosome summary of nucmer percent-identity results (chimp on human).
ident <- read.csv("R_nucmer_summary_perc_ident.txt.csv")
#To get proper order for X axis in plot...
#Turn 'Chrom' column into a character vector
ident$Chrom <- as.character(ident$Chrom)
#Then turn it back into an ordered factor
# (levels follow file order, which fixes the x-axis order in ggplot)
ident$Chrom <- factor(ident$Chrom, levels=unique(ident$Chrom))
str(ident)
summary(ident)
# Same preparation for the alignment-length summary.
aln <- read.csv("R_nucmer_summary_aln_len.txt.csv")
#To get proper order for X axis in plot...
#Turn 'Chrom' column into a character vector
aln$Chrom <- as.character(aln$Chrom)
#Then turn it back into an ordered factor
aln$Chrom <- factor(aln$Chrom, levels=unique(aln$Chrom))
str(aln)
summary(aln)
library(ggplot2)
#library(RColorBrewer)
# Bar chart: mean percent identity per chimp chromosome.
p.ident <- ggplot(ident, aes (x=factor(Chrom), y=Mean)) +
  geom_bar(stat="identity", position="dodge", fill="darkseagreen") +
  labs(x="Chimp chromosome", y= "Average (mean) Percent Identity", title="Average Percent Identity - Chimp on Human") +
  theme(axis.text.x = element_text(face="bold", color="darkblue", size=20)) +
  theme(axis.text.x = element_text(angle=60)) +
  theme(axis.text.y = element_text(face="bold", color="firebrick", size=20)) +
  scale_y_continuous(breaks=c(0,50,85,90,100)) +
  theme(axis.title = element_text(size=20,face="bold")) +
  theme(plot.title = element_text(lineheight=.8, face="bold", size=25, color = "chocolate4")) +
  theme( axis.line = element_line(colour = "black",
                 size = 1, linetype = "solid"))
p.ident
# Bar chart: mean alignment length per chimp chromosome.
p.aln <- ggplot(aln, aes (x=factor(Chrom), y=Mean)) +
  geom_bar(stat="identity", position="dodge", fill="darkseagreen") +
  labs(x="Chimp chromosome", y= "Mean alignment length (bases)", title="Average Alignment Length - Chimp on Human") +
  theme(axis.text.x = element_text(face="bold", color="darkblue", size=20)) +
  theme(axis.text.x = element_text(angle=60)) +
  theme(axis.text.y = element_text(face="bold", color="firebrick", size=20)) +
  scale_y_continuous(breaks=c(0,300,600,900,1200,1500)) +
  theme(axis.title = element_text(size=20,face="bold")) +
  theme(plot.title = element_text(lineheight=.8, face="bold", size=25, color = "chocolate4")) +
  theme( axis.line = element_line(colour = "black",
                 size = 1, linetype = "solid"))
p.aln | /nucmer/plot_nucmer_summaries.R | no_license | jt-icr/chimp_human_dna | R | false | false | 2,413 | r | #Reset R
rm(list=ls())
#Tell R where to go
setwd("/Users/jtomkins/data/new_chimp_analysis/mummer/chimp_on_human/")
#Confirm location
getwd()
#List files in directory
dir(path = ".")
dir()
# Per-chromosome summary of nucmer percent-identity results (chimp on human).
ident <- read.csv("R_nucmer_summary_perc_ident.txt.csv")
#To get proper order for X axis in plot...
#Turn 'Chrom' column into a character vector
ident$Chrom <- as.character(ident$Chrom)
#Then turn it back into an ordered factor
# (levels follow file order, which fixes the x-axis order in ggplot)
ident$Chrom <- factor(ident$Chrom, levels=unique(ident$Chrom))
str(ident)
summary(ident)
# Same preparation for the alignment-length summary.
aln <- read.csv("R_nucmer_summary_aln_len.txt.csv")
#To get proper order for X axis in plot...
#Turn 'Chrom' column into a character vector
aln$Chrom <- as.character(aln$Chrom)
#Then turn it back into an ordered factor
aln$Chrom <- factor(aln$Chrom, levels=unique(aln$Chrom))
str(aln)
summary(aln)
library(ggplot2)
#library(RColorBrewer)
# Bar chart: mean percent identity per chimp chromosome.
p.ident <- ggplot(ident, aes (x=factor(Chrom), y=Mean)) +
  geom_bar(stat="identity", position="dodge", fill="darkseagreen") +
  labs(x="Chimp chromosome", y= "Average (mean) Percent Identity", title="Average Percent Identity - Chimp on Human") +
  theme(axis.text.x = element_text(face="bold", color="darkblue", size=20)) +
  theme(axis.text.x = element_text(angle=60)) +
  theme(axis.text.y = element_text(face="bold", color="firebrick", size=20)) +
  scale_y_continuous(breaks=c(0,50,85,90,100)) +
  theme(axis.title = element_text(size=20,face="bold")) +
  theme(plot.title = element_text(lineheight=.8, face="bold", size=25, color = "chocolate4")) +
  theme( axis.line = element_line(colour = "black",
                 size = 1, linetype = "solid"))
p.ident
# Bar chart: mean alignment length per chimp chromosome.
p.aln <- ggplot(aln, aes (x=factor(Chrom), y=Mean)) +
  geom_bar(stat="identity", position="dodge", fill="darkseagreen") +
  labs(x="Chimp chromosome", y= "Mean alignment length (bases)", title="Average Alignment Length - Chimp on Human") +
  theme(axis.text.x = element_text(face="bold", color="darkblue", size=20)) +
  theme(axis.text.x = element_text(angle=60)) +
  theme(axis.text.y = element_text(face="bold", color="firebrick", size=20)) +
  scale_y_continuous(breaks=c(0,300,600,900,1200,1500)) +
  theme(axis.title = element_text(size=20,face="bold")) +
  theme(plot.title = element_text(lineheight=.8, face="bold", size=25, color = "chocolate4")) +
  theme( axis.line = element_line(colour = "black",
                 size = 1, linetype = "solid"))
p.aln |
# Postwork session 4: joint distribution of home vs away goals, La Liga 17/18-19/20.
library(dplyr)
library(reshape2)
library(ggplot2)

# Match results for three seasons (football-data.co.uk).
# FIX: the 2017/18 URL previously pointed at "football-daata.co.uk" (typo),
# which does not resolve.
temporada1 <- "https://www.football-data.co.uk/mmz4281/1718/SP1.csv"
temporada2 <- "https://www.football-data.co.uk/mmz4281/1819/SP1.csv"
temporada3 <- "https://www.football-data.co.uk/mmz4281/1920/SP1.csv"
temporada1_csv <- read.csv(file = temporada1)
temporada2_csv <- read.csv(file = temporada2)
temporada3_csv <- read.csv(file = temporada3)
lista <- list(temporada1_csv, temporada2_csv, temporada3_csv)

# Keep date, teams, full-time goals and result for every season.
temporadas <- lapply(lista, select, Date, HomeTeam, AwayTeam, FTHG, FTAG, FTR)
# Date formats differ between seasons (2-digit vs 4-digit year).
temporadas[[1]] <- mutate(temporadas[[1]], Date = as.Date(Date, "%d/%m/%y"))
temporadas[[2]] <- mutate(temporadas[[2]], Date = as.Date(Date, "%d/%m/%Y"))
temporadas[[3]] <- mutate(temporadas[[3]], Date = as.Date(Date, "%d/%m/%Y"))
dataf <- do.call(rbind, temporadas)

## Marginal and joint probabilities of goals scored
(golescasa <- round(table(dataf$FTHG)/dim(dataf)[1], 3))    # home goals
(golesvisita <- round(table(dataf$FTAG)/dim(dataf)[1], 3))  # away goals
(pconjunta <- round(table(dataf$FTHG, dataf$FTAG)/dim(dataf)[1], 3))
# Ratio of joint probabilities to the product of marginals (independence check).
# Renamed from `table`, which masked base::table.
cociente <- pconjunta/outer(golescasa, golesvisita, "*")
cociente
| /Postwork Sesion 4 EQUIPO 3/postwork4sol2.R | no_license | BettySanchez7/DataScience_R | R | false | false | 1,103 | r |
library(dplyr)
library(reshape2)
library(ggplot2)
temporada1 <- "https://www.football-daata.co.uk/mmz4281/1718/SP1.csv"
temporada2 <- "https://www.football-data.co.uk/mmz4281/1819/SP1.csv"
temporada3 <- "https://www.football-data.co.uk/mmz4281/1920/SP1.csv"
temporada1_csv <- read.csv(file = temporada1)
temporada2_csv <- read.csv(file = temporada2)
temporada3_csv <- read.csv(file = temporada3)
lista <- list(temporada1_csv, temporada2_csv,temporada3_csv)
temporadas <- lapply(lista, select, Date, HomeTeam, AwayTeam, FTHG, FTAG, FTR)
temporadas[[1]] <- mutate(temporadas[[1]], Date = as.Date(Date, "%d/%m/%y"))
temporadas[[2]] <- mutate(temporadas[[2]], Date = as.Date(Date, "%d/%m/%Y"))
temporadas[[3]] <- mutate(temporadas[[3]], Date = as.Date(Date, "%d/%m/%Y"))
dataf <- do.call(rbind, temporadas)
##probabildades conjuntas
(golescasa <- round(table(dataf$FTHG)/dim(dataf)[1], 3))
(golesvisita <- round(table(dataf$FTAG)/dim(dataf)[1], 3))
(pconjunta <- round(table(dataf$FTHG, dataf$FTAG)/dim(dataf)[1], 3))
#cociente
table <- pconjunta/outer(golescasa, golesvisita, "*")
table
|
#' Filter out low count genes and barcodes from count matrix
#'
#' Drops genes (rows) and barcodes (columns) whose total counts fall at or
#' below the given thresholds. \code{CB2FindCell} already applies this
#' internally with \code{g_threshold = 0} and \code{b_threshold = 0}; apply
#' it to the raw expression count matrix yourself first if you want a
#' customized, stricter filtering threshold.
#'
#' @param dat Input count matrix to be filtered.
#'
#' @param g_threshold Nonnegative integer. Default: \code{0}. Genes whose
#' total expression count is less than or equal to this value are removed.
#'
#' @param b_threshold Nonnegative integer. Default: \code{0}. Barcodes whose
#' total count is less than or equal to this value are removed.
#'
#' @return A filtered matrix with the same format as input matrix.
#' @examples
#' data(mbrainSub)
#' dim(mbrainSub)
#' mbrainSub_f <- FilterGB(mbrainSub)
#' dim(mbrainSub_f)
#'
#' @importFrom Matrix colSums
#' @importFrom Matrix rowSums
#'
#' @export
FilterGB <- function(dat,
                     g_threshold = 0,
                     b_threshold = 0) {
    # Barcodes first: keep columns whose totals exceed the barcode threshold,
    # then keep rows whose totals (over the surviving barcodes) exceed the
    # gene threshold.
    barcode_totals <- colSums(dat)
    dat <- dat[, barcode_totals > b_threshold]
    gene_totals <- rowSums(dat)
    dat[gene_totals > g_threshold, ]
}
| /R/FilterGB.R | no_license | zijianni/scCB2 | R | false | false | 1,568 | r | #' Filter out low count genes and barcodes from count matrix
#'
#' This function is used for filtering out low count genes and barcodes
#' from count matrix based on total gene expression count (row sums) and
#' barcode expression count (column sums). \code{CB2FindCell}
#' has already integrated this function into it with \code{g_threshold = 0}
#' and \code{b_threshold = 0}. If users plan to customize their filtering
#' threshold, this function can be applied to the raw expression
#' count matrix prior to running \code{CB2FindCell}.
#'
#' @param dat Input count matrix to be filtered.
#'
#' @param g_threshold Nonnegative integer. Default: \code{0}. Filtering
#' threshold for genes. Any gene whose total expression count is less or
#' equal to \code{g_threshold} will be filtered out.
#'
#' @param b_threshold Nonnegative integer. Default: \code{0}. Filtering
#' threshold for barcodes. Any barcode whose total count is less or equal
#' to \code{b_threshold} will be filtered out.
#'
#' @return A filtered matrix with the same format as input matrix.
#' @examples
#' data(mbrainSub)
#' dim(mbrainSub)
#' mbrainSub_f <- FilterGB(mbrainSub)
#' dim(mbrainSub_f)
#'
#' @importFrom Matrix colSums
#' @importFrom Matrix rowSums
#'
#' @export
FilterGB <- function(dat,
                     g_threshold = 0,
                     b_threshold = 0) {
    #filter barcodes and genes
    # Drop barcodes (columns) whose total count is <= b_threshold ...
    bc <- colSums(dat)
    dat <- dat[, bc > b_threshold]
    # ... then drop genes (rows) whose total count over the surviving
    # barcodes is <= g_threshold.
    gc <- rowSums(dat)
    dat <- dat[gc > g_threshold, ]
    return(dat)
}
|
# NOTE(review): install.packages() at the top of a script re-installs on every
# run; consider installing once interactively instead.
install.packages("sqldf")
install.packages("DBI")
install.packages("RSQLite")
library(sqldf)
library(DBI)
library(RSQLite)

# Run a SELECT against a table through an open DBI connection.
#   table: database table to select from
#   cols (optional): string of columns to select from table. Default is * (all columns)
#   cn (optional): DBI connection object; defaults to the global `con`
# Example:
#   con <- dbConnect(SQLite(), dbname = "mydata.db")
#   select("tablename")
#   -OR-
#   select("tablename", "col1, col2, col3")
select <- function(table, cols = '*', cn = con) {
  selString <- sprintf("select %s from %s", cols, table)
  # FIX: query the connection that was passed in (`cn`); the previous code
  # ignored the argument and always used the global `con`.
  dbGetQuery(cn, selString)
}

# Ensure there is no existing db connection
# FIX: test for the variable that is actually disconnected (`con`, not `db`).
if (exists("con")) dbDisconnect(con)
# connect to the sqlite file
con <- dbConnect(SQLite(), dbname = "aduniverse.db")
# display all table names in the database
dbListTables(con)
# Reads permits table and pull into a data frame
permits <- dbReadTable(con, "permits")
# Select in all entries in parcels db table
dbGetQuery( con,sprintf("select %s from %s", "*", "permits") )
# Select all entries with type_occ ADU
dbGetQuery(con, "select * from permits where type_occ = 'ADU'")
# disconnect from the database
# FIX: dbDisconnect() requires the connection object as its argument.
dbDisconnect(con)
| /ADUniverse/tools/ADUniverseRSQL.R | permissive | actuarial-tools/ADUniverse | R | false | false | 1,144 | r | install.packages("sqldf")
install.packages("DBI")
install.packages("RSQLite")
library(sqldf)
library(DBI)
library(RSQLite)
# Run a SELECT against a table through an open DBI connection.
select <- function(table, cols = '*', cn = con) {
  #table: database table to select table from
  #cols (optional): string of columns to select from table. Default is * (all columns)
  #cn = sqlite3.Connection object
  #Example:
  #con <- dbConnect(SQLite(), dbname = "mydata.db")
  #select("tablename")
  #-OR-
  #select("tablename", "col1, col2, col3")
  selString = sprintf("select %s from %s", cols, table)
  # NOTE(review): bug - this ignores the `cn` argument and always queries the
  # global `con`; it should be dbGetQuery(cn, selString).
  dbGetQuery(con, selString)
}
# Ensure there is no existing db connection
# NOTE(review): bug - checks for a variable named `db` but disconnects `con`,
# so this guard never fires; it should test exists("con").
if (exists("db")) dbDisconnect(con)
# connect to the sqlite file
con <- dbConnect(SQLite(), dbname = "aduniverse.db")
# display all table names in the database
dbListTables(con)
# Reads permits table and pull into a data frame
permits <- dbReadTable(con, "permits")
# Select in all entries in parcels db table
dbGetQuery( con,sprintf("select %s from %s", "*", "permits") )
# Select all entries with type_occ ADU
dbGetQuery(con, "select * from permits where type_occ = 'ADU'")
# disconnect from the database
# NOTE(review): bug - dbDisconnect() requires the connection object; this call
# errors. It should be dbDisconnect(con).
dbDisconnect()
|
# Render the copper arsenate glossary page: knit the Rmd straight to HTML.
library("knitr")
library("rgl")
# Earlier multi-step pipeline (knit -> markdownToHTML -> pandoc PDF), kept for reference:
#knit("copper_arsenate_arse.Rmd")
#markdownToHTML('copper_arsenate_arse.md', 'copper_arsenate_arse.html', options=c("use_xhml"))
#system("pandoc -s copper_arsenate_arse.html -o copper_arsenate_arse.pdf")
knit2html('copper_arsenate_arse.Rmd')
| /FDA_Pesticide_Glossary/copper_arsenate_arse.R | permissive | andrewdefries/andrewdefries.github.io | R | false | false | 276 | r | library("knitr")
# Render the copper arsenate glossary page: knit the Rmd straight to HTML.
library("rgl")
# Earlier multi-step pipeline (knit -> markdownToHTML -> pandoc PDF), kept for reference:
#knit("copper_arsenate_arse.Rmd")
#markdownToHTML('copper_arsenate_arse.md', 'copper_arsenate_arse.html', options=c("use_xhml"))
#system("pandoc -s copper_arsenate_arse.html -o copper_arsenate_arse.pdf")
knit2html('copper_arsenate_arse.Rmd')
|
#' Compute ranks of rows of matrix and summarize them into a choice suggestion.
#'
#' This function allows getting out the choice (of a column representing a stock) from four
#' rows of numbers quantifying the four orders of exact stochastic dominance comparisons.
#' If the last or 10-th row for ``choice" has 1 then the stock representing
#' that column is to be chosen. That is it should get the largest
#' (portfolio) weight. If the original matrix row names are SD1 to SD4,
#' the same names are repeated for the extra rows representing their ranks.
#' The row name for ``sum of ranks" is
#' sumRanks. Finally, the ranks associated with sumRanks provide the row named choice
#' along the bottom (10-th) row of the output matrix called "out."
#'
#' @param mtx {matrix to be ranked by row and summarized}
#' @return a matrix called `out' having 10 rows and p columns (p=No.of stocks).
#' Row Numbers 1 to 4 have SD1 to SD4 evaluation of areas over ECDFs.
#' There are 6 more rows. Row No.5= SD1 ranks,
#' Row No.6= SD2 ranks, Row No.7= SD3 ranks, Row No.8= SD4 ranks
#' Row No.9= sum of the ranks in earlier four rows for ranks of SD1 to SD4
#' Row No.10= choice rank based on all four (SD1 to SD4) added together
#' Thus, the tenth row yields choice priority number for each stock (asset)
#' after combining the all four criteria.
### @note %% ~~further notes~~
#' @author Prof. H. D. Vinod, Economics Dept., Fordham University, NY
#' @seealso \code{\link{exactSdMtx}}
#'
#' @export
summaryRank=function(mtx){
n=NROW(mtx)
p=NCOL(mtx)
mtxrank=mtx #place to store
for (i in 1:n){mtxrank[i,]=(p+1)-rank(mtx[i,],
na.last = TRUE, ties.method = "average")}
sumRanks=apply(mtxrank,2,sum)
choice=rank(sumRanks,na.last=TRUE,ties.method = "average")
out=rbind(mtx,mtxrank,sumRanks,choice)
return(out)
}#end summaryRank function
| /R/summaryRank.R | no_license | cran/generalCorr | R | false | false | 1,902 | r | #' Compute ranks of rows of matrix and summarize them into a choice suggestion.
#'
#' This function allows getting out the choice (of a column representing a stock) from four
#' rows of numbers quantifying the four orders of exact stochastic dominance comparisons.
#' If the last or 10-th row for ``choice" has 1 then the stock representing
#' that column is to be chosen. That is it should get the largest
#' (portfolio) weight. If the original matrix row names are SD1 to SD4,
#' the same names are repeated for the extra rows representing their ranks.
#' The row name for ``sum of ranks" is
#' sumRanks. Finally, the ranks associated with sumRanks provide the row named choice
#' along the bottom (10-th) row of the output matrix called "out."
#'
#' @param mtx {matrix to be ranked by row and summarized}
#' @return a matrix called `out' having 10 rows and p columns (p=No.of stocks).
#' Row Numbers 1 to 4 have SD1 to SD4 evaluation of areas over ECDFs.
#' There are 6 more rows. Row No.5= SD1 ranks,
#' Row No.6= SD2 ranks, Row No.7= SD3 ranks, Row No.8= SD4 ranks
#' Row No.9= sum of the ranks in earlier four rows for ranks of SD1 to SD4
#' Row No.10= choice rank based on all four (SD1 to SD4) added together
#' Thus, the tenth row yields choice priority number for each stock (asset)
#' after combining the all four criteria.
### @note %% ~~further notes~~
#' @author Prof. H. D. Vinod, Economics Dept., Fordham University, NY
#' @seealso \code{\link{exactSdMtx}}
#'
#' @export
summaryRank=function(mtx){
n=NROW(mtx)
p=NCOL(mtx)
mtxrank=mtx #place to store
for (i in 1:n){mtxrank[i,]=(p+1)-rank(mtx[i,],
na.last = TRUE, ties.method = "average")}
sumRanks=apply(mtxrank,2,sum)
choice=rank(sumRanks,na.last=TRUE,ties.method = "average")
out=rbind(mtx,mtxrank,sumRanks,choice)
return(out)
}#end summaryRank function
|
waRRior.snippets.verbose("loading startup libraries:", verbose_ = T)
require(RCurl)
require(plyr)
require(dplyr)
require(RJSONIO)
waRRior.snippets.verbose("startup libraries loaded.", verbose_ = T)
| /libraries/waRRior.libraries.startup.R | permissive | joelgsponer/waRRior | R | false | false | 198 | r | waRRior.snippets.verbose("loading startup libraries:", verbose_ = T)
require(RCurl)
require(plyr)
require(dplyr)
require(RJSONIO)
waRRior.snippets.verbose("startup libraries loaded.", verbose_ = T)
|
library(dplyr)
library(readxl)
library(lubridate)
library(ggplot2)
library(broom)
library(tidyr)
resp<- read_excel("H:/0_HarrisLab/1_CURRENT PROJECT FOLDERS/Patapsco/data/RAW_DATA/respiration_data.xlsx")
prof<- read_excel("H:/0_HarrisLab/1_CURRENT PROJECT FOLDERS/Patapsco/data/RAW_DATA/vertical_profile_data.xlsx")
prof$date<-as.POSIXct.Date(prof$date)
prof2<-prof %>%
filter(station=="middle")%>%
mutate(creek=case_when(
creek == "Inner harbour" ~ "Inner Harbor",
creek == "Inner Harbour" ~ "Inner Harbor",
TRUE ~ as.character(creek)
)) %>%
group_by(creek, date) %>%
filter(sonde_depth==max(sonde_depth)) %>%
mutate(ODO_conc=as.numeric(ODO_conc))
join<-resp %>%
filter(type=="Initial") %>%
mutate(date=date_collected) %>%
full_join(., prof2, by=c("creek","date"))
plot<-join %>%
ggplot(aes(x=DO_conc, y=ODO_conc, color=creek))+
geom_point()+xlab("Respiration")+ylab("sonde")+theme_classic()
plot
lm<-lm(data = join, DO_conc ~ ODO_conc)
summary(lm)
lm2<-tidy(lm)
| /DO_stuff.R | no_license | umces-cbl/patapsco_tributaries | R | false | false | 1,045 | r | library(dplyr)
library(readxl)
library(lubridate)
library(ggplot2)
library(broom)
library(tidyr)
resp<- read_excel("H:/0_HarrisLab/1_CURRENT PROJECT FOLDERS/Patapsco/data/RAW_DATA/respiration_data.xlsx")
prof<- read_excel("H:/0_HarrisLab/1_CURRENT PROJECT FOLDERS/Patapsco/data/RAW_DATA/vertical_profile_data.xlsx")
prof$date<-as.POSIXct.Date(prof$date)
prof2<-prof %>%
filter(station=="middle")%>%
mutate(creek=case_when(
creek == "Inner harbour" ~ "Inner Harbor",
creek == "Inner Harbour" ~ "Inner Harbor",
TRUE ~ as.character(creek)
)) %>%
group_by(creek, date) %>%
filter(sonde_depth==max(sonde_depth)) %>%
mutate(ODO_conc=as.numeric(ODO_conc))
join<-resp %>%
filter(type=="Initial") %>%
mutate(date=date_collected) %>%
full_join(., prof2, by=c("creek","date"))
plot<-join %>%
ggplot(aes(x=DO_conc, y=ODO_conc, color=creek))+
geom_point()+xlab("Respiration")+ylab("sonde")+theme_classic()
plot
lm<-lm(data = join, DO_conc ~ ODO_conc)
summary(lm)
lm2<-tidy(lm)
|
########################################################################################
# step one: crop and reproject to New Zealand at 1 km scale
########################################################################################
#
# scope -------------------------------------------------
# transorming the projection (CRS) to even-sized cells
# and from the original, global extents to something that encompasses New Zealand
# library --------------------------------------------------------------
library(ggmap)
library(raster)
library(rgdal)
rm(list = ls())
# 1-km raster functions -------------------------------------------------
# notes: function requirements -------------------------------------------------------------
# raw.raster = sourced raster layer, somewhere from the deep dark depths of the internet, loaded as a '.grd' R-raster file
# mask.layer = this layer outlines the general extent by which we want to plot our region, andm ore importantly, the cells of our region of interest (i.e. masks islands, other countries)
# raster.name = short name for raster that will be with it for life
# save = save file path and raster name, relative to wd()
# New Zealand 1 km function -----------------------------------------------------------------------
nz_1km <- function(raw.raster, raster.name, mask.layer, save)
{
# set things up
projection(raw.raster) <- "+proj=utm +zone=48 +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
crop.extent <- extent(mask.layer)
names(raw.raster) <- raster.name
# crop larger extent
crop.raster <- crop(raw.raster, crop.extent)
# mask offshore values
masked.raster <- mask(crop.raster, mask.layer)
# for some reason extents are slightly different
extent(masked.raster) <- crop.extent
plot(masked.raster)
# save
writeRaster(masked.raster, save, overwrite = T)
return(masked.raster)
} # fun end
# test 1km function -------------------------------------------------------------------------
raw.raster <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/CGIR CSI Aridity and Evaporation/Global Aridity - Annual/AI_annual/ai_yr/hdr.adf")/10000
raster.name <- "arid"
mask.layer <- raster("Data files/New Zealand/nz 1-km.grd")
save <- "Data files/EVs/step 1 - 1-km cropped/arid.grd"
nz_1km(raw.raster, raster.name, mask.layer, save)
raster(save)
# --------------------------------------------------------------------------------------
# New Zealand 1 km function for HII -----------------------------------------------------------------------
nz_1km_hii <- function(raw.raster, raster.name, mask.layer, save)
{
# set things up
projection(raw.raster) <- "+proj=utm +zone=48 +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
crop.extent <- extent(mask.layer)
names(raw.raster) <- raster.name
# crop larger extent
crop.raster <- crop(raw.raster, crop.extent)
# for some reason extents are slightly different
extent(crop.raster) <- crop.extent
# mask offshore values
masked.raster <- mask(crop.raster, mask.layer)
# for some reason extents are still slightly different
extent(masked.raster) <- crop.extent
plot(masked.raster)
# save
writeRaster(masked.raster, save, overwrite = T)
return(masked.raster)
} # fun end
# tested with hii --------------------
# ----------------------------------------------------------------------------------------
# New Zealand 1 km function for >1 km res rasters --------------------------------------------------
# for rasters which will be the opposite of aggregated (disaggregated?) back to 1 km for cropping purposes
nz_1km_disag <- function(raw.raster, raster.name, mask.layer, save)
{
# set things up
projection(raw.raster) <- "+proj=utm +zone=48 +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
crop.extent <- extent(mask.layer)
names(raw.raster) <- raster.name
# crop larger extent
crop.raster <- crop(raw.raster, crop.extent)
crop.raster
plot(crop.raster)
# reproject to 1 km res
repro.raster <- projectRaster(crop.raster, mask.layer, res = 0.008333334, crs = "+proj=utm +zone=48 +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0", method="ngb")
repro.raster
plot(repro.raster)
# mask offshore values
masked.raster <- mask(repro.raster, mask.layer)
# for some reason extents are slightly different
extent(masked.raster) <- crop.extent
plot(masked.raster)
# save
writeRaster(masked.raster, save, overwrite = T)
return(masked.raster)
} # fun end
# test 1km disag --------------------------------------------------------------------
# st <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Harmonised World Soil Database/WEBBSOIL_548/WEBBSOIL_548/data/wrtext.asc")
# save <- "Data files/EVs/step 1 - 1-km cropped/st.grd"
#
# raw.raster <- st
#
# # set things up
# projection(raw.raster) <- "+proj=utm +zone=48 +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
# crop.extent <- extent(mask.layer)
# names(raw.raster) <- raster.name
#
# # crop larger extent
# crop.raster <- crop(raw.raster, crop.extent)
# crop.raster
# plot(crop.raster)
#
# # reproject to 1 km res
# repro.raster <- projectRaster(crop.raster, mask.layer, res = 0.008333334, crs = "+proj=utm +zone=48 +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0", method="ngb")
# repro.raster
# plot(repro.raster)
#
#
# # mask offshore values
# masked.raster <- mask(repro.raster, mask.layer)
# masked.raster
# plot(masked.raster)
#
# # for some reason extents are slightly different
# extent(masked.raster) <- crop.extent
#########################################################################
# run functions
#########################################################################
# requirements --------------------------------------------------------
mask.layer <- raster("Data files/New Zealand/nz 1-km.grd")
# (1) aridity --------------------------------------------------------
# note: dividied by 10,000 from original raster values
arid <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/CGIR CSI Aridity and Evaporation/Global Aridity - Annual/AI_annual/ai_yr/hdr.adf")/10000
save <- "Data files/EVs/step 1 - 1-km cropped/arid.grd"
nz_1km(arid, "arid", mask.layer, save)
# (2) potential evapo-transpiration (pet)-------------------------------------------
pet <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/CGIR CSI Aridity and Evaporation/Global PET - Annual/PET_he_annual/pet_he_yr/hdr.adf")
save <- "Data files/EVs/step 1 - 1-km cropped/pet.grd"
nz_1km(pet, "pet", mask.layer, save)
# (3) elevation -------------------------------------------
elev <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/CGIR Elevation/Elevation 30 sec/GloElev_30as.asc")
save <- "Data files/EVs/step 1 - 1-km cropped/elev.grd"
nz_1km(elev, "elev", mask.layer, save)
# (4) Annual Mean Temperature (AMT) -------------------------------------------
amt <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_01.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/amt.grd"
nz_1km(amt,"amt", mask.layer, save)
# (5) Mean Diurnal Range (Mean of monthly (max temp - min temp)) (MDR) -----------------------------
mdr <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_02.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/mdr.grd"
nz_1km(mdr, "mdr", mask.layer, save)
# (6) Isothermality (BIO2/BIO7) (* 1) (ISO) -------------------------------------------
iso <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_03.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/iso.grd"
nz_1km(iso, "iso", mask.layer, save)
# (7) Temperature Seasonality (standard deviation *100) (Note I divided by 100) (TS) ----------------------
ts <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_04.tif")/100
save <- "Data files/EVs/step 1 - 1-km cropped/ts.grd"
nz_1km(ts, "ts", mask.layer, save)
# (8) Max Temperature of Warmest Month (twarmm) ------------------------------------------
twarmm <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_05.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/twarmm.grd"
nz_1km(twarmm, "twarmm", mask.layer, save)
# (9) Min Temperature of Coldest Month (tcoldm) ----------------------------------------
tcoldm <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_06.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/tcoldm.grd"
nz_1km(tcoldm, "tcoldm", mask.layer, save)
# (10) Temperature Annual Range (BIO5-BIO6) (TAR) ------------------------------------
tar <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_07.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/tar.grd"
nz_1km(tar, "tar", mask.layer, save)
# (11) Mean Temperature of Wettest Quarter (TWETQ) ------------------------------------
twetq <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_08.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/twetq.grd"
nz_1km(twetq, "twetq", mask.layer, save)
# (12) Mean Temperature of Driest Quarter (TDRYQ) -------------------------------------------
tdryq <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_09.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/tdryq.grd"
nz_1km(tdryq, "tdryq", mask.layer, save)
# (13) Mean Temperature of Warmest Quarter (TWARMQ) -------------------------------------------
twarmq <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_10.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/twarmq.grd"
nz_1km(twarmq, "twarmq", mask.layer, save)
# (14) Mean Temperature of Coldest Quarter (TCOLDQ) -------------------------------------------
tcoldq <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_11.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/tcoldq.grd"
nz_1km(tcoldq, "tcoldq", mask.layer, save)
# (15) Annual Precipitation (AP) -------------------------------------------
ap <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_12.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/ap.grd"
nz_1km(ap, "ap", mask.layer, save)
# (16) Precipitation of Wettest Month (PWETM) -----------------------------------------
pwetm <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_13.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/pwetm.grd"
nz_1km(pwetm, "pwetm", mask.layer, save)
# (17) Precipitation of Driest Month (PDRYM) -------------------------------------------
pdrym <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_14.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/pdrym.grd"
nz_1km(pdrym, "pdrym", mask.layer, save)
# (18) Precipitation Seasonality (Coefficient of Variation) (PS) ------------------------
ps <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_15.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/ps.grd"
nz_1km(ps, "ps", mask.layer, save)
# (19) Precipitation of Wettest Quarter (PWETQ) ------------------------------------------
pwetq <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_16.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/pwetq.grd"
nz_1km(pwetq, "pwetq", mask.layer, save)
# (20) Precipitation of Driest Quarter (PDRYQ) -------------------------------------------
pdryq <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_17.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/pdrym.grd"
nz_1km(pdrym, "pdrym", mask.layer, save)
# (21) Precipitation of Warmest Quarter (PWARMQ) -------------------------------------------
pwarmq <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_18.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/pwarmq.grd"
nz_1km(pwarmq, "pwarmq", mask.layer, save)
# (22) Precipitation of Coldest Quarter (PCOLDQ) -------------------------------------------
pcoldq <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_19.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/pcoldq.grd"
nz_1km(pcoldq, "pcoldq", mask.layer, save)
# (23) Human influence index --------------------------------------------------------
hii <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/The Human Influence Index (HII)/hii-global-geo-grid/hii_v2geo/hdr.adf")
save <- "Data files/EVs/step 1 - 1-km cropped/hii.grd"
nz_1km_hii(hii, "hii", mask.layer, save)
# (24) potential storage of water derived from soil texture (mm)) (st) -------------------
st <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Harmonised World Soil Database/WEBBSOIL_548/WEBBSOIL_548/data/wrtext.asc")
save <- "Data files/EVs/step 1 - 1-km cropped/st.grd"
nz_1km_disag(st, "st", mask.layer, save)
# (25) potential storage of water in the root zone (mm) (rz) -------------------------------------------
rz <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Harmonised World Soil Database/WEBBSOIL_548/WEBBSOIL_548/data/wrroot.asc")
save <- "Data files/EVs/step 1 - 1-km cropped/rz.grd"
nz_1km_disag(rz, "rz", mask.layer, save)
# (26) potential storage of water in the soil profile (mm) (sp) -------------------------------------------
sp <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Harmonised World Soil Database/WEBBSOIL_548/WEBBSOIL_548/data/wrprof.asc")
save <- "Data files/EVs/step 1 - 1-km cropped/sp.grd"
nz_1km_disag(sp, "sp", mask.layer, save)
# (27) plant available [soil] water capacity -------------------------------------------
# Australian extent -- ignore
# pawc <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Plant water capacity/PAWC_1m/pawc_1m/hdr.adf")
# save <- "Data files/EVs/step 1 - 1-km cropped/pawc.grd"
#
# nz_1km(pawc, "pawc", mask.layer, save)
# (28) Plant extractable [soil] water capacity -------------------------------------------
# again, dunno where I put og rasters (think I did them in ARC GIS?)
# so I am doing it here
pewc <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Plant water capacity/DUNNESOIL_545/dunne_soil.dat")
save <- "Data files/EVs/step 1 - 1-km cropped/pewc.grd"
nz_1km_disag(pewc, "pewc", mask.layer, save)
# --------------------------------------------------------------------------------
| /Rscripts/1. predictor variables/archive/step 1 - 1-km NZ rasters.R | no_license | khemming/NZ | R | false | false | 14,998 | r |
########################################################################################
# step one: crop and reproject to New Zealand at 1 km scale
########################################################################################
#
# scope -------------------------------------------------
# transorming the projection (CRS) to even-sized cells
# and from the original, global extents to something that encompasses New Zealand
# library --------------------------------------------------------------
library(ggmap)
library(raster)
library(rgdal)
rm(list = ls())
# 1-km raster functions -------------------------------------------------
# notes: function requirements -------------------------------------------------------------
# raw.raster = sourced raster layer, somewhere from the deep dark depths of the internet, loaded as a '.grd' R-raster file
# mask.layer = this layer outlines the general extent by which we want to plot our region, andm ore importantly, the cells of our region of interest (i.e. masks islands, other countries)
# raster.name = short name for raster that will be with it for life
# save = save file path and raster name, relative to wd()
# New Zealand 1 km function -----------------------------------------------------------------------
nz_1km <- function(raw.raster, raster.name, mask.layer, save)
{
# set things up
projection(raw.raster) <- "+proj=utm +zone=48 +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
crop.extent <- extent(mask.layer)
names(raw.raster) <- raster.name
# crop larger extent
crop.raster <- crop(raw.raster, crop.extent)
# mask offshore values
masked.raster <- mask(crop.raster, mask.layer)
# for some reason extents are slightly different
extent(masked.raster) <- crop.extent
plot(masked.raster)
# save
writeRaster(masked.raster, save, overwrite = T)
return(masked.raster)
} # fun end
# test 1km function -------------------------------------------------------------------------
raw.raster <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/CGIR CSI Aridity and Evaporation/Global Aridity - Annual/AI_annual/ai_yr/hdr.adf")/10000
raster.name <- "arid"
mask.layer <- raster("Data files/New Zealand/nz 1-km.grd")
save <- "Data files/EVs/step 1 - 1-km cropped/arid.grd"
nz_1km(raw.raster, raster.name, mask.layer, save)
raster(save)
# --------------------------------------------------------------------------------------
# New Zealand 1 km function for HII -----------------------------------------------------------------------
nz_1km_hii <- function(raw.raster, raster.name, mask.layer, save)
{
# set things up
projection(raw.raster) <- "+proj=utm +zone=48 +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
crop.extent <- extent(mask.layer)
names(raw.raster) <- raster.name
# crop larger extent
crop.raster <- crop(raw.raster, crop.extent)
# for some reason extents are slightly different
extent(crop.raster) <- crop.extent
# mask offshore values
masked.raster <- mask(crop.raster, mask.layer)
# for some reason extents are still slightly different
extent(masked.raster) <- crop.extent
plot(masked.raster)
# save
writeRaster(masked.raster, save, overwrite = T)
return(masked.raster)
} # fun end
# tested with hii --------------------
# ----------------------------------------------------------------------------------------
# New Zealand 1 km function for >1 km res rasters --------------------------------------------------
# for rasters which will be the opposite of aggregated (disaggregated?) back to 1 km for cropping purposes
nz_1km_disag <- function(raw.raster, raster.name, mask.layer, save)
{
# set things up
projection(raw.raster) <- "+proj=utm +zone=48 +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
crop.extent <- extent(mask.layer)
names(raw.raster) <- raster.name
# crop larger extent
crop.raster <- crop(raw.raster, crop.extent)
crop.raster
plot(crop.raster)
# reproject to 1 km res
repro.raster <- projectRaster(crop.raster, mask.layer, res = 0.008333334, crs = "+proj=utm +zone=48 +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0", method="ngb")
repro.raster
plot(repro.raster)
# mask offshore values
masked.raster <- mask(repro.raster, mask.layer)
# for some reason extents are slightly different
extent(masked.raster) <- crop.extent
plot(masked.raster)
# save
writeRaster(masked.raster, save, overwrite = T)
return(masked.raster)
} # fun end
# test 1km disag --------------------------------------------------------------------
# st <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Harmonised World Soil Database/WEBBSOIL_548/WEBBSOIL_548/data/wrtext.asc")
# save <- "Data files/EVs/step 1 - 1-km cropped/st.grd"
#
# raw.raster <- st
#
# # set things up
# projection(raw.raster) <- "+proj=utm +zone=48 +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
# crop.extent <- extent(mask.layer)
# names(raw.raster) <- raster.name
#
# # crop larger extent
# crop.raster <- crop(raw.raster, crop.extent)
# crop.raster
# plot(crop.raster)
#
# # reproject to 1 km res
# repro.raster <- projectRaster(crop.raster, mask.layer, res = 0.008333334, crs = "+proj=utm +zone=48 +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0", method="ngb")
# repro.raster
# plot(repro.raster)
#
#
# # mask offshore values
# masked.raster <- mask(repro.raster, mask.layer)
# masked.raster
# plot(masked.raster)
#
# # for some reason extents are slightly different
# extent(masked.raster) <- crop.extent
#########################################################################
# run functions
#########################################################################
# requirements --------------------------------------------------------
mask.layer <- raster("Data files/New Zealand/nz 1-km.grd")
# (1) aridity --------------------------------------------------------
# note: dividied by 10,000 from original raster values
arid <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/CGIR CSI Aridity and Evaporation/Global Aridity - Annual/AI_annual/ai_yr/hdr.adf")/10000
save <- "Data files/EVs/step 1 - 1-km cropped/arid.grd"
nz_1km(arid, "arid", mask.layer, save)
# (2) potential evapo-transpiration (pet)-------------------------------------------
pet <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/CGIR CSI Aridity and Evaporation/Global PET - Annual/PET_he_annual/pet_he_yr/hdr.adf")
save <- "Data files/EVs/step 1 - 1-km cropped/pet.grd"
nz_1km(pet, "pet", mask.layer, save)
# (3) elevation -------------------------------------------
elev <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/CGIR Elevation/Elevation 30 sec/GloElev_30as.asc")
save <- "Data files/EVs/step 1 - 1-km cropped/elev.grd"
nz_1km(elev, "elev", mask.layer, save)
# (4) Annual Mean Temperature (AMT) -------------------------------------------
amt <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_01.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/amt.grd"
nz_1km(amt,"amt", mask.layer, save)
# (5) Mean Diurnal Range (Mean of monthly (max temp - min temp)) (MDR) -----------------------------
mdr <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_02.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/mdr.grd"
nz_1km(mdr, "mdr", mask.layer, save)
# (6) Isothermality (BIO2/BIO7) (* 1) (ISO) -------------------------------------------
iso <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_03.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/iso.grd"
nz_1km(iso, "iso", mask.layer, save)
# (7) Temperature Seasonality (standard deviation *100) (Note I divided by 100) (TS) ----------------------
ts <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_04.tif")/100
save <- "Data files/EVs/step 1 - 1-km cropped/ts.grd"
nz_1km(ts, "ts", mask.layer, save)
# (8) Max Temperature of Warmest Month (twarmm) ------------------------------------------
twarmm <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_05.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/twarmm.grd"
nz_1km(twarmm, "twarmm", mask.layer, save)
# (9) Min Temperature of Coldest Month (tcoldm) ----------------------------------------
tcoldm <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_06.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/tcoldm.grd"
nz_1km(tcoldm, "tcoldm", mask.layer, save)
# (10) Temperature Annual Range (BIO5-BIO6) (TAR) ------------------------------------
tar <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_07.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/tar.grd"
nz_1km(tar, "tar", mask.layer, save)
# (11) Mean Temperature of Wettest Quarter (TWETQ) ------------------------------------
twetq <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_08.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/twetq.grd"
nz_1km(twetq, "twetq", mask.layer, save)
# (12) Mean Temperature of Driest Quarter (TDRYQ) -------------------------------------------
tdryq <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_09.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/tdryq.grd"
nz_1km(tdryq, "tdryq", mask.layer, save)
# (13) Mean Temperature of Warmest Quarter (TWARMQ) -------------------------------------------
twarmq <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_10.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/twarmq.grd"
nz_1km(twarmq, "twarmq", mask.layer, save)

# Each section below follows the same pattern: load a global source raster,
# set the output path, then crop/aggregate it to the NZ 1-km grid via the
# appropriate helper (nz_1km, nz_1km_hii, or nz_1km_disag for coarse rasters
# that need disaggregating).

# (14) Mean Temperature of Coldest Quarter (TCOLDQ) -------------------------------------------
tcoldq <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_11.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/tcoldq.grd"
nz_1km(tcoldq, "tcoldq", mask.layer, save)

# (15) Annual Precipitation (AP) -------------------------------------------
ap <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_12.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/ap.grd"
nz_1km(ap, "ap", mask.layer, save)

# (16) Precipitation of Wettest Month (PWETM) -----------------------------------------
pwetm <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_13.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/pwetm.grd"
nz_1km(pwetm, "pwetm", mask.layer, save)

# (17) Precipitation of Driest Month (PDRYM) -------------------------------------------
pdrym <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_14.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/pdrym.grd"
nz_1km(pdrym, "pdrym", mask.layer, save)

# (18) Precipitation Seasonality (Coefficient of Variation) (PS) ------------------------
ps <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_15.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/ps.grd"
nz_1km(ps, "ps", mask.layer, save)

# (19) Precipitation of Wettest Quarter (PWETQ) ------------------------------------------
pwetq <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_16.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/pwetq.grd"
nz_1km(pwetq, "pwetq", mask.layer, save)

# (20) Precipitation of Driest Quarter (PDRYQ) -------------------------------------------
# BUG FIX: this section previously saved to "pdrym.grd" and re-processed the
# pdrym raster, so PDRYQ was never actually cropped. It now processes pdryq
# and writes to its own output file.
pdryq <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_17.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/pdryq.grd"
nz_1km(pdryq, "pdryq", mask.layer, save)

# (21) Precipitation of Warmest Quarter (PWARMQ) -------------------------------------------
pwarmq <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_18.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/pwarmq.grd"
nz_1km(pwarmq, "pwarmq", mask.layer, save)

# (22) Precipitation of Coldest Quarter (PCOLDQ) -------------------------------------------
pcoldq <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Worldclim/wc2.0_bio_30s_19.tif")
save <- "Data files/EVs/step 1 - 1-km cropped/pcoldq.grd"
nz_1km(pcoldq, "pcoldq", mask.layer, save)

# (23) Human influence index --------------------------------------------------------
hii <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/The Human Influence Index (HII)/hii-global-geo-grid/hii_v2geo/hdr.adf")
save <- "Data files/EVs/step 1 - 1-km cropped/hii.grd"
nz_1km_hii(hii, "hii", mask.layer, save)

# (24) potential storage of water derived from soil texture (mm) (st) -------------------
st <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Harmonised World Soil Database/WEBBSOIL_548/WEBBSOIL_548/data/wrtext.asc")
save <- "Data files/EVs/step 1 - 1-km cropped/st.grd"
nz_1km_disag(st, "st", mask.layer, save)

# (25) potential storage of water in the root zone (mm) (rz) -------------------------------------------
rz <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Harmonised World Soil Database/WEBBSOIL_548/WEBBSOIL_548/data/wrroot.asc")
save <- "Data files/EVs/step 1 - 1-km cropped/rz.grd"
nz_1km_disag(rz, "rz", mask.layer, save)

# (26) potential storage of water in the soil profile (mm) (sp) -------------------------------------------
sp <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Harmonised World Soil Database/WEBBSOIL_548/WEBBSOIL_548/data/wrprof.asc")
save <- "Data files/EVs/step 1 - 1-km cropped/sp.grd"
nz_1km_disag(sp, "sp", mask.layer, save)

# (27) plant available [soil] water capacity -------------------------------------------
# Australian extent only -- not used for this (NZ) workflow
# pawc <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Plant water capacity/PAWC_1m/pawc_1m/hdr.adf")
# save <- "Data files/EVs/step 1 - 1-km cropped/pawc.grd"
#
# nz_1km(pawc, "pawc", mask.layer, save)

# (28) Plant extractable [soil] water capacity -------------------------------------------
# NOTE(review): original rasters appear to have been pre-processed elsewhere
# (possibly ArcGIS) -- cropped here instead.
pewc <- raster("C:/Users/s436862/Dropbox/Poaceae/Data files/EVs/Plant water capacity/DUNNESOIL_545/dunne_soil.dat")
save <- "Data files/EVs/step 1 - 1-km cropped/pewc.grd"
nz_1km_disag(pewc, "pewc", mask.layer, save)
# --------------------------------------------------------------------------------
|
context("Insertions")

# Fixtures shared by the tests below: a single Insertion and an Insertions
# collection holding two subtotal definitions.
insrt <- Insertion(anchor = 6, name = "Low", `function` = "subtotal", args = c(1, 2))
insrts <- Insertions(data=list(list(anchor = 6, name = "Low",
                                    `function` = "subtotal", args = c(1, 2)),
                               list(anchor = 7, name = "High",
                                    `function` = "subtotal", args = c(9, 10))))

# Getter methods on a single Insertion, and the vectorised getters
# (anchors, funcs) on an Insertions collection.
test_that("Insertion and insertion inheritence, base methods", {
    expect_equal(anchor(insrt), 6)
    expect_equal(name(insrt), "Low")
    expect_equal(arguments(insrt), c(1, 2))
    expect_equal(anchors(insrts), c(6, 7))
    expect_equal(funcs(insrts), c('subtotal', 'subtotal'))
})

# Working copy so the setter tests below do not mutate the shared fixture.
insrt2 <- insrt
# anchor<-, name<- and subtotals<- replace the corresponding fields in place.
test_that("Insertion setters", {
    anchor(insrt2) <- 1
    expect_equal(anchor(insrt2), 1)
    name(insrt2) <- "Low low"
    expect_equal(name(insrt2), "Low low")
    subtotals(insrt2) <- c(10, 20)
    expect_equal(arguments(insrt2), c(10, 20))
})

# Valid anchors are an integer position or the keywords "top"/"bottom".
test_that("Insertion can take an anchor of int, top, or bottom", {
    anchor(insrt2) <- "top"
    expect_equal(anchor(insrt2), "top")
    anchor(insrt2) <- "bottom"
    expect_equal(anchor(insrt2), "bottom")
    anchor(insrt2) <- 4
    expect_equal(anchor(insrt2), 4)
})

# Subtotal `after`/`position` specifications map onto Insertion anchors.
test_that("Anchors can be converted from subtotal/header to insertion", {
    sub <- Subtotal(name = "name", categories = c(1, 2), after = 1)
    sub_top <- Subtotal(name = "name", categories = c(1, 2), position = "top")
    sub_bottom <- Subtotal(name = "name", categories = c(1, 2), position = "bottom")
    # TODO: check category names with a categories object
    expect_equal(anchor(sub), 1)
    expect_equal(anchor(sub_top), "top")
    expect_equal(anchor(sub_bottom), "bottom")
})

# Setters reject values of the wrong type with informative messages.
test_that("Insertion setter validation", {
    expect_error(anchor(insrt2) <- "one",
                 paste0("an anchor must be a numeric or the character ", dQuote("top"),
                        " or ", dQuote("bottom")))
    expect_error(name(insrt2) <- 2, 'Names must be of class "character"')
    expect_error(subtotals(insrt2) <- "3, 4", "a subtotal must be a numeric")
})
# Constructor validation: anchor and name are both required, and supplying
# a `function` without matching args is rejected.
test_that("Insertion validation", {
    expect_error(Insertion(anchor=0),
                 "invalid class .*Insertion.* object:.* Missing: .*name*")
    expect_error(Insertion(name='bar'),
                 "invalid class .*Insertion.* object:.* Missing: .*anchor*")
    expect_error(Insertion(anchor=0, name='bar', `function`='baz'),
                 "If an Insertion has a .*function.* it must also have .*args.*")
})

# show() output is a compact data.frame rendering of the insertion(s).
test_that("Insertion and insertions show methods", {
    expect_output(insrt,
                  get_output(data.frame(anchor=c(6),
                                        name=c("Low"),
                                        func=c("subtotal"),
                                        args=c("1 and 2"))))
    expect_output(insrts,
                  get_output(data.frame(anchor=c(6, 7),
                                        name=c("Low", "High"),
                                        func=c("subtotal", "subtotal"),
                                        args=c("1 and 2", "9 and 10"))))
})
# show() must also render mixed Subtotal/Heading collections: the Heading
# carries no function, so func is NA and its args serialise to the string
# "NA". (Fixed typo in the description: "hetrogeneous" -> "heterogeneous".)
test_that("Insertion and insertions show methods with heterogeneous insertions", {
    insrts <- Insertions(Subtotal(name = "Cats A+B", after = "B",
                                  categories = c("A", "B")),
                         Heading(name = "The end", after = "D"))
    expect_output(insrts,
                  get_output(data.frame(
                      anchor = c("B", "D"),
                      name = c("Cats A+B", "The end"),
                      func = c("subtotal", NA),
                      # NA is a string because we serialPaste them
                      args = c("A and B", "NA"))),
                  fixed = TRUE)
})

# arguments() falls back to NA when the Insertion carries no args.
test_that("args returns NA when not found", {
    expect_equal(arguments(Insertion(anchor='foo', name='bar')), NA)
})
| /tests/testthat/test-insertions.R | no_license | GitBrianLaw/rcrunch | R | false | false | 3,850 | r | context("Insertions")
# NOTE(review): this span is a repeated copy of the Insertions test file
# (the dump duplicates the file content). Fixtures: a single Insertion and
# an Insertions collection of two subtotal definitions.
insrt <- Insertion(anchor = 6, name = "Low", `function` = "subtotal", args = c(1, 2))
insrts <- Insertions(data=list(list(anchor = 6, name = "Low",
                                    `function` = "subtotal", args = c(1, 2)),
                               list(anchor = 7, name = "High",
                                    `function` = "subtotal", args = c(9, 10))))

# Getter methods on a single Insertion and vectorised getters on a
# collection of Insertions.
test_that("Insertion and insertion inheritence, base methods", {
    expect_equal(anchor(insrt), 6)
    expect_equal(name(insrt), "Low")
    expect_equal(arguments(insrt), c(1, 2))
    expect_equal(anchors(insrts), c(6, 7))
    expect_equal(funcs(insrts), c('subtotal', 'subtotal'))
})

# Working copy so the setter tests below do not mutate the shared fixture.
insrt2 <- insrt

# anchor<-, name<- and subtotals<- replace the corresponding fields.
test_that("Insertion setters", {
    anchor(insrt2) <- 1
    expect_equal(anchor(insrt2), 1)
    name(insrt2) <- "Low low"
    expect_equal(name(insrt2), "Low low")
    subtotals(insrt2) <- c(10, 20)
    expect_equal(arguments(insrt2), c(10, 20))
})

# Valid anchors are an integer position or the keywords "top"/"bottom".
test_that("Insertion can take an anchor of int, top, or bottom", {
    anchor(insrt2) <- "top"
    expect_equal(anchor(insrt2), "top")
    anchor(insrt2) <- "bottom"
    expect_equal(anchor(insrt2), "bottom")
    anchor(insrt2) <- 4
    expect_equal(anchor(insrt2), 4)
})

# Subtotal `after`/`position` specifications map onto Insertion anchors.
test_that("Anchors can be converted from subtotal/header to insertion", {
    sub <- Subtotal(name = "name", categories = c(1, 2), after = 1)
    sub_top <- Subtotal(name = "name", categories = c(1, 2), position = "top")
    sub_bottom <- Subtotal(name = "name", categories = c(1, 2), position = "bottom")
    # TODO: check category names with a categories object
    expect_equal(anchor(sub), 1)
    expect_equal(anchor(sub_top), "top")
    expect_equal(anchor(sub_bottom), "bottom")
})

# Setters reject values of the wrong type with informative messages.
test_that("Insertion setter validation", {
    expect_error(anchor(insrt2) <- "one",
                 paste0("an anchor must be a numeric or the character ", dQuote("top"),
                        " or ", dQuote("bottom")))
    expect_error(name(insrt2) <- 2, 'Names must be of class "character"')
    expect_error(subtotals(insrt2) <- "3, 4", "a subtotal must be a numeric")
})

# Constructor validation: anchor and name are required; a `function`
# requires matching args.
test_that("Insertion validation", {
    expect_error(Insertion(anchor=0),
                 "invalid class .*Insertion.* object:.* Missing: .*name*")
    expect_error(Insertion(name='bar'),
                 "invalid class .*Insertion.* object:.* Missing: .*anchor*")
    expect_error(Insertion(anchor=0, name='bar', `function`='baz'),
                 "If an Insertion has a .*function.* it must also have .*args.*")
})

# show() output is a compact data.frame rendering of the insertion(s).
test_that("Insertion and insertions show methods", {
    expect_output(insrt,
                  get_output(data.frame(anchor=c(6),
                                        name=c("Low"),
                                        func=c("subtotal"),
                                        args=c("1 and 2"))))
    expect_output(insrts,
                  get_output(data.frame(anchor=c(6, 7),
                                        name=c("Low", "High"),
                                        func=c("subtotal", "subtotal"),
                                        args=c("1 and 2", "9 and 10"))))
})
# show() must also render mixed Subtotal/Heading collections: the Heading
# carries no function, so func is NA and its args serialise to the string
# "NA". (Fixed typo in the description: "hetrogeneous" -> "heterogeneous".)
test_that("Insertion and insertions show methods with heterogeneous insertions", {
    insrts <- Insertions(Subtotal(name = "Cats A+B", after = "B",
                                  categories = c("A", "B")),
                         Heading(name = "The end", after = "D"))
    expect_output(insrts,
                  get_output(data.frame(
                      anchor = c("B", "D"),
                      name = c("Cats A+B", "The end"),
                      func = c("subtotal", NA),
                      # NA is a string because we serialPaste them
                      args = c("A and B", "NA"))),
                  fixed = TRUE)
})

# arguments() falls back to NA when the Insertion carries no args.
test_that("args returns NA when not found", {
    expect_equal(arguments(Insertion(anchor='foo', name='bar')), NA)
})
# Class specification and constructors for mizer base parameters class
# Class has members to store parameters of size based model
# Copyright 2012 Finlay Scott and Julia Blanchard.
# Copyright 2018 Gustav Delius and Richard Southwell.
# Development has received funding from the European Commission's Horizon 2020
# Research and Innovation Programme under Grant Agreement No. 634495
# for the project MINOUW (http://minouw-project.eu/).
# Distributed under the GPL 3 or later
# Maintainer: Gustav Delius, University of York, <gustav.delius@york.ac.uk>
#Naming conventions:
#S4 classes and constructors: AClass
#S4 methods: aMethod
#functions a_function
# Validity function for the MizerParams class (registered via the
# `validity` argument of setClass below).
# Checks that all slots are mutually consistent: vector lengths, array
# dimensions, dimension names, species names, gear names, the arguments of
# the stock-recruitment function, and the required species_params columns.
# Returns TRUE when the object is valid, otherwise a character vector of
# error messages.
# Not documented as removed later on
valid_MizerParams <- function(object) {
    errors <- character()
    # grab some dims
    length_w <- length(object@w)
    length_w_full <- length(object@w_full)

    # Check dw and dw_full are the same length as w and w_full
    if (length(object@dw) != length_w) {
        msg <- paste0("dw is length ", length(object@dw), " and w is length ",
                      length_w, ". These should be the same length")
        errors <- c(errors, msg)
    }
    if (length(object@dw_full) != length_w_full) {
        msg <- paste0("dw_full is length ", length(object@dw_full),
                      " and w_full is length ", length_w_full,
                      ". These should be the same length")
        errors <- c(errors, msg)
    }

    # Check the array dimensions are good
    # 2D arrays
    if (!all(c(
        length(dim(object@psi)),
        length(dim(object@intake_max)),
        length(dim(object@search_vol)),
        length(dim(object@activity)),
        length(dim(object@std_metab)),
        length(dim(object@mu_b)),
        length(dim(object@ft_pred_kernel_e)),
        length(dim(object@ft_pred_kernel_p)),
        length(dim(object@interaction)),
        length(dim(object@catchability))) == 2)) {
        msg <- "psi, intake_max, search_vol, activity, std_metab, mu_b, ft_pred_kernel_e, ft_pred_kernel_p, interaction and catchability must all be two dimensions"
        errors <- c(errors, msg)
    }
    # 3D arrays
    if (length(dim(object@selectivity)) != 3) {
        msg <- "selectivity must be three dimensions"
        errors <- c(errors, msg)
    }

    # Check number of species is equal across relevant slots
    if (!all(c(
        dim(object@psi)[1],
        dim(object@intake_max)[1],
        dim(object@search_vol)[1],
        dim(object@std_metab)[1],
        dim(object@ft_pred_kernel_e)[1],
        dim(object@ft_pred_kernel_p)[1],
        dim(object@activity)[1],
        dim(object@mu_b)[1],
        dim(object@selectivity)[2],
        dim(object@catchability)[2],
        dim(object@interaction)[1],
        dim(object@interaction)[2]) ==
            dim(object@species_params)[1])) {
        msg <- "The number of species in the model must be consistent across the species_params, psi, intake_max, search_vol, activity, mu_b, interaction (dim 1), selectivity, ft_pred_kernel_e, ft_pred_kernel_p, catchability and interaction (dim 2) slots"
        errors <- c(errors, msg)
    }
    # Check number of size groups
    if (!all(c(
        dim(object@psi)[2],
        dim(object@intake_max)[2],
        dim(object@search_vol)[2],
        dim(object@activity)[2],
        dim(object@std_metab)[2],
        dim(object@selectivity)[3]) ==
            length_w)) {
        msg <- "The number of size bins in the model must be consistent across the w, psi, intake_max, search_vol, activity, and selectivity (dim 3) slots"
        errors <- c(errors, msg)
    }
    # Check number of gears
    if (!isTRUE(all.equal(dim(object@selectivity)[1], dim(object@catchability)[1]))) {
        msg <- "The number of fishing gears must be consistent across the catchability and selectivity (dim 1) slots"
        errors <- c(errors, msg)
    }

    # Check names of dimnames of arrays
    # sp dimension
    if (!all(c(
        names(dimnames(object@psi))[1],
        names(dimnames(object@intake_max))[1],
        names(dimnames(object@search_vol))[1],
        names(dimnames(object@activity))[1],
        names(dimnames(object@std_metab))[1],
        names(dimnames(object@mu_b))[1],
        names(dimnames(object@ft_pred_kernel_e))[1],
        names(dimnames(object@ft_pred_kernel_p))[1],
        names(dimnames(object@selectivity))[2],
        names(dimnames(object@catchability))[2]) == "sp")) {
        msg <- "Name of first dimension of psi, intake_max, search_vol, std_metab, activity, mu_b, ft_pred_kernel_e, ft_pred_kernel_p and the second dimension of selectivity and catchability must be 'sp'"
        errors <- c(errors, msg)
    }
    # interaction dimension names
    if (names(dimnames(object@interaction))[1] != "predator") {
        msg <- "The first dimension of interaction must be called 'predator'"
        errors <- c(errors, msg)
    }
    if (names(dimnames(object@interaction))[2] != "prey") {
        # Fixed message: this check is on the *second* dimension of interaction
        msg <- "The second dimension of interaction must be called 'prey'"
        errors <- c(errors, msg)
    }
    # w dimension
    if (!all(c(
        names(dimnames(object@psi))[2],
        names(dimnames(object@intake_max))[2],
        names(dimnames(object@search_vol))[2],
        names(dimnames(object@std_metab))[2],
        names(dimnames(object@activity))[2],
        names(dimnames(object@selectivity))[3]) == "w")) {
        msg <- "Name of second dimension of psi, intake_max, search_vol, std_metab, activity and third dimension of selectivity must be 'w'"
        errors <- c(errors, msg)
    }
    # gear dimension
    if (!all(c(
        names(dimnames(object@selectivity))[1],
        names(dimnames(object@catchability))[1]) == "gear")) {
        msg <- "Name of first dimension of selectivity and catchability must be 'gear'"
        errors <- c(errors, msg)
    }

    # Check dimnames of species are identical
    # (relies on `==` recycling to compare all the name vectors at once,
    # since there is no single-call way to compare many vectors)
    if (!all(c(
        dimnames(object@psi)[[1]],
        dimnames(object@intake_max)[[1]],
        dimnames(object@search_vol)[[1]],
        dimnames(object@std_metab)[[1]],
        dimnames(object@ft_pred_kernel_e)[[1]],
        dimnames(object@ft_pred_kernel_p)[[1]],
        dimnames(object@activity)[[1]],
        dimnames(object@mu_b)[[1]],
        dimnames(object@selectivity)[[2]],
        dimnames(object@catchability)[[2]],
        dimnames(object@interaction)[[1]],
        dimnames(object@interaction)[[2]]) ==
            object@species_params$species)) {
        msg <- "The species names of species_params, psi, intake_max, search_vol, std_metab, mu_b, ft_pred_kernel_e, ft_pred_kernel_p, activity, selectivity, catchability and interaction must all be the same"
        errors <- c(errors, msg)
    }
    # Check dimnames of w
    if (!all(c(
        dimnames(object@psi)[[2]],
        dimnames(object@intake_max)[[2]],
        dimnames(object@search_vol)[[2]],
        dimnames(object@std_metab)[[2]],
        dimnames(object@activity)[[2]]) ==
            dimnames(object@selectivity)[[3]])) {
        # Fixed missing space in the message ("activityand" -> "activity and")
        msg <- "The size names of psi, intake_max, search_vol, std_metab, activity and selectivity must all be the same"
        errors <- c(errors, msg)
    }
    # Check dimnames of gear
    if (!isTRUE(all.equal(
        dimnames(object@catchability)[[1]],
        dimnames(object@selectivity)[[1]]))) {
        msg <- "The gear names of selectivity and catchability must all be the same"
        errors <- c(errors, msg)
    }

    # Check the vector slots
    if (length(object@rr_pp) != length(object@w_full)) {
        msg <- "rr_pp must be the same length as w_full"
        errors <- c(errors, msg)
    }
    if (length(object@cc_pp) != length(object@w_full)) {
        msg <- "cc_pp must be the same length as w_full"
        errors <- c(errors, msg)
    }

    # SRR
    # The stock-recruitment function must have exactly the two arguments
    # 'rdi' and 'species_params'
    if (!isTRUE(all.equal(names(formals(object@srr)), c("rdi", "species_params")))) {
        msg <- "Arguments of srr function must be 'rdi' and 'species_params'"
        errors <- c(errors, msg)
    }

    # species_params data.frame must have columns:
    # species, z0, alpha, erepro
    species_params_cols <- c("species", "z0", "alpha", "erepro")
    if (!all(species_params_cols %in% names(object@species_params))) {
        msg <- "species_params data.frame must have 'species', 'z0', 'alpha' and 'erepro' columns"
        errors <- c(errors, msg)
    }
    # must also have SRR params but not sorted out yet
    # species_params
    # Column check done in constructor

    # If everything is OK
    if (length(errors) == 0) TRUE else errors
}
#### Class definition ####
#' A class to hold the parameters for a size based model.
#'
#' These parameters include the model species, their life history parameters and
#' the size ranges of the model.
#'
#' \linkS4class{MizerParams} objects can be created using a range of
#' \code{MizerParams} constructor methods.
#'
#' Dynamic simulations are performed using the \code{\link{project}} method on
#' objects of this class.
#'
#' @slot w A numeric vector of size bins used for the community (i.e. fish) part
#' of the model. These are usually spaced on a log10 scale
#' @slot dw The absolute difference between the size bins specified in the w
#' slot. A vector the same length as the w slot. The final value is the same
#' as the second to last value
#' @slot w_full A numeric vector of size bins used for the whole model (i.e. the
#' community and background spectra) . These are usually spaced on a log10
#' scale
#' @slot dw_full The absolute difference between the size bins specified in the
#' w_full slot. A vector the same length as the w_full slot. The final value
#' is the same as the second to last value
#' @slot psi An array (species x size) that holds the allocation to reproduction
#' for each species at size, \eqn{\psi_i(w)}
#' @slot intake_max An array (species x size) that holds the maximum intake for
#' each species at size, \eqn{h_i w^n}
#' @slot search_vol An array (species x size) that holds the search volume for
#' each species at size, \eqn{\gamma_i w^q}
#' @slot activity An array (species x size) that holds the activity for each
#' species at size, \eqn{k_i w}
#' @slot std_metab An array (species x size) that holds the standard metabolism
#' for each species at size, \eqn{k_{s.i} w^p}
#' @slot mu_b An array (species x size) that holds the background death
#' \eqn{\mu_{b.i}(w)}
#' @slot ft_pred_kernel_e An array (species x log of predator/prey size ratio) that holds
#' the Fourier transform of the feeding kernel in a form appropriate for
#' evaluating the available energy integral
#' @slot ft_pred_kernel_p An array (species x log of predator/prey size ratio) that holds
#' the Fourier transform of the feeding kernel in a form appropriate for
#' evaluating the predation mortality integral
#' @slot rr_pp A vector the same length as the w_full slot. The size specific
#' growth rate of the background spectrum, \eqn{r_0 w^{p-1}}
#' @slot cc_pp A vector the same length as the w_full slot. The size specific
#' carrying capacity of the background spectrum, \eqn{\kappa w^{-\lambda}}
#' @slot sc The community abundance of the scaling community
#' @slot species_params A data.frame to hold the species specific parameters
#' (see the mizer vignette, Table 2, for details)
#' @slot interaction The species specific interaction matrix, \eqn{\theta_{ij}}
#' @slot srr Function to calculate the realised (density dependent) recruitment.
#' Has two arguments which are rdi and species_params
#' @slot selectivity An array (gear x species x w) that holds the selectivity of
#' each gear for species and size, \eqn{S_{g,i,w}}
#' @slot catchability An array (gear x species) that holds the catchability of
#' each species by each gear, \eqn{Q_{g,i}}
#' @slot initial_n An array (species x size) that holds abundance of each species
#' at each weight at our candidate steady state solution.
#' @slot initial_n_pp A vector the same length as the w_full slot that describes
#' the abundance of the background background resource at each weight.
#' @slot n Exponent of maximum intake rate.
#' @slot p Exponent of metabolic cost.
#' @slot lambda Exponent of resource spectrum.
#' @slot q Exponent for volumetric search rate.
#' @slot f0 Initial feeding level.
#' @slot kappa Magnitude of resource spectrum.
#' @slot A Abundance multipliers.
#' @slot linecolour A named vector of colour values, named by species. Used
#' to give consistent colours to species in plots.
#' @slot linetype A named vector of linetypes, named by species. Used
#' to give consistent colours to species in plots.
#' @note The \linkS4class{MizerParams} class is fairly complex with a large number of
#' slots, many of which are multidimensional arrays. The dimensions of these
#' arrays is strictly enforced so that \code{MizerParams} objects are
#' consistent in terms of number of species and number of size classes.
#'
#' Although it is possible to build a \code{MizerParams} object by hand it is
#' not recommended and several constructors are available.
#'
#' The \code{MizerParams} class does not hold any dynamic information, e.g.
#' abundances or harvest effort through time. These are held in
#' \linkS4class{MizerSim} objects.
#' @seealso \code{\link{project}} \code{\link{MizerSim}}
#' @export
# Formal S4 class definition for MizerParams. The representation lists slot
# types only; cross-slot consistency (dimensions, dimnames, required
# species_params columns) is enforced by valid_MizerParams() via the
# validity argument.
setClass(
    "MizerParams",
    representation(
        w = "numeric",
        dw = "numeric",
        w_full = "numeric",
        dw_full = "numeric",
        psi = "array",
        initial_n = "array",
        intake_max = "array",
        search_vol = "array",
        activity = "array",
        std_metab = "array",
        ft_pred_kernel_e = "array",
        ft_pred_kernel_p = "array",
        mu_b = "array",
        rr_pp = "numeric",
        cc_pp = "numeric", # was NinPP, carrying capacity of background
        sc = "numeric",
        initial_n_pp = "numeric",
        species_params = "data.frame",
        interaction = "array",
        srr = "function",
        selectivity = "array",
        catchability = "array",
        n = "numeric",
        p = "numeric",
        lambda = "numeric",
        q = "numeric",
        f0 = "numeric",
        kappa = "numeric",
        A = "numeric",
        linecolour = "character",
        linetype = "character"
    ),
    # The prototype gives each slot an NA placeholder of the correct
    # structure, with named dimensions matching what the validity
    # function expects (sp, w, k, gear, predator, prey).
    prototype = prototype(
        w = NA_real_,
        dw = NA_real_,
        w_full = NA_real_,
        dw_full = NA_real_,
        n = NA_real_,
        p = NA_real_,
        lambda = NA_real_,
        q = NA_real_,
        f0 = NA_real_,
        kappa = NA_real_,
        psi = array(NA,dim = c(1,1), dimnames = list(sp = NULL,w = NULL)),
        initial_n = array(NA,dim = c(1,1), dimnames = list(sp = NULL,w = NULL)),
        intake_max = array(NA,dim = c(1,1), dimnames = list(sp = NULL,w = NULL)),
        search_vol = array(NA,dim = c(1,1), dimnames = list(sp = NULL,w = NULL)),
        activity = array(NA,dim = c(1,1), dimnames = list(sp = NULL,w = NULL)),
        std_metab = array(NA,dim = c(1,1), dimnames = list(sp = NULL,w = NULL)),
        ft_pred_kernel_e = array(NA,dim = c(1,1), dimnames = list(sp = NULL,k = NULL)),
        ft_pred_kernel_p = array(NA,dim = c(1,1), dimnames = list(sp = NULL,k = NULL)),
        mu_b = array(NA,dim = c(1,1), dimnames = list(sp = NULL,w = NULL)),
        rr_pp = NA_real_,
        cc_pp = NA_real_,
        sc = NA_real_,
        initial_n_pp = NA_real_,
        A = NA_real_,
        linecolour = NA_character_,
        linetype = NA_character_,
        #speciesParams = data.frame(),
        interaction = array(
            NA,dim = c(1,1), dimnames = list(predator = NULL, prey = NULL)
        ), # first dimension is the predator, second is the prey
        selectivity = array(
            NA, dim = c(1,1,1), dimnames = list(gear = NULL, sp = NULL, w = NULL)
        ),
        catchability = array(
            NA, dim = c(1,1), dimnames = list(gear = NULL, sp = NULL)
        )
    ),
    validity = valid_MizerParams
)
#### Constructors ####
#' Constructors for objects of \code{MizerParams} class
#'
#' Constructor method for the \linkS4class{MizerParams} class. Provides the
#' simplest way of making a \code{MizerParams} object to be used in a
#' simulation.
#'
#' @param object A data frame of species specific parameter values (see notes
#' below).
#' @param interaction Optional argument to specify the interaction matrix of the
#' species (predator by prey). If missing a default interaction is used where
#' all interactions between species are set to 1. Note that any dimnames of
#' the interaction matrix argument are ignored by the constructor. The
#' dimnames of the interaction matrix in the returned \code{MizerParams}
#' object are taken from the species names in the \code{species_params} slot.
#' This means that the order of the columns and rows of the interaction matrix
#' argument should be the same as the species name in the
#' \code{species_params} slot.
#' @param min_w The smallest size of the community spectrum.
#' @param max_w The largest size of the community spectrum.
#' Default value is the largest w_inf in the community x 1.1.
#' @param no_w The number of size bins in the community spectrum.
#' @param min_w_pp The smallest size of the background spectrum.
#' @param no_w_pp Obsolete argument that is no longer used because the number
#' of plankton size bins is determined because all size bins have to
#' be logarithmically equally spaced.
#' @param n Scaling of the intake. Default value is 2/3.
#' @param p Scaling of the standard metabolism. Default value is 0.7.
#' @param q Exponent of the search volume. Default value is 0.8.
#' @param r_pp Growth rate of the primary productivity. Default value is 10.
#' @param kappa Carrying capacity of the resource spectrum. Default
#' value is 1e11.
#' @param lambda Exponent of the resource spectrum. Default value is
#' (2+q-n).
#' @param w_pp_cutoff The cut off size of the background spectrum.
#' Default value is 10.
#' @param f0 Average feeding level. Used to calculated \code{h} and
#' \code{gamma} if those are not columns in the species data frame. Also
#' requires \code{k_vb} (the von Bertalanffy K parameter) to be a column
#' in the species data frame. If \code{h} and \code{gamma} are supplied
#' then this argument is ignored. Default is 0.6..
#' @param z0pre If \code{z0}, the mortality from other sources, is not
#' a column in the species data frame, it is calculated as
#' z0pre * w_inf ^ z0exp. Default value is 0.6.
#' @param z0exp If \code{z0}, the mortality from other sources, is not
#' a column in the species data frame, it is calculated as
#' z0pre * w_inf ^ z0exp. Default value is n-1.
#' @param species_names Names of the species. Generally not needed as normally
#' taken from the \code{object} data.frame.
#' @param gear_names Names of the gears that catch each species. Generally not
#' needed as normally taken from the \code{object} data.frame. Default is
#' \code{species_names}.
#' @param ... Additional arguments.
#'
#' @return An object of type \code{MizerParams}
#' @note The only essential argument to the \code{MizerParams} constructor is a
#' data frame which contains the species data. The data frame is arranged
#' species by parameter, so each column of the parameter data frame is a
#' parameter and each row has the parameters for one of the species in the
#' model.
#'
#' There are some essential columns that must be included in the parameter
#' data.frame and that do not have default values. Other columns do have
#' default values, so that if they are not included in the species parameter
#' data frame, they will be automatically added when the \code{MizerParams}
#' object is created. See the accompanying vignette for details of these
#' columns.
#'
#' An additional constructor method which takes an integer of the number of
#' species in the model. This is only used in internally to set up a
#' \code{MizerParams} object with the correct dimensions. It is not recommended
#' that this method is used by users.
#' @seealso \code{\link{project}} \linkS4class{MizerSim}
#' @export
#' @examples
#' data(NS_species_params_gears)
#' data(inter)
#' params <- MizerParams(NS_species_params_gears, inter)
# Generic for the MizerParams constructors defined below; methods dispatch
# on the class of `object` (species-parameter data.frame, or a species
# count) and on whether an `interaction` matrix is supplied.
setGeneric('MizerParams', function(object, interaction, ...)
    standardGeneric('MizerParams'))
#### Basic constructor ####
#' Basic constructor with only the number of species as dispatching argument
#'
#' Only really used to make MizerParams of the right size and shouldn't be used
#' by user
#' @rdname MizerParams
setMethod('MizerParams', signature(object='numeric', interaction='missing'),
    function(object, min_w = 0.001, max_w = 1000, no_w = 100, min_w_pp = 1e-10, no_w_pp = NA, species_names=1:object, gear_names=species_names){
        # Dimension-only constructor: `object` is the number of species.
        # All array slots are allocated with the right shape but hold NA
        # (or 0); the data.frame method below fills in the real values.
        if (!is.na(no_w_pp))
            warning("New mizer code does not support the parameter no_w_pp")
        # Some checks
        if (length(species_names) != object)
            stop("species_names must be the same length as the value of object argument")
        no_sp <- length(species_names)
        # Set up grids:
        # Community grid: no_w logarithmically equally spaced bins in [min_w, max_w]
        w <- 10^(seq(from=log10(min_w),to=log10(max_w),length.out=no_w))
        dw <- diff(w)
        # Extend dw to full length using the fixed ratio between successive
        # bin widths of a logarithmic grid.
        dw[no_w] <- dw[no_w-1]*(dw[no_w-1]/dw[no_w-2])
        # Set up full grid - background + community.
        # ERROR if dw > w: the grid would not be usable.
        if(w[1] <= dw[1])
            stop("Your size bins are too close together. You should consider increasing the number of bins, or changing the size range")
        # For fft methods we need a constant log step size throughout.
        # Therefore we use as many steps as are necessary to almost reach min_w_pp.
        x_pp <- rev(seq(from=log10(min_w), log10(min_w_pp), by=log10(min_w/max_w)/(no_w-1))[-1])
        w_full <- c(10^x_pp, w)
        no_w_full <- length(w_full)
        dw_full <- diff(w_full)
        dw_full[no_w_full] <- dw_full[no_w_full-1]*(dw_full[no_w_full-1]/dw_full[no_w_full-2])
        # Template (species x community size) array reused for most slots.
        # (A second species x size x full-size template used to be allocated
        # here but was never used, so it has been removed.)
        mat1 <- array(NA, dim=c(no_sp, no_w),
                      dimnames = list(sp=species_names, w=signif(w,3)))
        ft_pred_kernel_e <- array(NA, dim=c(no_sp, no_w_full),
                                  dimnames = list(sp=species_names, k=1:no_w_full))
        # We do not know the second dimension of ft_pred_kernel_p until the
        # species parameters determining the predation kernel are known,
        # so for now we set it to 2. The data.frame method re-creates it.
        ft_pred_kernel_p <- array(NA, dim=c(no_sp, 2),
                                  dimnames = list(sp=species_names, k=1:2))
        selectivity <- array(0, dim=c(length(gear_names), no_sp, no_w),
                             dimnames=list(gear=gear_names, sp=species_names,
                                           w=signif(w,3)))
        catchability <- array(0, dim=c(length(gear_names), no_sp),
                              dimnames = list(gear=gear_names, sp=species_names))
        interaction <- array(1, dim=c(no_sp, no_sp),
                             dimnames = list(predator = species_names,
                                             prey = species_names))
        # Template vector over the full size grid
        vec1 <- as.numeric(rep(NA, no_w_full))
        names(vec1) <- signif(w_full,3)
        # Make an empty data.frame for species_params.
        # This is just to pass the validity check: the project method uses
        # the columns species, z0, alpha and erepro so they must be present.
        # Users' data frames are checked separately by
        # check_species_params_dataframe().
        species_params <- data.frame(species = species_names,
                                     z0 = NA, alpha = NA, erepro = NA)
        # Make an empty srr function, just to pass validity check
        srr <- function(rdi, species_params) return(0)
        # Make colour and linetype scales for use in plots.
        # Colour-blind-friendly palette from
        # http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/#a-colorblind-friendly-palette
        cbbPalette <- c("#E69F00", "#56B4E9", "#009E73",
                        "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
        linecolour <- rep(cbbPalette, length.out = no_sp)
        names(linecolour) <- as.character(species_names)
        linecolour <- c(linecolour, "Total" = "black", "Plankton" = "green",
                        "Background" = "grey")
        linetype <- rep(c("solid", "dashed", "dotted", "dotdash", "longdash",
                          "twodash"), length.out = no_sp)
        names(linetype) <- as.character(species_names)
        linetype <- c(linetype, "Total" = "solid", "Plankton" = "solid",
                      "Background" = "solid")
        # Make the new object
        res <- new("MizerParams",
                   w = w, dw = dw, w_full = w_full, dw_full = dw_full,
                   psi = mat1, initial_n = mat1, intake_max = mat1, search_vol = mat1,
                   activity = mat1,
                   std_metab = mat1, mu_b = mat1, ft_pred_kernel_e = ft_pred_kernel_e,
                   ft_pred_kernel_p = ft_pred_kernel_p,
                   selectivity=selectivity, catchability=catchability,
                   rr_pp = vec1, cc_pp = vec1, sc = w, initial_n_pp = vec1,
                   species_params = species_params,
                   interaction = interaction, srr = srr,
                   A=as.numeric(rep(NA, dim(interaction)[1])),
                   linecolour = linecolour, linetype = linetype)
        return(res)
    }
)
#### Main constructor ####
#' Constructor that takes the species_params data.frame and the interaction matrix
#' @rdname MizerParams
setMethod('MizerParams', signature(object='data.frame', interaction='matrix'),
    function(object, interaction, n = 2/3, p = 0.7, q = 0.8, r_pp = 10,
             kappa = 1e11, lambda = (2+q-n), w_pp_cutoff = 10,
             max_w = max(object$w_inf)*1.1, f0 = 0.6,
             z0pre = 0.6, z0exp = n-1, ...){
        # Builds a fully populated MizerParams object from a species parameter
        # data.frame and an interaction matrix. Missing optional columns are
        # first filled with defaults, then all rate arrays, the feeding-kernel
        # Fourier transforms, the background spectrum and the fishing
        # parameters are computed.
        row.names(object) <- object$species
        # Set default values for column values if missing
        # If no gear_name column in object, then named after species
        if(!("gear" %in% colnames(object)))
            object$gear <- object$species
        no_gear <- length(unique(object$gear))
        # If no k column (activity coefficient) in object, then set to 0
        if(!("k" %in% colnames(object)))
            object$k <- 0
        # If no alpha column in object, then set to 0.6
        # Should this be a column? Or just an argument?
        if(!("alpha" %in% colnames(object)))
            object$alpha <- 0.6
        # If no erepro column in object, then set to 1
        if(!("erepro" %in% colnames(object)))
            object$erepro <- 1
        # If no sel_func column in species_params, set to 'knife_edge'
        if(!("sel_func" %in% colnames(object))){
            message("\tNote: No sel_func column in species data frame. Setting selectivity to be 'knife_edge' for all species.")
            object$sel_func <- 'knife_edge'
            # Set default selectivity size
            if(!("knife_edge_size" %in% colnames(object))){
                message("Note: \tNo knife_edge_size column in species data frame. Setting knife edge selectivity equal to w_mat.")
                object$knife_edge_size <- object$w_mat
            }
        }
        # If no catchability column in species_params, set to 1
        if(!("catchability" %in% colnames(object)))
            object$catchability <- 1
        # Sort out h column. If not passed in directly, it is calculated from
        # f0 and k_vb if they are also passed in.
        if(!("h" %in% colnames(object))){
            message("Note: \tNo h column in species data frame so using f0 and k_vb to calculate it.")
            if(!("k_vb" %in% colnames(object))){
                stop("\t\tExcept I can't because there is no k_vb column in the species data frame")
            }
            object$h <- ((3 * object$k_vb) / (object$alpha * f0)) * (object$w_inf ^ (1/3))
        }
        # Sorting out gamma column (search volume coefficient)
        if(!("gamma" %in% colnames(object))){
            message("Note: \tNo gamma column in species data frame so using f0, h, beta, sigma, lambda and kappa to calculate it.")
            ae <- sqrt(2*pi) * object$sigma * object$beta^(lambda-2) * exp((lambda-2)^2 * object$sigma^2 / 2)
            object$gamma <- (object$h / (kappa * ae)) * (f0 / (1 - f0))
        }
        # Sort out z0 column
        if(!("z0" %in% colnames(object))){
            message("Note: \tNo z0 column in species data frame so using z0 = z0pre * w_inf ^ z0exp.")
            object$z0 = z0pre*object$w_inf^z0exp # background natural mortality
        }
        # Sort out ks column (standard metabolism coefficient)
        if(!("ks" %in% colnames(object))){
            message("Note: \tNo ks column in species data frame so using ks = h * 0.2.")
            object$ks <- object$h * 0.2
        }
        # Check essential columns: species (name), w_inf, w_mat, h, gamma, ks, beta, sigma
        check_species_params_dataframe(object)
        no_sp <- nrow(object)
        # Make an empty object of the right dimensions
        res <- MizerParams(no_sp, species_names=object$species,
                           gear_names=unique(object$gear), max_w=max_w,...)
        res@n <- n
        res@p <- p
        res@lambda <- lambda
        res@q <- q
        res@f0 <- f0
        res@kappa <- kappa
        # If no w_min column in species_params, set to w_min of community
        if (!("w_min" %in% colnames(object)))
            object$w_min <- min(res@w)
        # Check min_w argument is not > w_min in species_params
        if(any(object$w_min < min(res@w)))
            stop("One or more of your w_min values is less than the smallest size of the community spectrum")
        # Add w_min_idx column which has the reference index of the size class closest
        # to w_min - this is a short cut for later on and prevents repetition.
        if (!("w_min_idx" %in% names(object))) {
            object$w_min_idx <- as.vector(
                tapply(object$w_min,1:length(object$w_min),
                    function(w_min,wx) max(which(wx<=w_min)),wx=res@w))
        }
        # Start filling the slots
        res@species_params <- object
        # Check dims of interaction argument - make sure it's right
        if (!isTRUE(all.equal(dim(res@interaction), dim(interaction))))
            stop("interaction matrix is not of the right dimensions. Must be number of species x number of species")
        # Check that all values of interaction matrix are 0 - 1. Issue warning if not
        if(!all((interaction>=0) & (interaction<=1)))
            warning("Values in the interaction matrix should be between 0 and 1")
        # In case user has supplied names to interaction matrix which are in the
        # wrong order: the supplied dimnames are ignored and the species order
        # of the data.frame is used.
        for (dim_check in 1:length(dimnames(res@interaction))){
            if (!is.null(dimnames(interaction)[[dim_check]]) & (!(isTRUE(all.equal(dimnames(res@interaction)[[dim_check]],dimnames(interaction)[[dim_check]])))))
                warning("Dimnames of interaction matrix do not match the order of species names in the species data.frame. I am now ignoring your dimnames so your interaction matrix may be in the wrong order.")}
        res@interaction[] <- interaction
        # Now fill up the slots using default formulations:
        # psi - allocation to reproduction - from original Setup() function
        res@psi[] <- unlist(tapply(res@w,1:length(res@w),function(wx,w_inf,w_mat,n){
            ((1 + (wx/(w_mat))^-10)^-1) * (wx/w_inf)^(1-n)},w_inf=object$w_inf,w_mat=object$w_mat,n=n))
        # Set w < 10% of w_mat to 0
        res@psi[unlist(tapply(res@w,1:length(res@w),function(wx,w_mat)wx<(w_mat*0.1) ,w_mat=object$w_mat))] <- 0
        # Set all w > w_inf to 1 # Check this is right...
        res@psi[unlist(tapply(res@w,1:length(res@w),function(wx,w_inf)(wx/w_inf)>1,w_inf=object$w_inf))] <- 1
        # Not sure what a and n0_mult are in get_initial_n
        res@intake_max[] <- unlist(tapply(res@w,1:length(res@w),function(wx,h,n)h * wx^n,h=object$h,n=n))
        res@search_vol[] <- unlist(tapply(res@w,1:length(res@w),function(wx,gamma,q)gamma * wx^q, gamma=object$gamma, q=q))
        res@activity[] <- unlist(tapply(res@w,1:length(res@w),function(wx,k)k * wx,k=object$k))
        res@std_metab[] <- unlist(tapply(res@w,1:length(res@w),function(wx,ks,p)ks * wx^p, ks=object$ks,p=p))
        res@mu_b[] <- res@species_params$z0
        Beta <- log(res@species_params$beta)
        sigma <- res@species_params$sigma
        Dx <- res@w[2]/res@w[1] - 1 # dw = w Dx
        # w_full has the weights from the smallest relevant plankton, to the largest fish
        xFull <- log(res@w_full)
        xFull <- xFull - xFull[1]
        # ft_pred_kernel_e is an array (species x log of predator/prey size ratio)
        # that holds the Fourier transform of the feeding kernel in a form
        # appropriate for evaluating the available energy integral
        res@ft_pred_kernel_e <- matrix(0, nrow = dim(res@interaction)[1], ncol=length(xFull))
        noSpecies <- dim(res@interaction)[1]
        for(i in 1:noSpecies){
            # We compute the feeding kernel terms and their fft.
            res@ft_pred_kernel_e[i, ] <- Dx*fft(exp(-(xFull - Beta[i])^2/(2*sigma[i]^2)))
        }
        # rr is the log of the maximal predator/prey mass ratio
        # Here we use default rr= beta + 3*sigma
        rr <- Beta + 3*sigma
        # Perturb rr so it falls on grid points
        dx <- xFull[2]-xFull[1]
        rr <- dx*ceiling(rr/dx)
        # Determine period used
        P <- max(xFull[length(xFull)] + rr)
        # Determine number of x points used in period
        no_P <- 1+ceiling(P/dx) # P/dx should already be integer
        # vector of values for log predator/prey mass ratio
        x_P <- (-1:(no_P-2))*dx
        # The dimension of ft_pred_kernel_p was not known at the time the res object
        # was initialised. Hence we need to create it with the right dimension here.
        res@ft_pred_kernel_p <- matrix(0, nrow = noSpecies, ncol = no_P)
        dimnames(res@ft_pred_kernel_p) <- list(sp=rownames(res@std_metab),k=(1:no_P))
        for (j in 1:noSpecies){
            phi <- rep(0, no_P)
            # Our phi is a periodic extension of the normal feeding kernel.
            # For 0<=x<=P we use phi[x-P] as our
            # value of the period P extension of phi, since support(phi)=[-rr,0]
            phi[x_P-P >= -rr[j]] <- exp(-(Beta[j]-P+x_P[x_P-P >= -rr[j]])^2/(2*sigma[j]^2))
            # We also save the fft of this vector, so we don't have to use too many fft s in the time evolution
            res@ft_pred_kernel_p[j, ] <- Dx*fft(phi)
        }
        # Background spectrum
        res@rr_pp[] <- r_pp * res@w_full^(n-1) #weight specific plankton growth rate ##
        res@cc_pp[] <- kappa*res@w_full^(-lambda) # the resource carrying capacity - one for each mp and m (130 of them)
        res@cc_pp[res@w_full>w_pp_cutoff] <- 0 # zero the carrying capacity above the plankton cutoff size
        # Set the SRR to be a Beverton Holt esque relationship
        # Can add more functional forms or user specifies own
        res@initial_n_pp <- res@cc_pp
        res@srr <- function(rdi, species_params){
            return(rdi / (1 + rdi/species_params$r_max))
        }
        # Set fishing parameters: selectivity and catchability
        # At the moment, each species is only caught by 1 gear so in species_params
        # there are the columns: gear_name and sel_func.
        # BEWARE! This routine assumes that each species has only one gear operating on it
        # So we can just go row by row through the species parameters
        # However, I really hope we can do something better soon
        for (g in 1:nrow(object)){
            # Do selectivity first
            # get args
            # These as.characters are annoying - but factors everywhere
            arg <- names(formals(as.character(object[g,'sel_func'])))
            # lop off w as that is always the first argument of the selectivity functions
            arg <- arg[!(arg %in% "w")]
            if(!all(arg %in% colnames(object)))
                stop("All of the arguments needed for the selectivity function are not in the parameter dataframe")
            # Check that there is only one column in object with the same name
            # Check that column of arguments exists
            par <- c(w=list(res@w),as.list(object[g,arg]))
            sel <- do.call(as.character(object[g,'sel_func']), args=par)
            # Dump Sel in the right place
            res@selectivity[as.character(object[g,'gear']), g, ] <- sel
            # Now do catchability
            res@catchability[as.character(object[g,'gear']), g] <- object[g,"catchability"]
        }
        # Store colours and linetypes in slots if contained in species parameters
        if ("linetype" %in% names(object)) {
            linetype <- object$linetype[!is.na(object$linetype)]
            res@linetype[object$species[!is.na(object$linetype)]] <- linetype
        }
        if ("linecolour" %in% names(object)) {
            linecolour <- object$linecolour[!is.na(object$linecolour)]
            res@linecolour[object$species[!is.na(object$linecolour)]] <- linecolour
        }
        # Remove catchability from species data.frame, now stored in slot
        #params@species_params[,names(params@species_params) != "catchability"]
        res@species_params <- res@species_params[,-which(names(res@species_params)=="catchability")]
        res@initial_n <- res@psi
        # NOTE(review): the assignment above is immediately overwritten below;
        # it looks redundant, but confirm that get_initial_n() does not read
        # res@initial_n before removing it.
        res@initial_n <- get_initial_n(res)
        res@A <- rep(1,no_sp)
        return(res)
    }
)
#### theta = 1 constructor ####
# If interaction is missing, make one of the right size and fill with 1s
#' Constructor based on the species_params data.frame only with no interaction
#' @rdname MizerParams
setMethod("MizerParams", signature(object = "data.frame", interaction = "missing"),
    function(object, ...) {
        # With no interaction matrix supplied, assume every predator-prey
        # pair interacts fully: all coefficients equal to 1.
        no_sp <- nrow(object)
        full_interaction <- matrix(1, nrow = no_sp, ncol = no_sp)
        MizerParams(object, full_interaction, ...)
    })
# Check that the species_params dataset is OK
# internal only
# Validate the essential columns of a species parameter data.frame.
# Internal only: called by the data.frame MizerParams constructor.
#
# Checks that every column required to build a MizerParams object is present:
# species (name), w_inf, w_mat, h (max intake), gamma (search volume),
# ks (standard metabolism), beta and sigma (predation kernel) and z0
# (background mortality).
#
# Returns TRUE if all essential columns are present; otherwise throws an
# error listing every missing column.
check_species_params_dataframe <- function(species_params){
    essential_cols <- c("species","w_inf","w_mat","h","gamma","ks","beta","sigma", "z0")
    missing_cols <- !(essential_cols %in% colnames(species_params))
    if (any(missing_cols)) {
        # Report all missing columns in one message instead of growing a
        # string inside a loop.
        stop("You are missing these columns from the input dataframe:\n ",
             paste(essential_cols[missing_cols], collapse = " "))
    }
    return(TRUE)
}
| /R/MizerParams-class.R | no_license | maxlindmark/mizer-rewiring | R | false | false | 37,183 | r | # Class specification and constructors for mizer base parameters class
# Class has members to store parameters of size based model
# Copyright 2012 Finlay Scott and Julia Blanchard.
# Copyright 2018 Gustav Delius and Richard Southwell.
# Development has received funding from the European Commission's Horizon 2020
# Research and Innovation Programme under Grant Agreement No. 634495
# for the project MINOUW (http://minouw-project.eu/).
# Distributed under the GPL 3 or later
# Maintainer: Gustav Delius, University of York, <gustav.delius@york.ac.uk>
#Naming conventions:
#S4 classes and constructors: AClass
#S4 methods: aMethod
#functions a_function
# Validity function - pretty long...
# Not documented as removed later on
# Validity check for MizerParams objects (used as the class's validity
# function). Verifies that all slot dimensions, dimnames, vector lengths,
# the SRR signature and the essential species_params columns are consistent.
# Returns TRUE when the object is valid, otherwise a character vector
# describing every problem found.
valid_MizerParams <- function(object) {
    errors <- character()
    # grab some dims
    length_w <- length(object@w)
    length_w_full <- length(object@w_full)
    # Check dw and dw_full are correct length
    if(length(object@dw) != length_w){
        msg <- paste("dw is length ", length(object@dw), " and w is length ", length_w, ". These should be the same length", sep="")
        errors <- c(errors, msg)
    }
    if(length(object@dw_full) != length_w_full){
        msg <- paste("dw_full is length ", length(object@dw_full), " and w_full is length ", length_w_full, ". These should be the same length", sep="")
        errors <- c(errors, msg)
    }
    # Check the array dimensions are good
    # 2D arrays
    if(!all(c(
        length(dim(object@psi)),
        length(dim(object@intake_max)),
        length(dim(object@search_vol)),
        length(dim(object@activity)),
        length(dim(object@std_metab)),
        length(dim(object@mu_b)),
        length(dim(object@ft_pred_kernel_e)),
        length(dim(object@ft_pred_kernel_p)),
        length(dim(object@interaction)),
        length(dim(object@catchability))) == 2)){
        msg <- "psi, intake_max, search_vol, activity, std_metab, mu_b, ft_pred_kernel_e, ft_pred_kernel_p, interaction and catchability must all be two dimensions"
        errors <- c(errors, msg)
    }
    # 3D arrays
    if(length(dim(object@selectivity)) != 3){
        msg <- "selectivity must be three dimensions"
        errors <- c(errors, msg)
    }
    # Check number of species is equal across relevant slots
    if(!all(c(
        dim(object@psi)[1],
        dim(object@intake_max)[1],
        dim(object@search_vol)[1],
        dim(object@std_metab)[1],
        dim(object@ft_pred_kernel_e)[1],
        dim(object@ft_pred_kernel_p)[1],
        dim(object@activity)[1],
        dim(object@mu_b)[1],
        dim(object@selectivity)[2],
        dim(object@catchability)[2],
        dim(object@interaction)[1],
        dim(object@interaction)[2]) ==
        dim(object@species_params)[1])){
        msg <- "The number of species in the model must be consistent across the species_params, psi, intake_max, search_vol, activity, mu_b, interaction (dim 1), selectivity, ft_pred_kernel_e, ft_pred_kernel_p, catchability and interaction (dim 2) slots"
        errors <- c(errors, msg)
    }
    # Check number of size groups
    if(!all(c(
        dim(object@psi)[2],
        dim(object@intake_max)[2],
        dim(object@search_vol)[2],
        dim(object@activity)[2],
        dim(object@std_metab)[2],
        dim(object@selectivity)[3]) ==
        length_w)){
        msg <- "The number of size bins in the model must be consistent across the w, psi, intake_max, search_vol, activity, std_metab, and selectivity (dim 3) slots"
        errors <- c(errors, msg)
    }
    # Check number of gears
    if(!isTRUE(all.equal(dim(object@selectivity)[1], dim(object@catchability)[1]))){
        msg <- "The number of fishing gears must be consistent across the catchability and selectivity (dim 1) slots"
        errors <- c(errors, msg)
    }
    # Check names of dimnames of arrays
    # sp dimension
    if(!all(c(
        names(dimnames(object@psi))[1],
        names(dimnames(object@intake_max))[1],
        names(dimnames(object@search_vol))[1],
        names(dimnames(object@activity))[1],
        names(dimnames(object@std_metab))[1],
        names(dimnames(object@mu_b))[1],
        names(dimnames(object@ft_pred_kernel_e))[1],
        names(dimnames(object@ft_pred_kernel_p))[1],
        names(dimnames(object@selectivity))[2],
        names(dimnames(object@catchability))[2]) == "sp")){
        msg <- "Name of first dimension of psi, intake_max, search_vol, std_metab, activity, mu_b, ft_pred_kernel_e, ft_pred_kernel_p and the second dimension of selectivity and catchability must be 'sp'"
        errors <- c(errors, msg)
    }
    # interaction dimension names
    if(names(dimnames(object@interaction))[1] != "predator"){
        msg <- "The first dimension of interaction must be called 'predator'"
        errors <- c(errors, msg)
    }
    if(names(dimnames(object@interaction))[2] != "prey"){
        msg <- "The second dimension of interaction must be called 'prey'"
        errors <- c(errors, msg)
    }
    # w dimension
    if(!all(c(
        names(dimnames(object@psi))[2],
        names(dimnames(object@intake_max))[2],
        names(dimnames(object@search_vol))[2],
        names(dimnames(object@std_metab))[2],
        names(dimnames(object@activity))[2],
        names(dimnames(object@selectivity))[3]) == "w")){
        msg <- "Name of second dimension of psi, intake_max, search_vol, std_metab, activity and third dimension of selectivity must be 'w'"
        errors <- c(errors, msg)
    }
    if(!all(c(
        names(dimnames(object@selectivity))[1],
        names(dimnames(object@catchability))[1]) == "gear")){
        msg <- "Name of first dimension of selectivity and catchability must be 'gear'"
        errors <- c(errors, msg)
    }
    # Check dimnames of species are identical.
    # Bit tricky this one as I don't know of a way to compare lots of vectors
    # at the same time. Just use == and the recycling rule.
    if(!all(c(
        dimnames(object@psi)[[1]],
        dimnames(object@intake_max)[[1]],
        dimnames(object@search_vol)[[1]],
        dimnames(object@std_metab)[[1]],
        dimnames(object@ft_pred_kernel_e)[[1]],
        dimnames(object@ft_pred_kernel_p)[[1]],
        dimnames(object@activity)[[1]],
        dimnames(object@mu_b)[[1]],
        dimnames(object@selectivity)[[2]],
        dimnames(object@catchability)[[2]],
        dimnames(object@interaction)[[1]],
        dimnames(object@interaction)[[2]]) ==
        object@species_params$species)){
        msg <- "The species names of species_params, psi, intake_max, search_vol, std_metab, mu_b, ft_pred_kernel_e, ft_pred_kernel_p, activity, selectivity, catchability and interaction must all be the same"
        errors <- c(errors, msg)
    }
    # Check dimnames of w
    if(!all(c(
        dimnames(object@psi)[[2]],
        dimnames(object@intake_max)[[2]],
        dimnames(object@search_vol)[[2]],
        dimnames(object@std_metab)[[2]],
        dimnames(object@activity)[[2]]) ==
        dimnames(object@selectivity)[[3]])){
        msg <- "The size names of psi, intake_max, search_vol, std_metab, activity and selectivity must all be the same"
        errors <- c(errors, msg)
    }
    # Check dimnames of gear
    if(!isTRUE(all.equal(
        dimnames(object@catchability)[[1]],
        dimnames(object@selectivity)[[1]]))){
        msg <- "The gear names of selectivity and catchability must all be the same"
        errors <- c(errors, msg)
    }
    # Check the vector slots
    if(length(object@rr_pp) != length(object@w_full)){
        msg <- "rr_pp must be the same length as w_full"
        errors <- c(errors, msg)
    }
    if(length(object@cc_pp) != length(object@w_full)){
        msg <- "cc_pp must be the same length as w_full"
        errors <- c(errors, msg)
    }
    # SRR
    # Must have two arguments: rdi and species_params
    if(!isTRUE(all.equal(names(formals(object@srr)), c("rdi", "species_params")))){
        msg <- "Arguments of srr function must be 'rdi' and 'species_params'"
        errors <- c(errors, msg)
    }
    # species_params data.frame must have columns:
    # species, z0, alpha, erepro
    species_params_cols <- c("species","z0","alpha","erepro")
    if (!all(species_params_cols %in% names(object@species_params))){
        msg <- "species_params data.frame must have 'species', 'z0', 'alpha' and 'erepro' columms"
        errors <- c(errors,msg)
    }
    # must also have SRR params but not sorted out yet
    # species_params
    # Column check done in constructor
    # If everything is OK
    if (length(errors) == 0) TRUE else errors
}
#### Class definition ####
#' A class to hold the parameters for a size based model.
#'
#' These parameters include the model species, their life history parameters and
#' the size ranges of the model.
#'
#' \linkS4class{MizerParams} objects can be created using a range of
#' \code{MizerParams} constructor methods.
#'
#' Dynamic simulations are performed using the \code{\link{project}} method on
#' objects of this class.
#'
#' @slot w A numeric vector of size bins used for the community (i.e. fish) part
#' of the model. These are usually spaced on a log10 scale
#' @slot dw The absolute difference between the size bins specified in the w
#' slot. A vector the same length as the w slot. The final value is the same
#' as the second to last value
#' @slot w_full A numeric vector of size bins used for the whole model (i.e. the
#' community and background spectra) . These are usually spaced on a log10
#' scale
#' @slot dw_full The absolute difference between the size bins specified in the
#' w_full slot. A vector the same length as the w_full slot. The final value
#' is the same as the second to last value
#' @slot psi An array (species x size) that holds the allocation to reproduction
#' for each species at size, \eqn{\psi_i(w)}
#' @slot intake_max An array (species x size) that holds the maximum intake for
#' each species at size, \eqn{h_i w^n}
#' @slot search_vol An array (species x size) that holds the search volume for
#' each species at size, \eqn{\gamma_i w^q}
#' @slot activity An array (species x size) that holds the activity for each
#' species at size, \eqn{k_i w}
#' @slot std_metab An array (species x size) that holds the standard metabolism
#' for each species at size, \eqn{k_{s.i} w^p}
#' @slot mu_b An array (species x size) that holds the background death
#' \eqn{\mu_{b.i}(w)}
#' @slot ft_pred_kernel_e An array (species x log of predator/prey size ratio) that holds
#' the Fourier transform of the feeding kernel in a form appropriate for
#' evaluating the available energy integral
#' @slot ft_pred_kernel_p An array (species x log of predator/prey size ratio) that holds
#' the Fourier transform of the feeding kernel in a form appropriate for
#' evaluating the predation mortality integral
#' @slot rr_pp A vector the same length as the w_full slot. The size specific
#' growth rate of the background spectrum, \eqn{r_0 w^{p-1}}
#' @slot cc_pp A vector the same length as the w_full slot. The size specific
#' carrying capacity of the background spectrum, \eqn{\kappa w^{-\lambda}}
#' @slot sc The community abundance of the scaling community
#' @slot species_params A data.frame to hold the species specific parameters
#' (see the mizer vignette, Table 2, for details)
#' @slot interaction The species specific interaction matrix, \eqn{\theta_{ij}}
#' @slot srr Function to calculate the realised (density dependent) recruitment.
#' Has two arguments which are rdi and species_params
#' @slot selectivity An array (gear x species x w) that holds the selectivity of
#' each gear for species and size, \eqn{S_{g,i,w}}
#' @slot catchability An array (gear x species) that holds the catchability of
#' each species by each gear, \eqn{Q_{g,i}}
#' @slot initial_n An array (species x size) that holds abundance of each species
#' at each weight at our candidate steady state solution.
#' @slot initial_n_pp A vector the same length as the w_full slot that describes
#' the abundance of the background background resource at each weight.
#' @slot n Exponent of maximum intake rate.
#' @slot p Exponent of metabolic cost.
#' @slot lambda Exponent of resource spectrum.
#' @slot q Exponent for volumetric search rate.
#' @slot f0 Initial feeding level.
#' @slot kappa Magnitude of resource spectrum.
#' @slot A Abundance multipliers.
#' @slot linecolour A named vector of colour values, named by species. Used
#' to give consistent colours to species in plots.
#' @slot linetype A named vector of linetypes, named by species. Used
#' to give consistent colours to species in plots.
#' @note The \linkS4class{MizerParams} class is fairly complex with a large number of
#' slots, many of which are multidimensional arrays. The dimensions of these
#' arrays is strictly enforced so that \code{MizerParams} objects are
#' consistent in terms of number of species and number of size classes.
#'
#' Although it is possible to build a \code{MizerParams} object by hand it is
#' not recommended and several constructors are available.
#'
#' The \code{MizerParams} class does not hold any dynamic information, e.g.
#' abundances or harvest effort through time. These are held in
#' \linkS4class{MizerSim} objects.
#' @seealso \code{\link{project}} \code{\link{MizerSim}}
#' @export
setClass(
    "MizerParams",
    representation(
        # Size grids: community grid (w) and full grid including the
        # background spectrum (w_full), with their bin widths
        w = "numeric",
        dw = "numeric",
        w_full = "numeric",
        dw_full = "numeric",
        # Species x size arrays of allocation and rates
        psi = "array",
        initial_n = "array",
        intake_max = "array",
        search_vol = "array",
        activity = "array",
        std_metab = "array",
        # Fourier transforms of the feeding kernel (energy / predation forms)
        ft_pred_kernel_e = "array",
        ft_pred_kernel_p = "array",
        mu_b = "array",
        # Background (plankton) spectrum: growth rate and carrying capacity
        rr_pp = "numeric",
        cc_pp = "numeric", # was NinPP, carrying capacity of background
        sc = "numeric",
        initial_n_pp = "numeric",
        species_params = "data.frame",
        interaction = "array",
        srr = "function",
        # Fishing: gear x species (x size) arrays
        selectivity = "array",
        catchability = "array",
        # Scalar model exponents and constants
        n = "numeric",
        p = "numeric",
        lambda = "numeric",
        q = "numeric",
        f0 = "numeric",
        kappa = "numeric",
        A = "numeric",
        # Plotting defaults, named by species
        linecolour = "character",
        linetype = "character"
    ),
    prototype = prototype(
        w = NA_real_,
        dw = NA_real_,
        w_full = NA_real_,
        dw_full = NA_real_,
        n = NA_real_,
        p = NA_real_,
        lambda = NA_real_,
        q = NA_real_,
        f0 = NA_real_,
        kappa = NA_real_,
        psi = array(NA,dim = c(1,1), dimnames = list(sp = NULL,w = NULL)),
        initial_n = array(NA,dim = c(1,1), dimnames = list(sp = NULL,w = NULL)),
        intake_max = array(NA,dim = c(1,1), dimnames = list(sp = NULL,w = NULL)),
        search_vol = array(NA,dim = c(1,1), dimnames = list(sp = NULL,w = NULL)),
        activity = array(NA,dim = c(1,1), dimnames = list(sp = NULL,w = NULL)),
        std_metab = array(NA,dim = c(1,1), dimnames = list(sp = NULL,w = NULL)),
        ft_pred_kernel_e = array(NA,dim = c(1,1), dimnames = list(sp = NULL,k = NULL)),
        ft_pred_kernel_p = array(NA,dim = c(1,1), dimnames = list(sp = NULL,k = NULL)),
        mu_b = array(NA,dim = c(1,1), dimnames = list(sp = NULL,w = NULL)),
        rr_pp = NA_real_,
        cc_pp = NA_real_,
        sc = NA_real_,
        initial_n_pp = NA_real_,
        A = NA_real_,
        linecolour = NA_character_,
        linetype = NA_character_,
        #speciesParams = data.frame(),
        interaction = array(
            NA,dim = c(1,1), dimnames = list(predator = NULL, prey = NULL)
        ), # first dimension is predator, second is prey
        selectivity = array(
            NA, dim = c(1,1,1), dimnames = list(gear = NULL, sp = NULL, w = NULL)
        ),
        catchability = array(
            NA, dim = c(1,1), dimnames = list(gear = NULL, sp = NULL)
        )
    ),
    validity = valid_MizerParams
)
#### Constructors ####
#' Constructors for objects of \code{MizerParams} class
#'
#' Constructor method for the \linkS4class{MizerParams} class. Provides the
#' simplest way of making a \code{MizerParams} object to be used in a
#' simulation.
#'
#' @param object A data frame of species specific parameter values (see notes
#' below).
#' @param interaction Optional argument to specify the interaction matrix of the
#' species (predator by prey). If missing a default interaction is used where
#' all interactions between species are set to 1. Note that any dimnames of
#' the interaction matrix argument are ignored by the constructor. The
#' dimnames of the interaction matrix in the returned \code{MizerParams}
#' object are taken from the species names in the \code{species_params} slot.
#' This means that the order of the columns and rows of the interaction matrix
#' argument should be the same as the species name in the
#' \code{species_params} slot.
#' @param min_w The smallest size of the community spectrum.
#' @param max_w The largest size of the community spectrum.
#' Default value is the largest w_inf in the community x 1.1.
#' @param no_w The number of size bins in the community spectrum.
#' @param min_w_pp The smallest size of the background spectrum.
#' @param no_w_pp Obsolete argument that is no longer used because the number
#' of plankton size bins is determined because all size bins have to
#' be logarithmically equally spaced.
#' @param n Scaling of the intake. Default value is 2/3.
#' @param p Scaling of the standard metabolism. Default value is 0.7.
#' @param q Exponent of the search volume. Default value is 0.8.
#' @param r_pp Growth rate of the primary productivity. Default value is 10.
#' @param kappa Carrying capacity of the resource spectrum. Default
#' value is 1e11.
#' @param lambda Exponent of the resource spectrum. Default value is
#' (2+q-n).
#' @param w_pp_cutoff The cut off size of the background spectrum.
#' Default value is 10.
#' @param f0 Average feeding level. Used to calculated \code{h} and
#' \code{gamma} if those are not columns in the species data frame. Also
#' requires \code{k_vb} (the von Bertalanffy K parameter) to be a column
#' in the species data frame. If \code{h} and \code{gamma} are supplied
#' then this argument is ignored. Default is 0.6..
#' @param z0pre If \code{z0}, the mortality from other sources, is not
#' a column in the species data frame, it is calculated as
#' z0pre * w_inf ^ z0exp. Default value is 0.6.
#' @param z0exp If \code{z0}, the mortality from other sources, is not
#' a column in the species data frame, it is calculated as
#' z0pre * w_inf ^ z0exp. Default value is n-1.
#' @param species_names Names of the species. Generally not needed as normally
#' taken from the \code{object} data.frame.
#' @param gear_names Names of the gears that catch each species. Generally not
#' needed as normally taken from the \code{object} data.frame. Default is
#' \code{species_names}.
#' @param ... Additional arguments.
#'
#' @return An object of type \code{MizerParams}
#' @note The only essential argument to the \code{MizerParams} constructor is a
#' data frame which contains the species data. The data frame is arranged
#' species by parameter, so each column of the parameter data frame is a
#' parameter and each row has the parameters for one of the species in the
#' model.
#'
#' There are some essential columns that must be included in the parameter
#' data.frame and that do not have default values. Other columns do have
#' default values, so that if they are not included in the species parameter
#' data frame, they will be automatically added when the \code{MizerParams}
#' object is created. See the accompanying vignette for details of these
#' columns.
#'
#' An additional constructor method takes an integer giving the number of
#' species in the model. It is only used internally to set up a
#' \code{MizerParams} object with the correct dimensions and is not
#' recommended for direct use.
#' @seealso \code{\link{project}} \linkS4class{MizerSim}
#' @export
#' @examples
#' data(NS_species_params_gears)
#' data(inter)
#' params <- MizerParams(NS_species_params_gears, inter)
# S4 generic for building MizerParams objects; the concrete behaviour lives
# in the setMethod definitions below, dispatched on (object, interaction).
setGeneric("MizerParams", function(object, interaction, ...)
    standardGeneric("MizerParams"))
#### Basic constructor ####
#' Basic constructor with only the number of species as dispatching argument
#'
#' Only really used to make a MizerParams object of the right size; it should
#' not be called by users.
#' @rdname MizerParams
setMethod('MizerParams', signature(object='numeric', interaction='missing'),
function(object, min_w = 0.001, max_w = 1000, no_w = 100, min_w_pp = 1e-10, no_w_pp = NA, species_names=1:object, gear_names=species_names){
#args <- list(...)
if (!is.na(no_w_pp))
warning("New mizer code does not support the parameter no_w_pp")
# Some checks
if (length(species_names) != object)
stop("species_names must be the same length as the value of object argument")
no_sp <- length(species_names)
# Set up grids:
# Community grid
w <- 10^(seq(from=log10(min_w),to=log10(max_w),length.out=no_w))
dw <- diff(w)
# Correctly defined dw by using the proper ratio (successive dw's have a fixed ratio).
dw[no_w] <- dw[no_w-1]*(dw[no_w-1]/dw[no_w-2])
# Set up full grid - background + community
# ERROR if dw > w, nw must be at least... depends on minw, maxw and nw
if(w[1] <= dw[1])
stop("Your size bins are too close together. You should consider increasing the number of bins, or changing the size range")
# For fft methods we need a constant log step size throughout.
# Therefore we use as many steps as are necessary to almost reach min_w_pp.
x_pp <- rev(seq(from=log10(min_w), log10(min_w_pp), by=log10(min_w/max_w)/(no_w-1))[-1])
w_full <- c(10^x_pp, w)
no_w_full <- length(w_full)
dw_full <- diff(w_full)
dw_full[no_w_full] <- dw_full[no_w_full-1]*(dw_full[no_w_full-1]/dw_full[no_w_full-2])
# Basic arrays for templates
mat1 <- array(NA, dim=c(no_sp, no_w),
dimnames = list(sp=species_names, w=signif(w,3)))
mat2 <- array(NA, dim=c(no_sp, no_w, no_w_full),
dimnames = list(sp=species_names, w_pred=signif(w,3),
w_prey=signif(w_full,3)))
ft_pred_kernel_e <- array(NA, dim=c(no_sp, no_w_full),
dimnames = list(sp=species_names, k=1:no_w_full))
# We do not know the second dimension of ft_pred_kernel_p until the species
# parameters determining the predation kernel are know.
# So for now we set it to 2
ft_pred_kernel_p <- array(NA, dim=c(no_sp, 2),
dimnames = list(sp=species_names, k=1:2))
selectivity <- array(0, dim=c(length(gear_names), no_sp, no_w),
dimnames=list(gear=gear_names, sp=species_names,
w=signif(w,3)))
catchability <- array(0, dim=c(length(gear_names), no_sp),
dimnames = list(gear=gear_names, sp=species_names))
interaction <- array(1, dim=c(no_sp, no_sp),
dimnames = list(predator = species_names,
prey = species_names))
vec1 <- as.numeric(rep(NA, no_w_full))
names(vec1) <- signif(w_full,3)
# Make an empty data.frame for species_params
# This is just to pass validity check.
# The project method uses the columns species z0 alpha erepro
# so these must be in there
# There is also a seperate function to check the dataframe that is
# passed in by users (not used in validity check)
species_params <- data.frame(species = species_names,
z0 = NA, alpha = NA, erepro = NA)
# Make an empty srr function, just to pass validity check
srr <- function(rdi, species_params) return(0)
# Make colour and linetype scales for use in plots
# Colour-blind-friendly palettes
# From http://dr-k-lo.blogspot.co.uk/2013/07/a-color-blind-friendly-palette-for-r.html
# cbbPalette <- c("#000000", "#009E73", "#e79f00", "#9ad0f3", "#0072B2", "#D55E00",
# "#CC79A7", "#F0E442")
# From http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/#a-colorblind-friendly-palette
cbbPalette <- c("#E69F00", "#56B4E9", "#009E73",
"#F0E442", "#0072B2", "#D55E00", "#CC79A7")
linecolour <- rep(cbbPalette, length.out = no_sp)
names(linecolour) <- as.character(species_names)
linecolour <- c(linecolour, "Total" = "black", "Plankton" = "green",
"Background" = "grey")
linetype <-rep(c("solid", "dashed", "dotted", "dotdash", "longdash",
"twodash"), length.out = no_sp)
names(linetype) <- as.character(species_names)
linetype <- c(linetype, "Total" = "solid", "Plankton" = "solid",
"Background" = "solid")
# Make the new object
# Should Z0, rrPP and ccPP have names (species names etc)?
res <- new("MizerParams",
w = w, dw = dw, w_full = w_full, dw_full = dw_full,
psi = mat1, initial_n = mat1, intake_max = mat1, search_vol = mat1,
activity = mat1,
std_metab = mat1, mu_b = mat1, ft_pred_kernel_e = ft_pred_kernel_e,
ft_pred_kernel_p = ft_pred_kernel_p,
selectivity=selectivity, catchability=catchability,
rr_pp = vec1, cc_pp = vec1, sc = w, initial_n_pp = vec1,
species_params = species_params,
interaction = interaction, srr = srr,
A=as.numeric(rep(NA, dim(interaction)[1])),
linecolour = linecolour, linetype = linetype)
return(res)
}
)
#### Main constructor ####
#' Constructor that takes the species_params data.frame and the interaction matrix
#'
#' Fills in defaults for any missing optional columns (gear, k, alpha, erepro,
#' sel_func, catchability, h, gamma, z0, ks), builds an empty object of the
#' right dimensions via the numeric constructor, then derives every model
#' slot (psi, intake_max, search_vol, feeding-kernel Fourier transforms,
#' background spectrum, stock-recruitment, fishing selectivity/catchability,
#' plot aesthetics) from the standard mizer formulations.
#' @rdname MizerParams
setMethod('MizerParams', signature(object='data.frame', interaction='matrix'),
    function(object, interaction, n = 2/3, p = 0.7, q = 0.8, r_pp = 10,
             kappa = 1e11, lambda = (2+q-n), w_pp_cutoff = 10,
             max_w = max(object$w_inf)*1.1, f0 = 0.6,
             z0pre = 0.6, z0exp = n-1, ...){
        row.names(object) <- object$species
        # Set default values for column values if missing
        # If no gear_name column in object, then named after species
        if(!("gear" %in% colnames(object)))
            object$gear <- object$species
        no_gear <- length(unique(object$gear))
        # If no k column (activity coefficient) in object, then set to 0
        if(!("k" %in% colnames(object)))
            object$k <- 0
        # If no alpha column in object, then set to 0.6
        # Should this be a column? Or just an argument?
        if(!("alpha" %in% colnames(object)))
            object$alpha <- 0.6
        # If no erepro column in object, then set to 1
        if(!("erepro" %in% colnames(object)))
            object$erepro <- 1
        # If no sel_func column in species_params, set to 'knife_edge'
        if(!("sel_func" %in% colnames(object))){
            message("\tNote: No sel_func column in species data frame. Setting selectivity to be 'knife_edge' for all species.")
            object$sel_func <- 'knife_edge'
            # Set default selectivity size
            if(!("knife_edge_size" %in% colnames(object))){
                message("Note: \tNo knife_edge_size column in species data frame. Setting knife edge selectivity equal to w_mat.")
                object$knife_edge_size <- object$w_mat
            }
        }
        # If no catchability column in species_params, set to 1
        if(!("catchability" %in% colnames(object)))
            object$catchability <- 1
        # Sort out h column. If not passed in directly, it is calculated from
        # f0 and the von Bertalanffy k (k_vb) if they are also passed in.
        if(!("h" %in% colnames(object))){
            message("Note: \tNo h column in species data frame so using f0 and k_vb to calculate it.")
            if(!("k_vb" %in% colnames(object))){
                stop("\t\tExcept I can't because there is no k_vb column in the species data frame")
            }
            object$h <- ((3 * object$k_vb) / (object$alpha * f0)) * (object$w_inf ^ (1/3))
        }
        # Sorting out gamma column: search volume coefficient chosen so that
        # the initial feeding level on the background spectrum equals f0.
        if(!("gamma" %in% colnames(object))){
            message("Note: \tNo gamma column in species data frame so using f0, h, beta, sigma, lambda and kappa to calculate it.")
            ae <- sqrt(2*pi) * object$sigma * object$beta^(lambda-2) * exp((lambda-2)^2 * object$sigma^2 / 2)
            object$gamma <- (object$h / (kappa * ae)) * (f0 / (1 - f0))
        }
        # Sort out z0 column
        if(!("z0" %in% colnames(object))){
            message("Note: \tNo z0 column in species data frame so using z0 = z0pre * w_inf ^ z0exp.")
            object$z0 = z0pre*object$w_inf^z0exp # background natural mortality
        }
        # Sort out ks column
        if(!("ks" %in% colnames(object))){
            message("Note: \tNo ks column in species data frame so using ks = h * 0.2.")
            object$ks <- object$h * 0.2
        }
        # Check essential columns: species (name), wInf, wMat, h, gamma, ks, beta, sigma
        check_species_params_dataframe(object)
        no_sp <- nrow(object)
        # Make an empty object of the right dimensions
        res <- MizerParams(no_sp, species_names=object$species,
                           gear_names=unique(object$gear), max_w=max_w,...)
        res@n <- n
        res@p <- p
        res@lambda <- lambda
        res@q <- q
        res@f0 <- f0
        res@kappa <- kappa
        # If no w_min column in species_params, set to w_min of community
        if (!("w_min" %in% colnames(object)))
            object$w_min <- min(res@w)
        # Check min_w argument is not > w_min in species_params
        if(any(object$w_min < min(res@w)))
            stop("One or more of your w_min values is less than the smallest size of the community spectrum")
        # Add w_min_idx column which has the reference index of the size class closest
        # to w_min - this is a short cut for later on and prevents repetition.
        if (!("w_min_idx" %in% names(object))) {
            object$w_min_idx <- as.vector(
                tapply(object$w_min,1:length(object$w_min),
                       function(w_min,wx) max(which(wx<=w_min)),wx=res@w))
        }
        # Start filling the slots
        res@species_params <- object
        # Check dims of interaction argument - make sure it's right
        if (!isTRUE(all.equal(dim(res@interaction), dim(interaction))))
            stop("interaction matrix is not of the right dimensions. Must be number of species x number of species")
        # Check that all values of interaction matrix are 0 - 1. Issue warning if not
        if(!all((interaction>=0) & (interaction<=1)))
            warning("Values in the interaction matrix should be between 0 and 1")
        # In case user has supplied names to interaction matrix which are wrong order
        for (dim_check in 1:length(dimnames(res@interaction))){
            if (!is.null(dimnames(interaction)[[dim_check]]) & (!(isTRUE(all.equal(dimnames(res@interaction)[[dim_check]],dimnames(interaction)[[dim_check]])))))
                warning("Dimnames of interaction matrix do not match the order of species names in the species data.frame. I am now ignoring your dimnames so your interaction matrix may be in the wrong order.")}
        res@interaction[] <- interaction
        # Now fill up the slots using default formulations:
        # psi - allocation to reproduction - from original Setup() function
        # Sigmoid switch-on around w_mat, scaled by (w / w_inf)^(1 - n).
        res@psi[] <- unlist(tapply(res@w,1:length(res@w),function(wx,w_inf,w_mat,n){
            ((1 + (wx/(w_mat))^-10)^-1) * (wx/w_inf)^(1-n)},w_inf=object$w_inf,w_mat=object$w_mat,n=n))
        # Set w < 10% of w_mat to 0
        res@psi[unlist(tapply(res@w,1:length(res@w),function(wx,w_mat)wx<(w_mat*0.1) ,w_mat=object$w_mat))] <- 0
        # Set all w > w_inf to 1 # Check this is right...
        res@psi[unlist(tapply(res@w,1:length(res@w),function(wx,w_inf)(wx/w_inf)>1,w_inf=object$w_inf))] <- 1
        # not sure what a and n0_mult are in get_initial_n
        res@intake_max[] <- unlist(tapply(res@w,1:length(res@w),function(wx,h,n)h * wx^n,h=object$h,n=n))
        res@search_vol[] <- unlist(tapply(res@w,1:length(res@w),function(wx,gamma,q)gamma * wx^q, gamma=object$gamma, q=q))
        res@activity[] <- unlist(tapply(res@w,1:length(res@w),function(wx,k)k * wx,k=object$k))
        res@std_metab[] <- unlist(tapply(res@w,1:length(res@w),function(wx,ks,p)ks * wx^p, ks=object$ks,p=p))
        res@mu_b[] <- res@species_params$z0
        # Feeding kernel is log-normal in predator/prey mass ratio:
        # Beta = log of preferred ratio, sigma = width (both per species).
        Beta <- log(res@species_params$beta)
        sigma <- res@species_params$sigma
        Dx <- res@w[2]/res@w[1] - 1 # dw = w Dx
        # w_full has the weights from the smallest relevant plankton, to the largest fish
        xFull <- log(res@w_full)
        xFull <- xFull - xFull[1]
        # ft_pred_kernel_e is an array (species x log of predator/prey size ratio)
        # that holds the Fourier transform of the feeding kernel in a form
        # appropriate for evaluating the available energy integral
        res@ft_pred_kernel_e <- matrix(0, nrow = dim(res@interaction)[1], ncol=length(xFull))
        noSpecies <- dim(res@interaction)[1]
        for(i in 1:noSpecies){
            # We compute the feeding kernel terms and their fft.
            res@ft_pred_kernel_e[i, ] <- Dx*fft(exp(-(xFull - Beta[i])^2/(2*sigma[i]^2)))
        }
        # rr is the log of the maximal predator/prey mass ratio
        # Here we use default rr = beta + 3*sigma
        rr <- Beta + 3*sigma
        # Perturb rr so it falls on grid points
        dx <- xFull[2]-xFull[1]
        rr <- dx*ceiling(rr/dx)
        # Determine period used
        P <- max(xFull[length(xFull)] + rr)
        # Determine number of x points used in period
        no_P <- 1+ceiling(P/dx) # P/dx should already be integer
        # vector of values for log predator/prey mass ratio
        x_P <- (-1:(no_P-2))*dx
        # The dimension of ft_pred_kernel_p was not known at the time the res object
        # was initialised. Hence we need to create it with the right dimension here.
        res@ft_pred_kernel_p <- matrix(0, nrow = noSpecies, ncol = no_P)
        dimnames(res@ft_pred_kernel_p) <- list(sp=rownames(res@std_metab),k=(1:no_P))
        for (j in 1:noSpecies){
            phi <- rep(0, no_P)
            # Our phi is a periodic extension of the normal feeding kernel.
            # For 0<=x<=P we use phi[x-P] as our
            # value of the period P extension of phi, since support(phi)=[-rr,0]
            phi[x_P-P >= -rr[j]] <- exp(-(Beta[j]-P+x_P[x_P-P >= -rr[j]])^2/(2*sigma[j]^2))
            # We also save the fft of this vector, so we don't have to use too many ffts in the time evolution
            res@ft_pred_kernel_p[j, ] <- Dx*fft(phi)
        }
        # Background spectrum
        res@rr_pp[] <- r_pp * res@w_full^(n-1) #weight specific plankton growth rate ##
        res@cc_pp[] <- kappa*res@w_full^(-lambda) # the resource carrying capacity - one for each mp and m (130 of them)
        res@cc_pp[res@w_full>w_pp_cutoff] <- 0 #set density of sizes < plankton cutoff size
        # Set the SRR to be a Beverton Holt esque relationship
        # Can add more functional forms or user specifies own
        res@initial_n_pp <- res@cc_pp
        res@srr <- function(rdi, species_params){
            return(rdi / (1 + rdi/species_params$r_max))
        }
        # Set fishing parameters: selectivity and catchability
        # At the moment, each species is only caught by 1 gear so in species_params
        # there are the columns: gear_name and sel_func.
        # BEWARE! This routine assumes that each species has only one gear operating on it
        # So we can just go row by row through the species parameters
        # However, I really hope we can do something better soon
        for (g in 1:nrow(object)){
            # Do selectivity first
            # get args
            # These as.characters are annoying - but factors everywhere
            arg <- names(formals(as.character(object[g,'sel_func'])))
            # lop off w as that is always the first argument of the selectivity functions
            arg <- arg[!(arg %in% "w")]
            if(!all(arg %in% colnames(object)))
                stop("All of the arguments needed for the selectivity function are not in the parameter dataframe")
            # Check that there is only one column in object with the same name
            # Check that column of arguments exists
            par <- c(w=list(res@w),as.list(object[g,arg]))
            sel <- do.call(as.character(object[g,'sel_func']), args=par)
            # Dump Sel in the right place
            res@selectivity[as.character(object[g,'gear']), g, ] <- sel
            # Now do catchability
            res@catchability[as.character(object[g,'gear']), g] <- object[g,"catchability"]
        }
        # Store colours and linetypes in slots if contained in species parameters
        if ("linetype" %in% names(object)) {
            linetype <- object$linetype[!is.na(object$linetype)]
            res@linetype[object$species[!is.na(object$linetype)]] <- linetype
        }
        if ("linecolour" %in% names(object)) {
            linecolour <- object$linecolour[!is.na(object$linecolour)]
            res@linecolour[object$species[!is.na(object$linecolour)]] <- linecolour
        }
        # Remove catchability from species data.frame, now stored in slot.
        # (Safe here because a catchability column is always added above.)
        #params@species_params[,names(params@species_params) != "catchability"]
        res@species_params <- res@species_params[,-which(names(res@species_params)=="catchability")]
        res@initial_n <- res@psi
        res@initial_n <- get_initial_n(res)
        res@A <- rep(1,no_sp)
        return(res)
    }
)
#### theta = 1 constructor ####
# If interaction is missing, make one of the right size and fill with 1s
#' Constructor based on the species_params data.frame only with no interaction
#' @rdname MizerParams
setMethod('MizerParams', signature(object = 'data.frame', interaction = 'missing'),
    function(object, ...) {
        # No interaction matrix supplied: default to full interaction,
        # i.e. every predator feeds on every prey species with weight 1.
        n_sp <- nrow(object)
        MizerParams(object, matrix(1, nrow = n_sp, ncol = n_sp), ...)
    })
# Check that the species_params dataset is OK
# internal only
# Internal validator for the user-supplied species parameter data.frame.
#
# Ensures every column that the MizerParams constructor requires (and has no
# default for) is present: species (name), w_inf, w_mat, h, gamma (search
# volume), ks, beta, sigma, z0.
#
# @param species_params data.frame of species parameters, one row per species.
# @return TRUE if all essential columns are present; otherwise stops with an
#   error listing every missing column at once.
check_species_params_dataframe <- function(species_params){
    essential_cols <- c("species", "w_inf", "w_mat", "h", "gamma", "ks",
                        "beta", "sigma", "z0")
    # setdiff() replaces the original element-wise %in% + accumulation loop;
    # it reports all missing columns in a single pass.
    missing_cols <- setdiff(essential_cols, colnames(species_params))
    if (length(missing_cols) > 0) {
        stop("You are missing these columns from the input dataframe:\n ",
             paste(missing_cols, collapse = " "))
    }
    return(TRUE)
}
|
/combinados-NOUSAR.R | permissive | deerme/tvs-comparativo | R | false | false | 9,857 | r | ||
#' Pipe operator
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom dplyr %>%
#' @examples
#' x <- 5 %>% sum(10)
#'
#' @usage lhs \%>\% rhs
#' @return return value of rhs function.
NULL
# Convert the list of per-statistic result matrices (`wide_res`) into one
# tidy data.frame with a row per (feature, group) pair and a fixed column
# order. The wide matrices are stacked group-by-group with features varying
# fastest, which `rep(times = ...)` / `rep(each = ...)` reconstructs.
tidy_results <- function(wide_res, features, groups) {
    res <- data.frame(Reduce(cbind, lapply(wide_res, as.numeric)))
    colnames(res) <- names(wide_res)
    res$feature <- rep(features, times = length(groups))
    res$group <- rep(groups, each = length(features))
    dplyr::select(
        res,
        .data$feature,
        .data$group,
        .data$avgExpr,
        .data$logFC,
        .data$statistic,
        .data$z,
        .data$auc,
        .data$pval,
        .data$padj,
        .data$pct_in,
        .data$pct_out
    )
}
# Mann-Whitney U statistic per group and feature from a rank-transformed
# matrix `Xr` (observations x features) and group labels `cols`.
# U = (rank sum of the group) - n*(n+1)/2 with n = group size.
compute_ustat <- function(Xr, cols, n1n2, group.size) {
    # Per-group rank sums (groups x features).
    grs <- sumGroups(Xr, cols)
    if (is(Xr, 'dgCMatrix')) {
        # Sparse case: zero entries are implicit and were not ranked, so they
        # all share one midrank per column.
        # gnz = number of (implicit) zero observations per group and feature.
        gnz <- (group.size - nnzeroGroups(Xr, cols))
        # Midrank of the zeros: n0 = nrow(Xr) - diff(Xr@p) zeros per column,
        # averaged rank (n0 + 1) / 2 -- assumes the data are non-negative so
        # zeros are the smallest values (true for expression counts).
        zero.ranks <- (nrow(Xr) - diff(Xr@p) + 1) / 2
        ustat <- t((t(gnz) * zero.ranks)) + grs - group.size *
            (group.size + 1 ) / 2
    } else {
        # Dense case: all ranks are materialised, so the rank sum suffices.
        ustat <- grs - group.size * (group.size + 1 ) / 2
    }
    return(ustat)
}
# Two-sided p-values for the Mann-Whitney U statistics, via the normal
# approximation with tie correction.
#
# The original body duplicated compute_z() line for line; delegate to it
# instead so the continuity/tie-correction logic lives in one place.
#
# @param ustat groups x features matrix of U statistics.
# @param ties  list (one element per feature) of tied-group sizes.
# @param N     total number of observations.
# @param n1n2  per-group n1*n2 products.
# @return features x groups matrix of two-sided p-values.
compute_pval <- function(ustat, ties, N, n1n2) {
    z <- compute_z(ustat, ties, N, n1n2)
    matrix(2 * pnorm(-abs(as.numeric(z))), ncol = ncol(z))
}
# z-scores for Mann-Whitney U statistics under the normal approximation.
#
# Applies a 0.5 continuity correction towards zero, and the standard tie
# correction to the null variance: sigma^2 = n1*n2 * (N^3 - N - sum(t^3 - t))
# / (12 * (N^2 - N)) with t the tied-group sizes.
#
# Change vs. original: `vapply()` with a declared numeric(1) template
# replaces `lapply(...) %>% unlist`, removing this helper's dependence on
# the magrittr pipe and guaranteeing the result type.
#
# @param ustat groups x features matrix of U statistics.
# @param ties  list (one element per feature) of tied-group sizes.
# @param N     total number of observations.
# @param n1n2  per-group n1*n2 products.
# @return features x groups matrix of z-scores.
compute_z <- function(ustat, ties, N, n1n2) {
    z <- ustat - .5 * n1n2
    # Continuity correction: shrink |z| by 0.5 towards zero.
    z <- z - sign(z) * .5
    .x1 <- N ^ 3 - N
    .x2 <- 1 / (12 * (N^2 - N))
    rhs <- vapply(ties, function(tvals) {
        (.x1 - sum(tvals ^ 3 - tvals)) * .x2
    }, numeric(1))
    usigma <- sqrt(matrix(n1n2, ncol = 1) %*% matrix(rhs, nrow = 1))
    z <- t(z / usigma)
    return(z)
}
#' rank_matrix
#'
#' Utility function to rank columns of matrix
#'
#' @param X feature by observation matrix.
#'
#' @examples
#'
#' data(exprs)
#' rank_res <- rank_matrix(exprs)
#'
#' @return List with 2 items
#' \itemize{
#' \item X_ranked - matrix of entry ranks
#' \item ties - list of tied group sizes
#' }
#' @export
rank_matrix <- function(X) {
    # S3 dispatch: dense `matrix` and sparse `dgCMatrix` inputs have
    # separate ranking implementations (methods below).
    UseMethod('rank_matrix')
}
#' @rdname rank_matrix
#' @export
rank_matrix.dgCMatrix <- function(X) {
    # Ensure column-compressed sparse storage, then rank column-wise in C++.
    # NOTE(review): cpp_rank_matrix_dgc appears to rank the explicit
    # (non-zero) entries in place via the @x/@p slots and return the tie
    # bookkeeping -- confirm against the C++ source.
    Xr <- Matrix(X, sparse = TRUE)
    ties <- cpp_rank_matrix_dgc(Xr@x, Xr@p, nrow(Xr), ncol(Xr))
    return(list(X_ranked = Xr, ties = ties))
}
#' @rdname rank_matrix
#' @export
rank_matrix.matrix <- function(X) {
    # Dense case: ranking and tie bookkeeping are done entirely in C++.
    cpp_rank_matrix_dense(X)
}
#' sumGroups
#'
#' Utility function to sum over group labels
#'
#' @param X matrix
#' @param y group labels
#' @param MARGIN whether observations are rows (=2) or columns (=1)
#'
#' @examples
#'
#' data(exprs)
#' data(y)
#' sumGroups_res <- sumGroups(exprs, y, 1)
#' sumGroups_res <- sumGroups(t(exprs), y, 2)
#'
#' @return Matrix of groups by features
#' @export
# Generic entry point: validate that the group-label vector matches the
# observation dimension, then dispatch on the class of X.
#
# Fix: `&&` (scalar, short-circuiting) replaces the vectorised `&`. The
# condition of `if` must be a single logical, and with `&&` the dimension
# lookup is only evaluated for the MARGIN actually in use.
sumGroups <- function(X, y, MARGIN=2) {
    if (MARGIN == 2 && nrow(X) != length(y)) {
        stop('wrong dims')
    } else if (MARGIN == 1 && ncol(X) != length(y)) {
        stop('wrong dims')
    }
    UseMethod('sumGroups')
}
#' @rdname sumGroups
#' @export
sumGroups.dgCMatrix <- function(X, y, MARGIN=2) {
    # Delegate to the C++ kernels; group labels are passed 0-based.
    # MARGIN == 1 means observations are columns (the *_T "transposed"
    # kernel); MARGIN == 2 means observations are rows.
    if (MARGIN == 1) {
        cpp_sumGroups_dgc_T(X@x, X@p, X@i, ncol(X), nrow(X), as.integer(y) - 1,
                            length(unique(y)))
    } else {
        cpp_sumGroups_dgc(X@x, X@p, X@i, ncol(X), as.integer(y) - 1,
                          length(unique(y)))
    }
}
#' @rdname sumGroups
#' @export
sumGroups.matrix <- function(X, y, MARGIN=2) {
    # Dense counterpart of the dgCMatrix method; labels 0-based for C++.
    if (MARGIN == 1) {
        cpp_sumGroups_dense_T(X, as.integer(y) - 1, length(unique(y)))
    } else {
        cpp_sumGroups_dense(X, as.integer(y) - 1, length(unique(y)))
    }
}
#' nnzeroGroups
#'
#' Utility function to compute the number of non-zero entries per feature within each group
#'
#' @param X matrix
#' @param y group labels
#' @param MARGIN whether observations are rows (=2) or columns (=1)
#'
#' @examples
#'
#' data(exprs)
#' data(y)
#' nnz_res <- nnzeroGroups(exprs, y, 1)
#' nnz_res <- nnzeroGroups(t(exprs), y, 2)
#'
#' @return Matrix of groups by features
#' @export
# Generic entry point: validate that the group-label vector matches the
# observation dimension, then dispatch on the class of X.
#
# Fix (matching sumGroups): `&&` replaces the vectorised `&` in the scalar
# `if` conditions, short-circuiting the dimension lookup for the unused
# MARGIN.
nnzeroGroups <- function(X, y, MARGIN=2) {
    if (MARGIN == 2 && nrow(X) != length(y)) {
        stop('wrong dims')
    } else if (MARGIN == 1 && ncol(X) != length(y)) {
        stop('wrong dims')
    }
    UseMethod('nnzeroGroups')
}
#' @rdname nnzeroGroups
#' @export
nnzeroGroups.dgCMatrix <- function(X, y, MARGIN=2) {
    # Count explicit (non-zero) entries per group via the sparse index
    # slots only; labels are passed 0-based to C++.
    if (MARGIN == 1) {
        cpp_nnzeroGroups_dgc_T(X@p, X@i, ncol(X), nrow(X), as.integer(y) - 1,
                               length(unique(y)))
    } else {
        cpp_nnzeroGroups_dgc(X@p, X@i, ncol(X), as.integer(y) - 1,
                             length(unique(y)))
    }
}
#' @rdname nnzeroGroups
#' @export
nnzeroGroups.matrix <- function(X, y, MARGIN=2) {
    # Dense counterpart: scan the full matrix in C++; labels 0-based.
    if (MARGIN == 1) {
        cpp_nnzeroGroups_dense_T(X, as.integer(y) - 1, length(unique(y)))
    } else {
        cpp_nnzeroGroups_dense(X, as.integer(y) - 1, length(unique(y)))
    }
}
| /R/utils.R | no_license | jordansquair/presto | R | false | false | 6,017 | r | #' Pipe operator
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom dplyr %>%
#' @examples
#' x <- 5 %>% sum(10)
#'
#' @usage lhs \%>\% rhs
#' @return return value of rhs function.
NULL
tidy_results <- function(wide_res, features, groups) {
res <- Reduce(cbind, lapply(wide_res, as.numeric)) %>% data.frame()
colnames(res) <- names(wide_res)
res$feature <- rep(features, times = length(groups))
res$group <- rep(groups, each = length(features))
res %>% dplyr::select(
.data$feature,
.data$group,
.data$avgExpr,
.data$logFC,
.data$statistic,
.data$z,
.data$auc,
.data$pval,
.data$padj,
.data$pct_in,
.data$pct_out
)
# res <- Reduce(cbind,
# lapply(names(wide_res), function(label) {
# res <- wide_res[[label]]
# colnames(res) <- paste(label, groups, sep = '.')
# res
# })) %>% data.frame()
# res$feature <- features
# res %>%
# reshape2::melt(id.vars = c('feature')) %>%
# # tidyr::gather(key, val, -feature) %>%
# tidyr::separate(.data$variable, c('metric', 'group'), '[[.]]') %>%
# tidyr::spread(.data$metric, .data$value) %>%
# dplyr::select(
# .data$feature,
# .data$group,
# .data$avgExpr,
# .data$logFC,
# .data$statistic,
# .data$auc,
# .data$pval,
# .data$padj,
# .data$pct_in,
# .data$pct_out
# )
}
compute_ustat <- function(Xr, cols, n1n2, group.size) {
grs <- sumGroups(Xr, cols)
if (is(Xr, 'dgCMatrix')) {
gnz <- (group.size - nnzeroGroups(Xr, cols))
zero.ranks <- (nrow(Xr) - diff(Xr@p) + 1) / 2
ustat <- t((t(gnz) * zero.ranks)) + grs - group.size *
(group.size + 1 ) / 2
} else {
ustat <- grs - group.size * (group.size + 1 ) / 2
}
return(ustat)
}
# Two-sided p-values for the Mann-Whitney U statistics, via the normal
# approximation with tie correction.
#
# The original body duplicated compute_z() line for line; delegate to it
# instead so the continuity/tie-correction logic lives in one place.
#
# @param ustat groups x features matrix of U statistics.
# @param ties  list (one element per feature) of tied-group sizes.
# @param N     total number of observations.
# @param n1n2  per-group n1*n2 products.
# @return features x groups matrix of two-sided p-values.
compute_pval <- function(ustat, ties, N, n1n2) {
    z <- compute_z(ustat, ties, N, n1n2)
    matrix(2 * pnorm(-abs(as.numeric(z))), ncol = ncol(z))
}
# z-scores for Mann-Whitney U statistics under the normal approximation.
#
# Applies a 0.5 continuity correction towards zero, and the standard tie
# correction to the null variance: sigma^2 = n1*n2 * (N^3 - N - sum(t^3 - t))
# / (12 * (N^2 - N)) with t the tied-group sizes.
#
# Change vs. original: `vapply()` with a declared numeric(1) template
# replaces `lapply(...) %>% unlist`, removing this helper's dependence on
# the magrittr pipe and guaranteeing the result type.
#
# @param ustat groups x features matrix of U statistics.
# @param ties  list (one element per feature) of tied-group sizes.
# @param N     total number of observations.
# @param n1n2  per-group n1*n2 products.
# @return features x groups matrix of z-scores.
compute_z <- function(ustat, ties, N, n1n2) {
    z <- ustat - .5 * n1n2
    # Continuity correction: shrink |z| by 0.5 towards zero.
    z <- z - sign(z) * .5
    .x1 <- N ^ 3 - N
    .x2 <- 1 / (12 * (N^2 - N))
    rhs <- vapply(ties, function(tvals) {
        (.x1 - sum(tvals ^ 3 - tvals)) * .x2
    }, numeric(1))
    usigma <- sqrt(matrix(n1n2, ncol = 1) %*% matrix(rhs, nrow = 1))
    z <- t(z / usigma)
    return(z)
}
#' rank_matrix
#'
#' Utility function to rank columns of matrix
#'
#' @param X feature by observation matrix.
#'
#' @examples
#'
#' data(exprs)
#' rank_res <- rank_matrix(exprs)
#'
#' @return List with 2 items
#' \itemize{
#' \item X_ranked - matrix of entry ranks
#' \item ties - list of tied group sizes
#' }
#' @export
rank_matrix <- function(X) {
UseMethod('rank_matrix')
}
#' @rdname rank_matrix
#' @export
rank_matrix.dgCMatrix <- function(X) {
Xr <- Matrix(X, sparse = TRUE)
ties <- cpp_rank_matrix_dgc(Xr@x, Xr@p, nrow(Xr), ncol(Xr))
return(list(X_ranked = Xr, ties = ties))
}
#' @rdname rank_matrix
#' @export
rank_matrix.matrix <- function(X) {
cpp_rank_matrix_dense(X)
}
#' sumGroups
#'
#' Utility function to sum over group labels
#'
#' @param X matrix
#' @param y group labels
#' @param MARGIN whether observations are rows (=2) or columns (=1)
#'
#' @examples
#'
#' data(exprs)
#' data(y)
#' sumGroups_res <- sumGroups(exprs, y, 1)
#' sumGroups_res <- sumGroups(t(exprs), y, 2)
#'
#' @return Matrix of groups by features
#' @export
# Generic entry point: validate that the group-label vector matches the
# observation dimension, then dispatch on the class of X.
#
# Fix: `&&` (scalar, short-circuiting) replaces the vectorised `&`. The
# condition of `if` must be a single logical, and with `&&` the dimension
# lookup is only evaluated for the MARGIN actually in use.
sumGroups <- function(X, y, MARGIN=2) {
    if (MARGIN == 2 && nrow(X) != length(y)) {
        stop('wrong dims')
    } else if (MARGIN == 1 && ncol(X) != length(y)) {
        stop('wrong dims')
    }
    UseMethod('sumGroups')
}
#' @rdname sumGroups
#' @export
sumGroups.dgCMatrix <- function(X, y, MARGIN=2) {
if (MARGIN == 1) {
cpp_sumGroups_dgc_T(X@x, X@p, X@i, ncol(X), nrow(X), as.integer(y) - 1,
length(unique(y)))
} else {
cpp_sumGroups_dgc(X@x, X@p, X@i, ncol(X), as.integer(y) - 1,
length(unique(y)))
}
}
#' @rdname sumGroups
#' @export
sumGroups.matrix <- function(X, y, MARGIN=2) {
if (MARGIN == 1) {
cpp_sumGroups_dense_T(X, as.integer(y) - 1, length(unique(y)))
} else {
cpp_sumGroups_dense(X, as.integer(y) - 1, length(unique(y)))
}
}
#' nnzeroGroups
#'
#' Utility function to compute number of zeros-per-feature within group
#'
#' @param X matrix
#' @param y group labels
#' @param MARGIN whether observations are rows (=2) or columns (=1)
#'
#' @examples
#'
#' data(exprs)
#' data(y)
#' nnz_res <- nnzeroGroups(exprs, y, 1)
#' nnz_res <- nnzeroGroups(t(exprs), y, 2)
#'
#' @return Matrix of groups by features
#' @export
# Generic entry point: validate that the group-label vector matches the
# observation dimension, then dispatch on the class of X.
#
# Fix (matching sumGroups): `&&` replaces the vectorised `&` in the scalar
# `if` conditions, short-circuiting the dimension lookup for the unused
# MARGIN.
nnzeroGroups <- function(X, y, MARGIN=2) {
    if (MARGIN == 2 && nrow(X) != length(y)) {
        stop('wrong dims')
    } else if (MARGIN == 1 && ncol(X) != length(y)) {
        stop('wrong dims')
    }
    UseMethod('nnzeroGroups')
}
#' @rdname nnzeroGroups
#' @export
nnzeroGroups.dgCMatrix <- function(X, y, MARGIN=2) {
if (MARGIN == 1) {
cpp_nnzeroGroups_dgc_T(X@p, X@i, ncol(X), nrow(X), as.integer(y) - 1,
length(unique(y)))
} else {
cpp_nnzeroGroups_dgc(X@p, X@i, ncol(X), as.integer(y) - 1,
length(unique(y)))
}
}
#' @rdname nnzeroGroups
#' @export
nnzeroGroups.matrix <- function(X, y, MARGIN=2) {
if (MARGIN == 1) {
cpp_nnzeroGroups_dense_T(X, as.integer(y) - 1, length(unique(y)))
} else {
cpp_nnzeroGroups_dense(X, as.integer(y) - 1, length(unique(y)))
}
}
|
# Exploratory plot 4: a 2x2 panel of household power readings for
# 2007-02-01 and 2007-02-02 (UCI "Individual household electric power
# consumption" data set), written to plot4.png.
raw <- read.table("household_power_consumption.txt", header = TRUE,
                  sep = ";", stringsAsFactors = FALSE, dec = ".")
feb_days <- raw[raw$Date %in% c("1/2/2007", "2/2/2007"), ]

# Combine date and time into POSIXlt timestamps for the x axis.
stamp <- strptime(paste(feb_days$Date, feb_days$Time, sep = " "),
                  "%d/%m/%Y %H:%M:%S")

# Measurement columns arrive as character; coerce to numeric.
active_power   <- as.numeric(feb_days$Global_active_power)
reactive_power <- as.numeric(feb_days$Global_reactive_power)
volts          <- as.numeric(feb_days$Voltage)
meter1         <- as.numeric(feb_days$Sub_metering_1)
meter2         <- as.numeric(feb_days$Sub_metering_2)
meter3         <- as.numeric(feb_days$Sub_metering_3)

png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
plot(stamp, active_power, type = "l", xlab = "",
     ylab = "Global Active Power", cex = 0.2)
plot(stamp, volts, type = "l", xlab = "", ylab = "Voltage")
plot(stamp, meter1, type = "l", xlab = "datetime",
     ylab = "Energy Sub Metering")
lines(stamp, meter2, type = "l", col = "red")
lines(stamp, meter3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
plot(stamp, reactive_power, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power")
dev.off()
| /plot4.R | no_license | aswinan/ExData_Plotting1 | R | false | false | 1,284 | r | #source data
Data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
SubData <- Data[Data$Date %in% c("1/2/2007","2/2/2007") ,]
#variables
datetime <- strptime(paste(SubData$Date, SubData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalactivepower <- as.numeric(SubData$Global_active_power)
globalreactivepower <- as.numeric(SubData$Global_reactive_power)
voltage <- as.numeric((SubData$Voltage))
Submeter1 <- as.numeric(SubData$Sub_metering_1)
Submeter2 <- as.numeric(SubData$Sub_metering_2)
Submeter3 <- as.numeric(SubData$Sub_metering_3)
#plotting
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime,globalactivepower, type = "l", xlab = "", ylab = "Global Active Power", cex = 0.2)
plot(datetime, voltage, type = "l", xlab = "", ylab = "Voltage")
plot(datetime,Submeter1, type = "l", xlab = "datetime", ylab = "Energy Sub Metering")
lines(datetime, Submeter2, type = "l", col = "red")
lines(datetime, Submeter3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
plot(datetime, globalreactivepower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coxph_plots.R
\name{gg_coxsnell}
\alias{gg_coxsnell}
\title{Overall model fit diagnostic via Cox-Snell residuals.}
\usage{
gg_coxsnell(fit, type = c("cumu_hazard", "cdf"))
}
\arguments{
\item{fit}{
the result of fitting a Cox regression model, using the \code{coxph} function.
}
\item{type}{\code{character}. Optional argument. If \code{"cumu_hazard"} plots
cumulative hazard (Nelson-Aalen estimate) vs. Cox-Snell residuals.
If \code{"cdf"} plots the empirical cumulative distribution function (Breslow estimate)
vs. Cox-Snell residuals.}
}
\value{
A \code{\link[ggplot2]{ggplot}} object.
}
\description{
\code{gg_coxsnell} extracts Cox-Snell residuals from an \code{\link[survival]{coxph}}
object (which for a correct model should be a censored sample from Exp(1)) and
the cumulative hazard rate of the residuals vs. the residuals.
}
\examples{
library(survival)
data("tongue", package="KMsurv")
cox.tongue <- coxph(Surv(time, delta)~as.factor(type), data=tongue)
gg_coxsnell(cox.tongue) +
geom_abline(intercept=0, slope=1, col=2)
gg_coxsnell(cox.tongue, type="cdf") +
geom_line(aes(y=F), col=2)
}
| /man/gg_coxsnell.Rd | permissive | LorenzHaller/ldatools | R | false | true | 1,182 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coxph_plots.R
\name{gg_coxsnell}
\alias{gg_coxsnell}
\title{Overall model fit diagnostic via Cox-Snell residuals.}
\usage{
gg_coxsnell(fit, type = c("cumu_hazard", "cdf"))
}
\arguments{
\item{fit}{
the result of fitting a Cox regression model, using the \code{coxph} function.
}
\item{type}{\code{character}. Optional argument. If \code{"cumu_hazard"} plots
cumulative hazard (Nelson-Aalen estimate) vs. Cox-Snell residuals.
If \code{"cdf"} plot empirical cumulative distribution Function (Breslow estimate)
vs. Cox-Snell residuals.}
}
\value{
A \code{\link[ggplot2]{ggplot}} object.
}
\description{
\code{gg_coxsnell} extracts Cox-Snell residuals from an \code{\link[survival]{coxph}}
object (which for a correct model should be a censored sample from Exp(1)) and
the cumulative hazard rate of the residuals vs. the residuals.
}
\examples{
library(survival)
data("tongue", package="KMsurv")
cox.tongue <- coxph(Surv(time, delta)~as.factor(type), data=tongue)
gg_coxsnell(cox.tongue) +
geom_abline(intercept=0, slope=1, col=2)
gg_coxsnell(cox.tongue, type="cdf") +
geom_line(aes(y=F), col=2)
}
|
setwd('C:\\Users\\JASBIR-PC\\Desktop\\1223\\1223\\1223\\Programs')
#1(a)
# Return the largest element of a numeric vector.
#
# Fix: the original looped over 2:length(v), which for a length-1 vector
# expands to c(2, 1) and indexes past the end (v[2] is NA, so the `if`
# errors). seq_along() iterates safely for any length; empty input is
# rejected explicitly instead of crashing.
find_max <- function(v){
  if (length(v) == 0) {
    stop("find_max: input vector is empty")
  }
  max_val = v[1]
  for (i in seq_along(v)) {
    if (v[i] > max_val) {
      max_val = v[i]
    }
  }
  return(max_val)
}
# 1(a) driver: load column 5 of data.csv as a plain numeric vector and
# report its maximum via find_max() above.
# NOTE(review): the name `vector` shadows base::vector(); harmless here,
# but a more specific name would be safer.
vector = c(read.csv("data.csv")[,5])
MAX = find_max(vector)
MAX
#1(b)
# Return the sum of the even elements of a numeric vector.
#
# Fixes: `seq_along()` replaces `1:length(v)`, which expands to c(1, 0)
# for an empty vector and indexes out of bounds; the accumulator is
# renamed so it no longer shadows base::sum().
sum_even = function(v){
  total = 0
  for (i in seq_along(v)) {
    if (v[i] %% 2 == 0) {
      total = total + v[i]
    }
  }
  return(total)
}
# 1(b) driver: sum of the even entries of the data vector.
SUM = sum_even(vector)
SUM
#1(c)
# Linear search: return the index of the first occurrence of a number in v,
# or -1 if it is absent or the input could not be parsed.
#
# Fixes vs. original: (1) as.numeric() always returns a numeric (NA on bad
# input), so the is.numeric() guard and its -1 branch were unreachable,
# while NA crashed the `v[i] == num` test; (2) falling off the loop
# returned NULL, not -1, breaking the caller's `check == -1` comparison.
# The new `num` argument defaults to the original interactive prompt, so
# existing calls behave the same while non-interactive use becomes possible.
search_vector = function(v, num = suppressWarnings(
    as.numeric(readline(prompt = "Enter the number to search :: ")))){
  if (is.na(num)) {
    return(-1)
  }
  for (i in seq_along(v)) {
    if (v[i] == num) {
      return(i)
    }
  }
  return(-1)
}
# 1(c) driver: interactive search; -1 is the "not found" sentinel.
# NOTE(review): search_vector() as written can fall off its loop and
# return NULL, which makes `check == -1` a length-0 logical and breaks
# this `if` -- see the function above.
check = search_vector(vector)
if (check == -1) {
  print("NOT FOUND")
} else {
  cat("FOUND AT LOCATION ::", check)
}
#1(d)
# Recursive factorial of a non-negative integer.
#
# Fix: negative or fractional input would never reach the num == 0 base
# case and recurse until the C stack overflows; reject it explicitly.
factorial_num = function(num){
  if (num < 0 || num != floor(num)) {
    stop("factorial_num: num must be a non-negative integer")
  }
  if (num == 0) {
    return(1)
  }
  return(num * factorial_num(num - 1))
}
# 1(d) driver: read an integer and print its factorial.
# NOTE(review): as.integer() yields NA (with a warning) on bad input, and
# `is.integer(NA_integer_)` is TRUE while `NA >= 0` is NA -- so invalid
# input errors in the `if` instead of printing INVALID INPUT.
num = as.integer(readline(prompt = "Enter a positive integer number :: "))
if (is.integer(num) && num >= 0) {
  result = factorial_num(num)
  cat("Factorial of ", num, " is = ", result)
} else {
  cat("INVALID INPUT")
}
#1(d)  # NOTE(review): label duplicates 1(d) above — likely meant 1(e).
# Arithmetic mean of numeric vector `v` (hand-rolled equivalent of mean()).
# Fix: seq_along(v) instead of 1:length(v), which misbehaves on an empty
# vector (1:0 expands to c(1, 0)).
Mean <- function(v) {
  total = 0
  for (i in seq_along(v)) {
    total = total + v[i]
  }
  return(total / length(v))
}
# Sample standard deviation of `v` (n - 1 denominator, matching sd()).
# Fix: same seq_along change; the local that held the mean is renamed so it
# no longer shadows the Mean() function.
Std_Dev <- function(v) {
  sq_error = 0
  m = Mean(v)
  for (i in seq_along(v)) {
    sq_error = sq_error + (v[i] - m)^2
  }
  return(sqrt(sq_error / (length(v) - 1)))
}
# Mean and sample standard deviation of the data column loaded above.
M = Mean(vector)
std_Dev = Std_Dev(vector)
#1(e)
# Primality test by trial division.
# Fix: for num = 2 the old loop bound 2:ceiling(num/2) expanded to c(2, 1),
# so `2 %% 2 == 0` wrongly classified 2 as composite. Small inputs are now
# handled before the loop, and divisors are tried only up to
# floor(sqrt(num)) — sufficient for correctness and much faster.
Check_Prime = function(num) {
  if (num <= 1) {
    return(FALSE)
  }
  if (num <= 3) {
    return(TRUE)
  }
  for (i in 2:floor(sqrt(num))) {
    if (num %% i == 0) {
      return(FALSE)
    }
  }
  return(TRUE)
}
# Prompt for an integer and report whether it is prime.
# NOTE(review): no NA guard — as.integer() of junk input yields NA, and
# Check_Prime(NA) then errors inside if(num <= 1) ("missing value where
# TRUE/FALSE needed").
num = as.integer(readline(prompt = "Enter a positive integer number :: "))
check = Check_Prime(num)
if(check){
  cat(num," is a Prime Number")
}else{
  cat(num," is not a Prime Number")
}
#1(f)
# Sum of the decimal digits of a non-negative integer, computed recursively:
# peel off the last digit with %% 10 and recurse on the integer quotient.
digit_sum <- function(num) {
  if (num == 0) {
    return(0)
  }
  digit_sum(num %/% 10) + num %% 10
}
# Prompt for an integer and report the sum of its digits; input is properly
# validated here (NA and negatives rejected) before calling digit_sum.
num = as.integer(readline(prompt = "Enter a positive integer number :: "))
if(!is.na(num) && num>=0){
  result = digit_sum(num)
  cat("Sum of digits of ",num," is = ",result)
}else{
  cat(num," is an Invalid Input for the given function")
}
| /Prac1.R | no_license | JasbirCodeSpace/MachineLearning | R | false | false | 2,288 | r | setwd('C:\\Users\\JASBIR-PC\\Desktop\\1223\\1223\\1223\\Programs')
#1(a)
find_max<- function(v){
max = v[1]
for (i in 2:length(v)) {
if(v[i]>max){
max = v[i]
}
}
return(max)
}
vector = c(read.csv("data.csv")[,5])
MAX = find_max(vector)
MAX
#1(b)
sum_even=function(v){
sum=0
for(i in 1:length(v)){
if(v[i]%%2==0){
sum=sum+v[i]
}
}
return(sum)
}
SUM = sum_even(vector)
SUM
#1(c)
search_vector = function(v){
num = as.numeric(readline(prompt = "Enter the number to search :: "))
if(is.numeric(num)){
for(i in 1:length(v)){
if(v[i] == num){
return(i)
}
}
}else{
return(-1)
}
}
check = search_vector(vector)
if(check==-1){
print("NOT FOUND")
}else{
cat("FOUND AT LOCATION ::",check)
}
#1(d)
factorial_num = function(num){
if(num==0){
return(1)
}else{
return(num*factorial_num(num-1))
}
}
num = as.integer(readline(prompt = "Enter a positive integer number :: "))
if(is.integer(num) && num>=0){
result = factorial_num(num)
cat("Factorial of ",num ," is = ",result)
}else{
cat("INVALID INPUT")
}
#1(d)
Mean <- function(v){
total=0
for(i in 1:length(v)){
total = total + v[i]
}
return(total/length(v))
}
Std_Dev<-function(v){
error =0
Mean = Mean(v)
for(i in 1:length(v)){
error = error+(v[i]-Mean)^2
}
return(sqrt(error/(length(v)-1)))
}
M = Mean(vector)
std_Dev = Std_Dev(vector)
#1(e)
Check_Prime = function(num){
if(num<=1){
return(FALSE)
}
for(i in 2:ceiling(num/2)){
if(num%%i ==0){
return(FALSE)
}
}
return(TRUE)
}
num = as.integer(readline(prompt = "Enter a positive integer number :: "))
check = Check_Prime(num)
if(check){
cat(num," is a Prime Number")
}else{
cat(num," is not a Prime Number")
}
#1(f)
digit_sum <- function(num){
if(num==0){
return(num)
}else{
return(num%%10+digit_sum(floor(num/10)))
}
}
num = as.integer(readline(prompt = "Enter a positive integer number :: "))
if(!is.na(num) && num>=0){
result = digit_sum(num)
cat("Sum of digits of ",num," is = ",result)
}else{
cat(num," is an Invalid Input for the given function")
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.