blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
362c832e3df85b176627fdd40a320062ceca8858
|
14ab055cf7c88ebe702c16c594dbc65b7c542181
|
/alluvials/defect_rate.R
|
9f93b58c562df960b3517cee41c38362381fe64e
|
[] |
no_license
|
ryanburge/cces_panel
|
d9f83d1808d31bc8d649f23f4d1585cecb23ddbb
|
9f5a12c5ea7ab5e35124fa5abe9b226114383baa
|
refs/heads/master
| 2021-09-09T13:44:01.072012
| 2018-03-16T16:55:32
| 2018-03-16T16:55:32
| 109,161,371
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,195
|
r
|
defect_rate.R
|
## Create the Defection Rate Bar Chart
## Computes, for each 2010 religious tradition, the share of CCES panel
## respondents who reported a DIFFERENT tradition in 2014 (1 - retention pct),
## then plots it as a sorted bar chart and saves a PNG.
## Requires: `cces` data frame and `bar_rb()` theme from elsewhere in the project.
test <- cces %>%
select(caseid, religpew_10, religpew_12, religpew_14)
# Within each 2010 tradition, tabulate 2014 answers and convert to proportions.
# NOTE(review): `test` and `count` shadow base R functions of the same names.
count <- test %>% group_by(religpew_10) %>%
count(religpew_14) %>% mutate(pct = prop.table(n)) %>% ungroup(religpew_10)
# Keep only the "stayer" rows (same answer in 2010 and 2014); their pct is the
# retention rate, so 1 - pct below is the defection rate.
defect <- count[count$religpew_10==count$religpew_14,]
defect <- defect %>%
filter(n > 200) %>%
mutate(rate = 1 - pct) %>%
ungroup(religpew_10)
# The semicolon-delimited recode string is car::recode syntax, not
# dplyr::recode -- assumes car is attached; TODO confirm which recode is meant.
defect <- defect %>%
mutate(relig = recode(religpew_10, "1 = 'Protestant'; 2 = 'Catholic'; 3 = 'Mormon'; 4 = 'Orthodox'; 5 = 'Jewish'; 6 = 'Muslim'; 7 = 'Buddhist'; 8 = 'Hindu'; 9 = 'Atheist'; 10 = 'Agnostic'; 11 = 'Nothing'")) %>%
select(relig, rate)
defect %>%
ggplot(., aes(x=reorder(relig, -rate), y = rate)) + geom_col(fill = "dodgerblue3", color = "black") +
scale_y_continuous(labels = scales::percent) +
bar_rb() +
labs(x= "Religious Tradition", y = "Defection Rate", title = "The Defection Rate of Each Religious Tradition (2010-2014)", subtitle = "Defection rate is the % of Individuals Who Claimed a Different Tradition in 2014 than they did in 2010", caption = "CCES: 2010-2014")
# ggsave with no plot argument saves the last plot displayed.
ggsave(file="defection_rate_religs.png", type = "cairo-png", width = 18, height = 15)
|
59b0a7c44f3174d70f271c2e4277f02b688936a7
|
a5bd747b4d1ac2800d355b61adee8765d1cc663f
|
/R/setPriors.R
|
43bc055e9def053b6085298f880e504e8c99ef07
|
[] |
no_license
|
bonata/HMSC
|
d97cdf0a597565cbe9cf08ed7fe93c35f620a320
|
567f2f7cd118f6c405a833e56b9b5f64a9e2abf2
|
refs/heads/master
| 2020-04-03T13:56:05.314479
| 2018-09-30T16:17:52
| 2018-09-30T16:17:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,477
|
r
|
setPriors.R
|
#' @title setPriors
#'
#' @description Sets or resets priors to the Hmsc object
#'
#' @param V0 scale matrix in the Wishart prior distribution for the Beta parameters
#' @param f0 number of degrees of freedom in the Wishart prior distribution for the Beta parameters
#' @param mGamma mean for the prior multivariate Gaussian distribution for Gamma parameters
#'   (length nc*nt: one entry per covariate/trait coefficient)
#' @param UGamma covariance matrix for the prior multivariate Gaussian distribution for Gamma
#'   parameters (dimension nc*nt x nc*nt)
#' @param aSigma shape parameters of the prior for the residual variances (one per species)
#' @param bSigma rate parameters of the prior for the residual variances (one per species)
#' @param rhopw two-column matrix defining the discrete prior for the phylogenetic signal
#'   parameter rho: column 1 holds the support points, column 2 their probabilities
#' @param setDefault logical; when TRUE, every prior not supplied explicitly is reset to
#'   its default value
#'
#' @examples
#'
setPriors = function(V0=NULL, f0=NULL, mGamma=NULL, UGamma=NULL, aSigma=NULL, bSigma=NULL,
   nu=NULL, a1=NULL, b1=NULL, a2=NULL, b2=NULL, rhopw=NULL, setDefault=FALSE){
   # NOTE(review): nu, a1, b1, a2 and b2 are accepted but never used in this body;
   # either wire them into the object or remove them from the signature.
   if(!is.null(V0)){
      if(!isSymmetric(V0) || nrow(V0) != self$nc || ncol(V0) != self$nc)
         stop("HMSC.setPriors: V0 must be a positive definite matrix of size equal to number of covariates nc")
      self$V0 = V0
   } else if(setDefault){
      self$V0 = diag(self$nc)
   }
   if(!is.null(f0)){
      if(f0 < self$nc)
         stop("HMSC.setPriors: f0 must be greater than number of covariates in the model nc")
      self$f0 = f0
   } else if(setDefault){
      self$f0 = self$nc+1
   }
   if(!is.null(mGamma)){
      # Gamma holds one coefficient per covariate/trait pair, so the prior mean has
      # length nc*nt -- this must match the default rep(0, nc*nt) set below.
      # (The original checked against nc only, which contradicted its own default.)
      if(length(mGamma) != self$nc*self$nt)
         stop("HMSC.setPriors: mGamma must be a vector of length nc*nt")
      self$mGamma = mGamma
   } else if(setDefault){
      self$mGamma = rep(0, self$nc*self$nt)
   }
   if(!is.null(UGamma)){
      # Must be square of dimension nc*nt, matching diag(nc*nt) default below.
      # (The original checked against nc while its own error message said nc x nt.)
      if(!isSymmetric(UGamma) || nrow(UGamma) != self$nc*self$nt || ncol(UGamma) != self$nc*self$nt)
         stop("HMSC.setPriors: UGamma must be a positive definite matrix of size equal to nc*nt x nc*nt")
      self$UGamma = UGamma
   } else if(setDefault){
      self$UGamma = diag(self$nc * self$nt)
   }
   if(!is.null(aSigma)){
      self$aSigma = aSigma
   } else if(setDefault){
      self$aSigma = rep(1, self$ns)
   }
   if(!is.null(bSigma)){
      self$bSigma = bSigma
   } else if(setDefault){
      self$bSigma = rep(0.3, self$ns)
   }
   if(!is.null(rhopw)){
      if(is.null(self$C))
         stop("HMSC.setPriors: prior for phylogeny given, but no phylogenetic relationship matrix was specified")
      if(ncol(rhopw)!=2)
         stop("HMSC.setPriors: rhopw must be a matrix with two columns")
      self$rhopw = rhopw
   } else if(setDefault){
      # Default: uniform grid of 101 rho values on [0, 1]; half the prior mass on
      # rho = 0 (no phylogenetic signal), the rest spread evenly over the grid.
      rhoN = 100
      self$rhopw = cbind(c(0:rhoN)/rhoN, c(0.5,rep(0.5/rhoN,rhoN)))
   }
}
Hmsc$set("public", "setPriors", setPriors, overwrite=TRUE)
|
7d816391dfb9499b215825a0193ac068ff88351e
|
50b816887e74e050ff9e3ad03da691835e6be6a2
|
/r_scripts/work/hw4.R
|
ffd273e910489e27dbfb0b701cea0491adbce8b6
|
[] |
no_license
|
alexsalo/time-series-class
|
d79964abf5e472d9f88b0e8179e6dd597f8ca692
|
f65f957074745c35732ea8fffce973d40452d604
|
refs/heads/master
| 2020-12-07T17:09:33.692052
| 2016-01-19T16:35:18
| 2016-01-19T16:35:18
| 41,764,052
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,146
|
r
|
hw4.R
|
# Load the timeslab helper functions and their compiled 64-bit backend.
source('C:/Dropbox/Baylor/TimeSeries/R/ts_hw/ts_hw/timeslab.R')
dyn.load('C:/Dropbox/Baylor/TimeSeries/R/ts_hw/ts_hw/timeslab2015_64.dll')
# NOTE(review): unconditional install.packages() on every run needs network
# access; consider guarding with if (!requireNamespace("knitr", quietly = TRUE)).
install.packages('knitr', dependencies = TRUE)
library(knitr)
### C4.2
## Simulate an ensemble of white-noise series, draw each sample correlogram in a
## blue-green gradient, and overlay the lag-wise ensemble mean in black.
##
## n     - length of each white-noise realization
## nreps - number of realizations in the ensemble
## m     - number of correlogram lags (passed to timeslab's corr())
## sig2  - third argument of rnorm(), i.e. the standard deviation -- despite the
##         name it is NOT squared here; TODO confirm intended parameterization
## Returns: vector of mean correlations across the ensemble, one per lag.
gen_wn_corr <- function(n, nreps=20, m=25, sig2=1)
{
  data = vector()
  for (i in 1:nreps){ # Creating the Ensemble
    data = rbind(data, corr(rnorm(n,0,sig2), m)$corr)
  }
  # Fixed axis limits so panels are comparable across n. (The original assigned
  # max(data)/min(data) and immediately overwrote both -- dead stores that also
  # shadowed base::max/base::min; removed.)
  ymax = 0.4
  ymin = -0.4
  R = 0.3; G = 0.4; B = 0.9;
  color = rgb(R, G, B, 0.6)
  plot(data[1,1:m], ylim=c(ymin, ymax), type="l", xlab = "v", col=color,
       ylab = expression(hat(rho[v])), main = "Correlogram")
  # Hoisted out of the loop: the gradient vector is loop-invariant.
  greens = seq(0, 0.5, length.out=nreps)
  for (i in 2:nreps){
    color = rgb(R, G + greens[i], B, 0.6)
    lines(data[i,1:m], type="l", col=color)
  }
  abline(h=0.0, col = 'red', lwd=1.5)
  means = colMeans(data)
  lines(means, type="l", lwd=2)
  return(means)
}
# Single run at n = 75, then repeat for increasing sample sizes in a 3x1 panel
# (the correlogram should tighten around zero as n grows).
cors.mean = gen_wn_corr(75, nreps=20, m=25, sig2=1)
mean(cors.mean)
par(mfrow=c(3,1),mar = c(2,2,2,2))
for (n in c(30, 250, 2500)) {
cors.mean = gen_wn_corr(n, nreps=20, m=25, sig2=1)
}
### C4.3
## Simulate an ensemble of white-noise series, plot each log-periodogram
## (standardized and clamped via timeslab's stdf to keep the log finite),
## and overlay the log of the ensemble-mean periodogram.
##
## n     - length of each realization (m = n/2 + 1 Fourier frequencies plotted)
## nreps - ensemble size
## sig2  - third argument of rnorm() (standard deviation) and the reference
##         level for the red line
## Returns: vector of mean (raw, un-logged) periodogram ordinates.
gen_wn_perdgm <- function(n, nreps=20, sig2=1)
{
  # Log of the standardized periodogram, clamped to [exp(-6), exp(6)].
  lognorm = function(x){log(stdf(x, sig2, exp(-6), exp(6)))}
  data = vector()
  for (i in 1:nreps){ # Creating the Ensemble
    data = rbind(data, perdgm(rnorm(n,0,sig2)))
  }
  R = 0.3; G = 0.4; B = 0.9;
  color = rgb(R, G, B, 0.6)
  m = (n/2)+1  # number of Fourier frequencies in [0, 0.5]
  plot(freqs(n), lognorm(data[1,1:m]), type="l", col=color,
       xlim = c(0, 0.5), ylim = c(-6, 6), main = "Periodogram",
       xlab = expression(omega), ylab = expression(hat(f)*(omega)))
  # Hoisted out of the loop: the gradient vector is loop-invariant.
  greens = seq(0, 0.5, length.out=nreps)
  for (i in 2:nreps){
    color = rgb(R, G + greens[i], B, 0.6)
    lines(freqs(n), lognorm(data[i,1:m]), type="l", col=color)
  }
  abline(h=lognorm(sig2), col = 'red', lwd=1.5)
  means = colMeans(data)
  lines(freqs(n), lognorm(means), type="l", lwd=2)
  return(means)
}
# Single run at n = 75, then repeat for increasing n (with sig2 = 5) in a 3x1 panel.
perd.mean = gen_wn_perdgm(n=75, nreps=20, sig2=1)
mean(perd.mean)
par(mfrow=c(3,1),mar = c(2,2,2,2))
for (n in c(30, 75, 500)) {
perd.mean = gen_wn_perdgm(n, nreps=20, sig2=5)
}
### C4.4
# Filter white noise through several MA-style filters (timeslab's filt) and
# show the descriptive plots for each filtered series.
x = wn(n=200); descplot(x, m=20)
y = filt(beta=c(0.9), beta0=1, x=x); descplot(y, m=20)
y = filt(beta=c(-0.9), beta0=1, x=x); descplot(y, m=20)
y = filt(beta=c(0, 0, 0, 0.8), beta0=1, x=x); descplot(y, m=20)
y = filt(beta=c(-0.7, -0.1, 0.6), beta0=1, x=x); descplot(y, m=20)
### C4.5
# Compare a linear-trend-plus-noise series with its first difference:
# time plots, correlograms with white-noise confidence bands, and spectra,
# arranged in a 3x2 panel. Uses timeslab's corr/perdgm/plotsp.
#   a, b  - intercept and slope of the trend
#   sig2  - third argument of rnorm() (standard deviation of the noise)
#   n     - series length
linearTrendDiff=function(a,b,sig2,n){
lt = b*seq(1,n) + a + rnorm(n,0,sig2)
# Differencing removes the linear trend; the differenced mean should be b.
d = diff(lt, 1)
#
lt.corr=corr(lt,40)$corr; d.corr=corr(d,40)$corr
lt.var=corr(lt,40)$var; d.var=corr(d,40)$var
lt.pgdm=perdgm(lt); d.pgdm=perdgm(d)
#
par(mfrow=c(3,2),mar = c(2,2,2,2))
plot(lt, type="l", main="Linear Trend Time Plot")
plot(d, type="l", main="First Diff Time Plot")
abline(h=b, lwd=2, lty=2)
#
# Simultaneous white-noise band for 40 lags (Bonferroni-style adjustment).
lim <- qnorm((1+(1-0.05)^(1/40))/2)/sqrt(n)
plot(lt.corr, ylim = c(-1,1), type="l", main="Linear Trend Correlogram")
abline(h=lim, lty=2); abline(h=-lim, lty=2)
plot(d.corr, ylim = c(-1,1), type="l", main="First Diff Correlogram")
abline(h=lim, lty=2); abline(h=-lim, lty=2)
#
plotsp(lt.pgdm, n, lt.var)
plotsp(d.pgdm, n-1, d.var) #n-1 for difference
}
linearTrendDiff(1,2,0.25,200)
|
82e773c0efb3e7a27fc7d7c67e170969b3fa2b90
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/palm/inst/testfiles/euc_distances/libFuzzer_euc_distances/euc_distances_valgrind_files/1612968324-test.R
|
3f9480b831eca5bff2c9d45139d7ed43154c9839
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 748
|
r
|
1612968324-test.R
|
# Auto-generated libFuzzer/valgrind reproduction case: calls the compiled
# palm:::euc_distances routine with extreme magnitudes, NaNs and an empty
# vector, and prints the structure of whatever comes back. Do not hand-edit
# the input values -- they are the minimized crash/leak reproducer.
testlist <- list(x1 = -1.50493695303394e+206, x2 = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y1 = c(2.81773963954832e-202, 5.18365505937542e-130, NaN, 3.32535613919497e-93, 2.77448001743148e+180, 3.07594210200033e-304, 1.27654717701454e+74, NaN, NaN, NaN, 1.38523893523259e-309, 0, -5.48612406879369e+303, -4.15087034103719e+305, 2.77448001764258e+180, 2.77448001762435e+180, 2.77448001762435e+180, -1.78179153423514e+92, NaN, 2.77448001764258e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.15162051651561e-312 ), y2 = numeric(0))
result <- do.call(palm:::euc_distances,testlist)
str(result)
|
45376ebab3b4818da9f75fc5764bf1506b335a7a
|
90b16bff7561d50820e03bc0704d1e1f681dd570
|
/scripts/collect_resid_for_assoc.R
|
69c099775031422ff68bfa7cc06dbd3148865282
|
[] |
no_license
|
e-macdonald-dunlop/catalogue_omics_clocks
|
d08a177a21e9fdebc94f93100e54fcf180167ee5
|
67533bbe8d5fe2c3c85c04a4c987e7f4dcd4700b
|
refs/heads/main
| 2023-03-19T06:26:39.548487
| 2021-03-05T11:01:18
| 2021-03-05T11:01:18
| 333,200,525
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,887
|
r
|
collect_resid_for_assoc.R
|
# Collect per-clock residuals (predicted minus observed age) from every
# omics panel / clock type / fitting method combination, merge them into one
# wide table keyed by iid, attach age at venesection, and write both tables.
library(data.table)
library(yaml)
library(ggplot2)
library(plyr)
library(reshape2)
library(RColorBrewer)
library(gdata)
library(Hmisc)
args<-commandArgs(T)
if(!file.exists(args[1])){dir.create(args[1])}
setwd(args[1])
source("/exports/igmm/eddie/wilson-lab/projects/prj_107_omics_clock_pipeline/scripts/eddie/pipeline_functions.R")
# NOTE(review): this overwrites the command-line args vector read above -- from
# here on `args` is the hard-coded list of omics panels, and only args[1]
# (used for setwd above) came from the command line. Confirm this is intended.
args<-c("dexa/new","horvath_cpgs","lipidomics","metabolon_metabolomics_new_new","pheno/fewer","hannum_cpgs","igg_glycomics","metabolon_complex_lipids_new","nmr","protein_new","combined_new")
heading("Get Clocks")
#read in iids
base<-fread("/exports/igmm/eddie/wilson-lab/data/base_data/orcades/phenotypes/orcades_base_phenotypes.tsv",data.table=F)
base<-base[,"iid",drop=F]
base<-base[!grepl("NIMS",base$iid),,drop=FALSE]
full<-base
# Templates with XXX/YYY/QQQ/ZZZ placeholders for method/panel/clock-type/split.
template_path<-"/exports/igmm/eddie/wilson-lab/projects/prj_086_omics_clocks/final/XXX/YYY/QQQ/st03_pred_obs_resid_ZZZ_1.tsv"
template_path_2<-"/exports/igmm/eddie/wilson-lab/projects/prj_086_omics_clocks/final/XXX/YYY/QQQ/st03_ZZZ_data_1.tsv"
methods<-c("fixed_alpha","lasso","cv_alpha") #
data_set<-c("testing","training")
clock_types<-c("p03_make_clock","p06_core_model_prediction","a_minus_b/p03_make_clock","b_only/p03_make_clock","a_only/p03_make_clock","3_pcs/p03_make_clock","5_pcs/p03_make_clock","10_pcs/p03_make_clock","20_pcs/p03_make_clock") #
# For each combination: read testing residuals, stack training residuals under
# them, rename the residual column to panel_method_clocktype, and left-join
# onto `full` by iid. The two branches differ only in the iid-file path: the
# core-model branch substitutes "core" for "1" in template_path_2.
for(panel in args){
writeLines(paste0(panel,"\n"))
for(clock_type in clock_types){
writeLines(paste0(clock_type,"\n"))
for(method in methods){
if(clock_type=="p06_core_model_prediction"){
#testing
if(file.exists(paste0(gsub("XXX",method,gsub("YYY",panel,gsub("QQQ",clock_type,gsub("ZZZ","testing",template_path))))))){
panel_data<-read.table(paste0(gsub("XXX",method,gsub("YYY",panel,gsub("QQQ",clock_type,gsub("ZZZ","testing",template_path))))),header=T,stringsAsFactors=F)
iids<-fread(paste0(gsub("XXX",method,gsub("YYY",panel,gsub("QQQ",clock_type,gsub("ZZZ","testing",gsub("1","core",template_path_2)))))),header=T,select="iid",stringsAsFactors=F,data.table=F)
panel_data<-cbind(iids,panel_data)
for_full<-panel_data[,c("iid","resid")]
#training
panel_data<-read.table(paste0(gsub("XXX",method,gsub("YYY",panel,gsub("QQQ",clock_type,gsub("ZZZ","training",template_path))))),header=T,stringsAsFactors=F)
iids<-fread(paste0(gsub("XXX",method,gsub("YYY",panel,gsub("QQQ",clock_type,gsub("ZZZ","training",gsub("1","core",template_path_2)))))),header=T,select="iid",stringsAsFactors=F,data.table=F)
panel_data<-cbind(iids,panel_data)
panel_data<-panel_data[,c("iid","resid")]
for_full<-rbind(for_full,panel_data)
names(for_full)[2]<-paste0(panel,"_",method,"_",clock_type)
full<-merge(full,for_full,by="iid",all.x=T)
}
}else{
#testing
if(file.exists(paste0(gsub("XXX",method,gsub("YYY",panel,gsub("QQQ",clock_type,gsub("ZZZ","testing",template_path))))))){
panel_data<-read.table(paste0(gsub("XXX",method,gsub("YYY",panel,gsub("QQQ",clock_type,gsub("ZZZ","testing",template_path))))),header=T,stringsAsFactors=F)
iids<-fread(paste0(gsub("XXX",method,gsub("YYY",panel,gsub("QQQ",clock_type,gsub("ZZZ","testing",template_path_2))))),header=T,select="iid",stringsAsFactors=F,data.table=F)
panel_data<-cbind(iids,panel_data)
for_full<-panel_data[,c("iid","resid")]
#training
panel_data<-read.table(paste0(gsub("XXX",method,gsub("YYY",panel,gsub("QQQ",clock_type,gsub("ZZZ","training",template_path))))),header=T,stringsAsFactors=F)
iids<-fread(paste0(gsub("XXX",method,gsub("YYY",panel,gsub("QQQ",clock_type,gsub("ZZZ","training",template_path_2))))),header=T,select="iid",stringsAsFactors=F,data.table=F)
panel_data<-cbind(iids,panel_data)
panel_data<-panel_data[,c("iid","resid")]
for_full<-rbind(for_full,panel_data)
names(for_full)[2]<-paste0(panel,"_",method,"_",clock_type)
full<-merge(full,for_full,by="iid",all.x=T)
}
}
}
}
}
head(full)
dim(full)
write.table(full,"all_clock_types_resid_training_testing_13_07_2020.tsv",col.names=TRUE,row.names=FALSE,quote=FALSE,sep="\t")
#read in age_at_vene
data<-fread("/exports/igmm/eddie/wilson-lab/data/processing/orcades/phenotypes/p03_age_at_vene/age_month_of_vene.tsv",data.table=F)
head(data)
dim(data)
age<-data[,c("iid","age_at_vene")]
head(age)
dim(age)
final_data<-merge(full,age,by="iid",all.x=T)
#need to change names
colnames(final_data)<-gsub("dexa/new","dexa",colnames(final_data))
colnames(final_data)<-gsub("metabolon_metabolomics_new_new","metabolon_metabolomics_new",colnames(final_data))
colnames(final_data)<-gsub("pheno/fewer","pheno",colnames(final_data))
#need to remove slashes fromthe names
colnames(final_data)<-gsub("\\/","_",colnames(final_data))
write.table(final_data,"all_clocks_resid_training_testing_age_13_07_2020.tsv",col.names=T,row.names=F,quote=F,sep="\t")
|
bb9f9c57754d10876f7609366d8cf4ce4d1f62b6
|
91b827e48e510661d90d81d0155130b2703ae51d
|
/analysis/2-binary-adjusted/2c-PR-prev-ipcw.R
|
eff209b0c065e5733c37e542fe0fb4b90020b448
|
[] |
no_license
|
LaKwong/WBB-STH-Kato-Katz
|
2f0cf1a906ced8badb35b18df72adbcf41013534
|
d4d97093fb4e04f6cb7bcf919e41c42ea25cff83
|
refs/heads/master
| 2020-09-02T05:37:17.925364
| 2019-04-24T19:43:26
| 2019-04-24T19:43:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,680
|
r
|
2c-PR-prev-ipcw.R
|
##############################################
# WASH Benefits Bangladesh STH Kato-Katz Study
# Primary outcome analysis
# STH IPCW analysis
# by Jade Benjamin-Chung
# jadebc@berkeley.edu
##############################################
# NOTE(review): rm(list=ls()) wipes the caller's workspace; avoid when sourcing.
rm(list=ls())
source(here::here("0-config.R"))
#----------------------------------------------
# load and pre-process analysis dataset
#----------------------------------------------
data = read.csv(sth_data_path,stringsAsFactors=TRUE)
d=preprocess.sth(data)
d=preprocess.adj.sth(d)
# roof and landphone excluded due to low prevalence
W=c("hfiacat","index","momage","momheight","momedu",
"Nlt18","Ncomp","watmin","walls","floor",
"elec","asset_wardrobe","asset_table","asset_chair","asset_khat","asset_chouki",
"asset_tv","asset_refrig","asset_bike","asset_moto","asset_sewmach","asset_mobile")
# Analysis frame: design columns, the four outcomes (sth/al/hw/tt), the
# censoring indicator (hasoutcome, used as Delta for IPCW), and covariates W.
dW=d[,c("block","tr","clusterid","sth","al","hw","tt","hasoutcome",W)]
#----------------------------------------------
# H1: Unadjusted prevalence ratios; each arm vs.
# control. PR, CI, P-value
#----------------------------------------------
trlist=c("Water","Sanitation","Handwashing",
"WSH","Nutrition","Nutrition + WSH")
# SuperLearner library used for both the outcome (Q) and censoring (g) models.
SL.library=c("SL.mean","SL.glm","SL.bayesglm","SL.gam","SL.glmnet")
# One washb_tmle fit per treatment contrast; Delta=hasoutcome makes these
# IPCW-TMLE estimates that account for missing outcomes.
est.al.h1=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW$al,tr=dW$tr,
pair=dW$block, id=dW$block,W=dW[,W],Delta=dW$hasoutcome,
family="binomial",contrast=c("Control",x),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
est.hw.h1=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW$hw,tr=dW$tr,
pair=dW$block, id=dW$block,W=dW[,W],Delta=dW$hasoutcome,
family="binomial",contrast=c("Control",x),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
est.tt.h1=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW$tt,tr=dW$tr,
pair=dW$block, id=dW$block,W=dW[,W],Delta=dW$hasoutcome,
family="binomial",contrast=c("Control",x),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
est.sth.h1=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW$sth,tr=dW$tr,
pair=dW$block, id=dW$block,W=dW[,W],Delta=dW$hasoutcome,
family="binomial",contrast=c("Control",x),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
# Extract prevalence ratios (rr) and differences (rd) for each outcome.
al_rr_h1_ipcw_j=format.tmle(est.al.h1,family="binomial")$rr
al_rd_h1_ipcw_j=format.tmle(est.al.h1,family="binomial")$rd
hw_rr_h1_ipcw_j=format.tmle(est.hw.h1,family="binomial")$rr
hw_rd_h1_ipcw_j=format.tmle(est.hw.h1,family="binomial")$rd
tt_rr_h1_ipcw_j=format.tmle(est.tt.h1,family="binomial")$rr
tt_rd_h1_ipcw_j=format.tmle(est.tt.h1,family="binomial")$rd
sth_rr_h1_ipcw_j=format.tmle(est.sth.h1,family="binomial")$rr
sth_rd_h1_ipcw_j=format.tmle(est.sth.h1,family="binomial")$rd
# Row labels follow the order of trlist above.
rownames(al_rr_h1_ipcw_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(hw_rr_h1_ipcw_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(tt_rr_h1_ipcw_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(sth_rr_h1_ipcw_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(al_rd_h1_ipcw_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(hw_rd_h1_ipcw_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(tt_rd_h1_ipcw_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(sth_rd_h1_ipcw_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
#----------------------------------------------
# H2: Unadjusted prevalence ratios; combined WSH vs.
# single arms. PR, CI, P-value
#----------------------------------------------
trlist=c("Water","Sanitation","Handwashing")
est.al.h2=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW$al,tr=dW$tr,
pair=dW$block, id=dW$block,W=dW[,W],Delta=dW$hasoutcome,
family="binomial",contrast=c(x,"WSH"),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
est.hw.h2=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW$hw,tr=dW$tr,
pair=dW$block, id=dW$block,W=dW[,W],Delta=dW$hasoutcome,
family="binomial",contrast=c(x,"WSH"),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
est.tt.h2=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW$tt,tr=dW$tr,
pair=dW$block, id=dW$block,W=dW[,W],Delta=dW$hasoutcome,
family="binomial",contrast=c(x,"WSH"),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
est.sth.h2=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW$sth,tr=dW$tr,
pair=dW$block, id=dW$block,W=dW[,W],Delta=dW$hasoutcome,
family="binomial",contrast=c(x,"WSH"),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
al_rr_h2_ipcw_j=format.tmle(est.al.h2,family="binomial")$rr
al_rd_h2_ipcw_j=format.tmle(est.al.h2,family="binomial")$rd
hw_rr_h2_ipcw_j=format.tmle(est.hw.h2,family="binomial")$rr
hw_rd_h2_ipcw_j=format.tmle(est.hw.h2,family="binomial")$rd
tt_rr_h2_ipcw_j=format.tmle(est.tt.h2,family="binomial")$rr
tt_rd_h2_ipcw_j=format.tmle(est.tt.h2,family="binomial")$rd
sth_rr_h2_ipcw_j=format.tmle(est.sth.h2,family="binomial")$rr
sth_rd_h2_ipcw_j=format.tmle(est.sth.h2,family="binomial")$rd
rownames(al_rr_h2_ipcw_j)=c("WSH vs Water","WSH vs Sanitation","WSH vs Handwashing")
rownames(hw_rr_h2_ipcw_j)=c("WSH vs Water","WSH vs Sanitation","WSH vs Handwashing")
rownames(tt_rr_h2_ipcw_j)=c("WSH vs Water","WSH vs Sanitation","WSH vs Handwashing")
rownames(sth_rr_h2_ipcw_j)=c("WSH vs Water","WSH vs Sanitation","WSH vs Handwashing")
rownames(al_rd_h2_ipcw_j)=c("WSH vs Water","WSH vs Sanitation","WSH vs Handwashing")
rownames(hw_rd_h2_ipcw_j)=c("WSH vs Water","WSH vs Sanitation","WSH vs Handwashing")
rownames(tt_rd_h2_ipcw_j)=c("WSH vs Water","WSH vs Sanitation","WSH vs Handwashing")
rownames(sth_rd_h2_ipcw_j)=c("WSH vs Water","WSH vs Sanitation","WSH vs Handwashing")
#----------------------------------------------
# H3: Unadjusted prevalence ratios; combined WSH Nvs.
# single arms. PR, CI, P-value
#----------------------------------------------
trlist=c("WSH","Nutrition")
est.al.h3=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW$al,tr=dW$tr,
pair=dW$block, id=dW$block,W=dW[,W],Delta=dW$hasoutcome,
family="binomial",contrast=c(x,"Nutrition + WSH"),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
est.hw.h3=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW$hw,tr=dW$tr,
pair=dW$block, id=dW$block,W=dW[,W],Delta=dW$hasoutcome,
family="binomial",contrast=c(x,"Nutrition + WSH"),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
est.tt.h3=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW$tt,tr=dW$tr,
pair=dW$block, id=dW$block,W=dW[,W],Delta=dW$hasoutcome,
family="binomial",contrast=c(x,"Nutrition + WSH"),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
est.sth.h3=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW$sth,tr=dW$tr,
pair=dW$block, id=dW$block,W=dW[,W],Delta=dW$hasoutcome,
family="binomial",contrast=c(x,"Nutrition + WSH"),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
al_rr_h3_ipcw_j=format.tmle(est.al.h3,family="binomial")$rr
al_rd_h3_ipcw_j=format.tmle(est.al.h3,family="binomial")$rd
hw_rr_h3_ipcw_j=format.tmle(est.hw.h3,family="binomial")$rr
hw_rd_h3_ipcw_j=format.tmle(est.hw.h3,family="binomial")$rd
tt_rr_h3_ipcw_j=format.tmle(est.tt.h3,family="binomial")$rr
tt_rd_h3_ipcw_j=format.tmle(est.tt.h3,family="binomial")$rd
sth_rr_h3_ipcw_j=format.tmle(est.sth.h3,family="binomial")$rr
sth_rd_h3_ipcw_j=format.tmle(est.sth.h3,family="binomial")$rd
rownames(al_rr_h3_ipcw_j)=c("Nutrition + WSH vs WSH","Nutrition + WSH vs Nutrition")
rownames(hw_rr_h3_ipcw_j)=c("Nutrition + WSH vs WSH","Nutrition + WSH vs Nutrition")
rownames(tt_rr_h3_ipcw_j)=c("Nutrition + WSH vs WSH","Nutrition + WSH vs Nutrition")
rownames(sth_rr_h3_ipcw_j)=c("Nutrition + WSH vs WSH","Nutrition + WSH vs Nutrition")
rownames(al_rd_h3_ipcw_j)=c("Nutrition + WSH vs WSH","Nutrition + WSH vs Nutrition")
rownames(hw_rd_h3_ipcw_j)=c("Nutrition + WSH vs WSH","Nutrition + WSH vs Nutrition")
rownames(tt_rd_h3_ipcw_j)=c("Nutrition + WSH vs WSH","Nutrition + WSH vs Nutrition")
rownames(sth_rd_h3_ipcw_j)=c("Nutrition + WSH vs WSH","Nutrition + WSH vs Nutrition")
#----------------------------------------------
# save objects
#----------------------------------------------
save(al_rr_h1_ipcw_j,hw_rr_h1_ipcw_j,tt_rr_h1_ipcw_j,sth_rr_h1_ipcw_j,
al_rd_h1_ipcw_j,hw_rd_h1_ipcw_j,tt_rd_h1_ipcw_j,sth_rd_h1_ipcw_j,
al_rr_h2_ipcw_j,hw_rr_h2_ipcw_j,tt_rr_h2_ipcw_j,sth_rr_h2_ipcw_j,
al_rd_h2_ipcw_j,hw_rd_h2_ipcw_j,tt_rd_h2_ipcw_j,sth_rd_h2_ipcw_j,
al_rr_h3_ipcw_j,hw_rr_h3_ipcw_j,tt_rr_h3_ipcw_j,sth_rr_h3_ipcw_j,
al_rd_h3_ipcw_j,hw_rd_h3_ipcw_j,tt_rd_h3_ipcw_j,sth_rd_h3_ipcw_j,
file=paste0(save_data_path, "sth_pr_ipcw.RData"))
|
44a39e8291d641dee3c6e66a9a321bcea032fa3f
|
031d1fc59f9c6b4e66d38d799e4039ecf4be235a
|
/R/cluster/scr_move_DA_rsig.R
|
9327d3b37ce3f2ebf3a15705ffb84dc231b00582
|
[
"MIT"
] |
permissive
|
rbchan/scr-move
|
cc76c59795996843b63efd312e3901525354cc2f
|
30d3ed9f8c3f554b6867f6dc923a6fa143de4551
|
refs/heads/main
| 2023-04-11T11:57:30.973400
| 2021-10-31T14:11:56
| 2021-10-31T14:11:56
| 355,622,082
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,010
|
r
|
scr_move_DA_rsig.R
|
## Fit SCR+move model in JAGS and using custom Gibbs sampler
## Must include model for capture of telemetered guys
## Otherwise, some of them would have all zero encounter histories, which
## isn't consistent with the (S)CR likelihood
## Runs three chained MCMC stages (short burn-in, medium, long) on a cluster,
## with one worker per chain, each using OpenMP threads internally.
## First we have to build the R package on the cluster
print(olibs <- .libPaths())
.libPaths("./") ## Add local library for new package
print(.libPaths())
install.packages("../../scrmove", repos=NULL, lib=.libPaths()[1])
## library(scrmove)
## Load the data
load("../deer_scr_telem.RData")
## Set up the workers (one core for each chain)
library(parallel)
nChains <- 4
(nCores <- detectCores())
# NOTE(review): assumes detectCores() is a multiple of nChains; a non-integer
# nThreads would be passed to OMP_NUM_THREADS below -- confirm on the cluster.
nThreads <- nCores/nChains
if(file.exists("parallel_outfile_DA_rsig.Rout")) unlink("parallel_outfile_DA_rsig.Rout")
cl1 <- makeCluster(nChains, outfile="parallel_outfile_DA_rsig.Rout")
clusterExport(cl1, c("y.cap", "y.det", "u", "cam.locs", "oper", "nThreads"))
## Set environment variables and load package on each core
invisible(clusterEvalQ(cl1, {
Sys.setenv(OMP_PLACES = "threads")
Sys.setenv(OMP_PROC_BIND = "true")
# NOTE(review): this overwrites the OMP_PROC_BIND="true" set on the previous
# line; only "spread" takes effect.
Sys.setenv(OMP_PROC_BIND = "spread")
Sys.setenv(OMP_SCHEDULE = "dynamic")
Sys.setenv(OMP_NUM_THREADS = nThreads)
## print(Sys.getenv())
print(system("env | grep ^OMP*"))
## source("../scr_move_semi_capdet_mcmc.R")
.libPaths("./")
library(scrmove, lib.loc=.libPaths()[1])
}))
## if(1==2) {
## source("../../scrmove/R/scr_move_DA_rsig_capdet_mcmc.R")
## }
## Do a short run to evaluate performance and burnin.
system.time({
fm5p.1 <- clusterEvalQ(cl1, {
fmp1 <- scrMoveDArsig(ycap=y.cap,
ydet=y.det,
u=u,
M=300,
x=cam.locs,
oper=oper,
random.sig=TRUE,
plotit=FALSE,
n.iters=1000, n.mci=200L,
buffer=5000, trim=100, nthreads=nThreads,
report=100, verbose=FALSE,
## block.rhosigma=TRUE,
block.lam0kappa=TRUE,
## tune order: rho, log.sigma, log.sigma.mu, log.sigma.sd
## lam0, kappa, p, s, u, cov(lam0,kappa)
tune=c(6.9e-4, 0.4, 0.04, 0.02, 0.15, 1.5, 0.05, 300, 300, 3.01e-3))
return(fmp1)
})
})
## save(fmp1, file="fmp4_1-1.gzip")
save(fm5p.1, file="fm5p_1.gzip")
## Do a longer run without storing posterior samples of s and u
## (resumes from the final state of the short run via inits=fmp1$final.state,
## which lives in each worker's environment from the previous clusterEvalQ).
system.time({
fm5p.2 <- clusterEvalQ(cl1, {
fmp2 <- scrMoveDArsig(ycap=y.cap,
ydet=y.det,
u=u,
M=300,
x=cam.locs,
oper=oper,
random.sig=TRUE,
plotit=FALSE,
n.iters=10000, n.mci=200L,
buffer=5000, trim=100, nthreads=nThreads,
report=100, verbose=FALSE,
inits=fmp1$final.state,
## block.rhosigma=TRUE,
block.lam0kappa=TRUE,
## tune order: rho, log.sigma, log.sigma.mu, log.sigma.sd
## lam0, kappa, p, s, u, cov(lam0,kappa)
tune=c(6.9e-4, 0.4, 0.04, 0.02, 0.15, 1.5, 0.05, 300, 300, 3.01e-3))
return(fmp2)
})
}) ## 1990 it/hr
save(fm5p.2, file="fm5p_2.gzip")
## Keep every 20th sample of s and u
## TODO: Run this with increasing values of n.mci
system.time({
fm5p.3 <- clusterEvalQ(cl1, {
fmp3 <- scrMoveDArsig(ycap=y.cap,
ydet=y.det,
u=u,
M=300,
x=cam.locs,
oper=oper,
random.sig=TRUE,
plotit=FALSE,
n.iters=100000, n.mci=200L,
buffer=5000, trim=100, nthreads=nThreads,
su.post.keep=20,
report=100, verbose=FALSE,
inits=fmp2$final.state,
## block.rhosigma=TRUE,
block.lam0kappa=TRUE,
## tune order: rho, log.sigma, log.sigma.mu, log.sigma.sd
## lam0, kappa, p, s, u, cov(lam0,kappa)
tune=c(6.9e-4, 0.4, 0.04, 0.02, 0.15, 1.5, 0.05, 300, 300, 3.01e-3))
return(fmp3)
})
}) ##
save(fm5p.3, file="fm5p_3.gzip")
stopCluster(cl1)
|
949ab8fca114c8f467258dde240a1a9f4e006ef7
|
effe14a2cd10c729731f08b501fdb9ff0b065791
|
/paws/man/s3_get_bucket_notification.Rd
|
b940414eab451c82e7f89798b5d3cbbe11499b1c
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
peoplecure/paws
|
8fccc08d40093bb25e2fdf66dd5e38820f6d335a
|
89f044704ef832a85a71249ce008f01821b1cf88
|
refs/heads/master
| 2020-06-02T16:00:40.294628
| 2019-06-08T23:00:39
| 2019-06-08T23:00:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 897
|
rd
|
s3_get_bucket_notification.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s3_operations.R
% NOTE(review): the \examples section below contains the same example twice;
% fix the roxygen source in R/s3_operations.R rather than editing this file.
\name{s3_get_bucket_notification}
\alias{s3_get_bucket_notification}
\title{No longer used, see the GetBucketNotificationConfiguration operation}
\usage{
s3_get_bucket_notification(Bucket)
}
\arguments{
\item{Bucket}{[required] Name of the bucket to get the notification configuration for.}
}
\description{
No longer used, see the GetBucketNotificationConfiguration operation.
}
\section{Request syntax}{
\preformatted{svc$get_bucket_notification(
Bucket = "string"
)
}
}
\examples{
# The following example returns notification configuration set on a
# bucket.
\donttest{svc$get_bucket_notification(
Bucket = "examplebucket"
)}
# The following example returns notification configuration set on a
# bucket.
\donttest{svc$get_bucket_notification(
Bucket = "examplebucket"
)}
}
\keyword{internal}
|
c41ef438baf688fc8e9ab41a3949f219b3c7c97c
|
467dd55648968dfe4ca81cfafb9ff233a122f59f
|
/Machine Learning/Classifiers kit/Part II/NaiveBayes.R
|
ca862ba584dbd3ce5056a4a60116ae09aeccd464
|
[] |
no_license
|
aminamp/University-Projects
|
f745b8518936d34b30c9231ec9335cae18f80b2b
|
e7c7ef3c74b56dc574cd49dcc4efae53336006d9
|
refs/heads/master
| 2021-01-18T01:25:48.195700
| 2016-09-20T17:04:32
| 2016-09-20T17:04:32
| 68,672,488
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 502
|
r
|
NaiveBayes.R
|
library(MASS)
library(class)
library(e1071)
## 10 rounds of random 90/10 hold-out validation of a naive Bayes classifier
## on the Pima Indians diabetes data (columns V1-V8 = predictors, V9 = class).
pima <- read.table("http://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data",header=F,sep=",")
n_rounds <- 10
# Preallocate: the original indexed into `accuracy` without ever creating it,
# which errors with "object 'accuracy' not found" in a fresh session.
accuracy <- numeric(n_rounds)
for (i in seq_len(n_rounds)) {
indexes = sample(1:nrow(pima), size=0.1*nrow(pima))
test = pima[indexes,]
train = pima[-indexes,]
model <- naiveBayes(as.factor(train$V9) ~ ., data = train)
pred <- predict(model, test[,1:8])
# Fraction of held-out rows classified correctly this round.
accuracy[i] <- sum(pred == test[,9])/length(pred)
}
accuracy * 100
mean(accuracy * 100)
|
22222c37740dd8b0991adb9d1cbd0107ca812f8a
|
9716a6d75781d99a1cfdfc09cb55b7f53ea46b81
|
/AUD/bop/tot/code/tot.r
|
102dbaa02d956a5e520c118546e01e492cdd40ad
|
[] |
no_license
|
ricardianambivalence/Rproject
|
17ae2017de25cb8e934fbce5bc4619b86ce49194
|
a7136c6cf654cc8c1eda4d74d98764e3037b5764
|
refs/heads/master
| 2021-01-01T06:04:47.397872
| 2014-01-26T23:37:43
| 2014-01-26T23:37:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,536
|
r
|
tot.r
|
# Terms-of-trade charts: a level/annual-average pair for the ABS terms of
# trade and the same pair for the RBA commodity export price index, each
# saved as a stacked two-panel PNG.
# {{{ packages and functions
require(ggplot2)
require(reshape2)
require(gridExtra)
require(xts)
source('~/R/Rhelpers/helperFuncts.r')
source('~/R/Rhelpers/RAcolorpal.r')
# }}}
# {{{ PATHS
projectPATH <- "~/R/aud/bop/tot/"
codePATH <- file.path(projectPATH, "code")
dataPATH <- file.path(projectPATH, "data")
plotPATH <- file.path(projectPATH, "plot")
# }}}
# get data -- beef it up to get data from abs / rba later ...
# Provides totx and rbacpxx (xts objects); meltx() is from helperFuncts.r.
load(file.path(dataPATH, "tot_CPX.rdata"))
# {{{ plots
gp_totLvl <- ggplot(subset(meltx(totx), variable %in% 'TermsOfTrade'),
aes(x = date, y = value)) +
labs(y = NULL, x = NULL) +
labs(title = "Aus. Terms of Trade (NSA): level and Annual Ave (%YoY)") +
geom_line(size = 1.2) +
geom_vline(xintercept = as.numeric(as.POSIXct("2012-12-31")),
linetype=4, col = 'red')
#
gp_annAveYoY <- ggplot(subset(meltx(totx), variable %in% 'tot_annAve_YoY'),
aes(x = date, y = value)) +
labs(y = NULL, x = NULL) +
labs(title = NULL) +
theme(legend.position = 'none') +
theme(legend.title = element_blank()) +
geom_bar(stat = 'identity', color = 'blue', fill = 'blue') +
geom_vline(xintercept = as.numeric(as.POSIXct("2012-12-31")),
linetype=4, col = 'red')
# Stack level (2/3 height) over YoY bars (1/3) and write the PNG.
png(file.path(plotPATH, "tot_twin.png"))
grid.arrange(gp_totLvl, gp_annAveYoY, heights = c(2/3, 1/3),
sub = textGrob("www.ricardianambivalence.com", x=1, hjust=1, vjust=0))
dev.off()
# Same layout for the RBA commodity export price index, windowed by date.
gp_rbaCPxLvl <- ggplot(subset(meltx(rbacpxx['19840601::20130430']), variable %in% 'RBA_CmdPxIdx'),
aes(x = date, y = value)) +
labs(y = NULL, x = NULL) +
labs(title = "RBA AUD$ Export Prices (NSA): level and Annual Ave (%YoY)") +
geom_line(size = 1.2)
#
gp_RBannAveYoY <- ggplot(subset(meltx(rbacpxx['19840601::20130430']), variable %in% 'RBA_CPx_aaYoY'),
aes(x = date, y = value)) +
labs(y = NULL, x = NULL) +
labs(title = NULL) +
theme(legend.position = 'none') +
theme(legend.title = element_blank()) +
geom_bar(stat = 'identity', color = 'blue', fill = 'blue')
png(file.path(plotPATH, "rba_twin.png"))
grid.arrange(gp_rbaCPxLvl, gp_RBannAveYoY, heights = c(2/3, 1/3),
sub = textGrob("www.ricardianambivalence.com", x=1, hjust=1, vjust=0))
dev.off()
# }}}
|
cac81a2d2d1dab6b249091cdcc422c6d3524da74
|
537404b0e7cc3794e2fb767573eafb3a8b245efa
|
/Package Installer.R
|
5df223858d2e433b4dd67130aaa09d672c430b18
|
[] |
no_license
|
anhnguyendepocen/Data-Science-3
|
b58620111f743c4d2654c38778bea4deb7d31261
|
a3e27011f3d9ee005bbb3a058b289e2f6a46dd4b
|
refs/heads/master
| 2022-02-18T08:04:04.370157
| 2019-10-04T01:25:08
| 2019-10-04T01:25:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 510
|
r
|
Package Installer.R
|
## Install any packages from `packages` that are not yet installed, then
## attach every package in `packages` to the search path.
##
## packages: character vector of package names.
## Returns a named logical result from require(): TRUE where the package
## attached successfully (logical(0) for empty input).
install <- function(packages){
  missing.packages <- packages[!(packages %in% installed.packages()[, "Package"])]
  # Explicit length comparison instead of relying on 0 being falsy.
  if (length(missing.packages) > 0)
    install.packages(missing.packages, dependencies = TRUE)
  # vapply is type-stable: always returns a logical vector, whereas sapply
  # returned an empty list() when `packages` was empty.
  vapply(packages, require, logical(1), character.only = TRUE)
}
# usage
# NOTE(review): running this attaches (and installs, if missing) every package
# below -- it needs network access the first time.
required.packages <- c("ggplot2", "dplyr", "reshape2", "devtools", "tidyverse", "caret","randomForest","knitr",
"stringr","tidyr", "leaflet","ggmap", "lubridate", "readxl", "readr","rvest", "magrittr" )
install(required.packages)
|
cbb1bb7983d12c52c627ef8f992c0b84dd2bf074
|
b79b7709a0cef8482b70f98ebd54022895bd7e73
|
/bootstrapping_exe.R
|
957003cc8df855a314f4cc214543cf95cb4ef03e
|
[] |
no_license
|
jeku7901-CU/Barcode_07_28_2020
|
07e0b549830d4390611f669946cabed6e04da504
|
52c724b8338be6ae57b12c52785a9d4782620984
|
refs/heads/master
| 2023-07-01T01:59:04.583589
| 2021-08-05T12:37:52
| 2021-08-05T12:37:52
| 283,261,887
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,399
|
r
|
bootstrapping_exe.R
|
#!/usr/bin/env Rscript
# For each sample (column) of a barcode count matrix, expand the counts into a
# long vector of repeated barcode names, draw 500 bootstrap subsamples of
# 10,000 barcodes each, and record the number of unique barcodes seen per
# subsample. Writes a 500-row table (one column per input sample) to args[2].
# args[1]: input CSV (rows = barcodes, columns = samples, cells = counts)
# args[2]: output path for the bootstrapped uniqueness table
args = commandArgs(trailingOnly=TRUE)
print(args[1])
print(args[2])
#setwd("/Users/jeku7901/olwin_dowell_labs/reanalysis_09_26_20/bootstrapping/")
pruned.merged.tab.noTRs.numeric <- read.csv(args[1], header = T, row.names = 1)
pruned.merged.tab.noTRs.numeric <- data.matrix(pruned.merged.tab.noTRs.numeric)
tmp7 <- data.frame()
for (j in 1:ncol(pruned.merged.tab.noTRs.numeric))
{
tmp1 <- list()
tmp2 <- data.frame()
tmp3 <- data.frame()
tmp4 <- data.frame()
tmp5 <- c()
tmp6 <- data.frame()
# Expand counts to long form: each barcode name repeated count times.
for (i in 1:nrow(pruned.merged.tab.noTRs.numeric))
{
if(pruned.merged.tab.noTRs.numeric[i,j] == 0){}
else {
tmp1[[i]] <- rep(row.names(pruned.merged.tab.noTRs.numeric)[i], pruned.merged.tab.noTRs.numeric[i,j]) # Fix the 0's
tmp2 <- data.frame(unlist(as.vector(tmp1))) # turns it into a longform df
}
}
# NOTE(review): the inline comments below say 5600 barcodes, but the code
# samples 10000 -- confirm which subsample size is intended.
for (k in 1:500) # 500 subsamples of 5600 barcodes each time
{
tmp3 <- sample(tmp2[,1], 10000) # samples 5600 from this longform df
tmp4 <- as.data.frame(table(tmp3)) #tabulates the repeat number for this 5600 subsample
tmp5 <- length(which(tmp4$Freq > 0)) # unique (make more than 0) ----
tmp6[k,1] <- c(tmp5)
}
var.name <- paste(as.character(colnames(pruned.merged.tab.noTRs.numeric)[j]), "bootstrapped", sep = ".")
colnames(tmp6) <- var.name
tmp7[1:500,j] <- tmp6
print(j)
}
write.table(tmp7, args[2])
|
8dacdc24a74d6e8391a699fd7914a0549669cd61
|
81449b3859ba496fa486875122321f4572c77d10
|
/cachematrix.R
|
10aaa5a18b97024581a587eea2fcc9f12e5986d3
|
[] |
no_license
|
ravinan/ProgrammingAssignment2
|
a3de7c4f8bc78921ad1b1edb42fa864e3c6c9da8
|
64cddbe8b14a81931c5572ba0163c157a453a0fc
|
refs/heads/master
| 2022-12-12T03:07:20.434880
| 2020-09-07T16:33:50
| 2020-09-07T16:33:50
| 293,095,964
| 0
| 0
| null | 2020-09-05T14:50:27
| 2020-09-05T14:50:26
| null |
UTF-8
|
R
| false
| false
| 1,512
|
r
|
cachematrix.R
|
## Two short R functions that:
## 1) store a square matrix and it's inverse (makeCacheMatrix),
## and
## 2) compute and return the matrix inverse as well as store the matrix inverse
## for future use (cacheSolve).
## The function makeCacheMatrix stores a square matrix in the local variable x and caches
## its inverse in the local variable m. The square matrix x is its only formal argument.
## The local variables are x and the matrix inverse, m.
## The setter and getter functions of makeCacheMatrix
## are: set, get, setinv, getinv
## These four functions are returned as elements of a list.
## Wrap a square matrix `x` together with a memoised inverse. Returns a list
## of four closures -- set/get for the matrix, setinv/getinv for the cached
## inverse -- all sharing this call's environment.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setinv = function(value) inv <<- value,
    getinv = function() inv
  )
}
## The function cacheSolve accesses and returns the matrix inverse of the makeCacheMatrix
## object passed to it.
## Its formal arguments are the makeCacheMatrix object, x, and ... arguments to be passed to
## the solve() function.
## Its local variables are the makeCacheMatrix object, x, and the matrix inverse, m.
## The return variable is the matrix inverse, m.
## Return the inverse of the matrix held in the makeCacheMatrix object `x`:
## computed via solve() and cached on the first call, served from the cache
## (with a message) afterwards. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinv()
  if (is.null(inv)) {
    # Cache miss: compute the inverse and remember it for next time.
    inv <- solve(x$get(), ...)
    x$setinv(inv)
  } else {
    message("getting cached data")
  }
  inv
}
|
e0a81ec2756cc8f505be03887c8110501642a7d3
|
f23b0f652e57bc759259ba57028fda8c10fc436c
|
/man/f7Select.Rd
|
f0168ec18d14dd8a4c2eeddcbbfe1de7e4e65b98
|
[] |
no_license
|
CristianPachacama/shinyMobile
|
57e2d48018196ec999a9ae5207c248283a963b76
|
7e6ef87b42e6f23fe776b9ad9f182a1363c77d04
|
refs/heads/master
| 2020-09-27T19:55:35.522503
| 2019-12-07T00:45:55
| 2019-12-07T00:45:55
| 226,598,443
| 1
| 1
| null | 2019-12-08T01:26:08
| 2019-12-08T01:26:08
| null |
UTF-8
|
R
| false
| true
| 889
|
rd
|
f7Select.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/f7-inputs.R
\name{f7Select}
\alias{f7Select}
\title{Create an f7 select input}
\usage{
f7Select(inputId, label, choices)
}
\arguments{
\item{inputId}{Select input id.}
\item{label}{Select input label.}
\item{choices}{Select input choices.}
}
\description{
Create an f7 select input
}
\examples{
if(interactive()){
library(shiny)
library(shinyMobile)
shiny::shinyApp(
ui = f7Page(
title = "My app",
f7SingleLayout(
navbar = f7Navbar(title = "f7Select"),
f7Select(
inputId = "variable",
label = "Choose a variable:",
choices = colnames(mtcars)[-1]
),
tableOutput("data")
)
),
server = function(input, output) {
output$data <- renderTable({
mtcars[, c("mpg", input$variable), drop = FALSE]
}, rownames = TRUE)
}
)
}
}
|
bff24f245d7daa5b9093bf97d74e60e14ea3fefb
|
2adc513c87e5f60db34f991e8634a1d141b0eccc
|
/tests/testthat/test-CLR.r
|
315ac72ae378d5f497ebc22570ed84e100f86906
|
[
"MIT"
] |
permissive
|
hyunseungkang/ivmodel
|
663608701e58d37ee6347c58b07eace83a0b06d5
|
825fa71c52961bc8b1066f6acb1bd24e93abcbba
|
refs/heads/master
| 2023-04-14T12:29:29.634364
| 2023-04-10T16:04:52
| 2023-04-10T16:04:52
| 71,493,810
| 9
| 2
|
MIT
| 2020-01-03T19:24:12
| 2016-10-20T18:48:54
|
R
|
UTF-8
|
R
| false
| false
| 4,232
|
r
|
test-CLR.r
|
## Regression tests for the CLR (conditional likelihood ratio) test using the
## Card schooling/wage data shipped with the package. Each test fits ivmodel()
## with a different combination of {intercept, exogenous covariates X,
## single vs multiple instruments} and compares the confidence interval,
## p-value, and test statistic against previously recorded reference values.
context("Check accuracy of CLR using Card data")
# No covariates, no intercept, one instrument (nearc4).
test_that("CLR test without exogeneous covariates and intercept, single-IV", {
  Y=card.data[,"lwage"]
  D=card.data[,"educ"]
  Z=card.data[,"nearc4"]
  foo = ivmodel(Y=Y,D=D,Z=Z,intercept=FALSE)
  teststat_out = CLR(foo)
  # Hard-coded reference values recorded from a trusted run.
  expect_equal(as.numeric(teststat_out$ci),c(0.462792094452, 0.470402558674))
  expect_equal(as.numeric(teststat_out$p.value),0)
  expect_equal(as.numeric(teststat_out$test.stat),6679.92118721)
})
# No covariates, no intercept, two instruments (nearc4, nearc2).
test_that("CLR test without exogeneous covariates and intercept, mult-IV", {
  Y=card.data[,"lwage"]
  D=card.data[,"educ"]
  Z=card.data[,c("nearc4","nearc2")]
  foo = ivmodel(Y=Y,D=D,Z=Z,intercept=FALSE)
  teststat_out = CLR(foo)
  expect_equal(as.numeric(teststat_out$ci),c(0.463889965605, 0.471283479854))
  expect_equal(as.numeric(teststat_out$p.value),0)
  expect_equal(as.numeric(teststat_out$test.stat),8327.04383792)
})
# Intercept (ivmodel default), no covariates, one instrument.
test_that("CLR test with intercept, but without exogeneous covariates, single-IV", {
  Y=card.data[,"lwage"]
  D=card.data[,"educ"]
  Z=card.data[,"nearc4"]
  foo = ivmodel(Y=Y,D=D,Z=Z)
  teststat_out = CLR(foo)
  expect_equal(as.numeric(teststat_out$ci),c(0.143037537021, 0.250862570435))
  expect_equal(as.numeric(teststat_out$p.value),0)
  expect_equal(as.numeric(teststat_out$test.stat),82.7445324192)
})
# Intercept, no covariates, two instruments.
test_that("CLR test with intercept, but without exogeneous covariates, mult-IV", {
  Y=card.data[,"lwage"]
  D=card.data[,"educ"]
  Z=card.data[,c("nearc4","nearc2")]
  foo = ivmodel(Y=Y,D=D,Z=Z)
  teststat_out = CLR(foo)
  expect_equal(as.numeric(teststat_out$ci),c(0.158839802213, 0.27417906789))
  expect_equal(as.numeric(teststat_out$p.value),0)
  expect_equal(as.numeric(teststat_out$test.stat),99.3797747842)
})
# Exogenous covariates X, no intercept, one instrument.
test_that("CLR test with exogeneous covariates, but no intercept, single-IV", {
  Y=card.data[,"lwage"]
  D=card.data[,"educ"]
  Z=card.data[,"nearc4"]
  Xname=c("exper", "expersq", "black", "south", "smsa", "reg661",
          "reg662", "reg663", "reg664", "reg665", "reg666", "reg667",
          "reg668", "smsa66")
  X=card.data[,Xname]
  foo = ivmodel(Y=Y,D=D,Z=Z,X=X,intercept=FALSE)
  teststat_out = CLR(foo)
  expect_equal(as.numeric(teststat_out$ci),c(0.280764970946, 0.346153931564))
  expect_equal(as.numeric(teststat_out$p.value),0)
  expect_equal(as.numeric(teststat_out$test.stat),113.307424185)
})
# Exogenous covariates X, no intercept, two instruments.
test_that("CLR test with exogeneous covariates, but no intercept, multi-IV", {
  Y=card.data[,"lwage"]
  D=card.data[,"educ"]
  Z=card.data[,c("nearc4","nearc2")]
  Xname=c("exper", "expersq", "black", "south", "smsa", "reg661",
          "reg662", "reg663", "reg664", "reg665", "reg666", "reg667",
          "reg668", "smsa66")
  X=card.data[,Xname]
  foo = ivmodel(Y=Y,D=D,Z=Z,X=X,intercept=FALSE)
  teststat_out = CLR(foo)
  expect_equal(as.numeric(teststat_out$ci),c(0.309132826837, 0.356869350444))
  expect_equal(as.numeric(teststat_out$p.value),0)
  expect_equal(as.numeric(teststat_out$test.stat),267.050612418)
})
# Exogenous covariates X with intercept, one instrument.
test_that("CLR test with exogeneous covariates and intercept, single-IV", {
  Y=card.data[,"lwage"]
  D=card.data[,"educ"]
  Z=card.data[,"nearc4"]
  Xname=c("exper", "expersq", "black", "south", "smsa", "reg661",
          "reg662", "reg663", "reg664", "reg665", "reg666", "reg667",
          "reg668", "smsa66")
  X=card.data[,Xname]
  foo = ivmodel(Y=Y,D=D,Z=Z,X=X)
  teststat_out = CLR(foo)
  expect_equal(as.numeric(teststat_out$ci),c(0.0248043722948, 0.284824550722))
  expect_equal(as.numeric(teststat_out$p.value),0.0200276297596)
  expect_equal(as.numeric(teststat_out$test.stat),5.41527923822)
})
# Exogenous covariates X *with* an intercept (the ivmodel() default), two
# instruments. The description previously duplicated the "no intercept,
# multi-IV" test above even though this fit includes the intercept.
test_that("CLR test with exogeneous covariates and intercept, multi-IV", {
  Y=card.data[,"lwage"]
  D=card.data[,"educ"]
  Z=card.data[,c("nearc4","nearc2")]
  Xname=c("exper", "expersq", "black", "south", "smsa", "reg661",
          "reg662", "reg663", "reg664", "reg665", "reg666", "reg667",
          "reg668", "smsa66")
  X=card.data[,Xname]
  foo = ivmodel(Y=Y,D=D,Z=Z,X=X)
  teststat_out = CLR(foo)
  # Hard-coded reference values recorded from a trusted run.
  expect_equal(as.numeric(teststat_out$ci),c(0.0621199910211, 0.336180869927))
  expect_equal(as.numeric(teststat_out$p.value),0.00346295807184)
  expect_equal(as.numeric(teststat_out$test.stat),9.26245429367)
})
|
21c5c1eb90136f5b02e04dc14af66ea129453661
|
70ceafccb7ed3005e64521551eae6657385118e5
|
/R-Portable/library/Matrix/tests/Simple.R
|
721bbd2fcf9ea32dac61c70e90a2e4bbcab68c65
|
[
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.0-or-later",
"GPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
ksasso/Electron_ShinyApp_Deployment
|
6e63686b27bc38607bca1e5b50ed7cd58f6a4a3b
|
1402f4d6bbb4a415bce07ebfddc8b76704f11f97
|
refs/heads/master
| 2023-07-06T11:48:21.413305
| 2020-04-30T12:53:11
| 2020-04-30T12:53:11
| 120,221,429
| 100
| 24
|
CC0-1.0
| 2023-06-25T03:17:43
| 2018-02-04T20:42:11
|
HTML
|
UTF-8
|
R
| false
| false
| 40,260
|
r
|
Simple.R
|
#### Currently a collection of simple tests
## (since 'Matrix' takes long to load, rather have fewer source files!)
##-------- *BEFORE* attaching Matrix: --------------------------------
str(Matrix::Matrix)# -> load the namespace
T <- new("ngTMatrix", i=0L, j=2L, Dim = c(2L,6L))
T
as(T, "CsparseMatrix")
## gave Error in asMethod(object) : could not find function ".M.classEnv"
## from 0.999375-23 to *-25
## another even shorter version of this:
n <- new("dgCMatrix")
n
## this:
m <- Matrix::Matrix(cbind(1,0,diag(x=2:4)))
m
mt <- m + table(gl(3,5), gl(5,3))# failed in Matrix <= 1.2.9
mt
stopifnot(is(mt, "sparseMatrix"))
##--------------------------------------------------------------------
library(Matrix)
source(system.file("test-tools.R", package = "Matrix"))# identical3() etc
if(interactive()) {
options(error = recover, Matrix.verbose = TRUE, warn = 1)
} else options( Matrix.verbose = TRUE, warn = 1)
# ^^^^^^ to show Matrix.msg()s
### Matrix() ''smartness''
(d4 <- d40 <- Matrix(diag(4)))
(z4 <- Matrix(0*diag(4)))
(o4 <- Matrix(1+diag(4)))
(tr <- Matrix(cbind(1,0:1)))
(M4 <- Matrix(m4 <- cbind(0,rbind(6*diag(3),0))))
dM4 <- Matrix(M4, sparse = FALSE)
d4. <- diag(4); dimnames(d4.) <- dns <- rep(list(LETTERS[1:4]), 2)
d4di<- as(d4., "diagonalMatrix")
d4d <- as(d4., "denseMatrix")
stopifnot(identical(d4di@x, numeric()), # was "named" unnecessarily
identical(dimnames(d4 <- Matrix(d4.)), dns), identical(unname(d4), d40),
identical3(d4, as(d4., "Matrix"), as(d4., "diagonalMatrix")),
is(d4d, "denseMatrix"))
class(mN <- Matrix(NA, 3,4)) # NA *is* logical
validObject(Matrix(NA))
bd4 <- bdiag(M4,dM4,M4)
stopifnotValid(o4, "dsyMatrix")
stopifnotValid(M4, "dtCMatrix")
stopifnot(validObject(dM4), validObject(mN),
identical(bdiag(M4), bdiag(dM4)),
identical(bd4@p, c(0L,0:3,3:6,6:9)),
identical(bd4@i, c(0:2, 4:6, 8:10)), bd4@x == 6
)
assert.EQ.mat(dM4, m4)
assert.EQ.mat(M4^M4, m4^m4)
assert.EQ.mat(mN, matrix(NA, 3,4))
assert.EQ.mat(bdiag(diag(4)), diag(4))
sL <- Matrix(, 3,4, sparse=TRUE)# -> "lgC"
trS <- Matrix(tr, sparse=TRUE)# failed in 0.9975-11
stopifnotValid(d4, "diagonalMatrix"); stopifnotValid(z4, "diagonalMatrix")
stopifnotValid(tr, "triangularMatrix"); stopifnotValid(trS, "triangularMatrix")
stopifnot(all(is.na(sL@x)), ## not yet: all(is.na(sL)),
!any(sL, na.rm=TRUE), all(!sL, na.rm=TRUE),
validObject(Matrix(c(NA,0), 4, 3, byrow = TRUE)),
validObject(Matrix(c(NA,0), 4, 4)))
stopifnotValid(Matrix(c(NA,0,0,0), 4, 4), "sparseMatrix")
I <- i1 <- I1 <- Diagonal(1)
I1[1,1] <- i1[1, ] <- I [ ,1] <- NA
stopifnot(identical3(I,i1,I1))
image(d4) # gave infinite recursion
## Steve Walker, Mar 12, 2014:
n <- 7
(M <- triu(Matrix(seq_len(n^2), n, sparse=TRUE)))
im <- image(M) # should be an n-by-n image plot, but is not
stopifnot(n == diff(sort(im$y.limits)))
## ylimits were too small (by 1 on each side)
assertError( Matrix(factor(letters)) )
mlp <- matrix(.leap.seconds)## 24 x 1 numeric matrix
Mlp <- Matrix(.leap.seconds)
assert.EQ.mat(Mlp, mlp)
if(FALSE) { ## TODO -- once R itself does better ...
.Leap.seconds <- as.POSIXlt(.leap.seconds)
mLp <- matrix(.Leap.seconds)## 24 x 1 list each of length 24 -- yuck!!!
MLp <- Matrix(.Leap.seconds)## --> error (for now)
}
E <- rep(c(TRUE,NA,TRUE), length=8)
F <- new("nsparseVector", length = 8L, i = c(2L, 5L, 8L))
e <- as(E, "sparseVector"); f <- as(F,"lsparseVector")
Fv <- as.vector(F, "any") # failed in Matrix <= 1.2.0, and base::as.vector(.) failed too:
stopifnot(E | as.vector(F), identical(E | F, F | E),
all(e | f), all(E | F), # <- failed Ops.spv.spv
identical(Fv, base::as.vector(F)),
is.logical(Fv), which(Fv) == c(2,5,8))
dT <- new("dgTMatrix",
i = c(1:2,1:2), j=rep(1:2, each=2), Dim = c(4L, 4L), x = c(1, 1, NA, 2))
dt <- new("dtTMatrix", i = 0:3, j = 0:3, Dim = c(4L, 4L), x = c(1,0,0,0),
uplo = "U", diag = "N")
c1 <- as(dT, "CsparseMatrix")
c2 <- as(dt, "CsparseMatrix")
isValid(lc <- c1 > c2,"lgCMatrix")
isValid(lt <- dT > dt,"lgCMatrix")
stopifnot(identical(lc,lt))
M <- Diagonal(4); M[1,2] <- 2 ; M
cM <- crossprod(M) # >> as_cholmod_l_triplet(): could not reallocate for internal diagU2N()
stopifnot(identical(cM, tcrossprod(t(M))))
if(doExtras) { ## formerly in MM-only's ./AAA_latest.R
## 2010-11-29 --- prompted by BDR:
mlp <- matrix(.leap.seconds)## 24 x 1 numeric matrix
Mlp <- Matrix(.leap.seconds)
assert.EQ.mat(Mlp, mlp)
S.na <- spMatrix(3, 4, c(1,2,3), c(2,3,3), c(NA,1,0))
show(S.na <- S.na - 2 * S.na)
show(L <- S.na != 0)
M0 <- Matrix(0, 3, 4)
show(Ln0 <- S.na != rep(0, prod(dim(L))))
stopifnot(Q.eq(L, Ln0), identical(Ln0, M0 != S.na))
}## (doExtras) only
### Unit-diagonal and unitriangular {methods need diagU2N() or similar}
I <- Diagonal(3)
(T <- as(I,"TsparseMatrix")) # unitriangular
(C <- as(I,"CsparseMatrix")) # (ditto)
lT <- as(T,"lMatrix")
lC <- as(C,"lMatrix")
stopifnot(
identical((n0 <- I != 0), Diagonal(3, TRUE)), I@diag == "U",
identical(n0, I & TRUE), identical(n0, I | FALSE),
identical(n0, TRUE & I), identical(n0, FALSE | I),
all(n0 == !(I == 0)), all(I == n0), identical(n0 == I, I == n0)
,
identical4(lT, as(Diagonal(3, x=TRUE),"TsparseMatrix"), T & TRUE, TRUE & T),
identical4(lC, as(Diagonal(3, x=TRUE),"CsparseMatrix"), C & TRUE, TRUE & C),
identical3(lT, T | FALSE, FALSE | T),
identical3(lC, C | FALSE, FALSE | C),
TRUE)
I[,1] <- NA; I[2,2] <- NA ; I[3,] <- NaN
stopifnotValid(I, "sparseMatrix")
I # gave error in printSpMatrix() - because of R bug in format.info()
L <- spMatrix(9, 30, i = rep(1:9, 3), 1:27, (1:27) %% 4 != 1)
M <- drop0(crossprod(L))
diag(M) <- diag(M) + 5 # to make it pos.def.
M. <- M[1:12,1:12] # small ex
N3 <- as(Matrix(upper.tri(diag(3))), "nMatrix")
stopifnotValid(bdN <- bdiag(N3, N3),"nsparseMatrix")
stopifnot(identical(L, L == TRUE), ## used to give infinite recursion
all(drop0((0 - L) != 0) == drop0(L)))
L[sample(length(L), 10)] <- NA
ll <- as(L,"logical")
stopifnot(all.equal(mean(L, na.rm=TRUE),
mean(ll, na.rm=TRUE), tol= 1e-14),
all.equal(mean(L, na.rm=TRUE, trim=1/4),# <- with a warning
mean(ll, na.rm=TRUE, trim=1/4), tol= 1e-14))
## Examples where is.na(.) was wrong:
validObject(sc <- new("dsCMatrix", i=as.integer(c(0,0:1,1:2,0:1,3)), Dim=c(4L,4L),
p = c(0L,1L,3L,5L,8L), x = c(0,NA,NA,0:1,0,NA,1)))
validObject(gc <- as(sc, "generalMatrix"))
stopifnot(isSymmetric(M), isSymmetric(M.),
is(bdiag(M., M.),"symmetricMatrix"),
is(bdN, "triangularMatrix"),
all(sc == gc | (is.na(sc) & is.na(gc))),
all.equal(N3,N3),
tail(all.equal(N3, t(N3)), 1) == all.equal(1,-1),# ~= "Mean relative difference: 2"
all((bdN != t(bdN)) == (bdN + t(bdN))), # <nsparse> != <nsparse> failed to work...
!any((0+bdN) > bdN), # <dsparse> o <nsparse>
!any(bdN != (0+bdN)), # <nsparse> o <dsparse>
length(grep("Length", all.equal(M., (vM <- as.vector(M.))))) > 0,
identical(M., (M2 <- Matrix(vM, 12,12))),
all.equal(M., M2, tolerance =0)
)
Filter(function(.) inherits(get(.), "symmetricMatrix"), ls())
## [1] "cM" "M" "M." "M2" "o4" "sc"
tt <- as(kronecker(cM, Diagonal(x = c(10,1))), "symmetricMatrix")
dimnames(tt) <- list(NULL, cn <- letters[1:ncol(tt)])
stopifnotValid(tt, "dsTMatrix")
(cc <- as(tt, "CsparseMatrix")) # shows *symmetric* dimnames
stopifnot(identical3( cc @Dimnames, tt @Dimnames, list(NULL, cn)),
identical3(t(cc)@Dimnames, t(tt)@Dimnames, list(cn, NULL)),
identical3(dimnames(cc), dimnames(tt), list(cn, cn)))# now symmetric !
stopifnot(identical3(dimnames(cc),
dimnames(as(cc, "generalMatrix")), ## should fixup dimnames to *symmetric*
dimnames(as(tt, "generalMatrix"))))
## --> .Call(Csparse_symmetric_to_general, from)
mat <- as(cc, "matrix") ## --> should fixup dimnames to *symmetric*
mat # should print *symmetric* dimnames
stopifnot(identical3(dimnames(cc), dimnames(mat), dimnames(as(tt, "matrix"))))
selectMethod(coerce, c("dsCMatrix", "denseMatrix"))
dmat <- as(cc, "denseMatrix") ## --> gave Error (!!) in Matrix 1.1-5
stopifnot(identical3(tt@Dimnames, dmat@Dimnames, list(NULL, cn)))
dmat # should print *symmetric* dimnames (not modifying dmat as it did intermittently)
stopifnot(identical(dmat@Dimnames, list(NULL, cn)))
ttdm <- as(tt, "denseMatrix")
stopifnot(all.equal(dmat, ttdm),
## ^^^^^^ not identical(): 'x' slot differs, as only "U" is needed
identical(as(dmat, "dspMatrix"), as(ttdm, "dspMatrix")),
identical(dimnames(cc), dimnames(dmat)),
## coercing back should give original :
identical(cc, as(dmat, "sparseMatrix")),
identical(uniqTsparse(tt), as(ttdm, "TsparseMatrix")))
## MM: now *if* cc is "truly symmetric", these dimnames should be, too:
d5 <- cn[1:5]; dnm5 <- list(d5,d5)
stopifnot(identical(dimnames( cc [1:5, 1:5]), dnm5),
identical(dimnames(t(cc)[1:5, 1:5]), dnm5))
## large sparse ones: these now directly "go sparse":
str(m0 <- Matrix(0, nrow=100, ncol = 1000))
str(l0 <- Matrix(FALSE, nrow=100, ncol = 200))
stopifnot(all(!l0),
identical(FALSE, any(l0)))
if(!interactive()) warnings()
## really large {length(<dense equivalent>) is beyond R's limits}:
op <- options(warn = 2) # warnings (e.g. integer overflow!) become errors:
n <- 50000L
stopifnot(n^2 > .Machine$integer.max)
## had integer overflow in index constructions:
x <- 1:n
D <- Diagonal(n, x=x[n:1])
summary(D)# special method
summary(D != 0)
stopifnot(identical(x*D, (Dx <- D*x)),
identical(D != 0, as(D, "lMatrix")),
identical(Dx, local({d <- D; d@x <- d@x * x; d})))
Lrg <- new("dgTMatrix", Dim = c(n,n))
l0 <- as(as(Lrg, "lMatrix"), "lgCMatrix")
d0 <- as(l0, "dgCMatrix")
if(FALSE) { #_____________________ FIXME: Should use cholmod_l_*() everywhere (?)____
## problem in Csparse_to_dense :
dl0 <- as(l0, "denseMatrix")
dd0 <- as(d0, "denseMatrix")
## currently, both give --- Error in asMethod(object) :
## Cholmod error 'problem too large' at file ../Core/cholmod_dense.c, line 105
##--> And there it is 'Int_max' ==> ../src/CHOLMOD/Include/cholmod_internal.h
## defines 'Int_max' and does that depending of "mode", and
## MM thinks we should use the "DLONG" mode now -- for 64-bit integers!
## ==> Then Int_max := SuiteSparse_long_max := LONG_MAX
## (the latter from ../src/SuiteSparse_config/SuiteSparse_config.h )
## ==> use cholmod_l_<foo> instead of cholmod_<foo> in *many places*
## check they are ok
stopifnot(identical(dim(dl0), c(n,n)), identical(dim(dd0), c(n,n)),
!any(dl0), all(dd0 == 0))
rm(dl0, dd0)# too large to keep in memory and pass to checkMatrix()
}
diag(Lrg[2:9,1:8]) <- 1:8
## ==: Lrg[2:9,1:8] <- `diag<-`(Lrg[2:9,1:8], 1:8)
e1 <- try(Lrg == Lrg) # error message almost ok
(memGB <- Sys.memGB("MemFree")) # from test-tools-1.R
## __vv__
e2 <- if(doExtras && is.finite(memGB) && memGB > 30) { # need around 18 GB
try(!Lrg) # now *works* on 64-bit machines with enough RAM
## and immediately errors if LONG_VECTORs are not available
} # else NULL
str(e2) # error, NULL or "worked"
ina <- is.na(Lrg)# "all FALSE"
stopifnot(grep("too large", e1) == 1,
if(inherits(e2, "try-error")) grep("too large", e2) == 1
else is.null(e2) || length(e2@x) == n^2,
!any(ina))# <- gave warning previously
stopifnot(suppressWarnings(any(Lrg)))# (double -> logical warning)
rm(e2)# too large...
## with dimnames:
v <- c(a=1, b=2:3)
m <- as.matrix(v)
M <- as(v, "dgeMatrix")
stopifnot(identical(dimnames(m), list(c("a", "b1", "b2"), NULL)),
identical(M, as(m, "dgeMatrix")),
identical(dimnames(M), dimnames(m)))
## dimnames(.) of symmpart() / skewpart() :
ns <- c("symmpart", "skewpart", "forceSymmetric")
symFUNs <- setNames(lapply(ns, get), ns); rm(ns)
## Check dimnames handling of symmpart()/skewpart()/forceSymmetric() for a
## Matrix `m`, comparing against the same operations applied to its plain
## base-matrix version, and verify the identity m == symmpart(m) + skewpart(m).
## Relies on file-level objects: `symFUNs` (named list of the three functions)
## and the stopifnotValid() helper sourced from Matrix's test-tools.R.
chkSS <- function(m) {
  r <- lapply(symFUNs, function(fn) fn(m))
  m0 <- as(m, "matrix")
  r0 <- lapply(symFUNs, function(fn) fn(m0))
  # forceSymmetric() must yield a "symmetricMatrix" for both representations.
  stopifnotValid(fS <- r [["forceSymmetric"]], "symmetricMatrix")
  stopifnotValid(fS0 <- r0[["forceSymmetric"]], "symmetricMatrix")
  dnms <- dimnames(m)
  d.sy <- dimnames(r[["symmpart"]])
  # Which margin's names should be symmetrized: fall back to rownames (1)
  # only when colnames are absent but rownames are present; otherwise use 2.
  id <- if(is.null(dnms[[2]]) && !is.null(dnms[[1]])) 1 else 2
  stopifnot(identical(d.sy, dnms[c(id,id)]),
            identical(d.sy, dimnames(r [["skewpart"]])),
            identical(d.sy, dimnames(r0[["skewpart"]])),
            all(m == with(r, symmpart + skewpart)),
            all(m0 == with(r0, symmpart + skewpart)),
            identical(dS <- dimnames(fS), dimnames(fS0)),
            identical(dS[1], dS[2]))
}
for(m in list(Matrix(1:4, 2,2), Matrix(c(0, rep(1:0, 3),0:1), 3,3))) {
cat("\n---\nm:\n"); show(m)
chkSS(m)
dn <- list(row = paste0("r", 1:nrow(m)), col = paste0("var.", 1:ncol(m)))
dimnames(m) <- dn ; chkSS(m)
colnames(m) <- NULL ; chkSS(m)
dimnames(m) <- unname(dn) ; chkSS(m)
}
m. <- matrix(c(0, 0, 2:0), 3, 5)
dimnames(m.) <- list(LETTERS[1:3], letters[1:5])
(m0 <- m <- Matrix(m.))
m@Dimnames[[2]] <- m@Dimnames[[1]]
## not valid anymore:
(val <- validObject(m, test=TRUE)); stopifnot(is.character(val))
dm <- as(m0, "denseMatrix"); rm(m)
stopifnot(all.equal(rcond(dm), rcond(m.), tolerance = 1e-14),
##^^^^^^^ dm and m. are both dense, interestingly small differences
## show in at least one case of optimized BLAS
all.equal(rcond(dm), 0.4899474520656),
## show(<dgRMatrix>) had revealed a bug in C:
identical(capture.output(show(as(m0, "RsparseMatrix")))[-(1:2)],
gsub("0", ".", capture.output(show(m.))[-1])))
m.1 <- m.; dimnames(m.1) <- list(row=NULL, col=NULL)
M.1 <- Matrix(m.1, sparse=TRUE)
show(M.1)# had bug in .formatSparseSimple()
###-- Sparse Triangular :
g5 <- new("dgCMatrix", Dim = c(5L, 5L),
x = c(10, 1, 3, 10, 1, 10, 1, 10, 10),
i = c(0L,2L,4L, 1L, 3L,2L,4L, 3L, 4L),
p = c(0L, 3L, 5L, 7:9))
t5 <- as(g5, "triangularMatrix") # fine
stopifnot(class(t5) == "dtCMatrix",
identical(t5, tril(g5)))
## This is really a regression test for 'methods::selectMethod()'
## Maybe move to R once 'Matrix' is recommended
sm <- selectMethod(coerce, c("dgCMatrix", "triangularMatrix"), verbose=TRUE)
stopifnot(identical(sm(g5), t5))
dimnames(t5) <- list(row=paste0("r",1:5), col=paste0("C.",1:5))
s5 <- symmpart(t5) # gave an error
(t1 <- new("dtTMatrix", x= c(3,7), i= 0:1, j=3:2,
Dim= as.integer(c(4,4))))
## Diagonal o Sparse
I4 <- Diagonal(4)
D4 <- Diagonal(4, x=1:4)
validObject(t1)
validObject(t2 <- t1 + I4)
validObject(tt2 <- t(t1) + I4)
validObject(t1c <- as(t1, "CsparseMatrix"))
validObject(t2c <- as(t2, "CsparseMatrix"))
stopifnotValid(2 * I4, "diagonalMatrix")
stopifnotValid(D4 * 3, "diagonalMatrix")
stopifnotValid(I4 / 5, "diagonalMatrix")
stopifnotValid(D4 / 2, "diagonalMatrix")
stopifnotValid(t1c + I4,"triangularMatrix")
stopifnotValid(t2c + I4,"triangularMatrix")
stopifnot(identical(t1, t(t(t1))),
identical(t1c, t(t(t1c))),
c(class(t2), class(t1c), class(t2c), class(tt2)) == "dtCMatrix",
identical(t(tt2), t2))
assert.EQ.mat(t1, as(t1c, "matrix"))
D4. <- D4 * (A4 <- Matrix(1:4, 4,4))
D4p <- A4 + D4
Lg1 <- D4 > 0 & D4 > 1
nLg <- !Lg1
nnLg <- !nLg
D4m <- D4 * 4:1
assert.EQ.mat(D4., diag(x= (1:4)^2))
assert.EQ.mat(D4p, diag(x= (1:4)) + (1:4))
assert.EQ.mat(D4m, diag(x=c(4,6,6,4)))
assert.EQ.mat(Lg1, diag(x= c(FALSE, rep(TRUE,3))))
stopifnot(is(Lg1, "diagonalMatrix"), is(D4m, "diagonalMatrix"),
is(D4., "diagonalMatrix"),
is(nLg, "symmetricMatrix"), is(nnLg, "symmetricMatrix"),
identical3(Lg1, Matrix(nnLg), as(nnLg, "diagonalMatrix")),
all(Lg1 != (!Lg1)))
## tri[lu](<diagonal>)
td3 <- triu(diag(3)); stopifnot(is(td3, "triangularMatrix"), td3@uplo == "U")
Ld3 <- tril(diag(3)); stopifnot(is(Ld3, "triangularMatrix"), Ld3@uplo == "L")
## the latter did fail till 2014-12-20
D3 <- Diagonal(3)
stopifnot(identical3(D3, tril(D3), triu(D3)))
## methods were missing
## as(<diag>, <anything>) :
str(cls <- names(getClass("Matrix")@subclasses))# all Matrix classes
for(cl in cls)
if(canCoerce(I4, cl)) {
cat(cl,":")
M <- as(I4, cl)
M. <- as(D4, cl)
stopifnot(diag(4) == as(M,"matrix"),
if(is(cl,"dMatrix")) diag(x=1:4) == as(M.,"matrix") else TRUE)
cat(" [Ok]\n")
}
s4 <- as(D4,"sparseMatrix")
v <- c(11,2,2,12); s4[2:3,2:3] <- v; validObject(s4)
s4. <- D4; s4.[2:3,2:3] <- v; validObject(s4.)
stopifnot(all(s4 == s4.))
## now assign symmetrically to symmetricMatrix
s4 <- as(as(D4,"sparseMatrix"),"symmetricMatrix")
s4[2:3,2:3] <- v
validObject(s4)
stopifnot(is(s4,"symmetricMatrix"))
assert.EQ.mat(s4, as(s4.,"matrix"),tol=0)
## lower-triangular unit-diagonal
L <- new("dtCMatrix", i = 1L, p = c(0:1, 1L), Dim = c(2L, 2L),
x = 0.5, uplo = "L", diag = "U")
stopifnot(range(L) == 0:1, all.equal(mean(L), 5/8))
## from 0-diagonal to unit-diagonal triangular {low-level step}:
tu <- t1 ; tu@diag <- "U"
tu
validObject(cu <- as(tu, "dtCMatrix"))
validObject(cnu <- diagU2N(cu))# <- testing diagU2N
validObject(tu. <- as(cu, "dtTMatrix"))
validObject(tt <- as(cu, "TsparseMatrix"))
stopifnot(## NOT: identical(tu, tu.), # since T* is not unique!
identical(cu, as(tu., "dtCMatrix")),
length(cnu@i) == length(cu@i) + nrow(cu),
identical(cu, diagN2U(cnu)),# <- testing diagN2U
all(cu >= 0, na.rm = TRUE), all(cu >= 0),
any(cu >= 7))
validObject(tcu <- t(cu))
validObject(ttu <- t(tu))
validObject(ltu <- as(ttu, "lMatrix"))
validObject(ldtu <- as(ltu, "denseMatrix"))
validObject(Cltu <- as(ltu, "CsparseMatrix"))
stopifnot(identical(asCsp(ttu > 0), asCsp(ltu)),
all(ltu == as(ttu > 0,"denseMatrix")))
ltu - (ttu > 0) # failed
assert.EQ.mat(cu, as(tu,"matrix"), tol=0)
assert.EQ.mat(cnu, as(tu,"matrix"), tol=0)
C <- suppressWarnings(Matrix(c(0,1,0,0), 5,5)) + Diagonal(5)
(tU <- diagN2U(tril(C))) # dtC Unitriangular
ntU <- as(tU, "nMatrix")
nT <- as(ntU, "TsparseMatrix")
R <- as(tU, "RsparseMatrix")
Tt <- diagU2N(R) # used to accidentally drop the diag.
stopifnot(R@x == c(1,1,1), diag(Tt) == 1)
lcu <- new("ltCMatrix", Dim = c(4L, 4L), i = c(0:1, 0L), p = c(0L, 0:3),
x = c(TRUE, FALSE, FALSE), uplo = "U", diag = "U")
(lTu <- as(lcu,"TsparseMatrix"))# prints wrongly (in Matrix 0.999375-31)
stopifnot(identical3(rowSums(lcu), rowSums(lTu), rowSums(drop0(lcu))))
(ncu <- as(lcu, "nMatrix"))# -- gives the "pattern" of lcu, i.e. FALSE are *there*
ncn <- diagU2N(ncu)
(cncn <- crossprod(ncn))# works -> "nsCMatrix"
stopifnot(identical(ncu, as(lcu,"nsparseMatrix")),
identical(rowSums(ncu), c(3:1, 1L)),
Q.eq(ncn, ncu),
Q.eq(crossprod(drop0(lcu)), crossprod(lcu)),# crossprod works -> "dsCMatrix"
identical(crossprod(ncu), cncn),
Q.eq(cncn, t(ncu) %*% ncu)) #used to seg.fault
U <- new("dtCMatrix", Dim = c(6L, 6L),
i = c(0:1, 0L, 2:3, 1L, 4L),
p = c(0L,0L,0L, 2:3, 5L, 7L),
x = rep.int(-0.5, 7), diag = "U")
validObject(U)
U. <- solve(iU <- solve(U))#-> gave segmentation fault
stopifnot(validObject(U), ## had a case where solve(U) modified U !
validObject(iU),
validObject(U.),
## no rounding error, since have iU@x * 8 is integer :
identical(U, diagN2U(drop0(U.))))
## <sparse> o <numeric> (of length > 1):
stopifnotValid(tm <- tu * 1:8, "sparseMatrix")
stopifnot(identical4(tm, cu * 1:8, 1:8 * cu, 1:8 * tu))
cu[1,2] <- tu[1,2] <- NA
mu <- as(tu,"matrix")
stopifnotValid(cu, "CsparseMatrix"); stopifnotValid(cu, "triangularMatrix")
stopifnotValid(tu, "TsparseMatrix"); stopifnotValid(tu, "triangularMatrix")
stopifnot(identical(cu * 1:8, tu * 1:8), # but are no longer triangular
identical(cu > .1, as(tu > .1, "CsparseMatrix")),
all(cu >= 0, na.rm=TRUE), !all(cu >= 1), is.na(all(tu >= 0)),
## Csparse_drop: preserves triangularity incl diag="U"
identical(cu, .Call(Matrix:::Csparse_drop, cu, 0.))
)
assert.EQ.mat(cu * 1:8, mu * 1:8)
ina <- is.na(as(cu,"matrix"))
## These 3 were each different (2008-03) !!
stopifnot(all(ina == is.na(cu)),
all(ina == is.na(as(cu,"generalMatrix"))),
all(ina == as(is.na(as(cu,"matrix")),"nMatrix")))
set.seed(7)
xx <- rpois(10, 50)
Samp <- function(n,size) sample(n, size, replace=TRUE)
Tn <- sparseMatrix(i=Samp(8, 50), j=Samp(9,50), x=xx, giveCsparse=FALSE)
Tn
stopifnot(xx == Tn@x,
max(xx) < max(Tn), 0 == min(Tn),
(sT <- sum(Tn)) == sum(colSums(Tn)), sT == sum(Tn@x),
range(Tn) == range(as(Tn, "CsparseMatrix")))
## tu. is diag "U", but tu2 not:
tu2 <- as(as(tu., "generalMatrix"), "triangularMatrix")
assert.EQ.mat(cu, mu, tol=0)
stopifnot(identical3(cu[cu > 1], tu [tu > 1], mu [mu > 1]),
identical3(cu <= 1, tu <= 1, as(mu <= 1, "lMatrix")),# all lgeMatrix
identical3(cu[cu <= 1], tu[tu <= 1], mu[mu <= 1]),
identical3(cu , triu(cu ), t(t(cu))),
identical3(tu , triu(tu ), t(t(tu))),
identical3(tu., triu(tu.), t(t(tu.))),
identical(tu2, triu(tu2)),
identical(tcu , tril(tcu)),
identical(ttu , tril(ttu)),
identical(t(tu), tril(t(tu)))
)
assert.EQ.mat(triu(cu), as.matrix(triu(as.matrix(cu))))
for(k in -1:1)
assert.EQ.mat(tril(cu,k), as.matrix(tril(as.matrix(cu),k)))
(dtr <- Matrix(local({m <- diag(2); m[1,2] <- 3;m})))
identical(dtr, triu(dtr))
assert.EQ.mat(tril(dtr), diag(2))
(t4 <- new("dgTMatrix", i = 3:0, j = 0:3, x = rep(1,4), Dim = as.integer(c(4,4))))
c4 <- as(t4, "CsparseMatrix")
## the same but "dsT" (symmetric)
suppressWarnings(M <- Matrix(c(0, rep(c(0,0:1),4)), 4,4))# warning:.. length [13] is not ..multiple
tt <- as(M, "TsparseMatrix")
stopifnot(all.equal(triu(t4) + tril(t4), c4),
all.equal(triu(tt) + tril(tt), c4))
###-- Numeric Dense: Crossprod & Solve
set.seed(123)
mm. <- mm <- Matrix(rnorm(500 * 150), nc = 150)
stopifnot(validObject(mm))
xpx <- crossprod(mm)
stopifnot(identical(mm, mm.))# once upon a time, mm was altered by crossprod()
stopifnotValid(xpx, "dpoMatrix")
str(mm) # 'dge*"
str(xpx)# 'dpo*"
xpy <- crossprod(mm, rnorm(500))
res <- solve(xpx, xpy)
str(xpx)# now with Cholesky factor
stopifnot(validObject(xpx),
validObject(xpy),
validObject(res))
stopifnot(all.equal(xpx %*% res, xpy, tolerance = 1e-12))
lp <- xpx >= 1
slp <- as(lp, "sparseMatrix")
ltlp <- lp[ lower.tri(lp) ]
sltlp <- slp[ lower.tri(slp) ]
dim(ij <- which(lower.tri(lp), arr.ind = TRUE))
ss <- slp[ij] # now fast (!)
stopifnot(identical4(lp[ij], ltlp, sltlp, as(lp, "matrix")[ij]),
identical(ss, sltlp),
isValid(lp, "lsyMatrix"), lp@uplo == "U")
###-- more solve() methods {was ./solve.R }
## first for "dgeMatrix" and all kinds of RHS :
(m6 <- 1 + as(diag(0:5), "dgeMatrix"))
rcond(m6)
I6 <- as(diag(6), "dgeMatrix")
stopifnot(all.equal(I6, m6 %*% solve(m6)),
all.equal(I6, solve(m6) %*% m6) )
(i6 <- solve(m6, Matrix(1:6)))
stopifnot(identical(i6, as(cbind(c(-4, rep(1,5))), "dgeMatrix")),
identical(i6, solve(m6, 1:6)),
identical(i6, solve(m6, matrix(1:6))),
identical(i6, solve(m6, matrix(c(1,2,3,4,5,6))))
)
## solve(<sparse>)
(m <- t1+ t(t1) + Diagonal(4))
i.m <- solve(as.mat(m))
I1 <- m %*% i.m
o4 <- diag(I1)
im <- solve(m)# is now sparse {not yet}
(I2 <- m %*% im)
(ms <- as(m, "symmetricMatrix"))
## solve(<sparse>, <sparse>):
s.mm <- solve(m,m)
s.mms <- solve(m, ms)
## these now work "fully-sparse"
s.ms2 <- solve(ms, ms)
s.msm <- solve(ms, m)
I4c <- as(Matrix(diag(4),sparse=TRUE), "generalMatrix")
stopifnot(isValid(im, "Matrix"), isValid(I2, "Matrix"), class(I4c) == "dgCMatrix",
all.equal(I1, as(I2,"dgeMatrix"), tolerance = 1e-14),
all.equal(diag(4), as.mat(I2), tolerance = 1e-12),
all.equal(s.mm, I2, tolerance = 1e-14),
all.equal(s.mms, I2, tolerance = 1e-14),
all.equal(s.ms2, s.msm, tolerance = 4e-15),
all.equal(s.ms2, I4c , tolerance = 4e-15),
abs(o4 - 1) < 1e-14)
image(T125 <- kronecker(kronecker(t5,t5),t5),
main = paste("T125:",class(T125)))
dim(T3k <- kronecker(t5,kronecker(T125, t5)))
system.time(IT3 <- solve(T3k))# incredibly fast
I. <- drop0(zapsmall(IT3 %*% T3k))
I.. <- diagN2U(I.)
I <- Diagonal(5^5)
stopifnotValid(IT3, "dtCMatrix")
stopifnot(## something like the equivalent of all(I. == Diagonal(3125)) :
identical(as(I., "diagonalMatrix"), I),
identical(as(I..,"diagonalMatrix"), I)
)
## printSpMatrix() ; "suppressing (columns | rows) .." {and do it correctly!}
IT3
op <- options(width = 70, max.print = 1000)
T125[-(1:50),] ## suppression ... is it correctly done?
###-- row- and column operations {was ./rowcolOps.R }
set.seed(321)
(m1 <- round(Matrix(rnorm(25), 5), 2))
m1k <- Matrix(round(rnorm(1000), 2), 50, 20)
m.m <- as(m1k, "matrix")
stopifnot(all.equal(colMeans(m1k), colMeans(m.m)),
all.equal(colSums (m1k), colSums (m.m)),
all.equal(rowMeans(m1k), rowMeans(m.m)),
all.equal(rowSums (m1k), rowSums (m.m)),
all.equal(colMeans(m1k, na.rm=TRUE), colMeans(m.m, na.rm=TRUE)),
all.equal(colSums (m1k, na.rm=TRUE), colSums (m.m, na.rm=TRUE)),
all.equal(rowMeans(m1k, na.rm=TRUE), rowMeans(m.m, na.rm=TRUE)),
all.equal(rowSums (m1k, na.rm=TRUE), rowSums (m.m, na.rm=TRUE)) )
###-- kronecker for nonsparse uses Matrix(.):
stopifnotValid(kr <- kronecker(m1, m6), "Matrix")
assert.EQ.mat(kr,
kronecker(as(m1, "matrix"),
as(m6, "matrix")), tol = 0)
## sparse:
(kt1 <- kronecker(t1, tu))
kt2 <- kronecker(t1c, cu)
stopifnot(identical(Matrix:::uniq(kt1), Matrix:::uniq(kt2)))
## but kt1 and kt2, both "dgT" are different since entries are not ordered!
ktf <- kronecker(as.matrix(t1), as.matrix(tu))
if(FALSE) # FIXME? our kronecker treats "0 * NA" as "0" for structural-0
assert.EQ.mat(kt2, ktf, tol= 0)
(cs1 <- colSums(kt1))
## TRUE wherever 'x' is TRUE or NA, i.e. "not known to be FALSE".
## Used below to accept comparisons that may legitimately contain NA.
NA.or.True <- function(x) {
  missing_mask <- is.na(x)
  missing_mask | x
}
eq <- (cs1 == colSums(as(kt1, "matrix")))
stopifnot(NA.or.True(eq), identical(is.na(eq), is.na(cs1)))
nt1 <- as(kt1, "nMatrix") # no NA's anymore
(ng1 <- as(as(nt1, "generalMatrix"),"CsparseMatrix")) # ngC
dg1 <- as(ng1, "dMatrix")# dgC
lt1 <- kt1 > 5
nt1 <- as(lt1, "nMatrix")
(colSums(nt1, sparseResult = TRUE))
(colSums(kt1, sparseResult = TRUE)) # dsparse, with NA
(colSums(lt1, sparseResult = TRUE)) # isparse, with NA
(colSums(lt1, sparseResult = TRUE, na.rm = TRUE))
(colSums(nt1, sparseResult = TRUE)) # isparse, no NA
## check correct sparseness of both:
for(M in list(kt1, nt1, ng1, dg1, lt1, nt1)) {
m <- as(M, "matrix")
for(na.rm in c(FALSE,TRUE)) {
cs <- colSums(M, na.rm = na.rm)
cs. <- colSums(M, na.rm = na.rm, sparseResult = TRUE)
rs <- rowSums(M, na.rm = na.rm)
rs. <- rowSums(M, na.rm = na.rm, sparseResult = TRUE)
stopifnotValid(cs., "sparseVector")
stopifnotValid(rs., "sparseVector")
stopifnot(identical(cs, as(cs., "vector")),
identical(rs, as(rs., "vector")),
{eq <- cs == colSums(m, na.rm = na.rm) ; ineq <- is.na(eq)
all(ineq | eq) && identical(ineq, is.na(cs)) },
{eq <- rs == rowSums(m, na.rm = na.rm) ; ineq <- is.na(eq)
all(ineq | eq) && identical(ineq, is.na(rs)) } )
}
}
i1 <- cs. == 1
cs2 <- cs.
cs2[i1] <- 0 # failed in *-31 !!
## now *index* with a NA-sparseVector :
i2 <- i1 ; i2[3] <- NA ; li2 <- as.logical(i2)
cs3 <- cs. ; cs3 [i2] <- 0
v3 <- as(cs.,"vector"); v3[li2] <- 0
cs4 <- cs. ; cs4[li2] <- 0
stopifnot(length(i1@x) == 2, identical(li2, as(i2,"vector")),
identical(cs3, cs4),
cs3 == v3, all(as(v3, "sparseVector") == cs3)
## indexing simple "numeric" with sparseVector:
## see 'R_FIXME' in ../R/sparseVector.R
## , identical(v3[i2], v3[li2])
## TODO:
## sub-assigning into simple "numeric" with sparseVector index:
)
M <- Matrix(c(2:0,1),2); M. <- as(M, "sparseMatrix")
(N <- as(crossprod(kronecker(diag(2), M)) > 0,
"nMatrix"))
(L. <- as(N,"lMatrix"))
stopifnot(identical(N, as(L.,"nMatrix")),
identical(kronecker( c(1,0), M),
kronecker(cbind(1:0), M)))
assert.EQ.mat(kronecker(M, c(1,0,0)),
kronecker(as.matrix(M), c(1,0,0)))
## coercion from "dpo" or "dsy"
xx <- as(xpx, "dsyMatrix")
stopifnot(isSymmetric(xxS <- as(xx, "sparseMatrix")),
isSymmetric(xpxS <- as(xpx, "sparseMatrix")))
tm <- matrix(0, 8,8)
tm[cbind(c(1,1,2,7,8),
c(3,6,4,8,8))] <- c(2,-30,15,20,80)
(tM <- Matrix(tm)) ## dtC
(mM <- Matrix(m <- (tm + t(tm)))) ## dsC
mT <- as(mM, "dsTMatrix")
gC <- as(as(mT, "dgTMatrix"), "dgCMatrix")
lT <- as(Matrix(TRUE, 2,2),"TsparseMatrix")
## Check that mT, lT, and gC print properly :
pr.mT <- capture.output(mT)
pr.lT <- capture.output(lT)[-(1:2)]
nn <- unlist(strsplit(gsub(" +\\.", "", sub("^....", "", pr.mT[-(1:2)])), " "))
stopifnot(as.numeric(nn[nn != ""]) == m[m != 0],
identical(1:2, grep("|", pr.lT, fixed=TRUE)),
identical(pr.lT, capture.output(as(lT, "nMatrix"))[-(1:2)]),
capture.output(gC)[-1] == pr.mT[-1])
assert.EQ.mat(tM, tm, tol=0)
assert.EQ.mat(gC, m, tol=0)
assert.EQ.mat(mT, m, tol=0)
stopifnotValid(mM, "dsCMatrix")
stopifnotValid(tM, "dtCMatrix")
stopifnot(identical(mT, as(mM, "TsparseMatrix"))
, identical(gC, as(mM, "generalMatrix"))
## coercions general <-> symmetric
, identical(as(as(mM, "generalMatrix"), "symmetricMatrix"), mM)
, identical(as(as(mM, "dgTMatrix"), "symmetricMatrix"), mT)
, identical(as(as(tM, "generalMatrix"),"triangularMatrix"), tM)
, identical(tM + Diagonal(8), tMD <- Diagonal(8) + tM)
)
stopifnotValid(tMD, "dtCMatrix")
eM <- eigen(mM) # works thanks to base::as.matrix hack in ../R/zzz.R
stopifnot(all.equal(eM$values,
{ v <- c(162.462112512353, 30.0665927567458)
c(v, 15, 0, 0, 160-v[1], -15, -v[2])}, tol=1e-14))
##--- symmetric -> pos.def. needs valid test:
m5 <- Matrix(diag(5) - 1)
assertError(as(m5, "dpoMatrix"))# not pos.definite!
pm5 <- as(m5, "dspMatrix") # packed
assertError(as(pm5, "dppMatrix"))# not pos.definite!
sm <- as(Matrix(diag(5) + 1),"dspMatrix")
pm <- as(sm,"dpoMatrix")## gave infinite recursion (for a day or so)
pp <- as(pm,"dppMatrix")
x <- round(100 * crossprod(Matrix(runif(25),5)))
D <- Diagonal(5, round(1000*runif(5)))
px <- pack(x)
stopifnot(is(x, "dpoMatrix"), is(px,"dppMatrix"), is(D, "ddiMatrix"))
class(x+D)#--> now "dsyMatrix"
stopifnot(is(x+D, "symmetricMatrix"),
is(D+px, "dspMatrix"),
identical(x+D, D+x), identical(px+D, D+px), identical(pack(x-D), px-D))
tx <- tril(x)
ptx <- pack(tx)
stopifnot(is(tx, "dtrMatrix"), is(ptx, "dtpMatrix"),
is(t(tx), "dtrMatrix"), is(t(ptx), "dtpMatrix"),
is(D + tx, "dtrMatrix"), is(tx + D, "dtrMatrix"),
is(ptx + D, "dtpMatrix"), is(D + ptx, "dtpMatrix"))
###-- dense nonzero pattern:
class(m <- Matrix(TRUE,2,2)) # lsy
isValid(n <- as(m, "nMatrix"), "nsyMatrix")
## 1)
as(n,"CsparseMatrix") # used to give CHOLMOD error: invalid xtype...
ls2 <- as(m, "CsparseMatrix") # works fine
## and really 'm' and 'n' are internally slot identical (!!!)
as(n,"sparseMatrix")
as(m,"sparseMatrix")
### -- now when starting with nsparse :
nT <- new("ngTMatrix",
i = as.integer(c(0, 1, 0)),
j = as.integer(c(0, 0, 1)), Dim = as.integer(c(2,2)))
(nC <- as(nT, "ngCMatrix"))
str(nC)# of course, no 'x' slot
tt <- as(nT,"denseMatrix") # nge (was lge "wrongly")
stopifnot(is(tt,"ngeMatrix"),
identical(as(tt, "lMatrix"),
as(as(nT, "lMatrix"), "denseMatrix")))
tt
as(nC,"denseMatrix")
###-- sparse nonzero pattern : ----------
(nkt <- as(as(as(kt1, "generalMatrix"), "CsparseMatrix"), "ngCMatrix"))# ok
dkt <- as(nkt, "denseMatrix")
(clt <- crossprod(nkt))
stopifnotValid(nkt, "ngCMatrix")
stopifnotValid(clt, "nsCMatrix")
suppressWarnings(crossprod(clt)) ## warning "crossprod() of symmetric ..."
## a Csparse with *repeated* entry is not valid!
assertError(new("ngCMatrix", p = c(0L,2L), i = c(0L,0L), Dim = 2:1))
### "d" <-> "l" for (symmetric) sparse : ---------------------------------------
suppressWarnings( data(KNex) ) ## may warn, as 'Matrix' is recommended
## and exist more than once at check-time
mm <- KNex$mm
xpx <- crossprod(mm)
## extract nonzero pattern
nxpx <- as(xpx, "nsCMatrix")
show(nxpx) ## now ok, since subsetting works
r <- nxpx[1:2,]
lmm <- as(mm, "lgCMatrix")
nmm <- as(lmm, "nMatrix")
xlx <- crossprod(lmm)
x.x <- crossprod(nmm)
## now A = lxpx and B = xlx should be close, but not quite the same
## since <x,y> = 0 is well possible when x!=0 and y!=0 .
## However, A[i,j] != 0 ==> B[i,j] != 0:
A <- as(as(nxpx, "lMatrix"), "TsparseMatrix")
B <- as(as(xlx, "lMatrix"), "TsparseMatrix")
## Encode the (i, j) index pairs of a TsparseMatrix as single integers so
## that two nonzero patterns can be compared with %in% (see next line).
## Note it multiplies by ncol(a), not nrow(a); this is collision-free only
## when nrow <= ncol -- fine here since A and B are square and of equal
## dimension, but do not reuse this helper for general rectangular matrices.
ij <- function(a) a@i + ncol(a) * a@j
stopifnot(all(ij(A) %in% ij(B)))
l3 <- upper.tri(matrix(,3,3))
validObject(c3 <- as(l3, "CsparseMatrix"))
stopifnotValid(c3, "lMatrix")# lgC
(M <- Matrix(l3))
stopifnotValid(M, "ltCMatrix")
stopifnotValid(M2 <- M %x% M, "triangularMatrix") # is "dtT" (why not "dtC" ?)
stopifnot(dim(M2) == c(9,9), identical(M2, kronecker(M,M)))
M3 <- M %x% M2 #ok
(cM3 <- colSums(M3, sparse=TRUE))
identical(as.vector(cM3),
as(rev(rowSums(M3, sparse=TRUE)), "vector"))
M. <- M2 %x% M # gave infinite recursion
## diagonal, sparse & interactions
stopifnotValid(as(Diagonal(3), "TsparseMatrix"), "TsparseMatrix")
stopifnotValid(X <- Diagonal(7) + 1.5 * tM[1:7,1:7], "sparseMatrix")
stopifnotValid(X, "triangularMatrix")
stopifnotValid(XX <- X - chol(crossprod(X)), "triangularMatrix")
X
XX
XX <- as(drop0(XX), "dsCMatrix")
stopifnot(identical(XX, Matrix(0, nrow(X), ncol(X))))
M <- Matrix(m., sparse = FALSE)
(sM <- Matrix(m.))
class(dlM <- M >= 1)
stopifnot(identical(dlM, !(M < 1)))
stopifnotValid(sM, "sparseMatrix")
stopifnotValid(dlM, "denseMatrix")
(lM <- as(dlM, "sparseMatrix"))
lM2 <- as(dlM, "CsparseMatrix") #-> now ok
lM0 <- Matrix:::as_Csparse(dlM)
stopifnot(identical3(lM, lM2, lM0))
selectMethod("coerce", c("lgeMatrix", "CsparseMatrix"),
useInherited = c(from = TRUE, to = FALSE))
ms0 <- Matrix(c(0,1,1,0), 2,2)
ms <- as(ms0, "TsparseMatrix")
cs <- as(ms, "CsparseMatrix")
ll <- as(ms, "lMatrix")
lt <- as(ll, "lgTMatrix")
nn <- as(cs, "nsparseMatrix")
l2 <- as(cs, "lsparseMatrix")
nt <- triu(nn)
n3 <- as(nt, "lsparseMatrix")
da <- nt + t(nt)
dm <- nt * t(nt) + da
##
mnt <- as(nt, "matrix")
m <- rbind(v <- 2:3)
assert.EQ.mat(nt %*% v, mnt %*% v)
assert.EQ.mat(v %*% nt, v %*% mnt)
assert.EQ.mat( crossprod(nt, v), crossprod(mnt,v))
assert.EQ.mat( crossprod(v, nt), crossprod(v,mnt))
assert.EQ.mat(tcrossprod(v, nt), tcrossprod(v,mnt))
assert.EQ.mat(tcrossprod(nt, m), tcrossprod(mnt, m))
##
stopifnotValid(ms, "dsTMatrix")
stopifnot(as(ms0,"matrix") == as(ll, "matrix"), # coercing num |-> log
as(lt, "matrix") == as(ll, "matrix"),
identical(ms, as(ll, "dMatrix")),
identical4(as(ll, "CsparseMatrix"), as(cs, "lMatrix"),# lsC*
as(nn, "lsparseMatrix"), l2),
identical3(da, dm, as(cs, "generalMatrix")), # dgC*
identical(as(da, "lMatrix"), as(lt, "CsparseMatrix")) # lgC*
)
## Dense *packed* ones:
s4 <- as(D4, "symmetricMatrix")
sp <- as(as(as(D4, "symmetricMatrix"),"denseMatrix"),"dspMatrix")
tp <- as(triu(sp),"dtpMatrix")
tpL <- as(tril(sp),"dtpMatrix")
(spL <- t(sp))
stopifnot(sp @uplo=="U", tp @uplo=="U",
spL@uplo=="L", tpL@uplo=="L")
## band():
n <- 4 ; m <- 6
r1 <- Matrix(1:24, n,m)
validObject(M1 <- band(r1, 0,0))
(M1 <- as(M1, "sparseMatrix"))
r2 <- Matrix(1:18, 3, 6)
stopifnot(identical(M1, bandSparse(n,m, k=0, diag = list(diag(r1)))),
identical(band(r2, 0,4),
band(r2, 0,3) + band(r2, 4,4)))
s1 <- as(r1, "sparseMatrix") # such that band(s1) is sparse, too
## Check that band() agrees between the dense and the sparse representation
## for every band range (k1, k2).
for(k1 in (-n):m)
    for(k2 in k1:m) {
        stopifnotValid(br1 <- band(r1, k1,k2), "ddenseMatrix")
        stopifnotValid(bs1 <- band(s1, k1,k2), "CsparseMatrix")
        ## was 'all(r1 == s1)', a loop-invariant comparison of the *unbanded*
        ## matrices which never exercised the banded results br1/bs1:
        stopifnot(all(br1 == bs1))
    }
D. <- Diagonal(x= c(-2,3:4)); D.[lower.tri(D.)] <- 1:3 ; D.
D0 <- Diagonal(x= 0:3); D0[upper.tri(D0)] <- 1:6 ; D0
stopifnot(all.equal(list(modulus = structure(24, logarithm = FALSE), sign = -1L),
unclass(determinant(D.,FALSE)), tol=1e-15),
det(Matrix(0,1)) == 0,
all.equal(list(modulus = structure(0, logarithm = FALSE), sign = 1L),
unclass(determinant(D0,FALSE)), tol=0)
)
### More sparseVector checks: -------------------------------
validObject(new("isparseVector"))
R <- sv <- as(D4, "sparseVector")
## dim(<sparseVector>) <- (n1,n2) --> sparse Matrix :
dim(R) <- dim(D4)
stopifnotValid(sv,"sparseVector")
stopifnotValid(R, "sparseMatrix")
stopifnot(identical(D4, as(R, "diagonalMatrix")))
iv <- c(rep(0, 5), 3, 0,0,7,0,0,0)
sv <- as(iv, "sparseVector")
sv. <- as(as.integer(iv), "sparseVector")
## Note: Method with signature "numeric#sparseVector" chosen ...
(sv2 <- as(sv, "isparseVector")) ## gave error
as(sv, "zsparseVector")
stopifnot(identical(sv., sv2),
identical( Matrix(sv, 3,4, byrow=TRUE),
t(Matrix(sv, 4,3))))
options(warn = 0)# no longer error
## "Large" sparse:
n <- 100000
m <- 50000 ; nnz <- 47
M <- spMatrix(n, m,
i = sample(n, nnz, replace = TRUE),
j = sample(m, nnz, replace = TRUE),
x = round(rnorm(nnz),1))
validObject(Mv <- as(M, "sparseVector"))
validObject(Dv <- as(Diagonal(60000), "sparseVector"))
validObject(LD <- Diagonal(60000, TRUE))
validObject(Lv <- as(LD, "sparseVector"))
Dm <- Dv; dim(Dm) <- c(180000L, 20000L)
stopifnot(!doExtras || isValid(Md <- M * rowSums(M, sparseResult=TRUE), "sparseMatrix"),
LD@diag == "U",
isValid(Dm, "sparseMatrix"),
identical(Dv, as(Dm, "sparseVector")))
p. <- new("dtCMatrix", i = c(2:3, 2L), p = c(0L, 2:3, 3L, 3L),
Dim = c(4L, 4L), x = rep(-0.5, 3), uplo = "L", diag = "U")
assert.EQ.mat(solve(solve(p.)), as(p., "matrix"))
dimnames(p.)[[1]] <- paste(1:4)
ii <- is.na(p.)
stopifnot(all(!ii), !any(as(ii, "denseMatrix")))# used to fail
lst <- ls()
table(istri <- sapply(lst, function(.) is(get(.),"triangularMatrix")))
table(triC <- sapply(lst[istri], function(.) class(get(.))))
table(uniC <- sapply(lst[istri], function(.) get(.)@diag == "U"))
lsUtr <- lst[istri][uniC]
(di <- sapply(lsUtr, function(.) dim(get(.))))
## TODO: use %*%, crossprod(), .. on all those 4 x 4 -- and check "triangular rules"
r <- tryCatch(chol2inv(Diagonal(x=1:10), pi=pi), warning=identity)
stopifnot(grepl("extra argument pi .*chol2inv\\(Diagonal", r$message))
assertError(new("ltrMatrix", Dim = c(2L,2L), x=TRUE))# gave "illegal" object w/o error
assertError(new("ntrMatrix", Dim = c(2L,2L)))# dito
cat('Time elapsed: ', (.pt <- proc.time()),'\n') # "stats"
cat("doExtras:",doExtras,"\n")
if(doExtras) {
cat("checkMatrix() of all: \n---------\n")
Sys.setlocale("LC_COLLATE", "C") # to keep ls() reproducible
for(nm in ls()) if(is(.m <- get(nm), "Matrix")) {
cat("\n", rep("-",nchar(nm)),"\n",nm, ":\n", sep='')
checkMatrix(.m)
}
cat('Time elapsed: ', proc.time() - .pt,'\n') # "stats"
}
dtr <- tr4 <- triu(Matrix(1:16, 4,4))
dtr@x[Matrix:::indTri(4, upper=FALSE, diag=FALSE)] <- 100*(-3:2)
stopifnot(all.equal(dtr, tr4), # because are same *as* simple matrices
dtr@x[1:4] == c(1, -(3:1)*100),
range(tr4) == c(0,16),
range(dtr) == c(0,16)) # <- failed
## new("nsyMatrix") + new("lgeMatrix") # failed
cln <- sort(outer(c("l","n"), paste0(c("ge","sy"), "Matrix"), paste0))
dim(c.c <- as.matrix(expand.grid(cln, cln, KEEP.OUT.ATTRS=FALSE))) # 16 x 2
## clTry <- function(expr) class(tryCatch(expr, error=identity))[[1]]
## '+' [Arith] failed -- now fixed
cbind(c.c, Res = apply(c.c, 1, function(x) class(new(x[1]) + new(x[2]))))
## '<' [Compare] works fine
cbind(c.c, Res = apply(c.c, 1, function(x) class(new(x[1]) < new(x[2]))))
if(!interactive()) warnings()
## Platform - and other such info -- so we find it in old saved outputs
.libPaths()
SysI <- Sys.info()
structure(Sys.info()[c(4,5,1:3)], class="simple.list")
sessionInfo()
c(Matrix = packageDescription("Matrix")$Built)
if(SysI[["sysname"]] == "Linux" && require("sfsmisc")) local({
nn <- names(.Sc <- sfsmisc::Sys.cpuinfo())
nn <- names(.Sc <- .Sc[nn != "flags"])
print(.Sc[grep("\\.[0-9]$", nn, invert=TRUE)])
})
|
18bb7c04846ccedea73bc7f962a0f8cab38c35b8
|
36e231c2defb96012d7e75269654b43ac859d714
|
/Analysis_after_BAM_Scripts/positionOverlay_Poolseq.R
|
ac4b0a09ad4345bc2a44867605eabbdd42ddb5b1
|
[] |
no_license
|
PaulKnoops/Experimental_Evolution_Sequence_Repo
|
179b9b4124f19b707a604aa20d27a2b822953cc7
|
11f6af2ec5634181b11469f4a7f9cebf4e1ed5fe
|
refs/heads/master
| 2020-03-12T16:01:25.734641
| 2018-05-04T18:51:18
| 2018-05-04T18:51:18
| 130,705,291
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,626
|
r
|
positionOverlay_Poolseq.R
|
# Overlay Plots
# Genome-wide (Manhattan-style) overlay of mean selection coefficients for
# the selection treatment, with positions overlapping the candidate list
# re-plotted in black on top.
require(data.table)  # NOTE(review): library() is preferred over require() for hard dependencies
require(tidyverse)
# Candidate positions (chr/pos) used below to flag significant points.
xcs <- fread('../Data/Positions/candidatePos.csv')
#Positions
require(tidyverse)  # NOTE(review): redundant -- already attached above
require(data.table)  # NOTE(review): redundant -- already attached above
# FDR-significant positions with per-position mean selection coefficients.
Zxc_sig <- fread('../Data/poolseq_outputs/poolseq_FDR_Sigpos.csv')
# Cumulative chromosome lengths, used to centre the x-axis tick labels.
Zxc_length <- fread('../Data/poolseq_outputs/SelCoef_Full_LENGTHS.csv')
# Chromosome lengths in plotting order: X, 2L, 2R, 3L, 3R, 4 (assumed from
# the axis labels below -- TODO confirm against SelCoef_Full_LENGTHS.csv).
aa <- Zxc_length$x[1]
bb <- Zxc_length$x[2]
cc <- Zxc_length$x[3]
dd <- Zxc_length$x[4]
ee <- Zxc_length$x[5]
ff <- Zxc_length$x[6]
#
# Count how often each (chr, pos) appears; keep positions appearing exactly
# once, then restrict to the selection ('Sel') treatment.
Zxc_count <- Zxc_sig %>%
  group_by(chr, pos) %>%
  mutate(count = n())
Zxc_count2 <- Zxc_count[which(Zxc_count$count==1),]
Zxc_count2 <- Zxc_count2[which(Zxc_count2$Treatment=='Sel'),]
# Upper y-axis limit for the plot.
mmaaxx <- max(Zxc_count2$meanSelCoef)
# Flag positions overlapping the candidate list.
# NOTE(review): pos and chr are tested *independently* with %in%, so a point
# whose pos matches one candidate and whose chr matches a different candidate
# is also flagged -- confirm this is intended (a join on (chr, pos) would be
# an exact match).
Zxc_count2$sig <- ifelse(Zxc_count2$pos %in% xcs$pos & Zxc_count2$chr %in% xcs$chr, "Yes", 'No')
nnonSig_3R <- Zxc_count2[which(Zxc_count2$sig=="No"),]
ssig_3R <- Zxc_count2[which(Zxc_count2$sig=="Yes"),]
# Coloured points per chromosome for non-flagged positions; black points on
# top for flagged positions; tick labels centred at each chromosome's
# cumulative midpoint.
Chr_plot_sig <- ggplot(data = nnonSig_3R, aes(x=number, y=meanSelCoef, colour=chr)) +
  geom_point(size = 0.8, alpha=0.5, show.legend = F) +
  scale_x_discrete(limits=c(aa/2, aa+(bb/2), (aa+bb+(cc/2)), (aa+bb+cc+(dd/2)), (aa+bb+cc+dd+(ee/2)), (aa+bb+cc+dd+ee+(ff/2))), labels = c('X', "2L", "2R", '3L', '3R', '4')) +
  scale_colour_manual(values=c("#56B4E9", "#E69F00", 'grey30', 'grey46', 'wheat3', 'lemonchiffon4')) +
  theme(text = element_text(size=20),
        axis.text.x= element_text(size=15),
        axis.text.y= element_text(size=15)) +
  ylab("SelCoef") +
  theme(panel.background = element_blank()) +
  geom_point(size=2, colour='black', alpha=1, show.legend = F, data=ssig_3R, aes(x=number, y=meanSelCoef)) +
  ylim(0,mmaaxx)
Chr_plot_sig
|
267b2a7e3904ea5b7abc1946bd694156d7c5f71d
|
dca44395dbf60e1743c65bced7b26838bd676781
|
/KCDC/imputation.tool/imputation.tool.memory.check.R
|
d8a05b6935ab48d71df03516626046ff549a5225
|
[] |
no_license
|
ksmpooh/SungminCode
|
1b550c375125ea7869917de337aa093160aa03eb
|
33b266b80389664282a2d4d6eb9c2db593442a5f
|
refs/heads/master
| 2023-08-03T16:22:35.085299
| 2023-07-31T16:12:33
| 2023-07-31T16:12:33
| 106,177,934
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,373
|
r
|
imputation.tool.memory.check.R
|
# NOTE(review): setwd() makes the script machine-specific; kept as-is.
setwd("c:/Users/user/Desktop/KCDC/imputation.tool/")
#################TEST
# Exploratory pass on a single chunk log: for each process (COMMAND) in the
# ps-style log, extract the row with the peak RSS (resident set size).
loglist <- read.table("memory.check/impute4/log.list.txt",header = T)
head(loglist)
df <- read.table("memory.check/impute4/100000001_105000000.log",header = T)
head(df)
table(df$COMMAND)
# Unique process names (as sorted factor levels from table()).
process_list <- data.frame(table(df$COMMAND))$Var1
# Seed an all-NA one-row data frame to rbind onto.
# NOTE(review): this NA seed row is never removed in this TEST section, so
# 'out' keeps a leading NA row here; the full run below drops it explicitly.
out <- matrix(nrow = 1,ncol = 5)
out <- as.data.frame(out)
colnames(out) <-c("COMMAND","X.CPU","X.MEM","VSZ","RSS")
for (i in process_list) {
  ref = df[df$COMMAND == i,]
  # Row(s) attaining the maximum RSS for this command; keep the first.
  ref = ref[ref$RSS == max(ref$RSS),c("COMMAND","X.CPU","X.MEM","VSZ","RSS")]
  ref = ref[1,]
  #print(ref)
  # print(max(df[df$COMMAND == i,]$RSS))
  out = rbind(out,ref)
}
out
ref
########## g20201230 all ----
# For every chunk log listed in log.list.txt, find each COMMAND's row of
# peak RSS and collect them into one data frame with columns
# chunk, COMMAND, X.CPU, X.MEM, VSZ, RSS.
#
# Rewritten to build a list of rows and rbind once (the original grew the
# data frame with rbind() inside a nested loop -- O(n^2) -- and seeded it
# with an all-NA row that had to be stripped afterwards).
# NOTE(review): setwd() makes the script machine-specific; kept as-is.
setwd("c:/Users/user/Desktop/KCDC/imputation.tool/")
loglist <- read.table("memory.check/impute4/log.list.txt", header = TRUE)
peak_rows <- lapply(loglist$log, function(chunk) {
  df <- read.table(paste0("memory.check/impute4/", chunk, ".log"), header = TRUE)
  # Sorted unique process names (matches the original table()-based order).
  cmds <- sort(unique(as.character(df$COMMAND)))
  per_cmd <- lapply(cmds, function(cmd) {
    ref <- df[df$COMMAND == cmd, ]
    # First row attaining the maximum RSS for this command.
    ref <- ref[which.max(ref$RSS), c("COMMAND", "X.CPU", "X.MEM", "VSZ", "RSS")]
    ref$chunk <- chunk
    ref
  })
  do.call(rbind, per_cmd)
})
out <- do.call(rbind, peak_rows)
# Restore the original column order (chunk first) and clean row names.
out <- out[, c("chunk", "COMMAND", "X.CPU", "X.MEM", "VSZ", "RSS")]
row.names(out) <- NULL
head(out)
|
f23abcae41b4412914b0afd57da767a5310c0749
|
a0ceb8a810553581850def0d17638c3fd7003895
|
/scripts/rstudioserver_analysis/WKM_and_BM_together/LDA_TSS_downstream_BM_checks.R
|
5fd18c366dc4df6c3aa490d608ad33cf64783cad
|
[] |
no_license
|
jakeyeung/sortchicAllScripts
|
9e624762ca07c40d23e16dbd793ef9569c962473
|
ecf27415e4e92680488b6f228c813467617e7ee5
|
refs/heads/master
| 2023-04-15T22:48:52.272410
| 2022-10-24T10:45:24
| 2022-10-24T10:45:24
| 556,698,796
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,571
|
r
|
LDA_TSS_downstream_BM_checks.R
|
# Jake Yeung
# Date of Creation: 2020-06-23
# File: ~/projects/scchic/scripts/rstudioserver_analysis/WKM_and_BM_together/LDA_TSS_downstream.R
#
rm(list=ls())
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(topicmodels)
library(hash)
library(igraph)
library(umap)
library(scchicFuncs)
library(JFuncs)
## Fit a per-feature linear model: expression as a function of cluster with
## a cluster-specific slope in 'xvar' (exprs ~ xvar:clst + clst).
##
## jrow  -- numeric vector of expression values, one per cell
## jmeta -- data frame (or list) with components 'xvar' and 'clst',
##          parallel to 'jrow'
##
## Returns the fitted "lm" object.
FitClstVar <- function(jrow, jmeta){
  model_dat <- data.frame(
    exprs = jrow,
    xvar  = jmeta$xvar,
    clst  = jmeta$clst,
    stringsAsFactors = TRUE  # clusters enter the model as factors
  )
  lm(exprs ~ xvar:clst + clst, data = model_dat)
}
# Output location for the fitted-slope objects and the diagnostic PDF.
outdir <- "/home/jyeung/hub_oudenaarden/jyeung/data/WKM_BM_merged/from_rstudioserver/var_slope_estimates"
fname <- paste0("MouseBM_log_lm_fits.", Sys.Date(), ".CleanUpEryth.RData")
fnamepdf <- paste0("MouseBM_log_lm_fits.", Sys.Date(), ".CleanUpEryth.pdf")
outf <- file.path(outdir, fname)
# pdf(file = file.path(outdir, fnamepdf), useDingbats = FALSE)
# Histone marks analysed downstream; named so lapply() keeps the names.
jmarks <- c("H3K4me1", "H3K4me3", "H3K27me3"); names(jmarks) <- jmarks
# UMAP settings; fixed random_state for reproducible embeddings.
jsettings <- umap.defaults
jsettings$n_neighbors <- 30
jsettings$min_dist <- 0.1
jsettings$random_state <- 123
# Mouse chromosomes chr1..chr19, chrX, chrY.
jchromos <- paste("chr", c(seq(19), "X", "Y"), sep = "")
## Map an original BM cluster label (e.g. "Bcells-Cd83_topic10") to its
## short name by prefix match against names(bm.rename).  Labels matching
## no prefix are returned prefixed with "z" (so they sort last).  When
## several prefixes match, the last one in names(bm.rename) wins, exactly
## as in the original sequential loop.
RenameClusterBM <- function(clstr.orig, bm.rename){
  hits <- Filter(function(p) startsWith(clstr.orig, p), names(bm.rename))
  if (length(hits) == 0) {
    paste0("z", clstr.orig)
  } else {
    bm.rename[[hits[length(hits)]]]
  }
}
bm.rename <- as.list(hash(c("Bcells", "Eryth", "HSCs", "Neutrophils"), c("lymph", "eryth", "HSPCs", "granu")))
dat.annot.lst.BM <- lapply(jmarks, function(jmark){
inf.annot <- paste0("/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/from_rstudioserver/glmpca_analyses/GLMPCA_outputs.KeepBestPlates2.celltyping/GLMPCA_celltyping.", jmark, ".AllMerged.mergesize_1000.nbins_1000.penalty_1.covar_ncuts.var.log2.CenteredAndScaled.RData")
assertthat::assert_that(file.exists(inf.annot))
load(inf.annot, v=T)
dat.umap.glm.fillNAs <- subset(dat.umap.glm.fillNAs, !is.na(cluster))
dat.umap.glm.fillNAs$cluster <- sapply(dat.umap.glm.fillNAs$cluster, RenameClusterBM, bm.rename)
return(dat.umap.glm.fillNAs)
})
# Load data ---------------------------------------------------------------
hubprefix <- "/home/jyeung/hub_oudenaarden"
jdate <- "2020-06-23"
jspecies <- "MouseBM"
jdist <- 10000
jmark <- "H3K4me3"
out.objs <- lapply(jmarks, function(jmark){
indir <- file.path(hubprefix, paste0("jyeung/data/scChiC/raw_demultiplexed/LDA_outputs_all/ldaAnalysisTSS_BM_WKM_dists/lda_TSS.", jspecies, ".", jmark, ".TSSdist_", jdist, ".", jdate, ".K-30.binarize.FALSE"))
assertthat::assert_that(dir.exists(indir))
fname <- paste0("ldaOut.", jspecies, ".", jmark, ".TSSdist_", jdist, ".", jdate, ".K-30.Robj")
inf <- file.path(indir, fname)
load(inf, v=T)
return(list(out.lda = out.lda, count.mat = count.mat))
})
out.ldas <- lapply(out.objs, function(x) x$out.lda)
count.mat <- lapply(out.objs, function(x) x$count.mat)
# Plot output -------------------------------------------------------------
tm.result.lst <- lapply(out.ldas, function(out.lda){
tm.result <- posterior(out.lda)
tm.result <- AddTopicToTmResult(tm.result, jsep = "")
})
# make umaps, connect to annots
dat.umaps.lst <- lapply(tm.result.lst, function(tm.result){
dat.umap <- DoUmapAndLouvain(tm.result$topics, jsettings) %>%
rowwise() %>%
mutate(cond = ifelse(grepl("cd41", cell, ignore.case = TRUE), "zCd41Enriched", "Unenriched"))
return(dat.umap)
})
dat.umap.merged <- lapply(jmarks, function(jmark){
left_join(dat.umaps.lst[[jmark]], subset(dat.annot.lst.BM[[jmark]], select = c(cell, cluster, cluster.orig)))
})
m.umaps.lst <- lapply(jmarks, function(jmark){
dat.umap <- dat.umaps.lst[[jmark]]
m <- ggplot(dat.umap, aes(x = umap1, y = umap2, color = louvain)) + geom_point(alpha = 0.3) + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
ggtitle(paste(jmark, "dist:", jdist))
})
JFuncs::multiplot(m.umaps.lst$H3K4me1, m.umaps.lst$H3K4me3, m.umaps.lst$H3K27me3, cols = 3)
m.umaps.lst <- lapply(jmarks, function(jmark){
dat.umap <- dat.umaps.lst[[jmark]]
m <- ggplot(dat.umap, aes(x = umap1, y = umap2, color = cond)) + geom_point(alpha = 0.3) + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
ggtitle(paste(jmark, "dist:", jdist))
})
JFuncs::multiplot(m.umaps.lst$H3K4me1, m.umaps.lst$H3K4me3, m.umaps.lst$H3K27me3, cols = 3)
# Plot imputed ------------------------------------------------------------
dat.imputed.lst <- lapply(tm.result.lst, function(tm.result){
dat.imput <- t(log(tm.result$topics %*% tm.result$terms)) # natural log links with Poisson regression
})
# Load RNAseq genes ------------------------------------------------------
inf.de <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/from_rstudioserver/rdata_robjs/de_genes_stringent_objects/de_genes_sorted_and_giladi.WithHouseKeepAndNotExpressed.FixExprsOther.RData"
load(inf.de, v=T)
# Plot gene sets ---------------------------------------------------------
rnames.all <- Reduce(intersect, lapply(dat.imputed.lst, rownames))
jgenes.all <- sapply(rnames.all, function(x) strsplit(x, ";")[[1]][[2]])
jens.all <- sapply(jgenes.all, AssignHash, g2e.hash)
e2g.hash <- hash::invert(g2e.hash)
# filter by gene sets?
rnames.bygset <- lapply(de.ens.sorted.stringent, function(ens.vec){
jgenes <- sapply(ens.vec, AssignHash, e2g.hash)
jgenes.filt <- jgenes.all %in% jgenes
rnames.filt <- rnames.all[jgenes.filt]
})
# plot gene sets onto UMAP
gset.dat <- lapply(rnames.bygset, function(rnames){
lapply(jmarks, function(jmark){
jmat <- dat.imputed.lst[[jmark]]
jmat.filt <- jmat[rnames, ]
jmat.means <- colMeans(jmat.filt)
exprs.dat <- data.frame(exprs = jmat.means, cell = names(jmat.means), mark= jmark, stringsAsFactors = FALSE)
left_join(dat.umap.merged[[jmark]], exprs.dat)
})
})
dat.vars.lst <- lapply(jmarks, function(jmark){
print(jmark)
dat.impute <- dat.imputed.lst[[jmark]]
dat.tmp <- CalculateVarAll(dat.impute, jchromos = jchromos) %>%
left_join(., dat.umap.merged[[jmark]])
# left_join(., subset(gset.dat[[jgset]][[jmark]] %>% dplyr::select(c(cell, exprs))))
})
jgene <- "Ednra"
jgene <- "F3"
jgene <- "Aldh3b2"
jgene <- "Mogat2"
jgene <- "Ddx60"
jgene <- "Ifnlr1"
jgene <- "Rnd1"
jgene <- "Celsr3"
jgene <- "Sox6"
jgene <- "Pax5"
jgene <- "Sgms2"
jgene <- "Lgals4"
jgene <- "Cebpb"
jgene <- "S100a7a"
jgene <- "Hbb-y"
jgene <- "Sox6"
jgene <- "Irf4"
# jgene <- "Retnlg"
# jgene <- ""
exprs.lst <- lapply(jmarks, function(jmark){
print(jmark)
jmat <- dat.imputed.lst[[jmark]]
(rnames.keep <- grep(jgene, rownames(jmat), value = TRUE))
if (length(rnames.keep) == 1){
exprs.vec <- jmat[rnames.keep, ]
} else {
exprs.vec <- colMeans(jmat[rnames.keep, ])
}
exprs.dat <- data.frame(exprs = exprs.vec, cell = names(exprs.vec), rname = rnames.keep, stringsAsFactors = FALSE)
left_join(dat.umaps.lst[[jmark]], exprs.dat)
})
# exprs.lst <- lapply(jmarks, function(jmark){
# print(jmark)
# jmat <- dat.imputed.lst[[jmark]]
# exprs.vec <- colMeans(jmat)
# exprs.dat <- data.frame(exprs = exprs.vec, cell = names(exprs.vec), stringsAsFactors = FALSE)
# left_join(dat.umaps.lst[[jmark]], exprs.dat)
# })
m.exprs.lst <- lapply(jmarks, function(jmark){
dat.umap <- exprs.lst[[jmark]]
m <- ggplot(dat.umap, aes(x = umap1, y = umap2, color = exprs)) + geom_point(alpha = 0.3) + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "bottom") +
ggtitle(paste(jmark, jgene))
if (jmark == "H3K27me3"){
m <- m + scale_color_viridis_c(direction = 1)
} else {
m <- m + scale_color_viridis_c(direction = 1)
}
})
JFuncs::multiplot(m.exprs.lst$H3K4me1, m.exprs.lst$H3K4me3, m.exprs.lst$H3K27me3, cols = 3)
# Downstream --------------------------------------------------------------
# dat.an
dat.umap.merged2 <- lapply(jmarks, function(jmark){
jtmp <- left_join(dat.umap.merged[[jmark]], exprs.lst[[jmark]]) %>%
left_join(., dat.vars.lst[[jmark]]) %>%
filter(cluster %in% c("eryth", "granu", "HSPCs", "lymph")) %>%
ungroup() %>%
mutate(xvar = max(cell.var.within.sum.norm) - cell.var.within.sum.norm)
})
cbPalette <- c("#696969", "#32CD32", "#56B4E9", "#FFB6C1", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf")
ggplot(dat.umap.merged2$H3K27me3, aes(x = cluster, y = exprs)) + geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
ggplot(dat.umap.merged2$H3K27me3, aes(x = cell.var.within.sum.norm, y = exprs, color = cluster)) + geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
ggplot(dat.umap.merged2$H3K27me3, aes(x = xvar, y = exprs, color = cluster)) + geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
ggplot(dat.umap.merged2$H3K27me3, aes(x = xvar, y = exprs, color = cluster)) + geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + facet_wrap(~cluster)
ggplot(dat.umap.merged2$H3K27me3, aes(x = cluster, y = exprs, fill = cluster)) + geom_boxplot() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
ggplot(dat.umap.merged2$H3K4me1, aes(x = xvar, y = exprs, color = cluster)) + geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
ggplot(dat.umap.merged2$H3K27me3 %>% filter(cluster %in% c("eryth", "granu", "HSPCs", "lymph")), aes(x = cell.var.within.sum.norm, y = exprs, color = cluster)) + geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
ggplot(dat.umap.merged2$H3K27me3 %>% filter(cluster %in% c("eryth", "granu", "HSPCs", "lymph")), aes(x = cell.var.within.sum.norm, y = exprs, color = cluster)) + geom_point() + theme_bw() +
theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
|
7d5f64b9321262a4823ed30524da51455b698dfb
|
de4f8bf5c5f873508d950dd500828e2173428ab8
|
/scripts/Script 1 Preparing and Analyzing Phenotypic Data.R
|
a04319fa9dfa16039c35c854f2049fe90d3423b1
|
[] |
no_license
|
kraltendorf/IWG_Yield_Components
|
289ec03902881bf59e106bda7839a9462e67b01c
|
4e84ca1146889cf196fa8b8bbd616552043df382
|
refs/heads/master
| 2023-02-04T13:14:14.381873
| 2020-12-17T21:44:18
| 2020-12-17T21:44:18
| 304,757,777
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,938
|
r
|
Script 1 Preparing and Analyzing Phenotypic Data.R
|
# Project: IWG_Yield_Components
# Script 1 - Preparing and Analyzing Phenotypic Data
# Author: Kayla R. Altendorf
# Date: 05/21/2020
# Required Pacakges:
library("dplyr")
library("tidyr")
library("lme4")
library("lmerTest")
library("emmeans")
library("stringr")
library("reshape")
library("plyr")
library("multcompView")
# Declare where you want the output to go
path <- c("/users/kayla.altendorf/OneDrive - USDA/Publications/IWG Yield Components/Scripts for GitHub/output/")
dir.create(path)
#### Step 1: Load Phenotypic Data ####
# this is the same format that is available from the intermediate wheatgrass database
# using this dataset requires a bit of finagling, but for posterity it's best to have
# one version of the data in use
# NOTE(review): hard-coded absolute path -- edit for your own machine.
dat <- read.table("/Users/kayla.altendorf/OneDrive - USDA/Publications/IWG Yield Components/Scripts for GitHub/data/NAM_Data.txt", header = T, sep = "\t")
# select the traits that will be used for this analysis (database short codes)
traits <- c("FLFWD", "FLFLNG", "PTHT", "STMNUM", "STMDIA", "SPKDEN", "HDEMPER", "ZDK",
"FLORSPK", "SDSFLOR", "SPLYLD", "SPKYLD", "SDMG", "SPKHD")
dat1 <- dat %>% filter(trait_id %in% traits)
# I prefer to work with full length names, so I'll sub them in here.
# The order of my_names must match the order of traits above, since the two
# vectors are zipped positionally into the lookup table below.
my_names <- c("flag_leaf_width", "flag_leaf_length", "height", "reproductive_tiller_ct",
"stem_diameter", "spikelet_density", "emergence_percent", "anthesis_score", "florets_per_spikelet",
"floret_site_utilization", "yield_per_plant",
"yield_per_spike", "thousand_grain_weight", "spikelets_per_spike")
# Lookup table mapping short codes to full trait names, then attach full names.
trait_names <- data.frame(trait_id = traits, trait_id_full = my_names)
dat2 <- left_join(dat1, trait_names, by = "trait_id")
#### Step 2: Format Phenotypic Data ####
# get rid of the unnecessary columns
# note: since spikelets per spike was taken three times, the samples need separate names so they can be averaged
dat3 <- dat2 %>%
dplyr::rename(year = phenotype_year, # rename cols to match personal preference
famID = family_name,
col = range) %>%
mutate(loc = substr(experiment_id, 4, 6), # extract location
trait_id_full = case_when(trait_id_full == "spikelets_per_spike" ~ paste(trait_id_full, sample_number, sep = ""),
trait_id_full != "spikelets_per_spike" ~ paste(trait_id_full)), # give each subsample of spikelets_per_spike a unique name
parent = substr(germplasm_id, 6, 6)) %>% # extract parent (C for common, d for donor, p for parent)
filter(parent != "P") %>% # filter to exclude parents, as this project deals only with progeny
select(famID, germplasm_id, loc, year, rep, trait_id_full, phenotype_value, plant_id) %>%
pivot_wider(names_from = trait_id_full, values_from = phenotype_value) %>%
select(-plant_id) %>% # pivot to wide format
mutate(spikelets_per_spike1 = as.numeric(as.character(spikelets_per_spike1)), # make various traits as numeric
spikelets_per_spike2 = as.numeric(as.character(spikelets_per_spike2)),
spikelets_per_spike3 = as.numeric(as.character(spikelets_per_spike3)),
flag_leaf_length = as.numeric(as.character(flag_leaf_length)),
flag_leaf_width = as.numeric(as.character(flag_leaf_width)),
loc = str_replace(loc, "SAL", "TLI")) # replace SAL (Salina) with TLI
# take the average for spikelets and then create a column for merging
# and calculate flag leaf area (length * width)
# NOTE(review): dat3[,10:12] selects the three spikelet subsample columns by
# position -- this silently breaks if the column order of dat3 changes; confirm
# positions after any edit to the select()/pivot_wider() steps above.
dat4 <- dat3 %>% mutate(spikelets_per_spike = rowMeans(dat3[,10:12], na.rm = T)) %>% # calculate the average spikelets per spike
select(-spikelets_per_spike1, -spikelets_per_spike2, -spikelets_per_spike3) %>%
mutate(merge_col = paste(germplasm_id, loc, year, rep, sep = "_"),
flag_leaf_area = (flag_leaf_width*0.1) * flag_leaf_length) %>% # convert width mm to cm
select(-famID, -germplasm_id, -loc, -year, -rep, -flag_leaf_width, -flag_leaf_length)
# change feekes scores to coded values for easier analysis:
# anthesis (Feekes/Zadoks-style) stage strings are mapped to ordinal codes 1..10
feekes <- data.frame(anthesis_score = c("49", "51", "53", "55", "57", "59", "61", "65", "69", "71"), coded = 1:10)
dat5 <- left_join(dat4, feekes, by = "anthesis_score") %>%
select(-anthesis_score) %>%
dplyr::rename(anthesis_score = coded)
# the database includes *all* entries, including parents, plants that were identified later as selfs, and so on.
# furthermore, the population itself is not balanced (e.g. unequal numbers of individuals within families,
# entries per location and year), which causes problems with the ANOVA
# to address this, we will load in a backbone dataset, which is balanced using NA values,
# and we'll format the data to match.
# read in the backbone csv
# NOTE(review): hard-coded absolute path -- edit for your own machine.
backbone <- read.csv("/Users/kayla.altendorf/OneDrive - USDA/Publications/IWG Yield Components/Scripts for GitHub/data/backbone.csv")
# plantID3 is the balanced plantID
# example:
backbone %>% group_by(loc, year, rep, famID) %>% tally() # all have 133 entries
# left join with the data; merge_col (germplasm_loc_year_rep) is the join key,
# built in Step 2 above, and is dropped once the join is done
dat6 <- left_join(backbone, dat5, by = "merge_col") %>% select(-merge_col)
dat6 %>% group_by(loc, year, rep, famID) %>% tally() # make sure it's still balanced
# Change all phenotype values to NA for plants flagged in the 'self' column
# (rows where self is not NA, i.e. plants later identified as selfs/outcrosses).
# Columns 13:25 are the phenotype columns of dat6.
# Vectorized replacement of the original row-by-row `for (i in 1:nrow(dat6))`
# loop: a single logical index selects the flagged rows and blanks all
# phenotype columns in one assignment -- identical result, O(1) R-level calls.
dat6[!is.na(dat6$self), 13:25] <- NA
# write out the final dataset
write.csv(dat6, paste(path, "data.csv", sep = "/"), row.names = F)
# read it back in to convert everything to numerics
# (round-tripping through CSV coerces the list-typed phenotype columns to plain
# numeric vectors)
dat <- read.csv(paste(path, "data.csv", sep = "/"))
dat$year <- as.factor(dat$year)
dat$rep <- as.factor(dat$rep)
#### Step 3: Analysis of Variance for Combined Analysis ####
traits <- colnames(dat)[13:25] # grab the trait names from column headers
# make data as factor
# NOTE(review): rep and year were already converted to factor two lines above;
# the repeat is redundant but harmless.
dat$rep <- as.factor(dat$rep)
dat$year <- as.factor(dat$year)
dat$plantID3 <- as.factor(dat$plantID3)
dat$famID <- as.factor(dat$famID)
# run through each trait
# (bare expression: auto-prints the trait names for interactive inspection)
traits
# Combined-environment mixed models, one trait at a time. Each section fits
# trait ~ famID * loc * year with random effects for rep-within-loc and
# genet-within-family-within-loc, writes the ANOVA table and the loc-x-year
# estimated marginal means (with compact letter display) to per-trait folders.
# Traits with skewed distributions are sqrt- or log-transformed and their
# emmeans back-transformed via type = "response".
# NOTE(review): the local variable `anova` shadows stats::anova (works because R
# looks up a *function* by that name, but it is confusing). Also, CLD() is
# deprecated in recent emmeans releases in favor of multcomp::cld() -- confirm
# against the installed emmeans version.
# height
model <- lmer(height ~ famID * loc * year + (1|loc:rep) + (1|loc:famID:plantID3), data = dat)
anova <- anova(model, type="II")
plot(model)
dir.create(paste(path, "height", sep = "/"))
emmeans_loc_year <- CLD(emmeans(model, ~ year * loc), Letters = c(LETTERS))
write.table(anova, paste(path, "/height/height_anova.txt", sep = ""), quote = F, row.names = T, col.names = NA, sep = "\t")
write.table(emmeans_loc_year, paste(path, "/height/height_emmeans_loc_year.txt", sep = ""), quote = F, row.names = F, col.names = T, sep = "\t")
# reproductive_tiller_ct (sqrt-transformed; emmeans back-transformed)
# NOTE(review): this anova() call omits type="II" unlike every other trait --
# presumably an oversight; confirm which ANOVA type was intended.
model <- lmer(sqrt(reproductive_tiller_ct) ~ famID * loc * year + (1|loc:rep) + (1|loc:famID:plantID3), data = dat)
anova <- anova(model)
plot(model)
dir.create(paste(path, "reproductive_tiller_ct", sep = "/"))
emmeans_loc_year <- CLD(emmeans(model, ~ year * loc), type = "response", Letters = c(LETTERS))
write.table(anova, paste(path, "/reproductive_tiller_ct/reproductive_tiller_ct_anova.txt", sep = ""), quote = F, row.names = T, col.names = NA, sep = "\t")
write.table(emmeans_loc_year, paste(path, "/reproductive_tiller_ct/reproductive_tiller_ct_emmeans_loc_year.txt", sep = ""), quote = F, row.names = F, col.names = T, sep = "\t")
# florets_per_spikelet
model <- lmer(florets_per_spikelet ~ famID * loc * year + (1|loc:rep) + (1|loc:famID:plantID3), data = dat)
anova <- anova(model, type="II")
plot(model)
dir.create(paste(path, "florets_per_spikelet", sep = "/"))
emmeans_loc_year <- CLD(emmeans(model, ~ year * loc), Letters = c(LETTERS))
write.table(anova, paste(path, "/florets_per_spikelet/florets_per_spikelet_anova.txt", sep = ""), quote = F, row.names = T, col.names = NA, sep = "\t")
write.table(emmeans_loc_year, paste(path, "/florets_per_spikelet/florets_per_spikelet_emmeans_loc_year.txt", sep = ""), quote = F, row.names = F, col.names = T, sep = "\t")
# emergence_percent
model <- lmer(emergence_percent ~ famID * loc * year + (1|loc:rep) + (1|loc:famID:plantID3), data = dat)
anova <- anova(model, type="II")
plot(model)
dir.create(paste(path, "emergence_percent", sep = "/"))
emmeans_loc_year <- CLD(emmeans(model, ~ year * loc), Letters = c(LETTERS))
write.table(anova, paste(path, "/emergence_percent/emergence_percent_anova.txt", sep = ""), quote = F, row.names = T, col.names = NA, sep = "\t")
write.table(emmeans_loc_year, paste(path, "/emergence_percent/emergence_percent_emmeans_loc_year.txt", sep = ""), quote = F, row.names = F, col.names = T, sep = "\t")
# stem_diameter
model <- lmer(stem_diameter ~ famID * loc * year + (1|loc:rep) + (1|loc:famID:plantID3), data = dat)
anova <- anova(model, type="II")
plot(model)
dir.create(paste(path, "stem_diameter", sep = "/"))
emmeans_loc_year <- CLD(emmeans(model, ~ year * loc), Letters = c(LETTERS))
write.table(anova, paste(path, "/stem_diameter/stem_diameter_anova.txt", sep = ""), quote = F, row.names = T, col.names = NA, sep = "\t")
write.table(emmeans_loc_year, paste(path, "/stem_diameter/stem_diameter_emmeans_loc_year.txt", sep = ""), quote = F, row.names = F, col.names = T, sep = "\t")
# yield_per_plant -- only measured in two environment/rep combinations
# (STP rep 1 and TLI rep 2, both 2017), so the model drops the year and rep
# terms and the data are restricted to those plots.
dat.yield_per_plant <- dat %>% # making a new data frame to account for missing data
  mutate(loc_rep = paste(loc, "_", rep, sep = ""))
# BUGFIX: the original filter used `loc_rep == c("STP_1", "TLI_2")`, which
# recycles the length-2 vector along the rows and silently keeps only rows
# whose position parity happens to match ("STP_1" on odd rows, "TLI_2" on
# even rows). `%in%` performs the intended set-membership test and keeps
# every row from either plot.
model <- lmer(sqrt(yield_per_plant) ~ famID * loc + (1|plantID3) + (1|loc:plantID3),
              data = filter(dat.yield_per_plant, year == 2017 & loc_rep %in% c("STP_1", "TLI_2")))
anova <- anova(model, type = "II")
plot(model)
dir.create(paste(path, "yield_per_plant", sep = "/"))
# Back-transform emmeans to the response (yield) scale; CLD adds grouping letters.
emmeans_loc_year <- CLD(emmeans(model, ~ loc), type = "response", Letters = c(LETTERS))
write.table(anova, paste(path, "/yield_per_plant/yield_per_plant_anova.txt", sep = ""), quote = FALSE, row.names = TRUE, col.names = NA, sep = "\t")
write.table(emmeans_loc_year, paste(path, "/yield_per_plant/yield_per_plant_emmeans_loc_year.txt", sep = ""), quote = FALSE, row.names = FALSE, col.names = TRUE, sep = "\t")
# Remaining combined-environment mixed models; same pattern as above
# (trait ~ famID * loc * year with rep-within-loc and genet random effects,
# ANOVA + loc-x-year emmeans written per trait). sqrt()/log() transforms are
# paired with type = "response" so emmeans are reported on the original scale.
# floret_site_utilization (sqrt-transformed)
model <-lmer(sqrt(floret_site_utilization) ~ famID * loc * year + (1|loc:rep) + (1|loc:famID:plantID3), data = dat)
anova <- anova(model, type="II")
plot(model)
dir.create(paste(path, "floret_site_utilization", sep = "/"))
emmeans_loc_year <- CLD(emmeans(model, ~ year * loc), type = "response", Letters = c(LETTERS))
write.table(anova, paste(path, "/floret_site_utilization/floret_site_utilization_anova.txt", sep = ""), quote = F, row.names = T, col.names = NA, sep = "\t")
write.table(emmeans_loc_year, paste(path, "/floret_site_utilization/floret_site_utilization_emmeans_loc_year.txt", sep = ""), quote = F, row.names = F, col.names = T, sep = "\t")
# spikelet_density (sqrt-transformed)
model <-lmer(sqrt(spikelet_density) ~ famID * loc * year + (1|loc:rep) + (1|loc:famID:plantID3), data = dat)
anova <- anova(model, type="II")
plot(model)
dir.create(paste(path, "spikelet_density", sep = "/"))
emmeans_loc_year <- CLD(emmeans(model, ~ year * loc), type = "response", Letters = c(LETTERS))
write.table(anova, paste(path, "/spikelet_density/spikelet_density_anova.txt", sep = ""), quote = F, row.names = T, col.names = NA, sep = "\t")
write.table(emmeans_loc_year, paste(path, "/spikelet_density/spikelet_density_emmeans_loc_year.txt", sep = ""), quote = F, row.names = F, col.names = T, sep = "\t")
# yield_per_spike (sqrt-transformed)
model <-lmer(sqrt(yield_per_spike) ~ famID * loc * year + (1|loc:rep) + (1|loc:famID:plantID3), data = dat)
anova <- anova(model, type="II")
plot(model)
dir.create(paste(path, "yield_per_spike", sep = "/"))
emmeans_loc_year <- CLD(emmeans(model, ~ year * loc), type = "response", Letters = c(LETTERS))
write.table(anova, paste(path, "/yield_per_spike/yield_per_spike_anova.txt", sep = ""), quote = F, row.names = T, col.names = NA, sep = "\t")
write.table(emmeans_loc_year, paste(path, "/yield_per_spike/yield_per_spike_emmeans_loc_year.txt", sep = ""), quote = F, row.names = F, col.names = T, sep = "\t")
# thousand_grain_weight
model <- lmer(thousand_grain_weight ~ famID * loc * year + (1|loc:rep) + (1|loc:famID:plantID3), data = dat)
anova <- anova(model, type="II")
plot(model)
dir.create(paste(path, "thousand_grain_weight", sep = "/"))
emmeans_loc_year <- CLD(emmeans(model, ~ year * loc), Letters = c(LETTERS))
write.table(anova, paste(path, "/thousand_grain_weight/thousand_grain_weight_anova.txt", sep = ""), quote = F, row.names = T, col.names = NA, sep = "\t")
write.table(emmeans_loc_year, paste(path, "/thousand_grain_weight/thousand_grain_weight_emmeans_loc_year.txt", sep = ""), quote = F, row.names = F, col.names = T, sep = "\t")
# spikelets_per_spike
model <- lmer(spikelets_per_spike ~ famID * loc * year + (1|loc:rep) + (1|loc:famID:plantID3), data = dat)
anova <- anova(model, type="II")
plot(model)
dir.create(paste(path, "spikelets_per_spike", sep = "/"))
emmeans_loc_year <- CLD(emmeans(model, ~ year * loc), Letters = c(LETTERS))
write.table(anova, paste(path, "/spikelets_per_spike/spikelets_per_spike_anova.txt", sep = ""), quote = F, row.names = T, col.names = NA, sep = "\t")
write.table(emmeans_loc_year, paste(path, "/spikelets_per_spike/spikelets_per_spike_emmeans_loc_year.txt", sep = ""), quote = F, row.names = F, col.names = T, sep = "\t")
# flag_leaf_area (log-transformed)
model <-lmer(log(flag_leaf_area) ~ famID * loc * year + (1|loc:rep) + (1|loc:famID:plantID3), data = dat)
anova <- anova(model, type="II")
plot(model)
dir.create(paste(path, "flag_leaf_area", sep = "/"))
emmeans_loc_year <- CLD(emmeans(model, ~ year * loc), type = "response", Letters = c(LETTERS))
write.table(anova, paste(path, "/flag_leaf_area/flag_leaf_area_anova.txt", sep = ""), quote = F, row.names = T, col.names = NA, sep = "\t")
write.table(emmeans_loc_year, paste(path, "/flag_leaf_area/flag_leaf_area_emmeans_loc_year.txt", sep = ""), quote = F, row.names = F, col.names = T, sep = "\t")
# anthesis_score (ordinal Feekes codes 1..10 treated as numeric)
model <- lmer(anthesis_score ~ famID * loc * year + (1|loc:rep) + (1|loc:famID:plantID3), data = dat)
anova <- anova(model, type="II")
plot(model)
dir.create(paste(path, "anthesis_score", sep = "/"))
emmeans_loc_year <- CLD(emmeans(model, ~ year * loc), Letters = c(LETTERS))
write.table(anova, paste(path, "/anthesis_score/anthesis_score_anova.txt", sep = ""), quote = F, row.names = T, col.names = NA, sep = "\t")
write.table(emmeans_loc_year, paste(path, "/anthesis_score/anthesis_score_emmeans_loc_year.txt", sep = ""), quote = F, row.names = F, col.names = T, sep = "\t")
#### Step 4: Analysis of Variance within Environments ####
# filter data by location x year to produce one data frame per environment
stp17 <- filter(dat, loc == "STP" & year == "2017")
stp18 <- filter(dat, loc == "STP" & year == "2018")
tli17 <- filter(dat, loc == "TLI" & year == "2017")
tli18 <- filter(dat, loc == "TLI" & year == "2018")
# parallel list/name vectors used by the per-environment loops below
loc_list <- list(stp17, stp18, tli17, tli18)
loc_names <- c("stp17", "stp18", "tli17", "tli18")
# remove reproductive tiller number and full plant yield, since that trait is missing tli18 environment
# (positions 2 and 6 of the traits vector; those two traits get their own loops below)
normal_traits <- traits[-c(2, 6)]
# Within-environment fixed-effect models for the "normal" traits:
# trait ~ famID/plantID3 + rep, fit separately in each of the four
# environments, writing the ANOVA table, family-level emmeans (with compact
# letter display) and genet-within-family emmeans to each trait's folder.
for (j in seq_along(loc_list)) {            # seq_along() is safe for empty lists, unlike 1:length()
  for (i in seq_along(normal_traits)) {
    # Build the model formula from the trait name. The original passed a
    # meaningless `sep = ""` through paste0()'s dots (paste0 has no sep
    # argument; the empty string was just concatenated) -- dropped here.
    model_formula <- paste0(normal_traits[i], " ~ famID/plantID3 + rep")
    model <- lm(model_formula, data = loc_list[[j]])
    an <- as.data.frame(anova(model))
    emmeans_fam <- as.data.frame(CLD(emmeans(model, ~ famID), Letters = c(LETTERS)))
    emmeans_genet <- as.data.frame(emmeans(model, ~ plantID3|famID))
    write.table(an, paste(path, "/", normal_traits[i], "/", normal_traits[i], "_anova_", loc_names[j], ".txt", sep = ""), quote = FALSE, row.names = TRUE, col.names = NA, sep = "\t")
    write.table(emmeans_fam, paste(path, "/", normal_traits[i], "/", normal_traits[i], "_emmeans_fam_", loc_names[j], ".txt", sep = ""), quote = FALSE, row.names = FALSE, col.names = TRUE, sep = "\t")
    write.table(emmeans_genet, paste(path, "/", normal_traits[i], "/", normal_traits[i], "_emmeans_genet_", loc_names[j], ".txt", sep = ""), quote = FALSE, row.names = FALSE, col.names = TRUE, sep = "\t")
    # Progress message (the inner loop over ~11 traits x 4 environments is slow)
    print(paste(Sys.time(), "done with", normal_traits[i], sep = " "))
  }
}
# Within-environment models for reproductive tiller count, which is
# sqrt-transformed; emmeans are written both back-transformed
# (type = "response") and on the transformed scale.
for (j in seq_along(loc_list)) {            # seq_along() is safe for empty lists, unlike 1:length()
  model <- lm(sqrt(reproductive_tiller_ct) ~ famID/plantID3 + rep, data = loc_list[[j]])
  # Renamed from `anova` to `an`: the original shadowed stats::anova() with a
  # local data frame of the same name (worked, but confusing) and is now
  # consistent with the normal-traits loop above.
  an <- anova(model)
  emmeans_fam <- as.data.frame(CLD(emmeans(model, ~ famID, type = "response"), Letters = c(LETTERS)))
  emmeans_genet <- as.data.frame(emmeans(model, ~ plantID3|famID, type = "response"))
  emmeans_genet_transformed_scale <- as.data.frame(emmeans(model, ~ plantID3|famID))
  write.table(an, paste(path, "/", traits[2], "/", traits[2], "_anova_", loc_names[j], ".txt", sep = ""), quote = FALSE, row.names = TRUE, col.names = NA, sep = "\t")
  write.table(emmeans_fam, paste(path, "/", traits[2], "/", traits[2], "_emmeans_fam_", loc_names[j], ".txt", sep = ""), quote = FALSE, row.names = FALSE, col.names = TRUE, sep = "\t")
  write.table(emmeans_genet, paste(path, "/", traits[2], "/", traits[2], "_emmeans_genet_", loc_names[j], ".txt", sep = ""), quote = FALSE, row.names = FALSE, col.names = TRUE, sep = "\t")
  write.table(emmeans_genet_transformed_scale, paste(path, "/", traits[2], "/", traits[2], "_emmeans_genet_transformed_scale_", loc_names[j], ".txt", sep = ""), quote = FALSE, row.names = FALSE, col.names = TRUE, sep = "\t")
}
# Within-environment models for yield per plant, which was not measured in all
# four environments: tli18 (position 4) is dropped from the list before looping.
loc_list_ypp <- loc_list[-4]
loc_names_ypp <- loc_names[-4]
for (j in seq_along(loc_list_ypp)) {        # seq_along() is safe for empty lists, unlike 1:length()
  # paste0() replaces the original paste(..., sep = "") -- same string.
  model_formula <- paste0(traits[6], "~ famID/plantID3 + rep")
  model <- lm(model_formula, data = loc_list_ypp[[j]])
  # Renamed from `anova` to `an` (avoids shadowing stats::anova; consistent
  # with the other within-environment loops).
  an <- anova(model)
  # BUGFIX: the original passed `Letters = c(LETTERS)` to emmeans() instead of
  # CLD() -- a misplaced parenthesis relative to every other CLD call in this
  # script -- so the grouping letters argument was silently swallowed by
  # emmeans()'s dots. Moved to CLD() where it belongs.
  emmeans_fam <- as.data.frame(CLD(emmeans(model, ~ famID), Letters = c(LETTERS)))
  emmeans_genet <- as.data.frame(emmeans(model, ~ plantID3|famID))
  write.table(an, paste(path, "/", traits[6], "/", traits[6], "_anova_", loc_names_ypp[j], ".txt", sep = ""), quote = FALSE, row.names = TRUE, col.names = NA, sep = "\t")
  write.table(emmeans_fam, paste(path, "/", traits[6], "/", traits[6], "_emmeans_fam_", loc_names_ypp[j], ".txt", sep = ""), quote = FALSE, row.names = FALSE, col.names = TRUE, sep = "\t")
  write.table(emmeans_genet, paste(path, "/", traits[6], "/", traits[6], "_emmeans_genet_", loc_names_ypp[j], ".txt", sep = ""), quote = FALSE, row.names = FALSE, col.names = TRUE, sep = "\t")
}
|
7c83e2b2f0694d3512dc5acef66e8546c84667e9
|
f2a7697627b6848a1de1f39a966abd8cd4e26b97
|
/man/predict.dawesModel.Rd
|
914c5167ee4e724c4c1df06932132a2e2786f5df
|
[
"MIT"
] |
permissive
|
dnlbrkc/heuristica
|
954203a8fcbc6b99fb98bbb4c93c6f05b494218c
|
d4125158e72b8165bca137b1ae791d70fe019df1
|
refs/heads/master
| 2020-12-24T22:29:05.291133
| 2015-08-01T15:19:13
| 2015-08-01T15:19:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 645
|
rd
|
predict.dawesModel.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/heuristic_classes.R
\name{predict.dawesModel}
\alias{predict.dawesModel}
\title{Generates predictions for Dawes Model}
\usage{
\method{predict}{dawesModel}(object, ...)
}
\arguments{
\item{object}{A dawesModel.}
\item{...}{Normally this would be the test data.
It is used to predict and can be a matrix or data.frame.
It must have the same cols_to_fit indices as those used in train_data.}
}
\value{
An N x 1 matrix of predicted values, or a list if there was only one cue.
}
\description{
Implementation of \code{\link[stats]{predict}} for dawesModel.
}
|
60307e1b9a51564e80edd6d25138768f37ec0b6b
|
98fd03ebd9de52038f06cd89200a460432f9cc5c
|
/man/metric_score.pkg_metric_has_news.Rd
|
fbb5e5eea423df883a00da5e9edf1851dcf7c712
|
[
"MIT"
] |
permissive
|
pharmaR/riskmetric
|
51d3b067da6db6ad1252f3ba706db1d922b5df64
|
3d1501880edc07cff5cd72129c0df0899db83029
|
refs/heads/master
| 2023-07-26T07:33:56.471690
| 2023-05-31T14:58:21
| 2023-05-31T14:58:21
| 173,354,970
| 148
| 32
|
NOASSERTION
| 2023-09-12T20:41:31
| 2019-03-01T19:11:16
|
R
|
UTF-8
|
R
| false
| true
| 642
|
rd
|
metric_score.pkg_metric_has_news.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assess_has_news.R
\name{metric_score.pkg_metric_has_news}
\alias{metric_score.pkg_metric_has_news}
\title{Score a package for the presence of a NEWS file}
\usage{
\method{metric_score}{pkg_metric_has_news}(x, ...)
}
\arguments{
\item{x}{a \code{pkg_metric_has_news} package metric object}
\item{...}{additional arguments unused}
}
\value{
\code{1} if any NEWS files are found, otherwise \code{0}
}
\description{
Coerce the number of news files to binary indication of valid NEWS files
}
\examples{
\dontrun{metric_score(assess_has_news(pkg_ref("riskmetric")))
}
}
|
ac5865e08c7818f4e22b9dc6767757936f32965f
|
900b5811701444c45e39d1506c36f39a1e3d7205
|
/removeFeatures.R
|
26a28b7935fa46efb117f3f100a4c8322288ffbf
|
[] |
no_license
|
svicente99/DataScience_Capstone_Report
|
f03ef3633f22ad0779d6b6d1cef0fc4679f5abb2
|
fe05ad155cc61c8251e5060a09d2a537fbcb51ee
|
refs/heads/master
| 2021-01-10T03:09:29.187869
| 2015-12-31T00:39:56
| 2015-12-31T00:39:56
| 48,718,767
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,571
|
r
|
removeFeatures.R
|
# ---------------------------------------------------------------------
# Coursera.org - Data Science Specialization - Capstone Project
#
# >> Explore dataset files (clean up refinement, remove features)
# author..: Sergio Vicente (@svicente99)
# data....: 26th Dec. 2015
# ---------------------------------------------------------------------
# this package remove url addresses and some twitter features such as retweets, @people, # (hash tags) and emoticons
if(!require("qdapRegex")) {
install.packages("qdapRegex")
}
library(qdapRegex)
removeTweetFeatures <- function(str)
{
  # Strip twitter-specific artifacts from a character vector of tweets:
  # URLs, @mentions, retweet/via markers, hash tags and emoticons.
  #
  # Args:
  #   str: character vector of raw tweet text.
  # Returns:
  #   the cleaned character vector, same length as the input.

  # erase url addresses (rm_url() below catches any remaining forms)
  str <- gsub("http\\w+", "", str)
  # erase @people names
  str <- gsub("@\\w+", "", str)
  # erase retweets ("RT"/"via" followed by the quoted handle(s))
  str <- gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", str)
  # erase hash tags, emoticons and urls using package 'qdapRegex'
  str <- rm_hash(str)
  str <- rm_emoticon(str)
  str <- rm_url(str)
  # BUGFIX: the original function ended on an assignment, so its value was
  # returned *invisibly*; return the result explicitly so interactive calls
  # print it as expected.
  str
}
removeOtherFeatures <- function(corp, numbers=TRUE, punctuation=TRUE, spaces=TRUE, stopwords=TRUE) {
  # Optionally clean a tm corpus: each logical flag toggles one tm_map()
  # transformation. The transformations always run in this fixed order:
  # numbers, punctuation, whitespace, then English stop words.
  if (numbers) {
    corp <- tm_map(corp, removeNumbers)
  }
  if (punctuation) {
    corp <- tm_map(corp, removePunctuation)
  }
  if (spaces) {
    corp <- tm_map(corp, stripWhitespace)
  }
  if (stopwords) {
    # `stopwords` here is the logical argument; the call below still resolves
    # to the stopwords() *function*, since R skips non-function bindings when
    # looking up a name used in call position.
    corp <- tm_map(corp, removeWords, stopwords("english"))
  }
  # the list of 'stopwords' could be confirmed in next link, according to "quanteda.pdf" package documentation
  # http://jmlr.csail.mit.edu/papers/volume5/lewis04a/a11-smart-stop-list/english.stop
  # http://stackoverflow.com/questions/7927367/r-text-file-and-text-mining-how-to-load-data
  corp
}
|
f4e89d5bd54abb44e5cf51f0b8527f8332f06cf7
|
be58961b8970ff0d19ff8b5d1ed1fd5b398a6adf
|
/metahit_script.R
|
90c91357c365d8622a0cdba9c9c50954e6c2c0ad
|
[] |
no_license
|
metahit/mh-execute
|
0b7f880363ad82efcb94a9c041c1b3b2d7b5dc49
|
4d2a571621a1294552a5dedec941927f74696e8a
|
refs/heads/master
| 2022-05-18T11:28:19.719126
| 2022-03-29T15:45:38
| 2022-03-29T15:45:38
| 194,102,830
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 50,918
|
r
|
metahit_script.R
|
# NOTE(review): rm(list=ls()) in a script is an anti-pattern -- it wipes the
# caller's workspace as a side effect; prefer running the script in a fresh
# session instead.
rm(list=ls())
library(ithimr)
library(splines)
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing -- library() would fail fast here; presumably these were meant to be
# hard dependencies too.
require(tidyverse)
require(knitr)
require(kableExtra)
# require(citr)
library(compiler)
library(earth)
library(future)
library(future.apply)
library(doFuture)
library(stringr)
# Register the future backend for foreach/%dopar% and run futures in
# parallel background R sessions.
registerDoFuture()
plan(multisession)
# Increase maximum size of global variables
options(future.globals.maxSize= +Inf)
# Each subdirectory of inputs/scenarios is one scenario to be processed by the
# loop that follows.
all_scens <- list.dirs(path = "./inputs/scenarios", full.names = FALSE, recursive = FALSE)
for (global_scen in all_scens){
# global_scen <- all_scens[1]
# Set sample size
# Number of samples drawn per uncertain parameter (global, used inside sourced
# ITHIM functions via <<-).
NSAMPLES <<- 8
## overwrite some functions for METAHIT's pp_summary use (instead of TIGTHAT's tripset use)
## in general, the overwriting functions are from ithimr's uncertain_travel branch
## in general, ithimr functions are written ithimr::function()
source('metahit_functions.R')
source("../mh-mslt/R/functions_MSLT.R")
source("../mh-mslt/R/RunMSLT.R")
## 1 SET GLOBAL VARIABLES ##########################################
## set variables, which are TIGTHAT studies' input parameters.
## general settings
setup_call_summary_filename <- 'setup_call_summary.txt'
AGE_RANGE <- c(0,150)
REFERENCE_SCENARIO <- 'Baseline'
## placeholders for uncertain parameters
# NOTE(review): the c(log(mu), log(sigma)) pairs below look like (meanlog, sdlog)
# parameters for lognormal sampling, and c(40,5)/c(20,20) look like beta/other
# two-parameter distributions, per the SUMMARY OF INPUTS comments further down --
# confirm against the sampling code in the sourced metahit_functions.R.
MMET_CYCLING <- c(log(4.63),log(1.2)) # 4.63 #
MMET_WALKING <- c(log(2.53),log(1.1)) # 2.53 #
PM_CONC_BASE_QUANTILE <- T
PM_TRANS_SHARE_QUANTILE <- T#F
PA_DOSE_RESPONSE_QUANTILE <- T#F
AP_DOSE_RESPONSE_QUANTILE <- T#F
BACKGROUND_PA_SCALAR <- c(log(1),log(1.1)) # 1
BACKGROUND_PA_CONFIDENCE <- 1
INJURY_REPORTING_RATE <- c(40,5) # 1
CHRONIC_DISEASE_SCALAR <- c(log(1),log(1.1)) #1
SIN_EXPONENT_SUM <- c(log(1.9),log(1.03)) #2
CASUALTY_EXPONENT_FRACTION <- c(20,20) # 0.5 #
PM_EMISSION_INVENTORY_CONFIDENCE <- 0.9
DISTANCE_SCALAR_CAR_TAXI <- c(log(1),log(1.1)) # 1
DISTANCE_SCALAR_WALKING <- c(log(1),log(1.1)) # 1
DISTANCE_SCALAR_PT <- c(log(1),log(1.1)) # 1
DISTANCE_SCALAR_CYCLING <- c(log(1),log(1.1)) # 1
DISTANCE_SCALAR_MOTORCYCLE <- c(log(1),log(1.1)) # 1
# Diabetes relative risks for IHD/stroke by sex (inline comments show
# alternative values considered).
DIABETES_IHD_RR_F <<- 2.82 ## 2.35
DIABETES_STROKE_RR_F <<- 2.28 ## 1.93
DIABETES_IHD_RR_M <<- 2.16 ## 2.16
DIABETES_STROKE_RR_M <<- 1.83 ## 1.6
## things we need for ithim-r to work
ADD_TRUCK_DRIVERS <<- F
ADD_BUS_DRIVERS <<- F
## SUMMARY OF INPUTS
# seed = double. sets seed to allow some reproducibility.
# CITY = string. used to identify input files.
# speeds = named list of doubles. average mode speeds.
# pm_emission_inventory = named list of doubles. vehicle emission factors.
# setup_call_summary_filename = string. Where to write input call summary.
# DIST_CAT = vector of strings. defines distance categories for scenario generation (5 accra scenarios)
# AGE_RANGE = vector of length 2, specifying the minimum and maximum ages to be used in the model. Note that the actual
# maximum and minimum will coincide with boundaries in the population and GBD files.
# REFERENCE_SCENARIO = string: at present, one of 'Baseline' or 'Scenario N' where N is an integer
# NSAMPLES = integer: number of samples to take for each parameter to be sampled
# MMET_CYCLING = parameter. double: sets cycling (M)METs. vector: samples from distribution.
# MMET_WALKING = parameter. double: sets walking (M)METs. vector: samples from distribution.
# PM_CONC_BASE = parameter. double: sets background PM. vector: samples from distribution.
# PM_TRANS_SHARE = parameter. double: sets PM proportion that comes from transport. vector: samples from distribution.
# PA_DOSE_RESPONSE_QUANTILE = logic. T: PA dose--response relationship is sampled. F: relationship is fixed.
# AP_DOSE_RESPONSE_QUANTILE = logic. T: AP dose--response relationship is sampled. F: relationship is fixed.
# CHRONIC_DISEASE_SCALAR = parameter. double: sets scalar for chronic disease background burden. vector: samples from distribution.
# BACKGROUND_PA_SCALAR = parameter. double: sets scalar for background PA. vector: samples from distribution.
# BACKGROUND_PA_CONFIDENCE = parameter. double between 0 and 1. 1 = use PA data as they are.
# INJURY_REPORTING_RATE = parameter. double: sets scalar for injury counts (inverse). vector: samples from distribution.
# INJURY_LINEARITY = parameter. double: sets scalar. vector: samples from distribution.
# CASUALTY_EXPONENT_FRACTION = parameter. double: sets scalar. vector: samples from distribution.
# PM_EMISSION_INVENTORY_CONFIDENCE = parameter. double between 0 and 1. 1 = use PM 2.5 emission data as they are.
# DISTANCE_SCALAR_CAR_TAXI = double: sets scalar. vector: samples from distribution.
# DISTANCE_SCALAR_WALKING = double: sets scalar. vector: samples from distribution.
# DISTANCE_SCALAR_PT = double: sets scalar. vector: samples from distribution.
# DISTANCE_SCALAR_CYCLING = double: sets scalar. vector: samples from distribution.
# DISTANCE_SCALAR_MOTORCYCLE = double: sets scalar. vector: samples from distribution.
## setting all the global variables at the beginning to minimise ITHIM computation
## copied from ithimr::run_ithim_setup
## SET GLOBAL VALUES
## PROGRAMMING VARIABLES
## fixed parameters for AP inhalation
BASE_LEVEL_INHALATION_RATE <<- 1
CLOSED_WINDOW_PM_RATIO <<- 0.5
CLOSED_WINDOW_RATIO <<- 0.9
ROAD_RATIO_MAX <<- 3.216
ROAD_RATIO_SLOPE <<- 0.379
SUBWAY_PM_RATIO <<- 0.8
## default speeds that can be edited by input.
# Average mode speeds (km/h, per the SUMMARY OF INPUTS comment block above).
default_speeds <- list(
bus=15,
bus_driver=15,
car=21,
taxi=21,
pedestrian=4.8,
cycle=14.5,
motorcycle=25,
truck=21,
van=15,
subway=28,
rail=35,
shared_taxi=21
)
# Derived globals: mode names and a stage_mode/speed lookup data frame.
TRAVEL_MODES <<- tolower(names(default_speeds))
MODE_SPEEDS <<- data.frame(stage_mode = TRAVEL_MODES, speed = unlist(default_speeds), stringsAsFactors = F)
## default emission contributions that can be edited by input.
# Per-mode shares of the PM emission inventory.
default_emission_inventory <- list(
bus=0,
bus_driver=0.82,
car=0.228,
taxi=0.011,
pedestrian=0,
cycle=0,
motorcycle=0.011,
truck=0.859,
big_truck=0.711,
other=0.082
)
# NOTE(review): the two lines below are commented out, so PM_EMISSION_INVENTORY
# is never set here -- presumably set elsewhere (e.g. in sourced functions);
# confirm before relying on default_emission_inventory.
#names(default_emission_inventory) <- tolower(names(default_emission_inventory))
#PM_EMISSION_INVENTORY <<- default_emission_inventory
## 2 GET GLOBAL DATA ##################################################
## copied from ithimr ithim_load_data
# Data shipped inside the installed ithimr package.
global_path_ithimr <- file.path(find.package('ithimr',lib.loc=.libPaths()), 'extdata/global/')
## for windows??
global_path_ithimr <- paste0(global_path_ithimr, "/")
## DATA FILES FOR MODEL
##reading GBD 2017 IER functions that were provided by Rick Burnett: this include Diabetes in addition to previous five disease end-points
DR_AP <- read.csv(paste0(global_path_ithimr,"dose_response/drap/dose_response.csv"))
# Read updated disease outcomes lookup from ITHIM-R package
DISEASE_INVENTORY <<- read.csv(paste0(global_path_ithimr,"dose_response/disease_outcomes_lookup.csv"))
# NOTE: 'POLLUION' spelling is used consistently throughout -- do not "fix" the name in isolation.
BACKGROUND_POLLUION_TABLE <<- read.csv('inputs/background-air-pollution/1_apmeans.csv')
disease_short_names <- read.csv("../mh-mslt/output/parameters/DISEASE_SHORT_NAMES.csv")
DISEASE_SHORT_NAMES <<- disease_short_names
DR_DF <- read.csv("inputs/dose_response/disease_outcomes_lookup_new.csv")
# Codebook sheet 2: demographic groups. Column 3 originally holds a numerical
# demogindex; we keep that mapping and replace the column with a 1..n index.
demography <- readxl::read_xlsx('inputs/scenarios/190330_sp_ind_codebook.xlsx',sheet=2,col_names=F)
demogindex_to_numerical <- unlist(demography[,3])
demography[,3] <- 1:nrow(demography)
demo_indices <- unlist(demography[,3])
# Codebook sheet 1: age bands, written as "<lower> to <upper>".
age_table <- readxl::read_xlsx('inputs/scenarios/190330_sp_ind_codebook.xlsx',sheet=1,col_names=F)
age_category <- unlist(age_table[,1])
age_lower_bounds <- as.numeric(future_sapply(age_category,function(x)strsplit(x,' to ')[[1]][1]))
## 3 GET MULTI-CITY DATA #################################################
## set scenario variables. these can (should) be determined from input data rather than hard coded.
# NSCEN = number of non-baseline scenarios; global_scen is defined upstream of this chunk.
NSCEN <<- 1
SCEN_SHORT_NAME <<- c('base', global_scen)
SCEN_INJURY_SHORT_NAME <<- c('base', 'scen')
SCEN <<- c('Baseline','Scenario 1')
# Pre-computed distance tables per scenario. emissions_distances and pa_distances
# are global; the city-specific inh_distances are loaded inside the city loop.
all_distances <- list()
for(i in 1:length(SCEN)){
scen_name <- SCEN_SHORT_NAME[i]
all_distances[[scen_name]] <- list()
for(file_name in c('emissions_distances','pa_distances'))
all_distances[[scen_name]][[file_name]] <- readRDS(paste0('inputs/distances/',scen_name,'_',file_name,'.Rds'))
# if(i==1&&as.numeric(system("awk '/MemFree/ {print $2}' /proc/meminfo", intern=TRUE)) > 1e7){
# ##!! hack for Rob - laptop can't compute london inh
# all_distances[[scen_name]]$inh_distances$london <- readRDS(paste0('inputs/distances/',scen_name,'_london_inh_distances.Rds'))
# INCLUDE_LONDON <- T
# if(i==1) cat('Including London.\n')
# }else if(i==1){
# With the memory check above commented out, London is ALWAYS excluded.
INCLUDE_LONDON <- F
# cat('Excluding London.\n')
# }
}
## get city distances for e.g. bus mode
# Columns from 3 onwards are distance values; coerce to numeric in case of text.
city_total_distances <- read.csv('inputs/distances/mode_road_city.csv',stringsAsFactors = F)
for(i in 3:ncol(city_total_distances)) city_total_distances[,i] <- as.numeric(city_total_distances[,i])
## injury model / preprocessed data
# get data and model
path_to_injury_model_and_data <- 'inputs/injury/'
# injury_table is a 2x2 nested list of injury data frames; the first level is
# accessed later as $primary (see the city_regions filter), the second as $whw.
# TODO confirm the full second-level naming (presumably whw / nov).
injury_table <- readRDS(paste0(path_to_injury_model_and_data,'processed_injuries_9.Rds'))
# One fitted injury model per (i, j) cell, matching injury_table's structure.
baseline_injury_model <- list()
for(i in 1:2){
baseline_injury_model[[i]] <- list()
for(j in 1:2){
baseline_injury_model[[i]][[j]] <- readRDS(paste0(path_to_injury_model_and_data,'city_region',i,j,'.Rds'))
# Drop London rows up-front when it is excluded from the run.
if(INCLUDE_LONDON==F) injury_table[[i]][[j]] <- dplyr::filter(injury_table[[i]][[j]], region!='london')
}
}
## use bristol data to define demography etc
# Only the age/sex structure is taken from this file; the population counts are
# overwritten per city later from the synthetic population (see city loop).
filename <- 'inputs/populations/bristol.csv'
demographic <- read_csv(filename,col_types = cols())
demographic$dem_index <- 1:nrow(demographic)
## find min and max age from AGE_RANGE, trips, and demographic.
##!! a lot of this is actually global, but it's coded within cities. It can be brought outside the city loop (to join demography code) by re-writing.
# Age bands are written "<lower>-<upper>"; clamp to AGE_RANGE (defined upstream).
age_category <- demographic$age
max_age <- max(as.numeric(future.apply::future_sapply(age_category,function(x)strsplit(x,'-')[[1]][2])))
max_age <- min(max_age,AGE_RANGE[2])
min_age <- min(as.numeric(future.apply::future_sapply(age_category,function(x)strsplit(x,'-')[[1]][1])))
min_age <- max(min_age,AGE_RANGE[1])
# Keep only age bands that overlap [min_age, max_age].
demographic <- demographic[as.numeric(future.apply::future_sapply(age_category,function(x)strsplit(x,'-')[[1]][1]))<=max_age&
as.numeric(future.apply::future_sapply(age_category,function(x)strsplit(x,'-')[[1]][2]))>=min_age,]
POPULATION <<- demographic
# DEMOGRAPHIC: same rows without the population column; 'age' holds the band's
# lower bound (as character), 'age_cat' the original band label.
demographic <- demographic[,names(demographic)!='population']
names(demographic)[which(names(demographic)=='age')] <- 'age_cat'
demographic$age <- future.apply::future_sapply(demographic$age_cat,function(x)strsplit(x,'-')[[1]][1])
DEMOGRAPHIC <<- demographic
# get age-category details from (modified) population data
AGE_CATEGORY <<- unique(POPULATION$age)
AGE_LOWER_BOUNDS <<- as.numeric(future.apply::future_sapply(AGE_CATEGORY,function(x)strsplit(x,'-')[[1]][1]))
MAX_AGE <<- max(as.numeric(future.apply::future_sapply(AGE_CATEGORY,function(x)strsplit(x,'-')[[1]][2])))
## 4 PREPARE LOCAL (CITY) DATA ##########################################
# Map local authorities (LAs) to city regions; keep only regions present in the injury data.
city_regions_table <- read.csv('inputs/mh_regions_lad_lookup.csv',stringsAsFactors = F)
city_regions <- unique(city_regions_table$cityregion)
city_regions <- city_regions[city_regions!='']
city_regions <- city_regions[city_regions %in% unique(injury_table$primary$whw$region)]
city_las <- city_regions_table$lad11cd[city_regions_table$cityregion%in%city_regions]
# For each LA, the index of its city region within city_regions.
la_city_indices <- future.apply::future_sapply(city_las,function(x) which(city_regions==city_regions_table$cityregion[city_regions_table$lad11cd==x]))
city_regions_dt <- setDT(city_regions_table[city_regions_table$cityregion%in%city_regions,1:4])
city_regions_dt$la <- 1:nrow(city_regions_dt)
city_regions_dt$city_index <- la_city_indices
if(INCLUDE_LONDON==F) city_regions <- city_regions[city_regions!='london']
# Uncomment it to run for just one city
# city_regions <- city_regions[1]
## DATA FILES FOR CITY
##!! what are we doing with modes tube, train?
# Locate the per-LA synthetic population files for the active scenario.
synth_pop_path <- paste0('inputs/scenarios/', global_scen, '/')
synth_pop_files <- list.files(synth_pop_path)
synth_pop_files <- synth_pop_files[future.apply::future_sapply(synth_pop_files,function(x)grepl('SPind_E[[:digit:]]+.Rds',x))]
la_names <- stringr::str_replace_all(synth_pop_files, "SPind_", "") # future.apply::future_sapply(synth_pop_files,function(x)gsub('SPind_','',x))
la_names <- stringr::str_replace_all(la_names, ".Rds", "") # future.apply::future_sapply(la_names,function(x)gsub('.Rds','',x))
# Position of each LA file within the lookup table's lad14cd order.
synth_pop_list_in_la_order <- match(la_names,city_regions_dt$lad14cd)
##!! check they're in the right order
print(synth_pop_list_in_la_order)
# Per-city PM2.5 emission inventories.
inventory <- read.csv('inputs/background-air-pollution/emission_inventory.csv')
emission_inventories <- list()
for(city in city_regions){
# NOTE(review): row_index is a logical over BACKGROUND_POLLUION_TABLE's rows but
# is used to index 'inventory' -- this assumes both files share row order. TODO confirm.
row_index <- grepl(city,tolower(BACKGROUND_POLLUION_TABLE$apgroup_name))
col_indices <- (which(colnames(inventory)=='apgroup_name')+1):ncol(inventory)
emission_inventories[[city]] <- list()
for(i in col_indices)
emission_inventories[[city]][[colnames(inventory)[i]]] <- inventory[row_index,i]
}
PM_EMISSION_INVENTORIES <<- emission_inventories
## 5 SET PARAMETERS ################################################
# Draw NSAMPLES sets of model parameters. Each argument is either a fixed value
# or (when given as a vector upstream) sampled from a distribution by ithimr.
parameters <- ithim_setup_parameters(NSAMPLES=NSAMPLES,
MMET_CYCLING=MMET_CYCLING,
MMET_WALKING=MMET_WALKING,
PM_CONC_BASE_QUANTILE=PM_CONC_BASE_QUANTILE,
PM_TRANS_SHARE_QUANTILE=PM_TRANS_SHARE_QUANTILE,
PA_DOSE_RESPONSE_QUANTILE=PA_DOSE_RESPONSE_QUANTILE,
AP_DOSE_RESPONSE_QUANTILE=AP_DOSE_RESPONSE_QUANTILE,
BACKGROUND_PA_SCALAR=BACKGROUND_PA_SCALAR,
BACKGROUND_PA_CONFIDENCE=BACKGROUND_PA_CONFIDENCE,
INJURY_REPORTING_RATE=INJURY_REPORTING_RATE,
CHRONIC_DISEASE_SCALAR=CHRONIC_DISEASE_SCALAR,
SIN_EXPONENT_SUM=SIN_EXPONENT_SUM,
CASUALTY_EXPONENT_FRACTION=CASUALTY_EXPONENT_FRACTION,
PM_EMISSION_INVENTORY_CONFIDENCE=PM_EMISSION_INVENTORY_CONFIDENCE,
DISTANCE_SCALAR_CAR_TAXI=DISTANCE_SCALAR_CAR_TAXI,
DISTANCE_SCALAR_WALKING=DISTANCE_SCALAR_WALKING,
DISTANCE_SCALAR_PT=DISTANCE_SCALAR_PT,
DISTANCE_SCALAR_CYCLING=DISTANCE_SCALAR_CYCLING,
DISTANCE_SCALAR_MOTORCYCLE=DISTANCE_SCALAR_MOTORCYCLE)
# set up injury prediction function
# If any injury-related parameter is sampled, or the current settings differ
# from those the saved model was fitted with, the saved model cannot be reused
# directly and predictions are made via predict_without_model instead.
local_settings <- list(DISTANCE_SCALAR_CAR_TAXI=DISTANCE_SCALAR_CAR_TAXI,
DISTANCE_SCALAR_CYCLING=DISTANCE_SCALAR_CYCLING,
DISTANCE_SCALAR_MOTORCYCLE=DISTANCE_SCALAR_MOTORCYCLE,
DISTANCE_SCALAR_WALKING=DISTANCE_SCALAR_WALKING,
CAS_EXPONENT=CASUALTY_EXPONENT_FRACTION*SIN_EXPONENT_SUM,
STR_EXPONENT=(1-CASUALTY_EXPONENT_FRACTION)*SIN_EXPONENT_SUM)
model_settings <- readRDS('inputs/injury/model_settings.Rds')
if(any(c('CASUALTY_EXPONENT_FRACTION','SIN_EXPONENT_SUM',
'DISTANCE_SCALAR_CAR_TAXI' ,'DISTANCE_SCALAR_WALKING' ,
'DISTANCE_SCALAR_PT',
'DISTANCE_SCALAR_CYCLING' ,
'DISTANCE_SCALAR_MOTORCYCLE' )%in%names(parameters))||
!identical(local_settings,model_settings)){
predict_injuries <<- predict_without_model
}else{
predict_injuries <<- predict
}
## start metahit
## 6 START LOOP OVER CITIES #################################################
city_results <- list()
# One iteration per city region. Each iteration writes its results to disk and
# releases them at the end of the loop to limit memory use.
for(city_ind in 1:length(city_regions)){
# city_ind <- 1
## 7 GET LOCAL (city) DATA ###############################################
CITY <<- city_regions[city_ind]
print(CITY)
city_results[[CITY]] <- list()
## these datasets are all local, saved in local folder.
## there will be one folder per city. this block will have to loop over CITY.
## OR we have one file with, e.g., all the GBD data in.
# GBD file needs to have the following columns:
# age (=label, e.g. 15-49)
# sex (=male or female)
# measure
# cause (GBD_DATA$cause matches DISEASE_INVENTORY$GBD_name)
# metric
# burden
## now process GBD_DATA
filename <- paste0('inputs/gbd/',CITY,".csv")
GBD_DATA <- read_csv(filename,col_types = cols())
# keep named subset of diseases
# NOTE: disease_names is defined but not used by the active filter below,
# which matches on disease_short_names$disease (see commented-out subset line).
disease_names <- c(as.character(DISEASE_INVENTORY$GBD_name),'Road injuries')
GBD_DATA <- GBD_DATA %>% filter(cause %in% disease_short_names$disease)
# Attach GBD_name/acronym by joining on the lower-case disease name.
GBD_DATA <- disease_short_names %>% dplyr::select(GBD_name, acronym, disease) %>%
mutate(cause = disease) %>%
left_join(GBD_DATA)
# Rename columns: keep the lower-case name as lc_gbd_name; 'cause' becomes the GBD name.
GBD_DATA <- GBD_DATA %>% rename(lc_gbd_name = cause, cause = GBD_name)
# GBD_DATA <- GBD_DATA %>% filter(cause %in% disease_short_names$disease)
#GBD_DATA <- subset(GBD_DATA,cause%in%disease_names)
# keep entries in correct age range (GBD bands are written "<lower> to <upper>")
GBD_DATA$min_age <- as.numeric(future.apply::future_sapply(GBD_DATA$age,function(x)str_split(x,' to ')[[1]][1]))
GBD_DATA$max_age <- as.numeric(future.apply::future_sapply(GBD_DATA$age,function(x)str_split(x,' to ')[[1]][2]))
GBD_DATA <- subset(GBD_DATA,max_age>=AGE_LOWER_BOUNDS[1])
GBD_DATA <- subset(GBD_DATA,min_age<=MAX_AGE)
# Remove _name from all columns
# colnames(GBD_DATA) <- gsub("_name", "", colnames(GBD_DATA))
# ensure lower case
GBD_DATA$sex <- tolower(GBD_DATA$sex)
## get burden of disease for each city by scaling according to population
# Build all (measure, sex, age, cause) combinations, then attach population counts.
burden_of_disease <- expand.grid(measure=unique(GBD_DATA$measure),sex=unique(POPULATION$sex),age=unique(POPULATION$age),
cause=GBD_DATA$cause,stringsAsFactors = F)
burden_of_disease <- left_join(burden_of_disease,POPULATION,by=c('age','sex'))
burden_of_disease$min_age <- as.numeric(future.apply::future_sapply(burden_of_disease$age,function(x)str_split(x,'-')[[1]][1]))
burden_of_disease$max_age <- as.numeric(future.apply::future_sapply(burden_of_disease$age,function(x)str_split(x,'-')[[1]][2]))
## when we sum ages, we assume that all age boundaries used coincide with the GBD age boundaries.
##!! this isn't the case for metahit: age category 15-19 vs 16-19. therefore, have added '-1' for now.
# Per-row rate = GBD burden / GBD population summed over matching cells.
# (Row-wise loop with a filter per row -- slow but runs once per city.)
burden_of_disease$rate <- 0
for (i in 1:nrow(burden_of_disease)){
local_df <- burden_of_disease[i,]
subtab <- dplyr::filter(GBD_DATA, measure == local_df$measure & sex == local_df$sex & cause == local_df$cause &
min_age >= as.numeric(local_df$min_age)-1 & max_age <= as.numeric(local_df$max_age))
burden_of_disease$rate[i] <- sum(subtab$number)/sum(subtab$pop)
}
# Scale rates to this city's population to get absolute burden.
burden_of_disease$burden <- burden_of_disease$population*burden_of_disease$rate
##!! if an entry is missing in GBD, we set it to zero. we should also issue a warning.
burden_of_disease$burden[is.na(burden_of_disease$burden)] <- 0
DISEASE_BURDEN <<- burden_of_disease
## use GBD to scale road-injury fatalities to YLLs: compute the YLL-per-death ratio here.
gbd_injuries <- DISEASE_BURDEN[which(DISEASE_BURDEN$cause == "Road injuries"),]
gbd_injuries$sex_age <- paste0(gbd_injuries$sex,"_",gbd_injuries$age)
## calculating the ratio of YLL to deaths for each age and sex group
# NOTE(review): the elementwise division below relies on the YLL and Deaths
# subsets having identical (sex, age) row order after arrange(measure) -- TODO confirm.
gbd_injuries <- arrange(gbd_injuries, measure)
gbd_inj_yll <- gbd_injuries[which(gbd_injuries$measure == "YLLs (Years of Life Lost)"),]
gbd_inj_dth <- gbd_injuries[which(gbd_injuries$measure == "Deaths"),]
gbd_inj_yll$yll_dth_ratio <- gbd_inj_yll$burden/gbd_inj_dth$burden
GBD_INJ_YLL <<- gbd_inj_yll
## get mslt tables
# Multi-state life table inputs for this city.
mslt_df <- read.csv(paste0('inputs/mslt/',CITY, "_mslt.csv"))
MSLT_DF <<- mslt_df
## get inh distance
# City-specific inhalation distance tables, one per scenario.
# NOTE(review): across(where(is.character), as.numeric) coerces EVERY character
# column to numeric -- confirm all columns (incl. any id columns) are numeric-like,
# otherwise they silently become NA.
file_name <- 'inh_distances'
for(i in 1:length(SCEN)){
scen_name <- SCEN_SHORT_NAME[i]
all_distances[[scen_name]][[file_name]] <- readRDS(paste0('inputs/distances/',scen_name,'_',CITY,'_',file_name,'.Rds')) %>% mutate(across(where(is.character), as.numeric))
}
## 8 GET/SET CITY SYNTH POP #########################################
# select city LAs belonging to the current city region
la_indices <- synth_pop_list_in_la_order[city_regions_dt$city_index==city_ind]
# remove na (nottinghamshire)
la_indices <- la_indices[!is.na(la_indices)]
# set to data table
synth_pops <- list()
for(i in 1:length(la_indices)) synth_pops[[i]] <- setDT(readRDS(paste0(synth_pop_path,synth_pop_files[la_indices[i]])))
# take subset of columns (only the ids and the sport mMET column are needed here)
for(i in 1:length(synth_pops)) synth_pops[[i]] <-
synth_pops[[i]][,future.apply::future_sapply(colnames(synth_pops[[i]]),
function(x)x%in%c('census_id','demogindex','sport_wkmmets')
),with=F]
# rename
names(synth_pops) <- la_names[la_indices]
number_city_las <- length(synth_pops)
# Stack all LAs into one table for the city; rbind.fill tolerates missing columns.
synth_pop <- plyr::rbind.fill(synth_pops) %>% as.data.table()
# Default sport_wkmmets to 0 when missing (per person, or when the column is absent).
if ("sport_wkmmets" %in% colnames(synth_pop)){
synth_pop$sport_wkmmets <- ifelse(is.na(synth_pop$sport_wkmmets), 0, synth_pop$sport_wkmmets)
}else{
synth_pop$sport_wkmmets <- 0
}
# synth_pop <- do.call(rbind,synth_pops)
# synth_pops <- NULL
## convert synth pop to ithim-r style
synth_pop$participant_id <- 1:nrow(synth_pop)
# Map the codebook's numerical demogindex onto the 1..n dem_index used throughout.
demog_to_dem <- data.table(demogindex=demogindex_to_numerical,dem_index=1:length(demogindex_to_numerical))
synth_pop <- synth_pop[demog_to_dem,on='demogindex']
synthetic_pop <- synth_pop[,names(synth_pop)%in%c('participant_id','dem_index'),with=F]
##!! not sure we need this as a separate object but, for now...
SYNTHETIC_POPULATION <<- left_join(synthetic_pop,DEMOGRAPHIC[,names(DEMOGRAPHIC)%in%c('dem_index','age')],by='dem_index')
# synthetic_pop <- NULL
## Build one per-person summary table per scenario, holding the durations the
## PA and inhalation pathways need. We effectively have a "SYNTHETIC_POPULATION"
## per scenario.
pp_summary <- list()
# Column-name stems used in the distance files vs the mode names the ITHIM
# functions expect (element-wise correspondence).
dist_mode_names <- c('walk','cycle','mbikedrive','cardrive','vandrive','subway','bus')
function_mode_names <- c('pedestrian','cycle','motorcycle','car','van','subway','bus')
for(scenario in SCEN_SHORT_NAME){
#scenario_name_flag <- sapply(names(synth_pop),function(x)grepl(paste0(scenario,'_'),x))
#scenario_names <- names(synth_pop)[scenario_name_flag]
# choose subset for each scenario per person summary
pp_summary[[scenario]] <- synth_pop[,names(synth_pop)%in%c('participant_id','dem_index','census_id','sport_wkmmets'),with=F]
## pa durations, joined onto the summary by census_id
pp_summary[[scenario]][all_distances[[scenario]]$pa_distances,on='census_id',cycle_dur_pa:=i.cycle_dur_pa]
pp_summary[[scenario]][all_distances[[scenario]]$pa_distances,on='census_id',pedestrian_dur_pa:=i.walking_dur_pa]
## inhalation durations: sum the matching columns (per road type) for each mode
for(modenumber in 1:length(dist_mode_names)){
cols <- future.apply::future_sapply(colnames(all_distances[[scenario]]$inh_distances),function(x)grepl(dist_mode_names[modenumber],x))
pp_summary[[scenario]][,c(paste0(function_mode_names[modenumber],'_dur')):=0]
pp_summary[[scenario]][match(all_distances[[scenario]]$inh_distances$census_id,pp_summary[[scenario]]$census_id),paste0(function_mode_names[modenumber],'_dur'):=rowSums(all_distances[[scenario]]$inh_distances[,cols,with=F])]
}
# rename the sport column to the name total_mmet expects, or create it as 0
if('sport_wkmmets' %in% names(pp_summary[[scenario]]))
names(pp_summary[[scenario]])[names(pp_summary[[scenario]])=='sport_wkmmets'] <- 'work_ltpa_marg_met'
else
pp_summary[[scenario]]$work_ltpa_marg_met <- 0
}
# Recompute POPULATION counts from the synthetic population itself.
true_pops <- pp_summary[[1]][,.N,by='dem_index']
POPULATION$population <- true_pops$N[match(POPULATION$dem_index,true_pops$dem_index)]
# synth_pop <- NULL
# Logical masks over the summary columns: inhalation-duration vs PA-duration columns.
INH_NAMES <<- colnames(pp_summary[[1]])%in%paste0(function_mode_names,'_dur')
PA_NAMES <- colnames(pp_summary[[1]])%in%c('cycle_dur_pa','pedestrian_dur_pa')
print("Pedestrian duration difference")
print(summary(pp_summary$base$pedestrian_dur_pa - pp_summary[[global_scen]]$pedestrian_dur_pa))
print("Cycling duration difference")
# BUG FIX: this previously re-printed the pedestrian difference (copy-paste);
# it now summarises the cycling duration difference, matching its label.
print(summary(pp_summary$base$cycle_dur_pa - pp_summary[[global_scen]]$cycle_dur_pa))
##!! hard coded to maintain naming conventions etc
# DIST: total distance per emitting mode (rows) and scenario (columns), consumed
# by the emissions calculation. Car and motorcycle totals come from the
# emission-distance tables (columns 2..n_roads+1 hold the per-road distances --
# TODO confirm column layout); bus comes from the city totals file.
DIST <- matrix(0,nrow=3,ncol=NSCEN+1)
rownames(DIST) <- c('car','motorcycle','bus')
colnames(DIST) <- SCEN
n_roads <- ncol(all_distances[[SCEN_SHORT_NAME[1]]]$emissions_distances$distance_for_emission)-2
for(scen in 1:(NSCEN+1))
DIST[,scen] <- c(sum(all_distances[[SCEN_SHORT_NAME[scen]]]$emissions_distances$distance_for_emission[mode_name=='cardrive'&la%in%la_names[la_indices],2:(n_roads+1)]),
sum(all_distances[[SCEN_SHORT_NAME[scen]]]$emissions_distances$distance_for_emission[mode_name=='mbikedrive'&la%in%la_names[la_indices],2:(n_roads+1)]),
##!! assume total bus travel doesn't change in scenario
sum(city_total_distances[city_total_distances[,1]==CITY&city_total_distances[,2]=='bus',3:ncol(city_total_distances)]))
## 9 ITHIM ########################################
## set city-specific parameters
# background pm2.5
# Point estimate from the background table; when quantiles were supplied instead,
# PM_CONC_BASE is sampled from a lognormal fitted to the table's mean/sd.
pm_conc_base <- BACKGROUND_POLLUION_TABLE$apmean_bpm25[grepl(CITY,tolower(BACKGROUND_POLLUION_TABLE$apgroup_name))]
if(PM_CONC_BASE_QUANTILE==F){
PM_CONC_BASE <- pm_conc_base
}else{
pm_sd <- BACKGROUND_POLLUION_TABLE$apsd_bpm25[grepl(CITY,tolower(BACKGROUND_POLLUION_TABLE$apgroup_name))]
lnorm_params <- get_lnorm_params(pm_conc_base,pm_sd)
parameters$PM_CONC_BASE <- qlnorm(parameters$PM_CONC_BASE_QUANTILE,lnorm_params[1],lnorm_params[2])
}
# transport portion of pm2.5: point estimate, or a beta-distributed sample
if(PM_TRANS_SHARE_QUANTILE==F){
pm_transport_share <- BACKGROUND_POLLUION_TABLE$transport_fraction[grepl(CITY,tolower(BACKGROUND_POLLUION_TABLE$apgroup_name))]
PM_TRANS_SHARE <- pm_transport_share
}else{
pm_share_alpha <- BACKGROUND_POLLUION_TABLE$alpha[grepl(CITY,tolower(BACKGROUND_POLLUION_TABLE$apgroup_name))]
pm_share_beta <- BACKGROUND_POLLUION_TABLE$beta[grepl(CITY,tolower(BACKGROUND_POLLUION_TABLE$apgroup_name))]
parameters$PM_TRANS_SHARE <- qbeta(parameters$PM_TRANS_SHARE_QUANTILE,pm_share_alpha,pm_share_beta)
}
# Emission inventory: when confidence < 1, resample the mode shares per sample
# via independent gammas normalised to sum to 1 (a Dirichlet draw); otherwise
# use the file values unchanged.
if(PM_EMISSION_INVENTORY_CONFIDENCE<1){
total <- sum(unlist(PM_EMISSION_INVENTORIES[[CITY]]))
parameters$PM_EMISSION_INVENTORY <- list()
for(n in 1:NSAMPLES){
quantiles <- parameters$PM_EMISSION_INVENTORY_QUANTILE[[n]]
samples <- future.apply::future_lapply(names(quantiles),function(x) qgamma(quantiles[[x]],shape=PM_EMISSION_INVENTORIES[[CITY]][[x]]/total*dirichlet_pointiness(PM_EMISSION_INVENTORY_CONFIDENCE),scale=1))
names(samples) <- names(quantiles)
new_total <- sum(unlist(samples))
parameters$PM_EMISSION_INVENTORY[[n]] <- future.apply::future_lapply(samples,function(x)x/new_total)
}
}else{
PM_EMISSION_INVENTORY <- emission_inventories[[CITY]]
}
# other parameters to set by city:
#DISTANCE_SCALAR_CAR_TAXI
#DISTANCE_SCALAR_WALKING
#DISTANCE_SCALAR_PT
#DISTANCE_SCALAR_CYCLING
#DISTANCE_SCALAR_MOTORCYCLE
# Monte Carlo over parameter samples, parallelised with foreach/%dopar%.
# NOTE: .export = ls(globalenv()) ships the entire global environment to every
# worker -- expensive, but guarantees all <<- globals above are visible.
city_results[[CITY]] <- foreach(sampl = 1:NSAMPLES, .export = ls(globalenv()), .verbose = T) %dopar% {
# Uncomment for non-parallel execution
# city_results[[CITY]] <- lapply(1:NSAMPLES, function(sampl) {
# TESTING: run only for 1 sampl
# sampl <- 1
# Print sampl
print(paste("Sampl is: ", sampl))
# Promote this sample's parameter draws to variables in the worker (pos=1).
for(i in 1:length(parameters))
assign(names(parameters)[i],parameters[[i]][[sampl]],pos=1)
# Split the sin exponent sum between casualty and striker exponents.
CAS_EXPONENT <<- CASUALTY_EXPONENT_FRACTION * SIN_EXPONENT_SUM
STR_EXPONENT <<- SIN_EXPONENT_SUM - CAS_EXPONENT
## instead of ithimr::set_vehicle_inventory() # sets vehicle inventory
# Vehicle inventory = mode speeds + this sample's PM emission shares.
vehicle_inventory <- MODE_SPEEDS
vehicle_inventory$pm_emission_inventory <- 0
for(m in names(parameters$PM_EMISSION_INVENTORY[[sampl]]))
vehicle_inventory$pm_emission_inventory[vehicle_inventory$stage_mode %in% m] <- parameters$PM_EMISSION_INVENTORY[[sampl]][[m]] %>% as.numeric()
VEHICLE_INVENTORY <<- vehicle_inventory
## (1) AP PATHWAY ######################################
# Calculate PM2.5 concentrations
##!! using pa durations for now, which don't differentiate between road types and las.
##!! we don't have durations by road type and la. We could map from distances.
pm_conc <- scenario_pm_calculations(DIST, pp_summary)
## change inh column names: suffix '_inh' so they don't clash with the pa columns below
for(i in 1:length(pp_summary)) colnames(pp_summary[[i]])[INH_NAMES] <- paste0(colnames(pp_summary[[i]])[INH_NAMES],'_inh')
scenario_pm <- pm_conc$scenario_pm
pm_conc_pp <- pm_conc$pm_conc_pp
# pm_conc <- NULL
# Air pollution DR calculation
RR_AP_calculations <- gen_ap_rr(pm_conc_pp)
# pm_conc_pp <- NULL
## (2) PA PATHWAY ##############################################
# Calculate total mMETs
## pp_summary and SYNTHETIC_POPULATION are basically the same thing.
# Only difference is pp_summary is a list for scenarios. This could be more efficient.
# this function differs from ithim-r because mmets differ in baseline and scenario
##!! check these look sensible
## rename pa columns: strip the '_pa' suffix so total_mmet finds the names it expects
for(i in 1:length(pp_summary)) {
if (pp_summary[[i]] %>% names() %>% str_detect("_pa") %>% any())
colnames(pp_summary[[i]]) <- future.apply::future_sapply(colnames(pp_summary[[i]]),function(x) gsub('_pa','',x))
}
mmets_pp <- total_mmet(pp_summary) %>% as.data.frame()
## change names back (restore '_pa' on the PA columns, drop '_inh' from the inh columns)
##!! alternatively, re-write ITHIM-R functions within metahit_functions.R so that scenario_pm_calculations and total_mmet look for different columns, e.g. _dur_inh and _dur_pa.
for(i in 1:length(pp_summary)) colnames(pp_summary[[i]])[PA_NAMES] <- paste0(colnames(pp_summary[[i]])[PA_NAMES],'_pa')
for(i in 1:length(pp_summary)) colnames(pp_summary[[i]]) <- future.apply::future_sapply(colnames(pp_summary[[i]]),function(x) gsub('_inh','',x))
# Physical activity calculation
RR_PA_calculations <- ithimr::gen_pa_rr(mmets_pp)
# mmets_pp <- NULL
## (3) COMBINE (1) AND (2) #################################################
# Physical activity and air pollution combined
RR_PA_AP_calculations <- combined_rr_ap_pa(RR_PA_calculations,RR_AP_calculations)
# RR_PA_calculations <- RR_AP_calculations <- NULL
## (4) INJURIES ##############################################
# get city data: restrict each of the four injury tables to this city's rows
city_table <- injury_table
for(i in 1:2)
for(j in 1:2)
city_table[[i]][[j]] <- injury_table[[i]][[j]][injury_table[[i]][[j]]$region==CITY,]
## for each scenario, add/subtract distance
# get indices for fast matching data
roads <- unique(injury_table[[1]][[1]]$road)
# model_modes and distance_scalars correspond element-wise.
model_modes <- c('pedestrian','cyclist','motorcycle','car/taxi')
distance_scalars <- c(DISTANCE_SCALAR_WALKING,DISTANCE_SCALAR_CYCLING,DISTANCE_SCALAR_MOTORCYCLE,DISTANCE_SCALAR_CAR_TAXI)
injury_deaths <- secondary_deaths <- list()
# get prediction for baseline (using smoothed data, not raw data)
for(i in 1:2)
for(j in 1:2){
## the baseline distances are the same as the scenario distances for the uninteresting modes
## baseline distances are overwritten by scenario distances for interesting modes
city_table[[i]][[j]]$cas_distance <- city_table[[i]][[j]]$base_cas_distance
city_table[[i]][[j]]$strike_distance <- city_table[[i]][[j]]$base_strike_distance
city_table[[i]][[j]]$cas_distance_sum <- city_table[[i]][[j]]$base_cas_distance_sum
city_table[[i]][[j]]$strike_distance_sum <- city_table[[i]][[j]]$base_strike_distance_sum
# Use the stored baseline prediction rather than re-running the model.
city_table[[i]][[j]]$pred <- city_table[[i]][[j]]$base_pred #
#city_table[[i]][[j]]$pred <- predict(baseline_injury_model[[i]][[j]],newdata=city_table[[i]][[j]],type='response')
}
injury_predictions <- summarise_injuries(city_table)
injury_deaths[[1]] <- injury_predictions[[1]]
secondary_deaths[[1]] <- injury_predictions[[2]]
injury_predictions_for_bz_baseline <- summarise_injuries_for_bz(city_table)
# store baseline data
baseline_city_table <- city_table
injury_ratios_for_bz <- list()
injury_ratios_for_bz[[1]] <- injury_predictions_for_bz_baseline
# Baseline ratios are identically 1: the table is divided by itself (non-id columns).
injury_ratios_for_bz[[1]][,c(1:ncol(injury_ratios_for_bz[[1]]))[-1]] <- injury_ratios_for_bz[[1]][,-1]/injury_predictions_for_bz_baseline[,-1]
## update distances
# NOTE: 0:NSCEN+1 parses as (0:NSCEN)+1, i.e. 1:(NSCEN+1) -- the distance scalars
# are applied to the baseline AND every scenario's distance columns here.
for(scen in 0:NSCEN+1){
scen_name <- SCEN_INJURY_SHORT_NAME[scen]
for(j in 1:2){
# scale casualty distances by the scalar for the casualty's mode
mode_indices <- match(baseline_city_table[[1]][[j]]$cas_mode,model_modes)
baseline_city_table[[1]][[j]][[paste0(scen_name,'_cas_distance')]] <- baseline_city_table[[1]][[j]][[paste0(scen_name,'_cas_distance')]] * distance_scalars[mode_indices]
baseline_city_table[[1]][[j]][[paste0(scen_name,'_cas_distance_sum')]] <- baseline_city_table[[1]][[j]][[paste0(scen_name,'_cas_distance_sum')]] * distance_scalars[mode_indices]
# scale striker distances by the scalar for the striking mode
mode_indices <- match(baseline_city_table[[j]][[1]]$strike_mode,model_modes)
baseline_city_table[[j]][[1]][[paste0(scen_name,'_strike_distance')]] <- baseline_city_table[[j]][[1]][[paste0(scen_name,'_strike_distance')]] * distance_scalars[mode_indices]
baseline_city_table[[j]][[1]][[paste0(scen_name,'_strike_distance_sum')]] <- baseline_city_table[[j]][[1]][[paste0(scen_name,'_strike_distance_sum')]] * distance_scalars[mode_indices]
}
}
# NOTE: 1:NSCEN+1 parses as 2:(NSCEN+1) -- scenario indices only (baseline is index 1).
for(scen in 1:NSCEN+1){
scen_name <- SCEN_INJURY_SHORT_NAME[scen]
city_table <- baseline_city_table
# casualty distances
for(j in 1:2){
# edit dataset with new distances
city_table[[1]][[j]]$cas_distance <- city_table[[1]][[j]][[paste0(scen_name,'_cas_distance')]]
city_table[[1]][[j]]$cas_distance_sum <- city_table[[1]][[j]][[paste0(scen_name,'_cas_distance_sum')]]
}
# striker distances
for(i in 1:2){
# edit dataset with new distances
city_table[[i]][[1]]$strike_distance <- city_table[[i]][[1]][[paste0(scen_name,'_strike_distance')]]
city_table[[i]][[1]]$strike_distance_sum <- city_table[[i]][[1]][[paste0(scen_name,'_strike_distance_sum')]]
}
# get prediction for scenario using modified smoothed data, not raw data
# (predict_injuries was chosen in section 5: either stats::predict or predict_without_model)
for(i in 1:2)
for(j in 1:2)
city_table[[i]][[j]]$pred <- predict_injuries(baseline_injury_model[[i]][[j]],newdata=city_table[[i]][[j]],type='response')
# summarise predicted fatalities
injury_predictions <- summarise_injuries(city_table)
injury_ratios_for_bz[[scen]] <- summarise_injuries_for_bz(city_table)
# store results
injury_deaths[[scen]] <- injury_predictions[[1]]
secondary_deaths[[scen]] <- injury_predictions[[2]]
}
#city_table <- baseline_city_table <- scen_diff <- NULL
# convert to ithimr format: stack baseline + scenario deaths with a scenario label
injuries <- cbind(do.call(rbind,injury_deaths),rep(SCEN,each=nrow(injury_deaths[[1]])))
names(injuries) <- c('dem_index','Deaths','scenario')
# compute ylls from deaths (outer parentheses just auto-print the result)
(deaths_yll_injuries <- injury_death_to_yll(injuries))
# store reference number of deaths and ylls
ref_injuries <- deaths_yll_injuries$ref_injuries
##TODO report by mode. covert to burden. then sum.
## (5) COMBINE (3) AND (4)###########################################
# Combine health burden from disease and injury
(hb <- health_burden(RR_PA_AP_calculations,deaths_yll_injuries$deaths_yll_injuries))
pif_table <- health_burden_2(RR_PA_AP_calculations)
# Append injury ratio columns (scenario relative to baseline) to the PIF table.
# Note injury_ratios_for_bz[[1]] is all 1s, so the division is effectively a no-op.
for(scen in 1:NSCEN+1) {
for(i in 2:ncol(injury_ratios_for_bz[[scen]])) {
injury_col_name <- colnames(injury_ratios_for_bz[[scen]])[i]
pif_table[[paste0(SCEN_SHORT_NAME[scen],'_',injury_col_name)]] <- injury_ratios_for_bz[[scen]][[i]]/injury_ratios_for_bz[[1]][[i]]
}
}
## add in population column to each health-burden table
for(i in 1:length(hb))
hb[[i]] <- left_join(hb[[i]],POPULATION[,c(colnames(POPULATION)%in%c('population','dem_index'))],by='dem_index')
pathway_hb <- NULL
constant_mode <- F
# if(constant_mode) {
# pathway_hb <- health_burden(RR_PA_AP_calculations,deaths_yll_injuries$deaths_yll_injuries,combined_AP_PA=F)
# pathway_pif_table <- health_burden_2(RR_PA_AP_calculations,combined_AP_PA=F)
# x11(); plot(pif_table$scen_pif_pa_ap_noise_no2_ihd,1-(1-pathway_pif_table$scen_pif_pa_ihd)*(1-pathway_pif_table$scen_pif_ap_ihd))
# lines(c(0,1),c(0,1))
# }
#
# # Store pif table
#hb[["pif_table"]] <- pif_table
# RR_PA_AP_calculations <- NULL
#hb_2 <- RunMSLT(mslt_df, i_sex, i_age_cohort, disease_names, pif)
#profvis(hb_2 <- belens_function(pif_table) )
#sort(sapply(ls(),function(x)object.size(get(x))))
## Rob, added this line to save to my repo, but not sure if you have it too, so I commented it out.
# write_csv(hb_2, '../mh-mslt/data/pif.csv')
# hb is the last expression: this sample's return value from the %dopar% block.
hb
}
## clear memory
# SYNTHETIC_POPULATION <<- NULL
# pp_summary <- NULL
# reduce size of injury table
# Drop this city's rows from the shared injury table so later iterations carry less data.
for(i in 1:2)
for(j in 1:2)
injury_table[[i]][[j]] <- injury_table[[i]][[j]] %>% dplyr::filter(region != CITY)
# Persist this city's results and release them from memory (they are reloaded after the loop).
saveRDS(city_results[[CITY]], paste0('outputs/scenarios/', global_scen, '/files/',CITY,'_results.Rds'))
city_results[[CITY]] <- c()
}
## Reload the per-city result files written inside the city loop, then save the
## combined list as one bundle. CITY is left set to the last region processed,
## matching the loop above.
results_dir <- paste0('outputs/scenarios/', global_scen, '/files/')
for (region in city_regions) {
  CITY <<- region
  city_results[[CITY]] <- readRDS(paste0(results_dir, CITY, '_results.Rds'))
}
saveRDS(city_results, paste0(results_dir, 'city_results.Rds'))
## 10 EXTRACT RESULTS AND PLOT ############################################################
# Summarise the sampled outcomes per city: median and 5%/95% quantiles of
# deaths/ylls, scaled per 1000 population, for each scenario outcome column.
outcomes <- list()
# Columns of the per-sample result tables that hold scenario outcomes.
plot_cols <- future.apply::future_sapply(names(city_results[[1]][[1]][[1]]),function(x)grepl('scen',x))
# Shorten column labels by stripping the scenario/pathway prefixes.
col_names <- str_replace_all((names(city_results[[1]][[1]][[1]])[plot_cols]), paste0(paste0(global_scen, "_deaths_"), '|pa_ap_|ap_|pa_'), '')
#col_names <- future.apply::future_sapply(names(city_results[[1]][[1]][[1]])[plot_cols],function(x) dplyr::last(strsplit(x,'_')[[1]]))
for(type in c('deaths','ylls')){
outcomes[[type]] <- list()
outcomes[[type]]$lower <- matrix(0,nrow=length(city_regions),ncol=length(col_names))
colnames(outcomes[[type]]$lower) <- col_names
rownames(outcomes[[type]]$lower) <- city_regions
# Chained assignment: 'median' is created before 'upper', so the resulting list
# element order is lower, median, upper (relevant when rbind-ing them later).
outcomes[[type]]$upper <- outcomes[[type]]$median <- outcomes[[type]]$lower
for(i in 1:length(city_regions)){
CITY <- city_regions[i]
# one column per sample; rows are the outcome columns
sum_results <- future.apply::future_sapply(city_results[[CITY]],function(x) colSums(x[[type]][,plot_cols]))
outcomes[[type]]$median[i,] <- apply(sum_results,1,function(x)median(x))/sum(city_results[[CITY]][[1]][[type]]$population)*1e3
outcomes[[type]]$lower[i,] <- apply(sum_results,1,quantile,0.05)/sum(city_results[[CITY]][[1]][[type]]$population)*1e3
outcomes[[type]]$upper[i,] <- apply(sum_results,1,quantile,0.95)/sum(city_results[[CITY]][[1]][[type]]$population)*1e3
}
}
# Colour palette; only used by the commented-out base-graphics plots below.
cols <- rainbow(length(city_regions))
# For each outcome type, stack the lower/median/upper matrices, label each row's
# statistic, and save a boxplot per outcome column, filled by city.
# outcomes[[type]] list order is lower, median, upper (median/upper were created
# from lower by the chained assignment above), so after rbind + rownames_to_column
# the suffixless rows are 'lower', '.1' rows 'median' and '.2' rows 'upper'.
# FIXES: (1) regexes are now escaped and anchored (".1" previously matched ANY
# character followed by '1', anywhere in the name); (2) the var labels now match
# the actual row order (lower/median were previously swapped -- harmless to the
# plot, which doesn't use 'var', but wrong in the data).
for(type in c('deaths','ylls')){
td <- purrr::reduce(outcomes[[type]], rbind) %>% as.data.frame() %>% tibble::rownames_to_column("city") %>%
dplyr::mutate(var = case_when(stringr::str_detect(city, "\\.1$") ~ "median",
stringr::str_detect(city, "\\.2$") ~ "upper",
TRUE ~ "lower")) %>%
dplyr::mutate(city = str_remove(city, "\\.[0-9]+$")) %>%
tidyr::pivot_longer(cols = -c(city, var))
ggsave(filename = paste0('outputs/scenarios/', global_scen, '/figures/', type,'.png'), width = 12, height = 8, dpi = 300,
#width = 9, height = 6,
ggplot(td) +
aes(x = name, y = value, fill = city) +
geom_boxplot(shape = "circle", position=position_dodge(1)) +
scale_fill_brewer(palette = "Dark2", direction = 1) +
theme_light() + coord_flip() + labs(x = "", y = type) +
theme(axis.text.x=element_text(angle=90, hjust=1))
)
}
# for(type in c('deaths','ylls')){
# pdf(paste0('outputs/scenarios/', global_scen, '/figures/', type,'.pdf'), width=9, height=6);
# par(mar=c(6,5,1,1))
# x<-barplot(outcomes[[type]]$median,las=2,cex.axis=1.5,cex.lab=1.5,ylab=paste0('Thousand ',type,' pp averted in Scenario'),xlab='',cex.names=1.5,beside=T,col=cols)
# legend("topright", fill=cols,bty='n',legend=city_regions, y = max(outcomes[[type]]$median))
# dev.off()
# }
#
# for(type in c('deaths','ylls')){
# pdf(paste0('outputs/scenarios/', global_scen, '/figures/', type,'.pdf'), width=9, height=6);
# par(mar=c(6,5,1,1))
# plot(x,outcomes[[type]]$median,las=2,cex.axis=1.5,cex.lab=1.5,ylab=paste0('Thousand ',type,' pp averted in Scenario'),xlab='',xaxt='n',
# cex=1.5,col=cols,pch=15,frame=F,ylim=c(min(outcomes[[type]]$lower),max(outcomes[[type]]$upper)))
# abline(h=0)
# #legend(fill=cols,bty='n',legend=city_regions,x=prod(dim(outcomes[[type]][[1]])-1),y=max(outcomes[[type]]$upper))
# legend("topright", fill=cols,bty='n',legend = city_regions,y=min(outcomes[[type]]$median))
# for(i in 1:nrow(x)) for(j in 1:ncol(x))
# lines(c(x[i,j],x[i,j]),c(outcomes[[type]]$lower[i,j],outcomes[[type]]$upper[i,j]),col=cols[i],lwd=2)
# axis(1,at=x[1,],labels=col_names,las=2)
# dev.off()
# }
## 11 VOI ############################################################
#if('PM_EMISSION_INVENTORY'%in%names(parameters)){
# for(i in 1:length(parameters$PM_EMISSION_INVENTORY[[1]])){
# extract_vals <- sapply(parameters$PM_EMISSION_INVENTORY,function(x)x[[i]])
# if(sum(extract_vals)!=0)
# parameters[[paste0('EMISSION_INVENTORY_',names(parameters$PM_EMISSION_INVENTORY[[1]])[i])]] <- extract_vals
# }
#}
# Keep a full copy before stripping list-valued entries that cbind cannot handle.
parameter_store <- parameters
for(list_names in c('DR_AP_LIST','PM_CONC_BASE_QUANTILE','PM_TRANS_SHARE_QUANTILE','PM_EMISSION_INVENTORY','PM_EMISSION_INVENTORY_QUANTILES'))
parameters[[list_names]] <- NULL
# One column per (remaining) parameter, one row per sample.
parameter_samples <- do.call(cbind,parameters)
saveRDS(parameter_samples, paste0('outputs/scenarios/', global_scen, '/files/parameter_samples.Rds'))
parameter_samples <- readRDS(paste0('outputs/scenarios/', global_scen, '/files/parameter_samples.Rds'))
#parameter_samples <- parameter_samples[,!colnames(parameter_samples)%in%c('DR_AP_LIST','PM_CONC_BASE_QUANTILE','PM_TRANS_SHARE_QUANTILE','PM_EMISSION_INVENTORY','PM_EMISSION_INVENTORY_QUANTILES')]
# Scenario outcome columns, excluding names containing 'ac' or 'neo'
# (presumably all-cause and neoplasm outcomes -- TODO confirm).
plot_cols <- future.apply::future_sapply(names(city_results[[1]][[1]][[1]]),function(x)grepl('scen',x)&!(grepl('ac',x)|grepl('neo',x)))
col_names <- future.apply::future_sapply(names(city_results[[1]][[1]][[1]])[plot_cols],function(x) dplyr::last(strsplit(x,'_')[[1]]))
# Per-city matrix of summed YLL outcomes: one row per sample, one column per outcome.
outcome <- list()
type <- 'ylls'
for(i in 1:length(city_regions)){
CITY <- city_regions[i]
outcome[[CITY]] <- t(sapply(city_results[[CITY]],function(x)colSums(x[[type]][,plot_cols])))
}
## get basic evppi matrix
# EVPPI of each scalar parameter column taken one at a time, against the
# per-city outcome matrices.
evppi <- future.apply::future_lapply(1:ncol(parameter_samples),
                                     FUN = ithimr::compute_evppi,
                                     as.data.frame(parameter_samples),
                                     outcome,
                                     nscen=NSCEN,
                                     all=T,
                                     multi_city_outcome=F)
evppi <- do.call(rbind,evppi)
# Columns: one per (scenario, city) pair, named "<scenario>_<city>".
colnames(evppi) <- apply(expand.grid(SCEN_SHORT_NAME[2:length(SCEN_SHORT_NAME)],names(outcome)),1,function(x)paste0(x,collapse='_'))
rownames(evppi) <- colnames(parameter_samples)
## replace some rows of evppi if some parameters should be combined
## add four-dimensional EVPPI if AP_DOSE_RESPONSE is uncertain.
# NOTE(review): numcores appears unused in this chunk -- possibly a leftover.
numcores <- 1
# Assess the AP dose-response parameters (alpha/beta/gamma/tmrel) jointly,
# per disease; requires >= 1024 samples for a stable multivariate estimate.
if("AP_DOSE_RESPONSE_QUANTILE_ALPHA_lri"%in%names(parameters)&&NSAMPLES>=1024){
# disease names are the suffix after 'AP_DOSE_RESPONSE_QUANTILE_ALPHA_'
AP_names <- future.apply::future_sapply(names(parameters),function(x)length(strsplit(x,'AP_DOSE_RESPONSE_QUANTILE_ALPHA')[[1]])>1)
diseases <- future.apply::future_sapply(names(parameters)[AP_names],function(x)strsplit(x,'AP_DOSE_RESPONSE_QUANTILE_ALPHA_')[[1]][2])
sources <- list()
for(di in diseases){
# all dose-response quantile columns belonging to this disease
col_names <- future.apply::future_sapply(colnames(parameter_samples),function(x)grepl('AP_DOSE_RESPONSE_QUANTILE',x)&grepl(di,x))
sources[[di]] <- parameter_samples[,col_names]
}
evppi_for_AP <- future.apply::future_lapply(1:length(sources),
                                            FUN = ithimr:::compute_evppi,
                                            sources,
                                            outcome,
                                            all=T,
                                            multi_city_outcome=F)
names(evppi_for_AP) <- paste0('AP_DOSE_RESPONSE_QUANTILE_',diseases)
evppi <- rbind(evppi,do.call(rbind,evppi_for_AP))
## get rows to remove
# drop the per-parameter rows now superseded by the joint per-disease rows
keep_names <- future.apply::future_sapply(rownames(evppi),function(x)!any(c('ALPHA','BETA','GAMMA','TMREL')%in%strsplit(x,'_')[[1]]))
evppi <- evppi[keep_names,]
}
# EVPPI of the PM emission inventory, treated as one joint (Dirichlet)
# parameter per city: reconstruct the sampled inventory shares from the
# stored uniform quantiles, then assess them jointly.
if("PM_EMISSION_INVENTORY_QUANTILES"%in%names(parameter_store)&&NSAMPLES>=1024){
sources <- list()
for(ci in 1:length(city_regions)){
city <- city_regions[ci]
sources[[ci]] <- matrix(0,nrow=NSAMPLES,ncol=length(parameter_store$PM_EMISSION_INVENTORY_QUANTILES[[1]]))
total <- sum(unlist(PM_EMISSION_INVENTORIES[[city]]))
parameter_store$PM_EMISSION_INVENTORY <- list()
for(n in 1:NSAMPLES){
# NOTE(review): this relies on `$` partial matching -- the stored element
# is 'PM_EMISSION_INVENTORY_QUANTILES' (plural); it resolves because the
# prefix is unambiguous. Consider using the exact name.
quantiles <- parameter_store$PM_EMISSION_INVENTORY_QUANTILE[[n]]
# Dirichlet sampling via normalised gammas: each component's shape is its
# baseline inventory share scaled by the confidence "pointiness".
samples <- future.apply::future_sapply(names(quantiles),function(x) qgamma(quantiles[[x]],shape=PM_EMISSION_INVENTORIES[[city]][[x]]/total*dirichlet_pointiness(PM_EMISSION_INVENTORY_CONFIDENCE),scale=1))
new_total <- sum(unlist(samples))
sources[[ci]][n,] <- samples/new_total
}
}
evppi_for_emissions <- future.apply::future_lapply(1:length(sources),
                                                   FUN = ithimr:::compute_evppi,
                                                   sources,
                                                   outcome,
                                                   all=F,
                                                   multi_city_outcome=F)
#names(evppi_for_emissions) <- paste0('EMISSION_INVENTORY_',city_regions)
#sapply(evppi_for_emissions,function(x)x[x>0])
## get rows to remove
# replace the per-component inventory rows with a single joint row
keep_names <- future.apply::future_sapply(rownames(evppi),function(x)!grepl('EMISSION_INVENTORY_',x))
evppi <- evppi[keep_names,]
# keep only the positive (own-city) entries from each per-city result
evppi <- rbind(evppi,future.apply::future_sapply(evppi_for_emissions,function(x)x[x>0]))
rownames(evppi)[nrow(evppi)] <- 'PM_EMISSION_INVENTORY'
}
print(evppi)
## PA
# EVPPI of background physical activity: the SCALAR and ZEROS parameters
# are assessed jointly (one joint source per city).
if(sum(c("BACKGROUND_PA_SCALAR","BACKGROUND_PA_ZEROS")%in%names(parameters))==2&&NSAMPLES>=1024){
sources <- list()
for(ci in 1:length(city_regions)){
city <- city_regions[ci]
# NOTE(review): pa_names does not depend on `city`, so every city gets the
# same column set -- confirm whether per-city column selection was intended.
pa_names <- future.apply::future_sapply(colnames(parameter_samples),function(x)(grepl('BACKGROUND_PA_SCALAR',x)||grepl('BACKGROUND_PA_ZEROS',x)))
sources[[ci]] <- parameter_samples[,pa_names]
}
evppi_for_pa <- future.apply::future_lapply(1:length(sources),
                                            FUN = ithimr:::compute_evppi,
                                            sources,
                                            outcome,
                                            all=F,
                                            multi_city_outcome=F)
#names(evppi_for_pa) <- paste0('BACKGROUND_PA_',city_regions)
## get rows to remove
# replace the individual BACKGROUND_PA_* rows with a single joint row
keep_names <- future.apply::future_sapply(rownames(evppi),function(x)!grepl('BACKGROUND_PA_',x))
evppi <- evppi[keep_names,]
evppi <- rbind(evppi,future.apply::future_sapply(evppi_for_pa,function(x)x[x>0]))
#evppi <- rbind(evppi,do.call(rbind,evppi_for_pa))
rownames(evppi)[nrow(evppi)] <- 'BACKGROUND_PA'
}
## plot evppi
# Heatmap of EVPPI (%): rows = parameters, columns = (scenario, city) pairs.
library(RColorBrewer)
library(plotrix)
# replace NAs with 0 so the colour binning below always finds a bin
evppi <- apply(evppi,2,function(x){x[is.na(x)]<-0;x})
{pdf(paste0('outputs/scenarios/', global_scen, '/figures/evppi.pdf'), height=15, width=8);
par(mar=c(6,20,3.5,5.5))
labs <- rownames(evppi)
# 12-step red palette, darkest shade = highest EVPPI
get.pal=colorRampPalette(brewer.pal(9,"Reds"))
redCol=rev(get.pal(12))
# colour-bin breakpoints from just above the maximum down to 0
bkT <- seq(max(evppi)+1e-10, 0,length=13)
cex.lab <- 1.5
maxval <- round(bkT[1],digits=1)
col.labels<- c(0,maxval/2,maxval)
cellcolors <- vector()
# map each matrix cell to the darkest break it falls below
for(ii in 1:length(unlist(evppi)))
  cellcolors[ii] <- redCol[tail(which(unlist(evppi[ii])<bkT),n=1)]
color2D.matplot(evppi,cellcolors=cellcolors,main="",xlab="",ylab="",cex.lab=2,axes=F,border='white')
# x axis: one label per city, centred over its NSCEN scenario columns
fullaxis(side=1,las=2,at=NSCEN*0:(length(outcome)-1)+NSCEN/2,labels=names(outcome),line=NA,pos=NA,outer=FALSE,font=NA,lwd=0,cex.axis=1)
fullaxis(side=2,las=1,at=(length(labs)-1):0+0.5,labels=labs,line=NA,pos=NA,outer=FALSE,font=NA,lwd=0,cex.axis=0.8)
mtext(3,text='By how much (%) could we reduce uncertainty in\n the outcome if we knew this parameter perfectly?',line=1)
color.legend(NSCEN*length(outcome)+0.5,0,NSCEN*length(outcome)+0.8,length(labs),col.labels,rev(redCol),gradient="y",cex=1,align="rb")
# grid lines separating city blocks (vertical) and row groups (horizontal)
for(i in seq(0,NSCEN*length(outcome),by=NSCEN)) abline(v=i)
for(i in seq(0,length(labs),by=NSCEN)) abline(h=i)
dev.off()
}
}
|
98a2ba49a61c4a1722838f8cf3228f790757923f
|
a2ba22a5bb7f38a214d40b769cbc8f6378426750
|
/R/calculateTiTv.R
|
e3f375a42d4584988b8fe061efd0fb93ab944cdb
|
[] |
no_license
|
huklein/R453Plus1Toolbox
|
faa368f57e1f27b2f45249ce12d95ed72610be8b
|
539c22e575530ea93c2f2e95166bd6ef646a2a8d
|
refs/heads/master
| 2021-08-30T02:00:13.385605
| 2017-12-14T01:55:16
| 2017-12-14T01:55:16
| 113,723,920
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,947
|
r
|
calculateTiTv.R
|
# Compute the transition/transversion (Ti/Tv) ratio of the SNPs in a
# variant set.
#
# Builds a 4x4 substitution-count matrix (reference base x variant base)
# from all single-nucleotide variants in fData(object) and returns it
# together with the Ti/Tv ratio. Transitions are A<->G (purines) and
# C<->T (pyrimidines); every other substitution is a transversion.
# Expected values: http://www.broadinstitute.org/gsa/wiki/index.php/VariantEval
#
# Returns: list(subMat = 4x4 count matrix (diagonal NA), TiTv = ti/tv).
.calculateTiTv <- function(object) {
    fd <- fData(object)
    if (!all(c("variantBase", "referenceBases") %in% names(fd))) {
        stop("Column variantBase or referenceBases is missing.")
    }

    # restrict to SNPs: single-base reference and single-base variant
    fd <- fd[nchar(fd$referenceBases) == 1 & nchar(fd$variantBase) == 1, ]

    bases <- c("A", "G", "C", "T")
    subMat <- matrix(NA, nrow = 4, ncol = 4,
        dimnames = list(bases, bases))
    # count every off-diagonal substitution; the diagonal stays NA
    for (ref in bases) {
        for (var in setdiff(bases, ref)) {
            subMat[ref, var] <- sum(fd$referenceBases == ref & fd$variantBase == var)
        }
    }

    # transitions: A<->G and C<->T; everything else is a transversion
    ti <- subMat["A", "G"] + subMat["G", "A"] + subMat["C", "T"] + subMat["T", "C"]
    tv <- sum(subMat, na.rm = TRUE) - ti

    return(list(subMat = subMat, TiTv = ti / tv))
}

setMethod("calculateTiTv", signature = signature(object = "AVASet"),
    .calculateTiTv)

setMethod("calculateTiTv", signature = signature(object = "MapperSet"),
    .calculateTiTv)
|
7ae14af848d34fee466d5f5fceed4786fbd3e6bd
|
d10541def727e478bdf637b43bc48cf52111d1c5
|
/01_simple.R
|
717198a7480b86ed4a5f8a902c701fb8d50cd676
|
[] |
no_license
|
mfoos/step-by-step-shiny
|
fff337c686a1b2418669651955732ebdc92b8cb0
|
c28a55245c2044cffd9eea1deff5f797d9d2d3a8
|
refs/heads/master
| 2020-05-05T13:01:56.596112
| 2019-09-22T22:19:03
| 2019-09-22T22:19:03
| 180,056,150
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 649
|
r
|
01_simple.R
|
library(shiny)

# UI: a fluid (grid) page with one full-width column holding a free-text
# input widget and a placeholder for the generated output.
ui <- fluidPage(
  fluidRow(
    column(
      width = 12,
      textInput("textinputlabel", "What's up?"),
      textOutput("newtext")
    )
  )
)

# Server: echo the user's text back in uppercase.
server <- function(input, output) {
  output$newtext <- renderText(
    toupper(input$textinputlabel)
  )
}

# Run the application
shinyApp(ui = ui, server = server)
|
2b8c533a8ce199fa231b2d81a76dc903af5ce843
|
15e79da5728a9d3aea403753a19487e5fc05ca67
|
/DataAnalysisStatisticalInference/labs/3/lab3.R
|
8831d4c093eba8a898b8193a847050c27486689b
|
[] |
no_license
|
jose-dev/coursera
|
d690289e77c00d218461af64626cf047f5038c6e
|
a644badb9a56b4293affad936fcb906c64de31fe
|
refs/heads/master
| 2021-01-17T15:02:33.592741
| 2017-03-28T16:46:55
| 2017-03-28T16:46:55
| 23,797,568
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,653
|
r
|
lab3.R
|
#
# https://www.datacamp.com/courses/data-analysis-and-statistical-inference_mine-cetinkaya-rundel-by-datacamp/lab-3a-foundations-for-inference-sampling-distributions?ex=1
# https://www.datacamp.com/courses/data-analysis-and-statistical-inference_mine-cetinkaya-rundel-by-datacamp/lab-3b-foundations-for-inference-confidence-intervals?ex=1
#

# Load the 'ames' housing data frame from the DataCamp server.
load(url("http://s3.amazonaws.com/assets.datacamp.com/course/dasi/ames.RData"))

# Preliminary inspection of the data set.
names(ames)
head(ames)
tail(ames)

# Variables of interest: above-ground living area and sale price.
area <- ames$Gr.Liv.Area
price <- ames$SalePrice

# Distribution of 'area' in the population.
summary(area)
hist(area)

# Two independent simple random samples of size 50.
samp0 <- sample(area, 50)
samp1 <- sample(area, 50)

# Side-by-side histograms of the two samples.
par(mfrow = c(1, 2))
hist(samp0)
hist(samp1)

# Sampling distribution of the mean for n = 50: 5000 replicate sample means.
sample_means50 <- replicate(5000, mean(sample(area, 50)))

# View the result; adjust 'breaks' to show more or less detail.
hist(sample_means50, breaks = 13)

# Sampling distributions for n = 10 and n = 100, built in a single pass.
sample_means10 <- rep(NA, 5000)
sample_means100 <- rep(NA, 5000)
for (i in seq_len(5000)) {
  sample_means10[i] <- mean(sample(area, 10))
  sample_means100[i] <- mean(sample(area, 100))
}

# Peek at all three sampling distributions.
head(sample_means10)
head(sample_means50) # was already computed above
head(sample_means100)

# Shared x-axis limits so the three histograms are comparable.
xlimits <- range(sample_means10)

# Stacked histograms: the spread shrinks as the sample size grows.
par(mfrow = c(3, 1))
hist(sample_means10, breaks = 20, xlim = xlimits)
hist(sample_means50, breaks = 20, xlim = xlimits)
hist(sample_means100, breaks = 20, xlim = xlimits)

# One sample of 50 sale prices and its mean.
sample_50 <- sample(price, 50)
mean(sample_50)

## sampling distribution of mean sale price, n = 50
sample_means50 <- replicate(5000, mean(sample(price, 50)))
head(sample_means50)

## sampling distribution of mean sale price, n = 150
sample_means150 <- replicate(5000, mean(sample(price, 150)))
head(sample_means150)

#########################################################################################

# Take a sample of size 60 of the living-area population.
population <- ames$Gr.Liv.Area
samp <- sample(population, 60)

# Its mean, and a histogram of the sample.
sample_mean <- mean(samp)
hist(samp)
|
9bf0b8305cfca98c6d476d8983ea99bbc4257d0d
|
212f8d17d4e2a21ad33ebeee49571ec8764ab3e6
|
/R/read-meteologica2.R
|
4c7a3c2226889383e173a2c968167f4ad2954500
|
[] |
no_license
|
rte-antares-rpackage/antaresWeeklyMargin
|
50db90d81dee445dc223e6a37c223c6f91f7a0c4
|
31ca059a2d80ce82ed079cb7f75147b4a5c9415e
|
refs/heads/master
| 2021-05-11T10:41:49.555037
| 2019-07-24T15:29:38
| 2019-07-24T15:29:38
| 118,109,627
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,447
|
r
|
read-meteologica2.R
|
#' Read Meteologica file
#'
#' @param path path to a directory containing forecast files or a single file.
#' @param country filter files to read only one country.
#' @param date filter files to read by date(s), using a \code{Date} or a character with format \code{\%Y-\%m-\%d}.
#' @param time filter files to read by time, e.g. \code{"00"} or \code{"12"}.
#'
#' @return a \code{data.table}
#' @export
#'
#' @importFrom stringr str_which str_replace str_extract str_detect
#' @importFrom data.table rbindlist
#'
#' @examples
#' \dontrun{
#'
#' library(antaresWeeklyMargin)
#'
#' # ALL files in Europe folder
#' meteo <- read_meteologica2(
#'   path = "meteologica/Europe"
#' )
#'
#' # only for a specific date and time
#' meteo <- read_meteologica2(
#'   path = "meteologica/Europe",
#'   date = "2018-05-24",
#'   time = "12"
#' )
#'
#' }
read_meteologica2 <- function(path, country = NULL, date = NULL, time = NULL) {
  # interactive fallback: ask the user for a path when none is supplied
  if (missing(path)) {
    path <- choose_path()
  }
  path_mtlg <- normalizePath(path, mustWork = TRUE)
  # a directory: collect all CSV files under it (recursively), restoring
  # the working directory on exit
  if (dir.exists(path)) {
    old.wd <- getwd()
    setwd(path)
    on.exit(setwd(old.wd))
    path <- select_file(
      path = ".",
      pattern = "",
      fileext = "\\.csv$",
      multiple = TRUE,
      recursive = TRUE,
      verbose = FALSE
    )
  }
  # forecast types recognised in the file names
  patterns <- c("PhotovoltaicPower", "Wind", "PowerDemand", "PV")
  # optional filters on the candidate file list: country, date(s), time
  if (!is.null(country)) {
    path <- path[str_which(string = tolower(path), pattern = tolower(country))]
  }
  if (!is.null(date)) {
    date <- as.Date(date)
    date <- format(date, format = "%Y%m%d")
    # keep a file if it matches ANY of the requested dates
    filter_date <- lapply(
      X = date,
      FUN = str_detect, string = path
    )
    filter_date <- Reduce("|", filter_date)
    path <- path[filter_date]
  }
  if (!is.null(time)) {
    # time is matched as the HH part of a YYYYMMDDHHMM stamp in the name
    filter_time <- lapply(
      X = paste0("\\d{8}", time, "00"),
      FUN = str_detect, string = path
    )
    filter_time <- Reduce("|", filter_time)
    path <- path[filter_time]
  }
  # drop offshore/onshore wind splits and live-feed files
  path <- path[!str_detect(string = tolower(path), pattern = "offshore")]
  path <- path[!str_detect(string = tolower(path), pattern = "onshore")]
  path <- path[!str_detect(string = tolower(path), pattern = "livefeed")]
  list_countries <- c("Austria", "Belgium", "France", "Germany", "Italy",
                      "Netherlands", "Portugal", "Spain", "Switzerland", "UK",
                      "RepublicOfIreland", "NorthernIreland")
  # read every remaining file and annotate it with type / country / file info
  res <- lapply(
    X = path,
    FUN = function(x) {
      message(paste("Reading file:", x))
      dat <- read_meteologica2_file(x)
      # forecast type: which of `patterns` appears in the file path
      type <- str_which(string = x, pattern = patterns)
      type <- patterns[type]
      dat$type <- type
      # country <- str_replace(string = x, pattern = ".*/|\\\\", replacement = "")
      # country <- str_extract(string = country, pattern = sprintf(".+(?=-%s)", type))
      # country: the entry of list_countries whose name appears in the path
      country <- list_countries[stringr::str_detect(string = x, pattern = list_countries)]
      dat$country <- country
      dat$file_name <- stringr::str_extract(string = x, pattern = "(?<=/)[^/]+$")
      # forecast issue datetime from the YYYYMMDDHHMM stamp in the file name
      dat$file_date <- stringr::str_extract(string = dat$file_name, pattern = "[:digit:]{12}")
      dat$file_date <- as.POSIXct(x = dat$file_date, format = "%Y%m%d%H%M", tz = "Europe/Paris")
      dat
    }
  )
  res <- rbindlist(l = res)
  # order by timestep, newest file name first within each timestep
  res <- res[order(datetime, -file_name)]
  res <- add_mtlg_areas(res, path_mtlg = path_mtlg)
  return(res)
}
#' @importFrom stringr str_extract str_subset
#' @importFrom data.table fread setnames :=
#' @importFrom lubridate with_tz
# Read one Meteologica ensemble CSV: locate the header row, normalise the
# ensemble column names to ENS00..ENS50, and build a Europe/Paris datetime
# column from the local timestamp plus its UTC-offset column.
read_meteologica2_file <- function(path) {
  # the real header is the line containing "From yyyy-mm-dd hh:mm";
  # everything above it is file metadata to skip
  skip_ <- readLines(con = path, n = 10)
  skip_ <- grep(pattern = "From yyyy-mm-dd hh:mm", x = skip_) - 1
  dat <- fread(file = path, fill = TRUE, skip = skip_)
  # shorten the ensemble member columns to their "ENSxx" code
  names_ <- names(dat)
  names_ <- str_subset(string = names_, pattern = "ENS")
  setnames(
    x = dat,
    old = names_,
    new = str_extract(
      string = names_,
      pattern = "ENS\\d{2}"
    )
  )
  setnames(x = dat, old = c("From yyyy-mm-dd hh:mm", "UTC offset from (UTC+/-hhmm)"), new = c("datetime", "tz"))
  # format datetime
  # dat <- dat[, datetime := as.POSIXct(datetime, tz = "Europe/Paris")]
  # append the numeric UTC offset (e.g. "+0200") to the timestamp, parse it
  # as UTC via %z, then convert to the Europe/Paris time zone
  dat[, tz := gsub(pattern = "UTC", replacement = "", x = tz)]
  dat[, datetime := paste0(datetime, tz)]
  dat[, datetime := as.POSIXct(x = datetime, format = "%Y-%m-%d %H:%M%z", tz = "UTC")]
  dat[, datetime := lubridate::with_tz(time = datetime, tzone = "Europe/Paris")]
  # keep only the datetime and the 51 ensemble members
  dat <- dat[, .SD, .SDcols = c("datetime", sprintf("ENS%02d", 0:50))]
  dat
}
|
1028fba4b6a94b6af9f256a1b036c89d6f43ad67
|
fca19c9d85b9e37203868fbce35694f645092daa
|
/tv-ratings/04_categorize.R
|
e72ecf931f9c147a57ac721ad2c1cd8e6b3704de
|
[] |
no_license
|
ryanvmenezes/futbol
|
4592e2fcf78eb4c36852aca35f0923af89a9c89c
|
9729af696be09ee2f70190f666dabf644c7a619f
|
refs/heads/master
| 2020-08-02T23:13:41.645638
| 2020-01-05T07:47:46
| 2020-01-05T07:47:46
| 211,540,014
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 838
|
r
|
04_categorize.R
|
library(lubridate)
library(tidyverse)
library(googlesheets4)

# Raw per-game TV ratings, not yet cleaned.
raw.ratings <- read_csv('ratings-uncleaned.csv')
raw.ratings
raw.ratings %>% arrange(-viewers)

# Per-competition summary: games broadcast plus total / average viewership.
comps <- raw.ratings %>%
  group_by(comp) %>%
  summarise(
    countgames = n(),
    totalviewers = sum(viewers),
    avgviewers = totalviewers / countgames
  )
comps

# Google sheet holding the hand-entered competition classifications.
SHEET <- '1JjO7m1qCtzPTDSltUiBDqX6b9Mfki-cssrirE5CsLJY'
sheetname <- 'organized'

# Pull the classifications and keep a local CSV snapshot.
comps.classified <- read_sheet(SHEET, sheet = sheetname)
comps.classified
comps.classified %>% write_csv('competitions-classified.csv')

# Attach the classification columns to the freshly computed summary stats.
new.comps <- comps %>%
  left_join(comps.classified %>% select(-countgames, -totalviewers, -avgviewers)) %>%
  arrange(continent, country, clubcountry, womens)
new.comps
new.comps %>% filter(is.na(continent)) # competitions still to be classified

# Push the merged table back to the sheet.
new.comps %>%
  write_sheet(SHEET, sheet = sheetname)

# tk: teams, networks
4242341f4f874f63a6cf14f75b1505beb67087de
|
7de54d346a101f2620058f14dc3eaf0fa415750c
|
/code/R/man/rhls.Rd
|
f4804b660fa874715da3da44624071c7147e357c
|
[] |
no_license
|
a3a/RHIPE
|
4e4d82d5e50e6eccc9aa603dc196dc2411a5f5ae
|
e5a9d3b261cd31a9ab01002b4551b5955948e858
|
refs/heads/master
| 2021-01-19T06:03:08.989299
| 2013-04-11T00:05:58
| 2013-04-11T00:05:58
| 9,531,318
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,125
|
rd
|
rhls.Rd
|
\name{rhls}
\alias{rhls}
\title{List Files On HDFS}
\usage{
rhls(folder = NULL, recurse = FALSE)
}
\arguments{
\item{folder}{Path of directory on HDFS or output from
rhmr or rhwatch(read=FALSE)}
\item{recurse}{If TRUE list all files and directories in
sub-directories.}
}
\value{
A data.frame of filesystem information, one row per file or
directory; see Details.
}
\description{
List all files and directories contained in a directory
on the HDFS.
}
\details{
Returns a data.frame of filesystem information for the
files located at \code{folder}. If \code{recurse} is TRUE,
\code{rhls} will recursively travel the directory tree
rooted at \code{folder}. The returned object is a
data.frame consisting of the columns: \emph{permission,
owner, group, size (which is numeric), modification
time}, and the \emph{file name}. \code{folder} may
optionally end in `*' which is the wildcard and will
match any character(s).
}
\author{
Saptarshi Guha
}
\seealso{
\code{\link{rhput}}, \code{\link{rhdel}},
\code{\link{rhread}}, \code{\link{rhwrite}},
\code{\link{rhsave}}, \code{\link{rhget}}
}
\keyword{directory}
\keyword{HDFS}
\keyword{list}
|
99cd0bf0951150ada8eae21589a25074a35d8804
|
9bd25d71e6b745f7ba96bd3447dddf0579e97704
|
/man/write_gifti_component_of_cifti.Rd
|
135632405e67f24b19ff43c878ff79a0ee2aef3e
|
[] |
no_license
|
noguchit/ciftiTools
|
a7a4cbe562835478b8725d4c9508a1405bb51293
|
a858c524520a5a1cdf1db1ebc05e3adf90e83d04
|
refs/heads/master
| 2022-12-07T13:40:00.144630
| 2020-08-14T01:04:22
| 2020-08-14T01:04:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 865
|
rd
|
write_gifti_component_of_cifti.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_cifti.R
\name{write_gifti_component_of_cifti}
\alias{write_gifti_component_of_cifti}
\title{Write out a GIFTI File}
\usage{
write_gifti_component_of_cifti(data, out_fname, datatype = NULL, ...)
}
\arguments{
\item{data}{A data matrix to write out as a GIFTI, i.e. a V (vertices) by T
(measurements) matrix corresponding to the left or right cortex.}
\item{out_fname}{The path to the GIFTI file to write}
\item{datatype}{A vector corresponding to the NIFTI types of \code{data}.
For example, "NIFTI_TYPE_INT32" and "NIFTI_TYPE_FLOAT32". If \code{NULL}
(default), the datatype will be inferred.}
\item{...}{Other vectors of options to
\code{\link[freesurferformats]{gifti_xml}}, for example "encoding" and
"endian".}
}
\description{
Write GIFTI-format data.
}
\keyword{internal}
|
738feb279e95ef3491b802f044110ae738773884
|
60edbb04067fed891c5aba36bef5dba769c650b6
|
/CYTOF_base_population_plots.R
|
fd6f142c0a7194cc69cb1f0c7c7fe004f2551c3c
|
[] |
no_license
|
am794/CyTOF_Analysis
|
a2704402b8555d054985bcae1b4d0d2631532e51
|
c779c0bca4298fe0ebb59329bb3df45703b4edf6
|
refs/heads/master
| 2022-12-25T16:12:13.620992
| 2020-09-30T06:25:09
| 2020-09-30T06:25:09
| 283,353,191
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,949
|
r
|
CYTOF_base_population_plots.R
|
#########################
# Base population plots #
#########################
library("openxlsx")
library("reshape2")
library("ggplot2")

# Gated event counts per sample: columns 1-2 hold sample metadata,
# columns 3 onward hold one population each.
pertussis_events <- read.xlsx("/Users/amandava/Desktop/Pertussis/New_Donors/Events/Analysis_3/Events.xlsx",sheet=2)
#config <- read.table("/Users/amandava/Desktop/Pertussis/New_Donors/2968/config/2968.config")
#parents <- c(0,1,2,3,4,4,4,4,4,9,9,9,7,7,7,7,15,15,15,15,15,15,21,21,21,21,22,22,22,22,21,4,32,32,34,34,36,36,38,38,37,37,16)

# Parent index of each population in the gating hierarchy (0 = root).
# Currently empty, so the percent-of-parent computation below is a no-op;
# restore the commented vector above to reactivate it.
parents <- c()
events <- as.data.frame(t(pertussis_events[,3:45]))

# Express each population as a percentage of its parent population.
# seq_along() (not 1:length(parents)) so an empty `parents` skips the loop
# entirely instead of iterating over c(1, 0).
data <- c()
for(i in seq_along(parents)){
  if(i>1){ # skip the root population, which has no parent
    fun1 <- function(x){(x*100/events[parents[i],])}
    dat <- fun1(events[i,])
    data <- rbind(data,dat)
  }
}
data_fin <- cbind(pertussis_events[,1:3],t(data))
write.xlsx(data_fin,file="/Users/amandava/Desktop/Pertussis/New_Donors/Proportions_Analysis2.xlsx",row.names = TRUE,col.names = TRUE)
# NOTE(review): `final_duplexes` is only defined further down this script --
# this line succeeds only when re-running an already-loaded session.
write.xlsx(final_duplexes,file="/Users/amandava/Desktop/Pertussis/New_Donors/Events/Analysis_3/duplexes.xlsx",row.names = TRUE,col.names = TRUE)

#WRT live cells
# Duplex populations expressed as a percentage of live cells (row 1).
events <- as.data.frame(t(pertussis_events[,c(8:10,44,46)]))
rownames(events) <- c("Live_Cells","CD3_CD14_duplexes","CD19_CD3_duplexes","mDC_CD3_duplexes","pDC_CD3_duplexes")
data <- c()
for(i in 2:5){ # rows 2-5 are the duplex populations; row 1 is the parent
  fun1 <- function(x){(x*100/events[1,])}
  dat <- fun1(events[i,])
  data <- rbind(data,dat)
}
final_duplexes <- cbind(pertussis_events[,1:4],t(data))
# Boxplot + median-trend plots of each duplex population (% of live cells),
# by visit and cohort. Each quartz() call opens a new on-screen device.
# NOTE(review): stat_summary(fun.y=...) is the pre-ggplot2-3.3 spelling;
# newer ggplot2 warns and expects `fun =` -- confirm the installed version.
quartz()
ggplot(data=final_duplexes,aes(x=factor(Visit),y=CD3_CD14_duplexes,fill=factor(Cohort)))+
  geom_boxplot(aes(fill=Cohort),width=0.3,outlier.shape = NA)+geom_jitter(aes(colour=factor(Cohort)))+
  scale_fill_brewer(palette="Blues")+
  stat_summary(fun.y=median, geom="smooth", aes(group=factor(Cohort),colour=factor(Cohort)),position=position_dodge(0.2),lwd=0.5)+
  xlab("Visit")+ ylab("Percentage") + ggtitle("Boxplot of percentages of CD3CD14 duplexes with live cells as parent ")
# CD3CD14: median trend only
quartz()
ggplot(data=final_duplexes,aes(x=factor(Visit),y=CD3_CD14_duplexes,fill=factor(Cohort)))+
  scale_fill_brewer(palette="Blues")+
  stat_summary(fun.y=median, geom="smooth", aes(group=factor(Cohort),colour=factor(Cohort)),lwd=1)+
  xlab("Visit")+ ylab("Percentage") + ggtitle("Median of percentages of CD3CD14 duplexes with live cells as parent")
# CD19CD3: boxplot with jittered points
quartz()
ggplot(data=final_duplexes,aes(x=factor(Visit),y=CD19_CD3_duplexes,fill=factor(Cohort)))+
  geom_boxplot(aes(fill=Cohort),width=0.3,outlier.shape=NA)+geom_jitter(aes(colour=factor(Cohort)))+
  scale_fill_brewer(palette="Blues")+
  stat_summary(fun.y=median, geom="smooth", aes(group=factor(Cohort),colour=factor(Cohort)),position=position_dodge(0.2),lwd=0.5)+
  xlab("Visit")+ ylab("Percentage") + ggtitle("Boxplot of percentages of CD19CD3 duplexes with live cells as parent ")
# CD19CD3: median trend only
quartz()
ggplot(data=final_duplexes,aes(x=factor(Visit),y=CD19_CD3_duplexes,fill=factor(Cohort)))+
  scale_fill_brewer(palette="Blues")+
  stat_summary(fun.y=median, geom="smooth", aes(group=factor(Cohort),colour=factor(Cohort)),lwd=1)+
  xlab("Visit")+ ylab("Percentage") + ggtitle("Median of percentages of CD19CD3 duplexes with live cells as parent")
# mDC_CD3: boxplot with jittered points
quartz()
ggplot(data=final_duplexes,aes(x=factor(Visit),y=mDC_CD3_duplexes,fill=factor(Cohort)))+
  geom_boxplot(aes(fill=Cohort),width=0.3,outlier.shape=NA)+geom_jitter(aes(colour=factor(Cohort)))+
  scale_fill_brewer(palette="Blues")+
  stat_summary(fun.y=median, geom="smooth", aes(group=factor(Cohort),colour=factor(Cohort)),position=position_dodge(0.2),lwd=0.5)+
  xlab("Visit")+ ylab("Percentage") + ggtitle("Boxplot of percentages of mDC_CD3 duplexes with live cells as parent ")
# mDC_CD3: median trend only
quartz()
ggplot(data=final_duplexes,aes(x=factor(Visit),y=mDC_CD3_duplexes,fill=factor(Cohort)))+
  scale_fill_brewer(palette="Blues")+
  stat_summary(fun.y=median, geom="smooth", aes(group=factor(Cohort),colour=factor(Cohort)),lwd=1)+
  xlab("Visit")+ ylab("Percentage") + ggtitle("Median of percentages of mDCCD3 duplexes with live cells as parent")
# pDC_CD3: boxplot with jittered points
quartz()
ggplot(data=final_duplexes,aes(x=factor(Visit),y=pDC_CD3_duplexes,fill=factor(Cohort)))+
  geom_boxplot(aes(fill=Cohort),width=0.3,outlier.shape=NA)+geom_jitter(aes(colour=factor(Cohort)))+
  scale_fill_brewer(palette="Blues")+
  stat_summary(fun.y=median, geom="smooth", aes(group=factor(Cohort),colour=factor(Cohort)),position=position_dodge(0.2),lwd=0.5)+
  xlab("Visit")+ ylab("Percentage") + ggtitle("Boxplot of percentages of pDC_CD3 duplexes with live cells as parent ")
# pDC_CD3: median trend only
quartz()
ggplot(data=final_duplexes,aes(x=factor(Visit),y=pDC_CD3_duplexes,fill=factor(Cohort)))+
  scale_fill_brewer(palette="Blues")+
  stat_summary(fun.y=median, geom="smooth", aes(group=factor(Cohort),colour=factor(Cohort)),lwd=1)+
  xlab("Visit")+ ylab("Percentage") + ggtitle("Median of percentages of pDC_CD3 duplexes with live cells as parent")
# Per (visit, cohort) median and mean of the CD3CD14 duplex percentages.
# NOTE(review): ddply/summarize come from plyr, which is not library()'d in
# this script -- presumably loaded elsewhere in the session; confirm.
CD3CD14 <- ddply(final_duplexes,~Visit+Cohort,summarize,median=median(CD3_CD14_duplexes),mean=mean(CD3_CD14_duplexes))
###Fold change
# Fold changes of summary statistics relative to each cohort's first visit.
#
# Expects `df` with one row per (visit, cohort) combination -- exactly 10
# rows (5 visits x 2 cohorts) -- where column 2 is the cohort, column 3 the
# median and column 4 the mean. After ordering by cohort, rows 1-5 belong to
# the first cohort (baseline = row 1) and rows 6-10 to the second cohort
# (baseline = row 6).
#
# Returns `df` ordered by cohort with two extra columns, fc_median and
# fc_mean, holding the fold change of each row against its cohort baseline.
func1 <- function(df){
  order_df <- df[order(df[,2]),]
  n <- 10L                        # 2 cohorts x 5 visits
  fc_median <- numeric(n)         # preallocate instead of growing with c()
  fc_mean <- numeric(n)
  for(i in seq_len(n)){
    base <- if(i < 6) 1L else 6L  # baseline row of this row's cohort
    fc_median[i] <- order_df[i,3]/order_df[base,3]
    fc_mean[i] <- order_df[i,4]/order_df[base,4]
  }
  final_fc <- cbind(order_df,fc_median,fc_mean)
  return(final_fc)
}
# Fold changes per population.
# NOTE(review): CD19CD3, pDCCD3 and mDCCD3 are only defined BELOW these
# calls -- this ordering works only when re-running a session in which they
# already exist; consider moving the ddply() lines above the func1() calls.
CD3CD14_fc <- func1(CD3CD14)
CD19CD3_fc <- func1(CD19CD3)
pDCCD3_fc <- func1(pDCCD3)
mDCCD3_fc <- func1(mDCCD3)
CD19CD3 <- ddply(final_duplexes,~Visit+Cohort,summarize,median=median(CD19_CD3_duplexes),mean=mean(CD19_CD3_duplexes))
mDCCD3 <- ddply(final_duplexes,~Visit+Cohort,summarize,median=median(mDC_CD3_duplexes),mean=mean(mDC_CD3_duplexes))
pDCCD3 <- ddply(final_duplexes,~Visit+Cohort,summarize,median=median(pDC_CD3_duplexes),mean=mean(pDC_CD3_duplexes))
# Stack the four population summaries and label each block of 10 rows.
summary_duplexes <- rbind(CD3CD14,CD19CD3,mDCCD3,pDCCD3)
population <- rep(c("CD3CD14","CD19CD3","mDCCD3","pDCCD3"),c(10,10,10,10))
fin <- cbind(population,summary_duplexes)
quartz()
ggplot(data=CD3CD14_fc,aes(x=factor(Visit),y=fc_median,fill=factor(Cohort)))+geom_line(aes(group=factor(Cohort),colour=factor(Cohort)),lwd=1)+
xlab("Visit")+ylab("Fold Change(Median)")+ ggtitle("Fold Change(Median) for CD3CD14 duplexes with Visit 1 as baseline")
+scale_color_manual(values=c("#CC6666", "#9999CC"))
quartz()
ggplot(data=CD19CD3_fc,aes(x=factor(Visit),y=fc_median,fill=factor(Cohort)))+geom_line(aes(group=factor(Cohort),colour=factor(Cohort)),lwd=1)+
xlab("Visit")+ylab("Fold Change(Median)")+ ggtitle("Fold Change(Median) for CD19CD3 duplexes with Visit 1 as baseline")
quartz()
ggplot(data=pDCCD3_fc,aes(x=factor(Visit),y=fc_median,fill=factor(Cohort)))+geom_line(aes(group=factor(Cohort),colour=factor(Cohort)),lwd=1)+
xlab("Visit")+ylab("Fold Change(Median)")+ ggtitle("Fold Change(Median) for pDC_CD3 duplexes with Visit 1 as baseline")
quartz()
ggplot(data=mDCCD3_fc,aes(x=factor(Visit),y=fc_median,fill=factor(Cohort)))+geom_line(aes(group=factor(Cohort),colour=factor(Cohort)),lwd=1)+
xlab("Visit")+ylab("Fold Change(Median)")+ ggtitle("Fold Change(Median) for mDC_CD3 duplexes with Visit 1 as baseline")
#Mean
# Fold change (mean) line plots, same layout as the median plots above.
# BUG FIX: the trailing "+scale_color_manual(...)" line originally began with
# "+" on a new line, so it was evaluated as a standalone (failing) expression
# and the colour scale was silently lost; the "+" now ends the previous line.
quartz()
ggplot(data=CD3CD14_fc,aes(x=factor(Visit),y=fc_mean,fill=factor(Cohort)))+geom_line(aes(group=factor(Cohort),colour=factor(Cohort)),lwd=1)+
  xlab("Visit")+ylab("Fold Change(Mean)")+ ggtitle("Fold Change(Mean) for CD3CD14 duplexes with Visit 1 as baseline")+
  scale_color_manual(values=c("#CC6666", "#9999CC"))
quartz()
ggplot(data=CD19CD3_fc,aes(x=factor(Visit),y=fc_mean,fill=factor(Cohort)))+geom_line(aes(group=factor(Cohort),colour=factor(Cohort)),lwd=1)+
  xlab("Visit")+ylab("Fold Change(Mean)")+ ggtitle("Fold Change(Mean) for CD19CD3 duplexes with Visit 1 as baseline")
quartz()
ggplot(data=pDCCD3_fc,aes(x=factor(Visit),y=fc_mean,fill=factor(Cohort)))+geom_line(aes(group=factor(Cohort),colour=factor(Cohort)),lwd=1)+
  xlab("Visit")+ylab("Fold Change(Mean)")+ ggtitle("Fold Change(Mean) for pDC_CD3 duplexes with Visit 1 as baseline")
quartz()
ggplot(data=mDCCD3_fc,aes(x=factor(Visit),y=fc_mean,fill=factor(Cohort)))+geom_line(aes(group=factor(Cohort),colour=factor(Cohort)),lwd=1)+
  xlab("Visit")+ylab("Fold Change(Mean)")+ ggtitle("Fold Change(Mean) for mDC_CD3 duplexes with Visit 1 as baseline")
##Line plots
# NOTE(review): fill is mapped to Subject but geom_line() has no group
# aesthetic here, so this likely draws a single line, not one per subject —
# confirm the intent.
quartz()
ggplot(data=final_duplexes,aes(x=factor(Visit),y=CD3_CD14_duplexes,fill=factor(Subject)))+geom_line()
##Mean
# Per-cohort mean trajectories of each duplex percentage across visits.
# NOTE(review): stat_summary(fun.y=) is deprecated in ggplot2 >= 3.3 in favour
# of fun=; kept as-is for compatibility with the project's ggplot2 version.
quartz()
ggplot(data=final_duplexes,aes(x=factor(Visit),y=CD3_CD14_duplexes,fill=factor(Cohort)))+
scale_fill_brewer(palette="Blues")+
stat_summary(fun.y=mean, geom="smooth", aes(group=factor(Cohort),colour=factor(Cohort)),lwd=1)+
xlab("Visit")+ ylab("Percentage") + ggtitle("Mean of percentages of CD3CD14 duplexes with live cells as parent")
quartz()
ggplot(data=final_duplexes,aes(x=factor(Visit),y=CD19_CD3_duplexes,fill=factor(Cohort)))+
scale_fill_brewer(palette="Blues")+
stat_summary(fun.y=mean, geom="smooth", aes(group=factor(Cohort),colour=factor(Cohort)),lwd=1)+
xlab("Visit")+ ylab("Percentage") + ggtitle("Mean of percentages of CD19CD3 duplexes with live cells as parent")
quartz()
ggplot(data=final_duplexes,aes(x=factor(Visit),y=mDC_CD3_duplexes,fill=factor(Cohort)))+
scale_fill_brewer(palette="Blues")+
stat_summary(fun.y=mean, geom="smooth", aes(group=factor(Cohort),colour=factor(Cohort)),lwd=1)+
xlab("Visit")+ ylab("Percentage") + ggtitle("Mean of percentages of mDCCD3 duplexes with live cells as parent")
quartz()
ggplot(data=final_duplexes,aes(x=factor(Visit),y=pDC_CD3_duplexes,fill=factor(Cohort)))+
scale_fill_brewer(palette="Blues")+
stat_summary(fun.y=mean, geom="smooth", aes(group=factor(Cohort),colour=factor(Cohort)),lwd=1)+
xlab("Visit")+ ylab("Percentage") + ggtitle("Mean of percentages of pDC_CD3 duplexes with live cells as parent")
########### All percentages
# Express each gated population (rows 2..40 of the transposed event counts) as
# a percentage of the parent population in row 1 (live cells).
events <- as.data.frame(t(pertussis_events[,c(8:47)]))
# Vectorised replacement for the original grow-by-rbind loop
# ("for(i in 1:40){ if(i>1){...} }"): sweep() divides every non-parent row by
# the parent row in one pass — same values, no O(n^2) copying.
data <- sweep(as.matrix(events[-1, , drop = FALSE]), 2,
              as.numeric(events[1, ]), "/") * 100
final_percentages <- cbind(pertussis_events[,1:4],t(data))
write.xlsx(final_percentages,file="/Users/amandava/Desktop/Pertussis/New_Donors/Events/Analysis_3/All_percentages_Pertussis.xlsx",row.names = TRUE,col.names = TRUE)
# Draw the standard per-visit boxplot (jittered points + per-cohort median
# trend line) for one cell-population column of `final_percentages`.
# This replaces 21 copy-pasted plot blocks that differed only in the y column
# and the title label.
#   col   - column name in final_percentages (special characters allowed)
#   label - human-readable population name used in the plot title
# NOTE(review): stat_summary(fun.y=) is deprecated in ggplot2 >= 3.3 (use fun=);
# kept for compatibility with the project's ggplot2 version.
plot_population_boxplot <- function(col, label) {
  quartz()
  p <- ggplot(data=final_percentages,
              aes(x=factor(Visit), y=.data[[col]], fill=factor(Cohort))) +
    scale_fill_brewer(palette="Blues") +
    geom_boxplot(aes(fill=Cohort), width=0.3, outlier.shape = NA) +
    geom_point(aes(colour=factor(Cohort)), shape=21,
               position=position_jitterdodge(), size=3) +
    stat_summary(fun.y=median, geom="smooth",
                 aes(group=factor(Cohort), colour=factor(Cohort)),
                 position=position_dodge(0.2), lwd=0.5) +
    xlab("Visit") + ylab("Percentage") +
    ggtitle(paste0("Boxplot of percentages of ", label, " with live cells as parent "))
  # ggplot objects do not auto-print inside a function; print explicitly.
  print(p)
}
plot_population_boxplot("CD3-CD19+", "B cells")
plot_population_boxplot("CD3+Tcells", "T cells")
plot_population_boxplot("CD4+Tcells", "CD4+ T cells")
plot_population_boxplot("CD8+Tcells", "CD8+ T cells")
plot_population_boxplot("CD45CD14+", "Monocytes")
plot_population_boxplot("Tregs", "Tregs")
###CD4 Memory
plot_population_boxplot("TcmCD4", "Tcm CD4+")
plot_population_boxplot("TemCD4", "Tem CD4+")
plot_population_boxplot("TemraCD4", "Temra CD4+")
plot_population_boxplot("NaiveCD4", "Naive CD4")
#CD8 Memory
plot_population_boxplot("TcmCD8", "Tcm CD8+")
plot_population_boxplot("TemCD8", "Tem CD8+")
plot_population_boxplot("TemraCD8", "Temra CD8+")
plot_population_boxplot("NaiveCD8", "Naive CD8")
#Other populations: basophils, mDC, pDC, classical, nonclassical, intermediate monocytes
plot_population_boxplot("Classical_Monocytes", "Classical Monocytes")
plot_population_boxplot("Non-Classical_Monocytes", "Non-Classical Monocytes")
plot_population_boxplot("Intermediate_Monocytes", "Intermediate Monocytes")
plot_population_boxplot("Basophils", "Basophils")
plot_population_boxplot("CD123+CD1c-", "all pDCs")
plot_population_boxplot("CD123+CD1c+", "all mDCs")
plot_population_boxplot("Nkcells", "NK cells")
mfi_all_marker_cp <- read.xlsx("/Users/amandava/Desktop/Pertussis/New_Donors/Gated/MFI_All_markers_CP.xlsx",sheet=1)
|
88280e82cf3a95559ea163571b4c569a074c4839
|
dcabe9a688420893a01ff7c2000b0ca2ae4687c0
|
/R/10-Test_robust_residuals.R
|
e947b537446d6f65ccab74922f116d89d311f1d9
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
nicebread/PSE-Database
|
30ceccaaa0a051e7815e5122405d77c79dae1c54
|
cac7584bed80871f2bd4217c5f56c26977b13760
|
refs/heads/master
| 2020-11-23T21:06:00.618381
| 2020-02-03T08:24:08
| 2020-02-03T08:24:08
| 169,381,395
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,774
|
r
|
10-Test_robust_residuals.R
|
## This source code is licensed under the FreeBSD license (see "LICENSE" file)
## (c) 2018 Felix Schönbrodt
## ======================================================================
## Explore the difference between OLS correction for word count
# (current standard practice) and the newly recommended robust regression
## ======================================================================
library(dplyr)
library(purrr)
library(robustbase)
load(file="processed_data/person.RData")
# ---------------------------------------------------------------------
# OLS residualization, within study
# Identical pipeline for each motive score (aff/ach/pow): fit a within-study
# OLS of the raw sum score on z-standardized word count and keep the residuals.
# Replaces three copy-pasted pipelines with one loop.
# NOTE(review): split() returns studies in sorted order; this pattern assumes
# `person` rows are already grouped/ordered by study_id so that unlist() lines
# up with the original rows — confirm (same assumption as the original code).
for (motive in c("aff", "ach", "pow")) {
  f <- as.formula(paste0(motive, ".sum ~ wc.person.z"))
  person[[paste0(motive, ".resid")]] <- person %>%
    split(.$study_id) %>% # from base R
    map(~ lm(f, data = ., na.action = na.omit)) %>%
    map(resid) %>%
    unlist()
}
# ---------------------------------------------------------------------
# Robust residualization, within study
# Apply the recommended procedure: same per-motive pipeline as the OLS version,
# but using robustbase::lmrob with the Koller/Stahel "KS2014" tuning.
# Replaces three copy-pasted pipelines with one loop.
for (motive in c("aff", "ach", "pow")) {
  f <- as.formula(paste0(motive, ".sum ~ wc.person.z"))
  person[[paste0(motive, ".resid.robust")]] <- person %>%
    split(.$study_id) %>% # from base R
    map(~ lmrob(f, data = ., na.action = na.omit, setting = "KS2014")) %>%
    map(resid) %>%
    unlist()
}
# Compare OLS vs robust residuals: correlations and scatter plots per motive.
sel <- person %>% select(contains("aff"), contains("ach"), contains("pow"))
C1 <- cor(sel) %>% round(3)
C1
# One point per person; points on the diagonal mean OLS and robust residuals agree.
plot(sel$aff.resid, sel$aff.resid.robust)
plot(sel$ach.resid, sel$ach.resid.robust)
plot(sel$pow.resid, sel$pow.resid.robust)
# --> OK: robust and regular regression does not make a real difference in our data sets ...
# But maybe they already have been screened for outliers.
## ======================================================================
## Check outlier influences
## ======================================================================
# use a part of real data set, then add some outliers
df <- person %>% filter(study_id=="OCS_smofee8") %>% slice(1:80)
cor(df$wc.person, df$aff.sum)
# Fit OLS and robust regression on the clean subset and overlay both fits
# (black = OLS, red = robust).
lm1 <- lm(aff.sum~wc.person, df)
rlm1 <- lmrob(aff.sum~wc.person, df, setting="KS2014")
plot(df$wc.person, df$aff.sum)
abline(a=coef(lm1)[1], b=coef(lm1)[2])
abline(a=coef(rlm1)[1], b=coef(rlm1)[2], col="red")
df2 <- df
# change two data points into outliers
# (very high word count, zero affiliation score: leverage points that pull OLS)
df2[1, "wc.person"] <- 1800
df2[1, "aff.sum"] <- 0
df2[2, "wc.person"] <- 1900
df2[2, "aff.sum"] <- 0
cor.test(df2$wc.person, df2$aff.sum)
# Refit both models on the contaminated data; the robust fit (red) should be
# much less affected by the two injected points.
lm2 <- lm(aff.sum~wc.person, df2)
rlm2 <- lmrob(aff.sum~wc.person, df2, setting="KS2014")
plot(df2$wc.person, df2$aff.sum)
abline(a=coef(lm2)[1], b=coef(lm2)[2])
abline(a=coef(rlm2)[1], b=coef(rlm2)[2], col="red")
# Stack three histograms to compare the raw score with both residual types.
par(mfcol=c(3, 1))
hist(df2$aff.sum, breaks=24, main="aff raw score")
hist(resid(rlm2), breaks=24, main="robust residuals")
hist(resid(lm2), breaks=24, main="OLS residuals")
# ---------------------------------------------------------------------
# Try Oliver's suggestion for correction
# (quoted steps below translated from German)
# > 1. Are the raw motive scores normally distributed (Shapiro-Wilk + visual
# > inspection of the value histogram)?
shapiro.test(df2$aff.sum)
hist(df2$aff.sum)
# > 2. Is the word count normally distributed (Shapiro-Wilk + visual
# > inspection of the value histogram)?
shapiro.test(df2$wc.person)
hist(df2$wc.person)
# > 3. If no to 1 AND 2, check whether outlier values of the word count
# > "generate" outlier values of the motive scores (i.e. whether a participant
# > who wrote an exceptional amount also has exceptionally high values on a
# > motive). This is best seen in a regression plot: if outlier & leverage
# > points lie very close to the regression line, these points presumably
# > represent extrapolations of the relationship in the rest of the data
# > cloud, and no correction is needed. In that case the residual will be
# > completely unremarkable.
# --> not leverage points, real outliers
# > 4. If no to 1 OR 2, transform the respective variable (weak: square root;
# > strong: log; always after adding the constant 1) and check for normality.
# try sqrt-trans:
df2$aff.sum.sqrt <- sqrt(df2$aff.sum)
shapiro.test(df2$aff.sum.sqrt)
hist(df2$aff.sum.sqrt)
df2$aff.sum.log <- log(df2$aff.sum+1)
shapiro.test(df2$aff.sum.log)
hist(df2$aff.sum.log)
df2$wc.person.sqrt <- sqrt(df2$wc.person)
shapiro.test(df2$wc.person.sqrt)
hist(df2$wc.person.sqrt)
# NOTE(review): no +1 added before the log here, unlike step 4's instruction —
# harmless if word counts are always >= 1, but confirm.
df2$wc.person.log <- log(df2$wc.person)
shapiro.test(df2$wc.person.log)
hist(df2$wc.person.log)
# > 5. Partial out the (transformed) word count from the (transformed) motive
# > score via OLS, save the residuals.
df2$aff.resid.OCS <- resid(lm(aff.sum.sqrt~wc.person.log, df2))
df2$aff.resid.OCS2 <- resid(lm(aff.sum.log~wc.person.log, df2))
# > 6. Test the residuals for normality (Shapiro-Wilk + visual inspection of
# > the histogram) -- normally distributed: transform to z-scores; not
# > normally distributed: back to 4 with a stronger/different transformation.
shapiro.test(df2$aff.resid.OCS)
hist(df2$aff.resid.OCS)
shapiro.test(df2$aff.resid.OCS2)
hist(df2$aff.resid.OCS2)
# > 7. If the residuals still cannot be reconciled with a normal distribution,
# > use robust regression (ROBREG) for the partialling and report the results
# > of all analyses with and without outliers.
# >
## ======================================================================
## Different example: Construct a data point that is a bivariate outlier
## (considering word count), but not univariate
## ======================================================================
# use a real data set, then add some outliers
df2 <- person %>% filter(study_id=="OCS_smofee8") %>% slice(1:80)
# change one data point into outliers
# Point 1 (red): long protocol, very low score -> bivariate outlier.
# Point 2 (green): long protocol, high score -> consistent with the trend.
df2[1, "wc.person"] <- 1100
df2[1, "aff.sum"] <- 2
df2[2, "wc.person"] <- 1800
df2[2, "aff.sum"] <- 24
rlm2 <- lmrob(aff.sum~wc.person, df2, setting="KS2014")
# Scatter with both injected points highlighted and the robust fit overlaid.
plot(df2$wc.person, df2$aff.sum)
points(1100, 2, col="red", pch=20)
points(1800, 24, col="green", pch=20)
abline(a=coef(rlm2)[1], b=coef(rlm2)[2], col="red")
# In the raw-score histogram neither point stands out (not univariate outliers)...
hist(df2$aff.sum, breaks=20)
points(2, 0, col="red", pch=20, cex=2)
points(24, 0, col="green", pch=20, cex=2)
# ...but in the robust residuals the bivariate outlier (red) is clearly extreme.
hist(resid(rlm2))
points(resid(rlm2)[1], 0, col="red", pch=20, cex=2)
points(resid(rlm2)[2], 0, col="green", pch=20, cex=2)
|
ab80910b497ce671412f8adfb5c6f934fed0b3e3
|
f2e4096c2e4f1150bc38bde846964ae8c36f44a7
|
/man/DEM.Rd
|
b966836a409efec69e8b7018b62bf69e7ab79bbd
|
[
"Apache-2.0"
] |
permissive
|
bschumac/perfectPeak
|
6510cea3575e8810e4379235cbfb7cfd86ffb7c5
|
37b49f187fce77314df4ffc5084afc5c3a5b90f4
|
refs/heads/master
| 2020-07-29T04:42:06.812365
| 2016-02-03T17:47:05
| 2016-02-03T17:47:05
| 209,674,012
| 1
| 0
|
Apache-2.0
| 2019-09-20T00:47:48
| 2019-09-20T00:47:48
| null |
UTF-8
|
R
| false
| true
| 1,059
|
rd
|
DEM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/perfectPeak-package.R
\docType{data}
\name{DEM}
\alias{DEM}
\title{Demo Digital Elevation Model DEM}
\format{Arc ASCII Grid}
\source{
Data source: \url{https://www.tirol.gv.at/data/datenkatalog/geographie-und-planung/digitales-gelaendemodell-tirol/}
}
\usage{
raster('stubai.asc')
}
\description{
The example Digital Elevation Model (DEM) is taken from the Authority of Tirol.
It is derived from LIDAR data and can be downloaded for Tirol.
The demo data is an Arc ASCII Grid with 324 cols by 360 rows and the following extent:
lon_min 11.35547, lon_max 11.40009, lat_min 47.10114, lat_max 47.13512
}
\details{
\itemize{
\item resolution : 10 Meter,10 Meter
\item datatype, 32 Bit floating point
\item projection, MGI_Austria_GK_West
\item EPSG-Code, 31254
\item unit, Meter
\item datum, D_MGI
\item Copyright: \url{https://www.tirol.gv.at/data/nutzungsbedingungen/}, \url{Creative Commons Namensnennung 3.0 Österreich Lizenz (CC BY 3.0 AT).}
}
}
\keyword{datasets}
|
f5b4228a81759de1d0d3b0d7c19c170a8fa3b19d
|
c9de7552a5bbf42969eec9d2f742d95dd3758897
|
/in_class_mar_03.R
|
01b8f4afe82e90a74cfdddc45bf7459ad53dc47f
|
[] |
no_license
|
michaelfrancenelson/NRC290B_2020
|
d24c0439306fda034c701a9a42a0ac421806e8a1
|
da60d6b0ef47b2479344f8d704d7a7e014568d27
|
refs/heads/master
| 2021-03-06T16:59:45.845597
| 2020-11-09T15:32:57
| 2020-11-09T15:32:57
| 246,211,139
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,033
|
r
|
in_class_mar_03.R
|
# I need to tell R that the 'Spp' column contains names.
# I want these row names of the data frame rather than a column of data.
# The 'row.names = "Spp"' is needed to tell r that it should be row names and not data
# on my computer, the data file is in a subdirectory called 'data' so I have to
# tell R to look there using "data/" before the filename.
dat = read.csv("data/butterfly-table.csv", row.names = "Spp")
# The data should be in a data frame:
class(dat)
head(dat)
# I can view the contents of the column called "X1998" using:
dat$X1998
# I can view the third column with:
dat[, 3]
# I can subset the 3rd and 4th columns with:
dat[, 3:4]
# I can get the first, third, and 5th colums with:
dat[, c(1, 3, 5)]
# barplot doesn't work directly:
barplot(dat)
# This works but the legend is not in a useful position:
barplot(as.matrix(dat), legend.text=rownames(dat))
# This separates each species into its own own bar per year.
# The legend is still covering up part of the plot.
barplot(as.matrix(dat), beside = TRUE, legend.text=rownames(dat))
# I can make a bar plot of all species for a single year (1998):
barplot(dat$X1998, names.arg = rownames(dat), main = "butterflies, year 1998")
# I can make a bar plot of a single species for all years:
barplot(as.matrix(dat[1, ]))
# I know I can use the 'legend()' function:
legend(x = "top", rownames(dat), horiz = TRUE, cex = 0.7)
# I could probably tinker with the legend arguments to make something that looks nicer.
# I might look up the following arguments in the help entry:
# col =
# fill =
# x =
# legend =
# bty =
# cex =
# ncol =
# I can use the square brackets to select a row or column by its name:
dat["M.bro", ]
dat[1, ]
c(dat[1, ])
plot(dat[1, ])
plot(dat)
# What about making a line plot?
# Here's one row:
plot(as.matrix(dat)[1, ], type = "l", ylim = c(0, 100))
# I can add row 2 using lines()
lines(as.matrix(dat)[2, ])
# I happen to know about a function called 'matplot', short for matrix plot.
# This looks sketchy, but I might be able to work with it:
matplot(as.matrix(dat))
# This is a little better:
matplot(as.matrix(dat), type = "l")
# I can make all of the lines solid, or different varieties of dashed:
matplot(as.matrix(dat), type = "l", lty = 1)
matplot(as.matrix(dat), type = "l", lty = 2)
matplot(as.matrix(dat), type = "l", lty = 3)
# I can specify the color for all lines using a color code, or using a named color:
matplot(as.matrix(dat), type = "l", col = 2)
matplot(as.matrix(dat), type = "l", col = "blue")
# I can also specify a vector of colors.
# Here's an ugly and difficult-to-read set:
matplot(as.matrix(dat), type = "l", col = 6:11)
# Here is a better version using thicker lines (lwd = ).
# My line of code is getting a little long, so I'll separate it into
# several lines so I can read it more easily:
matplot(
as.matrix(dat),
type = "l",
lwd = 3,
col = c(1, 2, 4, "darkgreen", "darkblue", "turquoise"))
# A legend, axis labels, and main titles would be helpful
# Your code here:
|
b4ac4f355b3056d093e76f08b392f358f884b37c
|
98f14939c2588d57de7025434b6934dcb076208c
|
/R/13_projections.R
|
94b378a130b1db777cad5ed2befa4f6be4e68c88
|
[] |
no_license
|
larenaud/OWPC
|
997ce88a1f974528201dc5eb90ddf73825c48808
|
773e6ece1735ea1047c428a0564e9bd9c0307e5b
|
refs/heads/master
| 2021-10-28T03:55:32.968774
| 2021-10-19T21:11:04
| 2021-10-19T21:11:04
| 238,471,426
| 0
| 1
| null | 2020-02-05T17:13:10
| 2020-02-05T14:36:54
|
R
|
UTF-8
|
R
| false
| false
| 49,153
|
r
|
13_projections.R
|
# code to simulate demographic rates - lambda - under different scenarios of temperature and phenology
# created by S. C. Beaumont
# modified by L. Renaud # 2020-04-23
# modified by S. C. Beaumont # 2020-06-01
# modified by J Van de Walle # 2020-07-16
# modified by S. C. Beaumont # 2020-07-20
####========================####
# Library
####========================####
library(plyr)
library(dplyr)
library(boot)
library(lme4)
library(popbio)
library(ggplot2)
library(pander)
library(cowplot)
library(boot)
rm(list = ls ())
####========================####
#Load Databases
####========================####
# NOTE(review): all load() paths below are hard-coded absolute user paths;
# consider here::here() or relative paths for portability.
# Load final databases
load("/Users/Sandrine/Documents/Sherbrooke/OWPC/Post_OWPC/Projections /Mai25/surv_clim.RData.Rdata")
rm(mod.surv.clim, results.surv.clim) # keep only df_surv
load("/Users/Sandrine/Documents/Sherbrooke/OWPC/Post_OWPC/Projections /Mai25/true_repro_clim.RData.Rdata")
rm(mod.true.repro.clim,results.true.repro.clim) # keep only df_fec
# Create dataframes of environmental conditions
# To construct future dataframe, need SD
load("/Users/Sandrine/Documents/Sherbrooke/OWPC/Post_OWPC/Projections /dataProjectionsSURV copie.RData")
# Keep study years with complete data (drop 1999/2000/2016).
df_surv <-droplevels(subset(fullSurvDataScled,!(yr %in% c("1999","2000","2016"))))# n= 448
# p5..p30 are 5/10/20/30 % of the mean winter snow-cover covariate, in
# ORIGINAL (unscaled) units; used later to build reduced-snow scenarios.
meanSnow<- data.MEAN["WinSnowsurvT1"][[1]]
p5 <- 0.05*meanSnow
p10 <- 0.10*meanSnow
p20 <- 0.20*meanSnow
p30 <- 0.30*meanSnow
# IMPORTANT: SD.surv must be taken from data.SD *before* the FEC load below
# overwrites data.SD with the fecundity-scale SDs.
SD.surv <- data.SD
#load("C:/Users/joani/Documents/PhD/Labo/One Week Paper Challenge 2020/dataProjectionsFEC.RData")
load("/Users/Sandrine/Documents/Sherbrooke/OWPC/Post_OWPC/Projections /dataProjectionsFEC.RData")
df_fec<-droplevels(subset(fullFecDataScld,!(yr %in% c("1999","2000","2001")))) # n= 274
SD.fec <- data.SD
####========================####
# Env Databases
####========================####
# create dataframes of annual average environmental conditions
# (one row per study year holding that year's covariate value; the covariates
# are constant within a year, hence the unique() extraction per year).
# For survival
# Hoisted out of the loop: the factor -> numeric conversion of yr is loop-
# invariant and was previously redone on every iteration.
df_surv$yr <- as.numeric(as.character(df_surv$yr))
surv_years <- unique(df_surv$yr)
env_annual_surv <- data.frame(
  year = surv_years,
  WinSnowsurvT1 = NA_real_
)
for (i in seq_along(surv_years)) {
  tmp <- df_surv[df_surv$yr == env_annual_surv[i, "year"], ]
  env_annual_surv$WinSnowsurvT1[i] <- unique(tmp$WinSnowsurvT1)
}
# For fecundity
df_fec$yr <- as.numeric(as.character(df_fec$yr))
fec_years <- unique(df_fec$yr)
env_annual_fec <- data.frame(
  year = fec_years,
  TWin = NA_real_,
  PWin = NA_real_
)
for (i in seq_along(fec_years)) {
  tmp <- df_fec[df_fec$yr == env_annual_fec[i, "year"], ]
  env_annual_fec$TWin[i] <- unique(tmp$TWin)
  env_annual_fec$PWin[i] <- unique(tmp$PWin)
}
# Scenario environments: one-row data frames of (scaled) covariates.
#   TWin / PWin   - winter temperature / precipitation (fecundity model)
#   WinSnowsurvT1 - winter snow-cover duration (survival model)
# Covariates are standardized, hence the division by the stored SDs.
#
# Builds a future scenario: +1.5 added to winter temperature (in original
# units, degrees presumably — TODO confirm) and an optional reduction of
# snow-cover duration.
#   snow_reduction - reduction of WinSnowsurvT1 in ORIGINAL (unscaled) units;
#                    0 (default) means snow unchanged.
# Replaces six near-identical data.frame constructions.
make_future_env <- function(snow_reduction = 0) {
  data.frame(
    TWin = mean(env_annual_fec$TWin) + 1.5 / SD.fec["TWin"][[1]],
    PWin = mean(env_annual_fec$PWin),
    WinSnowsurvT1 = mean(env_annual_surv$WinSnowsurvT1) -
      snow_reduction / SD.surv["WinSnowsurvT1"][[1]]
  )
}
# Current conditions: all covariates at their observed multi-year means.
env.now <- data.frame(TWin = mean(env_annual_fec$TWin),
                      PWin = mean(env_annual_fec$PWin),
                      WinSnowsurvT1 = mean(env_annual_surv$WinSnowsurvT1))
# Temperature-only scenario (snow cover unchanged).
env.future <- make_future_env(0)
# Temperature + fixed 71.1 (unscaled units) snow-cover reduction.
env.future_jour <- make_future_env(71.1)
# Temperature + 5/10/20/30 % reductions of mean snow-cover duration.
env.future5  <- make_future_env(p5)
env.future10 <- make_future_env(p10)
env.future20 <- make_future_env(p20)
env.future30 <- make_future_env(p30)
####========================####
# Final Models
####========================####
## SURVIVAL
# Separate intercept and snow-cover slope per age class (cell-means coding
# via "-1 + ageClass/WinSnowsurvT1").
# I have removed predation from the models (JV)
surv <- glm(alive_t1 ~ -1 + ageClass/WinSnowsurvT1,
data=df_surv, family="binomial")
## FECUNDITY
# Mass term removed (JV). Per-age-class TWin x PWin interaction, with a
# random intercept per individual (ID).
true <- glmer(true_repro ~ -1 + ageClass/(TWin*PWin) + (1|ID),data=df_fec,
family="binomial", control = glmerControl(optimizer="bobyqa",optCtrl = list(maxfun = 200000)))
####========================####
# Bootstrap projections
####========================####
niter <- 10000 # number of bootstrap iterations

# +++++++ SURVIVAL ++++++++ #
# Explicit row id column (seq_len() is safe even for zero-row data,
# unlike 1:nrow()).
df_surv$rowNb <- seq_len(nrow(df_surv))
# Statistic for boot(): refit the survival GLM on the resampled rows and
# return its coefficient vector.  boot() supplies `rowNb` as the resampled
# row indices on each replicate.
boot.surv <- function(df_surv, rowNb){
  df_surv <- df_surv[rowNb,] # observations in this bootstrap sample
  mod <- glm(alive_t1 ~ -1 + ageClass/WinSnowsurvT1,
             data=df_surv, family="binomial")
  coefficients(mod) # return coefficient vector
}
surv.boot <- boot(data=df_surv, boot.surv, niter)
head(surv.boot$t) # bootstrapped coefficient matrix (niter x n_coefficients)
# Organise the bootstrapped survival coefficients in a long dataframe.
# Column order returned by glm for ageClass/WinSnowsurvT1 (columns of
# surv.boot$t):
#   t1 = intercept age class 0
#   t2 = intercept age class 1
#   t3 = intercept age class 2
#   t4 = intercept age class 8+   ### swapped ("Inversé" in original note;
#   t5 = intercept age class 3-7  ### presumably factor-level order -- confirm)
#   t6  = slope age class 0   with snow-cover duration
#   t7  = slope age class 1   with snow-cover duration
#   t8  = slope age class 2   with snow-cover duration
#   t9  = slope age class 8+  with snow-cover duration  ### swapped
#   t10 = slope age class 3-7 with snow-cover duration  ### swapped
# The c(...) calls below re-order columns (1,2,3,5,4) and (6,7,8,10,9) so
# that rows line up with the ageClass labels "0","1","2","37","8+".
data_pred_surv <- data.frame(
  ageClass = rep(c("0", "1", "2", "37", "8+"), each=niter),
  Intercept = c(surv.boot$t[,1], surv.boot$t[,2], surv.boot$t[,3], surv.boot$t[,5], surv.boot$t[,4]),
  Slope = c(surv.boot$t[,6], surv.boot$t[,7], surv.boot$t[,8], surv.boot$t[,10], surv.boot$t[,9])
)
# +++++++ FECUNDITY (I.E. REPRODUCTIVE RATE) ++++++++ #
# Bootstrap the GLMM predictions on the link (logit) scale, fixed effects
# only (re.form=NA), for age classes 3, 4-8 and 9+ under each environment.
# now
myfun_now <- function(x) predict(x,newdata=data.frame(ageClass = c("3","48","9"),
                                                      env.now),
                                 type="link",re.form=NA)
Fec_now = bootMer(true,myfun_now, nsim = niter)
head(Fec_now$t)
# future
myfun_future <- function(x) predict(x,newdata=data.frame(ageClass = c("3","48","9"),
                                                         env.future), #### CHANGE env.future IF CONDITIONS CHANGE (only temperature influences fecundity)
                                    type="link",re.form=NA)
Fec_future = bootMer(true,myfun_future, nsim = niter)
# Save bootstrap iterations in RData
# NOTE(review): hard-coded absolute user path, including a trailing space in
# "Projections " -- fragile; confirm before running on another machine.
save(data_pred_surv,Fec_now,Fec_future, file = "/Users/Sandrine/Documents/Sherbrooke/OWPC/Post_OWPC/Projections /Juillet 7/bootstrapPred.RData")
load("/Users/Sandrine/Documents/Sherbrooke/OWPC/Post_OWPC/Projections /Juillet 7/bootstrapPred.RData")
####========================####
# Population Models NOW
####========================####
# Bootstrap distribution of the population growth rate (lambda) under
# current conditions: survival driven by the observed snow-cover duration
# (env.now) and reproduction from the Fec_now bootstrap.
Lambda_now <- data.frame(
  iteration = seq_len(niter),
  lambda = seq_len(niter) # placeholder, overwritten in the loop
)
# Hoist loop-invariant work: the which() subsets of data_pred_surv are
# identical on every one of the `niter` iterations, so extract the
# per-age-class coefficient vectors once.
boot.int <- split(data_pred_surv$Intercept, data_pred_surv$ageClass)
boot.slp <- split(data_pred_surv$Slope, data_pred_surv$ageClass)
snow.now <- env.now["WinSnowsurvT1"][[1]]
# Fill the dataframe
for(i in seq_len(niter)){
  # Age-class survival on the logit (link) scale
  S0 <- boot.int[["0"]][i] + boot.slp[["0"]][i]*snow.now
  S1 <- boot.int[["1"]][i] + boot.slp[["1"]][i]*snow.now
  S2 <- boot.int[["2"]][i] + boot.slp[["2"]][i]*snow.now
  S37 <- boot.int[["37"]][i] + boot.slp[["37"]][i]*snow.now
  S8 <- boot.int[["8+"]][i] + boot.slp[["8+"]][i]*snow.now
  # Reproductive rates (logit scale) for age classes 3, 4-8 and 9+
  R3 <- Fec_now$t[i,1]
  R48 <- Fec_now$t[i,2]
  R9 <- Fec_now$t[i,3]
  # 9x9 Leslie matrix.  Fertility = mother's survival * reproduction / 2
  # (the /2 presumably keeps female offspring only -- TODO confirm).
  L <- matrix(0, nrow=9, ncol=9)
  L[1,3] <- inv.logit(S2)*inv.logit(R3)/2   # F2
  L[1,4] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,5] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,6] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,7] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,8] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,9] <- inv.logit(S8)*inv.logit(R9)/2   # F8+ (comment previously mislabelled F37)
  # Survival transitions (sub-diagonal); class 8+ is the terminal class
  L[2,1] <- inv.logit(S0)
  L[3,2] <- inv.logit(S1)
  L[4,3] <- inv.logit(S2)
  L[5,4] <- inv.logit(S37)
  L[6,5] <- inv.logit(S37)
  L[7,6] <- inv.logit(S37)
  L[8,7] <- inv.logit(S37)
  L[9,8] <- inv.logit(S37)
  L[9,9] <- inv.logit(S8)
  Lambda_now$lambda[i] <- eigen.analysis(L)$lambda # dominant eigenvalue
}
####========================####
# Projections TEMPERATURE
####========================####
# Future temperature scenario: only fecundity responds to warming
# (Fec_future), survival stays at current snow-cover conditions (env.now).
Lambda_future <- data.frame(
  iteration = seq_len(niter),
  lambda = seq_len(niter) # placeholder, overwritten in the loop
)
# Hoist the loop-invariant which() subsets out of the 10,000-iteration loop
boot.int <- split(data_pred_surv$Intercept, data_pred_surv$ageClass)
boot.slp <- split(data_pred_surv$Slope, data_pred_surv$ageClass)
snow.now <- env.now["WinSnowsurvT1"][[1]]
for(i in seq_len(niter)){
  # Age-class survival (logit scale) under current snow cover
  S0 <- boot.int[["0"]][i] + boot.slp[["0"]][i]*snow.now
  S1 <- boot.int[["1"]][i] + boot.slp[["1"]][i]*snow.now
  S2 <- boot.int[["2"]][i] + boot.slp[["2"]][i]*snow.now
  S37 <- boot.int[["37"]][i] + boot.slp[["37"]][i]*snow.now
  S8 <- boot.int[["8+"]][i] + boot.slp[["8+"]][i]*snow.now
  # Reproductive rates under the +1.5 degree scenario
  R3 <- Fec_future$t[i,1]
  R48 <- Fec_future$t[i,2]
  R9 <- Fec_future$t[i,3]
  # 9x9 Leslie matrix (see the Lambda_now loop for details)
  L <- matrix(0, nrow=9, ncol=9)
  L[1,3] <- inv.logit(S2)*inv.logit(R3)/2   # F2
  L[1,4] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,5] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,6] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,7] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,8] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,9] <- inv.logit(S8)*inv.logit(R9)/2   # F8+
  L[2,1] <- inv.logit(S0)
  L[3,2] <- inv.logit(S1)
  L[4,3] <- inv.logit(S2)
  L[5,4] <- inv.logit(S37)
  L[6,5] <- inv.logit(S37)
  L[7,6] <- inv.logit(S37)
  L[8,7] <- inv.logit(S37)
  L[9,8] <- inv.logit(S37)
  L[9,9] <- inv.logit(S8)
  Lambda_future$lambda[i] <- eigen.analysis(L)$lambda
}
# +++++++ Figure ++++++++ #
# Density plots comparing the bootstrap lambda distributions under current
# temperature vs a 1.5 degree warming (fecundity pathway only).
dens_x1 <- density(Lambda_now$lambda)
dens_x2 <- density(Lambda_future$lambda)
xlim <- c(0.8, 1.2)
ylim <- c(0,20)
col1 <- rgb(0.973,0.463,0.427,0.6) # Limoilou's original colour #f8766d (Joanie's: rgb(0,0,0.3,0.6))
col2 <-rgb(0,0.749,0.769,0.6) # Limoilou's original colour #00bfc4 (Joanie's: rgb(0.3,0,0.2,0.6)); 4th term = transparency
par(mfrow=c(1,1))
tiff("PredTemp.tiff", res = 600, height=10, width=16, units="cm", pointsize=12)
plot(dens_x1, xlim = xlim, ylim = ylim, xlab = "Lambda", axes=F,
     main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
# Draw the two density polygons (dashed = current, solid = warmed)
polygon(dens_x1, col = col1, lty=2, lwd=2)
polygon(dens_x2, col = col2, lty=1, lwd=2)
legend('topleft',legend=c('Actual temperature','Warming of 1.5 degree Celsius'),
       fill = c(col1, col2), bty = 'n',
       border = T, lty=c(2,1))
dev.off()
####========================####
# Projections SNOW DURATION
####========================####
# +++++++++++++++++++++++++++++++++++++++++++++++++
# Scenario: 5% less snow-cover duration, all age classes affected,
# reproduction kept at current conditions.  Nothing held fixed.
Lambda_future5 <- data.frame(
  iteration = seq_len(niter),
  lambda = seq_len(niter) # placeholder, overwritten in the loop
)
# Hoist the loop-invariant which() subsets out of the 10,000-iteration loop
boot.int <- split(data_pred_surv$Intercept, data_pred_surv$ageClass)
boot.slp <- split(data_pred_surv$Slope, data_pred_surv$ageClass)
snow.f5 <- env.future5["WinSnowsurvT1"][[1]]
for(i in seq_len(niter)){
  # Age-class survival (logit scale) under reduced snow cover
  S0 <- boot.int[["0"]][i] + boot.slp[["0"]][i]*snow.f5
  S1 <- boot.int[["1"]][i] + boot.slp[["1"]][i]*snow.f5
  S2 <- boot.int[["2"]][i] + boot.slp[["2"]][i]*snow.f5
  S37 <- boot.int[["37"]][i] + boot.slp[["37"]][i]*snow.f5
  S8 <- boot.int[["8+"]][i] + boot.slp[["8+"]][i]*snow.f5
  # Reproductive rates under current conditions
  R3 <- Fec_now$t[i,1]
  R48 <- Fec_now$t[i,2]
  R9 <- Fec_now$t[i,3]
  # 9x9 Leslie matrix (see the Lambda_now loop for details)
  L <- matrix(0, nrow=9, ncol=9)
  L[1,3] <- inv.logit(S2)*inv.logit(R3)/2   # F2
  L[1,4] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,5] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,6] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,7] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,8] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,9] <- inv.logit(S8)*inv.logit(R9)/2   # F8+
  L[2,1] <- inv.logit(S0)
  L[3,2] <- inv.logit(S1)
  L[4,3] <- inv.logit(S2)
  L[5,4] <- inv.logit(S37)
  L[6,5] <- inv.logit(S37)
  L[7,6] <- inv.logit(S37)
  L[8,7] <- inv.logit(S37)
  L[9,8] <- inv.logit(S37)
  L[9,9] <- inv.logit(S8)
  Lambda_future5$lambda[i] <- eigen.analysis(L)$lambda
}
# +++++++++++++++++++++++++++++++++++++++++++++++++
# Scenario: 10% less snow-cover duration, all age classes affected,
# reproduction kept at current conditions.  Nothing held fixed.
Lambda_future10 <- data.frame(
  iteration = seq_len(niter),
  lambda = seq_len(niter) # placeholder, overwritten in the loop
)
# Hoist the loop-invariant which() subsets out of the 10,000-iteration loop
boot.int <- split(data_pred_surv$Intercept, data_pred_surv$ageClass)
boot.slp <- split(data_pred_surv$Slope, data_pred_surv$ageClass)
snow.f10 <- env.future10["WinSnowsurvT1"][[1]]
for(i in seq_len(niter)){
  # Age-class survival (logit scale) under reduced snow cover
  S0 <- boot.int[["0"]][i] + boot.slp[["0"]][i]*snow.f10
  S1 <- boot.int[["1"]][i] + boot.slp[["1"]][i]*snow.f10
  S2 <- boot.int[["2"]][i] + boot.slp[["2"]][i]*snow.f10
  S37 <- boot.int[["37"]][i] + boot.slp[["37"]][i]*snow.f10
  S8 <- boot.int[["8+"]][i] + boot.slp[["8+"]][i]*snow.f10
  # Reproductive rates under current conditions
  R3 <- Fec_now$t[i,1]
  R48 <- Fec_now$t[i,2]
  R9 <- Fec_now$t[i,3]
  # 9x9 Leslie matrix (see the Lambda_now loop for details)
  L <- matrix(0, nrow=9, ncol=9)
  L[1,3] <- inv.logit(S2)*inv.logit(R3)/2   # F2
  L[1,4] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,5] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,6] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,7] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,8] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,9] <- inv.logit(S8)*inv.logit(R9)/2   # F8+
  L[2,1] <- inv.logit(S0)
  L[3,2] <- inv.logit(S1)
  L[4,3] <- inv.logit(S2)
  L[5,4] <- inv.logit(S37)
  L[6,5] <- inv.logit(S37)
  L[7,6] <- inv.logit(S37)
  L[8,7] <- inv.logit(S37)
  L[9,8] <- inv.logit(S37)
  L[9,9] <- inv.logit(S8)
  Lambda_future10$lambda[i] <- eigen.analysis(L)$lambda
}
# +++++++++++++++++++++++++++++++++++++++++++++++++
# Scenario: 20% less snow-cover duration, all age classes affected,
# reproduction kept at current conditions.  Nothing held fixed.
Lambda_future20 <- data.frame(
  iteration = seq_len(niter),
  lambda = seq_len(niter) # placeholder, overwritten in the loop
)
# Hoist the loop-invariant which() subsets out of the 10,000-iteration loop
boot.int <- split(data_pred_surv$Intercept, data_pred_surv$ageClass)
boot.slp <- split(data_pred_surv$Slope, data_pred_surv$ageClass)
snow.f20 <- env.future20["WinSnowsurvT1"][[1]]
for(i in seq_len(niter)){
  # Age-class survival (logit scale) under reduced snow cover
  S0 <- boot.int[["0"]][i] + boot.slp[["0"]][i]*snow.f20
  S1 <- boot.int[["1"]][i] + boot.slp[["1"]][i]*snow.f20
  S2 <- boot.int[["2"]][i] + boot.slp[["2"]][i]*snow.f20
  S37 <- boot.int[["37"]][i] + boot.slp[["37"]][i]*snow.f20
  S8 <- boot.int[["8+"]][i] + boot.slp[["8+"]][i]*snow.f20
  # Reproductive rates under current conditions
  R3 <- Fec_now$t[i,1]
  R48 <- Fec_now$t[i,2]
  R9 <- Fec_now$t[i,3]
  # 9x9 Leslie matrix (see the Lambda_now loop for details)
  L <- matrix(0, nrow=9, ncol=9)
  L[1,3] <- inv.logit(S2)*inv.logit(R3)/2   # F2
  L[1,4] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,5] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,6] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,7] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,8] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,9] <- inv.logit(S8)*inv.logit(R9)/2   # F8+
  L[2,1] <- inv.logit(S0)
  L[3,2] <- inv.logit(S1)
  L[4,3] <- inv.logit(S2)
  L[5,4] <- inv.logit(S37)
  L[6,5] <- inv.logit(S37)
  L[7,6] <- inv.logit(S37)
  L[8,7] <- inv.logit(S37)
  L[9,8] <- inv.logit(S37)
  L[9,9] <- inv.logit(S8)
  Lambda_future20$lambda[i] <- eigen.analysis(L)$lambda
}
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Scenario: 30% less snow-cover duration, all age classes affected,
# reproduction kept at current conditions.
Lambda_future30 <- data.frame(
  iteration = seq_len(niter),
  lambda = seq_len(niter) # placeholder, overwritten in the loop
)
# Hoist the loop-invariant which() subsets out of the 10,000-iteration loop
boot.int <- split(data_pred_surv$Intercept, data_pred_surv$ageClass)
boot.slp <- split(data_pred_surv$Slope, data_pred_surv$ageClass)
snow.f30 <- env.future30["WinSnowsurvT1"][[1]]
for(i in seq_len(niter)){
  # Age-class survival (logit scale) under reduced snow cover
  S0 <- boot.int[["0"]][i] + boot.slp[["0"]][i]*snow.f30
  S1 <- boot.int[["1"]][i] + boot.slp[["1"]][i]*snow.f30
  S2 <- boot.int[["2"]][i] + boot.slp[["2"]][i]*snow.f30
  S37 <- boot.int[["37"]][i] + boot.slp[["37"]][i]*snow.f30
  S8 <- boot.int[["8+"]][i] + boot.slp[["8+"]][i]*snow.f30
  # Reproductive rates under current conditions
  R3 <- Fec_now$t[i,1]
  R48 <- Fec_now$t[i,2]
  R9 <- Fec_now$t[i,3]
  # 9x9 Leslie matrix (see the Lambda_now loop for details)
  L <- matrix(0, nrow=9, ncol=9)
  L[1,3] <- inv.logit(S2)*inv.logit(R3)/2   # F2
  L[1,4] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,5] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,6] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,7] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,8] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,9] <- inv.logit(S8)*inv.logit(R9)/2   # F8+
  L[2,1] <- inv.logit(S0)
  L[3,2] <- inv.logit(S1)
  L[4,3] <- inv.logit(S2)
  L[5,4] <- inv.logit(S37)
  L[6,5] <- inv.logit(S37)
  L[7,6] <- inv.logit(S37)
  L[8,7] <- inv.logit(S37)
  L[9,8] <- inv.logit(S37)
  L[9,9] <- inv.logit(S8)
  Lambda_future30$lambda[i] <- eigen.analysis(L)$lambda
}
#save(Lambda_now, Lambda_future,Lambda_future5,Lambda_future10,Lambda_future20,Lambda_future30, file = "/Users/Sandrine/Documents/Sherbrooke/OWPC/Post_OWPC/Projections /Juillet 7/LambdaIterations.RData")
#++++++++++++++++++++++++++++++ FIGURE ALL ++++++++++++++++++++++++++++++#
# Four-panel figure: lambda densities under current snow cover vs 5, 10,
# 20 and 30% reductions in snow-cover duration.
col1 <- rgb(0.973,0.463,0.427,0.6) # Limoilou's original colour #f8766d (Joanie's: rgb(0,0,0.3,0.6))
col2 <-rgb(0,0.749,0.769,0.5) # Limoilou's original colour #00bfc4 (Joanie's: rgb(0.3,0,0.2,0.6)); 4th term = transparency
xlim <- c(0.8, 1.2)
ylim <- c(0,20)
dens_x1.a <- density(Lambda_now$lambda)
dens_x1.b <- density(Lambda_now$lambda)
dens_x1.c <- density(Lambda_now$lambda)
dens_x1.d <- density(Lambda_now$lambda)
dens_x2.a <- density(Lambda_future5$lambda) # one scenario density per panel
dens_x2.b <- density(Lambda_future10$lambda)
dens_x2.c <- density(Lambda_future20$lambda)
dens_x2.d <- density(Lambda_future30$lambda)
# NOTE(review): another tiff() call later in the script reuses this exact
# file name and will overwrite this figure -- confirm/rename one of them.
tiff("Pred_SnowDuration.tiff", res = 600, height=10, width=16, units="cm", pointsize=10)
par(mfrow=c(2,2),mar=c(2,2,2,2)) #mar = c(bottom, left, top, right)
# Panel: 5% reduction
plot(dens_x1.a, xlim = xlim, ylim = ylim, xlab = "Lambda", axes=F,
     main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
# Draw the density polygons (dashed = current, solid = scenario)
polygon(dens_x1.a, col = col1, lty=2, lwd=2)
polygon(dens_x2.a, col = col2, lty=1, lwd=2)
legend('topleft',legend=c('Actual snow','5% less duration'),
       fill = c(col1, col2), bty = 'n',
       border = T, lty=c(2,1))
# Panel: 10% reduction
plot(dens_x1.b, xlim = xlim, ylim = ylim, xlab = "Lambda", axes=F,
     main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
polygon(dens_x1.b, col = col1, lty=2, lwd=2)
polygon(dens_x2.b, col = col2, lty=1, lwd=2)
legend('topleft',legend=c('Actual snow','10% less duration'),
       fill = c(col1, col2), bty = 'n',
       border = T, lty=c(2,1))
# Panel: 20% reduction
plot(dens_x1.c, xlim = xlim, ylim = ylim, xlab = "Lambda", axes=F,
     main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
polygon(dens_x1.c, col = col1, lty=2, lwd=2)
polygon(dens_x2.c, col = col2, lty=1, lwd=2)
legend('topleft',legend=c('Actual snow','20% less duration'),
       fill = c(col1, col2), bty = 'n',
       border = T, lty=c(2,1))
# Panel: 30% reduction
plot(dens_x1.d, xlim = xlim, ylim = ylim, xlab = "Lambda", axes=F,
     main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
polygon(dens_x1.d, col = col1, lty=2, lwd=2)
polygon(dens_x2.d, col = col2, lty=1, lwd=2)
legend('topleft',legend=c('Actual snow','30% less duration'),
       fill = c(col1, col2), bty = 'n',
       border = T, lty=c(2,1))
dev.off()
####============================================####
# Projections SNOW DURATION 5-10/fixed-not fixed
####============================================####
# Reload the saved bootstrap objects (lambda iterations + model bootstraps).
load(file = "/Users/Sandrine/Documents/Sherbrooke/OWPC/Post_OWPC/Projections /Juillet 7/LambdaIterations.RData")
load("/Users/Sandrine/Documents/Sherbrooke/OWPC/Post_OWPC/Projections /Juillet 7/bootstrapPred.RData")
Lambda_future5  # quick interactive check of the reloaded objects
Lambda_future10
# "Fixed" scenarios: every vital rate kept at current conditions except
# yearling survival (S1), which experiences the snow-cover reduction.
niter <- 10000  # (was `niter = 10000`; use `<-` for assignment)
# 5% FIXED
Lambda_future5_fixed <- data.frame(
  iteration = seq_len(niter),
  lambda = seq_len(niter) # placeholder, overwritten in the loop
)
# Hoist the loop-invariant which() subsets out of the 10,000-iteration loop
boot.int <- split(data_pred_surv$Intercept, data_pred_surv$ageClass)
boot.slp <- split(data_pred_surv$Slope, data_pred_surv$ageClass)
snow.now <- env.now["WinSnowsurvT1"][[1]]
snow.f5 <- env.future5["WinSnowsurvT1"][[1]]
for(i in seq_len(niter)){
  S0 <- boot.int[["0"]][i] + boot.slp[["0"]][i]*snow.now
  S1 <- boot.int[["1"]][i] + boot.slp[["1"]][i]*snow.f5  # only S1 sees the reduction
  S2 <- boot.int[["2"]][i] + boot.slp[["2"]][i]*snow.now
  S37 <- boot.int[["37"]][i] + boot.slp[["37"]][i]*snow.now
  S8 <- boot.int[["8+"]][i] + boot.slp[["8+"]][i]*snow.now
  # Reproductive rates under current conditions
  R3 <- Fec_now$t[i,1]
  R48 <- Fec_now$t[i,2]
  R9 <- Fec_now$t[i,3]
  # 9x9 Leslie matrix (see the Lambda_now loop for details)
  L <- matrix(0, nrow=9, ncol=9)
  L[1,3] <- inv.logit(S2)*inv.logit(R3)/2   # F2
  L[1,4] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,5] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,6] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,7] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,8] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,9] <- inv.logit(S8)*inv.logit(R9)/2   # F8+
  L[2,1] <- inv.logit(S0)
  L[3,2] <- inv.logit(S1)
  L[4,3] <- inv.logit(S2)
  L[5,4] <- inv.logit(S37)
  L[6,5] <- inv.logit(S37)
  L[7,6] <- inv.logit(S37)
  L[8,7] <- inv.logit(S37)
  L[9,8] <- inv.logit(S37)
  L[9,9] <- inv.logit(S8)
  Lambda_future5_fixed$lambda[i] <- eigen.analysis(L)$lambda
}
# 10% FIXED: only yearling survival (S1) experiences the 10% reduction.
Lambda_future10_fixed <- data.frame(
  iteration = seq_len(niter),
  lambda = seq_len(niter) # placeholder, overwritten in the loop
)
# Hoist the loop-invariant which() subsets out of the 10,000-iteration loop
boot.int <- split(data_pred_surv$Intercept, data_pred_surv$ageClass)
boot.slp <- split(data_pred_surv$Slope, data_pred_surv$ageClass)
snow.now <- env.now["WinSnowsurvT1"][[1]]
snow.f10 <- env.future10["WinSnowsurvT1"][[1]]
for(i in seq_len(niter)){
  S0 <- boot.int[["0"]][i] + boot.slp[["0"]][i]*snow.now
  S1 <- boot.int[["1"]][i] + boot.slp[["1"]][i]*snow.f10  # only S1 sees the reduction
  S2 <- boot.int[["2"]][i] + boot.slp[["2"]][i]*snow.now
  S37 <- boot.int[["37"]][i] + boot.slp[["37"]][i]*snow.now
  S8 <- boot.int[["8+"]][i] + boot.slp[["8+"]][i]*snow.now
  # Reproductive rates under current conditions
  R3 <- Fec_now$t[i,1]
  R48 <- Fec_now$t[i,2]
  R9 <- Fec_now$t[i,3]
  # 9x9 Leslie matrix (see the Lambda_now loop for details)
  L <- matrix(0, nrow=9, ncol=9)
  L[1,3] <- inv.logit(S2)*inv.logit(R3)/2   # F2
  L[1,4] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,5] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,6] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,7] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,8] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,9] <- inv.logit(S8)*inv.logit(R9)/2   # F8+
  L[2,1] <- inv.logit(S0)
  L[3,2] <- inv.logit(S1)
  L[4,3] <- inv.logit(S2)
  L[5,4] <- inv.logit(S37)
  L[6,5] <- inv.logit(S37)
  L[7,6] <- inv.logit(S37)
  L[8,7] <- inv.logit(S37)
  L[9,8] <- inv.logit(S37)
  L[9,9] <- inv.logit(S8)
  Lambda_future10_fixed$lambda[i] <- eigen.analysis(L)$lambda
}
# ++++++++ Figure ++++++++#
# Density plots of lambda: current conditions vs the 5%/10% reductions,
# both with all classes affected and with only yearling survival (S1)
# affected ("fixed").
col1 <- rgb(0.973,0.463,0.427,0.6) # Limoilou's original colour #f8766d (Joanie's: rgb(0,0,0.3,0.6))
col2 <-rgb(0,0.749,0.769,0.5) # Limoilou's original colour #00bfc4 (Joanie's: rgb(0.3,0,0.2,0.6)); 4th term = transparency
xlim <- c(0.8, 1.2)
ylim <- c(0,20)
dens_x1.a <- density(Lambda_now$lambda)
dens_x1.b <- density(Lambda_now$lambda)
dens_x1.c <- density(Lambda_now$lambda)
dens_x1.d <- density(Lambda_now$lambda)
dens_x2.a <- density(Lambda_future5$lambda)
dens_x2.b <- density(Lambda_future10$lambda)
dens_x2.c <- density(Lambda_future5_fixed$lambda)
dens_x2.d <- density(Lambda_future10_fixed$lambda)
# BUG FIX: this tiff() previously reused the file name
# "Pred_SnowDuration.tiff" and silently overwrote the 4-panel
# snow-duration figure written earlier in the script.
tiff("Pred_SnowDuration_fixedS1.tiff", res = 600, height=10, width=16, units="cm", pointsize=9)
par(mfrow=c(2,2),mar=c(2,2,2,2)) #mar = c(bottom, left, top, right)
# Panel: 5% reduction (all classes)
plot(dens_x1.a, xlim = xlim, ylim = ylim, xlab = "Lambda", axes=FALSE,
     main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
# BUG FIX: this panel used to draw dens_x2.b (the 10% scenario) under a
# "5% less duration" label; it now draws the matching 5% density.
polygon(dens_x1.a, col = col1, lty=2, lwd=2)
polygon(dens_x2.a, col = col2, lty=1, lwd=2)
legend('topleft',legend=c('Actual snow','5% less duration'),
       fill = c(col1, col2), bty = 'n',
       border = TRUE, lty=c(2,1))
# Panel: 10% reduction (all classes)
plot(dens_x1.b, xlim = xlim, ylim = ylim, xlab = "Lambda", axes=FALSE,
     main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
polygon(dens_x1.b, col = col1, lty=2, lwd=2)
polygon(dens_x2.b, col = col2, lty=1, lwd=2)
legend('topleft',legend=c('Actual snow','10% less duration'),
       fill = c(col1, col2), bty = 'n',
       border = TRUE, lty=c(2,1))
# Panel: 5% reduction, S1 only ("fixed")
plot(dens_x1.c, xlim = xlim, ylim = ylim, xlab = "Lambda", axes=FALSE,
     main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
polygon(dens_x1.c, col = col1, lty=2, lwd=2)
polygon(dens_x2.c, col = col2, lty=1, lwd=2)
legend('topleft',legend=c('Actual snow','5% less duration, fixed'),
       fill = c(col1, col2), bty = 'n',
       border = TRUE, lty=c(2,1))
# Panel: 10% reduction, S1 only ("fixed")
plot(dens_x1.d, xlim = xlim, ylim = ylim, xlab = "Lambda", axes=FALSE,
     main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
polygon(dens_x1.d, col = col1, lty=2, lwd=2)
polygon(dens_x2.d, col = col2, lty=1, lwd=2)
legend('topleft',legend=c('Actual snow','10% less duration, fixed'),
       fill = c(col1, col2), bty = 'n',
       border = TRUE, lty=c(2,1))
dev.off()
####============================================####
# Projections COMBINATION
####============================================####
# Combined scenario: 5% less snow-cover duration (survival, all classes)
# AND +1.5 degree warming (fecundity, via Fec_future).  Nothing held fixed.
Lambda_future5_comb <- data.frame(
  iteration = seq_len(niter),
  lambda = seq_len(niter) # placeholder, overwritten in the loop
)
# Hoist the loop-invariant which() subsets out of the 10,000-iteration loop
boot.int <- split(data_pred_surv$Intercept, data_pred_surv$ageClass)
boot.slp <- split(data_pred_surv$Slope, data_pred_surv$ageClass)
snow.f5 <- env.future5["WinSnowsurvT1"][[1]]
for(i in seq_len(niter)){
  S0 <- boot.int[["0"]][i] + boot.slp[["0"]][i]*snow.f5
  S1 <- boot.int[["1"]][i] + boot.slp[["1"]][i]*snow.f5
  S2 <- boot.int[["2"]][i] + boot.slp[["2"]][i]*snow.f5
  S37 <- boot.int[["37"]][i] + boot.slp[["37"]][i]*snow.f5
  S8 <- boot.int[["8+"]][i] + boot.slp[["8+"]][i]*snow.f5
  # Reproductive rates under the +1.5 degree scenario
  R3 <- Fec_future$t[i,1]
  R48 <- Fec_future$t[i,2]
  R9 <- Fec_future$t[i,3]
  # 9x9 Leslie matrix (see the Lambda_now loop for details)
  L <- matrix(0, nrow=9, ncol=9)
  L[1,3] <- inv.logit(S2)*inv.logit(R3)/2   # F2
  L[1,4] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,5] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,6] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,7] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,8] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,9] <- inv.logit(S8)*inv.logit(R9)/2   # F8+
  L[2,1] <- inv.logit(S0)
  L[3,2] <- inv.logit(S1)
  L[4,3] <- inv.logit(S2)
  L[5,4] <- inv.logit(S37)
  L[6,5] <- inv.logit(S37)
  L[7,6] <- inv.logit(S37)
  L[8,7] <- inv.logit(S37)
  L[9,8] <- inv.logit(S37)
  L[9,9] <- inv.logit(S8)
  Lambda_future5_comb$lambda[i] <- eigen.analysis(L)$lambda
}
# +++++++++++++++++++++++++++++++++++++++++++++++++
# Combined scenario: 10% less snow-cover duration (survival, all classes)
# AND +1.5 degree warming (fecundity, via Fec_future).  Nothing held fixed.
Lambda_future10_comb <- data.frame(
  iteration = seq_len(niter),
  lambda = seq_len(niter) # placeholder, overwritten in the loop
)
# Hoist the loop-invariant which() subsets out of the 10,000-iteration loop
boot.int <- split(data_pred_surv$Intercept, data_pred_surv$ageClass)
boot.slp <- split(data_pred_surv$Slope, data_pred_surv$ageClass)
snow.f10 <- env.future10["WinSnowsurvT1"][[1]]
for(i in seq_len(niter)){
  S0 <- boot.int[["0"]][i] + boot.slp[["0"]][i]*snow.f10
  S1 <- boot.int[["1"]][i] + boot.slp[["1"]][i]*snow.f10
  S2 <- boot.int[["2"]][i] + boot.slp[["2"]][i]*snow.f10
  S37 <- boot.int[["37"]][i] + boot.slp[["37"]][i]*snow.f10
  S8 <- boot.int[["8+"]][i] + boot.slp[["8+"]][i]*snow.f10
  # Reproductive rates under the +1.5 degree scenario
  R3 <- Fec_future$t[i,1]
  R48 <- Fec_future$t[i,2]
  R9 <- Fec_future$t[i,3]
  # 9x9 Leslie matrix (see the Lambda_now loop for details)
  L <- matrix(0, nrow=9, ncol=9)
  L[1,3] <- inv.logit(S2)*inv.logit(R3)/2   # F2
  L[1,4] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,5] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,6] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,7] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,8] <- inv.logit(S37)*inv.logit(R48)/2 # F3-7
  L[1,9] <- inv.logit(S8)*inv.logit(R9)/2   # F8+
  L[2,1] <- inv.logit(S0)
  L[3,2] <- inv.logit(S1)
  L[4,3] <- inv.logit(S2)
  L[5,4] <- inv.logit(S37)
  L[6,5] <- inv.logit(S37)
  L[7,6] <- inv.logit(S37)
  L[8,7] <- inv.logit(S37)
  L[9,8] <- inv.logit(S37)
  L[9,9] <- inv.logit(S8)
  Lambda_future10_comb$lambda[i] <- eigen.analysis(L)$lambda
}
####============================================####
# FINAL FIGURES
####============================================####
#save(Lambda_now,Lambda_future5,Lambda_future10,Lambda_future5_fixed,Lambda_future10_fixed,Lambda_future,Lambda_future5_comb,Lambda_future10_comb,file = "/Users/Sandrine/Documents/Sherbrooke/OWPC/Post_OWPC/Projections /Juillet 7/LambdaFigures.RData")
load("/Users/Sandrine/Documents/Sherbrooke/OWPC/Post_OWPC/Projections /Juillet 7/LambdaFigures.RData")
col1 <- rgb(0.973,0.463,0.427,0.6) # Limoilou's original colour #f8766d (Joanie's: rgb(0,0,0.3,0.6))
col2 <-rgb(0,0.749,0.769,0.5) # Limoilou's original colour #00bfc4 (Joanie's: rgb(0.3,0,0.2,0.6)); 4th term = transparency
xlim <- c(0.8, 1.2)
ylim <- c(0,20)
# One copy of the "current conditions" density per panel
dens_x1.a <- density(Lambda_now$lambda)
dens_x1.b <- density(Lambda_now$lambda)
dens_x1.c <- density(Lambda_now$lambda)
dens_x1.d <- density(Lambda_now$lambda)
dens_x1.e <- density(Lambda_now$lambda)
dens_x1.f <- density(Lambda_now$lambda)
dens_x1.g <- density(Lambda_now$lambda)
dens_x2.a <- density(Lambda_future5$lambda)
dens_x2.b <- density(Lambda_future10$lambda)
dens_x2.c <- density(Lambda_future5_fixed$lambda)
dens_x2.d <- density(Lambda_future10_fixed$lambda)
dens_x2.e <- density(Lambda_future$lambda) # temperature-only scenario
dens_x2.f <- density(Lambda_future5_comb$lambda)
dens_x2.g <- density(Lambda_future10_comb$lambda)
# Figure 5 panels
# 5%, 10%, +1.5, combination 5%, combination 10%
# Projections with everything but S1 fixed, in supplementary
# NOTE(review): "Pojection" looks like a typo for "Projection" in the file
# name -- confirm before renaming (downstream documents may reference it).
tiff("Pojection_Options2.tiff", res = 600, height=12, width=16, units="cm", pointsize=9)
par(mfrow=c(3,2),mar=c(4,5,2,2)) #mar = c(bottom, left, top, right)
# Panel (a): 5% less snow-cover duration
plot(dens_x1.a, xlim = xlim, ylim = ylim, xlab = "",ylab= "Density", axes=F,
     main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
mtext(side=1, expression(lambda), line=2.5,cex = 1.2)
#mtext(side=2, "Frequency", line=2.5)
#text(0.82, 18, labels="(a)", cex=1.5)
mtext(side = 3,"(a)", line = 0.6, at = 0.75)
# Density polygons (dashed = current, solid = scenario)
polygon(dens_x1.a, col = col1, lty=2, lwd=2)
polygon(dens_x2.a, col = col2, lty=1, lwd=2)
legend(0.78,19,legend=c('Actual conditions','5% reduction'),
       fill = c(col1, col2), bty = 'n',
       border = T, lty=c(2,1),x.intersp = 0.2,cex = 1.2)
# Panel (b): 10% less snow-cover duration
plot(dens_x1.b, xlim = xlim, ylim = ylim, xlab = "" , ylab= "Density", axes=F,
     main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
mtext(side=1, expression(lambda), line=2.5,cex = 1.2)
#text(0.82, 18, labels="(b)", cex=1.5)
mtext(side = 3,"(b)",line = 0.6, at = 0.75)
polygon(dens_x1.b, col = col1, lty=2, lwd=2)
polygon(dens_x2.b, col = col2, lty=1, lwd=2)
legend(0.78,19,legend=c('Actual conditions','10% reduction'),
       fill = c(col1, col2), bty = 'n',
       border = T, lty=c(2,1),x.intersp = 0.2,cex = 1.2
)
# Panel (c): temperature-only scenario (+1.5 degrees)
plot(dens_x1.e, xlim = xlim, ylim = ylim, xlab = "",ylab= "Density", axes=F,
     main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
mtext(side=1, expression(lambda), line=2.5,cex = 1.2)
mtext(side = 3,"(c)", line = 0.6, at = 0.75)
#text(0.75, 21, labels="(c)", cex=1.5)
polygon(dens_x1.e, col = col1, lty=2, lwd=2)
polygon(dens_x2.e, col = col2, lty=1, lwd=2)
legend(0.78,19,legend=c('Actual conditions', expression("1.5 "^"o"*"C increase")),
       fill = c(col1, col2), bty = 'n',
       border = T, lty=c(2,1),x.intersp = 0.2,cex = 1.2)
# Panel (d): combined 5% reduction + warming
plot(dens_x1.f, xlim = xlim, ylim = ylim, xlab = "", ylab= "Density",axes=F,
     main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
mtext(side=1, expression(lambda), line=2.5,cex = 1.2)
#text(0.82, 18, labels="(d)", cex=1.5)
mtext(side = 3,"(d)", line = 0.6, at = 0.75)
polygon(dens_x1.f, col = col1, lty=2, lwd=2)
polygon(dens_x2.f, col = col2, lty=1, lwd=2)
legend(0.78,19,legend=c('Actual conditions','5% reduction'),
       fill = c(col1, col2), bty = 'n',
       border = T, lty=c(2,1),x.intersp = 0.2,cex = 1.2)
# Second legend call adds the warming line below (white fill/border hack)
legend(0.80,11,legend= expression("1.5 "^"o"*"C increase"),
       bty = 'n',border = F, lty=c(2,1), col = "white",x.intersp = 0.2,cex = 1.2)
# Panel (e): combined 10% reduction + warming
plot(dens_x1.g, xlim = xlim, ylim = ylim, xlab = "",ylab= "Density", axes=F,
     main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
mtext(side=1, expression(lambda), line=2.5,cex = 1.2)
#text(0.82, 18, labels="(e)", cex=1.5)
mtext(side = 3,"(e)", line = 0.6, at = 0.75)
polygon(dens_x1.g, col = col1, lty=2, lwd=2)
polygon(dens_x2.g, col = col2, lty=1, lwd=2)
# NOTE(review): fill has 3 entries for 2 legend labels ("white" extra) --
# presumably a leftover; confirm intended appearance.
legend(0.78,19,legend=c('Actual conditions','10% reduction'),
       fill = c(col1, col2,"white"), bty = 'n',
       border = T, lty=c(2,1),x.intersp = 0.2,cex = 1.2)
legend(0.80,11,legend= expression("1.5 "^"o"*"C increase"),
       bty = 'n', border = F, lty=c(2,1), col = "white",x.intersp = 0.2,cex = 1.2)
dev.off()
# Second Figure with everything fixed but S1
# In supplementary
# Two-panel supplementary figure for the "fixed" scenarios: present-day
# lambda density vs the 5% and 10% snow-duration reductions with the other
# parameters held fixed.
tiff("Projection_Option2_Supplementary.tiff", res = 600, height=9, width=14, units="cm", pointsize=9)
# 5% fixed
# make density plots to compare the distributions
par(mfrow=c(2,1),mar=c(4,5,2,2)) #mar = c(bottom, left, top, right)
plot(dens_x1.c, xlim = xlim, ylim = ylim, xlab = "",ylab= "Density", axes=F,
main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
mtext(side=1, expression(lambda), line=2.5,cex = 1.2)
mtext(side = 3,"(a)", line = 0.6, at = 0.75, cex = 1.2)
#put our density plots in
polygon(dens_x1.c, col = col1, lty=2, lwd=2)
polygon(dens_x2.c, col = col2, lty=1, lwd=2)
legend(0.78,14,legend=c('Actual conditions','5% reduction, fixed'),
fill = c(col1, col2), bty = 'n',
border = T, lty=c(2,1), cex = 0.9)
# 10% fixed
# make density plots to compare the distributions
plot(dens_x1.d, xlim = xlim, ylim = ylim, xlab = "", ylab= "Density",axes=F,
main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
mtext(side=1, expression(lambda), line=2.5,cex = 1.2)
mtext(side = 3,"(b)", line = 0.6, at = 0.75, cex = 1.2)
#put our density plots in
polygon(dens_x1.d, col = col1, lty=2, lwd=2)
polygon(dens_x2.d, col = col2, lty=1, lwd=2)
legend(0.78,14,legend=c('Actual conditions','10% reduction, fixed'),
fill = c(col1, col2), bty = 'n',
border = T, lty=c(2,1), cex = 0.9)
# Close the tiff device.
dev.off()
# Figure 7 panels
# Nothing in Supplementary
# Fill colours: semi-transparent red/teal (4th rgb() argument = alpha).
col1 <- rgb(0.973,0.463,0.427,0.6) # Limoilou's original colour #f8766d; Joanie's alternative: rgb(0,0,0.3,0.6)
col2 <-rgb(0,0.749,0.769,0.5) # Limoilou's original colour #00bfc4; Joanie's alternative: rgb(0.3,0,0.2,0.6)
xlim <- c(0.8, 1.2)
ylim <- c(0,20)
# The "current conditions" density is identical in every panel, so compute it
# once instead of seven times. The per-panel dens_x1.* aliases are kept
# because the plotting code below refers to each of them by name.
dens_now <- density(Lambda_now$lambda)
dens_x1.a <- dens_now
dens_x1.b <- dens_now
dens_x1.c <- dens_now
dens_x1.d <- dens_now
dens_x1.e <- dens_now
dens_x1.f <- dens_now
dens_x1.g <- dens_now
# One projected-scenario density per panel (a)-(g).
dens_x2.a <- density(Lambda_future5$lambda)        # 5% snow-duration reduction
dens_x2.b <- density(Lambda_future10$lambda)       # 10% reduction
dens_x2.c <- density(Lambda_future5_fixed$lambda)  # 5% reduction, fixed
dens_x2.d <- density(Lambda_future10_fixed$lambda) # 10% reduction, fixed
dens_x2.e <- density(Lambda_future$lambda)         # temperature increase only
dens_x2.f <- density(Lambda_future5_comb$lambda)   # 5% reduction + warming
dens_x2.g <- density(Lambda_future10_comb$lambda)  # 10% reduction + warming
# Main multi-panel figure (Option 1): in each panel the present-day lambda
# density (dens_x1.*) is overlaid with one projected scenario (dens_x2.*).
tiff("Projection_Option1.tiff", res = 600, height=13, width=16.5, units="cm", pointsize=9)
par(mfrow=c(4,2),mar=c(4,5,2,2)) #mar = c(bottom, left, top, right)
# Panel (a): 5% less snow duration.
plot(dens_x1.a, xlim = xlim, ylim = ylim, xlab = "",ylab= "Density", axes=F,
main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
mtext(side=1, "Lambda", line=2.5,cex = 0.8)
#mtext(side=2, "Frequency", line=2.5)
text(0.82, 18, labels="(a)", cex=1.5)
#put our density plots in
polygon(dens_x1.a, col = col1, lty=2, lwd=2)
polygon(dens_x2.a, col = col2, lty=1, lwd=2)
legend(0.78,14,legend=c('Actual snow','5% less duration'),
fill = c(col1, col2), bty = 'n',
border = T, lty=c(2,1))
# Panel (b): 10% less snow duration.
# 10 %
plot(dens_x1.b, xlim = xlim, ylim = ylim, xlab = "" , ylab= "Density", axes=F,
main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
mtext(side=1, "Lambda", line=2.5,cex = 0.8)
text(0.82, 18, labels="(b)", cex=1.5)
#put our density plots in
polygon(dens_x1.b, col = col1, lty=2, lwd=2)
polygon(dens_x2.b, col = col2, lty=1, lwd=2)
legend(0.78,14,legend=c('Actual snow','10% less duration'),
fill = c(col1, col2), bty = 'n',
border = T, lty=c(2,1))
# Panel (c): 5% less duration, fixed scenario.
# 5% fixed
# make density plots to compare the distributions
plot(dens_x1.c, xlim = xlim, ylim = ylim, xlab = "",ylab= "Density", axes=F,
main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
mtext(side=1, "Lambda", line=2.5,cex = 0.8)
text(0.82, 18, labels="(c)", cex=1.5)
#put our density plots in
polygon(dens_x1.c, col = col1, lty=2, lwd=2)
polygon(dens_x2.c, col = col2, lty=1, lwd=2)
legend(0.78,14,legend=c('Actual snow','5% less duration, fixed'),
fill = c(col1, col2), bty = 'n',
border = T, lty=c(2,1))
# Panel (d): 10% less duration, fixed scenario.
# 10% fixed
# make density plots to compare the distributions
plot(dens_x1.d, xlim = xlim, ylim = ylim, xlab = "", ylab= "Density",axes=F,
main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
mtext(side=1, "Lambda", line=2.5,cex = 0.8)
text(0.82, 18, labels="(d)", cex=1.5)
#put our density plots in
polygon(dens_x1.d, col = col1, lty=2, lwd=2)
polygon(dens_x2.d, col = col2, lty=1, lwd=2)
legend(0.78,14,legend=c('Actual snow','10% less duration, fixed'),
fill = c(col1, col2), bty = 'n',
border = T, lty=c(2,1))
# Panel (e): warming only.
#Temp
plot(dens_x1.e, xlim = xlim, ylim = ylim, xlab = "",ylab= "Density", axes=F,
main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
mtext(side=1, "Lambda", line=2.5,cex = 0.8)
text(0.82, 18, labels="(e)", cex=1.5)
#put our density plots in
polygon(dens_x1.e, col = col1, lty=2, lwd=2)
polygon(dens_x2.e, col = col2, lty=1, lwd=2)
legend(0.78,14,legend=c('Actual snow','+1.5C warming'),
fill = c(col1, col2), bty = 'n',
border = T, lty=c(2,1))
# Panel (f): 5% less duration combined with warming. The second legend()
# adds the "+1.5C warming" line without a fill box (white line colour).
# 5% + Temp
plot(dens_x1.f, xlim = xlim, ylim = ylim, xlab = "", ylab= "Density",axes=F,
main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
mtext(side=1, "Lambda", line=2.5,cex = 0.8)
text(0.82, 18, labels="(f)", cex=1.5)
#put our density plots in
polygon(dens_x1.f, col = col1, lty=2, lwd=2)
polygon(dens_x2.f, col = col2, lty=1, lwd=2)
legend(0.78,14,legend=c('Actual snow','5% less duration'),
fill = c(col1, col2), bty = 'n',
border = T, lty=c(2,1))
legend(0.80,8,legend=c('+1.5C warming'),
bty = 'n',border = F, lty=c(2,1), col = "white")
# Panel (g): 10% less duration combined with warming (same two-legend trick).
# 10% + Temp
plot(dens_x1.g, xlim = xlim, ylim = ylim, xlab = "",ylab= "Density", axes=F,
main = '', cex.axis=1.2, cex.lab=1.2, type="n")
axis(1, at=seq(0.8, 1.2, 0.01), cex.axis=1)
axis(2, at=seq(0, 20,1), cex.axis =1)
mtext(side=1, "Lambda", line=2.5,cex = 0.8)
text(0.82, 18, labels="(g)", cex=1.5)
#put our density plots in
polygon(dens_x1.g, col = col1, lty=2, lwd=2)
polygon(dens_x2.g, col = col2, lty=1, lwd=2)
legend(0.78,14,legend=c('Actual snow','10% less duration'),
fill = c(col1, col2,"white"), bty = 'n',
border = T, lty=c(2,1))
legend(0.80,8,legend=c('+1.5C warming'),
bty = 'n', border = F, lty=c(2,1), col = "white")
# Close the tiff device.
dev.off()
####============================================####
# Get the means and SD
####============================================####
# Per-scenario summaries of the simulated lambda distributions: mean, SD,
# and the proportion of draws exceeding q, the 97.5th percentile of the
# present-day distribution. The trailing comments record the values from
# one particular run.
# NOTE(review): the `[,2]` subscripts assume lambda is column 2 of each
# Lambda_* data frame -- confirm against where these objects are built.
# Lambda now
mean(Lambda_now$lambda) # 1.066446
sd(Lambda_now$lambda) # 0.02252336
q <- quantile(Lambda_now$lambda, 0.975) # 1.109802
# Lambda 5%
mean(Lambda_future5$lambda) # 1.071571
sd(Lambda_future5$lambda) # 0.02433054
# Proportion lambda > 97.5 quartile of lambda now
length(which(Lambda_future5[,2] > q)) / (nrow(Lambda_future5)) # 0.0559
# Lambda 10%
mean(Lambda_future10$lambda) # 1.073211
sd(Lambda_future10$lambda) # 0.02864516
length(which(Lambda_future10[,2] > q)) / (nrow(Lambda_future10)) #0.0975
# Lambda Temperature
mean(Lambda_future$lambda) # 1.071984
sd(Lambda_future$lambda) # 0.02308012
length(which(Lambda_future[,2] > q)) / (nrow(Lambda_future)) # 0.049
# Combined 5%
mean(Lambda_future5_comb$lambda) # 1.077196
sd(Lambda_future5_comb$lambda) # 0.02483625
length(which(Lambda_future5_comb[,2] > q)) / (nrow(Lambda_future5_comb)) # 0.0936
# Combined 10%
mean(Lambda_future10_comb$lambda) # 1.078819
sd(Lambda_future10_comb$lambda) # 0.02909688
length(which(Lambda_future10_comb[,2] > q)) / (nrow(Lambda_future10_comb)) # 0.1455
#Fixed 5%
mean(Lambda_future5_fixed$lambda) # 1.06988
sd(Lambda_future5_fixed$lambda) # 0.02226941
length(which(Lambda_future5_fixed[,2] > q)) / (nrow(Lambda_future5_fixed)) # 0.0342
#Fixed 10%
mean(Lambda_future10_fixed$lambda) # 1.071331
sd(Lambda_future10_fixed$lambda) # 0.02219305
length(which(Lambda_future10_fixed[,2] > q)) / (nrow(Lambda_future10_fixed)) # 0.0391
|
b9facf1e206b65b7eae3f1565961edd226aadad4
|
85476eebe615722cf1759286db14e54725d15c2b
|
/R/genomic/chronology/league_model.R
|
6bb378b243d035d192a9fd28e530ef30eacdebcf
|
[
"MIT"
] |
permissive
|
matahi/lib
|
6b51799d86196f87c3a2df7422504f6898ae3bdf
|
d5f665db5cfefa05c68950311d0f016bcf9ef690
|
refs/heads/master
| 2021-09-22T16:04:13.496704
| 2018-09-11T18:53:43
| 2018-09-11T18:53:43
| 103,403,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,121
|
r
|
league_model.R
|
#' League-model ranking of alteration chronology.
#'
#' Treats each ordered gene pair in `chronology.df` as a series of matches and
#' simulates `n.repeat` "seasons": for every opponent a result (win / tie /
#' loss) is drawn with probabilities taken from the observed Win /
#' Inconclusive / Loss counts, and the per-season points are summed. Scores
#' are negated so that genes with more "wins" (earlier alterations) sort
#' first on the resulting plot.
#'
#' @param chronology.df data frame with columns Gene1, Gene2, Total, Win,
#'   Loss and Inconclusive (pairwise precedence counts).
#' @param n.repeat number of simulated seasons.
#' @param type kept for backward compatibility; only the "score" behaviour is
#'   implemented (a "rank" variant existed as commented-out code).
#' @param win,tie,loss points awarded per match outcome.
#' @param cutoff.occurrence pairings observed at most this many times are
#'   replaced by an uninformative 3 / 3 / 3 record out of 9 matches.
#' @param add.bars if TRUE, add dashed reference lines at +/- 1 SD around the
#'   mean score of a fully random "dummy" player.
#' @param group.genes optional data frame with columns `alterations` and
#'   `colours` used to colour the y-axis labels by gene group.
#' @return list with `ranking` (long data frame of simulated scores, one row
#'   per gene x season) and `p` (the ggplot object).
league_model <- function (chronology.df, n.repeat=1000, type="score", win=2, tie=1, loss=0, cutoff.occurrence=5, add.bars=TRUE, group.genes=NULL) {
  library(tidyverse)  # library() (not require()) so a missing dep errors immediately
  genes <- unique(chronology.df$Gene1)
  # Self-pairings carry no ordering information.
  precedence.df <- chronology.df %>% filter(Gene1 != Gene2)
  # One column of simulated season scores per gene (n.repeat x n.genes).
  # vapply (not sapply) guarantees the matrix shape regardless of input.
  score.gene <- vapply(seq_along(genes), function(k)
  {
    chronology.gene <- precedence.df[precedence.df$Gene1 == genes[k], ]
    # Pairings seen too rarely are replaced by an uninformative record of
    # 3 wins / 3 losses / 3 ties out of 9 matches. Assign column by column:
    # the original `df[rows, cols] <- c(9, 3, 3, 3)` recycles column-wise
    # and scrambles the values whenever more than one row is below the
    # cutoff.
    low.n <- chronology.gene$Total <= cutoff.occurrence
    if (any(low.n))
    {
      chronology.gene$Total[low.n] <- 9
      chronology.gene$Win[low.n] <- 3
      chronology.gene$Loss[low.n] <- 3
      chronology.gene$Inconclusive[low.n] <- 3
    }
    # Simulate n.repeat match outcomes against each opponent, then sum the
    # points across opponents within each simulated season.
    score.matrix <- vapply(seq_len(nrow(chronology.gene)), function(j)
    {
      prob.win <- chronology.gene$Win[j]/chronology.gene$Total[j]
      prob.loss <- chronology.gene$Loss[j]/chronology.gene$Total[j]
      prob.tie <- chronology.gene$Inconclusive[j]/chronology.gene$Total[j]
      sample(c(win,tie,loss), size=n.repeat, replace=TRUE, prob=c(prob.win,prob.tie, prob.loss))
    }, numeric(n.repeat))
    rowSums(score.matrix)
  }, numeric(n.repeat))
  # Reference "dummy" player with uniform win/tie/loss probabilities: its
  # mean and SD describe the score band expected under no chronology signal.
  dummy.score <- rowSums(matrix(sample(c(win,tie,loss), size=(length(genes)-1)*n.repeat, replace=TRUE, prob=c(1/3,1/3, 1/3)),nrow=n.repeat, ncol=length(genes)-1))
  dummy.mean <- -mean(dummy.score)
  dummy.sd <- sd(dummy.score)
  colnames(score.gene) <- genes
  # Negate so that "earlier" (more wins) gives the smaller plotted value.
  # as_tibble() replaces the deprecated tbl_df(); data.frame() may sanitise
  # gene names, and names(mean.score) keeps everything consistent with that.
  score.gene <- -as_tibble(data.frame(score.gene))
  mean.score <- apply(score.gene, 2, mean)
  score.df <- gather(score.gene,
                     key = "alteration",
                     value = "score")
  # Order the factor levels (and hence the plot) by mean simulated score.
  score.df <- score.df %>%
    mutate(alteration = factor(alteration, levels = names(mean.score)[order(mean.score, decreasing=TRUE)]))
  # data_summary() supplies the mean +/- spread used by stat_summary below.
  source("./src/lib/R/plot/misc/data_summary.R")
  pp <- ggplot(score.df, aes(x=alteration,y=score)) +
    stat_summary(fun.data=data_summary) +
    xlab("") +
    ylab("relative order") +
    coord_flip() +
    theme(axis.text.x = element_blank(),
          axis.ticks = element_blank(),
          legend.text = element_text(size=20),
          panel.grid = element_blank())
  if (add.bars)
    pp <- pp +
      geom_hline(aes(yintercept=dummy.mean - dummy.sd), linetype="longdash") +
      geom_hline(aes(yintercept=dummy.mean + dummy.sd), linetype="longdash")
  if (!is.null(group.genes))
  {
    # Colour each y-axis label by the group its alteration belongs to.
    cols.axis <- group.genes$colours[ match(levels(score.df$alteration), group.genes$alterations )]
    pp <- pp + theme(axis.text.y=element_text(color=cols.axis))
  }
  league.out <- list(ranking = score.df, p = pp)
  return(league.out)
}
|
32ed73242b379fb5b76c98d946338fbe53eb33f0
|
41b50c2e16db59df993603387ee33df698e5fdd3
|
/code/prepare_data_MeDeCom_CCSP.R
|
dae80f6ce71e761f639a09045271a0297db5edb6
|
[] |
no_license
|
tkik/Lung_CoO_2
|
f407d8f7c124fa0f3d29b11671840e9b9b78f4e9
|
94a415c26b97ea8e8aa37fc851b3be0d04e9b858
|
refs/heads/master
| 2023-06-08T18:15:56.333197
| 2021-07-01T08:24:11
| 2021-07-01T08:24:11
| 349,067,228
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,413
|
r
|
prepare_data_MeDeCom_CCSP.R
|
# Prepare a combined methylation matrix (CCSP + MsSPC control samples) as
# input for MeDeCom deconvolution.
library(methrix)
library(data.table)
library(HDF5Array)
library(SummarizedExperiment)
library(ggplot2)
library(rtracklayer)
library(limma)
library(knitr)
library(pheatmap)
library(MeDeCom)
library(ggsci)
###########libraries and functions#############
# Resolve project roots: mapped network drives on Windows, mounted paths
# otherwise.
on_windows <- grepl("Windows", Sys.getenv("OS"))
PATH <- if (on_windows) "V:/" else "/C010-projects/"
PATH_Y <- if (on_windows) "N:/" else "/C010-datasets/"
DATA <- paste0(PATH, "Reka/33_CoO_lung/CoO_Lung_Cancer/data/")
RESULT <- paste0(PATH, "Reka/33_CoO_lung/CoO_Lung_Cancer/output/")
CALLS <- paste0(PATH_Y, "External/2018-10-Sotillo/data/methylDackel/")
DOCS <- paste0(PATH, "Reka/33_CoO_lung/CoO_Lung_Cancer/docs/")
# SNP-filtered methrix object produced earlier in the pipeline.
res <- readRDS(paste0(DATA, "no_snps_methrix.RDS"))
# Keep only the MsSPC control samples (matched by full_name in colData).
spc_samples <- attr(res@colData, "rownames")[grep("MsSPC_control", attr(res@colData, "listData")$full_name)]
res_SSPC <- methrix::subset_methrix(res, samples = spc_samples)
# Methylation matrix with "chr_start" rownames; drop the three locus columns.
mat_SSPC <- as.data.frame(get_matrix(res_SSPC, add_loci = TRUE))
rownames(mat_SSPC) <- paste0(mat_SSPC$chr, "_", mat_SSPC$start)
mat_SSPC <- mat_SSPC[, -(1:3)]
# Align the SPC rows to the already-prepared CCSP matrix, combine both sample
# sets, and keep complete rows only.
mat_CCSP <- readRDS(file = file.path(PATH_Y, "External/Sotillo_mouse_lung_cell/data/MeDeCom_MCCSP_prepared_filt.rds"))
mat_SSPC <- mat_SSPC[rownames(mat_CCSP), ]
mat <- as.matrix(cbind(mat_CCSP, mat_SSPC))
mat <- mat[complete.cases(mat), ]
saveRDS(mat, file = file.path(DATA, "MeDeCom_MCCSP_SPC_prepared_filt.rds"))
|
55148bbfbb37f16828aa402f9d08e0cefb7043c1
|
c2bbe165858014ea7fd226710fa3dc1f4af36fe8
|
/man/gwa.Rd
|
c941c642f433723a566c29c5e1c6fbe0d8f4543d
|
[] |
no_license
|
tonymugen/GWAlikeMeth
|
5781bb71e0ac79d6772d405a025f4379a52453cc
|
8245a7224ba4253681f54aaedd17a60b4520bedf
|
refs/heads/master
| 2020-04-16T00:34:56.464885
| 2020-01-31T22:28:18
| 2020-01-31T22:28:18
| 165,144,017
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,472
|
rd
|
gwa.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interfaceFunctions.R
\name{gwa}
\alias{gwa}
\title{Single-marker genome-wide association}
\usage{
gwa(Y, K, X = NULL, repFactor = NULL, snps, nPerm = NULL,
nThreads = NULL)
}
\arguments{
\item{Y}{matrix of phenotypes; phenotypes should be in columns.}
\item{K}{relationship matrix}
\item{X}{fixed effect matrix (optional, do not include the intercept)}
\item{repFactor}{factor relating genotypes to replicates (optional)}
\item{snps}{matrix of genotypes (\code{NA} for missing data, lines as rows, SNPs as columns)}
\item{nPerm}{number of permutations to calculate \eqn{q}-values}
\item{nThreads}{suggested number of threads; if \code{NULL} or 0, maximum number will be used}
}
\value{
a list containing a matrix of random effects (named \code{ranef}), a matrix of fixed effects (including the intercept, named \code{fixef}), a vector of heritability estimates (named \code{hSq}), a matrix of \eqn{-\log_{10}p} values (named \code{lPval}), and an optional matrix of \eqn{q}-values (named \code{qVal}).
}
\description{
Performs single-marker GWA with a mixed-model population structure correction. The algorithm is EMMAX. Optionally supports replicated genotypes and fixed-effect covariates. Empirical \eqn{q}-values can be calculated using permutations if desired. Since all loci are used in the permutation test, the number of samples need not be very large. I get good results with 15 to 25 permutations. Phenotype, relationship matrix, covariate, and replication specification is the same as in \code{\link{mmFit}}. The genotype matrix may have missing data (specified as \code{NA}). Genotypes should be coded as {0, 1, 2}, with 1 as the heterozygote. The lines should be in rows, with the number of rows equal to the number of lines.
}
\details{
It is possible to specify the number of cores to be used. Multithreading is employed only for the SNP effect estimation. If you compiled R with multithreaded BLAS and/or LAPACK, it is safe to use it with this function. The linear algebra functions are run on single threads. The specified number of threads will be taken as a limit. If there are too few genotypes, not all of the suggested cores may be deployed. If the number of threads is \code{NULL}, 0, or not specified, the maximum number of cores will be used, provided there are enough SNPs to do so.
}
\references{
\insertRef{kang08}{GWAlikeMeth}
\insertRef{kang10}{GWAlikeMeth}
}
|
6e94697e93d39bf9a74e5de5621255ebac0290b6
|
43cf53d4395e950b16ac065c9badff519fd77f9f
|
/plot5.R
|
13d150dc7f68a75d2f15c418b2bf2d40a2710051
|
[] |
no_license
|
DEHDailey/ExData_Project2
|
567ab77193ebf263a48473f1b1122c8493fb2ebc
|
f5e90b53a25d71daf17c000eafde38f6d1315da2
|
refs/heads/master
| 2016-09-11T09:53:39.997608
| 2014-07-11T17:09:51
| 2014-07-11T17:09:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,086
|
r
|
plot5.R
|
## Coursera Exploratory Data Analysis (exdata-004) Course Project 2
## plot5.R: Creates an exploratory plot for Question 5:
## 5. How have emissions from motor vehicle sources changed from 1999–2008 in
## Baltimore City?
## Between 1999 and 2008, vehicular emissions of PM2.5 in Baltimore City have
## decreased markedly, from a 1999 level of nearly 350 tons to a 2008 level of
## less than 100 tons.
## Code to (a) unzip RDS files from original zip file; (b) read RDS files and
## save their contents as RData files; (c) read RData objects into the
## workspace. This code will do the least work necessary to get the data.frames
## into the workspace. This code is repeated in all R scripts for this
## assignment because we've been asked to keep the R scripts self-contained; in
## a real-world application, I would either write this code into a separate file
## that is source()d by this one, or create all the plots in a single R script.
zip.filename <- "exdata-data-NEI_data.zip"
rds.filenames <- c( NEI = "summarySCC_PM25.rds", SCC = "Source_Classification_Code.rds" )
rdata.filenames <- gsub( 'rds$', 'rdata', rds.filenames )
## This loop runs purely for its side effects (caching files on disk and
## loading NEI / SCC into the global environment), so a plain `for` loop
## replaces the original `sapply()` whose return value was discarded
## (sapply's simplified return type is input-dependent, which makes it a
## poor fit for non-interactive, side-effect-only code).
for ( nom in names( rds.filenames ) ) {    ## Loops over the *names*: "NEI", "SCC"
  if( !( rdata.filenames[ nom ] %in% dir() ) ) {  ## Rdata file does not exist; create it from RDS
    if( !( rds.filenames[ nom ] %in% dir() ) ) {  ## RDS file does not exist; extract it from zip
      message( sprintf( "Unzipping %s from %s...", rds.filenames[ nom ], zip.filename ) )
      unzip( zip.filename, rds.filenames[ nom ] )
    }
    message( sprintf( "Reading %s from disk...", rds.filenames[ nom ] ) )
    myData <- readRDS( rds.filenames[ nom ] )
    assign( nom, myData )
    message( sprintf( "Saving %s data as rdata object...", nom ) )
    save( list=nom, file=rdata.filenames[ nom ] )
  }
  message( sprintf( "Loading %s into workspace...", rdata.filenames[ nom ] ) )
  load( rdata.filenames[ nom ], .GlobalEnv )
}
## Data loaded; now manipulate the data and create the plot.
message( 'Subsetting for Baltimore City...' )
## fips 24510 identifies Baltimore City in this data set.
NEI.BAL <- subset( NEI, fips == '24510')
message( 'Identifying and subsetting for motor-vehicle sources...' )
## Motor-vehicle sources = SCC codes whose EI.Sector contains "Vehicle".
vehicleSCCs <- as.character( with( SCC, SCC[ grep( 'Vehicle', EI.Sector ) ] ) )
NEI.BAL.Vehicles <- subset( NEI.BAL, SCC %in% vehicleSCCs )
message( 'Summing up emissions by year...' )
SumByYear <- with( NEI.BAL.Vehicles, aggregate( Emissions, by=list( year ), FUN = sum ) )
colnames( SumByYear ) <- c( 'Year', 'Total.PM25' )
## Convenience columns in thousands / millions of tons (not used by the plot).
SumByYear$Total.PM25.K <- with( SumByYear, Total.PM25/1e3 )
SumByYear$Total.PM25.M <- with( SumByYear, Total.PM25/1e6 )
library( ggplot2 )
png( 'plot5.png' )
## NOTE(review): "Emissons" in the title is a typo in the rendered text;
## left untouched here because this change only adds comments.
g <- (
qplot( Year, Total.PM25, data=SumByYear, geom='line',
ylim=range( c( 0, SumByYear$Total.PM25 ) ), ylab=expression( 'Total ' * PM[2.5] * ' (tons)'),
main='Emissons from Vehicular Sources in Baltimore City, 1999-2008')
+ geom_line( size=1.2 )
)
## Dashed gray baseline makes clear the y-axis is anchored at zero.
g <- g + geom_hline( yintercept=0, linetype=2, color='darkgray' )
print(g)
dev.off()
|
7585e0897c1237c04f37a8c86838e9b7acba7b11
|
424710e3b6366696935e29f8b0f6220a29e2d545
|
/rsm-msba/connections/rsm-msba-postgres.R
|
d0bbd49f27f93957ca52caf898c8373c540a99ee
|
[
"BSD-3-Clause"
] |
permissive
|
imccommons/docker
|
661111548eee1a8d89ac8610e32507325feffc16
|
80be5876cb88917d196316011dca65550d9c211e
|
refs/heads/master
| 2020-04-08T20:25:17.010484
| 2018-11-28T22:07:26
| 2018-11-28T22:07:26
| 159,698,519
| 1
| 0
| null | 2018-11-29T16:49:15
| 2018-11-29T16:49:15
| null |
UTF-8
|
R
| false
| false
| 328
|
r
|
rsm-msba-postgres.R
|
# Open a DBI connection from R to the PostgreSQL service in the rsm-msba
# container stack.
# NOTE(review): user / dbname / password "postgres" presumably match the
# bundled container's defaults -- confirm against the compose setup.
library(DBI)
library(RPostgreSQL)
con <- dbConnect(
dbDriver("PostgreSQL"),
user = "postgres",
host = "postgres", ## use when establishing a connection between containers
# host = "127.0.0.1", ## use when connection to postgres from local Rstudio
port = 5432,
dbname = "postgres",
password = "postgres"
)
|
eceac028dd7eff3479452077ea64707bb861dba7
|
52b84546a64b4f31245eb0bfaa68bfa489c90534
|
/sta141a/2016/discussion10.R
|
64a50a187ede002e5923a4a10e20c17fc82e4533
|
[
"CC-BY-NC-SA-4.0"
] |
permissive
|
nick-ulle/teaching-notes
|
6cb48d874ef4c8c99402b9987e58b2958adff056
|
12e388f626f415bd39543bfed99c44e4130a065b
|
refs/heads/master
| 2023-02-20T12:55:06.521649
| 2023-02-05T02:53:22
| 2023-02-05T02:53:22
| 86,759,329
| 31
| 33
|
BSD-3-Clause-Clear
| 2019-01-15T15:44:11
| 2017-03-30T23:49:53
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 4,563
|
r
|
discussion10.R
|
# discussion10.R
# Week 10
# -------
# How the Singapore Circle Line rogue train was caught with data:
#
# https://goo.gl/Raz9FB
#
# Please fill out TA evals!
# The Bootstrap
# -------------
library(readr)
library(broom)
# Elmhurst College data: gift aid modeled as a linear function of family income.
elm = read_tsv("../data/elmhurst.txt")
mod = lm(gift_aid ~ family_income, elm)
coef(mod)
# Residual bootstrap (resample residuals)
# augment() returns the data plus per-observation columns (.fitted, .resid,
# ...) that resample() below relies on.
resid = augment(mod)
resample = function(data) {
  # Residual bootstrap: draw residuals with replacement, rebuild the
  # response as fitted + resampled residual, refit the same linear model,
  # and return its coefficients.
  #
  # `data` is the output of broom::augment(mod): the original columns plus
  # .fitted and .resid.
  n_obs = nrow(data)
  # Sample row indices rather than residual values directly.
  boot_idx = sample(n_obs, n_obs, replace = TRUE)
  # y*_i = b_0 + b_1 * x_i + e*_i  (new response, old covariates)
  data$gift_aid = data$.fitted + data$.resid[boot_idx]
  # Refit the identical model on the perturbed response.
  coef(lm(gift_aid ~ family_income, data))
}
# Bootstrap 400 times. The replicate() function is just a nice way to write
#
# sapply(1:400, function(i) resample(resid))
#
boot = replicate(400, resample(resid))
# boot is 2 x 400: row 1 holds the bootstrapped intercepts, row 2 the
# bootstrapped slopes (one column per bootstrap sample).
# Now compute statistics on the bootstrap samples. Each column is one bootstrap
# sample and each row is one statistic.
#
# For example, for 90% confidence intervals:
ci_intercept = quantile(boot[1, ], c(0.05, 0.95))
ci_slope = quantile(boot[2, ], c(0.05, 0.95))
# In assignment 5 problem 1, part (iv) has you repeat all of steps (i)-(iii).
#
# Part (iii) is a bootstrap. Part (iv) is NOT a bootstrap, it's just a repeated
# experiment.
#
# A good strategy is to write a function that performs steps (i)-(iii) and then
# call it with an apply function. The only input to your function should be the
# random seed. So a skeleton is:
# Skeleton for assignment 5 problem 1: run steps (i)-(iii) once for a given
# random seed. Students fill in the three parts; `ci_widths` is intentionally
# undefined until they do.
prob1 = function(seed) {
set.seed(seed) # only set the seed once, at the beginning
# Part 1
# ...
# Part 2
# ...
# Part 3
# ...
# Return widths of both the theoretical and bootstrap confidence intervals:
return (ci_widths)
}
# Part 4
#all_ci_widths = sapply(1:10, prob1)
# ...
# Classification
# --------------
# Assignment 5 problem 2 has you use three different models for classification:
#
# * Logistic Regression: use glm(), a built-in function
# * Linear Discriminant Analysis (LDA): use lda() in package MASS
# * k-Nearest Neighbors (kNN): use knn() in package class
#
# You'll also need the predict() function to get predictions for LDA and
# logistic regression, and the table() function to create confusion matrices.
#
# Below is an example with the Spambase data set. In the Spambase data set,
# each row is one email. The columns have statistics about the emails (word
# counts, letter counts, etc) and also a classification (spam or ham).
spam = readRDS("../data/spambase.rds")

# Sample 10% of data from each class to use as test data (20% total).
# Stratified sampling keeps the class balance of the test set fixed.
n_test_half = ceiling(nrow(spam) * 0.10)

set.seed(10)
idx_test = c(
  sample(which(spam$class == "spam"), n_test_half, replace = FALSE),
  sample(which(spam$class == "ham"), n_test_half, replace = FALSE)
)
spam_test = spam[idx_test, ]
spam_train = spam[-idx_test, ]

# Fit logistic model on training data. Use family = binomial for logistic.
log_model = glm(class ~ capital_run_length_total, spam_train,
  family = binomial)

# Predict for test data. Use type = "response" to get class probabilities.
# NOTE(review): glm() with a factor response models the probability of the
# SECOND factor level -- confirm the level order of spam$class matches the
# 0.5-threshold recoding below.
log_pred = predict(log_model, spam_test, type = "response")
# Convert predictions to 1 or 2, for category 1 or 2 respectively.
log_pred = (log_pred > 0.5) + 1
# Convert predictions to spam or ham, same category order as original data.
log_pred = levels(spam_train$class)[log_pred]

# Make a confusion matrix by tabulating true classes against predicted classes.
log_con = table(true = spam_test$class, model = log_pred)

# Steps for fitting LDA are similar to those for fitting a linear or logistic
# regression model.
#
# Use lda() from package MASS to fit the model on the training data, and use
# predict() to predict for the test data.

# Use knn() from package class for k-nearest neighbors.
library(class)

# Fit knn (k = 3) model. knn() trains and predicts in a single call; there is
# no separate model object.
knn_pred = knn(
  # Note the use of [ ] rather than $ or [[ ]].
  #
  # The knn() function expects a matrix or data frame for the train and test
  # arguments. Using $ or [[ ]] would get a vector rather than a data frame.
  #
  train = spam_train["capital_run_length_total"], # 1-col data frame
  test = spam_test["capital_run_length_total"],   # 1-col data frame
  cl = spam_train$class,                          # vector
  k = 3
)

# Confusion matrix.
knn_con = table(true = spam_test$class, model = knn_pred)
|
07131491f88d21d31d97c45e082ec4aa6575e6b7
|
d8aa7961d1b6a9b4c95031095625915fea43a5b5
|
/R/tuneLearn.R
|
12f01b29d1e4693881d28a7f87bfd7b07f054c01
|
[] |
no_license
|
mfasiolo/qgam
|
4b74cdbdc193f198424f3dcc0ffb5ad3f045aa5a
|
70fe0b2013c1d4b395bf6f3a53ba15a8d206db49
|
refs/heads/master
| 2023-02-27T07:19:28.703301
| 2023-02-06T23:30:10
| 2023-02-06T23:30:10
| 56,142,623
| 27
| 8
| null | 2019-02-06T13:26:50
| 2016-04-13T10:28:50
|
R
|
UTF-8
|
R
| false
| false
| 10,790
|
r
|
tuneLearn.R
|
##########################
#' Tuning the learning rate for Gibbs posterior
#'
#' @description The learning rate (sigma) of the Gibbs posterior is tuned either by calibrating the credible intervals for the fitted
#' curve, or by minimizing the pinball loss on out-of-sample data. This is done by bootrapping or by k-fold cross-validation.
#' Here the calibration loss function is evaluated on a grid of values provided by the user.
#'
#' @param form A GAM formula, or a list of formulae. See ?mgcv::gam details.
#' @param data A data frame or list containing the model response variable and covariates required by the formula.
#' By default the variables are taken from environment(formula): typically the environment from which gam is called.
#' @param lsig A vector of value of the log learning rate (log(sigma)) over which the calibration loss function is evaluated.
#' @param qu The quantile of interest. Should be in (0, 1).
#' @param err An upper bound on the error of the estimated quantile curve. Should be in (0, 1).
#' Since qgam v1.3 it is selected automatically, using the methods of Fasiolo et al. (2017).
#' The old default was \code{err=0.05}.
#' @param multicore If TRUE the calibration will happen in parallel.
#' @param ncores Number of cores used. Relevant if \code{multicore == TRUE}.
#' @param cluster An object of class \code{c("SOCKcluster", "cluster")}. This allowes the user to pass her own cluster,
#' which will be used if \code{multicore == TRUE}. The user has to remember to stop the cluster.
#' @param paropts a list of additional options passed into the foreach function when parallel computation is enabled.
#' This is important if (for example) your code relies on external data or packages:
#' use the .export and .packages arguments to supply them so that all cluster nodes
#' have the correct environment set up for computing.
#' @param control A list of control parameters for \code{tuneLearn} with entries: \itemize{
#' \item{\code{loss} = loss function use to tune log(sigma). If \code{loss=="cal"} is chosen, then log(sigma) is chosen so that
#' credible intervals for the fitted curve are calibrated. See Fasiolo et al. (2017) for details.
#' If \code{loss=="pin"} then log(sigma) approximately minimizes the pinball loss on the out-of-sample
#' data.}
#' \item{\code{sam} = sampling scheme use: \code{sam=="boot"} corresponds to bootstrapping and \code{sam=="kfold"} to k-fold
#' cross-validation. The second option can be used only if \code{ctrl$loss=="pin"}.}
#' \item{\code{K} = if \code{sam=="boot"} this is the number of boostrap datasets, while if \code{sam=="kfold"} this is the
#' number of folds. By default \code{K=50}.}
#' \item{\code{b} = offset parameter used by the mgcv::gauslss. By default \code{b=0}.}
#' \item{\code{vtype} = type of variance estimator used to standardize the deviation from the main fit in the calibration.
#' If set to \code{"m"} the variance estimate obtained by the full data fit is used, if set to \code{"b"}
#' than the variance estimated produced by the bootstrap fits are used. By default \code{vtype="m"}.}
#' \item{\code{epsB} = positive tolerance used to assess convergence when fitting the regression coefficients on bootstrap data.
#' In particular, if \code{|dev-dev_old|/(|dev|+0.1)<epsB} then convergence is achieved.
#' Default is \code{epsB=1e-5}.}
#' \item{\code{verbose} = if TRUE some more details are given. By default \code{verbose=FALSE}.}
#' \item{\code{link} = link function to be used. See \code{?elf} and \code{?elflss} for defaults.}
#' \item{\code{progress} = argument passed to plyr::llply. By default \code{progress="text"} so that progress
#' is reported. Set it to \code{"none"} to avoid it.}
#' }
#' @param argGam A list of parameters to be passed to \code{mgcv::gam}. This list can potentially include all the arguments listed
#' in \code{?gam}, with the exception of \code{formula}, \code{family} and \code{data}.
#' @return A list with entries: \itemize{
#' \item{\code{lsig} = the value of log(sigma) resulting in the lowest loss.}
#' \item{\code{loss} = vector containing the value of the calibration loss function corresponding
#' to each value of log(sigma).}
#' \item{\code{edf} = a matrix where the first colums contain the log(sigma) sequence, and the remaining
#' columns contain the corresponding effective degrees of freedom of each smooth.}
#' \item{\code{convProb} = a logical vector indicating, for each value of log(sigma), whether the outer
#' optimization which estimates the smoothing parameters has encountered convergence issues.
#' \code{FALSE} means no problem.}
#' }
#' @author Matteo Fasiolo <matteo.fasiolo@@gmail.com>.
#' @references Fasiolo, M., Wood, S.N., Zaffran, M., Nedellec, R. and Goude, Y., 2020.
#' Fast calibrated additive quantile regression.
#' Journal of the American Statistical Association (to appear).
#' \url{https://www.tandfonline.com/doi/full/10.1080/01621459.2020.1725521}.
#' @examples
#' library(qgam); library(MASS)
#'
#' # Calibrate learning rate on a grid
#' set.seed(41444)
#' sigSeq <- seq(1.5, 5, length.out = 10)
#' closs <- tuneLearn(form = accel~s(times,k=20,bs="ad"),
#' data = mcycle,
#' lsig = sigSeq,
#' qu = 0.5)
#'
#' plot(sigSeq, closs$loss, type = "b", ylab = "Calibration Loss", xlab = "log(sigma)")
#'
#' # Pick best log-sigma
#' best <- sigSeq[ which.min(closs$loss) ]
#' abline(v = best, lty = 2)
#'
#' # Fit using the best sigma
#' fit <- qgam(accel~s(times,k=20,bs="ad"), data = mcycle, qu = 0.5, lsig = best)
#' summary(fit)
#'
#' pred <- predict(fit, se=TRUE)
#' plot(mcycle$times, mcycle$accel, xlab = "Times", ylab = "Acceleration",
#' ylim = c(-150, 80))
#' lines(mcycle$times, pred$fit, lwd = 1)
#' lines(mcycle$times, pred$fit + 2*pred$se.fit, lwd = 1, col = 2)
#' lines(mcycle$times, pred$fit - 2*pred$se.fit, lwd = 1, col = 2)
#'
tuneLearn <- function(form, data, lsig, qu, err = NULL,
                      multicore = !is.null(cluster), cluster = NULL, ncores = detectCores() - 1, paropts = list(),
                      control = list(), argGam = NULL)
{
  # This method tunes the learning rate for one quantile at a time.
  if( length(qu) > 1 ) stop("length(qu) > 1, but this method works only for scalar qu")

  # Removing all NAs, unused variables and factor levels from data
  data <- .cleanData(.dat = data, .form = form, .drop = argGam$drop.unused.levels)

  # The loss is evaluated over the sorted grid of log-learning-rates.
  lsig <- sort( lsig )

  # Setting up control parameter (defaults; user entries override below)
  ctrl <- list( "loss" = "calFast", "sam" = "boot", "K" = 50, "b" = 0, "vtype" = "m", "epsB" = 1e-5, "verbose" = FALSE,
                "link" = "identity",
                "progress" = ifelse(multicore, "none", "text") )

  # Checking if the control list contains unknown names. Entries in "control" substitute those in "ctrl"
  ctrl <- .ctrlSetup(innerCtrl = ctrl, outerCtrl = control)

  if( ctrl$progress == FALSE ) { ctrl$progress <- "none" }
  # Validate the control entries and their mutual compatibility.
  if( !(ctrl$vtype%in%c("m", "b")) ) stop("control$vtype should be either \"m\" or \"b\" ")
  if( !(ctrl$loss%in%c("calFast", "cal", "pin")) ) stop("control$loss should be either \"cal\", \"pin\" or \"calFast\" ")
  if( !(ctrl$sam%in%c("boot", "kfold")) ) stop("control$sam should be either \"boot\" or \"kfold\" ")
  if( (ctrl$loss=="cal") && (ctrl$sam=="kfold") ) stop("You can't use control$sam == \"kfold\" when ctrl$loss==\"cal\" ")
  if( length(argGam$sp) && ctrl$loss != c("calFast") ){ stop("Cannot fix smoothing parameters unless control$loss == \"calFast\".") }

  n <- nrow(data)
  nt <- length(lsig)

  # Gaussian fit, used for initializations
  # NB Initializing smoothing parameters using gausFit is a very BAD idea
  if( is.formula(form) ) {
    # Single formula: plain Gaussian GAM fit.
    gausFit <- do.call("gam", c(list("formula" = form, "data" = quote(data),
                                     "family" = gaussian(link=ctrl[["link"]])), argGam))
    varHat <- gausFit$sig2
    # Shift the Gaussian intercept towards the empirical qu-quantile of the
    # residuals to warm-start the quantile fit.
    initM <- list("start" = coef(gausFit) + c(quantile(gausFit$residuals, qu), rep(0, length(coef(gausFit))-1)),
                  "in.out" = NULL) # let gam() initialize sp via initial.spg()
    formL <- form
  } else {
    # List of formulae: Gaussian location-scale fit (gaulss).
    gausFit <- do.call("gam", c(list("formula" = form, "data" = quote(data),
                                     "family" = gaulss(link=list(ctrl[["link"]], "logb"), b=ctrl[["b"]])), argGam))
    varHat <- 1/gausFit$fit[ , 2]^2
    initM <- list("start" = NULL, "in.out" = NULL) # No clue how to initialize here
    formL <- form[[1]]
  }

  # Get loss smoothness (automatic selection of err; see Fasiolo et al.)
  if( is.null(err) ){ err <- .getErrParam(qu = qu, gFit = gausFit) }

  # For each value of 'lsig' fit on full data
  main <- .tuneLearnFullFits(lsig = lsig, form = formL, fam = "elf", qu = qu, err = err,
                             ctrl = ctrl, data = data, argGam = argGam, gausFit = gausFit,
                             varHat = varHat, initM = initM)

  # Get score for each value of 'lsig'
  outLoss <- if( ctrl$loss == "calFast" ){ # Fast calibration (loss already calculated) OR ...
    sapply(main[["store"]], "[[", "loss")
  } else { # ... bootstrapping or cross-validation
    .tuneLearnBootstrapping(lsig = lsig, form = formL, fam = "elf", qu = qu, ctrl = ctrl,
                            data = data, store = main[["store"]], pMat = main[["pMat"]],
                            argGam = argGam, multicore = multicore, cluster = cluster,
                            ncores = ncores, paropts = paropts)
  }
  names( outLoss ) <- lsig

  # convProb indicates whether there have been convergence problems during smoothing parameter estimation
  convProb <- sapply(main[["store"]], "[[", "convProb")
  names(convProb) <- lsig

  # Best lsig is the grid value minimising the loss.
  out <- list("lsig" = lsig[which.min(outLoss)], "loss" = outLoss,
              "edf" = main[["edfStore"]], "convProb" = convProb)
  attr(out, "class") <- "learn"

  return( out )
}
|
47af63e9a6909bae62efc4d40d0491bd7b3eb32c
|
08c20460437f3922026b18836fc4f9e940086ba4
|
/R/theme_report.R
|
106210e58fb8b080a8cf6ebc393367904a375ace
|
[] |
no_license
|
purplezippo/ggpkt
|
5f0d96196ed14c5a5b2b30766d507c12171ff762
|
c13fc9d31c1c665fdadacb3f72952c63708269c3
|
refs/heads/master
| 2020-04-06T16:52:35.254481
| 2019-04-02T14:23:12
| 2019-04-02T14:23:12
| 157,637,819
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,365
|
r
|
theme_report.R
|
#' Theme for reports
#'
#' This theme is used to create figures in data analysis reports.
#'
#' @param font.size.legend.title Font size of legend title.
#' @param font.size.legend.text Font size of legend text.
#' @param font.size.axis.x Font size of axis x.
#' @param font.size.axis.y Font size of axis y.
#' @return A ggplot2 theme object based on \code{theme_light()}.
#' @family themes
#' @example inst/examples/ex-theme_report.R
#' @export
theme_rp_light <- function(
  font.size.legend.title = 12, font.size.legend.text = 10,
  font.size.axis.x = 12, font.size.axis.y = 12){
  theme_light() + theme(
    # Fonts
    title = element_text(size = 20, colour = 'black'),
    legend.text = element_text(size = font.size.legend.text),
    legend.title = element_text(size = font.size.legend.title),
    axis.text.x = element_text(size = font.size.axis.x, face = "bold"),
    axis.text.y = element_text(size = font.size.axis.y),
    axis.title.x = element_text(size = 12),
    axis.title.y = element_text(size = 12),
    # axis.line.x = element_line(size = 1),
    # Grid lines (thin solid majors, thin dashed minors)
    panel.grid.major.x = element_line(size = .2),
    panel.grid.minor.x = element_line(size = .2, linetype = 'dashed'),
    panel.grid.major.y = element_line(size = .2),
    panel.grid.minor.y = element_line(size = .2, linetype = 'dashed'),
    panel.background = element_rect(fill = 'white'),
    # Panel border (removed)
    panel.border = element_blank()
  )
}
|
694d81200249722b9e2638d71c867bed317ca437
|
ba34b341270e79fdc6e3cffe50a6bc25c0506de3
|
/R/checks.R
|
0b799446ad4ad06038ed85f1def007673a468532
|
[] |
no_license
|
ekstroem/DKcpr
|
b804fd81c3fc6a22653ef3de869547f575f5c9ee
|
0ee38fc6393c66768ac4ec77f7cd962fe0844162
|
refs/heads/master
| 2020-04-08T21:42:56.798282
| 2019-03-27T00:31:27
| 2019-03-27T00:31:27
| 159,755,540
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,445
|
r
|
checks.R
|
#' Check the modulo 11 rules for CPR numbers
#'
#' @param cpr A vector of CPR number strings
#' @return A logical vector with values TRUE (CPR number ok), FALSE (CPR number not ok), or NA (cannot check)
#' @export
check_mod11 <- function(cpr) {
  cpr <- clean_cprstring(cpr)
  # Numbers without a parseable date of birth cannot be checked.
  no.date <- is.na(date_of_birth(cpr))
  # One row per CPR number, one column per digit.
  digits <- stringr::str_split_fixed(cpr, "", 10)
  storage.mode(digits) <- "integer"
  # Official modulo-11 weights for the ten digits.
  weights <- c(4, 3, 2, 7, 6, 5, 4, 3, 2, 1)
  ok <- as.vector(((digits %*% weights) %% 11) == 0)
  ok[no.date] <- NA
  ok
}
#' Check whether the CPR number is valid
#'
#' @param cpr A vector of CPR number strings
#' @return A logical vector with values TRUE (CPR number valid), FALSE (CPR number not valid), or NA (Not a proper date)
#' @export
is_cpr <- function(cpr) {
  # A valid CPR number must at least encode a parseable date of birth.
  birth <- date_of_birth(cpr)
  # The modulo-11 rule only applies to numbers issued before
  # October 1st 2007; later numbers are accepted on the date alone.
  ifelse(birth <= as.Date("2007-09-30"), check_mod11(cpr), TRUE)
}
#' Clean CPR number strings
#'
#' Strips all non-digit characters from each input string and invalidates
#' entries that do not reduce to exactly 10 digits.
#'
#' @param cpr A vector of potential CPR number strings
#' @return A character vector of cleaned 10-digit CPR number strings, with
#'   \code{NA} for entries that do not contain exactly 10 digits
#' @examples
#' cpr <- c("1010101234", "11111111", "101010-1234", "Cprnr : 101010-1234")
#' clean_cprstring(cpr)
#' @export
clean_cprstring <- function(cpr) {
  # Remove all non-digit characters (separators, labels, whitespace)
  cpr <- gsub("[^0-9]", "", cpr)
  # A valid CPR number has exactly 10 digits; everything else becomes NA
  cpr[nchar(cpr) != 10] <- NA
  cpr
}
|
f68af76b19102a146280e146da9296ac76fa5dd0
|
6ec7d30e63bab3594c8f80f5cbc0ac763d74b4f6
|
/RPackage/MeshDataSimplification/demo/example.R
|
91256a8b94dd0f5c9eebb750127cb93546f16600
|
[] |
no_license
|
Abdelilah-Majid/meshsimplification
|
1a85ccec3a333c002de83fe52e65d05fea6ee434
|
99cf4da098c17f0f684bc13349371629fbae9ad8
|
refs/heads/master
| 2021-12-14T15:07:43.374196
| 2017-05-03T16:51:54
| 2017-05-03T16:51:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,268
|
r
|
example.R
|
#' Driver running the simplification process within R.
#'
#' @param dataset name of dataset storing the mesh as an object of class SURFACE_MESH:
#' - pawn: pawn geometry, 2522 nodes;
#' - bunny: Stanford Bunny geometry, 34834 nodes;
#' - brain: cortical surface, 40962 nodes.
#' @param n1,n2 two different levels of simplification
#' @param wgeom weight for the geometric cost function
#' @param wdisp weight for the displacement cost function
#' @param wequi weight for the equidistribution cost function

# Script parameters; the three cost weights sum to one.
dataset <- "pawn"
n1 <- 2000
n2 <- 1000
wgeom <- 1/3
wdisp <- 1/3
wequi <- 1/3

#
# Initialize simplification framework and plot initial mesh
#
library(methods)
library(utils)
# NOTE(review): library() would be preferable to require() here, since
# require() only warns (returns FALSE) when the package is missing.
require(MeshDataSimplification)

# Load the requested demo mesh; each data() call defines the variable `mesh`.
if (dataset == "pawn")
  data(pawn)
if (dataset == "bunny")
  data(bunny)
if (dataset == "brain")
  data(brain)

x <- setup.simplification(mesh)
# NOTE(review): wgeom, wdisp and wequi are defined above but never passed to
# setup.simplification() -- confirm whether they should be supplied here.
plot.surface.mesh(x, main = sprintf("Original mesh, %i nodes", mesh$nnodes))

#
# Simplified the mesh, then plot it again
#
y1 <- run.simplification(x, n1)
# NOTE(review): `x` (not `y1`) is plotted -- presumably run.simplification()
# updates `x` in place via reference semantics; verify.
plot.surface.mesh(x, main = sprintf("Simplified mesh, %i nodes", n1))

#
# Resume the simplification, then plot the final mesh
#
y2 <- run.simplification(x, n2)
plot.surface.mesh(x, main = sprintf("Simplified mesh, %i nodes", n2))
|
5409cb12a4487d70bc56c21918c28c11cad1b35c
|
832f70afec26f748ce8b3f5ba337d235227dd40d
|
/R/srrr_fit.R
|
25d67392a0ef94a23ff70d32912022a4b19bef18
|
[] |
no_license
|
zsaijun/StatComp18032
|
8238e56fa3a467b0d48a1af74e4a15f9c8a44e56
|
41599a40a62c64fb3067187d8191d2276b73453f
|
refs/heads/master
| 2020-04-17T06:49:44.302974
| 2019-01-18T04:13:30
| 2019-01-18T04:13:30
| 165,175,872
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,474
|
r
|
srrr_fit.R
|
#' @title Fitting Sparse Reduced-Rank Regression
#'
#' @description Given a response matrix and a covariate matrix, fits sparse reduced-rank
#' regression for a specified rank by alternating a row-penalized update of the
#' coefficient matrix B (via remMap) with an orthogonal Procrustes update of A.
#' @import remMap
#' @param Y a matrix of response (n by q)
#' @param X a matrix of covariate (n by p)
#' @param lam penalty parameter for B (internally scaled by sqrt(log(p)/n))
#' @param nrank an integer specifying the desired rank
#' @param epsilon convergence tolerance on the change in the objective value
#' @param maxit maximum number of iterations
#' @return a list with elements \code{B} (p by nrank), \code{A} (q by nrank)
#'   and \code{step}, the number of iterations performed
#' @examples
#' \dontrun{
#' data<-Simulation1(Q=1e3+10,n=50,p=20,q=10,rho_x=0.0,rho_e=0.0,s2n=1,p0=10,q0=5,nrank=3)
#' Result<-srrr.fit(Y=data$Y,X=data$X,lam=1,nrank=3,epsilon=1e-3,maxit=1e4)
#' }
#' @export
srrr.fit <- function(Y, X, lam, nrank, epsilon, maxit)
{
  n <- nrow(Y)
  p <- ncol(X)
  r <- nrank
  # Scale the penalty by the usual sqrt(log(p)/n) rate.
  lam <- lam * sqrt(log(p) / n)
  # Warm-start both factors at a small constant value.
  B <- matrix(0.1, p, r)
  A <- matrix(0.1, ncol(Y), r)
  # Preallocate the objective trace; guard `i` in case maxit == 0.
  VALUE <- numeric(maxit)
  i <- 0L
  for (i in seq_len(maxit)) {
    ## 1. B step: row-penalized regression of Y %*% A on X.
    B <- remMap(X, Y %*% A, lamL1 = 0, lamL2 = lam, C.m = NULL)$phi
    ## 2. A step: orthogonal Procrustes solution via SVD of t(Y) %*% X %*% B.
    Z1svd <- svd(t(Y) %*% X %*% B)
    A <- Z1svd$u %*% t(Z1svd$v)
    # Objective: squared Frobenius fit error + group penalty on rows of B
    # (the 2-norm of each row, as in the original norm(B[j,], type = "2")).
    penalty <- sum(sqrt(rowSums(B^2)))
    VALUE[i] <- norm(Y - X %*% B %*% t(A), type = "F")^2 + lam * penalty
    # Stop once the objective change falls below the tolerance.
    if (i > 1 && abs(VALUE[i] - VALUE[i - 1]) < epsilon) break
  }
  list(B = B, A = A, step = i)
}
|
4cbded349901a949b5b64e0faf7185742f697076
|
ab9315145932e1d0edcb1b52df1cc3ed078a317c
|
/man/format_raw_results.Rd
|
df975c7ee4c85b80ab17bb21dc4caa2a3f59787c
|
[] |
no_license
|
johnchower/flashreport
|
cfcbefc1afe5d77da6857ae21323d48ed3a6db96
|
c23dca38331dc9632c251983a2db59cf95dae664
|
refs/heads/master
| 2020-07-28T12:31:42.252620
| 2017-05-03T16:39:10
| 2017-05-03T16:39:10
| 73,411,648
| 0
| 0
| null | 2017-05-03T20:53:11
| 2016-11-10T18:54:18
|
R
|
UTF-8
|
R
| false
| true
| 1,302
|
rd
|
format_raw_results.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query_functions.r
\docType{methods}
\name{format_raw_results}
\alias{format_raw_results}
\alias{format_raw_results,auQuery-method}
\alias{format_raw_results,notificationsQuery-method}
\alias{format_raw_results,paQuery-method}
\title{A generic function to format the raw_results slot of a FlashReportQuery
object into a standardized format that can be rbinded with all other
results.}
\usage{
format_raw_results(frq)
\S4method{format_raw_results}{auQuery}(frq)
\S4method{format_raw_results}{paQuery}(frq)
\S4method{format_raw_results}{notificationsQuery}(frq)
}
\arguments{
\item{frq}{An object that inherits from FlashReportQuery.}
}
\value{
A FlashReportQuery object of the same subtype that was input. The
final_results slot contains a data.frame with columns (user_group,
date_range, variable, value).
}
\description{
A generic function to format the raw_results slot of a FlashReportQuery
object into a standardized format that can be rbinded with all other
results.
}
\section{Methods (by class)}{
\itemize{
\item \code{auQuery}: Format results of an Active Users query.
\item \code{paQuery}: Format results of a platform action query.
\item \code{notificationsQuery}: Format results of a notifications query.
}}
|
c2e8567ae9907ab5b3bbd2f3d9208226e48d4fa1
|
2f94e0f8911055053c30f9ab88e2f66691ca719d
|
/R/detect_duplicate_genomes.R
|
1488ea29d218ceaca45d0f67a0a7a8ae3b908886
|
[] |
no_license
|
anne-laureferchaud/stackr-1
|
4e4dd971dd83a8b238a858d6e1026ab2c6e271ed
|
55837d3005d4e4f13ce5706284e8137c98e8501a
|
refs/heads/master
| 2020-06-17T10:01:15.282997
| 2016-11-28T16:49:44
| 2016-11-28T16:49:44
| 75,013,926
| 0
| 0
| null | 2016-11-28T21:02:47
| 2016-11-28T21:02:46
| null |
UTF-8
|
R
| false
| false
| 19,572
|
r
|
detect_duplicate_genomes.R
|
# Detect duplicate genomes
#' @name detect_duplicate_genomes
#' @title Compute pairwise genome similarity or distance between individuals
#' to highligh potential duplicate individuals
#' @description The function can compute two methods
#' to highligh potential duplicate individuals (1. distance between individuals
#' or 2. pairwise genome similarity).
#' @param data A tidy data frame object in the global environment or
#' a tidy data frame in wide or long format in the working directory.
#' \emph{How to get a tidy data frame ?}
#' Look into \pkg{stackr} \code{\link{tidy_genomic_data}}.
#' @param distance.method (character) The distance measure used inside \code{stats::dist}
#' (<= 30000 markers) or \code{amap::Dist} (> 30000 markers).
#' This must be one of "euclidean", "maximum", "manhattan", "canberra", "binary".
#' Using \code{distance.method = NULL} will not run this method.
#' Default: \code{distance.method = "manhattan"}. This is very fast (max 5 min)
#' compared to the genome similarity method.
#' @param genome (logical) Computes pairwise genome similarity in parallel.
#' The proportion of the shared genotypes is averaged across shared markers between
#' each pairwise comparison. This method makes filtering easier because the
#' threshold is more intuitive with the plots produced, but it's much longer
#' to run, even in parallel, so better to run overnight.
#' Default: \code{genome = FALSE}.
#' @param parallel.core (optional) The number of core for parallel computation.
#' Default: \code{parallel.core = detectCores()-1}.
#' @return A list with potentially 8 objects:
#' \code{$distance }: results of the distance method
#' \code{$distance.stats}: Summary statistics of the distance method
#' \code{$pairwise.genome.similarity}: results of the genome method
#' \code{$genome.stats}: Summary statistics of the genome method
#' \code{$violin.plot.distance}: violin plot showing the distribution of pairwise distances
#' \code{$jitter.plot.distance}: same info different visual with jitter plot
#' \code{$violin.plot.genome}: violin plot showing the distribution of pairwise genome similarities
#' \code{$jitter.plot.genome}: same info different visual with jitter plot
#'
#' Saved in the working directory:
#' individuals.pairwise.dist.tsv, individuals.pairwise.distance.stats.tsv,
#' individuals.pairwise.genome.similarity.tsv, individuals.pairwise.genome.stats.tsv
#' @export
#' @rdname detect_duplicate_genomes
#' @importFrom stringi stri_paste stri_replace_all_fixed
#' @importFrom dplyr rename select group_by filter mutate rename_ filter_ bind_cols bind_rows summarise
#' @importFrom utils combn
#' @importFrom stats na.omit
#' @importFrom amap Dist
#' @importFrom data.table fread
#' @importFrom reshape2 melt
#' @importFrom lazyeval interp
#' @importFrom readr write_tsv
#' @importFrom parallel mclapply
#' @importFrom purrr flatten map
#' @examples
#' \dontrun{
#' # to get pairwise distance only, the simplest way to run:
#' dup <- detect_duplicate_genomes(data = "wombat_tidy.tsv")
#' # This will use by defaul \code{distance.method = "manhattan"},
#' \code{genome = FALSE}, and all my CPU -1 as default for \code{parallel.core}
#'
#' # To view the jitter plot:
#' dup$jitter.plot.distance
#'
#' # to view the data stats
#' dup.data.stats <- dup$distance.stats
#'
#' # to view the data
#' dup.data <- dup$distance
#'
#' # Based on the look of the distribution using both jitter and boxplot,
#' I can filter the dataset to highlight potential duplicates:
#' dup.filtered <- filter(.data = dup.data, DISTANCE < 3000000)
#'
#' # To run the distance (with euclidean distance instead of the default manhattan,
#' # with the genome methods:
#' dup <- detect_duplicate_genomes(
#' data = "wombat_tidy.tsv",
#' distance.method = "euclidean",
#' genome = TRUE
#' )
#' # to view the data of the genome data
#' dup.data <- dup$pairwise.genome.similarity
#'
#' # Based on the look of the distribution using both jitter and boxplot,
#' # I can filter the dataset based on 98% of identical genotype proportion,
#' # to highlight potential duplicates:
#' dup.filtered <- filter(.data = dup.data, PROP_IDENTICAL > 0.98)
#'
#' # Get the list of duplicates id
#' dup.list.names <- data.frame(INDIVIDUALS = unique(c(dup.filtered$ID1, dup.filtered$ID2)))
#' }
#' @author Thierry Gosselin \email{thierrygosselin@@icloud.com}
detect_duplicate_genomes <- function(
data,
distance.method = "manhattan",
genome = FALSE,
parallel.core = detectCores() - 1) {
cat("###############################################################################\n")
cat("######################## stackr: detect_duplicate_genomes ########################\n")
cat("###############################################################################\n")
timing <- proc.time()
# manage missing arguments ---------------------------------------------------
if (missing(data)) stop("missing data argument")
# Import data ---------------------------------------------------------------
if (is.vector(data)) {
input <- stackr::tidy_wide(data = data, import.metadata = FALSE)
} else {
input <- data
}
# check genotype column naming
colnames(input) <- stringi::stri_replace_all_fixed(
str = colnames(input),
pattern = "GENOTYPE",
replacement = "GT",
vectorize_all = FALSE
)
# necessary steps to make sure we work with unique markers and not duplicated LOCUS
if (tibble::has_name(input, "LOCUS") && !tibble::has_name(input, "MARKERS")) {
input <- dplyr::rename(.data = input, MARKERS = LOCUS)
}
# strata
strata <- input %>%
ungroup %>%
distinct(POP_ID, INDIVIDUALS)
# Functions -------------------------------------------------------------------
# distance method
distance_individuals <- function(x, distance.method, parallel.core) {
if (!requireNamespace("tibble")) warning("tibble not installed")
# Prep data
dist.computation <- suppressWarnings(
x %>%
dplyr::select(MARKERS, INDIVIDUALS, GT) %>%
dplyr::group_by(INDIVIDUALS) %>%
tidyr::spread(data = ., key = MARKERS, value = GT) %>%
ungroup %>%
tibble::remove_rownames(.) %>%
tibble::column_to_rownames(df = ., var = "INDIVIDUALS")
)
# compute distance
# gain in speed between the 2 is very small on small data set
if (n_distinct(x$MARKERS) > 30000) {
if (requireNamespace("amap")) {
dist.computation <- suppressWarnings(amap::Dist(
x = dist.computation,
method = distance.method,
nbproc = parallel.core
)
)
} else {
warning("amap not installed, using stats::dist instead")
dist.computation <- stats::dist(
x = dist.computation,
method = distance.method
)
}
} else {
dist.computation <- stats::dist(
x = dist.computation,
method = distance.method
)
}
# melt the dist matrice into a data frame
dist.computation <- stats::na.omit(
reshape2::melt(
data = as.matrix(dist.computation),
varnames = c("ID1", "ID2"),
value.name = "DISTANCE",
na.rm = TRUE)[reshape2::melt(upper.tri(as.matrix(dist.computation), diag = FALSE))$value,]
) %>%
as_data_frame() %>%
arrange(DISTANCE)
# Include population info with strata
ID1.pop <- suppressWarnings(
dist.computation %>%
dplyr::select(INDIVIDUALS = ID1) %>%
dplyr::inner_join(strata, by = "INDIVIDUALS") %>%
dplyr::select(ID1_POP = POP_ID))
ID2.pop <- suppressWarnings(
dist.computation %>%
dplyr::select(INDIVIDUALS = ID2) %>%
dplyr::inner_join(strata, by = "INDIVIDUALS") %>%
dplyr::select(ID2_POP = POP_ID))
dist.computation <- dplyr::bind_cols(dist.computation, ID1.pop, ID2.pop) %>%
dplyr::mutate(
POP_COMP = ifelse(ID1.pop == ID2.pop, "same pop", "different pop"),
POP_COMP = factor(POP_COMP, levels = c("same pop", "different pop"), ordered = TRUE),
PAIRWISE = rep("pairwise", n()),
METHOD = rep(distance.method, n())
)
return(dist.computation)
}
# pairwise genome similarity method
pairwise_genome_similarity <- function(list.pair, ...) {
# list.pair <- 2
id.select <- stringi::stri_join(purrr::flatten(id.pairwise[list.pair]))
id1 <- id.select[[1]]
id2 <- id.select[[2]]
identical.gt <- input %>%
dplyr::filter_(lazyeval::interp(~ INDIVIDUALS == as.name(id1) | INDIVIDUALS == as.name(id2))) %>%
dplyr::select(MARKERS, INDIVIDUALS, GT) %>%
tidyr::spread(data = ., INDIVIDUALS, GT) %>%
dplyr::rename_(ID1 = as.name(id1), ID2 = as.name(id2)) %>%
dplyr::mutate(
ID1 = stringi::stri_replace_all_fixed(
str = as.character(ID1),
pattern = c("/", ":", "_", "-", "."),
replacement = "",
vectorize_all = FALSE),
ID2 = stringi::stri_replace_all_fixed(
str = as.character(ID2),
pattern = c("/", ":", "_", "-", "."),
replacement = "",
vectorize_all = FALSE)
) %>%
dplyr::filter(ID1 != "000000" | ID2 != "000000") %>%
dplyr::group_by(MARKERS) %>%
dplyr::mutate(
IDENTICAL_GT = ifelse(ID1 == ID2, "IDENTICAL", "DIFFERENT")
) %>%
dplyr::group_by(IDENTICAL_GT) %>%
tally %>%
tidyr::spread(data = ., IDENTICAL_GT, n) %>%
dplyr::mutate(
TOTAL_MARKERS_GENOTYPED = IDENTICAL + DIFFERENT,
PROP_IDENTICAL = IDENTICAL/TOTAL_MARKERS_GENOTYPED,
ID1 = id1,
ID2 = id2
) %>%
dplyr::select(ID1, ID2, IDENTICAL, DIFFERENT, TOTAL_MARKERS_GENOTYPED, PROP_IDENTICAL)
return(identical.gt)
} # end duplicate pairwise
# New list to prepare for results
res <- list()
# Compute distance between individuals --------------------------------------
if (!is.null(distance.method)) {
message(stringi::stri_join("Computing ", distance.method, " distances between individuals..."))
# distance.method <- "euclidean"
# distance.method <- "manhattan"
# parallel.core <- 8
dist.computation <- distance_individuals(
x = input,
distance.method = distance.method,
parallel.core = parallel.core
)
res$distance <- dist.computation
readr::write_tsv(
x = dist.computation,
path = "individuals.pairwise.dist.tsv",
col_names = TRUE
)
# Stats
message("Generating summary statistics")
distance.stats <- dist.computation %>%
dplyr::summarise(
MEAN = mean(DISTANCE, na.rm = TRUE),
MEDIAN = stats::median(DISTANCE, na.rm = TRUE),
SE = round(sqrt(stats::var(DISTANCE, na.rm = TRUE)/length(stats::na.omit(DISTANCE))), 2),
MIN = round(min(DISTANCE, na.rm = TRUE), 2),
MAX = round(max(DISTANCE, na.rm = TRUE), 2),
QUANTILE25 = stats::quantile(DISTANCE, 0.25), # quantile25
QUANTILE75 = stats::quantile(DISTANCE, 0.75)#, # quantile75
# OUTLIERS_LOW = QUANTILE25 - (1.5 * (QUANTILE75 - QUANTILE25)), # outliers : below the outlier boxplot
# OUTLIERS_HIGH = QUANTILE75 + (1.5 * (QUANTILE75 - QUANTILE25)) # outliers : higher the outlier boxplot
)
res$distance.stats <- distance.stats
readr::write_tsv(
x = distance.stats,
path = "individuals.pairwise.distance.stats.tsv",
col_names = TRUE
)
message("Generating the plots")
# violin plot
res$violin.plot.distance <- ggplot(
data = dist.computation,
aes(x = PAIRWISE, y = DISTANCE, na.rm = TRUE)
) +
geom_violin(trim = TRUE) +
geom_boxplot(width = 0.1, fill = "black", outlier.colour = "black") +
stat_summary(fun.y = "mean", geom = "point", shape = 21, size = 2.5, fill = "white") +
labs(y = "Distance\n <- close distant ->") +
labs(x = "Pairwise comparisons") +
theme(
# legend.position = "none",
panel.grid.minor.x = element_blank(),
panel.grid.major.x = element_blank(),
# panel.grid.major.y = element_blank(),
axis.title.x = element_text(size = 10, family = "Helvetica", face = "bold"),
axis.text.x = element_blank(),
axis.title.y = element_text(size = 10, family = "Helvetica", face = "bold"),
axis.text.y = element_text(size = 8, family = "Helvetica")
)
# Jitter plot
res$jitter.plot.distance <- ggplot(data = dist.computation, aes(x = PAIRWISE, y = DISTANCE, colour = POP_COMP)) +
geom_jitter(alpha = 0.3) +
labs(y = "Distance\n <- distant close ->") +
labs(x = "Pairwise comparisons") +
labs(colour = "Population comparisons") +
scale_colour_manual(values = c("#0571b0", "black")) +
scale_y_reverse() +
theme_light() +
theme(
# legend.position = "none",
panel.grid.minor.x = element_blank(),
panel.grid.major.x = element_blank(),
# panel.grid.major.y = element_blank(),
axis.title.x = element_text(size = 10, family = "Helvetica", face = "bold"),
axis.text.x = element_blank(),
axis.title.y = element_text(size = 10, family = "Helvetica", face = "bold"),
axis.text.y = element_text(size = 8, family = "Helvetica")
)
} # end distance method
# Compute pairwise search for duplicate --------------------------------------
if (genome) {
# list of id
id.list <- unique(input$INDIVIDUALS) # id list
# all combination of individual pair
id.pairwise <- utils::combn(unique(id.list), 2, simplify = FALSE)
list.pair <- 1:length(id.pairwise)
# list.pair <- 5
# parallel.core<-8
message("Starting scan for duplicate genomes, take a break...")
pairwise.genome.similarity <- parallel::mclapply(
X = list.pair,
FUN = pairwise_genome_similarity,
mc.preschedule = FALSE,
mc.silent = FALSE,
mc.cleanup = TRUE,
mc.cores = parallel.core
)
pairwise.genome.similarity <- dplyr::bind_rows(pairwise.genome.similarity)
# Include population info with strata
ID1.pop <- suppressWarnings(
pairwise.genome.similarity %>%
dplyr::select(INDIVIDUALS = ID1) %>%
dplyr::inner_join(strata, by = "INDIVIDUALS") %>%
dplyr::select(ID1_POP = POP_ID))
ID2.pop <- suppressWarnings(
pairwise.genome.similarity %>%
dplyr::select(INDIVIDUALS = ID2) %>%
dplyr::inner_join(strata, by = "INDIVIDUALS") %>%
dplyr::select(ID2_POP = POP_ID))
pairwise.genome.similarity <- dplyr::bind_cols(
pairwise.genome.similarity, ID1.pop, ID2.pop
) %>%
dplyr::mutate(
POP_COMP = ifelse(ID1.pop == ID2.pop, "same pop", "different pop"),
POP_COMP = factor(POP_COMP, levels = c("same pop", "different pop"), ordered = TRUE),
PAIRWISE = rep("pairwise comparison", n()),
METHOD = rep("genome similarity", n())
)
res$pairwise.genome.similarity <- pairwise.genome.similarity
readr::write_tsv(
x = pairwise.genome.similarity,
path = "individuals.pairwise.genome.similarity.tsv",
col_names = TRUE
)
# Stats
message("Generating summary statistics")
genome.stats <- pairwise.genome.similarity %>%
dplyr::summarise(
MEAN = mean(PROP_IDENTICAL, na.rm = TRUE),
MEDIAN = stats::median(PROP_IDENTICAL, na.rm = TRUE),
SE = round(sqrt(stats::var(PROP_IDENTICAL, na.rm = TRUE)/length(na.omit(PROP_IDENTICAL))), 2),
MIN = round(min(PROP_IDENTICAL, na.rm = TRUE), 2),
MAX = round(max(PROP_IDENTICAL, na.rm = TRUE), 2),
QUANTILE25 = stats::quantile(PROP_IDENTICAL, 0.25), # quantile25
QUANTILE75 = stats::quantile(PROP_IDENTICAL, 0.75)#, # quantile75
# OUTLIERS_LOW = QUANTILE25 - (1.5 * (QUANTILE75 - QUANTILE25)), # outliers : below the outlier boxplot
# OUTLIERS_HIGH = QUANTILE75 + (1.5 * (QUANTILE75 - QUANTILE25)) # outliers : higher the outlier boxplot
)
res$genome.stats <- genome.stats
readr::write_tsv(
x = genome.stats,
path = "individuals.pairwise.genome.stats.tsv",
col_names = TRUE
)
# Visualization ------------------------------------------------------------
message("Generating the plots")
# violin plot
res$violin.plot.genome <- ggplot(
data = pairwise.genome.similarity,
aes(x = PAIRWISE, y = PROP_IDENTICAL, na.rm = TRUE)
) +
geom_violin(trim = TRUE) +
geom_boxplot(width = 0.1, fill = "black", outlier.colour = "black") +
stat_summary(fun.y = "mean", geom = "point", shape = 21, size = 2.5, fill = "white") +
labs(y = "Genome similarity (proportion)") +
labs(x = "Pairwise comparison") +
theme(
# legend.position = "none",
panel.grid.minor.x = element_blank(),
panel.grid.major.x = element_blank(),
# panel.grid.major.y = element_blank(),
axis.title.x = element_text(size = 10, family = "Helvetica", face = "bold"),
axis.text.x = element_blank(),
axis.title.y = element_text(size = 10, family = "Helvetica", face = "bold"),
axis.text.y = element_text(size = 8, family = "Helvetica")
)
# jitter plot
res$jitter.plot.genome <- ggplot(
data = pairwise.genome.similarity,
aes(x = PAIRWISE, y = PROP_IDENTICAL, colour = POP_COMP)
) +
geom_jitter(alpha = 0.3) +
labs(y = "Genome similarity (proportion)") +
labs(x = "Pairwise comparisons") +
labs(colour = "Population comparisons") +
scale_colour_manual(values = c("#0571b0", "black")) +
theme_light() +
theme(
# legend.position = "none",
panel.grid.minor.x = element_blank(),
panel.grid.major.x = element_blank(),
# panel.grid.major.y = element_blank(),
axis.title.x = element_text(size = 10, family = "Helvetica", face = "bold"),
axis.text.x = element_blank(),
axis.title.y = element_text(size = 10, family = "Helvetica", face = "bold"),
axis.text.y = element_text(size = 8, family = "Helvetica")
)
} # end genome method
# RESULTS --------------------------------------------------------------------
if (genome) message("A table: pairwise.genome.similarity.tsv was written in the working directory")
"individuals.pairwise.dist.tsv"
cat("################################### RESULTS ###################################\n")
message("Object in the list (if all arguments are selected):\n
$distance # results of the distance method
$distance.stats # Summary statistics of the distance method
$pairwise.genome.similarity # results of the genome method
$genome.stats # Summary statistics of the genome method\n
Visualization:
$violin.plot.distance
$jitter.plot.distance
$violin.plot.genome
$jitter.plot.genome\n
Saved in the working directory:
individuals.pairwise.dist.tsv
individuals.pairwise.distance.stats.tsv
individuals.pairwise.genome.similarity.tsv
individuals.pairwise.genome.stats.tsv
")
message(stringi::stri_join("Working directory: ", getwd()))
message(stringi::stri_join("Computation time: ", round((proc.time() - timing)[[3]]), " sec"))
cat("############################## completed ##############################\n")
return(res)
} # end function detect_duplicate_genomes
|
f9a843d337db0239f14adc2056800e26ee6064d3
|
2854264f4db84d49e75a4b214f6121b664a9971c
|
/dataset_creation/R/Old/tests.R
|
953d66f04940a527c246bd3c60daa1304fd9a57f
|
[] |
no_license
|
kajdreef/IN4334-MSR
|
6e256218072b484293da1f15fd49826c993d7570
|
deb8e80fa8ad0f5f0a1178f476ca06c53f070858
|
refs/heads/master
| 2021-01-10T09:37:24.734735
| 2016-01-27T21:29:51
| 2016-01-27T21:29:51
| 46,335,107
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,760
|
r
|
tests.R
|
# Replication script: predict bug-implicated files from code metrics
# using correlation-based feature removal and random-forest RFE on a
# balanced sample of buggy / non-buggy rows.

# Clear history
# NOTE(review): clearing the global environment inside a script is an
# anti-pattern; kept for backwards compatibility with interactive use.
(rm(list = ls()))

# Import libraries
library(dplyr)
library(ggplot2)

# CHANGE THESE TWO PARAMETERS
project <- "lucene"
threshold <- 0.05    # To distinguish minor and major
corr_cutoff <- 0.75  # Features correlated above this are dropped
sample_size <- 2000  # Check the output of table(metrics$implicated) before setting
# SAMPLE SIZE MUST ALSO BE MULTIPLE OF K
k <- 10  ### K-folds
set.seed(65)

# ____________________________________________
# Load the per-project metrics dataset produced upstream.
input <- paste0("../", project, "/dataset/", project, "_metrics", "_",
                toString(threshold), ".csv")
metrics <- read.csv(file = input, header = TRUE, sep = ",")
table(metrics$implicated)

# SAMPLING
# Create the dataset with an even number of buggy/non-buggy rows.
bugs <- metrics %>%
  filter(implicated == 1)
non_bugs <- metrics %>%
  filter(implicated == 0)
sample_data_set1 <- bugs[sample(nrow(bugs), sample_size, replace = TRUE, prob = NULL), ]
sample_data_set2 <- non_bugs[sample(nrow(non_bugs), sample_size, replace = TRUE, prob = NULL), ]
# The "id" column recycles fold labels 1:k over the rows, which is why
# sample_size must be a multiple of k.
sample_data <- cbind(rbind(sample_data_set1, sample_data_set2), list("id" = 1:k))

#######################################################################################
####################### REMOVING HIGHLY CORRELATED FEATURES ###########################
#######################################################################################
library(caret)

# Shift feature-column indices by 2 to account for the sha/file columns
# that were excluded when computing the correlation matrix.
add2 <- function(x) {
  x + 2
}

cors <- cor(sample_data[, c(-1, -2, -ncol(sample_data))])
hc <- findCorrelation(cors, cutoff = corr_cutoff)
hc <- sort(hc)
hc <- add2(hc)  # add2() is vectorized; sapply() was unnecessary
# Guard against an empty index: sample_data[, -integer(0)] would drop
# every column instead of keeping them all.
if (length(hc) > 0) {
  non_redundant_sample_data <- sample_data[, -c(hc)]
} else {
  non_redundant_sample_data <- sample_data
}

# load the library
#library(mlbench)
# prepare training scheme
#control <- trainControl(method="repeatedcv", number=10, repeats=3)
# train the model
#model <- train(implicated~., data=non_redundant_sample_data, method="lvq", preProcess="scale", trControl=control)
# estimate variable importance
#importance <- varImp(model, scale=FALSE)
# summarize importance
#print(importance)
#plot(importance)

# NOTE(review): the commented line below used the de-correlated frame;
# the active line indexes sample_data with ncol(non_redundant_sample_data),
# mixing the two frames -- confirm this is intentional.
#data = non_redundant_sample_data[,c(-1,-2,-ncol(non_redundant_sample_data))]
data <- sample_data[, c(-1, -2, -ncol(non_redundant_sample_data))]
data <- data %>%
  transform(implicated = as.factor(implicated))

# ensure the results are repeatable
set.seed(7)
# load the libraries
library(mlbench)
library(randomForest)
# define the control using a random forest selection function
control <- rfeControl(functions = rfFuncs, method = "cv", number = 10)
# run the RFE algorithm over all candidate subset sizes
results <- rfe(data[, seq_len(ncol(data) - 1)],
               data[, ncol(data)],
               seq_len(ncol(data) - 1),
               rfeControl = control)
# summarize the results
print(results)
# list the chosen features
predictors(results)
# plot the results
plot(results, type = c("g", "o"))
|
2a278351e253f0dfe4ec2107dd61f3507620947d
|
97f1e3e6e908a83489e4243268ba539316196176
|
/R/antsImageHeaderInfo.R
|
3e91562022f702d3a63e1daf1e19e23f5fe7889b
|
[
"Apache-2.0"
] |
permissive
|
ANTsX/ANTsRCore
|
1c3d1da3bea84859da7d18f54c34ae13d2af8619
|
8e234fd1363c0d618f9dc21c9566f3d5464655a2
|
refs/heads/master
| 2023-05-24T23:53:30.886217
| 2023-05-22T02:52:39
| 2023-05-22T02:52:39
| 83,897,912
| 8
| 22
| null | 2023-05-22T02:52:40
| 2017-03-04T14:09:48
|
C++
|
UTF-8
|
R
| false
| false
| 1,341
|
r
|
antsImageHeaderInfo.R
|
#' Read file info from image header
#'
#' Read file info from image header
#'
#'
#' @param filename name of image file to scan for info; an in-memory
#'   \code{antsImage} is also accepted and is written to a temporary
#'   file first
#' @author Duda JT
#'
#' @return outputs a list containing:
#' \itemize{
#' \item{pixelclass: }{Type of pixel (scalar, vector, etc).}
#' \item{pixeltype: }{Type of pixel values (int, float, etc).}
#' \item{nDimensions: }{Number of image dimensions.}
#' \item{nComponents: }{Number of pixel dimensions.}
#' \item{dimensions: }{Size of image dimensions.}
#' \item{spacing: }{Pixel resolution.}
#' \item{origin: }{Spatial origin of image}
#' \item{direction: }{Spatial directions of image axes.}
#' }
#' @export antsImageHeaderInfo
#' @examples
#' antsImageHeaderInfo( getANTsRData("r16") )
#' img <- antsImageRead(getANTsRData("r16"))
#' antsImageHeaderInfo(img)
#' testthat::expect_error(antsImageHeaderInfo(""))
antsImageHeaderInfo <- function( filename )
{
  if (is.antsImage(filename)) {
    # An in-memory image has no file header: write it to a temporary
    # file and read the header back from disk.
    tfile <- tempfile(fileext = ".nii.gz")
    antsImageWrite(filename, tfile)
    on.exit(unlink(tfile))
    filename <- tfile
  } else {
    filename <- path.expand(filename)
  }
  if ( !file.exists(filename) )
  {
    stop("File does not exist")
  }
  ret <- .Call("antsImageHeaderInfo", filename, PACKAGE = "ANTsRCore")
  return(ret)
}
|
832630a14e0caecf8b33dc199f5696734c5fc007
|
be0dcd3a7ff4df3a352a15a26cf5611bcc25eae5
|
/man/REE_plus_Y_Elements.Rd
|
c944167d49529c8e173e32b499f7b52ec85b9797
|
[
"MIT"
] |
permissive
|
cicarrascog/imputeREE
|
90b0ef70126c8dfc70705d075b0a188ed2e18e98
|
2f6c0594482391ae730220ad2c2d1b8d2df92c14
|
refs/heads/master
| 2023-07-07T09:16:39.443365
| 2023-06-25T23:30:25
| 2023-06-25T23:30:25
| 409,395,192
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 370
|
rd
|
REE_plus_Y_Elements.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/REE_plus_Y_Elements.R
\docType{data}
\name{REE_plus_Y_Elements}
\alias{REE_plus_Y_Elements}
\title{Rare earth element list}
\format{
Rare earth element + Y list
}
\usage{
REE_plus_Y_Elements
}
\description{
A string vector containing the elemental symbols for REE and Y.
}
\keyword{datasets}
|
1ac6824896f24cba9afaa98e237e27afbd2d922d
|
b26b28874f761ea2b085db8d60c994af57d99cd5
|
/man/two.ways.stepfor.Rd
|
283bcb1e934d617209df8cb09bfced2446e8ba73
|
[] |
no_license
|
mjnueda/maSigPro
|
de1f90885fafc23b38585bb75aec40aeee9ed89f
|
000885f3103c02880fb9bd1e69f0c679e08ae580
|
refs/heads/master
| 2021-01-22T12:37:54.848463
| 2020-12-08T17:53:08
| 2020-12-08T17:53:08
| 102,353,792
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,368
|
rd
|
two.ways.stepfor.Rd
|
\name{two.ways.stepfor}
\alias{two.ways.stepfor}
\title{Fitting a linear model by forward-stepwise regression}
\description{
\code{two.ways.stepfor} fits a linear regression model applying forward-stepwise strategy.
}
\usage{
two.ways.stepfor(y = y, d = d, alfa = 0.05, family = gaussian(), epsilon=0.00001 )
}
\arguments{
\item{y}{ dependent variable }
\item{d}{ data frame containing by columns the set of variables that could be in the selected model }
\item{alfa}{ significance level to decide if a variable stays or not in the model}
\item{family}{the distribution function to be used in the glm model}
\item{epsilon}{argument to pass to \code{glm.control}, convergence tolerance in the iterative process to estimate the glm model}
}
\details{
The strategy begins analysing all the possible models with only one of the variables included in \code{d}.
The most statistically significant variable (with the lowest p-value) is included in the model and then
it is considered to introduce in the model another variable analysing all the possible models with two variables
(the selected variable in the previous step plus a new variable). Again the most statistically significant variable
(with lowest p-value) is included in the model. The process is repeated till there are no more statistically significant
variables to include. Each time a variable enters the model, the p-values of the current model variables are recalculated and non-significant variables will be removed.
}
\value{
\code{two.ways.stepfor} returns an object of the class \code{\link{lm}}, where the model uses
\code{y} as dependent variable and all the selected variables from \code{d} as independent variables.
The function \code{\link{summary}} is used to obtain a summary and analysis of variance table of the results.
The generic accessor functions \code{\link{coefficients}}, \code{\link{effects}},
\code{\link{fitted.values}} and \code{\link{residuals}} extract various useful features of the value returned by \code{\link{lm}}.
}
\references{Conesa, A., Nueda M.J., Alberto Ferrer, A., Talon, T. 2005.
maSigPro: a Method to Identify Significant Differential Expression Profiles in Time-Course Microarray Experiments.
}
\author{Ana Conesa and Maria Jose Nueda, \email{mj.nueda@ua.es}}
\seealso{ \code{\link{lm}}, \code{\link{step}}, \code{\link{stepback}}, \code{\link{stepfor}}, \code{\link{two.ways.stepback}}}
\examples{
## create design matrix
Time <- rep(c(rep(c(1:3), each = 3)), 4)
Replicates <- rep(c(1:12), each = 3)
Control <- c(rep(1, 9), rep(0, 27))
Treat1 <- c(rep(0, 9), rep(1, 9), rep(0, 18))
Treat2 <- c(rep(0, 18), rep(1, 9), rep(0,9))
Treat3 <- c(rep(0, 27), rep(1, 9))
edesign <- cbind(Time, Replicates, Control, Treat1, Treat2, Treat3)
rownames(edesign) <- paste("Array", c(1:36), sep = "")
dise <- make.design.matrix(edesign)
dis <- as.data.frame(dise$dis)
## expression vector
y <- c(0.082, 0.021, 0.010, 0.113, 0.013, 0.077, 0.068, 0.042, -0.056, -0.232, -0.014, -0.040,
-0.055, 0.150, -0.027, 0.064, -0.108, -0.220, 0.275, -0.130, 0.130, 1.018, 1.005, 0.931,
-1.009, -1.101, -1.014, -0.045, -0.110, -0.128, -0.643, -0.785, -1.077, -1.187, -1.249, -1.463)
s.fit <- two.ways.stepfor(y = y, d = dis)
summary(s.fit)
}
\keyword{ regression }
|
0b9223502f1d0a958d1422788c585cf3440e0b09
|
4dcd47cbd1468df4004c62f23f0abb635b718b8c
|
/R/fitted.R
|
843ad67890f7427f35baaaeefe1c36c2b624d3fc
|
[] |
no_license
|
peter1328/brms
|
ebbce7e0bb6f7c0ed1316377009f7f99f849d519
|
a5dbbb6a32c87976e6c6e59d62913a10bcd2ae40
|
refs/heads/master
| 2021-07-13T08:11:31.938842
| 2017-10-18T00:11:56
| 2017-10-18T00:12:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,979
|
r
|
fitted.R
|
# functions in this file have the same arguments structure
# Args:
# mu: untransformed linear predictor matrix
# draws: A named list returned by extract_draws containing
# all required data and samples
# Returns:
# transformed linear predictor representing the mean
# of the response distribution
# Each fitted_<family> function maps the draws list (output of
# extract_draws) to the mean of the response distribution on the
# response scale. Families below whose mean is just the inverse-linked
# linear predictor delegate to fitted_default().
fitted_gaussian <- function(draws) {
  fitted_default(draws)
}
fitted_student <- function(draws) {
  fitted_default(draws)
}
fitted_cauchy <- function(draws) {
  fitted_default(draws)
}
fitted_skew_normal <- function(draws) {
  fitted_default(draws)
}
fitted_lognormal <- function(draws) {
  # Mean of the lognormal distribution: exp(mu + sigma^2 / 2),
  # replaced by the truncated mean when bounds are present.
  draws$sigma <- get_sigma(
    draws$sigma, data = draws$data, dim = dim_mu(draws)
  )
  draws$mu <- ilink(draws$mu, draws$f$link)
  if (!is_trunc(draws$data)) {
    draws$mu <- with(draws, exp(mu + sigma^2 / 2))
  } else {
    draws$mu <- fitted_trunc(draws)
  }
  draws$mu
}
fitted_binomial <- function(draws) {
  trials <- as_draws_matrix(draws$data$trials, dim_mu(draws))
  draws$mu <- ilink(draws$mu, draws$f$link)
  if (!is_trunc(draws$data)) {
    # scale mu from [0,1] to [0,trials]
    draws$mu <- draws$mu * trials
  } else {
    draws$mu <- fitted_trunc(draws)
  }
  draws$mu
}
fitted_bernoulli <- function(draws) {
  fitted_default(draws)
}
fitted_poisson <- function(draws) {
  fitted_default(draws)
}
fitted_negbinomial <- function(draws) {
  fitted_default(draws)
}
fitted_geometric <- function(draws) {
  fitted_default(draws)
}
fitted_exponential <- function(draws) {
  fitted_default(draws)
}
fitted_gamma <- function(draws) {
  fitted_default(draws)
}
fitted_weibull <- function(draws) {
  # Weibull mean: scale * gamma(1 + 1/shape); the linear predictor is
  # divided by shape before applying the inverse link to obtain scale.
  draws$shape <- get_shape(
    draws$shape, data = draws$data, dim = dim_mu(draws)
  )
  draws$mu <- ilink(draws$mu / draws$shape, draws$f$link)
  if (!is_trunc(draws$data)) {
    draws$mu <- with(draws, mu * gamma(1 + 1 / shape))
  } else {
    draws$mu <- fitted_trunc(draws)
  }
  draws$mu
}
fitted_frechet <- function(draws) {
  fitted_default(draws)
}
fitted_gen_extreme_value <- function(draws) {
  # GEV mean: mu + sigma * (gamma(1 - xi) - 1) / xi
  # (finite only for shape parameter xi < 1).
  draws$sigma <- get_sigma(
    draws$sigma, data = draws$data, dim = dim_mu(draws)
  )
  draws$xi <- get_dpar(draws$xi)
  draws$mu <- ilink(draws$mu, draws$f$link)
  if (!is_trunc(draws$data)) {
    draws$mu <- with(draws, mu + sigma * (gamma(1 - xi) - 1) / xi)
  } else {
    draws$mu <- fitted_trunc(draws)
  }
  draws$mu
}
fitted_inverse.gaussian <- function(draws) {
  fitted_default(draws)
}
fitted_exgaussian <- function(draws) {
  # Ex-Gaussian mean is the Gaussian mean plus the exponential
  # component's mean 'beta'.
  draws$mu <- ilink(draws$mu, draws$f$link) + get_dpar(draws$beta)
  fitted_trunc(draws)
}
fitted_wiener <- function(draws) {
  # mu is the drift rate
  # Mean response time of the Wiener diffusion model given boundary
  # separation (bs), non-decision time (ndt), and initial bias.
  draws$mu <- ilink(draws$mu, draws$f$link)
  draws$bs <- get_dpar(draws$bs)
  draws$ndt <- get_dpar(draws$ndt)
  draws$bias <- get_dpar(draws$bias)
  with(draws,
    ndt - bias / mu + bs / mu *
      (exp(- 2 * mu * bias) - 1) / (exp(-2 * mu * bs) - 1)
  )
}
fitted_beta <- function(draws) {
  fitted_default(draws)
}
fitted_von_mises <- function(draws) {
  fitted_default(draws)
}
fitted_asym_laplace <- function(draws) {
  # Asymmetric Laplace mean:
  # mu + sigma * (1 - 2q) / (q * (1 - q)) for quantile q.
  draws$quantile <- get_dpar(draws$quantile)
  draws$sigma <- get_sigma(
    draws$sigma, data = draws$data, dim = dim_mu(draws)
  )
  draws$mu <- ilink(draws$mu, draws$f$link)
  with(draws,
    mu + sigma * (1 - 2 * quantile) / (quantile * (1 - quantile))
  )
}
# Hurdle families: the mean combines the zero-truncated count mean with
# the hurdle probability hu (probability of a structural zero).
fitted_hurdle_poisson <- function(draws) {
  draws$hu <- get_zi_hu(draws, par = "hu")
  draws$mu <- adjust_old_forked(draws$mu, draws$hu)
  draws$mu <- ilink(draws$mu, draws$f$link)
  # mean of the zero-truncated Poisson, scaled by P(not hurdled)
  with(draws, mu / (1 - exp(-mu)) * (1 - hu))
}
fitted_hurdle_negbinomial <- function(draws) {
  draws$shape <- get_shape(
    draws$shape, data = draws$data, dim = dim_mu(draws)
  )
  draws$hu <- get_zi_hu(draws, par = "hu")
  draws$mu <- adjust_old_forked(draws$mu, draws$hu)
  draws$mu <- ilink(draws$mu, draws$f$link)
  # mean of the zero-truncated negative binomial, scaled by (1 - hu)
  with(draws, mu / (1 - (shape / (mu + shape))^shape) * (1 - hu))
}
fitted_hurdle_gamma <- function(draws) {
  draws$hu <- get_zi_hu(draws, par = "hu")
  draws$mu <- adjust_old_forked(draws$mu, draws$hu)
  draws$mu <- ilink(draws$mu, draws$f$link)
  with(draws, mu * (1 - hu))
}
fitted_hurdle_lognormal <- function(draws) {
  draws$sigma <- get_sigma(
    draws$sigma, data = draws$data, dim = dim_mu(draws)
  )
  draws$hu <- get_zi_hu(draws, par = "hu")
  draws$mu <- ilink(draws$mu, draws$f$link)
  with(draws, exp(mu + sigma^2 / 2) * (1 - hu))
}
# Zero-inflated families: the non-inflated mean is scaled by the
# probability (1 - zi) of not being an excess zero.
fitted_zero_inflated_poisson <- function(draws) {
  draws$zi <- get_zi_hu(draws, par = "zi")
  draws$mu <- adjust_old_forked(draws$mu, draws$zi)
  ilink(draws$mu, draws$f$link) * (1 - draws$zi)
}
fitted_zero_inflated_negbinomial <- function(draws) {
  draws$zi <- get_zi_hu(draws, par = "zi")
  draws$mu <- adjust_old_forked(draws$mu, draws$zi)
  ilink(draws$mu, draws$f$link) * (1 - draws$zi)
}
fitted_zero_inflated_binomial <- function(draws) {
  draws$zi <- get_zi_hu(draws, par = "zi")
  draws$mu <- adjust_old_forked(draws$mu, draws$zi)
  draws$mu <- ilink(draws$mu, draws$f$link) * (1 - draws$zi)
  trials <- draws$data[["trials"]]
  if (!is.null(draws$data$N_trait)) {
    # deprecated as of brms 1.0.0
    # old multivariate syntax duplicated trials; keep first half only
    J <- seq_len(ceiling(length(trials) / 2))
    trials <- trials[J]
  }
  trials <- as_draws_matrix(trials, dim_mu(draws))
  draws$mu * trials
}
fitted_zero_inflated_beta <- function(draws) {
  draws$zi <- get_zi_hu(draws, par = "zi")
  draws$mu <- adjust_old_forked(draws$mu, draws$zi)
  ilink(draws$mu, draws$f$link) * (1 - draws$zi)
}
fitted_zero_one_inflated_beta <- function(draws) {
  # zoi: probability of a zero-or-one; coi: conditional probability of
  # a one given zero-or-one. The inflated part contributes zoi * coi.
  draws$zoi <- get_dpar(draws$zoi)
  draws$coi <- get_dpar(draws$coi)
  draws$zoi * draws$coi +
    ilink(draws$mu, draws$f$link) * (1 - draws$zoi)
}
# Categorical / ordinal families return per-category probabilities via
# fitted_catordinal(); ordinal families first rescale by the
# discrimination parameter disc.
fitted_categorical <- function(draws) {
  fitted_catordinal(draws)
}
fitted_cumulative <- function(draws) {
  draws$disc <- get_disc(draws, ncat = draws$data[["ncat"]])
  draws$mu <- draws$disc * draws$mu
  fitted_catordinal(draws)
}
fitted_sratio <- function(draws) {
  draws$disc <- get_disc(draws, ncat = draws$data[["ncat"]])
  draws$mu <- draws$disc * draws$mu
  fitted_catordinal(draws)
}
fitted_cratio <- function(draws) {
  draws$disc <- get_disc(draws, ncat = draws$data[["ncat"]])
  draws$mu <- draws$disc * draws$mu
  fitted_catordinal(draws)
}
fitted_acat <- function(draws) {
  draws$disc <- get_disc(draws, ncat = draws$data[["ncat"]])
  draws$mu <- draws$disc * draws$mu
  fitted_catordinal(draws)
}
fitted_mixture <- function(draws) {
  # Mixture mean: theta-weighted sum of the component families' means.
  families <- family_names(draws$f)
  draws$theta <- get_theta(draws)
  out <- 0
  for (j in seq_along(families)) {
    # dispatch to the fitted_<family> function of component j
    fitted_fun <- paste0("fitted_", families[j])
    fitted_fun <- get(fitted_fun, asNamespace("brms"))
    dpars <- valid_dpars(families[j])
    tmp_draws <- list(
      f = draws$f$mix[[j]],
      nsamples = draws[["nsamples"]],
      data = draws[["data"]]
    )
    # distributional parameters of component j are stored with suffix j
    for (ap in dpars) {
      tmp_draws[[ap]] <- draws[[paste0(ap, j)]]
    }
    if (length(dim(draws$theta)) == 3L) {
      theta <- draws$theta[, , j]
    } else {
      theta <- draws$theta[, j]
    }
    out <- out + theta * fitted_fun(tmp_draws)
  }
  out
}
# ------ fitted helper functions ------
fitted_default <- function(draws) {
  # default fitted values
  # Apply the inverse link, the spatial lag adjustment (if any), and
  # the truncation correction (if any), in that order.
  draws$mu <- ilink(draws$mu, draws$f$link)
  draws$mu <- fitted_lagsar(draws)
  fitted_trunc(draws)
}
fitted_catordinal <- function(draws) {
  # fitted values for categorical and ordinal models
  # Returns an array of per-category probabilities with dimensions
  # samples x observations x categories.
  get_density <- function(s) {
    # get probabilities of each category
    do.call(dens, c(args, list(eta = draws$mu[, s, ])))
  }
  ncat <- draws$data[["ncat"]]
  args <- list(seq_len(ncat), ncat = ncat, link = draws$f$link)
  # density function name follows the d<family> convention
  dens <- paste0("d", draws$f$family)
  draws$mu <- abind(lapply(seq_len(ncol(draws$mu)), get_density), along = 3)
  aperm(draws$mu, perm = c(1, 3, 2))
}
fitted_lagsar <- function(draws) {
  # Adjust mu for spatial autoregressive (SAR) models of the response:
  # mu' = (I - rho * W)^{-1} mu, solved per posterior sample.
  if (!is.null(draws[["lagsar"]])) {
    stopifnot(draws$f$family %in% c("gaussian", "student"))
    .fitted_lagsar <- function(s) {
      W_new <- with(draws, diag(data$N) - lagsar[s, ] * data$W)
      as.numeric(solve(W_new) %*% draws$mu[s, ])
    }
    # NOTE(review): 1:nsamples assumes nsamples >= 1 -- seq_len would be safer
    draws$mu <- do.call(rbind, lapply(1:draws$nsamples, .fitted_lagsar))
  }
  draws$mu
}
adjust_old_forked <- function(mu, par) {
  # Compatibility shim for zi / hu models fitted with the old
  # multivariate syntax: when 'mu' carries twice as many columns as
  # 'par', only the first half belongs to the mean part.
  # Args:
  #   mu: matrix of linear predictor samples
  #   par: samples of the zi or hu parameter
  n_mu <- ncol(mu)
  n_par <- ncol(par)
  if (isTRUE(n_mu == 2 * n_par)) {
    keep <- seq_len(n_mu / 2)
    mu <- mu[, keep]
  }
  mu
}
as_draws_matrix <- function(x, dim) {
  # Recycle data 'x' (length 1 or dim[2]) row-wise into a
  # dim[1] x dim[2] matrix so that it can be combined element-wise
  # with a matrix of posterior samples.
  stopifnot(length(dim) == 2L, length(x) %in% c(1, dim[2]))
  n_row <- dim[1]
  n_col <- dim[2]
  matrix(x, nrow = n_row, ncol = n_col, byrow = TRUE)
}
dim_mu <- function(draws) {
  # Dimensions of the fitted-value matrix:
  # number of posterior samples x number of observations.
  n_samples <- nrow(draws$mu)
  n_obs <- draws$data$N
  c(n_samples, n_obs)
}
is_trunc <- function(data) {
  # TRUE if any observation carries a finite lower or upper
  # truncation bound; FALSE for untruncated models (or when no
  # bounds are stored at all).
  has_lower <- any(data[["lb"]] > -Inf)
  has_upper <- any(data[["ub"]] < Inf)
  has_lower || has_upper
}
fitted_trunc <- function(draws) {
  # Prepares data required for truncation and calls the
  # family-specific truncation function for fitted values.
  # Returns draws$mu unchanged when the model is not truncated.
  if (is_trunc(draws$data)) {
    lb <- as_draws_matrix(draws$data[["lb"]], dim_mu(draws))
    ub <- as_draws_matrix(draws$data[["ub"]], dim_mu(draws))
    # look up fitted_trunc_<family> in the brms namespace
    fitted_trunc_fun <- paste0("fitted_trunc_", draws$f$family)
    fitted_trunc_fun <- try(
      get(fitted_trunc_fun, asNamespace("brms")),
      silent = TRUE
    )
    # use inherits() rather than methods::is() for the class check
    if (inherits(fitted_trunc_fun, "try-error")) {
      stop2("Fitted values on the response scale not yet implemented ",
            "for truncated '", draws$f$family, "' models.")
    } else {
      trunc_args <- nlist(draws, lb, ub)
      draws$mu <- do.call(fitted_trunc_fun, trunc_args)
    }
  }
  draws$mu
}
# ----- family specific truncation functions -----
# Args:
#   draws: output of extract_draws
#   lb: lower truncation bound
#   ub: upper truncation bound
# Returns:
#   samples of the truncated mean parameter
fitted_trunc_gaussian <- function(draws, lb, ub) {
  draws$sigma <- get_sigma(
    draws$sigma, data = draws$data, dim = dim_mu(draws)
  )
  # standardize the bounds
  zlb <- (lb - draws$mu) / draws$sigma
  zub <- (ub - draws$mu) / draws$sigma
  # truncated mean of standard normal; see Wikipedia
  trunc_zmean <- (dnorm(zlb) - dnorm(zub)) / (pnorm(zub) - pnorm(zlb))
  draws$mu + trunc_zmean * draws$sigma
}
fitted_trunc_student <- function(draws, lb, ub) {
  draws$sigma <- get_sigma(
    draws$sigma, data = draws$data, dim = dim_mu(draws)
  )
  draws$nu <- get_dpar(draws$nu)
  zlb <- (lb - draws$mu) / draws$sigma
  zub <- (ub - draws$mu) / draws$sigma
  # see Kim 2008: Moments of truncated Student-t distribution
  G1 <- with(draws,
    gamma((nu - 1) / 2) * nu^(nu / 2) /
      (2 * (pt(zub, df = nu) - pt(zlb, df = nu))
       * gamma(nu / 2) * gamma(0.5))
  )
  A <- with(draws, (nu + zlb^2) ^ (-(nu - 1) / 2))
  B <- with(draws, (nu + zub^2) ^ (-(nu - 1) / 2))
  trunc_zmean <- G1 * (A - B)
  draws$mu + trunc_zmean * draws$sigma
}
fitted_trunc_lognormal <- function(draws, lb, ub) {
  # mu has to be on the linear scale
  draws$sigma <- get_sigma(
    draws$sigma, data = draws$data, dim = dim_mu(draws)
  )
  # first moment restricted to [lb, ub]
  m1 <- with(draws,
    exp(mu + sigma^2 / 2) *
      (pnorm((log(ub) - mu) / sigma - sigma) -
       pnorm((log(lb) - mu) / sigma - sigma))
  )
  # normalize by the probability mass inside the bounds
  with(draws,
    m1 / (plnorm(ub, meanlog = mu, sdlog = sigma) -
          plnorm(lb, meanlog = mu, sdlog = sigma))
  )
}
fitted_trunc_gamma <- function(draws, lb, ub) {
  draws$shape <- get_shape(
    draws$shape, data = draws$data, dim = dim_mu(draws)
  )
  # mu becomes the scale parameter
  draws$mu <- draws$mu / draws$shape
  # see Jawitz 2004: Moments of truncated continuous univariate distributions
  m1 <- with(draws,
    mu / gamma(shape) *
      (incgamma(ub / mu, 1 + shape) - incgamma(lb / mu, 1 + shape))
  )
  with(draws,
    m1 / (pgamma(ub, shape, scale = mu) - pgamma(lb, shape, scale = mu))
  )
}
fitted_trunc_exponential <- function(draws, lb, ub) {
  # see Jawitz 2004: Moments of truncated continuous univariate distributions
  # mu is already the scale parameter
  inv_mu <- 1 / draws$mu
  m1 <- with(draws, mu * (incgamma(ub / mu, 2) - incgamma(lb / mu, 2)))
  with(draws, m1 / (pexp(ub, rate = inv_mu) - pexp(lb, rate = inv_mu)))
}
fitted_trunc_weibull <- function(draws, lb, ub) {
  # see Jawitz 2004: Moments of truncated continuous univariate distributions
  # mu is already the scale parameter
  draws$shape <- get_shape(
    draws$shape, data = draws$data, dim = dim_mu(draws)
  )
  a <- 1 + 1 / draws$shape
  m1 <- with(draws,
    mu * (incgamma((ub / mu)^shape, a) - incgamma((lb / mu)^shape, a))
  )
  with(draws,
    m1 / (pweibull(ub, shape, scale = mu) - pweibull(lb, shape, scale = mu))
  )
}
# Discrete families clamp the bounds to a feasible support range and
# delegate the summation to fitted_trunc_discrete().
fitted_trunc_binomial <- function(draws, lb, ub) {
  lb <- ifelse(lb < -1, -1, lb)
  max_value <- max(draws$data$trials)
  ub <- ifelse(ub > max_value, max_value, ub)
  trials <- draws$data$trials
  if (length(trials) > 1) {
    trials <- as_draws_matrix(trials, dim_mu(draws))
  }
  args <- list(size = trials, prob = draws$mu)
  fitted_trunc_discrete(dist = "binom", args = args, lb = lb, ub = ub)
}
fitted_trunc_poisson <- function(draws, lb, ub) {
  lb <- ifelse(lb < -1, -1, lb)
  # 3 * max(mu) as a pragmatic finite upper limit for the summation
  max_value <- 3 * max(draws$mu)
  ub <- ifelse(ub > max_value, max_value, ub)
  args <- list(lambda = draws$mu)
  fitted_trunc_discrete(dist = "pois", args = args, lb = lb, ub = ub)
}
fitted_trunc_negbinomial <- function(draws, lb, ub) {
  lb <- ifelse(lb < -1, -1, lb)
  max_value <- 3 * max(draws$mu)
  ub <- ifelse(ub > max_value, max_value, ub)
  draws$shape <- get_shape(
    draws$shape, data = draws$data, dim = dim_mu(draws)
  )
  args <- list(mu = draws$mu, size = draws$shape)
  fitted_trunc_discrete(dist = "nbinom", args = args, lb = lb, ub = ub)
}
fitted_trunc_geometric <- function(draws, lb, ub) {
  # geometric is negative binomial with size fixed to 1
  lb <- ifelse(lb < -1, -1, lb)
  max_value <- 3 * max(draws$mu)
  ub <- ifelse(ub > max_value, max_value, ub)
  args <- list(mu = draws$mu, size = 1)
  fitted_trunc_discrete(dist = "nbinom", args = args, lb = lb, ub = ub)
}
fitted_trunc_discrete <- function(dist, args, lb, ub) {
  # Truncated mean for discrete families by explicit summation of
  # x * P(x) over the admissible support, then renormalization.
  stopifnot(is.matrix(lb), is.matrix(ub))
  message(
    "Computing fitted values for truncated ",
    "discrete models may take a while."
  )
  # resolve density / distribution functions by naming convention
  pdf <- get(paste0("d", dist), mode = "function")
  cdf <- get(paste0("p", dist), mode = "function")
  mean_kernel <- function(x, args) {
    # just x * density(x)
    x * do.call(pdf, c(x, args))
  }
  if (any(is.infinite(c(lb, ub)))) {
    stop("lb and ub must be finite")
  }
  # simplify lb and ub back to vector format
  vec_lb <- lb[1, ]
  vec_ub <- ub[1, ]
  min_lb <- min(vec_lb)
  # array of dimension S x N x length((lb+1):ub)
  mk <- lapply((min_lb + 1):max(vec_ub), mean_kernel, args = args)
  mk <- do.call(abind, c(mk, along = 3))
  m1 <- vector("list", ncol(mk))
  for (n in seq_along(m1)) {
    # summarize only over non-truncated values for this observation
    J <- (vec_lb[n] - min_lb + 1):(vec_ub[n] - min_lb)
    m1[[n]] <- rowSums(mk[, n, ][, J, drop = FALSE])
  }
  rm(mk)
  m1 <- do.call(cbind, m1)
  m1 / (do.call(cdf, c(list(ub), args)) - do.call(cdf, c(list(lb), args)))
}
|
d7cb68e7f6b02ceee20be5585cf4f83e8260b4d9
|
514ca787ec8ef69fdcf173831989d8563bc9c1ee
|
/R/scripted-utilities.R
|
92a339f66613a85b33828325e09e9a4d7ba0fa19
|
[] |
no_license
|
sakrejda/scripted
|
6b6fd24d4bd8e9c5dcabb39bdec181c5c71d1153
|
32e5ff7e12db77bccf267e847e5e0949add9b424
|
refs/heads/master
| 2020-03-28T17:47:21.526575
| 2018-11-02T17:03:50
| 2018-11-02T17:03:50
| 148,821,168
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,317
|
r
|
scripted-utilities.R
|
#' Create a .check file name for the script
#'
#' This is not a fail-safe procedure.
#'
#' @param target_dir directory to look in.
#' @param stub file-name stub the check file must start with.
#' @return single-string character vector of the file name.
check_file_path <- function(target_dir, stub) {
  # Look for an already-existing check file matching the stub.
  found_path <- dir(path = target_dir, pattern = paste0(stub, ".*\\.check"),
    full.names = TRUE, recursive = FALSE)
  if (length(found_path) == 0) {
    # No existing file: generate a fresh unique .check name
    # (tempfile only names the file; it is not created here).
    path <- tempfile(pattern = paste0(stub, "-"), tmpdir = target_dir,
      fileext = ".check")
  } else {
    # NOTE(review): if several files match, all of them are returned,
    # not a single string as documented -- confirm callers handle that.
    path <- found_path
  }
  return(path)
}
#' Create a .check file for the script
#'
#' This is a .yaml file that records file content checksums
#' and their modification times, etc... to verify whether a file
#' is available and has been updated.
#' @param target_dir directory to look in
#' @param job job definition; \code{job[['name']]} names the target stub
#' @param logger logging function called as \code{logger(level, ...)};
#'   the \code{error} level is expected to stop execution
#' @return check file path
create_check_file = function(target_dir, job, logger) {
  stub = paste0('scripted-target-', job[['name']])
  path = check_file_path(target_dir, stub)
  # Reuse an existing check file when one is already present.
  if (file.exists(path))
    return(path)
  cf_created = file.create(path, showWarnings = FALSE)
  if (!cf_created) {
    # Creation failed: distinguish "could not create at all" from
    # "path exists but is a directory".
    if (!file.exists(path)) {
      logger(error, "check file ('", path, "') could not be created ",
        "and does not exist.  Stopping.")
    } else {
      cf_is_dir = file.info(path)$isdir
      if (cf_is_dir)
        logger(error, "check file ('", path, "') exists but is a directory. ",
          "Stopping.")
    }
  }
  return(path)
}
#' Load a check file and check dependencies.
#'
#' Compares fresh checksums of the job's dependencies against the ones
#' stored in the check file and decides whether the script must run.
#'
#' @param job job definition; uses \code{job[['dependencies']]} and
#'   \code{job[['source_dir']]}
#' @param file path to check file
#' @param logger logger for output
#' @return logical; \code{TRUE} if the script needs to (re)run because a
#'   dependency is new or changed. As a side effect the check file is
#'   rewritten with the fresh checksums.
#' @importFrom indexer find_file
check_dependencies <- function(job, file, logger) {
  # Check for previous check file.
  will_error <- FALSE
  if (!file.exists(file))
    logger(error, "Check file ('", file, "') ",
      "does not exist. Stopping")
  if (file.info(file)$isdir)
    logger(error, "There is a directory at the check ",
      "file path ('", file, "'). Stopping.")
  cs <- yaml::yaml.load_file(file)
  if (is.null(cs)) {
    logger(warn, "Check file ('", file, "') does not ",
      "contain checksums. Creating fresh checksums. ",
      "Script will run.")
    cs <- list()
  }
  dependency_files <- job[['dependencies']]
  dependency_paths <- find_file(job[['source_dir']], dependency_files)
  dependency_files_found <- basename(dependency_paths)
  # Check file existence.
  for (dep in dependency_files) {
    if (dep %in% dependency_files_found)
      logger(debug, "Found dependency '", dep, "'.")
    else {
      will_error <- TRUE
      for (s in job[['source_dir']])
        logger(warn, "Searched in '", s, "'.")
      logger(warn, "Dependency not found: '", dep, "'.")
    }
  }
  if (will_error)
    logger(error, "Fatal errors encountered. Stopping. Check log file.")
  # Check sums.
  new_cs <- list()
  will_run <- FALSE
  for (dep in dependency_paths) {
    # Hash the file *contents*, not the path: openssl::md5() applied to
    # a character vector hashes the string itself, so the stored value
    # could never change when the file changed. readBin is used (rather
    # than a connection) because the 'file' argument shadows base::file.
    # as.character() makes the value round-trip cleanly through YAML.
    new_cs[[dep]] <- as.character(
      openssl::md5(readBin(dep, what = "raw", n = file.size(dep)))
    )
    if (!(dep %in% names(cs))) {
      will_run <- TRUE
      logger(warn, "The dependency '", dep, "' is a new dependency. ",
        "Script will be run.")
    } else if (new_cs[[dep]] != cs[[dep]]) {
      logger(warn, "The dependency '", dep, "' has been changed. ",
        "Script will be run.")
      will_run <- TRUE
    }
  }
  yaml::write_yaml(x = new_cs, file = file)
  return(will_run)
}
|
5d0e78e6a8ffa4151a6633707cfe11abe16267aa
|
baa3ca16ece08d8af0b085b58167c0876fb55c96
|
/Cuarto/Simulacion/Ejercicio5/mm1.r
|
812f1e705963a9501fa7d3117cada726d2b32afa
|
[] |
no_license
|
ZhijieQ/inf-uam
|
851d66b1484a63cdf6e2f35209075abf07bdb830
|
38079fdd5da5bb0ea35fa3265e813e0fe1a3bdd1
|
refs/heads/master
| 2021-06-01T10:20:44.917316
| 2016-08-23T18:24:12
| 2016-08-23T18:24:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,016
|
r
|
mm1.r
|
#DES application: queueing network built on an M/M/1 template (arrival rate 0.5, service rate 1.0)
# Initializes the global state for this application: a three-stage
# vehicle-inspection queueing network. apppars is a list carrying
# arrvrate and srvrate. Side effect: creates the global list mm1glbls
# and schedules the first arrival event.
mm1initglbls <- function(apppars) {
  mm1glbls <<- list()
  # Simulation parameters.
  mm1glbls$oldpercent <<- 0.4
  # Server 1 (front desk): normal distribution, mean 5, sd 5.
  mm1glbls$srv1avg <<- 5
  mm1glbls$srv1dsv <<- 5
  # Server 2 (old vehicles): exponential rate.
  mm1glbls$srv2rate <<- 0.1
  # Server 3 (all vehicles): truncated normal; parameters switch when the
  # queue reaches srv3limiteclientes waiting clients.
  mm1glbls$srv3avg <<- 12
  mm1glbls$srv3dsv <<- 3
  mm1glbls$srv3limiteclientes <<- 5
  mm1glbls$srv3avglimite <<- 8
  mm1glbls$srv3dsvlimite <<- 2
  # Arrivals at the front desk: Poisson.
  mm1glbls$arrvrate <<- apppars$arrvrate #10
  mm1glbls$srvrate <<- apppars$srvrate #10
  # Server queues, each holding the arrival times of queued jobs.
  mm1glbls$srvq <<- vector(length=0)
  mm1glbls$oldq <<- vector(length=0)
  mm1glbls$newq <<- vector(length=0)
  # Statistics accumulated during the run.
  mm1glbls$q1size <<- 0
  mm1glbls$q2size <<- 0
  mm1glbls$q3size <<- 0
  mm1glbls$njobsdone <<- 0 # jobs done so far
  mm1glbls$totwait <<- 0.0 # total wait time so far
  mm1glbls$acabados <<- 0  # jobs that completed all three stages
  # Set up the first event, an arrival; the application-specific data for
  # each event consists of its arrival time, which is recorded in order to
  # calculate the job's residence time in the system.
  #arrvtime <- rpois(1, mm1glbls$arrvrate)
  #schedevnt(arrvtime,"arrv",list(arrvtime=arrvtime))
  schednewarrv()
}
# Schedule the next external arrival at server 1 (Poisson interarrival).
schednewarrv <- function() {
  next_arrival <- sim$currtime + rpois(1, mm1glbls$arrvrate)
  schedevnt(next_arrival, "arrv", list(arrvtime = next_arrival))
}
# Route a job into stage 2: with probability (1 - oldpercent) the vehicle is
# "new" and goes straight to server 3; otherwise it is "old" and visits
# server 2 first. In either case the job starts service if the target
# server is idle, else it is appended to the queue.
schednewarrv2 <- function(head) {
  # NOTE(review): arrvtime below is computed but never used; head$arrvtime
  # is what gets propagated — confirm before removing.
  arrvtime <- sim$currtime + rpois(1,mm1glbls$arrvrate)
  # Draw in [0, 1]; above oldpercent means a "new" vehicle.
  if(runif(1, 0, 1) > mm1glbls$oldpercent) { #NEW
    #print("Generado new")
    type <- "new"
    # If server 3 is free, start service, else add to queue (the job is
    # recorded in the queue even when it is empty, for convenience).
    if(length(mm1glbls$newq) == 0) {
      mm1glbls$newq <<- head$arrvtime
      srvdonetime <- sim$currtime + timeserver3()
      schedevnt(srvdonetime, "srvdone3", list(arrvtime=head$arrvtime))
    }
    else mm1glbls$newq <<- c(mm1glbls$newq, head$arrvtime)
  }
  else { #OLD
    type <- "old"
    #print("Generado old")
    # If server 2 is free, start service, else add to queue (the job is
    # recorded in the queue even when it is empty, for convenience).
    if(length(mm1glbls$oldq) == 0) {
      mm1glbls$oldq <<- head$arrvtime
      srvdonetime <- sim$currtime + rexp(1, mm1glbls$srv2rate)
      schedevnt(srvdonetime, "srvdone2", list(arrvtime=head$arrvtime))
    }
    else mm1glbls$oldq <<- c(mm1glbls$oldq, head$arrvtime)
    #print(type)
  }
}
# Draw a service time for server 3 (truncated normal via rnormT). When the
# queue has reached the client limit, the faster parameter set is used.
timeserver3 <- function() {
  over_limit <- length(mm1glbls$newq) >= mm1glbls$srv3limiteclientes
  if (over_limit) {
    return(rnormT(1, mm1glbls$srv3avglimite, mm1glbls$srv3dsvlimite))
  }
  rnormT(1, mm1glbls$srv3avg, mm1glbls$srv3dsv)
}
# Handle an external arrival at server 1: start service immediately when the
# server is idle, otherwise enqueue the job; then schedule the next arrival.
reactarrival <- function(head) {
  # If server free, start service, else add to queue (added to queue
  # even if empty, for convenience).
  if (length(mm1glbls$srvq) == 0) {
    mm1glbls$srvq <<- head$arrvtime
    # Service time is truncated normal (see mm1initglbls parameters).
    srvdonetime <- sim$currtime + rnormT(1,mm1glbls$srv1avg, mm1glbls$srv1dsv)
    schedevnt(srvdonetime,"srvdone",list(arrvtime=head$arrvtime))
  }
  else mm1glbls$srvq <<- c(mm1glbls$srvq,head$arrvtime)
  # Generate next arrival.
  schednewarrv()
}
# Handle completion of service at server 1: record statistics, dequeue the
# finished job, route it to stage 2 (old/new split), and start service for
# the next queued job if any.
reactdone1 <- function(head) {
  # Accounting for the job that just finished.
  mm1glbls$njobsdone <<- mm1glbls$njobsdone + 1
  mm1glbls$totwait <<- mm1glbls$totwait + sim$currtime - head$arrvtime
  # Remove it from the server-1 queue.
  mm1glbls$srvq <<- mm1glbls$srvq[-1]
  # Route the job on to servers 2/3.
  schednewarrv2(head)
  # More jobs still in the queue?
  if (length(mm1glbls$srvq) > 0) {
    # BUG FIX: queued jobs previously drew rexp(1, srvrate) while the first
    # job (in reactarrival) drew rnormT(srv1avg, srv1dsv) — a leftover from
    # the M/M/1 template. Use the same truncated-normal service-time
    # distribution for every server-1 job.
    srvdonetime <- sim$currtime + rnormT(1, mm1glbls$srv1avg, mm1glbls$srv1dsv)
    schedevnt(srvdonetime, "srvdone", list(arrvtime = mm1glbls$srvq[1]))
  }
}
# Handle completion at server 2 (old vehicles): dequeue the job, forward it
# to server 3, and start service for the next old vehicle if one is waiting.
reactdone2 <- function(head) {
  #print(length(mm1glbls$oldq))
  # Remove the finished job from the old-vehicle queue.
  mm1glbls$oldq <<- mm1glbls$oldq[-1]
  # Forward the job to server 3: start service if idle, else enqueue.
  if(length(mm1glbls$newq) == 0) {
    mm1glbls$newq <<- head$arrvtime
    srvdonetime <- sim$currtime + timeserver3()
    schedevnt(srvdonetime, "srvdone3", list(arrvtime=head$arrvtime))
  }
  else mm1glbls$newq <<- c(mm1glbls$newq, head$arrvtime)
  # More jobs still in the old-vehicle queue?
  if (length(mm1glbls$oldq) > 0) {
    # Schedule new service (exponential, rate srv2rate).
    srvdonetime <- sim$currtime + rexp(1,mm1glbls$srv2rate)
    schedevnt(srvdonetime,"srvdone2",list(arrvtime=mm1glbls$oldq[1]))
  }
}
# Handle completion at server 3: the job leaves the system. Dequeue it,
# start service for the next queued job (if any), and count the completion.
reactdone3 <- function(head) {
  mm1glbls$newq <<- mm1glbls$newq[-1]
  if (length(mm1glbls$newq) > 0) {
    next_done <- sim$currtime + timeserver3()
    schedevnt(next_done, "srvdone3", list(arrvtime = mm1glbls$newq[1]))
  }
  # One more client fully processed.
  mm1glbls$acabados <<- mm1glbls$acabados + 1
}
# Application-specific event dispatcher called by dosim() in the general DES
# library. Records queue-length statistics for every event, then delegates
# to the handler matching head$evnttype.
mm1reactevnt <- function(head) {
  printdebug(head)
  #if(length(mm1glbls$newq) >=5) {
  #  print(length(mm1glbls$newq))
  #}
  # Track queue sizes over time for the final plots.
  mm1glbls$q1size <<- c(mm1glbls$q1size, length(mm1glbls$srvq))
  mm1glbls$q2size <<- c(mm1glbls$q2size, length(mm1glbls$oldq))
  mm1glbls$q3size <<- c(mm1glbls$q3size, length(mm1glbls$newq))
  switch(head$evnttype,
         arrv     = reactarrival(head),  # arrival at server 1
         srvdone  = reactdone1(head),    # server 1 (front desk) done
         srvdone2 = reactdone2(head),    # server 2 (old vehicles) done
         srvdone3 = reactdone3(head))    # server 3 (all vehicles) done
}
# Print the final tally of fully-processed clients and draw the plots.
mm1prntrslts <- function() {
  #print("mean wait:")
  #print(mm1glbls$totwait/mm1glbls$njobsdone)
  print('Clientes atendidos')
  print(mm1glbls$acabados)
  graficos1()
}
# Draw the evolution and density plots of the three queue sizes in a
# 3x2 panel grid.
graficos1 <- function() {
  par(mfrow=c(3,2))
  plot(mm1glbls$q1size, type='h', main='Evolución de cola en la entrada', xlab='Tiempo', ylab='Tamaño de cola1')
  plot(density(mm1glbls$q1size),main="Funcion de densidad tamaño de cola1")
  plot(mm1glbls$q2size, type='h', main='Evolución de cola en servidor viejos', xlab='Tiempo', ylab='Tamaño de cola2')
  plot(density(mm1glbls$q2size),main="Funcion de densidad tamaño de cola2")
  plot(mm1glbls$q3size, type='h', main='Evolución de cola en servidor general',xlab='Tiempo', ylab='Tamaño de cola3')
  # BUG FIX: this panel previously plotted density(q2size) again; it must
  # show the density of queue 3 to match its title.
  plot(density(mm1glbls$q3size),main="Funcion de densidad tamaño de cola3")
}
# Print a trace line for each event type when the local `debug` flag is
# flipped to TRUE; otherwise a silent no-op.
printdebug <- function(head) {
  debug <- FALSE
  if (!debug) {
    return(invisible(NULL))
  }
  msg <- switch(head$evnttype,
                arrv     = "Arrival servidor 1",
                srvdone  = "Salir servidor 1",
                srvdone2 = "Salir servidor 2, entrar 3",
                srvdone3 = "Procesando al final3")
  if (!is.null(msg)) {
    print(msg)
  }
}
#################### Example MAIN PROGRAM
if (TRUE) {
source("rurtinasRsimulacion.r") # load the general DES library script
source("rnormT.r")              # truncated-normal sampler used by the servers
# Direct output to a file; output is also sent to the terminal.
sink("kk.txt", append=FALSE, split=TRUE)
# Run the simulation:
# DES application:
# M/M/1 queue, arrival rate 0.5, servicerate 1.0, maxsimtime=100
dosim (mm1initglbls, mm1reactevnt, mm1prntrslts, maxsimtime=10000.0,
list(arrvrate=10,srvrate=10.0), dbg=FALSE)
# Return output to the terminal.
sink()
}
|
b9fcce32bdba161347c8552b0a128ffe3d49126b
|
9f983a6b5cec8fdee6ec13fc84d834634b762287
|
/man/partitionATSDir.Rd
|
3ea9096e46dd6d75c9897cd3791a36f184f6d69d
|
[] |
no_license
|
cbergmeir/tsExpKit
|
f30939015eb0ca8de33ef647d69bf06cf434af9d
|
c61a83697e942889cef9ed2b815b3cb01b7b2285
|
refs/heads/master
| 2021-03-12T20:06:10.572459
| 2013-01-29T18:12:20
| 2013-01-29T18:12:20
| 7,897,910
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 958
|
rd
|
partitionATSDir.Rd
|
\name{partitionATSDir}
\alias{partitionATSDir}
\title{Partition (to training and test sets) all time series in an ATS directory}
\usage{
partitionATSDir(inPath,
partitionParameters = list(type = c("blockedCV"), order = 0, numPartitions = 5, ratioValSet = 0.2),
outPath = NULL, savePlots = TRUE)
}
\arguments{
\item{inPath}{the input path, i.e., the ATS directory}
\item{partitionParameters}{a list giving all parameters
that are passed to \code{\link{computePartitions}}}
\item{outPath}{the output path. If \code{NULL}, it is
generated automatically as a subdirectory of the
\code{inPath}}
\item{savePlots}{if \code{TRUE}, a directory containing
plots of all partitions is generated}
}
\description{
The function loads all files in an ATS directory into an
ATSList, applies \code{\link{computePartitions}}, and
saves the result to an ATS directory.
}
\seealso{
\code{\link{embedATSDir}}, \code{\link{normalizeATSDir}}
}
|
1d3b3dc8f7fb6127422f0d3daa3b2bcdd1fc5dae
|
c84f4694c07cd32674abee7235e1d31353e225a3
|
/Distributions/eachGraphs/extremeValueDistribution_2(FrechetDistributionwithLocationParameter).R
|
56ee591ed1e0cd155f8e93ffdb4da366de358074
|
[] |
no_license
|
praster1/Note_SurvivalAnalysis
|
c1e2fee2865432fa4be89a4000c20e38ef1bef3e
|
db50363874c69fea6d4d47a24409c387c5276f84
|
refs/heads/master
| 2020-03-26T23:00:24.345471
| 2018-12-04T10:34:49
| 2018-12-04T10:34:49
| 145,502,914
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,279
|
r
|
extremeValueDistribution_2(FrechetDistributionwithLocationParameter).R
|
source("colorPalette.R")  # project helper providing colorPalette()
require(VGAM)  # NOTE(review): library() is preferred; require() only warns on failure
##### lfrechet Distribution (Frechet with location parameter)
### Parameter grids swept by the plots below.
location = c(-1, -0.75, -0.5, -0.25, 0, 0.25, 0.5, 0.75, 1)
shape = c(0.25, 0.5, 0.75, 1, 2, 4, 8)
scale = c(0.25, 0.5, 0.75, 1, 2, 4, 8)
### Input variable (evaluation grid).
x = seq(0.1, 10, length.out = 1000)
# Naming convention used in the original notes:
# m : shape / c
# beta : scale / b
# mu : location / a
### Lifetime (density) function of the Fréchet distribution with location:
### f(x) = (shape/scale) * ((x-location)/scale)^(-(shape+1))
###        * exp(-((x-location)/scale)^(-shape)),  for x > location.
dlfrechet = function(x, location = 0, shape = 1, scale = 1)
{
	# BUG FIX: the original used (location - x); on the support x > location
	# that base is negative, and a negative number raised to a non-integer
	# power is NaN in R, so the density was NaN everywhere.
	fx = (shape/scale) * ((x - location) / scale)^(-(shape+1)) * exp(-((x - location) / scale)^(-shape))
	return(fx)
}
### Random-variate generator: discretized sampling on [min, max] with
### probabilities proportional to min-max-rescaled density values.
rlfrechet = function (n, min=0.1, max=10, location = 0, shape = 1, scale = 1)
{
	grid = seq(min, max, length = 1000000)
	dens = dlfrechet(grid, location = location, shape = shape, scale = scale)
	# Rescale density values to [0, 1]; sample() renormalizes internally.
	weights = (dens - min(dens)) / (max(dens) - min(dens))
	sample(grid, size = n, prob = weights, replace = TRUE)
}
### Cumulative distribution function, defined as 1 - slfrechet(x).
plfrechet = function(x, location = 0, shape = 1, scale = 1)
{
	# BUG FIX: the original called slfrechet(x, alpha, beta) with undefined
	# variables `alpha` and `beta`; forward this function's own parameters.
	fx = -(slfrechet(x, location = location, shape = shape, scale = scale) - 1)
	return(fx)
}
### Survival-labelled function: exp(-((x-location)/scale)^(-shape)).
### NOTE(review): this expression is actually the Fréchet CDF rather than
### the survival function 1-CDF; the name is kept from the original file —
### confirm intent. Default location = 1 also differs from the other
### functions (0) — confirm.
slfrechet = function (x, location = 1, shape = 1, scale = 1)
{
	# BUG FIX: the original used (location - x), which is negative on the
	# support and yields NaN when raised to a non-integer power.
	fx = exp(-((x - location) / scale)^(-shape))
	return(fx)
}
### Hazard function: h(x) = f(x) / S(x), built from the density and
### survival helpers above.
hlfrechet = function (x, location = 1, shape = 1, scale = 1)
{
	dens = dlfrechet(x, location = location, shape = shape, scale = scale)
	surv = slfrechet(x, location = location, shape = shape, scale = scale)
	return(dens / surv)
}
##### Plot
# Draw one panel per (location, shape) pair, overlaying one curve per scale
# value, for the requested function ("dlfrechet", "plfrechet", "slfrechet"
# or "hlfrechet"). The four near-identical copy-pasted branches of the
# original are collapsed into a single loop driven by a lookup table.
# BUG FIX: the CDF panel title was garbled ("Culocationlative", a bad
# mu->location substitution); restored to "Cumulative".
plot.lfrechet_seq = function(x, location = 1, shape = 1, scale = 1, xlim=c(0, 10), ylim=c(0, 5), func="dlfrechet")
{
	color = colorPalette(300)
	# Map the function name to the function object and its panel title.
	fun_map = list(
		dlfrechet = list(f = dlfrechet, main = "Life Distribution Function"),
		plfrechet = list(f = plfrechet, main = "Cumulative Distribution Function"),
		slfrechet = list(f = slfrechet, main = "Survival Function"),
		hlfrechet = list(f = hlfrechet, main = "Hazard Function")
	)
	entry = fun_map[[func]]
	if (is.null(entry)) {
		return(invisible(NULL))  # unknown func: nothing to draw (as before)
	}
	f = entry$f
	color_counter = 1
	for (i in seq_along(location)) {       ### parameter: location
		for (j in seq_along(shape)) {        ### parameter: shape
			color_counter_init = color_counter
			legend_name = NULL
			# Empty frame (type = 'n'); curves are overlaid with lines().
			plot(x, f(x, location = location[1], shape = shape[1], scale = scale[1]),
			     xlim = xlim, ylim = ylim, col = color[1], lwd = 2, type = 'n',
			     main = entry$main)
			for (k in seq_along(scale)) {      ### parameter: scale
				lines(x, f(x, location = location[i], shape = shape[j], scale = scale[k]),
				      col = color[color_counter], lwd = 2)
				color_counter = color_counter + 1
				legend_name = c(legend_name, paste("location = ", location[i], " / shape = ", shape[j], " / scale = ", scale[k], sep=""))
			}
			legend('right', bty = 'n', lwd = 2,
			       col = color[color_counter_init:(color_counter - 1)],
			       legend = legend_name)
		}
	}
	invisible(NULL)
}
# Draw the panel grids for each of the four function types. Each call sets
# an 8x8 layout and sweeps the parameter grids defined above.
par(mfrow = c(8, 8))
plot.lfrechet_seq(x, location, shape, scale, xlim=c(min(x), max(x)), ylim=c(-10, 10), func="dlfrechet")
par(mfrow = c(8, 8))
plot.lfrechet_seq(x, location, shape, scale, xlim=c(min(x), max(x)), ylim=c(-5, 5), func="plfrechet")
par(mfrow = c(8, 8))
plot.lfrechet_seq(x, location, shape, scale, xlim=c(min(x), max(x)), ylim=c(-5, 5), func="slfrechet")
par(mfrow = c(8, 8))
plot.lfrechet_seq(x, location, shape, scale, xlim=c(min(x), max(x)), ylim=c(-30, 30), func="hlfrechet")
|
ec5a7203b21a2bdaefca1f0670fb68532c30a368
|
1faf1557d278511830c2bf8ff83538c3544377ec
|
/cachematrix.R
|
01a0bc06e50db16049efa96c76882ae2522c253a
|
[] |
no_license
|
bryonjacob/ProgrammingAssignment2
|
51aaf7ce93e800703edbb3244181cfdef3e31c93
|
56914fd420244c669b87b9e6674c9cea736e6aa7
|
refs/heads/master
| 2021-01-15T10:52:44.586550
| 2014-09-14T20:47:21
| 2014-09-14T20:47:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,263
|
r
|
cachematrix.R
|
## This file contains functions for optimizing matrix operations by caching
## the results of potentially expensive computations. Caching of the "solve"
## method is implemented, to compute and cache the inverse of an invertible
## square matrix.
## Create a "cache matrix": a list of closures wrapping matrix `x` that can
## memoize its inverse. set() swaps in a new matrix and invalidates the
## cache; setinverse()/getinverse() store and retrieve the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(value) {
    x <<- value
    cached_inverse <<- NULL  # new matrix: old inverse is no longer valid
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Return the inverse of a cache matrix created by makeCacheMatrix():
## computed with solve() on first use and served from the cache afterwards.
## Extra arguments are forwarded to solve(); with b omitted, solve() uses
## the identity matrix and therefore returns the inverse.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
abc0550bebfc28e9c6ef1a7463ebf1e48c57030f
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/MGDrivE/inst/testfiles/calcCos/libFuzzer_calcCos/calcCos_valgrind_files/1612732889-test.R
|
dc7c8c7cdf959ef5e85ffd0a3b234ca37fe9108a
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 184
|
r
|
1612732889-test.R
|
# Fuzzer-generated regression input for MGDrivE::calcCos: a 2x1 matrix of
# extreme double values plus a radius `r` (values produced by libFuzzer).
testlist <- list(latLongs = structure(c(1.39988503525853e-306, 1.43400496860732e-314 ), .Dim = 2:1), r = 3.59560005352873e+252)
# Invoke the function under test with the fuzzed arguments.
result <- do.call(MGDrivE::calcCos,testlist)
# Display the structure of whatever came back (valgrind watches for crashes).
str(result)
|
449397251a1453ddfb8b73a70fcfea70fda4830e
|
b3962eff09caad325e910526023892e71cb51e9a
|
/man/ensebl_id.Rd
|
4fbec06feadf29ff3ca1ab6462d4441b7e67142e
|
[] |
no_license
|
ybai3/SCQC
|
d3b5e17d506842a2ce2245079abca0d58f14453b
|
3050d97b19508c2866dabff59166bcac5b52cb22
|
refs/heads/master
| 2020-03-29T12:51:38.294165
| 2018-09-24T02:13:19
| 2018-09-24T02:13:19
| 149,924,302
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 713
|
rd
|
ensebl_id.Rd
|
\name{ensebl_id}
\alias{ensebl_id}
\docType{data}
\title{Ensembl IDs of housekeeping gene list
%% ~~ data name/kind ... ~~
}
\description{Housekeeping gene list provided by Eisenberg E and Levanon EY in Human Housekeeping genes, revisited.
%% ~~ A concise (1-5 lines) description of the dataset. ~~
}
\usage{data("ensebl_id")}
\format{
character vector
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{2013 Oct;29(10):569-74. doi: 10.1016/j.tig.2013.05.010. Epub 2013 Jun 27.
%% ~~ possibly secondary sources and usages ~~
}
\examples{
data(ensebl_id)
}
\keyword{datasets}
|
49255876ce464555366e1a45f5b21d02e1d04c4a
|
5e1fdf6e9e63c8c8d09f3bdf0a1fb519d3770318
|
/ml_titanic.R
|
d278bf4529145addd83d66982f5e660bdc91c125
|
[
"MIT"
] |
permissive
|
yerlandana/R_project
|
410a215b408abfa484f385bfe1408af812709228
|
8cd004939da2c0650e70203261d84f8f8fa75afc
|
refs/heads/master
| 2020-06-15T21:08:46.355401
| 2020-01-12T15:28:14
| 2020-01-12T15:28:14
| 195,392,812
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,164
|
r
|
ml_titanic.R
|
# Titanic survival classification with CART (rpart), evaluated in-sample and
# on a 75/25 train/test split.
# NOTE(review): install.packages() calls are kept to preserve the original
# script, though installs normally belong outside an analysis script.
library(rpart)
install.packages('DMwR')
library(DMwR)
install.packages('rpart.plot')
library(rpart.plot)
dft <- read.csv('C://Users/dyerl/Desktop/Titanic dataset.csv')
# Fit a classification tree on the full data set.
Ttree<- rpart(Survived~Pclass+Sex+Age+SibSp+Parch+Fare+Embarked, data= dft, method="class", control = rpart.control(minbucket = 25))
prp(Ttree)
Ttree
PredictCART <- predict(Ttree, newdata = dft, type="class",control=rpart.control(minbucket=3))
# BUG FIX: the line below used a C-style `//` trailing comment, which is a
# parse error in R; `cp` is the complexity-pruning (information gain) threshold.
PredictCART <- predict(Ttree, newdata = dft, type="class",control=rpart.control(cp=0.01))  # information gain
table(PredictCART, dft$Survived)
(515 + 251) / 891  # in-sample accuracy
install.packages('caret', dependencies = TRUE)
library(caret)
set.seed(17)
# 75/25 split: 891 rows -> 668 train, 223 test.
index <- sample(nrow(dft), size = 0.75 * nrow(dft))
index
train <- dft[index, ]
test <- dft[-index, ]
Ttree<- rpart(Survived~Pclass+Sex+Age+SibSp+Parch+Fare+Embarked,
              data= train, method="class", control = rpart.control(cp = 0.05))
PredictCART <- predict(Ttree, newdata = train, type="class")
table(PredictCART, train$Survived)
(375+170)/668  # training accuracy
Predict_test <- predict(Ttree, newdata = test, type="class")
table(Predict_test, test$Survived)
# BUG FIX: denominator was 22 (a typo); the test set has 223 rows.
(121+61)/223  # test accuracy
|
b269934ef5f0815eae42555d591485bd3e482327
|
4999d197a87922bbbed19b2512419ddf05201b19
|
/atividade_6.r
|
4f045e49c0485f0db412da32260ed9eea7f6c900
|
[] |
no_license
|
bahbbc/RP-Atividades
|
05e1d6d94b437f069d70dfb4de3c9664d6c089b4
|
0e5c3fea436da991c176c15b9d4d8a29c5e39a23
|
refs/heads/master
| 2020-05-20T13:37:35.066453
| 2014-05-06T01:48:41
| 2014-05-06T01:48:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,010
|
r
|
atividade_6.r
|
# Compute the sizes of k (near-)equal folds over the rows of `data`.
# Returns a numeric vector of length k summing to nrow(data); the first
# (nrow %% k) folds receive one extra row.
split_data = function(data, k){
	split = floor(nrow(data)/k)
	left = nrow(data) %% k
	sizes = rep(split, k)
	# BUG FIX: the original looped `for (i in 1:left)`, which iterates over
	# c(1, 0) when left == 0 and wrongly incremented sizes[1]; seq_len(0)
	# is empty, so evenly divisible inputs are now handled correctly.
	for(i in seq_len(left)){
		sizes[i] = sizes[i] + 1
	}
	return(sizes)
}
# For a data set whose first column holds the class label, report fold sizes
# (via split_data) for the whole data and for each of the two classes, so
# that the original class proportions can be preserved.
data_proportion = function(data, k){
	# Split rows by class; the first column is assumed to be the label.
	label = data[,1]
	per_class = split(data, label)
	# Materialize the two class subsets as data frames.
	class1 = data.frame(per_class[1])
	class2 = data.frame(per_class[2])
	# Fold sizes for all rows and for each class subset.
	result = data.frame(
		total = split_data(data, k),
		attr1 = split_data(class1, k),
		attr2 = split_data(class2, k)
	)
	return(result)
}
# Load the WDBC data set (comma-separated, no header) and keep columns 2:32
# (class label plus 30 features), dropping the sample-id column.
data = data_set = read.table('wdbc.data', sep=",")
data = cbind(data_set[2:32])
# Report fold sizes for a 13-fold split preserving class proportions.
data_proportion(data, 13)
|
a2f5ee2ff236243865251cac960c44c0b9f0f3f0
|
12a9bea8cfff9e5dcd44651102a5e0adf477164a
|
/explorations/passes.R
|
8e1e6961c605e2bb4c8243c7128d0b9b4bedabde
|
[] |
no_license
|
duncantl/RLLVMCompile
|
2b98a04f1f7e71f973a281b40457f5730e38f284
|
7fad5bd394a6f74ace0f6053a5d08e4f15cf3a1f
|
refs/heads/master
| 2021-01-19T01:42:02.316459
| 2017-03-07T00:49:31
| 2017-03-07T00:49:31
| 3,894,344
| 32
| 3
| null | 2015-03-03T13:27:54
| 2012-04-01T18:04:28
|
R
|
UTF-8
|
R
| false
| false
| 1,348
|
r
|
passes.R
|
library(RLLVMCompile)
# Return the symmetric difference of the textual IR lines of two modules.
# Module objects are rendered with showModule() and split into lines first;
# character vectors are used as-is.
diffModule =
function(a, b)
{
   to_lines = function(m) {
     if (is(m, "Module")) strsplit(showModule(m, TRUE), "\\n")[[1]] else m
   }
   a = to_lines(a)
   b = to_lines(b)
   c(setdiff(a, b), setdiff(b, a))
}
# This is a place to explore the effects of some of the compiler Pass classes
# Deliberately dead-store-laden function used to exercise LLVM dead-code
# elimination passes below: a, b and c are never read, and b is stored
# twice. Only x is returned. Do not "clean up" — the dead stores are the
# point of the experiment.
f =
function(x, y)
{
   a = 1L
   b = 2L
   c = 1L + 2L # XXX putting x here causes problems!!!!
   b = 3L
   x
}
# Empty function; a minimal compilation target kept for experimentation.
g =
function()
{
}
# Compile f into a fresh module without optimization, keep an untouched
# clone (m1) as the baseline, then apply each pass to its own clone and
# print which IR lines the pass added/removed relative to the baseline.
m = Module()
fc = compileFunction(f, Int32Type, list(Int32Type, Int32Type), m, optimize = FALSE)
m1 = clone(m)
# Grab original version of the code, unoptimized
str = strsplit(showModule(m1, TRUE), "\\n")[[1]]
#mgr = passManager(NULL, FALSE)
# passManager(m)
p = list(createAggressiveDCEPass, createDeadStoreEliminationPass, createDeadStoreEliminationPass)
lapply(p, function(f) {
          # One fresh pass manager and one fresh clone per pass, so the
          # passes are compared independently rather than cumulatively.
          mgr = passManager(NULL, FALSE)
          addPass(mgr, f())
          m = clone(m)
          run(mgr, m)
        #  print(showModule(m))
          print(diffModule(m, m1))
      })
#addPass(mgr, createAggressiveDCEPass()) # Doesn't kill off the extra variables.
#addPass(mgr, createDeadStoreEliminationPass()) # This does
#addPass(mgr, createDeadStoreEliminationPass()) # So does this
#Optimize(m, mgr = mgr)
#run(mgr, m)
#showModule(m)
#print(str)
#print(diffModule(m, m1))
|
5a1d635bb0d556786592c8abf47eb65b64bd6ff7
|
3807c0d642d7c4789f3284f1b0429711973aa370
|
/R/radf-methods.R
|
2bf0c06ca1f4e4e5c91749423083e6525dd69a39
|
[] |
no_license
|
cran/exuber
|
e938e35fdb5e0a4708228234a20089e23cd40fc3
|
453054b85ca14707ee86774a979e95312c3ba627
|
refs/heads/master
| 2023-04-06T08:29:47.039261
| 2023-03-22T22:10:02
| 2023-03-22T22:10:02
| 137,660,134
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,649
|
r
|
radf-methods.R
|
# summary -----------------------------------------------------------------
#' Summarizing `radf` models
#'
#' \code{summary} method for radf models that consist of `radf_obj` and `radf_cv`.
#'
#' @param object An object of class `radf_obj`. The output of \code{\link[=radf]{radf()}}.
#' @param cv An object of class `radf_cv`. The output of \code{\link[=radf_mc_cv]{radf_mc_cv()}},
#' \code{\link[=radf_wb_cv]{radf_wb_cv()}} or \code{\link[=radf_sb_cv]{radf_sb_cv()}}.
#' @param ... Further arguments passed to methods. Not used.
#'
#' @return Returns a list of summary statistics, which include the estimated ADF,
#' SADF, and GSADF test statistics and the corresponding critical values
#'
#' @importFrom tidyr pivot_wider
#' @importFrom dplyr filter select
#' @importFrom rlang is_logical
#' @name summary.radf_obj
#' @examples
#' \donttest{
#' # Simulate bubble processes, compute the test statistics and critical values
#' rsim_data <- radf(sim_data)
#'
#' # Summary, diagnostics and datestamp (default)
#' summary(rsim_data)
#'
#' #Summary, diagnostics and datestamp (wild bootstrap critical values)
#'
#' wb <- radf_wb_cv(sim_data)
#'
#' summary(rsim_data, cv = wb)
#'
#' }
#' @export
summary.radf_obj <- function(object, cv = NULL, ...) {
  # Fall back to the critical values stored with the object when none given.
  cv <- cv %||% retrieve_crit(object)
  assert_class(cv, "radf_cv")
  assert_match(object, cv)
  # Dispatch on the critical-value class (mc/wb/sb) to build the tables.
  ret <- summary_radf(cv, object, ...)
  # Carry estimation and critical-value settings along for print.sm_radf.
  ret %>%
    add_attr(
      minw = get_minw(object),
      lag = get_lag(object),
      method = get_method(cv),
      iter = get_iter(cv)
    ) %>%
    add_class("sm_radf")
}
# Internal S3 generic dispatching on the class of the critical-value object.
summary_radf <- function(cv, ...){
  UseMethod("summary_radf")
}
# Monte Carlo / wild-bootstrap critical values: one summary table per series,
# with one critical-value column per significance level.
summary_radf.mc_cv <- summary_radf.wb_cv <- function(cv, object, ...) {
  ret <- list()
  snames <- series_names(object)
  sm <- tidy_join(object, cv) %>%
    pivot_wider(names_from = sig, values_from = crit)
  for (nms in snames) {
    ret[[nms]] <- filter(sm, id == nms) %>%
      select(-id)
  }
  ret
}
# Panel (sieve-bootstrap) critical values: a single panel-level table.
summary_radf.sb_cv <- function(cv, object, ...) {
  ret <- list()
  ret[["panel"]] <- tidy_join(object, cv, panel = TRUE) %>%
    pivot_wider(names_from = sig, values_from = crit) %>%
    select(-id)
  ret
}
#' @importFrom glue glue
#' @export
# Pretty-print the summary: a ruled header with estimation settings, then
# the per-series (or panel) tables.
print.sm_radf <- function(x, ...) {
  # Monte Carlo methods report repetitions (nrep); bootstraps report nboot.
  iter_char <- if (is_mc(x)) "nrep" else "nboot"
  cat_line()
  cat_rule(
    left = glue("Summary (minw = {get_minw(x)}, lag = {get_lag(x)})"),
    right = glue("{get_method(x)} ({iter_char} = {get_iter(x)})")
  )
  cat_line()
  print.listof(x, ...)
}
# diagnostics -------------------------------------------------------------
#' Diagnostics on hypothesis testing
#'
#' Provides information on whether the null hypothesis of a unit root is rejected
#' against the alternative of explosive behaviour for each series in a dataset.
#'
#' @param object An object of class `obj`.
#' @param cv An object of class `cv`.
#' @param ... Further arguments passed to methods.
#'
#' @return Returns a list with the series that reject (positive) and the series
#' that do not reject (negative) the null hypothesis, and at what significance level.
#'
#' @details
#' Diagnostics also stores a vector whose elements take the value of 1 when
#' there is a period of explosive behaviour and 0 otherwise.
#'
#' @export
# S3 generic: dispatches on the class of `object` (see diagnostics.radf_obj).
diagnostics <- function(object, cv = NULL, ...) {
  UseMethod("diagnostics", object)
}
#' @rdname diagnostics
#' @importFrom dplyr case_when
#' @param option Whether to apply the "gsadf" or "sadf" methodology (default = "gsadf").
#' @export
#' @examples
#'
#' rsim_data <- radf(sim_data)
#' diagnostics(rsim_data)
#'
#' diagnostics(rsim_data, option = "sadf")
# For each series (or the panel), compare the chosen test statistic against
# the 90/95/99% critical values and classify the unit-root null as rejected
# (and at which level) or not. Returns a classed list with `positive`,
# `negative`, `sig` and a 0/1 `dummy` vector (1 = rejection at 5%).
diagnostics.radf_obj <- function(object, cv = NULL,
                                 option = c("gsadf", "sadf"), ...) {
  # assert_class(object, "radf")
  cv <- cv %||% retrieve_crit(object)
  assert_class(cv, "radf_cv")
  assert_match(object, cv)
  option <- match.arg(option)
  if (option == "sadf" && is_sb(cv)) {
    # BUG FIX: error message previously read "cannot be be set".
    stop_glue("argument 'option' cannot be set to 'sadf' when cv is of class 'sb_cv'")
  }
  snames <- series_names(object)
  # Panel critical values only exist for the panel GSADF statistic.
  if (is_sb(cv)) {
    option <- "gsadf_panel"
  }
  # One row per series with tstat plus cv90/cv95/cv99 columns.
  out <- tidy_join(object, cv) %>%
    pivot_wider(names_from = sig, values_from = crit, names_prefix = "cv") %>%
    filter(stat == option)
  # 0/1 rejection indicator at the 5% level (useful in simulation exercises).
  dummy <- case_when(
    out$tstat < out$cv95 ~ 0,
    out$tstat >= out$cv95 ~ 1
  )
  # Strongest significance level at which the null is rejected.
  sig <- case_when(
    out$tstat < out$cv90 ~ "Reject",
    out$tstat >= out$cv90 & out$tstat < out$cv95 ~ "10%",
    out$tstat >= out$cv95 & out$tstat < out$cv99 ~ "5%",
    out$tstat >= out$cv99 ~ "1%"
  )
  dummy_lgl <- as.logical(dummy)
  if (is_sb(cv)) {
    positive <- ifelse(dummy_lgl , "panel", NA)
    negative <- ifelse(dummy_lgl, NA, "panel")
  } else {
    positive <- snames[as.logical(dummy_lgl)]
    negative <- snames[!as.logical(dummy_lgl)]
  }
  list(
    positive = positive,
    negative = negative,
    sig = sig,
    dummy = dummy
  ) %>%
    add_attr(
      panel = is_sb(cv),
      series_names = if (!is_sb(cv)) snames,
      method = get_method(cv),
      option = option,
    ) %>%
    add_class("dg_radf")
}
#' @export
# Tidy a diagnostics object: one row per series with logical positive /
# negative columns and the rejection significance level (NA when the null
# cannot be rejected).
tidy.dg_radf <- function(x, ...) {
  snames <- series_names(x)
  sig <- gsub("%", "", x$sig)
  tibble(
    "series" = snames,
    "positive" = ifelse(snames %in% x$positive, TRUE, FALSE),
    "negative" = ifelse(snames %in% x$negative, TRUE, FALSE),
    "sig" = as.factor(ifelse(sig == "Reject", NA, sig))
  )
}
# Run diagnostics() and abort unless at least one series rejects the null at
# the 5% level; used by datestamp() to guarantee there is something to stamp.
# Returns the unclassed diagnostics list on success.
diagnostics_internal <- function(...) {
  dg <- diagnostics(...)
  if (all(dg$dummy == 0)) {
    stop_glue("Cannot reject H0 at the 5% significance level")
  }
  if (purrr::is_bare_character(dg$positive, n = 0)) {
    stop_glue("Cannot reject H0")
  }
  unclass(dg)
}
#' @importFrom cli cat_line cat_rule
#' @importFrom glue glue
#' @importFrom rlang is_bare_character
#' @export
# Pretty-print diagnostics: a ruled header, then per-series (or panel)
# rejection verdicts, with the significance level highlighted in red.
print.dg_radf <- function(x, ...) {
  cli::cat_line()
  cli::cat_rule(
    left = glue('Diagnostics (option = {attr(x, "option")})'),
    right = get_method(x)
  )
  cli::cat_line()
  if (attr(x, "panel")) {
    if (x$sig == "Reject")
      cat(" Cannot reject H0 \n")
    else
      cat(" Rejects H0 at the", cli::col_red(x$sig), "significance level\n")
  } else {
    # Pad each series name so the verdicts line up in a column.
    width <- nchar(series_names(x))
    ngaps <- max(8, width) - width
    for (i in seq_along(series_names(x))) {
      cat(series_names(x)[i], ":" , rep(" ", ngaps[i]), sep = "")
      if (x$sig[i] == "Reject")
        cat(" Cannot reject H0 \n")
      else
        cat(" Rejects H0 at the", cli::col_red(x$sig[i]), "significance level\n")
    }
  }
  cli::cat_line()
}
# datestamp ---------------------------------------------------------------
#' Date-stamping periods of mildly explosive behavior
#'
#' Computes the origination, termination and duration of
#' episodes during which the time series display explosive dynamics.
#'
#' @inheritParams diagnostics
#' @param min_duration The minimum duration of an explosive period for it to be
#' reported (default = 0).
#' @param nonrejected logical. Whether to apply datestamping technique to the series
#' that were not able to reject the Null hypothesis.
#' @param sig_lvl logical. Significance level, one of 90, 95 or 99.
#' @param ... further arguments passed to methods.
#'
#' @return Return a table with the following columns:
#'
#'\itemize{
#' \item Start: the date on which the explosive episode begins.
#' \item Peak: the date of the highest test statistic within the episode.
#' \item End: the date on which the episode ends.
#' \item Duration: the length of the episode (End minus Start).
#' \item Signal: whether the series rose ("positive") or fell ("negative") between Start and Peak.
#' \item Ongoing: whether the episode is still in progress at the end of the sample.
#' }
#'
#' @return Returns a list containing the estimated origination and termination
#' dates of episodes of explosive behaviour and the corresponding duration.
#' @details
#' Datestamp also stores a vector whose elements take the value of 1 when there is
#' a period of explosive behaviour and 0 otherwise. This output can serve as a
#' dummy variable for the occurrence of exuberance.
#'
#' @references Phillips, P. C. B., Shi, S., & Yu, J. (2015). Testing for
#' Multiple Bubbles: Historical Episodes of Exuberance and Collapse in the
#' S&P 500. International Economic Review, 56(4), 1043-1078.
#'
#' @export
# S3 generic: dispatches on the class of `object` (see datestamp.radf_obj).
datestamp <- function(object, cv = NULL, min_duration = 0L, ...) {
  UseMethod("datestamp")
}
#' @rdname datestamp
#' @inheritParams diagnostics.radf_obj
#' @importFrom rlang sym !! %||%
#' @importFrom dplyr filter pull
#' @importFrom purrr map map_lgl possibly
#' @export
#'
#' @examples
#'
#' rsim_data <- radf(sim_data)
#'
#' ds_data <- datestamp(rsim_data)
#' ds_data
#'
#' # Choose minimum window
#' datestamp(rsim_data, min_duration = psy_ds(nrow(sim_data)))
#'
#' autoplot(ds_data)
# Date-stamp explosive episodes: find the periods where the BSADF/BADF
# sequence exceeds its critical-value sequence, convert index runs into
# Start/Peak/End/Duration tables per series (or for the panel), and attach
# a 0/1 dummy matrix marking explosive observations.
datestamp.radf_obj <- function(object, cv = NULL, min_duration = 0L, sig_lvl = 95,
                               option = c("gsadf", "sadf"), nonrejected = FALSE, ...) {
  # assert_class(object, "radf")
  cv <- cv %||% retrieve_crit(object)
  assert_class(cv, "radf_cv")
  option <- match.arg(option)
  stopifnot(sig_lvl %in% c(90, 95, 99))
  assert_positive_int(min_duration, strictly = FALSE)
  assert_match(object, cv)
  is_panel <- is_sb(cv)
  snames <- series_names(object)
  # Which series to stamp: all of them when nonrejected = TRUE, otherwise
  # only those that reject the null (diagnostics_internal errors if none do).
  pos <- if (isTRUE(nonrejected)) {
    if (is_panel) "panel" else snames
  } else {
    diagnostics_internal(object, cv)$positive # internal to make the check here
  }
  filter_option <- if (option == "gsadf") c("bsadf_panel", "bsadf") else c("bsadf_panel", "badf")
  # Sequence-level join of statistics and critical values; ds_lgl flags the
  # observations where the statistic exceeds its critical value.
  ds_tbl <- augment_join(object, cv) %>%
    filter(sig == sig_lvl, stat %in% filter_option) %>% # either {bsadf, badf} or bsadf_panel
    mutate(ds_lgl = tstat > crit)
  # Per series: the integer positions of explosive observations.
  ds_basic <- map(pos, ~ filter(ds_tbl, id == .x) %>% pull(ds_lgl) %>% which())
  # Collapse consecutive positions into episode runs (Start/End/Duration).
  ds_stamp <- map(ds_basic, ~ stamp(.x) %>% as.matrix())
  if(!is_panel) {
    # Augment each episode with Peak and Signal; possibly() turns the
    # zero-episode error inside add_peak into NULL.
    tstat <- map2(pos, ds_stamp, ~ filter(ds_tbl, id == .x) %>% pull(tstat))
    mat <- map(pos, ~ mat(object)[,.x])
    possibly_add_peak <- possibly(add_peak, otherwise = NULL)
    ds_stamp <- purrr::pmap(list(ds_stamp, tstat, mat, get_trunc(object)), possibly_add_peak)
  }
  # Translate integer positions into dates; the truncated index must come
  # from cv in the panel case to include the sb_cv offset (+2).
  idx <- if (is_panel) index(cv) else index(object)
  idx_trunc <- if (is_panel) index(cv, trunc = TRUE) else index(object, trunc = TRUE)
  ds_stamp_index <- map(ds_stamp, stamp_to_index, idx_trunc, cv)
  ds_full <- purrr::map(ds_stamp_index, add_ongoing, idx, cv)
  if(isTRUE(nonrejected)) {
    # Flag episodes belonging to series that did not reject the null.
    dg <- diagnostics(object, cv)$negative
    ds_full <- map2(ds_full, pos %in% dg, ~ mutate(.x, Nonrejected = .y))
  }
  # Drop episodes shorter than min_duration; drop series left with none.
  ds <- map(ds_full, ~ filter(.x, Duration >= min_duration))
  min_reject <- map_lgl(ds, ~ nrow(.x) == 0)
  res <- ds[!min_reject]
  names(res) <- pos[!min_reject]
  if (length(res) == 0) {
    warning_glue("Argument 'min_duration' excludes all explosive periods")
  }
  # Build the {0, 1} dummy matrix over the full (untruncated) index;
  # zadj shifts truncated positions back to full-sample positions.
  idx <- index(object)
  reps <- if (is_panel) 1 else match(pos, series_names(object))
  dms <- list(seq_along(idx), if (is_panel) "panel" else snames[reps])
  dummy <- matrix(0, nrow = length(idx), ncol = length(pos), dimnames = dms)
  zadj <- get_minw(object) + get_lag(object)
  for (z in seq_along(pos)) {
    dummy[ds_basic[[z]] + zadj, z] <- 1
  }
  structure(
    res,
    dummy = dummy,
    index = idx,
    series_names = snames,
    minw = get_minw(object),
    lag = get_lag(object),
    n = get_n(object),
    panel = is_panel,
    min_duration = min_duration,
    option = option,
    method = get_method(cv),
    class = c("ds_radf", "list")
  )
}
stamp <- function(x) {
  # Convert a sorted vector of rejection indices into episodes of consecutive
  # runs: one row per episode with its Start, End and Duration.
  # `End` is exclusive (one past the last rejected index), hence the `+ 1`.
  #
  # NOTE: the original contained the no-op statement
  #   end[end - start == 0] <- end[end - start == 0]
  # (a subset assigned to itself); it has been removed as dead code.
  start <- x[c(TRUE, diff(x) != 1)]    # first index of each consecutive run
  end <- x[c(diff(x) != 1, TRUE)] + 1  # last index of each run, plus one
  duration <- end - start
  tibble("Start" = start, "End" = end, "Duration" = duration)
}
stamp_to_index <- function(x, idx, cv) {
  # Translate row positions stored in `x` into dates from the index `idx`.
  # Sieve-bootstrap (sb) critical values carry no Peak/Signal information,
  # so their output has fewer columns.
  sb <- is_sb(cv)
  if (is.null(x)) {
    # Empty-result contract: a zero-row data.frame with the same columns
    # (and column types) the non-empty branch would produce.
    template <- if (sb) {
      data.frame(
        "Start" = NA,
        "End" = NA,
        "Duration" = NA,
        row.names = NULL
      )
    } else {
      data.frame(
        "Start" = NA,
        "Peak" = NA,
        "End" = NA,
        "Duration" = NA,
        "Signal" = NA,
        row.names = NULL
      )
    }
    return(template[-1, ])
  }
  if (sb) {
    data.frame(
      "Start" = idx[x[, "Start"]],
      "End" = idx[x[, "End"]],
      "Duration" = x[, "Duration"],
      row.names = NULL
    )
  } else {
    data.frame(
      "Start" = idx[x[, "Start"]],
      "Peak" = idx[x[, "Peak"]],
      "End" = idx[x[, "End"]],
      "Duration" = x[, "Duration"],
      "Signal" = x[, "Signal"],
      row.names = NULL
    )
  }
}
add_peak <- function(ds, tstat, mat, minw) {
  # Locate the peak (maximum test statistic) inside each explosive episode and
  # classify the episode as a "positive" or "negative" movement of the series.
  #
  # ds    : data.frame with columns Start, End, Duration (row indices; End exclusive)
  # tstat : numeric vector of test statistics aligned with the truncated sample
  # mat   : numeric vector of the underlying series (full sample)
  # minw  : integer offset (minimum window + lag) translating truncated indices
  #         back to positions in `mat` -- assumed from the caller; confirm upstream
  #
  # Returns a data.frame with Start, Peak, End, Duration, Signal.
  start <- ds[, "Start"]
  end <- ds[, "End"]
  np <- length(start)
  peak <- numeric(np)
  # Preallocate as character: the original used numeric(np) and relied on
  # silent coercion when assigning "positive"/"negative".
  signal <- character(np)
  # seq_len() is safe when np == 0 (the original `1:np` would yield c(1, 0)).
  for (i in seq_len(np)) {
    ival <- start[i]:(end[i] - 1)
    # Peak = position of the maximum statistic within the episode.
    peak[i] <- start[i] + which.max(tstat[ival]) - 1
    # Sign of the series change from episode start to its peak decides the signal.
    diff_peak <- mat[minw + peak[i]] - mat[minw + start[i]]
    # Scalar condition: plain if/else instead of ifelse().
    signal[i] <- if (diff_peak >= 0) "positive" else "negative"
  }
  data.frame(
    Start = start,
    Peak = peak,
    End = end,
    Duration = ds[, "Duration"],
    Signal = signal
  )
}
# TODO ongoing cannot work in panel
add_ongoing <- function(ds, idx, cv) {
  # Flag episodes whose End is NA as "ongoing" and close them at the last
  # available observation `idx[n]`.
  #
  # ds  : datestamp data.frame (possibly zero rows) with an `End` column
  # idx : full date index of the series
  # cv  : critical-value object; supplies the sample size via get_n()
  n <- get_n(cv)
  end <- ds[, "End"]
  if (is_logical(end, 0)) {
    # Zero-row input: keep the empty shape, just add the Ongoing column.
    # NOTE(review): this branch adds a character(0) column while the non-empty
    # branch adds a logical one -- preserved as-is; confirm downstream usage.
    return(data.frame(ds, Ongoing = character(0)))
  }
  # `ifelse(is.na(end), TRUE, FALSE)` in the original is just is.na(end).
  ongoing <- is.na(end)
  # Vectorized close-out (the original looped over 1:np, which misbehaves
  # for zero-length input).
  end[ongoing] <- idx[n]
  ds[, "End"] <- end
  data.frame(ds, Ongoing = ongoing)
}
#' @export
print.ds_radf <- function(x, ...) {
  # Pretty-print datestamped explosive periods. Returns invisibly (printing
  # nothing) when no series produced a stamped period, e.g. when
  # `min_duration` filtered everything out.
  if (length(x) == 0) {
    return(invisible(NULL))
  }
  cli::cat_line()
  # Header line: minimum-duration filter on the left, critical-value method
  # (stored on the object by datestamp()) on the right.
  cli::cat_rule(
    left = glue("Datestamp (min_duration = {get_min_dur(x)})"),
    right = get_method(x))
  cli::cat_line()
  # One section per series, using the list-of printer.
  print.listof(x)
}
#' Tidy a `ds_radf` object
#'
#' Collapses the per-series datestamp tables of a `ds_radf` object into a
#' single tibble, with an `id` factor column identifying each series (or
#' `"panel"` for panel estimations).
#'
#' @param x An object of class `ds_radf`.
#' @param ... Further arguments passed to methods. Not used.
#'
#' @importFrom tibble as_tibble
#' @importFrom dplyr bind_rows
#' @importFrom rlang !!!
#'
#' @export
tidy.ds_radf <- function(x, ...) {
  # Factor levels: a single "panel" level for panel estimations, otherwise
  # one level per series. (The original computed an unused `nlevels` local;
  # removed as dead code.)
  fct_lvls <- if (attr(x, "panel")) "panel" else series_names(x)
  bind_rows(!!!x, .id = "id") %>%
    as_tibble() %>%
    mutate(id = factor(id, levels = fct_lvls))
}
|
9a65a1c6a2c05fd2db6b769744f6212818bf4d7c
|
7cf81a9e617f6e77624ce45ef5b5c51a01cd0c28
|
/plot2.R
|
b051bf94af10913e71a395b01b696d4f0e334819
|
[] |
no_license
|
jhunnius/ExData_Plotting1
|
d15d934bd2d57d3a6a8f3252841c49b105b49abc
|
418e84713931dde7957254d6f54f5b8dfced3e77
|
refs/heads/master
| 2021-01-18T12:54:05.364633
| 2015-01-07T12:01:09
| 2015-01-07T12:01:09
| 28,904,903
| 0
| 0
| null | 2015-01-07T08:09:16
| 2015-01-07T08:09:16
| null |
UTF-8
|
R
| false
| false
| 1,872
|
r
|
plot2.R
|
## This function downloads the household power consumption data set from the internet
## and plots a line graph of global active power (in kilowatts) vs. time for the
## available data from 2007-02-01 to 2007-02-02.
plot2 <- function() {
  ## Set the locale to English for correct weekday abbreviations, and restore
  ## the caller's locale on exit -- the original leaked this side effect.
  old_locale <- Sys.getlocale("LC_TIME")
  Sys.setlocale("LC_TIME", "English")
  on.exit(Sys.setlocale("LC_TIME", old_locale), add = TRUE)

  ## Download and extract the raw data file, if it doesn't exist
  if (!file.exists("household_power_consumption.txt")) {
    if (!file.exists("household_power_consumption.zip")) {
      url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
      download.file(url, destfile = "household_power_consumption.zip", method = "curl")
    }
    unzip("household_power_consumption.zip")
  }

  ## Read the raw data; "?" marks missing values in this data set
  raw_data <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?")

  ## Keep all rows with a date between 2007-02-01 and 2007-02-02
  relevant_data <- raw_data[raw_data$Date == "1/2/2007" | raw_data$Date == "2/2/2007", ]

  ## Remove the (large) raw data from memory
  rm(raw_data)

  ## Convert the literal dates and times to the R date/time format
  relevant_data$datetime <- as.POSIXct(paste(relevant_data$Date, relevant_data$Time),
                                       format = "%d/%m/%Y %H:%M:%S")
  ## Remove the original date and time columns
  relevant_data$Date <- NULL
  relevant_data$Time <- NULL

  ## Plot the global active power data vs. time into the file "plot2.png".
  ## on.exit() guarantees the device is closed even if plotting errors out
  ## (the original left the device open on failure).
  png("plot2.png", width = 480, height = 480)
  on.exit(dev.off(), add = TRUE)

  ## Plot an empty graph first, then add the data lines
  y_lab <- "Global Active Power (kilowatts)"
  with(relevant_data, plot(datetime, Global_active_power, type = "n", xlab = "", ylab = y_lab))
  with(relevant_data, lines(datetime, Global_active_power, type = "l"))
}
|
f498e1c36d96cbec046a166bf0f0f9df01dd8691
|
1c5957fd06781762a5d3d14c6e088b0788738ff4
|
/ui.R
|
aa88f4c1e5ccfd25bfc111d98603290f1fe6d88d
|
[] |
no_license
|
sor16/BayesianRCmodel
|
a2759930b89575329c683765ded2733bdeefbbc0
|
3abe5b797e1787d7c34074d69915a371c93ab909
|
refs/heads/master
| 2021-01-15T13:01:41.816611
| 2015-10-04T02:35:59
| 2015-10-04T02:35:59
| 38,426,271
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,102
|
r
|
ui.R
|
# Shiny UI definition for the Bayesian Rating Curve app, built with
# shinydashboard. Four sidebar tabs: Instructions, the App itself,
# Theory notes and Code notes.
suppressPackageStartupMessages(library(shinydashboard))
suppressPackageStartupMessages(library(googleVis))

dashboardPage(
  dashboardHeader(title = 'Bayesian Rating Curve'),
  # Sidebar: navigation between the four main tabs.
  dashboardSidebar(
    sidebarMenu(
      menuItem("Instructions", tabName = "instructions", icon = icon("info-circle")),
      menuItem("App", icon = icon("line-chart"), tabName = "app"),
      menuItem("Theory", tabName = "theory", icon = icon("book")),
      menuItem("Code", tabName = "code", icon = icon("code"))
    )
  ),
  dashboardBody(
    tabItems(
      # Tab 1: instructional video link and a sample image.
      tabItem(tabName = "instructions",
        h1("Instructions"),
        tags$a(href = "https://www.youtube.com/watch?v=NOyQywTcXaQ",
               target = "_blank", tags$img(src = "instructions.png", height = "356px", width = "640px")),
        br(),
        br(),
        tags$img(src = "hvita.jpg", alt = "Mountain View", width = "640px", height = "356px")
      ),
      # Tab 2: the main app -- results (left, width 8) and controls (right, width 4).
      tabItem(tabName = "app",
        fluidRow(
          column(width = 8,
            # Results pane: plots and tables for the two rating-curve models.
            tabBox(
              id = "tabset1", width = NULL,
              tabPanel('Plots Model 1', uiOutput('plots1')),
              tabPanel('Tables Model 1',
                h4("Data fitted"),
                htmlOutput('TableOfData1'),
                h4("Fit for unobserved stages"),
                htmlOutput('FitTable1'),
                h4("95% posterior predictive lower for unobserved stages"),
                htmlOutput('LowerTable1'),
                h4("95% posterior predictive upper for unobserved stages"),
                htmlOutput("UpperTable1")
              ),
              tabPanel('Plots Model 2', uiOutput('plots2')),
              tabPanel('Tables Model 2',
                h4("Data fitted"),
                htmlOutput('TableOfData2'),
                h4("Fit for unobserved stages"),
                htmlOutput('FitTable2'),
                h4("95% posterior predictive lower for unobserved stages"),
                htmlOutput('LowerTable2'),
                h4("95% posterior predictive upper for unobserved stages"),
                htmlOutput('UpperTable2')
              )
            ),
            # Page assets: stylesheet plus busy.js, which toggles the spinner
            # div below while the server is computing.
            tagList(
              tags$head(
                tags$link(rel = "stylesheet", type = "text/css", href = "style.css"),
                tags$script(type = "text/javascript", src = "busy.js")
              )
            ),
            br(), br(), br(), br(), br(), br(), br(), br(), br(), br(), br(), br(), br(), br(), br(), br(),
            div(class = "busy",
              img(src = "progress.GIF")
            )
          ),
          column(width = 4,
            # Controls pane: data upload, model options and export buttons.
            box(status = "primary", width = NULL,
              #background="light-blue",
              title = "Controls",
              tags$a(href = 'V316.txt', target = "_blank", class = "btn", icon("download"), 'Download txt test file'),
              br(),
              tags$a(href = 'exceldata.xlsx', class = "btn", icon("download"), 'Download xlsx test file'),
              br(),
              br(),
              selectInput("select", label = "Choose Country", choices = list("Iceland" = 'Iceland'),
                          selected = 'Iceland'),
              textInput("name", "Name of River"),
              fileInput('file', 'Choose File'),
              checkboxGroupInput("checkbox", label = "Output",
                                 choices = list("Real scale" = "raun", "Logarithmic scale" = "log",
                                                "Real scale residuals" = "leifraun", "Standardized residuals" = "leiflog"), selected = "raun"),
              # Advanced settings are revealed only when the checkbox is ticked.
              checkboxInput("advanced", label = "Advanced Settings", value = FALSE),
              conditionalPanel(condition = "input.advanced == true",
                radioButtons('clickopts', label = 'Use click to:', choices = list('Zoom' = 'zoom', 'Add dummypoint' = 'dummy', 'Add forcepoint' = 'force', 'Exclude point' = 'exclude'), selected = 'zoom'),
                sliderInput("includeDates", label = "Date Range", min = 1950, max = as.numeric(format(Sys.Date(), "%Y")),
                            value = c(1950, as.numeric(format(Sys.Date(), "%Y"))), sep = ""),
                checkboxInput("exclude", label = "Exclude years from a certain period", value = FALSE),
                conditionalPanel(condition = "input.exclude == true",
                  dateRangeInput("excludeDates", label = "Date Range", start = Sys.Date() - 1, end = Sys.Date() - 1)),
                textInput("Wmax", label = "Maximum Stage (m)")
              ),
              checkboxGroupInput("checkbox2", label = "Models", choices = list("Model1" = 'mdl1', "Model2" = 'mdl2'), selected = "mdl1", inline = TRUE),
              actionButton('reset', label = 'Reset'),
              actionButton("go", label = "Submit"),
              # actionButton("back", label="Back"),
              # actionButton("forward", label="Forward"),
              br(), br(), br(),
              downloadButton('downloadReport', label = "Download Report"),
              br(), br(),
              downloadButton('downloadImages', label = 'Download Images'),
              br(), br(),
              downloadButton('xlsxexport', label = 'Export Tables as xlsx')
            )
          )
        )
      ),
      # Tabs 3 and 4: static markdown pages.
      tabItem(tabName = "theory",
        includeMarkdown("Theory-2.md")
      ),
      tabItem(tabName = "code",
        includeMarkdown("Code.md")
      )
    )
  )
)
|
4a96bedc65b86ce5feb7dc1e57f20c7a7b056269
|
aa25c9ba3b36f795d768d924cddce3c9c1e2cdf5
|
/dash-simulator/evaluate-all.r
|
d31ee4be7040e2d3dc7def72a5b6d2f64d80ad3d
|
[] |
no_license
|
nus-mmsys/QUETRA
|
1b21aa296d394318b19fce9ee3acd54b29cddbbf
|
7b71dfec179655d00ff6e7cf5df7a12e71f93675
|
refs/heads/master
| 2021-01-02T08:53:58.867677
| 2017-07-29T13:46:36
| 2017-07-29T13:46:36
| 99,086,279
| 9
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,564
|
r
|
evaluate-all.r
|
#!/usr/bin/Rscript
# Plot bitrate vs. quality-change scatter plots for every (network profile,
# video sample) pair found in a benchmark CSV, optionally overlaying optimal
# results, and write all plots to "<benchmark>.pdf".
library(ggplot2)

args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 1) {
  cat("\nusage: evaluate-all.r <benchmark file name> [<path to optimal results>]\n\n")
  quit()
}

optimalpath <- ""
optdata <- data.frame()
if (length(args) == 2) {
  optimalpath <- args[2]
}

benchdata <- read.csv(args[1], header = TRUE)

filename <- paste(args[1], '.pdf', sep = "")
pdf(filename)

networkprof <- unique(benchdata[["profile"]])
videosample <- unique(benchdata[["sample"]])

# Filter by method
#methods <- c("gd", "kama", "qlast", "bola", "bb", "elastic")

for (p in networkprof) {
  for (t in videosample) {
    if (optimalpath != "") {
      # One optimal-results CSV per (profile, sample) pair.
      optdata <- read.csv(paste(optimalpath, "/", p, "-", t, ".csv", sep = ""))
      optdata <- optdata[c("bitrate", "change")]
    }
    benchsubdata <- subset(benchdata, profile == p & sample == t)
    # Filter by method
    #benchsubdata <- subset(benchsubdata, method %in% methods)
    benchsubdata <- benchsubdata[c("bitrate", "change", "method")]
    if (nrow(optdata) > 0) {
      # Tag the optimal trace so it gets its own color in the legend.
      optdata <- cbind(optdata, method = "optimal")
      dt <- rbind(optdata, benchsubdata)
    } else {
      dt <- benchsubdata
    }
    plt <- ggplot()
    plt <- plt + geom_point(data = dt,
                            aes(x = bitrate,
                                y = change,
                                color = method))
    plt <- plt + ggtitle(paste(p, "-", t, sep = ""))
    print(plt)
  }
}

# BUG FIX: the PDF device was never closed in the original script, which can
# leave the output file truncated; close it before reporting success.
dev.off()

cat(paste("The file", filename, "is successfully generated.\n"))
|
8c94ec5a7ab643579968ac38839b4bf5e1214a6b
|
4a80a4a1c06ccb28efd613098a7dd40df311de5e
|
/createNetAdults.R
|
51289916e8ead02625780bb0d3eef7bb2615c5c0
|
[] |
no_license
|
dnatheist/Ch5GenomicDiversityAndSpatialStructure
|
9e6c4ffa40fd033d5dc5229ff737042158d8f5ab
|
b74dccb88452d416106021d1526a61b076d22c13
|
refs/heads/master
| 2020-06-14T13:50:35.979108
| 2016-11-30T21:51:32
| 2016-11-30T21:51:32
| 75,175,716
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,626
|
r
|
createNetAdults.R
|
## @knitr createNetA
# Builds an igraph relatedness network from pairwise relatedness estimates.
# Relies on objects defined by earlier chunks: `output` (relatedness results),
# `estimator` (column index of the chosen relatedness estimator), `main`
# (plot/file label) and `qslMetaLarvAndAdultsUnion` (fish metadata).
require(igraph)
# Columns 2 and 3 are the two fish ids; `estimator` selects the relatedness value.
relData<-output$relatedness[,c(2,3,estimator)]
#hist(relData[,3]) #to see how the data is distributed
#Makes links DF
lrelData<-relData
colnames(lrelData)[1] <- "from"
colnames(lrelData)[2] <- "to"
colnames(lrelData)[3] <- "weight"
lrelData$type<-"probRel"
relDataNoRows<-nrow(relData) #get number of rows and store for later use.
#create new DF for nodes
nrelData <- data.frame(matrix(ncol = 4, nrow = relDataNoRows))
colnames(nrelData) <- c("id", "name", "type", "label")
#Populate the DF with the 'to' nodes from original list
nrelData[,1]<-relData[,2]
nrelData[,2]<-relData[,2]
#add the first entry from the 'from' list (otherwise it wont get in as it is not
# in the 'to list')
newrow = c(relData[1,1],relData[1,1],NA,NA)
nrelData = rbind(nrelData,newrow)
# nrow(nrelData); length(unique(nrelData$id))
# nrow(lrelData); nrow(unique(lrelData[,c("from", "to")]))
nrelData<-data.frame(unique(nrelData[ , 1:4]))
#Apply types "adult" or "larvae" to fish based on id number.
# NOTE(review): both ifelse() branches contain assignments, so both run before
# the per-element selection; the final `tst` result is what matters here, but
# a plain ifelse(grepl("^A", ...), "adult", "larvae") would be clearer.
tst<-ifelse(grepl("^A", nrelData$id),nrelData$type<-"adult",nrelData$type<-"larvae")
nrelData$type<-tst
rm(tst)
# Add data from larv to relatedNodes so that they are available for filtering on year etc.
larvNrel<-merge(nrelData,qslMetaLarvAndAdultsUnion, by="id")
#larvNrel<-larvNrel[,c(1:4,66)]
#Have a look at two data frames used to make net
# head(nrelData)
# head(larvNrel)
#Create the network data
net<-graph_from_data_frame(d=lrelData, vertices=nrelData, directed=FALSE)
fileName=paste("./outData/",main," net",sep="", collapse=" ")
save(net,file = fileName)# save net files for next chunk on.
|
20da39f26125587368ecfb74d5f0a6460a3b0ba4
|
6b915ba9db1de8d26bec39589c77fd5d225e1fce
|
/stanlba/hierarchical_noncentered_empiricalpriors_extend_with_init_vals_12c.R
|
4f7d13cd65928faa0f6b1f1f859a7fa292c2e36c
|
[
"Apache-2.0"
] |
permissive
|
bjsmith/reversallearning
|
c9179f8cbdfdbbd96405f603e63e4b13dfc233af
|
023304731d41c3109bacbfd49d4c850a92353978
|
refs/heads/master
| 2021-07-09T23:42:22.550367
| 2018-12-08T01:40:54
| 2018-12-08T01:40:54
| 100,329,149
| 0
| 2
| null | 2017-09-05T04:27:41
| 2017-08-15T02:21:25
|
R
|
UTF-8
|
R
| false
| false
| 10,571
|
r
|
hierarchical_noncentered_empiricalpriors_extend_with_init_vals_12c.R
|
# Setup for the 3-level hierarchical LBA-RL model with empirically informed
# priors and explicit initial values. Depends on project scripts sourced below
# (data.table `rawdata`, `results.summary.dt`, helper functions).
library(LaplacesDemon)
#the informative priors model took 4.45 hours compared to 12.70 hours for the weakly informative priors model.
#so it may have sped up the model, although this is hard to judge because:
#(1) the server might be doing other things;
#(2) random chance for the very first starting values
print("extending in a 4th direction: including initial values")
source("stanlba/lba_rl_setup.R")
# source("stanlba/lba_rl_allsingles_get_results_summary.R")
#replace with
source("load_lba_rl_allsingles_resultsdata.R")
source("generate_lbarl_group_summary_stats.R")
#have to exclude improperly estimated runs.
# Rhat > 1.05 indicates non-converged single-run fits; drop them before
# computing the group summary statistics used as empirical priors.
improperly.estimated.runs<-unique(results.summary.dt[which(results.summary.dt$Rhat>1.05),.(sid,rid,motivation,FullRunId)])
lba_group_sstats<-generate_lbarl_group_summary_stats(results.summary.dt[!(FullRunId %in% improperly.estimated.runs$FullRunId)])
# Parallel configuration: one chain per core, capped at 12.
n_chains<-min(get_my_preferred_cores(),12)
cores_to_use <- n_chains
options(mc.cores = cores_to_use)
print(paste0("using ", cores_to_use, " cores."))
#Get a minimal amount of data to test a three level model.
# Three data subsets: a small test set (subjects 105-115), all subjects,
# and subject group 1. Only positive reaction times are kept.
multisubj_multirun_moresubs<-rawdata[subid %in% c(105:115) #& Motivation=="reward"
                                     & reaction_time>0,
                                     .(reaction_time,outcome,cue,choice,cor_res_Counterbalanced,subid,
                                       ConsecSubId=as.integer(as.factor(as.character(subid))),
                                       UniqueRunID=as.numeric(interaction(subid,runid,Motivation,drop = TRUE)))]
multisubj_multirun_allsubs<-rawdata[subid #%in% c(105:115) #& Motivation=="reward"
                                    & reaction_time>0,
                                    .(reaction_time,outcome,cue,choice,cor_res_Counterbalanced,subid,
                                      ConsecSubId=as.integer(as.factor(as.character(subid))),
                                      UniqueRunID=as.numeric(interaction(subid,runid,Motivation,drop = TRUE)))]
multisubj_multirun_Group1<-rawdata[SubjectGroup==1
                                   & reaction_time>0,
                                   .(reaction_time,outcome,cue,choice,cor_res_Counterbalanced,subid,
                                     ConsecSubId=as.integer(as.factor(as.character(subid))),
                                     UniqueRunID=as.numeric(interaction(subid,runid,Motivation,drop = TRUE)))]
source("bootstrap_smart_init_vals.R")
# Bootstrap one set of "smart" initial values per chain, with a fixed seed
# for reproducibility.
smart_init_vals<-bootstrap_smart_init_vals(n_samples = n_chains,
                                           subid_set = sort(unique(multisubj_multirun_moresubs$subid)),
                                           bootstrap_seed = c(1973449269))
#hmmm, before we can speedtest, we need to ensure the damn thing actually works.
# Fixed Stan seed so that repeated runs are comparable.
bseed<-1236512756#set.seed(as.numeric(Sys.time())); sample.int(.Machine$integer.max-1000, 1)
# Fit a 3-level hierarchical LBA-RL Stan model and save the fit to disk.
#
# model_filename    : name of the .stan file (without extension) under
#                     `stanfiledir`/`filedir`
# model_description : label embedded in the saved-fit filename
# filedir           : subdirectory of `stanfiledir` holding the model
# informative_priors: if TRUE, priors come from `lba_group_sstats`
#                     (empirical, single-run fits); otherwise fixed, weakly
#                     informative values are used
# a_delta           : Stan adapt_delta control parameter
# data_to_use       : data.table of trials (defaults to the small test subset)
# warmup_iter/iter  : Stan warmup and total iteration counts
# init_vals         : "auto" (Stan random inits), "randomized" (get_init_vals),
#                     or "bootstrapped" (per-chain smart_init_vals)
#
# Returns the stanfit object (also saved via save()).
run_model<-function(model_filename,model_description,filedir="",informative_priors=FALSE,
                    a_delta=0.9,data_to_use=multisubj_multirun_moresubs,warmup_iter=450,iter=500,
                    init_vals="auto"){
  tstart<-Sys.time()
  print(paste0("warmup_iter: ",warmup_iter))
  print(paste0("iter: ",iter))
  # Build the Stan data list. The two branches differ only in the prior
  # hyperparameters: empirical (from lba_group_sstats) vs. fixed weak values.
  if(informative_priors){
    data_to_pass<- list(
      NUM_CHOICES=2,
      A=0.01,
      NUM_SUBJECTS=length(unique(data_to_use$subid)),
      NUM_TRIALS=dim(data_to_use)[1],
      NUM_RUNS=length(unique(data_to_use$UniqueRunID)),
      # Map each run (ordered by RunID) to its subject index.
      run_subjid=data_to_use[,.(RunID=unique(UniqueRunID)),by=ConsecSubId] %>% .[order(RunID),ConsecSubId],
      trial_runid=as.numeric(data_to_use$UniqueRunID),
      response_time=data_to_use$reaction_time,
      response=data_to_use$choice,
      required_choice=data_to_use$cor_res_Counterbalanced,
      cue=data_to_use$cue,
      priors_alpha=lba_group_sstats$alpha_pr_mean,
      priors_alpha_spread=lba_group_sstats$alpha_pr_var,
      priors_alpha_sd_gamma=lba_group_sstats$alpha_sd_prior,
      priors_alpha_run_sigma_gamma=lba_group_sstats$alpha_run_sigma_gamma,
      # these priors could probably be set even narrower than this, but let's ease into it.
      priors_lba_k=lba_group_sstats$k_pr_mean,
      priors_lba_k_spread=lba_group_sstats$k_pr_var,
      priors_lba_k_sd_gamma=lba_group_sstats$k_sd_prior*2,
      priors_lba_k_run_sigma_gamma=lba_group_sstats$k_run_sigma_gamma,
      priors_lba_tau=lba_group_sstats$tau_pr_mean,
      priors_lba_tau_spread=lba_group_sstats$tau_pr_var,
      priors_lba_tau_sd_gamma=lba_group_sstats$tau_sd_prior*2,
      priors_lba_tau_run_sigma_gamma=lba_group_sstats$tau_run_sigma_gamma
    )
  }else{
    data_to_pass<- list(
      NUM_CHOICES=2,
      A=0.01,
      NUM_SUBJECTS=length(unique(data_to_use$subid)),
      NUM_TRIALS=dim(data_to_use)[1],
      NUM_RUNS=length(unique(data_to_use$UniqueRunID)),
      run_subjid=data_to_use[,.(RunID=unique(UniqueRunID)),by=ConsecSubId] %>% .[order(RunID),ConsecSubId],
      trial_runid=as.numeric(data_to_use$UniqueRunID),
      response_time=data_to_use$reaction_time,
      response=data_to_use$choice,
      required_choice=data_to_use$cor_res_Counterbalanced,
      cue=data_to_use$cue,
      priors_alpha=-3,
      priors_alpha_spread=3,
      priors_alpha_sd_gamma=5,
      priors_alpha_run_sigma_gamma=4,
      # these priors could probably be set even narrower than this, but let's ease into it.
      priors_lba_k=log(0.5),
      priors_lba_k_spread=1,
      priors_lba_k_sd_gamma=3,
      priors_lba_k_run_sigma_gamma=2,
      priors_lba_tau=log(0.5),
      priors_lba_tau_spread=0.5,
      priors_lba_tau_sd_gamma=2,
      priors_lba_tau_run_sigma_gamma=1
    )
  }
  # Parameter indices matching the Stan model's parameter ordering.
  PARID_alpha = 1;
  PARID_lba_k = 2;
  PARID_lba_tau = 3;
  #we want to set initial values...
  #we probably need to set initial values for all the subjects as well as the top level values? So let's try this out...
  #####THINGS TO CHECK:
  #####(1) How to define a vector or matrix of values? assume [M,N], M row, N columns
  #####(2) Are these cauchys going to screw us? I think we need a flatter-tailed distribution. The truncated normals might end up being what we need.
  #####(3) Do we have to define transformed parameters? A: Probably not.
  # Defines get_init_vals / get_bootstrapped_init_vals used below.
  source("init_vals_generate.R")
  #
  # get_init_vals_imitate_default<-function(){
  #   return(
  #     list(
  #       ################
  #       ######GROUP LEVEL
  #       subj_mu=c(runif(1,-2,2),
  #                 runif(1,-2,2),
  #                 runif(1,-2,2)),
  #       subj_sigma=c(exp(runif(1,-2,2)),
  #                    exp(runif(1,-2,2)),
  #                    exp(runif(1,-2,2))),
  #       run_sigma_gamma=c(exp(runif(1,-2,2)),
  #                         exp(runif(1,-2,2)),
  #                         exp(runif(1,-2,2))),
  #
  #       #not sure that we really need to define transformed parameters, maybe only sampled parameters.
  #       ################
  #       ####SUBJECT LEVEL
  #       run_mu_var=matrix(runif(data_to_pass$NUM_SUBJECTS*3,-2,2),ncol=3),
  #
  #       #NUM_SUBJECTS rows, NUM_PARAMS columns
  #       #[NUM_SUBJECTS,NUM_PARAMS];
  #       run_sigma=cbind(exp(runif(data_to_pass$NUM_SUBJECTS,-2,2)),
  #                       exp(runif(data_to_pass$NUM_SUBJECTS,-2,2)),
  #                       exp(runif(data_to_pass$NUM_SUBJECTS,-2,2))),
  #       #I think these cauchys are probably going to screw us!
  #       #no way we can start with these starting values.
  #
  #       ################
  #       ######RUN LEVEL
  #       alpha_pr_var=runif(data_to_pass$NUM_RUNS,-2,2),
  #       k_pr_var=runif(data_to_pass$NUM_RUNS,-2,2),
  #       tau_pr_var=runif(data_to_pass$NUM_RUNS,-2,2)
  #     )
  #   )
  #
  # }
  # Select the initialization strategy for rstan::stan()'s `init` argument.
  if(init_vals=="auto"){
    init_method="random"
  }else if (init_vals=="randomized"){
    init_method=get_init_vals
  }else if (init_vals=="bootstrapped"){
    init_method=lapply(smart_init_vals,get_bootstrapped_init_vals)
  }else
  {stop("invalid value for init_vals argument.")}
  # NOTE(review): debug output -- prints a function or a large list of inits.
  print(init_method)
  rmfit<-stan(file=paste0(stanfiledir,filedir,model_filename,".stan"),
              #fit=fit_rl_lba_multi_subj_proto1,
              data = data_to_pass,
              warmup = warmup_iter,
              iter = iter,
              init=init_method,
              chains = n_chains, #run as many chains as we have cores to run them, but no more than 12 necessary.
              seed=bseed,
              refresh=5,
              control = list(max_treedepth = 13,adapt_delta=a_delta))
  tend<-Sys.time()
  # Report wall-clock fitting time.
  print(tend-tstart)
  file_save_name<-get_fit_desc(use_model = model_filename,descr = model_description,run=c(1,2),
                               model_rp_separately=TRUE,model_runs_separately=TRUE,
                               use_pain=FALSE,fastDebug=FALSE,fileSuffix="",
                               estimation_method=ESTIMATION_METHOD.MCMC,
                               bseed=bseed,warmup_iter = warmup_iter,
                               iterations=iter)
  save(rmfit,file=file_save_name)
  print(rmfit)
  return(rmfit)
}
# Run the informative-priors model three times, varying only the
# initialization strategy, to compare convergence/speed.
print("running...")
print("------------------------")
print("Running the informative priors model WITHOUT INITIAL VALUES SPECIFIED.")
fit_with_manual_init_vals <- run_model("lba_rl_multi_subj_7_3level_empiricalpriors_noncentered","10subs_auto_init_12c",
                                       filedir="incremental/",informative_priors = TRUE,
                                       init_vals="auto")
print("------------------------")
print("Running the informative priors model WITH RANDOM INITIAL VALUES SPECIFIED.")
fit_with_manual_init_vals <- run_model("lba_rl_multi_subj_7_3level_empiricalpriors_noncentered","10subs_randomized_init_12c",
                                       filedir="incremental/",informative_priors = TRUE,
                                       init_vals="randomized")
print("------------------------")
print("Running the informative priors model WITH BOOTSTRAPPED INITIAL VALUES SPECIFIED.")
# NOTE(review): each call overwrites fit_with_manual_init_vals; the fits are
# persisted to disk inside run_model(), so only the last is kept in memory.
fit_with_manual_init_vals <- run_model("lba_rl_multi_subj_7_3level_empiricalpriors_noncentered","10subs_bootstrapped_init_12c",
                                       filedir="incremental/",informative_priors = TRUE,
                                       init_vals="bootstrapped")
#what about the exp'd values.
#save(fit_normalsds,fit_widevariablecauchys,fit_base,file=paste0(dd, "Fits/hierarchical_stanforum_suggestion_results.RData"))
#save(fit_informative_priors,fit_weakly_informative_priors,file=paste0(dd, "Fits/informative_priors_test_allsubs_450.RData"))
|
7301537b0d14bc83ce1df07f4e330e5a66ca5c29
|
88592a2a30f76e3276f55614097cd5b28667f11d
|
/main.R
|
3ef4a9b46e0bfed041e9a472b1e1cfcf3bca7758
|
[] |
no_license
|
krabby-k/datasciencecoursera
|
a4907f0b62c77e966d8a23947d2d84c3f5d8642e
|
089007a8eb4be5080cc886313acbcd4ea6619bb4
|
refs/heads/master
| 2023-01-24T08:26:30.326151
| 2020-12-09T08:11:52
| 2020-12-09T08:11:52
| 319,884,187
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 84
|
r
|
main.R
|
# Title : TODO
# Objective : TODO
# Created by: brian
# Created on: 11/18/2020
|
4545748cae69abfd4a2d3f756b1a86ffba3849d6
|
2f15b2dc16de0471e7bee43f6739b6ad8522c81d
|
/tests/testthat/test-set_dimension_type_conformed.R
|
e997c5768d756f2a4e68f9550af37acff741cdbd
|
[
"MIT"
] |
permissive
|
billster45/starschemar
|
45566be916c95778727a3add3239143d52796aa9
|
5f7e0201494a36f4833f320e4b9535ad02b9bdc1
|
refs/heads/master
| 2022-12-20T13:45:06.773852
| 2020-09-26T03:44:12
| 2020-09-26T03:44:12
| 298,796,838
| 1
| 0
|
NOASSERTION
| 2020-09-26T11:10:30
| 2020-09-26T11:10:30
| null |
UTF-8
|
R
| false
| false
| 225
|
r
|
test-set_dimension_type_conformed.R
|
context("test set_dimension_type_conformed")

# Marking a dimension as conformed should make is_conformed_dimension() TRUE.
test_that("set_dimension_type_conformed works", {
  dim_when <- st_mrs_age_test$dimension$when
  dim_when <- set_dimension_type_conformed(dim_when)
  expect_true(is_conformed_dimension(dim_when))
})
|
2947199a2240bd41a91324f34407d6a74bca44de
|
399e5a660591a8906796b8cb6af5cdd2d2dc92fc
|
/R/max_norm.R
|
69a48a9c3141d43925ac37cd9925d767871c01be
|
[] |
no_license
|
leontavares/BRDF
|
cc3b9d2c7833fc9d996a4126fb945744d4ebb4ec
|
ba1f4bb9be42db440db3cd4198f5fb34212d606f
|
refs/heads/master
| 2022-01-13T17:37:18.203575
| 2019-07-12T03:21:24
| 2019-07-12T03:21:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,300
|
r
|
max_norm.R
|
#' Compute the relative proximity of a solution in relation to an uncertainty.
#'
#' \code{max_norm} Determines where a norm (residual or solution) is within a certain tolerance, iteratively narrowing down a list of lambda values.
#'
#' @param norm_result Result of doing the gsvd_norm function
#' @param uncertainty Target level of the norm we want to achieve
#'
#' @return Data frame that contains the updated lambda min and max values
#' @examples
#'
#' # To be filled in later
#' @export
max_norm <- function(norm_result, uncertainty) {
  # Signed distance of each residual norm from the target uncertainty level.
  # (Leftover debug print() calls from the original have been removed.)
  compare_norm <- norm_result %>%
    filter(norm == 'residual') %>%                    # keep residual norms only
    mutate(result_diff = result - uncertainty$sigma)  # distance to target

  # Index of the first lambda whose residual exceeds the target; if none do,
  # fall back to the last entry.
  test_idx <- which(compare_norm$result_diff > 0)
  if (length(test_idx) > 0) {
    min_idx = min(test_idx)
  } else {min_idx <- length(compare_norm$result_diff) }

  # Bracket the crossing point with the row before it (clamped at index 1).
  end_idx <- max(1, min_idx)
  start_idx <- max(1, min_idx - 1)

  # Return the bracketing lambdas plus a log10-scale tolerance check.
  lambda_return <- compare_norm %>%
    select(lambda, result, result_diff) %>%
    slice(c(start_idx, end_idx)) %>%
    mutate(tol_check = log10(abs(result_diff))) %>%
    select(-result_diff)
  return(lambda_return)
}
|
c2931219ff9a07b6edbf735f38ad7914854034fd
|
983585773a5f29526e764ff9b35c9a00bbe993c4
|
/ellipse.R
|
9fe93850902d6b580c06246d92d365526906effa
|
[] |
no_license
|
arjunchandrashekara/R-codes
|
3361f23b5867e4130df8a327ba1c0931cb73c2d3
|
f9fcdd275c77c639900823ab6a22c6234473857f
|
refs/heads/master
| 2023-03-26T09:05:51.945927
| 2021-03-29T13:48:19
| 2021-03-29T13:48:19
| 294,035,218
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,738
|
r
|
ellipse.R
|
library(shiny)
library(shape)

# Shiny app: on button press, draw four random numbers in [0, 1] and render
# one shaded ellipse per number, colored by the number's quartile.

ui <- fluidPage(
  actionButton("create","create 4 random numbers"),
  wellPanel(tableOutput("Random_Numbers")),
  fluidRow(column(3,"1"),column(8,plotOutput("A"))),
  fluidRow(column(3,"2"),column(8,plotOutput("B"))),
  fluidRow(column(3,"3"),column(8,plotOutput("C"))),
  fluidRow(column(3,"4"),column(8,plotOutput("D")))
)

# Map a value in [0, 1] onto a color by quartile. This replaces four
# copy-pasted if/else ladders; it also resolves their one inconsistency
# (block D used a strict `< 0.75` while A-C used `<= 0.75` -- for runif
# values the boundary is hit with probability 0, so behavior is unchanged).
quartile_color <- function(value) {
  if (value < 0.25) {
    "red"
  } else if (value <= 0.5) {
    "blue"
  } else if (value <= 0.75) {
    "black"
  } else {
    "green"
  }
}

server <- function(input, output) {
  # Reactive: four fresh uniform random numbers each time the button is pressed.
  data <- eventReactive(input$create, {
    runif(4, 0, 1)
  })

  output$Random_Numbers <- renderTable({
    data()
  })

  # Factory producing one renderPlot per random number; data() is read inside
  # the render expression so each plot stays reactive to the button.
  make_ellipse_plot <- function(i) {
    renderPlot({
      value <- data()[i]
      emptyplot(xlim = c(-2, 2), ylim = c(-2, 2))
      filledellipse(rx1 = 1, ry1 = 0.5, mid = c(1, 1),
                    col = shadepalette(endcol = quartile_color(value)))
    })
  }

  output$A <- make_ellipse_plot(1)
  output$B <- make_ellipse_plot(2)
  output$C <- make_ellipse_plot(3)
  output$D <- make_ellipse_plot(4)
}

shinyApp(ui = ui, server = server)
|
78fed9519c2b33877b86563e0f377cae17e53ce9
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/FRESA.CAD/man/reportEquivalentVariables.Rd
|
74bc7dded56dcff18022da6400feaaeaa3be802b
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,877
|
rd
|
reportEquivalentVariables.Rd
|
\name{reportEquivalentVariables}
\alias{reportEquivalentVariables}
\title{Report the set of variables that will perform an equivalent IDI discriminant function}
\description{
Given a model, this function will report a data frame with all the variables that may be interchanged in the model without affecting its classification performance.
For each variable in the model, this function will loop all candidate variables and report all of which result in an equivalent or better zIDI than the original model.
}
\usage{
reportEquivalentVariables(object,
pvalue = 0.05,
data,
variableList,
Outcome = "Class",
timeOutcome=NULL,
type = c("LOGIT", "LM", "COX"),
description = ".",
method="BH",
osize=0,
fitFRESA=TRUE)
}
\arguments{
\item{object}{
An object of class \code{lm}, \code{glm}, or \code{coxph} containing the model to be analyzed
}
\item{pvalue}{
The maximum \emph{p}-value, associated with the IDI, allowed for a pair of variables to be considered equivalent
}
\item{data}{
A data frame where all variables are stored in different columns
}
\item{variableList}{
A data frame with two columns. The first one must have the names of the candidate variables and the other one the description of such variables
}
\item{Outcome}{
The name of the column in \code{data} that stores the variable to be predicted by the model
}
\item{timeOutcome}{
The name of the column in \code{data} that stores the time to event
}
\item{type}{
Fit type: Logistic ("LOGIT"), linear ("LM"), or Cox proportional hazards ("COX")
}
\item{description}{
The name of the column in \code{variableList} that stores the variable description
}
\item{method}{
The method used by the p-value adjustment algorithm
}
\item{osize}{
The number of features used for p-value adjustment
}
\item{fitFRESA}{
if TRUE it will use the cpp based fitting method
}
}
\value{
\item{pvalueList}{
A list with all the unadjusted p-values of the equivalent features per model variable
}
\item{equivalentMatrix}{
A data frame with three columns. The first column is the original variable of the model.
The second column lists all variables that, if interchanged, will not statistically affect the performance of the model.
The third column lists the corresponding \emph{z}-scores of the IDI for each equivalent variable.
}
\item{formulaList}{
a character vector with all the equivalent formulas
}
\item{equivalentModel}{
a bagged model that used all the equivalent formulas. The model size is limited by the number of observations
}
}
\author{Jose G. Tamez-Pena}
\keyword{Model_Inspection}
|
2208fa5a158a370dcdbd845cf0237d5a9ee71913
|
1c17c81a2f3014e861fbfe51ade2a9c351e11a12
|
/Boxplot_6_immune cells_gut_116.R
|
34a25fe04e38874afd468f8e540281f17f91b1f6
|
[] |
no_license
|
Bennylikescoding/existing_R_code
|
818c36508dadfc8f10f3132ac983a2bef0ecac8a
|
ae50e9061e2436547e84234269cf64c859bfbf36
|
refs/heads/master
| 2020-11-26T03:48:12.355220
| 2020-03-24T02:50:33
| 2020-03-24T02:50:33
| 228,956,798
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,400
|
r
|
Boxplot_6_immune cells_gut_116.R
|
# 0. data type
# project order_No. group sample T CD4
# AD-116 1 WT B3527 65 13
# AD-116 1 WT B3528 75.4 10.1
# AD-116 1 WT B3529 66.7 12
# AD-116 1 WT B3530 61.6 8.33
# AD-116 1 WT B3531 60.9 12.2
# AD-116 1 WT B3552 60.3 10.4
# AD-116 1 WT B3553 43.6 0.33
# AD-116 1 WT B3554 41.8 13.2
# AD-116 1 WT B3555 56.5 14.2
# AD-116 1 WT B3556 54.6 10.4
# AD-116 2 Cocage_WT X13586 74.7 12.4
# AD-116 2 Cocage_WT B1675 59.5 7.84
# AD-116 2 Cocage_WT B1683 55.1 11.2
# AD-116 2 Cocage_WT B1693 63.9 7.4
# AD-116 2 Cocage_WT B1700 56.2 9.62
# AD-116 2 Cocage_WT B1908 72.6 10.2
# AD-116 2 Cocage_WT B1909 78.5 8.77
# AD-116 2 Cocage_WT B1910 67.5 10.1
# AD-116 2 Cocage_WT B1975 69.4 9.47
# AD-116 2 Cocage_WT B4194 78.6 10.3
# AD-116 3 Tg B1488 61.4 13.8
# AD-116 3 Tg B1495 53.6 9.38
# 1.import library
library(ggplot2)
library(ggsignif)
library(dplyr)
library(grid)
library(gridExtra)
# 2.import files
file_path<-choose.files()
bxplot_df<-read.csv(file_path)
# 3.set variables
n_col_value = 5
grid_graph_title = "gut immune cells (116)"
# 4.select df
bxplot_df_selected <- filter(bxplot_df, group == "WT" | group == "Tg" | group =="Tg_971_100mpk")
# 5.start ploting
colname <- colnames(bxplot_df[c(n_col_value:ncol(bxplot_df))])
out <- NULL
#c(seq_along(colname))
for (i in seq_along(colname)){
#http://www.sthda.com/english/wiki/ggplot2-facet-split-a-plot-into-a-matrix-of-panels
p<-ggplot(bxplot_df_selected,aes_string(x="group", y=colname[i],
group="group"))+
geom_boxplot()+
geom_dotplot(binaxis='y', stackdir='center',dotsize=0.5,color="black")+
stat_summary(fun.y=mean, geom="point", shape=20, size=3, color="red", fill="red")+
stat_summary(fun.y=mean, geom="line", size=1, color="red", aes(group=1))+
scale_x_discrete(name ="Groups",
limits=c("WT","Tg","Tg_971_100mpk"))
out[[i]] <- p
}
##GROUP 1 PLOTING...
grid.arrange(out[[1]], out[[2]], out[[3]], out[[4]], out[[5]],
out[[6]], out[[7]], out[[8]], out[[9]], out[[10]],
out[[11]], out[[12]], out[[13]], out[[14]], out[[15]],
out[[16]], out[[17]], out[[18]], out[[19]], out[[20]], out[[21]],
ncol = 4, nrow = 6,
top = textGrob(grid_graph_title, gp=gpar(fontsize=15,font=1)))
###-----END OF BOXPLOT---###
|
2517df0d7c31f5cca070aaaf5aaf8af8b454b17f
|
8d0eb323a84a89fbb81045c492c8c7ed72c7cc84
|
/RNA-seq-without-ref/step5-venn-compare.R
|
5632ae57bf8ee6fc0d708618007310cdbef7044b
|
[] |
no_license
|
reedliu/NGS-codes
|
e23157e1bf4a244b9e1ec0fc5b356fe562ea026d
|
d31dd42874789e9f0bf6979e47a7cd804f9bd5f4
|
refs/heads/master
| 2020-04-12T01:05:37.111875
| 2019-11-10T13:58:51
| 2019-11-10T13:58:51
| 162,221,667
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,949
|
r
|
step5-venn-compare.R
|
### ---------------
###
### Creator: Yunze Liu (Reed Liu)
### Date: 2019-3-3
### Email: jieandze1314@gmail.com
### Blog: https://reedliu.github.io/
### CAAS/AGIS/SDAU
###
### ---------------
# Compare differential-expression results across quantifiers (salmon vs RSEM)
# and DE methods (DESeq2 vs edgeR): log-fold-change concordance scatter plots
# plus Venn diagrams of up-/down-regulated gene sets.
rm(list = ls())
options(stringsAsFactors = F)
# for salmon
load('DESeq2-salmon.Rdata')
load('edgeR-salmon.Rdata')
load('method_compare.Rdata')
# for RSEM
load('DESeq2-RSEM.Rdata')
load('edgeR-RSEM.Rdata')
####################################
## get all genes and FC to check correlation
# Helps judge whether the two methods behave correctly (in theory the results
# obtained from both methods should fall on a straight line).
####################################
# for DU vs SS
DU_SS_deseq <- data.frame(gene=rownames(salmon_DESeq2_DU_SS),
DU_SS_Deseq=salmon_DESeq2_DU_SS$log2FoldChange)
DU_SS_edgeR <- data.frame(gene=rownames(salmon_edgeR_DU_SS),
DU_SS_edgeR=salmon_edgeR_DU_SS$logFC)
test1 <- merge(DU_SS_deseq,DU_SS_edgeR,by="gene")
plot(test1[2:3])
# for LFC2 vs SS
LFC2_SS_Deseq <- data.frame(gene=rownames(salmon_DESeq2_LFC2_SS),
LFC2_SS_Deseq=salmon_DESeq2_LFC2_SS$log2FoldChange)
LFC2_SS_edgeR <- data.frame(gene=rownames(salmon_edgeR_LFC2_SS),
LFC2_SS_edgeR=salmon_edgeR_LFC2_SS$logFC)
test2 <- merge(LFC2_SS_Deseq,LFC2_SS_edgeR,by="gene")
plot(test2[2:3])
# for RS vs SS
RS_SS_Deseq <- data.frame(gene=rownames(salmon_DESeq2_RS_SS),
RS_SS_Deseq=salmon_DESeq2_RS_SS$log2FoldChange)
RS_SS_edgeR <- data.frame(gene=rownames(salmon_edgeR_RS_SS),
RS_SS_edgeR=salmon_edgeR_RS_SS$logFC)
test3 <- merge(RS_SS_Deseq,RS_SS_edgeR,by="gene")
plot(test3[2:3])
# for XXCAD vs SS
XXCAD_SS_Deseq <- data.frame(gene=rownames(salmon_DESeq2_XXCAD_SS),
XXCAD_SS_Deseq=salmon_DESeq2_XXCAD_SS$log2FoldChange)
XXCAD_SS_edgeR <- data.frame(gene=rownames(salmon_edgeR_XXCAD_SS),
XXCAD_SS_edgeR=salmon_edgeR_XXCAD_SS$logFC)
test4 <- merge(XXCAD_SS_Deseq,XXCAD_SS_edgeR,by="gene")
plot(test4[2:3])
save(test1,test2,test3,test4,file="method_compare.Rdata")
# Combine the four concordance scatter plots into one 2x2 panel
par(mfrow=c(2,2))
plot(test1[2:3])
plot(test2[2:3])
plot(test3[2:3])
plot(test4[2:3])
####################################
## get UP and DOWN genes for each method to draw Venn plot
# Get the up- and down-regulated genes separately for each method,
# then draw a Venn diagram for each direction.
# NOTE(review): this section is an interactive workflow -- run exactly ONE
# of the `DEG = ...` assignments below, then the cutoff/classification code,
# then the matching pair of *_up/*_down assignments. Sourced top-to-bottom,
# every contrast's up/down sets would come from the LAST DEG assigned.
####################################
# for RSEM DESeq2
DEG = RSEM_DESeq2_DU_SS
DEG = RSEM_DESeq2_LFC2_SS
DEG = RSEM_DESeq2_RS_SS
DEG = RSEM_DESeq2_XXCAD_SS
# for salmon DESeq2
# NOTE(review): the header says "salmon DESeq2" but these objects are edgeR
# results -- confirm which was intended.
DEG = salmon_edgeR_DU_SS
DEG = salmon_edgeR_LFC2_SS
DEG = salmon_edgeR_RS_SS
DEG = salmon_edgeR_XXCAD_SS
# Data-driven log-fold-change cutoff: mean + 2 SD of |logFC|.
logFC_cutoff <- with(DEG,mean(abs(logFC)) + 2*sd(abs(logFC)) )
DEG$change = as.factor(ifelse(DEG$PValue < 0.05 & abs(DEG$logFC) > logFC_cutoff,
ifelse(DEG$logFC > logFC_cutoff ,'UP','DOWN'),'NOT')
)
# for DU
DU_up <- rownames(DEG[DEG$change =='UP',])
DU_down <- rownames(DEG[DEG$change =='DOWN',])
# for LFC2
LFC2_up <- rownames(DEG[DEG$change =='UP',])
LFC2_down <- rownames(DEG[DEG$change =='DOWN',])
# for RS
RS_up <- rownames(DEG[DEG$change =='UP',])
RS_down <- rownames(DEG[DEG$change =='DOWN',])
# for XXCAD
XXCAD_up <- rownames(DEG[DEG$change =='UP',])
XXCAD_down <- rownames(DEG[DEG$change =='DOWN',])
save(DU_up,DU_down,LFC2_up,LFC2_down,XXCAD_up,
XXCAD_down,RS_up,RS_down,file = "salmon_edgeR_DEGs.Rdata")
## Venn plot
if(!require(VennDiagram))install.packages('VennDiagram')
library (VennDiagram)
if(T){
# first look at UP comparison
venn.diagram(x= list(DESeq2_DU_UP = DU_up,
DESeq2_LFC2_UP = LFC2_up,
DESeq2_RS_UP = RS_up,
DESeq2_XXCAD_UP = XXCAD_up),
filename = "DESeq2-RSEM-compare_UP.png",
height = 800, width = 1200,
resolution =500,
imagetype="png",
col="transparent",
fill=c("green","darkorchid1","yellow", "orange"),
alpha = 0.50,
cex=0.3,
cat.cex=0.25)
# DOWN comparison
venn.diagram(x= list(DESeq2_DU_DOWN = DU_down,
DESeq2_LFC2_DOWN = LFC2_down,
DESeq2_RS_DOWN = RS_down,
DESeq2_XXCAD_DOWN = XXCAD_down),
filename = "DESeq2-RSEM-compare_DOWN.png",
height = 900, width = 1400,
resolution =500,
imagetype="png",
col="transparent",
fill=c("green","darkorchid1","yellow", "orange"),
alpha = 0.50,
cex=0.3,
cat.cex=0.25)
}
##############################################
### get edgeR UP and DOWN genes
# for RSEM edgeR
# NOTE(review): same interactive pattern as above -- run one DEG assignment
# at a time before the classification and Venn code below.
DEG = RSEM_edgeR_DU_SS
DEG = RSEM_edgeR_LFC2_SS
DEG = RSEM_edgeR_RS_SS
DEG = RSEM_edgeR_XXCAD_SS
logFC_cutoff <- with(DEG,mean(abs(logFC)) + 2*sd(abs(logFC)) )
DEG$change = as.factor(ifelse(DEG$PValue < 0.05 & abs(DEG$logFC) > logFC_cutoff,
ifelse(DEG$logFC > logFC_cutoff ,'UP','DOWN'),'NOT')
)
# for DU
DU_up <- rownames(DEG[DEG$change =='UP',])
DU_down <- rownames(DEG[DEG$change =='DOWN',])
# for LFC2
LFC2_up <- rownames(DEG[DEG$change =='UP',])
LFC2_down <- rownames(DEG[DEG$change =='DOWN',])
# for RS
RS_up <- rownames(DEG[DEG$change =='UP',])
RS_down <- rownames(DEG[DEG$change =='DOWN',])
# for XXCAD
XXCAD_up <- rownames(DEG[DEG$change =='UP',])
XXCAD_down <- rownames(DEG[DEG$change =='DOWN',])
## Venn plot
if(!require(VennDiagram))install.packages('VennDiagram')
library (VennDiagram)
if(T){
# first look at UP comparison
venn.diagram(x= list(edegR_DU_UP = DU_up,
edegR_LFC2_UP = LFC2_up,
edegR_RS_UP = RS_up,
edegR_XXCAD_UP = XXCAD_up),
filename = "edegR-RSEM-compare_UP.png",
height = 800, width = 1200,
resolution =500,
imagetype="png",
col="transparent",
fill=c("green","darkorchid1","yellow", "orange"),
alpha = 0.50,
cex=0.3,
cat.cex=0.25)
# DOWN comparison
venn.diagram(x= list(edegR_DU_DOWN = DU_down,
edegR_LFC2_DOWN = LFC2_down,
edegR_RS_DOWN = RS_down,
edegR_XXCAD_DOWN = XXCAD_down),
filename = "edegR-RSEM-compare_DOWN.png",
height = 900, width = 1400,
resolution =500,
imagetype="png",
col="transparent",
fill=c("green","darkorchid1","yellow", "orange"),
alpha = 0.50,
cex=0.3,
cat.cex=0.25)
}
|
290895470ec220e356173258ce7871b2a6de8c4d
|
fb0cbf6db81ee5ff6dfe73b618e2d649251b8b77
|
/Model2.R
|
4a4f07b3ce2f30cf8e0cde679d9a90e80a6b217a
|
[] |
no_license
|
PGC-PTSD-EWAS/PGC-PTSD-Longitudinal-Analysis
|
6a5841a9a2a33388959851e555a608315611b677
|
8ff7f5832bccac3343f5fefc3ee1d408885edeee
|
refs/heads/main
| 2023-08-12T00:50:22.291579
| 2021-10-11T14:49:57
| 2021-10-11T14:49:57
| 416,046,475
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,087
|
r
|
Model2.R
|
################################################################################
# Model 2: Linear model to evaluate CpGs associated with change in PTSS in each cohort
################################################################################
library(lmerTest)
library(data.table)

# --- Phenotype ----------------------------------------------------------------
# Min-max scale the PTSS (PCL) score to [0, 1] so coefficients are comparable
# across cohorts.
pheno <- read.csv(" ")
pheno$PCL_SUM_01scaled <- (pheno$PCL_SUM - min(pheno$PCL_SUM)) /
  (max(pheno$PCL_SUM) - min(pheno$PCL_SUM))

# --- Methylation --------------------------------------------------------------
# Beta matrix, CpG x Sample; first column of the file holds the probe IDs.
beta.norm <- fread(" ", data.table = F)
rownames(beta.norm) <- beta.norm$V1
beta.norm <- beta.norm[, -1]

# --- Model variables (fill in per cohort) -------------------------------------
ageVar <- "AgeDiff"  # Age variable = Age.post - Age.pre
cellTypes <- c("CD8T.Epic.diff", "CD4T.Epic.diff", "NK.Epic.diff",
               "Bcell.Epic.diff", "Mono.Epic.diff")  # difference in cell proportions between time points
pcs <- c("Comp.2", "Comp.3")  # ancestry PC variables
ptsdVar <- " "  # PTSD variable = PTSS.post - PTSS.pre
studyID <- " "  # e.g. "MRS", "Prismo"

pheno$preID <- as.character(pheno$preID)
pheno$postID <- as.character(pheno$postID)
# Fail fast if any pre/post sample is missing from the beta matrix
# (the original only printed TRUE/FALSE for visual inspection).
stopifnot(all(pheno$preID %in% colnames(beta.norm)),
          all(pheno$postID %in% colnames(beta.norm)))

# --- Convert betas to M-values ------------------------------------------------
range(beta.norm, na.rm = T)
# Clamp exact 0/1 betas so log2(beta / (1 - beta)) stays finite.
if (min(beta.norm, na.rm = T) == 0) {
  beta.norm[which(beta.norm == 0)] <- 0.0001
}
if (max(beta.norm, na.rm = T) == 1) {
  beta.norm[which(beta.norm == 1)] <- 0.9999
}
range(beta.norm, na.rm = T)
sum(is.na(beta.norm))
beta.norm <- log2(beta.norm / (1 - beta.norm))  # logit (M-value) transform
range(beta.norm, na.rm = T)

# --- Per-CpG regression -------------------------------------------------------
# outCpG (post-deployment M-value) ~ expCpG (pre) + PTSS change + age change
#                                    + cell-type changes + ancestry PCs
formula <- as.formula(paste("outCpG~",
                            paste(c("expCpG", ptsdVar, ageVar, cellTypes, pcs), collapse = "+"),
                            sep = ""))
vars <- c("(Intercept)", "expCpG", ptsdVar, ageVar, cellTypes, pcs)

# Preallocate result matrices: one row per CpG, one column per coefficient.
resultsBeta <- matrix(nrow = nrow(beta.norm), ncol = length(vars))
rownames(resultsBeta) <- rownames(beta.norm)
colnames(resultsBeta) <- vars
resultsSE <- resultsT <- resultsP <- resultsBeta
resultsDF <- matrix(nrow = nrow(beta.norm), ncol = 1)
rownames(resultsDF) <- rownames(beta.norm)
colnames(resultsDF) <- "df"

cpgs <- rownames(beta.norm)
errorProbes <- NULL  # probes whose model failed to fit (result rows stay NA)
start <- proc.time()[3]
for (ii in seq_along(cpgs)) {  # seq_along() is safe even for zero CpGs
  outCpG <- t(beta.norm[cpgs[ii], pheno$postID])
  expCpG <- t(beta.norm[cpgs[ii], pheno$preID])
  # Record failing probes instead of aborting the whole loop.
  fit <- tryCatch(lm(formula, data = pheno), error = function(e) NULL)
  if (is.null(fit)) {
    errorProbes <- c(errorProbes, cpgs[ii])
    next
  }
  res <- coef(summary(fit))
  resultsBeta[ii, ] <- res[, "Estimate"]
  resultsSE[ii, ] <- res[, "Std. Error"]
  resultsT[ii, ] <- res[, "t value"]
  resultsP[ii, ] <- res[, "Pr(>|t|)"]
  resultsDF[ii, ] <- fit$df.residual
  if (ii %% 100 == 0) { print(ii) }
}
end <- proc.time()[3]
end - start

# Keep only the PTSS-change coefficient for each CpG.
beta <- as.data.frame(resultsBeta)
pval <- as.data.frame(resultsP)
se <- as.data.frame(resultsSE)
t <- as.data.frame(resultsT)
final <- as.data.frame(cbind(beta[ptsdVar], se[ptsdVar], t[ptsdVar], pval[ptsdVar], resultsDF))
colnames(final) <- c("BETA", "SE", "t", "pval", "df")
# BUGFIX: with row.names = F the original CSV contained no CpG identifiers at
# all; carry them explicitly as the first column.
final <- data.frame(CpG = rownames(beta), final)
write.csv(final,
          file = paste0(studyID, "_Rutten_noob_allPreAsControls_PCL01scaled_covar_age_epicCellTypes_methPC.csv"),
          quote = F, row.names = F)
|
4daa58f9a25c9ba13068a7a08ce220a28189261d
|
cc9707494002db22ce5797b63326602a99009e9a
|
/R/sg.R
|
ecda8b4a0a5183eb2a5e49fd505ada11e275885b
|
[
"MIT"
] |
permissive
|
CIP-RIU/sbformula
|
54fae73fc69c9009ed74d8d6221ccffa907a5e58
|
f22517e474411910c890083103f07346ab79c8a8
|
refs/heads/master
| 2021-06-21T07:55:37.290766
| 2017-08-18T21:34:01
| 2017-08-18T21:34:01
| 33,208,831
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 933
|
r
|
sg.R
|
#'Formula for calculating the Specific gravity (SG)
#'
#'@param twa Tuber weight in air
#'@param tww Tuber weight in water
#'@return The specific gravity, or \code{NA} if either argument is missing
#'@author Omar Benites
#'@details Formula for calculating the specific gravity:
#'SG = twa / (twa - tww), computed row-wise via the package's \code{sbsum} helper.
#'@references International Potato Center (CIP).2014: Procedures for Standard Evaluation and Data Management of Advanced Potato Clones
#'Module 2. Healthy Tuber Yield Trials International Cooperators Guide.
#'@keywords potato, agronomy,post-harvest,quantitative-continuous,yield,late blight,plant
#'@family potato,yield,late blight,post-harvest,plant
#'@export
#'
sg <- function(twa,tww){
  # BUGFIX: the original assigned sg <- NA on a missing argument but then
  # fell through to the computation, which errored on the missing value.
  # The commented-out stop() calls show NA was the intended result, so
  # return it immediately.
  if (missing(twa) || missing(tww)) {
    return(NA)
  }
  # Denominator twa - tww computed row-wise with the package's sbsum().
  sg <- twa/apply(cbind(twa,-tww),1,sbsum)
  return(sg)
}
|
1ffc98816f44b9b73eb9b999a7d29a5b50973b87
|
1ab3fe36ec133cb90fcfc4071c15b37edc1d1c79
|
/Seccion 06 - Regresion/078 - Regresión Lineal.R
|
bfe33693ca913878012e794143562d7e3950dc05
|
[] |
no_license
|
achiola/r
|
419d182bd6ec546af4ef0dc10b7b59678ada561b
|
08a5c2b78b58193d7fdbbf0fa612c52ec21df925
|
refs/heads/master
| 2020-07-06T14:24:46.427229
| 2020-01-07T21:43:38
| 2020-01-07T21:43:38
| 203,048,278
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,698
|
r
|
078 - Regresión Lineal.R
|
# Linear regression on the auto-mpg dataset: train/test split, model fit,
# RMSE evaluation, diagnostics, and stepwise simplification.
library(caret)
auto <-read.csv("Seccion 06 - Regresion/auto-mpg.csv")
# Encode cylinder count as a labelled factor.
auto$cylinders <- factor(auto$cylinders,
levels = c(3,4, 5,6,8),
labels = c("3 cyl", "4 cyl", "5 cyl", "6 cyl", "8 cyl"))
set.seed(2018)
# 70/30 train/test split stratified on the outcome.
t.id <- createDataPartition(auto$mpg,
p = 0.7,
list = F)
names(auto)
# Fit mpg on all predictors, excluding columns 1, 8 and 9.
mod <- lm(mpg ~ .,
data = auto[t.id, -c(1, 8, 9)])
mod
# Fitted equation (coefficients from one run):
# mpg = 37.284202 +
#       6.231475 * 4Cyl +
#       8.248195 * 5Cyl +
#       2.131026 * 6Cyl +
#       4.568171 * 8Cyl +
#       0.002245 * displacement +
#      -0.057543 * horsepower +
#      -0.00466 * weight +
#       0.050745 * acceleration
summary(mod)
boxplot(mod$residuals)
# RMSE on the training set ...
sqrt(mean((mod$fitted.values - auto[t.id,]$mpg)^2))
pred <- predict(mod, auto[-t.id, -c(1,8,9)])
# ... and on the held-out test set.
sqrt(mean((pred - auto[-t.id,]$mpg)^2))
auto2 <- auto[-t.id,]
auto2$pred <- pred
# Standard lm diagnostic plots in a 2x2 grid.
par(mfrow=c(2,2))
plot(mod)
#############################################################
# 80. Options for the formulas of a linear model
#############################################################
# reference category
auto <- within(auto, cylinders <- relevel(cylinders, ref="4 cyl"))
mod <- lm(mpg ~ .,
data = auto[t.id, -c(1, 8, 9)])
mod
pred <- predict(mod, auto[-t.id, -c(1,8,9)])
sqrt(mean((pred - auto[-t.id,]$mpg)^2))
plot(mod)
auto2$pred2 <- pred
#############################################################
# 81. The step function to simplify the linear model
#############################################################
library(MASS)
mod
summary(mod)
# Backward elimination driven by AIC.
step.model <- stepAIC(mod, direction = "backward")
summary(step.model)
|
290a3c1df2798fa4ae54b1929db72ea71fabc681
|
5f7bddcf382d0ef974ebb3748e79b1b9eb30cbfb
|
/Tutorials/tukey_code.R
|
3f96761b2efa487ea64c6505f6aa559835b871d5
|
[] |
no_license
|
vmoprojs/GeoModels-page
|
b1a1dcd4cbf66c1fca901156bccc73d9b2c5c026
|
3832d083d77504148e051a8d76ead5da6be6a478
|
refs/heads/master
| 2023-09-03T15:46:07.570852
| 2023-08-29T03:45:55
| 2023-08-29T03:45:55
| 122,678,188
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,106
|
r
|
tukey_code.R
|
# Simulate, fit, diagnose and krige a Tukey-h random field with Matern
# correlation using the GeoModels package.
# NOTE(review): replaced require() with library() (library() errors on a
# missing package instead of silently returning FALSE) and dropped the
# embedded rm(list=ls()), which a script should never do.
library(GeoModels)
library(fields)

model = "Tukeyh"  # model name in the GeoModels package
# (a stray "set.seed(989)" sat inside a comment in the original and never ran)
set.seed(818)
N = 1500
coords = cbind(runif(N), runif(N))
plot(coords, pch = 20, xlab = "", ylab = "")

# True model parameters
corrmodel = "Matern"
scale = 0.2/3
smooth = 0.5
nugget = 0
tail = 0.1  # tail parameter
sill = 1    # variance parameter
mean = 0.5; mean1 = -1  # regression parameters
a0 = rep(1, N); a1 = runif(N, -1, 1)
X = cbind(a0, a1)  ## regression (design) matrix

# simulation
param = list(nugget = nugget, mean = mean, mean1 = mean1, scale = scale,
             smooth = smooth, sill = sill, tail = tail)
data <- GeoSim(coordx = coords, corrmodel = corrmodel, param = param,
               model = model, X = X)$data

### estimation via pairwise (conditional) composite likelihood
optimizer = "nlminb"
fixed1 <- list(nugget = nugget, smooth = smooth)
start1 <- list(mean = mean, mean1 = mean1, scale = scale, sill = sill, tail = tail)
I = Inf
lower1 <- list(mean = -I, mean1 = -I, scale = 0, sill = 0, tail = 0)
upper1 <- list(mean = I, mean1 = I, scale = I, sill = I, tail = 0.5)
fit2 <- GeoFit2(data = data, coordx = coords, corrmodel = corrmodel,
                optimizer = optimizer, lower = lower1, upper = upper1,
                type = "Pairwise", likelihood = "Conditional",
                neighb = 4, X = X, start = start1, fixed = fixed1, model = model)

#### some graphics ######
fit2
res = GeoResiduals(fit2)  # computing residuals
GeoQQ(res)
GeoQQ(res, type = "D", ylim = c(0, 0.5), breaks = 20)
vario <- GeoVariogram(data = res$data, coordx = coords, maxdist = 0.4)
GeoCovariogram(res, show.vario = TRUE, vario = vario, pch = 20)

########## kriging ###############
# Prediction grid with a random covariate at each location.
xx = seq(0, 1, 0.012)
loc_to_pred = as.matrix(expand.grid(xx, xx))
Nloc = nrow(loc_to_pred)
Xloc = cbind(rep(1, Nloc), runif(Nloc))
param_est = as.list(c(fit2$param, fixed1))
pr = GeoKrig(data = data, coordx = coords, loc = loc_to_pred, X = X, Xloc = Xloc,
             corrmodel = corrmodel, model = model, mse = TRUE, param = param_est)

colour = rainbow(100)
#### map of data
quilt.plot(coords[, 1], coords[, 2], data, col = colour, main = "Data")
# linear kriging
map = matrix(pr$pred, ncol = length(xx))
image.plot(xx, xx, map, col = colour, xlab = "", ylab = "", main = "SimpleKriging")
# associated mean squared error
map_mse = matrix(pr$mse, ncol = length(xx))
image.plot(xx, xx, map_mse, col = colour, xlab = "", ylab = "", main = "MSE")
|
50ee7560d8b8791cd7810761e0c844e11bff2546
|
fdc19ee9ca0e79837c139b47423a3369f203ea95
|
/plot1.R
|
ee3784610b17c670dc398a8724b82cad0c6758b6
|
[] |
no_license
|
M0nd4/ExData_Plotting1
|
dea54b570517e36964353b1b4615fd1126725ff7
|
64d722b57ba7a91c55b09bc2e932d81f501711dc
|
refs/heads/master
| 2021-01-20T23:24:20.904851
| 2014-08-11T00:27:06
| 2014-08-11T00:27:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 531
|
r
|
plot1.R
|
#### Reference
#### https://github.com/maurotrb/ExData_Plotting1
#### Load the full dataset; "?" marks missing values.
power <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?")
#### Keep only the two days of interest (1-2 February 2007).
power <- power[power$Date %in% c("1/2/2007", "2/2/2007"), ]
#### Histogram of global active power over those two days.
hist(power$Global_active_power, col= "red", xlab= "Global Active Power (Kw)", ylab= "Frequency", main= "Global Active Power")
#### Copy the screen device to a PNG file.
dev.copy(png, file="plot1.png")
#### Close the PNG device.
dev.off()
|
8c47244ee9716bb248cc88d54da4ce1c4224e8d3
|
2fc6db1958f1c3d18c1d2a248ba2be2023c98406
|
/visualize/Make_Effects_Graph - Multiple SPIs.R
|
d001619b1085a80fa177a93e1bd428419bdec909
|
[] |
no_license
|
mcooper/ftf
|
7c0ceed9cdbb036afbe85cd18bdff987d821a5b9
|
a45f4fb2507e38e1ede0723f1f548456e0e327c2
|
refs/heads/master
| 2020-03-26T04:08:19.957676
| 2019-11-06T05:02:42
| 2019-11-06T05:02:42
| 144,488,349
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,229
|
r
|
Make_Effects_Graph - Multiple SPIs.R
|
# Forest-style plots of SPI (drought index) regression coefficients across
# 12-60 month SPI windows for three food-security outcomes.
setwd('G://My Drive/Feed the Future/Cloud Model Results')
library(spdep)
library(dplyr)
library(ggplot2)
# Bonferroni-corrected alpha over 30 tests, and the matching normal quantile
# used below to build confidence intervals.
alpha <- 0.05/30
z <- qnorm(1 - alpha/2, 0, 1, lower.tail=T)
##############################
#Ghana
##########################
gha <- data.frame()
for (spi in c('spi12', 'spi24', 'spi36', 'spi48', 'spi60')){
#Ghana Child
# Each .Rdata file is expected to materialize model objects (haz_gha,
# whz_gha) in the global environment.
for (f in list.files(pattern=paste0('gha_child_admin1.*', spi))){
load(f)
}
#HAZ
# Pull estimate and SE of every SPI term from the model summary.
coefs <- as.data.frame(summary(haz_gha)$coefficients)
est <- coefs[grepl('spi', row.names(coefs)), 'Estimate']
se <- coefs[grepl('spi', row.names(coefs)), 'Std. Error']
gha <- bind_rows(gha, data.frame(Estimate=est, SE=se, Country="GHA", Outcome="Height-for-Age Z-Score",
Scale=spi))
#WHZ
coefs <- as.data.frame(summary(whz_gha)$coefficients)
est <- coefs[grepl('spi', row.names(coefs)), 'Estimate']
se <- coefs[grepl('spi', row.names(coefs)), 'Std. Error']
gha <- bind_rows(gha, data.frame(Estimate=est, SE=se, Country="GHA", Outcome="Weight-for-Height Z-Score",
Scale=spi))
#Ghana HHS
for (f in list.files(pattern=paste0('gha_hh_admin1_.*', spi))){
load(f)
}
# NOTE(review): the household model exposes its coefficients as $Coef rather
# than $coefficients -- presumably a different model class; confirm.
coefs <- as.data.frame(summary(hhs_gha)$Coef)
est <- coefs[grepl('spi', row.names(coefs)), 'Estimate']
se <- coefs[grepl('spi', row.names(coefs)), 'Std. Error']
gha <- bind_rows(gha, data.frame(Estimate=est, SE=se, Country="GHA", Outcome="Household Hunger Scale",
Scale=spi))
}
# Bonferroni-adjusted confidence bounds; an interval excluding zero
# (upper * lower > 0) is flagged significant.
gha$upper <- gha$Estimate + gha$SE*z
gha$lower <- gha$Estimate - gha$SE*z
gha$signif <- ifelse(gha$upper*gha$lower > 0, "Significant", "Not Significant")
sel <- gha %>% filter(Scale %in% c('spi12', 'spi24', 'spi36', 'spi48', 'spi60') &
(!Outcome %in% c('')))
# Map spiNN codes to plain month labels for the x axis.
sel <- merge(sel, data.frame(Scale=c('spi12', 'spi24', 'spi36', 'spi48', 'spi60'),
`SPI Window`=c('12', '24', '36',
'48', '60')))
sel$Outcome <- relevel(as.factor(sel$Outcome), ref='Household Hunger Scale')
ggplot(sel) +
geom_pointrange(aes(x=SPI.Window, ymin=lower, ymax=upper, y=Estimate, fill=signif), shape=21, size=1.25) +
geom_hline(aes(yintercept=0)) +
facet_wrap(~Outcome) +
xlab('SPI Window (Months)') +
ylab("Coefficient Estimate") +
theme_bw() +
theme(legend.title=element_blank(), legend.position="bottom")
ggsave('../Fig7.eps', width = 8, height=4, units = 'in')
################################################
#Bangladesh with Just Irrigation (No Interaction)
##################################################
bgd_justirrig <- data.frame()
for (spi in c('spi12', 'spi24', 'spi36', 'spi48', 'spi60')){
#Bangladesh Child
# Each .Rdata file materializes haz_bgd / whz_bgd / hhs_bgd in the global
# environment.
for (f in list.files(pattern=paste0('bgd_child_admin1_factor.*', spi))){
load(f)
}
# NOTE(review): only this first bind adds a Variable column (implying two
# matching coefficient rows: SPI and Irrigation); the later binds omit it,
# leaving Variable NA for those rows -- confirm this is intended.
coefs <- as.data.frame(summary(haz_bgd)$Coef)
est <- coefs[grepl('spi', row.names(coefs)), 'Estimate']
se <- coefs[grepl('spi', row.names(coefs)), 'Std. Error']
bgd_justirrig <- bind_rows(bgd_justirrig, data.frame(Estimate=est, SE=se, Country="BGD", Outcome="Height-for-Age Z-Score",
Variable=c('SPI', 'Irrigation'),
Scale=spi))
coefs <- as.data.frame(summary(whz_bgd)$Coef)
est <- coefs[grepl('spi', row.names(coefs)), 'Estimate']
se <- coefs[grepl('spi', row.names(coefs)), 'Std. Error']
bgd_justirrig <- bind_rows(bgd_justirrig, data.frame(Estimate=est, SE=se, Country="BGD", Outcome="Weight-for-Height Z-Score",
Scale=spi))
#Bangladesh HHS
for (f in list.files(pattern=paste0('bgd_hh_admin1_factor.*', spi))){
load(f)
}
print(spi)
coefs <- as.data.frame(summary(hhs_bgd)$Coef)
est <- coefs[grepl('spi', row.names(coefs)), 'Estimate']
se <- coefs[grepl('spi', row.names(coefs)), 'Std. Error']
bgd_justirrig <- bind_rows(bgd_justirrig, data.frame(Estimate=est, SE=se, Country="BGD", Outcome="Household Hunger Scale",
Scale=spi))
}
# Bonferroni-adjusted confidence bounds and significance flag (interval
# excluding zero).
bgd_justirrig$upper <- bgd_justirrig$Estimate + bgd_justirrig$SE*z
bgd_justirrig$lower <- bgd_justirrig$Estimate - bgd_justirrig$SE*z
bgd_justirrig$signif <- ifelse(bgd_justirrig$upper*bgd_justirrig$lower > 0, "Significant", "Not Significant")
sel <- bgd_justirrig %>% filter(Scale %in% c('spi12', 'spi24', 'spi36', 'spi48', 'spi60') &
(!Outcome %in% c('')))
sel <- merge(sel, data.frame(Scale=c('spi12', 'spi24', 'spi36', 'spi48', 'spi60'),
`SPI Window`=c('12', '24', '36',
'48', '60')))
sel$Outcome <- relevel(as.factor(sel$Outcome), ref='Household Hunger Scale')
ggplot(sel) +
geom_pointrange(aes(x=SPI.Window, ymin=lower, ymax=upper, y=Estimate, fill=signif), position=position_dodge(width=1.5), shape=21, size=1.25) +
geom_hline(aes(yintercept=0)) +
facet_wrap(~Outcome) +
xlab('SPI Window (Months)') +
ylab("Coefficient Estimate") +
theme_bw() +
theme(legend.title=element_blank(), legend.position="bottom")
ggsave('../Fig8.eps', width = 8, height=4, units = 'in')
|
6c4163977cad7fdd5071a7dd639c30b253b89f82
|
dc8d1b4cfa60cd851435543dd42e73626aaf92e4
|
/comparing_models.R
|
8a04cc19f9e2360634f6ad5d7958d7ed44a8f0a0
|
[] |
no_license
|
PeerChristensen/Churn_autoML
|
3a7874b3451cdf756f553bc95707b4f094e24991
|
a893f0d5dd4c63b783ff624b3491a4863b11f677
|
refs/heads/master
| 2020-09-06T12:38:36.701803
| 2020-01-27T15:37:24
| 2020-01-27T15:37:24
| 220,426,164
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,739
|
r
|
comparing_models.R
|
# comparing models
# "2019-11-06"
# Peer Christensen
# Load saved H2O AutoML models from disk and compare them on the held-out
# test set (PR curves, scoring metrics, confusion matrices, modelplotr).
Sys.setlocale("LC_ALL","English")
library(tidyverse)
library(h2o)
library(modelplotr)
library(ggthemes)
h2o.init()
test_data <- read_csv("preprocessed_data/test_data.csv")
test_hf <- as.h2o(test_data)
###########################################################
# get list of models ordered according to the leaderboard
# NOTE(review): ordering relies on file modification time matching the
# leaderboard order -- confirm this holds for how the models were saved.
files <- file.info(dir(path = "models7", full.names = TRUE), extra_cols = FALSE)
files <- files[with(files, order(as.POSIXct(mtime))), ]
files <- rownames(files)
models <- list()
# Load the first ten models; the list is keyed by file path.
for (i in files[1:10]) {
mod <- h2o.loadModel(i)
models[i] = mod
}
###########################################################
# PRROC Curves
# Build one tidy precision/recall table per model so all PR curves can be
# plotted together. Each curve is padded with (0, 0) endpoints and labelled
# "<rank>: <shortened model id>".
# NOTE(review): `best` and `n_models` are unused -- the code that consumed
# them is commented out; the function just iterates over `models`.
prroc_curves <- function(models, best = F, test_data, n_models =5) {
# if (best == T) {
# models <- as.vector(as.character(H2OAutoML_object@leader@model_id)) %>%
# map(h2o.getModel)
# }
# else {
# models <- as.vector(as.character(H2OAutoML_object@leaderboard$model_id))[1:n_models] %>%
# map(h2o.getModel)
# }
df <- tibble()
for (i in 1:length(models)) {
# Precision/recall at every threshold from the H2O performance object.
perf <- h2o.performance(models[[i]], test_data)
recall <- perf@metrics$thresholds_and_metric_scores$recall
precision <- perf@metrics$thresholds_and_metric_scores$precision
model_id <- models[[i]]@model_id
algorithm <- models[[i]]@algorithm
d <- tibble(model_id,algorithm,recall,precision)
# Anchor the curve at the origin on both ends.
d <- add_row(d,model_id = model_id, algorithm=algorithm,recall=0,precision=0,.before=T)
d <- add_row(d,model_id = model_id, algorithm=algorithm,recall=0,precision=0,.before=F)
d <- add_column(d, model_rank = i)
df <- rbind(df,d)
}
# Shorten the AutoML model ids into readable labels prefixed by rank.
df$model_id1 <- str_split(df$model_id, "_AutoML") %>%
map_chr(1) %>%
paste0(df$model_rank,": ",.)
df$model_id2 <- str_split(df$model_id,"(?<=_)(?=[_model])") %>%
map(2) %>%
paste("_",.) %>%
str_remove(" ")
df$model_id <- paste0(df$model_id1,df$model_id2)
df$model_id <- str_remove(df$model_id,"_NULL")
return(df)
}
prroc <- prroc_curves(models,test_data = test_hf,n_models = 5)
# Drop degenerate endpoints before plotting.
prroc %>%
filter(precision != 1, recall > 0.1) %>%
ggplot(aes(recall,precision,colour = reorder(model_id,model_rank))) +
geom_line(size = 1,alpha=.8) +
coord_fixed() +
xlab('Recall') +
ylab('Precision') +
labs(colour = "Models") +
ggtitle('PR-ROC') +
theme_light() +
theme(plot.title = element_text(size = 16),
plot.subtitle = element_text(size = 12,face="italic",vjust=-1)) +
scale_colour_tableau()
ggsave("figures/prroc.png")
############################################################
# Get scoring metrics + rank
# For each model, collect max-F1, max-F2 and max-MCC (with their thresholds)
# and PR-AUC from the test-set performance, plus the model's position in
# `models` as `rank_auc`.
# NOTE(review): `best` and `n_models` are unused here as well.
model_metrics_long <- function(models, best = F, test_data, n_models =5) {
# if (best == T) {
# models <- as.vector(as.character(H2OAutoML_object@leader@model_id)) %>%
# map(h2o.getModel)
# }
# else {
# models <- as.vector(as.character(H2OAutoML_object@leaderboard$model_id))[1:n_models] %>%
# map(h2o.getModel)
# }
#
df <- tibble()
for (i in 1:length(models)) {
perf <- h2o.performance(models[[i]], test_data)
metrics <- perf@metrics$max_criteria_and_metric_scores %>%
as_tibble() %>%
dplyr::select(-idx) %>%
filter(metric %in% c("max f1","max f2","max absolute_mcc")) %>%
mutate(metric = str_remove(metric, "max ")) %>%
add_row(metric="pr_auc",threshold="-",value=perf@metrics$pr_auc) %>%
mutate(model_id = models[[i]]@model_id,
rank_auc = i)
df <- rbind(df,metrics)
}
# Shorten model ids into "<rank>: <name>" labels.
model_id1 <- str_split(df$model_id, "_AutoML") %>%
map_chr(1) %>%
paste0(df$rank_auc,": ",.)
model_id2 <- str_split(df$model_id,"(?<=_)(?=[_model])") %>%
map(2) %>%
paste("_",.) %>%
str_remove(" ")
df$model_id <- paste0(model_id1,model_id2)
df$model_id <- str_remove(df$model_id,"_NULL")
return(df)
}
all_metrics <- model_metrics_long(models, test_data = test_hf)
# Rank the models by PR-AUC and by max F2.
all_metrics %>%
filter(metric == "pr_auc") %>%
arrange(desc(value))
all_metrics %>%
filter(metric == "f2") %>%
arrange(desc(value))
############################################################
# Confusion matrices: one per model, at the threshold maximising F2.
cm_tables <- vector("list", length(models))  # preallocate instead of growing
for (i in seq_along(models)) {  # seq_along() is safe if `models` is empty
  perf <- h2o.performance(models[[i]], test_hf)
  cm_tables[[i]] <- h2o.confusionMatrix(perf, metrics = c("f2"))
}
cm_tables
############################################################
# Model plots
# lift curve
# h2o.gainsLift(models[[1]]) %>%
# ggplot(aes(cumulative_data_fraction,cumulative_lift)) +
# geom_line()
# Build human-readable model labels "<algorithm>: <name>_<model number>",
# stripping the AutoML run id out of the middle of each H2O model id.
model_ids <- character(length(models))  # preallocate instead of c() in a loop
for (i in seq_along(models)) {
  id1 <- str_split(models[[i]]@model_id, "_AutoML") %>%
    map_chr(1) %>%
    paste0(models[[i]]@algorithm, ": ", .)
  id2 <- str_split(models[[i]]@model_id, "(?<=_)(?=[_model])") %>%
    map(2) %>%
    paste("_", .) %>%
    str_remove(" ")
  model_id <- paste0(id1, id2)
  model_ids[i] <- str_remove(model_id, "_NULL")
}
# modelplotr needs the candidate models as named objects in the global
# environment, referenced by name strings below.
m1 = models[[1]]
m2 = models[[2]]
m3 = models[[3]]
m4 = models[[4]]
m5 = models[[5]]
# Score the test set with each model and bucket predictions into 100 ntiles.
scores_and_ntiles <- prepare_scores_and_ntiles(datasets=list("test_data"),
dataset_labels = list("test data"),
models = list("m1","m2","m3","m4","m5"),
#models = list("model_glm"),
model_labels = model_ids,
target_column="Churned30",
ntiles = 100)
scores_and_ntiles <- scores_and_ntiles %>%
rename("ntl_0" = ntl_p0,"ntl_1" = ntl_p1)
plot_input <- plotting_scope(prepared_input = scores_and_ntiles,
scope="compare_models")
save_path <- "C:/Users/pech/Desktop/Projects/Churn_2.0/figures/"
#Cumulative gains
plot_cumgains(data = plot_input,
save_fig = T,
save_fig_filename = paste0(save_path,"cumgains_compare"))
#Cumulative lift
plot_cumlift(data = plot_input,
save_fig = T,
save_fig_filename = paste0(save_path,"cumlift_compare"))
#Response plot
plot_response(data = plot_input,
save_fig = T,
save_fig_filename = paste0(save_path,"response_compare"))
#Cumulative response plot
plot_cumresponse(data = plot_input,
save_fig = T,
save_fig_filename = paste0(save_path,"cumresponse_compare"))
plot_multiplot(data = plot_input, save_fig = T,
save_fig_filename = paste0(save_path,"multiplot_compare"))
# !! Financial plots do not support model comparison, see evaluation script
# # financial plots
# plot_roi(data = plot_input,
# fixed_costs = 1000,
# variable_costs_per_unit = 10,
# profit_per_unit = 50,
# save_fig = T,
# save_fig_filename = "C:/Users/pech/Desktop/Projects/Churn_2.0/roi_1",
# highlight_ntile = "max_roi",
# highlight_how = "text")
#
# plot_costsrevs(data = plot_input2,fixed_costs = 1000,
# variable_costs_per_unit = 10,
# profit_per_unit = 50,
# save_fig = T,
# save_fig_filename = "C:/Users/pech/Desktop/Projects/Churn_2.0/cost_rev_1",
# highlight_ntile = "max_roi",
# highlight_how = "text")
#
# plot_profit(data = plot_input2,fixed_costs = 1000,
# variable_costs_per_unit = 10,
# profit_per_unit = 50,
# save_fig = T,
# save_fig_filename = "C:/Users/pech/Desktop/Projects/Churn_2.0/profit_1",
# highlight_ntile = "max_profit",
# highlight_how = "text")
|
4599e3c851e0bd93bf99603b9a7620b2ba5c2bb2
|
4e5916f94bdbc6dd87c394067388f539ec702310
|
/merge_files.r
|
84613fefa36b94495cb0f70c8b49775ada62c401
|
[
"Apache-2.0"
] |
permissive
|
vijaybarve/tpt-siphonaptera
|
37faaac8dbddca6f0c81ed33c09d5a61153c9550
|
7b281964d2af8005216089b975c8c9006afbcbd0
|
refs/heads/main
| 2023-04-12T05:01:55.563601
| 2021-04-26T23:12:36
| 2021-04-26T23:12:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,541
|
r
|
merge_files.r
|
# add libraries
library(data.table)
library(dplyr)    # anti_join(), used in the GBIF comparison below
library(readxl)
# define function: "is not in" -- the negation of %in%.
`%!in%` <- function(x, y) !(x %in% y)
# compare FMNH to NMNH: keep one record per canonical name, with NMNH taking
# precedence over FMNH when both collections contain the same name.
NMNH_Siphonaptera <- read.csv("~/GitHub/tpt-siphonaptera/output/NMNH_DwC.csv", na = "NA") # read in cleaned NMNH review file
FMNH_Siphonaptera <- read.csv("~/GitHub/tpt-siphonaptera/output/FMNH_DwC.csv", na = "NA") # read in cleaned FMNH review file
FMNH_in_NMNH <- FMNH_Siphonaptera[FMNH_Siphonaptera$canonicalName %in% NMNH_Siphonaptera$canonicalName,] # FMNH rows whose canonical name also occurs in NMNH (kept for reference; not merged)
FMNH_not_in_NMNH <- FMNH_Siphonaptera[FMNH_Siphonaptera$canonicalName %!in% NMNH_Siphonaptera$canonicalName,] # FMNH rows absent from NMNH
merged_siphonaptera <- rbindlist(list(NMNH_Siphonaptera, FMNH_not_in_NMNH), fill = TRUE) # add FMNH terms not in NMNH to NMNH
# compare to Lewis list: Lewis records take precedence over the museum merge.
Lewis_Siphonaptera <- read.csv("~/GitHub/tpt-siphonaptera/output/Lewis_Siphonaptera.csv", na = "NA") # read in cleaned Lewis review file
merged_in_Lewis <- merged_siphonaptera[merged_siphonaptera$canonicalName %in% Lewis_Siphonaptera$canonicalName,] # merged rows with a canonical-name match in Lewis (reference only)
merged_not_in_Lewis <- merged_siphonaptera[merged_siphonaptera$canonicalName %!in% Lewis_Siphonaptera$canonicalName,] # merged rows that do not match any Lewis canonical name
Lewis_in_merged <- Lewis_Siphonaptera[Lewis_Siphonaptera$canonicalName %in% merged_siphonaptera$canonicalName,] # Lewis rows with a match in the merge (reference only)
Lewis_not_in_merged <- Lewis_Siphonaptera[Lewis_Siphonaptera$canonicalName %!in% merged_siphonaptera$canonicalName,] # Lewis rows absent from the merge (reference only)
merged_siphonaptera <- rbindlist(list(Lewis_Siphonaptera, merged_not_in_Lewis), fill = TRUE) # add NMNH/FMNH terms not in Lewis to Lewis
# Build a stable taxonID by concatenating the source-dataset code and its
# per-record id.
merged_siphonaptera$taxonID <- paste(merged_siphonaptera$TPTdataset, merged_siphonaptera$TPTID, sep = '')
write.csv(merged_siphonaptera,"~/GitHub/tpt-siphonaptera/output/merged_siphonaptera.csv", row.names = FALSE) # merged file
# review for duplicates: flag every row whose canonicalName occurs more than once.
dupe <- merged_siphonaptera[,c('canonicalName')] # select columns to check duplicates
merged_dups <- merged_siphonaptera[duplicated(dupe) | duplicated(dupe, fromLast=TRUE),] # create duplicates data frame
# compare merged file to GBIF
GBIF_Siphonaptera <- read_excel("~/GitHub/tpt-siphonaptera/input/GBIF_Siphonaptera.xlsx") # read in GBIF taxonomy
# review for duplicates: write them out for manual review, then read the
# manually-curated rows back in and re-attach them.
dupe <- GBIF_Siphonaptera[,c('canonicalName')] # select columns to check duplicates
GBIF_dups <- GBIF_Siphonaptera[duplicated(dupe) | duplicated(dupe, fromLast=TRUE),] # create duplicates data frame
# NOTE(review): anti_join() is a dplyr function; confirm dplyr is attached
# before this line runs (it is not loaded in the original header).
GBIF_Siphonaptera <- anti_join(GBIF_Siphonaptera,GBIF_dups, by = c("canonicalName", "taxonRank")) # remove duplicate rows from working file
write.csv(GBIF_dups, "~/GitHub/tpt-siphonaptera/output/GBIF_duplicates.csv") # write out GBIF dupes for review
GBIF_dups <- read.csv <- read_excel("~/GitHub/tpt-siphonaptera/input/GBIF_dupe_reviewed.xlsx") # read in GBIF duplicates to return
GBIF_Siphonaptera <- rbind(GBIF_Siphonaptera, GBIF_dups)
# Partition the merged list against GBIF and write each partition for review.
merged_in_GBIF <- merged_siphonaptera[merged_siphonaptera$canonicalName %in% GBIF_Siphonaptera$canonicalName,] # get all rows in merged with canonical name that matches a row in GBIF
merged_not_in_GBIF <- merged_siphonaptera[merged_siphonaptera$canonicalName %!in% GBIF_Siphonaptera$canonicalName,] # get all rows in merged that do not match a canonical name in GBIF
write.csv(merged_not_in_GBIF, "~/GitHub/tpt-siphonaptera/output/not_in_GBIF_siphonaptera.csv", row.names = FALSE) # names need review)
GBIF_in_merged <- GBIF_Siphonaptera[GBIF_Siphonaptera$canonicalName %in% merged_siphonaptera$canonicalName,] # get all rows in GBIF with canonical name that matches a row in merged
write.csv(GBIF_in_merged, "~/GitHub/tpt-siphonaptera/output/GBIF_in_siphonaptera.csv", row.names = FALSE) # names in GBIF
GBIF_not_in_merged <- GBIF_Siphonaptera[GBIF_Siphonaptera$canonicalName %!in% merged_siphonaptera$canonicalName,] # get all rows in GBIF that do not match a canonical name in merged
write.csv(GBIF_not_in_merged, "~/GitHub/tpt-siphonaptera/output/GBIF_not_in_siphonaptera.csv", row.names = FALSE) # names need review)
# Final list: GBIF-matched records plus merged records GBIF lacks.
Siphonaptera <- rbindlist(list(GBIF_in_merged, merged_not_in_GBIF), fill = TRUE) # add GBIF names not in merged to merged
write.csv(Siphonaptera, "~/GitHub/tpt-siphonaptera/output/TPT_Siphonaptera.csv", row.names = FALSE) # names list
|
c13ba8e92f1aed1fe567d7ac66f9a99e2d74c43d
|
5820b1f46b64c2bb47ae81e38fbd233543c164e9
|
/scripts/load_dataset.R
|
8acbfa45b04de14999d751f46db17a1e79133489
|
[] |
no_license
|
Victinyx/EXPAI-test
|
10d8e359ed552dc8d66ea1979de0950659071021
|
33b8acafc809fd08883997079e911c3b08ffe588
|
refs/heads/main
| 2023-06-29T03:02:23.885414
| 2021-08-02T08:18:19
| 2021-08-02T08:18:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 814
|
r
|
load_dataset.R
|
# Load the heart-disease dataset, recode it, and make a 60/40 train/test split.
#
# The original wrapped the read in setwd('./data') ... setwd('..'); reading
# through a relative path has the same effect without mutating (and, on
# error, corrupting) the session's working directory.
#
# NOTE: read.csv() forwards unmatched arguments through `...` to
# read.table(); the original spelled the argument `stringsAsFactor` (missing
# the trailing "s"), which only reaches `stringsAsFactors` via fragile
# partial matching -- it is spelled out in full here.
dataset <- read.csv(file.path("data", "heart.csv"), stringsAsFactors = TRUE)
# Readable column names; order matches the raw file's columns.
colnames(dataset) <- c('age',
                       'sex',
                       'chestPain',
                       'bloodPressure',
                       'cholesterol',
                       'bloodSugar',
                       'ecg',
                       'maxHeartRate',
                       'indAngina',
                       'stDepression',
                       'stSlope',
                       'numVessels',
                       'scintigraphy',
                       'condition')
# Recode the outcome: 1 -> "CAD+" (disease present), anything else -> "CAD-".
dataset$condition <- ifelse(dataset$condition == 1, 'CAD+', 'CAD-')
# Reproducible split.  initial_split()/training()/testing() come from
# rsample (tidymodels) -- NOTE(review): not attached in this script; confirm
# the caller loads it.
set.seed(9876)
data_split <- initial_split(dataset, prop = 0.6)
data_train <- training(data_split)
data_test <- testing(data_split)
|
8a484210b2e179227ec0261f28cbe03d0070e016
|
0ca78ef5a8670fbdab55409eecda579cec2baf68
|
/DM/deeplearning/prevent_overfit.R
|
7141ea2e6b70b5d64fce34ebb529a9045d27b1ee
|
[] |
no_license
|
zhurui1351/RSTOCK_TRAIL
|
ab83fdef790778a1e792d08a876522ef13a872e6
|
2396c512c8df81a931ea3ca0c925c151363a2652
|
refs/heads/master
| 2021-01-23T09:01:36.814253
| 2019-05-17T15:26:19
| 2019-05-17T15:26:19
| 23,482,375
| 9
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,914
|
r
|
prevent_overfit.R
|
################################################################################
##                                                                            ##
##                                 L1 Penalty                                 ##
##                                                                            ##
################################################################################
## Attach everything the whole script needs up front.  library() (unlike the
## original require(MASS)) fails loudly when a package is missing.  Later
## sections call train()/confusionMatrix() (caret), %dopar% (foreach),
## makeCluster()/registerDoSNOW() (snow via doSNOW), nn.train()/nn.predict()
## (deepnet) and encodeClassLabels() (RSNNS) without ever attaching those
## packages, so they are attached here.  RSNNS is attached BEFORE caret on
## purpose: RSNNS also exports train() and confusionMatrix(), and caret's
## versions must win the search-path lookup for the bare train() call below.
library(glmnet)
library(MASS)      # mvrnorm()
library(deepnet)   # nn.train(), nn.predict()
library(RSNNS)     # encodeClassLabels()
library(caret)     # train(); must mask RSNNS::train
library(foreach)   # %dopar%
library(doSNOW)    # registerDoSNOW(); attaches snow (makeCluster())
## set seed and simulate data: 200 obs of 5 predictors, x1-x4 nearly
## collinear (pairwise correlations .99+), x5 almost independent; the
## response loads on x1-x4 only.
set.seed(1234)
X <- mvrnorm(n = 200, mu = c(0, 0, 0, 0, 0),
             Sigma = matrix(c(
               1, .9999, .99, .99, .10,
               .9999, 1, .99, .99, .10,
               .99, .99, 1, .99, .10,
               .99, .99, .99, 1, .10,
               .10, .10, .10, .10, 1
             ), ncol = 5))
y <- rnorm(200, 3 + X %*% matrix(c(1, 1, 1, 1, 0)), .5)
## OLS and cross-validated lasso on the first 100 observations only.
m.ols <- lm(y[1:100] ~ X[1:100, ])
m.lasso.cv <- cv.glmnet(X[1:100, ], y[1:100], alpha = 1)
png("../FirstDraft/chapter03_images/B4228_03_01.png",
    width = 5.5, height = 4, units = "in", res = 600)
plot(m.lasso.cv)
dev.off()
## Coefficients side by side (auto-printed).
cbind(
  OLS = coef(m.ols),
  Lasso = coef(m.lasso.cv)[,1])
################################################################################
##                                                                            ##
##                                  L2 Penalty                                ##
##                                                                            ##
################################################################################
## Cross-validated ridge fit (alpha = 0) on the same first 100 observations
## used for the OLS and lasso fits above.
m.ridge.cv <- cv.glmnet(X[1:100, ], y[1:100], alpha = 0)
png("../FirstDraft/chapter03_images/B4228_03_02.png",
    width = 5.5, height = 4, units = "in", res = 600)
plot(m.ridge.cv)
dev.off()
## Coefficients side by side (auto-printed): OLS vs lasso vs ridge.
cbind(
  OLS = coef(m.ols),
  Lasso = coef(m.lasso.cv)[,1],
  Ridge = coef(m.ridge.cv)[,1])
################################################################################
##                                                                            ##
##                                  Use Case                                  ##
##                                                                            ##
################################################################################
## https://www.kaggle.com/c/digit-recognizer
## same data as from previous chapter
digits.train <- read.csv("train.csv")
## convert to factor
digits.train$label <- factor(digits.train$label, levels = 0:9)
## Work on the first 5,000 rows only (column 1 is the label).
i <- 1:5000
digits.X <- digits.train[i, -1]
digits.y <- digits.train[i, 1]
## try various weight decays and number of iterations
## register backend so that different decays can be
## estimated in parallel
## NOTE(review): the 10-worker cluster is never stopped in this section; it
## is reused by the %dopar% loop further below -- stopCluster(cl) should be
## called at the very end of the script.
cl <- makeCluster(10)
clusterEvalQ(cl, {
  source("checkpoint.R")
})
registerDoSNOW(cl)
set.seed(1234)
## One caret fit per iteration budget (100 and 150 epochs); each fit itself
## tunes over decay in {0, .1} with 5-fold CV.
## NOTE(review): `repeats = 1` is ignored when method = "cv" (it applies to
## "repeatedcv") -- per the caret::trainControl documentation; confirm intent.
digits.decay.m1 <- lapply(c(100, 150), function(its) {
  train(digits.X, digits.y,
        method = "nnet",
        tuneGrid = expand.grid(
          .size = c(10),
          .decay = c(0, .1)),
        trControl = trainControl(method = "cv", number = 5, repeats = 1),
        MaxNWts = 10000,
        maxit = its)
})
## Auto-print both fitted objects for comparison.
digits.decay.m1[[1]]
digits.decay.m1[[2]]
################################################################################
##                                                                            ##
##                              Model Averaging                               ##
##                                                                            ##
################################################################################
## simulated data: piecewise-polynomial mean, split 50/50 into train/test.
## (Statement order matters here -- the two rnorm() calls must stay in this
## sequence to reproduce the same RNG stream.)
set.seed(1234)
d <- data.frame(
  x = rnorm(400))
d$y <- with(d, rnorm(400, 2 + ifelse(x < 0, x + x^2, x + x^2.5), 1))
d.train <- d[1:200, ]
d.test <- d[201:400, ]
## three different models: linear, quadratic, and a hinge (broken-stick) fit.
m1 <- lm(y ~ x, data = d.train)
m2 <- lm(y ~ I(x^2), data = d.train)
m3 <- lm(y ~ pmax(x, 0) + pmin(x, 0), data = d.train)
## In sample R2 (auto-printed).
cbind(
  M1 = summary(m1)$r.squared,
  M2 = summary(m2)$r.squared,
  M3 = summary(m3)$r.squared)
## correlations in the training data between the three sets of fitted values.
cor(cbind(
  M1 = fitted(m1),
  M2 = fitted(m2),
  M3 = fitted(m3)))
## generate predictions and the average prediction (simple model average).
d.test$yhat1 <- predict(m1, newdata = d.test)
d.test$yhat2 <- predict(m2, newdata = d.test)
d.test$yhat3 <- predict(m3, newdata = d.test)
d.test$yhatavg <- rowMeans(d.test[, paste0("yhat", 1:3)])
## correlation in the testing data (includes x, y, the three yhats, and the
## average) -- auto-printed.
cor(d.test)
################################################################################
##                                                                            ##
##                                  Use Case                                  ##
##                                                                            ##
################################################################################
## Fit Models: four nets in parallel -- 40 and 80 hidden units, each with and
## without dropout regularisation (hidden .5 / visible .2 for i = 3, 4).
## Each worker wraps its model in list() so .combine = 'c' yields a flat
## 4-element list.
nn.models <- foreach(i = 1:4, .combine = 'c') %dopar% {
  set.seed(1234)
  list(nn.train(
    x = as.matrix(digits.X),
    y = model.matrix(~ 0 + digits.y),
    hidden = c(40, 80, 40, 80)[i],
    activationfun = "tanh",
    learningrate = 0.8,
    momentum = 0.5,
    numepochs = 150,
    output = "softmax",
    hidden_dropout = c(0, 0, .5, .5)[i],
    visible_dropout = c(0, 0, .2, .2)[i]))
}
## Training-set predictions, decoded back to class labels (1-based, hence the
## `yhat - 1` below when cross-tabulating against the 0-9 digit labels).
nn.yhat <- lapply(nn.models, function(obj) {
  encodeClassLabels(nn.predict(obj, as.matrix(digits.X)))
})
perf.train <- do.call(cbind, lapply(nn.yhat, function(yhat) {
  caret::confusionMatrix(xtabs(~ I(yhat - 1) + digits.y))$overall
}))
colnames(perf.train) <- c("N40", "N80", "N40_Reg", "N80_Reg")
## NOTE(review): options(digits = 4) is a session-wide side effect.
options(digits = 4)
perf.train
## Out-of-sample check on the next 5,000 rows.
i2 <- 5001:10000
test.X <- digits.train[i2, -1]
test.y <- digits.train[i2, 1]
nn.yhat.test <- lapply(nn.models, function(obj) {
  encodeClassLabels(nn.predict(obj, as.matrix(test.X)))
})
perf.test <- do.call(cbind, lapply(nn.yhat.test, function(yhat) {
  caret::confusionMatrix(xtabs(~ I(yhat - 1) + test.y))$overall
}))
colnames(perf.test) <- c("N40", "N80", "N40_Reg", "N80_Reg")
perf.test
|
080f87d33f7447acfe7bfbfde84811251b782696
|
29589aa37a7f3c31d168521bd8174adfc89c1d5d
|
/Part_A_Analysis_Richards_2008_plotting_binomial.R
|
c98317aa86ae1aee9044b2f0df4f2bc188060599
|
[] |
no_license
|
LoganHowell/Honours
|
a60f0343b98f2dfe67e8d1959e1dd39afc9210f7
|
4ef732f419819133b164ecb62f1babadb2f6def8
|
refs/heads/master
| 2023-05-04T20:35:18.167085
| 2021-05-26T00:01:55
| 2021-05-26T00:01:55
| 340,608,976
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,561
|
r
|
Part_A_Analysis_Richards_2008_plotting_binomial.R
|
library(tidyverse)
library(Rfast)
library(epitools)
library(extraDistr)
library(foreach)
library(doParallel)
library(parallel)
library(stats4)
library(fAsianOptions)
library(maxLik)
library(matrixStats)
library(ramify)
library(purrr)
library(doSNOW)
library(latex2exp)
library(ggrepel)
library(data.table)
library(plotly)
library(cowplot)
library(xlsx)
# Binomial Example:
# Design constants for the kelp/perch capture simulation.
kelp_bunches <- 1:4     # The number of kelp bunches.
tau <- 15               # The duration of the experiment.
alpha <- 0.075          # Pre-defined capture rate (per hour).
beta <- 0.5             # The effectiveness of the kelp in mitigating perch capture.
num_replicates <- 5     # Truth values generated per treatment, to which models are fit.
num_perch <- 10         # The number of prey fish in the tank.
phi <- 0.025            # The pre-determined overdispersion factor.
num_reps <- 1000        # The number of times the KLD will be calculated.
Z <- 10000              # New points used in the cumulative-IC calculation (per treatment).
AIC_threshold <- 6      # The threshold for delta_AIC-based selection.
QAIC_threshold <- 6     # The threshold for delta_QAIC-based selection.
# simplicity_key[i, j] == 1 flags models j that prob_model_selected() below
# may drop from the selected set relative to model i.
simplicity_key <- rbind(
  c(0, 1, 1, 1, 1, 1),
  c(0, 0, 0, 0, 1, 1),
  c(0, 0, 0, 0, 1, 1),
  c(0, 0, 0, 0, 1, 1),
  c(0, 0, 0, 0, 0, 0),
  c(0, 0, 0, 0, 0, 0)
)
# Estimate, across the simulated model fits (rows of the delta matrices), how
# often each candidate model is selected under a delta-AIC / delta-QAIC
# threshold rule, both with and without pruning driven by the global
# `simplicity_key` matrix.
#
# Arguments:
#   AIC_threshold, QAIC_threshold -- selection cut-offs on the delta scale.
#   AIC_mat_delta, QAIC_mat_delta -- one row per simulated fit, one column
#     per candidate model; entries already centred on the row minimum.
#
# Returns a 6-element list:
#   [[1]] delta-AIC selection proportion per model
#   [[2]] delta-QAIC selection proportion per model
#   [[3]] number of models selected per fit (delta-AIC rule)
#   [[4]] number of models selected per fit (pruned delta-AIC rule)
#   [[5]] proportion of fits in which model 5 is selected (delta rule)
#   [[6]] proportion of fits in which model 5 survives pruning
#
# NOTE(review): reads globals `simplicity_key` and `num_reps`; the
# proportions in [[1]]/[[2]] divide by `num_reps`, which must equal
# nrow(AIC_mat_delta) for them to be true proportions -- TODO confirm.
# NOTE(review): the 1:nrow(...) / 1:length(...) loops below break on empty
# inputs; seq_len()/seq_along() would be safer on a future edit.
prob_model_selected = function(AIC_threshold, QAIC_threshold, AIC_mat_delta, QAIC_mat_delta)
{
  # Create a list of models selected from each fit.
  nested_AIC_models_selected = list()
  delta_AIC_models_selected = list()
  delta_QAIC_models_selected = list()
  nested_QAIC_models_selected = list()
  number_AIC_models_selected = c()
  number_nested_models_selected = c()
  best_model_selected_delta = 0
  best_model_selected_nested = 0
  for(i in 1:nrow(AIC_mat_delta))
  {
    # Extract the i'th rows.
    current_AIC_row = AIC_mat_delta[i, ]
    current_QAIC_row = QAIC_mat_delta[i, ]
    # Determine which models are below the delta-AIC threshold.  The
    # "nested_" copies start identical to the "delta_" copies and are then
    # pruned below.
    nested_AIC_models_selected[[i]] = which(current_AIC_row <= AIC_threshold)
    delta_AIC_models_selected[[i]] = which(current_AIC_row <= AIC_threshold)
    delta_QAIC_models_selected[[i]] = which(current_QAIC_row <= QAIC_threshold)
    nested_QAIC_models_selected[[i]] = which(current_QAIC_row <= QAIC_threshold)
    # Order the surviving model indices from smallest to largest delta.
    AIC_values = current_AIC_row[c(nested_AIC_models_selected[[i]])]
    QAIC_values = current_QAIC_row[c(nested_QAIC_models_selected[[i]])]
    ordered_AIC_deltas = nested_AIC_models_selected[[i]][order(AIC_values, decreasing = FALSE)]
    ordered_QAIC_deltas = nested_QAIC_models_selected[[i]][order(QAIC_values, decreasing = FALSE)]
    final_AIC_list = ordered_AIC_deltas
    final_QAIC_list = ordered_QAIC_deltas
    # Walk the ordered list; for each model j, drop any model flagged by
    # simplicity_key[j, ] that appears at or after j (i.e. with a worse
    # delta) in the ordering.
    for(j in 1:length(ordered_AIC_deltas))
    {
      AIC_removal_list = intersect(which(simplicity_key[ordered_AIC_deltas[j], ] == 1), ordered_AIC_deltas[j:length(ordered_AIC_deltas)])
      QAIC_removal_list = intersect(which(simplicity_key[ordered_QAIC_deltas[j], ] == 1), ordered_QAIC_deltas[j:length(ordered_QAIC_deltas)])
      # If a more complicated model has a greater AIC value:
      if(length(AIC_removal_list) > 0)
      {
        final_AIC_list = final_AIC_list[!final_AIC_list %in% AIC_removal_list]
      }
      if(length(QAIC_removal_list) > 0)
      {
        final_QAIC_list = final_QAIC_list[!final_QAIC_list %in% QAIC_removal_list]
      }
    }
    # Overwrite the original list with the updated list (nested models removed)
    nested_AIC_models_selected[[i]] = final_AIC_list
    nested_QAIC_models_selected[[i]] = final_QAIC_list
  }
  # Create an empty array to store the number of times each model is selected via the threshold selection rule.
  nested_AIC_selection_times = rep(0, times = 6)
  delta_AIC_selection_times = rep(0, times = 6)
  nested_QAIC_selection_times = rep(0, times = 3)
  delta_QAIC_selection_times = rep(0, times = 3)
  for(i in 1:length(nested_AIC_models_selected))
  {
    # Increment the selection counts for the models selected in the i'th fit:
    # NOTE(review): the nested_QAIC counter below is incremented from
    # delta_QAIC_models_selected rather than nested_QAIC_models_selected,
    # so the pruned QAIC tally duplicates the unpruned one -- looks
    # unintended; confirm against the analysis write-up.
    nested_AIC_selection_times[nested_AIC_models_selected[[i]]] = nested_AIC_selection_times[nested_AIC_models_selected[[i]]] + 1
    delta_AIC_selection_times[delta_AIC_models_selected[[i]]] = delta_AIC_selection_times[delta_AIC_models_selected[[i]]] + 1
    nested_QAIC_selection_times[delta_QAIC_models_selected[[i]]] = nested_QAIC_selection_times[delta_QAIC_models_selected[[i]]] + 1
    delta_QAIC_selection_times[delta_QAIC_models_selected[[i]]] = delta_QAIC_selection_times[delta_QAIC_models_selected[[i]]] + 1
    number_AIC_models_selected[i] = length(delta_AIC_models_selected[[i]])
    number_nested_models_selected[i] = length(nested_AIC_models_selected[[i]])
    # Track how often model 5 (the generating model) is retained.
    if(5 %in% delta_AIC_models_selected[[i]])
    {
      best_model_selected_delta = best_model_selected_delta + 1
    }
    if(5 %in% nested_AIC_models_selected[[i]])
    {
      best_model_selected_nested = best_model_selected_nested + 1
    }
  }
  # Calculate the proportion of times each model was selected.
  nested_AIC_selection_prop = nested_AIC_selection_times / num_reps
  delta_AIC_selection_prop = delta_AIC_selection_times / num_reps
  nested_QAIC_selection_prop = nested_QAIC_selection_times / num_reps
  delta_QAIC_selection_prop = delta_QAIC_selection_times / num_reps
  return_list = vector("list", length = 6) # Initialise a list to return from the function.
  return_list[[1]] = delta_AIC_selection_prop
  return_list[[2]] = delta_QAIC_selection_prop
  return_list[[3]] = number_AIC_models_selected
  return_list[[4]] = number_nested_models_selected
  return_list[[5]] = best_model_selected_delta / nrow(AIC_mat_delta)
  return_list[[6]] = best_model_selected_nested / nrow(AIC_mat_delta)
  return(return_list)
}
# Read in the scenario data:
# Each sheet of binomial_scenario_a.xlsx holds one matrix: a row per
# simulated fit, a column per candidate model.
AIC_A = as.matrix(xlsx::read.xlsx(file = "binomial_scenario_a.xlsx", sheetName = "AIC", header = FALSE))
KLD_estimates_A = as.matrix(xlsx::read.xlsx(file = "binomial_scenario_a.xlsx", sheetName = "KLD_estimates", header = FALSE))
AIC_KLD_estimates_A = as.matrix(xlsx::read.xlsx(file = "binomial_scenario_a.xlsx", sheetName = "AIC_KLD_estimates", header = FALSE))
QAIC_A = as.matrix(xlsx::read.xlsx(file = "binomial_scenario_a.xlsx", sheetName = "QAIC", header = FALSE))
# Centre each row on its minimum to get delta values.  rowMins() is exported
# by both Rfast and matrixStats (both attached above); matrixStats is
# attached later and so masks Rfast -- matrixStats::rowMins returns values,
# which is what the subtraction needs.  Keep the library() order intact.
AIC_A_delta = AIC_A - rowMins(AIC_A)
# Only the first three QAIC candidate models are analysed.
QAIC_A = QAIC_A[, 1:3]
QAIC_A_delta = QAIC_A - rowMins(QAIC_A)
# Sweep the selection threshold from 0 to 8 in half-unit steps.
threshold_sequence = seq(from = 0, to = 8, by = 0.5)
AIC_table_A = data.frame(
  model = paste("(AIC) M", seq(1:6), sep = "")
)
QAIC_table_A = data.frame(
  model = paste("(QAIC) M", seq(1:3), sep = "")
)
# One column of selection proportions per threshold value (columns are
# appended positionally, so they get default V2.. names).
for(i in 1:length(threshold_sequence))
{
  # Scenario A:
  selection_output = prob_model_selected(AIC_threshold = threshold_sequence[i],
                                         QAIC_threshold = threshold_sequence[i],
                                         AIC_mat_delta = AIC_A_delta,
                                         QAIC_mat_delta = QAIC_A_delta)
  AIC_table_A[i + 1] = selection_output[[1]]
  QAIC_table_A[i + 1] = selection_output[[2]]
}
# Re-run once at the headline threshold of 6 for the summary numbers below.
selection_output = prob_model_selected(AIC_threshold = 6,
                                       QAIC_threshold = 6,
                                       AIC_mat_delta = AIC_A_delta,
                                       QAIC_mat_delta = QAIC_A_delta)
# Output the average number of models selected
mean(selection_output[[3]])
mean(selection_output[[4]])
# Output the EKLD estimate for models 5 and 6.  colmeans() (lower case) is
# Rfast's column-means function.
colmeans(KLD_estimates_A[, 5:6])
selection_output[[5]]
selection_output[[6]]
|
3f278a3a46d04d564cfa0aa3a49190f5083ffd46
|
c53e367a5a155cfb1ee3a41e8b0351aeaa8d331d
|
/mlmRev/doc/MlmSoftRev.R
|
0d21e893df165346828bf8e02ecc8fae5278f82b
|
[
"MIT"
] |
permissive
|
solgenomics/R_libs
|
bcf34e00bf2edef54894f6295c4f38f1e480b3fc
|
e8cdf30fd5f32babf39c76a01df5f5544062224e
|
refs/heads/master
| 2023-07-08T10:06:04.304775
| 2022-05-09T15:41:26
| 2022-05-09T15:41:26
| 186,859,606
| 0
| 2
|
MIT
| 2023-03-07T08:59:16
| 2019-05-15T15:57:13
|
C++
|
UTF-8
|
R
| false
| false
| 10,684
|
r
|
MlmSoftRev.R
|
### NOTE(review): this file is machine-generated (Stangle/Sweave output of
### 'MlmSoftRev.Rnw').  Do not hand-edit the code below; change the .Rnw
### vignette source and regenerate instead.  It fits multilevel models to
### the Exam/Oxboys data using lme4 and plots them with lattice.
### R code from vignette source 'MlmSoftRev.Rnw'
###################################################
### code chunk number 1: preliminaries
###################################################
options(width=80, show.signif.stars = FALSE,
        lattice.theme = function() canonical.theme("pdf", color = FALSE))
library(mlmRev)
library(lme4)
library(lattice)
set.seed(1234321)
###################################################
### code chunk number 2: Examprep
###################################################
lmer(normexam ~ standLRT + sex + schgend + (1|school), Exam)
###################################################
### code chunk number 3: ExamData
###################################################
str(Exam)
summary(Exam)
###################################################
### code chunk number 4: ExamFit
###################################################
(Em1 <- lmer(normexam ~ standLRT + sex + schgend + (1|school), Exam))
###################################################
### code chunk number 5: Examtime
###################################################
system.time(lmer(normexam ~ standLRT + sex + schgend + (1|school), Exam))
###################################################
### code chunk number 6: ExamRelevel
###################################################
Exam$sex <- relevel(Exam$sex, "M")
Exam$schgend <- relevel(Exam$schgend, "girls")
(Em2 <- lmer(normexam ~ standLRT + sex + schgend + (1|school), Exam))
###################################################
### code chunk number 7: ExamIds
###################################################
Exam <- within(Exam, ids <- factor(school:student))
str(Exam)
###################################################
### code chunk number 8: dupExamIds
###################################################
as.character(Exam$ids[which(duplicated(Exam$ids))])
###################################################
### code chunk number 9: OnlyBoy
###################################################
subset(Exam, ids == '43:86')
xtabs(~ sex + school, Exam, subset = school %in% c(43, 50, 52), drop = TRUE)
###################################################
### code chunk number 10: ExamXtabs
###################################################
xtabs(~ sex + school, Exam, subset = type == "Mxd", drop = TRUE)
###################################################
### code chunk number 11: ExamMosaicshow (eval = FALSE)
###################################################
## ExamMxd <- within(subset(Exam, type == "Mxd"), school <- factor(school))
## mosaicplot(~ school + sex, ExamMxd)
###################################################
### code chunk number 12: ExamMosaic
###################################################
ExamMxd <- within(subset(Exam, type == "Mxd"), school <- factor(school))
mosaicplot(~ school + sex, ExamMxd)
###################################################
### code chunk number 13: Examplot1
###################################################
print(xyplot(normexam ~ standLRT | sex * type, Exam,
             type = c("g", "p", "smooth"), layout = c(2,2),
             xlab = "Standardized London Reading Test score",
             ylab = "Normalized exam score", aspect = 1.2))
###################################################
### code chunk number 14: Examplot1show (eval = FALSE)
###################################################
## xyplot(normexam ~ standLRT | sex * type, Exam, type = c("g", "p", "smooth"))
###################################################
### code chunk number 15: Examplot2
###################################################
print(xyplot(normexam ~ standLRT, Exam, groups = sex:type,
             type = c("g", "smooth"), xlim = c(-3,3), ylim = c(-2,2),
             xlab = "Standardized London Reading Test score",
             ylab = "Normalized exam score",
             auto.key = list(space = 'right', points = FALSE, lines = TRUE),
             aspect = 1))
###################################################
### code chunk number 16: Examplot2show (eval = FALSE)
###################################################
## xyplot(normexam ~ standLRT, Exam, groups = sex:type, type = c("g", "smooth"))
###################################################
### code chunk number 17: Examplot3
###################################################
print(xyplot(normexam ~ standLRT | school, Exam,
             type = c("g", "p", "r"),
             xlab = "Standardized London Reading Test score",
             ylab = "Normalized exam score",
             subset = sex == "F" & type == "Sngl"))
###################################################
### code chunk number 18: Examplot3show
###################################################
xyplot(normexam ~ standLRT | school, Exam,
       type = c("g", "p", "r"),
       subset = sex == "F" & type == "Sngl")
###################################################
### code chunk number 19: Examplot4show (eval = FALSE)
###################################################
## xyplot(normexam ~ standLRT | school, Exam, type = c("g", "p", "r"),
##        subset = sex == "F" & type == "Sngl" & school != 48,
##        index.cond = function(x, y) coef(lm(y ~ x))[1])
###################################################
### code chunk number 20: Examplot4
###################################################
print(xyplot(normexam ~ standLRT | school, Exam,
             type = c("g", "p", "r"),
             xlab = "Standardized London Reading Test score",
             ylab = "Normalized exam score",
             subset = sex == "F" & type == "Sngl" & school != 48,
             index.cond = function(x, y) coef(lm(y ~ x))[1]))
###################################################
### code chunk number 21: Examplot4a
###################################################
print(xyplot(normexam ~ standLRT | school, Exam,
             type = c("g", "p", "r"),
             xlab = "Standardized London Reading Test score",
             ylab = "Normalized exam score",
             subset = sex == "F" & type == "Sngl" & school != 48,
             index.cond = function(x, y) coef(lm(y ~ x))[2]))
###################################################
### code chunk number 22: ExamLmListFS
###################################################
show(ExamFS <- lmList(normexam ~ standLRT | school, Exam,
                      subset = sex == "F" & type == "Sngl" & school != 48))
###################################################
### code chunk number 23: Examplot4cshow
###################################################
plot(confint(ExamFS, pool = TRUE), order = 1)
###################################################
### code chunk number 24: Examplot4c
###################################################
print(plot(confint(ExamFS, pool = TRUE), order = 1))
###################################################
### code chunk number 25: Examplot5
###################################################
print(xyplot(normexam ~ standLRT | school, Exam,
             type = c("g", "p", "r"),
             xlab = "Standardized London Reading Test score",
             ylab = "Normalized exam score",
             subset = sex == "M" & type == "Sngl", layout = c(5,2),
             index.cond = function(x, y) coef(lm(y ~ x))[1]))
###################################################
### code chunk number 26: ExamLmListMS
###################################################
show(ExamMS <- lmList(normexam ~ standLRT | school, Exam,
                      subset = sex == "M" & type == "Sngl"))
###################################################
### code chunk number 27: Examplot5b
###################################################
print(plot(confint(ExamMS, pool = TRUE), order = 1))
###################################################
### code chunk number 28: Examplot6
###################################################
print(xyplot(normexam ~ standLRT | school, Exam, groups = sex,
             type = c("g", "p", "r"),
             xlab = "Standardized London Reading Test score",
             ylab = "Normalized exam score",
             subset = !school %in% c(43, 47) & type == "Mxd",
             index.cond = function(x, y) coef(lm(y ~ x))[1],
             auto.key = list(space = 'top', lines = TRUE,
                             columns = 2), layout = c(7,5),
             aspect = 1.2))
###################################################
### code chunk number 29: ExamLmListM
###################################################
show(ExamM <- lmList(normexam ~ standLRT + sex| school, Exam,
                     subset = type == "Mxd" & !school %in% c(43,47,54)))
###################################################
### code chunk number 30: Examplot6b
###################################################
print(plot(confint(ExamM, pool = TRUE), order = 1))
###################################################
### code chunk number 31: Em3
###################################################
(Em3 <- lmer(normexam ~ standLRT + sex + type + (1|school), Exam))
###################################################
### code chunk number 32: Em4
###################################################
(Em4 <- lmer(normexam ~ standLRT + sex + type + (standLRT|school), Exam))
###################################################
### code chunk number 33: EmAnova
###################################################
anova(Em3, Em4)
###################################################
### code chunk number 34: Em5
###################################################
(Em5 <- lmer(normexam ~ standLRT + sex + type + (standLRT + sex|school), Exam))
###################################################
### code chunk number 35: Oxboys
###################################################
str(Oxboys)
system.time(mX1 <- lmer(height ~ age + I(age^2) + I(age^3) + I(age^4) + (age + I(age^2)|Subject),
                        Oxboys))
summary(mX1)
system.time(mX2 <- lmer(height ~ poly(age,4) + (age + I(age^2)|Subject), Oxboys))
summary(mX2)
###################################################
### code chunk number 36: ScotsSec
###################################################
### NOTE(review): chunk 36's body continues beyond this excerpt.
str(ScotsSec)
system.time(mS1 <- lmer(attain ~ sex + (1|primary) + (1|second), ScotsSec))
summary(mS1)
###################################################
### code chunk number 37: sessionInfo
###################################################
toLatex(sessionInfo())
|
239df01bc44dd61345066c629e02e6f892388b87
|
5065f337e3ffb1793b855e6b2264dfde4467fb97
|
/R course June/05 Rice code 2.R
|
0fdcca3fcf6200c63fd8b4a1a7a505cf12fc1f77
|
[] |
no_license
|
Reicharf/R-SAS.Introductory.Courses
|
faed787ec5c0eaa76d55dc476e6142f27c24ce6f
|
69ec001f8961d5f1c4d1387f80249daa4cbc159b
|
refs/heads/master
| 2022-06-16T23:21:33.677994
| 2020-04-26T13:50:08
| 2020-04-26T13:50:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,645
|
r
|
05 Rice code 2.R
|
# NOTE(review): clearing the global workspace and hard-coding a working
# directory is discouraged outside of interactive teaching scripts;
# kept as-is from the original course material.
rm(list=ls())
setwd("D:/Hohenheim/R-SAS.Introductory.Courses/Datasets")
library(data.table)
dt <- fread("05 Rice.txt") # directly import as data.table format
# split plot design - two factor effects
### Split Plot Design
# When some factors (independent variables) are difficult
# or impossible to change in your experiment, a completely
# randomized design isn't possible. The result is a
# split-plot design, which has a mixture of hard to
# randomize (or hard-to-change) and easy-to-randomize
# (or easy-to-change) factors. The hard-to-change factors
# are implemented first, followed by the easier-to-change factors.
# Convert design variables to factors and the response to numeric.
dt$Block <- as.factor(dt$Block)
dt$N <- as.factor(dt$N)
dt$Var <- as.factor(dt$Var)
dt$Yield <- as.numeric(dt$Yield)
# Cross-tabulate to confirm the design is balanced.
table(dt$Var, dt$N, dt$Block)
# boxplots for first impression
boxplot(data=dt, Yield ~ N + Var, las=2)
boxplot(data=dt, Yield ~ N , las=2)
boxplot(data=dt, Yield ~ Var , las=2)
# In a split-plot design, the (incomplete) mainplots should
# be taken as a "random effect". Since we then have random and
# fixed effects in one model, we are fitting a "mixed model".
# In R the most common packages for that are "lme4", "nlme",
# "asreml-R" and "sommer".
# If you use lme4, always load the lmerTest package, too
# (it adds p-values to the lmer ANOVA output).
library(lme4)
library(lmerTest)
# build the mixed model: fixed N, Var, their interaction and Block;
# random intercept for each mainplot (Block:N combination)
mod <- lmer(data = dt,
formula = Yield ~ N + Var + N:Var +
Block + (1|Block:N))
anova(mod) # interaction effect significant!
# get adj. means for interaction effect and compare
library(emmeans) # also needs package multcompView to be installed
# get means and comparisons
means <- emmeans(mod, pairwise ~ N | Var)
# Note that N | Var gets pairwise N comparisons for each
# Variety separately. You can use N*Var instead to get all
# pairwise comparisons.
means # look at means and comparisons
means$emmeans # look at means
means$contrasts # look at comparisons
# compact letter display: means sharing a letter are not
# significantly different
output <- cld(means$emmeans, details=T, Letters = letters)
output # this data format is not good for ggplot
output <- as.data.table(output$emmeans) # reformatting into one table
output # this is better
###########################################################
# draw lsmeans graph in ggplot: bars of adjusted means with
# +/- SE error bars and the significance letters on top
library(ggplot2)
p <- ggplot(data=output, aes(x=N))
p <- p + geom_bar(aes(y=emmean), stat="identity", width=0.8)
p <- p + geom_errorbar(aes(ymin=emmean-SE, ymax=emmean+SE), width=0.4)
p <- p + geom_text(aes(y=emmean+1500, label=.group))
p <- p + facet_wrap(~Var) # one per variety
p # show plot
# save ggplot as file into your working directory
ggsave("test.jpeg", width = 20, height = 10, units = "cm")
|
2ddcf4aa9f8ef784d51a24ea6a2733bd7bc387fd
|
9f9155f936f7a800547fe6ffda440880534e93f3
|
/cachematrix.R
|
3801e9134f991717e64f962aee72ce59cea76c37
|
[] |
no_license
|
sarahfinder/ProgrammingAssignment2
|
375d0bd347bde7042dbe33eba5365507a6477c68
|
72da6b2f31e9921803cfe53fd4a86128878945db
|
refs/heads/master
| 2021-01-17T08:13:08.428426
| 2015-06-19T11:16:31
| 2015-06-19T11:16:31
| 37,710,853
| 0
| 0
| null | 2015-06-19T08:30:37
| 2015-06-19T08:30:34
| null |
UTF-8
|
R
| false
| false
| 1,520
|
r
|
cachematrix.R
|
## makeCacheMatrix is a function that creates a special "matrix"
## object that can cache its inverse. cacheSolve is a function that
## computes the inverse of the special "matrix" returned by the above
## function.
## makeCacheMatrix is a function that stores a list of functions: set,
## get, setinverse, getinverse. get is a function that returns the x
## matrix stored in the main function. set is a function that changes
## the matrix stored in the main function. setinverse and getinverse
## store the value of the input in a variable m into the main function
## makeCacheMatrix and return it.
## Constructor for a "matrix" wrapper that can cache its inverse.
## Returns a list of four accessors closing over `x` (the matrix) and
## `cached_inverse` (NULL until the inverse is stored).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # a new matrix invalidates the cache
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve is a function that calculates the inverse of the special
## "matrix" created with makeCacheMatrix: first, it checks to
## see if the inverse has already been calculated; if so, it "get"s
## the inverse from the cache and skips the computation; otherwise,
## it calculates the inverse of the matrix and sets the value of the
## inverse in the cache via the "setinverse" function.
## Return the inverse of a makeCacheMatrix object, reusing a previously
## cached result when one exists. Extra arguments are forwarded to
## solve() on a cache miss; the fresh inverse is stored before returning.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data")
  } else {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  }
  inv
}
|
c0f7a921be3c459c2fc8de92dec9c138831d2215
|
7124857f88de282565f0e53dfd56034323064944
|
/cachematrix.R
|
af27560b7a328fce288972f94ec4c1e8a190e449
|
[] |
no_license
|
tipu-zlatan/ProgrammingAssignment2
|
657bddc5604f8b1bf44f8a7e4f66cacda8e6c6e1
|
211fdf6d865793d8850ef6c7e7ea933d79e90c4c
|
refs/heads/master
| 2022-09-23T03:08:48.463674
| 2020-06-03T14:38:54
| 2020-06-03T14:38:54
| 269,087,681
| 0
| 0
| null | 2020-06-03T12:53:28
| 2020-06-03T12:53:27
| null |
UTF-8
|
R
| false
| false
| 883
|
r
|
cachematrix.R
|
## A pair of functions that checks and sets the cache of inverse matrix.
## This function creates a special "matrix" object that can cache its inverse.
## Write a short comment describing this function
## Build a matrix container whose inverse can be cached alongside it.
## The returned list exposes set/get for the matrix itself and
## setinverse/getinverse for the cached inverse (NULL when unset).
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  list(
    set = function(value) {
      x <<- value
      inv_cache <<- NULL  # changing the matrix invalidates the cache
    },
    get = function() x,
    setinverse = function(inv) inv_cache <<- inv,
    getinverse = function() inv_cache
  )
}
# returns the inverse of the special "matrix" returned by makeCacheMatrix above.
## Return the inverse of a makeCacheMatrix object, computing it with
## solve() only when no cached value is available. Extra arguments are
## passed through to solve().
cacheSolve <- function(x, ...) {
  result <- x$getinverse()
  if (is.null(result)) {
    result <- solve(x$get(), ...)
    x$setinverse(result)
  } else {
    message("getting cached inverse")
  }
  result
}
|
73dbf5cacc7e266c5d1b25fdd3f9374add4babd1
|
e32b591d7e9ca79bf90e0adae17d664da0fde1df
|
/R/simField.R
|
14c7d5fad9a2ec0f2144363a4e38c159c9426627
|
[] |
no_license
|
nmmarquez/DRU5MR
|
6d41368448397abc9860f951bdd6b6f6caaafae4
|
e71ed7908b1cceb0527f38df1f3c21cd9bcf872b
|
refs/heads/master
| 2020-04-22T12:22:45.709306
| 2019-09-01T20:02:08
| 2019-09-01T20:02:08
| 170,369,505
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,936
|
r
|
simField.R
|
#' Simulate A continous 2D spatial field evolving over time
#'
#' @description Calculates a continous underlying spatial field for the
#' Dominican Republic over time and across ages for probability of mortality
#' for one of 7 age groups. The final observed field is a linear additive model
#' as described in Wakefield 2017.
#'
#' @param M Number of years
#' @param sigmaE spatial variance
#' @param rangeE spatial range
#' @param rhoAR Yearly autoregression
#' @param sigmaRW Variation of independent temporal component
#' @param ageB age specfifc effects
#' @param deltaUrban urban effect
#' @param offset mesh creation parameter
#' @param max.edge mesh creation parameter
#'
#' @return field object, contains multiple items. Spatial points data frame with
#' raster values for transformed linear combination, and other values. A mesh
#' that was used to create the latent field. The latent field
#' itself. A bounding shape where all observations take place. A projection
#' matrix from the mesh to the entire field of interest. The spde for the matern
#' approximation.
#'
#' @examples
#' \dontrun{
#' require(tidyverse)
#'
#' fieldDR <- simField(4, rangeE = 1.3)
#'
#' fieldDR$fqzDF %>%
#' ggplot(aes(x=long, y=lat, fill=fqz)) +
#' geom_raster() +
#' facet_wrap(~year) +
#' coord_equal() +
#' theme_void() +
#' scale_fill_distiller(palette = "Spectral")
#'}
#'
#' @export
# Simulate a continuous 2D spatial field evolving over time (see the
# roxygen block above for parameter documentation).
# NOTE(review): this function reads `spDF` and `fullDF` from the calling
# environment (they are not arguments and are not defined here) -- confirm
# they are provided by the package's data objects before calling.
# NOTE(review): library() calls inside a function body are discouraged;
# kept as-is to avoid changing load-order side effects.
simField <- function(
M,
rangeE = .8,
sigmaE = .04,
rhoAR = .9,
sigmaRW = .001,
ageB = arm::logit((7:1 / 1000) + .00025),
deltaUrban = -.1,
offset = 1.,
max.edge = c(.15,.3)){
library(rgdal)
library(sp)
library(maptools)
library(PointPolygon)
library(rgeos)
library(tidyverse)
library(ar.matrix)
# Build a triangulated mesh over the bounding box of spDF.
meshDR <- INLA::inla.mesh.2d(
loc=bbCoords(spDF),
offset = offset,
max.edge = max.edge)
# Projection matrix from mesh nodes to the prediction points in fullDF.
AprojDR <- INLA::inla.spde.make.A(
mesh=meshDR,
loc=as.matrix(fullDF[,c("long", "lat")]))
# Matern SPDE parameters derived from the requested range and variance.
kappaE <- sqrt(8) / rangeE
tauE <- 1/(sqrt(4*pi)*kappaE*sigmaE)
spde <- INLA::inla.spde2.matern(meshDR)
Qspde <- tauE**2 *
(kappaE**4 * spde$param.inla$M0 +
2 * kappaE**2 *spde$param.inla$M1 + spde$param.inla$M2)
# Space-time precision: AR(1) over years crossed with the spatial SPDE.
Qar1 <- Q.AR1(M, 1, rhoAR)
Q <- kronecker(Qar1, Qspde)
# One draw from the latent field, centered to mean zero.
x_ <- as.vector(ar.matrix::sim.AR(n=1, Q))
x <- x_ - mean(x_)
# Project each year's slice of the latent field onto the grid points.
for(i in 1:M){
fullDF[,paste0("x", i)] <- as.vector(
AprojDR %*% x[(meshDR$n * (i-1) + 1):(meshDR$n * i)])
}
# Fixed effects: per-age intercepts and a centered random walk over time.
pAgeDF <- tibble(
ageB = ageB,
age = 1:7)
pTimeDF <- tibble(
time = 1:M,
rwTime = c(r.AR1(1, M, sigmaRW, .9999) %>% `-`(mean(.)))
)
idDF <- as_tibble(expand.grid(id=1:max(fullDF$id), time=1:M, age=1:7))
# Combine components additively on the logit scale (Wakefield 2017) to
# get a per-location/year/age probability p, then label calendar years.
fullDF <- fullDF %>%
gather("time", "x", -long, -lat, -reg, -urban, -id, -strat) %>%
mutate(time=as.integer(gsub("x", "", time))) %>%
right_join(idDF, by=c("id", "time")) %>%
left_join(pTimeDF, by="time") %>%
left_join(pAgeDF, by="age") %>%
mutate(deltaU=ifelse(urban == 1, deltaUrban, 0)) %>%
mutate(p = arm::invlogit(ageB + deltaU + rwTime + x)) %>%
mutate(year = time + 1999) %>%
select(-time)
# Aggregate over ages: fqz = 1 - prod(1 - p) per location/year.
fqzDF <- fullDF %>%
group_by(long, lat, id, reg, urban, strat, year) %>%
summarize(fqz=1-prod(1-p)) %>%
ungroup
# Dissolve spDF into a single bounding polygon for the study area.
boundShape <- rgeos::gUnaryUnion(spDF, id = spDF@data$Bound)
boundShape <- sp::SpatialPolygonsDataFrame(
boundShape, data.frame(Bound = 1, row.names = "1"))
# Bundle everything (data, mesh, latent draw, parameters) into a
# classed list so downstream methods can dispatch on "field".
field <- list(spdf = fullDF, mesh = meshDR, latent = x, fqzDF=fqzDF,
bound = boundShape, AprojField = AprojDR, spde = spde,
deltaUrban, rangeE = rangeE, sigmaE = sigmaE, rhoAR = rhoAR,
sigmaRW = sigmaRW, ageB = ageB)
class(field) <- "field"
field
}
|
239f13ed1c88aeb5d6afe756d5f7b95104b5d7c7
|
b9a05537e3cd702891305c8c958171d02050a72e
|
/packages/rethinking.pkgs/R/packages_import.R
|
5f1f56c425a62245c9bdc2449d4a03c39c5d57f4
|
[] |
no_license
|
anhnguyendepocen/statistical_rethinking-rsuite
|
fd2bdd7487426a553439796089c560ca9748259c
|
6d6794bec2666b618b7d26c431e5313b0486b3a5
|
refs/heads/master
| 2022-02-14T08:33:50.665896
| 2019-08-19T22:12:27
| 2019-08-19T22:12:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 242
|
r
|
packages_import.R
|
#----------------------------------------------------------------------------
# rethinking.pkgs
#
# Global package definitions and imports
#----------------------------------------------------------------------------
#' @import logging
NULL
|
819f1c4432b5142c430187c482e77aa320f1cc89
|
9f7baaad4b6028ab5ed2fd20d8d1ca18045ffff0
|
/man/AtaImpact.Rd
|
82c931d7ba15cc01688a4c58575e1847ce0ac1ae
|
[] |
no_license
|
ArnaudBu/ReservingLad
|
d294adde36af6d498d02be023ed46a87715c4ff2
|
59e9854d90825e2acf0e7770e7e3ca9ce19c6519
|
refs/heads/master
| 2021-09-26T08:54:56.473788
| 2021-09-19T15:19:04
| 2021-09-19T15:19:04
| 102,781,474
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 726
|
rd
|
AtaImpact.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AtaImpact.R
\name{AtaImpact}
\alias{AtaImpact}
\title{Age to Age factors impacts}
\usage{
AtaImpact(triangle, mode = "value")
}
\arguments{
\item{triangle}{Cumulated triangle as a matrix}
\item{mode}{Character. The way to display the impact: as a value ("value") or as a percent of the total reserve ("percent").}
}
\value{
A triangle equivalent to the age-to-age factors triangle, but with the impact on the reserves instead.
}
\description{
\code{AtaImpact} computes the impact on the total reserve from removing each age to age factor in the Chain Ladder estimation for coefficients.
}
\examples{
impact <- AtaImpact(triangleExampleEngland)
}
|
6d4e01131132be27cdcbb3ad9bf837a1ceb76a09
|
a163da6d35190a6461455e38f9be1b4ed88af943
|
/cachematrix.R
|
43b18fe05f25c2aeb0482fca5cf060040194de37
|
[] |
no_license
|
matwgmkw/ProgrammingAssignment2
|
5813369973f6b8faabe611d5aaf08d4687cf0b44
|
9aab3628420c150f07bdd657d771ee19cc91de86
|
refs/heads/master
| 2022-10-18T08:42:59.013316
| 2020-06-10T16:55:59
| 2020-06-10T16:55:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 493
|
r
|
cachematrix.R
|
## Wrap a matrix together with a cache slot for its inverse.
## Accessors: set/get for the matrix, setinve/getinve for the cache.
makeCacheMatrix <- function(x = matrix()) {
  stored_inverse <- NULL
  set <- function(new_value) {
    x <<- new_value
    stored_inverse <<- NULL  # reset the cache whenever the matrix changes
  }
  get <- function() x
  setinve <- function(inverse) {
    stored_inverse <<- inverse
  }
  getinve <- function() stored_inverse
  list(set = set, get = get,
       setinve = setinve, getinve = getinve)
}
## Inverse of a makeCacheMatrix object: reuse the cached value exposed
## by getinve()/setinve(), falling back to solve() on a cache miss.
cacheSolve <- function(x, ...) {
  answer <- x$getinve()
  if (is.null(answer)) {
    answer <- solve(x$get(), ...)
    x$setinve(answer)
  } else {
    message("getting cached data")
  }
  answer
}
|
3c5aac73e86a6a75b6ff48ea52f89fbc83849d18
|
4850bb228c6eb7956c7afbd128dabf0370f9b561
|
/man/dtables2latex.Rd
|
beaa871151a9b4b4a98db3f5aec7a1f68de02697
|
[] |
no_license
|
renlund/descripteur
|
830b64f5f4a6543f7ee27f2ea8dc0314032d0163
|
1d987cdc7f01c0d52b9f7065d415846e3c063de1
|
refs/heads/master
| 2023-02-19T14:41:01.819745
| 2023-02-09T15:42:37
| 2023-02-09T15:42:37
| 61,724,091
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 937
|
rd
|
dtables2latex.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dtables.R
\name{dtables2latex}
\alias{dtables2latex}
\alias{dtables2latex_ungrouped_helper}
\alias{dtables2latex_grouped_helper}
\title{dtables to latex code}
\usage{
dtables2latex(
dt,
format = TRUE,
format.param = as.list(NULL),
n = c(n = "size"),
...
)
dtables2latex_ungrouped_helper(dt, format = TRUE, ...)
dtables2latex_grouped_helper(dt, format = TRUE, ...)
}
\arguments{
\item{dt}{a dtables}
\item{format}{logical; do you want to format?}
\item{format.param}{list of parameter values passed to format function}
\item{n}{size indicator in table (set to NULL to suppress this)}
\item{...}{arguments passed to \code{dtable_latex}}
}
\description{
dtables to latex code
}
\section{Functions}{
\itemize{
\item \code{dtables2latex_ungrouped_helper()}: deprecated function
\item \code{dtables2latex_grouped_helper()}: deprecated function
}}
|
72f8876429fe14c37c1b86f3ab827344ebab731c
|
edae1ad20b35fd8c14cfc2442cdd7eae206d279e
|
/R/gkde_fun.R
|
524626f19519f2635ba620cadb72d2815e1c0e92
|
[] |
no_license
|
yangxhcaf/rasterExtras
|
54623642888dcd3e55b1b1df09a587e23ed8b136
|
44124f68e5b04208bff1c9ad936a5b676d817361
|
refs/heads/master
| 2020-07-07T15:19:02.308670
| 2019-06-05T15:33:37
| 2019-06-05T15:33:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,565
|
r
|
gkde_fun.R
|
#' @useDynLib rasterExtras
#' @importFrom Rcpp sourceCpp
#' @import raster
#' @import parallel
#' @import stats
#' @import snow
NULL
#' Geographic Kernel Density Estimator using linear or Haversine distances
#'
#' This function calculates a kernel density estimation for raster objects.
#'
#' @param grid A raster object to match.
#' @param points A two column data frame in the form (lon,lat) or (x,y)
#' @param parallel TRUE or FALSE, should this code be executed in parallel.
#' @param nclus IF parallel==TRUE then how many cores in the cluster.
#' @param dist.method Which distance should we use? Haversine for lat/long projections,or Pythagorean for flat images and/or small areas.
#' @param maxram Maximum theoretical RAM usage. Will be divided by nclus for parallel jobs.
#' @param bw Bandwidth. Numeric bandwidth. See bw.calc for help
#'
#' @export
#' @examples
#' require(raster)
#' grid = raster::raster(nrows=81, ncols=162, xmn=-180, xmx=180, ymn=-90, ymx=90, vals=NULL)
#' grid = raster::setValues(grid,values=(as.vector(seq(1:raster::ncell(grid)))))
#' points = cbind(
#' c(seq(xmin(grid), xmax(grid), length.out=1000),
#' seq(xmax(grid), xmin(grid), length.out=1000)),
#' c(seq(ymin(grid), ymax(grid), length.out=100),
#' seq(ymin(grid), ymax(grid), length.out=100))
#' )
#' plot(grid); points(points);
#' den = gkde(grid, points, parallel=TRUE, dist.method='Haversine', bw= 10)
#' plot(den)
#' points(points)
# Geographic kernel density estimator on a raster grid (see roxygen above).
# NOTE(review): `latlonfromcell`, `distance` and `pythagorean` are
# package-internal (compiled, via Rcpp) helpers -- presumably cell-index
# to lat/lon conversion and pairwise distance matrices; confirm in src/.
gkde <-
function(grid,
points,
parallel = TRUE,
nclus = 4,
dist.method = 'Haversine',
maxram=4, bw = 200) {
bw.gen = bw;
# Worker (Haversine): for a chunk of cell indices `x`, recover cell
# coordinates, compute distances to every point, and evaluate a
# Gaussian kernel density at distance 0 for each cell.
.gkde.core.h <- function(x) {
require(rasterExtras)
coords = latlonfromcell(as.vector(x),
as.vector(
c(
raster::xmin(grid),
raster::xmax(grid),
raster::ymin(grid),
raster::ymax(grid)
)
),
nrow(grid),
ncol(grid))
d = distance(as.matrix(coords[, 2:1]), as.matrix(points))
di = vector()
for (c in 1:raster::nrow(coords)) {
di[c] = stats::density(
d[c, ],
n = 1,
kernel = 'gaussian',
from = 0,
to = 0,
bw = bw.gen,
na.rm = TRUE
)$y
}
return(di)
}
# Worker (Pythagorean): identical to the Haversine worker except for
# the distance function used.
.gkde.core.p <- function(x) {
coords = latlonfromcell(as.vector(x),
as.vector(
c(
raster::xmin(grid),
raster::xmax(grid),
raster::ymin(grid),
raster::ymax(grid)
)
),
nrow(grid),
ncol(grid))
d = pythagorean(as.matrix(coords[, 2:1]), as.matrix(points))
di = vector()
for (c in 1:raster::nrow(coords)) {
di[c] = stats::density(
d[c, ],
n = 1,
kernel = 'gaussian',
from = 0,
to = 0,
bw = bw.gen,
na.rm = TRUE
)$y
}
return(di)
}
# Bytes per matrix cell used for the RAM estimate below.
dd =8;
xx = seq(1:raster::ncell(grid))
## Estimate the size (GiB) of the full cells x points distance matrix,
## then split the cell indices into chunks that fit within maxram.
vol = (dd * (nrow(points) * raster::ncell(grid))) / 1024 / 1024 / 1024
if (parallel == FALSE) {
ramtarg= maxram;
targ.n = ceiling((ramtarg / vol) * raster::ncell(grid))
if (targ.n > raster::ncell(grid)) {
splits = list(seq(1:raster::ncell(grid)))
} else {
n = targ.n
f <- sort(rep(1:(trunc(length(xx) / n) + 1), n))[1:length(xx)]
splits = split(xx, f)
}
# Serial evaluation over the chunks.
if (dist.method == "Pythagorean") {
di = unlist(lapply(splits, .gkde.core.p))
} else if (dist.method == "Haversine") {
di = unlist(lapply(splits, .gkde.core.h))
}
} else {
# Parallel path: RAM budget is divided across the cluster workers.
ramtarg= maxram/nclus;
targ.n = ceiling((ramtarg / vol) * raster::ncell(grid))
if (targ.n > raster::ncell(grid)) {
splits = list(seq(1:raster::ncell(grid)))
} else {
n = targ.n
f <- sort(rep(1:(trunc(length(xx) / n) + 1), n))[1:length(xx)]
splits = split(xx, f)
}
# Ensure at least one chunk per worker.
if (nclus > length(splits)) {
n = targ.n / nclus;
f <- sort(rep(1:(trunc(length(xx) / n) + 1), n))[1:length(xx)]
splits = split(xx, f)
}
## Progress report before launching the cluster.
cat("BEGIN PARALLEL COMPUTATION\n");
cat("Core count: ", nclus, "\n");
cat("Cells/iteration: ", length(splits[[1]]), "of", raster::ncell(grid), "\n")
cat("Iterations total: ", raster::ncell(grid)/length(splits[[1]]), "\n")
cat("Points: ", nrow(points), "\n");
cat("Maximum RAM per proc.: ", maxram/nclus, "\n");
cat("Distance Method: ", dist.method, "\n\n");
###
p = proc.time();
cl = parallel::makeCluster(nclus, type = 'SOCK')
parallel::clusterExport(cl,
c(
"grid",
"points",
"bw.gen",
"latlonfromcell",
"pythagorean"
),
envir = environment())
if (dist.method == "Pythagorean") {
di = unlist(parallel::parLapply(cl, splits, .gkde.core.p))
} else if (dist.method == "Haversine") {
di = unlist(parallel::parLapply(cl, splits, .gkde.core.h))
}
parallel::stopCluster(cl)
ep = proc.time() - p;
cat("Time elapsed: ", ep[[3]], "\n\n");
}
# Assemble the density values into a raster matching `grid`, masking
# out cells that are NA in the input grid.
r = raster::raster(
nrows = nrow(grid),
ncol = ncol(grid),
crs = raster::crs(grid),
ext = raster::extent(grid)
)
r = raster::setValues(r, values = di)
r = raster::mask(r, grid)
return(r)
}
#' Bandwidth selection tool for gkde()
#'
#' Calculate the optimal bandwidth or bootstrap the bandwidth selection for lage datasets.
#'
#' @param points A two column data frame in the form (lon,lat) or (x,y)
#' @param dist.method Haversine or Pythagorean distance implemented.
#' @param boot.n Number of bootstrap replicates to do. If set, will do bootstrap even on small datasets. Default 1000.
#' @param sam.size Bootstrap sample size.
#' @param maxram Maximum RAM, in Gigabytes.
#'
#' @export
#' @examples
#' require(raster)
#' grid = raster::raster(nrows=18, ncols=36, xmn=-180, xmx=180, ymn=-90, ymx=90, vals=NULL)
#' grid = raster::setValues(grid,values=(as.vector(seq(1:raster::ncell(grid)))))
#' points = cbind(
#' c(seq(xmin(grid), xmax(grid), length.out=1000),
#' seq(xmax(grid), xmin(grid), length.out=1000)),
#' c(seq(ymin(grid), ymax(grid), length.out=1000),
#' seq(ymin(grid), ymax(grid), length.out=1000))
#' )
#' bandwidth = bw.calc(points);
#' (implementation -- see roxygen block above for the user-facing docs)
#'
#' Fix applied during review: the `boot.n` and `sam.size` arguments were
#' documented but ignored (the loop count was hardcoded to 1000 and the
#' sample size to 100); they are now honoured. A redundant unconditional
#' distance computation before the method dispatch was also removed, and
#' the bootstrap bandwidth vector is preallocated.
bw.calc = function(points, dist.method = 'Pythagorean', boot.n = 0, sam.size = 100, maxram=4){
  # "Double double" bytes per cell: the Rcpp return appears to duplicate
  # memory, per the original author's note.
  dd = 32
  # Theoretical size (GiB) of the full n x n distance matrix.
  # NOTE(review): `maxram` is currently unused (the guard that compared
  # `vol > maxram` is disabled); callers choose bootstrapping via boot.n.
  vol = dd * (nrow(points) ^ 2) / 1024 / 1024 / 1024
  if (boot.n > 1) {
    ## Bootstrap bandwidth selection for large point sets: average the
    ## normal-reference bandwidth over boot.n resamples of size sam.size.
    bw = numeric(boot.n)
    for (i in seq_len(boot.n)) {
      sam = sample(c(1:nrow(points)), sam.size, replace = TRUE)
      if (dist.method == "Pythagorean") {
        ps = as.vector(pythagorean(as.matrix(points[sam, ]), as.matrix(points[sam, ])))
      } else if (dist.method == "Haversine") {
        ps = as.vector(distance(as.matrix(points[sam, ]), as.matrix(points[sam, ])))
      }
      bw[i] = stats::bw.nrd(as.vector(ps))
    }
    bw.gen = mean(bw)
  } else {
    ## Exact bandwidth from the full pairwise distance matrix.
    if (dist.method == "Pythagorean") {
      pbp = as.vector(pythagorean(as.matrix(points), as.matrix(points)))
    } else if (dist.method == "Haversine") {
      pbp = as.vector(distance(as.matrix(points), as.matrix(points)))
      pbp = stats::na.omit(pbp)
    }
    bw.gen = stats::bw.nrd(as.vector(pbp))
  }
  return(bw.gen)
}
#' Calculate distance to points surfaces
#'
#' This function calculates the minimum distance to points raster.
#'
#' @param grid A raster object to match.
#' @param points A two column data frame in the form (lon,lat) or (x,y)
#' @param parallel TRUE or FALSE, should this code be executed in parallel.
#' @param nclus IF parallel==TRUE then how many cores in the cluster.
#' @param dist.method Which distance should we use? Haversine for lat/long projections,or Pythagorean for flat images and/or small areas.
#' @param maxram Maximum theoretical RAM usage. Will be divided by nclus for parallel jobs.
#'
#' @export
#' @examples
#' require(raster)
#' grid = raster::raster(nrows=81, ncols=162, xmn=-180, xmx=180, ymn=-90, ymx=90, vals=NULL)
#' grid = raster::setValues(grid,values=(as.vector(seq(1:raster::ncell(grid)))))
#' points = cbind(
#' c(seq(xmin(grid), xmax(grid), length.out=1000),
#' seq(xmax(grid), xmin(grid), length.out=1000)),
#' c(seq(ymin(grid), ymax(grid), length.out=100),
#' seq(ymin(grid), ymax(grid), length.out=100))
#' )
#' plot(grid); points(points);
#' di = dist2point(grid, points, parallel=TRUE, maxram = 0.5, nclus = 4, dist.method='Haversine')
#' plot(di)
#' points(points)
# Minimum distance-to-points raster (see roxygen above). Structure
# mirrors gkde(): chunk the grid cells by a RAM budget, compute a
# cells x points distance matrix per chunk, and take the row-wise
# minimum instead of a kernel density.
# NOTE(review): `latlonfromcell`, `distance` and `pythagorean` are
# package-internal compiled helpers; confirm semantics in src/.
dist2point <-
function(grid,
points,
parallel = TRUE,
nclus = 4,
dist.method = 'Haversine',
maxram=4) {
# Worker (Haversine): min distance from each cell in chunk `x` to any point.
.dist2point.core.h <- function(x) {
require(rasterExtras)
coords = latlonfromcell(as.vector(x),
as.vector(
c(
raster::xmin(grid),
raster::xmax(grid),
raster::ymin(grid),
raster::ymax(grid)
)
),
nrow(grid),
ncol(grid))
d = distance(as.matrix(coords[, 2:1]), as.matrix(points))
return(apply(d, 1, function(x) min(x, na.rm=TRUE)))
}
# Worker (Pythagorean): identical except for the distance function.
.dist2point.core.p <- function(x) {
coords = latlonfromcell(as.vector(x),
as.vector(
c(
raster::xmin(grid),
raster::xmax(grid),
raster::ymin(grid),
raster::ymax(grid)
)
),
nrow(grid),
ncol(grid))
d = pythagorean(as.matrix(coords[, 2:1]), as.matrix(points))
return(apply(d, 1, function(x) min(x, na.rm=TRUE)))
}
# Bytes per matrix cell used for the RAM estimate below.
dd =8;
xx = seq(1:raster::ncell(grid))
## Estimate the size (GiB) of the full cells x points distance matrix,
## then split the cell indices into chunks that fit within maxram.
vol = (dd * (nrow(points) * raster::ncell(grid))) / 1024 / 1024 / 1024
if (parallel == FALSE) {
ramtarg= maxram;
targ.n = ceiling((ramtarg / vol) * raster::ncell(grid))
if (targ.n > raster::ncell(grid)) {
splits = list(seq(1:raster::ncell(grid)))
} else {
n = targ.n
f <- sort(rep(1:(trunc(length(xx) / n) + 1), n))[1:length(xx)]
splits = split(xx, f)
}
# Serial evaluation over the chunks.
if (dist.method == "Pythagorean") {
di = unlist(lapply(splits, .dist2point.core.p))
} else if (dist.method == "Haversine") {
di = unlist(lapply(splits, .dist2point.core.h))
}
} else {
# Parallel path: RAM budget is divided across the cluster workers.
ramtarg= maxram/nclus;
targ.n = ceiling((ramtarg / vol) * raster::ncell(grid))
if (targ.n > raster::ncell(grid)) {
splits = list(seq(1:raster::ncell(grid)))
} else {
n = targ.n
f <- sort(rep(1:(trunc(length(xx) / n) + 1), n))[1:length(xx)]
splits = split(xx, f)
}
# Ensure at least one chunk per worker.
if (nclus > length(splits)) {
n = targ.n / nclus;
f <- sort(rep(1:(trunc(length(xx) / n) + 1), n))[1:length(xx)]
splits = split(xx, f)
}
## Progress report before launching the cluster.
cat("BEGIN PARALLEL COMPUTATION\n");
cat("Core count: ", nclus, "\n");
cat("Cells/iteration: ", length(splits[[1]]), "of", raster::ncell(grid), "\n")
cat("Iterations total: ", raster::ncell(grid)/length(splits[[1]]), "\n")
cat("Points: ", nrow(points), "\n");
cat("Maximum RAM per proc.: ", maxram/nclus, "\n");
cat("Distance Method: ", dist.method, "\n\n");
###
p = proc.time();
cl = parallel::makeCluster(nclus, type = 'SOCK')
parallel::clusterExport(cl,
c(
"grid",
"points",
"latlonfromcell",
"pythagorean"
),
envir = environment())
if (dist.method == "Pythagorean") {
di = unlist(parallel::parLapply(cl, splits, .dist2point.core.p))
} else if (dist.method == "Haversine") {
di = unlist(parallel::parLapply(cl, splits, .dist2point.core.h))
}
parallel::stopCluster(cl)
ep = proc.time() - p;
cat("Time elapsed: ", ep[[3]], "\n\n");
}
# Assemble per-cell minimum distances into a raster matching `grid`,
# masking out cells that are NA in the input grid.
r = raster::raster(
nrows = nrow(grid),
ncol = ncol(grid),
crs = raster::crs(grid),
ext = raster::extent(grid)
)
r = raster::setValues(r, values = di)
r = raster::mask(r, grid)
return(r)
}
|
ad29c7474c5a3e3b0d3c4e31ea54bafc1a890855
|
0bfcd397ed2041f9b2589165cc9b00eee48b9b94
|
/aphid_anomalies.R
|
6be6f849532f43f2205b71fe3a29a18aa3b6081a
|
[] |
no_license
|
YDAckerman/Chlorpyrifos
|
9f229a7826f44ff6b7d33df1170a09d2e4b9daf6
|
9f2df38ffd7b6c0dfa3734dbd0d5f6952ea79950
|
refs/heads/master
| 2021-01-10T14:40:37.619335
| 2016-02-03T00:58:21
| 2016-02-03T00:58:21
| 43,094,239
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,169
|
r
|
aphid_anomalies.R
|
library(plyr)
library(dplyr)
source("~/Documents/ZhangLab/R/Chlorpyrifos/assemble_final_dataset.R")
# Detect "anomalies" in a response table: returns TRUE if any earlier
# observation is strictly greater than the final observation in a
# density/date column AND its stage letters share no letter with the
# final row's stage letters; NA when `tab` is not a data frame or no
# date columns can be located.
# NOTE(review): relies on an "info" attribute (allRanges, dateCols)
# attached upstream and on the external helper `hf$mgrep` -- confirm
# their structure against the code that builds response_tables.
findAnom <- function(tab){
if(!is.data.frame(tab)){return(NA)}
# Columns flagged as relevant in the table's metadata.
rel_cols <- attr(attr(tab, "info")$allRanges, "relevantCols")
cols <- unlist(llply(rel_cols, function(x){
attr(tab, "info")$allRanges[[x]]
}))
# Stage-letter columns (name contains "stgr") among the relevant ones.
stgr_cols <- intersect(cols, grep("stgr", colnames(tab), ignore.case = TRUE))
# Date columns from metadata; fall back to name matching on "dat"/"wat".
date_cols <- try(attr(tab, "info")$dateCols$pos)
if(identical(class(date_cols), "try-error")){
date_cols <- intersect(cols, hf$mgrep(c("dat", "wat"), colnames(tab), ignore.case = TRUE))
if(all(is.na(date_cols))){return(NA)}
}
dens_data <- tab[, intersect(cols, date_cols)]
stgr_data <- tab[, stgr_cols]
# TRUE where a value exceeds the last (final-date) value in its column.
bool_vals <- as.data.frame(llply(dens_data, function(x){
if(all(is.na(x))){return(x)}
x > x[length(x)]
}))
# TRUE where a row's stage letters are disjoint from the final row's.
bool_stgr <- as.data.frame(llply(stgr_data, function(x){
if(all(is.na(x))){return(x)}
stgr_letters <- strsplit(x, "")
unlist(llply(stgr_letters, function(y){
length(intersect(y, stgr_letters[[length(stgr_letters)]])) == 0
}))
}))
# An anomaly needs both conditions in the same cell.
bool <- bool_vals & bool_stgr
any(na.omit(bool))
}
# Pest groups to scan, matched by substring against the (verbatim,
# auto-generated) pest-name column of ins_table.
pests <- c("aphid", "weevil", "mite", "bug", "worm")
# For each pest group: collect the distinct pest names matching the group,
# find their experiments in response_tables_lookup, pull the corresponding
# response tables, and count how many show an anomaly (see findAnom).
# Result: one summary row per pest group (Pest, experiments, anomalies).
ldply(pests, function(x){
    ## x <- "bug"  # debugging aid: uncomment to step through a single group
    pest <- ins_table %>% filter(grepl(x, Pest..as..common.name..scientific.name...if.both.given..if.not.just.enter.which.is.stated.in.article.)) %>% select(Pest..as..common.name..scientific.name...if.both.given..if.not.just.enter.which.is.stated.in.article.) %>% distinct(Pest..as..common.name..scientific.name...if.both.given..if.not.just.enter.which.is.stated.in.article.)
    # keep only lookup rows whose pest name matched this group
    pest_exps <- semi_join(response_tables_lookup, pest,
                           by = "Pest..as..common.name..scientific.name...if.both.given..if.not.just.enter.which.is.stated.in.article.")
    # V1 presumably keys response_tables by experiment id — TODO confirm
    pest_tables <- llply(unique(pest_exps$V1), function(x){
        response_tables[[x]]
    })
    # findAnom returns TRUE/FALSE/NA per table; NAs are dropped from the count
    res <- unlist(llply(pest_tables, findAnom, .inform = TRUE))
    data.frame(Pest = x, experiments = length(res), anomalies = sum(na.omit(res)))
}, .inform = TRUE)
|
3820f0373bae7b4387a50b1aa11f711c8c8d0a28
|
58b0cfbf14d5adcf8b84bedf827d7cbde1c3e1dd
|
/R/ef_scaled_euro.R
|
7664ffb0c1486e92321a1a42b407240316b2466b
|
[] |
no_license
|
ipeaGIT/gtfs2emis
|
5c40fcb787eaa72a6b4e67c9199c5220a7542d10
|
d68ffe805a66229f73df76de1e9b42c39d981688
|
refs/heads/master
| 2023-07-19T16:04:46.727586
| 2023-07-13T15:31:34
| 2023-07-13T15:31:34
| 218,305,263
| 23
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,407
|
r
|
ef_scaled_euro.R
|
#' @title
#' Scale local emission factors in order to make emission estimates a function
#' of speed.
#'
#' @description
#' Scale emission factors to account for vehicle speed based on values from the
#' emission factor model by the European Environment Agency (EMEP/EEA). Emission
#' factor estimates are expressed in units 'g/km'.
#'
#' @param ef_local data.frame or a list containing the emission factors data.frame.
#'        Local emission factors, in units 'g/km'.
#' @param speed units. Speed in 'km/h'.
#' @param veh_type character. Bus type, classified as "Ubus Midi <=15 t",
#'        "Ubus Std 15 - 18 t", "Ubus Artic >18 t", "Coaches Std <=18 t", or
#'        "Coaches Artic >18 t".
#' @param euro character. Euro period of vehicle, classified in "Conventional",
#'        "I", "II", "III", "IV", "V", "VI", and "EEV".
#' @param pollutant character. Pollutant: "FC", "CO2", "CO", "NOx", "VOC",
#'        "PM10", "EC", "CH4", "NH3", "N2O", "FC" (fuel consumption).
#' @param fuel character. Fuel type, classified in "D" (Diesel), "DHD" (Diesel
#'        Hybrid ~ Diesel), "DHE" (Diesel Hybrid ~ Electricity), "CNG"
#'        (Compressed Natural Gas), "BD" (Biodiesel). Default is "D".
#' @param tech character. After treatment technology, classified in "SCR"
#'        (Selective Catalytic Reduction), "EGR" (Exhaust Gas Recirculation),
#'        and "DPF+SCR" (Diesel Particulate Filter + SCR, for Euro VI). Default
#'        is "SCR" for "IV" and "V".
#' @param SDC numeric. Average speed of urban driving condition in 'km/h'.
#'        Default is 19 km/h, which is the average speed adopted in EMEP/EEA
#'        report.
#' @param slope numeric. Slope gradient, categorized in -0.06, -0.04, -0.02,
#'        0.00, 0.02, 0.04 and 0.06. Negative gradients means downhills and
#'        positive uphills. Default is 0.0.
#' @param load numeric. Passenger load ratio, classified in 0.0, 0.5 and 1.0.
#'        Default is 0.5.
#' @param fcorr numeric. Correction based on fuel composition. The length must
#'        be one per each euro standards. Default is 1.0.
#'
#' @return list. Emission factors in units 'g/km'.
#' @details
#'
#' The scaled emission factor is related to speed by the expression
#'
#' EF_scaled (V) = EF_local * ( EF(V) / EF(SDC)),
#'
#' where EF_scaled(V) is the scaled emission factors for each street link,
#' EF_local is the local emission factor, EF(V) and EF(SDC) are the EMEP/EEA
#' emission factor the speed of V and the average urban driving speed 'SDC', respectively.
#'
#' Please note that the function reads the vector arguments in the same order
#' as informed by the user. For instance, if the pollutant input is `c("CO","PM10")`
#' input in the local emission factor function, the order needs to be the same for the
#' pollutant in the `ef_scaled_euro` function.
#'
#' In the case of vehicle type, which generally changes according to the emission
#' factor source, the input argument in the `ef_scaled_euro` needs to be consistent
#' with the order adopted in the local emission factor function.
#'
#' For example, if the vector of local vehicle type is
#' `c("BUS_URBAN_D","BUS_MICRO_D")`, the related vector for EMEP/EEA model needs
#' to be `c("Ubus Std 15 - 18 t","Ubus Midi <=15 t")`. The same approach applies for
#' other input arguments. See more in the examples.
#'
#' @family Emission factor model
#'
#' @examples
#' temp_ef_br <- ef_brazil_cetesb(
#'   pollutant = c("CO","PM10","CO2","CH4","NOx"),
#'   veh_type = c("BUS_URBAN_D","BUS_MICRO_D"),
#'   model_year = c(2015,2015),
#'   as_list = TRUE
#' )
#'
#' temp_ef_scaled <- ef_scaled_euro(
#'   ef_local = temp_ef_br,
#'   speed = units::set_units(1:100,"km/h"),
#'   veh_type = c("Ubus Std 15 - 18 t","Ubus Midi <=15 t"),
#'   euro = c("IV","IV"),
#'   fuel = c("D","D"),
#'   tech = c("SCR","SCR"),
#'   pollutant = c("CO","PM10","CO2","CH4","NOx")
#' )
#' @export
ef_scaled_euro <- function(ef_local, speed, veh_type, euro, pollutant, fuel = "D",
                           tech = "SCR", SDC = 19, slope = 0.0,
                           load = 0.5, fcorr = 1){
  
  # check inputs ----
  
  # reference EMEP/EEA emission-factor table (lazy-loaded package data);
  # used only to validate the categorical arguments below
  utils::data('ef_europe_emep_db')
  temp_ef <- ef_europe_emep_db
  
  # ef_local: a list carrying an $EF element, or a data.frame/data.table/units
  if(inherits(ef_local,what = "list")) {
    checkmate::assert_choice("EF", names(ef_local))
    checkmate::assert(
      checkmate::check_class(ef_local$EF, "data.frame")
      , checkmate::check_class(ef_local$EF, "data.table")
      , checkmate::check_class(ef_local$EF, "units")
      , combine = "or"
    )
  }
  checkmate::assert(
    checkmate::check_class(ef_local, "data.frame")
    , checkmate::check_class(ef_local, "data.table")
    , checkmate::check_list(ef_local)
    , checkmate::check_class(ef_local, "units")
    , combine = "or"
  )
  
  # speed: 'units' vector in km/h, within a plausible road-speed range
  checkmate::assert_vector(speed, any.missing = FALSE, min.len = 1, null.ok = FALSE)
  checkmate::assert_numeric(speed, lower = 1, upper = 130)
  checkmate::assert_class(speed, "units")
  if(units(speed)$numerator != "km" | units(speed)$denominator != "h"){
    stop("Invalid 'speed' units: 'speed' needs to be in 'km/h' units.")
  }
  
  # vehicle type: every entry must exist in the EMEP/EEA table
  checkmate::assert_vector(veh_type, any.missing = FALSE, min.len = 1, null.ok = FALSE)
  checkmate::assert_character(veh_type, any.missing = FALSE, min.len = 1)
  for(i in veh_type) checkmate::assert_choice(i, unique(temp_ef$Segment), null.ok = FALSE)
  
  # euro standard
  checkmate::assert_vector(euro, any.missing = FALSE, min.len = 1, null.ok = FALSE)
  checkmate::assert_character(euro, any.missing = FALSE, min.len = 1)
  for(i in euro) checkmate::assert_choice(i, unique(temp_ef$Euro), null.ok = FALSE)
  
  # pollutant
  checkmate::assert_vector(pollutant, any.missing = FALSE, min.len = 1, null.ok = FALSE)
  checkmate::assert_character(pollutant, any.missing = FALSE, min.len = 1)
  for(i in pollutant) checkmate::assert_choice(i, unique(temp_ef$Pol), null.ok = FALSE)
  
  # fuel
  checkmate::assert_vector(fuel, any.missing = FALSE, min.len = 1, null.ok = FALSE)
  checkmate::assert_character(fuel, any.missing = FALSE, min.len = 1)
  for(i in fuel) checkmate::assert_choice(i, unique(temp_ef$Fuel), null.ok = FALSE)
  
  # after-treatment technology
  checkmate::assert_vector(tech, any.missing = FALSE, min.len = 1, null.ok = FALSE)
  checkmate::assert_character(tech, any.missing = FALSE, min.len = 1)
  for(i in tech) checkmate::assert_choice(i, unique(temp_ef$Technology), null.ok = FALSE)
  
  #
  # check dimensions
  #
  
  # bug fix: use inherits(), not methods::is() — is(x, "list") is TRUE for
  # data.frame/data.table via S4 extends(), which would wrongly replace a
  # plain data.frame input with its (nonexistent) $EF element, i.e. NULL
  if (inherits(ef_local, "list")) {
    ef_local <- ef_local$EF
  }
  # recycle scalar veh_type/fuel to match the length of 'euro'
  if (length(veh_type) == 1) {
    veh_type <- rep(veh_type, length(euro))
  }
  if (length(veh_type) != length(euro)) {
    stop("Incorrect input lengths: 'euro' and 'veh_type' need to have the same length.")
  }
  if (length(fuel) == 1) {
    fuel <- rep(fuel, length(euro))
  }
  if (length(fuel) != length(euro)) {
    # bug fix: message previously said 'veh_type' although 'euro' is compared
    stop("Incorrect input lengths: 'fuel' and 'euro' need to have the same length.")
  }
  
  #
  # check units: every emission-factor column must be in 'g/km'
  #
  lapply(seq_along(ef_local), function(i){
    # bug fix: validate each column ef_local[[i]], not the container itself —
    # a data.frame of units columns is never itself of class 'units'
    if (!inherits(ef_local[[i]], "units")) {
      stop("Incorrect units: 'ef_local' needs to be in 'g/km' units.")
    }
    if(units(ef_local[[i]])$numerator != "g" | units(ef_local[[i]])$denominator != "km"){
      stop("Incorrect units: 'ef_local' needs to be in 'g/km' units.")
    }
  })
  
  #
  # EMEP/EEA EF at the urban driving-condition speed (SDC) — the denominator
  # of the scaling ratio EF(V) / EF(SDC)
  #
  ef_sdc <- ef_europe_emep(speed = units::set_units(SDC, "km/h"),
                           veh_type = veh_type,
                           euro = euro,
                           pollutant = pollutant,
                           fuel = fuel,
                           tech = tech,
                           slope = slope,
                           load = load,
                           fcorr = fcorr,
                           as_list = FALSE)
  
  # per-column adjustment factor k = EF_local / EF_europe(SDC)
  k <- as.numeric(ef_local)/as.numeric(ef_sdc)
  
  # EMEP/EEA EF evaluated at the requested speeds
  ef_speed <- ef_europe_emep(speed = speed,
                             veh_type = veh_type,
                             fuel = fuel,
                             euro = euro,
                             slope = slope,
                             load = load,
                             tech = tech,
                             fcorr = fcorr,
                             pollutant = pollutant,
                             as_list = TRUE)
  ef_speed$EF <- as.matrix(ef_speed$EF)
  
  #
  # ef_scaled: scale each EMEP/EEA EF column by its local adjustment factor
  #
  ef_scaled <- sapply(seq_along(k), function(i){ef_speed$EF[, i] * k[i]})
  # a single speed collapses sapply's result to a vector; restore the matrix
  if(length(speed) == 1){
    ef_scaled <- t(as.matrix(ef_scaled))
  }
  # restore 'g/km' units and the original column names
  ef_scaled <- units::set_units(ef_scaled, 'g/km')
  ef_scaled <- data.table::as.data.table(ef_scaled)
  colnames(ef_scaled) <- colnames(ef_speed$EF)
  
  # return the EMEP/EEA list with its EF replaced by the scaled values
  ef_speed$EF <- ef_scaled
  return(ef_speed)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.