blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b117ec30e095f34cea29f5454829433cf2ff1176
|
96e0fc5d5ef1c7a9001588a48415b5d908400364
|
/altergro/traf/traf.R
|
60985a82366bac25f9481aab43e8f89a70509b87
|
[] |
no_license
|
Shmuma/tools
|
4635af3921828e89a8599be93dd0833e819720c0
|
8a2e04913efe2516cdfe8b3f07ead5a499b07085
|
refs/heads/master
| 2021-01-17T08:42:22.711421
| 2017-08-09T17:54:34
| 2017-08-09T17:54:34
| 6,597,450
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,040
|
r
|
traf.R
|
# Script downloads and calculates total traffic for the current day from 8:00 to midnight
library(methods)
library(bitops)
source("getTrafData.R")
source("parseTrafData.R")
source("getToday.R")
# Abort early with a clear message when credentials are missing.
# Bug fix: the original used a bare top-level `return`, which does NOT stop
# an R script -- execution fell through and failed later in read.csv().
if (!file.exists("auth.txt")) {
  stop("No auth.txt file found! You need to copy auth.txt.sample to auth.txt and edit it!",
       call. = FALSE)
}
# read password file (expected columns: Login, Password)
auth <- read.csv("auth.txt", header = TRUE)
# download used traffic (HTML page from the provider)
html_data <- getTrafData(login = auth$Login, pass = auth$Password)
# parse traffic table out of the downloaded HTML
traf_data <- parseTrafData(html_data)
# Format one report line: a label left-padded to 12 characters followed by
# the size converted to the next unit (bytes / 1024), one decimal place.
format_size <- function(msg, bytes) {
  converted <- bytes / 1024
  sprintf("%-12s%.1f", msg, converted)
}
# Assemble the human-readable traffic report and print it to stdout.
report_lines <- c(
  "",
  format_size("Night in:",  traf_data$night_in),
  format_size("Night out:", traf_data$night_out),
  format_size("Night:",     traf_data$night),
  "",
  format_size("Day in:",    traf_data$day_in),
  format_size("Day out:",   traf_data$day_out),
  format_size("Day:",       traf_data$day)
)
msg <- paste(report_lines, collapse = "\n")
# file = "" sends the text to standard output
write(msg, file = "")
|
65e7b7db608a6cc9d77517cc07f12f73e5eb1544
|
fb0448f213ee6c975be8474094a0f6b2f1353146
|
/R/mag.R
|
b51e736978333ee1e8e76c518fc0c855c55cd003
|
[] |
no_license
|
mbfumc/fici
|
bf9d2add6e6743c9ac2ad0259fc7c2ce49ca290f
|
4ac3c87157ec776fa9dde1c6e241eedb7f35aaec
|
refs/heads/master
| 2022-10-06T06:55:52.196521
| 2020-04-14T18:30:53
| 2020-04-14T18:30:53
| 184,465,494
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 222
|
r
|
mag.R
|
#' Calculate Magnitude of Vector
#'
#' Computes the Euclidean (L2) norm of a numeric vector: the square root of
#' the sum of its squared elements.
#' @param x A numeric vector.
#' @return A single numeric value, the magnitude of \code{x}.
#' @keywords magnitude
#' @export
#' @examples
#' x <- c(3,5,6)
#' mag(x)
mag <- function(x) {
  squared_total <- sum(x^2)
  sqrt(squared_total)
}
|
52c0240dfdd0a735d0bad5fcc9aa44d93831676e
|
9c9503f3f993e012b0d55b6f66259337f53957c2
|
/Assingment3_test.R
|
d95bb7e75e8f4a2138c433ccfb0211569415e38a
|
[] |
no_license
|
Pyotr001/Quiz_R
|
360fc16a1dd2435c0e85cd1555b085d7032231c8
|
5ebfdb8d2c16b945d3fcbd808c4d04432cc8e18c
|
refs/heads/master
| 2020-05-17T04:53:32.072968
| 2015-03-08T13:30:35
| 2015-03-08T13:30:35
| 31,598,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 469
|
r
|
Assingment3_test.R
|
# Locate the assignment data files; full.names = T keeps the directory
# prefix so read.csv() can open them directly.
files <- list.files("rprog-data-ProgAssignment3-data", full.names = T)
# Read the third file twice: once with every column forced to character,
# once with default type inference (classes are compared further below).
outcome <- read.csv(files[3], colClasses = "character")
outcome2 <- read.csv(files[3])
dim(outcome)
names_outcome <- names(outcome)
# Column 11 was read as character; coerce to numeric for plotting.
# NOTE(review): non-numeric entries become NA with a coercion warning.
outcome[, 11] <- as.numeric(outcome[, 11])
hist(outcome[, 11])
head(outcome[, 20:26])
# Return 14 divided by x, raising an error (without the call in the
# message) when x equals 3.
testStop <- function(x) {
  if (x == 3) {
    stop(call. = FALSE, "Не делю на три")
  }
  14 / x
}
# Compare per-column classes of the two reads; since `outcome` was read with
# colClasses = "character", most comparisons are expected to be FALSE.
# NOTE(review): apply() coerces a data frame to a matrix first, so both
# sides collapse to "character" columns -- verify this is intended.
apply(outcome, 2, class) == apply(outcome2, 2, class)
head(outcome[2])
|
11ba87bb0225d1d417d82450325ece428664e118
|
33780efd85cb1e5e91292df5b79989df7f6ad830
|
/Div_estimates.R
|
1b70b5d192c59e2a82045378d764f8d0ab5aa782
|
[] |
no_license
|
lotte-korell/precipitation-change-experiments-
|
e485f1929966a3b7aad857e4d4261f03bc25cb8e
|
558115d979988ed581ee18cdefad4403045ee9d8
|
refs/heads/master
| 2023-03-08T05:02:05.567325
| 2021-02-19T13:03:17
| 2021-02-19T13:03:17
| 276,665,437
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,284
|
r
|
Div_estimates.R
|
# Inspect and then clear the workspace, then move to the results directory.
# NOTE(review): rm(list=ls()) and setwd() with a hard-coded local path are
# discouraged in shared scripts -- they wipe the caller's session and only
# work on this machine; consider an RStudio project / here::here() instead.
ls()
rm(list=ls())
ls()
getwd()
setwd("C:/Users/korell/R/Meta.analysis/results")
dir()
library(lme4)
library(MASS)
library(car)
library(ggplot2)
library(MuMIn)
library(arm)
library(boot)
# Input tables: diversity and cumulative-abundance log-response-ratio data
# at the local (plot/block) and gamma (site) scale; semicolon-separated CSVs.
Div.data_local <- read.csv("Div.data_local.csv" ,dec=".", sep=";",h=T)
Div.data_gamma <- read.csv("Div.data_site.csv" ,dec=".", sep=";",h=T)
Cum.abs_local <- read.csv("Cum.abs_local.csv" ,dec=".", sep=";",h=T)
Cum.abs_gamma<- read.csv("Cum.abs_site.csv" ,dec=".", sep=";",h=T)
# Quick look at the study/site factor levels.
levels(as.factor(Div.data_local$study))
levels(as.factor(Div.data_local$site))
# NOTE(review): the line above is repeated -- likely a leftover duplicate.
levels(as.factor(Div.data_local$site))
########################
######grand mean########
########################
## Total abundance at local and gamma scale
grand_mean.cum.abs_P <- lmer(LRR_cum.abs ~ 1 + (1|study:site:SiteBlock), data=Cum.abs_local)
summary(grand_mean.cum.abs_P)
r.squaredGLMM(grand_mean.cum.abs_P)
set.seed(1234)
m1.grand_mean_boot<-bootMer(grand_mean.cum.abs_P, FUN = fixef, nsim = 1000)
m1.grand_mean_boot # estimate
grand_mean.gamma.cum.abs_P <- lmer(LRR_cum.abs_gamma ~ 1 + (1|study:site), data=Cum.abs_gamma)
summary(grand_mean.gamma.cum.abs_P)
r.squaredGLMM(grand_mean.gamma.cum.abs_P)
set.seed(1234)
m7.grand_mean_boot<-bootMer(grand_mean.gamma.cum.abs_P, FUN = fixef, nsim = 1000)
m7.grand_mean_boot # estimate
## Species richness at local, turnover and gamma scale
grand_mean.S_P <- lmer(LRR_Sn ~ 1 + (1|study:site:SiteBlock), data=Div.data_local)
summary(grand_mean.S_P)
r.squaredGLMM(grand_mean.S_P)
set.seed(1234)
m1S.grand_mean_boot<-bootMer(grand_mean.S_P, FUN = fixef, nsim = 1000)
m1S.grand_mean_boot # estimate
grand_mean.betaS_P <- lmer(LRR_betaSn ~ 1 + (1|study:site:SiteBlock), data=Div.data_local)
summary(grand_mean.betaS_P)
r.squaredGLMM(grand_mean.betaS_P)
set.seed(1234)
m3.grand_mean_boot<-bootMer(grand_mean.betaS_P, FUN = fixef, nsim = 1000)
m3.grand_mean_boot # estimate
grand_mean.gammaSn_P <- lmer(LRR_gammaSn ~ 1 + (1|study:site), data=Div.data_gamma)
summary(grand_mean.gammaSn_P)
r.squaredGLMM(grand_mean.gammaSn_P)
set.seed(1234)
m5.grand_mean_boot<-bootMer(grand_mean.gammaSn_P, FUN = fixef, nsim = 1000)
m5.grand_mean_boot # estimate
## Evenness at local, turnover and gamma scale
grand_mean.SPie_P <- lmer(LRR_SPie ~ 1 + (1|study:site:SiteBlock), data=Div.data_local)
summary(grand_mean.SPie_P)
r.squaredGLMM(grand_mean.SPie_P)
set.seed(1234)
m2.grand_mean_boot<-bootMer(grand_mean.SPie_P, FUN = fixef, nsim = 1000)
m2.grand_mean_boot # estimate
grand_mean.betaSPie_P <- lmer(LRR_betaSPie ~ 1 + (1|study:site:SiteBlock), data=Div.data_local)
summary(grand_mean.betaSPie_P)
r.squaredGLMM(grand_mean.betaSPie_P)
set.seed(1234)
m4.grand_mean_boot<-bootMer(grand_mean.betaSPie_P, FUN = fixef, nsim = 1000)
m4.grand_mean_boot # estimate
grand_mean.gammaSPie_P <- lmer(LRR_gammaSPie ~ 1 + (1|study:site), data=Div.data_gamma)
summary(grand_mean.gammaSPie_P)
r.squaredGLMM(grand_mean.gammaSPie_P)
set.seed(1234)
m6.grand_mean_boot<-bootMer(grand_mean.gammaSPie_P, FUN = fixef, nsim = 1000)
m6.grand_mean_boot # estimate
#######################################################################################
################################# delta P ###########################################
#######################################################################################
##############################~~~~~~~total abundance~~~~~~~~###########################
#local
m1.cum<-lmer(LRR_cum.abs~delta.P1_rescale+
(1|study:site:SiteBlock)-1,REML=T, data=Cum.abs_local,na.action="na.fail")
m1.cum.sum<-summary(m1.cum)
m1.cum.est<-m1.cum.sum$coefficients
set.seed(1234)
m1.cum_boot<-bootMer(m1.cum, FUN = fixef, nsim = 1000)
m1.cum_boot # estimate
m1.cum_boot_ci1<-boot.ci(m1.cum_boot, index =1, type = "perc")
m1.cum_boot_ci1 #CI
#site
m7.mod<-lmer(LRR_cum.abs_gamma~delta.P1_rescale+
(1|study:site)-1, REML=F,data=Cum.abs_gamma, na.action="na.fail")
m7.sum<-summary(m7.mod)
m7.est<-m7.sum$coefficients
set.seed(1234)
m7.est_boot<-bootMer(m7.mod, FUN = fixef, nsim = 1000)
m7.est_boot#effect size delta P
m7.est_ci1<-boot.ci(m7.est_boot, index =1, type = "perc")#delta P
m7.est_ci1#CI delta P
##############################~~~~~~~species richness~~~~~~~~###########################
#local
m1.mod<-lmer(LRR_S~delta.P1_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m1.sum<-summary(m1.mod)
m1.est<-m1.sum$coefficients
m1.est
##bootstrapping effect size and CI
set.seed(1234)
m1.est_boot<-bootMer(m1.mod, FUN = fixef, nsim = 1000)
m1.est_boot#estimate
m1.est_ci1<-boot.ci(m1.est_boot, index =1, type = "perc")#deltaP
m1.est_ci1#CI
#turnover
m3a.mod<-lmer(LRR_betaSn~delta.P1_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m3a.sum<-summary(m3a.mod)
m3a.est<-m3a.sum$coefficients
m3a.est
###bootstrapping
set.seed(1234)
m3a.est_boot<-bootMer(m3a.mod, FUN = fixef, nsim = 1000)
m3a.est_boot#effect size delta P
m3a.est_ci1<-boot.ci(m3a.est_boot, index =1, type = "perc")
m3a.est_ci1#CI #delta P
# site
m7.mod<-lmer(LRR_gammaSn~delta.P1_rescale+
(1|study:site)-1,REML=T,data=Div.data_gamma, na.action="na.fail")
m7.sum<-summary(m7.mod)
m7.est<-m7.sum$coefficients
##bootstrapping effect size and CI
set.seed(1234)
m7.est_boot<-bootMer(m7.mod, FUN = fixef, nsim = 1000)
m7.est_boot#delta P
m7.est_ci1<-boot.ci(m7.est_boot, index =1, type = "perc")
m7.est_ci1#
##############################~~~~~~~~~~evenness~~~~~~~~################################
#local
m2.mod<-lmer(LRR_SPie~delta.P1_rescale+
(1|study:site:SiteBlock)-1,REML=T, data=Div.data_local,na.action="na.fail")
m2.sum<-summary(m2.mod)
m2.est<-m2.sum$coefficients
###bootstrapping
set.seed(1234)
m2.est_boot<-bootMer(m2.mod, FUN = fixef, nsim = 1000)
m2.est_boot #effect size
m2.est_ci1<-boot.ci(m2.est_boot, index =1, type = "perc")#deltaP
m2.est_ci1 #CI
#turnover
m4.mod3<-lmer(LRR_betaSPie~delta.P1_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m4.sum3<-summary(m4.mod3)
m4.est3<-m4.sum3$coefficients
##bootstrapping
set.seed(1234)
m4.est3_boot<-bootMer(m4.mod3, FUN = fixef, nsim = 1000)
m4.est3_boot#effect size delta P
m4.est3_ci1<-boot.ci(m4.est3_boot, index =1, type = "perc")
m4.est3_ci1#CI delta P
#site
m6.mod<-lmer(LRR_gammaSPie~delta.P1_rescale+
(1|study:site)-1,REML=T,data=Div.data_gamma, na.action="na.fail")
m6.sum<-summary(m6.mod)
m6.est<-m6.sum$coefficients
##bootstrapping effect size and CI
set.seed(1234)
m6.est_boot<-bootMer(m6.mod, FUN = fixef, nsim = 1000)
m6.est_boot#delta P
m6.est_ci1<-boot.ci(m6.est_boot, index =1, type = "perc")
m6.est_ci1#
#######################################################################################
################################# MAP ###########################################
#######################################################################################
##############################~~~~~~~total abundance~~~~~~~~###########################
#local
m1.cum<-lmer(LRR_cum.abs~MAP_rescale+
(1|study:site:SiteBlock)-1,REML=T, data=Cum.abs_local,na.action="na.fail")
m1.cum.sum<-summary(m1.cum)
m1.cum.est<-m1.cum.sum$coefficients
set.seed(1234)
m1.cum_boot<-bootMer(m1.cum, FUN = fixef, nsim = 1000)
m1.cum_boot # estimate
m1.cum_boot_ci1<-boot.ci(m1.cum_boot, index =1, type = "perc")
m1.cum_boot_ci1 #CI
#site
m7.mod<-lmer(LRR_cum.abs_gamma~MAP_rescale+
(1|study:site)-1, REML=F,data=Cum.abs_gamma, na.action="na.fail")
m7.sum<-summary(m7.mod)
m7.est<-m7.sum$coefficients
set.seed(1234)
m7.est_boot<-bootMer(m7.mod, FUN = fixef, nsim = 1000)
m7.est_boot#effect size
m7.est_ci1<-boot.ci(m7.est_boot, index =1, type = "perc")#delta P
m7.est_ci1#CI
##############################~~~~~~~species richness~~~~~~~~###########################
#local
m1.mod<-lmer(LRR_S~MAP_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m1.sum<-summary(m1.mod)
m1.est<-m1.sum$coefficients
m1.est
##bootstrapping effect size and CI
set.seed(1234)
m1.est_boot<-bootMer(m1.mod, FUN = fixef, nsim = 1000)
m1.est_boot#estimate
m1.est_ci1<-boot.ci(m1.est_boot, index =1, type = "perc")#deltaP
m1.est_ci1#CI
#turnover
m3a.mod<-lmer(LRR_betaSn~MAP_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m3a.sum<-summary(m3a.mod)
m3a.est<-m3a.sum$coefficients
m3a.est
###bootstrapping
set.seed(1234)
m3a.est_boot<-bootMer(m3a.mod, FUN = fixef, nsim = 1000)
m3a.est_boot#effect size delta P
m3a.est_ci1<-boot.ci(m3a.est_boot, index =1, type = "perc")
m3a.est_ci1#CI #delta P
#site
m7.mod<-lmer(LRR_gammaSn~MAP_rescale+
(1|study:site)-1,REML=T,data=Div.data_gamma, na.action="na.fail")
m7.sum<-summary(m7.mod)
m7.est<-m7.sum$coefficients
##bootstrapping effect size and CI
set.seed(1234)
m7.est_boot<-bootMer(m7.mod, FUN = fixef, nsim = 1000)
m7.est_boot#estimate
m7.est_ci1<-boot.ci(m7.est_boot, index =1, type = "perc")
m7.est_ci1#CI
##############################~~~~~~~~~~evenness~~~~~~~~################################
#local
m2.mod<-lmer(LRR_SPie~MAP_rescale+
(1|study:site:SiteBlock)-1,REML=T, data=Div.data_local,na.action="na.fail")
m2.sum<-summary(m2.mod)
m2.est<-m2.sum$coefficients
###bootstrapping
set.seed(1234)
m2.est_boot<-bootMer(m2.mod, FUN = fixef, nsim = 1000)
m2.est_boot #effect size
m2.est_ci1<-boot.ci(m2.est_boot, index =1, type = "perc")#deltaP
m2.est_ci1 #CI
#turnover
m4.mod3<-lmer(LRR_betaSPie~MAP_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m4.sum3<-summary(m4.mod3)
m4.est3<-m4.sum3$coefficients
##bootstrapping
set.seed(1234)
m4.est3_boot<-bootMer(m4.mod3, FUN = fixef, nsim = 1000)
m4.est3_boot#effect size
m4.est3_ci1<-boot.ci(m4.est3_boot, index =1, type = "perc")
m4.est3_ci1#CI
#site
m6.mod<-lmer(LRR_gammaSPie~MAP_rescale+
(1|study:site)-1,REML=T,data=Div.data_gamma, na.action="na.fail")
m6.sum<-summary(m6.mod)
m6.est<-m6.sum$coefficients
##bootstrapping effect size and CI
set.seed(1234)
m6.est_boot<-bootMer(m6.mod, FUN = fixef, nsim = 1000)
m6.est_boot#effect size
m6.est_ci1<-boot.ci(m6.est_boot, index =1, type = "perc")
m6.est_ci1#CI
#######################################################################################
################################# PET ###########################################
#######################################################################################
##############################~~~~~~~total abundance~~~~~~~~###########################
#local
m1.cum<-lmer(LRR_cum.abs~PET_rescale+
(1|study:site:SiteBlock)-1,REML=T, data=Cum.abs_local,na.action="na.fail")
m1.cum.sum<-summary(m1.cum)
m1.cum.est<-m1.cum.sum$coefficients
set.seed(1234)
m1.cum_boot<-bootMer(m1.cum, FUN = fixef, nsim = 1000)
m1.cum_boot # estimate
m1.cum_boot_ci1<-boot.ci(m1.cum_boot, index =1, type = "perc")
m1.cum_boot_ci1 #CI
#site
m7.mod<-lmer(LRR_cum.abs_gamma~PET_rescale+
(1|study:site)-1, REML=F,data=Cum.abs_gamma, na.action="na.fail")
m7.sum<-summary(m7.mod)
m7.est<-m7.sum$coefficients
set.seed(1234)
m7.est_boot<-bootMer(m7.mod, FUN = fixef, nsim = 1000)
m7.est_boot#effect size
m7.est_ci1<-boot.ci(m7.est_boot, index =1, type = "perc")#delta P
m7.est_ci1#CI
##############################~~~~~~~species richness~~~~~~~~###########################
#local
m1.mod<-lmer(LRR_S~PET_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m1.sum<-summary(m1.mod)
m1.est<-m1.sum$coefficients
m1.est
##bootstrapping effect size and CI
set.seed(1234)
m1.est_boot<-bootMer(m1.mod, FUN = fixef, nsim = 1000)
m1.est_boot#estimate
m1.est_ci1<-boot.ci(m1.est_boot, index =1, type = "perc")#deltaP
m1.est_ci1#CI
#turnover
m3a.mod<-lmer(LRR_betaSn~PET_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m3a.sum<-summary(m3a.mod)
m3a.est<-m3a.sum$coefficients
m3a.est
###bootstrapping
set.seed(1234)
m3a.est_boot<-bootMer(m3a.mod, FUN = fixef, nsim = 1000)
m3a.est_boot#effect size delta P
m3a.est_ci1<-boot.ci(m3a.est_boot, index =1, type = "perc")
m3a.est_ci1#CI #delta P
# site
m7.mod<-lmer(LRR_gammaSn~PET_rescale+
(1|study:site)-1,REML=T,data=Div.data_gamma, na.action="na.fail")
m7.sum<-summary(m7.mod)
m7.est<-m7.sum$coefficients
##bootstrapping effect size and CI
set.seed(1234)
m7.est_boot<-bootMer(m7.mod, FUN = fixef, nsim = 1000)
m7.est_boot#estimate
m7.est_ci1<-boot.ci(m7.est_boot, index =1, type = "perc")
m7.est_ci1#CI
##############################~~~~~~~~~~evenness~~~~~~~~################################
#local
m2.mod<-lmer(LRR_SPie~PET_rescale+
(1|study:site:SiteBlock)-1,REML=T, data=Div.data_local,na.action="na.fail")
m2.sum<-summary(m2.mod)
m2.est<-m2.sum$coefficients
###bootstrapping
set.seed(1234)
m2.est_boot<-bootMer(m2.mod, FUN = fixef, nsim = 1000)
m2.est_boot #effect size
m2.est_ci1<-boot.ci(m2.est_boot, index =1, type = "perc")#deltaP
m2.est_ci1 #CI
#turnover
m4.mod3<-lmer(LRR_betaSPie~PET_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m4.sum3<-summary(m4.mod3)
m4.est3<-m4.sum3$coefficients
##bootstrapping
set.seed(1234)
m4.est3_boot<-bootMer(m4.mod3, FUN = fixef, nsim = 1000)
m4.est3_boot#effect size
m4.est3_ci1<-boot.ci(m4.est3_boot, index =1, type = "perc")
m4.est3_ci1#CI
#site
m6.mod<-lmer(LRR_gammaSPie~PET_rescale+
(1|study:site)-1,REML=T,data=Div.data_gamma, na.action="na.fail")
m6.sum<-summary(m6.mod)
m6.est<-m6.sum$coefficients
##bootstrapping effect size and CI
set.seed(1234)
m6.est_boot<-bootMer(m6.mod, FUN = fixef, nsim = 1000)
m6.est_boot#effect size
m6.est_ci1<-boot.ci(m6.est_boot, index =1, type = "perc")
m6.est_ci1#CI
#######################################################################################
################################# duration ###########################################
#######################################################################################
##############################~~~~~~~total abundance~~~~~~~~###########################
#local
m1.cum<-lmer(LRR_cum.abs~duration_rescale+
(1|study:site:SiteBlock)-1,REML=T, data=Cum.abs_local,na.action="na.fail")
m1.cum.sum<-summary(m1.cum)
m1.cum.est<-m1.cum.sum$coefficients
set.seed(1234)
m1.cum_boot<-bootMer(m1.cum, FUN = fixef, nsim = 1000)
m1.cum_boot # estimate
m1.cum_boot_ci1<-boot.ci(m1.cum_boot, index =1, type = "perc")
m1.cum_boot_ci1 #CI
#site
m7.mod<-lmer(LRR_cum.abs_gamma~duration_rescale+
(1|study:site)-1, REML=F,data=Cum.abs_gamma, na.action="na.fail")
m7.sum<-summary(m7.mod)
m7.est<-m7.sum$coefficients
set.seed(1234)
m7.est_boot<-bootMer(m7.mod, FUN = fixef, nsim = 1000)
m7.est_boot#effect size delta P
m7.est_ci1<-boot.ci(m7.est_boot, index =1, type = "perc")#delta P
m7.est_ci1#CI delta P
##############################~~~~~~~species richness~~~~~~~~###########################
#local
m1.mod<-lmer(LRR_S~duration_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m1.sum<-summary(m1.mod)
m1.est<-m1.sum$coefficients
m1.est
##bootstrapping effect size and CI
set.seed(1234)
m1.est_boot<-bootMer(m1.mod, FUN = fixef, nsim = 1000)
m1.est_boot#estimate
m1.est_ci1<-boot.ci(m1.est_boot, index =1, type = "perc")#deltaP
m1.est_ci1#CI
#turnover
m3a.mod<-lmer(LRR_betaSn~duration_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m3a.sum<-summary(m3a.mod)
m3a.est<-m3a.sum$coefficients
m3a.est
###bootstrapping
set.seed(1234)
m3a.est_boot<-bootMer(m3a.mod, FUN = fixef, nsim = 1000)
m3a.est_boot#effect size delta P
m3a.est_ci1<-boot.ci(m3a.est_boot, index =1, type = "perc")
m3a.est_ci1#CI #delta P
# site
m7.mod<-lmer(LRR_gammaSn~duration_rescale+
(1|study:site)-1,REML=T,data=Div.data_gamma, na.action="na.fail")
m7.sum<-summary(m7.mod)
m7.est<-m7.sum$coefficients
##bootstrapping effect size and CI
set.seed(1234)
m7.est_boot<-bootMer(m7.mod, FUN = fixef, nsim = 1000)
m7.est_boot#delta P
m7.est_ci1<-boot.ci(m7.est_boot, index =1, type = "perc")
m7.est_ci1#
##############################~~~~~~~~~~evenness~~~~~~~~################################
#local
m2.mod<-lmer(LRR_SPie~duration_rescale+
(1|study:site:SiteBlock)-1,REML=T, data=Div.data_local,na.action="na.fail")
m2.sum<-summary(m2.mod)
m2.est<-m2.sum$coefficients
###bootstrapping
set.seed(1234)
m2.est_boot<-bootMer(m2.mod, FUN = fixef, nsim = 1000)
m2.est_boot #effect size
m2.est_ci1<-boot.ci(m2.est_boot, index =1, type = "perc")#deltaP
m2.est_ci1 #CI
#turnover
m4.mod3<-lmer(LRR_betaSPie~duration_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m4.sum3<-summary(m4.mod3)
m4.est3<-m4.sum3$coefficients
##bootstrapping
set.seed(1234)
m4.est3_boot<-bootMer(m4.mod3, FUN = fixef, nsim = 1000)
m4.est3_boot#effect size delta P
m4.est3_ci1<-boot.ci(m4.est3_boot, index =1, type = "perc")
m4.est3_ci1#CI delta P
#site
m6.mod<-lmer(LRR_gammaSPie~duration_rescale+
(1|study:site)-1,REML=T,data=Div.data_gamma, na.action="na.fail")
m6.sum<-summary(m6.mod)
m6.est<-m6.sum$coefficients
##bootstrapping effect size and CI
set.seed(1234)
m6.est_boot<-bootMer(m6.mod, FUN = fixef, nsim = 1000)
m6.est_boot#delta P
m6.est_ci1<-boot.ci(m6.est_boot, index =1, type = "perc")
m6.est_ci1#
#######################################################################################
################################# treatment direction #################################
#######################################################################################
#~~~~~~total abundnace
#local
m1.mod2<-lmer(LRR_cum.abs~treatment.direction:delta.P1_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Cum.abs_local, na.action="na.fail")
m1.sum2<-summary(m1.mod2)
m1.est2<-m1.sum2$coefficients
m1.est2
##bootstrapping effect size and CI
set.seed(1234)
m1.est2_boot<-bootMer(m1.mod2, FUN = fixef, nsim = 1000)
m1.est2_boot#estimates direction
m1.est2_ci1<-boot.ci(m1.est2_boot, index =1, type = "perc")#deltaP
m1.est2_ci1#treatment.direction:delta.P
m1.est2_ci2<-boot.ci(m1.est2_boot, index =2, type = "perc")#deltaP
m1.est2_ci2##treatment.direction:delta P
m7.mod1<-lmer(LRR_cum.abs_gamma~treatment.direction:delta.P1_rescale+
(1|study:site)-1, REML=F,data=Cum.abs_gamma, na.action="na.fail")
m7.sum1<-summary(m7.mod1)
m7.est2<-m7.sum1$coefficients
set.seed(1234)
m7.est2_boot<-bootMer(m7.mod1, FUN = fixef, nsim = 1000)
m7.est2_boot#effect size delta P
m7.est2_ci1<-boot.ci(m7.est2_boot, index =1, type = "perc")#delta P
m7.est2_ci1##treatment decr.P
m7.est2_ci2<-boot.ci(m7.est2_boot, index =2, type = "perc")#delta P
m7.est2_ci2##treatment incr.P
#######~~~~~~species richness~~~~~~~######
#local
m1.mod3<-lmer(LRR_S~treatment.direction:delta.P1_rescale+
(1|study:site:SiteBlock)-1,REML=T, data=Div.data_local,na.action="na.fail")
m1.sum3<-summary(m1.mod3)
m1.est3<-m1.sum3$coefficients
###bootstrapping
set.seed(1234)
m1.est3_boot<-bootMer(m1.mod3, FUN = fixef, nsim = 1000)
m1.est3_boot #estimates direction
m1.est3_ci1<-boot.ci(m1.est3_boot, index =1, type = "perc")#deltaP
m1.est3_ci1 #treatment decr.P
m1.est3_ci2<-boot.ci(m1.est3_boot, index =2, type = "perc")#deltaP
m1.est3_ci2 #treatment incr.P
#turnover
m3a.mod3<-lmer(LRR_betaSn~treatment.direction:delta.P1_rescale+
(1|study:site:SiteBlock)-1,REML=T, data=Div.data_local,na.action="na.fail")
m3a.sum3a<-summary(m3a.mod3)
m3a.est3<-m3a.sum3a$coefficients
###bootstrapping
set.seed(1234)
m3a.est3_boot<-bootMer(m3a.mod3, FUN = fixef, nsim = 1000)
m3a.est3_boot #estimates direction
m3a.est3_ci1<-boot.ci(m3a.est3_boot, index =1, type = "perc")#deltaP
m3a.est3_ci1 #treatment decr.P
m3a.est3_ci2<-boot.ci(m3a.est3_boot, index =2, type = "perc")#deltaP
m3a.est3_ci2 #treatment incr.P
#site
m5a.mod2<-lmer(LRR_gammaSn~treatment.direction:delta.P1_rescale+
(1|study:site)-1,REML=T,data=Div.data_gamma, na.action="na.fail")
m5a.sum2<-summary(m5a.mod2)
m5a.est2<-m5a.sum2$coefficients
##bootstrapping
set.seed(1234)
m5a.est2_boot<-bootMer(m5a.mod2, FUN = fixef, nsim = 1000)
m5a.est2_boot##estimates direction
m5a.est2_ci1<-boot.ci(m5a.est2_boot, index =1, type = "perc")
m5a.est2_ci1#treatment decr.P
m5a.est2_ci2<-boot.ci(m5a.est2_boot, index =2, type = "perc")
m5a.est2_ci2#treatment incr.P
#######~~~~~~evenness~~~~~~~######
m2.mod1<-lmer(LRR_SPie~treatment.direction:delta.P1_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m2.sum1<-summary(m2.mod1)
m2.est1<-m2.sum1$coefficients
###bootstrapping
set.seed(1234)
m2.est1_boot<-bootMer(m2.mod1, FUN = fixef, nsim = 1000)
m2.est1_boot##effect sizes treatment direction
# Percentile CI for the first fixed effect (treatment decr.P).
m2.est1_ci1<-boot.ci(m2.est1_boot, index =1, type = "perc")
# Bug fix: the script printed m2.est1_ci2 here, which is not assigned until
# two lines below (fresh session -> "object not found"); the CI computed on
# the previous line is m2.est1_ci1.
m2.est1_ci1#CI treatment decr.P
m2.est1_ci2<-boot.ci(m2.est1_boot, index =2, type = "perc")#deltaP
m2.est1_ci2#CI treatmen incr.P
#turover
m4.mod2<-lmer(LRR_betaSPie~treatment.direction:delta.P1_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m4.sum2<-summary(m4.mod2)
m4.est2<-m4.sum2$coefficients
##bootstrapping
set.seed(1234)
m4.est2_boot<-bootMer(m4.mod2, FUN = fixef, nsim = 1000)
m4.est2_boot#effect sizes treatment direction
m4.est2_ci1<-boot.ci(m4.est2_boot, index =1, type = "perc")
m4.est2_ci1#CI treatment decr.P
m4.est2_ci2<-boot.ci(m4.est2_boot, index =2, type = "perc")
m4.est2_ci2#CI treatmen incr.P
#site
m6.mod3<-lmer(LRR_gammaSPie~treatment.direction:delta.P1_rescale+
(1|study:site)-1,REML=T,data=Div.data_gamma, na.action="na.fail")
m6.sum3<-summary(m6.mod3)
m6.est3<-m6.sum3$coefficients
##bootstrapping effect size and CI
set.seed(1234)
m6.est3_boot<-bootMer(m6.mod3, FUN = fixef, nsim = 1000)
m6.est3_boot#effect sizes treatment direction
m6.est3_ci1<-boot.ci(m6.est3_boot, index =1, type = "perc")
m6.est3_ci1#CI treatment decr.P
m6.est3_ci2<-boot.ci(m6.est3_boot, index =2, type = "perc")
m6.est3_ci2#CI treatmen incr.P
#######################################################################################
################################# delta P : PET ##################################
#######################################################################################
##############################~~~~~~~total abundance~~~~~~~~###########################
m1.cum.best6<-lmer(LRR_cum.abs~delta.P1_rescale*PET_rescale+
(1|study:site:SiteBlock)-1,REML=T, data=Cum.abs_local, na.action="na.fail")
m1.cum.best6.sum<-summary(m1.cum.best6)
m1.cum.best6.est<-m1.cum.best6.sum$coefficients
set.seed(1234)
m1.cum.best6_boot<-bootMer(m1.cum.best6, FUN = fixef, nsim = 1000)
m1.cum.best6_boot#t1 = MAP, t2 = delta P, t3 = delta P * MAP
m1.cum.best6_ci1<-boot.ci(m1.cum.best6_boot, index =1, type = "perc")
m1.cum.best6_ci1#delta P
m1.cum.best6_ci2<-boot.ci(m1.cum.best6_boot, index =2, type = "perc")
m1.cum.best6_ci2#MAP
m1.cum.best6_ci3<-boot.ci(m1.cum.best6_boot, index =3, type = "perc")
m1.cum.best6_ci3#delta P*MAP
m7.mod1<-lmer(LRR_cum.abs_gamma~delta.P1_rescale*PET_rescale+
(1|study:site)-1, REML=F,data=Cum.abs_gamma, na.action="na.fail")
m7.sum1<-summary(m7.mod1)
m7.est2<-m7.sum1$coefficients
set.seed(1234)
m7.est2_boot<-bootMer(m7.mod1, FUN = fixef, nsim = 1000)
m7.est2_boot#effect size delta P
m7.est2_ci1<-boot.ci(m7.est2_boot, index =1, type = "perc")#delta P
m7.est2_ci1#delta P
m7.est2_ci2<-boot.ci(m7.est2_boot, index =2, type = "perc")#delta P
m7.est2_ci2##PET
# Bug fix: index was 2 (a duplicate of ci2); the delta P * PET interaction
# is the third fixed effect, so use index = 3.
m7.est2_ci3<-boot.ci(m7.est2_boot, index =3, type = "perc")
m7.est2_ci3##delta P*PET
##############################~~~~~~~species richness~~~~~~~~###########################
# local
m1.mod1<-lmer(LRR_S~PET_rescale*delta.P1_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m1.sum1<-summary(m1.mod1)
m1.est1<-m1.sum1$coefficients
m1.est1
##bootstrapping effect size and CI
set.seed(1234)
m1.est1_boot<-bootMer(m1.mod1, FUN = fixef, nsim = 1000)
m1.est1_boot#t1 = PET, t2 = delta P, t3 = elta P * PET
m1.est1_ci1<-boot.ci(m1.est1_boot, index =1, type = "perc")
m1.est1_ci1#PET
m1.est1_ci2<-boot.ci(m1.est1_boot, index =2, type = "perc")
m1.est1_ci2#deltaP
m1.est1_ci3<-boot.ci(m1.est1_boot, index =3, type = "perc")
m1.est1_ci3#PET*delta P
# turover
m3a.mod1<-lmer(LRR_betaSn~PET_rescale*delta.P1_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m3a.sum1<-summary(m3a.mod1)
m3a.est1<-m3a.sum1$coefficients
m3a.est1
###bootstrapping
set.seed(1234)
m3a.est1_boot<-bootMer(m3a.mod1, FUN = fixef, nsim = 1000)
m3a.est1_boot#effect size delta P
m3a.est1_ci1<-boot.ci(m3a.est1_boot, index =1, type = "perc")#delta P
m3a.est1_ci1#CI PET
m3a.est1_ci2<-boot.ci(m3a.est1_boot, index =2, type = "perc")#delta P
m3a.est1_ci2#CI delta
m3a.est1_ci3<-boot.ci(m3a.est1_boot, index =3, type = "perc")#delta P
m3a.est1_ci3#CI delta : PET
#site
m7.mod1<-lmer(LRR_gammaSn~PET_rescale*delta.P1_rescale+
(1|study:site)-1,REML=T,data=Div.data_gamma, na.action="na.fail")
m7.sum1<-summary(m7.mod1)
m7.est1<-m7.sum1$coefficients
##bootstrapping effect size and CI
set.seed(1234)
m7.est1_boot<-bootMer(m7.mod1, FUN = fixef, nsim = 1000)
m7.est1_boot#delta P
m7.est1_ci1<-boot.ci(m7.est1_boot, index =1, type = "perc")
m7.est1_ci1#
m7.est1_ci2<-boot.ci(m7.est1_boot, index =2, type = "perc")
m7.est1_ci2#
m7.est1_ci3<-boot.ci(m7.est1_boot, index =3, type = "perc")
m7.est1_ci3#
##############################~~~~~~~~~~evenness~~~~~~~~################################
#local
m2.mod1<-lmer(LRR_SPie~delta.P1_rescale*PET_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m2.sum1<-summary(m2.mod1)
m2.est1<-m2.sum1$coefficients
###bootstrapping
set.seed(1234)
m2.est1_boot<-bootMer(m2.mod1, FUN = fixef, nsim = 1000)
m2.est1_boot#t1 = PET, t2 = delta P, t3 = delta P * PET
# Percentile CI for the first fixed effect of the delta P * PET model.
m2.est1_ci1<-boot.ci(m2.est1_boot, index =1, type = "perc")
# Bug fix: the script printed m2.est1_ci2 here, a stale object from the
# earlier treatment-direction section; the CI just computed is m2.est1_ci1.
m2.est1_ci1#CI first coefficient
m2.est1_ci2<-boot.ci(m2.est1_boot, index =2, type = "perc")#deltaP
m2.est1_ci2#CI PET
m2.est1_ci3<-boot.ci(m2.est1_boot, index =3, type = "perc")#deltaP*PET
m2.est1_ci3#CI delta P:PET
#turnover
# Turnover-scale evenness (beta S_PIE): PET x delta P model, random intercept
# for block nested in site nested in study, no fixed intercept (-1).
m4.mod<-lmer(LRR_betaSPie~PET_rescale*delta.P1_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m4.sum<-summary(m4.mod)
m4.est<-m4.sum$coefficients
## Parametric bootstrap of the fixed effects (1000 simulations)
set.seed(1234)
m4.est_boot<-bootMer(m4.mod, FUN = fixef, nsim = 1000)
m4.est_boot# t1 = PET, t2 = delta P, t3 = PET : delta P
m4.est_ci1<-boot.ci(m4.est_boot, index =1, type = "perc")
m4.est_ci1# percentile CI for PET
m4.est_ci2<-boot.ci(m4.est_boot, index =2, type = "perc")
m4.est_ci2# percentile CI for delta P
m4.est_ci3<-boot.ci(m4.est_boot, index =3, type = "perc")
m4.est_ci3# percentile CI for the PET : delta P interaction
#site
# Site-scale (gamma) evenness: same fixed effects, site-in-study random intercept.
m6.mod<-lmer(LRR_gammaSPie~PET_rescale*delta.P1_rescale+
(1|study:site)-1,REML=T,data=Div.data_gamma, na.action="na.fail")
m6.sum<-summary(m6.mod)
m6.est<-m6.sum$coefficients
## Parametric bootstrap of the fixed effects (1000 simulations)
set.seed(1234)
m6.est_boot<-bootMer(m6.mod, FUN = fixef, nsim = 1000)
m6.est_boot# t1 = PET, t2 = delta P, t3 = PET : delta P
m6.est_ci1<-boot.ci(m6.est_boot, index =1, type = "perc")
m6.est_ci1# percentile CI for PET
m6.est_ci2<-boot.ci(m6.est_boot, index =2, type = "perc")
m6.est_ci2# percentile CI for delta P
m6.est_ci3<-boot.ci(m6.est_boot, index =3, type = "perc")
m6.est_ci3# percentile CI for the PET : delta P interaction
#######################################################################################
#################################  delta P : MAP  #####################################
#######################################################################################
##############################~~~~~~~total abundance~~~~~~~~###########################
# Local-scale cumulative abundance: delta P x MAP model with bootstrap CIs.
m1.cum.best6<-lmer(LRR_cum.abs~delta.P1_rescale*MAP_rescale+
(1|study:site:SiteBlock)-1,REML=T, data=Cum.abs_local, na.action="na.fail")
m1.cum.best6.sum<-summary(m1.cum.best6)
m1.cum.best6.est<-m1.cum.best6.sum$coefficients
set.seed(1234)
m1.cum.best6_boot<-bootMer(m1.cum.best6, FUN = fixef, nsim = 1000)
# With the no-intercept formula the coefficient order is:
# t1 = delta P, t2 = MAP, t3 = delta P : MAP
m1.cum.best6_boot
m1.cum.best6_ci1<-boot.ci(m1.cum.best6_boot, index =1, type = "perc")
m1.cum.best6_ci1# CI delta P
m1.cum.best6_ci2<-boot.ci(m1.cum.best6_boot, index =2, type = "perc")
m1.cum.best6_ci2# CI MAP
m1.cum.best6_ci3<-boot.ci(m1.cum.best6_boot, index =3, type = "perc")
m1.cum.best6_ci3# CI delta P : MAP
# Site-scale (gamma) cumulative abundance.
# NOTE(review): this model is fit with REML=F while every other model in this
# script uses REML=T -- confirm this is intentional.
m7.mod1<-lmer(LRR_cum.abs_gamma~delta.P1_rescale*MAP_rescale+
(1|study:site)-1, REML=F,data=Cum.abs_gamma, na.action="na.fail")
m7.sum1<-summary(m7.mod1)
m7.est2<-m7.sum1$coefficients
set.seed(1234)
m7.est2_boot<-bootMer(m7.mod1, FUN = fixef, nsim = 1000)
m7.est2_boot# bootstrapped fixed effects (delta P, MAP, delta P : MAP)
m7.est2_ci1<-boot.ci(m7.est2_boot, index =1, type = "perc")# delta P
m7.est2_ci1# CI delta P
m7.est2_ci2<-boot.ci(m7.est2_boot, index =2, type = "perc")# MAP
m7.est2_ci2# CI MAP
# BUG FIX: the original computed ci3 with index = 2, duplicating ci2;
# the interaction term is the THIRD bootstrapped coefficient.
m7.est2_ci3<-boot.ci(m7.est2_boot, index =3, type = "perc")# delta P : MAP
m7.est2_ci3# CI delta P : MAP
##############################~~~~~~~species richness~~~~~~~~###########################
# local
# Local-scale rarefied richness (S): MAP x delta P model, block-in-site-in-study
# random intercept, no fixed intercept (-1).
m1.mod1<-lmer(LRR_S~MAP_rescale*delta.P1_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m1.sum1<-summary(m1.mod1)
m1.est1<-m1.sum1$coefficients
m1.est1
##bootstrapping effect size and CI (parametric bootstrap of fixef, 1000 sims)
set.seed(1234)
m1.est1_boot<-bootMer(m1.mod1, FUN = fixef, nsim = 1000)
m1.est1_boot#t1 = MAP, t2 = delta P, t3 = delta P * MAP
m1.est1_ci1<-boot.ci(m1.est1_boot, index =1, type = "perc")
m1.est1_ci1#MAP
m1.est1_ci2<-boot.ci(m1.est1_boot, index =2, type = "perc")
m1.est1_ci2#delta P
m1.est1_ci3<-boot.ci(m1.est1_boot, index =3, type = "perc")
m1.est1_ci3#MAP*delta P
# turnover
m3a.mod1<-lmer(LRR_betaSn~MAP_rescale*delta.P1_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m3a.sum1<-summary(m3a.mod1)
m3a.est1<-m3a.sum1$coefficients
m3a.est1
###bootstrapping (parametric bootstrap of fixef, 1000 sims)
set.seed(1234)
m3a.est1_boot<-bootMer(m3a.mod1, FUN = fixef, nsim = 1000)
m3a.est1_boot# coefficient order follows the formula: MAP, delta P, MAP:delta P
m3a.est1_ci1<-boot.ci(m3a.est1_boot, index =1, type = "perc")# MAP
m3a.est1_ci1#CI MAP
m3a.est1_ci2<-boot.ci(m3a.est1_boot, index =2, type = "perc")# delta P
m3a.est1_ci2#CI delta P
m3a.est1_ci3<-boot.ci(m3a.est1_boot, index =3, type = "perc")# MAP:delta P
m3a.est1_ci3#CI delta P : MAP
#site
# NOTE(review): this model uses Div.data_gamma1, while every other site-scale
# model in this script uses Div.data_gamma -- confirm this is intentional.
m7.mod1<-lmer(LRR_gammaSn~MAP_rescale*delta.P1_rescale+
(1|study:site)-1,REML=T,data=Div.data_gamma1, na.action="na.fail")
m7.sum1<-summary(m7.mod1)
m7.est1<-m7.sum1$coefficients
##bootstrapping effect size and CI (parametric bootstrap of fixef, 1000 sims)
set.seed(1234)
m7.est1_boot<-bootMer(m7.mod1, FUN = fixef, nsim = 1000)
m7.est1_boot# coefficient order follows the formula: MAP, delta P, MAP:delta P
m7.est1_ci1<-boot.ci(m7.est1_boot, index =1, type = "perc")
m7.est1_ci1# CI MAP
m7.est1_ci2<-boot.ci(m7.est1_boot, index =2, type = "perc")
m7.est1_ci2# CI delta P
m7.est1_ci3<-boot.ci(m7.est1_boot, index =3, type = "perc")
m7.est1_ci3# CI MAP : delta P
##############################~~~~~~~~~~evenness~~~~~~~~################################
#local
# Local-scale evenness (S_PIE): delta P x MAP model, random intercept for
# block nested in site nested in study, no fixed intercept (-1).
m2.mod1<-lmer(LRR_SPie~delta.P1_rescale*MAP_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m2.sum1<-summary(m2.mod1)
m2.est1<-m2.sum1$coefficients
### Parametric bootstrap of the fixed effects (1000 simulations)
set.seed(1234)
m2.est1_boot<-bootMer(m2.mod1, FUN = fixef, nsim = 1000)
# With the no-intercept formula the coefficient order is:
# t1 = delta P, t2 = MAP, t3 = delta P : MAP
# (the original comments said MAT, but the covariate in this model is MAP)
m2.est1_boot
m2.est1_ci1<-boot.ci(m2.est1_boot, index =1, type = "perc")# delta P
# BUG FIX: the original printed m2.est1_ci2 here BEFORE it was (re)assigned,
# i.e. a stale object from the PET section; print ci1 instead.
m2.est1_ci1# CI delta P
m2.est1_ci2<-boot.ci(m2.est1_boot, index =2, type = "perc")# MAP
m2.est1_ci2# CI MAP
m2.est1_ci3<-boot.ci(m2.est1_boot, index =3, type = "perc")# delta P : MAP
m2.est1_ci3# CI delta P : MAP
#turnover
# Turnover-scale evenness (beta S_PIE): MAP x delta P model.
# (The original comments labelled the covariate MAT, but the model uses MAP.)
m4.mod<-lmer(LRR_betaSPie~MAP_rescale*delta.P1_rescale+
(1|study:site:SiteBlock)-1,REML=T,data=Div.data_local, na.action="na.fail")
m4.sum<-summary(m4.mod)
m4.est<-m4.sum$coefficients
## Parametric bootstrap of the fixed effects (1000 simulations)
set.seed(1234)
m4.est_boot<-bootMer(m4.mod, FUN = fixef, nsim = 1000)
m4.est_boot# t1 = MAP, t2 = delta P, t3 = MAP : delta P
m4.est_ci1<-boot.ci(m4.est_boot, index =1, type = "perc")
m4.est_ci1# percentile CI for MAP
m4.est_ci2<-boot.ci(m4.est_boot, index =2, type = "perc")
m4.est_ci2# percentile CI for delta P
m4.est_ci3<-boot.ci(m4.est_boot, index =3, type = "perc")
m4.est_ci3# percentile CI for the MAP : delta P interaction
#site
# Site-scale (gamma) evenness: same fixed effects, site-in-study random intercept.
m6.mod<-lmer(LRR_gammaSPie~MAP_rescale*delta.P1_rescale+
(1|study:site)-1,REML=T,data=Div.data_gamma, na.action="na.fail")
m6.sum<-summary(m6.mod)
m6.est<-m6.sum$coefficients
## Parametric bootstrap of the fixed effects (1000 simulations)
set.seed(1234)
m6.est_boot<-bootMer(m6.mod, FUN = fixef, nsim = 1000)
m6.est_boot# t1 = MAP, t2 = delta P, t3 = MAP : delta P
m6.est_ci1<-boot.ci(m6.est_boot, index =1, type = "perc")
m6.est_ci1# percentile CI for MAP
m6.est_ci2<-boot.ci(m6.est_boot, index =2, type = "perc")
m6.est_ci2# percentile CI for delta P
m6.est_ci3<-boot.ci(m6.est_boot, index =3, type = "perc")
m6.est_ci3# percentile CI for the MAP : delta P interaction
|
d1085343e17f97270366a1fa90f1fe5f2330b0b4
|
4d95fe5d8ba5ca6b8ced8a284905b7e455bad75e
|
/man/mwt.Rd
|
b759622d203f070d1713aaa24a49d12ed5cf7e6d
|
[] |
no_license
|
Senbee/mwt
|
68026a11c0f5c46b3f21f9d44860480523ce4f9d
|
af0134ef2df677fd246b745b3d262202c09831f2
|
refs/heads/master
| 2021-04-09T10:28:14.703984
| 2016-06-16T09:53:18
| 2016-06-16T09:53:18
| 60,255,860
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,190
|
rd
|
mwt.Rd
|
\name{mwt}
\alias{mwt}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Function to compute global FDR based on the moderated Welch test }
\description{
MWT compares two independent groups using t-test. It is designed
to deal with small-sample microarray data where the group
variances might be unequal. In small samples it is better than
either the standard t-test or its Welch version.
}
\usage{
mwt(object, grp, log.it = FALSE, localfdr = TRUE)
}
\arguments{
\item{object}{ Expression data. Either an object of class 'matrix' or 'ExpressionSet'}
\item{grp}{Numeric or factor. Group indicator variable }
\item{log.it}{Logical. Take log2 of the data prior to analysis}
\item{localfdr}{Logical. Shall the function return local FDR (fdr)? Defaults to TRUE}
}
\details{
The statistic is the mean difference divided by its standard error,
where the standard error is a weighted average of the pooled and
unpooled standard errors; the weight is the FDR of a test for equal
variances. The standard error is further penalized to avoid very
small values.
}
\value{
A list containing
\item{MWT}{Moderated Welch statistic}
\item{pvalue}{Corresponding p-values from MWT}
\item{FDR}{Estimated global FDR from the pvalues}
\item{fdr}{Estimated local FDR from the pvalues}
\item{df}{degrees of freedom of the MWT test (using central t dist)}
\item{se2.m}{Moderated standard error}
\item{d0.prior}{Estimated d0 parameter}
\item{s2.prior}{Estimated scale parameter for the standard errors}
\item{lev.stat}{Levene's test statistic}
\item{lev.FDR}{Levene's test FDR}
}
\references{
Demissie M, Mascialino B, Calza S, Pawitan Y. Unequal group variances in
microarray data analyses. Bioinformatics. 2008 May 1;24(9):1168-74.
PMID: 18344518.
Ploner A, Calza S, Gusnanto A, Pawitan Y. Multidimensional local false
discovery rate for microarray studies. Bioinformatics. 2006 Mar
1;22(5):556-65. PMID: 16368770.
}
\author{Pawitan Y and Calza S}
\note{ }
\seealso{ }
\examples{
# simulate data with unequal variance
xdat = MAsim.uneqvar(ng=10000,n1=3,n2=9)
dim(xdat)
grp <- factor(colnames(xdat))
colnames(xdat) <- paste("S",1:ncol(xdat),sep=".")
# straight run
out = mwt(xdat, grp) # get FDR from MWT
names(out)
plot(out$MWT, out$FDR)
# alternative run using ExpressionSet class
eset <- new("ExpressionSet",exprs=xdat,
phenoData=new("AnnotatedDataFrame",
data=data.frame(GRP=grp,row.names=colnames(xdat))))
out = mwt(eset, "GRP") # get FDR from MWT
plot(out$MWT, out$FDR)
### Local FDR ###
## Simulate data based on G.Smyth model
require(OCplus)
xdat = MAsim.smyth(ng=10000, p0=0.8, n1=3,n2=3) ## using Smyth model
dim(xdat)
grp <- factor(colnames(xdat))
colnames(xdat) <- paste("S",1:ncol(xdat),sep=".")
# straight run
out = mwt(xdat, grp) # get global FDR and local fdr from MWT
## local fdr behaves like fdr2d:
stat = tstatistics(xdat, grp, logse=TRUE)
plot(stat$tstat, stat$logse)
pick = out$fdr<0.1
points(stat$tstat[pick], stat$logse[pick], col='red', pch=16)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{}
\keyword{}% __ONLY ONE__ keyword per line
|
50df8183ee37ade4feee537ebb265d8344cfa20f
|
f4462380035ce0099fdfbcc8864cb5a8066344ff
|
/R/kdistance.R
|
f01e88bf9c9ab5b7182c6a94128472687ba047f6
|
[
"MIT"
] |
permissive
|
happyjane/TPCA
|
a58432bbcae4ed87916b7a47e6c39ade97c9d420
|
f232197318a76b96a1be5979cf9d374b7441288e
|
refs/heads/master
| 2021-01-18T08:22:52.491921
| 2015-03-04T09:12:25
| 2015-03-04T09:12:25
| 31,596,254
| 1
| 0
| null | 2015-03-03T12:02:52
| 2015-03-03T12:02:50
| null |
UTF-8
|
R
| false
| false
| 342
|
r
|
kdistance.R
|
# Plot sorted k-distance curves for k = 2, 3, 4 on one set of axes.
dist2 <- read.table(file="./distancegraph2")
dist2 <- sort(dist2$V1, decreasing=TRUE)
dist3 <- read.table(file="./distancegraph3")
dist3 <- sort(dist3$V1, decreasing=TRUE)
dist4 <- read.table(file="./distancegraph4")
dist4 <- sort(dist4$V1, decreasing=TRUE)
# Fix the y-range up front so the k=3 and k=4 curves are not clipped by the
# plotting region established for the k=2 curve alone.
plot(dist2, ylim = range(dist2, dist3, dist4))
# BUG FIX: points() always adds to the current plot and has no 'add'
# argument -- passing add=TRUE only produced
# '"add" is not a graphical parameter' warnings.
points(dist3, col="red")
points(dist4, col="green")
|
f6673d2c7edf436dfaf83c0b4b6580e8693db974
|
11e9a640ad60972f0a1ff2fb8509ec998059ccb0
|
/R/GetTissueTimes.R
|
6b342989de263263d835a47f357bab1f50e9eade
|
[
"MIT"
] |
permissive
|
jakeyeung/TissueCiradianAnalysis
|
f53f6a65e1e5489e6ee9c465a612c1cce108d256
|
6c29a33820c8c0ab6dabbd992cc2412b199fc7af
|
refs/heads/master
| 2020-09-21T05:58:35.578267
| 2020-08-07T16:35:13
| 2020-08-07T16:35:13
| 224,702,276
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,596
|
r
|
GetTissueTimes.R
|
# Functions to get tissue and times: new after column fixed to array.
# Jake Yeung
# Dec 4 2014
# GetTissueTimes.R
GetTissues <- function(samp.names, get_unique=TRUE){
  # Extract tissue labels from sample names of the form "WFAT48":
  # the tissue is everything except the trailing two time-code characters.
  # Returns unique tissues by default, one entry per input otherwise.
  drop_time_code <- function(nm) substr(nm, 1, nchar(nm) - 2)
  tissues <- unlist(lapply(samp.names, drop_time_code))
  if (!get_unique) {
    return(tissues)
  }
  unique(tissues)
}
GetTimes <- function(samp.names, get_unique=TRUE){
  # Extract numeric time codes from sample names of the form "WFAT48":
  # the time is the last two characters. Unique values by default.
  last_two <- function(nm) substr(nm, nchar(nm) - 1, nchar(nm))
  times <- unlist(lapply(samp.names, last_two))
  if (!get_unique) {
    return(as.numeric(times))
  }
  as.numeric(unique(times))
}
GetTissues.merged <- function(samp.names){
  # Extract tissue labels from merged sample names of the form
  # "Kidney60.array": strip the ".suffix", then drop the two time digits.
  # Returns one (non-unique) tissue per input name.
  unlist(lapply(samp.names, function(full_name){
    base_name <- strsplit(full_name, "[.]")[[1]][[1]]
    substr(base_name, 1, nchar(base_name) - 2)
  }))
}
GetTimes.merged <- function(samp.names){
  # Extract numeric time codes from merged sample names of the form
  # "Kidney60.array": strip the ".suffix", then take the last two characters.
  # Returns one (non-unique) time per input name.
  raw_times <- unlist(lapply(samp.names, function(full_name){
    base_name <- strsplit(full_name, "[.]")[[1]][[1]]
    substr(base_name, nchar(base_name) - 1, nchar(base_name))
  }))
  as.numeric(raw_times)
}
GetExperiments.merged <- function(samp.names){
  # Extract the experiment suffix from merged sample names of the form
  # "Kidney60.array" -> "array". Returns one entry per input name.
  experiments <- unlist(lapply(samp.names, function(samp.name.full){
    strsplit(samp.name.full, "[.]")[[1]][[2]]
  }))
  # BUG FIX: the original ended on the assignment itself, so the value was
  # returned invisibly; return it visibly/explicitly instead.
  experiments
}
|
dcd44b1f1db5bd70ff460c5b0bf5e2721e21cda0
|
8d119d59dedf7994c9b14a637fc069d3a3e0494c
|
/RPackageSource/man/RcometsAnalytics.Rd
|
2a424bd0f30e44d19c50c4fa8fd4a596203fe0c5
|
[] |
no_license
|
CBIIT/R-cometsAnalytics
|
3f77bf818050eefbcef4e5a74a5cdab0e17157dc
|
bc0da393319a7fc2ec53275f9545d24b982faabc
|
refs/heads/master
| 2023-08-24T04:34:16.888409
| 2023-08-04T20:28:26
| 2023-08-04T20:28:26
| 64,139,217
| 8
| 9
| null | 2023-09-13T18:43:04
| 2016-07-25T14:02:36
|
HTML
|
UTF-8
|
R
| false
| true
| 1,267
|
rd
|
RcometsAnalytics.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/imports.R
\docType{package}
\name{RcometsAnalytics}
\alias{RcometsAnalytics}
\title{RcometsAnalytics R package}
\description{
This R package supports all cohort-specific analyses of
the COMETS consortium \url{https://www.comets-analytics.org/}.
Data are not saved in the system; output must be downloaded
and submitted for meta-analyses.
}
\details{
\bold{Functions for analysis:} \cr
\code{\link{runCorr}} (correlation analysis) \cr
\code{\link{runModel}} (correlation, glm or lm) \cr
\code{\link{runAllModels}} (run models in batch mode from models sheet) \cr
\bold{Functions for graphics:} \cr
\code{\link{plotVar}} (metabolite variance distribution plot) \cr
\code{\link{plotMinvalues}} (distribution of missing values) \cr
\code{\link{showHeatmap}} (heat map of metabolite correlations) \cr
\code{\link{showHClust}} (interactive heat map with hierarchical clustering) \cr
\bold{Functions for saving results to files:} \cr
\code{\link{OutputCSVResults}} (write to .csv file) \cr
\code{\link{OutputXLSResults}} (write to excel file) \cr
\code{\link{OutputListToExcel}} (write list of data frames to excel file with multiple sheets) \cr
}
|
2b750b5eb3f9f6fcb03881d4acfd2690bf801f4c
|
6c908c2811e529f272747cbbfdca41812d825672
|
/NLP/lemmatizing.R
|
de544d832a6f5ea7e44c63bb397a880913e3d115
|
[] |
no_license
|
basbaccarne/mict
|
63109fda346537cabc56d96666712bfdc4c06274
|
4876b84ba76a71058e3aa8d5cb09164b82ada3e6
|
refs/heads/master
| 2021-01-12T10:31:35.378659
| 2018-03-06T13:24:57
| 2018-03-06T13:24:57
| 76,471,942
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,237
|
r
|
lemmatizing.R
|
# Lemmatization with koRpus and TreeTagger (Dutch).
# Installation (first time only):
#   install TreeTagger: https://www.youtube.com/watch?v=SYMc2SllI0c
#   download "http://www.cis.uni-muenchen.de/~schmid/tools/TreeTagger/data/dutch-par-linux-3.2-utf8.bin.gz"
#   put the file dutch-utf8.par in the treetagger/lib folder
#   library(devtools)
#   install_github("unDocUMeantIt/koRpus", ref="develop", force = TRUE)
#   install.packages("https://reaktanz.de/R/src/contrib/koRpus.lang.nl_0.01-3.tar.gz")
library("koRpus")
library("koRpus.lang.nl")
# Options: point koRpus at the local TreeTagger install and set the language
set.kRp.env(TT.cmd="manual", TT.options=list(
path="c://treetagger",
preset="nl"),
lang="nl")
# Lemma function (passes the TreeTagger folder and language explicitly)
lemmatize <- function(text){
tagged.txt <- treetag(text, format="obj", TT.options=list(path="c://TreeTagger", preset="nl"))
tagged.txt
}
# Run the lemmatizer.
# NOTE(review): txtobject is not created in this script -- it must be
# defined in the workspace before this point.
lemmatize(txtobject)
# related functions
taggedText(lemmatize(txtobject)) # gives a data.frame of the tagged text
description(lemmatize(txtobject)) # gives a report on some basic text analytics
# more information: https://cran.r-project.org/web/packages/koRpus/vignettes/koRpus_vignette.pdf
|
33250099ce62dfe74311eea51a7534fc6cdcaae8
|
a60775370c58e09cf935769f38c9ad3903c8ec80
|
/plot3.R
|
cc1d54120ec0277505103a74dd33b706f15d3533
|
[] |
no_license
|
connectomania/ExData_Plotting2
|
4f8a3d91ea6ba91e33f938ee6a57df484e66dab9
|
fd4568917fa7a3832d367dff488e18dab06a55cd
|
refs/heads/master
| 2020-12-03T04:02:55.762085
| 2017-06-29T18:54:10
| 2017-06-29T18:54:10
| 95,808,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,518
|
r
|
plot3.R
|
#**************************************************************************
# 3. Use ggplot2 to see which source types (point, nonpoint, onroad, ####
#    nonroad) have increased vs. decreased in Baltimore from 1999-2008
#**************************************************************************
# Read the NEI data only if it is not already in the workspace (it is large)
if(!exists("nei")) {
  nei <- readRDS("./summarySCC_PM25.rds")
  names(nei) <- tolower(names(nei))
}
suppressMessages(library(dplyr))
# Total emissions (in kilotons) by source type for Baltimore City
# (fips == "24510"), comparing 1999 and 2008
baltimore <- nei %>%
  filter(fips=="24510") %>%
  filter(year==1999 | year==2008) %>%
  mutate(year = as.factor(year)) %>%
  group_by(type, year) %>%
  # summarize_at() is superseded in dplyr; summarise the single column directly
  summarise(emissions = sum(emissions)) %>%
  mutate(emissions = round(emissions/1000, digits=2))
# Plot emissions changes from 1999-2008 by source type using ggplot2 system
suppressMessages(library(ggplot2))
png("./plot3.png", width=480, height=480)
baltgg <- ggplot(baltimore, aes(x=year, y=emissions, group=type))
baltgg +
  geom_line(aes(color=type)) +
  geom_point(aes(color=type)) +
  labs(y = "Emissions (kilotons)", x = "Year") +
  labs(title = "Baltimore emissions by source type 1999 & 2008",
       subtitle = "(kilotons)") +
  theme(plot.title = element_text(hjust = 0.5),
        plot.subtitle = element_text(hjust = 0.5))
dev.off()
|
bdc7c73c23ccb0cf291327b1a8214c86d536f3e7
|
d9b9b90973145241af8e7f843d6b8e2f79ac1779
|
/Differential_gene_expression_analyses/DE_analyses_and_plots.R
|
8a66e184ca53af34490ee0c1857da51f97daf5c1
|
[] |
no_license
|
wenhaotan208/Monarch_RNA-Seq
|
89e7c9587a98d525202483a667591ea9157bdfd6
|
440fd15d7efce8b9089db051af176f59cd9ce3af
|
refs/heads/master
| 2020-05-16T00:40:12.124981
| 2019-04-22T03:07:43
| 2019-04-22T03:07:43
| 182,585,493
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,624
|
r
|
DE_analyses_and_plots.R
|
#### Differential Gene Expression analysis ####
# DGE analyses were performed using Bioconductor package "EdgeR"
# Generate MA plots, Volcano plots, and Heatmaps
# Also test for specific gene sets of interest (immune and detoxification)
# Reference[1] https://web.stanford.edu/class/bios221/labs/rnaseq/lab_4_rnaseq.html
# Reference[2] http://combine-australia.github.io/RNAseq-R/index.html
# Reference[3] http://www.nathalievilla.org/doc/html/solution-edgeR-rnaseq.html
### load libraries
library(edgeR)
library(RColorBrewer)
library(gplots)
library(limma)
library(grid)
### Import datasets----
# Merged expression count table (genes x samples), generated previously
count_data <- read.table("Merged_counts_all.txt", header = T)
sample_id <- colnames(count_data)
# Sample information table (read as: one row per sample)
samples <- read.table("Sample_info_all.txt", header = T) # input sample information
samples <- samples[match(sample_id, samples$ID), ] # reorder rows to match the count-table columns
samples$ID == sample_id # manual sanity check: prints a logical vector (should be all TRUE)
colnames(count_data) <- samples$sample_name # replace IDs with sample names
# Index vectors for the two tissue subsets
gut <- which(samples$tissue == "g") # gut tissue only
body <- which(samples$tissue == "w") # body tissue only
# List of monarch immune genes (gene IDs in column 1)
IG_list <- read.table("Immune_gene_list.txt", header = F)
# Lists of monarch detoxification genes (gene IDs in column 1)
CYP <- read.table("CYP_genelist.txt", header = F) # cytochrome P450
UGT <- read.table("UGT_genelist.txt", header = F) # UDP-glycosyltransferase
ABC <- read.table("ABC_genelist.txt", header = F) # ABC transporter
GST <- read.table("GST_genelist.txt", header = F) # glutathione S-transferase
### DGE estimation and exploratory graphs ----
## All in a function
DGE_estimation <- function(SUB, EXPLORE, outdir){
  # Run the edgeR pre-processing pipeline on a subset of the count data:
  # subset -> filter -> TMM normalize -> estimate dispersions -> fit NB GLM.
  #
  # SUB     = integer indices of the samples to use (gut or body subset)
  # EXPLORE = logical; if TRUE, write exploratory QC figures into outdir
  # outdir  = output directory for the figures, relative to current_dir
  #
  # Returns a list with:
  #   groups    - treatment x plant grouping factor
  #   DGE_norm  - TMM-normalized DGEList
  #   DGE_final - DGEList with common/trended/tagwise dispersions
  #   fit       - negative-binomial GLM fit (glmFit)
  #
  # NOTE(review): depends on the globals count_data, samples and current_dir
  # being defined before this function is called.
  ## 1) Subset the data: either gut or body
  data_sub <- count_data[, SUB]
  samples_sub <- samples[SUB,]
  print("Sample info:")
  print(samples_sub)
  ## 2) Create a data object (DGEList format) for edgeR analysis
  DGE <- DGEList(counts = data_sub)
  # Grouping factor: treatment x plant combination
  groups <- as.factor(paste(samples_sub$treat, samples_sub$plant, sep = "_"))
  ## 3) Filtering and normalizing the data
  print("Total gene counts per sample:")
  print(apply(DGE$counts, 2, sum))
  # Keep genes with cpm > 0 in at least two samples
  keep <- rowSums(cpm(DGE) > 0) >= 2
  DGE <- DGE[keep,]
  # Reset library sizes after filtering
  DGE$samples$lib.size <- colSums(DGE$counts)
  # Calculate scaling factors by the TMM method
  DGE_norm <- calcNormFactors(DGE)
  ## 4) Data exploration (optional QC figures)
  if (isTRUE(EXPLORE)){
    setwd(paste0(current_dir, "/", outdir))
    # BUG FIX: the original tried to restore the working directory with a
    # setwd(current_dir) placed AFTER return(), which is unreachable code;
    # restore it on exit instead (this also runs if plotting errors out).
    on.exit(setwd(current_dir), add = TRUE)
    ## Quality plots
    logcounts_un <- cpm(DGE,log = TRUE)       # log2 CPM, unnormalized
    logcounts_nor <- cpm(DGE_norm,log = TRUE) # log2 CPM, TMM-normalized
    # Check distributions of unnormalized samples
    png(file="Quality_plot_before_normalization.png")
    boxplot(logcounts_un, xlab = "", ylab = expression(Log[2]("Counts per Million")), las = 2)
    abline(h = median(logcounts_un),col = "blue") # median logCPM
    title("Boxplots of LogCPMs (unnormalized)")
    dev.off()
    # Check distributions of normalized samples
    png(file="Quality_plot_after_normalization.png")
    boxplot(logcounts_nor, xlab = "", ylab = expression(Log[2]("Counts per Million")), las = 2)
    abline(h = median(logcounts_nor),col = "blue") # median logCPM
    title("Boxplots of LogCPMs (normalized)")
    dev.off()
    ## MDS plot
    colors <- c("red4", "firebrick1", "blue4", "dodgerblue1")
    png(file = "MDS_plot.png", width = 440, height = 480)
    plotMDS(DGE_norm, method="bcv", col = colors[as.factor(groups)],
            main = "Multidimensional scaling plot for samples",
            cex.lab = 1.3, cex.axis = 1.2, cex.main = 1.5, cex = 0.8)
    legend("topright", levels(groups), col = colors, pch=15, cex = 1.2)
    dev.off()
    ## Heatmap of the 500 most variable genes (by logCPM variance)
    var_genes <- apply(logcounts_nor, 1, var)
    select_var <- names(sort(var_genes, decreasing=TRUE))[1:500]
    highly_variable_lcpm <- logcounts_nor[select_var,]
    color <- brewer.pal(11,"RdYlBu")
    color_heat <- colorRampPalette(color)
    col.group <- colors[as.factor(groups)] # per-sample group colors
    png(file = "heatmap.png", width = 500, height = 400)
    par(oma = c(0.5,3,0.5,0.5), xpd = TRUE)
    heatmap.2(highly_variable_lcpm,col = rev(color_heat(50)),trace = "none",
              main = "", margin = c(5,6), labRow = "",
              ColSideColors = col.group, scale = "row",
              key.par = list(mgp = c(1.6, 0.5, 0), mar = c(3, 2.5, 3, 1), cex = 0.7, cex.lab = 1.3, cex.main = 1.2))
    legend(0.2, 1.2, levels(groups), col = colors, pch=15, cex = 1.0, horiz = TRUE)
    dev.off()
    par(oma = c(0, 0, 0, 0), xpd = FALSE)
  }
  ## 5) Estimating dispersion using GLMs
  # Create a design matrix with one column per group (no intercept)
  design.mat <- model.matrix(~ 0 + groups)
  # Estimate common, trended, and tagwise dispersions
  DGE_final <- estimateDisp(DGE_norm, design.mat)
  if (isTRUE(EXPLORE)){
    png(file = "glm_dispersion.png")
    plotBCV(DGE_final, main = "Estimated Dispersion by GLM")
    dev.off()
  }
  ## 6) Differential expression: fit the negative-binomial GLM
  fit <- glmFit(DGE_final, design.mat)
  # See the available comparisons (design-matrix column names)
  print("Comparisons:")
  print(colnames(fit))
  ## Return the grouping factor, normalized data, dispersions and GLM fit
  RESULTS <- list(groups = groups, DGE_norm = DGE_norm, DGE_final = DGE_final, fit = fit)
  return(RESULTS)
}
### DE analyses and graphs -----
## All in a function
DE_analysis <- function(comp, outdir, FIGMODE, FIGLAB){
# comp: 1 = inf in all, 2 = inf in inc, 3 = inf in cur, 4 = plants
# outdir: output directory
# FIGMODE: 1 = MA+volcano plots; 2 = heatmap; F = N/A
# FIGLAB: figure labels
setwd(paste0(current_dir, "/", outdir))
## 1) Likihood-Ratio Tests
# comp: 1 = inf in all, 2 = inf in inc, 3 = inf in cur, 4 = plants
if (comp == 1){
LRT <- glmLRT(fit, contrast = c(1,1,-1,-1)) # inf vs uninf (in both plants)
} else if (comp == 2){
LRT <- glmLRT(fit, contrast = c(0,1,0,-1)) # inf vs uninf (in inc)
} else if (comp == 3){
LRT <- glmLRT(fit, contrast = c(1,0,-1,0)) # inf vs uninf (in cur)
} else if (comp == 4){
LRT <- glmLRT(fit, contrast = c(1,-1,1,-1)) # for cur vs inc (in both trts)
}
## 2) Summary of num. DE genes
DE <- decideTestsDGE(LRT, adjust.method = "BH", p.value = 0.05)
print("Summary of differentially expressed genes:")
print(summary(DE)) # -1 = down-regulatedl 0 = non-diff; 1 = up-regulated
## 3) MA plot for DE genes with FDR < 0.05
detags <- rownames(DGE_final)[as.logical(DE)] # DE genes
# customize the FC axis label
if (comp == 4){
FC_axis <- expression(paste("Log"[2], " (cur:inc)"))
} else {
FC_axis <- expression(paste("Log"[2], " (Inf:Uninf)"))
}
# plot
if (FIGMODE == F){
png(file = "smear_plot.png")
}
if (FIGMODE != 2){
par(mar = c(4,4.3,5,3))
plotSmear(LRT, de.tags=detags,
ylab = FC_axis,
xlab = expression(paste("Log"[2], " average expression (CPM)")),
cex.lab = 1.3, cex.axis = 1.2,
ylim = c(-25, 15)
)
abline(h = c(-1, 1), col = "blue", lty = 5) # line indicating +-1 fold change
# Add title, grid line, figure number for graphic purposes
if (FIGMODE != F){
mtext(substitute(bold(x), list(x = FIGLAB[1])), side = 3, adj = -0.25, line = 0.8, cex = 1.3)
if (FIGLAB[1] %in% c("(A)", "(C)")){
X <- grconvertX(17, "user", "ndc")
grid.lines(x = X, y = c(0.01, 0.99), gp = gpar(col = "darkgray", lty = 2, lwd = 2))
}
if (FIGLAB[1] == "(A)"){
title(expression(bold(underline("Infected vs. Uninfected"))), cex.main = 1.6, line = 3.5)
} else if (FIGLAB[1] == "(B)"){
title(expression(underline(paste(bolditalic("A. curassavica "), bold("vs. "), bolditalic("A. incarnata")))), cex.main = 1.6, line = 3.5)
}
}
}
if (FIGMODE == F){
dev.off()
}
## 4) Volcano plot
voc <- topTags(LRT, n = nrow(LRT)) # all genes
voc_color <- numeric(nrow(voc)) # colors
for (i in 1:nrow(voc)){
if(voc$table$logFC[i] >= 2 & voc$table$FDR[i] < 0.05){
voc_color[i] = "red"
} else if (voc$table$logFC[i] <= -2 & voc$table$FDR[i] < 0.05){
voc_color[i] = "blue"
} else {
voc_color[i] = "black"
}
}
# plot
if (FIGMODE == F){
png(file = "volcano_plot.png")
}
if (FIGMODE != 2){
par(mar = c(4,4.3,5,3))
plot(voc$table$logFC, -log10(voc$table$FDR), pch=19, cex=0.3, col = voc_color,
xlab = FC_axis,
ylab = expression(paste("-Log"[10], " (P-value)"))
, ylim = c(0, 5), xlim = c(-20, 15),
cex.lab = 1.3, cex.axis = 1.2
)
abline(h = -log10(0.05), lty = 2)
abline(v = -2, lty = 3)
abline(v = 2, lty = 3)
# Add figure number for graphic purposes
if (FIGMODE != F){
mtext(substitute(bold(x), list(x = FIGLAB[2])), side = 3, adj = -0.25, line = 0.8, cex = 1.3)
}
}
if (FIGMODE == F){
dev.off()
}
## 5) HeatMap on only DE genes
logcounts_nor <- cpm(DGE_norm,log=TRUE) # Get log2 CPM for normalized samples
var_genes <- apply(logcounts_nor, 1, var) # estimate var. for each row in the logcounts
# Get the gene names for only the DE genes
if (length(detags) >= 250){
select_var_DE <- intersect(names(sort(var_genes, decreasing = TRUE)), detags)[1:250] # first 250 genes
} else {
select_var_DE <- intersect(names(sort(var_genes, decreasing=TRUE)), detags) # all DE genes
}
highly_variable_lcpm_DE <- logcounts_nor[select_var_DE,] # Subset the matrix
colors <- c("red4", "firebrick1", "blue4", "dodgerblue1")
color <- brewer.pal(11,"RdYlBu")
color_heat <- colorRampPalette(color)
col.group <- colors[as.factor(groups)] # colors for groups
# Plot the heatmap
if (length(detags) > 10){ # no need to plot if having too few genes
if (FIGMODE == F){
png(file="heatmap_DE.png", width = 500, height = 400)
}
if (FIGMODE != 1){
par(oma = c(0.5,3,0.5,0.5), xpd = T)
heatmap.2(highly_variable_lcpm_DE,col=rev(color_heat(50)),trace="none",
main="", margin = c(5,6), labRow = "",
ColSideColors=col.group, scale="row",
key.par = list(mgp = c(1.6, 0.5, 0), mar = c(3, 2.5, 3, 1), cex = 0.7, cex.lab = 1.3, cex.main = 1.2))
legend(0.3, 1.2, levels(groups), col = colors, pch=15, cex = 1.0, horiz = T)
}
# Add figure number for graphic purposes
if (FIGMODE == 2){
mtext(substitute(bold(x), list(x = FIGLAB[1])), side = 3, adj = -0.2, line = 3)
}
if (FIGMODE == F){
dev.off()
}
par(oma = c(0, 0, 0, 0), xpd = F)
}
## 6) Print out the top 15 up-reduated and down-regulated genes
LRT_all <- topTags(LRT, n = nrow(count_data), sort.by = "PValue") # all genes
sig_LRT_all <- as.data.frame(LRT_all[which(LRT_all$table$FDR < 0.05),]) # sig. DE genes
Pos_sig <- sig_LRT_all[which(sig_LRT_all$logFC > 0), ] # up-reg
Neg_sig <- sig_LRT_all[which(sig_LRT_all$logFC < 0), ] # down-reg
upreg_top_15 <- Pos_sig[order(Pos_sig$FDR, decreasing = F), ][1:15, ] # top 15 up-reg
downreg_top_15 <- Neg_sig[order(Neg_sig$FDR, decreasing = F), ][1:15, ] # top 15 down-reg
print("Top 15 up-regulated genes:")
print(upreg_top_15)
print("Top 15 down-regulated genes:")
print(downreg_top_15)
#output
if (FIGMODE == F){
write.table(rownames(Pos_sig), file = "DE_genes_upreg.txt",sep = "\t", row.names = F, col.names = F, quote = F) # all sig. upreg. genes (for GO analysis)
write.table(rownames(Neg_sig), file = "DE_genes_downreg.txt", sep = "\t", row.names = F, col.names = F, quote = F) # all sig. downreg. genes (for GO analysis)
write.table(upreg_top_15, file = "upreg_top_15.txt", sep = "\t") # top 15 upreg.
write.table(downreg_top_15, file = "downreg_top_15.txt", sep = "\t") # top 15 downreg.
}
## 7) Find immune genes among all DE genes
LRT_all <- topTags(LRT, n = nrow(count_data), sort.by = "PValue") # all genes
sig_LRT_all <- as.data.frame(LRT_all[which(LRT_all$table$FDR < 0.05),]) # sig. DE genes
DE_genes <- rownames(sig_LRT_all)
DE_IG_genes <- sig_LRT_all[intersect(DE_genes, IG_list[,1]),] # find intersections between the two
print("DE immune genes:")
print(DE_IG_genes)
# output
if (FIGMODE == F){
write.table(DE_IG_genes, file = "Immune_DE_list.txt", sep = "\t")
}
## 8) Find detoxification genes (only for plant comparisons)
if (comp == 4){
# expressed genes
exp_genes <- rownames(DGE_norm$counts) # defined as this
print("Total expressed genes:")
print(length(exp_genes)) # no. of expressed genes
# Chose the 3 importnat canonical detox. genes Based on Bimbaum et al 2017 Mol Ecol
# CYPs
print("Expressed CYP genes:")
print(length(intersect(exp_genes, CYP[,1]))) # expressed CYPs
sig_CYP <- sig_LRT_all[intersect(DE_genes, CYP[,1]),] # find matchings
print("Up-reg. CYP genes:")
print(length(which(sig_CYP$logFC > 0))) # sig. up-reg. genes
print("Down-reg. CYP genes:")
print(length(which(sig_CYP$logFC < 0))) # sig. down-reg. genes
# UGTs
print("Expressed UGT genes:")
print(length(intersect(exp_genes, UGT[,1]))) # expressed UGTs
sig_UGT <- sig_LRT_all[intersect(DE_genes, UGT[,1]),] # find matchings
print("Up-reg. UGT genes:")
print(length(which(sig_UGT$logFC > 0))) # sig. up-reg. genes
print("Down-reg. UGT genes:")
print(length(which(sig_UGT$logFC < 0))) # sig. down-reg. genes
# ABCs
print("Expressed ABC genes:")
print(length(intersect(exp_genes, ABC[,1]))) # expressed ABCs
sig_ABC <- sig_LRT_all[intersect(DE_genes, ABC[,1]),] # find matchings
print("Up-reg. ABC genes:")
print(length(which(sig_ABC$logFC > 0))) # sig. up-reg. genes
print("Down-reg. ABC genes:")
print(length(which(sig_ABC$logFC < 0))) # sig. down-reg. genes
# GSTs
print("Expressed GST genes:")
print(length(intersect(exp_genes, GST[,1]))) # expressed GSTs
sig_GST <- sig_LRT_all[intersect(DE_genes, GST[,1]),] # find matchings
print("Up-reg. GST genes:")
print(length(which(sig_GST$logFC > 0))) # sig. up-reg. genes
print("Down-reg. GST genes:")
print(length(which(sig_GST$logFC < 0))) # sig. down-reg. genes
# outputs
if (FIGMODE == F){
write.table(sig_CYP, file = "CYP_DE_list.txt", sep = "\t")
write.table(sig_UGT, file = "UGT_DE_list.txt", sep = "\t")
write.table(sig_ABC, file = "ABC_DE_list.txt", sep = "\t")
write.table(sig_GST, file = "GST_DE_list.txt", sep = "\t")
}
}
# change back the wd
setwd(current_dir)
}
### Run the function for gut samples ----
# NOTE(review): DE_analysis() appears to read `groups`, `DGE_norm`, `DGE_final`
# and `fit` from the global environment, so the four assignments below must
# precede each batch of DE_analysis() calls -- confirm against the function body.
OUT <- DGE_estimation(SUB = gut, EXPLORE = T, outdir = "DE_results/Gut")
groups <- OUT$groups
DGE_norm <- OUT$DGE_norm
DGE_final <- OUT$DGE_final
fit <- OUT$fit
# comp selects the contrast (output dirs suggest 1-3 = infection contrasts,
# 4 = plant contrast; comp == 4 also triggers the detox-gene summary in DE_analysis).
DE_analysis(comp = 1, outdir = "DE_results/Gut/Inf_in_all", FIGMODE = F, FIGLAB = "")
DE_analysis(comp = 2, outdir = "DE_results/Gut/Inf_in_INC", FIGMODE = F, FIGLAB = "")
DE_analysis(comp = 3, outdir = "DE_results/Gut/Inf_in_CUR", FIGMODE = F, FIGLAB = "")
DE_analysis(comp = 4, outdir = "DE_results/Gut/Plant_in_all", FIGMODE = F, FIGLAB = "")
### Run the function for body samples ----
OUT <- DGE_estimation(SUB = body, EXPLORE = T, outdir = "DE_results/Body")
groups <- OUT$groups
DGE_norm <- OUT$DGE_norm
DGE_final <- OUT$DGE_final
fit <- OUT$fit
DE_analysis(comp = 1, outdir = "DE_results/Body/Inf_in_all", FIGMODE = F, FIGLAB = "")
DE_analysis(comp = 2, outdir = "DE_results/Body/Inf_in_INC", FIGMODE = F, FIGLAB = "")
DE_analysis(comp = 3, outdir = "DE_results/Body/Inf_in_CUR", FIGMODE = F, FIGLAB = "")
DE_analysis(comp = 4, outdir = "DE_results/Body/Plant_in_all", FIGMODE = F, FIGLAB = "")
### Generate Figures ----
# FIGMODE controls plotting inside DE_analysis: FALSE writes per-result png
# files, 1 draws panels into an already-open device (no heatmap branch),
# 2 draws the heatmap plus a bold panel label (FIGLAB).
## Fig. 3 (MA plots and volcano plots for gut samples)
tiff(file = "FIGURE3.tif", width = 2400, height = 2400, res = 300)
OUT <- DGE_estimation(SUB = gut, EXPLORE = F, outdir = "")
groups <- OUT$groups
DGE_norm <- OUT$DGE_norm
DGE_final <- OUT$DGE_final
fit <- OUT$fit
# 2x2 panel layout: infection contrast (comp = 1) and plant contrast (comp = 4).
layout(matrix(c(1,2,3,4), 2, 2))
DE_analysis(comp = 1, outdir = "", FIGMODE = 1, FIGLAB = c("(A)", "(C)"))
DE_analysis(comp = 4, outdir = "", FIGMODE = 1, FIGLAB = c("(B)", "(D)"))
dev.off()
## Fig. 4 (MA plots and volcano plots for body samples)
tiff(file = "FIGURE4.tif", width = 2400, height = 2400, res = 300)
OUT <- DGE_estimation(SUB = body, EXPLORE = F, outdir = "")
groups <- OUT$groups
DGE_norm <- OUT$DGE_norm
DGE_final <- OUT$DGE_final
fit <- OUT$fit
layout(matrix(c(1,2,3,4), 2, 2))
DE_analysis(comp = 1, outdir = "", FIGMODE = 1, FIGLAB = c("(A)", "(C)"))
DE_analysis(comp = 4, outdir = "", FIGMODE = 1, FIGLAB = c("(B)", "(D)"))
dev.off()
## Fig. 5 (heatmaps for gut and body samples)
tiff(file = "FIGURE5A.tif", width = 2400, height = 1600, res = 300)
OUT <- DGE_estimation(SUB = gut, EXPLORE = F, outdir = "")
groups <- OUT$groups
DGE_norm <- OUT$DGE_norm
DGE_final <- OUT$DGE_final
fit <- OUT$fit
DE_analysis(comp = 4, outdir = "", FIGMODE = 2, FIGLAB = "(A)")
dev.off()
tiff(file = "FIGURE5B.tif", width = 2400, height = 1600, res = 300)
OUT <- DGE_estimation(SUB = body, EXPLORE = F, outdir = "")
groups <- OUT$groups
DGE_norm <- OUT$DGE_norm
DGE_final <- OUT$DGE_final
fit <- OUT$fit
DE_analysis(comp = 4, outdir = "", FIGMODE = 2, FIGLAB = "(B)")
dev.off()
|
e970de1ee43e294e98351b23e48b61606c9e0b50
|
097eb75714cc53fb6c17bd89631879f2515f1714
|
/man/recurse_node.Rd
|
d12271a770d582d85cd643e07fb2997ecb074240
|
[] |
no_license
|
hafen/osfr
|
41bf1598acce3d08279ad0214c368596c49cadf1
|
01d22d8681fc2fe405b9ee6099f24ad376a2a143
|
refs/heads/master
| 2021-01-20T01:32:37.739614
| 2017-06-16T12:17:39
| 2017-06-16T12:17:39
| 89,292,987
| 0
| 2
| null | 2017-06-09T18:26:54
| 2017-04-24T22:31:30
|
R
|
UTF-8
|
R
| false
| true
| 516
|
rd
|
recurse_node.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nodes.R
\name{recurse_node}
\alias{recurse_node}
\title{Function to crawl through OSF project}
\usage{
recurse_node(id = NULL, private = FALSE, maxdepth = 5)
}
\arguments{
\item{id}{OSF parent ID (osf.io/xxxx) to crawl}
\item{private}{Boolean, should private nodes be searched as well?}
\item{maxdepth}{Integer, maximum number of levels to crawl}
}
\value{
List of OSF ids, with the parent node as the very last element.
}
\description{
Function to crawl through OSF project
}
|
ba724f395832a3bd8e85ba76cbff1bcf466f0f59
|
0611363f7a2fa5bf07dd6185c9f04619f234e87f
|
/Workshop - plot.R
|
b168b9d8d554d15fd69ed620eeab278cfc1cfd4e
|
[] |
no_license
|
mighster/Basic_Stats_Scripts
|
0b32cfae1ee892ad5af8571004e245f0cf0c25e7
|
211cb6d47b7308c23e2f84d0dcc42a4a414a4086
|
refs/heads/master
| 2020-04-04T18:41:43.469404
| 2018-11-05T07:15:22
| 2018-11-05T07:15:22
| 156,174,423
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 795
|
r
|
Workshop - plot.R
|
## 3Dplot
## Exploratory 2D and 3D scatter plots of seedling XYZ point data.
## http://www.sthda.com/english/wiki/scatterplot3d-3d-graphics-r-software-and-data-visualization
## http://www.sthda.com/english/wiki/impressive-package-for-3d-and-4d-graph-r-software-and-data-visualization#install-plot3d-package
## https://cran.r-project.org/web/packages/plot3D/plot3D.pdf
## To load interact with XYZ data:
library(plot3D)
library(rgl)
# NOTE(review): setwd() hard-codes a machine-specific path; prefer relative paths.
setwd("D:/3D data analysis - interpretation/seedling")
# Despite the .csv extension the file is parsed as tab-separated -- TODO confirm delimiter.
mydata <- read.csv("Seedling - model2.csv", sep = "\t")
# Extract the first three columns as numeric X/Y/Z coordinates.
# ([[ ]] replaces the former mydata[1:nrow(mydata), i], which is redundant and
# misbehaves on zero-row input because 1:0 yields c(1, 0).)
X <- as.numeric(mydata[[1]])
Y <- as.numeric(mydata[[2]])
Z <- as.numeric(mydata[[3]])
# Pairwise 2D projections of the point cloud.
plot(X, Y)
plot(X, Z)
plot(Y, Z)
## Excel example here
## or with more insights: interactive 3D views.
scatter3D(X, Y, Z)
plot3d(mydata, alpha = 0.1)
plot3d(mydata)
|
e9dd95a4b97bf1118b7f42367020856363da4aee
|
165f9b48b6bf83d5940d83008eb6464df61bb7c7
|
/others/PopSV/automatedPipeline.R
|
22d5725872d2ce011c4f61848852fa4aa15edcc7
|
[] |
no_license
|
jmonlong/reppopsv
|
3ee9d903fc4dec2a356f5211e45b15ae2207c6f4
|
21682d4e630b8f8502e0664a090892421cd40efd
|
refs/heads/master
| 2021-04-28T16:19:18.518580
| 2018-11-02T00:08:52
| 2018-11-02T00:08:52
| 122,011,014
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,713
|
r
|
automatedPipeline.R
|
# Usage banner printed when this script is sourced.
message(" /!\ Might need to be tweaked /!\ ")
message("Two functions :
- 'autoGCcounts' to count BC in each sample.
- 'autoNormTest' to normalize and test all the samples.
")
## Step driver: annotate bins with GC content, then compute per-sample bin
## counts and GC-correct them, each step as a BatchJobs registry on a cluster.
##
## Args:
##   files.f         RData file containing `files.df` (loaded below).
##   bins.f          RData file containing `bins.df`.
##   redo            integer vector; listed step registries are wiped first.
##   sleep           polling interval (seconds) for waitForJobs().
##   status          if TRUE, print registry status after each step.
##   file.suffix     suffix appended to registry/step names.
##   lib.loc         library location passed to library(PopSV, lib.loc=...).
##   other.resources extra resources appended to every submitJobs() call.
##   skip            integer vector; listed steps are not (re)submitted.
##   step.walltime   walltime in hours for steps 1 and 2 (":0:0" appended).
##   step.cores      cores requested for steps 1 and 2.
autoGCcounts <- function(files.f, bins.f, redo=NULL, sleep=180, status=FALSE, file.suffix="", lib.loc=NULL, other.resources=NULL, skip=NULL, step.walltime=c(2,20), step.cores=c(1,1)){
load(files.f)
step.walltime = paste0(step.walltime, ":0:0")
message("\n== 1) Get GC content in each bin.\n")
stepName = paste0("getGC",file.suffix)
if(any(redo==1)) unlink(paste0(stepName, "-files"), recursive=TRUE)
reg <- makeRegistry(id=stepName, seed=123)
# Only submit if the step is not skipped and the registry is empty (fresh/wiped).
if(!any(skip==1) & length(findJobs(reg))==0){
# Worker: load bins.df, annotate with GC content, save back in place.
# NOTE(review): getGC.f captures `lib.loc` from this function's environment
# rather than receiving it via more.args -- confirm BatchJobs ships the
# closure environment to worker nodes.
getGC.f <- function(imF){
load(imF)
library(PopSV, lib.loc=lib.loc)
bins.df = getGC.hg19(bins.df)
save(bins.df, file=imF)
}
batchMap(reg, getGC.f,bins.f)
submitJobs(reg, findJobs(reg), resources=c(list(walltime=step.walltime[1], nodes="1", cores=step.cores[1]), other.resources))
waitForJobs(reg, sleep=sleep)
}
# Retry any expired or never-submitted jobs once, then fail hard if still incomplete.
if(length(findJobs(reg))!=length(findDone(reg))){
showStatus(reg)
if(length(findExpired(reg))>0){
message("Re-submitting ", findExpired(reg))
submitJobs(reg, findExpired(reg), resources=c(list(walltime=step.walltime[1], nodes="1", cores=step.cores[1]), other.resources))
}
if(length(findNotSubmitted(reg))>0){
message("Re-submitting ", findNotSubmitted(reg))
submitJobs(reg, findNotSubmitted(reg), resources=c(list(walltime=step.walltime[1], nodes="1", cores=step.cores[1]), other.resources))
}
waitForJobs(reg, sleep=sleep)
if(length(findJobs(reg))!=length(findDone(reg))) stop("Not done yet or failed, see for yourself")
}
if(status) showStatus(reg)
message("\n== 2) Get bin counts in each sample and correct for GC bias.\n")
stepName = paste0("getBC",file.suffix)
if(any(redo==2)) unlink(paste0(stepName, "-files"), recursive=TRUE)
reg <- makeRegistry(id=stepName, seed=123)
if(!any(skip==2) & length(findJobs(reg))==0){
# Worker: count reads per bin for one sample (optionally merging a second
# BAM from a `bam2` column), then GC-correct the counts.
# NOTE(review): also closes over `lib.loc` (see note above).
getBC.f <- function(file.i, bins.f, files.df){
library(PopSV, lib.loc=lib.loc)
load(bins.f)
bam.f = files.df$bam[file.i]
if("bam2" %in% colnames(files.df)) bam.f = c(bam.f, files.df$bam2[file.i])
bb.o = bin.bam(bam.f, bins.df, files.df$bc[file.i])
correct.GC(files.df$bc.gz[file.i], bins.df, files.df$bc.gc[file.i])
bb.o
}
batchMap(reg, getBC.f,1:nrow(files.df), more.args=list(bins.f=bins.f, files.df=files.df))
submitJobs(reg, findJobs(reg), resources=c(list(walltime=step.walltime[2], nodes="1", cores=step.cores[2]), other.resources))
waitForJobs(reg, sleep=sleep)
}
# Same retry-once-then-fail policy as step 1.
if(length(findJobs(reg))!=length(findDone(reg))){
showStatus(reg)
if(length(findExpired(reg))>0){
message("Re-submitting ", findExpired(reg))
submitJobs(reg, findExpired(reg), resources=c(list(walltime=step.walltime[2], nodes="1", cores=step.cores[2]), other.resources))
}
if(length(findNotSubmitted(reg))>0){
message("Re-submitting ", findNotSubmitted(reg))
submitJobs(reg, findNotSubmitted(reg), resources=c(list(walltime=step.walltime[2], nodes="1", cores=step.cores[2]), other.resources))
}
waitForJobs(reg, sleep=sleep)
if(length(findJobs(reg))!=length(findDone(reg))) stop("Not done yet or failed, see for yourself")
}
if(status) showStatus(reg)
## load(bins.f)
## quick.count(files.df, bins.df, col.files="bc.gc.gz", nb.rand.bins=1e3)
}
## Step driver: sample QC, reference normalization, Z-score computation and
## abnormal-coverage calling, each step as a BatchJobs registry on a cluster.
##
## Args (beyond those shared with autoGCcounts):
##   rewrite      if TRUE, regenerate the "ref*-bc-norm.tsv"/"ref*-norm-stats.tsv" outputs.
##   loose        if TRUE run the FDR .05 calling branch (step 6) instead of step 5.
##   norm         normalization mode forwarded to tn.norm().
##   ref.samples  optional subset of `files.df$sample` used as reference set.
##   FDR.th       FDR threshold for the strict calling step.
##   step.walltime/step.cores  per-step (1..6) cluster resources.
## Returns: data.frame of calls, rbind of the last calling step's results.
## NOTE(review): `skip` is only consulted for step 2 here; the other steps rely
## solely on the registry being non-empty -- confirm this is intentional.
autoNormTest <- function(files.f, bins.f, redo=NULL, rewrite=FALSE, sleep=180, status=FALSE, loose=FALSE, file.suffix="", lib.loc=NULL, other.resources=NULL, norm=c("1pass","trim"), ref.samples=NULL, FDR.th=.001, step.walltime=c(10,12,6,6,1,1), step.cores=c(6,1,3,1,1,1), skip=NULL){
load(files.f)
step.walltime = paste0(step.walltime, ":0:0")
message("\n== 1) Sample QC and reference definition.\n")
bc.ref.f = paste0("bc-gcCor",file.suffix,".tsv")
sampQC.pdf.f = paste0("sampQC",file.suffix,".pdf")
stepName = paste0("sampQC",file.suffix)
if(any(redo==1)) unlink(paste0(stepName, "-files"), recursive=TRUE)
reg <- makeRegistry(id=stepName, seed=123)
if(length(findJobs(reg))==0){
# Restrict the QC to an explicit reference set when one is provided.
if(!is.null(ref.samples)){
files.ref = subset(files.df, sample %in% ref.samples)
} else {
files.ref = files.df
}
# Worker: run qc.samples() and capture its diagnostic plots in a PDF.
sampQC.f <- function(bc.all.f, bins.f, files.df, sampQC.pdf.f, lib.loc){
load(bins.f)
library(PopSV, lib.loc=lib.loc)
pdf(sampQC.pdf.f)
qc.o = qc.samples(files.df, bins.df, bc.all.f, nb.cores=6, nb.ref.samples=200)
dev.off()
qc.o
}
batchMap(reg, sampQC.f,bc.ref.f, more.args=list(bins.f=bins.f, files.df=files.ref, sampQC.pdf.f=sampQC.pdf.f, lib.loc=lib.loc))
submitJobs(reg, 1, resources=c(list(walltime=step.walltime[1], nodes="1", cores=step.cores[1]), other.resources))
waitForJobs(reg, sleep=sleep)
}
# Retry any expired or never-submitted jobs once, then fail hard if still incomplete.
if(length(findJobs(reg))!=length(findDone(reg))){
showStatus(reg)
if(length(findExpired(reg))>0){
message("Re-submitting ", findExpired(reg))
submitJobs(reg, findExpired(reg), resources=c(list(walltime=step.walltime[1], nodes="1", cores=step.cores[1]), other.resources))
}
if(length(findNotSubmitted(reg))>0){
message("Re-submitting ", findNotSubmitted(reg))
submitJobs(reg, findNotSubmitted(reg), resources=c(list(walltime=step.walltime[1], nodes="1", cores=step.cores[1]), other.resources))
}
waitForJobs(reg, sleep=sleep)
if(length(findJobs(reg))!=length(findDone(reg))) stop("Not done yet or failed, see for yourself")
}
# Persist the QC result; it defines the reference samples and control sample.
samp.qc.o = loadResult(reg, 1)
save(samp.qc.o, file=paste0(stepName,".RData"))
if(status) showStatus(reg)
message("\n== 2) Reference sample normalization.\n")
stepName = paste0("bcNormTN",file.suffix)
if(any(redo==2)) unlink(paste0(stepName, "-files"), recursive=TRUE)
reg <- makeRegistry(id=stepName, seed=123)
if(!any(skip==2) & length(findJobs(reg))==0){
load(bins.f)
# Lazily add chunk annotations to bins.df (and persist) the first time through.
if(all(colnames(bins.df)!="sm.chunk")){
bins.df = chunk.bin(bins.df, bg.chunk.size=5e5, sm.chunk.size=5e3)
save(bins.df, file=bins.f)
}
# Worker: targeted normalization (tn.norm) of one small chunk, reading only
# the bins belonging to the enclosing background chunk.
bcNormTN.f <- function(chunk.id, file.bc, file.bin, cont.sample, lib.loc, norm){
load(file.bin)
library(PopSV, lib.loc=lib.loc)
bc.df = read.bedix(file.bc, subset(bins.df, bg.chunk==subset(bins.df, sm.chunk==chunk.id)$bg.chunk[1]))
tn.norm(bc.df, cont.sample, bins=subset(bins.df, sm.chunk==chunk.id)$bin, norm=norm, force.diff.chr=TRUE)
}
batchMap(reg, bcNormTN.f,unique(bins.df$sm.chunk), more.args=list(file.bc=samp.qc.o$bc, file.bin=bins.f,cont.sample=samp.qc.o$cont.sample, lib.loc=lib.loc, norm=norm))
submitJobs(reg, findJobs(reg) , resources=c(list(walltime=step.walltime[2], nodes="1", cores=step.cores[2]), other.resources))
waitForJobs(reg, sleep=sleep)
}
# Retry once, then fail hard.
if(length(findJobs(reg))!=length(findDone(reg))){
showStatus(reg)
if(length(findExpired(reg))>0){
message("Re-submitting ", findExpired(reg))
submitJobs(reg, findExpired(reg), resources=c(list(walltime=step.walltime[2], nodes="1", cores=step.cores[2]), other.resources))
}
if(length(findNotSubmitted(reg))>0){
message("Re-submitting ", findNotSubmitted(reg))
submitJobs(reg, findNotSubmitted(reg), resources=c(list(walltime=step.walltime[2], nodes="1", cores=step.cores[2]), other.resources))
}
waitForJobs(reg, sleep=sleep)
if(length(findJobs(reg))!=length(findDone(reg))) stop("Not done yet or failed, see for yourself")
}
## Write normalized bin counts and reference metrics
# Stream chunk results into the two TSV outputs (header only on first append).
out.files = paste(paste0("ref",file.suffix), c("bc-norm.tsv", "norm-stats.tsv"), sep="-")
if(rewrite | all(!file.exists(out.files))){
if(any(file.exists(out.files))){
tmp = file.remove(out.files[which(file.exists(out.files))])
}
tmp = reduceResultsList(reg, fun=function(res, job){
write.table(res$bc.norm, file=out.files[1], sep="\t", row.names=FALSE, quote=FALSE, append=file.exists(out.files[1]), col.names=!file.exists(out.files[1]))
write.table(res$norm.stats, file=out.files[2], sep="\t", row.names=FALSE, quote=FALSE, append=file.exists(out.files[2]), col.names=!file.exists(out.files[2]))
})
}
if(status) showStatus(reg)
message("\n== 3) Compute Z-scores in reference samples.\n")
stepName = paste0("zRef",file.suffix)
if(any(redo==3)) unlink(paste0(stepName, "-files"), recursive=TRUE)
reg <- makeRegistry(id=stepName, seed=123)
if(length(findJobs(reg))==0){
# Worker: one multi-core job computing Z-scores across reference samples.
zRef.f <- function(bc.f, files.df, ns.f, lib.loc, nb.cores){
library(PopSV, lib.loc=lib.loc)
z.comp(bc.f=bc.f, norm.stats.f=ns.f, files.df=files.df, nb.cores=nb.cores, z.poisson=TRUE, chunk.size=1e4)
}
batchMap(reg, zRef.f,out.files[1], more.args=list(files.df=files.df, ns.f=out.files[2], lib.loc=lib.loc, nb.cores=step.cores[3]))
submitJobs(reg, 1, resources=c(list(walltime=step.walltime[3], nodes="1", cores=step.cores[3]), other.resources))
waitForJobs(reg, sleep=sleep)
}
# Retry once, then fail hard.
if(length(findJobs(reg))!=length(findDone(reg))){
showStatus(reg)
if(length(findExpired(reg))>0){
message("Re-submitting ", findExpired(reg))
submitJobs(reg, findExpired(reg), resources=c(list(walltime=step.walltime[3], nodes="1", cores=step.cores[3]), other.resources))
}
if(length(findNotSubmitted(reg))>0){
message("Re-submitting ", findNotSubmitted(reg))
submitJobs(reg, findNotSubmitted(reg), resources=c(list(walltime=step.walltime[3], nodes="1", cores=step.cores[3]), other.resources))
}
waitForJobs(reg, sleep=sleep)
if(length(findJobs(reg))!=length(findDone(reg))) stop("Not done yet or failed, see for yourself")
}
if(status) showStatus(reg)
message("\n== 4) Normalization and Z-score computation for other samples.\n")
stepName = paste0("zOthers",file.suffix)
if(any(redo==4)) unlink(paste0(stepName, "-files"), recursive=TRUE)
reg <- makeRegistry(id=stepName, seed=123)
if(length(findJobs(reg))==0){
# Worker: normalize and test one non-reference sample against the reference set.
callOthers.f <- function(samp, cont.sample, files.df, norm.stats.f, bc.ref.f, lib.loc){
library(PopSV, lib.loc=lib.loc)
tn.test.sample(samp, files.df, cont.sample, bc.ref.f, norm.stats.f, z.poisson=TRUE, aberrant.cases=FALSE)
}
batchMap(reg, callOthers.f,setdiff(files.df$sample, samp.qc.o$ref.samples), more.args=list(cont.sample=samp.qc.o$cont.sample, files.df=files.df, norm.stats.f=out.files[2], bc.ref.f=samp.qc.o$bc, lib.loc=lib.loc))
submitJobs(reg, findJobs(reg), resources=c(list(walltime=step.walltime[4], nodes="1", cores=step.cores[4]), other.resources))
waitForJobs(reg, sleep=sleep)
}
# Retry once, then fail hard.
if(length(findJobs(reg))!=length(findDone(reg))){
showStatus(reg)
if(length(findExpired(reg))>0){
message("Re-submitting ", findExpired(reg))
submitJobs(reg, findExpired(reg), resources=c(list(walltime=step.walltime[4], nodes="1", cores=step.cores[4]), other.resources))
}
if(length(findNotSubmitted(reg))>0){
message("Re-submitting ", findNotSubmitted(reg))
submitJobs(reg, findNotSubmitted(reg), resources=c(list(walltime=step.walltime[4], nodes="1", cores=step.cores[4]), other.resources))
}
waitForJobs(reg, sleep=sleep)
if(length(findJobs(reg))!=length(findDone(reg))) stop("Not done yet or failed, see for yourself")
}
if(status) showStatus(reg)
# Calling: strict branch (step 5, FDR.th) unless loose=TRUE (step 6, FDR .05).
if(!loose){
message("\n== 5) Calling abnormal bin.\n")
stepName = paste0("call",file.suffix)
if(any(redo==5)) unlink(paste0(stepName, "-files"), recursive=TRUE)
reg <- makeRegistry(id=stepName, seed=123)
if(length(findJobs(reg))==0){
# Worker: call abnormal coverage for one sample, stitching consecutive bins.
abCovCallCases.f <- function(samp, files.df, norm.stats.f, bins.f, stitch.dist, lib.loc, FDR.th){
library(PopSV, lib.loc=lib.loc)
load(bins.f)
call.abnormal.cov(files.df=files.df, samp=samp, out.pdf=paste0(samp,"-sdest-abCovCall.pdf"), FDR.th=FDR.th, merge.cons.bins="stitch", z.th="sdest", norm.stats=norm.stats.f, stitch.dist=stitch.dist, gc.df=bins.df, min.normal.prop=.6)
}
batchMap(reg, abCovCallCases.f, files.df$sample, more.args=list(files.df=files.df, norm.stats.f=out.files[2], bins.f=bins.f, stitch.dist=5e3, lib.loc=lib.loc, FDR.th=FDR.th))
submitJobs(reg, findJobs(reg) , resources=c(list(walltime=step.walltime[5], nodes="1", cores=step.cores[5]), other.resources))
waitForJobs(reg, sleep=sleep)
}
# Retry once, then fail hard.
if(length(findJobs(reg))!=length(findDone(reg))){
showStatus(reg)
if(length(findExpired(reg))>0){
message("Re-submitting ", findExpired(reg))
submitJobs(reg, findExpired(reg), resources=c(list(walltime=step.walltime[5], nodes="1", cores=step.cores[5]), other.resources))
}
if(length(findNotSubmitted(reg))>0){
message("Re-submitting ", findNotSubmitted(reg))
submitJobs(reg, findNotSubmitted(reg), resources=c(list(walltime=step.walltime[5], nodes="1", cores=step.cores[5]), other.resources))
}
waitForJobs(reg, sleep=sleep)
if(length(findJobs(reg))!=length(findDone(reg))) stop("Not done yet or failed, see for yourself")
}
if(status) showStatus(reg)
} else {
message("\n== 6) Calling abnormal bin with loose threshold.\n")
stepName = paste0("callLoose",file.suffix)
if(any(redo==6)) unlink(paste0(stepName, "-files"), recursive=TRUE)
reg <- makeRegistry(id=stepName, seed=123)
if(length(findJobs(reg))==0){
# Worker: loose calling (FDR .05) with per-sample output directory.
abCovCallCases.f <- function(samp, files.df, norm.stats.f, bins.f, stitch.dist, lib.loc){
library(PopSV, lib.loc=lib.loc)
load(bins.f)
# NOTE(review): `project` is computed but never used below.
project = subset(files.df, sample==samp)$project
call.abnormal.cov(files.df=files.df, samp=samp, out.pdf=paste0(samp,"/",samp,"-sdest-abCovCall.pdf"), FDR.th=.05, merge.cons.bins="stitch", z.th="sdest", norm.stats=norm.stats.f, stitch.dist=stitch.dist, gc.df=bins.df, min.normal.prop=.6)
}
batchMap(reg, abCovCallCases.f, files.df$sample, more.args=list(files.df=files.df, norm.stats.f=out.files[2], bins.f=bins.f, stitch.dist=5e3, lib.loc=lib.loc))
submitJobs(reg, findJobs(reg) , resources=c(list(walltime=step.walltime[6], nodes="1", cores=step.cores[6]), other.resources))
waitForJobs(reg, sleep=sleep)
}
# Retry once, then fail hard.
if(length(findJobs(reg))!=length(findDone(reg))){
showStatus(reg)
if(length(findExpired(reg))>0){
message("Re-submitting ", findExpired(reg))
submitJobs(reg, findExpired(reg), resources=c(list(walltime=step.walltime[6], nodes="1", cores=step.cores[6]), other.resources))
}
if(length(findNotSubmitted(reg))>0){
message("Re-submitting ", findNotSubmitted(reg))
submitJobs(reg, findNotSubmitted(reg), resources=c(list(walltime=step.walltime[6], nodes="1", cores=step.cores[6]), other.resources))
}
waitForJobs(reg, sleep=sleep)
if(length(findJobs(reg))!=length(findDone(reg))) stop("Not done yet or failed, see for yourself")
}
if(status) showStatus(reg)
}
# Combine all per-sample call tables from whichever calling step ran last.
res.df = do.call(rbind, reduceResultsList(reg))
return(res.df)
}
## Utility driver for occasional tasks, selected by `do`:
##   do=1  quick coverage counts over random bins,
##   do=2  split the reference normalized-BC file into per-sample files,
##   do=3  order/compress/index the reference normalized-BC file.
## Each task runs as a single BatchJobs job; returns that job's result.
autoExtra <- function(files.f, bins.f, do=NULL, redo=NULL, sleep=180, status=FALSE, file.suffix="", lib.loc=NULL, other.resources=NULL, step.walltime=c(6,6,6), step.cores=c(3,2, 3), col.files="bc.gc.gz", bc.ref.f="ref-bc-norm.tsv", seed.c=123){
load(files.f)
step.walltime = paste0(step.walltime, ":0:0")
# `do` is mandatory: fail early with the list of valid options.
if(is.null(do)){
stop("Option for 'do=': '1' quick counts, '2' split ref bc norm, '3' order/compress/index ref bc norm.")
}
if(do==1){
message("\n== 1) Quick counts.\n")
stepName = paste0("quickCount",file.suffix)
if(any(redo==1)) unlink(paste0(stepName, "-files"), recursive=TRUE)
reg <- makeRegistry(id=stepName, seed=seed.c)
if(length(findJobs(reg))==0){
# Worker: coverage summary on a random subset of bins.
quickCount.f <- function(col.files, bins.f, files.df, lib.loc, nb.cores){
load(bins.f)
library(PopSV, lib.loc=lib.loc)
quick.count(files.df, bins.df, col.files=col.files, nb.rand.bins=1e3, nb.cores=nb.cores)
}
batchMap(reg, quickCount.f,col.files, more.args=list(bins.f=bins.f, files.df=files.df, lib.loc=lib.loc, nb.cores=step.cores[1]))
submitJobs(reg, 1, resources=c(list(walltime=step.walltime[1], nodes="1", cores=step.cores[1]), other.resources))
waitForJobs(reg, sleep=sleep)
}
# Retry any expired or never-submitted jobs once, then fail hard.
if(length(findJobs(reg))!=length(findDone(reg))){
showStatus(reg)
if(length(findExpired(reg))>0){
message("Re-submitting ", findExpired(reg))
submitJobs(reg, findExpired(reg), resources=c(list(walltime=step.walltime[1], nodes="1", cores=step.cores[1]), other.resources))
}
if(length(findNotSubmitted(reg))>0){
message("Re-submitting ", findNotSubmitted(reg))
submitJobs(reg, findNotSubmitted(reg), resources=c(list(walltime=step.walltime[1], nodes="1", cores=step.cores[1]), other.resources))
}
waitForJobs(reg, sleep=sleep)
if(length(findJobs(reg))!=length(findDone(reg))) stop("Not done yet or failed, see for yourself")
}
if(status) showStatus(reg)
return(loadResult(reg, 1))
}
if(do==2){
# The reference file must exist before it can be split.
if(!file.exists(bc.ref.f)){
stop(bc.ref.f, " not found. Check 'bc.ref.f=' parameter.")
}
message("\n== 2) Split BC norm in ref samples.\n")
stepName = paste0("splitRef",file.suffix)
if(any(redo==2)) unlink(paste0(stepName, "-files"), recursive=TRUE)
reg <- makeRegistry(id=stepName, seed=seed.c)
if(length(findJobs(reg))==0){
# Worker: write per-sample files from the pooled reference BC file.
# NOTE(review): the `bc.ref` argument is ignored; the body closes over
# `bc.ref.f` from autoExtra's environment -- confirm this resolves on
# worker nodes.
splitRef.f <- function(bc.ref, files.df, lib.loc){
library(PopSV, lib.loc=lib.loc)
write.split.samples(list(bc=bc.ref.f), files.df, files.col="bc.gc.norm", reorder=TRUE)
}
batchMap(reg, splitRef.f, bc.ref.f, more.args=list(files.df=files.df, lib.loc=lib.loc))
submitJobs(reg, 1, resources=c(list(walltime=step.walltime[2], nodes="1", cores=step.cores[2]), other.resources))
waitForJobs(reg, sleep=sleep)
}
# Retry once, then fail hard.
if(length(findJobs(reg))!=length(findDone(reg))){
showStatus(reg)
if(length(findExpired(reg))>0){
message("Re-submitting ", findExpired(reg))
submitJobs(reg, findExpired(reg), resources=c(list(walltime=step.walltime[2], nodes="1", cores=step.cores[2]), other.resources))
}
if(length(findNotSubmitted(reg))>0){
message("Re-submitting ", findNotSubmitted(reg))
submitJobs(reg, findNotSubmitted(reg), resources=c(list(walltime=step.walltime[2], nodes="1", cores=step.cores[2]), other.resources))
}
waitForJobs(reg, sleep=sleep)
if(length(findJobs(reg))!=length(findDone(reg))) stop("Not done yet or failed, see for yourself")
}
if(status) showStatus(reg)
return(loadResult(reg, 1))
}
if(do==3){
if(!file.exists(bc.ref.f)){
stop(bc.ref.f, " not found. Check 'bc.ref.f=' parameter.")
}
message("\n== 3) Order, compress and index BC norm in reference samples.\n")
stepName = paste0("compIndRef",file.suffix)
if(any(redo==3)) unlink(paste0(stepName, "-files"), recursive=TRUE)
reg <- makeRegistry(id=stepName, seed=seed.c)
if(length(findJobs(reg))==0){
# Worker: sort/bgzip/index the reference BC file.
# NOTE(review): like splitRef.f, `bc.ref` is ignored in favor of the
# closed-over `bc.ref.f`.
compIndRef.f <- function(bc.ref, lib.loc){
library(PopSV, lib.loc=lib.loc)
comp.index.files(bc.ref.f, reorder=TRUE)
}
batchMap(reg, compIndRef.f, bc.ref.f, more.args=list(lib.loc=lib.loc))
submitJobs(reg, 1, resources=c(list(walltime=step.walltime[3], nodes="1", cores=step.cores[3]), other.resources))
waitForJobs(reg, sleep=sleep)
}
# Retry once, then fail hard.
if(length(findJobs(reg))!=length(findDone(reg))){
showStatus(reg)
if(length(findExpired(reg))>0){
message("Re-submitting ", findExpired(reg))
submitJobs(reg, findExpired(reg), resources=c(list(walltime=step.walltime[3], nodes="1", cores=step.cores[3]), other.resources))
}
if(length(findNotSubmitted(reg))>0){
message("Re-submitting ", findNotSubmitted(reg))
submitJobs(reg, findNotSubmitted(reg), resources=c(list(walltime=step.walltime[3], nodes="1", cores=step.cores[3]), other.resources))
}
waitForJobs(reg, sleep=sleep)
if(length(findJobs(reg))!=length(findDone(reg))) stop("Not done yet or failed, see for yourself")
}
if(status) showStatus(reg)
return(loadResult(reg, 1))
}
}
|
eb2a8ef938a92f6160a12337417560f257a19b42
|
47cf5c6525d3a0c4ba63fd4930b6682194a80753
|
/plot1.R
|
c26dd7fc45798234f68e0408abb6b4ba836523d7
|
[] |
no_license
|
sameerq/ExData_Plotting1
|
6f8fd6a5d937457be776201aca4512fde0c66dca
|
0509bb99d8a7cfb700725bd6994bb40a944ee676
|
refs/heads/master
| 2021-01-14T12:10:27.408506
| 2016-01-11T05:43:44
| 2016-01-11T05:43:44
| 49,402,190
| 0
| 0
| null | 2016-01-11T04:33:21
| 2016-01-11T04:33:21
| null |
UTF-8
|
R
| false
| false
| 726
|
r
|
plot1.R
|
#Data has been downloaded and unzipped into working directory as a .txt file
#This stores the data and takes the target dates we want.
# na.strings="?" maps the dataset's "?" missing-value marker to NA so the
# power column is read as numeric instead of character.
mydata <- read.table('household_power_consumption.txt', header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".", na.strings="?")
mydata$Date <- as.Date(mydata$Date, format="%d/%m/%Y")
startDate <- as.Date("01/02/2007", format="%d/%m/%Y")
endDate <- as.Date("02/02/2007", format="%d/%m/%Y")
# Keep only the two target days (inclusive on both ends).
mydata <- mydata[mydata$Date >= startDate & mydata$Date <= endDate, ]
#Now we make a graph out of the data
png(filename="plot1.png", width=480, height=480)
globalActivePower <- as.numeric(mydata$Global_active_power)
hist(globalActivePower, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
# Close the png device so plot1.png is actually written to disk
# (the original script left the device open, producing an empty/locked file).
dev.off()
|
19394a65c7c3fcbf53aa90444b1084dc7885178f
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/3869_0/rinput.R
|
4730fef94d22dbef808e822aefeb45ea32e9b735
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Unroot the phylogeny stored in "3869_0.txt" and write the result to a new file.
library(ape)
phylo_rooted <- read.tree("3869_0.txt")
phylo_unrooted <- unroot(phylo_rooted)
write.tree(phylo_unrooted, file = "3869_0_unrooted.txt")
|
6191f311039e52be987cefd2dd4e494cf9fc6d36
|
a820f4282433b3e5fd12d7d2e003d4bfd5afd639
|
/plot1.R
|
07f8589f6eee9593f26ecd7227c8ee8d92ef429c
|
[] |
no_license
|
Duangrat-praj/ExData_Plotting1
|
3438763f91a5f3a054e971ca06441917c5b90037
|
1b320414d7b51e928586c2db4bc1deff77ab9b8e
|
refs/heads/master
| 2022-10-26T16:35:47.989037
| 2020-06-14T16:25:16
| 2020-06-14T16:25:16
| 272,239,671
| 0
| 0
| null | 2020-06-14T16:23:21
| 2020-06-14T16:23:21
| null |
UTF-8
|
R
| false
| false
| 570
|
r
|
plot1.R
|
# Plot 1: histogram of global active power for 1-2 Feb 2007.
# The nine listed columns are read as-is (no factor conversion); "?" marks NA.
power_data <- read.table(
  "./household_power_consumption.txt",
  sep = ";", header = TRUE,
  as.is = c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage",
            "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
  na.strings = "?"
)
# Keep only the two target days.
feb_days <- subset(power_data, Date %in% c("1/2/2007", "2/2/2007"))
# Render the histogram straight to a png file.
png(file = "plot1.png", width = 480, height = 480)
hist(feb_days$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
4c041b1da1c976754b0ef08ea24796315e8a5cb0
|
62cd477be5b35bf8efac2f222dcd179354a9554d
|
/scripts/funding-sources.R
|
eee95e424f0bbb62da8a52c3cc36b5631763aa87
|
[] |
no_license
|
lisicase/info201
|
ed514ebb46f7af3212926519fe48e64fa8374366
|
9671c451ce710eea8e3081356956dbeb915d413e
|
refs/heads/master
| 2020-06-26T22:53:12.193422
| 2019-10-11T17:47:21
| 2019-10-11T17:47:21
| 199,779,450
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 938
|
r
|
funding-sources.R
|
# Load packages
library("dplyr")
library("ggplot2")
library("stringr")
# Create a smoothed line graph of federal/state/local funding over time for
# one state.
#
# Args:
#   years: numeric length-2 vector; only years strictly between years[1] and
#          years[2] are plotted (exclusive bounds, kept from the original).
#   state: state name in title case (e.g. "Washington").
# Returns: a ggplot object.
funding_graph <- function(years, state) {
  # Load aggregated data
  funding_data <- read.csv("data/prepped/aggregate.csv",
                           stringsAsFactors = FALSE)
  # Narrow data to specified years for given state and reshape for plotting.
  # NOTE: gather() lives in tidyr, which this script never attaches -- call it
  # with an explicit namespace so the function works as written.
  plot_data <- funding_data %>%
    filter(str_to_title(State.Name) == state) %>%
    filter(Year > years[1],
           Year < years[2]) %>%
    mutate("Federal Revenue" = Federal.Revenue,
           "State Revenue" = State.Revenue,
           "Local Revenue" = Local.Revenue) %>%
    select(Year, "Federal Revenue", "State Revenue", "Local Revenue") %>%
    tidyr::gather("Source", "Funding", 2:4)
  # Create the plot; plain aes() replaces the deprecated aes_string().
  ggplot(
    data = plot_data,
    mapping = aes(x = Year, y = Funding, color = Source)
  ) +
    geom_smooth(se = FALSE)
}
|
68a3c31a6b172b31be16ad201adaf0d0cc313bc9
|
0cb61321d6cfc04ea536eec02d74d238259ec018
|
/code/plot-revbayes/script-StochMapp-Plotting_with_simmap.R
|
9c399d6c874ab9de744a38a55ebb061c8361843c
|
[
"MIT"
] |
permissive
|
asmeseguer/aquatic_transition
|
7ad5c3af7b2308b398247c816320b07317943e0d
|
618a716639118b789f2a2eeab52cd41f2feac878
|
refs/heads/main
| 2023-08-17T21:11:59.633413
| 2021-09-24T09:27:06
| 2021-09-24T09:27:06
| 362,422,087
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,047
|
r
|
script-StochMapp-Plotting_with_simmap.R
|
#####################
# This script could be used to plot the ancestral states of stochastic character mapping HiSSE. Adapted from RevBayes Manual
#####################
# Dependencies: phytools (read.simmap / plotSimmap / add.simmap.legend)
# and plotrix.
library(plotrix)
library(phytools)
# Marginal character-state tree written by the RevBayes stochastic-mapping run
character_file = "output/stochastic-mapping/marginal_character.tree"
sim2 = read.simmap(file=character_file, format="phylip")
#ladderize.simmap(sim2,right=TRUE)->sim2
####################
# Define colors for 4 character states
####################
# There are 4 states in our HiSSE analysis (0=1A, 1=2A, 2=1b, 3=2B), including the observed characters (1= terrestrial, 2= aquatic) and the hidden states (A, B). Therefore, in this case, terrestrial is represented by 0, 2, aquatic by 1, 3.
# Collect every state label that appears on any mapped branch of the tree
colors = vector()
for (i in 1:length( sim2$maps ) ) {
colors = c(colors, names(sim2$maps[[i]]) )
}
colors = sort(as.numeric(unique(colors)))
colors
# Initial rainbow palette, one color per state (overridden just below)
cols = setNames( rainbow(length(colors), start=0.0, end=0.9), colors)
cols
## change colors to assign the same color to hidden states A and B of each character state 1 and 2.
# States 0/2 (terrestrial) share one color, states 1/3 (aquatic) the other.
cols[[1]] <- "darkgoldenrod3"
cols[[2]] <- "deepskyblue2"
cols[[3]] <- "darkgoldenrod3"
cols[[4]] <- "deepskyblue2"
####################
# plot ancestral states of stochastic character mapping HiSSE
####################
# NOTE(review): phytools and plotrix were already attached above; these
# two library() calls are redundant (harmless).
library(phytools)
library(plotrix)
# Write the fan-style simmap plot to a PDF in the working directory
pdf("RevBayes_StochCharMap_HiSSE.pdf", paper="special", height =16, width=11)
plotSimmap(sim2, cols, direction="rightwards",fsize=0.001, lwd=1, split.vertical=TRUE, type="fan")
dev.off()
# add legend (drawn on the currently active device)
leg = names(cols)
leg
add.simmap.legend(leg, colors=cols, cex=0.3, x=0.8, y=0.8, fsize=0.8)
# NOTE(review): A-D below are assigned but never used later in this
# script -- presumably leftovers from an earlier plotting layout.
A = 0
B = 350
C = 700
D = 1150
####################
# plot posteriors ancestral states of stochastic character mapping HiSSE
####################
posterior_file = "output/stochastic-mapping/marginal_posterior.tree"
sim_p = read.simmap(file=posterior_file, format="phylip")
# Define colours for posterior probability
# Collect every posterior-probability bin label present on the tree
colors = vector()
for (i in 1:length( sim_p$maps ) ) {
colors = c(colors, names(sim_p$maps[[i]]) )
}
colors = sort(as.numeric(unique(colors)))
# We can use different two colour choices to plot the posterior tree as a "heatmap". For posteriors, this works better.
cols = setNames( heat.colors(length(colors), rev=TRUE), colors)
# Or using a basic palette with red, yellow, blue, etc.
# cols = setNames( rainbow(length(colors), start=0.0, end=0.9, rev=TRUE), colors)
# fsize is font size for tipe labels, lwd = line width for plotting, ftype = b (bold), i (italics)
# pts: whether to plot filled circles at each tree vertex, as well as transition points between mapped states: default is false.
# Plots to the interactive device (no pdf() wrapper here, unlike above)
plotSimmap(sim_p, cols, fsize=1.0, lwd=2.0, split.vertical=TRUE, ftype="bi", pts=FALSE)
# Add legend
# To identify which colour corresponds to which value of the posterior probability
leg = names(cols)
leg
add.simmap.legend(leg, colors=cols, cex=0.2, x=0.2, y=0.2, fsize=0.3)
# A message appears in console: "Click where you want to draw legend". Click and draw in RQuartz window to get the legend plotted.
# Save image using Save ----- RPlot
|
d85a53180cd968708184f7e8a18148ccd8246a57
|
98a0bd2de4836b813642df0faf5f0b5bd31f7617
|
/man/gapfillerWrap.Rd
|
3f5e6b5bae8c69469619cfbbd888f4b716ecb774
|
[] |
no_license
|
inambioinfo/chimera
|
7bf3834f72464e546b83f52704354acbc9c329bc
|
17e0580ccd842a57f519fd968bc9df3d9ec29a0f
|
refs/heads/master
| 2021-06-25T06:56:13.520654
| 2017-04-24T19:50:57
| 2017-04-24T19:50:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,868
|
rd
|
gapfillerWrap.Rd
|
\name{gapfillerWrap}
\alias{gapfillerWrap}
\title{A function to prepare files and to run gapfiller}
\description{A function that uses GapFiller to confirm, by de novo assembly, the presence of the fusion break point. The function needs as input a list of fusion transcripts generated by the chimeraSeqSet function and the bam file containing the reads remapped over the fusion transcripts made using subreadRun.}
\usage{gapfillerWrap(chimeraSeqSet.out, bam, parallel=c(FALSE,TRUE))
}
\arguments{
\item{chimeraSeqSet.out}{a list of DNAStringSet output from chimeraSeqSet}
\item{bam}{bam file containing the reads remapped over the fusion transcripts using Rsubread}
\item{parallel}{if FALSE FALSE no parallelization, if TRUE TRUE full parallelization, if FALSE TRUE only parallelization for internal functions}
}
\value{
The program will write in a temporary directory contigs.fasta and contig.stats, which are used to evaluate if the de novo assembly allows the identification of the fusion break point.
The function returns for each fusion a list of three objects. The list is returned only in case that some of de novo assemblies cover the breakpoint junction.
The list is made of:
\item{contigs}{which is a PairwiseAlignments object}
\item{junction.contigs}{which is a DNAStringSet encompassing the sequences present in the contigs object}
\item{fusion}{which is a DNAStringSet object encompassing the fusion transcript}
}
\author{Raffaele A Calogero}
\examples{
#tmp <- importFusionData("star", "Chimeric.out.junction", org="hg19", min.support=100)
#myset <- tmp[1:4]
#tmp.seq <- chimeraSeqsSet(myset, type="transcripts")
#tmp <- gapfillerWrap(chimeraSeqSet.out=tmp.seq, bam="accepted_hits_mapped.bam", parallel=c(FALSE,TRUE))
}
\seealso{ \code{\link{chimeraSeqs}}, \code{\link{gapfillerInstallation}}, \code{\link{gapfillerRun}}}
\keyword{utilities}
|
78ecea3843a7d865b50bb030a40c2337c8599f12
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/qat/examples/qat_call_plot_noc_rule.Rd.R
|
e5bcdd5085d18ffa1fc2cb18e7d76d80d32c5349
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 531
|
r
|
qat_call_plot_noc_rule.Rd.R
|
library(qat)
### Name: qat_call_plot_noc_rule
### Title: Plot a result of a NOC rule check
### Aliases: qat_call_plot_noc_rule
### Keywords: utilities
### ** Examples
# Example measurement vector; contains one NaN to exercise missing-value
# handling in the NOC (number-of-consecutive) rule check.
vec <- c(1,2,3,4,4,4,5,5,4,3,NaN,3,2,1)
# Workflow configuration for the rule: return at most one element
workflowlist_part <- list(max_return_elements=1)
# Run the NOC rule check; resultlist[[2]] holds the check result to plot
resultlist <- qat_call_noc_rule(vec, workflowlist_part,element=1)
# this example produces a file exampleplot_1_noc.png in the current directory
qat_call_plot_noc_rule(resultlist[[2]], measurement_vector=vec,
measurement_name="Result of Check", basename="exampleplot")
|
b209cee892972746b8a311817e9773d807782ff4
|
255e643aab0d85916e0b6474a741238f048d4081
|
/Analysis/Functions/auxiliary_old.R
|
545d56ade9087c19ae3269ac4e6a62219e2c9b08
|
[
"MIT"
] |
permissive
|
MarioniLab/OesophagusSingleCell2018
|
e439a0b2f989d359297d2c6d91eebe1fa423c503
|
31f8e077238a05c05c89d7df80c1f2541edf8d6a
|
refs/heads/master
| 2020-03-11T09:22:52.490276
| 2019-10-14T15:19:25
| 2019-10-14T15:19:25
| 129,908,964
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,560
|
r
|
auxiliary_old.R
|
#############################################
#### Script to store auxiliary functions ####
#############################################
# Load normalized expression matrices for one tissue.
#
# file_vec: character vector of paths to RDS files (one per patient);
#   each path must contain a "Patient..." component, which is prepended
#   to the cell (column) names so cells stay unique across patients.
#
# Returns a list of matrices restricted to genes measured in every
# sample and expressed (row sum > 0) in at least one cell; with a single
# file, returns the loaded object (as a one-element list) unchanged.
loadData <- function(file_vec){
  # load normalized data.
  # lapply, NOT sapply: sapply silently simplifies to an array when all
  # matrices share dimensions, which broke the colnames<- step below.
  normdata <- lapply(file_vec, readRDS)
  # add patients to column names (grep picks the "Patient..." path part)
  ps <- sapply( strsplit(file_vec, "/"), grep, pattern="Patient", value=TRUE)
  for(i in seq_along(normdata)){
    colnames(normdata[[i]]) <- paste(ps[i], colnames(normdata[[i]]), sep="_")
  }
  if(length(file_vec) > 1){
    # reduce to genes present in all data sets
    genes <- lapply(normdata, row.names)
    genes <- table(unlist(genes))
    genes <- names(genes)[genes >= length(normdata)]
    # reduce to genes expressed in at least one sample
    # (lapply again to avoid sapply's simplification)
    normdata_sub <- lapply(normdata, function(x, g) as.matrix(x[g, ]), g=genes)
    genes_sub <- genes[rowSums(do.call("cbind", normdata_sub)) > 0]
    normdata_sub <- lapply(normdata, function(x, g) as.matrix(x[g, ]), g=genes_sub)
    return(normdata_sub)
  }else{
    return(normdata)
  }
}
# Drop putative contaminating cells from each expression matrix.
#
# normdata_list: list of gene x cell matrices with Ensembl IDs as row names.
# cd45: if TRUE, drop cells expressing ENSG00000081237 (PTPRC/CD45).
# vim:  if TRUE, drop cells expressing ENSG00000026025 (VIM).
#
# Returns the list with flagged cells (columns) removed.
filterData <- function(normdata_list, cd45=TRUE, vim=TRUE){
  # one logical keep-mask per sample.
  # lapply, NOT sapply: with equal-sized samples sapply collapsed the
  # masks into a matrix, so keep[[i]] was a single element and filtering
  # silently did nothing.
  keep <- lapply(normdata_list, function(x) rep(TRUE, ncol(x)))
  # helper: clear mask entries for cells expressing geneid (value > 0)
  filterhelper <- function(geneid, normdata, keep_mask){
    if(geneid %in% row.names(normdata)){
      keep_mask <- keep_mask & (!normdata[geneid, ] > 0)
    }else{
      print("gene not measured")
    }
    return(keep_mask)
  }
  # filter immune cells
  if(cd45) {
    keep <- lapply(seq_along(normdata_list), function(i)
      filterhelper("ENSG00000081237", normdata_list[[i]], keep[[i]]))
  }
  # filter mesenchymal cells
  if(vim) {
    keep <- lapply(seq_along(normdata_list), function(i)
      filterhelper("ENSG00000026025", normdata_list[[i]], keep[[i]]))
  }
  # drop=FALSE keeps the matrix shape even when only one cell survives
  normdata_list <- lapply(seq_along(normdata_list), function(i)
    normdata_list[[i]][, keep[[i]], drop=FALSE])
  return(normdata_list)
}
# Identify highly variable genes (HVGs) shared across samples.
#
# normdata_list: list of gene x cell expression matrices (normalized
#   counts; log2(x + 1) is taken internally before variance modelling).
#
# Returns the names of the top 1000 genes ranked by biological variance
# component. NOTE(review): trendVar/decomposeVar/combineVar are presumably
# from scran (not attached in this view of the file) -- confirm; newer
# scran releases replaced them with modelGeneVar.
getHVGs <- function(normdata_list){
# remove lowly expressed genes: keep genes with mean > 0.1 in EVERY sample
genes <- lapply(normdata_list, function(x) row.names(x)[rowMeans(x)>0.1])
genes <- table(unlist(genes))
genes <- names(genes)[genes==length(normdata_list)]
normdata_list <- lapply(normdata_list, function(x, g) x[g,], g=genes)
# find highly variable genes, single data set
hvgHelper <- function(data){
# fit variance mean relationship on log2 counts
logdata <- log2(data + 1)
varfit <- trendVar(logdata)
decomp <- decomposeVar(logdata, varfit)
# plot fit
#plot(varfit$mean, varfit$var)
#curve(varfit$trend(x), col="red", lwd=2, add=TRUE)
return( decomp )
}
# one plotting panel per sample (side effect on the active graphics device)
par(mfrow=c(1,length(normdata_list)))
HVG_list <- lapply(normdata_list, hvgHelper)
# combine if more than one sample
if(length(HVG_list)>1){
# unpack arguments for combineVar
HVG.df <- do.call(combineVar, HVG_list)
}else{
HVG.df <- HVG_list[[1]]
}
# get top 1000 most variable genes, ranked by biological component.
# NOTE(review): if fewer than 1000 genes survive filtering, [1:1000]
# pads the result with NA -- confirm downstream callers tolerate this.
HVG.df <- HVG.df[order(HVG.df$bio, decreasing=TRUE), ]
HVG <- rownames(HVG.df)[1:1000]
return(HVG)
}
# Batch-correct samples with mutual nearest neighbours (mnnCorrect),
# using only the supplied highly variable genes.
#
# normdata_list: list of gene x cell expression matrices, one per batch.
# HVG: character vector of gene (row) names to correct on.
#
# Returns the mnnCorrect() result object.
batchCorrect <- function(normdata_list, HVG){
  # batch correction based on top highly variable genes (log2 counts).
  # lapply, NOT sapply: sapply would simplify equal-sized matrices to an
  # array and break the per-batch argument passing below.
  normdata_list_hvg <- lapply(normdata_list, function(x, hvg) log2(x[hvg, ] + 1), hvg=HVG)
  # pass one matrix per batch as positional arguments.
  # do.call replaces the original eval(parse(text=...)) string-built call,
  # which was fragile and unsafe; unname() keeps list names from being
  # misread as argument names.
  bcdata <- do.call(mnnCorrect,
                    c(unname(normdata_list_hvg),
                      list(cos.norm.in=TRUE, cos.norm.out=TRUE, sigma=0.1)))
  return(bcdata)
}
|
b595749748ad7ac9a64e4e90eb6efdeb2ff308f5
|
44a687ab2b10bb849d835bead9c3f4c4d2cffb6b
|
/R_workshop_2016/stat_progs_R/stat_progs_R/5_statistical_tests/2_non-parametric_tests/9_Kruskal-Wallis_test.r
|
a47a88edef3d52bd1ea4ff8b4b4b1df1441a345b
|
[] |
no_license
|
lakshmibalasubramanian/codebase_laks
|
031ba4c3cdabc0f4e886dec60475b7e9de40aaf2
|
664390654f2e785dc9d75107fc47643b8e3b69f3
|
refs/heads/master
| 2020-06-05T19:44:05.919169
| 2020-04-07T13:58:41
| 2020-04-07T13:58:41
| 192,528,890
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 558
|
r
|
9_Kruskal-Wallis_test.r
|
# Kruskal-Wallis rank-sum test, non-parametric alternative to one-way ANOVA.
# Expects insect_sprays.txt (tab-separated, with `count` and `spray`
# columns) in the current working directory.
dat = read.table("insect_sprays.txt", header=TRUE, sep='\t')
# Per-group mean counts (each expression autoprints when sourced)
mean(dat$count[dat$spray=="A"])
mean(dat$count[dat$spray=="B"])
mean(dat$count[dat$spray=="C"])
mean(dat$count[dat$spray=="D"])
mean(dat$count[dat$spray=="E"])
# Same means plus group sizes, computed in one call per statistic
tapply(dat$count, dat$spray, mean)
tapply(dat$count, dat$spray, length)
# Visual check of the per-spray count distributions
boxplot(dat$count ~ dat$spray)
# BUG FIX: the test previously ran on the built-in InsectSprays dataset
# (data=InsectSprays) rather than the file just read into `dat`.
kr = kruskal.test(count ~ spray, data=dat)
print(kr)
# str(kr) gives the output quantities by name
stat = as.numeric(kr$statistic)
print(stat)
#etc
|
5e06c21383fc92325d507242e80bab6386f8a528
|
12c9f9fcb40e704df8e8c79e0de681daca83b2da
|
/WADE_Language_Analysis.R
|
fc9fcd4583dcaa7451fac2fda840766ce7e76d47
|
[] |
no_license
|
laceywade/word-naming-analysis
|
09f071ff238f22672badb5afaadd264c4de3dccd
|
ffa8d5accea60b88366fe40d06761f93be5e84e9
|
refs/heads/main
| 2023-07-16T15:09:30.874021
| 2021-09-05T16:32:56
| 2021-09-05T16:32:56
| 400,847,731
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,264
|
r
|
WADE_Language_Analysis.R
|
library(plyr)
library(tidyverse)
library(lme4)
library(Rmisc)
library(reshape2)
library(wPerm)
'%!in%' = Negate('%in%')
################################################ LOAD & CLEAN UP RAW DATA ######################################
### Load in the formants file
formants<-read_csv("WordNamingGame_Formants.csv")
formants$filename<-formants$Filename
### Load in the Ibex data and get rid of duplicate rows
ibex<-read_csv("WordNamingGame_PCIbexResults.csv")
ibex.unique<-ibex %>%
distinct(filename, .keep_all = TRUE)
### Merge by filename and word
exp1a<-merge(ibex.unique, formants, by=c("filename"))
### Load in demographic data # (WordNamingGame_Demographics.csv)
dems<-read_csv("WordNamingGame_Demographics.csv")
dems$time<-dems$Time
dems.unique<-dems %>%
distinct(time, .keep_all = TRUE)
exp1<-merge(exp1a, dems.unique, by="time")
### Get rid of participants who missed more than 30 words
exp1.phone<-exp1[exp1$phone%in%c("AY1", "AY2"),]
exp1.phone.sums<-summarySE(exp1.phone, measurevar="f1_80", groupvars=c("time", "Dialect", "voice"), na.rm=T)
to.omit<-exp1.phone.sums[exp1.phone.sums$N<60,]
to.omit.part<-unique(to.omit$time)
exp1<-exp1[exp1$time%!in%c(to.omit.part),]
### Outlier Trimming
by.part<-Rmisc::summarySE(exp1, measurevar="f1_80", groupvar="time", na.rm=T)
by.part$max.p<-by.part$f1_80+(by.part$sd*3)
by.part$min.p<-by.part$f1_80-(by.part$sd*3)
by.part.trim<-dplyr::select(by.part, time, max.p, min.p)
exp1<-merge(exp1, by.part.trim, by="time")
exp1$outlier<-"no"
exp1$outlier[exp1$f1_80>exp1$max.p]<-"yes"
exp1$outlier[exp1$f1_80<exp1$min.p]<-"yes"
by.word<-Rmisc::summarySE(exp1, measurevar="f1_80", groupvar="word.x", na.rm=T)
by.word$max.w<-by.word$f1_80+(by.word$sd*3)
by.word$min.w<-by.word$f1_80-(by.word$sd*3)
by.word.trim<-dplyr::select(by.word, word.x, max.w, min.w)
exp1<-merge(exp1, by.word.trim, by="word.x")
exp1$outlier[exp1$f1_80>exp1$max.w]<-"yes"
exp1$outlier[exp1$f1_80<exp1$min.w]<-"yes"
exp1.trimmed<-exp1[exp1$outlier!="yes",]
### Note that performing the same trimming analyses on F2 does not find any additional outliers
### besides those omitted based on the F1 omission
### Normalize with Lobanov method
exp1.normed<-exp1.trimmed %>%
group_by(time) %>%
mutate(z1_80=scale(f1_80), z2_80=scale(f2_80),
z1_70=scale(f1_70), z2_70=scale(f2_70),
z1_50=scale(f1_50), z2_50=scale(f2_50),
z1_30=scale(f1_30), z2_30=scale(f2_30),
z1_20=scale(f1_20), z2_20=scale(f2_20),
z1_10=scale(f1_10), z2_10=scale(f2_10))
exp1.ay<-exp1.normed[exp1.normed$phone=="AY1" | exp1.normed$phone=="AY2",]
### Make "participant" column from unique time identifier
exp1.ay$participant<-as.factor(exp1.ay$time)
### Make diagonal measure
exp1.ay$diag<-exp1.ay$z2_80-(2*exp1.ay$z1_80)
### Add in frequency data
freq<-read_csv("word_freq.csv")
exp1.ay$word<-exp1.ay$word.x
exp1.ay<-merge(exp1.ay, freq, by="word")
exp1.ay$phase<-as.factor(exp1.ay$phase)
exp1.ay$voice<-as.factor(exp1.ay$voice)
exp1.ay$Dialect<-as.factor(exp1.ay$Dialect)
### Add in participant's baseline
baseline<-exp1.ay[exp1.ay$phase=="baseline",]
baseline.sums <- baseline %>%
group_by(participant) %>%
summarise(baseline.diag=mean(diag))
exp1.ay<-merge(baseline.sums, exp1.ay, by="participant")
################################################## CODE THE SURVEY DATA #############################################
# Create a "Familiarity with the South" subscore
exp1.ay$Familiarity<-
exp1.ay$I.Lived.in.South+
exp1.ay$I.Familiar.South.Speech+
exp1.ay$I.Friends.South+
exp1.ay$I.have.Southern.accent+
exp1.ay$Parents.have.accent+
exp1.ay$Colleagues.have.accent+
exp1.ay$I.imitate.accent
# Create a "Talker Likability" subscore
exp1.ay$Likable<-
exp1.ay$Happy+
exp1.ay$Kind+
exp1.ay$Friendly+
exp1.ay$Would.Be.Friends+
exp1.ay$Relatable
# Create a "Prestige" subscore
exp1.ay$Prestige<-
exp1.ay$Intelligent+
exp1.ay$Wealthy+
exp1.ay$Professional+
exp1.ay$Attractive+
exp1.ay$Educated
### Marlowe-Crowne Social Desirability Scale
exp1.ay$SocDes1=ifelse(exp1.ay$Q13.1_1==TRUE,1,0)
exp1.ay$SocDes2=ifelse(exp1.ay$Q13.1_2==TRUE,1,0)
exp1.ay$SocDes3=ifelse(exp1.ay$Q13.1_3==FALSE,1,0)
exp1.ay$SocDes4=ifelse(exp1.ay$Q13.1_4==TRUE,1,0)
exp1.ay$SocDes5=ifelse(exp1.ay$Q13.1_5==FALSE,1,0)
exp1.ay$SocDes6=ifelse(exp1.ay$Q13.1_6==FALSE,1,0)
exp1.ay$SocDes7=ifelse(exp1.ay$Q13.1_7==TRUE,1,0)
exp1.ay$SocDes8=ifelse(exp1.ay$Q13.1_8==TRUE,1,0)
exp1.ay$SocDes9=ifelse(exp1.ay$Q13.1_9==FALSE,1,0)
exp1.ay$SocDes10=ifelse(exp1.ay$Q13.1_10==FALSE,1,0)
exp1.ay$SocDes11=ifelse(exp1.ay$Q13.1_11==FALSE,1,0)
exp1.ay$SocDes12=ifelse(exp1.ay$Q13.1_12==FALSE,1,0)
exp1.ay$SocDes13=ifelse(exp1.ay$Q13.1_13==TRUE,1,0)
exp1.ay$SocDes14=ifelse(exp1.ay$Q13.1_14==FALSE,1,0)
exp1.ay$SocDes15=ifelse(exp1.ay$Q13.1_15==FALSE,1,0)
exp1.ay$SocDes16=ifelse(exp1.ay$Q13.1_16==TRUE,1,0)
exp1.ay$SocDes17=ifelse(exp1.ay$Q13.1_17==TRUE,1,0)
exp1.ay$SocDes18=ifelse(exp1.ay$Q13.1_18==TRUE,1,0)
exp1.ay$SocDes19=ifelse(exp1.ay$Q13.1_19==FALSE,1,0)
exp1.ay$SocDes20=ifelse(exp1.ay$Q13.1_20==TRUE,1,0)
exp1.ay$SocDes21=ifelse(exp1.ay$Q13.1_21==TRUE,1,0)
exp1.ay$SocDes22=ifelse(exp1.ay$Q13.1_22==FALSE,1,0)
exp1.ay$SocDes23=ifelse(exp1.ay$Q13.1_23==FALSE,1,0)
exp1.ay$SocDes24=ifelse(exp1.ay$Q13.1_24==TRUE,1,0)
exp1.ay$SocDes25=ifelse(exp1.ay$Q13.1_25==TRUE,1,0)
exp1.ay$SocDes26=ifelse(exp1.ay$Q13.1_26==TRUE,1,0)
exp1.ay$SocDes27=ifelse(exp1.ay$Q13.1_27==TRUE,1,0)
exp1.ay$SocDes28=ifelse(exp1.ay$Q13.1_28==FALSE,1,0)
exp1.ay$SocDes29=ifelse(exp1.ay$Q13.1_29==TRUE,1,0)
exp1.ay$SocDes30=ifelse(exp1.ay$Q13.1_30==FALSE,1,0)
exp1.ay$SocDes31=ifelse(exp1.ay$Q13.1_31==TRUE,1,0)
exp1.ay$SocDes32=ifelse(exp1.ay$Q13.1_32==FALSE,1,0)
exp1.ay$SocDes33=ifelse(exp1.ay$Q13.1_33==TRUE,1,0)
exp1.ay$SocDes<-exp1.ay$SocDes1+
exp1.ay$SocDes2+exp1.ay$SocDes3+
exp1.ay$SocDes4+exp1.ay$SocDes5+
exp1.ay$SocDes6+exp1.ay$SocDes7+
exp1.ay$SocDes8+exp1.ay$SocDes9+
exp1.ay$SocDes10+exp1.ay$SocDes11+
exp1.ay$SocDes12+exp1.ay$SocDes13+
exp1.ay$SocDes14+exp1.ay$SocDes15+
exp1.ay$SocDes16+exp1.ay$SocDes17+
exp1.ay$SocDes18+exp1.ay$SocDes19+
exp1.ay$SocDes20+exp1.ay$SocDes21+
exp1.ay$SocDes22+exp1.ay$SocDes23+
exp1.ay$SocDes24+exp1.ay$SocDes25+
exp1.ay$SocDes26+exp1.ay$SocDes27+
exp1.ay$SocDes28+exp1.ay$SocDes29+
exp1.ay$SocDes30+exp1.ay$SocDes31+
exp1.ay$SocDes32+exp1.ay$SocDes33
### Big Five
# Reverse items DO NOT RUN THESE AGAIN!!!!
exp1.ay$Q11_6<-(0-exp1.ay$Q11_6)+6
exp1.ay$Q11_21<-(0-exp1.ay$Q11_21)+6
exp1.ay$Q11_31<-(0-exp1.ay$Q11_31)+6
exp1.ay$Q11_2<-(0-exp1.ay$Q11_2)+6
exp1.ay$Q11_12<-(0-exp1.ay$Q11_12)+6
exp1.ay$Q11_27<-(0-exp1.ay$Q11_27)+6
exp1.ay$Q11_37<-(0-exp1.ay$Q11_37)+6
exp1.ay$Q11_8<-(0-exp1.ay$Q11_8)+6
exp1.ay$Q11_18<-(0-exp1.ay$Q11_18)+6
exp1.ay$Q11_23<-(0-exp1.ay$Q11_23)+6
exp1.ay$Q11_43<-(0-exp1.ay$Q11_43)+6
exp1.ay$Q11_9<-(0-exp1.ay$Q11_9)+6
exp1.ay$Q11_24<-(0-exp1.ay$Q11_24)+6
exp1.ay$Q11_34<-(0-exp1.ay$Q11_34)+6
exp1.ay$Q11_35<-(0-exp1.ay$Q11_35)+6
exp1.ay$Q11_41<-(0-exp1.ay$Q11_41)+6
exp1.ay$extraversion<-
exp1.ay$Q11_1+
exp1.ay$Q11_6+
exp1.ay$Q11_11+
exp1.ay$Q11_16+
exp1.ay$Q11_21+
exp1.ay$Q11_26+
exp1.ay$Q11_31+
exp1.ay$Q11_36
exp1.ay$agreeableness<-
exp1.ay$Q11_2 +
exp1.ay$Q11_7 +
exp1.ay$Q11_12 +
exp1.ay$Q11_17 +
exp1.ay$Q11_22 +
exp1.ay$Q11_27 +
exp1.ay$Q11_32 +
exp1.ay$Q11_37 +
exp1.ay$Q11_42
exp1.ay$conscientiousness<-
exp1.ay$Q11_3 +
exp1.ay$Q11_8 +
exp1.ay$Q11_13 +
exp1.ay$Q11_18 +
exp1.ay$Q11_23 +
exp1.ay$Q11_28 +
exp1.ay$Q11_33 +
exp1.ay$Q11_38 +
exp1.ay$Q11_43
exp1.ay$neuroticism<-
exp1.ay$Q11_4 +
exp1.ay$Q11_9 +
exp1.ay$Q11_14 +
exp1.ay$Q11_19 +
exp1.ay$Q11_24 +
exp1.ay$Q11_29 +
exp1.ay$Q11_34 +
exp1.ay$Q11_39
exp1.ay$openness<-
exp1.ay$Q11_5 +
exp1.ay$Q11_10 +
exp1.ay$Q11_15 +
exp1.ay$Q11_20 +
exp1.ay$Q11_25 +
exp1.ay$Q11_30 +
exp1.ay$Q11_35 +
exp1.ay$Q11_40 +
exp1.ay$Q11_41 +
exp1.ay$Q11_44
#### AQ scores
exp1.ay$AQ1<-exp1.ay$Q12_51
exp1.ay$AQ2<-exp1.ay$Q12_52
exp1.ay$AQ3<-exp1.ay$Q12_53
exp1.ay$AQ4<-exp1.ay$Q12_54
exp1.ay$AQ5<-exp1.ay$Q12_55
exp1.ay$AQ6<-exp1.ay$Q12_56
exp1.ay$AQ7<-exp1.ay$Q12_57
exp1.ay$AQ8<-exp1.ay$Q12_58
exp1.ay$AQ9<-exp1.ay$Q12_59
exp1.ay$AQ10<-exp1.ay$Q12_60
exp1.ay$AQ11<-exp1.ay$Q12_61
exp1.ay$AQ12<-exp1.ay$Q12_62
exp1.ay$AQ13<-exp1.ay$Q12_63
exp1.ay$AQ14<-exp1.ay$Q12_64
exp1.ay$AQ15<-exp1.ay$Q12_65
exp1.ay$AQ16<-exp1.ay$Q12_66
exp1.ay$AQ17<-exp1.ay$Q12_67
exp1.ay$AQ18<-exp1.ay$Q12_68
exp1.ay$AQ19<-exp1.ay$Q12_69
exp1.ay$AQ20<-exp1.ay$Q12_70
exp1.ay$AQ21<-exp1.ay$Q12_71
exp1.ay$AQ22<-exp1.ay$Q12_72
exp1.ay$AQ23<-exp1.ay$Q12_73
exp1.ay$AQ24<-exp1.ay$Q12_74
exp1.ay$AQ25<-exp1.ay$Q12_75
exp1.ay$AQ26<-exp1.ay$Q12_76
exp1.ay$AQ27<-exp1.ay$Q12_77
exp1.ay$AQ28<-exp1.ay$Q12_78
exp1.ay$AQ29<-exp1.ay$Q12_79
exp1.ay$AQ30<-exp1.ay$Q12_80
exp1.ay$AQ31<-exp1.ay$Q12_81
exp1.ay$AQ32<-exp1.ay$Q12_82
exp1.ay$AQ33<-exp1.ay$Q12_83
exp1.ay$AQ34<-exp1.ay$Q12_84
exp1.ay$AQ35<-exp1.ay$Q12_85
exp1.ay$AQ36<-exp1.ay$Q12_86
exp1.ay$AQ37<-exp1.ay$Q12_87
exp1.ay$AQ38<-exp1.ay$Q12_88
exp1.ay$AQ39<-exp1.ay$Q12_89
exp1.ay$AQ40<-exp1.ay$Q12_90
exp1.ay$AQ41<-exp1.ay$Q12_91
exp1.ay$AQ42<-exp1.ay$Q12_92
exp1.ay$AQ43<-exp1.ay$Q12_93
exp1.ay$AQ44<-exp1.ay$Q12_94
exp1.ay$AQ45<-exp1.ay$Q12_95
exp1.ay$AQ46<-exp1.ay$Q12_96
exp1.ay$AQ47<-exp1.ay$Q12_97
exp1.ay$AQ48<-exp1.ay$Q12_98
exp1.ay$AQ49<-exp1.ay$Q12_99
exp1.ay$AQ50<-exp1.ay$Q12_100
exp1.ay$AQ13<-(1-exp1.ay$AQ13)+4
exp1.ay$AQ22<-(1-exp1.ay$AQ22)+4
exp1.ay$AQ45<-(1-exp1.ay$AQ45)+4
exp1.ay$AQ2<-(1-exp1.ay$AQ2)+4
exp1.ay$AQ4<-(1-exp1.ay$AQ4)+4
exp1.ay$AQ16<-(1-exp1.ay$AQ16)+4
exp1.ay$AQ43<-(1-exp1.ay$AQ43)+4
exp1.ay$AQ46<-(1-exp1.ay$AQ46)+4
exp1.ay$AQ7<-(1-exp1.ay$AQ7)+4
exp1.ay$AQ18<-(1-exp1.ay$AQ18)+4
exp1.ay$AQ26<-(1-exp1.ay$AQ26)+4
exp1.ay$AQ33<-(1-exp1.ay$AQ33)+4
exp1.ay$AQ35<-(1-exp1.ay$AQ35)+4
exp1.ay$AQ39<-(1-exp1.ay$AQ39)+4
exp1.ay$AQ20<-(1-exp1.ay$AQ20)+4
exp1.ay$AQ21<-(1-exp1.ay$AQ21)+4
exp1.ay$AQ41<-(1-exp1.ay$AQ41)+4
exp1.ay$AQ42<-(1-exp1.ay$AQ42)+4
exp1.ay$AQ5<-(1-exp1.ay$AQ5)+4
exp1.ay$AQ6<-(1-exp1.ay$AQ6)+4
exp1.ay$AQ9<-(1-exp1.ay$AQ9)+4
exp1.ay$AQ12<-(1-exp1.ay$AQ12)+4
exp1.ay$AQ19<-(1-exp1.ay$AQ19)+4
exp1.ay$AQ23<-(1-exp1.ay$AQ23)+4
exp1.ay$AQ<-exp1.ay$AQ1+exp1.ay$AQ2+exp1.ay$AQ3+exp1.ay$AQ4+exp1.ay$AQ5+exp1.ay$AQ6+exp1.ay$AQ7+exp1.ay$AQ8+exp1.ay$AQ9+exp1.ay$AQ10+
exp1.ay$AQ11+exp1.ay$AQ12+exp1.ay$AQ13+exp1.ay$AQ14+exp1.ay$AQ15+exp1.ay$AQ16+exp1.ay$AQ17+exp1.ay$AQ18+exp1.ay$AQ19+exp1.ay$AQ20+
exp1.ay$AQ21+exp1.ay$AQ22+exp1.ay$AQ23+exp1.ay$AQ24+exp1.ay$AQ25+exp1.ay$AQ26+exp1.ay$AQ27+exp1.ay$AQ28+exp1.ay$AQ29+exp1.ay$AQ30+
exp1.ay$AQ31+exp1.ay$AQ32+exp1.ay$AQ33+exp1.ay$AQ34+exp1.ay$AQ35+exp1.ay$AQ36+exp1.ay$AQ37+exp1.ay$AQ38+exp1.ay$AQ39+exp1.ay$AQ40+
exp1.ay$AQ41+exp1.ay$AQ42+exp1.ay$AQ43+exp1.ay$AQ44+exp1.ay$AQ45+exp1.ay$AQ46+exp1.ay$AQ47+exp1.ay$AQ48+exp1.ay$AQ49+exp1.ay$AQ50
exp1.ay$SS<-exp1.ay$AQ1+
exp1.ay$AQ11+
exp1.ay$AQ13+
exp1.ay$AQ15+
exp1.ay$AQ22+
exp1.ay$AQ36+
exp1.ay$AQ44+
exp1.ay$AQ45+
exp1.ay$AQ47+
exp1.ay$AQ48
exp1.ay$AS<-exp1.ay$AQ2+
exp1.ay$AQ4+
exp1.ay$AQ10+
exp1.ay$AQ16+
exp1.ay$AQ25+
exp1.ay$AQ32+
exp1.ay$AQ34+
exp1.ay$AQ37+
exp1.ay$AQ43+
exp1.ay$AQ46
exp1.ay$AD<-exp1.ay$AQ5+
exp1.ay$AQ6+
exp1.ay$AQ9+
exp1.ay$AQ12+
exp1.ay$AQ19+
exp1.ay$AQ23+
exp1.ay$AQ28+
exp1.ay$AQ29+
exp1.ay$AQ30+
exp1.ay$AQ49
exp1.ay$C<-exp1.ay$AQ7+
exp1.ay$AQ17+
exp1.ay$AQ18+
exp1.ay$AQ26+
exp1.ay$AQ27+
exp1.ay$AQ31+
exp1.ay$AQ33+
exp1.ay$AQ35+
exp1.ay$AQ38+
exp1.ay$AQ39
exp1.ay$I<-exp1.ay$AQ3+
exp1.ay$AQ8+
exp1.ay$AQ14+
exp1.ay$AQ21+
exp1.ay$AQ24+
exp1.ay$AQ40+
exp1.ay$AQ41+
exp1.ay$AQ42+
exp1.ay$AQ50
###################################################### DATA VISUALISATION ##############################################
### SHIFT BY PHASE & CONDITION
exp1.ay.sums<-summarySE(exp1.ay, measurevar="diag", groupvars=c("voice", "phase"))
exp1.ay.sums$Voice<-"Southern Voice Condition"
exp1.ay.sums$Voice[exp1.ay.sums$voice=="Midland"]<-"Midland Voice Condition"
exp1.ay.sums<-na.omit(exp1.ay.sums)
ggplot(exp1.ay.sums, aes(phase, diag, color=phase))+
geom_point(size=2.5)+
facet_grid(~Voice)+
geom_errorbar(aes(ymin=diag-ci, ymax=diag+ci), size=1)+
theme_bw()+
scale_color_manual(values=c("black", "gray55", "gray75"))+
xlab("Experiment Phase")+
ylab("Front Diagonal (Normalized F2-2*F1)")+
theme(legend.position="none")+
ggtitle("Main Experiment")
### SHIFT BY PHASE, CONDITION & DIALECT
exp1.ay.sums<-summarySE(exp1.ay, measurevar="diag", groupvars=c("voice", "phase", "Dialect"))
exp1.ay.sums$Voice<-"Southern Voice Condition"
exp1.ay.sums$Voice[exp1.ay.sums$voice=="Midland"]<-"Midland Voice Condition"
exp1.ay.sums<-na.omit(exp1.ay.sums)
ggplot(exp1.ay.sums, aes(phase, diag, color=phase))+
geom_point(size=2.5)+
facet_grid(Dialect~Voice)+
geom_errorbar(aes(ymin=diag-ci, ymax=diag+ci), size=1)+
theme_bw()+
scale_color_manual(values=c("black", "gray55", "gray75"))+
xlab("Experiment Phase")+
ylab("Front Diagonal (Normalized F2-2*F1)")+
theme(legend.position="none")+
ggtitle("Main Experiment")
### PARTICIPANT SHIFT (INDIVIDUAL DIFFERENCES)
# Per-participant mean diagonal by condition, phase, and dialect.
# FIX: removed a stray empty argument after `exp1.ay` in the original
# summarySE() call (`summarySE(exp1.ay, , ...)`), which only worked by
# accident of positional argument-matching against a defaulted formal.
exp1.ay.sums<-summarySE(exp1.ay, measurevar="diag", groupvars=c("voice", "phase", "time", "Dialect"))
# Human-readable condition labels for facet strips
exp1.ay.sums$Voice<-"Southern Voice Condition"
exp1.ay.sums$Voice[exp1.ay.sums$voice=="Midland"]<-"Midland Voice Condition"
exp1.ay.sums$participant<-as.factor(exp1.ay.sums$time)
# Wide layout (one column per phase); kept for inspection, not plotted
exp1.ay.sums.wide<-spread(exp1.ay.sums, phase, diag)
exp1.ay.sums<-na.omit(exp1.ay.sums)
# Spaghetti plot: one line per participant from baseline to exposure
# (Southern-voice condition only); line colour encodes shift direction.
ggplot(exp1.ay.sums[exp1.ay.sums$voice=="Southern" & exp1.ay.sums$phase!="post",], aes(phase, diag))+
geom_point(size=3, shape=1, alpha=.5)+
facet_wrap(~Dialect)+
geom_line(aes(color=ave(diag,participant,FUN=diff)>=0, group=participant), alpha=.4, size=1)+
theme_bw()+
scale_color_manual(values=c("dodgerblue4", "orangered3"))+
xlab("Experiment Phase")+
ylab("Normalized F1 at glide (80%)")+
theme(legend.position="none")
#### FINAL MODEL #####
fit1<-lmer(scale(diag)~
phase*relevel(voice, "Southern")*scale(baseline.diag)+
phase*relevel(voice, "Southern")*Dialect+
scale(freq)+
scale(duration)+
(scale(baseline.diag)+scale(duration)|word)+
(phase+scale(duration)+scale(freq)|participant),
data=exp1.ay,
contrasts=list(Dialect=contr.sum),
control = lmerControl(optimizer = "bobyqa",
optCtrl = list(maxfun=2e5)))
summary(fit1)
# extract coefs from model
coefs<-data.frame(ranef(fit1)$participant)
coefs$participant<-rownames(coefs)
coefs<-select(coefs, participant, phaseexposure)
#### merge w/ survey data
exp1.survey<-select(exp1.ay, participant, ProlificID, group,
Kind, Wealthy, Friendly, Intelligent, Professional, Attractive, Speaker.from,
Would.Be.Friends, From.South, From.Midwest, Happy, Relatable, Educated,
I.Lived.in.South, I.Familiar.South.Speech, I.Friends.South, I.Would.Live.South,
I.Like.Southerners.Talk, I.have.Southern.accent, Parents.have.accent, Colleagues.have.accent,
I.imitate.accent, AQ, SS, AS, I, C, AD, openness, extraversion, agreeableness, neuroticism,
conscientiousness, SocDes, voice, Dialect)
exp1.survey.unique<-unique(exp1.survey)
exp1.ind<-merge(exp1.survey.unique, coefs, by="participant")
exp1.ay.participants<-exp1.ay %>%
group_by(participant, phase) %>%
summarise(diag.sum=diag)
exp1.ay.part.wide<-dcast(exp1.ay.participants, participant~phase, value.var="diag.sum", fun.aggregate=mean)
exp1.ind<-merge(exp1.ind, exp1.ay.part.wide, by="participant")
### BIG FIVE
bigfive<-select(exp1.ind, openness, extraversion, agreeableness, neuroticism,
conscientiousness, voice, Dialect, participant, phaseexposure)
bigfive.long<-melt(bigfive, id.vars=c("voice", "Dialect", "participant", "phaseexposure"))
ggplot(bigfive.long[bigfive.long$voice=="Southern",],
aes(value, phaseexposure, color=Dialect, shape=Dialect))+
geom_point(alpha=.7)+geom_smooth(method="lm", se=F)+
facet_wrap(~variable, scales="free")+
theme_bw()+
scale_color_manual(values=c("black", "gray50"))+
ggtitle("Big Five Personality Traits")+xlab("Score (Scaled)")+ylab("Baseline to Exposure Shift")
### AQ
AQ.sub<-select(exp1.ind, SS, AS, AD, C, I, voice, Dialect, participant, phaseexposure)
AQ.sub.long<-melt(AQ.sub, id.vars=c("voice", "Dialect", "participant", "phaseexposure"))
ggplot(AQ.sub.long[AQ.sub.long$voice=="Southern",],
aes(value, phaseexposure, color=Dialect, shape=Dialect))+
geom_point(alpha=.7)+geom_smooth(method="lm", se=F)+
facet_wrap(~variable, scales="free")+
theme_bw()+
scale_color_manual(values=c("black", "gray50"))+
ggtitle("Autism Quotient Subscores")+xlab("Score (Scaled)")+ylab("Baseline to Exposure Shift")
### Marlowe-Crowne
ggplot(exp1.ind[exp1.ind$voice=="Southern",],
aes(SocDes, phaseexposure, color=Dialect, shape=Dialect))+
geom_point(alpha=.5)+geom_smooth(method="lm", se=F)+
theme_bw()+
scale_color_manual(values=c("black", "gray50"))+
ggtitle("Marlowe-Crowne \n Social Desirability Scale")+xlab("Score")+ylab("Baseline to Exposure Shift")
#### Affective/Familiarity Measures
judgments<-select(exp1.ind, participant, Kind, Wealthy, Friendly, Intelligent, Professional, Attractive,
Would.Be.Friends, From.South, From.Midwest, Happy, Educated, Relatable,
I.Lived.in.South, I.Familiar.South.Speech, I.Friends.South, I.Would.Live.South,
I.Like.Southerners.Talk, I.have.Southern.accent, Parents.have.accent, Colleagues.have.accent,
I.imitate.accent, phaseexposure, voice, Dialect)
judgments<-unique(judgments)
judgments.long<-melt(judgments, id.vars=c("participant", "voice", "Dialect", "phaseexposure"))
judgments<-dcast(judgments.long, participant+voice+Dialect+phaseexposure~variable, value.var="value")
# Create a "Familiarity with the South" subscore
judgments$Familiarity<-
judgments$I.Lived.in.South+
judgments$I.Familiar.South.Speech+
judgments$I.Friends.South+
judgments$I.have.Southern.accent+
judgments$Parents.have.accent+
judgments$Colleagues.have.accent+
judgments$I.imitate.accent
# Create a "Talker Likability" subscore
judgments$Likability<-
judgments$Happy+
judgments$Kind+
judgments$Friendly+
judgments$Would.Be.Friends
# Create a "Prestige" subscore
judgments$Prestige<-
judgments$Intelligent+
judgments$Wealthy+
judgments$Professional+
judgments$Attractive+
judgments$Educated
### Prestige
ggplot(judgments[judgments$voice=="Southern",],
aes(Prestige, phaseexposure, color=Dialect, shape=Dialect))+
geom_point(alpha=.75, position="jitter")+geom_smooth(method="lm")+
theme_bw()+
scale_color_manual(values=c("black", "gray55", "gray75"))+
ggtitle("Talker Prestige")+xlab("Score")+ylab("Baseline to Exposure Shift")
### Likability
ggplot(judgments[judgments$voice=="Southern",],
aes(Likability, phaseexposure, color=Dialect, shape=Dialect))+
geom_point(alpha=.75, position="jitter")+geom_smooth(method="lm")+
theme_bw()+
scale_color_manual(values=c("black", "gray55", "gray75"))+
ggtitle("Talker Likability")+xlab("Score")+ylab("Baseline to Exposure Shift")
### Familiarity
ggplot(judgments[judgments$voice=="Southern",],
aes(Familiarity, phaseexposure, color=Dialect, shape=Dialect))+
geom_point(alpha=.75, position="jitter")+
geom_smooth(method="lm", aes(group=1), se=F, color="black", linetype=2)+
geom_smooth(method="lm")+
scale_color_manual(values=c("black", "gray55", "gray75"))+
theme_bw()+
ggtitle("Familiarity with the South")+xlab("Score")+ylab("Baseline to Exposure Shift")
#### Permutations
# Split participants by exposure condition (talker voice)...
s<-exp1.ind[exp1.ind$voice=="Southern",]
m<-exp1.ind[exp1.ind$voice=="Midland",]
# ...and by participant dialect within each condition
ss<-s[s$Dialect=="Southern",]
sn<-s[s$Dialect=="Non-Southern",]
# BUG FIX: the original subset `s` using `m`'s Dialect column
# (ms<-s[m$Dialect==...]; mn<-s[m$Dialect==...]), silently misaligning
# the logical index with the rows being subset. Subset `m` by `m`.
ms<-m[m$Dialect=="Southern",]
mn<-m[m$Dialect=="Non-Southern",]
### Only significant individual differences predictors (Southern talker condition)
# conscientiousness for southerners
cor.test(ss$conscientiousness, ss$phaseexposure)
con.ss.test<-ss[!is.na(ss$conscientiousness),]
perm.relation(con.ss.test$conscientiousness, con.ss.test$phaseexposure, R=10000)
# neuroticism for non-southerners
cor.test(sn$neuroticism, sn$phaseexposure)
neur.sn.test<-sn[!is.na(sn$neuroticism),]
perm.relation(neur.sn.test$neuroticism, neur.sn.test$phaseexposure, R=10000)
# Attention to Detail for non-southerners
cor.test(sn$AD, sn$phaseexposure)
AD.sn.test<-sn[!is.na(sn$AD),]
perm.relation(AD.sn.test$AD, AD.sn.test$phaseexposure, R=10000)
# Imagination for non-southerners
cor.test(sn$I, sn$phaseexposure)
I.sn.test<-sn[!is.na(sn$I),]
perm.relation(I.sn.test$I, I.sn.test$phaseexposure, R=10000)
## Also significant for Southerners in the Midland (control condition)
# Imagination
cor.test(ms$I, ms$phaseexposure)
I.ms.test<-ms[!is.na(ms$I),]
perm.relation(I.ms.test$I, I.ms.test$phaseexposure, R=10000)
# Conscientiousness
cor.test(ms$conscientiousness, ms$phaseexposure)
con.ms.test<-ms[!is.na(ms$conscientiousness),]
perm.relation(con.ms.test$conscientiousness, con.ms.test$phaseexposure, R=10000)
|
10446babb0ddca166754a8d4b21e0bfb6b7dfbd0
|
e38a1ef996105c9648d23c6fe598e29f74d60874
|
/R/fars_functions.R
|
6668baec9a0c7eedeb53bb8cd0389ba84296ec56
|
[] |
no_license
|
adhager/farsr
|
cc0df152f51a736d24a1d2c99c10a0007721cac7
|
bd406363ee664cd726f9298bf71d84c6eb589453
|
refs/heads/master
| 2020-03-23T11:03:34.347540
| 2019-06-18T13:04:57
| 2019-06-18T13:04:57
| 141,479,924
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,639
|
r
|
fars_functions.R
|
#' Read FARS file
#'
#' A FARS file contains data from the US National Highway Traffic Safety
#' Administration's Fatality Analysis Reporting System, which is a nationwide
#' census providing the American public yearly data regarding fatal injuries
#' suffered in motor vehicle traffic crashes.
#'
#' Requires dpyr, tidyr, readr, maps, graphics
#'
#' @param filename Name (or path) of the CSV file to read; may be compressed
#'   (e.g. \code{.csv.bz2}).
#'
#' @return A tibble containing the data read from the file. If the requested
#'   file does not exist, an error is raised and the function stops.
#'
#' @examples
#' \dontrun{
#' fars_read("data/accident_2015.csv.bz2")
#' }
#'
#' @importFrom readr read_csv
#' @importFrom dplyr as_tibble
#'
#' @export
fars_read <- function(filename) {
        if(!file.exists(filename))
                stop("file '", filename, "' does not exist")
        # suppressMessages() hides readr's column-specification chatter.
        data <- suppressMessages({
                readr::read_csv(filename, progress = FALSE)
        })
        # dplyr::tbl_df() is deprecated; as_tibble() is its direct
        # replacement and returns the same tbl_df-classed object.
        dplyr::as_tibble(data)
}
#' Create a FARS filename for a particular year
#'
#' @param year The year for which to generate the filename; coerced to
#'   integer, so numeric strings such as "2015" are accepted.
#'
#' @return The formatted file name, e.g. "accident_2015.csv.bz2".
#'   A non-numeric \code{year} triggers the usual coercion warning:
#'   "NAs introduced by coercion".
#'
#' @examples
#' \dontrun{
#' make_filename("2015")
#' }
#'
#' @export
make_filename <- function(year) {
        sprintf("accident_%d.csv.bz2", as.integer(year))
}
#' Reads months and years from multiple FARS files (for multiple years)
#'
#' Notes:
#' The FARS file(s) must be located in the current working directory.
#'
#' @param years A vector or list of years for which to read FARS files
#'
#' @return A list with one element per requested year, each a data frame
#'   holding the MONTH and year columns of that year's accidents.
#'   If any error occurs for a year, a warning is generated
#'   ("invalid year: XXXX") and that element is \code{NULL}.
#'
#' @examples
#' \dontrun{
#' fars_read_years(2013:2015)
#' fars_read_years(list(2013, 2015))
#' }
#'
#' @export
fars_read_years <- function(years) {
        lapply(years, function(year) {
                file <- make_filename(year)
                tryCatch({
                        dat <- fars_read(file)
                        # Base assignment replaces the deprecated SE verbs
                        # (mutate_/select_) and sidesteps any NSE ambiguity
                        # between the local `year` value and a column of the
                        # same name.
                        dat[["year"]] <- year
                        dat[, c("MONTH", "year")]
                }, error = function(e) {
                        warning("invalid year: ", year)
                        return(NULL)
                })
        })
}
#' Reads months and years from FARS files and summarizes how many accidents occurred per month/year
#'
#' Notes:
#' The FARS file(s) must be located in the current working directory.
#'
#' @param years A vector or list of years for which to read FARS files
#'
#' @return A tibble containing the number of accidents per month. Each column
#'   represents a year and each row a month.
#'
#' @importFrom dplyr %>%
#' @importFrom dplyr bind_rows
#' @importFrom dplyr group_by
#' @importFrom dplyr summarize
#' @importFrom dplyr n
#' @importFrom tidyr pivot_wider
#'
#' @examples
#' \dontrun{
#' fars_summarize_years(2013:2015)
#' }
#'
#' @export
fars_summarize_years <- function(years) {
        dat_list <- fars_read_years(years)
        dplyr::bind_rows(dat_list) %>%
                # Modern NSE verbs replace the deprecated
                # group_by_()/summarize_() forms.
                dplyr::group_by(year, MONTH) %>%
                # .groups = "drop_last" reproduces the classic grouping
                # behavior and silences summarize()'s regrouping message.
                dplyr::summarize(n = dplyr::n(), .groups = "drop_last") %>%
                # pivot_wider() replaces the deprecated spread_();
                # names_sort = TRUE matches spread()'s sorted year columns.
                tidyr::pivot_wider(names_from = "year", values_from = "n",
                                   names_sort = TRUE)
}
#' Plots location of accidents on a map.
#'
#' @param state.num The state (FARS STATE code) to load data for
#' @param year The year to load data for
#'
#' @return NULL, invisibly. Draws a plot as a side effect. Stops with an
#'   error on an invalid state number; messages "no accidents to plot" and
#'   returns NULL when the state has no accidents.
#'
#' @examples
#' \dontrun{
#' fars_map_state(9, 2013)
#' }
#'
#' @importFrom dplyr filter
#' @importFrom rlang .data
#' @importFrom maps map
#' @importFrom graphics points
#'
#' @export
fars_map_state <- function(state.num, year) {
        filename <- make_filename(year)
        data <- fars_read(filename)
        state.num <- as.integer(state.num)
        if(!(state.num %in% unique(data$STATE)))
                stop("invalid STATE number: ", state.num)
        # filter() replaces the deprecated filter_(); the .data pronoun
        # disambiguates the STATE column from any variable of that name.
        data.sub <- dplyr::filter(data, .data$STATE == state.num)
        if(nrow(data.sub) == 0L) {
                message("no accidents to plot")
                return(invisible(NULL))
        }
        # Coordinates beyond the valid ranges are treated as missing so they
        # neither distort the map window nor get plotted.
        is.na(data.sub$LONGITUD) <- data.sub$LONGITUD > 900
        is.na(data.sub$LATITUDE) <- data.sub$LATITUDE > 90
        with(data.sub, {
                maps::map("state", ylim = range(LATITUDE, na.rm = TRUE),
                          xlim = range(LONGITUD, na.rm = TRUE))
                graphics::points(LONGITUD, LATITUDE, pch = 46)
        })
}
|
01260a00062e306c73576fbea7ea89ed05f37b20
|
e0bcd3a0bfa23c1d445c2c738b8e37323a0c3b71
|
/test/20190206_data_integrity_testing.R
|
9f88f4679be5b2b0f5a4d2e44fea6278df51b93e
|
[] |
no_license
|
active-analytics/pqam_2018
|
87a017a55c130412e4d090518f2f47a6adb67c9a
|
cbc0437bb9da5460939641ec39af24070b733e24
|
refs/heads/master
| 2021-10-20T11:22:43.513713
| 2019-02-27T14:07:28
| 2019-02-27T14:07:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 446
|
r
|
20190206_data_integrity_testing.R
|
# 2019-02-06 data-integrity spot check: compare the re-generated SPY weekly
# option-history export against the archived 2018-12-29 snapshot.
# NOTE(review): `rm(list = ls())` and cat("\014") (clears the RStudio console)
# are interactive-session conveniences; avoid them in shared scripts.
rm(list = ls())
cat("\014")
library(tidyverse)
# Archived snapshot vs. freshly generated file — presumably the same schema;
# verify against the generating pipeline.
df_old <- read_csv("data_output/20181229_spy_weekly_opt_hist_5yr.csv")
df_new <- read_csv("data_output/spy_weekly_opt_hist_5yr.csv")
# Total row counts should match if nothing was lost or duplicated.
nrow(df_old)
nrow(df_new)
# Per (data_date, expiration) row counts, printed for manual comparison.
df_old %>%
group_by(data_date, expiration) %>%
summarize(num_row = n())
df_new %>%
group_by(data_date, expiration) %>%
summarize(num_row = n())
# Kept for ad-hoc drill-down into a single date:
# df_old %>%
# filter(data_date == "2013-12-27") %>% View()
|
19ddd835873c299f9c02a1f36830196701d6e1ab
|
e5438533f88dfcd48bcfd8fe079ef0f7de0f3438
|
/man/VKSgraphic.Rd
|
5e90a917c04f0a9b0f3fa75979cb47478d6b7662
|
[] |
no_license
|
cran/CVD
|
b4f04637fc273afb166c7dfc1f9d0a1e27326095
|
85474eb2f075943a488bb25b19a2b4c0b8ba6812
|
refs/heads/master
| 2020-05-18T06:51:44.040624
| 2016-11-28T12:23:17
| 2016-11-28T12:23:17
| 17,678,386
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,876
|
rd
|
VKSgraphic.Rd
|
\name{VKSgraphic}
\alias{VKSgraphic}
\alias{VKSvariantGraphic}
\title{Graphical score for the D-15 tests}
\description{\code{VKSgraphic} computes a graphical score based on
the Vingrys and King-Smith method (VKS) for the D-15 test or similar tests.
\code{VKSvariantGraphic} shows the angles with double their value,
for a continuous display of the confusion axis.}
\usage{
VKSgraphic(VKSdata, xLimit=5, yLimit=4, VKStitle='', VKSxlabel='',
VKSylabel='')
}
\arguments{
\item{VKSdata}{ data.frame with color vision deficiency name,
VKS angle and VKS index}
\item{xLimit}{ X-axis boundaries}
\item{yLimit}{ Y-axis boundaries}
\item{VKStitle}{ title for the plot}
\item{VKSxlabel}{ text for the x label}
\item{VKSylabel}{ text for the y label}
}
\value{
none
}
\source{
VKSvariantGraphic - original idea by David Bimler
Atchison DA, Bowman KJ, Vingrys AJ
Quantitative scoring methods for D15 panel tests in the diagnosis of
congenital colour-vision deficiencies.
Optometry and Vision Science 1991, 68:41-48.
}
\references{
Atchison DA, Bowman KJ, Vingrys AJ
Quantitative scoring methods for D15 panel tests in the diagnosis of
congenital colour-vision deficiencies.
Optometry and Vision Science 1991, 68:41-48.
}
\author{Jose Gama}
\examples{
# Creating similar graphics to "A Quantitative Scoring Technique For Panel
#Tests of Color Vision" Algis J. Vingrys and P. Ewen King-Smith
\dontrun{
VKSdata<-VKStable2[,c(1,3:5)]
VKSdata[1,1]<-'Normal no error'
VKSdata[2:9,1]<-'Normal'
VKSdata[10:13,1]<-'Acquired CVD'
# the graphics are similar but not identical because the data used in the
#plots is the average of the values instead of all the values
VKSgraphic(VKSdata[,1:3],5,4,'D-15 angle vs C-index (Average)','Angle',
'C-index') # Fig. 6
VKSgraphic(VKSdata[,c(1,2,4)],5,4,'D-15 angle vs S-index (Average)','Angle',
'S-index') # Fig. 7
}
}
\keyword{programming}
|
8c6159686482d0963b9ce5f77a60a3207c45e4bd
|
9492380587c4336fa23ad876af43af283663e136
|
/6
|
17afdfb686ba0253d90da0721d443f3a48edad78
|
[] |
no_license
|
sunilpatil27/dsr_lab
|
c87fab7f3a6e0ddb17a41b08c36408ce7274df3e
|
b5bde2a8cd1144d343bb624475566189a0fb6f91
|
refs/heads/master
| 2020-09-09T00:31:30.346907
| 2019-11-12T18:57:33
| 2019-11-12T18:57:33
| 221,289,779
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 633
|
6
|
# Two-sample pooled t-test comparing rural and urban measurements, with the
# critical t values and a visualization of the rejection region.
library(visualize)
library(BSDA)

rural <- c(3.1, 2.9, 2.8, 3.0, 2.7, 3.1, 2.6, 2.8, 2.9, 3.0)
urban <- c(3.5, 3.0, 3.1, 3.2, 2.9, 3.4, 3.0, 3.4, 2.8, 3.4)

# Sample means (auto-printed for inspection).
mean_rural <- mean(rural)
mean_rural
mean_urban <- mean(urban)
mean_urban

# Spread of each sample.
var(rural)
sd(rural)
var(urban)
sd(urban)

# Pooled two-sample t-test: obtains the t-calculated value.
t.test(x = rural, y = urban, var.equal = TRUE, conf.level = 0.95)

# Two-sided critical t value at the 0.05 significance level (df = 18),
# i.e. the table value / t-critical.
qt(p = 0.05 / 2, df = 18, lower.tail = FALSE)

# Shade the tails for the observed statistic and for the critical value.
visualize.t(stat = c(-2.9886, 2.9886), df = 18, section = "tails")
visualize.t(stat = c(-2.100922, 2.100922), df = 18, section = "tails")
|
|
ec750bf846e8c4afb8c011294671c06292cf3384
|
57a7632e12ec2b39de5131a6e614a828d00b7705
|
/Plot1.R
|
0693095bf71da9f2f97432d90321199ac7ed7922
|
[] |
no_license
|
bielgarcies/Project-1-Exploratory-Data-Analysis
|
f92ab2dc3d8e170785406e0a699b2ee14792315d
|
70dfc0f31444471d53de217b77ca8ff383cc2d68
|
refs/heads/main
| 2023-02-08T12:58:13.432484
| 2020-12-31T17:48:57
| 2020-12-31T17:48:57
| 325,844,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 495
|
r
|
Plot1.R
|
# Exploratory Data Analysis — Plot 1: histogram of Global Active Power for
# the two target days (2007-02-01 and 2007-02-02).
#Preparing data
# NOTE(review): hard-coded personal path — assumes the raw household power
# file sits in ~/Downloads; adjust before running elsewhere.
household_power_consumption <- read.csv("~/Downloads/household_power_consumption.txt", sep=";")
data<- household_power_consumption
# Parse the Date column (dd/mm/yyyy) and keep only the two days of interest.
data$Date <- as.Date(data$Date,"%d/%m/%Y")
data <- subset(data, Date == "2007-02-01" | Date == "2007-02-02")
# Combined date-time stamp; built here but not used by this particular plot.
datetime <- strptime(paste(data$Date,as.character(data$Time)),"%Y-%m-%d %H:%M:%S")
#Code plot 1
# NOTE(review): the character -> numeric coercion suggests the column is read
# as factor/character (presumably non-numeric missing markers in the raw
# file — confirm); non-numeric entries become NA and are dropped by hist().
hist(as.numeric(as.character(data$Global_active_power)), main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red")
|
c3c9b2096c33cbb74e04be1f0a7c4a0a418ec1a7
|
05f59cd61540ce4c9961d91377cff9008b78c009
|
/R/propensity_score_linear.R
|
32bd9112252f598ba496e1f2b6a04eab89208125
|
[
"MIT"
] |
permissive
|
rzgross/uRbanmatching
|
e3abddab1946e2f84c32378820585b96abe4840d
|
356b3d1ac10d72e3c5a20de08903f137c2c22965
|
refs/heads/master
| 2023-04-12T21:20:24.938362
| 2021-05-09T19:04:41
| 2021-05-09T19:04:41
| 355,984,522
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 937
|
r
|
propensity_score_linear.R
|
#' propensity_score_linear
#'
#' Builds a propensity-score estimator that predicts treatment from
#' covariates using either \code{glm} (binomial) or \code{lm}.
#'
#' @param use_linear_lm If \code{TRUE}, fit an ordinary linear model and
#'   clamp its predictions into [0, 1]; otherwise fit a logistic regression.
#'
#' @return A function that accepts a \code{train_test_list} (with elements
#'   \code{x_train}, \code{y_train}, \code{x_test}) and returns a numeric
#'   vector of predicted treatment probabilities for the test rows.
#' @export
propensity_score_linear <- function(use_linear_lm = FALSE) {
  function(train_test_list) {
    training <- as.data.frame(train_test_list[["x_train"]])
    training[["y"]] <- train_test_list[["y_train"]]
    testing <- as.data.frame(train_test_list[["x_test"]])
    if (use_linear_lm) {
      fit <- lm(y ~ ., data = training)
      raw <- predict(fit, newdata = testing, type = "response")
      # A linear model can predict outside [0, 1]; clamp into range.
      pmax(pmin(raw, 1), 0)
    } else {
      fit <- glm(y ~ ., data = training, family = "binomial")
      predict(fit, newdata = testing, type = "response")
    }
  }
}
|
771a5f401af9dcdd3100aeb78ec8050b25752945
|
6a9c07b8ee61bca7c38155410c24b0abb2217590
|
/hashmap2.R
|
3397eca929e87f61fbe5a765fb6953fae261d982
|
[] |
no_license
|
kim-carter/ASC2018
|
5004bc2d77604b71fdc43014483882b7f806c977
|
a2b6f58300d9d9f87e5553d926b1c63cc8403a0e
|
refs/heads/master
| 2020-03-27T04:29:29.744620
| 2018-08-28T12:07:58
| 2018-08-28T12:07:58
| 145,944,736
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 878
|
r
|
hashmap2.R
|
##
## Kim Carter 2018. Testing R hashmap package for hashtable speed
##
## Reads tab-separated key/value pairs from testdata.tsv one line at a time,
## inserts them into a hashmap::hashmap, and reports the elapsed seconds for
## every 100,000 inserts.
library(hashmap)

filename <- "testdata.tsv"
con <- file(filename, open = "r")
date()

t1 <- Sys.time()

# hashmap() has no empty constructor, so seed it with the file's first record.
line <- readLines(con, n = 1)
cols <- strsplit(line, "\t")
hm <- hashmap(cols[[1]][1], cols[[1]][2])
# BUG FIX: the original `counter<-counter` was a no-op, so the seed insert
# above was never counted and every progress report was off by one record.
counter <- 1

while (TRUE) {
  line <- readLines(con, n = 1)
  if (length(line) == 0) {
    break
  }
  cols <- strsplit(line, "\t")
  hm$insert(c(cols[[1]][1]), c(cols[[1]][2]))
  counter <- counter + 1
  # Report seconds elapsed per 100,000-record batch, then reset the clock.
  if (counter %% 100000 == 0) {
    t2 <- Sys.time()
    d <- difftime(time1 = t2, time2 = t1, units = "secs")
    cat(as.numeric(d), "\n")
    t1 <- t2
  }
}
close(con)
hm$keys()
17bc5650f082b08610cbbb779d562eaf9358d4b8
|
8007bfcdf202c5f0246658d0ba467bba0780b7a5
|
/man/sjp.likert.Rd
|
3aca9e4a03a23b94ad8e2e30ccb271833a42b675
|
[] |
no_license
|
harshagn/devel
|
a50432d9ecd6b655cee01f1cf63092b0da5bc2f3
|
cd67a4a135cf8f9bc9a618cc7c018eef3648caba
|
refs/heads/master
| 2021-01-02T08:40:37.695668
| 2014-09-10T16:14:16
| 2014-09-10T16:14:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,216
|
rd
|
sjp.likert.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{sjp.likert}
\alias{sjp.likert}
\title{Plot likert scales as centered stacked bars}
\usage{
sjp.likert(items, legendLabels = NULL, orderBy = NULL,
reverseOrder = FALSE, dropLevels = NULL, weightBy = NULL,
weightByTitleString = NULL, hideLegend = FALSE, title = NULL,
titleSize = 1.3, titleColor = "black", legendTitle = NULL,
includeN = TRUE, axisLabels.y = NULL, axisLabelSize = 1.1,
axisLabelAngle.x = 0, axisLabelColor = "gray30", valueLabelSize = 4,
valueLabelColor = "black", breakTitleAt = 50, breakLabelsAt = 30,
breakLegendTitleAt = 30, breakLegendLabelsAt = 28, gridRange = 1,
gridBreaksAt = 0.2, expand.grid = TRUE, barWidth = 0.5,
barColor = NULL, colorPalette = "GnBu", barAlpha = 1,
borderColor = NULL, axisColor = NULL, barOutline = FALSE,
barOutlineColor = "black", majorGridColor = NULL, minorGridColor = NULL,
hideGrid.x = FALSE, hideGrid.y = FALSE, axisTitle.x = NULL,
axisTitle.y = NULL, axisTitleColor = "black", axisTitleSize = 1.3,
theme = NULL, showTickMarks = FALSE, showValueLabels = TRUE,
jitterValueLabels = FALSE, showItemLabels = TRUE,
showSeparatorLine = FALSE, separatorLineColor = "grey80",
separatorLineSize = 0.3, legendPos = "right", legendSize = 1,
legendBorderColor = "white", legendBackColor = "white",
flipCoordinates = TRUE, printPlot = TRUE)
}
\arguments{
\item{items}{A data frame with each column representing one likert-item.}
\item{legendLabels}{A list or vector of strings that indicate the likert-scale-categories and which
appear as legend text.}
\item{orderBy}{Indicates whether the \code{items} should be ordered by total sum of positive or negative answers.
Use \code{"pos"} to order descending by sum of positive answers, \code{"neg"} for sorting descending
negative answers or \code{NULL} (default) for no sorting.}
\item{reverseOrder}{If \code{TRUE}, the item order (positive/negative) are reversed. Default is \code{FALSE}.}
\item{dropLevels}{Indicates specific factor levels that should be dropped from the items
before the likert scale is plotted. Default is \code{NULL}, hence all factor levels
are included. Exampe to drop first factor level: \code{dropLevels=c(1)}.}
\item{weightBy}{A weight factor that will be applied to weight all cases from \code{items}.}
\item{weightByTitleString}{If a weight factor is supplied via the parameter \code{weightBy}, the diagram's title
may indicate this with a remark. Default is \code{NULL}, so the diagram's title will not be modified when
cases are weighted. Use a string as parameter, e.g.: \code{weightByTitleString=" (weighted)"}}
\item{hideLegend}{Indicates whether legend (guide) should be shown or not.}
\item{title}{Title of the diagram, plotted above the whole diagram panel.}
\item{titleSize}{The size of the plot title. Default is 1.3.}
\item{titleColor}{The color of the plot title. Default is \code{"black"}.}
\item{legendTitle}{Title of the diagram's legend.}
\item{includeN}{If \code{TRUE} (default), the N of each item is included into axis labels.}
\item{axisLabels.y}{Labels for the y-axis (the labels of the \code{items}). These parameters must
be passed as list! Example: \code{axisLabels.y=list(c("Q1", "Q2", "Q3"))}
Axis labels will automatically be detected, when they have
a \code{"variable.lable"} attribute (see \code{\link{sji.setVariableLabels}}) for details).}
\item{axisLabelSize}{The size of category labels at the axes. Default is 1.1, recommended values range
between 0.5 and 3.0}
\item{axisLabelAngle.x}{Angle for axis-labels.}
\item{axisLabelColor}{User defined color for axis labels. If not specified, a default dark gray
color palette will be used for the labels.}
\item{valueLabelSize}{The size of value labels in the diagram. Default is 4, recommended values range
between 2 and 8}
\item{valueLabelColor}{The color of value labels in the diagram. Default is black.}
\item{breakTitleAt}{Wordwrap for diagram title. Determines how many chars of the title are displayed in
one line and when a line break is inserted into the title.}
\item{breakLabelsAt}{Wordwrap for diagram labels. Determines how many chars of the category labels are displayed in
one line and when a line break is inserted.}
\item{breakLegendTitleAt}{Wordwrap for diagram legend title. Determines how many chars of the legend's title
are displayed in one line and when a line break is inserted.}
\item{breakLegendLabelsAt}{Wordwrap for diagram legend labels. Determines how many chars of the legend labels are
displayed in one line and when a line break is inserted.}
\item{gridRange}{Sets the limit of the x-axis-range. Default is 1, so the x-scale ranges
from zero to 100 percent on both sides from the center. Valid values
range from 0 (0 percent) to 1 (100 percent).}
\item{gridBreaksAt}{Sets the breaks on the y axis, i.e. at every n'th position a major
grid is being printed. Valid values range from 0 to 1.}
\item{expand.grid}{If \code{TRUE} (default), the diagram has margins, i.e. the y-axis is not exceeded
to the diagram's boundaries.}
\item{barWidth}{Width of bars. Recommended values for this parameter are from 0.4 to 1.5}
\item{barColor}{User defined color for bars.
If not specified (\code{NULL}), a default red-green color palette for four(!) categories will be used
for the bar charts. You can use pre-defined color-sets that are independent from the amount of categories:
\itemize{
\item If barColor is \code{"brown"}, a brown-marine-palette will be used.
\item If barColor is \code{"violet"}, a violet-green palette will be used.
\item If barColor is \code{"pink"}, a pink-green palette will be used.
\item If barColor is \code{"brewer"}, use the \code{colorPalette} parameter to specify a palette of the color brewer.
}
Else specify your own color values as vector (e.g. \code{barColor=c("darkred", "red", "green", "darkgreen")})}
\item{colorPalette}{If \code{barColor} is \code{"brewer"}, specify a color palette from the color brewer here. All color brewer
palettes supported by ggplot are accepted here.}
\item{barAlpha}{Specify the transparency (alpha value) of bars.}
\item{borderColor}{User defined color of whole diagram border (panel border).}
\item{axisColor}{User defined color of axis border (y- and x-axis, in case the axes should have different colors than
the diagram border).}
\item{barOutline}{If \code{TRUE}, each bar gets a colored outline. Default is \code{FALSE}.}
\item{barOutlineColor}{The color of the bar outline. Only applies, if \code{barOutline} is set to \code{TRUE}.}
\item{majorGridColor}{Specifies the color of the major grid lines of the diagram background.}
\item{minorGridColor}{Specifies the color of the minor grid lines of the diagram background.}
\item{hideGrid.x}{If \code{TRUE}, the x-axis-gridlines are hidden. Default if \code{FALSE}.}
\item{hideGrid.y}{If \code{TRUE}, the y-axis-gridlines are hidden. Default if \code{FALSE}.}
\item{axisTitle.x}{A label for the x axis. Useful when plotting histograms with metric scales where no category labels
are assigned to the x axis.}
\item{axisTitle.y}{A label for the y axis. Useful when plotting histograms with metric scales where no category labels
are assigned to the y axis.}
\item{axisTitleColor}{The color of the x and y axis labels. refers to \code{axisTitle.x} and \code{axisTitle.y},
not to the tick mark or category labels.}
\item{axisTitleSize}{The size of the x and y axis labels. refers to \code{axisTitle.x} and \code{axisTitle.y},
not to the tick mark or category labels.}
\item{theme}{Specifies the diagram's background theme. Default (parameter \code{NULL}) is a gray
background with white grids.
\itemize{
\item Use \code{"bw"} for a white background with gray grids
\item \code{"classic"} for a classic theme (black border, no grids)
\item \code{"minimal"} for a minimalistic theme (no border,gray grids)
\item \code{"none"} for no borders, grids and ticks or
\item \code{"themr"} if you are using the \code{ggthemr} package
}
See \url{http://rpubs.com/sjPlot/custplot} for details and examples.}
\item{showTickMarks}{Whether tick marks of axes should be shown or not}
\item{showValueLabels}{Whether counts and percentage values should be plotted to each bar}
\item{jitterValueLabels}{If \code{TRUE}, the value labels on the bars will be "jittered", i.e. they have
alternating vertical positions to avoid overlapping of labels in case bars are
very short. Default is \code{FALSE}.}
\item{showItemLabels}{Whether x axis text (category names) should be shown or not}
\item{showSeparatorLine}{If \code{TRUE}, a line is drawn to visually "separate" each bar in the diagram.}
\item{separatorLineColor}{The color of the separator line. Only applies, if \code{showSeparatorLine} is \code{TRUE}}
\item{separatorLineSize}{The size of the separator line. only applies, if \code{showSeparatorLine} is \code{TRUE}}
\item{legendPos}{The position of the legend. Default is \code{"right"}. Use one of the following values:
\code{"right"}, \code{"left"}, \code{"bottom"}, \code{"top"}.}
\item{legendSize}{The size of the legend.}
\item{legendBorderColor}{The border color of the legend.}
\item{legendBackColor}{The background color of the legend.}
\item{flipCoordinates}{If \code{TRUE}, the x and y axis are swapped.}
\item{printPlot}{If \code{TRUE} (default), plots the results as graph. Use \code{FALSE} if you don't
want to plot any graphs. In either case, the ggplot-object will be returned as value.}
}
\value{
(Invisibly) returns the ggplot-object with the complete plot (\code{plot}) as well as the data frame that
was used for setting up the ggplot-object (\code{df}).
}
\description{
Plot likert scales as centered stacked bars. "Neutral" categories
(odd-numbered categories) will be removed from the plot.
}
\note{
Since package version 1.3, the parameter \code{legendLabels}, which represent the
value labels, are retrieved automatically if a) the variables in \code{items} come from a data frame
that was imported with the \code{\link{sji.SPSS}} function (because then value labels are
attached as attributes to the data) or b) when the variables are factors with named factor levels
(e.g., see column \code{group} in dataset \code{\link{PlantGrowth}}). However, you still
can use own parameters as axis- and legendlabels. \cr \cr
Transformation of data and ggplot-code taken from
\url{http://statisfactions.com/2012/improved-net-stacked-distribution-graphs-via-ggplot2-trickery/}
}
\examples{
# prepare data for dichotomous likert scale, 5 items
likert_2 <- data.frame(as.factor(sample(1:2, 500, replace=TRUE, prob=c(0.3,0.7))),
as.factor(sample(1:2, 500, replace=TRUE, prob=c(0.6,0.4))),
as.factor(sample(1:2, 500, replace=TRUE, prob=c(0.25,0.75))),
as.factor(sample(1:2, 500, replace=TRUE, prob=c(0.9,0.1))),
as.factor(sample(1:2, 500, replace=TRUE, prob=c(0.35,0.65))))
# create labels
levels_2 <- list(c("Disagree", "Agree"))
# prepare data for 4-category likert scale, 5 items
likert_4 <- data.frame(as.factor(sample(1:4, 500, replace=TRUE, prob=c(0.2,0.3,0.1,0.4))),
as.factor(sample(1:4, 500, replace=TRUE, prob=c(0.5,0.25,0.15,0.1))),
as.factor(sample(1:4, 500, replace=TRUE, prob=c(0.25,0.1,0.4,0.25))),
as.factor(sample(1:4, 500, replace=TRUE, prob=c(0.1,0.4,0.4,0.1))),
as.factor(sample(1:4, 500, replace=TRUE, prob=c(0.35,0.25,0.15,0.25))))
# create labels
levels_4 <- list(c("Strongly disagree", "Disagree", "Agree", "Strongly Agree"))
# prepare data for 6-category likert scale, 5 items
likert_6 <- data.frame(
as.factor(sample(1:6, 500, replace=TRUE, prob=c(0.2,0.1,0.1,0.3,0.2,0.1))),
as.factor(sample(1:6, 500, replace=TRUE, prob=c(0.15,0.15,0.3,0.1,0.1,0.2))),
as.factor(sample(1:6, 500, replace=TRUE, prob=c(0.2,0.25,0.05,0.2,0.2,0.2))),
as.factor(sample(1:6, 500, replace=TRUE, prob=c(0.2,0.1,0.1,0.4,0.1,0.1))),
as.factor(sample(1:6, 500, replace=TRUE, prob=c(0.1,0.4,0.1,0.3,0.05,0.15))))
# create labels
levels_6 <- list(c("Very strongly disagree", "Strongly disagree", "Disagree",
"Agree", "Strongly Agree", "Very strongly agree"))
# create item labels
items <- list(c("Q1", "Q2", "Q3", "Q4", "Q5"))
# plot dichotomous likert scale, ordered by "negative" values
sjp.likert(likert_2, legendLabels=levels_2, axisLabels.y=items, orderBy="neg")
# plot 4-category-likert-scale, no order
sjp.likert(likert_4, legendLabels=levels_4, axisLabels.y=items)
# plot 4-category-likert-scale, ordered by positive values,
# in brown color scale and with jittered value labels
sjp.likert(likert_6, legendLabels=levels_6, barColor="brown",
axisLabels.y=items, orderBy="pos", jitterValueLabels=TRUE)
}
\references{
\url{http://strengejacke.wordpress.com/sjplot-r-package/} \cr \cr
\url{http://strengejacke.wordpress.com/2013/07/17/plotting-likert-scales-net-stacked-distributions-with-ggplot-rstats/}
}
\seealso{
\code{\link{sjp.stackfrq}}
}
|
1efdd2c4bd5260ef1279f68533390a1d7b249e8b
|
802b6455d0aab96d7f7a5d1e48e67d97962a2859
|
/R_Lab_2/SGM_TMATH_390_R_lab2.R
|
c7e4dce87f65ed121bde5072ff61bdb3833486fe
|
[] |
no_license
|
stevexenios/Statistics
|
ebe66d6287b4b47ec77c847dfa26fa30cb7d67c7
|
ad9f57ff66e256fe93ea52996ddb12906d371762
|
refs/heads/master
| 2020-09-07T18:03:26.372068
| 2020-02-19T07:59:37
| 2020-02-19T07:59:37
| 220,871,301
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,759
|
r
|
SGM_TMATH_390_R_lab2.R
|
# =================================================================================================
# Steve G. Mwangi
# TMATH 390, Fall 2019 -- R Lab 2 (October 13, 2019)
# Objective: Estimation in R
# =================================================================================================

# C2 -- Inspect the working directory, point it at the lab folder, and load the data.
getwd()
setwd("C:/Users/steve/Desktop/UWT/Fall Classes/TMATH 390/R Documents/R Assignments/R_Lab_2")
data.df <- read.csv("data.csv")
head(data.df)

# C3 -- Summaries of the quantitative (Net worth) and qualitative (Gender) variables.
summary(data.df$Net.worth)
summary(data.df$Gender)

# C4 -- Summary of the chosen quantitative variable (Net worth).
summary(data.df$Net.worth)

# C5 -- Histogram of the quantitative variable, with axis label and title.
hist(data.df$Net.worth, xlab="Networth of 50 richest people in the world(in billions of dollars)", main="Networth distribution of top 50 Richest people")

# C6 -- (description only; no code)

# C7 -- Boxplots of net worth across gender, side by side
# (mfrow gives a 1x2 plotting grid; las = 1 keeps axis labels horizontal).
par(mfrow = c(1, 2), las = 1)
boxplot(data.df$Net.worth~data.df$Gender)

# C9 -- Group-wise summary statistics.
# Female:
mean(data.df$Net.worth[data.df$Gender == "Female"])
median(data.df$Net.worth[data.df$Gender == "Female"])
sd(data.df$Net.worth[data.df$Gender == "Female"])
# Male:
mean(data.df$Net.worth[data.df$Gender == "Male"])
median(data.df$Net.worth[data.df$Gender == "Male"])
sd(data.df$Net.worth[data.df$Gender == "Male"])
|
f0bf2f01a4a2a7fe59cb7e50bb455b58f6244b5a
|
e3e3843080406b83178105f59b2f366643f545a6
|
/man/createWebpages.Rd
|
313a56f98c1feb13950d309bb7446bfacab6d76b
|
[] |
no_license
|
nickmckay/lipdverseR
|
c1dcdb9e6752a41a00d7f9386cf684f24f3f1f46
|
56b01e8ce33dc9f71e39bdf1c5c978f3b33d1db7
|
refs/heads/master
| 2023-06-07T19:16:43.787187
| 2023-05-31T18:20:31
| 2023-05-31T18:20:31
| 182,172,405
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 307
|
rd
|
createWebpages.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nightlyUpdateDrake.R
\name{createWebpages}
\alias{createWebpages}
\title{Create lipdversePages old framework}
\usage{
createWebpages(params, data)
}
\arguments{
\item{data}{}
}
\description{
Create lipdversePages old framework
}
|
8f4709aed02ab7a26c5f8d9d9379dcd3f713280b
|
fcc552d20daeafd15451979ba1718cb7bda82634
|
/testUnits/util/runitDistance_GPS.r
|
988756088d68261eeab8b1c001b8a27f59972c9d
|
[] |
no_license
|
cristianchilipirea/crowdMovement
|
b248370e5733cae964763550a588e13a470b7962
|
b649c992fd07b40d84544fe47fac80c66c112767
|
refs/heads/master
| 2021-01-20T11:38:12.653112
| 2017-09-05T14:14:43
| 2017-09-05T14:14:43
| 101,675,043
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 309
|
r
|
runitDistance_GPS.r
|
source("Scripts/crowdMovement/util/distance_GPS.r")
# RUnit check for getDistance(): identical points must give 0 m, and the
# second coordinate pair is known to lie ~2720 m apart.
test.getDistance <- function() {
  pointsA <- data.frame(latitude = c(0, 42.05), longitude = c(0, 5.02))
  pointsB <- data.frame(latitude = c(0, 42.06), longitude = c(0, 5.05))
  expected <- c(0, 2720)
  # tolerance = 1 metre absorbs rounding in the great-circle computation
  checkEquals(getDistance(pointsA, pointsB), expected, tolerance = 1)
}
|
8105a1bdab81ec6930bb37d6cda3980f92ecc81c
|
1908bd86fad96d5e039ab69a2a2c722b4ab0bfbd
|
/R/pvaluate.R
|
d43641f295e896c3eee32ab5165dfac31d5a22f0
|
[] |
no_license
|
eliocamp/wave4
|
675d87878d2d0e4ea4490a0842c55e4030e50c6f
|
c6cb04c008ff32a17df990454ef9e84d5e72abea
|
refs/heads/main
| 2023-02-17T20:48:40.791224
| 2021-01-20T12:25:02
| 2021-01-20T12:25:02
| 330,792,390
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 544
|
r
|
pvaluate.R
|
#' Compute p.values
#'
#' Computes p.values from the result of [metR::FitLm()] (or any assumed
#' student-t-distributed statistic) with optional multiplicity adjustments.
#'
#' @param estimate estimate of the regression (or other fit)
#' @param std.error standard error of the estimate
#' @param df degrees of freedom
#' @param adjustment method for adjustment, see [stats::p.adjust()].
#'
#' @return A numeric vector of (optionally adjusted) two-sided p-values.
#' @export
pvaluate <- function(estimate, std.error, df, adjustment = "none") {
  # Two-sided p-value of the t statistic |estimate| / std.error ...
  t_stat <- abs(estimate) / std.error
  raw_p <- 2 * stats::pt(t_stat, df, lower.tail = FALSE)
  # ... followed by the requested multiple-comparison correction.
  stats::p.adjust(raw_p, method = adjustment)
}
|
ef360b1260508871dad1c57f1fb9300049c19fcb
|
fc438fdcd495a4248d564a88f334a23779c2ad14
|
/R/ui.R
|
e185ab0139d3bde6621f607fe9665a292646ee7d
|
[] |
no_license
|
afukushima/PVT
|
8e7d5c4fee8f8fef6a2a7cc257b601f1115a6cba
|
cc8b512d4cd2e5a3d6af5f991f7e371f90b5151c
|
refs/heads/master
| 2016-09-06T01:09:49.966980
| 2015-09-17T06:02:50
| 2015-09-17T06:02:50
| 41,794,671
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,303
|
r
|
ui.R
|
# Shiny UI definition for the PRIMe Visualization Tools application.
library(shiny)
# Tab definitions for each supported dataset type are kept in separate files;
# local = TRUE sources them into this script's environment so the *Tabs
# objects referenced below are visible.
source('ui-components/atmet-tabs.R', local=TRUE)
source('ui-components/meko-tabs.R', local=TRUE)
source('ui-components/gcms-tabs.R', local=TRUE)
source('ui-components/generic-tabs.R', local=TRUE)
shinyUI(
  pageWithSidebar(
    headerPanel('PRIMe Visualization Tools', 'PRIMe Visualizer'),
    sidebarPanel(
      # Inject the app's CSS/JS into <head>; singleton() ensures the assets
      # are only included once even if this UI chunk is rendered repeatedly.
      tagList(
        singleton(
          tags$head(
            includeCSS("www/css/smoothness/jquery-ui-1.10.3.custom.min.css"),
            includeCSS("www/css/custom.css"),
            includeScript("www/js/jquery-ui-1.10.3.custom.min.js"),
            includeScript("www/js/scripts.js")
          )
        )
      ),
      tags$p(id='about',
        'RIKEN PRIMe provides web-based data analysis and visualization
        tools for public access. Users may analyze datasets from both
        AtMetExpress and MeKO, as well as upload custom datasets.'),
      tags$hr(),
      # Read-only summary of the current analysis parameters (server-rendered).
      h5('Analysis parameters'),
      uiOutput('apParamsSummary'),
      # Download links: which set is shown depends on the active configuration
      # (input.apConfig); the conditions below are JavaScript expressions
      # evaluated in the browser.
      h5('Downloads'),
      conditionalPanel(
        condition = 'input.apConfig !== "MeKO" & input.apConfig !== "GC-MS (RIKEN format)" & input.apConfig !== "Generic"',
        p('None')
      ),
      conditionalPanel(
        condition = 'input.apConfig === "MeKO"',
        uiOutput('mekoDownloads')
      ),
      conditionalPanel(
        condition = 'input.apConfig === "GC-MS (RIKEN format)"',
        uiOutput('gcmsDownloads')
      ),
      conditionalPanel(
        condition = 'input.apConfig === "Generic"',
        uiOutput('genericDownloads')
      ),
      tags$hr(),
      actionButton('openApEditor', 'Edit parameters')
    ),
    mainPanel(
      uiOutput('apHiddenVars'),
      uiOutput('apDialog'),
      textOutput("text1"),
      # Before any configuration is chosen, show a hint instead of tabs.
      conditionalPanel(
        condition = 'input.apConfig !== "AtMetExpress" & input.apConfig !== "MeKO" & input.apConfig !== "GC-MS (RIKEN format)" & input.apConfig !== "Generic"',
        class='ui-state-highlight',
        tags$p('To begin, edit the analysis parameters by clicking the button on the left')
      ),
      # One analysis-tab set per dataset type, shown for the matching config.
      conditionalPanel(
        condition = 'input.apConfig === "AtMetExpress"',
        atMetAnalysisTabs
      ),
      conditionalPanel(
        condition = 'input.apConfig === "MeKO"',
        mekoAnalysisTabs
      ),
      conditionalPanel(
        condition = 'input.apConfig === "GC-MS (RIKEN format)"',
        gcmsAnalysisTabs
      ),
      conditionalPanel(
        condition = 'input.apConfig === "Generic"',
        genericAnalysisTabs
      )
    )
  )
)
|
41ac349124d37a17fc9e3309cb1ae63f3c7cab9e
|
dc5ddbb8d0a92321a988b4c9f161e09be21e88c0
|
/5 - Analysing Backtesting Results.R
|
cd07af497f2ed17b553abd2068a3bcbac98fcbad
|
[] |
no_license
|
ssh352/Trading
|
857b2002367b197ac39fe5e0905372921976acd7
|
b1490100148f2410741438cf5d0b22bf2d77ba6d
|
refs/heads/master
| 2020-04-25T10:46:10.497855
| 2018-10-11T11:44:18
| 2018-10-11T11:44:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,538
|
r
|
5 - Analysing Backtesting Results.R
|
# Analyze the trading strategy results produced by the backtest.
# NOTE(review): assumes portfolio.st (a blotter portfolio name) was defined by
# an earlier script in this series — confirm it is in scope before sourcing.
################
# Chart trades #
################
# Plot positions, trades and P&L for the "NSEI" symbol of this portfolio.
chart.Posn(portfolio.st,"NSEI")
###############
# Trade Stats #
###############
# The tradeStats function returns the trade-level statistics within a
# portfolio: number of trades, number of transactions, net profit, win/loss
# ratio, Sharpe ratio etc.
# We will learn about these trade statistics in detail in this chapter,
# but first let's just print all the trade stats using tradeStats.
trade_stats <- tradeStats(portfolio.st)
# Transpose so each statistic becomes a row; that renders better with kable.
trade_stats1 <- as.data.frame(t(tradeStats(portfolio.st)))
knitr::kable(trade_stats1)
# Now let us see what each of these trade stats means.
#####################
# Basic trade stats #
#####################
# In the output trade stats:
#   Portfolio  is the name of the portfolio,
#   Symbol     is the symbol of the stock,
#   Num.Txns   is the number of transactions made using the strategy, and
#   Num.Trades is the number of trades that are done using the strategy.
knitr::kable(trade_stats1[c("Portfolio","Symbol","Num.Txns","Num.Trades"),])
#############################
# Profit n Loss trade stats #
#############################
# Net.Trading.PL is the total profit/loss made due to all trades; a positive
#   value indicates profit and a negative value indicates loss.
# Avg.Trade.PL is the average profit/loss made due to all trades.
# Med.Trade.PL is the median of profit/loss made due to all trades.
#   With extreme profit/loss values the average will be biased; in such cases
#   the median gives a better picture of the central tendency.
# Std.Dev.Trade.PL is the standard deviation of the net profit/loss of trades
#   and gives an idea about the spread of net profit/loss.
# Std.Err.Trade.PL is the standard error of the net profit/loss, a measure of
#   choppiness of the equity line; a lower standard error is desirable.
# Similarly Avg.Daily.PL, Med.Daily.PL, Std.Dev.Daily.PL and Std.Err.Daily.PL
#   give the average, median, standard deviation and standard error of the
#   profit/loss respectively, considering the days on which transactions are
#   made.
knitr::kable(trade_stats1[c("Net.Trading.PL","Avg.Trade.PL","Med.Trade.PL","Std.Dev.Trade.PL","Std.Err.Trade.PL","Avg.Daily.PL","Med.Daily.PL","Std.Dev.Daily.PL","Std.Err.Daily.PL"),])
###################
# Win Loss trades #
###################
|
5788aa915d08772b6e903aecf395476a47274d60
|
7172c9fc447a5cc688118b4c7daa5ac4ca0fef70
|
/man/OCRdataFields.Rd
|
eb175a04fc702da07f643b254e243913669e5618
|
[] |
no_license
|
jniedballa/camtrapR
|
0b2d6dd7f3b204494f07a8230a5990916c95b5d7
|
0bcbff865569fde24a19199e3a4deaa5de3aabf9
|
refs/heads/master
| 2023-08-04T08:58:23.455756
| 2023-08-03T03:07:16
| 2023-08-03T03:07:16
| 147,821,210
| 38
| 3
| null | 2021-03-20T11:34:11
| 2018-09-07T12:36:39
|
R
|
UTF-8
|
R
| false
| true
| 4,743
|
rd
|
OCRdataFields.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OCRdataFields.R
\name{OCRdataFields}
\alias{OCRdataFields}
\title{Optical character recognition (OCR) from data fields in digital images}
\usage{
OCRdataFields(inDir, geometries, invert = FALSE)
}
\arguments{
\item{inDir}{character. Directory containing camera trap images (or
subdirectories containing images)}
\item{geometries}{list. A (possibly named) list of geometry strings defining
the image area(s) to extract.}
\item{invert}{logical. Invert colors in the image? Set to TRUE if text in
data field is white on black background. Leave if FALSE if text is black in
white background.}
}
\value{
A \code{data.frame} with original directory and file names, and
additional columns for the OCR data of each extracted geometry.
}
\description{
Extracts information from the data fields in camera trap images (not the
metadata). Many camera traps include data fields in camera trap images,
often including date and time of images, and sometimes other information.
This function extracts the information from these fields using optical
character recognition provided by the package \pkg{tesseract} after reading
images using the package \pkg{magick}.
}
\details{
Normally all these information should be in the image metadata. This
function is meant as a last resort if image metadata are unreadable or were
removed from images. OCR is not perfect and may misidentify characters, so
check the output carefully.
The output of this function can be used in
\code{\link{writeDateTimeOriginal}} to write date/time into the
DateTimeOriginal tag in image metadata, making these images available for
automatic processing with \code{\link{recordTable}} and other functions that
extract image metadata.
This function reads all images in inDir (including subdirectories), crops
them to the geometries in the "geometries" list, and performs optical
character recognition (OCR) on each of these fields (leveraging the magick
and tesseract packages).
Geometries are defined with \code{geometry_area} from \pkg{magick}. See
\code{\link[magick]{geometry}} for details on how to specify geometries with
\code{geometry_area}. The format is: "widthxheight+x_off+y_off", where:
\describe{ \item{width}{width of the area of interest} \item{height}{height
of the area of interest} \item{x_off}{offset from the left side of the
image} \item{y_off}{offset from the top of the image} }
Units are pixels for all fields. digiKam can help in identifying the correct
specification for geometries. Open the Image Editor, left-click and draw a
box around the data field of interest. Ensure the entire text field is
included inside the box, but nothing else. Now note two pairs of numbers at
the bottom of the window, showing the offsets and box size as e.g.:
"(400, 1800) (300 x 60)"
This corresponds to the geometry values as follows:
"(x_off, y_off) (width x height)"
Using these values, you'd run:
\code{geometry_area(x_off = 400, y_off = 1800, width = 300, height = 60)}
and receive
"300x60+400+1800"
as your geometry.
OCR in tesseract has problems with white font on black background. If that
is the case in your images, set \code{invert} to \code{TRUE} to invert the
image and ensure OCR uses black text on white background.
Even then, output will not be perfect. Error rates in OCR depend on multiple
factors, including the text size and font type used. We don't have control
over these, so check the output carefully and edit as required.
}
\examples{
\dontrun{
# dontrun is to avoid forcing users to install additional dependencies
wd_images_OCR <- system.file("pictures/full_size_for_ocr", package = "camtrapR")
library(magick)
# define geometries
geometry1 <- geometry_area(x_off = 0, y_off = 0, width = 183, height = 37)
geometry2 <- geometry_area(x_off = 196, y_off = 0, width = 200, height = 17)
geometry3 <- geometry_area(x_off = 447, y_off = 0, width = 63, height = 17)
geometry4 <- geometry_area(x_off = 984, y_off = 0, width = 47, height = 17)
geometry5 <- geometry_area(x_off = 0, y_off = 793, width = 320, height = 17)
# combine geometries into list
geometries <- list(date = geometry1,
time = geometry2,
sequence_id = geometry3,
temperature = geometry4,
camera_model = geometry5)
df_image_data <- OCRdataFields(inDir = wd_images_OCR,
geometries = geometries,
invert = TRUE)
df_image_data
# note the mistake in "camera_model"
# it should be "PC850", not "PC8S0O"
# date and time are correct though
}
}
\seealso{
\code{\link{writeDateTimeOriginal}}
}
\author{
Juergen Niedballa
}
|
b73655e1a4b8909a2c67adfde9a989768dc13f63
|
97cfe591bfc17d23e1d08d3db8c652b1c9b66647
|
/R/compile.R
|
7330171f60a09d857a8057bef46e3030bb8c4488
|
[
"Apache-2.0"
] |
permissive
|
sibusiso16/sparklyr
|
dfc87fa828e7f390ad04016ab9b9bb6c81024e98
|
33fc9c96534d6dd90646430c22a5bfb3fc159316
|
refs/heads/master
| 2020-07-12T05:41:00.856428
| 2016-07-22T06:41:42
| 2016-07-22T06:41:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,198
|
r
|
compile.R
|
#' @import rprojroot
#' @import digest
# Build the sparklyr companion JAR for a given Spark/Hadoop version.
# Compiles the scala sources under inst/scala with 'scalac' against the jars
# of a local Spark installation, then packages the resulting classes into
# inst/java/sparklyr-<version>.jar.  An md5 digest of the scala sources is
# cached next to them so the build is skipped when nothing changed.
spark_compile <- function(spark_version = "1.6.1", hadoop_version = "2.6") {
  # e.g. "1.6.1" -> numeric part "1.6.1" (for file names) and
  #      "1.6.1" -> "161" (suffix used to select version-specific sources)
  version_numeric <- gsub("[-_a-zA-Z]", "", spark_version)
  version_sufix <- gsub("\\.|[-_a-zA-Z]", "", spark_version)
  jar_name <- paste0("sparklyr-", version_numeric, ".jar")
  # Build-time-only dependencies are installed on demand.
  if (!requireNamespace("rprojroot", quietly = TRUE))
    install.packages("rprojroot")
  library(rprojroot)
  root <- rprojroot::find_package_root_file()
  Sys.setenv(R_SPARKLYR_INSTALL_INFO_PATH = file.path(root, "inst/extdata/install_spark.csv"))
  if (!requireNamespace("digest", quietly = TRUE))
    install.packages("digest")
  library(digest)
  sparklyr_path <- file.path(root, "inst", "java", jar_name)
  # Collect the scala sources to compile; files carrying a numeric version
  # suffix (e.g. "foo_161.scala") are only included for the version being
  # built, unsuffixed *.scala files are always included.
  sparklyr_scala <- lapply(
    Filter(
      function(e) {
        # if filename has version only include version being built
        if (grepl(".*_\\d+\\.scala", e)) {
          grepl(version_sufix, e)
        }
        else {
          grepl(".*\\.scala$", e)
        }
      },
      list.files(file.path(root, "inst", "scala"))
    ),
    function(e) file.path(root, "inst", "scala", e)
  )
  sparklyr_scala_digest <- file.path(root, paste0(
    "inst/scala/sparklyr-", version_numeric, ".md5"
  ))
  # Concatenate a textual rendering of all sources into a temp file so a
  # single md5 can summarize the full source set.
  sparklyr_scala_contents <- paste(lapply(sparklyr_scala, function(e) readLines(e)))
  sparklyr_scala_contents_path <- tempfile()
  sparklyr_scala_contents_file <- file(sparklyr_scala_contents_path, "w")
  writeLines(sparklyr_scala_contents, sparklyr_scala_contents_file)
  close(sparklyr_scala_contents_file)
  # Bail if 'sparklyr.*' hasn't changed
  md5 <- tools::md5sum(sparklyr_scala_contents_path)
  if (file.exists(sparklyr_scala_digest) && file.exists(sparklyr_path)) {
    contents <- readChar(sparklyr_scala_digest, file.info(sparklyr_scala_digest)$size, TRUE)
    if (identical(contents, md5[[sparklyr_scala_contents_path]])) {
      return()
    }
  }
  message("** building '", jar_name, "' ...")
  # Record the new digest so the next build can short-circuit.
  cat(md5, file = sparklyr_scala_digest)
  # Small helper: echo a shell command then run it.
  execute <- function(...) {
    cmd <- paste(...)
    message("*** ", cmd)
    system(cmd)
  }
  # Both the scala compiler and the JDK 'jar' tool must be on the PATH.
  if (!nzchar(Sys.which("scalac")))
    stop("failed to discover 'scalac' on the PATH")
  if (!nzchar(Sys.which("jar")))
    stop("failed to discover 'jar' on the PATH")
  # Work in temporary directory (as temporary class files
  # will be generated within there)
  dir <- file.path(tempdir(), paste0("sparklyr-", version_sufix, "-scala-compile"))
  if (!file.exists(dir))
    if (!dir.create(dir))
      stop("Failed to create '", dir, "'")
  owd <- setwd(dir)
  # Get potential installation paths; install Spark first if the lookup fails.
  install_info <- tryCatch(
    spark_install_find(spark_version, hadoop_version),
    error = function(e) {
      spark_install(spark_version, hadoop_version)
      spark_install_find(spark_version, hadoop_version)
    }
  )
  # list jars in the installation folder, trying both layouts used by Spark
  # installs ("jars" and "lib")
  candidates <- c("jars", "lib")
  jars <- NULL
  for (candidate in candidates) {
    jars <- list.files(
      file.path(install_info$sparkVersionDir, candidate),
      full.names = TRUE,
      pattern = "jar$"
    )
    if (length(jars))
      break
  }
  if (!length(jars))
    stop("failed to discover Spark jars")
  # construct classpath
  CLASSPATH <- paste(jars, collapse = .Platform$path.sep)
  # ensure 'inst/java' exists
  inst_java_path <- file.path(root, "inst/java")
  if (!file.exists(inst_java_path))
    if (!dir.create(inst_java_path, recursive = TRUE))
      stop("failed to create directory '", inst_java_path, "'")
  # call 'scalac' compiler; the previous CLASSPATH is saved and restored after
  classpath <- Sys.getenv("CLASSPATH")
  # set CLASSPATH environment variable rather than passing
  # in on command line (mostly aesthetic)
  Sys.setenv(CLASSPATH = CLASSPATH)
  execute("scalac", paste(shQuote(sparklyr_scala), collapse = " "))
  Sys.setenv(CLASSPATH = classpath)
  # call 'jar' to create our jar from the classes scalac emitted under
  # the "sparklyr" package directory
  class_files <- file.path("sparklyr", list.files("sparklyr", pattern = "class$"))
  execute("jar cf", sparklyr_path, paste(shQuote(class_files), collapse = " "))
  # double-check existence of jar
  if (file.exists(sparklyr_path)) {
    message("*** ", basename(sparklyr_path), " successfully created.")
  } else {
    stop("*** failed to create ", jar_name)
  }
  setwd(owd)
}
|
5dadddabc9b5521bb16836f090f335bd73ddf3b4
|
f4c80419f369de8034301b321a30a71f56909aee
|
/hypergeometric.R
|
b73d2f3bb6530b68e734a3980277b44e3d155a2f
|
[
"MIT"
] |
permissive
|
embunnik/hypergeometric
|
a88c8cbd8c6f0d8c82a5104a30ff2dec75c05361
|
d9e4bc013fbee8f3d5882d19323512494e104222
|
refs/heads/master
| 2021-01-20T18:53:06.713164
| 2016-06-16T00:18:40
| 2016-06-16T00:18:40
| 61,249,233
| 0
| 0
| null | 2016-06-16T00:17:42
| 2016-06-16T00:10:40
| null |
UTF-8
|
R
| false
| false
| 1,632
|
r
|
hypergeometric.R
|
## Probability of getting an overlap of at least a particular size given two
## lists, under the hypergeometric distribution.
#### Data input is of the format:
##   column_1       column_2        column_3         column_4
##   overlap_size   size_of_list_A  Population_size  size_of_list_B
##   ...            ...             ...              ...
## where:
##   overlap_size    is the intersection of list A and list B
##   size_of_list_A  is the number of proteins in list A (e.g. the number of
##                   proteins in the reference proteome annotated as having a
##                   particular feature)
##   Population_size is the entire population size to select from (e.g. the
##                   reference proteome size or the number of proteins in the
##                   'conservome')
##   size_of_list_B  is the number of proteins in list B (e.g. the number of
##                   proteins in the mRBPome annotated as having a particular
##                   feature).
##
## Read the csv file containing the data input (format above), compute a raw
## p-value for obtaining an overlap at least this large under the
## hypergeometric distribution, adjust it using the Benjamini-Hochberg false
## discovery rate correction, and write a csv file with the results.
data_input <- read.csv("data_input_file.csv")
# P(X >= overlap_size).  lower.tail = FALSE evaluates the upper tail directly,
# which is numerically more accurate than 1 - phyper(...) when the p-value is
# very small (1 - p underflows for p within machine epsilon of 1).
raw_p <- phyper(data_input[, 1] - 1, data_input[, 2],
                data_input[, 3] - data_input[, 2], data_input[, 4],
                lower.tail = FALSE)
BH_adjusted_p_values <- p.adjust(raw_p, method = "BH")
write.csv(cbind(data_input, BH_adjusted_p_values),
          file = "my_output_results.csv", quote = FALSE)
|
e24afc07f850cfcf3994859618e2521e2fbe4faa
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Mathematical_Statistics_And_Data_Analysis_by_John_A_Rice/CH1/EX1.4.2.B/Ex1_4_2_B.R
|
28a9ebb41a3ad6d15aa0605fe04431484df4d671
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 77
|
r
|
Ex1_4_2_B.R
|
# Page 10
# Number of ordered line-ups of 5 people chosen from 10:
# P(10, 5) = 10! / (10 - 5)!  (same value as choose(10, 5) * 5!).
total_lineup <- factorial(10) / factorial(10 - 5)
print(total_lineup)
|
f9ef6d77a746ee5c65312f4a65603f9bf7d28d87
|
0e7bd9d49f15674ba5415c10e721923520e11bd2
|
/R/surface.kernel.R
|
94f9def17d1f18da779b9005ef2b4977f402c566
|
[] |
no_license
|
syunhong/seg
|
8bbf3ac80aa31dab57652e7e8350d2449f55e622
|
6ef0afe116c16b0e55e5f47a95a9e4c162e60117
|
refs/heads/master
| 2022-08-20T22:33:19.085866
| 2022-08-12T21:59:15
| 2022-08-12T21:59:15
| 92,493,454
| 12
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,596
|
r
|
surface.kernel.R
|
# ------------------------------------------------------------------------------
# Internal function 'surface.kernel'
#
# Kernel-smooths per-column population counts onto a regular grid using
# splancs::kernel2d, returning the grid coordinates and one smoothed-density
# column per input column (long format, as required by spseg()).
#
#   coords  two-column matrix of point coordinates (x, y)
#   data    data frame / matrix of non-negative integer counts, one column per
#           population group
#   sigma   kernel bandwidth (passed to kernel2d as h0)
#   nrow,
#   ncol    grid dimensions
#   window  polygon(s) delimiting the study area (splancs poly format)
#   verbose emit progress messages?
#
# Author: Seong-Yun Hong <hong.seongyun@gmail.com>
# ------------------------------------------------------------------------------
surface.kernel <- function(coords, data, sigma, nrow, ncol, window, verbose) {
  if (verbose){
    begTime <- Sys.time(); fn <- match.call()[[1]]
    message(fn, ": kernel smoothing of the population data ...")
  }
  x <- coords[,1]; y <- coords[,2]
  for (i in seq_len(ncol(data))) {
    if (verbose)
      message(fn, ": processing column ", i)
    # Replicate each point by its count so kernel2d weights it accordingly.
    wgtXY <- cbind(rep(x, data[,i]), rep(y, data[,i]))
    tmp1 <- splancs::kernel2d(wgtXY, window, h0 = sigma, nx = ncol, ny = nrow,
                              quiet = TRUE)
    # Transform the result to the format needed for spseg()
    tmp2 <- cbind(expand.grid(tmp1$x, tmp1$y), as.numeric(tmp1$z))
    colnames(tmp2) <- c("x", "y", "z")
    if (i == 1) {
      pixels <- as.matrix(tmp2[,1:2])
      # Keep 'values' a matrix even when 'data' has a single column; a plain
      # vector would break values[, 1] and colnames(values) below.
      values <- matrix(tmp2[,3], ncol = 1)
    } else if (i > 1) {
      values <- cbind(values, tmp2[,3])
    }
  }
  # Remove grid points that fall outside of the polygons (NA density).
  outside <- which(is.na(values[,1]))
  if (length(outside) > 0) {
    # drop = FALSE prevents collapsing to a vector when only one row survives.
    pixels <- pixels[-outside, , drop = FALSE]
    values <- values[-outside, , drop = FALSE]
  }
  if (verbose){
    tt <- as.numeric(difftime(Sys.time(), begTime, units = "sec"))
    message(fn, ": done! [", tt, " seconds]")
  }
  colnames(pixels) <- c("x", "y")
  colnames(values) <- colnames(data)
  list(coords = pixels, data = values)
}
|
c7c2f01dca51d740e41f63d8bc6c9fde3a699b9f
|
2491ce3e1bd5762df5d129b7f4826c66723780df
|
/R/fun.gld.all.vary.emp.R
|
e403ca0a0f0a1d57bb34fe41a780c122714dfff5
|
[] |
no_license
|
cran/GLDreg
|
60e3cf2d6890604d98aad9cd09080bdc7758cb25
|
4d6ad22ceada267cf3a46ef8b4cab5b9006ae022
|
refs/heads/master
| 2022-06-04T22:22:08.379317
| 2022-05-13T06:30:09
| 2022-05-13T06:30:09
| 26,482,634
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 602
|
r
|
fun.gld.all.vary.emp.R
|
# Empirical quantile-regression fit at level q: starting from the column-wise
# q-quantiles of fit.simu, search (via optim) for coefficients k such that the
# proportion of non-positive residuals y - x %*% k matches q.
# Returns a two-element list: the raw optim() result and a named vector of the
# coefficients plus the objective value and convergence code.
fun.gld.all.vary.emp <-
function (q, fit, fit.simu, maxit = 20000, method = "Nelder-Mead")
{
    design <- fit$x
    response <- fit$y
    # Starting values: per-column q-quantiles of the simulated coefficients.
    start <- apply(fit.simu, 2, quantile, probs = q)
    # Squared gap between the empirical proportion of residuals <= 0 and q.
    prop_gap <- function(k, x, y, q) {
        resid <- y - x %*% c(k)
        (sum(resid <= 0) / length(resid) - q)^2
    }
    sol <- optim(start, prop_gap, x = design, y = response, q = q,
                 control = list(maxit = maxit), method = method)
    est <- fit$"Estimated"
    # Coefficient names come from fit$Estimated minus its last four entries.
    coef_names <- names(est[-seq(length(est) - 3, length(est))])
    summary_vec <- setNames(c(sol$par, sol$value, sol$convergence),
                            c(coef_names, "Objective Value", "Convergence"))
    list(sol, summary_vec)
}
|
69b5a56d22fa003a932f6bc3403f2394195f6879
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rdfp/examples/dfp_full_report_wrapper.Rd.R
|
9944aa6e3633c7f3d162a85cf941f942623818e0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,015
|
r
|
dfp_full_report_wrapper.Rd.R
|
library(rdfp)
### Name: dfp_full_report_wrapper
### Title: Take report request and return data.frame
### Aliases: dfp_full_report_wrapper
### ** Examples
## Not run:
##D request_data <- list(reportJob =
##D list(reportQuery =
##D list(dimensions = 'MONTH_AND_YEAR',
##D dimensions = 'AD_UNIT_ID',
##D dimensions = 'AD_UNIT_NAME',
##D dimensions = 'ADVERTISER_NAME',
##D dimensions = 'ORDER_NAME',
##D dimensions = 'LINE_ITEM_NAME',
##D adUnitView = 'FLAT',
##D columns = 'AD_SERVER_IMPRESSIONS',
##D columns = 'AD_SERVER_CLICKS',
##D dateRangeType = 'LAST_WEEK')))
##D report_data <- dfp_full_report_wrapper(request_data)
## End(Not run)
|
f61daa91f40a9d7f64a917c1366dc5446177558b
|
157bcf7190b4663915e23bbd3342fd99101b782e
|
/R/create_colormaps.R
|
5967a9bfb55658fd25b26fe62681d0645a2eedfb
|
[] |
no_license
|
zachcp/phyloTreeWidget
|
c241d1dae2bfe77f426a7446dbd7326c8ab350ba
|
f041640c72bd303eb42da10f8fc445fc0f5d99e4
|
refs/heads/master
| 2022-03-07T22:31:09.772298
| 2019-10-04T17:25:28
| 2019-10-04T17:25:28
| 116,187,708
| 2
| 0
| null | 2019-10-23T01:12:39
| 2018-01-03T22:20:12
|
JavaScript
|
UTF-8
|
R
| false
| false
| 1,671
|
r
|
create_colormaps.R
|
#' create_colormaps
#'
#' Create colormaps for the character/factor columns of a data frame.
#'
#' @param df Required. A dataframe containing information about nodes in the
#'   tree. The first column is assumed to hold node identifiers and is skipped.
#' @param custommaps Optional. Currently a stub. There needs to be a way to
#'   pass in custom colors.
#' @param exclude_attr a list of columns to exclude from the final output. The
#'   defaults are the outputs from phylobase.
#' @return A named list with one entry per mapped column; each entry is a list
#'   mapping the column's unique values to "#rrggbb" hex colors.
#' @importFrom colormap colormap_pal
#'
#' @export
create_colormaps <- function(df, custommaps=NULL,
                             exclude_attr = c('node', 'ancestor', 'node.type')) {
  colmap <- list()
  # Skip the first (identifier) column.  names(df)[-1] is also correct for a
  # one-column frame, where 2:length(names(df)) would count backwards (2, 1).
  cols <- names(df)[-1]
  # For now simply assign colors based on a discrete scale of unique values.
  # TODO: handle options, continuous scales, etc.
  for (col in cols) {
    if (!col %in% exclude_attr) {
      # Handle only character and factor columns.  inherits() also copes with
      # columns whose class attribute has several entries (e.g. ordered
      # factors), where class(x) == "factor" style checks misbehave.
      if (inherits(df[[col]], c("character", "factor"))) {
        # as.character() is a no-op on characters and converts factor levels
        # to their labels, so one expression covers both cases.
        colvals <- as.character(sort(unique(df[[col]])))
        # Create the colormap for this column.
        local_colors <- colormap_pal()(length(colvals))
        # Keep "#" plus six hex digits to drop the alpha channel; substr() is
        # vectorized, and unname() guards against stray names on the palette.
        local_colors <- unname(substr(local_colors, 1, 7))
        colmap[[col]] <- setNames(as.list(local_colors), colvals)
      }
    }
  }
  return(colmap)
}
|
4924ce2c61b41fe63dec77ecb6a0906fd4b37f66
|
b3357449a175852145c9099327649df50046d0c4
|
/src/utils/generate.R
|
6a649d1e4b4d35dad29816e74c696046c5bf030b
|
[] |
no_license
|
savchukndr/temat14
|
acb2e5c28beee21c67c7deca942b4edca34ca6b5
|
3fb63c9c2128de0784047be72d9daf2090bf098c
|
refs/heads/master
| 2021-05-14T17:21:29.412546
| 2018-01-15T20:55:35
| 2018-01-15T20:55:35
| 116,044,592
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 623
|
r
|
generate.R
|
# Build a users-by-items data frame: one row per user id, one zero-initialised
# rating column per item.
generate.generate_empty_df <- function(unique_user_id, unique_item) {
  result <- data.frame(user_id = unique_user_id)
  # Every item becomes a column named after its string form, starting at 0.
  for (item in unique_item) {
    result[[toString(item)]] <- 0
  }
  result
}
# Fill a user-by-item data frame with ratings.
#
#   df       frame produced by generate.generate_empty_df (user_id column plus
#            one column per item)
#   user_id  vector of user ids, parallel to 'item' and 'rating'
#   item     vector of item ids (matched against column names via toString)
#   rating   vector of ratings to write
#
# Assumes each user_id matches exactly one row of df. Returns the filled frame.
generate.generate_complete_df <- function(df, user_id, item, rating) {
  # seq_along is safe for empty input (seq(length(rating)) is not);
  # the unused per-iteration mean(rating) of the original was dead code.
  for (i in seq_along(rating)) {
    col <- toString(item[[i]])
    row <- which(df$user_id == user_id[[i]])
    df[[col]][[row]] <- rating[[i]]
  }
  df
}
|
fc440a779114b774c662bfde6c1f15944e855785
|
de37330b37b7d0c0eaea794d634b2d44385a1585
|
/R/namespace.R
|
c6983f3653355ccfee1a1df7a442e656391de3ce
|
[
"MIT"
] |
permissive
|
poissonconsulting/slobr
|
472d66b5d9dd43d689688f88d79ba80e30b814ea
|
37beb9c01c473d3fd765e9a4b98db82042f1e146
|
refs/heads/main
| 2023-06-07T11:23:55.616470
| 2022-06-17T22:09:48
| 2022-06-17T22:09:48
| 188,949,094
| 0
| 0
|
NOASSERTION
| 2021-03-08T21:47:21
| 2019-05-28T03:42:44
|
R
|
UTF-8
|
R
| false
| false
| 149
|
r
|
namespace.R
|
#' @import shiny glue RSQLite bsplus
#' @importFrom shinyjs reset useShinyjs
#' @importFrom utils zip
#' @rawNamespace import(chk, except = p)
NULL
|
61c00bef8c85643cfc9452666fd094cab1596b54
|
db00d3a1fde561492ac49104f272a7c98aebf0f8
|
/R/mait.R
|
2f499b065e7e5214e1b1507cf99e5f2a22543fc4
|
[] |
no_license
|
gigascience/cuddel-gsk-dataset
|
e064774dd16cb018057957f7ea1e28129395f245
|
bf757f8a7fb5f34b803698c0524d7203e579f082
|
refs/heads/master
| 2021-01-25T04:50:11.079641
| 2018-10-12T01:22:12
| 2018-10-12T01:22:12
| 93,491,347
| 2
| 4
| null | 2018-11-16T08:01:19
| 2017-06-06T07:52:17
|
R
|
UTF-8
|
R
| false
| false
| 15,286
|
r
|
mait.R
|
# Title : TODO
# Objective : TODO
# Created by: peterli
# Created on: 17/7/2018
library(MAIT)
# The MAIT workflow involves filling slots in the MAIT object and returning the updated MAIT object as output
# First, load faahKO data into MAIT object
library(faahKO)
cdfFiles <- system.file("cdf", package="faahKO", mustWork=TRUE)
# Detect peaks using xcms with the sampleProcessing function in MAIT
MAIT <- sampleProcessing(dataDir = cdfFiles, project = "MAIT_Demo", snThres=2, rtStep=0.03)
summary(MAIT)
## A MAIT object built of 12 samples
## The object contains 6 samples of class KO
## The object contains 6 samples of class WT
##
## Parameters of the analysis:
## Value
## dataDir "/usr/lib64/R/library/faahKO/cdf"
## snThres "2"
## Sigma "2.12332257516562"
## mzSlices "0.3"
## retcorrMethod "loess"
## groupMethod "density"
## bwGroup "3"
## mzWidGroup "0.25"
## filterMethod "centWave"
## rtStep "0.03"
## nSlaves "0"
## project "MAIT_Demo"
## ppm "10"
## minfrac "0.5"
## fwhm "30"
## family1 "gaussian"
## family2 "symmetric"
## span "0.2"
## centWave peakwidth1 "5"
## centWave peakwidth2 "20"
# Do peak annotation
MAIT <- peakAnnotation(MAIT.object = MAIT, corrWithSamp = 0.7, corrBetSamp = 0.75, perfwhm = 0.6)
## WARNING: No input adduct/fragment table was given. Selecting default MAIT table for positive polarity...
## Set adductTable equal to negAdducts to use the default MAIT table for negative polarity
## Start grouping after retention time.
## Created 321 pseudospectra.
## Spectrum build after retention time done
## Generating peak matrix!
## Run isotope peak annotation
## % finished: 10 20 30 40 50 60 70 80 90 100
## Found isotopes: 122
## Isotope annotation done
## Start grouping after correlation.
## Generating EIC's ..
##
## Calculating peak correlations in 321 Groups...
## % finished: 10 20 30 40 50 60 70 80 90 100
##
## Calculating peak correlations across samples.
## % finished: 10 20 30 40 50 60 70 80 90 100
##
## Calculating isotope assignments in 321 Groups...
## % finished: 10 20 30 40 50 60 70 80 90 100
## Calculating graph cross linking in 321 Groups...
## % finished: 10 20 30 40 50 60 70 80 90 100
## New number of ps-groups: 751
## xsAnnotate has now 751 groups, instead of 321
## Spectrum number increased after correlation done
## Generating peak matrix for peak annotation!
## Found and use user-defined ruleset!
## Calculating possible adducts in 751 Groups...
## % finished: 10 20 30 40 50 60 70 80 90 100
## Adduct/fragment annotation done
# MAIT object has an xsAnnotated object containing the information about peaks, spectra and their annotation.
rawData(MAIT)
## $xsaFA
## An "xsAnnotate" object!
## With 751 groups (pseudospectra)
## With 12 samples and 1331 peaks
## Polarity mode is set to: positive
## Using automatic sample selection
## Annotated isotopes: 122
## Annotated adducts & fragments: 81
## Memory usage: 4.45 MB
# Do statistical analysis to identify features different between classes using spectralSigFeatures function
# NOTE: p.adj = "none" means no multiple-testing correction is applied to the
# per-feature p-values; scale = FALSE leaves intensities unscaled.
MAIT <- spectralSigFeatures(MAIT.object = MAIT, pvalue = 0.05, p.adj = "none", scale = FALSE)
# Summarise analysis
summary(MAIT)
## A MAIT object built of 12 samples and 1331 peaks. No peak aggregation technique has been applied
## 63 of these peaks are statistically significant
## The object contains 6 samples of class KO
## The object contains 6 samples of class WT
## Parameters of the analysis:
##                                       Value
## dataDir                              "/usr/lib64/R/library/faahKO/cdf"
## snThres                              "2"
## Sigma                                "2.12332257516562"
## mzSlices                             "0.3"
## retcorrMethod                        "loess"
## groupMethod                          "density"
## bwGroup                              "3"
## mzWidGroup                           "0.25"
## filterMethod                         "centWave"
## rtStep                               "0.03"
## nSlaves                              "0"
## project                              "MAIT_Demo"
## ppm                                  "10"
## minfrac                              "0.5"
## fwhm                                 "30"
## family1                              "gaussian"
## family2                              "symmetric"
## span                                 "0.2"
## centWave peakwidth1                  "5"
## centWave peakwidth2                  "20"
## corrWithSamp                         "0.7"
## corrBetSamp                          "0.75"
## perfwhm                              "0.6"
## sigma                                "6"
## peakAnnotation pvalue                "0.05"
## calcIso                              "TRUE"
## calcCiS                              "TRUE"
## calcCaS                              "TRUE"
## graphMethod                          "hcs"
## annotateAdducts                      "TRUE"
## peakAggregation method              "None"
## peakAggregation PCAscale            "FALSE"
## peakAggregation PCAcenter           "FALSE"
## peakAggregation scale               "FALSE"
## peakAggregation RemoveOnePeakSpectra "FALSE"
## Welch pvalue                         "0.05"
## Welch p.adj                          "none"
# In the spectralSigFeatures analysis, a table called signifcantFeatures.csv is created in the Tables subfolder. This
# table shows the characteristics of the statistically significant features. This table can be retrieved using:
# printCSVfile = FALSE returns the table in-memory instead of (re)writing the CSV.
signTable <- sigPeaksTable(MAIT.object = MAIT, printCSVfile = FALSE)
head(signTable)
##            mz    mzmin   mzmax    rt  rtmin rtmax npeaks KO WT        ko15        ko16        ko18        ko19        ko21        ko22         wt15         wt16
## 249   328.2   328.1   328.2  56.31 56.27 56.45      4  4  0    43851.29    88615.33    41311.31    35218.11    40095.58    47006.34     1907.784     1165.861
## 884   496.2   496.2   496.2  56.27 56.12 56.44      7  3  3 11275649.72  3795994.86  2624223.82  3630452.88  8335183.79  5624245.57 36141998.610  3377994.510
## 891   497.2   497.2   497.2  56.23 56.12 56.41      6  3  3  9239784.03  2603425.00   798816.25  1219126.64  2340526.64  1560252.53  9219730.435  1993433.398
## 896   498.2   498.2   498.2  56.21 56.06 56.23      4  1  3   853625.83    42028.88   150286.13   217955.75   198805.08   356467.11  1837432.274    96019.739
## 899   499.2   499.1   499.2  56.19 55.93 56.34      8  3  4    86962.36    13390.91    53202.17    17416.74    36593.84    54465.13   124631.905    12083.597
## 953   508.2   508.1   508.2  56.19 56.15 56.20      5  4  1   164074.60   139084.68   271571.75   155172.88        0.00   199981.96    99744.641   196969.877
##            wt18        wt19        wt21        wt22   isotopes adduct pcgroup    P.adjust           p Fisher.Test Mean Class KO Mean Class WT
## 249    12587.95    7536.961    6619.609    6892.127                        4 0.002514234 0.002514234          NA      49349.66      6118.382
## 884  2885261.59 5243788.829 7706061.535 7808922.868 [62][M]+           4 0.421686461 0.421686461          NA    5880958.44  10527337.990
## 891   978322.88 1472473.239 2221906.241 2241262.797 [62][M+1]+          4 0.973650701 0.973650701          NA    2960321.85   3021188.165
## 896   202991.67  243572.435  628464.047  357991.803 [62][M+2]+          4 0.404976403 0.404976403          NA     303194.80    561078.661
## 899    14849.16   94691.930   41735.420   42589.910 [62][M+3]+          4 0.610170147 0.610170147          NA      43671.86     55096.988
## 953   153144.41  245829.063  496743.520   31504.200                        4 0.534531870 0.534531870          NA     154980.98    203989.286
##     Median Class KO Median Class WT
## 249        42581.30        6755.868
## 884      4710120.21     6474925.182
## 891      1950389.59     2107669.820
## 896       208380.41      300782.119
## 899        44898.01       42162.665
## 953       159623.74      175057.144
# Summarise results
MAIT
## A MAIT object built of 12 samples and 1331 peaks. No peak aggregation technique has been applied
## 63 of these peaks are statistically significant
## The object contains 6 samples of class KO
## The object contains 6 samples of class WT
# Do statistical plots. MAIT objects are created wutg PCA and PLS models saved inside them
plotBoxplot(MAIT)
plotHeatmap(MAIT)
# The plot output figures are saved in the subfolders in the project folder.
# plotPCA/plotPLS also fit and store the corresponding model inside the MAIT
# object, which is why their return value is reassigned to MAIT.
MAIT <- plotPCA(MAIT, plot3d=FALSE)
MAIT <- plotPLS(MAIT, plot3d=FALSE)
# Extract the fitted models stored by the plotting calls above.
PLSmodel <- model(MAIT, type = "PLS")
PCAmodel <- model(MAIT, type = "PCA")
PLSmodel
## Partial Least Squares
##
## 12 samples
## 63 predictors
##  2 classes: 'KO', 'WT'
##
## No pre-processing
## Resampling: Bootstrapped (25 reps)
## Summary of sample sizes: 12, 12, 12, 12, 12, 12, ...
## Resampling results across tuning parameters:
##
##   ncomp  Accuracy  Kappa
##   1      1         1
##   2      1         1
##   3      1         1
##
## Accuracy was used to select the optimal model using the largest value.
## The final value used for the model was ncomp = 1.
# Per-sample PCA scores (one row per sample, one column per component).
pcaScores(MAIT)
##             PC1         PC2        PC3        PC4         PC5        PC6          PC7         PC8         PC9        PC10        PC11          PC12
##  [1,] -8.823400  6.31998966 -0.5526132 -1.2916266 -1.39191835  0.6247246 -0.186452381  0.17513153 -0.03013042  0.07214817 -0.13531712  2.466777e-15
##  [2,] -8.202057  0.08372414  2.0231200  3.4017405  1.26354572 -0.4580168  1.896542591 -0.57535747  0.71556592  0.27847053  0.37346246  6.869505e-16
##  [3,] -6.647327 -3.00939301  3.6390245 -2.6080710 -0.47162186 -0.9921232 -1.539667265 -0.36479469  0.51448150 -0.11756634 -0.19180616 -6.678685e-16
##  [4,] -3.554215 -2.11458548  0.2277483 -0.4454514  0.93503858  1.8743863  1.000764293  2.36685217 -1.32658830 -0.45467997 -0.15334779 -8.083811e-16
##  [5,] -4.394232 -3.25707904 -4.0828290 -0.5586029 -0.59580779  1.7252921  0.037541531 -1.35815385  0.59885276  0.56612633  0.16379772 -9.783840e-16
##  [6,] -3.165613 -0.45844726 -2.8491469  1.4610838  0.38569573 -2.9636763 -1.383997315  0.16386948 -1.20232530 -0.45472153 -0.16106150 -4.440892e-16
##  [7,]  5.305730  1.64665278 -0.9821137 -1.8563330  3.87308588 -0.1983468 -0.007830579 -0.08448479  1.11571569 -0.43147651 -0.09692091 -1.169204e-15
##  [8,]  5.487610  0.31429828  0.9351818  2.6945683 -0.73282854  1.7723295 -1.748001788 -0.43156894  0.44464047 -0.96597551 -0.92322133 -3.400058e-16
##  [9,]  6.313361  0.39820865  1.2046214 -0.9395212 -0.02530098 -0.1413301  1.580728764 -1.87584484 -1.75244866  0.53327213 -0.66087917  7.109114e-16
## [10,]  5.641545  0.73284993  1.0706125  0.4904065  0.21710858  0.7084426 -1.784234044  0.09591630 -0.68029637  0.77764390  1.64880640 -6.730727e-16
## [11,]  6.093996 -0.28217905 -0.2864657  0.3521655 -1.13341090 -1.0305014  0.315105967  1.71724384  0.99906322  1.59307934 -0.67748775  1.377370e-15
## [12,]  5.944601 -0.37403961 -0.3471400 -0.7003586 -2.32358607 -0.9211803  1.819500227  0.17119127  0.60346949 -1.39632054  0.81397514 -3.538836e-16
# Before identifying metabolites, peak annotation can be improved using the Biotransformations function. Use the default
# MAIT table for biotransformations:
# BUG FIX: the original call discarded its return value, so the improved
# annotation was silently lost. MAIT functions return an updated MAIT object
# that must be reassigned, as is done for every other step in this pipeline.
MAIT <- Biotransformations(MAIT.object = MAIT, peakPrecision = 0.005)
# A user-defined biotransformations table can be used - see MAIT manual
# Metabolite identification: match annotated peak masses against the default
# MAIT database within +/- peakTolerance (in Da).
MAIT <- identifyMetabolites(MAIT.object = MAIT, peakTolerance = 0.005)
## WARNING: No input database table was given. Selecting default MAIT database...
## Metabolite identification initiated
##
## % Metabolite identification in progress: 10 20 30 40 50 60 70 80 90 100
## Metabolite identification finished
# A table is created containing the possible metabolite identifications
metTable <- metaboliteTable(MAIT)
# View some results in the table
metTable[1:5,1:ncol(metTable)]
##   Query Mass Database Mass (neutral mass)    rt   Isotope Adduct    Name spectra Biofluid   ENTRY        p.adj            p Fisher.Test
## 1      328.2                     Unknown 56.31                Unknown       4  unknown unknown 2.514234e-03 2.514234e-03          NA
## 2      454.1                     Unknown 55.88 [45][M]+        Unknown       7  unknown unknown 6.124024e-01 6.124024e-01          NA
## 3      549.1                     Unknown  53.1 [91][M+1]+      Unknown     215  unknown unknown 6.646044e-01 6.646044e-01          NA
## 4      411.2                     Unknown  65.6                Unknown     221  unknown unknown 6.761483e-07 6.761483e-07          NA
## 5      324.2                     Unknown 54.59                Unknown     233  unknown unknown 3.990481e-02 3.990481e-02          NA
##   Mean Class KO Mean Class WT Median Class KO Median Class WT KO WT      ko15      ko16       ko18      ko19      ko21      ko22        wt15
## 1      49349.66      6118.382        42581.30        6755.868  4  0  43851.29  88615.33   41311.31  35218.11  40095.58  47006.34    1907.784
## 2     710915.10    882177.176       595445.77      769797.473  6  4 381784.88 878240.29 1511025.72 797661.72 303548.19 393229.82  377503.040
## 3      59572.67     55136.583        57388.44       54346.972  2  5  61471.41  79003.52   85458.18  52338.98  25858.49  53305.46   73945.095
## 4     148772.97     33519.391       145294.01       23729.675  6  4 172806.65 133447.55  167487.24 128308.37 141713.88 148874.14   47465.487
## 5     109873.48      9594.587        84825.35        7569.202  5  0  96901.74 287813.15   79667.89  55325.88  49549.44  89982.80   19287.067
##          wt16       wt18        wt19       wt21        wt22
## 1    1165.861  12587.948    7536.961   6619.609    6892.127
## 2  492109.145 1419864.480 1815052.071 141048.523 1047485.800
## 3   51594.920  52613.735   39506.860  56080.210   57078.680
## 4   64596.940  22508.083   24951.268  20562.535   21032.035
## 5       0.000   8493.043    6645.361  20716.752    2425.299
# Validation to check predictive value of significant features.
# Validation() repeatedly trains classifiers (Iterations runs, trainSamples
# samples per class in each training set) and stores the results in MAIT.
MAIT <- Validation(Iterations = 20, trainSamples= 3, MAIT.object = MAIT)
summary(MAIT)
# External peak data can be analysed by MAIT using hte MAITbuilder function to import peak data and analyse it using
# MAIT statistical functions. Consider that we have the following data:
peaks <- scores(MAIT)
masses <- getPeaklist(MAIT)$mz
# NOTE(review): rt is divided by 60 here, presumably converting seconds to
# minutes for MAITbuilder -- confirm the expected retention-time unit.
rt <- getPeaklist(MAIT)$rt/60
# Use the MAITbuilder to annotate and identify metabolites on these data - check parameter values are suitable!!!
importMAIT <- MAITbuilder(data=peaks, masses=masses, rt=rt, significantFeatures=TRUE, spectraEstimation=TRUE, rtRange=0.2, corThresh=0.7)
# Perform biotransformations - to run negative annotation, sert adductTable = negAdducts
importMAIT <- Biotransformations(MAIT.object = importMAIT, adductAnnotation = TRUE, peakPrecision = 0.005, adductTable = NULL)
# Identify metabolites
importMAIT <- identifyMetabolites(MAIT.object = importMAIT, peakTolerance=0.005, polarity="positive")
## WARNING: No input database table was given. Selecting default MAIT database...
## Metabolite identification initiated
##
## % Metabolite identification in progress: 0 10 20 30 40 50 60 70 80 90 100
## Metabolite identification finished
|
10e0e37b1cd072184eecca483355f570ffabbc8b
|
2b9e8f79a771a5ae510956db076bd6451204dc82
|
/apprentissage-automatique/TP1/Script.R
|
ae2bedcc257bb293d70500b44629e10a06337d9c
|
[] |
no_license
|
hamza-hramchi/eilco
|
24d78b7c754b4d1e2d3c7c7a0571e40013333af0
|
bf9b417b91f7653bd228fee30df316ecc31588f4
|
refs/heads/master
| 2023-01-28T19:46:50.719722
| 2020-12-08T21:30:04
| 2020-12-08T21:30:04
| 297,595,965
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,774
|
r
|
Script.R
|
# Title     : R script
# Objective : Discover R langage
# Created by: Hamza HRAMCHI
# Created on: 28/09/2020
# -------------- Basics of R langage -----------------------------
# Scalar assignment; typing a name at the prompt auto-prints its value.
a <- 5
a
b <- a
b
# Numeric vector: mode() reports the storage mode ("numeric"),
# length() the number of elements (4).
v <- c(1, 2, 5, 9)
mode(v)
length(v)
# -------------- Input/Output --------------------------------------
# scan() with no arguments reads values interactively from stdin
# (this section only works in an interactive session).
print("Enter numeric number : ")
scan()
print("Enter numeric number for a : ")
a <- scan()
print("Enter numeric number for b : ")
b <- scan()
# NOTE(review): this assignment shadows the base function c() for the rest
# of the session -- a different variable name would be safer.
c <- a+b
print("c = a + b : ")
print(c)
c
# -------------- Functions ----------------------------------------
# Square a number. Vectorised: applied elementwise to numeric vectors.
carre <- function(x) {
  x * x
}
carre(3)
# -------------- Vectors ----------------------------------------
cat('# -------------- Vectors -------------------------')
# Zero-filled numeric vector of length 10.
v1 <- vector("numeric", 10)
print(v1)
# Logical vector of length 8, initialised to FALSE.
v2 <- vector("logical", 8)
print(v2)
# Explicit element list.
v3 <- c(1,3,4,8)
print(v3)
# Ten copies of 1.
v4 <- rep(1, 10)
print(v4)
# seq(1, 10), (1:10) and 1:10 all produce the integers 1 through 10.
v5 <- seq(1, 10)
print(v5)
v6 <- (1:10)
print(v6)
# Sequence from 1 to 10 with step 3: 1 4 7 10.
v7 <- seq(1, 10, 3)
print(v7)
# ---------------- Sizes (la taille) --------------------------------------
# Loads objects from an external workspace file; `objets`, `performanceF`,
# `tailleF` and `tailleG` are presumably defined inside test.RData -- this
# section fails if the file is absent from the working directory.
load("test.RData")
print("La taille de : objets")
length(objets)
print("La taille de : performanceF")
length(performanceF)
# ...
tailleF
tailleG
# ------------- Display the names attached to the different measurements ----
names(tailleG)
names(tailleF)
# ...
# ---------------- Union function ---------------------------------------
# union() returns the combined distinct values of both vectors.
union(tailleF, tailleG)
# ...
# ---------------- Factors ---------------------------------------
# Factor of wind strengths ("fort"/"moyen"/"faible" = strong/medium/weak).
vent <- factor(c("fort", "faible", "moyen", "faible", "faible", "fort"))
vent
# ---------------- Matrix ---------------------------------------
# m1 fills column-by-column (default), m2 row-by-row (byrow = TRUE).
m1 <- matrix(1:6, nrow = 2, ncol = 3)
m2 <- matrix(1:6, nrow = 2, ncol = 3, byrow = TRUE)
m3 <- matrix(c(40, 80, 45, 21, 55 ,32), nrow = 2, ncol = 3)
# ---------------- Lists ---------------------------------------
# Named list: one numeric vector of scores per athlete (ragged lengths).
athletes <- list(Didier=c(630, 625, 628, 599, 635, 633, 622),
Jules=c(610, 590, 595, 582, 601, 603),
Pierre=c(644, 638, 639, 627, 642, 633, 639),
Matthieu=c(622, 625, 633, 641, 610),
Georges=c(561, 572, 555, 569, 653, 549, 558, 561),
Khaled=c(611, 621, 619, 618, 623, 614, 623),
Guillaume=c(599, 601, 612, 609, 607, 608, 594),
Hermann=c(624, 630, 631, 629, 634, 618, 622),
Carlos=c(528, 531, 519, 533, 521), Keith=c(513))
# ---------------- Data frame ---------------------------------------
# One row per person: height (taille), weight (poids), IQ and sex.
resultats <- data.frame(taille=c(185,178,165,171,172),
poids=c(82,81,55,65,68),
QI=c(110,108,125,99,124),
sexe=c("M","M","F","F","F"),
row.names=c("Paul","Matthieu",
"Camille","Mireille","Capucine"))
|
2592471a9b4f2b9cdb93b029d0dde8cbdce786b4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/apTreeshape/examples/shift.test.Rd.R
|
561bb63a3daa402086759eb7d31f9f4c568c34ed
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 944
|
r
|
shift.test.Rd.R
|
library(apTreeshape)
### Name: shift.test
### Title: Testing diversification rate variation in phylogenetic trees
### Aliases: shift.test
### Keywords: htest
### ** Examples
## Detecting diversification rate variation in bird families (135 tips)
data(bird.families)
tree.birds <- as.treeshape(bird.families, model = "yule")
class(tree.birds) <- "treeshape"
# Monte-Carlo shift test at every internal node (1..135); each call returns
# a p-value, collected into the vector pv.
pv <- sapply(1:135, FUN = function(i) shift.test(tree.birds, i, lambda1 = 1,
lambda2 = 100, nrep = 1000, silent = TRUE))
## Significant shifts detected at nodes = 67 and 78
pv[c(67,78)]
# Re-run the two significant nodes with more replicates for precision.
shift.test(tree.birds, node = 67, lambda1 = 1, lambda2 = 100, nrep = 10000, silent = TRUE)
shift.test(tree.birds, node = 78, lambda1 = 1, lambda2 = 100, nrep = 10000, silent = TRUE)
## visualize the shifts
# NOTE(review): par() is modified without saving/restoring the old settings;
# consider old <- par(mfrow = c(2, 1)); on.exit(par(old)).
par(mfrow=c(2,1))
plot(cutreeshape(tree.birds, ancestor(tree.birds, 67) , "bottom"))
plot(cutreeshape(tree.birds, 78 , "bottom"))
|
360650d111faaa728e140f4aabf6193f5695656c
|
d517fbecd093d7c6ba2e69d8d97ddb3b9281b622
|
/S_meliloti_GO_analysis_rsubread-data.R
|
3330559682f32d28cbe1d49fb4e9f9af4a93d431
|
[
"MIT"
] |
permissive
|
ahalfpen727/Gene-Expression-SecondaRy-Analysis
|
1505c22092830c784bf858997b3b987fa0378d72
|
7c6eb0cf178b3a1a35ebf3d6990f75a48dc80ac6
|
refs/heads/main
| 2023-02-06T04:34:24.822876
| 2020-12-31T09:06:08
| 2020-12-31T09:06:08
| 305,691,492
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 41,085
|
r
|
S_meliloti_GO_analysis_rsubread-data.R
|
#####################################################################
# library(STRINGdb); KEGG_+_GO_analysis post-featurecounts.R
#####################################################################
source("https://bioconductor.org/biocLite.R")
# biocLite("limma");biocLite("Rsubread"); biocLite("STRINGdb")
browseVignettes("STRINGdb")
#STRINGdb$help("get_graph")
# get_interactions(string_ids)
###### returns the interactions in between the input proteins
# get_neighbors(string_ids)
###### Get the neighborhoods of a protein (or of a vector of proteins).
# get_subnetwork(string_ids)
###### returns a subgraph from the given input proteins
#####################################################################
# GO Pathway DOSE korg
# Sinorhizobium meliloti strain 1021
# [TAX:382] Taxonomy ID: 266834
# GO:0003674 - MF ,GO:0005575 - CC , GO:0008150 - BP
# 000006965* sme Sinorhizobium meliloti 1021 --> NC_003047 11474104,11481430
#cuff<-readCufflinks(dbFile = "cuffData.db", genome = "1021_genome.fa", rebuild=T)
#####################################################################
### LibraryLoading
#####################################################################
library(GO.db);library(GOstats);library(topGO)
library(gage);library(pathview)
library(KEGG.db);library(KEGGgraph)
library(clusterProfiler);library(DOSE)
library(ReactomePA);library(STRINGdb)
library(igraph);library(biomaRt)
library(keggorthology)
#library("EnrichmentBrowser"); vignette("EnrichmentBrowser")
# library(org.Hs.eg.db); library(keggorthology);library(Path2PPI)
#####################################################################
# Standard GO analysis from edgeR
#####################################################################
library(STRINGdb);library(Rsubread)
library(limma); library(edgeR)
library(biomaRt)
# library(biomaRt) functions to create a genetable from a gff3
Gff2GeneTable("1021_genome.gff3")
load("geneTable.rda")
edb<-geneTable$GeneID
head(geneTable)
#########################################################################
# Grab DE tables from each comparison made in the featurecounts script
#########################################################################
# Quick sanity checks of the differential-expression tables produced upstream.
head(A.vs.AB.DE)
dim(A.vs.AB.DE)
head(A.vs.wt1021.DE)
dim(A.vs.wt1021.DE)
head(AB.vs.wt1021.DE)
dim(AB.vs.wt1021.DE)
head(AB.vs.wt1021B.DE)
dim(AB.vs.wt1021B.DE)
# NOTE(review): the two lines below reference wt1021.over.wt1021B.DE and
# wt1021B.vs.wt1021.DE, while the mapping code further down uses
# wt1021.vs.wt1021B.DE -- confirm the actual object name from the upstream
# featurecounts script; at most one of these spellings can exist.
head(wt1021.over.wt1021B.DE)
dim(wt1021B.vs.wt1021.DE)
#####################################################################
# library(STRINGdb); KEGG_IDS
#####################################################################
browseVignettes("STRINGdb") ; #STRINGdb$help("get_graph")
## get_interactions(string_ids) # returns the interactions in between the input proteins
## get_neighbors(string_ids) # Get the neighborhoods of a protein (or of a vector of proteins).
## get_subnetwork(string_ids) # returns a subgraph from the given input proteins
#000006965* sme Sinorhizobium meliloti 1021 --> NC_003047 11474104,11481430
###########################################################################################
## Query STRINGdb database for species, get KEGGids, GOids, and STRINGids
###########################################################################################
# Look up the organism in KEGG (clusterProfiler helper) by name and code.
sme1021 <- search_kegg_organism('Sinorhizobium meliloti 1021', by='scientific_name')
dim(sme1021); head(sme1021)
sme1021$kegg_code
Smeliloti <- search_kegg_organism('Sinorhizobium meliloti', by='scientific_name')
Smeliloti$scientific_name
smelil<-search_kegg_organism(sme1021$kegg_code, by='kegg_code')
dim(smelil);head(smelil)
# Find the STRING species/taxon id for S. meliloti, then open a STRINGdb
# (version 10) handle for that taxon.
species.all<-get_STRING_species(version="10", species_name=NULL)
colnames(species.all)
sm1021<-grep(pattern='Sinorhizobium meliloti', species.all$official_name, ignore.case = T)
taxa.info<-species.all[sm1021,]
taxa.info
taxID<-taxa.info$species_id
taxID
string.db.sme1021 <- STRINGdb$new(version="10", species=taxID)
string.db.sme1021
sme.kegg1021<-search_kegg_organism('sme', by='kegg_code')
sme.kegg.org1021<- search_kegg_organism('Sinorhizobium meliloti 1021', by='scientific_name')
dim(sme.kegg.org1021)
head(sme.kegg.org1021)
# Download KEGG pathways and gene sets for organism code "sme".
sme.pwys <- download.kegg.pathways("sme")
kegg.gs <- get.kegg.genesets("sme")
head(sme.pwys)
head(kegg.gs)
library(gage)
# NOTE(review): gse16873 is gage's human example dataset and is not used
# below -- this data() call looks like a leftover.
data(gse16873)
sme.kegg.sets<-kegg.gsets(species = "sme",id.type = "kegg")
sme.kegg.sets
###########################################################################################
## Write the KEGGids, the genes involved, and the human readable pathway names to a file
###########################################################################################
# BUG FIX: the original first line used file.path("...txt", "w"), which only
# pastes path components (yielding the bogus path "Sme1021.kegg.genesets.txt/w")
# and was immediately overwritten by the file() call -- it has been removed.
# The connection is opened once in append mode and is now close()d when done
# (it was previously leaked).
keggfile <- file("Sme1021.kegg.genesets.txt", "a")
KEGGid <- names(kegg.gs)  # KEGG pathway identifiers, one per gene set
x <- 0
for (keggpath in kegg.gs) {
  x <- x + 1  # was x <- c(x + 1); the c() wrapper was a no-op
  kegg.df <- c(x, KEGGid[x], keggpath)
  write(kegg.df, file = keggfile, append = TRUE)
}
close(keggfile)  # release the connection (previously never closed)
###########################################################################################
## For each comparison DE table, map the gene symbols to the KEGGids/STRINGids
###########################################################################################
# string.db.sme1021$map() adds a STRING_id column keyed on "GeneSymbol";
# removeUnmappedRows = TRUE drops genes with no STRING identifier.
A.vs.wt1021.DE_mapped <- string.db.sme1021$map( A.vs.wt1021.DE, "GeneSymbol", removeUnmappedRows = TRUE )
write.table(A.vs.wt1021.DE_mapped, file="A.vs.wt1021.KEGG.difftable")
head(A.vs.wt1021.DE_mapped)
dim(A.vs.wt1021.DE_mapped)
AB.vs.wt1021B.DE_mapped <- string.db.sme1021$map( AB.vs.wt1021B.DE, "GeneSymbol", removeUnmappedRows = TRUE )
write.table(AB.vs.wt1021B.DE_mapped, file="AB.vs.wt1021B.KEGG.difftable")
head(AB.vs.wt1021B.DE_mapped)
AB.vs.wt1021.DE_mapped <- string.db.sme1021$map( AB.vs.wt1021.DE, "GeneSymbol", removeUnmappedRows = TRUE )
write.table(AB.vs.wt1021.DE_mapped, file="AB.vs.wt1021.KEGG.difftable")
head(AB.vs.wt1021.DE_mapped)
A.vs.AB.DE_mapped <- string.db.sme1021$map( A.vs.AB.DE, "GeneSymbol", removeUnmappedRows = TRUE )
write.table(A.vs.AB.DE_mapped, file="A.vs.AB.KEGG.difftable")
head(A.vs.AB.DE_mapped)
# NOTE(review): wt1021.vs.wt1021B.DE is not defined under this exact name
# earlier in the script (see wt1021.over.wt1021B.DE / wt1021B.vs.wt1021.DE
# above) -- confirm the object name before running.
wt1021.vs.wt1021B.DE_mapped <- string.db.sme1021$map( wt1021.vs.wt1021B.DE, "GeneSymbol", removeUnmappedRows = TRUE )
write.table(wt1021.vs.wt1021B.DE_mapped, file="wt1021.vs.wt1021B.KEGG.difftable")
head(wt1021.vs.wt1021B.DE_mapped)
#####################################################################
# enrichment
###################################################
# PPI enrichment for the AB vs wt1021B comparison: build a gene/pvalue/logFC
# frame, map it to STRING ids, plot the network, and post significant genes
# (p < 0.05) as a payload so STRING can draw a colored "halo".
AB.vs.wt1021B.DE.df<-as.data.frame(cbind(gene=AB.vs.wt1021B.DE_mapped$GeneSymbol,
pvalue=AB.vs.wt1021B.DE_mapped$Pval,
logFC=AB.vs.wt1021B.DE_mapped$LogFoldChange), stringsAsFactors=F)
dim(AB.vs.wt1021B.DE.df)
head(AB.vs.wt1021B.DE.df)
AB.vs.wt1021B.intersected<-string.db.sme1021$map(AB.vs.wt1021B.DE.df, "gene", removeUnmappedRows=T)
head(AB.vs.wt1021B.intersected)
class(AB.vs.wt1021B.intersected)
string.db.sme1021$plot_network(AB.vs.wt1021B.intersected$STRING_id[1:400],)
AB.vs.wt1021B.subnets<-string.db.sme1021$get_subnetwork(AB.vs.wt1021B.intersected)
AB.vs.wt1021B.subnets
AB.vs.wt1021B.mapped_sig<-as.data.frame(cbind(genes=c(AB.vs.wt1021B.intersected$gene[AB.vs.wt1021B.intersected$pvalue < 0.05]),
pvalue=c(AB.vs.wt1021B.intersected$pvalue[AB.vs.wt1021B.intersected$pvalue < 0.05]),
logFC=c(AB.vs.wt1021B.intersected$logFC[AB.vs.wt1021B.intersected$pvalue < 0.05]),
STRING_id=c(AB.vs.wt1021B.intersected$STRING_id[AB.vs.wt1021B.intersected$pvalue < 0.05])),
stringsAsFactors=F, row.names=F)
head(AB.vs.wt1021B.mapped_sig)
AB.vs.wt1021B.DE.pv.fc.STRING<-as.data.frame(cbind(gene=AB.vs.wt1021B.DE_mapped$GeneSymbol,
pvalue=AB.vs.wt1021B.DE_mapped$Pval,
logFC=AB.vs.wt1021B.DE_mapped$LogFoldChange,
STRING_id=AB.vs.wt1021B.DE_mapped$STRING_id), stringsAsFactors=F, row.names=F, col.names=T)
head(AB.vs.wt1021B.DE.pv.fc.STRING)
# post payload information to the STRING server
# NOTE(review): mapped_sig has no "color" column, so ["pvalue"]$color is
# almost certainly NULL -- the halo colors were probably meant to come from
# string_db$add_diff_exp_color(); verify before relying on the plot.
AB.vs.wt1021B_pval01 <- string.db.sme1021$post_payload(AB.vs.wt1021B.mapped_sig$STRING_id,
colors=AB.vs.wt1021B.mapped_sig["pvalue"]$color )
# display a STRING network png with the "halo"
# NOTE(review): required_score is a STRING confidence threshold (0-1000);
# passing logFC values here looks wrong -- confirm the intent.
string.db.sme1021$plot_network( AB.vs.wt1021B.DE.pv.fc.STRING$STRING_id[1:50],
payload_id=AB.vs.wt1021B_pval01,
required_score=AB.vs.wt1021B.DE.pv.fc.STRING$logFC[1:50])
# plot the enrichment for the best 100 genes
ab.wt1021.top100<-string.db.sme1021$plot_ppi_enrichment( AB.vs.wt1021B.intersected$STRING_id[1:500], quiet=TRUE )
#####################################################################
# enrichment A.vs.wt1021
###################################################
# Same STRING PPI-enrichment recipe as above, for the A vs wt1021 comparison.
A.vs.wt1021.DE.df<-as.data.frame(cbind(gene=A.vs.wt1021.DE_mapped$GeneSymbol,
pvalue=A.vs.wt1021.DE_mapped$Pval,
logFC=A.vs.wt1021.DE_mapped$LogFoldChange), stringsAsFactors=F)
dim(A.vs.wt1021.DE.df)
head(A.vs.wt1021.DE.df)
A.vs.wt1021.intersected<-string.db.sme1021$map(A.vs.wt1021.DE.df, "gene", removeUnmappedRows=T)
head(A.vs.wt1021.intersected)
class(A.vs.wt1021.intersected)
string.db.sme1021$plot_network(A.vs.wt1021.intersected$STRING_id[1:400],)
A.vs.wt1021.subnets<-string.db.sme1021$get_subnetwork(A.vs.wt1021.intersected)
A.vs.wt1021.subnets
# Keep only the rows with p < 0.05 (note: pvalue is character after cbind).
A.vs.wt1021.mapped_sig<-as.data.frame(cbind(genes=c(A.vs.wt1021.intersected$gene[A.vs.wt1021.intersected$pvalue < 0.05]),
pvalue=c(A.vs.wt1021.intersected$pvalue[A.vs.wt1021.intersected$pvalue < 0.05]),
logFC=c(A.vs.wt1021.intersected$logFC[A.vs.wt1021.intersected$pvalue < 0.05]),
STRING_id=c(A.vs.wt1021.intersected$STRING_id[A.vs.wt1021.intersected$pvalue < 0.05])),
stringsAsFactors=F, row.names=F)
head(A.vs.wt1021.mapped_sig)
A.vs.wt1021.DE.pv.fc.STRING<-as.data.frame(cbind(gene=A.vs.wt1021.DE_mapped$GeneSymbol,
pvalue=A.vs.wt1021.DE_mapped$Pval,
logFC=A.vs.wt1021.DE_mapped$LogFoldChange,
STRING_id=A.vs.wt1021.DE_mapped$STRING_id), stringsAsFactors=F, row.names=F, col.names=T)
head(A.vs.wt1021.DE.pv.fc.STRING)
# post payload information to the STRING server
# NOTE(review): ["pvalue"]$color is likely NULL (no color column exists);
# see the note in the first enrichment section.
A.vs.wt1021_pval01 <- string.db.sme1021$post_payload(A.vs.wt1021.mapped_sig$STRING_id,
colors=A.vs.wt1021.mapped_sig["pvalue"]$color )
# display a STRING network png with the "halo"
string.db.sme1021$plot_network( A.vs.wt1021.DE.pv.fc.STRING$STRING_id[1:50],
payload_id=A.vs.wt1021_pval01,
required_score=A.vs.wt1021.DE.pv.fc.STRING$logFC[1:50])
# plot the enrichment for the best 100 genes
ab.wt1021.top100<-string.db.sme1021$plot_ppi_enrichment( A.vs.wt1021.intersected$STRING_id[1:500], quiet=TRUE )
#####################################################################
# enrichment
###################################################
# Same STRING PPI-enrichment recipe, for the AB vs wt1021 comparison.
AB.vs.wt1021.DE.df<-as.data.frame(cbind(gene=AB.vs.wt1021.DE_mapped$GeneSymbol,
pvalue=AB.vs.wt1021.DE_mapped$Pval,
logFC=AB.vs.wt1021.DE_mapped$LogFoldChange), stringsAsFactors=F)
dim(AB.vs.wt1021.DE.df)
head(AB.vs.wt1021.DE.df)
AB.vs.wt1021.intersected<-string.db.sme1021$map(AB.vs.wt1021.DE.df, "gene", removeUnmappedRows=T)
head(AB.vs.wt1021.intersected)
class(AB.vs.wt1021.intersected)
string.db.sme1021$plot_network(AB.vs.wt1021.intersected$STRING_id[1:400],)
AB.vs.wt1021.subnets<-string.db.sme1021$get_subnetwork(AB.vs.wt1021.intersected)
AB.vs.wt1021.subnets
AB.vs.wt1021.mapped_sig<-as.data.frame(cbind(genes=c(AB.vs.wt1021.intersected$gene[AB.vs.wt1021.intersected$pvalue < 0.05]),
pvalue=c(AB.vs.wt1021.intersected$pvalue[AB.vs.wt1021.intersected$pvalue < 0.05]),
logFC=c(AB.vs.wt1021.intersected$logFC[AB.vs.wt1021.intersected$pvalue < 0.05]),
STRING_id=c(AB.vs.wt1021.intersected$STRING_id[AB.vs.wt1021.intersected$pvalue < 0.05])),
stringsAsFactors=F, row.names=F)
head(AB.vs.wt1021.mapped_sig)
AB.vs.wt1021.DE.pv.fc.STRING<-as.data.frame(cbind(gene=AB.vs.wt1021.DE_mapped$GeneSymbol,
pvalue=AB.vs.wt1021.DE_mapped$Pval,
logFC=AB.vs.wt1021.DE_mapped$LogFoldChange,
STRING_id=AB.vs.wt1021.DE_mapped$STRING_id), stringsAsFactors=F, row.names=F, col.names=T)
head(AB.vs.wt1021.DE.pv.fc.STRING)
# post payload information to the STRING server
# NOTE(review): ["pvalue"]$color is likely NULL (no color column exists);
# see the note in the first enrichment section.
AB.vs.wt1021_pval01 <- string.db.sme1021$post_payload(AB.vs.wt1021.mapped_sig$STRING_id,
colors=AB.vs.wt1021.mapped_sig["pvalue"]$color )
# display a STRING network png with the "halo"
string.db.sme1021$plot_network( AB.vs.wt1021.DE.pv.fc.STRING$STRING_id[1:50],
payload_id=AB.vs.wt1021_pval01,
required_score=AB.vs.wt1021.DE.pv.fc.STRING$logFC[1:50])
# plot the enrichment for the best 100 genes
ab.wt1021.top100<-string.db.sme1021$plot_ppi_enrichment( AB.vs.wt1021.intersected$STRING_id[1:500], quiet=TRUE )
#############################################################################
#####################################################################
# enrichment
###################################################
# Same STRING PPI-enrichment recipe, for the A vs AB comparison.
A.vs.AB.DE.df<-as.data.frame(cbind(gene=A.vs.AB.DE_mapped$GeneSymbol,
pvalue=A.vs.AB.DE_mapped$Pval,
logFC=A.vs.AB.DE_mapped$LogFoldChange), stringsAsFactors=F)
dim(A.vs.AB.DE.df)
head(A.vs.AB.DE.df)
A.vs.AB.intersected<-string.db.sme1021$map(A.vs.AB.DE.df, "gene", removeUnmappedRows=T)
head(A.vs.AB.intersected)
class(A.vs.AB.intersected)
string.db.sme1021$plot_network(A.vs.AB.intersected$STRING_id[1:400],)
A.vs.AB.subnets<-string.db.sme1021$get_subnetwork(A.vs.AB.intersected)
A.vs.AB.subnets
A.vs.AB.mapped_sig<-as.data.frame(cbind(genes=c(A.vs.AB.intersected$gene[A.vs.AB.intersected$pvalue < 0.05]),
pvalue=c(A.vs.AB.intersected$pvalue[A.vs.AB.intersected$pvalue < 0.05]),
logFC=c(A.vs.AB.intersected$logFC[A.vs.AB.intersected$pvalue < 0.05]),
STRING_id=c(A.vs.AB.intersected$STRING_id[A.vs.AB.intersected$pvalue < 0.05])),
stringsAsFactors=F, row.names=F)
head(A.vs.AB.mapped_sig)
A.vs.AB.DE.pv.fc.STRING<-as.data.frame(cbind(gene=A.vs.AB.DE_mapped$GeneSymbol,
pvalue=A.vs.AB.DE_mapped$Pval,
logFC=A.vs.AB.DE_mapped$LogFoldChange,
STRING_id=A.vs.AB.DE_mapped$STRING_id), stringsAsFactors=F, row.names=F, col.names=T)
head(A.vs.AB.DE.pv.fc.STRING)
# post payload information to the STRING server
# NOTE(review): ["pvalue"]$color is likely NULL (no color column exists);
# see the note in the first enrichment section.
A.vs.AB_pval01 <- string.db.sme1021$post_payload(A.vs.AB.mapped_sig$STRING_id,
colors=A.vs.AB.mapped_sig["pvalue"]$color )
# display a STRING network png with the "halo"
string.db.sme1021$plot_network( A.vs.AB.DE.pv.fc.STRING$STRING_id[1:50],
payload_id=A.vs.AB_pval01,
required_score=A.vs.AB.DE.pv.fc.STRING$logFC[1:50])
# plot the enrichment for the best 100 genes
ab.wt1021.top100<-string.db.sme1021$plot_ppi_enrichment( A.vs.AB.intersected$STRING_id[1:500], quiet=TRUE )
#############################################################################
#####################################################################
# enrichment
###################################################
# Same STRING PPI-enrichment recipe, for the wt1021 vs wt1021B comparison.
wt1021.vs.wt1021B.DE.df<-as.data.frame(cbind(gene=wt1021.vs.wt1021B.DE_mapped$GeneSymbol,
pvalue=wt1021.vs.wt1021B.DE_mapped$Pval,
logFC=wt1021.vs.wt1021B.DE_mapped$LogFoldChange), stringsAsFactors=F)
dim(wt1021.vs.wt1021B.DE.df)
head(wt1021.vs.wt1021B.DE.df)
wt1021.vs.wt1021B.intersected<-string.db.sme1021$map(wt1021.vs.wt1021B.DE.df, "gene", removeUnmappedRows=T)
head(wt1021.vs.wt1021B.intersected)
class(wt1021.vs.wt1021B.intersected)
string.db.sme1021$plot_network(wt1021.vs.wt1021B.intersected$STRING_id[1:400],)
wt1021.vs.wt1021B.subnets<-string.db.sme1021$get_subnetwork(wt1021.vs.wt1021B.intersected)
wt1021.vs.wt1021B.subnets
wt1021.vs.wt1021B.mapped_sig<-as.data.frame(cbind(genes=c(wt1021.vs.wt1021B.intersected$gene[wt1021.vs.wt1021B.intersected$pvalue < 0.05]),
pvalue=c(wt1021.vs.wt1021B.intersected$pvalue[wt1021.vs.wt1021B.intersected$pvalue < 0.05]),
logFC=c(wt1021.vs.wt1021B.intersected$logFC[wt1021.vs.wt1021B.intersected$pvalue < 0.05]),
STRING_id=c(wt1021.vs.wt1021B.intersected$STRING_id[wt1021.vs.wt1021B.intersected$pvalue < 0.05])),
stringsAsFactors=F, row.names=F)
head(wt1021.vs.wt1021B.mapped_sig)
wt1021.vs.wt1021B.DE.pv.fc.STRING<-as.data.frame(cbind(gene=wt1021.vs.wt1021B.DE_mapped$GeneSymbol,
pvalue=wt1021.vs.wt1021B.DE_mapped$Pval,
logFC=wt1021.vs.wt1021B.DE_mapped$LogFoldChange,
STRING_id=wt1021.vs.wt1021B.DE_mapped$STRING_id), stringsAsFactors=F, row.names=F, col.names=T)
head(wt1021.vs.wt1021B.DE.pv.fc.STRING)
# post payload information to the STRING server
# NOTE(review): ["pvalue"]$color is likely NULL (no color column exists);
# see the note in the first enrichment section.
wt1021.vs.wt1021B_pval01 <- string.db.sme1021$post_payload(wt1021.vs.wt1021B.mapped_sig$STRING_id,
colors=wt1021.vs.wt1021B.mapped_sig["pvalue"]$color )
# display a STRING network png with the "halo"
string.db.sme1021$plot_network( wt1021.vs.wt1021B.DE.pv.fc.STRING$STRING_id[1:50],
payload_id=wt1021.vs.wt1021B_pval01,
required_score=wt1021.vs.wt1021B.DE.pv.fc.STRING$logFC[1:50])
# plot the enrichment for the best 100 genes
wt1021.wt1021b.top100<-string.db.sme1021$plot_ppi_enrichment( wt1021.vs.wt1021B.intersected$STRING_id[1:500], quiet=TRUE )
## ----eval = FALSE--------------------------------------------------------
# KEGG pathway (enrichKEGG) and KEGG module (enrichMKEGG) over-representation
# analysis of the mapped gene symbols, followed by the standard clusterProfiler
# visualisations (barplot, dotplot, cnetplot, enrichMap).
wt1021.vs.wt1021B.kegg.rich <- enrichKEGG(gene = wt1021.vs.wt1021B.DE_mapped$GeneSymbol, organism = 'sme', pvalueCutoff = 0.05)
head(wt1021.vs.wt1021B.kegg.rich)
dim(wt1021.vs.wt1021B.kegg.rich)
wt1021.vs.wt1021B.mkegg.rich <- enrichMKEGG(gene = wt1021.vs.wt1021B.DE_mapped$GeneSymbol, organism = 'sme', pvalueCutoff = 0.05)
head(wt1021.vs.wt1021B.mkegg.rich)
dim(wt1021.vs.wt1021B.mkegg.rich)
barplot(wt1021.vs.wt1021B.mkegg.rich, drop=TRUE, showCategory=12)
barplot(wt1021.vs.wt1021B.kegg.rich, drop=TRUE, showCategory=12)  # was drop=T
dotplot(wt1021.vs.wt1021B.mkegg.rich)
# BUG FIX: the original plotted wt1021.vs.wt1021B.kegg, an object that is
# never created anywhere in this script; .kegg.rich (defined above) is meant.
dotplot(wt1021.vs.wt1021B.kegg.rich)
cnetplot(wt1021.vs.wt1021B.mkegg.rich, categorySize="pvalue") # ,wt1021.vs.wt1021B.kegg
enrichMap(wt1021.vs.wt1021B.mkegg.rich)
cnetplot(wt1021.vs.wt1021B.kegg.rich, categorySize="pvalue") # ,wt1021.vs.wt1021B.kegg
enrichMap(wt1021.vs.wt1021B.kegg.rich)
# BUG FIX: the original call read `foldChange=,wt1021...$LogFoldChange, )` --
# an empty named argument plus a stray positional argument and trailing comma.
cnetplot(wt1021.vs.wt1021B.kegg.rich, categorySize="pvalue", foldChange=wt1021.vs.wt1021B.DE_mapped$LogFoldChange)
cnetplot(wt1021.vs.wt1021B.kegg.rich, categorySize="pvalue") # ,wt1021.vs.wt1021B.kegg
enrichMap(wt1021.vs.wt1021B.kegg.rich)
## ----fig.height=12, fig.width=8------------------------------------------
AB.vs.wt1021.mkegg <- enrichMKEGG(gene = AB.vs.wt1021.DE_mapped$GeneSymbol,
organism = 'sme')
AB.vs.wt1021.kegg <- enrichKEGG(gene = AB.vs.wt1021.DE_mapped$GeneSymbol,
organism = 'sme')
barplot(AB.vs.wt1021.mkegg, drop=TRUE, showCategory=12)
barplot(AB.vs.wt1021.kegg, showCategory=8)
dotplot(AB.vs.wt1021.mkegg)
dotplot(AB.vs.wt1021.kegg)
cnetplot(AB.vs.wt1021.mkegg, categorySize="pvalue") # ,wt1021.vs.wt1021B.kegg
enrichMap(AB.vs.wt1021.mkegg)
cnetplot(AB.vs.wt1021.kegg, categorySize="pvalue", foldChange=AB.vs.wt1021.DE_mapped$LogFoldChange)
enrichMap(AB.vs.wt1021.kegg)
# GO analysis adjusting for gene length bias
# (assuming that y$genes$Length contains gene lengths)
library(EnrichmentBrowser)
# BUG FIX: the original called goa() and goanna(), neither of which exists in
# any package loaded by this script; limma's function is goana().
# NOTE(review): limma::goana expects Entrez gene IDs and a species with an
# org.*.eg.db annotation package -- confirm this works for S. meliloti.
go.abund <- goana(A.vs.wt1021.DE, geneid = "GeneID", trend = TRUE)
go.abund
go.len <- goana(A.vs.wt1021.DE, geneid = "GeneID", trend = "Length")
topGO(go.len, sort = "Qval")
topGO(go.len, sort = "Pval")
#Avswt1021.up_reg_genes<-topGO(go.abund, sort = "Qval")
#tAvswt1021.down_reg_genes<-topGO(go.abund, sort = "Qval")
## Default usage with a list of gene sets:
go.de <- goana(list(DE1 = EG.DE1, DE2 = EG.DE2, DE3 = EG.DE3))
topGO(go.de, sort = "DE1")
topGO(go.de, sort = "DE2")
topGO(go.abund, ontology = "BP")
topGO(go.de, ontology = "CC", sort = "DE3")
topGO(go.de, ontology = "MF", sort = "DE3")
## Standard KEGG analysis
## Over-representation with limma::kegga, then GSEA with gseKEGG.
AB.vs.wt1021.DE.kegg <- kegga(AB.vs.wt1021.DE$GeneSymbol, species.KEGG="sme") # equivalent to previous
AB.vs.wt1021.DE.kegg
barplot(AB.vs.wt1021.DE.kegg$DE, drop = TRUE, showCategory = 8)
## ------------------------------------------------------------------------
dotplot(AB.vs.wt1021.DE.kegg)
## NOTE(review): `mkk`, `ego` and `ggo` are not created in this block --
## they come from earlier interactive work; confirm before scripting.
enrichMap(mkk)
## categorySize can be scaled by 'pvalue' or 'geneNum'
cnetplot(AB.vs.wt1021.DE_mapped$STRING_id, categorySize="pvalue", foldChange=AB.vs.wt1021.DE_mapped$LogFoldChange)
plotGOgraph(ego)
gseaplot(AB.vs.wt1021.DE.kegg, geneSetID = "sme")
head(ggo)
AB.vs.wt1021.DE_gse<-as.data.frame(AB.vs.wt1021.DE_mapped$Pval, row.names=c(AB.vs.wt1021.DE_mapped$GeneSymbol), stringsAsFactors=F)
## FIX: the original assigned names() to AB.vs.wt1021.DE_sorted.gse BEFORE
## that object existed (a runtime error) and then overwrote it with an
## unnamed vector. gseKEGG requires a *named*, decreasingly sorted numeric
## vector, so build it, name it, then sort (sort() preserves names).
AB.vs.wt1021.DE_sorted.gse <- AB.vs.wt1021.DE_mapped$Pval
names(AB.vs.wt1021.DE_sorted.gse) <- AB.vs.wt1021.DE_mapped$GeneSymbol
AB.vs.wt1021.DE_sorted.gse <- sort(AB.vs.wt1021.DE_sorted.gse, decreasing = TRUE)
kk2 <- gseKEGG(geneList = AB.vs.wt1021.DE_sorted.gse,
               organism = 'sme',
               nPerm = 1000,
               minGSSize = 120,
               pvalueCutoff = 0.05,
               verbose = FALSE)
head(kk2)
##############################################################################
### DOSE/ClusterProfile - KEGG and Gene Ontology analysis
###############################################################################
## Batch enrichment for every pairwise contrast; each *.rich object is
## inspected with head()/dim() right after creation.
wt1021.vs.wt1021B.mkegg.rich <- enrichMKEGG(gene = wt1021.vs.wt1021B.DE_mapped$GeneSymbol,organism = 'sme')
wt1021.vs.wt1021B.kegg.rich <- enrichKEGG(gene = wt1021.vs.wt1021B.DE_mapped$GeneSymbol,organism='sme',pvalueCutoff = 0.05)
head(wt1021.vs.wt1021B.kegg.rich)
dim(wt1021.vs.wt1021B.kegg.rich)
wt1021.vs.wt1021B.DE.kegg.rich <- enrichMKEGG(gene = wt1021.vs.wt1021B.DE_mapped$GeneSymbol,organism='sme')
head(wt1021.vs.wt1021B.DE.kegg.rich)
dim(wt1021.vs.wt1021B.DE.kegg.rich)
## NOTE(review): despite the ".kegg.rich" name, this one uses enrichMKEGG
## (KEGG modules), not enrichKEGG -- confirm whether that is intended.
AB.vs.wt1021.kegg.rich <- enrichMKEGG(gene = AB.vs.wt1021.DE_mapped$GeneSymbol,organism='sme',pvalueCutoff = 0.05)
head(AB.vs.wt1021.kegg.rich)
dim(AB.vs.wt1021.kegg.rich)
AB.vs.wt1021B.kegg.rich <- enrichKEGG(gene = AB.vs.wt1021B.DE_mapped$GeneSymbol,organism='sme',pvalueCutoff = 0.05)
head(AB.vs.wt1021B.kegg.rich)
dim(AB.vs.wt1021B.kegg.rich)
AB.vs.wt1021B.kegg.rich
A.vs.wt1021.kegg.rich <- enrichKEGG(gene = A.vs.wt1021.DE_mapped$GeneSymbol,organism='sme',pvalueCutoff = 0.05)
head(A.vs.wt1021.kegg.rich)
dim(A.vs.wt1021.kegg.rich)
A.vs.wt1021.kegg.rich
## ----KEGG Download------------------------------------------------------------
## Look up the S. meliloti 1021 KEGG organism code and download its
## pathway-to-gene mapping tables.
sme.kegg.code<-search_kegg_organism('sme', by='kegg_code')
sme.kegg.code
go.abund
Smeliloti.kegg <- search_kegg_organism('Sinorhizobium meliloti 1021', by='scientific_name')
dim(Smeliloti.kegg)
head(Smeliloti.kegg)
# download_KEGG returns a list with KEGGPATHID2EXTID and KEGGPATHID2NAME.
sme.1021.kegg<-download_KEGG(species="sme", keggType = "KEGG", keyType = "kegg")
sme.1021.kegg$KEGGPATHID2EXTID[1:10,1]
sme.1021.kegg$KEGGPATHID2EXTID[1:10,2]
length(sme.1021.kegg)
names(sme.1021.kegg)
bitr_kegg
sme.1021.kegg
## NOTE(review): this call is broken as written -- names() of the character
## column GeneSymbol is NULL, and bitr()'s OrgDb argument expects an OrgDb
## annotation object, not download_KEGG()'s list. bitr_kegg() was probably
## intended here; confirm before relying on gene.df.
gene.df <- bitr(names(AB.vs.wt1021.DE_mapped$GeneSymbol), fromType = "SYMBOL",toType = c("ENTREZID", "KEGG"),OrgDb=sme.1021.kegg)
#######################################################################
## Map each DE table's GeneSymbol column onto STRING identifiers via the
## STRINGdb reference class (string.db.sme1021, built elsewhere in the
## session), drop unmapped rows, and persist the mapped tables to disk.
#######################################################################
A.vs.wt1021.DE_mapped <- string.db.sme1021$map( A.vs.wt1021.DE, "GeneSymbol", removeUnmappedRows = TRUE )
write.table(A.vs.wt1021.DE_mapped, file="A.vs.wt1021.KEGG.difftable")
head(A.vs.wt1021.DE_mapped)
AB.vs.wt1021B.DE_mapped <- string.db.sme1021$map( AB.vs.wt1021B.DE, "GeneSymbol", removeUnmappedRows = TRUE )
write.table(AB.vs.wt1021B.DE_mapped, file="AB.vs.wt1021B.KEGG.difftable")
head(AB.vs.wt1021B.DE_mapped)
AB.vs.wt1021.DE_mapped <- string.db.sme1021$map( AB.vs.wt1021.DE, "GeneSymbol", removeUnmappedRows = TRUE )
write.table(AB.vs.wt1021.DE_mapped, file="AB.vs.wt1021.KEGG.difftable")
head(AB.vs.wt1021.DE_mapped)
A.vs.AB.DE_mapped <- string.db.sme1021$map( A.vs.AB.DE, "GeneSymbol", removeUnmappedRows = TRUE )
write.table(A.vs.AB.DE_mapped, file="A.vs.AB.KEGG.difftable")
head(A.vs.AB.DE_mapped)
wt1021B.vs.wt1021.DE_mapped <- string.db.sme1021$map( wt1021B.vs.wt1021.DE, "GeneSymbol", removeUnmappedRows = TRUE )
write.table(wt1021B.vs.wt1021.DE_mapped, file="wt1021B.vs.wt1021.KEGG.difftable")
## FIX: original read `head(tw1021B.vs.wt1021.DE_mapped)` -- a typo ("tw")
## for the object created two lines above.
head(wt1021B.vs.wt1021.DE_mapped)
# GO analysis adjusting for gene length bias
# (assuming that y$genes$Length contains gene lengths)
## (Duplicate of the GO-length-bias section earlier in this file.)
library(EnrichmentBrowser)
## FIX: `goa()` and `goanna()` do not exist; limma's GO over-representation
## function is goana(). Also TRUE instead of the alias T.
go.abund <- goana(A.vs.wt1021.DE, geneid = "GeneID", trend = TRUE)
go.abund
go.len <- goana(A.vs.wt1021.DE, geneid = "GeneID", trend = "Length")
topGO(go.len, sort = "Qval")
topGO(go.len, sort = "Pval")
#Avswt1021.up_reg_genes<-topGO(go.abund, sort = "Qval")
#tAvswt1021.down_reg_genes<-topGO(go.abund, sort = "Qval")
## Default usage with a list of gene sets:
## NOTE(review): EG.DE1/EG.DE2/EG.DE3 are from limma's goana() example and
## are not defined in this file.
go.de <- goana(list(DE1 = EG.DE1, DE2 = EG.DE2, DE3 = EG.DE3))
topGO(go.de, sort = "DE1")
topGO(go.de, sort = "DE2")
topGO(go.abund, ontology = "BP")
topGO(go.de, ontology = "CC", sort = "DE3")
topGO(go.de, ontology = "MF", sort = "DE3")
## Standard KEGG analysis
## (Duplicate of the kegga/gseKEGG section earlier in this file.)
AB.vs.wt1021.DE.kegg <- kegga(AB.vs.wt1021.DE$GeneSymbol, species.KEGG="sme") # equivalent to previous
AB.vs.wt1021.DE.kegg
barplot(AB.vs.wt1021.DE.kegg$DE, drop = TRUE, showCategory = 8)
## ------------------------------------------------------------------------
dotplot(AB.vs.wt1021.DE.kegg)
## NOTE(review): `mkk`, `ego` and `ggo` are session leftovers, not defined
## in this file.
enrichMap(mkk)
## categorySize can be scaled by 'pvalue' or 'geneNum'
cnetplot(AB.vs.wt1021.DE_mapped$STRING_id, categorySize="pvalue", foldChange=AB.vs.wt1021.DE_mapped$LogFoldChange)
plotGOgraph(ego)
gseaplot(AB.vs.wt1021.DE.kegg, geneSetID = "sme")
head(ggo)
AB.vs.wt1021.DE_gse<-as.data.frame(AB.vs.wt1021.DE_mapped$Pval, row.names=c(AB.vs.wt1021.DE_mapped$GeneSymbol), stringsAsFactors=F)
## FIX: names() was assigned to AB.vs.wt1021.DE_sorted.gse before the object
## existed, then the object was overwritten with an unnamed vector. Build
## the named vector first, then sort decreasingly (gseKEGG's requirement).
AB.vs.wt1021.DE_sorted.gse <- AB.vs.wt1021.DE_mapped$Pval
names(AB.vs.wt1021.DE_sorted.gse) <- AB.vs.wt1021.DE_mapped$GeneSymbol
AB.vs.wt1021.DE_sorted.gse <- sort(AB.vs.wt1021.DE_sorted.gse, decreasing = TRUE)
kk2 <- gseKEGG(geneList = AB.vs.wt1021.DE_sorted.gse,
               organism = 'sme',
               nPerm = 1000,
               minGSSize = 120,
               pvalueCutoff = 0.05,
               verbose = FALSE)
head(kk2)
## ------------------------------------------------------------------------
## Human (hsa) GO/KEGG enrichment template from the clusterProfiler
## vignette. NOTE(review): `gene` and `geneList` are the vignette's example
## objects and are not defined in this file -- confirm before running.
ego <- enrichGO(gene          = gene,
                universe      = names(geneList),
                OrgDb         = org.Hs.eg.db,
                ont           = "CC",
                pAdjustMethod = "BH",
                pvalueCutoff  = 0.01,
                qvalueCutoff  = 0.05,
                readable      = TRUE)
head(ego)
## ------------------------------------------------------------------------
kk <- enrichKEGG(gene         = gene,
                 organism     = 'hsa',
                 pvalueCutoff = 0.05)
head(kk)
## ------------------------------------------------------------------------
# GSEA variant: geneList must be a named numeric vector sorted decreasingly.
kk2 <- gseKEGG(geneList     = geneList,
               organism     = 'hsa',
               nPerm        = 1000,
               minGSSize    = 120,
               pvalueCutoff = 0.05,
               verbose      = FALSE)
head(kk2)
## ----eval = FALSE--------------------------------------------------------
## Commented-out vignette chunks (eval=FALSE) kept for reference: KEGG
## module enrichment, module GSEA, and DAVID enrichment.
## mkk <- enrichMKEGG(gene = gene,
##                    organism = 'hsa')
## ----eval=FALSE----------------------------------------------------------
## mkk2 <- gseMKEGG(geneList = geneList,
##                  species = 'hsa')
## ----eval=FALSE----------------------------------------------------------
## david <- enrichDAVID(gene = gene,
##                      idType = "ENTREZ_GENE_ID",
##                      listType = "Gene",
##                      annotation = "KEGG_PATHWAY",
##                      david.user = "clusterProfiler@hku.hk")
## ----fig.height=5, fig.width=9-------------------------------------------
barplot(ggo, drop=TRUE, showCategory=12)
## ----fig.height=5, fig.width=8-------------------------------------------
barplot(mkk, showCategory=8)
## ------------------------------------------------------------------------
dotplot(mkk)
## ----fig.cap="enrichment map of enrichment result", fig.align="center", fig.height=16, fig.width=16, eval=FALSE----
enrichMap(mkk)
## ## categorySize can be scaled by 'pvalue' or 'geneNum'
cnetplot(mkk, categorySize="pvalue", foldChange=geneList)
## ----fig.height=12, fig.width=8------------------------------------------
plotGOgraph(ego)
## ----fig.cap="plotting gsea result", fig.align="center", fig.height=6, fig.width=8----
gseaplot(kk, geneSetID = "sme")
## ----eval=FALSE----------------------------------------------------------
library("pathview")
## NOTE(review): pathview() is normally given a pathway.id (cf. the hsa04110
## template later in this file); as written this call has none -- confirm.
hsa04110 <- pathview(gene.data = geneList,
                     species = "sme",
                     limit = list(gene=max(abs(geneList)), cpd=1))
## ------------------------------------------------------------------------
## compareCluster demo: enrich several gene clusters at once, then a
## formula interface grouping genes by up/down regulation and |FC| class.
data(gcSample)
lapply(gcSample, head)
## ------------------------------------------------------------------------
ck <- compareCluster(geneCluster = gcSample, fun = "enrichKEGG")
head(as.data.frame(ck))
## ------------------------------------------------------------------------
# Keep genes with |FC| > 1; label direction and a coarse magnitude group.
mydf <- data.frame(Entrez=names(geneList), FC=geneList)
mydf <- mydf[abs(mydf$FC) > 1,]
mydf$group <- "upregulated"
mydf$group[mydf$FC < 0] <- "downregulated"
mydf$othergroup <- "A"
mydf$othergroup[abs(mydf$FC) > 2] <- "B"
formula_res <- compareCluster(Entrez~group+othergroup, data=mydf, fun="enrichKEGG")
head(as.data.frame(formula_res))
## ----fig.height=7, fig.width=9-------------------------------------------
dotplot(ck)
## ----fig.height=6, fig.width=10------------------------------------------
dotplot(formula_res)
dotplot(formula_res, x=~group) + ggplot2::facet_grid(~othergroup)
## ID-translation examples with clusterProfiler::bitr against the human
## annotation package. NOTE(review): `glist` is not defined in this file.
library(org.Hs.eg.db)
keytypes(org.Hs.eg.db)
# [1] "ACCNUM" "ALIAS" "ENSEMBL" "ENSEMBLPROT" "ENSEMBLTRANS" "ENTREZID" "ENZYME"
# [8] "EVIDENCE" "EVIDENCEALL" "GENENAME" "GO" "GOALL" "IPI" "MAP"
#[15] "OMIM" "ONTOLOGY" "ONTOLOGYALL" "PATH" "PFAM" "PMID" "PROSITE"
#[22] "REFSEQ" "SYMBOL" "UCSCKG" "UNIGENE" "UNIPROT"
eg = bitr(glist, fromType="SYMBOL", toType="ENTREZID", OrgDb="org.Hs.eg.db")
head(eg)
uniprot_ids <- bitr(glist, fromType="SYMBOL", toType=c("UNIPROT"), OrgDb="org.Hs.eg.db")
head(uniprot_ids)
refseq_ids <- bitr(glist, fromType="SYMBOL", toType=c("REFSEQ"), OrgDb="org.Hs.eg.db")
head(refseq_ids)
# NOTE(review): both results below are stored in go_ids; the second
# (GOALL) overwrites the first (UCSCKG).
go_ids <- bitr(glist, fromType="SYMBOL", toType=c("UCSCKG"), OrgDb="org.Hs.eg.db")
head(go_ids)
go_ids <- bitr(glist, fromType="SYMBOL", toType=c("GOALL"), OrgDb="org.Hs.eg.db")
head(go_ids)
#eg2np <- bitr_kegg(glist, fromType='ncbi-geneid', toType='kegg', organism='hsa')
#bitr_kegg("Z5100", fromType="kegg", toType='ncbi-proteinid', organism='ece')
#bitr_kegg("Z5100", fromType="kegg", toType='uniprot', organism='ece')
library(DOSE)
## NOTE(review): `genelist` (lower case) vs `glist` -- inconsistent names;
## na.omit() result is also discarded here.
na.omit(genelist)
gene <- names(glist)
gene.df <- bitr(glist, fromType = "SYMBOL",toType = c("ENTREZID", "SYMBOL"),
                OrgDb = org.Hs.eg.db)
str(gene.df)
entrezgenes<-gene.df[,"ENTREZID"]
# GO classification at level 3 of the CC ontology.
ggo <- groupGO(gene=entrezgenes, OrgDb=org.Hs.eg.db, ont="CC",
               level = 3,readable = TRUE)
head(ggo)
kk <- enrichKEGG(gene = entrezgenes,organism='hsa',pvalueCutoff = 0.05)
head(kk)
## NOTE(review): this call will fail -- Org.Hs.egOMIM2EG@datacache is not an
## OrgDb (likely org.Hs.eg.db was meant), "KEGG" is not an org.Hs.eg.db
## keytype, and the input genes are S. meliloti symbols, not human SYMBOLs.
gene.df <- bitr(AB.vs.wt1021.DE$GeneSymbol, fromType = "SYMBOL",toType = c("ENTREZID", "KEGG"),OrgDb=Org.Hs.egOMIM2EG@datacache)
S.me1021 <- enrichKEGG(gene = geneList,organism='sme',pvalueCutoff = 0.05)
head(S.me1021)
#############################################################################
## KEGG-internal ID conversion for the sme genes (no OrgDb needed).
AB.vs.wt1021.npid <- bitr_kegg(AB.vs.wt1021.DE$GeneSymbol, fromType='kegg', toType='ncbi-proteinid', organism='sme',drop=T)
head(AB.vs.wt1021.npid)
dim(AB.vs.wt1021.npid)
AB.vs.wt1021.geneid <- bitr_kegg(AB.vs.wt1021.DE$GeneSymbol, fromType='kegg', toType='ncbi-geneid', organism='sme')
dim(AB.vs.wt1021.geneid)
## ------------------------------------------------------------------------
## GO over-representation (CC ontology) on the Entrez ids translated above;
## the eval=FALSE chunks below are kept from the vignette for reference.
ego <- enrichGO(gene=entrezgenes,
                universe=names(geneList),
                OrgDb= org.Hs.eg.db,
                ont = "CC",
                pAdjustMethod = "BH",
                pvalueCutoff = 0.01,
                qvalueCutoff = 0.05,
                readable = TRUE)
head(ego)
## ----eval=FALSE----------------------------------------------------------
## ego2 <- enrichGO(gene = gene.df$ENSEMBL,
##                  OrgDb = org.Hs.eg.db,
##                  keytype = 'ENSEMBL',
##                  ont = "CC",
##                  pAdjustMethod = "BH",
##                  pvalueCutoff = 0.01,
##                  qvalueCutoff = 0.05)
## ----eval=FALSE----------------------------------------------------------
## ego2 <- setReadable(ego2, OrgDb = org.Hs.eg.db)
## ----eval=FALSE----------------------------------------------------------
## ego3 <- gseGO(geneList = geneList,
##               OrgDb = org.Hs.eg.db,
##               ont = "CC",
##               nPerm = 1000,
##               minGSSize = 100,
##               maxGSSize = 500,
##               pvalueCutoff = 0.05,
##               verbose = FALSE)
## ------------------------------------------------------------------------
## limma barcodeplots of log fold change for each contrast. Columns are
## addressed by position: [,7] and [,8] of each DE table -- presumably the
## index/LFC columns; TODO confirm against the tables' column order.
barcodeplot(AB.vs.wt1021B.DE[,8], index = AB.vs.wt1021B.DE[,7],index2 = AB.vs.wt1021B.DE[,8], col.bars = "dodgerblue",alpha=.01,
            labels = "LogFoldChange",xlab="FoldChange")
barcodeplot(A.vs.wt1021.DE[,8], index = A.vs.wt1021.DE[,7],index2 = A.vs.wt1021.DE[,8], col.bars = "dodgerblue",alpha=.01,
            labels = "LogFoldChange",xlab="FoldChange")
barcodeplot(wt1021B.vs.wt1021.DE[,8], index = wt1021B.vs.wt1021.DE[,7],index2 = wt1021B.vs.wt1021.DE[,8], col.bars = "dodgerblue",alpha=.01,
            labels = "LogFoldChange",xlab="FoldChange")
barcodeplot(A.vs.AB.DE[,8], index = A.vs.AB.DE[,7],index2 = A.vs.AB.DE[,8], col.bars = "dodgerblue",alpha=.01,
            labels = "LogFoldChange",xlab="FoldChange")
barcodeplot(AB.vs.wt1021.DE[,8], index = AB.vs.wt1021.DE[,7],index2 = AB.vs.wt1021.DE[,8], col.bars = "dodgerblue",alpha=.01,
            labels = "LogFoldChange",xlab="FoldChange")
## ------------------------------------------------------------------------
## Human KEGG GSEA template, then enrichment against a custom MSigDB gene
## set collection (GMT file shipped with clusterProfiler).
kk2 <- gseKEGG(geneList = geneList,
               organism = 'hsa',
               nPerm = 1000,
               minGSSize = 120,
               pvalueCutoff = 0.05,
               verbose = FALSE)
head(kk2)
## ----eval = FALSE--------------------------------------------------------
## NOTE(review): unlike the surrounding eval=FALSE chunks, this call is NOT
## commented out, yet depends on the undefined `geneList` -- confirm.
sme.genes <- enrichMKEGG(gene = geneList,
                         organism = 'sme')
## ----eval=FALSE----------------------------------------------------------
## mkk2 <- gseMKEGG(geneList = geneList,
##                  species = 'hsa')
## ----eval=FALSE----------------------------------------------------------
## david <- enrichDAVID(gene = gene,
##                      idType = "ENTREZ_GENE_ID",
##                      listType = "Gene",
##                      annotation = "KEGG_PATHWAY",
##                      david.user = "clusterProfiler@hku.hk")
## ------------------------------------------------------------------------
# Universal enricher/GSEA over a TERM2GENE table read from a GMT file.
gmtfile <- system.file("extdata", "c5.cc.v5.0.entrez.gmt", package="clusterProfiler")
c5 <- read.gmt(gmtfile)
egmt <- enricher(gene, TERM2GENE=c5)
head(egmt)
egmt2 <- GSEA(geneList, TERM2GENE=c5, verbose=FALSE)
head(egmt2)
## ----fig.height=5, fig.width=9-------------------------------------------
## Plot gallery and compareCluster demo repeated from the clusterProfiler
## vignette; relies on session objects mkk/ego/kk/kk2/geneList.
barplot(mkk, drop=TRUE, showCategory=12)
## ----fig.height=5, fig.width=8-------------------------------------------
barplot(ego, showCategory=8)
## ------------------------------------------------------------------------
dotplot(mkk)
## ----
#fig.cap="enrichment map of enrichment result", fig.align="center", fig.height=16, fig.width=16, eval=FALSE----
# enrichMap(ego)
## ----fig.height=14, fig.width=14, eval=FALSE-----------------------------
## ## categorySize can be scaled by 'pvalue' or 'geneNum'
## cnetplot(ego, categorySize="pvalue", foldChange=geneList)
## ----fig.height=12, fig.width=8------------------------------------------
plotGOgraph(ego)
## ----fig.cap="plotting gsea result", fig.align="center", fig.height=6, fig.width=8----
gseaplot(kk2, geneSetID = "hsa04145")
## ----eval=FALSE----------------------------------------------------------
## browseKEGG(kk, 'hsa04110')
## ----eval=FALSE----------------------------------------------------------
## library("pathview")
## hsa04110 <- pathview(gene.data = geneList,
##                      pathway.id = "hsa04110",
##                      species = "hsa",
##                      limit = list(gene=max(abs(geneList)), cpd=1))
## ------------------------------------------------------------------------
data(gcSample)
lapply(gcSample, head)
## ------------------------------------------------------------------------
ck <- compareCluster(geneCluster = gcSample, fun = "enrichKEGG")
head(as.data.frame(ck))
## ------------------------------------------------------------------------
# Direction/magnitude grouping for the formula interface of compareCluster.
mydf <- data.frame(Entrez=names(geneList), FC=geneList)
mydf <- mydf[abs(mydf$FC) > 1,]
mydf$group <- "upregulated"
mydf$group[mydf$FC < 0] <- "downregulated"
mydf$othergroup <- "A"
mydf$othergroup[abs(mydf$FC) > 2] <- "B"
formula_res <- compareCluster(Entrez~group+othergroup, data=mydf, fun="enrichKEGG")
head(as.data.frame(formula_res))
## ----fig.height=7, fig.width=9-------------------------------------------
dotplot(ck)
## ----fig.height=6, fig.width=10------------------------------------------
dotplot(formula_res)
dotplot(formula_res, x=~group) + ggplot2::facet_grid(~othergroup)
## topGO workflow (from the topGO vignette). NOTE(review): `affyLib`,
## `geneList` and `topDiffGenes` are vignette objects not defined here.
library(package = affyLib, character.only = TRUE)
## the distribution of the adjusted p-values
hist(geneList, 100)
## how many differentially expressed genes are:
sum(topDiffGenes(geneList))
## build the topGOdata class
GOdata <- new("topGOdata",ontology = "BP",
              allGenes = geneList,geneSel = topDiffGenes,
              annot = annFUN.db,affylib = affyLib)
## display the GOdata object
GOdata
##########################################################
## Examples on how to use the methods
##########################################################
## description of the experiment
description(GOdata)
## obtain the genes that will be used in the analysis
a <- genes(GOdata)
str(a)
numGenes(GOdata)
## obtain the score (p-value) of the genes
selGenes <- names(geneList)[sample(1:length(geneList), 10)]
gs <- geneScore(GOdata, whichGenes = selGenes)
print(gs)
## if we want an unnamed vector containing all the feasible genes
gs <- geneScore(GOdata, use.names = FALSE)
str(gs)
## the list of significant genes
sg <- sigGenes(GOdata)
str(sg)
numSigGenes(GOdata)
## to update the gene list
.geneList <- geneScore(GOdata, use.names = TRUE)
GOdata ## more available genes
GOdata <- updateGenes(GOdata, .geneList, topDiffGenes)
GOdata ## the available genes are now the feasible genes
## the available GO terms (all the nodes in the graph)
go <- usedGO(GOdata)
length(go)
## to list the genes annotated to a set of specified GO terms
sel.terms <- sample(go, 10)
ann.genes <- genesInTerm(GOdata, sel.terms)
str(ann.genes)
## the score for these genes
ann.score <- scoresInTerm(GOdata, sel.terms)
str(ann.score)
## to see the number of annotated genes
num.ann.genes <- countGenesInTerm(GOdata)
str(num.ann.genes)
## to summarise the statistics
termStat(GOdata, sel.terms)
|
9bd3b2a15f0d93f5fec25f6756717ddb995c4546
|
3dd11d7fbaca863b0ac383fc2e6bf53efba6dac8
|
/R/workspace.R
|
bf893694b149fc490b5c97a98f2a20995453abe3
|
[] |
no_license
|
agroimpacts/EnergyAccess
|
a755ce2dcb1d9bad9c9a115e354862a7aec45633
|
744e6522ab9f16f8a50b0bc57d15093d7e3bffc1
|
refs/heads/master
| 2021-08-30T11:22:57.732571
| 2017-12-17T18:03:15
| 2017-12-17T18:03:15
| 111,222,242
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,500
|
r
|
workspace.R
|
library(sp)
library(rgdal)
library(rgeos)
library(raster)
library(devtools)
library(roxygen2)
library(gstat)
library(dismo)
library(RColorBrewer)
library(viridis)
library(mapview)
library(ggplot2)
library(reshape2)
library(gridExtra) #arranging ggplots in grid
library(grid) #arranging ggplots in grid
#======================================================
### CLEANING SURVEY DATA ###
## READ/CLEAN IPUMS DATA ##
## Recode IPUMS-DHS household variables to 0/1 indicators, then aggregate
## to per-cluster means (expressed as percentages) for the three samples.
IPUMS<- read.csv(file="inst/extdata/idhs_00003.csv", stringsAsFactors = FALSE)
#Reassign into boolean values
#IPUMS<-subset(IPUMS, URBAN!=1) #activate for rural only analysis
# ELECTRCHH codes 6 and 8 recoded to 0 (IPUMS special codes -- TODO confirm
# against the IPUMS-DHS codebook).
IPUMS$ELECTRCHH[which(IPUMS$ELECTRCHH ==6 | IPUMS$ELECTRCHH ==8)]<-0
# COOKFUEL codes 400/500/520/530 -> 1 (wood-type fuels per codebook -- TODO
# confirm), everything else -> 0.
IPUMS$COOKFUEL[which(IPUMS$COOKFUEL==400 | IPUMS$COOKFUEL==500 | IPUMS$COOKFUEL==520 |IPUMS$COOKFUEL==530) ] <-1
IPUMS$COOKFUEL[which(IPUMS$COOKFUEL !=1 )]<-0
IPUMS$EDUCLVL[which(IPUMS$EDUCLVL==2 | IPUMS$EDUCLVL==3 | IPUMS$EDUCLVL==8)] <-1
#Create seperate tables for 3 time intervals
# Despite its name, C_erase is the KEEP-list of columns.
C_erase<-c("SAMPLE","URBAN","CLUSTERNO", "ELECTRCHH", "COOKFUEL", "EDUCLVL")
# SAMPLE codes 2884/2885/2886 select the 2003/2008/2014 survey rounds.
data2003<- IPUMS[IPUMS$SAMPLE==2884, names(IPUMS) %in% C_erase]
data2008<- IPUMS[IPUMS$SAMPLE==2885, names(IPUMS) %in% C_erase]
data2014<- IPUMS[IPUMS$SAMPLE==2886, names(IPUMS) %in% C_erase]
#Aggregate to Cluster number by averaging values
# Columns are addressed by position (4:5 / 4:6) -- fragile if the keep-list
# order changes.
d2003<-aggregate(data2003[, 4:5], list(data2003$CLUSTERNO), mean)
d2008<-aggregate(data2008[, 4:5], list(data2008$CLUSTERNO), mean)
d2014<-aggregate(data2014[, 4:6], list(data2014$CLUSTERNO), mean)
# Convert cluster-mean proportions to percentages.
d2003$COOKFUEL<-round(d2003$COOKFUEL, 3)*100
d2003$ELECTRCHH<-round(d2003$ELECTRCHH, 3)*100
d2008$COOKFUEL<-round(d2008$COOKFUEL, 3)*100
d2008$ELECTRCHH<-round(d2008$ELECTRCHH, 3)*100
d2014$COOKFUEL<-round(d2014$COOKFUEL, 3)*100
d2014$ELECTRCHH<-round(d2014$ELECTRCHH, 3)*100
d2014$EDUCLVL<-round(d2014$EDUCLVL, 3)*100
## READ/CLEAN DHS STATCOMPILER DATA ##
#these variables will be used for multivariate regression for 2014 only
DHS<- read.csv(file="inst/extdata/clusters2014/GHGC71FL.csv", stringsAsFactors = FALSE)
C_erase<-c("DHSCLUST","All_Population_Density_2015","BUILT_Population_2014")
dataDHS<- DHS[, names(DHS) %in% C_erase] #clean out unncessary attributes
colnames(dataDHS)<-c("DHSCLUST", "Pop15", "Built14")
dataDHS$Built14<-round(dataDHS$Built14, 4)
## MAKING SURVEY DATA SPATIAL ##
## Join the cluster-level survey averages to the DHS cluster point
## shapefiles, drop NA rows, and reproject everything to Albers Equal Area.
#Import survey cluster points
clust03<-shapefile("inst/extdata/clusters2003/c2003c.shp") #2003 Survey
clust08<-shapefile("inst/extdata/clusters2008/c2008c.shp") #2008 Survey
clust14<-shapefile("inst/extdata/clusters2014/c2014c.shp") #2014 Survey
#attach survey data averages to cluster points
# aggregate() named the cluster id "Group.1"; join it to DHSCLUST.
clust03M <- merge(clust03, d2003, by.x = "DHSCLUST",
                  by.y = "Group.1")
clust08M <- merge(clust08, d2008, by.x = "DHSCLUST",
                  by.y = "Group.1")
clust14M <- merge(clust14, d2014, by.x = "DHSCLUST",
                  by.y = "Group.1")
clust14M <- merge(clust14M, dataDHS, by.x = "DHSCLUST",
                  by.y ="DHSCLUST") #add columns for Built and Population
cnames<-c("ELECTRCHH","COOKFUEL")
cnames1<-c("ELECTRCHH","COOKFUEL","EDUCLVL","Pop15","Built14")
clust03M<- clust03M[,(names(clust03M) %in% cnames)]
clust08M<- clust08M[,(names(clust08M) %in% cnames)]
clust14M<- clust14M[,(names(clust14M) %in% cnames1)]
# Drop clusters with no survey match (NA after the merge).
clust03M <- clust03M[!is.na(clust03M@data$COOKFUEL),]
clust08M <- clust08M[!is.na(clust08M@data$COOKFUEL),]
clust14M <- clust14M[!is.na(clust14M@data$COOKFUEL),]
#Import shapefile of Ghana districts
districts<-shapefile("inst/extdata/DistrictBoundary/GHA_admbndp2_1m_GAUL.shp")
districts <- districts[,(names(districts) %in% "HRname")] #keep column with district names
#project files to Albers Equal Area
# NOTE(review): the proj4 string is split across a line break inside the
# quotes, and the argument is named CRS (partial match for CRSobj) --
# confirm this parses/transforms as intended.
dist_albs<-spTransform(x=districts, CRS="+proj=aea +lat_1=20 +lat_2=-23 +lat_0=0
                       +lon_0=25 +x_0=0 +y_0=0 +ellps=WGS84 +units=m +no_defs")
c2003<-spTransform(x=clust03M, CRSobj=proj4string(dist_albs))
c2008<-spTransform(x=clust08M, CRSobj=proj4string(dist_albs))
c2014<-spTransform(x=clust14M, CRSobj=proj4string(dist_albs))
clist<-c(c2003, c2008, c2014)
#Inverse Distance Weighing Interpolation of points
## IDW-interpolate the cluster point values onto a 2 km raster, masked to
## the district boundaries. gstat(formula = v ~ 1) with no model = IDW.
r <- raster(extent(dist_albs), res = 2000, crs = crs(dist_albs), #create blank raster
            vals = 1)
interpolCOOK <- lapply(clist, function(x) { #interpolate cluster point values to raster surface
  a <- gstat(id = "COOKFUEL", formula = COOKFUEL ~ 1, data = x)
  b <- interpolate(object = r, model = a)
  c <- mask(x = b, mask = dist_albs)
})
interpolEnergy <- lapply(clist, function(x) { #interpolate cluster point values to raster surface
  a <- gstat(id = "ELECTRCHH", formula = ELECTRCHH ~ 1, data = x)
  b <- interpolate(object = r, model = a)
  c <- mask(x = b, mask = dist_albs)
})
## CAUTION: the globals a/b/c and a1/b1/c1 and a2/b2/c2 below are reused by
## the extraction section further down -- do not rename or reorder.
#EDUCATION interpolate cluster point values to raster surface
a <- gstat(id = "EDUCLVL", formula = EDUCLVL ~ 1, data = c2014)
b <- interpolate(object = r, model = a)
c <- mask(x = b, mask = dist_albs)
#POPULATION interpolate cluster point values to raster surface
a1 <- gstat(id = "Pop15", formula = Pop15 ~ 1, data = c2014)
b1 <- interpolate(object = r, model = a1)
c1 <- mask(x = b1, mask = dist_albs)
#BUILT AREAS interpolate cluster point values to raster surface
a2 <- gstat(id = "Built14", formula = Built14 ~ 1, data = c2014)
b2 <- interpolate(object = r, model = a2)
c2 <- mask(x = b2, mask = dist_albs)
## INTERPOLATED RASTER to DISTRICTS ##
## Zonal means: average each interpolated surface within every district
## polygon and attach the result as a new column on dist_a.
dist_a<-dist_albs
#Wood as Cooking Fuel
# interpolCOOK[[1]]/[[2]]/[[3]] correspond to the 2003/2008/2014 surveys
# (order of clist above).
v.vals <- extract(interpolCOOK[[1]], dist_a)
dist_a$COOKFUEL03 <- round(sapply(v.vals, mean))
v.vals <- extract(interpolCOOK[[2]], dist_a)
dist_a$COOKFUEL08 <- round(sapply(v.vals, mean))
v.vals <- extract(interpolCOOK[[3]], dist_a)
dist_a$COOKFUEL14 <- round(sapply(v.vals, mean))
#Energy Access
v.vals <- extract(interpolEnergy[[1]], dist_a)
dist_a$ELECTRCHH03 <- round(sapply(v.vals, mean))
v.vals <- extract(interpolEnergy[[2]], dist_a)
dist_a$ELECTRCHH08 <- round(sapply(v.vals, mean))
v.vals <- extract(interpolEnergy[[3]], dist_a)
dist_a$ELECTRCHH14 <- round(sapply(v.vals, mean))
#Education
# c / c1 / c2 are the masked 2014 surfaces built in the interpolation block.
v.vals <- extract(c, dist_a)
dist_a$EDUCLVL14 <- round(sapply(v.vals, mean))
#Population
v.vals <- extract(c1, dist_a)
dist_a$Pop15 <- round(sapply(v.vals, mean), 3)
#Built Areas
v.vals <- extract(c2, dist_a)
dist_a$Built14 <- round(sapply(v.vals, mean), 4)
#======================================================
#### RASTER SECTION ####
## DEFORESTATION ##
## Hansen loss-year raster: cell value = year of forest loss (1 = 2001 ...).
## Reclassify into binary loss masks per survey interval, then compute the
## percent of each district deforested.
# NOTE(review): absolute Windows path -- breaks portability; prefer a
# project-relative path like the inst/extdata paths used above.
fnm5 <- file.path("C:/Users/NMcCray/Documents/R/EnergyAccess/inst/extdata/HansenAllyr.tif")
deforestation <- raster(fnm5)
# Template raster in the districts' CRS, then Albers at 2.5 km.
zamr <- raster(x = extent(districts), crs = crs(districts), res = 0.1)
values(zamr) <- 1:ncell(zamr)
zamr_alb <- projectRaster(from = zamr, res = 2500, crs = crs(dist_a),
                          method = "ngb")
deforest_alb <- projectRaster(from = deforestation, to = zamr_alb, method = "ngb")
# Reclass matrices are (from, to, becomes) triples read by row.
rclmat <- matrix( #all deforestation since 2001
  c(0, 0.9, 0, 0.99, 16, 1),
  nrow = 2,
  ncol = 3,
  byrow = TRUE)
rclmat1 <- matrix( #deforestation from 2001-2003
  c(0, 0.9, 0, 0.99, 3.9, 1, 3.99, 16, 0),
  nrow = 3,
  ncol = 3,
  byrow = TRUE)
rclmat2 <- matrix( #deforestation from 2003-2008
  c(0, 3.9, 0, 3.99, 8.9, 1, 8.99, 16, 0),
  nrow = 3,
  ncol = 3,
  byrow = TRUE)
rclmat3 <- matrix( #deforestation from 2008-2014
  c(0, 8.9, 0, 8.99, 14.9, 1, 14.99, 16, 0),
  nrow = 3,
  ncol = 3,
  byrow = TRUE)
totaldeforestclass <- reclassify(x = deforest_alb, rcl = rclmat, include.lowest = TRUE)
deforestclass0103 <- reclassify(x = deforest_alb, rcl = rclmat1, include.lowest = TRUE)
deforestclass0408 <- reclassify(x = deforest_alb, rcl = rclmat2, include.lowest = TRUE)
deforestclass0914 <- reclassify(x = deforest_alb, rcl = rclmat3, include.lowest = TRUE)
#extract values
deforest.all <-extract(totaldeforestclass, dist_a)
deforest.0103 <- extract(deforestclass0103, dist_a)
deforest.0408 <- extract(deforestclass0408, dist_a)
deforest.0914 <- extract(deforestclass0914, dist_a)
#aggregated to district
# Mean of a 0/1 mask = fraction deforested; *100 -> percent per district.
dist_a$deforestALL<-round(100*sapply(deforest.all, mean),3)
dist_a$deforest03<-round(100*sapply(deforest.0103, mean),3)
dist_a$deforest08<-round(100*sapply(deforest.0408, mean),3)
dist_a$deforest14<-round(100*sapply(deforest.0914, mean),3)
## CROPLAND ##
## Land-use raster reclassified to a cropland mask (codes 0-31 -> 1,
## 39-231 -> 0 -- TODO confirm against the LandUse2009 legend), then
## aggregated to percent-cropland per district.
# NOTE(review): absolute Windows path -- same portability issue as above.
fnm6 <- file.path("C:/Users/NMcCray/Documents/R/EnergyAccess/inst/extdata/LandUse2009.tif")
CLand <- raster(fnm6)
CLand_alb <- projectRaster(from = CLand, to = zamr_alb, method = "ngb") #project
rcc1 <- matrix(
  c(0, 31, 1, 39, 231, 0),
  nrow = 2,
  ncol = 3,
  byrow = TRUE)
CLand_RC <- reclassify(x = CLand_alb, rcl = rcc1, include.lowest = TRUE)
CLand_RC_e <- extract(CLand_RC, dist_a)
dist_a$crop09<-round(100*sapply(CLand_RC_e, mean),3) #aggregate crop % values to district
#======================================================
### VISUALATION ###
#Descriptive stats
## Per-district scatter plots (with linear trend), histograms, and mapview
## choropleths for energy access, wood use, and deforestation.
#scatter plots
#>>>Energy Access
# melt() to long format so the three survey years share one plot.
elecdf = data.frame(count = c(1:139), dist_a@data[,5:7])
colnames(elecdf)<-c("count","energy access 2003","energy access 2008","energy access 2014")
elecdf.m = melt(elecdf, id.vars ="count", measure.vars = c("energy access 2003","energy access 2008","energy access 2014"))
p1<-ggplot(elecdf.m, aes(count, value, colour = variable)) + geom_point() + ylim(0,100)+stat_smooth(method=lm)+ ggtitle("Energy Access") +theme(plot.title = element_text(color="#666666", face="bold", size=23, hjust=0))+labs(x="District #",y="% of Electrified Dwellings")+theme(axis.title = element_text( color="#666666", face="bold", size=13))
#>>>Wood Use
cookdf = data.frame(count = c(1:139), dist_a@data[,2:4])
colnames(cookdf)<-c("count","wood use 2003","wood use 2008","wood use 2014")
cookdf.m = melt(cookdf, id.vars ="count", measure.vars = c("wood use 2003","wood use 2008","wood use 2014"))
p2<-ggplot(cookdf.m, aes(count, value, colour = variable)) + geom_point() + ylim(0,100)+stat_smooth(method=lm)+ ggtitle("Wood Use as Cooking Fuel") +theme(plot.title = element_text(color="#666666", face="bold", size=23, hjust=0))+labs(x="District #",y="% of Wood Use")+theme(axis.title = element_text( color="#666666", face="bold", size=13))
#>>>Deforestation
defdf = data.frame(count = c(1:139), dist_a@data[,12:14])
colnames(defdf)<-c("count","deforestation 2003","deforestation 2008","deforestation 2014")
defdf.m = melt(defdf, id.vars ="count", measure.vars = c("deforestation 2003","deforestation 2008","deforestation 2014"))
head(defdf.m)
p3<-ggplot(defdf.m, aes(count, value, colour = variable)) + geom_point() + ylim(0,15)+stat_smooth(method=lm)+ ggtitle("Deforestation") +theme(plot.title = element_text(color="#666666", face="bold", size=23, hjust=0))+labs(x="District #",y="% of Area Deforested")+theme(axis.title = element_text( color="#666666", face="bold", size=13))
grid.arrange(p1,p2,p3, nrow=3) #OUTPUT
#histograms
h1<-ggplot(data = elecdf.m, mapping = aes(x = value, fill=variable)) +
  geom_histogram(bins = 10) + facet_wrap(~variable) #energy access
h2<-ggplot(data = cookdf.m, mapping = aes(x = value, fill=variable)) +
  geom_histogram(bins = 10) + facet_wrap(~variable) #Wood
h3<-ggplot(data = defdf.m, mapping = aes(x = value, fill=variable)) +
  geom_histogram(bins = 10) + facet_wrap(~variable) #Deforestation
grid.arrange(h1,h2,h3, nrow=3) #OUTPUT
#maps
# Shared legend scales so the three years are visually comparable.
scale<-seq(0, 100, 10) #standardize legend scale
scaleD<-seq(0, 20, 2) #scale for deforesataion legends
cols<-rev(get_col_regions()) #add col.regions=cols for reveresed and new colors
Mtype<-c("CartoDB.Positron") #basemap
e_map03<-mapview(dist_a, zcol="ELECTRCHH03", col.regions=cols, layer.name="2003 Energy Access", maxpoints=40000000, alpha.regions=100,legend=TRUE, at= scale, map.types=Mtype)
e_map08<-mapview(dist_a, zcol="ELECTRCHH08", col.regions=cols, layer.name="2008 Energy Access", maxpoints=40000000, alpha.regions=100,legend=TRUE, at= scale, map.types=Mtype)
e_map14<-mapview(dist_a, zcol="ELECTRCHH14", col.regions=cols, layer.name="2014 Energy Access", maxpoints=40000000, alpha.regions=100,legend=TRUE, at= scale, map.types=Mtype)
ElecMaps=e_map03+ e_map08 +e_map14
ElecMaps #OUTPUT
c_map03<-mapview(dist_a, zcol="COOKFUEL03", layer.name="2003 Wood Use", maxpoints=40000000, alpha.regions=100,legend=TRUE, at= scale, map.types=Mtype, col.regions=cols)
c_map08<-mapview(dist_a, zcol="COOKFUEL08", layer.name="2008 Wood Use", maxpoints=40000000, alpha.regions=100,legend=TRUE, at= scale, map.types=Mtype, col.regions=cols)
c_map14<-mapview(dist_a, zcol="COOKFUEL14", layer.name="2014 Wood Use", maxpoints=40000000, alpha.regions=100,legend=TRUE, at= scale, map.types=Mtype, col.regions=cols)
CookMaps<-c_map03+c_map08+c_map14
CookMaps #OUTPUT
d_map03<-mapview(dist_a, zcol="deforest03", layer.name="2003 Deforestation", maxpoints=40000000, alpha.regions=100,legend=TRUE,col.regions=cols, at= scaleD, map.types=Mtype)
d_map08<-mapview(dist_a, zcol="deforest08", layer.name="2008 Deforestation", maxpoints=40000000, alpha.regions=100,legend=TRUE,col.regions=cols, at= scaleD, map.types=Mtype)
d_map14<-mapview(dist_a, zcol="deforest14", layer.name="2014 Deforestation", maxpoints=40000000, alpha.regions=100,legend=TRUE,col.regions=cols, at= scaleD, map.types=Mtype)
defMaps<-d_map03+d_map08+d_map14
defMaps #OUTPUT
#======================================================
### ANALYSIS ###
## District-level bivariate OLS between wood use (COOKFUEL), household
## electrification (ELECTRCHH) and deforestation for each survey year,
## then a 2014 multivariate model with stepwise AIC selection.
#Bivariate regression
WU_EA03<-lm(COOKFUEL03 ~ ELECTRCHH03, data=dist_a)
WU_EA08<-lm(COOKFUEL08 ~ ELECTRCHH08, data=dist_a)
WU_EA14<-lm(COOKFUEL14 ~ ELECTRCHH14, data=dist_a)
summary(WU_EA03)
cor(dist_a$COOKFUEL03, dist_a$ELECTRCHH03)
summary(WU_EA08)
cor(dist_a$COOKFUEL08, dist_a$ELECTRCHH08)
summary(WU_EA14)
cor(dist_a$COOKFUEL14, dist_a$ELECTRCHH14)
WU_D03<-lm(deforest03 ~ COOKFUEL03, data=dist_a)
WU_D08<-lm(deforest08 ~ COOKFUEL08, data=dist_a)
WU_D14<-lm(deforest14 ~ COOKFUEL14, data=dist_a)
summary(WU_D03)
cor(dist_a$deforest03, dist_a$COOKFUEL03)
summary(WU_D08)
cor(dist_a$deforest08, dist_a$COOKFUEL08)
## FIX: original repeated summary(WU_D08) here, but the paired cor() on the
## next line is for the 2014 variables -- summarize the 2014 model instead.
summary(WU_D14)
cor(dist_a$deforest14, dist_a$COOKFUEL14)
EA_D03<- lm(deforest03 ~ ELECTRCHH03, data=dist_a)
EA_D08<- lm(deforest08 ~ ELECTRCHH08, data=dist_a)
EA_D14<- lm(deforest14 ~ ELECTRCHH14, data=dist_a)
summary(EA_D03)
cor(dist_a$deforest03, dist_a$ELECTRCHH03)
summary(EA_D08)
cor(dist_a$deforest08, dist_a$ELECTRCHH08)
summary(EA_D14)
cor(dist_a$deforest14, dist_a$ELECTRCHH14)
##Multivariate Regression##
fit<-lm(deforestALL ~ COOKFUEL14 + ELECTRCHH14 + Pop15 + EDUCLVL14 + Built14 + crop09, data= dist_a)
summary(fit)
confint(fit, level=0.95) #confidence intervals
fitted(fit)
plot(residuals(fit))
anova(fit)
# 2x2 panel of the standard lm diagnostic plots.
layout(matrix(c(1,2,3,4),2,2))
plot(fit)
library(MASS)
step<- stepAIC(fit, direction="both")
step$anova
##OLS###
### TIME SERIES LINERA REGRESSION##### tslm(formula=)
#splm (for spatial regressions)##
#rvest- harvest scrape webpages#
####BIVARIATE LOCAL SPATIAL AUTOCORRELATION####
#bivariate Morans
install.packages("dplyr")
library(dplyr)
library(ggplot2)
library(sf)
install.packages("spdep")
library(spdep)
library(rgdal)
library(stringr)
y<- dist_a$ELECTRCHH03
x<- dist_a$deforest03
head(dist_a@data)
# Programming some functions
# Bivariate Moran's I
# Bivariate Moran's I: returns the global index and the per-unit local
# contributions. With y = NULL it reduces to the univariate statistic.
# `W` is a (row-standardized) spatial weight matrix; NA weights are
# treated as "not neighbours".
moran_I <- function(x, y = NULL, W){
  if (is.null(y)) {
    y <- x
  }
  # Standardize both variables to z-scores, ignoring missing values.
  zx <- (x - mean(x, na.rm = TRUE)) / sd(x, na.rm = TRUE)
  zy <- (y - mean(y, na.rm = TRUE)) / sd(y, na.rm = TRUE)
  W[is.na(W)] <- 0
  n <- nrow(W)
  lag_y <- W %*% zy  # spatial lag of standardized y
  list(
    global = (zx %*% lag_y) / (n - 1),
    local  = as.numeric(zx * lag_y)
  )
}
# Permutations for the Bivariate Moran's I
# Conditional-randomization null distribution for the bivariate Moran's I:
# y is resampled with replacement `nsims` times while x stays fixed, and the
# global/local statistics are recomputed for each draw.
simula_moran <- function(x, y = NULL, W, nsims = 1000){
  if (is.null(y)) {
    y <- x
  }
  n <- nrow(W)
  zx <- (x - mean(x, na.rm = TRUE)) / sd(x, na.rm = TRUE)
  W[is.na(W)] <- 0
  # One column of resampled y per simulation (single sample() call).
  picked <- sample(seq_len(n), size = n * nsims, replace = TRUE)
  y_perm <- matrix(y[picked], nrow = n, ncol = nsims)
  # Standardize each row across the simulations.
  y_perm <- (y_perm - apply(y_perm, 1, mean)) / apply(y_perm, 1, sd)
  list(
    global_sims = as.numeric((zx %*% W %*% y_perm) / (n - 1)),
    local_sims  = zx * (W %*% y_perm)
  )
}
#======================================================
# Adjacency Matrix (Queen)
# Queen-contiguity neighbours of the district polygons, converted to a
# row-standardized numeric weight matrix W. Rows of isolated districts
# divide by zero and are reset to 0 below.
nb <- poly2nb(dist_a)
lw <- nb2listw(nb, style = "B", zero.policy = T)
W <- as(lw, "symmetricMatrix")
W <- as.matrix(W/rowSums(W))
W[which(is.na(W))] <- 0
#======================================================
# Calculating the index and its simulated distribution
# for global and local values
m <- moran_I(x, y, W)
m[[1]] # global value
m_i <- m[[2]] # local values
local_sims <- simula_moran(x, y, W)$local_sims
# Identifying the significant values
alpha <- .05 # for a 95% confidence interval
probs <- c(alpha/2, 1-alpha/2)
# Per-district permutation envelope (2.5% / 97.5% quantiles of the simulated
# local values); a local statistic outside its envelope is flagged significant.
intervals <- t( apply(local_sims, 1, function(x) quantile(x, probs=probs)))
sig <- ( m_i < intervals[,1] ) | ( m_i > intervals[,2] )
#======================================================
# Preparing for plotting
# Convert to sf for mapview and attach the significance flag computed above.
dist_a03<- st_as_sf(dist_a)
dist_a03$sig <- sig
# Identifying the LISA patterns
# Quadrant labels come from the sign of standardized x crossed with the sign
# of the spatial lag of standardized y: High.High, High.Low, Low.High, Low.Low.
xp <- (x-mean(x))/sd(x)
yp <- (y-mean(y))/sd(y)
patterns <- as.character( interaction(xp > 0, W%*%yp > 0) )
patterns <- patterns %>%
  str_replace_all("TRUE","High") %>%
  str_replace_all("FALSE","Low")
# Non-significant districts are collapsed into a single category.
patterns[dist_a03$sig==0] <- "Not significant"
dist_a03$patterns <- patterns
# Plotting
mapview(dist_a03, zcol="patterns", legend=TRUE, alpha=0, maxpoints=40000000, alpha.regions=80, layer.name="BiLISA: Deforestation and EA")
#This is the link to download the Hansen data
#Go to tasks and then download to google drive
#https://code.earthengine.google.com/d5c909c06ec28626324ecd65c34417f2
#This is the link to download the Cropland data
#Go to tasks and then download to google drive
#https://code.earthengine.google.com/594731702af6ef064128e784a632a0e8
|
b78d8778e6f599aedf8109f7e1d963a15b24dcfb
|
c3e7d1ff8719f4c24b87b01eba1ccbbe5250e463
|
/man/convertToWellLocation.Rd
|
19c2f530dbf4ea978c684e5f6d66876ffffdec0a
|
[] |
no_license
|
nbenn/singleCellFeatures
|
4016508a11b2e9ed5103e45af7347bc63175e014
|
9b3b72a2cd0aaabc78ccde770f068b6d2c16b82c
|
refs/heads/master
| 2020-04-03T03:02:30.944690
| 2016-06-15T09:13:15
| 2016-06-15T09:13:15
| 32,792,562
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 531
|
rd
|
convertToWellLocation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convertToWellLocation.R
\name{convertToWellLocation}
\alias{convertToWellLocation}
\title{Covert compatible objects to WellLocation}
\usage{
convertToWellLocation(x)
}
\arguments{
\item{x}{The object to be used as basis for the new WellLocation object}
}
\value{
The new WellLocation object
}
\description{
Build WellLocation object from other objects, such as WellData.
}
\examples{
plate <- PlateLocation("J101-2C")
path <- convertToWellLocation(plate)
}
|
2c5957146729ab93a122564b9a14dfd5903a1f69
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/dfcomb/R/dfcomb.R
|
565c55fa8809fbdcfcef703d6d45461ab57ec596
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,407
|
r
|
dfcomb.R
|
# Simulate operating characteristics of the dfcomb dose-finding design for a
# two-agent combination. Each of `nsim` simulated trials walks the
# ndose_a1 x ndose_a2 dose grid under the true toxicity matrix `p_tox`; the
# trial conduct and MCMC estimation run in compiled code via
# .C(C_logistic_sim, ...), which fills the output buffers allocated below.
# Returns an object of class "CombIncrease_sim" holding the design settings,
# the percentage of selection / mean patients / mean toxicities per
# combination, and the percentages of inconclusive and early-stopped trials.
CombIncrease_sim = function(ndose_a1, ndose_a2, p_tox, target, target_min, target_max, prior_tox_a1, prior_tox_a2, n_cohort,
                            cohort, tite=FALSE, time_full=0, poisson_rate=0, nsim, c_e=0.85, c_d=0.45, c_stop=0.95, c_t=0.5,
                            c_over=0.25, cmin_overunder=2, cmin_mtd=3, cmin_recom=1, startup=1, alloc_rule=1, early_stop=1,
                            nburn=2000, niter=5000, seed=14061991){
  # The C routine works with the de-escalation threshold on the complementary
  # probability scale, hence the flip.
  c_d = 1-c_d
  # --- Dimension checks on user inputs -------------------------------------
  dim_ptox = dim(p_tox)
  if(dim_ptox[1] != ndose_a1 || dim_ptox[2] != ndose_a2){
    stop("Wrong dimension of the matrix for true toxicity probabilities.")
  }
  n_prior_tox_a1 = length(prior_tox_a1)
  if(n_prior_tox_a1 != ndose_a1){
    stop("The entered vector of initial guessed toxicity probabities for agent 1 is of wrong length.")
  }
  n_prior_tox_a2 = length(prior_tox_a2)
  if(n_prior_tox_a2 != ndose_a2){
    stop("The entered vector of initial guessed toxicity probabities for agent 2 is of wrong length.")
  }
  # --- Coerce every argument to the exact storage mode the .C interface
  # expects (positional interface: types and order must match the C side). ---
  ndose_a1 = as.integer(ndose_a1)[1]
  ndose_a2 = as.integer(ndose_a2)[1]
  target = as.double(target)[1]
  target_min = as.double(target_min)[1]
  target_max = as.double(target_max)[1]
  prior_tox_a1 = as.double(prior_tox_a1)
  prior_tox_a2 = as.double(prior_tox_a2)
  n_cohort = as.integer(n_cohort)[1]
  cohort = as.integer(cohort)[1]
  tite = as.logical(tite)[1]
  time_full = as.double(time_full)[1]
  poisson_rate = as.double(poisson_rate)[1]
  nsim = as.integer(nsim)[1]
  c_e = as.double(c_e)[1]
  c_d = as.double(c_d)[1]
  c_stop = as.double(c_stop)[1]
  c_t = as.double(c_t)[1]
  c_over = as.double(c_over)[1]
  cmin_overunder = as.integer(cmin_overunder)[1]
  cmin_mtd = as.integer(cmin_mtd)[1]
  cmin_recom = as.integer(cmin_recom)[1]
  startup = as.integer(startup)[1]
  alloc_rule = as.integer(alloc_rule)[1]
  early_stop = as.integer(early_stop)[1]
  seed = as.integer(seed)[1]
  nburn = as.integer(nburn)[1]
  niter = as.integer(niter)[1]
  # --- Validity checks on design settings ----------------------------------
  if(startup < 0 || startup > 3){
    stop("Unknown start-up id.")
  }
  if(alloc_rule != 1 && alloc_rule != 2 && alloc_rule != 3){
    stop("Unknown allocation rule id.")
  }
  if(early_stop != 1 && early_stop != 2 && early_stop != 3){
    stop("Unknown early stopping rule id.")
  }
  if(target < 0 || target > 1){
    stop("Targeted toxicity probability is not comprised between 0 and 1.")
  }
  if(target_max < 0 || target_max > 1){
    stop("Maximum targeted toxicity probability is not comprised between 0 and 1.")
  }
  if(target_min < 0 || target_min > 1){
    stop("Minimum targeted toxicity probability is not comprised between 0 and 1.")
  }
  if(n_cohort <= 0){
    stop("Number of cohorts must be positive.")
  }
  if(cohort <= 0){
    stop("Cohort size must be positive.")
  }
  if(time_full < 0){
    stop("Full follow-up time must be positive.")
  }
  if(poisson_rate < 0){
    stop("Parameter for Poisson process accrual must be positive.")
  }
  if(nsim <= 0){
    stop("Number of simulations must be positive.")
  }
  if(c_e < 0 || c_e > 1 || c_d < 0 || c_d > 1 || c_stop < 0 || c_stop > 1 || c_t < 0 || c_t > 1 || c_over < 0 || c_over > 1){
    stop("Probability thresholds are not comprised between 0 and 1.")
  }
  if(cmin_overunder < 0 || cmin_mtd < 0 || cmin_recom < 0){
    stop("Minimum number of cohorts for stopping or recommendation rule must be positive.")
  }
  if(nburn <= 0 || niter <= 0){
    stop("Number of iterations and burn-in for MCMC must be positive.")
  }
  # --- Probability-range checks on priors and true toxicities --------------
  for(a1 in 1:ndose_a1){
    if(prior_tox_a1[a1] < 0 || prior_tox_a1[a1] > 1){
      stop("At least one of the initial guessed toxicity probability for agent 1 is not comprised between 0 and 1.")
    }
  }
  for(a2 in 1:ndose_a2){
    if(prior_tox_a2[a2] < 0 || prior_tox_a2[a2] > 1){
      stop("At least one of the initial guessed toxicity probability for agent 2 is not comprised between 0 and 1.")
    }
  }
  for(a1 in 1:ndose_a1){
    for(a2 in 1:ndose_a2){
      if(p_tox[a1,a2] < 0 || p_tox[a1,a2] > 1){
        stop("At least one of the true toxicity probability is not comprised between 0 and 1.")
      }
    }
  }
  # --- Monotonicity check: true toxicity must be non-decreasing in each
  # agent. The matrix is padded with NA on the high side so every cell can be
  # compared against its right / upper / diagonal neighbours.
  p_tox_na = matrix(NA, nrow=ndose_a1+1, ncol=ndose_a2+1)
  p_tox_na[1:ndose_a1, 1:ndose_a2] = p_tox
  for(a1 in 1:ndose_a1){
    for(a2 in 1:ndose_a2){
      if(p_tox[a1,a2] >
         min(1,p_tox_na[a1+1,a2],p_tox_na[a1,a2+1],p_tox_na[a1+1,a2+1],na.rm=TRUE)){
        stop("The partial ordering between true toxicity probabilities is not satisfied.")
      }
    }
  }
  # --- Output buffers written by the C code --------------------------------
  p_tox = as.double(p_tox)
  inconc = as.double(numeric(1))
  n_pat_dose = as.double(numeric(ndose_a1*ndose_a2))
  rec_dose = as.double(numeric(ndose_a1*ndose_a2))
  n_tox_dose = as.double(numeric(ndose_a1*ndose_a2))
  early_conc = as.double(numeric(1))
  conc_max = as.double(numeric(1))
  tab_pat = as.double(numeric(nsim))
  # Appeler fonction C  (call the C simulation routine)
  logistic = .C(C_logistic_sim, tite, ndose_a1, ndose_a2, time_full, poisson_rate, p_tox, target,
                target_max, target_min, prior_tox_a1, prior_tox_a2, n_cohort, cohort, nsim, c_e, c_d, c_stop,
                c_t, c_over, cmin_overunder, cmin_mtd, cmin_recom, seed, startup, alloc_rule, early_stop,
                nburn, niter,
                rec_dose=rec_dose, n_pat_dose=n_pat_dose, n_tox_dose=n_tox_dose, inconc=inconc, early_conc=early_conc, tab_pat=tab_pat)
  # NOTE(review): `nsim` is passed positionally (untagged) to .C above, so
  # `logistic$nsim` looks like it would be NULL; presumably `nsim` should keep
  # its pre-call value here — confirm against the upstream dfcomb sources.
  nsim = logistic$nsim
  # Convert trial-level proportions to percentages.
  inconc=logistic$inconc*100
  early_conc=logistic$early_conc*100
  conc_max=100-early_conc-inconc
  tab_pat=logistic$tab_pat
  rec_dose=logistic$rec_dose*100
  n_pat_dose=logistic$n_pat_dose
  n_tox_dose=logistic$n_tox_dose
  # Reformat outputs
  # Fold the flat C vectors back into ndose_a1 x ndose_a2 matrices, then build
  # "_p" display versions (transposed, agent-2 rows reversed, dimnames added)
  # used by the print method.
  p_tox= matrix(p_tox,nrow=ndose_a1)
  rec_dose=matrix(rec_dose,nrow=ndose_a1)
  n_pat_dose=matrix(n_pat_dose,nrow=ndose_a1)
  n_tox_dose=matrix(n_tox_dose,nrow=ndose_a1)
  p_tox_p = t(p_tox)[ndose_a2:1,]
  rec_dose_p = t(rec_dose)[ndose_a2:1,]
  n_pat_dose_p = t(n_pat_dose)[ndose_a2:1,]
  n_tox_dose_p = t(n_tox_dose)[ndose_a2:1,]
  dimnames(p_tox_p) = list("Agent 2" = ndose_a2:1, "Agent 1" = 1:ndose_a1)
  dimnames(rec_dose_p) = list("Agent 2 " = ndose_a2:1, "Agent 1" = 1:ndose_a1)
  dimnames(n_pat_dose_p) = list("Agent 2"=ndose_a2:1, "Agent 1" = 1:ndose_a1)
  dimnames(n_tox_dose_p) = list("Agent 2" = ndose_a2:1, "Agent 1" = 1:ndose_a1)
  pat_tot = round(sum(n_pat_dose),1)
  # Bundle the settings and results; consumed by print.CombIncrease_sim.
  res = list(call = match.call(),
             tite=tite,
             ndose_a1=ndose_a1,
             ndose_a2=ndose_a2,
             time_full=time_full,
             poisson_rate=poisson_rate,
             startup=startup,
             alloc_rule=alloc_rule,
             early_stop=early_stop,
             p_tox=p_tox,
             p_tox_p=p_tox_p,
             target=target,
             target_min=target_min,
             target_max=target_max,
             prior_tox_a1=prior_tox_a1,
             prior_tox_a2=prior_tox_a2,
             n_cohort=n_cohort,
             cohort=cohort,
             pat_tot=pat_tot,
             nsim=nsim,
             c_e=c_e,
             c_d=c_d,
             c_stop=c_stop,
             c_t=c_t,
             c_over=c_over,
             cmin_overunder=cmin_overunder,
             cmin_mtd=cmin_mtd,
             cmin_recom=cmin_recom,
             nburn=nburn,
             niter=niter,
             seed=seed,
             rec_dose=rec_dose,
             n_pat_dose=n_pat_dose,
             n_tox_dose=n_tox_dose,
             rec_dose_p=rec_dose_p,
             n_pat_dose_p=n_pat_dose_p,
             n_tox_dose_p=n_tox_dose_p,
             inconc=inconc,
             early_conc=early_conc,
             conc_max=conc_max,
             tab_pat=tab_pat)
  class(res) = "CombIncrease_sim"
  return(res)
}
# S3 print method for "CombIncrease_sim" objects: shows the true toxicity
# matrix, the simulated operating characteristics (selection %, mean patients
# and toxicities per combination), trial-stopping percentages, and accrual
# statistics. `dgt` controls the digits shown for matrices.
print.CombIncrease_sim = function(x, dgt = 2, ...) {
  cat("Call:\n")
  print(x$call)
  cat("\n")
  # Helper: header line followed by the matrix rounded to `dgt` digits.
  show_mat <- function(header, mat) {
    cat(header, "\n")
    print(round(mat, digits = dgt))
    cat("\n")
  }
  show_mat("True toxicities:", x$p_tox_p)
  show_mat("Percentage of Selection:", x$rec_dose_p)
  show_mat("Mean number of patients:", x$n_pat_dose_p)
  show_mat("Mean number of toxicities:", x$n_tox_dose_p)
  cat(paste0("Percentage of inconclusive trials:\t", x$inconc, "\n"))
  cat(paste0("Percentage of trials stopping with criterion for finding MTD:\t", x$early_conc, "\n"))
  cat(paste0("Percentage of trials stopping with recommendation based on maximum sample size:\t", x$conc_max, "\n"))
  cat("\n")
  cat("Number of simulations:\t", x$nsim, "\n")
  cat("Total mean number of patients accrued:\t", x$pat_tot, "\n")
  cat("Quantiles for number of patients accrued:\t", "\n", quantile(x$tab_pat), "\n")
}
# Recommend the next dose combination for an ongoing dfcomb trial given the
# data observed so far (doses administered, toxicities, and — for the TITE
# variant — follow-up and time-to-toxicity). The Bayesian model update runs in
# compiled code via .C(C_logistic_next, ...). Returns an object of class
# "CombIncrease_next" with the recommended combination (cdose1, cdose2),
# stopping flags, and posterior toxicity summaries per combination.
CombIncrease_next = function(ndose_a1, ndose_a2, target, target_min, target_max, prior_tox_a1, prior_tox_a2, cohort, final,
                             pat_incl, dose_adm1, dose_adm2, tite=FALSE, toxicity, time_full=0, time_tox=0, time_follow=0,
                             c_e=0.85, c_d=0.45, c_stop=0.95, c_t=0.5, c_over=0.25, cmin_overunder=2, cmin_mtd=3, cmin_recom=1,
                             early_stop=1, alloc_rule=1, nburn=2000, niter=5000){
  # In the TITE setting, a toxicity is one observed before the end of the
  # patient's follow-up window.
  if(tite == TRUE) {
    toxicity = as.numeric(time_tox < time_follow)
  }
  # Current dose combination = the one given to the last included patient;
  # (0, 0) before any inclusion.
  if(pat_incl > 0) {
    cdose1 = dose_adm1[pat_incl]
    cdose2 = dose_adm2[pat_incl]
  }
  else {
    cdose1 = 0
    cdose2 = 0
  }
  # --- Length checks on user inputs ----------------------------------------
  n_prior_tox_a1 = length(prior_tox_a1)
  if(n_prior_tox_a1 != ndose_a1){
    stop("The entered vector of initial guessed toxicity probabities for agent 1 is of wrong length.")
  }
  n_prior_tox_a2 = length(prior_tox_a2)
  if(n_prior_tox_a2 != ndose_a2){
    stop("The entered vector of initial guessed toxicity probabities for agent 2 is of wrong length.")
  }
  n_toxicity = length(toxicity)
  n_time_follow = length(time_follow)
  n_time_tox = length(time_tox)
  n_dose_adm1 = length(dose_adm1)
  n_dose_adm2 = length(dose_adm2)
  if(tite==FALSE && n_toxicity != pat_incl){
    stop("The entered vector of observed toxicities is of wrong length.")
  }
  if(tite==TRUE && n_time_follow != pat_incl){
    stop("The entered vector for patients' follow-up time is of wrong length.")
  }
  if(tite==TRUE && n_time_tox != pat_incl){
    stop("The entered vector for patients' time-to-toxicity is of wrong length.")
  }
  if(n_dose_adm1 != pat_incl){
    stop("The entered vector for patients' dose of agent 1 is of wrong length.")
  }
  if(n_dose_adm2 != pat_incl){
    stop("The entered vector for patients' dose of agent 2 is of wrong length.")
  }
  # --- Coerce arguments to the storage modes the .C interface expects;
  # dose indices are shifted to 0-based for the C side. ---------------------
  tite = as.logical(tite)
  ndose_a1 = as.integer(ndose_a1)[1]
  ndose_a2 = as.integer(ndose_a2)[1]
  time_full = as.double(time_full)[1]
  target = as.double(target)[1]
  target_max = as.double(target_max)[1]
  target_min = as.double(target_min)[1]
  prior_tox_a1 = as.double(prior_tox_a1)
  prior_tox_a2 = as.double(prior_tox_a2)
  cohort = as.integer(cohort)[1]
  final = as.logical(final)
  c_e = as.double(c_e)[1]
  c_d = as.double(c_d)[1]
  c_stop = as.double(c_stop)[1]
  c_t = as.double(c_t)[1]
  c_over = as.double(c_over)[1]
  cmin_overunder = as.integer(cmin_overunder)[1]
  cmin_mtd = as.integer(cmin_mtd)[1]
  cmin_recom = as.integer(cmin_recom)[1]
  pat_incl = as.integer(pat_incl)[1]
  cdose1 = as.integer(cdose1-1)
  cdose2 = as.integer(cdose2-1)
  dose_adm1 = as.integer(dose_adm1-1)
  dose_adm2 = as.integer(dose_adm2-1)
  time_tox = as.double(time_tox)
  time_follow = as.double(time_follow)
  toxicity = as.logical(toxicity)
  alloc_rule = as.integer(alloc_rule)[1]
  early_stop = as.integer(early_stop)[1]
  nburn = as.integer(nburn)[1]
  niter = as.integer(niter)[1]
  # --- Validity checks on design settings ----------------------------------
  if(alloc_rule != 1 && alloc_rule != 2 && alloc_rule != 3){
    stop("Unknown allocation rule id.")
  }
  if(early_stop != 1 && early_stop != 2 && early_stop != 3){
    stop("Unknown early stopping rule id.")
  }
  if(cohort <= 0){
    stop("Cohort size must be positive.")
  }
  if(time_full < 0){
    stop("Full follow-up time must be positive.")
  }
  if(c_e < 0 || c_e > 1 || c_d < 0 || c_d > 1 || c_stop < 0 || c_stop > 1 || c_t < 0 || c_t > 1 || c_over < 0 || c_over > 1){
    stop("Probability thresholds are not comprised between 0 and 1.")
  }
  if(cmin_overunder < 0 || cmin_mtd < 0 || cmin_recom < 0){
    stop("Minimum number of cohorts for stopping or recommendation rule must be positive.")
  }
  if(nburn <= 0 || niter <= 0){
    stop("Number of iterations and burn-in for MCMC must be positive.")
  }
  for(i in 1:ndose_a1){
    if(prior_tox_a1[i] < 0 || prior_tox_a1[i] > 1){
      stop("At least one of the initial guessed toxicity for agent 1 is not comprised between 0 and 1.")
    }
  }
  for(i in 1:ndose_a2){
    if(prior_tox_a2[i] < 0 || prior_tox_a2[i] > 1){
      stop("At least one of the initial guessed toxicity for agent 2 is not comprised between 0 and 1.")
    }
  }
  if(target < 0 || target > 1){
    stop("Targeted toxicity probability is not comprised between 0 and 1.")
  }
  if(target_max < 0 || target_max > 1){
    stop("Maximum targeted toxicity probability is not comprised between 0 and 1.")
  }
  if(target_min < 0 || target_min > 1){
    stop("Minimum targeted toxicity probability is not comprised between 0 and 1.")
  }
  # --- Output buffers written by the C code --------------------------------
  inconc = as.logical(numeric(1))
  early_conc = as.logical(numeric(1))
  pi = as.double(numeric(ndose_a1*ndose_a2))
  ptox_inf = as.double(numeric(ndose_a1*ndose_a2))
  ptox_inf_targ = as.double(numeric(ndose_a1*ndose_a2))
  ptox_targ = as.double(numeric(ndose_a1*ndose_a2))
  ptox_sup_targ = as.double(numeric(ndose_a1*ndose_a2))
  # NAOK=TRUE: NA values in time vectors are passed through to the C routine.
  logistic = .C(C_logistic_next, tite, ndose_a1, ndose_a2, time_full, target, target_max, target_min, prior_tox_a1, prior_tox_a2,
                cohort, final, c_e, c_d, c_stop, c_t, c_over, cmin_overunder, cmin_mtd, cmin_recom, early_stop, alloc_rule,
                nburn, niter, pat_incl, cdose1=cdose1, cdose2=cdose2, dose_adm1, dose_adm2, time_tox, time_follow, toxicity,
                inconc=inconc, early_conc=early_conc, pi=pi, ptox_inf=ptox_inf, ptox_inf_targ=ptox_inf_targ, ptox_targ=ptox_targ, ptox_sup_targ=ptox_sup_targ, NAOK=TRUE)
  # Reformat outputs
  # Shift dose indices back to 1-based, fold the flat posterior vectors into
  # matrices, and build "_p" display versions (transposed, agent-2 rows
  # reversed, dimnames added) for the print method.
  cdose1=logistic$cdose1+1
  cdose2=logistic$cdose2+1
  dose_adm1=dose_adm1+1
  dose_adm2=dose_adm2+1
  pi=matrix(logistic$pi, nrow=ndose_a1)
  ptox_inf=matrix(logistic$ptox_inf, nrow=ndose_a1)
  ptox_inf_targ=matrix(logistic$ptox_inf_targ, nrow=ndose_a1)
  ptox_targ=matrix(logistic$ptox_targ, nrow=ndose_a1)
  ptox_sup_targ=matrix(logistic$ptox_sup_targ, nrow=ndose_a1)
  # Tally patients and toxicities per administered combination.
  n_pat_comb = matrix(0, nrow=ndose_a1, ncol=ndose_a2)
  n_tox_comb = matrix(0, nrow=ndose_a1, ncol=ndose_a2)
  for(i in 1:pat_incl){
    n_pat_comb[dose_adm1[i],dose_adm2[i]] = n_pat_comb[dose_adm1[i],dose_adm2[i]]+1
    n_tox_comb[dose_adm1[i],dose_adm2[i]] = n_tox_comb[dose_adm1[i],dose_adm2[i]]+toxicity[i]
  }
  n_pat_comb_p = t(n_pat_comb)[ndose_a2:1,]
  n_tox_comb_p = t(n_tox_comb)[ndose_a2:1,]
  pi_p = t(pi)[ndose_a2:1,]
  ptox_inf_p = t(ptox_inf)[ndose_a2:1,]
  ptox_inf_targ_p = t(ptox_inf_targ)[ndose_a2:1,]
  ptox_targ_p = t(ptox_targ)[ndose_a2:1,]
  ptox_sup_targ_p = t(ptox_sup_targ)[ndose_a2:1,]
  dimnames(n_pat_comb_p) = list("Agent 2"=ndose_a2:1, "Agent 1"=1:ndose_a1)
  dimnames(n_tox_comb_p) = list("Agent 2"=ndose_a2:1, "Agent 1"=1:ndose_a1)
  dimnames(pi_p) = list("Agent 2"=ndose_a2:1, "Agent 1"=1:ndose_a1)
  dimnames(ptox_inf_p) = list("Agent 2"=ndose_a2:1, "Agent 1"=1:ndose_a1)
  dimnames(ptox_inf_targ_p) = list("Agent 2"=ndose_a2:1, "Agent 1"=1:ndose_a1)
  dimnames(ptox_targ_p) = list("Agent 2"=ndose_a2:1, "Agent 1"=1:ndose_a1)
  dimnames(ptox_sup_targ_p) = list("Agent 2"=ndose_a2:1, "Agent 1"=1:ndose_a1)
  # Bundle the settings and results; consumed by print.CombIncrease_next.
  res = list(call = match.call(),
             tite=tite,
             ndose_a1=ndose_a1,
             ndose_a2=ndose_a2,
             time_full=time_full,
             target=target,
             target_max=target_max,
             target_min=target_min,
             prior_tox_a1=prior_tox_a1,
             prior_tox_a2=prior_tox_a2,
             cohort=cohort,
             final=final,
             c_e=c_e,
             c_d=c_d,
             c_stop=c_stop,
             c_t=c_t,
             c_over=c_over,
             cmin_overunder=cmin_overunder,
             cmin_mtd=cmin_mtd,
             cmin_recom=cmin_recom,
             early_stop=early_stop,
             alloc_rule=alloc_rule,
             nburn=nburn,
             niter=niter,
             pat_incl=pat_incl,
             cdose1=cdose1,
             cdose2=cdose2,
             dose_adm1=dose_adm1,
             dose_adm2=dose_adm2,
             time_tox=time_tox,
             time_follow=time_follow,
             toxicity=toxicity,
             inconc=logistic$inconc,
             early_conc=logistic$early_conc,
             n_pat_comb=n_pat_comb,
             n_tox_comb=n_tox_comb,
             pi=pi,
             ptox_inf=ptox_inf,
             ptox_inf_targ=ptox_inf_targ,
             ptox_targ=ptox_targ,
             ptox_sup_targ=ptox_sup_targ,
             n_pat_comb_p=n_pat_comb_p,
             n_tox_comb_p=n_tox_comb_p,
             pi_p=pi_p,
             ptox_inf_p=ptox_inf_p,
             ptox_inf_targ_p=ptox_inf_targ_p,
             ptox_targ_p=ptox_targ_p,
             ptox_sup_targ_p=ptox_sup_targ_p)
  class(res) = "CombIncrease_next"
  return(res)
}
# S3 print method for "CombIncrease_next" objects: shows per-combination
# counts and posterior summaries, then the recommendation / stopping message.
# `dgt` controls the digits shown for matrices.
print.CombIncrease_next = function(x, dgt = 2, ...) {
  cat("Call:\n")
  print(x$call)
  cat("\n")
  # Helper: header line followed by the matrix rounded to `dgt` digits.
  show_mat <- function(header, mat) {
    cat(header, "\n")
    print(round(mat, digits = dgt))
    cat("\n")
  }
  show_mat("Number of patients:", x$n_pat_comb_p)
  show_mat("Number of toxicities:", x$n_tox_comb_p)
  show_mat("Estimated toxicity probabilities:", x$pi_p)
  show_mat("P(toxicity probability < target):", x$ptox_inf_p)
  show_mat("Probabilities of underdosing:", x$ptox_inf_targ_p)
  show_mat("Probabilities being in targeted interval:", x$ptox_targ_p)
  show_mat("Probabilities of overdosing:", x$ptox_sup_targ_p)
  cat("Warning: recommendation for model-based phase (start-up phase must be ended).\n")
  # Pick the single decision message; guard order mirrors the stopping logic.
  combo <- paste0("(", x$cdose1, ",", x$cdose2, ")")
  if (x$inconc) {
    cat("The dose-finding process should be STOPPED WITHOUT COMBINATION RECOMMENDATION (criterion for over or under toxicity met)\n")
  } else if (x$early_conc) {
    cat(paste0("The dose-finding process should be STOPPED (criterion for identifying MTD met) and the RECOMMENDED COMBINATION is:\t ", combo, "\n"))
  } else if (x$final) {
    cat(paste0("The RECOMMENDED COMBINATION at the end of the trial is:\t ", combo, "\n"))
  } else {
    cat(paste0("The next RECOMMENDED COMBINATION is:\t ", combo, "\n"))
  }
}
|
9cd3b4e28fbeb67e2d38933ef2f5de2c36252f67
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/EngrExpt/examples/uvoven.Rd.R
|
0fde771085c3f68b0e39a3fd6b6cb2b1dd40def6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 590
|
r
|
uvoven.Rd.R
|
# Example script for the `uvoven` dataset (EngrExpt package): UV absorbance
# of lenses cured in different ovens.
library(EngrExpt)
### Name: uvoven
### Title: UV absorbance for lens cured in different ovens
### Aliases: uvoven
### Keywords: datasets
### ** Examples
# Structure and summary of the data set.
str(uvoven)
summary(uvoven)
# Density of UV absorbance, grouped by oven.
# NOTE(review): densityplot/qqmath are lattice functions; this presumably
# relies on lattice being attached via EngrExpt's dependencies — confirm.
densityplot(~ uv, uvoven, groups = oven, auto.key = list(columns = 2),
            xlab = "UV absorbance")
# Normal Q-Q plot of UV absorbance by oven, with a fitted reference line
# (panel.qqmathline) drawn at half opacity in each group.
qqmath(~ uv, uvoven, groups = oven,
       auto.key = list(space = "right", title = "Oven"),
       xlab = "Standard normal quantiles", type = c("p","g"),
       ylab = "UV absorbance",
       panel = function(...)
       {
         panel.qqmath(...)
         panel.qqmathline(..., alpha = 0.5)
       })
|
2e58e26e15e31aaf7e4ed7696e018ad707a74f29
|
dead36e975d42bcc18f7922012c7ddf8d3327c21
|
/R/holland.R
|
21e109d2f981c3cc931ea533f0eb4442221bf3d7
|
[] |
no_license
|
carlislerainey/unnecessary
|
a741d705dc59540dcabc22bb8e9f2b9daf79a24e
|
1f72f238055e4c562162a89b6b270847987e44f8
|
refs/heads/master
| 2023-01-10T15:53:27.308825
| 2022-12-25T10:54:28
| 2022-12-25T10:54:28
| 116,427,263
| 3
| 1
| null | 2018-08-17T10:41:13
| 2018-01-05T21:12:04
|
R
|
UTF-8
|
R
| false
| false
| 7,001
|
r
|
holland.R
|
# load packages
library(tidyverse)
library(ggrepel)
library(kableExtra)
# sandwich package also required (robust vcov, used via sandwich::vcovHC)
# MASS package also required (used via MASS::mvrnorm)
# set seed
# Fixed seed so the coefficient simulations below are reproducible.
set.seed(8904)
# load data
# Stata file of enforcement operations; only the analysis variables are kept.
holland <- haven::read_dta("data/Enforcement.dta") %>%
  # keep only the variables we use
  select(city, district, operations, lower, vendors, budget, population) %>%
  glimpse()
# Formula corresponds to model 1 for each city in Holland (2015), table 2.
holland_f <- operations ~ lower + vendors + budget + population

# Fit the Poisson regression for one city's data frame and return the
# fitted glm object.
fit_model <- function(data) {
  glm(holland_f, family = poisson, data = data)
}
# simulate coefficients
simulate_coefficients <- function(beta_hat, Sigma_hat) {
MASS::mvrnorm(5000000, mu = beta_hat, Sigma = Sigma_hat)
}
# simulate quantities of interest
compute_tau <- function(data, beta_hat, beta_tilde) {
# set scenarios
X_hyp <- X_obs <- model.matrix(holland_f, data = data)
X_hyp[, "lower"] <- X_hyp[, "lower"]*.5
# compute quantites of interest
tau_tilde <- t((exp(X_hyp%*%t(beta_tilde)) - exp(X_obs%*%t(beta_tilde)))/exp(X_obs%*%t(beta_tilde)))
tau_hat_avg <- apply(tau_tilde, 2, mean) # simulation average
tau_hat_mle <- (exp(X_hyp%*%beta_hat) - exp(X_obs%*%beta_hat))/exp(X_obs%*%beta_hat) # mle
# compute quantites of interest (hyp)
tau_tilde_lo <- t(exp(X_hyp%*%t(beta_tilde)))
tau_hat_avg_lo <- apply(tau_tilde_lo, 2, mean) # simulation average
tau_hat_mle_lo <- exp(X_hyp%*%beta_hat)
# compute quantites of interest (obs)
tau_tilde_hi <- t(exp(X_obs%*%t(beta_tilde)))
tau_hat_avg_hi <- apply(tau_tilde_hi, 2, mean) # simulation average
tau_hat_mle_hi <- exp(X_obs%*%beta_hat)
# combine estimated qis into a data frame
tau <- data.frame(mle = tau_hat_mle,
avg = tau_hat_avg,
mle_lo = tau_hat_mle_lo,
avg_lo = tau_hat_avg_lo,
mle_hi = tau_hat_mle_hi,
avg_hi = tau_hat_avg_hi) %>%
bind_cols(data)
return(tau)
}
# fit models and compute quantities of interest
# One row per city: nested data, fitted Poisson model, robust (HC4m)
# covariance, simulated coefficients, and per-district quantities of interest.
estimations <- holland %>%
  group_by(city) %>%
  nest() %>%
  mutate(fit = map(data, fit_model),
         beta_hat = map(fit, coef),
         Sigma_hat = map(fit, sandwich::vcovHC, type = "HC4m"),
         beta_tilde = map2(beta_hat, Sigma_hat, simulate_coefficients),
         tau = pmap(list(data, beta_hat, beta_tilde), compute_tau)) %>%
  glimpse()
# wrangle estimates into usuable form
tau <- estimations %>%
  unnest(tau) %>%
  ungroup() %>%
  mutate(city = str_to_title(city)) %>%
  mutate(district = reorder(district, lower)) %>%
  glimpse()
# create a data frame of annotations of mle and aos estimators
# Per city, annotate the district with the largest simulation-average tau;
# ch_percent is the percent change from the ML estimate to the average.
ann <- tau %>%
  group_by(city) %>%
  #filter(city == "Santiago") %>%
  filter(avg == max(avg)) %>%
  mutate(avg_label = "hat(tau)^avg") %>%
  mutate(mle_label = "hat(tau)^mle") %>%
  mutate(ch_pos = (avg + mle)/2,
         ch_label = paste0(round(100*(avg-mle)/mle), "'%'"),
         ch_percent = round(100*(avg-mle)/mle)) %>%
  glimpse()
# create a data frome of annotations for top 5 districts in each city
ann_city <- tau %>%
  group_by(city) %>%
  top_n(5, lower) %>%
  glimpse()
# create a helper function to label the y-axis
# Formats proportions as whole-number percentages.
lab_fn <- function(x) {
  scales::percent(x, accuracy = 1)
}
# plot the estimates of the quantities of interest
# Arrows run from the simulation-average estimate (avg) down/up to the ML
# estimate (mle) for each district; the annotated district in each city gets
# labelled reference segments for both estimators, and the top-5 districts
# by `lower` are name-labelled with ggrepel.
ggplot(tau, aes(x = lower, xend = lower, y = avg, yend = mle)) +
  facet_wrap(vars(city), scales = "free_y") +
  #geom_point(size = 0.5) +
  geom_segment(arrow = arrow(length = unit(0.05, "inches"))) +
  scale_y_continuous(labels = lab_fn) +
  theme_bw() +
  labs(x = "Percent of District in Lower Class",
       y = "Percent Increase in Enforcement Operations") +
  geom_segment(data = ann, aes(x = lower + 1, xend = lower + 9, y = avg, yend = avg),
               size = 0.2, color = "#d95f02") +
  geom_segment(data = ann, aes(x = lower + 1, xend = lower + 9, y = mle, yend = mle),
               size = 0.2, color = "#1b9e77") +
  geom_label(data = ann, aes(x = lower + 7.5, y = avg, label = avg_label),
             parse = TRUE, size = 2.5, color = "#d95f02", label.padding = unit(0.1, "lines")) +
  geom_label(data = ann, aes(x = lower + 7.5, y = mle, label = mle_label),
             parse = TRUE, size = 2.5, color = "#1b9e77", label.padding = unit(0.1, "lines")) +
  geom_text_repel(data = ann_city, aes(x = lower, y = avg, label = district),
                  color = "grey50", size = 2.5, direction = "both", angle = 0, nudge_x = -14,
                  segment.size = .2, point.padding = 0.5, min.segment.length = 0)
ggsave("doc/figs/fig3-holland.pdf", height = 3, width = 9, scale = 1.2)
# 5 largest biases for each district
# Districts where the ML estimate differs most (downward) from the average.
smry <- tau %>%
  mutate(ratio = (mle - avg)/avg) %>%
  group_by(city) %>%
  top_n(5, -ratio) %>%
  glimpse()
# create latex table w/ details for 5 largest qis in each city
# Formats the estimates as percentages / rounded counts, renders a booktabs
# LaTeX table with grouped headers and footnotes, and writes it to disk.
smry %>%
  mutate(shrinkage = -(mle - avg)/avg) %>%
  arrange(city, desc(avg)) %>%
  mutate(avg = paste0(round(100*avg), "%"),
         avg_lo = round(avg_lo, 1),
         avg_hi = round(avg_hi, 1),
         mle = paste0(round(100*mle), "%"),
         mle_lo = round(mle_lo, 1),
         mle_hi = round(mle_hi, 1),
         shrinkage = paste0(round(100*shrinkage), "%")) %>%
  select(City = city,
         District = district,
         `% Change[note]` = avg,
         `From[note]` = avg_hi,
         `To[note]` = avg_lo,
         `% Change` = mle,
         From = mle_hi,
         To = mle_lo,
         `Shrinkage[note]` = shrinkage) %>%
  kable("latex", booktabs = TRUE, align = c(rep("l", 2), rep("c", 7)),
        caption = "\\label{tab:top-5}This table presents the details for the districts labeled in Figure \\ref{fig:holland}.") %>%
  kable_styling(latex_options = "hold_position", position = "center", font_size = 8) %>%
  add_header_above(c(" " = 2, "Average of Simulations" = 3,
                     "ML Estimate" = 3,
                     " " = 1),
                   bold = TRUE) %>%
  column_spec(1, bold = TRUE) %>%
  column_spec(2, bold = TRUE) %>%
  row_spec(0, bold = TRUE) %>%
  collapse_rows(columns = 1, latex_hline = "major", valign = "middle") %>%
  add_footnote(c("Quantity of interest; percent change in enforcement operations when the percent in the lower class drops by half.",
                 "Enforcement operations when the percent in the lower class equals its observed value.",
                 "Enforcement operations when the percent in the lower class equals half its observed value.",
                 "Shrinkage in the quantity of interest due to switching from the average of simulations to the ML estimator."),
               notation = "alphabet") %>%
  cat(file = "doc/tabs/tab1-top-5.tex")
# median bias for each district
# Median of the relative difference per city, written out as a CSV.
smry2 <- tau %>%
  mutate(ratio = (mle - avg)/avg) %>%
  group_by(city) %>%
  summarize(med = median(ratio)) %>%
  glimpse() %>%
  write_csv("doc/tabs/holland-medians.csv")
# Side-by-side regression table of all fitted city models (console output).
texreg::screenreg(estimations$fit)
|
ed98bfc0a0c94e85a15f417913e95896e23951c5
|
f9d2eca47d9604aba2373e1793f5b00fb3722ccc
|
/dev/callee_scripts/produce_maps.R
|
6bc25aebf71470dd54ede16ec1548e2c28872b0c
|
[
"MIT"
] |
permissive
|
linneasoli/Sus
|
c2a73d642e18df95dbe1bdf4516e7e987415a55b
|
0c82720d110469ad2a36b00ca7640f617d2a069e
|
refs/heads/main
| 2023-06-14T05:48:44.223296
| 2021-07-09T18:14:02
| 2021-07-09T18:14:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,761
|
r
|
produce_maps.R
|
# Produce left and right maps ---------------------------------------------
# Dependent script: needs 'borough' object

# Clip an sf layer `x` to a 26 km circle centred 3.5 km south of the borough
# centroid. Work happens in UTM zone 18N (EPSG:32618) so buffer distances are
# in metres; the result is returned in WGS84 (EPSG:4326).
# NOTE(review): always reads the global `borough` object to build the circle,
# regardless of `x`.
make_circle <- function(x) {
  borough %>%
    st_transform(32618) %>%
    st_union() %>%
    st_centroid() %>%
    # Shift the centroid 3500 m south (magrittr brace form: `.` is the point).
    {. + c(0, -3500)} %>%
    st_set_crs(32618) %>%
    st_buffer(26000) %>%
    st_intersection(st_transform(x, 32618), .) %>%
    st_transform(4326)
}
circle_borough <- make_circle(borough)
# Minimal ggplot2 theme for map panels: strips axes, ticks and grid lines,
# makes all backgrounds transparent, hides the legend by default, and sets
# the shared typography. Additional theme() settings can be appended via `...`.
theme_map <- function(...) {
  default_bg <- "transparent"
  default_fc <- "black"
  default_ff <- "Helvetica"
  theme_minimal() +
    theme(
      text = element_text(family = default_ff, color = default_fc),
      axis.line = element_blank(),
      axis.text.x = element_blank(),
      axis.text.y = element_blank(),
      axis.ticks = element_blank(),
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank(),
      plot.background = element_rect(fill = default_bg, color = NA),
      panel.background = element_rect(fill = default_bg, color = NA),
      legend.background = element_rect(fill = default_bg, color = NA),
      legend.position = "none",
      plot.margin = unit(c(0, .5, .2, .5), "cm"),
      panel.border = element_blank(),
      panel.spacing = unit(c(-.1, 0.2, .2, 0.2), "cm"),
      legend.title = element_text(size = 11),
      legend.text = element_text(size = 22, hjust = 0, color = default_fc),
      plot.title = element_text(size = 15, hjust = 0.5, color = default_fc),
      plot.subtitle = element_text(
        size = 10, hjust = 0.5, color = default_fc,
        margin = margin(b = -0.1, t = -0.1, l = 2, unit = "cm")),
      plot.caption = element_text(size = 7, hjust = .5,
                                  margin = margin(t = 0.2, b = 0, unit = "cm"),
                                  color = "#939184"),
      # Caller-supplied overrides win because they come last.
      ...)
}
# Pre-rendered drop-shadow and univariate-legend images, composited onto the
# left/right map panels by the plotting code below.
shadow_left <- png::readPNG("www/dropshadow_left.png", native = TRUE)
legend_left <- png::readPNG("www/univariate_left.png", native = TRUE)
shadow_right <- png::readPNG("www/dropshadow_right.png", native = TRUE)
legend_right <- png::readPNG("www/univariate_right.png", native = TRUE)
walk(c("borough", "CT", "DA", "grid"), function(scale) {
data <- get(scale)
data <- make_circle(data)
var_list <- c(" ", str_subset(names(data), "q3"))
walk(var_list, ~{
# Left map
if (.x == " ") {
p <-
data %>%
ggplot() +
{if (scale == "grid") geom_sf(data = circle_borough, fill = "grey70",
color = "white", size = 0.01)} +
geom_sf(fill = "#CABED0", color = "white", size = 0.01) +
theme_map() +
theme(legend.position = "none")
{wrap_elements(shadow_left) +
inset_element(p, 0.18, 0.148, 0.83, 0.85, align_to = "full")} %>%
ggsave("out.png", ., width = 4, height = 4)
} else {
p <-
data %>%
select(var = all_of(.x)) %>%
ggplot() +
{if (scale == "grid") geom_sf(data = circle_borough, fill = "grey70",
color = "white", size = 0.01)} +
geom_sf(aes(fill = as.factor(var)), color = "white", size = 0.01) +
scale_fill_manual(values = colour_scale[1:3], na.value = "grey70") +
theme_map() +
theme(legend.position = "none")
{wrap_elements(shadow_left) +
inset_element(p, 0.18, 0.148, 0.83, 0.85, align_to = "full") +
inset_element(wrap_elements(
full = legend_left) +
theme(plot.background = element_rect(fill = "transparent",
colour = "transparent")),
0.2, 0.25, 0.46, 0.5, align_to = "full")} %>%
ggsave("out.png", ., width = 4, height = 4)
}
img <- png::readPNG("out.png")
img <- img[251:950, 251:950,]
png::writePNG(img, paste0("www/maps/left_", scale, "_",
sub("_q3", "", .x), ".png"))
# Right map
if (.x == " ") {
p <-
data %>%
ggplot() +
{if (scale == "grid") geom_sf(data = circle_borough, fill = "grey70",
color = "white", size = 0.01)} +
geom_sf(fill = "#CABED0", color = "white", size = 0.01) +
theme_map() +
theme(legend.position = "none")
{wrap_elements(shadow_right) +
inset_element(p, 0.17, 0.148 , 0.818, 0.844, align_to = "full")} %>%
ggsave("out.png", ., width = 4, height = 4)
} else {
p <-
data %>%
select(var = all_of(.x)) %>%
ggplot() +
{if (scale == "grid") geom_sf(data = circle_borough, fill = "grey70",
color = "white", size = 0.01)} +
geom_sf(aes(fill = as.factor(var)), color = "white", size = 0.01) +
scale_fill_manual(values = colour_scale[4:6], na.value = "grey70") +
theme_map() +
theme(legend.position = "none")
{wrap_elements(shadow_right) +
inset_element(p, 0.17, 0.148 , 0.818, 0.844, align_to = "full") +
inset_element(wrap_elements(
full = legend_right) +
theme(plot.background = element_rect(fill = "transparent",
colour = "transparent")),
0.54, 0.245, 0.8, 0.495, align_to = "full")} %>%
ggsave("out.png", ., width = 4, height = 4)
}
img <- png::readPNG("out.png")
img <- img[251:950, 251:950,]
png::writePNG(img, paste0("www/maps/right_", scale, "_",
sub("_q3", "", .x), ".png"))
})
})
unlink("out.png")
rm(circle_borough, make_circle, theme_map)
|
17fcf3c84e059eeff4147aa1f993f74f7c9c82f6
|
f05b62a978e2abea690fb5be403389f0d083df5b
|
/DM1_boxplots.R
|
7e0f1f52930dbe268e625e7be60b2c10ca1411f0
|
[] |
no_license
|
vedereka/PMIP4_Benchm_proj
|
87ae2480b112499c552f65c546ce807474ae323c
|
80d26927790c2384fca14f6c6b75fb62be4b5596
|
refs/heads/master
| 2023-05-13T02:53:05.784000
| 2021-03-01T14:32:48
| 2021-03-01T14:32:48
| 337,995,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,368
|
r
|
DM1_boxplots.R
|
### produce data only and data-model latitudinal boxplots
# Boxplot #1: Bartlein (B), Cleator at the Bartlein sites (CL) and all Cleator data (CL_all)
# Boxplot #2: Bartlein (B), all Cleator data (CL_all) and model data
# Statistical summaries of all variables are saved in output/
# These are the things that will require checking if the models are updated:
# - model_ls: Are model names correctly trimmed?
# - scales_y: are limits still valid?
# - guide_legend nrow and ncol: do they need to be updated?
# - breaks and levels in scale_fill_manual (note that the order is strange)
# - colorSet to match the number of models (and the order)
# It requires facetscales package from devtools::install_github("zeehio/facetscales")
#
# Created by Laia Comas-Bru in October 2020
# Last modified: February 2021
# Still to-do: Haven't been able to keep empty spaces for missing data in the
# DM boxplots. This is a known issue of ggplot2. See:
# https://github.com/tidyverse/ggplot2/issues/3345
#### LOAD OBSERVATIONS AND ORGANISE DATA ####
# files produced in Step0 extract site data
data_obs <- read.csv(file.path(dataobspath, "data_obs_raw.csv"), na.strings = "NA",strip.white = TRUE, blank.lines.skip = T) %>%
dplyr::rename (LAT = lat, LON = lon) %>% dplyr::select (LAT, LON, MAT, MTCO, MTWA, MAP, REF)
data_BarPre <- data_obs %>% filter (REF == "B_wf" | REF == "PR_all")
data_Cle <- data_obs %>% filter (REF == "CL_all_244") # use most recent Cleator dataset
#### SELECT OVERLAPPING SITES BETWEEN BARTLEIN GRIDS AND CLEATOR ####
# load gridcells from Bartlein's gridded data and filter Cleator to just that spread of data
ncfname <- paste (dataobspath, "raw_data/mat_delta_21ka_ALL_grid_2x2.nc",sep="")
ncin <- nc_open(ncfname)
lat <- ncin[["dim"]][["lat"]][["vals"]]
lon <- ncin[["dim"]][["lon"]][["vals"]]
rm(ls="ncfname","ncin")
grid <- expand.grid(lon = lon, lat = lat)
#ranges
grid$lat_min <- grid$lat - mean(diff(lat)) / 2
grid$lat_max <- grid$lat + mean(diff(lat)) / 2
grid$lon_min <- grid$lon - mean(diff(lon)) / 2
grid$lon_max <- grid$lon + mean(diff(lon)) / 2
grid$count_n <- NA
for (n in 1:dim(grid)[1]) {
newx <-
data_BarPre %>% filter (
data_BarPre$LAT >= grid$lat_min [n] &
data_BarPre$LAT < grid$lat_max[n] &
data_BarPre$LON >= grid$lon_min[n] &
data_BarPre$LON < grid$lon_max[n]
)
if (dim(newx)[1] == 0) {
grid$count_n[n] = NA
} else {
grid$count_n[n] <- dim(newx)[1] # how many data points per gridcell?
}
if (n == 1) {
x_temp <- newx [, 3:6] %>% summarise_if(is.numeric, mean, na.rm = T)
} else {
x_temp[n,] <- newx [, 3:6] %>% summarise_if(is.numeric, mean, na.rm = T)
}
}
grid <- cbind (grid, x_temp)
grid <- grid %>% filter (grid$count_n >= 0)
grid_BartPren <- grid
rm(ls="n","x_temp","newx","grid")
# select grid lat/lons for which we have BArt/Pren data and filter Cleator's to that geographical range (with averaged values for all variables)
grid <- grid_BartPren [, 1:6]
grid$count_n <- NA
for (n in 1:dim(grid)[1]) {
newx <-
data_Cle %>% filter (
data_Cle$LAT >= grid$lat_min [n] &
data_Cle$LAT < grid$lat_max[n] &
data_Cle$LON >= grid$lon_min[n] &
data_Cle$LON < grid$lon_max[n]
)
if (dim(newx)[1] == 0) {
grid$count_n[n] = NA
} else {
grid$count_n[n] <- dim(newx)[1] # how many data points per gridcell?
}
if (n == 1) {
x_temp <-
newx [, 3:6] %>% summarise_if(is.numeric, mean, na.rm = T)
} else {
x_temp[n,] <-
newx [, 3:6] %>% summarise_if(is.numeric, mean, na.rm = T)
}
}
grid <- cbind (grid, x_temp)
grid <- grid %>% filter (grid$count_n >= 0)
grid_Cle <- grid
rm(ls="n","x_temp","newx","grid")
grid_Cle$REF <- "CL"
grid_BartPren$REF <- "BP"
# end of data manipulation #
#### BOXPLOT #1: only data ####
## comparisons for gridded overlapping data sources
dtBP <- grid_BartPren [, -c(3:7)]
dtCL <- grid_Cle [, -c(3:7)]
dtCL_all <- data_Cle
colnames(dtCL_all) [1] <- "lat"
colnames(dtCL_all) [2] <- "lon"
dtCL_all$REF <- "CL_all"
obs <- rbind(dtBP, dtCL, dtCL_all)
# Group the data by latitudinal bands
brkpnt <- seq(-80, 80, by = 20)
startpnt <- brkpnt[1:length(brkpnt) - 1]
endpnt <- brkpnt[2:length(brkpnt)]
brk_lab <- paste(startpnt, '° to ', endpnt, '°', sep = '')
obs$lat_band <- cut(obs$lat, breaks = brkpnt,labels = brk_lab)
obs = obs[!is.na(obs$lat_band),] #remove lats outside of range
# select chosen variables, in this case, MAP, MTCO and MTWA
#obs <- obs [,-c(3,7:8)]
#save statistical summary of each variable
sum_obs = summary(obs %>% filter (obs$REF == "BP"))
write.csv(sum_obs, paste(datapath, "summary_BP.csv", sep=""))
sum_obs = summary(obs %>% filter (obs$REF == "CL_all"))
write.csv(sum_obs, paste(datapath, "summary_CL_all.csv", sep=""))
sum_obs = summary(obs %>% filter (obs$REF == "CL"))
write.csv(sum_obs, paste(datapath, "summary_CL_overlap.csv", sep=""))
obs2 = obs
obs <- reshape2::melt(obs, na.rm=F, id.vars = c("lat","lon","REF", "lat_band"), variable.name = "var")
# undo with: dcast(obs, lat + lon + REF + lat_band ~ var, value.var = "value")
obs$REF <- factor(obs$REF , levels=c("CL_all", "CL", "BP")) # reorder boxplots bottom to top
bp <- ggplot(na.omit(obs), aes(x=lat_band, y=value, fill=REF)) +
geom_boxplot(aes(fill=REF),outlier.alpha = 0.5, outlier.size = 0.5, outlier.colour = "grey86",
width = 0.8, varwidth=F, lwd=0.01,position = position_dodge2(preserve = "single")) +
theme_bw()+
theme(axis.title.x=element_blank(),
axis.title.y=element_blank(),
axis.text.x = element_text(angle = 0, vjust = 0, hjust=0.9,face="bold"),
axis.text.y = element_text(angle = 0, vjust = -0.1, hjust=0.5,face="bold"),
legend.position="top",
legend.box = "horizontal", legend.text.align=0)+
scale_fill_manual(name = element_blank(),
breaks = c('BP', 'CL', 'CL_all'),
labels = c(expression('Bartlein + Prentice'), expression('Cleator'),
expression('Cleator all')),
values = c('orange', 'steelblue4', 'cyan3')) +
facet_grid(.~ var,scales='free') +
coord_flip()
print(bp)
ggsave(bp,file=paste(plotpath,"DM_boxplots/boxplot_data_B_CL244.jpg", sep=""),width=12,height=7)
#### BOXPLOT #2: observations and model data ####
mod_variable_ls <- c('tas_anom','mtco_anom','mtwa_anom','pre_anom', 'gdd5_anom')
# location of model output
mod_dir <- ncpath
mod_files <- list.files(mod_dir, full.names = TRUE)
# create list of model names for output
model_ls <- lapply(list.files(mod_dir, full.names = F), FUN = my_name_trim) %>% as.character (.)
obs_coord = unique(obs[,1:2])
for (mod_name in model_ls){
ncname <- paste(ncpath,mod_name, "_LGM_anomalies.nc",sep="")
ncin <- nc_open(ncname)
lat <- ncin[["dim"]][["lat"]][["vals"]]; nlat <- length(lat)
lon <- ncin[["dim"]][["lon"]][["vals"]];nlon <- length(lon)
grid <- expand.grid(lon=lon, lat=lat)
for (mod_varname in mod_variable_ls) {
var <- ncvar_get(ncin, mod_varname)
var[var=="NaN"]=NA
# extract indices of closest gridcells
j <- sapply(obs_coord$lon, function(x) which.min(abs(lon - x)))
k <- sapply(obs_coord$lat, function(x) which.min(abs(lat - x)))
var_vec <- as.vector(var)
# extract data for all locations
jk <- (k - 1) * nlon + j #jk <- (j-1)*nlat + k
var_extr <- var_vec[jk]
var_extr_df <- data.frame (var_extr)
colnames(var_extr_df)[1] = "value"
var_extr_df$REF = mod_name
var_extr_df$var = mod_varname
var_extr_df = cbind (obs_coord, var_extr_df)
if (mod_varname == mod_variable_ls[1] & mod_name == model_ls[1]) {
pts <- var_extr_df
} else {
pts <- rbind (pts, var_extr_df)
}
}
}
nc_close(ncin)
pts$lat_band <- cut(pts$lat, breaks = brkpnt,labels = brk_lab)
# rename vars
pts <- data.frame(lapply(pts, function(x) {gsub("tas_anom", "MAT", x)}))
pts <- data.frame(lapply(pts, function(x) {gsub("mtco_anom", "MTCO", x)}))
pts <- data.frame(lapply(pts, function(x) {gsub("mtwa_anom", "MTWA", x)}))
pts <- data.frame(lapply(pts, function(x) {gsub("pre_anom", "MAP", x)}))
pts <- data.frame(lapply(pts, function(x) {gsub("gdd5_anom", "GDD5", x)}))
data_all = rbind(obs, pts)
#remove => CL (=Cleator at Bartlein sites)
data_all <- data_all %>% filter(REF != "CL")
data_all$lat <- as.numeric(data_all$lat)
data_all$lon <- as.numeric(data_all$lon)
data_all$value <- as.numeric(data_all$value)
data_all$var <- as.factor(data_all$var)
data_all$REF <- factor(data_all$REF ,
levels= c(rev(as.character(model_ls)), "CL_all", "BP"))
data_all$lat_band <- factor(data_all$lat_band, levels = brk_lab[2:8])
saveRDS(data_all, file = paste(datapath,"obs_mod.RDS", sep=""))
require (randomcoloR) # ColorBrewer max length is 12, we need 13 + 2 grey
# color palette in the right order
n <- length(unique(data_all$REF)) %>% distinctColorPalette(.)
colorSet <- rev(c(n[1:2],'grey75', 'grey40',n[3:length(n)]))
# pie(rep(1, length(colorSet), col=colorSet)) # to see colours in a pie chart (diff each time)
require(facetscales) # install with devtools::install_github("zeehio/facetscales")
#set limits for each variable (only possible with facetscales)
scales_y <- list(
GDD5 = scale_y_continuous(breaks=scales::extended_breaks(n=3),limits=c(1500,-4000)),
MAP = scale_y_continuous(breaks=scales::extended_breaks(n=5),limits=c(1500,-1500)),
MAT = scale_y_continuous(breaks=scales::extended_breaks(n=4),limits=c(10,-20)),
MTWA = scale_y_continuous(breaks=scales::extended_breaks(n=4),limits=c(10,-20)),
MTCO = scale_y_continuous(breaks=scales::extended_breaks(n=4),limits=c(10,-30))
)
scales_x <- list(
name = scale_x_discrete()
)
bp <-ggplot(na.omit(data_all), aes(x=lat_band, y=value, fill=var)) +
geom_hline(yintercept = 0, linetype="solid", color = "black", size=0.5) +
geom_boxplot(aes(fill=REF),outlier.alpha = 0.8, outlier.size = 0.5, outlier.colour = "grey86",
width = 0.8, varwidth=F,lwd=0.2,fatten=1,position = position_dodge2(preserve = "single")) +
theme_bw()+
theme(axis.title.x=element_blank(),
axis.title.y=element_blank(),
axis.text.x = element_text(angle = -90, vjust = 0, hjust=0.9,size=13,face="bold"),
axis.text.y = element_text(angle = -90, vjust = -0.1, hjust=0.5,size=13,face="bold"),
legend.position="left") +
guides(fill = guide_legend(reverse = TRUE,
direction = "vertical",
nrow = 5,
ncol = 3,
label.position = "bottom",
legend.box.just = "right",
#legend.text.align=0,
label.theme = element_text(angle = -90, vjust = 0.5, hjust=0,size=10),
title.position = "bottom", title.theme = element_text(angle = 90)))+
scale_x_discrete(position = "top") +
scale_fill_manual(name = element_blank(),
breaks = c(model_ls[3], model_ls[2], model_ls[1],"CL_all", "BP",
model_ls[8],model_ls[7],model_ls[6],model_ls[5],model_ls[4],
model_ls[13],model_ls[12],model_ls[11],model_ls[10],model_ls[9]),
labels = c(model_ls[3], model_ls[2], model_ls[1],"CL_all", "BP",
model_ls[8],model_ls[7],model_ls[6],model_ls[5],model_ls[4],
model_ls[13],model_ls[12],model_ls[11],model_ls[10],model_ls[9]),
values = colorSet) + #strange order
facet_grid_sc(rows=vars(var), scales = list(y = scales_y))+
theme(strip.text.y = element_text(
size = 14, color = "black", face = "bold"
))
bp
ggsave(bp,file=paste(plotpath,"DM_boxplots/boxplot_data_model.jpg", sep=""),width=11,height=14)
#ggsave(bp,file=paste(plotpath,"DM_boxplots/boxplot_data_model.pdf", sep=""),width=11,height=14)
# extract statistical summary of all variables used in the boxplot
br <- c("CL_all", "BP", as.character(model_ls))
for (i in br){
x1 <- data_all %>% filter (data_all$REF == i)
sum_obs = summary(dcast(x1, lat + lon + lat_band ~ var, value.var = "value"))
write.csv(sum_obs, paste(datapath, "summary_mod_boxplot_",i,".csv", sep=""))
}
graphics.off()
|
260d2ee1572f7d4890919464c0fd909388965fa9
|
aa3ec60c734dc3c8bd7f959fa7604ef3dc82c2df
|
/R/epc_interp_grid.R
|
517079c5d50719ac7375b93551c27eb7c2521a5d
|
[] |
no_license
|
fawda123/tb_chl
|
d6ba36e1fc5a6bfd40a838af472c6fe47deae713
|
47b5715cc6976e6aa0bc50e98fe79f9b65b61d9d
|
refs/heads/master
| 2020-07-26T00:11:13.450383
| 2017-08-29T18:46:59
| 2017-08-29T18:46:59
| 21,586,396
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,971
|
r
|
epc_interp_grid.R
|
######
#created Sep 2013, M. Beck
#creates interpolation grids for TB hirsch model
#used to create heat maps and to get normalization data
######
#get interpolation grids
######
#salinity grid
#includes beta estimates for weighted regressions for each obs
#includes back-transformation from Moyer et al. 2012
rm(list=ls())
source('M:/r_code/EPC/epc_dat.r')
sal.div<-20 #no. of divisions, range is different for each segment
segs<-unique(tb.dat$seg)
mods.out <- NULL
strt<-Sys.time()
for(seg in segs){
dat.in<-tb.dat[tb.dat$seg==seg,]
#salinity grid data specific to each segment
sal.grid<-seq(min(dat.in$sal.ref),max(dat.in$sal.ref),length=sal.div)
seg.out<-NULL
for(row in 1:nrow(dat.in)){
row.out<-NULL
ref.in<-dat.in[row,]
# log
cat(as.character(seg), nrow(dat.in) - row,'\n')
for(sal in sal.grid){
ref.in$sal.ref<-sal
ref.wts<-wt.fun(ref.in,dat.in,wt.vars=c('month.num','year','sal.ref'))
# data to predict
pred.dat <- data.frame(sal.ref=sal,dec.time=ref.in$dec.time)
# crq model, estimates all quants
mod <- quantreg::crq(
Surv(Chla_ugl, not_cens, type = "left") ~
dec.time + sal.ref + sin(2*pi*dec.time) + cos(2*pi*dec.time),
weights = ref.wts,
data = dat.in,
method = "Portnoy"
)
# sometimes crq fucks up
test <- try({coef(mod)})
if('try-error' %in% class(test)){
err_out <- rep(NA, 9)
row.out<-rbind(row.out, c(err_out))
next
}
# fitted coefficients for each model
parms <- coef(mod, c(0.1, 0.5, 0.9))
# predicted values by quantile model coefficients
fits <- sapply(1:3, function(x){
with(pred.dat,
parms[1, x] + parms[2, x] * dec.time + parms[3, x] * sal.ref + parms[4, x] * sin(2*pi*dec.time) + parms[5, x] * cos(2*pi*dec.time)
)
})
names(fits) <- paste0('fit.', c('lo', 'md', 'hi'))
# back transformed predicted values
bt.fits <- exp(fits)
names(bt.fits) <- paste0('bt.', c('lo', 'md', 'hi'))
# model parameters for sal.ref
betas <- coef(mod, c(0.1, 0.5, 0.9))['sal.ref', ]
names(betas) <- paste0('b.', c('lo', 'md', 'hi'))
#append to row out for each unique sal
row.out<-rbind(
row.out,
c(fits, bt.fits, betas)
)
}
wt.fits<-suppressWarnings(data.frame(
year=ref.in$year,
month.num=ref.in$month.num,
date.f=ref.in$date.f,
dec.time=ref.in$dec.time,
seg,
sal.grid,
row.out
))
seg.out<-rbind(seg.out,wt.fits)
}
mods.out <- rbind(mods.out, seg.out)
}
Sys.time() - strt
sal.grd <- mods.out
save(sal.grd,file='M:/wq_models/EPC/interp_grids/salwt_grd.RData')
#####
#month and yr. grids have to be redone as of Nov. 5th for back-transformation estimates....
#####
#month grid
rm(list=ls())
source('M:/r_code/EPC/epc_dat.r')
#get interpolation grids
mo.grid<-sort(unique(tb.dat$month.num))
mods.out<-NULL
segs<-unique(tb.dat$seg)
strt<-Sys.time()
for(seg in segs){
dat.in<-tb.dat[tb.dat$seg==seg,]
seg.out<-NULL
for(row in 1:nrow(dat.in)){
wt.fit.md<-NULL
wt.fit.hi<-NULL
wt.fit.lo<-NULL
ref.in<-dat.in[row,]
cat(as.character(seg), nrow(dat.in) - row,'\n')
flush.console()
for(mo in mo.grid){
#reset month and dec.time to eval month
ref.in$month.num<-mo
ref.in$dec.time<-as.numeric(ref.in$year) + mo
ref.wts<-wt.fun(ref.in,dat.in,wt.vars=c('month.num','year','sal.ref'))
#data to use for model predictions
pred.dat<-data.frame(sal.ref=ref.in$sal.ref,dec.time=ref.in$dec.time)
#OLS wtd model
wt.mod.md<-lm(
Chla_ugl~dec.time + sal.ref + sin(2*pi*dec.time) + cos(2*pi*dec.time),
weights=ref.wts,
data=dat.in
)
#OLS wtd predict
wt.pred.md<-predict(wt.mod.md,newdata=pred.dat)
if(class(try({
wt.mod.hi<-rq(
Chla_ugl~dec.time + sal.ref + sin(2*pi*dec.time) + cos(2*pi*dec.time),
weights=ref.wts,
tau=0.9,
data=dat.in
)
wt.pred.hi<-predict(wt.mod.hi,newdata=pred.dat)
}))=='try-error'){ wt.pred.hi<-NA }
#quantile wtd model, 0.1, exception for error
if(class(try({
wt.mod.lo<-rq(
Chla_ugl~dec.time + sal.ref + sin(2*pi*dec.time) + cos(2*pi*dec.time),
weights=ref.wts,
tau=0.1,
data=dat.in
)
wt.pred.lo<-predict(wt.mod.lo,newdata=pred.dat)
}))=='try-error'){ wt.pred.lo<-NA }
wt.fit.md<-c(wt.fit.md,wt.pred.md)
wt.fit.hi<-c(wt.fit.hi,wt.pred.hi)
wt.fit.lo<-c(wt.fit.lo,wt.pred.lo)
}
wt.fits<-data.frame(
year=ref.in$year,
month.num=dat.in[row,]$month.num,
date.f=ref.in$date.f,
dec.time=dat.in[row,]$dec.time,
seg,
mo.grid,
wt.fit.md,
wt.fit.hi,
wt.fit.lo
)
seg.out<-rbind(seg.out,wt.fits)
}
mods.out<-rbind(mods.out,seg.out)
}
cat(Sys.time()-strt,'\n')
mo.grd<-mods.out
save(mo.grd,file='M:/wq_models/EPC/interp_grids/mowt_grd.RData')
######
#annual grid
rm(list=ls())
source('M:/r_code/EPC/epc_dat.r')
#get interpolation grids
yr.grid<-sort(unique(tb.dat$year))
mods.out<-NULL
segs<-unique(tb.dat$seg)
strt<-Sys.time()
for(seg in segs){
dat.in<-tb.dat[tb.dat$seg==seg,]
seg.out<-NULL
for(row in 1:nrow(dat.in)){
wt.fit.md<-NULL
wt.fit.hi<-NULL
wt.fit.lo<-NULL
ref.in<-dat.in[row,]
cat(as.character(seg), nrow(dat.in) - row,'\n')
flush.console()
for(yr in yr.grid){
#reset year and dec.time to eval year
ref.in$year<-yr
ref.in$dec.time<-as.numeric(ref.in$year) + ref.in$month.num
ref.wts<-wt.fun(ref.in,dat.in,wt.vars=c('month.num','year','sal.ref'))
#data to use for model predictions
pred.dat<-data.frame(sal.ref=ref.in$sal.ref,dec.time=ref.in$dec.time)
#OLS wtd model
wt.mod.md<-lm(
Chla_ugl~dec.time + sal.ref + sin(2*pi*dec.time) + cos(2*pi*dec.time),
weights=ref.wts,
data=dat.in
)
#OLS wtd predict
wt.pred.md<-predict(wt.mod.md,newdata=pred.dat)
if(class(try({
wt.mod.hi<-rq(
Chla_ugl~dec.time + sal.ref + sin(2*pi*dec.time) + cos(2*pi*dec.time),
weights=ref.wts,
tau=0.9,
data=dat.in
)
wt.pred.hi<-predict(wt.mod.hi,newdata=pred.dat)
}))=='try-error'){ wt.pred.hi<-NA }
#quantile wtd model, 0.1, exception for error
if(class(try({
wt.mod.lo<-rq(
Chla_ugl~dec.time + sal.ref + sin(2*pi*dec.time) + cos(2*pi*dec.time),
weights=ref.wts,
tau=0.1,
data=dat.in
)
wt.pred.lo<-predict(wt.mod.lo,newdata=pred.dat)
}))=='try-error'){ wt.pred.lo<-NA }
wt.fit.md<-c(wt.fit.md,wt.pred.md)
wt.fit.hi<-c(wt.fit.hi,wt.pred.hi)
wt.fit.lo<-c(wt.fit.lo,wt.pred.lo)
}
wt.fits<-data.frame(
year=dat.in[row,]$year,
month.num=dat.in[row,]$month.num,
date.f=ref.in$date.f,
dec.time=dat.in[row,]$dec.time,
seg,
yr.grid,
wt.fit.md,
wt.fit.hi,
wt.fit.lo
)
seg.out<-rbind(seg.out,wt.fits)
}
mods.out<-rbind(mods.out,seg.out)
}
cat(Sys.time()-strt,'\n')
yr.grd<-mods.out
save(yr.grd,file='M:/wq_models/EPC/interp_grids/yrwt_grd.RData')
######
#grid for all normalization variables: year, month, and salinity
#can be used to create monstrous 3D interp grid
yr.grid<-sort(unique(tb.dat$year))
mo.grid<-sort(unique(tb.dat$month.num))
sal.div<-50
sal.grid<-seq(min(tb.dat$sal.ref),max(tb.dat$sal.ref),length=sal.div)
int.grid<-expand.grid(yr.grid,mo.grid,sal.grid,stringsAsFactors=F)
names(int.grid)<-c('year','month.num','sal.grid')
######
#plot interpolation grids
###
#by salinity
ylabs<-expression(paste('chl ',italic(a),' (',italic(mu),'g',l^-1,')'))
#min, max sal.ref vals to plot....
lim.vals<-aggregate(
sal.ref~month.num+seg,
FUN=function(x) cbind(quantile(x,0.05),quantile(x,0.95)),
data=epc.est
)
names(lim.vals)[2]<-'seg'
to.plo<-merge(int.grd,lim.vals,by=c('month.num','seg'),all.x=T)
#interp grid removing extreme values
p<-ggplot(to.plo,aes(x=dec.time,y=sal.grid)) +
geom_tile(aes(fill=exp(wt.fit.md)),width=0.1) + #adjust this to fill gaps
# scale_fill_brewer(type='div',palette = 'BuGn') +
scale_fill_gradient2(name=ylabs,low='blue',mid='lightgreen',high='red',midpoint=20) +
geom_line(aes(x=dec.time,y=sal.ref[,2])) +
geom_line(aes(x=dec.time,y=sal.ref[,1])) +
facet_wrap(~seg,nrow=2,ncol=2) +
theme_bw() +
scale_x_continuous(
breaks=seq(1974,2012,by=2),
name='Date',
expand = c(0,0)
) +
scale_y_continuous(name='Proportion freshwater',expand = c(0,0)) +
theme(
axis.text.x=element_text(angle = 90, vjust=0.5,hjust=1)
)
pdf('C:/Users/mbeck/Desktop/sal_grd.pdf',width=11,height=6.5,family='serif')
print(p)
dev.off()
###
#by year
to.plo<-yr.grd
p<-ggplot(to.plo,aes(x=dec.time,y=as.numeric(as.character(yr.grid)))) +
geom_tile(aes(fill=exp(as.numeric(wt.fit.md))),width=0.2) + #adjust this to fill gaps
scale_fill_gradient2(name=ylabs,low='blue',mid='lightgreen',high='red',midpoint=20) +
facet_wrap(~seg,nrow=2,ncol=2) +
theme_bw() +
scale_x_continuous(
breaks=seq(1974,2012,by=2),
name='Date',
expand = c(0,0)
) +
scale_y_continuous(
name='Estimated annual condition',
breaks=seq(1974,2012,by=2),
expand = c(0,0)
) +
theme(
axis.text.x=element_text(angle = 90, vjust=0.5,hjust=1)
)
pdf('C:/Users/mbeck/Desktop/yr_grd.pdf',width=11,height=6.5,family='serif')
print(p)
dev.off()
###
#by month
to.plo<-mo.grd
p<-ggplot(to.plo,aes(x=as.numeric(dec.time),y=mo.grid)) +
geom_tile(aes(fill=exp(as.numeric(wt.fit.md))),height=1/12,width=0.25) + #adjust this to fill gaps
scale_fill_gradient2(name=ylabs,low='blue',mid='lightgreen',high='red',midpoint=20) +
facet_wrap(~seg,nrow=2,ncol=2) +
theme_bw() +
scale_x_continuous(
name='Date',
breaks=seq(1974,2012,by=2),
expand = c(0,0)
) +
scale_y_continuous(
breaks=seq(min(to.plo$mo.grid),max(to.plo$mo.grid),length=12),
labels=c('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'),
name='Estimated monthly condition',
expand = c(0,0)
) +
theme(
axis.text.x=element_text(angle = 90, vjust=0.5,hjust=1)
)
pdf('C:/Users/mbeck/Desktop/mo_grd.pdf',width=11,height=6.5,family='serif')
print(p)
dev.off()
|
26c7234e8515936c9ca32925f77b0ab4f205407f
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/GeneralizedUmatrix/man/upscaleUmatrix.Rd
|
5a25a4313663c6b395bab74e91f7cef152a0286d
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,250
|
rd
|
upscaleUmatrix.Rd
|
\name{upscaleUmatrix}
\alias{upscaleUmatrix}
\title{
Upscale a Umatrix grid
}
\description{
Use linear interpolation to increase the resolution of a U-matrix. This produces smoother output in \code{\link{plotTopographicMap}} and is also used for further normalization of the U-matrix.
}
\usage{
upscaleUmatrix(Umatrix, Factor = 2,BestMatches, Imx)
}
\arguments{
\item{Umatrix}{
The umatrix which should be upscaled
}
\item{BestMatches}{
The BestMatches which should be upscaled
}
\item{Factor}{
Optional: The factor by which the axes will be scaled. Be aware that the size of the matrix will grow by Factor squared. Default: 2
}
\item{Imx}{
Optional: Island cutout of the umatrix. Should also be scaled to the new size of the umatrix.
}
}
\value{ A List consisting of:
\item{Umatrix}{A matrix representing the upscaled umatrix.}
\item{BestMatches}{If BestMatches was given as parameter: The rescaled BestMatches for an island cutout. Otherwise: \code{NULL}}
\item{Imx}{If Imx was given as parameter: The rescaled matrix for an island cutout. Otherwise: \code{NULL}}
}
\author{
Felix Pape
}
\concept{topographic map}
\concept{Generalized U-matrix}
\keyword{U-matrix}
|
8503eab461da432c244f134c4474e04c64108081
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/c3/examples/pipe.Rd.R
|
175c70e97f750727e2e615a8faf007c269a86200
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 140
|
r
|
pipe.Rd.R
|
library(c3)
### Name: %>%
### Title: Pipe operator
### Aliases: %>%
### ** Examples
data.frame(a=c(1,2,3,2),b=c(2,3,1,5)) %>%
c3()
|
b23615113befa22cbfddbff906d65664072b9fc3
|
2f5cdf1e21345e7d3271aef35027f11c7df7bd1a
|
/episcanpy/preprocessing/notes_benchmarking_multiomic.R
|
dd585e00c05d202dfaa1e742d79bf85baec03c2b
|
[
"BSD-3-Clause"
] |
permissive
|
colomemaria/epiScanpy
|
8d6992d5e9797d574ccdfff92f028beede5143ed
|
070a9467dce121345da19f1b3955e7fa1eabf19a
|
refs/heads/master
| 2023-08-08T08:38:27.289808
| 2023-05-09T12:46:19
| 2023-05-09T12:46:19
| 170,289,801
| 127
| 37
|
BSD-3-Clause
| 2023-07-21T13:30:51
| 2019-02-12T09:28:15
|
Python
|
UTF-8
|
R
| false
| false
| 1,661
|
r
|
notes_benchmarking_multiomic.R
|
.libPaths( "/home/icb/anna.danese/miniconda3/envs/scmoib-seuratv4/lib/R/library")
library(Seurat)
library(SeuratDisk)
library(Signac)
# Load the peak ATAC
file = "/home/icb/anna.danese/project_anna/scmoib/brain_peaks_filtered_not_normalised.h5ad"
file_seurat = "/home/icb/anna.danese/project_anna/scmoib/brain_peaks_filtered_not_normalised.h5seurat"
Convert(file, dest = "h5seurat", overwrite = TRUE, assay='ATAC')
atac <- LoadH5Seurat(file_seurat)
brain.atac <- CreateSeuratObject(counts = atac[['ATAC']], assay = "ATAC", project = "SHAREseq_ATAC")
# RNA
file = "/home/icb/anna.danese/project_anna/scmoib/processed_data/brain_data/brain_preprocessed_rna_full_features.h5ad"
file_seurat = "/home/icb/anna.danese/project_anna/scmoib/processed_data/brain_data/brain_preprocessed_rna_full_features.h5seurat"
Convert(file, dest = "h5seurat", overwrite = TRUE, assay='RNA')
rna <- LoadH5Seurat(file_seurat)
brain.rna <- CreateSeuratObject(counts = rna[['RNA']], assay = "RNA", project = "SHAREseq_RNA")
# Perform standard analysis of each modality independently RNA analysis
brain.rna <- NormalizeData(brain.rna)
brain.rna <- FindVariableFeatures(brain.rna)
brain.rna <- ScaleData(brain.rna)
brain.rna <- RunPCA(brain.rna)
brain.rna <- RunUMAP(brain.rna, dims = 1:30)
#create the raw geneactivity matrix
gtf_file = "/home/icb/anna.danese/project_anna/scmoib/processed_data/gencode.vM26.annotation.gtf"
chromosome <- paste0('chr', c(1:19, "X", "Y"))
chromosome
activity.matrix <- GeneActivity(peak.matrix = atac[['ATAC']]@counts, annotation.file = gtf_file,
seq.levels = chromosome, upstream = 2000, verbose = TRUE)
|
52d260ab2ed65539769a9af226f642d6915b9601
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MAGNAMWAR/examples/FormatMCLFastas.Rd.R
|
ef251361f0ac26cc6fb585f5478daf25b68d0446
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 358
|
r
|
FormatMCLFastas.Rd.R
|
library(MAGNAMWAR)
### Name: FormatMCLFastas
### Title: Format all raw GenBank fastas to single OrthoMCL compatible
### fasta file
### Aliases: FormatMCLFastas
### ** Examples
## Not run:
##D dir <- system.file('extdata', 'fasta_dir', package='MAGNAMWAR')
##D dir <- paste(dir,'/',sep='')
##D formatted_file <- FormatMCLFastas(dir)
## End(Not run)
|
73b53943212a73a31bff56644905c3124fac7c64
|
ed00cdd70acf530004fb6f56d2e2c090b10813ff
|
/R/pairangle.R
|
85bad707608473f9599466c307d328eef0e04c31
|
[] |
no_license
|
adchSTATS/genfun
|
a7400075fd296eeba1a3e83bc4d64397a0e15aad
|
e6189fe729797397516a619c5c1e88e7adb3a717
|
refs/heads/master
| 2021-08-08T17:32:10.692919
| 2017-11-10T19:21:43
| 2017-11-10T19:21:43
| 108,581,350
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 711
|
r
|
pairangle.R
|
#' Pairwise angles
#'
#' Computes the matrix of angles between all pairs of vectors.
#' @param mat A numeric matrix with each row representing a vector.
#' @return A square matrix whose \code{[i,j]} entry is the angle between the two vectors
#' represented in row \code{i} and \code{j} of \code{mat}.
#' @details The (i, j)-entry is the angle between the vectors represented by the i'th and j'th row of the input matrix
#' @author Andreas Dyreborg Christoffersen \email{andreas@math.aau.dk}
#' @export
pairangle <- function(mat){
  # Pairwise angles (in radians) between all row vectors of `mat`.
  # Returns a square matrix whose [i, j] entry is the angle between the
  # vectors in rows i and j; the diagonal is exactly 0.
  dps <- tcrossprod(mat)                        # all pairwise dot products
  norm.prods <- 1/tcrossprod(sqrt(diag(dps)))   # 1 / (|v_i| * |v_j|)
  normalized.dps <- dps*norm.prods              # pairwise cosines
  # Floating-point round-off can push a cosine marginally outside [-1, 1]
  # for (anti)parallel vectors, in which case acos() would return NaN.
  # Clamp before taking the arccosine.
  normalized.dps <- pmin(pmax(normalized.dps, -1), 1)
  diag(normalized.dps) <- 1                     # angle of a vector with itself is 0
  out <- acos(normalized.dps)
  return(out)
}
|
4884c78ec7a210d8eba2910cfea588aa567b40e2
|
081576efbe245e5c437d4c45a30c10453cd2cac8
|
/man/keep_cat_cols.Rd
|
4a8fc1c0518316c7a526e713d3ebe1577d3343e7
|
[
"curl",
"MIT"
] |
permissive
|
fei0810/UCSCXenaShiny
|
93ecad178e560cbfe6668978bb77dac31e5b169a
|
398605a5da859886313649036da739feee0edec3
|
refs/heads/master
| 2023-04-18T00:58:07.866434
| 2021-04-28T12:39:02
| 2021-04-28T12:39:02
| 274,140,433
| 1
| 0
|
NOASSERTION
| 2021-04-28T12:39:02
| 2020-06-22T13:11:24
|
R
|
UTF-8
|
R
| false
| true
| 627
|
rd
|
keep_cat_cols.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{keep_cat_cols}
\alias{keep_cat_cols}
\title{Keep Only Columns Used for Sample Selection}
\usage{
keep_cat_cols(x, keep_sam_cols = TRUE, return_idx = TRUE)
}
\arguments{
\item{x}{a \code{data.frame} with many columns.}
\item{keep_sam_cols}{if \code{TRUE} (default), keep columns with
pattern 'sample', 'patient', etc.}
\item{return_idx}{if \code{TRUE} (default), return index of 5 (at most) columns, it is useful in Shiny.}
}
\value{
a \code{data.frame} or a \code{list}.
}
\description{
Keep Only Columns Used for Sample Selection
}
|
e4057fe69a362bd12a32f4812cf8d7d205aeab7f
|
913127177bc7fe98cddbe2bd1592a274590f626c
|
/cachematrix.R
|
add74c35b7970611ae38a177c32dcc13f85c8c10
|
[] |
no_license
|
shsab/ProgrammingAssignment2
|
fcdf798749de9821a8ce85abe96f2c5f6b419e2b
|
f8472b7359e51b50b4eb261fafc2bea721fae626
|
refs/heads/master
| 2021-01-14T12:30:40.709538
| 2015-04-26T07:01:18
| 2015-04-26T07:01:18
| 32,295,407
| 0
| 0
| null | 2015-03-16T01:58:25
| 2015-03-16T01:58:22
| null |
UTF-8
|
R
| false
| false
| 1,239
|
r
|
cachematrix.R
|
## Cretae a special matrix that contains the matrix and
## Caches its invert.
## Assume matrix provided is invertable
## Create a special "matrix" object that can cache its inverse.
##
## Returns a list of four closures sharing one enclosing environment:
##   set(y)        - replace the stored matrix and drop any cached inverse
##   get()         - return the stored matrix
##   setInvert(i)  - store a computed inverse in the cache
##   getInvert()   - return the cached inverse (NULL if not yet computed)
##
## Fixes relative to the original:
##   * set() assigned `y`, but its parameter was named `Y` (undefined name)
##   * setInvert() used local `<-`, so the cache was never actually stored;
##     it must superassign with `<<-` to reach the shared environment
makeCacheMatrix <- function(x = matrix()) {
  xInv <- NULL
  set <- function(y){
    x <<- y
    xInv <<- NULL          # any cached inverse is stale once x changes
  }
  get <- function() x
  setInvert <- function(xInvert) xInv <<- xInvert
  getInvert <- function() xInv
  list(set = set, get = get,
       setInvert = setInvert,
       getInvert = getInvert)
}
## Check if invert exists in chache return it, otherwise calculate
## Return the (pseudo)inverse of the special "matrix" created by
## makeCacheMatrix, computing it at most once.
##
## x   : the list of closures returned by makeCacheMatrix()
## ... : unused; kept for interface compatibility
##
## If a cached inverse exists it is returned immediately. Otherwise the
## Moore-Penrose pseudoinverse is computed via SVD (handling non-square
## and rank-deficient matrices), cached with x$setInvert(), and returned.
##
## Fix relative to the original: `tol <- Null` referenced the undefined
## name `Null` (R's null value is `NULL`), so every cache miss errored.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  xInv <- x$getInvert()
  if(!is.null(xInv)) {
    message("getting cached data")
    return(xInv)
  }
  MAT <- x$get()
  matdim <- dim(MAT)
  # Singular values below this tolerance are treated as zero.
  # NOTE(review): max(MAT) (not max(abs(MAT))) is kept from the original;
  # for all-negative matrices the data-dependent term is negative and the
  # 1e-7 floor takes over — confirm this is intended.
  tol <- min(1e-7, .Machine$double.eps*max(matdim)*max(MAT))
  EXP <- -1
  if(matdim[1]>=matdim[2]){
    # tall or square: pinv(M) = V D^-1 U' = t(U D^-1 V')
    svd1 <- svd(MAT)
    keep <- which(svd1$d > tol)
    xInv <- t(svd1$u[,keep]%*%diag(svd1$d[keep]^EXP, nrow=length(keep))%*%t(svd1$v[,keep]))
  } else {
    # wide: decompose the transpose instead
    svd1 <- svd(t(MAT))
    keep <- which(svd1$d > tol)
    xInv <- svd1$u[,keep]%*%diag(svd1$d[keep]^EXP, nrow=length(keep))%*%t(svd1$v[,keep])
  }
  x$setInvert(xInv)
  xInv
}
|
ea7673c763a4b77ab54a6712b1cf7f637d246f10
|
34b1ab46a70fe81143874a40d6493c0254f1e5c9
|
/time_series/tsoutliers.test.02.R
|
355d083e5d82bf846826cf9c383fcc9db68d3cb4
|
[] |
no_license
|
yama1968/Spikes
|
5f974a20812dbd88f789cabf7720826d358f8e76
|
498b0cacfc23627ecee743f012a6fda6451cda7f
|
refs/heads/master
| 2021-06-06T00:33:33.637745
| 2020-11-14T18:49:25
| 2020-11-14T18:49:25
| 29,531,065
| 2
| 0
| null | 2020-11-12T21:13:21
| 2015-01-20T13:29:35
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 593
|
r
|
tsoutliers.test.02.R
|
library(tsoutliers)
library(ggplot2)
data(Nile)
resNile1 <- tso(y=Nile, types=c("AO", "LS", "TC"), cval=3,
tsmethod="auto.arima", args.tsmodel=list(model="local-level"))
print(resNile1)
r <- resNile1
points <-data.frame(x = r$outliers$time-1,
y = Nile[r$outliers$time-start(Nile)[[1]]])
foo <- data.frame(t = start(Nile):end(Nile),
n = Nile[1:100])
p <- ggplot(foo, aes(t, n)) +
geom_line() + xlab("") + ylab("Nile")
p <- p + geom_point(data = points, mapping=aes(x=x, y=y),
shape=1, size=5, color="red")
p
|
b4ed9fa4b40b7402c708e5a5461cbbdb8557cfd7
|
31df5e6e37bee75ba4b22a972cfa57a632ba2d23
|
/scripts/getUniqExon/human/step5_ensembl_nonoverlap_exons_bed.R
|
0515716505d3d9d8a420b6c3053915aa8c083f97
|
[] |
no_license
|
Sun-lab/asSeq
|
47fa0b451530114ce2e0cdcd40e64afbf21335d7
|
3dcfb91d603213057c570a98d532c6cfc5618929
|
refs/heads/master
| 2022-09-28T13:23:00.427278
| 2022-09-11T16:33:06
| 2022-09-11T16:33:06
| 134,029,698
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,601
|
r
|
step5_ensembl_nonoverlap_exons_bed.R
|
## when writing data into text file, it may use scientific format
## when you read it into c, and using atoi. it will make mistakes
## say 97000000 is written as 9.7e+07, and c think it is 9
## options("scipen") can control write out behavior
options(scipen=20)
# ---------------------------------------------------------
# read in data
# ---------------------------------------------------------
setwd("~/research/data/human/")
ff = "Homo_sapiens.GRCh37.66.nonoverlap.exon.gtf"
date()
inf = read.table(ff, sep="\t", as.is=TRUE, header=FALSE, quote="")
date()
names(inf) = c("chr", "source", "feature", "start", "end",
"score", "strand", "frame", "anno")
dim(inf)
inf[1:2,]
table(inf$chr)
table(inf$strand)
summary(inf$end - inf$start)
table(inf$end - inf$start == 0)
## for bed format, the first base in a chromosome is numbered 0.
## while in gtf format, the first base in a chromosome is numbered 1.
inf$start = inf$start - 1
# ---------------------------------------------------------
# obtain clust_id
# ---------------------------------------------------------
reg1 = regexpr('clustId\\s"(\\S+)";', inf$anno, perl=TRUE)
len1 = attributes(reg1)[[1]]
nadd = length(unlist(strsplit("clustId", split=""))) + 2
clustId = substr(inf$anno, reg1+nadd, reg1+len1-3)
# ---------------------------------------------------------
# obtain gene_id
# ---------------------------------------------------------
reg1 = regexpr('gene_id\\s"(\\S+)";', inf$anno, perl=TRUE)
len1 = attributes(reg1)[[1]]
nadd = length(unlist(strsplit("gene_id", split=""))) + 2
geneId = substr(inf$anno, reg1+nadd, reg1+len1-3)
# ---------------------------------------------------------
# obtain exon_id
# ---------------------------------------------------------
reg1 = regexpr('exon_id\\s"(\\S+)";', inf$anno, perl=TRUE)
len1 = attributes(reg1)[[1]]
nadd = length(unlist(strsplit("exon_id", split=""))) + 2
exonId = substr(inf$anno, reg1+nadd, reg1+len1-3)
# ---------------------------------------------------------
# construct bed file
# ---------------------------------------------------------
names = paste(clustId, geneId, exonId, sep="|")
score = rep("666", length(names))
bed = cbind(inf$chr, inf$start, inf$end, names, score, inf$strand)
# ---------------------------------------------------------
# write out results
# ---------------------------------------------------------
setwd("~/research/data/human/")
write.table(bed, col.names = FALSE, append = FALSE,
file = "Homo_sapiens.GRCh37.66.nonoverlap.exon.bed",
quote = FALSE, sep = "\t", row.names = FALSE)
|
1b2a744c6835e0fb82457e7a81daf4c187b86764
|
f92a7585f054a393674716f4e178a78f099f4917
|
/bin/csv_scrape.R
|
06c6b3ccfe2329d88cea3e11bbbe8132f4fe8bc6
|
[] |
no_license
|
parvezrana/forvol
|
7a29c9c2c77777ddb0ed94c47f398b04056f2e84
|
767d1739061fce7fc207a500ebff0216173d6512
|
refs/heads/master
| 2020-03-14T13:42:17.524275
| 2018-01-03T18:09:12
| 2018-01-03T18:09:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 630
|
r
|
csv_scrape.R
|
### Scrapes an input xlsx workbook of all subsheets and converts to csv
options(java.parameters = "-Xmx8000m")
library('xlsx')
CSVScrape <- function(workbook.path, dest.path) {
## Get number of sheets
sheets <- getSheets(loadWorkbook(workbook.path))
num_sheets <- length(sheets)
## Read the sheet and output as csv
for ( i in 1:num_sheets) {
gc()
sheet_i <- read.xlsx(workbook.path, i)
write.csv(sheet_i, file=file.path(dest.path, names(sheets)[i]), row.names = FALSE)
#print(names(sheets)[i])
#print(file.path(dest.path))
}
}
CSVScrape('../excel/volcfgrs_eqn_coefs.xlsx', '../excel/test')
|
dcbeeab47545fe36cf009b6090bb8417116c38ba
|
b8de739b0994bff1f15c8b888f7165ef58bc7d67
|
/Program/deps/silencing.r
|
0e061f59a72caf55ce4043511db360a928569b35
|
[
"MIT"
] |
permissive
|
arendma/GRN_code
|
dc00fb81ebb9c9f144e68fd25c451b18bb44d4fe
|
7630cd82dfef3c9e8dac061db4838d3fd4ae1476
|
refs/heads/main
| 2023-04-16T20:15:10.035250
| 2022-12-05T07:34:36
| 2022-12-05T07:34:36
| 450,481,870
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 431
|
r
|
silencing.r
|
Silencing <- function(G)
{
diag(G)=1
I <- diag(ncol(G))
D <- diag((G - I) %*% G)
c_inv <- try(solve(G),silent = F)
if (class(c_inv)[1] == "try-error")
c_inv <- mpinv(G)
S <- (G - I + diag(D)) %*% c_inv#solve(G)
diag(S) <- 0
#S = abs(S)/max(abs(S))
return(S)
}
mpinv <- function(A, eps = 1e-13) {
s <- svd(A)
e <- s$d
e[e > eps] <- 1/e[e > eps]
return(s$v %*% diag(e) %*% t(s$u))
}
|
8b02a455d0ef3e46c90d6715f659b228feacf191
|
92cd10bc06fb5961bc272fcdc50eb8c99e7a70ea
|
/man/tree_var.Rd
|
2a0f25f12a49d5a3d913a512cf99f109705c75bf
|
[] |
no_license
|
alexandereric995/lares
|
b9af1622432d059be8f0e804fef8afe174c601c0
|
db81f7bf3c959efffbab3a38d5e0373fd56ba714
|
refs/heads/master
| 2023-02-06T13:01:41.441067
| 2020-12-29T16:45:32
| 2020-12-29T16:45:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,763
|
rd
|
tree_var.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trees.R
\name{tree_var}
\alias{tree_var}
\title{Recursive Partitioning and Regression Trees}
\usage{
tree_var(
df,
target,
max = 3,
min = 20,
cp = 0,
size = 0.7,
ohse = TRUE,
plot = TRUE,
...
)
}
\arguments{
\item{df}{Data frame}
\item{target}{Variable}
\item{max}{Integer. Maximal depth of the tree}
\item{min}{Integer. The minimum number of observations that must
exist in a node in order for a split to be attempted}
\item{cp}{Numeric. Complexity parameter}
\item{size}{Numeric. Textsize of plot}
\item{ohse}{Boolean. Auto generate One Hot Smart Encoding?}
\item{plot}{Boolean. Return a plot? If not, rpart object}
\item{...}{rpart.plot custom parameters}
}
\description{
Fit and plot a rpart model for exploratory purposes using rpart
and rpart.plot libraries. Idea from explore library.
}
\seealso{
Other Exploratory:
\code{\link{corr_cross}()},
\code{\link{corr_var}()},
\code{\link{crosstab}()},
\code{\link{df_str}()},
\code{\link{distr}()},
\code{\link{freqs_df}()},
\code{\link{freqs_list}()},
\code{\link{freqs_plot}()},
\code{\link{freqs}()},
\code{\link{lasso_vars}()},
\code{\link{missingness}()},
\code{\link{plot_cats}()},
\code{\link{plot_df}()},
\code{\link{plot_nums}()},
\code{\link{summer}()},
\code{\link{trendsRelated}()}
Other Visualization:
\code{\link{distr}()},
\code{\link{freqs_df}()},
\code{\link{freqs_list}()},
\code{\link{freqs_plot}()},
\code{\link{freqs}()},
\code{\link{gg_bars}()},
\code{\link{gg_pie}()},
\code{\link{noPlot}()},
\code{\link{plot_chord}()},
\code{\link{plot_survey}()},
\code{\link{plot_timeline}()},
\code{\link{summer}()},
\code{\link{theme_lares}()}
}
\concept{Exploratory}
\concept{Visualization}
|
ae821ad2e2b9e1daeacacf2c604401363b361924
|
baeee810a3d266a7d2aa08b725cb3df28dab1a06
|
/pnn_diabetes.R
|
1fbc0247bd3b33c98a20c60d9867cf5623904873
|
[] |
no_license
|
alfredt/5206-CA1
|
d2e09befbc8db8b03ce06cfae71085ea048b380c
|
5c0e7786f37b7f2afd0851d7740861596214e86f
|
refs/heads/master
| 2020-12-30T18:02:59.158409
| 2017-05-19T11:12:44
| 2017-05-19T11:12:44
| 90,946,223
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,067
|
r
|
pnn_diabetes.R
|
pkgs <- c('caret', 'doParallel', 'foreach', 'pnn')
lapply(pkgs, require, character.only=T)
registerDoParallel(cores=8)
data_all <- read.csv(file="Diabetes.csv", head=FALSE, sep=",")
# PRE-PROCESSING DATA
column = ncol(data_all)
data_all <- data.frame(scale(data_all[-ncol(data_all)]), data_all[ncol(data_all)])
require("caTools")
set.seed(101)
sample = sample.split(data_all, SplitRatio = .75)
train_all = subset(data_all, sample == TRUE)
test_all = subset(data_all, sample == FALSE)
# DEFINE A FUNCTION TO SCORE GRNN
predict_pnn <- function(x, pnn){
xlst <- split(x, 1:nrow(x))
pred <- foreach(i=xlst, .combine=rbind) %dopar% {
c(pred=pnn::guess(pnn, as.matrix(i))$category)
}
}
# SEARCH FOR THE OPTIMAL VALUE OF SIGMA BY THE VALIDATION SAMPLE
cv <- foreach(sigma=seq(0.2, 1, 0.05), .combine=rbind) %dopar% {
set.seed(101)
model_pnn <- pnn::smooth(pnn::learn(train_all, category.column=column), sigma=sigma)
model_pnn.pred <- predict_pnn(test_all[, -column], model_pnn)
u_pnn <- union(test_all[column], model_pnn.pred)
m <- caret::confusionMatrix(table(true=factor(test_all[[column]], u_pnn), predictions=factor(model_pnn.pred, u_pnn)))
a <- m$overall[1]
data.frame(sigma, accuracy=a)
}
cat("\n### BEST SIGMA WITH THE HIGHEST ACCURACY ###\n")
print(best.sigma <- cv[cv$accuracy==max(cv$accuracy), 1:2])
# sigma accuracy
# Accuracy11 0.75 0.765625
#
# sigma accuracy
# Accuracy 0.20 0.6718750
# Accuracy1 0.25 0.6718750
# Accuracy2 0.30 0.6757812
# Accuracy3 0.35 0.6796875
# Accuracy4 0.40 0.6835938
# Accuracy5 0.45 0.7070312
# Accuracy6 0.50 0.7265625
# Accuracy7 0.55 0.7265625
# Accuracy8 0.60 0.7304688
# Accuracy9 0.65 0.7304688
# Accuracy10 0.70 0.7421875
# Accuracy11 0.75 0.7656250
# Accuracy12 0.80 0.7578125
# Accuracy13 0.85 0.7617188
# Accuracy14 0.90 0.7578125
# Accuracy15 0.95 0.7578125
# Accuracy16 1.00 0.7539062
set.seed(101)
model_pnn <- pnn::smooth(pnn::learn(train_all, category.column=column), sigma=0.75)
model_pnn.predClass <- as.integer(predict_pnn(test_all[, -column], model_pnn))
test_class <- as.integer(test_all$V9)
u_pnn <- union(test_class, model_pnn.predClass)
caret::confusionMatrix(table(true=factor(test_class, u_pnn), predictions=factor(model_pnn.predClass, u_pnn)))
#
# Confusion Matrix and Statistics
#
# predictions
# true 1 0
# 1 73 32
# 0 28 123
#
# Accuracy : 0.7656
# 95% CI : (0.7089, 0.8161)
# No Information Rate : 0.6055
# P-Value [Acc > NIR] : 4.257e-08
#
# Kappa : 0.5128
# Mcnemar's Test P-Value : 0.6985
#
# Sensitivity : 0.7228
# Specificity : 0.7935
# Pos Pred Value : 0.6952
# Neg Pred Value : 0.8146
# Prevalence : 0.3945
# Detection Rate : 0.2852
# Detection Prevalence : 0.4102
# Balanced Accuracy : 0.7582
#
# 'Positive' Class : 1
|
4561b53ce473cbabf317b8b7c5d49b3b689e8b40
|
5378b28dde3f365587dfb646d15a0eb66ac40f1e
|
/R/model_output_lists.R
|
a69844e976e1a7331ae746fe9f30b4540a3b976a
|
[
"MIT",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
wwheeler6/CalCAT-1
|
64a69e3603c616397c9d9cbf74c7caa000734820
|
ee73c7fcaade36cf3e785cc562688f2857471ffc
|
refs/heads/master
| 2022-11-27T22:32:50.286561
| 2020-08-11T04:37:50
| 2020-08-11T04:37:50
| 286,646,310
| 0
| 0
|
MIT
| 2020-08-11T04:35:20
| 2020-08-11T04:35:20
| null |
UTF-8
|
R
| false
| false
| 7,572
|
r
|
model_output_lists.R
|
JHUoutputs <- c("Peak* Hospitalizations" = "hosp_occup",
"Peak* ICU Beds" = "icu_occup" ,
#"Peak* COVID Hospital Occupancy" = "hosp_occup",
#"Peak* New COVID Daily Hospital Admissions" = "hosp_admit" ,
#"Peak* COVID ICU Bed Occupancy" = "icu_occup" ,
#"Peak* New COVID Daily ICU Admissions" = "icu_admit" ,
#"Peak* New Daily COVID Infections" = "new_infect",
#"Peak* New COVID Daily Deaths" = "new_deaths",
"Peak Cumulative Deaths" = "cum_deaths"
)
CANoutputs <- c(#"Peak New Infections" = "infected",
"Peak Hospitalizations" = "hospitalizations",
#"Beds Needed?" <- "beds"
"Peak Cumulative Deaths" = "deaths"
)
IHMEoutputs.ts <- c("All Beds" = "allbed_mean",
"ICU Beds" = "ICUbed_mean",
"Inv. Ventitilators" = "invVen_mean",
"Deaths" = "deaths_mean",
"Admissions" = "admis_mean",
"New ICU" = "newICU_mean",
"Total Deaths" = "totdea_mean",
"Beds Over" = "bedover_mean",
"ICU Over" = "icuover_mean"
)
COVIDvar <- c( #"Total Confirmed Cases" = "Total.Count.Confirmed",
"Patients Positive for COVID-19" = "COVID.19.Positive.Patients",
#"Suspected COVID-19 Patients" = "Suspected.COVID.19.Positive.Patients",
"ICU Patients Positive for COVID-19"= "ICU.COVID.19.Positive.Patients",
"Total Deaths, Confirmed" = "Total.Count.Deaths"
#"ICU Patients Suspected for COVID-19" = "ICU.COVID.19.Suspected.Patients",
#"Positive + Suspected Hospital Patients" = "total.hospital",
#"Positive + Suspected ICU Patients" = "total.icu"
)
COVIDvar.ts <- c( #"Total Confirmed Cases" = 20,
"Patients Positive for COVID-19" = 22,
#"Suspected COVID-19 Patients" = 23,
"ICU Patients Positive for COVID-19"= 24,
"Total Deaths, Confirmed" = 21
#"ICU Patients Suspected for COVID-19" = 25,
#"Positive + Suspected Patients" = 26,
#"Positive + Suspected ICU Patients" = 27
)
scenarios <- data.frame(
colvar = c(
"strictDistancingNow",
"weakDistancingNow",
"IHME_sts",
"UK.Fixed.30_40",
"UK.Fixed.40_50",
"UK.Fixed.50_60",
"UK.Fixed.60_70",
"Continued_Lockdown",
"Slow.paced_Reopening",
"Moderate.paced_Reopening",
"Fast.paced_Reopening"
),
label = c(
'CAN: Shelter in Place',
'CAN: Delay/Distancing',
'IHME Model',
'JHU: NPIs 30-40% Effective',
'JHU: NPIs 40-50% Effective',
'JHU: NPIs 50-60% Effective',
'JHU: NPIs 60-70% Effective',
'JHU: Continued Lockdown',
'JHU: Slow-paced Reopening',
'JHU: Moderate-paced Reopening',
'JHU: Fast-paced Reopening'
),
group = c(
'other',
'other',
'other',
"UK",
"UK",
"UK",
"UK",
"UK",
"UK",
"UK",
"UK"
),
descrip = c(
"Shelter-in-place or Containment/Delay: Three months of voluntary/VolunTold 'shelter-in-place' community-wide home quarantine (especially firm for high-risk groups), shutdown of non-essential businesses, close schools, ban on events over 10 people, passive monitoring, public advocacy around physical distancing and enhanced hygiene. Possibly closed borders or restricted travel. Public aid relief bill. Roll-out of free population-wide testing and quarantine, so that quarantines can be relaxed for those who are not infected. Strict physical distancing: Three month of shelter at home, reducing transmission between mildy sympotomatic individuals and the susceptible population. Treat everyone as infected. Forced community-wide home quarantine, full shutdown of all businesses, closed borders, active monitoring, full population-wide mandatory testing and aggressive quarantine.",
"Delay/Distancing: Three months of voluntary 'shelter-in-place' for high-risk groups, ban on events over 50 people, public advocacy around “physical distancing” and enhanced hygiene, possible school closures, restricted travel, and passive monitoring. Roll-out of population-wide testing and quarantine, so that quarantines can be relaxed for those who are not infected.",
"Assumes school closures, essential services closed, and Shelter in place beginning March 19th and extending indefinitely.",
"Fixed UK Lockdown followed by physical distancing: This scenario has statewide school closures from March 13-19 followed by a statewide stay-at-home policy from March 19 through April 30 where individuals remain socially distanced with constant effectiveness over the 6-week period. From May 1 through March 1, 2021, there is constant physical distancing with a 30-40% effectiveness.",
"Fixed UK Lockdown followed by physical distancing: This scenario has statewide school closures from March 13-19 followed by a statewide stay-at-home policy from March 19 through April 30 where individuals remain socially distanced with constant effectiveness over the 6-week period. From May 1 through March 1, 2021, there is constant physical distancing with a 40-50% effectiveness.",
"Fixed UK Lockdown followed by physical distancing: This scenario has statewide school closures from March 13-19 followed by a statewide stay-at-home policy from March 19 through April 30 where individuals remain socially distanced with constant effectiveness over the 6-week period. From May 1 through March 1, 2021, there is constant physical distancing with a 50-60% effectiveness.",
"Fixed UK Lockdown followed by physical distancing: This scenario has statewide school closures from March 13-19 followed by a statewide stay-at-home policy from March 19 through April 30 where individuals remain socially distanced with constant effectiveness over the 6-week period. From May 1 through March 1, 2021, there is constant physical distancing with a 60-70% effectiveness.",
"Stay-at-home policy is in place through August 31.",
"Stay-at-home policy is in place through May 8. Restrictions are loosened in 6-week phases with social distancing effectiveness between 50–70% from May 9–June 19 for Stage 2, 35–55% from June 20–July 31 for Stage 3, and 20–40% from August 1–31 for Stage 4.",
"Stay-at-home policy is in place through May 8. Restrictions are loosened in 4-week phases with social distancing effectiveness between 50–70% from May 9–June 5 for Stage 2, 35–55% from June 6–July 3 for Stage 3, and 20–40% from July 4–August 31 for Stage 4.",
"Stay-at-home policy is in place through May 8. Restrictions are loosened in 2-week phases with social distancing effectiveness between 50–70% from May 9–22 for Stage 2, 35–55% from May 23–June 5 for Stage 3, and 20–40% from June 6–August 31 for Stage 4."
)
)
modellist <- as.list(as.character(scenarios[,"colvar"]))
names(modellist) <- scenarios[,as.character("label")]
UKlist <- as.list(as.character(scenarios[which(scenarios$group == "UK"),"colvar"]))
names(UKlist) <- scenarios[which(scenarios$group == "UK"),"label"]
otherlist <- as.list(as.character(scenarios[which(scenarios$group == "other"),"colvar"]))
names(otherlist) <- scenarios[which(scenarios$group == "other"),"label"]
|
88a15e19eee49539b298179026f23c7faafff133
|
e2f37b60e1cd4fdf9c002cd267a79f2881b248dd
|
/demo/unit_test2.R
|
b72d2b4f3655491aedbab16c9d7091e099bf5e0b
|
[
"CC0-1.0"
] |
permissive
|
cboettig/pdg_control
|
8b5ac745a23da2fa7112c74b93765c72974ea9b9
|
d29c5735b155d1eb48b5f8b9d030479c7ec75754
|
refs/heads/master
| 2020-04-06T03:40:50.205377
| 2017-10-17T02:47:24
| 2017-10-17T02:47:24
| 2,390,280
| 7
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,277
|
r
|
unit_test2.R
|
require(PDGcontrol)
# Define all parameters
delta <- 0.1 # economic discounting rate
OptTime <- 50 # stopping time
sigma_g <- 0.2 # Noise in population growth
gridsize <- 10 # gridsize (discretized population)
sigma_m <- .2 #
sigma_i <- .2 #
interval <- 1
# Chose the state equation / population dynamics function
f <- BevHolt
pars <- c(2,4)
K <- (pars[1]-1)/pars[2]
xT <- 0
e_star <- 0
# Choose grids
x_grid <- seq(0, 2*K, length=gridsize) # population size
h_grid <- x_grid # vector of havest levels, use same res as stock
p <- pars
i <- 3
h <- x_grid[3]
y <- x_grid[4]
require(cubature)
expected <- f(y,h,p)
if(expected==0){
Prob <- numeric(gridsize)
Prob[1] <- 1
} else {
# dividing x by the expected value is same as scaling distribution to mean 1
pdf_zg <- function(x, expected) dlnorm(x/expected, 0, sigma_g)
pdf_zm <- function(x) dlnorm(x, 0, sigma_m)
pdf_zi <- function(x,q) dlnorm(x, log(q), sigma_i)
Prob <- sapply(x_grid, function(y){
F <- function(x)
pdf_zg(y, f(x[1], x[2], p)) * pdf_zm(x[1]) * pdf_zi(x[2], h)
int <- adaptIntegrate(F, c(0, 0), c(10*K, 10*K))
int$integral
})
}
Prob/sum(Prob)
|
84b7b5a405ec12e87646de3b382ecd03c9207e56
|
dcefc68f0e065d9cd6f50e0061bab61c4eee995c
|
/badges.R
|
e4ad710bce1fc81f3d3f84a9f4c0284ac9b3f9a4
|
[] |
no_license
|
halpo/halpo.github.io
|
9765eee7e3be31316ae77f26f9d16207b13af541
|
0b98f0d4a1b921d4f436dc07b8c36db732628d5c
|
refs/heads/master
| 2022-11-26T06:44:57.122627
| 2020-08-10T01:48:36
| 2020-08-10T01:48:36
| 197,820,361
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,938
|
r
|
badges.R
|
# Utilities -------------------------------------------------------
make_md_badge <- function(name, href, src){
as.character(glue::glue("[]({href})"))
}
make_badges <-
function( pkg
, user = gh_whoami()$login
, repo = pkg
, branch='master'
, stage = NULL
){
tibble( Package = pkg
, Latest = unclass(make_activity_shield(repo, 'last-commit', user, branch=branch))
, Travis = unclass(make_badge_travis(repo, user=user))
, Coverage = unclass(make_badge_codecov(repo, user=user, branch=branch))
, CRAN = unclass(make_badge_cran(pkg))
)
}
# Specific Badges -------------------------------------------------
make_badge_cran <- function(pkg){
make_md_badge( glue("CRAN Status")
, glue("https://CRAN.R-project.org/package={pkg}")
, glue("https://www.r-pkg.org/badges/version/{pkg}")
)
}
make_badge_lifecycle <- local({
stages <- usethis:::stages
function( pkg
, stage=names(stages)
, colour = stages[[stage]]
){
make_md_badge( glue("Lifecycle Stage: {stage}")
, glue("https://img.shields.io/badge/lifecycle-{stage}-{colour}.svg")
, glue("https://www.tidyverse.org/lifecycle/#{stage}")
)
}})
make_badge_travis <- function(repo, user=gh_whoami()$login, ext=c('org', 'com')){
ext <- match.arg(ext)
url <- glue("https://travis-ci.{ext}/{user}/{repo}")
img <- glue("{url}.svg?branch=master")
make_md_badge("Travis build status", url, img)
}
make_badge_codecov <- function(repo, user=gh_whoami()$login, branch='master'){
url <- glue("https://codecov.io/gh/{user}/{repo}?branch={branch}")
img <- glue("https://codecov.io/gh/{user}/{repo}/branch/{branch}/graph/badge.svg")
make_md_badge("Codecov test coverage", url, img)
}
|
8b17fadb9921a7f6d2bbd3bfa4706100e812e6e9
|
e6024449a5e52e00251d476ef3fdb3544cb4b43e
|
/R/knit2hugo.r
|
f2c55e0a2ccb16e4cdeac215a18ca48b349e86b4
|
[] |
no_license
|
kjhealy/kjhutils
|
99b05520de91c150efb21626dba0d8502b36e8c5
|
054b9a6002b96053f9392edcbe9eb1cf2ca4eb30
|
refs/heads/master
| 2021-03-12T23:41:10.370372
| 2019-03-16T20:56:46
| 2019-03-16T20:56:46
| 9,861,040
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,108
|
r
|
knit2hugo.r
|
##' knitr output hook for hugo highlight shortcode.
##'
##' Output hook suitable for use with hugo's syntax highlighter shortcode
##'
##' Put render_hugo() in the first chunk of your document to make
##' knitr output hugo-friendly {{< highlight >}} shortcode code fences
##' instead of markdown's default triple-backticks.
##'
##' @title render hugo-friendly markdown from knitr
##' @return Output hook
##' @author Kieran Healy
##' @export
render_hugo <- function(extra="") {
require(knitr)
render_markdown(TRUE)
hook.r <- function(x, options) {
paste0("\n\n{{< highlight ", tolower(options$engine),
if (extra !="") " ", extra, " >}}\n", x, "\n{{< /highlight >}}\n\n")
}
hook.t <- function(x, options) { paste0("\n\n{{< highlight text >}}\n",
x, "{{< /highlight >}}\n\n")
}
knit_hooks$set(source = function(x, options) {
x <- paste(knitr:::hilight_source(x, "markdown", options), collapse = "\n")
hook.r(x, options)
}, output = hook.t, warning = hook.t, error = hook.t, message = hook.t)
}
|
9e38de5e7fe8e0bbfa5081cc2564f8c2cef7cec7
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Biostatistics:_Basic_Concepts_And_Methodology_For_The_Health_Sciences_by_Daniel_W._Wayne,_Chad_L._Cross/CH10/EX10.3.1/Ex10_3_1.R
|
a1afddbdd7e0d592d283710c4cc544e63b5d089d
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 1,121
|
r
|
Ex10_3_1.R
|
##Example 10.3.1 Pg.493
##Multiple regression equation
age<- c(72,68,65,85,84,90,79,74,69,87,84,79,71,76,73,86,69,66,79,87,71,81,66,81,80,82,65,73,85,83,83,76,77,83,79,69,65,71,80,81,66,76,70,76,67,72,68,102,67,66,75,91,74,90,66,75,77,78,83,85,76,75,70,79,75,94,76,84,79,78,79)
edlevel <- c(20,12,13,14,13,15,12,10,12,15,12,12,12,14,14,12,17,11,12,12,14,16,16,16,13,12,13,16,16,17,8,20,12,12,14,12,16,14,18,11,14,17,12,12,12,20,18,12,12,14,18,13,15,15,14,12,16,12,20,10,18,14,16,16,18,8,18,18,17,16,12)
cda <- c(4.57,-3.04,1.39,-3.55,-2.56,-4.66,-2.70,0.30,-4.46,-6.29,-4.43,0.18,-1.37,3.26,-1.12,-0.77,3.73,-5.92,3.17,-1.19,0.99,-2.94,-2.21,-.75,5.07,-5.86,5,0.63,2.62,1.77,-3.79,1.44,-5.77,-5.77,-4.62,-2.03,5.74,2.83,-2.40,-0.29,4.44,3.35,-3.13-2.14,9.61,7.57,2.21,-2.3,1.73,6.03,-0.02,-7.65,4.17,-0.68,-2.22,0.80,-0.75,-4.60,2.68,-3.69,4.85,-0.08,0.63,5.92,3.63,-7.07,6.39,-0.08,1.07,5.31,0.30,0.30)
dt = data.frame(age,edlevel,cda)
pairs(dt) #multiple scatter plots
reg <- lm(cda~age+edlevel) #multiple regression model
reg
summary(reg)
#Answers might slightly differ due to approximation
|
4e728ebcee106dbc40ad13b5b2af8115b88ed191
|
e6b8d6b34577d601bee023a979a3dda88036df09
|
/MultivariateStatistics/guassian mixture clustering base script with example.R
|
4bbbc11c38d7edbf2cc6d0273caabf416e65df0b
|
[] |
no_license
|
mikaells/CeMistWorkshop2020
|
307fbcaacc811d2d7da0eefc2398c35f1c7d0373
|
6d4547d2b9ee81f9a199cfeb6d8e67e9e9d5e31f
|
refs/heads/master
| 2023-01-30T22:24:57.365726
| 2020-11-24T10:56:28
| 2020-11-24T10:56:28
| 300,240,862
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,007
|
r
|
guassian mixture clustering base script with example.R
|
#### clustering base script 2020 workshop ####
library("mclust")#mixture clustering package
# load data
Tab=iris #iris data is alredy in R base
X=iris[,-5] # Take out species
#from 1 to 10 cluster #compare different models with different covariance structure
GMM.model <- Mclust(X, G=1:10)
mclustAIC<-function(g,x){
IC <- Mclust(x, G=g)
aic <- 2*IC$df - 2*IC$loglik
return(aic)
}
AIC=sapply(1:10,mclustAIC,X)
BIC=mclustBIC(X,G=1:10,modelNames=GMM.model$modelName)
par(mfrow = c(1,1))
{
plot(1:10,AIC,type="o",pch=20,col="red",cex=1.5)
lines(1:10,abs(BIC),type="o",pch=18,col="blue",cex=1.5)
lines(rep(which.min(AIC),2),range(AIC),lty=2,col="red")
lines(rep(which.max(BIC),2),range(AIC),lty=2,col="blue")
legend(8, 700, c("AIC","BIC","best AIC","best BIC"), cex=1.2,lty=c(1,1,2,2), col=c("red","blue","red","blue"),
pch=c(19,18,NA,NA))
}
summary(GMM.model, parameters = TRUE) # outcomment to view summary
plot(GMM.model, what = "classification")
|
1045c83fbd8aded5c547bdbb23e31772e9d593e8
|
5b09cfc911f713193308f6fc53906ceeeceeaa32
|
/R/stackoverflow.R
|
573b3946a62ac053afbc3711e0af33cb2b556656
|
[] |
no_license
|
stevecondylios/rawr
|
c2e91550f8c153913504f004a0928bead77d16c7
|
1fb218cde7710c8f280bd9148d63693c4a50fdf2
|
refs/heads/master
| 2021-10-22T14:08:54.947648
| 2021-10-18T16:38:40
| 2021-10-18T16:38:40
| 205,609,858
| 16
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,433
|
r
|
stackoverflow.R
|
#' Retrieve raw R code from Stack Overflow website
#'
#'
#' @name stackoverflow
#'
#' @usage stackoverflow(url, method, padding)
#'
#' @param url Link to a page on Stack Overflow website (or any Stack Exchange)
#' @param method Not all websites are formatted consistently. To overcome this, try a different
#' method by setting the method
#' parameter to integers 2 and greater to try other available methods
#' @param padding Specify what goes between the last character of one code block and the
#' first character of the next code block. Default is a two new lines, which appears
#' visually as one new line between code blocks.
#'
#' @return A character vector of length 1 containing the R code from the target url. All code
#' at the target url (including javascript, ruby, python) will be returned.
#'
#' @import dplyr jsonlite xml2
#' @importFrom rvest html_nodes html_text html_attr
#' @importFrom utils file.edit
#'
#' @export
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' stackoverflow("https://stackoverflow.com/questions/58248102/date-input-dt-r-shiny")
#'
#' # Same as above but provided to cat for easy viewing
#' stackoverflow("https://stackoverflow.com/questions/58248102/date-input-dt-r-shiny") %>%
#' cat
#'}
#'
stackoverflow <- function(url, method, padding = "\n\n") {
url %>% xml2::read_html(.) %>%
html_nodes("code") %>% html_text %>% paste0(collapse=padding)
}
|
ffb32375dbf2ec4d23e55a54ddd094dd08e9989e
|
e37ef7efb1f111c1fa531ece65be38af11556ae6
|
/codes/DifferentialMethylation2.R
|
7c9ec0b743f525e260ed0bcc0ed23fcb3444e926
|
[] |
no_license
|
hjkim88/CRC
|
17ea3a6d0c51bae5b1c6debaeb16cd65a8d5c1d8
|
e92d7723e6e562dde5bc4158844bc41db503d8e8
|
refs/heads/master
| 2021-07-07T09:19:54.401524
| 2020-09-03T19:50:00
| 2020-09-03T19:50:00
| 180,240,623
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 88,176
|
r
|
DifferentialMethylation2.R
|
###
# File name : DifferentialMethylation2.R
# Author : Hyunjin Kim
# Date : July 6, 2019
# Email : hk2990@cumc.columbia.edu
# Purpose : Find differentially methylated probes and regions,
# perform pathway analysis on differentially methylated genes.
#
# Workflow
# https://bioconductor.org/packages/release/bioc/vignettes/ChAMP/inst/doc/ChAMP.html
#
# * NOW POLE-MUTATED SAMPLES ARE REMOVED FROM THE ANALYSIS AND
# LOCATION-ANALYSIS IS ALSO ADDED. MSI-L SAMPLES ARE NOW REGARDED AS MSS.
# THIS IS THE DIFFERENCE FROM DifferentialMethylation.R.
#
# Instruction
# 1. Source("DifferentialMethylation2.R")
# 2. Run the function "dma2" - specify the necessary input file paths and output directory
# 3. The differentially methylated results will be generated under the output directory
#
# Example
# > source("The_directory_of_DifferentialMethylation2.R/DifferentialMethylation2.R")
# > dma2(preprocessedBetaPath="./data/methylation/preprocessed/norm_beta_tcga_coad_read.rda",
# clinInfoPath_640 = "./data/coadread_tcga_clinical_data_updated2.txt",
# pvalThreshold = 0.05,
# cpg_cutoff = 10,
# dmrPrintNum = 3,
# dmrPrintSampleNum = 20,
# outputDir="./results/methylation/")
###
dma2 <- function(preprocessedBetaPath="//isilon.c2b2.columbia.edu/ifs/archive/shares/bisr/Parvathi_Myer/data/norm_beta_tcga_coad_read.rda",
clinInfoPath_640 = "//isilon.c2b2.columbia.edu/ifs/archive/shares/bisr/Parvathi_Myer/data/coadread_tcga_clinical_data_updated2.txt",
pvalThreshold = 0.05,
cpg_cutoff = 10,
dmrPrintNum = 3,
dmrPrintSampleNum = 20,
outputDir="//isilon.c2b2.columbia.edu/ifs/archive/shares/bisr/Parvathi_Myer/results/methylation/") {
  ### load libraries
  # give rJava a large heap before xlsx loads (write.xlsx2 on big tables below)
  options(java.parameters = "-Xmx8000m")
  # load-or-install pattern; biocLite is the legacy (pre-BiocManager)
  # Bioconductor installer, used consistently throughout this file
  if(!require(ChAMP)) {
    source("https://bioconductor.org/biocLite.R")
    biocLite("ChAMP")
    library(ChAMP)
  }
  if(!require(org.Hs.eg.db)) {
    source("https://bioconductor.org/biocLite.R")
    biocLite("org.Hs.eg.db")
    library(org.Hs.eg.db)
  }
  if(!require(xlsx)) {
    install.packages("xlsx")
    library(xlsx)
  }
  ### load the data
  # the .rda is expected to define `normB`, a list with $beta (normalized
  # beta-value matrix, CpG probes x samples) and $pd (sample sheet) -- implied
  # by the usage below; TODO confirm against the preprocessing script
  load(preprocessedBetaPath)
  # clinical table keyed by TCGA sample ID (column names kept verbatim via
  # check.names = FALSE, so backtick-quoted names like `Sample ID` work)
  clinicalInfo_640 <- read.table(file = clinInfoPath_640, header = TRUE, sep = "\t",
                                 stringsAsFactors = FALSE, check.names = FALSE)
  rownames(clinicalInfo_640) <- clinicalInfo_640$`Sample ID`
  ### only retain info of the samples that have methylation level
  # intersect + reindex keeps beta columns, pd rows, and clinical rows aligned
  normB$beta <- normB$beta[,intersect(colnames(normB$beta), rownames(clinicalInfo_640))]
  normB$pd <- normB$pd[colnames(normB$beta),]
  clinicalInfo_640 <- clinicalInfo_640[colnames(normB$beta),]
### remove POLE-muated samples
normB$beta <- normB$beta[,-which(clinicalInfo_640$POLE_MUTANT == TRUE)]
normB$pd <- normB$pd[colnames(normB$beta),]
clinicalInfo_640 <- clinicalInfo_640[colnames(normB$beta),]
  ### add tissue location info to the clinical info
  # Proximal = cecum through transverse colon; Distal = splenic flexure through
  # rectum/rectosigmoid. Sites not listed below (or NA) keep TUMOR_LOCATION = NA.
  clinicalInfo_640$TUMOR_LOCATION <- NA
  clinicalInfo_640$TUMOR_LOCATION[which(clinicalInfo_640$`Patient Primary Tumor Site` == "Cecum")] <- "Proximal"
  clinicalInfo_640$TUMOR_LOCATION[which(clinicalInfo_640$`Patient Primary Tumor Site` == "Ascending Colon")] <- "Proximal"
  clinicalInfo_640$TUMOR_LOCATION[which(clinicalInfo_640$`Patient Primary Tumor Site` == "Hepatic Flexure")] <- "Proximal"
  clinicalInfo_640$TUMOR_LOCATION[which(clinicalInfo_640$`Patient Primary Tumor Site` == "Transverse Colon")] <- "Proximal"
  clinicalInfo_640$TUMOR_LOCATION[which(clinicalInfo_640$`Patient Primary Tumor Site` == "Splenic Flexure")] <- "Distal"
  clinicalInfo_640$TUMOR_LOCATION[which(clinicalInfo_640$`Patient Primary Tumor Site` == "Descending Colon")] <- "Distal"
  clinicalInfo_640$TUMOR_LOCATION[which(clinicalInfo_640$`Patient Primary Tumor Site` == "Sigmoid Colon")] <- "Distal"
  clinicalInfo_640$TUMOR_LOCATION[which(clinicalInfo_640$`Patient Primary Tumor Site` == "Rectum")] <- "Distal"
  clinicalInfo_640$TUMOR_LOCATION[which(clinicalInfo_640$`Patient Primary Tumor Site` == "Rectosigmoid Junction")] <- "Distal"
  ### add new MSI info to the sample info since MSI-L should be treated as MSS
  clinicalInfo_640$NEW_MSI <- clinicalInfo_640$MSI
  clinicalInfo_640$NEW_MSI[which(clinicalInfo_640$NEW_MSI == "MSI-L")] <- "MSS"
  ### change other MSI-related info
  # reclassify former MSI-L samples in every derived MSI status column so that
  # the downstream group labels also treat them as MSS.
  # MSI_AGE_Status: young = diagnosis age < 50, old = >= 50
  clinicalInfo_640$MSI_AGE_Status[intersect(which(clinicalInfo_640$MSI == "MSI-L"),
                                            which(clinicalInfo_640$`Diagnosis Age` < 50))] <- "MSS_Young"
  clinicalInfo_640$MSI_AGE_Status[intersect(which(clinicalInfo_640$MSI == "MSI-L"),
                                            which(clinicalInfo_640$`Diagnosis Age` >= 50))] <- "MSS_Old"
  # MSI_RACE_Status1 is based on self-reported `Race Category`,
  # MSI_RACE_Status2 on the predicted ancestry column `Prediction_Filtered`
  clinicalInfo_640$MSI_RACE_Status1[intersect(which(clinicalInfo_640$MSI == "MSI-L"),
                                              which(clinicalInfo_640$`Race Category` == "BLACK OR AFRICAN AMERICAN"))] <- "MSS_AA"
  clinicalInfo_640$MSI_RACE_Status1[intersect(which(clinicalInfo_640$MSI == "MSI-L"),
                                              which(clinicalInfo_640$`Race Category` == "WHITE"))] <- "MSS_CC"
  clinicalInfo_640$MSI_RACE_Status2[intersect(which(clinicalInfo_640$MSI == "MSI-L"),
                                              which(clinicalInfo_640$Prediction_Filtered == "African"))] <- "MSS_AA"
  clinicalInfo_640$MSI_RACE_Status2[intersect(which(clinicalInfo_640$MSI == "MSI-L"),
                                              which(clinicalInfo_640$Prediction_Filtered == "Caucasian"))] <- "MSS_CC"
# ******************************************************************************************
# Pathway Analysis with clusterProfiler package
# Input: geneList = a vector of gene Entrez IDs for pathway analysis [numeric or character]
# org = organism that will be used in the analysis ["human" or "mouse"]
# should be either "human" or "mouse"
# database = pathway analysis database (KEGG or GO) ["KEGG" or "GO"]
# title = title of the pathway figure [character]
# pv_threshold = pathway analysis p-value threshold (not DE analysis threshold) [numeric]
# displayNum = the number of pathways that will be displayed [numeric]
# (If there are many significant pathways show the few top pathways)
# imgPrint = print a plot of pathway analysis [TRUE/FALSE]
# dir = file directory path of the output pathway figure [character]
#
# Output: Pathway analysis results in figure - using KEGG and GO pathways
# The x-axis represents the number of DE genes in the pathway
# The y-axis represents pathway names
# The color of a bar indicates adjusted p-value from the pathway analysis
# For Pathview Result, all colored genes are found DE genes in the pathway,
# and the color indicates log2(fold change) of the DE gene from DE analysis
# ******************************************************************************************
pathwayAnalysis_CP <- function(geneList,
org,
database,
title="Pathway_Results",
pv_threshold=0.05,
displayNum=Inf,
imgPrint=TRUE,
dir="./") {
### load library
if(!require(clusterProfiler)) {
source("https://bioconductor.org/biocLite.R")
biocLite("clusterProfiler")
library(clusterProfiler)
}
if(!require(ggplot2)) {
install.packages("ggplot2")
library(ggplot2)
}
### colect gene list (Entrez IDs)
geneList <- geneList[which(!is.na(geneList))]
if(!is.null(geneList)) {
### make an empty list
p <- list()
if(database == "KEGG") {
### KEGG Pathway
kegg_enrich <- enrichKEGG(gene = geneList, organism = org, pvalueCutoff = pv_threshold)
if(is.null(kegg_enrich)) {
writeLines("KEGG Result does not exist")
return(NULL)
} else {
kegg_enrich@result <- kegg_enrich@result[which(kegg_enrich@result$p.adjust < pv_threshold),]
if(imgPrint == TRUE) {
if((displayNum == Inf) || (nrow(kegg_enrich@result) <= displayNum)) {
result <- kegg_enrich@result
description <- kegg_enrich@result$Description
} else {
result <- kegg_enrich@result[1:displayNum,]
description <- kegg_enrich@result$Description[1:displayNum]
}
if(nrow(kegg_enrich) > 0) {
p[[1]] <- ggplot(result, aes(x=Description, y=Count)) + labs(x="", y="Gene Counts") +
theme_classic(base_size = 16) + geom_bar(aes(fill = p.adjust), stat="identity") + coord_flip() +
scale_x_discrete(limits = rev(description)) +
guides(fill = guide_colorbar(ticks=FALSE, title="P.Val", barheight=10)) +
ggtitle(paste0("KEGG ", title))
png(paste0(dir, "kegg_", title, ".png"), width = 2000, height = 1000)
print(p[[1]])
dev.off()
} else {
writeLines("KEGG Result does not exist")
}
}
return(kegg_enrich@result)
}
} else if(database == "GO") {
### GO Pathway
if(org == "human") {
go_enrich <- enrichGO(gene = geneList, OrgDb = 'org.Hs.eg.db', readable = T, ont = "BP", pvalueCutoff = pv_threshold)
} else if(org == "mouse") {
go_enrich <- enrichGO(gene = geneList, OrgDb = 'org.Mm.eg.db', readable = T, ont = "BP", pvalueCutoff = pv_threshold)
} else {
go_enrich <- NULL
writeLines(paste("Unknown org variable:", org))
}
if(is.null(go_enrich)) {
writeLines("GO Result does not exist")
return(NULL)
} else {
go_enrich@result <- go_enrich@result[which(go_enrich@result$p.adjust < pv_threshold),]
if(imgPrint == TRUE) {
if((displayNum == Inf) || (nrow(go_enrich@result) <= displayNum)) {
result <- go_enrich@result
description <- go_enrich@result$Description
} else {
result <- go_enrich@result[1:displayNum,]
description <- go_enrich@result$Description[1:displayNum]
}
if(nrow(go_enrich) > 0) {
p[[2]] <- ggplot(result, aes(x=Description, y=Count)) + labs(x="", y="Gene Counts") +
theme_classic(base_size = 16) + geom_bar(aes(fill = p.adjust), stat="identity") + coord_flip() +
scale_x_discrete(limits = rev(description)) +
guides(fill = guide_colorbar(ticks=FALSE, title="P.Val", barheight=10)) +
ggtitle(paste0("GO ", title))
png(paste0(dir, "go_", title, ".png"), width = 2000, height = 1000)
print(p[[2]])
dev.off()
} else {
writeLines("GO Result does not exist")
}
}
return(go_enrich@result)
}
} else {
stop("database prameter should be \"GO\" or \"KEGG\"")
}
} else {
writeLines("geneList = NULL")
}
}
### gene mapping list
gs2eg <- unlist(as.list(org.Hs.egSYMBOL2EG))
### set sample groups for DMA - age
grp <- clinicalInfo_640[colnames(normB$beta),"MSI_AGE_Status"]
grp[which(grp == "MSI-H_Young")] <- "MSIHYOUNG"
grp[which(grp == "MSI-H_Old")] <- "MSIHOLD"
grp[which(grp == "MSS_Young")] <- "MSSYOUNG"
grp[which(grp == "MSS_Old")] <- "MSSOLD"
grp[which(is.na(grp))] <- "NOTHING"
### Young vs Old
### MSI-H
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSIHYOUNG", "MSIHOLD"),
adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
file = paste0(outputDir, "DMPs_MSI-H_Young_vs_Old.xlsx"),
sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
title = "Top_50_DMG-associated_Pathways_MSI-H_Young_vs_Old",
displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSI-H_Young_vs_Old.xlsx"),
sheetName = "DMG-associated_Pathways", row.names = FALSE)
### MSS
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSSYOUNG", "MSSOLD"),
adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
file = paste0(outputDir, "DMPs_MSS_Young_vs_Old.xlsx"),
sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
title = "Top_50_DMG-associated_Pathways_MSS_Young_vs_Old",
displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSS_Young_vs_Old.xlsx"),
sheetName = "DMG-associated_Pathways", row.names = FALSE)
### DMRCate and plots
### make a design matrix
design <- model.matrix(~0+grp)
colnames(design) <- levels(as.factor(grp))
### make a contrast matrix
contrastMat <- makeContrasts(MSIHYOUNG-MSIHOLD,
MSSYOUNG-MSSOLD,
levels = design)
### MSI-H
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
arraytype = "450K", fdr = pvalThreshold,
annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
analysis.type = "differential", design = design,
contrasts = TRUE, cont.matrix = contrastMat,
coef = "MSIHYOUNG - MSIHOLD")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
### get DMRs
DMRs <- dmrcate(myAnno, lambda=1000, C=2)
### extract ranges from DMRs
results.ranges <- extractRanges(DMRs, genome = "hg19")
### filter with Stouffer combined p-values and save the DMR info
if(length(results.ranges) > 0) {
results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold),]
results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
file = paste0(outputDir, "DMRs_MSI-H_Young_vs_Old.xlsx"),
sheetName = "DMRs", row.names = FALSE)
idx <- union(which(grp == "MSIHYOUNG"), which(grp == "MSIHOLD"))
pheno <- c("skyblue", "pink")
names(pheno) <- c("MSIHYOUNG", "MSIHOLD")
cols <- pheno[grp]
min_num <- min(dmrPrintSampleNum, min(length(which(cols[idx] == pheno[1])), length(which(cols[idx] == pheno[2]))))
set.seed(1234)
for(i in 1:min(dmrPrintNum, length(results.ranges))) {
png(paste0(outputDir, "DMR", i, "_MSI-H_Young_vs_Old.png"), width = 1800, height = 1000)
DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
samps = union(sample(which(cols[idx] == pheno[1]), min_num),
sample(which(cols[idx] == pheno[2]), min_num)))
dev.off()
}
}
}
### MSS
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
arraytype = "450K", fdr = pvalThreshold,
annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
analysis.type = "differential", design = design,
contrasts = TRUE, cont.matrix = contrastMat,
coef = "MSSYOUNG - MSSOLD")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
### get DMRs
DMRs <- dmrcate(myAnno, lambda=1000, C=2)
### extract ranges from DMRs
results.ranges <- extractRanges(DMRs, genome = "hg19")
### filter with Stouffer combined p-values and save the DMR info
if(length(results.ranges) > 0) {
results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold),]
results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
file = paste0(outputDir, "DMRs_MSS_Young_vs_Old.xlsx"),
sheetName = "DMRs", row.names = FALSE)
idx <- union(which(grp == "MSSYOUNG"), which(grp == "MSSOLD"))
pheno <- c("skyblue", "pink")
names(pheno) <- c("MSSYOUNG", "MSSOLD")
cols <- pheno[grp]
min_num <- min(dmrPrintSampleNum, min(length(which(cols[idx] == pheno[1])), length(which(cols[idx] == pheno[2]))))
set.seed(1234)
for(i in 1:min(dmrPrintNum, length(results.ranges))) {
png(paste0(outputDir, "DMR", i, "_MSS_Young_vs_Old.png"), width = 1800, height = 1000)
DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
samps = union(sample(which(cols[idx] == pheno[1]), min_num),
sample(which(cols[idx] == pheno[2]), min_num)))
dev.off()
}
}
}
### set sample groups for DMA - self-reported race
grp <- clinicalInfo_640[colnames(normB$beta),"MSI_RACE_Status1"]
grp[which(grp == "MSI-H_AA")] <- "MSIHAA"
grp[which(grp == "MSI-H_CC")] <- "MSIHCC"
grp[which(grp == "MSS_AA")] <- "MSSAA"
grp[which(grp == "MSS_CC")] <- "MSSCC"
grp[which(is.na(grp))] <- "NOTHING"
### AA vs CC - self-reported
### MSI-H
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSIHAA", "MSIHCC"),
adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
file = paste0(outputDir, "DMPs_MSI-H_AA_vs_CC.xlsx"),
sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
title = "Top_50_DMG-associated_Pathways_MSI-H_AA_vs_CC",
displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSI-H_AA_vs_CC.xlsx"),
sheetName = "DMG-associated_Pathways", row.names = FALSE)
### MSS
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSSAA", "MSSCC"),
adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
file = paste0(outputDir, "DMPs_MSS_AA_vs_CC.xlsx"),
sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
title = "Top_50_DMG-associated_Pathways_MSS_AA_vs_CC",
displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSS_AA_vs_CC.xlsx"),
sheetName = "DMG-associated_Pathways", row.names = FALSE)
### DMRCate and plots
### make a design matrix
design <- model.matrix(~0+grp)
colnames(design) <- levels(as.factor(grp))
### make a contrast matrix
contrastMat <- makeContrasts(MSIHAA-MSIHCC,
MSSAA-MSSCC,
levels = design)
### MSI-H
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
arraytype = "450K", fdr = pvalThreshold,
annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
analysis.type = "differential", design = design,
contrasts = TRUE, cont.matrix = contrastMat,
coef = "MSIHAA - MSIHCC")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
### get DMRs
DMRs <- dmrcate(myAnno, lambda=1000, C=2)
### extract ranges from DMRs
results.ranges <- extractRanges(DMRs, genome = "hg19")
### filter with Stouffer combined p-values and save the DMR info
if(length(results.ranges) > 0) {
results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold),]
results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
file = paste0(outputDir, "DMRs_MSI-H_AA_vs_CC.xlsx"),
sheetName = "DMRs", row.names = FALSE)
idx <- union(which(grp == "MSIHAA"), which(grp == "MSIHCC"))
pheno <- c("skyblue", "pink")
names(pheno) <- c("MSIHAA", "MSIHCC")
cols <- pheno[grp]
min_num <- min(dmrPrintSampleNum, min(length(which(cols[idx] == pheno[1])), length(which(cols[idx] == pheno[2]))))
set.seed(1234)
for(i in 1:min(dmrPrintNum, length(results.ranges))) {
png(paste0(outputDir, "DMR", i, "_MSI-H_AA_vs_CC.png"), width = 1800, height = 1000)
DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
samps = union(sample(which(cols[idx] == pheno[1]), min_num),
sample(which(cols[idx] == pheno[2]), min_num)))
dev.off()
}
}
}
### MSS
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
arraytype = "450K", fdr = pvalThreshold,
annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
analysis.type = "differential", design = design,
contrasts = TRUE, cont.matrix = contrastMat,
coef = "MSSAA - MSSCC")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
### get DMRs
DMRs <- dmrcate(myAnno, lambda=1000, C=2)
### extract ranges from DMRs
results.ranges <- extractRanges(DMRs, genome = "hg19")
### filter with Stouffer combined p-values and save the DMR info
if(length(results.ranges) > 0) {
results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold),]
results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
file = paste0(outputDir, "DMRs_MSS_AA_vs_CC.xlsx"),
sheetName = "DMRs", row.names = FALSE)
idx <- union(which(grp == "MSSAA"), which(grp == "MSSCC"))
pheno <- c("skyblue", "pink")
names(pheno) <- c("MSSAA", "MSSCC")
cols <- pheno[grp]
min_num <- min(dmrPrintSampleNum, min(length(which(cols[idx] == pheno[1])), length(which(cols[idx] == pheno[2]))))
set.seed(1234)
for(i in 1:min(dmrPrintNum, length(results.ranges))) {
png(paste0(outputDir, "DMR", i, "_MSS_AA_vs_CC.png"), width = 1800, height = 1000)
DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
samps = union(sample(which(cols[idx] == pheno[1]), min_num),
sample(which(cols[idx] == pheno[2]), min_num)))
dev.off()
}
}
}
### set sample groups for DMA - predicted race
grp <- clinicalInfo_640[colnames(normB$beta),"MSI_RACE_Status2"]
grp[which(grp == "MSI-H_AA")] <- "MSIHAA"
grp[which(grp == "MSI-H_CC")] <- "MSIHCC"
grp[which(grp == "MSS_AA")] <- "MSSAA"
grp[which(grp == "MSS_CC")] <- "MSSCC"
grp[which(is.na(grp))] <- "NOTHING"
### AA vs CC - predicted
### MSI-H
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSIHAA", "MSIHCC"),
adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
file = paste0(outputDir, "DMPs_MSI-H_AA_vs_CC_predicted.xlsx"),
sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
title = "Top_50_DMG-associated_Pathways_MSI-H_AA_vs_CC_predicted",
displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSI-H_AA_vs_CC_predicted.xlsx"),
sheetName = "DMG-associated_Pathways", row.names = FALSE)
### MSS
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSSAA", "MSSCC"),
adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
file = paste0(outputDir, "DMPs_MSS_AA_vs_CC_predicted.xlsx"),
sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
title = "Top_50_DMG-associated_Pathways_MSS_AA_vs_CC_predicted",
displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSS_AA_vs_CC_predicted.xlsx"),
sheetName = "DMG-associated_Pathways", row.names = FALSE)
### DMRCate and plots
### make a design matrix
design <- model.matrix(~0+grp)
colnames(design) <- levels(as.factor(grp))
### make a contrast matrix
contrastMat <- makeContrasts(MSIHAA-MSIHCC,
MSSAA-MSSCC,
levels = design)
### MSI-H
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
arraytype = "450K", fdr = pvalThreshold,
annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
analysis.type = "differential", design = design,
contrasts = TRUE, cont.matrix = contrastMat,
coef = "MSIHAA - MSIHCC")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
### get DMRs
DMRs <- dmrcate(myAnno, lambda=1000, C=2)
### extract ranges from DMRs
results.ranges <- extractRanges(DMRs, genome = "hg19")
### filter with Stouffer combined p-values and save the DMR info
if(length(results.ranges) > 0) {
results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold),]
results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
file = paste0(outputDir, "DMRs_MSI-H_AA_vs_CC_predicted.xlsx"),
sheetName = "DMRs", row.names = FALSE)
idx <- union(which(grp == "MSIHAA"), which(grp == "MSIHCC"))
pheno <- c("skyblue", "pink")
names(pheno) <- c("MSIHAA", "MSIHCC")
cols <- pheno[grp]
min_num <- min(dmrPrintSampleNum, min(length(which(cols[idx] == pheno[1])), length(which(cols[idx] == pheno[2]))))
set.seed(1234)
for(i in 1:min(dmrPrintNum, length(results.ranges))) {
png(paste0(outputDir, "DMR", i, "_MSI-H_AA_vs_CC_predicted.png"), width = 1800, height = 1000)
DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
samps = union(sample(which(cols[idx] == pheno[1]), min_num),
sample(which(cols[idx] == pheno[2]), min_num)))
dev.off()
}
}
}
### MSS
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
arraytype = "450K", fdr = pvalThreshold,
annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
analysis.type = "differential", design = design,
contrasts = TRUE, cont.matrix = contrastMat,
coef = "MSSAA - MSSCC")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
### get DMRs
DMRs <- dmrcate(myAnno, lambda=1000, C=2)
### extract ranges from DMRs
results.ranges <- extractRanges(DMRs, genome = "hg19")
### filter with Stouffer combined p-values and save the DMR info
if(length(results.ranges) > 0) {
results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold),]
results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
file = paste0(outputDir, "DMRs_MSS_AA_vs_CC_predicted.xlsx"),
sheetName = "DMRs", row.names = FALSE)
idx <- union(which(grp == "MSSAA"), which(grp == "MSSCC"))
pheno <- c("skyblue", "pink")
names(pheno) <- c("MSSAA", "MSSCC")
cols <- pheno[grp]
min_num <- min(dmrPrintSampleNum, min(length(which(cols[idx] == pheno[1])), length(which(cols[idx] == pheno[2]))))
set.seed(1234)
for(i in 1:min(dmrPrintNum, length(results.ranges))) {
png(paste0(outputDir, "DMR", i, "_MSS_AA_vs_CC_predicted.png"), width = 1800, height = 1000)
DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
samps = union(sample(which(cols[idx] == pheno[1]), min_num),
sample(which(cols[idx] == pheno[2]), min_num)))
dev.off()
}
}
}
### location-based analysis
### set sample groups for DMA - age
grp <- clinicalInfo_640[colnames(normB$beta),"MSI_AGE_Status"]
grp[which(grp == "MSI-H_Young")] <- "MSIHYOUNG"
grp[which(grp == "MSI-H_Old")] <- "MSIHOLD"
grp[which(grp == "MSS_Young")] <- "MSSYOUNG"
grp[which(grp == "MSS_Old")] <- "MSSOLD"
grp[which(is.na(grp))] <- "NOTHING"
grp <- paste0(grp, clinicalInfo_640$TUMOR_LOCATION)
### MSIHYOUNGProximal-MSIHYOUNGDistal
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSIHYOUNGProximal", "MSIHYOUNGDistal"),
adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
file = paste0(outputDir, "DMPs_MSI-H_Young_Proximal_vs_Distal.xlsx"),
sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
title = "Top_50_DMG-associated_Pathways_MSI-H_Young_Proximal_vs_Distal",
displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSI-H_Young_Proximal_vs_Distal.xlsx"),
sheetName = "DMG-associated_Pathways", row.names = FALSE)
### MSIHOLDProximal-MSIHOLDDistal
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSIHOLDProximal", "MSIHOLDDistal"),
adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
file = paste0(outputDir, "DMPs_MSI-H_Old_Proximal_vs_Distal.xlsx"),
sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
title = "Top_50_DMG-associated_Pathways_MSI-H_Old_Proximal_vs_Distal",
displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSI-H_Old_Proximal_vs_Distal.xlsx"),
sheetName = "DMG-associated_Pathways", row.names = FALSE)
### MSSYOUNGProximal-MSSYOUNGDistal
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSSYOUNGProximal", "MSSYOUNGDistal"),
adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
file = paste0(outputDir, "DMPs_MSS_Young_Proximal_vs_Distal.xlsx"),
sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
title = "Top_50_DMG-associated_Pathways_MSS_Young_Proximal_vs_Distal",
displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSS_Young_Proximal_vs_Distal.xlsx"),
sheetName = "DMG-associated_Pathways", row.names = FALSE)
### MSSOLDProximal-MSSOLDDistal
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSSOLDProximal", "MSSOLDDistal"),
adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
file = paste0(outputDir, "DMPs_MSS_Old_Proximal_vs_Distal.xlsx"),
sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
title = "Top_50_DMG-associated_Pathways_MSS_Old_Proximal_vs_Distal",
displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSS_Old_Proximal_vs_Distal.xlsx"),
sheetName = "DMG-associated_Pathways", row.names = FALSE)
### DMRCate and plots
### make a design matrix
design <- model.matrix(~0+grp)
colnames(design) <- levels(as.factor(grp))
### make a contrast matrix
contrastMat <- makeContrasts(MSIHYOUNGProximal-MSIHYOUNGDistal,
MSIHOLDProximal-MSIHOLDDistal,
MSSYOUNGProximal-MSSYOUNGDistal,
MSSOLDProximal-MSSOLDDistal,
levels = design)
### MSIHYOUNGProximal-MSIHYOUNGDistal
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
arraytype = "450K", fdr = pvalThreshold,
annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
analysis.type = "differential", design = design,
contrasts = TRUE, cont.matrix = contrastMat,
coef = "MSIHYOUNGProximal - MSIHYOUNGDistal")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
### get DMRs
DMRs <- dmrcate(myAnno, lambda=1000, C=2)
### extract ranges from DMRs
results.ranges <- extractRanges(DMRs, genome = "hg19")
### filter with Stouffer combined p-values and save the DMR info
if(length(results.ranges) > 0) {
results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold),]
results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
file = paste0(outputDir, "DMRs_MSI-H_Young_Proximal_vs_Distal.xlsx"),
sheetName = "DMRs", row.names = FALSE)
idx <- union(which(grp == "MSIHYOUNGProximal"), which(grp == "MSIHYOUNGDistal"))
pheno <- c("skyblue", "pink")
names(pheno) <- c("MSIHYOUNGProximal", "MSIHYOUNGDistal")
cols <- pheno[grp]
min_num <- min(dmrPrintSampleNum, min(length(which(cols[idx] == pheno[1])), length(which(cols[idx] == pheno[2]))))
set.seed(1234)
for(i in 1:min(dmrPrintNum, length(results.ranges))) {
png(paste0(outputDir, "DMR", i, "_MSI-H_Young_Proximal_vs_Distal.png"), width = 1800, height = 1000)
DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
samps = union(sample(which(cols[idx] == pheno[1]), min_num),
sample(which(cols[idx] == pheno[2]), min_num)))
dev.off()
}
}
}
### MSIHOLDProximal-MSIHOLDDistal
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
                       arraytype = "450K", fdr = pvalThreshold,
                       annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
                       analysis.type = "differential", design = design,
                       contrasts = TRUE, cont.matrix = contrastMat,
                       coef = "MSIHOLDProximal - MSIHOLDDistal")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
  DMRs <- dmrcate(myAnno, lambda=1000, C=2)
  results.ranges <- extractRanges(DMRs, genome = "hg19")
  ### filter with Stouffer combined p-values and save the DMR info
  if(length(results.ranges) > 0) {
    results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold)]
    results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
    ### BUGFIX: re-check after filtering - otherwise 1:length(results.ranges)
    ### becomes c(1, 0) when the filters removed every DMR and the code fails
    if(length(results.ranges) > 0) {
      write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
                  file = paste0(outputDir, "DMRs_MSI-H_Old_Proximal_vs_Distal.xlsx"),
                  sheetName = "DMRs", row.names = FALSE)
      idx <- union(which(grp == "MSIHOLDProximal"), which(grp == "MSIHOLDDistal"))
      pheno <- c("skyblue", "pink")
      names(pheno) <- c("MSIHOLDProximal", "MSIHOLDDistal")
      cols <- pheno[grp]
      g1 <- which(cols[idx] == pheno[1])
      g2 <- which(cols[idx] == pheno[2])
      min_num <- min(dmrPrintSampleNum, length(g1), length(g2))
      set.seed(1234)  # reproducible per-plot sample selection
      for(i in seq_len(min(dmrPrintNum, length(results.ranges)))) {
        png(paste0(outputDir, "DMR", i, "_MSI-H_Old_Proximal_vs_Distal.png"), width = 1800, height = 1000)
        ### sample.int indexing: same draws as sample(g, k) for length(g) >= 2,
        ### safe when a group holds a single sample
        DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
                 arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
                 samps = union(g1[sample.int(length(g1), min_num)],
                               g2[sample.int(length(g2), min_num)]))
        dev.off()
      }
    }
  }
}
### MSSYOUNGProximal-MSSYOUNGDistal
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
                       arraytype = "450K", fdr = pvalThreshold,
                       annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
                       analysis.type = "differential", design = design,
                       contrasts = TRUE, cont.matrix = contrastMat,
                       coef = "MSSYOUNGProximal - MSSYOUNGDistal")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
  DMRs <- dmrcate(myAnno, lambda=1000, C=2)
  results.ranges <- extractRanges(DMRs, genome = "hg19")
  ### filter with Stouffer combined p-values and save the DMR info
  if(length(results.ranges) > 0) {
    results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold)]
    results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
    ### BUGFIX: re-check after filtering (see note in the section above)
    if(length(results.ranges) > 0) {
      write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
                  file = paste0(outputDir, "DMRs_MSS_Young_Proximal_vs_Distal.xlsx"),
                  sheetName = "DMRs", row.names = FALSE)
      idx <- union(which(grp == "MSSYOUNGProximal"), which(grp == "MSSYOUNGDistal"))
      pheno <- c("skyblue", "pink")
      names(pheno) <- c("MSSYOUNGProximal", "MSSYOUNGDistal")
      cols <- pheno[grp]
      g1 <- which(cols[idx] == pheno[1])
      g2 <- which(cols[idx] == pheno[2])
      min_num <- min(dmrPrintSampleNum, length(g1), length(g2))
      set.seed(1234)
      for(i in seq_len(min(dmrPrintNum, length(results.ranges)))) {
        png(paste0(outputDir, "DMR", i, "_MSS_Young_Proximal_vs_Distal.png"), width = 1800, height = 1000)
        DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
                 arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
                 samps = union(g1[sample.int(length(g1), min_num)],
                               g2[sample.int(length(g2), min_num)]))
        dev.off()
      }
    }
  }
}
### MSSOLDProximal-MSSOLDDistal
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
                       arraytype = "450K", fdr = pvalThreshold,
                       annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
                       analysis.type = "differential", design = design,
                       contrasts = TRUE, cont.matrix = contrastMat,
                       coef = "MSSOLDProximal - MSSOLDDistal")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
  DMRs <- dmrcate(myAnno, lambda=1000, C=2)
  results.ranges <- extractRanges(DMRs, genome = "hg19")
  ### filter with Stouffer combined p-values and save the DMR info
  if(length(results.ranges) > 0) {
    results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold)]
    results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
    ### BUGFIX: re-check after filtering (see note in the first section above)
    if(length(results.ranges) > 0) {
      write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
                  file = paste0(outputDir, "DMRs_MSS_Old_Proximal_vs_Distal.xlsx"),
                  sheetName = "DMRs", row.names = FALSE)
      idx <- union(which(grp == "MSSOLDProximal"), which(grp == "MSSOLDDistal"))
      pheno <- c("skyblue", "pink")
      names(pheno) <- c("MSSOLDProximal", "MSSOLDDistal")
      cols <- pheno[grp]
      g1 <- which(cols[idx] == pheno[1])
      g2 <- which(cols[idx] == pheno[2])
      min_num <- min(dmrPrintSampleNum, length(g1), length(g2))
      set.seed(1234)
      for(i in seq_len(min(dmrPrintNum, length(results.ranges)))) {
        png(paste0(outputDir, "DMR", i, "_MSS_Old_Proximal_vs_Distal.png"), width = 1800, height = 1000)
        DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
                 arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
                 samps = union(g1[sample.int(length(g1), min_num)],
                               g2[sample.int(length(g2), min_num)]))
        dev.off()
      }
    }
  }
}
### set sample groups for DMA - self-reported race
### row-subset clinicalInfo_640 by the beta-matrix column names so the group
### labels line up sample-for-sample with the methylation data
grp <- clinicalInfo_640[colnames(normB$beta),"MSI_RACE_Status1"]
grp[which(grp == "MSI-H_AA")] <- "MSIHAA"
grp[which(grp == "MSI-H_CC")] <- "MSIHCC"
grp[which(grp == "MSS_AA")] <- "MSSAA"
grp[which(grp == "MSS_CC")] <- "MSSCC"
grp[which(is.na(grp))] <- "NOTHING"
### BUGFIX: subset TUMOR_LOCATION with the same sample ordering used for grp;
### the original clinicalInfo_640$TUMOR_LOCATION is in data-frame row order and
### can be misaligned with colnames(normB$beta)
grp <- paste0(grp, clinicalInfo_640[colnames(normB$beta), "TUMOR_LOCATION"])
### Pairwise DMP (Differentially Methylated Position) analyses, self-reported
### race groups, Proximal vs Distal tumor location within each MSI/race stratum.
### Each section: champ.DMP -> save DMP table -> GO pathway analysis of DM genes.
### MSIHAAProximal-MSIHAADistal
### differentially methylated positions
### champ.DMP returns a list of pairwise result tables; with a single
### compare.group pair, [[1]] is the one comparison requested
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSIHAAProximal", "MSIHAADistal"),
                  adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
            file = paste0(outputDir, "DMPs_MSI-H_AA_Proximal_vs_Distal.xlsx"),
            sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
### keep probes annotated to a gene, then map the symbols through gs2eg
### (NOTE(review): gs2eg looks like a gene-symbol -> Entrez-ID lookup - confirm)
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
                               title = "Top_50_DMG-associated_Pathways_MSI-H_AA_Proximal_vs_Distal",
                               displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSI-H_AA_Proximal_vs_Distal.xlsx"),
            sheetName = "DMG-associated_Pathways", row.names = FALSE)
### MSIHCCProximal-MSIHCCDistal
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSIHCCProximal", "MSIHCCDistal"),
                  adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
            file = paste0(outputDir, "DMPs_MSI-H_CC_Proximal_vs_Distal.xlsx"),
            sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
                               title = "Top_50_DMG-associated_Pathways_MSI-H_CC_Proximal_vs_Distal",
                               displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSI-H_CC_Proximal_vs_Distal.xlsx"),
            sheetName = "DMG-associated_Pathways", row.names = FALSE)
### MSSAAProximal-MSSAADistal
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSSAAProximal", "MSSAADistal"),
                  adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
            file = paste0(outputDir, "DMPs_MSS_AA_Proximal_vs_Distal.xlsx"),
            sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
                               title = "Top_50_DMG-associated_Pathways_MSS_AA_Proximal_vs_Distal",
                               displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSS_AA_Proximal_vs_Distal.xlsx"),
            sheetName = "DMG-associated_Pathways", row.names = FALSE)
### MSSCCProximal-MSSCCDistal
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSSCCProximal", "MSSCCDistal"),
                  adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
            file = paste0(outputDir, "DMPs_MSS_CC_Proximal_vs_Distal.xlsx"),
            sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
                               title = "Top_50_DMG-associated_Pathways_MSS_CC_Proximal_vs_Distal",
                               displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSS_CC_Proximal_vs_Distal.xlsx"),
            sheetName = "DMG-associated_Pathways", row.names = FALSE)
### DMRCate and plots
### make a design matrix (no intercept: one indicator column per group level)
design <- model.matrix(~0+grp)
### model.matrix prefixes each column name with "grp"; replace them with the
### bare level names so the contrast expressions below resolve against design
colnames(design) <- levels(as.factor(grp))
### make a contrast matrix - one Proximal-vs-Distal contrast per stratum
contrastMat <- makeContrasts(MSIHAAProximal-MSIHAADistal,
                             MSIHCCProximal-MSIHCCDistal,
                             MSSAAProximal-MSSAADistal,
                             MSSCCProximal-MSSCCDistal,
                             levels = design)
### MSIHAAProximal-MSIHAADistal
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
                       arraytype = "450K", fdr = pvalThreshold,
                       annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
                       analysis.type = "differential", design = design,
                       contrasts = TRUE, cont.matrix = contrastMat,
                       coef = "MSIHAAProximal - MSIHAADistal")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
  DMRs <- dmrcate(myAnno, lambda=1000, C=2)
  results.ranges <- extractRanges(DMRs, genome = "hg19")
  ### filter with Stouffer combined p-values and save the DMR info
  if(length(results.ranges) > 0) {
    results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold)]
    results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
    ### BUGFIX: re-check after filtering - if the filters removed every DMR,
    ### 1:length(results.ranges) would be c(1, 0) and the code below would fail
    if(length(results.ranges) > 0) {
      write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
                  file = paste0(outputDir, "DMRs_MSI-H_AA_Proximal_vs_Distal.xlsx"),
                  sheetName = "DMRs", row.names = FALSE)
      idx <- union(which(grp == "MSIHAAProximal"), which(grp == "MSIHAADistal"))
      pheno <- c("skyblue", "pink")
      names(pheno) <- c("MSIHAAProximal", "MSIHAADistal")
      cols <- pheno[grp]
      ### hoist group index lookups out of the loop; sample.int indexing draws
      ### identically to sample(g, k) for length(g) >= 2 but is safe at length 1
      g1 <- which(cols[idx] == pheno[1])
      g2 <- which(cols[idx] == pheno[2])
      min_num <- min(dmrPrintSampleNum, length(g1), length(g2))
      set.seed(1234)  # reproducible per-plot sample selection
      for(i in seq_len(min(dmrPrintNum, length(results.ranges)))) {
        png(paste0(outputDir, "DMR", i, "_MSI-H_AA_Proximal_vs_Distal.png"), width = 1800, height = 1000)
        DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
                 arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
                 samps = union(g1[sample.int(length(g1), min_num)],
                               g2[sample.int(length(g2), min_num)]))
        dev.off()
      }
    }
  }
}
### MSIHCCProximal-MSIHCCDistal
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
                       arraytype = "450K", fdr = pvalThreshold,
                       annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
                       analysis.type = "differential", design = design,
                       contrasts = TRUE, cont.matrix = contrastMat,
                       coef = "MSIHCCProximal - MSIHCCDistal")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
  DMRs <- dmrcate(myAnno, lambda=1000, C=2)
  results.ranges <- extractRanges(DMRs, genome = "hg19")
  ### filter with Stouffer combined p-values and save the DMR info
  if(length(results.ranges) > 0) {
    results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold)]
    results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
    ### BUGFIX: re-check after filtering (see note in the first section above)
    if(length(results.ranges) > 0) {
      write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
                  file = paste0(outputDir, "DMRs_MSI-H_CC_Proximal_vs_Distal.xlsx"),
                  sheetName = "DMRs", row.names = FALSE)
      idx <- union(which(grp == "MSIHCCProximal"), which(grp == "MSIHCCDistal"))
      pheno <- c("skyblue", "pink")
      names(pheno) <- c("MSIHCCProximal", "MSIHCCDistal")
      cols <- pheno[grp]
      g1 <- which(cols[idx] == pheno[1])
      g2 <- which(cols[idx] == pheno[2])
      min_num <- min(dmrPrintSampleNum, length(g1), length(g2))
      set.seed(1234)
      for(i in seq_len(min(dmrPrintNum, length(results.ranges)))) {
        png(paste0(outputDir, "DMR", i, "_MSI-H_CC_Proximal_vs_Distal.png"), width = 1800, height = 1000)
        DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
                 arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
                 samps = union(g1[sample.int(length(g1), min_num)],
                               g2[sample.int(length(g2), min_num)]))
        dev.off()
      }
    }
  }
}
### MSSAAProximal-MSSAADistal
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
                       arraytype = "450K", fdr = pvalThreshold,
                       annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
                       analysis.type = "differential", design = design,
                       contrasts = TRUE, cont.matrix = contrastMat,
                       coef = "MSSAAProximal - MSSAADistal")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
  DMRs <- dmrcate(myAnno, lambda=1000, C=2)
  results.ranges <- extractRanges(DMRs, genome = "hg19")
  ### filter with Stouffer combined p-values and save the DMR info
  if(length(results.ranges) > 0) {
    results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold)]
    results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
    ### BUGFIX: re-check after filtering (see note in the first section above)
    if(length(results.ranges) > 0) {
      write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
                  file = paste0(outputDir, "DMRs_MSS_AA_Proximal_vs_Distal.xlsx"),
                  sheetName = "DMRs", row.names = FALSE)
      idx <- union(which(grp == "MSSAAProximal"), which(grp == "MSSAADistal"))
      pheno <- c("skyblue", "pink")
      names(pheno) <- c("MSSAAProximal", "MSSAADistal")
      cols <- pheno[grp]
      g1 <- which(cols[idx] == pheno[1])
      g2 <- which(cols[idx] == pheno[2])
      min_num <- min(dmrPrintSampleNum, length(g1), length(g2))
      set.seed(1234)
      for(i in seq_len(min(dmrPrintNum, length(results.ranges)))) {
        png(paste0(outputDir, "DMR", i, "_MSS_AA_Proximal_vs_Distal.png"), width = 1800, height = 1000)
        DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
                 arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
                 samps = union(g1[sample.int(length(g1), min_num)],
                               g2[sample.int(length(g2), min_num)]))
        dev.off()
      }
    }
  }
}
### MSSCCProximal-MSSCCDistal
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
                       arraytype = "450K", fdr = pvalThreshold,
                       annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
                       analysis.type = "differential", design = design,
                       contrasts = TRUE, cont.matrix = contrastMat,
                       coef = "MSSCCProximal - MSSCCDistal")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
  DMRs <- dmrcate(myAnno, lambda=1000, C=2)
  results.ranges <- extractRanges(DMRs, genome = "hg19")
  ### filter with Stouffer combined p-values and save the DMR info
  if(length(results.ranges) > 0) {
    results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold)]
    results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
    ### BUGFIX: re-check after filtering (see note in the first section above)
    if(length(results.ranges) > 0) {
      write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
                  file = paste0(outputDir, "DMRs_MSS_CC_Proximal_vs_Distal.xlsx"),
                  sheetName = "DMRs", row.names = FALSE)
      idx <- union(which(grp == "MSSCCProximal"), which(grp == "MSSCCDistal"))
      pheno <- c("skyblue", "pink")
      names(pheno) <- c("MSSCCProximal", "MSSCCDistal")
      cols <- pheno[grp]
      g1 <- which(cols[idx] == pheno[1])
      g2 <- which(cols[idx] == pheno[2])
      min_num <- min(dmrPrintSampleNum, length(g1), length(g2))
      set.seed(1234)
      for(i in seq_len(min(dmrPrintNum, length(results.ranges)))) {
        png(paste0(outputDir, "DMR", i, "_MSS_CC_Proximal_vs_Distal.png"), width = 1800, height = 1000)
        DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
                 arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
                 samps = union(g1[sample.int(length(g1), min_num)],
                               g2[sample.int(length(g2), min_num)]))
        dev.off()
      }
    }
  }
}
### set sample groups for DMA - predicted race
### row-subset clinicalInfo_640 by the beta-matrix column names so the group
### labels line up sample-for-sample with the methylation data
grp <- clinicalInfo_640[colnames(normB$beta),"MSI_RACE_Status2"]
grp[which(grp == "MSI-H_AA")] <- "MSIHAA"
grp[which(grp == "MSI-H_CC")] <- "MSIHCC"
grp[which(grp == "MSS_AA")] <- "MSSAA"
grp[which(grp == "MSS_CC")] <- "MSSCC"
grp[which(is.na(grp))] <- "NOTHING"
### BUGFIX: subset TUMOR_LOCATION with the same sample ordering used for grp;
### the original clinicalInfo_640$TUMOR_LOCATION is in data-frame row order and
### can be misaligned with colnames(normB$beta)
grp <- paste0(grp, clinicalInfo_640[colnames(normB$beta), "TUMOR_LOCATION"])
### Pairwise DMP analyses, genetically *predicted* race groups (MSI_RACE_Status2),
### Proximal vs Distal within each stratum; same pipeline as the self-reported runs.
### MSIHAAProximal-MSIHAADistal
### differentially methylated positions ([[1]] = the single requested comparison)
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSIHAAProximal", "MSIHAADistal"),
                  adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
            file = paste0(outputDir, "DMPs_MSI-H_AA_predicted_Proximal_vs_Distal.xlsx"),
            sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
### keep gene-annotated probes, map symbols to IDs via gs2eg, drop unmapped
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
                               title = "Top_50_DMG-associated_Pathways_MSI-H_AA_predicted_Proximal_vs_Distal",
                               displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSI-H_AA_predicted_Proximal_vs_Distal.xlsx"),
            sheetName = "DMG-associated_Pathways", row.names = FALSE)
### MSIHCCProximal-MSIHCCDistal
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSIHCCProximal", "MSIHCCDistal"),
                  adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
            file = paste0(outputDir, "DMPs_MSI-H_CC_predicted_Proximal_vs_Distal.xlsx"),
            sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
                               title = "Top_50_DMG-associated_Pathways_MSI-H_CC_predicted_Proximal_vs_Distal",
                               displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSI-H_CC_predicted_Proximal_vs_Distal.xlsx"),
            sheetName = "DMG-associated_Pathways", row.names = FALSE)
### MSSAAProximal-MSSAADistal
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSSAAProximal", "MSSAADistal"),
                  adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
            file = paste0(outputDir, "DMPs_MSS_AA_predicted_Proximal_vs_Distal.xlsx"),
            sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
                               title = "Top_50_DMG-associated_Pathways_MSS_AA_predicted_Proximal_vs_Distal",
                               displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSS_AA_predicted_Proximal_vs_Distal.xlsx"),
            sheetName = "DMG-associated_Pathways", row.names = FALSE)
### MSSCCProximal-MSSCCDistal
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("MSSCCProximal", "MSSCCDistal"),
                  adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
            file = paste0(outputDir, "DMPs_MSS_CC_predicted_Proximal_vs_Distal.xlsx"),
            sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
                               title = "Top_50_DMG-associated_Pathways_MSS_CC_predicted_Proximal_vs_Distal",
                               displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_MSS_CC_predicted_Proximal_vs_Distal.xlsx"),
            sheetName = "DMG-associated_Pathways", row.names = FALSE)
### DMRCate and plots
### make a design matrix (no intercept: one indicator column per group level)
design <- model.matrix(~0+grp)
### replace model.matrix's "grp"-prefixed column names with the bare level
### names so the contrast expressions below resolve against design
colnames(design) <- levels(as.factor(grp))
### make a contrast matrix - one Proximal-vs-Distal contrast per stratum
contrastMat <- makeContrasts(MSIHAAProximal-MSIHAADistal,
                             MSIHCCProximal-MSIHCCDistal,
                             MSSAAProximal-MSSAADistal,
                             MSSCCProximal-MSSCCDistal,
                             levels = design)
### MSIHAAProximal-MSIHAADistal (predicted race)
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
                       arraytype = "450K", fdr = pvalThreshold,
                       annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
                       analysis.type = "differential", design = design,
                       contrasts = TRUE, cont.matrix = contrastMat,
                       coef = "MSIHAAProximal - MSIHAADistal")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
  DMRs <- dmrcate(myAnno, lambda=1000, C=2)
  results.ranges <- extractRanges(DMRs, genome = "hg19")
  ### filter with Stouffer combined p-values and save the DMR info
  if(length(results.ranges) > 0) {
    results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold)]
    results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
    ### BUGFIX: re-check after filtering - if the filters removed every DMR,
    ### 1:length(results.ranges) would be c(1, 0) and the code below would fail
    if(length(results.ranges) > 0) {
      write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
                  file = paste0(outputDir, "DMRs_MSI-H_AA_predicted_Proximal_vs_Distal.xlsx"),
                  sheetName = "DMRs", row.names = FALSE)
      idx <- union(which(grp == "MSIHAAProximal"), which(grp == "MSIHAADistal"))
      pheno <- c("skyblue", "pink")
      names(pheno) <- c("MSIHAAProximal", "MSIHAADistal")
      cols <- pheno[grp]
      ### hoist group index lookups out of the loop; sample.int indexing draws
      ### identically to sample(g, k) for length(g) >= 2 but is safe at length 1
      g1 <- which(cols[idx] == pheno[1])
      g2 <- which(cols[idx] == pheno[2])
      min_num <- min(dmrPrintSampleNum, length(g1), length(g2))
      set.seed(1234)  # reproducible per-plot sample selection
      for(i in seq_len(min(dmrPrintNum, length(results.ranges)))) {
        png(paste0(outputDir, "DMR", i, "_MSI-H_AA_predicted_Proximal_vs_Distal.png"), width = 1800, height = 1000)
        DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
                 arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
                 samps = union(g1[sample.int(length(g1), min_num)],
                               g2[sample.int(length(g2), min_num)]))
        dev.off()
      }
    }
  }
}
### MSIHCCProximal-MSIHCCDistal (predicted race)
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
                       arraytype = "450K", fdr = pvalThreshold,
                       annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
                       analysis.type = "differential", design = design,
                       contrasts = TRUE, cont.matrix = contrastMat,
                       coef = "MSIHCCProximal - MSIHCCDistal")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
  DMRs <- dmrcate(myAnno, lambda=1000, C=2)
  results.ranges <- extractRanges(DMRs, genome = "hg19")
  ### filter with Stouffer combined p-values and save the DMR info
  if(length(results.ranges) > 0) {
    results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold)]
    results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
    ### BUGFIX: re-check after filtering (see note in the first section above)
    if(length(results.ranges) > 0) {
      write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
                  file = paste0(outputDir, "DMRs_MSI-H_CC_predicted_Proximal_vs_Distal.xlsx"),
                  sheetName = "DMRs", row.names = FALSE)
      idx <- union(which(grp == "MSIHCCProximal"), which(grp == "MSIHCCDistal"))
      pheno <- c("skyblue", "pink")
      names(pheno) <- c("MSIHCCProximal", "MSIHCCDistal")
      cols <- pheno[grp]
      g1 <- which(cols[idx] == pheno[1])
      g2 <- which(cols[idx] == pheno[2])
      min_num <- min(dmrPrintSampleNum, length(g1), length(g2))
      set.seed(1234)
      for(i in seq_len(min(dmrPrintNum, length(results.ranges)))) {
        png(paste0(outputDir, "DMR", i, "_MSI-H_CC_predicted_Proximal_vs_Distal.png"), width = 1800, height = 1000)
        DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
                 arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
                 samps = union(g1[sample.int(length(g1), min_num)],
                               g2[sample.int(length(g2), min_num)]))
        dev.off()
      }
    }
  }
}
### MSSAAProximal-MSSAADistal (predicted race)
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
                       arraytype = "450K", fdr = pvalThreshold,
                       annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
                       analysis.type = "differential", design = design,
                       contrasts = TRUE, cont.matrix = contrastMat,
                       coef = "MSSAAProximal - MSSAADistal")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
  DMRs <- dmrcate(myAnno, lambda=1000, C=2)
  results.ranges <- extractRanges(DMRs, genome = "hg19")
  ### filter with Stouffer combined p-values and save the DMR info
  if(length(results.ranges) > 0) {
    results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold)]
    results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
    ### BUGFIX: re-check after filtering (see note in the first section above)
    if(length(results.ranges) > 0) {
      write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
                  file = paste0(outputDir, "DMRs_MSS_AA_predicted_Proximal_vs_Distal.xlsx"),
                  sheetName = "DMRs", row.names = FALSE)
      idx <- union(which(grp == "MSSAAProximal"), which(grp == "MSSAADistal"))
      pheno <- c("skyblue", "pink")
      names(pheno) <- c("MSSAAProximal", "MSSAADistal")
      cols <- pheno[grp]
      g1 <- which(cols[idx] == pheno[1])
      g2 <- which(cols[idx] == pheno[2])
      min_num <- min(dmrPrintSampleNum, length(g1), length(g2))
      set.seed(1234)
      for(i in seq_len(min(dmrPrintNum, length(results.ranges)))) {
        png(paste0(outputDir, "DMR", i, "_MSS_AA_predicted_Proximal_vs_Distal.png"), width = 1800, height = 1000)
        DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
                 arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
                 samps = union(g1[sample.int(length(g1), min_num)],
                               g2[sample.int(length(g2), min_num)]))
        dev.off()
      }
    }
  }
}
### MSSCCProximal-MSSCCDistal (predicted race)
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
                       arraytype = "450K", fdr = pvalThreshold,
                       annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
                       analysis.type = "differential", design = design,
                       contrasts = TRUE, cont.matrix = contrastMat,
                       coef = "MSSCCProximal - MSSCCDistal")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
  DMRs <- dmrcate(myAnno, lambda=1000, C=2)
  results.ranges <- extractRanges(DMRs, genome = "hg19")
  ### filter with Stouffer combined p-values and save the DMR info
  if(length(results.ranges) > 0) {
    results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold)]
    results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
    ### BUGFIX: re-check after filtering (see note in the first section above)
    if(length(results.ranges) > 0) {
      write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
                  file = paste0(outputDir, "DMRs_MSS_CC_predicted_Proximal_vs_Distal.xlsx"),
                  sheetName = "DMRs", row.names = FALSE)
      idx <- union(which(grp == "MSSCCProximal"), which(grp == "MSSCCDistal"))
      pheno <- c("skyblue", "pink")
      names(pheno) <- c("MSSCCProximal", "MSSCCDistal")
      cols <- pheno[grp]
      g1 <- which(cols[idx] == pheno[1])
      g2 <- which(cols[idx] == pheno[2])
      min_num <- min(dmrPrintSampleNum, length(g1), length(g2))
      set.seed(1234)
      for(i in seq_len(min(dmrPrintNum, length(results.ranges)))) {
        png(paste0(outputDir, "DMR", i, "_MSS_CC_predicted_Proximal_vs_Distal.png"), width = 1800, height = 1000)
        DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
                 arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
                 samps = union(g1[sample.int(length(g1), min_num)],
                               g2[sample.int(length(g2), min_num)]))
        dev.off()
      }
    }
  }
}
### set sample groups for DMA - global age: Young vs Old
### label each sample by diagnosis age: < 50 -> "YOUNG", >= 50 -> "OLD",
### missing age -> "NOTHING" (checked first so the comparisons never see NA)
age <- clinicalInfo_640[colnames(normB$beta), "Diagnosis Age"]
grp <- ifelse(is.na(age), "NOTHING", ifelse(age < 50, "YOUNG", "OLD"))
### differentially methylated positions, Young vs Old across all samples
### (champ.DMP returns a list of pairwise tables; [[1]] = the one comparison)
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("YOUNG", "OLD"),
                  adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
            file = paste0(outputDir, "DMPs_Young_vs_Old.xlsx"),
            sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
### keep gene-annotated probes, map symbols to IDs via gs2eg, drop unmapped
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
                               title = "Top_50_DMG-associated_Pathways_Young_vs_Old",
                               displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_Young_vs_Old.xlsx"),
            sheetName = "DMG-associated_Pathways", row.names = FALSE)
### DMRCate and plots
### make a design matrix (no intercept: one indicator column per group level)
design <- model.matrix(~0+grp)
### replace model.matrix's "grp"-prefixed column names with the bare level
### names so the YOUNG-OLD contrast below resolves against design
colnames(design) <- levels(as.factor(grp))
### make a contrast matrix
contrastMat <- makeContrasts(YOUNG-OLD,
                             levels = design)
### annotation for DMR(Differentially Methylated Region)s, Young vs Old
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
                       arraytype = "450K", fdr = pvalThreshold,
                       annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
                       analysis.type = "differential", design = design,
                       contrasts = TRUE, cont.matrix = contrastMat,
                       coef = "YOUNG - OLD")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
  DMRs <- dmrcate(myAnno, lambda=1000, C=2)
  results.ranges <- extractRanges(DMRs, genome = "hg19")
  ### filter with Stouffer combined p-values and save the DMR info
  if(length(results.ranges) > 0) {
    results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold)]
    results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
    ### BUGFIX: re-check after filtering - if the filters removed every DMR,
    ### 1:length(results.ranges) would be c(1, 0) and the code below would fail
    if(length(results.ranges) > 0) {
      write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
                  file = paste0(outputDir, "DMRs_Young_vs_Old.xlsx"),
                  sheetName = "DMRs", row.names = FALSE)
      idx <- union(which(grp == "YOUNG"), which(grp == "OLD"))
      pheno <- c("skyblue", "pink")
      names(pheno) <- c("YOUNG", "OLD")
      cols <- pheno[grp]
      ### hoist group index lookups out of the loop; sample.int indexing draws
      ### identically to sample(g, k) for length(g) >= 2 but is safe at length 1
      g1 <- which(cols[idx] == pheno[1])
      g2 <- which(cols[idx] == pheno[2])
      min_num <- min(dmrPrintSampleNum, length(g1), length(g2))
      set.seed(1234)  # reproducible per-plot sample selection
      for(i in seq_len(min(dmrPrintNum, length(results.ranges)))) {
        png(paste0(outputDir, "DMR", i, "_Young_vs_Old.png"), width = 1800, height = 1000)
        DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
                 arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
                 samps = union(g1[sample.int(length(g1), min_num)],
                               g2[sample.int(length(g2), min_num)]))
        dev.off()
      }
    }
  }
}
### set sample groups for DMA - global race: AA vs CC (self-reported)
grp <- clinicalInfo_640[colnames(normB$beta),"Race Category"]
grp[which(grp == "BLACK OR AFRICAN AMERICAN")] <- "AA"
grp[which(grp == "WHITE")] <- "CC"
grp[which(is.na(grp))] <- "NOTHING"
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("AA", "CC"),
adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
file = paste0(outputDir, "DMPs_AA_vs_CC.xlsx"),
sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
title = "Top_50_DMG-associated_Pathways_AA_vs_CC",
displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_AA_vs_CC.xlsx"),
sheetName = "DMG-associated_Pathways", row.names = FALSE)
### DMRCate and plots
### make a design matrix
design <- model.matrix(~0+grp)
colnames(design) <- levels(as.factor(grp))
### make a contrast matrix
contrastMat <- makeContrasts(AA-CC,
levels = design)
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
arraytype = "450K", fdr = pvalThreshold,
annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
analysis.type = "differential", design = design,
contrasts = TRUE, cont.matrix = contrastMat,
coef = "AA - CC")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
### get DMRs
DMRs <- dmrcate(myAnno, lambda=1000, C=2)
### extract ranges from DMRs
results.ranges <- extractRanges(DMRs, genome = "hg19")
### filter with Stouffer combined p-values and save the DMR info
if(length(results.ranges) > 0) {
results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold),]
results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
file = paste0(outputDir, "DMRs_AA_vs_CC.xlsx"),
sheetName = "DMRs", row.names = FALSE)
idx <- union(which(grp == "AA"), which(grp == "CC"))
pheno <- c("skyblue", "pink")
names(pheno) <- c("AA", "CC")
cols <- pheno[grp]
min_num <- min(dmrPrintSampleNum, min(length(which(cols[idx] == pheno[1])), length(which(cols[idx] == pheno[2]))))
set.seed(1234)
for(i in 1:min(dmrPrintNum, length(results.ranges))) {
png(paste0(outputDir, "DMR", i, "_AA_vs_CC.png"), width = 1800, height = 1000)
DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
samps = union(sample(which(cols[idx] == pheno[1]), min_num),
sample(which(cols[idx] == pheno[2]), min_num)))
dev.off()
}
}
}
### set sample groups for DMA - global race: AA vs CC (predicted)
grp <- clinicalInfo_640[colnames(normB$beta),"Prediction_Filtered"]
grp[which(grp == "African")] <- "AA"
grp[which(grp == "Caucasian")] <- "CC"
grp[which(is.na(grp))] <- "NOTHING"
### differentially methylated positions
dmps <- champ.DMP(beta = normB$beta, pheno = grp, compare.group = c("AA", "CC"),
adjPVal = pvalThreshold, adjust.method = "BH", arraytype = "450K")[[1]]
write.xlsx2(data.frame(CpG_Site=rownames(dmps), dmps),
file = paste0(outputDir, "DMPs_AA_vs_CC_predicted.xlsx"),
sheetName = "DMPs", row.names = FALSE)
### pathway analysis with the DMPs
dm_genes <- as.character(dmps$gene)
dm_genes <- dm_genes[which(dm_genes != "")]
dm_genes <- gs2eg[dm_genes]
dm_genes <- dm_genes[which(!is.na(dm_genes))]
pathways <- pathwayAnalysis_CP(geneList = dm_genes, org = "human", database = "GO", imgPrint = TRUE,
title = "Top_50_DMG-associated_Pathways_AA_vs_CC_predicted",
displayNum = 50, dir = outputDir)
write.xlsx2(pathways, file = paste0(outputDir, "go_DMG-associated_Pathways_AA_vs_CC_predicted.xlsx"),
sheetName = "DMG-associated_Pathways", row.names = FALSE)
### DMRCate and plots
### make a design matrix
design <- model.matrix(~0+grp)
colnames(design) <- levels(as.factor(grp))
### make a contrast matrix
contrastMat <- makeContrasts(AA-CC,
levels = design)
### annotation for DMR(Differentially Methylated Region)s
myAnno <- cpg.annotate(object = normB$beta, datatype = "array", what = "Beta",
arraytype = "450K", fdr = pvalThreshold,
annotation=c(array = "IlluminaHumanMethylation450k", annotation = "ilmn12.hg19"),
analysis.type = "differential", design = design,
contrasts = TRUE, cont.matrix = contrastMat,
coef = "AA - CC")
### get DMRs and save
if(length(which(myAnno$is.sig == TRUE)) > 0) {
### get DMRs
DMRs <- dmrcate(myAnno, lambda=1000, C=2)
### extract ranges from DMRs
results.ranges <- extractRanges(DMRs, genome = "hg19")
### filter with Stouffer combined p-values and save the DMR info
if(length(results.ranges) > 0) {
results.ranges <- results.ranges[which(results.ranges$Stouffer < pvalThreshold),]
results.ranges <- results.ranges[which(results.ranges$no.cpgs > cpg_cutoff)]
write.xlsx2(data.frame(DMR=paste0(rep("DMR", length(results.ranges)), 1:length(results.ranges)), results.ranges),
file = paste0(outputDir, "DMRs_AA_vs_CC_predicted.xlsx"),
sheetName = "DMRs", row.names = FALSE)
idx <- union(which(grp == "AA"), which(grp == "CC"))
pheno <- c("skyblue", "pink")
names(pheno) <- c("AA", "CC")
cols <- pheno[grp]
min_num <- min(dmrPrintSampleNum, min(length(which(cols[idx] == pheno[1])), length(which(cols[idx] == pheno[2]))))
set.seed(1234)
for(i in 1:min(dmrPrintNum, length(results.ranges))) {
png(paste0(outputDir, "DMR", i, "_AA_vs_CC_predicted.png"), width = 1800, height = 1000)
DMR.plot(ranges=results.ranges, dmr=i, CpGs=normB$beta[,idx], phen.col=cols[idx], what = "Beta",
arraytype = "450K", pch=19, toscale=TRUE, plotmedians=TRUE, genome="hg19",
samps = union(sample(which(cols[idx] == pheno[1]), min_num),
sample(which(cols[idx] == pheno[2]), min_num)))
dev.off()
}
}
}
### write out the normalized beta table
write.table(data.frame(CpG_site=rownames(normB$beta), normB$beta,
stringsAsFactors = FALSE, check.names = FALSE),
file = paste0(outputDir, "norm_beta_tcga_coad_read.txt"),
sep = "\t", row.names = FALSE)
### generate QC plots with the normalized beta
png(paste0(outputDir, "BMIQ_beta_qc_plots.png"), width = 2000, height = 1000, res = 120)
par(mfrow=c(1,2))
colors = rainbow(length(unique(normB$pd$Project)))
names(colors) = unique(as.character(normB$pd$Project))
plotMDS(normB$beta, top = 1000, pch = 19, col = colors[as.character(normB$pd$Project)],
xlab = "Dimension1", ylab = "Dimension2", main = "MDS_BMIQ_Beta")
legend("topright", legend = unique(as.character(normB$pd$Project)),
col = colors[unique(as.character(normB$pd$Project))], pch = 19,
title = "Sample Groups", cex = 0.7)
plot(density(as.numeric(normB$beta[,1])), main = "Density_BMIQ_Beta", ylim = c(0, 6),
col = colors[as.character(normB$pd$Project[1])])
for(i in 2:ncol(normB$beta)) {
lines(density(as.numeric(normB$beta[,i])), col = colors[as.character(normB$pd$Project[i])])
}
legend("topright", legend = unique(as.character(normB$pd$Project)),
col = colors[unique(as.character(normB$pd$Project))], pch = 19,
title = "Sample Groups", cex = 0.7)
dev.off()
}
|
6c782790652401418b069ed3adaf59a171f857d4
|
f13ad56d41c9daa694959aded6fe1a207dc6660e
|
/plot1.R
|
091b089cb3a309c70ec475d6adc871295a3a3561
|
[] |
no_license
|
Cuffnela/ExData_Plotting1
|
256d40adb1d4b873111d337bb9422da4fbbe90a4
|
d12958e7498f7cdd9fad7a23b8d70225c6841fe9
|
refs/heads/master
| 2020-10-02T06:04:13.926184
| 2019-12-13T02:08:46
| 2019-12-13T02:08:46
| 227,717,864
| 0
| 0
| null | 2019-12-12T23:52:17
| 2019-12-12T23:52:16
| null |
UTF-8
|
R
| false
| false
| 1,387
|
r
|
plot1.R
|
################################################################################
# #
# Coursera Project #
# Exploratory Data Analysis Week 1 #
# Data Science with Johns Hopkins #
# #
################################################################################
# read data in from file
################################################################################
## Load only the observations for 2007-02-01 / 2007-02-02 using a
## pre-computed row offset, then read the header row separately so the
## original column names can be re-attached.
data <- read.table("household_power_consumption.txt",
                   colClasses = "character", sep = ";", header = FALSE,
                   skip = 66637, nrows = 2879)
vars <- read.table("household_power_consumption.txt",
                   colClasses = "character", sep = ";", header = FALSE,
                   nrows = 1)
names(data) <- vars

################################################################################
# Requirements
#   - PNG file, 480px by 480px (png() default size)
#   - Plot 1: histogram of Global Active Power (kilowatts) with red bars
################################################################################
png(filename = "plot1.png")
hist(as.numeric(data$Global_active_power), col = "red",
     xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")
dev.off()
|
021f8573c41964a73f834bfe25eb954dc619b837
|
6464efbccd76256c3fb97fa4e50efb5d480b7c8c
|
/cran/paws.security.identity/man/wafregional_delete_permission_policy.Rd
|
f92a115d435cb0ee2b6358e497a0989be0de6b9a
|
[
"Apache-2.0"
] |
permissive
|
johnnytommy/paws
|
019b410ad8d4218199eb7349eb1844864bd45119
|
a371a5f2207b534cf60735e693c809bd33ce3ccf
|
refs/heads/master
| 2020-09-14T23:09:23.848860
| 2020-04-06T21:49:17
| 2020-04-06T21:49:17
| 223,286,996
| 1
| 0
|
NOASSERTION
| 2019-11-22T00:29:10
| 2019-11-21T23:56:19
| null |
UTF-8
|
R
| false
| true
| 800
|
rd
|
wafregional_delete_permission_policy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wafregional_operations.R
\name{wafregional_delete_permission_policy}
\alias{wafregional_delete_permission_policy}
\title{Permanently deletes an IAM policy from the specified RuleGroup}
\usage{
wafregional_delete_permission_policy(ResourceArn)
}
\arguments{
\item{ResourceArn}{[required] The Amazon Resource Name (ARN) of the RuleGroup from which you want to
delete the policy.
The user making the request must be the owner of the RuleGroup.}
}
\description{
Permanently deletes an IAM policy from the specified RuleGroup.
}
\details{
The user making the request must be the owner of the RuleGroup.
}
\section{Request syntax}{
\preformatted{svc$delete_permission_policy(
ResourceArn = "string"
)
}
}
\keyword{internal}
|
05dcdfff999819401e1b2f953537aa82e75e420c
|
b7b104e4f7b87ebca206c0156e3c06c5e1689b11
|
/gamlss - heathcote.R
|
e50bf540f0d0bf36812e4ec498fe1ea2f51e9c9c
|
[] |
no_license
|
Bramtimm/Thesis-project
|
3712457ac42186b2f31b066dda26c59b43b18495
|
cc97e57e17dcd5ffc561a94baaa40f44e1a5f985
|
refs/heads/master
| 2020-04-05T08:42:54.922057
| 2019-01-24T15:38:25
| 2019-01-24T15:38:25
| 156,726,266
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,933
|
r
|
gamlss - heathcote.R
|
# GAMLSS object for WALD
dSwald <- function(x, mu = mu, sigma = sigma, nu = nu, log = FALSE){
  # Density of the shifted Wald (3-parameter inverse Gaussian):
  # thin wrapper around seqmodels::dinvgauss (kappa = mu, xi = sigma,
  # tau = nu shift), reshaping the result to match the input's dimensions.
  fy <- seqmodels::dinvgauss(t = x, kappa = mu, xi = sigma, tau = nu,
                             sigma = 1, ln = log)
  if (is.matrix(x)) {
    fy <- matrix(fy, nrow = nrow(x), ncol = ncol(x))
  } else if (is.array(x)) {
    fy <- array(fy, dim = dim(x))
  }
  fy
}
pSwald <- function(q, mu = mu, sigma = sigma, nu = nu, lower.tail = TRUE, log.p = FALSE){
  # CDF of the shifted Wald (3-parameter inverse Gaussian), wrapping
  # seqmodels::pinvgauss (kappa = mu, xi = sigma, tau = nu shift).
  # The result is reshaped to match the dimensions of `q`.
  cdf <- seqmodels::pinvgauss(t = q, kappa = mu, xi = sigma, tau = nu,
                              sigma = as.numeric(c(1)),
                              ln = log.p, lower_tail = lower.tail)
  if (is.matrix(q)) {
    cdf <- matrix(cdf, nrow = nrow(q), ncol = ncol(q))
  } else if (is.array(q)) {
    # BUG FIX: was `is.array(x)` / `array(fy, dim = dim(q))` with `x` and
    # `fy` undefined in this function -> runtime error for array input.
    cdf <- array(cdf, dim = dim(q))
  }
  cdf
}
qSwald <- function(p, mu = mu, sigma = sigma, nu = nu, lower.tail = TRUE, log.p = FALSE){
  # Quantile function of the shifted Wald, wrapping seqmodels::qinvgauss.
  # The result is reshaped to match the dimensions of `p`.
  q <- seqmodels::qinvgauss(p = p, kappa = mu, xi = sigma, tau = nu,
                            sigma = as.numeric(c(1)),
                            bounds = 3, em_stop = 20, err = 1e-08)
  if (is.matrix(p)) {
    q <- matrix(q, nrow = nrow(p), ncol = ncol(p))
  } else if (is.array(p)) {
    # BUG FIX: was `array(fy, dim = dim(p))` with `fy` undefined here.
    q <- array(q, dim = dim(p))
  }
  q
}
rSwald <- function(n, mu = mu, sigma = sigma, nu = nu){
  # Random deviates from the shifted Wald, wrapping seqmodels::rinvgauss.
  # If `n` is itself a matrix/array, the draws are reshaped to match it.
  r <- seqmodels::rinvgauss(n, kappa = mu, xi = sigma, tau = nu,
                            sigma = as.numeric(c(1)))
  if (is.matrix(n)) {
    r <- matrix(r, nrow = nrow(n), ncol = ncol(n))
  } else if (is.array(n)) {
    # BUG FIX: was `array(fy, dim = dim(n))` with `fy` undefined here.
    r <- array(r, dim = dim(n))
  }
  r
}
# gamlss.family constructor for the shifted Wald (3-parameter inverse
# Gaussian) distribution:
#   mu    - drift-related parameter   (kappa in seqmodels::dinvgauss)
#   sigma - threshold-related parameter (xi)
#   nu    - shift / non-decision time  (tau)
# All link functions default to "log" since all three parameters must be
# strictly positive. Relies on dSwald/pSwald defined in this file and on
# gamlss' checklink()/rqres() machinery.
Swald <- function (mu.link = "log", sigma.link = "log", nu.link = "log")
{
  # Validate the requested link functions against the allowed choices.
  mstats <- checklink("mu.link", "Shifted Wald", substitute(mu.link),
                      c("1/mu^2", "inverse", "log", "identity", "own"))
  dstats <- checklink("sigma.link", "Shifted Wald", substitute(sigma.link),
                      c("inverse", "log", "identity", "own"))
  vstats <- checklink("nu.link", "Shifted Wald", substitute(nu.link),
                      c("inverse", "log", "identity", "own"))
  structure(list(family = c("Swald", "Shifted wald"),
                 parameters = list(mu = TRUE, sigma = TRUE, nu = TRUE),
                 nopar = 3,
                 type = "Continuous",
                 mu.link = as.character(substitute(mu.link)),
                 sigma.link = as.character(substitute(sigma.link)),
                 nu.link = as.character(substitute(nu.link)),
                 mu.linkfun = mstats$linkfun,
                 sigma.linkfun = dstats$linkfun,
                 nu.linkfun = vstats$linkfun,
                 mu.linkinv = mstats$linkinv,
                 sigma.linkinv = dstats$linkinv,
                 nu.linkinv = vstats$linkinv,
                 mu.dr = mstats$mu.eta,
                 sigma.dr = dstats$mu.eta,
                 nu.dr = vstats$mu.eta,
                 # First derivative of the log-likelihood w.r.t. mu.
                 dldm = function(y, mu, sigma, nu){
                   dldm <- sigma + 1/mu - sigma*(1/(y - nu))
                   dldm
                 },
                 # Second derivative w.r.t. mu.
                 d2ldm2 = function(y, mu, nu){
                   d2ldm2 <- -1/mu^2 - 1/(y - nu)
                   d2ldm2
                 },
                 # First derivative w.r.t. sigma.
                 dldd = function(y, mu, sigma, nu){
                   dldd <- mu - sigma*(y - nu)
                   dldd
                 },
                 # Second derivative w.r.t. sigma.
                 d2ldd2 = function(y, nu){
                   d2ldd2 <- nu - y
                   d2ldd2
                 },
                 # First derivative w.r.t. nu (the shift).
                 dldv = function(y, mu, sigma, nu){
                   dldv <- (3/2)*(1/(y - nu)) - (mu^2/2)*(1/(y - nu)^2) + (sigma^2/2)
                   # BUG FIX: originally returned `d1dv` (digit one), an
                   # undefined object -> "object 'd1dv' not found" at fit time.
                   dldv
                 },
                 # Second derivative w.r.t. nu.
                 d2ldv2 = function(y, mu, nu){
                   d2ldv2 <- ((3/2) - mu^2*(1/(y - nu)))*(1/(y - nu)^2)
                   d2ldv2
                 },
                 # Cross derivatives.
                 d2ldmdd = function(y){rep(1, length(y))},
                 d2ldmdv = function(y, mu, nu){-mu/(y - nu)^2},
                 d2ldddv = function(y, sigma){rep(sigma, length(y))},
                 # Global deviance increment: -2 * log-density.
                 G.dev.incr = function(y, mu, sigma, nu, ...) -2 * dSwald(x = y, mu = mu, sigma = sigma, nu = nu, log = TRUE),
                 # (Normalized) quantile residuals via the CDF.
                 rqres = expression(rqres(pfun = "pSwald", type = "Continuous", y = y, mu = mu,
                                          sigma = sigma, nu = nu)),
                 # Starting values for the RS/CG fitting algorithms.
                 mu.initial = expression(mu <- (y + mean(y))/2),
                 sigma.initial = expression(sigma <- sd(y)/(mean(y))^1.5),
                 nu.initial = expression({nu <- rep(min(y), length(y))}),
                 # Parameter/response validity checks (all strictly positive).
                 mu.valid = function(mu) all(mu > 0),
                 sigma.valid = function(sigma) all(sigma > 0),
                 nu.valid = function(nu) all(nu > 0),
                 y.valid = function(y) all(y > 0),
                 # Distribution summaries: E[Y] = mu/sigma + nu.
                 mean = function(mu, sigma, nu) ((mu/sigma) + nu),
                 variance = function(mu, sigma) ((mu/sigma^3)),
                 shift = function(nu) nu),
            class = c("gamlss.family", "family"))
}
|
8a2b56d69b95fd96e5b32c24e1f2fe8353a38469
|
7e1fff52409b817e17f8ce7a7d61298df54c78db
|
/diffMetAnal_functions.R
|
9258e6f39a595ac7f17974ad784f11441c070100
|
[] |
no_license
|
guisantagui/MSK_repo_new
|
fad0b0bce6a7c33358be22eefb4ec5e78879b40f
|
ccc27afe72fb48c7d12f8fd3873a647b601696d3
|
refs/heads/master
| 2021-07-03T23:22:31.328607
| 2021-02-09T23:27:58
| 2021-02-09T23:27:58
| 220,031,177
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,831
|
r
|
diffMetAnal_functions.R
|
# Column-wise rank transform of a metabolomic matrix: each value is replaced
# by its fractional rank within its column, i.e. rank/length in (0, 1]
# (ties take the default "average" rank).
quantNorm <- function(m){
  apply(m, 2, function(col) rank(col) / length(col))
}
# Drops metabolites whose name carries the "_?" ambiguity tag (matched as a
# literal substring, hence fixed = TRUE).
#
# BUG FIX: the original used topDiffMets[-grep("_?", ..., fixed = T)], which
# returns an EMPTY vector when grep() finds no match (x[-integer(0)] selects
# nothing), silently discarding every metabolite. grepl() negation returns
# the input unchanged in that case.
rmAmbig <- function(topDiffMets){
  topDiffMets[!grepl("_?", names(topDiffMets), fixed = TRUE)]
}
# For a vector of KEGG compound identifiers, retrieves the KEGG pathways each
# compound participates in and tabulates how often each pathway occurs. When
# per-cluster value vectors `a` and `b` are also supplied, additionally
# summarises, per pathway, how many compounds are higher in each cluster and
# what fraction of the pathway's compounds were detected.
#
# Arguments:
#   keggidxs - character vector of KEGG compound ids.
#   org      - optional KEGG organism code (e.g. "hsa"); when given, generic
#              "map" pathway ids are translated to organism-specific ones and
#              restricted to that organism's pathway list.
#   a, b     - optional numeric vectors of per-compound values for two
#              clusters; assumed named/ordered to match keggidxs (TODO confirm
#              against callers).
# Returns a list with "Pathways per Metabolite" and "Pathway Counts"; when a
# and b are given, also "Mets & Rel. Pathways" and "Representation of
# Pathways". NOTE(review): requires network access (KEGG REST API via
# KEGGREST), so results depend on the live KEGG database.
getRelatedPaths <- function(keggidxs, org = NULL, a = NULL, b = NULL){
  library("KEGGREST")
  mets <- keggList("compound")
  keggindexes <- list()
  # Organism-specific vs generic pathway catalogue.
  if(is.null(org) == F){
    pathlist <- keggList("pathway", org)
  }else{pathlist <- keggList("pathway")}
  pathways <- list()
  path_counts <- c()
  # One KEGG query per compound: collect its pathways (a placeholder string
  # is stored when the compound maps to no pathway).
  for(j in 1:length(keggidxs)){
    paths <- keggLink("pathway", keggidxs[j])
    if(length(paths) > 0 & is.null(org) == F){
      paths <- sapply(paths, gsub, pattern = "map", replacement = org)
      pathways[[j]] <- paths[paths %in% names(pathlist)]
    }
    else if(length(paths) > 0 & is.null(org) ==T){
      pathways[[j]] <- paths
    }else{pathways[[j]] <- paste("Any pathway found for", keggidxs[j])}
    names(pathways)[j] <- keggidxs[j]
    # Accumulate bare pathway ids (the part after "path:"); placeholder
    # strings have no ":" and are skipped via the is.na() branch.
    for(k in 1:length(pathways[[j]])){
      if(is.na(strsplit(pathways[[j]], ":")[[k]][2])){
        path_counts <- path_counts
      }else{path_counts <- c(path_counts, strsplit(pathways[[j]], ":")[[k]][2])}
    }
  }
  # Frequency table of pathway occurrences, annotated with pathway names.
  path_counts <- as.data.frame(table(path_counts))
  path_counts[, "Pathways"] <- NA
  for(l in 1:length(path_counts[, 1])){
    path_counts[l, 3] <- pathlist[[paste("path:", path_counts[l,1], sep = "")]]
  }
  path_counts <- path_counts[order(path_counts$Freq, decreasing = T), ]
  rownames(path_counts) <- c()
  # Optional per-cluster summary (only when both a and b are supplied).
  if(!is.null(a) & !is.null(b) == T){
    pathdif <- cbind("Cluster 1" = a, "Cluster 2" = b)
    pathdif <- pathdif[match(keggidxs, rownames(pathdif)), ]
    numbPaths <- c()
    for(i in 1:length(keggidxs)){
      numbPaths[i] <- length(pathways[[i]])
    }
    # Replicate each compound row once per related pathway, then attach the
    # pathway id/name columns (columns 4 and 5 below).
    pathdif <- cbind(pathdif, "Number of Related Pathways" = numbPaths)
    pathdif <- pathdif[rep(1:nrow(pathdif), times = pathdif[, 3]), ]
    path_indexes <- c()
    path_names <- c()
    for(i in 1:length(pathways)){
      for(j in 1:length(pathways[[i]])){
        path_indexes <- c(path_indexes, pathways[[i]][j])
        path_names <- c(path_names, pathlist[pathways[[i]][j]])
      }
    }
    path_names[which(is.na(path_names))] <- path_indexes[which(is.na(path_names))]
    pathdif <- cbind.data.frame(pathdif, "Pathway Indexes" = path_indexes)
    pathdif <- cbind.data.frame(pathdif, "Pathway Names" = path_names)
    # Full compound membership of every related pathway (one KEGG query per
    # pathway; organism-specific ids are mapped back to generic "map" ids).
    compPerPath <- list()
    if(!is.null(org) == T){
      for(ii in 1:length(unique(pathdif[, 4]))){
        compPerPath[[ii]] <- keggLink("compound", as.character(unique(sapply(pathdif[, 4], gsub, pattern = org, replacement = "map"))[ii]))
      }
    }
    if(is.null(org) == T){
      for(ii in 1:length(unique(pathdif[, 4]))){
        compPerPath[[ii]] <- keggLink("compound", as.character(unique(pathdif[, 4])[ii]))
      }
    }
    names(compPerPath) <- as.character(unique(pathdif[, 5]))
    # % of each pathway's compounds that appear in the input set, plus the
    # matching compound ids collapsed to one string.
    reprPath <- c()
    comp_in_path <- c()
    for(ii in 1:length(compPerPath)){
      reprPath[ii] <- (length(compPerPath[[ii]][compPerPath[[ii]] %in% paste("cpd:", names(pathways), sep = "")])/length(compPerPath[[ii]]))*100
      comp_in_path[ii] <- paste(compPerPath[[ii]][compPerPath[[ii]] %in% paste("cpd:", names(pathways), sep = "")], collapse = " ")
    }
    names(reprPath) <- names(compPerPath)
    # Per pathway: count compounds higher in cluster 1 vs cluster 2, and the
    # median of the (cluster1 - cluster2) differences.
    pathdif_ord <- pathdif[order(pathdif[, 5]),]
    pos <- c()
    neg <- c()
    medPathDif <- c()
    for(ii in 1:length(unique(path_indexes))){
      submat <- pathdif_ord[which(pathdif_ord[, 4] == unique(path_indexes)[ii]), c(1, 2)]
      pos[ii] <- length(which((submat[, 1] - submat[, 2]) > 0))
      neg[ii] <- length(which((submat[, 1] - submat[, 2]) < 0))
      medPathDif[ii] <- median(submat[, 1] - submat[, 2])
    }
    names(pos) <- unique(path_indexes)
    names(neg) <- unique(path_indexes)
    names(medPathDif) <- unique(path_indexes)
    reprPath <- cbind.data.frame(reprPath, pos, neg, medPathDif, comp_in_path)
    reprPath <- reprPath[order(reprPath[, 1], decreasing = T), ]
    colnames(reprPath) <- c("% of Representation", "Higher in Clust 1", "Higher in Clust 2", "Median of Difference", "Comp. Indx.")
    return(list("Pathways per Metabolite" = pathways, "Pathway Counts" = path_counts,
                "Mets & Rel. Pathways" = pathdif, "Representation of Pathways"= reprPath))
  }else{
    return(list("Pathways per Metabolite" = pathways,
                "Pathway Counts" = path_counts))
  }
}
# Over-representation analysis (ORA) of differential metabolites against KEGG
# pathways using Fisher's exact test, optionally restricted to one organism.
#
# Arguments:
#   diffMetObjkt - KEGG compound ids of the differential metabolites
#                  (without the "cpd:" prefix; it is prepended below).
#   allMetsObjkt - ids of all detected metabolites (the background set).
#   org          - optional KEGG organism code to restrict pathways.
#   alpha        - significance cutoff applied to the Fisher p-values.
# Returns a data.frame with columns "Pathways" and "p.values" for the
# pathways passing the cutoff.
#
# NOTE(review): this definition is immediately overwritten by the extended
# doORA() defined just below (which adds a `target` argument); it is kept in
# the file for reference only. Requires network access via KEGGREST.
doORA <- function(diffMetObjkt, allMetsObjkt, org = NULL, alpha = 0.05){
  if(!require(KEGGREST)) install.packages("KEGGREST")
  library(KEGGREST)
  if(is.null(org)){
    paths <- keggList("pathway")
  }else{
    paths <- keggList("pathway", org)
  }
  diffMet <- diffMetObjkt[!is.na(diffMetObjkt)]
  # Prefix each id with "cpd:" (sapply over the single prefix; diffMet is
  # forwarded through paste's ...; column 1 holds the prefixed vector).
  diffMet <- sapply("cpd:", diffMet, FUN = paste, sep = "")[, 1]
  totPaths <- unique(unlist(sapply(allMetsObjkt, keggLink, target = "pathway")))
  if(!is.null(org)){
    totPaths <- totPaths[gsub("map", replacement = org, totPaths) %in% names(paths)]
  }
  compsPerPath <- sapply(totPaths, keggLink, target = "compound")
  allComps <- unique(unlist(compsPerPath))
  allCompsLen <- length(allComps)
  # 2x2 contingency table for one pathway's compound set x.
  contMat <- function(x) {
    compsInPath <- length(x)
    mat <- matrix(c(compsInPath, allCompsLen - compsInPath, sum(diffMet %in% x), sum(!diffMet %in% x)),
                  ncol = 2,
                  nrow = 2,
                  dimnames = list(c("in_path", "not_in_path"),
                                  c("met_not_interest", "met_in_interest")))
    return(mat)
  }
  contMatsPaths <- lapply(compsPerPath, contMat)
  fishRes <- lapply(contMatsPaths, fisher.test)
  # Keep only pathways whose Fisher p-value passes alpha.
  filt <- function(x) x$p.value <= alpha
  vecTrue <- unlist(lapply(fishRes, filt))
  sign <- fishRes[vecTrue]
  pVals <- sapply(sign, function(f) f$p.value)
  if(!is.null(org)){names(pVals) <- gsub("map", replacement = org, names(pVals))}
  signMat <- cbind.data.frame(paths[match(names(pVals), names(paths))], pVals)
  colnames(signMat) <- c("Pathways", "p.values")
  return(signMat)
}
# Over-representation analysis of differential metabolites (or enzymes)
# against KEGG pathways using Fisher's exact test. Extended version of the
# doORA() defined above: `target` selects whether pathway membership is
# resolved at the compound ("cpd:") or enzyme ("ec:") level.
#
# Arguments:
#   diffMetObjkt - KEGG ids of the differential features (no prefix).
#   allMetsObjkt - ids of all detected features (the background set).
#   org          - optional KEGG organism code to restrict pathways.
#   alpha        - significance cutoff applied to the Fisher p-values.
#   target       - "compound" or "enzyme".
# Returns a data.frame with columns "Pathways" and "p.values".
# NOTE(review): requires network access via KEGGREST.
doORA <- function(diffMetObjkt, allMetsObjkt, org = NULL, alpha = 0.05, target = "compound"){
  if(!require(KEGGREST)) install.packages("KEGGREST")
  library(KEGGREST)
  if(target != "compound" && target != "enzyme"){
    # NOTE(review): the stop() below is unreachable (after return); the
    # invalid-target case returns the message string instead of erroring.
    return("Target must be compound or enzyme")
    stop(call. = F)
  }
  if(is.null(org)){
    paths <- keggList("pathway")
  }else{
    paths <- keggList("pathway", org)
  }
  diffMet <- diffMetObjkt[!is.na(diffMetObjkt)]
  # Prefix ids with the KEGG namespace matching `target`.
  if(target == "compound"){
    diffMet <- sapply("cpd:", diffMet, FUN = paste, sep = "")[, 1]
  }
  if(target == "enzyme"){
    diffMet <- sapply("ec:", diffMet, FUN = paste, sep = "")[, 1]
  }
  totPaths <- unique(unlist(sapply(allMetsObjkt, keggLink, target = "pathway")))
  if(!is.null(org)){
    totPaths <- totPaths[gsub("map", replacement = org, totPaths) %in% names(paths)]
  }
  compsPerPath <- sapply(totPaths, keggLink, target = target)
  allComps <- unique(unlist(compsPerPath))
  allCompsLen <- length(allComps)
  # 2x2 contingency table for one pathway's feature set x.
  contMat <- function(x) {
    compsInPath <- length(x)
    mat <- matrix(c(compsInPath, allCompsLen - compsInPath, sum(diffMet %in% x), sum(!diffMet %in% x)),
                  ncol = 2,
                  nrow = 2,
                  dimnames = list(c("in_path", "not_in_path"),
                                  c("met_not_interest", "met_in_interest")))
    return(mat)
  }
  contMatsPaths <- lapply(compsPerPath, contMat)
  fishRes <- lapply(contMatsPaths, fisher.test)
  # Keep only pathways whose Fisher p-value passes alpha.
  filt <- function(x) x$p.value <= alpha
  vecTrue <- unlist(lapply(fishRes, filt))
  sign <- fishRes[vecTrue]
  pVals <- sapply(sign, function(f) f$p.value)
  if(!is.null(org)){names(pVals) <- gsub("map", replacement = org, names(pVals))}
  signMat <- cbind.data.frame(paths[match(names(pVals), names(paths))], pVals)
  colnames(signMat) <- c("Pathways", "p.values")
  return(signMat)
}
# Collapses replicate rows of a metabolomic matrix to per-strain medians.
#
# Rows are assumed to be grouped in consecutive runs of `n_rep` replicates
# per strain, with row names like "<strain>_<replicate>"; the strain label
# is the part of the row name before the first underscore.
#
# Arguments:
#   normMets - numeric matrix (samples x metabolites); nrow must be a
#              multiple of n_rep.
#   n_rep    - number of consecutive replicate rows per strain. Defaults to
#              3 (the value the original implementation hard-coded), so
#              existing callers are unaffected.
# Returns a matrix with one median row per strain and the same columns.
getStrainMedian <- function(normMets, n_rep = 3){
  if (nrow(normMets) %% n_rep != 0) {
    stop("nrow(normMets) must be a multiple of n_rep", call. = FALSE)
  }
  n_strain <- nrow(normMets) / n_rep
  normMetsMedians <- matrix(nrow = n_strain, ncol = ncol(normMets))
  for (j in seq_len(n_strain)) {
    # Rows belonging to strain j: a consecutive run of n_rep replicates.
    rows <- (1 + n_rep * (j - 1)):(n_rep * j)
    for (i in seq_len(ncol(normMets))) {
      normMetsMedians[j, i] <- median(normMets[rows, i])
    }
  }
  # Strain label = row name up to the first underscore.
  rownames(normMetsMedians) <- unique(gsub("\\_.*", "", rownames(normMets)))
  colnames(normMetsMedians) <- colnames(normMets)
  normMetsMedians
}
|
d04a83887bdb96e2bd2a6b3e8d60d42e2c0cff69
|
c166a28030821b7eb0523a7f2515701abc8e9eaa
|
/Rnsgaii/TestScript.R
|
35001b19ff830291c83b9a688a416c18b31c8865
|
[
"MIT"
] |
permissive
|
UCD-GW-Nitrate/nsgaii
|
c82e96e397698dfe2e14af366b25ccf3d8908912
|
f2d985dada0850c3bd648fd38c13778ec24c3597
|
refs/heads/master
| 2023-04-07T00:35:40.054396
| 2021-04-16T21:26:06
| 2021-04-16T21:26:06
| 216,633,904
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 275
|
r
|
TestScript.R
|
# Quick interactive scatter of one stored solution set / front.
# NOTE(review): `pH` is assumed to already exist in the calling workspace as
# a list of numeric matrices (element 401 is plotted; column 2 on x, negated
# column 1 on y) -- confirm against the NSGA-II driver that produces `pH`.
library(plotly)
plot_ly( x = pH[[401]][,2], y = -pH[[401]][,1],
         marker = list(size = 5,
                       color = 'rgba(255, 182, 193, .9)',
                       line = list(color = 'rgba(152, 0, 0, .8)',
                                   width = 0)))
|
4c650bb5cdac55efe7f19ec2cf8130bd8a8631c8
|
6cf28ab84a3653da0f3b0f838de1d1522fabb677
|
/bindings/R/rlibkriging/tests/testthat/test-NoiseKrigingFit.R
|
11533804ecdd02de218d195227c39b6e2139aff5
|
[
"Apache-2.0"
] |
permissive
|
libKriging/libKriging
|
e23ed9488159db9e66d8a2e23aa8b5bdc647bd62
|
2a75a7e0be4245dece2672652c8c2311e3454595
|
refs/heads/master
| 2023-08-21T23:18:49.311663
| 2023-07-11T10:49:17
| 2023-07-11T10:49:17
| 192,920,489
| 29
| 8
|
Apache-2.0
| 2023-08-04T16:06:21
| 2019-06-20T12:58:55
|
C++
|
UTF-8
|
R
| false
| false
| 7,474
|
r
|
test-NoiseKrigingFit.R
|
context("Fit: 1D")
# Compare a 1D noisy-kriging fit between DiceKriging::km (reference) and
# libKriging's NoiseKriging on a smooth test function with known noise.
f = function(x) 1-1/2*(sin(12*x)/(1+x)+2*cos(7*x)*x^5+0.7)
n <- 5
set.seed(123)
X <- as.matrix(runif(n))
y = f(X) + 0.1*rnorm(nrow(X))
k = NULL
r = NULL
# Both models are told the true homogeneous noise variance (0.1^2).
k = DiceKriging::km(design=X,response=y,noise.var=rep(0.1^2,nrow(X)),covtype = "gauss",control = list(trace=F),nugget.estim=F,optim.method='BFGS',multistart = 20)
r <- NoiseKriging(y,rep(0.1^2,nrow(X)), X, "gauss", optim = "BFGS")
l = as.list(r)
# Profile of the libKriging log-likelihood over theta, at DiceKriging's
# fitted sigma2, plus gradient arrows as a visual sanity check.
ll = Vectorize(function(x) logLikelihoodFun(r,c(x,k@covariance@sd2))$logLikelihood)
plot(ll,xlim=c(0.000001,1))
for (x in seq(0.000001,1,,11)){
  envx = new.env()
  ll2x = logLikelihoodFun(r,c(x,k@covariance@sd2))$logLikelihood
  gll2x = logLikelihoodFun(r,c(x,k@covariance@sd2),grad = T)$logLikelihoodGrad[1]
  arrows(x,ll2x,x+.1,ll2x+.1*gll2x,col='red')
}
# 1D reference optimum of the profiled likelihood, then overlay all three
# theta estimates (reference: black, libKriging: red, DiceKriging: blue).
theta_ref = optimize(ll,interval=c(0.001,1),maximum=T)$maximum
abline(v=theta_ref,col='black')
abline(v=as.list(r)$theta,col='red')
abline(v=k@covariance@range.val,col='blue')
test_that(desc="Noise / Fit: 1D / fit of theta by DiceKriging is right",
          expect_equal(theta_ref, k@covariance@range.val, tol= 1e-3))
test_that(desc="Noise / Fit: 1D / fit of theta by libKriging is right",
          expect_equal(array(theta_ref), array(as.list(r)$theta), tol= 0.01))
#############################################################
context("Fit: 2D (Branin)")
# Same comparison as the 1D case, on the 2D Branin function with a large
# known noise variance (10^2).
f = function(X) apply(X,1,DiceKriging::branin)
n <- 15
set.seed(1234)
X <- cbind(runif(n),runif(n))
y = f(X)+ 10*rnorm(nrow(X))
k = NULL
r = NULL
k = DiceKriging::km(design=X,response=y,noise.var=rep(10^2,nrow(X)),covtype = "gauss",control = list(trace=F),nugget.estim=F,optim.method='BFGS',multistart = 20)
r <- NoiseKriging(y, noise=rep(10^2,nrow(X)),X, "gauss", optim = "BFGS")
#plot(Vectorize(function(a) r$logLikelihoodFun(c(r$theta(),a))$logLikelihood))
l = as.list(r)
# save(list=ls(),file="fit-nugget-2d.Rdata")
sigma2_k = k@covariance@sd2
sigma2_r = as.list(r)$sigma2
test_that(desc="Noise / Fit: 2D (Branin) / fit of LL by DiceKriging is same that libKriging",
          expect_equal(k@logLik,r$logLikelihood(), tol= 1e-2))
# Negative profiled log-likelihood over theta (2D), at DiceKriging's sigma2;
# used both for the contour plot and the reference optimisation below.
ll = function(X) {if (!is.matrix(X)) X = matrix(X,ncol=2);
  # print(dim(X));
  apply(X,1,
        function(x) {
          y=-logLikelihoodFun(r,c(unlist(x),sigma2_k))$logLikelihood
          #print(y);
          y})}
#DiceView::contourview(ll,xlim=c(0.1,2),ylim=c(0.1,2))
x=seq(0.1,1,,51)
contour(x,x,matrix(ll(as.matrix(expand.grid(x,x))),nrow=length(x)),nlevels = 30)
# Reference optimum (black) vs libKriging (red) vs DiceKriging (blue).
theta_ref = optim(par=matrix(c(.2,.5),ncol=2),ll,lower=c(0.1,0.1),upper=c(2,2),method="L-BFGS-B")$par
points(theta_ref,col='black')
points(as.list(r)$theta[1],as.list(r)$theta[2],col='red')
points(k@covariance@range.val[1],k@covariance@range.val[2],col='blue')
# Compare likelihood VALUES at the two fitted thetas (not the thetas
# themselves), since the 2D optimum can be flat.
test_that(desc="Noise / Fit: 2D (Branin) / fit of theta 2D is _quite_ the same that DiceKriging one",
          expect_equal(ll(array(as.list(r)$theta)), ll(k@covariance@range.val), tol=1e-1))
#############################################################
context("Fit: 2D (Branin) multistart")
# Same Branin comparison, but with a shared 10-point multistart grid
# (parinit) supplied to both implementations.
f = function(X) apply(X,1,DiceKriging::branin)
n <- 15
set.seed(1234)
X <- cbind(runif(n),runif(n))
y = f(X) + 10*rnorm(nrow(X))
k = NULL
r = NULL
parinit = matrix(runif(10*ncol(X)),ncol=ncol(X))
k <- tryCatch( # needed to catch warning due to %dopar% usage when using multistart
  withCallingHandlers(
    {
      error_text <- "No error."
      DiceKriging::km(design=X,response=y,noise.var=rep(10^2,nrow(X)),covtype = "gauss", parinit=parinit,control = list(trace=F),nugget.estim=F,optim.method='BFGS',multistart = 20)
    },
    warning = function(e) {
      error_text <<- trimws(paste0("WARNING: ", e))
      invokeRestart("muffleWarning")
    }
  ),
  error = function(e) {
    return(list(value = NA, error_text = trimws(paste0("ERROR: ", e))))
  },
  finally = {
  }
)
r <- NoiseKriging(y,noise=rep(10^2,nrow(X)), X, "gauss", parameters=list(theta=parinit))
l = as.list(r)
# save(list=ls(),file="fit-nugget-multistart.Rdata")
sigma2_k = k@covariance@sd2
sigma2_r = as.list(r)$sigma2
test_that(desc="Noise / Fit: 2D (Branin) multistart / fit of LL by DiceKriging is same that libKriging",
          expect_equal(k@logLik,r$logLikelihood(), tol= 0.01))
# Negative profiled log-likelihood over theta (2D), at DiceKriging's sigma2.
ll = function(X) {if (!is.matrix(X)) X = matrix(X,ncol=2);
  # print(dim(X));
  apply(X,1,
        function(x) {
          # print(dim(x))
          #print(matrix(unlist(x),ncol=2));
          y=-logLikelihoodFun(r,c(unlist(x),sigma2_k))$logLikelihood
          #print(y);
          y})}
#DiceView::contourview(ll,xlim=c(0.1,2),ylim=c(0.1,2))
x=seq(0.1,2,,51)
contour(x,x,matrix(ll(as.matrix(expand.grid(x,x))),nrow=length(x)),nlevels = 30)
# Reference optimum (black) vs libKriging (red) vs DiceKriging (blue).
theta_ref = optim(par=matrix(c(.2,.5),ncol=2),ll,lower=c(0.1,0.1),upper=c(2,2),method="L-BFGS-B")$par
points(theta_ref,col='black')
points(as.list(r)$theta[1],as.list(r)$theta[2],col='red')
points(k@covariance@range.val[1],k@covariance@range.val[2],col='blue')
test_that(desc="Noise / Fit: 2D (Branin) multistart / fit of theta 2D is _quite_ the same that DiceKriging one",
          expect_equal(ll(array(as.list(r)$theta)), ll(k@covariance@range.val), tol= 1e-1))
################################################################################
context("Fit: 2D _not_ in [0,1]^2")
# Branin re-parameterised so the design space is [0,1] x [0,15], to check
# that theta fitting works when inputs are not normalised to the unit cube.
branin_15 <- function (x) {
  x1 <- x[1] * 15 - 5
  x2 <- x[2] #* 15
  (x2 - 5/(4 * pi^2) * (x1^2) + 5/pi * x1 - 6)^2 + 10 * (1 - 1/(8 * pi)) * cos(x1) + 10
}
f = function(X) apply(X,1,branin_15)
n <- 15
set.seed(1234)
X <- cbind(runif(n,0,1),runif(n,0,15))
y = f(X) + 10*rnorm(nrow(X))
k = NULL
r = NULL
k = DiceKriging::km(design=X,response=y,noise.var=rep(10^2,nrow(X)),covtype = "gauss",control = list(trace=F),nugget.estim=FALSE,optim="BFGS",multistart=20)#,parinit = c(0.5,5))
# FIX: removed a stray double comma ('"gauss",,') that passed an empty
# positional argument into NoiseKriging; all following arguments are named,
# so dropping it does not shift any argument and the call is unchanged.
r <- NoiseKriging(y, noise=rep(10^2,nrow(X)), X, "gauss", optim = "BFGS")#, parameters=list(theta=matrix(c(0.5,5),ncol=2)))
l = as.list(r)
# save(list=ls(),file="fit-nugget-2d-not01.Rdata")
sigma2_k = k@covariance@sd2
sigma2_r = as.list(r)$sigma2
test_that(desc="Noise / Fit: 2D _not_ in [0,1]^2 / fit of LL by DiceKriging is same that libKriging",
          expect_equal(k@logLik,r$logLikelihood(), tol= 0.01))
# Negative profiled log-likelihood over theta for libKriging (ll_r) and
# DiceKriging (ll_k), both evaluated at DiceKriging's fitted sigma2.
ll_r = function(X) {if (!is.matrix(X)) X = matrix(X,ncol=2);
  apply(X,1,
        function(x) {
          -logLikelihoodFun(r,c(unlist(x),sigma2_k))$logLikelihood
        })}
#DiceView::contourview(ll,xlim=c(0.1,2),ylim=c(0.1,2))
x1=seq(0.001,2,,51)
x2=seq(0.001,30,,51)
contour(x1,x2,matrix(ll_r(as.matrix(expand.grid(x1,x2))),nrow=length(x1)),nlevels = 30,col='red')
points(as.list(r)$theta[1],as.list(r)$theta[2],col='red')
ll_r(t(as.list(r)$theta))
ll_k = function(X) {if (!is.matrix(X)) X = matrix(X,ncol=2);
  apply(X,1,function(x) {-DiceKriging::logLikFun(c(x,sigma2_k),k)})}
contour(x1,x2,matrix(ll_k(as.matrix(expand.grid(x1,x2))),nrow=length(x1)),nlevels = 30,add=T)
points(k@covariance@range.val[1],k@covariance@range.val[2])
ll_k(k@covariance@range.val)
# Reference optimum (black) for comparison with both fitted thetas.
theta_ref = optim(par=matrix(c(.2,10),ncol=2),ll_r,lower=c(0.001,0.001),upper=c(2,30),method="L-BFGS-B")$par
points(theta_ref,col='black')
# Compare likelihood VALUES (each implementation's own profile) at the two
# fitted thetas rather than the thetas themselves.
test_that(desc="Noise / Fit: 2D _not_ in [0,1]^2 / fit of theta 2D is _quite_ the same that DiceKriging one",
          expect_equal(ll_r(array(as.list(r)$theta)), ll_k(k@covariance@range.val), tol=1e-1))
|
11b47377338841096bbaa3d13f0a0c306f944f69
|
c73553a8936ec72cbbfbcc5e88cabbc946ce32a6
|
/R/model-evaluation.R
|
2c197322b94b8b61aedebb191827fef8e52703f1
|
[
"MIT"
] |
permissive
|
stephaneghozzi/trendbreaker
|
b0f9a1475f56bd7f14fb10573c5fe2b807c508e5
|
5002f7f12dc90c079f4391eb88be4166916403c5
|
refs/heads/master
| 2022-10-22T02:47:22.895042
| 2020-06-17T09:17:19
| 2020-06-17T09:17:19
| 275,192,241
| 1
| 0
|
NOASSERTION
| 2020-06-26T15:47:17
| 2020-06-26T15:47:16
| null |
UTF-8
|
R
| false
| false
| 5,301
|
r
|
model-evaluation.R
|
#' Tools for model evaluation
#'
#' These functions provide tools for evaluating models, based on the goodness of
#' fit or on predictive power. `evaluate_aic` evaluates the goodness of fit of a
#' single model using Akaike's information criterion, measuring the deviance of
#' the model while penalising its complexity. `evaluate_resampling` uses
#' repeated K-fold cross-validation and the Root Mean Square Error (RMSE) of
#' testing sets to measure the predictive power of a single
#' model. `evaluate_aic` is faster, but `evaluate_resampling` is better-suited
#' to select best predicting models. `evaluate_models` uses either
#' `evaluate_aic` or `evaluate_resampling` to compare a series of
#' models. `select_model` does the same, but returns the 'best' model according
#' to the chosen method.
#'
#' @details These functions wrap around existing functions from several
#' packages. `stats::AIC` is used in `evaluate_aic`, and `evaluate_resampling`
#' uses `rsample::vfold_cv` for cross-validation and `yardstick::rmse` to
#' calculate RMSE.
#'
#' @seealso [`stats::AIC`](stats::AIC) for computing AIC;
#' [`rsample::vfold_cv`](rsample::vfold_cv) for cross validation;
#' [`yardstick::rmse`](yardstick::rmse) for calculating RMSE; `yardstick` also
#' implements a range of other metrics for assessing model fit outlined at
#' \url{https://yardstick.tidymodels.org/};
#' [`?trendbreaker_model`](trendbreaker_model) for the different ways to build
#' `trendbreaker_model` objects
#'
#' @param model a model specified as an `trendbreaker_model` object, as returned by
#' `lm_model`, `glm_model`, `glm_nb_model`, `brms_model`; see
#' [`?trendbreaker_model`](trendbreaker_model) for details
#'
#' @param data a `data.frame` containing data (including the response variable
#' and all predictors) used in `model`
#'
#' @param metrics a list of functions assessing model fit, with a similar
#' interface to `yardstick::rmse`; see \url{https://yardstick.tidymodels.org/}
#' for more information
#'
#' @param v the number of equally sized data partitions to be used for K-fold
#'   cross-validation; `v` cross-validations will be performed, each using `v -
#'   1` partitions as training set, and the remaining partition as testing
#'   set. Defaults to `nrow(data)`, so that the method uses leave-one-out
#'   cross-validation, akin to Jackknife except that the testing set (and not
#'   the training set) is used to compute the fit statistics.
#'
#' @param repeats the number of times the random K-fold cross validation should
#' be repeated for; defaults to 1; larger values are likely to yield more
#' reliable / stable results, at the expense of computational time
#'
#' @param ... further arguments passed to [`stats::AIC`](stats::AIC)
#'
#' @param models a `list` of models specified as an `trendbreaker_model` object, as
#' returned by `lm_model`, `glm_model`, `glm_nb_model`, `brms_model`; see
#' [`?trendbreaker_model`](trendbreaker_model) for details
#'
#' @param method a `function` used to evaluate models: either
#' `evaluate_resampling` (default, better for selecting models with good
#' predictive power) or `evaluate_aic` (faster, focuses on goodness-of-fit
#' rather than predictive power)
#'
#'
#' @export
#' @rdname evaluate_models
#' @aliases evaluate_resampling
# Predictive-power evaluation via repeated K-fold cross-validation (see the
# roxygen block above for the full contract). Returns a tibble with one row
# per metric and the metric's mean score across all resamples.
evaluate_resampling <- function(model,
                                data,
                                metrics = list(yardstick::rmse),
                                v = nrow(data),
                                repeats = 1) {
  # Build the resampling plan and collapse the metric list into a single
  # yardstick metric-set function.
  folds <- rsample::vfold_cv(data, v = v, repeats = repeats)
  metric_fun <- do.call(yardstick::metric_set, metrics)

  # Score one split: train on the analysis part, predict the assessment part,
  # then compute all metrics on the held-out predictions.
  score_split <- function(split) {
    trained <- model$train(rsample::analysis(split))
    held_out <- trained$predict(rsample::assessment(split))
    # TODO: always sort by time component
    metric_fun(held_out, observed, pred)
  }
  scores <- dplyr::bind_rows(lapply(folds$splits, score_split))

  # Average each metric over all splits.
  by_metric <- dplyr::group_by(scores, .metric)
  summarised <- dplyr::summarise(by_metric, estimate = mean(.estimate))
  tibble::tibble(
    metric = summarised$.metric,
    score = summarised$estimate
  )
}
#' @export
#' @rdname evaluate_models
#' @aliases evaluate_aic
# Goodness-of-fit evaluation: train `model` on the full `data` and report the
# AIC of the fitted model object. Extra arguments are forwarded to stats::AIC.
# Returns a one-row tibble with columns `metric` ("aic") and `score`.
evaluate_aic <- function(model, data, ...) {
  fitted_model <- model$train(data)
  aic_score <- stats::AIC(fitted_model$model, ...)
  tibble::tibble(metric = "aic", score = aic_score)
}
#' @export
#' @rdname evaluate_models
#' @aliases evaluate_models
# Evaluate every candidate in `models` with `method` (defaults to
# cross-validated resampling) and return a wide tibble: one row per model,
# one column per metric. Extra arguments are forwarded to `method`.
evaluate_models <- function(data, models, method = evaluate_resampling, ...) {
  scores <- lapply(models, function(m) method(m, data, ...))
  long <- dplyr::bind_rows(scores, .id = "model")
  # reshape long metric/score pairs into one column per metric
  tidyr::pivot_wider(
    long,
    id_cols = "model",
    names_from = "metric",
    values_from = "score"
  )
}
#' @export
#' @rdname evaluate_models
#' @aliases select_model
# Rank candidate models with `method` and return the winner together with the
# full leaderboard. Ranking uses the first metric column (column 2), with
# lower scores treated as better.
select_model <- function(data, models, method = evaluate_resampling, ...) {
  leaderboard <- evaluate_models(data = data, models = models, method = method, ...)
  ranking <- order(leaderboard[, 2, drop = TRUE])
  leaderboard <- leaderboard[ranking, ]
  # per convention the best model sits in the first row after sorting
  list(best_model = models[[leaderboard$model[[1]]]], leaderboard = leaderboard)
}
|
10329058e62b462b79612463b2cd7c49c44a5c90
|
408def85f938dd10412aa89c144c380e09d1879d
|
/man/sdf_schema_json.Rd
|
61f92d1a2de3e91959b0edffffbb17c5d000ea8a
|
[
"Apache-2.0"
] |
permissive
|
mitre/sparklyr.nested
|
3faa7ace6b59b15bf6078cf71e98d9f68e000fca
|
e7abb7067b008d7eb4f8b76816e3cf54c13b8088
|
refs/heads/master
| 2023-09-04T12:50:04.186509
| 2023-02-20T21:38:02
| 2023-02-20T21:38:02
| 101,699,537
| 30
| 5
|
Apache-2.0
| 2022-05-19T10:22:22
| 2017-08-29T00:14:48
|
R
|
UTF-8
|
R
| false
| true
| 3,012
|
rd
|
sdf_schema_json.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/schema.R
\name{sdf_schema_json}
\alias{sdf_schema_json}
\alias{sdf_schema_viewer}
\title{Work with the schema}
\usage{
sdf_schema_json(
x,
parse_json = TRUE,
simplify = FALSE,
append_complex_type = TRUE
)
sdf_schema_viewer(
x,
simplify = TRUE,
append_complex_type = TRUE,
use_react = FALSE
)
}
\arguments{
\item{x}{An \code{R} object wrapping, or containing, a Spark DataFrame.}
\item{parse_json}{Logical. If \code{TRUE} then the JSON return value will be parsed into an R list.}
\item{simplify}{Logical. If \code{TRUE} then the schema will be folded into itself such that
\code{{"name" : "field1", "type" : {"type" : "array", "elementType" : "string", "containsNull" : true},
"nullable" : true, "metadata" : { } }} will be rendered simply \code{{"field1 (array)" : "[string]"}}}
\item{append_complex_type}{Logical. This only matters if \code{parse_json=TRUE} and \code{simplify=TRUE}.
In that case indicators will be included in the return value for array and struct types.}
\item{use_react}{Logical. If \code{TRUE} schemas will be rendered using \link[listviewer]{reactjson}.
Otherwise they will be rendered using \link[listviewer]{jsonedit} (the default). Using react works better
in some contexts (e.g. bookdown-rendered HTML) and has a different look & feel. It does however carry
an extra dependency on the \code{reactR} package suggested by \code{listviewer}.}
}
\description{
These functions support flexible schema inspection both algorithmically and in human-friendly ways.
}
\examples{
\dontrun{
library(testthat)
library(jsonlite)
library(sparklyr)
library(sparklyr.nested)
sample_json <- paste0(
'{"aircraft_id":["string"],"phase_sequence":["string"],"phases (array)":{"start_point (struct)":',
'{"segment_phase":["string"],"agl":["double"],"elevation":["double"],"time":["long"],',
'"latitude":["double"],"longitude":["double"],"altitude":["double"],"course":["double"],',
'"speed":["double"],"source_point_keys (array)":["[string]"],"primary_key":["string"]},',
'"end_point (struct)":{"segment_phase":["string"],"agl":["double"],"elevation":["double"],',
'"time":["long"],"latitude":["double"],"longitude":["double"],"altitude":["double"],',
'"course":["double"],"speed":["double"],"source_point_keys (array)":["[string]"],',
'"primary_key":["string"]},"phase":["string"],"primary_key":["string"]},"primary_key":["string"]}'
)
with_mock(
# I am mocking functions so that the example works without a real spark connection
spark_read_parquet = function(x, ...){return("this is a spark dataframe")},
sdf_schema_json = function(x, ...){return(fromJSON(sample_json))},
spark_connect = function(...){return("this is a spark connection")},
# the meat of the example is here
sc <- spark_connect(),
spark_data <- spark_read_parquet(sc, path="path/to/data/*.parquet", name="some_name"),
sdf_schema_viewer(spark_data)
)
}
}
\seealso{
\code{\link[sparklyr]{sdf_schema}}
}
|
6b24d597c7bc571dbd1e3108feaec81d729fe72f
|
bcbe85dd43636833ef0f07fbdafdcc355993a619
|
/plot3.R
|
0ed0cd311d9f50f346bac9371f0990eb5a9e7f93
|
[] |
no_license
|
yaseminyf/ExData_Plotting1
|
a93dc2066b2dcbbae82834531411869acb440a54
|
8be3c7a2925849321c2227df5eae44157030891f
|
refs/heads/master
| 2020-04-01T22:25:26.906800
| 2018-10-20T21:52:44
| 2018-10-20T21:52:44
| 153,707,619
| 0
| 0
| null | 2018-10-19T01:09:51
| 2018-10-19T01:09:50
| null |
UTF-8
|
R
| false
| false
| 1,248
|
r
|
plot3.R
|
# Plot 3: the three energy sub-metering series for 2007-02-01/02, written to plot3.png.

# Load the raw household power data; "?" encodes missing values.
hpc <- read.table("./household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")

# Combine the separate Date and Time columns into a POSIXlt timestamp.
datetime <- strptime(paste(as.character(hpc$Date), as.character(hpc$Time)),
                     "%d/%m/%Y %H:%M:%S")

# Keep only the measurement columns and attach the combined timestamp.
mydata <- data.frame(datetime, subset(hpc, select = Global_active_power:Sub_metering_3))

# Restrict to the two days of interest.
usedata <- subset(mydata, datetime >= "2007-02-01" & datetime < "2007-02-03")

# Render directly to a PNG device.
png(file = "plot3.png")

# Empty canvas with the right axes, then one line per sub-meter.
with(usedata, plot(datetime, Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "n"))
with(usedata, lines(datetime, Sub_metering_1))
with(usedata, lines(datetime, Sub_metering_2, col = "red"))
with(usedata, lines(datetime, Sub_metering_3, col = "blue"))
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Close the device to flush the file.
dev.off()
|
4314b4750b02dada7357a347fd697c4629bca671
|
ec2c6eb45f2155d66f4daa62cb9cf60211b769d7
|
/add_concat.R
|
dd78a55203d217f1fe1c95bd0afa170b526b96f0
|
[] |
no_license
|
mindcrime/LearningR
|
dedf431d39d4622e6d601b395bbae9147cab1e41
|
04de699585927edc73797bf78a4b5bf18ce45a7e
|
refs/heads/master
| 2020-05-09T18:10:34.277064
| 2015-12-25T12:14:59
| 2015-12-25T12:14:59
| 40,801,362
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 329
|
r
|
add_concat.R
|
# Author: prhodes
###############################################################################

# Element-wise vector addition: 1:4 + 6:9 gives c(7, 9, 11, 13).
# (Use `<-` for assignment per R convention.)
foo <- 1:4 + 6:9

# paste() is vectorized: the label is prepended to every element of `foo`.
cat(paste("vector added: ", foo))
cat("\n")

# c() concatenates values into a vector; the two vectors then add element-wise.
cat(c(1, 3, 6, 10, 15) + c(0, 1, 3, 6, 10))
|
6730b9918f616bea300f12b48d35d1bcdb801f48
|
a69b74b9f6ebc26eae26c2d79748fb11588999ff
|
/button_commandsReviewed.R
|
2bab6734062b74369a93bfe3491533185f119b2b
|
[] |
no_license
|
BryanSWeber/TheButtonAnalysis
|
e89f1395cbc3032d6097bfd207d240e9b9996287
|
56408eb74dacf465b35d56e83942bda1d3874d7e
|
refs/heads/main
| 2023-05-04T08:02:21.109902
| 2021-05-25T00:31:35
| 2021-05-25T00:31:35
| 370,519,283
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,515
|
r
|
button_commandsReviewed.R
|
# Reddit "button" analysis, part 1: load the raw click log, pad it to a
# one-observation-per-second series, collapse to 10-minute minima (xts), and
# derive the lowest-badge-seen regressor.
#install.packages("corrgram")
#install.packages("zoo")
#install.packages("forecast")
#install.packages("lubridate")
library("lubridate", lib.loc="~/R/win-library/3.1")
#Source of data
#http://tcial.org/the-button/button.csv
button <- read.csv("~/Button Data/button5_20.csv")
# Unix epoch seconds -> POSIXct timestamps.
button$time<-as.POSIXct(button$now_timestamp, origin="1970-01-01") #taken from http://stackoverflow.com/questions/13456241/convert-unix-epoch-to-date-object-in-r
#I must manually imput a minimum for the button, it never was below 8.
# Sentinel: values below 1 are treated as resets and recoded to 99.
button$seconds_left[button$seconds_left<1]<-99
#The following line was my first instinct. Will cause a crash if I try to run it. Lots of data.
# plot(button$time,button$seconds_left)
#
# #this should be operational
# x<-dim(button)
# x[1]
# subsample<-sort(sample(1:x[1],5000))
# plot(button$time[subsample],60-button$seconds_left[subsample],type="l",main="Sample of Time Elapsed Since Last Press (Size=5000)", xlab="Time",ylab="Time Elapsed")
#
# #the following represents a typical, naieve biased estimate.It is biased because the time for t+1 depends on time 1. The button is an AUTO-regressive sequence.
# biased<-lm((60-button$seconds_left)~button$time) #use full sample to generate this. Note the high significance is a result of autoregressive bias!
# abline(biased,col="red")
#First there is the missing data. There is the periods between clicks where the timer clicks down by 1 second, and actually missing data.
#http://bocoup.com/weblog/padding-time-series-with-r/
#Get opening and closing time to sequence data.
time.min<-button$time[1]
time.max<-button$time[length(button$time)]
# One row per second over the whole observation window.
all.dates<-seq(time.min, time.max, by="sec")
all.dates.frame<-data.frame(list(time=all.dates))
#merge data into single data frame with all data
merged.data<-merge(all.dates.frame, button,all=FALSE)
list_na<-is.na(merged.data$seconds_left)
# #na values have been established, but we cannot slavishly make them 0, and we need to mark periods that are LONG (>30sec, selected arbitrarily) as imputed.
# #http://www.cookbook-r.com/Manipulating_data/Finding_sequences_of_identical_values/
# # table_na<-rle(list_na)
# # cumsum(table_na$lengths)
# # table_na$lengths[table_na$values==TRUE]
# # Mark all of them. It's easier- http://r.789695.n4.nabble.com/find-data-date-gaps-in-time-series-td908012.html
# merged.data$missing_data[!merged.data$time %in% button$time<4]<-1 #if it's not in merged data, it's missing. If it's 0, it's missing.
# merged.data$missing_data[is.na(merged.data$missing_data)]<-0 #the rest were present.
#table(merged.data$missing_data)
#We must fill in these NA's with the last value -1. http://stackoverflow.com/questions/19838735/how-to-na-locf-in-r-without-using-additional-packages
library("zoo", lib.loc="~/R/win-library/3.1")
library("xts", lib.loc="~/R/win-library/3.1")
#I trust that I did this correctly. Let us replace the button data frame now, officially.
# merged.data$imputed_sec_left<-imputed_data
button<-merged.data
#let us collapse this http://stackoverflow.com/questions/17389533/aggregate-values-of-15-minute-steps-to-values-of-hourly-steps
#Need things as xts: http://stackoverflow.com/questions/4297231/r-converting-a-data-frame-to-xts
#https://stat.ethz.ch/pipermail/r-help/2011-February/267752.html
button_xts<-as.xts(button[,-1],order.by=button[,1])
button_xts<-button_xts['2015/'] #2015 to end of data set. Fixes odd error timings.
# button_xts<-button_xts[button_xts$missing_data==0]
# Bucket width in minutes; 10 minutes keeps every bucket non-empty per the note below.
t<-10 #how many minutes each period is 10 minutes will allow for NO inf to show up. No shortage>15 min.
end<-endpoints(button_xts,on="seconds",t*60) # t minute periods
# Per-bucket minima of the countdown and of the participant count.
col1<-period.apply(button_xts$seconds_left,INDEX=end,FUN=function(x) {min(x,na.rm=TRUE)}) #generates some empty sets
col2<-period.apply(button_xts$participants,INDEX=end,FUN=function(x) {min(x,na.rm=TRUE)})
button_xts<-merge(col1,col2)
# we will add a lowest observed badge function.
# min_badge[i]: lowest badge tier (floor(seconds/10)) observed up to roughly
# one hour before bucket i.
min_badge<-c(1:length(button_xts$seconds_left))
for(i in 1:length(button_xts$seconds_left)){
  min_badge[i]<-floor(min(button_xts$seconds_left[1:(max(c(i-60/t,1)))])/10) #lowest badge seen yesterday is important.
}
# NOTE(review): the double tilde in ~~as.factor(...) looks like a typo for
# ~as.factor(...) -- verify before relying on badge_class downstream.
badge_class<-model.matrix(~~as.factor(min_badge)) #let's get these factors as dummy variables. http://stackoverflow.com/questions/5048638/automatically-expanding-an-r-factor-into-a-collection-of-1-0-indicator-variables
#Seasons matter: http://robjhyndman.com/hyndsight/longseasonality/
# Fourier seasonal regressors: for each harmonic i = 1..terms, one sine and
# one cosine column of frequency i / period evaluated at the time points `t`.
# Columns are interleaved and named S1, C1, S2, C2, ...
fourier <- function(t, terms, period) {
  harmonics <- seq_len(terms)
  pairs <- lapply(harmonics, function(i) {
    angle <- 2 * pi * i * t / period
    cbind(sin(angle), cos(angle))
  })
  X <- do.call(cbind, pairs)
  colnames(X) <- paste(c("S", "C"), rep(harmonics, rep(2, terms)), sep = "")
  return(X)
}
# Part 2: build hourly/daily/weekly Fourier regressors plus badge dummies,
# fit an ARIMA(1,1,1) with external regressors, diagnose residuals, and
# render the historical / error / overlay / forecast PNGs.
hours<-fourier(1:length(index(button_xts)),1,60/t)
days<-fourier(1:length(index(button_xts)),1,24*60/t)
weeks<-fourier(1:length(index(button_xts)),1,7*24*60/t)
regressors<-data.frame(hours,days,weeks,badge_class[,2:dim(badge_class)[2]]) #badge_class[,2:dim(badge_class)[2]] #tried to use particpants. They are not significant.
library("forecast", lib.loc="~/R/win-library/3.1")
#reg_auto<-auto.arima(button_xts$seconds_left,xreg=regressors) #automatically chose from early ARIMA sequences, seasonal days, weeks, individual badge numbers are accounted for as a DRIFT term in the ARIMA sequence.
reg<-Arima(button_xts$seconds_left,order=c(1,1,1),xreg=regressors,include.constant=TRUE)
res<-residuals(reg)
# Residual autocorrelation diagnostics.
png(filename="~/Button Data/5_20_acf.png")
acf(res,na.action=na.omit)
dev.off()
png(filename="~/Button Data/5_20_pacf.png")
pacf(res,na.action=na.omit)
dev.off()
#Let's see how good this plot is of the hourly trend?
t.o.forecast<-paste("Prediction starts at: ", date(),sep="")
# NOTE(review): the ylab below ends with a stray ")" -- likely a typo in the
# label text; left as-is since it only affects the plot annotation.
png(filename="~/Button Data/5_20_historical.png")
plot(fitted(reg), main="Past Values of Button", xlab="Time (in 10 minute increments)", ylab="Lowest Button Time in 10 minute Interval)", ylim=c(0,60))
mtext(paste(t.o.forecast),side=1,line=4)
dev.off()
# NOTE(review): the plot() call below has a doubled comma (empty argument);
# R tolerates it here, but it is presumably unintended.
png(filename="~/Button Data/5_20_error.png")
plot(res, main="Error of Forecast",,xlab="Time (in 10 minute increments)", ylab="Error of Forecast Technique on Past Data")
mtext(paste(t.o.forecast),side=1,line=4)
dev.off()
png(filename="~/Button Data/5_20_overlay.png")
plot(fitted(reg), main="Past Values of Button overlayed with Forecast",xlab="Time (in 10 minute increments)", ylab="Lowest Button Time in 10 minute Interval", ylim=c(0,60))
mtext(paste(t.o.forecast),side=1,line=4)
lines(as.vector(button_xts),col="red")
dev.off()
#forecast value of button:
#size of forecast
# Forecast horizon: repeat the last observed week's regressors `w` times.
w<-2 #weeks of repetition of our last week.
n<-7*24*60/t
viable<-(dim(regressors)[1]-n):dim(regressors)[1] #gets the last week.
#regressors$missing_data<-median(regressors$missing_data) #but we don't want to assume a bunch of unneeded missing data.
forecast_values<-forecast(reg,xreg=regressors[rep(viable,w),],level=75)
start<-index(button_xts)[1]
f_cast<-append(forecast_values$x,forecast_values$mean)
a=as.Date(seq(start, by="15 min",length.out=length(f_cast)))
png(filename="~/Button Data/5_20_forecast.png")
plot(forecast_values,ylim=c(0,60), main="Lowest Button Time In Every 10 minute Period", ylab="10 minute Minimum of Button", xlab="Number of 10 minute Periods Since Button Creation")
footnotes<-paste("Timer Death in about 4 weeks. Prediction starts at ", date(),". 75% CI in Grey.",sep="")
mtext(paste(footnotes),side=1,line=4)
dev.off()
|
96399f9985bff2187ee77836b40e8b5341bb7d7f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ltsa/examples/TrenchMean.Rd.R
|
0b5349da95ded11c3ca7fa85afb9511506bc7e26
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 499
|
r
|
TrenchMean.Rd.R
|
library(ltsa)

### Name: TrenchMean
### Title: Exact MLE for mean given the autocorrelation function
### Aliases: TrenchMean
### Keywords: ts

### ** Examples

# Compare the exact MLE of the mean with the sample mean (BLUE) for a
# simulated AR(1) series with strong negative autocorrelation.
# (The original example initialized phi/a/z twice; the dead first
# initialization has been removed.)
n <- 100
phi <- -0.9
mu <- 100
sig <- 10
a <- rnorm(n)
z <- numeric(n)
# Start from the stationary distribution, then iterate the AR(1) recursion.
z[1] <- a[1] * sig / sqrt(1 - phi^2)
for (i in 2:n)
  z[i] <- phi * z[i - 1] + a[i] * sig
z <- z + mu
# Autocorrelation function of an AR(1) process: rho_k = phi^k.
r <- phi^(0:(n - 1))
meanMLE <- TrenchMean(r, z)
meanBLUE <- mean(z)
ans <- c(meanMLE, meanBLUE)
# Label in the same order the values were combined (MLE first, then BLUE);
# the original example had these two labels swapped.
names(ans) <- c("MLE", "BLUE")
ans
|
86fc52127e874c7b6c24094c831148ed9d96825e
|
dce03b2fc14d766358e899a86a560b967555b6b4
|
/plot2.R
|
089d9dd5695e3f5faf458909a5328319168e7c14
|
[] |
no_license
|
gitcub/ExData_Plotting1
|
ec3153eba14038cd4e2de3c8ff2678e6557ceb7c
|
cf8845079809422950ea30e18a0c589c2f809ab3
|
refs/heads/master
| 2020-12-02T16:35:49.144858
| 2015-11-08T18:13:44
| 2015-11-08T18:13:44
| 45,787,781
| 0
| 0
| null | 2015-11-08T16:09:53
| 2015-11-08T16:09:53
| null |
UTF-8
|
R
| false
| false
| 898
|
r
|
plot2.R
|
# Plot 2: line chart of Global Active Power over 2007-02-01/02, saved as plot2.png.

# Fetch and unpack the raw data set.
data_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(data_url, destfile = "dataset.zip", method = "curl", mode = "wb")
unzip("dataset.zip")
unlink("dataset.zip")

# Read the measurements; "?" encodes missing values.
power <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";",
                  stringsAsFactors = FALSE, na.strings = "?")
feb_days <- subset(power, Date %in% c("1/2/2007", "2/2/2007"))

# Combine date and time into POSIXlt timestamps; coerce the reading to numeric.
timestamps <- strptime(paste(feb_days$Date, feb_days$Time, sep = " "),
                       format = "%d/%m/%Y %H:%M:%S")
active_power <- as.numeric(feb_days$Global_active_power)

# Draw on the active device, then copy it to a 480x480 PNG.
plot(timestamps, active_power, type = "l", main = "Global Active Power",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
|
ec1bf3b1e559d941c5de799211ee9e64737ac57c
|
083f86ded1e5acc975958810d77a86b817b1cddf
|
/ui.R
|
26971bc907b6453e3e8167df4d526a68164f741f
|
[] |
no_license
|
arpignotti/DevDataProduct
|
259de4e0b722aee2fe2c8e1d24e967479d8b822b
|
45cfa3a2c255c0292db3717671bfb254e12e93a3
|
refs/heads/master
| 2021-01-10T13:56:51.353941
| 2015-12-22T01:53:19
| 2015-12-22T01:53:19
| 48,344,596
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,051
|
r
|
ui.R
|
library(shiny)
library(RCurl)
library(maps)
library(mapproj)
source("helper/helpers.R")

# Pull the Medicare Advantage market-share data set from GitHub at startup.
x <- getURL("https://raw.githubusercontent.com/arpignotti/DevDataProduct/master/Data/dset.csv")
dset <- read.csv(text = x)

# UI: sidebar with organization and region selectors; main panel renders the map.
shinyUI(pageWithSidebar(
  headerPanel("Medicare Advantage Parent Organizations' MA Market Share"),
  sidebarPanel(
    # input$var: parent organization whose market share is mapped
    selectInput("var",
                label = "Parent Organization:",
                choices = c("Aetna",
                            "Anthem",
                            'CIGNA',
                            'Health Net',
                            'Highmark',
                            'Humana',
                            "Kaiser",
                            "UnitedHealth",
                            'WellCare'),
                selected = "UnitedHealth"),
    # input$zoom: geographic extent for the rendered map
    selectInput("zoom",
                label = "Region:",
                choices = c("National", "Midwest","New England","Mid-Atlantic",
                            "Southeast","Southwest", "West"))
  ),
  mainPanel(
    # output$map is produced server-side (not visible in this file)
    plotOutput("map"))
))
|
ea24446586f7a43719b4bf71fa71b960fcf916fc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/semTools/examples/standardizeMx-deprecated.Rd.R
|
fbca0a4ea0997140aa79db7709047c0fb2d124a7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,960
|
r
|
standardizeMx-deprecated.Rd.R
|
library(semTools)
### Name: standardizeMx-deprecated
### Title: Find standardized estimates for OpenMx output
### Aliases: standardizeMx-deprecated
### Keywords: internal
### ** Examples
## Not run:
##D library(OpenMx)
##D data(myFADataRaw)
##D myFADataRaw <- myFADataRaw[,c("x1","x2","x3","x4","x5","x6")]
##D oneFactorModel <- mxModel("Common Factor Model Path Specification",
##D type="RAM",
##D mxData(
##D observed=myFADataRaw,
##D type="raw"
##D ),
##D manifestVars=c("x1","x2","x3","x4","x5","x6"),
##D latentVars="F1",
##D mxPath(from=c("x1","x2","x3","x4","x5","x6"),
##D arrows=2,
##D free=TRUE,
##D values=c(1,1,1,1,1,1),
##D labels=c("e1","e2","e3","e4","e5","e6")
##D ),
##D # residual variances
##D # -------------------------------------
##D mxPath(from="F1",
##D arrows=2,
##D free=TRUE,
##D values=1,
##D labels ="varF1"
##D ),
##D # latent variance
##D # -------------------------------------
##D mxPath(from="F1",
##D to=c("x1","x2","x3","x4","x5","x6"),
##D arrows=1,
##D free=c(FALSE,TRUE,TRUE,TRUE,TRUE,TRUE),
##D values=c(1,1,1,1,1,1),
##D labels =c("l1","l2","l3","l4","l5","l6")
##D ),
##D # factor loadings
##D # -------------------------------------
##D mxPath(from="one",
##D to=c("x1","x2","x3","x4","x5","x6","F1"),
##D arrows=1,
##D free=c(TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,FALSE),
##D values=c(1,1,1,1,1,1,0),
##D labels =c("meanx1","meanx2","meanx3","meanx4","meanx5","meanx6",NA)
##D )
##D # means
##D # -------------------------------------
##D ) # close model
##D # Create an MxModel object
##D # -----------------------------------------------------------------------------
##D oneFactorFit <- mxRun(oneFactorModel)
##D standardizeMx(oneFactorFit)
##D
##D # Compare with lavaan
##D library(lavaan)
##D script <- "f1 =~ x1 + x2 + x3 + x4 + x5 + x6"
##D fit <- cfa(script, data=myFADataRaw, meanstructure=TRUE)
##D standardizedSolution(fit)
## End(Not run)
|
519162315e1019f6d6edea8e2b744d987712621a
|
1e6b7b9682e5a5d51df135a8a644273d7cb7108b
|
/Amadeus_Summary_Regional.R
|
d3ce92810b659f27d14bafe4e790550da1ecf6db
|
[] |
no_license
|
Orbis-Amadeus-Oxford/Amadeus-Datawork
|
96b1eaa00464ad2676b14927d9468c0136ef501d
|
094d1c48384dbd514437ad08979aa7ce1add5b08
|
refs/heads/master
| 2020-04-01T21:03:32.278584
| 2019-09-25T14:13:50
| 2019-09-25T14:13:50
| 153,635,496
| 0
| 3
| null | 2020-02-24T15:29:57
| 2018-10-18T14:15:40
|
R
|
UTF-8
|
R
| false
| false
| 11,293
|
r
|
Amadeus_Summary_Regional.R
|
#trace(stop, quote(print(sys.calls())))
require(entropy) # for entropy
require(acid) # for weighted entropy
require(dplyr)
require(matrixStats) # for weighted sd and median
# Note that instead of using radiant.data.weighted.sd we might want to correct the sd for the sample size
# after considering weights (i.e. in terms of employees not in terms of number of firms). This function does
# not do that. But acid.weighted.moments is able to.
# auxiliary functions for aggregation
# Shannon entropy from entropy
# Empirical (plug-in) Shannon entropy of `dat` with NAs dropped.
# `w` and `na.rm` are accepted but ignored so the function matches the shared
# aggregation interface used by the weighted statistics below.
entrop <- function(dat, w = "ignored", na.rm = "ignored") {
  entropy.empirical(na.omit(dat))
}
# Weighted entropy from acid
# Problem: This results in many NaN values because it includes logs of dat which are undefined where dat<0.
# Weighted Shannon entropy via acid::weighted.entropy. Rows with NA in either
# the data or the weights are dropped first; `na.rm` is ignored (interface
# compatibility only).
# NOTE(review): weighted.entropy takes logs of the data, so negative values
# can yield NaN -- see the caveat in the comments above.
weighted.entrop <- function(dat, w, na.rm = "ignored") {
  complete <- na.omit(data.frame(dat = dat, w = w))
  weighted.entropy(complete$dat, w = complete$w)
}
# sd does not accept additional unused arguments, hence we must wrap it to be able to supply unused weights.
# Instead, we could use ("weights" %in% formalArgs(func)), to distinguish the two cases, but if this is the
# only function that cannot handle unused arguments, this is not necessary and would complicate the code.
# Wrapper around stats::sd that tolerates (and ignores) a weights argument,
# so it can be called through the same aggregation interface as the weighted
# statistics in this file.
#
# dat:   numeric vector
# na.rm: drop NAs before computing the standard deviation (default FALSE)
# w:     ignored; accepted only for interface compatibility. The original
#        default `w = ignored` referenced an undefined symbol and would have
#        errored if ever evaluated; the string default matches the sibling
#        functions (entrop, num_obs).
stdev <- function(dat, na.rm = FALSE, w = "ignored") {
  sd(dat, na.rm = na.rm)
}
# weighted sd from weightedVar in matrixStats
# Weighted standard deviation: square root of matrixStats::weightedVar.
# Note: this does not apply any small-sample correction for effective sample
# size (see the comments at the top of the file).
weighted.stdev <- function (dat, w, na.rm) {
  sqrt(weightedVar(dat, w = w, na.rm = na.rm))
}
# weighted median without interpolation from matrixStats
# Note that interpolated weighted medians have terribly many implementations in R some of which produce different
# results. See: https://stackoverflow.com/q/2748725/1735215
# The common implementation is the weighted percentile method see wikipedia: https://en.wikipedia.org/
# wiki/Percentile#Weighted_percentile but that seems to not be the only one.
# For now, we just use non-interpolated weighted medians.
# Non-interpolated weighted median via matrixStats::weightedMedian.
# interpolate = FALSE is deliberate: interpolated weighted medians differ
# across R implementations (see the discussion in the comments above).
weighted.median <- function (dat, w, na.rm) {
  weightedMedian(dat, w, interpolate = FALSE, na.rm = na.rm)
}
# Number of non NA observations
# Count of non-missing observations. `w` and `na.rm` are accepted but ignored
# so the function fits the shared aggregation interface.
num_obs <- function(dat, w = "ignored", na.rm = "ignored") {
  length(dat) - sum(is.na(dat))
}
# Compute descriptive statistics of capital productivity and profitability (returns on capital) by NUTS2 region
# Compute descriptive statistics (mean/median/sd/entropy/counts, plus their
# employee-weighted variants) of `stat_variables` by NUTS region and year for
# one country data file.
#
# nuts_code:          column name to aggregate by ("NUTS_0".."NUTS_3");
#                     "NUTS_0" is synthesized from country_short_code
# cfile:              path to an .Rda file that load()s the Cleaned_dat_*
#                     frames used below (shape assumed -- not visible here)
# country:            country name added to the output
# country_short_code: short code used as the NUTS_0 value
# stat_variables:     variables to summarize
#
# Returns a data frame keyed by region and year, or scalar NA when the file
# is empty or yields no rows.
desc_stat_by_file <- function(nuts_code, cfile, country, country_short_code, stat_variables = c("CP", "RoC")) {
  print(paste("Commencing", country, sep=" "))
  # load data file (brings Cleaned_dat_INDEX and friends into this scope)
  load(cfile, verbose=F)
  # will catch cases with empty data files
  if(nrow(Cleaned_dat_INDEX)==0) {
    return(NA)
  }
  # remove what we do not need
  if(nuts_code!="NUTS_0") {
    Cleaned_dat_INDEX <- subset(Cleaned_dat_INDEX, select = c(IDNR, Year, get(nuts_code)))
  } else {
    # country level: no NUTS column in the data, so synthesize one
    Cleaned_dat_INDEX <- subset(Cleaned_dat_INDEX, select = c(IDNR, Year))
    Cleaned_dat_INDEX["NUTS_0"] <- country_short_code
  }
  # merge into one frame, adding only columns not already present
  framelist <- list(Cleaned_dat_Productivity, Cleaned_dat_Profitability, Cleaned_dat_cost_structure, Cleaned_dat_firm_size, Cleaned_dat_RD)
  for (frame in framelist) {
    unique_columns <- !(colnames(frame) %in% colnames(Cleaned_dat_INDEX))
    unique_columns[match(c("IDNR", "Year"), colnames(frame))] <- TRUE
    Cleaned_dat_INDEX <- merge(Cleaned_dat_INDEX, frame[unique_columns], c("IDNR", "Year"))
    rm(frame)
  }
  #print(colnames(Cleaned_dat_INDEX))
  retained_columns <- c("Year", nuts_code, "EMPL", stat_variables)
  Cleaned_dat_INDEX <- subset(Cleaned_dat_INDEX, select = retained_columns)
  colnames(Cleaned_dat_INDEX) <- c("Year", "nuts_code", "EMPL", stat_variables)
  # weighted statistics can only use rows with a known employment weight
  Cleaned_dat_INDEX_weights <- Cleaned_dat_INDEX[!is.na(Cleaned_dat_INDEX$EMPL),]
  Cleaned_dat_INDEX$EMPL <- NULL
  # compute statistics by region (unweighted aggregations)
  for(func in list("mean", "median", "stdev", "entrop", "num_obs")) {
    agg <-aggregate(.~nuts_code+Year, Cleaned_dat_INDEX, FUN=func, na.rm=T, na.action=NULL)
    # drop rows without a NUTS record
    agg <- agg[!(agg[,"nuts_code"] == ""),]
    colnames(agg) <- c(nuts_code, "Year", paste(stat_variables, func, sep="_"))
    # NOTE(review): exists("all_results") searches enclosing environments too,
    # so a stale global of the same name could be picked up -- verify.
    if(exists("all_results")) {
      all_results <- merge(all_results, agg, c(nuts_code, "Year"))
    } else {
      all_results <- agg
    }
  }
  # compute statistics by region for dplyr (for variables that require weights)
  # NOTE(review): funs() is deprecated in recent dplyr releases.
  dplyr_flist = list(weighted.mean, weighted.median, weighted.stdev, weighted.entrop)
  dplyr_fnames = list("weighted.mean", "weighted.median", "weighted.stdev", "weighted.entrop")
  for(i in 1:length(dplyr_flist)) {
    func = dplyr_flist[[i]]
    func_name = dplyr_fnames[[i]]
    agg <- Cleaned_dat_INDEX_weights %>% group_by(nuts_code, Year) %>% summarise_at(vars(-EMPL,-Year,-nuts_code),funs(func(., EMPL, na.rm=T)))
    # Removing entries without NUTS record. This must control for empty results since agg[,"nuts_code"] will otherwise fail
    if(nrow(agg) > 0){
      agg <- agg[!(agg[,"nuts_code"] == ""),]
    }
    colnames(agg) <- c(nuts_code, "Year", paste(stat_variables, func_name, sep="_"))
    if(exists("all_results")) {
      all_results <- merge(all_results, agg, c(nuts_code, "Year"))
    } else {
      all_results <- agg
    }
  }
  # will catch cases in which the results frame has no elements (presumably because of too few observations for each region)
  if(nrow(all_results)==0) {
    return(NA)
  }
  # add country to results and return
  all_results$Country <- country
  return(all_results)
}
# Iterate over the per-country data files, compute descriptive statistics for
# each via desc_stat_by_file(), and stack the per-country results row-wise.
#
# Args:
#   nuts_code:      NUTS column name, e.g. "NUTS_2"
#   filenames:      list/vector of .Rda data file paths, one per country
#   country_names:  country names, aligned element-wise with 'filenames'
#   country_short:  short country codes, aligned element-wise with 'filenames'
#   stat_variables: variables for which the statistics are computed
#
# Returns: a data.frame combining the results of all countries (NULL when
#          every country was skipped).
desc_stat_all_files <- function (nuts_code, filenames, country_names, country_short, stat_variables = c("CP", "RoC")) {
  nfiles <- length(filenames)
  results <- vector("list", nfiles)  # preallocate instead of growing via rbind
  for (i in seq_len(nfiles)) {
    res <- desc_stat_by_file(nuts_code, filenames[[i]], country_names[[i]],
                             country_short[[i]], stat_variables)
    # desc_stat_by_file() returns the scalar NA for empty inputs and a data
    # frame on success. The original test 'if(!is.na(res))' errors (R >= 4.2)
    # or warns on a data frame because is.na() is then non-scalar; test the
    # type instead.
    if (is.data.frame(res)) {
      results[[i]] <- res
    }
  }
  # rbind ignores the NULL entries left by skipped countries
  return(do.call(rbind, results))
}
# main entry point
# NUTS level. May be {0, 1, 2, 3}
nuts_level <- 2
nuts_code <- paste("NUTS", nuts_level, sep = "_")
# variables for which the descriptive statistics are to be computed
stat_variables <- c("CP", "RoC", "PW_ratio", "TOAS", "LP", "CP_change", "C_com", "Zeta")
# Stats variables could include any or all of the following:
# [1] "LP"
# [5] "CP"              "LP_change"       "CP_change"       "Zeta"
# [9] "RoC"             "RoC_fix"         "RoC_RCEM"        "RoC_RTAS"
#[13] "WS"              "PS"              "PW_ratio"        "C_com"
#[17] "PW_ratio_change" "PW_ratio_lr"     "SALE"            "EMPL"
#[21] "TOAS"            "SALE_change"     "EMPL_change"     "VA"
#[25] "SALE_lr"         "EMPL_lr"         "TOAS_lr"         "RD"
#[29] "TOAS.1"          "CUAS"            "FIAS"            "IFAS"
#[33] "TFAS"            "OCAS"            "OFAS"
# input files; MUST stay aligned element-wise with country_names and
# country_short below.
# NOTE(review): a second, shorter 'filenames' assignment (39 entries) used to
# overwrite the full 44-entry list here. It dropped Bosnia and Herzegovina,
# Macedonia, Romania, Slovenia and Ukraine while country_names/country_short
# kept all 44 entries, so files and countries were misaligned from the fifth
# element onward. The overriding assignment has been removed; if some data
# files are unavailable, remove the matching entries from all three vectors.
filenames = c('panels_J!&Albania.Rda', 'panels_J!&Austria.Rda', 'panels_J!&Belarus.Rda', 'panels_J!&Belgium.Rda', 'panels_J!&Bosnia and Herzegovina.Rda', 'panels_J!&Bulgaria.Rda', 'panels_J!&Croatia.Rda', 'panels_J!&Cyprus.Rda', 'panels_J!&Czech Republic.Rda', 'panels_J!&Denmark.Rda', 'panels_J!&Estonia.Rda', 'panels_J!&Finland.Rda', 'panels_J!&France.Rda', 'panels_J!&Germany.Rda', 'panels_J!&Greece.Rda', 'panels_J!&Hungary.Rda', 'panels_J!&Iceland.Rda', 'panels_J!&Ireland.Rda', 'panels_J!&Italy.Rda', 'panels_J!&Kosovo.Rda', 'panels_J!&Latvia.Rda', 'panels_J!&Liechtenstein.Rda', 'panels_J!&Lithuania.Rda', 'panels_J!&Luxembourg.Rda', 'panels_J!&Macedonia, FYR.Rda', 'panels_J!&Malta.Rda', 'panels_J!&Moldova.Rda', 'panels_J!&Monaco.Rda', 'panels_J!&Montenegro.Rda', 'panels_J!&Netherlands.Rda', 'panels_J!&Norway.Rda', 'panels_J!&Poland.Rda', 'panels_J!&Portugal.Rda', 'panels_J!&Romania.Rda', 'panels_J!&Russian Federation.Rda', 'panels_J!&Serbia.Rda', 'panels_J!&Slovakia.Rda', 'panels_J!&Slovenia.Rda', 'panels_J!&Spain.Rda', 'panels_J!&Sweden.Rda', 'panels_J!&Switzerland.Rda', 'panels_J!&Turkey.Rda', 'panels_J!&Ukraine.Rda', 'panels_J!&United Kingdom.Rda')
country_names = c("Albania", "Austria", "Belarus", "Belgium", "Bosnia and Herzegovina", "Bulgaria", "Croatia", "Cyprus", "Czech Republic", "Denmark", "Estonia", "Finland", "France", "Germany", "Greece", "Hungary", "Iceland", "Ireland", "Italy", "Kosovo", "Latvia", "Liechtenstein", "Lithuania", "Luxembourg", "Macedonia", "Malta", "Moldova", "Monaco", "Montenegro", "Netherlands", "Norway", "Poland", "Portugal", "Romania", "Russian Federation", "Serbia", "Slovakia", "Slovenia", "Spain", "Sweden", "Switzerland", "Turkey", "Ukraine", "United Kingdom")
country_short = c("AL", "AT", "BY", "BE", "BH", "BG", "HR", "CY", "CZ", "DK", "EE", "FI", "FR", "DE", "GR", "HU", "IS", "IE", "IT", "XK", "LV", "LI", "LT", "LU", "MK", "MT", "MD", "MC", "ME", "NL", "NO", "PL", "PT", "RO", "RU", "RS", "SK", "SI", "ES", "SE", "CH", "TK", "UA", "UK")
# small subset useful for debugging:
#filenames = c("panels_J!&Austria.Rda", "panels_J!&Serbia.Rda")
#country_names = c("Austria", "Serbia")
desc_stats <- desc_stat_all_files(nuts_code, filenames, country_names, country_short, stat_variables)
print(desc_stats)
# save descriptive statistics
output_file_name <- paste(paste("Reg", nuts_code, sep = "_"), "desc_stats.Rda", sep = "_")
save(desc_stats, file = output_file_name)
|
07884ffae0db6142a2ab9c66d5ebba5856afa539
|
5038954117333b59dda21d2d12a05fc6fc82061b
|
/man/topN_mat.Rd
|
5a7c958bc05cb1c7d5063c6ef3d6e538f9256e2e
|
[] |
no_license
|
fossbert/binilib
|
b4887c996d2fbbcdfb7c4f988822460a9e37e37e
|
173e5fe7e4fb16dec10e1c57fe30cd155a5b895a
|
refs/heads/master
| 2021-06-05T14:55:00.691039
| 2021-04-19T09:20:05
| 2021-04-19T09:20:05
| 157,078,242
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,080
|
rd
|
topN_mat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions.R
\name{topN_mat}
\alias{topN_mat}
\title{For a numeric matrix containing samples or conditions in its columns and typically genes
in its rows, this function will find the top N genes for a given condition. You can choose to retrieve
both genes with highest and lowest values, respectively or either one of them.}
\usage{
topN_mat(mat, direction = c("both", "up", "down"), nn = 25, verbose = TRUE)
}
\arguments{
\item{mat}{numeric matrix}
\item{direction}{character, which tail should be returned, defaults to both up and down}
\item{nn}{integer indicating how many genes to isolate for each tail (default = 25)}

\item{verbose}{logical; print informative messages? (default = TRUE) --- presumably controls progress output; confirm against the implementation}
}
\value{
a data frame with gene names from the rownames of the input matrix
}
\description{
For a numeric matrix containing samples or conditions in its columns and typically genes
in its rows, this function will find the top N genes for a given condition. You can choose to retrieve
both genes with highest and lowest values, respectively or either one of them.
}
|
d07daf150549245b8223dfe1eb29342ef3ea81b6
|
60820a424e8088dd9c588561b874f9f9b493afad
|
/stats/ToLog_function.R
|
b72aadcb8b3fc44b243364855b4ce436506ebae9
|
[] |
no_license
|
PWaryszak/Jena_Exp
|
20a3ab6f29c692e387ac5918123d2927800c3331
|
95f0a10c363ed95a8b4ef9ed3871df3631d3b178
|
refs/heads/master
| 2021-05-30T16:59:45.393286
| 2015-07-22T13:38:53
| 2015-07-22T13:38:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,583
|
r
|
ToLog_function.R
|
#########################
#Functions investigating log-transformation vs GLM
#author Lionel Hertzog, date 21.07.2015
#code heavily based on the supplementary material of
#Ives, A. R. (2015), For testing the significance of regression coefficients, go ahead and log-transform count data. Methods in Ecology and Evolution, 6: 828–835. doi: 10.1111/2041-210X.12386
###################################################################
### Univariate, negative binomial functions (Figs. 1 and 2A-C)
# Fit one model of the count response y on covariate x and return the p-value
# and estimate of the slope.
#
# Args:
#   y:              count response vector
#   x:              covariate vector
#   add:            constant added to y before log-transforming (used only
#                   when transformation == "log")
#   transformation: one of "negbin" (glm.nb), "qpois" (quasi-Poisson GLM),
#                   "glmm" (Poisson GLMM with an observation-level random
#                   effect), or "log" (linear model on log(y + add))
#
# Returns: c(p-value of slope, slope estimate); c(NA, NA) when the fit fails
#          or does not converge.
EstPvalue = function(y, x, add = 0, transformation) {
  # Defaults returned on failure/non-convergence. Initialising them here
  # replaces the original exists("val") check, which could silently pick up a
  # stale 'val'/'b1.est' from the global environment when a fit errored out.
  val <- NA
  b1.est <- NA
  if (transformation == "negbin") {
    # try() swallows glm.nb failures; val/b1.est then keep their NA defaults
    try({
      z <- glm.nb(y ~ x, control = glm.control(maxit = 30))
      if (z$converged) {
        val <- summary(z)[[11]][8]   # p-value of the slope in the coefficient table
        b1.est <- coef(z)[2]
      }
    })
  }
  if (transformation == "qpois") {
    z <- glm(y ~ x, family = quasipoisson, control = list(maxit = 1000))
    if (z$converged) {
      val <- summary(z)[[12]][8]
      b1.est <- coef(z)[2]
    }
  }
  if (transformation == "glmm") {
    try({
      # observation-level random effect -> lognormal-Poisson model
      z <- glmer(y ~ x + (1 | as.factor(seq_along(x))), family = "poisson")
      conv <- (attributes(z)$optinfo$conv$opt == 0)
      if (conv) {
        val <- summary(z)[[10]][2, 4]
        b1.est <- fixef(z)[2]
      }
    })
  }
  if (transformation == "log") {
    z <- lm(log(y + add) ~ x)
    val <- summary(z)[[4]][8]        # Pr(>|t|) of the slope
    b1.est <- coef(z)[2]
  }
  return(c(val, b1.est))
}
# For one simulated data set, fit the chosen model to every replicate column
# of Data$y and summarise: the rejection rate of H0 (share of p-values below
# alpha) and the mean bias of the slope estimate relative to the true b1.
EstStats.pvalue <- function(Data, transformation, b1, Add = 0, alpha = 0.05) {
  # Each column of Data$y is one replicate; EstPvalue returns c(p-value, slope)
  fit_one <- function(col) {
    EstPvalue(y = col, x = Data$x, transformation = transformation, add = Add)
  }
  stats <- apply(Data$y, 2, fit_one)          # 2 x NRep matrix
  rejection_rate <- mean(stats[1, ] < alpha, na.rm = TRUE)
  slope_bias <- mean(stats[2, ] - b1, na.rm = TRUE)
  c(rejection_rate, slope_bias)
}
# Fit every candidate model to the simulated data and collect, per model, the
# H0 rejection rate and the mean bias of the slope estimate.
GetAnalyses = function(Data, alpha = 0.05, b1, GLMM = FALSE) {
  nb_res <- EstStats.pvalue(Data, transformation = "negbin", b1)
  qp_res <- EstStats.pvalue(Data, transformation = "qpois", b1)
  # log(y + 0.5): replace zero counts by one half before log-transforming
  half_data <- Data
  half_data$y[half_data$y == 0] <- 0.5
  log_half <- EstStats.pvalue(half_data, transformation = "log", b1)
  log_one <- EstStats.pvalue(Data, transformation = "log", b1, Add = 1)
  log_tiny <- EstStats.pvalue(Data, transformation = "log", b1, Add = 1e-04)
  if (GLMM) {
    glmm_res <- EstStats.pvalue(Data, transformation = "glmm", b1)
    models <- c("NB", "QuasiP", "GLMM", "LogHalf", "Log1", "Log0001")
    rows <- list(nb_res, qp_res, glmm_res, log_half, log_one, log_tiny)
  } else {
    models <- c("NB", "QuasiP", "LogHalf", "Log1", "Log0001")
    rows <- list(nb_res, qp_res, log_half, log_one, log_tiny)
  }
  data.frame(Model = models,
             Reject = vapply(rows, function(r) r[[1]], numeric(1)),
             Bias = vapply(rows, function(r) r[[2]], numeric(1)))
}
# Simulate negative-binomial count data over a grid of parameter settings and
# fit all candidate models to each setting; results are stacked row-wise.
compute.stats <- function(NRep = 50, b1.range = 0, n.range = 100, dispersion.range = 1, b0.range = log(1), alpha = 0.05, seed = 20150721) {
  # all combinations of the simulation parameters
  params <- expand.grid(b1 = b1.range, n = n.range, disper = dispersion.range, b0 = b0.range)
  run_one <- function(b1, n, disper, b0) {
    set.seed(seed)  # identical random draws across parameter combinations
    x <- runif(n, -2, 2)
    mu <- exp(b0 + b1 * x)
    y <- replicate(NRep, rnbinom(n, disper, mu = mu))
    res <- GetAnalyses(list(x = x, y = y), alpha, b1)
    res$b1 <- b1
    res$n <- n
    res$disper <- disper
    res$b0 <- b0
    res
  }
  pieces <- mapply(run_one, b1 = params$b1, b0 = params$b0,
                   n = params$n, disper = params$disper, SIMPLIFY = FALSE)
  rbind.fill(pieces)
}
#########################################################################
### Univariate, lognormal-Poisson hierarchical model functions
# Simulate lognormal-Poisson data (Poisson counts with a normal random effect
# on the log scale) over a grid of parameter settings and fit all candidate
# models, including the Poisson GLMM, to each setting.
compute.statsGLMM <- function(NRep = 50, b1.range = 0, n.range = 100, sd.eps.range = 1, b0.range = log(1), alpha = 0.05, seed = 20150723) {
  params <- expand.grid(b1 = b1.range, n = n.range, b0 = b0.range, sd.eps = sd.eps.range)
  run_one <- function(b1, n, b0, sd.eps) {
    set.seed(seed)  # identical random draws across parameter combinations
    x <- runif(n, -2, 2)
    eps <- rnorm(n, 0, sd.eps)
    lambda <- exp(b0 + b1 * x + eps)
    y <- replicate(NRep, rpois(n, lambda))
    res <- GetAnalyses(list(x = x, y = y), alpha, b1, GLMM = TRUE)
    res$b1 <- b1
    res$n <- n
    res$b0 <- b0
    res$sd.eps <- sd.eps
    res
  }
  pieces <- mapply(run_one, b1 = params$b1, b0 = params$b0,
                   n = params$n, sd.eps = params$sd.eps, SIMPLIFY = FALSE)
  rbind.fill(pieces)
}
|
559e6a62ef1af26b3f560c262155d376fda4ec09
|
3a9b76a8f5b8b7cb931471830dbbe202b388695a
|
/man/runRECA.Rd
|
930b939b66312a9617212378a067bafbf88dae3e
|
[] |
no_license
|
edvinf/prepRECA
|
3411ff4fdcd85608f91ee687cd7d7f8c4226f8f9
|
85dd2bb39c4c408a98678d01cfd280627f00e2be
|
refs/heads/master
| 2020-09-01T03:10:04.259873
| 2019-11-06T15:38:29
| 2019-11-06T15:38:29
| 218,865,912
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,052
|
rd
|
runRECA.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepRECA.R
\name{runRECA}
\alias{runRECA}
\title{Run R-ECA}
\usage{
runRECA(RecaObj, nSamples, burnin, lgamodel = "log-linear",
fitfile = "fit", predictfile = "pred", resultdir = NULL,
thin = 10, delta.age = 0.001, seed = NULL, caa.burnin = 0)
}
\arguments{
\item{RecaObj}{as returned from \code{\link[prepRECA]{prepRECA}}}
\item{nSamples}{number of MCMC samples that will be made available for \code{\link[Reca]{eca.predict}}. See documentation for \code{\link[Reca]{eca.estimate}},}
\item{burnin}{number of MCMC samples run and discarded by \code{\link[Reca]{eca.estimate}} before any samples are saved. See documentation for \code{\link[Reca]{eca.estimate}}.}
\item{lgamodel}{The length age relationship to use for length-age fits (options: "log-linear", "non-linear": Schnute-Richards model). See documentation for \code{\link[Reca]{eca.estimate}}.}
\item{fitfile}{name of output files in resultdir. See documentation for \code{\link[Reca]{eca.estimate}}.}
\item{predictfile}{name of output files in resultdir. See documentation for \code{\link[Reca]{eca.predict}}.}
\item{resultdir}{a directory where Reca may store temp-files \code{\link[Reca]{eca.estimate}} and \code{\link[Reca]{eca.predict}}. . If NULL, a temporary directory will be created. See documentation for \code{\link[Reca]{eca.estimate}}.}
\item{thin}{controls how many iterations are run between each samples saved. This may be set to account for autocorrelation introduced by Metropolis-Hastings simulation. see documentation for \code{\link[Reca]{eca.estimate}}}
\item{delta.age}{see documentation for \code{\link[Reca]{eca.estimate}}}
\item{seed}{see documentation for \code{\link[Reca]{eca.estimate}}}
\item{caa.burnin}{see documentation for \code{\link[Reca]{eca.predict}}}
}
\value{
list() with elements:
\describe{
\item{fit}{as returned by \code{\link[Reca]{eca.estimate}}}
\item{prediction}{as returned by \code{\link[Reca]{eca.predict}}}
\item{covariateMaps}{list() mapping from Reca covariate encoding to values fed to \code{\link[prepRECA]{prepRECA}}. As on parameter 'RecaObj'}
}
}
\description{
Runs \code{\link[Reca]{eca.estimate}} and \code{\link[Reca]{eca.predict}}.
}
\details{
\code{\link[Reca]{eca.estimate}} performs Markov-chain Monte Carlo (MCMC) simulations to determine maximum likelihood of parameters for the given samples.
\code{\link[Reca]{eca.predict}} samples the posterior distributions of parameters estimated in \code{\link[Reca]{eca.estimate}},
in order to obtain proportions of catches and fish parameters.
Using these parameters and the given total landings, predictions of catch-parameter distributions will be calculated.
If resultdir is NULL, a temporary directory will be created for its purpose.
This will be attempted removed after execution.
If removal is not successful a warning will be issued which includes the path to the temporary directory.
}
\examples{
\dontrun{runRECA(prepRECA::recaPrepExample, 500, 500)}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.