blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
34b610674482a7e7b16f5476969d94d2d54cea35
|
cbec21b1f1959bdfa922b3eec5706432d3d89bdc
|
/Examples/AD-MA_examples.R
|
3110f6275cfb7c0da89d35e133ee988537f124cb
|
[] |
no_license
|
MichailBelias/meta-analysis
|
b3a28b11ca2eda505bb35d08181b1413fdb6f047
|
605fc3516a8c36276907d0ee696b387af80868bf
|
refs/heads/master
| 2022-02-13T16:34:23.824998
| 2019-02-25T12:26:22
| 2019-02-25T12:26:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,689
|
r
|
AD-MA_examples.R
|
## ----echo=F, warning=FALSE, message=FALSE--------------------------------
# Setup chunk: configure knitr figure options for this extracted vignette.
library(knitr)
opts_chunk$set(fig.width = 9, fig.height = 6, fig.path = 'Figs/',
echo = F, warning = FALSE, message = FALSE, fig.pos = "H", comment = "")
## ------------------------------------------------------------------------
# install.packages('metafor')
library(metafor)
# Fix: metabin() comes from the 'meta' package and ggscatter() from
# 'ggpubr'; neither was attached, so the calls further down failed with
# "could not find function" at run time.
library(meta)
library(ggpubr)
## ------------------------------------------------------------------------
# BCG vaccine trial data set shipped with metafor.
dat.bcg
## ----echo=F,message=FALSE, out.width = "50%", out.height="50%"----------
library(knitr)
#img1 <- readPNG("Figures/2x2.png", native = TRUE, info = TRUE)
include_graphics("Figures/2x2.png")
## ---- fig.height=5, fig.cap="2x2 matrix", fig.width=5--------------------
# Compute per-trial log risk ratios (yi) and sampling variances (vi).
dat <- escalc(measure = "RR", ai = tpos, bi = tneg, ci = cpos, di = cneg, data = dat.bcg)
dat
## ------------------------------------------------------------------------
# Hand-computed risk ratios, to illustrate what escalc() produced above.
Risk_exposed <- with(dat, tpos / (tpos + tneg))
Risk_unexposed <- with(dat, cpos / (cpos + cneg))
RR <- Risk_exposed / Risk_unexposed
log_RR <- log(RR)
## ------------------------------------------------------------------------
# NOTE(review): the original script first assigned FE <- rma(..., method =
# "FE") and then immediately overwrote it with the metabin() fit below, so
# the rma() fit was dead code; it is kept here for reference only.
# FE <- rma(data = dat, yi = yi, vi = vi,
#           slab = paste(author, year), # trial labels
#           method = "FE")
# Fixed-effect meta-analysis of the raw 2x2 counts with meta::metabin().
FE <- metabin(measure = "RR", event.e = tpos, n.e = (tneg + tpos),
event.c = cpos, n.c = (cneg + cpos),
data = dat.bcg, studlab = paste(author, year))
forest(FE)
# Inverse-variance weights for the scatter plot below.
dat$weights <- 1 / dat$vi
# NOTE(review): ggscatter() expects a column name for `x`; x = 0.2 looks
# wrong -- confirm which variable was intended on the x axis.
ggscatter(data = dat, y = "yi", x = 0.2, size = "weights")
## ------------------------------------------------------------------------
forest(FE, atransf = exp, showweights = TRUE)
## ------------------------------------------------------------------------
# DerSimonian-Laird random-effects model.
RE <- rma(yi, vi, data = dat, method = "DL", slab = author)
RE
forest(RE)
## ------------------------------------------------------------------------
?rma
## ------------------------------------------------------------------------
# Random-effects model with the empirical Bayes tau^2 estimator.
RE.EB <- rma(yi, vi, data = dat, method = "EB", slab = author)
RE.EB
forest(RE.EB)
## ------------------------------------------------------------------------
# Mixed-effects meta-regression on absolute latitude, centred at its mean.
res.ME <- rma(yi, vi, mods = ~I(ablat - 33.46), data = dat, method = "EB")
res.ME
## ------------------------------------------------------------------------
anova(RE, res.ME)
## ------------------------------------------------------------------------
predict(res.ME, newmods = c(33.46, 42) - 33.46, transf = exp, digits = 2)
## ------------------------------------------------------------------------
# Same meta-regression, fixed-effects version.
res.FE <- rma(yi, vi, mods = ~I(ablat - 33.46), data = dat, method = "FE")
res.FE
## ------------------------------------------------------------------------
predict(res.FE, newmods = c(33.46, 42) - 33.46, transf = exp, digits = 2)
|
69bf7f19d197d84c02b95e0e14553ff8ffb95b60
|
7d4211b87ce623e242e5fe4fe2379a45c6d48160
|
/database_script.r
|
f4501f11c20017ed34bcb5210aaacfe33a1e60f8
|
[] |
no_license
|
erichoffman1217/mls_data
|
858b7babb8af2b96f4b242772c6db0aa22618946
|
4a0a8e8834a5619a862376548ae48532ecd2581c
|
refs/heads/master
| 2020-04-08T21:29:17.622078
| 2019-01-23T04:06:05
| 2019-01-23T04:06:05
| 159,746,109
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,004
|
r
|
database_script.r
|
## database_script.r: load the raw MLS database extracts and do light
## pre-processing. All raw tables are "|"-delimited text files.
library(tidyverse)
library(readxl)

# Raw database tables.
team_db <- read_delim("database/raw/team_db.txt",
                      "|", escape_double = FALSE, trim_ws = TRUE)
game_db <- read_delim("database/raw/game_db.txt",
                      "|", escape_double = FALSE, trim_ws = TRUE)
players_db <- read_delim("database/raw/players_db.txt",
                         "|", escape_double = FALSE, trim_ws = TRUE)
goalie_db <- read_delim("database/raw/goalie_db.txt",
                        "|", escape_double = FALSE, trim_ws = TRUE)

# Lookup tables: team key workbook and inter-team distance matrix.
team_key <- read_excel("~/R/mls_data/team_key.xlsx")
dist_matrix <- read_delim("dist_matrix.txt",
                          "|", escape_double = FALSE, trim_ws = TRUE)

### Team
# Row counts per team number (sanity check on the team table).
test <- team_db %>%
  group_by(team_no) %>%
  count(team_no)

### Game
# Attach the distance between stadiums to every game, keyed on the
# home/away team pair.
dist_matrix <- dist_matrix %>%
  select(home_team, away_team, dist_1)
game_db <- merge(game_db, dist_matrix,
                 by.x = c("home_team_no", "away_team_no"),
                 by.y = c("home_team", "away_team"))

### Players
### Goalie
|
04910cb6f853a834b66380b11eeae3192029df0a
|
5a72adedad0a87f05d38c1d7949ab5cf2cb8e6a8
|
/run_model_IRT.R
|
0364a66a2f55fe6689c92f6db413f9e57f13329b
|
[] |
no_license
|
saudiwin/arab_tweets
|
c81907de0e1b5f6aeaca45006dda62732b96bab4
|
b0334835b2a5a39e096dc315d6ce5c076e18f888
|
refs/heads/master
| 2021-05-02T16:59:04.737350
| 2020-09-23T08:14:14
| 2020-09-23T08:14:14
| 72,550,558
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,361
|
r
|
run_model_IRT.R
|
# run_model_IRT.R: fit a time-varying item-response-theory (IRT) model to
# Tunisian and Egyptian elite/citizen retweet data with Stan, then plot
# posterior diagnostics and ideological positions over time.
# Loading data
require(dplyr)
require(tidyr)
require(RSQLite)
require(rstan)
require(bayesplot)
require(ggplot2)
require(readr)
require(forcats)
require(googledrive)
#Load in codings
# elite_coding <- read_csv('data/Coding Comparison - Sheet1.csv') %>%
# mutate(final_code=coalesce(`Dana Coding`,
# `Hana Coding`)) %>%
# separate(final_code,into=c('Religion','Regime'),sep='-') %>%
# mutate(country=c(rep('Tunisia',64),
# rep('Egypt',n()-64)),
# coding=paste0(Religion,'_',country)) %>%
# filter(!is.na(Religion)) %>%
# mutate(coding_num=as.numeric(factor(coding)),
# Username=tolower(Username))
# Load in revised codings
# Elite account codings (ideology x country); keep only coder "dana".
elite_codings2 <- read_csv('data/check_complete.csv') %>%
mutate(coding=paste0(coding,'_',Country),
coding_num=as.numeric(factor(coding)),
Username=tolower(Username)) %>%
filter(person=='dana')
#SQLite databases
# Unique-retweet tables for each country.
all_tunis <- dbConnect(SQLite(),'data/tunis_tweets.sqlite')
all_egypt <- dbConnect(SQLite(),'data/egypt_tweets.sqlite')
tunis_rts <- dbReadTable(all_tunis,'unique_rts')
egypt_rts <- dbReadTable(all_egypt,'unique_rts')
# get rid of all SNs who RT less than 3 different people
# The double count() first tallies (citizen, elite) pairs, then counts the
# number of distinct retweeted elites per citizen (rt_ids); keep n > 2.
filter_tunis <- group_by(tunis_rts,rt_ids,username) %>% count %>% group_by(rt_ids) %>% count() %>% filter(n>2)
filter_tunis %>% ggplot(aes(x=n)) +
geom_histogram() +
theme_minimal() +
theme(panel.grid=element_blank()) +
xlab('Number of Unique Retweeted Elites') +
ylab('Count of Citizen Users') +
geom_vline(aes(xintercept=mean(n)),
size=1,colour='red',
linetype=3)
ggsave('tunis_users_RTS.png')
# same for egypt
filter_egypt <- group_by(egypt_rts,rt_ids,username) %>% count %>% group_by(rt_ids) %>% count() %>% filter(n>2)
filter_egypt %>% ggplot(aes(x=n)) +
geom_histogram() +
theme_minimal() +
theme(panel.grid=element_blank()) +
xlab('Number of Unique Retweeted Elites') +
ylab('Count of Citizen Users') +
geom_vline(aes(xintercept=mean(n)),
size=1,colour='red',
linetype=3)
ggsave('egypt_users_RTS.png')
# Pool both countries, restricted to the filtered citizen accounts.
combined_data <- bind_rows(filter(egypt_rts,rt_ids %in% filter_egypt$rt_ids),
filter(tunis_rts,rt_ids %in% filter_tunis$rt_ids)) %>%
mutate(username=tolower(username))
# need to make the coup indicator & change time to three day intervals
coup_day <- lubridate::yday('2013-07-03')
old_days <- min(combined_data$time):max(combined_data$time)
seq_val <- floor(length(old_days)/3)
new_days <- rep(1:seq_val,each=3)
# Pad the last (partial) 3-day bin so new_days aligns with old_days.
if(length(new_days)<length(old_days)) {
new_days <- c(new_days,rep(seq_val+1,times=length(old_days)-length(new_days)))
}
# NOTE(review): `(old_days %% 3)` is used as a logical here, i.e. "day of
# year not divisible by 3"; confirm this is the intended way to pick the
# first post-coup 3-day bin.
coup_day_new <- new_days[which(old_days>coup_day & (old_days %% 3))[1]]
# NOTE(review): data_frame() is deprecated in newer tibble releases;
# tibble() is the modern equivalent.
times <- data_frame(time=old_days,time_three=new_days,
coup=if_else(time_three>coup_day_new,2L,1L),
coup_day=coup_day)
combined_data <- left_join(combined_data,
times)
times <- distinct(times,time_three,coup)
# Attach elite codings and tally retweets per (3-day bin, coding, citizen).
combined_data_small <- left_join(combined_data,
elite_codings2,
by=c('username'='Username')) %>%
group_by(time_three,
coding_num,
rt_ids,coup) %>% tally
# drop missing
combined_data_small_nomis <- filter(combined_data_small,!is.na(coding_num))
# drop the random six in the dataset
# combined_data_small_nomis <- mutate(combined_data_small_nomis,
# nn=if_else(nn==6,4L,nn))
# let's look at histograms of tweets
lookat <- group_by(combined_data_small_nomis,time_three,coding_num) %>% summarize(sum_count=sum(nn)) %>%
mutate(Series=recode(as.character(coding_num),`1`='Islamist Egypt',
`2`='Islamist Tunisia',
`3`='Secularist Egypt',
`4`='Secularist Tunisia'))
ggplot(lookat,aes(y=sum_count,x=time_three)) + geom_path() + theme_minimal() + facet_wrap(~Series,scales='free_y') +
ylab('') + xlab('') +
scale_x_continuous(breaks=c(6,32,75),
labels=c('2013-03-31','2013-07-02','2013-11-08')) +
geom_vline(aes(xintercept=32),linetype=3)
ggsave('retweets_counts.png')
# types of retweets over time
lookat_c_ret <- group_by(combined_data_small_nomis,time_three,coding_num) %>% summarize(onet=sum(nn==1),
twot=sum(nn==2),
threet=sum(nn==3),
fourt=sum(nn==4))
# Share of each citizen's retweets going to each ideological grouping.
lookat_cit_ratio <- group_by(combined_data_small_nomis,rt_ids,coding_num) %>% tally %>%
group_by(rt_ids) %>%
mutate(prop_group=n/sum(n))
lookat_cit_top <- lookat_cit_ratio %>%
filter(prop_group>.8) %>%
group_by(coding_num) %>%
top_n(2,n)
# Citizens who retweet a single grouping exclusively; these are removed
# below before re-indexing citizen ids.
lookat_cit_patriot <- lookat_cit_ratio %>%
filter(prop_group==1)
combined_data_small_nomis <- anti_join(combined_data_small_nomis,lookat_cit_patriot,by='rt_ids') %>%
ungroup() %>%
mutate(cit_ids=as.numeric(factor(rt_ids)))
# start_func <- function() {
# list(alpha=rbind(matrix(c(-1,-1,1,1),ncol=4),
# matrix(rep(0, (max(combined_data_small_nomis$time_three)-1)*4),ncol=4)),
# gamma1=c(0.5,0.5),
# gamma2=c(0.5,0.5),
# ts_sigma=rep(0.25,4),
# adj=c(1,1),
# mean_delta=0,
# mean_beta=0,
# sigma_beta=1,
# sigma_delta=1,
# beta=rnorm(max(combined_data_small_nomis$cit_ids)),
# delta=rnorm(max(combined_data_small_nomis$cit_ids)),
# gamma_par1=0,
# gamma_par2=0)
# }
# run it again, and this time constrain deltas
# get_time <- rstan::extract(out_fit,pars='delta',permute=T)$delta
#
# mean_vals <- apply(get_time,2,mean)
# sd_vals <- apply(get_time,2,sd)
# filtered <- data_frame(mean_vals,sd_vals,discrim_id=1:ncol(get_time)) %>%
# filter(sd_vals<2)
#
# # number to identify
# id_num_high <- 40
# id_num_low <- 4
# top_two <- dplyr::arrange(filtered,desc(mean_vals)) %>% slice(1:id_num_high) %>% pull(discrim_id)
# bottom_two <- dplyr::arrange(filtered,mean_vals) %>% slice(1:id_num_low) %>% pull(discrim_id)
#
# new_vals <- factor(combined_data_small_nomis$cit_ids) %>% fct_relevel(as.character(c(top_two,bottom_two))) %>%
# as.numeric
#
# new_vals[which(combined_data_small_nomis$cit_ids %in% top_two)]
#
# combined_data_small_nomis$cit_ids <- new_vals
# code_compile <- stan_model(file='ord_irt_v1.stan')
#
# out_fit <- vb(code_compile,
# data=list(J=max(combined_data_small_nomis$coding_num),
# K=max(combined_data_small_nomis$cit_ids),
# `T`=max(combined_data_small_nomis$time_three),
# N=nrow(combined_data_small_nomis),
# C=3,
# jj=combined_data_small_nomis$coding_num,
# kk=combined_data_small_nomis$cit_ids,
# tt=combined_data_small_nomis$time_three,
# y=as.integer(combined_data_small_nomis$nn),
# coup=as.integer(floor(max(combined_data_small_nomis$time_three)/2)),
# start_vals=c(-1,-1,1,1),
# time_gamma=times$coup[-nrow(times)]),
# init=start_func)
#
# # run it again, and this time constrain deltas
#
# get_time <- rstan::extract(out_fit,pars='delta',permute=T)$delta
#
# mean_vals <- apply(get_time,2,mean)
# sd_vals <- apply(get_time,2,sd)
# filtered <- data_frame(mean_vals,sd_vals,discrim_id=1:ncol(get_time)) %>%
# filter(sd_vals<2)
#
# # number to identify
# id_num_high <- 20
# id_num_low <- 4
# top_two <- dplyr::arrange(filtered,desc(mean_vals)) %>% slice(1:id_num_high) %>% pull(discrim_id)
# bottom_two <- dplyr::arrange(filtered,mean_vals) %>% slice(1:id_num_low) %>% pull(discrim_id)
#
# # new_vals <- factor(combined_data_small_nomis$cit_ids) %>% fct_relevel(as.character(c(top_two,bottom_two))) %>%
# # as.numeric
# #
# # new_vals[which(combined_data_small_nomis$cit_ids %in% top_two)]
# #
# # combined_data_small_nomis$cit_ids <- new_vals
# Initial-values generator passed to Stan's sampler. Reads the globals
# combined_data_small_nomis (for dimensions) and draws random starts for
# the citizen-level beta/delta parameters, so each chain starts differently.
start_func <- function() {
list(alpha=rbind(matrix(c(-.5,-.5,.5,.5),ncol=4),
matrix(rep(0, (max(combined_data_small_nomis$time_three)-1)*4),ncol=4)),
gamma1=c(0.5,0.5),
gamma2=c(0.5,0.5),
ts_sigma=rep(0.25,4),
adj1=c(1,1),
adj2=c(1,1),
mean_delta=1,
mean_beta=1,
sigma_beta=1,
sigma_delta=.8,
shape=1,
beta=rnorm(max(combined_data_small_nomis$cit_ids)),
delta=rnorm(max(combined_data_small_nomis$cit_ids)),
gamma_par1=0,
gamma_par2=0)
}
# Compile the Poisson IRT Stan model (expects the .stan file alongside).
code_compile <- stan_model(file='poisson_irt_id_v4.stan')
# out_fit_vb <- vb(code_compile,
# data=list(J=max(combined_data_small_nomis$coding_num),
# K=max(combined_data_small_nomis$cit_ids),
# `T`=max(combined_data_small_nomis$time_three),
# N=nrow(combined_data_small_nomis),
# C=max(combined_data_small_nomis$nn),
# id_num_high=1,
# id_num_low=1,
# jj=combined_data_small_nomis$coding_num,
# kk=combined_data_small_nomis$cit_ids,
# tt=combined_data_small_nomis$time_three,
# y=as.integer(combined_data_small_nomis$nn),
# coup=as.integer(floor(max(combined_data_small_nomis$time_three)/2)),
# start_vals=c(-.5,-.5,.5,.5),
# time_gamma=times$coup[-nrow(times)]),
# init=start_func)
this_time <- Sys.time()
# saveRDS(object = out_fit_vb,paste0('out_fit_vb_',this_time,'.rds'))
# drive_upload(paste0('out_fit_vb_',this_time,'.rds'))
# cores=4,thin=5,
# Full MCMC fit: 4 chains, 1200 iterations each with 1000 warmup.
out_fit_id <- sampling(code_compile,cores=4,chains=4,iter=1200,warmup=1000,
data=list(J=max(combined_data_small_nomis$coding_num),
K=max(combined_data_small_nomis$cit_ids),
`T`=max(combined_data_small_nomis$time_three),
N=nrow(combined_data_small_nomis),
C=max(combined_data_small_nomis$nn),
id_num_high=1,
id_num_low=1,
jj=combined_data_small_nomis$coding_num,
kk=combined_data_small_nomis$cit_ids,
tt=combined_data_small_nomis$time_three,
y=as.integer(combined_data_small_nomis$nn),
coup=as.integer(floor(max(combined_data_small_nomis$time_three)/2)),
start_vals=c(-.5,-.5,.5,.5),
time_gamma=times$coup[-nrow(times)]),
init=start_func)
saveRDS(out_fit_id,paste0('out_fit_id_',this_time,'.rds'))
#drive_upload(paste0('out_fit_id_',this_time,'.rds'))
# Posterior diagnostics via bayesplot.
to_plot <- as.array(out_fit_id)
mcmc_intervals(to_plot,regex_pars = 'adj')
mcmc_trace(to_plot,pars='alpha[50,4]')
mcmc_trace(to_plot,pars='sigma_beta')
mcmc_trace(to_plot,pars='sigma_delta')
mcmc_trace(to_plot,pars='gamma2[2]')
mcmc_intervals(to_plot,regex_pars = c('gamma1|gamma2'))
mcmc_intervals(to_plot,regex_pars = c('alpha'))
# Posterior differences in the pre/post-coup gammas for each pairing.
gamma1 <- rstan::extract(out_fit_id,pars='gamma1')$gamma1
gamma2 <- rstan::extract(out_fit_id,pars='gamma2')$gamma2
all_gammas <- data_frame(Islamists=gamma1[,2]-gamma1[,1],
Secularists=gamma2[,2]-gamma2[,1]) %>%
gather(`Ideological\nPairing`,Difference) %>%
group_by(`Ideological\nPairing`) %>%
mutate(mean_val=median(Difference))
ggplot(all_gammas,aes(x=Difference)) +
geom_density(aes(fill=`Ideological\nPairing`),colour=NA,alpha=0.5,adjust=0.5) +
theme_minimal() +
theme(panel.grid = element_blank()) +
xlab('Gamma Difference') +
ylab('Posterior Density') +
geom_vline(aes(xintercept=mean_val,linetype=`Ideological\nPairing`))
summarize(all_gammas,mean_val=mean(Difference),
median_val=median(Difference),
upper=quantile(Difference,0.9),
lower=quantile(Difference,0.1))
# Sample 101 posterior draws of alpha (time-varying ideal points) and
# reshape draws x time x series into long format for plotting.
get_time <- rstan::extract(out_fit_id,pars='alpha',permute=T)$alpha
get_time <- get_time[sample(1:nrow(get_time),101),,]
get_time <- lapply(1:dim(get_time)[3],function(x) get_time[,,x]) %>%
lapply(as_data_frame) %>%
bind_rows(.id='Series') %>%
mutate(Series=factor(Series),
Series=fct_recode(Series,`Islamist Egypt`='1',
`Islamist Tunisia`='2',
`Secularist Egypt`='3',
`Secularist Tunisia`='4')) %>%
gather(time_pts,out_vals,-Series) %>%
mutate(time_pts=as.numeric(factor(time_pts)))
get_time %>%
filter(time_pts<93) %>%
ggplot(aes(y=out_vals,x=time_pts)) +
stat_smooth() + theme_minimal() +
theme(panel.grid=element_blank()) + xlab('Time') + ylab('Ideological Positions') +
scale_colour_brewer(palette='paired',name='') +
facet_wrap(~Series) +
scale_linetype(name='')
get_time %>%
filter(time_pts<93) %>%
ggplot(aes(y=out_vals,x=time_pts)) +
stat_summary(geom='ribbon',fun.data = 'median_hilow',fill='grey80') + theme_minimal() +
stat_summary(fun.y='median',geom='path',linetype=2) +
theme(panel.grid=element_blank()) + xlab('Time') + ylab('Ideological Positions') +
scale_colour_brewer(palette='paired',name='') +
facet_wrap(~Series) +
scale_linetype(name='') +
geom_vline(aes(xintercept=32),linetype=3)
ggsave('arab_ideology.png')
# Citizen-level discrimination (delta) and intercept (beta) summaries.
deltas <- rstan::extract(out_fit_id,pars='delta',permuted=T)$delta
betas <- rstan::extract(out_fit_id,pars='beta',permuted=T)$beta
apply(deltas,2,mean) %>% hist
apply(betas,2,mean) %>% hist
# Convergence check: flag parameters with Rhat > 1.1.
lookat <- summary(out_fit_id)
hist(lookat$summary[,'Rhat'])
#
non_identified_parameters <- lookat$summary[which(lookat$summary[,'Rhat']>1.1),]
mcmc_trace(to_plot,regex_pars='steps')
mcmc_trace(to_plot,pars='delta[7000]')
# mcmc_trace(to_plot,pars='lp__')
|
5b39e470427455eef14d9150fc5b9daa40f1693d
|
6ad8c187f7576bd9e0e480cbee41ddbc7e47ad39
|
/man/tidy_levels_labels.Rd
|
0afe80281f76dfb58cd3ddcdc127511eddb61dd4
|
[] |
no_license
|
mllg/pixiedust
|
8b946d3ffed58a73965672d0fc4499904aec7f0b
|
176d4426ab56cde9240b7ec64e50f475a3747d85
|
refs/heads/master
| 2020-03-09T07:56:22.288426
| 2018-04-08T19:57:23
| 2018-04-08T19:57:23
| 128,676,688
| 1
| 0
| null | 2018-04-08T19:54:41
| 2018-04-08T19:54:41
| null |
UTF-8
|
R
| false
| true
| 6,865
|
rd
|
tidy_levels_labels.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy_levels_labels.R
\name{tidy_levels_labels}
\alias{tidy_levels_labels}
\title{Term and Level Descriptions for \code{pixiedust} Tables}
\usage{
tidy_levels_labels(object, descriptors = "term", numeric_level = c("term",
"term_plain", "label"), argcheck = NULL)
}
\arguments{
\item{object}{A model object, ideally with a \code{model.frame} method.
It is unclear at the moment (18 Sept. 2015) what will happen if
an object is passed that does not have a \code{model.frame} method.}
\item{descriptors}{A character vector indicating the descriptors to
be used in the table. Acceptable inputs are \code{"term"},
\code{"term_plain"}, \code{"label"}, \code{"level"}, and
\code{"level_detail"}. These may be used in any combination and
any order, with the descriptors appearing in the table from left
to right in the order given. The default, \code{"term"}, returns
only the term descriptor and is identical to the output provided
by \code{broom::tidy} methods. See Details for a full explanation
of each option and the Examples for sample output.}
\item{numeric_level}{A character string that determines which descriptor
is used for numeric variables in the \code{"level_detail"} descriptor
when a numeric has an interaction with a factor. Acceptable inputs
are \code{"term"}, \code{"term_plain"}, and \code{"label"}.}
\item{argcheck}{An assert collection created by \code{checkmate::makeAssertCollection}.
Under normal circumstances, this is passed from \code{dust}. If \code{NULL},
as in the case it is run outside of \code{dust}, a new collection is
created and the assertions are reported within \code{tidy_levels_labels}.}
}
\description{
Default model objects identify rows of results with
appropriate term name. More often than not, the term name is
not suitable for formally reported output. \code{tidy_levels_labels}
performs some basic work to quickly provide more readable
descriptors for cases where they can easily be obtained. These
descriptors are retrieved from the data, however, so the
utility is determined by the user's habits in providing
term labels and meaningful factor levels.
Due to the complexity of the terms that could be used for a model,
it isn't practical to attempt to recover human-ready descriptors
for every conceivable term. This would require recovering variable
names for any number of functions. \code{pixiedust} only
goes after the easiest to obtain. Replacements not managed by
\code{tidy_levels_labels} may still be made with the \code{replace}
sprinkle.
}
\details{
The user may select up to five columns of descriptors,
although doing so would certainly create some ambiguity. See
the Examples for sample output.
\itemize{
\item{\code{"term"} }{The term name used in the R model summary}
\item{\code{"term_plain"} }{The term name used in the formula.
For variables that produce multiple term names (such as factors),
the plain term name may be duplicated. For example, a factor that
has term names \code{FctrB} and \code{FctrC}, indicating rows for
levels \code{B} and \code{C} of the variable \code{Fctr}, will
have two rows of \code{"term_plain"} of just \code{Fctr}.}
\item{\code{"label"} }{Provides the label attached to the data using
\code{labelVector::get_label}. When a term is not associated with a label,
the value of \code{term_plain} is returned instead. Note that variable names
will disassociate with a label if they are used in a function (such
as \code{factor(x)} or \code{x^2}).}
\item{\code{"level"} }{Indicates the level being compared within a factor
(or an interaction involving a factor), otherwise it returns \code{NA}.
It may also be said that this value is the appendix to a factor name.
For the term \code{FctrB}, this would just be \code{B}.}
\item{\code{"level_detail"} }{Gives additional information to \code{level}
by including the reference level of the factor. For the term \code{FctrB},
this would return \code{"B vs A"}. When an interaction with a numeric
variable is present, the \code{level} for the numeric may be either
\code{term_plain} or \code{label}, the choice being controlled by the
\code{level_detail} argument.}
}
}
\section{Restrictions}{
The descriptors, other than \code{"term"}, generally don't make sense for data
frame objects. The use of \code{tidy_levels_labels} is not permitted within
the \code{dust} function, but is allowed if you really want it by
\code{pixiedust:::tidy_levels_labels}.
Other special cases noted in future uses will be documented here, but in
general, if it isn't a model object, you probably don't really want to
use this.
}
\examples{
#* Descriptors for lm output with no interactions
mtcars2 <- mtcars
mtcars2$mpg <- labelVector::set_label(mtcars2$mpg, "Gas Mileage")
mtcars2$qsec <- labelVector::set_label(mtcars2$qsec, "Quarter Mile Time")
mtcars2$am <- labelVector::set_label(mtcars2$am, "Transmission")
mtcars2$wt <- labelVector::set_label(mtcars2$wt, "Weight")
mtcars2$gear <- labelVector::set_label(mtcars2$gear, "Gears")
#* Basic Output for a model with no interactions
#* Note: numeric_level has no impact as there are no
#* interactions involving numeric variables.
fit <- lm(mpg ~ qsec + factor(am) + wt + factor(gear), data = mtcars2)
pixiedust:::tidy_levels_labels(fit,
descriptors = c("term", "term_plain", "label", "level", "level_detail"),
numeric_level = "term")
#* Assign factors ahead of the model. This allows
#* the user to determine the levels that display.
#* Compare the output for 'am' with the output for 'gear'
mtcars2$am <- factor(mtcars2$am, 0:1, c("Automatic", "Manual"))
mtcars2$am <- labelVector::set_label(mtcars2$am, "Transmission")
# Label was lost in variable conversion
fit <- lm(mpg ~ qsec + am + wt + factor(gear), data = mtcars2)
pixiedust:::tidy_levels_labels(fit,
descriptors = c("term", "term_plain", "label", "level", "level_detail"),
numeric_level = "term")
#* Include an interaction between a factor and numeric.
fit <- lm(mpg ~ qsec + am * wt + factor(gear), data = mtcars2)
pixiedust:::tidy_levels_labels(fit,
descriptors = c("term", "term_plain", "label", "level", "level_detail"),
numeric_level = "term")
#* Now observe how 'level' and 'level_detail' change
#* in the interaction terms as we choose different
#* values for 'numeric_level'
pixiedust:::tidy_levels_labels(fit,
descriptors = c("term", "term_plain", "label", "level", "level_detail"),
numeric_level = "term_plain")
pixiedust:::tidy_levels_labels(fit,
descriptors = c("term", "term_plain", "label", "level", "level_detail"),
numeric_level = "label")
}
\author{
Benjamin Nutter
}
|
e843094c14f1e7e4656416da8a7d41e59eceef7d
|
af901bc01d668ecd411549625208b07024df3ffd
|
/R/standalone-downstream-deps.R
|
44e7848a6c90d45d7b985f97b845ba4d19813e00
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
r-lib/rlang
|
2784186a4dafb2fde7357c79514b3761803d0e66
|
c55f6027928d3104ed449e591e8a225fcaf55e13
|
refs/heads/main
| 2023-09-06T03:23:47.522921
| 2023-06-07T17:01:51
| 2023-06-07T17:01:51
| 73,098,312
| 355
| 128
|
NOASSERTION
| 2023-08-31T13:11:13
| 2016-11-07T16:28:57
|
R
|
UTF-8
|
R
| false
| false
| 9,358
|
r
|
standalone-downstream-deps.R
|
# ---
# repo: r-lib/rlang
# file: standalone-downstream-deps.R
# last-updated: 2022-01-19
# license: https://unlicense.org
# ---
#
# No dependencies but uses rlang and pak if available. In interactive
# sessions the user is prompted to update outdated packages. If they
# choose no, they are informed about the global option
# `rlib_downstream_check` to turn off these prompts. In non
# interactive sessions a warning is issued. This happens when the
# outdated dep is being loaded.
#
# ## Changelog
#
# 2022-01-19:
#
# * Prompt results are no longer cached in the `org:r-lib` search path
# environment in non-interactive sessions. This is to avoid side
# effects causing R CMD check failures.
#
#
# 2021-06-08:
#
# * User response is cached in the global env to avoid asking again
# when session is reloaded.
#
#
# 2021-05-20:
#
# * Fixed issue when downstream package is not installed.
#
#
# 2021-05-17:
#
# * Added an `info` argument intended to inform users about the
# consequences of not updating right away.
#
#
# 2021-05-12:
#
# * All packages are now updated at once. The user is not prompted
# again after accepting or declining to update the packages, even
# when one of the packages is loaded later on.
#
#
# 2021-05-07:
#
# * In interactive sessions, user is now prompted to update outdated
# packages.
#
# * Added global option `rlib_downstream_check` to turn off prompts or
# warnings.
#
# * Renamed to `check_downstream()`.
#
# * The requirement format is now "pkg (>= 0.0.0)", consistently with
# DESCRIPTION fields.
#
# nocov start
# Register a lazy check that downstream packages satisfy the minimum
# version requirements given in `...` as "pkg (>= 1.2.3)" strings.
#
# Must be called from a package namespace (the calling namespace's name is
# taken as the upstream package). The actual version check is deferred:
# it fires when each downstream dep is loaded (via an onLoad hook), or
# immediately if that dep is already loaded.
#
# ver  - version string of the calling (upstream) package.
# ...  - character requirement strings, e.g. "dplyr (>= 1.0.0)".
# info - optional extra bullets shown in the interactive update prompt.
check_downstream <- function(ver,
...,
info = NULL) {
env <- topenv(parent.frame())
# A package name is required, so refuse to run outside a namespace.
if (!isNamespace(env)) {
stop("`check_downstream()` must be called from a namespace.", call. = FALSE)
}
pkg <- unname(getNamespaceName(env))
deps <- c(...)
if (!is.character(deps)) {
stop("`...` must be strings.", call. = FALSE)
}
# Key identifying this particular requirement set in the prompt cache.
deps_key <- paste0(deps, collapse = " ")
deps <- .rlang_downstream_parse_deps(deps)
# Run `expr` now if `pkg` is already loaded, otherwise defer it to the
# package's onLoad event; `expr` stays unevaluated until then (promise).
on_package_load <- function(pkg, expr) {
if (isNamespaceLoaded(pkg)) {
expr
} else {
thunk <- function(...) expr
setHook(packageEvent(pkg, "onLoad"), thunk)
}
}
is_interactive <- .rlang_downstream_compat("is_interactive")
# Reset the "already prompted" flag for this requirement set in the
# session cache (interactive sessions only).
if (is_interactive()) {
cache <- .rlang_downstream_get_cache()
cache[[pkg]][[deps_key]] <- FALSE
}
# Presumably read through `.rlang_downstream_check()`'s `env` argument
# (the deferred call evaluates in this frame) so the check runs at most
# once per session -- TODO confirm against the checker's env handling.
checked <- FALSE
for (dep in deps) {
on_package_load(
dep[["pkg"]],
.rlang_downstream_check(
pkg,
ver,
deps,
info = info,
deps_key = deps_key
)
)
}
}
# Parse requirement strings of the form "pkg (>= 1.0.0)" into a list of
# named character vectors c(pkg = <name>, min = <version>), delegating the
# per-requirement validation to .rlang_downstream_parse_min_requirement().
.rlang_downstream_parse_deps <- function(deps) {
  trim_both <- function(x) {
    sub("^\\s+", "", sub("\\s+$", "", x))
  }
  # Split each "pkg (>= 1.0.0)" at the opening paren, tidy both halves,
  # then strip the trailing ")" before parsing the requirement proper.
  pieces <- strsplit(deps, "\\(")
  lapply(pieces, function(piece) {
    piece <- trim_both(piece)
    piece <- sub("\\)$", "", piece)
    .rlang_downstream_parse_min_requirement(piece)
  })
}
# Validate a single pre-split requirement `dep`, a length-2 character
# vector c(<package>, "<op> <version>"), and return c(pkg = , min = ).
# Only ">=" requirements are supported; anything else is an error.
.rlang_downstream_parse_min_requirement <- function(dep) {
  if (length(dep) != 2) {
    stop("Parsing error during downstream check.", call. = FALSE)
  }
  is_string <- function(x) {
    is.character(x) && length(x) == 1 && !is.na(x)
  }
  # "<op> <version>" splits on runs of spaces into exactly two tokens.
  requirement <- strsplit(dep[[2]], " +")[[1]]
  if (length(requirement) != 2) {
    stop("Parsing error during downstream check.", call. = FALSE)
  }
  op <- requirement[[1]]
  ver <- requirement[[2]]
  stopifnot(is_string(op), is_string(ver))
  if (op != ">=") {
    stop("Can only check `>=` requirements.", call. = FALSE)
  }
  c(pkg = dep[[1]], min = ver)
}
# Perform the actual downstream version check for `pkg` (at `pkg_ver`)
# against parsed `deps`. In interactive sessions, prompts the user to
# update outdated packages (at most once per session, via the caches);
# in non-interactive sessions, issues a warning instead.
#
# Returns NULL when skipped, TRUE when all deps are current (or after an
# update), FALSE when outdated deps remain.
.rlang_downstream_check <- function(pkg,
pkg_ver,
deps,
info,
deps_key = as.character(stats::runif(1)),
env = parent.frame()) {
# Local shim so this file works on R versions without base::isFALSE.
isFALSE <- function(x) {
is.logical(x) && length(x) == 1L && !is.na(x) && !x
}
# Global opt-out switch for the whole mechanism.
if (isFALSE(getOption("rlib_downstream_check"))) {
return(NULL)
}
# Check cache in the global environment. This cache gets saved along
# with the session. This avoids getting repeated checks when session
# is reloaded, e.g. when revisiting RStudio servers.
is_interactive <- .rlang_downstream_compat("is_interactive")
if (is_interactive()) {
cache <- .rlang_downstream_get_cache()
if (isTRUE(cache[[pkg]][[deps_key]])) {
return(NULL)
}
}
# Still check closure env in case the cache in the global
# environment has been deleted
if (isTRUE(env$checked)) {
return(NULL)
}
# Don't ask again. Flip now instead of on exit to defensively
# prevent recursion.
if (is_interactive()) {
cache[[pkg]][deps_key] <- list(TRUE)
}
env$checked <- TRUE
pkgs <- vapply(deps, `[[`, "", "pkg")
mins <- vapply(deps, `[[`, "", "min")
# Don't use `requireNamespace()` to avoid loading packages
is_on_disk <- function(pkg) nzchar(system.file(package = pkg))
on_disk <- vapply(pkgs, is_on_disk, NA)
# Only check deps that are actually installed.
pkgs <- pkgs[on_disk]
mins <- mins[on_disk]
vers <- lapply(pkgs, utils::packageVersion)
ok <- as.logical(Map(`>=`, vers, mins))
if (all(ok)) {
return(TRUE)
}
# Keep only the outdated deps for the message.
pkgs <- pkgs[!ok]
mins <- mins[!ok]
pkgs_quoted <- paste0("`", pkgs, "` (>= ", mins, ")")
pkgs_enum <- .rlang_downstream_collapse(pkgs_quoted, final = "and")
n <- length(pkgs)
if (n == 1) {
header <- paste0("The package ", pkgs_enum, " is required")
} else {
header <- paste0("The packages ", pkgs_enum, " are required")
}
header <- sprintf("%s as of %s %s.", header, pkg, pkg_ver)
warn <- .rlang_downstream_compat("warn")
inform <- .rlang_downstream_compat("inform")
is_interactive <- .rlang_downstream_compat("is_interactive")
# Non-interactive (or prompt-suppressed) sessions: warn and bail out.
if (!is_interactive() || !is.null(getOption("rlang:::no_downstream_prompt"))) {
warn(header)
return(FALSE)
}
if (n == 1) {
question <- "Would you like to update it now?"
} else {
question <- "Would you like to update them now?"
}
# Use "i" bullets by default
if (!is.null(info) && is.null(names(info))) {
names(info) <- rep("i", length(info))
}
prompt <- c(
"!" = question,
" " = "You will likely need to restart R if you update now.",
info
)
inform(c(header, prompt))
# User declined: tell them how to silence the prompt permanently.
if (utils::menu(c("Yes", "No")) != 1) {
inform("Set `options(rlib_downstream_check = FALSE)` to disable this prompt.")
return(FALSE)
}
# NOTE(review): `is_installed` is not defined in this standalone file;
# presumably it resolves in the host package's namespace (e.g. rlang)
# at run time -- TODO confirm.
if (is_installed("pak")) {
pkg_install <- get(envir = asNamespace("pak"), "pkg_install")
pkg_install(pkgs, ask = FALSE)
} else {
utils::install.packages(pkgs)
}
TRUE
}
# Keep in sync with standalone-linked-version.R
# Keep in sync with standalone-linked-version.R
# Build platform-aware "i" bullets telling the user how to update `pkg`.
# Windows gets an extra bullet linking to update precautions; everywhere
# else a single install.packages() instruction is returned.
.rlang_downstream_howto_reinstall_msg <- function(pkg) {
  on_windows <- tolower(Sys.info()[["sysname"]]) == "windows"
  if (on_windows) {
    precautions_url <- "https://github.com/jennybc/what-they-forgot/issues/62"
    c(
      i = sprintf("Please update %s to the latest version.", pkg),
      i = sprintf("Updating packages on Windows requires precautions:\n <%s>", precautions_url)
    )
  } else {
    c(
      i = sprintf("Please update %s with `install.packages(\"%s\")` and restart R.", pkg, pkg)
    )
  }
}
# Collapse a character vector into an English enumeration:
# "a" -> "a"; c("a","b") -> "a or b"; c("a","b","c") -> "a, b, or c"
# (Oxford comma kept for three or more items).
#
# x     - character vector to collapse (returned unchanged if length < 2).
# sep   - separator between items.
# final - final conjunction ("or" by default).
.rlang_downstream_collapse <- function(x, sep = ", ", final = "or") {
  # Fix: the original computed `n <- length(x)` twice; compute it once.
  n <- length(x)
  if (n < 2) {
    return(x)
  }
  head <- paste(x[seq_len(n - 1)], collapse = sep)
  last <- x[[n]]
  # Write a or b. But a, b, or c.
  if (n > 2) {
    paste0(head, sep, final, " ", last)
  } else {
    paste0(head, " ", final, " ", last)
  }
}
# Resolve a helper function by name ("is_installed", "is_interactive",
# "abort", "warn", "inform"), preferring rlang's implementation when
# rlang is installed and fully loaded, otherwise returning a base-R shim.
#
# fn        - name of the helper to resolve.
# try_rlang - set FALSE to skip rlang-backed implementations entirely.
.rlang_downstream_compat <- function(fn, try_rlang = TRUE) {
# Compats that behave the same independently of rlang's presence
# NOTE(review): `out` is never read; the switch() works purely through
# the early `return()` inside it.
out <- switch(
fn,
is_installed = return(function(pkg) requireNamespace(pkg, quietly = TRUE))
)
# Only use rlang if it is fully loaded (#1482)
if (try_rlang &&
requireNamespace("rlang", quietly = TRUE) &&
environmentIsLocked(asNamespace("rlang"))) {
switch(
fn,
is_interactive = return(rlang::is_interactive)
)
# Make sure rlang knows about "x" and "i" bullets
if (utils::packageVersion("rlang") >= "0.4.2") {
switch(
fn,
abort = return(rlang::abort),
warn = return((rlang::warn)),
inform = return(rlang::inform)
)
}
}
# Fall back to base compats
# Base fallback honours the `rlang_interactive` option before the real
# interactive() state.
is_interactive_compat <- function() {
opt <- getOption("rlang_interactive")
if (!is.null(opt)) {
opt
} else {
interactive()
}
}
format_msg <- function(x) paste(x, collapse = "\n")
switch(
fn,
is_interactive = return(is_interactive_compat),
abort = return(function(msg) stop(format_msg(msg), call. = FALSE)),
warn = return(function(msg) warning(format_msg(msg), call. = FALSE)),
inform = return(function(msg) message(format_msg(msg)))
)
# Reaching this point means `fn` matched none of the known helpers.
stop(sprintf("Internal error in rlang shims: Unknown function `%s()`.", fn))
}
# Return (creating it on first use) the session-wide cache environment
# used to remember whether the user has already been prompted for a given
# requirement set. The cache lives on an attached search-path entry named
# "org:r-lib" so it can be shared across r-lib packages in one session.
.rlang_downstream_get_cache <- function() {
if (!"org:r-lib" %in% search()) {
# Attach an empty environment named "org:r-lib" near the end of the
# search path. do.call() is used so the attach() call survives
# R CMD check's scrutiny of direct attach() usage.
do.call(
attach,
list(
list(),
pos = length(search()),
name = "org:r-lib"
)
)
}
cache_env <- as.environment("org:r-lib")
check_cache_name <- "rlang_downstream_check"
cache <- cache_env[[check_cache_name]]
# First access this session: create the cache environment.
if (is.null(cache)) {
cache <- new.env(parent = emptyenv())
cache_env[[check_cache_name]] <- cache
}
cache
}
#nocov end
|
adac7ade802a25aeadba64c29a749ef2482ae12e
|
7ad3ffcfb001733227962a2aeacc00657d30350f
|
/inst/resources/scripts/book/data_management.r
|
c5dc454eecc5beec63caafd729b289582df69e6c
|
[] |
no_license
|
cran/FAwR
|
b70f10a5ada58a3da4a56464d86534eb1a59fbb0
|
9917873167c1a0109136e772024009c7e81131ab
|
refs/heads/master
| 2021-06-02T13:37:48.157146
| 2020-11-09T04:20:02
| 2020-11-09T04:20:02
| 17,679,114
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,208
|
r
|
data_management.r
|
### R code from vignette source 'data_management.rnw'
###################################################
### code chunk number 1: Setup
###################################################
# Install-on-demand for every package this vignette needs, then force
# lattice to black-and-white output (for print) and widen console output.
options(repos="http://cran.r-project.org")
if(!require(Hmisc, quietly=TRUE)) install.packages("Hmisc")
if(!require(lattice, quietly=TRUE)) install.packages("lattice")
if(!require(maptools, quietly=TRUE)) install.packages("maptools")
#if(!require(RODBC, quietly=TRUE)) install.packages("RODBC")
if(!require(colorspace, quietly=TRUE)) install.packages("colorspace")
if(!require(quantreg, quietly=TRUE)) install.packages("quantreg")
lattice.options(default.theme = canonical.theme(color = FALSE))
# hexbin is fetched from Bioconductor, so the repo is switched first.
options(repos="http://www.bioconductor.org/")
if(!require(hexbin, quietly=TRUE)) install.packages("hexbin")
options(width = 65)
# NOTE(review): rm(list=ls()) wipes the user's global workspace --
# tolerable in a self-contained vignette build, avoid in reusable code.
rm(list=ls())
source("../../scripts/functions.R")
###################################################
### code chunk number 2: data_management.rnw:122-124
###################################################
fia.plots <- read.table("../../data/fia_plots.csv", sep = ",",
header = TRUE, row.names = 1)
###################################################
### code chunk number 3: data_management.rnw:142-143
###################################################
fia.plots <- read.csv("../../data/fia_plots.csv")
###################################################
### code chunk number 4: data_management.rnw:149-150
###################################################
class(fia.plots)
###################################################
### code chunk number 5: data_management.rnw:162-163 (eval = FALSE)
###################################################
## write.table(fia.plots, file = "fia-plots.csv")
###################################################
### code chunk number 6: data_management.rnw:203-210
###################################################
fvs.trees <-
read.fwf("../../data/stnd73.fvs",
widths = c(4, 3, 6, 1, 3, 4, -3, 3, -7, 1),
as.is = FALSE, row.names = NULL,
col.names = c("plot", "tree", "tree.count",
"history", "species", "dbh", "live.tht",
"crown.code"))
###################################################
### code chunk number 7: data_management.rnw:219-220
###################################################
library(lattice)
###################################################
### code chunk number 8: fig-fvs-xyplot
###################################################
xyplot(live.tht ~ dbh | species, data = fvs.trees)
###################################################
### code chunk number 9: data_management.rnw:229-232
###################################################
print(
xyplot(live.tht ~ dbh | species, data = fvs.trees)
)
###################################################
### code chunk number 10: data_management.rnw:269-271
###################################################
# Read the raw example file one line per element (newline separator,
# character mode).
eg <- scan(file = "../../data/scan-example.txt",
sep = "\n", what = "")
###################################################
### code chunk number 11: data_management.rnw:282-285
###################################################
# Pre-allocate output lists (over-allocated to the total line count;
# unused tail slots stay NULL and vanish in the rbind below) and set
# independent write cursors for plots and trees.
n.in <- length(eg)
eg.trees <- eg.plots <- vector(mode = "list", length = n.in)
plot.n <- tree.n <- 1
###################################################
### code chunk number 12: data_management.rnw:298-319
###################################################
# Classify each line: lines starting with "Plot" are fixed-width plot
# headers; all other lines are whitespace-separated tree records that
# inherit plot.id from the most recent header line.
for (i in 1 : n.in) {
chunk <- eg[[i]]
if (substr(chunk, 1, 4) == "Plot") {
# Fixed columns: id in 6-9, crew code at 16, free-text comments after.
plot.id <- as.numeric(substr(chunk, 6, 9))
crew.id <- substr(chunk, 16, 16)
comments <- ifelse(nchar(chunk) > 17,
substr(chunk, 17, nchar(chunk)),
"")
eg.plots[[plot.n]] <-
list(plot.id, crew.id, comments)
plot.n <- plot.n + 1
} else {
# Tree record: split on runs of spaces -> id, species, dbh (cm).
tree <- strsplit(chunk, " +")[[1]]
tree.id <- as.character(tree[1])
species <- as.character(tree[2])
dbh.cm <- as.numeric(tree[3])
eg.trees[[tree.n]] <-
list(plot.id, tree.id, species, dbh.cm)
tree.n <- tree.n + 1
}
}
###################################################
### code chunk number 13: data_management.rnw:328-331
###################################################
# Stack the per-line lists into data frames and name the columns.
eg.plots <- as.data.frame(do.call(rbind, eg.plots))
names(eg.plots) <- c("plot", "crew", "comments")
eg.plots
###################################################
### code chunk number 14: data_management.rnw:334-337
###################################################
eg.trees <- as.data.frame(do.call(rbind, eg.trees))
names(eg.trees) <- c("plot", "tree", "species", "dbh.cm")
eg.trees
###################################################
### code chunk number 15: data_management.rnw:426-437 (eval = FALSE)
###################################################
## library(RPostgreSQL)
## drv <- dbDriver("PostgreSQL")
## con <- dbConnect(drv,
## dbname="forestco",
## user="hamannj",
## host="localhost")
## sql.command <-
## sprintf( "select * from plots where plottype = 'fixed';" )
## rs <- dbSendQuery(con, statement = sql.command )
## fixed.plots <- fetch(rs, n = -1)
## dbDisconnect(con)
###################################################
### code chunk number 16: stands
###################################################
library(foreign)
stands <- read.dbf("../../data/stands.dbf")
###################################################
### code chunk number 17: data_management.rnw:582-583
###################################################
names(stands)
###################################################
### code chunk number 18: fig-stands-hexbin
###################################################
stands.non.zero <- stands[stands$QMD > 0,]
plot(hexbin(stands.non.zero$QMD*2.54 ~
stands.non.zero$TPA*2.47),
ylab = "Average Diameter (cm)",
xlab = "Stem Density (stems/ha)")
###################################################
### code chunk number 19: data_management.rnw:604-605
###################################################
stands.non.zero <- stands[stands$QMD > 0,]
plot(hexbin(stands.non.zero$QMD*2.54 ~
stands.non.zero$TPA*2.47),
ylab = "Average Diameter (cm)",
xlab = "Stem Density (stems/ha)")
###################################################
### code chunk number 20: data_management.rnw:666-671
###################################################
library(maptools)
stands <- readShapePoly("../../data/stands.shp")
###################################################
### code chunk number 21: data_management.rnw:675-679
###################################################
sum( stands$AREA ) / 43560.0
###################################################
### code chunk number 22: data_management.rnw:683-684
###################################################
nrow(stands)
###################################################
### code chunk number 23: data_management.rnw:689-692
###################################################
names(stands)
###################################################
### code chunk number 24: fig-plot-stands
###################################################
plot(stands, axes = TRUE)
###################################################
### code chunk number 25: data_management.rnw:706-707
###################################################
plot(stands, axes = TRUE)
###################################################
### code chunk number 26: data_management.rnw:870-872
###################################################
herbdata <- read.table("../../data/herbdata.txt",
header = TRUE, sep = ",")
###################################################
### code chunk number 27: data_management.rnw:879-880 (eval = FALSE)
###################################################
## str(herbdata)
###################################################
### code chunk number 28: data_management.rnw:882-883
###################################################
str(herbdata, vec.len = 1)
###################################################
### code chunk number 29: data_management.rnw:895-897
###################################################
herbdata$date <- as.POSIXct(strptime(herbdata$date,
"%m/%d/%Y"))
###################################################
### code chunk number 30: fig-herb-coplot
###################################################
coplot(height ~ dia | treat * rep, type = "p",
data = herbdata[herbdata$isalive == 1,],
ylab = "Height (cm)", xlab = "Basal Diameter (mm)")
###################################################
### code chunk number 31: data_management.rnw:911-912
###################################################
coplot(height ~ dia | treat * rep, type = "p",
data = herbdata[herbdata$isalive == 1,],
ylab = "Height (cm)", xlab = "Basal Diameter (mm)")
###################################################
### code chunk number 32: data_management.rnw:974-975
###################################################
head(herbdata[is.na(herbdata$height),])
###################################################
### code chunk number 33: data_management.rnw:983-984
###################################################
table(complete.cases(herbdata), herbdata$isalive)
###################################################
### code chunk number 34: fig-herb-date-coplot
###################################################
coplot(height ~ dia | treat * factor(date),
data = herbdata[herbdata$isalive == 1,],
type = "p",
ylab = "Height (cm)",
xlab = "Basal Diameter (mm)")
###################################################
### code chunk number 35: data_management.rnw:1020-1021
###################################################
coplot(height ~ dia | treat * factor(date),
data = herbdata[herbdata$isalive == 1,],
type = "p",
ylab = "Height (cm)",
xlab = "Basal Diameter (mm)")
###################################################
### code chunk number 36: data_management.rnw:1035-1037
###################################################
levels(herbdata$treat)
levels(herbdata$rep)
###################################################
### code chunk number 37: data_management.rnw:1041-1042
###################################################
sort(unique(herbdata$date))
###################################################
### code chunk number 38: data_management.rnw:1048-1052
###################################################
bad.index <- herbdata$treat == levels(herbdata$treat)[1] &
herbdata$rep == levels(herbdata$rep)[2] &
herbdata$date == sort(unique(herbdata$date))[7]
bad.data <- herbdata[bad.index,]
###################################################
### code chunk number 39: data_management.rnw:1055-1056
###################################################
print(head(bad.data))
###################################################
### code chunk number 40: data_management.rnw:1062-1064
###################################################
##bad.data[(nrow(bad.data)-3):nrow(bad.data),]
print(tail(bad.data))
###################################################
### code chunk number 41: data_management.rnw:1075-1076
###################################################
herbdata$dia[bad.index] <- herbdata$dia[bad.index] / 2.54
###################################################
### code chunk number 42: data_management.rnw:1082-1083
###################################################
herbdata$dbh[bad.index] <- herbdata$dbh[bad.index] / 2.54
###################################################
### code chunk number 43: data_management.rnw:1173-1178
###################################################
split.herb <- split(herbdata, herbdata$treat)
class(split.herb)
names(split.herb)
nrow(split.herb$CONTROL)
nrow(split.herb$OUST)
###################################################
### code chunk number 44: data_management.rnw:1183-1184
###################################################
names(split.herb$CONTROL)
###################################################
### code chunk number 45: data_management.rnw:1192-1193
###################################################
lapply(split.herb, nrow)
###################################################
### code chunk number 46: data_management.rnw:1244-1245
###################################################
sort(unique(herbdata$date))
###################################################
### code chunk number 47: data_management.rnw:1249-1253
###################################################
herbdata.shorter <-
herbdata[herbdata$date == max(herbdata$date), c(1,2,6,8)]
split.herb.shorter <-
split(herbdata.shorter, herbdata.shorter$treat)
###################################################
### code chunk number 48: make-st
###################################################
rt <- cbind(herbdata.shorter,
dc = cut(herbdata.shorter$dbh,
breaks = c(0, 50, 100, 150, 200, 300, 400, 999),
labels = c("000--050", "050--100", "100--150",
"150--200", "200--300", "300--400","400+")))
st <- aggregate(x = list(basal.area = pi/(4*10^2) * rt$dbh^2,
tht = rt$height,
stems = rep(1, nrow(rt))),
by = list(treat = rt$treat,
diac = rt$dc),
FUN = sum)
st
###################################################
### code chunk number 49: data_management.rnw:1283-1285
###################################################
st$tht <- st$tht / st$stems / 100
st
###################################################
### code chunk number 50: oust
###################################################
cap <- "OUST herbicide trials."
st <- st[order(st$treat, st$diac),]
st$treat <- as.character(st$treat)
st$diac <- as.character(st$diac)
names(st) <- c("Treatment", "Dia. Class (mm)",
"Basal Area ($\\mbox{mm}^2$)",
"Mean Total Height (m)", "Stems")
###################################################
### code chunk number 51: data_management.rnw:1354-1355
###################################################
names(split.herb)
###################################################
### code chunk number 52: data_management.rnw:1366-1375
###################################################
areas <-
with(herbdata,
aggregate(x = list(plot.bh.area = pi/400 * dbh^2,
plot.bas.area = pi/400 * dia^2),
by = list(treat = treat,
rep = rep,
date = date),
FUN = sum))
###################################################
### code chunk number 53: data_management.rnw:1380-1381
###################################################
areas[1:10,]
###################################################
### code chunk number 54: data_management.rnw:1396-1406
###################################################
areas <-
with(herbdata,
aggregate(x = list(plot.bh.area = pi/400 * dbh^2,
plot.bas.area = pi/400 * dia^2),
by = list(treat = treat,
rep = rep,
date = date),
FUN = sum,
na.rm = TRUE))
areas[1:10,]
###################################################
### code chunk number 55: data_management.rnw:1413-1416
###################################################
final.data <- merge(herbdata, areas)
names(final.data)
head(final.data[,c(1,2,3,4,7,10)])
###################################################
### code chunk number 56: data_management.rnw:1434-1449
###################################################
show.cols.with.na <- function(x) {
  ## Report the number of missing values per column of a data frame.
  ##
  ## Args:
  ##   x: a data frame (anything inheriting from "data.frame").
  ## Returns:
  ##   A named vector of NA counts, one entry per column that contains
  ##   at least one NA; prints "No missing values." and returns nothing
  ##   useful when the data frame is complete.
  ##
  ## First, check that object is a data frame.  is.data.frame() is used
  ## instead of `class(x) != "data.frame"`: the latter yields a
  ## length > 1 logical for multi-class objects such as tibbles (an
  ## error under `if` in modern R) and wrongly rejects subclasses.
  if (!is.data.frame(x))
    stop("x must be a data frame.\n")
  ## Count the missing values by column.
  missing.by.column <- colSums(is.na(x))
  ## Are any missing?
  if (sum(missing.by.column) == 0) {
    cat("No missing values.\n")
  } else {
    ## Only return columns with missing values.
    missing <- which(missing.by.column > 0)
    return(missing.by.column[missing])
  }
}
###################################################
### code chunk number 57: MDD
###################################################
stands <- readShapePoly("../../data/stands.shp",
verbose = FALSE)
perim <- readShapePoly("../../data/boundary.shp",
verbose = FALSE)
###################################################
### code chunk number 58: data_management.rnw:1551-1554
###################################################
stands <- readShapePoly("../../data/stands.shp",
verbose=FALSE)
names(stands)
###################################################
### code chunk number 59: data_management.rnw:1565-1617 (eval = FALSE)
###################################################
##
## ##stands$id <- 1:nrow(stands)
##
## ## this can also be done away with...
##
## blks <- as.numeric(1:length(stands$TAGE))[stands$TAGE == 0]
## stands$TAGE[blks] <- NA
##
## blks <- as.numeric(1:length(stands$BHAGE))[stands$BHAGE == 0]
## stands$BHAGE[blks] <- NA
##
## blks <- as.numeric(1:length(stands$DF_SITE))[stands$DF_SITE == 0]
## stands$DF_SITE[blks] <- NA
##
## blks <- as.numeric(1:length(stands$TPA))[stands$TPA == 0]
## stands$TPA[blks] <- NA
##
## blks <- as.numeric(1:length(stands$QMD))[stands$QMD == 0]
## stands$QMD[blks] <- NA
##
## blks <- as.numeric(1:length(stands$BA))[stands$BA == 0]
## stands$BA[blks] <- NA
##
## blks <- as.numeric(1:length(stands$TOTHT))[stands$TOTHT == 0]
## stands$TOTHT[blks] <- NA
##
## blks <- as.numeric(1:length(stands$CUBVOL_AC))[stands$CUBVOL_AC == 0]
## stands$CUBVOL_AC[blks] <- NA
##
##
## ## blk.rows <- as.numeric(rownames(stands$att.data[stands$att.data$BHAGE == 0,]))
## ## stands$att.data[blk.rows,]$BHAGE <- NA
##
## ## blk.rows <- as.numeric(rownames(stands$att.data[stands$att.data$DF_SITE == 0,]))
## ## stands$att.data[blk.rows,]$DF_SITE <- NA
##
## ## blk.rows <- as.numeric(rownames(stands$att.data[stands$att.data$TPA == 0,]))
## ## stands$att.data[blk.rows,]$TPA <- NA
##
## ## blk.rows <- as.numeric(rownames(stands$att.data[stands$att.data$QMD == 0,]))
## ## stands$att.data[blk.rows,]$QMD <- NA
##
## ## blk.rows <- as.numeric(rownames(stands$att.data[stands$att.data$BA == 0,]))
## ## stands$att.data[blk.rows,]$BA <- NA
##
## ## blk.rows <- as.numeric(rownames(stands$att.data[stands$att.data$TOTHT == 0,]))
## ## stands$att.data[blk.rows,]$TOTHT <- NA
##
## ## blk.rows <- as.numeric(rownames(stands$att.data[stands$att.data$CUBVOL_AC == 0,]))
## ## stands$att.data[blk.rows,]$CUBVOL_AC <- NA
##
##
###################################################
### code chunk number 60: data_management.rnw:1623-1626
###################################################
names(stands)
###################################################
### code chunk number 61: data_management.rnw:1655-1658
###################################################
plots <- readShapePoints("../../data/plots.shp")
###################################################
### code chunk number 62: data_management.rnw:1667-1670 (eval = FALSE)
###################################################
##
## plot(plots, add=TRUE, pch=46)
##
###################################################
### code chunk number 63: fig-stands-and-plots
###################################################
lev <- as.numeric(stands$ALLOCATION)
fgs <- gray(length(levels(stands$ALLOCATION)):1 / 3)
plot(stands,
col=fgs[lev],
add=FALSE,
axes=TRUE)
title(paste("McDonald-Dunn Research Forest",
"Stand Boundaries and Plot Locations",
sep = "\n"))
legend(1280000, 365000,
levels(stands$ALLOCATION)[3:1],
fill = fgs[3:1],
cex = 0.7,
title = "Land Allocations")
plot(plots, add=TRUE, pch=46)
###################################################
### code chunk number 64: fig-stands-and-plots
###################################################
lev <- as.numeric(stands$ALLOCATION)
fgs <- gray(length(levels(stands$ALLOCATION)):1 / 3)
plot(stands,
col=fgs[lev],
add=FALSE,
axes=TRUE)
title(paste("McDonald-Dunn Research Forest",
"Stand Boundaries and Plot Locations",
sep = "\n"))
legend(1280000, 365000,
levels(stands$ALLOCATION)[3:1],
fill = fgs[3:1],
cex = 0.7,
title = "Land Allocations")
plot(plots, add=TRUE, pch=46)
###################################################
### code chunk number 65: read.dbf
###################################################
mdtrees <- read.dbf("../../data/mdtrees.dbf")
head(mdtrees)
###################################################
### code chunk number 66: data_management.rnw:1757-1760
###################################################
mdtrees$EXPF <- NA
###################################################
### code chunk number 67: data_management.rnw:1767-1774
###################################################
mdtrees$EXPF[mdtrees$SUBPLOT == 1] <-
20.0 / (0.0054541539 *
mdtrees$DBH[mdtrees$SUBPLOT == 1] ^2)
mdtrees$EXPF[mdtrees$SUBPLOT == 2] <- 43560 / (pi * 7.78^2)
mdtrees$EXPF[mdtrees$SUBPLOT == 3] <- 43560 / (pi * 15.56^2)
###################################################
### code chunk number 68: data_management.rnw:1780-1783
###################################################
head(mdtrees[, 3:11])
###################################################
### code chunk number 69: data_management.rnw:1800-1803
###################################################
trees.by.plot <- split(mdtrees, mdtrees$PLOT)
###################################################
### code chunk number 70: data_management.rnw:1829-1859
###################################################
# Summarise one plot's tree records into plot-level statistics.
#
# Args:
#   trs: data frame of tree records for a single plot with columns
#        DBH, THT, EXPF (per-tree expansion factor), SITETREE, SPCODE,
#        and AGE.
# Returns a numeric vector of length 7:
#   tree count, expansion factor of trees with DBH, total expansion
#   factor, basal area, quadratic mean diameter, site-tree count, and
#   the EXPF-weighted mean Bruce (1981) site index (NA if no Douglas-fir
#   site trees with height are present).
get.plot.sums <- function(trs) {
# /******************************************************/
# /* Bruce, D. 1981. Consistent height-growth and */
# /* growth-rate estimates for remeasured plots. */
# /* Forest Science 27:711-725. */
# /******************************************************/
site.index.bruce.1981 <- function(tht, bha) {
tht * exp(-21.663 * (3.744e-2 - (bha + 8.0)^ -0.809))
}
not.missing.dbh <- !is.na(trs$DBH)
# NOTE(review): bh.idx is computed but never used below -- confirm
# whether expf.bh was meant to use it instead of not.missing.dbh.
bh.idx <- not.missing.dbh & trs$THT > 4.5
expf.tot <- sum(trs$EXPF)
expf.bh <- sum(trs$EXPF[not.missing.dbh])
# 0.0054541539 = pi/576; presumably converts DBH in inches to basal
# area in square feet -- TODO confirm units against the data source.
ba <- sum(0.0054541539 * trs$DBH[not.missing.dbh] ^ 2 *
trs$EXPF[not.missing.dbh])
qmd <- sqrt(ba / expf.bh / 0.0054541539)
# Site index: Douglas-fir site trees with a measured total height.
s.trs <- trs[trs$SITETREE == 1 & trs$SPCODE == "DF" &
!is.na(trs$THT),]
nst <- nrow(s.trs)
site.bar <-
ifelse(nst > 0,
weighted.mean(site.index.bruce.1981(s.trs$THT,
s.trs$AGE),
s.trs$EXPF),
NA)
return(c(nrow(trs), expf.bh, expf.tot,
ba, qmd, nst, site.bar))
}
###################################################
### code chunk number 71: data_management.rnw:1866-1870
###################################################
plot.sums <-
data.frame(t(sapply(trees.by.plot, get.plot.sums)))
###################################################
### code chunk number 72: data_management.rnw:1882-1888
###################################################
plot.sums$id <- as.numeric(names(trees.by.plot))
names(plot.sums) <- c("trees","expf.bh","expf.tot",
"ba","qmd","nst","site","id")
print(head(plot.sums), digits=3)
###################################################
### code chunk number 73: data_management.rnw:1896-1904
###################################################
plot.id <- as.numeric(as.character(plots$UNIPLOT))
plot.centers <- data.frame(cbind(coordinates(plots), plot.id))
names(plot.centers) <- c("x","y","id")
final.plots <- merge(plot.centers, plot.sums, all = TRUE)
print(head(final.plots[,c(1:3,5:10)]), digits = 3)
###################################################
### code chunk number 74: data_management.rnw:1912-1915
###################################################
write.csv( final.plots, "../../data/final-plots.csv")
###################################################
### code chunk number 75: data_management.rnw:1943-1947
###################################################
leusch.ylds <- read.table("../../data/leuschner.txt",
header = TRUE)
###################################################
### code chunk number 76: data_management.rnw:2383-2402
###################################################
Stangle("fia.rnw")
source("fia.R")
Stangle("gutten.rnw")
source("gutten.R")
Stangle("pref.rnw")
source("pref.R")
Stangle("stage.rnw")
source("stage.R")
Stangle("sweetgum.rnw")
source("sweetgum.R")
Stangle("ufc.rnw")
source("ufc.R")
system("rm -fr package-Ch2")
package.skeleton(name = "package-Ch2")
|
32ee2bc161976ae3c0d2d2ebfcd78557ce3495d6
|
36c253c5be0a91b937b1fa09908cdf33a94f8e87
|
/Crime_Rate.R
|
ea9ab0dc1fffe8281bf8b17748b817172da987c4
|
[] |
no_license
|
aliasgerovs/Crime-Rate
|
407b5e0563257a020e423333e358b0c21e6a7484
|
d61a5c8fc7ace53d5f43be4ec306fcbe549a37b1
|
refs/heads/main
| 2023-04-26T15:46:59.616797
| 2021-05-16T18:19:21
| 2021-05-16T18:19:21
| 367,954,906
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,011
|
r
|
Crime_Rate.R
|
# Case Study Week 7 -- crime-rate linear regression
# Importing libraries & dataset
library(tidyverse)
library(data.table)
library(rstudioapi)
library(recipes)
library(caret)
library(skimr)
library(purrr)
library(inspectdf)
library(mice)
library(graphics)
library(Hmisc)
library(glue)
library(highcharter)
library(plotly)
library(h2o)
library(car)
library(caTools)  # provides sample.split(), used below but never loaded before

my_dat <- fread("crimes.csv")
my_dat %>% skim()

# Completeness check: share of rows with no missing value.
nrows <- nrow(my_dat)
ncomplete <- sum(complete.cases(my_dat))
ncomplete / nrows  # 1 means the data contains no NA values

# 1. Explore pairwise relationships / correlation matrix.
plot(my_dat)
my_cor <- cor(my_dat)

# 2. Standardize features.
# BUGFIX: the original overwrote PctOccupManu three times with scaled
# copies of unrelated columns (copy-paste error); each column is now
# scaled in place under its own name.
my_dat$PctEmplProfServ <- my_dat$PctEmplProfServ %>% scale(center = TRUE, scale = TRUE)
my_dat$PctOccupManu <- my_dat$PctOccupManu %>% scale(center = TRUE, scale = TRUE)
my_dat$PctOccupMgmtProf <- my_dat$PctOccupMgmtProf %>% scale(center = TRUE, scale = TRUE)
my_dat$MalePctDivorce <- my_dat$MalePctDivorce %>% scale(center = TRUE, scale = TRUE)
my_dat$MalePctNevMarr <- my_dat$MalePctNevMarr %>% scale(center = TRUE, scale = TRUE)
my_dat$FemalePctDiv <- my_dat$FemalePctDiv %>% scale(center = TRUE, scale = TRUE)
my_dat$TotalPctDiv <- my_dat$TotalPctDiv %>% scale(center = TRUE, scale = TRUE)
my_dat$PersPerFam <- my_dat$PersPerFam %>% scale(center = TRUE, scale = TRUE)

# 3. Split data into train and test sets using seed = 123.
set.seed(123)
sample <- sample.split(my_dat$TotalPctDiv, SplitRatio = 0.70)
train <- subset(my_dat, sample == TRUE)   # training data
test <- subset(my_dat, sample == FALSE)   # testing data

# 4. Build linear regression model (response: NumIlleg).
model <- lm(NumIlleg ~ . , data = train)
summary(model)  # p-value: < 2.2e-16

# Find multicollinearity by applying VIF.
# BUGFIX: vif(model) was originally called before `model` existed,
# which is a runtime error; it must follow the lm() fit.
vif(model)
mean(vif(model))

# predict() on an lm object returns a plain numeric vector, so no
# data-frame coercion (and no phantom "." / "predict" column) is needed.
predicts <- predict(model, test)

# 5. Calculate RMSE.
test_set <- test %>% as.data.frame()
# BUGFIX: residuals must use the modelled response NumIlleg; the
# original subtracted predictions from the unrelated NumImmig column.
residuals <- test_set$NumIlleg - predicts
RMSE <- sqrt(mean(residuals^2))

# Adjusted R-squared on the test set.
y_test_mean <- mean(test_set$NumIlleg)
tss <- sum((test_set$NumIlleg - y_test_mean)^2)  # total sum of squares
rss <- sum(residuals^2)                          # residual sum of squares
R2 <- 1 - (rss / tss); R2
n <- nrow(test_set)           # sample size
# BUGFIX: `features` was never defined; the number of independent
# variables is the coefficient count minus the intercept.
k <- length(coef(model)) - 1
Adjusted_R2 <- 1 - (1 - R2) * ((n - 1) / (n - k - 1))
tibble(RMSE = round(RMSE, 1),
R2, Adjusted_R2)

# 6. Check overfitting: compare the fit on the training set.
y_pred_train <- predict(model, newdata = train)
train_set <- train %>% as.data.frame()
residuals_train <- train_set$NumIlleg - y_pred_train
RMSE_train <- sqrt(mean(residuals_train^2))
y_train_mean <- mean(train_set$NumIlleg)
tss_train <- sum((train_set$NumIlleg - y_train_mean)^2)
rss_train <- sum(residuals_train^2)
R2_train <- 1 - (rss_train / tss_train); R2_train
n_train <- nrow(train_set)        # sample size
k_train <- length(coef(model)) - 1  # number of independent variables
Adjusted_R2_train <- 1 - (1 - R2_train) * ((n_train - 1) / (n_train - k_train - 1))
|
35d4546eb43f1fe430b6407f84e3017794759d34
|
22f6f25b97b0bf425f365f43511ddfea5e02ff42
|
/CompletitudDiaria.R
|
b533fb0c6592e82f3090eb4e0091b855a7a193c9
|
[] |
no_license
|
JuanAlvarezVazquez/TFM
|
f4da74710d67dc80592707c3373e0e87b09fe5ff
|
5ecde6acb959eb3738384e91212087b6baf0162e
|
refs/heads/master
| 2020-07-19T23:12:43.682477
| 2019-09-05T10:00:33
| 2019-09-05T10:00:33
| 206,529,501
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,763
|
r
|
CompletitudDiaria.R
|
#============= Load libraries and the inspection-history file =================
library(tidyverse)
library(dplyr)
library(lubridate)
library(ggrepel)
# Ingot inspection log: one row per inspected ingot. Most columns are read
# as factors ("f"); Colada (cast id) and Lingotera are integers, and Fecha
# is parsed as a day-month-year date. The first file row is skipped
# because col_names supplies the header.
# NOTE(review): scales::date_format() is used further down in this file;
# confirm the scales package is attached (tidyverse loads it via ggplot2's
# dependencies only indirectly).
registroLingotes<-read_csv2("Datos/hist_inspec.csv", col_names = c("Horno","Fecha","Aleacion","Formato","Colada","Lingotera","Id_Especialista","Especialista","Id_Mando","Mando"), skip = 1, cols_only(Horno="f", Fecha =col_date(format ="%d-%m-%y"), Aleacion="f", Formato="f", Colada="i", Lingotera="i", Id_Especialista="f", Especialista="f", Id_Mando="f", Mando="f"))
#============ Override theme_minimal to place the legend at the bottom
# Identical to ggplot2's stock theme_minimal() except that the legend is
# anchored at the bottom of every plot that uses it.
theme_minimal <- function(base_size = 11, base_family = "",
                          base_line_size = base_size / 22,
                          base_rect_size = base_size / 22) {
  # Elements to strip from theme_bw(), plus the bottom legend placement.
  overrides <- theme(
    axis.ticks = element_blank(),
    legend.background = element_blank(),
    legend.key = element_blank(),
    panel.background = element_blank(),
    panel.border = element_blank(),
    strip.background = element_blank(),
    plot.background = element_blank(),
    legend.position = "bottom",
    complete = TRUE
  )
  # Start from theme_bw and replace the parts listed above.
  theme_bw(
    base_size = base_size,
    base_family = base_family,
    base_line_size = base_line_size,
    base_rect_size = base_rect_size
  ) %+replace%
    overrides
}
#===========================
# Ingot records where either the foreman (Mando) or the specialist
# (Especialista) failed to register the inspection.
lingotesNoReg<-registroLingotes%>%
  filter((Mando=="No_Reg")|(Especialista=="No_Reg"))
# One row per date present in the log; Z is a zero placeholder column used
# only to drive the full joins below.
calendario<-registroLingotes%>%
  select(Fecha)%>%
  distinct(Fecha)%>%
  mutate(Z=as.integer(0))
#============== Unregistered ingots coincide with multiples of a cast (colada), so count per cast ===========
# Daily count of casts with a missing specialist registration.
# NOTE(review): inside the pipe, full_join() receives the piped result as
# `x`, `calendario` as `y`, and a reference to `lingotesNoregEspecialista`
# — the very object being defined — as a further positional argument
# (landing on `copy`). This appears to work only because that argument is
# not evaluated; the intent was presumably full_join(calendario, ., by="Fecha").
# Confirm and simplify. Also note the inconsistent column casing: `No_reg`
# here vs `No_Reg` in the Mando block below.
lingotesNoregEspecialista<-lingotesNoReg%>%
  filter(Especialista=="No_Reg")%>%
  group_by(Fecha,Colada)%>%
  distinct(Colada)%>%
  group_by(Fecha)%>%
  summarize(Coladas=n())%>%
  full_join(calendario, lingotesNoregEspecialista, by="Fecha")%>%
  mutate(No_reg = replace_na(Coladas, 0))%>%
  select(-Z,-Coladas)%>%
  mutate(Especialista="Especialista")%>%
  arrange(Fecha)
# Same daily count for casts missing the foreman's registration.
lingotesNoregMando<-lingotesNoReg%>%
  filter(Mando=="No_Reg")%>%
  group_by(Fecha,Colada)%>%
  distinct(Colada)%>%
  group_by(Fecha)%>%
  summarize(Coladas=n())%>%
  full_join(calendario, lingotesNoregMando, by="Fecha")%>%
  mutate(No_Reg = replace_na(Coladas, 0))%>%
  select(-Z,-Coladas)%>%
  mutate(Mando="Mando")%>%
  arrange(Fecha)
# Distinct (alloy, format, cast, date) combinations.
coladasPorFormatoyAleacion2<-registroLingotes%>%
  distinct(Aleacion,Formato,Colada,Fecha)%>%
  arrange(Aleacion,Formato,Colada,Fecha)
# Per (alloy, format, date): expected casts (from the cast-id range) vs
# casts actually registered, and the resulting registration percentage.
Completitud<-coladasPorFormatoyAleacion2%>%
  group_by(Aleacion,Formato,Fecha)%>%
  summarize(primeraCol=min(Colada),ultimaCol=max(Colada),coladasTotales=(ultimaCol-primeraCol+1), coladasRegistradas=n(), porcentajeDeReg=(coladasRegistradas/coladasTotales*100))
# Daily roll-up of expected vs registered casts.
completitudDiaria<-Completitud%>%
  group_by(Fecha)%>%
  summarise(total=sum(coladasTotales), registradas=sum(coladasRegistradas), No_Reg=(total-registradas),Ambos="Ambos")
# Daily summary with per-role missing counts and percentages.
# Assumes the three daily tables are aligned row-for-row by Fecha — TODO
# confirm (a join on Fecha would be safer than positional column binding).
CompResumen<-completitudDiaria%>%
  mutate(Mando=lingotesNoregMando$No_Reg, Especialista=lingotesNoregEspecialista$No_reg)%>%
  select(-Ambos)%>%
  mutate(PorcenMando=100*Mando/total, PorcenEsp=100*Especialista/total,
         PorcenOmit=100*No_Reg/total,PorcenNoReg=100*(Mando+Especialista+No_Reg)/total )
# Column-wise max / sum helpers (NA-safe).
colMax <- function(data) sapply(data, max, na.rm = TRUE)
colSuma<-function(data) sapply(data, sum, na.rm = TRUE)
maximos<-colMax(filter(CompResumen, Fecha>"2019-03-25"))
sumas<-colSuma(select(filter(CompResumen, Fecha>"2019-03-25"),-Fecha))
# Long-format table for the stacked bar chart.
# NOTE(review): rep("Registradas",60) hard-codes 60 days; this breaks
# silently if the date range changes — derive it from nrow(completitudDiaria).
SinRegistrar<-data.frame(Fecha=Fecha<-c(completitudDiaria$Fecha,completitudDiaria$Fecha,lingotesNoregMando$Fecha,lingotesNoregEspecialista$Fecha),
                         SinReg=SinReg<-c(completitudDiaria$registradas,completitudDiaria$No_Reg,lingotesNoregMando$No_Reg,lingotesNoregEspecialista$No_reg),
                         Tipo=Tipo<-c(rep("Registradas",60),completitudDiaria$Ambos,lingotesNoregMando$Mando,lingotesNoregEspecialista$Especialista))
# Stacked daily bar chart of registered vs missing casts, by who failed to
# register. date_format() comes from the scales package — confirm it is
# attached.
ggplot(data=SinRegistrar, mapping=(aes(x=Fecha,fill=Tipo,y=SinReg )))+
  geom_bar(stat="identity")+
  scale_fill_discrete(name = " ", labels = c("No registrada", "Registrada por Mando(*)", "Registrada por Especialista(**)","Correctamente registrada"))+
  theme_minimal()+
  labs(title="Coladas, ¿se están registrando todas?",
       subtitle = "El porcentaje de coladas registradas desde el 25/03 supera el 98%.
Los mandos registran más del 97% de las coladas, mientras que para los especialistas el porcentaje supera el 99%. ",
       y="Coladas", x="", caption="(*)Falta el registro del especialista.\n(**)Falta el registro del mando")+
  scale_x_date(labels = date_format("%d/%m"), date_breaks = "week" )
|
03b13fec7659c80ab29bdec2ecca624d95dc8e5e
|
0e2859631b841ccdaf020dbdcdd42c5193b77192
|
/tests/testthat/test-helpers.R
|
5f6ecf83ef8f504f80b86829ccfaa444e8613b1d
|
[
"Apache-2.0"
] |
permissive
|
bcgov/rems
|
4f41ccf7588e38bc15e88ed1d6880505eac644cf
|
85cbbca54aff440909d9751e7c387adb8fbbba63
|
refs/heads/master
| 2023-08-17T07:49:23.354561
| 2023-08-10T16:55:46
| 2023-08-10T16:55:46
| 63,990,524
| 20
| 6
|
Apache-2.0
| 2023-08-10T16:55:47
| 2016-07-23T00:03:38
|
R
|
UTF-8
|
R
| false
| false
| 445
|
r
|
test-helpers.R
|
context("testing helpers")

# Regression guards on the package's helper look-up vectors: if the
# underlying site/requisition lists change, these fixed counts must be
# updated deliberately rather than drifting silently.
test_that("checking correct number EMS_IDs and REQ_IDs of lt_lake_sites and lt_lake_req()",
          {
            expect_length(lt_lake_sites(), 74)   # 74 long-term lake site ids
            expect_length(lt_lake_req(), 654)    # 654 requisition ids
          }
)

# Both helpers must return plain character vectors (not factors).
test_that("checking data type of lt_lake_sites() and lt_lake_req()",
          {
            expect_type(lt_lake_sites(), "character")
            expect_type(lt_lake_req(), "character")
          })
|
f2a4d6595d29d6aef1fbe64b7ab1420bfd3766fc
|
dba7646b74a68ef18375cd5c0ca3233ba1684ea1
|
/manuscript/figure_spearman_heatmaps/simpleHeatmapExample.R
|
d6920a132d3f8bc9b04317745a6c3a81ebcd7e12
|
[] |
no_license
|
jmig5776/targetscore
|
130dfe6aec7e6437e2f9ae6e3d075e5a871097e8
|
83e6550446abea571f78bfe39aeb7e4fde894c98
|
refs/heads/master
| 2023-02-06T14:17:23.311838
| 2020-12-18T13:36:08
| 2020-12-18T13:36:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 643
|
r
|
simpleHeatmapExample.R
|
# Minimal correlation-heatmap example on a subset of mtcars: compute the
# pairwise Pearson correlation matrix, melt it to long form, and render it
# as a blue-white-red tile plot, saved to plot.pdf.
library(ggplot2)
library(reshape2)
library(readr)

# Six numeric mtcars columns: mpg, disp, hp, drat, wt, qsec.
car_vars <- mtcars[, c(1,3,4,5,6,7)]
cor_mat <- round(cor(car_vars), 2)
# Long format (Var1, Var2, value) expected by geom_tile.
cor_long <- melt(cor_mat, na.rm = TRUE)

# Heatmap
heat_plot <- ggplot(data = cor_long, aes(Var2, Var1, fill = value)) +
  geom_tile(color = "white") +
  scale_fill_gradient2(low = "blue", high = "red", mid = "white",
                       midpoint = 0, limit = c(-1,1), space = "Lab",
                       name="Pearson\nCorrelation") +
  theme_minimal() +
  theme(
    axis.text.x = element_text(angle = 45, vjust = 1, size = 12, hjust = 1),
    legend.position = "bottom") +
  coord_fixed()

heat_plot
ggsave("plot.pdf", heat_plot)
|
1773e0bade9bbda4eb00ef23f060f3ea3e53083b
|
3fca57c782031174c3ddcbb5fac094811b77f840
|
/hugo_summarise_data.Rd
|
6de5b71d4c1a750a865513486d65e5c203f13bcc
|
[] |
no_license
|
woznicak/hugo
|
f82f67f3e669246be79172f0e79812c52f3401ec
|
af3fb40a267d2ac33636d9e5f9cf72646f92503a
|
refs/heads/master
| 2021-09-15T23:51:48.591332
| 2018-06-13T05:48:03
| 2018-06-13T05:48:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 374
|
rd
|
hugo_summarise_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hugo_summarise_data.R
\name{hugo_summarise_data}
\alias{hugo_summarise_data}
\title{Title}
\usage{
hugo_summarise_data(data, earlier_settings = TRUE, output = "pdf",
replace = TRUE, mode = "check", report_title = NULL)
}
\arguments{
\item{report_title}{}
}
\description{
Title
}
|
4aed7e00474c0cef4cdae936716a1697312028e2
|
913d325204484c4311e1d8dba5d6c455e8aa1703
|
/make_national_tables.r
|
10b69d86508af4ae13e70a61dad1086f9e4aa635
|
[] |
no_license
|
f-edwards/ai_an_transitions
|
0e0dd38724f92e750e4461ba39c724abf3d82cd5
|
455733b5d74893cb6f77fcc0b093d0ff872d7004
|
refs/heads/master
| 2023-04-03T18:23:31.044963
| 2023-03-22T12:56:57
| 2023-03-22T12:56:57
| 228,456,286
| 2
| 0
| null | 2020-12-08T20:42:55
| 2019-12-16T19:07:18
|
HTML
|
UTF-8
|
R
| false
| false
| 4,874
|
r
|
make_national_tables.r
|
###################################################
### age-specific National tables
###################################################
### make national tables for each outcome

## Build one national life table per (imputation, race/ethnicity) cell:
## restrict the data to the cell, collapse events (`var`) and population
## over all states within age, run make_life_table() on the cell, and
## stack the per-cell tables.
##
## `race_levels` / `imp_ids` default to the levels found in `dat` but can
## be supplied explicitly — the original code deliberately borrowed them
## from other data sets for the foster-care and TPR tables (preserved in
## the calls below).
##
## FIX: the original loops filtered with `.imp == i`, where i was the loop
## *index* into the vector of imputation ids — correct only when the ids
## happen to be exactly 1..m. We filter on the actual id instead.
build_nat_tab <- function(dat, race_levels = unique(dat$race_ethn),
                          imp_ids = unique(dat$.imp)) {
  tabs <- list()
  # Outer loop over imputations, inner over race groups, matching the
  # row ordering produced by the original nested loops.
  for (imp in imp_ids) {
    for (race in race_levels) {
      cell <- dat %>%
        filter(race_ethn == race, .imp == imp) %>%
        group_by(.imp, age, race_ethn) %>%
        summarise(var = sum(var),
                  pop = sum(pop)) %>%
        ungroup()
      tabs[[length(tabs) + 1]] <- make_life_table(cell)
    }
  }
  bind_rows(tabs)
}

## investigation
inv_nat_tab <- build_nat_tab(ncands_inv)

## substantiation
sub_nat_tab <- build_nat_tab(ncands_sub)

###### FOSTER CARE
## NOTE(review): the original took race/imputation levels from ncands_sub,
## not afcars_fc — kept as-is, but confirm this is intentional.
fc_nat_tab <- build_nat_tab(afcars_fc,
                            race_levels = unique(ncands_sub$race_ethn),
                            imp_ids = unique(ncands_sub$.imp))

### TPR
## NOTE(review): levels borrowed from ncands_inv, as in the original.
tpr_nat_tab <- build_nat_tab(tpr_tables,
                             race_levels = unique(ncands_inv$race_ethn),
                             imp_ids = unique(ncands_inv$.imp))
########################### JOINTS AND CONDITIONALS
#######################
## P(sub|inv)
########################
# Conditional probability of substantiation given investigation, per
# (imputation, age, race): ratio of the two life-table hazards q.
cond_sub_inv<-sub_nat_tab %>%
  rename(sub = q) %>%
  select(.imp, age, race_ethn, sub) %>%
  left_join(inv_nat_tab %>%
              rename(inv = q) %>%
              select(.imp, race_ethn, age, inv)) %>%
  mutate(cond = sub / inv,
         var = "P(sub|inv)")
#######################
## P(FC,inv)
########################
# Joint foster-care-and-investigation life table, built with the same
# nested loop pattern as the tables above.
# NOTE(review): `filter(.imp==i)` compares against the loop *index*, not
# the imputation id imps[i]; correct only when ids are exactly 1..m
# (consistent with the other loops in this file — fix them together).
fc_inv_nat_tab<-list()
race_id<-unique(afcars_fc_inv$race_ethn)
imps<-unique(afcars_fc_inv$.imp)
index<-1
for(i in 1:length(imps)){
  for(r in 1:length(race_id)){
    temp<-afcars_fc_inv
    temp<-temp %>%
      filter(race_ethn == race_id[r])
    temp<-temp %>%
      filter(.imp==i)
    temp<-temp %>%
      group_by(.imp, age, race_ethn) %>%
      summarise(var = sum(var),
                pop = sum(pop)) %>%
      ungroup()
    fc_inv_nat_tab[[index]]<-make_life_table(temp)
    index<-index+1
  }
}
fc_inv_nat_tab<-bind_rows(fc_inv_nat_tab)
###### P(FC|INV)
# Conditional: joint P(FC, inv) hazard divided by the investigation hazard.
cond_fc_inv<-fc_inv_nat_tab %>%
  select(.imp, age, race_ethn, q) %>%
  rename(fc_inv = q) %>%
  right_join(inv_nat_tab %>%
               select(.imp, age, race_ethn, q) %>%
               rename(inv = q)) %>%
  mutate(cond = fc_inv / inv,
         var = "P(FC|inv)")
#######################
## P(FC,sub)
########################
# Joint foster-care-and-substantiation life table (same loop pattern; the
# duplication across these loops is a refactoring candidate).
fc_sub_nat_tab<-list()
race_id<-unique(afcars_fc_sub$race_ethn)
imps<-unique(afcars_fc_sub$.imp)
index<-1
for(i in 1:length(imps)){
  for(r in 1:length(race_id)){
    temp<-afcars_fc_sub
    temp<-temp %>%
      filter(race_ethn == race_id[r])
    temp<-temp %>%
      filter(.imp==i)
    temp<-temp %>%
      group_by(.imp, age, race_ethn) %>%
      summarise(var = sum(var),
                pop = sum(pop)) %>%
      ungroup()
    fc_sub_nat_tab[[index]]<-make_life_table(temp)
    index<-index+1
  }
}
fc_sub_nat_tab<-bind_rows(fc_sub_nat_tab)
###### P(FC|SUB)
# Conditional: joint P(FC, sub) hazard divided by the substantiation hazard.
cond_fc_sub<-fc_sub_nat_tab %>%
  select(.imp, age, race_ethn, q) %>%
  rename(fc_sub = q) %>%
  right_join(sub_nat_tab %>%
               select(.imp, age, race_ethn, q) %>%
               rename(sub = q)) %>%
  mutate(cond = fc_sub / sub,
         var = "P(FC|sub)")
###### P(TPR|FC)
# Conditional: TPR hazard divided by the foster-care hazard.
cond_tpr_fc<-tpr_nat_tab %>%
  select(.imp, age, race_ethn, q) %>%
  rename(tpr = q) %>%
  right_join(fc_nat_tab %>%
               select(.imp, age, race_ethn, q) %>%
               rename(fc = q)) %>%
  mutate(cond = tpr / fc,
         var = "P(TPR|FC)")
|
3688ceeb912414af634406e3f9cec15ca85989cd
|
e699d84bd076c5ef9c31ac0a4f3caa5fb21464eb
|
/man/phylo.betapart.core.rd
|
2538bb495dec240ffbb1bacc8e18849e1f5d64b1
|
[] |
no_license
|
cran/betapart
|
e28acb1b8a3c80f7163453b3ecfc2a0e7699d6a1
|
5a785771c8f59f1ac14ee0cf789e7f22723711cf
|
refs/heads/master
| 2023-03-15T21:59:30.702820
| 2023-03-13T16:10:15
| 2023-03-13T16:10:15
| 17,694,697
| 1
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,295
|
rd
|
phylo.betapart.core.rd
|
\encoding{utf8}
\name{phylo.betapart.core}
\alias{phylo.betapart.core}
\title{
Core calculations of phylogenetic dissimilarities metrics
}
\description{
Computes the basic quantities needed for computing the multiple-site phylogenetic beta diversity measures
and pairwise phylogenetic dissimilarity matrices.
}
\usage{
phylo.betapart.core(x, tree)
}
\arguments{
\item{x}{ a community matrix or data frame, where rows are sites and columns are species.}
\item{tree}{ a phylogenetic tree of class phylo with tip names identical to the species names from the community matrix.}
}
\value{
The function returns a list with:
\item{sumSi}{ the sum of the phylogenetic diversity values of all sites}
\item{St}{ the total phylogenetic diversity in the dataset}
\item{shared}{ a matrix containing the phylogenetic diversity shared between pairs of sites}
\item{sum.not.shared}{ a matrix containing the total phylogenetic diversity not shared between pairs of sites: b+c}
\item{max.not.shared}{ a matrix containing the total maximum phylogenetic diversity not shared between pairs of sites: max(b,c)}
\item{min.not.shared}{ a matrix containing the total minimum phylogenetic diversity not shared between pairs of sites: min(b,c)}
}
\references{
Baselga A. (2012) The relationship between species replacement, dissimilarity derived from nestedness, and nestedness.
Global Ecology and Biogeography 21, 1223-1232
Bryant JA, Lamanna C, Morlon H, Kerkhoff AJ, Enquist BJ, et al. (2008) Microbes on mountainsides: Contrasting elevational patterns of bacterial and plant diversity. Proceedings of the National Academy of Sciences of the United States of America 105: 11505-11511.
Faith DP, Lozupone CA, Nipperess D, Knight R (2009) The Cladistic Basis for the Phylogenetic Diversity (PD) Measure Links Evolutionary Features to Environmental Gradients and Supports Broad Applications of Microbial Ecology's "Phylogenetic Beta Diversity" Framework. Int J Mol Sci 10: 4723-4741. doi: 10.3390/ijms10114723.
Leprieur F, Albouy C, De Bortoli J, Cowman PF, Bellwood DR, et al. (2012) Quantifying Phylogenetic Beta Diversity: Distinguishing between "True" Turnover of Lineages and Phylogenetic Diversity Gradients. PLoS ONE 7(8): e42760. doi:10.1371/journal.pone.0042760
Lozupone C, Knight R (2005) UniFrac: a new phylogenetic method for comparing microbial communities. Applied and Environmental Microbiology 71: 8228-8235.
}
\author{
Julien De Bortoli (juldebortoli@yahoo.fr), Fabien Leprieur(fabien.leprieur@univ-montp2.fr), Andrés Baselga and David Orme
}
\seealso{
\code{\link{phylo.beta.pair}}, \code{\link{phylo.beta.multi}}
}
\examples{
# toy tree for 6 species (sp1 to sp6)
require(ape)
toy.tree<-read.tree(text="(((sp1:1,sp2:1):5,(sp3:3,sp4:3):3):2,(sp5:7,sp6:7):1);")
plot(toy.tree)
# toy community table with 6 assemblages (A to F) with 6 species (sp1 to sp6)
toy.comm<-matrix(nrow=6, ncol=6)
rownames(toy.comm)<-c("A","B","C","D","E","F")
colnames(toy.comm)<-c("sp1","sp2","sp3","sp4","sp5","sp6")
toy.comm[1,]<-c(1,1,1,0,0,0)
toy.comm[2,]<-c(0,1,1,1,0,0)
toy.comm[3,]<-c(0,0,1,1,1,0)
toy.comm[4,]<-c(0,0,1,1,1,1)
toy.comm[5,]<-c(0,0,0,1,1,1)
toy.comm[6,]<-c(1,0,0,1,1,1)
toy.phylocore<-phylo.betapart.core(toy.comm, toy.tree)
}
|
85f7a6cbb14c6cb46679d302c4958a5891be06c7
|
0bddcf275b7786d95ee6d96b5a0740c8b58eb958
|
/PELfun 2.R
|
0b9672c4ed71eb918ea73f166d7e737239246b6f
|
[] |
no_license
|
sheng-ying/PEL-logit
|
3f83d68bfd315baafc4fd914e0eed6806dc7e331
|
e5e6f21a3d8162de6ad3e356e7d074646a2c637e
|
refs/heads/master
| 2023-02-12T14:48:38.863766
| 2021-01-06T04:54:35
| 2021-01-06T04:54:35
| 246,386,630
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,493
|
r
|
PELfun 2.R
|
library(Matrix)
library(foreach)
library(mvtnorm)
library(glmnet)
library(BB)
#=============================================================================#
# the extended PEL_alpha estimator that accounts for population heterogeneity
# b: paramter of interest
# a: parameter in the density ratio model
# eta: Lagrange multipliers
#=============================================================================#
# elastic net MLE
# Elastic-net logistic regression MLE.
# Selects the mixing parameter alpha over the grid 0, 0.1, ..., 1 by
# 10-fold cross-validation, then refits at the CV-optimal lambda.
# Note: the first column of x (intercept) is dropped before fitting.
# Returns c(intercept, coefficient vector).
enlr <- function (x,y){
  alpha_grid <- c(0:10)/10
  # Minimum CV deviance achieved at each candidate alpha.
  cv_err <- vapply(alpha_grid, function(a) {
    cv_fit <- cv.glmnet(x[,-1], y, nfolds=10, family="binomial", alpha=a)
    min(cv_fit$cvm)
  }, numeric(1))
  best <- which.min(cv_err)
  # Re-run CV at the winning alpha to pick lambda, then fit the final model.
  cv_best <- cv.glmnet(x[,-1], y, nfolds=10, family="binomial", alpha=alpha_grid[best])
  fit <- glmnet(x[,-1], y, family="binomial", lambda = cv_best$lambda.min, alpha=alpha_grid[best])
  c(fit$a0, as.vector(fit$beta))
}
# given b, obtain initial value for a based on estimation equations
# Given the outcome-model parameter b, obtain an initial value for the
# density-ratio parameter a by minimizing the sum of squared means of the
# five estimating equations psi0..psi4 (density-ratio normalization, two
# group-specific prevalence constraints, and two covariate-mean constraints).
# Relies on globals g1 and g2 (row indices of the two subgroups) defined
# elsewhere — TODO confirm they are in scope before this is called.
get.a <- function (x,b,info,setB,index){
  fa <- function(a){
    xa <- x[,setB]
    # psi0: E[exp(x_B'a) - 1] = 0 normalizes the density ratio.
    psi0 <- t(exp(xa%*%a)-1)
    # Logistic success probabilities under b.
    prob <- apply(x,1,function(u){1-1/(1+exp(b%*%u))})
    # Group-restricted prevalence constraints (zeroed outside g1 / g2).
    psi1 <- exp(xa%*%a)*(prob-info[1]); psi1[-g1]<-0
    psi2 <- exp(xa%*%a)*(prob-info[2]); psi2[-g2]<-0
    # Covariate-mean constraints for the two indexed columns.
    psi3 <- exp(xa%*%a)*(x[,index[1]]-info[3])
    psi4 <- exp(xa%*%a)*(x[,index[2]]-info[4])
    mean <- c( mean(psi0), mean(psi1), mean(psi2), mean(psi3), mean(psi4))
    sum (mean^2)
  }
  # Nelder-Mead from zero start, capped at 100 iterations (initializer only).
  optim(rep(0,length(setB)),fa,control = list(maxit=100))$par
}
# given a and b, lleta/seta: constraint log full likelihood/score w.r.t. eta (negative)
# Constrained log empirical-likelihood term in the Lagrange multipliers eta,
# given a and b: sum_i log(1 + eta' psi_i). Values of eta'psi are clipped to
# (1/n - 1, n] to keep the log finite; n, g1, g2 are globals.
lleta <- function(eta,x,a,b,info,setB,index){
  xa <- x[,setB]
  psi0 <- t(exp(xa%*%a)-1)
  prob <- apply(x,1,function(u){1-1/(1+exp(b%*%u))})
  # Stack the five estimating functions row-wise (one column per observation).
  psi.ex <- rbind(psi0, as.vector(exp(xa%*%a)*(prob-info[1])), as.vector(exp(xa%*%a)*(prob-info[2])),
                  as.vector(exp(xa%*%a)*(x[,index[1]]-info[3])), as.vector(exp(xa%*%a)*(x[,index[2]]-info[4])) )
  # Rows 2 and 3 apply only within subgroups g1 / g2.
  psi.ex[2,-g1]<-0; psi.ex[3,-g2]<-0
  eta.psi <- eta%*%psi.ex
  # Clip so 1 + eta'psi stays within (1/n, 1+n] before taking logs.
  eta.psi[which(eta.psi<=(1/n-1))] <- (1/n-1)
  eta.psi[which(eta.psi>n)] <- n
  sum(sapply(1+eta.psi,function(u){log(u)}))
}
# Score (gradient) of lleta with respect to eta:
# sum_i psi_i / (1 + eta' psi_i), returned as a row vector.
# Unlike lleta, no clipping of eta'psi is applied here — the gradient can
# therefore be inconsistent with the clipped objective near the boundary;
# TODO confirm this is acceptable for the box-constrained optimizer used.
seta <- function(eta,x,a,b,info,setB,index){
  xa <- x[,setB]
  psi0 <- t(exp(xa%*%a)-1)
  prob <- apply(x,1,function(u){1-1/(1+exp(b%*%u))})
  psi.ex <- rbind(psi0, as.vector(exp(xa%*%a)*(prob-info[1])), as.vector(exp(xa%*%a)*(prob-info[2])),
                  as.vector(exp(xa%*%a)*(x[,index[1]]-info[3])), as.vector(exp(xa%*%a)*(x[,index[2]]-info[4])) )
  psi.ex[2,-g1]<-0; psi.ex[3,-g2]<-0
  (1/(1+eta%*%psi.ex))%*%t(psi.ex)
}
# given eta and a, llb.ex/sb.ex: constraint log full likelihood/score w.r.t. b (negative)
# Negative constrained log full likelihood in (a, b) for fixed eta:
# -(logistic log-likelihood of y given x, b) + (empirical-likelihood term).
# Probabilities are clamped to [1/n, 1-1/n] to avoid log(0); n is a global.
llb.ex <- function(a,b,x,y,eta,info,setB,index){
  prob <- apply(x,1,function(u){1-1/(1+exp(b%*%u))})
  prob[which(prob==0)]<-1/n
  prob[which(prob==1)]<-1-1/n
  # Bernoulli log-likelihood of the observed outcomes.
  lc <- sum(y*sapply(prob,function(u){log(u)})+(1-y)*sapply(1-prob,function(u){log(u)}))
  # Empirical-likelihood constraint term (note: `lm` shadows stats::lm locally).
  lm <- lleta(eta,x,a,b,info,setB,index)
  -lc + lm
}
# Gradient of llb.ex with respect to b for fixed eta and a:
# negative logistic score plus the chain-rule contribution of the
# empirical-likelihood term through the psi functions.
# Uses globals n, p, g1, g2. Note: d1/d4/d5 derivatives w.r.t. b are zero
# matrices because psi0, psi3, psi4 do not depend on b.
sb.ex <- function(a,b,x,y,eta,info,setB,index){
  xa <- x[,setB]
  psi0 <- t(exp(xa%*%a)-1)
  prob <- apply(x,1,function(u){1-1/(1+exp(b%*%u))})
  psi.ex <- rbind(psi0, as.vector(exp(xa%*%a)*(prob-info[1])), as.vector(exp(xa%*%a)*(prob-info[2])),
                  as.vector(exp(xa%*%a)*(x[,index[1]]-info[3])), as.vector(exp(xa%*%a)*(x[,index[2]]-info[4])) )
  psi.ex[2,-g1]<-0;psi.ex[3,-g2]<-0
  # Per-observation derivatives of each psi row w.r.t. b (n x (p+1) each).
  d1.ex <- d4.ex <- d5.ex <- matrix(0,ncol = (p+1),nrow = n)
  d2.ex <- as.vector(exp(xa%*%a))*x*(prob*(1-prob))
  d3.ex <- as.vector(exp(xa%*%a))*x*(prob*(1-prob))
  d2.ex[-g1,]<-0; d3.ex[-g2,]<-0
  # Weighted combination of the derivatives, weights 1/(1 + eta'psi).
  d.ex <- eta%*%rbind( (1/(1+eta%*%psi.ex))%*%d1.ex, (1/(1+eta%*%psi.ex))%*%d2.ex, (1/(1+eta%*%psi.ex))%*%d3.ex,
                       (1/(1+eta%*%psi.ex))%*%d4.ex, (1/(1+eta%*%psi.ex))%*%d5.ex)
  - t(x)%*%(y-prob) + t(d.ex)
}
# local quadratic approximation (LQA) for alasso
# Local quadratic approximation (LQA) of the adaptive-lasso penalty:
# tau * sum_j b_j^2 / (2 |b.old_j| |b.ini_j|^v), skipping the intercept
# (first element). Zero entries in b.ini / b.old are bumped to 0.001 to
# avoid dividing by zero.
pen.ex <- function(b,b.ini,b.old,tau,v){
  b.ini[which(b.ini == 0)] <- 0.001
  b.old[which(b.old == 0)] <- 0.001
  lqa_weights <- 1/abs(2*b.old[-1]*abs(b.ini[-1])^v)
  tau * b[-1] %*% diag(lqa_weights) %*% b[-1]
}
# Gradient of the LQA adaptive-lasso penalty with respect to b.
# The leading 0 in c(0, b[-1]) leaves the intercept unpenalized; zero
# entries of b.ini / b.old are bumped to 0.001 to avoid division by zero.
dpen.ex <- function(b,b.ini,b.old,tau,v){
  b.ini[which(b.ini == 0)] <- 0.001
  b.old[which(b.old == 0)] <- 0.001
  grad_weights <- 1/abs(b.old*abs(b.ini)^v)
  tau * diag(grad_weights) %*% c(0, b[-1])
}
# BIC with constant = max(log(log(p)),1)
# BIC-type model-selection criterion for the extended PEL fit:
# 2 * (constrained negative log-likelihood) + |active set| * log(n) * C,
# with C = max(log(log(p)), 1) (modified BIC for diverging p).
# n and p are globals; the intercept b[1] is not counted in the model size.
bicex <- function(x,y,a,b,eta,info,setB,index){
  2*llb.ex(a,b,x,y,eta,info,setB,index) + sum(b[-1]!=0)*log(n)*max(log(log(p)),1)
}
# standard error estimation
# Sandwich-type standard-error estimation for the selected coefficients.
# setA: indices of the selected (active) coefficients; setB: columns used
# in the density-ratio model. Uses globals n, g1, g2 and Matrix::bdiag.
# NOTE(review): the local variable `dpen.ex` below shadows the dpen.ex()
# *function* defined elsewhere in this file — harmless here but confusing;
# consider renaming. Also, se.ex takes sqrt(diag(solve(cov.ex))/n), i.e.
# inverts the sandwich matrix again — confirm this is the intended formula
# rather than sqrt(diag(cov.ex)/n).
get.cov.ex <- function(setA, setB, x, b.ini, b.ex, a.ex, info, index, tau, v){
  x.true <- x[,setA]
  xa <- x[,setB]
  b.ex <- b.ex[setA]
  # Floor estimates at 0.1 to stabilize the adaptive weights.
  b1.ex <- sapply(b.ex,function(u){max(u,0.1)})
  b.ini <- sapply(b.ini,function(u){max(u,0.1)})
  weight <- abs(b.ini[setA])^(-v)
  # LQA penalty Hessian contribution (shadows the dpen.ex function).
  dpen.ex <- tau*diag(weight/b1.ex)/n
  prob3 <- apply(x.true,1,function(u){1-1/(1+exp(b.ex%*%u))})
  # Logistic information matrix on the active set.
  d5 <- t(x.true)%*%diag(prob3*(1-prob3))%*%x.true
  psi0 <- t(exp(xa%*%a.ex)-1)
  psi.ex <- rbind(psi0, as.vector(exp(xa%*%a.ex)*(prob3-info[1])), as.vector(exp(xa%*%a.ex)*(prob3-info[2])),
                  as.vector(exp(xa%*%a.ex)*(x[,index[1]]-info[3])), as.vector(exp(xa%*%a.ex)*(x[,index[2]]-info[4])) )
  psi.ex[2,-g1]<-0; psi.ex[3,-g2]<-0
  # Outer-product variance of the estimating functions.
  cov3.psi <- psi.ex%*%t(psi.ex)
  # Per-observation derivatives of each psi row w.r.t. (b_A, a).
  d1.ex <- cbind( matrix(0,ncol = (length(setA)),nrow = n), xa*as.vector(exp(xa%*%a.ex)))
  d2.ex <- cbind( as.vector(exp(xa%*%a.ex))*x.true*(prob3*(1-prob3)), xa*as.vector(exp(xa%*%a.ex)*(prob3-info[1])) );
  d3.ex <- cbind( as.vector(exp(xa%*%a.ex))*x.true*(prob3*(1-prob3)), xa*as.vector(exp(xa%*%a.ex)*(prob3-info[2])) );
  d2.ex[-g1,]<-0; d3.ex[-g2,]<-0
  d4.ex <- cbind( matrix(0,ncol = length(setA), nrow = n), xa*as.vector(exp(xa%*%a.ex))*x[,index[1]] )
  d5.ex <- cbind( matrix(0,ncol = length(setA), nrow = n), xa*as.vector(exp(xa%*%a.ex))*x[,index[2]] )
  d.ex <- rbind(colSums(d1.ex),colSums(d2.ex),colSums(d3.ex),colSums(d4.ex),colSums(d5.ex))
  # Information-type matrix: block-diagonal logistic part + EL correction,
  # then restricted to the active coefficients.
  cov1.ex <- as.matrix(bdiag(d5,matrix(0,ncol=length(setB),nrow=length(setB))))/n + t(d.ex)%*%solve(cov3.psi)%*%d.ex/n
  cov1.ex <- cov1.ex[1:length(setA),1:length(setA)]
  # Penalized sandwich: (I + P)^-1 I (I + P)^-1.
  cov.ex <- solve(cov1.ex+dpen.ex)%*%cov1.ex%*%solve(cov1.ex+dpen.ex)
  se.ex <- sqrt(diag(solve(cov.ex))/n)[1:length(setA)]
  return(se.ex)
}
# iterative algorithm for exteneded PEL_alpha
# Iterative algorithm for the extended penalized empirical-likelihood
# (PEL_alpha) estimator. Alternates between (1) one L-BFGS-B step updating
# (a, b) at fixed eta, with an LQA adaptive-lasso penalty on b, and
# (2) maximizing the EL term over the box-constrained multipliers eta.
# Runs at most 5 outer iterations; (a, b, eta) are only accepted when the
# new constrained objective is lower than the old one.
# Returns list(b.new, a.new, eta.new) — note these are the *last computed*
# values, which may not be the last *accepted* ones; TODO confirm intent.
pel.ex <- function (x,y,info,tau,v,setB,b.ini,index){
  K <- length(info)
  eta.old <- eta.ini <- rep(0,K+1)
  b.old <- b.ini
  # Initialize the density-ratio parameter from the estimating equations.
  a.old <- a.ini <- get.a(x,b.ini,info,setB,index)
  ab.old <- c(a.old, b.old)
  k <- 1
  while (k <= 5){
    # given eta, update a and b
    fab <- function(ab){
      a <- ab[1:length(a.old)]
      b <- ab[-c(1:length(a.old))]
      llb.ex(a,b,x,y,eta.old,info,setB,index) + pen.ex(b,b.ini,b.old,tau,v)
    }
    # maxit=1: a single damped step per outer iteration.
    ab.new <- optim(ab.old,fab,method = "L-BFGS-B",control = list(maxit=1))$par
    a.new <- ab.new[1:length(a.old)]
    b.new <- ab.new[-c(1:length(a.old))]
    # Hard-threshold tiny coefficients to exactly zero (sparsity).
    b.new[which(abs(b.new) < 0.001)] <- 0
    # given a and b, update eta
    feta <- function(eta){ lleta(eta,x,a.new,b.new,info,setB,index) }
    geta <- function(eta){ seta(eta,x,a.new,b.new,info,setB,index) }
    # Maximize (fnscale=-1) the EL term over eta within [-1/2, 1/2]^(K+1).
    eta.new <- optim(eta.old,feta,geta,method = "L-BFGS-B",lower=rep(-1/2,(K+1)),upper=rep(1/2,(K+1)),control = list(maxit=100,fnscale=-1))$par
    # Converged when the (a, b) step is negligible.
    if (max(abs(ab.new- ab.old)) <= 0.00001){break}
    k <- k+1
    # Accept the update only if it lowers the constrained objective.
    if (llb.ex(a.new,b.new,x,y,eta.new,info,setB,index)< llb.ex(a.old,b.old,x,y,eta.old,info,setB,index)){
      b.old <- b.new
      a.old <- a.new
      ab.old <- c(a.old, b.old)
      eta.old <- eta.new
    }
  }
  list(b.new=b.new, a.new=a.new, eta.new=eta.new)
}
|
009b5aa16e8d74b4db53ed8302e91bdf3fd36529
|
41f079ce35208231a5b4dd3f6f89218d9436d6fb
|
/hw/rlab_1.R
|
3bc823552bb0eae9f754fb4194ff9dbf73b3f242
|
[] |
no_license
|
dalsgit/897
|
838a8c278480a97d49d359932e8090b8500f2479
|
7caf877e70351bbadc08f927c0f0971a4a8b57a2
|
refs/heads/master
| 2021-01-21T22:14:31.415897
| 2017-11-27T23:20:24
| 2017-11-27T23:20:24
| 102,136,843
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 615
|
r
|
rlab_1.R
|
# R lab 1: basic R objects and exploration of the ISLR Auto data set.
install.packages("rmarkdown")
install.packages("ISLR")
library(ISLR)

x=c(1,2,3,3)
summary(x)
set.seed(3)
y=rnorm(100)
summary(y)
?plot

x3=matrix(1:12,3)
x3
x3[,2]

fix(Auto)
attach (Auto)
# NOTE: this converts the attached copy, not the Auto data frame itself.
cylinders =as.factor (cylinders )
plot(cylinders , mpg)

#install.packages("doBy")
library(doBy)
# Per-cylinder summary statistics for mpg.
summaryBy(mpg ~ cylinders, data = Auto,
          FUN = list(mean, max, min, median, sd))

hist(mpg)
hist(mpg ,col =2)
hist(mpg ,col =2, breaks =15)

# FIX: the formula tilde had been garbled to "???" (an encoding/paste
# casualty), making this line a syntax error. Restored the one-sided
# formula for the scatterplot matrix.
pairs(~ mpg + displacement + horsepower + weight +
        acceleration , Auto)

cor(Auto[sapply(Auto, is.numeric)])
plot(horsepower ,mpg)
identify (horsepower ,mpg ,name)  # interactive: click points to label them
|
3ef9ea4f9751c65db29c0cf6b9eb089e950697bf
|
bc664d2c4810c04a398cbb461ce8727a68391499
|
/R_scripts.R
|
a1e8bbeb471bd9165f6aab9e7028588ebc3986b9
|
[] |
no_license
|
JingChen1114/RepData_PeerAssessment1
|
f3206de03dd7b25019db7f3c9bc8bb85e02c408c
|
6a488cd26df5bc794e0bb12ec534620ca4e323b1
|
refs/heads/master
| 2020-12-03T09:00:55.331898
| 2020-01-02T15:56:40
| 2020-01-02T15:56:40
| 231,264,628
| 0
| 0
| null | 2020-01-01T21:13:43
| 2020-01-01T21:13:42
| null |
UTF-8
|
R
| false
| false
| 3,617
|
r
|
R_scripts.R
|
###############################################
# R_scripts.R
# DESCRIPTION: script for Reproducible Research
# Course Project1
###############################################
library(data.table)
library(dplyr)
library(ggplot2)
#Loading and processing data
unzip("activity.zip")
activity <- read.csv("activity.csv",header=TRUE,na.strings = "NA")
activity$date <- as.Date(activity$date,"%Y-%m-%d")
activity <- transform(activity,interval=factor(interval))
activity <- as.data.table(activity)
#What is mean total number of steps taken per day?
#1.barplot1
# Daily totals; na.rm=TRUE means days with only NAs contribute 0.
activity_day <- group_by(activity,date)
sum_step_day <- summarize(activity_day,sum=sum(steps,na.rm=TRUE))
png("./figure/barplot1.png")
ggplot(sum_step_day,aes(date,sum))+geom_bar(stat="identity")+theme_bw()+labs(x="Date",y="Total number of steps",title="Barplot of the total number of steps taken each day")
dev.off()
#2.histogram1
png("./figure/histogram1.png")
ggplot(sum_step_day,aes(sum))+geom_histogram(color="pink",fill="pink",bins=30)+theme_bw()+labs(x="Total number of steps taken per day",y="Frequency",title="Histogram of the total number of steps taken each day")
dev.off()
#3.mean and median
mean <- mean(sum_step_day$sum)      # NOTE: shadows base::mean from here on
median <- median(sum_step_day$sum)  # NOTE: shadows stats::median
#What is the average daily activity pattern?
#1.plot1
# Average steps per 5-minute interval across all days.
activity_interval <- group_by(activity,interval)
ave_step_interval <- summarize(activity_interval,average=mean(steps,na.rm=TRUE))
png("./figure/plot1.png")
ggplot(ave_step_interval,aes(x=as.integer(interval),y=average))+geom_line(color="red")+labs(x="Time interval",y="Average number of steps")+scale_x_continuous(breaks=seq(1,288,24),labels=ave_step_interval$interval[seq(1,288,24)])
dev.off()
#2.maximum
max_step_interval <- filter(ave_step_interval,average==max(average))
#Imputing missing values
#1.total missing data
sum_na <- sum(is.na(activity$steps))
#2.impute missing values based on average number of steps in particular 5-minutes interval
#3.impute missing values
# Row-by-row imputation: slow for large data, but fine for ~17k rows.
activity_without_na <- activity
activity_without_na$steps <- as.numeric(activity_without_na$steps)
for(i in 1:nrow(activity)){
  if(is.na(activity[i,"steps"])==TRUE){
    activity_without_na[i,"steps"] <- filter(ave_step_interval,interval==activity[i,"interval"][[1]]) %>% select(average)
  }
}
#4.histogram2, mean, median
activityNoNA_day <- group_by(activity_without_na,date)
sum_stepNoNA_day <- summarize(activityNoNA_day,sum=sum(steps))
png("./figure/histogram2_noNA.png")
ggplot(sum_stepNoNA_day,aes(sum))+geom_histogram(color="pink",fill="pink",bins=30)+theme_bw()+labs(x="Total number of steps taken per day",y="Frequency",title="Histogram of the total number of steps taken each day (NA imputed)")
dev.off()
mean_NoNA <- mean(sum_stepNoNA_day$sum)
median_NoNA <- median(sum_stepNoNA_day$sum)
#Are there differences in activity patterns between weekdays and weekends?
#1.weekday/weekend
# Classify a date; relies on an English locale for weekdays() — TODO
# confirm, otherwise "Saturday"/"Sunday" will never match.
is_weekday <- function(x){
  if(weekdays(x) %in% c("Saturday","Sunday")){
    result <- "weekend"
  }else{
    result <- "weekday"
  }
}
activity_without_na <- mutate(activity_without_na,weekdays=sapply(date,is_weekday))
#2.plot2
# Interval averages split by weekday/weekend, shown as stacked panels.
activity_without_na <- transform(activity_without_na,weekdays=factor(weekdays))
activityNoNA_interval <- group_by(activity_without_na,interval,weekdays)
ave_stepNoNA_interval <- summarize(activityNoNA_interval,average=mean(steps))
png("./figure/plot2.png")
ggplot(ave_stepNoNA_interval,aes(x=as.integer(interval),y=average,color=weekdays))+geom_line()+facet_grid(weekdays~.)+labs(x="Time interval",y="Average number of steps")+scale_x_continuous(breaks=seq(1,288,48),labels=ave_step_interval$interval[seq(1,288,48)])
dev.off()
|
c6f1a28e687a6dc366c95cc3ef0d1dc157a2bc59
|
b215ed7c605e5750f748b4dd2276e4cebf588318
|
/R/grab_rt_live.R
|
53e9fd8fe5e6a338ee6a5f89cda09aa406ca5cab
|
[] |
no_license
|
LaurenHarriman/CalCAT
|
43b79fc874ea4ca218687069e302395a094b6e9b
|
6c974ee984b2b31e89fd6fd62283ddc225b6eade
|
refs/heads/master
| 2022-11-05T19:19:40.366508
| 2020-06-25T19:45:16
| 2020-06-25T19:45:16
| 274,999,811
| 1
| 0
| null | 2020-06-25T19:35:03
| 2020-06-25T19:35:02
| null |
UTF-8
|
R
| false
| false
| 625
|
r
|
grab_rt_live.R
|
# Download state-level Rt estimates from rt.live and filter to one state.
# State / ST default to globals (state_name, state_abbrv) set elsewhere.
# Returns the filtered data frame, or NULL when the source URL is not
# reachable (FIX: the original left `rt_live` undefined on the failure
# branch, so `return(rt_live)` raised "object not found" instead of
# degrading gracefully).
grab_rt_live <- function(State = state_name, ST = state_abbrv){
  url <- "https://d14wlfuexuxgcm.cloudfront.net/covid/rt.csv"
  rt_live <- NULL
  if ( as.character(url_file_exists(url)[1]) == "TRUE" ) {
    # Reuse `url` rather than repeating the literal (the original
    # duplicated the address, risking divergence between check and fetch).
    rt_live <- read.csv(url) %>%
      filter(region == ST) %>%
      mutate(date = as.Date(as.character(date)),
             region = as.character(region))
    msg <- paste0("Successfully download data from Rt.live for ", State, " on ", Sys.Date())
  } else {
    msg <- paste0("Problem with Rt.live link to file updates. Check URL.")
  }
  print(msg)
  return(rt_live)
}
|
5d6b971e566d0c47b5b7827e1da8dece7d022fbe
|
8b879b032d3ec61b5d389d3deec855e5b5522b81
|
/tmp/mosdepth.R
|
c49c321df0a356ee2d05b9167820ba9bfadd6c8b
|
[] |
no_license
|
msubirana/ergWgsTools
|
5756e5a42c8ec6c090a2be11f6548eefca2ced3c
|
f692050f574ed78b9b2e425d3b84f686a6103b2e
|
refs/heads/master
| 2021-03-29T06:05:10.588693
| 2020-06-03T15:10:31
| 2020-06-03T15:10:31
| 247,924,755
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 380
|
r
|
mosdepth.R
|
# Driver script: run mosdepth() on one input file given on the command line.
# Usage: Rscript mosdepth.R <inputFile> <outPath> [threads]
source('/imppc/labs/lplab/share/marc/repos/ergWgsTools/tmp/variables.R')
library(devtools)
devtools::load_all('/imppc/labs/lplab/share/marc/repos/ergWgsTools')

args <- commandArgs(trailingOnly = TRUE)
inputFile <- args[1]
outPath <- args[2]
# FIX: the original computed parallel::detectCores() and then
# unconditionally overwrote it with args[3] (NA when the third argument
# was omitted). Use args[3] when supplied, otherwise all available cores.
threads <- if (length(args) >= 3 && !is.na(args[3])) args[3] else parallel::detectCores()

mosdepth(inputFile=inputFile,
         threads=threads,
         outPath=outPath)
|
009c101da124bbdcaf0878af5406ba93095965bd
|
b6f61d4ab4b273d985a5a2ac0cb3aaad74ae4dad
|
/cachematrix.R
|
ea56592472c2ea7e8c9ec319f0b63b5357a0e862
|
[] |
no_license
|
lautier/1st_repo
|
e6729d99c4e5171d6779ab45898a753c658d707d
|
46833b1183ad8442b2bbcfe5111acdc6de826bde
|
refs/heads/master
| 2021-01-16T23:06:37.733362
| 2015-11-07T18:43:43
| 2015-11-07T18:43:43
| 40,971,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,691
|
r
|
cachematrix.R
|
## The two functions below cache the inverse of a square, invertible
## matrix so it is computed at most once instead of on every access.

## makeCacheMatrix: wrap a matrix in a list of getter/setter closures that
## also hold a cached inverse. The cache starts empty and is invalidated
## whenever a new matrix is stored via set().
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # a new matrix invalidates the cached inverse
  }
  get <- function() {
    x
  }
  setsolve <- function(solve) {
    cached_inverse <<- solve
  }
  getsolve <- function() {
    cached_inverse
  }

  # Expose the four closures under their conventional names.
  list(set = set,
       get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## cacheSolve: return the inverse of the matrix wrapped by makeCacheMatrix,
## computing it only on the first call; later calls return the cached copy
## (and announce the cache hit via message()).
cacheSolve <- function(x, ...) {
  inv <- x$getsolve()
  # Cache hit: return immediately without recomputing.
  if (!is.null(inv)) {
    message("retrieving inverse from cache")
    return(inv)
  }
  # Cache miss: invert, store for next time, and return.
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setsolve(inv)
  inv
}
|
0f10c87d35a5f728cc7ef2c39783664380f4ecb3
|
b2d7e379ec3d409c3c097df88805ed68ecae085d
|
/ksvm_mais.R
|
b3f48dd4f00079b30dd5569d2e348a14468243d4
|
[] |
no_license
|
renluqin/MachineLearningProjects
|
96d57e6917037528d1c38a777688252d61f47b98
|
92b62908ac09df0991160bf7ba074e1658df3324
|
refs/heads/master
| 2022-12-04T09:22:23.532201
| 2020-08-27T21:09:47
| 2020-08-27T21:09:47
| 290,880,093
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 731
|
r
|
ksvm_mais.R
|
# 10-fold cross-validated comparison of kernlab kernels for predicting
# maize yield anomalies. Produces one column of fold-wise test MSEs per
# kernel and a boxplot comparing them.
mais <- read.csv(file = "mais_train.csv", header = TRUE,na.strings="?",row.names=NULL)
head(mais)
mais <- subset(mais, select = -X )
#mais <- as.data.frame(scale(mais))
library(e1071)
#install.packages("kernlab")
library(kernlab)
K <- 10
folds = sample(1:K,nrow(mais),replace=TRUE)
kernel<-c("rbfdot","polydot","vanilladot","tanhdot","laplacedot","besseldot","anovadot","splinedot")
# FIX: the original looped i over 1:9 (one past the 8 kernels) but always
# fitted kernel[1] and wrote into CV[,1], so only the first kernel was ever
# evaluated — nine times over. Index both the kernel and the CV column by i,
# and size everything from the kernel vector.
I <- length(kernel)
CV <- matrix(data=0, nrow = K, ncol = I)
for(i in (1:I)){
  for(k in (1:K) ){
    reg.kcross <- ksvm(yield_anomaly ~ . , mais[folds!=k,], kernel = kernel[i])
    pred.kcross <- predict(reg.kcross, newdata = as.data.frame(mais[folds==k,]))
    # Fold-k test mean squared error for kernel i.
    CV[k,i] <- mean((mais$yield_anomaly[folds==k]-pred.kcross)^2)
  }
}
boxplot(CV)
|
071627e6ef4ce789db05a43be29f0f1a74e4b7bb
|
735d13ef3b0a2f7c640951c3c26944aabaa5908f
|
/R/info_summary.R
|
26f845cacf7654a1e0c110718417e28feb10ff91
|
[] |
no_license
|
cran/DIFtree
|
c6f2582a868e36dc12a514a428a8c4b205bbb929
|
a1dfeb6c9e89a078c543da504698939992578700
|
refs/heads/master
| 2020-12-25T17:13:07.031869
| 2020-06-05T08:30:03
| 2020-06-05T08:30:03
| 34,724,799
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,459
|
r
|
info_summary.R
|
# Summarise the DIF (differential item functioning) result for one item.
#
# Args:
#   splits: table of tree splits with columns "item" and "variable".
#           For model == 2 with type == 2 it is instead a list of two such
#           tables (presumably uniform splits in [[1]] and non-uniform
#           splits in [[2]] — TODO confirm against the caller).
#   item:   identifier of the item to summarise.
#   model:  integer model code (1, 2 or 3 in the branches below).
#   type:   integer type code on input; NOTE the same variable is reused
#           below to hold the output label ("uniform"/"non-uniform"/"---").
#
# Returns: a list with elements item, dif ("yes"/"no"), type, variables
#          (comma-separated split variables) and nos (number of splits).
info_summary <-
function(splits,
         item,
         model,
         type){
  # Collect all items that appear in any split; an item shows DIF iff it
  # occurs in that set.
  if(model==2 & type==2){
    dif_items <- unique(c(splits[[1]][,"item"],splits[[2]][,"item"]))
  } else{
    dif_items <- unique(splits[,"item"])
  }
  dif <- ifelse(item %in% dif_items,"yes","no")
  if(dif=="yes"){
    if(model==1 | model==3 | (model==2 & type==1)){
      type <- "uniform"
      variables <- paste(unique(splits[splits[,"item"]==item,"variable"]),collapse=",")
      nos <- nrow(splits[splits[,"item"]==item,])
    }
    # After a branch assigns a label to `type`, later comparisons like
    # type==3 compare a string against an integer; that is harmless here
    # because "uniform"/"non-uniform" never equal "3", but it means at
    # most one branch can fire per call.
    if(model==2 & type==2){
      type <- ifelse(item %in% unique(splits[[2]][,"item"]),"non-uniform","uniform")
      variables <- paste(unique(c(splits[[1]][splits[[1]][,"item"]==item,"variable"],splits[[2]][splits[[2]][,"item"]==item,"variable"])),collapse=",")
      nos <- nrow(rbind(splits[[1]][splits[[1]][,"item"]==item,],splits[[2]][splits[[2]][,"item"]==item,]))
    }
    if(model==2 & type==3){
      type <- "non-uniform"
      variables <- paste(unique(splits[splits[,"item"]==item,"variable"]),collapse=",")
      nos <- nrow(splits[splits[,"item"]==item,])
    }
  } else{
    # No DIF: placeholder values for all summary fields.
    type <- variables <- nos <- "---"
  }
  output <- list("item"=item,
                 "dif"=dif,
                 "type"=type,
                 "variables"=variables,
                 "nos"=nos)
  return(output)
}
|
73202a83bb79c95f83f10071745b2e3563e7f06c
|
daa4e8cf09f8b0a7437c72e8400c901798cf5102
|
/man/subset_models.Rd
|
742946f420ec58959c397f4dbb240d0d4913d6eb
|
[] |
no_license
|
jacobbien/simulator
|
cbab7e91945b9a6dbe9b309a256f41e8ef1a6a30
|
3d325ec78d48c57cbdc16d1455c2ffcc85ae8bb1
|
refs/heads/master
| 2023-05-26T15:26:59.755742
| 2023-02-02T07:53:44
| 2023-02-02T07:53:44
| 62,213,043
| 51
| 13
| null | 2023-05-19T04:46:04
| 2016-06-29T09:21:45
|
R
|
UTF-8
|
R
| false
| true
| 553
|
rd
|
subset_models.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model-class.R
\name{subset_models}
\alias{subset_models}
\title{Subset Models}
\usage{
subset_models(m, ...)
}
\arguments{
\item{m}{list of \code{\linkS4class{Model}} objects}
\item{...}{logical expression involving parameters of Models. For now, can
only be parameters that are of length 1 and either of class numeric or
character}
}
\description{
Given a list of \code{\linkS4class{Model}} objects, returns model names
which meet conditions. Uses \code{\link{subset}}
}
|
03ab0ceddbe597c6d8f709356a6ac66a88bc4c5c
|
b7457a6e39c6f2d9e0d54d0ba19fb013517a11bf
|
/man/load_install_packages.Rd
|
5d19d703cbf84c4ba429180f36d6f1b5ad33aa91
|
[
"MIT"
] |
permissive
|
stevenndungu/quickr
|
57522cb1d2bb2e34e9823f81a03e4f78ff0d06f3
|
3d30c85d7bced4550a40ac1b8fcde81add83694a
|
refs/heads/master
| 2022-05-27T22:52:15.251104
| 2020-05-01T09:32:16
| 2020-05-01T09:32:16
| 228,349,778
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 535
|
rd
|
load_install_packages.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/install_load_packages.R
\name{load_install_packages}
\alias{load_install_packages}
\title{A function that loads installed packages and loads and installs those not installed.}
\usage{
load_install_packages(x)
}
\arguments{
\item{x}{A vector of all the packages you want to install or load}
}
\description{
A function that loads installed packages and loads and installs those not installed.
}
\examples{
load_install_packages(c("tidyverse","knitr"))
}
|
ff6db7b142dc3856b18ad0406a7592be977ed248
|
527f5efb3c31800f50b725c851fd0e86a239bb33
|
/data-mining-iti8730_assignment-3/DegreeCentrality_DegreePrestige_NodeGregariousness.R
|
b0e5a36b7938b85194fbcd1e909b8459ca488418
|
[
"MIT"
] |
permissive
|
shwetasuran/Data_Mining_Assignments
|
778f10429e1a071c7cd81362cf4ace65fd6ace56
|
af8bd0b30da0ae364309bf9d329ef5bad7c9d59d
|
refs/heads/master
| 2023-09-04T20:38:28.610053
| 2021-11-14T17:09:49
| 2021-11-14T17:09:49
| 427,987,947
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,270
|
r
|
DegreeCentrality_DegreePrestige_NodeGregariousness.R
|
# Degree Centrality - Degree Prestige & Node Gregariousness
# Reads a media network (edge and node CSVs), plots the raw graph, then
# computes and plots total/in/out degree-based measures normalised by
# (NodeCount - 1).
rm(list = ls())
graphics.off()
library("igraph")
Edges <- read.csv("Dataset1-Media-Example-EDGES.csv")
Nodes <- read.csv("Dataset1-Media-Example-NODES.csv")
NodeCount = nrow(Nodes)
EdgeCount = nrow(Edges)
NodeName = Nodes$media
NodeWeight = c(Nodes$audience.size)
# Flatten the edge list into the interleaved from/to character vector
# that igraph::graph() expects.
GraphPlot <- vector(mode="character", 2*EdgeCount)
l = 1
for (m in 1:EdgeCount) {
  GraphPlot[l] = as.character(Edges$from[m])
  GraphPlot[l + 1] = as.character(Edges$to[m])
  l = l + 2
}
GGraph = graph(c(GraphPlot))
plot(GGraph, edge.arrow.size = 0.15, edge.color = "blue", vertex.size = 25,
     vertex.color = "yellow", vertex.frame.color = "blue", vertex.label.color = "black")
title(main = "Graph_Of_Input")
NodeDegree <- array(0, NodeCount)
# Node_Degree (Computation): node ids are matched by stripping the first
# character of the vertex name (e.g. "s05" -> 5) — assumes one-letter
# prefix followed by the numeric id; TODO confirm against the CSV.
for (m in 1:NodeCount) {
  for (j in 1:EdgeCount) {
    if (m == as.numeric(substring(Edges$from[j], 2)) ||
        m == as.numeric(substring(Edges$to[j], 2))) {
      NodeDegree[m] = NodeDegree[m] + 1 }}}
In_NodeDegree <- array(0, NodeCount)
Out_NodeDegree <- array(0, NodeCount)
# calculate in-degree (edges pointing into each node)
for (m in 1:NodeCount) {
  for (j in 1:EdgeCount) {
    if (m == as.numeric(substring(Edges$to[j], 2))) {
      In_NodeDegree[m] = In_NodeDegree[m] + 1
    }
  }
}
# Out-degree is whatever remains of the total degree.
Out_NodeDegree = NodeDegree - In_NodeDegree
DegreeCentrality = NodeDegree/(NodeCount-1)
DegreePrestige = In_NodeDegree/(NodeCount-1)
NodeGregariousness = Out_NodeDegree/(NodeCount-1)
cat("\n Degree_Centrality:\n", DegreeCentrality)
cat("\n Degree_Prestige:\n", DegreePrestige)
cat("\n Node_Gregariousness:\n", NodeGregariousness)
# Re-plot the graph three times with vertex size scaled by each measure.
plot(GGraph, edge.arrow.size = 0.15, edge.color = "blue", vertex.size = c(NodeDegree*5),
     vertex.color = "yellow", vertex.frame.color = "blue", vertex.label.color = "black")
title(main = "Degree_Centrality")
plot(GGraph, edge.arrow.size = 0.15, edge.color = "blue", vertex.size = c(In_NodeDegree*5),
     vertex.color = "yellow", vertex.frame.color = "blue", vertex.label.color = "black")
title(main = "Degree_Prestige")
plot(GGraph, edge.arrow.size = 0.15, edge.color = "blue", vertex.size = c(Out_NodeDegree*5),
     vertex.color = "yellow", vertex.frame.color = "blue", vertex.label.color = "black")
title(main = "Node_Gregariousness")
|
433524ef3a1b0c44875e5637c4c3c469a7ecbf65
|
51250726e0ce12a81f75572be193d0b6742554cf
|
/man/p_base.Rd
|
9da5d613eaa9e3f11b558b82ae764fe89f679b96
|
[] |
no_license
|
dpastoor/pacman
|
6ead1b9913e7d2a6b018fc2e6390fd2d86ff4673
|
3b4c2c7f47f2d7faf7563f7b76a92953da47f884
|
refs/heads/master
| 2021-01-23T22:01:25.517006
| 2014-11-04T00:00:09
| 2014-11-04T00:00:09
| 26,233,734
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 831
|
rd
|
p_base.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{p_base}
\alias{p_base}
\title{Base Install Packages}
\usage{
p_base(base.only = TRUE, open = FALSE, basemarker = "***")
}
\arguments{
\item{base.only}{logical. If \code{TRUE} a character vector of only base
install packages is returned.}
\item{open}{logical. If \code{TRUE} opens the directory of the base install
packages.}
\item{basemarker}{Character string. The string to append to mark which
packages are part of the default packages.}
}
\description{
List just base packages or list all the packages in the local library and
mark those in a base install.
}
\note{
Packages that are installed when R starts are marked with an asterisk(*).
}
\examples{
\dontrun{
p_base()
p_base(TRUE)
}
}
\seealso{
\code{\link[base]{getOption}}
}
\keyword{base}
\keyword{package}
|
cb3a89c6d0a9c91da0274f7e015816f6cd92906c
|
67a81b3866bcf28cb8471f923004cb53c48990b7
|
/man/scglrTheme.Rd
|
7654e8b51d4f84431ed2bf253eed50d8538d95ab
|
[] |
no_license
|
cran/SCGLR
|
b446421e879170dc7d80fab35f4a73e2fd5b934a
|
f3777ab03ac360c000915777863a0c91aa500e21
|
refs/heads/master
| 2021-01-17T13:03:49.640138
| 2018-09-28T08:30:03
| 2018-09-28T08:30:03
| 17,693,491
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,123
|
rd
|
scglrTheme.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/theme.r
\name{scglrTheme}
\alias{scglrTheme}
\title{Function that fits the theme model}
\usage{
scglrTheme(formula, data, H, family, size = NULL, weights = NULL,
offset = NULL, subset = NULL, na.action = na.omit, crit = list(),
method = methodSR(), st = FALSE)
}
\arguments{
\item{formula}{an object of class "\code{\link[=multivariateFormula]{MultivariateFormula}}" (or one that can be coerced
to that class): a symbolic description of the model to be fitted. The details
of model specification are given under Details.}
\item{data}{data frame.}
\item{H}{vector of R integer. Number of components to keep for each theme}
\item{family}{a vector of character of the same length as the number of dependent variables:
"bernoulli", "binomial", "poisson" or "gaussian" is allowed.}
\item{size}{describes the number of trials for the binomial dependent variables.
A (number of statistical units * number of binomial dependent variables) matrix is expected.}
\item{weights}{weights on individuals (not available for now)}
\item{offset}{used for the poisson dependent variables.
A vector or a matrix of size: number of observations * number of Poisson dependent variables is expected.}
\item{subset}{an optional vector specifying a subset of observations to be used in the fitting process.}
\item{na.action}{a function which indicates what should happen when the data contain NAs. The default is set to \code{na.omit}.}
\item{crit}{a list of two elements : maxit and tol, describing respectively the maximum number of iterations and
the tolerance convergence criterion for the Fisher scoring algorithm. Default is set to 50 and 10e-6 respectively.}
\item{method}{structural relevance criterion. Object of class "method.SCGLR"
built by \code{\link{methodSR}} for Structural Relevance.}
\item{st}{logical (FALSE) theme build and fit order. TRUE means random, FALSE means sequential (T1, ..., Tr)}
}
\value{
a list of SCGLRTHM class. Each element is a SCGLR object
}
\description{
Calculates the components to predict all the dependent variables.
}
\details{
Models for theme are specified symbolically. A model has the form \code{response ~ terms}, where \code{response}
is the numeric response vector and \code{terms} is a series of R themes composed of
predictors. Themes are separated by "|" (pipe), e.g. Y1+Y2+...
~ X11+X12+...+X1_ | X21+X22+... | ...+X1_+... | A1+A2+... See \code{\link{multivariateFormula}}.
}
\examples{
\dontrun{
library(SCGLR)
# load sample data
data(genus)
# get variable names from dataset
n <- names(genus)
n <-n[!n\%in\%c("geology","surface","lon","lat","forest","altitude")]
ny <- n[grep("^gen",n)] # Y <- names that begins with "gen"
nx1 <- n[grep("^evi",n)] # X <- remaining names
nx2 <- n[-c(grep("^evi",n),grep("^gen",n))]
form <- multivariateFormula(ny,nx1,nx2,A=c("geology"))
fam <- rep("poisson",length(ny))
testthm <-scglrTheme(form,data=genus,H=c(2,2),family=fam,offset = genus$surface)
plot(testthm)
}
}
|
3e6d8b24b2812da03f4f64189b211a860ae21a20
|
81dc5d5cbbf5335e1d951374c04a73937a3a9766
|
/11_gene_content_analysis/function_make_pcoaTable.R
|
1dde12ffd54b1d31f9d7fef5d421635d577db617
|
[] |
no_license
|
LebeerLab/caseiGroup_mSystems_pipeline
|
19f90be776fd47c98cb561724644560d7169cc77
|
78f013f861d28080d1ed11c4ceb236752381cafe
|
refs/heads/master
| 2021-01-01T18:44:31.406047
| 2018-05-14T09:37:43
| 2018-05-14T09:37:43
| 98,420,988
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 426
|
r
|
function_make_pcoaTable.R
|
# Build a 2-D PCoA (principal coordinates) table from per-genome gene
# counts.
#
# T_genome_orthogroup: long-format table with at least the columns
#   orthogroup, genome and ngenes (gene count of that orthogroup in that
#   genome).
#
# Returns a data.frame with a "genome" column plus the first two
# principal-coordinate axes of the Bray-Curtis dissimilarities between
# genome gene-content profiles. Relies on dplyr/tidyr/tibble (%>%,
# spread, ...) and vegan::vegdist being attached by the caller.
make_pcoaTable = function(T_genome_orthogroup) {
  T_genome_orthogroup %>%
    select(orthogroup, genome, ngenes) %>%
    # wide genome x orthogroup count matrix; absent orthogroups become 0
    spread(key = orthogroup, value = ngenes, fill = 0) %>%
    remove_rownames() %>%
    column_to_rownames(var = "genome") %>%
    as.matrix() %>%
    # Bray-Curtis dissimilarity between genome profiles (vegan)
    vegdist(method = "bray") %>%
    # classical MDS / PCoA, keeping the first two axes
    cmdscale(k = 2) %>%
    as.data.frame() %>%
    rownames_to_column(var = "genome") %>%
    return()
}
|
fec328ea3dbead5215eab08691466bdce6232a95
|
10873a4e41464f753732b28ba9425cda5520f850
|
/emulator/res/045.r
|
d4d82af524aecaae29165e89511e9a365845956b
|
[] |
no_license
|
uatach/mc861-nes
|
3b3e11bb6876ca47b319acd25d7714b7c7bb9069
|
9583086364ab5866c7104408bb154bec6f3e164c
|
refs/heads/master
| 2020-08-02T12:47:28.823972
| 2019-10-04T20:53:15
| 2019-10-04T20:53:15
| 211,356,279
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,816
|
r
|
045.r
|
| pc = 0xc002 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc003 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x0100 | p[NV-BDIZC] = 00110110 |
| pc = 0xc005 | a = 0x00 | x = 0x42 | y = 0x00 | sp = 0x0100 | p[NV-BDIZC] = 00110100 |
| pc = 0xc006 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x0100 | p[NV-BDIZC] = 00110110 |
| pc = 0xc008 | a = 0x00 | x = 0xff | y = 0x00 | sp = 0x0100 | p[NV-BDIZC] = 10110100 |
| pc = 0xc009 | a = 0x00 | x = 0xff | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 10110100 |
| pc = 0xc00b | a = 0x00 | x = 0x42 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 00110100 |
| pc = 0xc00c | a = 0x00 | x = 0xff | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 10110100 |
| pc = 0xc00e | a = 0x00 | x = 0xff | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 00110110 |
| pc = 0xc010 | a = 0x00 | x = 0x42 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 00110100 |
| pc = 0xc011 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 00110110 |
| pc = 0xc013 | a = 0x80 | x = 0x00 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 10110100 |
| pc = 0xc015 | a = 0x80 | x = 0x42 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 00110100 |
| pc = 0xc016 | a = 0x80 | x = 0x80 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 10110100 |
| pc = 0xc018 | a = 0x80 | x = 0x00 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 00110110 |
| pc = 0xc01a | a = 0x42 | x = 0x00 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 00110100 |
| pc = 0xc01b | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 00110110 |
| pc = 0xc01d | a = 0x00 | x = 0x80 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 10110100 |
| pc = 0xc01f | a = 0x42 | x = 0x80 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 00110100 |
| pc = 0xc020 | a = 0x80 | x = 0x80 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 10110100 |
| pc = 0xc022 | a = 0x00 | x = 0x80 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 00110110 |
| pc = 0xc024 | a = 0x00 | x = 0x80 | y = 0x42 | sp = 0x01ff | p[NV-BDIZC] = 00110100 |
| pc = 0xc025 | a = 0x00 | x = 0x80 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 00110110 |
| pc = 0xc027 | a = 0x80 | x = 0x80 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 10110100 |
| pc = 0xc029 | a = 0x80 | x = 0x80 | y = 0x42 | sp = 0x01ff | p[NV-BDIZC] = 00110100 |
| pc = 0xc02a | a = 0x80 | x = 0x80 | y = 0x80 | sp = 0x01ff | p[NV-BDIZC] = 10110100 |
| pc = 0xc02c | a = 0x80 | x = 0x80 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 00110110 |
| pc = 0xc02e | a = 0x42 | x = 0x80 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 00110100 |
| pc = 0xc02f | a = 0x00 | x = 0x80 | y = 0x00 | sp = 0x01ff | p[NV-BDIZC] = 00110110 |
| pc = 0xc031 | a = 0x00 | x = 0x80 | y = 0x80 | sp = 0x01ff | p[NV-BDIZC] = 10110100 |
| pc = 0xc033 | a = 0x42 | x = 0x80 | y = 0x80 | sp = 0x01ff | p[NV-BDIZC] = 00110100 |
| pc = 0xc034 | a = 0x80 | x = 0x80 | y = 0x80 | sp = 0x01ff | p[NV-BDIZC] = 10110100 |
|
36dd963556895f76d9d8a4b9a1234620b35f84a7
|
675b6e49d198bb4156860333f24b0247d3beed01
|
/server.R
|
01fd80bf426371ca5d34ac907e40bd5f1e937ea8
|
[] |
no_license
|
geohemex/DDP
|
eee0d26625977d50ecc780ac48f9c13574124394
|
2d3d7bc6a5359f1cefc47b986cccdbd9ec033da9
|
refs/heads/master
| 2021-01-10T23:04:30.421013
| 2016-10-09T21:16:48
| 2016-10-09T21:16:48
| 70,425,974
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 728
|
r
|
server.R
|
# Commented-out one-time setup kept for reference: it loaded and cleaned
# the "total.csv" price data into the global `Data` used below.
# library(shiny)
# library(dplyr)
# setwd("C:\\Users\\Geovanni\\Desktop\\Prueba")
# Data<-read.csv("total.csv", skip=1,header=F)
# setwd("C:\\Users\\Geovanni\\Desktop\\Data Products\\Prueba")
# colnames(Data)<-c("Hour", "Node_ID","LMP","Ener_comp","Lo_comp","Cong_comp", "Date", "NADA")
# Data$Node_ID<-NULL
# Data$NADA<-NULL
# Data$Hour<-factor(Data$Hour)
# Data$Date<-as.Date(Data$Date)
# Shiny server function: boxplots the user-selected price variable by
# hour. NOTE(review): `Data` is assumed to exist in the app environment
# (loaded elsewhere, e.g. in global.R) — confirm before deploying.
function(input, output) {
  # Reactive formula string, e.g. "LMP ~ Hour", rebuilt whenever
  # input$variable changes.
  formulaText <- reactive({
    paste(input$variable, "~ Hour" )
  })
  # Boxplot of the chosen variable per hour; outliers shown or hidden
  # according to the input$outliers checkbox.
  output$Plot <- renderPlot({
    boxplot(as.formula(formulaText()),
            data = Data,
            outline = input$outliers,
            col="red",
            xlab="Hours",
            ylab="$/MWh")
  })
}
|
42a0183e945d5efc2f42a2233b73baf128d3c9c1
|
2fa7055a4bbb879ad8a47440a5311e2cbd58eb8b
|
/IV.Genomic_analysis/Scripts/A.MethRNAseq.R
|
01e2f0dafa2983116d73f3b534a42476826b0de9
|
[
"BSD-3-Clause"
] |
permissive
|
Christensen-Lab-Dartmouth/brca_lowstage_DMGRs
|
d6332bd8a8eee885ed4c5e888aab00269c6577aa
|
1b1a00d00cefea380d2bb1cbd1589e570fd8fdbd
|
refs/heads/master
| 2020-04-06T04:36:32.312410
| 2017-09-14T14:31:05
| 2017-09-14T14:31:05
| 45,754,471
| 4
| 7
| null | 2017-03-10T19:29:38
| 2015-11-07T21:03:23
|
R
|
UTF-8
|
R
| false
| false
| 6,282
|
r
|
A.MethRNAseq.R
|
#####################################################################
# ~~~~~~~~~~~~~~~~~~
# Tumor subtype and cell type independent DNA methylation alterations
# associated with stage progression in invasive breast carcinoma
# ~~~~~~~~~~~~~~~~~~
# Way, G., Johnson, K., Christensen, B. 2015
#
# Examine whether DNA methylation of selected CpGs is associated with
# gene expression
#####################################################################
################################
# Load Libraries and the Plot Function
################################
library(readr)
library(plyr)
source("IV.Genomic_analysis/Scripts/Functions/MethRNAseq_functions.R")
source("III.DMGR_analysis/Scripts/Functions/make_heatmaps.R") #Use the scripts in here to extract the common CpGs
################################
# Load and Subset Data
################################
# Stages and subtypes of interest
low <- c("Stage I", "Stage IA", "Stage IB", "Stage II", "Stage IIA", "Stage IIB")
high <- c("Stage III", "Stage IIIA", "Stage IIIB", "Stage IIIC", "Stage IV")
stages <- list(low = low, high = high)
subtypes <- c("Basal", "Her2", "LumA", "LumB", "Normal")
# Load Betas (methylation beta values; first column holds the row ids)
Betas <- read_tsv("I.Data_Processing/Data/TCGA_BRCA_Betas.tsv")
rownames(Betas) <- Betas[[1]]
Betas[[1]] <- NULL
Betas <- as.data.frame(Betas)
# Load TCGA BRCA Normal RNAseq Data; drop unnamed ("?") genes and the
# duplicated SLC35E2 entry, then normalise sample ids to match Betas.
NormalRNAseq <- read_tsv("IV.Genomic_analysis/Data/unc.edu_BRCA_IlluminaHiSeq_RNASeqV2.geneExp.whitelist_normal")
rownames(NormalRNAseq) <- NormalRNAseq[[1]]
NormalRNAseq[[1]] <- NULL
NormalRNAseq <- as.data.frame(NormalRNAseq)
NormalRNAseq <- NormalRNAseq[-grep("[?]", laply(rownames(NormalRNAseq),
                            function (x) {unlist(strsplit(x, "[|]"))[1]})), ]
NormalRNAseq <- NormalRNAseq[-grep("SLC35E2", laply(rownames(NormalRNAseq),
                             function (x) {unlist(strsplit(x, "[|]"))[1]})), ]
colnames(NormalRNAseq) <- substr(colnames(NormalRNAseq), 1, 15)
colnames(NormalRNAseq) <- gsub("-", ".", colnames(NormalRNAseq))
# Keep only the gene symbol from "SYMBOL|ID" style row names.
rownames(NormalRNAseq) <- laply(rownames(NormalRNAseq), function (x) {unlist(strsplit(x, "[|]"))[1]})
# Load annotation file
annotation <- read_csv("I.Data_Processing/Files/HumanMethylation450K_Annotation_File.csv", skip = 7)
annotation <- as.data.frame(annotation)
# Load Covariates
covariates <- read.table("I.Data_Processing/Files/BRCAtarget_covariates.csv", row.names = 1,
                         header = T, sep = ",", stringsAsFactors = F)
# The colnames for the beta file have an "X" appended to the beginning of each basename, remove it
rownames(covariates) <- covariates$Basename
# Subset the covariate data to only the samples in the beta file and then to only the ones with PAM50 data
covariates <- covariates[intersect(rownames(covariates), colnames(Betas)), ]
covariates <- covariates[covariates$PAM50.RNAseq != "", ]
# Interested in "low" vs "high": recode individual stage labels into the
# two groups defined in `stages`.
for (i in 1:length(stages)) {
  subset <- stages[[i]]
  for (j in 1:length(subset)) {
    covariates$pathologic_stage[covariates$pathologic_stage == subset[j]] <- names(stages)[i]
  }
}
# Make sure the "tumor adjacent" samples are marked in this column
covariates$pathologic_stage[covariates$sample.type == "Solid Tissue Normal"] <- "normal"
# Only accept samples that have high or low assignments
covariates <- covariates[covariates$pathologic_stage == "low" | covariates$pathologic_stage == "high" | covariates$pathologic_stage == "normal",]
# Subset Betas to those samples with PAM50 data and stage of interest
Betas <- Betas[ ,rownames(covariates)]
################################
# Run Function
################################
# Load Common Overlaps
CommonOverlaps <- read.csv("III.DMGR_analysis/Tables/commonLowStageOverlaps_FullAnnotation_extended.csv",
                           row.names = 1, header = T, stringsAsFactors = F)
# Get all the genes in common to low stage tumors (first token of each
# "GENE ..." row name)
Genes <- laply(rownames(CommonOverlaps), function(x){unlist(strsplit(x, " "))[1]})
# What are the number of comparisons made here? Bonferroni adjusted p value required.
num_unique_cpgs <- 0
for (gene in 1:length(Genes)) {
  CpGs <- unique(ExtractCommonCGs(Genes[gene], CommonOverlaps))
  num_cpgs <- length(CpGs)
  num_unique_cpgs <- num_unique_cpgs + num_cpgs
}
# 101 Unique CpGs, made for 6 comparisons (5 subtypes + all)
# Bonferroni adjustment should be made for 6 * 101 =
alpha <- 0.05 / (6 * num_unique_cpgs)
# Loop over all genes to output several plots investigating methylation influencing gene expression
significantCor <- c()
#for (gene in 17:length(Genes)) {
for (gene in 1:length(Genes)) {
  # Extract the CGs associated with a specific gene
  #CpGs <- unique(ExtractCommonCGs(Genes[gene], CommonOverlaps))
  CpGs <- unique(ExtractCommonCGs(rownames(CommonOverlaps)[gene], CommonOverlaps))
  for (i in 1:length(CpGs)) {
    # Create and save all of the plots for each combination of CpGs and Genes
    png(paste("IV.Genomic_analysis/Figures/GeneExprs/", Genes[gene], "_", CpGs[i], ".png", sep = ""),
        height = 400, width = 400)
    corTable <- methSeqPlot(gene = Genes[gene], betas = Betas, cg = CpGs[i], covariates = covariates,
                            method = 'spearman', stages = "low", subtypes = subtypes,
                            normalExprs = NormalRNAseq)
    dev.off()
    # Output the Correlation Analysis to File as well
    write.table(corTable, paste("IV.Genomic_analysis/Tables/GeneExprs/", Genes[gene], "_", CpGs[i], ".csv", sep = ""),
                row.names = T, col.names = NA, sep = ",")
    # Test if there are any significant findings.
    # NOTE(review): the gate below uses the Bonferroni-adjusted `alpha`,
    # but rows are then recorded at the raw 0.05 threshold — confirm
    # this asymmetry is intended.
    if (length(corTable[corTable[ , 3] <= alpha, ]) > 0 | length(corTable[corTable[ , 4] <= alpha, ]) > 0) {
      for (sig_row in 1:nrow(corTable)) {
        sigHit <- paste("IV.Genomic_analysis/Tables/GeneExprs/", Genes[gene], "_", CpGs[i], ".csv", sep = "")
        if (corTable[sig_row, 3] <= 0.05 | corTable[sig_row, 4] <= 0.05 ) {
          hitinclude <- c(sigHit, corTable[sig_row, ], rownames(corTable)[sig_row])
          significantCor <- rbind(significantCor, hitinclude)
        }
      }
    }
  }
}
|
01e067d59bcbdc7b82ba2b23010da239e123d48e
|
cb325ebdbfcc6ae43fd4d6e916acb2a90720d812
|
/man/draw_plots.Rd
|
dc618185f24d0c9d8adc6baa0094a6b4f8c81d2b
|
[] |
no_license
|
resplab/voiPeermodels
|
798a3096b3cd495bc789bb206a0ff01343b9d826
|
2b9c04b2ebcc9d40a51ed52fe1ba5f335261f64d
|
refs/heads/master
| 2023-07-14T13:55:57.830478
| 2021-08-17T16:44:02
| 2021-08-17T16:44:02
| 394,715,302
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 394
|
rd
|
draw_plots.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1client_lib.R
\name{draw_plots}
\alias{draw_plots}
\title{draws plots generated by the model in R Session}
\usage{
draw_plots(plot_number = NULL)
}
\arguments{
\item{plot_number}{the number of the plot to be rendered}
}
\value{
graphical object in R
}
\description{
draws plots generated by the model in R Session
}
|
1ba67629a681d77aa7edd7f177600c0b023a156f
|
6e32987e92e9074939fea0d76f103b6a29df7f1f
|
/googleidentitytoolkitv2.auto/man/GoogleCloudIdentitytoolkitV2MfaTotpSignInRequestInfo.Rd
|
fbe756b6526d6ff361193d76523a551c4675b49a
|
[] |
no_license
|
justinjm/autoGoogleAPI
|
a8158acd9d5fa33eeafd9150079f66e7ae5f0668
|
6a26a543271916329606e5dbd42d11d8a1602aca
|
refs/heads/master
| 2023-09-03T02:00:51.433755
| 2023-08-09T21:29:35
| 2023-08-09T21:29:35
| 183,957,898
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 803
|
rd
|
GoogleCloudIdentitytoolkitV2MfaTotpSignInRequestInfo.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/identitytoolkit_objects.R
\name{GoogleCloudIdentitytoolkitV2MfaTotpSignInRequestInfo}
\alias{GoogleCloudIdentitytoolkitV2MfaTotpSignInRequestInfo}
\title{GoogleCloudIdentitytoolkitV2MfaTotpSignInRequestInfo Object}
\usage{
GoogleCloudIdentitytoolkitV2MfaTotpSignInRequestInfo(verificationCode = NULL)
}
\arguments{
\item{verificationCode}{User-entered verification code}
}
\value{
GoogleCloudIdentitytoolkitV2MfaTotpSignInRequestInfo object
}
\description{
GoogleCloudIdentitytoolkitV2MfaTotpSignInRequestInfo Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
TOTP verification info for FinalizeMfaSignInRequest.
}
\concept{GoogleCloudIdentitytoolkitV2MfaTotpSignInRequestInfo functions}
|
1fc767c9baa36b7c47cc66e6c8311e81c0dbc463
|
9ba2359e8c4217607ba39d176fc841a5c01351af
|
/tests/testthat/test_Mbin_noint.R
|
399b3a0e8c4720b51e120eb812b16082a148944b
|
[] |
no_license
|
shiandy/causalMediation
|
69e011cdc292db95d256850d24aa26cf169b96c7
|
2c301a871d64333d67e001c2f2990f7bdb61597c
|
refs/heads/master
| 2021-01-22T01:58:27.284583
| 2017-01-15T05:44:26
| 2017-01-15T05:44:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,946
|
r
|
test_Mbin_noint.R
|
#
# # df <- read.csv("data/Mbin_noint_data.txt", sep = " ")
# #df <- read.csv("data/Mbin_noint_data_10000.txt", sep = " ")
#
# df <- read.csv("Mbin_noint_data_10000.txt", sep = " ")
#
# ##----- Bootstrap - Y_cont_noint
#
# set.seed(1234)
# s_boot_Y_cont_noint_M_bin <- causalMediation(data = df,
# outcome = "Y_cont_noint",
# treatment = 'A',
# mediator = 'M_bin',
# covariates = "C",
# interaction = FALSE,
# yreg = "linear", mreg = "logistic",
# boot = TRUE, nboot = 500)
# s_boot_Y_cont_noint_M_bin
# sink(file = "s_boot_Y_cont_noint_M_bin.txt")
# s_boot_Y_cont_noint_M_bin
# sink()
#
# ##----- Delta - Y_cont_noint
#
# s_delta_Y_cont_noint_M_bin <- causalMediation(data = df,
# outcome = "Y_cont_noint",
# treatment = 'A',
# mediator = 'M_bin',
# covariates = "C",
# interaction = FALSE,
# yreg = "linear", mreg = "logistic",
# boot = FALSE)
# s_delta_Y_cont_noint_M_bin
# sink(file = "s_delta_Y_cont_noint_M_bin.txt")
# s_delta_Y_cont_noint_M_bin
# sink()
#
# ##----- Bootstrap - Y_bin_noint
#
# set.seed(1234)
# s_boot_Y_bin_noint_M_bin <- causalMediation(data = df,
# outcome = "Y_bin_noint",
# treatment = 'A',
# mediator = 'M_bin',
# covariates = "C",
# interaction = FALSE,
# yreg = "logistic", mreg = "logistic",
# boot = TRUE, nboot = 500)
# s_boot_Y_bin_noint_M_bin
# sink(file = "s_boot_Y_bin_noint_M_bin.txt")
# s_boot_Y_bin_noint_M_bin
# sink()
#
# ##----- Delta - Y_bin_noint
#
# s_delta_Y_bin_noint_M_bin <- causalMediation(data = df,
# outcome = "Y_bin_noint",
# treatment = 'A',
# mediator = 'M_bin',
# covariates = "C",
# interaction = FALSE,
# yreg = "logistic", mreg = "logistic",
# boot = FALSE)
# s_delta_Y_bin_noint_M_bin
# sink(file = "s_delta_Y_bin_noint_M_bin.txt")
# s_delta_Y_bin_noint_M_bin
# sink()
|
836265536d19fcf7a7b7a6e48a24f30af7bc0a54
|
a71b7fe35d652d86f136823cd1801eb51d902839
|
/glucose.R
|
342ceafe3efb85bb73b6bd56cfe61b52944f8feb
|
[] |
no_license
|
StaThin/data
|
9efd602022db768b927c3338e5ce7483f57e3469
|
d7f6c6b5d4df140527c269b032bb3b0be45ceeeb
|
refs/heads/master
| 2023-03-29T18:40:09.694794
| 2023-03-15T09:32:42
| 2023-03-15T09:32:42
| 29,299,462
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,126
|
r
|
glucose.R
|
"glucose" <-
structure(list(Y = c(6.8, 9.9, 8.3, 10.9, 7, 8.8, 11.5, 9.9,
9.5, 7, 13.5, 7.4, 10.8, 10.2, 6.2, 6.8, 8.3, 8.3, 8.3, 13.2,
9, 12.9, 10, 6.9, 11.5, 10.2, 9.1, 9.7, 9.2, 8.8, 7.3, 5.9, 7.4,
8.9, 6.4, 10.4, 9, 14.1, 11.7, 10.5, 9.2, 11.2, 10, 10.8, 7.6,
8, 9.2, 7.4, 6.8, 8.5, 11, 11, 5.4, 8.7, 13, 10.2, 8.5, 11.3,
13.6, 8, 8.7, 8.6, 11.7, 10.7, 8.5, 8.4, 7.8, 10.1), X = c(35,
38, 42, 44, 38, 41, 40, 37, 30, 41, 35, 36, 34, 25, 41, 39, 40,
35, 44, 11, 27, 16, 41, 39, 34, 42, 28, 27, 24, 44, 42, 42, 40,
40, 37, 26, 30, 32, 42, 42, 34, 41, 24, 21, 31, 42, 28, 26, 42,
44, 42, 37, 38, 35, 34, 26, 38, 35, 34, 35, 42, 46, 45, 26, 37,
33, 39, 31), Z = c(22, 28, 20, 15, 15, 28, 12, 21, 23, 19, 21,
18, 15, 24, 18, 19, 14, 17, 8, 21, 33, 28, 15, 22, 17, 17, 21,
30, 22, 11, 17, 12, 15, 27, 14, 31, 28, 15, 14, 15, 14, 21, 13,
25, 21, 16, 23, 17, 16, 14, 14, 12, 13, 23, 18, 20, 16, 25, 20,
19, 20, 10, 18, 22, 22, 11, 25, 24), U = c(32, 30, 20, 22, 25,
35, 39, 24, 31, 26, 24, 29, 27, 31, 17, 28, 22, 25, 14, 18, 30,
26, 22, 25, 21, 25, 29, 42, 17, 11, 25, 31, 14, 30, 29, 29, 28,
27, 23, 22, 13, 24, 33, 34, 21, 23, 28, 22, 32, 18, 8, 14, 17,
34, 20, 30, 13, 29, 24, 24, 17, 8, 17, 24, 27, 18, 18, 31), V = c(35,
37, 38, 44, 44, 48, 42, 42, 43, 45, 45, 44, 48, 41, 47, 37, 44,
44, 48, 45, 27, 37, 40, 38, 42, 38, 44, 46, 46, 46, 47, 46, 47,
44, 34, 43, 43, 47, 40, 44, 45, 38, 39, 38, 39, 34, 35, 38, 43,
41, 45, 48, 40, 27, 37, 42, 45, 39, 38, 40, 35, 47, 38, 37, 38,
39, 36, 47), W = c(3.08333333333, 8.75, 5, 15.4166666667, 5,
17, 0.666666666667, 0.75, 17, 5, 0.0833333333333, 20, 0.416666666667,
12.0833333333, 1.5, 8, 1.33333333333, 20, 1.5, 8, 11.6666666667,
7, 0, 10, 11.25, 10, 24, 20, 18, 5, 17.5, 0.666666666667, 16.25,
21.25, 0.416666666667, 6.25, 20, 5, 7, 9, 20, 11.6666666667,
16.6666666667, 2.08333333333, 15, 14.1666666667, 6.25, 9.16666666667,
18, 15, 14.1666666667, 5, 10, 7.08333333333, 0.25, 10, 7.08333333333,
12.0833333333, 9.16666666667, 10, 8.33333333333, 0.166666666667,
18.3333333333, 10, 22.5, 20.8333333333, 20, 21.6666666667), A = structure(c(2,
2, 2, 1, 2, 2, 1, 1, 1, 2, 1, 2, 1, 2, 2, 1, 2, 1, 2, 1, 1, 1,
2, 1, 2, 2, 1, 1, 1, 2, 1, 2, 1, 1, 2, 1, 1, 1, 1, 2, 2, 1, 1,
1, 1, 1, 1, 2, 1, 2, 1, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1,
1, 1, 2, 1), .Label = c("1", "2"), class = "factor"), B = structure(c(2,
1, 2, 1, 1, 1, 2, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2, 1, 2, 1, 2, 2,
1, 2, 1, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1,
1, 2, 2, 2, 2, 2, 1, 2, 1, 1, 2, 2, 1, 2, 2, 2, 1, 1, 1, 2, 1,
2, 2, 2, 2), .Label = c("1", "2"), class = "factor")), .Names = c("Y",
"X", "Z", "U", "V", "W", "A", "B"), class = "data.frame", row.names = c("1",
"2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13",
"14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24",
"25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35",
"36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46",
"47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57",
"58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68"
))
|
88b084f138587c51497799b39dee1adea3cf9edb
|
989ad7379c164a5a3b42fbc31c4d1915a1e9595a
|
/prog/plot.R
|
a2c46e14b4ccbeb963ad226ff8be1c15cd01d57b
|
[
"Apache-2.0"
] |
permissive
|
kenoshiro/AIM-CCU
|
6d7f42885333c58277824c9037c2cfb7eeceadba
|
1ffc4f6b378cd1c4b25848d1e1a715e844564b78
|
refs/heads/main
| 2023-08-03T10:52:22.303131
| 2023-07-22T07:03:32
| 2023-07-22T07:03:32
| 518,424,576
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 113,272
|
r
|
plot.R
|
# Shared ggplot2 theme container used by every figure in this script.
# `set1`: theme_bw() stripped of grid lines and panel border, with thin
# black axis lines/ticks and slightly reduced font sizes.
mytheme <- list(
  set1 = theme_bw() +
    theme(
      plot.title   = element_text(size = 9.5),
      panel.grid   = element_blank(),
      panel.border = element_blank(),
      axis.line    = element_line(color = 'black', size = .3),
      axis.ticks   = element_line(color = 'black', size = .3),
      axis.text    = element_text(size = 9),
      axis.title   = element_text(size = 10),
      legend.title = element_text(size = 10)
    )
)
# Scenario label lookup: maps internal scenario IDs (carbon budgets
# "500C"/"1000C") to publication labels ("1.5C"/"WB2C"; `scen_lab`) and to
# line-wrapped variants for narrow facet strips (`scen_wrap`).
# Row order also defines the factor-level (display) order used downstream.
df$scen_lab <- tribble(~Scenario,~scen_lab,~scen_wrap,
  'NoPOL','NoPOL','NoPOL',
  '500C-CCU','1.5C-CCU','1.5C-CCU',
  '500C-DEC','1.5C-DEC','1.5C-DEC',
  '500C-CDR','1.5C-CDR','1.5C-CDR',
  '500C-CCU-Adv','1.5C-CCU-AdvTech','1.5C-CCU\n-AdvTech',
  '500C-DEC-Adv','1.5C-DEC-AdvTech','1.5C-DEC\n-AdvTech',
  '500C-CDR-Adv','1.5C-CDR-AdvTech','1.5C-CDR\n-AdvTech',
  '500C-CCU-Conv','1.5C-CCU-ConvTech','1.5C-CCU\n-ConvTech',
  '500C-DEC-Conv','1.5C-DEC-ConvTech','1.5C-DEC\n-ConvTech',
  '500C-CDR-Conv','1.5C-CDR-ConvTech','1.5C-CDR\n-ConvTech',
  '500C-CCU-LimElecP','1.5C-CCU-LimElec+','1.5C-CCU\n-LimElec+',
  '500C-CCU-LimElecM','1.5C-CCU-LimElec-','1.5C-CCU\n-LimElec-',
  '500C-CCU-LimElecS','1.5C-CCU-LimElec--','1.5C-CCU\n-LimElec--',
  '1000C-CCU','WB2C-CCU','WB2C-CCU',
  '1000C-DEC','WB2C-DEC','WB2C-DEC',
  '1000C-CDR','WB2C-CDR','WB2C-CDR')
# Sensitivity-case lookup: splits each scenario ID into its base scenario
# (`scen_sens_base`) and its sensitivity variant (`scen_sens_var`,
# 'Default' for the main cases).
df$scen_sens_cat <- tribble(~Scenario,~scen_sens_base,~scen_sens_var,
  '1000C-CCU','WB2C-CCU','Default',
  '1000C-DEC','WB2C-DEC','Default',
  '1000C-CDR','WB2C-CDR','Default',
  '500C-CCU','1.5C-CCU','Default',
  '500C-DEC','1.5C-DEC','Default',
  '500C-CDR','1.5C-CDR','Default',
  '500C-CCU-Adv','1.5C-CCU','AdvTech',
  '500C-DEC-Adv','1.5C-DEC','AdvTech',
  '500C-CDR-Adv','1.5C-CDR','AdvTech',
  '500C-CCU-Conv','1.5C-CCU','ConvTech',
  '500C-DEC-Conv','1.5C-DEC','ConvTech',
  '500C-CDR-Conv','1.5C-CDR','ConvTech',
  '500C-CCU-LimElecP','1.5C-CCU','LimElec+',
  '500C-CCU-LimElecM','1.5C-CCU','LimElec-',
  '500C-CCU-LimElecS','1.5C-CCU','LimElec--')
# Scenario groupings and aesthetic lookup tables shared by all figures.
# The three representative 1.5C scenarios shown in most panels.
lst$scen_rep <- c('500C-CCU','500C-DEC','500C-CDR')
# All 1.5C scenarios including technology / limited-electrification variants.
lst$scen_500all <- c('500C-CCU','500C-DEC','500C-CDR',
  '500C-CCU-Adv','500C-DEC-Adv','500C-CDR-Adv',
  '500C-CCU-Conv','500C-DEC-Conv','500C-CDR-Conv',
  '500C-CCU-LimElecP','500C-CCU-LimElecM','500C-CCU-LimElecS')
lst$scen_cat <- c('1.5C-CCU','1.5C-DEC','1.5C-CDR','WB2C-CCU','WB2C-DEC','WB2C-CDR')
lst$scen_sens <- c('Default','AdvTech','ConvTech')
lst$scen_sens_all <- c('Default','AdvTech','ConvTech','LimElec+','LimElec-','LimElec--')
# Named colour vectors keyed by publication scenario label
# (colour-blind-friendly hex values).
lst$scen_col <- c('1.5C-CCU'='#D55E00','1.5C-DEC'='#56B4E9','1.5C-CDR'='#E69F00')
lst$scen_col_all <- c('1.5C-CCU'='#D55E00','1.5C-DEC'='#56B4E9','1.5C-CDR'='#E69F00',
  'WB2C-CCU'='#CC79a7','WB2C-DEC'='#0072B2','WB2C-CDR'='#009E73')
# Linetypes per sensitivity: only the default case is drawn as a line.
lst$lin_scen <- c('Default'='solid','AdvTech'='blank','ConvTech'='blank','LimElec+'='blank','LimElec-'='blank','LimElec--'='blank')
# AR6 Illustrative Mitigation Pathway (IMP) markers and their point shapes.
lst$IMP_all <- c('GS','Neg','Ren','LD','SP','Neg-2.0','Ren-2.0','ModAct','CurPol')
lst$IMP_main <- c('GS','Neg','Ren','LD','SP','Neg-2.0','Ren-2.0')
lst$IMP_main_shp <- c('GS'=8,'Neg'=9,'Ren'=3,'LD'=11,'SP'=4,'Neg-2.0'=10,'Ren-2.0'=12)
lst$IMP_cat_shp <- c('C1'=1,'C2'=2,'C3'=0,lst$IMP_main_shp)
# Point shape per sensitivity variant (21-25 are fillable shapes).
df$scen_sens_shape <- tribble(~scen_sens_var,~Shape,
  'Default',21,
  'AdvTech',23,
  'ConvTech',25,
  'LimElec+',22,
  'LimElec-',24,
  'LimElec--',7)
# Map from R5 region codes to human-readable labels (with line breaks
# for narrow facet strips).
df$R5map <- tibble(
  Region = c('R5ASIA', 'R5LAM', 'R5MAF', 'R5OECD90+EU', 'R5REF'),
  R5lab  = c('Asia', 'Latin\nAmerica', 'Middle East\n& Africa',
             'OECD & EU', 'Reforming\nEconomies')
)
# Hand-drawn legend explaining the AR6 range glyph (min/max outline box,
# 10th-90th percentile filled box, median bar). `y` positions the text
# labels, `y2` the tick marks next to them.
p$l_rangeleg <- tribble(~y,~y2,~label,
  -.1,0,'Min',
  1.1,1,'Max',
  .5,.5,'Median',
  .2,.2,'10th percentile',
  .8,.8,'90th percentile') %>%
  mutate(x=0) %>%
  ggplot()+
  # Filled box: 10th-90th percentile; outline box: full min-max range.
  geom_crossbar(aes(x=x),ymin=.2,ymax=.8,y=.5,width=.25,color='white',fill='grey')+
  geom_crossbar(aes(x=x),ymin=0,ymax=1,y=0,width=.25,color='grey',fill='transparent',fatten=0)+
  # shape 95 is a short horizontal tick next to each label.
  geom_point(aes(y=y2),x=.18,shape=95,color='grey',size=1.5)+
  geom_text(aes(y=y,label=label),x=.22,hjust=0,size=3)+
  labs(title=' AR6 range')+
  xlim(-.2,1.6)+ylim(-.2,1.3)+
  theme_void()+theme(plot.margin=unit(c(1,0,0,3),'mm'),plot.title=element_text(size=10))
# Fig.1 -----------------------------------------------------------
# Carrier-level variables for the global final-energy mix (stacked areas),
# with legend label and fill colour per variable.
df$var <- tribble(~Variable,~Legend,~Color,
  'Fin_Ene_SolidsCoa','Coal','grey70',
  'Fin_Ene_Liq_Oil_and_Nat_Gas','Liquids-fossil','sandybrown',
  'Fin_Ene_Liq_Hyd_syn','Liquids-synfuel','orchid',
  'Fin_Ene_Gas_Fos','Gases-fossil','moccasin',
  'Fin_Ene_Gas_Hyd_syn','Gases-synfuel','orchid1',
  'Fin_Ene_Liq_and_Sol_Bio','Biomass','darkolivegreen2',
  'Fin_Ene_Ele','Electricity','lightsteelblue',
  'Fin_Ene_Heat','Heat','salmon',
  'Fin_Ene_Hyd','Hydrogen','thistle2',
  'Fin_Ene_Oth_inc_Solarand_Geo','Other','grey90')
# Named label/colour vectors keyed by variable name (for scale_*_manual).
lst$leg <- as.character(df$var$Legend); names(lst$leg) <- as.character(df$var$Variable)
lst$col <- as.character(df$var$Color); names(lst$col) <- as.character(df$var$Variable)
# Share variables overlaid as lines/points on the secondary axis.
df$var2 <- tribble(~Variable,~Legend,~Color,~Shape,
  'Fin_Ene_Share_Ele','Electricity share','blue',21,
  'Fin_Ene_Share_Syn_Hyd','Synfuels share','purple',23,
  'Fin_Ene_Share_Hyd_Car','Hydrocarbon share','orange',24)
lst$leg2 <- as.character(df$var2$Legend); names(lst$leg2) <- as.character(df$var2$Variable)
lst$col2 <- as.character(df$var2$Color); names(lst$col2) <- as.character(df$var2$Variable)
lst$shp2 <- as.numeric(df$var2$Shape); names(lst$shp2) <- as.character(df$var2$Variable)
# Axis ceiling: 2050 global final energy, rounded up to the next 10 EJ.
# Used to rescale share values (0-1) onto the energy axis.
lst$finene_max <- df$all %>%
  filter(Variable=='Fin_Ene',Scenario%in%lst$scen_rep,Year==2050,Region=='World') %>%
  mutate(Value=ceiling(Value/10)*10) %>% .$Value %>% max()
# Plot data for Fig.1A: world totals from 2020 onward for all carrier and
# share variables, joined to legend metadata and scenario labels; factor
# levels fix stacking and facet order.
df$tmp <- df$all %>% filter(Region=='World',Year>=2020) %>%
  filter(Variable%in%c(df$var$Variable,df$var2$Variable)) %>%
  left_join(df$var,by='Variable') %>%
  left_join(df$var2,by='Variable') %>%
  inner_join(df$scen_lab,by='Scenario') %>%
  filter(Scenario%in%lst$scen_rep) %>%
  mutate(Variable=factor(Variable,levels=rev(c(df$var$Variable,df$var2$Variable))),scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab))
# Share variables rescaled from 0-1 to the energy axis so they can be drawn
# on the same panel (the secondary axis converts back to percent).
df$subaxis <- df$tmp %>% filter(Variable%in%df$var2$Variable) %>%
  mutate(Value=Value*lst$finene_max)
# Fig.1A: global final-energy mix over time (stacked area, left axis) with
# electricity / synfuel / hydrocarbon shares overlaid (lines + points) on a
# secondary percent axis, faceted by scenario.
p$fec2 <- df$tmp %>% filter(Region=='World') %>%
  filter(Variable%in%df$var$Variable) %>%
  ggplot()+
  geom_area(aes(x=Year,y=Value,fill=Variable),stat='identity',position='stack',show.legend=T)+
  geom_path(data=df$subaxis,aes(x=Year,y=Value,color=Variable),show.legend=T)+
  geom_point(data=df$subaxis,aes(x=Year,y=Value,color=Variable,shape=Variable),fill='white',show.legend=T)+
  labs(x=NULL,y=expression(paste('Final energy (EJ ',yr^{-1},')')))+
  facet_grid(.~scen_lab)+
  mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
  # Secondary axis back-transforms the rescaled share values to percent.
  scale_y_continuous(limits=c(0,NA),sec.axis=sec_axis(~./lst$finene_max*100,labels=function(x){paste0(sprintf('%.0f',x),'%')},name='Final energy share (%)'))+
  scale_fill_manual(values=rev(lst$col),labels=lst$leg,name=NULL)+
  scale_color_manual(values=lst$col2,labels=lst$leg2,name=NULL)+
  scale_shape_manual(values=lst$shp2,labels=lst$leg2,name=NULL)+
  guides(fill=guide_legend(title='Demand (left-axis)',ncol=1,override.aes=list(linetype=NULL,shape=NULL,color='transparent')),
         color=guide_legend(title='Share (right-axis)'),
         shape=guide_legend(title='Share (right-axis)',override.aes=list(fill='white')))
# AR6-database companion panel for Fig.1A: 2050 hydrocarbon share across
# categories C1-C3 (10th-90th percentile box, min-max outline, IMP markers).
# `df$tmp` holds the IMP scenarios plotted as individual points.
df$tmp <- df$load_AR6_global %>%
  filter(Variable=='Fin_Ene_Share_Hyd_Car',Year=='2050',Category%in%c('C1','C2','C3'),IMP_marker%in%lst$IMP_main) %>%
  mutate(IMP_marker=factor(IMP_marker,levels=lst$IMP_main))
p$fec_hydcar_AR6 <- df$load_AR6_global %>%
  filter(Variable=='Fin_Ene_Share_Hyd_Car',Year=='2050',Category%in%c('C1','C2','C3')) %>%
  # fcalc_range_category(): project helper; presumably returns per-category
  # percentiles p0/p10/p50/p90/p100 and count n -- defined elsewhere.
  fcalc_range_category() %>%
  ggplot()+
  geom_crossbar(aes(x=Category,ymin=p10,ymax=p90,y=p50),width=.75,color='white',fill='grey')+
  geom_crossbar(aes(x=Category,ymin=p0,ymax=p100,y=p0),width=.75,color='grey',fill='transparent',fatten=0)+
  geom_text(aes(x=Category,label=str_c('n=',n)),y=0,size=2)+
  geom_text(x=.7,label='Hydrocarbon\nshare',y=.9,size=2.8,hjust=0,color='grey50')+
  geom_point(data=df$tmp,aes(x=Category,y=Value,shape=IMP_marker),size=1.5,color='orange')+
  scale_y_continuous(limits=c(0,1),sec.axis=sec_axis(~.,labels=scales::percent_format(accuracy=1),name='Final energy share (%)'))+
  labs(x=NULL,y=NULL)+
  mytheme$set1+theme(axis.text.y.left=element_blank(),axis.line.y.left=element_blank(),axis.ticks.y.left=element_blank())+
  scale_shape_manual(values=lst$IMP_main_shp)+
  guides(shape=guide_legend(title='AR6 IMPs'))
# Common y-axis ceiling for the three sector panels: the largest 2050
# sectoral final-energy total, rounded up to the next 10 EJ.
lst$fin_ene_sec_max <- df$all %>%
  filter(Region == 'World') %>%
  filter(Scenario %in% lst$scen_rep) %>%
  filter(Variable %in% c('Fin_Ene_Ind', 'Fin_Ene_Res_and_Com', 'Fin_Ene_Tra')) %>%
  filter(Year == 2050) %>%
  mutate(Value = ceiling(Value / 10) * 10) %>%
  pull(Value) %>%
  max()
# Sectoral carrier map for Fig.1B: variable -> (Sector, Carrier).
# NOTE: this overwrites the Fig.1A `df$var` -- the script relies on
# top-to-bottom execution order.
df$var <- tribble(~Variable,~Sector,~Carrier,
  'Fin_Ene_Ind_SolidsCoa','Industry','Coal',
  'Fin_Ene_Ind_Liq_Oil','Industry','Oil',
  'Fin_Ene_Ind_Liq_Hyd_syn','Industry','Synfuel-liquids',
  'Fin_Ene_Ind_Gas_Fos','Industry','Gas',
  'Fin_Ene_Ind_Gas_Hyd_syn','Industry','Synfuel-gases',
  'Fin_Ene_Ind_Liq_and_Sol_Bio','Industry','Biomass',
  'Fin_Ene_Ind_Heat','Industry','Heat',
  'Fin_Ene_Ind_Ele','Industry','Electricity',
  'Fin_Ene_Ind_Hyd','Industry','Hydrogen',
  'Fin_Ene_Ind_Oth','Industry','Other',
  'Fin_Ene_Res_and_Com_SolidsCoa','Buildings','Coal',
  'Fin_Ene_Res_and_Com_Liq_Oil','Buildings','Oil',
  'Fin_Ene_Res_and_Com_Liq_Hyd_syn','Buildings','Synfuel-liquids',
  'Fin_Ene_Res_and_Com_Gas_Fos','Buildings','Gas',
  'Fin_Ene_Res_and_Com_Gas_Hyd_syn','Buildings','Synfuel-gases',
  'Fin_Ene_Res_and_Com_SolidsBio','Buildings','Biomass',
  'Fin_Ene_Res_and_Com_Heat','Buildings','Heat',
  'Fin_Ene_Res_and_Com_Ele','Buildings','Electricity',
  'Fin_Ene_Res_and_Com_Hyd','Buildings','Hydrogen',
  'Fin_Ene_Res_and_Com_Oth','Buildings','Other',
  # Transport maps two variables onto 'Gas' (liquefied natural gas + gas).
  'Fin_Ene_Tra_Liq_Coa','Transport','Coal',
  'Fin_Ene_Tra_Liq_Oil','Transport','Oil',
  'Fin_Ene_Tra_Liq_Nat_Gas','Transport','Gas',
  'Fin_Ene_Tra_Liq_Hyd_syn','Transport','Synfuel-liquids',
  'Fin_Ene_Tra_Gas','Transport','Gas',
  'Fin_Ene_Tra_Liq_Bio','Transport','Biomass',
  'Fin_Ene_Tra_Ele','Transport','Electricity',
  'Fin_Ene_Tra_Hyd','Transport','Hydrogen',
  'Fin_Ene_Tra_Oth','Transport','Other')
# Fill colours per carrier (also defines stacking order via row order).
df$car <- tribble(~Carrier,~Color,
  'Oil','sandybrown',
  'Coal','grey70',
  'Gas','moccasin',
  'Biomass','darkolivegreen2',
  'Heat','salmon',
  'Electricity','lightsteelblue',
  'Hydrogen','thistle2',
  'Synfuel-liquids','orchid',
  'Synfuel-gases','orchid1',
  'Other','grey90')
# Named vectors keyed by carrier (overwrite the Fig.1A lst$leg / lst$col).
lst$leg <- as.character(df$car$Carrier); names(lst$leg) <- as.character(df$car$Carrier)
lst$col <- as.character(df$car$Color); names(lst$col) <- as.character(df$car$Carrier)
# Stacked bars of 2050 final energy by carrier, faceted by end-use sector
# (bar-only variant; the share-overlay version is p$fec_sec2 below).
p$fin_sec <- df$all %>% filter(Region=='World',Year==2050) %>%
  filter(Variable%in%df$var$Variable) %>%
  inner_join(df$var,by='Variable') %>%
  inner_join(df$scen_lab,by='Scenario') %>%
  filter(Scenario%in%lst$scen_rep) %>%
  # Factor levels fix stack order (carriers) and panel order (sectors).
  mutate(Carrier=factor(Carrier,levels=rev(unique(df$var$Carrier))),scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab),
         Sector=factor(Sector,levels=unique(df$var$Sector))) %>%
  ggplot()+
  geom_bar(aes(x=scen_lab,y=Value,fill=Carrier),stat='identity',position='stack',show.legend=T)+
  facet_grid(.~Sector)+
  labs(title=NULL,x=NULL,y=expression(paste('Final energy (EJ ',yr^{-1},')')))+
  mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
  guides(fill=guide_legend(ncol=1))+
  scale_fill_manual(values=rev(lst$col),labels=lst$leg,name=NULL)
# Sectoral share variables overlaid as points on the secondary axis of the
# per-sector panels (electricity / synfuel / hydrocarbon share per sector).
df$var2 <- tribble(~Variable,~Sector,~Carrier,
  'Fin_Ene_Ind_Share_Ele','Industry','Electricity share',
  'Fin_Ene_Ind_Share_Syn_Hyd','Industry','Synfuels share',
  'Fin_Ene_Ind_Share_Hyd_Car','Industry','Hydrocarbon share',
  'Fin_Ene_Res_and_Com_Share_Ele','Buildings','Electricity share',
  'Fin_Ene_Res_and_Com_Share_Syn_Hyd','Buildings','Synfuels share',
  'Fin_Ene_Res_and_Com_Share_Hyd_Car','Buildings','Hydrocarbon share',
  'Fin_Ene_Tra_Share_Ele','Transport','Electricity share',
  'Fin_Ene_Tra_Share_Syn_Hyd','Transport','Synfuels share',
  'Fin_Ene_Tra_Share_Hyd_Car','Transport','Hydrocarbon share')
# Colour and point shape per share series.
df$car2 <- tribble(~Carrier,~Color,~Shape,
  'Electricity share','blue',21,
  'Synfuels share','purple',23,
  'Hydrocarbon share','orange',24)
lst$leg2 <- as.character(df$car2$Carrier); names(lst$leg2) <- as.character(df$car2$Carrier)
lst$col2 <- as.character(df$car2$Color); names(lst$col2) <- as.character(df$car2$Carrier)
lst$shp2 <- as.numeric(df$car2$Shape); names(lst$shp2) <- as.character(df$car2$Carrier)
# Plot data for the sector panels: world values for all sectoral carrier and
# share variables, joined to sector/carrier metadata and scenario labels.
df$tmp <- df$all %>% filter(Region=='World') %>%
  filter(Variable%in%c(df$var$Variable,df$var2$Variable)) %>%
  inner_join(bind_rows(df$var,df$var2),by=c('Variable')) %>%
  inner_join(df$scen_lab,by='Scenario') %>%
  filter(Scenario%in%lst$scen_rep) %>%
  mutate(Variable=factor(Variable,levels=rev(c(df$var$Variable,df$var2$Variable))),Sector=factor(Sector,levels=unique(df$var$Sector)),
         Carrier=factor(Carrier,levels=rev(unique(c(df$var$Carrier,df$var2$Carrier)))),scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab))
# 2050 share values rescaled (0-1 -> EJ axis) for the secondary-axis overlay.
df$subaxis <- df$tmp %>% filter(Variable%in%df$var2$Variable,Year==2050) %>%
  mutate(Value=Value*lst$fin_ene_sec_max)
# All-sector version of Fig.1B: 2050 final energy by carrier (stacked bars)
# with carrier-share points on a secondary percent axis, faceted by sector.
p$fec_sec2 <- df$tmp %>% filter(Region=='World',Year==2050) %>%
  filter(Variable%in%df$var$Variable) %>%
  ggplot()+
  geom_bar(aes(x=scen_lab,y=Value,fill=Carrier),stat='identity',position='stack',show.legend=T)+
  geom_point(data=df$subaxis,aes(x=scen_lab,y=Value,color=Carrier,shape=Carrier),fill='white',show.legend=T)+
  facet_grid(.~Sector)+
  labs(title=NULL,x=NULL,y=expression(paste('Final energy (EJ ',yr^{-1},')')))+
  mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
  scale_y_continuous(limits=c(0,lst$fin_ene_sec_max),sec.axis=sec_axis(~./lst$fin_ene_sec_max*100,labels=function(x){paste0(sprintf('%.0f',x),'%')},name='Final energy share (%)'))+
  scale_fill_manual(values=rev(lst$col),labels=lst$leg,name=NULL)+
  scale_color_manual(values=lst$col2,labels=lst$leg2,name=NULL)+
  scale_shape_manual(values=lst$shp2,labels=lst$leg2,name=NULL)+
  guides(fill=guide_legend(title='Demand (left-axis)',ncol=1,override.aes=list(linetype=NULL,shape=NULL,color='transparent')),
         color=guide_legend(title='Share (right-axis)',override.aes=list(fill='transparent')),
         shape=guide_legend(title='Share (right-axis)',override.aes=list(fill='transparent')))
# Fig.1B single-sector panels. The Industry / Buildings / Transport panels
# were three copy-pasted blocks differing only in the sector filter and
# title, so they are built with one helper.
#
# fplot_fec_sec2(sector): 2050 final energy by carrier (stacked bars, left
# axis) for one sector, with carrier-share points rescaled by
# lst$fin_ene_sec_max onto the secondary percent axis. `sector` is both the
# Sector filter value and the panel title. Uses the script-level df$tmp /
# df$subaxis / lst lookups prepared above.
fplot_fec_sec2 <- function(sector) {
  df$tmp %>% filter(Region=='World',Year==2050) %>%
    filter(Variable%in%df$var$Variable) %>%
    filter(Sector==sector) %>%
    ggplot()+
    geom_bar(aes(x=scen_lab,y=Value,fill=Carrier),stat='identity',position='stack',show.legend=TRUE)+
    geom_point(data=df$subaxis %>% filter(Sector==sector),
               aes(x=scen_lab,y=Value,color=Carrier,shape=Carrier),fill='white',show.legend=TRUE)+
    labs(title=sector,x=NULL,y=expression(paste('Final energy (EJ ',yr^{-1},')')))+
    mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
    # Shared ceiling across sectors so the three panels are comparable;
    # the secondary axis converts the rescaled shares back to percent.
    scale_y_continuous(limits=c(0,lst$fin_ene_sec_max),sec.axis=sec_axis(~./lst$fin_ene_sec_max*100,labels=function(x){paste0(sprintf('%.0f',x),'%')},name='Final energy share (%)'))+
    scale_fill_manual(values=rev(lst$col),labels=lst$leg,name=NULL)+
    scale_color_manual(values=lst$col2,labels=lst$leg2,name=NULL)+
    scale_shape_manual(values=lst$shp2,labels=lst$leg2,name=NULL)+
    guides(fill=guide_legend(title='Demand (left-axis)',ncol=1,override.aes=list(linetype=NULL,shape=NULL,color='transparent')),
           color=guide_legend(title='Share (right-axis)',override.aes=list(fill='transparent')),
           shape=guide_legend(title='Share (right-axis)',override.aes=list(fill='transparent')))
}
p$fec_sec2_ind <- fplot_fec_sec2('Industry')
p$fec_sec2_bui <- fplot_fec_sec2('Buildings')
p$fec_sec2_tra <- fplot_fec_sec2('Transport')
# AR6 companion panels for the sector plots: 2050 hydrocarbon share per
# category C1-C3 with IMP markers. The three originals were identical except
# for the variable name and whether the n=... count sits above (y=p100) or
# below (y=p0) the range bar, so they are built with one helper.
#
# fplot_hydcar_AR6(variable, label_bottom):
#   variable     - AR6 variable name (sectoral hydrocarbon share).
#   label_bottom - TRUE places the sample-size label under the bar
#                  (used for Transport); FALSE places it on top.
# Note: unlike the copy-pasted originals, this no longer leaves the IMP
# subset in the global df$tmp; df$tmp is reassigned before its next use.
fplot_hydcar_AR6 <- function(variable, label_bottom = FALSE) {
  # IMP scenarios plotted as individual orange points.
  imps <- df$load_AR6_global %>%
    filter(Variable==variable,Year=='2050',Category%in%c('C1','C2','C3'),IMP_marker%in%lst$IMP_main) %>%
    mutate(IMP_marker=factor(IMP_marker,levels=lst$IMP_main))
  # Per-category percentile summary; shares are capped at 1 before summarising.
  rng <- df$load_AR6_global %>%
    filter(Variable==variable,Year=='2050',Category%in%c('C1','C2','C3')) %>%
    mutate(Value=pmin(Value,1)) %>%
    fcalc_range_category()
  g <- ggplot(rng)+
    geom_crossbar(aes(x=Category,ymin=p10,ymax=p90,y=p50),width=.75,color='white',fill='grey')+
    geom_crossbar(aes(x=Category,ymin=p0,ymax=p100,y=p0),width=.75,color='grey',fill='transparent',fatten=0)
  if (label_bottom) {
    g <- g+geom_text(aes(x=Category,label=str_c('n=',n),y=p0),size=2,angle=90,hjust=1.2,vjust=.5)
  } else {
    g <- g+geom_text(aes(x=Category,label=str_c('n=',n),y=p100),size=2,angle=90,hjust=-.2,vjust=.5)
  }
  g+
    geom_point(data=imps,aes(x=Category,y=Value,shape=IMP_marker),size=1.5,color='orange',show.legend=FALSE)+
    scale_y_continuous(limits=c(0,1),sec.axis=sec_axis(~.,labels=scales::percent_format(accuracy=1),name='Final energy share (%)'))+
    labs(x=NULL,y=NULL)+
    mytheme$set1+theme(axis.text.x=element_text(angle=90,hjust=-.2,vjust=.5),
                       axis.text.y.left=element_blank(),axis.line.y.left=element_blank(),axis.ticks.y.left=element_blank(),
                       strip.background=element_blank())+
    scale_shape_manual(values=lst$IMP_main_shp)
}
p$fec_sec_hydcar_ind_AR6 <- fplot_hydcar_AR6('Fin_Ene_Ind_Share_Hyd_Car')
p$fec_sec_hydcar_bui_AR6 <- fplot_hydcar_AR6('Fin_Ene_Res_and_Com_Share_Hyd_Car')
p$fec_sec_hydcar_tra_AR6 <- fplot_hydcar_AR6('Fin_Ene_Tra_Share_Hyd_Car', label_bottom = TRUE)
# Technology-stock share variables for Fig.1C, grouped by end-use device.
# Several variables per device are summed downstream (e.g. BEV + FCV).
# NOTE: overwrites df$var again -- execution order matters.
df$var <- tribble(~Variable,~Device,
  'Tec_sto_Sha_Tra_Fre_Roa_BEV','Freight',
  'Tec_sto_Sha_Tra_Fre_Roa_FCV','Freight',
  'Tec_sto_Sha_Tra_Pss_Roa_PHV','Passenger',
  'Tec_sto_Sha_Tra_Pss_Roa_BEV','Passenger',
  'Tec_sto_Sha_Tra_Pss_Roa_FCV','Passenger',
  'Tec_sto_Sha_Ind_HeatingBoi_Ele','Boiler',
  'Tec_sto_Sha_Ind_HeatingBoi_Hyd','Boiler',
  'Tec_sto_Sha_Ind_HeatingFur_Ele','Furnace',
  'Tec_sto_Sha_Ind_HeatingFur_Hyd','Furnace',
  'Tec_sto_Sha_Com_HeatingSpa_EHP','Commercial',
  'Tec_sto_Sha_Com_HeatingSpa_Oth','Commercial',
  'Tec_sto_Sha_Res_HeatingSpa_EHP','Residential',
  'Tec_sto_Sha_Res_HeatingSpa_Oth','Residential')
# Facet labels per device (row order = facet order).
df$lab_tech <- tribble(~Device,~Tech_Label,
  'Passenger','Passenger\nroad transport',
  'Freight','Freight\nroad transport',
  'Boiler','Industry\nboiler',
  'Furnace','Industry\nfurnace',
  'Residential','Residential\nspace heating',
  'Commercial','Commercial\nspace heating')
# Fig.1C: diffusion of low-carbon end-use technologies, 2020-2050.
# Per-device shares are summed across technologies, then drawn as one line
# per scenario, faceted by device.
p$techshare <- df$all %>% filter(Region=='World',Year>=2020) %>%
  inner_join(df$var,by='Variable') %>%
  group_by(Model,Scenario,Device,Region,Year) %>% summarise(Value=sum(Value),.groups='drop') %>%
  inner_join(df$lab_tech,by='Device') %>%
  mutate(Tech_Label=factor(Tech_Label,levels=df$lab_tech$Tech_Label)) %>%
  inner_join(df$scen_lab,by='Scenario') %>%
  filter(Scenario%in%lst$scen_rep) %>%
  mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab)) %>%
  ggplot()+
  geom_line(aes(x=Year,y=Value,color=scen_lab))+
  geom_point(aes(x=Year,y=Value,color=scen_lab),shape=21,fill='white')+
  scale_x_continuous(limits=c(2020,2050),breaks=seq(2020,2050,by=10))+
  scale_y_continuous(limits=c(0,1), labels=scales::percent)+
  facet_wrap(~Tech_Label,nrow=1)+
  scale_color_manual(values=lst$scen_col)+
  mytheme$set1+theme(legend.position='right',strip.background=element_blank(),strip.text=element_text(size=9),axis.text.x=element_text(angle=45,hjust=1),legend.margin=margin(0,0,0,0))+
  labs(x=NULL,y='Diffusion rate')+
  guides(color=guide_legend(title=NULL))
# Assemble Fig.1: extract shared legends, strip redundant axes from the
# sub-panels, and compose panels A (mix over time), B (sector bars) and
# C (technology diffusion) with cowplot::plot_grid. Saved as PNG and PDF.
p$l_fec <- get_legend(p$fec2+theme(legend.position='right',legend.key.size=unit(4,'mm'),legend.spacing=unit(0,'mm')))
p$l_fec2 <- get_legend(p$fec_hydcar_AR6+theme(legend.position='right',legend.key.size=unit(3.5,'mm'))+
  guides(shape=guide_legend(ncol=2,title='AR6 IMPs',override.aes=list(color='black'))))
# Legend column: carrier legend, AR6-range explainer, IMP shapes.
p$l_tmp <- plot_grid(p$l_fec,ggplotGrob(p$l_rangeleg),p$l_fec2,ncol=1,rel_heights=c(1,.35,.35))
# Row A: mix-over-time panel plus its AR6 comparison strip.
p$tmp0 <- plot_grid(p$fec2+theme(legend.position='none',axis.title.y.right=element_blank(),axis.text.y.right=element_blank(),axis.line.y.right=element_blank(),axis.ticks.y.right=element_blank(),plot.margin=unit(c(5.5,0,5.5,5.5),unit='pt')),
  p$fec_hydcar_AR6+theme(legend.position='none',plot.margin=unit(c(5.5,5.5,5.5,0),unit='pt')),nrow=1,axis='tb',align='h',rel_widths=c(1,.35))
# Row B: three sector panels, each followed by its AR6 strip; axes are
# suppressed on interior panels so they read as one continuous row.
p$tmp1 <- plot_grid(p$fec_sec2_ind+theme(legend.position='none',axis.title.y.right=element_blank(),axis.text.y.right=element_blank(),axis.line.y.right=element_blank(),axis.ticks.y.right=element_blank(),plot.margin=unit(c(5.5,0,5.5,5.5),unit='pt')),
  p$fec_sec_hydcar_ind_AR6+theme(plot.margin=unit(c(5.5,5.5,5.5,0),unit='pt'),axis.title.y.right=element_blank(),axis.text.y.right=element_blank()),
  p$fec_sec2_bui+theme(legend.position='none',axis.title.y=element_blank(),axis.text.y=element_blank(),axis.line.y.right=element_blank(),axis.ticks.y.right=element_blank(),plot.margin=unit(c(5.5,0,5.5,5.5),unit='pt')),
  p$fec_sec_hydcar_bui_AR6+theme(plot.margin=unit(c(5.5,5.5,5.5,0),unit='pt'),axis.title.y.right=element_blank(),axis.text.y.right=element_blank()),
  p$fec_sec2_tra+theme(legend.position='none',axis.title.y=element_blank(),axis.text.y=element_blank(),axis.line.y.right=element_blank(),axis.ticks.y.right=element_blank(),plot.margin=unit(c(5.5,0,5.5,5.5),unit='pt')),
  p$fec_sec_hydcar_tra_AR6+theme(plot.margin=unit(c(5.5,5.5,5.5,0),unit='pt')),
  nrow=1,axis='tb',align='h',rel_widths=c(1.5,.55,1,.55,1,1.1))
p$tmp2 <- plot_grid(p$tmp0,p$tmp1,ncol=1,labels=c('A','B'),label_size=12,rel_heights=c(1,1.1))
p$tmp3 <- plot_grid(p$tmp2,p$l_tmp,nrow=1,rel_widths=c(1,.3))
p$tmp <- plot_grid(p$tmp3,p$techshare,ncol=1,rel_heights=c(1,.4),labels=c('','C'),label_size=12)
ggsave(filename='output/fig1.png',plot=p$tmp,width=174,height=170,units='mm',dpi=300)
ggsave(filename='output/fig1.pdf',plot=p$tmp,width=174,height=170,units='mm',dpi=300,device=cairo_pdf)
# Fig.2 -------------------------------------------------------------------
# Axis ceiling for the flow diagram: 2050 electricity generation, rounded
# up to the next 50 EJ.
lst$sec_ene_ele_max <- df$all %>%
  filter(Scenario%in%lst$scen_rep,Region=='World',Year==2050) %>%
  filter(Variable=='Sec_Ene_Ele') %>%
  group_by(Model,Scenario,Region,Year) %>% summarise(Value=sum(Value),.groups='drop') %>%
  mutate(Value=ceiling(Value/50)*50) %>% .$Value %>% max()
# Alluvial-stage map for the electricity/hydrogen/synfuel flow: each variable
# is assigned a stratum at every stage (Source -> Hydrogen -> Synfuel ->
# Final). 'Loss2' is a second loss stratum so losses entering at different
# stages stay separable (both drawn grey).
# NOTE: overwrites df$var and lst$col from the Fig.1 section.
df$var <- tribble(~Variable,~Source,~Hydrogen,~Synfuel,~Final,
  'Sec_Ene_Hyd_Fos','Fossil','Hydrogen','Loss2','Loss',
  'Sec_Ene_Hyd_Bio','Biomass','Hydrogen','Loss2','Loss',
  'Los_Ele','Electricity','Loss','Loss','Loss',
  'Sec_Ene_Ele_Own_Use_DAC','Electricity','DAC','Loss2','Loss',
  'Fin_Ene_Ele','Electricity','Electricity','Electricity','Electricity',
  'Los_Hyd','Electricity','Hydrogen','Loss2','Loss',
  'Fin_Ene_Liq_Hyd_syn','Electricity','Hydrogen','Synfuel','Synfuel',
  'Fin_Ene_Gas_Hyd_syn','Electricity','Hydrogen','Synfuel','Synfuel',
  'Fin_Ene_Hyd','Electricity','Hydrogen','Hydrogen','Hydrogen')
# Stratum stacking order and fill colours.
lst$carlev <- c('Synfuel','Hydrogen','Electricity','Fossil','Biomass',
  'DAC','Loss2','Loss')
lst$col <- c('Synfuel'='orchid','Hydrogen'='thistle2','Electricity'='lightsteelblue','Fossil'='sandybrown','Biomass'='darkolivegreen2',
  'DAC'='darkgoldenrod2','Loss'='grey','Loss2'='grey')
# Fig.2 flow (alluvial) diagram: 2050 energy flows from sources through
# hydrogen/synfuel conversion to final demand, one facet per scenario.
# Label/Alpha/Position columns suppress duplicate stratum labels and fade
# pass-through strata at intermediate stages.
p$seceneflo2 <- df$all %>% filter(Region=='World',Year==2050) %>%
  inner_join(df$var,by='Variable') %>%
  inner_join(df$scen_lab,by='Scenario') %>%
  filter(Scenario%in%lst$scen_rep) %>%
  select(colnames(df$var),Value,scen_lab) %>%
  # One row per variable per stage; `x` = stage, `Carrier` = stratum.
  pivot_longer(cols=!c(scen_lab,Variable,Value),names_to='x',values_to='Carrier') %>%
  mutate(Label=Carrier,Alpha=1,Positionh=.5,Positionv=.5) %>%
  # Blank labels (single space) where a stratum repeats from a prior stage.
  mutate(Label=ifelse(x=='Hydrogen'&Carrier%in%c('Electricity'),' ',Label)) %>%
  mutate(Label=ifelse(x=='Synfuel'&Carrier%in%c('Electricity','Hydrogen','Loss'),' ',Label)) %>%
  mutate(Label=ifelse(x=='Synfuel'&Carrier=='Loss2','Loss',Label)) %>%
  mutate(Label=ifelse(x=='Final'&Carrier=='Synfuel',' ',Label)) %>%
  # Fade pass-through strata to 50% opacity.
  mutate(Alpha=ifelse(x=='Hydrogen'&Carrier%in%c('Electricity'),.5,Alpha)) %>%
  mutate(Alpha=ifelse(x=='Synfuel'&Carrier%in%c('Electricity','Hydrogen','Loss'),.5,Alpha)) %>%
  mutate(Alpha=ifelse(x=='Final'&Carrier=='Loss',.5,Alpha)) %>%
  # Left-align labels at the first stage, right-align at the last.
  mutate(Positionh=ifelse(x=='Source',.2,ifelse(x=='Final',.8,.5))) %>%
  mutate(Positionv=ifelse(x=='Source'&Carrier=='Fossil',-.5,Positionv)) %>%
  # filter(!is.na(Carrier)) %>%
  mutate(Variable=factor(Variable,levels=rev(df$var$Variable)),scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab),
         Carrier=factor(Carrier,levels=rev(lst$carlev))) %>%
  ggplot(aes(x=x,y=Value,alluvium=Variable,stratum=Carrier,label=Carrier))+
  geom_flow(aes(fill=Carrier),show.legend=F)+
  geom_stratum(aes(fill=Carrier,alpha=Alpha),color='transparent',show.legend=F)+
  geom_text(aes(label=Label,hjust=Positionh,vjust=Positionv),stat='stratum',size=2.5)+
  labs(title=NULL,x=NULL,y=expression(paste('Secondary energy (EJ ',yr^{-1},')')))+
  ylim(0,lst$sec_ene_ele_max)+
  # Stage order from the df$var column order; inner stages get blank labels.
  scale_x_discrete(limits=colnames(df$var)[-1],labels=c('Source','','','Demand'),expand=c(.05,.05))+
  scale_fill_manual(values=lst$col,name=NULL)+
  scale_alpha_continuous(limits=c(0,1),range=c(0,1))+
  facet_grid(.~scen_lab)+
  mytheme$set1+
  theme(legend.position='bottom',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
  guides(fill=guide_legend(title=NULL))
# Non-biomass renewables panel: scenario trajectories (left) alongside the
# AR6 C1-C3 ranges for 2050 (right), sharing a common y-axis ceiling.
lst$Prm_Ene_NonBioRen_max <- df$all %>%
  filter(Scenario%in%lst$scen_rep,Region=='World',Year==2050) %>%
  filter(Variable=='Prm_Ene_NonBioRen') %>%
  mutate(Value=ceiling(Value)) %>% .$Value %>% max()
p$tmp1 <- df$all %>% filter(Region=='World',Year>=2020) %>%
  filter(Variable=='Prm_Ene_NonBioRen') %>%
  inner_join(df$scen_lab,by='Scenario') %>%
  mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab)) %>%
  filter(Scenario%in%lst$scen_rep) %>%
  ggplot()+
  geom_path(aes(x=Year,y=Value,color=scen_lab),show.legend=T)+
  geom_point(aes(x=Year,y=Value,color=scen_lab),fill='white',shape=21,show.legend=T)+
  ylim(0,lst$Prm_Ene_NonBioRen_max)+
  scale_color_manual(values=lst$scen_col)+
  labs(title='Non-biomass renewables\nprimary supply',x=NULL,y=expression(paste('Primary energy (EJ ',yr^{-1},')')))+
  mytheme$set1+theme(legend.position='none',strip.background=element_blank())+
  guides(color=guide_legend(title=NULL))
# IMP scenarios plotted as shaped points on the AR6 range panel.
df$tmp <- df$load_AR6_global %>%
  filter(Variable=='Prm_Ene_NonBioRen',Year=='2050',Category%in%c('C1','C2','C3'),!(is.na(IMP_marker)))
p$tmp2 <- df$load_AR6_global %>%
  filter(Variable=='Prm_Ene_NonBioRen',Year=='2050',Category%in%c('C1','C2','C3')) %>%
  fcalc_range_category() %>%
  ggplot()+
  geom_crossbar(aes(x=Category,ymin=p10,ymax=p90,y=p50),width=.75,color='white',fill='grey')+
  geom_crossbar(aes(x=Category,ymin=p0,ymax=p100,y=p0),width=.75,color='grey',fill='transparent',fatten=0)+
  geom_text(aes(x=Category,label=str_c('n=',n),y=p100),size=2,angle=90,hjust=-.2,vjust=.5)+
  geom_point(data=df$tmp,aes(x=Category,y=Value,shape=IMP_marker),size=1.5,show.legend=F)+
  ylim(0,lst$Prm_Ene_NonBioRen_max)+
  labs(x=NULL,y=NULL)+
  scale_shape_manual(values=lst$IMP_main_shp)+
  mytheme$set1
# Combine trajectory and range panels side by side (shared y-range).
p$Prm_Ene_NonBioRen <- plot_grid(p$tmp1+theme(plot.margin=unit(c(5.5,5.5,5.5,5.5),unit='pt')),
  p$tmp2+theme(plot.margin=unit(c(5.5,5.5,5.5,0),unit='pt'),axis.text.y=element_blank(),axis.line.y=element_blank(),axis.ticks.y=element_blank(),plot.background=element_blank()),
  nrow=1,axis='tb',align='h',rel_widths=c(1,.3))
# Hydrogen-generation panel: scenario trajectories (left) alongside the AR6
# C1-C3 ranges for 2050 (right), sharing a common y-axis ceiling.
# Ceiling: 2050 hydrogen supply rounded up to the next 10 EJ.
lst$Sec_Ene_Hyd_max <- df$all %>%
  filter(Scenario%in%lst$scen_rep,Region=='World',Year==2050) %>%
  filter(Variable=='Sec_Ene_Hyd') %>%
  mutate(Value=ceiling(Value/10)*10) %>% .$Value %>% max()
p$tmp1 <- df$all %>% filter(Region=='World',Year>=2020) %>%
  filter(Variable=='Sec_Ene_Hyd') %>%
  inner_join(df$scen_lab,by='Scenario') %>%
  mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab)) %>%
  filter(Scenario%in%lst$scen_rep) %>%
  ggplot()+
  geom_path(aes(x=Year,y=Value,color=scen_lab),show.legend=TRUE)+
  geom_point(aes(x=Year,y=Value,color=scen_lab),fill='white',shape=21,show.legend=TRUE)+
  # FIX: was ylim(0,lst$Sec_Ene_Hyd) -- that list element does not exist
  # (NULL), giving length-1 limits that error when the plot is drawn. The
  # computed ceiling is lst$Sec_Ene_Hyd_max (cf. lst$Prm_Ene_NonBioRen_max).
  ylim(0,lst$Sec_Ene_Hyd_max)+
  scale_color_manual(values=lst$scen_col)+
  labs(title='Hydrogen generation',x=NULL,y=expression(paste('Hydrogen supply (EJ ',yr^{-1},')')))+
  mytheme$set1+theme(legend.position='none',strip.background=element_blank(),legend.key.height=unit(3,'mm'),legend.text=element_text(size=7.5),legend.background=element_blank())+
  guides(color=guide_legend(title=NULL))
# IMP scenarios plotted as shaped points on the AR6 range panel.
df$tmp <- df$load_AR6_global %>%
  filter(Variable=='Sec_Ene_Hyd',Year=='2050',Category%in%c('C1','C2','C3'),!(is.na(IMP_marker)))
p$tmp2 <- df$load_AR6_global %>%
  filter(Variable=='Sec_Ene_Hyd',Year=='2050',Category%in%c('C1','C2','C3')) %>%
  fcalc_range_category() %>%
  ggplot()+
  geom_crossbar(aes(x=Category,ymin=p10,ymax=p90,y=p50),width=.75,color='white',fill='grey')+
  geom_crossbar(aes(x=Category,ymin=p0,ymax=p100,y=p0),width=.75,color='grey',fill='transparent',fatten=0)+
  geom_text(aes(x=Category,label=str_c('n=',n),y=p100),size=2,angle=90,hjust=-.2,vjust=.5)+
  geom_point(data=df$tmp,aes(x=Category,y=Value,shape=IMP_marker),size=1.5,show.legend=FALSE)+
  # FIX: same non-existent element as above; use the computed ceiling so
  # both panels share one y-range.
  ylim(0,lst$Sec_Ene_Hyd_max)+
  scale_shape_manual(values=lst$IMP_main_shp)+
  labs(x=NULL,y=NULL)+
  mytheme$set1
# Combine trajectory and range panels side by side.
p$Sec_Ene_Hyd <- plot_grid(p$tmp1+theme(plot.margin=unit(c(5.5,5.5,5.5,5.5),unit='pt')),
  p$tmp2+theme(plot.margin=unit(c(5.5,5.5,5.5,0),unit='pt'),axis.text.y=element_blank(),axis.line.y=element_blank(),axis.ticks.y=element_blank(),plot.background=element_blank()),
  nrow=1,axis='tb',align='h',rel_widths=c(1,.3))
# Panel: fossil primary energy supply (coal + oil + gas), scenario time
# series plus the AR6 C1-C3 2050 range strip.
# Axis limit candidates: this study's max ...
lst$Prm_Ene_Fos_max <- df$all %>%
filter(Scenario%in%lst$scen_rep,Region=='World',Year>=2020) %>%
filter(Variable%in%c('Prm_Ene_Coa','Prm_Ene_Oil','Prm_Ene_Gas')) %>%
group_by(Model,Scenario,Region,Year) %>% summarise(Value=sum(Value),.groups='drop') %>%
mutate(Value=ceiling(Value)) %>% .$Value %>% max()
# ... widened to the AR6 2050 max if that is larger (max(x, lst$Prm_Ene_Fos_max)).
lst$Prm_Ene_Fos_AR6_max <- df$load_AR6_global %>%
filter(Variable%in%c('Prm_Ene_Coa','Prm_Ene_Oil','Prm_Ene_Gas'),Year==2050,Category%in%c('C1','C2','C3')) %>%
group_by(Model,Scenario,Region,Year,Category) %>% summarise(Value=sum(Value),.groups='drop') %>%
mutate(Value=ceiling(Value)) %>% .$Value %>% max(lst$Prm_Ene_Fos_max)
# Left sub-panel: fossil supply pathways per representative scenario.
p$tmp1 <- df$all %>% filter(Region=='World',Year>=2020) %>%
filter(Variable%in%c('Prm_Ene_Coa','Prm_Ene_Oil','Prm_Ene_Gas')) %>%
group_by(Model,Scenario,Region,Year) %>% summarise(Value=sum(Value),.groups='drop') %>%
inner_join(df$scen_lab,by='Scenario') %>%
mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab)) %>%
filter(Scenario%in%lst$scen_rep) %>%
ggplot()+
geom_path(aes(x=Year,y=Value,color=scen_lab),show.legend=F)+
geom_point(aes(x=Year,y=Value,color=scen_lab),fill='white',shape=21,show.legend=F)+
ylim(0,lst$Prm_Ene_Fos_AR6_max*1.25)+
scale_color_manual(values=lst$scen_col)+
labs(title='Fossil primary supply',x=NULL,y=expression(paste('Primary energy (EJ ',yr^{-1},')')))+
mytheme$set1+theme(legend.position=c(.35,.8),strip.background=element_blank(),plot.margin=unit(c(5.5,0,5.5,5.5),unit='pt'))
# AR6 IMP-marker totals for 2050, overlaid as shaped points.
df$tmp <- df$load_AR6_global %>%
filter(Variable%in%c('Prm_Ene_Coa','Prm_Ene_Oil','Prm_Ene_Gas'),Year=='2050',Category%in%c('C1','C2','C3'),IMP_marker%in%lst$IMP_main) %>%
group_by(Model,Scenario,Region,Year,Category,IMP_marker) %>% summarise(Value=sum(Value),.groups='drop') %>%
mutate(IMP_marker=factor(IMP_marker,levels=lst$IMP_main))
# Right sub-panel: AR6 C1-C3 percentile range of total fossil supply in 2050.
p$tmp2 <- df$load_AR6_global %>%
filter(Variable%in%c('Prm_Ene_Coa','Prm_Ene_Oil','Prm_Ene_Gas'),Year==2050,Category%in%c('C1','C2','C3')) %>%
group_by(Model,Scenario,Region,Year,Category) %>% summarise(Value=sum(Value),.groups='drop') %>%
mutate(Variable='Prm_Ene_Fos') %>%
fcalc_range_category() %>%
ggplot()+
geom_crossbar(aes(x=Category,ymin=p10,ymax=p90,y=p50),width=.75,color='white',fill='grey')+
geom_crossbar(aes(x=Category,ymin=p0,ymax=p100,y=p0),width=.75,color='grey',fill='transparent',fatten=0)+
geom_text(aes(x=Category,label=str_c('n=',n),y=p100),size=2,angle=90,hjust=-.2,vjust=.5)+
geom_point(data=df$tmp,aes(x=Category,y=Value,shape=IMP_marker),size=1.5,show.legend=F)+
scale_y_continuous(limits=c(0,lst$Prm_Ene_Fos_AR6_max*1.25))+
labs(x=NULL,y=NULL)+
scale_shape_manual(values=lst$IMP_main_shp)+
mytheme$set1+theme(axis.text.y.left=element_blank(),axis.line.y.left=element_blank(),axis.ticks.y.left=element_blank(),
plot.margin=unit(c(5.5,5.5,5.5,0),unit='pt'))
# Combine the two sub-panels into the fossil primary-supply panel.
p$prm_ene_fos <- plot_grid(p$tmp1+theme(plot.margin=unit(c(5.5,5.5,5.5,5.5),unit='pt')),
p$tmp2,nrow=1,axis='tb',align='h',rel_widths=c(1,.3))
# Panel: biomass primary energy supply; same layout as the fossil panel.
# Axis limit: this study's max, widened to the AR6 2050 max if larger.
lst$Prm_Ene_Bio_max <- df$all %>%
filter(Scenario%in%lst$scen_rep,Region=='World',Year>=2020) %>%
filter(Variable=='Prm_Ene_Bio') %>%
mutate(Value=ceiling(Value)) %>% .$Value %>% max()
lst$Prm_Ene_Bio_AR6_max <- df$load_AR6_global %>%
filter(Variable=='Prm_Ene_Bio',Year==2050,Category%in%c('C1','C2','C3')) %>%
mutate(Value=ceiling(Value)) %>% .$Value %>% max(lst$Prm_Ene_Bio_max)
# Left sub-panel: biomass supply pathways per representative scenario.
p$tmp1 <- df$all %>% filter(Region=='World',Year>=2020) %>%
filter(Variable=='Prm_Ene_Bio') %>%
inner_join(df$scen_lab,by='Scenario') %>%
mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab)) %>%
filter(Scenario%in%lst$scen_rep) %>%
ggplot()+
geom_path(aes(x=Year,y=Value,color=scen_lab),show.legend=T)+
geom_point(aes(x=Year,y=Value,color=scen_lab),fill='white',shape=21,show.legend=T)+
ylim(0,lst$Prm_Ene_Bio_AR6_max*1.25)+
scale_color_manual(values=lst$scen_col)+
labs(title='Biomass primary supply',x=NULL,y=expression(paste('Primary energy (EJ ',yr^{-1},')')))+
mytheme$set1+theme(legend.position=c(.4,.8),strip.background=element_blank(),plot.margin=unit(c(5.5,0,5.5,5.5),unit='pt'))+
guides(color=guide_legend(title=NULL))
# AR6 IMP-marker points for 2050.
df$tmp <- df$load_AR6_global %>%
filter(Variable=='Prm_Ene_Bio',Year=='2050',Category%in%c('C1','C2','C3'),IMP_marker%in%lst$IMP_main) %>%
mutate(IMP_marker=factor(IMP_marker,levels=lst$IMP_main))
# Right sub-panel: AR6 C1-C3 percentile range strip.
p$tmp2 <- df$load_AR6_global %>%
filter(Variable=='Prm_Ene_Bio',Year==2050,Category%in%c('C1','C2','C3')) %>%
fcalc_range_category() %>%
ggplot()+
geom_crossbar(aes(x=Category,ymin=p10,ymax=p90,y=p50),width=.75,color='white',fill='grey')+
geom_crossbar(aes(x=Category,ymin=p0,ymax=p100,y=p0),width=.75,color='grey',fill='transparent',fatten=0)+
geom_text(aes(x=Category,label=str_c('n=',n),y=p100),size=2,angle=90,hjust=-.2,vjust=.5)+
geom_point(data=df$tmp,aes(x=Category,y=Value,shape=IMP_marker),size=1.5,show.legend=T)+
scale_y_continuous(limits=c(0,lst$Prm_Ene_Bio_AR6_max*1.25))+
labs(x=NULL,y=NULL)+
scale_shape_manual(values=lst$IMP_main_shp)+
mytheme$set1+theme(axis.text.y.left=element_blank(),axis.line.y.left=element_blank(),axis.ticks.y.left=element_blank(),
plot.margin=unit(c(5.5,5.5,5.5,0),unit='pt'))
# Extract shared legends (IMP shapes, scenario colors) for the Fig.2 layout;
# legends are then hidden on the combined panel itself.
p$l_IMP <- get_legend(p$tmp2+theme(legend.key.size=unit(5,'mm'))+guides(shape=guide_legend(title='AR6 IMPs')))
p$l_scen <- get_legend(p$tmp1)
p$prm_ene_bio <- plot_grid(p$tmp1+theme(legend.position='none',plot.margin=unit(c(5.5,5.5,5.5,5.5),unit='pt')),
p$tmp2+theme(legend.position='none'),nrow=1,axis='tb',align='h',rel_widths=c(1,.3))
# Electricity-generation mix for the 1.5C-CCU scenario, then assembly and
# export of Fig.2.
# Lookup: electricity-source variable -> legend label and fill color.
df$var <- tribble(~Variable,~Legend,~Color,
'Sec_Ene_Ele_Fos','Fossil','grey50',
'Sec_Ene_Ele_Nuc','Nuclear','moccasin',
'Sec_Ene_Ele_Hyd','Hydro','lightsteelblue',
'Sec_Ene_Ele_Bio','Biomass','darkolivegreen2',
'Sec_Ene_Ele_SolarPV','Solar PV','lightsalmon',
'Sec_Ene_Ele_Win','Wind','lightskyblue3',
'Sec_Ene_Ele_Oth_Ren','Other renewables','grey')
# Named vectors for scale_fill_manual: Variable -> label / color.
lst$leg <- as.character(df$var$Legend); names(lst$leg) <- as.character(df$var$Variable)
lst$col <- as.character(df$var$Color); names(lst$col) <- as.character(df$var$Variable)
# Total electricity supply per representative scenario (line overlay).
df$tmp <- df$all %>% filter(Variable=='Sec_Ene_Ele',Region=='World',Year>=2020) %>%
inner_join(df$scen_lab,by='Scenario') %>%
filter(Scenario%in%lst$scen_rep) %>%
mutate(Variable=factor(Variable,levels=rev(df$var$Variable)),scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab))
# Stacked-area generation mix for 1.5C-CCU, with all-scenario totals overlaid.
# NOTE(review): lst$sec_ene_ele_max is not set in this section — presumably
# computed earlier in the file; verify.
p$sec_ene_ele_CCU <- df$all %>% filter(Region=='World',Year>=2020) %>%
inner_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
filter(scen_lab=='1.5C-CCU') %>%
mutate(Variable=factor(Variable,levels=rev(df$var$Variable)),scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab)) %>%
ggplot()+
geom_area(aes(x=Year,y=Value,fill=Variable),stat='identity',position='stack',show.legend=T)+
geom_line(data=df$tmp,aes(x=Year,y=Value,color=scen_lab))+
geom_point(data=df$tmp,aes(x=Year,y=Value,color=scen_lab),shape=21,fill='white')+
labs(x=NULL,y=expression(paste('Electricity supply (EJ ',yr^{-1},')')))+
ylim(0,lst$sec_ene_ele_max)+
mytheme$set1+
theme(legend.position='none',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1),legend.background=element_blank(),legend.key.size=unit(4,'mm'))+
scale_fill_manual(values=lst$col,labels=lst$leg,name=NULL)+
scale_color_manual(values=lst$scen_col)
# Variant with a visible bottom legend, used only to harvest the legend grob.
p$l_sec_ene_ele_CCU <- p$sec_ene_ele_CCU+
guides(fill=guide_legend(title=NULL,nrow=1),color='none')+
theme(legend.position='bottom',legend.margin=margin(0,0,0,0))
# Legend columns and panel grid for Fig.2 (panels B-E + shared legends).
p$l_tmp1 <- plot_grid(p$l_scen,p$l_IMP,ggplotGrob(p$l_rangeleg),ncol=1,rel_heights=c(.5,1,.6))
p$l_tmp2 <- get_legend(p$l_sec_ene_ele_CCU+theme(legend.key.size=unit(3.5,'mm'),legend.spacing=unit(0,'mm')))
p$tmp1 <- plot_grid(p$Sec_Ene_Hyd+theme(plot.margin=unit(c(0,0,0,2.5),unit='mm')),p$Prm_Ene_NonBioRen+theme(plot.margin=unit(c(0,0,0,2.5),unit='mm')),
p$prm_ene_fos+theme(plot.margin=unit(c(0,0,0,2.5),unit='mm')),p$prm_ene_bio+theme(plot.margin=unit(c(0,0,0,2.5),unit='mm')),
nrow=2,labels=c('B','C','D','E'),label_size=12,rel_widths=c(1,1),rel_heights=c(1,1))
p$tmp2 <- plot_grid(p$tmp1,p$l_tmp1,
nrow=1,rel_widths=c(1,.25))
# Panel A: generation mix beside the energy-flow plot (p$seceneflo2, built
# earlier in the file — not visible here).
p$tmp3 <- plot_grid(p$sec_ene_ele_CCU+theme(plot.margin=unit(c(5.5,0,0,5.5),unit='pt')),
p$seceneflo2+theme(axis.title.y=element_blank(),axis.text.y=element_blank(),axis.line.y=element_blank(),plot.margin=unit(c(5.5,5.5,0,0),unit='pt')),
nrow=1,axis='tb',align='h',rel_widths=c(1,2.8))
p$tmp <- plot_grid(p$tmp3,p$l_tmp2,p$tmp2,ncol=1,labels=c('A','',''),label_size=12,rel_heights=c(1,.13,1.3))
# Export Fig.2 as PNG and vector PDF at journal single-column+ size.
ggsave(filename='output/fig2.png',plot=p$tmp,width=174,height=174,units='mm',dpi=300)
ggsave(filename='output/fig2.pdf',plot=p$tmp,width=174,height=174,units='mm',dpi=300,device=cairo_pdf)
# Fig.3 -------------------------------------------------------------------
# Scatter of fossil+biomass primary supply (x) vs hydrocarbon final energy
# (y) in 2050: AR6 C1-C3 scenarios in grey, IMP markers in black, this
# study's scenarios colored, with annotation arrows.
# Variables entering each axis total.
df$var <- tribble(~Variable,~Energy,
'Prm_Ene_Coa','Primary',
'Prm_Ene_Oil','Primary',
'Prm_Ene_Gas','Primary',
'Prm_Ene_Bio','Primary',
'Fin_Ene_Solids','Final',
'Fin_Ene_Gas','Final',
'Fin_Ene_Liq','Final')
# Manual label offsets for the three representative scenarios.
df$position <- tribble(~Scenario,~hjust,~vjust,
'500C-CCU',.5,-1,
'500C-DEC',.8,-2.3,
'500C-CDR',-.5,7)
# Point sizes: default scenarios large, sensitivity cases small.
lst$scen_sens_size <- c('Default'=3,'Sensitivity'=1)
# AR6 background cloud: one (Primary, Final) pair per model-scenario.
df$tmp <- df$load_AR6_global %>% filter(Region=='World',Year==2050,Category%in%c('C1','C2','C3')) %>%
inner_join(df$var,by='Variable') %>%
group_by(Model,Scenario,Region,Energy,Category) %>% summarise(Value=sum(Value),.groups='drop') %>%
pivot_wider(names_from='Energy',values_from='Value') %>%
filter(!is.na(Primary),!is.na(Final))
# AR6 IMP markers, aggregated per IMP label.
df$tmp2 <- df$load_AR6_global %>% filter(Region=='World',Year==2050,!(is.na(IMP_marker)),Category%in%c('C1','C2','C3')) %>%
inner_join(df$var,by='Variable') %>%
group_by(IMP_marker,Region,Energy,Category) %>% summarise(Value=sum(Value),.groups='drop') %>%
pivot_wider(names_from='Energy',values_from='Value') %>%
filter(!is.na(Primary),!is.na(Final))
# Main scatter with annotation arrows and scenario labels.
p$tmp1 <- df$all %>% filter(Region=='World',Year==2050) %>%
inner_join(df$var,by='Variable') %>%
group_by(Model,Scenario,Region,Energy) %>% summarise(Value=sum(Value),.groups='drop') %>%
pivot_wider(names_from='Energy',values_from='Value',values_fill=0) %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
mutate(scen_sens_var=ifelse(scen_sens_var=='Default','Default','Sensitivity')) %>%
mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat)) %>%
filter(Scenario%in%lst$scen_500all) %>%
ggplot()+
geom_point(data=df$tmp,aes(x=Primary,y=Final,shape=Category),color='grey',size=1)+
geom_point(data=df$tmp2,aes(x=Primary,y=Final,shape=IMP_marker),color='black',size=1,show.legend=F)+
geom_text(data=df$tmp2,aes(x=Primary,y=Final,label=IMP_marker),color='black',size=3,vjust=1.5,show.legend=F)+
geom_point(aes(x=Primary,y=Final,color=scen_sens_base,size=scen_sens_var))+
geom_text(data=. %>% filter(Scenario%in%lst$scen_rep) %>% inner_join(df$position,by='Scenario'),
aes(x=Primary,y=Final,color=scen_sens_base,label=scen_sens_base,hjust=hjust,vjust=vjust),size=3,show.legend=F)+
geom_segment(x=600,xend=200,y=30,yend=30,arrow=arrow(length=unit(2.5,'mm')))+
geom_text(x=375,y=10,label='Low dependency on fossil and biomass',size=3)+
geom_segment(x=30,xend=30,y=100,yend=300,arrow=arrow(length=unit(2.5,'mm')))+
geom_text(x=0,y=200,label='Hydrocarbon availability in end-use',size=3,angle=90)+
geom_segment(x=360,xend=420,y=200,yend=130,size=.3,color='#E69F00')+
xlim(0,NA)+ylim(0,NA)+
scale_color_manual(values=lst$scen_col)+
scale_size_manual(values=lst$scen_sens_size)+
scale_shape_manual(breaks=c('C1','C2','C3'),values=lst$IMP_cat_shp)+
labs(x=expression(paste('Fossil and biomass primary supply (EJ ',yr^{-1},')')),
y=expression(paste('Hydrocarbon energy in end-use (EJ ',yr^{-1},')')))+
mytheme$set1+theme(legend.position='right',legend.box='vertical',legend.margin=margin(0,0,0,0),legend.key.height=unit(3,'mm'))+
guides(shape=guide_legend(title='AR6 category'),color=guide_legend(title="This study's\nscenarios"),size=guide_legend(title=NULL))
# Variant re-keyed to IMP shapes, used only to harvest its legend.
p$tmp2 <- p$tmp1+
scale_shape_manual(breaks=lst$IMP_main,values=lst$IMP_cat_shp)+
theme(legend.key.size=unit(3,'mm'))+
guides(shape=guide_legend(title='AR6 IMPs',override.aes=list(color='black')),size='none',color='none')
# Stack both legends beside the legend-free scatter and export Fig.3.
p$l1 <- get_legend(p$tmp1)
p$l2 <- get_legend(p$tmp2)
p$l_hydcar <- plot_grid(p$l1,p$l2,ncol=1,rel_heights=c(1,.6))
p$hydrocar <- p$tmp1+theme(legend.position='none')
p$tmp <- plot_grid(p$hydrocar,p$l_hydcar,nrow=1,rel_widths=c(1,.25))
ggsave(filename='output/fig3.png',plot=p$tmp,width=114,height=90,units='mm',dpi=300)
ggsave(filename='output/fig3.pdf',plot=p$tmp,width=114,height=90,units='mm',dpi=300,device=cairo_pdf)
# Square variant with an inset legend for the graphical abstract.
p$tmp <- p$tmp1+
theme(legend.position=c(.92,.3),legend.box.margin=margin(0,0,0,0),
legend.background=element_blank(),legend.box.background=element_blank(),
plot.margin=unit(c(10,15,0,0),unit='pt'))
ggsave(filename='output/abstract.pdf',plot=p$tmp,width=4,height=4,units='in',dpi=300,device=cairo_pdf)
# Fig.4 ------------------------------------------------------------------
# Panels A-C: DAC capture, energy-sector CDR, and CCS; values converted
# from Mt to Gt-CO2 (Value/1000) throughout.
# CDR axis limit: AR6 2050 max (BECCS + DACCS) ...
lst$CDR_max_AR6 <- df$load_AR6_global %>%
filter(Variable%in%c('Car_Seq_Dir_Air_Cap','Car_Seq_CCS_Bio'),Region=='World',Year=='2050',Category%in%c('C1','C2','C3')) %>%
group_by(Model,Scenario,Category,Region,Year) %>% summarise(Value=sum(Value),.groups='drop') %>%
mutate(Value=Value/1000) %>%
mutate(Value=ceiling(Value)) %>% .$Value %>% max()
# ... widened to this study's max, plus 2 Gt headroom (%>% binds tighter than +).
lst$CDR_max <- df$all %>%
filter(Scenario%in%lst$scen_rep,Region=='World',Year==2050) %>%
filter(Variable%in%c('CCUSGeo_Sto_Bio','CCUSGeo_Sto_Dir_Air_Cap')) %>%
mutate(Value=Value/1000) %>%
group_by(Model,Scenario,Region,Year) %>% summarise(Value=sum(Value),.groups='drop') %>%
mutate(Value=ceiling(Value)) %>% .$Value %>% max(lst$CDR_max_AR6)+2
# CDR time series (geological storage of biogenic + DAC CO2) per scenario.
p$tmp1 <- df$all %>% filter(Region=='World',Year>=2020) %>%
filter(Variable%in%c('CCUSGeo_Sto_Bio','CCUSGeo_Sto_Dir_Air_Cap')) %>%
group_by(Model,Scenario,Region,Year) %>% summarise(Value=sum(Value),.groups='drop') %>%
inner_join(df$scen_lab,by='Scenario') %>%
mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab)) %>%
filter(Scenario%in%lst$scen_rep) %>%
mutate(Value=Value/1000) %>%
ggplot()+
geom_path(aes(x=Year,y=Value,color=scen_lab),show.legend=T)+
geom_point(aes(x=Year,y=Value,color=scen_lab),fill='white',shape=21,show.legend=T)+
ylim(0,lst$CDR_max)+
labs(title='CDR in energy sector',x=NULL,y=expression(paste('CDR (Gt-',CO[2],' ',yr^{-1},')')))+
scale_color_manual(values=lst$scen_col)+
mytheme$set1+theme(legend.position=c(.35,.8),strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
guides(color=guide_legend(title=NULL))
# AR6 IMP-marker CDR totals for 2050.
df$tmp <- df$load_AR6_global %>%
filter(Variable%in%c('Car_Seq_Dir_Air_Cap','Car_Seq_CCS_Bio'),Year=='2050',Category%in%c('C1','C2','C3'),IMP_marker%in%lst$IMP_main) %>%
group_by(Model,Scenario,Category,IMP_marker,Region,Year) %>% summarise(Value=sum(Value),.groups='drop') %>%
mutate(Value=Value/1000) %>%
mutate(IMP_marker=factor(IMP_marker,levels=lst$IMP_main))
# AR6 C1-C3 CDR range strip for 2050.
p$tmp2 <- df$load_AR6_global %>%
filter(Variable%in%c('Car_Seq_Dir_Air_Cap','Car_Seq_CCS_Bio'),Year=='2050',Category%in%c('C1','C2','C3')) %>%
group_by(Model,Scenario,Category,Region,Year) %>% summarise(Value=sum(Value),.groups='drop') %>%
mutate(Variable='Car_Seq_CDR',Value=Value/1000) %>%
fcalc_range_category() %>%
ggplot()+
geom_crossbar(aes(x=Category,ymin=p10,ymax=p90,y=p50),width=.75,color='white',fill='grey')+
geom_crossbar(aes(x=Category,ymin=p0,ymax=p100,y=p0),width=.75,color='grey',fill='transparent',fatten=0)+
geom_text(aes(x=Category,label=str_c('n=',n),y=p100),size=2,angle=90,hjust=-.2,vjust=.5)+
geom_point(data=df$tmp,aes(x=Category,y=Value,shape=IMP_marker),size=1.5,show.legend=F)+
ylim(0,lst$CDR_max)+
labs(x=NULL,y=NULL)+
scale_shape_manual(values=lst$IMP_main_shp)+
mytheme$set1
p$CDR <- plot_grid(p$tmp1+theme(legend.position='none',plot.margin=unit(c(5.5,0,5.5,5.5),unit='pt')),
p$tmp2+theme(axis.text.x=element_text(angle=90,hjust=1,vjust=.5),axis.text.y=element_blank(),axis.line.y=element_blank(),axis.ticks.y=element_blank(),plot.background=element_blank(),plot.margin=unit(c(5.5,5.5,5.5,0),unit='pt')),
nrow=1,axis='tb',align='h',rel_widths=c(1,.3))
# DAC panel. NOTE(review): unlike CDR/CCS, this axis limit comes from this
# study's scenarios only, so AR6 points above it would be clipped by ylim —
# confirm this is intentional.
lst$DAC_max <- df$all %>%
filter(Scenario%in%lst$scen_rep,Region=='World',Year==2050) %>%
filter(Variable=='Car_Cap_Dir_Air_Cap') %>%
mutate(Value=Value/1000) %>%
group_by(Model,Scenario,Region,Year) %>% summarise(Value=sum(Value),.groups='drop') %>%
mutate(Value=ceiling(Value*10)/10) %>% .$Value %>% max()
p$tmp1 <- df$all %>% filter(Region=='World',Year>=2020) %>%
filter(Variable=='Car_Cap_Dir_Air_Cap') %>%
group_by(Model,Scenario,Region,Year) %>% summarise(Value=sum(Value),.groups='drop') %>%
inner_join(df$scen_lab,by='Scenario') %>%
mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab)) %>%
filter(Scenario%in%lst$scen_rep) %>%
mutate(Value=Value/1000) %>%
ggplot()+
geom_path(aes(x=Year,y=Value,color=scen_lab),show.legend=T)+
geom_point(aes(x=Year,y=Value,color=scen_lab),fill='white',shape=21,show.legend=T)+
ylim(0,lst$DAC_max)+
scale_color_manual(values=lst$scen_col)+
labs(title=expression(paste(CO[2],' capture by DAC')),x=NULL,y=expression(paste('Carbon capture (Gt-',CO[2],' ',yr^{-1},')')))+
mytheme$set1+theme(legend.position=c(.35,.8),strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
guides(color=guide_legend(title=NULL))
df$tmp <- df$load_AR6_global %>%
filter(Variable=='Car_Seq_Dir_Air_Cap',Year=='2050',Category%in%c('C1','C2','C3'),IMP_marker%in%lst$IMP_main) %>%
mutate(Value=Value/1000) %>%
mutate(IMP_marker=factor(IMP_marker,levels=lst$IMP_main))
# AR6 DAC range strip; negative reported values are excluded before ranging.
p$tmp2 <- df$load_AR6_global %>%
filter(Variable=='Car_Seq_Dir_Air_Cap',Year=='2050',Category%in%c('C1','C2','C3')) %>%
filter(Value>=0) %>%
mutate(Value=Value/1000) %>%
fcalc_range_category() %>%
ggplot()+
geom_crossbar(aes(x=Category,ymin=p10,ymax=p90,y=p50),width=.75,color='white',fill='grey')+
geom_crossbar(aes(x=Category,ymin=p0,ymax=p100,y=p0),width=.75,color='grey',fill='transparent',fatten=0)+
geom_text(aes(x=Category,label=str_c('n=',n),y=p100),size=2,angle=90,hjust=-.2,vjust=.5)+
geom_point(data=df$tmp,aes(x=Category,y=Value,shape=IMP_marker),size=1.5,show.legend=F)+
ylim(0,lst$DAC_max)+
labs(x=NULL,y=NULL)+
scale_shape_manual(values=lst$IMP_main_shp)+
mytheme$set1
p$DAC <- plot_grid(p$tmp1+theme(legend.position='none',legend.background=element_blank(),plot.margin=unit(c(5.5,0,5.5,5.5),unit='pt')),
p$tmp2+theme(axis.text.x=element_text(angle=90,hjust=1,vjust=.5),axis.text.y=element_blank(),axis.line.y=element_blank(),axis.ticks.y=element_blank(),plot.background=element_blank(),plot.margin=unit(c(5.5,5.5,5.5,0),unit='pt')),
nrow=1,axis='tb',align='h',rel_widths=c(1,.3))
# CCS panel, CO2 alluvial flow diagram (panel D), and Fig.4 export.
# CCS axis limit: max of AR6 2050 and this study's 2050 values, +4 Gt headroom.
lst$CCS_max_AR6 <- df$load_AR6_global %>%
filter(Variable=='Car_Seq_CCS',Region=='World',Year=='2050',Category%in%c('C1','C2','C3')) %>%
mutate(Value=Value/1000) %>%
mutate(Value=ceiling(Value)) %>% .$Value %>% max()
lst$CCS_max <- df$all %>%
filter(Scenario%in%lst$scen_rep,Region=='World',Year==2050) %>%
filter(Variable=='Car_Seq_CCS') %>%
mutate(Value=Value/1000) %>%
group_by(Model,Scenario,Region,Year) %>% summarise(Value=sum(Value),.groups='drop') %>%
mutate(Value=ceiling(Value)) %>% .$Value %>% max(lst$CCS_max_AR6)+4
# CCS time series per representative scenario (Gt-CO2/yr).
p$tmp1 <- df$all %>% filter(Region=='World',Year>=2020) %>%
filter(Variable=='Car_Seq_CCS') %>%
group_by(Model,Scenario,Region,Year) %>% summarise(Value=sum(Value),.groups='drop') %>%
inner_join(df$scen_lab,by='Scenario') %>%
mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab)) %>%
filter(Scenario%in%lst$scen_rep) %>%
mutate(Value=Value/1000) %>%
ggplot()+
geom_path(aes(x=Year,y=Value,color=scen_lab),show.legend=T)+
geom_point(aes(x=Year,y=Value,color=scen_lab),fill='white',shape=21,show.legend=T)+
ylim(0,lst$CCS_max)+
scale_color_manual(values=lst$scen_col)+
labs(title='CCS',x=NULL,y=expression(paste('CCS (Gt-',CO[2],' ',yr^{-1},')')))+
mytheme$set1+theme(legend.position=c(.35,.8),strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
guides(color=guide_legend(title=NULL))
df$tmp <- df$load_AR6_global %>%
filter(Variable=='Car_Seq_CCS',Year=='2050',Category%in%c('C1','C2','C3'),IMP_marker%in%lst$IMP_main) %>%
mutate(Value=Value/1000) %>%
mutate(IMP_marker=factor(IMP_marker,levels=lst$IMP_main))
# AR6 CCS range strip; IMP points keep their legend here (harvested below).
p$tmp2 <- df$load_AR6_global %>%
filter(Variable=='Car_Seq_CCS',Year=='2050',Category%in%c('C1','C2','C3')) %>%
filter(Value>=0) %>%
mutate(Value=Value/1000) %>%
fcalc_range_category() %>%
ggplot()+
geom_crossbar(aes(x=Category,ymin=p10,ymax=p90,y=p50),width=.75,color='white',fill='grey')+
geom_crossbar(aes(x=Category,ymin=p0,ymax=p100,y=p0),width=.75,color='grey',fill='transparent',fatten=0)+
geom_text(aes(x=Category,label=str_c('n=',n),y=p100),size=2,angle=90,hjust=-.2,vjust=.5)+
geom_point(data=df$tmp,aes(x=Category,y=Value,shape=IMP_marker),size=1.5)+
ylim(0,lst$CCS_max)+
labs(x=NULL,y=NULL)+
scale_shape_manual(values=lst$IMP_main_shp)+
mytheme$set1
# Extract the IMP legend in two sizings (p$l_IMP overwrites the Fig.2 copy;
# both appear to be equivalent apart from key size).
p$l_IMP <- get_legend(p$tmp2+theme(legend.key.size=unit(5,'mm'))+guides(shape=guide_legend(title='AR6 IMPs')))
p$l_CCS_AR6 <- get_legend(p$tmp2+theme(legend.key.height=unit(4,'mm'))+guides(shape=guide_legend(title='AR6 IMPs')))
p$CCS <- plot_grid(p$tmp1+theme(legend.position=c(.52,.8),plot.margin=unit(c(5.5,0,5.5,5.5),unit='pt'),legend.background=element_blank()),
p$tmp2+theme(legend.position='none',axis.text.x=element_text(angle=90,hjust=1,vjust=.5),axis.text.y=element_blank(),axis.line.y=element_blank(),axis.ticks.y=element_blank(),plot.background=element_blank(),plot.margin=unit(c(5.5,5.5,5.5,0),unit='pt')),
nrow=1,axis='tb',align='h',rel_widths=c(1,.3))
# Mapping of each CO2-flow variable onto the four alluvial axes
# (Source -> Capture & Use -> Sequestration -> Destination).
df$var <- tribble(~Variable,~Source,~'Capture & Use',~Sequestration,~Destination,
'Emi_CO2_Ene_Com_exc_CCUS','Fossil','Fossil','Emission','Atmosphere',
'Emi_CO2_Ene_Bio_exc_CCUS','Atmosphere','Biomass','Neutral','Atmosphere',
'CCUSUti_Ene_Bio','Atmosphere','Biomass','Utilization','Atmosphere',
'CCUSUti_Ene_Dir_Air_Cap','Atmosphere','DAC','Utilization','Atmosphere',
'CCUSUti_Ene_Fos','Fossil','Fossil','Utilization','Atmosphere',
'CCUSGeo_Sto_Bio','Atmosphere','Biomass','Storage','Ground',
'CCUSGeo_Sto_Dir_Air_Cap','Atmosphere','DAC','Storage','Ground',
'CCUSGeo_Sto_Fos','Fossil','Fossil','Storage','Ground')
# Stratum ordering and fill colors for the alluvial plot.
lst$Seq <- c('Storage','Utilization','Neutral','Emission','DAC','Biomass','Fossil','Ground','Atmosphere')
lst$col <- c('Storage'='moccasin','Utilization'='orchid1','Neutral'='darkolivegreen2','Emission'='grey',
'Biomass'='darkolivegreen2','DAC'='thistle2','Fossil'='grey',
'Ground'='grey','Atmosphere'='lightsteelblue2')
# Panel D: 2050 CO2 flows per representative scenario (ggalluvial). 'Neutral'
# and 'Emission' strata on the Sequestration axis are faded and unlabeled.
p$co2eneflo <- df$all %>% filter(Region=='World',Year==2050) %>%
inner_join(df$var,by='Variable') %>%
mutate(Value=Value/1000) %>%
inner_join(df$scen_lab,by='Scenario') %>%
filter(Scenario%in%lst$scen_rep) %>%
select(colnames(df$var),Value,scen_lab) %>%
pivot_longer(cols=!c(scen_lab,Variable,Value),names_to='x',values_to='Carrier') %>%
mutate(Alpha=ifelse(x=='Sequestration'&Carrier%in%c('Neutral','Emission'),0.5,1)) %>%
mutate(Label=ifelse(x=='Sequestration'&Carrier%in%c('Neutral','Emission'),' ',Carrier)) %>%
mutate(Position=ifelse(x=='Destination',.8,ifelse(x=='Source',.2,.5))) %>%
mutate(Variable=factor(Variable,levels=rev(df$var$Variable)),scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab),
Carrier=factor(Carrier,levels=rev(lst$Seq))) %>%
ggplot(aes(x=x,y=Value,alluvium=Variable,stratum=Carrier,label=Carrier))+
geom_flow(aes(fill=Carrier),alpha=.5,show.legend=F)+
geom_stratum(aes(fill=Carrier,alpha=Alpha),color='transparent',show.legend=F)+
geom_text(aes(label=Label,hjust=Position),stat='stratum',size=3)+
labs(title=NULL,x=NULL,y=expression(paste('Carbon flow (Gt-',CO[2],' ',yr^{-1},')')))+
scale_x_discrete(limits=colnames(df$var)[-1],expand=c(.05,.05))+
scale_fill_manual(values=lst$col,name=NULL)+
scale_alpha_continuous(limits=c(0,1),range=c(0,1))+
facet_grid(.~scen_lab)+
mytheme$set1+
theme(legend.position='bottom',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
guides(fill=guide_legend(title=NULL))
# Assemble panels A-D plus legends and export Fig.4.
p$l_tmp <- plot_grid(p$l_CCS_AR6,ggplotGrob(p$l_rangeleg),ncol=1,rel_heights=c(1,.7))
p$tmp1 <- plot_grid(p$DAC,p$CDR,p$CCS,p$l_tmp,nrow=1,labels=c('A','B','C',''),label_size=12,rel_widths=c(1,1,1,.63))
p$tmp <- plot_grid(p$tmp1,p$co2eneflo,ncol=1,labels=c('','D'),label_size=12,rel_heights=c(1,1.4))
ggsave(filename='output/fig4.png',plot=p$tmp,width=174,height=150,units='mm',dpi=300)
ggsave(filename='output/fig4.pdf',plot=p$tmp,width=174,height=150,units='mm',dpi=300,device=cairo_pdf)
# Fig.5 -------------------------------------------------------------------
# Policy cost and carbon-price panels: default scenarios as bars,
# sensitivity cases as shaped points.
# Point-shape lookup keyed by sensitivity-variant label.
lst$shp <- as.integer(df$scen_sens_shape$Shape); names(lst$shp) <- as.character(df$scen_sens_shape$scen_sens_var)
# Discounted energy-system cost as share of GDP in 2050.
p$polcosdisc <- df$all %>% filter(Region=='World',Year==2050) %>%
filter(Variable=='Pol_Cos_per_GDP_Disc') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens_all)) %>%
filter(Scenario%in%lst$scen_500all) %>%
ggplot()+
geom_bar(data=. %>% filter(Scenario%in%lst$scen_rep),
aes(x=scen_sens_base,y=Value,fill=scen_lab),stat='identity',show.legend=F)+
geom_point(aes(x=scen_sens_base,y=Value,shape=scen_sens_var),fill='white',stat='identity',show.legend=T)+
scale_y_continuous(limits=c(0,NA),labels=scales::percent)+
labs(title=NULL,x=NULL,y='Energy system cost\n(% of GDP)')+
mytheme$set1+theme(legend.position='bottom',strip.background=element_blank())+
scale_fill_manual(values=lst$scen_col)+
scale_shape_manual(values=lst$shp,name=NULL)+
guides(fill='none')
# Carbon-price axis limit: max of AR6 2050 and this study's 2050 prices.
lst$Prc_Car_AR6_max <- df$load_AR6_global %>%
filter(Variable=='Prc_Car',Year==2050,Category%in%c('C1','C2','C3')) %>%
mutate(Value=ceiling(Value)) %>% .$Value %>% max()
lst$Prc_Car_max <- df$all %>%
filter(Scenario%in%lst$scen_rep,Region=='World',Year==2050) %>%
filter(Variable=='Prc_Car') %>%
mutate(Value=ceiling(Value)) %>% .$Value %>% max(lst$Prc_Car_AR6_max)
# 2050 carbon prices across ALL of this study's scenarios; the scale limit
# keeps all data but coord_cartesian crops the view to 0-2000 $/t-CO2.
p$tmp1 <- df$all %>% filter(Region=='World',Year==2050) %>%
filter(Variable=='Prc_Car') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens_all)) %>%
ggplot()+
geom_bar(data=. %>% filter(scen_sens_var=='Default'),
aes(x=scen_sens_base,y=Value,fill=scen_sens_base),stat='identity',position='stack',show.legend=T)+
geom_point(aes(x=scen_sens_base,y=Value,shape=scen_sens_var),color='black',fill='white',show.legend=T)+
scale_y_continuous(limits=c(0,lst$Prc_Car_max))+
coord_cartesian(ylim=c(0,2000))+
scale_fill_manual(values=lst$scen_col_all)+
scale_shape_manual(values=lst$shp,name=NULL)+
labs(title=NULL,x=NULL,y=expression(paste('Carbon prices (US$ t-',{CO[2]}^{-1},')')))+
mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
guides(fill=guide_legend(title=NULL,override.aes=list(color='transparent')),
shape=guide_legend(title="This study's\nscenario",override.aes=list(fill='transparent')))
# AR6 IMP carbon-price points and C1-C3 range strip (cropped like p$tmp1;
# n-labels pinned at the 2000 crop line).
df$tmp <- df$load_AR6_global %>%
filter(Variable=='Prc_Car',Year=='2050',Category%in%c('C1','C2','C3'),IMP_marker%in%lst$IMP_main) %>%
mutate(IMP_marker=factor(IMP_marker,levels=lst$IMP_main))
p$tmp2 <- df$load_AR6_global %>%
filter(Variable=='Prc_Car',Year==2050,Category%in%c('C1','C2','C3')) %>%
fcalc_range_category() %>%
ggplot()+
geom_crossbar(aes(x=Category,ymin=p10,ymax=p90,y=p50),width=.75,color='white',fill='grey')+
geom_crossbar(aes(x=Category,ymin=p0,ymax=p100,y=p0),width=.75,color='grey',fill='transparent',fatten=0)+
geom_text(aes(x=Category,label=str_c('n=',n)),y=2000,size=2,angle=90,hjust=1.3,vjust=.5)+
geom_point(data=df$tmp,aes(x=Category,y=Value,shape=IMP_marker),size=1.5,show.legend=T)+
scale_y_continuous(limits=c(0,lst$Prc_Car_max))+
coord_cartesian(ylim=c(0,2000))+
labs(x=NULL,y=NULL)+
scale_shape_manual(values=lst$IMP_main_shp)+
mytheme$set1+theme(axis.text.x=element_text(angle=45,hjust=1),
axis.text.y.left=element_blank(),axis.line.y.left=element_blank(),axis.ticks.y.left=element_blank())
p$l_carpri <- get_legend(p$tmp1)
# As p$tmp1 but restricted to the 500C scenario family.
p$tmp3 <- df$all %>% filter(Region=='World',Year==2050) %>%
filter(Variable=='Prc_Car') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
filter(Scenario%in%lst$scen_500all) %>%
mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens_all)) %>%
ggplot()+
geom_bar(data=. %>% filter(scen_sens_var=='Default'),
aes(x=scen_sens_base,y=Value,fill=scen_sens_base),stat='identity',position='stack',show.legend=T)+
geom_point(aes(x=scen_sens_base,y=Value,shape=scen_sens_var),color='black',fill='white',show.legend=T)+
scale_y_continuous(limits=c(0,lst$Prc_Car_max))+
coord_cartesian(ylim=c(0,2000))+
scale_fill_manual(values=lst$scen_col_all)+
scale_shape_manual(values=lst$shp,name=NULL)+
labs(title=NULL,x=NULL,y=expression(paste('Carbon prices (US$ t-',{CO[2]}^{-1},')')))+
mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
guides(fill=guide_legend(title=NULL,override.aes=list(color='transparent')),
shape=guide_legend(title="This study's\nscenario",override.aes=list(fill='transparent')))
# Combined carbon-price panel: 500C bars beside the AR6 range strip.
p$carpol_all <- plot_grid(p$tmp3+theme(legend.position='none',plot.margin=unit(c(5.5,0,5.5,13),unit='pt')),
p$tmp2+theme(legend.position='none',plot.margin=unit(c(5.5,5.5,5.5,0),unit='pt')),
nrow=1,axis='tb',align='h',rel_widths=c(1,.4))
df$var <- tribble(~Variable,~Legend,~Color,
'Prc_Sec_Ene_Ele','Electricity','lightsteelblue',
'Prc_Sec_Ene_Hyd','Hydrogen','thistle2',
'Prc_Sec_Ene_Liq_Hyd_syn','Synfuel','orchid')
lst$leg <- as.character(df$var$Legend); names(lst$leg) <- as.character(df$var$Variable)
lst$col <- as.character(df$var$Color); names(lst$col) <- as.character(df$var$Variable)
lst$shp <- as.integer(df$scen_sens_shape$Shape); names(lst$shp) <- as.character(df$scen_sens_shape$scen_sens_var)
lst$eneprc_max <- df$all %>% filter(Region=='World') %>%
filter(Variable%in%df$var$Variable,Value>0,Year>=2030) %>%
inner_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
filter(scen_sens_base=='1.5C-CCU') %>%
mutate(Value=ceiling(Value)) %>%
.$Value %>% max()
p$eneprc <- df$all %>% filter(Region=='World') %>%
filter(Variable%in%df$var$Variable,Value>0,Year>=2030) %>%
inner_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens_all)) %>%
filter(scen_sens_base=='1.5C-CCU') %>%
ggplot()+
geom_line(aes(x=Year,y=Value,color=Variable,linetype=scen_sens_var),show.legend=T)+
geom_point(aes(x=Year,y=Value,color=Variable,shape=scen_sens_var),fill='transparent',show.legend=T)+
ylim(0,lst$eneprc_max)+
labs(x=NULL,y=expression(paste('Production costs (US$ ',GJ^{-1},')')))+
mytheme$set1+theme(legend.position='bottom',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
scale_color_manual(values=rev(lst$col),labels=lst$leg,name=NULL)+
scale_shape_manual(values=lst$shp,name=NULL)+
scale_linetype_manual(values=lst$lin_scen,name=NULL)+
guides(shape='none',linetype='none')
# Fig.5 panel C (right half): 2050 synfuel production-cost breakdown into
# capital, hydrogen, CO2 and other cost components, stacked by component.
df$var <- tribble(~Variable,~Carrier,~Legend,~Color,
'Prc_Sec_Ene_Liq_Hyd_syn_Cap_Cos','Synfuel\nliquids','Capital','lightskyblue3',
'Prc_Sec_Ene_Liq_Hyd_syn_Hyd_Cos','Synfuel\nliquids','Hydrogen','thistle2',
'Prc_Sec_Ene_Liq_Hyd_syn_CO2_Cos','Synfuel\nliquids','CO2','moccasin',
'Prc_Sec_Ene_Liq_Hyd_syn_Oth_Cos','Synfuel\nliquids','Other','grey')
# Colours here are keyed by Legend (cost component), not by Variable.
lst$col <- as.character(df$var$Color); names(lst$col) <- as.character(df$var$Legend)
# Stacked bars per sensitivity case, 1.5C-CCU family in 2050; y-limit is
# lst$eneprc_max (computed for p$eneprc) so the panels align.
p$synf_cost <- df$all %>% filter(Region=='World',Year==2050) %>%
filter(Variable%in%df$var$Variable) %>%
inner_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens_all),
Legend=factor(Legend,levels=rev(df$var$Legend)),Carrier=factor(Carrier,levels=unique(df$var$Carrier))) %>%
filter(Scenario%in%lst$scen_500all,scen_sens_base=='1.5C-CCU') %>%
ggplot()+
geom_bar(aes(x=scen_sens_var,y=Value,fill=Legend),stat='identity',position='stack',show.legend=T)+
ylim(0,lst$eneprc_max)+
# facet_grid(.~Carrier)+
labs(x=NULL,y=expression(paste('Energy production costs (US$ ',GJ^{-1},')')))+
mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
scale_fill_manual(values=rev(lst$col),name=NULL)
# Fig.5 panel D: additional investment needs by category, averaged per
# decade, global, representative scenarios only.
df$var <- tribble(~Variable,~Legend,~Color,
'Inv_Add_Ene_Dem','Energy\ndemand','darkolivegreen2',
'Inv_Add_Ene_Sup_Ele','Electricity','lightsteelblue',
'Inv_Add_Ene_Sup_Hyd','Hydrogen','thistle2',
'Inv_Add_Ene_Sup_Oth','Other energy\nsupply','moccasin',
'Inv_Add_CCS','CCS','darkgoldenrod2')
lst$leg <- as.character(df$var$Legend); names(lst$leg) <- as.character(df$var$Variable)
lst$col <- as.character(df$var$Color); names(lst$col) <- as.character(df$var$Variable)
p$inv <- df$all %>% filter(Region=='World',Year>2020) %>%
filter(Variable%in%df$var$Variable) %>%
# Bucket years into the decade they end in (e.g. 2041..2050 -> 2050),
# average within each bucket, label it "2041-50", rescale to trillion US$.
mutate(Year=ceiling(Year/10)*10) %>%
group_by(Model,Scenario,Region,Variable,Year) %>% summarise(Value=sum(Value)/n(),.groups='drop') %>%
mutate(Year=str_c(Year-9,'-',Year-2000)) %>%
mutate(Value=Value/1000) %>%
inner_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
filter(Scenario%in%lst$scen_rep) %>%
mutate(Variable=factor(Variable,levels=rev(df$var$Variable)),scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens)) %>%
ggplot()+
geom_bar(aes(x=Year,y=Value,fill=Variable),stat='identity',position='stack',show.legend=T)+
facet_grid(.~scen_lab)+
labs(x=NULL,y=expression(paste('Additional investment (trillion US$ ',yr^{-1},')')))+
mytheme$set1+theme(legend.position='right',legend.margin=margin(0,0,0,0),strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
scale_fill_manual(values=rev(lst$col),labels=lst$leg,name=NULL)
# Assemble Figure 5 with cowplot: row 1 = panels A/B (p$polcosdisc,
# p$carpol_all, built earlier in the script) plus a stacked legend column;
# row 2 = panels C (prices + synfuel cost breakdown) and D (investment).
p$l_tmp <- plot_grid(p$l_IMP,ggplotGrob(p$l_rangeleg),ncol=1,rel_heights=c(1,.6))
p$tmp1 <- plot_grid(p$polcosdisc+theme(legend.position='right',axis.text.x=element_text(angle=45,hjust=1)),
p$carpol_all+theme(legend.position='right',axis.text.x=element_text(angle=45,hjust=1)),
p$l_tmp,nrow=1,rel_widths=c(1,.85,.5),labels=c('A','B'),label_size=12)
# Extract both panel-C legends and stack them; the panels themselves are
# drawn legend-less and horizontally aligned (shared top/bottom axes).
p$l_tmp1 <- get_legend(p$eneprc+theme(legend.position='right'))
p$l_tmp2 <- get_legend(p$synf_cost+theme(legend.position='right'))
p$l_tmp <- plot_grid(p$l_tmp1,p$l_tmp2,ncol=1)
p$tmp2 <- plot_grid(p$eneprc+theme(legend.position='none',plot.margin=unit(c(5.5,0,5.5,13),unit='pt')),
p$synf_cost+theme(legend.position='none',axis.line.y=element_blank(),axis.text.y=element_blank(),axis.title.y=element_blank(),axis.ticks.y=element_blank(),plot.background=element_blank(),plot.margin=unit(c(5.5,5.5,5.5,0),unit='pt')),
p$l_tmp,nrow=1,rel_widths=c(1,.7,.6),axis='tb',align='h')
p$tmp3 <- plot_grid(p$tmp2,p$inv+theme(plot.margin=unit(c(5.5,0,5.5,13),unit='pt')),nrow=1,rel_widths=c(1,1.15),labels=c('C','D'),label_size=12)
p$tmp <- plot_grid(p$tmp1,p$tmp3,ncol=1,rel_heights=c(1,1))
# Export raster and vector versions at 174 mm width (journal column width).
ggsave(filename='output/fig5.png',plot=p$tmp,width=174,height=130,units='mm',dpi=300)
ggsave(filename='output/fig5.pdf',plot=p$tmp,width=174,height=130,units='mm',dpi=300,device=cairo_pdf)
# Supplementary Fig.1 ------------------------------------------------------------------
# Panel A: global final energy demand by carrier, stacked areas, one facet
# per scenario (scen_wrap).
df$var <- tribble(~Variable,~Legend,~Color,
'Fin_Ene_SolidsCoa','Coal','grey70',
'Fin_Ene_Liq_Oil_and_Nat_Gas','Liquids-fossil','sandybrown',
'Fin_Ene_Liq_Hyd_syn','Liquids-synfuel','orchid',
'Fin_Ene_Gas_Fos','Gases-fossil','moccasin',
'Fin_Ene_Gas_Hyd_syn','Gases-synfuel','orchid1',
'Fin_Ene_Liq_and_Sol_Bio','Biomass','darkolivegreen2',
'Fin_Ene_Ele','Electricity','lightsteelblue',
'Fin_Ene_Heat','Heat','salmon',
'Fin_Ene_Hyd','Hydrogen','thistle2',
'Fin_Ene_Oth_inc_Solarand_Geo','Other','grey90')
lst$leg <- as.character(df$var$Legend); names(lst$leg) <- as.character(df$var$Variable)
lst$col <- as.character(df$var$Color); names(lst$col) <- as.character(df$var$Variable)
# Variable levels are reversed so the stacking order follows the table
# top-to-bottom; colours are matched by name via scale_fill_manual.
p$tmp1 <- df$all %>% filter(Region=='World',Year>=2020) %>%
filter(Variable%in%df$var$Variable) %>%
inner_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
mutate(Variable=factor(Variable,levels=rev(df$var$Variable)),scen_wrap=factor(scen_wrap,levels=df$scen_lab$scen_wrap),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens_all)) %>%
ggplot()+
geom_area(aes(x=Year,y=Value,fill=Variable),position='stack',show.legend=T)+
facet_wrap(~scen_wrap,nrow=3)+
labs(title=NULL,x=NULL,y=expression(paste('Final energy demand (EJ ',yr^{-1},')')))+
mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
scale_fill_manual(values=rev(lst$col),labels=lst$leg,name=NULL)
# Supplementary Fig.1 panel B: shares of fossil fuels, synfuels and biomass
# in global final energy, one line+point series per share variable.
df$var <- tribble(~Variable,~Legend,~Color,~Shape,
'Fin_Ene_Share_Fos','Fossil fuel share','tan3',21,
'Fin_Ene_Share_Syn_Hyd','Synfuels share','purple',23,
'Fin_Ene_Share_Bio','Biomass share','darkolivegreen3',24)
lst$leg <- as.character(df$var$Legend); names(lst$leg) <- as.character(df$var$Variable)
lst$col <- as.character(df$var$Color); names(lst$col) <- as.character(df$var$Variable)
# BUGFIX: the names were previously assigned to lst$col (a second time)
# instead of lst$shp, leaving the shape vector unnamed. Because the Variable
# factor levels are reversed below, an unnamed values vector would pair
# shapes with the wrong series in scale_shape_manual(); naming lst$shp by
# Variable makes the shape-to-series mapping explicit and order-independent.
lst$shp <- as.numeric(df$var$Shape); names(lst$shp) <- as.character(df$var$Variable)
p$tmp2 <- df$all %>% filter(Region=='World',Year>=2020) %>%
filter(Variable%in%df$var$Variable) %>%
inner_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
mutate(Variable=factor(Variable,levels=rev(df$var$Variable)),scen_wrap=factor(scen_wrap,levels=df$scen_lab$scen_wrap),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens_all)) %>%
ggplot()+
geom_path(aes(x=Year,y=Value,color=Variable),show.legend=T)+
geom_point(aes(x=Year,y=Value,color=Variable,shape=Variable),fill='white',show.legend=T)+
scale_y_continuous(limits=c(0,NA),labels=scales::percent)+
facet_wrap(~scen_wrap,nrow=3)+
labs(title=NULL,x=NULL,y='Final energy share (%)')+
mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
scale_color_manual(values=lst$col,labels=lst$leg,name=NULL)+
scale_shape_manual(values=lst$shp,labels=lst$leg,name=NULL)
# Stack panels A and B and export; figS1B.png additionally saves panel B
# on its own.
p$tmp <- plot_grid(p$tmp1,p$tmp2,ncol=1,rel_heights=c(1.2,1),labels=c('A','B'),label_size=12)
ggsave(filename='output/figS1.png',plot=p$tmp,width=174,height=200,units='mm',dpi=300)
ggsave(filename='output/figS1B.png',plot=p$tmp2,width=174,height=90,units='mm',dpi=300)
# Supplementary Fig.2 -----------------------------------------------------
# Technology diffusion rates (stock shares, Tec_sto_Sha_*) of electrified /
# hydrogen end-use technologies, summed per device group and faceted by
# device, for the 500-Gt scenario set.
df$var <- tribble(~Variable,~Device,
'Tec_sto_Sha_Tra_Fre_Roa_BEV','Freight',
'Tec_sto_Sha_Tra_Fre_Roa_FCV','Freight',
'Tec_sto_Sha_Tra_Pss_Roa_PHV','Passenger',
'Tec_sto_Sha_Tra_Pss_Roa_BEV','Passenger',
'Tec_sto_Sha_Tra_Pss_Roa_FCV','Passenger',
'Tec_sto_Sha_Ind_HeatingBoi_Ele','Boiler',
'Tec_sto_Sha_Ind_HeatingBoi_Hyd','Boiler',
'Tec_sto_Sha_Ind_HeatingFur_Ele','Furnace',
'Tec_sto_Sha_Ind_HeatingFur_Hyd','Furnace',
'Tec_sto_Sha_Com_HeatingSpa_EHP','Commercial',
'Tec_sto_Sha_Com_HeatingSpa_Oth','Commercial',
'Tec_sto_Sha_Res_HeatingSpa_EHP','Residential',
'Tec_sto_Sha_Res_HeatingSpa_Oth','Residential')
# Facet strip labels (with line breaks) per device group.
df$lab_tech <- tribble(~Device,~Tech_Label,
'Passenger','Passenger\nroad transport',
'Freight','Freight\nroad transport',
'Boiler','Industry\nboiler',
'Furnace','Industry\nfurnace',
'Residential','Residential\nspace heating',
'Commercial','Commercial\nspace heating')
# Point shapes keyed by sensitivity-case label; also reused by later figures.
lst$shp <- as.integer(df$scen_sens_shape$Shape); names(lst$shp) <- as.character(df$scen_sens_shape$scen_sens_var)
p$tmp <- df$all %>% filter(Region=='World',Year>=2020) %>%
inner_join(df$var,by='Variable') %>%
# Sum shares of all technologies belonging to the same device group.
group_by(Model,Scenario,Device,Region,Year) %>% summarise(Value=sum(Value),.groups='drop') %>%
inner_join(df$lab_tech,by='Device') %>%
mutate(Tech_Label=factor(Tech_Label,levels=df$lab_tech$Tech_Label)) %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens_all)) %>%
filter(Scenario%in%lst$scen_500all) %>%
ggplot()+
geom_line(aes(x=Year,y=Value,color=scen_sens_base,linetype=scen_sens_var),show.legend=T)+
geom_point(aes(x=Year,y=Value,color=scen_sens_base,shape=scen_sens_var),show.legend=T)+
scale_x_continuous(limits=c(2020,2050),breaks=seq(2020,2050,by=10))+
scale_y_continuous(limits=c(0,1), labels=scales::percent)+
facet_wrap(~Tech_Label,nrow=1)+
scale_color_manual(values=lst$scen_col)+
scale_shape_manual(values=lst$shp,name=NULL)+
scale_linetype_manual(values=lst$lin_scen,name=NULL)+
mytheme$set1+theme(legend.position='bottom',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
labs(x=NULL,y='Technology diffusion rate')+
guides(color=guide_legend(title=NULL))
ggsave(filename='output/figS2.png',plot=p$tmp,width=174,height=90,units='mm',dpi=300)
# Supplementary Fig.3 -----------------------------------------------------
# Electricity losses (transmission, storage, curtailment) in 2030/2040/2050,
# stacked bars, one facet per scenario.
df$var <- tribble(~Variable,~Legend,~Color,
'Sec_Ene_Ele_Tra_Los','Transmission loss','moccasin',
'Sec_Ene_Ele_Sto_Los','Storage loss','darkgoldenrod2',
'Sec_Ene_Ele_Cur','Curtailment','tan3')
lst$leg <- as.character(df$var$Legend); names(lst$leg) <- as.character(df$var$Variable)
lst$col <- as.character(df$var$Color); names(lst$col) <- as.character(df$var$Variable)
p$tmp <- df$all %>% filter(Region=='World',Year%in%c(2030,2040,2050)) %>%
filter(Variable%in%df$var$Variable) %>%
inner_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
mutate(Variable=factor(Variable,levels=rev(df$var$Variable)),scen_wrap=factor(scen_wrap,levels=df$scen_lab$scen_wrap)) %>%
ggplot()+
geom_bar(aes(x=Year,y=Value,fill=Variable),stat='identity',position='stack',show.legend=T)+
facet_wrap(~scen_wrap,nrow=3)+
labs(x=NULL,y=expression(paste('Electricity losses (EJ ',yr^{-1},')')))+
mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
scale_fill_manual(values=rev(lst$col),labels=lst$leg,name=NULL)
ggsave(filename='output/figS3.png',plot=p$tmp,width=174,height=100,units='mm',dpi=300)
# Supplementary Fig.4 -----------------------------------------------------
# Hydrogen generation by production route (electrolysis, fossil/biomass with
# and without CCS) in 2030/2040/2050, stacked bars, faceted by scenario.
df$var <- tribble(~Variable,~Legend,~Color,
'Sec_Ene_Hyd_Ele','Electricity','lightsteelblue',
'Sec_Ene_Hyd_Fos_wo_CCS','Fossil w/o CCS','sandybrown',
'Sec_Ene_Hyd_Fos_w_CCS','Fossil w/ CCS','tan3',
'Sec_Ene_Hyd_Bio_wo_CCS','Biomass w/o CCS','darkolivegreen2',
'Sec_Ene_Hyd_Bio_w_CCS','Biomass w/ CCS','darkolivegreen4')
lst$leg <- as.character(df$var$Legend); names(lst$leg) <- as.character(df$var$Variable)
lst$col <- as.character(df$var$Color); names(lst$col) <- as.character(df$var$Variable)
# Fill colours are matched by variable name (named vector), so passing
# lst$col un-reversed is equivalent to the rev(...) used elsewhere.
p$tmp <- df$all %>% filter(Region=='World',Year%in%c(2030,2040,2050)) %>%
filter(Variable%in%df$var$Variable) %>%
inner_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
mutate(Variable=factor(Variable,levels=rev(df$var$Variable)),scen_wrap=factor(scen_wrap,levels=df$scen_lab$scen_wrap),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens_all)) %>%
ggplot()+
geom_bar(aes(x=Year,y=Value,fill=Variable),position='stack',stat='identity',show.legend=T)+
facet_wrap(~scen_wrap,nrow=3)+
labs(title=NULL,x=NULL,y=expression(paste('Hydrogen generation (EJ ',yr^{-1},')')))+
mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
scale_fill_manual(values=lst$col,labels=lst$leg,name=NULL)
ggsave(filename='output/figS4.png',plot=p$tmp,width=174,height=100,units='mm',dpi=300)
# Supplementary Fig.5 -----------------------------------------------------
# Panel B (p$tmp1): global primary energy supply by source, stacked areas,
# faceted by scenario. Assembled with the power-generation panel below.
df$var <- tribble(~Variable,~Legend,~Color,
'Prm_Ene_Oil_wo_CCS','Oil w/o CCS','sandybrown',
'Prm_Ene_Oil_w_CCS','Oil w/ CCS','tan3',
'Prm_Ene_Coa_wo_CCS','Coal w/o CCS','grey50',
'Prm_Ene_Coa_w_CCS','Coal w/ CCS','grey30',
'Prm_Ene_Gas_wo_CCS','Gas w/o CCS','lightgoldenrod',
'Prm_Ene_Gas_w_CCS','Gas w/ CCS','lightgoldenrod3',
'Prm_Ene_Nuc','Nuclear','moccasin',
'Prm_Ene_Bio_wo_CCS','Biomass w/o CCS','darkolivegreen2',
'Prm_Ene_Bio_w_CCS','Biomass w/ CCS','darkolivegreen4',
'Prm_Ene_Hyd','Hydro','lightsteelblue',
'Prm_Ene_Geo','Geothermal','peru',
'Prm_Ene_Solar','Solar','lightsalmon',
'Prm_Ene_Win','Wind','lightskyblue3',
'Prm_Ene_Oce','Ocean','paleturquoise3',
'Prm_Ene_Oth','Other','grey')
lst$leg <- as.character(df$var$Legend); names(lst$leg) <- as.character(df$var$Variable)
lst$col <- as.character(df$var$Color); names(lst$col) <- as.character(df$var$Variable)
p$tmp1 <- df$all %>% filter(Region=='World',Year>=2020) %>%
filter(Variable%in%df$var$Variable) %>%
inner_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
mutate(Variable=factor(Variable,levels=rev(df$var$Variable)),scen_wrap=factor(scen_wrap,levels=df$scen_lab$scen_wrap),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens_all)) %>%
ggplot()+
geom_area(aes(x=Year,y=Value,fill=Variable),position='stack',show.legend=T)+
facet_wrap(~scen_wrap,nrow=3)+
labs(title=NULL,x=NULL,y=expression(paste('Primary energy (EJ ',yr^{-1},')')))+
mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
scale_fill_manual(values=rev(lst$col),labels=lst$leg,name=NULL)
# Supplementary Fig.5 panel A (p$tmp2): global power generation by source,
# stacked areas, faceted by scenario; then assemble A over B and export.
df$var <- tribble(~Variable,~Legend,~Color,
'Sec_Ene_Ele_Oil_wo_CCS','Oil w/o CCS','sandybrown',
'Sec_Ene_Ele_Oil_w_CCS','Oil w/ CCS','tan3',
'Sec_Ene_Ele_Coa_wo_CCS','Coal w/o CCS','grey50',
'Sec_Ene_Ele_Coa_w_CCS','Coal w/ CCS','grey30',
'Sec_Ene_Ele_Gas_wo_CCS','Gas w/o CCS','lightgoldenrod',
'Sec_Ene_Ele_Gas_w_CCS','Gas w/ CCS','lightgoldenrod3',
'Sec_Ene_Ele_Nuc','Nuclear','moccasin',
'Sec_Ene_Ele_Hyd','Hydro','lightsteelblue',
'Sec_Ene_Ele_Bio_wo_CCS','Biomass w/o CCS','darkolivegreen2',
'Sec_Ene_Ele_Bio_w_CCS','Biomass w/ CCS','darkolivegreen4',
'Sec_Ene_Ele_Geo','Geothermal','peru',
'Sec_Ene_Ele_SolarCSP','CSP','darksalmon',
'Sec_Ene_Ele_SolarPV','Solar PV','lightsalmon',
'Sec_Ene_Ele_Win','Wind','lightskyblue3',
'Sec_Ene_Ele_Oce','Ocean','paleturquoise3',
'Sec_Ene_Ele_Hyd_GT','Hydrogen','orchid',
'Sec_Ene_Ele_Oth','Other','grey')
lst$leg <- as.character(df$var$Legend); names(lst$leg) <- as.character(df$var$Variable)
lst$col <- as.character(df$var$Color); names(lst$col) <- as.character(df$var$Variable)
p$tmp2 <- df$all %>% filter(Region=='World',Year>=2020) %>%
filter(Variable%in%df$var$Variable) %>%
inner_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
mutate(Variable=factor(Variable,levels=rev(df$var$Variable)),scen_wrap=factor(scen_wrap,levels=df$scen_lab$scen_wrap),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens_all)) %>%
ggplot()+
geom_area(aes(x=Year,y=Value,fill=Variable),position='stack',show.legend=T)+
facet_wrap(~scen_wrap,nrow=3)+
labs(x=NULL,y=expression(paste('Power generation (EJ ',yr^{-1},')')))+
mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
scale_fill_manual(values=rev(lst$col),labels=lst$leg,name=NULL)
# Power generation on top (A), primary energy below (B).
p$tmp <- plot_grid(p$tmp2,p$tmp1,ncol=1,rel_heights=c(1,1),labels=c('A','B'),label_size=12)
ggsave(filename='output/figS5.png',plot=p$tmp,width=174,height=240,units='mm',dpi=300)
# Supplementary Fig.6 -----------------------------------------------------
# Carbon capture (plotted upward) vs. sequestration/utilization (plotted
# downward via sign flip on the 'Storage' axis group), Gt-CO2/yr.
df$var <- tribble(~Variable,~Legend,~Color,~Axis,
'Car_Seq_Geo_Sto','Underground\nstorage','darkgoldenrod2','Storage',
'Car_Uti_Ene','Utilization','orchid1','Storage',
'Car_Cap_Fos_Ene_Sup','Energy\nsupply','moccasin','Capture',
'Car_Cap_Fos_Ene_Dem_Ind','Industry','salmon','Capture',
'Car_Cap_Ind_Pro','Industrial\nprocess','grey','Capture',
'Car_Cap_Bio_Ene_Sup','Bioenergy','darkolivegreen2','Capture',
'Car_Cap_Dir_Air_Cap','DAC','lightsteelblue','Capture')
lst$leg <- as.character(df$var$Legend); names(lst$leg) <- as.character(df$var$Variable)
lst$col <- as.character(df$var$Color); names(lst$col) <- as.character(df$var$Variable)
p$tmp <- df$all %>% filter(Region=='World',Year%in%seq(2030,2050,10)) %>%
filter(Variable%in%df$var$Variable) %>%
inner_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
# Mt -> Gt, then negate the storage/utilization side of the balance.
mutate(Value=Value/1000) %>%
mutate(Value=if_else(Axis=='Storage',-Value,Value)) %>%
mutate(Variable=factor(Variable,levels=rev(df$var$Variable)),scen_wrap=factor(scen_wrap,levels=df$scen_lab$scen_wrap),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens_all)) %>%
ggplot()+
geom_hline(yintercept=0,color='black',size=.25)+
geom_bar(aes(x=Year,y=Value,fill=Variable),position='stack',stat='identity',show.legend=T)+
facet_wrap(~scen_wrap,nrow=3)+
labs(title=NULL,x=NULL,y=expression(paste('Carbon capture and sequestration (Gt-',CO[2],' ',yr^{-1},')')))+
mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
scale_fill_manual(values=rev(lst$col),labels=lst$leg,name=NULL)
ggsave(filename='output/figS6.png',plot=p$tmp,width=174,height=100,units='mm',dpi=300)
# Supplementary Fig.7 -----------------------------------------------------
# Alluvial (Sankey-style, ggalluvial) diagram of carbon flows for the
# 1.5C-CCU scenario: Source -> Capture & Use -> Sequestration -> Destination,
# one facet per year (2020/2030/2050).
df$var <- tribble(~Variable,~Source,~'Capture & Use',~Sequestration,~Destination,
'Emi_CO2_Ene_Com_exc_CCUS','Fossil','Fossil','Emission','Atmosphere',
'Emi_CO2_Ene_Bio_exc_CCUS','Atmosphere','Biomass','Neutral','Atmosphere',
'CCUSUti_Ene_Bio','Atmosphere','Biomass','Utilization','Atmosphere',
'CCUSUti_Ene_Dir_Air_Cap','Atmosphere','DAC','Utilization','Atmosphere',
'CCUSUti_Ene_Fos','Fossil','Fossil','Utilization','Atmosphere',
'CCUSGeo_Sto_Bio','Atmosphere','Biomass','Storage','Ground',
'CCUSGeo_Sto_Dir_Air_Cap','Atmosphere','DAC','Storage','Ground',
'CCUSGeo_Sto_Fos','Fossil','Fossil','Storage','Ground')
# Stratum ordering (top to bottom after the rev() below) and fill colours,
# shared across all four axes of the alluvial plot.
lst$Seq <- c('Storage','Utilization','Neutral','Emission','DAC','Biomass','Fossil','Ground','Atmosphere')
lst$col <- c('Storage'='moccasin','Utilization'='orchid1','Neutral'='darkolivegreen2','Emission'='grey',
'Biomass'='darkolivegreen2','DAC'='thistle2','Fossil'='grey',
'Ground'='grey','Atmosphere'='lightsteelblue2')
p$tmp <- df$all %>% filter(Region=='World',Year%in%c(2020,2030,2050)) %>%
inner_join(df$var,by='Variable') %>%
# Mt -> Gt.
mutate(Value=Value/1000) %>%
inner_join(df$scen_lab,by='Scenario') %>%
filter(scen_lab=='1.5C-CCU') %>%
select(colnames(df$var),Value,Year) %>%
# Long format: one row per (flow variable, axis position); Carrier is the
# stratum the flow passes through at that axis.
pivot_longer(cols=!c(Year,Variable,Value),names_to='x',values_to='Carrier') %>%
# 'Neutral'/'Emission' strata on the Sequestration axis are half-alpha and
# unlabelled; label positions shift left/right depending on the axis.
mutate(Alpha=ifelse(x=='Sequestration'&Carrier%in%c('Neutral','Emission'),0.5,1)) %>%
mutate(Label=ifelse(x=='Sequestration'&Carrier%in%c('Neutral','Emission'),' ',Carrier)) %>%
mutate(Position=ifelse(x=='Destination',.8,ifelse(x=='Source',.2,.5))) %>%
mutate(Variable=factor(Variable,levels=rev(df$var$Variable)),
Carrier=factor(Carrier,levels=rev(lst$Seq))) %>%
ggplot(aes(x=x,y=Value,alluvium=Variable,stratum=Carrier,label=Carrier))+
geom_flow(aes(fill=Carrier),alpha=.5,show.legend=F)+
geom_stratum(aes(fill=Carrier,alpha=Alpha),color='transparent',show.legend=F)+
geom_text(aes(label=Label,hjust=Position),stat='stratum',size=2.5)+
labs(title=NULL,x=NULL,y=expression(paste('Carbon flow (Gt-',CO[2],' ',yr^{-1},')')))+
scale_x_discrete(limits=colnames(df$var)[-1],expand=c(.05,.05))+
scale_fill_manual(values=lst$col,name=NULL)+
scale_alpha_continuous(limits=c(0,1),range=c(0,1))+
facet_grid(.~Year)+
mytheme$set1+
theme(legend.position='bottom',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
guides(fill=guide_legend(title=NULL))
ggsave(filename='output/figS7.png',plot=p$tmp,width=174,height=80,units='mm',dpi=300)
# Supplementary Fig.8 -----------------------------------------------------
# Panel A (left): 2050 carbon prices — bars for the default sensitivity
# case, points overlaying the other sensitivity cases.
# Common y-maximum across this study's representative scenarios AND the
# AR6 C1-C3 database, so the side-by-side panels share one scale.
lst$Prc_Car_AR6_max <- df$load_AR6_global %>%
filter(Variable=='Prc_Car',Year==2050,Category%in%c('C1','C2','C3')) %>%
mutate(Value=ceiling(Value)) %>% .$Value %>% max()
lst$Prc_Car_max <- df$all %>%
filter(Scenario%in%lst$scen_rep,Region=='World',Year==2050) %>%
filter(Variable=='Prc_Car') %>%
mutate(Value=ceiling(Value)) %>% .$Value %>% max(lst$Prc_Car_AR6_max)
p$tmp1 <- df$all %>% filter(Region=='World',Year==2050) %>%
filter(Variable=='Prc_Car') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens_all)) %>%
ggplot()+
geom_bar(data=. %>% filter(scen_sens_var=='Default'),
aes(x=scen_sens_base,y=Value,fill=scen_sens_base),stat='identity',position='stack',show.legend=T)+
geom_point(aes(x=scen_sens_base,y=Value,shape=scen_sens_var),color='black',fill='white',show.legend=T)+
# Full scale limit for legend consistency, but zoom the view to 0-2000.
scale_y_continuous(limits=c(0,lst$Prc_Car_max))+
coord_cartesian(ylim=c(0,2000))+
scale_fill_manual(values=lst$scen_col_all)+
scale_shape_manual(values=lst$shp,name=NULL)+
labs(title=NULL,x=NULL,y=expression(paste('Carbon prices (US$ t-',{CO[2]}^{-1},')')))+
mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
guides(fill=guide_legend(title=NULL,override.aes=list(color='transparent')),
shape=guide_legend(title="This study's\nscenario",override.aes=list(fill='transparent')))
# Supplementary Fig.8 panel A (right): AR6 database carbon-price ranges for
# categories C1-C3 in 2050 — p10-p90 box, min-max whisker box, scenario
# count label, and marker points for the main IMP scenarios.
# IMP marker scenarios to overlay as points.
# CONSISTENCY FIX: Year was compared against the string '2050' (relying on
# implicit numeric->character coercion); use numeric 2050 like every other
# Year filter in this script.
df$tmp <- df$load_AR6_global %>%
filter(Variable=='Prc_Car',Year==2050,Category%in%c('C1','C2','C3'),IMP_marker%in%lst$IMP_main) %>%
mutate(IMP_marker=factor(IMP_marker,levels=lst$IMP_main))
p$tmp2 <- df$load_AR6_global %>%
filter(Variable=='Prc_Car',Year==2050,Category%in%c('C1','C2','C3')) %>%
# fcalc_range_category() summarises per category (p0/p10/p50/p90/p100, n).
fcalc_range_category() %>%
ggplot()+
geom_crossbar(aes(x=Category,ymin=p10,ymax=p90,y=p50),width=.75,color='white',fill='grey')+
geom_crossbar(aes(x=Category,ymin=p0,ymax=p100,y=p0),width=.75,color='grey',fill='transparent',fatten=0)+
geom_text(aes(x=Category,label=str_c('n=',n)),y=2000,size=2,angle=90,hjust=1.3,vjust=.5)+
geom_point(data=df$tmp,aes(x=Category,y=Value,shape=IMP_marker),size=1.5,show.legend=T)+
# Same scale limit and zoom window as p$tmp1 so the panels align; the
# left y-axis decorations are removed because p$tmp1 provides them.
scale_y_continuous(limits=c(0,lst$Prc_Car_max))+
coord_cartesian(ylim=c(0,2000))+
labs(x=NULL,y=NULL)+
scale_shape_manual(values=lst$IMP_main_shp)+
mytheme$set1+theme(axis.text.x=element_text(angle=45,hjust=1),
axis.text.y.left=element_blank(),axis.line.y.left=element_blank(),axis.ticks.y.left=element_blank())
# Keep the carbon-price legend for later figure assembly.
p$l_carpri <- get_legend(p$tmp1)
# Panel B: 2050 energy system cost as share of GDP — bars for the default
# sensitivity case, points for the other cases.
p$tmp3 <- df$all %>% filter(Region=='World',Year==2050) %>%
filter(Variable=='Pol_Cos_per_GDP') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens_all)) %>%
ggplot()+
geom_bar(data=. %>% filter(scen_sens_var=='Default'),
aes(x=scen_sens_base,y=Value,fill=scen_sens_base),stat='identity',position='stack',show.legend=T)+
geom_point(aes(x=scen_sens_base,y=Value,shape=scen_sens_var),color='black',fill='white',show.legend=T)+
scale_y_continuous(limits=c(0,NA),labels=scales::percent_format(accuracy=1))+
scale_fill_manual(values=lst$scen_col_all)+
scale_shape_manual(values=lst$shp,name=NULL)+
scale_linetype_manual(values=lst$lin_scen,name=NULL)+
labs(title=NULL,x=NULL,y='Energy system cost (% of GDP)')+
mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
guides(fill=guide_legend(title=NULL,override.aes=list(color='transparent')),
shape=guide_legend(title=NULL,override.aes=list(fill='transparent')))
# Row of panels A (bars + AR6 range, sharing a y-axis) and B, legend-less;
# legends are added during final assembly.
p$polcos_all <- plot_grid(p$tmp1+theme(legend.position='none',plot.margin=unit(c(5.5,0,5.5,5.5),unit='pt')),
p$tmp2+theme(legend.position='none',plot.margin=unit(c(5.5,5.5,5.5,0),unit='pt')),
p$tmp3+theme(legend.position='none'),
nrow=1,axis='tb',align='h',rel_widths=c(1,.4,1),labels=c('A','','B'),label_size=12)
# Supplementary Fig.8 panel C: additional investment in 2041-50 by R5 world
# region, representative scenarios; then assemble the full figure and save.
df$var <- tribble(~Variable,~Legend,~Color,
'Inv_Add_Ene_Dem','Energy demand','darkolivegreen2',
'Inv_Add_Ene_Sup_Ele','Electricity','lightsteelblue',
'Inv_Add_Ene_Sup_Hyd','Hydrogen','thistle2',
'Inv_Add_Ene_Sup_Oth','Other energy supply','moccasin',
'Inv_Add_CCS','CCS','darkgoldenrod2')
lst$leg <- as.character(df$var$Legend); names(lst$leg) <- as.character(df$var$Variable)
lst$col <- as.character(df$var$Color); names(lst$col) <- as.character(df$var$Variable)
p$inv_R5 <- df$all %>% filter(Region%in%df$R5map$Region,Year>2020) %>%
filter(Variable%in%df$var$Variable) %>%
# Bucket years into decades, average within each, keep only the 2041-50
# decade (Year==2050 after bucketing), label it "2041-50".
mutate(Year=ceiling(Year/10)*10) %>%
group_by(Model,Scenario,Region,Variable,Year) %>% summarise(Value=sum(Value)/n(),.groups='drop') %>%
filter(Year==2050) %>%
mutate(Year=str_c(Year-9,'-',Year-2000)) %>%
inner_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
inner_join(df$R5map,by='Region') %>%
filter(Scenario%in%lst$scen_rep) %>%
mutate(Variable=factor(Variable,levels=rev(df$var$Variable)),scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens),
R5lab=factor(R5lab,levels=df$R5map$R5lab)) %>%
ggplot()+
geom_bar(aes(x=scen_lab,y=Value,fill=Variable),stat='identity',position='stack',show.legend=T)+
facet_wrap(~R5lab,nrow=1)+
labs(x=NULL,y=expression(paste('Additional investment (billion US$ ',yr^{-1},')')))+
mytheme$set1+theme(legend.position='bottom',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1),plot.margin=unit(c(5.5,0,5.5,13),unit='pt'))+
scale_fill_manual(values=lst$col,labels=lst$leg,name=NULL)
# Assemble: panels A/B row with its legends on the right, panel C below.
p$l_tmp <- plot_grid(p$l_IMP,ggplotGrob(p$l_rangeleg),ncol=1,rel_heights=c(1,.6))
p$tmp1 <- plot_grid(p$polcos_all,p$l_carpri,p$l_tmp,nrow=1,rel_widths=c(1,.23,.27))
p$tmp <- plot_grid(p$tmp1,p$inv_R5,ncol=1,rel_heights=c(1.1,1),labels=c('','C'),label_size=12)
ggsave(filename='output/figS8.png',plot=p$tmp,width=174,height=175,units='mm',dpi=300)
# Supplementary Fig.9 ----------------------------------------------------
# Final energy demand by carrier for each R5 region (rows) and each
# representative scenario (columns); free y-scales per region row.
df$var <- tribble(~Variable,~Legend,~Color,
'Fin_Ene_SolidsCoa','Coal','grey70',
'Fin_Ene_Liq_Oil_and_Nat_Gas','Liquids\n-fossil','sandybrown',
'Fin_Ene_Liq_Hyd_syn','Liquids\n-synfuel','orchid',
'Fin_Ene_Gas_Fos','Gases\n-fossil','moccasin',
'Fin_Ene_Gas_Hyd_syn','Gases\n-synfuel','orchid1',
'Fin_Ene_Liq_and_Sol_Bio','Biomass','darkolivegreen2',
'Fin_Ene_Ele','Electricity','lightsteelblue',
'Fin_Ene_Heat','Heat','salmon',
'Fin_Ene_Hyd','Hydrogen','thistle2',
'Fin_Ene_Oth_inc_Solarand_Geo','Other','grey90')
lst$leg <- as.character(df$var$Legend); names(lst$leg) <- as.character(df$var$Variable)
lst$col <- as.character(df$var$Color); names(lst$col) <- as.character(df$var$Variable)
p$tmp <- df$all %>% filter(Region%in%df$R5map$Region,Year>=2020) %>%
filter(Variable%in%df$var$Variable) %>%
inner_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$R5map,by='Region') %>%
filter(Scenario%in%lst$scen_rep) %>%
mutate(Variable=factor(Variable,levels=rev(df$var$Variable)),scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab),
R5lab=factor(R5lab,levels=df$R5map$R5lab)) %>%
ggplot()+
geom_area(aes(x=Year,y=Value,fill=Variable),stat='identity',position='stack',show.legend=T)+
labs(x=NULL,y=expression(paste('Final energy demand (EJ ',yr^{-1},')')))+
facet_grid(R5lab~scen_lab,scales='free_y')+
mytheme$set1+theme(legend.position='right',strip.background=element_blank(),legend.spacing=unit(10,'mm'),axis.text.x=element_text(angle=45,hjust=1))+
scale_fill_manual(values=rev(lst$col),labels=lst$leg,name=NULL)+
guides(fill=guide_legend(byrow=T))
# Render to the active device in addition to saving the file.
print(p$tmp)
ggsave(filename='output/figS9.png',plot=p$tmp,width=174,height=130,units='mm',dpi=300)
# Supplementary Fig.10 ----------------------------------------------------
# Capital cost trajectories of key technologies in the OECD90+EU region for
# the 1.5C-CCU sensitivity family; one facet per technology, free y-scales.
df$var <- tribble(~Variable,~Device,
'Cap_Cos_Ele_SolarPV','Solar PV',
'Cap_Cos_Ele_Win_Ons','Wind onshore',
'Cap_Cos_Hyd_Ele','Electrolysis',
'Cap_Cos_Liq_Hyd_syn','Synfuel\nproduction',
'Cap_Cos_Dir_Air_Cap','DAC')
p$tmp <- df$all %>% filter(Region=='R5OECD90+EU',Year>=2020) %>%
inner_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
inner_join(df$scen_sens_cat,by='Scenario') %>%
mutate(scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab),
scen_sens_base=factor(scen_sens_base,levels=lst$scen_cat),scen_sens_var=factor(scen_sens_var,lst$scen_sens_all)) %>%
mutate(Device=factor(Device,levels=df$var$Device)) %>%
filter(Scenario%in%lst$scen_500all) %>%
filter(scen_sens_base=='1.5C-CCU') %>%
# Blank out DAC costs before 2030 (set to NA so no line/point is drawn).
mutate(Value=ifelse(Variable=='Cap_Cos_Dir_Air_Cap'&Year<2030,NA,Value)) %>%
ggplot()+
geom_line(aes(x=Year,y=Value,linetype=scen_sens_var),show.legend=T)+
geom_point(aes(x=Year,y=Value,shape=scen_sens_var),fill='white',show.legend=T)+
scale_x_continuous(limits=c(2020,2050),breaks=seq(2020,2050,by=10))+
scale_y_continuous(limits=c(0,NA))+
facet_wrap(~Device,nrow=1,scales='free_y')+
# lst$shp here is the sensitivity-case shape lookup set in the Fig.S2 block.
scale_shape_manual(values=lst$shp,name=NULL)+
mytheme$set1+theme(legend.position='bottom',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
labs(x=NULL,y='Capital cost')+
guides(linetype=guide_legend(title=NULL),
shape=guide_legend(title=NULL))
ggsave(filename='output/figS10.png',plot=p$tmp,width=174,height=85,units='mm',dpi=300)
# Supplementary Fig.11 ----------------------------------------------------
# Energy-related CO2 emissions by sector (stacked areas) with the total
# including DAC overlaid as a line+points; representative scenarios.
df$var <- tribble(~Variable,~Legend,~Color,
'Emi_CO2_Ene_Sup','Energy Supply','moccasin',
'Emi_CO2_Ene_Dem_Ind_and_AFO','Industry','salmon',
'Emi_CO2_Ene_Dem_Res_and_Com','Buildings','lightsteelblue',
'Emi_CO2_Ene_Dem_Tra','Transportation','darkolivegreen2',
'Emi_CO2_Oth','DACCS','darkgoldenrod2')
lst$leg <- as.character(df$var$Legend); names(lst$leg) <- as.character(df$var$Variable)
lst$col <- as.character(df$var$Color); names(lst$col) <- as.character(df$var$Variable)
# Common upper y-limit: maximum of the total (incl. DAC) series, in Gt.
lst$emi_co2ene_max <- df$all %>%
filter(Variable=='Emi_CO2_Ene_inc_Dir_Air_Cap',Region=='World',Scenario%in%lst$scen_rep) %>%
mutate(Value=ceiling(Value/1000)) %>% .$Value %>% max()
p$emi_co2ene <- df$all %>% filter(Region=='World',Variable%in%c(df$var$Variable,'Emi_CO2_Ene_inc_Dir_Air_Cap')) %>%
left_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
filter(Scenario%in%lst$scen_rep) %>%
mutate(Value=Value/1000) %>%
mutate(Variable=factor(Variable,levels=rev(c(df$var$Variable,'Emi_CO2_Ene_inc_Dir_Air_Cap'))),scen_lab=factor(scen_lab,levels=df$scen_lab$scen_lab)) %>%
# Replace a DACCS value of exactly 0 that is immediately followed (in Year
# order) by a negative value with -0.001 — presumably to avoid a stacked-
# area artefact at the sign change; TODO confirm intent.
group_by(Model,Scenario,Region,Variable) %>% arrange(Year) %>%
mutate(Value=if_else(Variable=='Emi_CO2_Oth'&Value==0&lead(Value)<0,-.001,Value)) %>% ungroup() %>%
ggplot()+
geom_area(data=. %>% filter(Variable!='Emi_CO2_Ene_inc_Dir_Air_Cap'),
aes(x=Year,y=Value,fill=Variable),stat='identity',position='stack',show.legend=T)+
geom_path(data=. %>% filter(Variable=='Emi_CO2_Ene_inc_Dir_Air_Cap'),aes(x=Year,y=Value),show.legend=F)+
geom_point(data=. %>% filter(Variable=='Emi_CO2_Ene_inc_Dir_Air_Cap'),aes(x=Year,y=Value),shape=21,fill='white',show.legend=F)+
labs(x=NULL,y=expression(paste(CO[2],' emissions (Gt-',CO[2],' yr'^{-1},')')))+
facet_grid(.~scen_lab)+
mytheme$set1+theme(legend.position='right',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
scale_y_continuous(limits=c(-10,lst$emi_co2ene_max))+
scale_fill_manual(values=rev(lst$col),labels=lst$leg,name=NULL)+
guides(fill=guide_legend(title=NULL,override.aes=list(linetype=NULL,shape=NULL,color='transparent')))
# Same stacked-area plot as p$emi_co2ene, but for ALL scenarios (not only the
# representative ones), wrapped over three rows of facets.
p$emi_co2ene_all <- df$all %>% filter(Region=='World',Variable%in%c(df$var$Variable,'Emi_CO2_Ene_inc_Dir_Air_Cap')) %>%
left_join(df$var,by='Variable') %>%
inner_join(df$scen_lab,by='Scenario') %>%
mutate(Value=Value/1000) %>%
mutate(Variable=factor(Variable,levels=rev(c(df$var$Variable,'Emi_CO2_Ene_inc_Dir_Air_Cap'))),scen_wrap=factor(scen_wrap,levels=df$scen_lab$scen_wrap)) %>%
group_by(Model,Scenario,Region,Variable) %>% arrange(Year) %>%
# Same -0.001 nudge as in p$emi_co2ene (keeps the DACCS band continuous).
mutate(Value=if_else(Variable=='Emi_CO2_Oth'&Value==0&lead(Value)<0,-.001,Value)) %>% ungroup() %>%
ggplot()+
geom_area(data=. %>% filter(Variable!='Emi_CO2_Ene_inc_Dir_Air_Cap'),
aes(x=Year,y=Value,fill=Variable),stat='identity',position='stack',show.legend=T)+
geom_path(data=. %>% filter(Variable=='Emi_CO2_Ene_inc_Dir_Air_Cap'),aes(x=Year,y=Value),show.legend=F)+
geom_point(data=. %>% filter(Variable=='Emi_CO2_Ene_inc_Dir_Air_Cap'),
aes(x=Year,y=Value),shape=21,fill='white',show.legend=F)+
labs(x=NULL,y=expression(paste(CO[2],' emissions (Gt-',CO[2],' yr'^{-1},')')))+
facet_wrap(~scen_wrap,nrow=3)+
mytheme$set1+theme(legend.position='bottom',strip.background=element_blank(),axis.text.x=element_text(angle=45,hjust=1))+
scale_y_continuous(limits=c(-10,lst$emi_co2ene_max))+
# NOTE(review): p$emi_co2ene uses rev(lst$col) here but this panel does not —
# confirm the differing fill order is intentional.
scale_fill_manual(values=lst$col,labels=lst$leg,name=NULL)+
guides(fill=guide_legend(title=NULL,override.aes=list(linetype=NULL,shape=NULL,color='transparent')))
# Subset of AR6 global scenarios that carry an IMP marker, used to overlay
# individual points on the range plot below.
df$tmp <- df$load_AR6_global %>%
filter(Variable=='Emi_CO2_Ene',Year=='2050',Category%in%c('C1','C2','C3'),!(is.na(IMP_marker))) %>%
mutate(Value=Value/1000)
# AR6 comparison panel: per-category p10-p90 box (crossbar) with full p0-p100
# range, sample-size labels, and IMP marker scenarios as shaped points.
p$Emi_CO2ene_AR6 <- df$load_AR6_global %>%
filter(Variable=='Emi_CO2_Ene',Year=='2050',Category%in%c('C1','C2','C3')) %>%
mutate(Value=Value/1000) %>%
fcalc_range_category() %>%
ggplot()+
geom_crossbar(aes(x=Category,ymin=p10,ymax=p90,y=p50),width=.75,color='white',fill='grey')+
geom_crossbar(aes(x=Category,ymin=p0,ymax=p100,y=p0),width=.75,color='grey',fill='transparent',fatten=0)+
geom_text(aes(x=Category,label=str_c('n=',n),y=p100),size=2,vjust=-1)+
geom_point(data=df$tmp,aes(x=Category,y=Value,shape=IMP_marker),size=1.5)+
ylim(-10,lst$emi_co2ene_max)+
labs(x=NULL,y=NULL)+
scale_shape_manual(values=lst$IMP_main_shp)+
mytheme$set1+
guides(shape=guide_legend(title='AR6 IMPs'))
# Extract the shared legend, then assemble panel A (representative scenarios +
# AR6 ranges + legend) and panel B (all scenarios) into Supplementary Fig.11.
p$l_co2 <- get_legend(p$emi_co2ene+theme(legend.position='right'))
p$tmp1 <- plot_grid(p$emi_co2ene+theme(legend.position='none',plot.margin=unit(c(5.5,0,5.5,5.5),unit='pt')),
p$Emi_CO2ene_AR6+theme(axis.text.y=element_blank(),axis.line.y=element_blank(),axis.ticks.y=element_blank(),plot.margin=unit(c(5.5,5.5,5.5,0),unit='pt')),
p$l_co2,nrow=1,rel_widths=c(1,.5,.3),axis='tb',align='h')
p$tmp <- plot_grid(p$tmp1,p$emi_co2ene_all+theme(legend.position='none'),ncol=1,labels=c('A','B'),label_size=12,rel_heights=c(1,1.5))
ggsave(filename='output/figS11.png',plot=p$tmp,width=174,height=150,units='mm',dpi=300)
|
7bca2ee0fe6677380a41d20aab7986360e963cfc
|
db1a7b8867acc251e5765d0a307890f21ecf233a
|
/HW2/tmp/working/q3_1.r
|
0c834d48cf2a20df24d4cb9adb5ae60c8548f633
|
[] |
no_license
|
DeepeshLall/BayesianAnalysis
|
c2011be5543c7d94e331744ec23bb442f666d7db
|
a4a1be952d807101259e2356c6636cc2fb345670
|
refs/heads/master
| 2023-03-21T01:33:27.702735
| 2021-03-19T16:09:13
| 2021-03-19T16:09:13
| 349,481,755
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,191
|
r
|
q3_1.r
|
library(readxl)
library(plyr)
library(dplyr)
library(modeest)
library(truncnorm)
library(invgamma)
library(psych)
# Number of observations in the q2 output table.
noOfData = 1240
# Reading the data into Y and X from the file created by q2.r.
# First column is the binary response, columns 2-16 are the covariates.
q2_result <- read.table("~/Desktop/working/q2_result.Rdata", quote="\"", comment.char="")
Y <- q2_result[,1]
X <- q2_result[,2:16]
X = as.matrix(X)
# Prepend an intercept column of ones to the design matrix.
X = cbind(1,X)
colnames(X)[1] <- "Intercept"
# Rename the covariate columns to x2..x16 (column 1 stays "Intercept").
for(i in 2:16){
str=paste0("x",i)
colnames(X)[i] <- str
}
# Summary Report
# print(summary(Y))
# print(summary(X))
###################### Q3.a #######################
# Function to get likelihood for given Y, X and beta.
# Negative log-likelihood contribution of a single probit observation:
# -[ y * log(Phi(X beta)) + (1 - y) * log(1 - Phi(X beta)) ].
#
# y    : binary response (0/1), possibly vector-valued
# X    : covariate row (vector or 1-row matrix) conformable with beta
# beta : coefficient vector
get_ith_NLL <- function(y, X, beta) {
  p_success <- pnorm(X %*% beta)
  -(sum(y * log(p_success)) + sum((1 - y) * log(1 - p_success)))
}
# Total negative log-likelihood of the probit model: sums the per-observation
# contributions from get_ith_NLL() over the first `noOfData` rows of X.
#
# Fix: the original looped `for (i in 1:noOfData)`, which for noOfData == 0
# iterates over c(1, 0) and indexes nonexistent rows; seq_len() gives an
# empty loop (returning 0) in that case.
getNLL <- function(Y, X, beta, noOfData) {
  result <- 0
  for (i in seq_len(noOfData)) {
    result <- result + get_ith_NLL(Y[i], X[i, ], beta)
  }
  result
}
# Likelihood function for given Y and rho as sample mean(i.e. MLE)
# Negative log-likelihood of observation(s) y under a common probit index rho
# (i.e. every observation shares success probability Phi(rho)); used to
# evaluate the intercept-only model at the MLE rho.
get_ith_mean_NLL <- function(y, rho) {
  prob <- pnorm(rho)
  -(sum(y * log(prob)) + sum((1 - y) * log(1 - prob)))
}
# Total negative log-likelihood of the intercept-only (common rho) probit
# model over the first `noOfData` observations.
#
# Fix: use seq_len(noOfData) instead of 1:noOfData so noOfData == 0 yields an
# empty loop (total 0) instead of indexing elements 1 and 0.
get_mean_NLL <- function(Y, rho, noOfData) {
  result <- 0
  for (i in seq_len(noOfData)) {
    result <- result + get_ith_mean_NLL(Y[i], rho)
  }
  result
}
###################### Q3.b #######################
# Computing the sample mean of the binary response, then the probit index
# rho'' = qnorm(mean) that reproduces it; rho' repeats it for every row.
sample_mean = sum(Y)/noOfData
rho_dash_dash = qnorm(sample_mean, mean=0, sd=1)
rho_dash = rep(rho_dash_dash,noOfData)
# print(rho_dash_dash)
# Test whether two objects are matrices with identical dimensions and
# identical contents.
#
# Fix: the original wrote `dim(x) == dim(y)` (a length-2 logical vector)
# directly inside `&&`, which is an error in R >= 4.3 and silently compared
# only the first dimension in older R; wrapping it in all() compares both
# dimensions and keeps the short-circuit guarding `all(x == y)`.
matequal <- function(x, y) {
  is.matrix(x) && is.matrix(y) && all(dim(x) == dim(y)) && all(x == y)
}
# MLE attempt: solve the 16x16 system formed by the first 16 rows of X so
# that X beta reproduces rho' on those rows.
beta_MLE = solve(X[1:16,]) %*% rho_dash[1:16]
if(matequal(X %*% beta_MLE ,rho_dash)){
# print("Matrix Invertible")
}else{
# print(X %*% beta_MLE -rho_dash)
# print("MLE matrix not invertible")
}
# print(beta_MLE)
# print(X %*% beta_MLE)
# Ordinary least squares: beta = (X'X)^(-1) X'Y.
beta_OLS = solve(t(X) %*% X) %*% t(X) %*% Y
# print(beta_OLS)
# print(mean(X %*% beta_OLS))
# Decide Beta: the OLS estimate is the one reported below.
beta_non_mcmc = beta_OLS
# Mean linear predictor across all observations, used as the baseline index.
xi_time_beta_non_mcmc = mean(X %*% beta_non_mcmc)
rownames(beta_non_mcmc) <- t(c("beta1","beta2","beta3","beta4","beta5","beta6","beta7","beta8","beta9","beta10","beta11","beta12","beta13","beta14","beta15","beta16"))
print("Q3.b")
print(beta_non_mcmc)
###################### Q3.c #######################
covariateEffect = rep(0,max(X[,2]-min(X[,2])))
# Covariate effect of increasing age (column 2) by 5 years: difference of
# probit probabilities at age i+5 vs age i, holding the index at its mean.
# NOTE(review): the first iteration writes to covariateEffect[0], which R
# silently ignores, so the first age's effect is dropped — confirm intended.
for(i in min(X[,2]):(max(X[,2])-5)){
covariateEffect[i-min(X[,2])] = pnorm(xi_time_beta_non_mcmc-mean(X[,2])*beta_non_mcmc[2]+(i+5)*beta_non_mcmc[2], mean=0, sd=1) - pnorm(xi_time_beta_non_mcmc-mean(X[,2])*beta_non_mcmc[2]+i*beta_non_mcmc[2], mean=0, sd=1)
}
cat("\n\n")
print("Covariate Effect of Age difference of 5yr (Q3.c)")
print(covariateEffect[1:(max(X[,2])-5-min(X[,2]))])
print(paste0("Mean of Age Covariate : ",mean(covariateEffect[1:(max(X[,2])-5-min(X[,2]))])))
###################### Q3.d #######################
# Effect of the binary covariate in column 6: probability difference between
# its two levels, again holding the rest of the index at its mean.
binaryCovariateEffect = 0
binaryCovariateEffect = pnorm(xi_time_beta_non_mcmc-mean(X[,6])*beta_non_mcmc[6]+beta_non_mcmc[6], mean=0, sd=1) - pnorm(xi_time_beta_non_mcmc-mean(X[,6])*beta_non_mcmc[6], mean=0, sd=1)
cat("\n\n")
print(paste0("Binary covariate of parent (Q3.d) : ",binaryCovariateEffect))
|
3a0a1f4a0b45f3b4b97f4efa332e00f0704c0561
|
6322ce6b8a91ce012868bf40dd9ff28999ba2819
|
/A_Developing_a_reference_set/functions/required.funcs.r
|
5c5f775191dfed649767003b07002ff30ea2e062
|
[
"MIT"
] |
permissive
|
ices-eg/wg_WGNEPS
|
35d81c310e9c0e972c604fc20f03e37139234e46
|
a00af2764a97133bdf6f87f46e0ab27f42b83e92
|
refs/heads/master
| 2022-02-08T01:27:45.092876
| 2021-11-18T17:20:36
| 2021-11-18T17:20:36
| 120,731,989
| 0
| 2
|
MIT
| 2022-01-25T17:37:58
| 2018-02-08T08:19:22
|
R
|
UTF-8
|
R
| false
| false
| 9,618
|
r
|
required.funcs.r
|
#tapply.ID is rather like aggregate, but I wrote it before I discovered "aggregate" and actually it does a few more things!
#tapply.ID takes a data frame, a vector of the names of the data column, a vector of the factors to apply across, the name of the function to apply (i.e. sum) and the name of the new variable
#we then create an ID field in the dataframe and use that to run the tapply on. this way we don't try to expand out to include missing combinations of strata.
#returns a dataframe with the new value
#
# Arguments:
#   df         data frame to aggregate
#   datacols   character vector of data column names to aggregate
#   factorcols character vector of column names to aggregate over
#   func       function applied per stratum (passed na.rm=na.stuff)
#   newnames   names for the resulting aggregated columns (one per datacol)
#   pad        if TRUE, add rows (filled with padval) for factor combinations
#              absent from df; default FALSE
#   padval     fill value for padded rows; default NA
#   na.stuff   passed to func as na.rm; default TRUE
#   func.args  optional single extra argument forwarded to func; default NA
# Returns: a data frame with one row per observed (or padded) combination of
# factorcols, the aggregated value column(s) first.
tapply.ID <- function(df,datacols, factorcols, func, newnames, pad, padval, na.stuff, func.args)
{
#if pad omitted, default is FALSE
#if padval omitted, default is NA
if (missing(pad)) pad=FALSE
if(missing(padval))padval=NA
if(missing(na.stuff)) na.stuff=TRUE
if(missing(func.args)) func.args=NA
len <- length(factorcols)
colnum <- 0
good<-TRUE
#check that all of the data columns and all of the factors are contained in the DF
temp<-match( datacols, names(df))
if(is.na(sum(temp)))
{
#print(datacols[is.na(temp)])
warning(paste("returned nothing, requested data columns", datacols[is.na(temp)], "do not exist\n", sep=""))
good<-FALSE
}
temp<-match( factorcols, names(df))
if(is.na(sum(temp)))
{
#print(factorcols[is.na(temp)])
warning(paste("returned nothing, requested factor columns", factorcols[is.na(temp)], "do not exist\n",sep=""))
good<-FALSE
}
if(good)
{
# One pass per requested data column; results are cbind-ed together.
for(d in c(1:length(datacols)))
{
#find the column number whose name matches datacol
for(i in 1:length(df[1,]))
{
if(names(df)[i]==datacols[d]) {
colnum <- i}
}
#only proceed if colnum>0
if(colnum>0)
{
#first up create the ID999 field: all factor values joined with "@"
df$ID999 <- df[,factorcols[1]]
if(len>1)
{
for(i in 2:len)
{
df$ID999 <- paste(df$ID999, "@", df[,factorcols[i]], sep="")
}
}
#now run the tapply (func.args, when supplied, is forwarded as a single
#positional extra argument to func)
if(is.na(func.args))new.df <- vectorise.names(with(df, tapply(df[,colnum], ID999, func, na.rm=na.stuff)), c(newnames[d], "ID999"))
if(!is.na(func.args))new.df <- vectorise.names(with(df, tapply(df[,colnum], ID999, func,func.args, na.rm=na.stuff)), c(newnames[d], "ID999"))
if(pad==T)
{
#now work out all potential permutations of the factors - remove those that are already done and
#put padval into those remaining ones.
#get all the possible values for the first factorcols
t <- as.character((unique(df[, factorcols[1]])))
t <- as.data.frame(t)
names(t)[1] <- "ID999"
if(length(factorcols)>1)
{
# Build the full cross-product of factor levels as "@"-joined IDs.
for(i in c(2:length(factorcols)))
{
#how many values for the next factor
v <- sort(unique(df[, factorcols[i]]))
v.len <- length(v)
#print(v)
#create a new vector long enough to hold the interaction, fill with the existing vector,
#repeated as many times as there are new unique factors
t.len <- length(t$ID999)
x <-rep(t$ID999, each=v.len)
t2 <- as.data.frame(x)
names(t2)[1] <- "ID999"
#print(t)
#print(rep(t, each=v.len))
#print(v)
t2$temp <- rep(v, times=t.len)
t2$ID999 <- paste(t2$ID999, t2$temp, sep="@")
t <- data.frame.drop(t2, "temp")
}
}
t$temp <- rep(padval, length(t$ID999))
done <- unique(new.df$ID999)
names(t)[2] <- newnames[d]
# Keep only combinations not already present in the aggregated result.
blank <- t[!t$ID999 %in% done,]
#print(summary(blank))
#now stick onto the bottom of new.df
new.df <- rbind(new.df, blank[c(2,1)])
#print(summary(new.df))
}
#and now unstitch the ID999 field back into one column per factor
s <- strsplit(new.df$ID999, "@")
sdf <- as.data.frame(unlist(s))
names(sdf)[1] <- "val"
sdf$num <- rep(c(1:len), length(sdf[,1])/len)
#work through each of the parts of the ID999 field
#for some reason, integers need special handling to coerce them back into the same format they started in
for(i in 1:len)
{
if(class(df[,factorcols[i]])=="integer" || class(df[,factorcols[i]])=="numeric")
{
new.df[,(i+2)] <-as.character(sdf$val[sdf$num==i])
}
else if (class(df[,factorcols[i]])=="character" )
{
new.df[,(i+2)] <-as.character(sdf$val[sdf$num==i])
}
else
{
new.df[,(i+2)] <-as.factor(as.character(sdf$val[sdf$num==i]))
}
# Restore the original column class (e.g. integer) after round-tripping
# through character.
class(new.df[,i+2]) <- class(df[,factorcols[i]])
names(new.df)[i+2] <- factorcols[i]
}
#print(new.df)
}
if(d==1) {final.df<-new.df}
if(d>1)
{
final.df<-cbind(final.df, new.df[,1])
names(final.df)[length(names(final.df))]<-names(new.df)[1]
}
}
data.frame.drop(final.df,"ID999")
}
}
##########################################################################
# Drop the named columns from a data frame, one name at a time, and return
# the reduced data frame. Names in `cols` that do not occur in `df` are
# silently ignored (names(df) != col keeps everything).
#
# Fix: the original iterated `for (i in 1:length(cols))`, which for an empty
# `cols` evaluates cols[1] (NA) and errors when subsetting; iterating over
# the names themselves makes the empty case a no-op.
data.frame.drop <- function(df, cols) {
  for (col in cols) {
    keep <- names(df)[names(df) != col]
    df <- df[keep]
  }
  df
}
##########################################################################
# Replace every NA entry of `col` with `val` and return the result.
NA.to.val <- function(col, val) {
  missing_at <- which(is.na(col))
  replace(col, missing_at, val)
}
##########################################################################
# Replace every NA entry of `col` with 0 and return the result.
NA.to.0 <- function(col) {
  col[is.na(col)] <- 0
  col
}
##########################################################################
##########################################################################################
# Convert an n-dimensional named table (as produced by tapply) into a long
# data frame: one "value" column plus one column per table dimension,
# labelled with the dimnames.
#   tab  : array/table with dimnames (attributes(tab)[[1]] is its dim vector)
#   name : character vector; name[1] names the value column, name[2..] name
#          the dimension columns
# NOTE(review): d2[(i+1):n] with i == n indexes past the end (returns NA),
# relying on max(1, prod(..., NA-free?)) — appears to work because prod of an
# empty/NA-free tail is 1, but confirm for >2-dimensional tables.
vectorise.names<-function (tab,name){
n<-length(attributes(tab)[[1]])
dims<-attributes(tab)[[1]]
len<-prod(attributes(tab)[[1]])
d2<-c(dims, 0)
n1 <- name[1]
n2 <- name[2:length(name)]
#set up the data frame to be the correct length
df<-data.frame(as.vector(tab))
names(df)<-"value"
j<-2
# Expand the dimnames of each dimension so row k of df carries the labels
# of the table cell that as.vector() placed at position k.
for(i in 1 : n){
ech<- max(1,prod(dims[0:(i-1)])) # this is the number of sets
reps<-max(1,prod(d2[(i+1):n])) # this is the number of repeats of each number within a set
df[j]<-rep(dimnames(tab)[[i]],reps,each=ech)
j<-j+1
}
names(df)<-c("value", n2)
names(df)[1] <- n1
df
}
##############################################################
# Sort a data frame by a one-sided formula: + for ascending, - for
# descending, sorting left to right across the formula terms.
# Accepts the formula and the data frame in either argument order.
sort.data.frame <- function(form,dat){
# Author: Kevin Wright
# Some ideas from Andy Liaw
# http://tolstoy.newcastle.edu.au/R/help/04/07/1076.html
# Use + for ascending, - for decending.
# Sorting is left to right in the formula
# Useage is either of the following:
# sort.data.frame(~Block-Variety,Oats)
# sort.data.frame(Oats,~-Variety+Block)
# If dat is the formula, then switch form and dat
if(inherits(dat,"formula")){
f=dat
dat=form
form=f
}
if(form[[1]] != "~")
stop("Formula must be one-sided.")
# Make the formula into character and remove spaces
formc <- as.character(form[2])
formc <- gsub(" ","",formc)
# If the first character is not + or -, add +
if(!is.element(substring(formc,1,1),c("+","-"))) formc <- paste("+",formc,sep="")
# Extract the variables from the formula
# Remove spurious "" terms
vars <- unlist(strsplit(formc, "[\\+\\-]"))
vars <- vars[vars!=""]
# Build a list of arguments to pass to "order" function
calllist <- list()
pos=1 # Position of + or -
for(i in 1:length(vars)){
varsign <- substring(formc,pos,pos)
pos <- pos+1+nchar(vars[i])
# Factors are ordered by rank() of their levels so that "-" can negate
# them; numeric columns are negated directly.
if(is.factor(dat[,vars[i]])){
if(varsign=="-")
calllist[[i]] <- -rank(dat[,vars[i]])
else
calllist[[i]] <- rank(dat[,vars[i]])
}
else {
if(varsign=="-")
calllist[[i]] <- -dat[,vars[i]]
else
calllist[[i]] <- dat[,vars[i]]
}
}
# order() with one key per formula term gives the row permutation.
dat[do.call("order",calllist),]
}
|
3992f0ad856cb320dfe64346e508b643be6974c6
|
e1cbbf8791b0ac6d40f6d5b397785560105441d9
|
/man/cdfgam.Rd
|
359bbb3d79ad0be4bb3663f56b665f9bf73ea3a4
|
[] |
no_license
|
wasquith/lmomco
|
96a783dc88b67017a315e51da3326dfc8af0c831
|
8d7cc8497702536f162d7114a4b0a4ad88f72048
|
refs/heads/master
| 2023-09-02T07:48:53.169644
| 2023-08-30T02:40:09
| 2023-08-30T02:40:09
| 108,880,810
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,470
|
rd
|
cdfgam.Rd
|
\name{cdfgam}
\alias{cdfgam}
\title{Cumulative Distribution Function of the Gamma Distribution}
\description{
This function computes the cumulative probability or nonexceedance probability of the Gamma distribution given parameters (\eqn{\alpha} and \eqn{\beta}) computed by \code{\link{pargam}}. The cumulative distribution function has no explicit form but is expressed as an integral:
\deqn{F(x) = \frac{\beta^{-\alpha}}{\Gamma(\alpha)}\int_0^x t^{\alpha - 1}
\exp(-t/\beta)\; \mbox{d}t \mbox{,}}
where \eqn{F(x)} is the nonexceedance probability for the quantile \eqn{x}, \eqn{\alpha} is a shape parameter, and \eqn{\beta} is a scale parameter.
Alternatively, a three-parameter version is available following the parameterization of the Generalized Gamma distribution used in the \pkg{gamlss.dist} package and is
\deqn{F(x) =\frac{\theta^\theta\, |\nu|}{\Gamma(\theta)}\int_0^x \frac{z^\theta}{x}\,\mathrm{exp}(-z\theta)\; \mbox{d}x \mbox{,}}
where \eqn{z =(x/\mu)^\nu}, \eqn{\theta = 1/(\sigma^2\,|\nu|^2)} for \eqn{x > 0}, location parameter \eqn{\mu > 0}, scale parameter \eqn{\sigma > 0}, and shape parameter \eqn{-\infty < \nu < \infty}. The three parameter version is automatically triggered if the length of the \code{para} element is three and not two.
}
\usage{
cdfgam(x, para)
}
\arguments{
\item{x}{A real value vector.}
\item{para}{The parameters from \code{\link{pargam}} or \code{\link{vec2par}}.}
}
\value{
Nonexceedance probability (\eqn{F}) for \eqn{x}.
}
\references{
Hosking, J.R.M., 1990, L-moments---Analysis and estimation of
distributions using linear combinations of order statistics: Journal
of the Royal Statistical Society, Series B, v. 52, pp. 105--124.
Hosking, J.R.M., and Wallis, J.R., 1997, Regional frequency analysis---An
approach based on L-moments: Cambridge University Press.
}
\author{W.H. Asquith}
\seealso{\code{\link{pdfgam}}, \code{\link{quagam}}, \code{\link{lmomgam}}, \code{\link{pargam}}}
\examples{
lmr <- lmoms(c(123,34,4,654,37,78))
cdfgam(50,pargam(lmr))
# A manual demonstration of a gamma parent
G <- vec2par(c(0.6333,1.579),type='gam') # the parent
F1 <- 0.25 # nonexceedance probability
x <- quagam(F1,G) # the lower quartile (F=0.25)
a <- 0.6333 # gamma parameter
b <- 1.579 # gamma parameter
# compute the integral
xf <- function(t,A,B) { t^(A-1)*exp(-t/B) }
Q <- integrate(xf,0,x,A=a,B=b)
# finish the math
F2 <- Q$val*b^(-a)/gamma(a)
# check the result
if(abs(F1-F2) < 1e-8) print("yes")
\dontrun{
# 3-p Generalized Gamma Distribution and gamlss.dist package parameterization
gg <- vec2par(c(7.4, 0.2, 14), type="gam"); X <- seq(0.04,9, by=.01)
GGa <- gamlss.dist::pGG(X, mu=7.4, sigma=0.2, nu=14)
GGb <- cdfgam(X, gg) # lets compare the two cumulative probabilities
plot( X, GGa, type="l", xlab="X", ylab="PROBABILITY", col=3, lwd=6)
lines(X, GGb, col=2, lwd=2) #}
\dontrun{
# 3-p Generalized Gamma Distribution and gamlss.dist package parameterization
gg <- vec2par(c(4, 1.5, -.6), type="gam"); X <- seq(0,1000, by=1)
GGa <- 1-gamlss.dist::pGG(X, mu=4, sigma=1.5, nu=-.6) # Note 1-... (pGG bug?)
GGb <- cdfgam(X, gg) # lets compare the two cumulative probabilities
plot( X, GGa, type="l", xlab="X", ylab="PROBABILITY", col=3, lwd=6)
lines(X, GGb, col=2, lwd=2) #}
}
\keyword{distribution}
\keyword{cumulative distribution function}
\keyword{Distribution: Gamma}
\keyword{Distribution: Generalized Gamma}
\keyword{Package: gamlss.dist}
|
489cc8cc4e21ef335de9513eec0771285f954e91
|
b621dc04edbca760936fbdd9ea6725484a24bf85
|
/interactive_heatmap_dotplot/interactive_heatmap_dotplot.R
|
b8b3d58ff12bbcd9c72e73591d61e2e935e7e8f5
|
[] |
no_license
|
DoruMP/Fast-data-portals-for-scRNAseq-data
|
416a0f5d88c1ab41c9e894482ffe974cd9d0ea4b
|
b256d0d9d9386662bd2a8de6b2ee5c4906984fba
|
refs/heads/master
| 2020-05-14T05:22:41.243501
| 2019-05-14T13:48:05
| 2019-05-14T13:48:05
| 181,701,003
| 14
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,797
|
r
|
interactive_heatmap_dotplot.R
|
library(Seurat)
library(methods)
# Path/name of the Python interpreter used for the helper scripts below.
python.addr = 'python3.6'
# First command-line argument is a plain-text options file, one value per line:
# input file, identity field, output folder, save-to path, dataset name.
args = commandArgs(trailingOnly=T)
options_file = args[1]
options_fobj = file(options_file, 'r')
options_fields = readLines(options_fobj)
close(options_fobj)
file_name = options_fields[1]
set.ident = options_fields[2]
output_folder = options_fields[3]
save_to = options_fields[4]
data_name = options_fields[5]
dir.create(output_folder)
# check the file name extension
# if it is RDS assume this is a Seurat object and go on
# if it is h5ad then assume it is a scanpy object and get help from Python to extract the required data
file_name_extension = unlist(strsplit(file_name, "\\."))
file_name_extension = file_name_extension[length(file_name_extension)]
if (file_name_extension == 'h5ad'){ # handle a scanpy object
# must obtain an expression matrix aggregated by genes
# rownames are cell types
# colnames are genes
print('Handling a Scanpy object')
# Delegate extraction of the matrix, gene names, and cell types to Python.
command = sprintf("%s aggregate_matrix_from_seurat.py %s %s %s", python.addr, file_name, set.ident, output_folder)
system(command, wait = T)
# read the expression sparse matrix from disk
# NOTE(review): readMM() comes from the Matrix package, which is not
# library()'d here — presumably attached via Seurat; confirm.
expression.data = readMM(file.path(output_folder, 'expression.mtx'))
# convert the expression data to dgCMatrix so aggregation will be faster
expression.data = as(expression.data, "dgCMatrix")
# reading gene names from disk
input_file = file(file.path(output_folder, 'gene_names.txt'))
gene_names = readLines(input_file)
close(input_file)
# reading cell types from disk
input_file = file(file.path(output_folder, "cell_types.txt"))
cell_types = readLines(input_file)
close(input_file)
# update colnames and rownames of expression data
rownames(expression.data) = cell_types
colnames(expression.data) = gene_names
# aggregate the expression matrix by gene, processing genes in chunks of
# 1000 columns to bound memory, taking the mean per cell type.
no.genes = ncol(expression.data)
start_index = 1
while (start_index < no.genes){
end_index = start_index + 999
end_index = min(end_index, no.genes)
expression.data_ = data.matrix(expression.data[, start_index:end_index])
expression.data_ = as.data.frame(expression.data_)
expression.data_ = cbind(data.frame(CellLabels = cell_types), expression.data_)
expression.data_ = aggregate(expression.data_[2:dim(expression.data_)[2]], list(expression.data_$CellLabels), mean)
expression.data_ = cbind(data.frame(CellType = expression.data_$Group.1), expression.data_[, 2:dim(expression.data_)[2]])
rownames(expression.data_) = expression.data_$CellType
expression.data_ = expression.data_[, 2:ncol(expression.data_)]
print(start_index)
if (start_index == 1){
gene.expression.data = expression.data_
}else{
gene.expression.data = cbind(gene.expression.data, expression.data_)
}
start_index = start_index + 1000
}
# Save the expression matrix (aggregated by cell type using mean) to the output folder
write.csv(gene.expression.data, file.path(output_folder, 'expression.csv'))
file.remove(file.path(output_folder, 'gene_names.txt'))
file.remove(file.path(output_folder, 'cell_types.txt'))
}else{
print("Loading data ... ")
# RDS branch: load the Seurat object and set the requested identity class.
seurat.obj = readRDS(file_name)
seurat.obj = SetAllIdent(object=seurat.obj, id=set.ident)
# create expression data aggregated by the mean expression of each gene in
# each cell population, again chunked 1000 genes at a time.
no.genes = nrow(seurat.obj@data)
start_index = 1
while (start_index < no.genes){
end_index = start_index + 999
end_index = min(end_index, no.genes)
expression.data_ = data.matrix(seurat.obj@data[start_index:end_index, ])
expression.data_ = t(expression.data_)
expression.data_ = as.data.frame(expression.data_)
expression.data_ = cbind(data.frame(CellLabels = as.vector(seurat.obj@ident)), expression.data_)
expression.data_ = aggregate(expression.data_[2:dim(expression.data_)[2]], list(expression.data_$CellLabels), mean)
expression.data_ = cbind(data.frame(CellType = expression.data_$Group.1), expression.data_[, 2:dim(expression.data_)[2]])
rownames(expression.data_) = expression.data_$CellType
expression.data_ = expression.data_[, 2:ncol(expression.data_)]
print(start_index)
if (start_index == 1){
expression.data = expression.data_
}else{
expression.data = cbind(expression.data, expression.data_)
}
start_index = start_index + 1000
}
# Save the expression matrix (aggregated by cell type using mean) to the output folder
write.csv(expression.data, file.path(output_folder, 'expression.csv'))
}
# start the python script that compiles the interactive app from the CSV
command = sprintf('%s compile_app.py %s', python.addr, options_file)
system(command, wait = T)
# clean-up intermediate files written above
file.remove(file.path(output_folder, 'expression.csv'))
file.remove(file.path(output_folder, 'expression.mtx'))
# end
print('Ended beautifully')
|
d3389ab72480657c3c49a60b0594d19b27467d82
|
ab7db0dbd19eb599fae18f11b8767a8862c3eff9
|
/cachematrix.R
|
9d26de8c9461d8fc43b2b868d393a71ce4d8725d
|
[] |
no_license
|
tripah/ProgrammingAssignment2
|
cfe33ad947d998f553ddfc744adf8a30ba727d69
|
3731f2473ffe10152e7b6cc484594f3c05dfdde2
|
refs/heads/master
| 2021-01-14T14:23:39.394517
| 2014-11-23T19:46:52
| 2014-11-23T19:46:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,262
|
r
|
cachematrix.R
|
## cachematrix.R: caching the inverse of a matrix.
## 1. makeCacheMatrix() stores a matrix together with a cached inverse and
## exposes setter/getter methods for both; setting a new matrix clears
## the cached inverse.
## 2. cacheSolve() returns the inverse of such a cached matrix, computing it
## only if it has not been cached yet.
## Create a special "matrix" object that can cache its inverse.
## Returns a list of closures: set/get for the matrix itself and
## setinverse/getinverse for the cached inverse. set() clears the cache.
##
## Fixes two defects in the original:
## * `inverse-x` is not a legal R name (it parses as `inverse - x`), so
## every reference to it raised "object 'inverse' not found" at run time.
## * the returned list omitted `get`, so cacheSolve's x$get() failed.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL # new matrix invalidates any cached inverse
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Write a short comment describing this function
## Return the inverse of the special "matrix" created by makeCacheMatrix:
## serve the cached inverse when available, otherwise compute it with
## solve(), store it in the cache, and return it.
##
## Fix: the original used `inverse-x` and `matrix-x`, which are not legal R
## names (they parse as subtractions) and made every call error.
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if (!is.null(inv)) {
message("getting cached data")
return(inv)
}
mat <- x$get()
inv <- solve(mat, ...)
x$setinverse(inv)
inv
}
|
55894743d68c7d0fbc3e54acdf6b5ed4a14f7711
|
d6302bdd07645e0da8ad4430a261d3ebe2149435
|
/man/bipartitionShi.Rd
|
da34cb147b379250128bcc68b2176fff645a087f
|
[] |
no_license
|
cran/RclusTool
|
3a8fec24edeaedee42ef0f255f32dfc4be107dfe
|
7ed428f6c896889a9b291a279e1e82f8f6d9cd3b
|
refs/heads/master
| 2022-09-04T15:47:33.547991
| 2022-08-29T07:40:08
| 2022-08-29T07:40:08
| 236,879,246
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,341
|
rd
|
bipartitionShi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codeSpectral.R
\name{bipartitionShi}
\alias{bipartitionShi}
\title{Spectral clustering}
\usage{
bipartitionShi(sim)
}
\arguments{
\item{sim}{similarity matrix.}
}
\value{
The function returns a list containing:
\item{label}{vector of labels.}
\item{eigenvector}{matrix containing, in columns, the eigenvectors of the similarity matrix.}
\item{eigenvalue}{vector containing the eigenvalues of the similarity matrix.}
}
\description{
Perform spectral clustering thanks to a similarity matrix (according to Shi and Malik, 2000).
}
\details{
bipartitionShi returns a partition obtained by spectral clustering (according to Shi and Malik, 2000)
}
\examples{
dat <- rbind(matrix(rnorm(100, mean = 0, sd = 0.3), ncol = 2),
matrix(rnorm(100, mean = 2, sd = 0.3), ncol = 2))
sim <- computeGaussianSimilarity(dat, 1)
res <- bipartitionShi(sim)
plot(dat[,1], dat[,2], type = "p", xlab = "x", ylab = "y",
col = res$label, main = "Initial features space")
plot(res$eigenvector, type = "p", xlab = "Indices", ylab = "1st eigenvector",
col = res$label, main = "Spectral embedding")
}
\references{
J. Shi, J. Malik, Normalized cuts and image segmentation, IEEE Transactions on Pattern Analysis and Machine Intelligence, 2000, 22(8), 888-905.
}
\keyword{internal}
|
63ac89897d763983ed88cbb1fe948753ba60590d
|
e1d7a92b1a0f23f62c683b71a82681ab5f65ab59
|
/plot4.R
|
d617cfcf20de3233db8ca7bfead428813787456d
|
[] |
no_license
|
luisben/ExData_Plotting1
|
2e19bbd85efd3b5fda2fcce7c5b4a1502f527845
|
d763493bd4497127bfbeedb031fc57146646cf0c
|
refs/heads/master
| 2021-01-15T21:14:46.100563
| 2014-08-09T17:11:40
| 2014-08-09T17:11:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 988
|
r
|
plot4.R
|
source("dataLoad.r")
prev_locale <- Sys.getlocale(category = "LC_ALL")
Sys.setlocale(category = "LC_ALL", locale = "C")
png("myplots/plot4.png",width = 480, height = 480, units = "px")
#set mfcol
par(mfcol = c(2,2))
#first plot
with(pw_data,plot(timestamp,Global_active_power,type = "l",xlab="",ylab="Global Active Power"))
#second plot
with(pw_data,plot(timestamp,Sub_metering_1,type = "l",xlab="",ylab="Energy sub metering",col="black"))
with(pw_data,lines(timestamp,Sub_metering_2,type="l",col="red"))
with(pw_data,lines(timestamp,Sub_metering_3,type="l",col="blue"))
legend(x="topright",bty="n",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black","red","blue"),lty=c(1,1,1))
#third plot
with(pw_data,plot(timestamp,Voltage,type = "l",xlab="datetime",ylab="Voltage"))
#fourth plot
with(pw_data,plot(timestamp,Global_reactive_power,type = "l",xlab="datetime"))
dev.off()
#restore mfcol
par(mfcol = c(1,1))
Sys.setlocale(category = "LC_ALL", locale = prev_locale)
|
e5c0cac6eac7123f7bbcfee7b5a866d0b062bf7c
|
cc45e66be835f29864d3bf2ac39bd2983f6a05c4
|
/Dumbbell_Chart.R
|
3423d5ccbd8f437c7539809ea9c34b15717b0fb7
|
[
"MIT"
] |
permissive
|
paulinelemenkova/R-11-Dumbbell-Charts
|
aaa7524b3cbf1a82577ce88c36daf6619d33e16e
|
ce35b048f2d00b1f86e65f092c264787081d88af
|
refs/heads/master
| 2020-03-19T11:34:30.723816
| 2019-06-17T13:09:15
| 2019-06-17T13:09:15
| 136,462,641
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,992
|
r
|
Dumbbell_Chart.R
|
library(ggplot2) # plotting
library(ggalt) # provides geom_dumbbell()
# PART 1. Build the source data frame.
# Step 1. Read the CSV table into the source data frame.
MorDF <- read.csv("Morphology.csv", header=TRUE, sep = ",")
head(MorDF)
summary(MorDF)
# PART 2. Build the dumbbell charts.
theme_set(theme_classic())
# Step 2. Set up the factor levels (here: profile numbers 1:25).
# NOTE(review): this `profile` factor is created but the plots below map
# y = profile straight from MorDF with a continuous scale — confirm it is
# actually needed.
profile<- factor(MorDF$profile, levels=as.character(MorDF$profile)) # for the right ordering of the dumbbells
# Step 3. Compare the point distributions for the pair "Philippine vs Pacific plates".
PacPhil <- ggplot(MorDF, aes(x = plate_phill, xend = plate_pacif, y = profile, group = profile)) +
geom_dumbbell(color = "thistle4", size=0.3, colour_xend = "deeppink", size_xend = 1.5, colour_x = "darkviolet", size_x = 1.5, show.legend = T) +
labs(x = "Observation Points",
y = "Profiles",
title="马里亚纳海沟。剖面1-25。Dumbbell Chart \nMariana Trench, Profiles Nr.1-25.",
subtitle="Value Change: Observation Points; Philippine Plate vs Pacific Plate",
caption="海洋大学, 青岛市2018. \nStatistics Processing and Graphs: R Programming. Data Source: QGIS") +
scale_x_continuous(breaks = c(seq(0, 500, by = 100)), minor_breaks = c(seq(0, 500, by = 50))) + # x-axis breaks every 100 from 0 to 500, minor breaks every 50
scale_y_continuous(breaks = c(seq(1, 25, by = 1))) +
theme(
#plot.background=element_rect(fill="#f7f7f7"),
plot.title = element_text(margin = margin(t = 0, r = 20, b = 5, l = 0), family = "Kai", face = "bold", size = 12),
plot.subtitle = element_text(margin = margin(t = 0, r = 20, b = 4, l = 0), family = "Hei", face = "bold", size = 8),
plot.caption = element_text(margin = margin(t = 20, r = 10, b = 4, l = 0), family = "Kai", face = "bold", size = 8),
plot.background=element_rect(fill = "white"),
axis.text.x = element_text(face = 3, color = "gray24", size = 6),
axis.text.y = element_text(face = 3, color = "gray24", size = 6),
axis.title.y = element_text(size = 8),
axis.title.x = element_text(size = 8),
axis.ticks=element_blank(),
legend.position="top",
panel.background=element_rect(fill = "grey95", colour = "grey95"),
panel.grid.major = element_line("white", size = 0.4),
panel.grid.minor = element_line("white", size = 0.4, linetype = "dotted"),
panel.border=element_blank())
PacPhil
# Step 4. Compare the point distributions for the pair "Mariana vs Caroline plates".
MarCar <- ggplot(MorDF, aes(x = plate_maria, xend = plate_carol, y = profile, group = profile)) +
geom_dumbbell(color = "thistle4", size=0.3, colour_xend = "orange", size_xend = 1.5, colour_x = "navy", size_x = 1.5, show.legend = T) +
labs(x = "Observation Points",
y = "Profiles",
title="马里亚纳海沟。剖面1-25。Dumbbell Chart \nMariana Trench, Profiles Nr.1-25.",
subtitle="Value Change: Observation Points; Mariana Plate vs Caroline Plate",
caption="海洋大学, 青岛市2018. \nStatistics Processing and Graphs: R Programming. Data Source: QGIS") +
scale_x_continuous(breaks = c(seq(0, 500, by = 100)), minor_breaks = c(seq(0, 500, by = 50))) + # x-axis breaks every 100 from 0 to 500, minor breaks every 50
scale_y_continuous(breaks = c(seq(1, 25, by = 1))) +
theme(
#plot.background=element_rect(fill="#f7f7f7"),
plot.title = element_text(margin = margin(t = 0, r = 20, b = 5, l = 0), family = "Kai", face = "bold", size = 12),
plot.subtitle = element_text(margin = margin(t = 0, r = 20, b = 4, l = 0), family = "Hei", face = "bold", size = 8),
plot.caption = element_text(margin = margin(t = 20, r = 10, b = 4, l = 0), family = "Kai", face = "bold", size = 8),
plot.background=element_rect(fill = "white"),
axis.text.x = element_text(face = 3, color = "gray24", size = 6),
axis.text.y = element_text(face = 3, color = "gray24", size = 6),
axis.title.y = element_text(size = 8),
axis.title.x = element_text(size = 8),
axis.ticks=element_blank(),
legend.position="top",
panel.background=element_rect(fill = "grey95", colour = "grey95"),
panel.grid.major = element_line("white", size = 0.4),
panel.grid.minor = element_line("white", size = 0.4, linetype = "dotted"),
panel.border=element_blank())
MarCar
# Step 5. Arrange both plots on one sheet.
# NOTE(review): plot_grid() comes from cowplot, which is not loaded above —
# confirm library(cowplot) is attached elsewhere before this line runs.
figure <-plot_grid(MarCar, PacPhil, labels = c("1", "2"), ncol = 2, nrow = 1)
|
62135879cf7cdc14a243df8c316216e96b6f6b09
|
0ca78ef5a8670fbdab55409eecda579cec2baf68
|
/DM/mlMDS.R
|
452289e170dd56eab4fd5e8a4dc6f7fb614d7e43
|
[] |
no_license
|
zhurui1351/RSTOCK_TRAIL
|
ab83fdef790778a1e792d08a876522ef13a872e6
|
2396c512c8df81a931ea3ca0c925c151363a2652
|
refs/heads/master
| 2021-01-23T09:01:36.814253
| 2019-05-17T15:26:19
| 2019-05-17T15:26:19
| 23,482,375
| 9
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,051
|
r
|
mlMDS.R
|
#https://github.com/johnmyleswhite/ML_for_Hackers/blob/master/09-MDS/chapter09.R
# Worked example of classical multidimensional scaling (MDS), then an
# application to U.S. Senate roll-call votes (data read from disk below).
library('foreign')
library('ggplot2')
set.seed(851982) # To make sure results are consistent
# Toy 4x6 matrix of votes in {-1, 0, 1}: rows = voters A-D, columns = issues P1-P6.
ex.matrix <- matrix(sample(c(-1, 0, 1), 24, replace = TRUE),
nrow = 4,
ncol = 6)
row.names(ex.matrix) <- c('A', 'B', 'C', 'D')
colnames(ex.matrix) <- c('P1', 'P2', 'P3', 'P4', 'P5', 'P6')
ex.matrix
# Transpose (columns become rows)
t(ex.matrix)
# Voter-by-voter agreement matrix: M %*% t(M)
ex.mult <- ex.matrix %*% t(ex.matrix)
ex.mult
# Euclidean distance between voters 1 and 4, computed by hand
sqrt(sum((ex.mult[1, ] - ex.mult[4, ]) ^ 2))
ex.dist <- dist(ex.mult)
ex.dist
# Classical MDS down to 2 dimensions, plotted with voter labels.
ex.mds <- cmdscale(ex.dist,2)
plot(ex.mds, type = 'n')
text(ex.mds, c('A', 'B', 'C', 'D'))
# Analysis of congressional roll-call vote results
# NOTE(review): hard-coded Windows path; one Stata file per congress is expected.
data.dir <- file.path("c:/data", "roll_call")
data.files <- list.files(data.dir)
rollcall.data <- lapply(data.files,
function(f)
{
read.dta(file.path(data.dir, f), convert.factors = FALSE)
})
dim(rollcall.data[[1]])
head(rollcall.data[[1]])
# Simplify one congress's roll-call data frame: drop the president's
# record (state code 99) and collapse the raw vote codes in columns
# 10..ncol into {1, -1, 0} (presumably yea / nay / not voting -- the
# coding scheme is not documented here).  Returns the recoded vote
# columns as a matrix.
rollcall.simplified <- function(df)
{
  no.pres <- subset(df, state < 99)
  # Recode a single vote column; the three ifelse passes mirror the
  # original statement order exactly (codes >6 -> 0, 1-3 -> 1, 4-6 -> -1).
  recode.votes <- function(v)
  {
    v <- ifelse(v > 6, 0, v)
    v <- ifelse(v > 0 & v < 4, 1, v)
    ifelse(v > 1, -1, v)
  }
  vote.cols <- 10:ncol(no.pres)
  no.pres[, vote.cols] <- lapply(no.pres[, vote.cols, drop = FALSE], recode.votes)
  as.matrix(no.pres[, vote.cols])
}
# Recode every congress, build voter-agreement matrices, and run 2-D MDS.
rollcall.simple <- lapply(rollcall.data, rollcall.simplified)
rollcall.dist <- lapply(rollcall.simple, function(m) dist(m %*% t(m)))
# * -1 flips the axes (MDS orientation is arbitrary).
rollcall.mds <- lapply(rollcall.dist,
function(d) as.data.frame((cmdscale(d, k = 2)) * -1))
congresses <- 101:111
# Attach senator surname, party code and congress number to each MDS result.
for(i in 1:length(rollcall.mds))
{
names(rollcall.mds[[i]]) <- c("x", "y")
congress <- subset(rollcall.data[[i]], state < 99)
# Surname = text before the first comma or space in the raw name field.
congress.names <- sapply(as.character(congress$name),
function(n) strsplit(n, "[, ]")[[1]][1])
rollcall.mds[[i]] <- transform(rollcall.mds[[i]],
name = congress.names,
party = as.factor(congress$party),
congress = congresses[i])
}
head(rollcall.mds[[1]])
# NOTE(review): congresses[9] is 109, yet the plot title below says "110th";
# index [[9]] vs the title looks inconsistent -- confirm which is intended.
cong.110 <- rollcall.mds[[9]]
# Base plot for the single-congress view; party codes 100 = Dem, 200 = Rep,
# 328 = Independent (per the labels below).
base.110 <- ggplot(cong.110, aes(x = x, y = y)) +
scale_size(range = c(2,2), guide = 'none') +
scale_alpha(guide = 'none') +
theme_bw() +
theme(axis.ticks = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
panel.grid.major = element_blank()) +
ggtitle("Roll Call Vote MDS Clustering for 110th U.S. Senate") +
xlab("") +
ylab("") +
scale_shape(name = "Party", breaks = c("100", "200", "328"),
labels = c("Dem.", "Rep.", "Ind."), solid = FALSE) +
scale_color_manual(name = "Party", values = c("100" = "black",
"200" = "dimgray",
"328"="grey"),
breaks = c("100", "200", "328"),
labels = c("Dem.", "Rep.", "Ind."))
# Same layout twice: once as points, once with senator names as labels.
print(base.110 + geom_point(aes(shape = party,
alpha = 0.75,
size = 2)))
print(base.110 + geom_text(aes(color = party,
alpha = 0.75,
label = cong.110$name,
size = 2)))
# All congresses on one faceted chart.
all.mds <- do.call(rbind, rollcall.mds)
all.plot <- ggplot(all.mds, aes(x = x, y = y)) +
geom_point(aes(shape = party, alpha = 0.75, size = 2)) +
scale_size(range = c(2, 2), guide = 'none') +
scale_alpha(guide = 'none') +
theme_bw() +
theme(axis.ticks = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
panel.grid.major = element_blank()) +
ggtitle("Roll Call Vote MDS Clustering for U.S. Senate (101st - 111th Congress)") +
xlab("") +
ylab("") +
scale_shape(name = "Party",
breaks = c("100", "200", "328"),
labels = c("Dem.", "Rep.", "Ind."),
solid = FALSE) +
facet_wrap(~ congress)
print(all.plot)
# Export one point plot and one name-label plot per congress as PDFs
# under images/senate_plots/ (directory must already exist).
for(i in 1:length(rollcall.mds))
{
mds <- rollcall.mds[[i]]
congress <- congresses[i]
plot.title <- paste("Roll Call Vote MDS Clustering for ",
congress,
" U.S. Senate",
sep = "")
# Build base plot
mds.plot <- ggplot(mds, aes(x = x, y = y)) +
scale_size(range = c(2, 2), guide = 'none') +
scale_alpha(guide = 'none') +
theme_bw() +
theme(axis.ticks = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
panel.grid.major = element_blank()) +
ggtitle(plot.title) +
xlab("") +
ylab("")
# Build up point and text plots separately
mds.point <- mds.plot + geom_point(aes(shape = party,
alpha = 0.75,
size = 2))
mds.text <- mds.plot + geom_text(aes(color = party,
alpha = 0.75,
label = mds$name,
size = 2))
# Fix labels, shapes and colors.  Branch on whether this congress has a
# third party level (328 = Independent) in addition to 100/200.
if(length(levels(mds$party)) > 2)
{
mds.point <- mds.point + scale_shape(name = "Party",
breaks = c("100", "200", "328"),
labels = c("Dem.", "Rep.", "Ind."),
solid = FALSE)
mds.text <- mds.text + scale_color_manual(name = "Party",
values = c("100" = "black",
"200" = "dimgray",
"328" = "gray"),
breaks = c("100", "200", "328"),
labels = c("Dem.", "Rep.", "Ind."))
}
else
{
mds.point <- mds.point + scale_shape(name = "Party",
breaks = c("100", "200"),
labels = c("Dem.", "Rep."),
solid = FALSE)
mds.text <- mds.text + scale_color_manual(name = "Party",
values = c("100" = "black",
"200" = "dimgray"),
breaks = c("100", "200"),
labels = c("Dem.", "Rep."))
}
# Write both variants to disk; file names keyed by congress number.
ggsave(plot = mds.point,
filename = file.path('images',
'senate_plots',
paste(congress, "_point.pdf", sep = "")),
width = 8,
height = 5)
ggsave(plot = mds.text,
filename = file.path('images',
'senate_plots',
paste(congress, "_names.pdf", sep = "")),
width = 8,
height = 5)
}
|
d14645fa1b78b0d47e49dee7f7f138c15c94ebbf
|
a257667eb2a709200f5b9664e63c6c974c051a67
|
/code/stan-plots.R
|
b6f654437ea4c5f42e94b8dec0b3d1e251a8319f
|
[] |
no_license
|
chiangwe/covid19-forecast-evals
|
ef892145304c9defaf61bc21a09154f047d97545
|
6a964c8ad60cefc14ca2b12b23cd2c4095e4246a
|
refs/heads/main
| 2023-03-02T04:53:42.808052
| 2021-02-08T03:30:30
| 2021-02-08T03:30:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,233
|
r
|
stan-plots.R
|
# Posterior-predictive checks for a negative-binomial Stan fit of forecast
# absolute errors: overlay 6 arbitrary posterior-predictive draws (grey)
# against the observed rounded absolute errors (black), faceted four ways.
# Requires `scored_models_df_pred` to exist in the session (built elsewhere).
stan_fit_nb <- readRDS("../paper-inputs/20200927-stan-fit-scores-negbin.rds")
launch_shinystan(stan_fit_nb)
pp_samples <- posterior_predict(stan_fit_nb)
dim(pp_samples)
# Six hand-picked posterior-predictive draws (row indices are arbitrary).
scored_models_df_pred$pp1 <- pp_samples[1,]
scored_models_df_pred$pp2 <- pp_samples[143,]
scored_models_df_pred$pp3 <- pp_samples[1430,]
scored_models_df_pred$pp4 <- pp_samples[1864,]
scored_models_df_pred$pp5 <- pp_samples[1111,]
scored_models_df_pred$pp6 <- pp_samples[2000,]
tmp_preds <- scored_models_df_pred %>%
select(model, target, location_name, first_fcast_sat, truth, abs_error, abs_error_rnd, pp1, pp2, pp3, pp4, pp5, pp6) %>%
pivot_longer(cols = c(abs_error, abs_error_rnd, pp1, pp2, pp3, pp4, pp5, pp6))
## by location
ggplot(mapping=aes(x=value, col=name)) +
## posterior samples
geom_density(data=dplyr::filter(tmp_preds, name %in% paste0("pp", 1:6)), color="grey", alpha=.5, aes(group=name)) +
## observed data
geom_density(data=dplyr::filter(tmp_preds, name=="abs_error_rnd"), color="black", size=1) +
scale_x_sqrt() +
facet_wrap(.~location_name, scales="free")
## by model
ggplot(mapping=aes(x=value, col=name)) +
## posterior samples
geom_density(data=dplyr::filter(tmp_preds, name %in% paste0("pp", 1:6)), color="grey", alpha=.5, aes(group=name)) +
## observed data
geom_density(data=dplyr::filter(tmp_preds, name=="abs_error_rnd"), color="black", size=1) +
scale_x_sqrt() +
facet_wrap(.~model, scales="free")
## by target
ggplot(mapping=aes(x=value, col=name)) +
## posterior samples
geom_density(data=dplyr::filter(tmp_preds, name %in% paste0("pp", 1:6)), color="grey", alpha=.5, aes(group=name)) +
## observed data
geom_density(data=dplyr::filter(tmp_preds, name=="abs_error_rnd"), color="black", size=1) +
scale_x_sqrt() +
facet_wrap(.~target, scales="free")
## by first forecast date (original comment said "by target" but this facets on first_fcast_sat)
ggplot(mapping=aes(x=value, col=name)) +
## posterior samples
geom_density(data=dplyr::filter(tmp_preds, name %in% paste0("pp", 1:6)), color="grey", alpha=.5, aes(group=name)) +
## observed data
geom_density(data=dplyr::filter(tmp_preds, name=="abs_error_rnd"), color="black", size=1) +
scale_x_sqrt() +
facet_wrap(.~first_fcast_sat, scales="free")
|
3a9dbd3b3fe5fb9e6a079b00f8ef3175ac647e6d
|
8ac1fa003fd7be97fb170bea5d92e3008cf6e09b
|
/R/sql_insert.r
|
95d655725114b6e617c7df9e8bbc5fe3516dd296
|
[] |
no_license
|
swish-climate-impact-assessment/swishdbtools
|
28271f06aef8b85740c8e2630c6c84eff55ee95d
|
acae288c23a9fb74e8daaddc3c319777d06e765e
|
refs/heads/master
| 2020-08-25T03:19:32.312877
| 2020-05-15T07:12:08
| 2020-05-15T07:12:08
| 7,141,752
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,244
|
r
|
sql_insert.r
|
################################################################
# name:sqlquery_insert
# Build (and optionally execute) a simple SELECT statement against a
# PostgreSQL table.  NOTE: despite the historical name, this constructs a
# SELECT query, not an INSERT -- kept for backward compatibility.
#
#   channel      open database connection (used by pgListTables / dbGetQuery)
#   variables    column list as a single string; "*" is expanded to the
#                table's explicit column names
#   from_schema / from_table   fully qualify the source table
#   where        optional WHERE clause body (NA = no WHERE clause)
#   limit        LIMIT n appended when limit > 0
#   eval         TRUE = run the query and return the data frame,
#                FALSE = return the SQL text
#
# SECURITY NOTE: the SQL is assembled by plain string concatenation; never
# pass untrusted input in any of these arguments.
sqlquery_insert <- function(channel, variables = "*",
                            from_schema = 'public', from_table,
                            where = NA, limit = 1, eval = FALSE)
{
  # Fail fast if the table is not visible on this connection.
  exists <- pgListTables(channel, from_schema, from_table)
  if (nrow(exists) == 0)
  {
    stop("Table doesn't exist.", call. = FALSE)
  }
  # Expand "*" into an explicit column list by peeking at a single row.
  # length() guard: `if` on a length > 1 condition is an error in modern R.
  if (length(variables) == 1 && variables == "*")
  {
    variables <- names(
      dbGetQuery(channel,
                 paste0("select ", variables, " from ",
                        from_schema, ".",
                        from_table, " limit 1"))
    )
    variables <- paste(variables, collapse = ", ")
  }
  sqlquery <- paste0("select ", variables, "\nfrom ", from_schema, ".",
                     from_table, "\n")
  if (!is.na(where))
  {
    sqlquery <- paste0(sqlquery, "where ", where, "\n")
  }
  if (limit > 0)
  {
    sqlquery <- paste0(sqlquery, "limit ", limit, "\n")
  }
  if (eval)
  {
    return(dbGetQuery(channel, sqlquery))
  } else {
    return(sqlquery)
  }
}
|
d92ed94688849f68146360c5152b4b3b6b335773
|
50d9adf0b0ff309013c2c963aa8220ea2bcf6339
|
/get_required_dataset.R
|
288fca58f6a6dd2d958d1b8ea442c235bb3dbb9a
|
[] |
no_license
|
ravikanth1979/Data-Analysis-and-Visualization
|
343542334d63aaeaf6f5fb3539606b38b6ddcbb7
|
9abf5287cf9d9b5434bd2f4fabde8e88f9836cc8
|
refs/heads/master
| 2020-06-21T20:58:34.293576
| 2019-07-18T09:04:12
| 2019-07-18T09:04:12
| 197,550,988
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,758
|
r
|
get_required_dataset.R
|
# Extract the Logistics Performance Index (LPI) indicators from a wide
# World-Bank-style table, reshape the year columns to long format
# (Country_Name, Country_Code, Indicator_Name, Indicator_Code, Year,
# Indicator_Score), and drop rows containing any missing value.
get_lpi_dataset <- function(dataset){
  lpi_indicators <- c("LP.LPI.CUST.XQ",
                      "LP.LPI.INFR.XQ",
                      "LP.LPI.ITRN.XQ",
                      "LP.LPI.LOGS.XQ",
                      "LP.LPI.OVRL.XQ",
                      "LP.LPI.TIME.XQ",
                      "LP.LPI.TRAC.XQ")
  lpi_dataset <- dataset[dataset$`Indicator Code` %in% lpi_indicators, ]
  # library() instead of require(): fail loudly if tidyr is missing.
  library(tidyr)
  ### assign standard names to the first four columns
  colnames(lpi_dataset)[1] <- "Country_Name"
  colnames(lpi_dataset)[2] <- "Country_Code"
  colnames(lpi_dataset)[3] <- "Indicator_Name"
  colnames(lpi_dataset)[4] <- "Indicator_Code"
  ### combine the year columns into a single column with separate rows for each year
  lpi_dataset <- gather(lpi_dataset, Year, Indicator_Score,
                        -c(Country_Name, Country_Code, Indicator_Name, Indicator_Code))
  # BUG FIX: the original collected NA-row indices in a loop and subset with
  # lpi_dataset[-unlist(row_nums), ]; when NO row contained an NA,
  # unlist(list()) is NULL, so the negative index dropped EVERY row
  # (and the 1:nrow() loop also misbehaves on zero-row input).
  # complete.cases() keeps exactly the fully observed rows in all cases.
  lpi_dataset <- lpi_dataset[complete.cases(lpi_dataset), , drop = FALSE]
  return(lpi_dataset)
}
# Extract the international-tourism indicators from a wide World-Bank-style
# table, reshape the year columns to long format, and drop rows containing
# any missing value.
get_tourism_dataset <- function(dataset){
  tourism_indicators <- c("ST.INT.ARVL",
                          "ST.INT.DPRT",
                          "ST.INT.RCPT.CD",
                          "ST.INT.RCPT.XP.ZS",
                          "ST.INT.TRNR.CD",
                          "ST.INT.TRNX.CD",
                          "ST.INT.TVLR.CD",
                          "ST.INT.TVLX.CD",
                          "ST.INT.XPND.CD",
                          "ST.INT.XPND.MP.ZS")
  tourism_dataset <- dataset[dataset$`Indicator Code` %in% tourism_indicators, ]
  # library() instead of require(): fail loudly if tidyr is missing.
  library(tidyr)
  colnames(tourism_dataset)[1] <- "Country_Name"
  colnames(tourism_dataset)[2] <- "Country_Code"
  colnames(tourism_dataset)[3] <- "Indicator_Name"
  colnames(tourism_dataset)[4] <- "Indicator_Code"
  ### combine the year columns into a single column with separate rows for each year
  tourism_dataset <- gather(tourism_dataset, Year, Indicator_Score,
                            -c(Country_Name, Country_Code, Indicator_Name, Indicator_Code))
  # BUG FIX: the original's tourism_dataset[-unlist(row_nums), ] dropped
  # EVERY row whenever no row contained an NA (unlist(list()) is NULL).
  # complete.cases() keeps exactly the fully observed rows in all cases.
  tourism_dataset <- tourism_dataset[complete.cases(tourism_dataset), , drop = FALSE]
  return(tourism_dataset)
}
# Extract the goods-and-services trade indicators from a wide
# World-Bank-style table, reshape the year columns to long format, and
# drop rows containing any missing value.
get_trade_dataset <- function(dataset){
  trade_indicators <- c("NE.EXP.GNFS.CD",
                        "NE.EXP.GNFS.KD",
                        "NE.EXP.GNFS.KD.ZG",
                        "NE.EXP.GNFS.ZS",
                        "NE.IMP.GNFS.CD",
                        "NE.IMP.GNFS.KD",
                        "NE.IMP.GNFS.KD.ZG",
                        "NE.IMP.GNFS.ZS",
                        "NE.RSB.GNFS.CD",
                        "NE.RSB.GNFS.ZS",
                        "NE.TRD.GNFS.ZS")
  trade_dataset <- dataset[dataset$`Indicator Code` %in% trade_indicators, ]
  # library() instead of require(): fail loudly if tidyr is missing.
  library(tidyr)
  colnames(trade_dataset)[1] <- "Country_Name"
  colnames(trade_dataset)[2] <- "Country_Code"
  colnames(trade_dataset)[3] <- "Indicator_Name"
  colnames(trade_dataset)[4] <- "Indicator_Code"
  ### combine the year columns into a single column with separate rows for each year
  trade_dataset <- gather(trade_dataset, Year, Indicator_Score,
                          -c(Country_Name, Country_Code, Indicator_Name, Indicator_Code))
  # BUG FIX: the original's trade_dataset[-unlist(row_nums), ] dropped
  # EVERY row whenever no row contained an NA (unlist(list()) is NULL).
  # complete.cases() keeps exactly the fully observed rows in all cases.
  trade_dataset <- trade_dataset[complete.cases(trade_dataset), , drop = FALSE]
  return(trade_dataset)
}
# Extract the merchandise-exports indicator (TX.VAL.MRCH.CD.WT) from a wide
# World-Bank-style table, reshape the year columns to long format, and drop
# rows containing any missing value.
get_net_trade_dataset <- function(dataset){
  net_trade_indicators <- c("TX.VAL.MRCH.CD.WT")
  net_trade_dataset <- dataset[dataset$`Indicator Code` %in% net_trade_indicators, ]
  # library() instead of require(): fail loudly if tidyr is missing.
  library(tidyr)
  colnames(net_trade_dataset)[1] <- "Country_Name"
  colnames(net_trade_dataset)[2] <- "Country_Code"
  colnames(net_trade_dataset)[3] <- "Indicator_Name"
  colnames(net_trade_dataset)[4] <- "Indicator_Code"
  ### combine the year columns into a single column with separate rows for each year
  net_trade_dataset <- gather(net_trade_dataset, Year, Indicator_Score,
                              -c(Country_Name, Country_Code, Indicator_Name, Indicator_Code))
  # BUG FIX: the original's net_trade_dataset[-unlist(row_nums), ] dropped
  # EVERY row whenever no row contained an NA (unlist(list()) is NULL).
  # complete.cases() keeps exactly the fully observed rows in all cases.
  net_trade_dataset <- net_trade_dataset[complete.cases(net_trade_dataset), , drop = FALSE]
  return(net_trade_dataset)
}
|
9644261333be2dcd002543d4271c19e9b7d48092
|
bd43b64c52566ff9a18c12608728bcac15468877
|
/man/pnls.Rd
|
55f2a30bc90cddceaef51b4d8657b679932b26bd
|
[] |
no_license
|
cran/nlsr
|
09cef8d9d081ce911b5ca77f34ceb1f7465f33c4
|
b1f05b425dfa0570fde55543026a09ffa3cbd2ca
|
refs/heads/master
| 2023-05-25T21:49:56.874260
| 2023-05-10T14:50:02
| 2023-05-10T14:50:02
| 81,798,223
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 275
|
rd
|
pnls.Rd
|
\name{pnls}
\alias{pnls}
\title{pnls}
\usage{
pnls(x)
}
\arguments{
\item{x}{an nls() result object from nls() or nlsLM()}
}
\value{
none
}
\description{
Compact display of specified \code{nls} object \code{x}
}
\author{
J C Nash 2014-7-16, 2023-5-8 nashjc _at_ uottawa.ca
}
|
7172ca187689d403223dce14acfd34d960590fe3
|
56e14c3b514a0af4d8967cee1171b96bcab0c27c
|
/Munge.R
|
25bdf23e1fed13de182f23d0a47cee949ffcd8a0
|
[] |
no_license
|
rgknight/ma-enrollment-analysis
|
cbe149648cecbb67c0b48d956a993f9599a58c67
|
9db8fbee64b209d2aecec94ee65bfbb7526c0933
|
refs/heads/master
| 2020-03-28T15:49:06.492494
| 2015-05-08T12:49:49
| 2015-05-08T12:49:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,137
|
r
|
Munge.R
|
# Calculate the number of students enrolled in Boston by Grade
# library() instead of require(): a missing package should stop the script,
# not just return FALSE with a warning.
library(dplyr)
library(tidyr)
library(stringr)
options(stringsAsFactors = FALSE)  # spell out FALSE; T/F are reassignable
# NOTE(review): hard-coded absolute path makes the script non-portable;
# kept for compatibility -- consider a relative path or here::here().
setwd('C:/Users/rknight/Documents/GitHub/ma-doe-data')
# School Files
# Read every enrollment-by-grade CSV whose filename matches `thisone`
# ("district" or "school"), stack them with a `year` column parsed from the
# filename, normalise the two known column layouts, reshape to long
# (one row per entity/year/grade), and return rows with positive enrollment
# plus each entity's highest grade offered.
cleanup <- function(thisone){
# NOTE(review): pattern=paste(thisone) is just `thisone`; the paste() is redundant.
ifiles <- list.files(path= "data/enrollmentbygrade", pattern=paste(thisone))
all <- NULL
for (file in ifiles) {
print(file)
# sep='/' here joins the directory and filename (yields a double slash, which
# the OS tolerates).
raw <- read.csv(paste('data/enrollmentbygrade/', file, sep = '/'))
# Year is the third space-separated token of the filename, minus ".csv".
parts <- strsplit(file, " ")
year <- parts[[1]][3]
year <-substr(year, 1, nchar(year)-4) #remove the last 4 char (.csv)
raw$year <- year
# There are two sets of columns
if ('UG' %in% names(raw)) {
raw <- mutate(raw, SP = NA)
} else {
raw <- mutate(raw, UG = NA, X13 = NA, X14 = NA)
}
# NOTE(review): rbind inside a loop is O(n^2); fine for this file count.
all <- rbind(all, raw)
}
names(all) <- tolower(names(all))
# Unify the entity-name column: school files have `school`, district files `district`.
if ("school" %in% names(all)) {
tmp <- rename(all, name = school)
}
else{
tmp <- rename(all, name = district)
}
# Wide -> long; grade parsed from the column name ("k" -> 0, "pk" -> -1).
long <- tmp %>%
gather(measure, students, -name, -org.code, -year) %>%
mutate(grade = extract_numeric(measure),
grade = ifelse(measure == "k", 0,
ifelse(measure == 'pk', -1, grade)))
long %>%
select(-measure) %>%
filter(extract_numeric(students) > 0) %>%
group_by(name) %>%
mutate(maxgrade = max(grade))
}
# Build district- and school-level long tables, then join school addresses.
district <- cleanup("district")
schools <- cleanup("school")
# Go get location information for schools
locations <- read.csv('data/address-source/school addresses.csv')
closed.charter <- read.csv('data/address-source/closed charter school addresses.csv')
closed.bps <- read.csv('data/address-source/closed BPS school addresses.csv')
zips <- read.csv('data/address-source//boston zip codes.csv')
locations <- rbind(locations, closed.charter %>% select(-school), closed.bps %>% select(-school) )
# Boston flag = last five characters of the address match a Boston zip.
locations$zip <- as.numeric(str_sub(locations$address, -5))
locations$boston <- locations$zip %in% zips$zip
locations <- locations %>% rename(org.code = orgcode)
long.loc <- left_join(schools, locations)
# Charter detection: name contains "charter" or ends in "cs".
long.loc$charter <- grepl("charter", tolower(long.loc$name))
long.loc$charter.alt <- grepl("cs$", tolower(long.loc$name))
long.loc$charter <- ifelse(long.loc$charter.alt==TRUE, TRUE, long.loc$charter)
# Export lists of charter / BPS schools still missing an address.
need.addy <- unique(long.loc %>%
filter(charter==TRUE, is.na(boston)) %>%
select(org.code, name))
write.csv(need.addy, "data/address-source/Charter need address.csv", na="", row.names = F)
long.loc$boston2 <- grepl("^boston", tolower(long.loc$name))
need.addy <- unique(long.loc %>%
filter(charter==FALSE, is.na(boston), boston2 == TRUE) %>%
select(org.code, name))
write.csv(need.addy, "data/address-source/BPS need address.csv", na="", row.names = F)
# Aggregate all Boston charters into a single pseudo-district (org.code 999)
# and stack with the BPS district row (org.code 350000).
bos.charter.enroll <- long.loc %>%
filter(charter==TRUE, boston == TRUE) %>%
group_by(year, grade) %>%
summarise(students = sum(extract_numeric(students), na.rm = T),
name = "Boston Charters",
org.code = 999,
maxgrade = NA)
boston.enroll <- rbind(district %>% filter(org.code==350000), bos.charter.enroll)
boston.enroll$name <- with(boston.enroll, ifelse(org.code ==350000 , "District School", name))
write.csv(boston.enroll, "data/enroll-output/Total Boston Enrollment.csv", na="", row.names = F)
write.csv(long.loc %>% filter(boston == TRUE), "data/enroll-output/Boston School Enrollment with Location.csv", na="", row.names = F)
# Goal: Round lat/long, take the average in an area
# Truncating lat/long to 6/7 characters coarsens coordinates so nearby
# schools collapse to one "location" key.
latlong <- long.loc %>%
filter(year %in% c(2005, 2010, 2015), !is.na(grade), boston == TRUE) %>%
select(org.code, year, students, grade, latitude, longitude, zip, address, charter, name) %>%
mutate( latitude = str_sub(as.character(latitude), 1, 6),
longitude = str_sub(as.character(longitude), 1, 7),
latlong = paste(as.character(latitude), as.character(longitude)))
latlong2 <- latlong %>%
group_by(year, grade, latlong) %>%
summarise(students = sum(extract_numeric(students), na.rm = T) ,
longitude = max(longitude),
latitude = max(latitude))
# Wide by year, then compute 2015-vs-2010 and 2015-vs-2005 changes.
latlong2 <- latlong2 %>%
mutate(students = ifelse(is.na(students), 0, students)) %>%
spread(year, students, fill = 0) %>%
mutate(stu_2015_2010 = `2015` - `2010`,
stu_2015_2005 = `2015` - `2005`,
stu_2015_2010_abs = abs(stu_2015_2010),
stu_2015_2005_abs = abs(stu_2015_2005))
# Attach one representative name/address per coarse location.
join_names <- latlong %>% select(latlong, name, address)
join_names <- distinct(join_names)
join_names <- join_names %>%
group_by(latlong) %>%
summarise(name = last(name),
address = last(address))
latlong3 <- left_join(latlong2, join_names)
write.csv(latlong3, "data/enroll-output/Boston Enrollment Wide.csv", row.names = F, na = "0")
# Figure out grade level configurations
gl <- long.loc %>%
filter(boston == TRUE, !is.na(grade), students > 0) %>%
group_by(org.code, year) %>%
mutate(maxgrade = max(grade),
mingrade = min(grade))
write.csv(gl, "data/enroll-output/Boston Enrollment by School Type.csv", row.names = F, na = "0")
|
80e06ca0da393d89685fabfe8c43222949e441e7
|
53ff4594cd6256d1d222adbab1b96ceb5391cada
|
/kallisto_deseq2/Fig1D_heatmaps_proteostasis.R
|
55e8d3b19ecd382e36789e01383c4b05ff985445
|
[] |
no_license
|
brunetlab/Leeman_et_al_2017
|
1803a443bfffb6b8c23b659134db8e2871686617
|
8d8e9b071f2c06dfb0332e67ee3eb3a693eb29d8
|
refs/heads/master
| 2021-09-05T04:11:58.001606
| 2018-01-24T04:06:33
| 2018-01-24T04:06:33
| 113,623,951
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,204
|
r
|
Fig1D_heatmaps_proteostasis.R
|
## Make three (small) heatmaps that are attached underneath each other in Adobe Illustrator: proteasome, lysosome, unfolded protein binding.
library(Biobase)
library(pheatmap)
library(RColorBrewer)
library(Hmisc)
# Loads an ExpressionSet named `eset` (variance-stabilised, replicate-corrected).
load("Data/eset_vsd_replicate_corrected.RData")
# Reorder samples into the column order wanted for the figure.
eset <- eset[, c(21:24, 17:20, 34:35, 31:33,
28:30, 25:27, 13:16, 9:12)]
rownames(eset) <- toupper(rownames(eset))
## Lysosome, Proteasome, GO unfolded protein binding
# NOTE(review): "PKEGG_Lysosome.txt" -- possible typo for "KEGG_Lysosome.txt";
# confirm the file name on disk.
genes1 <- sort(toupper(read.table("Data/PKEGG_Lysosome.txt",
stringsAsFactors = FALSE)[,1]))
genes2 <- sort(toupper(read.table("Data/KEGG_Proteasome.txt",
stringsAsFactors = FALSE)[,1]))
genes3 <- sort(toupper(read.table("Data/Chaperones_Dena_Oct22_2015.txt",
stringsAsFactors = FALSE,
skip = 1)[,1]))
# Lysosome heatmap: rows = gene-set members present in eset, row-scaled.
eset.part <- eset[is.element(rownames(eset), genes1),]
data <- exprs(eset.part)
colnames(data) <- paste(pData(eset.part)$label, pData(eset)$age)
# 114 genes
pdf("Results/heatmap_Lysosome.pdf", width = 4, height = 2)
pheatmap(data,
scale = "row",
color = colorRampPalette(rev(brewer.pal(n = 11, name = "RdYlBu")))(100),
cluster_cols = FALSE,
show_rownames = FALSE,
show_colnames = FALSE)
dev.off()
# Proteasome heatmap (same settings, different gene set and panel size).
eset.part <- eset[is.element(rownames(eset), genes2),]
data <- exprs(eset.part)
colnames(data) <- paste(pData(eset.part)$label, pData(eset)$age)
pdf("Results/heatmap_Proteasome.pdf", width = 3, height = 0.9)
pheatmap(data,
scale = "row",
color = colorRampPalette(rev(brewer.pal(n = 11, name = "RdYlBu")))(100),
cluster_cols = FALSE,
show_rownames = FALSE,
show_colnames = FALSE)
dev.off()
# Chaperone / unfolded-protein-binding heatmap.
eset.part <- eset[is.element(rownames(eset), genes3),]
data <- exprs(eset.part)
colnames(data) <- paste(pData(eset.part)$label, pData(eset)$age)
pdf("Results/heatmap_chaperones.pdf", width = 3, height = 2)
pheatmap(data,
scale = "row",
color = colorRampPalette(rev(brewer.pal(n = 11, name = "RdYlBu")))(100),
cluster_cols = FALSE,
show_rownames = FALSE,
show_colnames = FALSE)
dev.off()
|
216d66024437ce60d42640f29a1228aaf2376a85
|
0dca3db7f3534c43d1dce59a5156680621164ebc
|
/Project 9.R
|
82885c752cf46b78bbc7915c335c4c892513fbe6
|
[] |
no_license
|
cRistiancec/Credit-Risk-Modeling-ARIMA-Time-Series-Forecasting-ADF-Test
|
cfd95624ca07428bf70b37404537e230023704a3
|
c1f85e51d8a55ad8d4076e78e198a60f4b56cef4
|
refs/heads/main
| 2023-01-01T19:13:13.193764
| 2020-10-28T13:06:15
| 2020-10-28T13:06:15
| 397,710,802
| 1
| 0
| null | 2021-08-18T19:15:36
| 2021-08-18T19:15:36
| null |
UTF-8
|
R
| false
| false
| 25,133
|
r
|
Project 9.R
|
# Credit-risk modelling script: load the raw company financials, clean the
# column names, and derive the Default target (net worth next year <= 0).
getwd()
# NOTE(review): setwd() + rm(list = ls()) in a script are anti-patterns
# (non-portable path; wipes the user's workspace) -- kept as-is.
setwd("C:/Users/HP/Documents/R Dataset")
rm(list = ls())
library(DataExplorer)
library(ggplot2)
library(readxl)
library(Hmisc)
library(naniar)
library(nFactors)
library(psych)
train <- read_excel("GL-raw-data.xlsx")
summary(train)
str(train)
names(train)
describe(train)
dim(train)
#reform variable names: replace characters that are awkward in R identifiers
names(train) <- gsub(" ", ".", names(train))
names(train) <- gsub("/", "by", names(train))
names(train) <- gsub("%", "p", names(train))
names(train) <- gsub("&", "n", names(train))
names(train) <- gsub("-", ".", names(train))
names(train)
#Add the Default variable (1 = net worth next year is not positive)
# NOTE(review): rm(Default) only warns if no such object exists; it is a no-op
# on a fresh session.
rm(Default)
train$Default <- ifelse(train$Networth.Next.Year >0, 0, 1)
prop.table(table(train$Default))
#Convert "NA" to 0 before converting to the correct data type
# (these columns were read as character with literal "NA" strings)
train$Creditors.turnover <- gsub("NA", "0", train$Creditors.turnover)
train$Debtors.turnover <- gsub("NA", "0", train$Debtors.turnover)
train$PE.on.BSE <- gsub("NA", "0", train$PE.on.BSE)
train$Total.income <- gsub("NA", "0", train$Total.income)
#Convert to the correct data type (after the "NA" -> "0" string cleanup above)
str(train)
# BUG FIX: PE.on.BSE is a column of `train`, not a free-standing object;
# the original sum(is.na(PE.on.BSE)) would fail with "object not found"
# (attach(train) only happens much later in this script).
sum(is.na(train$PE.on.BSE))
train$Creditors.turnover <- as.numeric(train$Creditors.turnover)
train$Debtors.turnover <- as.numeric(train$Debtors.turnover)
train$PE.on.BSE <- as.numeric(train$PE.on.BSE)
train$Default <- as.factor(train$Default)
train$Total.income <- as.numeric(train$Total.income)
#MISSING VALUE TREATMENT
library(VIM)
plot_missing(train)
gg_miss_var(train)
# BUG FIX: the original assigned the FULL-LENGTH vector
# (Sales + Income.from.financial.services + Other.income) into the NA subset
# of Total.income, which recycles values from the wrong rows. Index the
# right-hand side by the same NA mask so each company gets its own sum.
miss.ti <- is.na(train$Total.income)
train$Total.income[miss.ti] <- train$Sales[miss.ti] +
train$Income.from.financial.services[miss.ti] +
train$Other.income[miss.ti]
# Share of originally missing Total.income rows (kept from the original).
(3541-3256)/3541
#select variables
train <- train[,c(1:5,12:16,29:31,36,39,41:43,49,52,53)]
names(train)
train <- na.omit(train)
#OUTLIER TREATMENT
# Quick visual scan of all retained columns before capping.
boxplot(train)
# Per-variable diagnostic summary: non-missing count, missing count, an
# outlier flag (TRUE when the range crosses mean +/- 3 SD), mean, SD,
# extremes, 1st/99th percentiles and the +/- 3 SD cut-offs.
mystats <- function(x)
{
  n.missing <- sum(is.na(x))      # missing values
  vals <- x[!is.na(x)]            # observed values only
  avg <- mean(vals)
  stdev <- sd(vals)
  upper.cut <- avg + 3 * stdev    # upper 3-sigma limit
  lower.cut <- avg - 3 * stdev    # lower 3-sigma limit
  lo <- min(vals)
  hi <- max(vals)
  # flag the variable when any value lies beyond either cut-off
  has.outlier <- hi > upper.cut | lo < lower.cut
  c(n = length(vals), nmiss = n.missing, outlier_flag = has.outlier,
    mean = avg, stdev = stdev, min = lo,
    p1 = quantile(vals, 0.01), p99 = quantile(vals, 0.99), max = hi,
    UC = upper.cut, LC = lower.cut)
}
#select the variables from the dataset, on which the calculations are to be performed.
# apply() coerces the data frame to a matrix column-wise; all 20 columns here
# are numeric so that is safe.  One row of diagnostics per variable.
diag_stats<-t(data.frame(apply(train[,c(1:20)], 2, mystats)))
#Missing Values & outliers
View(diag_stats)
#Outlier Capping
# Winsorise the heavy-tailed financials at their 1st/99th percentiles.
# The original repeated the same two capping statements for each column;
# this helper reproduces that exact order -- cap the top first, then the
# bottom, re-computing each quantile on the current vector, as the
# original statements did.
cap_tails <- function(v, lo = 0.01, hi = 0.99) {
  v[v > quantile(v, hi)] <- quantile(v, hi)
  v[v < quantile(v, lo)] <- quantile(v, lo)
  v
}
# Columns capped by the original script.  Quick.ratio.times,
# Current.ratio.times and Cash.to.current.liabilities.times were commented
# out there and are intentionally left uncapped here as well.
cap.cols <- c("Networth.Next.Year", "Total.assets", "Net.worth",
              "Total.income", "PBDITA.as.p.of.total.income",
              "PBT.as.p.of.total.income", "PAT.as.p.of.total.income",
              "Cash.profit.as.p.of.total.income", "PAT.as.p.of.net.worth",
              "TOLbyTNW", "Total.term.liabilities.by.tangible.net.worth",
              "Contingent.liabilities.by.Net.worth-p", "Net.working.capital",
              "Debt.to.equity.ratio.times",
              "Cash.to.average.cost.of.sales.per.day",
              "Creditors.turnover", "Debtors.turnover", "EPS", "PE.on.BSE")
for (nm in cap.cols) {
  train[[nm]] <- cap_tails(train[[nm]])
}
# Correlation check: visualise pairwise correlations among the predictors.
# Highly correlated ratios motivate the PCA / factor reduction further below.
library(corrplot)
# plot_correlation() comes from DataExplorer — presumably loaded earlier in
# the script; verify against the top of the file.
plot_correlation(train)
# Rounded correlation matrix of the first 20 (numeric) columns.
cormat <- round(cor(train[,c(1:20)]),2)
# psych-style correlation heat map of the same matrix.
cor.plot(cormat)
# Upper-triangle circle plot; column 21 is dropped (presumably the Default
# factor, which cor() cannot handle) — TODO confirm.
corrplot(cor(train[,c(-21)]), method = "circle", type = "upper")
# UNIVARIATE ANALYSIS — CONTINUOUS VARIABLES
# One boxplot of each continuous predictor split by default status.
# The original attach()ed the data frame (pollutes the search path) and
# repeated 18 near-identical boxplot() calls; a loop over the column names
# with the formula interface's `data =` argument draws the same plots.
library(viridis)
univar_cols <- c(
  "Total.assets", "Net.worth", "Total.income",
  "PBDITA.as.p.of.total.income", "PBT.as.p.of.total.income",
  "PAT.as.p.of.total.income", "Cash.profit.as.p.of.total.income",
  "PAT.as.p.of.net.worth", "TOLbyTNW",
  "Total.term.liabilities.by.tangible.net.worth",
  "Contingent.liabilities.by.Net.worth.p", "Net.working.capital",
  "Debt.to.equity.ratio.times", "Cash.to.average.cost.of.sales.per.day",
  "Creditors.turnover", "Debtors.turnover", "EPS", "PE.on.BSE"
)
for (v in univar_cols) {
  # reformulate() builds `<v> ~ Default`; ylab makes each plot identifiable.
  boxplot(reformulate("Default", response = v), data = train,
          col = viridis(3), ylab = v)
}
# BIVARIATE ANALYSIS
# Scatter plots of selected predictor pairs, coloured by default status,
# arranged two per page. qplot() comes from ggplot2 (note: deprecated in
# ggplot2 >= 3.4 — consider ggplot() if the package is upgraded).
## 1. SCATTER PLOT
p1 <- qplot(Net.working.capital, Cash.to.average.cost.of.sales.per.day, colour = Default, data = train)
p2 <- qplot(Total.assets, Debtors.turnover, colour = Default, data = train)
gridExtra::grid.arrange(p1, p2, ncol = 2)
p3 <- qplot(Net.worth, PAT.as.p.of.net.worth, colour = Default, data = train)
p4 <- qplot(Total.income, Cash.profit.as.p.of.total.income, colour = Default, data = train)
gridExtra::grid.arrange(p3, p4, ncol = 2)
p5 <- qplot(Net.worth, TOLbyTNW, colour = Default, data = train)
p6 <- qplot(Total.assets, Contingent.liabilities.by.Net.worth.p, colour = Default, data = train)
gridExtra::grid.arrange(p5, p6, ncol = 2)
p7 <- qplot(Cash.profit.as.p.of.total.income, Cash.to.average.cost.of.sales.per.day, colour = Default, data = train)
p8 <- qplot(PAT.as.p.of.net.worth, Debtors.turnover, colour = Default, data = train)
gridExtra::grid.arrange(p7, p8, ncol = 2)
p9 <- qplot(Cash.to.average.cost.of.sales.per.day, TOLbyTNW, colour = Default, data = train)
p10 <- qplot(Debtors.turnover, Debt.to.equity.ratio.times, colour = Default, data = train)
gridExtra::grid.arrange(p9, p10, ncol = 2)
# ---------------------------------------------------------------------------
# PCA
# ---------------------------------------------------------------------------
# NOTE(review): the original section separators were bare lines of dots;
# R parses a run of dots as a symbol name and errors on it when the script
# is sourced, so they have been converted to comments.

# Dataset creation: drop columns 1, 2 and 21 (id/date/response), keeping the
# numeric ratios for the principal component analysis.
PCA.train <- train[, c(-1, -2, -21)]

# Eigen decomposition of the correlation matrix (the variables are on very
# different scales, so the correlation matrix is appropriate).
ev <- eigen(cor(PCA.train))
eigenvalues <- ev$values
eigenvectors <- ev$vectors
plot(eigenvalues, type = "lines", xlab = "Principal Component",
     ylab = "Eigen Values")

# Scree plot: the "elbow" guides the number of factors retained below.
factors <- 1:18
scree <- data.frame(factors, eigenvalues)
plot(scree, main = "Scree Plot", col = "Blue", ylim = c(0, 4),
     xlab = "Principal Component", ylab = "Eigen Values", xlim = c(0, 10))
lines(scree, col = "Red")
eigenvalues

library(psych)
# Unrotated vs varimax-rotated 4-factor solutions. The orthogonal (varimax)
# rotation keeps the factors uncorrelated, which suits the logistic
# regression fitted on the scores later on.
unrotate <- principal(PCA.train, nfactors = 4, rotate = "none")
unrotate
rotate <- principal(PCA.train, nfactors = 4, rotate = "varimax")
rotate
rotatedprofile <- plot(rotate, row.names(rotate$loadings), cex = 1.0)
plot(rotate)

# Translate the PCA into a regression data set: factor scores plus the
# response, with interpretable factor names.
ndata <- as.data.frame(rotate$scores)
PCA.train <- cbind(train$Default, ndata)
names(PCA.train) <- c("Default", "Profit", "Size", "Leverage", "Liquidity")
names(PCA.train)
summary(PCA.train)
plot_correlation(PCA.train)
# ---------------------------------------------------------------------------
# SMOTE Dataset & Logistic Regression
# ---------------------------------------------------------------------------
# (The original separator lines were bare runs of dots, which error when the
# script is sourced; they are now comments.)
library(DMwR)
set.seed(1234)

# SMOTE: oversample the minority (default) class and undersample the
# majority class to balance the training data.
SMOTE.train <- SMOTE(Default ~ ., PCA.train, perc.over = 200, k = 5,
                     perc.under = 100)
prop.table(table(SMOTE.train$Default))

library(car)

# LOGISTIC REGRESSION: model1 on the balanced PCA scores, model2 on the raw
# training variables for comparison.
# NOTE(review): the original formulas were `SMOTE.train$Default ~ .` and
# `train$Default ~ .`. With a `$`-qualified response, "." expands to ALL
# columns of `data` — including Default itself — so the response leaked into
# the predictors. `Default ~ .` is the intended specification.
model1 <- glm(Default ~ ., family = "binomial", data = SMOTE.train)
model2 <- glm(Default ~ ., data = train, family = "binomial")
summary(model2)
model1$fitted.values
vif(model1)  # multicollinearity check on the factor scores

# Prediction on the (balanced) training data.
# NOTE(review): predict.glm has no `data` argument — the original
# `predict(model1, data = SMOTE.train)` silently ignored it and returned
# link-scale fitted values; `newdata` plus type = "response" yields the
# probabilities the 0.1 cut-off below is meant for.
LR.prob <- predict(model1, newdata = SMOTE.train, type = "response")
LR.pred <- ifelse(LR.prob > 0.1, "1", "0")
table(SMOTE.train$Default, LR.pred)

# Variable importance.
library(caret)
varImp(model1)
imp <- as.data.frame(varImp(model2))
imp <- data.frame(names = rownames(imp), overall = imp$Overall)
imp[order(imp$overall, decreasing = TRUE), ]
# ---------------------------------------------------------------------------
# MODEL VALIDATION — load and prepare the hold-out data
# ---------------------------------------------------------------------------
# (Original separator lines were bare dots, which error on source; now comments.)
test <- read_excel("GL-validation_data.xlsx")  # requires readxl — presumably loaded earlier; confirm

# Reformat variable names the same way as the training data so the two data
# sets share column names.
names(test) <- gsub(" ", ".", names(test))
names(test) <- gsub("/", "by", names(test))
names(test) <- gsub("%", "p", names(test))
names(test) <- gsub("&", "n", names(test))
names(test) <- gsub("-", ".", names(test))
names(test)

# Replace the literal string "NA" (read in as text) with "0" before the
# columns are converted to numeric.
test$Creditors.turnover <- gsub("NA", "0", test$Creditors.turnover)
test$Debtors.turnover <- gsub("NA", "0", test$Debtors.turnover)
test$PE.on.BSE <- gsub("NA", "0", test$PE.on.BSE)
test$Total.income <- gsub("NA", "0", test$Total.income)

str(test)
# Impute missing total income as sales + financial-services income + other
# income.
# NOTE(review): the original assigned the FULL-length sum vector to the
# subset of missing rows, so values were recycled into the wrong rows;
# indexing the right-hand side with the same mask aligns them correctly.
.miss_ti <- is.na(test$Total.income)
test$Total.income[.miss_ti] <-
  (test$Sales + test$Income.from.financial.services + test$Other.income)[.miss_ti]

# Convert to the correct data types.
test$Creditors.turnover <- as.numeric(test$Creditors.turnover)
test$Debtors.turnover <- as.numeric(test$Debtors.turnover)
test$Total.income <- as.numeric(test$Total.income)
test$PE.on.BSE <- as.numeric(test$PE.on.BSE)
test$Default...1 <- as.factor(test$Default...1)
names(test)[2] <- "Default"

# VARIABLE SELECTION: keep the same columns used for training.
test <- test[, c(1:5, 12:16, 29:31, 36, 39, 41:43, 49, 52)]

# MISSING VALUE TREATMENT: complete cases only.
test <- na.omit(test)
# OUTLIER CAPPING: winsorise every model variable at the 1st/99th
# percentiles, mirroring the treatment of the training data. The original
# repeated two statements per column; a helper applied in a loop does the
# same (upper cap first, then lower cap — identical to the original order).
cap_at_percentiles <- function(x, lower = 0.01, upper = 0.99) {
  hi <- quantile(x, upper)
  x[x > hi] <- hi
  lo <- quantile(x, lower)
  x[x < lo] <- lo
  x
}

test_cap_cols <- c(
  "Networth.Next.Year", "Total.assets", "Net.worth", "Total.income",
  "PBDITA.as.p.of.total.income", "PBT.as.p.of.total.income",
  "PAT.as.p.of.total.income", "Cash.profit.as.p.of.total.income",
  "PAT.as.p.of.net.worth", "TOLbyTNW",
  "Total.term.liabilities.by.tangible.net.worth",
  # NOTE(review): the original referred to the backticked name
  # `Contingent.liabilities.by.Net.worth-p`, but "-" was replaced with "."
  # when the columns were renamed above, so the dotted name is used here —
  # TODO confirm.
  "Contingent.liabilities.by.Net.worth.p",
  "Net.working.capital", "Debt.to.equity.ratio.times",
  "Cash.to.average.cost.of.sales.per.day", "Creditors.turnover",
  "Debtors.turnover", "EPS", "PE.on.BSE"
)
for (cl in test_cap_cols) {
  test[[cl]] <- cap_at_percentiles(test[[cl]])
}
# PCA on the validation data: extract a 4-factor varimax solution and keep
# the scores for scoring the logistic model.
# NOTE(review): this RE-FITS the PCA on the test data rather than projecting
# the test observations onto the rotation estimated on the training data
# (e.g. predict(rotate, data = ...)); the factors may therefore not be
# comparable between train and test — confirm this is intended.
PCA.test <- test[, c(-1, -2)]
rotate.test <- principal(PCA.test, nfactors = 4, rotate = "varimax")
rotate.test
newdf.test <- rotate.test$scores
ndata.test <- as.data.frame(newdf.test)
PCA.test <- cbind(test$Default, ndata.test)
names(PCA.test) <- c("Default", "Profit", "Size", "Leverage", "Liquidity")
# BUG FIX: the original printed names(regPCA), an object that is never
# defined anywhere in this script and would error at run time.
names(PCA.test)
# PREDICTION / VALIDATING the MODEL on the hold-out factor scores.
library(ROCR)  # prediction()/performance() are used below but were only loaded later in the original
library(ineq)  # Gini coefficient

LR.prob.test <- predict(model1, newdata = PCA.test, type = "response")
# BUG FIX: the original thresholded `LR.prob.txest` — a typo for
# LR.prob.test — which would error at run time.
LR.pred.test <- ifelse(LR.prob.test > 0.6, 1, 0)

# MODEL VALIDATION MEASURES
## CONFUSION MATRIX
## NOTE(review): LR.pred.test is already 0/1, so `> 0.01` merely maps it to
## FALSE/TRUE; kept as in the original.
LR_CM.test = table(PCA.test$Default, LR.pred.test > 0.01)
LR_CM.test
## ERROR RATE (off-diagonal / total)
(LR_CM.test[1, 2] + LR_CM.test[2, 1]) / nrow(PCA.test)
## ACCURACY (diagonal / total)
(LR_CM.test[1, 1] + LR_CM.test[2, 2]) / nrow(PCA.test)
## SENSITIVITY
LR_CM.test[1, 1] / sum(LR_CM.test[1, 1], LR_CM.test[1, 2])
## SPECIFICITY
LR_CM.test[2, 2] / sum(LR_CM.test[2, 1], LR_CM.test[2, 2])
## ROC CURVE
test.ROC = prediction(LR.prob.test, PCA.test$Default)
test.ROC.plot = performance(test.ROC, "tpr", "fpr")
plot(performance(test.ROC, "tpr", "fpr"), col = "red",
     main = "ROC Curve for Test Data")
abline(0, 1, lty = 8, col = "blue")
## KS: maximum separation between the TPR and FPR curves
max(test.ROC.plot@y.values[[1]] - test.ROC.plot@x.values[[1]])
## AUC
test.AUC = performance(test.ROC, "auc")
slot(test.AUC, "y.values")
## GINI COEFFICIENT
ineq(LR.pred.test, "gini")
# ---------------------------------------------------------------------------
# MODEL PERFORMANCE MEASURES (training data)
# ---------------------------------------------------------------------------
# (The original separator lines were bare runs of dots, which error when the
# script is sourced; they are now comments.)
# Author's notes, kept from the original:
# - ROC: AUC > 70% indicates a very good model; KS > 40 is good.
# - Confusion matrix gives sensitivity and specificity.
# - Probability-based measures: KS, ROC, AUC, concordance/discordance, Gini.
# - For regression, root mean squared error (lower is better).
# - KS: how well positives and negatives are separated vs a random model.
# - Lift: higher is better.
# - Gini = A / (A + B) = 2 * AUC - 1; measures how well the model bifurcates.
library(ROCR)
library(ineq)
library(InformationValue)
## CONFUSION MATRIX
## NOTE(review): LR.pred is a character vector ("0"/"1"), so `LR.pred > 0.01`
## coerces 0.01 to "0.01" and compares lexicographically, which happens to
## map "1" -> TRUE and "0" -> FALSE. Kept as in the original; the explicit
## form would be `LR.pred == "1"`.
LR_CM = table(SMOTE.train$Default, LR.pred > 0.01)
LR_CM
## ERROR RATE ([1,2] and [2,1] are the off-diagonal, misclassified cells)
(LR_CM[1, 2] + LR_CM[2, 1]) / nrow(SMOTE.train)
## ACCURACY
(LR_CM[1, 1] + LR_CM[2, 2]) / nrow(SMOTE.train)
## SENSITIVITY
LR_CM[1, 1] / sum(LR_CM[1, 1], LR_CM[1, 2])
## SPECIFICITY
LR_CM[2, 2] / sum(LR_CM[2, 1], LR_CM[2, 2])
## ROC CURVE
LR.ROC = prediction(LR.prob, SMOTE.train$Default)
LR.ROC.plot = performance(LR.ROC, "tpr", "fpr")
plot(performance(LR.ROC, "tpr", "fpr"), col = "red",
     main = "ROC Curve for Train Data")
abline(0, 1, lty = 8, col = "blue")
## KS
max(LR.ROC.plot@y.values[[1]] - LR.ROC.plot@x.values[[1]])
## AUC
LR.AUC = performance(LR.ROC, "auc")
slot(LR.AUC, "y.values")
# as.numeric(LR.AUC@y.values)
## GINI COEFFICIENT
ineq(LR.pred, "gini")
## Concordance
Concordance(actuals = SMOTE.train$Default, predictedScores = LR.pred)
library(pscl)
pR2(model1)
# McFadden = 0.4424367 indicates the model is not excellent, but is a good
# fit for the given dataset.
# DECILING: rank the scored training data into 10 probability deciles and
# tabulate defaults per decile (rank-ordering check for the model).
library(dplyr)
# SMOTE.train <- SMOTE.train[,c(-6,-7,-8)]
final <- data.frame(SMOTE.train, LR.prob)
final$LR.prob <- round(final$LR.prob, 2)
head(final)
L.F <- arrange(final, desc(LR.prob))
# cut_number() comes from ggplot2 — presumably loaded with the plotting
# packages earlier; confirm. Decile 10 = highest predicted probability.
L.F$decile <- with(L.F, cut_number(LR.prob, 10, labels = 10:1))
head(L.F)
train.score <- L.F %>% group_by(decile)
# One fold per decile, kept for later inspection.
Fold1 <- subset(train.score, decile == 1)
Fold2 <- subset(train.score, decile == 2)
Fold3 <- subset(train.score, decile == 3)
Fold4 <- subset(train.score, decile == 4)
Fold5 <- subset(train.score, decile == 5)
Fold6 <- subset(train.score, decile == 6)
Fold7 <- subset(train.score, decile == 7)
Fold8 <- subset(train.score, decile == 8)
Fold9 <- subset(train.score, decile == 9)
Fold10 <- subset(train.score, decile == 10)
# Defaults per decile.
# NOTE(review): summarise_each()/funs() are deprecated in dplyr; across() is
# the supported equivalent with identical behaviour. (If Default is a
# factor, sum() will error here — as it would have in the original; confirm
# Default is numeric at this point.)
train.score1 <- train.score %>%
  summarise(across(Default, sum)) %>%
  arrange(desc(decile))
# Observations per decile.
train.score2 <- L.F %>%
  group_by(decile) %>%
  summarise(Default = n()) %>%
  arrange(desc(decile))
train.table <- left_join(train.score1, train.score2, by = "decile")
|
6773cea45ed0ced719fa05b268a7c5b56c9aa36e
|
dc0d8d8ee472e4623b4b53097d220f80176d7ee6
|
/app.R
|
fe956e026a7d0effdbcdb08546e0ae2adb010b62
|
[] |
no_license
|
rodserr/trading-ML
|
75aa8eea1431375c6bb460becab5dd46171c0954
|
43d5f47b1fe703a5f0e6418e3ed50041da718887
|
refs/heads/master
| 2020-04-17T13:41:16.271881
| 2019-05-20T15:05:50
| 2019-05-20T15:05:50
| 166,625,875
| 0
| 0
| null | 2019-05-20T15:05:51
| 2019-01-20T05:15:43
|
TeX
|
UTF-8
|
R
| false
| false
| 8,568
|
r
|
app.R
|
# libraries----
# Shiny dashboard app exploring PCA of technical indicators for several
# stock indices. Helper functions (predict_tp, createIndicators,
# getPredictors) are defined in scripts/functions.R, sourced below.
library(shiny)
library(shinydashboard)
library(lubridate)
library(magrittr)
library(plotly)
library(tidyverse)
library(PerformanceAnalytics)
library(TTR)
library(reshape2)
library(dbplyr)
library(RPostgres)
library(gridExtra)
library(caret)
library(factoextra)
library(purrr)
library(xtable)
library(rsconnect)
# Project-local helpers used by the server's pca() reactive.
source('scripts/functions.R')
# Model Generator ----
# Load each index's historical price CSV into a list of data frames with
# standardised column names (timestamp/close/open/high/low), oldest first.
# The original grew the list inside a for-loop with rlist::list.append
# (quadratic-copy anti-pattern); lapply() builds the same unnamed list in
# one pass, so list_serie[[i]] indexing elsewhere is unchanged.
serie <- c('S&P_500', 'NASDAQ', 'NIKKEI_225', 'FTSE_100', 'BOVESPA')
list_serie <- lapply(serie, function(s) {
  read_csv(paste0('data/', s, '.csv'),
           # files use European number formatting
           locale = locale(decimal_mark = ",", grouping_mark = ".")) %>%
    select(-one_of(c('Vol.', '% var.'))) %>%   # drop volume / % change columns
    mutate(Fecha = dmy(Fecha)) %>%             # parse day-month-year dates
    setNames(c('timestamp', 'close', 'open', 'high', 'low')) %>%
    arrange(timestamp)                         # chronological order
})
# UI-----
# Dashboard layout: a sidebar with logos, tab navigation, index selector and
# sample-year slider; a body with two tabs ("Contributions" with eigenvalue /
# variance / per-component contribution plots, and "Factor Map" with a PCA
# variable map and axis/threshold controls).
ui <- shinyUI(dashboardPage(
  dashboardHeader(title = 'PCA Analisys', titleWidth = 240),
  # Sidebar----
  dashboardSidebar(
    # University / faculty logos linking to their sites.
    tags$a(href = "http://www.ucv.ve/",
           tags$img(src = "white-logo.png", height = 100, width = 100,
                    style = 'margin-left: 55px; margin-top: 12px;')),
    tags$a(href = "http://www.ucv.ve/estructura/facultades/facultad-de-ciencias-economicas-y-sociales.html",
           tags$img(src = "faces.png", height = 75, width = 150,
                    style = 'margin-left: 30px; margin-top: 12px;')),
    br(),
    br(),
    sidebarMenu(
      # Setting id makes input$tabs give the tabName of currently-selected tab
      id = "tabs",
      menuItem("Contributions", tabName = "contrib", icon = icon("bar-chart-o")),
      menuItem("Factor Map", tabName = "factorMap", icon = icon("dashboard"))),
    # input$index: numeric index (as string) into list_serie.
    selectInput('index',
                label = 'Choose Index',
                choices = list('S&P_500' = 1,
                               'NASDAQ' = 2,
                               'NIKKEI_225' = 3,
                               'FTSE_100' = 4,
                               'BOVESPA' = 5),
                selected = 1),
    # input$sample: last year included in the PCA window (2009..sample).
    sliderInput('sample', 'Choose Sample',
                min = 2013, max = 2017,
                value = 2013)
  ),
  # Body----
  dashboardBody(
    # Color BodyHeader same as DashboardHeader
    tags$head(tags$style(HTML('
                              .skin-blue .main-header .logo {
                              background-color: #3c8dbc;
                              }
                              .skin-blue .main-header .logo:hover {
                              background-color: #3c8dbc;
                              }
                              .content-wrapper {
                              background-color: #778899;
                              }
                              '))),
    tabItems(
      # Contributions tab: eigenvalue + explained-variance histograms and the
      # variable-contribution plots for components 1..7 (rendered in server).
      tabItem('contrib',
              fluidRow(column(6, box(plotlyOutput("eigenvalue"), width = 12)),
                       column(6, box(plotlyOutput("variance"), width = 12))),
              fluidRow(column(6, box(plotlyOutput("contrib1"), width = 12)),
                       column(6, box(plotlyOutput("contrib2"), width = 12))),
              fluidRow(column(6, box(plotlyOutput("contrib3"), width = 12)),
                       column(6, box(plotlyOutput("contrib4"), width = 12))),
              fluidRow(column(6, box(plotlyOutput("contrib5"), width = 12)),
                       column(6, box(plotlyOutput("contrib6"), width = 12))),
              fluidRow(column(6, box(plotlyOutput("contrib7"), width = 12)))
      ),
      # Factor Map tab: choose which two components span the variable map and
      # how many top-contributing variables to display.
      tabItem('factorMap',
              fluidRow(column(1, radioButtons('comp1', 'Select x axes component:',
                                              choices = list("Comp 1" = 1,
                                                             "Comp 2" = 2,
                                                             "Comp 3" = 3,
                                                             "Comp 4" = 4,
                                                             "Comp 5" = 5,
                                                             "Comp 6" = 6,
                                                             "Comp 7" = 7),
                                              selected = 1,
                                              inline = FALSE)),
                       column(1, radioButtons('comp2', 'Select y axes component:',
                                              choices = list("Comp 1" = 1,
                                                             "Comp 2" = 2,
                                                             "Comp 3" = 3,
                                                             "Comp 4" = 4,
                                                             "Comp 5" = 5,
                                                             "Comp 6" = 6,
                                                             "Comp 7" = 7),
                                              selected = 2,
                                              inline = FALSE)),
                       column(10, box(plotOutput("factorMapPlot"), width = 12))),
              fluidRow(column(10, sliderInput('fmtresh', 'Choose contribution treshold:',
                                              min = 2, max = 45,
                                              value = 20)))
      )
    ))))
# Server----
server <- function(input, output, session) {

  # Reactive PCA: label the chosen index's series with the take-profit /
  # stop-loss / horizon rule, build technical indicators, restrict to
  # 2009..input$sample and run an (unscaled) PCA on the predictor matrix.
  pca <- reactive({
    .tp <- 0.02    # take-profit threshold passed to predict_tp()
    .sl <- 0.025   # stop-loss threshold
    .h  <- 20      # horizon (bars)
    # (the original also defined an unused `.cut <- 0.5`; removed)
    i <- input$index %>% as.numeric()
    predictors_var <- list_serie[[i]] %>%
      predict_tp(tp = .tp, sl = .sl, h = .h) %>%
      mutate(class_2 = factor(class)) %>%
      select(-one_of('class')) %>%
      createIndicators() %>%
      filter(year(timestamp) %in% seq(2009, input$sample, 1)) %>%
      getPredictors()
    predictors_var %>% prcomp(scale = FALSE)
  })

  # Eigenvalue histogram.
  output$eigenvalue <- renderPlotly({
    fviz_eig(pca(), addlabels = TRUE,
             choice = 'eigenvalue',
             geom = 'bar',
             barfill = 'tomato3',
             barcolor = 'tomato3') +
      labs(title = 'Histograma de Autovalores', x = 'Componentes',
           y = 'Autovalores')
  })

  # Explained-variance histogram.
  output$variance <- renderPlotly({
    fviz_eig(pca(), addlabels = TRUE,
             choice = 'variance',
             geom = 'bar',
             barfill = 'tomato3',
             barcolor = 'tomato3') +
      labs(title = 'Histograma de variabilidad explicada', x = 'Componentes',
           y = '% Variabilidad explicada')
  })

  # Variable-contribution plots for components 1..7. A loop replaces the
  # seven copy-pasted renderPlotly blocks; lapply() gives each iteration its
  # own binding of `k`, so the reactives do not all capture the last value.
  lapply(1:7, function(k) {
    output[[paste0("contrib", k)]] <- renderPlotly({
      fviz_contrib(pca(), choice = "var", axes = k, top = 25) +
        labs(y = '% Contribución',
             title = paste0('Contribución de variables a Comp-', k))
    })
  })

  # Factor map of the two user-selected components, showing only the
  # input$fmtresh top-contributing variables.
  output$factorMapPlot <- renderPlot({
    comp1 <- input$comp1 %>% as.numeric()
    comp2 <- input$comp2 %>% as.numeric()
    contribt <- input$fmtresh %>% as.numeric()
    fviz_pca_var(pca(),
                 axes = c(comp1, comp2),
                 col.var = "contrib",
                 select.var = list(contrib = contribt),
                 # alpha.var = "contrib",
                 repel = TRUE)
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
baceb0301b572365443486b4c3e9e9ef64819cf2
|
3803548786efc94f399326c9d6e5066842538c37
|
/Definitive test July 16.R
|
a6b7a31b9a347a9b85b5f040196ffa4e82bc3f5b
|
[] |
no_license
|
JiayiQin/AshEcotox
|
97dd6c46c5e20724385a49801330cf8b9371dba6
|
c66f04879a17d989d40973821302221c3279219b
|
refs/heads/master
| 2021-01-21T17:31:58.426600
| 2018-03-12T11:34:41
| 2018-03-12T11:34:41
| 91,962,944
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,590
|
r
|
Definitive test July 16.R
|
# NOTE(review): this residuals() call referenced `LinerRegression`, which is
# never defined in this script — it appears to be a leftover from another
# analysis and errors on source, so it is left commented out for reference.
# PersonResiduals <- residuals(LinerRegression, "pearson")

# Point out the working folder (machine-specific absolute path — adjust to
# your environment before running).
setwd("/Users/JiayiQin/Dropbox/ASHBACK/BIOS/single-species test")

# Import the summary data file of the single-species tests.
SUM <- read.table(file = "Briefly summary.csv", header = TRUE, sep = ",",
                  dec = ".")
SUM
summary(SUM)

# --------------- Foulum soil, definitive test, Folsomia candida ------------
# Working subset created with subset().
FouDefFol <- subset(SUM, Soil.type == "Foulum soil" &
                      Test.type == "definitive" &
                      Species == "Folsomia candida",
                    select = Species:Juvenile, drop = FALSE)
FouDefFol

# Extract the columns used below (always qualified with the data frame they
# come from, as the original author noted).
FouDefFol_Conc <- FouDefFol$Concentration
FouDefFol_Adult <- FouDefFol$Adult
FouDefFol_Juvenile <- FouDefFol$Juvenile
FouDefFol_pH <- FouDefFol$pH
# NOTE(review): NormalDistribution() is not defined in this script —
# presumably a helper loaded from the author's environment; confirm it is
# available before sourcing.
NormalDistribution(FouDefFol_Juvenile)
##################### Foulum Folsomia Candida pH Turkey and Dunnett test######################
# ANOVA of juvenile counts by soil pH, followed by one-sided ("less", i.e.
# testing for a decrease) Dunnett (vs control) and Tukey (all pairwise)
# multiple comparisons. glht()/mcp() come from the multcomp package —
# presumably loaded in the author's environment; confirm.
# Rows with pH > 0 only (pH 0 apparently codes "not measured" — TODO confirm).
FouDefFol2 <- subset (FouDefFol, FouDefFol$pH>0,select=Species:Juvenile,drop=FALSE)
FouDefFol2
FFouDefFol_pH<- factor (FouDefFol2$pH)
ftestFolJuv_pH<-aov(FouDefFol2$Juvenile ~ FFouDefFol_pH)
summary(ftestFolJuv_pH)
Dunnett.FolJuv_pH<-glht(ftestFolJuv_pH,linfct=mcp(FFouDefFol_pH="Dunnett"),alternative = "less")
summary(Dunnett.FolJuv_pH)
Tukey.FolJuv_pH<-glht(ftestFolJuv_pH,linfct=mcp(FFouDefFol_pH="Tukey"),alternative = "less")
summary(Tukey.FolJuv_pH)
#####################Foulum Folsomia Candida pH EC 50######################
# EC10/EC50/EC90 from a 4-parameter log-logistic dose-response model (drc).
# The upper limit is bounded by the maximum observed juvenile count and the
# lower limit is bounded at 0.
library (drc)
FouDefFol2
UPPER<-max (FouDefFol2$Juvenile )
UPPER
fitFolJuv_pH<- drm(FouDefFol2$Juvenile~FouDefFol2$pH,
                   data=FouDefFol2,
                   fct = LL.4(names=c("Slope","Lower Limit","Upper Limit", "ED50")),
                   control=drmc(),
                   lowerl = c(-Inf, 0, -Inf, -Inf), # lower bounds per parameter
                   upperl = c(Inf, Inf, UPPER, Inf) # upper limit capped at max count
)
summary(fitFolJuv_pH)
ED(fitFolJuv_pH,c(10,50,90),interval="delta")
##################### Foulum Folsomia Candida Concentration Turkey and Dunnett test######################
# Same analysis with ash concentration as the dose variable (full data set,
# no pH filter).
FFouDefFol_Conc<- factor (FouDefFol_Conc)
ftestFolJuv_Conc<-aov(FouDefFol_Juvenile ~ FFouDefFol_Conc)
summary(ftestFolJuv_Conc)
Dunnett.FolJuv_Conc<-glht(ftestFolJuv_Conc,linfct=mcp(FFouDefFol_Conc="Dunnett"),alternative = "less")
summary(Dunnett.FolJuv_Conc)
Tukey.FolJuv_Conc<-glht(ftestFolJuv_Conc,linfct=mcp(FFouDefFol_Conc="Tukey"),alternative = "less")
summary(Tukey.FolJuv_Conc)
#####################Foulum Folsomia Candida Concentration EC 50######################
library (drc)
UPPER<-max (FouDefFol_Juvenile )
UPPER
fitFolJuv_Conc<- drm(FouDefFol_Juvenile ~ FouDefFol_Conc,
                     data=FouDefFol,
                     fct = LL.4(names=c("Slope","Lower Limit","Upper Limit", "ED50")),
                     control=drmc(),
                     lowerl = c(-Inf, 0, -Inf, -Inf), # lower bounds per parameter
                     upperl = c(Inf, Inf, UPPER, Inf) # upper limit capped at max count
)
summary(fitFolJuv_Conc)
ED(fitFolJuv_Conc,c(10,50,90),interval="delta")
###################################################################################################
# --------------- Foulum soil, definitive test, Onychiurus yodai ------------
FouDefOny <- subset(SUM, Soil.type == "Foulum soil" &
                      Test.type == "definitive" &
                      Species == "Onychiurus yodai",
                    select = Species:Juvenile, drop = FALSE)
FouDefOny
# Extract the columns used below, qualified with their data frame.
FouDefOny_Conc <- FouDefOny$Concentration
FouDefOny_Adult <- FouDefOny$Adult
FouDefOny_Juvenile <- FouDefOny$Juvenile
FouDefOny_pH <- FouDefOny$pH
# First 16 observations (the original wrote 0:16; R drops the 0 index, so
# 1:16 is the equivalent, explicit form).
NormalDistribution(FouDefOny_Juvenile[1:16])

# ----- Foulum O. yodai, pH: ANOVA with one-sided Dunnett and Tukey tests ---
FFouDefOny_pH <- factor(FouDefOny_pH)
# Juveniles: first 15 observations only (was 0:15 in the original).
ftestOnyJuv_pH <- aov(FouDefOny_Juvenile[1:15] ~ FFouDefOny_pH[1:15])
summary(ftestOnyJuv_pH)
Dunnett.OnyJuv_pH <- glht(ftestOnyJuv_pH,
                          linfct = mcp(FFouDefOny_pH = "Dunnett"),
                          alternative = "less")
summary(Dunnett.OnyJuv_pH)
Tukey.OnyJuv_pH <- glht(ftestOnyJuv_pH,
                        linfct = mcp(FFouDefOny_pH = "Tukey"),
                        alternative = "less")
summary(Tukey.OnyJuv_pH)

# Adult survival by pH.
FFouDefOny_pH <- factor(FouDefOny_pH)
ftestOnyAdult_pH <- aov(FouDefOny_Adult ~ FFouDefOny_pH)
# BUG FIX: the original printed summary(ftestOnyJuv_pH) here (copy-paste
# slip); the adult model just fitted is the one to summarise.
summary(ftestOnyAdult_pH)
Dunnett.OnyAdult_pH <- glht(ftestOnyAdult_pH,
                            linfct = mcp(FFouDefOny_pH = "Dunnett"),
                            alternative = "less")
summary(Dunnett.OnyAdult_pH)
Tukey.OnyAdult_pH <- glht(ftestOnyAdult_pH,
                          linfct = mcp(FFouDefOny_pH = "Tukey"),
                          alternative = "less")
summary(Tukey.OnyAdult_pH)

# ----- Foulum O. yodai, pH: EC10/EC50/EC90 from 4-parameter log-logistic ---
library(drc)
UPPER <- max(FouDefOny_Juvenile)
UPPER
fitOnyJuv_pH <- drm(FouDefOny_Juvenile ~ FouDefOny_pH,
                    data = FouDefOny,
                    fct = LL.4(names = c("Slope", "Lower Limit",
                                         "Upper Limit", "ED50")),
                    control = drmc(),
                    lowerl = c(-Inf, 0, -Inf, -Inf),  # lower bounds per parameter
                    upperl = c(Inf, Inf, UPPER, Inf)) # upper limit capped at max count
summary(fitOnyJuv_pH)
ED(fitOnyJuv_pH, c(10, 50, 90), interval = "delta")

UPPER <- max(FouDefOny_Adult)
UPPER
# Adult survival modelled as a proportion of the 15 animals introduced per
# replicate, with a binomial error structure (upper limit bounded at 1).
p_FouDefOny_Adult <- FouDefOny_Adult / 15
fitOnyAdult_pH <- drm(p_FouDefOny_Adult ~ FouDefOny_pH,
                      data = FouDefOny,
                      fct = LL.4(names = c("Slope", "Lower Limit",
                                           "Upper Limit", "ED50")),
                      control = drmc(),
                      lowerl = c(-Inf, 0, -Inf, -Inf),
                      upperl = c(Inf, Inf, 1, Inf),
                      type = "binomial")
summary(fitOnyAdult_pH)
ED(fitOnyAdult_pH, c(10, 50, 90), interval = "delta")
##############################################################################################################
##################### Foulum Onychiurus yodai Concentration Turkey and Dunnett test######################
# ANOVA of O. yodai juvenile and adult counts by ash concentration, with
# one-sided ("less") Dunnett and Tukey multiple comparisons (multcomp).
FFouDefOny_Conc<- factor (FouDefOny_Conc)
# Juveniles: first 15 observations only (0:15 is equivalent to 1:15 — R
# drops the 0 index).
ftestOnyJuv_Conc<-aov(FouDefOny_Juvenile[0:15] ~ FFouDefOny_Conc[0:15])
summary(ftestOnyJuv_Conc)
Dunnett.OnyJuv_Conc<-glht(ftestOnyJuv_Conc,linfct=mcp(FFouDefOny_Conc="Dunnett"),alternative = "less")
summary(Dunnett.OnyJuv_Conc)
Tukey.OnyJuv_Conc<-glht(ftestOnyJuv_Conc,linfct=mcp(FFouDefOny_Conc="Tukey"),alternative = "less")
summary(Tukey.OnyJuv_Conc)
# Adult survival by concentration.
ftestOnyAdult_Conc<-aov(FouDefOny_Adult ~ FFouDefOny_Conc)
summary(ftestOnyAdult_Conc)
Dunnett.OnyAdult_Conc<-glht(ftestOnyAdult_Conc,linfct=mcp(FFouDefOny_Conc="Dunnett"),alternative = "less")
summary(Dunnett.OnyAdult_Conc)
Tukey.OnyAdult_Conc<-glht(ftestOnyAdult_Conc,linfct=mcp(FFouDefOny_Conc="Tukey"),alternative = "less")
summary(Tukey.OnyAdult_Conc)
#####################Foulum Onychiurus yodai Concentration EC 50######################
# EC10/EC50/EC90 from 4-parameter log-logistic dose-response models (drc):
# juveniles as counts, adults as a binomial proportion (p_FouDefOny_Adult,
# defined in the pH section above).
library (drc)
UPPER<-max (FouDefOny_Juvenile )
UPPER
fitOnyJuv_Conc<- drm(FouDefOny_Juvenile ~ FouDefOny_Conc,
                     data=FouDefOny,
                     fct = LL.4(names=c("Slope","Lower Limit","Upper Limit", "ED50")),
                     control=drmc(),
                     lowerl = c(-Inf, 0, -Inf, -Inf), # lower bounds per parameter
                     upperl = c(Inf, Inf, UPPER, Inf) # upper limit capped at max count
)
summary(fitOnyJuv_Conc)
ED(fitOnyJuv_Conc,c(10,50,90),interval="delta")
fitOnyAdult_Conc<- drm(p_FouDefOny_Adult~ FouDefOny_Conc,
                       data=FouDefOny,
                       fct = LL.4(names=c("Slope","Lower Limit","Upper Limit", "ED50")),
                       control=drmc(),
                       lowerl = c(-Inf, 0, -Inf, -Inf), # lower bounds per parameter
                       upperl = c(Inf, Inf, 1, Inf),    # proportion, so upper limit <= 1
                       type="binomial"
)
summary(fitOnyAdult_Conc)
ED(fitOnyAdult_Conc,c(10,50,90),interval="delta")
#############################################Gedhus soil_Folsomia candida##################################################################
# Gedhus soil, definitive test, F. candida: simple linear regressions of
# adult and juvenile counts on ash concentration.
GedDefFol <- subset(SUM, Soil.type == "Gedhus soil" &
                      Test.type == "definitive" &
                      Species == "Folsomia candida", select=Species:Juvenile,drop=FALSE)
# It's important to tell R where the vector comes from (data$vector).
GedDefFol_Conc <- GedDefFol$Concentration
GedDefFol_Adult<- GedDefFol$Adult
GedDefFol_Juvenile <- GedDefFol$Juvenile
GedDefFol_pH<- GedDefFol$pH
fitGedDefFol_Adult <- lm (GedDefFol_Adult~GedDefFol_Conc)
fitGedDefFol_Juvenile <- lm (GedDefFol_Juvenile~GedDefFol_Conc)
#############################################Gedhus soil_Onychiurus yodai##################################################################
# Gedhus soil, definitive test, O. yodai: linear and generalised linear
# models of counts on concentration.
GedDefOny <- subset(SUM, Soil.type == "Gedhus soil" &
                      Test.type == "definitive" &
                      Species == "Onychiurus yodai", select=Species:Juvenile,drop=FALSE)
# It's important to tell R where the vector comes from (data$vector).
GedDefOny_Conc <- GedDefOny$Concentration
GedDefOny_Adult<- GedDefOny$Adult
GedDefOny_Juvenile <- GedDefOny$Juvenile
GedDefOny_pH<- GedDefOny$pH
fitGedDefOny_Adult <- lm (GedDefOny_Adult~GedDefOny_Conc)
fitGedDefOny_Juvenile <- lm (GedDefOny_Juvenile~GedDefOny_Conc)
# glm() with the default gaussian family — equivalent to lm() here; kept as
# in the original.
glmGedDefOny_Juvenile<- glm(GedDefOny_Juvenile~GedDefOny_Conc)
summary(glmGedDefOny_Juvenile)
anova(glmGedDefOny_Juvenile)
##### Non-parametric (Kruskal-Wallis) tests for survival — count data may
##### not satisfy the ANOVA normality assumption.
nonparaGedDefOny_Adult<-kruskal.test(GedDefOny_Adult~GedDefOny_Conc)
#http://www.baike.com/wiki/%E9%9D%9E%E5%8F%82%E6%95%B0%E6%A3%80%E9%AA%8C
nonparaFouDefOny_Adult<-kruskal.test(FouDefOny_Adult~FouDefOny_Conc)
nonparaFouDefOny_Adult
nonparaGedDefOny_Adult
nonparaGedDefFol_Adult<-kruskal.test(GedDefFol_Adult~GedDefFol_Conc)
#http://www.baike.com/wiki/%E9%9D%9E%E5%8F%82%E6%95%B0%E6%A3%80%E9%AA%8C
nonparaFouDefFol_Adult<-kruskal.test(FouDefFol_Adult~FouDefFol_Conc)
nonparaFouDefFol_Adult
nonparaGedDefFol_Adult
nonparaGedDefOny_Juvenile<-kruskal.test(GedDefOny_Juvenile~GedDefOny_Conc)
nonparaGedDefOny_Juvenile
###############################################################################################################
###########################################Graphic for pH_O.yodai/F.candida#########################
# Overlaid dose-response plots against soil pH: adult survival (panel 1) and
# juvenile counts for both species (panel 2, O. yodai black / F. candida red).
par()
par(mfrow=c(2,2)) # panel layout: 2 rows x 2 columns
plotfitOnyAdult_pH <- plot(fitOnyAdult_pH,#y~x, expression for x and y axis
# broken=TRUE,
log="",#log x or y axis; "" disables log scaling
main="Survival",#main title of the graph
xlab = "Soil pH",#label for x axis
ylab = "Number of Adults",# label for y axis
xlim = c(5,10),# the range for x axis
ylim = c(0,16),#the range for y axis
xaxs = "i",# axis style: "i" uses the limits exactly, "r" pads them by 4%
yaxs = "i",
col = "black", #color for graph
pch = 21,#plotting symbol
type="all", # dots / line / all (points plus fitted curve)
#cex=1, cex.axis=1.2, cex.lab=1.2,
lwd=2)
Upper<- max(FouDefOny_Juvenile)
Upper
plotfitOnyJuv_pH <- plot(fitOnyJuv_pH,#y~x, expression for x and y axis
# broken=TRUE,
log="",#log x or y axis; "" disables log scaling
main="Juvenile",#main title of the graph
xlab = "Soil pH",#label for x axis
ylab = "Number of Juveniles",# label for y axis
xlim = c(5,10),# the range for x axis
ylim = c(0,1000),#the range for y axis
xaxs = "i",# axis style: "i" uses the limits exactly, "r" pads them by 4%
yaxs = "i",
col = "black", #color for graph
pch = 21,#plotting symbol
type="all", # dots / line / all
#cex=1, cex.axis=1.2, cex.lab=1.2,
lwd=2)
# Overlay the F. candida juvenile fit (red) on the same panel.
par(new=TRUE)
Upper<- max(FouDefFol_Juvenile)
Upper
plotfitFolJuv_pH <- plot(fitFolJuv_pH,#y~x, expression for x and y axis
# broken=TRUE,
log="", #log x or y axis; "" disables log scaling
#main="Juvenile of O.yodai",#main title of the graph
xlab = "",#label for x axis
ylab = "",# label for y axis
xlim = c(5,10),# must match the panel underneath for a valid overlay
ylim = c(0,1000),#must match the panel underneath for a valid overlay
xaxs = "i",# axis style: "i" uses the limits exactly, "r" pads them by 4%
yaxs = "i",
col = "red", #color for graph
pch = 22,#plotting symbol
type="all", # dots / line / all
#cex=1, cex.axis=1.2, cex.lab=1.2,
lwd=2)
###########################################Graphic for Concentration_O.yodai/F.candida#########################
# Dose-response plots against wood-ash concentration: adult survival, then
# juvenile counts with both species overlaid (O. yodai black/pch 21,
# F. candida red/pch 22).
par(new=FALSE)
plotfitOnyAdult_Conc <- plot(fitOnyAdult_Conc, # y ~ x
log="", # "" disables log scaling
main="",
xlab = "Concentration of Wood Ash (g/kg)",
ylab = "Number of Adults",
xlim = c(0,80),
ylim = c(0,16),
xaxs = "i", # "i": use the limits exactly (no 4% padding)
yaxs = "i",
col = "black",
pch = 21,
type="all", # points plus fitted curve
lwd=2)
par(new=FALSE)
plotfitOnyJuv_Conc <- plot(fitOnyJuv_Conc, # O. yodai juveniles (black)
log="",
main="",
xlab = "Concentration of Wood Ash (g/kg)",
ylab = "Number of Juveniles",
xlim = c(0,80),
ylim = c(0,1000),
xaxs = "i",
yaxs = "i",
col = "black",
pch = 21,
type="all",
lwd=2)
# Overlay F. candida juveniles (red) on the same axes.
par(new=TRUE)
Upper<- max(FouDefFol_Juvenile)
Upper
plotfitFolJuv_Conc <- plot(fitFolJuv_Conc, # F. candida juveniles (red)
log="",
main="",
xlab = "",
ylab = "",
xlim = c(0,80), # must match the panel underneath for a valid overlay
ylim = c(0,1000),
xaxs = "i",
yaxs = "i",
col = "red",
pch = 22,
type="all",
lwd=2)
par(new=TRUE)
# FIX: the legend previously assigned black/pch 21 to "Folsomia candida" and
# (implicitly) red/pch 22 to "Onychiurus yodai", which is the opposite of how
# the series are drawn above (fitFolJuv_* is red/pch 22, fitOnyJuv_* is
# black/pch 21). Colours and symbols now match the plotted data.
# NOTE(review): legend(-3, 3) places the legend outside xlim = c(0, 80) with
# xaxs = "i" — it may be clipped; confirm the intended position.
legend(-3,3, c("Folsomia candida", "Onychiurus yodai"),
col = c("red","black"),
text.col = "black",
pch = c(22,21),
bty="n")
###### Two_way ANOVA F.candida ###############
# None of the outputs fits a normal distribution, so a classical two-way ANOVA
# is not appropriate; transformations did not help. An alternative (rank-based)
# approach is used further below.
# NOTE(review): `select=Soil.type&Species:Juvenile` looks like a typo for
# `select=c(Soil.type, Species:Juvenile)` — `&` on column selections is almost
# certainly not what was intended; verify that DefFol contains the expected
# columns (Soil.type, Species ... Juvenile).
DefFol <- subset(SUM, Test.type == "definitive" &
Species == "Folsomia candida", select=Soil.type&Species:Juvenile,drop=FALSE)
# Print two normality tests for a numeric vector:
#   1. Shapiro-Wilk (shapiro.test) — suited to small/medium samples.
#   2. Kolmogorov-Smirnov against a normal with the SAMPLE mean and sd.
# In both cases p > 0.05 means no evidence against normality at the 5% level.
# NOTE(review): using estimated parameters in ks.test() makes its p-value
# anti-conservative (the Lilliefors correction would be needed for a strict
# test); kept as-is because the surrounding analysis only uses it informally.
# Returns the ks.test htest object invisibly (the value of print(kt)).
NormalDistribution <- function(i){
st<- shapiro.test(i)
print (st)
# sd(i) is the idiomatic (and numerically identical) form of sqrt(var(i)).
kt<- ks.test(i, "pnorm", mean = mean(i), sd = sd(i))
print(kt)
}
# Two-way ANOVA on F. candida juvenile counts: model 1 with the
# soil x concentration interaction, model 2 additive only.
TWANOVA_candida_1<- aov(DefFol$Juvenile~DefFol$Soil.type*factor(DefFol$Concentration))
summary(TWANOVA_candida_1)
anova(TWANOVA_candida_1)
TWANOVA_candida_2<- aov(DefFol$Juvenile~DefFol$Soil.type+factor(DefFol$Concentration))
summary(TWANOVA_candida_2)
anova(TWANOVA_candida_2)
# Model comparison: a significant difference means the interaction matters.
anova(TWANOVA_candida_1,TWANOVA_candida_2)
# Significantly different, so the interaction model (TWANOVA_candida_1) is kept.
anova(TWANOVA_candida_1)
## Binomial goodness-of-fit for ADULT F. candida survival ("Fou" soil).
# Each replicate started with 10 adults; the GLM response is (alive, dead).
FouDefFol_Adult
# Number dead out of 10.
# FIX: the original body was `10 - FouDefFol_Adult`, ignoring the argument `i`
# and closing over the global vector; it only worked by coincidence because it
# was always called with that same vector. Now uses its argument.
Pro<-function(i){10-i}
respFol<-cbind(FouDefFol_Adult,Pro(FouDefFol_Adult))
respFol
# One fitted probability per concentration level (no intercept, `0 +`).
binomial1 <- glm(respFol~0+factor(FouDefFol$Concentration),family= binomial(link="logit") )
# For a binomial GLM the link can be: logit, probit, log, cloglog.
summary(binomial1)
dev<-deviance(binomial1)
n.observations <- length(FouDefFol$Concentration)
n.observations
n.parameters.fit1 <- length(coef(binomial1))
n.parameters.fit1
# Residual degrees of freedom for the deviance test.
n.df.fit1 <- n.observations - n.parameters.fit1
n.df.fit1
# Chi-square test of the residual deviance: p > 0.05 means no evidence of
# lack of fit / overdispersion relative to the binomial. (The original
# comment said "p > 0.5", which was a typo.)
pchisq(dev, df=n.df.fit1, lower.tail=FALSE)
# Binomial goodness-of-fit for ADULT F. candida survival, Gedhus soil
# (same pattern as the "Fou" soil block above, cloglog link).
GedDefFol_Adult
# Number dead out of 10.
# FIX: use the function argument instead of the global vector (the original
# body `10 - GedDefFol_Adult` ignored `i`).
Pro<-function(i){10-i}
respFol<-cbind(GedDefFol_Adult,Pro(GedDefFol_Adult))
respFol
binomial1 <- glm(respFol~0+factor(GedDefFol$Concentration),family= binomial(link="cloglog") )
# For a binomial GLM the link can be: logit, probit, log, cloglog.
summary(binomial1)
dev<-deviance(binomial1)
n.observations <- length(GedDefFol$Concentration)
n.observations
n.parameters.fit1 <- length(coef(binomial1))
n.parameters.fit1
n.df.fit1 <- n.observations - n.parameters.fit1
n.df.fit1
# Residual-deviance chi-square test: p > 0.05 => binomial fits adequately.
pchisq(dev, df=n.df.fit1, lower.tail=FALSE)
###### Two_way ANOVA O.yodai_juvenile ###############
# NOTE(review): same suspicious `select=Soil.type&Species:Juvenile` as in the
# DefFol subset above — presumably c(Soil.type, Species:Juvenile); verify.
DefOny <- subset(SUM, Test.type == "definitive" &
Species == "Onychiurus yodai", select=Soil.type&Species:Juvenile,drop=FALSE)
# Try transformations until the juvenile counts pass the normality tests.
NormalDistribution(GedDefOny_Juvenile)
# Arcsine transform (163 is presumably the maximum count — TODO confirm).
hist(asin(GedDefOny_Juvenile/163))
x<- log(sqrt(GedDefOny_Juvenile)+10)
NormalDistribution(x)
hist(x)
y<- log(sqrt(FouDefOny_Juvenile)+10)
NormalDistribution(y[1:12])
hist(y)
# Apply the chosen log-sqrt transform to the pooled data and run the ANOVA.
j<-log(sqrt(DefOny$Juvenile)+10)
NormalDistribution(j)
TWANOVA_yodai_1<- aov(j~DefOny$Soil.type*factor(DefOny$Concentration))
summary(TWANOVA_yodai_1)
anova(TWANOVA_yodai_1)
TWANOVA_yodai_2<- aov(j~DefOny$Soil.type+factor(DefOny$Concentration))
summary(TWANOVA_yodai_2)
anova(TWANOVA_yodai_1,TWANOVA_yodai_2)
# Significantly different, so the interaction model (TWANOVA_yodai_1) is kept.
anova(TWANOVA_yodai_1)
#### Binomial goodness-of-fit for ADULT O. yodai survival ("Fou" soil).
# Each replicate started with 15 adults; the GLM response is (alive, dead).
FouDefOny_Adult
# Number dead out of 15.
# FIX: use the function argument instead of the global vector (the original
# body `15 - FouDefOny_Adult` ignored `i`).
Pro<-function(i){15-i}
respOny<-cbind(FouDefOny_Adult,Pro(FouDefOny_Adult))
respOny
binomial1 <- glm(respOny~0+factor(FouDefOny$Concentration),family= binomial(link="cloglog") )
# For a binomial GLM the link can be: logit, probit, log, cloglog.
summary(binomial1)
# Nested model comparisons: no-intercept vs intercept factor model, then
# factor vs linear concentration effect.
binomial2 <- glm(respOny~factor(FouDefOny$Concentration),family= binomial(link="cloglog") )
anova(binomial1,binomial2, test="Chisq")
binomial3 <- glm(respOny~FouDefOny$Concentration,family= binomial(link="cloglog") )
anova(binomial2,binomial3, test="Chisq")
dev<-deviance(binomial2)
n.observations <- length(FouDefOny$Concentration) # number of dose observations
n.observations
n.parameters.fit1 <- length(coef(binomial2))
n.parameters.fit1
n.df.fit1 <- n.observations - n.parameters.fit1
n.df.fit1
# Residual-deviance chi-square test: p > 0.05 => binomial fits adequately.
pchisq(dev, df=n.df.fit1, lower.tail=FALSE)
# Binomial goodness-of-fit for ADULT O. yodai survival, Gedhus soil
# (same pattern as the "Fou" soil block above).
GedDefOny_Adult
# Number dead out of 15.
# FIX: use the function argument instead of the global vector (the original
# body `15 - GedDefOny_Adult` ignored `i`).
Pro<-function(i){15-i}
respOny<-cbind(GedDefOny_Adult,Pro(GedDefOny_Adult))
respOny
binomial1 <- glm(respOny~0+factor(GedDefOny$Concentration),family= binomial(link="cloglog") )
# For a binomial GLM the link can be: logit, probit, log, cloglog.
summary(binomial1)
binomial2 <- glm(respOny~factor(GedDefOny$Concentration),family= binomial(link="cloglog") )
anova(binomial1,binomial2, test="Chisq")
binomial3 <- glm(respOny~GedDefOny$Concentration,family= binomial(link="cloglog") )
anova(binomial2,binomial3, test="Chisq")
dev<-deviance(binomial2)
n.observations <- length(GedDefOny$Concentration)
n.observations
n.parameters.fit1 <- length(coef(binomial2))
n.parameters.fit1
n.df.fit1 <- n.observations - n.parameters.fit1
n.df.fit1
# Residual-deviance chi-square test: p > 0.05 => binomial fits adequately.
pchisq(dev, df=n.df.fit1, lower.tail=FALSE)
################# non parametric two way ANOVA test for FolAdult #################
# Rank-based two-way test in the spirit of Scheirer-Ray-Hare: run an ANOVA on
# the ranks, then turn each term's SS/MS ratio (H) into a chi-square p-value.
# NOTE(review): Scheirer-Ray-Hare normally ranks only the RESPONSE and keeps
# the predictors as factors; ranking Soil.type/Concentration treats them as
# numeric covariates — confirm this is intended.
#Create rank variables
rank_FolAdult<-rank(DefFol$Adult)
rank_Folsoil<-rank(DefFol$Soil.type)
rank_FolConc<-rank(DefFol$Concentration)
# Do the test
aov_rank_FolAdult<- aov(rank_FolAdult~rank_Folsoil*rank_FolConc)
summary(aov_rank_FolAdult)
# Extract the sum-of-squares and degrees-of-freedom columns and total them
# (the last entry of each is the residual term).
Df <- anova(aov_rank_FolAdult)[,"Df"]
Sum_Df<- sum(Df)
SS <- anova(aov_rank_FolAdult)[,"Sum Sq"]
Sum_SS<- sum(SS)
# Total mean square (MS = total SS / total df), the H-statistic denominator.
MS <- Sum_SS/Sum_Df
# H statistics for soil, concentration, and the interaction.
SS[1]/MS
SS[2]/MS
SS[3]/MS
# Convert each H into a chi-square upper-tail probability.
p_DefFol_Soil<-1-pchisq(SS[1]/MS,Df[1])
p_DefFol_Conc<-1-pchisq(SS[2]/MS,Df[2])
p_DefFol_Soil.Conc<-1-pchisq(SS[3]/MS,Df[3])
p_DefFol_Soil
p_DefFol_Conc
p_DefFol_Soil.Conc
################# non parametric two way ANOVA test forOnyAdult #################
# Same rank-based two-way test as above, for O. yodai adults.
# NOTE(review): this block ranks pH (rank_OnypH) but names the resulting
# p-values p_DefOny_Conc / p_DefOny_Soil.Conc — either the predictor or the
# variable names are wrong; confirm whether concentration or pH was intended.
#Create rank variables
rank_OnyAdult<-rank(DefOny$Adult)
rank_Onysoil<-rank(DefOny$Soil.type)
rank_OnypH<-rank(DefOny$pH)
# Do the test
aov_rank_OnyAdult<- aov(rank_OnyAdult~rank_Onysoil*rank_OnypH)
summary(aov_rank_OnyAdult)
# Extract the SS/df columns and total them (last entry is the residual term).
Df <- anova(aov_rank_OnyAdult)[,"Df"]
Sum_Df<- sum(Df)
SS <- anova(aov_rank_OnyAdult)[,"Sum Sq"]
Sum_SS<- sum(SS)
# Total mean square (H-statistic denominator).
MS <- Sum_SS/Sum_Df
# H statistics for soil, pH, and the interaction.
SS[1]/MS
SS[2]/MS
SS[3]/MS
# Convert each H into a chi-square upper-tail probability.
p_DefOny_Soil<-1-pchisq(SS[1]/MS,Df[1])
p_DefOny_Conc<-1-pchisq(SS[2]/MS,Df[2])
p_DefOny_Soil.Conc<- 1-pchisq(SS[3]/MS,Df[3])
p_DefOny_Soil
p_DefOny_Conc
p_DefOny_Soil.Conc
|
b6cfecf74f3625d10652be8ea47fa090d88f6514
|
c9ce69cc331f06c751e6bc58bd43131335869bc2
|
/Fig_1_State_of_photosynthetic.R
|
12eff616fa44465d92581a3a3868690750e623f9
|
[] |
no_license
|
lkulmala/Hari-et-al
|
730652fa80fc2f9c59f25eded166d005bc278fd0
|
59f55bdd1abf1ed869c074913dfe92c0cec24cc9
|
refs/heads/master
| 2021-01-23T04:19:31.314233
| 2017-03-25T20:29:00
| 2017-03-25T20:29:00
| 86,183,947
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 819
|
r
|
Fig_1_State_of_photosynthetic.R
|
# Fig. 1: state of the photosynthetic machinery, years 2011-2014 on one panel.
# Input file has one column per year (X2011..X2014); every series is assumed
# to share the same Apr 1 - Oct 16 daily axis (199 rows) — TODO confirm.
# NOTE(review): setwd() inside a script is fragile; prefer a relative path.
setwd('input/')
datafile<-read.table("Fig_1_State_of_photosynthetic.txt", header=TRUE)
S2011<-datafile$X2011
S2012<-datafile$X2012
S2013<-datafile$X2013
S2014<-datafile$X2014
# Daily date axis; x-limits extend slightly beyond the data range.
days<-seq(as.Date("2014-04-01"), as.Date("2014-10-16"), by="days")
xlimits<-c(as.Date("2014-03-25"),as.Date("2014-10-20"))
par(mfrow=c(1, 1))
par(mar = c(0.5, 4, 2, 0), oma = c(2, 0, 0, 0.5)) # margin settings
plot(y=S2011, x=days, col="cornflowerblue", ylim=c(0,3500), xlim=xlimits, type="l",xlab="",ylab="State of photosynthetic machinery")
lines(y=S2012, x=days, col="chocolate3")
lines(y=S2013, x=days, col="black")
lines(y=S2014, x=days, col="black", lty=2)
# FIX: the legend previously listed 2011 as chocolate3 and 2012 as
# cornflowerblue — the opposite of how the series are drawn above.
legend("topleft", c("2011","2012","2013","2014"), col=c("cornflowerblue","chocolate3","black","black"), lty=c(1,1,1,2), bty = "n")
|
5fef4d8cb1258dc3a959e4846dcde8ab143564b7
|
768b26832ce21ac800f547bcf1e282b5f9e691a0
|
/Shiny/Recommendation/ui.R
|
3903602d4a499c669ed9408e560b6e014c8d3a48
|
[
"Apache-2.0"
] |
permissive
|
dautroc1/Recommendation-system
|
2526aa809f311846e47d66307a6d0b06545b2aa8
|
087ef82598b9cd0be482d7b91bcd9964e37587c8
|
refs/heads/main
| 2023-02-16T03:22:14.157505
| 2021-01-10T02:07:17
| 2021-01-10T02:07:17
| 323,897,635
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 707
|
r
|
ui.R
|
# Shiny UI for the anime recommendation app: a sidebar with usage instructions
# and a main panel where the user enters an id and sees the recommendation
# table (server side renders output$text).
shinyUI(fluidPage(
# Application title
titlePanel("Anime recommendation"),
# Sidebar holds static instructions only.
sidebarLayout(
sidebarPanel(
h2("Instruction"),
h5("1. Input user id to the box"),
h5("2. Press enter and the recommendation will display below.")
),
# Main panel: text input + submit button; results rendered as a table.
mainPanel(
tabsetPanel(
tabPanel("Recommendation",
textInput("textinput","Input value"),
submitButton("Enter"),
tableOutput("text")))
)
)
))
|
d6650a98a7ba7da443442389fac08afbe414f6da
|
528e2696dbc7ad1ac2fa899dc2f862ca4116e21b
|
/map-suspected-reinfections.R
|
3bd1465dd2729c6c25ccbcc00245f6d92b1adafa
|
[
"MIT"
] |
permissive
|
jmcastagnetto/bnonews-reinfection-covid19
|
45ea1cca6880e58cad710747be9db7283529361e
|
e21ba69e5a2ecc1b6ceb558ba513859eb0bb2bed
|
refs/heads/main
| 2023-02-24T11:16:54.610871
| 2021-02-01T18:02:52
| 2021-02-01T18:02:52
| 321,224,874
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,198
|
r
|
map-suspected-reinfections.R
|
# Choropleth map of suspected COVID-19 reinfections by country, from the
# BNO News dataset, drawn with tmap and saved as a dated PNG.
library(tidyverse)
library(tmap)
data(World)
# Read the raw case list; several strings are treated as missing.
raw <-read_csv(
"data/covid19_suspected_reinfections.csv",
na = c("", "-", "NA", "N/A")
)
# Total suspected cases per country (keyed by ISO3 code).
suspected <- raw %>%
group_by(
country, iso3c
) %>%
summarise(
Suspected = sum(cases, na.rm = TRUE)
)
# Worldwide total and reporting-date range, for the map title.
n_suspected <- sum(suspected$Suspected, na.rm = TRUE)
date_range <- range(raw$reported, na.rm = TRUE)
# Attach the per-country totals to the world polygons.
my_data <- World %>%
left_join(
suspected,
by = c("iso_a3" = "iso3c")
)
tmap_mode("plot")
map1 <- tm_shape(my_data) +
tm_polygons("Suspected", palette = "-inferno") +
tm_layout(
main.title = glue::glue("Suspected COVID-19 reinfections\nWorldwide total: {scales::comma(n_suspected)}. From {date_range[1]} to {date_range[2]}"),
) +
tm_credits(
text = paste0("Source: BNO News (https://bit.ly/suspectedcovid19reinfections)\n", Sys.Date()," // @jmcastagnetto, Jesus M. Castagnetto"),
fontface = "bold",
fontfamily = "Inconsolata",
size = 1,
bg.color = "white",
position = c("center", "bottom")
)
map1
# Output path includes today's date, e.g. plots/20210201-map-....png.
fname <- paste0(
"plots/",
format(Sys.Date(), "%Y%m%d"),
"-map-suspected-cases-covid19-reinfection.png"
)
tmap_save(
tm = map1,
filename = fname,
width = 10,
height = 6
)
|
c5fac2f58329e066fafd4be7c51550859dcde2b9
|
d52bc56044d2a90407f1c08048a4d6a88ce39193
|
/man/imports_put_archive.Rd
|
efa72c34658955971b7a75964975f407a920ddf7
|
[
"BSD-3-Clause"
] |
permissive
|
wlattner/civis-r
|
71e15c650ff99e41b7028d32ccaa9c3df54e1f36
|
d794680c8b155c6302dc9d41b500f719e3a8c16c
|
refs/heads/master
| 2021-06-25T17:16:54.733530
| 2017-08-21T23:19:18
| 2017-08-21T23:19:18
| 100,273,729
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,240
|
rd
|
imports_put_archive.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{imports_put_archive}
\alias{imports_put_archive}
\title{Update the archive status of this object}
\usage{
imports_put_archive(id, status)
}
\arguments{
\item{id}{integer required. The ID of the object.}
\item{status}{boolean required. The desired archived status of the object.}
}
\value{
A list containing the following elements:
\item{name}{string, The name of the import.}
\item{syncType}{string, The type of sync to perform; one of Dbsync, AutoImport, SilverpopDataImport, SilverpopContactImport, GdocImport, GdocExport, and Salesforce.}
\item{source}{object, A list containing the following elements:
\itemize{
\item remoteHostId integer,
\item credentialId integer,
\item additionalCredentials array, Array that holds additional credentials used for specific imports. For salesforce imports, the first and only element is the client credential id.
\item name string,
}}
\item{destination}{object, A list containing the following elements:
\itemize{
\item remoteHostId integer,
\item credentialId integer,
\item additionalCredentials array, Array that holds additional credentials used for specific imports. For salesforce imports, the first and only element is the client credential id.
\item name string,
}}
\item{schedule}{object, A list containing the following elements:
\itemize{
\item scheduled boolean, If the object is scheduled
\item scheduledDays array, Day based on numeric value starting at 0 for Sunday
\item scheduledHours array, Hours of the day it is scheduled on
\item scheduledMinutes array, Minutes of the day it is scheduled on
\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour
}}
\item{notifications}{object, A list containing the following elements:
\itemize{
\item urls array, URLs to receive a POST request at job completion
\item successEmailSubject string, Custom subject line for success e-mail.
\item successEmailBody string, Custom body text for success e-mail, written in Markdown.
\item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully.
\item failureEmailAddresses array, Addresses to notify by e-mail when the job fails.
\item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes.
\item successOn boolean, If success email notifications are on
\item failureOn boolean, If failure email notifications are on
}}
\item{parentId}{integer, Parent id to trigger this import from}
\item{id}{integer, The ID for the import.}
\item{isOutbound}{boolean, }
\item{syncs}{array, An array containing the following fields:
\itemize{
\item id integer,
\item source object,
\item destination object,
\item advancedOptions object,
}}
\item{state}{string, }
\item{createdAt}{string, }
\item{updatedAt}{string, }
\item{lastRun}{object, A list containing the following elements:
\itemize{
\item id integer,
\item state string,
\item createdAt string, The time that the run was queued.
\item startedAt string, The time that the run started.
\item finishedAt string, The time that the run completed.
\item error string, The error message for this run, if present.
}}
\item{user}{object, A list containing the following elements:
\itemize{
\item id integer, The ID of this user.
\item name string, This user's name.
\item username string, This user's username.
\item initials string, This user's initials.
\item online boolean, Whether this user is online.
}}
\item{runningAs}{object, A list containing the following elements:
\itemize{
\item id integer, The ID of this user.
\item name string, This user's name.
\item username string, This user's username.
\item initials string, This user's initials.
\item online boolean, Whether this user is online.
}}
\item{nextRunAt}{string, The time of the next scheduled run.}
\item{timeZone}{string, The time zone of this import.}
\item{hidden}{boolean, The hidden status of the object. Setting this to true hides it from most API endpoints. The object can still be queried directly by ID}
\item{archived}{string, The archival status of the requested object(s).}
}
\description{
Update the archive status of this object
}
|
94c0064d2efe151727c82a0421bc7d2b182b80ba
|
b5547ead9c9590de07e4d661ba08605dc335329b
|
/ggplot.R
|
ee9d4c2fbe953066c90cfb32cf51a12a5ab594fc
|
[] |
no_license
|
shivani02/FinancialModelling-Rprograms
|
aa4accd3f1124adeda54a7f6aabe31eb69166c6f
|
9d04880caa1a87b9c6abfacc41390661504e3559
|
refs/heads/master
| 2020-06-08T09:34:22.923598
| 2015-05-09T19:58:31
| 2015-05-09T19:58:31
| 35,126,582
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 941
|
r
|
ggplot.R
|
# call price function
# Black-Scholes price of a European call option.
# x: spot price; t: current time; T: maturity; r: risk-free rate;
# sigma: volatility (may be a vector); K: strike price.
callprice <- function(x, t, T, r, sigma, K) {
  tau <- T - t
  vol <- sigma * sqrt(tau)
  d1 <- (log(x / K) + (r + 0.5 * sigma^2) * tau) / vol
  d2 <- d1 - vol
  x * pnorm(d1) - K * exp(-r * tau) * pnorm(d2)
}
#implied volatility function
# Grid-search implied volatility for a European call.
# Evaluates callprice() on a fixed sigma grid and — matching the original
# behaviour exactly — returns the LAST grid value whose price differs from
# ObsPrice by less than 1 (price units), or 0 when no grid point qualifies.
# Replaces the original O(n) scalar loop (which repeatedly overwrote the
# result) with vectorised which()/max().
ImpliedVolatility<-function(x,t,T,r,K,ObsPrice)
{
sigma<-seq(0.001,10,by=0.0001)
C<-callprice(x,t,T,r,sigma,K)
error<-abs(ObsPrice-C)
hits <- which(error < 1)
# NOTE(review): the tolerance of 1 is in absolute price units; a relative
# tolerance (or root-finding via uniroot) would be more precise.
if (length(hits) > 0) sigma[max(hits)] else 0
}
# Interactive driver: pick an options CSV, compute the implied volatility for
# every row, and draw the (strike, maturity, implied vol) surface with rgl.
# NOTE(review): file.choose() makes this non-reproducible; consider a path arg.
z<-read.csv(file.choose(),header=TRUE)
head(z)
zFirstRow<-z[1,]
r<-0.04
#T<-1/4
t<- 0
# Columns expected in the CSV: Underlying.Value, Strike.Price, Settle.Price,
# TTM (time to maturity in trading days — divided by 252 below).
x<-z$Underlying.Value
K<-z$Strike.Price
ObsPrice<-z$Settle.Price
T<-z$TTM
# Preallocate the result vector, one implied vol per option row.
CalcSigmaGG<-mat.or.vec(length(x),1)
# NOTE(review): 1:length(x) breaks on empty input; seq_along(x) is safer.
for(j in 1:length(x))
{
CalcSigmaGG[j]<-ImpliedVolatility(x[j],t,T[j]/252,r,K[j],ObsPrice[j])
}
# 3-D scatter of the implied-volatility surface.
# NOTE(review): prefer library('rgl') — require() returns FALSE instead of
# erroring when the package is missing.
require('rgl')
open3d()
plot3d(K,T,CalcSigmaGG,col=rainbow(1000),type="p",xlab="Strike Price",
ylab="Time to Maturity", zlab="Implied Volatility")
|
ba04f48f70df6a11fca51c4b1f105271ea83e3bf
|
5feca36689ab072f63447022f577c0b5dcdcd214
|
/man/pmcode_99329.Rd
|
c76e96e9773cbed956710f2f2dff5a4446953d89
|
[
"CC0-1.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
iembry-USGS/ie2miscdata
|
dfc205ab712ff2b6adf1249fde4477104f9d6b20
|
142bffa6f678eb8d7cadc59669e8429d45da19c9
|
refs/heads/master
| 2021-01-21T04:35:30.514811
| 2016-07-16T06:55:22
| 2016-07-16T06:55:22
| 49,751,904
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 807
|
rd
|
pmcode_99329.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pmcode_99329.R
\docType{data}
\name{pmcode_99329}
\alias{pmcode_99329}
\title{99329 Coliphage, somatic, E. coli C-host, 2-step enrichment presence/absence per 1 liter}
\format{A data frame with 2 rows and 3 variables:
\describe{
\item{Parameter Code}{USGS Parameter Code}
\item{Fixed Value}{Fixed Value}
\item{Fixed Text}{Fixed Text}
}}
\usage{
pmcode_99329
}
\description{
A table containing the USGS Coliphage, somatic, E. coli C-host,
2-step enrichment presence/absence per 1 liter parameter codes.
}
\examples{
pmcode_99329
}
\references{
This data is from Table 26. Parameter codes with fixed values (USGS Water Quality Samples for USA: Sample Data). See \url{http://waterdata.usgs.gov/usa/nwis/qwdata?codes_table26_help}.
}
|
6d979cdf97edb2570ff28bd2a32f1c15e4b8188f
|
603ef4d458ae15590178a3bb83e41597bcbc0447
|
/R/summarize.r
|
21a640d9b85d497e4ad29deaf16bfbb8234e6316
|
[] |
no_license
|
ntncmch/myRtoolbox
|
8dace3f0d29e19670624e6e3c948ba6d0fa38cec
|
8ec2a6bc2e7dd33fb23d7f4b2c6cf2d95ca5ef8d
|
refs/heads/master
| 2020-05-14T14:14:34.465272
| 2014-09-22T13:17:47
| 2014-09-22T13:17:47
| 21,052,420
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,679
|
r
|
summarize.r
|
#'Basic accounting for NA values
#'
#'All these functions performs the same as the basic version (same name without \code{_na} at the end) but differ when all elements are \code{NA}. In this case, \code{NA} is returned in the same format as \code{x}, which is not always the case when the basic function is used with \code{\link[dplyr]{summarize}}.
#' @param x any object accepted by the basic function.
#' @param na_rm logical. If \code{TRUE}, \code{\link{na.omit}} is performed first. Default to \code{FALSE}
#' @export
#' @importFrom dplyr last
#' @name summarize with NA
#' @export
#' @aliases last_na
last_na <- function(x,na_rm=FALSE) {
# All-missing input: return NA preserving x's type (dplyr::last would also
# return NA here, but this guarantees consistent behaviour in summarise()).
if(all(is.na(x))){
return(x[1])
}
# Optionally drop NAs so the "last" element is the last non-missing one.
if(na_rm){
x <- na.omit(x)
}
return(last(x))
}
#' @name summarize with NA
#' @export
#' @aliases all_na
# Like all(), but returns NA (in x's type) when every element is missing,
# instead of all()'s TRUE on an effectively empty input.
all_na <- function(x, na_rm = FALSE) {
  if (all(is.na(x))) {
    return(x[1])
  }
  all(x, na.rm = na_rm)
}
#' @name summarize with NA
#' @export
#' @aliases any_na
# Like any(), but returns NA (in x's type) when every element is missing,
# instead of any()'s FALSE on an effectively empty input.
any_na <- function(x, na_rm = FALSE) {
  if (all(is.na(x))) {
    return(x[1])
  }
  any(x, na.rm = na_rm)
}
#' @name summarize with NA
#' @export
#' @importFrom dplyr first
#' @aliases first_na
first_na <- function(x,na_rm=FALSE) {
# All-missing input: return NA preserving x's type.
if(all(is.na(x))){
return(x[1])
}
# Optionally drop NAs so the "first" element is the first non-missing one.
if(na_rm){
x <- na.omit(x)
}
return(first(x))
}
#' @name summarize with NA
#' @export
#' @aliases max_na
# Like max(), but returns NA (in x's type) when every element is missing,
# avoiding max()'s -Inf-with-warning on an effectively empty input.
max_na <- function(x, na_rm = FALSE) {
  if (all(is.na(x))) return(x[1])
  max(if (na_rm) na.omit(x) else x)
}
#' @name summarize with NA
#' @export
#' @aliases min_na
# Like min(), but returns NA (in x's type) when every element is missing,
# avoiding min()'s Inf-with-warning on an effectively empty input.
min_na <- function(x, na_rm = FALSE) {
  if (all(is.na(x))) return(x[1])
  min(if (na_rm) na.omit(x) else x)
}
|
902d13b17610bc1b2efac578f20daca67fd986dd
|
2c5a3c1b0ca9b746ca3657e811466ab9017be59f
|
/Gotham_Cabs/code/garbage_code/Linear_Regession/Script1_data_inspection.R
|
3d06bd5609add881d755cf6295d043cd1d24d80e
|
[] |
no_license
|
ccirelli2/ML_Final_Project_2019
|
5982263cdd2f7ef4818ae9b976dd7525f7dcdc0d
|
02b3df31f6a253ac0270ef27545eb46cfac51792
|
refs/heads/master
| 2020-05-15T13:28:49.872082
| 2019-05-02T20:02:34
| 2019-05-02T20:02:34
| 182,301,101
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,840
|
r
|
Script1_data_inspection.R
|
# DATA INSPECTION
'Content:
1.) Average Duration: By month, day and route.
Plot: Bar
Plot: Boxplot.
Observations:
Duration: See plot. It appears that the majority are
Month: Only 6 months worth of data (Jan-June)
There appears to be a gradual increase in duration
from months 1-6.
Day: Day 2, 6, 8, 10, 12, 14, 16, 18, 20, 22... not present.
No decernible relationship on a monthly basis.
Check to see if we can map these dates to days of the week.
Definitions: Boxplot Lower bar is Q1. 25% percentile.
Bold bar in middle is the median.
Box represents the "inner quartile".
Upper part of box is 3Q or 75% quartile.
2.) Route Frequency Routes most frequently traveled.
'
# CLEAR NAMESPACE
# NOTE(review): rm(list = ls()) in a shared script wipes the user's session;
# better removed.
rm(list = ls())
# LOAD LIBRARIES
library(RMySQL)
# NOTE(review): install.packages() should not run on every script execution;
# install interactively once instead.
install.packages("psych")
library(psych)
# SETUP CONNECTION TO DB
# NOTE(review): hard-coded credentials in source — move to environment
# variables or a config file outside version control.
mydb <- dbConnect(RMySQL::MySQL(), user='ccirelli2',
password='Work4starr', dbname='GSU',
host = "127.0.0.1")
# Get List of Tables
dbListTables(mydb)
# QUERIES (1-N)---------------------------------------------------------------------
# Query 1: sample of all columns for non-zero-duration trips.
query1_alldata = dbSendQuery(mydb, '
SELECT
*
FROM ML_FinProj_GothamCab_Train
WHERE duration != 0
LIMIT 100000')
result_q1 = fetch(query1_alldata, n = -1)
# NOTE(review): describe.by is a deprecated psych alias — confirm; psych's
# current function is describeBy().
describe.by(result_q1)
# Plot Durations
plot(result_q1$duration)
boxplot(result_q1$duration)
hist(result_q1$duration)
d = density(result_q1$duration)
plot(d)
# Query 2: Average Duration By Month
query2_rel_month_duration = dbSendQuery(mydb, '
SELECT
MONTH(pickup_datetime) AS "MONTH",
ROUND(AVG(duration),0) AS "AVERAGE_DURATION"
FROM GSU.ML_FinProj_GothamCab_Train
GROUP BY MONTH(pickup_datetime)
ORDER BY ROUND(AVG(duration),0);')
result_q2 = fetch(query2_rel_month_duration, n = -1)
barplot(result_q2$AVERAGE_DURATION, names.arg = result_q2$MONTH,
main = "Avg Duration By Month",
xlab = "Month",
ylab = "Duration")
# Query 3: average duration by day of month.
query3_rel_day_duration = dbSendQuery(mydb, '
SELECT
DAY(pickup_datetime) AS "DAY",
ROUND(AVG(duration),0) AS "AVERAGE_DURATION"
FROM GSU.ML_FinProj_GothamCab_Train
GROUP BY DAY(pickup_datetime)
ORDER BY DAY(pickup_datetime);')
result_q3 = fetch(query3_rel_day_duration, n = -1)
barplot(result_q3$AVERAGE_DURATION,
names.arg = result_q3$DAY,
main = "Average Duration By Day Of Week",
xlab = "Day",
ylab = "Duration")
# Query 4: average duration by weekday.
' MySQL DAYOFWEEK: 1 = Sunday
7 = Saturday'
query4_rel_weekday_duration = dbSendQuery(mydb, '
SELECT
Weekday,
ROUND(AVG(duration),0) AS "AVERAGE_DURATION"
FROM GSU.ML_FinProj_GothamCab_Train
GROUP BY Weekday
ORDER BY Weekday;')
result_q4 = fetch(query4_rel_weekday_duration, n = -1)
barplot(result_q4$AVERAGE_DURATION,
names.arg = result_q4$Weekday,
main = "Average Duration By Weekday",
xlab = "Weekday",
ylab = "Duration")
# Query 8: average duration by hour of day (random sample).
query8_rel_dur_hour_of_day = dbSendQuery(mydb, '
SELECT
hour_,
ROUND(AVG(duration),0) AS "AVERAGE_DURATION"
FROM GSU.ML_FinProj_GothamCab_Train
GROUP BY hour_
ORDER BY RAND()
LIMIT 10000;')
result_q8 = fetch(query8_rel_dur_hour_of_day, n = -1)
plot(result_q8,
main = 'Average Duration By Hour of Day',
xlab = 'Hour of Day (0-24)',
ylab = 'Average Duration')
# Query 5: the 20 most frequently traveled pickup/dropoff pairs.
query5_rel_route_freq = dbSendQuery(mydb, '
SELECT
pickup_x,
pickup_y,
dropoff_x,
dropoff_y,
COUNT(duration) AS Route_Count
FROM GSU.ML_FinProj_GothamCab_Train
GROUP BY pickup_x, pickup_y, dropoff_x, dropoff_y
ORDER BY COUNT(duration) DESC
LIMIT 20;
')
result_q5 = fetch(query5_rel_route_freq, n = -1)
#setwd('/home/ccirelli2/Desktop/GSU/2019_Spring/ML_Course/Final_Project/Gotham_Cabs/Preliminary_Analysis')
#write.table(result_q5, 'Top_20_Most_Traveled_Results.xlsx')
# Query 7: distance vs duration on a random 10k sample, log-log scatter.
query7_duration_vs_distance = dbSendQuery(mydb, '
SELECT
duration,
distance
FROM GSU.ML_FinProj_GothamCab_Train
ORDER BY RAND()
LIMIT 10000;
')
result_q7 = fetch(query7_duration_vs_distance, n = -1)
plot(log(result_q7$distance), log(result_q7$duration), main = 'Distance vs Duration', xlab = 'Distance', ylab = 'Duration', col=c('red', 'blue'))
|
08ed8cc586396a859a09feeae58b4bc68e5b4599
|
6e0cdf0db71decd74e246cd8a2f5bbd4f4bef076
|
/R/graphs.R
|
1e10f2460e8a8a5755b677c5fc5dcbb64f597c7b
|
[
"MIT"
] |
permissive
|
jandraor/SEIR_cohorts_calibration
|
d88e2d7a56fd3ea8789d7565eeed73ff7dc254ee
|
2da19c9f7866769700d13976a0fcbe9019f5b6b8
|
refs/heads/master
| 2021-08-28T21:01:30.566173
| 2021-08-16T12:34:56
| 2021-08-16T12:34:56
| 246,302,611
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,547
|
r
|
graphs.R
|
# Draw faceted posterior densities with per-parameter summary annotations.
#
# Args:
#   densities_df: data frame with columns x, y, param, area (as produced by
#     generate_graph_inputs()); `area` flags points inside the HPDI.
#   stats_df: per-parameter summary stats and label positions (mean_value,
#     *_label, x_pos, y_pos_* columns).
#   x_pos: NOTE(review): apparently unused -- the aes() below resolves x_pos
#     from the stats_df column of the same name. Confirm intent.
#   text_size: size of the annotation text.
# Returns: a ggplot object.
draw_multiple_densities <- function(densities_df, stats_df,
                                    x_pos, text_size = 5) {
  # Return the plot directly: the original assigned it to a local `g`, which
  # made the function return invisibly, so the plot did not autoprint when
  # the function was called at top level.
  ggplot(densities_df, aes(x = x, y = y)) +
    geom_line() +
    geom_ribbon(aes(ymin = 0, ymax = y, fill = area)) +
    facet_wrap(~ param, scales = "free") +
    scale_fill_manual(values = c(NA, "lightgrey")) +
    geom_vline(aes(xintercept = mean_value), stats_df, color = "#1261A0",
               linetype = "dashed", size = 1) +
    geom_text(aes(x = x_pos, y = y_pos_mean, label = mean_label),
              stats_df, colour = "#1261A0", size = text_size) +
    geom_text(aes(x = x_pos, y = y_pos_median, label = median_label),
              stats_df, colour = "#1261A0", size = text_size) +
    geom_text(aes(x = x_pos, y = y_pos_interval, label = interval_label),
              stats_df, colour = "grey", size = text_size) +
    theme_classic() +
    theme(legend.position = "none",
          axis.title.y = element_blank(),
          axis.text.y = element_blank(),
          axis.ticks.y = element_blank())
}
# Build the inputs consumed by draw_multiple_densities(): per-parameter
# density curves plus a table of summary statistics and label positions.
#
# Args:
#   stan_fit: a fitted Stan model (anything as.data.frame() turns into a
#     data frame of posterior draws).
#   x_pos, y_pos, y_pos_median, y_pos_interval: plot coordinates at which
#     the mean / median / credible-interval labels will be drawn.
#   pars: character vector of parameter (column) names to summarise.
#   rename_pars: optional list of lists with elements `old` and `new`; each
#     entry renames a posterior column before selection.
# Returns: a list with `densities` (long data frame of density curves,
#   flagged by whether x lies inside the 95% HPDI) and `stats_df` (one row
#   per parameter with mean/median/interval values and ready-made labels).
generate_graph_inputs <- function(stan_fit, x_pos, y_pos, y_pos_median,
                                  y_pos_interval, pars, rename_pars = NULL) {
  posterior_df <- as.data.frame(stan_fit)
  # Apply the requested column renames one at a time (tidy-eval unquoting).
  for(rename_list in rename_pars) {
    posterior_df <- posterior_df %>%
      rename(!!rename_list$new := !!rename_list$old)
  }
  # Derived quantities: reciprocals of the corresponding rate parameters.
  if("recoveryTime" %in% pars) {
    posterior_df <- mutate(posterior_df, recoveryTime = 1 / recoveryProportion)
  }
  if("latent_period" %in% pars) {
    posterior_df <- mutate(posterior_df,
                           latent_period = 1 / incubation_proportion)
  }
  # NOTE(review): selecting with a bare external character vector; dplyr
  # recommends all_of(pars) here to avoid ambiguity with column names.
  params_df <- select(posterior_df, pars)
  # 95% highest posterior density interval per parameter.  HPDI is not
  # defined in this file -- presumably rethinking::HPDI; confirm.
  credible_intervals <- apply(params_df, 2, HPDI, prob = 0.95) %>% t() %>%
    as.data.frame() %>%
    rename(lower_interval = "|0.95", upper_interval = "0.95|") %>%
    mutate(param = rownames(.))
  means <- apply(params_df, 2, mean)
  medians <- apply(params_df, 2, median)
  # One row per parameter: point estimates, interval bounds, and the label
  # strings/positions used by the plotting function.
  stats_df <- data.frame(stringsAsFactors = FALSE,
                         param = names(means),
                         mean_value = means,
                         median_value = medians,
                         lower_interval = credible_intervals$lower_interval,
                         upper_interval = credible_intervals$upper_interval) %>%
    mutate(mean_label = paste0("mean = ", round(mean_value, 3)),
           median_label = paste0("median = ", round(median_value, 3)),
           interval_label = paste0("[ ", round(lower_interval, 3), ", ",
                                   round(upper_interval, 3), " ]"),
           x_pos = x_pos,
           y_pos_mean = y_pos,
           y_pos_median = y_pos_median,
           y_pos_interval = y_pos_interval)
  # One density curve per parameter, joined back to its interval so the
  # plotting code can shade the area inside the HPDI.
  densities <- apply(params_df, 2, density) %>% lapply(function(densityObj){
    data.frame(x = densityObj$x, y = densityObj$y)
  }) %>% bind_rows(.id = "param") %>%
    inner_join(credible_intervals) %>%
    mutate(area = x >= lower_interval & x <= upper_interval)
  list(densities = densities,
       stats_df = stats_df)
}
# Draw a single posterior density with mean/median reference lines and a
# shaded 95% equal-tailed (quantile) interval.
#
# Args:
#   data_vector: numeric vector of posterior draws.
#   g_params: list of plot settings: x_pos, ypos_mean, ypos_median,
#     ypos_interval (label coordinates), text_size, xlabel, title.
# Returns: a ggplot object.
draw_density <- function(data_vector, g_params) {
  # 95% equal-tailed interval (quantiles), unlike the HPDI used elsewhere
  # in this file.
  credible_interval <- quantile(data_vector, c(0.025, 0.975))
  mean_param <- mean(data_vector)
  median_param <- median(data_vector)
  hist.y <- density(data_vector) %$%
    data.frame(x = x, y = y) %>%
    mutate(area = x >= credible_interval[1] & x <= credible_interval[2])
  # Return the plot directly: the original assigned it to a local `g1`,
  # which made the function return invisibly, so the plot did not autoprint
  # when the function was called at top level.
  ggplot(hist.y, aes(x = x)) +
    geom_line(aes(y = y)) +
    geom_text(aes(x = g_params$x_pos, y = g_params$ypos_mean),
              label = paste0("mean = ", round(mean_param, 4)),
              colour = "#1261A0", size = g_params$text_size) +
    geom_text(aes(x = g_params$x_pos, y = g_params$ypos_median),
              label = paste0("median = ", round(median_param, 4)),
              colour = "#009999", size = g_params$text_size) +
    annotate("text", x = g_params$x_pos, y = g_params$ypos_interval,
             label = paste0("[",
                            round(credible_interval[1], 4),",",
                            round(credible_interval[2], 4), "]"),
             size = g_params$text_size,
             colour = "grey") +
    geom_ribbon(aes(ymin = 0, ymax = y, fill = area)) +
    scale_fill_manual(values = c(NA, "lightgrey")) +
    geom_vline(aes(xintercept = mean_param),
               color = "#1261A0", linetype = "dashed", size = 1) +
    geom_vline(aes(xintercept = median_param),
               color = "#009999", linetype = "dashed", size = 1) +
    theme_classic() +
    theme(legend.position = "none",
          axis.title.y = element_blank(),
          axis.text.y = element_blank(),
          axis.ticks.y = element_blank(),
          plot.title = element_text(color = "#404040", size = 8)) +
    labs(x = g_params$xlabel, title = g_params$title)
}
# Heat-map of a WAIFW ("Who Acquires Infection From Whom") matrix, with each
# cell labelled by its value and, optionally, a credible interval.
#
# Args:
#   WAIFW: square numeric matrix -- presumably age group x age group; confirm.
#   subtitle: plot subtitle.
#   interval_df: optional data frame with columns lower.interval and
#     upper.interval, one row per cell in the order produced by melt(t(WAIFW)).
#   precision: number of decimal places shown in each cell label.
# Returns: a ggplot object.
draw_WAIFW <- function(WAIFW, subtitle, interval_df = NULL,
                       precision = 0) {
  # NOTE(review): library() inside a function attaches reshape2 globally as
  # a side effect; melt() below depends on it being attached.
  library(reshape2)
  # Transpose then melt to long form (Var1 = x-axis, Var2 = y-axis).
  WAIFW_df <- WAIFW %>% t() %>% melt()
  if(!is.null(interval_df)) {
    WAIFW_df <- bind_cols(WAIFW_df, interval_df)
  }
  # Reverse the y-axis factor levels so the heat-map reads top-to-bottom
  # like the matrix itself.
  g <- ggplot(data = WAIFW_df, aes(x = Var1,
                                   y = ordered(Var2, levels = rev(sort(unique(Var2)))),
                                   fill = value)) +
    geom_tile() +
    scale_fill_gradient(low = "lightblue", high = "darkblue") +
    geom_text(aes(label = format(round(value, digits = precision),
                                 nsmall = precision) ), colour = "white",
              size = 2) +
    theme_minimal() +
    labs(y ="", x = "",subtitle = subtitle) +
    theme(legend.position = "none",
          plot.subtitle = element_text(color = "#404040", size = 8),
          axis.text.x = element_text(size = 6),
          axis.text.y = element_text(size = 6))
  # Draw the interval label slightly below the cell's value.
  if(!is.null(interval_df)) {
    g <- g + geom_text(
      aes(label = paste("[", lower.interval," ,", upper.interval, "]")),
      nudge_y = -0.2, size = 1.5, colour = "white")
  }
  g
}
# Lollipop chart comparing time-series fit metrics (MASE, MSE,
# log-likelihood) across calibration runs started from different initial
# parameter sets.
#
# Args:
#   summaries_optim: list of optimisation summaries, each with a `metrics`
#     element containing avg_MASE, avg_MSE and log_lik.
#   inits: identifiers of the initial parameter sets, parallel to
#     summaries_optim.
# Returns: a ggplot object, facetted by metric with the best run highlighted.
ts_errors <- function(summaries_optim, inits) {
  metrics_list <- purrr::map(summaries_optim, "metrics")
  MASEs <- metrics_list %>% map_dbl("avg_MASE")
  MSEs <- metrics_list %>% map_dbl("avg_MSE")
  log_liks <- metrics_list %>% map_dbl("log_lik")
  df_params_list <- list(
    list(label = "MASE",
         vals = MASEs),
    list(label = "MSE",
         vals = MSEs),
    list(label = "Log lik",
         vals = log_liks))
  # Flag the best run per metric: lowest for errors, highest for log-lik.
  metric_df <- map_df(df_params_list, function(df_params, inits) {
    if(df_params$label != "Log lik") var_best <- min(df_params$vals)
    if(df_params$label == "Log lik") var_best <- max(df_params$vals)
    tibble(init = inits, value = df_params$vals) %>%
      mutate(is_Best = value == var_best,
             metric = df_params$label)
  }, inits = inits)
  # Order init ids by descending MASE so the best run is drawn on top after
  # coord_flip().
  MASE_df <- metric_df %>% filter(metric == "MASE") %>%
    arrange(desc(value))
  metric_df <- mutate(metric_df, init = factor(init, levels = MASE_df$init))
  ggplot(metric_df, aes(x = init, y = value)) +
    facet_wrap(~ metric, scales = "free", nrow = 1) +
    coord_flip() +
    geom_lollipop(aes(colour = is_Best)) +
    scale_colour_manual(values = c("grey", "steelblue")) +
    theme_minimal() +
    theme(legend.position = "none") +
    labs(x = "Init id",
         y = "Value",
         title = "Predicted incidence accuracy")
}
# Lollipop chart comparing parameter-recovery errors across initial
# parameter sets: SMAPE for the K estimates and MSE for R0.
#
# Args:
#   summaries_optim: list of optimisation summaries with elements SMAPE_K
#     and R_nought.
#   actual_R0: the true R0 against which the MSE is computed.  MSE() is not
#     defined in this file -- presumably from a metrics package; confirm.
#   inits: identifiers of the initial parameter sets.
# Returns: a ggplot object, facetted by variable, minimum highlighted.
draw_inits_comparison <- function(summaries_optim, actual_R0, inits) {
  SMAPE_K <- map_dbl(summaries_optim, "SMAPE_K")
  MSE_R0 <- map_dbl(summaries_optim, function(summary, actual_R0) {
    MSE(actual_R0, summary$R_nought)
  }, actual_R0 = actual_R0)
  df_params_list <- list(
    list(label = "K (SMAPE)",
         vals = SMAPE_K),
    list(label = "R0 (MSE)",
         vals = MSE_R0))
  # Flag the run with the smallest error for each variable.
  MSEs_df <- map_df(df_params_list, function(df_params, inits) {
    var_min <- min(df_params$vals)
    tibble(init = inits, value = df_params$vals) %>%
      mutate(is_Min = value == var_min,
             variable = df_params$label)
  }, inits = inits)
  ggplot(MSEs_df, aes(x = as.factor(init), y = value)) +
    facet_wrap(~ variable, scales = "free", nrow = 1) +
    coord_flip() +
    geom_lollipop(aes(colour = is_Min)) +
    scale_colour_manual(values = c("grey", "steelblue")) +
    theme_minimal() +
    theme(legend.position = "none") +
    labs(x = "Init id",
         y = "Error")
}
# Draw a distance-comparison graph: point estimates of the reporting
# probability with error bars, per variant, facetted by method, with the
# actual value as a dashed reference line.
#
# Args:
#   df: data frame with columns x, y, lower.bound, upper.bound, method.
#   limits: length-2 numeric vector giving the y-axis limits.
#   actual_val: true reporting probability (dashed horizontal line).
# Returns: a ggplot object.
draw_dcg <- function(df, limits, actual_val) {
  # Return the plot directly: the original assigned it to a local `g`, which
  # made the function return invisibly, so the plot did not autoprint when
  # the function was called at top level.
  ggplot(df, aes(x = x, y = y)) +
    geom_point() +
    geom_errorbar(aes(ymin = lower.bound, ymax = upper.bound), width = .1) +
    geom_hline(yintercept = actual_val, linetype = "dashed") +
    scale_y_continuous(limits = limits) +
    facet_wrap(~ method) +
    theme_test() +
    theme(legend.text = element_text(size = 3)) +
    labs(x = "Variant", y = "Reporting probability")
}
# Compare simulated incidence time-series against observed data points,
# facetted by cohort, optionally with an uncertainty ribbon.
#
# Args:
#   sim_data: data frame with columns time, y, cohort and (when intervals is
#     TRUE) lower_bound / upper_bound.
#   real_data: data frame of observed points with matching time/y/cohort
#     columns.
#   intervals: if TRUE, draw a ribbon between lower_bound and upper_bound.
#   scales: facet scales, passed to facet_wrap().
#   xlabel: x-axis label.
# Returns: a ggplot object.
g_compare_ts <- function(sim_data, real_data, intervals = TRUE,
                         scales = "fixed", xlabel = "Days") {
  g <- ggplot(sim_data, aes(x = time, y = y)) +
    geom_line(colour = "steelblue", alpha = 0.9, size = 0.25) +
    geom_point(data = real_data, size = 0.5, colour = "grey30",
               alpha = 0.8) +
    scale_y_continuous(labels = comma) +
    facet_wrap(~ cohort, scales = scales)
  # The ribbon is added after the line/points, so it is drawn on top of them
  # (at alpha 0.5).
  if(isTRUE(intervals)) {
    g <- g + geom_ribbon(aes(ymin = lower_bound, ymax = upper_bound),
                         alpha = 0.5, fill = "steelblue")
  }
  g <- g +
    labs(x = xlabel, y = "Incidence") +
    theme_pubr()
  g
}
# ===================Pairs======================================================
dens_fn <- function(data, mapping, ...){
  # Lower-triangle panel for ggpairs: a 2-D kernel density rendered as a
  # tile heat-map with a viridis fill scale.
  ggplot(data = data, mapping = mapping) +
    stat_density2d(aes(fill=..density..), geom = "tile", contour = FALSE) +
    scale_fill_viridis_c()
}
# Correlation panel for the upper triangle of a ggpairs plot: prints the
# correlation coefficient, shaded darker the stronger the correlation,
# optionally decorated with significance stars.
#
# Args:
#   data, mapping: standard ggpairs panel-function arguments.
#   method: correlation method passed to cor.test().
#   ndp: decimal places for the printed coefficient.
#   sz: base label size (scaled by |correlation|).
#   stars: if TRUE, append significance stars based on the p-value.
#   ...: forwarded to annotate().
# Returns: a ggplot object.
cor_fun <- function(data, mapping, method = "pearson", ndp = 2, sz=5,
                    stars=TRUE, ...){
  x <- eval_data_col(data, mapping$x)
  y <- eval_data_col(data, mapping$y)
  corr <- cor.test(x, y, method=method)
  est <- corr$estimate
  # Scale label size and colour by the strength of the correlation.
  # (Removed the unused `lb.size` local; the annotate() call below uses a
  # fixed size of 3, as in the original.)
  palette <- gradient_n_pal(c("lightgrey", "black"))
  # Bug fix: `cor_colour` was previously assigned only inside the `stars`
  # branch but used unconditionally below, so stars = FALSE raised an
  # "object 'cor_colour' not found" error.
  cor_colour <- palette(abs(est))
  if(stars){
    stars <- c("***", "**", "*", "")[findInterval(corr$p.value,
                                                  c(0, 0.001, 0.01, 0.05, 1))]
    lbl <- paste0(round(est, ndp), stars)
  }else{
    lbl <- round(est, ndp)
  }
  ggplot(data = data, mapping = mapping) +
    annotate("text", x = mean(x, na.rm = TRUE), y = mean(y, na.rm=TRUE),
             label=lbl, size = 3, colour = cor_colour,...)+
    theme(panel.grid = element_blank())
}
pairs_posterior <- function(posterior, strip_text = 3) {
  # Scatter-matrix of posterior samples: density heat-maps below the
  # diagonal (dens_fn) and shaded correlation labels above it (cor_fun).
  base_plot <- ggpairs(posterior,
                       lower = list(continuous = dens_fn),
                       upper = list(continuous = cor_fun))
  small_text <- theme(axis.text = element_text(size = 4),
                      strip.text = element_text(size = strip_text))
  base_plot + theme_pubr() + small_text
}
#===============================================================================
# Lollipop chart of calibration run times, facetted by scenario and method.
#
# Args:
#   t_df: data frame with columns matrix (structure name), time (minutes),
#     scenario and method.
# Returns: a ggplot object.
g_time_comparison <- function(t_df) {
  ggplot(t_df, aes(x = matrix, y = time)) +
    geom_lollipop(colour = "steelblue") +
    scale_y_continuous() +
    coord_flip() +
    # Print the rounded run time next to each lollipop head.
    geom_text(aes(label = round(time, 0)), nudge_y = 10, size = 3) +
    scale_colour_manual(values = c("grey", "steelblue")) +
    facet_grid(scenario ~ method) +
    theme_test() +
    theme(legend.position = "none") +
    labs(x = "Structure", y = "Time [Minutes]",
         title = "Run time")
}
|
34e9668689b3fd7d48ade1e6c337f52ebfe09ff6
|
8188db98bf6f785fdd8f52a4ff341927b13a02aa
|
/man/readExposome.Rd
|
c2c7e1b5f0f8a48bc8f714498220979f351a0bea
|
[
"MIT"
] |
permissive
|
isglobal-brge/rexposome
|
a931ee6ac44044652dc40dbaf9752d6478b8f833
|
0bb431c5d01bd4f2112205c8e6f4d46478d037e5
|
refs/heads/master
| 2023-06-09T13:23:34.657993
| 2023-01-26T15:05:53
| 2023-01-26T15:05:53
| 79,567,386
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,768
|
rd
|
readExposome.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readExposome.R
\name{readExposome}
\alias{readExposome}
\title{Creation of an ExposomeSet from files}
\usage{
readExposome(
exposures,
description,
phenotype,
sep = ",",
na.strings = c("NA", "-", "?", " ", ""),
exposures.samCol = "sample",
description.expCol = "exposure",
description.famCol = "family",
phenotype.samCol = "sample",
exposures.asFactor = 5,
warnings = TRUE
)
}
\arguments{
\item{exposures}{String with the path to the file with the matrix of
exposures.}
\item{description}{String with the path to the file with the description of
the exposures (relation between exposures and exposure-family).}
\item{phenotype}{String with the path to the file with the phenotypes of
interest.}
\item{sep}{(default \code{","}) Separator used by \code{\link{read.table}} to
load the files "exposures", "description" and "phenotype".}
\item{na.strings}{(default \code{c("NA", "-", "?", " ", "")}) Character
defining the \code{NA} values in expsome's files.}
\item{exposures.samCol}{(default \code{"sample"}) Index where the samples'
name are found in file "exposures". It can be either character or numeric.}
\item{description.expCol}{(default \code{"exposure"}) Index where the
exposures' name are found in file "description". It can be both numeric
or character.}
\item{description.famCol}{(default \code{"family"}) Index where the family's
name (per exposures) if found in file "description". It can be both numeric
or character.}
\item{phenotype.samCol}{(default \code{"sample"}) Index where the sample's
name are found in file "phenotype". It can be both numeric or character.}
\item{exposures.asFactor}{(default \code{5}) The exposures with more
than this number of unique items will be considered as "continuous" while
the exposures with less or equal number of items will be considered as
"factor".}
\item{warnings}{(default \code{TRUE}) If \code{TRUE} shows useful
information/warnings from the process of loading the exposome.}
}
\value{
An object of class \link{ExposomeSet}.
}
\description{
Given the files that define the exposome (measures of exposome, exposome
description and individuals' phenotype) it loads them and creates an
object of type \link{ExposomeSet}.
}
\details{
The rows of the exposures file, which correspond to samples' names, must
be the same as in the phenotype's file. In the same way, the columns in
the exposures file must be the same as the rows in the description file.
}
\note{
\link{ExposomeSet}'s \code{fData} will contain some inner columns
called \code{.std}, \code{.trn}, \code{.fct} and \code{.type} in order to
trace the transformations an exposure suffers and to know, at any moment, if
an exposure is categorical or continuous. The "description" file can
contains a column called \code{type} with values \code{"factor"} and
\code{"numeric"} to specify how an exposure needs to be understood. If
given, this column will be renamed to \code{.type}. If not given, it will
be created using \code{exposures.asFactor} value.
}
\examples{
## Locate the data-files
path <- file.path(path.package("rexposome"), "extdata")
description <- file.path(path, "description.csv")
phenotype <- file.path(path, "phenotypes.csv")
exposures <- file.path(path, "exposures.csv")
## Create ExposomeSet from files
exp <- readExposome(
exposures = exposures,
description = description,
phenotype = phenotype,
exposures.samCol = 1,
description.expCol = 2,
description.famCol = 1,
phenotype.samCol = 1
)
}
\seealso{
\link{ExposomeSet} for class description,
\link{loadExposome} for constructor from loaded
\code{data.frame}s
}
|
ce98e0951660d7a39235a0d30fc3a3b9a9e1ce6b
|
fe04211f605d0ab8b5404accee6cfc725ad4bc53
|
/imp/saver/code/01_normalize_libsize.R
|
c8914ea92b2e72745eab9b836660a9af9d3c39c3
|
[] |
no_license
|
Winnie09/COVID_integrative
|
7707403d4d1dbced66704c4b6176d93785d8df0f
|
c024b999bd30c17f584000e16c2f96eb923db2c1
|
refs/heads/master
| 2023-02-27T17:23:05.152064
| 2021-02-08T04:00:14
| 2021-02-08T04:00:14
| 304,786,205
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 458
|
r
|
01_normalize_libsize.R
|
library(Matrix)
library(parallel)
# Median-normalise per-cell library sizes of the PBMC count matrix and write
# one normalised matrix per patient to `rdir`.
setwd('/dcl02/hongkai/data/covid/data/200916/')
rdir <- '/dcl02/hongkai/data/whou/covid/imp/data/norm/'
pbmc <- readRDS('pbmc.rds')
# Column names look like "<patient>:<barcode>" -- strip everything after the
# first ':' to recover the patient id.  TODO confirm against the data.
ap <- sub(':.*', '', colnames(pbmc))
# Total counts per cell, scaled so the median cell has library size 1.
libsize <- colSums(pbmc)
libsize <- libsize/median(libsize)
# For each patient: divide that patient's columns by their scaled library
# sizes and save the result as <patient>.rds.  sapply is used only for its
# side effects here; each iteration returns 0.
nn <- sapply(unique(ap), function(p){
  print(p)
  tmp <- pbmc[, ap == p, drop = FALSE]
  tmp <- sweep(tmp, 2, libsize[ap == p], '/')
  saveRDS(tmp, paste0(rdir, p, '.rds'))
  return(0)
})
|
e5f83aa235291c61ed905631d549b7d07a70f6de
|
a5f53a0276eed01abb9cab59b24206b167e0f086
|
/man/readMetaInformation.Rd
|
b834d5e0ab245f34bbb56e71beaf25021d9222a0
|
[] |
no_license
|
gridl/timeseriesdb
|
7c0eebde52e522eb86034aafb96a3242079ddb5e
|
0b0528fc2a1414eee777550506804cc0e3468dd7
|
refs/heads/master
| 2022-02-06T02:58:45.205568
| 2019-07-08T07:46:07
| 2019-07-08T07:46:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,557
|
rd
|
readMetaInformation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readMetaInformation.R
\name{readMetaInformation}
\alias{readMetaInformation}
\title{Read Meta Information from a Time Series Database}
\usage{
readMetaInformation(con, series, locale = "de",
tbl_localized = "meta_data_localized",
tbl_unlocalized = "meta_data_unlocalized", schema = "timeseries",
as_list = TRUE, regex = FALSE)
}
\arguments{
\item{con}{PostgreSQL connection object}
\item{series}{character name of a time series object.}
\item{locale}{character denoting the locale of the meta information that is queried.
defaults to 'de' for German. At the KOF Swiss Economic Institute meta information should be available
also in English 'en', French 'fr' and Italian 'it'. Set the locale to NULL to query unlocalized meta information.}
\item{tbl_localized}{character name of the table that contains localized meta information. Defaults to 'meta_data_localized'.}
\item{tbl_unlocalized}{character name of the table that contains general meta information. Defaults to 'meta_data_unlocalized'.}
\item{schema}{SQL schema name. Defaults to timeseries.}
\item{as_list}{Should the result be returned as a tsmeta.list instead of a tsmeta.dt? Default TRUE}
\item{regex}{If set to TRUE, series will be interpreted as a regular expression, so that metadata for all time series whose keys match the pattern will be returned.}
}
\description{
This function reads meta information from a timeseriesdb package PostgreSQL
database and puts into a meta information environment.
}
|
0ab3c25e424cbc9ac55df00b1bef3a7d9067fa08
|
36529e0093254e9aa33f02f39d191d6201a2f826
|
/PQE-input/Masking up extreme floods in 70s, Gingera/masking_two_floods_in_70s.R
|
0a12fbe2f227f40051c4a6d7c448ca937d8db38c
|
[] |
no_license
|
LynnSeo/Sensitivity-Analysis
|
09a07d39f5011de3e130166cd1a2a4a6fbc73414
|
e8c763c4553aa56cad0ecbce8799d844dfda67fc
|
refs/heads/master
| 2021-01-23T07:34:52.522742
| 2017-06-19T05:17:11
| 2017-06-19T05:17:11
| 86,503,370
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,451
|
r
|
masking_two_floods_in_70s.R
|
######### Mask the two extreme flood events of the 1970s (Gingera) #########
library('zoo')
library('hydromad')
library('xts')
library('stringr')

# Set up working directory.
# NOTE(review): a hard-coded absolute path makes the script non-portable;
# kept because the file paths below are relative to it.
wd <- 'C:/UserData/seol/Sensitivity Analyses/Sacramento/Calibration considering SA/Calibration of Sensitive Parameters only/'
setwd(wd)

####################################
t_year <- '80s'           # target year for calibration
t_catchment <- 'Gingera'  # target catchment
####################################

# Input file with the P/Q/E time series.  (Two earlier reassignments of
# name_tspqe were dead code -- each was immediately overwritten -- and have
# been removed; this is the value that was actually used.)
name_tspqe <- 'real_masked_Gingera_tsPQE.csv'
tsPQE <- read.zoo(name_tspqe, sep = ',', header = TRUE)
tsPQE <- as.xts(tsPQE)

# Sub-decades.
ts70s <- tsPQE["1970-01-01::1979-12-31"]
ts80s <- tsPQE["1980-01-01::1989-12-31"]
ts90s <- tsPQE["1990-01-01::1999-12-31"]
ts00s <- tsPQE["2000-01-01::2009-12-31"]

# Inspect the 1970s series and the two flood windows before masking.
plot.zoo(ts70s)
plot.zoo(ts70s['1974-08-26::1974-09-16'])
plot.zoo(ts70s['1976-10-13::1976-10-25'])

# Zero out rainfall (P) and streamflow (Q) over the two flood windows.
ts70s$Q['1974-08-26::1974-09-16'] <- 0
ts70s$P['1974-08-26::1974-09-16'] <- 0
ts70s$P['1976-10-13::1976-10-25'] <- 0
ts70s$Q['1976-10-13::1976-10-25'] <- 0
plot.zoo(ts70s['1974-08-26::1974-09-16'])
plot.zoo(ts70s['1976-10-13::1976-10-25'])

# Apply the same masking to the full series and write it out.
tsPQE$Q['1974-08-26::1974-09-16'] <- 0
tsPQE$P['1974-08-26::1974-09-16'] <- 0
tsPQE$P['1976-10-13::1976-10-25'] <- 0
tsPQE$Q['1976-10-13::1976-10-25'] <- 0
write.csv(as.data.frame(tsPQE), 'real_masked_Gingera.csv', row.names = TRUE)
|
b97a40821cc8669164a6dbbf6c2f14c32f24469e
|
4050c25b8aa1bd07808af59300bf8058c7890949
|
/Scripts/HornwortsLiverworts/LiverwortDiversity.R
|
6f0847f9af521faa39b481b9c1d993b9772d469e
|
[] |
no_license
|
KerkhoffLab/Bryophytes
|
6faf03b9f73be24eeff7017f092451e824ac15ca
|
9bb7a8e4c0fa5a9f16e4dbfc937b643da0a69ab4
|
refs/heads/master
| 2021-07-23T17:12:46.284440
| 2021-07-22T16:26:02
| 2021-07-22T16:26:02
| 193,754,056
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,334
|
r
|
LiverwortDiversity.R
|
#Adapted from HLDiversity.R and MossDiversity.R
#Kathryn Dawdy, Summer 2020

# Compute liverwort species richness per grid cell and pairwise Sorensen
# beta-diversity between cells.  Relies on functions from packages loaded
# elsewhere (dplyr's tally/group_by, raster's adjacent, vegan's betadiver)
# and on the global raster `BlankRas` -- TODO confirm these are attached by
# an earlier script in the pipeline.

#Load data
BryophytePresence <- readRDS("Data/BryophytePresence.rds")

#Subset liverwort data
LiverwortPresence <- subset(BryophytePresence, BryophytePresence$Group=="Liverworts")

#Create occurrence by cell matrix by reshaping dataframe, then convert to presence-absence matrix
require(reshape2)
# Species count (richness) per cell.
LiverwortRichness <- tally(group_by(LiverwortPresence, CellID))
saveRDS(LiverwortRichness, file="Data/LiverwortRichness.rds")
# Columns 1 and 4 are taken as Species and CellID -- confirm column order.
SpeciesCellID <- LiverwortPresence[,c(1,4)]
melted <- melt(SpeciesCellID, id=c("Species", "CellID"), na.rm = TRUE)
CellID <- LiverwortRichness$CellID
cellvector <- c(1:15038)
# 8-neighbourhood of every raster cell, then subset to occupied cells.
# NOTE(review): `melted`, `neighbors` and `bryneighbors` are not used later
# in this script -- possibly leftovers from the adapted source files.
neighbor <- function(cellvector) {(adjacent(BlankRas, cellvector, directions=8, pairs=FALSE, target=CellID, sorted=TRUE, include=FALSE, id=FALSE))}
neighbors <- lapply(cellvector, neighbor)
names(neighbors) <- cellvector
bryneighbors <- neighbors[CellID]
# Cell x species incidence matrix, converted to presence/absence (0/1).
LiverwortCellMatrix <- acast(SpeciesCellID, CellID~Species, margins=FALSE, fill=0)
LiverwortCellMatrix[LiverwortCellMatrix > 0] <- 1

#Using betadiver to compute B-diversity using Sorensen dissimilarity
#betadiver(help = TRUE) gives you indices
LiverwortBetaMat <- betadiver(LiverwortCellMatrix, method = "sor", order = FALSE, help = FALSE)
saveRDS(LiverwortBetaMat, file="Data/LiverwortBetaMat.rds")
|
537423d89e2fff543f5656737457c3c5dfcc001d
|
27c8c8337342e22d3e638d9738ca6499243bc86b
|
/man/pivot_wider_profile.Rd
|
900aa68e51528ee753b3cdc9680cc4be40c83cef
|
[] |
no_license
|
Eirinits/decoupleR
|
1f578ef44dd3a81496e276058fb3c6eca7d6608d
|
3926381bc63362a7ec7cb1b32b40a85f1f9a9cd1
|
refs/heads/master
| 2023-06-03T01:35:56.461380
| 2021-05-25T18:57:17
| 2021-05-25T18:57:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,061
|
rd
|
pivot_wider_profile.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-profiles.R
\name{pivot_wider_profile}
\alias{pivot_wider_profile}
\title{Pivot a data frame to wider and convert it to matrix}
\usage{
pivot_wider_profile(
data,
id_cols,
names_from,
values_from,
values_fill = NA,
to_matrix = FALSE,
to_sparse = FALSE,
...
)
}
\arguments{
\item{data}{A data frame to pivot.}
\item{id_cols}{<\code{\link[tidyr:tidyr_tidy_select]{tidy-select}}> A set of columns that
uniquely identifies each observation. Defaults to all columns in \code{data}
except for the columns specified in \code{names_from} and \code{values_from}.
Typically used when you have redundant variables, i.e. variables whose
values are perfectly correlated with existing variables.}
\item{names_from}{<\code{\link[tidyr:tidyr_tidy_select]{tidy-select}}> A pair of
arguments describing which column (or columns) to get the name of the
output column (\code{names_from}), and which column (or columns) to get the
cell values from (\code{values_from}).
If \code{values_from} contains multiple values, the value will be added to the
front of the output column.}
\item{values_from}{<\code{\link[tidyr:tidyr_tidy_select]{tidy-select}}> A pair of
arguments describing which column (or columns) to get the name of the
output column (\code{names_from}), and which column (or columns) to get the
cell values from (\code{values_from}).
If \code{values_from} contains multiple values, the value will be added to the
front of the output column.}
\item{values_fill}{Optionally, a (scalar) value that specifies what each
\code{value} should be filled in with when missing.
This can be a named list if you want to apply different aggregations
to different value columns.}
\item{to_matrix}{Logical value indicating if the result should be a matrix.
Parameter is ignored in case \code{sparse} is \code{TRUE}.}
\item{to_sparse}{Logical value indicating whether the resulting matrix
should be sparse or not.}
\item{...}{Additional arguments passed on to methods.}
}
\value{
"widened" data; it is increasing the number of columns and
decreasing the number of rows.
}
\description{
Generates a kind of table where the rows come from \code{id_cols},
the columns from \code{names_from} and the values from \code{values_from}.
}
\details{
In the current state of the function, to ensure its operation,
the \code{id_cols} parameter is a single selector.
}
\examples{
\dontrun{
df <- tibble::tibble(
tf = c("tf_1", "tf_1", "tf_2", "tf_2"),
gene = c("gene_1", "gene_2", "gene_1", "gene_2"),
mor = c(1, -1, 1, -1)
)
# Return a tibble
pivot_wider_profile(
data = df,
id_cols = tf,
names_from = gene,
values_from = mor
)
# Return a matrix
pivot_wider_profile(
data = df,
id_cols = tf,
names_from = gene,
values_from = mor,
to_matrix = TRUE
)
# Return a sparse Matrix of class "dgCMatrix"
pivot_wider_profile(
data = df,
id_cols = tf,
names_from = gene,
values_from = mor,
to_sparse = TRUE
)
}
}
\keyword{internal}
|
8b7cb4470f28d9b10b062a751b430fd0988fcda1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/tabr/vignettes/tabr-engraving.R
|
eb1529490282227b7a98151ef98f7db9ffa6b0b5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,929
|
r
|
tabr-engraving.R
|
## ----setup, include = FALSE----------------------------------------------
# Code extracted (purl'd) from a tabr vignette; the `## ----` markers are
# the original chunk labels.
knitr::opts_chunk$set(
  collapse = TRUE, comment = "#>", message = FALSE, warning = FALSE, error = FALSE, tidy = TRUE, out.width = "100%"
)

## ----args----------------------------------------------------------------
library(tabr)
args(lilypond)

## ----song----------------------------------------------------------------
# Build a two-voice guitar part plus a bass track, then bind them into a
# score together with a chord chart.
voice1 <- rp(p("c5 d5 e5 f5 g5", "1 2 4 4 1", "1*5"), 2)
notes <- "c e g c' e' c' g e g b d' g' f a c' f' c e g e c"
strings <- "5 4 3 2 1 2 3 4 4 3 2 1 4 3 2 1 5 4 3 4 5"
voice2 <- rp(p(notes, "8*20 2", strings), 2)
bass <- rp(p("c2e2*4 g1*2 f1*2 c2e2*3", "4*10 2", "32*4 4*4 32*3"), 2)
t1 <- track(voice1, voice = 1)
t2 <- track(voice2, voice = 2)
t3 <- track(bass, tuning = "bass", music_staff = "bass_8")
chords <- chord_set(c(c = "x32o1o", g = "355433", f = "133211"))
chord_seq <- rep(setNames(c(1, 2, 2, 1), names(chords)[c(1:3, 1)]), 3)
chords
chord_seq
song <- trackbind(t1, t2, t3, tabstaff = c(1, 1, 2)) %>% score(chords, chord_seq)
song

## ----metadata, results="hide", eval=FALSE--------------------------------
# (eval = FALSE in the vignette: these write LilyPond/PDF output.)
# lilypond(song, "ex32.ly", "dm", "2/2", "4 = 120")
# tab(song, "ex32.pdf", "dm", "2/2", "4 = 120")

## ----header, results="hide", eval=FALSE----------------------------------
# header <- list(
#   title = "Song title",
#   composer = "Words and music by the composer",
#   performer = "Song performer",
#   album = "Album title",
#   subtitle = "Subtitle",
#   arranger = "Arranged by tab arranger",
#   copyright = "2018 <Record Label>",
#   instrument = "guitar and bass",
#   tagline = "A tagline",
#   meter = "meter tag", opus = "opus tag", piece = "piece tag", poet = "poet tag"
# )
#
# tab(song, "ex33.pdf", header = header)

## ----cleanup, echo=FALSE-------------------------------------------------
# Remove MIDI files produced as a side effect of rendering.
unlink("*.mid")
|
9104c0e34f5a0491470559dffc4e98dcf4cacb08
|
cd3312ba777c36aaca541a6223addbdae5be7932
|
/scripts/stepthrough/step_2_calibration/s2.1_calibrate_ler_models.R
|
a3d1bf5f8fd1fd535355fe3961b4a06142c25eb4
|
[] |
no_license
|
jacob8776/sunapee_LER_projections
|
2aa96061fcfd8b354569cd86b85d42509274b109
|
dcd3a757487cbb773c93f134bedc9881d3ba185d
|
refs/heads/main
| 2023-04-30T21:28:24.512696
| 2022-07-22T15:15:00
| 2022-07-22T15:15:00
| 376,087,592
| 0
| 1
| null | 2022-08-08T19:18:39
| 2021-06-11T16:46:55
|
HTML
|
UTF-8
|
R
| false
| false
| 5,109
|
r
|
s2.1_calibrate_ler_models.R
|
# --- LakeEnsemblR LHC calibration, step 2.1: environment & config setup ---
Sys.setenv(TZ = "UTC")
# Development versions used (installation notes kept from the original):
# remotes::install_github("tadhg-moore/LakeEnsemblR", ref = "flare")
# remotes::install_github("tadhg-moore/gotmtools", ref = "yaml")
# remotes::install_github("tadhg-moore/LakeEnsemblR", ref = "flare")
# remotes::install_github("aemon-j/gotmtools", ref = "yaml", force = TRUE)
# devtools::install_github("tadhg-moore/LakeEnsemblR", ref = "flare")
# install.packages("here")
# Load libraries
library(gotmtools)
library(LakeEnsemblR)  # NOTE(review): attached twice; redundant but harmless
library(ggplot2)
library(LakeEnsemblR)
library(ggpubr)
library(here)
# Set working directory (relative to the project root located by here()).
getwd()
setwd(paste0(here(), '/LER_calibration'))
# Set config file & models.
# NOTE(review): `model` is reassigned to the full five-model vector further
# down before it is used in any model run.
config_file <- 'LakeEnsemblRsun.yaml'
model <- c("Simstrat")
ncdf <- "output/ensemble_output.nc"
config_file
# mantemp <- read.csv("Data/manual_buoy_temp_hrz.csv")
# str(mantemp)
# mantemp$datetime <- as.POSIXct(mantemp$datetime, format = "%Y-%m-%d %H:%M:%S")
# write.csv(mantemp, "Data/manual_buoy_temp_hrz_psx.csv", row.names = FALSE)
# LHC - Calibration ----
# Edit the LER YAML config in place: simulation window, ice module, daily
# output resolution.
yaml <- read_yaml(config_file)
configr::read.config(config_file)
yaml$time$start <- "2005-06-27 00:00:00"
yaml$time$stop <- "2015-01-01 00:00:00"
# yaml$time$start <- "2007-06-11 12:00:00"
# yaml$time$stop <- "2012-01-01 00:00:00"
yaml$input$ice$use <- TRUE
yaml$output$time_step <- 24
yaml$output$time_unit <- "hour"
write_yaml(yaml, config_file)
# LHC calibration settings: 500 parameter draws, first 190 days treated as
# spin-up (excluded from fit statistics), results written under out_f.
num <- 500
spin_up <- 190
out_f <- "change"
cmethod <- "LHC"
model <- c("FLake", "Simstrat", "GOTM", "MyLake", "GLM")
folder <- "."
dir.create(out_f, showWarnings = FALSE)
# Run LER and inspect default output
export_config(config_file, model)
run_ensemble(config_file = config_file, model = model)
# file.rename("output/ensemble_output.nc", "output/ensemble_output_all_models_15Nov21.nc")
# ncdf <- "output/ensemble_output_all_models_15Nov21.nc"
lst <- load_var(ncdf, "temp")
summary(lst$Obs)
# plot heatmap of modelled temperature (fixed 0-32 colour scale)
plot_heatmap(ncdf, model = model) +
  scale_colour_gradientn(limits = c(0, 32),
                         colours = rev(RColorBrewer::brewer.pal(11, "Spectral"))) + theme_classic()
plot_ensemble(ncdf, model = model, var = "ice_height")
# Goodness-of-fit statistics for the uncalibrated run.
fit <- calc_fit(ncdf, model = model, spin_up = spin_up)
fit
# out <- analyze_ncdf(ncdf, model, spin_up = 190)
# out$stats
## Plot residuals
plist <- plot_resid(ncdf = ncdf, var = "temp")
ggarrange(plotlist = plist)
# param_file <- "calibration_results_MyLake_081321/MyLake_LHC_202108131525"
# Latin-hypercube calibration, run in parallel for all five models.
cali_ensemble(config_file, num = num, cmethod = cmethod, parallel = TRUE, model = model, folder = ".",
              spin_up = spin_up, job_name = model, out_f = out_f)
cal_files <- list.files(out_f, full.names = TRUE)
# NOTE(review): keeps only the first two result files -- confirm intent.
cal_files <- cal_files[c(1,2)]
res <- load_LHC_results(config_file = config_file, model = model, res_files = cal_files)
# Inspect calibration results and pick the best parameter set.
# NOTE(review): `model` is a 5-element vector here, so res[[model]] performs
# recursive list indexing -- confirm this is intended.
dim(res[[model]])
# Long-format table of RMSE vs each calibrated parameter value, per model
# (columns 3:7 dropped before melting -- presumably other fit metrics).
df <- plyr::ldply(res, function(x) {
  df <- x[, -c(3:7)]
  reshape2::melt(df, id.vars = c("par_id", "rmse"))
}, .id = "model")
# Extract the numeric id embedded in the par_id string.
df$id_no <- as.numeric(gsub(".*?([0-9]+).*", "\\1", df$par_id))
bst_par <- df$id_no[which.min(df$rmse)]
sub <- df[df$id_no == bst_par, ]
sub
# RMSE against each parameter value; vertical line marks the best draw,
# horizontal dashed lines mark the 2 and 3.5 degC reference levels.
p1 <- ggplot(df) +
  geom_point(aes(value, rmse)) +
  facet_wrap(model~variable, scales = "free_x") +
  geom_hline(yintercept = 2, linetype = "dashed") +
  ylab("RMSE (\u00B0C)") +
  geom_vline(data = sub, aes(xintercept = value)) +
  geom_hline(yintercept = 3.5, color = "red", linetype = "dashed") +
  # coord_cartesian(ylim = c(1, 4)) +
  # scale_x_log10() +
  theme_classic(base_size = 16)
p1
#
sub <- df[df$id_no == bst_par, ]
# sub <- df[df$id_no == 1, ] # Use this to try other parameter combinations
sub
# Per-model blocks for writing the best parameters back into the YAML
# config; uncomment the block for the model being finalised.
#yaml$model_parameters$MyLake$`Phys.par/C_shelter` <- sub$value[3]
#yaml$scaling_factors$MyLake$wind_speed <- sub$value[1]
#yaml$scaling_factors$MyLake$swr <- sub$value[2]
# yaml$model_parameters$FLake$`LAKE_PARAMS/c_relax_C` <- sub$value[3]
# yaml$scaling_factors$FLake$wind_speed <- sub$value[1]
# yaml$scaling_factors$FLake$swr <- sub$value[2]
# yaml$model_parameters$FLake$`LAKE_PARAMS/depth_bs_lk` <- sub$value[4]
# yaml$model_parameters$FLake$`LAKE_PARAMS/T_bs_lk` <- sub$value[5]
# yaml$scaling_factors$GLM$wind_speed <- sub$value[1]
# yaml$scaling_factors$GLM$swr <- sub$value[2]
# yaml$model_parameters$GLM$`sediment/sed_temp_mean` <- c(sub$value[3], sub$value[4])
# yaml$model_parameters$GLM$`glm_setup/max_layer_thick` <- sub$value[5]
# yaml$scaling_factors$GOTM$wind_speed <- sub$value[1]
# yaml$scaling_factors$GOTM$swr <- sub$value[2]
# yaml$model_parameters$GOTM$`turbulence/turb_param/k_min` <- sub$value[3]
# yaml$scaling_factors$Simstrat$wind_speed <- sub$value[1]
# yaml$scaling_factors$Simstrat$swr <- sub$value[2]
# yaml$model_parameters$Simstrat$`ModelParameters/a_seiche` <- sub$value[3]
# Re-run the ensemble with the calibrated parameters and compare the fit.
write_yaml(yaml, config_file)
export_config(config_file, model)
run_ensemble(config_file, model)
fit <- calc_fit(ncdf, model = model, spin_up = spin_up)
fit # Results from running model with calib output as input
sub # Calibration results
plist <- plot_resid(ncdf = "output/ensemble_output.nc", var = "temp")
ggarrange(plotlist = plist)
#ggsave(file.path(out_f, "calib_results.png"), p1, dpi = 300,width = 384, height = 280, units = 'mm')
|
3edb31d3ff6afd5d909ddaa18a9b21c27d3a6b0c
|
b43e19276f5a9f498d3c04b7ab8d25373c9089f1
|
/cachematrix.R
|
e4dcc86b62e266a9db8602fe0b6482c3f30ac0a0
|
[] |
no_license
|
kushla/ProgrammingAssignment2
|
1577a8efc63ee9bceb24c7b0f36b04e5d2d4e9f7
|
3a1c234b450be2391bb747aa2fa3c4ca9b2ac7fc
|
refs/heads/master
| 2021-08-23T04:50:46.964477
| 2017-12-03T12:05:13
| 2017-12-03T12:05:13
| 112,907,669
| 0
| 0
| null | 2017-12-03T07:06:46
| 2017-12-03T07:06:45
| null |
UTF-8
|
R
| false
| false
| 1,923
|
r
|
cachematrix.R
|
## This function creates an R object that stores a matrix and its inverse
## Initialize function name and set default value (empty matrix) to a formal argument x
## Build a cache-aware wrapper around a matrix.
##
## Returns a list of four accessor closures sharing one environment that
## holds the matrix and, once computed, its inverse:
##   set(y)    -- replace the stored matrix and invalidate the cache
##   get()     -- return the stored matrix
##   setinv(v) -- store a computed inverse in the cache
##   getinv()  -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix must also drop any stale cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setinv <- function(solve) cached_inverse <<- solve
  getinv <- function() cached_inverse
  list(
    set = set,
    get = get,
    setinv = setinv,
    getinv = getinv
  )
}
## This function computes the inverse of the matrix returned by makeCacheMatrix above
## or pulls an inverse matrix from cache if it is stored there
## Initialize function name that has single formal argument x
## Return the inverse of the matrix wrapped by a makeCacheMatrix() object.
##
## x   -- a cache-aware matrix object (list of closures) produced by
##        makeCacheMatrix()
## ... -- further arguments forwarded to solve()
##
## If the inverse was computed before it is served from the cache (with a
## message); otherwise it is computed via solve(), stored back into the
## cache, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinv(inverse)
  inverse
}
|
c3adfd5b8c8680120e08742ef570deefe6b0c5f1
|
62da952d6afb59390d3e164081498503abd50fa9
|
/detectRUNS/man/heteroZygotTest.Rd
|
d41b638546a403bda2191207d79b038cbec22fa0
|
[] |
no_license
|
bioinformatics-ptp/detectRUNS
|
1739c908310c696296920b8e570f3e00a42c0ecf
|
e383906cf14a4597980b44830c0df7e9de4b82ef
|
refs/heads/master
| 2023-06-08T13:44:30.383026
| 2022-03-22T16:59:17
| 2022-03-22T16:59:17
| 61,555,187
| 8
| 3
| null | 2023-05-25T15:13:32
| 2016-06-20T14:44:24
|
R
|
UTF-8
|
R
| false
| true
| 1,049
|
rd
|
heteroZygotTest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funktionen.R
\name{heteroZygotTest}
\alias{heteroZygotTest}
\title{Function to check whether a window is (loosely) heterozygous or not}
\usage{
heteroZygotTest(x, gaps, maxHom, maxMiss, maxGap, i, windowSize)
}
\arguments{
\item{x}{vector of 0/1 genotypes (from genoConvert())}
\item{gaps}{vector of differences between consecutive positions (gaps) in bps}
\item{maxHom}{max n. of homozygous SNP in a heterozygous window}
\item{maxMiss}{max n. of missing in a window}
\item{maxGap}{max distance between consecutive SNP in a window to be still considered a potential run}
\item{i}{index along the genome (genome-vector for each individual)}
\item{windowSize}{size of window (n. of SNP)}
}
\value{
a list: i) TRUE/FALSE (whether a window is heterozygous or NOT); ii) indexes of "opposite and missing" genotype
}
\description{
This is a core function within the sliding-window workflow. Parameters on how to consider a window heterozygous are here (maxHom, maxMiss)
}
|
ac7ed40e3ebec52198643555d4d97f6378c36c00
|
d9e676cf47f9f50080538d9e8d8dbce5c2456a3d
|
/mcmcplots/R/as.mcmc.rjags.R
|
4401220e8f526009078d426259780310410a56d9
|
[] |
no_license
|
sumtxt/mcmcplots
|
33da3e490994b633e599a40550b2d693292a6ad7
|
bc13fd645c653f48bf7bcd6cd17505ac0f146dee
|
refs/heads/master
| 2020-12-29T19:04:02.966002
| 2014-05-14T12:46:45
| 2014-05-14T12:46:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
as.mcmc.rjags.R
|
## S3 method: convert an rjags fit to a coda mcmc object by delegating to
## as.mcmc.bugs() on the fit's embedded BUGSoutput component.
as.mcmc.rjags <- function(x) {
  bugs_fit <- x$BUGSoutput
  as.mcmc.bugs(bugs_fit)
}
|
81492890d7e82a659f01a029920a66ea01aa14a8
|
8af539b0b62a0c7838347fc8f766621ade9d7028
|
/Project2/final_code.R
|
3e799910fef648edb7124c421f83d34ce880f81e
|
[] |
no_license
|
alexaoh/stochmod
|
a4f16418cd151c5fa25c881c5d54fb8226b1d6b9
|
f15c2b1d390e4beaa9e1f98e3aa3c62877265fe6
|
refs/heads/master
| 2023-01-23T10:35:07.021417
| 2020-11-17T14:52:17
| 2020-11-17T14:52:17
| 290,275,470
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,116
|
r
|
final_code.R
|
# Project 2 Stochastic Modelling
# Problem 1: Modelling the common cold
# Constants (rates are per day; used as globals by simulate_markov below)
alpha = 0.1    # probability that a new infection is a heavy one
lambda = 1/100 # infection rate: one infection per 100 days on average
mu_L = 1/7     # recovery rate from a light infection (mean 7 days)
mu_H = 1/20    # recovery rate from a heavy infection (mean 20 days)
# Simulate one realisation of the three-state continuous-time Markov
# chain for the common cold:
#   state 0 = susceptible, 1 = lightly infected, 2 = heavily infected.
# Uses the globals alpha (P(heavy | new infection)), lambda (infection
# rate), mu_L and mu_H (recovery rates) defined above.
#
# state0: initial state
# tEnd:   simulation horizon in days
# plot:   if TRUE, draw the trajectory as horizontal state segments
#
# Returns c(proportion of time spent infected,
#           mean time between consecutive heavy infections).
simulate_markov <- function(state0, tEnd, plot) {
  time_infected = 0
  time_between_Ih = 0          # end time of the last heavy infection (0 = none yet)
  time_between_Ih_list <- c()  # observed gaps between heavy infections
  if (plot) {
    plot(NULL, NULL,
         xlim = c(0, tEnd),
         ylim = c(-0.2, 2.2),
         xlab = "Time (days)",
         lwd = 5,
         ylab = "State",
         main = "One Realization, 5 years")
  }
  time = 0
  state = state0
  while (time < tEnd) {
    if (state == 0) {
      # Sojourn time in the susceptible state
      S = rexp(1,rate = lambda)
      if (plot) {
        lines(c(time,time + S), c(state,state), lwd = 1)
      }
      # In case the sojourn time exceeds the end time
      if ( time + S > tEnd) {
        S = tEnd - time
      }
      time = time + S
      # The new infection is heavy with probability alpha, else light.
      sample = runif(1)
      if (sample <= alpha) {
        state = 2
        # Record the gap since the previous heavy infection ended.
        if (time_between_Ih > 0) {
          time_between_Ih_list <- c(time_between_Ih_list, time - time_between_Ih)
          time_between_Ih = 0
        }
      }
      else {
        state = 1
      }
    }
    else if (state == 1) {
      # Light infection: recover at rate mu_L, then back to susceptible.
      S = rexp(1,rate = mu_L)
      if (plot) {
        lines(c(time,time + S), c(state,state), lwd = 2)
      }
      if ( time + S > tEnd) {
        S = tEnd - time
      }
      time = time + S
      time_infected = time_infected + S
      state = 0
    }
    else if (state == 2) {
      # Heavy infection: recover at rate mu_H, then back to susceptible.
      S = rexp(1,rate = mu_H)
      if (plot) {
        lines(c(time,time + S), c(state,state), lwd = 3)
      }
      if ( time + S > tEnd) {
        S = tEnd - time
      }
      time = time + S
      time_infected = time_infected + S
      # Mark when this heavy infection ended, for the next gap measurement.
      time_between_Ih = time
      state = 0
    }
  }
  return (c(time_infected/tEnd,mean(time_between_Ih_list)))
}
# Estimate the long-run mean time between heavy infections by averaging
# over `numSim` independent realisations of the Markov chain.
#
# state0: initial state, passed through to simulate_markov()
# tEnd:   length (in days) of each simulated realisation
# numSim: number of independent realisations to average over
#
# Returns the mean across realisations of the per-realisation average
# gap between heavy infections.
get_mean <- function(state0, tEnd, numSim) {
  # Preallocate instead of growing the result with c() inside the loop.
  results <- numeric(numSim)
  for (i in seq_len(numSim)) {
    # BUG FIX: the original hard-coded state 0 here, silently ignoring
    # the `state0` argument.
    results[i] <- simulate_markov(state0, tEnd, FALSE)[2]
  }
  mean(results)
}
# 1.c -- one plotted realisation over five years
tEnd = 5*365
result <- simulate_markov(0, tEnd, TRUE)
# 1.d -- long (1000-year) run to estimate the time-infected proportion
tEnd = 1000*365
result <- simulate_markov(0, tEnd, FALSE)
time_infected <- result[1]
cat("Proportion of time infected in one realization: ",time_infected)
# 1.e -- average gap between heavy infections over many realisations
numSim = 100
result <- get_mean(0,tEnd,numSim)
cat("\nAverage time between heavy infections over ", numSim, " realizations: ", result)
####### Problem 2
# Calibrating Climate Models
# 2.a -- Gaussian-process prior hyperparameters
std = 0.5  # prior standard deviation (used globally by cov_function)
mu = 0.5   # constant prior mean
# Matern-type correlation kernel scaled by the prior variance:
#   C(t1, t2) = (1 + 15|t1 - t2|) * exp(-15|t1 - t2|) * sigma^2
# Vectorised over theta1/theta2 (plain arithmetic only).
#
# theta1, theta2: input locations (numeric, possibly vectors)
# sigma:          prior standard deviation; defaults to the global `std`
#                 so existing two-argument callers behave exactly as
#                 before, while the hard-coded constant is now tunable.
cov_function <- function(theta1, theta2, sigma = std) {
  d <- abs(theta1 - theta2)
  (1 + 15 * d) * exp(-15 * d) * sigma^2
}
# Prediction grid of 51 theta values on [0.25, 0.50]
# (step just under 0.005 so the endpoint 0.50 is included).
theta_grid = seq(from = 0.25, to = 0.50, by = 0.0049999)
# Known values (observed evaluation points and their scores)
sample_points = c(0.30,0.35,0.39,0.41,0.45)
sample_values = c(0.5, 0.32, 0.40, 0.35, 0.60)
# Function for creating covariance matrix
# Build the covariance matrix between two sets of locations:
# result[i, j] = cov_function(vecA[i], vecB[j]).
#
# vecA, vecB: numeric vectors of input locations
# Returns a length(vecA) x length(vecB) matrix.
get_cov <- function(vecA,vecB) {
  # cov_function is vectorised (plain arithmetic), so outer() builds the
  # whole matrix in one call, replacing the original element-by-element
  # double loop with the same result.
  outer(vecA, vecB, cov_function)
}
# GP conditioning: A = prediction grid, B = observed points.
Sigma_AB <- get_cov(theta_grid, sample_points)
Sigma_AA <- get_cov(theta_grid, theta_grid)
Sigma_BB <- get_cov(sample_points, sample_points)
muA <- rep(mu,length(theta_grid))
muB <- rep(mu,length(sample_points))
# Conditional mean and covariance of the grid given the observations
# (standard multivariate-normal conditioning formulas).
muC <- muA + Sigma_AB %*% solve(Sigma_BB) %*% (sample_values - muB)
Sigma_C <- Sigma_AA - Sigma_AB %*% solve(Sigma_BB) %*% t(Sigma_AB)
# Draw one sample path: muC + L z with L the Cholesky factor of Sigma_C.
L <- chol(Sigma_C)
z <- rnorm(length(theta_grid))
predict <- muC + L %*% z
plot(theta_grid, predict, main = "Prediction as a Function of Theta", xlab = "Theta", ylab = "Prediction")
lines(theta_grid, predict)
# Prediction interval (pointwise 90%: mean +/- z_0.95 * sd)
z005 = qnorm(0.95)
upper = muC + z005*sqrt(diag(Sigma_C))
lower = muC - z005*sqrt(diag(Sigma_C))
lines(theta_grid, muC, col = "blue", lwd = 2, lty = 3)
lines(theta_grid,upper, col = "red", lwd = 2, "l", lty = 2)
lines(theta_grid,lower, col = "red", lwd = 2, "l", lty = 2)
legend(0.35,0.7, legend=c("Pred. Int.", "Cond. Mean"), col=c("red", "blue"), lty=c(2,3))
# 2.b -- conditional probability that the score falls below 0.30
library(expm)
y <- rep(0.30,length(theta_grid))
std_matrix <- Sigma_C %^% -0.5
standardize <- std_matrix %*% (y - muC)
probs1 <- pnorm(standardize)
# Done differently, since the above was wrong.
# Pointwise marginal probabilities P(Y(theta_i) < 0.3 | data).
chance_vec1 <- rep(0,51)
for (i in 1:51){
  chance_vec1[i] <- pnorm(0.3,mean=muC[i],sd=sqrt(abs(Sigma_C[i,i])))
}
plot(theta_grid, chance_vec1, main = "Conditional Probability as Function of Theta", ylab = "Conditional Prob.", xlab = "Theta")
lines(theta_grid, chance_vec1)
# 2.c
# Same as in a), but with one more point.
sample_points = c(0.30,0.35,0.39,0.41,0.45, 0.33)
sample_values = c(0.5, 0.32, 0.40, 0.35, 0.60, 0.40)
Sigma_AB <- get_cov(theta_grid, sample_points)
Sigma_AA <- get_cov(theta_grid, theta_grid)
Sigma_BB <- get_cov(sample_points, sample_points)
muA <- rep(mu,length(theta_grid))
muB <- rep(mu,length(sample_points))
muC <- muA + Sigma_AB %*% solve(Sigma_BB) %*% (sample_values - muB)
Sigma_C <- Sigma_AA - Sigma_AB %*% solve(Sigma_BB) %*% t(Sigma_AB)
Sigma_C
L = chol(Sigma_C)
z <- rnorm(length(theta_grid))
predict <- muC + L %*% z
plot(theta_grid, predict, main = "Prediction as a Function of Theta", xlab = "Theta", ylab = "Prediction")
lines(theta_grid, predict)
# Prediction interval
z005 <- qnorm(0.95)
upper <- muC + z005*sqrt(diag(Sigma_C))
lower <- muC - z005*sqrt(diag(Sigma_C))
lines(theta_grid,upper, col = "red", lwd = 2, "l", lty = 2)
lines(theta_grid,lower, col = "red", lwd = 2, "l", lty = 2)
legend(0.35,0.7, legend=c("Pred. Int."), col=c("red"), lty=2)
y <- rep(0.30,length(theta_grid))
std_matrix <- Sigma_C %^% -0.5
standardize <- std_matrix %*% (y - muC)
probs <- pnorm(standardize)
# Done differently, since the above was wrong.
chance_vec <- rep(0,51)
for (i in 1:51){
  chance_vec[i] <- pnorm(0.3,mean=muC[i],sd=sqrt(abs(Sigma_C[i,i])))
}
# Compare the new probabilities against the 5-point version from 2.b.
plot(theta_grid, chance_vec, ylim = c(0, 0.25), main = "Conditional Probability as Function of Theta", ylab = "Conditional Prob.", xlab = "Theta")
lines(theta_grid, chance_vec)
lines(theta_grid, chance_vec1, col = "red", lty = 2)
legend(0.40, 0.10, c("Old Prob.", "New Prob."), col = c("red", "black"), lty=2:1)
|
2170da8d2868e03aeb074074ad56b71b78b75239
|
598a2f6059a264cb10a609cb65a8048086015e67
|
/offline-advetising-campaigns-efficiency/Moscow Best model.R
|
42eb3643762a16a436b302a268cf0aa95e25a975
|
[] |
no_license
|
alexey-nikolaev/code-examples
|
eb5142bcf3b9d19fff6c4badeda77278fc3251de
|
efd0f4a19aea0e388e024da8f211258458aa95fd
|
refs/heads/master
| 2021-05-09T18:50:35.500699
| 2018-01-27T19:46:31
| 2018-01-27T19:46:31
| 119,175,976
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,978
|
r
|
Moscow Best model.R
|
# https://www.periscopedata.com/app/wheely/115382/Advertising-Efficiency
#
# Offline-advertising efficiency analysis: for each business metric, fit
# four counterfactual models (three BSTS trend variants + auto.ARIMA),
# pick the one with the lowest mean absolute one-step residual on the
# pre-period, then report and plot the estimated campaign effect after
# 2017-04-01.
library(readr)
data <- read_csv('https://app.periscopedata.com/api/wheely/chart/csv/8dbe6edf-0ee5-60c4-6bef-9b30eaa48935/189541')
library(CausalImpact) # based on BSTS
library(bsts) # BSTS
library(forecast) # auto.ARIMA
data$mm <- format(data$dd, "%m")
mm <- model.matrix( ~ mm - 1, data=data)
data <- cbind(data,mm) # add month dummies for ARIMA
pre <- subset(data, dd<as.Date("2017-04-01"))
post <- subset(data, dd>=as.Date("2017-04-01"))
for (metric in c('revenue', 'rides', 'riders', 'firstrides', 'secondrides', 'newusers',
                 'businessrides', 'businessfirstrides', 'quotes',
                 'quotedusers', 'viprides', 'vipfirstrides')) {
  models <- list()
  results <- list()
  mean_abs_residuals <- list()
  # the dataset will be divided in two parts: before the campaign started (pre) and after (post)
  if(metric %in% c('quotes','quotedusers')) { # correct quotes calculations started from 2016-06-18
    pre <- subset(data, dd>as.Date("2016-06-17"))
    pre <- subset(pre, dd<as.Date("2017-04-01"))
  } else {
    pre <- subset(data, dd<as.Date("2017-04-01"))
  }
  post <- subset(data, dd>=as.Date("2017-04-01"))
  # use business/vip online cars for business/vip orders
  if((metric == 'businessrides') || (metric == 'businessfirstrides')) {
    onlinecars <- pre$onlinebusinesscars
    onlinecarsf <-post$onlinebusinesscars
  } else if ((metric == 'viprides') || (metric == 'vipfirstrides')) {
    onlinecars <- pre$onlinevipcars
    onlinecarsf <-post$onlinevipcars
  } else {
    onlinecars <- pre$onlinecars
    onlinecarsf <-post$onlinecars
  }
  # loop through models (BSTS with different trends & auto.ARIMA)
  for (i in 1:4) {
    if (i == 4) { # auto.ARIMA
      errorflag <- FALSE # some auto.ARIMA models can't be fitted, skip then
      y <- ts(pre[[metric]], frequency = 7)
      # Regressors: holiday flag, earlier campaigns, online car supply,
      # and 11 of the 12 monthly dummies (mm12 is omitted as baseline).
      xreg <- cbind(pre$isholiday, pre$previouscampaigns, onlinecars,
                    pre$mm01, pre$mm02, pre$mm03, pre$mm04, pre$mm05, pre$mm06,
                    pre$mm07, pre$mm08, pre$mm09, pre$mm10, pre$mm11)
      colnames(xreg) <- c("isholiday", "previouscampaigns", "onlinecars",
                          "m1", "m2", "m3", "m4", "m5", "m6",
                          "m7", "m8", "m9", "m10", "m11")
      xregf <- cbind(post$isholiday, post$previouscampaigns, onlinecarsf,
                     post$mm01, post$mm02, post$mm03, post$mm04, post$mm05, post$mm06,
                     post$mm07, post$mm08, post$mm09, post$mm10, post$mm11)
      colnames(xregf) <- c("isholiday", "previouscampaigns", "onlinecars",
                           "m1", "m2", "m3", "m4", "m5", "m6",
                           "m7", "m8", "m9", "m10", "m11")
      tryCatch(models[[i]] <- auto.arima(y, xreg=xreg, seasonal=TRUE),
               error=function(e){ errorflag <<- TRUE })
      # skip if model can't be fitted
      if (errorflag) {
        mean_abs_residuals[[i]] <- Inf # exclude model from comparison if an error occured
      } else {
        fc <- forecast(models[[i]], xreg=xregf, h=dim(post)[1])
        results[[i]] <- fc
        mean_abs_residuals[[i]] <- mean(abs(residuals(models[[i]], type='response', h=1)))
      }
    } else { # BSTS
      target <- xts(data[[metric]], order.by=data$dd)
      target_pre <- xts(pre[[metric]], order.by=pre$dd)
      target_post <- post[[metric]]
      # Three BSTS variants differing only in the trend component.
      if (i == 1) {
        ss <- AddLocalLinearTrend(list(), target_pre)
      } else if (i == 2) {
        ss <- AddGeneralizedLocalLinearTrend(list(), target_pre)
      } else if (i == 3) {
        ss <- AddStudentLocalLinearTrend(list(), target_pre)
      }
      # Weekly seasonality plus a yearly cycle of 52 week-long seasons.
      ss <- AddSeasonal(ss, y = target_pre, nseasons=7)
      ss <- AddSeasonal(ss, y = target_pre, nseasons = 52, season.duration = 7)
      # Response with the post-period masked out (NA), as CausalImpact
      # expects when given a pre-fitted bsts model.
      target_bsts <- xts(c(pre[[metric]], rep(NA, length(post[[metric]]))),
                         order.by=c(pre$dd,post$dd))
      cars <- c(onlinecars, onlinecarsf)
      isholiday <- c(pre$isholiday, post$isholiday)
      previouscampaigns <- c(pre$previouscampaigns, post$previouscampaigns)
      models[[i]] <- bsts(target_bsts ~ cars + isholiday + previouscampaigns,
                          niter = 5000, state.specification=ss, seed=1)
      results[[i]] <- CausalImpact(bsts.model = models[[i]],
                                   post.period.response = target_post)
      mean_abs_residuals[[i]] <- mean(abs(bsts.prediction.errors(models[[i]])))
    }
  }
  # Select the model with the smallest mean absolute one-step residual.
  model_types <- c('BSTS LocalLinearTrend', 'BSTS GeneralizedLocalLinearTrend', 'BSTS StudentLocalLinearTrend', 'auto.ARIMA')
  best_model <- models[[which.min(mean_abs_residuals)]]
  best_model_type <- model_types[[which.min(mean_abs_residuals)]]
  result_best_model <- results[[which.min(mean_abs_residuals)]]
  # Append this metric's results to the text report.
  cat(sprintf("\n\n\nMetric: %s", metric), file='report_moscow.txt', append=TRUE)
  cat(sprintf("\n\nBest model: %s\n\n", best_model_type), file='report_moscow.txt', append=TRUE)
  if (best_model_type == 'auto.ARIMA') {
    # Effect = actual minus counterfactual forecast; interval bounds swap
    # sides under the subtraction.  Note the local `mean` vector shadows
    # the name of base mean(); R still resolves the function calls below.
    upper <- post[[metric]]-result_best_model$lower[,2]
    mean <- post[[metric]]-result_best_model$mean
    lower <- post[[metric]]-result_best_model$upper[,2]
    upper80 <- post[[metric]]-result_best_model$lower[,1]
    lower80 <- post[[metric]]-result_best_model$upper[,1]
    cat(sprintf("Average effect size: %.2f\nCI 95 [%.2f, %.2f]\nCI 80 [%.2f, %.2f]",
                mean(mean), mean(lower), mean(upper), mean(lower80), mean(upper80)),
        file='report_moscow.txt', append=TRUE)
    cat(sprintf("\n\nCumulative effect size: %.2f\nCI 95 [%.2f, %.2f]\nCI 80 [%.2f, %.2f]",
                sum(mean), sum(lower), sum(upper), sum(lower80), sum(upper80)),
        file='report_moscow.txt', append=TRUE)
  } else {
    sink(file = 'report_moscow.txt', append=TRUE)
    summary(result_best_model)
    sink()
  }
  # Plot actual vs baseline forecast to a per-metric PNG.
  png(paste0("moscow_", metric, ".png"),
      width=8, height=3, units="in", res=700, pointsize=10)
  par(mar=c(6, 4, 4, 2) + 0.1)
  if (best_model_type == 'auto.ARIMA') {
    # NOTE(review): `target` is the value left over from the last BSTS
    # iteration of the model loop -- confirm it is always defined here.
    fc_xts <- xts(c(fitted(best_model), result_best_model$mean),
                  order.by=c(pre$dd, post$dd))
    plot(target, main=metric, major.ticks="months", major.format = "%Y-%m")
    lines(fc_xts, major.ticks="months", major.format = "%Y-%m", col='blue', lty=2)
  } else {
    plot(as.xts(result_best_model$series$response), main=metric, major.ticks="months", major.format = "%Y-%m")
    lines(as.xts(result_best_model$series$point.pred), major.ticks="months", major.format = "%Y-%m", col='blue', lty=2)
  }
  legend('topleft', legend=c('actual', 'baseline forecast', 'campaign start'),
         col=c('black', 'blue', 'red'), lwd=c(1,1,3), lty=c(1,2,1), cex=1, pt.cex=1)
  # Vertical red line marking the campaign start.
  par(col='red', lwd=3)
  abline(v=.index(target)[length(target)-length(post)+2])
  par(col='black', lwd=1)
  dev.off()
}
|
a367fdce06e87b8a2b1d709b693c40714c24df65
|
5535aebd21d291783f77aa8d35e127d66022acb9
|
/man/get_col.Rd
|
939038d64ce4897f4586c4c0ff11b83668f84489
|
[] |
no_license
|
arturochian/gspreadr
|
41cb4e276a703ba7bfdf3006153cc08793aa5b99
|
e7b0b0a155bc2d1b8e3aa858fee79143530a5dad
|
refs/heads/master
| 2020-12-11T03:21:03.254666
| 2015-01-13T00:14:38
| 2015-01-13T00:14:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 473
|
rd
|
get_col.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{get_col}
\alias{get_col}
\title{Get all values in a column.}
\usage{
get_col(ws, col)
}
\arguments{
\item{ws}{worksheet object}
\item{col}{column number or letter (case insensitive)}
}
\value{
A data frame.
}
\description{
Get all values in a column.
}
\seealso{
\code{\link{get_cols}}, \code{\link{get_row}},
\code{\link{get_rows}}, \code{\link{read_all}}, \code{\link{read_region}},
\code{\link{read_range}}
}
|
ec558b355b74e4fc9e6d86ef355e0aec844dec8c
|
b6312d8298f60b08b040b51ad1d66f8f3b6627a5
|
/R/multiQQPlot_function.R
|
17003ee787bd17775fd554dfaaf295be1dd15b1d
|
[] |
no_license
|
hutchisonjtw/JNCCTools
|
36ced580cb7feb91cf310684220451843996bb16
|
48242eac43c37d16b50aa50504dd4ca7f02c4551
|
refs/heads/master
| 2021-01-10T10:13:43.298389
| 2017-03-22T14:25:45
| 2017-03-22T14:25:45
| 54,502,489
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,069
|
r
|
multiQQPlot_function.R
|
#' multiQQPlot
#'
#' Plots quantile-quantile plots for multiple probability distributions on one set of axes.
#'
#' @param x Numeric vector of data values to be plotted to be plotted as a histogram.
#' @param main Title for the plot. Default value is \code{"QQ plot of sample data against likely distributions"}.
#' @param distr Character vector of distributions for which to produce QQ plots. Note that this uses the standard R names for the distributions which differ from those used in \code{emon}. Should be one or more of \code{"norm"} (normal), \code{"pois"} (Poisson), \code{"lnorm"} (log normal) and \code{"nbinom"} (negative binomial). By default all four are plotted.
#' @details QQ plots are used to visually assess how well a data sample fits a given probability distribution.
#' @return Primarily used for plotting, but invisibly returns an object of class multiQQ, which can be stored for later plotting if required.
#' @export
#'
multiQQPlot <- function(x, main = "QQ plot of sample data against likely distributions", distr = c("nbinom", "pois", "norm", "lnorm")) {
  # requireNamespace() is the recommended way to test for a dependency
  # inside a function; require() attaches the whole package as a side
  # effect and merely returns FALSE on failure.  MASS functions are
  # namespace-qualified below, so attaching is unnecessary.
  if (!requireNamespace("MASS", quietly = TRUE)) {
    stop("Package 'MASS' could not be loaded. Is it installed?")
  }
  distrCheck <- distr %in% c("nbinom", "pois", "norm", "lnorm")
  if (any(!distrCheck)) stop("distr must be one or more of 'norm', 'pois', 'lnorm', 'nbinom'. Other values are not permitted.")
  # Plotting positions i/(n+1), i = 1..n.
  probabilities <- seq_along(x) / (length(x) + 1)
  theoreticalProbs <- list()
  if ("nbinom" %in% distr) {
    if (is.integer(x)) {
      # Fit once and reuse both estimated parameters (the original code
      # refitted the distribution separately for each parameter).
      fit <- MASS::fitdistr(x, "Negative Binomial")
      nbinomQuantiles <- cbind(sort(x), qnbinom(probabilities, size = fit$estimate[1], mu = fit$estimate[2]))
      theoreticalProbs$nbinom <- nbinomQuantiles
    } else {
      warning("Negative binomial requires integer data so will not be plotted")
    }
  }
  if ("pois" %in% distr) {
    if (is.integer(x)) {
      poisQuantiles <- cbind(sort(x), qpois(probabilities, MASS::fitdistr(x, "Poisson")$estimate))
      theoreticalProbs$pois <- poisQuantiles
    } else {
      warning("Poisson requires integer data so will not be plotted")
    }
  }
  if ("norm" %in% distr) {
    fit <- MASS::fitdistr(x, "Normal")
    normQuantiles <- cbind(sort(x), qnorm(probabilities, mean = fit$estimate[1], sd = fit$estimate[2]))
    theoreticalProbs$norm <- normQuantiles
  }
  if ("lnorm" %in% distr) {
    fit <- MASS::fitdistr(x, "Lognormal")
    lnormQuantiles <- cbind(sort(x), qlnorm(probabilities, meanlog = fit$estimate[1], sdlog = fit$estimate[2]))
    theoreticalProbs$lnorm <- lnormQuantiles
  }
  # Reachable when every requested distribution was skipped, e.g. only
  # count distributions were requested on non-integer data.
  if (length(theoreticalProbs) == 0) stop("distr must be one or more of 'norm', 'pois', 'lnorm', 'nbinom'")
  # Empty canvas plus the y = x identity line; a good fit hugs this line.
  plot(x, x, type = "n", xlab = "Theoretical quantiles", ylab = "Sample quantiles", main = main)
  lines(rbind(c(0, 0), c(max(x), max(x))))
  mapply(points, theoreticalProbs, col = 1:4, pch = 1:4)
  legend("topleft", distr, col = 1:4, pch = 1:4)
  # Invisibly return the plotted data so the figure can be recreated.
  output <- list(xmax = max(x), theoreticalProbs = theoreticalProbs, distr = distr, main = main)
  class(output) <- "multiQQ"
  invisible(output)
}
|
91ba8b17bd4402ece80857f92658f8b742608c8b
|
b2de870cc0a65b07724b3201970c2973b19d688b
|
/man/formattable.Rd
|
af97846aa13af853c355002b304d0c50c3fa3b00
|
[
"MIT"
] |
permissive
|
renkun-ken/formattable
|
81ca995467a44f79d979ba64371a6b79b26540d6
|
66f69944ef869156a4362dd332cc0b931fc157cd
|
refs/heads/master
| 2023-07-02T02:45:22.643478
| 2023-03-23T03:04:07
| 2023-03-23T03:07:00
| 33,297,795
| 720
| 102
|
NOASSERTION
| 2022-10-20T23:02:01
| 2015-04-02T08:22:11
|
R
|
UTF-8
|
R
| false
| true
| 640
|
rd
|
formattable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formattable.R
\name{formattable}
\alias{formattable}
\title{Generic function to create formattable object}
\usage{
formattable(x, ...)
}
\arguments{
\item{x}{an object.}
\item{...}{arguments to be passed to methods.}
}
\value{
a \code{formattable} object
}
\description{
This function is a generic function to create \code{formattable}
objects, i.e. an object to which a formatting function and
related attribute are attached. The object works as an ordinary vector
yet has specially defined behavior as being printed or converted to
a string representation.
}
|
6804d9831317572f4176ee2baa80340516919d42
|
430ca7d8ba944d18d20349ad40dd81f36756c04e
|
/multiple comparison_sample code.R
|
f9049137a53c9ac82262683b698e94d5cd38bd66
|
[] |
no_license
|
NxNiki/multidimensional_age_prediction
|
9a45bbccb9275b99e288b70e1966c7156bcd2feb
|
dcf42175937d8033d3ec20f10de5f27cdc89e80f
|
refs/heads/master
| 2022-08-08T04:39:18.947106
| 2022-08-02T04:04:13
| 2022-08-02T04:04:13
| 236,085,971
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 304
|
r
|
multiple comparison_sample code.R
|
# Demonstration of multiple-comparison p-value adjustments, plus a small
# simulated regression.

# NOTE(review): the original called read.csv() with no file argument,
# which always errors; supply a path before enabling this line.
# p.values <- read.csv("p_values.csv")

p <- c(0.015, 0.036, 0.048, 0.052, 0.023)
# Compare the classical adjustment methods on the same p-values.
p.bon <- p.adjust(p, "bonferroni")
p.adjust(p, "holm")
p.adjust(p, "hochberg")
p.adjust(p, "hommel")

# Simulate a weak linear relationship and fit it with lm().
x <- rnorm(10)
# BUG FIX: rnorm() was called without its required first argument `n`;
# generate one noise term per observation (mean 10 acts as intercept).
y <- .1 * x + rnorm(length(x), mean = 10, sd = 2)
plot(x, y)
data <- data.frame(x = x, y = y)
mod <- lm(y ~ x, data)
summary(mod)
|
3601291239e08f44f60ac99e26b13df04dd06bab
|
e365698941fa20641c21c5dea00dcd2ebc853223
|
/ProgAssig1/plot4.R
|
f9e6f8abc3df59d9bf38509201822d1213067c72
|
[] |
no_license
|
danidelacuesta/ExData_Plotting1
|
bed545807b8297398cea9cf122fdeb3d0fc48092
|
182b1f1d09e6da3e34408e2edef3b31d8d8d4d1f
|
refs/heads/master
| 2021-01-20T23:40:59.934005
| 2015-03-09T01:38:33
| 2015-03-09T01:38:33
| 31,868,490
| 0
| 0
| null | 2015-03-08T22:32:48
| 2015-03-08T22:32:47
| null |
UTF-8
|
R
| false
| false
| 1,220
|
r
|
plot4.R
|
# Draw the 2x2 panel of household power-consumption plots for
# 1-2 Feb 2007 and save it as plot4.png in the working directory.
# Expects "household_power_consumption.txt" in the working directory.
plot4 <- function() {
  # BUG FIX: fread() (and the data.table-style `[` filter below) comes
  # from data.table, but the original loaded only lubridate, so the
  # function failed unless data.table happened to be attached already.
  library(data.table)
  library(lubridate)
  # Read a slice of the file that covers the two target dates.
  data <- fread(".//household_power_consumption.txt",skip=65000,nrows=5000)
  data <- data[data$V1=="1/2/2007"|data$V1=="2/2/2007"]
  # Combine date (V1) and time (V2) into a single timestamp.
  data$Date_Combined <- dmy(data$V1)+hms(data$V2)
  par(mfrow=c(2,2))
  {
    # graph1: global active power over time
    plot(data$Date_Combined,data$V3,type="l",ylab="Global Active Power (kilowatts)",xlab="")
    # graph2: voltage
    plot(data$Date_Combined,data$V5,type="l",ylab="Voltage",xlab="datetime")
    # graph3: the three energy sub-metering series
    plot(data$Date_Combined,data$V7,type="l",xlab="",ylab="Energy sub metering")
    legend("topright", bty="n", lty = c(1,1,1),cex =0.6, y.intersp=0.3, yjust=0.5, col = c("black","red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))
    lines(data$Date_Combined,data$V8,col="red")
    lines(data$Date_Combined,data$V9,col="blue")
    # graph4: global reactive power
    plot(data$Date_Combined,data$V4,type="l",ylab="Global_reactive_power",xlab="datetime")
  }
  # Copy the screen device to a PNG file, then close it.
  dev.copy(png,file="plot4.png")
  dev.off()
}
|
c801e182b7719ae5f58bb84a23d41f235ecd21ec
|
af883594be37bf9b58d4d11c05ac685bb0919652
|
/R/ex16_지도시각화.R
|
a52f91a361baa7c4114770f6345b8fa5143a03f5
|
[] |
no_license
|
handaeho/lab_R
|
7593574af1dc345c1f98f92c219c3af3d411d493
|
c735aaf2cb004254bf57637ec059ee8344c3f4f9
|
refs/heads/master
| 2020-11-26T08:40:18.922074
| 2019-12-19T09:17:59
| 2019-12-19T09:17:59
| 229,017,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,312
|
r
|
ex16_지도시각화.R
|
# Map visualization
# Displaying statistics on top of a map
# Install & load packages
# "maps" and "ggplot2" are needed for ggplot2::map_data() to handle map data
install.packages("maps")
install.packages("ggplot2")
install.packages("mapproj") # package used by ggplot2::coord_map()
library(ggplot2)
# Build a map of Northeast Asia
# Create a data frame holding the region (longitude/latitude) information
asia_map = map_data(map = "world",
                    region = c("North Korea",
                               "South Korea",
                               "Japan",
                               "china",
                               "India"))
str(asia_map)
head(asia_map)
tail(asia_map)
# longitude : east-west coordinate relative to the Greenwich meridian
# latitude  : north-south coordinate relative to the equator
# order : order in which the points are connected by line segments
# group : set of lon/lat points drawn together (country, state, city, ...)
# Draw the map
# When using the ggplot2 package:
#   data parameter : the map data frame containing longitude/latitude
#   x mapping      : long (longitude)
#   y mapping      : lat (latitude)
#   geom           : geom_polygon()
ggplot(data = asia_map, mapping = aes(x = long, y = lat, group = group, fill = region)) +
  geom_polygon() +
  coord_map("polyconic")
# coord_map adjusts the x/y spacing so the map looks right.
# "polyconic" ---> projection that rounds the map toward the poles
# Draw a map of Korea
korea_map = map_data(map = "world",
                     region = c("South Korea" ,
                                "North Korea"))
head(korea_map)
ggplot(korea_map, aes(long, lat, group = group, fill = region)) +
  geom_polygon(color = "black") + # setting color draws the region borders
  coord_map("polyconic")
# Draw a US map (by state)
us_state = map_data(map = "state")
str(us_state)
head(us_state)
tail(us_state)
ggplot(us_state, aes(long, lat, group = group, fill = region)) +
  geom_polygon(color = "black") +
  coord_map("polyconic")
# --------------------------------------------------------------------------------
# Choropleth map
# - A map that expresses regional statistics as differences in color
# - Makes it easy to see how characteristics such as population or income
#   vary by region
# Build a choropleth of US violent-crime rates by state
# Install & load packages
library(dplyr) # for data preprocessing
library(tibble) # for rownames_to_column()
search()
# Build the data frame
str(USArrests)
head(USArrests)
tail(USArrests)
# USArrests stores the state names as row names.
# To join with the us_state map data frame, the state names must be a
# regular column instead.
us_crime = rownames_to_column(USArrests, var = "state")
# A column named "state" now holds the state names.
str(us_crime)
head(us_crime)
tail(us_crime)
# us_state$region is all lowercase while us_crime capitalizes the first
# letter, so convert everything to lowercase before joining.
us_crime$state = tolower(us_crime$state)
head(us_crime)
# Now join the us_state and us_crime data frames.
state_crime = left_join(us_state, us_crime,
                        by = c("region" = "state")) # join condition: region matches state
head(state_crime)
# Draw the map
# Murder rate per state
ggplot(state_crime, aes(long, lat, group = group, fill = Murder)) +
  geom_polygon(color = "black") +
  coord_map("polyconic") +
  scale_fill_continuous(low = "white", high = "darkred")
# The rate is continuous, hence scale_fill_continuous:
# low rates shown in white, high rates in dark red.
# Assault rate per state
ggplot(state_crime, aes(long, lat, group = group, fill = Assault)) +
  geom_polygon(color = "black") +
  coord_map("polyconic") +
  scale_fill_continuous(low = "white", high = "blue")
# Rape rate per state
ggplot(state_crime, aes(long, lat, group = group, fill = Rape)) +
  geom_polygon(color = "black") +
  coord_map("polyconic") +
  scale_fill_continuous(low = "white", high = "black")
# Drawing a choropleth with the ggiraphExtra package
install.packages("ggiraphExtra")
library(ggiraphExtra)
ggChoropleth(data = us_crime,
             mapping = aes(fill = Murder,
                           map_id = state),
             map = us_state)
# ggChoropleth() arguments:
# data : data frame containing the statistics
# map  : data frame that can draw the map (longitude, latitude, region, group, ...)
# mapping
# 1) map_id : variable in the statistics data frame used to join with
#    the map; must match the map's region values.
# 2) fill : variable used to color each region (e.g. murder rate).
ggChoropleth(data = us_crime, map = us_state,
             mapping = aes(fill = Murder,
                           map_id = state),
             interactive = T)
# With interactive = T, hovering over a region shows its name and value.
# --------------------------------------------------------------------------------
# Install package for character encoding and conversion utilities
install.packages("stringi")
# Install developer tools
install.packages("devtools")
# Build choropleths of population and tuberculosis patient counts by
# Korean province
# Download the data from GitHub
devtools::install_github("cardiomoon/kormaps2014")
# install.packages() ---> downloads from the official R repository
# devtools::install_github ---> downloads from github.com
# Load required packages
library(kormaps2014)
library(ggplot2)
library(dplyr)
library(ggiraphExtra)
search()
# Census data frame shipped with the kormaps2014 package
str(korea_map)
# str(korpop1)
# The data frame's column names are in Korean and do not display properly.
str(changeCode(korpop1))
# kormaps2014::changeCode()
# ---> handles data frames whose column (variable) names are in Korean.
head(korpop1)
head(changeCode(korpop1)) # region, population, housing type, ...
# Korean column names can trigger errors, so rename them to English.
korpop1 = rename(korpop1,
                 name = 행정구역별_읍면동,
                 pop = 총인구_명)
korpop1
# Plot korpop1 (region, population).
ggChoropleth(data = korpop1, map = kormap1,
             mapping = aes(fill = pop, map_id = code, tooltip = name),
             interactive = T)
# tbc : data frame of tuberculosis patient counts
head(changeCode(tbc))
ggChoropleth(data = tbc, map = kormap1,
             mapping = aes(fill = NewPts, map_id = code, tooltip = name1),
             interactive = T)
# Restrict to 2015 and plot again.
tbc2015 = tbc %>%
  filter(year == 2015)
ggChoropleth(data = tbc2015, map = kormap1,
             mapping = aes(fill = NewPts, map_id = code,
                           tooltip = name),
             interactive = T)
|
fa899ca0e37ed923b085ef5b758826ef53560e15
|
244393a89b3f8a836ee5afdd2ec9c91f5e52a6cd
|
/Visualization/case_study_trends_in_world_health.R
|
ff4962bd0fe8a50f1204984cf2ab4646a0932015
|
[] |
no_license
|
mjchenko/R_for_Data_Science
|
c33e470bb7b054ba5255df99aa06f60c2940976d
|
a2d228b738400a80fa2ab6fbf9df7af40a2ad83e
|
refs/heads/main
| 2023-02-01T13:39:57.324999
| 2020-12-18T20:27:20
| 2020-12-18T20:27:20
| 322,691,962
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 513
|
r
|
case_study_trends_in_world_health.R
|
# Case study: trends in world health, using the dslabs gapminder data.
library(dslabs)
data(gapminder)
head(gapminder)
# Infant mortality in 2015 for a selection of countries.
gapminder %>%
  filter(year == 2015 & country %in% c("Sri Lanka", "Turkey", "Poland",
                                       "South Korea", "Malaysia", "Russia",
                                       "Pakistan", "Vietnam", "Thailand",
                                       "South Africa")) %>%
  select(country, infant_mortality)
# Scatter plot of fertility vs life expectancy in 1962, by continent.
ds_theme_set()
filter(gapminder, year == 1962) %>%
  ggplot(aes(fertility, life_expectancy, color = continent)) +
  geom_point()
|
45de45e85a4bdcc148bb6dd352e97dccece0d32e
|
cbffba8db095c390010b0c73039ffae2154fbc52
|
/scripts/data_preprocessing.R
|
c6e1e51896b0dc6fcc45a32a01ebafcfc2447270
|
[] |
no_license
|
caramirezal/hcv_scDataIntegration
|
b005f283cf72e76c893ea02c6eeb6023bf45b3cd
|
6bb008ac8b36ea4ae3683881b036a4e77f7f6d86
|
refs/heads/master
| 2020-12-22T07:14:26.169716
| 2020-03-16T17:24:48
| 2020-03-16T17:24:48
| 236,708,218
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,798
|
r
|
data_preprocessing.R
|
## Data preprocessing of HCV project data
library(Seurat)
library(Signac)
library(tidyverse)
library(liger)
## Standard Seurat single-cell processing workflow.
##
## Input: A seurat object
##   seurat_object - Seurat object holding raw counts
##   n_features    - number of highly variable features to select
##   n_pca_dims    - number of principal components to compute and to
##                   feed into UMAP
## Performs normalization, variable-feature selection, scaling, PCA and
## UMAP (in that order; progress is reported via cat()).
## Output: A processed seurat object with pca/umap reductions attached.
st_workflow <- function(
  seurat_object,
  n_features = 3000,
  n_pca_dims = 15
){
  cat('Normalizing and finding variable features\n')
  seurat.p <- NormalizeData(seurat_object) %>%
    FindVariableFeatures(selection.method = 'vst',
                         nfeatures = n_features)
  cat('Scaling and projection\n')
  seurat.p <- ScaleData(seurat.p,
                        verbose = FALSE) %>%
    RunPCA(npcs = n_pca_dims,
           verbose = FALSE) %>%
    RunUMAP(reduction = "pca",
            dims = 1:n_pca_dims)
  return(seurat.p)
}
## Maike Hoffman data preprocessing
himmer <- read.csv('../data/nina_thimme_raw_counts.csv',
header = TRUE)
r_names <- himmer$X
himmer <- select(himmer, -X)
himmer_mtx <- apply(himmer, 2, as.numeric)
rownames(himmer_mtx) <- gsub('__chr.*', '', r_names)
## Seurat object construction
himmer_seu <- CreateSeuratObject(counts = himmer_mtx, project = 'himmer', min.cells = 1, assay = 'rna')
himmer_seu <- st_workflow(himmer_seu, n_features = 1000, n_pca_dims = 100)
saveRDS(himmer_seu, '../data/himmer_seu.rds')
## Splitting Hoffman data for liger pipeline
patients <- himmer_seu$orig.ident
himmer_df <- as.data.frame(t(himmer_mtx))
himmer_sp <- split(himmer_df, f = patients)
himmer_sp <- lapply(himmer_sp, function(x) t(x) )
saveRDS(himmer_sp, '../data/himmer_splitted.rds')
## Standardizing gene names
## Gene names should be in upper case in order to align with human data.
## Run once in order to create ../data/miller2019_upper_Case/
dir.create('../data/miller2019_upper_Case')
file.copy('../data/miller2019/matrix.mtx', '../data/miller2019_upper_Case/')
file.copy('../data/miller2019/barcodes.tsv', '../data/miller2019_upper_Case/')
genes_lc <- read.table('../data/miller2019/genes.tsv', header = FALSE)
# V2 holds the gene symbol; upper-case it to match human nomenclature.
genes_lc$V2 <- toupper(genes_lc$V2)
write_tsv(genes_lc, '../data/miller2019_upper_Case/genes.tsv', col_names = FALSE)
### Seurat standard preprocessing
miller_mtx <- Read10X('../data/miller2019_upper_Case/')
miller <- CreateSeuratObject(counts = miller_mtx, project = 'lcmv', assay = 'RNA', min.cells = 1, min.features = 200)
miller <- NormalizeData(miller)
miller <- FindVariableFeatures(miller, selection.method = 'vst', nfeatures = 3000)
miller <- ScaleData(miller, verbose = FALSE)
miller <- RunPCA(miller, npcs = 30, verbose = FALSE)
miller <- RunUMAP(miller, reduction = "pca", dims = 1:30)
miller <- FindNeighbors(miller, dims = 1:30, verbose = FALSE)
# Low resolution (0.1) on purpose: only a handful of coarse clusters.
miller <- FindClusters(miller, resolution= 0.1, verbose = FALSE)
saveRDS(miller, '../data/miller_seu.rds')
## Miller Cluster annotation
# Map the five cluster ids to biological cell-type labels.
miller$orig.ident <- miller$seurat_clusters
miller_ann <- data.frame('cluster' = miller$orig.ident,
'cell_type' = plyr::mapvalues(x = miller$orig.ident,
from = as.factor(c(3,0,4,2,1)),
to = c('Proliferating', 'Effector', 'Naive',
'Progenitor Ex', 'Terminally Ex')
)
)
miller <- AddMetaData(miller, metadata = miller_ann)
saveRDS(miller_ann, '../data/miller_annotations.rds')
## checking no empty intersection of genes
length( intersect( rownames(himmer_sp$DW), rownames(miller_mtx)))
## merging miller and himmer in a list
# NOTE(review): c() on an S4 sparse matrix plus a list relies on list
# coercion; c(list(Miller_dataset = miller_mtx), himmer_sp) would be the
# unambiguous form -- confirm the resulting object is the intended list.
merged_himmer_miller <- c('Miller_dataset'=miller_mtx, himmer_sp)
saveRDS(merged_himmer_miller, '../data/merged_himmer_miller.rds')
############################################################################################################################
## ##
## Subsetting Satpathy data ##
## ##
############################################################################################################################
## barcodes
summaryTCells <- readRDS('../data/satpathy2016/scATAC_TME_TCells_SummarizedExperiment.final.rds')
## peaks
peaks <- read.table('../data/satpathy2016/genes.tsv', header = TRUE)
peaks <- as.character(peaks$Feature)
## counts
# readMM (Matrix package) loads the sparse peak x cell count matrix.
counts <- readMM('../data/satpathy2016/matrix.mtx')
rownames(counts) <- as.character(peaks)
colnames(counts) <- summaryTCells$Group_Barcode
## cluster annotation
# Map the 19 cluster ids ("Cluster1".."Cluster19") to cell-type labels.
t_cell_clust <- summaryTCells$T_Cell_Cluster
cluster_labs <- paste0('Cluster', 1:19)
cell_type <- c('1-Naive CD4 T', '2-Activated CD4', '3-Th1', '4-Memory CD4 T',
'5-Th17', '6-Th 1', '7-Th 2', '8-Treg 1',
'9-Treg 2', '10-Treg 3', '11-Treg 4', '12-Effector CD8 T',
'13-Naive CD8 T', '14-Memory CD8 T', '15-Early TEx', '16-Intermediate TEx',
'17-Terminal TEx', '18-Other T', '19-Other T')
cluster_cell_types <- plyr::mapvalues(t_cell_clust,
from = cluster_labs,
to = cell_type)
# Keep only the three exhausted-T-cell clusters.
TEx <- c('15-Early TEx',
'16-Intermediate TEx',
'17-Terminal TEx')
# NOTE(review): `cluster_cell_types %in% TEx` would compute the same mask
# without the per-element sapply.
is_TEx <- sapply(cluster_cell_types, function(x) x %in% TEx)
TEx_names <- summaryTCells$Group_Barcode[is_TEx]
## Dropping non CD8 T Cells and duplicated barcodes
counts <- counts[, is_TEx]
saveRDS(counts, '../data/satpathy_cdtcells_only_counts.rds')
|
3c4fe08d1c87bdfe90a8b9c9f9680c26bda0c2c4
|
2abf0a9ba6ca20f8599feee37ce84e5749e41a40
|
/man/dist_ij_k.Rd
|
07369943b304b31ae6c87cb7f86e3d3166d58de1
|
[] |
no_license
|
talegari/bigdist
|
40635eae08fa3696932c701762ebeca050717e92
|
a7f8365f40cd8e61a4a471cfd15699b00dfd8b38
|
refs/heads/master
| 2021-06-11T23:37:10.341795
| 2021-04-05T06:24:14
| 2021-04-05T06:24:14
| 146,413,978
| 4
| 2
| null | 2021-04-05T06:24:14
| 2018-08-28T08:10:14
|
R
|
UTF-8
|
R
| false
| true
| 452
|
rd
|
dist_ij_k.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dist_index_converters.R
\name{dist_ij_k}
\alias{dist_ij_k}
\title{Vectorized version of dist_ij_k_}
\usage{
dist_ij_k(i, j, size)
}
\arguments{
\item{i}{row indexes}
\item{j}{column indexes}
\item{size}{value of size attribute of the dist object}
}
\value{
k indexes
}
\description{
Convert ij indexes to k indexes for a dist object
}
\examples{
dist_ij_k(1:3, 4:6, 10)
}
|
3edeb4e5bea7e00989fbdc7185fd824b0b68ce7d
|
0b4dece4d948400f501bdd830c3001c498ef69df
|
/Code TP3.R
|
220812f3836627a2188410d39dd74aa42c06422b
|
[] |
no_license
|
ghatfan99/StatDescrptive
|
a54e9be7e5a09a7b18bd71e4b07aa7636156fe9d
|
3c760b6da587bc6861f0cce2fe87a8a6538da67e
|
refs/heads/master
| 2021-07-06T21:21:42.129732
| 2020-12-13T16:52:25
| 2020-12-13T16:52:25
| 210,345,607
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 979
|
r
|
Code TP3.R
|
# Chi-squared test of independence between diploma and sex
# (diplome_sexe.csv, semicolon-separated).
data=read.csv("diplome_sexe.csv",header =TRUE,sep=";")
dim(data)
head(data)
diplome=data$Diplome
sexe=data$Sexe
levels(diplome)
table(diplome)
# Number of respondents whose diploma is "Licence".
length(which(diplome=="Licence"))
# Contingency table and independence test.
diplome_sexe=table(diplome,sexe)
chisq.test(diplome_sexe)
qchisq(0.95,2) # critical value; can also be read from the table handed out in the tutorial
# Cramér's V: effect size of the association between two categorical
# variables, derived from the chi-squared statistic.
#
# x, y: two vectors of equal length; coerced to factors if needed.
# Returns a single numeric value in [0, 1].
cramer <- function(x, y) {
  # BUG FIX: levels() is NULL for non-factor input, which made
  # min(p - 1, q - 1) negative and the result NaN; coerce explicitly.
  x <- as.factor(x)
  y <- as.factor(y)
  res <- chisq.test(x, y)
  chi2 <- as.numeric(res$statistic)
  n <- length(x)                            # number of observations
  m <- min(nlevels(x) - 1, nlevels(y) - 1)  # degrees-of-freedom factor
  # V = sqrt(chi2 / (n * min(p - 1, q - 1)))
  sqrt(chi2 / (n * m))
}
# Cramér's V for the observed diploma/sex data.
cramer(sexe, diplome)

# Simulate 75 subjects with a random film genre and education level.
n_sim <- 75
film <- as.factor(sample(c("A", "C", "S"), n_sim, replace = TRUE))
niveau <- as.factor(sample(c("C", "L", "U"), n_sim, replace = TRUE))
tab.obs <- table(film, niveau)
chisq.test(tab.obs)
qchisq(0.95, 1)  # NB: a 3x3 table actually has (3-1)*(3-1) = 4 df

# Expected counts under independence: row total * column total / n.
# (Replaces the cell-by-cell double loop with a hard-coded 75.)
tab.theo <- outer(rowSums(tab.obs), colSums(tab.obs)) / n_sim

# Chi-squared statistic recomputed by hand.
chi2 <- sum((tab.obs - tab.theo)^2 / tab.theo)

# BUG FIX: the original called cramer(sexe, examen) but `examen` was never
# defined; the simulated variables are `film` and `niveau`.
cramer(film, niveau)
|
22d4afb6113a6cf52b8fbee489f380f0cd6ea033
|
02bfe46647db874539d5c41cfd5521046c382717
|
/Codes for running simulated data.R
|
72972419a87a11fb7565b8bb41cd707ce5796aef
|
[] |
no_license
|
zaq0718/A-Two-decision-Unfolding-Tree-Model
|
2bdbbc46cdafbfc64b08aa10faf78037441d47f3
|
aec269ba262695ae2d8fd3308970f8df18bb0e68
|
refs/heads/master
| 2021-06-21T04:24:30.332000
| 2020-12-14T13:03:49
| 2020-12-14T13:03:49
| 146,249,940
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,320
|
r
|
Codes for running simulated data.R
|
library(R2WinBUGS)
# WinBUGS model for the two-decision unfolding tree (GGUM-Tree).
# NOTE(review): this function body is never executed by R -- R2WinBUGS
# deparses it and ships the text to WinBUGS, so `~`, dcat/dmnorm and
# `logit(p) <- ...` are BUGS syntax, not R semantics. Any edit must keep
# the body deparse-compatible; I, J and the response matrix r are data.
GGUMTree <- function () {
for (i in 1:I) {
# Two-dimensional latent trait per person (theta[,1]: content, theta[,2]: style).
theta[i,1:2] ~ dmnorm(mu[1:2], I_cov[1:2,1:2])
for (j in 1:J) {
# Process I
num.p1[i,j,1] <- 1
num.p1[i,j,2] <- exp(alpha[j]*(1*(theta[i,1]-delta[j]) - tau[j]))
num.p1[i,j,3] <- exp(alpha[j]*(2*(theta[i,1]-delta[j]) - tau[j]))
num.p1[i,j,4] <- exp(alpha[j]*(3*(theta[i,1]-delta[j])))
denom.p1[i,j] <- sum(num.p1[i,j,])
prob.1[i,j,1] <- (num.p1[i,j,1]+num.p1[i,j,4])/denom.p1[i,j] # Disagree
prob.1[i,j,2] <- (num.p1[i,j,2]+num.p1[i,j,3])/denom.p1[i,j] # Agree
# Process II
logit(prob.2[i,j]) <- beta[j]*(theta[i,2] - xi[j])
# Category probabilities = agree/disagree branch x intensity branch.
P[i,j,1] <- prob.1[i,j,1]*prob.2[i,j] # Strongly disagree
P[i,j,2] <- prob.1[i,j,1]*(1-prob.2[i,j]) # Disagree
P[i,j,3] <- prob.1[i,j,2]*(1-prob.2[i,j]) # Agree
P[i,j,4] <- prob.1[i,j,2]*prob.2[i,j] # Strongly agree
r[i,j] ~ dcat(P[i,j,])
}
}
# Correlation between the two latent dimensions; unit-variance covariance.
rho ~ dunif(-1, 1)
mu[1] <- 0
mu[2] <- 0
Cov[1,1] <- 1
Cov[1,2] <- rho
Cov[2,1] <- rho
Cov[2,2] <- 1
I_cov[1:2, 1:2] <- inverse(Cov[1:2, 1:2])
# Item-parameter priors.
for (j in 1:J) {
# Process I
alpha[j] ~ dlnorm(0, 4)
delta[j] ~ dnorm(0, 0.25)
tau[j] ~ dnorm(-2, 0.25)
# Process II
beta[j] ~ dlnorm(0, 4)
xi[j] ~ dnorm(0, 0.25)
}
}
# Initial values for the single MCMC chain. J (number of items) is read
# from the calling environment at invocation time.
init_p <- function() {
  list(
    alpha = rep(1, J),
    delta = rep(0, J),
    tau   = rep(-2, J),
    beta  = rep(1, J),
    xi    = rep(0, J),
    rho   = 0.2
  )
}
# Parameters whose posterior draws WinBUGS should return.
parameters <- c("alpha", "delta", "tau", "beta", "xi", "rho", "theta")
# NOTE(review): machine-specific working directory; all paths below are
# relative to it.
setwd("P:/Research/GGUM/JBES/N = 1000")
# Fit the model to simulated datasets 6..20, one WinBUGS run per file.
for (i in 6:20) {
# NOTE(review): `digit=` relies on partial matching of formatC's `digits`.
outpath <- paste("Raw data/data_", formatC(i, format="d", digit=2, flag="0"), ".txt", sep="")
r <- data.matrix(read.table(outpath, header=FALSE))
I <- nrow(r)
J <- ncol(r)
# Data passed by name; bugs() resolves these in the global environment.
fdata <- list("r", "I", "J")
output <- bugs(fdata, init_p, parameters, GGUMTree, n.chains=1, n.iter=10000, n.burnin=5000, n.thin=1,bugs.directory = "C:/WinBUGS14")
pd <- paste("pD =", output$pD)
dic <- paste("DIC =", output$DIC)
# `outpath` is reused for this dataset's results file.
outpath <- paste("Output/output_", formatC(i, format="d", digit=2, flag="0"), ".txt", sep="")
write.table(output$summary, outpath, sep="\t")
write(c(pd, dic), outpath, sep="\t", append=T)
}
quit("no")
|
874ea0735a7ae1490283aa66dbaefc95a782b2d4
|
839302e3d94ffc434f1f3106782b31498105dabd
|
/DataScienceFromAtoZ/Script/Script/Part_2/R_Demo_4_subscript_filter.R
|
255cf34815c10003f93a049554fdea8e09c06ca1
|
[] |
no_license
|
bcafferky/shared
|
cb084a457b39e591ef30beb932b3341a861f81ed
|
c1eb1b8d5172a8ffe7407241af0115fe7fdb5b85
|
refs/heads/master
| 2023-08-19T07:42:24.604452
| 2023-08-16T12:10:08
| 2023-08-16T12:10:08
| 96,798,774
| 384
| 312
| null | 2023-05-22T15:16:37
| 2017-07-10T16:29:03
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 677
|
r
|
R_Demo_4_subscript_filter.R
|
# Demo: logical subscripting on vectors and matrices ("R is all about arrays").

myvect <- 1:10
myvect
myvect > 3              # element-wise comparison returns a logical vector
myvect[myvect > 3]      # a logical subscript keeps the TRUE positions
myvectbool <- myvect > 3
myvectbool

yourvect <- 20:30
yourvect
# Extract from yourvect using the boolean mask built from myvect.
# NOTE(review): yourvect has 11 elements but the mask only 10, so R
# silently recycles the mask -- acceptable only because this is a demo.
yourvect[myvectbool]
# Wait... reverse that...
yourvect[myvectbool == FALSE]   # idiomatic form: yourvect[!myvectbool]
# Taking this a step further
yourvect[yourvect > 22]
yourvect[myvect > 3]

mymat <- matrix(1:32, nrow = 8)
mymat
# Find rows whose second column is > 10
mymat[mymat[, 2] > 10, ]
# BUG FIX: removed a stray `v1[b1]` line -- neither object exists in this
# script, so it always stopped execution with an error.
# Same filter applied to the column itself, in one line...
mymat[, 2][mymat[, 2] > 10]
# Give names to columns...
colnames(mymat) <- c("First", "Second", "Third", "Fourth")
mymat[, "First"]
|
1ec35c41492f7fc45957639d83eaf760842f8e59
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/Luminescence/R/internals_RLum.R
|
87fd4ac383881b8fb7c298475911fe1fa0b620dd
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 726
|
r
|
internals_RLum.R
|
####################################################################################################
## INTERNAL HELPER FUNCTIONS ##
####################################################################################################
#' Create unique object identifier
#'
#' This function creates a unique object identifier. This function has no further parameters, as
#' an md5 hash is calculated from the system timestamp adding an arbitrary value using \code{runif}
#'
#' @return
#' Returns a UID
#'
#' @examples
#' .create_UID()
#'
#' @noRd
.create_UID <- function(){
  ## md5 of "timestamp + random draw" -> practically unique identifier
  seed_string <- paste(as.character(Sys.time()), runif(1))
  digest::digest(object = seed_string, algo = "md5")
}
|
8d10247c715a09a3ace97e2b6bc2e82344e859f4
|
9d4f4c06a13bf23ea65f5b2969ab7a03e050efe7
|
/man/popmid.Rd
|
3733b680c6c1ad12d7cac1fc80a1b24c857ed279
|
[] |
no_license
|
m-allik/SocEpi
|
817fc7ba1dbc0ea8c0aa6e4c15c047d35821288a
|
aa90c4110061bd3feb7c510a11abc05ae936aeab
|
refs/heads/master
| 2021-07-14T05:00:54.159205
| 2020-05-18T08:12:37
| 2020-05-18T08:12:37
| 139,704,200
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 577
|
rd
|
popmid.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/popmid_fun.R
\name{popmid}
\alias{popmid}
\title{Population midpoints}
\usage{
popmid(x)
}
\arguments{
\item{x}{Vector of population distribution that sums to 1.}
}
\value{
A vector of population midpoints.
}
\description{
\code{popmid} calculates the cumulative midpoints for population percentiles that can then be used as the
predictor in the OLS model for calculating the slope, i.e. the slope index of inequality (SII).
For internal use within the \code{rii} function.
}
\keyword{internal}
|
5b0341896a4d599c52a319b1ead6dcfc5de2bc9d
|
3b9dac7e9b3806989c11dec24bee44b203060e36
|
/time_series/final_proj/report_graphics.R
|
562ccba7f276306cff0c58f1366aec61f59e74c7
|
[] |
no_license
|
dhbrand/iaa_fall2_hw
|
d07bb9c3de6ebd0ef3e4253f35dd1d81c6f0b977
|
26acf1fb8a03c99183a9adf0b6f3254b36d49e7a
|
refs/heads/master
| 2020-04-25T05:25:53.430686
| 2019-02-25T16:37:10
| 2019-02-25T16:37:10
| 172,542,914
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,632
|
r
|
report_graphics.R
|
## Report graphics: forecast-vs-actual plot and seasonal decomposition
## for well G-561_T.
library(haven)
library(tidyverse)
library(forecast)
library(lubridate)

# Forecast produced in SAS (columns: FORECAST, well, L95, U95, ...).
fcast <- read_sas("data/forecast.sas7bdat")
str(fcast)
tail(fcast)
fc_ts <- ts(fcast$FORECAST)

# Hourly timestamp index matching the forecast rows.
timeset <- seq(ymd_h("2016-06-01 0", tz = "est"), ymd_h("2018-06-12 23", tz = "est"), "hours") %>%
  tibble %>%
  select(date = 1)
fcast <- bind_cols(date = timeset, fcast)

# BUG FIX: removed a dangling `+ geom_ribbon(...)` fragment (no ggplot
# object preceded it) and an unused `line` data frame left over from an
# earlier draft; the ribbon is added inside the complete plot below.

plot_subset <- fcast %>%
  filter(date >= "2018-05-01")

# Forecast (blue) vs observed depth (red) with the 95% interval band.
ggplot(data = plot_subset, aes(x = date, y = FORECAST)) +
  geom_line(color = "blue", size = 1.5) +
  geom_line(aes(y = well), color = "red", size = 2) +
  geom_ribbon(aes(ymin = L95, ymax = U95), alpha = 0.3, fill = "lightskyblue") +
  geom_vline(xintercept = as.POSIXct("2018-06-05 23:00:00"), color = "grey40", linetype = 4) +
  theme_bw() +
  scale_x_datetime(date_breaks = "1 week") +
  labs(x = "Date", y = "Well Depth (in Feet)", title = "Forecasted vs Actual Depth of Well G-561_T") +
  theme(plot.title = element_text(hjust = 0.5))

# Seasonal component via STL decomposition (hourly data, yearly cycle).
well <- read_csv("data/g561_final.csv")
well_ts <- ts(well$well, frequency = 8766, start = c(2007.10, 5))
well_stl <- stl(well_ts, s.window = 7)
seas <- as.numeric(well_stl$time.series[, 1])
seas_df <- data.frame(date = date(well$date), seas = seas)

ggplot(seas_df, aes(date, seas)) +
  geom_line(color = "blue", size = 1) +
  theme_bw() +
  scale_x_date(date_breaks = "1 year") +
  labs(x = "Date", y = "Well Depth (in Feet)", title = "Seasonal Component of Well G-561_T") +
  theme(plot.title = element_text(hjust = 0.5))
|
09ca49042cda41dd5fc5bc22254b19d4179535c8
|
69162f720f226384a4f48c8ab5f72af856ff8e6d
|
/src/train_test_split.R
|
44c62950497a0fa25a500ed515a1fa47878ce38b
|
[] |
no_license
|
SoftServeSAG/aws_anomaly_detection
|
4899fdeb93218fa6f50cda190152b5e2211981ad
|
ca5c4d924032efe770eaaf238e721fb3d305d58d
|
refs/heads/master
| 2021-09-13T09:40:48.016445
| 2018-04-28T02:00:33
| 2018-04-28T02:00:33
| 107,226,234
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,171
|
r
|
train_test_split.R
|
# library() errors immediately when a package is missing, unlike require().
library(xts)
library(lubridate)
library(dplyr)
library(dygraphs)

# Data preprocessing functionality: train/test split.

#' Split a time series into train and test sets and plot both.
#'
#' @param data xts object (anything accepted by as.xts also works).
#' @param split_data single number in [0, 1]: fraction of observations
#'   assigned to the training set.
#' @return list with:
#'   train - training subset (xts)
#'   test  - test subset (xts; empty when everything goes to train)
#'   plot  - dygraphs plot of the whole series
train_test_split_time_series <- function(data, split_data = 0.75) {
  # Fail fast on a malformed split fraction instead of producing an
  # empty or inverted split further down.
  if (!is.numeric(split_data) || length(split_data) != 1 ||
      is.na(split_data) || split_data < 0 || split_data > 1) {
    stop("'split_data' must be a single number between 0 and 1", call. = FALSE)
  }

  # Coerce plain input to xts so the index arithmetic below is uniform.
  if (!is.xts(data)) {
    data <- as.xts(data)
  }

  # Index of the last training row; at least one row always goes to train.
  split_index <- max(as.integer(split_data * length(index(data))), 1)

  train <- head(data, split_index)
  test <- tail(data, dim(data)[1] - split_index)

  # Build the dygraph: two series when a test set exists, one otherwise.
  if (length(test) > 0) {
    plot_data <- dygraph(merge(train, test, fill = NA)) %>%
      dySeries("train", label = "Train Data", color = 'steelblue') %>%
      dySeries("test", label = "Test Data", color = rgb(0.2, 0.7, 0.5)) %>%
      dyHighlight(highlightSeriesOpts = list(strokeWidth = 2),
                  highlightCircleSize = 4,
                  highlightSeriesBackgroundAlpha = 0.2,
                  hideOnMouseOut = FALSE) %>%
      dyLegend(show = "always", hideOnMouseOut = FALSE) %>%
      dyOptions(colors = RColorBrewer::brewer.pal(4, "Accent"))
  } else {
    plot_data <- dygraph(train) %>%
      dySeries(label = "Train Data", color = 'steelblue') %>%
      dyHighlight(highlightSeriesOpts = list(strokeWidth = 2),
                  highlightCircleSize = 4,
                  highlightSeriesBackgroundAlpha = 0.2,
                  hideOnMouseOut = FALSE) %>%
      dyLegend(show = "always", hideOnMouseOut = FALSE) %>%
      dyOptions(colors = RColorBrewer::brewer.pal(4, "Accent"))
  }

  return(list(train = train,
              test = test,
              plot = plot_data))
}
|
0f1a8877cd3b508b535a25e9b750862a2fbba236
|
cc8fce695e24ab817e2e1646725c8e05423dcdbe
|
/textMining/dataFetcher.R
|
0f09a1b1b637b1995373bb2cda721b80d28efb4d
|
[] |
no_license
|
Regateiro/kdd
|
afbeb33d37372d24d3bf86b391248b8e2dd99e52
|
ba88dcc2cdee2b048a4486380312c1eaef108433
|
refs/heads/master
| 2021-01-10T15:29:35.307639
| 2015-12-16T12:23:52
| 2015-12-16T12:23:52
| 43,708,804
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,269
|
r
|
dataFetcher.R
|
# Fetch top-rated IMDb movie ids and their critic reviews through the
# Import.IO scraping API, then write them to ReviewList.csv.
#install.packages("httr")
library(httr)

## --- Movie list -----------------------------------------------------------
# Endpoint split around the paging offset (the `start=` index).
url <- "https://api.import.io/store/data/71e12311-6ce3-436e-8b5d-9c366a87276d/_query?input/webpage/url=http%3A%2F%2Fwww.imdb.com%2Fsearch%2Ftitle%3Flanguages%3Den%257C1%26num_votes%3D10000%2C%26sort%3Duser_rating%2Cdesc%26start%3D"
url2 <- "%26title_type%3Dfeature&_user=4911226d-82c8-4f2f-9e06-e04bffad812e&_apikey=4911226d82c84f2f9e06e04bffad812e3ee3dd322ae4d7309f3751ce6913e2da924f27aabd67c1a8d8aee488dc392b80c9de4885cd3d16c6d630151818526d3b5b50b3d57bcf816bfa015eeeb4989a1f"

index <- 1
ids <- character(0)
while (index < 5200) {
  r <- GET(paste(url, index, url2, sep = ""))
  results <- content(r)$result
  for (i in seq_along(results)) {
    # The IMDb title id (tt.......) sits at characters 27-35 of the
    # poster image link.
    ids <- c(ids, substr(results[[i]]$image_link, 27, 35))
  }
  index <- index + 50  # 50 results per page
}

## --- Critic reviews -------------------------------------------------------
url <- "http://api.import.io/store/data/66ff6f91-93ef-4f86-8307-ba7e6dcbc628/_query?input/webpage/url=http%3A%2F%2Fwww.imdb.com%2Ftitle%2F"
url2 <- "%2Fcriticreviews%3Fref_%3Dtt_ov_rt&_user=4911226d-82c8-4f2f-9e06-e04bffad812e&_apikey=4911226d82c84f2f9e06e04bffad812e3ee3dd322ae4d7309f3751ce6913e2da924f27aabd67c1a8d8aee488dc392b80c9de4885cd3d16c6d630151818526d3b5b50b3d57bcf816bfa015eeeb4989a1f"

# Flat vector of (movie id, critic score, review summary) triples.
critics <- c()
for (j in seq_along(ids)) {
  print(ids[j])
  r <- GET(paste(url, ids[j], url2, sep = ""))
  # BUG FIX: the original retried while `r[2] != 200`; an httr response's
  # HTTP status is read with status_code(), not by list position.
  while (status_code(r) != 200) {
    r <- GET(paste(url, ids[j], url2, sep = ""))
  }
  results <- content(r)$result
  if (length(results) != 0) {
    for (i in seq_along(results)) {
      critics <- c(critics, ids[j],
                   results[[i]]$critscore_number,
                   results[[i]]$summary_description)
    }
  }
}

# BUG FIX: the original seeded the data frame with a dummy (0, 0, 0) row
# that was written to the CSV; reshaping the flat triples with matrix()
# avoids the stray row and the O(n^2) rbind-in-a-loop.
P <- as.data.frame(matrix(critics, ncol = 3, byrow = TRUE),
                   stringsAsFactors = FALSE)
colnames(P) <- c("MovieId", "Value", "Comment")
write.csv(P, "ReviewList.csv", row.names = FALSE)
|
238c011337d41f5e90d68d357343fab41d1de5b9
|
85e9720bd0a467ee1425d64a93fdd8f75128dbe7
|
/Caso2/mejor.R
|
15ab5f0f90b2001a228a040b6ef3fa44f9a54fdc
|
[] |
no_license
|
FranciscoGarCar/Programacion_Actuarial_III_OT16
|
d648f3a521e3ac80fe6fa5ae022902a931346b1c
|
6e32d177bd17dc821b279542c89ac13310cea641
|
refs/heads/master
| 2020-04-17T15:19:53.818794
| 2016-11-07T05:46:00
| 2016-11-07T05:46:00
| 66,847,613
| 0
| 0
| null | null | null | null |
ISO-8859-2
|
R
| false
| false
| 2,991
|
r
|
mejor.R
|
# Quick exploration of the Hospital Compare outcome-of-care data.
outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
head(outcome)
ncol(outcome)
colnames(outcome)
names(outcome)
# Column 11 holds the 30-day heart-attack death rate; everything was read
# as character above, so convert before plotting.
outcome[, 11] <- as.numeric(outcome[, 11])
# Distribution of the 30-day mortality rates.
hist(outcome[, 11])
# Print the hospital with the lowest 30-day mortality rate for a given
# outcome in a given US state (Hospital Compare data).
#
# estado:    state abbreviation (column 7 of the CSV)
# resultado: one of "ataque al corazon", "falla cardiaca", "neumonia"
#
# Ties are broken alphabetically by hospital name (column 2).
# Stops with the original (Spanish) messages on invalid input.
mejor <- function(estado, resultado) {
  # Outcome -> mortality-rate column in outcome-of-care-measures.csv.
  columnas <- c("ataque al corazon" = 11,
                "falla cardiaca"    = 17,
                "neumonia"          = 23)

  # NOTE(review): the original setwd() to a hard-coded local path was
  # removed; the CSV must live in the current working directory.
  a <- read.csv("outcome-of-care-measures.csv", colClasses = "character")

  # Input validation (messages kept identical to the original).
  estado_valido <- estado %in% unique(a[, 7])
  resultado_valido <- resultado %in% names(columnas)
  if (!estado_valido && !resultado_valido) {
    stop("Estado inválido y resultado inválido")
  }
  if (!estado_valido) {
    stop("Estado inválido")
  }
  if (!resultado_valido) {
    stop("resultado inválido")
  }

  # Keep the requested state and convert rates to numeric.
  # BUG FIX: the "ataque al corazon" branch of the original compared the
  # rates as character strings (lexicographic min); every outcome now
  # uses as.numeric, with "Not Available" mapping to NA.
  col <- columnas[[resultado]]
  datos <- a[a[, 7] == estado, ]
  tasas <- suppressWarnings(as.numeric(datos[, col]))

  mejor_tasa <- min(tasas, na.rm = TRUE)
  hospitales <- datos[which(tasas == mejor_tasa), 2]

  # Alphabetical tie-break, as in the original implementation.
  print(sort(hospitales)[1])
}

mejor("MD", "ataque al corazon")
|
ff29e506c5d3493b6352b7aff38906e83344fb77
|
a19555ba297495802404e97e710c6c007c5e4f43
|
/man/Alg_RVB1.Rd
|
5cdef1b4492ef31997876488e44ead5712312c7a
|
[
"MIT"
] |
permissive
|
hrnasif/rvb
|
2fd8568277ae440cb04088f65f5fcb36eba73b5b
|
fdaada9c5b77613a6734253244dea3fc6145f47b
|
refs/heads/main
| 2023-05-19T13:50:31.426629
| 2021-06-10T05:06:59
| 2021-06-10T05:06:59
| 365,866,425
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 883
|
rd
|
Alg_RVB1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Alg_RVB1.R
\name{Alg_RVB1}
\alias{Alg_RVB1}
\title{RVB1 Algorithm implementation}
\usage{
Alg_RVB1(y, X, Z, Wprior, etahat, model, m = 1)
}
\arguments{
\item{y}{List. Responses per cluster}
\item{X}{List. Covariates per cluster for fixed effects}
\item{Z}{List. Covariates per cluster for random effects}
\item{Wprior}{List. Wishart prior for random effect covariance}
\item{etahat}{List. Estimate of canonical parameter about which to approximate}
\item{model}{Character. Either "poisson" or "binomial"}
\item{m}{Integer. Number of trials if model is binomial. Keep m = 1 if model is "poisson"}
}
\value{
List containing posterior covariance C, posterior mean mu, the ELBO values per
each 1000 iterations, the run duration, and the final ELBO value.
}
\description{
RVB1 Algorithm implementation
}
|
9be0556d00055e8f725310533e4eae494820c82d
|
f5199fc56c1a4e0f2a28c8eceb8f8f8955101e87
|
/Into bayesian data analysis.R
|
e5f4cc8849e73cde7ceef6568cad4d63899fcb61
|
[] |
no_license
|
mshasan/BayesianDataAnalysis
|
4940a507d77e5b79ce67259b9678e2d1a8ad0cfe
|
e6e320b78076f94000c516b6fef63bfd99978ccb
|
refs/heads/master
| 2021-01-20T08:40:27.025570
| 2017-05-03T17:49:53
| 2017-05-03T17:49:53
| 90,175,198
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 819
|
r
|
Into bayesian data analysis.R
|
## Problem 2 ----------------
library(ggplot2)
library(grid)
library(gridExtra)

# Densities of four Beta distributions on a common grid over [0, 1].
# (Renamed from a/b/c/d: binding `c` shadowed base::c; axis labels in the
# qplot titles carry the distribution parameters, so nothing is lost.)
x <- seq(0, 1, length = 100)
beta_a <- dbeta(x, .5, .5)
beta_b <- dbeta(x, 10.2, 1.5)
beta_c <- dbeta(x, 1.5, 10.2)
beta_d <- dbeta(x, 100, 62)

p1 <- qplot(x, beta_a, main = "Beta (0.5, 0.5)", geom = "line")
p2 <- qplot(x, beta_b, main = "Beta (10.2, 1.5)", geom = "line")
p3 <- qplot(x, beta_c, main = "Beta (1.5, 10.2)", geom = "line")
p4 <- qplot(x, beta_d, main = "Beta (100, 62)", geom = "line")
grid.arrange(p1, p2, p3, p4, nrow = 2, ncol = 2, main = "Different Beta Distribution")

## Write the same 2x2 panel directly to a PDF file.
pdf("Different Beta distribution.pdf", width = 8, height = 6)
grid.arrange(p1, p2, p3, p4, nrow = 2, ncol = 2, main = "Different Beta Distribution")
# BUG FIX: the PDF device was never closed, leaving the file truncated.
dev.off()

## Problem 5--------------------
y <- c(46, 58, 40, 47, 47, 54, 51, 50, 52, 50, 53, 43, 48, 50, 55, 49, 50, 52, 56, 49)
y
# Sum of squared deviations from 51.
sum((y - 51)^2)
|
3622c3a5cc0e62447d05daa64f1eb2d9f54b8787
|
5d0bc9fa9c48a468d115e9930f5eac66a0764789
|
/inst/snippets/Exploration2.2.10.R
|
d8d319dc377cad186302e279313ffc706bf0d381
|
[] |
no_license
|
rpruim/ISIwithR
|
a48aac902c9a25b857d2fd9c81cb2fc0eb0e848e
|
7703172a2d854516348267c87319ace046508eef
|
refs/heads/master
| 2020-04-15T20:36:55.171770
| 2015-05-21T09:20:21
| 2015-05-21T09:20:21
| 21,158,247
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 53
|
r
|
Exploration2.2.10.R
|
histogram(~ sleepHrs, data = SleepTimes, nint = 15)
|
04e81101465031de5f9f6c5d204bdf1fd91a98b6
|
39f4df1f5c2faadbdf366d65ede30aa5edba3497
|
/man/reverse.Rd
|
20423c770f5c2f6ba650eb8df459e156d99880e7
|
[] |
no_license
|
cran/kutils
|
a19c69b6730548aa849ca841291d95de92dd3863
|
53ada7e4308f456a0a109955ecfd9122f6263aba
|
refs/heads/master
| 2023-07-10T17:11:29.010416
| 2023-06-26T21:40:02
| 2023-06-26T21:40:02
| 77,183,151
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,123
|
rd
|
reverse.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/factors.R
\name{reverse}
\alias{reverse}
\title{Reverse the levels in a factor}
\usage{
reverse(x, eol = c("Skip", "DNP"))
}
\arguments{
\item{x}{a factor variable}
\item{eol}{values to be kept at the end of the list. Does not
accept regular expressions, just literal text strings
representing values.}
}
\value{
a new factor variable with reversed values
}
\description{
Simple literal reversal. Will stop with an error message if x is
not a factor (or ordered) variable.
}
\details{
Sometimes people want to
reverse some levels, excluding others and leaving them at the end
of the list. The "eol" argument sets aside some levels and puts
them at the end of the list of levels.
The use case for the \code{eol} argument is a factor
with several missing value labels, as appears in SPSS. With
up to 18 different missing codes, we want to leave them
at the end. In the case for which this was designed, the
researcher did not want to designate those values as
missing before inspecting the pattern of observed values.
}
\examples{
## Consider alphabetization of upper and lower
x <- factor(c("a", "b", "c", "C", "a", "c"))
levels(x)
xr1 <- reverse(x)
xr1
## Keep "C" at end of list, after reverse others
xr2 <- reverse(x, eol = "C")
xr2
y <- ordered(x, levels = c("a", "b", "c", "C"))
yr1 <- reverse(y)
class(yr1)[1] == "ordered"
yr1
## Hmm. end of list amounts to being "maximal".
## Unintended side-effect, but interesting.
yr2 <- reverse(y, eol = "C")
yr2
## What about a period as a value (SAS missing)
z <- factor(c("a", "b", "c", "b", "c", "."))
reverse(z)
z <- factor(c(".", "a", "b", "c", "b", "c", "."))
reverse(z)
## How about R NA's
z <- factor(c(".", "a", NA, "b", "c", "b", NA, "c", "."))
z
reverse(z)
z <- ordered(c(".", "a", NA, "b", "c", "b", NA, "c", "."))
z
str(z)
## Put "." at end of list
zr <- reverse(z, eol = ".")
zr
str(zr)
z <- ordered(c(".", "c", NA, "e", "a", "c", NA, "e", "."),
levels = c(".", "c", "e", "a"))
reverse(z, eol = ".")
reverse(z, eol = c("a", "."))
}
\author{
Paul Johnson <pauljohn@ku.edu>
}
|
fcfb6b5fbd52de76e3f0ef6df2c302980733ad05
|
64ba42a2cb4b5ee3c4aac24362cf01e6a88c40f8
|
/man/dot-get_trait_individuals_values.Rd
|
df88c3ea0e1442ff498a633babeb013ab6ccf8e7
|
[] |
no_license
|
gdauby/bdd_plots_central_africa
|
8ac5bbb87429aa7d2e02b873f57d96d228dd1680
|
467d7f31aebd5b2caa39b44428d45ac88d2c26e4
|
refs/heads/master
| 2023-06-23T00:49:19.408360
| 2023-06-09T13:58:21
| 2023-06-09T13:58:21
| 165,248,727
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,145
|
rd
|
dot-get_trait_individuals_values.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions_manip_db.R
\name{.get_trait_individuals_values}
\alias{.get_trait_individuals_values}
\title{Internal function}
\usage{
.get_trait_individuals_values(
traits,
src_individuals = NULL,
id_individuals = NULL,
ids_plot = NULL,
skip_dates = TRUE,
show_multiple_measures = FALSE,
collapse_multiple_val = FALSE
)
}
\arguments{
\item{traits}{string vector with trait needed}
\item{id_individuals}{numeric vector with id_n individuals requested}
\item{ids_plot}{numeric vector with id_plots requested}
\item{skip_dates}{logical whether include day, month and year of observations}
\item{show_multiple_measures}{logical whether multiple measures (i.e. census or sometimes more than one value for given measure)}
\item{collapse_multiple_val}{logical whether multiple traits measures should be collapsed (resulting values as character, separated by dash)}
}
\value{
tibble
}
\description{
For each trait, get a tibble of individuals with measures or observations, handling cases with several observations
}
\author{
Gilles Dauby, \email{gilles.dauby@ird.fr}
}
|
fa59f861eec86b6879ea053c3eafabe31ccbeaf2
|
e5f9833167d902326e1404180d759c62038f7507
|
/run_analysis.R
|
0725943e30898cc047214500d6e071e5b40afbd7
|
[] |
no_license
|
werderhg/Data-Cleaning-Course-Project
|
5ae326de4d99fff75cd57f51778b7c3cdae42fa3
|
47b8ec840736ba29527e9a3a4af9a35c70d47377
|
refs/heads/master
| 2021-01-25T10:43:37.456222
| 2014-12-20T10:39:17
| 2014-12-20T10:39:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,102
|
r
|
run_analysis.R
|
library(downloader)

## Download and unzip the UCI HAR ("Human Activity Recognition") dataset.
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download(url, "dataset.zip", mode = "wb", method = "curl")
unzip("dataset.zip")
# BUG FIX: the original called unlink(url), a no-op because no local file
# is named after the URL; the intent was to remove the downloaded archive.
unlink("dataset.zip")

### START
# NOTE(review): the txt files are read from the working directory, so it
# must point at the folder that holds the unzipped files.

# 1. STEP -- read the raw files:
#    subject_* -> id of the person for each observation
#    y_*       -> activity code (1-6) for each observation
#    X_*       -> the feature measurements
subject_train <- read.table("subject_train.txt")
y_train <- read.table("y_train.txt")
X_train <- read.table("X_train.txt")
subject_test <- read.table("subject_test.txt")
y_test <- read.table("y_test.txt")
X_test <- read.table("X_test.txt")

# 2. STEP -- combine the columns (subject, activity, features), then
#    stack train on top of test.
train_raw <- cbind(subject_train, y_train, X_train)
test_raw <- cbind(subject_test, y_test, X_test)
Data_raw <- rbind(train_raw, test_raw)

# 3. Give the columns the right labels (V1 etc. replaced by feature names,
#    preceded by "subject" and "activity").
a <- read.table("features.txt")
q <- cbind(c("1", "2"), c("subject", "activity"))
q <- rbind(q, a)
my_new_col_names <- q[, 2]
colnames(Data_raw) <- my_new_col_names

# 4. Subset to subject, activity and the mean/std feature columns.
is_mean <- grepl("mean", my_new_col_names)
is_std <- grepl("std", my_new_col_names)
is_both <- (is_mean | is_std)   # TRUE if the name contains "mean" or "std"
is_both[1] <- TRUE              # always keep subject
is_both[2] <- TRUE              # always keep activity
Data_select <- subset(Data_raw, select = is_both)

# 5. Replace the numeric activity codes (1..6) with their labels.
#    IDIOM FIX: the original looped over every row with six chained
#    if-statements; a vectorized lookup in activity_labels.txt is
#    equivalent and far faster.
act <- read.table("activity_labels.txt")
Data_select[, 2] <- as.character(act$V2[match(Data_select[, 2], act$V1)])

# 6. Write the tidy subset.
write.table(Data_select, "tidy_data.txt", row.names = FALSE)

# 7. Average every feature per (subject, activity) pair.
#    BUG FIX: the original called ddply() without ever loading plyr;
#    base aggregate() needs no extra package and yields the same means.
Data_average <- aggregate(Data_select[, 3:length(Data_select)],
                          by = list(subject = Data_select$subject,
                                    activity = Data_select$activity),
                          FUN = mean)

# 8. Write the averaged data set.
write.table(Data_average, "average_data.txt", row.names = FALSE)
|
3eb4eaba9f09dc43026d426dcdb335368f3cc04b
|
fe24de2e850eb944394cdd2be8e3eb976a1d5e23
|
/run_analysis.R
|
72e546fdb6b5a036d032ef98d5cecb8120f133b1
|
[] |
no_license
|
Tennlin/Getting-and-Clean-Data-project
|
ca91696244c08e14d66dfd7a4541f2d763f8dfe2
|
4e0d364a1aeaae57e81d315a533264ec842284a8
|
refs/heads/master
| 2021-01-10T15:12:38.017318
| 2015-11-22T17:21:54
| 2015-11-22T17:21:54
| 46,670,380
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,056
|
r
|
run_analysis.R
|
# Merge the UCI HAR train and test sets, attach activity/subject columns,
# keep the six acceleration mean/std features (first six feature columns),
# and write per-(activity, subject) averages to a tidy file.
# Reads the txt files from the current working directory.
run_analysis <- function(){
  x_train <- read.table("X_train.txt", sep = "")
  y_train <- read.table("y_train.txt", sep = "")
  sub_train <- read.table("subject_train.txt", sep = "")
  x_test <- read.table("X_test.txt", sep = "")
  y_test <- read.table("y_test.txt", sep = "")
  sub_test <- read.table("subject_test.txt", sep = "")
  fea_name <- read.table("features.txt", sep = "")

  # Append activity and subject columns, then stack train on top of test.
  x_train <- cbind(x_train, y_train)
  x_train <- cbind(x_train, sub_train)
  x_test <- cbind(x_test, y_test)
  x_test <- cbind(x_test, sub_test)
  mergedata <- rbind(x_train, x_test)

  # BUG FIX: the original assigned only the feature names to ALL columns,
  # which errors (length mismatch with the two appended columns); name
  # the appended columns explicitly instead.
  colnames(mergedata) <- c(as.character(fea_name$V2), "activity", "subject")

  # First six features plus the two id columns.
  mean_std <- mergedata[, c(1:6, ncol(mergedata) - 1, ncol(mergedata))]
  colnames(mean_std) <- c("AccmeanX", "AccmeanY", "AccmeanZ",
                          "AccstdX", "AccstdY", "AccstdZ",
                          "activity", "subject")

  # BUG FIX: aggregate(A+B+... ~ g) averages the SUM of the columns;
  # cbind() on the left-hand side keeps one mean per column.
  tidydata <- aggregate(cbind(AccmeanX, AccmeanY, AccmeanZ,
                              AccstdX, AccstdY, AccstdZ) ~ activity + subject,
                        data = mean_std, FUN = mean)

  # Portability fix: write to the working directory instead of a
  # hard-coded absolute path on one specific machine.
  write.table(tidydata, "tidy_data.txt", sep = "\t", row.names = FALSE)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.