blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9788843912191eee4fc46ea93223bc6e86ca80c8
|
a4493811b918ec3b77a0d819ef2d2ed4475c5cda
|
/funcoes.R
|
4198e689d627c3c80792c510dc2d5c26002f7f03
|
[
"MIT"
] |
permissive
|
GabrielReisR/shiny_corrs
|
8651fe938baf0c50233b686f5e70fbe778dcf6bd
|
4c452387e7c4494e726b085f1cdd30513ff14275
|
refs/heads/main
| 2023-06-06T04:56:09.331931
| 2021-06-26T21:54:55
| 2021-06-26T21:54:55
| 380,274,903
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,764
|
r
|
funcoes.R
|
# Nome: Understanding correlations
# Autor: Gabriel dos Reis Rodrigues
# June, 2021
# Last update: 2021-06-25
# ----------------------------------------
# Initial loading ====
# Install (if missing) and attach the packages this app depends on.
# Replaces the `if (!require(x)) install.packages(x); library(x)` pattern:
# require() already attaches on success (making the trailing library() call
# redundant) and warns noisily on failure; requireNamespace() is the
# recommended quiet availability check.
for (pkg in c("faux", "ggplot2", "plotly")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
  library(pkg, character.only = TRUE)
}
# Minimalist theme ====
# ggplot2 theme fragment layered on top of theme_classic() by corr_plot()
# below: removes the legend, minor grid lines, the major y grid, axis ticks,
# panel border, titles/subtitles and axis tick labels, keeping only the axis
# titles at size 14.
project_theme <-
theme(legend.position = "none",
panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank(),
panel.grid.major.y = element_blank(),
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
panel.border = element_blank(),
plot.title = element_blank(),
plot.subtitle = element_blank(),
axis.text = element_blank(),
axis.title = element_text(size = 14))
# Correlation plot function ====
#
# Scatter plot of two simulated normal variables with a given correlation.
#
# corr   - target Pearson correlation between X and Y (default 0)
# sample - number of simulated observations (default 1000)
# line   - if TRUE (default), overlay an OLS regression line
#
# Returns a ggplot object. faux::rnorm_multi() is called with
# empirical = TRUE so the *sample* correlation equals `corr` exactly;
# the seed is fixed for reproducibility across calls.
# Fixes: TRUE/FALSE instead of the reassignable T/F shortcuts, `if (line)`
# instead of `if (line == T)`, and the duplicated ggplot code is built once
# with the smoothing layer added conditionally.
corr_plot <- function(corr = 0,
                      sample = 1000,
                      line = TRUE){
  set.seed(42)
  df <- rnorm_multi(n = sample,
                    mu = c(20, 20),
                    sd = c(5, 5),
                    r = corr,
                    varnames = c("X", "Y"),
                    empirical = TRUE)

  # Base scatter; jitter reduces overplotting of coincident points.
  p <- ggplot(df, aes(x = X, y = Y)) +
    geom_point(alpha = 0.5, position = 'jitter', color = "#011e5a") +
    theme_classic() + project_theme

  if (isTRUE(line)) {
    # OLS fit line, no confidence ribbon.
    p <- p + stat_smooth(method = "lm", se = FALSE, color = "#011F5a", size = 1.2)
  }
  p
}
|
dd272251e97dd8bcacd64935a29a602653bd6249
|
a58657feb0655fe9b1be89dc8b88e3c07f71bc17
|
/source/Shiny/rCharts/Leaflet.R
|
ed48b55827959fb31eb2085b43530eba29271e3a
|
[] |
no_license
|
irichgreen/R_Practice
|
f026a16de6df62571ee296fd747e5f98e1824fa9
|
457f33a9c0051950cfa837a2387b7f28b43aa813
|
refs/heads/master
| 2020-05-22T04:13:08.815089
| 2017-01-14T01:58:19
| 2017-01-14T01:58:19
| 63,691,875
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,820
|
r
|
Leaflet.R
|
# Pause helper for the demos below: blocks until the user presses <return>,
# returning the (discarded) input invisibly.
..p. <- function() {
  invisible(readline("\nPress <return> to continue: "))
}
# NOTE(review): require() is used for attaching; library() would fail loudly
# if rCharts is missing, which is usually preferable in a script.
require(rCharts)
# Demo: Mapbox "control-room" tiles centred on Portland, OR.
map1 = Leaflet$new()
map1$setView(c(45.5236, -122.675), 13)
map1$tileLayer("http://a.tiles.mapbox.com/v3/mapbox.control-room/{z}/{x}/{y}.png", zoom = 8)
map1
..p.() # ================================
# Demo: fresh map recentred on Montreal, default tiles.
map1 = Leaflet$new()
map1$setView(c(45.50867, -73.55399), 13)
map1
..p.() # ================================
# Demo: OpenStreetMap tile layer.
map2 = Leaflet$new()
map2$setView(c(45.5236, -122.675), 10)
map2$tileLayer("http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png")
map2
..p.() # ================================
# Demo: CloudMade tiles over London with two popup markers and a circle.
map3 <- Leaflet$new()
map3$setView(c(51.505, -0.09), zoom = 13)
map3$tileLayer(
"http://{s}.tile.cloudmade.com/BC9A493B41014CAABB98F0471D759707/997/256/{z}/{x}/{y}.png",
maxZoom = 18
)
map3$marker(c(51.5, -0.09), bindPopup = "<p> Hi. I am a popup </p>")
map3$marker(c(51.495, -0.083), bindPopup = "<p> Hi. I am another popup </p>")
map3$show(cdn = T) # NOTE(review): T for TRUE -- prefer the unabbreviated form
map3$circle(c(51.5, -0.09))
..p.() # ================================
# Demo: Houston crime points pushed to the map via geocsv().
map4 = Leaflet$new()
map4$setView(c(29.6779, -95.4379), 10)
map4$tileLayer("http://{s}.tile.cloudmade.com/BC9A493B41014CAABB98F0471D759707/997/256/{z}/{x}/{y}.png")
# map4$tileLayer(provider = 'Stamen.Terrain')
data(crime, package = 'ggmap')
dat <- head(crime)[,c('lat', 'lon', 'offense')]
# Renamed lon -> lng; presumably the JS side expects 'lng' -- confirm.
names(dat) <- c('lat', 'lng', 'offense')
map4$geocsv(dat)
map4
..p.() # ================================
# Demo (disabled): circles from a geocoded CSV file; kept for reference.
#map5 = Leaflet$new()
#map5$setView(37.27667, -91.60611, 4)
#map5$tileLayer("http://{s}.tile.cloudmade.com/BC9A493B41014CAABB98F0471D759707/997/256/{z}/{x}/{y}.png")
#
#dat <- read.csv('geoCoded.csv')
#names(dat) <- c('address', 'lat', 'lng')
#dat <- transform(dat, color = 'red', fillColor = '#f03', fillOpacity = 0.5, radius = 10)
#map5$circle(dat)
#map5
..p.() # ================================
# Build an rCharts Leaflet map centred on a geocoded location string.
#
# location - free-text place name passed to ggmap::geocode()
# zoom     - initial zoom level
# provider - tile provider name understood by Leaflet$tileLayer()
#
# Returns the Leaflet reference object (callers add markers etc. to it).
# NOTE(review): ggmap::geocode() returns lon/lat; this passes coords$lat then
# coords$lon to setView() -- confirm that order matches setView's contract.
rMap <- function(location = 'montreal', zoom = 10, provider = 'MapQuestOpen.OSM') {
  coords <- as.list(ggmap::geocode(location))
  lmap <- Leaflet$new()
  lmap$setView(coords$lat, coords$lon, zoom = zoom)
  lmap$tileLayer(provider = provider)
  lmap
}
# Demo: helper-built map with a marker at McGill.
r1 <- rMap()
# NOTE(review): 'mcgill univesity' is misspelled in the geocode query. It is a
# runtime string, so it is left untouched here -- verify the geocoder still
# resolves it, or fix the spelling deliberately.
mcgill <- as.list(ggmap::geocode('mcgill univesity'))
r1$marker(mcgill$lat, mcgill$lon, bindPopup = 'mcgill university')
r1
..p.() # ================================
# Demo: Mt. Hood area with two labelled markers (map never displayed here).
map6 = Leaflet$new()
map6$setView(45.372, -121.6972, 12)
map6$tileLayer(provider ='Stamen.Terrain')
map6$marker(45.3288, -121.6625, bindPopup = 'Mt. Hood Meadows')
map6$marker(45.3311, -121.7113, bindPopup = 'Timberline Lodge')
..p.() # ================================
# Demo: provider-based tile layer (MapQuest Open).
map1b = Leaflet$new()
map1b$setView(c(45.5236, -122.675), zoom = 14)
map1b$tileLayer(provider = 'MapQuestOpen.OSM')
map1b
..p.() # ================================
# Demo: circle with an explicit radius argument.
map3 <- Leaflet$new()
map3$setView(c(51.505, -0.09), zoom = 13)
map3$tileLayer(
"http://{s}.tile.cloudmade.com/BC9A493B41014CAABB98F0471D759707/997/256/{z}/{x}/{y}.png",
maxZoom = 18
)
map3$circle(c(51.5, -0.09), 100)
..p.() # ================================
# Demo: Stamen Toner tiles with a popup marker and a 500 m circle, Portland.
map2 = Leaflet$new()
map2$setView(c(45.5236, -122.6750), 13)
map2$tileLayer(provider = 'Stamen.Toner')
map2$marker(c(45.5244, -122.6699), bindPopup = 'The Waterfront')
map2$circle(c(45.5215, -122.6261), radius = 500, bindPopup = 'Laurelhurst Park')
map2
..p.() # ================================
# devtools::install_github('rCharts', 'bbest') # tweak to make var geojsonLayer available
# Two rectangular test polygons as a GeoJSON FeatureCollection; region_id in
# each feature's properties drives the fill colour chosen in the JS below.
json = '{"type":"FeatureCollection","features":[
{"type":"Feature",
"properties":{"region_id":1, "region_name":"Australian Alps"},
"geometry":{"type":"Polygon","coordinates":[[[141.13037109375,-38.788345355085625],[141.13037109375,-36.65079252503469],[144.38232421875,-36.65079252503469],[144.38232421875,-38.788345355085625],[141.13037109375,-38.788345355085625]]]}},
{"type":"Feature",
"properties":{"region_id":4, "region_name":"Shark Bay"},
"geometry":{"type":"Polygon","coordinates":[[[143.10791015625,-37.75334401310656],[143.10791015625,-34.95799531086791],[146.25,-34.95799531086791],[146.25,-37.75334401310656],[143.10791015625,-37.75334401310656]]]}}
]}'
# Parse the GeoJSON string and build the base map for the choropleth demo.
regions <- RJSONIO::fromJSON(json)
lmap <- Leaflet$new()
# BUG FIX: the argument was misspelled `provide`, so the intended
# Stamen.TonerLite provider was most likely ignored (absorbed by `...`);
# every other call in this script spells it `provider`.
lmap$tileLayer(provider = 'Stamen.TonerLite')
lmap$setView(c(-37, 145), zoom = 6)
# Draw the regions with per-feature styling and interactivity. The "#! ... !#"
# delimiters mark literal JavaScript passed straight through by rCharts.
# NOTE(review): inside the JS, '<b> + props[fld] + </b>' is a single string
# literal (the + and fld are not interpolated) and `fld` is never defined --
# the info box likely shows that text verbatim; confirm and fix in the JS.
lmap$geoJson(
regions,
style = "#! function(feature) {
var rgn2col = {1:'red',2:'blue',4:'green'};
return {
color: rgn2col[feature.properties['region_id']],
strokeWidth: '1px',
strokeOpacity: 0.5,
fillOpacity: 0.2
}; } !#",
# Per-feature behaviour: hover info control, highlight on mouseover, zoom on click.
onEachFeature = "#! function (feature, layer) {
// info rollover
if (document.getElementsByClassName('info leaflet-control').length == 0 ){
info = L.control({position: 'topright'}); // NOTE: made global b/c not ideal place to put this function
info.onAdd = function (map) {
this._div = L.DomUtil.create('div', 'info');
this.update();
return this._div;
};
info.update = function (props) {
this._div.innerHTML = '<h4>Field Name</h4>' + (props ?
props['region_id'] + ': <b> + props[fld] + </b>'
: 'Hover over a region');
};
info.addTo(map);
};
// mouse events
layer.on({
// mouseover to highlightFeature
mouseover: function (e) {
var layer = e.target;
layer.setStyle({
strokeWidth: '3px',
strokeOpacity: 0.7,
fillOpacity: 0.5
});
if (!L.Browser.ie && !L.Browser.opera) {
layer.bringToFront();
}
info.update(layer.feature.properties);
},
// mouseout to resetHighlight
mouseout: function (e) {
geojsonLayer.resetStyle(e.target);
info.update();
},
// click to zoom
click: function (e) {
var layer = e.target;
if ( feature.geometry.type === 'MultiPolygon' ) {
// for multipolygons get true extent
var bounds = layer.getBounds(); // get the bounds for the first polygon that makes up the multipolygon
// loop through coordinates array, skip first element as the bounds var represents the bounds for that element
for ( var i = 1, il = feature.geometry.coordinates[0].length; i < il; i++ ) {
var ring = feature.geometry.coordinates[0][i];
var latLngs = ring.map(function(pair) {
return new L.LatLng(pair[1], pair[0]);
});
var nextBounds = new L.LatLngBounds(latLngs);
bounds.extend(nextBounds);
}
map.fitBounds(bounds);
} else {
// otherwise use native target bounds
map.fitBounds(e.target.getBounds());
}
}
});
} !#")
# Manual legend: colour -> qualitative label, rendered bottom-right.
legend_vec = c('red'='high', 'blue'='medium', 'green'='low')
lmap$legend(position = 'bottomright',
colors = names(legend_vec),
labels = as.vector(legend_vec))
lmap
..p.() # ================================
|
d618c35c5aa1b690fcc53ab2be2e38ce5281bcdd
|
8dfee68e3695253eb9aa719a2571ea5607a5311b
|
/R/drive_update.R
|
e3c08e5ee6dbf751008e11bd4b4712b7217323d7
|
[
"MIT"
] |
permissive
|
fuentesortiz/googledrive
|
49e7384a0749fbb9870821541e7b8e3ca1d7f735
|
20ffe8cb87ef180246fd3a94e00010879117aaa1
|
refs/heads/master
| 2023-03-07T17:37:20.406535
| 2020-11-19T21:47:22
| 2020-11-19T21:47:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,050
|
r
|
drive_update.R
|
#' Update an existing Drive file
#'
#' Update an existing Drive file id with new content ("media" in Drive
#' API-speak), new metadata, or both. To create a new file or update existing,
#' depending on whether the Drive file already exists, see [drive_put()].
#'
#' @seealso Wraps the `files.update` endpoint:
#' * <https://developers.google.com/drive/v3/reference/files/update>
#'
#' This function supports media upload:
#' * <https://developers.google.com/drive/v3/web/manage-uploads>
#'
#' @template file-singular
#' @template media
#' @template dots-metadata
#' @template verbose
#'
#' @template dribble-return
#' @export
#'
#' @examples
#' \dontrun{
#' ## Create a new file, so we can update it
#' x <- drive_upload(drive_example("chicken.csv"))
#'
#' ## Update the file with new media
#' x <- x %>%
#' drive_update(drive_example("chicken.txt"))
#'
#' ## Update the file with new metadata.
#' ## Notice here `name` is not an argument of `drive_update()`, we are passing
#' ## this to the API via the `...``
#' x <- x %>%
#' drive_update(name = "CHICKENS!")
#'
#' ## We can add a parent folder by passing `addParents` via `...`.
#' folder <- drive_mkdir("second-parent-folder")
#' x <- x %>%
#' drive_update(addParents = as_id(folder))
#' ## Verify the file now has multiple parents
#' purrr::pluck(x, "drive_resource", 1, "parents")
#'
#' ## Update the file with new media AND new metadata
#' x <- x %>%
#' drive_update(drive_example("chicken.txt"), name = "chicken-poem-again.txt")
#'
#' ## Clean up
#' drive_rm(x, folder)
#' }
drive_update <- function(file,
                         media = NULL,
                         ...,
                         verbose = TRUE) {
  # Fail fast if a media path was given but does not exist locally.
  if (!is.null(media) && !file.exists(media)) {
    stop_glue("\nLocal file does not exist:\n * {media}")
  }

  file <- confirm_single_file(as_dribble(file))
  meta <- toCamel(rlang::list2(...))

  # Nothing to send: neither new media nor metadata was supplied.
  if (is.null(media) && length(meta) == 0) {
    if (verbose) message("No updates specified.")
    return(invisible(file))
  }

  # Ask the API to return all fields unless the caller overrode `fields`.
  meta[["fields"]] <- meta[["fields"]] %||% "*"

  # Dispatch on which pieces are being updated.
  out <- if (is.null(media)) {
    drive_update_metadata(file, meta)
  } else if (length(meta) == 0) {
    drive_update_media(file, media)
  } else {
    drive_update_multipart(file, media, meta)
  }

  if (verbose) {
    message_glue("\nFile updated:\n * {out$name}: {out$id}")
  }
  invisible(out)
}
## currently this can never be called, because we always send fields
# Media-only update: replace the file's content, requesting all fields back.
drive_update_media <- function(file, media) {
  req <- request_generate(
    endpoint = "drive.files.update.media",
    params = list(fileId = file$id, uploadType = "media", fields = "*")
  )
  ## media uploads have unique body situations, so customizing here.
  req$body <- httr::upload_file(path = media)
  resp <- request_make(req, encode = "json")
  as_dribble(list(gargle::response_process(resp)))
}
# Metadata-only update: patch the file's metadata, no media upload.
drive_update_metadata <- function(file, meta) {
  req <- request_generate(
    endpoint = "drive.files.update",
    params = c(fileId = file$id, meta)
  )
  resp <- request_make(req, encode = "json")
  as_dribble(list(gargle::response_process(resp)))
}
# Update both media and metadata in one multipart request: the metadata is
# serialized to a temporary JSON file sent as the first part, the media file
# is the second part.
drive_update_multipart <- function(file, media, meta) {
  request <- request_generate(
    endpoint = "drive.files.update.media",
    params = c(
      fileId = file$id,
      uploadType = "multipart",
      ## We provide the metadata here even though it's overwritten below,
      ## so that request_generate() still validates it.
      meta
    )
  )
  meta_file <- tempfile()
  # add = TRUE so this cleanup cannot clobber (or be clobbered by) any other
  # exit handler registered in this frame.
  on.exit(unlink(meta_file), add = TRUE)
  writeLines(jsonlite::toJSON(meta), meta_file)
  ## media uploads have unique body situations, so customizing here.
  request$body <- list(
    metadata = httr::upload_file(
      path = meta_file,
      type = "application/json; charset=UTF-8"
    ),
    media = httr::upload_file(path = media)
  )
  response <- request_make(request, encode = "multipart")
  as_dribble(list(gargle::response_process(response)))
}
|
0071177fe9033e65128cc1f3d01b89829d041aef
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/B_analysts_sources_github/Robinlovelace/cycling-chd/analysis-minap.R
|
6df45437ce50c5ed43f8f3ffdbbb0161fd1d14a5
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,966
|
r
|
analysis-minap.R
|
##########################################
######## Cross-sectional Analyses ########
##########################################
# Libraries
library(data.table)
library(plyr)
library(MASS) # For Negative Binomial Regression
library(AER) # Test for over-dispersion
library(pscl) # Test for over-dispersion
# Load data
# minap_msoas <- readRDS("data/msoas_observed_expected_counts.Rds")
source("R/process-minap.R") # defines minap_msoas used below
# Aggregate observed and expected admissions by MSOA: overall and by sex
dt <- data.table(minap_msoas)
msoa_persons <- dt[, list(admissions = sum(admissions, na.rm = TRUE), expt_adms = sum(expt_adms, na.rm = TRUE)),
by = c("msoa_code")]
msoa_sex <- dt[, list(admissions = sum(admissions, na.rm = TRUE), expt_adms = sum(expt_adms, na.rm = TRUE)),
by = c("sex", "msoa_code")]
msoa_males <- msoa_sex[msoa_sex$sex=="Male"]
msoa_females <- msoa_sex[msoa_sex$sex=="Female"]
# Free large intermediates before loading the transport data
rm(minap_msoas)
rm(msoa_sex)
rm(dt)
gc()
# Load transport data for MSOAs
msoa_transport <- readRDS("data/msoas.Rds") # Load
msoa_transport$msoa_code <- msoa_transport$geo_code
msoa_transport$geo_code <- NULL
# Calculate exposure variables (mode share as % of all commuters)
msoa_transport$pc_cycle <- (msoa_transport$Bicycle / msoa_transport$All) * 100 # cycle
msoa_transport$pc_walk <- (msoa_transport$foot / msoa_transport$All) * 100 # walk
msoa_transport$pc_car <- (msoa_transport$Car / msoa_transport$All) * 100 # car
# msoa_transport <- msoa_transport[,5:8] # drop variables not needed
# Join on cycling data
# NOTE(review): `@data` suggests msoa_transport is an sp spatial object and
# only its attribute table is joined -- confirm.
msoa_p <- join(msoa_persons, msoa_transport@data, by = c("msoa_code"), type = "left", match = "all")
msoa_m <- join(msoa_males, msoa_transport@data, by = c("msoa_code"), type = "left", match = "all")
msoa_f <- join(msoa_females, msoa_transport@data, by = c("msoa_code"), type = "left", match = "all")
rm(msoa_transport)
rm(msoa_persons)
rm(msoa_females)
rm(msoa_males)
# Drop missing data (i.e. only england MSOAs - n=6147)
eng_p <- na.omit(msoa_p)
eng_m <- na.omit(msoa_m)
eng_f <- na.omit(msoa_f)
rm(msoa_p)
rm(msoa_f)
rm(msoa_m)
##### Statistical Analysis #####
### Persons level analysis ###
# Check distribution of outcome variable
hist(eng_p$admissions)
summary(eng_p$admissions) # Note no MSOAs with 0 admissions

## Poisson regression model ##
# Expected admissions enter as a log offset, so coefficients are rate ratios.
model_p <- glm(admissions ~ pc_cycle, family = "poisson", data = eng_p, offset = log(expt_adms))
# Goodness of Fit test [chi-square test based on the residual deviance and degrees of freedom]
1 - pchisq(summary(model_p)$deviance,    # We want this to be p > 0.05
           summary(model_p)$df.residual) # If p>0.05 then suggests Poisson model fits data well
# GOF 2
qchisq(0.95, df.residual(model_p)) # Five-percent critical value for a chi-squared with model df
deviance(model_p)                  # we want the deviance lower than the above number
pr <- residuals(model_p, "pearson") # Pearson chi-square
sum(pr^2)                           # also want this lower

## Negative Binomial Regression ##
# BUG FIX: the offset must be on the log scale -- log(expt_adms) -- to match
# the Poisson model above; the previous offset(expt_adms) misspecified the
# rate model and invalidated the AIC/odTest comparisons below.
model_nb <- glm.nb(admissions ~ pc_cycle + offset(log(expt_adms)), data = eng_p)
# Goodness of fit (improvement from Poisson model)
1 - pchisq(summary(model_nb)$deviance,
           summary(model_nb)$df.residual)
qchisq(0.95, df.residual(model_nb))
deviance(model_nb)
pr <- residuals(model_nb, "pearson")
sum(pr^2)

## Test model assumptions ##
dispersiontest(model_p, trafo = 1) # Overdispersion present if larger than 0 (which it is)
odTest(model_nb) # compares log-likelihood ratios of NegBin model to Poisson approach - here we can reject the Poisson model in favour of NegBin (i.e. p significant)
AIC(model_p, model_nb) # lower is better model
vuong(model_p, model_nb) # model which is significant is better

## Results ##
summary(model_nb)
cbind(exp(coef(model_nb)), exp(confint(model_nb))) # Convert to IRRs (take p from summary(model_nb))
### Males analysis ###
# Check distribution of outcome variable
hist(eng_m$admissions) # hist(eng_m$admissions[eng_m$admissions<30]) easier to see
summary(eng_m$admissions)

## Poisson regression model ##
# Expected admissions enter as a log offset, so coefficients are rate ratios.
model_p <- glm(admissions ~ pc_cycle, family = "poisson", data = eng_m, offset = log(expt_adms))
# Goodness of Fit test [chi-square test based on the residual deviance and degrees of freedom]
1 - pchisq(summary(model_p)$deviance,    # We want this to be p > 0.05
           summary(model_p)$df.residual) # If p>0.05 then suggests Poisson model fits data well
# GOF 2
qchisq(0.95, df.residual(model_p)) # Five-percent critical value for a chi-squared with model df
deviance(model_p)                  # we want the deviance lower than the above number
pr <- residuals(model_p, "pearson") # Pearson chi-square
sum(pr^2)                           # also want this lower

## Negative Binomial Regression ##
# BUG FIX: offset put on the log scale to match the Poisson model above
# (was offset(expt_adms), which misspecifies the rate model).
model_nb <- glm.nb(admissions ~ pc_cycle + offset(log(expt_adms)), data = eng_m)
# Goodness of fit (improvement from Poisson model)
1 - pchisq(summary(model_nb)$deviance,
           summary(model_nb)$df.residual)
qchisq(0.95, df.residual(model_nb))
deviance(model_nb)
pr <- residuals(model_nb, "pearson")
sum(pr^2)

## Test model assumptions ##
dispersiontest(model_p, trafo = 1) # Overdispersion present if larger than 0 (which it is)
odTest(model_nb) # compares log-likelihood ratios of NegBin model to Poisson approach - here we can reject the Poisson model in favour of NegBin (i.e. p significant)
AIC(model_p, model_nb) # lower is better model
vuong(model_p, model_nb) # no difference

## Zero inflated NegBin model ##
model_zi <- zeroinfl(admissions ~ pc_cycle, data = eng_m, offset = log(expt_adms), dist = "negbin", EM = TRUE)
AIC(model_nb, model_zi) # Zi model appears better but not entirely clear
vuong(model_nb, model_zi)

## Results ##
# Method seems to matter so not sure which is better
summary(model_nb)
cbind(exp(coef(model_nb)), exp(confint(model_nb))) # Convert to IRRs (take p from summary(model_nb))
summary(model_zi)
cbind(exp(coef(model_zi)), exp(confint(model_zi))) # Convert to IRRs (take p from summary(model_zi))
### Females analysis ###
# Check distribution of outcome variable
hist(eng_f$admissions) # hist(eng_f$admissions[eng_f$admissions<30]) easier to see
summary(eng_f$admissions)

## Poisson regression model ##
# Expected admissions enter as a log offset, so coefficients are rate ratios.
model_p <- glm(admissions ~ pc_cycle, family = "poisson", data = eng_f, offset = log(expt_adms))
# Goodness of Fit test [chi-square test based on the residual deviance and degrees of freedom]
1 - pchisq(summary(model_p)$deviance,    # We want this to be p > 0.05
           summary(model_p)$df.residual) # If p>0.05 then suggests Poisson model fits data well
# GOF 2
qchisq(0.95, df.residual(model_p)) # Five-percent critical value for a chi-squared with model df
deviance(model_p)                  # we want the deviance lower than the above number
pr <- residuals(model_p, "pearson") # Pearson chi-square
sum(pr^2)                           # also want this lower

## Negative Binomial Regression ##
# BUG FIX: offset put on the log scale to match the Poisson model above
# (was offset(expt_adms), which misspecifies the rate model).
model_nb <- glm.nb(admissions ~ pc_cycle + offset(log(expt_adms)), data = eng_f)
# Goodness of fit (improvement from Poisson model)
1 - pchisq(summary(model_nb)$deviance,
           summary(model_nb)$df.residual)
qchisq(0.95, df.residual(model_nb))
deviance(model_nb)
pr <- residuals(model_nb, "pearson")
sum(pr^2)

## Test model assumptions ##
dispersiontest(model_p, trafo = 1) # Overdispersion present if larger than 0 (which it is)
odTest(model_nb) # compares log-likelihood ratios of NegBin model to Poisson approach - here we can reject the Poisson model in favour of NegBin (i.e. p significant)
AIC(model_p, model_nb) # lower is better model
vuong(model_p, model_nb) # no difference

## Zero inflated NegBin model ##
model_zi <- zeroinfl(admissions ~ pc_cycle, data = eng_f, offset = log(expt_adms), dist = "negbin", EM = TRUE)
AIC(model_nb, model_zi) # Zi model appears better but not entirely clear
vuong(model_nb, model_zi)

## Results ##
# Method seems to matter so not sure which is better
summary(model_nb)
cbind(exp(coef(model_nb)), exp(confint(model_nb))) # Convert to IRRs (take p from summary(model_nb))
summary(model_zi)
cbind(exp(coef(model_zi)), exp(confint(model_zi))) # Convert to IRRs (take p from summary(model_zi))
|
56b1b365b7b1579bdfa08b8f1d43492e0c7912e7
|
57e96f47bb38efa99dea6e52e01d4e12ba79ac5b
|
/src/utils.R
|
bf101181371a78664789e374656dc6b0585eab77
|
[] |
no_license
|
maheshkkolla/learnR
|
f5019b66761d9a4c69d4887018e827e8352bfd63
|
cc912acd2707d322106f2db0b96637f2b7265a16
|
refs/heads/master
| 2021-01-01T03:46:10.078908
| 2016-05-18T13:25:15
| 2016-05-18T13:25:15
| 58,992,147
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 234
|
r
|
utils.R
|
# Small numeric predicates shared across the project. Each is vectorized
# because the underlying comparison operators are.

# TRUE where `number` equals 1.
isOne <- function(number) {
  number == 1
}

# TRUE where `number` equals 2.
isTwo <- function(number) {
  number == 2
}

# TRUE where `number` equals 0.
isZero <- function(number) {
  number == 0
}

# TRUE where `number` is strictly below zero.
isNegative <- function(number) {
  number < 0
}

# TRUE where `number` is zero or strictly below zero.
isZeroOrNegative <- function(number) {
  isZero(number) || isNegative(number)
}
|
9b3d64a672b2a493daa0e22b05689d9ef7d1ae8e
|
79f980b8a424cb3852058008b5869f9259e77394
|
/R/weather.R
|
ac8176aa1686227dc15db7f0f80339a99e8edfab
|
[] |
no_license
|
rjsteckel/boilkettle
|
762281d2a46c775b9b96b59540d7f965f9902baf
|
0b05405c3d6bf7d7fdb32fb4989dcd7e3826c977
|
refs/heads/master
| 2021-09-18T00:07:20.707889
| 2018-07-07T15:59:28
| 2018-07-07T15:59:28
| 115,065,461
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 136
|
r
|
weather.R
|
# Fetch the 3-day forecast for a US zip code.
# NOTE(review): relies on the WUNDERGROUND_KEY environment variable being set;
# set_api_key()/set_location()/forecast3day() appear to come from the
# rwunderground package -- confirm against the project's dependencies.
weather_forecast <- function(zipcode) {
  set_api_key(Sys.getenv('WUNDERGROUND_KEY'))
  location <- set_location(zip_code = zipcode)
  forecast3day(location)
}
|
0e6ce552320ae78a0c5206421ec089f49dec8540
|
74fe29da37e54fb5e49a1ae7d4cf5051428202eb
|
/demo/example_hl_competitiveness.R
|
ed2886163e3c55d1bfccfd7cb814fac5d36dccc1
|
[] |
no_license
|
CRAFTY-ABM/craftyr
|
7fd8e63f85f4ddc13fbb0a79b67710a7b5a818f2
|
5630d1f0e4a1b1c34e3d10740640d414346f1af4
|
refs/heads/master
| 2022-08-11T13:20:13.579266
| 2018-06-16T06:55:19
| 2018-06-16T06:55:19
| 266,212,786
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 155
|
r
|
example_hl_competitiveness.R
|
# Competitiveness functions, one per service; each scales its input by 100.
# (c() on closures yields a named list, so list() here is equivalent.)
cfuncs <- list(
  "Cereal"     = function(x) 100 * x,
  "Meat"       = function(x) 100 * x,
  "Recreation" = function(x) 100 * x,
  "Timber"     = function(x) 100 * x
)
|
95ba164daaa0efa8257ae03cbb262fc380d47784
|
fff251aa07e97496f6ea1785b6b8109723ad5510
|
/tests/testthat/test-article.R
|
98ab201ceba18dc1631fbe0a9c5386d1f7243ae9
|
[
"Apache-2.0"
] |
permissive
|
kevinykuo/radix
|
509ac2015b33aba816153aa52147ed4ef0185f6d
|
6c79cd0c42a20b494ddf63adcc0a5d2bd2861c0a
|
refs/heads/master
| 2020-04-11T18:02:23.198125
| 2018-12-14T17:19:42
| 2018-12-14T17:19:42
| 161,983,999
| 0
| 0
|
Apache-2.0
| 2018-12-16T08:58:01
| 2018-12-16T08:58:01
| null |
UTF-8
|
R
| false
| false
| 183
|
r
|
test-article.R
|
# testthat context for the article output-format tests.
context("article")
# Shared helpers (e.g. skip_if_pandoc_not_installed) are defined in utils.R.
source("utils.R")
test_that("radix articles can be created", {
# Rendering formats require pandoc; skip on machines without it.
skip_if_pandoc_not_installed()
expect_s3_class(radix_article(), "rmarkdown_output_format")
})
|
a9806fc1ee51d42d6642d0574b7d1d9b8a8e03ac
|
0204a92ca1094acb54ae9ddd8a418ea1fae11d83
|
/bridge_results.R
|
1b20e3dafef91323fa88472a19c3d64f7f8e9a6a
|
[] |
no_license
|
Hel1vs/Bridge
|
379223f53a461c9cde3c6cace260184e75996e3f
|
270aaeefc7dde79347cb3807e13e68700a8087d3
|
refs/heads/master
| 2023-04-17T18:10:58.376242
| 2022-03-08T20:34:15
| 2022-03-08T20:34:15
| 285,506,442
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 124,187
|
r
|
bridge_results.R
|
# Load data ---------------------------------------------------------------
# NOTE(review): hard-coded absolute working directory ties this script to a
# single machine; consider a project-relative path.
setwd("~/disks/y/Project/E555163_COMMIT/Data/Database/Snapshots/Scripts/R/Bridge/Bridge")

# Configuration selectors consumed by load_data.R (sourced below); the
# alternatives in the trailing comments are the COMMIT-project variants.
config <- "config_bridge" #"config_COMMIT"
scencateg <- "scen_categ_bridge" #"scen_categ_COMMIT"
variables <- "variables_bridge" #"variables_xCut"
adjust <- "adjust_reporting_COMMIT"
addvars <- FALSE  # use TRUE/FALSE, never T/F (T and F can be reassigned)
datafile <- "commit_bridge_compare_20210517-142459" #commit_cd-links_compare_20191015-114544
source("load_data.R")  # populates `all` (data.table of scenario results) and `cfg`

# Check whether there's only one scenario per category for each model
check <- all[, list(unique(scenario)), by = c("model", "Category")]
View(check) #TODO Check Bridge IPAC included in graphs?
check2 <- check[, list(length(unique(V1))), by = c("model", "Category")]
View(check2)

# For models with NDCplus, NDCMCS is outdated so remove. For others, keep
# using NDCMCS until NDCplus is submitted.
check3 <- check[Category == "NDCplus"]
View(check3)
all <- all[!c(Category == "NDCMCS" & model %in% unique(check3$model))]
# For REMIND, only global model with NDCMCS, label it NDCplus to show up in the same bar / statistics
#all[model=="REMIND-MAgPIE 1.7-3.0"&Category=="NDCMCS"]$Category<-"NDCplus"

# Load functions and libraries for plotting
source("functions/plot_LineNationalScens.R")
source("functions/plotstyle.R")
library(grid)
library(gridExtra)
library(xlsx)

# Ensure `period` is numeric; it can arrive as factor/character from the
# snapshot file, which silently breaks the x-axes below.
all$period <- as.numeric(as.character(all$period))

# Make a selection for WP2 paper Panagiotis: China scenarios
#wp2 = all[model%in%c("*PECE V2.0","*IPAC-AIM/technology V1.0")]
#write.csv(wp2,"WP2_China.csv")

# Adjust COFFEE name manually until 1.1 registered
all[model == "COPPE-COFFEE 1.0"]$model <- "COPPE-COFFEE 1.1"

# Read in IAMC 1.5 scenario explorer data for comparison; year columns start
# at column 8 and are reshaped to long format.
IPCC15 <- fread("data/iamc-1.5c-explorer_snapshot_CD-LINKS_SSP_exGCAM.csv", sep = ";", header = TRUE)
IPCC15 <- gather(IPCC15, 8:ncol(IPCC15), key = "period", value = value)
IPCC15$Scope <- "global"
IPCC15$period <- as.numeric(as.character(IPCC15$period))
all <- rbind(all, IPCC15)

# Read in IMAGE 2Deg2020 data, corrected on 3 November 2021 because of a
# reporting error in the carbon price and policy cost.
IMAGE2deg <- fread("data/2Deg2020_IMAGE_correction.csv", sep = ";", header = TRUE)
IMAGE2deg <- data.table(gather(IMAGE2deg, 6:ncol(IMAGE2deg), key = "period", value = value))
IMAGE2deg <- IMAGE2deg[period %in% c(2005:2100) & variable %in% c("Price|Carbon", "Policy Cost|Default for CAV", "Policy Cost|Area under MAC Curve")]
IMAGE2deg$Scope <- "global"
IMAGE2deg$Category <- "2Deg2020"
IMAGE2deg$Baseline <- "BAU"
IMAGE2deg$period <- as.numeric(as.character(IMAGE2deg$period))
IMAGE2deg$value <- as.numeric(as.character(IMAGE2deg$value))
setcolorder(IMAGE2deg, colnames(all))
# Swap the affected IMAGE 3.0 2Deg2020 series for the corrected values.
all <- rbind(all[!c(model == "IMAGE 3.0" & variable %in% c("Price|Carbon", "Policy Cost|Area under MAC Curve") & Category == "2Deg2020")], IMAGE2deg)
# Plot emissions ----------------------------------------------------------
# One panel per country: national-model lines against global-model results
# for total Kyoto-gas emissions, assembled into a 2x6 grid with one shared
# legend extracted from the Japan panel.
vars = "Emissions|Kyoto Gases"
scens <- c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")
# scensglob <- c("NPi","2030_low")
# scensnat <- c("NPi","2030_low")
a<-plot_lineNationalScens(reg = "AUS", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = "GHG emissions [MtCO2e]", title="Australia (TIMES-AUS)",file_pre = "GHG") #,ylim=c(-300,1200) #,nolegend=T
b<-plot_lineNationalScens(reg = "BRA", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = "GHG emissions [MtCO2e]", title="Brazil (BLUES)",file_pre = "GHG") #,ylim=c(-300,1200) #,nolegend=T
ca<-plot_lineNationalScens(reg = "CAN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                           ylab = "GHG emissions [MtCO2e]", title="Canada (GCAM_Canada)", file_pre = "GHG")
c<-plot_lineNationalScens(reg = "CHN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = "GHG emissions [MtCO2e]", title="China (IPAC)", file_pre = "GHG")
e<-plot_lineNationalScens(reg = "EU", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = "GHG emissions [MtCO2e]", title="EU (PRIMES: -, GEM-E3: --)", file_pre = "GHG") #,ylim=c(0,8000)
j<-plot_lineNationalScens(reg = "JPN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = "GHG emissions [MtCO2e]",title="Japan (AIM/E-NIES)", file_pre = "GHG") #,ylim=c(-200,1600)
r<-plot_lineNationalScens(reg = "RUS", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = "GHG emissions [MtCO2e]", title="Russia (RU-TIMES)",file_pre = "GHG") #,ylim=c(0,2500)
i<-plot_lineNationalScens(reg = "IND", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = "GHG emissions [MtCO2e]", title="India (IND-MARKAL)", file_pre = "GHG") #,ylim=c(0,15000)
id<-plot_lineNationalScens(reg = "IDN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                           ylab = "GHG emissions [MtCO2e]", title="Indonesia (DDPP Energy)", file_pre = "GHG") #,ylim=c(0,15000)
u<-plot_lineNationalScens(reg = "USA", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = "GHG emissions [MtCO2e]", title="USA (GCAM_USA)", file_pre = "GHG") #,ylim=c(-500,8000)
k<-plot_lineNationalScens(reg = "ROK", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = "GHG emissions [MtCO2e]", title="Korea (AIM/CGE[Korea])", file_pre = "GHG") #,ylim=c(-500,8000)
w<-plot_lineNationalScens(reg = "World", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = "GHG emissions [MtCO2e]", title="World", file_pre = "GHG") #,ylim=c(-500,8000)
# Pull the legend grob ("guide-box") out of one panel so a single shared
# legend can occupy the last grid cell.
tmp<-ggplot_gtable(ggplot_build(j))
leg<-which(sapply(tmp$grobs,function(x) x$name) =="guide-box")
legend<-tmp$grobs[[leg]]
# Drop the per-panel legends and enlarge text before assembling the grid.
a=a+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
b=b+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
c=c+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
ca=ca+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
e=e+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
i=i+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
id=id+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
j=j+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
r=r+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
u=u+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
k=k+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
w=w+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
# NOTE(review): the World panel `w` is produced (and saved by the plot
# function) but intentionally not placed in the 2x6 grid below.
lay<-rbind(c(1,2,3,4,5,6),c(7,8,9,10,11,12))
h=grid.arrange(a,b,c,ca,e,i,id,j,r,u,k,legend,layout_matrix=lay)
ggsave(file=paste(cfg$outdir,"/GHG_natscens_gridarrange.png",sep=""),h,width=24,height=14,dpi=200)
# AFOLU emissions ----------------------------------------------------------
# Same panel-per-country layout as the Kyoto-gas grid, for land-use CO2.
vars = "Emissions|CO2|AFOLU"
scens <- c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")
ylab = "AFOLU CO2 emissions (MtCO2/year)"
file_pre = "CO2-AFOLU"
a<-plot_lineNationalScens(reg = "AUS", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="Australia (TIMES-AUS)",file_pre = file_pre,nolegend=TRUE) #,ylim=c(-300,1200)
b<-plot_lineNationalScens(reg = "BRA", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="Brazil (BLUES)",file_pre = file_pre,nolegend=TRUE) #,ylim=c(-300,1200)
ca<-plot_lineNationalScens(reg = "CAN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                           ylab = ylab, title="Canada (GCAM_Canada)", file_pre = file_pre)
c<-plot_lineNationalScens(reg = "CHN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="China (IPAC)", file_pre = file_pre)
e<-plot_lineNationalScens(reg = "EU", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="EU (PRIMES: -, GEM-E3: --)", file_pre = file_pre) #,ylim=c(0,8000)
j<-plot_lineNationalScens(reg = "JPN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab,title="Japan (AIM/E-NIES)", file_pre = file_pre) #,ylim=c(-200,1600)
r<-plot_lineNationalScens(reg = "RUS", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="Russia (RU-TIMES)",file_pre = file_pre) #,ylim=c(0,2500)
i<-plot_lineNationalScens(reg = "IND", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="India (IND-MARKAL)", file_pre = file_pre) #,ylim=c(0,15000)
id<-plot_lineNationalScens(reg = "IDN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                           ylab = ylab, title="Indonesia (DDPP Energy)", file_pre = file_pre) #,ylim=c(0,15000)
u<-plot_lineNationalScens(reg = "USA", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="USA (GCAM_USA)", file_pre = file_pre) #,ylim=c(-500,8000)
k<-plot_lineNationalScens(reg = "ROK", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="Korea (AIM/CGE[Korea])", file_pre = file_pre) #,ylim=c(-500,8000)
w<-plot_lineNationalScens(reg = "World", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="World", file_pre = file_pre) #,ylim=c(-500,8000)
# Shared legend comes from the Japan panel's "guide-box" grob.
tmp<-ggplot_gtable(ggplot_build(j))
leg<-which(sapply(tmp$grobs,function(x) x$name) =="guide-box")
legend<-tmp$grobs[[leg]]
# Strip per-panel legends and enlarge text before assembling the grid.
a=a+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
b=b+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
c=c+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
ca=ca+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
e=e+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
i=i+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
id=id+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
j=j+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
r=r+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
u=u+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
k=k+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
w=w+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
lay<-rbind(c(1,2,3,4,5,6),c(7,8,9,10,11,12))
h=grid.arrange(a,b,c,ca,e,i,id,j,r,u,k,legend,layout_matrix=lay)
ggsave(file=paste(cfg$outdir,"/CO2-AFOLU_natscens_gridarrange.png",sep=""),h,width=24,height=14,dpi=200)
# CO2 emissions ------------------------------------------------------------
# Same panel-per-country layout, for total CO2.
vars = "Emissions|CO2"
scens <- c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")
ylab = "CO2 emissions (MtCO2/year)"
file_pre = "CO2"
a<-plot_lineNationalScens(reg = "AUS", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="Australia (TIMES-AUS)",file_pre = file_pre,nolegend=TRUE) #,ylim=c(-300,1200)
b<-plot_lineNationalScens(reg = "BRA", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="Brazil (BLUES)",file_pre = file_pre,nolegend=TRUE) #,ylim=c(-300,1200)
ca<-plot_lineNationalScens(reg = "CAN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                           ylab = ylab, title="Canada (GCAM_Canada)", file_pre = file_pre)
c<-plot_lineNationalScens(reg = "CHN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="China (IPAC)", file_pre = file_pre)
e<-plot_lineNationalScens(reg = "EU", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="EU (PRIMES: -, GEM-E3: --)", file_pre = file_pre) #,ylim=c(0,8000)
j<-plot_lineNationalScens(reg = "JPN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab,title="Japan (AIM/E-NIES)", file_pre = file_pre) #,ylim=c(-200,1600)
r<-plot_lineNationalScens(reg = "RUS", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="Russia (RU-TIMES)",file_pre = file_pre) #,ylim=c(0,2500)
i<-plot_lineNationalScens(reg = "IND", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="India (IND-MARKAL)", file_pre = file_pre) #,ylim=c(0,15000)
id<-plot_lineNationalScens(reg = "IDN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                           ylab = ylab, title="Indonesia (DDPP Energy)", file_pre = file_pre) #,ylim=c(0,15000)
u<-plot_lineNationalScens(reg = "USA", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="USA (GCAM_USA)", file_pre = file_pre) #,ylim=c(-500,8000)
k<-plot_lineNationalScens(reg = "ROK", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="Korea (AIM/CGE[Korea])", file_pre = file_pre) #,ylim=c(-500,8000)
w<-plot_lineNationalScens(reg = "World", dt = all, vars = vars, scensnat = scens, scensglob = scens,
                          ylab = ylab, title="World", file_pre = file_pre) #,ylim=c(-500,8000)
# Shared legend comes from the Japan panel's "guide-box" grob.
tmp<-ggplot_gtable(ggplot_build(j))
leg<-which(sapply(tmp$grobs,function(x) x$name) =="guide-box")
legend<-tmp$grobs[[leg]]
# Strip per-panel legends and enlarge text before assembling the grid.
a=a+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
b=b+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
c=c+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
ca=ca+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
e=e+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
i=i+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
id=id+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
j=j+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
r=r+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
u=u+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
k=k+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
w=w+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
lay<-rbind(c(1,2,3,4,5,6),c(7,8,9,10,11,12))
h=grid.arrange(a,b,c,ca,e,i,id,j,r,u,k,legend,layout_matrix=lay)
ggsave(file=paste(cfg$outdir,"/CO2_natscens_gridarrange.png",sep=""),h,width=24,height=14,dpi=200)
# Quick-look plot: IMAGE 3.0 Kyoto-gas emissions, one facet per region.
vars <- "Emissions|Kyoto Gases"
i <- ggplot(all[variable %in% vars & Category %in% scens & model == "IMAGE 3.0"]) + # & !region=="World"
  geom_line(aes(x = period, y = value, linetype = model, colour = Category)) +
  scale_colour_manual(values = plotstyle(scens)) +
  #e = e + scale_linetype_manual(values=cfg$man_lines)# TODO use plotstyle for linetypes per model or cfg$man_lines?
  facet_wrap(~region, scales = "free_y") +
  ylab(paste(unique(all[variable %in% vars]$variable), "[", unique(all[variable %in% vars]$unit), "]")) +
  theme_bw()
i
ggsave(file = paste(cfg$outdir, "/GHG_IMAGE.png", sep = ""), i, width = 18, height = 14, dpi = 200)
# Quick-look plot: Kyoto-gas emissions for every global model (region x model
# grid), excluding national-scope rows and Turkey.
vars <- "Emissions|Kyoto Gases"
m <- ggplot(all[variable %in% vars & Category %in% scens & !Scope == "national" & !region == "TUR"]) + # & !region=="World"
  geom_line(aes(x = period, y = value, colour = Category)) +
  scale_colour_manual(values = plotstyle(scens)) +
  #e = e + scale_linetype_manual(values=cfg$man_lines)# TODO use plotstyle for linetypes per model or cfg$man_lines?
  facet_grid(region ~ model, scales = "free_y") +
  ylab(paste(unique(all[variable %in% vars]$variable), "[", unique(all[variable %in% vars]$unit), "]")) +
  theme_bw() +
  theme(axis.text.y = element_text(size = 14)) + theme(strip.text = element_text(size = 14)) +
  theme(axis.title = element_text(size = 18)) +
  theme(axis.text.x = element_text(size = 14)) +
  theme(legend.text = element_text(size = 11), legend.title = element_text(size = 12))
m
ggsave(file = paste(cfg$outdir, "/GHG_all_global_models.png", sep = ""), m, width = 16, height = 14, dpi = 200)
# Quick-look plot: CO2 emissions from the national models only, one facet per
# region, restricted to 2005-2050.
vars <- "Emissions|CO2"
m <- ggplot(all[variable %in% vars & Category %in% scens & !Scope == "global" & !region %in% c("TUR", "World", "R5OECD90+EU")]) +
  geom_line(aes(x = period, y = value, colour = Category)) +
  xlim(2005, 2050) +
  scale_colour_manual(values = plotstyle(scens)) +
  #e = e + scale_linetype_manual(values=cfg$man_lines)# TODO use plotstyle for linetypes per model or cfg$man_lines?
  facet_wrap(~region, scales = "free_y") +
  ylab(paste(unique(all[variable %in% vars]$variable), "[", unique(all[variable %in% vars]$unit), "]")) +
  theme_bw()
m
ggsave(file = paste(cfg$outdir, "/CO2_all_national_models.png", sep = ""), m, width = 18, height = 10, dpi = 200)
# plot build-up -------------------------------------------------------------
# Six world-level plots (m1..m6) that add scenario categories one group at a
# time, for presentation "build-up" slides.
# FIX(review): the filters below were corrupted by an HTML-entity encoding
# round-trip ("&reg" -> the registered-trademark sign), which made
# `&region=="World"` unparseable; restored throughout.
vars = "Emissions|Kyoto Gases"
m1 = ggplot(all[variable%in%vars & Category%in%c("BAU","CurPol")&!Scope=="national"&region=="World"])
m1 = m1 + geom_line(aes(x=period,y=value,colour=Category),size=1.5)
m1 = m1 + xlim(2000,2050) + scale_y_continuous(breaks=c(40000,50000,60000,70000,80000),limits=c(40000,85000))
m1 = m1 + scale_colour_manual(values=plotstyle(scens))
m1 = m1 + facet_grid(~model,scales="free_y")
m1 = m1 + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]")) + xlab("")
m1 = m1 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
  theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
m1
ggsave(file=paste(cfg$outdir,"/GHG_all_global_models_world_BAU-CurPol.png",sep=""),m1,width=18,height=14,dpi=200)
# Add the NDC categories.
m2 = ggplot(all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS")&!Scope=="national"&region=="World"])
m2 = m2 + geom_line(aes(x=period,y=value,colour=Category),size=1.5)
m2 = m2 + xlim(2000,2050)+ scale_y_continuous(breaks=c(40000,50000,60000,70000,80000),limits=c(40000,85000))
m2 = m2 + scale_colour_manual(values=plotstyle(scens))
m2 = m2 + facet_grid(~model,scales="free_y")
m2 = m2 + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
m2 = m2 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
  theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
m2
ggsave(file=paste(cfg$outdir,"/GHG_all_global_models_world_BAU-CurPol-NDC.png",sep=""),m2,width=16,height=14,dpi=200)
# Add 2Deg2020 (y-axis now starts at 0 to accommodate deep reductions).
m3 = ggplot(all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","2Deg2020")&!Scope=="national"&region=="World"])
m3 = m3 + geom_line(aes(x=period,y=value,colour=Category),size=1.5)
m3 = m3 + xlim(2000,2050)+ scale_y_continuous(breaks=c(0,10000,20000,30000,40000,50000,60000,70000,80000),limits=c(0,85000))
m3 = m3 + scale_colour_manual(values=plotstyle(scens))
m3 = m3 + facet_grid(~model,scales="free_y")
m3 = m3 + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
m3 = m3 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
  theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
m3
ggsave(file=paste(cfg$outdir,"/GHG_all_global_models_world_BAU-CurPol-NDC-2Deg2020.png",sep=""),m3,width=16,height=14,dpi=200)
# Add GPP.
m4 = ggplot(all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","2Deg2020")&!Scope=="national"&region=="World"])
m4 = m4 + geom_line(aes(x=period,y=value,colour=Category),size=1.5)
m4 = m4 + xlim(2000,2050)+ scale_y_continuous(breaks=c(0,10000,20000,30000,40000,50000,60000,70000,80000),limits=c(0,85000))
m4 = m4 + scale_colour_manual(values=plotstyle(scens))
m4 = m4 + facet_grid(~model,scales="free_y")
m4 = m4 + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
m4 = m4 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
  theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
m4
ggsave(file=paste(cfg$outdir,"/GHG_all_global_models_world_BAU-CurPol-NDC-GPP-2Deg2020.png",sep=""),m4,width=16,height=14,dpi=200)
# Add Bridge.
m5 = ggplot(all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2020")&!Scope=="national"&region=="World"])
m5 = m5 + geom_line(aes(x=period,y=value,colour=Category),size=1.5)
m5 = m5 + xlim(2000,2050)+ scale_y_continuous(breaks=c(0,10000,20000,30000,40000,50000,60000,70000,80000),limits=c(0,85000))
m5 = m5 + scale_colour_manual(values=plotstyle(scens))
m5 = m5 + facet_grid(~model,scales="free_y")
m5 = m5 + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
m5 = m5 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
  theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
m5
ggsave(file=paste(cfg$outdir,"/GHG_all_global_models_world_BAU-CurPol-NDC-GPP-2Deg2020-Bridge.png",sep=""),m5,width=16,height=14,dpi=200)
# Full set including 2Deg2030; also saved once more in a wide layout with the
# legend at the bottom.
m6 = ggplot(all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World"])
m6 = m6 + geom_line(aes(x=period,y=value,colour=Category),size=1.5)
m6 = m6 + xlim(2000,2050)+ scale_y_continuous(breaks=c(0,10000,20000,30000,40000,50000,60000,70000,80000),limits=c(0,85000))
m6 = m6 + scale_colour_manual(values=plotstyle(scens))
m6 = m6 + facet_grid(~model,scales="free_y")
m6 = m6 + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
m6 = m6 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
  theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
m6
ggsave(file=paste(cfg$outdir,"/GHG_all_global_models_world_BAU-CurPol-NDC-GPP-2Deg2030-Bridge-2Deg2020.png",sep=""),m6,width=16,height=14,dpi=200)
m6 = m6 + theme(legend.position="bottom")
ggsave(file=paste(cfg$outdir,"/GHG_all_global_models_world_BAU-CurPol-NDC-GPP-2Deg2030-Bridge-2Deg2020_wide.png",sep=""),m6,width=16,height=12,dpi=200)
# models as lines, ranges as funnels -----------------------------------------
# World-level "funnel" plots (m7..m12): per-model lines plus a min/max ribbon
# per scenario category, for total GHG and the main CO2 sectors.
# FIX(review): restored `&region=="World"` where an HTML-entity encoding
# round-trip had turned "&reg" into the registered-trademark sign.
# NOTE(review): `range` shadows base::range() as a data variable here; callers
# of range() as a function are unaffected, but consider renaming to `rng`.
range=all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World",list(min=min(value,na.rm=TRUE),max=max(value,na.rm=TRUE)),by=c("Category","variable","period")]
m7 = ggplot(all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World"])
m7 = m7 + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1.5)
m7 = m7 + geom_ribbon(data=range,aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.5)
m7 = m7 + xlim(2010,2050)+ scale_y_continuous(breaks=c(0,10000,20000,30000,40000,50000,60000,70000,80000),limits=c(0,85000))
m7 = m7 + scale_colour_manual(values=plotstyle(scens))
m7 = m7 + scale_fill_manual(values=plotstyle(scens))
m7 = m7 + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
m7 = m7 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
  theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
m7 = m7 + theme(legend.position="bottom")
m7
ggsave(file=paste(cfg$outdir,"/GHG_all_global_models_world_BAU-CurPol-NDC-GPP-2Deg2030-Bridge-2Deg2020_funnel.png",sep=""),m7,width=16,height=12,dpi=200)
# Transport CO2 funnel.
vars="Emissions|CO2|Energy|Demand|Transportation"
range=all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World",list(min=min(value,na.rm=TRUE),max=max(value,na.rm=TRUE)),by=c("Category","variable","period")]
m8 = ggplot(all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World"])
m8 = m8 + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1.5)
m8 = m8 + geom_ribbon(data=range,aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.5)
m8 = m8 + xlim(2000,2050) #+ scale_y_continuous(breaks=c(0,10000,20000,30000,40000,50000,60000,70000,80000),limits=c(0,85000))
m8 = m8 + scale_colour_manual(values=plotstyle(scens))
m8 = m8 + scale_fill_manual(values=plotstyle(scens))
m8 = m8 + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
m8 = m8 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
  theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
m8 = m8 + theme(legend.position="bottom")
m8
ggsave(file=paste(cfg$outdir,"/CO2_Transport_all_global_models_world_BAU-CurPol-NDC-GPP-2Deg2030-Bridge-2Deg2020_funnel.png",sep=""),m8,width=16,height=12,dpi=200)
# Buildings (residential and commercial) CO2 funnel.
vars="Emissions|CO2|Energy|Demand|Residential and Commercial"
range=all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World",list(min=min(value,na.rm=TRUE),max=max(value,na.rm=TRUE)),by=c("Category","variable","period")]
m9 = ggplot(all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World"])
m9 = m9 + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1.5)
m9 = m9 + geom_ribbon(data=range,aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.5)
m9 = m9 + xlim(2000,2050) #+ scale_y_continuous(breaks=c(0,10000,20000,30000,40000,50000,60000,70000,80000),limits=c(0,85000))
m9 = m9 + scale_colour_manual(values=plotstyle(scens))
m9 = m9 + scale_fill_manual(values=plotstyle(scens))
m9 = m9 + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
m9 = m9 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
  theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
m9 = m9 + theme(legend.position="bottom")
m9
ggsave(file=paste(cfg$outdir,"/CO2_Buildings_all_global_models_world_BAU-CurPol-NDC-GPP-2Deg2030-Bridge-2Deg2020_funnel.png",sep=""),m9,width=16,height=12,dpi=200)
# Industry CO2 funnel.
vars="Emissions|CO2|Energy|Demand|Industry"
range=all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World",list(min=min(value,na.rm=TRUE),max=max(value,na.rm=TRUE)),by=c("Category","variable","period")]
m10 = ggplot(all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World"])
m10 = m10 + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1.5)
m10 = m10 + geom_ribbon(data=range,aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.5)
m10 = m10 + xlim(2000,2050) #+ scale_y_continuous(breaks=c(0,10000,20000,30000,40000,50000,60000,70000,80000),limits=c(0,85000))
m10 = m10 + scale_colour_manual(values=plotstyle(scens))
m10 = m10 + scale_fill_manual(values=plotstyle(scens))
m10 = m10 + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
m10 = m10 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
  theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
m10 = m10 + theme(legend.position="bottom")
m10
ggsave(file=paste(cfg$outdir,"/CO2_Industry_all_global_models_world_BAU-CurPol-NDC-GPP-2Deg2030-Bridge-2Deg2020_funnel.png",sep=""),m10,width=16,height=12,dpi=200)
# Fossil-fuel and industrial-process CO2 funnel.
vars="Emissions|CO2|Energy and Industrial Processes"
range=all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World",list(min=min(value,na.rm=TRUE),max=max(value,na.rm=TRUE)),by=c("Category","variable","period")]
m11 = ggplot(all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World"])
m11 = m11 + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1.5)
m11 = m11 + geom_ribbon(data=range,aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.5)
m11 = m11 + xlim(2000,2050) #+ scale_y_continuous(breaks=c(0,10000,20000,30000,40000,50000,60000,70000,80000),limits=c(0,85000))
m11 = m11 + scale_colour_manual(values=plotstyle(scens))
m11 = m11 + scale_fill_manual(values=plotstyle(scens))
m11 = m11 + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
m11 = m11 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
  theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
m11 = m11 + theme(legend.position="bottom")
m11
ggsave(file=paste(cfg$outdir,"/CO2_FFI_all_global_models_world_BAU-CurPol-NDC-GPP-2Deg2030-Bridge-2Deg2020_funnel.png",sep=""),m11,width=16,height=12,dpi=200)
# Energy-supply CO2 funnel.
vars="Emissions|CO2|Energy|Supply"
range=all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World",list(min=min(value,na.rm=TRUE),max=max(value,na.rm=TRUE)),by=c("Category","variable","period")]
m12 = ggplot(all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World"])
m12 = m12 + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1.5)
m12 = m12 + geom_ribbon(data=range,aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.5)
m12 = m12 + xlim(2000,2050) #+ scale_y_continuous(breaks=c(0,10000,20000,30000,40000,50000,60000,70000,80000),limits=c(0,85000))
m12 = m12 + scale_colour_manual(values=plotstyle(scens))
m12 = m12 + scale_fill_manual(values=plotstyle(scens))
m12 = m12 + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
m12 = m12 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
  theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
m12 = m12 + theme(legend.position="bottom")
m12
ggsave(file=paste(cfg$outdir,"/CO2_Supply_all_global_models_world_BAU-CurPol-NDC-GPP-2Deg2030-Bridge-2Deg2020_funnel.png",sep=""),m12,width=16,height=12,dpi=200)
# Check BAU vs CurPol for Alex ----------------------------------------------
# Visual check plus a CSV of model/region/period rows where current-policy
# emissions exceed the baseline (which should not normally happen).
x <- ggplot(all[Category %in% c("BAU", "CurPol") & variable == "Emissions|Kyoto Gases" & Scope == "global"])
# FIX(review): spell out `scales=` — `scale=` relied on partial argument
# matching of facet_grid()'s `scales` argument.
x <- x + facet_grid(region ~ model, scales = "free_y")
x <- x + geom_line(aes(x = period, y = value, colour = Category))
x <- x + theme_bw()
x
ggsave(file = paste(cfg$outdir, "/GHG_BAU-CurPol_models-regions.png", sep = ""), x, width = 16, height = 12, dpi = 200)
checkcp <- all[Category %in% c("BAU", "CurPol") & variable == "Emissions|Kyoto Gases"]
# Drop columns that would prevent spread() from pairing BAU/CurPol rows.
checkcp$scenario <- NULL
checkcp$Baseline <- NULL
checkcp <- spread(checkcp, Category, value)  # NOTE(review): spread() is superseded by pivot_wider()
checkcp$flag <- ifelse(checkcp$CurPol > checkcp$BAU, "Check", "Fine")
checkcp <- checkcp[flag == "Check"]  # keep only the suspicious rows
write.csv(checkcp, "CurPolvsBAU.csv")
# Emissions reduction rate
# source("functions/calcRate.R") # calcrate does not work for negative emissions!
# emisred = all[variable%in%c("Emissions|Kyoto Gases","Emissions|CO2")&Category%in%c("CurPol","GPP","Bridge")]
# emisred = calcRate(emisred,c("Emissions|Kyoto Gases","Emissions|CO2"))
# emisredm = emisred[,list(median=median(value,na.rm=T),min=min(value,na.rm=T),max=max(value,na.rm=T)),
# by=c("Category","region","variable","unit","period")] #,min=min(value,na.rm=T),max=max(value,na.rm=T)
# emisred$Category = factor(emisred$Category,levels=c("CurPol","GPP","Bridge"))
# emisredm$Category = factor(emisredm$Category,levels=c("CurPol","GPP","Bridge"))
#
# e = ggplot()
# e = e + geom_bar(data=emisredm[Category%in%c("CurPol","GPP","Bridge")&variable=="Rate of Change| Emissions|Kyoto Gases"&!region%in%c("TUR","R5OECD90+EU","R5LAM","R5ASIA","R5MAF","R5REF")],
# aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
# # e = e + geom_pointrange(data=emisredm[Category%in%c("CurPol","GPP","Bridge")&variable=="Rate of Change| Emissions|Kyoto Gases"],
# # aes(ymin=min,ymax=max,y=median, x=period, colour=Category),alpha=0.5,size=5,fatten=1,position=position_dodge(width=0.66)) #,show.legend = F
# e = e + geom_point(data=emisred[Category%in%c("CurPol","GPP","Bridge")&variable=="Rate of Change| Emissions|Kyoto Gases"&!region%in%c("TUR","R5OECD90+EU","R5LAM","R5ASIA","R5MAF","R5REF")],
# aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
# e = e + scale_shape_manual(values=cfg$man_shapes)
# e = e + facet_wrap(~region,scales="free_y")
# e = e + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
# theme(axis.text.x = element_text(size=14)) + theme(legend.text=element_text(size=11),legend.title=element_text(size=12))
# e = e + ylab("Emission reduction rate (%/yr, CAGR)")
# ggsave(file=paste(cfg$outdir,"/GHG-emissions-reduction-rate.png",sep=""),e,width=18,height=12,dpi=300)
# Plot energy -------------------------------------------------------------
# Final energy electricity per country: one line panel per national model
# (drawn by the project helper plot_lineNationalScens), then the panels are
# combined with a single shared legend into one grid figure.
vars = "Final Energy|Electricity"
scens <- c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")
ylab = "Final energy - electricity (EJ/yr)"
file_pre = "FE-elec"
a<-plot_lineNationalScens(reg = "AUS", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="Australia (TIMES-AUS)",file_pre = file_pre) #,ylim=c(-300,1200)
b<-plot_lineNationalScens(reg = "BRA", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="Brazil (BLUES)",file_pre = file_pre,nolegend=T) #,ylim=c(-300,1200)
ca<-plot_lineNationalScens(reg = "CAN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="Canada (GCAM_Canada)", file_pre = file_pre)
c<-plot_lineNationalScens(reg = "CHN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="China (IPAC)", file_pre = file_pre)
e<-plot_lineNationalScens(reg = "EU", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="EU (PRIMES: -, GEM-E3: --)", file_pre = file_pre) #,ylim=c(0,8000)
j<-plot_lineNationalScens(reg = "JPN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab,title="Japan (AIM/E-NIES)", file_pre = file_pre) #,ylim=c(-200,1600)
r<-plot_lineNationalScens(reg = "RUS", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="Russia (RU-TIMES)",file_pre = file_pre) #,ylim=c(0,2500)
i<-plot_lineNationalScens(reg = "IND", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="India (IND-MARKAL)", file_pre = file_pre) #,ylim=c(0,15000)
id<-plot_lineNationalScens(reg = "IDN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="Indonesia (DDPP Energy)", file_pre = file_pre) #,ylim=c(0,15000)
u<-plot_lineNationalScens(reg = "USA", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="USA (GCAM_USA)", file_pre = file_pre) #,ylim=c(-500,8000)
k<-plot_lineNationalScens(reg = "ROK", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="Korea (AIM/CGE[Korea])", file_pre = file_pre) #,ylim=c(-500,8000)
w<-plot_lineNationalScens(reg = "World", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="World", file_pre = file_pre) #,ylim=c(-500,8000)
# Extract the legend grob from the Japan panel so one legend can be shared
# by all panels in the combined grid figure.
tmp<-ggplot_gtable(ggplot_build(j))
leg<-which(sapply(tmp$grobs,function(x) x$name) =="guide-box")
legend<-tmp$grobs[[leg]]
# Strip the per-panel legends and enlarge axis/title text for the grid.
a=a+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
b=b+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
c=c+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
ca=ca+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
e=e+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
i=i+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
id=id+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
j=j+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
r=r+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
u=u+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
k=k+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
w=w+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
# 2x6 layout: 11 country panels plus the shared legend in the last cell.
# NOTE(review): the World panel 'w' is built but not placed in the grid —
# confirm this is intentional.
lay<-rbind(c(1,2,3,4,5,6),c(7,8,9,10,11,12))
h=grid.arrange(a,b,c,ca,e,i,id,j,r,u,k,legend,layout_matrix=lay)
ggsave(file=paste(cfg$outdir,"/FE-elec_natscens_gridarrange.png",sep=""),h,width=24,height=14,dpi=200)
#TODO - use addvars instead?
# Renewable share of world electricity generation (solar + wind + hydro +
# biomass + geothermal over total), global models only, plotted as a funnel:
# per-model lines plus a min-max ribbon per scenario.
vars=c("Secondary Energy|Electricity|Solar","Secondary Energy|Electricity|Wind","Secondary Energy|Electricity|Hydro","Secondary Energy|Electricity|Biomass","Secondary Energy|Electricity|Geothermal","Secondary Energy|Electricity")
REN = all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","NDCplus-conv","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World"] # restored mangled '&region'
REN = spread(REN,variable,value)
# Models that do not report geothermal get a numeric zero so the share
# arithmetic below works. (Was the string "0", which would coerce the
# column to character and break the sum; also avoid DT[i]$col<- sub-assign.)
REN[REN$model %in% c("IMAGE 3.0","PROMETHEUS","WITCH 5.0"), "Secondary Energy|Electricity|Geothermal"] <- 0
REN = REN%>%mutate(REN_elec=(`Secondary Energy|Electricity|Solar` + `Secondary Energy|Electricity|Wind` + `Secondary Energy|Electricity|Hydro` + `Secondary Energy|Electricity|Biomass` + `Secondary Energy|Electricity|Geothermal`) / `Secondary Energy|Electricity` * 100 )
REN = data.table(gather(REN,variable,value,c("Secondary Energy|Electricity|Solar","Secondary Energy|Electricity|Wind","Secondary Energy|Electricity|Hydro","Secondary Energy|Electricity|Biomass","Secondary Energy|Electricity|Geothermal","Secondary Energy|Electricity","REN_elec")))
REN = REN[variable=="REN_elec"]
REN$unit <- "%"
# Cross-model envelope per scenario/period (shadows base::range in this scope).
range=REN[,list(min=min(value,na.rm=TRUE),max=max(value,na.rm=TRUE)),by=c("Category","variable","period")]
m13 = ggplot(REN)
m13 = m13 + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1.5)
m13 = m13 + geom_ribbon(data=range,aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.5)
m13 = m13 + xlim(2000,2050) #+ scale_y_continuous(breaks=c(0,10000,20000,30000,40000,50000,60000,70000,80000),limits=c(0,85000))
m13 = m13 + scale_colour_manual(values=plotstyle(scens))
m13 = m13 + scale_fill_manual(values=plotstyle(scens))
m13 = m13 + ylab(paste(unique(REN$variable),"[",unique(REN$unit),"]"))+ xlab("")
m13 = m13 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
m13 = m13 + theme(legend.position="bottom")
m13
ggsave(file=paste(cfg$outdir,"/REN_elec_all_global_models_world_BAU-CurPol-NDC-GPP-2Deg2030-Bridge-2Deg2020_funnel.png",sep=""),m13,width=16,height=12,dpi=200)
# Plot costs --------------------------------------------------------------
# carbon tax
# Carbon price per country: one panel per national model with a common
# y-range (0-7000 US$/tCO2), combined into one grid with a shared legend.
vars = "Price|Carbon" #TODO check whether this is weighted average or max - for world region. Present for the three protocol tiers?
scens <- c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")
ylab = "Carbon price (US$2010/tCO2)"
file_pre = "ctax"
a<-plot_lineNationalScens(reg = "AUS", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="Australia (TIMES-AUS)",file_pre = file_pre,ylim=c(0,7000))
b<-plot_lineNationalScens(reg = "BRA", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="Brazil (BLUES)",file_pre = file_pre,nolegend=T,ylim=c(0,7000)) #,ylim=c(-300,1200)
ca<-plot_lineNationalScens(reg = "CAN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="Canada (GCAM_Canada)", file_pre = file_pre,ylim=c(0,7000))
c<-plot_lineNationalScens(reg = "CHN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="China (IPAC)", file_pre = file_pre,ylim=c(0,7000))
e<-plot_lineNationalScens(reg = "EU", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="EU (PRIMES: -, GEM-E3: --)", file_pre = file_pre,ylim=c(0,7000))
j<-plot_lineNationalScens(reg = "JPN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab,title="Japan (AIM/E-NIES)", file_pre = file_pre,ylim=c(0,7000)) #,ylim=c(-200,1600)
r<-plot_lineNationalScens(reg = "RUS", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="Russia (RU-TIMES)",file_pre = file_pre,ylim=c(0,7000)) #,ylim=c(0,2500)
i<-plot_lineNationalScens(reg = "IND", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="India (IND-MARKAL)", file_pre = file_pre,ylim=c(0,7000)) #,ylim=c(0,15000)
id<-plot_lineNationalScens(reg = "IDN", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="Indonesia (DDPP Energy)", file_pre = file_pre,ylim=c(0,7000))
u<-plot_lineNationalScens(reg = "USA", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="USA (GCAM_USA)", file_pre = file_pre,ylim=c(0,7000)) #,ylim=c(-500,8000)
k<-plot_lineNationalScens(reg = "ROK", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="Korea (AIM/CGE[Korea])", file_pre = file_pre,ylim=c(0,7000)) #,ylim=c(-500,8000)
w<-plot_lineNationalScens(reg = "World", dt = all, vars = vars, scensnat = scens, scensglob = scens,
ylab = ylab, title="World", file_pre = file_pre,ylim=c(0,7000)) #,ylim=c(-500,8000)
# Extract the legend grob from the Japan panel to reuse as a shared legend.
tmp<-ggplot_gtable(ggplot_build(j))
leg<-which(sapply(tmp$grobs,function(x) x$name) =="guide-box")
legend<-tmp$grobs[[leg]]
# Strip per-panel legends and enlarge text before arranging the grid.
a=a+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
b=b+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
c=c+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
ca=ca+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
e=e+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
i=i+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
id=id+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
j=j+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
r=r+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
u=u+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
k=k+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
w=w+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
# 2x6 grid: 11 country panels + shared legend. NOTE(review): the World
# panel 'w' is again omitted from the grid — confirm intended.
lay<-rbind(c(1,2,3,4,5,6),c(7,8,9,10,11,12))
h=grid.arrange(a,b,c,ca,e,i,id,j,r,u,k,legend,layout_matrix=lay)
ggsave(file=paste(cfg$outdir,"/Ctax_natscens_gridarrange.png",sep=""),h,width=24,height=14,dpi=200)
# Global carbon price funnel (world, global models only): per-model lines
# plus a min-max ribbon per scenario, y-axis capped at 2000 US$2010/tCO2.
vars="Price|Carbon"
# Exclude the intermediate 5-year periods so the envelope is computed on
# the decadal grid all models report.
range=all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World"&!period%in%c(2015,2025,2035,2045,2055,2065,2075,2085,2095),
list(min=min(value,na.rm=TRUE),max=max(value,na.rm=TRUE),median=median(value,na.rm=TRUE)),by=c("Category","variable","period")] # restored mangled '&region'
m14 = ggplot(all[variable%in%vars & Category%in%c("BAU","CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World"]) # restored mangled '&region'
m14 = m14 + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1.5)
m14 = m14 + geom_ribbon(data=range,aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.5)
m14 = m14 + xlim(2000,2050) + scale_y_continuous(breaks=c(0,200,400,600,800,1000,1200,1400,1600,1800,2000),limits=c(0,2000))
m14 = m14 + scale_colour_manual(values=plotstyle(scens))
m14 = m14 + scale_fill_manual(values=plotstyle(scens))
m14 = m14 + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
m14 = m14 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
m14 = m14 + theme(legend.position="bottom")
m14
ggsave(file=paste(cfg$outdir,"/Cprice_all_global_models_world_BAU-CurPol-NDC-GPP-2Deg2030-Bridge-2Deg2020_funnel.png",sep=""),m14,width=16,height=12,dpi=200)
# split up mitigation and reference scenarios for readability. TODO: put these together with grid arrange / facet grid
# Reference/NDC scenarios only, with a tighter y-axis (0-200 US$/tCO2).
m14a = ggplot(all[variable%in%vars & Category%in%c("CurPol","NDCplus","NDCMCS","GPP")&!Scope=="national"&region=="World"]) # restored mangled '&region'
m14a = m14a + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1.5)
m14a = m14a + geom_ribbon(data=range[Category%in%c("CurPol","NDCplus","NDCMCS","GPP")],aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.5)
m14a = m14a + xlim(2000,2050)
m14a = m14a + scale_y_continuous(breaks=c(0,20,40,60,80,100,120,140,160,180,200),limits=c(0,200))
m14a = m14a + scale_colour_manual(values=plotstyle(scens))
m14a = m14a + scale_fill_manual(values=plotstyle(scens))
m14a = m14a + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
m14a = m14a + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
m14a = m14a + theme(legend.position="bottom")
m14a
ggsave(file=paste(cfg$outdir,"/Cprice_all_global_models_world_CurPol-NDC-GPP_funnel.png",sep=""),m14a,width=16,height=12,dpi=200)
# Mitigation scenarios only (Bridge / 2Deg variants), full 0-2000 y-axis.
m14b = ggplot(all[variable%in%vars & Category%in%c("Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World"]) # restored mangled '&region'
m14b = m14b + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1.5)
m14b = m14b + geom_ribbon(data=range[Category%in%c("Bridge","2Deg2030","2Deg2020")],aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.5)
m14b = m14b + xlim(2000,2050)
m14b = m14b + scale_y_continuous(breaks=c(0,200,400,600,800,1000,1200,1400,1600,1800,2000),limits=c(0,2000))
m14b = m14b + scale_colour_manual(values=plotstyle(scens))
m14b = m14b + scale_fill_manual(values=plotstyle(scens))
m14b = m14b + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
m14b = m14b + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
m14b = m14b + theme(legend.position="bottom")
m14b
ggsave(file=paste(cfg$outdir,"/Cprice_all_global_models_world_Bridge-2Deg2030-2Deg2020_funnel.png",sep=""),m14b,width=16,height=12,dpi=200)
# or as bar chart
# Carbon price as bars: scenario medians per period (2030/2040/2050) with
# individual model results overlaid as points.
cpricebar=all[variable%in%vars & Category%in%c("CurPol","NDCplus","NDCMCS","GPP","Bridge","2Deg2030","2Deg2020")&!Scope=="national"&region=="World"&period%in%c(2030,2040,2050)] # restored mangled '&region'
# 'range' here is the envelope/median table built for the m14 funnel above.
cpricebarm=range[period%in%c(2030,2040,2050)&!Category=="BAU"]
cpricebar$period=as.factor(cpricebar$period)
cpricebarm$period=as.factor(cpricebarm$period)
cpricebar$Category = factor(cpricebar$Category,levels=c("CurPol","NDCMCS","NDCplus","GPP","Bridge","2Deg2020","2Deg2030"))
cpricebarm$Category = factor(cpricebarm$Category,levels=c("CurPol","NDCMCS","NDCplus","GPP","Bridge","2Deg2020","2Deg2030"))
# Local shape/colour maps for the global models (paper figure styling).
shapes_global=c("AIM/CGE" = 2, "COPPE-COFFEE 1.1" = 3, "IMAGE 3.0" = 6, "MESSAGEix-GLOBIOM_1.0" = 7,
"POLES GECO2019" = 8,"PROMETHEUS" = 11, "REMIND-MAgPIE 1.7-3.0" = 9,"TIAM_Grantham_v3.2" = 5,
"WITCH 5.0" = 10)
scens_global=c("CurPol" = "#0072B2", "NDCplus" = "#56B4E9", "Bridge" = "#009E73", "2Deg2020" = "#D55E00", "2Deg2030" = "#E69F00")
m14c = ggplot()
m14c = m14c + geom_bar(data=cpricebarm[Category%in%c("CurPol","NDCplus","Bridge","2Deg2020","2Deg2030")],aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
# m14c = m14c + geom_pointrange(data=emisredm[Category%in%c("CurPol","GPP","Bridge")&variable=="Rate of Change| Emissions|Kyoto Gases"],
#                         aes(ymin=min,ymax=max,y=median, x=period, colour=Category),alpha=0.5,size=5,fatten=1,position=position_dodge(width=0.66)) #,show.legend = F
m14c = m14c + geom_point(data=cpricebar[Category%in%c("CurPol","NDCplus","Bridge","2Deg2020","2Deg2030")], aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
m14c = m14c +ggtitle("b) Global carbon price")
#m14c = m14c + geom_text(aes(x="2030",y=2000),label ="b)",size=10)
m14c = m14c + ylim(0,2000)
m14c = m14c + scale_shape_manual(values=shapes_global) #cfg$man_shapes
m14c = m14c + scale_color_manual(values=scens_global) #plotstyle(scens)
m14c = m14c + scale_fill_manual(values=scens_global) #plotstyle(scens)
#m14c = m14c + facet_wrap(~region,scales="free_y")
m14c = m14c + theme_bw() + theme(axis.text.y=element_text(size=24)) + theme(strip.text=element_text(size=26)) + theme(axis.title=element_text(size=24)) +
theme(axis.text.x = element_text(size=24)) + theme(legend.text=element_text(size=24),legend.title=element_text(size=26))+theme(plot.title=element_text(size=26))
m14c = m14c + ylab("US$2010/tCO2") + xlab("")
m14c
ggsave(file=paste(cfg$outdir,"/Carbon_price_bar.png",sep=""),m14c,width=18,height=12,dpi=300)
# Alternative grouping of the same carbon-price data: scenario on the
# x-axis, period as the dodge/colour dimension.
m14d = ggplot()
m14d = m14d + geom_bar(data=cpricebarm,aes(x=Category,y=median,fill=period),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
m14d = m14d + geom_point(data=cpricebar, aes(x=Category,y=value,shape=model,colour=period,group=period),size=3,position=position_dodge(width=0.66))
m14d = m14d + scale_shape_manual(values=cfg$man_shapes)
# m14d = m14d + scale_color_manual(values=plotstyle(scens))
# m14d = m14d + scale_fill_manual(values=plotstyle(scens))
m14d = m14d + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
theme(axis.text.x = element_text(size=14)) + theme(legend.text=element_text(size=11),legend.title=element_text(size=12))
m14d = m14d + ylab("Carbon price (US$2010/tCO2)") # fixed missing closing parenthesis in axis label
m14d
ggsave(file=paste(cfg$outdir,"/Carbon_price_bar_2.png",sep=""),m14d,width=18,height=12,dpi=300)
# Key paper figures -------------------------------------------------------
# settings
# NOTE: this redefines 'scens' (used by all figure blocks below) to the
# paper scenario set, replacing the longer list used for the funnel plots.
scens = c("CurPol","NDCplus","Bridge","2Deg2020","2Deg2030") #"NDCMCS",
regio = c("World")
regions = c("AUS","BRA","CAN","CHN","EU","IDN","IND","JPN","ROK","RUS","USA")
year = c("2030")
years = c("2030","2050")
# Energy system indicators ------------------------------------------------
#TODO for all graphs: connect individual model points over time to see trend (Lara to help)
### Figure elements
# Figure 1a share of REN
# Median bar + per-model points + min-max error bars of the renewable
# electricity share (REN, computed earlier) for the paper scenario set.
REbar=REN[Category%in%scens&region%in%regio&period%in%years] # &!Scope=="national"   (restored mangled '&region')
REbarm=REbar[,list(min=min(value,na.rm=TRUE),max=max(value,na.rm=TRUE),median=median(value,na.rm=TRUE)),by=c("Category","variable","period")]
REbar$period=as.factor(REbar$period)
REbarm$period=as.factor(REbarm$period)
REbar$Category = factor(REbar$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020","2Deg2030")) #,"NDCMCS"
REbarm$Category = factor(REbarm$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020","2Deg2030"))
F1a = ggplot()
F1a = F1a + geom_bar(data=REbarm,aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F1a = F1a + geom_point(data=REbar, aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
F1a = F1a + geom_errorbar(data=REbarm,aes(x=period,ymin=min,ymax=max,colour=Category),position=position_dodge(width=0.66))
#F1a = F1a + geom_text(aes(x="2030",y=88),label ="a)",size=10)
F1a = F1a + scale_shape_manual(values=cfg$man_shapes)
F1a = F1a + scale_color_manual(values=plotstyle(scens))
F1a = F1a + scale_fill_manual(values=plotstyle(scens))
F1a = F1a + ggtitle("a) Renewables in electricity")
F1a = F1a + xlab("")+ylab("Share (%)")
F1a = F1a + ylim(0,100)
F1a = F1a + theme_bw() + theme(axis.text.y=element_text(size=18)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=20)) +
theme(axis.text.x = element_text(size=18)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18),plot.title = element_text(size=22))
#F1a = F1a + ylab(paste(unique(REN$variable),"[",unique(REN$unit),"]"))
#F1a = F1a + geom_text(aes(x=2030,y=75),label="a)")
F1a
ggsave(file=paste(cfg$outdir,"/F1a_REN-share-elec_bar.png",sep=""),F1a,width=18,height=12,dpi=300)
# Figure 1b share of electric transport
# Share of electricity in transport final energy demand, world, paper
# scenarios: median bars + per-model points + min-max error bars.
EVbar=all[variable%in%c("Final Energy|Transportation|Electricity","Final Energy|Transportation")&Category%in%scens&!Scope=="national"& region%in%regio &period%in%years]
EVbar = spread(EVbar,variable,value)
EVbar = EVbar%>%mutate(EVshare= `Final Energy|Transportation|Electricity`/`Final Energy|Transportation` * 100 )
EVbar = data.table(gather(EVbar,variable,value,c("Final Energy|Transportation|Electricity","Final Energy|Transportation","EVshare")))
EVbar = EVbar[variable=="EVshare"]
EVbar$unit <- "%"
EVbarm=EVbar[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),median=median(value,na.rm=T)),by=c("Category","variable","period")]
EVbar$period=as.factor(EVbar$period)
EVbarm$period=as.factor(EVbarm$period)
EVbar$Category = factor(EVbar$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020","2Deg2030")) #,"NDCMCS"
EVbarm$Category = factor(EVbarm$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020","2Deg2030"))
F1b = ggplot()
F1b = F1b + geom_bar(data=EVbarm,aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F1b = F1b + geom_point(data=EVbar, aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
F1b = F1b + geom_errorbar(data=EVbarm,aes(x=period,ymin=min,ymax=max,colour=Category),position=position_dodge(width=0.66))
#F1b = F1b + geom_text(aes(x="2030",y=30),label ="b)",size=10)
F1b = F1b + scale_shape_manual(values=cfg$man_shapes)
F1b = F1b + scale_color_manual(values=plotstyle(scens))
F1b = F1b + scale_fill_manual(values=plotstyle(scens))
F1b = F1b + ggtitle("b) Electricity in transportation final energy demand")
F1b = F1b + xlab("")+ ylab("Share (%)") #paste("Share","[",unique(EVbar$unit),"]")
F1b = F1b + ylim(0,100)
F1b = F1b + theme_bw() + theme(axis.text.y=element_text(size=18)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=20)) +
theme(axis.text.x = element_text(size=18)) + theme(legend.text=element_text(size=11),legend.title=element_text(size=12),plot.title = element_text(size=22))
F1b
ggsave(file=paste(cfg$outdir,"/F1b_EV-transport_bar.png",sep=""),F1b,width=18,height=12,dpi=300)
# Figure 1b extra: biomass in transport
# Share of liquid biofuels in transport final energy, world, paper scenarios:
# reshape wide, compute the percentage share, then keep only the share rows.
Biobar=all[variable%in%c("Final Energy|Transportation|Liquids|Biomass","Final Energy|Transportation")&Category%in%scens&!Scope=="national"& region%in%regio &period%in%years]
Biobar = spread(Biobar,variable,value)
Biobar = Biobar%>%mutate(Bioshare= `Final Energy|Transportation|Liquids|Biomass`/`Final Energy|Transportation` * 100 )
Biobar = data.table(gather(Biobar,variable,value,c("Final Energy|Transportation|Liquids|Biomass","Final Energy|Transportation","Bioshare")))
Biobar = Biobar[variable=="Bioshare"]
Biobar$unit <- "%"
# Cross-model min/max/median per scenario and period for bars + error bars.
Biobarm=Biobar[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),median=median(value,na.rm=T)),by=c("Category","variable","period")]
Biobar$period=as.factor(Biobar$period)
Biobarm$period=as.factor(Biobarm$period)
Biobar$Category = factor(Biobar$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020","2Deg2030")) #,"NDCMCS"
Biobarm$Category = factor(Biobarm$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020","2Deg2030"))
# Biomass-in-transport share: median bars per scenario with per-model points
# and min-max error bars, built as a single chained ggplot expression.
F1b1 <- ggplot() +
  geom_bar(data = Biobarm, aes(x = period, y = median, fill = Category),
           stat = "identity", alpha = 0.5,
           position = position_dodge(width = 0.66), width = 0.66) +
  geom_point(data = Biobar,
             aes(x = period, y = value, shape = model, colour = Category, group = Category),
             size = 3, position = position_dodge(width = 0.66)) +
  geom_errorbar(data = Biobarm,
                aes(x = period, ymin = min, ymax = max, colour = Category),
                position = position_dodge(width = 0.66)) +
  scale_shape_manual(values = cfg$man_shapes) +
  scale_color_manual(values = plotstyle(scens)) +
  scale_fill_manual(values = plotstyle(scens)) +
  ggtitle("Biomass in transportation final energy demand") +
  xlab("") +
  ylab("Share (%)") +
  ylim(0, 100) +
  theme_bw() +
  theme(axis.text.y = element_text(size = 18),
        strip.text = element_text(size = 14),
        axis.title = element_text(size = 20),
        axis.text.x = element_text(size = 18),
        legend.text = element_text(size = 11),
        legend.title = element_text(size = 12),
        plot.title = element_text(size = 22))
F1b1
ggsave(file = paste(cfg$outdir, "/F1bextra_Bio-transport_bar.png", sep = ""),
       F1b1, width = 18, height = 12, dpi = 300)
# Figure 1c Industry efficiency? Need value added... (only reported by IMAGE). For now CCS as it is part of protocol. Maybe add F-gases?
# Industrial CCS as a share of industrial CO2 emissions, world, paper
# scenarios. Models not reporting industrial CCS are dropped by na.omit().
CCSbar=all[variable%in%c("Carbon Sequestration|CCS|Fossil|Energy|Demand|Industry","Emissions|CO2|Energy|Demand|Industry")&Category%in%scens&!Scope=="national"& region%in%regio &period%in%years]
CCSbar = spread(CCSbar,variable,value)
CCSbar=na.omit(CCSbar)
CCSbar = CCSbar%>%mutate(CCSshare= `Carbon Sequestration|CCS|Fossil|Energy|Demand|Industry`/`Emissions|CO2|Energy|Demand|Industry` * 100 )
CCSbar = data.table(gather(CCSbar,variable,value,c("Carbon Sequestration|CCS|Fossil|Energy|Demand|Industry","Emissions|CO2|Energy|Demand|Industry","CCSshare")))
CCSbar = CCSbar[variable=="CCSshare"]
CCSbar$unit <- "%"
CCSbarm=CCSbar[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),median=median(value,na.rm=T)),by=c("Category","variable","period")]
CCSbar$period=as.factor(CCSbar$period)
CCSbarm$period=as.factor(CCSbarm$period)
# NOTE(review): the level set here omits "2Deg2030" (unlike the other F1x
# figures), so any 2Deg2030 rows would become NA — confirm intended.
CCSbar$Category = factor(CCSbar$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020")) #,"NDCMCS"
CCSbarm$Category = factor(CCSbarm$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020"))
F1c = ggplot()
F1c = F1c + geom_bar(data=CCSbarm,aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F1c = F1c + geom_point(data=CCSbar, aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
F1c = F1c + geom_errorbar(data=CCSbarm,aes(x=period,ymin=min,ymax=max,colour=Category),position=position_dodge(width=0.66))
#F1c = F1c + geom_text(aes(x="2030",y=80),label ="c)",size=10)
F1c = F1c + scale_shape_manual(values=cfg$man_shapes)
F1c = F1c + scale_color_manual(values=plotstyle(scens))
F1c = F1c + scale_fill_manual(values=plotstyle(scens))
F1c = F1c + xlab("")
F1c = F1c + theme_bw() + theme(axis.text.y=element_text(size=18)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
theme(axis.text.x = element_text(size=18)) + theme(legend.text=element_text(size=11),legend.title=element_text(size=12))
F1c = F1c + ylab(paste("Industrial CCS as share of industry CO2 emissions","[",unique(CCSbar$unit),"]"))
F1c
ggsave(file=paste(cfg$outdir,"/F1c_CCS-industry_bar.png",sep=""),F1c,width=18,height=12,dpi=300)
#Alternative: industrial process & F-gas emissions
# Sum of industrial-process CO2 and F-gas emissions, expressed relative to
# 2015 (%) for 2030 and 2050, world, paper scenarios.
IEbar=all[variable%in%c("Emissions|CO2|Industrial Processes","Emissions|F-Gases")&Category%in%scens&!Scope=="national"& region%in%regio &period%in%c(2015,2030,2050)]
IEbar$unit<-"Mt CO2-equiv/yr"
IEbar = spread(IEbar,variable,value)
IEbar=na.omit(IEbar)
IEbar = IEbar%>%mutate(IEtotal= `Emissions|CO2|Industrial Processes`+`Emissions|F-Gases` )
IEbar = data.table(gather(IEbar,variable,value,c("Emissions|CO2|Industrial Processes","Emissions|F-Gases","IEtotal")))
IEbar = IEbar[variable=="IEtotal"]
# Reshape periods wide to compute percentage change vs 2015 for 2030/2050.
IEbar = spread(IEbar,period,value)
IEbar=na.omit(IEbar)
IEbar = IEbar%>%mutate(rel50= ((`2050`-`2015`)/`2015`)*100,rel30=((`2030`-`2015`)/`2015`)*100)
IEbar = data.table(gather(IEbar,period,value,c('2015','2030','2050','rel30','rel50')))
IEbar = IEbar[period%in%c("rel50","rel30")]
IEbar$unit <- "%"
# Relabel the relative-change rows back to their target years.
# NOTE(review): DT[i]$col <- value sub-assignment works but copies; the
# data.table idiom would be IEbar[period=="rel50", period := 2050].
IEbar[period=="rel50"]$period<-2050
IEbar[period=="rel30"]$period<-2030
IEbarm=IEbar[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),median=median(value,na.rm=T)),by=c("Category","variable","period")]
IEbar$period=as.factor(IEbar$period)
IEbarm$period=as.factor(IEbarm$period)
IEbar$Category = factor(IEbar$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020","2Deg2030")) #,"NDCMCS"
IEbarm$Category = factor(IEbarm$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020","2Deg2030"))
F1c2 = ggplot()
F1c2 = F1c2 + geom_bar(data=IEbarm,aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F1c2 = F1c2 + geom_point(data=IEbar, aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
F1c2 = F1c2 + geom_errorbar(data=IEbarm,aes(x=period,ymin=min,ymax=max,colour=Category),position=position_dodge(width=0.66))
#F1c2 = F1c2 + geom_text(aes(x="2030",y=80),label ="c)",size=10)
F1c2 = F1c2 + scale_shape_manual(values=cfg$man_shapes)
F1c2 = F1c2 + scale_color_manual(values=plotstyle(scens))
F1c2 = F1c2 + scale_fill_manual(values=plotstyle(scens))
F1c2 = F1c2 + ggtitle("c) F-Gases and Industrial process CO2 emissions")
F1c2 = F1c2 + xlab("")
F1c2 = F1c2 + theme_bw() + theme(axis.text.y=element_text(size=18)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=20)) +
theme(axis.text.x = element_text(size=18)) + theme(legend.text=element_text(size=11),legend.title=element_text(size=12),plot.title = element_text(size=22))
F1c2 = F1c2 + ylab("Relative to 2015 (%)") #paste("[relative to 2015, ",unique(IEbar$unit),"]")
F1c2
ggsave(file=paste(cfg$outdir,"/F1c2_emissions-industry_bar.png",sep=""),F1c2,width=18,height=12,dpi=300)
# Figure 1d Buildings share of electricity / efficiency?
# Share of electricity in residential+commercial final energy demand,
# world, paper scenarios: median bars + model points + min-max error bars.
EBbar=all[variable%in%c("Final Energy|Residential and Commercial|Electricity","Final Energy|Residential and Commercial")&Category%in%scens&!Scope=="national"& region%in%regio &period%in%years]
EBbar = spread(EBbar,variable,value)
EBbar = EBbar%>%mutate(EBshare= `Final Energy|Residential and Commercial|Electricity`/`Final Energy|Residential and Commercial` * 100 )
EBbar = data.table(gather(EBbar,variable,value,c("Final Energy|Residential and Commercial|Electricity","Final Energy|Residential and Commercial","EBshare")))
EBbar = EBbar[variable=="EBshare"]
EBbar$unit <- "%"
EBbarm=EBbar[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),median=median(value,na.rm=T)),by=c("Category","variable","period")]
EBbar$period=as.factor(EBbar$period)
EBbarm$period=as.factor(EBbarm$period)
EBbar$Category = factor(EBbar$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020","2Deg2030")) #,"NDCMCS"
EBbarm$Category = factor(EBbarm$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020","2Deg2030"))
F1d = ggplot()
F1d = F1d + geom_bar(data=EBbarm,aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F1d = F1d + geom_point(data=EBbar, aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
F1d = F1d + geom_errorbar(data=EBbarm,aes(x=period,ymin=min,ymax=max,colour=Category),position=position_dodge(width=0.66))
#F1d = F1d + geom_text(aes(x="2030",y=70),label ="d)",size=10)
F1d = F1d + scale_shape_manual(values=cfg$man_shapes)
F1d = F1d + scale_color_manual(values=plotstyle(scens))
F1d = F1d + scale_fill_manual(values=plotstyle(scens))
F1d = F1d + ggtitle("d) Electricity in buildings final energy demand")
F1d = F1d + xlab("") + ylab("Share (%)") #paste("Share","[",unique(EBbar$unit),"]")
F1d = F1d + ylim(0,100)
F1d = F1d + theme_bw() + theme(axis.text.y=element_text(size=18)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=20)) +
theme(axis.text.x = element_text(size=18)) + theme(legend.text=element_text(size=11),legend.title=element_text(size=12),plot.title = element_text(size=22))
F1d
ggsave(file=paste(cfg$outdir,"/F1d_Elec-buildings_bar.png",sep=""),F1d,width=18,height=12,dpi=300)
# Figure 1e? AFOLU
#"Emissions|CH4|AFOLU"
#"Emissions|N2O|AFOLU"
# Purpose: same bar/point/errorbar layout as Fig 1d, for afforestation and
# reforestation land cover (no share computation needed here).
AFbar=all[variable%in%c("Land Cover|Forest|Afforestation and Reforestation")&Category%in%scens&!Scope=="national"& region%in%regio &period%in%years]
# Cross-model min/max/median per scenario and period.
AFbarm=AFbar[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),median=median(value,na.rm=T)),by=c("Category","variable","period")]
# Factors fix x-axis and legend ordering.
AFbar$period=as.factor(AFbar$period)
AFbarm$period=as.factor(AFbarm$period)
AFbar$Category = factor(AFbar$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020","2Deg2030")) #,"NDCMCS"
AFbarm$Category = factor(AFbarm$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020","2Deg2030"))
F1e = ggplot()
F1e = F1e + geom_bar(data=AFbarm,aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F1e = F1e + geom_point(data=AFbar, aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
F1e = F1e + geom_errorbar(data=AFbarm,aes(x=period,ymin=min,ymax=max,colour=Category),position=position_dodge(width=0.66))
#F1e = F1e + geom_text(aes(x="2030",y=450),label ="e)",size=10)
F1e = F1e + scale_shape_manual(values=cfg$man_shapes)
F1e = F1e + scale_color_manual(values=plotstyle(scens))
F1e = F1e + scale_fill_manual(values=plotstyle(scens))
F1e = F1e + ggtitle("e) Afforestation and reforestation")
F1e = F1e + xlab("")
F1e = F1e + theme_bw() + theme(axis.text.y=element_text(size=18)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=20)) +
theme(axis.text.x = element_text(size=18)) + theme(legend.text=element_text(size=11),legend.title=element_text(size=12),plot.title = element_text(size=22))
F1e = F1e + ylab("Million ha") #paste("[",unique(AFbar$unit),"]")
F1e
ggsave(file=paste(cfg$outdir,"/F1e_Afforestation_bar.png",sep=""),F1e,width=18,height=12,dpi=300)
# Figure 1f multi-model FE/PE stack
# Purpose: stacked primary-energy-by-source bars for 2030, one facet per model,
# scenarios on the x axis.
vars=c("Primary Energy|Biomass|w/ CCS","Primary Energy|Biomass|w/o CCS","Primary Energy|Coal|w/ CCS","Primary Energy|Coal|w/o CCS","Primary Energy|Gas|w/ CCS","Primary Energy|Gas|w/o CCS",
"Primary Energy|Geothermal","Primary Energy|Hydro","Primary Energy|Nuclear","Primary Energy|Oil|w/ CCS","Primary Energy|Oil|w/o CCS","Primary Energy|Other","Primary Energy|Solar","Primary Energy|Wind",
"Primary Energy|Ocean","Primary Energy|Secondary Energy Trade")
PEstack=all[variable%in%vars&Category%in%scens&!Scope=="national"& region%in%regio &period%in%2030]
PEstack$Category = factor(PEstack$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020","2Deg2030"))
F1f = ggplot(data=PEstack) #TODO different year? #[!Category=="NDCplus"]
F1f = F1f + geom_bar(aes(x=Category,y=value,fill=variable),stat="identity", position="stack",width=0.5)
# labeller maps full model identifiers to short display names on the facet strips.
F1f = F1f + facet_wrap(~model,nrow=1,labeller = labeller(model=c("IMAGE 3.0"="IMAGE","REMIND-MAgPIE 1.7-3.0"="REMIND", "POLES GECO2019"="POLES","AIM/CGE"="AIM/CGE","COPPE-COFFEE 1.1"="COFFEE","PROMETHEUS"="PROMETHEUS","MESSAGEix-GLOBIOM_1.0"="MESSAGE","WITCH 5.0"="WITCH","TIAM_Grantham_v3.2"="TIAM")))
F1f = F1f + scale_fill_manual(values=plotstyle(vars))
F1f = F1f + theme_bw() + theme(axis.text.y=element_text(size=18)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
theme(axis.text.x = element_text(size=18,angle=90)) + theme(legend.text=element_text(size=11),legend.title=element_text(size=12)) + theme(panel.spacing = unit(0, "lines"))
F1f = F1f + ylab(paste("Primary energy by source","[",unique(PEstack$unit),"]"))+xlab("")
F1f
ggsave(file=paste(cfg$outdir,"/F1f_PE-stack-model-scens_bar.png",sep=""),F1f,width=18,height=12,dpi=300)
# Figure 1X Rate of change in demand / supply (emissions?) TODO
### Figure collection
# Extract the legend grob from F1a so a single shared legend can be placed in
# the grid layout, then strip legends from the individual panels.
tmp<-ggplot_gtable(ggplot_build(F1a))
leg<-which(sapply(tmp$grobs,function(x) x$name) =="guide-box")
legend<-tmp$grobs[[leg]]
F1a=F1a+theme(legend.position = "none")#+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
F1b=F1b+theme(legend.position = "none")#+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
F1c2=F1c2+theme(legend.position = "none")#+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
F1d=F1d+theme(legend.position = "none")#+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
F1e=F1e+theme(legend.position = "none")#+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
#F1f=F1f+theme(legend.position = "none")+theme(axis.text=element_text(size=16),plot.title = element_text(size=18))
# Layout matrix: panels a-c + legend on the top row, d/e/f (f spans 2 cells) below.
lay<-rbind(c(1,2,3,4),c(5,6,7,7))
F1=grid.arrange(F1a,F1b,F1c2,legend,F1d,F1e,F1f,layout_matrix=lay)
ggsave(file=paste(cfg$outdir,"/F1_gridarrange.png",sep=""),F1,width=24,height=14,dpi=200)
## alternative: only panels a-e
# Re-extract the legend with larger fonts, then enlarge axis text on all panels.
F1a=F1a+theme(legend.position = "right",legend.text=element_text(size=22),legend.title=element_text(size=24))
tmp<-ggplot_gtable(ggplot_build(F1a))
leg<-which(sapply(tmp$grobs,function(x) x$name) =="guide-box")
legend<-tmp$grobs[[leg]]
F1a=F1a+theme(legend.position = "none",axis.text.x=element_text(size=22),axis.text.y=element_text(size=22))
F1b=F1b+theme(axis.text.x=element_text(size=22),axis.text.y=element_text(size=22))
F1c2=F1c2+theme(axis.text.x=element_text(size=22),axis.text.y=element_text(size=22))
F1d=F1d+theme(axis.text.x=element_text(size=22),axis.text.y=element_text(size=22))
F1e=F1e+theme(axis.text.x=element_text(size=22),axis.text.y=element_text(size=22))
# 2x3 grid: a-c on top, d/e + legend below.
lay<-rbind(c(1,2,3),c(4,5,6))
F1alt=grid.arrange(F1a,F1b,F1c2,F1d,F1e,legend,layout_matrix=lay)
ggsave(file=paste(cfg$outdir,"/F1_gridarrange_alt.png",sep=""),F1alt,width=24,height=14,dpi=200)
### Repeat but for NDC convergence
# Rebuild Figure 1 panel a) with the "NDCplus-conv" scenario in place of "NDCplus".
# NOTE: this reassigns the global `scens`, which the following NDC-convergence
# panels also rely on.
scens = c("CurPol","NDCplus-conv","Bridge","2Deg2020","2Deg2030")
# Figure 1a share of REN
# FIX: the filter previously read "Category%in%scens®ion%in%regio" — the "&reg"
# of "&region" had been mangled into the single character "®", producing an
# undefined symbol at runtime. Restored the intended "&region" conjunction.
REbar=REN[Category%in%scens&region%in%regio&period%in%years]
# Cross-model min/max/median per scenario, variable and period.
REbarm=REbar[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),median=median(value,na.rm=T)),by=c("Category","variable","period")]
# Factors fix x-axis and legend ordering.
REbar$period=as.factor(REbar$period)
REbarm$period=as.factor(REbarm$period)
REbar$Category = factor(REbar$Category,levels=c("CurPol","NDCplus-conv","NDCMCS","Bridge","2Deg2020","2Deg2030")) #,"NDCMCS"
REbarm$Category = factor(REbarm$Category,levels=c("CurPol","NDCplus-conv","NDCMCS","Bridge","2Deg2020","2Deg2030"))
# Bars = median, points = individual models (shape-coded), errorbars = min/max.
F1a = ggplot()
F1a = F1a + geom_bar(data=REbarm,aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F1a = F1a + geom_point(data=REbar, aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
F1a = F1a + geom_errorbar(data=REbarm,aes(x=period,ymin=min,ymax=max,colour=Category),position=position_dodge(width=0.66))
F1a = F1a + geom_text(aes(x="2030",y=88),label ="a)",size=10)
F1a = F1a + scale_shape_manual(values=cfg$man_shapes)
F1a = F1a + scale_color_manual(values=plotstyle(scens))
F1a = F1a + scale_fill_manual(values=plotstyle(scens))
F1a = F1a + xlab("")
F1a = F1a + theme_bw() + theme(axis.text.y=element_text(size=18)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
theme(axis.text.x = element_text(size=18)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
F1a = F1a +ylab("Share of renewables in electricity (%)")
F1a
# Figure 1b share of electric transport
# Purpose: NDC-convergence variant of panel b) — share of electricity in
# transportation final energy demand (bar = median, points = models,
# errorbar = min/max across models).
EVbar=all[variable%in%c("Final Energy|Transportation|Electricity","Final Energy|Transportation")&Category%in%scens&!Scope=="national"& region%in%regio &period%in%years]
# Widen, derive the share in %, re-lengthen, keep only the EVshare rows.
EVbar = spread(EVbar,variable,value)
EVbar = EVbar%>%mutate(EVshare= `Final Energy|Transportation|Electricity`/`Final Energy|Transportation` * 100 )
EVbar = data.table(gather(EVbar,variable,value,c("Final Energy|Transportation|Electricity","Final Energy|Transportation","EVshare")))
EVbar = EVbar[variable=="EVshare"]
EVbar$unit <- "%"
EVbarm=EVbar[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),median=median(value,na.rm=T)),by=c("Category","variable","period")]
# Factors fix x-axis and legend ordering.
EVbar$period=as.factor(EVbar$period)
EVbarm$period=as.factor(EVbarm$period)
EVbar$Category = factor(EVbar$Category,levels=c("CurPol","NDCplus-conv","NDCMCS","Bridge","2Deg2020","2Deg2030")) #,"NDCMCS"
EVbarm$Category = factor(EVbarm$Category,levels=c("CurPol","NDCplus-conv","NDCMCS","Bridge","2Deg2020","2Deg2030"))
F1b = ggplot()
F1b = F1b + geom_bar(data=EVbarm,aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F1b = F1b + geom_point(data=EVbar, aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
F1b = F1b + geom_errorbar(data=EVbarm,aes(x=period,ymin=min,ymax=max,colour=Category),position=position_dodge(width=0.66))
F1b = F1b + geom_text(aes(x="2030",y=30),label ="b)",size=10)
F1b = F1b + scale_shape_manual(values=cfg$man_shapes)
F1b = F1b + scale_color_manual(values=plotstyle(scens))
F1b = F1b + scale_fill_manual(values=plotstyle(scens))
F1b = F1b + xlab("")
F1b = F1b + theme_bw() + theme(axis.text.y=element_text(size=18)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
theme(axis.text.x = element_text(size=18)) + theme(legend.text=element_text(size=11),legend.title=element_text(size=12))
F1b = F1b + ylab(paste("Share of electricity in transportation final energy demand","[",unique(EVbar$unit),"]"))
F1b
# Figure 1c industrial process & F-gas emissions
# Purpose: NDC-convergence variant of panel c) — combined industrial-process CO2
# and F-gas emissions, expressed relative to 2015 (%) for 2030 and 2050.
IEbar=all[variable%in%c("Emissions|CO2|Industrial Processes","Emissions|F-Gases")&Category%in%scens&!Scope=="national"& region%in%regio &period%in%c(2015,2030,2050)]
# Force a common CO2-equivalent unit so the two variables can be summed.
IEbar$unit<-"Mt CO2-equiv/yr"
IEbar = spread(IEbar,variable,value)
IEbar=na.omit(IEbar)
IEbar = IEbar%>%mutate(IEtotal= `Emissions|CO2|Industrial Processes`+`Emissions|F-Gases` )
IEbar = data.table(gather(IEbar,variable,value,c("Emissions|CO2|Industrial Processes","Emissions|F-Gases","IEtotal")))
IEbar = IEbar[variable=="IEtotal"]
# Widen over periods to compute % change of 2030/2050 relative to 2015.
IEbar = spread(IEbar,period,value)
IEbar=na.omit(IEbar)
IEbar = IEbar%>%mutate(rel50= ((`2050`-`2015`)/`2015`)*100,rel30=((`2030`-`2015`)/`2015`)*100)
IEbar = data.table(gather(IEbar,period,value,c('2015','2030','2050','rel30','rel50')))
IEbar = IEbar[period%in%c("rel50","rel30")]
IEbar$unit <- "%"
# Relabel the relative-change rows back to their target years for plotting.
IEbar[period=="rel50"]$period<-2050
IEbar[period=="rel30"]$period<-2030
IEbarm=IEbar[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),median=median(value,na.rm=T)),by=c("Category","variable","period")]
# Factors fix x-axis and legend ordering.
IEbar$period=as.factor(IEbar$period)
IEbarm$period=as.factor(IEbarm$period)
IEbar$Category = factor(IEbar$Category,levels=c("CurPol","NDCplus-conv","NDCMCS","Bridge","2Deg2020","2Deg2030")) #,"NDCMCS"
IEbarm$Category = factor(IEbarm$Category,levels=c("CurPol","NDCplus-conv","NDCMCS","Bridge","2Deg2020","2Deg2030"))
F1c2 = ggplot()
F1c2 = F1c2 + geom_bar(data=IEbarm,aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F1c2 = F1c2 + geom_point(data=IEbar, aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
F1c2 = F1c2 + geom_errorbar(data=IEbarm,aes(x=period,ymin=min,ymax=max,colour=Category),position=position_dodge(width=0.66))
F1c2 = F1c2 + geom_text(aes(x="2030",y=80),label ="c)",size=10)
F1c2 = F1c2 + scale_shape_manual(values=cfg$man_shapes)
F1c2 = F1c2 + scale_color_manual(values=plotstyle(scens))
F1c2 = F1c2 + scale_fill_manual(values=plotstyle(scens))
F1c2 = F1c2 + xlab("")
F1c2 = F1c2 + theme_bw() + theme(axis.text.y=element_text(size=18)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
theme(axis.text.x = element_text(size=18)) + theme(legend.text=element_text(size=11),legend.title=element_text(size=12))
F1c2 = F1c2 + ylab(paste("F-Gases and Industrial process CO2 emissions","[relative to 2015, ",unique(IEbar$unit),"]"))
F1c2
# Figure 1d Buildings share of electricity
# Purpose: NDC-convergence variant of panel d) — electricity share in buildings
# final energy demand (same bar/point/errorbar layout as the main Figure 1d).
EBbar=all[variable%in%c("Final Energy|Residential and Commercial|Electricity","Final Energy|Residential and Commercial")&Category%in%scens&!Scope=="national"& region%in%regio &period%in%years]
# Widen, derive the share in %, re-lengthen, keep only the EBshare rows.
EBbar = spread(EBbar,variable,value)
EBbar = EBbar%>%mutate(EBshare= `Final Energy|Residential and Commercial|Electricity`/`Final Energy|Residential and Commercial` * 100 )
EBbar = data.table(gather(EBbar,variable,value,c("Final Energy|Residential and Commercial|Electricity","Final Energy|Residential and Commercial","EBshare")))
EBbar = EBbar[variable=="EBshare"]
EBbar$unit <- "%"
EBbarm=EBbar[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),median=median(value,na.rm=T)),by=c("Category","variable","period")]
# Factors fix x-axis and legend ordering.
EBbar$period=as.factor(EBbar$period)
EBbarm$period=as.factor(EBbarm$period)
EBbar$Category = factor(EBbar$Category,levels=c("CurPol","NDCplus-conv","NDCMCS","Bridge","2Deg2020","2Deg2030")) #,"NDCMCS"
EBbarm$Category = factor(EBbarm$Category,levels=c("CurPol","NDCplus-conv","NDCMCS","Bridge","2Deg2020","2Deg2030"))
F1d = ggplot()
F1d = F1d + geom_bar(data=EBbarm,aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F1d = F1d + geom_point(data=EBbar, aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
F1d = F1d + geom_errorbar(data=EBbarm,aes(x=period,ymin=min,ymax=max,colour=Category),position=position_dodge(width=0.66))
F1d = F1d + geom_text(aes(x="2030",y=70),label ="d)",size=10)
F1d = F1d + scale_shape_manual(values=cfg$man_shapes)
F1d = F1d + scale_color_manual(values=plotstyle(scens))
F1d = F1d + scale_fill_manual(values=plotstyle(scens))
F1d = F1d + xlab("")
F1d = F1d + theme_bw() + theme(axis.text.y=element_text(size=18)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
theme(axis.text.x = element_text(size=18)) + theme(legend.text=element_text(size=11),legend.title=element_text(size=12))
F1d = F1d + ylab(paste("Share of electricity in buildings final energy demand","[",unique(EBbar$unit),"]"))
F1d
# Figure 1e AFOLU
# Purpose: NDC-convergence variant of panel e) — afforestation/reforestation
# land cover (bar = median, points = models, errorbar = min/max).
AFbar=all[variable%in%c("Land Cover|Forest|Afforestation and Reforestation")&Category%in%scens&!Scope=="national"& region%in%regio &period%in%years]
AFbarm=AFbar[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),median=median(value,na.rm=T)),by=c("Category","variable","period")]
# Factors fix x-axis and legend ordering.
AFbar$period=as.factor(AFbar$period)
AFbarm$period=as.factor(AFbarm$period)
AFbar$Category = factor(AFbar$Category,levels=c("CurPol","NDCplus-conv","NDCMCS","Bridge","2Deg2020","2Deg2030")) #,"NDCMCS"
AFbarm$Category = factor(AFbarm$Category,levels=c("CurPol","NDCplus-conv","NDCMCS","Bridge","2Deg2020","2Deg2030"))
F1e = ggplot()
F1e = F1e + geom_bar(data=AFbarm,aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F1e = F1e + geom_point(data=AFbar, aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
F1e = F1e + geom_errorbar(data=AFbarm,aes(x=period,ymin=min,ymax=max,colour=Category),position=position_dodge(width=0.66))
F1e = F1e + geom_text(aes(x="2030",y=450),label ="e)",size=10)
F1e = F1e + scale_shape_manual(values=cfg$man_shapes)
F1e = F1e + scale_color_manual(values=plotstyle(scens))
F1e = F1e + scale_fill_manual(values=plotstyle(scens))
F1e = F1e + xlab("")
F1e = F1e + theme_bw() + theme(axis.text.y=element_text(size=18)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
theme(axis.text.x = element_text(size=18)) + theme(legend.text=element_text(size=11),legend.title=element_text(size=12))
F1e = F1e + ylab(paste("Afforestation and reforestation","[",unique(AFbar$unit),"]"))
F1e
## alternative: only panels a-e
# Assemble the NDC-convergence panels into a 2x3 grid with one shared legend
# (extracted from F1a), then save the composite figure.
F1a=F1a+theme(legend.position = "right",legend.text=element_text(size=22),legend.title=element_text(size=24))
tmp<-ggplot_gtable(ggplot_build(F1a))
leg<-which(sapply(tmp$grobs,function(x) x$name) =="guide-box")
legend<-tmp$grobs[[leg]]
# Strip per-panel legends and enlarge axis text for the composite layout.
F1a=F1a+theme(legend.position = "none",axis.text.x=element_text(size=22),axis.text.y=element_text(size=22))
F1b=F1b+theme(legend.position = "none")
F1c2=F1c2+theme(legend.position = "none")
F1d=F1d+theme(legend.position = "none")
F1e=F1e+theme(legend.position = "none")
F1b=F1b+theme(axis.text.x=element_text(size=22),axis.text.y=element_text(size=22))
F1c2=F1c2+theme(axis.text.x=element_text(size=22),axis.text.y=element_text(size=22))
F1d=F1d+theme(axis.text.x=element_text(size=22),axis.text.y=element_text(size=22))
F1e=F1e+theme(axis.text.x=element_text(size=22),axis.text.y=element_text(size=22))
lay<-rbind(c(1,2,3),c(4,5,6))
F1alt=grid.arrange(F1a,F1b,F1c2,F1d,F1e,legend,layout_matrix=lay)
ggsave(file=paste(cfg$outdir,"/F1_gridarrange_alt_NDCconvergence.png",sep=""),F1alt,width=24,height=14,dpi=200)
#Back to normal scenario selection:
scens = c("CurPol","NDCplus","Bridge","2Deg2020")
# Waterfall ---------------------------------------------------------------
### Figure elements
# Figure 2a Sectors
# select data
# FIX: three filters below previously read `...®ion...` — the "&reg" of
# "&region" had been mangled into the single "®" character, producing undefined
# symbols at runtime. Restored the intended "&region" conjunctions.
cdata=all[model=="TIAM_Grantham_v3.2"&region=="World"] # POLES GECO2019, AIM/CGE, IMAGE 3.0, PROMETHEUS, REMIND-MAgPIE 1.7-3.0, COPPE-COFFEE 1.1,MESSAGEix-GLOBIOM_1.0, WITCH 5.0, TIAM_Grantham_v3.2
model=unique(cdata$model)
# add non-CO2
# Build an aggregate "Emissions|Non-CO2" series from CH4, N2O and F-gases for
# models that report them (TIAM and PROMETHEUS do not).
# NOTE(review): the factors 25 (CH4) and 298/1000 (N2O, kt->Mt) look like AR4
# GWP100 values — confirm against the protocol used elsewhere in the project.
if(!model%in%c("TIAM_Grantham_v3.2","PROMETHEUS")){
nonco2=cdata[variable%in%c("Emissions|CH4","Emissions|N2O","Emissions|F-Gases")]
nonco2$unit<-NULL
nonco2=spread(nonco2,variable,value)
nonco2=nonco2%>%mutate(`Emissions|Non-CO2`=((`Emissions|CH4`*25)+(`Emissions|N2O`*298/1000)+`Emissions|F-Gases`))
nonco2=data.table(gather(nonco2,variable,value,c("Emissions|CH4","Emissions|N2O","Emissions|F-Gases","Emissions|Non-CO2")))
nonco2=nonco2[variable=="Emissions|Non-CO2"]
nonco2$unit<-"Mt CO2-equiv/yr"
setcolorder(nonco2,colnames(cdata))
cdata=rbind(cdata,nonco2)
}
# "/" in the model name is unsafe for downstream file naming; rename it.
if(unique(cdata$model=="AIM/CGE")){cdata$model<-"AIM-CGE"}
# source script
# The sourced scripts consume `cdata` from this environment.
#source("waterfall_bridge.R")
source("waterfall_bridge_paper-layout.R")
# Figure 2b countries
cdata=all[model=="POLES GECO2019"&region%in%c("R5ASIA","R5LAM","R5REF","R5OECD90+EU","R5MAF")&variable=="Emissions|Kyoto Gases"] # POLES GECO2019, AIM/CGE, IMAGE 3.0, REMIND-MAgPIE 1.7-3.0, COPPE-COFFEE 1.1,MESSAGEix-GLOBIOM_1.0, WITCH 5.0
if(unique(cdata$model=="AIM/CGE")){cdata$model<-"AIM-CGE"}
source("waterfall_bridge_regions.R")
# for PROMETHEUS and TIAM for CO2 instead of GHG
cdata=all[model=="TIAM_Grantham_v3.2"&region%in%c("R5ASIA","R5LAM","R5REF","R5OECD90+EU","R5MAF")&variable=="Emissions|CO2"] #TIAM_Grantham_v3.2, PROMETHEUS
source("waterfall_bridge_regions.R")
### Figure collection
#TODO: models as panel (1 figure sector, 1 figure region, latter to SI?)
### Additional table to show model ranges
# Purpose: build a table of sectoral shares of the NDCplus->Bridge emission
# reduction (min - max (median) across models) for 2030/2050 and export to xlsx.
# Aggregate non-CO2 gases into one CO2-equivalent series.
# NOTE(review): 25 and 298/1000 appear to be AR4 GWP100 factors (N2O also
# converted kt->Mt) — confirm against the project's GWP convention.
nonco2=all[variable%in%c("Emissions|CH4","Emissions|N2O","Emissions|F-Gases")]
nonco2$unit<-NULL
nonco2=spread(nonco2,variable,value)
nonco2=nonco2%>%mutate(`Emissions|Non-CO2`=((`Emissions|CH4`*25)+(`Emissions|N2O`*298/1000)+`Emissions|F-Gases`))
nonco2=data.table(gather(nonco2,variable,value,c("Emissions|CH4","Emissions|N2O","Emissions|F-Gases","Emissions|Non-CO2")))
nonco2=nonco2[variable=="Emissions|Non-CO2"]
nonco2$unit<-"Mt CO2-equiv/yr"
setcolorder(nonco2,colnames(all))
sect=rbind(all,nonco2)
# Keep the sectoral emission variables for global NDCplus/Bridge in 2030/2050;
# PROMETHEUS and TIAM are excluded (no non-CO2 reporting, see above).
sectoral = sect[variable%in%c("Emissions|CO2|Energy|Supply","Emissions|CO2|Energy|Demand|Industry","Emissions|CO2|Energy|Demand|Residential and Commercial","Emissions|CO2|Energy|Demand|Transportation",
"Emissions|CO2|Industrial Processes","Emissions|CO2|AFOLU","Emissions|Non-CO2")&
period%in%c(2030,2050)&Scope=="global"®ion=="World"&Category%in%c("NDCplus","Bridge")&!model%in%c("PROMETHEUS","TIAM_Grantham_v3.2")]
# Sum sectors into a total, then compute each sector's NDCplus-Bridge reduction.
sectoral = spread(sectoral[,!c('unit'),with=FALSE],variable,value)
sectoral = sectoral%>%mutate(total=`Emissions|CO2|Energy|Supply`+`Emissions|CO2|Energy|Demand|Industry`+`Emissions|CO2|Energy|Demand|Residential and Commercial`+`Emissions|CO2|Energy|Demand|Transportation`+
`Emissions|CO2|Industrial Processes`+`Emissions|CO2|AFOLU`+`Emissions|Non-CO2`)
sectoral = data.table(gather(sectoral,variable,value,c("Emissions|CO2|Energy|Supply","Emissions|CO2|Energy|Demand|Industry","Emissions|CO2|Energy|Demand|Residential and Commercial","Emissions|CO2|Energy|Demand|Transportation",
"Emissions|CO2|Industrial Processes","Emissions|CO2|AFOLU","Emissions|Non-CO2","total")))
sectoral = spread(sectoral[,!c('Baseline','scenario'),with=F],Category,value)
sectoral = sectoral%>%mutate(reduction=NDCplus-Bridge)
# Merge each sector's reduction (.x) against the total reduction (.y) to get
# the sectoral share of the total reduction in %.
total = sectoral[,`:=`(NDCplus = NULL, Bridge = NULL)]
total = total[variable=="total"]
sectoral = merge(sectoral,total,by=c("model","region","period","Scope"))
sectoral = sectoral%>%mutate(share=reduction.x/reduction.y*100)
sectoralrange = sectoral[,list(min=min(share,na.rm=T),max=max(share,na.rm=T),med=median(share,na.rm=T)),by=c("region","period","Scope","variable.x")]
sectoralrange=sectoralrange[!variable.x=="total"]
# Format as "min - max (median)" display strings.
sectoralrange$min <- round(sectoralrange$min,digits=1)
sectoralrange$max <- round(sectoralrange$max,digits=1)
sectoralrange$med <- round(sectoralrange$med,digits=1)
sectoralrange$display = paste(sectoralrange$min,"-",sectoralrange$max,"(",sectoralrange$med,")")
sectoralrange=sectoralrange[,`:=`(region=NULL,Scope=NULL,min=NULL,med=NULL,max=NULL)]
setnames(sectoralrange,"period","Year")
sectoralrange=spread(sectoralrange,variable.x,display)
# Short, human-readable column names for the exported table.
setnames(sectoralrange,"Emissions|CO2|AFOLU","AFOLU")
setnames(sectoralrange,"Emissions|CO2|Energy|Demand|Industry","Industry")
setnames(sectoralrange,"Emissions|CO2|Energy|Demand|Transportation","Transport")
setnames(sectoralrange,"Emissions|CO2|Energy|Demand|Residential and Commercial","Buildings")
setnames(sectoralrange,"Emissions|CO2|Energy|Supply","Supply")
setnames(sectoralrange,"Emissions|CO2|Industrial Processes","Industrial Processes")
setnames(sectoralrange,"Emissions|Non-CO2","Non-CO2")
setcolorder(sectoralrange,c("Year","Supply","Industry","Buildings","Transport","Industrial Processes","AFOLU","Non-CO2"))
write.xlsx2(sectoralrange,paste(cfg$outdir,"/waterfall_range.xlsx",sep=""),sheetName="data",append=F,row.names = F)
# Emissions ---------------------------------------------------------------
### Figure elements
# Figure 3a GHG emissions pathways
# Purpose: global Kyoto-gas pathways to 2050 — per-model lines, cross-model
# median line, min/max funnels, and 2030/2050 range whiskers per scenario.
vars="Emissions|Kyoto Gases"
scens <- c("CurPol","NDCplus","Bridge","2Deg2020","2Deg2030","1p5 CD-LINKS") #"NDCMCS", ,"1p5 SSP"
# FIX: the two filters below previously read `..."national"®ion=="World"` —
# the "&reg" of "&region" had been mangled into the "®" character, producing an
# undefined symbol at runtime. Restored the intended "&region" conjunction.
plotdata=all[variable%in%vars & Category%in%scens&!Scope=="national"&region=="World"]
#plotdata$period=as.numeric(as.character(plotdata$period))
range=all[variable%in%vars & Category%in%scens&!Scope=="national"&region=="World",list(min=min(value,na.rm=T),max=max(value,na.rm=T),med=median(value,na.rm=T)),by=c("Category","variable","period")]
#range$period=as.numeric(as.character(range$period))
# emissions in Gt
# Convert Mt -> Gt for plotting (both per-model data and range statistics).
plotdata$value=plotdata$value/1000
plotdata$unit<-"Gt CO2-equiv/yr"
range$min=range$min/1000
range$max=range$max/1000
range$med=range$med/1000
F3a = ggplot(plotdata)
F3a = F3a + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1)
F3a = F3a + geom_line(data=range,aes(x=period,y=med,colour=Category),size=2.5)
# Bridge gets a stronger funnel (alpha 0.5) than the other scenarios (0.1).
F3a = F3a + geom_ribbon(data=range[!Category=="Bridge"],aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.1,show.legend=F)
F3a = F3a + geom_ribbon(data=range[Category=="Bridge"],aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.5,show.legend=F)
# Vertical min-max whiskers at 2030/2050; the x positions are nudged
# (2050.5, 2051, ...) so overlapping scenarios stay readable.
F3a = F3a + geom_segment(data=range[period %in% c(2050) & Category=="CurPol"], stat="identity", aes(x=2050, xend=2050, y=min, yend=max, size=1.5, colour=Category), show.legend=FALSE)
F3a = F3a + geom_segment(data=range[period %in% c(2050) & Category=="NDCplus"], stat="identity", aes(x=2050, xend=2050, y=min, yend=max, size=1.5, colour=Category), show.legend=FALSE)
F3a = F3a + geom_segment(data=range[period %in% c(2050) & Category=="Bridge"], stat="identity", aes(x=2050, xend=2050, y=min, yend=max, size=1.5, colour=Category), show.legend=FALSE)
F3a = F3a + geom_segment(data=range[period %in% c(2050) & Category=="2Deg2020"], stat="identity", aes(x=2050.5, xend=2050.5, y=min, yend=max, size=1.5, colour=Category), show.legend=FALSE)
F3a = F3a + geom_segment(data=range[period %in% c(2050) & Category=="2Deg2030"], stat="identity", aes(x=2051, xend=2051, y=min, yend=max, size=1.5, colour=Category), show.legend=FALSE)
F3a = F3a + geom_segment(data=range[period %in% c(2030) & Category=="Bridge"], stat="identity", aes(x=2030, xend=2030, y=min, yend=max, size=1.5, colour=Category), show.legend=FALSE)
F3a = F3a + geom_segment(data=range[period %in% c(2030) & Category=="2Deg2020"], stat="identity", aes(x=2030.5, xend=2030.5, y=min, yend=max, size=1.5, colour=Category), show.legend=FALSE)
F3a = F3a + geom_segment(data=range[period %in% c(2030) & Category=="2Deg2030"], stat="identity", aes(x=2031, xend=2031, y=min, yend=max, size=1.5, colour=Category), show.legend=FALSE)
# Median markers next to each whisker.
F3a = F3a + geom_point(data=range[period %in% c(2050)&Category%in%c("2Deg2020")],aes(x=2050.7,y=med,colour=Category,size=1.5),show.legend = FALSE)
F3a = F3a + geom_point(data=range[period %in% c(2050)&Category%in%c("2Deg2030")],aes(x=2051.2,y=med,colour=Category,size=1.5),show.legend = FALSE)
F3a = F3a + geom_point(data=range[period %in% c(2050)&Category%in%c("Bridge","CurPol","NDCplus")],aes(x=2050.2,y=med,colour=Category,size=1.5),show.legend = FALSE)
F3a = F3a + geom_point(data=range[period %in% c(2030)&Category%in%c("2Deg2020")],aes(x=2030.7,y=med,colour=Category,size=1.5),show.legend = FALSE)
F3a = F3a + geom_point(data=range[period %in% c(2030)&Category%in%c("Bridge")],aes(x=2030.2,y=med,colour=Category,size=1.5),show.legend = FALSE)
F3a = F3a + geom_point(data=range[period %in% c(2030)&Category%in%c("2Deg2030")],aes(x=2031.2,y=med,colour=Category,size=1.5),show.legend = FALSE)
F3a = F3a + xlim(2010,2052) + scale_y_continuous(breaks=c(0,10,20,30,40,50,60,70),limits=c(-5,75))
F3a = F3a + scale_colour_manual(values=plotstyle(scens))
F3a = F3a + scale_fill_manual(values=plotstyle(scens))
F3a = F3a + scale_linetype_manual(values= c("POLES GECO2019" = "dashed","AIM/CGE" = "solid","IMAGE 3.0"= "dotdash","REMIND-MAgPIE 1.7-3.0"= "twodash","WITCH 5.0"= "dotdash","MESSAGEix-GLOBIOM_1.0"= "longdash","COPPE-COFFEE 1.1"= "dotted"))
F3a = F3a + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(plotdata$unit),"]"))+ xlab("")
F3a = F3a + theme_bw() + theme(axis.text.y=element_text(size=20)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=20)) +
theme(axis.text.x = element_text(size=20,angle=90)) + theme(legend.text=element_text(size=14),legend.title=element_blank(),legend.key.width = unit(1,"cm")) #legend.key.size = unit(1.5, "cm"),
F3a = F3a + theme(legend.position="bottom")
#For PhD layout:
#F3a = F3a + theme(legend.position=c(0.2,0.2)) + theme(legend.text=element_text(size=20))
F3a
ggsave(file=paste(cfg$outdir,"/F3a_GHG_all_global_models_world_CurPol-NDC-Bridge-2Deg2020_funnel.png",sep=""),F3a,width=16,height=12,dpi=200)
#For PhD layout:
#ggsave(file=paste(cfg$outdir,"/F3a_GHG_all_global_models_world_CurPol-NDC-Bridge-2Deg2020_funnel_PhD.png",sep=""),F3a,width=16,height=12,dpi=200)
# SI variant of Figure 3a: pathways over 2050-2100 with range whiskers at 2100.
# FIX: the filter previously read `..."national"®ion=="World"` — the "&reg" of
# "&region" had been mangled into the "®" character; restored "&region".
plotdata2=all[variable%in%vars & Category%in%scens&!Scope=="national"&region=="World"]
# Convert Mt -> Gt for plotting.
plotdata2$value=plotdata2$value/1000
plotdata2$unit<-"Gt CO2-equiv/yr"
F3aSI = ggplot(plotdata2)
F3aSI = F3aSI + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1.5)
#F3aSI = F3aSI + geom_ribbon(data=range,aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.5)
# 2100 min-max whiskers per scenario; x positions nudged to avoid overlap.
F3aSI = F3aSI + geom_segment(data=range[period %in% c(2100) & Category=="CurPol"], stat="identity", aes(x=2100, xend=2100, y=min, yend=max, size=1.5, colour=Category), show.legend=FALSE)
F3aSI = F3aSI + geom_segment(data=range[period %in% c(2100) & Category=="NDCplus"], stat="identity", aes(x=2100, xend=2100, y=min, yend=max, size=1.5, colour=Category), show.legend=FALSE)
F3aSI = F3aSI + geom_segment(data=range[period %in% c(2100) & Category=="Bridge"], stat="identity", aes(x=2100, xend=2100, y=min, yend=max, size=1.5, colour=Category), show.legend=FALSE)
F3aSI = F3aSI + geom_segment(data=range[period %in% c(2100) & Category=="2Deg2020"], stat="identity", aes(x=2100.5, xend=2100.5, y=min, yend=max, size=1.5, colour=Category), show.legend=FALSE)
F3aSI = F3aSI + geom_segment(data=range[period %in% c(2100) & Category=="2Deg2030"], stat="identity", aes(x=2101, xend=2101, y=min, yend=max, size=1.5, colour=Category), show.legend=FALSE)
# Median markers next to each whisker.
F3aSI = F3aSI + geom_point(data=range[period %in% c(2100)&Category%in%c("2Deg2020")],aes(x=2100.7,y=med,colour=Category,size=1.5),show.legend = FALSE)
F3aSI = F3aSI + geom_point(data=range[period %in% c(2100)&Category%in%c("2Deg2030")],aes(x=2101.2,y=med,colour=Category,size=1.5),show.legend = FALSE)
F3aSI = F3aSI + geom_point(data=range[period %in% c(2100)&Category%in%c("Bridge","CurPol","NDCplus")],aes(x=2100.2,y=med,colour=Category,size=1.5),show.legend = FALSE)
F3aSI = F3aSI + xlim(2050,2102) + scale_y_continuous(breaks=c(-20,-10,0,10,20,30,40,50,60,70,80,90,100),limits=c(-20,100))
F3aSI = F3aSI + scale_colour_manual(values=plotstyle(scens))
F3aSI = F3aSI + scale_fill_manual(values=plotstyle(scens))
F3aSI = F3aSI + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(plotdata2$unit),"]"))+ xlab("")
F3aSI = F3aSI + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=14),legend.title=element_text(size=14))
F3aSI = F3aSI + theme(legend.position="bottom")
F3aSI
ggsave(file=paste(cfg$outdir,"/F3aSI_GHG_all_global_models_world_CurPol-NDC-Bridge-2Deg2020_funnel.png",sep=""),F3aSI,width=16,height=12,dpi=200)
# SI variant 2: compare GPP/Bridge scenarios with and without carbon tax, for
# the subset of models that ran "GPP_notax".
scens=c("GPP_notax","Bridge_notax","Bridge","GPP")
mods=unique(all[Category=="GPP_notax"]$model)
# FIX: the two filters below previously read `..."national"®ion=="World"` —
# the "&reg" of "&region" had been mangled into the "®" character; restored
# the intended "&region" conjunctions.
range=all[variable%in%vars & Category%in%scens&!Scope=="national"&region=="World"&model%in%mods,list(min=min(value,na.rm=T),max=max(value,na.rm=T),med=median(value,na.rm=T)),by=c("Category","variable","period")]
F3aSI2 = ggplot(all[variable%in%vars & Category%in%scens&!Scope=="national"&region=="World"&model%in%mods])
F3aSI2 = F3aSI2 + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1.5)
F3aSI2 = F3aSI2 + geom_ribbon(data=range,aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.5)
F3aSI2 = F3aSI2 + xlim(2010,2051)+ scale_y_continuous(breaks=c(0,10000,20000,30000,40000,50000,60000,70000,80000),limits=c(0,85000))
F3aSI2 = F3aSI2 + scale_colour_manual(values=plotstyle(scens))
F3aSI2 = F3aSI2 + scale_fill_manual(values=plotstyle(scens))
F3aSI2 = F3aSI2 + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
F3aSI2 = F3aSI2 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
F3aSI2 = F3aSI2 + theme(legend.position="bottom")
F3aSI2
ggsave(file=paste(cfg$outdir,"/F3aSI2_GHG_all_global_models_world_GPP-Bridge-notax_funnel.png",sep=""),F3aSI2,width=16,height=12,dpi=200)
# SI variant 3: Bridge vs GPP pathways for IMAGE and MESSAGE only (no funnel).
scens=c("Bridge","GPP")
mods=c("IMAGE 3.0","MESSAGEix-GLOBIOM_1.0") #,"AIM/CGE" "PROMETHEUS"
# FIX: the two filters below previously read `..."national"®ion=="World"` —
# the "&reg" of "&region" had been mangled into the "®" character; restored
# the intended "&region" conjunctions.
range=all[variable%in%vars & Category%in%scens&!Scope=="national"&region=="World"&model%in%mods,list(min=min(value,na.rm=T),max=max(value,na.rm=T),med=median(value,na.rm=T)),by=c("Category","variable","period")]
F3aSI3 = ggplot(all[variable%in%vars & Category%in%scens&!Scope=="national"&region=="World"&model%in%mods])
F3aSI3 = F3aSI3 + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1.5)
#F3aSI3 = F3aSI3 + geom_ribbon(data=range,aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.5)
F3aSI3 = F3aSI3 + xlim(2010,2051)+ scale_y_continuous(breaks=c(0,10000,20000,30000,40000,50000,60000,70000,80000),limits=c(0,85000))
F3aSI3 = F3aSI3 + scale_colour_manual(values=plotstyle(scens))
F3aSI3 = F3aSI3 + scale_fill_manual(values=plotstyle(scens))
F3aSI3 = F3aSI3 + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
F3aSI3 = F3aSI3 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
F3aSI3 = F3aSI3 + theme(legend.position="bottom")
F3aSI3
ggsave(file=paste(cfg$outdir,"/F3aSI3_GHG_IMAGE-MESSAGE_world_GPP-Bridge_funnel.png",sep=""),F3aSI3,width=16,height=12,dpi=200)
# Convergence scenario
scens=c("NDCplus-conv","NDCplus")
mods=unique(all[Category=="NDCplus-conv"]$model)
range=all[variable%in%vars & Category%in%scens&!Scope=="national"®ion=="World"&model%in%mods,list(min=min(value,na.rm=T),max=max(value,na.rm=T),med=median(value,na.rm=T)),by=c("Category","variable","period")]
F3aSI4 = ggplot(all[variable%in%vars & Category%in%scens&!Scope=="national"®ion=="World"&model%in%mods&period%in%c(2010,2020,2030,2040,2050,2060,2070,2080,2090,2100)])
F3aSI4 = F3aSI4 + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1.5)
F3aSI4 = F3aSI4 + geom_ribbon(data=range[period%in%c(2010,2020,2030,2040,2050,2060,2070,2080,2090,2100)],aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.5)
F3aSI4 = F3aSI4 + scale_x_continuous(breaks=c(2030,2040,2050,2060,2070,2080,2090,2100),limits=c(2030,2100))+ scale_y_continuous(breaks=c(0,10000,20000,30000,40000,50000,60000,70000,80000),limits=c(0,85000))
F3aSI4 = F3aSI4 + scale_colour_manual(values=plotstyle(scens))
F3aSI4 = F3aSI4 + scale_fill_manual(values=plotstyle(scens))
F3aSI4 = F3aSI4 + ylab(paste(unique(all[variable%in%vars]$variable),"[",unique(all[variable%in%vars]$unit),"]"))+ xlab("")
F3aSI4 = F3aSI4 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
theme(axis.text.x = element_text(size=16,angle=90)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
F3aSI4 = F3aSI4 + theme(legend.position="bottom")
F3aSI4
ggsave(file=paste(cfg$outdir,"/F3aSI4_GHG_all_global_models_world_NDCplus-convergence_funnel.png",sep=""),F3aSI4,width=16,height=12,dpi=200)
# Figure 3b Emissions relative to NDC
# Figure 3c Rate of change
# Percent change of GHG / CO2 emissions in 2030 and 2050 relative to 2015,
# plotted per region as bars (median across models) with per-model points.
# BUG FIX(review): the "(R)ion" mojibake ("&reg" rendered as U+00AE) has been
# restored to "&region" in the data filters below.
scens <- c("CurPol","NDCplus","Bridge","2Deg2020","2Deg2030") #"NDCMCS",
emisrednew = all[variable%in%c("Emissions|Kyoto Gases","Emissions|CO2")&Category%in%scens&period%in%c(2015,2030,2050)]
emisrednew = spread(emisrednew,period,value)
emisrednew=na.omit(emisrednew)
# Relative changes in %, anchored on 2015
emisrednew = emisrednew%>%mutate(rel50= ((`2050`-`2015`)/`2015`)*100,rel30=((`2030`-`2015`)/`2015`)*100)
emisrednew = data.table(gather(emisrednew,period,value,c('2015','2030','2050','rel30','rel50')))
emisrednew = emisrednew[period%in%c("rel50","rel30")]
emisrednew$unit <- "%"
emisrednew[period=="rel50"]$period<-2050
emisrednew[period=="rel30"]$period<-2030
emisrednewm = emisrednew[,list(median=median(value,na.rm=T),min=min(value,na.rm=T),max=max(value,na.rm=T)),
                         by=c("Category","region","variable","unit","period")] #,min=min(value,na.rm=T),max=max(value,na.rm=T)
# Fix factor levels so scenarios / regions / models plot in a stable order
emisrednew$Category = factor(emisrednew$Category,levels=c("CurPol","NDCplus","Bridge","2Deg2020","2Deg2030"))
emisrednewm$Category = factor(emisrednewm$Category,levels=c("CurPol","NDCplus","Bridge","2Deg2020","2Deg2030"))
emisrednew$region = factor(emisrednew$region,levels=c("AUS","CAN","EU","JPN","USA","BRA","CHN","IDN","IND","ROK","RUS","World"))
emisrednewm$region = factor(emisrednewm$region,levels=c("AUS","CAN","EU","JPN","USA","BRA","CHN","IDN","IND","ROK","RUS","World"))
emisrednew$model = factor(emisrednew$model,levels=c("AIM/CGE","COPPE-COFFEE 1.1","IMAGE 3.0","MESSAGEix-GLOBIOM_1.0","POLES GECO2019","PROMETHEUS","REMIND-MAgPIE 1.7-3.0","TIAM_Grantham_v3.2","WITCH 5.0",
                                                    "*AIM/CGE[Korea]","*AIM/Enduse[Japan]","*BLUES","*GCAM_Canada","*GCAM-USA_COMMIT", "*India MARKAL","*IPAC-AIM/technology V1.0","*PRIMES_V1","*TIMES-AUS"))
F3c = ggplot()
F3c = F3c + geom_bar(data=emisrednewm[Category%in%scens&variable=="Emissions|Kyoto Gases"&region%in%c(regions,"World")],
                     aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F3c = F3c + geom_point(data=emisrednew[Category%in%scens&variable=="Emissions|Kyoto Gases"&region%in%c(regions,"World")],
                       aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
F3c = F3c + scale_shape_manual(values=cfg$man_shapes)
F3c = F3c + scale_fill_manual(values=plotstyle(scens))
F3c = F3c + scale_colour_manual(values=plotstyle(scens))
F3c = F3c + facet_wrap(~region,scales="fixed")
F3c = F3c + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
  theme(axis.text.x = element_text(size=14)) + theme(legend.text=element_text(size=11),legend.title=element_text(size=12))
F3c = F3c + ylab("GHG emissions relative to 2015 (%)")
F3c
ggsave(file=paste(cfg$outdir,"/F3c-GHG-emissions-reduction.png",sep=""),F3c,width=18,height=12,dpi=300)
### Figure collection
# Costs / investments -----------------------------------------------------
### Figure elements
# Figure 4a carbon price #TODO check differences between 3 tiers as in protocol
# BUG FIX(review): "(R)ion" mojibake restored to "&region" in the filters, and
# the ylab strings were missing the closing parenthesis after "tCO2".
vars=c("Price|Carbon")
scens <- c("CurPol","NDCplus","Bridge","2Deg2020","2Deg2030")
# R5 regions
cpricebar=all[variable%in%vars & Category%in%scens&!Scope=="national"&region%in%c("R5OECD90+EU","R5LAM","R5MAF")&period%in%c(2030,2050)]
cpricebarm=cpricebar[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),median=median(value,na.rm=T)),by=c("Category","region","variable","period")]
cpricebar$period=as.factor(cpricebar$period)
cpricebarm$period=as.factor(cpricebarm$period)
cpricebar$Category = factor(cpricebar$Category,levels=c("CurPol","NDCplus","Bridge","2Deg2020"))
cpricebarm$Category = factor(cpricebarm$Category,levels=c("CurPol","NDCplus","Bridge","2Deg2020"))
F4a = ggplot()
F4a = F4a + geom_bar(data=cpricebarm,aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F4a = F4a + geom_point(data=cpricebar, aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
F4a = F4a + facet_wrap(~region,nrow=1)
F4a = F4a + scale_shape_manual(values=cfg$man_shapes)
F4a = F4a + scale_color_manual(values=plotstyle(scens))
F4a = F4a + scale_fill_manual(values=plotstyle(scens))
F4a = F4a + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
  theme(axis.text.x = element_text(size=14)) + theme(legend.text=element_text(size=11),legend.title=element_text(size=12)) + theme(panel.spacing = unit(0, "lines"))
F4a = F4a + ylab("Carbon price (US$2010/tCO2)")
F4a
ggsave(file=paste(cfg$outdir,"/F4a_Carbon_price_bar.png",sep=""),F4a,width=18,height=12,dpi=300)
#individual countries rather than R5
cpricebar=all[variable%in%vars & Category%in%scens&!Scope=="national"&region%in%regions&period%in%c(2030,2050)]
cpricebarm=cpricebar[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),median=median(value,na.rm=T)),by=c("Category","region","variable","period")]
cpricebar$period=as.factor(cpricebar$period)
cpricebarm$period=as.factor(cpricebarm$period)
cpricebar$Category = factor(cpricebar$Category,levels=c("CurPol","NDCplus","Bridge","2Deg2020","2Deg2030"))
cpricebarm$Category = factor(cpricebarm$Category,levels=c("CurPol","NDCplus","Bridge","2Deg2020","2Deg2030"))
F4a1 = ggplot()
F4a1 = F4a1 + geom_bar(data=cpricebarm,aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F4a1 = F4a1 + geom_point(data=cpricebar, aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
F4a1 = F4a1 + ylim(0,2000)
F4a1 = F4a1 + facet_wrap(~region,nrow=4)
F4a1 = F4a1 + scale_shape_manual(values=cfg$man_shapes)
F4a1 = F4a1 + scale_color_manual(values=plotstyle(scens))
F4a1 = F4a1 + scale_fill_manual(values=plotstyle(scens))
F4a1 = F4a1 + theme_bw() + theme(axis.text.y=element_text(size=18)) + theme(strip.text=element_text(size=20)) + theme(axis.title=element_text(size=20)) +
  theme(axis.text.x = element_text(size=18)) + theme(legend.text=element_text(size=18),legend.title=element_text(size=20)) + theme(panel.spacing = unit(0, "lines"))
F4a1 = F4a1 + ylab("Carbon price (US$2010/tCO2)")+xlab("")
F4a1
ggsave(file=paste(cfg$outdir,"/F4a1_Carbon_price_bar_country.png",sep=""),F4a1,width=18,height=12,dpi=300)
#TODO fix this
# F4a1 = F4a1 + theme(legend.box="horizontal")
# tmp<-ggplot_gtable(ggplot_build(F4a1))
# leg<-which(sapply(tmp$grobs,function(x) x$name) =="guide-box")
# legend<-tmp$grobs[[leg]]
# F4a1=F4a1+theme(legend.position = "none")
# lay<-rbind(c(1,1,1),c(1,1,2))
# h=grid.arrange(F4a1,legend,layout_matrix=lay)
# Figure 4b policy costs
# Read model-reported policy costs (GDP loss) from CSV and express the Bridge
# scenario relative to the two 2-degree scenarios, in percent.
costsGDP = fread("data/policy costs.csv",sep=";", header=T)
costsGDP = data.table(gather(costsGDP,period,value,c(`2030`,`2050`)))
costsGDP = spread(costsGDP,Scenario,value)
# Relative GDP loss of Bridge vs 2Deg2020_V4 / 2deg2030_v4 (in %)
costsGDP = costsGDP%>%mutate(Bridgevs2020 = ((Bridge_V4 / `2Deg2020_V4`)-1)*100, Bridgevs2030 = ((Bridge_V4 / `2deg2030_v4` )-1)*100)
costsGDP = data.table(gather(costsGDP,Scenario,value,c('2Deg2020_V4','2deg2030_v4','Bridge_V4','Bridgevs2020','Bridgevs2030')))
costsGDP = costsGDP[Scenario%in%c('Bridgevs2020','Bridgevs2030')]
costsGDPm=costsGDP[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),median=median(value,na.rm=T)),by=c("Scenario","period")]
# Point shapes for the subset of global models present in the costs file
shapes_global=c("AIM/CGE" = 2, "MESSAGEix-GLOBIOM_1.0" = 7, "REMIND-MAgPIE 1.7-3.0" = 9,"WITCH 5.0" = 10)
F4b = ggplot()
F4b = F4b + geom_bar(data=costsGDPm,aes(x=period,y=median,fill=Scenario),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F4b = F4b + geom_point(data=costsGDP, aes(x=period,y=value,shape=Model,colour=Scenario,group=Scenario),size=5,position=position_dodge(width=0.66))
F4b = F4b + geom_errorbar(data=costsGDPm,aes(x=period,ymin=min,ymax=max,colour=Scenario),position=position_dodge(width=0.66),width=0.66)
F4b = F4b + ggtitle("a) GDP loss per tonne of CO2e abated in Bridge")
F4b = F4b + geom_hline(yintercept=0)
#F4b = F4b + geom_text(aes(x="2030",y=120),label ="a)",size=10)
F4b = F4b + scale_shape_manual(values=shapes_global) #cfg$man_shapes
F4b = F4b + scale_color_manual(values=c("Bridgevs2020"="#D55E00","Bridgevs2030"="#E69F00"),labels=c("Bridgevs2020"="Bridge vs 2Deg2020","Bridgevs2030"="Bridge vs 2Deg2030"))
F4b = F4b + scale_fill_manual(values=c("Bridgevs2020"="#D55E00","Bridgevs2030"="#E69F00"),labels=c("Bridgevs2020"="Bridge vs 2Deg2020","Bridgevs2030"="Bridge vs 2Deg2030"))
F4b = F4b + theme_bw() + theme(axis.text.y=element_text(size=24)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=24)) +
theme(axis.text.x = element_text(size=24)) + theme(legend.text=element_text(size=24),legend.title=element_text(size=26)) + theme(plot.title=element_text(size=26))
F4b = F4b + ylab("Relative to 2Deg2020 or 2Deg2030 (%)")+xlab("")
F4b
ggsave(file=paste(cfg$outdir,"/F4b_policy_costs_GDP_bar.png",sep=""),F4b,width=18,height=12,dpi=300)
# Grid arrange for figure 4 combined
# NOTE(review): `m14c` is not defined in this section -- presumably a panel
# created earlier in the script; confirm it exists before this point runs.
lay<-rbind(c(1),c(2))
F4=grid.arrange(F4b,m14c,layout_matrix=lay)
ggsave(file=paste(cfg$outdir,"/F4ab_GDP-loss_Carbon-price.png",sep=""),F4,width=14,height=16,dpi=300)
# Figure 4c Investments
# potential indicators:
# i. Investment|Energy supply
# ii. Total investments
# iii. Investment|Energy Efficiency
# iv. Investment|Energy Supply|CO2 Transport and Storage
# v. Investment|Energy Supply|Electricity|Transmission and Distribution
# vi. Investment|Energy Supply|Electricity|Electricity Storage
# vii. Investment|Energy Supply|Electricity|Nuclear
# viii. Investment|Energy Supply|Extraction|Bioenergy
# ix. Investment|Energy Supply|Electricity|Non-Biomass Renewables
# x. Investment|Energy Supply|Hydrogen|Fossil
# xi. Investment|Energy Supply|Hydrogen|Renewable
# xii. Investment|Energy Supply|Electricity|Fossil - or better:
# 1. Investment|Energy Supply|Electricity|Oil|w/o CCS
# 2. Investment|Energy Supply|Electricity|Gas|w/o CCS
# 3. Investment|Energy Supply|Electricity|Coal|w/o CCS
# xiii. Investment|Energy Supply|Extraction|Fossil
# xiv. Investment|Infrastructure|Residential and Commercial|Building Retrofits (because of the protocol)
# xv. Investment|Energy Demand|Transportation|Passenger|Road|LDV|EV (because of the protocol)
vars=c("Investment|Energy Supply|Electricity|Non-Biomass Renewables","Investment|Energy Supply|Electricity|Fossil")
scens <- c("CurPol","NDCplus","Bridge","2Deg2020","2Deg2030")
# BUG FIX(review): "(R)ion" mojibake restored to "&region".  `regio` and
# `years` are assumed to be region/period filters defined earlier in the
# script -- TODO confirm (`regio` may be a typo for `regions`).
INVbar=all[variable%in%vars&Category%in%scens&region%in%regio&period%in%years]
INVbarm=INVbar[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),median=median(value,na.rm=T)),by=c("Category","variable","period")]
INVbar$period=as.factor(INVbar$period)
INVbarm$period=as.factor(INVbarm$period)
INVbar$Category = factor(INVbar$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020","2Deg2030"))
INVbarm$Category = factor(INVbarm$Category,levels=c("CurPol","NDCplus","NDCMCS","Bridge","2Deg2020","2Deg2030"))
# Fossil electricity investments: median bars + per-model points + range bars
F4c1 = ggplot()
F4c1 = F4c1 + geom_bar(data=INVbarm[variable=="Investment|Energy Supply|Electricity|Fossil"],aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F4c1 = F4c1 + geom_point(data=INVbar[variable=="Investment|Energy Supply|Electricity|Fossil"], aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
F4c1 = F4c1 + geom_errorbar(data=INVbarm[variable=="Investment|Energy Supply|Electricity|Fossil"],aes(x=period,ymin=min,ymax=max,colour=Category),position=position_dodge(width=0.66))
F4c1 = F4c1 + scale_shape_manual(values=cfg$man_shapes)
F4c1 = F4c1 + scale_color_manual(values=plotstyle(scens))
F4c1 = F4c1 + scale_fill_manual(values=plotstyle(scens))
F4c1 = F4c1 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
  theme(axis.text.x = element_text(size=14)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
F4c1 = F4c1 + ylab(paste(unique(INVbar[variable=="Investment|Energy Supply|Electricity|Fossil"]$variable),"[",unique(INVbar$unit),"]"))
F4c1
ggsave(file=paste(cfg$outdir,"/F4c_Investments_fossil_bar.png",sep=""),F4c1,width=18,height=12,dpi=300)
# Non-biomass renewable electricity investments: same layout
F4c2 = ggplot()
F4c2 = F4c2 + geom_bar(data=INVbarm[variable=="Investment|Energy Supply|Electricity|Non-Biomass Renewables"],aes(x=period,y=median,fill=Category),stat="identity",alpha=0.5, position=position_dodge(width=0.66),width=0.66)
F4c2 = F4c2 + geom_point(data=INVbar[variable=="Investment|Energy Supply|Electricity|Non-Biomass Renewables"], aes(x=period,y=value,shape=model,colour=Category,group=Category),size=3,position=position_dodge(width=0.66))
F4c2 = F4c2 + geom_errorbar(data=INVbarm[variable=="Investment|Energy Supply|Electricity|Non-Biomass Renewables"],aes(x=period,ymin=min,ymax=max,colour=Category),position=position_dodge(width=0.66))
F4c2 = F4c2 + scale_shape_manual(values=cfg$man_shapes)
F4c2 = F4c2 + scale_color_manual(values=plotstyle(scens))
F4c2 = F4c2 + scale_fill_manual(values=plotstyle(scens))
F4c2 = F4c2 + theme_bw() + theme(axis.text.y=element_text(size=16)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=18)) +
  theme(axis.text.x = element_text(size=14)) + theme(legend.text=element_text(size=16),legend.title=element_text(size=18))
F4c2 = F4c2 + ylab(paste(unique(INVbar[variable=="Investment|Energy Supply|Electricity|Non-Biomass Renewables"]$variable),"[",unique(INVbar$unit),"]"))
F4c2
ggsave(file=paste(cfg$outdir,"/F4c_Investments_NBR_bar.png",sep=""),F4c2,width=18,height=12,dpi=300)
# extra figure on technology costs
# Capital and fixed O&M cost trajectories for solar PV and onshore wind,
# faceted per variable; also computes the Bridge-vs-NDCplus cost reduction in
# 2050.  BUG FIX(review): "(R)ion" mojibake restored to "&region".
vars = c ("OM Cost|Fixed|Electricity|Solar|PV","OM Cost|Fixed|Electricity|Wind|Onshore","Capital Cost|Electricity|Solar|PV","Capital Cost|Electricity|Wind|Onshore") #"OM Cost|Fixed|Electricity|Wind|Offshore", "Capital Cost|Electricity|Wind|Offshore",
scens <- c("NDCplus","Bridge","2Deg2030")
plotdata=all[variable%in%vars & Category%in%scens&!Scope=="national"&region=="World"]
range=plotdata[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),med=median(value,na.rm=T)),by=c("Category","variable","period")]
F4d = ggplot(plotdata)
F4d = F4d + facet_wrap(~variable,scales="free")
F4d = F4d + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1)
#F4d = F4d + geom_line(data=range,aes(x=period,y=med,colour=Category),size=2.5)
#F4d = F4d + geom_ribbon(data=range,aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.3)
F4d = F4d + xlim(2010,2050) #+ scale_y_continuous(breaks=c(-10,0,10,20,30,40,50,60,70,80),limits=c(-15,85))
F4d = F4d + scale_colour_manual(values=plotstyle(scens))
F4d = F4d + scale_fill_manual(values=plotstyle(scens))
F4d = F4d + ylab("US$2010/kW")+ xlab("")
F4d = F4d + theme_bw() + theme(axis.text.y=element_text(size=20)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=20)) +
  theme(axis.text.x = element_text(size=20,angle=90)) + theme(legend.text=element_text(size=14),legend.title=element_blank(),legend.key.width = unit(1,"cm")) #legend.key.size = unit(1.5, "cm"),
F4d = F4d + theme(legend.position="bottom")
F4d
ggsave(file=paste(cfg$outdir,"/F4d_tech_costs.png",sep=""),F4d,width=16,height=12,dpi=200)
# Cost reduction of Bridge relative to NDCplus in 2050, in percent
techcosts = plotdata
techcosts$scenario <- NULL
costreduction = spread(techcosts[Category%in%c("Bridge","NDCplus")&period==2050],Category,value)
costreduction = costreduction %>% mutate(reduction = (Bridge - NDCplus)/NDCplus *100)
costredrange=costreduction[,list(min=min(reduction,na.rm=T),max=max(reduction,na.rm=T),med=median(reduction,na.rm=T)),by=c("variable","unit")]
# Check budgets -----------------------------------------------------------
# Cumulative CO2 budgets via the helper in functions/calcBudget.R.
# BUG FIX(review): "(R)ion" mojibake restored to "&region" below.
source("functions/calcBudget.R")
all <- calcBudget(all,'Emissions|CO2','Carbon budget')
all <- calcBudget(all,'Emissions|CO2|Energy and Industrial Processes','Carbon budget|Energy and Industry')
all <- calcBudget(all,'Emissions|CO2|Energy','Carbon budget|Energy')
budget = all[variable%in%c("Carbon budget","Carbon budget|Energy and Industry","Carbon budget|Energy")]
budgetsel= budget[region=='World'&Scope=="global"&period==2100&variable=="Carbon budget"&Category%in%c("Bridge","2Deg2020","2Deg2030")] #"Bridge_notax","2Deg2030"
# source("functions/calcBudget_2015.R")
# all <- calcBudget_2015(all,'Emissions|CO2','Carbon budget_2015')
# all <- calcBudget_2015(all,'Emissions|CO2|Energy and Industrial Processes','Carbon budget_2015|Energy and Industry')
# all <- calcBudget_2015(all,'Emissions|CO2|Energy','Carbon budget_2015|Energy')
# Check gap closure -------------------------------------------------------
# How much of the NDC-to-2Deg (and NDC-to-1.5C) emissions gap the Bridge and
# GPP scenarios close, per region and period (in %).
scens <- c("CurPol","NDCplus","Bridge","2Deg2020","1p5 CD-LINKS","GPP")
gap = all[Scope=="global"&variable=="Emissions|Kyoto Gases"&region%in%c(regions,"World")&period%in%c(2030,2050)&Category%in%scens]
gap$scenario <- NULL
gap$Baseline <- NULL
gap = spread(gap,Category,value)
gap = gap%>%mutate(gap=NDCplus-`2Deg2020`,reduction=NDCplus-Bridge,closure=reduction/gap*100, gap15=NDCplus-`1p5 CD-LINKS`,closure15=reduction/gap15*100, reductionGPP=NDCplus-GPP,closureGPP=reductionGPP/gap*100)
gap2 = data.table(gather(gap,Category,value,c('2Deg2020','Bridge','CurPol','NDCplus',"1p5 CD-LINKS",'gap','reduction','closure','gap15','closure15','reductionGPP','closureGPP')))
gap2 =gap2[Category%in%c('gap','gap15','reduction','closure','closure15','reductionGPP', 'closureGPP')]
setnames(gap2,'Category','Indicator')
gaprange = gap2[,list(median=median(value,na.rm=T),min=min(value,na.rm=T),max=max(value,na.rm=T)),by=c('Indicator','period','region')]
# Check AFOLU reductions --------------------------------------------------
# Percent reduction of AFOLU CO2 emissions in 2030/2050 relative to 2015.
# BUG FIX(review): "(R)ion" mojibake restored to "&region" below.
AFOLU = all[Scope=="global"&variable=="Emissions|CO2|AFOLU"&region%in%c(regions,"World")&period%in%c(2015,2030,2050)&Category%in%scens]
AFOLU$scenario <- NULL
AFOLU$Baseline <- NULL
AFOLU = spread(AFOLU,period,value)
AFOLU = AFOLU%>%mutate(red2030=(`2030`-`2015`)/`2015`*100,red2050=(`2050`-`2015`)/`2015`*100)
AFOLU = data.table(gather(AFOLU,period,value,c('2015','2030','2050','red2030','red2050')))
AFOLU =AFOLU[period%in%c('red2030','red2050')]
AFOLU[period=="red2030"]$period<-2030
AFOLU[period=="red2050"]$period<-2050
AFOLU$variable<-"Reduction of AFOLU CO2 emissions"
AFOLUrange = AFOLU[,list(median=median(value,na.rm=T),min=min(value,na.rm=T),max=max(value,na.rm=T)),by=c("Category",'variable','period','region')]
# Check negative emissions ------------------------------------------------
# Total carbon sequestration (CCS + land use) versus net CO2 emissions for
# the deep-mitigation scenarios in 2050/2100.
scens = c("Bridge","2Deg2020","2Deg2030")
NegEmis = all[Scope=="global"&variable%in%c("Emissions|CO2","Carbon Sequestration|Land Use","Carbon Sequestration|CCS")&region%in%c("World")&period%in%c(2050,2100)&Category%in%scens] #Carbon sequestration ccs+land use or emissions co2 <0
NegEmis$scenario <- NULL
NegEmis$Baseline <- NULL
NegEmis = spread(NegEmis,variable,value)
# Models that do not report land-use sequestration are treated as zero
NegEmis$`Carbon Sequestration|Land Use`[is.na(NegEmis$`Carbon Sequestration|Land Use`)] <- 0
NegEmis = NegEmis%>%mutate(`Carbon Sequestration` = `Carbon Sequestration|CCS` + `Carbon Sequestration|Land Use` )
NegEmis = data.table(gather(NegEmis,variable,value,c('Carbon Sequestration|CCS','Carbon Sequestration|Land Use','Carbon Sequestration','Emissions|CO2')))
NegEmis = NegEmis[variable%in%c('Carbon Sequestration','Emissions|CO2')]
NegEmisRange = NegEmis[,list(median=median(value,na.rm=T),min=min(value,na.rm=T),max=max(value,na.rm=T)),by=c("Category",'variable','period','region')]
NegEmis = spread(NegEmis,Category,value)
# SDG indicators ----------------------------------------------------------
# extra figure on health
# Global air-pollutant emission trajectories (NDCplus vs Bridge) with
# min/median/max funnels across models.
# BUG FIX(review): "(R)ion" mojibake restored to "&region" below.
vars = c("Emissions|NOx","Emissions|VOC","Emissions|CO","Emissions|Sulfur","Emissions|BC","Emissions|OC")
scens <- c("NDCplus","Bridge")
plotdata=all[variable%in%vars & Category%in%scens&!Scope=="national"&region=="World"]
range=plotdata[,list(min=min(value,na.rm=T),max=max(value,na.rm=T),med=median(value,na.rm=T)),by=c("Category","variable","period")]
Fx = ggplot(plotdata)
Fx = Fx + facet_wrap(~variable,scales="free")
Fx = Fx + geom_line(aes(x=period,y=value,colour=Category, linetype=model),size=1)
Fx = Fx + geom_line(data=range,aes(x=period,y=med,colour=Category),size=2.5)
Fx = Fx + geom_ribbon(data=range,aes(x=period,ymin=min, ymax=max,fill=Category),alpha=0.3)
Fx = Fx + xlim(2010,2050) #+ scale_y_continuous(breaks=c(-10,0,10,20,30,40,50,60,70,80),limits=c(-15,85))
Fx = Fx + scale_colour_manual(values=plotstyle(scens))
Fx = Fx + scale_fill_manual(values=plotstyle(scens))
Fx = Fx + ylab("Mt/year")+ xlab("")
Fx = Fx + theme_bw() + theme(axis.text.y=element_text(size=20)) + theme(strip.text=element_text(size=14)) + theme(axis.title=element_text(size=20)) +
  theme(axis.text.x = element_text(size=20,angle=90)) + theme(legend.text=element_text(size=14),legend.title=element_blank(),legend.key.width = unit(1,"cm")) #legend.key.size = unit(1.5, "cm"),
Fx = Fx + theme(legend.position="bottom")
Fx
ggsave(file=paste(cfg$outdir,"/Fx_pollutant_emissions.png",sep=""),Fx,width=16,height=12,dpi=200)
|
f6e3d68ccea8a555da7b62eb05c6fd9eb73f2328
|
2d994c4a960ec254aa08933f7138c822baa6febe
|
/cachematrix.R
|
d5a20afce375dfa38b2ae2fbf2379adcebbe876a
|
[] |
no_license
|
fp-repo/ProgrammingAssignment2
|
6af9222cb405541c5d136032ca7d62ac65c2bd70
|
c6e1245345bdb963ac41abfecbf811a510c84587
|
refs/heads/master
| 2020-06-29T07:16:06.495958
| 2019-08-04T13:02:32
| 2019-08-04T13:02:32
| 200,472,272
| 0
| 0
| null | 2019-08-04T09:11:28
| 2019-08-04T09:11:27
| null |
UTF-8
|
R
| false
| false
| 1,626
|
r
|
cachematrix.R
|
##
## The functions makeCacheMatrix and cacheSolve have been designed as second
## assignment of the "R Programming" course over the Coursera platform. The two
## functions allow calculating and storing the inverse of a matrix in order to
## reduce the number of iterations executed by the system.
## The function makeCacheMatrix creates an object with two variables:
## - m: the matrix passed as function's parameter
## - i: the variable used to store the inverse of the matrix
## Create a cache-aware matrix wrapper: a list of four closures that share the
## environment holding the matrix `m` and its cached inverse `i`.
makeCacheMatrix <- function(m = matrix()) {
  # Cache starts empty
  i <- NULL
  # Replace the stored matrix and invalidate the cached inverse.
  set <- function(p) {
    m <<- p
    # BUG FIX: the original used `i <- NULL`, which only created a variable
    # local to set() and left a stale cached inverse in the enclosing
    # environment; `<<-` is required to actually reset the cache.
    i <<- NULL
  }
  # The variable m can be retrieved
  get <- function() {
    return(m)
  }
  # Assign the function's input to the variable i, which is used to store the inverse
  setsolve <- function(inv) {
    i <<- inv
  }
  # Return the inverse stored in the variable i
  getsolve <- function() {
    return(i)
  }
  # List of functions inside the object
  list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## The function cacheSolve
## Return the inverse of the special "cache matrix" object `x`, computing it
## with solve() only when no cached value exists yet.  Extra arguments are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (is.null(cached)) {
    # Cache miss: fetch the matrix, invert it, and store the result
    mat <- x$get()
    cached <- solve(mat, ...)
    x$setsolve(cached)
  } else {
    # Cache hit: announce and reuse the stored inverse
    message("getting cached data")
  }
  # Return a matrix that is the inverse of 'x'
  cached
}
|
772faeba7fe39a61f04f37bdd5707a3e8433e829
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#118.A#48.c#.w#7.s#15.asp/ctrl.e#1.a#3.E#118.A#48.c#.w#7.s#15.asp.R
|
73cbc27aa86e1068e27e8a5708c3c36dd3fb2ce7
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 91
|
r
|
ctrl.e#1.a#3.E#118.A#48.c#.w#7.s#15.asp.R
|
a339df0656f6cbcc1c8e666d968db0a7 ctrl.e#1.a#3.E#118.A#48.c#.w#7.s#15.asp.qdimacs 6773 19822
|
22808ca4563757e71d6d0c5b676e14181c0f95e3
|
0ca8fd2b2f7ed68d977f1cca1bb3c25b6acbdb26
|
/Dis_graph_review.R
|
4d3a90fb0410091ce1af734a2e235cebe6aa55c5
|
[] |
no_license
|
TheMoorLab/Tooth
|
87b1534628c9b06e9c87b1076f413d020754c604
|
b32bf333f1933a48436e9520ca1ae358a0c9f497
|
refs/heads/master
| 2023-04-07T10:42:06.842346
| 2021-04-07T07:36:28
| 2021-04-07T07:36:28
| 239,493,294
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,683
|
r
|
Dis_graph_review.R
|
# Jaccard distance plotted with force-directed graph layout
# MUST BE RUN AFTER umap_heatmaps.R OR must load:
library(plyr) #join
library(proxy) # more distances (ejaccard)
library(qgraph) # force-directed graph layout
# Average SCT expression of the variable features per cell-type/condition,
# computed from the `merged_harmony` Seurat object prepared upstream.
Idents(object = merged_harmony) <- "groups_bysize"
av.ex_top = AverageExpression(merged_harmony, add.ident = 'condition', features = merged_harmony@assays$SCT@var.features)
# av.ex.SCT2 = data.frame(t(av.ex_top$SCT)) # if not returning a seurat object, the data in average expression is returned in non-logspace
av.ex.SCT2 = data.frame(t(log1p(av.ex_top$SCT)))
# av.ex.RNA = data.frame(t(av.ex_top$RNA))
# Extended-Jaccard dissimilarity between cell-type/condition profiles
cluster_dist2 = dist(av.ex.SCT2 , method = "ejaccard")
dist_mat2 = as.matrix(cluster_dist2)
# Set up node legend labels for qgraph
# Sub row names' underscore with space
rownames(av.ex.SCT2) = gsub("_", " ", rownames(av.ex.SCT2))
vertex_names = rownames(av.ex.SCT2)
################################################ RANK PAIRS FROM SMALLEST DIST TO LARGEST (DONE AFTER REVIEW) ################################################
# For each cell type, find the smallest ejaccard dissimilarity (that isn't zero)
min_dists = apply(dist_mat2, 2,function(x) {min(x[x>0]) } )
# order the min_dists from smallest to largest
min_dists_sorted = sort(min_dists)
# Then find the id of that min in the original matrix (not excluding zeros/ if we use which.min above we get the wrong indeces because we would be looking at a sub (non-zero) arrays for each column)
# min_ids = mapply(function(x,y) {which(dist_mat2[x,] == min_dists_sorted[y])}, x = as.numeric(names(sort(min_dists))), y = 1:length(min_dists ))
min_ids = mapply(function(x,y) {which(dist_mat2[x,] == min_dists_sorted[y])}, x = names(sort(min_dists)), y = 1:length(min_dists ))
# Pair each cell type with its nearest neighbour, by name
test = cbind(names(sort(min_dists)), vertex_names[min_ids])
save.image(file = file.path('/IMCR_shares/Moorlab/Common/Tooth_project/R_analysis/r_output',"Review.Rdata"))
##################################################### PLOT FORCE DIRECTED GRAPH (ORIGINAL CODE) #####################################################
# node graph labels
nodes_numbers = 1:length(vertex_names)
row.names(dist_mat2) = nodes_numbers
colnames(dist_mat2) = nodes_numbers
# Set up groups for qgraph
conditions = c("perio", "pulp")
cc_collapsed_ids = lapply(conditions, function(a) grep(pattern = a, x = vertex_names))
names(cc_collapsed_ids) = conditions
################################ Varying vertex sizes by proportions #########################################
# Estimate proportions for each cell type relative to pulp and perio totals (since total population sizes are quite different between perio and pulp)
melted_celltype_condition2 = cbind.data.frame(cellType =merged_harmony@meta.data$groups_bysize, condition=merged_harmony@meta.data$condition)
melted_celltype_by_condition2 = as.data.frame.matrix(table(melted_celltype_condition2))
cellType_normalized_by_condition2 = as.data.frame(t(melted_celltype_by_condition2)/rowSums(t(melted_celltype_by_condition2)))
celltype_props_by_sample <- cellType_normalized_by_condition2 %>%
  mutate(ind = factor(row_number())) %>%
  gather(variable, value, -ind)
# Gathering and mutating cause the sample (perio/pulp) names to be replaced with (1,2), so we create a table with the key and merge back with celltype_props_by_sample
key_cat = data.frame(sample = c('perio', 'pulp'), ind = c(1,2))
merged_props = merge(celltype_props_by_sample, key_cat)[-1]#
# We're only interested in the label column with combined labels of cell type (variable) and sample
celltype_props_by_sample = cbind.data.frame(prop= merged_props$value, CTsample = paste(merged_props$variable, merged_props$sample))
################################################################################################################
# Order proportions according to vertex order in distance matrix
v_names = data.frame(CTsample=vertex_names)
ordered_props = join(v_names, celltype_props_by_sample)
# Because the range of cell type proportions is very wide and the node size variation would be too large to plot, we compress the values through a ln transform
# BUG FIX(review): this assignment had been commented out, but `compress_sizes`
# is still used in the first qgraph() call below, which would fail with
# "object 'compress_sizes' not found"; the definition is restored here.
compress_sizes = log(1000*ordered_props$prop) # the 1000 scaling is just so there are no values between 0 and 1 which would lead to a negative value after the log transform.
# Force-directed graph layout of distance matrix
# Nodes are normalized by cell type proportion (relative to perio or pulp sample sizes)
dist_mi <- 1-dist_mat2 # 1 as qgraph takes similarity matrices as input
pdf(file = file.path("/IMCR_shares/Moorlab/Common/Tooth_project/R_analysis/ldvr_analyses/PseudobulkCorrs",
                     "PerioPulpEjaccard_FDGL_normalized_node_sizes.pdf"), width=16, height=16)
# qgraph(dist_mi, layout='spring', vsize=3, groups = test, legend = TRUE)
# BUG FIX(review): `length(cc_collapsed_ids[1])` is always 1 (length of a
# one-element list); `[[1]]`/`[[2]]` give the number of vertices per condition.
qgraph(dist_mi, layout='spring', vsize=compress_sizes, color = c(rep('blue',length(cc_collapsed_ids[[1]])), rep('red', length(cc_collapsed_ids[[2]]))), nodeNames = vertex_names, groups = cc_collapsed_ids, legend.mode="style2")
dev.off()
# Force-directed graph layout of distance matrix (node sizes are all equal)
dist_mi <- 1-dist_mat2 # 1 as qgraph takes similarity matrices as input
pdf(file = file.path("/IMCR_shares/Moorlab/Common/Tooth_project/R_analysis/ldvr_analyses/PseudobulkCorrs",
                     "PerioPulpEjaccard_FDGL_fixed_node_sizes.pdf"), width=16, height=16)
# qgraph(dist_mi, layout='spring', vsize=3, groups = test, legend = TRUE)
qgraph(dist_mi, layout='spring', vsize=3, nodeNames = vertex_names, groups = cc_collapsed_ids, legend.mode="style2")
dev.off()
|
67d7314720b9b1b102da91138b4ca5946e17ef71
|
93a0fb25288a0602a6189b6930bc6329ee4ac124
|
/pandey_200151827_Q9_3.R
|
de59d6ab60ee5ac9e749a23751283efea3cefd42
|
[] |
no_license
|
apan255/R_Workspace
|
001cf88b1ae0b697358ab9a0f3c0c26bfd40f59e
|
5639fa2bec0943d9e26d27fc5c1da8696aeb0ff0
|
refs/heads/master
| 2021-01-11T16:17:45.254730
| 2017-01-25T21:17:33
| 2017-01-25T21:17:33
| 80,058,682
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 185
|
r
|
pandey_200151827_Q9_3.R
|
# Draw 300 samples from N(mean = 24, sd = 5) and check normality with a
# normal QQ plot plus reference line.
# FIX(review): the original relied on partial argument matching (`m = 24`);
# the argument is spelled out as `mean` for clarity and robustness.
normal_random_variable <- rnorm(300, mean = 24, sd = 5)
qqnorm(normal_random_variable, main="Sample normal qqplot of normal random variable with mean 24 and sd 5")
qqline(normal_random_variable)
|
87f12df862a846599f15403f6b054bae9b2c5221
|
33bb983cc20cff0a5bfc8ef342addd678274b061
|
/Mail/mail.R
|
1f113995e7ced4e8296c6403066c115217ab30b4
|
[] |
no_license
|
Gottavianoni/R
|
00c65142fd29e62cc010147b9089eaecd85f0ea9
|
6918a4dec29faa442567f7ce271c38d001b9a2af
|
refs/heads/master
| 2021-01-22T20:35:18.829590
| 2017-04-05T15:17:39
| 2017-04-05T15:17:39
| 85,334,011
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 406
|
r
|
mail.R
|
#MAIL
# Send an email through a local Outlook installation via the COM API
# (Windows-only; requires Outlook to be installed and configured).
# Install the COM client only when it is missing, then load it.
if (!requireNamespace("RDCOMClient", quietly = TRUE)) install.packages("RDCOMClient")
library(RDCOMClient)
## init com api
OutApp <- COMCreate("Outlook.Application")
## create an email (0 = olMailItem)
outMail = OutApp$CreateItem(0)
## configure email parameter
outMail[["To"]] = "*****"
outMail[["subject"]] = "some subject"
outMail[["body"]] = "some body"
## attach the file BEFORE sending -- the original script called Send() first,
## so the attachment was never included in the delivered message.
## NOTE(review): `path_to_attch_file` must be defined by the caller before
## this script runs.
outMail[["Attachments"]]$Add(path_to_attch_file)
## send it
outMail$Send()
|
6f16dff48837a9ccf205b900bb9a069c1c6d7248
|
bad7f54bb4235f354a70dcf10f28f2e7b139ba4b
|
/R_script/Sylvioidea_Ultimate/Geodesic/find_potent_knum.R
|
e99b895c78a717fe3d67b55c37ebca15ef5067d2
|
[] |
no_license
|
HKyleZhang/Sylvioidea_project
|
05f13565b00fdb63cc4f6d6b5f302424a5923af4
|
c581310129f9f4e0d7a53790bb74bc3ddc88e4e0
|
refs/heads/master
| 2020-04-11T03:32:18.118864
| 2019-03-19T15:02:08
| 2019-03-19T15:02:08
| 161,481,178
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 762
|
r
|
find_potent_knum.R
|
# Select candidate k-numbers from per-k log-likelihood differences.
# Reads logLkhforR.csv (log-likelihoods in columns 2 and 3, one row per k,
# assumed to have at least 14 rows -- TODO confirm) and writes the candidate
# indices to the file "potent_knum", one per line.
logd <- read.csv("logLkhforR.csv", header = TRUE)
# Absolute difference between the two log-likelihood columns, per k.
logd <- abs(logd[,2]-logd[,3])
logdmin <- min(logd, na.rm = TRUE)
min.ind <- which(logd == logdmin)  # NOTE(review): computed but never used below
# logper[i+1] = logd[i+1] / logd[1]: difference at k relative to the first k.
# logper[1] keeps its seed value 1 from seq(1:14).
logper <- seq(1:14)
for (i in 1:13) {
  logper[i+1] <- logd[i+1]/logd[1]
}
# Collect indices i where logd has a local minimum AND the next two relative
# differences fall at or below the threshold. Slots are stored as strings;
# unused slots keep the literal string "NA".
res <- array("NA", 14)
j <- 1
threshd <- 0.5
for (i in 1:12){
  if (logd[i] <= logd[i+1]){
    if (i > 2){
      # Interior point: also require logd[i] <= logd[i-1] (true local minimum).
      if (logd[i] <= logd[i-1]) {
        if (logper[i+1] <= threshd & logper[i+2] <= threshd){
          res[j] <- i
          j <- j+1
        }
      }
    }
    else {
      # i = 1 or 2: no left-neighbour check possible/required.
      if (logper[i+1] <= threshd & logper[i+2] <= threshd){
        res[j] <- i
        j <- j+1
      }
    }
  }
}
# Drop unused slots and write one candidate k per line (no quotes, no names).
res <- res[res != "NA"]
res <- as.data.frame(res)
write.table(res, file = "potent_knum", quote = FALSE, col.names = FALSE, row.names = FALSE)
|
6f4ec2c486fb72d812990c29a804f22ca6ca6090
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.mobile/man/mobileanalytics_put_events.Rd
|
ce2315c05db0ec5434fdc1e57b77ae0af1e6a046
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,350
|
rd
|
mobileanalytics_put_events.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mobileanalytics_operations.R
\name{mobileanalytics_put_events}
\alias{mobileanalytics_put_events}
\title{The PutEvents operation records one or more events}
\usage{
mobileanalytics_put_events(events, clientContext, clientContextEncoding)
}
\arguments{
\item{events}{[required] An array of Event JSON objects}
\item{clientContext}{[required] The client context including the client ID, app title, app version and
package name.}
\item{clientContextEncoding}{The encoding used for the client context.}
}
\value{
An empty list.
}
\description{
The PutEvents operation records one or more events. You can have up to
1,500 unique custom events per app, any combination of up to 40
attributes and metrics per custom event, and any number of attribute or
metric values.
}
\section{Request syntax}{
\preformatted{svc$put_events(
events = list(
list(
eventType = "string",
timestamp = "string",
session = list(
id = "string",
duration = 123,
startTimestamp = "string",
stopTimestamp = "string"
),
version = "string",
attributes = list(
"string"
),
metrics = list(
123.0
)
)
),
clientContext = "string",
clientContextEncoding = "string"
)
}
}
\keyword{internal}
|
d2ebf457287b1215b772eea5e0c21a61c170e05a
|
55686d2928596baa6bbde6b972d191f8a035f573
|
/Week_7_Discussion/Discussion_Week7_V1.R
|
af3bcec0a8b2c7fbe43e397bc0bdf668bda1c4fb
|
[] |
no_license
|
DarioUrbina/Teacher-A-Statististical-Methods-BME-423
|
6556688a414c1b3ee404aacdbf4401324f0b2645
|
1572301100c96583da46209d08ceac4efa570024
|
refs/heads/master
| 2023-01-06T23:57:37.652149
| 2020-11-06T02:45:19
| 2020-11-06T02:45:19
| 288,513,280
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,865
|
r
|
Discussion_Week7_V1.R
|
#Chi-square goodness-of-fit
#GOF tests if the observed data fits a certain distribution (probability)
#Modify cards to one column
# NOTE(review): rm(list = ls()) and setwd() in a script are anti-patterns
# (they clobber the user's session and hard-code a machine-specific path);
# kept here because this is interactive teaching material.
rm(list = ls());
cat("\014")
setwd("~/Desktop/Week_7_materials")
load("randomness.Rdata")
library(lsr)
who(expand = TRUE)
head(cards[,c(1,2)]) #1st selection
TotalSelections <- 200 #200 subjects
# Hypotheses for the goodness-of-fit test on suit choices.
H0 <- 'All four suits are chosen with equal probability (Prob=0.25)'
H1 <- 'All four suits are chosen with unequal probability (Prob~=0.25)'
# Expected counts under H0: 200 picks spread evenly over the four suits.
expected <- 200*c(clubs = 0.25, diamonds = 0.25, hearts= 0.25, spades= 0.25)
print(expected)
observed <- table(cards[,2]) #observed selections from the data
print(observed)
#We want to test the difference between the expected probability and the observed probability
#To determine that the difference is not due to chance, we use chi-square goodness of fit test
#Check in the discussion slides to see the formula
#Let's do the test by setting the significance level = 0.05
#Go back to slide no. 17 at before using the function
chisq.test(table(cards[,2]))
# or we can use the Convenience function (goodnessOfFitTest)
#library(lsr)
goodnessOfFitTest(cards[,2]) #Input is the second column of cards
#---------------------------------Chi-square association test-------------------------------
#Association btw nominal variables
#setwd("~/Desktop/Week_7_materials")
load("chapek9.Rdata")
#About the chapek9 data
who(TRUE)
head(chapek9)
summary(chapek9)
# lsr convenience wrapper for the chi-square test of independence.
associationTest( formula = ~choice+species, data = chapek9)
chapekFrequencies <- xtabs(~choice+species,data=chapek9) #create a contingency table
print(chapekFrequencies)
# Same test via base R, on the contingency table and on the raw data frame.
chisq.test(chapekFrequencies)
chisq.test(table(chapek9))
#To gain access to the capital city, a visitor must prove that they're not a robot, not a human.
#They ask whether the visitor prefers puppies, flowers or data files.
#In-class exercise
|
8c97046065f238a4ae4922b938641817a38f99cf
|
be348ef72c01bd46481b14a9f9df770b46c25f72
|
/UsToDec.R
|
92c3c03c294502d0abb7fc69d816998c5f6c7032
|
[] |
no_license
|
cardsbettor/OddsEngine
|
b0ad1d16bf02e54da316240ec825ecc48c7ecd58
|
dcda80a365a96bf1602f2ac19119c57f93007ccc
|
refs/heads/main
| 2023-02-10T00:13:42.766742
| 2021-01-01T21:18:05
| 2021-01-01T21:18:05
| 321,608,874
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 172
|
r
|
UsToDec.R
|
# Convert American (US) betting odds to decimal odds, rounded to 2 places.
#
# price >=  100 (underdog):  decimal = price/100 + 1
# price <= -100 (favourite): decimal = 100/|price| + 1
# Any price strictly between -100 and 100 is not a valid US odds quote,
# so NULL is returned.
UsToDec <- function(price) {
  if (price >= 100) {
    return(round(price / 100 + 1, 2))
  }
  if (price <= -100) {
    return(round(100 / abs(price) + 1, 2))
  }
  NULL
}
|
54f63fda1d2ee4e5a1793446f79ec50045de71ac
|
c7b96498e324b23287b7e6b286e23f7d599abba3
|
/Zika-ZF Yi.r
|
43e228e40165413d100a11cd3f4b7bfd67c1355b
|
[] |
no_license
|
Geoyi/Data-visualization-of-global-zika-Virus-epidemic-in-2015-and-2016
|
52698b1f4127b5623d278649e0ca88c68ffa7ea7
|
f5f4601f0e4d7299036dc0ab61b16a61723d0017
|
refs/heads/master
| 2021-01-19T05:39:36.550647
| 2016-07-29T17:52:06
| 2016-07-29T17:52:06
| 64,427,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,169
|
r
|
Zika-ZF Yi.r
|
library(dplyr)
library(data.table)
library(ggplot2)
library(RColorBrewer)
library(rworldmap)

# --- Load and prepare the CDC Zika surveillance data -------------------------
#system("ls ../input")
#zika = read.csv("../input/cdc_zika.csv", stringsAsFactors = F, header = T)
setwd("C:/Data Science Fundation with R/Kraggle/zika-virus-epidemic")
list.files("C:/Data Science Fundation with R/Kraggle/zika-virus-epidemic")
zika <- read.csv('cdc_zika.csv', header = TRUE, fill = TRUE, row.names = NULL)
zika <- data.table(zika)
# Split "Country-Province" location strings into two columns.
zika[, c("Country", "Province") := tstrsplit(location, "-", fixed = TRUE)][]
zika$report_date <- as.Date(zika$report_date, "%m/%d/%Y")
zika$Year <- as.numeric(format(zika$report_date, format = "%Y"))

# Reported case-row counts per country and year.
zika %>%
  filter(!is.na(Year)) %>%
  group_by(Country, Year) %>%
  summarise(cases = n()) -> ZikaOc

ggplot(ZikaOc, aes(x = Country, y = cases)) +
  geom_bar(stat = "identity") +
  coord_flip() +
  facet_wrap(~Year)

# --- World map of case counts ------------------------------------------------
data(countryExData, envir = environment(), package = "rworldmap")
str(countryExData)
Test <- merge(countryExData, ZikaOc, by = "Country")
sPDF <- joinCountryData2Map(Test, joinCode = "ISO3", nameJoinColumn = "ISO3V10")
mapDevice() # create world map shaped window
mapCountryData(sPDF, nameColumnToPlot = 'cases')

# --- Per-country bar plots of case types -------------------------------------
# One helper replaces the twelve near-identical copy/pasted blocks of the
# original script.
#
# pattern: regex matched against zika$location to select a country's rows.
# label:   country name used in the axis label.
# Returns a ggplot: case-row counts per data_field, faceted by year.
plot_country_cases <- function(pattern, label) {
  counts <- zika[grep(pattern, zika$location), ] %>%
    group_by(data_field, Year) %>%
    summarise(Cases = n())
  ggplot(counts, aes(x = data_field, y = Cases)) +
    geom_bar(stat = 'identity', colour = 'white') +
    facet_wrap(~ Year) +
    scale_fill_hue() +
    coord_flip() +
    labs(y = paste0(label, ': Reported Zika cases'),
         x = 'cases types')
}

g1  <- plot_country_cases("United_States", "USA")
g2  <- plot_country_cases("Mexico", "Mexico")
g3  <- plot_country_cases("Panama", "Panama")
g4  <- plot_country_cases("Nicaragua", "Nicaragua")
g5  <- plot_country_cases("Haiti", "Haiti")
g6  <- plot_country_cases("Guatemala", "Guatemala")
g7  <- plot_country_cases("El_Salvador", "El_salvador")
g8  <- plot_country_cases("Ecuador", "Ecuador")  # label fixed: original said 'g_Ecuador'
g9  <- plot_country_cases("Dominican_Republic", "Dominican_republic")
g10 <- plot_country_cases("Colombia", "Colombia")
g11 <- plot_country_cases("Argentina", "Argentina")
g12 <- plot_country_cases("Brazil", "Brazil")

g1 # Frome the cases in USA, most zika cases was reported from travel and local. Local cases mainly were reported from Puerto Rico, New York, Florida, and Virgin Island.
g2
g3
g4
g5
g6
g7
g8
g9
g10
g11
g12
|
6acdef8b4e11c00931bdfed27357ab1ed0c6a25d
|
e71493c5666af39ca887c9e71b6e50b08d8fa508
|
/R/values-methods.R
|
0451533d2aba923e58d853458d097d6c523c4e56
|
[] |
no_license
|
cran/MendelianRandomization
|
6d4d9bd2418b9d706bb41f5af7e1deb281156438
|
7c5864084eaf2a68534d16617c7319bbb08226d9
|
refs/heads/master
| 2023-08-17T13:29:51.450083
| 2023-08-08T18:10:02
| 2023-08-08T18:31:16
| 67,027,188
| 33
| 23
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,705
|
r
|
values-methods.R
|
#' Applies method values() to different output classes
#'
#' @description Enables the internal function \code{values}, used in the \code{mr_allmethods} function.
#'   Each method flattens an MR result object's slots (estimate, standard
#'   error, confidence limits, p-value) into a plain numeric vector, in that
#'   fixed order, so results can be tabulated uniformly.
#' @docType methods
#' @name values
#' @param object Object (could be an object of class "WeightedMedian", "Egger", or "IVW").
#'
#' @keywords internal
NULL
#' @rdname values
# WeightedMedian: single causal estimate -> one 5-element numeric vector.
setMethod("values",
          "WeightedMedian",
          function(object){
            return(c(object@Estimate,
                     object@StdError,
                     object@CILower,
                     object@CIUpper,
                     object@Pvalue
            ))
          }
)
#--------------------------------------------------------------------------------------------
#' @rdname values
# IVW: same slot layout as WeightedMedian -> one 5-element numeric vector.
setMethod("values",
          "IVW",
          function(object){
            return(c(object@Estimate,
                     object@StdError,
                     object@CILower,
                     object@CIUpper,
                     object@Pvalue
            ))
          }
)
#--------------------------------------------------------------------------------------------
#' @rdname values
# Egger reports two quantities, so this method returns a 2-row matrix:
# row 1 = slope (causal estimate), row 2 = intercept (pleiotropy term).
setMethod("values",
          "Egger",
          function(object){
            return(rbind(c(object@Estimate,
                           object@StdError.Est,
                           object@CILower.Est,
                           object@CIUpper.Est,
                           object@Pvalue.Est),
                         c(object@Intercept,
                           object@StdError.Int,
                           object@CILower.Int,
                           object@CIUpper.Int,
                           object@Pvalue.Int)))
          }
)
|
1afc0d266ada7aa78bdf9c0c1f1cd4f1bcd4599c
|
337deca529928a9036c8939cb47a39b7435d0f1a
|
/R/simplify.R
|
15d868a209f6c4b001867c742fd7200e739a4c98
|
[] |
no_license
|
alko989/icesTAF
|
883b29e78ee69a5ef2dd5e5ca5a680cb220789d8
|
a5beaaf64ed1cacc09ca7732e791e89373d1d044
|
refs/heads/master
| 2020-04-28T10:02:16.923481
| 2019-03-09T23:15:02
| 2019-03-09T23:15:02
| 175,188,781
| 0
| 0
| null | 2019-03-12T10:36:18
| 2019-03-12T10:36:18
| null |
UTF-8
|
R
| false
| false
| 951
|
r
|
simplify.R
|
simplify <- function(x) {
  # from Arni's toolbox
  # Coerce an object to the simplest storage mode that loses no information:
  # factor > character > numeric > integer. Lists/data frames are simplified
  # element-wise (recursively); matrices keep their dimensions by changing
  # only the storage mode; plain vectors are replaced by the coerced copy.
  old_opts <- options(warn = -1)  # silence coercion warnings while probing
  on.exit(options(old_opts))

  if (is.list(x)) {
    # list or data.frame: simplify each element/column in place
    for (k in seq_along(x)) {
      x[[k]] <- simplify(x[[k]])
    }
  } else if (is.matrix(x)) {
    # character matrix -> numeric, only if no new NAs appear during coercion
    if (is.character(x) && sum(is.na(as.numeric(x))) == sum(is.na(x))) {
      mode(x) <- "numeric"
    }
    # numeric matrix -> integer, only if every value survives truncation
    if (is.numeric(x)) {
      as_int <- as.integer(x)
      if (sum(is.na(x)) == sum(is.na(as_int)) && all(x == as_int, na.rm = TRUE)) {
        mode(x) <- "integer"
      }
    }
  } else {
    # plain vector: factor -> character first
    if (is.factor(x)) {
      x <- as.character(x)
    }
    # character -> numeric, only if no new NAs appear
    if (is.character(x)) {
      as_num <- as.numeric(x)
      if (sum(is.na(as_num)) == sum(is.na(x))) {
        x <- as_num
      }
    }
    # numeric -> integer, only if values are exactly representable
    if (is.numeric(x)) {
      as_int <- as.integer(x)
      if (sum(is.na(x)) == sum(is.na(as_int)) && all(x == as_int, na.rm = TRUE)) {
        x <- as_int
      }
    }
  }
  x
}
|
35c5bde8682ebc5cc55b618423e18f8a04176e49
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/wakefield/examples/r_list.Rd.R
|
36acfc1645f0199f7bcd0655799d6095f8e8a81e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 432
|
r
|
r_list.Rd.R
|
library(wakefield)
### Name: r_list
### Title: List Production (From Variable Functions)
### Aliases: r_list
### Keywords: list
### ** Examples
# Extracted example code for wakefield::r_list -- builds a named list of
# 30-element random variables from the given variable functions.
r_list(
    n = 30,
    id,
    race,
    age,
    sex,
    hour,
    iq,
    height,
    died,
    Scoring = rnorm
)
# Same, but with customised variable functions (age range, height mean/sd)
# and renamed list entries (Gender, Time).
r_list(
    n = 30,
    id,
    race,
    age(x = 8:14),
    Gender = sex,
    Time = hour,
    iq,
    height(mean=50, sd = 10),
    died,
    Scoring = rnorm
)
|
ade5bc5feb0cd2d8cf10b8fdae5a1a028930f045
|
4d83f68148bbafb7a00990fd8da17548970ac021
|
/bin/191007_figure3.R
|
c413fc094ea4ad3b984ed9e9a63c6775cf1808a9
|
[] |
no_license
|
liwenbo520/Yeast-GGE
|
265082d10628c9d75a98e24907252ab4971df99d
|
044ff8b9c6003e3d7915e95a720568f62c41e9e3
|
refs/heads/master
| 2022-04-07T21:17:21.518852
| 2020-02-26T09:28:19
| 2020-02-26T09:28:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,689
|
r
|
191007_figure3.R
|
# Build Figure 3: PCA of environments, locus-count histograms, per-trait
# epistasis networks, and genotype/phenotype panels for the IAA network.
# NOTE(review): almost every object used below (p, index_selected, cex_now,
# num_add_3, num_epi_3, ed_rank_3, trait_selected, Add_for_select, epi, hubs,
# Netall_update, epi1/epi2/epi3, b, Sort_bxp, get.Gen.id, data, snps,
# t1/t2/t3, g, cols, c1/c3) comes from figure3.RData or sourced helpers --
# confirm against the loaded workspace before modifying.
load("./results/figure3.RData")
#dev.off()
# --- Panels A-D: PCA loadings plot and three histograms ----------------------
par(xpd=T,mar=c(4,4,2,1),mfrow=c(4,4),cex.lab=1.52,cex.axis=1.1)
plot(p$loadings[,1], p$loadings[,2],col="black",frame.plot = F,xlab="PC1",ylab = "PC2",pch= ifelse(rownames(p$loadings) %in% index_selected,19,0) )
text(x =p$loadings[,1],y=p$loadings[,2]+0.04,labels =rownames(p$loadings),cex=cex_now)
#legend(x=-0.1,y=0.1,legend=c("Carbon sources","Oxidative stress","Unclear","Ca2+ Signaling releated"),fill = c("black","blue","purple","tomato"),cex=.6,bty = "n")
mtext(text = "A",side = 3,line = 0,at = -0.5,cex=2)
hist(as.numeric(num_add_3),col="lightblue",xpd=T,xlab = "Number of environments \ndetected as additive loci",main="",ylab = "Count")
mtext(text = "B",side = 3,line = -1,at = 0,cex=2)
hist(num_epi_3,col="lightblue",xlab = "Number of environments \ndetected as epistatic loci",main="",ylab = "Count",xpd=T)
mtext(text = "C",side = 3,line = -1,at = 0,cex=2)
hist(as.numeric(ed_rank_3),col="lightblue",xpd=T,xlab = "Number of environments detected for\n a particular pairwise interaction",main="",ylab = "Count")
mtext(text = "D",side = 3,line = -1,at = 0,cex=2)
dev.off()
# --- Panels D/E/F (labelled via `lab`): per-trait epistasis networks ---------
par(mfrow=c(3,3),mar=c(2,2,2,2))
#lab <- c("D","E","F")
lab <- c("D","E","F")
require(RColorBrewer)
# Three reds encode how many of the focal traits share each epistatic locus.
col <- brewer.pal(9,"Reds")[c(2,4,9)] #colorpanel(100,"lightblue","darkblue")
lab_main <- c("IAA","Formamide","Raffinose")
for ( i in 1:3){
  if(i ==1){
    # The fixed node layout `b` is loaded once from disk so all three
    # networks are drawn with identical node positions.
    #a <- layout.fruchterman.reingold(Netall_update,weights=rep(2,39))
    #a <- layout.graphopt(Netall_update,charge = 0.005,spring.length=1)
    #save(a,file="./results/190207_layout.RData")
    load("./results/190207_layout2.RData")
  }
  trait_now <- trait_selected[i]
  add_now <- Add_for_select[[trait_now]]
  epi_now <- epi[[trait_now]]
  hub_now <- hubs[[trait_now]]
  ######################## Shape
  # Node shape encodes locus class: circle = additive, square = epistatic,
  # sphere = neither for this trait.
  V(Netall_update)$shape <- ifelse(names(V(Netall_update)) %in% add_now,"circle",ifelse(names(V(Netall_update)) %in% epi_now ,"square","sphere"))
  V(Netall_update)$label <- "" # test_name[names(V(Netall))]
  # The hub QTL on chrVIII is drawn larger than all other nodes.
  V(Netall_update)$size <- ifelse(names(V(Netall_update)) %in% "4928552_chrVIII_98622_C_G",12,10) #"4928552_chrVIII_98622_C_G" "9680784_chrXIV_433148_G_A"
  # V(network)$label <- network_nrNeighbors
  # V(network)$label.cex <- 1.6
  # set1: edges belonging to the current trait; set2: its "-updated" edges;
  # set3: IAA-flagged edges of the current trait.
  set1 <- edge.attributes(Netall_update)$trait %in% trait_now
  set2 <- rep(F,length(set1))
  set2[grep(pattern = paste0(trait_selected[i],"-updated"),x =edge.attributes(Netall_update)$trait)] <- T
  set3 <- edge.attributes(Netall_update)$index_IAA & edge.attributes(Netall_update)$trait %in% trait_now
  # Highlight this trait's edges in tomato; all other edges semi-transparent.
  if(any(set2)){
    E(Netall_update)$color <- ifelse(set1,"tomato",ifelse(set2,"tomato",rgb(0,0,0,alpha = 0.5)))
    ###E(Netall_update)$color <- ifelse(set2 & shared_edgeall,"red",ifelse(set2 & edge2,"darkorange2","darkgreen"))
    E(Netall_update)$width <- ifelse(set3,1,ifelse(set1,1,ifelse(set2,1,0.25)))
  }else{
    set_new <- edge.attributes(Netall_update)$trait %in% trait_now
    E(Netall_update)$color <- ifelse(edge.attributes(Netall_update)$trait %in% trait_now ,"tomato",rgb(0,0,0,alpha = 0.5))
    ###E(Netall_update)$color <- ifelse(set_new & shared_edgeall,"red",ifelse(set_new & edge2,"darkorange2","darkgreen"))
    E(Netall_update)$width <- ifelse(set3,1,ifelse(set1,1,ifelse(set2,1,0.25)))
  }
  # Node fill colour: darker red = epistatic locus shared by more trait sets
  # (epi3 > epi2 > epi1); white = not epistatic for this trait.
  loci_now <- names(V(Netall_update)) %in% epi_now
  epi1_in <- names(V(Netall_update)) %in% epi1
  epi2_in <- names(V(Netall_update)) %in% epi2
  epi3_in <- names(V(Netall_update)) %in% epi3
  #V(Netall_update)$color <- ifelse(loci_now & Unique_loci[[i]],col[1],ifelse(loci_now & Shared_loci_all,col[3],ifelse(loci_now & Shared_loci[[i]],col[2],"white")))
  V(Netall_update)$color <- ifelse(loci_now & epi3_in,col[3],ifelse(loci_now & epi2_in,col[2],ifelse(loci_now & epi1_in,col[1],"white")))
  #V(Netall_update)$color <- ifelse(num_epi_3
  #ifelse(names(V(Netall)) %in% inter_all,"lightpink2",ifelse(names(V(Netall)) %in% unique_loci[[i]],"grey","lightblue"))
  #plot(Netall_update)
  plot(Netall_update,layout=b,main=lab_main[i])
  #legend("topleft",cex=2,legend = c("Shared with 3","Shared with 2","Unique"),col=c("lightpink2","lightblue","grey"),pch = 19,bty="n")
  ### plot network evolution
  if(i==1){
    mtext(lab[i],side=3,line=0, at=-1,cex=2)
  }else{
    mtext(lab[i],side=3,line=0, at=-1.4,cex=2)
  }
}
# --- Panels H-J: growth vs number of growth-increasing alleles ---------------
par(mar = c(4.5, 4.5, 3, 2) + .1,xpd=F)
#plot(0,type="n",col="white",yaxt="n",xaxt="n",frame.plot = F,main="",xlab = "",ylab = "")
#layout(matrix(c(1,2:4,1,5:7),ncol =4,byrow = T))
## panel B first plot G-P on Formamide
trait_now <- "IndolaceticAcid"
t <- unname(get.Gen.id(t.name = trait_now))
# box.pheno <- Sort_bxp(data = data,trait_now = t,snps = snps)$bxp
# tmp <- Sort_bxp(data = data,trait_now = t,snps = snps)$tmp
# bxp(box.pheno, ylim = c(-4, 1.9), xaxt = "n", boxfill = c(rep(c3, 32), rep(c1, 32)), frame = F, yaxt = "n", outcex = .3,pch=19,outcol="grey",whiskcol="grey",staplecol="grey")
geno_IAA <- Sort_bxp(data = data,trait_now = t,snps = snps)$geno
pheno_IAA <- Sort_bxp(data = data,trait_now = t,snps = snps)$pheno
na <- Sort_bxp(data = data,trait_now = t,snps = snps)$NNA
# Count of heterozygous/"H" calls across the five network loci per strain.
num <- apply(geno_IAA[,2:6], 1, FUN = function(x)sum(x=="H"))
# Paired boxplots of growth vs allele count, split by the hub genotype in
# column 1 (RM = blue, BY = tomato).
# NOTE(review): the function body uses the globals `num` and `geno_IAA`
# directly and ignores its `geno_mat`/`count` arguments -- confirm intended.
bplot <- function(pheno=pheno_IAA,geno_mat = geno_IAA,count = num,text1="Number of growth increasing alleles\n at six-locus IAA network",text2 = "Growth on IAA"){
  group1.count <- num[geno_IAA[,1] == "RM"]
  group2.count <- num[geno_IAA[,1] == "BY"]
  group1 <- geno_IAA[,1] == "RM"
  group2 <- geno_IAA[,1] == "BY"
  ylim = range(pheno_IAA)
  box.group1 <- boxplot(pheno[group1] ~ group1.count, boxwex = .15, at = seq(.9, 5.9),ylim=ylim, col = "blue", xaxt = "n", frame = F, yaxt = "n", outcex = .3,pch=19,outcol="grey",whiskcol="grey",staplecol="grey")
  box.group2 <- boxplot(pheno[group2] ~ group2.count, boxwex = .15, at = seq(1.1, 6.1),ylim=ylim, add = T, col = "tomato", xaxt = "n", frame = F, yaxt = "n", outcex = .3,pch=19,outcol="grey",whiskcol="grey",staplecol="grey")
  axis(side = 1, at = 1:6, labels = 0:5, cex.axis = 1.6, padj = .5, line = .8)
  axis(side = 2, cex.axis = 1.6, line = -2)
  mtext(text1, side = 1.5, cex = 1, line = 4.5)
  mtext(text2, side = 2, cex = 1, line = .5)
  #legend(x = .6, y = -2.5, c("BY hub-QTL allele", "RM hub-QTL allele"), col = c(cols[2], cols[1]), pch = 15, cex = .75, bty = "n")
  #legend(x = .6, y = -2, c("Additive model fit", "Exponential model fit"), col = c("black", "blue"), lty = "solid", lwd = 3, cex = .75, bty = "n")
}
# boxplot(pheno_IAA ~ geno_IAA[,1] + num ,names=c(rep(0:5,each=2)),col=c("tomato","blue"),ylab="Growth on IAA",xlab="Number of growth increasing allele\n at six-locus IAA network",frame=F, outcex = .3,pch=19,outcol="grey",whiskcol="grey",staplecol="grey")
bplot(pheno=pheno_IAA,geno_mat = geno_IAA,count = num,text1="Number of growth increasing alleles\n at six-locus IAA network",text2 = "Growth on IAA" )
legend("bottomright",legend = c("BY allele","RM allele"),fill= c("tomato","blue"),bty="n")
mtext("H",side=3,line=1.1, at=-1,cex=2)
## continue to For and raf
trait_now <- "Formamide"
t <- unname(get.Gen.id(t.name = trait_now))
pheno_For <- phdata(data)[na,t]
#boxplot(pheno_For ~ geno_IAA[,1] + num ,names=c(rep(0:5,each=2)),col=c("tomato","blue"),ylab="Growth on Formamide",xlab="Number of growth increasing allele\n at six-locus IAA network",frame=F, outcex = .3,pch=19,outcol="grey",whiskcol="grey",staplecol="grey")
bplot(pheno=pheno_For,geno_mat = geno_IAA,count = num,text1="Number of growth increasing alleles\n at six-locus IAA network",text2 = "Growth on Formamide" )
mtext("I",side=3,line=1.1, at=-1.4,cex=2)
trait_now <- "Raffinose"
t <- unname(get.Gen.id(t.name = trait_now))
pheno_raf <- phdata(data)[na,t]
#boxplot(pheno_raf ~ geno_IAA[,1] + num ,names=c(rep(0:5,each=2)),col=c("tomato","blue"),ylab="Growth on Raffinose",xlab="Number of growth increasing allele\n at six-locus IAA network",frame=F, outcex = .3,pch=19,outcol="grey",whiskcol="grey",staplecol="grey")
bplot(pheno=pheno_raf,geno_mat = geno_IAA,count = num,text1="Number of growth increasing alleles\n at six-locus IAA network",text2 = "Growth on Raffinose" )
mtext("J",side=3,line=1.1, at=-1.4,cex=2)
# --- Panels K-L: cross-environment growth correlations by hub genotype -------
# NOTE(review): t1/t2/t3 (trait column ids) and g (hub genotype vector, 0/2)
# are not defined in this chunk -- presumably from figure3.RData; verify.
col = c("tomato","blue")
plot(phdata(data)[,t1],phdata(data)[,t2],col=col,frame.plot=F,pch=19,cex=0.3,xlab="Growth on IAA ",ylab="Growth on Formamide")
a <- lm(phdata(data)[,t2][g==0] ~phdata(data)[,t1][g==0])
abline(a,col="tomato")
b <- lm(phdata(data)[t2][g==2] ~phdata(data)[,t1][g==2])
abline(b,col="blue")
mtext("K",side=3,line=1.1, at=-3.4,cex=2)
## label correlation
r1 <- cor(phdata(data)[,t2][g==0] ,phdata(data)[,t1][g==0],use="pairwise.complete")
r2 <- cor(phdata(data)[,t2][g==2] ,phdata(data)[,t1][g==2],use="pairwise.complete")
r1.f <- as.numeric(format(r1,digits = 2))
r2.f <- format(r2,digits = 2)
legend("topleft",legend =c(paste0("Pearson r^2 = ",r1.f),paste0("Pearson r^2 = ",r2.f)) ,fill= c("tomato","blue"),bty="n")
#eval(paste(expression(paste("Pearson r"^"2")),"=",r1.f))
#my_string <- "Pearson r"
#bquote(.(my_string)^2~"big")
#plot(1,1, main=)
plot(phdata(data)[,t1],phdata(data)[,t3],col=col,frame.plot=F,pch=19,cex=0.3,ylim=c(-4,4),xlab=" Growth on IAA ",ylab="Growth on Raffinose")
a <- lm(phdata(data)[,t3][g==0] ~phdata(data)[,t1][g==0])
abline(a,col="tomato")
b <- lm(phdata(data)[,t3][g==2] ~phdata(data)[,t1][g==2])
abline(b,col="blue")
mtext("L",side=3,line=1.1, at=-3.4,cex=2)
r1 <- cor(phdata(data)[,t3][g==0] ,phdata(data)[,t1][g==0],use="pairwise.complete")
r2 <- cor(phdata(data)[,t3][g==2] ,phdata(data)[,t1][g==2],use="pairwise.complete")
r1.f <- round(r1,digits = 2)
r2.f <- round(r2,digits = 2)
legend("topleft",legend =c(paste0("Pearson r^2 = ",r1.f),paste0("Pearson r^2 = ",r2.f)) ,fill= c("tomato","blue"),bty="n")
# plot(phdata(data)[,t2],phdata(data)[,t3],col=col,frame.plot=F,pch=19,cex=0.3,xlab=" Growth on Formamide ",ylab="Growth on Raffinose")
# a <- lm(phdata(data)[,t3][g==0] ~phdata(data)[,t2][g==0])
# abline(a,col="tomato")
# b <- lm(phdata(data)[,t3][g==2] ~phdata(data)[,t2][g==2])
# abline(b,col="blue")
# ## genetic robustness
# --- Panel M: within-strain growth variance by hub genotype ------------------
phe <- apply(phdata(data)[,c(t1,t2,t3)],MARGIN = 2,FUN=scale)
var <- apply(phe,MARGIN = 1,FUN=var)
geno_hub <- as.double.gwaa.data(data[,"4944074_chrVIII_114144_A_G"])
geno_hub[geno_hub[,1]==0,] <- "BY"
geno_hub[geno_hub[,1]==2,] <- "RM"
boxplot(var~geno_hub,cex=0.2,xlab="Hub genotype",ylab="Within strain growth variance",ylim=c(0,6),col=c("tomato","blue"),frame=F,border="black")
# Significance bracket and annotation between the two genotype groups.
arrows(x0 = 1.05,y0 = 5,x1 = 1.95,y1 = 5,col = "black",angle = 90,length = 0.04)
arrows(x1 = 1.05,y1 = 5,x0 = 1.95,y0 = 5,col = "black",angle = 90,length = 0.04)
text(x = 1.5,y=4.5,labels = "P = 1.03e-18")
text(x = 1.5,y=5.5,labels = "***",cex=1.5)
mtext("M",side=3,line=1.1, at=-0,cex=2)
|
f167f6f8fcdfbe432ce5f76e330f87d9e6bd6a6e
|
20c367ee9f5d5585a41d0b3d12a48633ea7c5f03
|
/pre-process_trips.R
|
98a59dffa8c8643909525c55a0e7a21c806a20f1
|
[
"MIT"
] |
permissive
|
WorldFishCenter/timor-catch-estimation
|
ea908664c5e9ea49dea8e8a7482b68234d448ad5
|
de66b2e04224fa8aa84f76d3a8d20c3027cfe030
|
refs/heads/master
| 2023-03-26T22:12:55.612292
| 2021-03-19T00:23:48
| 2021-03-19T00:23:48
| 317,982,927
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,970
|
r
|
pre-process_trips.R
|
# Collect all raw "timor_all*" tracking CSVs for per-trip summarisation below.
files <- list.files("data/raw/peskas-tracking2",
                    pattern = "timor_all",
                    full.names = TRUE)
library(magrittr)
library(data.table)
# Summarise one raw tracking CSV into one row per trip (dplyr variant).
#
# Reads the file with a fixed column specification, then for each trip
# computes the start/end timestamps, the maximum range in metres, and carries
# the first boat / boat_name / community / imei values.
#
# x: path to a raw tracking CSV file.
# Returns a tibble with one row per trip.
process_track_file_dplyr <- function(x){
  m <- readr::read_csv(x, col_types = "Tccdddddcccc")
  on.exit(remove("m"))
  # Some older files lack the `imei` column; add it as NA so the
  # summarise()/across() call below can always select it. (The original
  # detected the missing column but left this branch empty, so the function
  # failed on such files; this mirrors the data.table variant's fix.)
  if (isFALSE("imei" %in% colnames(m))) {
    m$imei <- NA
  }
  m %>%
    janitor::clean_names() %>%
    dplyr::group_by(trip) %>%
    dplyr::summarise(start = min(time),
                     end = max(time),
                     range = max(range_meters),
                     dplyr::across(c(boat, boat_name, community, imei), dplyr::first),
                     .groups = "drop")
}
# Summarise one raw tracking CSV into one row per trip (data.table variant).
#
# x: path to a raw tracking CSV; the date is parsed from the file name.
# Returns a data.table with one row per trip (start/end time, max range,
# first boat/community/imei, and the file's date). Empty or malformed files
# yield a single all-NA row carrying only the date.
process_track_file_dt <- function(x){
  # Date is encoded in the file name as YYYY-MM-DD.
  this_day <- stringr::str_extract(x, "[0-9]{4}-[0-9]{2}-[0-9]{2}")
  # Peek at the first rows to detect empty/degenerate files cheaply.
  m0 <- fread(x, nrows = 5)
  if (nrow(m0) == 0 | ncol(m0) == 1) {
    empty_out <- data.table(start = NA,
                            end = NA,
                            range = NA,
                            boat_id = NA, boat_name = NA,
                            community = NA, imei = NA,
                            day = this_day)
    return(empty_out)
  }
  m <- fread(x, colClasses = c("POSIXct", "character", "character",
                               "numeric","numeric", "numeric", "numeric", "numeric",
                               "character", "character", "character", "character")) %>%
    janitor::clean_names()
  on.exit(remove("m"))
  # Older files lack the imei column; create it so the aggregation below
  # can always reference it.
  if (isFALSE("imei" %in% colnames(m))) {
    m <- m[, imei := NA]
  }
  # One summary row per trip.
  m[, by = trip, .(start = min(time), end = max(time), range = max(range_meters),
                   boat_id = data.table::first(boat),
                   boat_name = data.table::first(boat_name),
                   community = data.table::first(community),
                   imei = data.table::first(imei),
                   day = this_day)]
}
# Process every raw file and row-bind the per-trip summaries into one table.
# NOTE(review): purrr::auto_browse() drops into the debugger on error, so
# this pipeline is intended for interactive runs.
o <- files %>%
  purrr::map_dfr(purrr::auto_browse(process_track_file_dt))
fwrite(o, "data/processed/trips2.csv")
|
8c19c5e3ecfae8efd0f6b93e1bf8c61b857df3ab
|
28361565ec0320451d6e4ec45c630613f0294486
|
/lab5sarasara/man/lab5sarasara.Rd
|
7b002072bbbb3f0a6f364c901555d106b83bc663
|
[] |
no_license
|
SaraJesperson/AdvancedRlab5
|
a7c5177500711bcb4274324455a4ac3da284dc71
|
35809b7f6c47c2592813ec0ad1e592212d93f611
|
refs/heads/master
| 2020-03-29T00:03:54.226716
| 2017-10-06T08:40:36
| 2017-10-06T08:40:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 412
|
rd
|
lab5sarasara.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lab5sarasara.R
\docType{package}
\name{lab5sarasara}
\alias{lab5sarasara}
\alias{lab5sarasara-package}
\title{Election results of the Swedish election 2014}
\description{
A package containing a function to visually analyze the election results of the Swedish parliamentary election 2014
}
\author{
Sara Jesperson and Sara Johansson
}
|
e512bfc37407e70d38d4c7bbe246c37f713e6780
|
a4bd05fdf74fa9a6172d5902f588b643a75d33c9
|
/Inference/PF/2-Senegal/pft_21_network_exec_fan_pred.R
|
7c1a0e6c48e4ee9c4b62eed3e8979e99e606c7dd
|
[] |
no_license
|
Fanfanyang/Projects
|
53abfd6ee02105aa5cc1b9d04a21a7fcba783375
|
615c3ca5e751fa699a710a5ec91e743b090d401f
|
refs/heads/master
| 2020-03-28T09:50:35.763635
| 2018-10-29T02:23:05
| 2018-10-29T02:23:05
| 148,063,753
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,658
|
r
|
pft_21_network_exec_fan_pred.R
|
# Particle-filter based traffic prediction over the Dakar tower network.
# Loads precomputed state/observation matrices, then for each prediction
# window propagates `particles` Monte-Carlo samples through the time-varying
# transition matrix m.time and saves the window predictions and running
# estimates to disk.
library('Matrix')
require('TTR')
require('igraph')
require('survey')
require('grr')
require('RcppEigen')
require('Rcpp')
# Precomputed inputs: true states (Xt_real), observations (Yt), transition
# probabilities (m.time), tower metadata, and a previous benchmark estimate
# (Xt_est) used as the starting point of each prediction window.
load("data_exec/Xt_real_fan.RData")
load("data_exec/Yt_fan.RData")
load("data_exec/m.time.fan.RData")
load("data_exec/obs.matrix.RData")
load('data_prep/tower_dakar.RData')
load('data_exec/vehicle.state.d.RData')
load('../bench_senegal1/data_result/Xt_est.RData')
Xt_est_track = Xt_est
# particle fitering
RunTimes = 3          # number of independent repetitions
obs.scale = 10        # scale factor from observations to counts
particles = 1e3       # particle count
par_inc = 1           # particle replication factor per step
nodes = max(tower_dakar)
obs.lanes = c((nodes+1):ncol(Xt_real))
step = 1
small.prob = 1e-8
lane.nums = dim(m.time)[1]
pred.window = 10      # steps predicted ahead from each re-initialisation
pred.times = trunc((nrow(Xt_real)-1)/pred.window)
for(yy in c(1:RunTimes)) {
  print(yy)
  # Xi_1/Xi_2: particle populations before/after one transition step.
  Xt_est = array(0,dim=c(nrow(Xt_real),lane.nums))
  Xi_1 = array(0,dim =c(particles,lane.nums))
  Xi_2 = array(0,dim =c(particles*par_inc,lane.nums))
  Xt_est[1,1:ncol(Yt)] = Yt[1,]*obs.scale
  for (pp in c(1:pred.times)) {
    # Re-initialise all particles from the benchmark estimate at window start.
    t0=1+(pp-1)*pred.window
    Xi_1 = matrix(rep(Xt_est_track[t0,],each=particles),nrow=particles)
    Xt_pred = array(0,dim=c(pred.window,ncol(Xt_real)))
    if (pp==1)
      Xt_pred[1,] = Xt_est[1,]
    for(t in c((t0+1):(t0+pred.window))) {
      print(t)
      # t1: index into the 12 time-of-day transition matrices (120 steps each).
      t1 = min(trunc((t-1)/120)+1,12)
      Xi_2[,] = 0
      if(TRUE) {
        Xi_1 = matrix(rep(Xi_1,each=par_inc),ncol=ncol(Xi_1))
        ndx_lane_from = which(colSums(Xi_1)>0)
        # For every occupied source lane j, multinomially redistribute each
        # particle's vehicles to destination lanes with probability
        # m.time[j, , t1], and accumulate the counts per particle in Xi_2.
        for(j in ndx_lane_from){
          idx = which(m.time[j,,t1]>0)
          sample_result = sample.int(length(idx), sum(Xi_1[,j]), replace=TRUE,prob=m.time[j,idx,t1])
          # ndx maps each sampled vehicle back to its owning particle
          # (run-length decoding of the per-particle vehicle counts ndx3).
          ndx = rep(0, length(sample_result))
          ndx2 = which(Xi_1[,j]>0)
          ndx3 = Xi_1[ndx2,j]
          ndx[ cumsum(c(1,head(ndx3,-1))) ] = 1 #diff(c(0,ndx2))
          ndx = cumsum(ndx)
          Xi_2[ndx2,idx] = Xi_2[ndx2,idx] + matrix(tabulate( ndx+(sample_result-1)*length(ndx2), nbins=length(ndx2)*length(idx) ), nrow=length(ndx2))
        }
      }
      if(TRUE) {
        # No resampling against observations here: the estimate/prediction is
        # the particle mean of the propagated population.
        Xi_1 = Xi_2
        Xt_est[t,] = colMeans(Xi_1)
        Xt_pred[t-t0,] = colMeans(Xi_1)
        next
      }
    }
    # Persist this window's predictions: data_result/pred_10/Xt_pred_<pp>.RDS
    name.file = paste('data_result/pred',pred.window,sep='_')
    name.file = paste(name.file,'Xt_pred',sep='/')
    name.file = paste(name.file,pp,sep = '_')
    name.file = paste(name.file,'RDS',sep = '.')
    saveRDS(Xt_pred,file = name.file)
  }
  # NOTE(review): the output name does not include `yy`, so each repetition
  # overwrites the previous run's Xt_est file -- confirm this is intended.
  name.file = paste('data_result/Xt_est',pred.window,sep='_')
  name.file = paste(name.file,'RData',sep = '.')
  save(Xt_est,file=name.file)
}
|
84c29780423b34d3f2e783bb4925d8ef1f42293a
|
82b1c5655856b660c053d18ec7ad94f3aa30a964
|
/tests/testthat/test-function-plot_file_size_in_depth.R
|
0f05dbe8230fcbf5a37f1ebb26f5b16c7894b6ce
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.fakin
|
9792dfa732a8dd1aaa8d2634630411119604757f
|
17ab0e6e9a63a03c6cb40ef29ee3899c2b2724a0
|
refs/heads/master
| 2022-06-09T22:25:09.633343
| 2022-06-08T21:24:14
| 2022-06-08T21:24:14
| 136,065,795
| 1
| 0
|
MIT
| 2021-03-15T10:55:17
| 2018-06-04T18:21:30
|
R
|
UTF-8
|
R
| false
| false
| 202
|
r
|
test-function-plot_file_size_in_depth.R
|
# Smoke test for the (non-exported) plot_file_size_in_depth() helper.
test_that("plot_file_size_in_depth() works", {
  plot_fun <- kwb.fakin:::plot_file_size_in_depth

  # Calling the function with no arguments must raise an error
  expect_error(plot_fun())

  # A minimal data frame with the expected columns is accepted
  file_data <- data.frame(
    extension = "xls", depth = 1, root = "a", size = c(1, 2)
  )
  plot_fun(file_data)
})
|
e03a1006cfc17d800508b19bf55173a8044efa3a
|
7eb63399fa00e3c547e5933ffa4f47de515fe2c6
|
/man/print.stppp.Rd
|
28159b94fe266a8eb31ae6d52044248620c6bcbc
|
[] |
no_license
|
bentaylor1/lgcp
|
a5cda731f413fb30e1c40de1b3360be3a6a53f19
|
2343d88e5d25ecacd6dbe5d6fcc8ace9cae7b136
|
refs/heads/master
| 2021-01-10T14:11:38.067639
| 2015-11-19T13:22:19
| 2015-11-19T13:22:19
| 45,768,716
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 377
|
rd
|
print.stppp.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/stpppClassDef.R
\name{print.stppp}
\alias{print.stppp}
\title{print.stppp function}
\usage{
\method{print}{stppp}(x, ...)
}
\arguments{
\item{x}{an object of class stppp}
\item{...}{additional arguments}
}
\value{
prints the stppp object x
}
\description{
Print method for stppp objects
}
|
764b430b481b62225918d2316445fccbc1edc45f
|
331234b7eabbe4daf51ee8143c6dcbc768da0595
|
/Plot Distribution Speciation Events.R
|
5d2267d9d66e0de442055cabe5755d542e2dcdaa
|
[] |
no_license
|
IanGBrennan/Convenient_Scripts
|
6e3959ad69e594b21a5b4b2ca3dd6b30d63170be
|
71f0e64af8c08754e9e3d85fe3fb4770a12ec2d4
|
refs/heads/master
| 2021-05-12T03:57:32.339140
| 2020-01-30T01:58:19
| 2020-01-30T01:58:19
| 117,630,023
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 817
|
r
|
Plot Distribution Speciation Events.R
|
library(geiger)
library(ape)
library(BAMMtools)
library(phytools)
#####################################################################
#### Plot Distribution of Speciation Events (not all branching events)
#####################################################################

# Read the marsupial phylogeny and extract the terminal (tip) branch lengths.
mar <- read.tree("oz.marsupials.tre")
n_tips <- length(mar$tip.label)

# Each tip index appears exactly once in the second column of the edge
# matrix, so match() locates every terminal edge row in one O(n) pass
# (replaces the previous O(n^2) sapply(1:n, which(...)) construction).
tip_edge_rows <- match(seq_len(n_tips), mar$edge[, 2])
ms <- setNames(mar$edge.length[tip_edge_rows], mar$tip.label)

# Histogram of terminal branch lengths; x axis reversed so time runs toward
# the present. 'breaks' determines the number of bins across the whole xlim.
branch_hist <- hist(ms, breaks = 40, xlab = "Branching Times", col = "lightpink",
                    ylim = c(0, 30), xlim = c(40, 0))

# Rescale the kernel density estimate from density units to count units so
# it overlays the histogram correctly (bins are equal-width, so the
# count/density ratio of the first bin applies throughout).
multiplier <- branch_hist$counts / branch_hist$density
mydensity <- density(ms)  # smoothed speciation-frequency curve
mydensity$y <- mydensity$y * multiplier[1]
lines(mydensity)  # plot the smoothed line of best fit over the histogram
|
1568b0fa81833c834b715cb792d9276877e4838a
|
c7aa8e706945584fbf1cab17d47cb95d22170bb8
|
/plot1.R
|
27194014988f1a9cd5f97abf3ad47c5e101ac552
|
[] |
no_license
|
Tonnia/ExData_Plotting1
|
8fd9c1cca1b623ace06577c7f70c8817feb070af
|
841f99b2bdd1f801424681e4ea6feff2339e08a6
|
refs/heads/master
| 2021-05-12T11:32:22.654690
| 2018-01-14T01:26:25
| 2018-01-14T01:26:25
| 117,390,095
| 0
| 0
| null | 2018-01-14T00:40:51
| 2018-01-14T00:40:50
| null |
UTF-8
|
R
| false
| false
| 510
|
r
|
plot1.R
|
# Plot 1: histogram of global active power for 2007-02-01 and 2007-02-02.

# Load the full dataset; "?" marks missing values in this file.
power_raw <- read.table(
  "household_power_consumption.txt",
  header = TRUE, sep = ";", na.strings = "?", stringsAsFactors = FALSE
)

# Restrict to the two days of interest, then parse the date column.
power_feb <- subset(power_raw, Date %in% c("1/2/2007", "2/2/2007"))
power_feb$Date <- as.Date(power_feb$Date, format = "%d/%m/%Y")

# Write the histogram to a 480x480 PNG.
png("plot1.png", width = 480, height = 480)
hist(
  power_feb$Global_active_power,
  col = "red",
  xlab = "Global Active Power (kilowatts)",
  main = "Global Active Power"
)
dev.off()
|
2721d2a5945eb1543ba8dffd620c00b2f817b5cd
|
3541b9aca6dc776827dd780f25e2e41f07fcb322
|
/plot4.r
|
641a5ed60574d4fb50523c960878afdac1e57429
|
[] |
no_license
|
pilimayora/ExData_Plotting1
|
e970f7ba77b355c9205ae9856413252246620e1d
|
088f64152bd95493f758a0ab313d1009e6c3080b
|
refs/heads/master
| 2020-12-28T20:19:25.518045
| 2016-03-06T23:38:32
| 2016-03-06T23:38:32
| 53,272,986
| 0
| 0
| null | 2016-03-06T19:51:55
| 2016-03-06T19:51:54
| null |
UTF-8
|
R
| false
| false
| 2,110
|
r
|
plot4.r
|
# Plot 4: 2x2 panel of household power measurements for 2007-02-01/02.

# Replace by URL where file was downloaded
epc <- read.csv("/Users/pilimayora/Downloads/household_power_consumption.txt", sep = ";")

# The raw file encodes missing values as "?"
epc[epc == "?"] <- NA

# Parse dates, then keep only the first two days of February 2007
epc$Date <- as.Date(epc$Date, format = "%d/%m/%Y")
epc_feb <- subset(epc, Date == as.Date("2007-02-01") | Date == as.Date("2007-02-02"))

# Combined date-time column used as the x axis of every panel
epc_feb$datetime <- as.POSIXct(paste(epc_feb$Date, epc_feb$Time), format = "%Y-%m-%d %H:%M:%S")

# Convert every measurement column from text to numeric in one pass
measure_cols <- c(
  "Global_active_power", "Global_reactive_power", "Voltage",
  "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"
)
epc_feb[measure_cols] <- lapply(epc_feb[measure_cols], function(x) as.numeric(as.character(x)))

# Replace by URL where graph should be saved
png("/Users/pilimayora/Sites/personal/ExData_Plotting1/plot4.png", width = 480, height = 480, units = "px")

# Two rows, two columns
par(mfrow = c(2, 2))

# Top-left: global active power over time
plot(x = epc_feb$datetime, y = epc_feb$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")

# Top-right: voltage over time
plot(x = epc_feb$datetime, y = epc_feb$Voltage, type = "l", xlab = "", ylab = "Voltage")

# Bottom-left: the three sub-metering series plus a legend
plot(epc_feb$datetime, epc_feb$Sub_metering_1, type = "l", col = "black", ylab = "Energy sub metering", xlab = "")
lines(epc_feb$datetime, epc_feb$Sub_metering_2, col = "red")
lines(epc_feb$datetime, epc_feb$Sub_metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"), lty = c(1, 1, 1), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")

# Bottom-right: global reactive power over time
plot(x = epc_feb$datetime, y = epc_feb$Global_reactive_power, type = "l", xlab = "", ylab = "Global_reactive_power")

# Close graphic device
dev.off()
|
5d8baa0fff310e0ec20d704fb6180574d30e678e
|
88ef8c8d97e825a78e96da1346e1bc3bcfaf7d63
|
/seismology.R
|
600adfaf65035d5e21e9c6096a370b57c65554c5
|
[] |
no_license
|
earlbellinger/mesa
|
9a4aad16ca608cd0fb9e949338168facfd0f64af
|
4f6b3aa7396c496a8bd1cd6db78b603845eb94f4
|
refs/heads/master
| 2021-03-16T10:22:34.916871
| 2015-12-14T16:48:56
| 2015-12-14T16:48:56
| 38,882,904
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,375
|
r
|
seismology.R
|
#### Obtain model properties from evolutionary tracks
#### Author: Earl Bellinger ( bellinger@mps.mpg.de )
#### Stellar predictions & Galactic Evolution Group
#### Max-Planck-Institut fur Sonnensystemforschung
# Print a traceback whenever an uncaught error occurs (batch-run debugging)
options(error=traceback)
library(matrixStats)
library(magicaxis)
library(RColorBrewer)
library(parallel)
library(parallelMap)
# Color palette used to distinguish spherical degrees l in plots
cl <- brewer.pal(4, "BrBG")
#rbPal <- colorRampPalette(c('red','blue'))
# Solar surface heavy-element-to-hydrogen ratio used to compute [Fe/H]
Z_div_X_solar = 0.02293
# Column names of the ADIPLS frequency files (degree, order, frequency, inertia)
freqs_cols <- c('l', 'n', 'nu', 'inertia')
# Filename patterns for MESA profiles and their matching frequency files
profile_pattern <- 'profile.+.data$'
freqs_pattern <- 'profile.+-freqs.dat$'
# Plot output settings
plot_width <- 4
plot_height <- 2.5
font <- "Palatino"
separation_dir <- 'plots'
dir.create(separation_dir, showWarnings=FALSE)
################################################################################
### Seismological calculations #################################################
################################################################################
## Separation: the difference between two mode frequencies,
## nu_{l1,n1} - nu_{l2,n2}. Returns NA unless both (l, n) pairs
## identify exactly one row of df (columns: l, n, nu).
separation <- function(first_l, first_n, second_l, second_n, df) {
  idx_first <- which(df$l == first_l & df$n == first_n)
  idx_second <- which(df$l == second_l & df$n == second_n)
  # Only defined when each mode is present exactly once
  if (length(idx_first) != 1 || length(idx_second) != 1) {
    return(NA)
  }
  df$nu[idx_first] - df$nu[idx_second]
}
# Five point averages defined by
#dd_01= 1/8( nu_[n-1,0] - 4*nu_[n-1,1] + 6*nu_[n,0] - 4*nu[n, 1] + nu_[n+1,0] )
#dd_10=-1/8( nu_[n-1,1] - 4*nu_[n, 0] + 6*nu_[n,1] - 4*nu[n+1,0] + nu_[n+1,1] )
# dd(l0, l1, n, df): smoothed small separation around radial order n, using
# one of the two five-point formulas above depending on (l0, l1).
# df must have columns l, n, nu (one row per oscillation mode).
# Returns NA when (l0, l1) is not (0,1)/(1,0) or a required mode is absent.
dd <- function(l0, l1, n, df) {
# All l = 0 (radial) and l = 1 (dipole) modes with positive radial order
ell.0 <- df[df$l==0 & df$n>0,]
ell.1 <- df[df$l==1 & df$n>0,]
# Modes at radial orders n-1, n and n+1 (any degree)
n. <- df[df$n==n,]
n.minus.one <- df[df$n==n-1,]
n.plus.one <- df[df$n==n+1,]
# merge() intersects an order subset with a degree subset, so each
# merge(...)$nu below is the frequency of one specific (l, n) mode
val <- if (l0 == 0 && l1 == 1) { ## dd_01
( merge(n.minus.one, ell.0)$nu -
4*merge(n.minus.one, ell.1)$nu +
6*merge(n., ell.0)$nu -
4*merge(n., ell.1)$nu +
merge(n.plus.one, ell.0)$nu )/8
} else if (l1 == 0 && l0 == 1) { ## dd_10
-( merge(n.minus.one, ell.1)$nu -
4*merge(n., ell.0)$nu +
6*merge(n., ell.1)$nu -
4*merge(n.plus.one, ell.0)$nu +
merge(n.plus.one, ell.1)$nu )/8
} else NA
# A missing mode makes some merge() empty, giving a zero-length result
if (length(val) == 0) NA
else val
}
## Separations and ratios ----

# Small separation: d_{l,l+2}(n) = nu_{l,n} - nu_{l+2,n-1}
dnu <- function(l, n, df) {
  separation(l, n, l + 2, n - 1, df)
}

# Large separation: D_l(n) = nu_{l,n} - nu_{l,n-1}
Dnu <- function(l, n, df) {
  separation(l, n, l, n - 1, df)
}

# Ratio of the small separation to the large separation
r_sep <- function(l, n, df) {
  dnu(l, n, df) / Dnu(1 - l, n + l, df)
}

# Ratio of the five-point average (dd) to the large separation
r_avg <- function(l, n, df) {
  dd(l, 1 - l, n, df) / Dnu(1 - l, n + l, df)
}
get_averages <- function(f, df, freqs, l_degs, nu_max, outf=FALSE) {
# calculate averages of things like f = dnu, Dnu, r_sep, r_avg
# df is the where the result will be stored
# freqs are a data frame with columns l, n, nu
# l_degs are the l's for which this calculation should be made
# nu_max is the center of the gaussian
# make a plot with filename 'outf' if outf != FALSE
# Adds three entries to df named <sep>_median, <sep>_slope, <sep>_intercept
sep_name <- deparse(substitute(f))
a <- c() # contains the computed quantity (e.g. large freq separations)
b <- c() # contains frequencies of the base mode
pchs <- c() # if there's more than one l, get different symbols for each
#err <- c() # uncertainties on frequencies if they are known (not models)
# Evaluate f at every radial order of every requested degree, dropping NAs
for (l_deg in l_degs) {
ell <- freqs[freqs$n > 1 & freqs$l==l_deg,]
vals <- sapply(unique(ell$n), function(n) f(l_deg, n, freqs))
not.nan <- complete.cases(vals)
a <- c(a, vals[not.nan])
b <- c(b, ell$nu[not.nan])
pchs = c(pchs, rep(l_deg+1, sum(not.nan)))
#if ("dnu" %in% names(freqs)) err <- c(err, 1/ell$dnu[not.nan])
}
# build expression for y label of plot
ylab <- if (sep_name == 'Dnu' && length(l_degs) > 1) bquote(Delta*nu)
else if (sep_name == 'Dnu') bquote(Delta*nu[.(l_degs)])
else if (sep_name == 'dnu') bquote(delta*nu[.(l_degs)*','*.(l_degs+2)])
else if (sep_name == 'r_sep') bquote(r[.(l_degs)*','*.(l_degs+2)])
else if (sep_name == 'r_avg') bquote(r[.(l_degs)*','*.(1-l_degs)])
ylab <- bquote(.(ylab) ~ "["*mu*Hz*"]")
# Column-name prefix encoding which separation/ratio this is
sep_name <- if (sep_name == 'Dnu' && length(l_degs) > 1) paste0(sep_name)
else if (sep_name == 'Dnu') paste0(sep_name, l_degs)
else if (sep_name == 'dnu') paste0(sep_name, l_degs, l_degs+2)
else if (sep_name == 'r_sep') paste0(sep_name, l_degs, l_degs+2)
else if (sep_name == 'r_avg') paste0(sep_name, l_degs, 1-l_degs)
# Gaussian weighting envelope centered on nu_max; the fwhm formula is an
# empirical scaling of the oscillation power envelope width -- TODO confirm
fwhm <- (0.66*nu_max**0.88)/(2*sqrt(2*log(2)))
gaussian_env <- dnorm(b, nu_max, fwhm)
#if ("dnu" %in% names(freqs)) err
# Weighted median and weighted linear trend of the quantity vs frequency
wm <- weightedMedian(a, gaussian_env)
df[paste0(sep_name, "_median")] <- wm
fit <- lm(a~b, weights=gaussian_env)
df[paste0(sep_name, "_slope")] <- coef(fit)[2]
df[paste0(sep_name, "_intercept")] <- coef(fit)[1]
#lower.bound = wm-coef(fit)[1]
#upper.bound = wm+coef(fit)[1]
# Optional diagnostic plot: points sized by their Gaussian weight, with
# the weighted fit (dashed) and nu_max (dotted) overlaid
if (outf != FALSE) {
cairo_pdf(file.path(separation_dir,
paste0(sep_name, '-', outf, '.pdf')),
width=plot_width, height=plot_height, family=font)
par(mar=c(3, 4, 1, 1), mgp=c(2, 0.25, 0), cex.lab=1.3)
plot(a~b, tck=0, ylab=ylab, cex=gaussian_env*1.75/max(gaussian_env),
#ylim=c(lower.bound-lower.bound*0.05, upper.bound+upper.bound*0.05),
ylim=range(wm, coef(fit)[1], wm+(wm-coef(fit)[1])),
col=if (length(l_degs)==1) 1 else cl[pchs],
pch=if (length(l_degs)==1) 1 else pchs,
xlab=expression("frequency"~"["*mu*Hz*"]"))
abline(fit, lty=2)
abline(v=nu_max, lty=3)
magaxis(side=1:4, family=font, tcl=0.25, labels=FALSE)
if (length(l_degs)>1)
legend("topright", pch=l_degs+1, col=cl, cex=0.75,
ncol=length(l_degs), #bty="n",
legend=paste0("\u2113=", l_degs))
dev.off()
}
df
}
# Compute all seismic summary quantities (large/small separations and their
# ratios) for one model's mode set. freqs has columns l, n, nu (and possibly
# inertia); modes above the acoustic cutoff are discarded. Returns a one-row
# data-frame-like object, or NULL when the mode set is unusable.
seismology <- function(freqs, nu_max, acoustic_cutoff=Inf, outf=FALSE) {
if (nrow(freqs) == 0) {
print("No frequencies found")
return(NULL)
}
# Keep complete, unique modes below the acoustic cutoff
freqs <- unique(freqs[complete.cases(freqs) & freqs$nu < acoustic_cutoff,])
# fix radial modes because ADIPLS breaks sometimes
for (l_mode in unique(freqs$l)) {
# grab the relevant l's and n's
selection <- freqs$l==l_mode & freqs$n>0
ell <- freqs[selection,]
ns <- ell$n
# check if any n's are duplicated and if so, shift them down
if (any(duplicated(ns))) {
dup <- which(duplicated(ns)) # grab duplicated (hopef. only one)
if (length(dup) > 1) { # hopeless
print(paste0("Duplicated l=", l_mode, " mode, exiting"))
return(NULL)
}
toshift <- 1:(dup-1) # find the ones to shift
ell$n[toshift] <- ns[toshift] - 1 # calculate new n vals
freqs[selection,] <- ell # replace the old with the new
freqs <- freqs[!(freqs$l==l_mode & freqs$n==0),] # overwrite data
}
}
# Accumulate weighted medians/slopes/intercepts of each quantity:
# large separation over all degrees, then small separations and ratios
# for l = 0 and l = 1
seis.DF <- NULL
seis.DF <- get_averages(Dnu, seis.DF, freqs, sort(unique(freqs$l)),
nu_max, outf)
for (l_deg in 0:1) {
seis.DF <- get_averages(dnu, seis.DF, freqs, l_deg, nu_max, outf)
seis.DF <- get_averages(r_sep, seis.DF, freqs, l_deg, nu_max, outf)
seis.DF <- get_averages(r_avg, seis.DF, freqs, l_deg, nu_max, outf)
}
return(seis.DF)
}
################################################################################
### Obtain observable properties from models ###################################
################################################################################
# Extract stellar parameters and seismic observables for one MESA profile.
# profile_file: MESA profile whose header holds global model quantities;
# freqs_file: matching ADIPLS mode frequencies; ev_history: the parsed
# history.data table; min_age: minimum age in Gyr (pre-ZAMS models skipped).
# Returns a one-row merged data frame, or NULL when the model is unusable.
get_obs <- function(profile_file, freqs_file, ev_history, min_age=0.001) {
#print(profile_file)
# Only the profile header (one row after the column names) is needed
profile_header <- read.table(profile_file, header=TRUE, nrows=1, skip=1)
hstry <- ev_history[ev_history$model_number==profile_header$model_number,]
if (nrow(hstry) == 0) {#|| hstry$mass_conv_core > 0)
print(c("Model ", profile_file, " failed"))
return(NULL)
}
obs.DF <- NULL
## Things we want to predict
obs.DF["age"] <- profile_header$star_age/10**9
# star_age is in years; /10**9 converts to Gyr
if (obs.DF["age"] < min_age && !grepl('ZAMS', profile_file)) {
print(paste(profile_file, "below minimum age of", min_age))
return(NULL)
}
obs.DF["mass"] <- profile_header$star_mass
obs.DF["radius"] <- profile_header$photosphere_r
obs.DF["He"] <- (profile_header$star_mass_he3 +
profile_header$star_mass_he4)/profile_header$star_mass
obs.DF["log_g"] <- hstry$log_g
## Things we can observe
obs.DF["L"] <- profile_header$photosphere_L
obs.DF["Teff"] <- profile_header$Teff
# Surface metallicity relative to the solar Z/X ratio
obs.DF["Fe_H"] <- log10(10**hstry$log_surf_z/hstry$surface_h1/Z_div_X_solar)
# Diagnostic only: flag models that have developed a convective core
if (hstry$mass_conv_core > 0) {
print(paste("ConvectiveCore", profile_file,
obs.DF["age"], obs.DF["mass"], obs.DF["He"],
hstry$mass_conv_core,
hstry$mass_conv_core/profile_header$star_mass))
}
freqs <- read.table(freqs_file, col.names=freqs_cols, fill=TRUE)
# Convert the cutoff from angular frequency to cyclic frequency
acoustic_cutoff <- hstry$acoustic_cutoff/(2*pi)
nu_max <- hstry$nu_max
# outf: with probability 1/10001 emit diagnostic plots for this model
seis.DF <- seismology(freqs, nu_max, acoustic_cutoff,
outf=ifelse(sample(0:10000, 1)==0, gsub("/", "-", freqs_file), FALSE))
return(merge(rbind(obs.DF), rbind(seis.DF)))
}
### Obtain evolutionary tracks from a MESA directory
# Parses physical parameters out of the directory name, reads the MESA
# history file, pairs every profile with its frequency file, and computes
# observables for each pair in parallel. Returns a data frame ordered by
# age, or NA when the directory holds too little usable output.
parse_dir <- function(directory) {
#print(directory)
# parse dirname string e.g. "M=1.0_Y=0.28"
params.DF <- NULL
for (var in unlist(strsplit(basename(directory), '_'))) {
nameval <- unlist(strsplit(var, "="))
params.DF[nameval[1]] <- as.numeric(nameval[2])
}
# obtain history
log_dir <- file.path(directory, "LOGS")
logs <- list.files(log_dir)
if (length(logs) <= 1) {
print(paste(directory, "No logs found!"))
return(NA)
}
# MESA history files carry five lines of header metadata before the table
ev_history <- read.table(file.path(log_dir, 'history.data'),
header=TRUE, skip=5)
# figure out which profiles & frequency files to use
# (only profiles that have a matching '-freqs.dat' companion are kept)
profile_candidates <- logs[grep(profile_pattern, logs)]
freq_file_candidates <- logs[grep(freqs_pattern, logs)]
profile_files <- c()
freq_files <- c()
for (profile_file in profile_candidates) {
freq_name <- sub(".data", "-freqs.dat", profile_file, fixed=TRUE)
if (freq_name %in% freq_file_candidates) {
profile_files <- c(profile_files, profile_file)
freq_files <- c(freq_files, freq_name)
}
}
if (length(profile_files) <= 2) {
print("Too few profile files")
return(NA)
}
# obtain observable information (one get_obs call per profile, in parallel;
# failed models return NULL and are dropped by rbind.fill)
parallelStartMulticore(max(1, detectCores()))
obs.DF <- do.call(plyr:::rbind.fill,
parallelMap(function(profile_file, freqs_file)
get_obs(profile_file, freqs_file, ev_history),
profile_file=file.path(log_dir, profile_files),
freqs_file=file.path(log_dir, freq_files)))
# Attach the directory-name parameters to every row, ordered by age
return(merge(rbind(params.DF), obs.DF[with(obs.DF, order(age)),]))
}
# Command-line entry point: Rscript seismology.R <track-directory>
# Parses the track, writes <track-directory>.dat, and occasionally
# (1-in-51 chance) also renders an HR diagram of the track.
args <- commandArgs(TRUE)
if (length(args)>0) {
print(args[1])
DF <- unique(parse_dir(args[1]))
DF <- DF[complete.cases(DF),]
min_ts <- 0.0001
while (any(diff(DF$age) < min_ts)) # remove tiny time steps
DF <- DF[c(1, which(diff(DF$age) >= min_ts)+1),]
if (nrow(DF) > 2 && ncol(DF) > 2) # save data file
write.table(DF, paste0(args[1], '.dat'),
quote=FALSE, sep='\t', row.names=FALSE)
if (sample(0:50, 1)==0) { # plot HR diagram
cairo_pdf(file.path(separation_dir, paste0(args[1], '-HR.pdf')),
width=plot_width, height=plot_height, family=font)
par(mar=c(3, 4, 1, 1), mgp=c(2, 0.25, 0), cex.lab=1.3)
# Effective temperature axis reversed, per HR-diagram convention
plot(DF$Teff, DF$L, type='l', tcl=0,
xlab=expression(T[eff]),
ylab=expression(L / L['\u0298']),
xlim=rev(range(DF$Teff)))
# Reference lines at solar Teff (5777 K) and solar luminosity
abline(v=5777, lty=3, col='lightgray')
abline(h=1, lty=3, col='lightgray')
# Points colored by age (0-13.9 Gyr mapped onto an 11-color palette)
# and sized by radius relative to the largest model
points(DF$Teff, DF$L, pch=1,
col=brewer.pal(11,"Spectral")[floor(DF$age/13.9*11)+1],
cex=0.25*DF$radius/max(DF$radius))
magaxis(side=1:4, family=font, tcl=0.25, labels=FALSE)
dev.off()
}
}
|
ca5ce1fcf183cf25e5aa99e8aa84b9de0943d6b0
|
17d4e98e859d4ea34a6783ed3757a6e3d89b32e3
|
/wikipageview/R/articlecount.R
|
619da1742cf075ff7031af6bf993f70c32f4c234
|
[
"MIT"
] |
permissive
|
haotianjin/ubco-data-534-project-group
|
318409dc7226dc156751174ff2c809426ecfa557
|
e006c9e9a1b5102e37e2f12d191f006815b4cd72
|
refs/heads/main
| 2023-03-11T18:32:06.333202
| 2021-02-19T02:20:49
| 2021-02-19T02:20:49
| 338,165,641
| 0
| 0
| null | 2021-02-15T14:21:00
| 2021-02-11T22:10:01
|
R
|
UTF-8
|
R
| false
| false
| 2,632
|
r
|
articlecount.R
|
#' Return page view counts of a specific article from the Wikipedia page view API.
#'
#' Generates a data frame containing a specific article's page view counts
#' within the requested period.
#'
#' @import tidyverse
#' @import httr
#' @import jsonlite
#' @import stringr
#' @import lubridate
#' @param article_title the article name as it appears in the Wikipedia URL.
#'   For example: "https://en.wikipedia.org/wiki/Linear_algebra" has
#'   article_title "Linear_algebra"
#' @param starting period start timestamp with format YYYYMMDD, for example
#'   20200102 (Jan 2nd, 2020); default is the day before yesterday
#' @param ending period end timestamp with format YYYYMMDD, for example
#'   20200102 (Jan 2nd, 2020); default is yesterday
#' @param period period type, either 'daily' or 'monthly'; default 'daily'
#' @return a 2-column data frame (timestamp as Date, view count)
#' @export
#' @examples
#' get_article_vc("Linear_algebra", "20150803", "20201002", "monthly")
get_article_vc <- function(article_title,
                           # format() replaces the previous substr/paste
                           # pipeline; it yields the same YYYYMMDD string
                           starting = format(Sys.Date() - 2, "%Y%m%d"),
                           ending = format(Sys.Date() - 1, "%Y%m%d"),
                           period = "daily") {
  # Build the REST endpoint; path components are joined with "/"
  url <- paste(
    "https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia/all-access/all-agents",
    # values in Wikipedia URL represents the article title
    # For example: "https://en.wikipedia.org/wiki/Linear_algebra" article_title is "Linear_algebra"
    article_title,
    # values in ["daily", "monthly"] is allowed for tool
    period,
    # start time not earlier than 20150701, with format YYYYMMDD
    starting,
    # with format YYYYMMDD
    ending,
    sep = "/")

  # get response from url and parse the JSON payload
  response <- GET(url)
  result <- fromJSON(content(response, as = "text", encoding = "UTF-8"))

  # Fail early on a bad request, surfacing the API's own error message.
  # (Replaces the previous tryCatch that caught its own stop() only to
  # re-raise a generic message.)
  if (response$status_code != 200) {
    stop("bad request: ", result$detail, call. = FALSE)
  }

  # transform dataset to data frame and keep timestamp + view count columns
  result <- data.frame(result)
  viewcount <- result[, c(4, 7)]

  # timestamps arrive as YYYYMMDD00; drop the trailing "00" and parse as Date
  viewcount$items.timestamp <- ymd(str_sub(viewcount$items.timestamp, 1, -3))
  viewcount
}
|
f5ab5e8486b5a1f3f9f89fe76d6ccae585cf6ece
|
e5b0acd8a255c9491d27fc7706db6833b151db22
|
/R/test-in-sso.R
|
aa1ac0624436d26a28c99eaa35fa201550fa8345
|
[
"MIT"
] |
permissive
|
rstudio/shinycoreci
|
52b867e603a207d1e62552bf006572f6365f85fe
|
05cb467a217972a5f838d18296ee701307a2430f
|
refs/heads/main
| 2023-08-31T14:59:49.494932
| 2023-08-14T14:49:17
| 2023-08-14T14:49:17
| 227,222,013
| 38
| 5
|
NOASSERTION
| 2023-09-11T18:29:27
| 2019-12-10T21:46:45
|
HTML
|
UTF-8
|
R
| false
| false
| 8,143
|
r
|
test-in-sso.R
|
#' Retrieve default GitHub username
#'
#' Shells out to the same command you would run in a terminal:
#' `git config github.user`, and returns its output.
#' @export
github_user <- function() {
  user_name <- system("git config github.user", intern = TRUE)
  user_name
}
#' Test Apps in SSO/SSP
#'
#' Automatically launches docker in a background process. Once the docker is ready, a shiny application will be launched to help move through the applications.
#'
#' The docker application will stop when the shiny application exits.
#'
#' @inheritParams test_in_browser
#' @param r_version R version to use. Ex: \code{"3.6"}
#' @param release Distro release name, such as "focal" for ubuntu or "7" for centos
#' @param tag Extra tag information for the docker image. This will prepend a \verb{-} if a value is given.
#' @param user GitHub username. Ex: `schloerke`. Uses [`github_user`] by default
#' @param port Port for local shiny application
#' @param port_background Port to connect to the Docker container
#' @export
#' @describeIn test_in_ssossp Test SSO Shiny applications
#' @examples
#' \dontrun{test_in_sso()}
#' \dontrun{test_in_ssp()}
test_in_sso <- function(
  app_name = apps[1],
  apps = apps_manual,
  ...,
  user = github_user(),
  release = c("focal", "bionic", "centos7"),
  r_version = c("4.3", "4.2", "4.1", "4.0", "3.6"),
  tag = NULL,
  port = 8080,
  port_background = switch(release, "centos7" = 7878, 3838),
  host = "127.0.0.1"
) {
  release <- match.arg(release)
  test_in_ssossp(
    user = user,
    app_name = app_name,
    apps = apps,
    type = "sso",
    release = release,
    port_background = port_background,
    r_version = match.arg(r_version),
    # Forward the caller's tag. Previously hard-coded to NULL, which
    # silently discarded any `tag` argument supplied by the user.
    tag = tag,
    host = host,
    port = port
  )
}
#' @export
#' @param license_file Path to a SSP license file
#' @describeIn test_in_ssossp Test SSP Shiny applications
test_in_ssp <- function(
  app_name = apps[1],
  apps = apps_manual,
  ...,
  license_file = NULL,
  user = github_user(),
  release = c("focal", "bionic", "centos7"),
  r_version = c("4.3", "4.2", "4.1", "4.0", "3.6"),
  tag = NULL,
  port = 8080,
  port_background = switch(release, "centos7" = 8989, 4949),
  host = "127.0.0.1"
) {
  release <- match.arg(release)
  test_in_ssossp(
    user = user,
    app_name = app_name,
    apps = apps,
    type = "ssp",
    release = release,
    license_file = license_file,
    port_background = port_background,
    r_version = match.arg(r_version),
    # Forward the caller's tag. Previously hard-coded to NULL, which
    # silently discarded any `tag` argument supplied by the user.
    tag = tag,
    host = host,
    port = port
  )
}
# Shared implementation behind test_in_sso()/test_in_ssp(): verifies the
# docker port is free, launches the SSO/SSP docker server in a background R
# session (callr::r_bg), waits until the container answers HTTP, then starts
# the interactive tester app. The container is killed when this function
# exits (on.exit).
test_in_ssossp <- function(
user = github_user(),
app_name = apps[1],
apps = apps_manual,
type = c("sso", "ssp"),
release = c("focal", "bionic", "centos7"),
license_file = NULL,
port_background = switch(type,
sso = switch(release, "centos7" = 7878, 3838),
ssp = switch(release, "centos7" = 8989, 4949)
),
r_version = c("4.3", "4.2", "4.1", "4.0", "3.6"),
tag = NULL,
host = "127.0.0.1",
port = 8080
) {
# validate_core_pkgs()
apps <- resolve_app_name(apps)
type <- match.arg(type)
release <- match.arg(release)
# Evaluate the default before `type`/`release` promises could change
force(port_background)
r_version <- match.arg(r_version)
# Radiant is excluded unconditionally (not only on centos7)
radiant_app <- "141-radiant"
if (radiant_app %in% apps) {
message("\n!!! Radiant app being removed. It does not play well with centos7 !!!\n")
apps <- setdiff(apps, radiant_app)
if (identical(app_name, radiant_app)) {
app_name <- apps[1]
}
}
# A successful GET means something is already bound to the port
message("Verify Docker port is available...", appendLF = FALSE)
conn_exists <- tryCatch({
httr::GET(paste0("http://127.0.0.1:", port_background))
# connection exists
TRUE
}, error = function(e) {
# nothing exists
FALSE
})
if (conn_exists) {
message("")
stop("Port ", port_background, " is busy. Maybe stop all other docker files? (`docker stop NAME`) Can inspect with `docker ps` in terminal.")
}
message(" OK")
message("Starting Docker...")
if (!docker_is_alive()) {
stop("Cannot connect to the Docker daemon. Is the docker daemon running?")
}
if (!docker_is_logged_in(user = user)) {
stop("Docker is not logged in. Please run `docker login` in the terminal with your Docker Hub username / password")
}
# Run the docker server in a supervised background R process; arguments are
# passed with trailing underscores to avoid clashing with globals
docker_proc <- callr::r_bg(
function(type_, release_, license_file_, port_, r_version_, tag_, launch_browser_, docker_run_server_, user_) {
docker_run_server_(
type = type_,
release = release_,
license_file = license_file_,
port = port_,
r_version = r_version_,
tag = tag_,
launch_browser = launch_browser_,
user = user_
)
},
list(
type_ = type,
release_ = release,
license_file_ = license_file,
port_ = port_background,
r_version_ = r_version,
tag_ = tag,
launch_browser_ = FALSE,
user_ = user,
docker_run_server_ = docker_run_server
),
supervise = TRUE,
stdout = "|",
stderr = "2>&1",
cmdargs = c(
"--slave", # tell the session that it's being controlled by something else
# "--interactive", # (UNIX only) # tell the session that it's interactive.... but it's not
"--quiet", # no printing
"--no-save", # don't save when done
"--no-restore" # don't restore from .RData or .Rhistory
)
)
# Guarantee the container is torn down however this function exits
on.exit({
if (docker_proc$is_alive()) {
message("Killing Docker...")
docker_proc$kill()
docker_stop(type, r_version, release)
message("Killing Docker... OK")
}
}, add = TRUE)
# wait for docker to start
## (wait until '/' is available)
# Helper: drain and join whatever the background process has printed
get_docker_output <- function() {
if (!docker_proc$is_alive()) {
return("")
}
out <- docker_proc$read_output_lines()
if (length(out) > 0 && nchar(out) > 0) {
paste0(out, collapse = "\n")
} else {
""
}
}
# Poll the container until it answers HTTP (or the process dies)
while (TRUE) {
if (!docker_proc$is_alive()) {
message("Trying to display docker failure message...")
print(docker_proc$read_all_output_lines())
stop("Background docker process has errored.")
}
tryCatch({
# will throw error on connection failure
httr::GET(paste0("http://127.0.0.1:", port_background))
cat(get_docker_output(), "\n")
break
}, error = function(e) {
Sys.sleep(0.5) # arbitrary, but it'll be a while till the docker is launched
# display all docker output
out <- get_docker_output()
if (nchar(out) > 0) {
cat(out, "\n", sep = "")
}
invisible()
})
}
cat("(Docker output will no longer be tracked in console)\n")
message("Starting Docker... OK") # starting docker
# Accumulated console output shared by the per-app closures below (via <<-)
output_lines <- ""
# One descriptor per app: lifecycle hooks, log access, URL and header used
# by the interactive tester UI
app_infos <- lapply(apps, function(app_name) {
list(
app_name = app_name,
start = function() {
output_lines <<- ""
invisible(TRUE)
},
on_session_ended = function() { invisible(TRUE) },
output_lines = function(reset = FALSE) {
if (release == "centos7") {
return("(centos7 console output not available)")
}
if (isTRUE(reset)) {
output_lines <<- ""
return(output_lines)
}
if (is.null(docker_proc) || !docker_proc$is_alive()) {
return("(dead)")
}
docker_proc_output_lines <- docker_proc$read_output_lines()
if (any(nchar(docker_proc_output_lines) > 0)) {
output_lines <<- paste0(
output_lines,
if (nchar(output_lines) > 0) "\n",
paste0(docker_proc_output_lines, collapse = "\n")
)
}
output_lines
},
app_url = function() {
paste0("http://", host, ":", port_background, "/", app_name)
},
# user_agent = function(user_agent) {
# app_status_user_agent_browser(user_agent, paste0(type, "_", r_version, "_", release))
# },
header = function() {
shiny::tagList(shiny::tags$strong(type, ": "), shiny::tags$code(release), ", ", shiny::tags$code(paste0("r", r_version)))
}
)
})
# Build and immediately run the tester shiny app (printing runs it)
app <- test_in_external(
app_infos = app_infos,
default_app_name = resolve_app_name(app_name),
host = host,
port = port
)
# Run right now
print(app)
}
|
8eae3c055b99164a0c4fef823fd7038c17a60712
|
c012e767662190621ff739c0610653c1b6a9984b
|
/man/backup.Rd
|
8d93e5b5e1b1b848cb92f481306822c57ded533d
|
[
"MIT"
] |
permissive
|
IMCR-Hackathon/toolkit
|
e63d663fb650c966796957d5a2ea9462004c74fb
|
dff0643fe9d02f9da61c4c677d57596d661ed385
|
refs/heads/master
| 2020-06-24T05:11:46.037268
| 2019-09-26T19:48:49
| 2019-09-26T19:48:49
| 198,857,632
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 491
|
rd
|
backup.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/backup.R
\name{backup}
\alias{backup}
\title{Back up all software metadata in the IMCR Portal}
\usage{
backup(path)
}
\arguments{
\item{path}{(character) Where IMCR metadata will be written.}
}
\value{
(.json) JSON metadata files written to the specified \code{path}.
}
\description{
Back up all software metadata in the IMCR Portal
}
\examples{
\dontrun{
get_imcr_json()
backup("/path/to/backup/directory")
}
}
|
8dd4129713ab04378b422ecb649dddbb7339777b
|
3474af6c604afd89a64b3a1a637f02384669dba7
|
/R/geom-pictogram.R
|
9e335e893a4889084dc0ddaa0190a3d04985a378
|
[] |
no_license
|
edwindj/waffle
|
4dbb28d1aabaaa0a93502fa122fcb853400924dd
|
1d076c55f30b1a5ad101679be726e5d90c86f91b
|
refs/heads/master
| 2020-07-08T04:51:51.664029
| 2019-08-21T12:20:26
| 2019-08-21T12:20:26
| 203,570,123
| 1
| 1
| null | 2019-08-21T11:28:08
| 2019-08-21T11:28:07
| null |
UTF-8
|
R
| false
| false
| 5,718
|
r
|
geom-pictogram.R
|
# Build a discrete ggplot2 scale whose palette maps labels to Font Awesome
# glyphs. `values` holds Font Awesome icon names; each is looked up in the
# internal .fa_unicode table and replaced by its unicode character.
# NOTE(review): the default is `values = NULL`, but the fallback to "circle"
# is guarded by is_missing(values), which is FALSE for an explicit (or
# default) NULL -- so a NULL reaches the palette untouched. Confirm whether
# the guard should be is.null(values) instead.
picto_scale <- function(aesthetic, values = NULL, ...) {
values <- if (is_missing(values)) "circle" else force(values)
# Palette: if more glyphs are requested than supplied, repeat the first one
pal <- function(n) {
vapply(
if (n > length(values)) rep(values[[1]], n) else values,
function(.x) .fa_unicode[.fa_unicode[["name"]] == .x, "unicode"],
character(1),
USE.NAMES = FALSE
)
}
discrete_scale(aesthetic, "manual", pal, ...)
}
#' Used with geom_pictogram() to map Font Awesome fonts to labels
#'
#' @param ... other arguments passed on to the underlying discrete scale
#' @param values character vector of Font Awesome glyph names to map the
#'   labels to
#' @param aesthetics the aesthetic(s) this scale works with; defaults to
#'   "label"
#' @export
scale_label_pictogram <- function(..., values, aesthetics = "label") {
picto_scale(aesthetics, values, ...)
}
#' Legend builder for pictograms
#'
#' Draws a single legend key as a text grob containing the mapped glyph,
#' honouring the usual text aesthetics (colour/fill, angle, justification,
#' font family/face/size).
#'
#' @param data,params,size legend key things
#' @keywords internal
#' @export
draw_key_pictogram <- function(data, params, size) {
# msg("==> draw_key_pictogram()")
#
# print(str(data, 1))
# print(str(params, 1))
# Fall back to a placeholder character when no label is mapped
if (is.null(data$label)) data$label <- "a"
textGrob(
label = data$label,
x = 0.5, y = 0.5,
rot = data$angle %||% 0,
hjust = data$hjust %||% 0,
vjust = data$vjust %||% 0.5,
gp = gpar(
# colour falls back to fill, then black; alpha applied on top
col = alpha(data$colour %||% data$fill %||% "black", data$alpha),
fontfamily = data$family %||% "",
fontface = data$fontface %||% 1,
fontsize = (data$size %||% 3.88) * .pt,
lineheight = 1.5
)
)
}
#' Pictogram Geom
#'
#' There are two special/critical `aes()` mappings:
#' - `label` (so the geom knows which column to map the glyphs to)
#' - `values` (which column you're mapping the filling for the squares with)
#'
#' @md
#' @param mapping Set of aesthetic mappings created by `aes()` or
#' `aes_()`. If specified and `inherit.aes = TRUE` (the
#' default), it is combined with the default mapping at the top level of the
#' plot. You must supply `mapping` if there is no plot mapping.
#' @param n_rows how many rows should there be in the waffle chart? default is 10
#' @param flip If `TRUE`, flip x and y coords. n_rows then becomes n_cols.
#' Useful to achieve waffle column chart effect. Defaults is `FALSE`.
#' @param make_proportional compute proportions from the raw values? (i.e. each
#' value `n` will be replaced with `n`/`sum(n)`); default is `FALSE`.
#' @param data The data to be displayed in this layer. There are three
#' options:
#'
#' If `NULL`, the default, the data is inherited from the plot
#' data as specified in the call to `ggplot()`.
#'
#' A `data.frame`, or other object, will override the plot
#' data. All objects will be fortified to produce a data frame. See
#' `fortify()` for which variables will be created.
#'
#' A `function` will be called with a single argument,
#' the plot data. The return value must be a `data.frame.`, and
#' will be used as the layer data.
#' @param na.rm If `FALSE`, the default, missing values are removed with
#' a warning. If `TRUE`, missing values are silently removed.
#' @param show.legend logical. Should this layer be included in the legends?
#' `NA`, the default, includes if any aesthetics are mapped.
#' `FALSE` never includes, and `TRUE` always includes.
#' It can also be a named logical vector to finely select the aesthetics to
#' display.
#' @param inherit.aes If `FALSE`, overrides the default aesthetics,
#' rather than combining with them. This is most useful for helper functions
#' that define both data and aesthetics and shouldn't inherit behaviour from
#' the default plot specification, e.g. `borders()`.
#' @param ... other arguments passed on to `layer()`. These are
#' often aesthetics, used to set an aesthetic to a fixed value, like
#' `color = "red"` or `size = 3`. They may also be parameters
#' to the paired geom/stat.
#' @export
geom_pictogram <- function(mapping = NULL, data = NULL,
                           n_rows = 10, make_proportional = FALSE, flip = FALSE,
                           ..., na.rm = FALSE, show.legend = NA, inherit.aes = TRUE) {
  # Collect the stat/geom parameters up front so the layer() call stays short.
  layer_params <- list(
    na.rm = na.rm,
    n_rows = n_rows,
    make_proportional = make_proportional,
    flip = flip,
    ...
  )

  layer(
    geom = "pictogram",
    stat = "waffle",
    data = data,
    mapping = mapping,
    position = "identity",
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = layer_params
  )
}
#' @rdname geom_pictogram
#' @export
GeomPictogram <- ggplot2::ggproto(
  `_class` = "GeomPictogram",
  `_inherit` = GeomText,

  # required_aes = c("x", "y", "label", "colour"),

  # Defaults mirror GeomText, but with a larger base size and the Font
  # Awesome solid family so glyphs render at waffle-cell scale.
  default_aes = aes(
    fill = NA, alpha = NA, colour = "black",
    size = 9, angle = 0, hjust = 0.5, vjust = 0.5,
    family = "FontAwesome5Free-Solid", fontface = 1, lineheight = 1
  ),

  # Both draw methods deliberately discard the incoming `coord` and force
  # coord_equal() so each glyph cell stays square, then delegate the actual
  # text rendering to GeomText$draw_panel().
  # NOTE(review): `n_rows`, `make_proportional`, `flip` and `radius` are
  # accepted here but not used in the drawing code — presumably consumed by
  # the "waffle" stat; confirm before removing.
  draw_group = function(self, data, panel_params, coord,
                        n_rows = 10, make_proportional = FALSE, flip = FALSE,
                        radius = grid::unit(0, "npc")) {
    # msg("Called => GeomPictogram::draw_group()")
    coord <- ggplot2::coord_equal()
    grobs <- GeomText$draw_panel(data, panel_params, coord, parse = FALSE, check_overlap = FALSE)
    # msg("Done With => GeomPictogram::draw_group()")
    ggname("geom_pictogram", grid::grobTree(children = grobs))
  },

  draw_panel = function(self, data, panel_params, coord,
                        n_rows = 10, make_proportional = FALSE, flip = FALSE, ...) {
    # msg("Called => GeomPictogram::draw_panel()")
    # print(str(data, 1))
    coord <- ggplot2::coord_equal()
    grobs <- GeomText$draw_panel(data, panel_params, coord, parse = FALSE, check_overlap = FALSE)
    # msg("Done With => GeomPictogram::draw_panel()")
    ggname("geom_pictogram", grid::grobTree(children = grobs))
  },

  # Legend keys are drawn by the dedicated pictogram key function above.
  draw_key = draw_key_pictogram
)
|
150a81d74fe5d74cc4aea8283615fc126c9b9d57
|
9251053f822d7761f5f664c84c103e97a492f212
|
/R/simulate_individuals_df.R
|
dea5c7fd4e7f6daa40f359b33021e224455f1342
|
[
"MIT"
] |
permissive
|
uk-gov-mirror/ukgovdatascience.orgsurveyr
|
c1554a7d224f6a77c2247364080d1a1a52a74c61
|
a7ff62bd4a5e5975bc7302b543695481c2fd7708
|
refs/heads/master
| 2021-10-01T00:56:28.514944
| 2018-11-26T14:06:30
| 2018-11-26T14:06:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,242
|
r
|
simulate_individuals_df.R
|
#' Simulate individuals data frame
#'
#' Given an organisation tbl_graph object with the unit_size column defined,
#' a tibble will be generated with one row per individual in the organisation.
#' For test purposes a dummy variable (`test_var`, N(10, 3)) is also generated.
#'
#' @param x tbl_graph organisation with unit_size defined
#'
#' @return tibble with columns individual_id, individual_name, unit_id, test_var
#' @export
#'
#' @examples
#' set.seed(1234)
#' tg_ex1 <- create_realistic_org(n_children = 4, max_depth = 3, prob=0.3)
#' tg_ex1 <- simulate_unit_size(tg_ex1)
#' df <- simulate_individuals_df(tg_ex1)
#' df
simulate_individuals_df <- function(x) {
  check_tbl_graph_is_org(x)

  nodes_df <- x %>%
    tidygraph::activate(nodes) %>%
    tidygraph::as_tibble()

  # Unit sizes must be simulated before individuals can be expanded.
  if (!('unit_size' %in% colnames(nodes_df))) {
    stop('Need to generate unit size first using the simulate_unit_size function')
  }

  nodes_df %>%
    dplyr::mutate(
      individual_name = purrr::map2(unit_id, unit_size,
                                    ~paste(.x, seq(1, .y), sep = '_')),
      test_var = purrr::map(unit_size, ~stats::rnorm(., mean = 10, sd = 3))
    ) %>%
    # FIX: bare tidyr::unnest() is deprecated/ambiguous — name the
    # list-columns being unnested explicitly.
    tidyr::unnest(cols = c(individual_name, test_var)) %>%
    dplyr::mutate(individual_id = as.character(dplyr::row_number())) %>%
    dplyr::select(individual_id, individual_name, unit_id, test_var)
}
|
2cc7edaeed65b9adbc7c82b2cd402f077e4f9fcf
|
5d6f5daba9f5f9374039bcc649da05ae7626819f
|
/man/print.RV.Rd
|
8aedfd245f8bcb47c5e564e7cfb5b7b5d402e692
|
[] |
no_license
|
Dasonk/drvc
|
b3db750bc396acea475793506ee630fef7ba02ef
|
b78dd94c7961b941f3ce4a5729421c276a86dc5d
|
refs/heads/master
| 2021-01-11T04:57:39.898768
| 2014-05-04T02:47:00
| 2014-05-04T02:47:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 350
|
rd
|
print.RV.Rd
|
\name{print.RV}
\alias{print.RV}
\title{Print random variable}
\usage{
\method{print}{RV}(x, ..., digits = 4)
}
\arguments{
\item{x}{A random variable}
\item{digits}{The number of digits to use when printing}
\item{\ldots}{Further parameters to pass to
print.data.frame}
}
\description{
Provide a nice way to view a random variable
}
|
4120ba2004e759313b3cc6480703d6338f2351ea
|
9ddd623471e8174ade5b9921dbc1cb1da731e115
|
/man/calendar.Rd
|
e32abb0141349ddafa15d5889adabd3b7af4363e
|
[] |
no_license
|
zackarno/koborg
|
2eba2f837b51a494b7efcb8d491e800de6ec70d9
|
6312bb3ab0b59b96f91812b90f5afd224d599b04
|
refs/heads/master
| 2022-09-13T17:11:09.884337
| 2020-05-27T09:45:22
| 2020-05-27T09:45:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 325
|
rd
|
calendar.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class_calendar.R
\name{calendar}
\alias{calendar}
\title{Calendar constructor}
\usage{
calendar(
x = new_date(),
relevant = NA,
label = NA,
constraint = NA,
source = "excel"
)
}
\description{
`calendar()` constructs a calendar vector.
}
|
bd87579365ec93350daa8a72e3dba770627faa90
|
db2cbc930ff30ec463901319136c55322d2ec296
|
/Assignment 5.3.r
|
1b4a9b60a041997851ee205c22f3bec60b2ee583
|
[] |
no_license
|
hariarjun/Assignment-5
|
eab60494e67df2d613d07ea28756304c311b80c7
|
31a459dbcd669a19c5bf568e812d730153288108
|
refs/heads/master
| 2020-03-23T05:57:18.534563
| 2018-07-16T18:42:17
| 2018-07-16T18:42:17
| 141,178,930
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,158
|
r
|
Assignment 5.3.r
|
# Question 1: Test whether two vectors are exactly equal (element by element).
vec1 <- c(rownames(mtcars[1:15, ]))
vec2 <- c(rownames(mtcars[11:25, ]))
vec1 %in% vec2         # membership test (order-independent)
vec1 == vec2           # element-by-element comparison
identical(vec1, vec2)  # exact equality of the whole objects
all.equal(vec1, vec2)  # near-equality, with a report of the differences

# Question 2: Sort the character vectors in ascending and descending order.
sort(vec1, decreasing = FALSE)  # ascending
sort(vec2, decreasing = FALSE)
sort(vec1, decreasing = TRUE)   # descending
sort(vec2, decreasing = TRUE)

# Question 3: Major difference between str() and paste(), with examples.
# str() displays the internal structure of an R object:
str(mtcars)
# paste() concatenates vectors after converting them to character:
paste('age', 22, 'weight', 65, 'myname')

# Question 4: Introduce a separator when concatenating the strings.
# sep      : character inserted between adjacent arguments
# collapse : optional character used to join the results into one string
# (The original line `paste(., sep="", collapse=NULL)` was pseudo-code and
#  errored at runtime; below are runnable examples.)
paste('age', 22, 'weight', 65, sep = "-")
paste(c("a", "b", "c"), collapse = ", ")
|
0000ed418e5903aaf38e7109f69a336ed75d08e7
|
9d8b86b2a20d5fd3c31a3bce56e7f52312187be1
|
/R/start.session.R
|
4ab20a26f98cda71f0d8310a64b1433fbc0837e8
|
[] |
no_license
|
hms-dbmi/Rcupcake
|
d4141be5394de83340f476392defa11477fda1ee
|
2f87f7c771ceb0da7813a90529c973e1f028b6e8
|
refs/heads/master
| 2022-01-17T06:03:15.038438
| 2019-07-02T23:44:11
| 2019-07-02T23:44:11
| 81,849,992
| 2
| 5
| null | 2018-04-06T15:36:32
| 2017-02-13T17:08:40
|
HTML
|
UTF-8
|
R
| false
| false
| 2,662
|
r
|
start.session.R
|
#' Start the connection to the database
#'
#' Given a URL and an API key, starts a session against the IRCT REST
#' security service and stores the raw response in the global variable
#' `session` for use by later calls.
#'
#' @param url The base url of the service.
#' @param apiKey The key to access to the data.
#' @return A message showing if the connection has been done or not.
#' @examples
#'
#' sessionEx <- start.session(
#'         url = "https://nhanes2.hms.harvard.edu/",
#'         apiKey = "YOURKEY"
#'     )
#' @export start.session
start.session <- function( url, apiKey){
    Key <- apiKey
    IRCT_REST_BASE_URL <- url
    IRCT_CL_SERVICE_URL <- paste(IRCT_REST_BASE_URL,"rest/v1/",sep="")
    # NOTE(review): the endpoint URLs below are assembled but never referenced
    # again inside this function — presumably kept for documentation or copied
    # from a shared template; confirm before removing.
    #Service URLS
    IRCT_RESOURCE_BASE_URL <- paste(IRCT_CL_SERVICE_URL,"resourceService/",sep="")
    IRCT_QUERY_BASE_URL <- paste(IRCT_CL_SERVICE_URL,"queryService/",sep="")
    IRCT_RESULTS_BASE_URL <- paste(IRCT_CL_SERVICE_URL,"resultService/",sep="")
    IRCT_PROCESS_BASE_URL <- paste(IRCT_CL_SERVICE_URL,"processService/",sep="")
    #List resources
    IRCT_LIST_RESOURCE_URL <- paste(IRCT_RESOURCE_BASE_URL,"resources",sep="")
    IRCT_PATH_RESOURCE_URL <- paste(IRCT_RESOURCE_BASE_URL,"path",sep="")
    #Query
    IRCT_START_QUERY_URL <- paste(IRCT_QUERY_BASE_URL,"startQuery",sep="")
    IRCT_CLAUSE_URL <- paste(IRCT_QUERY_BASE_URL,"clause",sep="")
    IRCT_RUN_QUERY_URL <- paste(IRCT_QUERY_BASE_URL,"runQuery",sep="")
    #Process
    IRCT_START_PROCESS_URL <- paste(IRCT_PROCESS_BASE_URL,"startProcess",sep="")
    IRCT_UPDATE_PROCESS_URL <- paste(IRCT_PROCESS_BASE_URL,"updateProcess",sep="")
    IRCT_RUN_PROCESS_URL <- paste(IRCT_PROCESS_BASE_URL,"runProcess",sep="")
    #Result
    IRCT_GET_RESULTS_STATUS_URL <- paste(IRCT_RESULTS_BASE_URL,"resultStatus",sep="")
    IRCT_GET_RESULTS_FORMATS_URL <- paste(IRCT_RESULTS_BASE_URL,"availableFormats",sep="")
    IRCT_GET_RESULTS_URL <- paste(IRCT_RESULTS_BASE_URL,"result",sep="")

    # Hit the security service to open a session; the parsed response shape
    # (a named list) is inspected below to decide success/failure.
    startSession <-
        httr::content(httr::GET(
            paste0(
                IRCT_REST_BASE_URL,
                "/rest/v1/securityService/startSession?key=",
                Key
            )
        ))

    # Side effect: publishes the session object to the global environment so
    # other package functions can reuse it.
    session <<- startSession
    # cache.creation() is not defined in this file — presumably a package
    # internal that initialises a local cache; confirm against the package.
    cache.creation()

    # A top-level "node" element signals an error payload from the service;
    # a "status" element carries success/failure information.
    if( names(startSession)[1] == "node"){
        return( "Start Session: failed. Please revise your url and apiKey. Check that your apiKey has not expired.")
    }else if( names(startSession)[1] == "status"){
        if( startSession[[1]] == "success" | startSession[[1]] == "ok"){
            return( "Start Session: success" )
        }else{
            return( "Start Session: failed. Please revise your url and apiKey" )
        }
    }
}
|
8ae896f9ce2e6afb396722aaa92edbe2bc70252a
|
c8bce529daccc22533607fd83eeced0509b044c8
|
/tests/testthat/test-make_grps.R
|
93e195ca948563b5799e070eb2a020ed8b213994
|
[
"MIT"
] |
permissive
|
camille-s/camiller
|
4621954dac2954ed1d6ef60cc8b273ef533ab78e
|
544ee2879a1c4f6bc5a75b854d1d3c57a99bae84
|
refs/heads/main
| 2022-03-05T22:57:39.198220
| 2022-01-21T21:52:56
| 2022-01-21T21:52:56
| 134,476,995
| 2
| 0
|
NOASSERTION
| 2022-01-21T20:43:39
| 2018-05-22T21:18:03
|
R
|
UTF-8
|
R
| false
| false
| 2,884
|
r
|
test-make_grps.R
|
library(camiller)
library(testthat)

# All three tests use the same vector of 10 age bands, each repeated 3 times
# (identical to the literal vectors in the original tests).
ages <- rep(
  c("Under 6 years", "6 to 11 years", "12 to 17 years", "18 to 24 years",
    "25 to 34 years", "35 to 44 years", "45 to 54 years", "55 to 64 years",
    "65 to 74 years", "75 years and over"),
  each = 3
)

test_that("make_grps gets group names", {
  age_list <- list(under6 = 1, under18 = 1:3, ages18_34 = 4:5, ages65plus = 9:10)
  expect_is(make_grps(ages, age_list), "list")
  expect_equal(make_grps(ages, age_list)[[3]], c("18 to 24 years", "25 to 34 years"))
  expect_named(make_grps(ages, age_list), names(age_list))
})

test_that("make_grps makes groups from positions or values", {
  age_list_num <- list(under6 = 1, under18 = 1:3, ages65plus = 9:10)
  age_list_char <- list(
    under6 = "Under 6 years",
    under18 = c("Under 6 years", "6 to 11 years", "12 to 17 years"),
    ages65plus = c("65 to 74 years", "75 years and over")
  )
  expect_equal(make_grps(ages, age_list_num), make_grps(ages, age_list_char))
})

test_that("make_grps checks if strings are in vector", {
  age_list_char <- list(
    under5 = "Under 5 years",
    under18 = c("Under 6 years", "6 to 11 years", "12 to 17 years")
  )
  expect_error(make_grps(ages, age_list_char))
})
|
c3a6d4d435cd86c8917d01ceae26513d3cdd3e93
|
f6b808b919500f3cad19ddd1e04d3959957ae9c0
|
/plot4.R
|
7021423851b7745fc1c92216768e54a37ed9c86f
|
[] |
no_license
|
Rajat9654/ExData_Plotting1
|
b63ae463b716ac64636b28296457146cee4271eb
|
9ecc5c72c43fb45e6456f9565a96f4671168509a
|
refs/heads/master
| 2021-01-20T04:11:44.299566
| 2017-04-30T07:14:29
| 2017-04-30T07:14:29
| 89,656,123
| 0
| 0
| null | 2017-04-28T01:42:57
| 2017-04-28T01:42:56
| null |
UTF-8
|
R
| false
| false
| 1,739
|
r
|
plot4.R
|
library(lubridate)

# Read the power-consumption data. "?" marks missing values in this file, so
# let read.table() convert them to NA at parse time — this replaces the six
# repetitive gsub("?", NA, ...) calls of the original and yields numeric
# columns directly.
data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                   na.strings = "?")
data$Date <- dmy(data$Date)

# Keep only the two target days (2007-02-01 and 2007-02-02).
Subdata <- subset(data, Date == '2007-02-01' | Date == '2007-02-02')
Subdata[, 3:9] <- sapply(Subdata[, 3:9], as.numeric)

# Combine date and time into a single timestamp for the x-axis.
Subdata$combine <- strptime(paste(Subdata$Date, Subdata$Time, sep = " "),
                            "%Y-%m-%d %H:%M:%S")

jpeg(file = "plot4.jpeg", width = 480, height = 480, units = "px")
par(mfrow = c(2, 2), mar = c(4, 4, 2, 1))

# Panel 1: global active power over time.
plot(Subdata$combine, Subdata$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power (kilowatts)")

# Panel 2: voltage over time.
plot(Subdata$combine, Subdata$Voltage, type = "l", xlab = "datetime",
     ylab = "Voltage")

# Panel 3: the three sub-metering series overlaid.
with(Subdata, plot(combine, Sub_metering_1, type = "n", ylab = ""))
with(Subdata, lines(combine, Sub_metering_1))
with(Subdata, lines(combine, Sub_metering_2, col = "red"))
with(Subdata, lines(combine, Sub_metering_3, col = "blue"))
legend("topright", col = c("black", "red", "blue"), lty = 1, cex = 0.9, lwd = 1,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Panel 4: global reactive power over time.
plot(Subdata$combine, Subdata$Global_reactive_power, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power")

dev.off()
|
71ec9e017abefad447bf07a919d17f744d83a07d
|
2f6ee1089c3888ff01b4e880724445fc2c74817a
|
/server.R
|
890e4633aad57dd9e9f4c7b83eee113f2bcb5ca3
|
[] |
no_license
|
hknust/ddpapp
|
82d671fa35655082fe57f6e9298ec04af760fa10
|
400fb5ce31b8ae63baf4e0fc7a10ed51c8e7c31d
|
refs/heads/master
| 2020-04-22T07:49:10.682388
| 2015-07-26T21:30:43
| 2015-07-26T21:30:43
| 39,742,808
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,534
|
r
|
server.R
|
library(shiny)
library(ggplot2)
library(data.table)
library(maps)
library(rCharts)
library(reshape2)
library(markdown)
library(mapproj)

# Static data shared by all sessions: state polygons, the GDP table
# (loaded as `gdp`), and the sorted list of industry names.
states_map <- map_data("state")
load("data/state_gdp_final.RData")
industries <- sort(unique(gdp$Description))

shinyServer(function(input, output) {

  # GDP aggregated over the selected industries for the chosen year, keyed by
  # lower-cased state name so it joins onto map_data("state") regions.
  gdp.agg <- reactive({
    ss <- subset(gdp,
                 Year == as.numeric(input$year) & Description %in% input$industries,
                 select = c(GeoName, Gdp))
    temp <- aggregate(Gdp ~ GeoName, data = ss, FUN = sum)
    temp[is.na(temp)] <- 0
    temp$GeoName <- tolower(temp$GeoName)
    temp
  })

  # Choropleth of aggregated GDP by state.
  output$gdpByState <- renderPlot({
    data <- gdp.agg()
    title <- paste("GDP by State in ", input$year, "(Million USD)")
    p <- ggplot(data, aes(map_id = GeoName))
    p <- p + geom_map(aes(fill = Gdp), map = states_map, colour = 'black') +
      expand_limits(x = states_map$long, y = states_map$lat)
    p <- p + coord_map() + theme_bw() +
      scale_fill_continuous(low = "blue", high = "hotpink")
    p <- p + labs(x = "Long", y = "Lat", title = title)
    print(p)
  }, width = 800, height = 600)

  # Industry checkboxes, all selected by default.
  # FIX: removed the vacuous `if (1)` wrapper from the original.
  output$industryControls <- renderUI({
    checkboxGroupInput('industries', 'Industries', industries, selected = industries)
  })

  # Full GDP table, exposed both as an on-screen table and a CSV download.
  dataTable <- reactive({
    gdp
  })

  output$table <- renderDataTable(
    { dataTable() },
    options = list(bFilter = FALSE, iDisplayLength = 50)
  )

  output$downloadData <- downloadHandler(
    filename = 'data.csv',
    content = function(file) {
      write.csv(dataTable(), file, row.names = FALSE)
    }
  )
})
|
02102298fdaeeb4c117a4c6f3b5be2981070ee8b
|
9e713bd43e164d946c2e9fcaefc86e218145f387
|
/sparseDataFrame.R
|
914e42c3334eb9a2fdd3d4d6ff13ef93a47d00e4
|
[] |
no_license
|
klh8mr/travel_search_analysis
|
cd41ca46bac15c6188ab49dbf7cd1a3bd2053c04
|
63426a74f1a263b99860c0849887831d12daf43c
|
refs/heads/master
| 2021-01-18T23:17:01.355555
| 2017-05-08T02:00:23
| 2017-05-08T02:00:23
| 87,102,768
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,600
|
r
|
sparseDataFrame.R
|
library(jsonlite)
library(plyr)
library(stringr)
library(arules)
library(magrittr)
library(dplyr)
library(lubridate)  # BUG FIX: ymd() is used below but lubridate was never loaded

# NOTE(review): setwd() in a script hurts portability; consider project-relative
# paths instead.
setwd("~/UVaMSDS/MachineLearning/FinalProject")

df <- read.csv("cityData.csv")
df_city <- df # save version of df to get avg and min distance later

## Create Sparse Matrix
###############################################
# List of unique cities
city <- strsplit(as.character(df$cities), ", ")
cities <- city %>%
  unlist() %>%
  unique()

# Create one (initially NA) column per city
names <- c(colnames(df), cities)
for (i in 11:(10 + length(cities))) {
  df[, i] <- NA
}
colnames(df) <- names

# Loop through each row and city column to create binary indicators
# (1 if that session searched the city, 0 otherwise)
for (i in 1:nrow(df)){
  for (j in 11:ncol(df)){
    if (names(df)[j] %in% city[[i]]){
      df[i, j] <- 1
    }
    else df[i, j] <- 0
  }
}

# Convert session and joining date to Date format
df$session_date <- ymd(df$session_date)
df$joining_date <- ymd(df$joining_date)

# Calculate days elapsed between join and session date
df$daysSinceJoin <- (df$session_date - df$joining_date) %>%
  as.character() %>%
  as.numeric()

# Write sparse Dataframe
write.csv(df, "city_search_sparse.csv", row.names = FALSE)

## Create user dataframe
###############################################
df <- read.csv("city_search_sparse.csv")
df$session_date <- ymd(df$session_date)

# Select Columns of interest from sparse dataframe
df_users <- unique(df[,c(2,8,10)])

# Create empty columns for calculations below
df_users$avgTimeElapsed <- 0
df_users$n_visits <- 0
df_users$CitiesSearched_avg <- 0

# Accumulates, per user, the column sums of the city indicator columns
cities_tot <- c()

for (x in unique(df$user_id)) {
  n_visits <- nrow(df[df$user_id==x,])
  df_users$n_visits[df_users$user_id==x] <- n_visits
  df_users$CitiesSearched_avg[df_users$user_id==x] <- sum(rowSums(df[df$user_id==x, 11:99], na.rm=T), na.rm=T)/n_visits
  if (n_visits>1){
    dates <- sort(df$session_date[df$user_id==x]) # dates a user visited the site, sorted
    timeElapsed <- diff(dates) # number of days between each visit
    df_users$avgTimeElapsed[df_users$user_id==x] <- mean(timeElapsed) # add the average days between visits to user df
  }
  cities_x <- c(x, colSums(df[df$user_id==x, 11:99]))
  cities_tot <- rbind(cities_tot, cities_x)
}

# Join df_users and city_tot by user_id
cities_tot <- data.frame(cities_tot)
names(cities_tot)[1] <- "user_id"
df_users <- full_join(df_users, cities_tot, by="user_id")

# get average and max distance searched, matched back from the original
# (pre-sparse) data on the concatenated "session_date user_id" key
df_users["avg_distance"] <- NA
df_users["min_distance"] <- NA
df_users$concat <- paste(as.character(df_users$session_date), as.character(df_users$user_id))
df_city$concat <- paste(as.character(df_city$session_date), as.character(df_city$user_id))

for (i in 1:nrow(df_users)){
  if(df_users[4][[1]][i] == 0 && df_users[5][[1]][i] == 2){
    df_users[96][[1]][i] <- NA
    df_users[97][[1]][i] <- NA
  } else if(df_users[4][[1]][i] == 0.5 && df_users[5][[1]][i] == 3){
    df_users[96][[1]][i] <- NA
    df_users[97][[1]][i] <- NA
  } else {
    join_on <- df_users[98][[1]][i]
    df_users[96][[1]][i] <- df_city[which(df_city$concat == join_on),][,c(6)]
    df_users[97][[1]][i] <- df_city[which(df_city$concat == join_on),][,c(7)]
  }
}

# write out to csv to do formatting in excel -
# fill in rest of avg_distance and min_distance
write.csv(df_users, "df_users_working.csv",row.names = FALSE)

# read in new df_users
df_users <- read.csv("df_users_working_clean.csv")

# reorder the columns
df_users <- df_users[,c(1:6,96:97,7:95)]

# Write user Dataframe
write.csv(df_users, "df_users.csv", row.names = FALSE)
|
93a3fe3158867ca4d492a1393bd01f8a41c2997c
|
a9356c021ef2d7d73bdebaa35793b07fbe2eff3d
|
/R scripts/CPdayFR SCRIPT.R
|
f66346dfd54a5a02beb643caf24de06fff70c133
|
[] |
no_license
|
vegmer/NMDA
|
be523fb55eba8e35e22f34d7e044987171e69ffe
|
9d33a480973db217b7624ba8d0598edb160e85f4
|
refs/heads/master
| 2020-03-24T01:20:53.056317
| 2019-05-08T15:11:54
| 2019-05-08T15:11:54
| 142,332,441
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,282
|
r
|
CPdayFR SCRIPT.R
|
# Plots, for the change-point (CP) session, a boxplot of per-unit mean firing
# rate before vs after the CP for each group in `masterDF` (e.g. VEH vs AP5),
# and runs a paired Wilcoxon test per group.
# NOTE(review): several names used below are not defined in this function or
# its arguments and must come from the calling environment or are bugs:
# `sessFromCP` / `Sessions` (file names), `yAxMin` / `yAxMinRaw`
# (plot.window/axis limits). Confirm before relying on this function.
CPdayFR <- function(experiment="Exp 4", masterDF=list(masterDF_DS_VEH, masterDF_DS_AP5),
                    comp=c("VEH", "AP5"), graphFolder=MixedGraphFolder, dataProcess="Zscores",
                    correctOnly=FALSE, cueExcOnly=FALSE, color=colindx,
                    yAxMinZ = -2, yAxMaxZ = 10, yAxMaxRaw = 10, WdwStart=0, WdwEnd=400,
                    removeOutliers=F, imgFormat="pdf", neudata=allNeuronsDS, morethanIQR=T){
  if(correctOnly==TRUE){trialSel="correctOnly"} else {trialSel="all trials"}
  # NOTE(review): `sessFromCP` (pdf) and `Sessions` (png) are undefined here.
  if(imgFormat=="pdf"){pdf(file=paste(graphFolder, experiment, "FR Before vs After CP Boxplot", "Sessions", sessFromCP, dataProcess, trialSel, ".pdf", sep="_"))}
  if(imgFormat=="png"){png(filename=paste(graphFolder, experiment, "FR Before vs After CP Boxplot", Sessions, sessFromCP, dataProcess, trialSel, ".png", sep="_"))}
  # Convert the analysis window (ms) into bin indices.
  binw <- neudata$parameters$binw
  minBin <- WdwStart/binw
  maxBin <- WdwEnd/binw
  selBins <- minBin:maxBin
  plot.new()
  # This function has 2 functions:
  # a) Calculate and spit the mean FR per bin around the time of the event for each session (w respect to change point) for each group of units (VEH vs AP5)
  # b) Plot that info
  FRbyUnitBoth <- lapply(seq(1, length(masterDF)), function(c){
    # Keep only the change-point session itself.
    CPdaySel <- filter(masterDF[[c]], sessfromCPsess==0)
    CPdaySel$BeforeCP <- ((CPdaySel$trialfromCP)>0)*(1) #0 is trials before CP, 1 is trials after CP
    PrePostCPidx <- unique(CPdaySel$BeforeCP)
    meanFRWOI <- sapply(seq(1, length(PrePostCPidx)), function(i){
      dataSel <- filter(CPdaySel, BeforeCP==PrePostCPidx[i]) #Trials before or after the CP
      FRcols <- (1:ncol(masterDF[[c]]))[is.element(colnames(masterDF[[c]]), 1:ncol(masterDF[[c]]))]
      subFRcolNames <- (unique(masterDF[[c]]$CueBin)+minBin):(unique(masterDF[[c]]$CueBin)+maxBin) #Select bins to be plotted
      subFRcols <- colnames(masterDF[[c]]) %in% as.character(subFRcolNames)
      ZscoreCalc <- function(x, avg, sd){(x-avg)/sd}
      if(correctOnly==TRUE){dataSel <- dataSel[!is.na(dataSel$CueResponse), ]}
      if(cueExcOnly==TRUE){dataSel <- dataSel[dataSel$CueExcited==T, ]}
      if(sum(is.na(dataSel[,1]))!=nrow(dataSel)){ #If no rows are left after the filters I just applied, then ignore the following code. Only apply if there are units to apply it to
        #All the units recorded on that session
        uniqUnits <- unique(dataSel$allUnitIdx)
        # Per-unit mean FR over the selected bins, raw and baseline-z-scored.
        byUnit <- do.call("rbind", lapply(seq(1, length(uniqUnits)), function(u){
          unitSel <- filter(dataSel, allUnitIdx==uniqUnits[u])
          numericDF <- apply(unitSel[, subFRcols], MARGIN=2, as.numeric) #Convert selected FR columns into numeric
          BLaverage <- as.numeric(format(unique(unitSel$BLavg), digits=2)) #Baseline info is in integer format. If I just say numeric, it'll remove the decimal point and do sth weird. So I have to recur to this roundabout way.
          BLsd <- as.numeric(format(unique(unitSel$BLsd), digits=2))
          # Single-trial case: apply() returns a vector, not a matrix.
          if(is.null(nrow(numericDF))){
            MeanByUnit <- mean(numericDF, na.rm=T);
            MeanByUnitZsc <- ZscoreCalc(x=MeanByUnit, avg=BLaverage, sd=BLsd)
          } else {
            MeanByBin <- colMeans(numericDF, na.rm=T)
            MeanByUnit <- mean(MeanByBin, na.rm=T)
            MeanByUnitZsc <- ZscoreCalc(x=MeanByUnit, avg=BLaverage, sd=BLsd)
          }
          # NOTE(review): this recomputes the same z-score a second time.
          MeanByUnitZsc <- ZscoreCalc(x=MeanByUnit, avg=BLaverage, sd=BLsd)
          CueExcited <- unitSel$CueExcited[1]
          m <- data.frame(Unit=uniqUnits[u], FRbyUnit=MeanByUnit, FRZsc=MeanByUnitZsc, CueExcited=CueExcited)
          m
          return(m)
        })
        )
        if(cueExcOnly==T){
          byUnit <- filter(byUnit, CueExcited==T)
        }
        # Choose raw Hz or baseline z-scores for plotting.
        if(dataProcess=="Zscores"){
          MeanByUnit <- byUnit$FRZsc
          yAxMax=yAxMaxZ
          labelLeg="(Z sc.)"
        } else {
          MeanByUnit <- byUnit$FRbyUnit
          yAxMax=yAxMaxRaw
          labelLeg="(Hz)"
        }
        # NOTE(review): `yAxMin` is undefined at this point (parameters are
        # yAxMinZ / yAxMaxRaw); this line likely errors or picks up a global.
        plot.window(xlim=c(0, length(PrePostCPidx)+1), ylim=c(yAxMin, yAxMax+3))
        MeanByUnit <- MeanByUnit[!is.nan(MeanByUnit)]
        barSide <- (i-2)+(i-1) #This will put PRE CP side to the left and POST CP side to the right
        # Hand-drawn boxplot: IQR rectangle, median (black) and mean (white).
        Q1 <- summary(MeanByUnit)[2]
        Q3 <- summary(MeanByUnit)[5]
        IQR <- IQR(MeanByUnit)
        Median <- summary(MeanByUnit)[3]
        #IQR rectangle
        rect(xleft=c+(barSide)*0.3, xright=c, ybottom=Q1, ytop = Q3, col = colindx[c], border="white")
        #Median line
        segments(x0=c+(barSide)*0.3, x1=c, y0=Median, y1=Median, lwd=2)
        segments(x0=c+(barSide)*0.3, x1=c, y0=mean(MeanByUnit), y1=mean(MeanByUnit), lwd=2, col = "white")
        if(morethanIQR==T){
          #Whiskers: maximum value still within Q3+1.5*IQR (whatever is smaller) or minimum value Q1-1.5*IQR
          overTop <- MeanByUnit>(Q3+1.5*IQR); top <- max(MeanByUnit[overTop==F])
          underBottom <- MeanByUnit<(Q1-1.5*IQR); bottom <- min(MeanByUnit[underBottom==F])
          topWhisker <- min(max(MeanByUnit), top)
          bottomwhisker <- max(min(MeanByUnit), bottom)
          segments(x0=c+barSide*0.15, x1=c+barSide*0.15, y0=Q3, y1=topWhisker)
          segments(x0=c+barSide*0.15, x1=c+barSide*0.15, y0=Q1, y1=bottomwhisker)
          overWhisker <- MeanByUnit[overTop]
          underWhisker <- MeanByUnit[underBottom]
          #Outliers
          points(x=rep(c+((barSide)*0.15), length(overWhisker)), y=overWhisker, cex=0.2, pch=19)
          points(x=rep(c+((barSide)*0.15), length(underWhisker)), y=underWhisker, cex=0.2, pch=19)
        }
        # NOTE(review): depends on overTop/underBottom from the morethanIQR
        # branch; errors if morethanIQR is FALSE.
        if(removeOutliers==T){
          outlierIdx <- (1:length(MeanByUnit))[(overTop==T | underBottom==T)]
          if(length(outlierIdx)>0){MeanByUnit <- MeanByUnit[-outlierIdx]}
        }
      }
      return(MeanByUnit)
    })
  })
  # Connect each unit's pre/post values and test the paired difference.
  # NOTE(review): elements of FRbyUnitBoth come from sapply() and may be
  # matrices or lists depending on input; nrow()/[ , ] indexing below assumes
  # a matrix with one row per unit — confirm.
  sapply(seq(1, length(FRbyUnitBoth)), function(x){
    xpos <- c(x-0.1, x+0.1)
    sapply(seq(1, nrow(FRbyUnitBoth[[x]])), function(u){
      lines(x=xpos, y=FRbyUnitBoth[[x]][u, ])
    })
    wilcox.test(FRbyUnitBoth[[x]][,1], FRbyUnitBoth[[x]][,2], paired=T)
  })
  # NOTE(review): `PrePostCPidx`, `labelLeg` and `yAxMin(Raw)` were defined in
  # inner scopes above and are not visible here; this section likely errors.
  axis(side=1, at=seq(1, length(PrePostCPidx)), labels=comp, cex.axis=1.4, tick = F)
  #Add axis, labels and legend
  if(dataProcess=="Zscores"){yAxMax=yAxMaxZ; yAxMin=yAxMinZ}
  if(dataProcess=="raw"){yAxMax=yAxMaxRaw; yAxMin=yAxMinRaw}
  axis(side=2, at=seq(yAxMin, yAxMax, by=2), las=2, cex.axis=1.4, pos=0.6)
  mtext(side=2, line=2.5, cex=1.5, font=2, text=paste("Firing rate", labelLeg, sep=" "))
}
|
2fca678a1cff5e5c9ff76167267ccef6ddb5d4f6
|
749687f99c1cb3aced1b64c8c2609dc36ba52b8c
|
/tests/testthat.R
|
6f221b76eec5c134337579f61a013cff58d93540
|
[] |
no_license
|
srvanderplas/ShoeprintCleanR
|
fa2316404b764fd560723b13073919ea6073518c
|
cf334aefa83997d0ecc0fc2602b013b99ef1738b
|
refs/heads/master
| 2021-04-12T08:21:19.145091
| 2019-09-05T19:03:15
| 2019-09-05T19:03:15
| 126,033,594
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 76
|
r
|
testthat.R
|
# Standard testthat bootstrap: runs every test under tests/testthat/
# for the ShoeprintCleanR2 package.
library(testthat)
library(ShoeprintCleanR2)

test_check("ShoeprintCleanR2")
|
8e979e8012c1c624768ff8ea6faeed3da4be4f55
|
2a1b80a49c7aaf7a97ed8721dc95b30f382fb802
|
/MI_RBIG_2016_algo.R
|
4a5817fe02831cd091d033f803f1183e0cad0202
|
[] |
no_license
|
thaos/RBIG
|
42a334c61edebc2177a435d078031620adaa075a
|
9b9e5177943eed770aeebf748a57a9e361e1d669
|
refs/heads/master
| 2021-01-13T03:47:12.220454
| 2017-02-03T09:04:03
| 2017-02-03T09:04:03
| 77,227,106
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,381
|
r
|
MI_RBIG_2016_algo.R
|
library(entropy)
library(sROC)
library(mixAK)
library(MVN)
library(hexbin)
library(cramer)
library(lpSolve)
library(memoise)
library(amap)
library(scales)
# library(sn)
# Miller-Madow entropy (in bits) of a continuous sample: discretize into
# `nbins` equal-width bins, estimate the discrete entropy, and add the log
# bin width to approximate the differential entropy.
entropy_mm <- function(x, nbins = sqrt(length(x))) {
  counts <- discretize(x, nbins)
  bin_width <- diff(range(x)) / nbins
  entropy.MillerMadow(counts, unit = "log2") + log2(bin_width)
}
# Null distribution of the difference between two total-marginal-entropy
# estimates for an (nrow x ncol) standard-normal matrix. `probs` is retained
# for interface compatibility (the quantile step is commented out).
compute_tol_h0 <- function(nrow, ncol, probs = 0.975, n = 1000) {
  nbins <- sqrt(nrow)  # NOTE: unused below; entropy_mm derives its own bins
  one_draw <- function() {
    z <- matrix(rnorm(nrow * ncol), ncol = ncol)
    sum(apply(z, 2, entropy_mm))
  }
  # tol <- quantile(abs(tol), probs=probs)
  vapply(seq_len(n), function(i) one_draw() - one_draw(), numeric(1))
}

# Memoised variant: repeated calls with the same arguments reuse the
# simulated null distribution.
compute_tol_h0_m <- memoise(compute_tol_h0)
# RBIG (Rotation-Based Iterative Gaussianization) estimate of the total
# mutual information of the columns of `dat`: alternate marginal
# gaussianization with a PCA rotation, accumulating the per-layer reduction
# in total marginal entropy (delta_I). MI is the sum over layers.
# NOTE(review): the stopping rule is stochastic — the loop breaks with
# probability equal to the larger of the two multivariate-normality p-values
# (Royston, Henze-Zirkler), so results vary run to run.
MI_RBIG_2016 <- function(dat, N_lay=1000){
  ldat <- list()
  ldat[[1]] <- dat
  lR <- list()
  DIM = dim(dat)
  Nsamples = DIM[1]
  nbins <- floor(sqrt(Nsamples))
  DIM = DIM[2]
  delta_I <- numeric(N_lay)
  for (n in 1:N_lay){
    # marginal gaussianization
    p <- numeric(DIM)
    for(d in 1:DIM){
      margin <- marginal_gaussianization(dat[,d]);
      dat[, d] <- margin$x_gauss
    }
    dat_aux = dat;
    # PCA rotation
    C <- cov(dat)
    eig <- eigen(C);
    V <- eig$vectors
    lR[[n]] <- V
    # V <- rRotationMatrix(1, ncol(C))
    dat <- dat %*% V
    ldat[[n+1]] <- dat
    # Entropy reduction achieved by this layer (gaussianize + rotate).
    delta_I[n] = information_reduction_LT(dat,dat_aux, nbins=nbins);
    rt <- roystonTest(dat, qqplot = FALSE)
    hzt <- hzTest(dat, qqplot = FALSE)
    if (runif(1)< max(rt@p.value, hzt@p.value)) break
  }
  # Returns layer-by-layer data (ldat), rotations (lR), per-layer MI
  # reductions (MIs) and their sum (MI). The trailing assignment makes the
  # return value invisible when the call is not assigned.
  ans <- list(ldat=ldat, lR=lR, MIs=delta_I, MI=sum(delta_I))
}
# Reduction in total marginal entropy (bits) going from Y to X, where X and Y
# are matrices with matching columns. Each column's entropy is a Miller-Madow
# estimate on `nbins` equal-width bins, corrected by the log bin width.
information_reduction_LT <- function(X, Y, nbins) {
  marginal_h <- function(m) {
    apply(m, 2, function(v) {
      entropy.MillerMadow(discretize(v, nbins), unit = "log2") +
        log2(diff(range(v)) / nbins)
    })
  }
  sum(marginal_h(Y) - marginal_h(X))
}
# Map a sample to approximately standard-normal margins: empirical CDF,
# rescaled by n/(n+1) so probabilities stay strictly below 1, then the
# standard-normal quantile function. Returns a one-element list for
# compatibility with callers that read `$x_gauss`.
marginal_gaussianization <- function(x) {
  n <- length(x)
  u <- ecdf(x)(x) * n / (n + 1)
  list(x_gauss = qnorm(u))
}
# (Conditional) mutual information via RBIG: I(X;Y) when no conditioning set
# is given, otherwise I(X;Y|C) through the standard four-term decomposition
#   I(X;Y|C) = I(X,Y,C) - I(X,C) - I(Y,C) [+ I(C) when |C| > 1].
cond_MI_r <- function(dat, x_ind, y_ind, c_ind = integer(0)) {
  if (length(c_ind) == 0) {
    return(MI_RBIG_2016(dat[, c(x_ind, y_ind)])$MI)
  }
  cmi <- MI_RBIG_2016(dat[, c(x_ind, y_ind, c_ind)])$MI
  cmi <- cmi - MI_RBIG_2016(dat[, c(x_ind, c_ind)])$MI
  cmi <- cmi - MI_RBIG_2016(dat[, c(y_ind, c_ind)])$MI
  if (length(c_ind) > 1) {
    cmi <- cmi + MI_RBIG_2016(dat[, c_ind])$MI
  }
  cmi
}
# Same four-term (conditional) MI decomposition as cond_MI_r, but using the
# RBIG_r estimator.
# NOTE(review): RBIG_r is not defined in this file — presumably provided
# elsewhere in the project; confirm before use.
cond_MI_m <- function(dat, x_ind, y_ind, c_ind = integer(0)) {
  if (length(c_ind) == 0) {
    return(RBIG_r(dat[, c(x_ind, y_ind)]))
  }
  cmi <- RBIG_r(dat[, c(x_ind, y_ind, c_ind)])
  cmi <- cmi - RBIG_r(dat[, c(x_ind, c_ind)])
  cmi <- cmi - RBIG_r(dat[, c(y_ind, c_ind)])
  if (length(c_ind) > 1) {
    cmi <- cmi + RBIG_r(dat[, c_ind])
  }
  cmi
}
# Null-hypothesis resample for (unconditional) mutual information:
# keep the x and y columns and permute x, destroying any dependence.
sample_mi <- function(dat, x_ind, y_ind){
  pair <- dat[, c(x_ind, y_ind)]
  pair[, 1] <- sample(pair[, 1])
  pair
}
# Null-hypothesis resample for *conditional* MI: permute x among rows whose
# conditioning values are close, so the X-C dependence is preserved while
# X-Y | C dependence is broken.
sample_cmi <- function(dat, x_ind, y_ind, c_ind){
  dat <- dat[,c(x_ind, y_ind, c_ind)]
  # Pairwise distances over the conditioning columns (3..end).
  c_dist <- dist(dat[, 3:ncol(dat), drop=FALSE])
  # 0/1 permutation matrix (zero diagonal) minimizing total moved distance.
  P <- linear_permutation(c_dist)
  # Apply the permutation to x only.
  dat <- cbind(P%*%dat[, 1], dat[, 2:ncol(dat)])
  dat
}
# One bootstrap draw of the MI statistic under the independence null:
# permute x, then re-estimate MI on the shuffled pair.
boot_mi <- function(dat, x_ind, y_ind, cond_MI=cond_MI_r){
  shuffled <- sample_mi(dat, x_ind, y_ind)
  cond_MI(shuffled, 1, 2)
}
# One bootstrap draw of the conditional-MI statistic under the null:
# locally permute x given the conditioning set, then re-estimate CMI.
boot_cmi <- function(dat, x_ind, y_ind, c_ind, cond_MI=cond_MI_r){
  shuffled <- sample_cmi(dat, x_ind, y_ind, c_ind)
  cond_MI(shuffled, 1, 2, 3:ncol(shuffled))
}
# Invert an RBIG fit: push Gaussian-space samples back through the layers to
# obtain samples on the original data scale.
#
# rbig_fit - result of MI_RBIG_2016 (uses $ldat, the per-layer data, and
#            $lR, the per-layer PCA rotations).
# gdat     - matrix of samples in the final (Gaussianized) space; defaults
#            to the fit's own last layer.
rbig_sim <- function(rbig_fit, gdat=NULL){
  ldat <- rbig_fit$ldat
  lR <- rbig_fit$lR
  if(is.null(gdat)){
    # Default: start from the fully Gaussianized data of the fit.
    gdat <- tail(ldat, 1)[[1]]
  }
  # Undo the layers in reverse order; ldat[[n-1]] is the input of layer n-1.
  for(n in rev(seq_along(ldat)[-1])){
    # Undo this layer's PCA rotation.
    gdat <- gdat %*% solve(lR[[n-1]])
    for(d in ncol(gdat):1){
      # Map the margin to (0,1], renormalize so the max is exactly 1, then
      # map back to the data scale via empirical quantiles of the layer input.
      gdat[, d] <- pnorm(gdat[, d])
      gdat[, d] <- gdat[, d] / max(gdat[, d])
      # print(max(gdat[, d]))
      # hist(gdat[, d])
      gdat[, d] <- quantile(ldat[[n-1]][, d], gdat[, d])
    }
  }
  gdat
}
# Draw `n` bootstrap values of the (conditional) MI statistic under the null.
# Unconditional case: simple permutation of x (boot_mi).  Conditional case:
# fit RBIG once, generate synthetic Gaussian samples, invert them through the
# fit, and apply the distance-constrained permutation (boot_cmi).
# Shows a text progress bar; returns the numeric vector of null statistics.
nboot_cmi <- function(n,dat, x_ind, y_ind, c_ind=numeric(0), cond_MI=cond_MI_r){
  pb <- txtProgressBar(min = 0, max = n, style = 3)
  if(length(c_ind) == 0)
    ans <- unlist(lapply(seq.int(n), function(i){setTxtProgressBar(pb, i); boot_mi(dat, x_ind, y_ind, cond_MI)}))
  else{
    # Fit RBIG once and reuse it for every synthetic draw.
    rbig_fit <- MI_RBIG_2016(dat)
    ans <- unlist(lapply(seq.int(n), function(i){
      setTxtProgressBar(pb, i)
      # Fresh standard-normal matrix of the same shape as dat, mapped back
      # to the data scale through the fitted RBIG layers.
      rbig_inv <- rbig_sim(rbig_fit, gdat=matrix(rnorm(length(c(dat))), ncol=ncol(dat), nrow(dat)))
      boot_cmi(rbig_inv, x_ind, y_ind, c_ind, cond_MI)}))
  }
  close(pb)
  ans
}
# Bootstrap permutation test for (conditional) mutual information.
# Computes the observed statistic, draws `nboot` null statistics, plots the
# two distributions (ggplot2 must be attached), and returns the permutation
# p-value 1 - rank(observed)/(nboot + 1).
cmi_btest <- function(nboot ,dat, x_ind, y_ind, c_ind=numeric(0), cond_MI=cond_MI_r){
  cmi <- cond_MI(dat, x_ind, y_ind, c_ind)
  ncmi <- nboot_cmi(nboot, dat, x_ind, y_ind, c_ind, cond_MI)
  # One H1 row (observed) followed by nboot H0 rows (null draws).
  df <- data.frame(stat=c(cmi, ncmi), type=rep(c("H1","H0"), c(1,nboot)))
  plot(ggplot(data=df, aes(x=stat, fill=type, color=type)) + geom_histogram(aes(y=..density..),alpha=0.5, position="identity", bins=30)+ggtitle(paste("x=",x_ind[1], "y=", y_ind[1], " S=", paste(c_ind, collapse=",")))+theme(aspect.ratio=1/3))
  # Rank-based permutation p-value (side effect: printed before returning).
  p.value <- 1 - rank(c(cmi, ncmi))[1]/(length(ncmi) + 1)
  print(p.value)
  p.value
}
# code translated to R from Gary Doran et al. "A Permutation-Based Kernel
# Conditional Independence Test".
#
# Find a 0/1 permutation matrix (with zero diagonal, i.e. no fixed points)
# minimizing the total distance moved, solved as a binary linear program via
# lpSolve::lp().
#
# D - a dist object or distance matrix over the rows to be permuted.
# Returns the n x n permutation matrix.
linear_permutation <- function(D){
  D <- as.matrix(D)
  n <- nrow(D)
  # Rescale Distances to [0, 1].  (max() already reduces a matrix to a
  # scalar; the original MATLAB-style max(max(D)) was redundant.)
  D <- D / max(D)
  # Objective Function: vec of D, row-major.
  f <- c(t(D))
  # Equality constraints: 2n rows over the n^2 entries.
  Aeq <- matrix(0, nrow=2*n, ncol=n^2)
  b <- matrix(1, nrow=2*n, ncol=1)
  # Columns sum to 1.
  # FIX: the original loop read `for(c in 0:n-1)`, i.e. (0:n)-1 by operator
  # precedence, which silently added a no-op iteration at c = -1.
  for(cc in 0:(n-1)){
    Aeq[cc + 1, (cc*n+1):((cc+1)*n)] <- 1
  }
  # Rows sum to 1 (last row constraint not necessary:
  # it is implied by the other constraints).
  for(r in 1:(n-1)){
    for(cc in 1:n){
      Aeq[r+n, r+(cc-1)*n] <- 1
    }
  }
  # Diagonal entries zero (single constraint row 2n with RHS 0).
  for (z in 1:n){
    Aeq[2*n, (z-1)*(n+1) + 1] <- 1
  }
  b[2*n, 1] <- 0
  # All 2n constraints are equalities (paste() around rep() was a no-op).
  cdir <- rep("=", 2*n)
  ans <- lp(direction = "min", objective.in=f, const.mat=Aeq, const.dir=cdir, const.rhs=b, transpose.constraints = TRUE, all.int=TRUE, all.bin=TRUE)
  matrix(ans$sol, ncol=n, byrow=FALSE)
}
# Permutation-based kernel conditional-independence test (KCIPT, Doran et
# al.).  Splits the data B times, permutes one tested variable within the
# second half using the distance-optimal permutation of the conditioning
# block, and compares halves with a Cramer-type MMD statistic.  An inner
# bootstrap (b draws per split) builds the null; M medians of B inner draws
# form the outer null.  Returns the permutation p-value.
#
# dat    - data matrix; xy_ind: the two tested columns; c_ind: conditioning
#          columns.
# dist   - distance *function* applied to the conditioning columns
#          (presumably stats::dist -- confirm against callers).
# B, b, M - outer splits, inner bootstraps, outer-null medians.
KCIPT <- function(dat, xy_ind, c_ind=numeric(0), dist, B, b, M){
  MMD <- numeric(B)
  samples <- numeric(B)
  inner_null <- matrix(numeric(B*b), nrow=B)
  outer_null <- numeric(M)
  dat <- as.matrix(dat)
  # Reorder so columns are (x, y, conditioning...).
  dat <- dat[, c(xy_ind, c_ind)]
  for( i in 1:B){
    omega <- dat
    # Random half-split of the rows.
    idx <- sample.int(nrow(omega), round(nrow(omega)/2))
    omega1 <- omega[idx, ]
    omega2 <- omega[-idx, ]
    # Distance-optimal permutation computed on the conditioning block.
    P <- linear_permutation(dist(omega2[, 3:ncol(omega2)]))
    # First half of the splits permute x, the rest permute y.
    if(i < B/2){
      omega2 <- cbind(P%*%omega2[, 1], omega2[, 2:ncol(omega2)])
    }else{
      omega2 <- cbind(omega2[, 1], P%*%omega2[, 2], omega2[, 3:ncol(omega2)])
    }
    MMD[i] <- cramer.test_simple(omega1, omega2)
    omega <- rbind(omega1, omega2)
    # Inner bootstrap: re-split the pooled sample b times.
    for( j in 1:b){
      idx <- sample.int(nrow(dat), round(nrow(dat)/2))
      omega1 <- omega[idx, ]
      omega2 <- omega[-idx, ]
      inner_null[i, j] <- cramer.test_simple(omega1, omega2)
    }
    cat("*")
  }
  cat("\n")
  statistics <- median(MMD)
  # Outer null: M medians, each over one random inner draw per split.
  for(k in 1:M){
    for(i in 1:B){
      r <- ceiling(runif(1) * b)
      samples[i] <- inner_null[i, r]
    }
    outer_null[k] <- median(samples)
  }
  # Rank-based permutation p-value.
  p.value <- 1 - rank(c(statistics, outer_null))[1]/(length(outer_null) + 1)
  p.value
}
# from the cramer packages
# Cramer two-sample statistic between row-sample matrices x and y, without
# the bootstrap machinery of cramer::cramer.test.
# NOTE(review): `Dist` (capital D) and the default kernel "phiCramer" are not
# defined here -- presumably Rfast::Dist and the cramer package's kernel;
# confirm both are attached by callers.
cramer.test_simple <- function(x, y, kernel="phiCramer"){
  # Cramer statistic from a kernel lookup table of pairwise distances:
  # m*n/(m+n) * (2*mean(between) - mean(within x) - mean(within y)).
  .cramer.statistic<-function(daten,indexe,mm,nn,lookup) {
    xind<-indexe[1:mm]
    yind<-indexe[(mm+1):(mm+nn)]
    mm*nn/(mm+nn)*(2*sum(lookup[xind,yind])/(mm*nn)-sum(lookup[xind,xind])/(mm^2)-sum(lookup[yind,yind])/(nn^2))
  }
  m<-nrow(x)
  n<-nrow(y)
  # Stack both samples row-wise into one matrix.
  daten<-matrix(c(t(x),t(y)),ncol=ncol(x),byrow=TRUE)
  # Apply the kernel (by name) to the full pairwise-distance matrix.
  lookup<-eval(call(kernel, as.matrix(Dist(daten))))
  .cramer.statistic(daten,1:(m+n),m,n,lookup)
}
# KCIPT-style conditional-independence test using conditional MI (RBIG) as
# the statistic instead of the MMD used in KCIPT above.
#
# dat     - data matrix; xy_ind: the two tested columns; c_ind: conditioning
#           columns.
# dist    - distance function applied to the conditioning columns.
# B, b, M - outer splits, inner bootstraps, outer-null medians.
# cond_MI - conditional-MI estimator.  FIX: previously resolved from the
#           global environment as a free variable; now an explicit argument
#           defaulting to cond_MI_r (backward-compatible, appended last).
# Returns the permutation p-value.
RBIG_kcipt <- function(dat, xy_ind, c_ind=numeric(0), dist, B, b, M, cond_MI=cond_MI_r){
  MMD <- numeric(B)
  samples <- numeric(B)
  inner_null <- matrix(numeric(B*b), nrow=B)
  outer_null <- numeric(M)
  dat <- as.matrix(dat)
  dat <- dat[, c(xy_ind, c_ind)]
  for( i in 1:B){
    omega <- dat
    # Random half-split of the rows.
    idx <- sample.int(nrow(omega), round(nrow(omega)/2))
    omega1 <- omega[idx, ]
    omega2 <- omega[-idx, ]
    # Distance-optimal permutation of the conditioning block; permute x and
    # y separately, pool, then subsample back to the half-sample size.
    P <- linear_permutation(dist(omega2[, 3:ncol(omega2)]))
    omega21 <- cbind(P%*%omega2[, 1], omega2[, 2:ncol(omega2)])
    omega22 <- cbind(omega2[, 1], P%*%omega2[, 2], omega2[, 3:ncol(omega2)])
    omega2 <- rbind(omega21, omega22)[sample.int(nrow(omega1)), ]
    MMD[i] <- cond_MI(omega1, 1, 2, c_ind=3:ncol(omega1))
    omega <- rbind(omega1, omega2)
    # Inner bootstrap over the pooled sample.
    for( j in 1:b){
      idx <- sample.int(nrow(dat), round(nrow(dat)/2))
      omega2 <- omega[-idx, ]
      inner_null[i, j] <- cond_MI(omega2, 1, 2, c_ind=3:ncol(omega2))
    }
    cat("*")
  }
  cat("\n")
  statistics <- median(MMD)
  # Outer null: M medians, each over one random inner draw per split.
  for(k in 1:M){
    for(i in 1:B){
      r <- ceiling(runif(1) * b)
      samples[i] <- inner_null[i, r]
    }
    outer_null[k] <- median(samples)
  }
  p.value <- 1 - rank(c(statistics, outer_null))[1]/(length(outer_null) + 1)
  p.value
}
# RBIG_kcipt(head(dat, 700), 1:2, 3, dist, 10, 20, 100)
|
936c7ec0bd57715d58a1d8275b788bab268f43db
|
f8853c17bd18fc7a9e625a98e1ffd20cee02ee63
|
/man/erhmm.Rd
|
ce6f541bc87331fc4bc9829373509718538c9c2e
|
[
"MIT"
] |
permissive
|
okamumu/mapfit
|
9005f612df3c79301dd7a72a55b7881dd2cf8445
|
77fc2ab0b450fafdb8ae2ace348f79322d43296b
|
refs/heads/main
| 2023-03-16T12:52:05.978437
| 2022-11-23T02:36:47
| 2022-11-23T02:36:47
| 495,679,672
| 2
| 0
|
NOASSERTION
| 2022-11-23T02:36:48
| 2022-05-24T05:19:18
|
C++
|
UTF-8
|
R
| false
| true
| 708
|
rd
|
erhmm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_erhmm.R
\name{erhmm}
\alias{erhmm}
\title{Create ERHMM}
\usage{
erhmm(
size,
shape,
alpha = rep(1/length(shape), length(shape)),
rate = rep(1, length(shape)),
P = matrix(1/length(shape), length(shape), length(shape))
)
}
\arguments{
\item{size}{An integer of the number of phases}
\item{shape}{A vector of shape parameters}
\item{alpha}{A vector of initial probability (alpha)}
\item{rate}{A vector of rate parameters}
\item{P}{A matrix of transition probabilities}
}
\value{
An instance of ERHMM
}
\description{
Create an instance of ERHMM
}
\note{
If \code{shape} is given, it takes precedence and \code{size} is ignored.
}
|
4170495c3e13e9823f48ad6c3b77f81cafc2de52
|
eeaa4f12f6f4e031a16323b6bdda5408874874d4
|
/man/L1splines.Rd
|
10e8ac1f0749287d75741abf5cfe3dc5641059f8
|
[] |
no_license
|
helenecharlotte/L1splines
|
bef04e0dbe95f827298ddcf825e7d2126dc9326a
|
4a0b4cf4edfe32dc3c0e7980d2448caef6b5eb49
|
refs/heads/master
| 2021-01-20T17:54:02.255473
| 2017-11-20T10:48:10
| 2017-11-20T10:48:10
| 62,609,980
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 196
|
rd
|
L1splines.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/L1splines-package.R
\docType{package}
\name{L1splines}
\alias{L1splines}
\alias{L1splines-package}
\title{L1splines}
|
35bf499def292f3e3ff4e03df3d470796a8d72d4
|
b443cb3ec7263930f12ae4e97c01aea77c2f5c89
|
/R/08_regional_baseline_differences_MACRO.R
|
87cba2a1c2376b6ea007bd161b9fba11f3fd86d2
|
[] |
no_license
|
yoffeash/geo_spatial_copd
|
a90bf798efb02e90bc0fa242673e09f24d2430c3
|
c00b5b9451ded3a98d6faa91653bc11cc4fd63ff
|
refs/heads/master
| 2020-03-28T00:21:13.287811
| 2018-10-24T16:53:03
| 2018-10-24T16:53:03
| 147,401,747
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 898
|
r
|
08_regional_baseline_differences_MACRO.R
|
### differences in regions by baseline characteristics ###
# Assumes `copd_region` is already loaded and ggplot2 is attached;
# stat_compare_means() presumably comes from ggpubr -- confirm it is loaded.
pairwise <- list(c("Midwest","Northeast"), c("Northeast","South and West"), c("Midwest","South and West"))
## age
ggplot(data=copd_region, aes(x=region_2, y=age)) + geom_boxplot() + stat_compare_means() + stat_compare_means(comparisons = pairwise)
## gold stage
ggplot(data=copd_region, aes(x=region_2, y=goldclass)) + geom_boxplot() + stat_compare_means() + stat_compare_means(comparisons = pairwise)
## exacerbation rate
ggplot(data=copd_region, aes(x=region_2, y=rate_exacerb)) + geom_boxplot() + stat_compare_means() + stat_compare_means(comparisons = pairwise) + ylim(0,15)
## smoking status
# Chi-squared tests of smoking status by region and by clinical center.
smok_region_table <- table(copd_region$region_2,copd_region$nowsmk)
chisq.test(smok_region_table)
smok_region_table
smok_center_table <- table(copd_region$clinic_name,copd_region$nowsmk)
chisq.test(smok_center_table)
smok_center_table
|
d2afb5ea0e0362e4eb8a897fff0fa50145d85811
|
3ae034f636da3885d76ed09f03222520d557f8b9
|
/R/reader.R
|
733ca5e4b9b1671344709de0f08a06a2cd90681e
|
[] |
no_license
|
vlcvboyer/FITfileR
|
e32b03762ac55bfb6ac8e38aeb0f7ed6dadf549c
|
90f52e716022123a02ecd2c44bdb73967d3f467c
|
refs/heads/master
| 2023-07-09T05:02:09.914129
| 2021-08-11T08:11:07
| 2021-08-11T08:11:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,259
|
r
|
reader.R
|
#' Read a FIT file
#'
#' Reads a specified FIT file and returns an object of class \code{FitFile}
#'
#' @param fileName A character specifying the FIT file to be read.
#' @param dropUnknown Many FIT files contain data that is not defined in the FIT
#' file specification. This may be used by the device manufacturer for
#' debugging purposes, but is typically not useful to an end user. The default
#' value of this argument will exclude these fields from the returned data
#' structure. Setting a value of \code{FALSE} will retain them.
#' @param mergeMessages FIT files may contain similar 'messages' with varying
#' numbers of fields e.g. if a new sensor is added during an activity the
#' 'records' messages recorded after this will contain an extra data column.
#' The default value of this argument will merge all messages of the same type,
#' and insert \code{NA} to pad missing fields. Setting this to \code{FALSE}
#' will return a separate \code{data.frame} for each distinct message type.
#'
#' @return An object of class \code{[FitFile-class]}
#'
#' @examples
#' garmin_file <- system.file("extdata", "Activities", "garmin-edge530-ride.fit",
#' package = "FITfileR")
#' garmin <- readFitFile(garmin_file)
#'
#' @export
readFitFile <- function(fileName, dropUnknown = TRUE, mergeMessages = TRUE) {
  # NOTE(review): dropUnknown and mergeMessages are accepted (and documented
  # above) but currently ignored -- .readFile() is called without them, so
  # unknown fields are kept and messages are never merged.  Confirm whether
  # the filtering/merging should happen here or downstream.
  tmp <- .readFile(fileName)
  return(tmp)
}
#' @importFrom methods is new
# Internal reader: parses the binary FIT stream record by record.
# Definition messages are collected in msgDefs; data messages are matched to
# their definition via the local message number.  Global message number 206
# marks a developer-field description, stored separately in devMessages and
# later attached to data messages carrying developer fields.
.readFile <- function(fileName) {
  con <- file(fileName, "rb")
  # Guarantee the connection is closed even if parsing fails.
  on.exit(close(con))
  file_header <- .readFileHeader(con)
  messages <- list()
  msgDefs <- list()
  devMessages <- list()
  count <- 1
  msg_count <- 1
  prev_header <- NULL
  # Read until the declared data size (plus header) is consumed.
  while(seek(con, where = NA) < (file_header$data_size + file_header$size)) {
    record_header <- .readRecordHeader(con, prev_header)
    if(isDefinition(record_header)) {
      msgDefs[[ count ]] <- .readMessage_definition(con = con, message_header = record_header)
      count <- count + 1
    } else {
      definition <- .matchDefinition(msgDefs, local_message_number = localMessageNumber(record_header))
      ## is this a developer data definition message?
      if(globalMessageNumber(definition) == 206) {
        tmp <- .readMessage_data(con = con, header = record_header, definition = definition)
        # field_def_num 1 holds the developer data index (0-based on disk).
        idx <- which(tmp@definition@field_defs$field_def_num == 1)
        dev_data_idx <- as.integer(tmp@fields[[ idx ]]) + 1
        devMessages[[ dev_data_idx ]] <- tmp
      } else {
        messages[[ msg_count ]] <- .readMessage_data(con = con,
                                                     header = record_header,
                                                     definition = definition)
        # Attach developer-field descriptions to messages that carry them.
        if(is( messages[[ msg_count ]], "FitDataMessageWithDevData")) {
          dev_data_idx <- names(messages[[ msg_count ]]@dev_fields)
          messages[[ msg_count ]]@dev_field_details <- .matchDevDefinition(devMessages,
                                                                           dev_data_idx = as.integer(dev_data_idx) + 1)
        }
        msg_count <- msg_count + 1
      }
    }
    # Needed to decode compressed-timestamp record headers.
    prev_header <- record_header
  }
  fit <- new("FitFile", header = file_header, messages = messages)
  return(fit)
}
|
8c1ef0f900c34846241c103958f4c5bd2b0408f2
|
a593d96a7f0912d8dca587d7fd54ad96764ca058
|
/R/ml_feature_idf.R
|
98bfceddb24f18909d1afd9232d84b45c3d67ba9
|
[
"Apache-2.0"
] |
permissive
|
sparklyr/sparklyr
|
98f3da2c0dae2a82768e321c9af4224355af8a15
|
501d5cac9c067c22ad7a9857e7411707f7ea64ba
|
refs/heads/main
| 2023-08-30T23:22:38.912488
| 2023-08-30T15:59:51
| 2023-08-30T15:59:51
| 59,305,491
| 257
| 68
|
Apache-2.0
| 2023-09-11T15:02:52
| 2016-05-20T15:28:53
|
R
|
UTF-8
|
R
| false
| false
| 2,388
|
r
|
ml_feature_idf.R
|
#' Feature Transformation -- IDF (Estimator)
#'
#' Compute the Inverse Document Frequency (IDF) given a collection of documents.
#'
#' @template roxlate-ml-feature-input-output-col
#' @template roxlate-ml-feature-transformer
#' @template roxlate-ml-feature-estimator-transformer
#' @param min_doc_freq The minimum number of documents in which a term should appear. Default: 0
#'
#' @export
ft_idf <- function(x, input_col = NULL, output_col = NULL,
                   min_doc_freq = 0, uid = random_string("idf_"), ...) {
  # S3 generic: dispatches on the class of `x`
  # (spark_connection, ml_pipeline, or tbl_spark).
  check_dots_used()
  UseMethod("ft_idf")
}

# Backwards-compatible alias for the older ml_* naming convention.
ml_idf <- ft_idf
#' @export
# spark_connection method: validates the arguments and returns an IDF
# estimator (ml_idf) wrapping the Spark IDF pipeline stage.
ft_idf.spark_connection <- function(x, input_col = NULL, output_col = NULL,
                                    min_doc_freq = 0, uid = random_string("idf_"), ...) {
  # Merge explicit arguments with any passed through `...`, then validate.
  .args <- list(
    input_col = input_col,
    output_col = output_col,
    min_doc_freq = min_doc_freq,
    uid = uid
  ) %>%
    c(rlang::dots_list(...)) %>%
    validator_ml_idf()
  # Construct the JVM-side stage and set its parameters.
  estimator <- spark_pipeline_stage(
    x, "org.apache.spark.ml.feature.IDF",
    input_col = .args[["input_col"]], output_col = .args[["output_col"]], uid = .args[["uid"]]
  ) %>%
    invoke("setMinDocFreq", .args[["min_doc_freq"]]) %>%
    new_ml_idf()
  estimator
}
#' @export
# ml_pipeline method: builds the stage from the pipeline's connection and
# appends it to the pipeline.
ft_idf.ml_pipeline <- function(x, input_col = NULL, output_col = NULL,
                               min_doc_freq = 0, uid = random_string("idf_"), ...) {
  stage <- ft_idf.spark_connection(
    x = spark_connection(x),
    input_col = input_col,
    output_col = output_col,
    min_doc_freq = min_doc_freq,
    uid = uid,
    ...
  )
  ml_add_stage(x, stage)
}
#' @export
# tbl_spark method: builds the stage and applies it to the table --
# transforming directly if it is already a transformer, otherwise fitting
# the estimator first and then transforming.
ft_idf.tbl_spark <- function(x, input_col = NULL, output_col = NULL,
                             min_doc_freq = 0, uid = random_string("idf_"), ...) {
  stage <- ft_idf.spark_connection(
    x = spark_connection(x),
    input_col = input_col,
    output_col = output_col,
    min_doc_freq = min_doc_freq,
    uid = uid,
    ...
  )
  if (is_ml_transformer(stage)) {
    ml_transform(stage, x)
  } else {
    ml_fit_and_transform(stage, x)
  }
}
# Wrap a JVM object reference as an ml_idf estimator.
new_ml_idf <- function(jobj) {
  new_ml_estimator(jobj, class = "ml_idf")
}

# Wrap a JVM object reference as a fitted ml_idf_model transformer.
new_ml_idf_model <- function(jobj) {
  new_ml_transformer(jobj, class = "ml_idf_model")
}

# Validate/coerce ft_idf arguments: shared transformer checks plus
# min_doc_freq cast to a scalar integer.
validator_ml_idf <- function(.args) {
  .args <- validate_args_transformer(.args)
  .args[["min_doc_freq"]] <- cast_scalar_integer(.args[["min_doc_freq"]])
  .args
}
|
7c80e60e2ccf033a59f0cea18e5deebe600d18bd
|
8dd69bdc4e638dc9def63e026f6db32cc4b118b6
|
/man/ImmunoAssay-class.Rd
|
e6afea3f11c21d29ee126f3d1116fa67beff2dad
|
[] |
no_license
|
cran/rADA
|
cc3833046cb41a33de37a114f2f5ced83e6f573f
|
10b8e8e44f674f528b1d09ea3078103834f8e7db
|
refs/heads/master
| 2023-03-27T07:18:44.412472
| 2021-03-23T18:40:06
| 2021-03-23T18:40:06
| 350,936,809
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 946
|
rd
|
ImmunoAssay-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/immunogenicity_functions.R
\docType{class}
\name{ImmunoAssay-class}
\alias{ImmunoAssay-class}
\alias{ImmunoAssay}
\title{Define ImmunoAssay class}
\description{
This stores the data that is used for screening cut point analysis.
}
\section{Slots}{
\describe{
\item{\code{data}}{Imported data as is, used for CV analysis}
\item{\code{melted.data}}{Data used for most functions}
\item{\code{exp.name}}{Experiment name}
\item{\code{stats}}{List of statistics, results gathered from both coefficient of variation analysis as well as plot generation}
\item{\code{outlier.rm}}{Has any outlier analysis been performed on this dataset?}
\item{\code{outlier.rm.method}}{If outlier removal has been performed, what method was used?}
\item{\code{scp.table}}{Table of cut point information}
\item{\code{cv.table}}{Table derived from coefficient of variation analysis}
}}
|
b33a2acaa755acf4f6a12e341747ddd2905e3270
|
d56a43fe676f14b7a6ebca3fdbdf7ab65548a0f6
|
/inst/zztools/pkgdown.R
|
14fdabf0f685c07fe9e4d0a838f9f73cebdd5b96
|
[] |
no_license
|
jranke/officer
|
45742a768b13511135ba818403e3e7aa1c59706b
|
59d8ef5193108f93f3dbeb3be1b814ac2d66a09c
|
refs/heads/master
| 2023-01-03T03:47:33.741340
| 2020-10-23T20:58:28
| 2020-10-23T20:58:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 471
|
r
|
pkgdown.R
|
# unlink("vignettes/offcran/assets", recursive = TRUE, force = TRUE)
# Build the pkgdown site, copy the off-CRAN vignette assets into the built
# articles, open two demo documents for inspection, then clean up the
# vignette copies.
pkgdown::build_site()
file.copy("vignettes/offcran/assets", to = "docs/articles/offcran", overwrite = TRUE, recursive = TRUE)
browseURL("docs/articles/offcran/assets/docx/toc_and_captions.docx")
browseURL("docs/articles/offcran/assets/docx/body_add_demo.docx")
unlink("vignettes/offcran/assets", recursive = TRUE, force = TRUE)
unlink("vignettes/offcran/extract.png", recursive = TRUE, force = TRUE)
|
73d37255494a3392f917c1a954f6a6e09bc77123
|
332041cde99bc19f33ca63dca74d05b016b5de3c
|
/cachematrix.R
|
1858c3e242f9b6d585930f9c6cc8fb81a480931a
|
[] |
no_license
|
geoffsnowman/ProgrammingAssignment2
|
db6752cac0acd6ccc5b1ef5ce4801aa36bef3a02
|
00f8502081032731e35bc47d5d228c079b14a1fd
|
refs/heads/master
| 2021-01-15T09:46:55.923274
| 2016-08-15T03:40:05
| 2016-08-15T03:40:05
| 65,693,093
| 0
| 0
| null | 2016-08-15T00:37:07
| 2016-08-15T00:37:07
| null |
UTF-8
|
R
| false
| false
| 1,445
|
r
|
cachematrix.R
|
## These functions create a cache that allows the user to invert
## a matrix once and then return the results of the solve function
## as needed.
## Build a cache object wrapping matrix `x`.  Returns a list of four
## accessor closures: set/get for the matrix itself and setInv/getInv for
## its cached inverse.  Installing a new matrix via set() clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      inverse <<- NULL
    },
    get = function() x,
    setInv = function(solve) inverse <<- solve,
    getInv = function() inverse
  )
}
## Return the inverse of the matrix wrapped by a makeCacheMatrix object.
## If an inverse is already cached, return it (with a "getting cached data"
## message); otherwise solve() the matrix, store the result via setInv,
## and return it.  Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  data <- x$get()
  message("solving the matrix")
  inv <- solve(data, ...)
  x$setInv(inv)
  inv
}
## The following code will test the functions
# Build a 3x3 diagonal matrix (inverse is diag(1/3)).
r1 <- c(3,0,0)
r2 <- c(0,3,0)
r3 <- c(0,0,3)
m <- rbind(r1,r2,r3)
# NOTE(review): `c` shadows base::c for the rest of the session here.
c <- makeCacheMatrix(m)
## First call should solve the matrix
cacheSolve(c)
## Second call should use the cache
cacheSolve(c)
## Third call should use the cache
cacheSolve(c)
|
7affa7ecfdb9c6a6207fc9cfb7c45521781d93e4
|
cf0dd17d275d592d60292002e102735b6456aa65
|
/man/affinityMatrix.Rd
|
8a859d01164d898ec9a1ed968eb0e1b67634af4d
|
[] |
no_license
|
cran/M2SMJF
|
d6c732f88872974061e2460b9dc286376308eacf
|
3d54f69f3524716ed2d7c767ab00c3efdababf4d
|
refs/heads/master
| 2023-01-22T02:29:26.738578
| 2020-11-23T07:40:06
| 2020-11-23T07:40:06
| 315,982,793
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 724
|
rd
|
affinityMatrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/affinityMatrix.R
\name{affinityMatrix}
\alias{affinityMatrix}
\title{To calculate the similarity matrix}
\usage{
affinityMatrix(Diff, K = 20, sigma = 0.5)
}
\arguments{
\item{Diff}{A diff matrix}
\item{K}{The number of neighbors in consideration}
\item{sigma}{A parameter to determine the scale}
}
\value{
W The similarity matrix
}
\description{
Calculate the affinity matrix from the diff (distance) matrix, using the
\code{K} nearest neighbors (20 by default) and scale parameter \code{sigma}.
}
\examples{
data_list <- simu_data_gen()
Diff <- dist2eu(Standard_Normalization(data_list[[1]]),Standard_Normalization(data_list[[1]]))
simi <- affinityMatrix(Diff,20,0.5)
}
\author{
Xiaoyao Yin
}
|
d3dc60767da707db83bde0899e94c34ae9336bae
|
fa117c1f993a91208a70f2ff6090c642c44560e9
|
/run_analysis.R
|
da00a91b32c304b4251e2a6f50660049b0b5d687
|
[] |
no_license
|
fabiolarw/CleanData
|
5418a58222f38dd020d9e76390c338b80dbbe721
|
268dcc8ed034d17ca66ff61b0d95b42afbcc869a
|
refs/heads/main
| 2022-12-28T22:54:05.402839
| 2020-10-18T20:57:33
| 2020-10-18T20:57:33
| 305,185,430
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,536
|
r
|
run_analysis.R
|
##You should create one R script called run_analysis.R that does the following.
##1. Merges the training and the test sets to create one data set.
##2. Extracts only the measurements on the mean and standard deviation for each measurement.
##3. Uses descriptive activity names to name the activities in the data set
##4. Appropriately labels the data set with descriptive variable names.
##5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
##load libraries
library(dplyr)
library(reshape2)
##The files are downloaded and unzipped
# NOTE(review): method="curl" requires curl on the PATH; not portable to all
# Windows setups -- confirm, or rely on the default method.
if(!file.exists("runRaw")) { dir.create("runRaw")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile="./runRaw/run.zip", method="curl")
if (!file.exists("runData")) {
  dir.create("runData")
  unzip("./runRaw/run.zip", files = NULL, list = FALSE, overwrite = TRUE,
        junkpaths = FALSE, exdir = "runData", unzip = "internal",
        setTimes = FALSE)
}
dateDownloaded=date()
## 1. Merge phase
## read all tables
x_train<- read.table("./runData/UCI HAR Dataset/train/X_train.txt")
y_train<- read.table("./runData/UCI HAR Dataset/train/y_train.txt")
s_train<- read.table("./runData/UCI HAR Dataset/train/subject_train.txt")
x_test<- read.table("./runData/UCI HAR Dataset/test/X_test.txt")
y_test<- read.table("./runData/UCI HAR Dataset/test/y_test.txt")
s_test<- read.table("./runData/UCI HAR Dataset/test/subject_test.txt")
features<- read.table("./runData/UCI HAR Dataset/features.txt", col.names=c("featureNumber", "functions"))
activities <- read.table("./runData/UCI HAR Dataset/activity_labels.txt", col.names = c("activityNumber", "activityName"))
#merging data
merge_x<- rbind(x_train, x_test)
merge_y<- rbind(y_train, y_test)
merge_s<- rbind(s_train, s_test)
##2. Extract mean and standard deviation
# Keep only features whose name contains "-mean" or "-std".
selectedColumns <- grep("-(mean|std).*", as.character(features[,2]))
selectedColNames<-features[selectedColumns, 2]
merge_x <- merge_x[selectedColumns]
merge_all<- cbind(merge_s, merge_y, merge_x)
colnames(merge_all) <- c("Subject", "Activity", selectedColNames)
##3. Descriptive activity names
# Index the activity-label vector by the numeric activity code (codes 1..6).
merge_all$Activity <-activities$activityName[merge_all$Activity]
##4. Descriptive variable names
names(merge_all) <-gsub("Acc", " acceleration", names(merge_all))
names(merge_all) <-gsub("Gyro", " angular acceleration", names(merge_all))
names(merge_all) <-gsub("Jerk", " jerk", names(merge_all))
names(merge_all) <-gsub("BodyBody", "Body", names(merge_all))
names(merge_all) <-gsub("Mag", " Magnitude", names(merge_all))
names(merge_all) <-gsub("std", "standard deviation", names(merge_all))
names(merge_all) <-gsub("^t", "(Time domain) ", names(merge_all))
names(merge_all) <-gsub("^f", "(Frequency domain) ", names(merge_all))
names(merge_all) <-gsub("X$", "X axis", names(merge_all))
names(merge_all) <-gsub("Y$", "Y axis", names(merge_all))
names(merge_all) <-gsub("Z$", "Z axis", names(merge_all))
##5. Final tidy set
# Melt to long form, then average each variable per Subject x Activity.
meltedData <- melt(merge_all, id = c("Subject", "Activity"))
finalTidyData <- dcast(meltedData, Subject + Activity ~ variable, mean)
write.table(finalTidyData, "./finalTidyData.txt", row.names = FALSE, quote = FALSE)
##Create codebook
library(codebook)
cb <- codebook(finalTidyData, survey_repetition = "single", metadata_table = FALSE)
|
20966c171f4726b55af672d87a10a18f21d57647
|
d0340fb5a69f12a8395e3721525186919515223d
|
/1_Explore.R
|
a774cfa755bfd06c4e0320d01bcffb2c3fbc0c74
|
[] |
no_license
|
evanchildress/lee
|
444a7c78068f7e0ce47e0351ad56a3edb985f1e4
|
ce697dfd519ea5aa96bb2b184522340733ff35fe
|
refs/heads/master
| 2016-08-11T07:01:23.844719
| 2016-04-25T16:15:44
| 2016-04-25T16:15:44
| 46,361,654
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,397
|
r
|
1_Explore.R
|
# rm(list=ls())
library(R2jags)
library(lme4)
library(MCMCpack) # rwish function
library(nlme)
library(plyr)
library(lubridate)
library(lattice)
library(RgoogleMaps)
library(PBSmapping)
library(maptools)
library(rgdal)
library(maps)
library(GISTools)
library(mapplots)
library(plotrix)
library(seqinr)
library(rgdal)
## Read in data
dat <- read.csv('qrystbtpopest.csv')
head(dat)
dim(dat) # [1] 129238 22
# Parse survey dates and derive year/month (lubridate's year()/month()).
dat$date<-as.Date(dat$SurveyDate,format="%m/%d/%Y")
head(dat)
dat$year <- year(dat$date)
dat$month <- month(dat$date)
head(dat)
# Rename some column headings
dat <- rename(dat, c('WaterSiteSurvey_ID'='surveyid','SurveySiteLatDD'='lat', 'SurveySiteLonDD'='long','SiteLength_m'='length',
                     'SiteWidth_m'='width', 'Comname'='species','GroupSize'='sizebin', 'EffortCatch'='catch',
                     'SiteGearDescription'='gear','SurveyPurposeDescription'='surveypurpose','WaterSectionID'='waterid') )
head(dat)
summary(dat)
# Number of unique surveys
length(unique(dat$surveyid)) # 5,127
# Number of unique stream sections
length(unique(dat$waterid)) # 1,775
# Number of unique stream sections - brook
length(unique(dat$waterid[dat$species=='Brook Trout'])) # 1,541
# Number of unique stream sections - brook
length(unique(dat$waterid[dat$species=='Brown Trout'])) # 1,267
# Number of Zippin 3-pass surveys
length(unique(dat$surveyid[dat$EstimateType=='Zippen 3 Pass Removel'])) # 531
# Number of Zippin 4-pass surveys
length(unique(dat$surveyid[dat$EstimateType=='Zippen 4 Pass Removel'])) # 16
# Number of Peterson M & R
length(unique(dat$surveyid[dat$EstimateType=='Petersen M & R'])) # 4,331
# Number of Jolly 2 Pass Removel
length(unique(dat$surveyid[dat$EstimateType=='Jolly 2 Pass Removel'])) # 249
# Range of years
range(dat$year) # 1975- 2015
###-------- Plot unique sites ---------- ###############
# Create id and year variable for below
dat$id.year <- as.factor(paste(dat$waterid, dat$year, sep='') )
summary(dat)
# Select out most recent date for each site id
# sort by comid and yearsamp (the "-" before yearsamp makes the most recent year first)
# This is necessary for the loop below.
dat2 <- dat[order(dat$waterid, -dat$year) , ]
head(dat2,50)
# Check sorting
# dat2[dat2$surveyid==10, ]
# Get unique ids (used in loop below)
ids <- unique(dat2$waterid)
length(ids)
# Create a container to hold the most recent data for each id in the loop below
# Create a new data frame called dat3 from dat2 (we use dat2 so dat3 has the same column names, etc),
# but only want to make dat3 with as many rows as there are unique comids.
dat3 <- dat2[1:length(ids),]
# This loop will go through and grab the first row for each id in dat2 and its most recent year,
# because this was sorted by comid and year, it will be grabbing the most recent year.
# We are simply overwriting the data contained in dat3 (our container) created above with
# the new data we actually want.
# NOTE(review): dat2[!duplicated(dat2$waterid), ] would give the same result
# without the per-id scan -- confirm and consider replacing.
for(i in 1:length(ids) ){
  dat3[i,] <- dat2[dat2$waterid == ids[i], ][1,]
}
head(dat3)
dim(dat3)
bb <- qbbox(lat = dat3[,"lat"], lon = dat3[,"long"])
# NOTE(review): dat3$lon relies on $ partial matching to the "long" column;
# fragile -- would silently break if another "lon*" column were added.
zoom <- min(MaxZoom(range(dat3$lat), range(dat3$lon)))
MyMap <- GetMap.bbox(bb$lonR, bb$latR, destfile = "sites.png", maptype="terrain",zoom=zoom) # terrain, hybrid
png("All_sites.png", 900, 900, res=300)
PlotOnStaticMap(MyMap, dat3$lat, dat3$long, col="red",pch='*', add = F, cex=0.5)
dev.off()
###########################
|
decd48a2e583d3eb961fec6948fd521c766a7aaf
|
6373f402637e20d84125026edc8a9f2c857d2ce4
|
/restopicer-research/NetworkBasedTopicModel/code/demo/main_for_demo_linkcomm.R
|
a1caf8b8eca30aa6ebf4721cc112d66f1ec44b02
|
[
"MIT"
] |
permissive
|
JoshuaZe/restopicer
|
bddce0a435a686ca45ef4b67e7947221d70d2fd5
|
28d0833e7b950356ae6e29459991d87a53073a72
|
refs/heads/master
| 2021-01-17T10:10:19.437197
| 2016-04-22T03:13:35
| 2016-04-22T03:13:35
| 41,923,476
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,431
|
r
|
main_for_demo_linkcomm.R
|
# Demo driver: build a keyword co-occurrence network from paper metadata,
# detect link communities ("topics") with linkcomm, and write evaluation plots.
# NOTE(review): clears the global environment and uses a hard-coded absolute
# path -- fine for a one-off demo, but avoid in reusable code.
rm(list = ls(envir = globalenv()))
setwd("F:/Desktop/restopicer/restopicer-research/NetworkBasedTopicModel")
#####
# required library
#####
library(linkcomm)
# demo.RData supplies demoPapersKeywords / demoPapersSubjectCategory;
# functions.R supplies the getTopicMember*/getDocTopic*/plot* helpers used below.
load(file = "rdata/demo.RData")
source(file = "code/functions.R")
##############
# Traditional Network Topic Model demo
# linkcomm.community
##############
# preprocessing
# one row per unique (paper, keyword) pair; keywords lower-cased for matching
data <- unique(demoPapersKeywords)
# paper x keyword incidence (count) matrix
bi_matrix <- table(data$item_ut,tolower(data$author_keyword))
# bipartite network max compart
# (max-component extraction is disabled; the full incidence matrix is used as-is)
#bi_MaxCompart <- runMaxCompartOfMatrix(bi_matrix)
bi_MaxCompart <- bi_matrix
# bipartite from incidence matrix
bi_g <- graph_from_incidence_matrix(bi_MaxCompart)
# projecting of two side
proj_g <- bipartite_projection(bi_g, types = NULL, multiplicity = TRUE,probe1 = NULL, which = "both", remove.type = TRUE)
# run linkcomm community
# proj_g[[2]] is the keyword-side projection: a keyword-keyword co-occurrence graph
coterm_g <- proj_g[[2]]
coterm_g <- simplify(coterm_g)
coterm_edgelist <- as.data.frame(cbind(get.edgelist(coterm_g),get.edge.attribute(coterm_g,name = "weight")),stringsAsFactors = F)
# cbind() coerced the weight column to character; restore it to numeric
coterm_edgelist$V3 <- as.numeric(coterm_edgelist$V3)
lc <- getLinkCommunities(coterm_edgelist,hcmethod="average",bipartite=F,dist = NULL)
# node membership per link community: a named list of keyword character vectors
community_member_list <- lapply(split(lc$nodeclusters$node,f = lc$nodeclusters$cluster),FUN = function(x){unlist(as.character(x))})
# get the term-term matrix
# coterm_g_matrix <- as_adjacency_matrix(coterm_g,type = "both",attr="weight")
# generate topic-term matrix through community
topic_term <- getTopicMemberBipartiteMatrix(community_member_list,weight = "binary")
# calculate similarity to get doc-topic matrix
doc_topic <- getDocTopicBipartiteMatrix(doc_member = bi_MaxCompart,topic_member = topic_term,method = "similarity.cos")
# document tagging test
# evaluation set: topic loadings per document joined with subject categories
taggingtest_doc_topic <- cbind(item_ut=rownames(doc_topic),as.data.frame(doc_topic))
taggingtest_doc_sc <- unique(demoPapersSubjectCategory[,c("item_ut","subject_category")])
taggingtest_data <- merge(taggingtest_doc_topic, taggingtest_doc_sc)
# plot report
doc.tagging.test(taggingtest_data = taggingtest_data,filename = "demo_linkcomm_keyword",path = "output/demo_linkcomm_keyword/document_topic",LeaveOneOut = FALSE)
# network of topic
plotBipartiteNetworkReport(filename = "demo_linkcomm_keyword",bi_graph = bi_g,community_member_list,showNamesInPlot = F,path = "output/demo_linkcomm_keyword/document_term")
plotTopicNetworkReport(filename = "demo_linkcomm_keyword",graph = coterm_g,community_member_list,showNamesInPlot = FALSE,plotCommunity = TRUE,plotOverallTopics = TRUE,path = "output/demo_linkcomm_keyword/topic_term")
# transpose = FALSE
plotBipartiteMatrixReport(filename = "demo_linkcomm_keyword",bi_matrix = bi_MaxCompart,path = "output/demo_linkcomm_keyword/document_term",showNamesInPlot = FALSE, weightType = "tfidf", plotRowWordCloud = TRUE, plotWordCloud = TRUE, plotRowComparison = TRUE, plotRowDist = TRUE, plotModules = FALSE)
plotBipartiteMatrixReport(filename = "demo_linkcomm_keyword",bi_matrix = topic_term,path = "output/demo_linkcomm_keyword/topic_term",showNamesInPlot = FALSE, weightType = "tf", plotRowWordCloud = TRUE, plotWordCloud = TRUE, plotRowComparison = TRUE, plotRowDist = TRUE, plotModules = FALSE)
plotBipartiteMatrixReport(filename = "demo_linkcomm_keyword",bi_matrix = doc_topic,path = "output/demo_linkcomm_keyword/document_topic",showNamesInPlot = FALSE, weightType = "tf", plotRowWordCloud = TRUE, plotWordCloud = TRUE, plotRowComparison = TRUE, plotRowDist = TRUE, plotModules = FALSE)
# transpose = TRUE
plotBipartiteMatrixReport(filename = "demo_linkcomm_keyword",bi_matrix = bi_MaxCompart,transpose = TRUE,path = "output/demo_linkcomm_keyword/document_term",showNamesInPlot = FALSE, weightType = "tfidf", plotRowWordCloud = FALSE, plotWordCloud = FALSE, plotRowComparison = FALSE, plotRowDist = TRUE, plotModules = FALSE)
plotBipartiteMatrixReport(filename = "demo_linkcomm_keyword",bi_matrix = topic_term,transpose = TRUE,path = "output/demo_linkcomm_keyword/topic_term",showNamesInPlot = FALSE, weightType = "tf", plotRowWordCloud = FALSE, plotWordCloud = FALSE, plotRowComparison = FALSE, plotRowDist = TRUE, plotModules = FALSE)
plotBipartiteMatrixReport(filename = "demo_linkcomm_keyword",bi_matrix = doc_topic,transpose = TRUE,path = "output/demo_linkcomm_keyword/document_topic",showNamesInPlot = FALSE, weightType = "tf", plotRowWordCloud = FALSE, plotWordCloud = FALSE, plotRowComparison = FALSE, plotRowDist = TRUE, plotModules = FALSE)
##############
# END TNTM-linkcomm demo
##############
|
929117ed2460bec275a0122528ed8f954d460e99
|
1562c46daad656a3757b6d54019c8a2d55460f65
|
/R/scatterplot.R
|
744ad2d41dfda47584745d6d4b24eaf8fd45297e
|
[] |
no_license
|
yaprakozturk/miRmoset
|
c97b1ae73f3abe4d96124bcfa49a272fb517b838
|
0ed4aab0dcb3a733d8bb6dab10d69d9fea11199a
|
refs/heads/main
| 2023-05-09T20:55:47.341869
| 2021-06-08T09:53:23
| 2021-06-08T09:53:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 385
|
r
|
scatterplot.R
|
#' Convert degrees Fahrenheit to degrees Celsius
#'
#' The previous version was broken: the parameter was named
#' `RNAscatterplot` (and documented as a scatterplot helper) while the body
#' referenced an undefined `F_temp`, so every call errored. The parameter is
#' now named for what the body actually uses.
#'
#' @param F_temp Temperature(s) in degrees Fahrenheit (numeric vector).
#' @return A numeric vector of the same length as `F_temp`, in degrees Celsius.
#' @examples
#' F_to_C(50)   # 10
#' F_to_C(212)  # 100
#' @export
F_to_C <- function(F_temp){
  C_temp <- (F_temp - 32) * 5/9;
  return(C_temp);
}
|
9f45f9ea50279277c298c6ad1b9906a7ca8fefd6
|
6daeb33a35fd354502e1c23e977355295eef6f6c
|
/R/aggregate.R
|
aeea09328a6ccecf0664152868aab2f09c8fb6b9
|
[] |
no_license
|
pik-piam/rmndt
|
2642f3b2703b148f37bd942b3b96ae7a8a0bbbbc
|
f7b0704d78f2058c690885726247c703d9677277
|
refs/heads/master
| 2023-07-10T13:14:27.388585
| 2023-07-10T09:32:59
| 2023-07-10T09:32:59
| 243,305,595
| 0
| 3
| null | 2023-07-10T09:33:00
| 2020-02-26T16:07:29
|
R
|
UTF-8
|
R
| false
| false
| 7,261
|
r
|
aggregate.R
|
#' Internal function to apply the weights and perform some checks.
#'
#' Joins `weights` onto `data` (on `manycol` plus any shared extra columns),
#' normalises the weights within each (`fewcol`, extra dims, `datacols`)
#' group, rescales `valuecol` accordingly, and drops the weight column.
#' Emits warnings (not errors) when the weights are incomplete.
#'
#' @param data a data.table.
#' @param mapping a mapping between the aggregated categories and their parts. *All* aggregated categories in `data` have to be part of the mapping.
#' @param weights table with weights for disaggregation, the name of the column with the *dis-aggregated* categories has to be `manycol`. If columns (other than the column with the dis-aggregated category) of the `weights` coincide with columns of the data, the respective columns are considered when joining.
#' @param fewcol name of the column containing aggregated categories. Default is "region".
#' @param manycol name of the column containing dis-aggregated categories. Default is "iso".
#' @param valuecol name of the column with the actual value to disaggregate, default is `value`.
#' @param datacols index columns that label categories which have to be treated seperately when dis-aggregating with a weight.
#' @param weightcol column with the weights for the dis-aggregation, default is `weight`.
#' @return the joined data.table with `valuecol` rescaled by the normalised
#'   weights (note: the final `:=` calls also modify it by reference).
#' @import data.table
apply_weights <- function(data, mapping, weights, fewcol, manycol, valuecol, datacols, weightcol){
# warn about dis-aggregated categories present in the mapping but lacking a weight
diff <- setdiff(unique(mapping[[manycol]]), unique(weights[[manycol]]))
if(length(diff)){
warning("The weights are incomplete. ",
"Some dis-aggregated categories are found in the mapping, but not in the weights: ",
paste(diff, collapse=", "))
}
## we are only interested in the matching cols and the weight col
# NOTE(review): `with=F` uses the reassignable F; prefer with=FALSE.
inboth <- intersect(colnames(data), colnames(weights))
weights <- weights[, c(inboth, weightcol), with=F]
## are there other dimensions to consider when applying the weights?
othercols <- setdiff(inboth, manycol)
## leftjoin data
data <- weights[data, on=c(manycol, othercols)]
## if there are NAs in the weights, the weights were incomplete along the additional dimension
if(any(is.na(data[[weightcol]]))){
warning("NAs are found when joining the weights. ",
"The weights are incomplete along the following dimension(s):",
paste(othercols, collapse=", "))
}
## apply weights
# normalise weights to sum to 1 within each aggregated-category group, then scale
data[, (valuecol) := get(valuecol)*get(weightcol)/sum(get(weightcol)), by=c(fewcol, othercols, datacols)]
# drop the weight column; `[.data.table` returns the table, so this is the return value
data[, (weightcol) := NULL]
}
#' Disaggregate data in a data.table object using a mapping.
#' If no weights are given, the value for the aggregated categories is used on the disaggregated ones.
#' If a weight is given, the values from the aggregated categories are distributed according to the weights.
#'
#' @param data a data.table.
#' @param mapping a mapping between the aggregated categories and their parts. *All* aggregated categories in `data` have to be part of the mapping.
#' @param fewcol name of the column containing aggregated categories. Default is "region".
#' @param manycol name of the column containing dis-aggregated categories. Default is "iso".
#' @param valuecol name of the column with the actual value to disaggregate, default is `value`.
#' @param datacols index columns that label categories which have to be treated seperately when dis-aggregating with a weight.
#' @param weights table with weights for disaggregation, the name of the column with the *dis-aggregated* categories has to be `manycol`. If columns (other than the column with the dis-aggregated category) of the `weights` coincide with columns of the data, the respective columns are considered when joining.
#' @param weightcol column with the weights for the dis-aggregation, default is `weight`.
#' @return a data.table with one row per dis-aggregated category (`fewcol`
#'   is dropped from the result).
#' @import data.table
disaggregate_dt <- function(data, mapping,
fewcol="region",
manycol="iso",
valuecol="value",
datacols="data",
weights=NULL,
weightcol="weight"){
## Note that isocol in the data has to match the column name in the mapping
# keep only the two mapping columns we join on
mapping <- mapping[, c(manycol, fewcol), with=F]
## require the mapping to be a superset of the regions in data
diff <- setdiff(unique(data[[fewcol]]), mapping[[fewcol]])
if(length(diff)){
stop("Mapping is incomplete. Missing aggregated categories: ", paste(diff, collapse=", "))
}
## disaggregation function
# each aggregated row fans out to all its parts (hence allow.cartesian)
data <- mapping[data, on=c(fewcol), allow.cartesian=T]
if(!is.null(weights)){
# distribute values across parts proportionally to the weights
data <- apply_weights(data, mapping, weights, fewcol, manycol, valuecol, datacols, weightcol)
}
data[, (fewcol) := NULL]
return(data)
}
#' Aggregate values in a data.table object using a mapping.
#' If no weight is given, the value for the aggregated categories is the sum of the parts.
#' Otherwise, the weight is used to calculate a weighted average accross the parts.
#'
#' @param data, a data.table (with one row per dis-aggregated category).
#' @param mapping, a mapping between the aggregated categories in the data and ISO3 countrycodes. *All* regions in `data` have to be part of the mapping.
#' @param fewcol, name of the column containing aggregated categories. Default is "region".
#' @param manycol, name of the column containing dis-aggregated categories. Default is "iso".
#' @param yearcol, name of the column containing time step info. Default is "year".
#' @param valuecol name of the column with the value to aggregate, default is `value`.
#' @param datacols index columns that label categories which have to be treated seperately when aggregating with a weight.
#' @param weights table with weights for a (weighted average) aggregation, the name of the column with the *dis-aggregated* categories has to be `manycol`. If columns (other than the column with the dis-aggregated category) of the `weights` coincide with columns of the data, the respective columns are considered when joining.
#' @param weightcol column with the weights for aggregation, default is `weight`.
#' @return a data.table aggregated to `fewcol` level (`manycol` is dropped,
#'   duplicate rows removed).
#' @import data.table
aggregate_dt <- function(data, mapping,
fewcol="region",
yearcol="year",
manycol="iso",
datacols="data",
valuecol="value",
weights=NULL,
weightcol="weight"){
## aggregation function, sums by default
## alternatively, do a weighted average
mapping <- mapping[, c(manycol, fewcol), with=F]
## left join: only regions in the mapping are mapped
data <- mapping[data, on=c(manycol)]
## require the mapping to be a superset of the countries in data
# NOTE(review): this check runs after the join; unmapped rows show up as NA
# in fewcol and are warned about and dropped rather than raising an error.
diff <- setdiff(unique(data[[manycol]]), mapping[[manycol]])
if(length(diff)){
warning("Mapping is incomplete. Data for the following ISO countries is omitted: ", paste(diff, collapse=", "))
data <- data[!is.na(get(fewcol))]
}
if(!is.null(weights)){
# pre-scale values by normalised weights so the sum below yields a weighted average
data <- apply_weights(data, mapping, weights, fewcol, manycol, valuecol, datacols, weightcol)
}
## sum
data[, (valuecol) := sum(get(valuecol), na.rm=T), by=c(yearcol, fewcol, datacols)]
# drop manycol
data[, (manycol) := NULL]
# drop duplicate rows
data <- unique(data)
return(data)
}
|
94a73cf09271aad53f89f58249f60686c356005c
|
7ad3ffcfb001733227962a2aeacc00657d30350f
|
/inst/resources/scripts/book/sweetgum.r
|
a9f5822cfaaa9b4eec00ab321cb863f580027aed
|
[] |
no_license
|
cran/FAwR
|
b70f10a5ada58a3da4a56464d86534eb1a59fbb0
|
9917873167c1a0109136e772024009c7e81131ab
|
refs/heads/master
| 2021-06-02T13:37:48.157146
| 2020-11-09T04:20:02
| 2020-11-09T04:20:02
| 17,679,114
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,678
|
r
|
sweetgum.r
|
### R code from vignette source 'sweetgum.rnw'
# Reads a fixed-width sweetgum stem-measurement file, repairs known data
# errors, splits tree-level from section-level records, and builds the
# `sweetgum` and `sections` data frames with metric conversions.
###################################################
### code chunk number 1: sweetgum.rnw:4-5
###################################################
options(width=67)
###################################################
### code chunk number 2: Read sweetgum
###################################################
# read the raw file one whole line at a time (fixed-width parsing follows)
raw.data <- scan("../../data/TX_SGUM2.DAT",
what = "", sep = "\n")
length(raw.data)
###################################################
### code chunk number 3: sweetgum.rnw:33-34
###################################################
# drop the 26 header lines and the trailing line 1101 -- specific to this file
raw.data <- raw.data[-c(1:26, 1101)]
###################################################
### code chunk number 4: sweetgum.rnw:43-44
###################################################
# lines containing "SWEETGUM" are tree-level (metadata) records
metadata <- grep("SWEETGUM", raw.data)
###################################################
### code chunk number 5: sweetgum.rnw:47-49 (eval = FALSE)
###################################################
## metadata <- grep("SWEETGUM", raw.data)
## cbind(metadata, raw.data[metadata])
###################################################
### code chunk number 6: sweetgum.rnw:56-58
###################################################
# hand-corrections of two typos in the source data file
substr(raw.data[627], 1, 1) <- "4"
substr(raw.data[910], 1, 1) <- "5"
###################################################
### code chunk number 7: sweetgum.rnw:68-73
###################################################
# prepend the plot/tree id (cols 1-10) of the preceding line to every
# section record, so sections carry their parent tree's identifier.
# NOTE(review): assumes the first line is a tree record; otherwise
# raw.data[i - 1] with i == 1 would misbehave -- TODO confirm.
for (i in 1:length(raw.data)) {
if(substr(raw.data[i], 57, 64) != "SWEETGUM")
raw.data[i] <- paste(substr(raw.data[i - 1], 1, 10),
raw.data[i], sep="")
}
###################################################
### code chunk number 8: sweetgum.rnw:80-85
###################################################
# split tree-level records from section-level records
tree.data <- raw.data[metadata]
length(tree.data)
sections.data <- raw.data[-metadata]
length(sections.data)
###################################################
### code chunk number 9: sweetgum.rnw:90-103
###################################################
# fixed-width extraction into data frames (columns are character positions)
sweetgum <-
data.frame(plot = factor(substr(tree.data, 1, 5)),
tree = substr(tree.data, 6, 10),
dbh.in = substr(tree.data, 21, 26),
stump.ht.ft = substr(tree.data, 27, 32),
height.ft = substr(tree.data, 39, 44))
sections <-
data.frame(plot = factor(substr(sections.data, 1, 5)),
tree = substr(sections.data, 6, 10),
meas.ln.ft = substr(sections.data, 11, 16),
meas.dob.in = substr(sections.data, 20, 25),
meas.dib.in = substr(sections.data, 26, 31))
###################################################
### code chunk number 10: sweetgum.rnw:110-112
###################################################
sapply(sweetgum, class)
sapply(sections, class)
###################################################
### code chunk number 11: sweetgum.rnw:118-122
###################################################
# convert the measurement columns (3-5) to numeric; as.character() first
# guards against factor-to-integer-code coercion on pre-4.0 R.
for (i in 3:5) {
sweetgum[,i] <- as.numeric(as.character(sweetgum[,i]))
sections[,i] <- as.numeric(as.character(sections[,i]))
}
###################################################
### code chunk number 12: sweetgum.rnw:129-132
###################################################
# combine tree- and section-level data by the shared plot/tree keys
all.meas <- merge(sweetgum, sections, all = TRUE)
dim(all.meas)
names(all.meas)
###################################################
### code chunk number 13: sweetgum.rnw:137-143
###################################################
# derive measurement heights and convert imperial units to metric
all.meas$meas.ht.ft <- with(all.meas,
meas.ln.ft + stump.ht.ft)
all.meas$meas.ht.m <- all.meas$meas.ht.ft / 3.2808399
all.meas$meas.dob.cm <- all.meas$meas.dob.in * 2.54
sweetgum$height.m <- sweetgum$height.ft / 3.2808399
sweetgum$dbh.cm <- sweetgum$dbh.in * 2.54
###################################################
### code chunk number 14: sweetgum.rnw:163-176
###################################################
# Estimate stem volume (m^3) by fitting a cubic spline to the measured
# radii along the stem (forcing a zero radius at the tip, max.ht.m) and
# integrating the circular cross-sectional area from min.ht.m to the tip.
#   hts.m    : measurement heights (m), in any order
#   ds.cm    : diameters (cm) at those heights
#   max.ht.m : total tree height (m)
#   min.ht.m : lower integration bound (m), e.g. stump height
spline.vol.m3 <- function(hts.m,
                          ds.cm,
                          max.ht.m,
                          min.ht.m = 0) {
  stem.order <- order(hts.m)
  # radii (cm) at increasing heights, closed with a zero radius at the tip
  radii.cm <- c(ds.cm[stem.order] / 2, 0)
  heights.m <- c(hts.m[stem.order], max.ht.m)
  taper <- splinefun(heights.m, radii.cm)
  # cross-sectional area (m^2) at height h; pmax() guards against the
  # integrator probing slightly negative heights
  cross.section.m2 <- function(h) pi * (taper(pmax(h, 0)) / 100)^2
  integrate(f = cross.section.m2,
            lower = min.ht.m,
            upper = max.ht.m)$value
}
###################################################
### code chunk number 15: sweetgum.rnw:182-188
###################################################
# Integrated spline volume per tree: section measurements are grouped by
# tree and paired with that tree's total height; integration starts at 0.3 m.
# NOTE(review): assumes split() order over all.meas$tree matches the row
# order of sweetgum -- TODO confirm tree labels sort identically.
sweetgum$vol.m3 <-
mapply(spline.vol.m3,
hts.m = split(all.meas$meas.ht.m, all.meas$tree),
ds.cm = split(all.meas$meas.dob.cm, all.meas$tree),
max.ht.m = as.list(sweetgum$height.m),
min.ht.m = 0.3)
###################################################
### code chunk number 16: fig-sgum-check
###################################################
# sanity plot: spline volume vs. a second-degree paraboloid approximation
# (basal area * height / 2); points near the 1:1 line indicate agreement
par(las = 1)
plot(sweetgum$vol.m3,
(sweetgum$dbh.cm/200)^2 * pi * sweetgum$height.m / 2,
ylab = expression(paste("Second-degree paraboloid volume (",
m^3, ")", sep="")),
xlab = expression(paste("Integrated spline volume (",
m^3, ")", sep="")))
abline(0, 1, col="darkgrey")
###################################################
### code chunk number 17: sgum-check
###################################################
# same plot repeated for the second Sweave figure chunk
par(las = 1)
plot(sweetgum$vol.m3,
(sweetgum$dbh.cm/200)^2 * pi * sweetgum$height.m / 2,
ylab = expression(paste("Second-degree paraboloid volume (",
m^3, ")", sep="")),
xlab = expression(paste("Integrated spline volume (",
m^3, ")", sep="")))
abline(0, 1, col="darkgrey")
|
d565eb298809133aea195885e419fd548f85b89f
|
1ca927126130a42d1dcdb9565f9e24b0409c7ce9
|
/Tennis/ui.R
|
fdafa809862458927743b46a8477d41defbc6c92
|
[] |
no_license
|
NAGTennis/Projet_Tennis
|
b2b16cfafd1aae8b4957a32cd1bfd42b61607f0b
|
58771ff74144f21ace3b13ad1b30a210794dcceb
|
refs/heads/master
| 2020-04-06T23:38:49.599918
| 2019-03-29T05:57:56
| 2019-03-29T05:57:56
| 157,876,495
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,987
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
###### Do not forget to import the data tables (Rank, atp_players, Tennis_table) first
# data.table keys for the joins below
setkey(Rank,Player_Id)
setkey(atp_players,Player_Id)
# players ranked in the top 100 at some point since 2018-01-01, as "First Last"
Joueurs_actif <- unique(Rank[DateRanking>=20180101&Numero<=100,.(Player_Id)][atp_players,.(nom=paste(Prenom, Nom)),nomatch=0])
# dropdown choices (a leading blank entry means "no selection")
Type_surfaces <- c(" ",unique(Tennis_table[tourney_date>'20170101',.(surface)]))
Nom_tournois <- c(" ",unique(Tennis_table[tourney_date>'20170101',.(tourney_name)]))
tags$head(tags$link(rel = "stylesheet",type = "text/css", href = "./style.css"))
shinyUI(
# navbarPage
navbarPage("Projet Tennis",
tabPanel("Présentation",
"Objectif : Donner le gagnant d'un match de tennis"
),
# first tab: data
tabPanel("Données",
navlistPanel(
widths = c(2, 10),
tabPanel("Table",
# title styled with css
h1("Jeu de données", style = "color : #0099ff;text-align:center"),
# table
dataTableOutput("table")),
tabPanel("Résumé",h1("Résumé des données", style = "color : #0099ff;text-align:center"),verbatimTextOutput("summary"))
)
),
# second tab: the prediction application
tabPanel("Application",
fluidRow(
# first column: the input sidebar
column(width = 3,
# wellPanel to grey out the background
wellPanel(
# name of player 1
selectizeInput(inputId = "nom1", label = "Nom du Joueur 1",choices = Joueurs_actif, options=list(create=FALSE)),
# name of player 2
selectizeInput(inputId = "nom2", label = "Nom du Joueur 2",choices = Joueurs_actif, options=list(create=FALSE)),
# surface type
selectInput(inputId = "surface", label = "Surface",choices = Type_surfaces),
# tournament
selectInput(inputId = "tournois", label = "Tournois",choices = Nom_tournois),
# dynamic surface selector (disabled)
#htmlOutput("surface_select"),
# dynamic tournament selector (disabled)
#htmlOutput("tournois_select"),
# match date
dateInput(inputId = "date", label = "Date du match", value = Sys.Date(), format= "dd/mm/yyyy",language="French"),
# submit button
actionButton("go", "Valider")
)
)
,
mainPanel(
wellPanel(fluidRow(height='500px',
splitLayout(
textOutput("nom_j1")
,HTML("<div style='text-align:center; font-size: 18px'>contre</div>")
,textOutput("nom_j2")
)
,
splitLayout(align='middle'
,imageOutput("image_j1")
,imageOutput("image_surface_tournois")
,imageOutput("image_j2")
)
)
))
)
),
# About tab
tabPanel("About",
"Projet réalisé par Nardjesse, Greg et Axel."
)
# CSS tweaks for the player names and images
,tags$style(type = 'text/css', '#nom_j1, #nom_j2{color: #0099ff;font-size: 18px;text-align:center;overflow: hidden}')
,tags$style(type = 'text/css', '.image_j1, .image_j2{height:auto}')
#,tags$style(type = 'text/css', '#image_surface_tournois {display: table-cell; vertical-align: middle; text-align:center; width: 33.333%; height: auto}')
,tags$style(type = 'text/css', '.shiny-split-layout>div {vertical-align: middle;}')
)
)
|
e9041a87d4b4400f162cdbbd2d12dba3bd12809f
|
f26781b86f2dea0394809d1951bad4550d82ba3c
|
/util/modeldev/read_runSummary_table.R
|
cbc2b93509854054a211ab2096230f0966d5f36e
|
[] |
no_license
|
fyang72/handbook
|
0ac0d616f033747347bce3fe72219223a2553ab8
|
89abb7b557b83d9b651821780b92410623aaa9a2
|
refs/heads/master
| 2022-09-30T10:36:14.303860
| 2019-12-16T19:32:13
| 2019-12-16T19:32:13
| 171,066,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,028
|
r
|
read_runSummary_table.R
|
#' Read every "fit*" run-summary CSV produced by a set of NONMEM runs.
#'
#' Run labels in `runno_lst` have the form "<ctl>_<dat>". For each run the
#' directory `<local_output_dir>/ctl/<runno>` is scanned recursively and
#' every file whose base name starts with "fit" is read (all columns as
#' character so heterogeneous runs bind cleanly later).
#'
#' @param runno_lst character vector of run labels ("<ctl>_<dat>").
#' @param local_home project root; the other paths default relative to it.
#' @param local_ctl_dir local directory that holds all ctl files (unused
#'   here, kept for interface compatibility).
#' @param local_data_dir local directory that holds all data files (unused
#'   here, kept for interface compatibility).
#' @param local_output_dir local directory that holds all nonmem output
#'   files from the server.
#' @return a named list of data.frames, keyed "<runno>_<fit-file-basename>";
#'   empty when no summary files are found.
batch_read_runSummary_table <- function(
  runno_lst,
  local_home = "./",
  local_ctl_dir = paste0(local_home, "/ctl/"),      # local directory that holds all ctl files
  local_data_dir = paste0(local_home, "/data/"),    # local directory that holds all data files
  local_output_dir = paste0(local_home, "/output/") # local directory that holds all nonmem output files from server
  ) {
  library(readr)

  # split "<ctl>_<dat>" labels into their two parts, one row per run
  runno_df <- cbind(runno_lst, str_split_fixed(runno_lst, pattern = "_", n = 2)) %>%
    as.data.frame()
  colnames(runno_df) <- c("runno", "ctl", "dat")
  runno_df <- runno_df %>% mutate(runno = as.character(runno))

  PARAMS <- list()
  # seq_len() is safe for zero-row input (1:nrow() would yield c(1, 0))
  for (i in seq_len(nrow(runno_df))) {
    irunno <- as.character(runno_df[i, "runno"])

    folder.loc <- paste0(local_output_dir, "ctl/", irunno)
    file.lst <- list.files(path = folder.loc, all.files = FALSE, full.names = TRUE,
                           include.dirs = TRUE, recursive = TRUE)
    # keep only the summary files, whose base names start with "fit"
    file.lst <- file.lst[startsWith(basename(file.lst), "fit")]

    for (file.name in file.lst) {
      print(paste0("read ", irunno))
      base.name <- tools::file_path_sans_ext(basename(file.name))
      # everything as character: run summaries may mix numeric/text columns
      PARAMS[[paste0(irunno, "_", base.name)]] <-
        read_csv(file.name,
                 col_names = TRUE,
                 col_types = cols(.default = col_character())) %>%
        as.data.frame()
    }
  }
  return(PARAMS)
}
#-----------------------------------------------
# generate_parmsTab
#-----------------------------------------------
# Combine per-run summary data.frames (as returned by
# batch_read_runSummary_table) into one transposed comparison table:
# one column per run, one row per parameter, with objective-function
# values (ofv) and their difference to the first run prepended.
# Relies on helpers defined elsewhere in the project: merge_all(),
# as_numeric(), u.signif() -- TODO confirm their locations.
generate_runSummary_table <- function(PARAMS) {
# how to subset a list
#runno.lst <- c("LN001", "LN002")
#tdata= lapply(runno, function(irunno) PARAMS[[irunno]])
#names(tdata) = runno
out = merge_all(PARAMS)
library(lubridate)
# parse "HH:MM:SS" run times into Period objects
out$model_run_time = lubridate::hms(as.character(out$model_run_time)) #as.character(out$model_run_time)
#out = out %>% select(model:ofv, starts_with("TV"), starts_with("RUV"), starts_with("WGT_ON"), starts_with("IIV"), starts_with("SIGMA"), starts_with("se"))
# objective function value and its change relative to the first (reference) run
out$ofv = as_numeric(out$ofv)
out$diff_ofv = as_numeric(out$ofv) - as_numeric(out$ofv[1])
#col.lst = out %>% select(TVCL:seSIGMA_1) %>% colnames()
#out[, col.lst] = u.signif(out[, col.lst], digits=3)
#out[which(out==" NA")] = "---"
out[, c("ofv", "diff_ofv")] = u.signif(out[, c("ofv", "diff_ofv")], digits=5)
#col.lst = c("ofv", "diff_ofv", col.lst, "condition_number")
#out = out[, col.lst]
#out = out %>% select(model:ofv, diff_ofv, starts_with("TV"), starts_with("IIV"), starts_with("SIGMA"))shrinkage
#out = out %>% select(ofv, diff_ofv, starts_with("TV"), starts_with("IIV"))# %>% select(ofv, diff_ofv, one_of(col.lst)))
# drop standard errors, EI* and shrinkage columns from the comparison table
out = out %>% select(-starts_with("se"), -starts_with("EI"), -starts_with("shrinkage"))
# out = out %>% select(ofv, diff_ofv, minimization_successful, covariance_step_successful,
#                est_methods, model_run_time, condition_number,
#                starts_with("TV"), starts_with("RUV"), EMAX,T50,HILL, starts_with("WGT_ON"), starts_with("IIV") )
# transpose: runs become columns, parameters become rows (first column "parms")
out = t(out)
out = cbind(parms = rownames(out), out)
rownames(out) = NULL
out[is.na(out)]= "---"
# NOTE(review): the bare `out` before return() has no effect inside a function.
out
return(out)
#
# runno.lst = names(parms)
#  # obj
#  obj = sapply(runno.lst, function(runno)   parms[[runno]]$ofv)
#
#  # thetas
#  tmp = sapply(runno.lst, function(runno)  parms[[runno]]$thetas )
#  parms.lst = sapply(runno.lst, function(runno)  names(parms[[runno]]$thetas )) %>%unlist() %>% unique()
#  nMax <- max(sapply(tmp, length))
#  thetas = (sapply(tmp, function(i) i[parms.lst]))
#  rownames(thetas) = parms.lst
#  thetas = round((thetas), digits=3)
#  thetas[is.na(thetas)] = "---"
#
#
#  # omega
#  tmp = sapply(runno.lst, function(runno)  {
#    omega=parms[[runno]]$omega
#    tt = omega[1:2, 1:2][lower.tri(omega[1:2, 1:2],diag=TRUE)]
#    if (ncol(omega)==3) {tt = c(tt, omega[3:ncol(omega), 3:ncol(omega)])}
#    if (ncol(omega)>3) {tt = c(tt, diag(omega[3:ncol(omega), 3:ncol(omega)]))}
#
#
#    names(tt) = omega.name[1:length(tt)]
#    return(tt)
#  } )
#  parms.lst = sapply(runno.lst, function(runno)  names(tmp[[runno]] )) %>%unlist() %>% unique()
#  nMax <- max(sapply(tmp, length))
#  omega = (sapply(tmp, function(i) i[parms.lst]))
#  rownames(omega) = parms.lst
#  omega = round(omega, digits=3)
#  omega[is.na(omega)] = "---"
#
#  # finaly output of parameters
#  thetas = thetas[setdiff(rownames(thetas),c("TVF1", "TVKA" )), ]
#  rbind("OBJ"=round(obj, digits=3),
#        "DIFF_OBJ"=round(obj-obj[1], digits=3),   #"---",
#        thetas[setdiff(rownames(thetas),c("RUVCV","RUVSD")), ],  #"---",
#        omega,  # "---",
#        thetas[c("RUVCV","RUVSD"), ]
#  )
}
|
8af60fd360996a327eb4034519d87ec4e08e9174
|
f46c0880bf841ca246fda76cd60444b4d87f48ca
|
/final_figures.R
|
979e02d992b64e0f22e685572cd6d7d93fff5dbf
|
[] |
no_license
|
ajvsom/LCS_TAR
|
5ab18e639475084dacad2aabe16d4673bc8f2fb5
|
7ae113ec5e641cfe290eb054de1966c64441d515
|
refs/heads/main
| 2023-04-10T18:40:28.759439
| 2021-04-26T16:43:33
| 2021-04-26T16:43:33
| 361,819,703
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 48,609
|
r
|
final_figures.R
|
# TAR & LCS Representations -----------------------------------------------

#AR(1) Difference Equation
# Simulate a first-order difference equation y_t = coef * y_{t-1} + 4
# with y_1 = 4 for n_steps occasions; returns the trajectory as a vector.
simulate_traj <- function(coef, n_steps) {
  traj <- numeric(n_steps)
  traj[1] <- 4
  for (occ in seq(2, n_steps)) {
    traj[occ] <- coef * traj[occ - 1] + 4
  }
  traj
}

T <- 10
a <- seq(-1.5, 1.5, .1)
# TAR model: one row per autoregressive coefficient, one column per occasion
y_ts <- as.data.frame(t(vapply(a, simulate_traj, numeric(T), n_steps = T)))
names(y_ts) <- paste0("t", seq_len(T))
y_ts$a <- a
# wide -> long: Y indexed by Time, with a run id stored in "cond"
y_ts_long <- reshape(y_ts,
                     direction = "long",
                     varying = list(paste0("t", seq_len(T))),
                     v.names = "Y",
                     idvar = "cond",
                     timevar = "Time")
y_ts_long$MODEL <- "TAR"

#UNIVARIATE CHANGE SCORE MODEL
# LCS recurrence x_t = x_{t-1} + p * x_{t-1} + 4, i.e. AR coefficient (1 + p)
T <- 10
p <- seq(-2.5, .5, .1)
x_ts <- as.data.frame(t(vapply(1 + p, simulate_traj, numeric(T), n_steps = T)))
names(x_ts) <- paste0("t", seq_len(T))
x_ts$p <- p
x_ts_long <- reshape(x_ts,
                     direction = "long",
                     varying = list(paste0("t", seq_len(T))),
                     v.names = "Y",
                     idvar = "cond",
                     timevar = "Time")
x_ts_long$MODEL <- "LCS"

# stacked long data for both models
ts_long_merged <- rbind(y_ts_long, x_ts_long)
# Figure 1: TAR Trajectories ----------------------------------------------
# Split the TAR long data by ranges of the AR coefficient `a` into seven
# qualitative regimes, then draw one line plot per regime and arrange them
# in a 3-row grid. `a` is converted to factor so each coefficient gets its
# own linetype in the legend.
library(ggplot2)
library(gridExtra)
theme_update(plot.title = element_text(hjust = 0.5))
# |a| > 1, negative: diverging oscillations ("volatility")
y_polarize = subset(y_ts_long, a >= -1.5 & a <= -1.1)
y_polarize$a = as.factor(y_polarize$a)
# a = -1: constant-amplitude oscillation
y_ntrend = subset(y_ts_long, a == -1)
y_ntrend$a = as.factor(y_ntrend$a)
# -1 < a < 0: damped oscillation toward equilibrium
y_nconv = subset(y_ts_long, a > -1 & a < 0)
y_nconv$a = as.factor(y_nconv$a)
# repair the floating-point artifact in the seq()-generated level label
levels(y_nconv$a)[levels(y_nconv$a)=="-0.0999999999999999"] <- "-0.1"
# a = 0: no carryover, flat series
y_none = subset(y_ts_long, a == 0)
y_none$a = as.factor(y_none$a)
# 0 < a < 1: smooth convergence
y_pconv = subset(y_ts_long, a > 0 & a < 1)
y_pconv$a = as.factor(y_pconv$a)
# a = 1: constant linear growth
y_trend = subset(y_ts_long, a == 1)
y_trend$a = as.factor(y_trend$a)
# a > 1: explosive growth
y_exp = subset(y_ts_long, a >= 1.1 & a <= 1.5)
y_exp$a = as.factor(y_exp$a)
c1 = ggplot(data = y_polarize) +
aes(x = Time, y = Y, group = a) +
geom_line(aes(linetype=a)) +
#geom_point(aes(shape=a)) +
labs(title="Volatility",x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 10)) +theme(legend.key.size = unit(0.5, "cm"))
c2 = ggplot(data = y_ntrend) +
aes(x = Time, y = Y, group = a) +
geom_line(aes(linetype=a)) +
#geom_point(aes(shape=a)) +
labs(title="Periodic Change",x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 10)) +theme(legend.key.size = unit(0.5, "cm"))
c3 = ggplot(data = y_nconv) +
aes(x = Time, y = Y, group = a) +
geom_line(aes(linetype=a)) +
#geom_point(aes(shape=a)) +
labs(title="Oscillatory Convergence",x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 10)) +theme(legend.key.size = unit(0.5, "cm"))
c4 = ggplot(data = y_none) +
aes(x = Time, y = Y, group = a) +
geom_line(aes(linetype=a)) +
#geom_point(aes(shape=a)) +
labs(title="Stasis",x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 10))+theme(legend.key.size = unit(0.5, "cm"))
c5 = ggplot(data = y_pconv) +
aes(x = Time, y = Y, group = a) +
geom_line(aes(linetype=a)) +
#geom_point(aes(shape=a)) +
labs(title="Smooth Convergence",x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 10)) +theme(legend.key.size = unit(0.5, "cm"))
c6 = ggplot(data = y_trend) +
aes(x = Time, y = Y, group = a) +
geom_line(aes(linetype=a)) +
#geom_point(aes(shape=a)) +
labs(title="Constant Growth",x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 10)) +theme(legend.key.size = unit(0.5, "cm"))
c7 = ggplot(data = y_exp) +
aes(x = Time, y = Y, group = a) +
geom_line(aes(linetype=a)) +
#geom_point(aes(shape=a)) +
labs(title="Explosive Growth", x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 10)) +theme(legend.key.size = unit(0.5, "cm"))
# assemble the seven regime panels into a single figure
grid.arrange(c1, c2, c3, c4, c5, c6, c7, nrow = 3)
# Figure 2: LCS Trajectories ----------------------------------------------
x_polarize = subset(x_ts_long, p >= -2.5 & p <= -2.1)
x_polarize$p = as.factor(x_polarize$p)
x_ntrend = subset(x_ts_long, p == -2)
x_ntrend$p = as.factor(x_ntrend$p)
x_nconv = subset(x_ts_long, p >= -1.9 & p < -1)
x_nconv$p = as.factor(x_nconv$p)
x_none = subset(x_ts_long, p == -1)
x_none$p = as.factor(x_none$p)
x_pconv = subset(x_ts_long, p >= -.9 & p < 0)
x_pconv$p = as.factor(x_pconv$p)
levels(x_pconv$p)[levels(x_pconv$p)=="-0.0999999999999996"] <- "-0.1"
x_trend = subset(x_ts_long, p == 0)
x_trend$p = as.factor(x_trend$p)
x_exp = subset(x_ts_long, p >= .1 & p <= .5)
x_exp$p = as.factor(x_exp$p)
c8 = ggplot(data = x_polarize) +
aes(x = Time, y = Y, group = p) +
geom_line(aes(linetype=p)) +
#geom_point(aes(shape=a)) +
labs(title="Volatility",x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 10))+theme(legend.key.size = unit(0.5, "cm"))
c9 = ggplot(data = x_ntrend) +
aes(x = Time, y = Y, group = p) +
geom_line(aes(linetype=p)) +
#geom_point(aes(shape=a)) +
labs(title="Periodic Change",x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 10))+theme(legend.key.size = unit(0.5, "cm"))
c10 = ggplot(data = x_nconv) +
aes(x = Time, y = Y, group = p) +
geom_line(aes(linetype=p)) +
#geom_point(aes(shape=a)) +
labs(title="Oscillatory Convergence",x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 10))+theme(legend.key.size = unit(0.5, "cm"))
c11 = ggplot(data = x_none) +
aes(x = Time, y = Y, group = p) +
geom_line(aes(linetype=p)) +
#geom_point(aes(shape=a)) +
labs(title="Stasis",x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 10))+theme(legend.key.size = unit(0.5, "cm"))
c12 = ggplot(data = x_pconv) +
aes(x = Time, y = Y, group = p) +
geom_line(aes(linetype=p)) +
#geom_point(aes(shape=a)) +
labs(title="Smooth Convergence",x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 10))+theme(legend.key.size = unit(0.5, "cm"))
c13 = ggplot(data = x_trend) +
aes(x = Time, y = Y, group = p) +
geom_line(aes(linetype=p)) +
#geom_point(aes(shape=a)) +
labs(title="Constant Growth",x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 10))+theme(legend.key.size = unit(0.5, "cm"))
c14 = ggplot(data = x_exp) +
aes(x = Time, y = Y, group = p) +
geom_line(aes(linetype=p)) +
#geom_point(aes(shape=a)) +
labs(title="Explosive Growth", x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 10)) +theme(legend.key.size = unit(0.5, "cm"))
grid.arrange(c8, c9, c10, c11, c12, c13, c14, nrow = 3)
# Load packages first, then import the simulation summary table.
library(readxl)
library(ggplot2)
library(gridExtra)

# NOTE(review): user-specific absolute path — adjust when running elsewhere.
table_for_graphs <- read_excel("OneDrive - Michigan State University/Research/Current_Projects/LCSM_Sims/figures_tables/table_for_graphs.xlsx")
View(table_for_graphs)
df <- table_for_graphs
# Center plot titles for every figure built below.
theme_update(plot.title = element_text(hjust = 0.5))
# Figure 3: Bias ----------------------------------------------------------
df$N <- as.factor(df$N)
# Helper: bias-vs-beta panel for a single series length T. The three panels
# were identical except for the subset and the title, so build them from one
# function (DRY) — color by MODEL, linetype by sample size N.
plot_bias_panel <- function(t_val) {
  ggplot(data = subset(df, T == t_val)) +
    aes(x = CHANGE, y = as.numeric(BIAS)) +
    geom_line(aes(color = MODEL, linetype = N)) +
    labs(title = paste0("T = ", t_val), x = "Beta", y = "Bias") +
    scale_x_continuous(breaks = seq(-1.5, 1.5, .1)) +
    scale_color_manual(values = c("red", "blue"))
}
bias_t5 <- plot_bias_panel(5)
bias_t10 <- plot_bias_panel(10)
bias_t30 <- plot_bias_panel(30)
grid.arrange(bias_t5, bias_t10, bias_t30)
# Predicted vs Actual -----------------------------------------------------
# Prediction Functions -----------------------------------------------------
# Project autoregressive (TAR-parameterized) trajectories.
#
# For each coefficient a[i], iterates y_t = a[i] * y_{t-1} + intercept over
# T occasions, starting from y_1 = y0.
#
# Args:
#   a: numeric vector of autoregressive coefficients (one trajectory each).
#   T: number of time points (T = 1 now returns just the starting values).
#   y0: starting value at t = 1 (default 4, matching the original
#       hard-coded simulation setup — backward compatible).
#   intercept: constant added at each step (default 4, ditto).
#
# Returns:
#   A list with [[1]] the wide data frame (columns t1..tT, one row per
#   coefficient) and [[2]] the long data frame (Time, Y, cond) produced by
#   stats::reshape().
project_tar <- function(a, T, y0 = 4, intercept = 4) {
  y_ts <- matrix(NA, length(a), T)
  y_ts[, 1] <- y0
  for (i in seq_along(a)) {
    # seq_len(T)[-1] is empty when T == 1 (the original 2:T would have
    # evaluated to c(2, 1) and failed with an out-of-bounds subscript).
    for (j in seq_len(T)[-1]) {
      y_ts[i, j] <- a[i] * y_ts[i, (j - 1)] + intercept
    }
  }
  y_ts <- as.data.frame(y_ts)
  names(y_ts) <- paste0("t", seq_len(T))
  y_ts_long <- reshape(y_ts,
                       direction = "long",
                       varying = list(names(y_ts)),
                       v.names = "Y",
                       idvar = c("cond"),  # cond = coefficient index
                       timevar = "Time")
  list(y_ts, y_ts_long)
}
# Project LCSM-implied trajectories from proportional-change coefficients.
#
# For each coefficient p[i], iterates x_t = (1 + p[i]) * x_{t-1} + intercept
# over T occasions, starting from x_1 = x0 (the LCS proportional-change
# parameter p maps onto an AR coefficient of 1 + p).
#
# Args:
#   p: numeric vector of proportional-change coefficients.
#   T: number of time points (T = 1 now returns just the starting values).
#   x0: starting value at t = 1 (default 4, matching the original
#       hard-coded simulation setup — backward compatible).
#   intercept: constant added at each step (default 4, ditto).
#
# Returns:
#   A list with [[1]] the wide data frame (columns t1..tT, one row per
#   coefficient) and [[2]] the long data frame (Time, Y, cond) produced by
#   stats::reshape().
project_lcsm <- function(p, T, x0 = 4, intercept = 4) {
  x_ts <- matrix(NA, length(p), T)
  x_ts[, 1] <- x0
  for (i in seq_along(p)) {
    # seq_len(T)[-1] is empty when T == 1 (the original 2:T would have
    # evaluated to c(2, 1) and failed with an out-of-bounds subscript).
    for (j in seq_len(T)[-1]) {
      x_ts[i, j] <- (1 + p[i]) * x_ts[i, (j - 1)] + intercept
    }
  }
  x_ts <- as.data.frame(x_ts)
  names(x_ts) <- paste0("t", seq_len(T))
  x_ts_long <- reshape(x_ts,
                       direction = "long",
                       varying = list(names(x_ts)),
                       v.names = "Y",
                       idvar = c("cond"),  # cond = coefficient index
                       timevar = "Time")
  list(x_ts, x_ts_long)
}
# T = 5 -------------------------------------------------------------------
# Generate Data -----------------------------------------------------------
# Actual (true model-implied) trajectories; N is a dummy plotting level.
b <- seq(-1.5, 1.5, .1)
temp_t5 <- as.data.frame(project_tar(b, 5)[2])
COEF <- seq(-1.5, 1.5, .1)
N <- NA
actualt5 <- cbind(temp_t5, COEF, N)  # COEF recycles over the 5 time blocks
actualt5$N <- 200
actualt5$MODEL <- 'b'
# TAR estimates projected forward (31 coefficients x 2 sample sizes;
# relies on df being ordered with N = 200 before N = 500 within each T).
temp_tarT5 <- project_tar(subset(df, T == 5 & MODEL == "TAR")$EST, 5)
temp_tarT5 <- as.data.frame(temp_tarT5[2])
COEF <- rep(seq(-1.5, 1.5, .1), 5 * 2)  # T = 5 * levels(N) = 2
N <- rep(c(rep(200, 31), rep(500, 31)), 5)  # levels(b) = 31; T = 5
tarT5 <- cbind(temp_tarT5, COEF, N)
tarT5$MODEL <- 'TAR'
# LCS estimates projected forward (same layout as TAR).
temp_lcsT5 <- project_lcsm(subset(df, T == 5 & MODEL == "LCS")$EST, 5)
temp_lcsT5 <- as.data.frame(temp_lcsT5[2])
COEF <- rep(seq(-1.5, 1.5, .1), 5 * 2)  # T = 5 * levels(N) = 2
N <- rep(c(rep(200, 31), rep(500, 31)), 5)  # levels(b) = 31; T = 5
lcsT5 <- cbind(temp_lcsT5, COEF, N)
lcsT5$MODEL <- 'LCS'
# Stack actual + model projections; drop the reshape id column.
predT5 <- rbind(actualt5, tarT5, lcsT5)
predT5 <- subset(predT5, select = -c(cond))
predT5$N <- as.factor(predT5$N)
predT5$COEF <- as.factor(predT5$COEF)
# Normalize floating-point artifacts in the COEF level labels (e.g.
# "-0.0999999999999999" -> "-0.1"). This replaces the fragile rename that
# matched one hard-coded 16-digit string and silently did nothing if the
# platform produced a slightly different representation.
levels(predT5$COEF) <- as.character(round(as.numeric(levels(predT5$COEF)), 1))
# Graphing Functions --------------------------------------
# Pull the shared legend grob ("guide-box") out of a ggplot object so it can
# be drawn once beside a grid of legend-less panels.
extract_legend <- function(my_ggp) {
  gtab <- ggplot_gtable(ggplot_build(my_ggp))
  legend_idx <- which(sapply(gtab$grobs, function(g) g$name) == "guide-box")
  gtab$grobs[[legend_idx]]
}
# Graph Predicted vs Actual Cases for T = 5 --------------------------------
# Helper: one predicted-vs-actual panel for T = 5. Subsets the stacked
# prediction data (predT5) to one coefficient level and draws the implied
# trajectories, colored by MODEL (actual b / TAR / LCS) with linetype by N.
# The 31 panels below were identical except for the coefficient and title,
# so they are generated from this single function (DRY); the original
# global panel names (n15t5..p15t5, p0t5) are kept because the figure
# assembly below — and potentially later code — refers to them.
plot_predT5 <- function(coef_val, panel_title) {
  ggplot(data = subset(predT5, COEF == coef_val)) +
    aes(x = as.numeric(Time), y = as.numeric(Y)) +
    geom_line(aes(color = MODEL, linetype = N)) +
    labs(title = panel_title, x = "Time", y = "Y values") +
    scale_x_continuous(breaks = seq(0, 5))
}
# Negative nonstationary cases (b <= -1)
n15t5 <- plot_predT5(-1.5, "b = -1.5")
n14t5 <- plot_predT5(-1.4, "b = -1.4")
n13t5 <- plot_predT5(-1.3, "b = -1.3")
n12t5 <- plot_predT5(-1.2, "b = -1.2")
n11t5 <- plot_predT5(-1.1, "b = -1.1")
n10t5 <- plot_predT5(-1.0, "b = -1.0")
# Positive nonstationary cases (b >= 1)
p10t5 <- plot_predT5(1.0, "b = 1.0")
p11t5 <- plot_predT5(1.1, "b = 1.1")
p12t5 <- plot_predT5(1.2, "b = 1.2")
p13t5 <- plot_predT5(1.3, "b = 1.3")
p14t5 <- plot_predT5(1.4, "b = 1.4")
p15t5 <- plot_predT5(1.5, "b = 1.5")
# Panel figure: nonstationary cases only.
grid.arrange(n15t5 + theme(legend.position = "hidden"),
             n14t5 + theme(legend.position = "hidden"),
             n13t5 + theme(legend.position = "hidden"),
             n12t5 + theme(legend.position = "hidden"),
             n11t5 + theme(legend.position = "hidden"),
             n10t5 + theme(legend.position = "hidden"),
             p10t5 + theme(legend.position = "hidden"),
             p11t5 + theme(legend.position = "hidden"),
             p12t5 + theme(legend.position = "hidden"),
             p13t5 + theme(legend.position = "hidden"),
             p14t5 + theme(legend.position = "hidden"),
             p15t5 + theme(legend.position = "hidden"),
             nrow = 3)
# Graph Stationary Cases for T = 5 -----------------------------------------
# Negative stationary cases (-1 < b < 0)
n09t5 <- plot_predT5(-0.9, "b = -0.9")
n08t5 <- plot_predT5(-0.8, "b = -0.8")
n07t5 <- plot_predT5(-0.7, "b = -0.7")
n06t5 <- plot_predT5(-0.6, "b = -0.6")
n05t5 <- plot_predT5(-0.5, "b = -0.5")
n04t5 <- plot_predT5(-0.4, "b = -0.4")
n03t5 <- plot_predT5(-0.3, "b = -0.3")
n02t5 <- plot_predT5(-0.2, "b = -0.2")
n01t5 <- plot_predT5(-0.1, "b = -0.1")
# Positive stationary cases (0 <= b < 1)
p09t5 <- plot_predT5(0.9, "b = 0.9")
p08t5 <- plot_predT5(0.8, "b = 0.8")
p07t5 <- plot_predT5(0.7, "b = 0.7")
p06t5 <- plot_predT5(0.6, "b = 0.6")
p05t5 <- plot_predT5(0.5, "b = 0.5")
p04t5 <- plot_predT5(0.4, "b = 0.4")
p03t5 <- plot_predT5(0.3, "b = 0.3")
p02t5 <- plot_predT5(0.2, "b = 0.2")
p01t5 <- plot_predT5(0.1, "b = 0.1")
p0t5 <- plot_predT5(0, "b = 0")
# Panel figure: stationary cases only.
grid.arrange(n09t5 + theme(legend.position = "hidden"),
             n08t5 + theme(legend.position = "hidden"),
             n07t5 + theme(legend.position = "hidden"),
             n06t5 + theme(legend.position = "hidden"),
             n05t5 + theme(legend.position = "hidden"),
             n04t5 + theme(legend.position = "hidden"),
             n03t5 + theme(legend.position = "hidden"),
             n02t5 + theme(legend.position = "hidden"),
             n01t5 + theme(legend.position = "hidden"),
             p0t5 + theme(legend.position = "hidden"),
             p01t5 + theme(legend.position = "hidden"),
             p02t5 + theme(legend.position = "hidden"),
             p03t5 + theme(legend.position = "hidden"),
             p04t5 + theme(legend.position = "hidden"),
             p05t5 + theme(legend.position = "hidden"),
             p06t5 + theme(legend.position = "hidden"),
             p07t5 + theme(legend.position = "hidden"),
             p08t5 + theme(legend.position = "hidden"),
             p09t5 + theme(legend.position = "hidden"),
             nrow = 4)
# Panel figure: all cases combined.
grid.arrange(n15t5 + theme(legend.position = "hidden"),
             n14t5 + theme(legend.position = "hidden"),
             n13t5 + theme(legend.position = "hidden"),
             n12t5 + theme(legend.position = "hidden"),
             n11t5 + theme(legend.position = "hidden"),
             n10t5 + theme(legend.position = "hidden"),
             n09t5 + theme(legend.position = "hidden"),
             n08t5 + theme(legend.position = "hidden"),
             n07t5 + theme(legend.position = "hidden"),
             n06t5 + theme(legend.position = "hidden"),
             n05t5 + theme(legend.position = "hidden"),
             n04t5 + theme(legend.position = "hidden"),
             n03t5 + theme(legend.position = "hidden"),
             n02t5 + theme(legend.position = "hidden"),
             n01t5 + theme(legend.position = "hidden"),
             p0t5 + theme(legend.position = "hidden"),
             p01t5 + theme(legend.position = "hidden"),
             p02t5 + theme(legend.position = "hidden"),
             p03t5 + theme(legend.position = "hidden"),
             p04t5 + theme(legend.position = "hidden"),
             p05t5 + theme(legend.position = "hidden"),
             p06t5 + theme(legend.position = "hidden"),
             p07t5 + theme(legend.position = "hidden"),
             p08t5 + theme(legend.position = "hidden"),
             p09t5 + theme(legend.position = "hidden"),
             p10t5 + theme(legend.position = "hidden"),
             p11t5 + theme(legend.position = "hidden"),
             p12t5 + theme(legend.position = "hidden"),
             p13t5 + theme(legend.position = "hidden"),
             p14t5 + theme(legend.position = "hidden"),
             p15t5 + theme(legend.position = "hidden"),
             nrow = 4)
# T = 10 ------------------------------------------------------------------
# Generate Data -----------------------------------------------------------
# Actual (true model-implied) trajectories; N is a dummy plotting level.
b <- seq(-1.5, 1.5, .1)
temp_t10 <- as.data.frame(project_tar(b, 10)[2])
COEF <- seq(-1.5, 1.5, .1)
N <- NA
actualt10 <- cbind(temp_t10, COEF, N)  # COEF recycles over the 10 time blocks
actualt10$N <- 200
actualt10$MODEL <- 'b'
# TAR estimates projected forward (31 coefficients x 2 sample sizes;
# relies on df being ordered with N = 200 before N = 500 within each T).
temp_tarT10 <- project_tar(subset(df, T == 10 & MODEL == "TAR")$EST, 10)
temp_tarT10 <- as.data.frame(temp_tarT10[2])
COEF <- rep(seq(-1.5, 1.5, .1), 10 * 2)  # T = 10 * levels(N) = 2
N <- rep(c(rep(200, 31), rep(500, 31)), 10)  # levels(b) = 31; T = 10
tarT10 <- cbind(temp_tarT10, COEF, N)
tarT10$MODEL <- 'TAR'
# LCS estimates projected forward (same layout as TAR).
temp_lcsT10 <- project_lcsm(subset(df, T == 10 & MODEL == "LCS")$EST, 10)
temp_lcsT10 <- as.data.frame(temp_lcsT10[2])
COEF <- rep(seq(-1.5, 1.5, .1), 10 * 2)  # T = 10 * levels(N) = 2
N <- rep(c(rep(200, 31), rep(500, 31)), 10)  # levels(b) = 31; T = 10
lcsT10 <- cbind(temp_lcsT10, COEF, N)
lcsT10$MODEL <- 'LCS'
# Stack actual + model projections; drop the reshape id column.
predT10 <- rbind(actualt10, tarT10, lcsT10)
predT10 <- subset(predT10, select = -c(cond))
predT10$N <- as.factor(predT10$N)
predT10$COEF <- as.factor(predT10$COEF)
# Normalize floating-point artifacts in the COEF level labels (e.g.
# "-0.0999999999999999" -> "-0.1"). This replaces the fragile rename that
# matched one hard-coded 16-digit string and silently did nothing if the
# platform produced a slightly different representation.
levels(predT10$COEF) <- as.character(round(as.numeric(levels(predT10$COEF)), 1))
# Graph Predicted vs Actual Cases for T = 10 -------------------------------
# Helper: one predicted-vs-actual panel for T = 10. Subsets the stacked
# prediction data (predT10) to one coefficient level and draws the implied
# trajectories, colored by MODEL (actual b / TAR / LCS) with linetype by N.
# The 31 panels below were identical except for the coefficient and title,
# so they are generated from this single function (DRY); the original
# global panel names (n15T10..p15T10, p0T10) are kept because the figure
# assembly below — and potentially later code — refers to them.
plot_predT10 <- function(coef_val, panel_title) {
  ggplot(data = subset(predT10, COEF == coef_val)) +
    aes(x = as.numeric(Time), y = as.numeric(Y)) +
    geom_line(aes(color = MODEL, linetype = N)) +
    labs(title = panel_title, x = "Time", y = "Y values") +
    scale_x_continuous(breaks = seq(0, 10))
}
# Negative nonstationary cases (b <= -1)
n15T10 <- plot_predT10(-1.5, "b = -1.5")
n14T10 <- plot_predT10(-1.4, "b = -1.4")
n13T10 <- plot_predT10(-1.3, "b = -1.3")
n12T10 <- plot_predT10(-1.2, "b = -1.2")
n11T10 <- plot_predT10(-1.1, "b = -1.1")
n10T10 <- plot_predT10(-1.0, "b = -1.0")
# Positive nonstationary cases (b >= 1)
p10T10 <- plot_predT10(1.0, "b = 1.0")
p11T10 <- plot_predT10(1.1, "b = 1.1")
p12T10 <- plot_predT10(1.2, "b = 1.2")
p13T10 <- plot_predT10(1.3, "b = 1.3")
p14T10 <- plot_predT10(1.4, "b = 1.4")
p15T10 <- plot_predT10(1.5, "b = 1.5")
# Panel figure: nonstationary cases only.
grid.arrange(n15T10 + theme(legend.position = "hidden"),
             n14T10 + theme(legend.position = "hidden"),
             n13T10 + theme(legend.position = "hidden"),
             n12T10 + theme(legend.position = "hidden"),
             n11T10 + theme(legend.position = "hidden"),
             n10T10 + theme(legend.position = "hidden"),
             p10T10 + theme(legend.position = "hidden"),
             p11T10 + theme(legend.position = "hidden"),
             p12T10 + theme(legend.position = "hidden"),
             p13T10 + theme(legend.position = "hidden"),
             p14T10 + theme(legend.position = "hidden"),
             p15T10 + theme(legend.position = "hidden"),
             nrow = 3)
# Graph Stationary Cases for T = 10 ----------------------------------------
# Negative stationary cases (-1 < b < 0)
n09T10 <- plot_predT10(-0.9, "b = -0.9")
n08T10 <- plot_predT10(-0.8, "b = -0.8")
n07T10 <- plot_predT10(-0.7, "b = -0.7")
n06T10 <- plot_predT10(-0.6, "b = -0.6")
n05T10 <- plot_predT10(-0.5, "b = -0.5")
n04T10 <- plot_predT10(-0.4, "b = -0.4")
n03T10 <- plot_predT10(-0.3, "b = -0.3")
n02T10 <- plot_predT10(-0.2, "b = -0.2")
n01T10 <- plot_predT10(-0.1, "b = -0.1")
# Positive stationary cases (0 <= b < 1)
p09T10 <- plot_predT10(0.9, "b = 0.9")
p08T10 <- plot_predT10(0.8, "b = 0.8")
p07T10 <- plot_predT10(0.7, "b = 0.7")
p06T10 <- plot_predT10(0.6, "b = 0.6")
p05T10 <- plot_predT10(0.5, "b = 0.5")
p04T10 <- plot_predT10(0.4, "b = 0.4")
p03T10 <- plot_predT10(0.3, "b = 0.3")
p02T10 <- plot_predT10(0.2, "b = 0.2")
p01T10 <- plot_predT10(0.1, "b = 0.1")
p0T10 <- plot_predT10(0, "b = 0")
# Panel figure: stationary cases only.
grid.arrange(n09T10 + theme(legend.position = "hidden"),
             n08T10 + theme(legend.position = "hidden"),
             n07T10 + theme(legend.position = "hidden"),
             n06T10 + theme(legend.position = "hidden"),
             n05T10 + theme(legend.position = "hidden"),
             n04T10 + theme(legend.position = "hidden"),
             n03T10 + theme(legend.position = "hidden"),
             n02T10 + theme(legend.position = "hidden"),
             n01T10 + theme(legend.position = "hidden"),
             p0T10 + theme(legend.position = "hidden"),
             p01T10 + theme(legend.position = "hidden"),
             p02T10 + theme(legend.position = "hidden"),
             p03T10 + theme(legend.position = "hidden"),
             p04T10 + theme(legend.position = "hidden"),
             p05T10 + theme(legend.position = "hidden"),
             p06T10 + theme(legend.position = "hidden"),
             p07T10 + theme(legend.position = "hidden"),
             p08T10 + theme(legend.position = "hidden"),
             p09T10 + theme(legend.position = "hidden"),
             nrow = 4)
# Panel figure: all cases combined.
grid.arrange(n15T10 + theme(legend.position = "hidden"),
             n14T10 + theme(legend.position = "hidden"),
             n13T10 + theme(legend.position = "hidden"),
             n12T10 + theme(legend.position = "hidden"),
             n11T10 + theme(legend.position = "hidden"),
             n10T10 + theme(legend.position = "hidden"),
             n09T10 + theme(legend.position = "hidden"),
             n08T10 + theme(legend.position = "hidden"),
             n07T10 + theme(legend.position = "hidden"),
             n06T10 + theme(legend.position = "hidden"),
             n05T10 + theme(legend.position = "hidden"),
             n04T10 + theme(legend.position = "hidden"),
             n03T10 + theme(legend.position = "hidden"),
             n02T10 + theme(legend.position = "hidden"),
             n01T10 + theme(legend.position = "hidden"),
             p0T10 + theme(legend.position = "hidden"),
             p01T10 + theme(legend.position = "hidden"),
             p02T10 + theme(legend.position = "hidden"),
             p03T10 + theme(legend.position = "hidden"),
             p04T10 + theme(legend.position = "hidden"),
             p05T10 + theme(legend.position = "hidden"),
             p06T10 + theme(legend.position = "hidden"),
             p07T10 + theme(legend.position = "hidden"),
             p08T10 + theme(legend.position = "hidden"),
             p09T10 + theme(legend.position = "hidden"),
             p10T10 + theme(legend.position = "hidden"),
             p11T10 + theme(legend.position = "hidden"),
             p12T10 + theme(legend.position = "hidden"),
             p13T10 + theme(legend.position = "hidden"),
             p14T10 + theme(legend.position = "hidden"),
             p15T10 + theme(legend.position = "hidden"),
             nrow = 4)
# T = 30 ------------------------------------------------------------------
# Generate Data -----------------------------------------------------------
# Actual (true model-implied) trajectories; N is a dummy plotting level.
b <- seq(-1.5, 1.5, .1)
temp_T30 <- as.data.frame(project_tar(b, 30)[2])
COEF <- seq(-1.5, 1.5, .1)
N <- NA
actualT30 <- cbind(temp_T30, COEF, N)  # COEF recycles over the 30 time blocks
actualT30$N <- 200
actualT30$MODEL <- 'b'
# TAR estimates projected forward (31 coefficients x 2 sample sizes;
# relies on df being ordered with N = 200 before N = 500 within each T).
temp_tarT30 <- project_tar(subset(df, T == 30 & MODEL == "TAR")$EST, 30)
temp_tarT30 <- as.data.frame(temp_tarT30[2])
COEF <- rep(seq(-1.5, 1.5, .1), 30 * 2)  # T = 30 * levels(N) = 2
N <- rep(c(rep(200, 31), rep(500, 31)), 30)  # levels(b) = 31; T = 30
tarT30 <- cbind(temp_tarT30, COEF, N)
tarT30$MODEL <- 'TAR'
# LCS estimates projected forward (same layout as TAR).
temp_lcsT30 <- project_lcsm(subset(df, T == 30 & MODEL == "LCS")$EST, 30)
temp_lcsT30 <- as.data.frame(temp_lcsT30[2])
COEF <- rep(seq(-1.5, 1.5, .1), 30 * 2)  # T = 30 * levels(N) = 2
N <- rep(c(rep(200, 31), rep(500, 31)), 30)  # levels(b) = 31; T = 30
lcsT30 <- cbind(temp_lcsT30, COEF, N)
lcsT30$MODEL <- 'LCS'
# Stack actual + model projections; drop the reshape id column.
predT30 <- rbind(actualT30, tarT30, lcsT30)
predT30 <- subset(predT30, select = -c(cond))
predT30$N <- as.factor(predT30$N)
predT30$COEF <- as.factor(predT30$COEF)
# Normalize floating-point artifacts in the COEF level labels (e.g.
# "-0.0999999999999999" -> "-0.1"). This replaces the fragile rename that
# matched one hard-coded 16-digit string and silently did nothing if the
# platform produced a slightly different representation.
levels(predT30$COEF) <- as.character(round(as.numeric(levels(predT30$COEF)), 1))
# Graph Predicted vs Actual Cases for T = 30 -------------------------------
# Helper: one predicted-vs-actual panel for T = 30. Subsets the stacked
# prediction data (predT30) to one coefficient level and draws the implied
# trajectories, colored by MODEL (actual b / TAR / LCS) with linetype by N.
# The panels below were identical except for the coefficient and title, so
# they are generated from this single function (DRY); the original global
# panel names (n15T30.., p04T30..) are kept because later code (including
# lines past this section) refers to them.
plot_predT30 <- function(coef_val, panel_title) {
  ggplot(data = subset(predT30, COEF == coef_val)) +
    aes(x = as.numeric(Time), y = as.numeric(Y)) +
    geom_line(aes(color = MODEL, linetype = N)) +
    labs(title = panel_title, x = "Time", y = "Y values") +
    scale_x_continuous(breaks = seq(0, 30, 5))
}
# Negative nonstationary cases (b <= -1)
n15T30 <- plot_predT30(-1.5, "b = -1.5")
n14T30 <- plot_predT30(-1.4, "b = -1.4")
n13T30 <- plot_predT30(-1.3, "b = -1.3")
n12T30 <- plot_predT30(-1.2, "b = -1.2")
n11T30 <- plot_predT30(-1.1, "b = -1.1")
n10T30 <- plot_predT30(-1.0, "b = -1.0")
# Positive nonstationary cases (b >= 1)
p10T30 <- plot_predT30(1.0, "b = 1.0")
p11T30 <- plot_predT30(1.1, "b = 1.1")
p12T30 <- plot_predT30(1.2, "b = 1.2")
p13T30 <- plot_predT30(1.3, "b = 1.3")
p14T30 <- plot_predT30(1.4, "b = 1.4")
p15T30 <- plot_predT30(1.5, "b = 1.5")
# Panel figure: nonstationary cases only.
grid.arrange(n15T30 + theme(legend.position = "hidden"),
             n14T30 + theme(legend.position = "hidden"),
             n13T30 + theme(legend.position = "hidden"),
             n12T30 + theme(legend.position = "hidden"),
             n11T30 + theme(legend.position = "hidden"),
             n10T30 + theme(legend.position = "hidden"),
             p10T30 + theme(legend.position = "hidden"),
             p11T30 + theme(legend.position = "hidden"),
             p12T30 + theme(legend.position = "hidden"),
             p13T30 + theme(legend.position = "hidden"),
             p14T30 + theme(legend.position = "hidden"),
             p15T30 + theme(legend.position = "hidden"),
             nrow = 3)
# Graph Stationary Cases for T = 30 ----------------------------------------
# Negative stationary cases (-1 < b < 0)
n09T30 <- plot_predT30(-0.9, "b = -0.9")
n08T30 <- plot_predT30(-0.8, "b = -0.8")
n07T30 <- plot_predT30(-0.7, "b = -0.7")
n06T30 <- plot_predT30(-0.6, "b = -0.6")
n05T30 <- plot_predT30(-0.5, "b = -0.5")
n04T30 <- plot_predT30(-0.4, "b = -0.4")
n03T30 <- plot_predT30(-0.3, "b = -0.3")
n02T30 <- plot_predT30(-0.2, "b = -0.2")
n01T30 <- plot_predT30(-0.1, "b = -0.1")
# Positive stationary cases
p09T30 <- plot_predT30(0.9, "b = 0.9")
p08T30 <- plot_predT30(0.8, "b = 0.8")
p07T30 <- plot_predT30(0.7, "b = 0.7")
p06T30 <- plot_predT30(0.6, "b = 0.6")
p05T30 <- plot_predT30(0.5, "b = 0.5")
p04T30 <- plot_predT30(0.4, "b = 0.4")
p03T30 = ggplot(data = subset(predT30, COEF == 0.3)) +
aes(x = as.numeric(Time), y = as.numeric(Y)) +
geom_line(aes(color = MODEL, linetype=N)) +
labs(title="b = 0.3",x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 30, 5))
p02T30 = ggplot(data = subset(predT30, COEF == 0.2)) +
aes(x = as.numeric(Time), y = as.numeric(Y)) +
geom_line(aes(color = MODEL, linetype=N)) +
labs(title="b = 0.2",x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 30, 5))
p01T30 = ggplot(data = subset(predT30, COEF == 0.1)) +
aes(x = as.numeric(Time), y = as.numeric(Y)) +
geom_line(aes(color = MODEL, linetype=N)) +
labs(title="b = 0.1",x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 30, 5))
p0T30 = ggplot(data = subset(predT30, COEF == 0)) +
aes(x = as.numeric(Time), y = as.numeric(Y)) +
geom_line(aes(color = MODEL, linetype=N)) +
labs(title="b = 0",x="Time", y = "Y values") +
scale_x_continuous(breaks=seq(0, 30, 5))
grid.arrange(n09T30+theme(legend.position='hidden'),
n08T30+theme(legend.position='hidden'),
n07T30+theme(legend.position='hidden'),
n06T30+theme(legend.position='hidden'),
n05T30+theme(legend.position='hidden'),
n04T30+theme(legend.position='hidden'),
n03T30+theme(legend.position='hidden'),
n02T30+theme(legend.position='hidden'),
n01T30+theme(legend.position='hidden'),
p0T30+theme(legend.position='hidden'),
p01T30+theme(legend.position='hidden'),
p02T30+theme(legend.position='hidden'),
p03T30+theme(legend.position='hidden'),
p04T30+theme(legend.position='hidden'),
p05T30+theme(legend.position='hidden'),
p06T30+theme(legend.position='hidden'),
p07T30+theme(legend.position='hidden'),
p08T30+theme(legend.position='hidden'),
p09T30+theme(legend.position='hidden'),
nrow = 4)
grid.arrange(n15T30+theme(legend.position='hidden'),
n14T30+theme(legend.position='hidden'),
n13T30+theme(legend.position='hidden'),
n12T30+theme(legend.position='hidden'),
n11T30+theme(legend.position='hidden'),
n10T30+theme(legend.position='hidden'),
n09T30+theme(legend.position='hidden'),
n08T30+theme(legend.position='hidden'),
n07T30+theme(legend.position='hidden'),
n06T30+theme(legend.position='hidden'),
n05T30+theme(legend.position='hidden'),
n04T30+theme(legend.position='hidden'),
n03T30+theme(legend.position='hidden'),
n02T30+theme(legend.position='hidden'),
n01T30+theme(legend.position='hidden'),
p0T30+theme(legend.position='hidden'),
p01T30+theme(legend.position='hidden'),
p02T30+theme(legend.position='hidden'),
p03T30+theme(legend.position='hidden'),
p04T30+theme(legend.position='hidden'),
p05T30+theme(legend.position='hidden'),
p06T30+theme(legend.position='hidden'),
p07T30+theme(legend.position='hidden'),
p08T30+theme(legend.position='hidden'),
p09T30+theme(legend.position='hidden'),
p10T30+theme(legend.position='hidden'),
p11T30+theme(legend.position='hidden'),
p12T30+theme(legend.position='hidden'),
p13T30+theme(legend.position='hidden'),
p14T30+theme(legend.position='hidden'),
p15T30+theme(legend.position='hidden'),
nrow = 4)
# Figure 7: T1 Error ----------------------------------------------------------
# T = 5 ==================================================================
df$N = as.factor(df$N)
type1_t5 = ggplot(data = subset(df, T == 5)) +
aes(x = CHANGE, y = as.numeric(T1ERROR)) +
geom_line(aes(color = MODEL, linetype=N)) +
labs(title="T = 5",x="Beta", y = "Type 1 Error") +
scale_x_continuous(breaks=seq(-1.5, 1.5, .1)) +
scale_color_manual(values=c("red", "blue")) +
geom_hline(yintercept = .05)
## T = 10 ==================================================================
type1_t10 = ggplot(data = subset(df, T == 10)) +
aes(x = CHANGE, y = as.numeric(T1ERROR)) +
geom_line(aes(color = MODEL, linetype=N)) +
labs(title="T = 10",x="Beta", y = "Type 1 Error") +
scale_x_continuous(breaks=seq(-1.5, 1.5, .1)) +
scale_color_manual(values=c("red", "blue")) +
geom_hline(yintercept = .05)
## T = 30 ==================================================================
type1_t30 = ggplot(data = subset(df, T == 30)) +
aes(x = CHANGE, y = as.numeric(T1ERROR)) +
geom_line(aes(color = MODEL, linetype=N)) +
labs(title="T = 30",x="Beta", y = "Type 1 Error") +
scale_x_continuous(breaks=seq(-1.5, 1.5, .1)) +
scale_color_manual(values=c("red", "blue")) +
geom_hline(yintercept = .05)
grid.arrange(type1_t5, type1_t10, type1_t30)
|
898255feb2f4e61cfd1666e5fb8ad751e56776c9
|
cd442a4c4870194ab71caf58eace95a9124f04e7
|
/Lab_Code/R/Lib/Rgetopt/R/main.R
|
edebc3378c6cc8e6721791617db6d2ae6d809a87
|
[
"MIT"
] |
permissive
|
alexgraehl/TimeForScience
|
662041383cdd8ecb1ae606ca9e71983151ff6c7e
|
f383d397ac3ff8030ccf068cdfea26a8b8bc60c0
|
refs/heads/master
| 2023-01-24T09:11:15.725640
| 2023-01-14T01:24:07
| 2023-01-14T01:24:07
| 32,352,233
| 6
| 1
| null | 2021-04-09T01:14:09
| 2015-03-16T20:51:38
|
Perl
|
UTF-8
|
R
| false
| false
| 6,146
|
r
|
main.R
|
parseString <- function(string) string
# Parse `string` as an integer; NULL signals a parse failure.
# The round-trip check (parsed value compared back against the text)
# rejects inputs such as "5.5" or "5.0" whose integer coercion does not
# reproduce the original string.
parseInteger <- function(string) {
  parsed <- suppressWarnings(as.integer(string))
  if (is.na(parsed) || parsed != string) {
    return(NULL)
  }
  as.integer(string)
}
# Parse `string` as a double; NULL signals a parse failure.
parseFloat <- function(string) {
  parsed <- suppressWarnings(as.double(string))
  if (is.na(parsed)) NULL else parsed
}
# Parse `string` as a comma-separated R expression of whole numbers,
# e.g. "1,2,5:7". NULL signals failure: unparsable text, non-numeric
# result, or values with a fractional part.
parseIntegerList <- function(string) {
  vals <- try(eval(parse(text = paste("c(", string, ")"))), silent = TRUE)
  if (inherits(vals, "try-error") || !is.numeric(vals)) {
    return(NULL)
  }
  if (any(as.integer(vals) != vals, na.rm = TRUE)) {
    return(NULL)
  }
  as.integer(vals)
}
# Parse `string` as a comma-separated R expression of numbers,
# e.g. "1.5,2,3e-1". NULL signals a parse failure.
parseFloatList <- function(string) {
  vals <- try(eval(parse(text = paste("c(", string, ")"))), silent = TRUE)
  if (!inherits(vals, "try-error") && is.numeric(vals)) {
    return(vals)
  }
  NULL
}
# Split a comma-delimited string into a character vector.
parseStringList <- function(string) {
  parts <- strsplit(string, ",")
  unlist(parts)
}
# Split a space-delimited string into a character vector.
parseStringListSpace <- function(string) {
  parts <- strsplit(string, " ")
  unlist(parts)
}
# Open `string` for reading; "-" means standard input.
# Returns an open connection, or NULL when the file cannot be opened.
parseReadableFile <- function(string) {
  if (string == '-') return(file("stdin"))
  # silent = TRUE: failure is reported to the caller via NULL, so do not
  # also print the try() error (matches the other parse* helpers, which
  # all suppress it).
  f <- try(file(string, open = "r"), silent = TRUE)
  if (inherits(f, "try-error")) return(NULL)
  return(f)
}
# Open `string` for writing; "-" means standard output.
# Returns an open connection, or NULL when the file cannot be opened.
parseWriteableFile <- function(string) {
  if (string == '-') return(stdout())
  # silent = TRUE: failure is reported to the caller via NULL, so do not
  # also print the try() error (matches the other parse* helpers).
  f <- try(file(string, open = "w"), silent = TRUE)
  if (inherits(f, "try-error")) return(NULL)
  return(f)
}
# Build the default mapping from value-type codes (e.g. "i", "lf") to a
# parser function plus a human-readable description used in help output.
# Extra named entries supplied via ... are merged in (overriding the
# defaults on name collision) and validated for the expected
# list(parse = <function>, desc = <single string>) shape.
defaultArgValueMap <- function(...) {
  extra <- list(...)
  valueMap <- list(
    s     = list(parse = parseString,          desc = "<string>"),
    i     = list(parse = parseInteger,         desc = "<integer>"),
    f     = list(parse = parseFloat,           desc = "<float>"),
    li    = list(parse = parseIntegerList,     desc = "<integer list>"),
    lf    = list(parse = parseFloatList,       desc = "<float list>"),
    ls    = list(parse = parseStringList,      desc = "<string list>"),
    lss   = list(parse = parseStringListSpace, desc = "<space delimited string list>"),
    rfile = list(parse = parseReadableFile,    desc = "<readable file or pipe>"),
    wfile = list(parse = parseWriteableFile,   desc = "<writable file or pipe>"))
  if (length(extra) > 0) {
    # Every extra entry needs a non-empty name to key the map by.
    if (is.null(names(extra)) || any(is.na(extra))
        || any(nchar(names(extra)) == 0)) {
      stop("names not set for extra argument values")
    }
    valueMap[names(extra)] <- extra
  }
  for (key in names(extra)) {
    entry <- extra[[key]]
    stopifnot(all(names(entry) == c("parse", "desc")))
    stopifnot(is.function(entry$parse))
    stopifnot(is.character(entry$desc))
    stopifnot(length(entry$desc) == 1)
  }
  return(valueMap)
}
# Parse command-line arguments according to `argspec`.
#
# argspec   : character vector; first element is a usage string, the rest
#             are flag specifications of the form "name|alias=type desc"
#             (see parseArgMap).
# argv      : raw argument vector; defaults to the wrapper-provided
#             environment minus the program name.
# argMap    : pre-compiled specification (see parseArgMap).
# onerror   : handler called with a message on any parse failure;
#             defaults to printing usage and quitting.
# argValMap : mapping from value-type codes to parser/description pairs.
# defaults  : optional named list of default option values.
#
# Returns a named list of parsed option values; arguments left over
# after parsing stops are returned verbatim in the `argv` element.
Rgetopt <- function(...,
                    argspec=c(...),
                    argv=RgetArgvEnvironment()[-1],
                    argMap=parseArgMap(argspec),
                    onerror=function(x) usage(x,argspec=argspec,argMap=argMap),
                    argValMap=defaultArgValueMap(),
                    defaults) {
  description <- argspec[1]
  # One slot per known option, pre-filled with caller-supplied defaults.
  options <- vector(length(argMap$description), mode="list")
  names(options) <- names(argMap$description)
  if (!missing(defaults)) options[names(defaults)] <- defaults
  i <- 1
  while (i <= length(argv)) {
    # --help always short-circuits into the usage screen (which quits).
    if (argv[i] %in% c('--help')) usage('', argspec=argspec, argMap=argMap)
    if (argv[i] == '--') {
      # stop parsing arguments
      i <- i + 1
      break
    }
    # Strip one or two leading dashes to get the bare flag name.
    flag <- sub("^--?", "", argv[i])
    if (argv[i] == '-' || flag == argv[i]) {
      # encountered a non-argument, time to stop parsing
      break
    }
    # Resolve aliases to the primary flag name.
    flag <- argMap$map[flag]
    if (is.null(flag) || is.na(flag)) {
      onerror(paste("Unknown flag:", argv[i]))
    }
    if (argMap$value[flag] != "") {
      # Flag takes a value: consume the next argv entry and parse it.
      i <- i + 1
      if (i > length(argv)) {
        onerror(paste("Need an argument for option", argv[i-1]))
      }
      valtype <- argValMap[[match.arg(argMap$value[flag], names(argValMap))]]
      val <- valtype$parse(argv[i])
      if (is.null(val)) {
        # NULL from a parser means the text did not match the type.
        onerror(paste("Couldn't parse a", valtype$desc, "from", argv[i]))
      }
      options[[flag]] <- val
    } else {
      # Boolean flag: presence means TRUE.
      options[[flag]] <- TRUE
    }
    i <- i + 1
  }
  # Hand unconsumed arguments back to the caller.
  options$argv <- if (i > 1) argv[-(1:(i-1))] else argv
  return(options)
}
# Compile a raw argspec into lookup tables. Returns a list with:
#   map         - named vector: alias -> primary argument name
#   value       - named vector: primary name -> value-type code ("" = none)
#   aliases     - list: primary name -> all alias names
#   description - named vector: primary name -> help text
#   usage       - the first argspec entry, a general command description
parseArgMap <- function(argspec) {
  u <- argspec[1]
  argspec <- argspec[-1]
  # Each spec entry is "name|alias=type description...".
  d <- sub("[^ ]*[ ]*", "", argspec)     # drop the spec token, keep description
  spec <- sub(" .*$", "", argspec)       # the "name|alias=type" token
  value <- sub("[^=]*=?", "", spec)      # type code after "=", "" when absent
  aliases <- strsplit(sub("=.*", "", spec), "\\|")
  primary <- sapply(aliases, "[", 1)     # first alias is the canonical name
  names(d) <- primary
  names(value) <- primary
  names(aliases) <- primary
  # Flatten every alias into a single alias -> primary lookup vector.
  map <- vector("character")
  for (i in seq(from=1, length.out=length(aliases))) {
    map[aliases[[i]]] <- primary[i]
  }
  return(list(map=map, value=value, aliases=aliases, description=d, usage=u))
}
# Print an optional error message plus a help screen derived from the
# argspec, then invoke `finish`. NOTE: the default for `finish` is the
# lazily-evaluated expression q(), so by default this quits R.
usage <- function(reason, argspec, argMap=parseArgMap(argspec),
                  argValMap=defaultArgValueMap(), finish=q()) {
  if (!is.null(reason) && !is.na(reason) && nchar(reason) > 0) {
    cat(reason, "\n", sep='')
  }
  if (!missing(argspec) && !is.null(argMap)) {
    cat(argMap$usage, "\n", sep='')
    for(a in names(argMap$aliases)) {
      aliases <- argMap$aliases[[a]]
      # Single-character aliases get "-", longer ones "--".
      prefix <- c('-', '--')[(nchar(aliases) > 1) + 1]
      # Replace the value-type code with its human-readable description.
      v <- argMap$value[a]
      if (v != "") {
        v <- argValMap[[v]]$desc
      }
      v <- if (is.null(v)) "" else paste("", v)
      d <- argMap$description[a]
      cat("\n ", paste(prefix, aliases, sep='', collapse=", "), v, "\n", sep='')
      if (nchar(d) > 0) {cat(" ", d, "\n", sep='')}
    }
  }
  finish()
}
# Read the command-line arguments exported by the Rgetopt shell wrapper,
# which stores the count in RGETOPT_ARGC and each argument in
# RGETOPT_ARGV_<i>. Returns the argument vector; stops with an error
# when the wrapper environment is absent or malformed.
RgetArgvEnvironment <- function() {
  # suppressWarnings: an unset variable yields "" and as.integer("")
  # would otherwise emit a spurious NA-coercion warning before the stop().
  argc <- suppressWarnings(as.integer(Sys.getenv("RGETOPT_ARGC")))
  if (is.na(argc) || argc < 1) {
    stop("Invalid getopt setup: RGETOPT_ARGC not there")
  }
  return(Sys.getenv(paste("RGETOPT_ARGV", seq_len(argc), sep="_")))
}
# TRUE when the Rgetopt shell wrapper reported that stdout is a terminal.
stdoutIsTTY <- function() {
  Sys.getenv("RGETOPT_ISATTY") == "1"
}
# Path of the file stdout should be copied to on exit, as communicated
# by the Rgetopt shell wrapper ("" when not set).
deferredStdOutFile <- function() {
  Sys.getenv("RGETOPT_STDOUTONEXIT")
}
|
33bf98789cdce32be43fb287c13e330ab34562da
|
22c7f49d2537292a849a8dfe513d48c23629fcef
|
/LibBi-code/to_table.R
|
a4d6740b8f4e03dd794a6356ebd2c3a03715c684
|
[] |
no_license
|
kjartako/TMHMC
|
3d059cc214fc48c104742ca6bde05ab4dad706bf
|
95da66b129d20bf4e2c6fda23ee7866ca9353821
|
refs/heads/master
| 2021-06-26T23:48:50.522592
| 2021-03-17T16:26:29
| 2021-03-17T16:26:29
| 226,729,484
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 715
|
r
|
to_table.R
|
# Summarise MCMC output loaded from "Computations_new", which is
# expected to provide `llist` (a list of coda mcmc objects, one per run)
# and `timing` (a vector of run times, parallel to llist).
library(coda)
load("Computations_new")
n_runs <- length(llist)            # generalised from the hard-coded 8
means    <- matrix(0.0, 3, n_runs) # 3 parameters per run
sds      <- matrix(0.0, 3, n_runs)
ESSs     <- matrix(0.0, 3, n_runs)
ESSspert <- matrix(0.0, 3, n_runs)
for (i in seq_len(n_runs)) {
  s <- summary(llist[[i]])
  means[, i] <- s$statistics[, "Mean"]
  sds[, i]   <- s$statistics[, "SD"]
  ESSs[, i]  <- effectiveSize(llist[[i]])
  ESSspert[, i] <- ESSs[, i] / timing[i]   # effective samples per second
}
print("time")
print(min(timing))
print(mean(timing))
print("mean, SD")
print(rowMeans(means))
print(rowMeans(sds))
print("min ESS")
print(round(min(ESSs[1, ])))
print(round(min(ESSs[2, ])))
print(round(min(ESSs[3, ])))
print("min ESS/time")
print(min(ESSspert[1, ]))
print(min(ESSspert[2, ]))
print(min(ESSspert[3, ]))
print("mean ESS")
print(round(rowMeans(ESSs)))
print("mean ESS/time")   # fixed label: was "min ESS/time" but the value is a mean
print(rowMeans(ESSspert))
|
26ae3b9dae6601f392474f0fff1793b5bb5bb0aa
|
4c9f29cb8e4bb24d7d2042ca006fd7df010fc9f0
|
/man/bootbctype2.Rd
|
978b1b998729df05e04f9b0039b22bb95898bc79
|
[] |
no_license
|
cran/bccp
|
595b03fc436305b92ff1286ade0131de2895f795
|
197450e9e6b3733571dce84e78fa627b2d99d628
|
refs/heads/master
| 2023-04-23T23:14:41.410214
| 2021-05-18T03:10:05
| 2021-05-18T03:10:05
| 307,947,900
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,291
|
rd
|
bootbctype2.Rd
|
\name{bootbctype2}
\alias{bootbctype2}
\title{Computing the bias corrected maximum likelihood estimator under progressive type-I interval censoring scheme using the Bootstrap resampling}
\description{Computes the bias corrected maximum likelihood estimator under progressive type-I interval censoring scheme using the Bootstrap resampling. It works by obtaining the empirical distribution of the MLE using bootstrap approach and then constructing the percentile confidence intervals (PCI) suggested by DiCiccio and Tibshirani (1987).
}
\usage{bootbctype2(plan, param, mle, cdf, pdf, lb = 0, ub = Inf, nboot = 200, coverage = 0.95)}
\arguments{
\item{plan}{Censoring plan for progressive type-II censoring scheme. It must be given as a \code{data.frame} that includes number of failed items \code{X}, and vector of removed items \code{R}.}
\item{param}{Vector of the of the family parameter's names.}
\item{mle}{Vector of the maximum likelihood estimators.}
\item{cdf}{Expression of the cumulative distribution function.}
\item{pdf}{Expression for the probability density function.}
\item{lb}{Lower bound of the family support. That is zero by default.}
\item{ub}{Upper bound of the family's support. That is \code{Inf} by default.}
\item{nboot}{Number of Bootstrap resampling.}
\item{coverage}{Confidence or coverage level for constructing percentile confidence intervals. That is 0.95 by default.}
}
\details{For some families of distributions whose support is the positive semi-axis, i.e., \eqn{x>0}, the cumulative distribution function (cdf) may not be differentiable. In this case, the lower bound of the support of random variable, i.e., \code{lb} that is zero by default, must be chosen some positive small value to ensure the differentiability of the cdf.}
\value{A list of the outputs including a matrix that represents the variance-covariance matrix of the uncorrected MLE, a matrix that represents the variance-covariance matrix of the corrected MLE, the lower \code{LPCI}, and upper \code{UPCI}, bounds of \code{95\%} percentile confidence interval for \code{param}, the ML estimator, bias value, and bias-corrected estimator. Finally, the goodness-of-fit measures consists of Anderson-Darling (\code{AD}), Cramer-von Misses (\code{CVM}), and Kolmogorov-Smirnov (\code{KS}) statistics.}
\references{
T. J. DiCiccio and R. Tibshirani 1987. Bootstrap confidence intervals and bootstrap approximations. \emph{Journal of the American Statistical Association}, 82, 163-170.
A. J. Lemonte, F. Cribari-Neto, and K. L. P. Vasconcellos 2007. Improved statistical inference for the two-parameter Birnbaum-Saunders distribution.
\emph{Computational Statistics and Data Analysis}, 51, 4656-4681.
}
\author{Mahdi Teimouri}
\examples{
n <- 20
R <- c(9, rep(0, 10) )
param <- c("alpha","beta")
mle <- c(0.80, 12)
cdf <- quote( 1-exp( -(x/beta)^alpha ) )
pdf <- quote( alpha/beta*(x/beta)^(alpha-1)*exp( -(x/beta)^alpha ) )
lb <- 0
ub <- Inf
nboot <- 200
coverage <- 0.95
plan <- rtype2(n = n, R = R, param = param, mle = mle, cdf = cdf, lb = lb, ub = ub)
bootbctype2(plan = plan, param = param, mle = mle, cdf = cdf, pdf = pdf, lb = lb, ub = ub,
nboot = nboot, coverage = coverage)
}
|
bc6cd08f013ffb277627d90938aed559d5cb3bd9
|
4a00886d627412c19bfa4c6e664dc44740a3d675
|
/man/IRB.Rd
|
b3c47f98e151463a3e5907f641454a86069b6e3f
|
[] |
no_license
|
mireia-bioinfo/plotRegulome
|
a668abe92445594bbdba54014b08388cfea7378d
|
21a46e971c4f249dd84073faa437350a8b17d290
|
refs/heads/master
| 2021-06-25T12:34:50.322389
| 2020-12-30T16:30:54
| 2020-12-30T16:30:54
| 175,781,508
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 331
|
rd
|
IRB.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IRB.R
\docType{data}
\name{IRB}
\alias{IRB}
\title{IRB}
\format{List containing each type of dataset with the available dataset names.}
\usage{
IRB
}
\description{
Dataset containing all available dataset names to use for plotting.
}
\keyword{datasets}
|
e9bddc2f5eba62b0aee1585bbd405863abf53da4
|
c96347a1f9e940bb3b071178190cf6c2dd39bafe
|
/plot4.R
|
6e7ad784ac866059ebe2fa1d75251c4ec8d4e23d
|
[] |
no_license
|
alexcastilio/ExData_Plotting1
|
e8730007da72394891328aede1daef716c4d0185
|
21e6cea6fcd31b453613f4b2443bd0cb34a57c7c
|
refs/heads/master
| 2021-01-22T12:56:55.252473
| 2015-05-10T21:55:23
| 2015-05-10T21:55:23
| 35,383,460
| 0
| 0
| null | 2015-05-10T18:49:25
| 2015-05-10T18:49:25
| null |
UTF-8
|
R
| false
| false
| 1,504
|
r
|
plot4.R
|
plot1 <- function() {
  # Build plot4.png: a 2x2 panel of household power-consumption graphs
  # for 2007-02-01 and 2007-02-02.
  # NOTE(review): the function lives in plot4.R and writes plot4.png, so
  # the name `plot1` looks historical; kept unchanged for callers.
  # Read the full data set; "?" marks missing values.
  data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                     na.strings = "?",
                     colClasses = c("character", "character", "numeric", "numeric",
                                    "numeric", "numeric", "numeric", "numeric",
                                    "numeric"))
  # Keep only the two target days (dates are d/m/Y strings at this point).
  data <- data[data$Date == "1/2/2007" | data$Date == "2/2/2007", ]
  # Hoist the datetime computation out of the individual plots: the
  # original recomputed as.POSIXct(paste(Date, Time)) six times and
  # stored an intermediate POSIXlt column in the data frame.
  datetime <- as.POSIXct(strptime(paste(data$Date, data$Time),
                                  format = "%d/%m/%Y %H:%M:%S"))
  # Open the graphics device.
  png("plot4.png", width = 480, height = 480)
  # 2x2 panel layout.
  par(mfrow = c(2, 2))
  # Top-left: histogram of global active power.
  hist(data$Global_active_power, main = "",
       xlab = "Global Active Power (kilowatts)", col = "red", ylim = c(0, 1200))
  # Top-right: voltage over time.
  plot(datetime, data$Voltage, ylab = "Voltage", xlab = "datetime", type = "l")
  # Bottom-left: the three sub-metering series with a legend.
  plot(datetime, data$Sub_metering_1, ylab = "Energy sub metering", xlab = "",
       type = "n")
  points(datetime, data$Sub_metering_1, type = "l")
  points(datetime, data$Sub_metering_2, col = "red", type = "l")
  points(datetime, data$Sub_metering_3, col = "blue", type = "l")
  legend("topright",
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         col = c("black", "red", "blue"), lwd = 1)
  # Bottom-right: global reactive power over time.
  plot(datetime, data$Global_reactive_power, ylab = "Global_reactive_power",
       xlab = "datetime", type = "l")
  # Close the PNG device.
  dev.off()
}
|
5ba35a202d7bff71eceaa4e188ba0c2396d55976
|
1b6ee52e76965af92946adb8dcab35b366c1135a
|
/circuits/adder.R
|
e90c2e89f9a2f4f4393982b9f649b7a7d4e23eea
|
[] |
no_license
|
SalonikResch/QuantumNoiseProfiling
|
985e269f194fbf09c8a74f98ea7aeb7608e242d9
|
5e670bc57177ac9125d465d7571afd7bdbb10712
|
refs/heads/main
| 2023-05-26T16:36:07.182968
| 2021-06-03T02:08:45
| 2021-06-03T02:08:45
| 373,354,204
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,489
|
r
|
adder.R
|
# Decompose a Toffoli (CCX) gate with controls a, b and target c into
# the standard 15-gate sequence of H, T, T' and CNOT gates.
# Each gate is encoded as list(name, qubit(s), ''), the representation
# used throughout the circuit builders. (Inside the body, `c(...)` still
# resolves to base::c because the parameter `c` is not a function.)
Toffoli <- function(a, b, c) {
  list(
    list('H',  c,       ''),
    list('CX', c(b, c), ''),
    list("T'", c,       ''),
    list('CX', c(a, c), ''),
    list('T',  c,       ''),
    list('CX', c(b, c), ''),
    list("T'", c,       ''),
    list('CX', c(a, c), ''),
    list('T',  b,       ''),
    list('T',  c,       ''),
    list('CX', c(a, b), ''),
    list('H',  c,       ''),
    list('T',  a,       ''),
    list("T'", b,       ''),
    list('CX', c(a, b), '')
  )
}
# Gate sequence for a one-bit full adder on qubits cin (carry-in),
# a, b (inputs) and cout (carry-out): two Toffoli decompositions
# interleaved with CNOTs, concatenated into one flat gate list.
# NOTE(review): which qubit ends up holding the sum vs. the carry is
# not visible here -- confirm against the callers/schedule code.
FullAdd <- function(cin, a, b, cout) {
  c(
    Toffoli(a = a, b = b, c = cout),
    list(list('CX', c(a, b), '')),
    Toffoli(a = cin, b = b, c = cout),
    list(list('CX', c(cin, b), ''))
  )
}
# Build an n-qubit ripple-carry adder circuit out of chained FullAdd
# blocks. With k = (n-1)/3 stages, stage j acts on qubits
# (3(j-1), ..., 3(j-1)+3) so each stage's carry-out qubit doubles as
# the next stage's carry-in; `n` is therefore expected to be 3k + 1.
#
# schedule: when TRUE, return only the gate schedule (for graphing)
#           instead of a full circuit.
# NOTE(review): the parameter `schedule` shadows the `schedule()`
# function called below; R resolves `schedule(...)` to the function
# because the parameter is not callable, but the shadowing is fragile.
adder_ckt <- function(n,schedule=FALSE){
  nQubits <- n #
  # Re-use `n` as the number of full-adder stages from here on.
  n <- (n-1)/3
  #circuit <- list()
  gates <- list()
  for(j in 1:n){
    idx <- 3*(j-1)
    gates <- c(gates,FullAdd(cin=idx,a=idx+1,b=idx+2,cout=idx+3))
  }
  #If just want the schedule (for graphing purposes)
  if(schedule)
    return(schedule(nQubits=nQubits,gates=gates))
  #Normally, get schedule and make a circuit
  return(schedule2circuit(nQubits=nQubits,schedule(nQubits=nQubits,gates=gates)))
}
|
73206657a5682a869e56b8093eccb9b4ae501b7a
|
1d028fb3473f1bcbb3e0c899f79dd435b8c275a6
|
/tests/testthat.R
|
914dad626468a1ed235104380adda2ef58cd5944
|
[] |
no_license
|
cran/samplingbook
|
a2257b7c259249eaf0c8cfeae1aee0a37659eb6f
|
1253c518ff04c41c537b0100bebaadd94b96a0ae
|
refs/heads/master
| 2021-07-11T12:20:27.502856
| 2021-04-02T20:40:03
| 2021-04-02T20:40:03
| 17,699,456
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 72
|
r
|
testthat.R
|
# Standard testthat entry point: load the package under test and run
# every test file under tests/testthat/.
library(testthat)
library(samplingbook)
test_check("samplingbook")
|
befd59763145c698f3881511c7e5859c1d16ec58
|
0abf4159b861a4e19ed0941fb85c55746ea47ac0
|
/R/importZikaData.R
|
2cee956d44f3d14d776f0dad90bbd9496e340732
|
[] |
no_license
|
smorsink1/ncov2019
|
a31b9953cdb10241986041e74d8671f4a6797760
|
e309e12e371fa3dcd73412bc894d9db13efacbc6
|
refs/heads/master
| 2021-04-15T01:55:06.487031
| 2020-03-26T18:03:15
| 2020-03-26T18:03:15
| 249,284,980
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,766
|
r
|
importZikaData.R
|
#' Downloads Zika virus data
#'
#' Imports the Kaggle Zika virus data from a public Github repository.
#'
#' @return Output is a dataframe with rows for date-location-case_type pairs
#'
#' @importFrom readr read_csv cols col_character
#' @importFrom magrittr %>%
#'
#' @examples
#' scrapeZikaData()
#'
#' @export
#'
scrapeZikaData <- function() {
  link <- "https://raw.githubusercontent.com/mcolon21/ncov2019data/master/cdc_zika.csv"
  # Read value/report_date as character so the known dirty entries below
  # survive import and can be repaired before type conversion.
  data <- tryCatch(readr::read_csv(link, col_types = readr::cols(value = readr::col_character(),
                                                                 report_date = readr::col_character())),
                   error = function(e) stop ("Data no longer found at url"))
  # Known parsing issues in the raw file:
  # * stray "*" suffixes in `value` for some Brazil entries
  #   (e.g. "125*5" should be 125, "5*" should be 5)
  # * underscores in Puerto Rico date formats ("2016_03_30", sometimes
  #   mixed as "2016_04-06")
  # Strip everything from the first "*" onward, for every row. The
  # original patched three hard-coded row indices (2415, 2784, 5193),
  # which silently breaks whenever the upstream file gains or loses
  # rows; the vectorized form is a no-op for clean values.
  data$value <- sub("\\*.*$", "", data$value)
  data$value <- as.integer(data$value)
  # Normalise "_" date separators to "-" so as.Date can parse them.
  report_date_dashed <- gsub(pattern = "_", replacement = "-", x = data$report_date)
  data$report_date <- as.Date(report_date_dashed)
  return (data)
}
#' Reformat Zika Data
#'
#' Imports data from public github repo on Zika cases by date and location,
#' then reformats the data into date/location/values type combinations
#'
#' @return Output is a dataframe with columns for province, region, date, value, and value_type
#'
#' @importFrom magrittr %>%
#' @importFrom dplyr select rename filter
#'
#' @examples
#' reformatZikaData()
#'
#' @export
#'
reformatZikaData <- function() {
  data <- scrapeZikaData()
  data$disease <- "zika"
  # province and location: `location` is "Region-Province"; split it into
  # the two components (province is NA when no "-" is present).
  data$region <- data$location %>%
    strsplit(split = "-") %>%
    sapply(FUN = `[`, 1)
  data$province <- data$location %>%
    strsplit(split = "-") %>%
    sapply(FUN = `[`, 2)
  data_tidy <- data %>%
    # Brazil subregions (which have cumulative stats) are coded as "region"
    # since they simply accumulate data that is already present, these rows are removed
    dplyr::filter(!(region %in% c("Centro", "Nordeste", "Norte", "Sudeste", "Sul"))) %>%
    # drop unnecessary columns
    dplyr::select(-data_field_code, -time_period, -time_period_type, -unit,
                  -location, -location_type) %>%
    # renaming and reordering to match consistent format
    dplyr::rename("value_type" = "data_field", "date" = "report_date") %>%
    # dropping non-zika reports
    dplyr::filter(!grepl("microcephaly", value_type)) %>%
    dplyr::select(disease, province, region, date, value, value_type)
  return (data_tidy)
}
#' Clean Zika Data
#'
#' Imports data from public github repo on Zika cases by date and location,
#' reformats the data into date/location/values type combinations, and cleans
#' the values_type column to give proxies for confirmed cases for each row
#'
#' @return Output is a dataframe with columns for province, region, date, value, and value_type
#'
#' @importFrom dplyr filter group_by summarize mutate bind_rows recode
#' @importFrom tidyr pivot_wider
#' @importFrom magrittr %>%
#'
#' @examples
#' cleanZikaData()
#'
#' @export
#'
cleanZikaData <- function() {
  data <- reformatZikaData()
  # Each reporting country encodes its counts under different value_type
  # codes, so normalisation is done region-by-region; every branch maps
  # its source codes onto "cumulative_confirmed_cases" (or
  # "cumulative_suspected_cases" where no confirmed counts exist).
  data_split <- split(data, data$region)
  # Argentina
  data_split[["Argentina"]] <- data_split[["Argentina"]] %>%
    dplyr::filter(value_type %in% c("cumulative_confirmed_imported_cases",
                                    "cumulative_confirmed_local_cases")) %>%
    dplyr::group_by(disease, province, region, date) %>%
    dplyr::summarize(value = sum(value, na.rm = TRUE)) %>%
    dplyr::mutate(value_type = "cumulative_confirmed_cases")
  # Brazil
  data_split[["Brazil"]] <- data_split[["Brazil"]] %>%
    dplyr::filter(value_type == "zika_reported") %>%
    dplyr::mutate(value_type = "cumulative_confirmed_cases") %>%
    dplyr::filter(!is.na(province)) # NA rows are cumulative rows
  # Colombia (significant problems with reporting)
  data_split[["Colombia"]] <- data_split[["Colombia"]] %>%
    tidyr::pivot_wider(names_from = value_type, values_from = value, values_fn = list(value = sum)) %>%
    dplyr::group_by(disease, province, region, date) %>%
    dplyr::summarize("value" = sum(zika_confirmed_laboratory, na.rm = TRUE) + sum(zika_confirmed_clinic, na.rm = TRUE)) %>%
    dplyr::mutate(value_type = "cumulative_confirmed_cases")
  # Dominican_Republic
  data_split[["Dominican_Republic"]] <- data_split[["Dominican_Republic"]] %>%
    dplyr::filter(grepl("zika", value_type)) %>%
    dplyr::filter(value_type == "zika_confirmed_pcr_cumulative") %>%
    dplyr::mutate(value_type = "cumulative_confirmed_cases") %>%
    ## the rows with NA for province are totals across DR, numbers check out, so
    dplyr::filter(!is.na(province))
  # Ecuador
  data_split[["Ecuador"]] <- data_split[["Ecuador"]] %>%
    dplyr::filter(value_type == "total_zika_confirmed_autochthonous") %>%
    dplyr::group_by(disease, province, region, date) %>%
    dplyr::summarize(value = sum(value)) %>%
    dplyr::mutate(value_type = "cumulative_confirmed_cases")
  # El_Salvador
  data_split[["El_Salvador"]] <- data_split[["El_Salvador"]] %>%
    dplyr::filter(value_type %in% c("cumulative_suspected_total", "cumulative_confirmed")) %>%
    dplyr::mutate(value_type = dplyr::recode(value_type,
                                             "cumulative_suspected_total" = "cumulative_suspected_cases",
                                             "cumulative_confirmed" = "cumulative_confirmed_cases")) %>%
    ## the rows with NA for province are totals across region, numbers check out, so
    dplyr::filter(!is.na(province))
  # Guatemala
  data_split[["Guatemala"]] <- data_split[["Guatemala"]] %>%
    dplyr::filter(value_type == "total_zika_confirmed_cumulative") %>%
    dplyr::mutate(value_type = "cumulative_confirmed_cases") %>%
    ## the rows with NA for province are totals across region, numbers check out, so
    dplyr::filter(!is.na(province))
  # Haiti (only suspected counts are reported)
  data_split[["Haiti"]] <- data_split[["Haiti"]] %>%
    dplyr::filter(value_type == "total_zika_new_suspected_cumulative") %>%
    dplyr::mutate(value_type = "cumulative_suspected_cases") %>%
    dplyr::filter(!is.na(province)) # NA row is a cumulative row
  # Mexico (weekly counts: accumulate per province with cumsum)
  data_split[["Mexico"]] <- data_split[["Mexico"]] %>%
    dplyr::filter(value_type == "weekly_zika_confirmed") %>%
    dplyr::group_by(disease, province, region) %>%
    dplyr::mutate(value = cumsum(value)) %>%
    dplyr::mutate(value_type = "cumulative_confirmed_cases")
  # Nicaragua
  data_split[["Nicaragua"]] <- data_split[["Nicaragua"]] %>%
    dplyr::filter(value_type == "total_zika_confirmed_cumulative") %>%
    dplyr::mutate(value_type = "cumulative_confirmed_cases") %>%
    ## most data has NA for province (assume it's country-wide)
    dplyr::mutate(province = "Nicaragua")
  # Panama
  data_split[["Panama"]] <- data_split[["Panama"]] %>%
    dplyr::filter(grepl("Zika_confirmed_laboratory", value_type)) %>%
    dplyr::group_by(disease, province, region, date) %>%
    dplyr::summarize(value = sum(value)) %>%
    dplyr::mutate(value_type = "cumulative_confirmed_cases")
  # Puerto_Rico
  data_split[["Puerto_Rico"]] <- data_split[["Puerto_Rico"]] %>%
    dplyr::filter(value_type == "zika_confirmed_cumulative_2015-2016") %>%
    dplyr::mutate(value_type = "cumulative_confirmed_cases") %>%
    ## all data has NA for province (assume it's country-wide)
    dplyr::mutate(province = "Puerto_Rico")
  # United_States
  data_split[["United_States"]] <- data_split[["United_States"]] %>%
    dplyr::group_by(disease, province, region, date) %>%
    dplyr::summarize(value = sum(value)) %>%
    dplyr::mutate(value_type = "cumulative_confirmed_cases")
  # United_States_Virgin_Islands
  data_split[["United_States_Virgin_Islands"]] <- data_split[["United_States_Virgin_Islands"]] %>%
    dplyr::filter(value_type == "zika_reported") %>%
    dplyr::mutate(value_type = "cumulative_confirmed_cases") %>%
    dplyr::filter(!is.na(province)) # NA row is cumulative
  # Recombine the per-region pieces into one tidy data frame.
  return (dplyr::bind_rows(data_split))
}
#' Import Zika Data
#'
#' Imports data from public github repo on Zika cases by date and location,
#' reformats, cleans, and merges with population and latitude-longitude data
#'
#' @param from_web defaults to FALSE: whether to import from the web or from the package
#'
#' @return Output is a dataframe with columns for disease (zika), province (location specific),
#' region (location general), value, value_type, pop_2016, lat (latitude), long (longitude)
#'
#' @importFrom magrittr %>%
#' @importFrom dplyr left_join select rename
#'
#' @examples
#' importZikaData() # from_web defaults to FALSE
#'
#' @export
#'
importZikaData <- function(from_web = FALSE) {
  if (!from_web) {
    # Use the snapshot bundled with the package; data() loads the
    # `zika_data` object into this function's environment (not globally).
    data("zika_data", envir = environment())
    return (zika_data)
  }
  # Fresh pull: scrape + clean, then attach 2016 population and
  # lat/long coordinates keyed on the Zika region name.
  pop_map <- buildPopulationMap() %>%
    dplyr::select(zika_name, pop_2016)
  coord_map <- buildCoordinateMap() %>%
    dplyr::select(zika_name, latitude, longitude) %>%
    dplyr::rename("lat" = "latitude", "long" = "longitude")
  zika_data <- cleanZikaData() %>%
    dplyr::left_join(pop_map, by = c("region" = "zika_name")) %>%
    dplyr::left_join(coord_map, by = c("region" = "zika_name"))
  return (zika_data)
}
|
e1330a056fbcfbfc6ed5e71f48bea3bdf39b4f25
|
8e82a1e639f05beed4b76893c80e91b6453879fa
|
/R/Proteomics.R
|
266a09c2d6d775589b7a03323ae6b74f543c024a
|
[
"Apache-2.0"
] |
permissive
|
dlroxe/EIF-analysis
|
52f7cba51f36b0f6527f50fb671b96da27a9095a
|
6189c6ecc9368c188c7532fae402a0cfe4cdaa03
|
refs/heads/master
| 2020-07-30T11:24:01.302589
| 2020-01-12T07:56:13
| 2020-01-12T07:56:13
| 210,213,138
| 0
| 0
|
NOASSERTION
| 2020-01-11T19:51:55
| 2019-09-22T20:56:37
|
R
|
UTF-8
|
R
| false
| false
| 3,065
|
r
|
Proteomics.R
|
# Exploratory proteomics walk-through based on the RforProteomics
# tutorial: import raw MS data, inspect spectra and reporter ions, fetch
# a ProteomeXchange dataset, draw spectrum/heat maps and quantify iTRAQ4
# reporter peaks.
# Install RforProteomics only when it is missing; the original script
# re-installed it unconditionally on every run.
if (!requireNamespace("RforProteomics", quietly = TRUE)) {
  BiocManager::install("RforProteomics")
}
library("ggplot2")       ## Convenient and nice plotting
library("mzR")
library("MSnbase")       # moved before first use of readMSData()/itraqdata;
                         # the original loaded it after those calls and only
                         # worked because RforProteomics attaches it.
library("RColorBrewer")  ## Color palettes
library("RforProteomics")
library("reshape2")      ## Flexibly reshape data
library("rpx")
###########################
## Importing experiments ##
###########################
# MSnbase is able to import raw MS data stored in XML-based formats, mzXML,
# mzData and mzML
file <- dir(system.file(package = "MSnbase", dir = "extdata"),
            full.names = TRUE, pattern = "mzXML$")
rawdata <- readMSData(file, msLevel = 2, verbose = FALSE)
itraqdata
head(fData(itraqdata))
#####################
## Spectra objects ##
#####################
# The raw data is composed of the 55 MS spectra. The spectra are named
# individually (X1, X10, X11, X12, X13, X14, ...) and stored in an environment
spectra(itraqdata)
sp <- itraqdata[["X1"]]
sp
peaksCount(sp)
head(peaksCount(itraqdata))
rtime(sp)
head(rtime(itraqdata))
###################
## Reporter ions ##
###################
# ReporterIons instances are required to quantify reporter peaks in MSnExp experiments
iTRAQ4
TMT10
##########################
## Chromatogram objects ##
##########################
###################
## MS data space ##
###################
# a list of recent PX additions and updates
pxannounced()
# Pharmacoproteomic characterisation of human colon and rectal cancer - CPTAC Full Proteomes
# rpx package provides access to the ProteomeXchange (PX) central repository
px <- PXDataset("PXD005354")
px <- PXDataset("PXD000001")
px
pxtax(px)
pxurl(px)
pxref(px)
# All files available for the PX experiment
pxfiles(px)
fn <- "TMT_Erwinia_1uLSike_Top10HCD_isol2_45stepped_60min_01-20141210.mzML"
# download dataset with pxget function
mzf <- pxget(px, fn)
mzf
## reads the data
ms <- openMSfile(mzf)
ms
hd <- header(ms)
dim(hd)
names(hd)
hd[1000, ]
head(peaks(ms, 1000))
plot(peaks(ms, 1000), type = "h")
## a set of spectra of interest: MS1 spectra eluted
## between 30 and 35 minutes retention time
ms1 <- which(hd$msLevel == 1)
rtsel <- hd$retentionTime[ms1] / 60 > 30 &
  hd$retentionTime[ms1] / 60 < 35
## the heat map over m/z 521-523 for the selected retention window
M <- MSmap(ms, ms1[rtsel], 521, 523, .005, hd)
plot(M, aspect = 1, allTicks = FALSE)
plot3D(M)
# Wider m/z window between the first two selected MS1 scans.
i <- ms1[which(rtsel)][1]
j <- ms1[which(rtsel)][2]
M2 <- MSmap(ms, i:j, 100, 1000, 1, hd)
plot3D(M2)
plot(sp, reporters = iTRAQ4, full = TRUE)
#################
## MS Spectra ##
#################
plot(sp, reporters = iTRAQ4, full = TRUE)
sel <- fData(itraqdata)$ProteinAccession == "BSA"
bsa <- itraqdata[sel]
bsa
as.character(fData(bsa)$ProteinAccession)
plot(bsa, reporters = iTRAQ4, full = FALSE) + theme_gray(8)
#####################
## MS Chromatogram ##
#####################
#########################
## Raw data processing ##
#########################
# Remove low-intensity peaks (threshold 400) before quantification.
experiment <- removePeaks(itraqdata, t = 400, verbose = FALSE)
ionCount(itraqdata[["X55"]])
ionCount(experiment[["X55"]])
# Quantify iTRAQ4 reporter ions by trapezoidal integration.
qnt <- quantify(experiment,
                method = "trap",
                reporters = iTRAQ4,
                strict = FALSE,
                verbose = FALSE)
qnt
|
3e2b13d8b54e308586ece87c15978cb51b982a7b
|
2df42b13fef6978ad09b407c6791031a959f449f
|
/man/downloadProjectZip.Rd
|
a2443eea1bff542f8373bd4c51a5fb313d26c421
|
[] |
no_license
|
Sea2Data/Rstox
|
4284021138ea244eaaccded3f7728f9cc06cb03d
|
71367f11deec42791e809c28cdf7752c5c6ca1f3
|
refs/heads/master
| 2023-03-07T00:03:22.039374
| 2019-02-08T22:40:17
| 2019-02-08T22:40:17
| 90,259,495
| 1
| 3
| null | 2022-01-05T12:36:08
| 2017-05-04T12:15:33
|
R
|
UTF-8
|
R
| false
| true
| 1,118
|
rd
|
downloadProjectZip.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rstox_base.r
\name{downloadProjectZip}
\alias{downloadProjectZip}
\title{Download a zipped StoX project to a specified project path.}
\usage{
downloadProjectZip(URL, projectName = NULL, projectRoot = NULL,
cleanup = TRUE, ow = TRUE, msg = TRUE, onlyone = TRUE)
}
\arguments{
\item{URL}{The URL of the zipped project.}
\item{projectName}{The name or full path of the project, a baseline object (as returned from \code{\link{getBaseline}} or \code{\link{runBaseline}}, og a project object (as returned from \code{\link{openProject}}).}
\item{projectRoot}{The root directory of the project in which to save the downloaded files (set this if you wish to place the files in a project specified by its name, and not in the default root directory).}
\item{cleanup}{Logical: if FALSE, the downloaded zip file is not deleted.}
\item{ow, msg}{See \code{\link{getow}}.}
\item{onlyone}{Logical: If TRUE, only one project is checked (no for loop).}
}
\description{
Download a zipped StoX project to a specified project path.
}
\keyword{internal}
|
503877f0018235114559e8778dd30e78b00f3473
|
0a13213a5bd373eee3e67bb27260c9f59f0f6ac8
|
/R6.R
|
f42bc145a8ea795ec9cc3be06195714f8b0f569a
|
[
"Apache-2.0"
] |
permissive
|
mt-christo/r-prog
|
6757afa4071c20b0c45f2fc0f4d4692364add84d
|
11bdaef32ae7580cc73761b37a60c0b6801e8c00
|
refs/heads/master
| 2020-07-13T11:57:36.686887
| 2019-02-23T11:25:08
| 2019-02-23T11:25:08
| 26,853,999
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,549
|
r
|
R6.R
|
snow1D_R___ <- function(arr,arr_length,i,wgh,br_length){
a = arr
a[i] = a[i]+wgh
Excess = 0
Num_Avalanche = 0
Av_Array = c(0)
av_lengths = array(0,2)
if(a[i]>=1){
a[i] = a[i]-1
nb = ceiling(runif(2)*br_length)
l1 = if(nb[1]>0 && nb[1]<=arr_length) Recall(a,arr_length,nb[1],0.5,br_length) else list(a,0.5,0,c(0))
l2 = if(nb[2]>0 && nb[2]<=arr_length) Recall(l1[[1]],arr_length,nb[2],0.5,br_length) else list(l1[[1]],0.5,0,c(0))
a = l2[[1]]
Excess = l1[[2]]+l2[[2]]
Num_Avalanche = 1+l1[[3]]+l2[[3]]
av_lengths = c(length(l1[[4]]),length(l2[[4]]))
Av_Array = c(1,array(0,max(av_lengths)))
ii = 0
for(i in 2:length(Av_Array)){
ii = i-1
if(ii<=av_lengths[1]) Av_Array[i]=Av_Array[i]+l1[[4]][ii]
if(ii<=av_lengths[2]) Av_Array[i]=Av_Array[i]+l2[[4]][ii]
}
}
list(a,Excess,Num_Avalanche,Av_Array)
}
snow_1D_STEP_plain___ <- function(arr,av_points,br_length){
a = arr; Excess=0; arr_len = length(a); arr_br_len = round(arr_len*(1+br_length))
for(p in av_points){
a[p] = a[p]-1
av_recs = ceiling(runif(2)*arr_br_len)
if(av_recs[1]<=arr_len) a[av_recs[1]] = a[av_recs[1]]+0.5 else Excess = Excess+0.5
if(av_recs[2]<=arr_len) a[av_recs[2]] = a[av_recs[2]]+0.5 else Excess = Excess+0.5
}
list(a,Excess)
}
snow1D_L___ <- function(arr,i,wgh,params,avalanche_func){
#print('snow1D_L started')
#print(arr)
#print(i)
a=arr; a[i]=a[i]+wgh
Excess=0; Num_Avalanche=0; Av_Array=c(length(which(a>1))); av_points=c()
step_i = 1
while(max(a)>1){
av_points = which(a>1)
av_res = run_func_with_params(avalanche_func,c(list(a),list(av_points),params))
a = av_res[[1]]
Excess = Excess+av_res[[2]]
Num_Avalanche = Num_Avalanche+length(av_points)
Av_Array = c(Av_Array,length(av_points))
step_i = step_i+1
#print(step_i)
}
list(a,Excess,Num_Avalanche,Av_Array)
}
if(1==0){
library(hash); source("~/R/R4.R"); source("~/R/R5.R"); source("~/R/R6.R");
ts_len=10000;actors_count=100;wgh_in=0.3;params=list(0.1);avalanche_func=snow_1D_STEP_plain;filename_prefix='~/R/test-res-plain'
params=list(ts_len,actors_count,wgh_in,params,avalanche_func);calc_func=generate_snow_arrs;filename_prefix=filename_prefix
ts_len=params[[1]];actors_count=params[[2]];wgh_in=params[[3]];avalanche_func=params[[5]];params=params[[4]]
}
#ts_len=params[[1]];actors_count=params[[2]];wgh_in=params[[3]];avalanche_func=params[[5]];params=params[[4]]
generate_snow_arrs = function(ts_len,actors_count,wgh_in,params,avalanche_func){
print('generate_snow_arrs Started')
arr = array(0,actors_count)
s1 = array(0,ts_len)
s2 = array(0,ts_len)
s3 = array(0,ts_len)
s4 = c(0)
for(i in 1:ts_len){
sn = snow1D_L(arr,ceiling(runif(1)*actors_count),wgh_in,params,avalanche_func)
arr = sn[[1]]
s1[i] = sn[[2]]
s2[i] = sn[[3]]
s3[i] = sum(arr)
s4 = c(s4,cumsum(sn[[4]]))
if(i%%1000==0) print(i)
}
list(s1,s2,s3,s4)
}
#ts_len=10000;actors_count=100;wgh_in=0.3;params=list(101);avalanche_func=snow_1D_STEP_plain;filename_prefix='~/R/test-res-plain'
generate_ALS_ts = function(ts_len,actors_count,wgh_in,params,avalanche_func,filename_prefix){
print('generate_ALS_ts Started')
cors = calc_data_params_vals(list(ts_len,actors_count,wgh_in,params,avalanche_func),generate_snow_arrs,filename_prefix)[[4]]
print('generate_ALS_ts - filename loaded')
cors[cors>actors_count] = actors_count
print(1)
cors_len = length(cors)
print(2)
r_cors = sign(runif(cors_len)-0.5)*cors
print(3)
idx = c()
print(4)
for(i in 1:actors_count){
print(i)
idx = i>cors
r_cors[idx] = r_cors[idx] + sign((runif(cors_len)-0.5))
if(i%%100==0)
print(i)
}
r_cors = r_cors/actors_count
cumsum(r_cors)
}
if(1==0){
library(hash); source("~/R/R4.R"); source("~/R/R5.R"); source("~/R/R6.R");
len_in=10000; mlen_in=100; wgh_in=0.1; add_mlen_in=0.02
a1 = array(0,mlen_in)
s1 = array(0,len_in)
s2 = array(0,len_in)
s3 = array(0,len_in)
s4 = c(0)
for(i in 1:len_in){
sn = snow1D_L(a1,ceiling(runif(1)*mlen_in),wgh_in,add_mlen_in,snow_1D_STEP_plain)
a1 = sn[[1]]
s1[i] = sn[[2]]
s2[i] = sn[[3]]
s3[i] = sum(a1)
s4 = c(s4,sn[[4]])
if(i%%1000==0) print(i)
}
plot(s3)
hist(s2,br=100)
}
|
5e0f4e810bbb0bc46f648c08a69e44f7249f5fe0
|
f1df80ec987a517546a34c8589691206e079fc8b
|
/R/imagetools.r
|
851246fcf4added8872ee8ad7c3b2bd3f3887747
|
[] |
no_license
|
skranz/sktools
|
3c38b49d990a2f6e18bb92b614f8b47a79a7fc42
|
8e629e09f0b72b1471b4a4eb89f3ada0a43e4aaf
|
refs/heads/master
| 2021-07-11T23:03:31.263729
| 2021-04-06T05:34:30
| 2021-04-06T05:34:30
| 9,919,328
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,699
|
r
|
imagetools.r
|
#' Uses Manipulate to interactively change some parameters of an image.plot
#'@export
explore.image = function(x,y,z,xlab="x",ylab="y",main="",num.colors=30,add.plot.fun = NULL, pal.colors=c("red","white","blue")) {
library(fields)
library(manipulate)
# Specify a palette
my.palette <- colorRampPalette(pal.colors,space = "Lab")
library(fields)
image.fun = function(x,y,z,num.colors,focus,xL,xH,yL,yH,...) {
col = my.palette(num.colors)
zlim = range(z)
at.rel = seq(0,1,length=NROW(col)+1)
at.rel = at.rel ^ (abs(focus)^sign(-focus))
at = zlim[1] + diff(zlim)*at.rel
image.plot(x=x,y=y,z=z,
main=main,xlab = xlab,ylab = ylab,
xlim=c(xL,xH),ylim=c(yL,yH),
col=col,breaks=at)
if (!is.null(add.plot.fun)) {
add.plot.fun(xL=xL,xH=xH,
yL=yL,yH=yH,num.colors=num.colors,...)
}
}
xrange=range(x)
yrange=range(y)
zrange=range(z)
control = list(
focus = slider(-100,100,0,step=1,label="focus"),
num.colors = slider(2,200,30,step=1,label="Number of Colors"),
xL = slider(xrange[1],xrange[2],xrange[1],step=(xrange[2]-xrange[1])/1000),
xH = slider(xrange[1],xrange[2],xrange[2],step=(xrange[2]-xrange[1])/1000),
yL = slider(yrange[1],yrange[2],yrange[1],step=(yrange[2]-yrange[1])/1000),
yH = slider(yrange[1],yrange[2],yrange[2],step=(yrange[2]-yrange[1])/1000)
)
manipulate(image.fun(x=x,y=y,z=z,num.colors=num.colors,
focus=focus,xL=xL,xH=xH,yL=yL,yH=yH),
control)
}
#' Uses Manipulate to explore the function z.fun
#' @export
explore.3d.fun = function(z.fun,plot.type="image",xrange,yrange=xrange,main="Function Explorer",xlab="x",ylab="y",num.colors=30,
pal.colors=c("red","white","blue"),Vectorize.z.fun = TRUE,grid.length.default=8,num.color.default=100,image.fun=NULL,
extra.control = list(),add.plot.fun = NULL,...) {
library(fields)
library(manipulate)
# Specify a palette
my.palette <- colorRampPalette(pal.colors,space = "Lab")
if (is.null(image.fun)) {
library(fields)
if (plot.type=="image") {
image.fun = function(x,y,z,col,focus,...) {
zlim = range(z)
at.rel = seq(0,1,length=NROW(col)+1)
at.rel = at.rel ^ (abs(focus)^sign(-focus))
at = zlim[1] + diff(zlim)*at.rel
image.plot(x=x,y=y,z=z,
main=main,xlab = xlab,ylab = ylab,
col=col,breaks=at)
}
} else if (plot.type=="persp") {
image.fun = function(x,y,z,col,theta=30,phi=20,...) {
drape.plot(x=x,y=y,z=z,
main=main,xlab = xlab,ylab = ylab,
col=col,theta=theta,phi=phi)
}
}
}
if (Vectorize.z.fun) {
z.fun = Vectorize(z.fun, vectorize.args=c("x","y"))
}
f = function(grid.length,xL,xH,yL,yH,num.color,focus,...) {
n = grid.length
if (xL>=xH || yL>=yH) return(NULL)
x = seq(xL,xH,length=n)
y = seq(yL,yH,length=n)
xy = expand.grid(x,y)
z = matrix(z.fun(xy[,1],xy[,2],...),n,n)
image.fun(x=x,y=y,z=z,col = my.palette(num.color),main=main,xlab=xlab,ylab=ylab,focus=focus,...)
if (!is.null(add.plot.fun)) {
add.plot.fun(grid.length=grid.length,xL=xL,xH=xH,yL=yL,yH=yH,num.color,...)
}
}
control = list(
xL = slider(xrange[1],xrange[2],xrange[1],step=(xrange[2]-xrange[1])/1000),
xH = slider(xrange[1],xrange[2],xrange[2],step=(xrange[2]-xrange[1])/1000),
yL = slider(yrange[1],yrange[2],yrange[1],step=(yrange[2]-yrange[1])/1000),
yH = slider(yrange[1],yrange[2],yrange[2],step=(yrange[2]-yrange[1])/1000),
grid.length = slider(2,200,grid.length.default,step=1),
num.color = slider(2,200,num.color.default,step=1)
)
if (plot.type=="persp") {
control = c(list(theta=slider(0,360,30,step=1),
phi=slider(0,360,20,step=1)),
control)
} else if (plot.type=="image") {
control = c(list(focus = slider(-100,100,0,step=1,label="focus")),
control)
}
control = c(extra.control,control)
expr = paste(names(control),"=",names(control),collapse=",")
expr = paste("f(",expr,",...)")
#print(expr)
expr = parse(text=expr)[[1]]
manipulate(eval(expr),control)
}
examples.explore.3d.fun = function() {
z.fun = function(x,y,a=1,b=1,c=1,d=1,e=1,...) {
a*x^2+b*y^2+c*x^3+d*y^3+e*x*y
}
explore.3d.fun(z.fun=z.fun,plot.type="image",xrange=c(-2,2),Vectorize.z.fun=F,
extra.control = list(a=slider(-5.0,5.0,1,step=0.01)),
num.color.default=30)
}
|
967f311665c1a6824a4f2f56ecd411625bc2769c
|
71fbbbaa53672ec7478c580f517f00ab7d24a917
|
/Team Case 1 - Code.R
|
cd9641de95e986522da597429688534db518961c
|
[] |
no_license
|
huonganh-nguyen/Personalized-Health-Care-Analytics
|
0d7ecb5ddb80cf239a507e7cac95f745a1382464
|
01462ac2bf6d4606c066a629a577bb59543d35fa
|
refs/heads/master
| 2021-05-06T20:43:19.843641
| 2017-11-29T19:49:42
| 2017-11-29T19:49:42
| 112,507,426
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,402
|
r
|
Team Case 1 - Code.R
|
### Question 1
# Stacked Bar Plot with Colors and Legend
counts <- table(DATA$smoke, DATA$ed.col)
#Proportional Stacked Bar Plot
prop = prop.table(counts, margin = 2)
barplot(prop, main="Smoking and College Education",
names.arg=c("No or Some College", "Completed College"), col=c("darkblue","red"),
legend = c("Nonsmoking", "Smoking"))
###Question 2
#First create vector of where pvals are significant based on the traditional method,
#conservative method, and fdr method
sig05 <- pvals < .05
sigcon <- pvals < (.05/45)
sigfdr <- pvals < fdr_cut(pvals, .001)
#Next find the differences between the traditional method and the other methods below
#The addition below will place a 1 where the two vectors are different
sig05andsigcon <- sig05 + sigcon
sig05andsigfdr <- sig05 + sigfdr
#By examining the vectors for where the 1 is, we can find the differences
sig05andsigcon
sig05andsigfdr
#In the fourth, fifth, and thirty-third indices, there is a difference in significance
ListLabels[4]
sig05[4]
sigcon[4]
sigfdr[4]
ListLabels[5]
sig05[5]
sigcon[5]
sigfdr[5]
ListLabels[33]
sig05[33]
sigcon[33]
sigfdr[33]
### Question 3
# Find the correlation between each variable and weight.
cor(DATA)
# Check if correction between each variable and weight is significant.
cor.test(DATA$black, DATA$weight)
cor.test(DATA$married, DATA$weight)
cor.test(DATA$boy, DATA$weight)
cor.test(DATA$tri1, DATA$weight)
cor.test(DATA$tri2, DATA$weight)
cor.test(DATA$tri3, DATA$weight)
cor.test(DATA$ed.hs, DATA$weight)
cor.test(DATA$ed.smcol, DATA$weight)
cor.test(DATA$ed.col, DATA$weight)
cor.test(DATA$mom.age, DATA$weight)
cor.test(DATA$smoke, DATA$weight)
cor.test(DATA$cigsper, DATA$weight)
cor.test(DATA$m.wtgain, DATA$weight)
cor.test(DATA$mom.age2, DATA$weight)
### Question 4
# Create a multiple regression with 14 variables against weight
reg<-glm(weight ~ black + married + boy +tri1+tri2+tri3+ed.hs+ed.smcol+ed.col+mom.age+smoke+cigsper+m.wtgain+mom.age2, data=DATA)
summary(reg)
# Apply the 0.05 cut-off
pvals<-summary(reg)$coef[,4]
sigvar<-which( pvals <.05)
sigvar
# Apply the conservative cut-off (alpha/n)
cons_cutoff<-0.05/14
cons_cutoff
conservative_sigvar<-which( pvals <cons_cutoff)
conservative_sigvar
# Apply the FDR cut-off of 0.001
fdr_cutoff<-fdr_cut(pvals,0.001)
# Find the significant variables
fdr_significant_var<-which( pvals<fdr_cutoff)
fdr_significant_var
|
24e04481fd94ef5b9b9dd96887da7b10e7c5db20
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sampler/examples/rpro.Rd.R
|
85b35c3ba25d4ecd5be5fdae143f43ef822e8430
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 209
|
r
|
rpro.Rd.R
|
library(sampler)
### Name: rpro
### Title: Calculate proportion and margin of error (simple random sample)
### Aliases: rpro
### ** Examples
rpro(df=opening, col_name=openTime, ci=95, na="n/a", N=5361)
|
b43ae790b58e6ebcba9ee6dd8e299273b156906b
|
2ac64f6ab560715d7460c32ecd5f47e3816a928b
|
/genderclassification.r
|
999b8a036a8adc4be261e38857287aa2e908e72d
|
[] |
no_license
|
gtople92/Gender-Classification-using-voice-data
|
6ff715d797406698a565df2f69a6c460dbfb7213
|
270f624bc1165f88ac1498598390e92fd616d60f
|
refs/heads/master
| 2022-08-20T09:49:09.734798
| 2020-05-25T21:28:19
| 2020-05-25T21:28:19
| 266,867,925
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,647
|
r
|
genderclassification.r
|
# Load the required libraries
library(dplyr)
library(Amelia)
library(ggplot2)
library(corrgram)
library(corrplot)
library(caTools)
library(caret)
library(gains)
library(class)
library(randomForest)
library(e1071)
library(psych)
library(neuralnet)
library(pROC)
library(gmodels)
library(tuneR)
library(psycho)
library(warbleR)
#Load the files
voice.df <- read.csv("voice.csv")
#Exploratory Data Analysis
head(voice.df)
str(voice.df)
summ <- summary(voice.df[,-21])
any(is.na(voice.df))
missmap(voice.df, main="Voice Data - Missings Map",col=c("yellow", "black"), legend=FALSE)
print(summ)
ggplot(voice.df, aes(meanfreq, fill = label)) + geom_histogram( color="black",alpha=0.3 ,bins = 30)
ggplot(voice.df, aes(mode)) + geom_histogram( color= "black",alpha=0.3 ,bins = 30)
smf <- summarise(group_by(voice.df,label),mean(mode))
voice.df[which(voice.df$mode == 0 & voice.df$label == "male"), "mode"] <-smf$`mean(mode)`[2]
voice.df[which(voice.df$mode == 0 & voice.df$label == "female"), "mode"] <- smf$`mean(mode)`[1]
ggplot(voice.df, aes(mode, fill = label)) + geom_histogram( color= "black",alpha=0.3 ,bins = 30)
ggplot(voice.df, aes(modindx, fill = label)) + geom_histogram( color= "black",alpha=0.3 ,bins = 30)
ggplot(voice.df, aes(dfrange, fill = label)) + geom_histogram( color= "black",alpha=0.3 ,bins = 30)
#Seperating Numerical columns
num.cols <- sapply(voice.df, is.numeric)
#Plotting Correlation plot
corr.data <- cor(voice.df[,num.cols])
corrplot(corr.data,method='number')
corrgram(voice.df,order=TRUE, lower.panel=panel.shade,upper.panel=panel.pie, text.panel=panel.txt)
#Function to Assign 1 to male and 0 to female
val <- function(lab){
temp <- 1:length(lab)
for (i in 1:length(lab)) {
if(lab[i] == "male"){
temp[i] <- 1
}
else{
temp[i] <- 0
}
}
return(temp)}
#Principal Component Analysis (PCA)
voice.pca<- prcomp(voice.df[,-21],scale. = T)
summary(voice.pca)
pc_var <- (voice.pca$sdev^2)/sum(voice.pca$sdev^2)
plot(pc_var, xlab = "Principal Component", ylab = "Proportion of Variance Explained", type = "b")
plot(voice.pca, main = "Principal Component Analysis")
voice.pca.imp<-as.data.frame(voice.pca$x[,1:10])
voice.pca.imp$label <- voice.df$label
#Split the data into training and validation data
set.seed(101)
split = sample.split(voice.pca.imp$label, SplitRatio = 0.7)
voice.train <- subset(voice.pca.imp,split== TRUE)
voice.test <- subset(voice.pca.imp,split== FALSE)
#Train and Build logistic regression model using PCA scores
start_time<-Sys.time()
logmodel <- glm(label ~ ., family = binomial(link = 'logit'),data = voice.train)
summary(logmodel)
# Predict the data
fitted.probability <- predict(logmodel,newdata = voice.test[,-11],type = 'response')
end_time<-Sys.time()
time.taken.logit <-end_time-start_time
time.taken.logit <- round(as.numeric(time.taken.logit),2)
fitted.results <- as.factor(ifelse(fitted.probability > 0.5,1,0))
logit.con <- confusionMatrix(as.factor(ifelse(fitted.results=="1", "male", "female")), voice.test[,11])
ct <-as.factor(ifelse(fitted.results=="1", "male", "female"))
CrossTable(ct, voice.test[,11])
print(logit.con$table)
accuracy.logit <- round(logit.con$overall[[1]] * 100 ,2)
print(paste("Accuracy :",accuracy.logit,"%"))
# KNN
start_time<-Sys.time()
acc <- 1:100
for(i in 1:100){
set.seed(101)
predicted.gender.knn <- knn(voice.train[,-11],voice.test[,-11],voice.train[,11],k=i)
c <- confusionMatrix(predicted.gender.knn, voice.test[,11])
acc[i] <- c$overall[[1]] * 100
}
acc <- as.data.frame(acc)
acc$knn <- 1:100
acc$err <- 100- acc$acc
ggplot(acc,aes(x=knn,y=err)) + geom_point()+ geom_line(lty="dotted",color='red')
set.seed(101)
predicted.gender.knn <- knn(voice.train[,-11],voice.test[,-11],voice.train[,11],k=1)
end_time<-Sys.time()
time.taken.knn <-end_time-start_time
time.taken.knn <- round(as.numeric(time.taken.knn),2)
print(time.taken.knn)
conknn <- confusionMatrix(predicted.gender.knn, voice.test[,11])
CrossTable(predicted.gender.knn, voice.test[,11])
print(conknn)
accuracy.knn <- round(conknn$overall[[1]] * 100 ,2)
print(paste("Accuracy :",accuracy.knn,"%"))
#Random Forest
start_time<-Sys.time()
voice.randf.model <- randomForest(label ~ ., data = voice.train, ntree = 500)
print(voice.randf.model)
voice.randf.model$confusion
voice.randf.model$importance
predictedresults <- predict(voice.randf.model,voice.test[,-11])
end_time<-Sys.time()
time.taken.rdf <-end_time-start_time
time.taken.rdf <- round(as.numeric(time.taken.rdf),2)
print(time.taken.rdf)
plot(voice.randf.model)
conrdf <- confusionMatrix(predictedresults,voice.test[,11])
CrossTable(predictedresults,voice.test[,11])
print(conrdf)
accuracy.rdf <- round(conrdf$overall[[1]] * 100 ,2)
print(paste("Accuracy :",accuracy.rdf,"%"))
# SVM algorithm
start_time<-Sys.time()
voice.svm.model <- svm(label ~ ., data= voice.train)
summary(voice.svm.model)
svm.predicted.values <- predict(voice.svm.model,voice.test[,-11],type="class")
end_time<-Sys.time()
time.taken.svm <-end_time-start_time
time.taken.svm <- round(as.numeric(time.taken.svm),2)
print(time.taken.svm)
confsvm <- confusionMatrix(svm.predicted.values,voice.test[,11])
CrossTable(svm.predicted.values,voice.test[,11])
accuracy.svm <- round(confsvm$overall[[1]] * 100 ,2)
print(paste("Accuracy :",accuracy.svm,"%"))
# Neural Network
start_time<-Sys.time()
# Get column names
f <- as.formula(paste("label ~", paste(n[!n %in% "label"], collapse = " + ")))
nn.voice <- neuralnet(f,data= voice.train)
plot(nn.voice, rep = "best")
summary(nn.voice)
pred.voice <- compute(nn.voice,voice.test[,-11])
predicted.class=apply(pred.voice$net.result,1,which.max)-1
predicted.class <- as.factor(predicted.class)
end_time<-Sys.time()
time.taken.nn <-end_time-start_time
time.taken.nn <- round(as.numeric(time.taken.nn),2)
print(time.taken.nn)
confnn <- confusionMatrix(as.factor(ifelse(predicted.class=="1", "male", "female")),voice.test[,11])
print(confnn)
accuracy.nn <- round(confnn$overall[[1]] * 100 ,2)
print(paste("Accuracy :",accuracy.nn,"%"))
# New Data
#Loading the voice file: male voice
snap <- readWave("Recording.wav")
print(snap)
plot(snap@left[30700:31500], type = "l", main = "Snap",xlab = "Time", ylab = "Frequency")
summary(snap)
ad <- autodetec(threshold = 5, env = "hil", ssmooth = 300, power=1,bp=c(0,22), xl = 2, picsize = 2, res = 200, flim= c(1,11), osci = TRUE, wl = 300, ls = FALSE, sxrow = 2, rows = 4, mindur = 0.1, maxdur = 1, set = TRUE)
c <- specan(ad,bp=c(0,1),pd= F)
#Loading the voice file: female voice
snap2 <- readWave("FemaleRecord.wav")
print(snap2)
plot(snap2@left[30700:31500], type = "l", main = "Snap",xlab = "Time", ylab = "Frequency")
summary(snap2)
ad2 <- autodetec(threshold = 5, env = "hil", ssmooth = 300, power=1,bp=c(0,22), xl = 2, picsize = 2, res = 200, flim= c(1,11), osci = TRUE,wl = 300, ls = FALSE, sxrow = 2, rows = 4, mindur = 0.1, maxdur = 1, set = TRUE)
c2 <- specan(ad,bp=c(0,1),pd= F)
#Consolidating the male and female data
newdata <- rbind(c,c2)
#adjusting variable names
newdata$median <- c$freq.median
newdata$Q25 <- c$freq.Q25
newdata$Q75 <- c$freq.Q75
newdata$IQR <- c$freq.IQR
newdata <- newdata[names(newdata) %in% names(voice.df)]
#Mean Imputation of missing data
smf <- summarise(group_by(voice.df,label),mean(maxfun))
newdata$maxfun[1:3] <- round(smf[2,2],7)
newdata$maxfun[4:6] <- round(smf[1,2],7)
newdata$maxfun <- as.numeric(newdata$maxfun)
smf <- summarise(group_by(voice.df,label),mean(minfun))
newdata$minfun[1:3] <- round(smf[2,2],7)
newdata$minfun[4:6] <- round(smf[1,2],7)
newdata$minfun <- as.numeric(newdata$minfun)
smf <- summarise(group_by(voice.df,label),mean(meanfun))
newdata$meanfun[1:3] <- round(smf[2,2],7)
newdata$meanfun[4:6] <- round(smf[1,2],7)
newdata$meanfun <- as.numeric(newdata$meanfun)
smf <- summarise(group_by(voice.df,label),mean(centroid))
newdata$centroid[1:3] <- round(smf[2,2],7)
newdata$centroid[4:6] <- round(smf[1,2],7)
newdata$centroid <- as.numeric(newdata$centroid)
smf <- summarise(group_by(voice.df,label),mean(mode))
newdata$mode[1:3] <- round(smf[2,2],7)
newdata$mode[4:6] <- round(smf[1,2],7)
newdata$mode <- as.numeric(newdata$mode)
newdata$label <- factor("male",levels = c("male","female"))
newdata$label[4:6] <- factor("female",levels = c("male","female"))
new.svm.model <- svm(label ~ ., data= voice.df)
#Predicting the gender of new data
new.predicted.values <- predict(new.svm.model,newdata[,-21],type="class")
#Performance evaluation
confusionMatrix(as.factor(new.predicted.values), newdata[,21])
|
fb622735391b946cbecbb9e6fc75f1f7c42c3d4d
|
87b03390e65a8e6ad1689b4b19f79514ba308f2e
|
/code/archive/7. tree_fz.R
|
286114ed8988c2ae434cf3a17bd549af655f0e5c
|
[] |
no_license
|
tincerti/corruption_meta
|
377c895f45fc4663dd3c74c06a12708a52a219a3
|
8c8cc78b85d1f93df5e927f61e5fe5fa341190bb
|
refs/heads/master
| 2022-04-30T22:17:35.488617
| 2022-04-12T17:38:30
| 2022-04-12T17:38:30
| 179,543,429
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,120
|
r
|
7. tree_fz.R
|
################################################################################
# Libraries and Import
################################################################################
rm(list=ls())
# Set seed
set.seed(300) # Estimates will of course vary slightly with change in seed
# Libraries
library(foreign)
library(readstata13)
library(tidyverse)
library(gbm)
library(rpart)
library(rpart.plot)
library(caTools)
# Import all conjoint experiments
fz = read.dta('data/franchino_zucchini.dta')
mv = read.dta('data/mares_visconti.dta')
b = read.dta13('data/choosing_crook_clean.dta')
eggers = readRDS("data/experiment_data_eggers.Rds", refhook = NULL)
################################################################################
# Data setup: Breitenstein
################################################################################
# Reduce to one corruption measure
b$Corrupt = with(b, ifelse(corrupt == "Corrupt", "Yes", "No"))
# Define attribute lists: Corruption
b$Corrupt <- factor(b$Corrupt,
levels = c("No", "Yes"),
labels = c("No", "Yes"))
# Define attribute lists: Co-partisanship
b$Party <- factor(b$samep,
levels = c("0", "1"),
labels = c("Different", "Co-partisan"))
# Define attribute lists: Economic performance
b$Economy <- factor(b$nperformance,
levels = c("bad",
"good"),
labels = c("Bad",
"Good"))
# Define attribute lists: Experience
b$Experience <- factor(b$nqualities,
levels = c("low",
"high"),
labels = c("Low",
"High"))
# Define attribute lists: Gender
b$Gender <- factor(b$ngender,
levels = c("man",
"woman"),
labels = c("Male",
"Female"))
b$candidate2 = b$candidate
# Add clean challenger variable
b$Challenger = with(b, ifelse(lead(Corrupt, 1) == "No" &
candidate == 1 & lead(candidate, 1) == 2,
"Clean", NA))
b$Challenger = with(b, ifelse(lag(Corrupt, 1) == "No" &
candidate == 2 & lag(candidate, 1) == 1,
"Clean", Challenger))
b$Challenger = with(b, ifelse(lead(Corrupt, 1) == "Yes" &
candidate == 1 & lead(candidate, 1) == 2,
"Corrupt", Challenger))
b$Challenger = with(b, ifelse(lag(Corrupt, 1) == "Yes" &
candidate == 2 & lag(candidate, 1) == 1,
"Corrupt", Challenger))
# Create datasets consisting of corrupt candidate and clean challenger only
clean = b %>%
filter((Corrupt == "Yes" & Challenger == "Clean") |
Corrupt == "No" & Challenger == "Corrupt")
################################################################################
# Predictions: Breitenstein
################################################################################
# Convert outcome variable to binary for classification
b$Y = as.factor(b$Y)
# Split data into training and test
sample = sample.split(b, SplitRatio = .9) # From caTools package
train = subset(b, sample == TRUE)
test = subset(b, sample == FALSE)
# Run classification tree (uses package rpart)
b_tree <- rpart(Y ~ Corrupt + Party + Economy + Experience + Gender,
data = train,
cp = 0,
method = 'class')
# Pick tree size that minimizes classification error rate and prune tree
bestcp <- b_tree$cptable[which.min(b_tree$cptable[,"xerror"]),"CP"]
plotcp(b_tree)
b_tree_pruned <- prune(b_tree, cp = bestcp)
# Plot classification tree
rpart.plot(b_tree_pruned, extra = 7, type = 5, cex = 0.6)
# Save plot
dev.copy(pdf,'figs/b_tree.pdf', width = 7, height = 3.5)
dev.off()
################################################################################
# Figure A12: Analysis with clean challenger only
################################################################################
# Reduce clean dataframe to corrupt candidate only
clean_reduced = clean %>% filter(Corrupt == "Yes")
# Split data into training and test
sample = sample.split(clean_reduced, SplitRatio = .9) # From caTools package
train = subset(clean_reduced, sample == TRUE)
test = subset(clean_reduced, sample == FALSE)
# Run classification tree (uses package rpart)
b_tree_clean <- rpart(Y ~ Corrupt + Party + Economy + Experience + Gender,
data = train,
cp = -0.01,
method = 'class')
# Pick tree size that minimizes classification error rate and prune tree
bestcp <- b_tree_clean$cptable[which.min(b_tree_clean$cptable[,"xerror"]),"CP"]
plotcp(b_tree_clean)
printcp(b_tree_clean)
b_tree_clean_pruned <- prune(b_tree_clean, cp = -0.01)
# Plot classification tree
rpart.plot(b_tree_clean_pruned, extra = 7, type = 5, cex = 0.55)
# Save plot
dev.copy(pdf,'figs/b_tree_clean.pdf', width = 7, height = 3.5)
dev.off()
################################################################################
# Boosted tree
################################################################################
boost_fit = gbm(Y ~ `Corrupt` + as.factor(Challenger) + `Party` + `Economy` + `Experience` + `Gender`,
data = train, distribution= "gaussian", n.trees = 5000,
interaction.depth = 4)
################################################################################
# Data setup: Franchino and Zucchini
################################################################################
# Remove NA outcome values - not sure why these are here
fz = fz %>% filter(!is.na(Y))
# Reduce to one corruption measure
fz$Corrupt = with(fz, ifelse(corruption == "Convicted of corruption" |
corruption == "Investigated for corruption",
"Yes", "No"))
# Define attribute lists: Corruption
fz$Corrupt <- factor(fz$Corrupt,
levels = c("No", "Yes"),
labels = c("No", "Yes"))
# Define attribute lists: Education
fz$Education <- factor(fz$education,
levels = c("Licenza media", "Diploma superiore", "Laurea"),
labels = c("Junior high", "High School", "College"))
# Define attribute lists: Income
fz$Income <- factor(fz$income,
levels = c("Less than 900 euro a month",
"Between 900 and 3000 euro a month",
"More than 3000 euro a month"),
labels = c("Less than 900 euros",
"900 to 3000 euros",
"Greater than 3000 euros"))
# Define attribute lists: tax policy
fz$`Tax policy` <- factor(fz$taxspend,
levels = c("Maintain level of provision",
"Cut taxes",
"More social services"),
labels = c("Maintain level of provision",
"Cut taxes",
"More social services"))
# Define attribute lists: same sex marriage
fz$`Same sex marriage` <- factor(fz$samesex,
levels = c("Some rights",
"No rights",
"Same rights"),
labels = c("Some rights",
"No rights",
"Same rights"))
|
fa6b1e0803b8ca3c816f79cecd5308c5981e2b06
|
05678f03a83ce73472b1473f2d0743c9f015f2b8
|
/R/observations_api.R
|
598b75a33a1e0ac8083ea21ba47c39d661cbc761
|
[] |
no_license
|
Breeding-Insight/brapi-r-v2
|
3a7b4168c6d8516eb1128445a2f281d1199668a3
|
5cfa7453947121496780b410661117639f09c7ff
|
refs/heads/main
| 2023-03-14T22:20:29.331935
| 2021-03-17T01:31:11
| 2021-03-17T01:31:11
| 348,535,689
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54,280
|
r
|
observations_api.R
|
# BrAPI-Core
#
# The Breeding API (BrAPI) is a Standardized REST ful Web Service API Specification for communicating Plant Breeding Data. BrAPI allows for easy data sharing between databases and tools involved in plant breeding. <div class=\"brapi-section\"> <h2 class=\"brapi-section-title\">General Reference Documentation</h2> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/URL_Structure.md\">URL Structure</a></div> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/Response_Structure.md\">Response Structure</a></div> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/Date_Time_Encoding.md\">Date/Time Encoding</a></div> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/Location_Encoding.md\">Location Encoding</a></div> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/Error_Handling.md\">Error Handling</a></div> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/Search_Services.md\">Search Services</a></div> </div> <div class=\"current-brapi-section brapi-section\"> <h2 class=\"brapi-section-title\">BrAPI Core</h2> <div class=\"brapi-section-description\">The BrAPI Core module contains high level entities used for organization and management. 
This includes Programs, Trials, Studies, Locations, People, and Lists</div> <div class=\"version-number\">V2.0</div> <div class=\"link-btn\"><a href=\"https://github.com/plantbreeding/API/tree/master/Specification/BrAPI-Core\">GitHub</a></div> <div class=\"link-btn\"><a href=\"https://app.swaggerhub.com/apis/PlantBreedingAPI/BrAPI-Core\">SwaggerHub</a></div> <div class=\"link-btn\"><a href=\"https://brapicore.docs.apiary.io\">Apiary</a></div> <div class=\"stop-float\"></div> </div> <div class=\"brapi-section\"> <h2 class=\"brapi-section-title\">BrAPI Phenotyping</h2> <div class=\"brapi-section-description\">The BrAPI Phenotyping module contains entities related to phenotypic observations. This includes Observation Units, Observations, Observation Variables, Traits, Scales, Methods, and Images</div> <div class=\"version-number\">V2.0</div> <div class=\"link-btn\"><a href=\"https://github.com/plantbreeding/API/tree/master/Specification/BrAPI-Phenotyping\">GitHub</a></div> <div class=\"link-btn\"><a href=\"https://app.swaggerhub.com/apis/PlantBreedingAPI/BrAPI-Phenotyping\">SwaggerHub</a></div> <div class=\"link-btn\"><a href=\"https://brapiphenotyping.docs.apiary.io\">Apiary</a></div> <div class=\"stop-float\"></div> </div> <div class=\"brapi-section\"> <h2 class=\"brapi-section-title\">BrAPI Genotyping</h2> <div class=\"brapi-section-description\">The BrAPI Genotyping module contains entities related to genotyping analysis. 
This includes Samples, Markers, Variant Sets, Variants, Call Sets, Calls, References, Reads, and Vendor Orders</div> <div class=\"version-number\">V2.0</div> <div class=\"link-btn\"><a href=\"https://github.com/plantbreeding/API/tree/master/Specification/BrAPI-Genotyping\">GitHub</a></div> <div class=\"link-btn\"><a href=\"https://app.swaggerhub.com/apis/PlantBreedingAPI/BrAPI-Genotyping\">SwaggerHub</a></div> <div class=\"link-btn\"><a href=\"https://brapigenotyping.docs.apiary.io\">Apiary</a></div> <div class=\"stop-float\"></div> </div> <div class=\"brapi-section\"> <h2 class=\"brapi-section-title\">BrAPI Germplasm</h2> <div class=\"brapi-section-description\">The BrAPI Germplasm module contains entities related to germplasm management. This includes Germplasm, Germplasm Attributes, Seed Lots, Crosses, Pedigree, and Progeny</div> <div class=\"version-number\">V2.0</div> <div class=\"link-btn\"><a href=\"https://github.com/plantbreeding/API/tree/master/Specification/BrAPI-Germplasm\">GitHub</a></div> <div class=\"link-btn\"><a href=\"https://app.swaggerhub.com/apis/PlantBreedingAPI/BrAPI-Germplasm\">SwaggerHub</a></div> <div class=\"link-btn\"><a href=\"https://brapigermplasm.docs.apiary.io\">Apiary</a></div> <div class=\"stop-float\"></div> </div> <style> .link-btn{ float: left; margin: 2px 10px 0 0; padding: 0 5px; border-radius: 5px; background-color: #ddd; } .stop-float{ clear: both; } .version-number{ float: left; margin: 5px 10px 0 5px; } .brapi-section-title{ margin: 0 10px 0 0; font-size: 20px; } .current-brapi-section{ font-weight: bolder; border-radius: 5px; background-color: #ddd; } .brapi-section{ padding: 5px 5px; } .brapi-section-description{ margin: 5px 0 0 5px; } </style>
#
# The version of the OpenAPI document: 2.0
#
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title Observations operations
#' @description openapi.Observations
#' @format An \code{R6Class} generator object
#' @field apiClient Handles the client-server communication.
#'
#' @section Methods:
#' \describe{
#' \strong{ ObservationsGet } \emph{ Get a filtered set of Observations }
#' Retrieve all observations where there are measurements for the given observation variables. observationTimestamp should be ISO8601 format with timezone -> YYYY-MM-DDThh:mm:ss+hhmm
#'
#' \itemize{
#' \item \emph{ @param } observation.db.id character
#' \item \emph{ @param } observation.unit.db.id character
#' \item \emph{ @param } germplasm.db.id character
#' \item \emph{ @param } observation.variable.db.id character
#' \item \emph{ @param } study.db.id character
#' \item \emph{ @param } location.db.id character
#' \item \emph{ @param } trial.db.id character
#' \item \emph{ @param } program.db.id character
#' \item \emph{ @param } season.db.id character
#' \item \emph{ @param } observation.unit.level.name character
#' \item \emph{ @param } observation.unit.level.order character
#' \item \emph{ @param } observation.unit.level.code character
#' \item \emph{ @param } observation.time.stamp.range.start character
#' \item \emph{ @param } observation.time.stamp.range.end character
#' \item \emph{ @param } external.reference.id character
#' \item \emph{ @param } external.reference.source character
#' \item \emph{ @param } page integer
#' \item \emph{ @param } page.size integer
#' \item \emph{ @param } authorization character
#' \item \emph{ @returnType } \link{ObservationListResponse} \cr
#'
#'
#' \item status code : 200 | OK
#'
#' \item return type : ObservationListResponse
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 400 | Bad Request
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 401 | Unauthorized
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 403 | Forbidden
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ ObservationsObservationDbIdGet } \emph{ Get the details of a specific Observations }
#' Get the details of a specific Observations observationTimestamp should be ISO8601 format with timezone -> YYYY-MM-DDThh:mm:ss+hhmm
#'
#' \itemize{
#' \item \emph{ @param } observation.db.id character
#' \item \emph{ @param } authorization character
#' \item \emph{ @returnType } \link{ObservationSingleResponse} \cr
#'
#'
#' \item status code : 200 | OK
#'
#' \item return type : ObservationSingleResponse
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 400 | Bad Request
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 401 | Unauthorized
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 403 | Forbidden
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 404 | Not Found
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ ObservationsObservationDbIdPut } \emph{ Update an existing Observation }
#' Update an existing Observation
#'
#' \itemize{
#' \item \emph{ @param } observation.db.id character
#' \item \emph{ @param } authorization character
#' \item \emph{ @param } observation.new.request \link{ObservationNewRequest}
#' \item \emph{ @returnType } \link{ObservationSingleResponse} \cr
#'
#'
#' \item status code : 200 | OK
#'
#' \item return type : ObservationSingleResponse
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 400 | Bad Request
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 401 | Unauthorized
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 403 | Forbidden
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 404 | Not Found
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ ObservationsPost } \emph{ Add new Observation entities }
#' Add new Observation entities
#'
#' \itemize{
#' \item \emph{ @param } authorization character
#' \item \emph{ @param } observation.new.request list( \link{ObservationNewRequest} )
#' \item \emph{ @returnType } \link{ObservationListResponse} \cr
#'
#'
#' \item status code : 200 | OK
#'
#' \item return type : ObservationListResponse
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 400 | Bad Request
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 401 | Unauthorized
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 403 | Forbidden
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 404 | Not Found
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ ObservationsPut } \emph{ Update multiple Observation entities }
#' Update multiple Observation entities simultaneously with a single call Include as many `observationDbIds` in the request as needed. Note - In strictly typed languages, this structure can be represented as a Map or Dictionary of objects and parsed directly from JSON.
#'
#' \itemize{
#' \item \emph{ @param } authorization character
#' \item \emph{ @param } request.body named list( \link{map(ObservationNewRequest)} )
#' \item \emph{ @returnType } \link{ObservationListResponse} \cr
#'
#'
#' \item status code : 200 | OK
#'
#' \item return type : ObservationListResponse
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 400 | Bad Request
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 401 | Unauthorized
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 403 | Forbidden
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 404 | Not Found
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ ObservationsTableGet } \emph{ Get a list of Observations in a table format }
#' <p>This service is designed to retrieve a table of time dependant observation values as a matrix of Observation Units and Observation Variables. This is also sometimes called a Time Series. This service takes the \"Sparse Table\" approach for representing this time dependant data.</p> <p>The table may be represented by JSON, CSV, or TSV. The \"Accept\" HTTP header is used for the client to request different return formats. By default, if the \"Accept\" header is not included in the request, the server should return JSON as described below.</p> <p>The table is REQUIRED to have the following columns</p> <ul> <li>observationUnitDbId - Each row is related to one Observation Unit</li> <li>observationTimeStamp - Each row is has a time stamp for when the observation was taken</li> <li>At least one column with an observationVariableDbId</li> </ul> <p>The table may have any or all of the following OPTIONAL columns. Included columns are decided by the server developer</p> <ul> <li>observationUnitName</li> <li>studyDbId</li> <li>studyName</li> <li>germplasmDbId</li> <li>germplasmName</li> <li>positionCoordinateX</li> <li>positionCoordinateY</li> <li>year</li> </ul> <p>The table also may have any number of Observation Unit Hierarchy Level columns. For example:</p> <ul> <li>field</li> <li>plot</li> <li>sub-plot</li> <li>plant</li> <li>pot</li> <li>block</li> <li>entry</li> <li>rep</li> </ul> <p>The JSON representation provides a pair of extra arrays for defining the headers of the table. The first array \"headerRow\" will always contain \"observationUnitDbId\" and any or all of the OPTIONAL column header names. The second array \"observationVariables\" contains the names and DbIds for the Observation Variables represented in the table. By appending the two arrays, you can construct the complete header row of the table. 
</p> <p>For CSV and TSV representations of the table, an extra header row is needed to describe both the Observation Variable DbId and the Observation Variable Name for each data column. See the example responses below</p>
#'
#' \itemize{
#' \item \emph{ @param } accept \link{WSMIMEDataTypes}
#' \item \emph{ @param } observation.unit.db.id character
#' \item \emph{ @param } germplasm.db.id character
#' \item \emph{ @param } observation.variable.db.id character
#' \item \emph{ @param } study.db.id character
#' \item \emph{ @param } location.db.id character
#' \item \emph{ @param } trial.db.id character
#' \item \emph{ @param } program.db.id character
#' \item \emph{ @param } season.db.id character
#' \item \emph{ @param } observation.level character
#' \item \emph{ @param } search.results.db.id character
#' \item \emph{ @param } observation.time.stamp.range.start character
#' \item \emph{ @param } observation.time.stamp.range.end character
#' \item \emph{ @param } authorization character
#' \item \emph{ @returnType } \link{ObservationTableResponse} \cr
#'
#'
#' \item status code : 200 | OK
#'
#' \item return type : ObservationTableResponse
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 400 | Bad Request
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 401 | Unauthorized
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 403 | Forbidden
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ SearchObservationsPost } \emph{ Submit a search request for a set of Observations }
#' Submit a search request for a set of Observations. Returns an Id which reference the results of this search
#'
#' \itemize{
#' \item \emph{ @param } authorization character
#' \item \emph{ @param } observation.search.request \link{ObservationSearchRequest}
#' \item \emph{ @returnType } \link{ObservationListResponse} \cr
#'
#'
#' \item status code : 200 | OK
#'
#' \item return type : ObservationListResponse
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 202 | Accepted
#'
#' \item return type : Accepted202SearchResponse
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 400 | Bad Request
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 401 | Unauthorized
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 403 | Forbidden
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ SearchObservationsSearchResultsDbIdGet } \emph{ Returns a list of Observations based on search criteria. }
#' Returns a list of Observations based on search criteria. observationTimeStamp - Iso Standard 8601. observationValue data type inferred from the ontology
#'
#' \itemize{
#' \item \emph{ @param } accept \link{WSMIMEDataTypes}
#' \item \emph{ @param } search.results.db.id character
#' \item \emph{ @param } authorization character
#' \item \emph{ @param } page integer
#' \item \emph{ @param } page.size integer
#' \item \emph{ @returnType } \link{ObservationListResponse} \cr
#'
#'
#' \item status code : 200 | OK
#'
#' \item return type : ObservationListResponse
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 202 | Accepted
#'
#' \item return type : Accepted202SearchResponse
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 400 | Bad Request
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 401 | Unauthorized
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 403 | Forbidden
#'
#' \item return type : character
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' }
#'
#'
#' @examples
#' \dontrun{
#' #################### ObservationsGet ####################
#'
#' library(openapi)
#' var.observation.db.id <- 'observation.db.id_example' # character | The unique ID of an Observation
#' var.observation.unit.db.id <- 'observation.unit.db.id_example' # character | The unique ID of an Observation Unit
#' var.germplasm.db.id <- 'germplasm.db.id_example' # character | The unique ID of a germplasm (accession) to filter on
#' var.observation.variable.db.id <- 'observation.variable.db.id_example' # character | The unique ID of an observation variable
#' var.study.db.id <- 'study.db.id_example' # character | The unique ID of a studies to filter on
#' var.location.db.id <- 'location.db.id_example' # character | The unique ID of a location where these observations were collected
#' var.trial.db.id <- 'trial.db.id_example' # character | The unique ID of a trial to filter on
#' var.program.db.id <- 'program.db.id_example' # character | The unique ID of a program to filter on
#' var.season.db.id <- 'season.db.id_example' # character | The year or Phenotyping campaign of a multi-annual study (trees, grape, ...)
#' var.observation.unit.level.name <- 'observation.unit.level.name_example' # character | The Observation Unit Level. Returns only the observation unit of the specified Level. References ObservationUnit->observationUnitPosition->observationLevel->levelName
#' var.observation.unit.level.order <- 'observation.unit.level.order_example' # character | The Observation Unit Level Order Number. Returns only the observation unit of the specified Level. References ObservationUnit->observationUnitPosition->observationLevel->levelOrder
#' var.observation.unit.level.code <- 'observation.unit.level.code_example' # character | The Observation Unit Level Code. This parameter should be used together with `observationUnitLevelName` or `observationUnitLevelOrder`. References ObservationUnit->observationUnitPosition->observationLevel->levelCode
#' var.observation.time.stamp.range.start <- 'observation.time.stamp.range.start_example' # character | Timestamp range start
#' var.observation.time.stamp.range.end <- 'observation.time.stamp.range.end_example' # character | Timestamp range end
#' var.external.reference.id <- 'external.reference.id_example' # character | An external reference ID. Could be a simple string or a URI. (use with `externalReferenceSource` parameter)
#' var.external.reference.source <- 'external.reference.source_example' # character | An identifier for the source system or database of an external reference (use with `externalReferenceID` parameter)
#' var.page <- 0 # integer | Used to request a specific page of data to be returned. The page indexing starts at 0 (the first page is 'page'= 0). Default is `0`.
#' var.page.size <- 1000 # integer | The size of the pages to be returned. Default is `1000`.
#' var.authorization <- 'Bearer XXXX' # character | HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong>
#'
#' #Get a filtered set of Observations
#' api.instance <- ObservationsApi$new()
#'
#' #Configure HTTP basic authorization: AuthorizationToken
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$ObservationsGet(observation.db.id=var.observation.db.id, observation.unit.db.id=var.observation.unit.db.id, germplasm.db.id=var.germplasm.db.id, observation.variable.db.id=var.observation.variable.db.id, study.db.id=var.study.db.id, location.db.id=var.location.db.id, trial.db.id=var.trial.db.id, program.db.id=var.program.db.id, season.db.id=var.season.db.id, observation.unit.level.name=var.observation.unit.level.name, observation.unit.level.order=var.observation.unit.level.order, observation.unit.level.code=var.observation.unit.level.code, observation.time.stamp.range.start=var.observation.time.stamp.range.start, observation.time.stamp.range.end=var.observation.time.stamp.range.end, external.reference.id=var.external.reference.id, external.reference.source=var.external.reference.source, page=var.page, page.size=var.page.size, authorization=var.authorization)
#'
#'
#' #################### ObservationsObservationDbIdGet ####################
#'
#' library(openapi)
#' var.observation.db.id <- 'observation.db.id_example' # character | The unique ID of an observation
#' var.authorization <- 'Bearer XXXX' # character | HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong>
#'
#' #Get the details of a specific Observations
#' api.instance <- ObservationsApi$new()
#'
#' #Configure HTTP basic authorization: AuthorizationToken
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$ObservationsObservationDbIdGet(var.observation.db.id, authorization=var.authorization)
#'
#'
#' #################### ObservationsObservationDbIdPut ####################
#'
#' library(openapi)
#' var.observation.db.id <- 'observation.db.id_example' # character | The unique ID of an observation
#' var.authorization <- 'Bearer XXXX' # character | HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong>
#' var.observation.new.request <- ObservationNewRequest$new() # ObservationNewRequest |
#'
#' #Update an existing Observation
#' api.instance <- ObservationsApi$new()
#'
#' #Configure HTTP basic authorization: AuthorizationToken
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$ObservationsObservationDbIdPut(var.observation.db.id, authorization=var.authorization, observation.new.request=var.observation.new.request)
#'
#'
#' #################### ObservationsPost ####################
#'
#' library(openapi)
#' var.authorization <- 'Bearer XXXX' # character | HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong>
#' var.observation.new.request <- list(ObservationNewRequest$new()) # array[ObservationNewRequest] |
#'
#' #Add new Observation entities
#' api.instance <- ObservationsApi$new()
#'
#' #Configure HTTP basic authorization: AuthorizationToken
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$ObservationsPost(authorization=var.authorization, observation.new.request=var.observation.new.request)
#'
#'
#' #################### ObservationsPut ####################
#'
#' library(openapi)
#' var.authorization <- 'Bearer XXXX' # character | HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong>
#' var.request.body <- list('key' = ObservationNewRequest$new()) # map(ObservationNewRequest) |
#'
#' #Update multiple Observation entities
#' api.instance <- ObservationsApi$new()
#'
#' #Configure HTTP basic authorization: AuthorizationToken
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$ObservationsPut(authorization=var.authorization, request.body=var.request.body)
#'
#'
#' #################### ObservationsTableGet ####################
#'
#' library(openapi)
#' var.accept <- WSMIMEDataTypes$new() # WSMIMEDataTypes | The requested content type which should be returned by the server
#' var.observation.unit.db.id <- 'observation.unit.db.id_example' # character | The unique ID of an Observation Unit
#' var.germplasm.db.id <- 'germplasm.db.id_example' # character | The unique ID of a germplasm (accession) to filter on
#' var.observation.variable.db.id <- 'observation.variable.db.id_example' # character | The unique ID of an observation variable
#' var.study.db.id <- 'study.db.id_example' # character | The unique ID of a studies to filter on
#' var.location.db.id <- 'location.db.id_example' # character | The unique ID of a location where these observations were collected
#' var.trial.db.id <- 'trial.db.id_example' # character | The unique ID of a trial to filter on
#' var.program.db.id <- 'program.db.id_example' # character | The unique ID of a program to filter on
#' var.season.db.id <- 'season.db.id_example' # character | The year or Phenotyping campaign of a multi-annual study (trees, grape, ...)
#' var.observation.level <- 'observation.level_example' # character | The type of the observationUnit. Returns only the observation unit of the specified type; the parent levels ID can be accessed through observationUnitStructure.
#' var.search.results.db.id <- 'search.results.db.id_example' # character | Permanent unique identifier which references the search results
#' var.observation.time.stamp.range.start <- 'observation.time.stamp.range.start_example' # character | Timestamp range start
#' var.observation.time.stamp.range.end <- 'observation.time.stamp.range.end_example' # character | Timestamp range end
#' var.authorization <- 'Bearer XXXX' # character | HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong>
#'
#' #Get a list of Observations in a table format
#' api.instance <- ObservationsApi$new()
#'
#' #Configure HTTP basic authorization: AuthorizationToken
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$ObservationsTableGet(var.accept, observation.unit.db.id=var.observation.unit.db.id, germplasm.db.id=var.germplasm.db.id, observation.variable.db.id=var.observation.variable.db.id, study.db.id=var.study.db.id, location.db.id=var.location.db.id, trial.db.id=var.trial.db.id, program.db.id=var.program.db.id, season.db.id=var.season.db.id, observation.level=var.observation.level, search.results.db.id=var.search.results.db.id, observation.time.stamp.range.start=var.observation.time.stamp.range.start, observation.time.stamp.range.end=var.observation.time.stamp.range.end, authorization=var.authorization)
#'
#'
#' #################### SearchObservationsPost ####################
#'
#' library(openapi)
#' var.authorization <- 'Bearer XXXX' # character | HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong>
#' var.observation.search.request <- ObservationSearchRequest$new() # ObservationSearchRequest |
#'
#' #Submit a search request for a set of Observations
#' api.instance <- ObservationsApi$new()
#'
#' #Configure HTTP basic authorization: AuthorizationToken
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$SearchObservationsPost(authorization=var.authorization, observation.search.request=var.observation.search.request)
#'
#'
#' #################### SearchObservationsSearchResultsDbIdGet ####################
#'
#' library(openapi)
#' var.accept <- WSMIMEDataTypes$new() # WSMIMEDataTypes | The requested content type which should be returned by the server
#' var.search.results.db.id <- 'search.results.db.id_example' # character | Unique identifier which references the search results
#' var.authorization <- 'Bearer XXXX' # character | HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong>
#' var.page <- 0 # integer | Used to request a specific page of data to be returned. The page indexing starts at 0 (the first page is 'page'= 0). Default is `0`.
#' var.page.size <- 1000 # integer | The size of the pages to be returned. Default is `1000`.
#'
#' #Returns a list of Observations based on search criteria.
#' api.instance <- ObservationsApi$new()
#'
#' #Configure HTTP basic authorization: AuthorizationToken
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$SearchObservationsSearchResultsDbIdGet(var.accept, var.search.results.db.id, authorization=var.authorization, page=var.page, page.size=var.page.size)
#'
#'
#' }
#' @importFrom R6 R6Class
#' @importFrom base64enc base64encode
#' @export
ObservationsApi <- R6::R6Class(
  'ObservationsApi',
  public = list(
    # Shared ApiClient instance used to issue every HTTP request.
    apiClient = NULL,

    # Store the caller-supplied ApiClient, or fall back to a default one.
    initialize = function(apiClient){
      if (!missing(apiClient)) {
        self$apiClient <- apiClient
      }
      else {
        self$apiClient <- ApiClient$new()
      }
    },

    # GET /observations with the given filters.
    # On a 2xx response returns the deserialized content (ObservationListResponse);
    # on 3xx/4xx/5xx returns the raw ApiResponse wrapper.
    # NOTE(review): a status code outside 200-599 falls through every branch
    # and yields NULL invisibly — same pattern in every wrapper method below.
    ObservationsGet = function(observation.db.id=NULL, observation.unit.db.id=NULL, germplasm.db.id=NULL, observation.variable.db.id=NULL, study.db.id=NULL, location.db.id=NULL, trial.db.id=NULL, program.db.id=NULL, season.db.id=NULL, observation.unit.level.name=NULL, observation.unit.level.order=NULL, observation.unit.level.code=NULL, observation.time.stamp.range.start=NULL, observation.time.stamp.range.end=NULL, external.reference.id=NULL, external.reference.source=NULL, page=NULL, page.size=NULL, authorization=NULL, ...){
      apiResponse <- self$ObservationsGetWithHttpInfo(observation.db.id, observation.unit.db.id, germplasm.db.id, observation.variable.db.id, study.db.id, location.db.id, trial.db.id, program.db.id, season.db.id, observation.unit.level.name, observation.unit.level.order, observation.unit.level.code, observation.time.stamp.range.start, observation.time.stamp.range.end, external.reference.id, external.reference.source, page, page.size, authorization, ...)
      resp <- apiResponse$response
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        apiResponse$content
      } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
        apiResponse
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        apiResponse
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        apiResponse
      }
    },

    # GET /observations — builds query/header params, performs the call, and
    # wraps the (attempted) deserialization in an ApiResponse.
    ObservationsGetWithHttpInfo = function(observation.db.id=NULL, observation.unit.db.id=NULL, germplasm.db.id=NULL, observation.variable.db.id=NULL, study.db.id=NULL, location.db.id=NULL, trial.db.id=NULL, program.db.id=NULL, season.db.id=NULL, observation.unit.level.name=NULL, observation.unit.level.order=NULL, observation.unit.level.code=NULL, observation.time.stamp.range.start=NULL, observation.time.stamp.range.end=NULL, external.reference.id=NULL, external.reference.source=NULL, page=NULL, page.size=NULL, authorization=NULL, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      headerParams['Authorization'] <- `authorization`
      # Map the R-style dotted argument names onto the BrAPI camelCase
      # query-parameter names expected by the server.
      queryParams['observationDbId'] <- observation.db.id
      queryParams['observationUnitDbId'] <- observation.unit.db.id
      queryParams['germplasmDbId'] <- germplasm.db.id
      queryParams['observationVariableDbId'] <- observation.variable.db.id
      queryParams['studyDbId'] <- study.db.id
      queryParams['locationDbId'] <- location.db.id
      queryParams['trialDbId'] <- trial.db.id
      queryParams['programDbId'] <- program.db.id
      queryParams['seasonDbId'] <- season.db.id
      queryParams['observationUnitLevelName'] <- observation.unit.level.name
      queryParams['observationUnitLevelOrder'] <- observation.unit.level.order
      queryParams['observationUnitLevelCode'] <- observation.unit.level.code
      queryParams['observationTimeStampRangeStart'] <- observation.time.stamp.range.start
      queryParams['observationTimeStampRangeEnd'] <- observation.time.stamp.range.end
      queryParams['externalReferenceID'] <- external.reference.id
      queryParams['externalReferenceSource'] <- external.reference.source
      queryParams['page'] <- page
      queryParams['pageSize'] <- page.size
      body <- NULL
      urlPath <- "/observations"
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                     method = "GET",
                                     queryParams = queryParams,
                                     headerParams = headerParams,
                                     body = body,
                                     ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        # 2xx: deserialize the payload; any parse failure is converted to an error.
        deserializedRespObj <- tryCatch(
          self$apiClient$deserialize(resp, "ObservationListResponse", loadNamespace("openapi")),
          error = function(e){
            stop("Failed to deserialize response")
          }
        )
        ApiResponse$new(deserializedRespObj, resp)
      } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
        ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },

    # GET /observations/{observationDbId} — content on 2xx, raw ApiResponse otherwise.
    ObservationsObservationDbIdGet = function(observation.db.id, authorization=NULL, ...){
      apiResponse <- self$ObservationsObservationDbIdGetWithHttpInfo(observation.db.id, authorization, ...)
      resp <- apiResponse$response
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        apiResponse$content
      } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
        apiResponse
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        apiResponse
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        apiResponse
      }
    },

    # GET /observations/{observationDbId} — substitutes the path parameter and
    # deserializes a single-observation response (ObservationSingleResponse).
    ObservationsObservationDbIdGetWithHttpInfo = function(observation.db.id, authorization=NULL, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`observation.db.id`)) {
        stop("Missing required parameter `observation.db.id`.")
      }
      headerParams['Authorization'] <- `authorization`
      body <- NULL
      urlPath <- "/observations/{observationDbId}"
      if (!missing(`observation.db.id`)) {
        # URL-encode the id before splicing it into the path template.
        urlPath <- gsub(paste0("\\{", "observationDbId", "\\}"), URLencode(as.character(`observation.db.id`), reserved = TRUE), urlPath)
      }
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                     method = "GET",
                                     queryParams = queryParams,
                                     headerParams = headerParams,
                                     body = body,
                                     ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        deserializedRespObj <- tryCatch(
          self$apiClient$deserialize(resp, "ObservationSingleResponse", loadNamespace("openapi")),
          error = function(e){
            stop("Failed to deserialize response")
          }
        )
        ApiResponse$new(deserializedRespObj, resp)
      } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
        ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },

    # PUT /observations/{observationDbId} — update a single observation.
    ObservationsObservationDbIdPut = function(observation.db.id, authorization=NULL, observation.new.request=NULL, ...){
      apiResponse <- self$ObservationsObservationDbIdPutWithHttpInfo(observation.db.id, authorization, observation.new.request, ...)
      resp <- apiResponse$response
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        apiResponse$content
      } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
        apiResponse
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        apiResponse
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        apiResponse
      }
    },

    # PUT /observations/{observationDbId} — serializes the request object (if
    # given) to JSON as the body; deserializes ObservationSingleResponse.
    ObservationsObservationDbIdPutWithHttpInfo = function(observation.db.id, authorization=NULL, observation.new.request=NULL, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`observation.db.id`)) {
        stop("Missing required parameter `observation.db.id`.")
      }
      headerParams['Authorization'] <- `authorization`
      if (!missing(`observation.new.request`)) {
        body <- `observation.new.request`$toJSONString()
      } else {
        body <- NULL
      }
      urlPath <- "/observations/{observationDbId}"
      if (!missing(`observation.db.id`)) {
        urlPath <- gsub(paste0("\\{", "observationDbId", "\\}"), URLencode(as.character(`observation.db.id`), reserved = TRUE), urlPath)
      }
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                     method = "PUT",
                                     queryParams = queryParams,
                                     headerParams = headerParams,
                                     body = body,
                                     ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        deserializedRespObj <- tryCatch(
          self$apiClient$deserialize(resp, "ObservationSingleResponse", loadNamespace("openapi")),
          error = function(e){
            stop("Failed to deserialize response")
          }
        )
        ApiResponse$new(deserializedRespObj, resp)
      } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
        ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },

    # POST /observations — create observations from a list of request objects.
    ObservationsPost = function(authorization=NULL, observation.new.request=NULL, ...){
      apiResponse <- self$ObservationsPostWithHttpInfo(authorization, observation.new.request, ...)
      resp <- apiResponse$response
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        apiResponse$content
      } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
        apiResponse
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        apiResponse
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        apiResponse
      }
    },

    # POST /observations — builds a JSON array body by serializing each element
    # of `observation.new.request`; deserializes ObservationListResponse.
    ObservationsPostWithHttpInfo = function(authorization=NULL, observation.new.request=NULL, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      headerParams['Authorization'] <- `authorization`
      if (!missing(`observation.new.request`)) {
        # Each element is JSON-serialized, then joined into a top-level array.
        body.items = paste(unlist(lapply(observation.new.request, function(param){param$toJSONString()})), collapse = ",")
        body <- paste0('[', body.items, ']')
      } else {
        body <- NULL
      }
      urlPath <- "/observations"
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                     method = "POST",
                                     queryParams = queryParams,
                                     headerParams = headerParams,
                                     body = body,
                                     ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        deserializedRespObj <- tryCatch(
          self$apiClient$deserialize(resp, "ObservationListResponse", loadNamespace("openapi")),
          error = function(e){
            stop("Failed to deserialize response")
          }
        )
        ApiResponse$new(deserializedRespObj, resp)
      } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
        ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },

    # PUT /observations — bulk update.
    ObservationsPut = function(authorization=NULL, request.body=NULL, ...){
      apiResponse <- self$ObservationsPutWithHttpInfo(authorization, request.body, ...)
      resp <- apiResponse$response
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        apiResponse$content
      } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
        apiResponse
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        apiResponse
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        apiResponse
      }
    },

    # PUT /observations — `request.body` must provide toJSONString();
    # deserializes ObservationListResponse.
    ObservationsPutWithHttpInfo = function(authorization=NULL, request.body=NULL, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      headerParams['Authorization'] <- `authorization`
      if (!missing(`request.body`)) {
        body <- `request.body`$toJSONString()
      } else {
        body <- NULL
      }
      urlPath <- "/observations"
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                     method = "PUT",
                                     queryParams = queryParams,
                                     headerParams = headerParams,
                                     body = body,
                                     ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        deserializedRespObj <- tryCatch(
          self$apiClient$deserialize(resp, "ObservationListResponse", loadNamespace("openapi")),
          error = function(e){
            stop("Failed to deserialize response")
          }
        )
        ApiResponse$new(deserializedRespObj, resp)
      } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
        ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },

    # GET /observations/table — tabular export; `accept` (content type) is required.
    ObservationsTableGet = function(accept, observation.unit.db.id=NULL, germplasm.db.id=NULL, observation.variable.db.id=NULL, study.db.id=NULL, location.db.id=NULL, trial.db.id=NULL, program.db.id=NULL, season.db.id=NULL, observation.level=NULL, search.results.db.id=NULL, observation.time.stamp.range.start=NULL, observation.time.stamp.range.end=NULL, authorization=NULL, ...){
      apiResponse <- self$ObservationsTableGetWithHttpInfo(accept, observation.unit.db.id, germplasm.db.id, observation.variable.db.id, study.db.id, location.db.id, trial.db.id, program.db.id, season.db.id, observation.level, search.results.db.id, observation.time.stamp.range.start, observation.time.stamp.range.end, authorization, ...)
      resp <- apiResponse$response
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        apiResponse$content
      } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
        apiResponse
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        apiResponse
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        apiResponse
      }
    },

    # GET /observations/table — sends the required Accept header plus filter
    # query params; deserializes ObservationTableResponse.
    ObservationsTableGetWithHttpInfo = function(accept, observation.unit.db.id=NULL, germplasm.db.id=NULL, observation.variable.db.id=NULL, study.db.id=NULL, location.db.id=NULL, trial.db.id=NULL, program.db.id=NULL, season.db.id=NULL, observation.level=NULL, search.results.db.id=NULL, observation.time.stamp.range.start=NULL, observation.time.stamp.range.end=NULL, authorization=NULL, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`accept`)) {
        stop("Missing required parameter `accept`.")
      }
      headerParams['Accept'] <- `accept`
      headerParams['Authorization'] <- `authorization`
      queryParams['observationUnitDbId'] <- observation.unit.db.id
      queryParams['germplasmDbId'] <- germplasm.db.id
      queryParams['observationVariableDbId'] <- observation.variable.db.id
      queryParams['studyDbId'] <- study.db.id
      queryParams['locationDbId'] <- location.db.id
      queryParams['trialDbId'] <- trial.db.id
      queryParams['programDbId'] <- program.db.id
      queryParams['seasonDbId'] <- season.db.id
      queryParams['observationLevel'] <- observation.level
      queryParams['searchResultsDbId'] <- search.results.db.id
      queryParams['observationTimeStampRangeStart'] <- observation.time.stamp.range.start
      queryParams['observationTimeStampRangeEnd'] <- observation.time.stamp.range.end
      body <- NULL
      urlPath <- "/observations/table"
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                     method = "GET",
                                     queryParams = queryParams,
                                     headerParams = headerParams,
                                     body = body,
                                     ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        deserializedRespObj <- tryCatch(
          self$apiClient$deserialize(resp, "ObservationTableResponse", loadNamespace("openapi")),
          error = function(e){
            stop("Failed to deserialize response")
          }
        )
        ApiResponse$new(deserializedRespObj, resp)
      } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
        ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },

    # POST /search/observations — submit an asynchronous search request.
    SearchObservationsPost = function(authorization=NULL, observation.search.request=NULL, ...){
      apiResponse <- self$SearchObservationsPostWithHttpInfo(authorization, observation.search.request, ...)
      resp <- apiResponse$response
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        apiResponse$content
      } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
        apiResponse
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        apiResponse
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        apiResponse
      }
    },

    # POST /search/observations — serializes the search-request object as the
    # JSON body; deserializes ObservationListResponse.
    SearchObservationsPostWithHttpInfo = function(authorization=NULL, observation.search.request=NULL, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      headerParams['Authorization'] <- `authorization`
      if (!missing(`observation.search.request`)) {
        body <- `observation.search.request`$toJSONString()
      } else {
        body <- NULL
      }
      urlPath <- "/search/observations"
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                     method = "POST",
                                     queryParams = queryParams,
                                     headerParams = headerParams,
                                     body = body,
                                     ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        deserializedRespObj <- tryCatch(
          self$apiClient$deserialize(resp, "ObservationListResponse", loadNamespace("openapi")),
          error = function(e){
            stop("Failed to deserialize response")
          }
        )
        ApiResponse$new(deserializedRespObj, resp)
      } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
        ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },

    # GET /search/observations/{searchResultsDbId} — fetch paged results of a
    # previously submitted search.
    SearchObservationsSearchResultsDbIdGet = function(accept, search.results.db.id, authorization=NULL, page=NULL, page.size=NULL, ...){
      apiResponse <- self$SearchObservationsSearchResultsDbIdGetWithHttpInfo(accept, search.results.db.id, authorization, page, page.size, ...)
      resp <- apiResponse$response
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        apiResponse$content
      } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
        apiResponse
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        apiResponse
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        apiResponse
      }
    },

    # GET /search/observations/{searchResultsDbId} — requires `accept` and the
    # search-results id; substitutes the path parameter, adds paging query
    # params, and deserializes ObservationListResponse.
    SearchObservationsSearchResultsDbIdGetWithHttpInfo = function(accept, search.results.db.id, authorization=NULL, page=NULL, page.size=NULL, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`accept`)) {
        stop("Missing required parameter `accept`.")
      }
      if (missing(`search.results.db.id`)) {
        stop("Missing required parameter `search.results.db.id`.")
      }
      headerParams['Accept'] <- `accept`
      headerParams['Authorization'] <- `authorization`
      queryParams['page'] <- page
      queryParams['pageSize'] <- page.size
      body <- NULL
      urlPath <- "/search/observations/{searchResultsDbId}"
      if (!missing(`search.results.db.id`)) {
        urlPath <- gsub(paste0("\\{", "searchResultsDbId", "\\}"), URLencode(as.character(`search.results.db.id`), reserved = TRUE), urlPath)
      }
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                     method = "GET",
                                     queryParams = queryParams,
                                     headerParams = headerParams,
                                     body = body,
                                     ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        deserializedRespObj <- tryCatch(
          self$apiClient$deserialize(resp, "ObservationListResponse", loadNamespace("openapi")),
          error = function(e){
            stop("Failed to deserialize response")
          }
        )
        ApiResponse$new(deserializedRespObj, resp)
      } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
        ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    }
  )
)
|
79ed37586c7869af4e7ffd96c73de416c166bb7e
|
1b17c973a8cf41d6349fc33f098086ef6603f696
|
/cachematrix.R
|
37721893904b9c1aa931631ce9ae4b2855e8b7d0
|
[] |
no_license
|
vicki167/ProgrammingAssignment2
|
7e9db55da776b39a4073737487557e994d1ea4cb
|
fc0fe3e3fe67b411dd411ae001cf40252f72af9c
|
refs/heads/master
| 2021-01-23T12:54:58.885875
| 2017-09-06T23:57:37
| 2017-09-06T23:57:37
| 102,659,629
| 0
| 0
| null | 2017-09-06T21:23:59
| 2017-09-06T21:23:59
| null |
UTF-8
|
R
| false
| false
| 1,436
|
r
|
cachematrix.R
|
## These functions work in conjunction to provide
## a matrix implementation that caches the value
## of its inverse
## Creates a special "matrix" wrapper object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  ## Cached inverse of `x`; NULL until setinverse() stores a value.
  cached_inverse <- NULL

  ## Expose the four accessors as a named list. Each closure shares the
  ## environment holding `x` and `cached_inverse`.
  list(
    ## Replace the stored matrix and drop any stale cached inverse.
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    ## Return the stored matrix.
    get = function() x,
    ## Store a computed inverse in the cache.
    setinverse = function(inverse) cached_inverse <<- inverse,
    ## Return the cached inverse, or NULL if none has been stored yet.
    getinverse = function() cached_inverse
  )
}
## returns the inverse of the passed matrix, returning
## a cached value, or computing it if this is the first call
## or the matrix has changed
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of the matrix wrapped by `x`
  ## (a makeCacheMatrix object), reusing the cached inverse when available.
  ## Extra arguments in `...` are forwarded to solve().
  inv <- x$getinverse()
  if (!is.null(inv)) {
    ## Cache hit: announce it and hand back the stored inverse.
    message("returning cached data")
    return(inv)
  }
  ## Cache miss: compute the inverse, store it for next time, and return it.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
af6ec403c1451a2c3afbda1cac420a9f1f720bf0
|
217a471ec71f6a4d9db0a4a34b453b242aed053c
|
/R/form-chl2qc.R
|
d245e98bb31b3e159e3e2eede67101a6e5b206b2
|
[] |
no_license
|
mcmventura/fcdata2qc
|
0fd5c2668cdf8e59805b7ce5e7922f36e920c17d
|
2ac2d1a63b8f1c47ea22a45a9840ba046aa57738
|
refs/heads/master
| 2020-04-23T16:33:44.888237
| 2019-05-23T09:31:52
| 2019-05-23T09:31:52
| 171,302,176
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,569
|
r
|
form-chl2qc.R
|
#' Converts to WP3 C3S-QC Format the Digitisations of Chilean Surface Records
#' 1950-1958 performed by FCiências.ID.
#'
#' Splits the annual digitisations per station into several data frames by type
#' of meteorological variable. The output data frames and text files are in the
#' WP3 C3S-QC format: \strong{variable code | year (YYYY) | month (MM) | day
#' (DD) | time (HHMM) | observation}.
#'
#' @details
#' \strong{Input:}
#' \itemize{
#' \item The output data frame of \code{\link{read_chl_ffcul}} with seventeen
#' columns: WIGOS compatible station ID, day of the year, year, month, day,
#' hour, dew point temperature, cloud cover, wind direction, wind speed, air
#' pressure, air temperature, accumulated precipitation at first hour,
#' accumulated precipitation at second hour, daily minimum temperature, daily
#' maximum temperature, relative humidity.
#' }
#' \strong{Output:}
#' \itemize{
#' \item A .RData and a .txt file without header for each one of the ten
#' variables digitised (td, n, dd, w, p, ta, rr, Tn, Tx, rh) -
#' 'VariableCode_StationName_Year'. If the output for some variable is
#' missing, it is because the record contains no observations of that variable.
#' \item A .txt file with the wind speed and another with the wind direction in
#' the original units - 'VariableCode_StationName_Year_16wcr' and
#' 'VariableCode_StationName_Year_kt'.
#' \item A .txt file for each one of the ten variables digitised which includes
#' the missing values in the anual series - 'VariableCode_StationName_Year_all'.
#' }
#'
#' @param digt A data frame with the following seventeen columns:
#' \strong{station | dayr | year | month | day | hour | tdw | cloud | wdir
#' | wsp | ppa | tta | rrr1 | rrr2 | tmin | tmax | rhum}.
#'
#' @usage form_chl2qc(digt)
#'
#' @import utils
#'
#' @export
#'
form_chl2qc <- function(digt) {
digt[digt == -999] <- NA
#station <- digt[1, 1]
station <- unique(digt[[1]])
year <- unique(digt[[3]])
digt$station <- NULL
cat("\n")
cat("Converting to C3S-QC format...\n\n")
# DEW POINT
# Checks if all the values are missing
if (sum(is.na(digt$tdw)) < nrow(digt)) {
subda_td <- digt[c(2:5, 6)]
# Or this way
# subd_td <- digt[c("month", "day", "hour", "tdw")]
# Creates the column with the variable code
subda_td$vcod <- c(rep("td", nrow(subda_td)))
# Defines the standard order of the columns in the data frame
subda_td <- subda_td[c("vcod", "year", "month", "day", "hour", "tdw")]
# Creates directory for td
td_fol <- paste("td_", station, sep = "")
if (!dir.exists(td_fol)) {
dir.create(td_fol)
}
# Saves with the NA - good for plotting
fna_td <- paste("td", station, year, "all", sep = "_")
write.table(subda_td, file = paste(td_fol, "/", fna_td, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
# Saves without the NA
subd_td <- subda_td[!is.na(subda_td$tdw), ]
fn_td <- paste("td", station, year, sep = "_")
write.table(subd_td, file = paste(td_fol, "/", fn_td, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
save(subd_td, file = paste(td_fol, "/", fn_td, ".RData", sep = ""))
saveRDS(subd_td, file = paste(td_fol, "/", fn_td, ".rds", sep = ""))
}
# CLOUD COVER
if (sum(is.na(digt$cloud)) < nrow(digt)) {
subda_n <- digt[c(2:5, 7)]
subda_n$vcod <- c(rep("n", nrow(subda_n)))
subda_n <- subda_n[c("vcod", "year", "month", "day", "hour", "cloud")]
n_fol <- paste("n_", station, sep = "")
if (!dir.exists(n_fol)) {
dir.create(n_fol)
}
fna_n <- paste("n", station, year, "all", sep = "_")
write.table(subda_n, file = paste(n_fol, "/", fna_n, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
subd_n <- subda_n[!is.na(subda_n$cloud), ]
fn_n <- paste("n", station, year, sep = "_")
write.table(subd_n, file = paste(n_fol, "/", fn_n, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
save(subd_n, file = paste(n_fol, "/", fn_n, ".RData", sep = ""))
saveRDS(subd_n, file = paste(n_fol, "/", fn_n, ".rds", sep = ""))
}
# WIND DIRECTION
if (sum(is.na(digt$wdir)) < nrow(digt)) {
subda_dd <- digt[c(2:5, 8)]
subda_dd$vcod <- c(rep("dd", nrow(subda_dd)))
subda_dd <- subda_dd[c("vcod", "year", "month", "day", "hour", "wdir")]
dd_fol <- paste("dd_", station, sep = "")
if (!dir.exists(dd_fol)) {
dir.create(dd_fol)
}
fna_dd <- paste("dd", station, year, "all", sep = "_")
write.table(subda_dd,
file = paste(dd_fol, "/", fna_dd, "_16wcr", ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
# Converts from 16-wind compass rose points to degrees
dd16wcr <- subda_dd$wdir
dddeg <- convert_dd_16wcr2deg(dd16wcr = dd16wcr)
subda_dd_deg <- subda_dd
subda_dd_deg$wdir <- dddeg
fna_dd_deg <- paste("dd", station, year, "all", sep = "_")
write.table(subda_dd_deg,
file = paste(dd_fol, "/", fna_dd_deg, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
subd_dd_deg <- subda_dd_deg[!is.na(subda_dd_deg$wdir), ]
fn_dd_deg <- paste("dd", station, year, sep = "_")
write.table(subd_dd_deg,
file = paste(dd_fol, "/", fn_dd_deg, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
save(subd_dd_deg, file = paste(dd_fol, "/", fn_dd_deg, ".RData", sep = ""))
saveRDS(subd_dd_deg, file = paste(dd_fol, "/", fn_dd_deg, ".rds", sep = ""))
}
# WIND SPEED
if (sum(is.na(digt$wsp)) < nrow(digt)) {
subda_w <- digt[c(2:5, 9)]
subda_w$vcod <- c(rep("w", nrow(subda_w)))
subda_w <- subda_w[c("vcod", "year", "month", "day", "hour", "wsp")]
w_fol <- paste("w_", station, sep = "")
if (!dir.exists(w_fol)) {
dir.create(w_fol)
}
fna_w <- paste("w", station, year, "all", sep = "_")
write.table(subda_w, file = paste(w_fol, "/", fna_w, "_kt", ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
# Converts from knots to meters/second
wkn <- subda_w$wsp
wms <- convert_w_kn2ms(wkn = wkn)
subda_w_ms <- subda_w
subda_w_ms$wsp <- wms
fna_w_ms <- paste("w", station, year, "all", sep = "_")
write.table(subda_w_ms, file = paste(w_fol, "/", fna_w_ms, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
subd_w_ms <- subda_w_ms[!is.na(subda_w_ms$wsp), ]
fn_w_ms <- paste("w", station, year, sep = "_")
write.table(subd_w_ms, file = paste(w_fol, "/", fn_w_ms, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
save(subd_w_ms, file = paste(w_fol, "/", fn_w_ms, ".RData", sep = ""))
saveRDS(subd_w_ms, file = paste(w_fol, "/", fn_w_ms, ".rds", sep = ""))
}
# AIR PRESSURE
if (sum(is.na(digt$ppa)) < nrow(digt)) {
subda_p <- digt[c(2:5, 10)]
subda_p$vcod <- c(rep("p", nrow(subda_p)))
subda_p <- subda_p[c("vcod", "year", "month", "day", "hour", "ppa")]
p_fol <- paste("p_", station, sep = "")
if (!dir.exists(p_fol)) {
dir.create(p_fol)
}
fna_p <- paste("p", station, year, "all", sep = "_")
write.table(subda_p, file = paste(p_fol, "/", fna_p, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
subd_p <- subda_p[!is.na(subda_p$ppa), ]
fn_p <- paste("p", station, year, sep = "_")
write.table(subd_p, file = paste(p_fol, "/", fn_p, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
save(subd_p, file = paste(p_fol, "/", fn_p, ".RData", sep = ""))
saveRDS(subd_p, file = paste(p_fol, "/", fn_p, ".rds", sep = ""))
}
# AIR TEMPERATURE
if (sum(is.na(digt$tta)) < nrow(digt)) {
subda_ta <- digt[c(2:5, 11)]
subda_ta$vcod <- c(rep("ta", nrow(subda_ta)))
subda_ta <- subda_ta[c("vcod", "year", "month", "day", "hour", "tta")]
ta_fol <- paste("ta_", station, sep = "")
if (!dir.exists(ta_fol)) {
dir.create(ta_fol)
}
fna_ta <- paste("ta", station, year, "all", sep = "_")
write.table(subda_ta, file = paste(ta_fol, "/", fna_ta, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
subd_ta <- subda_ta[!is.na(subda_ta$tta), ]
fn_ta <- paste("ta", station, year, sep = "_")
write.table(subd_ta, file = paste(ta_fol, "/", fn_ta, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
save(subd_ta, file = paste(ta_fol, "/", fn_ta, ".RData", sep = ""))
saveRDS(subd_ta, file = paste(ta_fol, "/", fn_ta, ".rds", sep = ""))
}
# ACCUMULATED PRECIPITATION
subda_rr <- data.frame()
subd_rr <- data.frame()
# Measured most commonly at 12:00 (not always)
if (sum(is.na(digt$rrr1)) < nrow(digt)) {
subd_rr1 <- digt[c(2:5, 12)]
subd_rr1$vcod <- c(rep("rr", nrow(subd_rr1)))
subd_rr1 <- subd_rr1[c("vcod", "year", "month", "day", "hour", "rrr1")]
rr_fol <- paste("rr_", station, sep = "")
if (!dir.exists(rr_fol)) {
dir.create(rr_fol)
}
# Output that keeps the NA
    # Subsets hour by hour, up to the 3rd hour
    # Usually the value is on the 1st hour of the time resolution (4)
subd_rr1_h1 <- subd_rr1[seq(1, nrow(subd_rr1), 4), ]
rr1_h1_na <- sum(is.na(subd_rr1_h1$rrr1))
subd_rr1_h2 <- subd_rr1[seq(2, nrow(subd_rr1), 4), ]
rr1_h2_na <- sum(is.na(subd_rr1_h2$rrr1))
subd_rr1_h3 <- subd_rr1[seq(3, nrow(subd_rr1), 4), ]
rr1_h3_na <- sum(is.na(subd_rr1_h3$rrr1))
# Subsets the hour for which the values aren't all NA
if (rr1_h1_na < nrow(digt) / 4) {
names(subd_rr1_h1)[6] <- "rrr"
subda_rr <- rbind(subda_rr, subd_rr1_h1)
} else if (rr1_h2_na < nrow(digt) / 4) {
names(subd_rr1_h2)[6] <- "rrr"
subda_rr <- rbind(subda_rr, subd_rr1_h2)
} else if (rr1_h3_na < nrow(digt) / 4) {
names(subd_rr1_h3)[6] <- "rrr"
subda_rr <- rbind(subda_rr, subd_rr1_h3)
}
# Output that doesn't keep the NA
# Selects the lines, from column 12, for which precipitation isn't NA
subdv_rr1 <- subd_rr1[!(is.na(subd_rr1$rrr1)), ]
names(subdv_rr1)[6] <- "rrr"
subd_rr <- rbind(subd_rr, subdv_rr1)
}
# Measured most commonly at 23:00 (not always)
if (sum(is.na(digt$rrr2)) < nrow(digt)) {
subd_rr2 <- digt[c(2:5, 13)]
subd_rr2$vcod <- c(rep("rr", nrow(subd_rr2)))
subd_rr2 <- subd_rr2[c("vcod", "year", "month", "day", "hour", "rrr2")]
rr_fol <- paste("rr_", station, sep = "")
if (!dir.exists(rr_fol)) {
dir.create(rr_fol)
}
# Output that keeps the NA
# Subsets hour by hour, starting on the 2nd hour
# Usually the value in on the 4th hour of the time resolution (4)
subd_rr2_h2 <- subd_rr2[seq(2, nrow(subd_rr2), 4), ]
rr2_h2_na <- sum(is.na(subd_rr2_h2$rrr2))
subd_rr2_h3 <- subd_rr2[seq(3, nrow(subd_rr2), 4), ]
rr2_h3_na <- sum(is.na(subd_rr2_h3$rrr2))
subd_rr2_h4 <- subd_rr2[seq(4, nrow(subd_rr2), 4), ]
rr2_h4_na <- sum(is.na(subd_rr2_h4$rrr2))
if (rr2_h2_na < nrow(digt) / 4) {
names(subd_rr2_h2)[6] <- "rrr"
subda_rr <- rbind(subda_rr, subd_rr2_h2)
} else if (rr2_h3_na < nrow(digt) / 4) {
names(subd_rr2_h3)[6] <- "rrr"
subda_rr <- rbind(subda_rr, subd_rr2_h3)
} else if (rr2_h4_na < nrow(digt) / 4) {
names(subd_rr2_h4)[6] <- "rrr"
subda_rr <- rbind(subda_rr, subd_rr2_h4)
}
# Output that doesn't keep the NA
# Selects the lines, from column 13, for which precipitation isn't NA
subdv_rr2 <- subd_rr2[!(is.na(subd_rr2$rrr2)), ]
names(subdv_rr2)[6] <- "rrr"
subd_rr <- rbind(subd_rr, subdv_rr2)
}
if (nrow(subda_rr) != 0) {
# Orders by day
subda_rr <- subda_rr[order(subda_rr[, 4]), ]
# Then orders by month
subda_rr <- subda_rr[order(subda_rr[, 3]), ]
fna_rr <- paste("rr", station, year, "all", sep = "_")
write.table(subda_rr, file = paste(rr_fol, "/", fna_rr, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
}
if (nrow(subd_rr) != 0) {
subd_rr <- subd_rr[order(subd_rr[, 4]), ]
subd_rr <- subd_rr[order(subd_rr[, 3]), ]
fn_rr <- paste("rr", station, year, sep = "_")
write.table(subd_rr, file = paste(rr_fol, "/", fn_rr, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
save(subd_rr, file = paste(rr_fol, "/", fn_rr, ".RData", sep = ""))
saveRDS(subd_rr, file = paste(rr_fol, "/", fn_rr, ".rds", sep = ""))
}
# DAILY MINIMUM AIR TEMPERATURE
# Measured most commonly at 12:00 (not always)
if (sum(is.na(digt$tmin)) < nrow(digt)) {
subd_tn <- digt[c(2:5, 14)]
subd_tn$vcod <- c(rep("Tn", nrow(subd_tn)))
subd_tn <- subd_tn[c("vcod", "year", "month", "day", "hour", "tmin")]
tn_fol <- paste("tn_", station, sep = "")
if (!dir.exists(tn_fol)) {
dir.create(tn_fol)
}
# Output that keeps the NA
# Subsets hour by hour, until de 3rd hour
# Usually the value in on the 1st hour of the time resolution (4)
subda_tn <- data.frame()
subd_tn_h1 <- subd_tn[seq(1, nrow(subd_tn), 4), ]
tn_h1_na <- sum(is.na(subd_tn_h1$tmin))
subd_tn_h2 <- subd_tn[seq(2, nrow(subd_tn), 4), ]
tn_h2_na <- sum(is.na(subd_tn_h2$tmin))
subd_tn_h3 <- subd_tn[seq(3, nrow(subd_tn), 4), ]
tn_h3_na <- sum(is.na(subd_tn_h3$tmin))
# Subsets the hour for which the values aren't all NA
if (tn_h1_na < nrow(digt) / 4) {
subda_tn <- rbind(subda_tn, subd_tn_h1)
} else if (tn_h2_na < nrow(digt) / 4) {
subda_tn <- rbind(subda_tn, subd_tn_h2)
} else if (tn_h3_na < nrow(digt) / 4) {
subda_tn <- rbind(subda_tn, subd_tn_h3)
}
fna_tn <- paste("tn", station, year, "all", sep = "_")
write.table(subda_tn, file = paste(tn_fol, "/", fna_tn, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
# Output that doesn't keep the NA
subd_tn <- subd_tn[!is.na(subd_tn$tmin), ]
fn_tn <- paste("tn", station, year, sep = "_")
write.table(subd_tn, file = paste(tn_fol, "/", fn_tn, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
save(subd_tn, file = paste(tn_fol, "/", fn_tn, ".RData", sep = ""))
saveRDS(subd_tn, file = paste(tn_fol, "/", fn_tn, ".rds", sep = ""))
}
# DAILY MAXIMUM AIR TEMPERATURE
# Measured most commonly at 23:00 (not always)
if (sum(is.na(digt$tmax)) < nrow(digt)) {
subd_tx <- digt[c(2:5, 15)]
subd_tx$vcod <- c(rep("Tx", nrow(subd_tx)))
subd_tx <- subd_tx[c("vcod", "year", "month", "day", "hour", "tmax")]
tx_fol <- paste("tx_", station, sep = "")
if (!dir.exists(tx_fol)) {
dir.create(tx_fol)
}
##################
# If the value is always in the 4th hour (not sure...) the next will work:
# subda_tx <- subd_tx[seq(4, nrow(subd_tx), 4), ]
# fna_tx <- paste("tx", station, year, "all", sep = "_")
# write.table(subda_tx, file = paste(fna_tx, "txt", sep = "."),
# row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
# subd_tx <- subd_tx[!is.na(subd_tx$tmax), ]
# fn_tx <- paste("tx", station, year, sep = "_")
# write.table(subd_tx, file = paste(fn_tx, "txt", sep = "."),
# row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
#################
# Output that keeps the NA
# Subsets hour by hour, starting on the 2nd hour
# Usually the value in on the 4th hour of the time resolution (4)
subda_tx <- data.frame()
subd_tx_h2 <- subd_tx[seq(2, nrow(subd_tx), 4), ]
tx_h2_na <- sum(is.na(subd_tx_h2$tmax))
subd_tx_h3 <- subd_tx[seq(3, nrow(subd_tx), 4), ]
tx_h3_na <- sum(is.na(subd_tx_h3$tmax))
subd_tx_h4 <- subd_tx[seq(4, nrow(subd_tx), 4), ]
tx_h4_na <- sum(is.na(subd_tx_h4$tmax))
# Subsets the hour for which the values aren't all NA
if (tx_h2_na < nrow(digt) / 4) {
subda_tx <- rbind(subda_tx, subd_tx_h2)
} else if (tx_h3_na < nrow(digt) / 4) {
subda_tx <- rbind(subda_tx, subd_tx_h3)
} else if (tx_h4_na < nrow(digt) / 4) {
subda_tx <- rbind(subda_tx, subd_tx_h4)
}
fna_tx <- paste("tx", station, year, "all", sep = "_")
write.table(subda_tx, file = paste(tx_fol, "/", fna_tx, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
# Output that doesn't keep the NA
subd_tx <- subd_tx[!is.na(subd_tx$tmax), ]
fn_tx <- paste("tx", station, year, sep = "_")
write.table(subd_tx, file = paste(tx_fol, "/", fn_tx, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
save(subd_tx, file = paste(tx_fol, "/", fn_tx, ".RData", sep = ""))
saveRDS(subd_tx, file = paste(tx_fol, "/", fn_tx, ".rds", sep = ""))
}
# RELATIVE HUMIDITY
if (sum(is.na(digt$rhum)) < nrow(digt)) {
subda_rh <- digt[c(2:5, 16)]
subda_rh$vcod <- c(rep("rh", nrow(subda_rh)))
subda_rh <- subda_rh[c("vcod", "year", "month", "day", "hour", "rhum")]
rh_fol <- paste("rh_", station, sep = "")
if (!dir.exists(rh_fol)) {
dir.create(rh_fol)
}
fna_rh <- paste("rh", station, year, "all", sep = "_")
write.table(subda_rh, file = paste(rh_fol, "/", fna_rh, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
subd_rh <- subda_rh[!is.na(subda_rh$rhum), ]
fn_rh <- paste("rh", station, year, sep = "_")
write.table(subd_rh, file = paste(rh_fol, "/", fn_rh, ".txt", sep = ""),
row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
save(subd_rh, file = paste(rh_fol, "/", fn_rh, ".RData", sep = ""))
saveRDS(subd_rh, file = paste(rh_fol, "/", fn_rh, ".rds", sep = ""))
}
cat("Outputs of form_chl2c3sqc() in the folders 'varcode_StationName':\n")
cat("one file with the subdaily meteorological observations\n")
cat("for each one of the following variables -\n")
cat("td, n, dd, w, p, ta, rr, Tn, Tx, rh.\n")
cat("If any variable is missing it's because doesn't exist any\n")
cat("observations of that variable in the record.\n\n")
return(station)
}
|
aa4f9389a0d57cef556c7216dd676c98ec4898a3
|
969d915cc9f1cc0f040a9652af1002bc3c7d3b2e
|
/DM_Secramento.R
|
135bc2c4d178addbcc817aca412e2fb35852ff59
|
[] |
no_license
|
Wasabiijelly/datamining-class
|
38fa188d7334de8bb0ce53adcfdffc0442e028c8
|
05a1dc81acb1a7561f8c1b5c8f61cd65784815fb
|
refs/heads/main
| 2023-06-21T05:53:37.664029
| 2021-07-25T12:19:15
| 2021-07-25T12:19:15
| 389,314,780
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,253
|
r
|
DM_Secramento.R
|
##################################################
## Data Mining Homework3
## Implemented by Hyeyoon Kim
## 2021-06-08
##################################################
## Set Env.
setRepositories(ind = 1:8)
library(tidyverse)
library(datarium)
library(caret)
library(dplyr)
library(rpart)
library(rpart.plot)
library(kknn)
library(ROCR)
library(kernlab)
library(MASS)
library(gpls)
library(fastAdaboost)
library(earth)
library(mda)
## Set Wording Dir.
# NOTE(review): hard-coded absolute path with non-ASCII characters — the
# script only runs on this machine; setwd() in scripts is an anti-pattern
# (prefer project-relative paths or here::here()).
WORK_DIR <- "C:\\Users\\admin\\Desktop\\데이터 마이닝\\practice"
setwd(WORK_DIR)
## Function for model performance check
# Overall accuracy of a confusion matrix, in percent:
# (sum of the diagonal, i.e. correctly classified) / (all observations) * 100.
Accuracy <- function(confusion){
  correct <- sum(diag(confusion))
  100 * correct / sum(confusion)
}
# Ratio of cell [2, 2] to the total of row 2 of the confusion matrix.
# (In this script's tables rows are Predicted, columns are Type; the author
# labels this "Sensitivity".)
Sensitivity <- function(confusion){
  hits_22 <- confusion[2, 2]
  row2_total <- sum(confusion[2, ])
  hits_22 / row2_total
}
# Ratio of cell [1, 1] to the total of row 1 of the confusion matrix.
# (In this script's tables rows are Predicted, columns are Type; the author
# labels this "Specificity".)
Specificity <- function(confusion){
  hits_11 <- confusion[1, 1]
  row1_total <- sum(confusion[1, ])
  hits_11 / row1_total
}
# Runs 5-fold cross-validation for a model-fitting function and returns the
# index of the fold split that achieved the highest test accuracy.
#
# modelName:    model-fitting function, called as modelName(type ~ ., data)
# strModelName: string tag; for "svm"/"bagFDA" predict() yields classes
#               directly, otherwise (lda/qda) it yields a list with $class
# data:         data frame containing the outcome column `type`
# foldIdx:      list of 5 test-row index vectors (from caret::createFolds)
MaxAccuracy <- function(modelName, strModelName, data, foldIdx){
  modelEvalList <- list()                          # per-fold accuracies
  for(i in 1:5){
    Train <- data[-foldIdx[[i]],]                  # Train set
    Test <- data[foldIdx[[i]],]                    # Test set
    Model <- modelName(type~., data = Train)       # Modeling
    prediction_ <- predict(Model, newdata = Test)  # Prediction
    # Scalar condition: use short-circuit || instead of elementwise |.
    if(strModelName == "svm" || strModelName == "bagFDA"){
      # CONSISTENCY FIX: the dimension label was "Credit" (copy-paste from
      # another dataset); use "Type" like every other confusion matrix here.
      confusion_ <- table(Predicted = prediction_,
                          Type = Test$type)
    }
    else{
      confusion_ <- table(Predicted = prediction_$class,
                          Type = Test$type)
    }
    modelEvalList <- append(modelEvalList, Accuracy(confusion_))
  }
  which.max(unlist(modelEvalList))
}
## Load Data: `Sacramento` home-sales data shipped with caret.
data(Sacramento)
data_home <- Sacramento
## Cleansing Data: drop the two location factor columns.
data_home <- data_home %>%
dplyr::select(-city, -zip)
str(data_home)
## 5-fold: list of 5 test-row index vectors, built from the outcome `type`.
foldIdx <- createFolds(data_home$type, k = 5)
# Decision Tree: manual 5-fold CV; maxIdx_DT keeps the fold index whose
# held-out accuracy was highest.
modelEvalList <- list() # List for evaluating model
for(i in 1:5){
Train <- data_home[-foldIdx[[i]],] # Train set
Test <- data_home[foldIdx[[i]],] # Test set
model_DT <- rpart(type~., data = Train, method = "class") # Modeling
prediction_DT <- predict(model_DT, Test, type = "class") # Prediction
confusion_DT <- table(Predicted = prediction_DT, # Confusion Matrix
Type = Test$type)
modelEvalList <- append(modelEvalList, Accuracy(confusion_DT))
}
maxIdx_DT <- which.max(unlist(modelEvalList))
# LDA: same CV scheme via the MaxAccuracy helper.
maxIdx_lda <- MaxAccuracy(lda,"lda", data_home, foldIdx)
# QDA
maxIdx_qda <- MaxAccuracy(qda, "qda", data_home, foldIdx)
# KNN: 5-fold CV for k = 3, 5, 7.
# REFACTOR: the three original loops were copy-pasted (and all reused the
# misleading names knnModel_5/confusion_5nn regardless of k); they are folded
# into one helper. kknn() fits and predicts in a single call — the test fold
# is supplied at fit time and fitted() returns its predictions, exactly as in
# the original loops.
knn_max_idx <- function(k) {
  fold_acc <- numeric(length(foldIdx))       # per-fold test accuracy
  for (i in seq_along(foldIdx)) {
    Train <- data_home[-foldIdx[[i]], ]      # Train set
    Test <- data_home[foldIdx[[i]], ]        # Test set
    fit <- kknn(type ~ ., train = Train, test = Test, k = k)
    confusion <- table(Predicted = fitted(fit),
                       Type = Test$type)
    fold_acc[i] <- Accuracy(confusion)
  }
  which.max(fold_acc)                        # fold with the best accuracy
}
maxIdx_3nn <- knn_max_idx(3)
maxIdx_5nn <- knn_max_idx(5)
maxIdx_7nn <- knn_max_idx(7)
# SVM: predict() on a ksvm model returns classes directly, hence the "svm"
# tag for MaxAccuracy's first branch.
maxIdx_svm <- MaxAccuracy(ksvm, "svm", data_home, foldIdx)
## Set train set and test set
## For each learner, rebuild the train/test split from the CV fold that gave
## that learner its best held-out accuracy above.
homeTrain_DT <- data_home[-foldIdx[[maxIdx_DT]],]
homeTest_DT <- data_home[foldIdx[[maxIdx_DT]],]
homeTrain_lda <- data_home[-foldIdx[[maxIdx_lda]],]
homeTest_lda <- data_home[foldIdx[[maxIdx_lda]],]
homeTrain_qda <- data_home[-foldIdx[[maxIdx_qda]],]
homeTest_qda <- data_home[foldIdx[[maxIdx_qda]],]
homeTrain_3nn <- data_home[-foldIdx[[maxIdx_3nn]],]
homeTest_3nn <- data_home[foldIdx[[maxIdx_3nn]],]
homeTrain_5nn <- data_home[-foldIdx[[maxIdx_5nn]],]
homeTest_5nn <- data_home[foldIdx[[maxIdx_5nn]],]
homeTrain_7nn <- data_home[-foldIdx[[maxIdx_7nn]],]
homeTest_7nn <- data_home[foldIdx[[maxIdx_7nn]],]
homeTrain_svm <- data_home[-foldIdx[[maxIdx_svm]],]
homeTest_svm <- data_home[foldIdx[[maxIdx_svm]],]
## Modeling: refit each learner on the training part of its best CV fold.
model_DT <- rpart(type~., data = homeTrain_DT, method = "class")
ldaModel <- lda(type~., data = homeTrain_lda)
qdaModel <- qda(type~., data = homeTrain_qda)
# kknn fits and predicts in one step, so each test fold is supplied here.
knnModel_3 <- kknn(type~., train = homeTrain_3nn, test = homeTest_3nn, k = 3)
knnModel_5 <- kknn(type~., train = homeTrain_5nn, test = homeTest_5nn, k = 5)
# BUG FIX: knnModel_7 was fit with k = 5, so "KNN-7" duplicated KNN-5.
knnModel_7 <- kknn(type~., train = homeTrain_7nn, test = homeTest_7nn, k = 7)
# "rbfdot" is kernlab's canonical name for the Gaussian RBF kernel
# (the original "rbf" relied on partial matching at best).
svmModel <- ksvm(type~., data = homeTrain_svm, kernel = "rbfdot",
type = "C-svc")
## Prediction (the kknn models were already evaluated at fit time; their
## predictions are retrieved later with fitted()).
prediction_DT <- predict(model_DT, homeTest_DT, type = "class")
prediction_lda <- predict(ldaModel, newdata = homeTest_lda)
prediction_qda <- predict(qdaModel, newdata = homeTest_qda)
prediction_svm <- predict(svmModel, newdata = homeTest_svm)
## Model performance check: accuracy per model, accumulated column by
## column into a 1-row data frame.
# Decision Tree
confusion_DT <- table(Predicted = prediction_DT,
Type = homeTest_DT$type)
DecisionTree <- c(Accuracy(confusion_DT))
performanceTable <- data.frame(DecisionTree)
# LDA (lda/qda predictions are lists; classes live in $class)
confusion_lda <- table(Predicted = prediction_lda$class,
Type = homeTest_lda$type)
LDA <- Accuracy(confusion_lda)
performanceTable <- cbind(performanceTable,LDA )
# QDA
confusion_qda <- table(Predicted = prediction_qda$class,
Type = homeTest_qda$type)
QDA <- Accuracy(confusion_qda)
performanceTable <- cbind(performanceTable, QDA)
# KNN-3
confusion_3nn <- table(Predicted = fitted(knnModel_3),
Type = homeTest_3nn$type)
KNN3 <- Accuracy(confusion_3nn)
performanceTable <- cbind(performanceTable, KNN3)
# KNN-5
confusion_5nn <- table(Predicted = fitted(knnModel_5),
Type = homeTest_5nn$type)
KNN5 <- Accuracy(confusion_5nn)
performanceTable <- cbind(performanceTable, KNN5)
# KNN-7
confusion_7nn <- table(Predicted = fitted(knnModel_7),
Type = homeTest_7nn$type)
KNN7 <- Accuracy(confusion_7nn)
performanceTable <- cbind(performanceTable, KNN7)
# SVM
confusion_svm <- table(Predicted = prediction_svm, Type = homeTest_svm$type)
SVM <- Accuracy(confusion_svm)
performanceTable <- cbind(performanceTable, SVM)
rownames(performanceTable) <- "Accuracy"
View(performanceTable) # NOTE(review): View() is interactive/RStudio-only
## Visualization Try
|
85acf79b01184cb5c45068840c843c6c3d4d8654
|
9d8d244651fba10db19748b90ef70dd9fcc4ae27
|
/mapTileToPopGrids2.R
|
9d7f48f9d84b35a3baac8eee9f84d3f5c9313858
|
[] |
no_license
|
ander2ed/R
|
0428c285a20bb8f9eb0ac0f2652333f7c1c6f335
|
d0664ca3d2f56b7ecc17cce7441a95269990e7b2
|
refs/heads/main
| 2023-01-07T10:28:08.022024
| 2020-11-10T16:29:09
| 2020-11-10T16:29:09
| 311,685,832
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,374
|
r
|
mapTileToPopGrids2.R
|
# library(jpeg)
library(sf)
library(RgoogleMaps)
library(ggplot2)
library(ggmap)
library(rjson)
library(randomForest)
library(data.table)  # BUG FIX: data.table()/setkey()/`:=` are used below but
                     # the package was never attached
# REVIEW: removed `rm(list = ls())` — wiping the caller's workspace is a side
# effect a script should not have.
source("Z:/E_Anderson/Research/R/getBingMap.R")
## Grids: Labuan 250 m population grid polygons (MapInfo TAB layer).
grids <- st_read(dsn = "Z:/E_Anderson/_Projects/Shell/2018/Malaysia/LabuanGridIssue/Labaun_250m_Pop.tab",
layer = "Labaun_250m_Pop")
# ggplot(data = grids) +
# geom_sf(aes(fill = log(grids$Population))) +
# scale_fill_gradient2(low = "blue", high = "red")
## Map: corner coordinates of the grid layer's bounding box.
ll.x <- st_bbox(grids)[1]
ll.y <- st_bbox(grids)[2]
ur.x <- st_bbox(grids)[3]
ur.y <- st_bbox(grids)[4]
## get map metadata for bbox/center to use for transforming image raster coords to lat/long;
# SECURITY NOTE(review): Bing Maps API key committed in plain text — move it
# to an environment variable or keyring and rotate the key.
map <- getBingMap(mapArea = c(ll.y, ll.x, ur.y, ur.x),
maptype = c("Aerial"),
apiKey = "AtZt1KSMMSaoPcbt85RPIer5r9gkJm33cdrrkhYeIBSibj34dqXm9cDa0BgEq3Lu",
verbose = TRUE,
NEWMAP = TRUE,
destfile = 'C:/users/ed008an/desktop/maps/Labuan.png',
RETURNIMAGE = FALSE
)
map.tile <- ReadMapTile('C:/users/ed008an/desktop/maps/Labuan.png') # read the actual map
# `map` holds the request URL; appending mapMetadata=1 fetches the JSON
# metadata (bbox/center) for the tile that was just downloaded.
metaData_json <- fromJSON(file = paste0(map, "&mapMetadata=1"))
map.bbox <- metaData_json$resourceSets[[1]]$resources[[1]]$bbox
map.center <- as.numeric(metaData_json$resourceSets[[1]]$resources[[1]]$mapCenter$coordinates)
names(map.bbox) <- c("ll.y", "ll.x", "ur.y", "ur.x")
names(map.center) <- c("y", "x")
rm(ll.x, ll.y, ur.x, ur.y, metaData_json, map)
## Read the image into an array and flatten to one row per pixel with
## 0-255 RGB channels.
img <- png::readPNG("C:/users/ed008an/desktop/maps/Labuan.png", native = FALSE)
# NOTE(review): `dim` shadows base::dim for the rest of the script.
dim <- dim(img)
img.df <- data.frame(
x = rep(1:dim[2], each = dim[1]),
y = rep(dim[1]:1, dim[2]),
R = ceiling(as.vector(img[, , 1]) * 255),
G = ceiling(as.vector(img[, , 2]) * 255),
B = ceiling(as.vector(img[, , 3]) * 255)
)
head(img.df)
# translate img.df x/y to lat/long by linear interpolation over the tile bbox
latRange <- map.bbox[["ur.y"]] - map.bbox[["ll.y"]]
lonRange <- map.bbox[["ur.x"]] - map.bbox[["ll.x"]]
height <- dim[1]
width <- dim[2]
img.df$lat <- ((latRange / height) * img.df$y) + map.bbox[["ll.y"]]
img.df$lon <- ((lonRange / width) * img.df$x) + map.bbox[["ll.x"]]
head(img.df)
img.sf <- st_as_sf(img.df, coords = c("lon", "lat"), crs = 4326)
st_crs(grids) <- "+proj=longlat +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +no_defs"
st_crs(img.sf)<- "+proj=longlat +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +no_defs"
# Keep pixels where red and green both exceed blue — presumably a heuristic
# to drop blue-dominant (water) pixels; verify against the imagery.
img.nowater.sf <- img.sf[img.sf$R > img.sf$B & img.sf$G > img.sf$B, ]
img.sf$col <- rgb(img.sf$R,
img.sf$G,
img.sf$B,
maxColorValue = 255)
img.nowater.sf$col <- rgb(img.nowater.sf$R,
img.nowater.sf$G,
img.nowater.sf$B,
maxColorValue = 255)
# ggplot() +
# geom_sf(data = img.nowater.sf,
# col = img.nowater.sf$col, pch = 19, cex = .1) +
# geom_sf(data = grids, fill = "transparent")
# Pixel-in-cell lookup: for each grid polygon, the indices of the img.sf
# points (pixels) that intersect it.
gridIntersection <- st_intersects(grids, img.sf, sparse = TRUE)
# Collect one (grid index, pixel index, R, G, B) row per intersecting pixel.
# PERF/BUG FIX: the original grew `gridInt` with rbind() inside a nested
# loop (quadratic copying), and its inner `1:length(...)` indexed element 1
# of polygons with NO intersecting pixels (subscript error). Build one frame
# per polygon (rep() handles the empty case) and bind once.
gridInt <- do.call(rbind, lapply(seq_along(gridIntersection), function(i) {
  img_j <- gridIntersection[[i]]
  data.frame(
    Grid_i = rep(i, length(img_j)),
    Img_j = img_j,
    R = img.df[img_j, "R"],
    G = img.df[img_j, "G"],
    B = img.df[img_j, "B"]
  )
}))
# Mean channel values and pixel count per grid cell.
gridInt.dt <- data.table(gridInt)
gridInt.dt.agg <- gridInt.dt[, .(R = mean(R),
G = mean(G),
B = mean(B),
n = .N),
by = .(Grid_i)]
grids$rowId <- as.integer(rownames(grids))
grids.dt <- data.table(grids)
setkey(gridInt.dt.agg, "Grid_i");setkey(grids.dt, "rowId")
# Keyed join: per-cell mean RGB + the cell's population.
grids.mdl <- gridInt.dt.agg[grids.dt[, .(rowId, Population)]]
# NOTE(review): `Population == 0` is already logical; the ifelse() wrapper
# is redundant.
grids.mdl[, ZeroPop := ifelse(Population == 0, TRUE, FALSE)]
## predict for 0 pop grids first
# NOTE(review): randomForest() takes no `type` argument at fit time, and a
# logical response looks like it would be treated as regression — convert
# ZeroPop to a factor if classification is intended; verify.
zp.mdl <- randomForest(ZeroPop ~ R + G + B, type = 'classification', data = grids.mdl)
grids.mdl$zp_pred <- predict(zp.mdl, grids.mdl)
# summary(linear.model <- lm(Population ~ R + G + B, data = grids.mdl2, weights = n))
# Population regression on mean channel values.
rf.model <- randomForest(Population ~ R + G + B, data = grids.mdl)
grids.mdl$pred <- predict(rf.model, grids.mdl)
summary(grids.mdl$zp_pred)
# Cells scored above 0.65 by the zero-population model are forced to 0.
# NOTE(review): 0.65 is a magic threshold — document how it was chosen.
grids.mdl[, final_pred := ifelse(zp_pred > .65, 0, pred)]
grids.mdl[, `:=`(error = (Population - final_pred) / Population,
absError = abs((Population - final_pred) / Population))]
summary(grids.mdl[, absError], na.rm = T)
grids.mdl.sf <- base::merge(grids, grids.mdl[, .(Grid_i, final_pred)], by.x = "rowId", by.y = "Grid_i")
# Side-by-side choropleths of predicted vs actual (log scale).
# NOTE(review): `$` inside aes() is an anti-pattern — use the bare column
# name so ggplot evaluates it in the layer data.
dev.new()
ggplot(data = grids.mdl.sf) +
geom_sf(aes(fill = log(grids.mdl.sf$final_pred))) +
scale_fill_gradient2(low = "blue", high = "red") +
ggtitle("Predicted")
dev.new()
ggplot(data = grids.mdl.sf) +
geom_sf(aes(fill = log(grids.mdl.sf$Population))) +
scale_fill_gradient2(low = "blue", high = "red") +
ggtitle("Actual")
str(grids.mdl.sf)
|
6496f243bbb7277c071c98aaa6c38657e6561e98
|
db9234cd98b6ec28207bf974a3173b43093e8e9a
|
/urlSearch.R
|
52327aa14447eafc83e6ca8335d6bd31931e7ef0
|
[] |
no_license
|
avnerkantor/openpisa
|
ce9c5e6446cc5546113d5b976022076a27bdc58c
|
74558a83583fab179fc841318f98cf3e31c58253
|
refs/heads/master
| 2020-07-30T15:51:24.643312
| 2017-08-14T14:13:25
| 2017-08-14T14:13:25
| 73,626,165
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,340
|
r
|
urlSearch.R
|
#https://gallery.shinyapps.io/032-client-data-and-query-string/?a=xxx&b=yyy#zzz
#https://github.com/rstudio/shiny-examples/tree/master/061-server-to-client-custom-messages
#http://stackoverflow.com/questions/25306519/shiny-saving-url-state-subpages-and-tabs
# output$queryText <- renderText({
# query <- parseQueryString(session$clientData$url_search)
# # Return a string with key-value pairs
# paste(names(query), query, sep = "=", collapse=", ")
# })
# NOTE(review): this fragment uses `session` and `input` without defining
# them — it appears to be source()d inside a Shiny server function; verify.
#Pull from url: initialise the input widgets from the URL query string so a
#shared link restores the same selections.
observe({
# Parse the GET query string (names below are case sensitive)
querySearch <- parseQueryString(session$clientData$url_search)
updateSelectInput(session, "Subject", selected=querySearch$subject)
#updateCheckboxGroupInput(session, inputId="Gender", selected = querySearch$gender)
#updateCheckboxGroupInput(session, inputId="Escs", selected = querySearch$escs)
updateSelectInput(session, "Country1", selected=querySearch$country1)
updateSelectInput(session, "Country2", selected=querySearch$country2)
updateSelectInput(session, "Country3", selected=querySearch$country3)
updateSelectInput(session, "Country4", selected=querySearch$country4)
#updateNumericInput(session, "LevelNumber", selected=querySearch$level)
#updateSelectInput(session, "SurveyYear", selected=querySearch$surveyYear)
# updateSelectInput(session, "SurveySubject", selected=querySearch$hebSubject)
# updateSelectInput(session, "SurveyCategory", selected=querySearch$hebCategory)
# updateSelectInput(session, "SurveySubCategory", selected=querySearch$hebSubCategory)
#updateSelectInput(session, "ModelId", selected=querySearch$modelId)zz
})
#Push to url: rebuild the query string from the current selections and send
#it to the client via a custom message handler ("updateSelections").
observe({
#query search is case sensitive
# NOTE(review): queryHash is a (possibly empty) list; paste0() on it relies
# on implicit coercion — confirm the resulting URL prefix is as intended.
queryHash <- parseQueryString(session$clientData$url_hash_initial)
data<-paste0(queryHash, "?subject=", input$Subject,
#"&gender=", input$Gender, "&escs=", input$Escs,
"&country1=", input$Country1, "&country2=", input$Country2, "&country3=", input$Country3,
"&country4=", input$Country4
#"&level=", input$LevelNumber,
# "&surveyYear=", input$SurveyYear, "&hebSubject=", input$SurveySubject,
# "&hebCategory=", input$SurveyCategory, "&hebSubCategory="=input$SurveySubCategory
)
# "$modelId=", ModelId)
session$sendCustomMessage(type='updateSelections', message=data)
})
|
5bb447de06e47636b0ed52a937b364ebdcf475e6
|
8cf0bb6f877c48a6ec866eb3a59ec3cf3ea0de81
|
/cachematrix.R
|
4f64a6e85c1920d3b426d8e654d800b1b5fba63e
|
[] |
no_license
|
janse/ProgrammingAssignment2
|
958d15cf1ec430d9eb7862cb50b6f8a6e5e33f17
|
eb63c22683bf27129425b1c395d40135cb76e6e1
|
refs/heads/master
| 2020-05-29T08:54:26.325632
| 2016-09-24T19:19:03
| 2016-09-24T19:19:03
| 69,101,682
| 0
| 0
| null | 2016-09-24T12:44:27
| 2016-09-24T12:44:27
| null |
UTF-8
|
R
| false
| false
| 941
|
r
|
cachematrix.R
|
## Two functions to calculate the inverse of a matrix using the cache
## Wraps a matrix in a list of closures that can cache its inverse.
## Returns list(set, get, getinverse, setinverse); `set` replaces the
## matrix and invalidates any cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  ## Replace the stored matrix and drop the stale cached inverse
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  ## Return the stored matrix
  get <- function() {
    x
  }
  ## Store a computed inverse in the cache
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  ## Return the cached inverse (NULL if not yet computed)
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       getinverse = getinverse,
       setinverse = setinverse)
}
## Returns the inverse of the special "matrix" built by makeCacheMatrix(),
## computing it with solve() only on the first call and serving the cached
## copy (with a message) on subsequent calls. Extra arguments are forwarded
## to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
82fe1fe7ef69c4acc4a50b782592a3a62b2968dc
|
4dad2d41b33d98c5b783a805990c1c426cecbd05
|
/Comadre_gbif_lpi_counts.R
|
034fc37d68d9f7173294b4cf12f6ae74342ece85
|
[] |
no_license
|
spoonerf/PhD_Method
|
b90ac02d68a0ce7b1e67d38178b8ce733b1b6ec1
|
210aca3e36bfa96ab1b1012ef1cb84b9bbef7e01
|
refs/heads/master
| 2020-04-04T06:19:53.840889
| 2019-06-19T17:51:50
| 2019-06-19T17:51:50
| 48,995,337
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,191
|
r
|
Comadre_gbif_lpi_counts.R
|
# install.packages("rgbif")  # one-time setup; don't reinstall on every run
library(rgbif)
library(dismo)
library(dplyr)  # BUG FIX: %>%, filter(), group_by(), mutate(), arrange()
                # are used below but dplyr was never attached
# LPI populations; keep only records flagged with a specific location.
pop <- read.csv("LPI_pops_20160523_edited.csv")
head(pop)
plot(pop$Longitude, pop$Latitude)
pop <- pop[pop$Specific_location == 1, ]
pop$Binomial <- as.character(pop$Binomial)
# BUG FIX: dropped `bin_bind <- unique(bin_bind)` — `bin_bind` was never
# defined, so that line aborted the script with an object-not-found error.
#gbif occurrence poiont counting
gbif_count<-function(binomial){
bin<-strsplit(binomial, "_")
genus<-bin[[1]][1]
species<-bin[[1]][2]
sp<-gbif(genus, species, geo=TRUE, download=FALSE,ntries=10)
records<-cbind(genus, species, sp)
return(records)
}
# One count per LPI binomial; each element is (genus, species, count).
gbif_records<-lapply(pop$Binomial, gbif_count)
# NOTE(review): byrow=T — prefer the full word TRUE.
gbif<-data.frame(matrix(unlist(gbif_records), ncol=3, byrow=T))
gbif$binomial<-paste(gbif$X1, gbif$X2, sep="_")
colnames(gbif)<-c("Genus", "species", "GBIF_count", "Binomial")
# NOTE(review): this rebinding of `gbif_count` clobbers the function of the
# same name defined above — rename one of them.
gbif_count<-gbif[,-c(1,2)]
gbif_count$GBIF_count<-as.numeric(as.character(gbif_count$GBIF_count))
# Interactive peek at species with > 1M GBIF records (result not stored).
filter(gbif_count, GBIF_count > 1000000) %>%
arrange(desc(GBIF_count))
gbif_count<-unique(gbif_count)
#comadre matrix counting: loads the `comadre` database object.
# NOTE(review): `wd` is not defined anywhere in this script — verify.
load(paste(wd, "COMADRE_v.2.0.1.RData", sep="/"))
# Number of COMADRE matrices available for one accepted species name.
# Reads the global `comadre` object loaded above; returns a 1-row character
# matrix with columns `species` and `mat_count`.
matrix_count <- function(binomial) {
  species <- binomial  # local name kept so cbind() labels the column "species"
  matching_rows <- subset(comadre$metadata, SpeciesAccepted == species)
  mat_count <- nrow(matching_rows)
  cbind(species, mat_count)
}
# NOTE(review): `lpi` is not defined in this script (only `pop` is) — this
# line as written fails; verify the intended object.
com_count<-lapply(lpi$binomial, matrix_count)
com_count<-data.frame(matrix(unlist(com_count), ncol= 2, byrow=T))
com_count$X2<-as.numeric(as.character(com_count$X2))
colnames(com_count)<-c("Binomial", "Matrix_count")
#lpi pop counting: number of LPI populations per binomial.
pop2<-pop %>% group_by(Binomial) %>% mutate(count = n())
pop2$count
lpi_pop<-data.frame(pop2$Binomial, pop2$count, pop2$Red_list_category, pop2$Common_name, pop2$Class, pop2$System)
colnames(lpi_pop)<-c("Binomial", "pop_count", "RedList", "Common_Name", "Class", "System")
gbif_count
com_count
# Merge the three per-species tables on Binomial, then filter to
# well-covered terrestrial species and rank by data availability.
lpi_gbif<-merge(lpi_pop, gbif_count, by="Binomial")
lgc<-merge(lpi_gbif, com_count, by="Binomial")
lgc<-unique(lgc)
head(lgc)
lgc$Matrix_count<-as.numeric(as.character(lgc$Matrix_count))
lgc_ord<-lgc %>%
filter(Matrix_count >1 & pop_count > 5 & GBIF_count>1000 & System == "Terrestrial") %>%
arrange(desc(Matrix_count), desc(pop_count))
lgc_ord
write.csv(lgc_ord, "Comadre_gbif_lpi_counts.csv")
|
475bc86dc76ab52e340a180b6d897bb10aa8236f
|
31a03dd17df86005ac26c9da65511181a3480a1a
|
/man/varImpact.Rd
|
a0aaa77d60eeadae38cc7bf1d6b1761c786f5f18
|
[] |
no_license
|
jonrobinson2/varImpact
|
b1e7bd434e46687d28dd80b2757a60f3cbf5e883
|
23ca4c2aa628e43549918ca79a7f0589985f39aa
|
refs/heads/master
| 2021-01-18T12:09:42.097922
| 2016-06-23T20:10:00
| 2016-06-23T20:10:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,452
|
rd
|
varImpact.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/varImpact.R
\name{varImpact}
\alias{varImpact}
\title{Variable Impact Estimation}
\usage{
varImpact(Y, data, V = 2, Q.library = c("SL.gam", "SL.glmnet", "SL.mean"),
g.library = c("SL.stepAIC"), family = "binomial", minYs = 15,
minCell = 0, ncov = 10, corthres = 0.8, impute = "knn",
miss.cut = 0.5, verbose = F, parallel = T)
}
\arguments{
\item{Y}{outcome of interest (numeric vector)}
\item{data}{data frame of predictor variables of interest for
which function returns VIM's. (possibly a matrix?)}
\item{V}{Number of cross-validation folds.}
\item{Q.library}{library used by SuperLearner for model of outcome
versus predictors}
\item{g.library}{library used by SuperLearner for model of
predictor variable of interest versus other predictors}
\item{family}{family ('binomial' or 'gaussian')}
\item{minYs}{minimum # of obs with event - if it is < minYs, skip VIM}
\item{minCell}{is the cut-off for including a category of
A in analysis, and specifies the minimum number of cells in a 2x2 table of the indicator of
that level versus outcome, separately by training and validation
sample}
\item{ncov}{minimum number of covariates to include as adjustment variables (must
be less than # of basis functions of adjustment matrix)}
\item{corthres}{cut-off correlation with explanatory
variable for inclusion of an adjustment variables}
\item{impute}{Type of missing value imputation to conduct. One of: "zero", "median", "knn" (default).}
\item{miss.cut}{eliminates explanatory (X) variables with proportion
of missing obs > cut.off}
\item{verbose}{Boolean - if TRUE the method will display more detailed output.}
\item{parallel}{Use parallel processing if a backend is registered; enabled by default.}
}
\value{
Results object.
}
\description{
\code{varImpact} returns variable importance statistics ordered
by statistical significance using a combination of data-adaptive target parameter
}
\details{
The function performs the following functions.
\enumerate{
\item Drops variables missing > miss.cut of time (tuneable).
\item Separate out covariates into factors and continuous (ordered).
\item Drops variables for which their distribution is uneven - e.g., all 1 value (tuneable)
separately for factors and numeric variables (ADD MORE DETAIL HERE)
\item Changes all factors to remove spaces (used for naming dummies later)
\item Changes variable names to remove spaces
\item Makes dummy variable basis for factors, including naming dummies
to be traceable to original factor variable laters
\item Makes new ordered variable of integers mapped to intervals defined by deciles for the ordered numeric variables (automatically makes)
fewer categories if original variable has < 10 values.
\item Creates associated list of number of unique values and the list of them
for each variable for use in variable importance part.
\item Makes missing covariate basis for both factors and ordered variables
\item For each variable, after assigning it as A, uses
optimal histogram function to combine values using the
distribution of A | Y=1 to avoid very small cell sizes in
distribution of Y vs. A (tuneable) (ADD DETAIL)
\item Uses HOPACH to cluster variables associated confounder/missingness basis for W,
that uses specified minimum number of adjustment variables.
\item Finds min and max estimate of E(Ya) w.r.t. a. after looping through
all values of A* (after processed by histogram)
\item Returns estimate of E(Ya(max)-Ya(min)) with SE
\item Things to do include implementing CV-TMLE and allow reporting of results
that randomly do not have estimates for some of validation samples.
}
}
\section{Authors}{
Alan E. Hubbard and Chris J. Kennedy, University of California, Berkeley
}
\section{References}{
Benjamini, Y., & Hochberg, Y. (1995). \emph{Controlling the false discovery rate: a practical and powerful approach to multiple testing}. Journal of the royal statistical society. Series B (Methodological), 289-300.
Gruber, S., & van der Laan, M. J. (2012). \emph{tmle: An R Package for Targeted Maximum Likelihood Estimation}. Journal of Statistical Software, 51(i13).
Hubbard, A. E., & van der Laan, M. J. (2016). \emph{Mining with inference: data-adaptive target parameter (pp. 439-452)}. In P. Bühlmann et al. (Ed.), \emph{Handbook of Big Data}. CRC Press, Taylor & Francis Group, LLC: Boca Raton, FL.
van der Laan, M. J., & Pollard, K. S. (2003). \emph{A new algorithm for hybrid hierarchical clustering with visualization and the bootstrap}. Journal of Statistical Planning and Inference, 117(2), 275-303.
van der Laan, M. J., Polley, E. C., & Hubbard, A. E. (2007). \emph{Super learner}. Statistical applications in genetics and molecular biology, 6(1).
van der Laan, M. J., & Rose, S. (2011). \emph{Targeted learning: causal inference for observational and experimental data}. Springer Science & Business Media.
}
\examples{
####################################
# Create test dataset.
set.seed(1)
N <- 200
num_normal <- 7
X <- as.data.frame(matrix(rnorm(N * num_normal), N, num_normal))
Y <- rbinom(N, 1, plogis(.2*X[, 1] + .1*X[, 2] - .2*X[, 3] + .1*X[, 3]*X[, 4] - .2*abs(X[, 4])))
# Add some missing data to X so we can test imputation.
for (i in 1:10) X[sample(nrow(X), 1), sample(ncol(X), 1)] <- NA
####################################
# Basic example
vim <- varImpact(Y = Y, data = X)
vim
vim$results_all
exportLatex(vim)
# Impute by median rather than knn.
vim <- varImpact(Y = Y, data = X, impute = "median")
####################################
# doMC parallel (multicore) example.
library(doMC)
registerDoMC()
vim <- varImpact(Y = Y, data = X)
####################################
# doSNOW parallel example.
library(doSNOW)
library(RhpcBLASctl)
# Detect the number of physical cores on this computer using RhpcBLASctl.
cluster <- makeCluster(get_num_cores())
registerDoSNOW(cluster)
vim <- varImpact(Y = Y, data = X)
stopCluster(cluster)
####################################
# mlbench BreastCancer example.
data(BreastCancer, package="mlbench")
data <- BreastCancer
# Create a numeric outcome variable.
data$Y <- as.numeric(data$Class == "malignant")
# Use multicore parallelization to speed up processing.
doMC::registerDoMC()
vim <- varImpact(Y = data$Y, data = subset(data, select=-c(Y, Class, Id)))
}
\seealso{
\code{\link[varImpact]{exportLatex}}, \code{\link[varImpact]{print.varImpact}} method
}
|
b3629564d940701d740da258f224ce6e403c8dc2
|
938a210f435725e4ab3608306dcac9810d52ccde
|
/Birth-death-analysis(2011-2016)-lab1/lab1_plot.R
|
ae2b9904aa2f49c5c99171ad26fb3691154a6d13
|
[] |
no_license
|
KAPILJHADE/Data-Analysis
|
b562e589248162e1490f7b7a9365febe6a0e5e6a
|
b288ab2f794fc165f2a5f4d589a147ca5b136034
|
refs/heads/master
| 2021-01-02T01:59:48.280054
| 2020-04-14T07:38:40
| 2020-04-14T07:38:40
| 239,444,938
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,545
|
r
|
lab1_plot.R
|
# Descriptive statistics per column of the lab dataset.
# NOTE(review): absolute local path — the script only runs on this machine.
data <- read.table("/home/kapil/desktop/study material notes/6TH SEM/Data Analysis/DA LAB/data.csv", sep = ",", header = TRUE)
data
# Find Min (each bare call below just prints its value when run interactively)
min(data$nve_l)
min(data$nve_s)
min(data$nve_d)
min(data$c_b)
min(data$c_d)
min(data$per_b)
min(data$per_d)
# Find Max
max(data$nve_l)
max(data$nve_s)
max(data$nve_d)
max(data$c_b)
max(data$c_d)
max(data$per_b)
max(data$per_d)
# Find Mean.
mean(data$nve_l)
mean(data$nve_s)
mean(data$nve_d)
mean(data$c_b)
mean(data$c_d)
mean(data$per_b)
mean(data$per_d)
#Find Median
median(data$nve_l)
median(data$nve_s)
median(data$nve_d)
median(data$c_b)
median(data$c_d)
median(data$per_b)
median(data$per_d)
#Find Mode
mode(data$nve_l)
mode(data$nve_s)
mode(data$nve_d)
mode(data$c_b)
mode(data$c_d)
mode(data$per_b)
mode(data$per_d)
#Find Variance
var(data$nve_l)
var(data$nve_s)
var(data$nve_d)
var(data$c_b)
var(data$c_d)
var(data$per_b)
var(data$per_d)
#Find Standard Deviation
sd(data$nve_l)
sd(data$nve_s)
sd(data$nve_d)
sd(data$c_b)
sd(data$c_d)
sd(data$per_b)
sd(data$per_d)
#Find IQR
IQR(data$nve_l)
IQR(data$nve_s)
IQR(data$nve_d)
IQR(data$c_b)
IQR(data$c_d)
IQR(data$per_b)
IQR(data$per_d)
#Detecting outliers in data
#data <- read.table("data.csv", sep = ",", header = TRUE)
# Inspect and optionally remove outliers from one column of a data frame.
#
# Args:
#   dt:  a data.frame, passed by name (it may be overwritten in the global
#        environment if the user confirms removal).
#   var: the column to check, given as an unquoted expression, e.g.
#        outlierKD(df, df$x); captured with substitute()/eval() (NSE).
#
# Side effects: opens a new graphics device and draws a 2x2 panel of
# boxplots/histograms before and after outlier removal, prints summary
# messages, then interactively asks (readline) whether to replace the
# outliers with NA. "Outliers" are the points flagged by boxplot.stats(),
# i.e. values beyond 1.5 * IQR from the box by default.
outlierKD <- function(dt, var) {
# Resolve the unquoted column expression against the data argument.
var_name <- eval(substitute(var),eval(dt))
tot <- sum(!is.na(var_name))
na1 <- sum(is.na(var_name))   # NAs already present before we touch anything
m1 <- mean(var_name, na.rm = T)
# 2x2 panel with an outer margin reserved for the shared title.
# NOTE(review): dev.new() after par() opens a *fresh* device, so these par()
# settings may not apply to it — confirm the intended device handling.
par(mfrow=c(2, 2), oma=c(0,0,3,0))
dev.new(width=5, height=4, unit="in")
boxplot(var_name, main="With outliers")
hist(var_name, main="With outliers", xlab=NA, ylab=NA)
outlier <- boxplot.stats(var_name)$out
mo <- mean(outlier)
# Blank out the flagged values so the "after" plots can be drawn.
var_name <- ifelse(var_name %in% outlier, NA, var_name)
boxplot(var_name, main="Without outliers")
hist(var_name, main="Without outliers", xlab=NA, ylab=NA)
title("Outlier Check", outer=TRUE)
na2 <- sum(is.na(var_name))
# na2 - na1 = newly introduced NAs = number of outliers found.
message("Outliers identified: ", na2 - na1, " from ", tot, " observations")
message("Proportion (%) of outliers: ", (na2 - na1) / tot*100)
message("Mean of the outliers: ", mo)
m2 <- mean(var_name, na.rm = T)
message("Mean without removing outliers: ", m1)
message("Mean if we remove outliers: ", m2)
# Ask before mutating the caller's data.
response <- readline(prompt="Do you want to remove outliers and to replace with NA? [yes/no]: ")
if(response == "y" | response == "yes"){
dt[as.character(substitute(var))] <- invisible(var_name)
# Write the updated data frame back under its original name in .GlobalEnv.
assign(as.character(as.list(match.call())$dt), dt, envir = .GlobalEnv)
message("Outliers successfully removed", "\n")
return(invisible(dt))
} else{
message("Nothing changed", "\n")
return(invisible(var_name))
}
}
# Run the interactive outlier check on each measured column.
outlierKD(data,data$nve_l)
outlierKD(data,data$nve_s)
outlierKD(data,data$nve_d)
outlierKD(data,data$c_b)
outlierKD(data,data$c_d)
outlierKD(data,data$per_b)
outlierKD(data,data$per_d)
#Plot 1: line chart of live births per year.
# The CSV is re-read before each plot — presumably to discard any NA
# replacements made by outlierKD() above (TODO confirm intent).
data <- read.table("/home/kapil/desktop/study material notes/6TH SEM/Data Analysis/DA LAB/data.csv", sep = ",", header = TRUE)
dev.new(width=5, height=4, unit="in")
plot(data$year,data$nve_l, type="l", col="green", lwd=5, xlab="years", ylab="No. of Births")
#lines(data$year,data$nve_d, col="red", lwd=5)
title("No. of Live Births from 2011 to 2016")
#legend("topright",c("Births","nve_ds"), lwd=c(5,2), col=c("green","red"), y.intersp=1.5)
#Plot 2: crude birth rate vs. death rate per year; ylim spans both series.
data <- read.table("/home/kapil/desktop/study material notes/6TH SEM/Data Analysis/DA LAB/data.csv", sep = ",", header = TRUE)
dev.new(width=5, height=4, unit="in")
plot(data$year,data$c_b, type="b", col="green", lwd=5, xlab="years", ylab="birth rate/nve_d rate",ylim=range(data$c_d,data$c_b))
lines(data$year,data$c_d,type="b",col="red", lwd=5)
title("Birth and nve_d rates from 2011 to 2016")
legend("topright",c("Birth rate","nve_d rate"), lwd=c(5,2), col=c("green","red"), y.intersp=1.5)
#Plot 3: birth vs. death percentage per year, same layout as Plot 2.
data <- read.table("/home/kapil/desktop/study material notes/6TH SEM/Data Analysis/DA LAB/data.csv", sep = ",", header = TRUE)
dev.new(width=5, height=4, unit="in")
plot(data$year,data$per_b, type="b", col="green", lwd=5, xlab="years", ylab="birth %/nve_d % ",ylim=range(data$per_d,data$per_b))
lines(data$year,data$per_d,type="b",col="red", lwd=5)
title("Birth and nve_d percentage from 2011 to 2016")
legend("topright",c("Birth %","nve_d %"), lwd=c(5,2), col=c("green","red"), y.intersp=1.5)
#Plot 4: distribution of still births by year as side-by-side boxplots.
data <- read.table("/home/kapil/desktop/study material notes/6TH SEM/Data Analysis/DA LAB/data.csv", sep = ",", header = TRUE)
dev.new(width=5, height=4, unit="in")
boxplot(data$nve_s ~ data$year, xlab = "years",ylab = "No.of still Births", main = "still birth data")
|
976fa36978b8840dfcca39040111e6f54875ceb9
|
833bdb27d5bc39d7dae655e9dfde0b3208db956d
|
/man/dum9.Rd
|
91705110314693854b40e0f28f10186a5c5ac4c3
|
[
"MIT"
] |
permissive
|
dennist2/QuantResearch
|
98492cc8767aea7ab5f639a86d67ab37145d60d2
|
b4dae474b2faa6217b0b3d8ac5400650f305ed0e
|
refs/heads/master
| 2022-09-08T19:30:06.833929
| 2020-06-02T17:02:49
| 2020-06-02T17:02:49
| 268,732,257
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 386
|
rd
|
dum9.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dum9.R
\name{dum9}
\alias{dum9}
\title{dum9}
\usage{
dum9(x)
}
\arguments{
\item{x}{a list or data.frame}
}
\value{
returns the list/df but with return
}
\description{
Takes output from histXCII and tells you the return
}
\details{
DETAILS
}
\examples{
\dontrun{
if(interactive()){
lapply(dum9,LGNN)
}
}
}
|
a1db61e4c03a6db46abe1502b3cdb398f4b2fa0e
|
155ab54887496697271f8cf473bc978c41fb6406
|
/Tidy Data and Summarize/ClimateData/gd_get_biomes_spdf.R
|
59b5d4f0d3143a8cc2c259bf16d651d3ecddd73e
|
[] |
no_license
|
robbinscalebj/DetritalNutrientsSynthesis
|
fe20246369e60376e0301f42edf713eef3e40036
|
8cd7822b725545eb6d570797e5d45946c438bb6b
|
refs/heads/master
| 2023-05-11T18:44:15.347983
| 2023-05-10T05:30:07
| 2023-05-10T05:30:07
| 270,461,610
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,599
|
r
|
gd_get_biomes_spdf.R
|
# pulled this biome code from now defunct guillembagaria/ggbiome
#
# Build an sp::SpatialPolygonsDataFrame of Whittaker-style biome polygons
# defined in climate space: polygon x = mean annual precipitation ("map"),
# polygon y = mean annual temperature ("mat").
#
# Args:
#   merge_deserts: logical scalar; if TRUE the desert biomes are collapsed
#                  into a single "Desert" category.
# Returns: a SpatialPolygonsDataFrame with one polygon per biome and a
#   data slot containing the single column `biome`.
gd_get_biomes_spdf <- function(merge_deserts = FALSE) {
# STEP 0
# Argument checks
# Is merge_deserts logical?
if (!(is.logical(merge_deserts))) {
stop('merge_deserts must be logical.')
}
# Is merge_deserts NA?
if (is.na(merge_deserts)) {
stop('merge_deserts must be either TRUE or FALSE.')
}
# STEP 1
# Create the data frame of polygon vertices: mat/map value pairs listed
# biome by biome; the rep() counts under `biome` give each biome's number
# of vertices (e.g. the first 9 rows outline 'Subtropical desert').
biomes_df <- data.frame(
mat = c(
29.339, 13.971, 15.371, 17.510, 24.131, 27.074, 28.915, 29.201, 29.339,
13.971, -9.706, -7.572, 4.491, 17.510, 15.371, 13.971, 17.510, 4.491,
-7.572, -9.706, -6.687, -0.949, 3.098, 7.147, 10.165, 13.918, 18.626,
18.176, 17.510, 18.626, 13.918, 10.165, 7.147, 3.098, -0.949, 1.039,
1.998, 2.444, 3.118, 4.446, 7.758, 12.614, 18.720, 18.637, 18.626, -0.949,
-6.687, -4.395, -4.098, -1.592, 0.914, 4.155, 3.118, 2.444, 1.998, 1.039,
-0.949, 18.720, 12.614, 7.758, 4.446, 3.118, 4.155, 15.716, 20.136,
19.392, 18.720, 18.720, 19.392, 20.136, 22.278, 23.756, 24.199, 24.714,
25.667, 26.105, 27.414, 27.772, 25.709, 21.736, 18.720, 17.510, 18.176,
18.626, 18.637, 18.720, 21.736, 25.709, 27.772, 28.418, 28.915, 27.074,
24.131, 17.510, -6.687, -8.896, -9.706, -13.382, -15.366, -15.217, -8.373,
-4.098, -1.592, -4.098, -4.395, -6.687
),
map = c(
21.3, 23.0, 174.6, 535.1, 702.9, 847.9, 992.4, 532.1, 21.3, 23.0, 7.3,
87.2, 314.6, 535.1, 174.6, 23.0, 535.1, 314.6, 87.2, 7.3, 202.6, 391.7,
529.9, 783.1, 956.9, 1116.5, 1269.3, 794.3, 535.1, 1269.3, 1116.5, 956.9,
783.1, 529.9, 391.7, 514.8, 673.4, 968.5, 1630.6, 1839.7, 2028.0, 2224.0,
2355.7, 1837.6, 1269.3, 391.7, 202.6, 922.9, 1074.1, 1405.9, 1744.9,
2012.3, 1630.6, 968.5, 673.4, 514.8, 391.7, 2355.7, 2224.0, 2028.0,
1839.7, 1630.6, 2012.3, 2930.1, 3377.7, 2917.0, 2355.7, 2355.7, 2917.0,
3377.7, 3896.5, 4343.1, 4415.2, 4429.8, 4279.0, 4113.7, 3344.4, 2790.6,
2574.0, 2414.3, 2355.7, 535.1, 794.3, 1269.3, 1837.6, 2355.7, 2414.3,
2574.0, 2790.6, 1920.3, 992.4, 847.9, 702.9, 535.1, 202.6, 50.8, 7.3,
34.8, 98.8, 170.8, 533.0, 1074.1, 1405.9, 1074.1, 922.9, 202.6
),
biome = c(
rep('Subtropical desert', 9), rep('Temperate grassland/desert', 7),
rep('Woodland/shrubland', 13), rep('Temperate forest', 16),
rep('Boreal forest', 12), rep('Temperate rain forest', 10),
rep('Tropical rain forest', 14), rep('Tropical seasonal forest/savanna', 13),
rep('Tundra', 12)
)
)
# STEP 2
# Merge deserts if specified: relabel every biome whose name contains
# "desert" (fixed-string match) as 'Desert'.
if (merge_deserts){
biome <- as.character(biomes_df$biome)
biome[grepl('desert', biome, fixed = TRUE)] <- 'Desert'
biomes_df$biome <- as.factor(biome)
}
# STEP 3
# Create SpatialPolygonsDataFrame object: one sp::Polygon per biome from its
# (map, mat) vertex rows, wrapped into Polygons/SpatialPolygons, then joined
# to a one-column attribute table keyed by biome name.
list_pol <- sapply(as.character(unique(biomes_df$biome)),
function(id_biome,df)
sp::Polygon(cbind(df$map[df$biome == id_biome],
df$mat[df$biome == id_biome])),
df=biomes_df, USE.NAMES = TRUE)
sp_biomes <- sp::SpatialPolygons(
lapply(1:length(list_pol),
function(i, x) {sp::Polygons(list(x[[i]]),
names(x)[i])},
x = list_pol)
)
spdf_biomes <- sp::SpatialPolygonsDataFrame(
sp_biomes, data.frame(biome = names(list_pol)), match.ID = 'biome'
)
# STEP 4
# Return SpatialPolygonsDataFrame object
return(spdf_biomes)
# END FUNCTION
}
|
9496b4d7dc2cf2e16093083acc174b3bc3b83d11
|
8a45b18d417bca59ac4320236c365d6a21c4672b
|
/cachematrix.R
|
31fdf677c766d1a6ffde9327c92e07dd07890fdb
|
[] |
no_license
|
mpolugodina/Maria-Polugodina-Repository-for-Programming-Assignment-2
|
8565f5c821f6b3bfe844577e40fe9778a7013eeb
|
b6ba4b74fa4417dec1410ca3bd1ffd09c5a3d0d2
|
refs/heads/master
| 2022-11-14T10:57:42.982618
| 2020-07-08T15:28:00
| 2020-07-08T15:28:00
| 278,095,614
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,515
|
r
|
cachematrix.R
|
## 'makeCacheMatrix' creates a special "matrix", with a list of functions to
# 1. Set the value of a matrix x (set)
# 2. Retrieve the value of the matrix (get)
# 3. Set the value of the matrix' inverse (setinv)
# 4. Retrieve the value of the matrix' inverse (getinv)
#The default argument value is an empty matrix, otherwise x will not be
#initialized if no argument is passed => 'get' will not work until x is
#explicitly initialized with 'set'
## Build a caching wrapper around a matrix. The returned list exposes four
## accessors: set/get for the matrix itself and setinv/getinv for its
## (lazily computed, externally supplied) inverse. The matrix and inverse
## live in this function's closure environment.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  cached_inverse <- NULL
  # Replace the stored matrix and invalidate the stale cached inverse.
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  # Retrieve the stored matrix.
  get <- function() {
    x
  }
  # Store an inverse computed elsewhere (see cacheSolve).
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  # Retrieve the cached inverse (NULL when nothing is cached).
  getinv <- function() {
    cached_inverse
  }
  # Expose the accessors by name so callers can write obj$get() etc.
  list(
    set = set,
    get = get,
    setinv = setinv,
    getinv = getinv
  )
}
# 'cacheSolve' checks if the inverse of matrix x is stored, provided x was
# generated by makeCacheMatrix. If an inverse is stored, the function returns it.
# If not, it calculates the inverse and stores it.
#As per assignment, the function assumes, the matrix is always invertible,
#so no special checks are added for that
## Return the inverse of the special "matrix" x built by makeCacheMatrix.
## If an inverse is already cached, return it (with a message); otherwise
## compute it with solve(), store it in the cache, and return it.
## Extra arguments in ... are forwarded to solve(). The matrix is assumed
## to be invertible (per the assignment), so no singularity check is done.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  # Fast path: a non-NULL cache means the inverse was computed before.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Slow path: fetch the matrix and invert it. solve(a, b) solves
  # a %*% result == b, so with b = identity the result is the inverse;
  # only square matrices are invertible, so ncol() fixes the identity size.
  mat <- x$get()
  identity_mat <- diag(ncol(mat))
  fresh <- solve(mat, identity_mat, ...)
  # Remember the result for next time, then return it.
  x$setinv(fresh)
  fresh
}
|
7343c57cd3b2e2769160ecaa0532939b7cbd6b1e
|
8c70bae5ec757d3a4d9404d34196ca165afcb4c5
|
/R/analyses/women.R
|
014c1f7514ab2d8390ebb3533122f3a8b321bb4a
|
[] |
no_license
|
JeredLinares/Olympic_history
|
8c091ecffa8d3d069ffae6940de1304d04a79eb8
|
b14dd25bf2b63c562fc7771c70439d005696ea4c
|
refs/heads/master
| 2022-04-08T14:27:35.088304
| 2020-02-16T19:13:35
| 2020-02-16T19:13:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,047
|
r
|
women.R
|
################
# PREPARATIONS #
################
# load packages
library("tidyverse")
# load data
data <- read_csv("~/Documents/GitHub/Olympic_history/data/athlete_events.csv",
col_types = cols(
ID = col_character(),
Name = col_character(),
Sex = col_factor(levels = c("M","F")),
Age = col_integer(),
Height = col_double(),
Weight = col_double(),
Team = col_character(),
NOC = col_character(),
Games = col_character(),
Year = col_integer(),
Season = col_factor(levels = c("Summer","Winter")),
City = col_character(),
Sport = col_character(),
Event = col_character(),
Medal = col_factor(levels = c("Gold","Silver","Bronze"))
)
)
# Exclude art competitions
data <- data %>% filter(Sport != "Art Competitions")
# Recode year of Winter Games after 1992 to match the next Summer Games
original <- c(1994,1998,2002,2006,2010,2014)
new <- c(1996,2000,2004,2008,2012,2016)
for (i in 1:length(original)) {
data$Year <- gsub(original[i], new[i], data$Year)
}
####################################
# MALE VS FEMALE ATHLTES OVER TIME #
####################################
# Table counting number of athletes by Year and Sex
counts <- data %>% group_by(Year, Sex) %>%
summarize(Athletes = length(unique(ID)))
counts$Year <- as.integer(counts$Year)
# Plot number of male/female athletes vs time
ggplot(counts, aes(x=Year, y=Athletes, color=Sex)) +
geom_point(size=2) +
geom_line() +
scale_color_manual(values=c("darkblue","red")) +
labs(title = "Number of male and female Olympians over time") +
theme(plot.title = element_text(hjust = 0.5))
####################################################
# FEMALE ~ MALE ATHLETES PER NOC FROM SELECT GAMES #
####################################################
# Count M/F/Total per country per Olympics
# Keep only country-years with at least 50 athletes (Total > 49 below)
counts_NOC <- data %>% filter(Year %in% c(1936,1956,1976,1996,2016)) %>%
group_by(Year, NOC, Sex) %>%
summarize(Count = length(unique(ID))) %>%
spread(Sex, Count) %>%
mutate(Total = sum(M,F,na.rm=T)) %>%
filter(Total > 49)
names(counts_NOC)[3:4] <- c("Male","Female")
counts_NOC$Male[is.na(counts_NOC$Male)] <- 0
counts_NOC$Female[is.na(counts_NOC$Female)] <- 0
# Plot female vs. male athletes by NOC / Year
ggplot(counts_NOC, aes(x=Male, y=Female, color=Year)) +
geom_point(alpha=0.6) +
geom_abline(intercept=0, slope=1, linetype="dashed") +
geom_smooth(method="lm", se=FALSE) +
labs(title = "Female vs. Male Olympians from participating NOCs") +
theme(plot.title = element_text(hjust = 0.5)) +
guides(color=guide_legend(reverse=TRUE))
###################################################
# PROPORTIONS OF ATHLETES/MEDALISTS WHO ARE WOMEN #
###################################################
# Proportions of athletes/medals won by women from select NOCs/Years
props <- data %>% filter(Year %in% c(1936,1976,2016)) %>%
group_by(Year, NOC, Sex) %>%
summarize(Athletes = length(unique(ID)),
Medals = sum(!is.na(Medal)))
props <- dcast(setDT(props),
Year + NOC ~ Sex,
fun.aggregate = sum,
value.var = c("Athletes","Medals"))
props <- props %>%
mutate(Prop_F_athletes = Athletes_F/(Athletes_F + Athletes_M),
Prop_F_medals = Medals_F/(Medals_F + Medals_M)) %>%
filter(Athletes_F + Athletes_M > 49)
props$Prop_F_medals[props$Medals_M + props$Medals_F == 0] <- NA
plot(Prop_F_medals~Prop_F_athletes, data=props[which(props$Year==2016),], xlim=c(0,1),ylim=c(0,1))
abline(a=0,b=1)
# Data for 1936 only
props_1936 <- props %>%
filter(Year == 1936) %>%
gather(Prop_F_athletes, Prop_F_medals, key="type", value="value")
levs <- props_1936 %>%
filter(type == "Prop_F_athletes") %>%
arrange(value) %>% select(NOC)
props_1936$NOC <- factor(props_1936$NOC, levels=c(levs$NOC))
# Plot 1936
ggplot(props_1936, aes(x=value, y=NOC, color=type)) +
geom_point(na.rm=FALSE, alpha=0.8) +
scale_color_manual(name="",
values=c("black","goldenrod"),
labels=c("Athletes","Medals")) +
labs(title="1936 Olympics (Garmisch-Partenkirchen and Berlin)", x="Proportion female") +
theme(plot.title = element_text(hjust = 0.5)) +
xlim(0,1)
# Data for 1976 only
props_1976 <- props %>%
filter(Year == 1976) %>%
gather(Prop_F_athletes, Prop_F_medals, key="type", value="value")
levs <- props_1976 %>%
filter(type == "Prop_F_athletes") %>%
arrange(value) %>% select(NOC)
props_1976$NOC <- factor(props_1976$NOC, levels=c(levs$NOC))
# Plot 1976
ggplot(props_1976, aes(x=value, y=NOC, color=type)) +
geom_point(na.rm=FALSE, alpha=0.8) +
scale_color_manual(name="",
values=c("black","goldenrod"),
labels=c("Athletes","Medals")) +
labs(title="1976 Olympics (Innsbruck and Montreal)", x="Proportion female") +
theme(plot.title = element_text(hjust = 0.5)) +
xlim(0,1)
# Data for 2014/2016 only
props_2016 <- props %>%
filter(Year == 2016) %>%
gather(Prop_F_athletes, Prop_F_medals, key="type", value="value")
levs <- props_2016 %>%
filter(type == "Prop_F_athletes") %>%
arrange(value) %>% select(NOC)
props_2016$NOC <- factor(props_2016$NOC, levels=c(levs$NOC))
# Plot 2014/2016
ggplot(props_2016, aes(x=value, y=NOC, color=type)) +
geom_point(na.rm=FALSE, alpha=0.8) +
scale_color_manual(name="",
values=c("black","goldenrod"),
labels=c("Athletes","Medals")) +
labs(title="2014/2016 Olympics (Sochi and Rio)",
x="Proportion female") +
theme(plot.title = element_text(hjust = 0.5),
axis.text.y = element_text(size=6)) +
xlim(0,1)
###########################################
# MEDAL COUNTS FOR WOMEN BY COUNTRY/GAMES #
###########################################
# Count number of medals awarded to each NOC at 1936 Olympics
counts_1936 <- data %>% filter(Year==1936, !is.na(Medal), Sex=="F") %>%
group_by(NOC, Medal) %>%
summarize(Count=length(Medal))
levs <- counts_1936 %>%
group_by(NOC) %>%
summarize(Total=sum(Count)) %>%
arrange(Total) %>%
select(NOC)
counts_1936$NOC <- factor(counts_1936$NOC, levels=levs$NOC)
# Plot 1936
ggplot(counts_1936, aes(x=NOC, y=Count, fill=Medal)) +
geom_col() +
coord_flip() +
scale_fill_manual(values=c("gold1","gray70","gold4")) +
ggtitle("Medal counts for women at the 1936 Olympics") +
theme(plot.title = element_text(hjust = 0.5))
# Count number of medals awarded to each NOC at 1976 Olympics
counts_1976 <- data %>% filter(Year==1976, !is.na(Medal), Sex=="F") %>%
group_by(NOC, Medal) %>%
summarize(Count=length(Medal))
levs <- counts_1976 %>%
group_by(NOC) %>%
summarize(Total=sum(Count)) %>%
arrange(Total) %>%
select(NOC)
counts_1976$NOC <- factor(counts_1976$NOC, levels=levs$NOC)
# Plot 1976
ggplot(counts_1976, aes(x=NOC, y=Count, fill=Medal)) +
geom_col() +
coord_flip() +
scale_fill_manual(values=c("gold1","gray70","gold4")) +
ggtitle("Medal counts for women at the 1976 Olympics") +
theme(plot.title = element_text(hjust = 0.5))
# Count number of medals awarded to each NOC at 2014/2016 Olympics
counts_2016 <- data %>% filter(Year==2016, !is.na(Medal), Sex=="F") %>%
group_by(NOC, Medal) %>%
summarize(Count=length(Medal))
levs <- counts_2016 %>%
group_by(NOC) %>%
summarize(Total=sum(Count)) %>%
arrange(Total) %>%
select(NOC)
counts_2016$NOC <- factor(counts_2016$NOC, levels=levs$NOC)
# Plot 2014/2016
ggplot(counts_2016, aes(x=NOC, y=Count, fill=Medal)) +
geom_col() +
coord_flip() +
scale_fill_manual(values=c("gold1","gray70","gold4")) +
ggtitle("Medal counts for women at the 2014/2016 Olympics") +
theme(plot.title = element_text(hjust = 0.5),
axis.text.y = element_text(size=6))
#######
# END #
#######
|
061dfb6cf66b0d299a4b6a7dd706be84d6bcd540
|
71f31688fc7f4e6eed62492edf4d0c1a369e03b7
|
/Scripts/EU_US_map_univariate.R
|
fae32541dec2568fb25f2509fc566a9b39dd3264
|
[
"BSD-2-Clause"
] |
permissive
|
elslabbert/ARAGOG
|
55bcf631e4f45fdd32f6384a2df7307b5446c496
|
9e4bb2ab6bd55fbc705a9a4a5da4be31239c4a2a
|
refs/heads/master
| 2021-01-07T05:21:09.866549
| 2019-11-18T14:24:48
| 2019-11-18T14:24:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,679
|
r
|
EU_US_map_univariate.R
|
###########
# Create EU and US maps for one variable
# of choice from a table in the Ready_tables folder
###########
source("Scripts/library.R")
#plots functions
# Draw a choropleth of `variable_to_plot` over the European window of the
# sf data frame `df`, shading from red (#ef8a62, low) to blue (#67a9cf,
# high) on a white theme.
make_EU_plot <- function(df, variable_to_plot) {
  eu_window <- coord_sf(xlim = c(-20, 30), ylim = c(30, 70))
  fill_scale <- scale_fill_gradient(low = "#ef8a62", high = "#67a9cf")
  ggplot(df, aes(fill = as.numeric(variable_to_plot))) +
    geom_sf(color = "grey", size = 0.001) +
    eu_window +
    fill_scale +
    theme_bw()
}
# Draw a choropleth of `variable_to_plot` over the continental-US window of
# the sf data frame `df`, using the same red-to-blue gradient as the EU map.
make_US_plot <- function(df, variable_to_plot) {
  us_window <- coord_sf(xlim = c(-125, -60), ylim = c(20, 50))
  fill_scale <- scale_fill_gradient(low = "#ef8a62", high = "#67a9cf")
  ggplot(df, aes(fill = as.numeric(variable_to_plot))) +
    geom_sf(color = "grey", size = 0.001) +
    us_window +
    fill_scale +
    theme_bw()
}
#load shapefiles of NUTS2 and Counties
ShapeEU <- readShapeSpatial("NUTS2_shapes/NUTS_RG_01M_2013_4326_LEVL_2.shp")
ShapeUS <- readShapeSpatial("County_shapes/us_county_updated.shp")
#Load tables of the variable to plot. Set variable name and path
NUTS2_N_balance <- read_xlsx("Ready_datasets/NUTS2_Nitrogen_balance_ha_140_crops.xlsx")
CTFIPS_N_balance <- read_xlsx("Ready_datasets/CTFIPS_NitrogenBalance_140crops.xlsx")
#merge shapefile with variable to plot, by region ID, then turn back into a spatial file (sf)
EU_N <- merge(ShapeEU, NUTS2_N_balance, by = "NUTS_ID", sort = FALSE)
EU_N <- st_as_sf(EU_N)
US_N <- merge(ShapeUS, CTFIPS_N_balance, by = "CTFIPS", sort = FALSE)
US_N <- st_as_sf(US_N)
#plot EU and US separately
EUplot <- make_EU_plot(df = EU_N, variable_to_plot = EU_N$N_b_140)
USplot <- make_US_plot(df = US_N, variable_to_plot = US_N$N_balance_140crops)
#arrange plots in one
US_EU_N <- grid.arrange(EUplot, USplot, nrow = 2)
|
b7baca11fdc4085427cd017dc26c277d2c02251f
|
979846e786f49fa6f69b0cba4eb1b36efcf2809c
|
/1-Bildspektrenextraktion.r
|
28850bdd21cb9320ed13a663d6662974ac3b6b87
|
[] |
no_license
|
fabianfassnacht/AVI_carotenoids
|
69de5d9cca8685bb3b1f03ec40c84d0ac64e8157
|
7408d891971edf487bef41ed90f416cd4182cc30
|
refs/heads/master
| 2020-03-20T00:45:28.822514
| 2018-06-12T10:28:35
| 2018-06-12T10:28:35
| 137,054,940
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 773
|
r
|
1-Bildspektrenextraktion.r
|
# Extract image spectra from a hyperspectral scene at tree-species sample
# locations and write them to CSV for later training use.
library(rgdal)
library(raster)
library(maptools)
### Read data: hyperspectral image stack and sample shapefile ###
hyp<-stack("U:/EnMap/AA_BaumartenErkennung/Hyperspektralszenen/Karlsruhe/100820_Karlsruhe_02_rad_atm_geo_masked.bsq")
shp<-readOGR("U:/EnMap/AA_BaumartenErkennung/ReferenceDatasets/Karlsruhe/Samples_Baumarten/Final_Samples_Shapes_60",layer="Samples_60.shp")
### Adjust projection / coordinate system (currently disabled) ###
#shp<-spTransform(shp,CRS=CRS("+proj=utm +zone=32 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"))
### Plot: band 10 in greyscale with the sample geometries overlaid ###
plot(hyp,10,col=gray.colors(100))
plot(shp,col="yellow",add=T)
### Extract spectra at the sample locations ###
ana<-extract(hyp,shp)
write.csv(ana,file="U:/EnMap/AA_BaumartenErkennung/Hyperspektralszenen/Karlsruhe/training_ka.csv")
### Test plot: spectrum of the first sample (125 bands assumed — verify) ###
plot(c(1:125),ana[1,],type="l", col="red")
|
1054593a414f22074ee26f553d51b1194f325fe3
|
a0fb87ab6ddc7ecae238e768c21918e54d40a5a3
|
/Airlines-Forcasting.R
|
3f0c7b4cabb508d53f61bd1565e7d6e6ce2e6eb0
|
[] |
no_license
|
itsme020/Final
|
bc471c59072584b67b3296f2751e5af86f2d99d3
|
7e70fe8d015d6407c81e4bc55cb879a8138d0e79
|
refs/heads/master
| 2022-04-20T06:16:33.061538
| 2020-04-20T07:08:01
| 2020-04-20T07:08:01
| 257,199,725
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,070
|
r
|
Airlines-Forcasting.R
|
# Airlines passenger forecasting: data preparation.
# Expects `Airlinesdata1` (96 monthly rows, passenger counts in column 2)
# to be loaded in the session already.
library(readr)
windows()
# (BUG FIX: a stray "." statement here raised "object '.' not found".)
#attach dataset
plot(Airlinesdata1$Passengers,type="l")
# Create 12 month-indicator (dummy) columns Jan..Dec; the models later use
# Jan..Nov, so December acts as the baseline month.
x<-data.frame(outer(rep(month.abb,length=96),month.abb,"==")+0)
View(x)
colnames(x)<-month.abb # assigning month names
View(x)
trakdata<-cbind(Airlinesdata1,x)
View(trakdata)
colnames(trakdata)[2]<-"Pasengers"
colnames(trakdata)
trakdata["t"]<-1:96            # linear time trend
View(trakdata)
# BUG FIX: the original wrote log(trakdata[,2]) into column 3, which
# overwrote the "Jan" dummy column and never created the "log_Passengers"
# column that the exponential/multiplicative models below reference.
trakdata["log_Passengers"]<-log(trakdata[,2])
trakdata["t_square"]<-trakdata["t"]*trakdata["t"]   # quadratic trend term
attach(trakdata) # NOTE(review): attach() is fragile; the lm() calls pass data= anyway
##Data Partition: first 84 months train, last 12 months test
train<-trakdata[1:84,]
test<-trakdata[85:96,]
View(trakdata)
#########################LINEAR MODEL############################################
linear_model<-lm(Pasengers~t,data=train)
summary(linear_model)
linear_pred<-data.frame(predict(linear_model,interval = 'predict',newdata = test))
View(linear_pred)
rmse_linear<-sqrt(mean((test$Pasengers-linear_pred$fit)^2,na.rm=T))
View(rmse_linear)
###################################Exponential################
expo_model<-lm(log_Passengers~t,data=train)
summary(expo_model)
expo_pred<-data.frame(predict(expo_model,interval = 'predict',newdata = test))
rmse_expo<-sqrt(mean((test$Pasengers-exp(expo_pred$fit))^2,na.rm=T))
rmse_expo
################################Quadratic################################
Quad_model<-lm(Pasengers~t+t_square,data = train)
summary(Quad_model)
Quad_pred<-data.frame(predict(Quad_model,interval = 'predict',newdata = test))
rmse_Quad<-sqrt(mean((test$Pasengers-Quad_pred$fit)^2,na.rm=T))
rmse_Quad
######################3Additive Seasonality######################33
sea_add_model<-lm(Pasengers~Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov,data=train)
summary(sea_add_model)
sea_add_pred<-data.frame(predict(sea_add_model,newdata = test,interval = 'predict'))
rmse_sea_add<-sqrt(mean((test$Pasengers-sea_add_pred$fit)^2,na.rm=T))
rmse_sea_add
#####################Additive Seasonality with Linear#######################
Add_sea_Linear_model<-lm(Pasengers~t+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov,data=train)
summary(Add_sea_Linear_model)
Add_sea_Linear_pred<-data.frame(predict(Add_sea_Linear_model,interval = 'predict',newdata = test))
rmse_Add_sea_Linear<-sqrt(mean((test$Pasengers-Add_sea_Linear_pred$fit)^2,na.rm=T))
rmse_Add_sea_Linear
############################Additive Seasonality With Quardratic############
Add_sea_Quad_model<-lm(Pasengers~t+t_square+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov,data=train)
summary((Add_sea_Quad_model))
Add_sea_Quad_pred<-data.frame(predict(Add_sea_Quad_model,interval = 'predict',newdata = test))
rmse_Add_sea_Quad<-sqrt(mean((test$Pasengers-Add_sea_Quad_pred$fit)^2,na.rm=T))
rmse_Add_sea_Quad
#######################Multiplicative Seasonality#######################################
multi_sea_model<-lm(log_Passengers~Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov,data=train)
summary(multi_sea_model)
multi_sea_pred<-data.frame(predict(multi_sea_model,newdata = test,interval = 'predict'))
rmse_multi_sea<-sqrt(mean((test$Pasengers-exp(multi_sea_pred$fit))^2,na.rm=T))
rmse_multi_sea
##########################Multiplicative Seasonality Linear Trend###################3
multi_add_sea_model<-lm(log_Passengers~t+t+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov,data=train)
summary(multi_add_sea_model)
multi_add_sea_pred<-data.frame(predict(multi_add_sea_model,interval = 'predict',newdata = test))
rmse_multi_add_sea<-sqrt(mean((test$Pasengers-exp(multi_add_sea_pred$fit))^2,na.rm=T))
rmse_multi_add_sea
########Preparing Table on model and its rmse values######################
table_rmse<-data.frame('Model'=c("rmse_linear","rmse_expo",
"rmse_Quad","rmse_sea_add","rmse_Add_sea_Quad",
"rmse_multi_sea","rmse_multi_add_sea","Add_sea_Linear_model"),
'RMSE'=c(rmse_linear,rmse_expo,rmse_Quad,rmse_sea_add,rmse_Add_sea_Quad,
rmse_multi_sea,rmse_multi_add_sea,rmse_Add_sea_Linear))
View(table_rmse)
colnames(table_rmse)<-c("model","RMSE")
View(table_rmse)
############################################LEAST VALUE####################
#####################MMultiplicative Seasonality Linear Trend has least value###############
new_model<-lm(log_Passengers~t+t+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov,data=trakdata)
# Predict (new_model,N.ahead=1)
#getting Residual
resid<-residuals(new_model)
resid[1:10]
hist(resid)
windows()
acf(resid,lag.max = 10)
# By principle or parcimony we will consider lag-1 as we have so
#many Significant lags
#building Autoreggressive model on residuals consider lag-1
k<-arima(resid,order=c(1,0,0))
#### Buld model on residual@@@
windows()
acf(k$residuals,lag.max=15)
pred_res<-predict(arima(k$residuals,order=c(1,0,0)),n.ahead=96)
str(pred_res)
pred_res$pred
acf(k$residuals)
write.csv(trakdata,file="trakdata.csv",col.names = F,row.names = F)
getwd()
|
27a9e13af021b494efb2ab2a19efc24ddd9baeb1
|
9e4df408b72687493cc23144408868a975971f68
|
/SMS_r_prog/flsms/sms2flsmss.r
|
afb793796ad4432eb171ec913c46897c9f266cb7
|
[
"MIT"
] |
permissive
|
ices-eg/wg_WGSAM
|
7402ed21ae3e4a5437da2a6edf98125d0d0e47a9
|
54181317b0aa2cae2b4815c6d520ece6b3a9f177
|
refs/heads/master
| 2023-05-12T01:38:30.580056
| 2023-05-04T15:42:28
| 2023-05-04T15:42:28
| 111,518,540
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,263
|
r
|
sms2flsmss.r
|
#' Convert SMS run output into FLStockMulti / FLSMSs objects
#'
#' Reads the SMS 'summary.out' table (and, when reading model output, the
#' catch/survey residuals file), collapses each column into a
#' species x age x year x quarter array, and fills one FLStockMulti per VPA
#' species. Returns a single FLStockMulti when only one species is
#' configured, otherwise an FLSMSs collection.
#'
#' @param sumfile Path to the SMS summary output table ('data.path' is
#'   expected to exist in the calling environment for the default).
#' @param residualsFile Path to the catch/survey residuals file; only read
#'   when read.output is TRUE.
#' @param bio.interact Also read multispecies mortality columns (M1/M2)?
#' @param read.input Copy observed input data (catch numbers, weights,
#'   natural mortality, maturity) into the stock objects?
#' @param read.output Copy model estimates (N, F, predicted catch, M2)?
#' @param control An FLSMS.control object describing the run (required).
#' @param FLStocksMulti Optional existing collection whose elements are
#'   updated; when NULL, fresh FLStockMulti objects are created.
#' @return An FLStockMulti (nsp == 1) or an FLSMSs (nsp > 1).
SMS2FLSMSs <- function(sumfile = file.path(data.path, 'summary.out'),
                       residualsFile = "Catch_survey_residuals.out",
                       bio.interact = FALSE, read.input = FALSE,
                       read.output = TRUE,
                       control = NULL, FLStocksMulti = NULL) {
  # Short-circuit || (not elementwise |): this is a scalar guard.
  if (is.null(control) || !inherits(control, "FLSMS.control"))
    stop("A valid 'FLSMS.control' must be given!")
  nsp <- slot(control, "no.species")
  # A single-season model is treated as annual (season dimension collapsed).
  if (slot(control, "last.season") == 1) annual <- TRUE else annual <- FALSE
  info <- slot(control, "species.info")[, "predator"]
  first.VPA <- max(length(info[info == 2]) + 1, 1)  # other predators have predator code = 2
  pl.grp <- slot(control, "species.info")[, "+group"]
  max.age <- slot(control, "species.info")[, "last-age"]
  pl.grp[pl.grp == 1] <- max.age[pl.grp == 1]
  pl.grp[pl.grp == 0] <- NA

  # Read summary.out file from the SMS run.
  sms <- read.table(sumfile, header = TRUE)
  var.names <- names(sms)
  index <- list(sms$Species.n, sms$Age, sms$Year, sms$Quarter)

  # Collapse a column into a species x age x year x quarter array,
  # replacing absent combinations (NA from tapply) with 0.
  condense <- function(x, index) {
    y <- tapply(x, index, sum)
    y[is.na(y)] <- 0
    y
  }

  if (read.input) {
    if ("C.obs" %in% var.names) c.n <- condense(sms$C.obs, index)    # observed catch numbers
    if ("west" %in% var.names) s.wt <- condense(sms$west, index)     # stock mean weight
    if ("weca" %in% var.names) c.wt <- condense(sms$weca, index)     # catch mean weight
    if ("M" %in% var.names) m <- condense(sms$M, index)              # natural mortality
    if ("propmat" %in% var.names) mat <- condense(sms$propmat, index)
    if (bio.interact) {
      # BUG FIX: m1 was used further down but never read from the table,
      # which made bio.interact + read.input fail on an undefined object.
      if ("M1" %in% var.names) m1 <- condense(sms$M1, index)         # residual natural mortality
    }
  }
  if (read.output) {
    if ("C.hat" %in% var.names) c.n.hat <- condense(sms$C.hat, index)  # predicted catch numbers
    if ("N" %in% var.names) s.n <- condense(sms$N, index)              # stock numbers
    if ("F" %in% var.names) harvest <- condense(sms$F, index)          # fishing mortality
    if (bio.interact) {
      if ("M2" %in% var.names) m2 <- condense(sms$M2, index)           # predation mortality
    }
  }

  # As condense(), but -99.9 is the SMS missing-value code and becomes NA.
  condense2 <- function(x, index) {
    y <- tapply(x, index, sum)
    y[y == -99.9] <- NA
    y
  }

  if (read.output) {
    # Read Catch_survey_residuals.out from the SMS run; keep catch records.
    resids <- read.table(residualsFile, header = TRUE)
    resid.c <- subset(resids, data == "catch")
    index <- list(resid.c$Species.n, resid.c$Age, resid.c$Year, resid.c$Quarter)
    c.n.resid <- condense2(resid.c$residual, index)  # catch residuals
  }

  # Species names.
  s <- slot(control, "species.names")
  # NOTE(review): for nsp == 1 this assigns the FLSMS generator itself; the
  # value is never used on that path (the single stock is returned directly),
  # but confirm the intent before relying on new.fls when nsp == 1.
  if (nsp > 1) new.fls <- new("FLSMSs") else new.fls <- FLSMS

  # Template for the quant dimensions, taken from the observed-catch array.
  # NOTE(review): c.n only exists when read.input = TRUE and 'C.obs' is
  # present; with the default read.input = FALSE this line errors — confirm
  # intended usage against the callers before changing.
  c1 <- c.n[1, , , ]
  dc <- dim(c1)
  nc <- dimnames(c1)
  if (annual) { nc[[3]] <- "all"; dc[3] <- 1 }
  dimnames <- list(age = nc[[1]], year = nc[[2]], unit = "all",
                   season = nc[[3]], area = "all", iter = "none")
  dim <- c(dc[1], dc[2], 1, dc[3], 1, 1)
  dimnames2 <- list(age = 'all', year = nc[[2]], unit = "all",
                    season = nc[[3]], area = "all", iter = "none")
  dim2 <- c(1, dc[2], 1, dc[3], 1, 1)

  for (si in (first.VPA:nsp)) {
    sp.no <- si - first.VPA + 1
    if (is.null(FLStocksMulti)) {  # create a new stock object
      # NOTE(review): for nsp == 1 the species dimension is kept whole here
      # (unlike the [sp.no,,,] slice); presumably the single-species array
      # already has the right shape — confirm.
      if (nsp == 1) q <- c.n else q <- c.n[sp.no, , , ]
      s. <- FLStockMulti(name = s[si], desc = s[si], plusgroup = pl.grp[si],
                         iniFLQuant = FLQuant(q, dim = dim, dimnames = dimnames,
                                              quant = "age", units = "1000"))
    } else s. <- new.fls[[si]]  # update the existing one
    if (read.input) {
      # BUG FIX: assign q explicitly from c.n; previously this reused a q
      # that is only set when FLStocksMulti is NULL, so updating an existing
      # collection read an undefined/stale value.
      if (nsp == 1) q <- c.n else q <- c.n[sp.no, , , ]
      s.@catch.n <- FLQuant(q, dim = dim, dimnames = dimnames, quant = "age", units = "1000")
      if (nsp == 1) q <- c.wt else q <- c.wt[sp.no, , , ]
      if ("weca" %in% var.names) s.@catch.wt <- FLQuant(q, dim = dim, dimnames = dimnames, quant = "age", units = "kg")
      if ("weca" %in% var.names) s.@catch <- FLQuant(apply(s.@catch.n * s.@catch.wt, c(2, 3, 4), sum), dimnames = dimnames2, quant = "all", units = "ton")
      if (nsp == 1) q <- s.wt else q <- s.wt[sp.no, , , ]
      if ("west" %in% var.names) s.@stock.wt <- FLQuant(q, dim = dim, dimnames = dimnames, quant = "age", units = "kg")
      if (nsp == 1) q <- m else q <- m[sp.no, , , ]
      if ("M" %in% var.names) s.@m <- FLQuant(q, dim = dim, dimnames = dimnames, quant = "age", units = " ")
      if (nsp == 1) q <- mat else q <- mat[sp.no, , , ]
      if ("propmat" %in% var.names) s.@mat <- FLQuant(q, dim = dim, dimnames = dimnames, quant = "age", units = "proportion")
      if (bio.interact) {
        # BUG FIX: the m1 slice is now inside the "M1" guard; it previously
        # ran unconditionally and failed when the column was absent.
        if ("M1" %in% var.names) {
          if (nsp == 1) q <- m1 else q <- m1[sp.no, , , ]
          s.@m1 <- FLQuant(q, dim = dim, dimnames = dimnames, quant = "age", units = " ")
        }
      }
    }
    if (read.output) {
      if (nsp == 1) q <- s.n else q <- s.n[sp.no, , , ]
      if ("N" %in% var.names) s.@stock.n <- FLQuant(q, dim = dim, dimnames = dimnames, quant = "age", units = "1000")
      if (nsp == 1) q <- harvest else q <- harvest[sp.no, , , ]
      if ("F" %in% var.names) s.@harvest <- FLQuant(q, dim = dim, dimnames = dimnames, quant = "age", units = "f")
      if (("west" %in% var.names) && ("N" %in% var.names)) s.@stock <- FLQuant(apply(s.@stock.n * s.@stock.wt, c(2, 3, 4), sum), dimnames = dimnames2, quant = "all", units = "ton")
      if (bio.interact) {
        if ("M2" %in% var.names) {
          if (nsp == 1) q <- m2 else q <- m2[sp.no, , , ]
          s.@m2 <- FLQuant(q, dim = dim, dimnames = dimnames, quant = "age", units = " ")
        }
      }
    }
    if (nsp > 1) new.fls[[sp.no]] <- s.
  }
  # Plain if/else instead of ifelse(): ifelse() is vectorized and only
  # returned correctly here by accident of the embedded return() calls.
  if (nsp > 1) return(new.fls) else return(s.)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.