| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 2–327 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–91 |
| license_type | string | 2 classes |
| repo_name | string | length 5–134 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 46 classes |
| visit_date | timestamp[us] | 2016-08-02 22:44:29 – 2023-09-06 08:39:28 |
| revision_date | timestamp[us] | 1977-08-08 00:00:00 – 2023-09-05 12:13:49 |
| committer_date | timestamp[us] | 1977-08-08 00:00:00 – 2023-09-05 12:13:49 |
| github_id | int64 | 19.4k – 671M (nulls present) |
| star_events_count | int64 | 0 – 40k |
| fork_events_count | int64 | 0 – 32.4k |
| gha_license_id | string | 14 classes |
| gha_event_created_at | timestamp[us] | 2012-06-21 16:39:19 – 2023-09-14 21:52:42 (nulls present) |
| gha_created_at | timestamp[us] | 2008-05-25 01:21:32 – 2023-06-28 13:19:12 (nulls present) |
| gha_language | string | 60 classes |
| src_encoding | string | 24 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7 – 9.18M |
| extension | string | 20 classes |
| filename | string | length 1–141 |
| content | string | length 7 – 9.18M |
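The schema above is easier to explore programmatically than to read row by row. A minimal R sketch, assuming the rows have been exported to a local Parquet file (the file name below is a placeholder, not part of the dataset):

# Sketch only: assumes the dataset rows were downloaded to a local Parquet file.
library(arrow)
library(dplyr)
rows <- read_parquet("r_sample.parquet")          # placeholder file name
rows %>% count(license_type, sort = TRUE)         # tally the two license_type classes
rows %>% summarise(min_bytes = min(length_bytes),
                   max_bytes = max(length_bytes)) # file-size range of the R sources
cat(substr(rows$content[1], 1, 500))              # peek at the first file's content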
80c4f1f63555dd67ba04f9233328b18d117e031c
|
381ea93b5654c74584e203676ceb9bd17bd608b0
|
/man/notrack-methods.Rd
|
0b83e749c36b56e544de310af94694b91870f114
|
[] |
no_license
|
gmbecker/GRANCore
|
e26962f45f2ee646cc3c24f24ebe11212a51ec62
|
47809ff6789185df947159f1d73249abd7694bee
|
refs/heads/master
| 2022-01-11T10:00:33.795287
| 2019-10-31T00:35:24
| 2019-10-31T00:35:24
| 112,526,940
| 1
| 5
| null | 2019-05-24T21:23:05
| 2017-11-29T20:56:19
|
R
|
UTF-8
|
R
| false
| true
| 581
|
rd
|
notrack-methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/accessors.R
\docType{methods}
\name{notrack,GRANRepository-method}
\alias{notrack,GRANRepository-method}
\title{notrack
Return the directory which stores retrieved versions of non-GRAN packages
for use in virtual repositories}
\usage{
\S4method{notrack}{GRANRepository}(repo)
}
\arguments{
\item{repo}{a GRANRepository object}
}
\value{
The path to the notrack directory
}
\description{
notrack
Return the directory which stores retrieved versions of non-GRAN packages
for use in virtual repositories
}
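For orientation, a hypothetical call mirroring the usage block above; it assumes GRANCore is installed, the notrack() generic is exported, and repo is an existing GRANRepository object:

# Sketch only: 'repo' is an assumed, pre-existing GRANRepository object
library(GRANCore)
nt_dir <- notrack(repo)   # path to the directory of retrieved non-GRAN packages
nt_dir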
|
86300fbe0f71666283df697c83e59ea0ea8d29ea
|
f69bcd76b3308c3847135442719c49688b03fed3
|
/R/data.R
|
c2a3426ef4b4b8b9ec6088684f709a2ceb3e7410
|
[] |
no_license
|
cran/activityGCMM
|
8922e39b4542cedcbe0a1d117d7cf8291e76dc82
|
db777426190dd415c6ddd485844189d183395ab6
|
refs/heads/master
| 2023-06-02T21:47:56.691319
| 2021-06-14T18:20:02
| 2021-06-14T18:20:02
| 348,029,285
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,710
|
r
|
data.R
|
#' Sample data of camera trap observations of humans
#'
#'
#' @description Example dataset for fitting circular mixed effect mixture models with activityGCMM package
#'
#' @name humanssample
#' @docType data
#' @title Sample data of camera trap observations of humans
#'
#'
#'
#' @format A data frame with 3 variables:
#' \describe{
#'   \item{Radians}{Time of observations, in radians (0 to 2pi)}
#'   \item{CameraTrapID}{Variable identifying camera traps}
#'   \item{SamplingPeriod}{Variable identifying sampling period during which camera traps were recording}
#' }
#'
#'
#' @source Campbell L.A.D. 2017
#'
#' @keywords datasets
#'
#' @examples
#' data(humanssample)
#' \dontrun{ GCMM(data=humanssample$Radians, RE1=humanssample$SamplingPeriod,
#' scale=c("2pi"), family="vonmises", autojags=TRUE, thin=3) }
#'
"humanssample"
#' Sample data of camera trap observations of red fox
#'
#'
#' @description Example dataset for fitting circular mixed effect mixture models with activityGCMM package
#'
#' @name redfoxsample
#' @docType data
#' @title Sample data of camera trap observations of red fox
#'
#'
#'
#' @format A data frame with 3 variables:
#' \describe{
#'   \item{Radians}{Time of observations, in radians (0 to 2pi)}
#'   \item{CameraTrapID}{Variable identifying camera traps}
#'   \item{SamplingPeriod}{Variable identifying sampling period during which camera traps were recording}
#' }
#'
#'
#' @source Campbell L.A.D. 2017
#'
#' @keywords datasets
#'
#' @examples
#' data(redfoxsample)
#' \dontrun{ GCMM(data=redfoxsample$Radians, RE1=redfoxsample$SamplingPeriod,
#' scale=c("2pi"), family="vonmises", autojags=FALSE,
#' adapt=0, sample=300, burnin=300, thin=1, n.chains=2 ) }
#'
"redfoxsample"
|
9cb2a711f4f684fdcb9b89f6c8d078ba045a225c
|
9ec70912e3ad64c994cb89b0d153e2802c9f420f
|
/Springleaf/Scripts/r_xboost2.R
|
83f611b6f7fa4ce4ad3a42219fa80a44c23ed4ae
|
[] |
no_license
|
AdrianoW/Kaggle
|
ce0e125e7c4712233cbd3b06eb4a2f04cff21fd7
|
4fcce1ba5ba240c651f1d207e7151dfda7da8f56
|
refs/heads/master
| 2021-01-18T21:57:10.653407
| 2015-12-17T21:18:02
| 2015-12-17T21:18:02
| 23,518,743
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,377
|
r
|
r_xboost2.R
|
setwd("~/Documents/Machine Learning/Kaggle/Spring/r")
library(readr)
library(xgboost)
library(h2o)
my_h2o <- h2o.init(nthreads = 6,max_mem_size = "10G")
set.seed(8472397) #seed bag1:8, then eta=0.06not0.04&nround125not250: bag2:64, bag3:6, bag4:88, bag5: 0.03-300-seed666
#bag6:16, train[1:80000,], val=train[80001:120000,], 0.06, 125 #bag7: 888,train[65000:145000,], val=train[1:40000,], 0.06, 125
#bag8: 888,train[65000:145000,], val=train[1:40000,], 0.03, 300
#seed bag9:9999, 0.02,300,random
#bag10:425, bag11:718, bag12:719, bag13:720, bag14:721
cat("reading the train and test data\n")
train <- read_csv("../input/train.csv")
test <- read_csv("../input/test.csv")
# get the amount of different values for each column
train.unique.count=lapply(train, function(x) length(unique(x)))
# filter the columns with a single value and 2 different values
train.unique.count_1=unlist(train.unique.count[unlist(train.unique.count)==1])
train.unique.count_2=unlist(train.unique.count[unlist(train.unique.count)==2])
train.unique.count_2=train.unique.count_2[-which(names(train.unique.count_2)=='target')]
delete_const=names(train.unique.count_1)
delete_NA56=names(which(unlist(lapply(train[,(names(train) %in% names(train.unique.count_2))], function(x) max(table(x,useNA='always'))))==145175))
delete_NA89=names(which(unlist(lapply(train[,(names(train) %in% names(train.unique.count_2))], function(x) max(table(x,useNA='always'))))==145142))
delete_NA918=names(which(unlist(lapply(train[,(names(train) %in% names(train.unique.count_2))], function(x) max(table(x,useNA='always'))))==144313))
#VARS to delete
#safe to remove VARS with 56, 89 and 918 NA's as they are covered by other VARS
print(length(c(delete_const,delete_NA56,delete_NA89,delete_NA918)))
train=train[,!(names(train) %in% c(delete_const,delete_NA56,delete_NA89,delete_NA918))]
test=test[,!(names(test) %in% c(delete_const,delete_NA56,delete_NA89,delete_NA918))]
# From manual data analysis
datecolumns = c("VAR_0073", "VAR_0075", "VAR_0156", "VAR_0157", "VAR_0158", "VAR_0159", "VAR_0166", "VAR_0167", "VAR_0168", "VAR_0176", "VAR_0177", "VAR_0178", "VAR_0179", "VAR_0204", "VAR_0217")
train_cropped <- train[datecolumns]
train_cc <- data.frame(apply(train_cropped, 2, function(x) as.double(strptime(x, format='%d%b%y:%H:%M:%S', tz="UTC")))) #2 = columnwise
for (dc in datecolumns){
train[dc] <- NULL
train[dc] <- train_cc[dc]
}
train_cc <- NULL
train_cropped <- NULL
gc()
test_cropped <- test[datecolumns]
test_cc <- data.frame(apply(test_cropped, 2, function(x) as.double(strptime(x, format='%d%b%y:%H:%M:%S', tz="UTC")))) #2 = columnwise
for (dc in datecolumns){
test[dc] <- NULL
test[dc] <- test_cc[dc]
}
test_cc <- NULL
test_cropped <- NULL
gc()
# save target and put it at the end again
train_target <- train$target
train$target <- NULL
train$target <- train_target
# names(train) # 1934 variables
feature.names <- setdiff(names(train), c('ID', 'target'))   # predictor columns
for (f in feature.names) {
if (class(train[[f]])=="character") {
levels <- unique(c(train[[f]], test[[f]]))
train[[f]] <- as.integer(factor(train[[f]], levels=levels))
test[[f]] <- as.integer(factor(test[[f]], levels=levels))
}
}
cat("replacing missing values with -1\n")
train[is.na(train)] <- -1
test[is.na(test)] <- -1
write_csv(train, '../input/train_processed.csv')
write_csv(test, '../input/test_processed.csv')
train <- read_csv("../input/train_processed.csv")
test <- read_csv("../input/test_processed.csv")
#val.2 <- read_csv("../Preprocessing/val2.csv")
#tr <- read_csv("../Preprocessing/train.csv")
#test <- read_csv("../Preprocessing/test.csv")
feature.names <- setdiff(names(train), c('target', 'ID'))
# split off the training rows, then generate 2 validation sets
h <- sample(nrow(train), 120000)
tr <- train[h,]
val <- train[-h,]
rm(train)
h <- sample(nrow(val), round(0.50*nrow(val)))
val.1 <- val[h,]
val.2 <- val[-h,]
# put into the dmatrix used with xgboost
dtrain <- xgb.DMatrix(data.matrix(tr[,feature.names]), label=tr$target)
dval.1 <- xgb.DMatrix(data.matrix(val.1[,feature.names]), label=val.1$target)
dval.2 <- xgb.DMatrix(data.matrix(val.2[,feature.names]), label=val.2$target)
# train the model
watchlist <- list(eval = dval.1, train = dtrain)
#watchlist <- list(eval = dval.2, train = dtrain)
param <- list( objective = "binary:logistic",
# booster = "gblinear",
eta = 0.01, # 0.06, #0.01,
max_depth = 16, #changed from default of 8
subsample = 0.95, # 0.7
colsample_bytree = 0.8, # 0.7
eval_metric = "auc",
nthread = 2,
alpha = 0.0001,
lambda = 1
)
# best model so far
# max_depth 15, subsample 1, bytree .8, lamda 1
clf <- xgb.train(params = param,
data = dtrain,
nrounds = 1200, #300, #280, #125, #250, # changed from 300
verbose = 1,
early.stop.round = 25,
watchlist = watchlist,
maximize = TRUE,
#nthread = 2,
nfold = 3
)
#best 408
xgb.save(clf, '../Training/xgb_e.01_md16_ss.95_byt.8_aph.0001_lamb1.model')
submission <- data.frame(ID=test$ID)
#clf <- xgb.load('../Training/xgb_e.01_md16_ss.95_byt.8_aph.0001_lamb1.model')
# create the submission file
submission$target <- NA
for (rows in split(1:nrow(test), ceiling((1:nrow(test))/10000))) {
submission[rows, "target"] <- predict(clf, data.matrix(test[rows,feature.names]))
}
cat("saving the submission file\n")
write_csv(submission, "../Training/xgb_e.01_md16_ss.95_byt.8_aph.0001_lamb1.csv")
# save the validation output
submission.val <- data.frame(ID = val.2$ID)
submission.val$target <- predict(clf, data.matrix(val.2[, feature.names]))
cat("saving the submission file\n")
write_csv(submission.val, "../Training/xgb_val.csv")
# Get the feature real names
names <- dimnames(dtrain)[[2]]
# Compute feature importance matrix
importance_matrix <- xgb.importance(names, model = clf)
xgb.plot.importance(importance_matrix[1:10,])
# load an old model
clf <- xgb.load('./xgboost.model')
xgb.dump(clf, fname = './xgboost.model.dump')
|
d3b824b8aab809e48c33edf6f8f2e9e38d3488de
|
548333edfb4493b63a4cf0a2542851178bb1a557
|
/Models/4_Asset_impacts/MSCI_CC_waterfalls.R
|
a5917fc99fe8b45812041151b9819b4599d2cf92
|
[] |
no_license
|
Allisterh/Climate_change_financial-IPR
|
b3137e270cacd35b08ebb7c2a612a9d1047a68bd
|
1b304b9d0ec2b362d9cfa0d46bb6ebc1d49fe623
|
refs/heads/master
| 2023-03-26T07:44:54.148687
| 2019-07-16T16:02:27
| 2019-07-16T16:02:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,410
|
r
|
MSCI_CC_waterfalls.R
|
##### Project code: Net-Zero Toolkit for modelling the financial impacts of low-carbon transition scenarios
##### Date of last edit: 03/04/2019
##### Code author: Shyamal Patel
##### Dependencies: 1. Cost & competition model results under a variety of parameter assumptions
##### See attributes and Section 1 for more details
##### Notes: None
##### Called by: N/A
#--------------------------------------------------------------------------------------------------
##### SECTION 1 - Housekeeping and data read in ----
# Define master_folder and source utils file which contains useful functions
main_save_folder <- "4_Asset_impacts"
source("utils.R")
# Recast input source for Interim folder
input_source <- function(filename) {
fs::path(here::here(main_save_folder), "Interim", filename)
}
### Read in cost & competition model results for each of the 3 value chain element model runs with
### CM and DD models switched on, and verify that attributes are correct
### (the 'right' switches are ON and OFF as we cycle through)
# Carbon cost only results
results_cc <- readRDS("3_Cost_and_competition/Output/Dated/190225_1106_Subsidiary_results.rds")
glimpse(attr(results_cc, "parameters"))
# Abatement on results
results_abt <- readRDS("3_Cost_and_competition/Output/Dated/190225_1107_Subsidiary_results.rds")
glimpse(attr(results_abt, "parameters"))
# Cost pass through on results (final)
results_cpt <- readRDS("3_Cost_and_competition/Output/Dated/190225_1108_Subsidiary_results.rds")
glimpse(attr(results_cpt, "parameters"))
# Model panel for emissions, elasiticity, product differentiation and DD / CM model results
panel <- readRDS("3_Cost_and_competition/Interim/Dated/190220_1508_Cleaned_model_panel.rds")
# Check that results have been properly calibrated - differences should be in abatement potential for (1) and
# in CPT, sales impact and Q reallocation for (2)
# library(daff)
# render_diff(diff_data(attr(results_cc, "parameters"), attr(results_abt, "parameters")))
# render_diff(diff_data(attr(results_abt, "parameters"), attr(results_cpt, "parameters")))
### Read in cost & competition model results for each of the 3 value chain element model runs with
### CM and DD models switched off (looking at CC in isolation)
# Cost & competition model only - carbon cost only results
results_cc_only_cc <- readRDS("3_Cost_and_competition/Output/Dated/190222_1702_Subsidiary_results.rds")
glimpse(attr(results_cc_only_cc, "parameters"))
# Cost & competition model only - abatement on results
results_cc_only_abt <- readRDS("3_Cost_and_competition/Output/Dated/190222_1704_Subsidiary_results.rds")
glimpse(attr(results_cc_only_abt, "parameters"))
# Cost & competition model only - cost pass through on results (final)
results_cc_only_cpt <- readRDS("3_Cost_and_competition/Output/Dated/190222_1705_Subsidiary_results.rds")
glimpse(attr(results_cc_only_cpt, "parameters"))
# Check that results are correctly calibrated
# library(daff)
# render_diff(diff_data(attr(results_cc_only_cc, "parameters"), attr(results_cc_only_abt, "parameters")))
# render_diff(diff_data(attr(results_cc_only_abt, "parameters"), attr(results_cc_only_cpt, "parameters")))
#--------------------------------------------------------------------------------------------------
##### SECTION 2 - Clean up DD and CM model results ----
# Find stranding results using the profit impact index and market cap from the model panel
results_dd <- panel %>%
select(scenario, company_id, company, market, region, market_cap_2017, profit_impact_pct) %>%
mutate(profit_impact_pct = case_when(substring(market, 1, 3) == "GR_" ~ NA_real_,
TRUE ~ profit_impact_pct)) %>%
mutate(profit_dd = case_when(!is.na(profit_impact_pct) ~ market_cap_2017 * (1 + profit_impact_pct),
TRUE ~ market_cap_2017)) %>%
select(-profit_impact_pct)
# Find cleantech market results using the profit impact index and market cap from the model panel
# [note that this is cumulative so includes the effects of both DD and CM]
results_cm <- panel %>%
select(scenario, company_id, company, market, region, market_cap_2017, profit_impact_pct) %>%
mutate(profit_cm = case_when(!is.na(profit_impact_pct) ~ market_cap_2017 * (1 + profit_impact_pct),
TRUE ~ market_cap_2017)) %>%
select(-profit_impact_pct)
#--------------------------------------------------------------------------------------------------
##### SECTION 3 - Combine MSCI ACWI waterfall datasets ----
shorten_tibble <- function(shorten_arg) {
data <- shorten_arg[[1]]
subscript <- shorten_arg[[2]]
temp <- data %>%
select(scenario, company_id, company, market, region, market_cap_2017, market_cap_model, index, index_cap) %>%
rename_at(vars(market_cap_model, index, index_cap),
funs(paste0(., "_", subscript)))
return(temp)
}
list_cc <- list(results_cc, "cc")
list_abt <- list(results_abt, "abt")
list_cpt <- list(results_cpt, "cpt")
results_comb <- map(list(list_cc, list_abt, list_cpt), shorten_tibble) %>%
reduce(left_join)
# Verify that all the market_cap_model values are the same
results_comb2 <- results_comb %>%
select(-contains("market_cap_model")) %>%
select(-starts_with("index_cap")) %>%
mutate_at(vars(index_cc, index_abt, index_cpt),
funs(profit = market_cap_2017 * (1 + .))) %>%
rename_at(vars(ends_with("profit")),
funs(paste0("profit_", gsub("index_", "", gsub("_profit", "", .))))) %>%
select(-starts_with("index"))
# Join in demand destruction and cleantech market results
results_comb3 <- results_comb2 %>%
left_join(results_dd) %>%
left_join(results_cm)
# Summarise over regions, markets and companies
results_comb4 <- results_comb3 %>%
group_by(scenario) %>%
summarise_at(vars(market_cap_2017, profit_dd, profit_cm, profit_cc, profit_abt, profit_cpt),
funs(sum(., na.rm = TRUE))) %>%
ungroup() %>%
mutate_at(vars(market_cap_2017, profit_dd, profit_cm, profit_cc, profit_abt, profit_cpt),
funs(index = . / market_cap_2017))
#--------------------------------------------------------------------------------------------------
##### SECTION 4 - Summarise MSCI ACWI data and create waterfall ----
msci_acwi_waterfall <- function(plot_scenario) {
temp <- results_comb4 %>%
filter(scenario == plot_scenario) %>%
select(scenario, contains("_index")) %>%
# Add a column which takes the value of CPT (rest can be differenced to be effects of the change from previous column)
mutate(profit_final_index = profit_cpt_index) %>%
gather(key = category, value = profit, (market_cap_2017_index:profit_final_index)) %>%
mutate(profit = profit * 100) %>%
mutate(category = case_when(category == "market_cap_2017_index" ~ "Paris NDCs",
category == "profit_dd_index" ~ "Demand destruction",
category == "profit_cm_index" ~ "Cleantech markets",
category == "profit_cc_index" ~ "Carbon costs",
category == "profit_abt_index" ~ "Abatement",
category == "profit_cpt_index" ~ "Cost pass through",
category == "profit_final_index" ~ gsub("_", " ", plot_scenario),
TRUE ~ NA_character_)) %>%
mutate(category = ordered(category, levels = c("Paris NDCs", "Demand destruction", "Cleantech markets",
"Carbon costs", "Abatement", "Cost pass through", gsub("_", " ", plot_scenario)))) %>%
arrange(category) %>%
mutate(lagged_profit = lag(profit, n = 1),
delta_profit = profit - lagged_profit) %>%
# Calculate waterfall variables
mutate(base_stack = ifelse(category %in% c("Paris NDCs", gsub("_", " ", plot_scenario)), profit, NA_real_),
invisible_stack = case_when(delta_profit <= 0 ~ lagged_profit + delta_profit,
delta_profit > 0 ~ lagged_profit,
TRUE ~ NA_real_),
fall_stack = ifelse(delta_profit < 0, -delta_profit, NA_real_),
rise_stack = ifelse(delta_profit > 0, delta_profit, NA_real_)) %>%
select(scenario, category, contains("_stack")) %>%
gather(key = stack_type, value = stack_value, contains("_stack")) %>%
mutate(stack_type = ordered(stack_type, levels = c("base_stack", "invisible_stack", "fall_stack", "rise_stack"))) %>%
mutate(stack_type = fct_rev(stack_type))
waterfall <- ggplot(temp) +
geom_col(aes(x = category, y = stack_value, fill = stack_type), colour = NA, width = 0.75) +
scale_x_discrete(labels = function(x) str_wrap(x, width = 10)) +
scale_y_continuous(name = "NPV profits (normalised)", expand = c(0,0)) +
scale_fill_manual(values = plot_colours) +
theme_vivid(vivid_size = 1.6) +
theme(legend.position = "none",
axis.title.x = element_blank())
waterfall <- waterfall + coord_cartesian(ylim = c(40, 100))
ggsave(paste0("4_Asset_impacts/Output/Plots/MSCI_waterfalls/MSCI_ACWI_", plot_scenario, ".png"), plot = waterfall, width = 16, height = 9)
}
plot_colours <- c("base_stack" = rgb(0, 143, 159, max = 255), "fall_stack" = rgb(255, 77, 166, max = 255), "rise_stack" = rgb(0, 196, 103, max = 255),
"invisible_stack" = NA)
map(unique(results_comb4$scenario)[unique(results_comb4$scenario) != "Paris_NDCs"], msci_acwi_waterfall)
#--------------------------------------------------------------------------------------------------
##### SECTION 5 - Combine cost & competition model only datasets ----
list_cc_only_cc <- list(results_cc_only_cc, "cc")
list_cc_only_abt <- list(results_cc_only_abt, "abt")
list_cc_only_cpt <- list(results_cc_only_cpt, "cpt")
results_cc_only_comb <- map(list(list_cc_only_cc, list_cc_only_abt, list_cc_only_cpt), shorten_tibble) %>%
reduce(left_join)
# Verify that all the market_cap_model values are the same
results_cc_only_comb2 <- results_cc_only_comb %>%
select(-contains("market_cap_model")) %>%
select(-starts_with("index_cap")) %>%
mutate_at(vars(index_cc, index_abt, index_cpt),
funs(profit = market_cap_2017 * (1 + .))) %>%
rename_at(vars(ends_with("profit")),
funs(paste0("profit_", gsub("index_", "", gsub("_profit", "", .))))) %>%
select(-starts_with("index"))
# Summarise over regions
results_cc_only_comb3 <- results_cc_only_comb2 %>%
group_by(scenario, company_id, company, market) %>%
summarise_at(vars(market_cap_2017, profit_cc, profit_abt, profit_cpt),
funs(sum(., na.rm = TRUE))) %>%
ungroup() %>%
mutate_at(vars(profit_cc, profit_abt, profit_cpt),
funs(index = . / market_cap_2017 - 1)) %>%
rename_at(vars(ends_with("_index")),
funs(paste0("index_", gsub("profit_", "", gsub("_index", "", .)))))
# Shorten the panel dataset to essential variables only and calculate emissions intensity at the business segment level
# summarise over regions first
panel2 <- panel %>%
group_by(scenario, company_id, company, market) %>%
summarise_at(vars(revenue_2017, co2_scope_1_2017, co2_scope_2_2017, co2_scope_3_2017),
funs(sum(., na.rm = TRUE))) %>%
ungroup()
#--------------------------------------------------------------------------------------------------
##### SECTION 6 - Group cost & competition model only datasets based on
##### above / below median and prepare for waterfall ----
# Find median
results_cc_only_comb4 <- results_cc_only_comb3 %>%
group_by(scenario, market) %>%
mutate(median_impact = quantile(index_cpt, probs = 0.5),
median_test = case_when(index_cpt <= median_impact ~ "BELOW",
TRUE ~ "ABOVE"))
# Add emissions intensity to the data
results_cc_only_comb5 <- results_cc_only_comb4 %>%
left_join(panel2, by = c("scenario", "company_id", "company", "market"))
# Summarise variables over categories and index so initial profits are 1
results_cc_only_comb6 <- results_cc_only_comb5 %>%
group_by(scenario, market, median_test) %>%
summarise_at(vars(market_cap_2017, profit_cc, profit_abt, profit_cpt, revenue_2017, co2_scope_1_2017, co2_scope_2_2017, co2_scope_3_2017),
funs(sum(., na.rm = TRUE))) %>%
mutate_at(vars(market_cap_2017, profit_cc, profit_abt, profit_cpt),
funs(index = . / market_cap_2017)) %>%
mutate_at(vars(co2_scope_1_2017, co2_scope_2_2017, co2_scope_3_2017),
funs(intensity = . / revenue_2017)) %>%
ungroup() %>%
select(-revenue_2017, -co2_scope_1_2017, -co2_scope_2_2017, -co2_scope_3_2017)
save_dated(results_cc_only_comb6, "Cost_and_comp_statistics", folder = "Output", csv = TRUE)
#--------------------------------------------------------------------------------------------------
##### SECTION 7 - Create cost & competition model only - above / below median waterfalls ----
cost_comp_waterfall <- function(plot_scenario, plot_market, plot_group) {
temp <- results_cc_only_comb6 %>%
filter(scenario == plot_scenario & market == plot_market & median_test == plot_group) %>%
select(scenario, market, median_test, contains("_index")) %>%
# Add a column which takes the value of CPT (rest can be differenced to be effects of the change from previous column)
mutate(profit_final_index = profit_cpt_index) %>%
gather(key = category, value = profit, (market_cap_2017_index:profit_final_index)) %>%
mutate(profit = profit * 100) %>%
mutate(category = case_when(category == "market_cap_2017_index" ~ "Paris NDCs",
category == "profit_cc_index" ~ "Carbon costs",
category == "profit_abt_index" ~ "Abatement",
category == "profit_cpt_index" ~ "Cost pass through",
category == "profit_final_index" ~ "2DS Balanced Transformation",
TRUE ~ NA_character_)) %>%
mutate(category = ordered(category, levels = c("Paris NDCs", "Carbon costs", "Abatement", "Cost pass through", "2DS Balanced Transformation"))) %>%
arrange(category) %>%
mutate(lagged_profit = lag(profit, n = 1),
delta_profit = profit - lagged_profit) %>%
# Calculate waterfall variables
mutate(base_stack = ifelse(category %in% c("Paris NDCs", "2DS Balanced Transformation"), profit, NA_real_),
invisible_stack = case_when(delta_profit <= 0 ~ lagged_profit + delta_profit,
delta_profit > 0 ~ lagged_profit,
TRUE ~ NA_real_),
fall_stack = ifelse(delta_profit < 0, -delta_profit, NA_real_),
rise_stack = ifelse(delta_profit > 0, delta_profit, NA_real_)) %>%
select(scenario, market, median_test, category, contains("_stack")) %>%
gather(key = stack_type, value = stack_value, contains("_stack")) %>%
mutate(stack_type = ordered(stack_type, levels = c("base_stack", "invisible_stack", "fall_stack", "rise_stack"))) %>%
mutate(stack_type = fct_rev(stack_type))
ggplot(temp) +
geom_col(aes(x = category, y = stack_value, fill = stack_type), colour = NA, width = 0.75) +
scale_x_discrete(labels = function(x) str_wrap(x, width = 18)) +
scale_y_continuous(name = "NPV profits (normalised)", expand = c(0,0), limits = c(0, 130)) +
scale_fill_manual(values = plot_colours) +
theme_vivid(vivid_size = 1.6) +
theme(legend.position = "none",
axis.title.x = element_blank(),
aspect.ratio = 9 / 21.16)
ggsave(paste0("4_Asset_impacts/Output/Plots/CC_waterfalls/", plot_market, "_", plot_group, "_", plot_scenario, ".png"), width = 21.16, height = 9)
}
plot_colours <- c("base_stack" = rgb(0, 143, 159, max = 255), "fall_stack" = rgb(255, 77, 166, max = 255), "rise_stack" = rgb(0, 196, 103, max = 255),
"invisible_stack" = NA)
cost_comp_waterfall("2DS_Balanced_Transformation", "Concrete and cement", "BELOW")
cost_comp_waterfall("2DS_Balanced_Transformation", "Concrete and cement", "ABOVE")
cost_comp_waterfall("2DS_Balanced_Transformation", "Power generation", "BELOW")
cost_comp_waterfall("2DS_Balanced_Transformation", "Power generation", "ABOVE")
|
6ebbd2dcdc92f23e0ed41f7bb99b917f10fabbca
|
ef2b942dd21ac2dbf57a1541b75b019b422c332b
|
/ABNLookupSampleCodeR/ABNSampleCodeR.R
|
6fc5e73fb91c422d7f98c6b1a20041d0c6a94a73
|
[
"MIT"
] |
permissive
|
madhuri-butn/ABNLookupSampleCode
|
d25959e44f01de9c1be92d200da3a32da3edd6e3
|
1c4fc2fd0575d23284878b82c78aedd22191044d
|
refs/heads/master
| 2023-07-02T16:26:33.204109
| 2021-07-30T07:50:30
| 2021-07-30T07:50:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 486
|
r
|
ABNSampleCodeR.R
|
install.packages(c("httr", "jsonlite"))
library(httr)
library(jsonlite)
guid <- "paste_your_guid_here"
abn <- "26008672179"
response <- GET("https://abr.business.gov.au/json/AbnDetails.aspx", query = list(guid = guid, abn = abn, callback = "callback"))
responseText <- content(response, as = "text", encoding = "UTF-8") # extract the body as text before stripping the JSONP wrapper
removeCallback <- sub('[^\\[|\\{]*', '', responseText) # remove callback name and opening parenthesis
removeClosingParenthesis <- sub('\\);*$', '', removeCallback) # remove closing parenthesis
results <- fromJSON(removeClosingParenthesis)
|
9aa6dbd6e5e496017d5602f712bb81cb41ccaabd
|
87758b3ef57e7b6fc9ee25f33b411d7f319da121
|
/text_analysis.R
|
a16bbb006ffe11fb95386982cc483e9d6bea6b9b
|
[] |
no_license
|
p10rahulm/cancer_text
|
2e747db7768f91cc85807282aaa1a0dda99b90a9
|
5cbf70b9a2488a4df84e2243346e2ab1e7c55d86
|
refs/heads/master
| 2021-07-09T06:25:24.041673
| 2017-10-08T16:21:27
| 2017-10-08T16:21:27
| 104,238,178
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 356
|
r
|
text_analysis.R
|
rm(list=ls())
# -------------
# Load Libraries
# -------------
library(stringr)
library(tm)
# install.packages("RWeka")
library(RWeka)
library(SnowballC)
# -------------
# readfiles
# -------------
load("cleaned/training_words_removed_cleaned.bin")
synopses <- readLines(con = "cleaned/training_synopses.txt")
# -------------
# readfiles
# -------------
|
2ce01f2b643b44b92f39ae6e7c23e32ec6acfad4
|
7c8f9b15213eca35cac247428b5f5b7a7d14cfec
|
/code/02_cleandata.R
|
ca09bc595fdb86e9606724bb2e8a20e609ea9556
|
[] |
no_license
|
ausmani23/jobtalk_crim
|
74dcbd518d154daf68f0f9327ebed5ea614274dd
|
312af53de1aec335bde2bb98e813a90e2b66369f
|
refs/heads/master
| 2020-04-08T05:25:06.567064
| 2018-11-29T16:00:40
| 2018-11-29T16:00:40
| 159,059,662
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,337
|
r
|
02_cleandata.R
|
#########################################################
#########################################################
#clear workspace
rm(list=ls())
#load packages
require(stringr)
require(plyr)
require(dplyr)
require(zoo)
require(tidyr)
require(rprojroot)
#set dirs
homedir<-find_root(
criterion=has_file('crimtalk.RProj')
)
codedir<-file.path(homedir,"code")
setwd(codedir); dir()
source('dirs.R')
#########################################################
#########################################################
#clean the ncvs data
#counts, serious violent victimization
#loop through each file
setwd(datadir); dir()
filenames<-c(
'ncvs_count_1999to2009.csv',
'ncvs_rate_1999to2009.csv',
'ncvs_count_2010to2015.csv',
'ncvs_rate_2010to2015.csv',
'ncvs_rate_1993to1998.csv',
'ncvs_count_1993to1998.csv'
)
fulldf<-lapply(filenames,function(thisf) {
#thisf<-filenames[1]
tmpdf<-read.csv(
thisf,
stringsAsFactors=F
)
#get yrs, var from filename
yrs<-str_extract_all(thisf,"[0-9]{4}")[[1]] %>% as.numeric
years<-paste0("y",yrs[1]:yrs[2])
thisvar<-str_extract(thisf,"count|rate")
#clean and loop through
tmp<-tmpdf$race==""
tmpdf$race[tmp]<-NA
tmpdf$race<-na.locf(tmpdf$race)
tmp<-tmpdf$income==""
tmpdf$income[tmp]<-NA
tmpdf$income<-na.locf(tmpdf$income)
tmpseq.i<-1:nrow(tmpdf)
mydf<-lapply(tmpseq.i,function(i) {
#i<-1
x<-apply(tmpdf[i,],1,identity)
y<-x[!is.na(x) & x!="" & x!="!"]
if(length(y)!=(length(years)+2)) stop(print(i))
tmpdf<-data.frame(
t(y)
)
}) %>% rbind.fill
names(mydf)<-c(
"race",
"income",
years
)
mydf$var<-thisvar
#gather and append..
mydf$race<-tolower(mydf$race)
mydf$income<-tolower(mydf$income)
mydf<-gather(
mydf,
"year",
"value",
years
)
mydf
}) %>% rbind.fill
fulldf<-spread(
fulldf,
var,
value
)
#now we can convert to numeric
tmp<-as.numeric(fulldf$count)
fulldf$count[is.na(tmp)]
fulldf$count<-tmp
tmp<-as.numeric(fulldf$rate)
fulldf$rate[is.na(tmp)]
fulldf$rate<-tmp
#get # of people in each category
fulldf$number<-10^3 *fulldf$count/fulldf$rate
#classify into poor and rich
fulldf$income<-str_replace_all(
fulldf$income,
",|\\$",""
)
fulldf$income %>% unique
fulldf$class<-"other"
tmp<-fulldf$income%in%c(
"less than 7500",
"7500 to 14999"
)
fulldf$class[tmp]<-"poor"
tmp<-fulldf$income%in%c(
"15000 to 24999",
"25000 to 34999",
"35000 to 49999",
"50000 to 74999"
)
fulldf$class[tmp]<-"middle"
tmp<-fulldf$income%in%c(
"75000 or more"
)
fulldf$class[tmp]<-"rich"
#drop totals
tmp<-fulldf$class=="other"
fulldf<-fulldf[!tmp,]
tmplevels<-c(
"hispanic",
"non-hispanic black",
"non-hispanic other",
"non-hispanic white"
)
tmplabels<-c(
"hispanic",
"black",
"other",
"white"
)
fulldf$race<-factor(
fulldf$race,
tmplevels,
tmplabels
)
setwd(datadir)
fulldf$year<-str_extract(fulldf$year,"[0-9]+") %>%
as.numeric
write.csv(
fulldf,
"ncvs_summaries.csv",
row.names=F
)
#########################################################
#########################################################
#( all cleaning should take place here
#do it later, if there's time.. )
#########################################################
#########################################################
#save out into dfs
|
08d46d0f69598629f647705597178b002cabe9e1
|
12285ae33495c15adbaa61864de1f44ea92792ed
|
/R/fars_read_years.R
|
f0d0b57580268a267332c7d7baf7534231d82f17
|
[] |
no_license
|
ragp29/project01
|
7dd72b8640409eb344f437f62765db63292a4aed
|
6f1f892fcd127302384d2c6938d32e3ba23dfed0
|
refs/heads/master
| 2021-01-19T01:09:28.481282
| 2017-04-05T10:35:13
| 2017-04-05T10:35:13
| 87,229,062
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 627
|
r
|
fars_read_years.R
|
#'Reads in FARS data for the given years and returns, for each year, a data frame containing month and year
#'
#'@param years a vector of years for which FARS data should be read
#'
#'@importFrom dplyr mutate select
#'@importFrom magrittr "%>%"
#'
#'@return a data frame tbl which includes month and year across different years
#'
#'@export
fars_read_years <- function(years) {
year <- NULL
MONTH <- NULL
lapply(years, function(year) {
file <- make_filename(year)
tryCatch({
dat <- fars_read(file)
dplyr::mutate(dat, year = year) %>%
dplyr::select(MONTH, year)
}, error = function(e) {
warning("invalid year: ", year)
return(NULL)
})
})
}
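A brief usage sketch (not part of the package source): it assumes the package's make_filename() and fars_read() helpers are loaded and that the yearly FARS data files they point to are present in the working directory.

# Sketch only: requires make_filename(), fars_read() and the yearly FARS files.
monthly <- fars_read_years(c(2013, 2014))
str(monthly)   # a list with one MONTH/year data frame per requested year, NULL for invalid years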
|
777c679cdcc4f5667e9f51a76883d9e7735adfb3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/BayesComm/examples/print.bayescomm.Rd.R
|
54bd572c6fbb35f2351756c629bbce7f0121770c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 169
|
r
|
print.bayescomm.Rd.R
|
library(BayesComm)
### Name: print.bayescomm
### Title: Print a bayescomm object
### Aliases: print.bayescomm
### ** Examples
m1 <- example(BC)[[1]]
print(m1)
m1
|
85de020ed6b8affc5fc1549e6d1a0038cc6b6354
|
61ae80dc84faad496a920817f56b2f2c94cd7d59
|
/removals/ibm/old/summarize_validate_sims.r
|
a19b6d7cfbac43816a56e25a2a1597d286feea59
|
[] |
no_license
|
pbadler/ExperimentTests
|
1f78c6be8c193345bd63e11c7ba65a423bf77ad4
|
b2bfeb446a54446210e3f04a010d689dc577f257
|
refs/heads/master
| 2021-04-09T17:39:42.583837
| 2021-03-16T22:12:05
| 2021-03-16T22:12:05
| 53,069,884
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,889
|
r
|
summarize_validate_sims.r
|
# call from validate wrapper
setwd("ibm/")
sppList=c("ARTR","HECO","POSE","PSSP")
Nspp=length(sppList)
sppNames=c("A. tripartita","H. comata","Poa secunda","P. spicata")
myCol=c("black","forestgreen","blue","red")
# control plots
qList <- paste0("Q",c(1:6,19:26)) #c("Q1","Q2","Q3","Q4","Q5","Q6" )
covD=NULL
for(i in 1:length(qList)){
infile=paste("simulations1step/",qList[i],"_validation_cov_removals_noTrt.csv",sep="")
tmpD=read.csv(infile)
covD=rbind(covD,tmpD)
}
control.mean=aggregate(covD[,2:NCOL(covD)],by=list(year=covD$year),FUN=mean,na.rm=T)
control.sd=aggregate(covD[,2:NCOL(covD)],by=list(year=covD$year),FUN=sd,na.rm=T)
# no shrub plots, no treatment effects
qList <- c("Q47","Q50","Q52","Q53","Q54","Q56","Q59","Q61")
covD=NULL
for(i in 1:length(qList)){
infile=paste("simulations1step/",qList[i],"_validation_cov_removals_noTrt.csv",sep="")
tmpD=read.csv(infile)
covD=rbind(covD,tmpD)
}
covD$ARTR=NA ; covD$ARTRpred = NA # get rid of ARTR so grass trends are easier to see
noshrub.mean=aggregate(covD[,2:NCOL(covD)],by=list(year=covD$year),FUN=mean,na.rm=T)
noshrub.sd=aggregate(covD[,2:NCOL(covD)],by=list(year=covD$year),FUN=sd,na.rm=T)
# no shrub plots, WITH treatment effects
qList <- c("Q47","Q50","Q52","Q53","Q54","Q56","Q59","Q61")
covD=NULL
for(i in 1:length(qList)){
infile=paste("simulations1step/",qList[i],"_validation_cov_removals_Trt.csv",sep="")
tmpD=read.csv(infile)
covD=rbind(covD,tmpD)
}
covD$ARTR=NA ; covD$ARTRpred = NA # get rid of ARTR so grass trends are easier to see
noshrubTRT.mean=aggregate(covD[,2:NCOL(covD)],by=list(year=covD$year),FUN=mean,na.rm=T)
noshrubTRT.sd=aggregate(covD[,2:NCOL(covD)],by=list(year=covD$year),FUN=sd,na.rm=T)
# no grass plots, no treatment effects
qList <- c("Q48","Q49","Q51","Q55","Q57","Q58","Q60","Q62") # no grass
covD=NULL
for(i in 1:length(qList)){
infile=paste("simulations1step/",qList[i],"_validation_cov_removals_noTrt.csv",sep="")
tmpD=read.csv(infile)
covD=rbind(covD,tmpD)
}
nograss.mean=aggregate(covD[,2:NCOL(covD)],by=list(year=covD$year),FUN=mean,na.rm=T)
nograss.sd=aggregate(covD[,2:NCOL(covD)],by=list(year=covD$year),FUN=sd,na.rm=T)
# no grass plots WITH treatment effects
qList <- c("Q48","Q49","Q51","Q55","Q57","Q58","Q60","Q62") # no grass
covD=NULL
for(i in 1:length(qList)){
infile=paste("simulations1step/",qList[i],"_validation_cov_removals_Trt.csv",sep="")
tmpD=read.csv(infile)
covD=rbind(covD,tmpD)
}
nograssTRT.mean=aggregate(covD[,2:NCOL(covD)],by=list(year=covD$year),FUN=mean,na.rm=T)
nograssTRT.sd=aggregate(covD[,2:NCOL(covD)],by=list(year=covD$year),FUN=sd,na.rm=T)
#set up plotting function
plotObsPred<-function(doSpp,mydata1,mydata2,mydata3,mytitle){
# format data
newD=data.frame(mydata1$year,mydata1[,1+doSpp],mydata1[,5+doSpp], # control obs and pred
mydata2[,1+doSpp],mydata2[,5+doSpp], # removal obs and pred (no TRT effect)
mydata3[,5+doSpp]) # removal pred (with TRT effect)
names(newD)=c("year","control.obs","control.pred","remove.obs","remove.pred","remove.predTRT")
matplot(newD$year,newD[,2:6]/100,type="o",xlab="",ylab="",
col=c(rep("black",2),rep("blue",3)),
pch=c(16,1,16,1,2),
lty=c("solid","dashed","solid","dashed","dashed"))
title(main=mytitle,adj=0,font.main=1)
}
png("obsVSpred_project1step.png",units="in",height=3.5,width=8.5,res=600)
par(mfrow=c(1,4),tcl=-0.2,mgp=c(2,0.5,0),mar=c(2,2,2,1),oma=c(2,2,0,0))
plotObsPred(1,control.mean,nograss.mean,nograssTRT.mean,"ARTR")
plotObsPred(2,control.mean,noshrub.mean,noshrubTRT.mean,"HECO")
plotObsPred(3,control.mean,noshrub.mean,noshrubTRT.mean,"POSE")
plotObsPred(4,control.mean,noshrub.mean,noshrubTRT.mean,"PSSP")
mtext(side=1,"Year",line=0.5, outer=T)
mtext(side=2,"Mean cover (%)",line=0.5, outer=T)
dev.off()
setwd("..")
|
ebd6a41429468486af597a6b37081baf0029c88d
|
f363f246688186ceab7ea22a1f47e2d184ddfa42
|
/R/foo.R
|
b515d207cca637f394781ee6989c55e435c4d5ec
|
[] |
no_license
|
aljabadi/TravisTest
|
82369f7650c432ad16277782dec8ef011059c8aa
|
0ddac2fd19c3ccb1cbd56d9e5a5347931f9e48e3
|
refs/heads/master
| 2022-03-13T14:20:03.077587
| 2019-11-26T01:34:33
| 2019-11-26T01:34:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 132
|
r
|
foo.R
|
#' Foo
#'
#' @param bar bars of some foo
#'
#' @return stuff
#' @export
#'
#' @examples
#' foo(3)
foo <- function(bar=5){
bar
}
|
26f27242ad5be46a1b95568fabe138e4c421df4e
|
703d042ac43e56d9a347d3cc1bc9ead0d0eb6f4f
|
/1_likelihood/R_scripts/plot_likelihood_surface.R
|
2a94dd594057e5640d7ad34202a449b5a137195a
|
[] |
no_license
|
hoehna/birth-death-shift-analyses
|
732093ac26b24ad79726660c8d8cdf9836eced62
|
b24e98d9bd6ce6af375b64b8bcfce32e5d03b575
|
refs/heads/master
| 2020-04-05T21:43:05.806601
| 2019-09-16T21:52:42
| 2019-09-16T21:52:42
| 157,230,453
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,054
|
r
|
plot_likelihood_surface.R
|
colors = rep(1,2)
likelihoods = read.table("likelihoods/primates.csv",header=TRUE)
x_vals = likelihoods$Rel.Ext
space = 30
lwd = 4
cex = 0.7
pch = c(1,3)
f = 1.2
pdf(paste0("../figures/Likelihood_surface.pdf"),width=7.5, height=5)
par(lend=2, mar=c(5,6,0.3,0.3))
plot(x_vals, likelihoods$BDP, type="l", lwd=lwd, col="grey90", xaxt="n", yaxt="n", xlab=NA, ylab=NA)
points(x_vals[seq(1, length(x_vals), space)], y=likelihoods$DA[seq(1, length(x_vals), space)], pch=pch[1], col=colors[1], cex=cex)
points(x_vals[seq(1 + 0.5 * space, length(x_vals), space)], y=likelihoods$SCM[seq(1 + 0.5 * space, length(x_vals), space)], pch=pch[2], col=colors[2], cex=cex)
axis(1, lwd.tick=1, lwd=0)
axis(2, lwd.tick=1, lwd=0, las=2)
mtext(side=2, text="log likelihood", line=4.0, cex=1.4)
mtext(side=1, text="relative extinction", line=2.5, cex=1.4)
legend("topleft", legend=c("analytical","data-augmentation","numerical integration"), bty="n", lty=c(1,NA,NA), pch=c(NA, pch[1], pch[2]), col=c("grey90",colors[1], colors[2]), lwd=c(lwd,1,1))
dev.off()
|
34e2a527874d3418d550012af0b2e23158438c8e
|
2eebaf7f9e3246d2453df9289574fba9e7f8151a
|
/inst/flowfinder/data_tab.R
|
d09d14f30b4536f3fc9283ddebd9e06f3303a854
|
[
"MIT"
] |
permissive
|
mikejohnson51/FlowFinder
|
b47f2e726c452770bfa755c0da90ff7d13a1da92
|
617610cb3d53229de23a43775892223f8f854162
|
refs/heads/master
| 2021-06-07T20:26:34.622266
| 2021-03-15T00:05:05
| 2021-03-15T00:05:05
| 136,057,097
| 6
| 1
|
MIT
| 2021-03-15T00:05:05
| 2018-06-04T17:00:17
|
R
|
UTF-8
|
R
| false
| false
| 3,462
|
r
|
data_tab.R
|
# Generate static ggplot graph
static_plot <- function(values, selected, id, normals) {
#color <- ifelse(values$data$Q_cfs <= cutoff, '#0069b5', 'red')
time = values$data %>%
filter(COMID == id)
df = data.frame(time = time$dateTime)
for (stream in selected) {
text = stream
stream_id = getIDs(text)[1]
data = values$data %>%
filter(COMID == stream_id)
df[as.character(stream_id)] = data$Q_cfs
}
df.long = reshape2::melt(df, id="time")
colnames(df.long) <- c("time", "COMID", "value")
graph = ggplot(data = df.long, aes(time, value, colour = COMID)) +
theme_bw() +
theme(axis.title.x = element_text(margin = unit(c(6, 0, 0, 0), "mm")),
axis.title.y = element_text(margin = unit(c(0, 6, 0, 0), "mm"))) +
labs(x = "Date and Time",
y = "Streamflow (cfs)") +
scale_x_datetime(expand = c(0, 0)) +
scale_y_continuous(expand = expand_scale(mult = c(0, .05)))
# title = paste0(ifelse(is.na(values$flow_data$nhd$gnis_name[values$flow_data$nhd$comid == values$flow_data$nhd$comid[values$i]]), "", paste0(values$flow_data$nhd@data$gnis_name[values$flow_data$nhd$comid == values$flow_data$nhd$comid[values$i]], " ")),
# paste0("COMID: ", values$flow_data$nhd$comid[values$flow_data$nhd$comid == values$flow_data$nhd$comid[values$i]])))
if (length(selected) > 1) {
graph = graph +
geom_line() +
geom_point()
}
else{
cutoff <- normals %>%
filter(COMID == id)
cutoff = cutoff[,2] * 35.3147
mn = mean(df[[2]], na.rm = TRUE)
std = sd(df[[2]], na.rm = TRUE)
graph = graph +
geom_rect(aes(ymin=mn - std, ymax=mn + std, xmin=df$time[1], xmax=df$time[length(df$time)]),fill = "#ededed", size = 0, show.legend = FALSE, alpha = .1) +
geom_hline(aes(yintercept = cutoff), colour = "red", show.legend = FALSE) +
geom_text(aes(df$time[2],cutoff,label = paste0("Monthly Average (", round(cutoff,2), " cfs)"), vjust = -1), show.legend = FALSE) +
geom_area(aes(fill="pos"), fill = '#0069b5', size = 0, alpha = .2, show.legend = FALSE) +
geom_line(color="#0069b5", show.legend = TRUE) +
geom_point(color = "#0069b5", show.legend = TRUE)
}
return(graph)
}
# Generate upstream and downstream tables
stream_table <- function(data = NULL, direction = NULL, current_id = NULL, session = NULL) {
if (length(data) > 0) {
df <- data %>%
dplyr::mutate(View = paste('<a class="go-stream" href="" data-stream="', data[[1]], '"><i class="fa fa-eye"></i></a>', sep=""))
all = data.frame(paste0("All ", "(",nrow(df), ")"), paste('<a class="go-stream" href="" data-stream="', paste(data[[1]],collapse=","), '"><i class="fa fa-eye"></i></a>', sep=""))
df = rbind(setNames(all, names(df)), df)
action <- DT::dataTableAjax(session, df, rownames = FALSE)
table = DT::datatable(df,
options = list(ajax = list(url = action),
dom = 't'
),
escape = FALSE,
selection = 'none', rownames = FALSE
)
} else {
df <- data
df <- rbind(df, paste0("No ", direction, " reaches from COMID ", current_id))
colnames(df) = ifelse(direction == "upstream", "Upstream", "Downstream")
table = DT::datatable(df, options = list(dom = 't'), escape = FALSE, selection = 'none')
}
return(table)
}
|
2004ea559de25c9162f0b0c7a71b7c3fb0ae1098
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/cjoint/examples/amce.Rd.R
|
9673048c31b0f474ad249358e27b6a0f072b9d4a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,586
|
r
|
amce.Rd.R
|
library(cjoint)
### Name: amce
### Title: Estimating Causal Effects in Conjoint Experiments
### Aliases: amce
### ** Examples
## Not run:
##D # Immigration Choice Conjoint Experiment Data from Hainmueller et. al. (2014).
##D data("immigrationconjoint")
##D data("immigrationdesign")
##D
##D # Run AMCE estimator using all attributes in the design
##D results <- amce(Chosen_Immigrant ~ Gender + Education + `Language Skills` +
##D `Country of Origin` + Job + `Job Experience` + `Job Plans` +
##D `Reason for Application` + `Prior Entry`, data=immigrationconjoint,
##D cluster=TRUE, respondent.id="CaseID", design=immigrationdesign)
##D # Print summary
##D summary(results)
##D
##D
##D # Run AMCE estimator using all attributes in the design with interactions
##D interaction_results <- amce(Chosen_Immigrant ~ Gender + Education + `Language Skills` +
##D `Country of Origin` + Job + `Job Experience` + `Job Plans` +
##D `Reason for Application` + `Prior Entry` + Education:`Language Skills` +
##D Job: `Job Experience` + `Job Plans`:`Reason for Application`,
##D data=immigrationconjoint, cluster=TRUE, respondent.id="CaseID",
##D design=immigrationdesign)
##D # Print summary
##D summary(interaction_results)
##D
##D # create weights in data
##D weights <- runif(nrow(immigrationconjoint))
##D immigrationconjoint$weights <- weights
##D # Run AMCE estimator using weights
##D results <- amce(Chosen_Immigrant ~ Gender + Education + `Language Skills` +
##D `Country of Origin` + Job + `Job Experience` + `Job Plans` +
##D `Reason for Application` + `Prior Entry`, data=immigrationconjoint,
##D cluster=TRUE, respondent.id="CaseID", design=immigrationdesign,
##D weights = "weights")
##D # Print summary
##D summary(results)
##D
##D # Include a respondent-varying interaction
##D results <- amce(Chosen_Immigrant ~ Gender + Education + Job +
##D ethnocentrism:Job + Education:Job,
##D data=immigrationconjoint, na.ignore = TRUE,
##D cluster=FALSE,design=immigrationdesign,
##D respondent.varying = "ethnocentrism")
##D # Print summary
##D summary(results)
##D
##D # Change the baseline for "Education"
##D baselines <- list()
##D baselines$Education <- "graduate degree"
##D
##D results <- amce(Chosen_Immigrant ~ Gender + Education + Job +
##D Education:Job, data=immigrationconjoint,
##D cluster=FALSE,design=immigrationdesign,
##D baselines=baselines)
##D # Print summary
##D summary(results)
## End(Not run)
|
cc450a1727794fa2c052931e094341cdccb29fc3
|
5822375099a692619be24211b5654aaf4d41436f
|
/Machine_Learning/papers/TDDE01-master/lab_2/assignment2/ass2.R
|
12d10204dff6fcbf6ab8bb13ee6a4156737d0703
|
[] |
no_license
|
Nikoge/LiU-2018
|
6847529611374fbe2f739fb6be526ab9b23149e9
|
7abfa3b6db9f09eaae82081985bb2bf9889ca266
|
refs/heads/master
| 2020-09-23T20:51:43.172595
| 2019-11-04T21:05:34
| 2019-11-04T21:05:34
| 225,583,093
| 0
| 1
| null | 2019-12-03T09:40:08
| 2019-12-03T09:40:07
| null |
UTF-8
|
R
| false
| false
| 2,650
|
r
|
ass2.R
|
library(e1071)
library(readxl)
library(tree)
data = read_excel("TDDE01/lab2/assignment2/creditscoring.xls")
data$good_bad = as.factor(data$good_bad)
n = dim(data)[1]
# splitting
training = data[1:floor((1/2)*n),]
validation = data[(floor((1/2)*n)+1):(floor((3/4)*n)),]
test = data[(floor((3/4)*n)+1):n,]
mysum = function(name, comp, pred) {
print(name)
tab = table(comp, pred)
print(tab)
print(1-sum(diag(2)*tab)/sum(tab))
}
q2 = function() {
set.seed(12345)
# fitting
fit.gini = tree(good_bad ~ ., data = training, split = c("gini"))
fit.dev = tree(good_bad ~ ., data = training, split = c("deviance"))
# predictions
pred.train.gini = predict(fit.gini, newdata=training, type="class")
pred.train.dev = predict(fit.dev, newdata=training, type="class")
pred.test.gini = predict(fit.gini, newdata=test, type="class")
pred.test.dev = predict(fit.dev, newdata=test, type="class")
# tables and misclassification
mysum("train.gini", training$good_bad, pred.train.gini)
mysum("train.dev", training$good_bad, pred.train.dev)
mysum("test.gini", test$good_bad, pred.test.gini)
mysum("test.dev", test$good_bad, pred.test.dev)
fit.dev
summary(fit.dev)
# finding optimal size
n_leaves = 19
trainScore=rep(0,n_leaves)
testScore=rep(0,n_leaves)
for(i in 2:n_leaves) {
prunedTree = prune.tree(fit.dev, best = i)
pred = predict(prunedTree, newdata=validation, type="tree")
trainScore[i] = deviance(prunedTree)
testScore[i] = deviance(pred)
}
plot(2:n_leaves, trainScore[2:n_leaves], type="b", col="red", ylim=c(270,570), ylab="train/test scores", xlab="leaves")
points(2:n_leaves, testScore[2:n_leaves], type="b", col="blue")
# plot of optimal tree
pruned = prune.tree(fit.dev, best = 8)
plot(pruned)
text(pruned, pretty = 0)
}
q3 = function() {
# fitting
fit = naiveBayes(good_bad ~ ., data = training)
# predictions
pred.train = predict(fit, newdata = training)
pred.test = predict(fit, newdata = test)
# raw predictions
pred2.train = predict(fit, newdata = training, type = "raw")
pred2.test = predict(fit, newdata = test, type = "raw")
# with a lower threshold
res.train = apply(as.matrix(pred2.train[,1]), 1, function(x) ifelse( x < 0.1, "good", "bad"))
res.test = apply(as.matrix(pred2.test[,1]), 1, function(x) ifelse( x < 0.1, "good", "bad"))
# confusion matrices
mysum("training", training$good_bad, pred.train)
mysum("test", test$good_bad, pred.test)
# confusion matrices with loss matrix
mysum("training lm", training$good_bad, res.train)
mysum("test lm", test$good_bad, res.test)
}
q2()
|
37c2242057b091a969daa1b7bee46e8862f018e6
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.management/man/cloudformation_import_stacks_to_stack_set.Rd
|
e8cd24049c31e7472c8f5e001cbc6e6e8d89ac3c
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 2,191
|
rd
|
cloudformation_import_stacks_to_stack_set.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudformation_operations.R
\name{cloudformation_import_stacks_to_stack_set}
\alias{cloudformation_import_stacks_to_stack_set}
\title{Import existing stacks into a new stack sets}
\usage{
cloudformation_import_stacks_to_stack_set(
StackSetName,
StackIds = NULL,
StackIdsUrl = NULL,
OrganizationalUnitIds = NULL,
OperationPreferences = NULL,
OperationId = NULL,
CallAs = NULL
)
}
\arguments{
\item{StackSetName}{[required] The name of the stack set. The name must be unique in the Region where
you create your stack set.}
\item{StackIds}{The IDs of the stacks you are importing into a stack set. You import up
to 10 stacks per stack set at a time.
Specify either \code{StackIds} or \code{StackIdsUrl}.}
\item{StackIdsUrl}{The Amazon S3 URL which contains list of stack ids to be inputted.
Specify either \code{StackIds} or \code{StackIdsUrl}.}
\item{OrganizationalUnitIds}{The list of OU ID's to which the stacks being imported has to be mapped
as deployment target.}
\item{OperationPreferences}{The user-specified preferences for how CloudFormation performs a stack
set operation.
For more information about maximum concurrent accounts and failure
tolerance, see \href{https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-concepts.html#stackset-ops-options}{Stack set operation options}.}
\item{OperationId}{A unique, user defined, identifier for the stack set operation.}
\item{CallAs}{By default, \code{SELF} is specified. Use \code{SELF} for stack sets with
self-managed permissions.
\itemize{
\item If you are signed in to the management account, specify \code{SELF}.
\item For service managed stack sets, specify \code{DELEGATED_ADMIN}.
}}
}
\description{
Import existing stacks into a new stack sets. Use the stack import operation to import up to 10 stacks into a new stack set in the same account as the source stack or in a different administrator account and Region, by specifying the stack ID of the stack you intend to import.
See \url{https://www.paws-r-sdk.com/docs/cloudformation_import_stacks_to_stack_set/} for full documentation.
}
\keyword{internal}
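A hedged illustration of the operation documented above, written against the paws client interface; the stack set name and stack ARN are placeholders, and working AWS credentials are assumed.

# Sketch only: placeholder identifiers; AWS credentials assumed to be configured.
library(paws)
svc <- cloudformation()
resp <- svc$import_stacks_to_stack_set(
  StackSetName = "my-imported-stack-set",   # placeholder name, must be unique in the Region
  StackIds = list("arn:aws:cloudformation:us-east-1:123456789012:stack/my-stack/placeholder-id"),
  CallAs = "SELF"
)
resp$OperationId   # identifier of the import operation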
|
48423a85b2cfe4d0ab169c36aa8011402c9bc1d0
|
1a8db591cff4a67aa8f66ba90d699e65f61038a5
|
/app.r
|
a2061300d5d64846920455991bf407740ce26802
|
[] |
no_license
|
zkuralt/ChiloBioBase
|
451de925b3ad7e7783afc262502d6984778ee429
|
356ddc780141ab308117dca9b6beaf1974e7602d
|
refs/heads/master
| 2019-12-24T09:13:59.755454
| 2018-01-17T13:42:13
| 2018-01-17T13:42:13
| 102,843,765
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,334
|
r
|
app.r
|
# Load needed packages
library(shinydashboard)
library(shiny)
library(shinyjs)
library(leaflet)
library(DBI)
library(DT)
library(rhandsontable)
library(htmltools)
library(jpeg)
source("./R/creds.R", local = TRUE)
source("./R/global.R", local = TRUE)
source("./R/about.R", local = TRUE)
#### Header ####
source("./R/header.R", local = TRUE)
#### Sidebar ####
source("./R/sidebar.R", local = TRUE)
#### Body ####
source("./R/body.R", local = TRUE)
ui <- dashboardPage(header, sidebar, body, skin = "black")
#### Server side ####
server <- function(input, output) {
#### About ####
source("./R/about.R", local = TRUE)
#### Data explorer ####
source("./R/explore_map.R", local = TRUE)
#### Data input ####
# Input form
source("./R/input_form.R", local = TRUE)
# Dynamic display of morphology input fields
source("./R/input_form_morphology.R", local = TRUE)
#### Records browser ####
source("./R/browser.R", local = TRUE)
#### Querying database #### TO-DO: Limit query expressions ####
source("./R/build_query.R", local = TRUE)
#### Settings ####
#### How-to ####
source("./R/howto.R", local = TRUE)
#### Tables ####
source("./R/manage_tables.R", local = TRUE)
#### End session ####
onSessionEnded(function() {
dbDisconnect(con)
})
}
shinyApp(ui, server)
|
fd30055afc20be88ccb759fb1a993e633846fba6
|
fb28dd592087e02e3f80b0b619a305b94e51e211
|
/scripts/interact world map.R
|
83df57c7659c0c78df0049e5ee65bb67ad5f577d
|
[] |
no_license
|
couyang24/Stack-Overflow-2018-Developer-Survey
|
7a058a94e04c3968fe08a57ae0fb1ee3b27f9c06
|
1c2bf1e2c7eab5d3c2e319524a87ee89d940472e
|
refs/heads/master
| 2020-03-19T03:52:31.807182
| 2018-06-06T01:38:55
| 2018-06-06T01:38:55
| 135,770,669
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,367
|
r
|
interact world map.R
|
library(readxl)
library(tidyverse)
library(ggplot2)
library(highcharter)
library(plotly)
library(stringr)
library(viridis)
library(gridExtra)
library(dygraphs)
library(lubridate)
library(viridisLite)
library(countrycode)
library(leaflet)
library(xts)
library(htmltools)
data <- read_csv("input/survey_results_public.csv")
data[is.na(data)] <- ""
data[data$Country=="United States",]$Country <- "United States of America"
data[data$Country=="Bolivia",]$Country <- "Bolivia (Plurinational State of)"
data[data$Country=="Venezuela, Bolivarian Republic of...",]$Country <- "Venezuela, Bolivarian Republic of"
data[data$Country=="Iran, Islamic Republic of...",]$Country <- "Iran (Islamic Republic of)"
data[data$Country=="United Kingdom",]$Country <- "United Kingdom of Great Britain and Northern Ireland"
countries <- data %>% count(Country)
names(countries) <- c("country.code", "total")
data(worldgeojson, package = "highcharter")
countries$iso3 <- countrycode(countries$country.code, 'country.name', 'iso3c')
countries$country_code <- countrycode(countries$country.code, 'country.name', 'iso3n')
library(wpp2017)
data('pop')
rm(popF, popFT, popM, popMT)
pop %>% head()
countries %>% head()
new_country <- countries %>%
left_join(pop, by = 'country_code') %>%
select(country.code, country_code, iso3, total, pop = `2015`) %>%
mutate(ratio = round(total/pop*1000,3))
dshmstops <- data.frame(q = c(0, exp(1:10)/exp(10)),
c = substring(viridis(10 + 1, option = "D"), 0, 7)) %>%
list_parse2()
highchart() %>%
hc_add_series_map(worldgeojson, new_country, value = "total", joinBy = "iso3", colorByPoint = 1) %>%
hc_colorAxis(stops = dshmstops) %>%
hc_legend(enabled = TRUE) %>%
hc_mapNavigation(enabled = TRUE) %>%
# hc_tooltip(useHTML = TRUE, headerFormat = "",
# pointFormat = "Country") %>%
hc_add_theme(hc_theme_chalk()) %>%
hc_title(text = "Where are Stack Overflow Users?") %>%
hc_credits(enabled = TRUE, text = "Sources: Stack Overflow 2018 Developer Survey", style = list(fontSize = "10px"))
a <- highchart() %>%
hc_add_series_map(worldgeojson, new_country, value = "total", joinBy = "iso3", colorByPoint = 1) %>%
hc_colorAxis(stops = dshmstops) %>%
hc_legend(enabled = TRUE) %>%
hc_mapNavigation(enabled = TRUE) %>%
# hc_tooltip(useHTML = TRUE, headerFormat = "",
# pointFormat = "Country") %>%
hc_add_theme(hc_theme_chalk()) %>%
hc_title(text = "Number of Stack Overflow Users by Country") %>%
hc_credits(enabled = TRUE, text = "Sources: Stack Overflow 2018 Developer Survey", style = list(fontSize = "10px"))
b <- highchart() %>%
hc_add_series_map(worldgeojson, new_country, value = "ratio", joinBy = "iso3", colorByPoint = 1) %>%
hc_colorAxis(stops = dshmstops) %>%
hc_legend(enabled = TRUE) %>%
hc_mapNavigation(enabled = TRUE) %>%
# hc_tooltip(useHTML = TRUE, headerFormat = "",
# pointFormat = "Country") %>%
hc_add_theme(hc_theme_chalk()) %>%
hc_title(text = "Number of Stack Overflow Users per million population by Country") %>%
hc_credits(enabled = TRUE, text = "Sources: Stack Overflow 2018 Developer Survey", style = list(fontSize = "10px"))
lst <- list(a, b)
hw_grid(lst, rowheight = 350)
|
1751f8919855e89909063e19d3606de5e3961c4f
|
610c5a5444c776bd3d5867f4ec879c698da550ee
|
/data/lgbb/fill.R
|
6fe0cc2ae1f518309bc5861489f573f26f316fdd
|
[] |
no_license
|
efsalvarenga/surrogates
|
147a109bdabbac0219754f534201b9acea3c36da
|
873da8e6604f2a5c15ceac61c74787c4f13aa8d0
|
refs/heads/master
| 2023-04-24T00:25:07.228690
| 2021-05-05T08:01:21
| 2021-05-05T08:01:21
| 283,463,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 570
|
r
|
fill.R
|
library(tgp)
as <- read.table("lgbb_as.txt", header=TRUE)
rest <- read.table("lgbb_as_rest.txt", header=TRUE)
plan <- read.table("lgbb_as_planned.txt", header=TRUE)
XX <- rest[,-1]
responses <- names(as)[-(1:4)]
fill <- matrix(NA, nrow=nrow(plan), ncol=length(responses))
fill <- as.data.frame(fill)
names(fill) <- responses
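# For each response column, fit a Bayesian treed GP surrogate with linear-model jumps
# (btgpllm) on the adaptive-sample design points, store the posterior mean at both the
# design locations (Zp.mean) and the held-out locations (ZZ.mean), and re-save `fill`
# every iteration so partial results survive an interrupted run.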
for(r in responses) {
X <- as[,2:4]
Z <- as[r]
out <- btgpllm(X=X, Z=Z, XX=XX, bprior="b0", BTE=c(10000,20000,100),
linburn=TRUE, R=100)
fill[as[,1],r] <- out$Zp.mean
fill[rest[,1],r] <- out$ZZ.mean
save(fill, file="fill.RData")
}
|
7ef9ccfc9bfcb60f73e90b6fad57341b48c7befb
|
2f5f0a49261b3ff44d85fcc8533af421c6dce5ae
|
/Plot4.R
|
b0b0479581b0c90d01b9d7e67ec8a7aeedd949a2
|
[] |
no_license
|
markie1mb/ExData_Plotting1
|
db9e6b4002c05a75dfb9549d2c492bfc21cdc4d2
|
d0753f548fa3667c6dcfa954920c6661ae3bd249
|
refs/heads/master
| 2021-01-24T22:22:42.195658
| 2016-04-09T14:05:00
| 2016-04-09T14:05:00
| 55,799,580
| 0
| 0
| null | 2016-04-08T18:06:29
| 2016-04-08T18:06:29
| null |
UTF-8
|
R
| false
| false
| 1,591
|
r
|
Plot4.R
|
## Plot4.R
## Source dataset:
## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
## This script creates 4 graphics in one plot
## plot 1 = Global_active_power over time
## plot 2 = Voltage over time
## plot 3 = 3 different sub-meterings over time
## plot 4 = Global reactive power over time.
## Set Directory
data_dir="C:/Users/Marc/Documents/R_Working_dir/exploratory-data-analysis"
setwd(data_dir)
## Read the data
power_cons_df <- read.csv("household_power_consumption.txt",sep=";",na.strings = "?")
## Change Date and Time
power_cons_df$Date<-as.Date(power_cons_df$Date,"%d/%m/%Y")
power_cons_df$Time<-strptime(paste(power_cons_df$Date,power_cons_df$Time),"%Y-%m-%d %H:%M:%S")
## Extract just 2 days
power_2days<-power_cons_df[power_cons_df$Date==as.Date("2007-02-01","%Y-%m-%d"),]
power_2days<-rbind(power_2days,power_cons_df[power_cons_df$Date==as.Date("2007-02-02","%Y-%m-%d"),])
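## The same two-day window could be selected in a single step, e.g.:
## power_2days <- power_cons_df[power_cons_df$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]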
## Make the plot
png('Plot4.png',width = 480, height = 480)
par(mfrow = c(2, 2), mar = c(5, 5, 2, 1), oma = c(0, 0, 2, 0))
with(power_2days,{
  plot(Time,Global_active_power,type="l",ylab="Global Active Power (kilowatts)",xlab="")
plot(Time,Voltage, type="l",xlab="datetime")
plot(Time,Sub_metering_1,type="l",ylab="Energy sub metering",xlab="")
lines(Time,Sub_metering_2,col="red")
lines(Time,Sub_metering_3,col="blue")
legend("topright", legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black", "red", "blue"),lty=1,bty="n")
plot(Time,Global_reactive_power, type="l",xlab="datetime")
})
dev.off()
|
1c56f3cb10cd41e5f6bd7095860dcc41aa5e381b
|
23a572adade5e5682a38580d5c46f9ee27f6a16b
|
/man/PhyDataError.Rd
|
c8e7c763ee022c9a496e00aa51919c3be9db1236
|
[] |
no_license
|
cran/MAGNAMWAR
|
600a2865205c3f719bac6b55c60f9c53099a8ca2
|
b78d31591665dfcabe8f635d1b6e3d07f8c71d2e
|
refs/heads/master
| 2021-01-20T03:12:07.293148
| 2018-07-12T06:20:17
| 2018-07-12T06:20:17
| 89,508,282
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,463
|
rd
|
PhyDataError.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graphics.R
\name{PhyDataError}
\alias{PhyDataError}
\title{Phylogenetic Tree with Attached Bar Plot and Standard Error Bars}
\usage{
PhyDataError(phy, data, mcl_matrix, species_colname, data_colname,
color = NULL, OG = NULL, xlabel = "xlabel", ...)
}
\arguments{
\item{phy}{Path to tree file}
\item{data}{R object of phenotype data}
\item{mcl_matrix}{AnalyzeOrthoMCL output}
\item{species_colname}{name of column in data file with taxa designations}
\item{data_colname}{name of column in data file with data observations}
\item{color}{optional parameter, (defaults to NULL) assign colors to individual taxa by providing file (format: Taxa | Color)}
\item{OG}{optional parameter, (defaults to NULL) a string with the names of chosen group to be colored}
\item{xlabel}{string to label barplot's x axis}
\item{...}{argument to be passed from other methods such as parameters from barplot() function}
}
\value{
A phylogenetic tree with a barplot of the provided data (with standard error bars), matched by taxa.
}
\description{
Presents data for each taxon, including standard error bars, next to a phylogenetic tree.
}
\examples{
file <- system.file('extdata', 'muscle_tree2.dnd', package='MAGNAMWAR')
PhyDataError(file, pheno_data, mcl_mtrx, species_colname = 'Treatment', data_colname = 'RespVar',
OG='OG5_126778', xlabel='TAG Content')
}
\references{
Some sort of reference
}
|
33a6c3e355ec52e0c4f5a92d0addc6bced645b8b
|
aecd7bbacf879d85f460c2018e475862a06d7735
|
/processing_ocean_color_data_modis.R
|
7b2884725ace5094bf7d3156af4d7e2dbecc2a21
|
[] |
no_license
|
bparment1/general_utilities_data_analysis
|
bd52e7c56251945b6628099e4cfca9cccc8f72e2
|
6186ce69c2698278f9531536bef0be4877ff5439
|
refs/heads/master
| 2021-01-23T12:46:42.379521
| 2018-10-01T18:30:00
| 2018-10-08T16:41:20
| 93,199,408
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,582
|
r
|
processing_ocean_color_data_modis.R
|
############### SESYNC Research Support: ocean colors datasets for environmental applications ##########
## Importing and processing water/ocean data from the MODIS sensor.
## This is an example with Lake Malawi as a study area.
##
## Data were downloaded from EARTHDATA NASA.
## https://oceandata.sci.gsfc.nasa.gov/MODIS-Terra/Mapped/Monthly/4km/Kd/
## An account is necessary if many files are downloaded.
## The products were downloaded manually but it is possible to write a script to automatically
## navigate the server directory structure and obtain the files.
##
## DATE CREATED: 11/01/2017
## DATE MODIFIED: 11/03/2017
## AUTHORS: Benoit Parmentier
## PROJECT: Ocean colors data
## ISSUE:
## TO DO:
##
## COMMIT: cleaning and producing example dataset
##
## Links to investigate:
## Kd product: https://oceandata.sci.gsfc.nasa.gov/MODIS-Terra/Mapped/Monthly/4km/Kd/
## refleance bands: https://oceandata.sci.gsfc.nasa.gov/MODIS-Terra/Mapped/Monthly/4km/Rrs/
###################################################
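## Optional sketch of a scripted download using base R's download.file(); the file name
## below is an assumed example following the naming pattern shown in Part I, and bulk
## access may still require EARTHDATA authentication:
## kd_url <- "https://oceandata.sci.gsfc.nasa.gov/MODIS-Terra/Mapped/Monthly/4km/Kd/"
## kd_file <- "T20000322000060.L3m_MO_KD490_Kd_490_4km.nc" # assumed example file name
## download.file(url = paste0(kd_url, kd_file),
##               destfile = file.path(in_dir, kd_file), mode = "wb")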
#
###### Library used
library(gtools) # loading some useful tools
library(sp)                              # Spatial package with class definitions by Bivand et al.
library(spdep)                           # Spatial package with methods and spatial statistics by Bivand et al.
library(rgdal) # GDAL wrapper for R, spatial utilities
library(raster)
library(gdata) # various tools with xls reading, cbindX
library(rasterVis) # Raster plotting functions
library(parallel) # Parallelization of processes with multiple cores
library(maptools) # Tools and functions for sp and other spatial objects e.g. spCbind
library(maps) # Tools and data for spatial/geographic objects
library(plyr) # Various tools including rbind.fill
library(spgwr) # GWR method
library(rgeos) # Geometric, topologic library of functions
library(gridExtra) # Combining lattice plots
library(colorRamps) # Palette/color ramps for symbology
library(ggplot2)
library(lubridate)
library(dplyr)
library(car)
library(sf)
###### Functions definitions/declarations used in this script and sourced from other files ##########
create_dir_fun <- function(outDir,out_suffix=NULL){
#if out_suffix is not null then append out_suffix string
if(!is.null(out_suffix)){
out_name <- paste("output_",out_suffix,sep="")
outDir <- file.path(outDir,out_name)
}
#create if does not exists
if(!file.exists(outDir)){
dir.create(outDir)
}
return(outDir)
}
#Used to load RData object saved within the functions produced.
load_obj <- function(f){
env <- new.env()
nm <- load(f, env)[1]
env[[nm]]
}
### Other functions ####
#function_processing_data <- ".R" #PARAM 1, not implemented right now
#script_path <- "/nfs/bparmentier-data/Data/projects/ocean_colors_data/scripts" #path to script #PARAM
#source(file.path(script_path,function_processing_data)) #source all functions used in this script 1.
############################################################################
##### Parameters and argument set up ###########
in_dir <- "/nfs/bparmentier-data/Data/projects/ocean_colors_data/data" #local bpy50 , param 1
out_dir <- "/nfs/bparmentier-data/Data/projects/ocean_colors_data/outputs" #param 2
num_cores <- 2 #param 8
create_out_dir_param=TRUE # param 9
out_suffix <-"ocean_colors_example_11032017" #output suffix for the files and ouptut folder #param 12
#Region study area from http://www.masdap.mw/layers/geonode%3Amalawi_lake
infile_reg_outline <- "malawi_lake.shp" #study area
file_format <- ".tif" #raster format used as output
NA_flag_val <- -9999 #pixel values for missing, background, or no-data
############## START SCRIPT ############################
#####################
######### PART 0: Set up the output dir ################
if(is.null(out_dir)){
out_dir <- in_dir #output will be created in the input dir
}
#out_dir <- in_dir #output will be created in the input dir
out_suffix_s <- out_suffix #can modify name of output suffix
if(create_out_dir_param==TRUE){
out_dir <- create_dir_fun(out_dir,out_suffix)
setwd(out_dir)
}else{
  setwd(out_dir) #use previously defined directory
}
#################
### PART I READ AND PREPARE DATA #######
#set up the working directory
#Create output directory
## Remote Sensing reflectance
## band backscattering
lf_rrs <- list.files(path=in_dir,
pattern="*.RRS.*",
                     full.names=T) #list of files containing the remote sensing reflectance (RRS) data
r_stack_rrs <- stack(lf_rrs) #create a stack of raster images
#Composite reflectance from 2001001 to 2001031 (January 2001)
plot(r_stack_rrs,y=1) # plot the first image from the raster stack object
NAvalue(r_stack_rrs) #find out NA values
dataType(r_stack_rrs) #find out the data type, here FLT4S
## Kd 490nm attenuation
#e.g.: T2000032 2000060.L3m_MO_KD490_Kd_490_4km.nc
#
# T2017244 2017273.L3m_MO_SST_sst_4km.nc
lf_kd <- list.files(path=in_dir,
pattern="*.Kd.*",
                    full.names=T) #list of files containing the Kd(490) attenuation data
r_stack_kd <- stack(lf_kd) #stack of Kd
plot(r_stack_kd,y=1)
#######################
###### PART 2: A quick exploration and extraction ###########
### Examine values across 12 months for 2001
NAvalue(r_stack_kd) #find out NA values
#animate(r_stack_kd) #generate animation for specific bands/product
### create temporal profile for specific location using monthly kd data for 2001 (monthly)
geog_loc <- c(-100,-10) #longitude and latitude, South America coast
geog_loc_mat <- matrix(geog_loc,nrow=1,ncol=2)
kd_df <- extract(r_stack_kd,geog_loc_mat)
plot(kd_df[1,],type="b")
### create spectral profile for specific location using monthly kd data from 2000 and 2001
rrs_df <- extract(r_stack_rrs,geog_loc_mat)
bands_names<- names(r_stack_rrs)
bands_char<- strsplit(bands_names,"[.]")
bands_labels <- unlist(lapply(bands_char,function(x){x[5]})) #get band values in nm
plot(rrs_df[1,],type="b",xaxt="n",xlab="reflectance band (nm)",ylab="Reflectance value")
axis(side=1, at=1:10, labels=bands_labels, las=2) #label x axis with band wavelengths (nm)
title("MODIS Terra Reflectance: Ocean color product")
pt_sf <- st_point(geog_loc)
plot(r_stack_rrs,y=1,
main="location of extracted point")
plot(pt_sf,add=T)
#######################
###### PART 3: Extracting data for a specific study area ###########
#### get data out of the image for specific area
## The reference data for Lake Malawi was obtained here:
#http://www.masdap.mw/layers/geonode%3Amalawi_lake
reg_sf <- st_read(file.path(in_dir,infile_reg_outline)) # Read in as sf object
reg_sp <- as(reg_sf,"Spatial") # convert to Spatial object
r_kd_malawi <- crop(r_stack_kd,reg_sp) #Crop raster stack using region's extent
plot(r_kd_malawi,y=1:12) #Take a look at the time series for the variable of interest (kd here)
malawi_mean_df <- extract(r_stack_kd,reg_sp,fun=mean,df=T,na.rm=T) # Extract the average for the area of interest
plot(malawi_mean_df[1,],type="b")
### Save data in multiple format:
#Write out cropped data as raster:
out_raster_filename <- paste("study_area_cropped_pixels_",out_suffix,file_format,sep="")
writeRaster(r_kd_malawi,
file.path(out_dir,out_raster_filename),
NAflag=NA_flag_val,
bylayer=T,
suffix=names(r_kd_malawi))
#Write out cropped data as shapefile and textfile
malawi_data_sp <- rasterToPoints(r_kd_malawi,spatial=T) #sp points object
dim(malawi_data_sp)
outfile <- paste0("study_area_values_pixels_",out_suffix)
writeOGR(malawi_data_sp,dsn= ".",layer= outfile, driver="ESRI Shapefile",overwrite_layer=TRUE)
### Use the new sf package to write out:
malawi_data_sf <- as(malawi_data_sp,"sf")
outfile_sf <- paste0("study_area_sf_values_pixels_",out_suffix)
st_write(malawi_data_sf,
file.path(out_dir,outfile_sf),
driver="ESRI Shapefile")
malawi_data_df <- as.data.frame(malawi_data_sp)
dim(malawi_data_df) # note x, y column added!!
out_filename_df <- paste0("study_area_values_pixels_",out_suffix,".txt")
write.table(malawi_data_df,
file.path(out_dir,out_filename_df),
sep=",")
####################### END OF SCRIPT ##################################################
|
5c31543d814dcc25cfd08b918c107e179fadf5a2
|
dd8132404e8c7b028cb13cba904c50aace01c6a7
|
/swt/src/std.r/file.r
|
d13325769dfe60f47636c560e49a6e0d523f3216
|
[] |
no_license
|
arnoldrobbins/gt-swt
|
d0784d058fab9b8b587f850aeccede0305d5b2f8
|
2922b9d14b396ccd8947d0a9a535a368bec1d6ae
|
refs/heads/master
| 2020-07-29T09:41:19.362530
| 2019-10-04T11:36:01
| 2019-10-04T11:36:01
| 209,741,739
| 15
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,411
|
r
|
file.r
|
# file --- test for file conditions on specified file
integer filtst, result, getarg, argno
character pathname (MAXLINE)
integer zero_length, exists, type, permissions, got_path, i
integer readable, writable, dumped
character arg (MAXLINE)
integer p_bits (6)
permissions = 0 # default permission list: NO
type = 0 # default type list: NO
exists = 1 # default finding if file exists:YES
zero_length = 0 # default length test: NO
readable = 0 # default readability test: NO
writable = 0 # default writability test: NO
dumped = 0 # default dumped test: NO
argno = 1 # first argument number
got_path = NO # did caller pass a pathname?
# set up bit flag array for primos permissions
p_bits(1) = :2000 # owner delete/truncate
p_bits(2) = :1000 # owner write permission
p_bits(3) = :400 # owner read permission
p_bits(4) = :4 # non-owner delete/truncate
p_bits(5) = :2 # non-owner write permission
p_bits(6) = :1 # non-owner read permission
#
# expecting only one pathname per call, find all args and then process
#
while (EOF ~= getarg (argno, arg, MAXLINE)) {
if (arg(1) == '-'c) {
call mapstr (arg, UPPER)
#
# check found arg for one of the known arg types
if (arg(2) ~= 'D'c &&
arg(2) ~= 'E'c &&
arg(2) ~= 'N'c &&
arg(2) ~= 'P'c &&
arg(2) ~= 'S'c &&
arg(2) ~= 'U'c &&
arg(2) ~= 'W'c &&
arg(2) ~= 'R'c &&
arg(2) ~= 'Z'c )
call usage
if (arg(2) == 'D'c)
type = :100001
if (arg(2) == 'E'c)
exists = 1
if (arg(2) == 'P'c) {
if (EOF == getarg (argno + 1, arg, MAXLINE))
call usage
argno = argno + 1
for (i=1; i<=6; i=i+1)
if (arg(i) ~= '-'c)
permissions = or (permissions, p_bits (i))
}
if (arg(2) == 'R'c)
readable = 1
if (arg(2) == 'S'c)
type = :100000
if (arg(2) == 'U'c)
type = :100004
if (arg(2) == 'W'c)
writable = 1
if (arg(2) == 'Z'c)
zero_length = 1
if (arg(2) == 'N'c) {
if (arg(3) ~= 'E'c &&
arg(3) ~= 'W'c &&
arg(3) ~= 'R'c &&
arg(3) ~= 'Z'c )
call usage
if (arg(3) == 'E'c)
exists = -1
if (arg(3) == 'R'c)
readable = -1
if (arg(3) == 'W'c)
writable = -1
if (arg(3) == 'Z'c)
zero_length = -1
}
}
#------ end of minus options ------
#
#------ if not a minus option, assume it was a pathname
else {
call scopy (arg, 1, pathname, 1)
got_path = YES
}
argno = argno + 1
} # end of while loop for arg processing
if (got_path == NO) { # no pathname... error!
call usage
}
result = filtst (pathname, zero_length, permissions, exists, type,
readable, writable, dumped)
#
# filtst returns ERR, YES, NO...
if (result == YES)
call print (STDOUT, "1*n.")
else if (result == NO)
call print (STDOUT, "0*n.")
else if (result == ERR) {
call print (STDOUT, "0*n.")
call print (ERROUT, "*s: cannot test conditions*n"s, pathname)
}
stop
end
subroutine usage
call error ("Usage: file <pathname> -d -[n]e -p twrtwr -[n]r -s -u -[n]w -[n]z"p)
return
end
|
0100fba87b0387ccdd4f37d41bb62a23c5f5dba6
|
35425b4781332ee56ccb9812b7f937062f3b6778
|
/man/rhandson_ProgramCostSummary.Rd
|
353497d6a150647f646bbe8f04d0b7de990396fc
|
[] |
no_license
|
RX-PBB/PBBOpenData
|
8e389d11a37d4336e1231d1537e2dc27a8b1ae8f
|
7ff0d760ccb37bdfc5ed15e7713dd9ded182d307
|
refs/heads/master
| 2021-07-13T12:32:52.004154
| 2020-05-15T14:25:24
| 2020-05-15T14:25:24
| 134,998,853
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 520
|
rd
|
rhandson_ProgramCostSummary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ProgramModalSummary.R
\name{rhandson_ProgramCostSummary}
\alias{rhandson_ProgramCostSummary}
\title{rhandson_ProgramCostSummary}
\usage{
rhandson_ProgramCostSummary(df, height = 175)
}
\arguments{
\item{df}{Cost data to summarize}
\item{height}{Table height}
}
\description{
Makes a cost summary table for our Program Summary Modal
}
\examples{
showModal(ProgramModal(Modal_header=T,Modal_tabs=F,TotalCost=T,Positions=T,OperatingCosts=T))
}
|
9da0b90b9a61a1d7b2e7adee0e83e8f7f2651d50
|
65bf63e3bcc1d542804afa874df89a0c559e810a
|
/mactesttwo.r
|
110ac5fc9b42424fccccc646f3643182c8b0a8eb
|
[] |
no_license
|
adamsb0713/mac-test2
|
b5e4cafcda6b95ec5a0455e0bc5baef20f0d2f9c
|
f5c35f049908ac67850f8da5429e024f3cdd389b
|
refs/heads/master
| 2020-03-16T22:49:11.355638
| 2018-05-11T15:00:11
| 2018-05-11T15:00:11
| 133,053,563
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26
|
r
|
mactesttwo.r
|
##test-mac2
a=1
b=2
c=a+b
|
6a218c6c17f5a18e592275baab942bb10e5caa37
|
f1b8a09882ba6096e3b0ddede99d62e8c372bd9d
|
/R/example_serial_interval.R
|
c8e3f61c1b6cc59af34a94b1044669633d664f32
|
[
"MIT"
] |
permissive
|
medewitt/EpiSoon
|
a255d0abebebed56f3a1e2b4b502b2558dd48c41
|
84b8ac35a2afff9f058544225b2def6edb89517e
|
refs/heads/master
| 2022-07-09T19:25:19.792882
| 2020-05-14T19:43:29
| 2020-05-14T19:43:29
| 260,072,094
| 0
| 0
|
NOASSERTION
| 2020-05-14T19:43:30
| 2020-04-29T23:56:44
|
HTML
|
UTF-8
|
R
| false
| false
| 162
|
r
|
example_serial_interval.R
|
#' Example Serial Interval
#'
#' An example serial interval probability vector
#' @format A vector giving the probability for each day
"example_serial_interval"
|
6e26d5083cbebdd2fdfa2738024a30d716df8f2c
|
f69bcd76b3308c3847135442719c49688b03fed3
|
/man/extractparam.Rd
|
171f45de44a499c0c028785238db95fadf823666
|
[] |
no_license
|
cran/activityGCMM
|
8922e39b4542cedcbe0a1d117d7cf8291e76dc82
|
db777426190dd415c6ddd485844189d183395ab6
|
refs/heads/master
| 2023-06-02T21:47:56.691319
| 2021-06-14T18:20:02
| 2021-06-14T18:20:02
| 348,029,285
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 529
|
rd
|
extractparam.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/supportfunctions.R
\name{extractparam}
\alias{extractparam}
\title{Extract parameters for posterior simulations}
\usage{
extractparam(model, x)
}
\arguments{
\item{model}{Object of class GCMM containing output from GCMM function}
\item{x}{Name of parameter to be extracted}
}
\value{
Returns posterior samples of the parameter
}
\description{
Support function that extracts parameter estimates for creating posterior simulations of activity curves
}
|
0c1a48fd43b314837334144a7f9bb175cf9c0883
|
94c1f2fc69dd3b0cf9b6135fb8a138cd008c0e2b
|
/avaliando_residuos_no_r.r
|
9f74509cb304480f4afcf987605157375ab20e7f
|
[] |
no_license
|
ricardocunh/serie_temporais_com_r
|
85093ebd366636ebf3f0f567a95df84abcda03a1
|
375016c80c0a360f9afca4b05cf4f2badd89a99e
|
refs/heads/main
| 2023-03-30T05:46:18.825319
| 2021-03-28T21:28:49
| 2021-03-28T21:28:49
| 350,142,747
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 361
|
r
|
avaliando_residuos_no_r.r
|
library(forecast)
library(ggplot2)
autoplot(presidents)
prev = auto.arima(presidents)
print(prev$residuals)
# generating the residuals visualization
autoplot(prev$residuals)
hist(prev$residuals)
var(prev$residuals)
var(prev$residuals, na.rm = T)
mean(as.vector(prev$residuals), na.rm=T)
acf(prev$residuals, na.action = na.pass)
checkresiduals(prev)
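# checkresiduals() also reports a Ljung-Box test; a large p-value indicates no evidence
# of remaining autocorrelation in the residuals.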
|
1df04dcb945793efc47d63f434d4964ca4e97073
|
22087c2df17907e3d7c6c502b68779ba968848ce
|
/R/SWD_analysis_helpers.R
|
91814408a7fecf7a62c883f8811eee01e66185d0
|
[] |
no_license
|
marlonecobos/kuenm
|
07c16d20445ccbd6e07d7349fa54ea03900cf0a9
|
2012fadee1ceffec008f50495ec598c94b41ffc5
|
refs/heads/master
| 2023-07-07T09:13:57.863875
| 2023-06-26T07:03:22
| 2023-06-26T07:03:22
| 130,799,797
| 46
| 19
| null | 2022-01-21T11:30:20
| 2018-04-24T05:17:49
|
TeX
|
UTF-8
|
R
| false
| false
| 15,442
|
r
|
SWD_analysis_helpers.R
|
#' AICc calculation of Maxent SWD predictions
#'
#' @description aicc calculates the Akaike information criterion corrected for
#' small sample sizes (AICc) for predictions produced with Maxent.
#'
#' @param occ matrix or data.frame with coordinates of the occurrences used to
#' create the model (raster) to be evaluated; columns must be: longitude and
#' latitude.
#' @param prediction matrix or data.frame of longitude and latitude coordinates,
#' and Maxent Raw predictions obtained using the SWD format in Maxent.
#' Coordinates in this prediction must include the ones in \code{occ}.
#' @param npar (numeric) number of parameters of the model. Use function
#' \code{\link{n_par}} to obtain number of parameters in the model from
#' the lambdas file.
#'
#' @return
#' A data.frame containing values of AICc, delta AICc, weight of AICc, and
#' number of parameters. The number of rows of the data.frame corresponds to
#' the number of models evaluated.
#'
#' @export
#'
#' @details
#' Calculations are done following
#' [Warren and Seifert (2011)](https://doi.org/10.1890/10-1171.1).
aicc <- function(occ, prediction, npar) {
if (missing(occ)) {
stop("Argument 'occ' must be defined, see function's help.")
}
if (missing(prediction)) {
stop("Argument 'prediction' must be defined, see function's help.")
}
if (!class(prediction)[1] %in% c("matrix", "data.frame")) {
stop("'prediction' must be of class a matrix or data.frame. See function's help.")
}
if (missing(npar)) {
stop("Argument 'npar' must be defined, see function's help.")
}
AIC.valid <- npar < nrow(occ)
if (nrow(prediction) == 0) {
res <- data.frame(cbind(AICc = NA, delta_AICc = NA,
weight_AICc = NA, parameters = npar))
warning("Cannot calculate AICc when prediction has 0 rows. Returning NA")
} else {
vals <- prediction[paste(prediction[, 1], prediction[, 2]) %in%
paste(occ[, 1], occ[, 2]), 1:3]
vals <- unique(na.omit(vals))[, 3]
probsum <- sum(prediction[, 3], na.rm = TRUE)
LL <- colSums(log(.Machine$double.eps + t(t(vals)/probsum)))
AICc <- ((2 * npar) - (2 * LL)) + (2 * npar * (npar + 1) /
(nrow(occ) - npar - 1))
AICc[AIC.valid == FALSE] <- NA
AICc[is.infinite(AICc)] <- NA
if (sum(is.na(AICc)) == length(AICc)) {
warning("AICc not valid: too many parameters, or likelihood = Inf. Returning NA")
res <- data.frame(cbind(AICc, delta_AICc = NA, weight_AICc = NA,
parameters = npar))
} else {
delta_AICc <- AICc - min(AICc, na.rm = TRUE)
weight_AICc <- exp(-0.5 * delta_AICc) / sum(exp(-0.5 * delta_AICc), na.rm = TRUE)
res <- data.frame(AICc, delta_AICc, weight_AICc, parameters = npar)
rownames(res) <- NULL
}
}
rownames(res) <- NULL
return(res)
}
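## Example of intended use (file and folder names below are placeholders/assumptions):
## occ <- read.csv("sp_joint.csv")[, 2:3]            # longitude, latitude
## pred <- read.csv("candidate_model_all/sp.csv")    # lon, lat, raw Maxent output (SWD format)
## npar <- n_par(readLines("candidate_model_all/sp.lambdas"))
## aicc(occ = occ, prediction = pred, npar = npar)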
#' Omission rates calculation for Maxent SWD predictions
#'
#' @description or calculates omission rates of numerical projections of ecological
#' niche models based on one or multiple user-specified thresholds.
#'
#' @param prediction matrix of longitude and latitude coordinates, and Maxent
#' prediction obtained using the SWD format. Prediction coordinates must include
#' the ones in \code{occ.tra}, and \code{occ.test}.
#' @param occ.tra numerical matrix containing coordinates of the occurrence data
#' used to create the prediction to be evaluated; columns must be: longitude and
#' latitude.
#' @param occ.test numerical matrix containing coordinates of the occurrences
#' used to test the prediction to be evaluated; columns must be: longitude and
#' latitude.
#' @param threshold (numeric) vector of value(s) from 0 to 100 that will be used
#' as thresholds, default = 5.
#'
#' @return A named numeric value or numeric vector with the result(s).
#'
#' @export
or <- function(prediction, occ.tra, occ.test, threshold = 5) {
if (min(prediction, na.rm = T) == max(prediction, na.rm = T)) {
warning("Model imput has no variability, omission rate = NA.")
om_rate <- NA
} else {
vals <- prediction[paste(prediction[, 1], prediction[, 2]) %in%
paste(occ.tra[, 1], occ.tra[, 2]), 3]
tvals <- prediction[paste(prediction[, 1], prediction[, 2]) %in%
paste(occ.test[, 1], occ.test[, 2]), 3]
vals <- na.omit(vals); tvals <- na.omit(tvals)
om_rate <- vector("numeric", length = length(threshold))
for (i in 1:length(threshold)) {
val <- ceiling(nrow(occ.tra) * threshold[i] / 100) + 1
omi_val_suit <- sort(vals)[val]
om_rate[i] <- length(tvals[tvals < omi_val_suit]) / length(tvals)
}
names(om_rate) <- paste("om_rate_", threshold, "%", sep = "")
}
return(om_rate)
}
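## Example of intended use (object names are placeholders/assumptions); omission rates
## can be evaluated at several E levels at once:
## or(prediction = pred_log, occ.tra = occ_train, occ.test = occ_test, threshold = c(5, 10))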
#' Partial ROC, omission rates, and AICc calculations in concert (helper)
#'
#' @description proc_or_aicc performs a series of step-by-step processes that
#' help to read files from directories, extract necessary data, and evaluate
#' Maxent predictions based on partial ROC, omission rates, and AICc values.
#'
#' @param occ.joint (character) the name of csv file with training and testing
#' occurrences combined; columns must be: species, longitude, and latitude.
#' @param occ.tra (character) the name of the csv file with the training
#' occurrences; columns as in \code{occ.joint}.
#' @param occ.test (character) the name of the csv file with the evaluation
#' occurrences; columns as in \code{occ.joint}.
#' @param raw.folders (character) vector of names of directories containing
#' models created with all occurrences and raw outputs.
#' @param log.folders (character) vector of names of directories containing
#' models created with training occurrences and logistic outputs.
#' @param threshold (numeric) the percentage of training data omission error
#' allowed (E); default = 5.
#' @param rand.percent (numeric) the percentage of data to be used for the
#' bootstrapping process when calculating partial ROCs; default = 50.
#' @param iterations (numeric) the number of times that the bootstrap is going
#' to be repeated; default = 500.
#' @param kept (logical) if FALSE, all candidate models will be erased after
#' evaluation, default = TRUE.
#'
#' @return
#' A data.frame with the results of partial ROC, omission rates, and AICc metrics
#' for all candidate models.
#'
#' @export
#'
#' @usage
#' proc_or_aicc(occ.joint, occ.tra, occ.test, raw.folders, log.folders,
#' threshold = 5, rand.percent = 50, iterations = 500, kept = TRUE)
#'
#' @export
proc_or_aicc <- function(occ.joint, occ.tra, occ.test,
raw.folders, log.folders, threshold = 5,
rand.percent = 50, iterations = 500, kept = TRUE) {
#pROCs, omission rates, and AICcs calculation
message("Evaluation using partial ROC, omission rates, and AICc")
# Slash
if(.Platform$OS.type == "unix") {sl <- "/"; dl <- "/"} else {sl <- "\\"; dl <- "\\\\"}
# model names
model_names <- gsub(paste0(".*", dl), "", gsub("_all$", "", raw.folders))
# occurrences
oc <- read.csv(occ.joint)
spn <- gsub(" ", "_", as.character(oc[1, 1]))
oc <- oc[, -1]
occ <- read.csv(occ.tra)[, -1]
occ1 <- read.csv(occ.test)[, -1]
longitude <- colnames(oc)[1]
latitude <- colnames(oc)[2]
aics <- list()
proc_res <- list()
om_rates <- numeric()
nm <- length(raw.folders)
if(.Platform$OS.type == "unix") {
pb <- txtProgressBar(min = 0, max = nm, style = 3)
} else {
pb <- winProgressBar(title = "Progress bar", min = 0, max = nm, width = 300)
}
for(i in 1:nm) {
Sys.sleep(0.1)
if(.Platform$OS.type == "unix") {
setTxtProgressBar(pb, i)
} else {
setWinProgressBar(pb, i, title = paste(round(i / nm * 100, 2),
"% of the evaluation has finished"))
}
#AICc calculation
lbds <- paste0(raw.folders[i], sl, spn, ".lambdas")
waiting <- wait_written_done(lbds)
lambdas <- readLines(lbds)
par_num <- n_par(lambdas)
mods <- paste0(raw.folders[i], sl, spn, ".csv")
waiting <- wait_written_done(mods)
mod <- read.csv(mods)
aic <- aicc(oc, mod, par_num)
aics[[i]] <- aic
#pROCs and omission rates calculation
mods1 <- paste0(log.folders[i], sl, spn, ".csv")
waiting <- wait_written_done(mods1)
mod1 <- read.csv(mods1)
tval <- mod1[paste(mod1[, 1], mod1[, 2]) %in% paste(occ1[, 1], occ1[, 2]), 3]
proc <- kuenm_proc(tval, mod1[, 3], threshold, rand.percent, iterations)
proc_res[[i]] <- proc[[1]]
om_rates[i] <- or(mod1, occ, occ1, threshold)
#Erasing calibration models after evaluating them if kept = FALSE
if(kept == FALSE) {
unlink(raw.folders[i], recursive = T)
unlink(log.folders[i], recursive = T)
}
}
if(.Platform$OS.type != "unix") {suppressMessages(close(pb))}
# From AICc analyses few calculations
aiccs <- do.call(rbind, aics)
aiccs[, 2] <- aiccs[, 1] - min(aiccs[, 1], na.rm = TRUE)
aiccs[, 3] <- exp(-0.5 * aiccs[, 2]) / sum(exp(-0.5 * aiccs[, 2]), na.rm = TRUE)
# From pROC analyses
proc_res_m <- data.frame(model_names, do.call(rbind, proc_res))[, 1:3]
# Joining the results
ku_enm_eval <- data.frame(proc_res_m, om_rates, aiccs)
colnames(ku_enm_eval) <- c("Model", "Mean_AUC_ratio", "pval_pROC",
paste0("Omission_rate_at_", threshold, "%"), "AICc",
"delta_AICc", "W_AICc", "N_parameters")
return(ku_enm_eval)
}
#' Helper to summarize all results from model calibration exercises
#'
#' @param proc.or.aicc.results data.frame with results from evaluation of all
#' candidate models. Generally the output of \code{\link{proc_or_aicc}}.
#' @param selection (character) model selection criterion, can be "OR_AICc",
#' "AICc", or "OR"; OR = omission rates. Default = "OR_AICc", which means that
#' among models that are statistically significant and that present omission
#' rates below the threshold, those with delta AICc up to 2 will be selected.
#' See details for other selection criteria.
#'
#' @details
#' Other selection criteria are described below. If the "AICc" criterion is chosen,
#' all significant models with delta AICc up to 2 will be selected. If "OR" is
#' chosen, the first 10 significant models with the lowest omission rates will
#' be selected.
#'
#' @return
#' A list with all results that need to be written to produce the evaluation report.
#'
#' @export
summary_calibration <- function(proc.or.aicc.results, selection = "OR_AICc") {
ku_enm_eval <- proc.or.aicc.results
threshold <- gsub("Omission_rate_at_", "", colnames(ku_enm_eval)[4])
threshold <- as.numeric(gsub("%", "", threshold))
# Choosing the best models
if(selection == "OR_AICc") {
ku_enm_bes <- na.omit(ku_enm_eval[ku_enm_eval[, 3] <= 0.05, ])
ku_enm_best <- na.omit(ku_enm_bes[which(ku_enm_bes[, 4] <= threshold / 100), ])
if(length(ku_enm_best[, 4]) != 0) {
ku_enm_best[, 6] <- ku_enm_best[, 5] - min(ku_enm_best[, 5], na.rm = TRUE)
ku_enm_best[, 7] <- exp(-0.5 * ku_enm_best[, 6]) /
sum(exp(-0.5 * ku_enm_best[, 6]), na.rm = TRUE)
ku_enm_best <- ku_enm_best[ku_enm_best[, 6] <= 2, ]
ku_enm_best <- ku_enm_best[order(ku_enm_best[, 6]), ]
}else {
message("None of the significant candidate models met the omission rate criterion,",
"\nmodels with the lowest omission rate and lowest AICc will be presented")
ku_enm_best <- ku_enm_bes[ku_enm_bes[, 4] == min(ku_enm_bes[, 4]), ]
ku_enm_best[, 6] <- ku_enm_best[, 5] - min(ku_enm_best[, 5], na.rm = TRUE)
ku_enm_best[, 7] <- exp(-0.5 * ku_enm_best[, 6]) /
sum(exp(-0.5 * ku_enm_best[, 6]), na.rm = TRUE)
ku_enm_best <- ku_enm_best[ku_enm_best[, 6] <= 2, ]
ku_enm_best <- ku_enm_best[order(ku_enm_best[, 6]), ]
}
}
if(selection == "AICc") {
ku_enm_bes <- na.omit(ku_enm_eval[ku_enm_eval[, 3] <= 0.05, ])
ku_enm_best <- ku_enm_bes[ku_enm_bes[, 6] <= 2, ]
if(length(ku_enm_best[, 6]) != 0) {
ku_enm_best <- ku_enm_best[order(ku_enm_best[, 6]), ]
}else {
message("None of the significant candidate models met the AICc criterion,",
"\ndelta AICc will be recalculated for significant models")
ku_enm_best[, 6] <- ku_enm_best[, 5] - min(ku_enm_best[, 5], na.rm = TRUE)
ku_enm_best[, 7] <- exp(-0.5 * ku_enm_best[, 6]) /
sum(exp(-0.5 * ku_enm_best[, 6]), na.rm = TRUE)
ku_enm_best <- ku_enm_best[ku_enm_best[, 6] <= 2, ]
ku_enm_best <- ku_enm_best[order(ku_enm_best[, 6]), ]
}
}
if(selection == "OR") {
ku_enm_b <- ku_enm_eval[!is.na(ku_enm_eval[, 3]), ]
ku_enm_bes <- na.omit(ku_enm_eval[ku_enm_eval[, 3] <= 0.05, ])
ku_enm_bes1 <- ku_enm_b[ku_enm_b[, 3] <= 0.05, ]
ku_enm_best <- ku_enm_bes1[ku_enm_bes1[, 4] <= threshold / 100, ]
if(length(ku_enm_best[, 4]) != 0) {
if(length(ku_enm_best[, 4]) > 10) {
ku_enm_best <- ku_enm_best[order(ku_enm_best[, 4]), ][1:10, ]
}else {
ku_enm_best <- ku_enm_best[order(ku_enm_best[, 4]), ]
}
}else {
message("None of the significant candidate models met the omission rate criterion,",
"\nmodels with the lowest omission rate will be presented")
ku_enm_best <- ku_enm_bes[ku_enm_bes[, 4] == min(ku_enm_bes[, 4]), ][1:10, ]
}
}
#####
#Statistics of the process
##Counting
ku_enm_sign <- ku_enm_eval[!is.na(ku_enm_eval[, 3]), ]
ku_enm_sign <- ku_enm_sign[ku_enm_sign[, 3] <= 0.05, ]
ku_enm_or <- ku_enm_eval[ku_enm_eval[, 4] <= threshold / 100, ]
ku_enm_AICc <- ku_enm_eval[!is.na(ku_enm_eval[, 6]), ]
ku_enm_AICc <- ku_enm_AICc[ku_enm_AICc[, 6] <= 2, ]
ku_enm_best_OR <- ku_enm_sign[ku_enm_sign[, 4] <= threshold / 100, ]
ku_enm_best_AICc <- ku_enm_bes[ku_enm_bes[, 6] <= 2, ]
ku_enm_best_OR_AICc <- ku_enm_bes[ku_enm_bes[, 4] <= threshold / 100, ]
if(length(ku_enm_best_OR_AICc[, 4]) != 0) {
ku_enm_best_OR_AICc[, 6] <- ku_enm_best_OR_AICc[, 5] -
min(ku_enm_best_OR_AICc[, 5], na.rm = TRUE)
ku_enm_best_OR_AICc[, 7] <- exp(-0.5 * ku_enm_best_OR_AICc[, 6]) /
sum(exp(-0.5 * ku_enm_best_OR_AICc[, 6]), na.rm = TRUE)
ku_enm_best_OR_AICc <- ku_enm_best_OR_AICc[ku_enm_best_OR_AICc[, 6] <= 2, ]
}
# Preparing the table
r_names <- c("All candidate models", "Statistically significant models",
"Models meeting omission rate criteria",
"Models meeting AICc criteria",
"Statistically significant models meeting omission rate criteria",
"Statistically significant models meeting AICc criteria",
"Statistically significant models meeting omission rate and AICc criteria")
statis <- c(length(ku_enm_eval[, 1]),
length(ku_enm_sign[, 3]),
length(ku_enm_or[, 4]),
length(ku_enm_AICc[, 6]),
length(ku_enm_best_OR[, 4]),
length(ku_enm_best_AICc[, 6]),
length(ku_enm_best_OR_AICc[, 2]))
ku_enm_stats <- data.frame(r_names, statis)
colnames(ku_enm_stats) <- c("Criteria", "Number of models")
# returning results
results <- list(calibration_statistics = ku_enm_stats,
selected_models = ku_enm_best,
calibration_results = ku_enm_eval,
threshold = threshold, significant_models = ku_enm_sign)
return(results)
}
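## Example of intended use (folder vectors and file names are placeholders/assumptions):
## eval_res <- proc_or_aicc(occ.joint = "sp_joint.csv", occ.tra = "sp_train.csv",
##                          occ.test = "sp_test.csv", raw.folders = raw_dirs,
##                          log.folders = log_dirs, threshold = 5)
## calib <- summary_calibration(eval_res, selection = "OR_AICc")
## calib$calibration_statistics   # counts of models meeting each criterion
## calib$selected_models          # best models under the chosen criterion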
|
fd670f5c918c058a2f12daa21060d891e2046d42
|
2e731f06724220b65c2357d6ce825cf8648fdd30
|
/BayesMRA/inst/testfiles/rmvn_arma_scalar/libFuzzer_rmvn_arma_scalar/rmvn_arma_scalar_valgrind_files/1612725971-test.R
|
29387b4f329546eef2f5863ff7228579b3b3655c
|
[] |
no_license
|
akhikolla/updatedatatype-list1
|
6bdca217d940327d3ad42144b964d0aa7b7f5d25
|
3c69a987b90f1adb52899c37b23e43ae82f9856a
|
refs/heads/master
| 2023-03-19T11:41:13.361220
| 2021-03-20T15:40:18
| 2021-03-20T15:40:18
| 349,763,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
1612725971-test.R
|
testlist <- list(a = 1.62994420810855e-260, b = 5.43239211533662e-311)
result <- do.call(BayesMRA::rmvn_arma_scalar,testlist)
str(result)
|
36d271529d8a56f7a0f4a4d0eb32f866bea13ec2
|
c2ca2cc7ca616defdd69a1098f1f34717eba28cf
|
/WordPredictionGenerate.R
|
115986e1a6c45d246164ad44883c66980a880550
|
[] |
no_license
|
tsuyoshi-matsuura/DataScienceCapstone
|
9dcef32e1de2ba59dd8fc6b8b4207e1eb1ee9824
|
84f0173cb9e28fb468970a06d060d3e9c5a146f1
|
refs/heads/master
| 2020-09-02T03:32:11.359340
| 2019-11-19T07:53:38
| 2019-11-19T07:53:38
| 216,562,345
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,962
|
r
|
WordPredictionGenerate.R
|
library(tidyverse)
library(tidytext)
# Read a file with profanity words
profanity <- readLines("profanity.txt")
readFile <- function(filepath,fraction,seed) {
# Open as "rb" to avoid ^Z problem
con = file(filepath, "rb")
lines <- readLines(con,encoding = "UTF-8")
close(con)
set.seed(seed)
train <- sample(1:length(lines),length(lines)*fraction)
Ltrain <- lines[train]
Ltest <- lines[-train]
Ttrain <- tibble(line=1:length(Ltrain),text=Ltrain)
Ttest <- tibble(line=1:length(Ltest),text=Ltest)
return(list(Ttrain,Ttest))
}
# Read the blogs, news and twitter data; 90% of each file is sampled for training, the rest is held out for testing
blogs <- readFile("en_US/en_US.blogs.txt",0.9,124)
news <- readFile("en_US/en_US.news.txt",0.9,124)
twitter <- readFile("en_US/en_US.twitter.txt",0.9,124)
# Combine the above into a single corpus
# Mark the source of each of the data
corpus <- bind_rows(list(blogs=blogs[[1]],news=news[[1]],twitter=twitter[[1]]),.id="source")
corpusTest <- bind_rows(list(blogs=blogs[[2]],news=news[[2]],twitter=twitter[[2]]),.id="source")
# Clean up
rm(blogs)
rm(news)
rm(twitter)
gc()
##### Building vocabulary #####
# Tokenize into unigrams using unnest_token
words <- corpus %>% unnest_tokens(word,text,token="words") %>%
# Remove words with non letters
filter(!grepl("[^a-zA-Z']",word)) %>%
# Remove profanity words
filter(!word %in% profanity)
#Clean up
rm(corpus)
gc()
Wcount <- words %>% count(word,sort=TRUE)
Wtot <- sum(Wcount$n)
Wcount <- Wcount %>% mutate(frac=n/Wtot,Cumfrac=cumsum(frac)) %>%
select(word,Cumfrac)
cumfrac <- 0.95
Wcount_k <- Wcount %>% filter(Cumfrac<=cumfrac) %>% mutate(replace=word)
Wcount_r <- Wcount %>% filter(Cumfrac>cumfrac) %>% mutate(replace="d0mmy")
Wcount <- rbind(Wcount_k,Wcount_r)
#Clean up
rm(Wcount_k)
rm(Wcount_r)
gc()
words <- left_join(words,Wcount) %>% mutate(word=replace) %>%
select(source, line, word)
##### Re-build corpus with chosen vocabulary #####
corpusClean <- words %>% group_by(source,line) %>%
summarize(text = str_c(word, collapse = " "),
text=paste("b0s b0s b0s ",text," e0s")) %>% ungroup()
##### Unigram data #####
# Tokenize into unigrams
unigrams <- corpusClean %>% unnest_tokens(word,text,token="words")
# Determine term frequency
unigrams <- unigrams %>% count(word,sort=TRUE, name="n1") %>%
filter(!word %in% c("d0mmy","b0s"))
unigrams
##### Bigram data #####
# Tokenize into bigrams
bigrams <- corpusClean %>% unnest_tokens(bigram,text,token="ngrams", n=2)
# Determine term frequency
bigrams <- bigrams %>% count(bigram,sort=TRUE, name="n2") %>%
separate(bigram, c("w1","w2"),sep=" ") %>%
filter(!w2 %in% c("d0mmy","b0s"))
bigrams
# Calculate discount factor
n1 <- sum((bigrams %>% filter(n2==1))$n2)
n2 <- sum((bigrams %>% filter(n2==2))$n2)
d2 <- n1/(n1+2*n2)
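# Absolute discount for the Kneser-Ney estimator; the textbook form is D = n1/(n1 + 2*n2),
# based on the frequencies of bigrams observed exactly once and exactly twice.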
# Calculate factors for Kneser-Ney
w2_w1 <- bigrams %>% group_by(w1) %>% count(w2) %>% summarize(w2_w1=sum(n))
w1_w2 <- bigrams %>% group_by(w2) %>% count(w1) %>% summarize(w1_w2=sum(n))
w1_wS2 <- sum(w1_w2$w1_w2)
w1_w2 <- w1_w2 %>% mutate(w1_w2=w1_w2/w1_wS2)
# Store results and clean
bigrams <- left_join(bigrams,w2_w1)
unigrams <- left_join(unigrams,w1_w2,by=c("word"="w2"))
rm(w2_w1)
rm(w1_w2)
rm(w1_wS2)
gc()
##### Calculate unigram probabilities #####
unigrams <- unigrams %>% rename(prob1=w1_w2) %>% select(word,n1,prob1) %>% arrange(desc(prob1))
##### Trigram data #####
# Tokenize into trigrams
trigrams <- corpusClean %>% unnest_tokens(trigram,text,token="ngrams", n=3)
# Determine term frequency
trigrams <- trigrams %>% count(trigram,sort=TRUE,name="n3") %>%
separate(trigram, c("w1","w2","w3"),sep=" ") %>%
filter(!w3 %in% c("d0mmy","b0s"))
trigrams
# Calculate discount factor
n1 <- sum((trigrams %>% filter(n3==1))$n3)
n2 <- sum((trigrams %>% filter(n3==2))$n3)
d3 <- n1/(n1+2*n2)
# Calculate factors for Kneser-Ney (step 1)
trigrams <- trigrams %>% unite(w12, w1, w2, sep=" ")
w3_w12 <- trigrams %>% group_by(w12) %>% count(w3) %>% summarize(w3_w12=sum(n))
# Store results and clean
trigrams <- left_join(trigrams,w3_w12)
rm(w3_w12)
gc()
# Calculate factors for Kneser-Ney (step 2)
trigrams <- trigrams %>% separate(w12, c("w1","w2"),sep=" ") %>%
unite(w23, w2, w3, sep=" ")
w1_w23 <- trigrams %>% group_by(w23) %>% count(w1) %>% summarize(w1_w23=sum(n))
w1_w2S3 <- w1_w23 %>% separate(w23, c("w2", "w3"), sep=" ") %>%
group_by(w2) %>% summarize(w1_w2S3=sum(w1_w23))
# Store results and clean
bigrams <- left_join(bigrams,w1_w2S3,by=c("w1"="w2"))
bigrams <- bigrams %>% unite(w12, w1, w2, sep=" ")
bigrams <- left_join(bigrams,w1_w23,by=c("w12"="w23"))
rm(w1_w23)
rm(w1_w2S3)
gc()
##### Calculte bigram probabilities #####
bigrams <- bigrams %>% separate(w12, c("w1","w2"),sep=" ")
bigrams <- left_join(bigrams,unigrams,by=c("w2"="word"))
bigrams <- bigrams %>% mutate( prob2 = (w1_w23-d2)/w1_w2S3+d2*w2_w1/w1_w2S3*prob1 )
bigrams <- bigrams %>% select(w1, w2, n2, prob2 )
##### Quadgram data #####
# Tokenize quadgrams
quadgrams <- corpusClean %>% unnest_tokens(quadgram,text,token="ngrams", n=4)
# Determine term frequency
quadgrams <- quadgrams %>% count(quadgram,sort=TRUE,name="n4") %>%
separate(quadgram, c("w1","w2","w3", "w4"),sep=" ") %>%
filter(!w4 %in% c("d0mmy","b0s"))
quadgrams
# Calculate discount factor
n1 <- sum((quadgrams %>% filter(n4==1))$n4)
n2 <- sum((quadgrams %>% filter(n4==2))$n4)
d4 <- n1/(n1+2*n2)
# Calculate factors for Kneser-Ney (step 1)
quadgrams <- quadgrams %>% unite(w123, w1, w2, w3, sep=" ")
w4_w123 <- quadgrams %>% group_by(w123) %>% count(w4) %>% summarize(w4_w123=sum(n))
w123S4 <- quadgrams %>% group_by(w123) %>% summarize(w123S4=sum(n4))
# Store results and clean
quadgrams <- left_join(quadgrams,w4_w123)
quadgrams <- left_join(quadgrams,w123S4)
rm(w4_w123)
rm(w123S4)
gc()
# Calculate factors for Kneser-Ney (step 2)
quadgrams <- quadgrams %>% separate(w123, c("w1","w2","w3"),sep=" ") %>%
unite(w234, w2, w3, w4, sep=" ")
w1_w234 <- quadgrams %>% group_by(w234) %>% count(w1) %>% summarize(w1_w234=sum(n))
w1_w23S4 <- w1_w234 %>% separate(w234, c("w2", "w3","w4"), sep=" ") %>%
unite(w23, w2, w3, sep=" ") %>%
group_by(w23) %>% summarize(w1_w23S4=sum(w1_w234))
# Store results and clean
trigrams <- trigrams %>% separate(w23, c("w2","w3"),sep=" ") %>%
unite(w12, w1, w2, sep=" ")
trigrams <- left_join(trigrams,w1_w23S4,by=c("w12"="w23"))
trigrams <- trigrams %>% unite(w123, w12, w3, sep=" ")
trigrams <- left_join(trigrams,w1_w234,by=c("w123"="w234"))
rm(w1_w23S4)
rm(w1_w234)
gc()
##### Calculte trigram probabilities #####
trigrams <- trigrams %>% separate(w123, c("w1","w2","w3"),sep=" ") %>%
unite(w23, w2, w3, sep=" ")
bigrams <- bigrams %>% unite(w12, w1, w2, sep=" ")
trigrams <- left_join(trigrams,bigrams,by=c("w23"="w12"))
trigrams <- trigrams %>% mutate( prob3 = (w1_w234-d3)/w1_w23S4+d3*w3_w12/w1_w23S4*prob2 )
trigrams <- trigrams %>% select(w1, w23, n3, prob3 )
##### Calculate quadgram probabilities #####
trigrams <- trigrams %>% unite(w123, w1, w23, sep=" ")
quadgrams <- left_join(quadgrams,trigrams,by=c("w234"="w123"))
quadgrams <- quadgrams %>% mutate( prob4=(n4-d4)/w123S4 + d4*w4_w123/w123S4*prob3 )
quadgrams <- quadgrams %>% select(w1, w234, n4, prob4 )
##### Prepare probabilities for prediction #####
bigrams <- bigrams %>% separate(w12, c("w1","w2"),sep=" ")
trigrams <- trigrams %>% separate(w123, c("w1","w2","w3"), sep=" ") %>%
unite(w12, w1, w2, sep=" ")
quadgrams <- quadgrams %>% separate(w234, c("w2","w3","w4"),sep=" ") %>%
unite(w123, w1, w2, w3, sep=" ")
Qwords <- function(w1, w2, w3, n=5) {
match <- paste(w1,w2,w3,sep=" ")
Qlist <- quadgrams %>% filter(w123 == match) %>% arrange(desc(prob4)) %>%
select(w4)
if ( nrow(Qlist) == 0 ){
return( Twords(w2, w3, n) )
}
if ( nrow(Qlist) >= n) {
return( pull(Qlist[1:n,]) )
}
Tlist <- Twords(w2, w3, n)[1:(n - nrow(Qlist))]
return( c(pull(Qlist), Tlist) )
}
Twords <- function(w1, w2, n=5) {
match <- paste(w1,w2,sep=" ")
Tlist <- trigrams %>% filter(w12 == match) %>% arrange(desc(prob3)) %>%
select(w3)
if ( nrow(Tlist) == 0 ){
return( Bwords(w2, n) )
}
if ( nrow(Tlist) >= n) {
return( pull(Tlist[1:n,]) )
}
Blist <- Bwords(w2, n)[1:(n - nrow(Tlist))]
return( c(pull(Tlist), Blist) )
}
# function to return the most probable next words given the preceding word
Bwords <- function(word, n = 5) {
Blist <- bigrams %>% filter(w1==as.character(word)) %>% arrange(desc(prob2)) %>%
select(w2)
if ( nrow(Blist)==0 ) {
return( Uwords(n) )
}
if ( nrow(Blist) >= n ) {
return( pull(Blist[1:n,]) )
}
Ulist <- Uwords(n)[1:(n - nrow(Blist))]
return( c(pull(Blist),Ulist) )
}
# function to return random words from unigrams
Uwords <- function(n = 5) {
return( sample( pull(unigrams[1:50,"word"]), size = n ) )
}
PredictWord <- function(text,n=5){
input <- tibble(line =c(1), text=text)
words <- input %>% unnest_tokens(word,text,token="words") %>%
# Remove words with non letters
filter(!grepl("[^a-zA-Z']",word)) %>%
# Remove profanity words
filter(!word %in% profanity)
if (nrow(words)>0) {
input <- words %>% group_by(line) %>%
summarize(text = str_c(word, collapse = " "),
text=paste("b0s b0s b0s ",text)) %>% ungroup()
words <- input %>% unnest_tokens(word,text,token="words")
} else {
input <- tibble(line =c(1), text="b0s b0s b0s ")
}
words <- input %>% unnest_tokens(word,text,token="words")
nw <- nrow(words)
w1 <- words[nw-2,"word"]
w2 <- words[nw-1,"word"]
w3 <- words[nw,"word"]
return( Qwords(w1,w2,w3,n) )
}
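## Example call (the input phrase is arbitrary):
## PredictWord("I would like to", n = 5)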
saveRDS(unigrams,file="unigrams.rds")
saveRDS(bigrams,file="bigrams.rds")
saveRDS(trigrams,file="trigrams.rds")
saveRDS(quadgrams,file="quadgrams.rds")
|
6b4791e516a768fd1650544b58fc1633beb24521
|
9fbc87205a3880aa124012afb77048b0716b6dad
|
/package_v2.R
|
a40e1abfaa6dcdf540126430e57db5b74fe572f2
|
[] |
no_license
|
asensio-lab/workplace-charging-experiment
|
070e32f4f41c9cb671769151047c408bac6e735e
|
ec0bf80e2281d2b21d23f386a6cd48c32469c5b3
|
refs/heads/master
| 2023-01-05T15:05:23.053411
| 2020-10-19T21:29:11
| 2020-10-19T21:29:11
| 185,430,217
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 74,272
|
r
|
package_v2.R
|
# _____________________________________________________________________________
# Minimum required imports
# _____________________________________________________________________________
library(rddensity)
library(rdrop2)
library(lpdensity)
library(ggplot2)
library(rdd)
library(car)
library(dplyr)
library(lubridate)
library(dummies)
library(extrafont)
library(gridExtra)
#For generation of Times New Roman fonts (optional for replication)
#font_import()
# _____________________________________________________________________________
# Requisite Data
# _____________________________________________________________________________
station.dat.4 <-read.csv("station_data_dataverse.csv")
external.sample <-read.csv("external_sample.csv")
#Data cleaning
#Eliminating transactions with 0 kWh
sub<-which(station.dat.4$kwhTotal==0.000)
station.dat.4<-station.dat.4[-sub,]
#Subset free transactions
free <- station.dat.4[station.dat.4$dollars == 0,]
# _____________________________________________________________________________
# DESCRIPTIVE STATISTICS
# _____________________________________________________________________________
#Placing paid transactions in a separate dataset (376 observations)
sub<-which(station.dat.4$dollars==0)
station.dat.4.2<-station.dat.4[-sub,]
#Generate count of transactons by user ID for histogram
station.dat.agg2<-tally(group_by(station.dat.4, userId))
#Eliminating observations with no reported zipcode for distance calculations
station.dat.4.3<-station.dat.4[station.dat.4$reportedZip == 1,]
#Calculating total distance per user
station.dat.agg<-aggregate(station.dat.4.3$distance ~ station.dat.4.3$userId, data = station.dat.4.3, sum)
station.dat.agg$distance<-station.dat.agg$`station.dat.4.3$distance`
#Create matrix to display descriptive statistics
table_2 <- matrix(nrow = 6,ncol = 5, dimnames = list(c("Charge time (hours)",
"Total consumption (kWh)",
"Repeat transactions per user (count)",
"Session revenue ($)",
"Estimated daily commute distance - one way (miles)",
"Electric vechicle miles traveled per user (miles)"),
c("M", "SD", "Min", "Max", "Active sessions")))
#3 datasets used in calculating descriptive stats:
#all observations, observations with cost > 0, and observations reporting zip code
#Calculating mean of each variable
table_2[,"M"] <- c(round(mean(station.dat.4$chargeTimeHrs),2),
round(mean(station.dat.4$kwhTotal),2),
round(mean(station.dat.agg2$n),2),
round(mean(station.dat.4.2$dollars),2),
round(mean(station.dat.4$distance[!is.na(station.dat.4$distance)]),2),
round(mean(station.dat.agg$distance),2))
#Calculating standard deviation of each variable
table_2[,"SD"] <- c(round(sd(station.dat.4$chargeTimeHrs),2),
round(sd(station.dat.4$kwhTotal),2),
round(sd(station.dat.agg2$n),2),
round(sd(station.dat.4.2$dollars),2),
round(sd(station.dat.4$distance[!is.na(station.dat.4$distance)]),2),
round(sd(station.dat.agg$distance),2))
#Calculating minimum of each variable
table_2[,"Min"] <- c(round(min(station.dat.4$chargeTimeHrs),2),
round(min(station.dat.4$kwhTotal),2),
round(min(station.dat.agg2$n),2),
round(min(station.dat.4.2$dollars),2),
round(min(station.dat.4$distance[!is.na(station.dat.4$distance)]),2),
round(min(station.dat.agg$distance),2))
#Calculating maximum of each variable
table_2[,"Max"] <- c(round(max(station.dat.4$chargeTimeHrs),2),
round(max(station.dat.4$kwhTotal),2),
round(max(station.dat.agg2$n),2),
round(max(station.dat.4.2$dollars),2),
round(max(station.dat.4$distance[!is.na(station.dat.4$distance)]),2),
round(max(station.dat.agg$distance),2))
#Calculating total sessions used in analysis for each variable
table_2[,"Active sessions"] <- c(nrow(station.dat.4),
nrow(station.dat.4),
nrow(station.dat.4),
nrow(station.dat.4.2),
nrow(station.dat.4.3),
nrow(station.dat.4.3))
table_2
#Calculating statistic on number of users who are treated at 4 hours
#station.dat.test <- station.dat.4[station.dat.4$chargeTimeHrs >= 4,]
#a <- c(station.dat.test$userId)
#station.dat.4 <- station.dat.4[station.dat.4$userId %in% a,]
#length(unique(station.dat.4$userId))
# _____________________________________________________________________________
# Creation of function for creating McCrary Figures
# _____________________________________________________________________________
#adapts output of DCdensity function for more flexible aesthetic changes to figures
#function adapted from GitHub user mikedecr to calculate at 95% CI
#https://gist.github.com/mikedecr/6ae9c63b6d28c43b068ddc0d85e8897b
mccrary <-
function (runvar, cutpoint, bin = NULL, bw = NULL, verbose = FALSE,
plot = TRUE, ext.out = FALSE, htest = FALSE)
{
library(rdd)
runvar <- runvar[complete.cases(runvar)]
rn <- length(runvar)
rsd <- sd(runvar)
rmin <- min(runvar)
rmax <- max(runvar)
if (missing(cutpoint)) {
if (verbose)
cat("Assuming cutpoint of zero.\n")
cutpoint <- 0
}
if (cutpoint <= rmin | cutpoint >= rmax) {
stop("Cutpoint must lie within range of runvar")
}
if (is.null(bin)) {
bin <- 2 * rsd * rn^(-1/2)
if (verbose)
cat("Using calculated bin size: ", sprintf("%.3f",
bin), "\n")
}
l <- floor((rmin - cutpoint)/bin) * bin + bin/2 + cutpoint
r <- floor((rmax - cutpoint)/bin) * bin + bin/2 + cutpoint
lc <- cutpoint - (bin/2)
rc <- cutpoint + (bin/2)
j <- floor((rmax - rmin)/bin) + 2
binnum <- round((((floor((runvar - cutpoint)/bin) * bin +
bin/2 + cutpoint) - l)/bin) + 1)
cellval <- rep(0, j)
for (i in seq(1, rn)) {
cnum <- binnum[i]
cellval[cnum] <- cellval[cnum] + 1
}
cellval <- (cellval/rn)/bin
cellmp <- seq(from = 1, to = j, by = 1)
cellmp <- floor(((l + (cellmp - 1) * bin) - cutpoint)/bin) *
bin + bin/2 + cutpoint
if (is.null(bw)) {
leftofc <- round((((floor((lc - cutpoint)/bin) * bin +
bin/2 + cutpoint) - l)/bin) + 1)
rightofc <- round((((floor((rc - cutpoint)/bin) * bin +
bin/2 + cutpoint) - l)/bin) + 1)
if (rightofc - leftofc != 1) {
stop("Error occurred in bandwidth calculation")
}
cellmpleft <- cellmp[1:leftofc]
cellmpright <- cellmp[rightofc:j]
P.lm <- lm(cellval ~ poly(cellmp, degree = 4, raw = T),
subset = cellmp < cutpoint)
mse4 <- summary(P.lm)$sigma^2
lcoef <- coef(P.lm)
fppleft <- 2 * lcoef[3] + 6 * lcoef[4] * cellmpleft +
12 * lcoef[5] * cellmpleft * cellmpleft
hleft <- 3.348 * (mse4 * (cutpoint - l)/sum(fppleft *
fppleft))^(1/5)
P.lm <- lm(cellval ~ poly(cellmp, degree = 4, raw = T),
subset = cellmp >= cutpoint)
mse4 <- summary(P.lm)$sigma^2
rcoef <- coef(P.lm)
fppright <- 2 * rcoef[3] + 6 * rcoef[4] * cellmpright +
12 * rcoef[5] * cellmpright * cellmpright
hright <- 3.348 * (mse4 * (r - cutpoint)/sum(fppright *
fppright))^(1/5)
bw = 0.5 * (hleft + hright)
if (verbose)
cat("Using calculated bandwidth: ", sprintf("%.3f",
bw), "\n")
}
if (sum(runvar > cutpoint - bw & runvar < cutpoint) == 0 |
sum(runvar < cutpoint + bw & runvar >= cutpoint) == 0)
stop("Insufficient data within the bandwidth.")
if (plot) {
d.l <- data.frame(cellmp = cellmp[cellmp < cutpoint],
cellval = cellval[cellmp < cutpoint], dist = NA,
est = NA, lwr = NA, upr = NA)
pmin <- cutpoint - 2 * rsd
pmax <- cutpoint + 2 * rsd
for (i in 1:nrow(d.l)) {
d.l$dist <- d.l$cellmp - d.l[i, "cellmp"]
w <- kernelwts(d.l$dist, 0, bw, kernel = "triangular")
newd <- data.frame(dist = 0)
pred <- predict(lm(cellval ~ dist, weights = w, data = d.l),
interval = "confidence", level = 0.95, newdata = newd)
d.l$est[i] <- pred[1]
d.l$lwr[i] <- pred[2]
d.l$upr[i] <- pred[3]
}
d.r <- data.frame(cellmp = cellmp[cellmp >= cutpoint],
cellval = cellval[cellmp >= cutpoint], dist = NA,
est = NA, lwr = NA, upr = NA)
for (i in 1:nrow(d.r)) {
d.r$dist <- d.r$cellmp - d.r[i, "cellmp"]
w <- kernelwts(d.r$dist, 0, bw, kernel = "triangular")
newd <- data.frame(dist = 0)
pred <- predict(lm(cellval ~ dist, weights = w, data = d.r),
interval = "confidence", newdata = newd)
d.r$est[i] <- pred[1]
d.r$lwr[i] <- pred[2]
d.r$upr[i] <- pred[3]
}
plot(d.l$cellmp, d.l$est, lty = 1, lwd = 2, col = "black",
type = "l", xlim = c(pmin, pmax), ylim = c(min(cellval[cellmp <=
pmax & cellmp >= pmin]), max(cellval[cellmp <=
pmax & cellmp >= pmin])), xlab = NA, ylab = NA,
main = NA)
lines(d.l$cellmp, d.l$lwr, lty = 2, lwd = 1, col = "black",
type = "l")
lines(d.l$cellmp, d.l$upr, lty = 2, lwd = 1, col = "black",
type = "l")
lines(d.r$cellmp, d.r$est, lty = 1, lwd = 2, col = "black",
type = "l")
lines(d.r$cellmp, d.r$lwr, lty = 2, lwd = 1, col = "black",
type = "l")
lines(d.r$cellmp, d.r$upr, lty = 2, lwd = 1, col = "black",
type = "l")
points(cellmp, cellval, type = "p", pch = 20)
}
cmp <- cellmp
cval <- cellval
padzeros <- ceiling(bw/bin)
jp <- j + 2 * padzeros
if (padzeros >= 1) {
cval <- c(rep(0, padzeros), cellval, rep(0, padzeros))
cmp <- c(seq(l - padzeros * bin, l - bin, bin), cellmp,
seq(r + bin, r + padzeros * bin, bin))
}
dist <- cmp - cutpoint
w <- 1 - abs(dist/bw)
w <- ifelse(w > 0, w * (cmp < cutpoint), 0)
w <- (w/sum(w)) * jp
fhatl <- predict(lm(cval ~ dist, weights = w), newdata = data.frame(dist = 0))[[1]]
w <- 1 - abs(dist/bw)
w <- ifelse(w > 0, w * (cmp >= cutpoint), 0)
w <- (w/sum(w)) * jp
fhatr <- predict(lm(cval ~ dist, weights = w), newdata = data.frame(dist = 0))[[1]]
thetahat <- log(fhatr) - log(fhatl)
sethetahat <- sqrt((1/(rn * bw)) * (24/5) * ((1/fhatr) +
(1/fhatl)))
z <- thetahat/sethetahat
p <- 2 * pnorm(abs(z), lower.tail = FALSE)
if (verbose) {
cat("Log difference in heights is ", sprintf("%.3f",
thetahat), " with SE ", sprintf("%.3f", sethetahat),
"\n")
cat(" this gives a z-stat of ", sprintf("%.3f", z),
"\n")
cat(" and a p value of ", sprintf("%.3f", p), "\n")
}
estimates <- data.frame(dhat = c(d.l$est, d.r$est), dlower = c(d.l$lwr, d.r$lwr), dupper = c(d.l$upr, d.r$upr),
force = c(rep(0, length(d.l$est)), rep(1, length(d.r$est)))
)
if (ext.out)
return(
list(
theta = thetahat, se = sethetahat, z = z,
p = p, binsize = bin, bw = bw, cutpoint = cutpoint,
data = data.frame(cellmp, cellval, force = c(rep(0, length(d.l$est)), rep(1, length(d.r$est)))),
estimates = estimates
)
)
else if (htest) {
structure(list(statistic = c(z = z), p.value = p, method = "McCrary (2008) sorting test",
parameter = c(binwidth = bin, bandwidth = bw, cutpoint = cutpoint),
alternative = "no apparent sorting"), class = "htest")
}
else return(p)
}
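# Note on the statistic computed above: the test compares the smoothed log density of the
# running variable just below and just above the cutoff,
#   theta_hat = log(fhatr) - log(fhatl),
# with the McCrary (2008) approximate standard error
#   se(theta_hat) = sqrt((24/5) * (1/fhatr + 1/fhatl) / (n * bw)),
# giving z = theta_hat / se and a two-sided normal p-value. A theta_hat close to zero is
# consistent with no discontinuity in the density (no apparent sorting) at the cutoff.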
# _____________________________________________________________________________
# Generating McCrary Figures
# _____________________________________________________________________________
#Transform variable to date for later calculations
station.dat.4$created<-as.Date(station.dat.4$created)
#Create variable for week of program
station.dat.4$weeknos <- (interval(min(station.dat.4$created), station.dat.4$created) %/% weeks(1)) + 1
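# Illustration of the week-number mapping above (lubridate): transactions 0-6 days after the
# first recorded transaction fall in week 1, days 7-13 in week 2, and so on. For example,
# with an illustrative start date of 2014-11-18:
# (interval(as.Date("2014-11-18"), as.Date("2014-11-25")) %/% weeks(1)) + 1   # = 2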
#Generate total sessions by userID
station.dat.4 <-
station.dat.4 %>%
dplyr::group_by(userId) %>%
dplyr::mutate(total = length(unique(sessionId)))
#Generate subsets needed for high and low-volume users
high_volume <- station.dat.4[station.dat.4$total >= 20,]
all_volume <- station.dat.4
# -----------------------------------------------------------------------------
# (a) 4-hour cutoff
# -----------------------------------------------------------------------------
#See the comments accompanying the generation of figure (b) below; this section follows a parallel process
mccrary_b <- mccrary(station.dat.4$chargeTimeHrs, 4, bin = 4.15*sd(station.dat.4$chargeTimeHrs)*length(station.dat.4$chargeTimeHrs)^(-.5),
bw = 0.275, verbose = TRUE,
plot = TRUE, ext.out = TRUE, htest = FALSE)
sub_mb <- mccrary_b$estimates
sub_mb_pre <- sub_mb
sub_mb_post <- sub_mb
sub_mb_pre[sub_mb_pre$force == 1,] <- NA
sub_mb_post[sub_mb_post$force == 0,] <- NA
sub_mb1 <- mccrary_b$data
figure_s2a <- ggplot(sub_mb1, aes(y = cellval, x = cellmp)) +
geom_line(aes(y = sub_mb_post$dlower, col = "CI_lower"), linetype= "dashed",colour="grey30",size = 0.5)+
geom_line(aes(y = sub_mb_post$dhat, col = "est"), colour="grey30",size = 0.5) +
geom_line(aes(y = sub_mb_post$dupper), linetype= "dashed",colour="grey30",size = 0.5)+
geom_line(aes(y = sub_mb_pre$dlower, col = "CI_lower"), linetype= "dashed",colour="grey30",size = 0.5)+
geom_line(aes(y = sub_mb_pre$dhat, col = "est"), colour="grey30",size = 0.5) +
geom_line(aes(y = sub_mb_pre$dupper, col = "CI_lower"), linetype= "dashed",colour="grey30",size = 0.5)+
labs(x="Duration of Charge", y = "Density of Transcations") +
coord_cartesian(xlim=c(0,7), ylim = c(0,.5)) +
geom_vline(xintercept=4, size = 1.5) +
geom_ribbon(aes(ymin=sub_mb_post$dlower, ymax=sub_mb_post$dupper), linetype=2, alpha=0.2, fill = "skyblue2")+
geom_ribbon(aes(ymin=sub_mb_pre$dlower, ymax=sub_mb_pre$dupper), linetype=2, alpha=0.2, fill = "skyblue2")+
theme(axis.line = element_line(colour = "black"),
axis.text=element_text(size=20),
axis.title=element_text(size=20),
plot.title = element_text(size=11, face="bold",hjust = 0.5),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x = element_text(color = "black"),
axis.text.y = element_text(color = "black"))
figure_s2a
# -----------------------------------------------------------------------------
# (b) 2-hour cutoff
# -----------------------------------------------------------------------------
#Get output of estimates and CIs
mccrary_a <- mccrary(station.dat.4$chargeTimeHrs, 2, bin = 5*sd(station.dat.4$chargeTimeHrs)*length(station.dat.4$chargeTimeHrs)^(-.5),
bw = 0.6, verbose = TRUE,
plot = TRUE, ext.out = TRUE, htest = FALSE)
#isolate estimates
sub_ma <- mccrary_a$estimates
#create replicas to serve as lines for pre and post-cutoff
sub_ma_pre <- sub_ma
sub_ma_post <- sub_ma
#replace estimates before or after cutoff with NA to maintain series length as required by ggplot
sub_ma_pre[sub_ma_pre$force == 1,] <- NA
sub_ma_post[sub_ma_post$force == 0,] <- NA
#isolate axis data
sub_ma1 <- mccrary_a$data
#note: ggplot will throw warning for removed NA values. This is by design
#and is necessary for accurate output
figure_s2b <- ggplot(sub_ma1, aes(y = cellval, x = cellmp)) +
geom_line(aes(y = sub_ma_post$dlower, col = "CI_lower"), linetype= "dashed",colour="grey30",size = 0.5)+
geom_line(aes(y = sub_ma_post$dhat, col = "est"), colour="grey30",size = 0.5) +
geom_line(aes(y = sub_ma_post$dupper), linetype= "dashed",colour="grey30",size = 0.5)+
geom_line(aes(y = sub_ma_pre$dlower, col = "CI_lower"), linetype= "dashed",colour="grey30",size = 0.5)+
geom_line(aes(y = sub_ma_pre$dhat, col = "est"), colour="grey30",size = 0.5) +
geom_line(aes(y = sub_ma_pre$dupper, col = "CI_lower"), linetype= "dashed",colour="grey30",size = 0.5)+
labs(x="Duration of Charge", y = "Density of Transcations") +
coord_cartesian(xlim=c(0,7), ylim = c(0,.5)) +
geom_vline(xintercept=2, size = 1.5) +
geom_ribbon(aes(ymin=sub_ma_post$dlower, ymax=sub_ma_post$dupper), linetype=2, alpha=0.2, fill = "salmon")+
geom_ribbon(aes(ymin=sub_ma_pre$dlower, ymax=sub_ma_pre$dupper), linetype=2, alpha=0.2, fill = "salmon")+
theme(axis.line = element_line(colour = "black"),
axis.text=element_text(size=20),
axis.title=element_text(size=20),
plot.title = element_text(size=11, face="bold",hjust = 0.5),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x = element_text(color = "black"),
axis.text.y = element_text(color = "black"))
figure_s2b
figure_s2 <- grid.arrange(figure_s2a, figure_s2b, ncol=2)
# _____________________________________________________________________________
# Figure on additions of new users over time
# _____________________________________________________________________________
#Take one row per user for use in generating cumulative frequency
high_volume <- station.dat.4[match(unique(high_volume$userId), high_volume$userId),]
high_volume$dummy <- 1
high_volume <- high_volume[order(high_volume$created),]
#Running sum of high-volume users over time
high_volume$cumsum <- cumsum(high_volume$dummy)
#Find max running total by week for plotting
high_volume <-
high_volume %>%
dplyr::group_by(weeknos) %>%
dplyr::mutate(finalcumf = max(cumsum))
#Take only first observation per week for plotting
high_volume <- high_volume[match(unique(high_volume$weeknos), high_volume$weeknos),]
#Take one row per user for use in generating cumulative frequency
all_volume <- station.dat.4[match(unique(all_volume$userId), all_volume$userId),]
all_volume$dummy <- 1
all_volume <- all_volume[order(all_volume$created),]
#Running sum of high-volume users over time
all_volume$cumsum <- cumsum(all_volume$dummy)
#Find max running total by week for plotting
all_volume <-
all_volume %>%
dplyr::group_by(weeknos) %>%
dplyr::mutate(finalcumf = max(cumsum))
#Take only first observation per week for plotting
all_volume <- all_volume[match(unique(all_volume$weeknos), all_volume$weeknos),]
#Extract only relevant columns for plotting
all_vector <- all_volume[,c('weeknos','finalcumf')]
high_vector <- high_volume[,c('weeknos','finalcumf')]
#Pad weeks that do not have a new user added with the cumulative number of users from
#previous week (46 total weeks in program)
for (i in 1:46) {
if (!(i %in% all_vector$weeknos)) {
all_vector[nrow(all_vector) + 1,] <- c(i, all_vector[i-1,][2])
all_vector <- all_vector[order(all_vector$weeknos),]
}
if (!(i %in% high_vector$weeknos)) {
high_vector[nrow(high_vector) + 1,] <- c(i, high_vector[i-1,][2])
high_vector <- high_vector[order(high_vector$weeknos),]
}
}
#flags to allow plot to distinguish between low and high-volume users
all_vector$flag <- c("all")
high_vector$flag <- c("high")
final_frame <- rbind(all_vector,high_vector)
figure_s5a <- ggplot(final_frame, aes(x=weeknos, y=finalcumf, col=flag)) +
geom_line(size = 0.5) +
labs(title = "", x="Week of program", y= expression("Total number of users"), color = "", fill = "white") +
scale_color_manual(labels = c("All users", "High-repeat users"), values = c("grey5", "orange2")) +
scale_fill_identity(name='',guide = 'legend',labels = c('Plug-out Times','Plug-in Times'))+
theme(legend.position = c(.1, 1),legend.justification = c(0, 1),legend.text = element_text(size=15))+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"),
axis.text=element_text(size=20),
axis.title=element_text(size=20),
axis.text.x = element_text(color = "black"),
axis.text.y = element_text(color = "black"),
text = element_text(family = "sans")) +
scale_x_continuous(breaks = seq(0, 45, by = 5)) +
theme(legend.key=element_blank())
figure_s5a
# _____________________________________________________________________________
# Figure transactions per station by week
# _____________________________________________________________________________
#Find total transactions per week
station.dat.s4 <-
station.dat.4 %>%
dplyr::group_by(weeknos) %>%
dplyr::mutate(transactions = n())
#Take one row per user for use in generating cumulative frequency
cumulative_stats <- station.dat.s4[match(unique(station.dat.s4$stationId), station.dat.s4$stationId),]
cumulative_stats$dummy <- 1
cumulative_stats <- cumulative_stats[order(cumulative_stats$created),]
#Running sum of stations
cumulative_stats$cumsum <- cumsum(cumulative_stats$dummy)
#Find max running total by week for plotting
cumulative_stats <-
cumulative_stats %>%
dplyr::group_by(weeknos) %>%
dplyr::mutate(finalcumf = max(cumsum))
#Take only first observation per week for plotting
cumulative_stats <- cumulative_stats[match(unique(cumulative_stats$weeknos), cumulative_stats$weeknos),]
#Extract only relevant columns for plotting
final <- cumulative_stats[,c('weeknos','finalcumf')]
#Pad weeks that do not have a new user added with the cumulative number of users from
#previous week (46 total weeks in program)
for (i in 1:46) {
if (!(i %in% final$weeknos)) {
final[nrow(final) + 1,] <- c(i, final[i-1,][2])
final <- final[order(final$weeknos),]
}
}
#Replace single week that did not have station added (week 6)
cumulative_stats <- station.dat.s4[match(unique(station.dat.s4$weeknos), station.dat.s4$weeknos),]
cumulative_stats_last <- cumulative_stats[c('weeknos','transactions')]
row <- c(6,0)
cumulative_stats_last[nrow(cumulative_stats_last) + 1,] <- list(6,0)
#Merge for final plotting
last <- merge(cumulative_stats_last, final, by = "weeknos")
#Generate transactions per week for plot
last$plot = last$transactions / last$finalcumf
figure_s5b <- ggplot(last, aes(x=weeknos, y=plot, col = )) +
geom_line(size = 0.5) +
labs(title = "", x="Week of program", y= expression("Transactions per station"), fill = "white") +
geom_line(aes(x = weeknos, y = plot), color = 'orange2') +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"),
axis.text=element_text(size=20),
axis.title=element_text(size=20),
axis.text.x = element_text(color = "black"),
axis.text.y = element_text(color = "black"),
text = element_text(family = "sans")) +
scale_x_continuous(breaks = seq(0, 45, by = 5)) +
scale_y_continuous(breaks = seq(0, 2, by = 1)) +
theme(legend.key=element_blank())
figure_s5b
figure_s5 <- grid.arrange(figure_s5a, figure_s5b, ncol=2)
# _____________________________________________________________________________
# Data Processing & Variable Creation
# _____________________________________________________________________________
#Create variable for lag in kWh per session by user
station.dat.4<-
station.dat.4 %>%
dplyr::group_by(userId) %>%
dplyr::mutate(Lag1 = dplyr::lag(kwhTotal,n = 1, default = NA))
#Difference between the natural logs of kWh in the current and previous transaction (the log ratio),
#which approximates the proportional change between a user's consecutive sessions
station.dat.4$delta.kwh.lag.ln<- log(station.dat.4$kwhTotal)-log(station.dat.4$Lag1)
#Remove all observations of NA
sub<-which(is.na(station.dat.4$delta.kwh.lag.ln))
station.dat.4<-station.dat.4[-sub,]
#station.dat.4 <- station.dat.4[!duplicated(station.dat.4$userId),]
station.dat.4 <- ungroup(station.dat.4)
#Square and cubic terms
station.dat.4$charge3<-station.dat.4$chargeTimeHrs^3
station.dat.4$charge2<-station.dat.4$chargeTimeHrs^2
#Pull month out of datetime column
station.dat.4$month <- month(station.dat.4$created, label=TRUE)
#Dummies of months
month_new <- dummy(station.dat.4$month)
new <- as.data.frame(month_new)
new$sessionId <- station.dat.4$sessionId
#Merge dummies back to master
station.dat.4 <- merge(station.dat.4, new, by = "sessionId")
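# Optional sanity check on the merged month dummies (assuming they are named monthJan-monthDec,
# as used in the regression formulas below): each session should have exactly one indicator set.
# stopifnot(all(rowSums(station.dat.4[, grep("^month[A-Z]", names(station.dat.4))]) == 1))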
# _____________________________________________________________________________
# Robustness Checks
# _____________________________________________________________________________
#Calculate optimal bandwidth at cutpoints of 2 and 4 hours
obw.2<-IKbandwidth(X=station.dat.4$chargeTimeHrs, Y=station.dat.4$delta.kwh.lag.ln,
cutpoint = 2,verbose =TRUE, kernel = "triangular")
obw.4<-IKbandwidth(X=station.dat.4$chargeTimeHrs, Y=station.dat.4$delta.kwh.lag.ln,
cutpoint = 4,verbose =TRUE, kernel = "triangular")
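# IKbandwidth() from the rdd package computes the Imbens-Kalyanaraman optimal bandwidth for a
# local linear RD fit with a triangular kernel; RDestimate() uses the same bandwidth choice by
# default when bw is not supplied, so obw.2 and obw.4 are kept mainly for reporting in Table 3
# and for the scaled-bandwidth sensitivity checks in Figure 2.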
#----------------------------------
# NEXT: Sharp RD, 4hrs
#----------------------------------
#No clustering, no covariates (sharp, 4hrs)
RDest4_1 <-RDestimate(delta.kwh.lag.ln~chargeTimeHrs, data = station.dat.4, cutpoint = 4,
verbose = TRUE, se.type='HC0')
#Clustering at facility with day of week covariates (sharp, 4hrs)
RDest4_3<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri , data = station.dat.4, cutpoint = 4,
verbose = TRUE, cluster=station.dat.4$facilityType, se.type='HC0')
#No clustering with day of week covariates/month covariates (sharp, 4hrs)
RDest4_6<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.4, cutpoint = 4,
verbose = TRUE, se.type='HC0')
#No clustering with day of week covariates/month covariates, cubic term (sharp, 4hrs)
RDest4_7<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | charge3 + Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.4, cutpoint = 4,
verbose = TRUE, se.type='HC0')
#Clustering at facility type, day of week covariates/month covariates (sharp, 4hrs)
RDest4_8<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.4, cutpoint = 4,
verbose = TRUE,cluster=station.dat.4$facilityType, se.type='HC0')
#Clustering at location ID, day of week covariates/month covariates (sharp, 4hrs)
RDest4_9<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.4, cutpoint = 4,
verbose = TRUE,cluster=station.dat.4$locationId, se.type='HC0')
#Clustering at station ID, day of week covariates/month covariates (sharp, 4hrs)
RDest4_10<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.4, cutpoint = 4,
verbose = TRUE,cluster=station.dat.4$stationId, se.type='HC0')
#----------------------------------
# NEXT: Sharp RD, 2hrs
#----------------------------------
#No clustering, no covariates (sharp, 2hrs)
RDest2_1 <-RDestimate(delta.kwh.lag.ln~chargeTimeHrs, data = station.dat.4, cutpoint = 2,
verbose = TRUE, se.type='HC0')
#Clustering at facility with day of week covariates (sharp, 2hrs)
RDest2_3<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri , data = station.dat.4, cutpoint = 2,
verbose = TRUE, cluster=station.dat.4$facilityType, se.type='HC0')
#No clustering, day of week covariates/month covariates (sharp, 2hrs)
RDest2_6<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.4, cutpoint = 2,
verbose = TRUE, se.type='HC0')
#No clustering, day of week covariates/month covariates, cubic term (sharp, 2hrs)
RDest2_7<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | charge3 + Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.4, cutpoint = 2,
verbose = TRUE, se.type='HC0')
#Clustering at facility type, day of week covariates/month covariates (sharp, 2hrs)
RDest2_8<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.4, cutpoint = 2,
verbose = TRUE, cluster=station.dat.4$facilityType, se.type='HC0')
#Clustering at location ID, day of week covariates/month covariates (sharp, 2hrs)
RDest2_9<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.4, cutpoint = 2,
verbose = TRUE, cluster=station.dat.4$locationId, se.type='HC0')
#Clustering at station ID, day of week covariates/month covariates (sharp, 2hrs)
RDest2_10<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.4, cutpoint = 2,
verbose = TRUE, cluster=station.dat.4$stationId, se.type='HC0')
#----------------------------------
# NEXT: Sharp RD (Managers), 2hrs
#----------------------------------
#Subset to users charging vehicles assigned to managers
station.dat.5<-subset(station.dat.4, station.dat.4$managerVehicle == 1)
#Calculate optimal bandwidth for these users
obw.2_Managers<-IKbandwidth(X=station.dat.5$chargeTimeHrs, Y=station.dat.5$delta.kwh.lag.ln,
cutpoint = 2,verbose =TRUE, kernel = "triangular")
#No clustering, no covariates (sharp, 2hrs/managers only)
RDest_Man_1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs, data = station.dat.5, cutpoint = 2,
verbose = TRUE, se.type='HC0')
#Clustering at facility with day of week covariates (sharp, 2hrs/managers only)
RDest_Man_3<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri , data = station.dat.5, cutpoint = 2,
verbose = TRUE, cluster=station.dat.5$facilityType, se.type='HC0')
#No clustering with day of week covariates/monthly dummies (sharp, 2hrs/managers only)
RDest_Man_6<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.5, cutpoint = 2,
verbose = TRUE, se.type='HC0')
#No clustering with day of week covariates/monthly dummies, cubic term (sharp, 2hrs/managers only)
RDest_Man_7<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | charge3 + Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.5, cutpoint = 2,
verbose = TRUE, se.type='HC0')
#Clustering at facility type, day of week covariates/monthly dummies (sharp, 2hrs/managers only)
RDest_Man_8<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.5, cutpoint = 2,
verbose = TRUE, cluster=station.dat.5$facilityType, se.type='HC0')
#Clustering at location ID, day of week covariates/monthly dummies (sharp, 2hrs/managers only)
RDest_Man_9<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.5, cutpoint = 2,
verbose = TRUE, cluster=station.dat.5$locationId, se.type='HC0')
#Clustering at station ID, day of week covariates/monthly dummies (sharp, 2hrs/managers only)
RDest_Man_10<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.5, cutpoint = 2,
verbose = TRUE, cluster=station.dat.5$stationId, se.type='HC0')
#----------------------------------
# NEXT: Sharp RD (Managers), 4hrs
#----------------------------------
#Subset to users charging vehicles assigned to managers
station.dat.5<-subset(station.dat.4, station.dat.4$managerVehicle == 1)
#Calculate optimal bandwidth for these users
obw.4_Managers<-IKbandwidth(X=station.dat.5$chargeTimeHrs, Y=station.dat.5$delta.kwh.lag.ln,
cutpoint = 4,verbose =TRUE, kernel = "triangular")
#No clustering, no covariates (sharp, 4hrs/managers only)
RDest_Man4_1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs, data = station.dat.5, cutpoint = 4,
verbose = TRUE, se.type='HC0')
#Clustering at facility with day of week covariates (sharp, 4hrs/managers only)
RDest_Man4_3<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri , data = station.dat.5, cutpoint = 4,
verbose = TRUE, cluster=station.dat.5$facilityType, se.type='HC0')
#No clustering with day of week covariates/monthly covariates (sharp, 4hrs/managers only)
RDest_Man4_6<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov, data = station.dat.5, cutpoint = 4,
verbose = TRUE, se.type='HC0')
#No clustering with day of week covariates/monthly covariates, cubic term (sharp, 4hrs/managers only)
RDest_Man4_7<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | charge3 + Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov, data = station.dat.5, cutpoint = 4,
verbose = TRUE, se.type='HC0')
#Clustering at facility type, day of week covariates/monthly covariates (sharp, 4hrs/managers only)
RDest_Man4_8<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov, data = station.dat.5, cutpoint = 4,
verbose = TRUE, cluster=station.dat.5$facilityType, se.type='HC0')
#Clustering at location ID, day of week covariates/monthly covariates (sharp, 4hrs/managers only)
RDest_Man4_9<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov, data = station.dat.5, cutpoint = 4,
verbose = TRUE, cluster=station.dat.5$locationId, se.type='HC0')
#Clustering at station ID, day of week covariates/monthly covariates (sharp, 4hrs/managers only)
RDest_Man4_10<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov, data = station.dat.5, cutpoint = 4,
verbose = TRUE, cluster=station.dat.5$stationId, se.type='HC0')
#----------------------------------
# NEXT: Placebo (Sharp)
#----------------------------------
#Calculate optimal bandwidth at cutpoints of 3 hours
obw.3<-IKbandwidth(X=station.dat.4$chargeTimeHrs, Y=station.dat.4$delta.kwh.lag.ln,
cutpoint = 3,verbose =TRUE, kernel = "triangular")
#No clustering, no covariates (sharp, 3hrs/placebo)
RDest.p1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs, data = station.dat.4, cutpoint = 3,
verbose = TRUE, se.type='HC0')
#Clustering at facility with day of week covariates (sharp, 3hrs/placebo)
RDest.p3<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri , data = station.dat.4, cutpoint = 3,
verbose = TRUE, cluster=station.dat.4$facilityType, se.type='HC0')
#No clustering with day of week covariates/month covariates (sharp, 3hrs/placebo)
RDest.p6<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.4, cutpoint = 3,
verbose = TRUE, se.type='HC0')
#No clustering with day of week covariates/month covariates, cubic term (sharp, 3hrs/placebo)
RDest.p7<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | charge3 + Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.4, cutpoint = 3,
verbose = TRUE, se.type='HC0')
#Clustering at facility type, day of week covariates/month covariates (sharp, 3hrs/placebo)
RDest.p8<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.4, cutpoint = 3,
verbose = TRUE, cluster=station.dat.4$facilityType, se.type='HC0')
#Cluster at location ID, day of week covariates/month covariates (sharp, 3hrs/placebo)
RDest.p9<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.4, cutpoint = 3,
verbose = TRUE, cluster=station.dat.4$locationId, se.type='HC0')
#Cluster at station ID, day of week covariates/month covariates (sharp, 3hrs/placebo)
RDest.p10<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri + monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov , data = station.dat.4, cutpoint = 3,
verbose = TRUE, cluster=station.dat.4$stationId, se.type='HC0')
#----------------------------------
# FINAL: Display results
#----------------------------------
#Create matrix to display robustness table
table_s1<- matrix(nrow = 15,ncol = 6, dimnames = list(c("Cutoff 4 hours Sharp",
"(s.e. 1)",
"Cutoff 4 hours Sharp - Managers",
"(s.e. 2)",
"Cutoff 2 hours Sharp",
"(s.e. 3)",
"Cutoff 2 hours Sharp - Managers",
"(s.e. 4)",
"Cutoff 3 hours Sharp - Placebo",
"(s.e. 5)",
"Time dummies",
"Cubic polynomial",
"Clustering at facility type",
"Clustering at location ID",
"Clustering at station ID"
),
c("(1)",
"(2)",
"(3)",
"(4)",
"(5)",
"(6)")))
table_s1[1, ] <- c(round(RDest4_1$est[1],4),round(RDest4_6$est[1],4),round(RDest4_7$est[1],4),round(RDest4_8$est[1],4),round(RDest4_9$est[1],4),round(RDest4_10$est[1],4))
table_s1[2, ] <- c(round(RDest4_1$se[1],4),round(RDest4_6$se[1],4),round(RDest4_7$se[1],4),round(RDest4_8$se[1],4),round(RDest4_9$se[1],4),round(RDest4_10$se[1],4))
table_s1[3, ] <- c(round(RDest_Man4_1$est[1],4),round(RDest_Man4_6$est[1],4),round(RDest_Man4_7$est[1],4),round(RDest_Man4_8$est[1],4),round(RDest_Man4_9$est[1],4),round(RDest_Man4_10$est[1],4))
table_s1[4, ] <- c(round(RDest_Man4_1$se[1],4),round(RDest_Man4_6$se[1],4),round(RDest_Man4_7$se[1],4),round(RDest_Man4_8$se[1],4),round(RDest_Man4_9$se[1],4),round(RDest_Man4_10$se[1],4))
table_s1[5, ] <- c(round(RDest2_1$est[1],4),round(RDest2_6$est[1],4),round(RDest2_7$est[1],4),round(RDest2_8$est[1],4),round(RDest2_9$est[1],4),round(RDest2_10$est[1],4))
table_s1[6, ] <- c(round(RDest2_1$se[1],4),round(RDest2_6$se[1],4),round(RDest2_7$se[1],4),round(RDest2_8$se[1],4),round(RDest2_9$se[1],4),round(RDest2_10$se[1],4))
table_s1[7, ] <- c(round(RDest_Man_1$est[1],4),round(RDest_Man_6$est[1],4),round(RDest_Man_7$est[1],4),round(RDest_Man_8$est[1],4),round(RDest_Man_9$est[1],4),round(RDest_Man_10$est[1],4))
table_s1[8, ] <- c(round(RDest_Man_1$se[1],4),round(RDest_Man_6$se[1],4),round(RDest_Man_7$se[1],4),round(RDest_Man_8$se[1],4),round(RDest_Man_9$se[1],4),round(RDest_Man_10$se[1],4))
table_s1[9, ] <- c(round(RDest.p1$est[1],4),round(RDest.p6$est[1],4),round(RDest.p7$est[1],4),round(RDest.p8$est[1],4),round(RDest.p9$est[1],4),round(RDest.p10$est[1],4))
table_s1[10, ] <- c(round(RDest.p1$se[1],4),round(RDest.p6$se[1],4),round(RDest.p7$se[1],4),round(RDest.p8$se[1],4),round(RDest.p9$se[1],4),round(RDest.p10$se[1],4))
table_s1[11, ] <- c("No","Yes","Yes","Yes","Yes","Yes")
table_s1[12, ] <- c("No","No","Yes","No","No","No")
table_s1[13, ] <- c("No","No","No","Yes","No","No")
table_s1[14, ] <- c("No","No","No","No","Yes","No")
table_s1[15, ] <- c("No","No","No","No","No","Yes")
table_s1
# _____________________________________________________________________________
# RD Results Table (3)
# _____________________________________________________________________________
table_3<- matrix(nrow = 5,ncol = 4, dimnames = list(c("Price effect, (4 hours), all users","Price effect (4 hours), managers","Behavioral effect (2 hours), all users"," Behavioral effect (2 hours), managers"," Placebo test (3 hours), all users"),
c("Optimal Bandwidth","RD Estimate", "Std Error", "Total sessions")))
table_3[1, ] <- c(round(obw.4,4), round(RDest4_3$est[1],4), round(RDest4_3$se[1],4), nrow(station.dat.4))
table_3[2, ] <- c(round(obw.4_Managers,4), round(RDest_Man4_3$est[1],4), round(RDest_Man4_3$se[1],4), nrow(station.dat.5))
table_3[3, ] <- c(round(obw.2,4), round(RDest2_3$est[1],4), round(RDest2_3$se[1],4), nrow(station.dat.4))
table_3[4, ] <- c(round(obw.2_Managers,4), round(RDest_Man_3$est[1],4), round(RDest_Man_3$se[1],4), nrow(station.dat.5))
table_3[5, ] <- c(round(obw.3,4), round(RDest.p3$est[1],4), round(RDest.p3$se[1],4), nrow(station.dat.4))
table_3
# _____________________________________________________________________________
# Figure (1): PRICE POLICY GRAPH
# _____________________________________________________________________________
#Specify hour limits and increments of 5 minutes (marginal cost incurred every 5 min after 4hrs)
x <- seq(0, 6, (1/12))
#Specify function corresponding to pricing scheme
fx <- (x > 0 & x <=4) *0+
(x >4 & x < 4.5) * 0.5 +
(x >= 4.5 & x < 10.5) * (x-4)
par(mar=c(5,5,1,1)+.1)
plot(x, fx, type="S", xlab="Duration of charge (hrs)",
ylab="Cost (dollars)",cex.lab=1.5, cex.axis=1.3)
abline(v=4, lty=2)
figure_1 <- recordPlot()
figure_1
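# Sanity check of the fee schedule encoded in fx: the first 4 hours are free, a flat $0.50
# applies between 4 and 4.5 hours, and beyond 4.5 hours the cost equals (duration - 4) dollars,
# i.e. roughly $1 per additional hour ($1.00 at 5 hrs, $2.00 at 6 hrs).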
# _____________________________________________________________________________
# Figure (S1): HISTOGRAM OF TRANSACTION COUNT BY EV USER
# _____________________________________________________________________________
#Generate histogram using dataframe with count of transactions by user ID previously generated
figure_s3 <- ggplot(station.dat.agg2, aes(x=n))+
geom_histogram(color="black", fill="orange2", binwidth = 10, alpha = 0.4)+
labs(x="Number of transactions", y="Users")+
theme(axis.line = element_line(colour = "black"),
axis.text=element_text(size=20),
axis.title=element_text(size=20),
plot.title = element_text(size=20, face="bold",hjust = 0.5),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())
figure_s3
# _____________________________________________________________________________
# Figure (4a): HISTOGRAM FOR PLUG-IN/PLUG-OUT (OF OBSERVATIONS CONSIDERED IN STUDY)
# _____________________________________________________________________________
figure_s4a <-ggplot(station.dat.4, aes(x = startTime, color=cols))+
labs(title = "", x="Time of Day (hr)", y="Frequency of Transactions")+
scale_x_discrete(breaks = c(0,2,4,6,8,10,12,14,16,18,20,22,24),limits = 0:24)+
scale_y_discrete(breaks = c(100,200,300,400,500), limits = c(0:500)) +
geom_bar(aes(x=startTime, fill="orange2"), color="orange3", alpha = 0.4, position = position_nudge(x = 0.5)) +
geom_bar(aes(x=endTime, fill="grey45"),color="grey30", alpha = 0.4, position = position_nudge(x = 0.5))+
scale_fill_identity(name='',guide = 'legend',labels = c('Plug-out Times','Plug-in Times'))+
theme_light()+
theme(legend.position = c(0.05, 1),legend.justification = c(0, 1),legend.text = element_text(size=15))+
theme(axis.line = element_line(colour = "black"),
axis.text=element_text(size=20),
axis.title=element_text(size=20),
plot.title = element_text(size=20, face="bold",hjust = 0.5),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())
figure_s4a
# _____________________________________________________________________________
# Figure (4b): HISTOGRAM FOR PLUG-IN/PLUG-OUT (OF EXTERNAL SAMPLE)
# _____________________________________________________________________________
figure_s4b <-ggplot(external.sample, aes(x = startTime, color=cols))+
labs(title = "", x="Time of Day (hr)", y="Frequency of Transactions")+
scale_x_discrete(breaks = c(0,2,4,6,8,10,12,14,16,18,20,22,24),limits = 0:24)+
scale_y_discrete(breaks = c(100,200), limits = c(0:200)) +
geom_bar(aes(x=startTime, fill="orange2"), color="orange3", alpha = 0.4, position = position_nudge(x = 0.5)) +
geom_bar(aes(x=endTime, fill="grey45"),color="grey30", alpha = 0.4, position = position_nudge(x = 0.5))+
scale_fill_identity(name='',guide = 'legend',labels = c('Plug-out Times','Plug-in Times'))+
theme_light()+
theme(legend.position = c(.1, 1),legend.justification = c(0, 1),legend.text = element_text(size=15))+
theme(axis.line = element_line(colour = "black"),
axis.text=element_text(size=20),
axis.title=element_text(size=20),
plot.title = element_text(size=20, face="bold",hjust = 0.5),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())
figure_s4b
figure_s4 <- grid.arrange(figure_s4a, figure_s4b, ncol=2)
# _____________________________________________________________________________
# Creation of generalizable material used in creation of figures 2 and 3.
# _____________________________________________________________________________
#Number of additional estimation windows beyond the initial one (one less than the total
#number of estimates): use n=6 for 30-day intervals, n=12 for 15-day intervals, n=25 for
#7-day intervals; n=169 with d=1 below extends the window one day at a time
n=169
#Number of days by which each window's end date is extended: use d=30 for 30-day intervals,
#d=15 for 15-day intervals, d=7 for 7-day intervals
d=1
# _____________________________________________________________________________
# Figure (2a): ESTIMATE OF MAIN SPECIFICATION USING DIFFERENT BANDWIDTHS (4hrs)
# _____________________________________________________________________________
#One row per bandwidth multiple evaluated
r=8
#Data frame holding the estimate and confidence bounds at each bandwidth multiple; columns are
#filled positionally by the loop below (col 1 holds the bandwidth multiple, converted to a
#percentage of the optimal bandwidth after the loop)
mat_2a <- data.frame(matrix(0, ncol=6, nrow=r))
names(mat_2a)<-c("bw_pct","ci_lower","ci_upper","estimate","unused_1","unused_2")
#RDestimate
bndwdth<-c(0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 3)
for(i in 1:8){
RRDD<-RDest.4<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs|monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov + Mon + Tues + Wed + Thurs + Fri, data = station.dat.4, bw = obw.4*(bndwdth[i]), cutpoint = 4,
verbose = TRUE, cluster=station.dat.4$locationId, se.type='HC0')
mat_2a[i,1]<-bndwdth[i]
mat_2a[i,2]<-RRDD$ci[1]
mat_2a[i,3]<-RRDD$ci[4]
mat_2a[i,4]<-RRDD$est[1]
}
mat_2a[,1] <- mat_2a[,1]*100
figure_2a <- ggplot(mat_2a, aes(mat_2a[,1])) +
labs(title = "", x="% of I-K Optimal Bandwidth", y= expression("Estimate of RD Coefficient"))+
geom_line(aes(y = mat_2a[,2], col = "CI_lower"), linetype= "dashed",colour="grey30", size = 1) +
geom_line(aes(y = mat_2a[,4], col = "estimate"),colour="black", size = 1)+
geom_line(aes(y = mat_2a[,3], col = "CI_upper"), linetype="dashed",colour="grey30", size = 1)+
scale_x_continuous(breaks = seq(0,300 , by = 50))+
coord_cartesian(ylim=c(-0.4,0.1)) +
geom_hline(yintercept=0, size = 1)+
geom_ribbon(aes(ymin=mat_2a[,2], ymax=mat_2a[,3]), linetype=2, alpha=0.3, fill = "skyblue3")+
theme_bw()+
theme(axis.line = element_line(colour = "black"),
axis.text=element_text(size=20),
axis.title=element_text(size=20),
plot.title = element_text(size=11,
face="bold",hjust = 0.5),
panel.grid.major = element_blank(),
panel.grid.major.y = element_line(color = "grey80"),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x = element_text(color = "black"),
axis.text.y = element_text(color = "black"),
text = element_text(family = "sans"))
figure_2a
# _____________________________________________________________________________
# Figure (2b): ESTIMATE OF MAIN SPECIFICATION USING DIFFERENT BANDWIDTHS (2hrs)
# _____________________________________________________________________________
#One row per bandwidth multiple evaluated
r=8
date1<-as.Date("0014-11-18")
date2<-as.Date("0015-04-17")
#Data frame holding the estimate and confidence bounds at each bandwidth multiple; columns are
#filled positionally by the loop below (col 1 holds the bandwidth multiple, converted to a
#percentage of the optimal bandwidth after the loop)
mat_2b <- data.frame(matrix(0, ncol=6, nrow=r))
names(mat_2b)<-c("bw_pct","ci_lower","ci_upper","estimate","unused_1","unused_2")
#RDestimate
bndwdth<-c(0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 3)
for(i in 1:8){
RRDD<-RDest.4<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs|monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov + Mon + Tues + Wed + Thurs + Fri, data = station.dat.4, bw = obw.2*(bndwdth[i]), cutpoint = 2,
verbose = TRUE, cluster=station.dat.4$locationId, se.type='HC0')
mat_2b[i,1]<-bndwdth[i]
mat_2b[i,2]<-RRDD$ci[1]
mat_2b[i,3]<-RRDD$ci[4]
mat_2b[i,4]<-RRDD$est[1]
}
mat_2b[,1] <- mat_2b[,1]*100
figure_2b <- ggplot(mat_2b, aes(mat_2b[,1])) +
labs(title = "", x="% of I-K Optimal Bandwidth", y= expression("Estimate of RD Coefficient"))+
geom_line(aes(y = mat_2b[,2], col = "CI_lower"), linetype= "dashed",colour="grey30", size = 1) +
geom_line(aes(y = mat_2b[,4], col = "estimate"),colour="black", size = 1)+
geom_line(aes(y = mat_2b[,3], col = "CI_upper"), linetype="dashed",colour="grey30", size = 1)+
scale_x_continuous(breaks = seq(0,300 , by = 50))+
scale_y_continuous(breaks = seq(-0.8,.2 , by = 0.2))+
geom_ribbon(aes(ymin=mat_2b[,2], ymax=mat_2b[,3]), linetype=2, alpha=0.3, fill = "salmon")+
theme_bw()+
coord_cartesian(ylim=c(-0.8,0.2)) +
geom_hline(yintercept=0, size = 1)+
theme(axis.line = element_line(colour = "black"),
axis.text=element_text(size=20),
axis.title=element_text(size=20),
plot.title = element_text(size=11,
face="bold",hjust = 0.5),
panel.grid.major = element_blank(),
panel.grid.major.y = element_line(color = "grey80"),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x = element_text(color = "black"),
axis.text.y = element_text(color = "black"),
text = element_text(family = "sans"))
figure_2b
figure_2 <- grid.arrange(figure_2a, figure_2b, ncol=2)
# _____________________________________________________________________________
# Figure (3a): DYNAMIC ESTIMATES (4-HOUR CUTOFF)
# _____________________________________________________________________________
#Number of rows depends on the estimation interval: 7 for monthly, 13 for 15-day, 26 for 7-day windows;
#here r=170 because the window's end date advances daily over the last 170 days of observations
r=170
#Data frame holding the dynamic estimates: one row per end date, with the estimate,
#clustered SE, p-value, and confidence bounds
mat_3a <- data.frame(matrix(0, ncol=6, nrow=r))
names(mat_3a)<-c("End date","Estimate","se", "p-value", "lower ci", "upper ci")
date1<-as.Date("0014-11-18")
date2<-as.Date("0015-04-17")
subset1<-station.dat.4[station.dat.4$created >= date1 & station.dat.4$created <= date2,]
ctpt<-4
#RDestimate with clustering by facility type and including day of week controls
RDest1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov + Mon + Tues + Wed + Thurs + Fri, data = subset1, cutpoint = ctpt,
verbose = TRUE, cluster=subset1$locationId, se.type='HC0')
mat_3a[1,1]<-date2
mat_3a[1,2]<-RDest1$est[1]
mat_3a[1,3]<-RDest1$se[1]
mat_3a[1,4]<-RDest1$p[1]
mat_3a[1,5]<-RDest1$ci[1]
mat_3a[1,6]<-RDest1$ci[4]
mat_3a[1,7]<-150/7
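# Column 7 (appended above) records weeks since the start of the program: the initial window
# covers the first 150 days (about 21.4 weeks) and each pass through the loop below extends the
# end date by d = 1 day, i.e. 1/7 of a week. mat_3b for the 2-hour cutoff uses the same bookkeeping.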
for(i in 1:n)
{
date2<-date2+d
subset1<-station.dat.4[station.dat.4$created >= date1 & station.dat.4$created <= date2,]
RDest1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov + Mon + Tues + Wed + Thurs + Fri, data = subset1, cutpoint = ctpt,
verbose = TRUE, cluster=subset1$locationId, se.type='HC0')
mat_3a[1+i,1]<-date2
mat_3a[1+i,2]<-RDest1$est[1]
mat_3a[1+i,3]<-RDest1$se[1]
mat_3a[1+i,4]<-RDest1$p[1]
mat_3a[1+i,5]<-RDest1$ci[1]
mat_3a[1+i,6]<-RDest1$ci[4]
mat_3a[1+i,7]<-mat_3a[i,7]+1/7
}
figure_3a <- ggplot(mat_3a, aes(mat_3a[,7], mat_3a[,2])) +
labs(x="Weeks since start of program", y= expression("Estimate of RD Coefficient"))+
geom_line(aes(y = mat_3a[,5], col = "CI_lower"), linetype= "dashed",colour="grey30", size = 0.5) +
geom_line(aes(y = mat_3a[,2], col = "estimate"),colour="black", size = 0.5)+
geom_line(aes(y = mat_3a[,6], col = "CI_upper"), linetype="dashed",colour="grey30", size = 0.5)+
scale_x_continuous(breaks = seq(20,50, by = 2), limits=c(20,50),
sec.axis = sec_axis(~ . / 4, name = "Months since start of program", breaks = seq(5,12,1)))+
scale_y_continuous(breaks = seq(-1, 1, by = 0.1), labels=abbreviate)+
geom_hline(yintercept=0, size = 1)+
geom_ribbon(aes(ymin=mat_3a[,5], ymax=mat_3a[,6]), linetype=2, alpha=0.3, fill = "skyblue3")+
coord_cartesian(xlim=c(22,44.75)) +
theme_bw()+
theme(axis.line = element_line(colour = "black"),
axis.text=element_text(size=20),
axis.title=element_text(size=20),
plot.title = element_text(size=11,
face="bold",hjust = 0.5),
panel.grid.major = element_blank(),
panel.grid.major.y = element_line(color = "grey80"),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x = element_text(color = "black"),
axis.text.y = element_text(color = "black"),
text = element_text(family = "sans"))
figure_3a
# _____________________________________________________________________________
# Figure (3b): DYNAMIC ESTIMATES (2-HOUR CUTOFF)
# _____________________________________________________________________________
#Number of rows depends on the estimation interval: 7 for monthly, 13 for 15-day, 26 for 7-day windows;
#here r=170 because the window's end date advances daily over the last 170 days of observations
r=170
#Data frame holding the dynamic estimates: one row per end date, with the estimate,
#clustered SE, p-value, and confidence bounds
mat_3b <- data.frame(matrix(0, ncol=6, nrow=r))
names(mat_3b)<-c("End date","Estimate","se", "p-value", "lower ci", "upper ci")
#First observation is on 11.18.2014
date1<-as.Date("0014-11-18")
#Last observations is on 10.04.2015
date2<-as.Date("0015-04-17")
as.Date("0015-04-17")-as.Date("0014-11-18")
#Subset only those observations occurring between designated start and end dates
subset1<-station.dat.4[station.dat.4$created >= date1 & station.dat.4$created <= date2,]
#Set desired cutpoint to 2hrs
ctpt<-2
#Regression discontinuity using delta.kwh.lag.ln and clustered by facility/day of week
RDest1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | monthJan + monthFeb + monthMar + monthJun + monthJul + monthAug + monthSep + monthOct + monthNov + Mon + Tues + Wed + Thurs + Fri, data = subset1, cutpoint = ctpt,
verbose = TRUE, cluster=subset1$locationId, se.type='HC0')
#initialize first row of matrix to reflect appropriate end date and relevant
#outputs from above RD
mat_3b[1,1]<-date2
mat_3b[1,2]<-RDest1$est[1]
mat_3b[1,3]<-RDest1$se[1]
mat_3b[1,4]<-RDest1$p[1]
mat_3b[1,5]<-RDest1$ci[1]
mat_3b[1,6]<-RDest1$ci[4]
mat_3b[1,7]<-150/7
for(i in 1:n)
{
date2<-date2+d
subset1<-station.dat.4[station.dat.4$created >= date1 & station.dat.4$created <= date2,]
RDest1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | monthJan + monthFeb + monthMar + monthApr + monthMay+ monthJun + monthJul + monthAug + monthSep + monthOct + monthNov + Mon + Tues + Wed + Thurs + Fri, data = subset1, cutpoint = ctpt,
verbose = TRUE, cluster=subset1$locationId, se.type='HC0')
mat_3b[1+i,1]<-date2
mat_3b[1+i,2]<-RDest1$est[1]
mat_3b[1+i,3]<-RDest1$se[1]
mat_3b[1+i,4]<-RDest1$p[1]
mat_3b[1+i,5]<-RDest1$ci[1]
mat_3b[1+i,6]<-RDest1$ci[4]
mat_3b[1+i,7]<-mat_3b[i,7]+1/7
}
figure_3b <- ggplot(mat_3b, aes(mat_3b[,7], mat_3b[,2])) +
labs(x="Weeks since start of program", y= expression("Estimate of RD Coefficient"))+
geom_line(aes(y = mat_3b[,5], col = "CI_lower"), linetype= "dashed",colour="grey30",size = 0.5) +
geom_line(aes(y = mat_3b[,2], col = "estimate"),colour="black", size = 0.5)+
geom_line(aes(y = mat_3b[,6], col = "CI_upper"), linetype="dashed",colour="grey30", size = 0.5)+
scale_x_continuous(breaks = seq(20,50, by = 2), limits=c(20,50),
sec.axis = sec_axis(~ . / 4, name = "Months since start of program", breaks = seq(5,12,1)))+
scale_y_continuous(breaks = seq(-1, 1, by = 0.1), labels=abbreviate)+
geom_hline(yintercept=0, size = 1)+
geom_ribbon(aes(ymin=mat_3b[,5], ymax=mat_3b[,6]), linetype=2, alpha=0.2, fill = "salmon")+
theme_bw()+
coord_cartesian(xlim=c(22,44.75)) +
theme(axis.line = element_line(colour = "black"),
axis.text=element_text(size=20),
axis.title=element_text(size=20),
plot.title = element_text(size=11,
face="bold",hjust = 0.5),
panel.grid.major = element_blank(),
panel.grid.major.y = element_line(color = "grey80"),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.text.x = element_text(color = "black"),
axis.text.y = element_text(color = "black"),
text = element_text(family = "sans"))
figure_3b
figure_3 <- grid.arrange(figure_3a, figure_3b, ncol=2)
# _____________________________________________________________________________
# Figure S1: RD FIGURES
# _____________________________________________________________________________
station.dat.4$over4 = station.dat.4$chargeTimeHrs>4
figure_s1a <- ggplot(station.dat.4, aes(x = chargeTimeHrs, y = delta.kwh.lag.ln, color = over4)) +
geom_point(alpha = 0.4, stroke = 0.4, size=3) +
geom_vline(xintercept=4, linetype="dashed") +
coord_cartesian(ylim=c(-1,1),xlim=c(1, 5.5)) +
stat_smooth(method="loess",formula = y~x, fill= "grey40", size = 1) +
scale_x_continuous(breaks = seq(1, 5.5, by = 1)) +
scale_colour_manual(values = c("grey5", "skyblue3")) +
labs(title = "", x="Charge Time (hrs)", y= expression("Change in Log of kWh Lag"), color = "black") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"),
legend.position="none", axis.text=element_text(size=20),
axis.title=element_text(size=20),
axis.text.x = element_text(color = "black"),
axis.text.y = element_text(color = "black"),
text = element_text(family = "sans"))
figure_s1a
station.dat.4$over2 = station.dat.4$chargeTimeHrs>2
figure_s1b <- ggplot(station.dat.4, aes(x = chargeTimeHrs, y = delta.kwh.lag.ln, color = over2)) +
geom_point(alpha = 0.4, stroke = 0.4, size=3) +
geom_vline(xintercept=2, linetype="dashed") +
coord_cartesian(ylim=c(-1,1),xlim=c(1, 5)) +
stat_smooth(method="loess",formula = y~x, fill= "grey30", size = 1) +
scale_colour_manual(values = c("grey5", "salmon")) +
labs(title = "", x="Charge Time (hrs)", y= expression("Change in Log of kWh Lag")) +
scale_x_continuous(breaks = seq(1, 5, by = 1)) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"),
legend.position="none", axis.text=element_text(size=20),
axis.title=element_text(size=20),
axis.text.x = element_text(color = "black"),
axis.text.y = element_text(color = "black"),
text = element_text(family = "sans"))
figure_s1b
figure_s1 <- grid.arrange(figure_s1a, figure_s1b, ncol=2)
#***************************************************************************
#Dynamic RD table
#***************************************************************************
table_4 <- matrix(nrow = 20,ncol = 6, dimnames = list(c("Month 4",
"SE4",
"Month 5",
"SE5",
"Month 6",
"SE6",
"Month 7",
"SE7",
"Month 8",
"SE8",
"Month 9",
"SE9",
"Month 10",
"SE10",
"Month 11",
"SE11",
"Month 12",
"SE12",
"Day of the week dummies",
"Cube charge time"),
c("1", "2", "3", "4", "5","6")))
#MODEL (1)
#Testing period: first 3 months
date1<-as.Date("0014-11-18")
date2<-as.Date("0015-02-18")
#Creating charge time^3
station.dat.4$charge3<-station.dat.4$chargeTimeHrs^3
station.dat.4$charge2<-station.dat.4$chargeTimeHrs^2
subset1<-station.dat.4[station.dat.4$created >= date1 & station.dat.4$created <= date2,]
ctpt<-2
#RDestimate
RDest1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs , data = subset1, cutpoint = ctpt,
verbose = TRUE, cluster=subset1$locationId, se.type='HC0')
summary(RDest1)
#9 months of the program excluding testing period
r=9
table_4[1,1]<-round(RDest1$est[1],3)
table_4[2,1]<-round(RDest1$se[1],3)
n=r-1
#Use d=30 for 30 days intervals
d=30
for(i in 1:n)
{
date2<-date2+d
subset1<-station.dat.4[station.dat.4$created >= date1 & station.dat.4$created <= date2,]
RDest1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs, data = subset1, cutpoint = ctpt,
verbose = TRUE, cluster=subset1$locationId, se.type='HC0')
summary(RDest1)
table_4[1+2*i,1]<-round(RDest1$est[1],3)
table_4[2+2*i,1]<-round(RDest1$se[1],3)
}
#MODEL (2)
#Testing period: first 3 months
date1<-as.Date("0014-11-18")
date2<-as.Date("0015-02-18")
subset1<-station.dat.4[station.dat.4$created >= date1 & station.dat.4$created <= date2,]
ctpt<-2
#RDestimate
RDest1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri, data = subset1, cutpoint = ctpt,
verbose = TRUE, cluster=subset1$locationId, se.type='HC0')
summary(RDest1)
#9 months of the program excluding testing period
r=9
table_4[1,2]<-round(RDest1$est[1],3)
table_4[2,2]<-round(RDest1$se[1],3)
n=r-1
#Use d=30 for 30 days intervals
d=30
for(i in 1:n)
{
date2<-date2+d
subset1<-station.dat.4[station.dat.4$created >= date1 & station.dat.4$created <= date2,]
RDest1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs| Mon + Tues + Wed + Thurs + Fri, data = subset1, cutpoint = ctpt,
verbose = TRUE, cluster=subset1$locationId, se.type='HC0')
table_4[1+2*i,2]<-round(RDest1$est[1],3)
table_4[2+2*i,2]<-round(RDest1$se[1],3)
}
#MODEL (3)
#Testing period: first 3 months
date1<-as.Date("0014-11-18")
date2<-as.Date("0015-02-18")
subset1<-station.dat.4[station.dat.4$created >= date1 & station.dat.4$created <= date2,]
ctpt<-2
#RDestimate
RDest1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | charge3+ Mon + Tues + Wed + Thurs + Fri, data = subset1, cutpoint = ctpt,
verbose = TRUE, cluster=subset1$locationId, se.type='HC0')
summary(RDest1)
#9 months of the program excluding testing period
r=9
table_4[1,3]<-round(RDest1$est[1],3)
table_4[2,3]<-round(RDest1$se[1],3)
n=r-1
#Use d=30 for 30 days intervals
d=30
for(i in 1:n)
{
date2<-date2+d
subset1<-station.dat.4[station.dat.4$created >= date1 & station.dat.4$created <= date2,]
RDest1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs| charge3+Mon + Tues + Wed + Thurs + Fri, data = subset1, cutpoint = ctpt,
verbose = TRUE, cluster=subset1$locationId, se.type='HC0')
table_4[1+2*i,3]<-round(RDest1$est[1],3)
table_4[2+2*i,3]<-round(RDest1$se[1],3)
summary(RDest1)
}
#MODEL (4)
#Testing period: first 3 months
date1<-as.Date("0014-11-18")
date2<-as.Date("0015-02-18")
#Creating charge time^3
station.dat.4$charge3<-station.dat.4$chargeTimeHrs^3
station.dat.4$charge2<-station.dat.4$chargeTimeHrs^2
subset1<-station.dat.4[station.dat.4$created >= date1 & station.dat.4$created <= date2,]
ctpt<-4
#RDestimate
RDest1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs , data = subset1, cutpoint = ctpt,
verbose = TRUE, cluster=subset1$locationId, se.type='HC0')
summary(RDest1)
#9 months of the program excluding testing period
r=9
table_4[1,4]<-round(RDest1$est[1],3)
table_4[2,4]<-round(RDest1$se[1],3)
n=r-1
#Use d=30 for 30 days intervals
d=30
for(i in 1:n)
{
date2<-date2+d
subset1<-station.dat.4[station.dat.4$created >= date1 & station.dat.4$created <= date2,]
RDest1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs, data = subset1, cutpoint = ctpt,
verbose = TRUE, cluster=subset1$locationId, se.type='HC0')
table_4[1+2*i,4]<-round(RDest1$est[1],3)
table_4[2+2*i,4]<-round(RDest1$se[1],3)
}
#MODEL (5)
#Testing period: first 3 months
date1<-as.Date("0014-11-18")
date2<-as.Date("0015-02-18")
subset1<-station.dat.4[station.dat.4$created >= date1 & station.dat.4$created <= date2,]
ctpt<-4
#RDestimate
RDest1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | Mon + Tues + Wed + Thurs + Fri, data = subset1, cutpoint = ctpt,
verbose = TRUE, cluster=subset1$locationId, se.type='HC0')
summary(RDest1)
#9 months of the program excluding testing period
r=9
table_4[1,5]<-round(RDest1$est[1],3)
table_4[2,5]<-round(RDest1$se[1],3)
n=r-1
#Use d=30 for 30 days intervals
d=30
for(i in 1:n)
{
date2<-date2+d
subset1<-station.dat.4[station.dat.4$created >= date1 & station.dat.4$created <= date2,]
RDest1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs| Mon + Tues + Wed + Thurs + Fri, data = subset1, cutpoint = ctpt,
verbose = TRUE, cluster=subset1$locationId, se.type='HC0')
table_4[1+2*i,5]<-round(RDest1$est[1],3)
table_4[2+2*i,5]<-round(RDest1$se[1],3)
}
#MODEL (6)
#Testing period: first 3 months
date1<-as.Date("0014-11-18")
date2<-as.Date("0015-02-18")
subset1<-station.dat.4[station.dat.4$created >= date1 & station.dat.4$created <= date2,]
ctpt<-4
#RDestimate
RDest1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs | charge3+ Mon + Tues + Wed + Thurs + Fri, data = subset1, cutpoint = ctpt,
verbose = TRUE, cluster=subset1$locationId, se.type='HC0')
summary(RDest1)
#9 months of the program excluding testing period
r=9
table_4[1,6]<-round(RDest1$est[1],3)
table_4[2,6]<-round(RDest1$se[1],3)
n=r-1
#Use d=30 for 30 days intervals
d=30
for(i in 1:n)
{
date2<-date2+d
subset1<-station.dat.4[station.dat.4$created >= date1 & station.dat.4$created <= date2,]
RDest1<-RDestimate(delta.kwh.lag.ln~chargeTimeHrs| charge3+Mon + Tues + Wed + Thurs + Fri, data = subset1, cutpoint = ctpt,
verbose = TRUE, cluster=subset1$locationId, se.type='HC0')
table_4[1+2*i,6]<-round(RDest1$est[1],3)
table_4[2+2*i,6]<-round(RDest1$se[1],3)
summary(RDest1)
}
#Input model specifications
table_4[19,] <- c("No", "Yes", "Yes", "No", "Yes", "Yes")
table_4[20,] <- c("No", "No", "Yes", "No", "No", "Yes")
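#Display dynamic RD table
table_4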
jlaake/ERAnalysis | ERAnalysis/man/gsS.Rd | no_license | rd
\name{gsS}
\alias{gsS}
\alias{add.cf.all}
\alias{add.cf.laake}
\alias{add.cf.reilly}
\docType{data}
\title{Pod Size Correction Statistics}
\description{
Summarized calibration data for pod size estimation error. \code{gsS} is a calibration matrix
and the others are additive estimates.}
\usage{
data(gsS)
data(add.cf.all)
data(add.cf.reilly)
data(add.cf.laake)
}
\format{
\describe{
\item{\code{gsS}}{A matrix with 20 rows and columns; each row is the true size and
each column the estimated size. Value is probability that a pod
of a true size will be estimated to be a particular size.}
\item{\code{add.cf.all}}{vector of four additive correction factors (size 1,2,3 and 4+) using all of the
pod size calibration data via the Reilly approach.}
\item{\code{add.cf.reilly}}{vector of four additive correction factors (size 1,2,3 and 4+) using 1978/79
pod size calibration data via the Reilly approach.}
\item{\code{add.cf.laake}}{vector of four additive correction factors (size 1,2,3 and 4+) using 1992/93 and 1993/94
pod size calibration data via the Reilly approach.}
}
}
\details{
See \code{\link{create.podsize.calibration.matrix}} and \code{\link{reilly.cf}} for details on calculation for the values contained
within these computed data sets. The number of rows and columns in \code{gsS} depends on the
value set for \code{nmax}, the maximum possible true/estimated pod size. The default value is 20.
}
\keyword{datasets}
agaye/ESPRESSO.G | R/sim.pheno.bin.G.R | no_license | r
#'
#' @title Generates phenotype statuses
#' @description Generates affected and non-affected subjects using the genotypes.
#' @param num.obs number of observations to generate per iteration.
#' @param disease.prev prevalence of the binary outcome.
#' @param genotype a vector that represents the exposure data.
#' @param subject.effect.data subject effect data, reflects the heterogeneity in baseline disease risk.
#' @param geno.OR odds ratio related to the 'at risk' genotype.
#' @return a binary vector that represents the phenotype data.
#' @keywords internal
#' @author Gaye A.
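#' @examples
#' # Hedged illustration (not from the original package): simulate a binary
#' # 'at risk' genotype and small baseline subject effects, then generate
#' # phenotype statuses.
#' geno <- rbinom(1000, 1, 0.3)
#' subj.eff <- rnorm(1000, 0, 0.1)
#' pheno <- sim.pheno.bin.G(num.obs=1000, disease.prev=0.1, genotype=geno,
#'                          subject.effect.data=subj.eff, geno.OR=1.5)
#' table(pheno)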
#'
sim.pheno.bin.G <- function(num.obs=10000, disease.prev=0.1, genotype=NULL, subject.effect.data=NULL, geno.OR=1.5){
# IF GENOTYPE AND SUBJECT EFFECT DATA ARE NOT SUPPLIED STOP AND ISSUE AN ALERT
if(is.null(genotype)){
cat("\n\n ALERT!\n")
cat(" No genotype data found.\n")
cat(" Check the argument 'genotype'\n")
stop(" End of process!\n\n", call.=FALSE)
}
if(is.null(subject.effect.data)){
cat("\n\n ALERT!\n")
cat(" No baseline effect data found.\n")
cat(" Check the argument 'subject.effect.data'\n")
stop(" End of process!\n\n", call.=FALSE)
}
numobs <- num.obs
pheno.prev <- disease.prev
genodata <- genotype
s.efkt.data <- subject.effect.data
geno.odds <- geno.OR
# GET THE ALPHA AND BETA VALUES
alpha <- log(pheno.prev/(1-pheno.prev))
beta <- log(geno.odds)
# GENERATE THE LINEAR PREDICTOR
lp <- alpha + (beta*genodata) + s.efkt.data
# GET 'mu' THE PROBABILITY OF DISEASE THROUGH LOGISTIC TRANSFORMATION
mu <- exp(lp)/(1 + exp(lp))
# GENERATE THE PHENOTYPE DATA AND RETURN IT AS A DATAFRAME
phenotype <- rbinom(numobs,1,mu)
return(phenotype)
}
|
c09017f4584a860f2d91045cb5d4f4f5c2fc1c0c
|
9a805694e5fb540cc461761a4fcb785c31528f32
|
/R/my_knn_cv.R
|
069c6a9e58469e5870ef9424be872e12472520b2
|
[] |
no_license
|
theloniousgoerz/tgpackage
|
cb0b3ea936f254fb8138b948d54c63a66310629f
|
8d96656120d0f3fec6548f9a1abd83a772fc9cb6
|
refs/heads/master
| 2023-05-21T10:15:53.364247
| 2021-06-08T22:17:12
| 2021-06-08T22:17:12
| 373,619,992
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,298
|
r
|
my_knn_cv.R
|
#' My_knn_cv
#'
#' This function performs k-fold cross-validation for the k-nearest neighbors algorithm.
#'
#' @param train Is a matrix with no NAs or missing values that is used to train the model.
#' @param cl Is the true classification of the training data.
#' @param k_nn Is the number of nearest neighbors to include in the cross validation calculation.
#' @param k_cv Is the number of folds to use for the cross validation (common Ks are 2,5, and 10).
#' @keywords prediction
#'
#' @examples
#' set.seed(1)
#'
#' rand_data <- data.frame(x1 = rnorm(100,0,1),x2 = rnorm(100,2,1))
#' rand_data_cl <- data.frame(y = rbinom(100,1,.3))
#' my_knn_cv(rand_data,rand_data_cl,k_nn = 5,k_cv = 5)
#' my_knn_cv(rand_data,rand_data_cl,k_nn = 5,k_cv = 10)
#'
#' @import class
#' @importFrom class knn
#' @importFrom dplyr filter select
#' @importFrom purrr as_vector
#' @importFrom magrittr %>%
#'
#' @return Type list with a \code{cv_err} object and the predicted classification \code{class} output.
#'
#' @export
my_knn_cv <- function(train,cl,k_nn,k_cv) {
  # Depends on: class (knn), dplyr (filter, select), purrr (as_vector) and magrittr (%>%).
# Create fold arg, with k partitions.
fold = sample(rep(1:k_cv,length = nrow(train)))
# Add train to fold vector.
data = cbind(train,fold)
# Create one with class
data_w_cl = cbind(fold,cl)
# Create full data for eventual KNN calc.
# Clone CL.
cl_full <- as_vector(cl)
# Clone train.
train_full <- train
# Create a list object for later values.
knn_list = list()
knn_error_list = list()
knn_corr_list = list()
# Iterate through the Ks.
for (i in 1:k_cv) {
    # Create training and test data.
data_train = data %>% dplyr::filter(fold != i) %>%
dplyr::select(-fold)
data_test = data %>% dplyr::filter(fold == i) %>%
dplyr::select(-fold)
cl = data_w_cl %>% dplyr::filter(fold != i) %>%
dplyr::select(-fold)
# Create a prediction vector.
cl_predict = data_w_cl %>% dplyr::filter(fold == i) %>%
dplyr::select(-fold)
# Run KNN.
# Coerce type to work in the KNN function.
cl <- as_vector(cl)
cl_predict = as_vector(cl_predict)
knn_iter <- knn(data_train,
data_test,
cl = cl,
k = k_nn)
# Calculate the correctly classified
knn_test_eq = knn_iter == cl_predict
# Calculate the number of TRUEs
num_corr_class = sum(knn_test_eq)
# Calculate the correct classification rate
pct_corr_class = num_corr_class / length(knn_iter)
# Calculate the missclassification
pct_miss_class = 1-pct_corr_class
# Store these in the list
knn_corr_list[[i]] <- pct_corr_class
knn_list[[paste("pred_class",i,sep = "_")]] <- knn_iter
knn_error_list[[paste("pct_miss_class",i,sep = "_")]] <- pct_miss_class
}
# Calculate return values.
# Calculate the mean missclassification
# calculate the prediction for KNN
# Predict the final classification.
knn_prediction = knn(train_full,
train_full,
cl = cl_full, k = k_nn)
  ## Returns KNN predictions computed on the full training data.
# Calculate CV Error.
cv_err = mean(unlist(knn_error_list))
# Create an output list
list_out = list()
# Assign the values to a list.
list_out[["class"]] <- knn_prediction
list_out[["cv_err"]] <- cv_err
return(list_out)
# Returns a list of the models and their predictions
# Returns the cv error
}
|
3afabf92092d59382d439386be121a80a6463ce5
|
f7105536a44be844d652f28e5c5b5bab0db66aa8
|
/R/CMF/econometric/hw4/hw4.R
|
6664ca6bdf9a1ffc3f7a30ecfe8f0f0492847808
|
[] |
no_license
|
DmitryZheglov/code
|
e6f143c21287a250c01c639659b672fdef089bbe
|
fdec951c8dcf3145109076bc78f0646217b1b822
|
refs/heads/master
| 2022-12-13T11:43:00.962046
| 2019-08-12T18:58:55
| 2019-08-12T18:58:55
| 93,278,292
| 1
| 0
| null | 2022-12-07T23:49:07
| 2017-06-03T23:00:52
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 8,611
|
r
|
hw4.R
|
# input data
library(datasets)
datMTLR= read.csv("C:/Users/Dmitriy/Desktop/proga/R/cmf/ecmetr/hw4/MTLR.csv",header=TRUE,sep=",")
datYNDX= read.csv("C:/Users/Dmitriy/Desktop/proga/R/cmf/ecmetr/hw4/RASP.csv",header=TRUE,sep=",")
dax=unlist((datYNDX[8]-datYNDX[5])/datYNDX[5],use.names = FALSE)
smi=unlist((datMTLR[8]-datMTLR[5])/datMTLR[5],use.names = FALSE)
T <- length(dax)
# LM-тест
library(FinTS)
ArchTest(dax,lags=12)
# a large p-value means there are ARCH effects, so the model will be imprecise
library(fGarch)
# general form of the model
#dax.gfit=garchFit(formula=~arma(m,n)+aparch(p,q),data=dax,cond.dist="norm",include.delta=T/F,leverage=T/F,trace=FALSE)
# choose one of these, fit the parameters and build a forecast
# GARCH(1,1)
garchFit(formula=~aparch(1,1),data=dax,delta=2,include.delta=FALSE,leverage=FALSE,trace=FALSE)
# TS-GARCH(1,1)
garchFit(formula=~aparch(1,1),data=dax,delta=1,include.delta=FALSE,leverage=FALSE,trace=FALSE)
# T-GARCH(1,1)
garchFit(formula=~aparch(1,1),data=dax,delta=2,include.delta=FALSE,leverage=TRUE,trace=FALSE)
# GRAPHICAL ANALYSIS OF THE MODEL
dax.gfit <- garchFit(formula=~aparch(1,1),data=dax,delta=2,
include.delta=FALSE,leverage=TRUE,cond.dist="sged",
shape=1.25,include.shape=FALSE,trace=FALSE)
plot(dax.gfit,which=13)
plot(dax.gfit,which=10)
# the qq-plot does not change; there is no particular difference between the models, the residuals do not change
library(tseries)
# ADF test (alternative = stationarity)
adf.test(dax)
# PP test
pp.test(dax)
# KPSS test
kpss.test(dax, null="Level")
# the tests show a large p-value, so stationarity can be accepted only at a significance level of 90 percent or lower
# forecast of the mean and variance i steps ahead
# all of this is used later in the loop
#dax.frc <- predict(dax.gfit,n.ahead=5)
#dax.frc[,1] # vector of means
#dax.frc[,3]^2 # vector of variances
# calculation of the loss threshold (VaR)
alpha <- 0.05
#VaR <- dax.frc[1,1]+dax.frc[1,3]*qged(alpha,mean=0,sd=1,
# nu=dax.gfit@fit$par["shape"])
# VaR curve: a sequence of consecutive VaR values over time
T1 <- 0.8*T; T2 <- T - T1 # training and test samples
# over the test sample, construct a set
# of consecutive VaR values
VaR <- numeric()
h <- 0.18*T1
for (i in (T1+1):(T1+T2)) {
h.dax <- dax[(i-h):(i-1)]
dax.gfit <- garchFit(formula=~aparch(1,1),data=h.dax,
delta=2,include.delta=FALSE,leverage=TRUE,cond.dist="sged",
shape=1.5,include.shape=FALSE,trace=FALSE)
dax.frc <- predict(dax.gfit,n.ahead=1)
VaR[i-T1] <- dax.frc[1,1]+dax.frc[1,3]*qsged(alpha,mean=0,sd=1,
nu=1.5,xi=dax.gfit@fit$par["skew"])
}
# VaR curve
# compare the risk estimates with the actual values
fact <- dax[(T1+1):(T1+T2)]
plot(fact,type="l")
lines(VaR,col="red")
#ylim=c(-5.2,-4.6)
# Kupiec test in R:
K <- sum(fact<VaR); alpha0 <- K/T2
S <- -2*log((1-alpha)^(T2-K)*alpha^K)+
2*log((1-alpha0)^(T2-K)*alpha0^K)
p.value <- 1-pchisq(S,df=1)
# a high p-value means we most likely chose alpha correctly
############################ Consider the multivariate case (a portfolio)
# Modeling stages:
# 1. Estimate the marginal GARCH models;
# 2. Compute the conditional standardized residuals z_{i,t}
# 3. Model the multivariate quantity z_t
# The copula-GARCH model in R
# univariate GARCH models
library(fGarch)
dax.gfit <- garchFit(data=dax,formula=~garch(1,1),
shape=1.25,include.shape=F,cond.dist="ged",trace=F)
smi.gfit <- garchFit(data=smi,formula=~garch(1,1),
shape=1.3,include.shape=F,cond.dist="sged",trace=F)
# standardized residuals
z <- matrix(nrow=T,ncol=2)
z[,1] <- dax.gfit@residuals / dax.gfit@sigma.t
z[,2] <- smi.gfit@residuals / smi.gfit@sigma.t
# marginal distributions of the residuals
mean <- c(0,0); sd <- c(1,1); nu <- c(1.25,1.3)
xi <- c(1, smi.gfit@fit$par["skew"])
cdf <- matrix(nrow=T,ncol=2)
for (i in 1:2) cdf[,i] <- psged(z[,i],mean=mean[i],
sd=sd[i],nu=nu[i],xi=xi[i])
# The copula-GARCH model in R
# Modeling the copula
library(copula)
# declare the copulas
norm.cop <- normalCopula(dim=2,param=0.5,dispstr="un")
stud.cop <- tCopula(dim=2,param=0.5,df=5,
df.fixed=TRUE,dispstr="un")
gumb.cop <- gumbelCopula(dim=2,param=2)
clay.cop <- claytonCopula(dim=2,param=2)
# fit the copulas
norm.fit <- fitCopula(cdf,copula=norm.cop)
stud.fit <- fitCopula(cdf,copula=stud.cop)
gumb.fit <- fitCopula(cdf,copula=gumb.cop)
clay.fit <- fitCopula(cdf,copula=clay.cop)
# Monte Carlo method
N=1000
cdf.sim <- rCopula(n=N,copula=stud.fit@copula)
z.sim <- matrix(nrow=N,ncol=2)
for (i in 1:2) z.sim[,i] <- qsged(cdf.sim[,i],
mean=mean[i],sd=sd[i],nu=nu[i],xi=xi[i])
frc1 <- predict(dax.gfit,n.ahead=1)
frc2 <- predict(smi.gfit,n.ahead=1)
mu <- c(frc1[,1],frc2[,1])
sigma <- c(frc1[,3],frc2[,3])
# Financial risk assessment
# Bivariate case
# returns of a two-asset portfolio
prt <- cbind(dax, smi)
# estimate the model parameters
library(ghyp)
prt.fit <- fit.ghypmv(prt,symmetric=FALSE,silent=TRUE)
aic.mv <- stepAIC.ghyp(prt, dist=c("gauss","ghyp"),symmetric=NULL,silent=TRUE)
# choose the optimal asset weights in the portfolio
opt <- portfolio.optimize(prt.fit,
risk.measure="value.at.risk",type="minimum.risk",
target.return=NULL,risk.free=NULL,level=0.95,silent=TRUE)
w=opt$opt.weights
# simulated portfolio returns
prt.sim <- w[1]*(mu[1]+sigma[1]*z.sim[,1]) +
w[2]*(mu[2]+sigma[2]*z.sim[,2])
# risk measures
prt.sim <- sort(prt.sim)
VaR <- prt.sim[alpha*N]
ES <- mean(prt.sim[1:(alpha*N-1)])
# calculation of the loss threshold (VaR)
T <- length(dax); alpha <- 0.05
T1 <- 400; T2 <- T - T1 # training and test samples
# over the test sample, construct a set
# of consecutive VaR values
x=w[1]*dax+w[2]*smi
VaR <- numeric()
h <- 0.2*T1
for (i in (T1+1):(T1+T2)) {
h.dax <- x[(i-h):(i-1)]
dax.gfit <- garchFit(formula=~aparch(1,1),data=h.dax,
delta=2,include.delta=FALSE,leverage=TRUE,cond.dist="sged",
shape=1.5,include.shape=FALSE,trace=FALSE)
dax.frc <- predict(dax.gfit,n.ahead=1)
VaR[i-T1] <- dax.frc[1,1]+dax.frc[1,3]*qsged(alpha,mean=0,sd=1,
nu=1.5,xi=dax.gfit@fit$par["skew"])
}
fact <- x[(T1+1):(T1+T2)]
plot(fact,type="l")
lines(VaR,col="red")
# Run the Kupiec test
K <- sum(fact<VaR); alpha0 <- K/T2
S <- -2*log((1-alpha)^(T2-K)*alpha^K)+2*log((1-alpha0)^(T2-K)*alpha0^K)
p.value <- 1-pchisq(S,df=1)
# the p-value is large, so we found the correct alpha
# Loss functions
# The loss function measures the depth of breaches of the VaR curve
# and is interpreted as the size of the incurred losses
# Lopez loss function:
L.Lo <- sum((fact-VaR)^2*(fact<VaR))/K
# Blanco-Ihle loss function:
L.BI <- sum((fact-VaR)/VaR*(fact<VaR))/K
L.Lo*10^4
L.BI
# the loss function values are within normal limits
|
75addeafb3d08c5416dd4279be61f6b2e5697a1e
|
7e39ca6104d055974719e15a076b6777c8c3f56a
|
/programs/explore_epidurOutliers_ILI.R
|
443ece8e145252d09bde7325ad158a0f0206f81c
|
[] |
no_license
|
Qasim-1develop/flu-SDI-dzBurden-drivers
|
00e61a992e75ea500b9d690ef6f25f77f9638def
|
c66a556c1d5012af3b69c16ba407b3d444ea23be
|
refs/heads/master
| 2022-07-22T16:46:10.612961
| 2018-09-26T14:45:35
| 2018-09-26T14:45:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,281
|
r
|
explore_epidurOutliers_ILI.R
|
## Name: Elizabeth Lee
## Date: 9/16/15
## Function: examine zip3-season time series with short and long epidemic durations; Does the time series appear to follow an expected epidemic pattern?
## Results: Similar to IR epiDur outliers, long epidemics seem more acceptable than short ones, which are noisy and often miss at least part of the epidemic peak if there appears to be one.
# Legend: black line at 0 means epi.week=T, other color designations refer to in.season variable
## 9/21/15: filter zip3-season combos before looking at outliers (write_zip3seasonFiltered_ILI.R), add adjusted R2 (indicator of model fit) to plot
### disease burden metrics: epidemic duration
## Filenames: sprintf('dbMetrics_periodicReg_%sILI%s_analyzeDB.csv', code, code2), sprintf('fullIndicAll_periodicReg_%sILI%s_analyzeDB.csv', code, code2), sprintf('zip3SeasonCombos_%sILI%s.csv', code, code2), sprintf('summaryStats_periodicReg_%sallZip3Modes_ILI%s.csv', code, code2)
## Data Source: IMS Health
## Notes:
##
## useful commands:
## install.packages("pkg", dependencies=TRUE, lib="/usr/local/lib/R/site-library") # in sudo R
## update.packages(lib.loc = "/usr/local/lib/R/site-library")
#### header ####################################
setwd('~/Dropbox/code')
source("GeneralTools.R")
require(ggplot2)
require(readr)
require(dplyr)
require(tidyr)
setwd(dirname(sys.frame(1)$ofile))
#### set these! ####################################
# code = "t2sa_" # semi-annual periodicity
code <- "t2_" # parabolic time trend term
# code="" # linear time trend term
code2 <- "_Oct"
#### import data ####################################
setwd('../R_export')
dbMetrics.g <- read.csv(sprintf('dbMetrics_periodicReg_%sILI%s_analyzeDB.csv', code, code2), header=T, colClasses=c(zipname="character", metric="character"))
# standardized data
dbMetrics.gz <- dbMetrics.g %>% group_by(season, metric) %>% mutate(burden.z = (burden - mean(burden))/sd(burden))
# import time series data
fullIndic <- read_csv(file=sprintf('fullIndicAll_periodicReg_%sILI%s_analyzeDB.csv', code, code2), col_types=list(zipname=col_character()))
# import model fit data
modelfit <- read_csv(file=sprintf('summaryStats_periodicReg_%sallZip3Mods_ILI%s.csv', code, code2))
modelfit2 <- modelfit %>% mutate(zipname = substr.Right(gsub("X", "00", zip3), 3))
# import zip3-season combinations
combos <- read.csv(sprintf('zip3SeasonCombos_%sILI%s.csv', code, code2), header=T, colClasses=c(zipname="character"))
combos2 <- combos %>% mutate(id = paste0(season, zipname))
#### plot formatting ####################################
w = 9
h = 6
ct = 6
dir.create(sprintf('../graph_outputs/explore_epidurOutliers_%sILI%s', code, code2), showWarnings=FALSE)
setwd(sprintf('../graph_outputs/explore_epidurOutliers_%sILI%s', code, code2))
# #### LONG DURATIONS (GREATER THAN OR EQUAL TO 20 WEEKS) ####################################
# # examine ts of zip-seasons with long epidemic durations
# db.dur20 <- dbMetrics.g %>% filter(metric=="epi.dur" & burden>=20) %>% mutate(id.combo = paste0(season, zipname))
# zip3list1 <- db.dur20 %>% select(id.combo) %>% distinct %>% mutate(for.plot = seq_along(1:nrow(.))) # id all zip3s with long durations
#
# # subset full data
# fi.dur20.all <- fullIndic %>% mutate(id.combo = paste0(season, zipname)) %>% filter(id.combo %in% zip3list1$id.combo) %>% filter(season!=1)
# data_plot <- right_join(fi.dur20.all, zip3list1, by='id.combo') %>% mutate(Thu.week=as.Date(Thu.week, origin="1970-01-01"))
#
# #### subset season-zip3 combinations in db.dur20 only ####################################
# fi.dur20.seas <- fullIndic %>% mutate(id.combo = paste0(season, zipname)) %>% filter(id.combo %in% db.dur20$id.combo)
# zip3list2 <- fi.dur20.seas %>% select(id.combo) %>% distinct %>% mutate(for.plot = seq_along(1:nrow(.)))
# data_plot2 <- right_join(fi.dur20.seas, zip3list2, by="id.combo") %>% mutate(Thu.week=as.Date(Thu.week, origin="1970-01-01")) %>% filter(flu.week)
#
# #### plot epidemic time series for zip3-season combinations with long durations ####################################
# indexes2 <- seq(1, max(data_plot2 %>% select(for.plot)), by=ct)
#
# # ILI plots by season
# dir.create(sprintf('./over20', code, code2), showWarnings=FALSE)
# setwd(sprintf('./over20', code, code2))
# for(i in indexes2){
# dummyplots <- ggplot(data_plot2 %>% filter(for.plot>= i & for.plot < i+ct) %>% mutate(is.epiweek = ifelse(epi.week, 0, NA)), aes(x=Thu.week, y=ili, group=id.combo)) +
# theme(axis.text=element_text(size=12), axis.title=element_text(size=14,face="bold")) +
# geom_line(aes(color = in.season)) + scale_color_brewer(palette="Set1") +
# geom_line(aes(y = is.epiweek), color = 'black') + # appears if epi.week=T
# geom_line(aes(y = epi.thresh), color = 'grey') +
# facet_wrap(~id.combo, scales = "free")
# # grab zip3s in plot for file name
# ziplabels <- data_plot2 %>% select(id.combo) %>% distinct %>% slice(c(i, i+ct-1))
# ggsave(sprintf("longEpiDur_seas_%sfits_ILI%s_%s-%s.png", code, code2, ziplabels[1,], ziplabels[2,]), dummyplots, width=w, height=h)
# }
#
#
# #### SHORT DURATIONS (LESS THAN OR EQUAL TO 5 WEEKS) ####################################
# # examine ts of zip-seasons with short epidemic durations
# db.dur5 <- dbMetrics.g %>% filter(metric=="epi.dur" & burden<=5) %>% mutate(id.combo = paste0(season, zipname))
#
# # subset season-zip3 combinations in db.dur8 only
# fi.dur5.seas <- fullIndic %>% filter(season != 1) %>% mutate(id.combo = paste0(season, zipname)) %>% filter(id.combo %in% db.dur5$id.combo)
# zip3list3 <- fi.dur5.seas %>% select(id.combo) %>% distinct %>% mutate(for.plot = seq_along(1:nrow(.)))
# data_plot3 <- right_join(fi.dur5.seas, zip3list3, by="id.combo") %>% mutate(Thu.week=as.Date(Thu.week, origin="1970-01-01")) %>% filter(flu.week)
#
# #### plot epidemic time series for zip3-season combinations with short durations ####################################
# indexes3 <- seq(1, max(data_plot3 %>% select(for.plot)), by=ct)
#
# # ILI plots by season
# dir.create(sprintf('../under5', code, code2), showWarnings=FALSE)
# setwd(sprintf('../under5', code, code2))
# for(i in indexes3){
# dummyplots <- ggplot(data_plot3 %>% filter(for.plot>= i & for.plot < i+ct) %>% mutate(is.epiweek = ifelse(epi.week, 0, NA)), aes(x=Thu.week, y=ili, group=id.combo)) +
# theme(axis.text=element_text(size=12), axis.title=element_text(size=14,face="bold")) +
# geom_line(aes(color = in.season)) + scale_color_brewer(palette="Set1") +
# geom_line(aes(y = is.epiweek), color = 'black') + # appears if epi.week=T
# geom_line(aes(y = epi.thresh), color = 'grey') +
# facet_wrap(~id.combo, scales = "free")
# # grab zip3s in plot for file name
# ziplabels <- data_plot3 %>% select(id.combo) %>% distinct %>% slice(c(i, i+ct-1))
# ggsave(sprintf("shortEpiDur_seas_%sfits_ILI%s_%s-%s.png", code, code2, ziplabels[1,], ziplabels[2,]), dummyplots, width=w, height=h)
# }
#
# # all plots saved 9/16/15 morning
#### filter zip3-combos data ####################################
#### FILTERED LONG DURATIONS (GREATER THAN OR EQUAL TO 20 WEEKS) ####################################
# examine ts of zip-seasons with long epidemic durations
db.dur20.filt <- dbMetrics.g %>% filter(metric=="epi.dur" & burden>=20) %>% mutate(id.combo = paste0(season, zipname)) %>% filter(id.combo %in% combos2$id)
#### subset season-zip3 combinations in db.dur20 only ####################################
fi.dur20.seas <- fullIndic %>% mutate(id.combo = paste0(season, zipname)) %>% filter(id.combo %in% db.dur20.filt$id.combo)
zip3list2 <- fi.dur20.seas %>% select(zipname, id.combo) %>% distinct %>% mutate(for.plot = seq_along(1:nrow(.)))
zip3list2.stat <- left_join(zip3list2, modelfit2, by="zipname") %>% select(-zip3, -p.value, -df, -r.squared)
data_plot2 <- right_join(fi.dur20.seas, zip3list2.stat %>% select(-zipname), by="id.combo") %>% mutate(Thu.week=as.Date(Thu.week, origin="1970-01-01")) %>% filter(flu.week) %>% mutate(id.combo.lab = paste(id.combo, signif(adj.r.squared, digits=2)))
#### plot epidemic time series for zip3-season combinations with long durations ####################################
indexes2 <- seq(1, max(data_plot2 %>% select(for.plot)), by=ct)
# ILI plots by season
dir.create(sprintf('./over20filtered', code, code2), showWarnings=FALSE)
setwd(sprintf('./over20filtered', code, code2))
for(i in indexes2){
dummyplots <- ggplot(data_plot2 %>% filter(for.plot>= i & for.plot < i+ct) %>% mutate(is.epiweek = ifelse(epi.week, 0, NA)), aes(x=Thu.week, y=ili, group=id.combo.lab)) +
theme(axis.text=element_text(size=12), axis.title=element_text(size=14,face="bold")) +
geom_line(aes(color = in.season)) + scale_color_brewer(palette="Set1") +
geom_line(aes(y = is.epiweek), color = 'black') + # appears if epi.week=T
geom_line(aes(y = epi.thresh), color = 'grey') +
facet_wrap(~id.combo.lab, scales = "free")
# grab zip3s in plot for file name
ziplabels <- data_plot2 %>% select(id.combo) %>% distinct %>% slice(c(i, i+ct-1))
ggsave(sprintf("longEpiDur_seas_%sfits_ILI%s_%s-%s.png", code, code2, ziplabels[1,], ziplabels[2,]), dummyplots, width=w, height=h)
}
#### FILTERED SHORT DURATIONS (LESS THAN OR EQUAL TO 5 WEEKS) ####################################
# examine ts of zip-seasons with short epidemic durations
db.dur5.filt <- dbMetrics.g %>% filter(metric=="epi.dur" & burden<=5) %>% mutate(id.combo = paste0(season, zipname)) %>% filter(id.combo %in% combos2$id)
# subset season-zip3 combinations in db.dur8 only
fi.dur5.seas <- fullIndic %>% filter(season != 1) %>% mutate(id.combo = paste0(season, zipname)) %>% filter(id.combo %in% db.dur5.filt$id.combo)
zip3list3 <- fi.dur5.seas %>% select(zipname, id.combo) %>% distinct %>% mutate(for.plot = seq_along(1:nrow(.)))
zip3list3.stat <- left_join(zip3list3, modelfit2, by="zipname") %>% select(-zip3, -p.value, -df, -r.squared)
data_plot3 <- right_join(fi.dur5.seas, zip3list3.stat %>% select(-zipname), by="id.combo") %>% mutate(Thu.week=as.Date(Thu.week, origin="1970-01-01")) %>% filter(flu.week) %>% mutate(id.combo.lab = paste(id.combo, signif(adj.r.squared, digits=2)))
#### plot epidemic time series for zip3-season combinations with short durations ####################################
indexes3 <- seq(1, max(data_plot3 %>% select(for.plot)), by=ct)
# ILI plots by season
dir.create(sprintf('../under5filtered', code, code2), showWarnings=FALSE)
setwd(sprintf('../under5filtered', code, code2))
for(i in indexes3){
dummyplots <- ggplot(data_plot3 %>% filter(for.plot>= i & for.plot < i+ct) %>% mutate(is.epiweek = ifelse(epi.week, 0, NA)), aes(x=Thu.week, y=ili, group=id.combo)) +
theme(axis.text=element_text(size=12), axis.title=element_text(size=14,face="bold")) +
geom_line(aes(color = in.season)) + scale_color_brewer(palette="Set1") +
geom_line(aes(y = is.epiweek), color = 'black') + # appears if epi.week=T
geom_line(aes(y = epi.thresh), color = 'grey') +
facet_wrap(~id.combo.lab, scales = "free")
# grab zip3s in plot for file name
ziplabels <- data_plot3 %>% select(id.combo) %>% distinct %>% slice(c(i, i+ct-1))
ggsave(sprintf("shortEpiDur_seas_%sfits_ILI%s_%s-%s.png", code, code2, ziplabels[1,], ziplabels[2,]), dummyplots, width=w, height=h)
}
|
d18f8c972a41a053c1064c6f456f31d9dfe2b60c
|
8da078ced4ec8803d6dffe6ecce2f7b9f0212bb6
|
/ui.R
|
47856d901cc8302b97cb4a3aa9bdc60ba17906f9
|
[] |
no_license
|
pavlov-aa/Map-of-Road-Accidents-in-Moscow-and-Moscow-Oblast
|
8224b31e0db99857fd4b3b716e40bfba983f37d5
|
fead7d642a1d6efa80d0817848b65a305f7dad4d
|
refs/heads/master
| 2020-05-23T15:44:53.226814
| 2019-07-01T12:26:47
| 2019-07-01T12:26:47
| 186,833,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 678
|
r
|
ui.R
|
library('shiny')
library('leaflet')
ui <- fluidPage(
titlePanel("Car Accidents in Russia"),
sidebarPanel(sliderInput("userdate","Car accidents dates:",
min=as.Date("2018-01-01","%Y-%m-%d"),
max=as.Date("2018-11-01","%Y-%m-%d"),
value=c(as.Date("2018-02-01"),as.Date("2018-10-01")),
timeFormat="%Y-%m-%d"),
uiOutput("regionSelector")
),
mainPanel(
#this will create a space for us to display our map
leafletOutput(outputId = "mymap")
)
)
|
9e9cf79f03c886e5d908af12f8630d7618857bb4
|
dd25470dd810951426776e9f332a571270a7e7e4
|
/server.R
|
a95dfe7ba0233ca3eb4066356131b1b6993b17f9
|
[] |
no_license
|
scottbedwell/shiny-proj
|
238e89d4d02a19a2d08a85462c826af72e777450
|
d767a83741642a6a0350dd9fae6593eaac38604d
|
refs/heads/master
| 2021-01-01T17:52:30.082243
| 2014-12-21T20:37:58
| 2014-12-21T20:37:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 501
|
r
|
server.R
|
library(UsingR)
library(ggplot2)
library(caret)
data(father.son)
#with(father.son, plot(fheight,sheight))
qplot(fheight,sheight, data=father.son) + geom_smooth(method = "lm", color = "red")
#modFit <- lm(sheight~.,data=father.son)
modFit <- train(sheight~., data=father.son, method="lm")
summary(modFit)
shinyServer(function(input, output) {
output$oFatherHeight <- renderPrint({input$fatherHeight})
output$oSonHeight <- renderPrint({predict(modFit,data.frame(fheight=input$fatherHeight))})
})
|
a280b1a2cb6ae98d0acbe13afc08ef2b15037cc3
|
5c427a0dff846e42a83c33d0251280b149eaa991
|
/maintenance/wiki-schema/wiki_col_lifetime.R
|
6a552e05af5d97fcf6db537e3e255ff2be88210c
|
[] |
no_license
|
montahdaya/ESEUR-code-data
|
27c4bb3f719565125b57998f408b1a11b1ede041
|
f9280e23c397807aa3505135423797dad1acf09a
|
refs/heads/master
| 2020-05-29T08:55:40.168842
| 2017-01-29T22:33:42
| 2017-01-29T22:33:42
| 82,587,420
| 1
| 0
| null | 2017-02-20T18:07:07
| 2017-02-20T18:07:07
| null |
UTF-8
|
R
| false
| false
| 918
|
r
|
wiki_col_lifetime.R
|
#
# wiki_col_lifetime.R, 16 May 14
#
# Data from:
# Schema evolution in wikipedia toward a Web Information System Benchmark
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
library("survival")
col_life=read.csv(paste0(ESEUR_dir, "maintenance/wiki-schema/tabcol_life.csv.xz"), as.is=TRUE)
# cur_release,days_difference,days_since_start
ver_days=read.csv(paste0(ESEUR_dir, "maintenance/wiki-schema/ver-date-diff.csv.xz"), as.is=TRUE)
ver_surv=Surv(col_life$last_v-col_life$first_v, event=col_life$last_v != 280)
ver_mod=survfit(ver_surv ~ 1)
plot(ver_mod, col=point_col,
xlab="Versions since first release", ylab="Survival")
# day_surv=Surv(ver_days$days_since_start[col_life$last_v]-ver_days$days_since_start[col_life$first_v], event=col_life$last_v != 280)
# day_mod=survfit(day_surv ~ 1)
# plot(day_mod,
# xlab="Days since first release", ylab="Survival")
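# Illustrative toy example (not part of the original ESEUR analysis): the same
# Surv/survfit pattern on made-up data, where the event marks columns that were
# dropped before the final schema version (280).
# toy=data.frame(first_v=c(1, 5, 10, 40), last_v=c(280, 200, 150, 280))
# toy_surv=Surv(toy$last_v-toy$first_v, event=toy$last_v != 280)
# plot(survfit(toy_surv ~ 1), xlab="Versions survived", ylab="Survival")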
|
eb63557cffc67229b1bc0f37325fa0946744c521
|
ff12ace2203836c526198b1460718caba7993834
|
/R/method-preview_.R
|
dadf5d58e6b54df1eb24e1a54c0211819137f74e
|
[] |
no_license
|
abresler/PivotalR
|
55abbeeeed82e858e15f4abe486bfde562d9108a
|
8a14875159931883b01d223ae24a61764b751001
|
refs/heads/master
| 2021-01-18T19:37:43.497184
| 2013-08-04T19:44:23
| 2013-08-04T19:44:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,984
|
r
|
method-preview_.R
|
## ------------------------------------------------------------------------
## Preview the object
## ------------------------------------------------------------------------
setGeneric (
"preview",
def = function (x, ...) standardGeneric("preview"),
signature = "x")
## ------------------------------------------------------------------------
.limit.str <- function (nrows)
{
if (is.null(nrows) || (is.character(nrows) && nrows == "all"))
limit.str <- ""
else if (is.numeric(nrows))
limit.str <- paste(" limit ", nrows, sep = "")
else
stop("nrows must be NULL, \"all\" or an integer!")
limit.str
}
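## Illustration (not in the original source): .limit.str(100) returns " limit 100",
## while .limit.str(NULL) and .limit.str("all") both return "".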
## ------------------------------------------------------------------------
setMethod (
"preview",
signature (x = "db.table"),
def = function (x, nrows = 100, array = TRUE) {
warn.r <- getOption("warn")
options(warn = -1)
if (array) {
x <- .expand.array(x)
res <- .db.getQuery(paste("select * from (", content(x), ") s",
.limit.str(nrows), sep = ""),
conn.id(x))
} else
res <- .db.getQuery(paste("select * from ", content(x),
.limit.str(nrows), sep = ""),
conn.id(x))
options(warn = warn.r) # reset R warning level
res
})
## ------------------------------------------------------------------------
setMethod (
"preview",
signature (x = "db.view"),
def = function (x, nrows = 100, interactive = FALSE, array = TRUE) {
warn.r <- getOption("warn")
options(warn = -1)
if (interactive) {
cat(deparse(substitute(x)),
"points to a view in the database",
dbname(conn.id(x)),
"and it might take time to evaluate and extract a preview of it if the data is large!\n")
go <- .read.input("Do you really want to continue ? (Yes/No) : ",
c("yes", "y", "no", "n"))
            if (go == "no" || go == "n") return(invisible(NULL))
}
if (array) {
x <- .expand.array(x)
res <- .db.getQuery(paste("select * from (", content(x), ") s",
.limit.str(nrows), sep = ""),
conn.id(x))
} else
res <- .db.getQuery(paste("select * from ", content(x),
.limit.str(nrows), sep = ""),
conn.id(x))
options(warn = warn.r) # reset R warning level
res
})
## ------------------------------------------------------------------------
setMethod (
"preview",
signature (x = "db.Rquery"),
def = function (x, nrows = 100, interactive = FALSE, array = TRUE) {
msg.level <- .set.msg.level("panic", conn.id(x)) # suppress all messages
warn.r <- getOption("warn")
options(warn = -1)
if (interactive) {
cat(deparse(substitute(x)),
"is just a query in R and does not point to any object in the database",
dbname(conn.id(x)),
"and it might take time to evaluate and extract a preview of it if the data is large!\n")
go <- .read.input("Do you really want to continue ? (Yes/No) : ",
c("yes", "y", "no", "n"))
            if (go == "no" || go == "n") return(invisible(NULL))
}
if (array) x <- .expand.array(x)
res <- .db.getQuery(paste(content(x), .limit.str(nrows),
sep = ""), conn.id(x))
msg.level <- .set.msg.level(msg.level, conn.id(x)) # reset message level
options(warn = warn.r) # reset R warning level
if (length(names(x)) == 1 && x@.col.data_type == "array") {
if (gsub("int", "", x@.col.udt_name) != x@.col.udt_name)
res <- arraydb.to.arrayr(res[[1]], "integer")
else if (gsub("float", "", x@.col.udt_name) != x@.col.udt_name)
res <- arraydb.to.arrayr(res[[1]], "double")
else if (x@.col.udt_name %in% c("_bool"))
res <- arraydb.to.arrayr(res[[1]], "logical")
else
res <- arraydb.to.arrayr(res[[1]], "character")
if (dim(res)[1] == 1)
res <- as.vector(res)
}
return (res)
})
## ------------------------------------------------------------------------
setMethod (
"preview",
signature (x = "db.Rcrossprod"),
def = function (x, interactive = FALSE) {
msg.level <- .set.msg.level("panic", conn.id(x)) # suppress all messages
warn.r <- getOption("warn")
options(warn = -1)
if (interactive) {
cat(deparse(substitute(x)),
"is just a query in R and does not point to any object in the database",
dbname(conn.id(x)),
"and it might take time to evaluate and extract a preview of it if the data is large!\n")
go <- .read.input("Do you really want to continue ? (Yes/No) : ",
c("yes", "y", "no", "n"))
            if (go == "no" || go == "n") return(invisible(NULL))
}
res <- .db.getQuery(content(x), conn.id(x))
dims <- x@.dim
res <- arraydb.to.arrayr(res[1,1], "double")
res <- matrix(res, nrow = dims[1], ncol = dims[2])
msg.level <- .set.msg.level(msg.level, conn.id(x)) # reset message level
options(warn = warn.r) # reset R warning level
return (res)
})
## ------------------------------------------------------------------------
## same as preview
lookat <- function (x, nrows = 100, array = TRUE)
{
if (is(x, "db.table")) return (preview(x, nrows, array = array))
if (is(x, "db.Rcrossprod")) return (preview(x, FALSE))
preview(x, nrows, FALSE, array)
}
|
de4734838cee068bc7274e798d9e0e16185549d7
|
6b0235f498a3c02ff81631142aef02d5e2757544
|
/R/Consistency_checks.R
|
f653cf9fd7a46693f4043feb1108547d1b56f9e9
|
[] |
no_license
|
Isabella84/SECFISH
|
69c8bd8b15bd5d212632970f87debfc6d718d838
|
5d447efa36ad7347edf2aaa3e9b6e7ba38dc4902
|
refs/heads/master
| 2020-07-26T21:05:33.907985
| 2019-09-16T10:02:52
| 2019-09-16T10:02:52
| 208,765,622
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,759
|
r
|
Consistency_checks.R
|
#########################################################################################################
# SECFISH (Strengthening regional cooperation in the area of fisheries data collection #
# -Socio-economic data collection for fisheries, aquaculture and the processing industry at EU level) #
# Functions to identify correlations between costs and transversal variables by metier using #
# individual vessel data and for disaggregating variable costs from fleet segment to metier level #
# #
# Authors: Isabella Bitetto (COISPA), Loretta Malvarosa (NISEA), Maria Teresa Spedicato (COISPA), #
# Ralf Doering (THUENEN), Joerg Berkenhagen (THUENEN) #
# #
# #
# In case of use, the Authors should be cited. If you have any comments or suggestions please #
# contact the following e-mail address: bitetto@coispa.it #
# SECFISH is believed to be reliable. #
# However, we disclaim any implied warranty. #
# #
# July 2019 #
#########################################################################################################
# Comparison between the costs by fleet segment and the sum of the costs disaggregated by metier
Cons_check <- function(Costs_FS,Costs_MET,path=tempdir()) {
dir.create(file.path(path,"Consistency_checks"))
Costs_sum= aggregate(Costs_MET$value,by=list(Costs_MET$year, Costs_MET$Fleet_segment,Costs_MET$variable_name ),FUN="sum")
colnames(Costs_sum)=c("year","Fleet_segment","variable_name","Sum_costs_by_metier")
Merge=merge(Costs_sum,Costs_FS,by=c("year","Fleet_segment","variable_name") )[,c(1,2,3,4,7)]
colnames(Merge)=c("year","Fleet_segment","variable_name","Sum_costs_by_metier","Costs_by_fleet_segment")
Merge$DIFF= round((Merge$Sum_costs_by_metier - Merge$Costs_by_fleet_segment)/ Merge$Costs_by_fleet_segment*100,1)
#print(Merge)
write.table(Merge,file.path(path,"Consistency_checks","Consistency_checks.csv"),sep=";",row.names=F)
unlink(file.path(tempdir(),"Consistency_checks"),recursive=T)
}
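# Hypothetical toy example (not from the original SECFISH code; the real column
# layout may differ). It assumes Costs_MET has columns year, Fleet_segment,
# variable_name, metier and value, and that the fleet-segment cost value sits in
# the sixth column of Costs_FS, as implied by the positional indexing above.
# Costs_MET <- data.frame(year=2017, Fleet_segment="DTS_VL1218", variable_name="fuel",
# metier=c("OTB_DEF","OTB_MDD"), value=c(60,40))
# Costs_FS <- data.frame(year=2017, Fleet_segment="DTS_VL1218", variable_name="fuel",
# unit="euro", source="survey", value=100)
# Cons_check(Costs_FS, Costs_MET, path=tempdir())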
|
2d5d1632ebcc5c8a1e6517d008c37eb6f03ed8d3
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/FlyingR/R/method_1.R
|
8044c0a911c75e363f70a000c7d0c9c6ba3242f3
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,008
|
r
|
method_1.R
|
# Method 1 for practical range calculation based on Breguet's equations
#
# @author Brian Masinde
# @param bodyMass all up mass
# @param wingSpan wing span of bird in metres
# @param fatMass fat mass of bird
# @param ordo Passerine (1) or non-passerine (2)
# @param wingArea area of wing
# @param constants A list of re-definition of constants (i.e *airDensity*,
# *consume*, *enegry e*, *mechanical mce n*).
# @importFrom utils tail
# @return List with range (in km), constants used and fat fraction
# @include misc_functions.R lookup_table2.R
#
#' @importFrom utils tail
.breguet <- function(bodyMass, wingSpan, fatMass, ordo, wingArea, constants) {
##############################################################################
# fat fraction
fatFrac <- fatMass/bodyMass
# metabolic power ratio metPowRatio
metPowRatio <- .met.pow.ratio(constants, bodyMass, wingSpan, ordo)
# x1:ppcons/Aspect ratio + metPowRatio:mpratio check for Drag
# Aspect ratio = wingSpan^2 / wingArea
# drag is the effective drag force found by interpolation (table 2)
# add ppratio to metPowRatio and interpolate
# round off to 2 digits
table2 <- .gen.table2()
dFactor <-
sapply(round((
.prof.pow.ratio(ws = wingSpan, wa = wingArea, constants) + metPowRatio
),
2), .interpolate, table2)
##############################################################################
# Effective lift:drag ratio
# Disk area diskArea
diskArea <- 0.25 * pi * (wingSpan ^ 2)
# flat-plate area
flatPlateArea <- 0.00813 * (bodyMass ^ 0.666) * constants$bdc
# lift drag ratio at beginning of flight
liftDragRatio <- (dFactor / ((constants$ipf ^ 0.5) * constants$vcp)) *
((diskArea / flatPlateArea) ^ 0.5)
# increase by 10F%
liftDragRatio <- liftDragRatio + (liftDragRatio * (10 * fatFrac) / 100)
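  # Breguet-style range form implemented below (the /1000 converts metres to km):
  # range = (constants$fed * constants$mce / constants$g) * liftDragRatio * ln(1 / (1 - fatFrac))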
# range in kilometres
kmRange <-
((constants$fed * constants$mce) / constants$g) * liftDragRatio *
log(1 / (1 - fatFrac))/1000
return(round(kmRange, 1))
}
|
b90315dfce6b1703f54a63489bb0f6458c6d00ff
|
ce75af5efc0d9390447867fdf4ebb35dac155202
|
/man/MeanTLLandings.Rd
|
0e8468cd0a4df19cde7e3ce6414c22eaa02c752e
|
[] |
no_license
|
dempseydanielle/marindicators
|
eefd014489c5478f7762fa1c3753365f41402f96
|
2ba76adc8cf7f02bbf0e3a09b1990da8eef0a070
|
refs/heads/master
| 2020-06-16T01:04:52.572916
| 2020-05-12T15:41:12
| 2020-05-12T15:41:12
| 195,438,940
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,374
|
rd
|
MeanTLLandings.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MeanTLLandings.R
\name{meanTLLandings}
\alias{meanTLLandings}
\title{Calculates the Mean Trophic Level or Marine Trophic Index of fisheries
landings}
\usage{
meanTLLandings(land, TL.table, minTL = 0, years)
}
\arguments{
\item{land}{A dataframe of commercial landings data with columns \code{YEAR},
\code{ID}, \code{SPECIES} and \code{CATCH}. \code{YEAR} indicates the year
the landing was recorded, \code{ID} is an area code indicating where the
landing was recorded, \code{SPECIES} is a numeric code indicating the
species landed, and \code{CATCH} is the corresponding landed weight.}
\item{TL.table}{A dataframe with columns \code{SPECIES} and the corresponding
\code{TL_LAND} (trophic level). Entries in the \code{SPECIES} column should
be the unique values of species codes in \code{land} (or a subset thereof).
Other columns in \code{TL.table} are ignored.}
\item{minTL}{The minimum trophic level of species to include. Set \code{minTL
= 0} to calculate the mean trophic level of the landings; Set \code{minTL =
3.25} to calculate the marine trophic index. Default is \code{minTL = 0}.}
\item{years}{A vector of years for which to calculate indicator.}
}
\value{
Returns a dataframe with three columns: \code{ID}, \code{YEAR}, and if
\code{minTL = 0}: \code{MeanTL.Landings}, if \code{minTL = 3.25}:
\code{MTI.Landings}, or if \code{minTL} is a different value:
\code{MeanTL.Landings_minTL}.
If there are no observations in land for spatial scale \eqn{j} in year
\eqn{i}, indicator value is set to \code{NA}.
}
\description{
This function calculates the Mean Trophic Level or Marine Trophic
Index of fisheries landings for \eqn{j} areas and \eqn{i} years.
}
\details{
Mean trophic level of fisheries landings (\eqn{TL_{Land}}):
\deqn{TL_{Land} = \Sigma (TL_i*Y_i)/Y} where \eqn{TL_i} is the trophic level
of species \eqn{i}, \eqn{Y_i} is the landings of species \eqn{i}, and
\eqn{Y} is the total landings of all species. Trophic Level of individual
species can be estimated either through an Ecopath model or dietary
analysis, or taken from a global database such as Fishbase.
This indicator captures the average trophic level of the species exploited
in the fishery. In general, it reflects a transition from long-lived, high
trophic level, demersal fish toward short-lived, low trophic level pelagic
fish and invertebrates (Pauly et al., 1998).
The marine trophic index is calculated similarly to \eqn{TL_{Land}}, but
only includes species with trophic level greater than or equal to an
explicitly stated trophic level minTL. For instance, Pauly and Watson 2005
adopted a trophic level minTL of 3.25 to emphasize changes in the relative
abundance of higher trophic level fishes, and Shannon et al. 2014 used a
minTL of 4.0 to examine changes within the apex predator community. If used
in this way, this indicator highlights changes in the relative abundance of
the more threatened high-trophic level fishes (Pauly et al., 1998).
}
\examples{
# Compile data
data(land)
data(species.info)
# Calculate indicators
# Mean trophic level of landings
meanTLLandings(land, TL.table = species.info, minTL = 0, years = c(2014:2019))
# Marine trophic index
meanTLLandings(land, TL.table = species.info, minTL = 3.25, years = c(2014:2019))
}
\references{
Bundy A, Gomez C, Cook AM. 2017. Guidance framework for the
selection and evaluation of ecological indicators. Can. Tech. Rep. Fish.
Aquat. Sci. 3232: xii + 212 p.
Pauly D, Christensen V, Dalsgaard J, Froese R, Torres F. 1998. Fishing Down
Marine Food Webs. Science 279:860-863
Pauly D, Watson R. 2005. Background and interpretation of the Marine Trophic
Index as a measure of biodiversity. Philos Trans R Soc B Biol Sci 360:415
423
Shannon L, Coll M, Bundy A, Gascuel D, Heymans, JJ, Kleisner K, Lynam CP,
Piroddi C, Tam J, Travers-Trolet M and Shin Y. 2014. Trophic level-based
indicators to track fishing impacts across marine ecosystems. Marine Ecology
Progress Series, 512, pp.115-140.
}
\seealso{
Other fishing pressure indicators:
\code{\link{allPressure}()},
\code{\link{fishingPressure}()},
\code{\link{landings}()},
\code{\link{speciesRichness}()}
}
\author{
Danielle Dempsey, Adam Cook \email{Adam.Cook@dfo-mpo.gc.ca},
Catalina Gomez, Alida Bundy
}
\concept{fishing pressure indicators}
|
4462dc35f61856e0815d326da4202969e29f62a0
|
b8d806a3fbf69da270080af7e1d7553e764c26e5
|
/script.R
|
69ada0cc35c1469e88510fe304c354179f5e94e4
|
[] |
no_license
|
khaibaromari/time_check
|
748f5c7b751f0bf3853df45f698d673616cc2eb9
|
5e79a5fe93113f428d020372cbcc74bf107943d8
|
refs/heads/main
| 2023-06-11T00:27:03.248499
| 2021-06-29T09:34:35
| 2021-06-29T09:34:35
| 377,139,615
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,010
|
r
|
script.R
|
library(readxl)
library(openxlsx) # assumed provider of write.xlsx(); not specified in the original script
source("functions/time_check.R")
# initializing the minimum and maximum interview duration (in minutes) to be flagged
time_min <- 15
time_max <- 60
# reading the raw data set
df <- read_xlsx("input/data_frame.xlsx")
# time check based on start and end time
time_checked_df <- time_check(df, time_min, time_max)
# time check based on both audit files and start and end time
audit_time_checked_df <- time_check_audit(df, x_uuid = "_uuid", time_min, time_max,audit_dir_path = "audit_files/", today = "date")
# check the elapsed time between each interview
elapsed_time_between_ints <- time_btwn_ints(df ,device_id = "deviceid", start_col = "start", end_col = "end", village_col = "village", same_village_threshold = 3, diff_village_threshold = 10)
# exporting the result
write.xlsx(time_checked_df, "output/time_checked_df.xlsx")
write.xlsx(audit_time_checked_df, "output/audit_time_checked_df.xlsx")
write.xlsx(elapsed_time_between_ints, "output/elapsed_time_between_ints_checked_df.xlsx")
|
09378d98f9931bbcab8b94f9d38de6d7e850b0f4
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/hht/R/rendering_and_plotting.R
|
b0dcfca97d4f16a0f421b6c6101f91f96db35dda
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 42,378
|
r
|
rendering_and_plotting.R
|
# Plotting and data analysis functions
FTGramImage <- function(sig, dt, ft, time.span = NULL, freq.span = NULL, amp.span = NULL, blur = NULL, taper = 0.05, scaling = "none", grid=TRUE, colorbar=TRUE, backcol=c(0, 0, 0), colormap=NULL, pretty=FALSE, ...)
{
#Plots a Fourier spectrogram
#INPUTS
# SIG is the signal to analyze
# DT is the sample rate (must be constant)
# FT is the Fourier transform input parameters, adopted from Jonathan Lees' code in RSEIS
# FT$NFFT is the fft length
# FT$NS is the number of samples in a window
# FT$NOV is the number of samples to overlap
# TIME.SPAN is the time span to plot, NULL plots everything
# FREQ.SPAN is the frequency span to plot (<=max frequency in spectrogram), NULL plots everything up to the Nyquist frequency
# AMP.SPAN is the amplitude range to plot. NULL plots everything.
# BLUR is a list of parameters for a Gaussian image smoothing kernel, if desired. If not null then
# BLUR$SIGMA - Standard deviation of Gaussian kernel. If a 2 element vector, then the kernel has independent coordinate
# BLUR$BLEED - Whether to allow blur to bleed out of the domain of the image
# TAPER is the cosine taper factor (amount of the signal to apply the taper to, must be < 0.5)
# SCALING determines whether to apply a logarithmic (log), or square root (sqrt) scaling to the amplitude data
# GRID is a boolean asking whether to display grid lines
# COLORBAR is a boolean asking whether to plot an amplitude colorbar
# BACKCOL is a 3 element vector of RGB values for the background of the spectrogram, based on a 0 to 255 scale: [red, green, blue]
# COLORMAP is an R palette object determining how the spectrogram colors should look
# PRETTY is a boolean asking whether to adjust axis labels so that they're pretty (TRUE) or give the exactly specified time and frequency intervals (FALSE)
# OPTIONAL PARAMETERS
# TRACE.FORMAT is the format of the trace minima and maxima in sprintf format
# IMG.X.FORMAT is the format of the X axis labels of the image in sprintf format
# IMG.Y.FORMAT is the format of the Y axis labels of the image in sprintf format
# COLORBAR.FORMAT is the format of the colorbar labels in sprintf format
# CEX.LAB is the font size of the image axis labels
# CEX.COLORBAR is the font size of the colorbar
# CEX.TRACE is the font size of the trace axis labels
# IMG.X.LAB is the X - axis label of the image, it defaults to "time"
# IMG.Y.LAB is the Y - axis label of the image, it defaults to "frequency"
# MAIN gives the figure a title.
#OUTPUTS
# IMG is the spectrogram
opts = list(...)
if(!"img.x.lab" %in% names(opts))
{
opts$img.x.lab = "time"
}
if(!"img.y.lab" %in% names(opts))
{
opts$img.y.lab = "frequency"
}
if(is.null(time.span))
{
time.span=c(dt, length(sig) * dt)
}
if(time.span[2] > length(sig) * dt)
{
time.span[2]= length(sig) * dt
warning("The requested spectrogram is longer than the actual signal.")
}
if(is.null(freq.span))
{
freq.span=c(0, 1/(dt * 2))
}
if(freq.span[2] > 1 / (dt * 2))
{
freq.span[2] = 1 / (dt * 2)
warning("Requested maximum frequency is higher than the Nyquist frequency.")
}
sig = sig[(time.span[1]/dt):(time.span[2]/dt)]
tt = (seq_len(length(sig)) * dt) + time.span[1]
ev=EvolutiveFFT(sig, dt, ft, freq.span, taper) #Calculate the Fourier spectrogram
ev$tt = tt
if(is.null(amp.span))
{
amp.span = c(min(ev$z[ev$z>-Inf]), max(ev$z[ev$z<Inf]))
}
img.xvec = ev$x + time.span[1]
img.yvec = seq(freq.span[1], freq.span[2], by = ev$y[2] - ev$y[1])
img = list(z = array(0, dim = c(length(img.xvec), length(img.yvec))),
x = img.xvec, y = img.yvec)
img$z[,img.yvec >= min(ev$y) & img.yvec <= max(ev$y)] = ev$z[,ev$y >= freq.span[1] & ev$y <= freq.span[2]]
if(scaling == "ln") #Scale by natural log
{
img$z[img$z == 0] = NA
img$z = log(img$z)
amp.span <- log(amp.span)
}
if(scaling == "log") #Log 10 scale
{
img$z[img$z == 0] = NA
img$z = log10(img$z)
amp.span <- log10(amp.span)
}
if(scaling == "sqrt") #Take the square root
{
img$z = sqrt(img$z)
amp.span <- sqrt(amp.span)
}
trace = list()
trace$sig = ev$original.signal[ev$tt >= time.span[1] & ev$tt <= time.span[2]]
trace$tt = ev$tt[ev$tt >= time.span[1] & ev$tt <= time.span[2]]
window = ft$ns / (length(tt[tt >= min(img$x) & tt <= max(img$x)]))
HHTPackagePlotter(img, trace, amp.span, blur = blur, opts$img.x.lab, opts$img.y.lab, window = window, colormap = colormap, backcol = backcol, pretty = pretty, grid = grid, colorbar = colorbar, opts = opts)
invisible(img)
}
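# Illustrative usage sketch (not part of the original hht source; the values are
# arbitrary). The ft list follows the structure documented above (nfft, ns, nov),
# and the call assumes the rest of the package (EvolutiveFFT, HHTPackagePlotter)
# is available.
# dt <- 0.01
# tt <- seq(0, 10, by = dt)
# sig <- sin(2 * pi * 5 * tt) + 0.5 * sin(2 * pi * 20 * tt)
# ft <- list(nfft = 1024, ns = 512, nov = 450)
# FTGramImage(sig, dt, ft, freq.span = c(0, 30))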
HHRender <- function(hres, dt, dfreq, time.span = NULL, freq.span = NULL, scaling = "none", combine.imfs = TRUE, verbose = TRUE)
{
#Renders a spectrogram of EMD or Ensemble EMD (EEMD) results.
#INPUTS
# HRES is a matrix of data generated by EEMD.COMPILE or the output of HHTRANSFORM
    #  it represents a set of all time/frequency/amplitude points from the given EEMD run
# DT is the time resolution of the spectrogram. Currently, if there is a hres$dt field, DT must be greater than or equal to hres$dt.
# this prevents subsample resolution.
# DFREQ is the frequency resolution of the spectrogram
# TIME.SPAN is the portion of the signal to include. NULL means the whole signal.
# FREQ.SPAN is the frequency range to calculate the spectrum over c(MIN, MAX). NULL means capture the full frequency spectrum of the signal.
# SCALING determines whether to plot frequency as log 10 ("log") or linear ("none")
# COMBINE.IMFS will combine all the IMFs into one image, saving space and time for HHGramImage if TRUE. If FALSE, keep them separate for individual plotting options for HHGramImage.
# VERBOSE prints out status messages (i.e. IMF 1 COMPLETE!)
#OUTPUTS
# HGRAM is a spectrogram matrix ready to be plotted by HHGRAM.IMAGE
#Danny Bowman
#UNC Chapel Hill
hgram = hres
if(scaling == "log")
{
hres$hinstfreq = log10(hres$hinstfreq)
}
else if (scaling != "none")
{
warning("Did not recognize scaling request \"", scaling, ".\" Reverting to linear frequency (scaling = \"none\").")
}
#Deal with logarithms of 0
hres$hamp[hres$hinstfreq == -Inf] = 0
hres$hinstfreq[hres$hinstfreq == -Inf] = 0
if(is.null(freq.span))
{
freq.span = c(min(hres$hinstfreq), max(hres$hinstfreq))
}
if(!"trials" %in% names(hres))
{
hres$trials=1
hres$hinstfreq = array(hres$hinstfreq, dim = c(dim(hres$hinstfreq), 1))
hres$hamp = array(hres$hamp, dim = c(dim(hres$hamp), 1))
}
if("dt" %in% names(hres))
{
if(hres$dt > dt) #We don't want to have to interpolate between samples
{
warning(paste("The time resolution", sprintf("%.2e", dt), "is lower than the sample rate", sprintf("%.2e", hres$dt), "of the time series. This may introduce time gaps in the spectrogram."))
}
if("tt" %in% names(hres))
{
warning("Input data has both DT (sample rate) and TT (sample times) components. Component TT will be used to calculate the spectrogram")
hgram$tt = hres$tt
}
else
{
hgram$tt = seq_len(length(hres$original.signal)) * hres$dt
}
}
if(is.null(time.span))
{
time.span = c(min(hgram$tt), max(hgram$tt))
}
if(!(("tt" %in% names(hres)) | ("dt" %in% names(hres))))
{
warning("Neither DT (sample rate) nor TT (sample times) were specified in the input data. Assuming DT is 1...")
hgram$tt = seq_len(length(hres$original.signal))
}
if(time.span[2]>max(hgram$tt))
{
time.span[2]=max(hgram$tt)
warning("Requested time window is longer than the actual signal.")
}
t.ind = which(hgram$tt >= time.span[1] & hgram$tt <= time.span[2])
hgram$tt = hgram$tt[t.ind]
hres$hinstfreq = array(hres$hinstfreq[t.ind,,], dim = c(length(hgram$tt), hres$nimf, hres$trials))
hres$hamp = array(hres$hamp[t.ind,,], dim = c(length(hgram$tt), hres$nimf, hres$trials))
hres$original.signal = hres$original.signal[t.ind]
grid = list()
grid$x = hgram$tt
grid$y = seq(from = freq.span[1], to = freq.span[2] + dfreq, by = dfreq)
if(combine.imfs)
{
imf.dim = 1
}
else{
imf.dim = hres$nimf
}
hgram$z=array(0,dim=c(length(grid$x),length(grid$y), imf.dim))
hgram$cluster=hgram$z #Shows how many times a given grid node has data.
for(i in seq(hres$nimf))
{
x = array(c(rep(hgram$tt,hres$trials), hres$hinstfreq[,i,]), dim = c(length(hgram$tt)*hres$trials, 2))
imf.img = fields::as.image(hres$hamp[,i,], grid = grid, x = x)
imf.img$z[is.na(imf.img$z)] = 0
imf.img$weights[is.na(imf.img$weights)] = 0
if(combine.imfs)
{
hgram$z[,,1] = hgram$z[,,1] + imf.img$z
hgram$cluster[,,1] = hgram$cluster[,,1] + imf.img$weights
}
else{
hgram$z[,,i] = imf.img$z
hgram$cluster[,,i] = imf.img$weights
}
if(verbose)
{
print(paste("IMF", i, "COMPLETE!"))
}
}
hgram$combine.imfs = combine.imfs
hgram$hinstfreq = hres$hinstfreq
hgram$hamp = hres$hamp
hgram$original.signal = hres$original.signal
hgram$x = imf.img$x
hgram$y = imf.img$y
hgram$dfreq=dfreq
hgram$dt=hres$dt
hgram$scaling = scaling
invisible(hgram) #Return the spectrogram structure.
}
HHSpectrum <- function(hres, dfreq, freq.span = NULL, time.span = NULL, scaling = "none", verbose = TRUE)
{
#Calculate the Hilbert spectrogram of a signal contained in HRES (returned by HHTRANSFORM or EEMD.COMPILE)
#INPUTS
# HRES is a matrix of data generated by EEMD.COMPILE or the output of HHTRANSFORM
    #  it represents a set of all time/frequency/amplitude points from the given EEMD run
# DFREQ is the frequency resolution of the spectrogram
# FREQ.SPAN is the frequency range to calculate the spectrum over c(MIN, MAX). NULL means capture the full frequency spectrum of the signal.
# TIME.SPAN is the time span to calculate the spectrum over c(MIN, MAX). NULL means use the entire signal
# SCALING determines whether to calculate frequency as log 10 ("log") or linear ("none")
# VERBOSE prints out status messages (i.e. IMF 1 COMPLETE!)
#OUTPUTS
# HSPEC is the Hilbert spectrum of the signal, separated by IMF.
if(is.null(time.span))
{
dt = max(hres$tt) - min(hres$tt)
}
else {
dt = time.span[2] - time.span[1]
}
hgram = HHRender(hres, dt, dfreq, freq.span = freq.span, time.span = time.span, scaling = scaling, combine.imfs = FALSE, verbose = TRUE)
amps = array(0, dim = dim(hgram$z)[2:3])
for(i in seq(hres$nimf))
{
amps[, i] = apply(hgram$z[, , i], 2, sum)
}
hspec = list(amplitude = amps, frequency = hgram$y, original.signal = hgram$original.signal, dt = dt, tt=hres$tt, dfreq = dfreq)
invisible(hspec)
}
HHGramImage <- function(hgram,time.span = NULL,freq.span = NULL, amp.span = NULL, blur = NULL, clustergram = FALSE, cluster.span=NULL, imf.list = NULL, fit.line = FALSE, scaling = "none", grid=TRUE, colorbar=TRUE, backcol=c(0, 0, 0), colormap=NULL, pretty=FALSE, ...)
{
#Plots a spectrogram of the EEMD processed signal as an image.
#INPUTS
# HGRAM is the subsetted spectrogram from HH.RENDER.
# HGRAM$X is time
# HGRAM$Y is frequency
# HGRAM$Z is amplitude normalized to trials
# HGRAM$CLUSTER is a matrix containing integer values corresponding to the number of times a signal was recorded in a given spectrogram cell during EEMD
# The more often the signal is recorded, the more likely it is that the signal is real and not noise
# HGRAM$TRIALS is the number of times EEMD was run to generate signal
# HGRAM$ORIGINAL.SIGNAL is the original seismogram (without added noise)
# HGRAM$TT is the sample times
# TIME.SPAN is the time span to plot, NULL plots everything
# FREQ.SPAN is the frequency span to plot (<=max frequency in spectrogram), NULL plots everything
# AMP.SPAN is the amplitude span to plot, everything below is set to black, everything above is set to max color, NULL scales to range in signal
# BLUR is a list of parameters for a Gaussian image smoothing kernel, if desired. If not null then
    #      BLUR$SIGMA - Standard deviation of Gaussian kernel. If a 2 element vector, then the kernel has independent standard deviations along each coordinate (time and frequency)
# BLUR$BLEED - Whether to allow blur to bleed out of the domain of the image
# CLUSTERGRAM tells the code to plot the signal amplitude (FALSE) or the number of times data occupies a given pixel (TRUE).
# CLUSTER.SPAN plots only the parts of the signal that have a certain number of data points per pixel [AT LEAST, AT MOST] this only applies to EEMD with multiple trials.
# IMF.LIST is a list of IMFs to plot on the spectrogram. If NULL, plot all IMFs.
    #    FIT.LINE can be set to TRUE to plot the sum of the IMFs shown in the spectrogram as a red line against the original trace
# SCALING determines whether to apply a logarithmic (log), or square root (sqrt) scaling to the amplitude data, default is "none"
# GRID is a boolean asking whether to display grid lines
# COLORBAR is a boolean asking whether to plot an amplitude colorbar
# BACKCOL is a 3 element vector of RGB values for the background of the spectrogram, based on a 0 to 255 scale: [red, green, blue]
# COLORMAP is an R palette object determining how the spectrogram colors should look
# PRETTY is a boolean asking whether to adjust axis labels so that they're pretty (TRUE) or give the exactly specified time and frequency intervals (FALSE)
#OPTIONAL PARAMETERS
# TRACE.FORMAT is the format of the trace minima and maxima in sprintf format
# IMG.X.FORMAT is the format of the X axis labels of the image in sprintf format
# IMG.Y.FORMAT is the format of the Y axis labels of the image in sprintf format
# COLORBAR.FORMAT is the format of the colorbar labels in sprintf format
# CEX.LAB is the font size of the image axis labels
# CEX.COLORBAR is the font size of the colorbar
# CEX.TRACE is the font size of the trace axis labels
# IMG.X.LAB is the X - axis label of the image, it defaults to "time"
# IMG.Y.LAB is the Y - axis label of the image, it defaults to "frequency"
#OUTPUTS
# IMG is the spectrogram returned as an image
opts = list(...)
if(!"img.x.lab" %in% names(opts))
{
opts$img.x.lab = "time"
}
if(!"img.y.lab" %in% names(opts))
{
opts$img.y.lab = "frequency"
}
#Subset by IMFs
if(is.null(imf.list))
{
if(hgram$combine.imfs)
{
imf.list = seq(1)
}
else{
imf.list = seq(hgram$nimf)
}
}
else
{
if(hgram$combine.imfs)
{
warning("The IMFs were combined when HHRender was run on this data (combine.imfs = TRUE). Individual IMF spectrograms cannot be plotted - the image you see is the combined IMFs. Rerun HHRender with combined.imfs = FALSE if you want the ability to plot single IMFs using HHGramImage.")
imf.list = seq(1)
}
if(max(imf.list) > hgram$nimf)
{
warning("Requested more IMFs than are present in the actual EMD results!")
imf.list = imf.list[imf.list <= hgram$nimf]
}
}
if(is.null(time.span))
{
time.span=c(min(hgram$tt), max(hgram$tt))
}
if(time.span[2]>max(hgram$tt))
{
time.span[2]=max(hgram$tt)
warning("Requested time window is longer than the actual signal.")
}
if(is.null(freq.span))
{
freq.span=c(min(hgram$y), max(hgram$y))
}
if(freq.span[2]>max(hgram$y))
{
freq.span[2]=max(hgram$y)
warning("Requested frequency window is higher than maximum frequency in the spectrogram.")
}
if(fit.line)
{
if(hgram$combine.imfs)
{
warning("User requested the FIT.LINE option but the spectrogram data indicates that the IMFs were combined when HHRender was run (combine.imfs = TRUE). The IMF sum will still be plotted but the spectrogram will display all the IMFs in the signal.")
}
fit.line = rowSums(hgram$averaged.imfs[hgram$x >= time.span[1] & hgram$x <= time.span[2], imf.list])
}
else
{
fit.line = NULL
}
img = list()
img$x = hgram$x[hgram$x >= time.span[1] & hgram$x <= time.span[2]]
img$y = hgram$y[hgram$y >= freq.span[1] & hgram$y <= freq.span[2]]
if(hgram$combine.imfs)
{
cluster = hgram$cluster[hgram$x >= time.span[1] & hgram$x <= time.span[2], hgram$y >= freq.span[1] & hgram$y <= freq.span[2],imf.list]
}
else{
cluster = apply(hgram$cluster[hgram$x >= time.span[1] & hgram$x <= time.span[2], hgram$y >= freq.span[1] & hgram$y <= freq.span[2],imf.list], c(1, 2), sum)
}
#Determine if we are plotting clustering or amplitudes
if(clustergram)
{
img$z = cluster
}
else
{
if(hgram$combine.imfs)
{
img$z = hgram$z[hgram$x >= time.span[1] & hgram$x <= time.span[2], hgram$y >= freq.span[1] & hgram$y <= freq.span[2],imf.list]
}
else
{
img$z = apply(hgram$z[hgram$x >= time.span[1] & hgram$x <= time.span[2], hgram$y >= freq.span[1] & hgram$y <= freq.span[2],imf.list], c(1, 2), sum)
}
}
if(!is.null(cluster.span))
{
img$z[cluster <= cluster.span[1] | cluster >= cluster.span[2]] = 0
}
if(is.null(amp.span))
{
if(scaling == "log")
{
amp.span = c(min(img$z[img$z>0]), max(img$z))
}
else
{
amp.span = c(min(img$z), max(img$z))
}
}
if(scaling == "log") #Log 10 scale
{
img$z = log10(img$z)
amp.span = log10(amp.span)
}
if(scaling == "sqrt") #Take the square root
{
img$z = sqrt(img$z)
amp.span = sqrt(amp.span)
}
trace = list()
trace$sig = hgram$original.signal[hgram$tt >= time.span[1] & hgram$tt <= time.span[2]]
trace$tt = hgram$tt[hgram$tt >= time.span[1] & hgram$tt <= time.span[2]]
HHTPackagePlotter(img, trace, amp.span, opts$img.x.lab, opts$img.y.lab, blur = blur, fit.line = fit.line, colormap = colormap, backcol = backcol, pretty = pretty, grid = grid, colorbar = colorbar, opts = opts)
invisible(img)
}
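# Illustrative usage sketch (not run): "hgram" is assumed to be the spectrogram
# list produced by HHRender, as described in the comments above; the span and
# scaling values shown here are hypothetical.
# HHGramImage(hgram, time.span = c(0, 10), freq.span = c(0, 25),
#     scaling = "sqrt", imf.list = 1:3, pretty = TRUE)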
HHSpecPlot <- function(hspec, freq.span = NULL, scaling = "none", imf.list = NULL, show.total = TRUE, show.fourier = FALSE, scale.fourier = FALSE, show.imfs = FALSE, legend = TRUE, ...)
{
#Plot the Hilbert spectrum, optionally as individual IMFs, optionally with the scaled Fourier spectrum for comparison
#INPUTS
# HSPEC is the Hilbert spectrogram returned by HHSPECTRUM
# FREQ.SPAN is the frequencies to plot, NULL means plot everything
# SCALING whether to take the base 10 logarithm of amplitude ("log") or square root of amplitude ("sqrt") or do nothing ("none")
# IMF.LIST means only include these IMFS, NULL includes all of them
# SHOW.TOTAL means show the sum of the IMF Hilbert spectra
# SHOW.IMFS means plot individual IMFs
# SHOW.FOURIER determines whether you want a Fourier spectrum for comparison (TRUE) or not (FALSE)
# SCALE.FOURIER scales the Fourier spectrum line to the Hilbert spectrum line if TRUE. Defaults to FALSE.
# LEGEND asks whether to plot a legend. Additional options will place the legend where you want it.
#ADDITIONAL OPTIONS
# XLAB is the X axis label
# YLAB is the Y axis label
# LEGEND.LOCATION determines where to put the legend.
# TOTAL.COL is the color of the ensemble Hilbert spectrum
# TOTAL.LWD is the line weight of the ensemble Hilbert spectrogram
# TOTAL.LTY is the line type of the ensemble Hilbert spectrogram
# IMF.COLS sets the color of each IMF - a vector with length IMF.LIST
# IMF.LWD is the line weight for the IMFs as a vector with length IMF.LIST
# IMF.LTY is the line type for the IMFs as a vector with length IMF.LIST
# FOURIER.COL is the color of the Fourier spectrum line
# FOURIER.LTY is the line type of the Fourier spectrum line
# FOURIER.LWD is the line weight of the Fourier spectrum line
if(!(show.total | show.imfs | show.fourier))
{
stop("Nothing to plot! Set at least one of SHOW.TOTAL, SHOW.IMFS, or SHOW.FOURIER to TRUE.")
}
opts = list(...)
if(!(scaling == "log" | scaling == "sqrt" | scaling == "none"))
{
warning(paste("Did not recognize requested scaling: \"", scaling, "\". Options are \"log\" (base 10 logarithm), \"sqrt\" (square root), or \"none\""))
scaling = "none"
}
if(is.null(freq.span))
{
freq.span = c(0, max(hspec$frequency))
}
hspec$amplitude = hspec$amplitude[hspec$frequency >= freq.span[1] & hspec$frequency<= freq.span[2],]
hspec$frequency = hspec$frequency[hspec$frequency >= freq.span[1] & hspec$frequency<= freq.span[2]]
if(!"legend.location" %in% names(opts) & legend)
{
opts$legend.location = "topright"
}
if(!"total.col" %in% names(opts))
{
opts$total.col = "red"
}
if(!"total.lwd" %in% names(opts))
{
opts$total.lwd = 1
}
if(!"total.lty" %in% names(opts))
{
opts$total.lty = 1
}
if(!"xlab" %in% names(opts))
{
opts$xlab = "frequency"
}
if(!"ylab" %in% names(opts))
{
if(scaling != "none")
{
opts$ylab = paste(scaling, "amplitude")
}
else
{
opts$ylab = "amplitude"
}
}
if(is.null(imf.list))
{
imf.list = seq(dim(hspec$amplitude)[2])
}
if(!"imf.cols" %in% names(opts))
{
if(show.total)
{
opts$imf.cols = rainbow(length(imf.list), start = 1/6, end = 5/6)
}
else
{
opts$imf.cols = rainbow(length(imf.list), start = 0, end = 5/6)
}
}
if(!"imf.lwd" %in% names(opts))
{
opts$imf.lwd = rep(1, length(imf.list))
}
if(!"imf.lty" %in% names(opts))
{
opts$imf.lty = rep(1, length(imf.list))
}
if(!"fourier.col" %in% names(opts))
{
opts$fourier.col = "black"
}
if(!"fourier.lty" %in% names(opts))
{
opts$fourier.lty = 1
}
if(!"fourier.lwd" %in% names(opts))
{
opts$fourier.lwd = 1
}
if(!"main" %in% names(opts))
{
opts$main = ""
}
pmin = Inf
pmax = -Inf
if(show.imfs)
{
imf.amp = hspec$amplitude[, imf.list]
pmin = min(imf.amp[imf.amp>0])
pmax = max(imf.amp)
}
if(show.total)
{
if(length(imf.list)>1)
{
total.amp = apply(hspec$amplitude[,imf.list], 1, sum)
}
else
{
total.amp = hspec$amplitude[,imf.list]
}
if(max(total.amp) > pmax)
{
pmax = max(total.amp[total.amp > 0])
}
if(min(total.amp) < pmin)
{
pmin = min(total.amp[total.amp > 0])
}
}
if(show.fourier)
{
nsamp = length(hspec$original.signal)
fourier.freqs = seq(0, 1/(mean(diff(hspec$tt)) * 2), length.out = floor(nsamp/2))
fspec = Mod(fft(hspec$original.signal - mean(hspec$original.signal)))[1:floor(nsamp/2)][fourier.freqs >= freq.span[1] & fourier.freqs <= freq.span[2]]
if(scale.fourier)
{
fspec = fspec * pmax/max(fspec)
}
if(max(fspec) > pmax)
{
pmax = max(fspec)
}
if(min(fspec[fspec > 0]) < pmin)
{
pmin = min(fspec[fspec > 0])
}
}
if(scaling == "log")
{
pmax = log10(pmax)
pmin = log10(pmin)
}
if(scaling == "sqrt")
{
pmax = sqrt(pmax)
pmin = sqrt(pmin)
}
plot(c(min(hspec$frequency), max(hspec$frequency)), c(pmin, pmax), type = "n", xlab = opts$xlab, ylab = opts$ylab, main = opts$main)
if(show.imfs)
{
for(k in seq_len(length(imf.list)))
{
amp = imf.amp[,k]
if(scaling == "log")
{
amp = log10(amp)
}
if(scaling == "sqrt")
{
amp = sqrt(amp)
}
lines(hspec$frequency[amp > -Inf], amp[amp > -Inf], col = opts$imf.cols[k], lwd = opts$imf.lwd[k], lty = opts$imf.lty[k])
}
}
if(show.total)
{
if(scaling == "log")
{
total.amp = log10(total.amp)
}
if(scaling == "sqrt")
{
total.amp = sqrt(total.amp)
}
lines(hspec$frequency, total.amp, lwd = opts$total.lwd, lty = opts$total.lty, col = opts$total.col)
}
if(show.fourier)
{
if(scaling == "log")
{
fspec = log10(fspec)
}
if(scaling == "sqrt")
{
fspec = sqrt(fspec)
}
lines(fourier.freqs[fourier.freqs >= freq.span[1] & fourier.freqs <= freq.span[2]], fspec,
lty = opts$fourier.lty, lwd = opts$fourier.lwd, col = opts$fourier.col)
}
if(legend)
{
legend.labs = c()
legend.cols = c()
legend.lty = c()
legend.lwd = c()
if(show.total)
{
legend.labs = c(legend.labs, "Total Hilbert")
legend.cols = c(legend.cols, opts$total.col)
legend.lty = c(legend.lty, opts$total.lty)
legend.lwd = c(legend.lwd, opts$total.lwd)
}
if(show.imfs)
{
legend.labs = c(legend.labs, paste(rep("IMF", length(imf.list)), imf.list))
legend.cols = c(legend.cols, opts$imf.cols)
legend.lty = c(legend.lty, opts$imf.lty)
legend.lwd = c(legend.lwd, opts$imf.lwd)
}
if(show.fourier)
{
legend.labs = c(legend.labs, "Fourier")
legend.cols = c(legend.cols, opts$fourier.col)
legend.lty = c(legend.lty, opts$fourier.lty[1])
legend.lwd = c(legend.lwd, opts$fourier.lwd[1])
}
legend(opts$legend.location, legend = legend.labs, lty = legend.lty, lwd = legend.lwd, col = legend.cols)
}
}
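# Illustrative usage sketch (not run): "hspec" is assumed to be the marginal
# spectrum list built by HHSpectrum above; the options shown are hypothetical.
# HHSpecPlot(hspec, freq.span = c(0, 25), scaling = "log",
#     show.imfs = TRUE, show.fourier = TRUE, scale.fourier = TRUE)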
HHTPackagePlotter <- function(img, trace, amp.span, img.x.lab, img.y.lab, blur = NULL, fit.line = NULL, window = NULL, colormap = NULL, backcol = c(0, 0, 0), pretty = FALSE, grid = TRUE, colorbar = TRUE, opts = list())
{
#Plots images and time series for Hilbert spectra, Fourier spectra, and cluster analysis.
#This function is internal to the package and users should not be calling it.
#
#INPUTS
# IMG is the image portion of the figure
# IMG$X is the columns
# IMG$Y is the rows
# IMG$Z is the image data
# TRACE is the time series to plot at the top of the figure
# TRACE$SIG is the time series
# TRACE$TT is the time of each sample
# AMP.SPAN are the maximum and minimum values of the image.
# IMG.X.LAB is the label of the X axis of the image
# IMG.Y.LAB is the label of the Y axis of the image
# BLUR is a list of parameters for a Gaussian image smoothing kernel, if desired. If not null then
# BLUR$SIGMA - Standard deviation of the Gaussian kernel. If a 2 element vector, the kernel has an independent standard deviation for each coordinate
# BLUR$BLEED - Whether to allow blur to bleed out of the domain of the image
# FIT.LINE, if not NULL, is a numeric vector giving the sum of the plotted IMFs; it is drawn as a red line over the time series plot and is assumed to share the trace's sample times
# WINDOW is the length of the Fourier window, if applicable
# COLORMAP is the colormap to use for the image
# BACKCOL is the background color of the image
# PRETTY allows for nice axis labels
# GRID draws a grid on the image
# COLORBAR puts a colorbar corresponding to the range of values on the image
#
# OPTS OTHER POSSIBLE OPTIONS
# OPTS$TRACE.FORMAT is the format of the trace minima and maxima in sprintf format
# OPTS$IMG.X.FORMAT is the format of the X axis labels of the image in sprintf format
# OPTS$IMG.Y.FORMAT is the format of the Y axis labels of the image in sprintf format
# OPTS$COLORBAR.FORMAT is the format of the colorbar labels in sprintf format
# OPTS$CEX.LAB is the font size of the image axis labels
# OPTS$CEX.COLORBAR is the font size of the colorbar
# OPTS$CEX.TRACE is the font size of the trace axis labels
# OPTS$TRACE.COL is the color of the trace
# OPTS$IMF.SUM.COL is the color of the IMF sums (if shown)
# OPTS$PRETTY.X.N is the number of pretty divisions on the X axis
# OPTS$PRETTY.Y.N is the number of pretty divisions on the Y axis
#Configure parameters
if(!"trace.format" %in% names(opts))
{
opts$trace.format = "%.1e"
}
if(!"img.x.format" %in% names(opts))
{
opts$img.x.format = "%.2f"
}
if(!"img.y.format" %in% names(opts))
{
opts$img.y.format = "%.2f"
}
if(!"colorbar.format" %in% names(opts))
{
opts$colorbar.format = "%.1e"
}
if(!"cex.main" %in% names(opts))
{
opts$cex.main = 1
}
if(!"cex.trace" %in% names(opts))
{
opts$cex.trace = opts$cex.main * 0.75
}
if(!"cex.colorbar" %in% names(opts))
{
opts$cex.colorbar = opts$cex.main * 0.75
}
if(!"cex.lab" %in% names(opts))
{
opts$cex.lab = opts$cex.main
}
if(!"fit.line.col" %in% names(opts))
{
opts$fit.line.col = "red"
}
if(!"trace.col" %in% names(opts))
{
opts$trace.col = "black"
}
if(!"pretty.x.n" %in% names(opts))
{
opts$pretty.x.n = 10
}
if(!"pretty.y.n" %in% names(opts))
{
opts$pretty.y.n = 5
}
if(pretty)
{ #Get nice divisions
pretty.x = pretty(img$x, n=opts$pretty.x.n)
pretty.y = pretty(img$y, n=opts$pretty.y.n)
#pretty.x = pretty.x[pretty.x <= max(img$x) & pretty.x >= min(img$x)]
#pretty.y = pretty.y[pretty.y <= max(img$y) & pretty.y >= min(img$y)]
if(!is.null(window))
{
window = window * ((max(img$x) - min(img$x))/(max(pretty.x) - min(pretty.x)))
}
img$z = img$z[img$x <= max(pretty.x) & img$x >= min(pretty.x), img$y <= max(pretty.y) & img$y >= min(pretty.y)]
img$x = img$x[img$x <= max(pretty.x) & img$x >= min(pretty.x)]
img$y = img$y[img$y <= max(pretty.y) & img$y >= min(pretty.y)]
img.x.labels=sprintf(opts$img.x.format, pretty.x)
img.y.labels=sprintf(opts$img.y.format, pretty.y)
trace$sig = trace$sig[trace$tt >= min(pretty.x) & trace$tt<= max(pretty.x)]
trace$tt = trace$tt[trace$tt >= min(pretty.x) & trace$tt<= max(pretty.x)]
cat("Adjusting Time and Frequency limits to nice looking numbers (the \"pretty\" option is currently set to TRUE)\n")
}
else
{
img.x.labels=sprintf(opts$img.x.format, seq(min(img$x), max(img$x), length.out = 10))
img.y.labels=sprintf(opts$img.y.format, seq(min(img$y), max(img$y), length.out=5))
}
if(is.null(colormap))
{
colormap=rainbow(500,start=0,end=5/6)
}
colorbins = length(colormap)
plot(c(-0.15,1),c(-0.15,1),type="n",axes=FALSE,xlab="", ylab="") # Set up main plot window
#Plot TRACE
sig = trace$sig - mean(trace$sig)
trace.y=0.75
trace.x=0
trace.yspan=0.10
trace.xspan=0.9
trace.at=seq(trace.y,trace.y+trace.yspan,length.out=2)
trace.labels=c(min(trace$sig), max(trace$sig))
trace.scale=trace.yspan/(max(sig)-min(sig))
tt.scale=trace.xspan/(max(trace$tt) - min(trace$tt))
axis(4,pos=trace.x+trace.xspan,at=trace.at, labels=c("",""), cex.axis=opts$cex.trace)
lines((trace$tt - min(trace$tt)) * tt.scale + trace.x, trace.y + (sig - min(sig)) * trace.scale, col = opts$trace.col)
if(!is.null(fit.line))
{
lines(((trace$tt - min(trace$tt))*tt.scale+trace.x), (trace.y + (fit.line - min(sig)) * trace.scale), col = opts$fit.line.col)
}
rect(trace.x, trace.y, trace.x+trace.xspan, trace.y+trace.yspan)
#Plot IMAGE
image.y=0
image.x=0
image.yspan=0.75
image.xspan=0.9
pixel.width = image.xspan/(length(img$x) * 2)
pixel.height = image.yspan/(length(img$y) * 2)
image.xvec=seq(image.x + pixel.width, image.x+image.xspan - pixel.width, length.out=length(img$x))
image.yvec=seq(image.y + pixel.height, image.y+image.yspan - pixel.height, length.out=length(img$y))
img.x.at=seq(image.x,image.x+image.xspan,length.out=length(img.x.labels))
img.y.at=seq(image.y,image.y+image.yspan, length.out=length(img.y.labels))
rect(image.x,image.y,image.x+image.xspan,image.y+image.yspan,col=rgb(red=backcol[1], green=backcol[2], blue=backcol[3], maxColorValue=255))
#Add blur, if requested
if(!is.null(blur)) {
if(!"sigma" %in% names(blur)) {
stop("Please provide a standard deviation value when using the \"blur\" option.")
} else {
if("bleed" %in% names(blur)) {
bleed <- blur$bleed
} else {
bleed <- TRUE
}
}
tmp.im <- spatstat::as.im(list(x = image.xvec, y = image.yvec, z = as.matrix(img$z)))
z <- t(spatstat::blur(tmp.im, sigma = blur$sigma, bleed = bleed)$v)
} else {
z <- img$z
}
z[z<amp.span[1]] = NA
z[z>amp.span[2]] = amp.span[2]
z[z == 0] = NA
image(image.xvec,image.yvec, z, zlim = amp.span, col=colormap,add=TRUE)
axis(2, pos=image.x, at=img.y.at,labels=img.y.labels, cex.axis=opts$cex.lab)
axis(1,pos=image.y, at=img.x.at,labels=img.x.labels, cex.axis=opts$cex.lab)
#Plot Fourier window, if applicable
if(!is.null(window))
{
rwidth = trace.xspan * window
rect(trace.x + trace.xspan - rwidth, trace.y + trace.yspan, trace.x + trace.xspan, trace.y + trace.yspan + 0.01, col = "black")
}
#Plot GRID
if(grid)
{
line.color=rgb(red=100, green=100, blue=100, maxColorValue=255)
line.type=3
for(k in 2:(length(img.x.at)-1))
{
lines(c(img.x.at[k], img.x.at[k]), c(image.y, trace.y+trace.yspan), col=line.color, lty=line.type)
}
for(k in 2:(length(img.y.at)-1))
{
lines(c(image.x, image.x+image.xspan), c(img.y.at[k], img.y.at[k]), col=line.color, lty=line.type)
}
}
#Plot COLORBAR
if(colorbar)
{
color.x=image.x+image.xspan+0.015
color.xspan=0.025
color.y=image.y+image.yspan-0.20
color.yspan=0.10
color.xvec=c(color.x,color.x+color.xspan)
color.yvec=seq(color.y, color.y+color.yspan, length.out=colorbins)
color.at=seq(color.y,color.y+color.yspan,length.out=2)
colorbar.matrix=array(seq_len(colorbins),dim=c(1, colorbins))
image(color.xvec, color.yvec, colorbar.matrix, col=colormap, axes=FALSE, add=TRUE)
}
#Plot TEXT
text(trace.x + trace.xspan + 0.03, trace.y, srt=90, sprintf(opts$trace.format,trace.labels[1]), cex=opts$cex.trace)
text(trace.x + trace.xspan + 0.03, trace.y+trace.yspan, srt=90, sprintf(opts$trace.format, trace.labels[2]), cex=opts$cex.trace)
text(image.x-0.095, image.y+image.yspan/2, srt=90, img.y.lab, cex=opts$cex.lab)
text(image.x+image.xspan/2, image.y-0.1, img.x.lab, cex=opts$cex.lab)
if("main" %in% names(opts))
{
text(trace.x+trace.xspan/2, trace.y+trace.yspan+0.05,opts$main, cex=opts$cex.main)
}
if(colorbar)
{
text(color.x+0.015, color.y-0.0125, sprintf(opts$colorbar.format, amp.span[1]), cex=opts$cex.colorbar)
text(color.x+0.015, color.y+color.yspan+0.0125, sprintf(opts$colorbar.format, amp.span[2]), cex=opts$cex.colorbar)
}
}
PlotIMFs <-function(sig, time.span = NULL, imf.list = NULL, original.signal = TRUE, residue = TRUE, fit.line=FALSE, lwd=1, cex=1, ...)
{
#Better IMF plotter
#This function plots IMFs on the same plot so they can be checked for mode mixing or other problems.
#It plots shifted traces in a single window
#INPUTS
# SIG is the signal data structure returned by EEMD or EMD analysis
# Note that SIG$AVERAGED.IMFS will be plotted instead of SIG$IMF, likewise SIG$AVERAGED.RESIDUE takes precedence
# over SIG$RESIDUE, if both exist.
# SIG$IMF is a N by M array where N is the length of the signal and M is the number of IMFs
# SIG$ORIGINAL.SIGNAL is the original signal before EEMD
# SIG$RESIDUE is the residual after EMD
# SIG$DT is the sample rate
# TIME.SPAN is a 2 element vector giving the time range to plot
# IMF.LIST is the IMFs to plot
# ORIGINAL.SIGNAL is a boolean asking if you are going to plot the original signal also (defaults to be on top)
# RESIDUE is a boolean asking if you are going to plot the residual (defaults to be on bottom)
# FIT.LINE is a boolean asking if you want to plot a line showing the sum of IMFs on top of the original signal (to check how the selected IMFs reconstruct the original signal)
# LWD is the line weight (for plotting figures)
# CEX is the size of text (for plotting figures)
# ... other parameters to pass to main plotting function
opts <- list(...)
if(!"xlab" %in% names(opts)) {
opts$xlab <- "Time (s)"
}
if(!"ylab" %in% names(opts)) {
opts$ylab <- ""
}
if(is.null(time.span))
{
time.span = c(min(sig$tt), max(sig$tt))
}
if(is.null(imf.list))
{
imf.list = 1:sig$nimf
}
if("averaged.imfs" %in% names(sig))
{
sig$imf=sig$averaged.imfs
}
if("averaged.residue" %in% names(sig))
{
sig$residue=sig$averaged.residue
}
time.ind = which(sig$tt >= time.span[1] & sig$tt <= time.span[2])
tt = sig$tt[time.ind]
plot(c(0, 1), c(0, 1), type="n", axes=FALSE, xlab=opts$xlab, ylab=opts$ylab, cex.lab=cex)
yax.labs=c()
snum=length(imf.list)+residue+original.signal
sp=1/snum # Spacing of subplots
if(original.signal)
{
snum=snum+1
os=sig$original.signal[time.ind]-mean(sig$original.signal[time.ind])
scale=max(abs(os))
}
else
{
scale=max(abs(sig$imf[time.ind,imf.list]))
}
if(residue)
{
snum=snum+1
res=sig$residue[time.ind]-mean(sig$residue[time.ind])
res=res*(sp/(2*scale))
yax.labs=append(yax.labs,"Residue")
}
trace.pos=sp/2 #Where the trace starts on the plot
imfs=sig$imf*(sp/(scale*2))
ts=(tt-min(tt))*(1/(time.span[2]-time.span[1]))
if(residue)
{
lines(ts, res+trace.pos, lwd=lwd)
trace.pos=trace.pos+sp
}
for(k in rev(imf.list))
{
lines(ts, imfs[time.ind,k]+trace.pos, lwd=lwd)
trace.pos=trace.pos+sp
yax.labs=append(yax.labs, paste("IMF",k))
}
if(original.signal)
{
lines(ts, os*(sp/(2*scale))+trace.pos, lwd=lwd)
yax.labs=append(yax.labs,"Signal")
if(fit.line)
{
if(length(imf.list)>1)
{
fline=rowSums(imfs[time.ind,imf.list])
}
else
{
fline=imfs[time.ind,imf.list]
}
if(residue)
{
fline=fline+res
}
lines(ts, fline+trace.pos, lwd=lwd, col="red")
}
}
xax.labs=pretty(seq(min(tt), max(tt), length.out=11))
axis(1, pos=0, at=seq(0,1, length.out=length(xax.labs)), labels=xax.labs, cex.axis=cex)
axis(2, pos=0, at=seq(sp/2, 1, by=sp), labels=yax.labs, cex.axis=cex)
segments(c(0,0,1, 0), c(0, 1, 1, 0), c(0,1, 1, 1), c(1,1, 0, 0), lwd=lwd)
}
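# Illustrative usage sketch (not run): "eemd.result" is assumed to be an EMD/EEMD
# result list with the fields described in the comments above (imf or averaged.imfs,
# original.signal, residue, tt, nimf).
# PlotIMFs(eemd.result, time.span = c(0, 10), imf.list = 1:4,
#     original.signal = TRUE, residue = TRUE, fit.line = TRUE)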
|
9988b348f9e9234a886c84014c83e1ab658904e8
|
e0b530f1d389c1de35175643d306eb4be64445f4
|
/googlevisionv1.auto/man/FaceAnnotation.Rd
|
9c130dc360e1b46112cc3070ef225d86d7be0814
|
[
"MIT"
] |
permissive
|
Phippsy/autoGoogleAPI
|
3ce645c2432b8ace85c51c2eb932e1b064bbd54a
|
d44f004cb60ce52a0c94b978b637479b5c3c9f5e
|
refs/heads/master
| 2021-01-17T09:23:17.926887
| 2017-03-05T17:41:16
| 2017-03-05T17:41:16
| 83,983,685
| 0
| 0
| null | 2017-03-05T16:12:06
| 2017-03-05T16:12:06
| null |
UTF-8
|
R
| false
| true
| 1,489
|
rd
|
FaceAnnotation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vision_objects.R
\name{FaceAnnotation}
\alias{FaceAnnotation}
\title{FaceAnnotation Object}
\usage{
FaceAnnotation(tiltAngle = NULL, underExposedLikelihood = NULL,
fdBoundingPoly = NULL, landmarkingConfidence = NULL,
joyLikelihood = NULL, detectionConfidence = NULL,
surpriseLikelihood = NULL, angerLikelihood = NULL,
headwearLikelihood = NULL, panAngle = NULL, boundingPoly = NULL,
landmarks = NULL, blurredLikelihood = NULL, rollAngle = NULL,
sorrowLikelihood = NULL)
}
\arguments{
\item{tiltAngle}{Pitch angle}
\item{underExposedLikelihood}{Under-exposed likelihood}
\item{fdBoundingPoly}{This bounding polygon is tighter than the previous}
\item{landmarkingConfidence}{Face landmarking confidence}
\item{joyLikelihood}{Joy likelihood}
\item{detectionConfidence}{Detection confidence}
\item{surpriseLikelihood}{Surprise likelihood}
\item{angerLikelihood}{Anger likelihood}
\item{headwearLikelihood}{Headwear likelihood}
\item{panAngle}{Yaw angle}
\item{boundingPoly}{The bounding polygon around the face}
\item{landmarks}{Detected face landmarks}
\item{blurredLikelihood}{Blurred likelihood}
\item{rollAngle}{Roll angle}
\item{sorrowLikelihood}{Sorrow likelihood}
}
\value{
FaceAnnotation object
}
\description{
FaceAnnotation Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A face annotation object contains the results of face detection.
}
|
2f4576de59469849eff58589ad4c366cc5df3522
|
ef3dff575826b9f9d620c4ad348a80caa9c023de
|
/functions/plot_functions/make_valdist_plots.R
|
22074822be303ecdee98a3fc73454ec823f78cc2
|
[] |
no_license
|
anthonyleezhang/dlcode
|
6fc5fd30327957903735c2664bfce2bbcaba21ef
|
2471bf86cc168aa4400d7e0e8f8e789221076287
|
refs/heads/master
| 2020-04-26T20:00:56.945691
| 2019-03-04T18:09:24
| 2019-03-04T18:09:24
| 173,794,714
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,332
|
r
|
make_valdist_plots.R
|
make_valdist_plots = function(valdata) {
### Make log gamma pdfs by interpfun tricks
plotdata = data.table(
loggam = numeric(0),
Fgam = numeric(0),
fgam = numeric(0),
tau = numeric(0)
)
for(my_tau in c(0, 0.05, 0.1, 0.15, 0.2)) {
# for(my_tau in c(0)) {
data = valdata[tau == my_tau]
data[, loggam := log(gam)]
# Hack -- code lowest to a finite number
min_loggam = data[, min(loggam)]
data[is.na(loggam), loggam := min_loggam - 0.01]
# End truncation
subdata = data[Fgam < 0.9985]
subdata[, val_cdf := cumsum(val_ss)]
Fgam_approxfun = approxfun(x = subdata$loggam, y = subdata$val_cdf, rule = 2)
loggrid = seq(subdata[loggam > -Inf, min(loggam)], subdata[, max(loggam)], length.out = 1000)
temp = data.table(loggam = loggrid, Fgam = Fgam_approxfun(loggrid))
temp[, fgam := c(0, diff(Fgam) / diff(loggam))]
temp[, tau := my_tau]
plotdata = rbindlist(list(plotdata, temp))
}
# Entering buyer distribution
subdata = data[Fgam < 0.9985]
subdata[, fgam := c(0, diff(Fgam) / diff(loggam))]
valpdf = ggplot(plotdata) +
geom_line(size = 1.3, aes(x = loggam, y = fgam, group = tau, color = tau)) +
scale_x_continuous(name = "Log use value") +
scale_y_continuous(name = "Density") +
scale_color_gradient(name = "Tau", low = "green3", high = "red", limits = c(0, 0.26), label = percent) +
geom_line(size = 1.3, data = subdata, aes(x = loggam, y = fgam), color = "gray50") +
theme(text = element_text(size = 60),
legend.key.size = unit(1.5, "cm"),
legend.title = element_text(size = 50),
legend.text = element_text(size = 40))
data = valdata[tau %in% c(0, 0.05, 0.1, 0.15, 0.2)]
valcdf = ggplot(plotdata, aes(x = loggam, y = Fgam, group = tau, color = tau)) +
geom_line(size = 1.3) +
scale_x_continuous(name = "Log use value") +
scale_y_continuous(name = "CDF") +
scale_color_gradient(name = "Tau", low = "green3", high = "red", limits = c(0, 0.26)) +
theme(text = element_text(size = 60),
legend.key.size = unit(1.5, "cm"),
legend.title = element_text(size = 50),
legend.text = element_text(size = 40))
out = list()
out$valpdf = valpdf
out$valcdf = valcdf
return(out)
}
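# Illustrative usage sketch (not run): "valdata" is assumed to be a data.table
# with columns tau, gam, Fgam, and val_ss, as implied by the code above; the
# function also expects data.table, ggplot2, and scales (for percent) to be loaded.
# plots <- make_valdist_plots(valdata)
# ggsave("valpdf.png", plots$valpdf)
# ggsave("valcdf.png", plots$valcdf)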
|
a006366bff66be52b9a66dd92478d8f68931c586
|
0fbc58702c39addfa7949391d92533922dcf9d49
|
/inst/examples/diamonds-carat.R
|
5fa048a1e479600e029c82e3fd47b2e1de3f29cd
|
[] |
no_license
|
yihui/MSG
|
d3d353514464f962a0d987efd8cf32ed50ac901a
|
8693859ef41139a43e32aeec33ab2af700037f82
|
refs/heads/master
| 2021-11-29T08:12:02.820072
| 2021-08-15T17:14:36
| 2021-08-15T17:14:36
| 1,333,662
| 30
| 12
| null | 2021-08-15T17:14:37
| 2011-02-06T05:42:53
|
R
|
UTF-8
|
R
| false
| false
| 302
|
r
|
diamonds-carat.R
|
# Density curves of diamond weight (carat), faceted by cut quality
data("diamonds")
library(ggplot2)
levels(diamonds$cut) = c("Fair", "Good", "Very Good", "Premium", "Ideal")
p = ggplot(aes(x = carat), data = diamonds) +
geom_density() +
labs(x = "Weight (carat)", y = "Density") +
facet_grid(cut ~ .)
print(p)
|
14945021eae2ccf61c58a5e2d9d3df8513156f1b
|
fc6e720c153e22846dabe8d4c826b99c77f8ea2a
|
/fraud_msba.R
|
b85e24af661607ec973afeba875e325bacb0184b
|
[] |
no_license
|
jlhf80/r_programming
|
80c3994caeac8d8437d876f54262b3973477e311
|
d7d5a6a4298dbcfb1671b79c62c473d527dd0941
|
refs/heads/master
| 2021-06-24T21:41:35.102808
| 2017-09-01T06:35:49
| 2017-09-01T06:35:49
| 49,107,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,456
|
r
|
fraud_msba.R
|
#clear global
rm(list=ls())
# load packages
library(doMC)
library(C50)
library(caret)
library(klaR)
library(MASS)
library(pROC)   # needed for roc() used below
#register cores
registerDoMC(4)
#load data
data <- read.csv("/Users/jameshenson/Downloads/fraud_train.csv")
data2 <- read.csv("/Users/jameshenson/Downloads/fraud_test.csv")
#remove rows with missing values
data <- data[complete.cases(data),]
data2 <- data2[complete.cases(data2),]
#combine datasets
df <- rbind(data,data2)
#Unused levels in df, drop empty levels
df <- droplevels(df)
#stratified test and train
set.seed(1)
inTraining <- createDataPartition(df$FRAUDFOUND, p=.5, list=FALSE)
training <- df[inTraining,]
testing <- df[-inTraining,]
#cross validation
fitControl <- trainControl(method = "repeatedcv"
,number = 10
,repeats = 5
,classProbs = TRUE
,allowParallel = TRUE
,summaryFunction = twoClassSummary)
#fit models
set.seed(2)
gbmFit1 <- train(FRAUDFOUND ~ ., data = training
,method = "gbm"
,trControl = fitControl
,verbose = FALSE
,metric = "ROC")
set.seed(2)
xgboost <- train(FRAUDFOUND ~ ., data = training
,method = "xgbTree"
,trControl = fitControl
,verbose = FALSE
,metric = "ROC")
#tree depth vs ROC
plot(gbmFit1)
plot(xgboost)
#confusion and metrics
gbmClasses <- predict(gbmFit1, testing)
gbmConfusion <- confusionMatrix(gbmClasses, testing$FRAUDFOUND)
gbmConfusion$byClass
xgbClasses <- predict(xgboost, testing)
xgbConfusion <- confusionMatrix(xgbClasses, testing$FRAUDFOUND)
xgbConfusion$byClass
#ROC
gbmProbs <- predict(gbmFit1, testing, type = "prob")
gbmROC <- roc(predictor = gbmProbs$Yes
,response = testing$FRAUDFOUND
,levels = rev(levels(testing$FRAUDFOUND)))
plot(gbmROC)
gbmROC$auc
xgbProbs <- predict(xgboost,testing,type="prob")
xgbROC <- roc(predictor = xgbProbs$Yes
,response = testing$FRAUDFOUND
,level = rev(levels(testing$FRAUDFOUND)))
plot(xgbROC)
xgbROC$auc
#prediction dataset
prediction <- predict(gbmFit1,testing, type = "prob")
#plot differences
resamps <- resamples(list(GBM = gbmFit1, XGB = xgboost))
trellis.par.set(caretTheme())
bwplot(resamps, layout = c(1,3))
#ROC, Sens, Spec
values <- resamples(list(gbm=gbmFit1, xgb=xgboost))
values$values
summary(values)
|
0ec9570ec07a7aaa4188a96cca16e6ba6b3a8c9e
|
c0a51646e07e25c725924caacd8bd590aee89e07
|
/man/gen_token.Rd
|
ed80bc41df07e7c2edb035f760ec2ad0216ea3fd
|
[] |
no_license
|
grosenf1/immport.api
|
5e002eb6cae5ef6f8b2c5ef1c262c69234e47daa
|
4ed1163681db7e3a5f740fc96ced254f3bb8695c
|
refs/heads/master
| 2021-08-31T15:17:45.696593
| 2017-12-21T21:34:04
| 2017-12-21T21:34:04
| 114,577,745
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 501
|
rd
|
gen_token.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{gen_token}
\alias{gen_token}
\title{Generate token function}
\usage{
gen_token(Username, Password)
}
\arguments{
\item{Username}{Your ImmPort username}
\item{Password}{Your ImmPort password}
}
\description{
This function allows the user to specify their ImmPort Username and Password to generate an API token.
}
\examples{
gen_token(Username = your_user_name, Password = your_password)
}
\keyword{token}
|
dd22afbd734cae7577ac69c5ce2f8d59ee836e42
|
a2b21c77a41c4d77033577881c006cf451a97433
|
/01_clean_individual_datasets/_archive/Clusters of NTL/02_join_close_together_clusters.R
|
1a64f4e2900c2fe1999eb1a30b5c90f593f5c9fa
|
[] |
no_license
|
ramarty/Industry-Nighttime-Lights
|
85de909e64d66b7eb7610f137f68b518efaa2012
|
c120a037abf46935ed87e58a4de52457a46a5fa9
|
refs/heads/master
| 2022-05-17T12:13:08.917525
| 2022-04-19T20:18:35
| 2022-04-19T20:18:35
| 217,305,866
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,732
|
r
|
02_join_close_together_clusters.R
|
# Identify Clusters of Lights
# Requires sp, raster, and dplyr; data_file_path and UTM_ETH (a proj4 string) are assumed to be defined by a master script.
library(sp); library(raster); library(dplyr)
for(country in c("mexico", "canada")){
clumps_sp <- readRDS(file.path(data_file_path, "DMSPOLS_Clusters", "RawData",
paste0(country %>% substring(1,3) %>% paste0("_dmspcluster.Rds"))))
# Group together close together clusters ---------------------------------------
## Centroid
points_sp <- coordinates(clumps_sp) %>%
as.data.frame() %>%
dplyr::rename(lon = V1,
lat = V2) %>%
bind_cols(clumps_sp@data)
## Spatially Define and project
coordinates(points_sp) <- ~lon+lat
crs(points_sp) <- CRS("+init=epsg:4326")
points_sp <- spTransform(points_sp, CRS(UTM_ETH))
## Back to dataframe
points <- as.data.frame(points_sp)
## Clusters
points_dist <- points[,c("lat", "lon")] %>% dist()
clumps_sp$wardheirch_clust_id <- hclust(points_dist, method = "ward.D2") %>%
cutree(h = 10000)
clumps_sp <- raster::aggregate(clumps_sp, by = "wardheirch_clust_id", sums=list(list(sum, 'cluster_n_cells')))
clumps_sp@data <- clumps_sp@data %>%
dplyr::select(-c(wardheirch_clust_id)) %>%
dplyr::mutate(cell_id = 1:n()) # prevous cell_id summed version; fresh, aggregated version
# Export -----------------------------------------------------------------------
# We save "polygon" and "points" file, where "points" is actually just the polygon.
# We do this to make compatible with some scripts that also process grid data
# TODO: Should we just export to GRID folder? may make life easier?
saveRDS(clumps_sp, file.path(data_file_path, "DMSPOLS_Clusters", "RawData",
paste0(country %>% substring(1,3) %>% paste0("_dmspclustergrouped.Rds"))))
}
|
164c608b58bf5d2a01675283a6a477f81cbfcd38
|
c9502009489fcb7ecad5a563a7827766e346f90b
|
/Tutorials/R Intermediate/Chapter 4/Problemwithsapply.R
|
17c0895c30c079338f9d134bcf039f4d3a5e25bb
|
[] |
no_license
|
Lingesh2311/R
|
c96f38481795f2d87f89faecce58a4d63304fd63
|
86d2501d1f4979b4391943a93d52df9e7e613e06
|
refs/heads/master
| 2020-06-04T15:31:01.767700
| 2019-06-17T23:14:18
| 2019-06-17T23:14:18
| 192,082,786
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 414
|
r
|
Problemwithsapply.R
|
# temp is already prepared for you in the workspace
# Definition of below_zero()
below_zero <- function(x) {
return(x[x < 0])
}
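# If temp is not already in the workspace, an illustrative stand-in (a list of
# daily temperature vectors) could be:
if (!exists("temp")) {
  temp <- list(c(3, 7, 9, 6, -1), c(6, 9, 12, 13, 5), c(4, 8, 3, -1, -3))
}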
# Apply below_zero over temp using sapply(): freezing_s
freezing_s = sapply(temp, below_zero)
# Apply below_zero over temp using lapply(): freezing_l
freezing_l = lapply(temp, below_zero)
# Are freezing_s and freezing_l identical?
identical(freezing_l, freezing_s)
|
4577c10a0e0b946382ebbce8c8a8c2f0ef9c1afa
|
cbc2cb3cfbe1a9193bbee7e661922bb63b2184d6
|
/R/update_db.R
|
b164d7218343ba711fa5dcca9c34ab445abacdc7
|
[
"MIT"
] |
permissive
|
joebrew/nepal
|
cca80b7c49922e4a26d9e9e765bcd89a6b2b4ce8
|
dc9860fc026323286b3d36bfb566487186cc3d2b
|
refs/heads/master
| 2022-04-21T15:32:40.131749
| 2020-03-27T08:15:51
| 2020-03-27T08:15:51
| 113,752,787
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 982
|
r
|
update_db.R
|
#' Update DB
#'
#' Update a table
#' @param data The data to be uploaded
#' @param table_name Name of table
#' @param connection_object The connection object
#' @return The transcriptions relation in the postgresql database will be updated
#' @import dplyr
#' @import RPostgreSQL
#' @export
#' @examples
#' 2+2
update_db <- function(data,
table_name = 'transcriptions',
connection_object = NULL){
# If not connection object, try to find one
if(is.null(connection_object)){
message(paste0('No connection_object provided. Will try ',
'to find a credentials file.'))
# Get credentials
the_credentials <- credentials_extract()
# Establish the connection
connection_object <- credentials_connect(the_credentials)
}
# Replace the table in db
dbWriteTable(conn = connection_object,
name = table_name,
value = data,
append=TRUE, row.names=FALSE)
}
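# Illustrative usage sketch (not run): the data frame and table name below are
# hypothetical, and a valid credentials file or connection_object is assumed.
# new_rows <- data.frame(id = 1:2, text = c("first", "second"))
# update_db(data = new_rows, table_name = "transcriptions")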
|
f26a14fe76c602c4dba12973861b02bed433fe3e
|
506e90f07f77098c3ddc3325e30f74d1e30b2c61
|
/man/bias_function.Rd
|
e8a42359b40facbf04a0cb77c4363697739e0317
|
[] |
no_license
|
cran/cmaes
|
9aa8b7747757c9c0358c028b278d33ea0005bd0b
|
f921cfbd20d03122805200e30420f0dd524be688
|
refs/heads/master
| 2022-05-02T02:07:37.795177
| 2022-03-18T09:20:18
| 2022-03-18T09:20:18
| 17,695,139
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 402
|
rd
|
bias_function.Rd
|
\name{bias_function}
\alias{bias_function}
\title{Create a biased test function...}
\usage{bias_function(f, bias)
}
\description{Create a biased test function}
\details{Returns a new biased test function defined as
\deqn{g(x) = f(x) + bias.}}
\value{The biased test function.}
\author{Olaf Mersmann \email{olafm@statistik.tu-dortmund.de}}
\arguments{\item{f}{test function}
\item{bias}{bias value.}
}
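\examples{
## Illustrative sketch based on the definition above, g(x) = f(x) + bias:
f <- function(x) sum(x^2)
g <- bias_function(f, 10)
g(c(1, 2))  ## 1 + 4 + 10 = 15
}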
|
89011d6ec232d33f54d059489ff1d9ef1d70a334
|
b56624dbbc2e2b059ae0dc1e8899ec5b7b14c7ad
|
/R/RcppExports.R
|
e28cc5b4f7fa5c60b8726a6b9d1ec4f1cc157e8e
|
[] |
no_license
|
oswaldogressani/mixcurelps
|
315c5f5327e8be46b3393104cc566f247d0f48f9
|
11787d26d294e789c1f1afc89c789875b7358403
|
refs/heads/main
| 2023-05-23T05:24:35.490598
| 2022-06-18T08:57:50
| 2022-06-18T08:57:50
| 404,776,270
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 405
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
Rcpp_Laplace <- function(lat0, v, K, PDcorrect, Dloglik, D2loglik, Qv) {
.Call(`_mixcurelps_Rcpp_Laplace`, lat0, v, K, PDcorrect, Dloglik, D2loglik, Qv)
}
Rcpp_cubicBspline <- function(x, lower, upper, K) {
.Call(`_mixcurelps_Rcpp_cubicBspline`, x, lower, upper, K)
}
|
ebf385593eca8b188b37a11ee15c95217231287e
|
7c56a8400746861d57d2801f85252e022a934a05
|
/SL.glmnet2way.R
|
4ce22d3b95ee3925301f0c88347169163c4dbcd2
|
[] |
no_license
|
tarynam/epi-drtmle
|
3d497979c6aacab3f54fea0b2c673ed4e96829d6
|
0097452c70342a79b3db841cfda7b49114cda0ae
|
refs/heads/master
| 2021-07-19T19:25:55.963568
| 2020-05-01T20:21:15
| 2020-05-01T20:21:15
| 154,034,182
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,479
|
r
|
SL.glmnet2way.R
|
SL.glmnet2way<-function (Y, X, newX, family, obsWeights, id, alpha = 1, nfolds = 10,
nlambda = 100, useMin = TRUE, loss = "deviance", ...)
{
.SL.require("glmnet") #keep this one
if (!is.matrix(X)) {
X <- model.matrix(~-1 + .^2, X) #updated
newX <- model.matrix(~-1 + .^2, newX) #updated
}
fitCV <- glmnet::cv.glmnet(x = X, y = Y, weights = obsWeights,
lambda = NULL, type.measure = loss, nfolds = nfolds,
family = family$family, alpha = alpha, nlambda = nlambda,
...)
pred <- predict(fitCV, newx = newX, type = "response", s = ifelse(useMin,
"lambda.min", "lambda.1se"))
fit <- list(object = fitCV, useMin = useMin)
class(fit) <- "SL.glmnet2way" #changed this to match the name
out <- list(pred = pred, fit = fit)
return(out)
}
#How does this know to pull from the new glmnet and not the old one???
predict.SL.glmnet2way<-function (object, newdata, remove_extra_cols = T, add_missing_cols = T,
...)
{
.SL.require("glmnet") #keep this one
if (!is.matrix(newdata)) {
newdata <- model.matrix(~-1 + .^2, newdata) #updated
}
original_cols = rownames(object$object$glmnet.fit$beta) #This is where I wasn't sure.
if (remove_extra_cols) {
extra_cols = setdiff(colnames(newdata), original_cols)
if (length(extra_cols) > 0) {
warning(paste("Removing extra columns in prediction data:",
paste(extra_cols, collapse = ", ")))
newdata = newdata[, !colnames(newdata) %in% extra_cols,
drop = F]
}
}
if (add_missing_cols) {
missing_cols = setdiff(original_cols, colnames(newdata))
if (length(missing_cols) > 0) {
warning(paste("Adding missing columns in prediction data:",
paste(missing_cols, collapse = ", ")))
new_cols = matrix(0, nrow = nrow(newdata), ncol = length(missing_cols))
colnames(new_cols) = missing_cols
newdata = cbind(newdata, new_cols)
newdata = newdata[, original_cols]
}
}
pred <- predict(object$object, newx = newdata, type = "response",
s = ifelse(object$useMin, "lambda.min", "lambda.1se"))
return(pred)
}
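# Illustrative usage sketch (not run): registering the wrapper in a SuperLearner
# library; "Y" (binary outcome) and "X" (covariate data frame) are hypothetical.
# library(SuperLearner)
# sl_fit <- SuperLearner(Y = Y, X = X, family = binomial(),
#     SL.library = c("SL.mean", "SL.glmnet2way"))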
|
489324717b55c0ea727da72483f748c85b3874f8
|
c1151b4ba28614a99a06bf838c5669789eb5e203
|
/Main/lib/R/vizTools/Scatterplot_001_WASHb_BNG.R
|
f3c9d967c10f42bf1ae947b8c71bef8dfa391d48
|
[
"Apache-2.0"
] |
permissive
|
VEuPathDB/ClinEpiWorkflow
|
acf263b1a90b018b8ec943eec0c4b2b01c670fca
|
e34aef14c570ea7f7890674d5445c31e73f2b298
|
refs/heads/master
| 2023-07-06T10:25:05.204676
| 2023-03-31T16:56:25
| 2023-03-31T16:56:25
| 201,106,536
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,430
|
r
|
Scatterplot_001_WASHb_BNG.R
|
rm(list=ls())
library(ggplot2)
#############################################################
# set working directory
setwd("~/Documents/GitHub/ClinEpiWorkflow/Main/lib/R/vizTools")
#############################################################
# load data
d <- read.csv("./Data/2020_09_17_WASHb_BNG_bulk_download_merged.csv", as.is=T)
#############################################################
#rename variables of interest because default lables are too long/messy
d$pid <- d$Participant_Id
d$lns <- d$Percent.LNS.consumed..caregiver.report..EUPATH_0035031.
d$hfias <- d$Household.Food.Insecurity.Access.Scale..HFIAS...EUPATH_0011145.
d$hfias_score <- d$Household.Food.Insecurity.Access.Scale..HFIAS..score..EUPATH_0011151.
d$diar <- d$Diarrhea.case.during.the.last.7.days..caregiver.report..EUPATH_0035097.
d$weight_for_age <- d$Weight.for.age.z.score..using.median.weight..EUPATH_0035073.
d$circ_for_age <- d$Head.circumference.for.age.z.score..using.median.circumference..EUPATH_0035075.
d$height_for_age <- d$Length..or.height.for.age.z.score..using.median.stature..EUPATH_0035067.
d$svy <- d$Study.timepoint..OBI_0001508.
d$hh_svy <- d$Household.study.timepoint..EUPATH_0044122.
#############################################################
# limit data to target kids & renamed variables of interest
table(d$Target.child.or.sibling.neighbor..EUPATH_0035112., useNA="ifany")
names(d)
d <- d[d$Target.child.or.sibling.neighbor..EUPATH_0035112.=="Target child",110:119]
#############################################################
#############################################################
# clean up data --> general issue: household observation data is in a different row than participant observation data,
# even when the study timepoint is the same
#############################################################
#############################################################
#############################################################
# pull out household data
# specific issue with households:
# there are household data (hh) with timepoint: svy==NA and household observation (hh_obs) data with svy==c(0,1,2)
# need to fill in the hh data at every timepoint of hh_obs & remove rows where svy==NA
hh <- d[,c("pid", "lns", "hfias", "hfias_score", "hh_svy")]
#hfias data is measured 1x not at the observation level --> fill in for each participant
for(i in hh$pid){
if(length(unique(hh$hfias[!is.na(hh$hfias) & hh$pid==i]))>0){
hh$hfias[hh$pid==i] <- unique(hh$hfias[!is.na(hh$hfias) & hh$pid==i])
}
}
# remove rows where hh_svy==NA
hh <- hh[!is.na(hh$hh_svy),]
# rename hh_svy to svy
names(hh)[names(hh)=="hh_svy"] <- "svy"
#############################################################
# pull out participant observation data & clean up
p <- d[!is.na(d$svy),c("pid", "svy", "diar", "weight_for_age", "circ_for_age","height_for_age")]
head(p)
#############################################################
#merge household observation and participant observation data by svy & pid
d <- merge(p, hh, all.x=T, all.y=T, by=c("pid", "svy"))
#############################################################
# plot
diar.labs <- c("Diarrhea", "No diarrhea")
names(diar.labs) <- c("Yes", "No")
p <- ggplot(d[!is.na(d$diar),], aes(weight_for_age, height_for_age, color=as.character(svy))) +
geom_point(alpha=0.6, shape=1, size=1) +
geom_smooth () +
xlab("Weight-for-age z-score") +
ylab("Height-for-age z-score") +
labs(color="Timepoint") +
facet_grid(cols = vars(diar), labeller=labeller(.cols=diar.labs))
p_built <- ggplot_build(p)
p_built
#############################################################
# pull out data for the smoothed mean
p_data <- p_built$data
smoothed_mean <- p_data[[2]]
table(smoothed_mean$PANEL, smoothed_mean$colour, useNA="ifany")
#F8766D, PANEL 1 = timepoint 1, no diarrhea
#F8766D, PANEL 2 = timepoint 1, diarrhea
#00BFC4, PANEL 1 = timepoint 2, no diarrhea
#00BFC4, PANEL 2 = timepoint 2, diarrhea
smoothed_mean$subset <- "timepoint 1, no diarrhea"
smoothed_mean$subset[smoothed_mean$PANEL==2] <- "timepoint 1, diarrhea"
smoothed_mean$subset[smoothed_mean$PANEL==1 & smoothed_mean$colour=="#00BFC4"] <- "timepoint 2, no diarrhea"
smoothed_mean$subset[smoothed_mean$PANEL==2 & smoothed_mean$colour=="#00BFC4"] <- "timepoint 2, diarrhea"
table(smoothed_mean$subset)
smoothed_mean <- smoothed_mean[,c("subset", "x", "y", "ymin", "ymax", "se")]
|
252eec2a2548df8a231046a504e5de921da7d110
|
b635759bb7d14305d0fdda68a46308db0b50224b
|
/R/calculateScenarios.r
|
4b8c30c3420d041aab102d13d331dba6896f3e6b
|
[] |
no_license
|
tobiasreischmann/matchingmarketsevaluation
|
d27d0c34a72ca5a1d8feb816cb57d32244934792
|
eaa3855185d0258092d726109b4a12d70c577c61
|
refs/heads/master
| 2020-08-31T22:50:31.892226
| 2020-04-22T19:00:27
| 2020-04-22T19:00:27
| 218,805,489
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,974
|
r
|
calculateScenarios.r
|
# ----------------------------------------------------------------------------
# R-code for analyzing the rounds played in decentralized college admission problem
# of the matchingmarkets package.
# For multiple dimensions it is analysed how a dimension influences the output for multiple
# exemplary scenarios.
#
# Copyright (c) 2019 Tobias Reischmann
#
# This library is distributed under the terms of the GNU Public License (GPL)
# for full details see the file LICENSE
#
# ----------------------------------------------------------------------------
#' @title Simulate multiple scenarios for the college admissions problem
#'
#' @description This function simulates multiple scenarios for the iterative deferred acceptance mechanism with ties, implemented as stabsim3 within the matchingmarkets package.
#' The results can be used to analyse the number of rounds necessary for the market to be cleared up to a specified threshold.
#'
#' @param scenarios list of lists containing the different scenarios.
#' @param nruns integer indicating the number of markets to be simulated (results are averaged over all simulated markets).
#' @param nworkers integer number of workers generated for the parallel package.
#' @param seed optional integer seed passed to \code{set.seed} for reproducible simulations.
#' @param fullresult boolean; if TRUE, the full result object of each run is returned instead of only the aggregated number of iteration rounds.
#'
#' @export
#'
#' @return
#' \code{calculateScenarios} returns a list of lists, which contains the following fields
#' \item{occupancyrate}{double indicating the ratio of #students/#availableplaces}
#' \item{nStudents}{integer indicating the number of students per market}
#' \item{nColleges}{integer indicating the number of colleges per market}
#' \item{threshold}{double influencing the number of decentrailzed rounds played. The mechanism terminates if the ratio of places, which are different in comparison to the finished mechanism are below this percentage value.}
#' \item{areasize}{integer indicating the length of the grid used for the horizontal preferences.}
#' \item{horizontalscenario}{integer (0,1,2) indicating which colleges uses horizontal preferences in their ranking (1=>all, 2=>only public colleges, 3=> none).}
#' \item{conf.s.prefs}{vector representing the size of the tiers for students' ranking lists}
#' \item{quota}{double between 0 and 1 indicating the percentage of private facilities}
#'
#' @author Tobias Reischmann
#'
#' @keywords generate
#'
#' @examples
#'
#' ## Simulate a set of different scenarios and return the average number of decentralized rounds played.
#'
#' elem1 <- list(occupancyrate = .8, quota = .3, nStudents = 2700, nColleges = 600,
#' areasize = 7, conf.s.prefs = c(3,7,10,10), horizontalscenario = 1)
#' elem2 <- list(occupancyrate = .8, quota = .3, nStudents = 600, nColleges = 200,
#' areasize = 6, conf.s.prefs = c(2,5,6,7), horizontalscenario = 1)
#' elements <- list(elem1, elem2)
#' scenarios <- lapply(elements, function(elem) {
#' lapply(c(0.2,0.5), function(x){
#' elem$threshold <- x
#' elem
#' })
#' })
#'
#' xdata <- calculateScenarios(scenarios, nruns=2)
calculateScenarios <- function(scenarios,nruns=10,nworkers=detectCores(),seed=NULL,fullresult=FALSE) {
if (!is.null(seed)) {
set.seed(seed = seed)
}
library(digest)
library(parallel)   # provides detectCores() and mclapply(); stabsim3() comes from the matchingMarkets package
hash <- digest(scenarios)
filename <- paste('./data/',hash,'.rds',sep='')
if (file.exists(filename)) {
initialresults <- readRDS(filename)
for (i in 1:length(scenarios)) {
for (j in 1:length(scenarios[[i]])) {
if (length(initialresults) >= i &&
length(initialresults[[i]]) >= j &&
is.numeric(initialresults[[i]][[j]])) {
scenarios[[i]][[j]]$cache <- TRUE
}
}
}
}
equaldist <- function(x) {
runif(x)
}
category <- function(c) {
function(x) {
round(runif(x) * c + 0.5)
}
}
######### Run ##############
applyresults <- lapply(scenarios, function(elements) {
rowresults <- mclapply(elements, function(elem) { # Loop over elements
if (!is.null(elem$cache)) {
return(NULL);
}
occupancy <- elem$occupancyrate
nStudents <- elem$nStudents
nColleges <- elem$nColleges
threshold <- elem$threshold
areasize <- elem$areasize
scenario <- elem$horizontalscenario
s.prefs.count = elem$conf.s.prefs
quota <- elem$quota
mean <- (nStudents/nColleges)/occupancy # Mean number of places per program
sd <- mean/2 # Standard deviation for distribution of places per program
capacityfun <- function(n, mean, sd=1) {
sapply(round(rnorm(n, mean=mean, sd=sd)), function(x) max(1,x))
}
nSlots <- capacityfun(nColleges, mean, sd)
private <- function(x) {
runif(x) < quota
}
if (scenario == 1) {
scenariomodel = as.formula("~ I((1000**(firstpref %% 3)) * (abs(cx-sx)<=1) * (abs(cy-sy)<=1))
+ I((1000**((firstpref + secondpref) %% 3)) * social)
+ I((1000**((firstpref - secondpref) %% 3)) * private * ceiling((cidio1 + sidio1 %% 1) * 100))")
}
if (scenario == 2) {
scenariomodel = as.formula("~ I(social)")
}
if (scenario == 3) {
scenariomodel = as.formula("~ I(ceiling((cidio1 + sidio1 %% 1) * 100))")
}
if (scenario == 4) {
scenariomodel = as.formula("~ I((abs(cx-sx)<=1) * (abs(cy-sy)<=1))")
}
collegemodel = as.formula("~ I(-idist * 2 * sqrt(((cx-sx))**2 + ((cy-sy))**2)/areasize)
+ I(iquality * quality)
+ I(iidio * (cidiocat == sidiocat))")
if (scenario == 5) {
scenariomodel = as.formula("~ I(social)")
collegemodel = as.formula("~ I(iquality * quality)")
}
daresult <- stabsim3(m=nruns, nStudents=nStudents, nSlots=nSlots, verbose=FALSE,
colleges = c("cx","cy", "firstpref", "secondpref", "quality", "cidiocat", "cidio1", "cidio2", "private"),
students = c("sx", "sy", "social", "sidiocat", "idist", "iidio", "sidio1", "sidio2", "iquality"),
colleges_fun = c(category(areasize),category(areasize),category(3),category(2),equaldist,category(10),equaldist,equaldist,private),
students_fun = c(category(areasize),category(areasize),category(100),category(10),equaldist,equaldist,equaldist,equaldist,equaldist),
outcome = ~ I(sqrt(((cx-sx)/areasize)**2 + ((cy-sy)/areasize)**2)),
selection = c(
student = scenariomodel
#+ I((1000**((firstpref - secondpref) %% 3)) * private * (cidiocat == sidiocat) )
,
colleges = collegemodel
),
private_college_quota = quota,
count.waitinglist = function(x) {x}, s.prefs.count = s.prefs.count)
curr <- 0
for (m in 1:nruns) { # Average results
iteration <- daresult$iterations[[m]]
iterationtable <- t(as.matrix(iteration[,-1]))
complete <- sum(iterationtable[,1])
ratio <- complete * threshold
curr <- curr + sum(iteration$new+iteration$altered > ratio) + 1
}
if (fullresult){
return(daresult)
}
result <- curr/nruns
# Clean workspace of heavy objects
rm(daresult)
gc()
return(result)
}, mc.silent=FALSE, mc.cores=nworkers)
})
if (exists("initialresults")) {
for (i in 1:length(scenarios)) {
for (j in 1:length(scenarios[[i]])) {
if (length(initialresults) >= i &&
length(initialresults[[i]]) >= j &&
is.numeric(initialresults[[i]][[j]])) {
applyresults[[i]][[j]] <- initialresults[[i]][[j]]
}
}
}
}
saveRDS(applyresults, file = filename)
return(applyresults)
}
|
7963c65fed212cc946c402e49b2d87bb885ace3d
|
974eb9aa2e1d19028a62d52cd527b6ca92ce15af
|
/repres1.R
|
5cb92f9a01e2c3787ccd5cc901a6132f22a40f35
|
[] |
no_license
|
VapoVu/RepData_PeerAssessment1
|
7266f23dd47c1c49aff46064614876ebf6919cb9
|
837104cdec2e82ba7f23db901dfcfb2ee11f70a6
|
refs/heads/master
| 2021-01-22T14:20:48.504747
| 2015-04-19T19:59:47
| 2015-04-19T19:59:47
| 34,189,639
| 0
| 0
| null | 2015-04-19T01:45:21
| 2015-04-19T01:45:21
| null |
UTF-8
|
R
| false
| false
| 1,735
|
r
|
repres1.R
|
# Reproducible research
# Assignment 1
library(plyr)
library(lattice)
# Loading and preprocessing the data
pvDat = read.csv('activity.csv')
pvu <- aggregate(steps ~ date, data = pvDat, sum, na.rm =T)
summary(pvu)
# What is mean total number of steps taken per day?
hist(pvu$steps, breaks =10)
ss = c(mean(pvu$steps), median(pvu$steps))
# What is the average daily activity pattern?
pvi <- aggregate(steps ~ interval, data = pvDat, mean, na.rm =T)
pvm <- aggregate(steps ~ interval, data = pvDat, median, na.rm =T)
names(pvm) <- c("interval", "medStep")
plot(pvi$interval, pvi$steps, xlab = '5-minute interval',
ylab = 'Daily Average Steps', type = "l")
ind = which(pvi$steps == max(pvi$steps))
# Imputing missing values
nmiss <- colSums(is.na(pvDat))
pvJoin <- join(pvDat, pvm, by = "interval", type = "left", match = "all")
pvJoin$steps[is.na(pvJoin$steps)] <- pvJoin$medStep[is.na(pvJoin$steps)]
pvFill <- pvJoin[,c(1:3)]
pvs <- aggregate(steps ~ date, data = pvFill, sum, na.rm =T)
hist(pvs$steps, breaks =10)
ss0 = c(mean(pvs$steps), median(pvs$steps))
# Are there differences in activity patterns between weekdays and weekends?
pvDat$weekday <- weekdays(as.Date(pvDat$date))
pvDat$wkend <- "weekday"
pvDat$wkend[pvDat$weekday %in% c("Saturday", "Sunday")] = "weekend"
pvk <- aggregate(steps ~ interval, data = pvDat, mean, subset = (wkend == "weekend"),na.rm =T)
pvd <- aggregate(steps ~ interval, data = pvDat, mean, subset = (wkend == "weekday"),na.rm =T)
xyplot(steps ~ interval, data = pvk, main = "Weekend", xlab = '5-minute interval',
ylab = 'Daily Average Steps', type = "l")
xyplot(steps ~ interval, data = pvd, main = "Weekday", xlab = '5-minute interval',
ylab = 'Daily Average Steps', type = "l")
|
ba5ec30980a2529730384b52cb95c338fa9ddcfb
|
6eda53f2c4f775261c625e5393ce1464addf965f
|
/man/processing_func.Rd
|
eb014d67203bdd355f8e7012be1dacf95d17b9aa
|
[
"MIT"
] |
permissive
|
streampulse/StreamPULSE
|
84783964e1dc419cbbd264d3f589080396c68365
|
561c560a320cb21f7e01c90c8a45a9c565c9fbb4
|
refs/heads/master
| 2022-06-23T03:54:01.975626
| 2022-06-02T20:36:12
| 2022-06-02T20:36:12
| 122,374,090
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 349
|
rd
|
processing_func.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shiny_helpers.R
\name{processing_func}
\alias{processing_func}
\title{Internal functions}
\usage{
processing_func(ts, st, en)
}
\description{
Not intended to be called directly by the user.
}
\details{
Not intended to be called directly by the user.
}
\keyword{internal}
|
208de565a09e2692a7faeddd5a69c87687db2e39
|
81b11aba0ae87dcad017aba0d2fc59626904b07e
|
/man/Gaussian.approx.Rd
|
cf29173c8af4b8f0edce8378a0135f7801149ead
|
[] |
no_license
|
liusf15/skydivide
|
2f4e5473986e110ffb6cc3ff861061b4a160278b
|
0c43fcb26ce66a969227e2d5bd533e05eca7956a
|
refs/heads/master
| 2023-04-13T02:38:36.789136
| 2021-05-01T21:47:18
| 2021-05-01T21:47:18
| 359,648,801
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 471
|
rd
|
Gaussian.approx.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/combine.R
\name{Gaussian.approx}
\alias{Gaussian.approx}
\title{Gaussian approximation}
\usage{
Gaussian.approx(f.all)
}
\arguments{
\item{f.all}{an array with shape (K, recon.len, num.samples), where K is the number of subsets, recon.len is the number of grid points, num.samples is the number of samples used}
}
\value{
reconstructed population size
}
\description{
Gaussian approximation
}
|
60275e3a33147c361e0402103fa358cd3a41a1f2
|
6ad0b3fb43e174c7fc6d58bfa67cf76f38139816
|
/DataPrediction/code/predicteurs.R
|
765a69ff4c6d940652986123531aba3b171edcd1
|
[] |
no_license
|
Kerudal/MachineLearning
|
09ed75587334f21e88e8b3a2bf7bbc30c0c0392c
|
435a02c99b6566f2ca9ce86569b7b6d162ef4c89
|
refs/heads/main
| 2023-01-08T12:33:11.286260
| 2020-11-05T10:22:42
| 2020-11-05T10:22:42
| 310,256,479
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 305
|
r
|
predicteurs.R
|
# Regression predictor: loads the pre-fitted regression model (model.reg)
# stored in env.RData and returns predictions for the supplied dataset.
regresseur = function(dataset) {
load("env.RData")
pred.test = predict(model.reg, newdata = dataset)
return(pred.test)
}
# Classification predictor: loads the pre-fitted classifier (best.modelClass)
# stored in env.RData and returns the predicted class labels.
classifieur = function(dataset) {
load("env.RData")
library(klaR)
library(MASS)
prediction <- predict(best.modelClass,newdata = dataset)
return(prediction$class)
}
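# Illustrative usage sketch (not run): assumes env.RData exists in the working
# directory and that "new_data" has the columns the saved models expect.
# new_data <- read.csv("new_observations.csv")
# y_hat <- regresseur(new_data)
# class_hat <- classifieur(new_data)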
|
37a351eb57fa80f0df1f909207b5147b629deb96
|
01204b228054d6d3240961f5a115a7d6ce6296ad
|
/man/theta.summary.Rd
|
acb5b761e19a0dfb952c8561f88ca77b6032a8cd
|
[] |
no_license
|
rbarcellos/MScPack
|
90e111f10ad0eaf66984909a321e151aea7d0951
|
687b10b626613beae4770a28932a032f5917547a
|
refs/heads/master
| 2020-05-29T17:12:55.723706
| 2014-07-06T20:12:06
| 2014-07-06T20:12:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 507
|
rd
|
theta.summary.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{theta.summary}
\alias{theta.summary}
\title{Summary measures for state parameters}
\usage{
theta.summary(theta, sig.level = 0.95)
}
\arguments{
\item{theta}{array with the state matrices simulated via MCMC;}
\item{sig.level}{credibility level of the interval}
}
\value{
\code{array} whose slices are the mean, the lower limit, and the upper limit.
}
\description{
Computes the mean and the quantiles of interest for the simulated state parameters.
}
|
89719654f1448a679d3cc6d6090ab219dd0a05f3
|
6482ba3e2e027a21339c7dd728704b6ee680f247
|
/Correlation&Visualization.R
|
30236cfea6c21cd34307fd480be3b59cbc3b8640
|
[] |
no_license
|
Qingoy05/USDA
|
60a410fadff7fa60105187f4696239e9cbdbd37b
|
62be21b98e9104afd7c9c8b1ccf78f020b1b1483
|
refs/heads/master
| 2022-12-12T18:26:21.781026
| 2020-09-06T09:46:14
| 2020-09-06T09:46:14
| 275,272,332
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 345
|
r
|
Correlation&Visualization.R
|
# Calculate the correlation matrix (assumes 'df' is a numeric data frame
# loaded earlier in the workflow)
res <- cor(df)
round(res, 2)
# Data output: write the rounded correlation matrix to CSV
m <- round(res, 2)
m <- as.data.frame(m)
write.table(m, "coefficent.csv", sep = ",")
# Visualization of the correlation matrix
library(corrplot)
corrplot(res, tl.cex = 0.7, type = "upper", order = "hclust", tl.col = "black", tl.srt = 45)
mtext("Correlation Matrix", at = 9, line = -0.8, cex = 1)
|
bf119cc5b1033ebd7465249cc52e2ac8e1ebf0df
|
c0bce42fcea5993c3d9976248c157f4a4433db0b
|
/figure_invitro_CD34/code/07_mutations_count.R
|
497886c4378f4d1804d00ac89f05b253dedeb6c0
|
[] |
no_license
|
ChenPeizhan/mtscATACpaper_reproducibility
|
a01b22f43c6222a56e04e731d68de7440c3cfc76
|
e543610bf29dbac1094994c54d3e7edd41609d5a
|
refs/heads/master
| 2022-12-11T11:31:24.877462
| 2020-08-29T18:40:36
| 2020-08-29T18:40:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,994
|
r
|
07_mutations_count.R
|
library(dplyr)
library(SummarizedExperiment)
library(Matrix)
library(BuenColors)
c500 <- readRDS("../output/filtered_mitoSE_CD34-500.rds")
c500_1<- Matrix::colSums(assays(c500)[["allele_frequency"]] >= 0.01)
c500_5<- Matrix::colSums(assays(c500)[["allele_frequency"]] >= 0.05)
c500_10 <- Matrix::colSums(assays(c500)[["allele_frequency"]] >= 0.10)
c500_20 <- Matrix::colSums(assays(c500)[["allele_frequency"]] >= 0.20)
c800 <- readRDS("../output/filtered_mitoSE_CD34-800.rds")
c800_1<- Matrix::colSums(assays(c800)[["allele_frequency"]] >= 0.01)
c800_5<- Matrix::colSums(assays(c800)[["allele_frequency"]] >= 0.05)
c800_10<- Matrix::colSums(assays(c800)[["allele_frequency"]] >= 0.10)
c800_20<- Matrix::colSums(assays(c800)[["allele_frequency"]] >= 0.20)
# Helper: given a mutation-count threshold ('value') and a vector of mutations
# per cell ('vec'), return the percentage of cells with at least that many mutations
vv <- function(value, vec){
  sum(vec >= value) / length(vec) * 100
}
df <- rbind(data.frame(
n1 = sapply(0:10, vv, c500_1),
n5 = sapply(0:10, vv, c500_5),
n10 = sapply(0:10, vv, c500_10),
n20 = sapply(0:10, vv, c500_20),
what = "500 input (175 mutations)",
n = 0:10
),
data.frame(
n1 = sapply(0:10, vv, c800_1),
n5 = sapply(0:10, vv, c800_5),
n10 = sapply(0:10, vv, c800_10),
n20 = sapply(0:10, vv, c800_20),
what = "800 input (305 mutations)",
n = 0:10
))
# Make a plot of the CDF-like visualization
mdf <- reshape2::melt(df, id.vars = c("what", "n"))
p_out <- ggplot(mdf %>% dplyr::filter(variable != "n20"), aes(x = n, y = value, color = variable)) +
facet_wrap(~what, nrow = 1) +
geom_point(size = 0.8) + geom_line() +
pretty_plot(fontsize = 8) + L_border() +
theme(legend.position = "none") +
scale_x_continuous(breaks = c(0:10)) +
labs(x = "# of mutations per cell", y = "% of cells with mutations") +
scale_color_manual(values = c("firebrick", "black", "dodgerblue3", "purple3"))
cowplot::ggsave2(p_out, file = "../plots/cells_CDF_like.pdf", width = 3.5, height = 1.8)
# For the callout in the figure
df %>% filter(n == 1)
|
75ecdf0e0166ab27ac907cc0ef990d2753e25a1d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/TSA/examples/cref.bond.Rd.R
|
0eb92e7a458a306ca5473813c2242238847b6571
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 183
|
r
|
cref.bond.Rd.R
|
library(TSA)
### Name: cref.bond
### Title: Daily CREF Bond Values
### Aliases: cref.bond
### Keywords: datasets
### ** Examples
data(CREF)
## maybe str(CREF) ; plot(CREF) ...
|
0568f3414064a6438b2707d206540acb71d518b6
|
e219b6709cd236e1607eca875fbbf67246f7b1b3
|
/R/chipStatMatrixInverterNoise.R
|
2d2edbd3ae327a7fb60d48f1f97a2203955fe64b
|
[] |
no_license
|
lawrennd/chipdyno
|
d9d16cd124e7227bbe5f17986a21f2567658362b
|
395390b85b4ecef533ff538789003cf98c033483
|
refs/heads/master
| 2020-12-25T18:17:43.480194
| 2015-05-25T19:54:14
| 2015-05-25T19:54:14
| 35,702,393
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,768
|
r
|
chipStatMatrixInverterNoise.R
|
# CHIPSTATMATRIXINVERTERNOISE inverts block tridiagonal matrices for chipChip
# CHIPDYNO toolbox
# chipStatMatrixInverterNoise.R version 1.0.1
# FORMAT chipStatMatrixInverterNoise <- function(Sigma, gamma, beta, precs, x, npts)
# DESC inverts block tridiagonal matrices for chipChip
# ARG Sigma : prior covariance matrix of TFA
# ARG gamma : degree of temporal continuity
# ARG beta :
# ARG precs : uncertainty of the expression level
# ARG x : connectivity measurement between genes and transcription factors
# ARG npts : number of transcription factors
# RETURN f : inverted block tridiagonal matrices
# COPYRIGHT : Neil D. Lawrence, 2006
# COPYRIGHT : Guido Sanguinetti, 2006
# MODIFICATIONS : Muhammad A. Rahman, 2013
# SEEALSO : chipStatMatrixInverter
chipStatMatrixInverterNoise <- function(Sigma, gamma, beta, precs, x, npts){
lambda = t(x) %*% Sigma %*% x
lambda = as.vector(lambda)
nTrans = length(x)
Y = Sigma %*% x
factor=cos(gamma)^2
UcoeffInvSigma=mat.or.vec(1,npts)
UcoeffXXT=mat.or.vec(1,npts)
LcoeffId=mat.or.vec(1,npts-1) # computes the LU decomposition exploiting the simple block-tridiagonal structure
LcoeffXYT=mat.or.vec(1,npts-1)
UcoeffXXT[1]=(beta^-2+precs[1]^-1)^-1
UcoeffInvSigma[1]=(1-factor^2)^-1
LcoeffXYT[1]=factor*(1-factor^2)^-1*(beta^-2+precs[1]^-1)^-1/
(UcoeffInvSigma[1]* (UcoeffInvSigma[1]+(beta^-2+precs[1]^-1)^-1*lambda));
LcoeffId[1]=-factor
for (i in 2:(npts-1)){
UcoeffXXT[i]=(beta^-2+precs[i]^-1)^-1+factor*(1-factor^2)^-1*LcoeffXYT[(i-1)]
UcoeffInvSigma[i]=(1+factor^2)*(1-factor^2)^-1+
factor*(1-factor^2)^-1*LcoeffId[(i-1)]
LcoeffXYT[i]=factor*(1-factor^2)^-1*UcoeffXXT[i]/
(UcoeffInvSigma[i]*(UcoeffInvSigma[i]+UcoeffXXT[i]*lambda))
LcoeffId[i]=-factor*(1-factor^2)^-1*UcoeffInvSigma[i]^-1
}
UcoeffXXT[ncol(UcoeffXXT)]=(beta^-2+precs[length(precs)]^-1)^-1+
factor*(1-factor^2)^-1*LcoeffXYT[ncol(LcoeffXYT)]
UcoeffInvSigma[ncol(UcoeffInvSigma)]=(1-factor^2)^-1+
factor*(1-factor^2)^-1*LcoeffId[ncol(LcoeffId)]
#%lambda=Y'*x;
invL.XYT=mat.or.vec(npts,npts); #%computes the inverse of the L bit
invL.Id=mat.or.vec(npts,npts)
for (i in 1:npts){
invL.Id[i,i]=1
}
for (i in 2:npts) {
invL.Id[i,(i-1)]=(-1)^(2*i-1)*LcoeffId[i-1]
invL.XYT[i,(i-1)]=(-1)^(2*i-1)*LcoeffXYT[i-1]
}
for (i in 3 : npts) {
for (j in 1:(i-2)){
invL.Id[i,j]=invL.Id[(i-1),j]*invL.Id[i,(i-1)]
invL.XYT[i,j]=(invL.Id[(i-1),j]*invL.XYT[i,(i-1)]+
invL.Id[i,(i-1)]*invL.XYT[(i-1),j]+
invL.XYT[i,(i-1)]*invL.XYT[(i-1),j]*lambda)
}
}
invU.Sigma=mat.or.vec(npts,npts)
invU.YYT=mat.or.vec(npts,npts)
for (i in 1:(npts-1)){
invU.Sigma[i,i]=-LcoeffId[i]*(1-factor^2)/factor
invU.YYT[i,i]=-LcoeffXYT[i]*(1-factor^2)/factor
}
invU.Sigma[nrow(invU.Sigma),ncol(invU.Sigma)]=
UcoeffInvSigma[length(UcoeffInvSigma)]^-1
invU.YYT[nrow(invU.YYT),ncol(invU.YYT)]=-UcoeffXXT[length(UcoeffXXT)]/
(UcoeffInvSigma[length(UcoeffInvSigma)]*
(UcoeffInvSigma[length(UcoeffInvSigma)]+
UcoeffXXT[length(UcoeffXXT)]*lambda))
for (i in 1:(npts-1)){
for (j in 1:i){
invU.Sigma[(npts-i),(npts-j+1)]=factor*(1-factor^2)^-1*
invU.Sigma[(npts-i+1),(npts-j+1)]*invU.Sigma[(npts-i),(npts-i)];
invU.YYT[(npts-i),(npts-j+1)]=factor*(1-factor^2)^-1*
(invU.Sigma[(npts-i+1),(npts-j+1)]*invU.YYT[(npts-i),(npts-i)]+
invU.Sigma[(npts-i),(npts-i)]*invU.YYT[(npts-i+1),(npts-j+1)]+
invU.YYT[(npts-i+1),(npts-j+1)]*
invU.YYT[(npts-i),(npts-i)]*lambda)
}
}
invC.Sigma=mat.or.vec(npts,npts); #%computes the inverses of the matrix;
invC.YYT=mat.or.vec(npts,npts)
for (i in 1:npts) {
for (j in 1:npts) {
invC.Sigma[i,j]=invU.Sigma[i,]%*%invL.Id[,j]
invC.YYT[i,j]=invU.Sigma[i,]%*%invL.XYT[,j]+invU.YYT[i,]%*% invL.Id[,j]+
lambda*invU.YYT[i,]%*%invL.XYT[,j]
}
}
invC <- list(invC.Sigma, invC.YYT)
return(invC)
}
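# Illustrative call (a minimal sketch only; the toy sizes and parameter values
# below are assumptions for demonstration, not settings from the CHIPDYNO toolbox):
nGenes <- 3; npts <- 4
Sigma  <- diag(nGenes)                  # prior covariance of the TFA
x      <- matrix(c(1, 0, 1), ncol = 1)  # gene/TF connectivity measurements
precs  <- rep(1, npts)                  # expression-level uncertainties
invC   <- chipStatMatrixInverterNoise(Sigma, gamma = 0.1, beta = 1,
                                      precs = precs, x = x, npts = npts)
str(invC)  # list with the two inverted blocks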
|
41b22b962c8095258e67ec0da69bd33cdd28a38e
|
4eea2102c32e055a785dd2903e4bad1a4ad48994
|
/00c-createSexInfo.R
|
b0f4a84ec37c1eb5e3cba45a641f75a2c8167735
|
[] |
no_license
|
mrparker909/PNCgwasWorkflow
|
e867efbf0c48c0b857c1ffa0fb104e4b21a9bbe9
|
e5144a4796637594857fcc9ede3da451c1db62bc
|
refs/heads/master
| 2020-07-04T21:20:00.111637
| 2019-10-07T18:08:12
| 2019-10-07T18:08:12
| 202,420,366
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 441
|
r
|
00c-createSexInfo.R
|
library(dplyr)
inp <- "/zfs3/scratch/saram_lab/PNC/data/phenotype/FID_IID_Neurodevelopmental_Genomics_Subject_Phenotypes.GRU-NPU.txt"
out <- "/zfs3/scratch/saram_lab/PNC/data/phenotype/subjectSexInfo.txt"
df <- read.csv(inp, header = TRUE, stringsAsFactors = FALSE, quote = "", sep = " ", skip=0)
df2 <- dplyr::select(df,FID,IID,Sex)
print("Sex Summary:")
table(df2$Sex)
write.table(df2, out, sep=" ", row.names = F,col.names=F, quote=F)
|
47c24694187acbe821a15c470b4180a64f8914e1
|
23d151378bdccd1a2ffc7bf89647d2150c125738
|
/Practica2/Ejercicio3.R
|
544023ff97f5772a53d4a8bc37ae198dd0b450ef
|
[] |
no_license
|
JhordanSalvatierra/Estadistica-2017-II
|
a54296e126710049836762afa706c5522badc8fb
|
66af4890e824c7ed595a0b1a88f8ae6604b30f83
|
refs/heads/master
| 2021-01-23T21:23:31.930824
| 2017-12-10T21:33:58
| 2017-12-10T21:33:58
| 102,897,774
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 685
|
r
|
Ejercicio3.R
|
# Salvatierra Ramos Jhordan 20112152A
# Exercise 3
# Define the function nth with parameters x and n
nth <- function(x, n){
  # Store in a vector "y" the indices of the vector x whose values are TRUE
  y <- which(x, TRUE)
  # If "y" has at least n elements, return y[n], which is the index
  # of the n-th TRUE value in x
  if(n <= length(y)) return(y[n])
  # If n is greater than the length of "y", return NA
  else return(NA)
}
# Test our function using the example shown in the worksheet
x <- c(1, 2, 4, 2, 1, 3)
# Returns the value 6
nth(x > 2, 2)
# Returns NA
nth(x > 4, 2)
|
bd00e90ef44e39f49582176c17976a2d7bb1ddab
|
5cfac25a4691dfd30bc6d5d6faf804c404e2ae13
|
/Plot2.R
|
cf38d9de044f209f7c542fb53bc682b40301a390
|
[] |
no_license
|
marce25cl/Plots
|
1266f93ba198c5f55c38ca57d3e3b5882e3fadd0
|
e95c75ce089b233aa3d69c497162bb984cb7cd4f
|
refs/heads/master
| 2020-12-24T21:28:12.280276
| 2016-04-16T00:56:54
| 2016-04-16T00:56:54
| 56,358,067
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 641
|
r
|
Plot2.R
|
## Plot2
##Reading the data and subseting the dates 2007-02-01 and 2007-02-02
datos = read.table("household_power_consumption.txt", header=TRUE,
sep=";", na.strings = "?")
filtrados <- datos[datos$Date %in% c("1/2/2007","2/2/2007") ,]
##Converting the Date and Time variables to Date/Time classes (dmy_hms comes from lubridate)
library(lubridate)
filtrados$Date_Time = dmy_hms(paste(filtrados$Date, filtrados$Time))
##Saving plot to a PNG file
png(file="plot2.png",width=480,height=480, units="px")
##Making plot
plot(filtrados$Date_Time, filtrados$Global_active_power,
type="l", ylab="Global Active Power (kilowatts)", xlab="")
##Closing the device
dev.off()
|
7bb3eeded18ffca8ab5fded250ea2031ee2b5bb8
|
1d80ea56e9759f87ef9819ed92a76526691a5c3b
|
/R/pooled.R
|
0d4f5890677e1fc0f51e99f8783696ade81e36d2
|
[] |
no_license
|
cran/effectsize
|
5ab4be6e6b9c7f56d74667e52162c2ca65976516
|
e8baef181cc221dae96f60b638ed49d116384041
|
refs/heads/master
| 2023-08-16T21:23:58.750452
| 2023-08-09T18:40:02
| 2023-08-09T19:30:51
| 236,590,396
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,303
|
r
|
pooled.R
|
#' Pooled Indices of (Co)Deviation
#'
#' The Pooled Standard Deviation is a weighted average of standard deviations
#' for two or more groups, *assumed to have equal variance*. It represents the
#' common deviation among the groups, around each of their respective means.
#'
#' @inheritParams cohens_d
#' @inheritParams stats::mad
#'
#' @details
#' The standard version is calculated as:
#' \deqn{\sqrt{\frac{\sum (x_i - \bar{x})^2}{n_1 + n_2 - 2}}}{sqrt(sum(c(x - mean(x), y - mean(y))^2) / (n1 + n2 - 2))}
#' The robust version is calculated as:
#' \deqn{1.4826 \times Median(|\left\{x - Median_x,\,y - Median_y\right\}|)}{mad(c(x - median(x), y - median(y)), constant = 1.4826)}
#'
#' @return Numeric, the pooled standard deviation. For `cov_pooled()` a matrix.
#'
#' @examples
#' sd_pooled(mpg ~ am, data = mtcars)
#' mad_pooled(mtcars$mpg, factor(mtcars$am))
#'
#' cov_pooled(mpg + hp + cyl ~ am, data = mtcars)
#'
#' @seealso [cohens_d()], [mahalanobis_d()]
#'
#' @export
sd_pooled <- function(x, y = NULL, data = NULL, verbose = TRUE, ...) {
data <- .get_data_2_samples(x, y, data, verbose = verbose, ...)
x <- data[["x"]]
y <- data[["y"]]
V <- cov_pooled(
data.frame(x = x),
data.frame(x = y)
)
as.vector(sqrt(V))
}
#' @rdname sd_pooled
#' @export
mad_pooled <- function(x, y = NULL, data = NULL,
constant = 1.4826,
verbose = TRUE, ...) {
data <- .get_data_2_samples(x, y, data, verbose = verbose, ...)
x <- data[["x"]]
y <- data[["y"]]
n1 <- length(x)
n2 <- length(y)
Y <- c(x, y)
G <- rep(1:2, times = c(n1, n2))
Yc <- Y - stats::ave(Y, factor(G), FUN = stats::median)
stats::mad(Yc, center = 0, constant = constant)
}
#' @rdname sd_pooled
#' @export
cov_pooled <- function(x, y = NULL, data = NULL,
verbose = TRUE, ...) {
data <- .get_data_multivariate(x, y, data = data, verbose = verbose)
x <- data[["x"]]
y <- data[["y"]]
n1 <- nrow(x)
n2 <- nrow(y)
Y <- rbind(x, y)
G <- rep(1:2, times = c(n1, n2))
Yc <- lapply(Y, function(.y) .y - stats::ave(.y, factor(G), FUN = mean))
Yc <- as.data.frame(Yc)
stats::cov(Yc) * (n1 + n2 - 1) / (n1 + n2 - 2)
}
# TODO Add com_pooled?
|
aa0da43c0138407468849170671a662374273d57
|
039de27358cf206fe9337ad9f05c55fa265a950e
|
/basal_script.R
|
444a98698544360883a6328350ab6b8868882452
|
[
"MIT"
] |
permissive
|
guillermodeandajauregui/cdre-miR-BrCanSub
|
e395354b66e48d6617ec9460b9b80004c1198aea
|
046d4a571ae3e7d31985b123cb8e42d5a50d70e9
|
refs/heads/master
| 2021-07-06T21:50:01.784281
| 2020-09-17T14:40:08
| 2020-09-17T14:40:08
| 180,678,357
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 839
|
r
|
basal_script.R
|
########
#testing
########
#libraries
library(dplyr)   # bind_rows() used below (may also be attached by functions_mi.R)
library(readr)   # write_tsv() used below
source("libs/functions_mi.R")
#paths
path_mir <- "basal/basal_FPKM.tsv"
path_rna <- "basal/basal_mirna_rpmmm.tsv"
#read data
mir <- as.data.frame(readr::read_tsv(path_mir))
rna <- as.data.frame(readr::read_tsv(path_rna))
#discretizing
tempus <- proc.time()
d.mir <- par_discretizer(mir, korez = 10)
tempus <- proc.time() - tempus
print(tempus)
tempus <- proc.time()
d.rna <- par_discretizer(rna, korez = 10)
tempus <- proc.time() - tempus
print(tempus)
#mi calculating
tempus <- proc.time()
mirXrna <- par_mi_calc(sources = d.mir,
targets = d.rna,
korez = 10)
tempus <- proc.time() - tempus
print(tempus)
mi_matrix <- bind_rows(!!!mirXrna, #explicit splicing
.id = "mirna/gen")
write_tsv(mi_matrix, "basal_mi.tsv")
|
c310dffbc0de043662cf1f649babd8e57c5b1143
|
5c2374557193bd5a741aa36bf44532dc462003ae
|
/man/Span.Rd
|
cd252b5d4ac7b929653fee72df0145192426bf57
|
[] |
no_license
|
andrie/pandocfilters
|
fab302e760702ec437188bb0da23a4b92d08c255
|
2fa1f1ee40168c4ccb6ac5c294877b5cc8349c53
|
refs/heads/master
| 2021-09-14T11:06:43.009929
| 2018-02-23T20:00:03
| 2018-02-23T20:00:03
| 105,012,941
| 0
| 1
| null | 2017-09-27T12:02:42
| 2017-09-27T12:02:41
| null |
UTF-8
|
R
| false
| true
| 965
|
rd
|
Span.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inline_elements.R
\name{Span}
\alias{Span}
\title{Generic Inline Container with Attributes.}
\usage{
Span(attr, inline)
}
\arguments{
\item{attr}{an object of type \link{Attr}}
\item{inline}{an inline object or a list of inline objects which will be shown}
}
\description{
A constructor of an inline object of type \code{"Span"}.
}
\examples{
attr <- Attr("A", "B", list(c("C", "D")))
Span(attr, "some inline string")
}
\seealso{
Other Inline element constructors: \code{\link{Cite}},
\code{\link{Code}}, \code{\link{Emph}},
\code{\link{Image}}, \code{\link{LineBreak}},
\code{\link{Link}}, \code{\link{Math}},
\code{\link{Note}}, \code{\link{Quoted}},
\code{\link{RawInline}}, \code{\link{SmallCaps}},
\code{\link{SoftBreak}}, \code{\link{Space}},
\code{\link{Strikeout}}, \code{\link{Strong}},
\code{\link{Str}}, \code{\link{Subscript}},
\code{\link{Superscript}}
}
|
c333d9f40df1f5c3eba4e12c6f5cafd23e25ecdc
|
29b5648126bc3fa4ba28e3691e6f8b62a9b63c51
|
/man/BootAtlantaCorr.Rd
|
583d6869129fea0a222e65ce5d43f55783f9696f
|
[] |
no_license
|
statmanrobin/Lock5Data
|
37fee8e729dc538757959354a503d612fd384892
|
db7f829192fb61cbf7cbfb409245e70e51b196e6
|
refs/heads/master
| 2021-01-10T20:03:17.273933
| 2013-08-07T17:22:29
| 2013-08-07T17:22:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 718
|
rd
|
BootAtlantaCorr.Rd
|
\name{BootAtlantaCorr}
\alias{BootAtlantaCorr}
\docType{data}
\title{
Bootstrap Correlations for Atlanta Commutes
}
\description{
Bootstrap correlations between Time and Distance for 500 commuters in Atlanta
}
\usage{data(BootAtlantaCorr)}
\format{
A data frame with 1000 observations on the following variable.
\describe{
\item{\code{CorrTimeDist}}{Correlation between Time and Distance for a bootstrap sample of Atlanta commuters}
}
}
\details{
Correlations for bootstrap samples of Time vs. Distance for the data on Atlanta commuters in CommuteAtlanta.
}
\source{
Computer simulation
}
\examples{
data(BootAtlantaCorr)
}
\keyword{datasets}
|
de6291a2c753baf20167108fba50fa0219e569a2
|
3fefe890b546e1b9cbdc6daeed56f9ee121bbfd1
|
/R/agenda.R
|
91e904517b9f388e056b40c5a7ff72741ce7329f
|
[] |
no_license
|
analytics-ufcg/rcongresso
|
0cc0078aebbdd57047e1d21c93e56b60128d2fd0
|
d34877d8f7e7ef4da1ad9053d5391f9be02c2828
|
refs/heads/master
| 2021-12-24T07:32:39.539758
| 2021-10-18T14:45:43
| 2021-10-18T14:45:43
| 100,041,012
| 53
| 12
| null | 2021-06-28T18:06:16
| 2017-08-11T14:37:06
|
R
|
UTF-8
|
R
| false
| false
| 10,808
|
r
|
agenda.R
|
#' @title Get the Senate's schedule
#' @description Return a list with 3 dataframes: schedule, bills and speakers. All the dfs contain a column named
#' codigo_sessao
#' @param initial_date initial date yyyy-mm-dd
#' @return list
#' @examples
#' \dontrun{
#' fetch_agenda_senado('2018-07-03')
#' }
#' @rdname fetch_agenda_senado
fetch_agenda_senado <- function(initial_date) {
url <- paste0(.AGENDA_SENADO_PATH, gsub('-','', initial_date))
json_proposicao <- .senado_api(url, asList = T)
if (is.null(json_proposicao$AgendaPlenario)) {
return(list(agenda = tibble::as_tibble(), materias = tibble::as_tibble(), oradores = tibble::as_tibble()))
}
agenda <-
json_proposicao$AgendaPlenario$Sessoes$Sessao
if (is.null(agenda)) {
return(list(agenda = tibble::as_tibble(), materias = tibble::as_tibble(), oradores = tibble::as_tibble()))
}
agenda <-
agenda %>%
rename_table_to_underscore() %>%
tibble::as_tibble()
descricoes_inuteis <- c('SESSAO SOLENE', 'SESSAO NAO DELIBERATIVA', 'NAO HAVERA SESSAO', 'SESSAO ESPECIAL')
agenda <-
agenda %>%
dplyr::filter(!(iconv(c(tipo_sessao), from="UTF-8", to="ASCII//TRANSLIT") %in% descricoes_inuteis))
materia <- tibble::tibble()
if('materias_materia' %in% names(agenda)) {
materia <- purrr::map_df(agenda$materias_materia, dplyr::bind_rows, .id = "codigo_sessao")
materia_not_null <-
agenda %>%
dplyr::filter(materias_materia != "NULL")
num_de_materias <-
materia %>%
dplyr::group_by(codigo_sessao) %>%
dplyr::summarise(id = 0)
num_de_materias$id <- materia_not_null$codigo_sessao
materia <-
merge(materia, num_de_materias) %>%
dplyr::select(-codigo_sessao) %>%
dplyr::rename("codigo_sessao" = id) %>%
rename_table_to_underscore()
}
oradores <- tibble::tibble()
if(nrow(agenda) != 0 && 'oradores_tipo_orador_orador_sessao_orador' %in% names(agenda)) {
oradores <- purrr::map_df(agenda$oradores_tipo_orador_orador_sessao_orador, dplyr::bind_rows, .id = "codigo_sessao")
oradores_not_null <-
agenda %>%
dplyr::filter(oradores_tipo_orador_orador_sessao_orador != "NULL")
num_de_oradores <-
oradores %>%
dplyr::group_by(codigo_sessao) %>%
dplyr::summarise(id = 0)
num_de_oradores$id <- oradores_not_null$codigo_sessao
oradores <-
merge(oradores, num_de_oradores) %>%
dplyr::select(-codigo_sessao) %>%
dplyr::rename("codigo_sessao" = id) %>%
rename_table_to_underscore()
}
agenda <- list(agenda = agenda, materias = materia, oradores = oradores)
}
#' @title Dataframe with the Senate schedule
#' @description Return a dataframe with the Senate schedule
#' @param initial_date initial date yyyy-mm-dd
#' @param end_date end date yyyy-mm-dd
#' @return Dataframe
#' @examples
#' \dontrun{
#' .get_data_frame_agenda_senado('2016-05-15', '2016-05-25')
#' }
.get_data_frame_agenda_senado <- function(initial_date, end_date) {
url <-
paste0(.AGENDA_SENADO_COMISSOES, gsub('-','', initial_date), "/", gsub('-','', end_date))
json_proposicao <- .senado_api(url, asList = T)
agenda_senado <- json_proposicao$AgendaReuniao$reunioes$reuniao
if (!is.null(agenda_senado)) {
agenda_senado <- agenda_senado %>%
rename_table_to_underscore() %>%
dplyr::filter(situacao != 'Cancelada')
} else {
agenda_senado <- tibble::as_tibble()
}
agenda_senado
}
#' @title Senate Commissions schedule
#' @description Return a dataframe with the Senate's Commissions schedule
#' @param initial_date initial date yyyy-mm-dd
#' @param end_date end date yyyy-mm-dd
#' @return Dataframe
#' @examples
#' \dontrun{
#' fetch_agenda_senado_comissoes('2016-05-15', '2016-05-25')
#' }
fetch_agenda_senado_comissoes <- function(initial_date, end_date) {
tipos_inuteis <- c('Outros eventos', 'Reuniao de Subcomissao')
agenda <-
.get_data_frame_agenda_senado(initial_date, end_date)
if ("tipo_descricao" %in% names(agenda)) {
agenda <- agenda %>%
dplyr::filter(!(iconv(c(tipo_descricao), from="UTF-8", to="ASCII//TRANSLIT") %in% tipos_inuteis))
}
agenda <- agenda %>%
dplyr::distinct(codigo, .keep_all = TRUE)
if (nrow(agenda) != 0) {
if ("partes" %in% names(agenda)) {
agenda <-
agenda %>%
dplyr::mutate(id_proposicao = purrr::map(partes, ~ .get_id_proposicao_agenda_senado_comissoes(.))) %>%
dplyr::mutate(nome = purrr::map(partes, ~ .get_nome_proposicao_agenda_senado_comissoes(.))) %>%
dplyr::filter(id_proposicao != "")
if (nrow(agenda) != 0) {
agenda <-
agenda %>%
dplyr::rowwise() %>%
dplyr::mutate(local = strsplit(descricao, ",")[[1]][[1]]) %>%
dplyr::select(c(data_inicio, nome, id_proposicao, local)) %>%
dplyr::mutate(id_proposicao = strsplit(as.character(id_proposicao), ",")) %>%
dplyr::mutate(nome = strsplit(as.character(nome), ",")) %>%
tidyr::unnest(cols = c(nome)) %>%
tidyr::unnest(cols = c(id_proposicao)) %>%
dplyr::mutate(data = lubridate::ymd_hms(data_inicio, tz = "America/Sao_Paulo")) %>%
dplyr::select(data, nome, id_proposicao, local) %>%
dplyr::distinct(data, nome, id_proposicao, local) %>%
dplyr::filter(nome != "")
} else {
return(tibble::tibble(data = double(), sigla = character(), id_proposicao = character(), local = character()))
}
}else {
agenda <-
agenda %>%
dplyr::filter(partes_parte_tipo == "Deliberativa")
if (nrow(agenda) != 0) {
agenda <-
agenda %>%
dplyr::mutate(id_proposicao = purrr::map(partes_parte_itens_item, ~ .$Codigo)) %>%
dplyr::mutate(nome = purrr::map(partes_parte_itens_item, ~ .$Nome)) %>%
dplyr::select(data, hora, id_proposicao, nome, titulo_da_reuniao) %>%
tidyr::unnest() %>%
dplyr::rowwise() %>%
dplyr::mutate(local = strsplit(titulo_da_reuniao, ",")[[1]][[1]]) %>%
dplyr::mutate(data = lubridate::dmy_hm(paste(data, hora))) %>%
dplyr::select(c(data, nome, id_proposicao, local))
}else {
return(tibble::tibble(data = double(), sigla = character(), id_proposicao = character(), local = character()))
}
}
new_names <- c("data", "sigla", "id_proposicao", "local")
names(agenda) <- new_names
agenda %>%
dplyr::arrange(data)
}else {
tibble::tibble(data = double(), sigla = character(), id_proposicao = character(), local = character())
}
}
#' @title Extract the proposition id
#' @description Receives a list from the Senate schedule and returns the ids of the propositions that are on the agenda ('pauta')
#' @param lista_com_id list that has the id
#' @return char
.get_id_proposicao_agenda_senado_comissoes <- function(lista_com_id){
id <- ""
if("Deliberativa" %in% (lista_com_id %>% dplyr::pull(descricaoTipo))) {
if (!is.null(lista_com_id$itens.item)) {
id <- purrr::map_chr(lista_com_id$itens.item, ~ paste(.$doma.codigoMateria, collapse = ","))
} else {
itens <- lista_com_id$itens[[1]]
if (!is.null(itens) && nrow(itens) > 0 && ("doma.codigoMateria" %in% names(itens))) {
id <- purrr::map_chr(lista_com_id$itens, ~ paste(.$doma.codigoMateria, collapse = ","))
} else {
id <- purrr::map_chr(lista_com_id$itens, ~ paste(NA, collapse = ","))
}
}
}
paste(id, collapse = ",")
}
#' @title Extract proposition name
#' @description Receives a list from the Senate schedule and returns the names of the propositions that are on the agenda ('pauta')
#' @param lista_com_nome list that has the name
#' @return char
.get_nome_proposicao_agenda_senado_comissoes <- function(lista_com_nome){
nome <- ""
if("Deliberativa" %in% (lista_com_nome %>% dplyr::pull(descricaoTipo))) {
if (!is.null(lista_com_nome$itens.item)) {
nome <- purrr::map_chr(lista_com_nome$itens.item, ~ paste(.$nome, collapse = ","))
} else {
itens <- lista_com_nome$itens[[1]]
if (!is.null(itens) && nrow(itens) > 0 && ("nome" %in% names(itens))) {
nome <- purrr::map_chr(lista_com_nome$itens, ~ paste(.$nome, collapse = ","))
} else {
nome <- purrr::map_chr(lista_com_nome$itens, ~ paste(NA, collapse = ","))
}
}
}
paste(nome, collapse = ",")
}
#' @title Get the schedule of the Chamber of Deputies
#' @description Return a dataframe with the meeting and session schedule of the Chamber of Deputies
#' @param initial_date initial date yyyy-mm-dd
#' @param end_date end date yyyy-mm-dd
#' @return Dataframe
#' @examples
#' \dontrun{
#' fetch_agenda_camara('2018-07-03', '2018-07-10')
#' }
#' @rdname fetch_agenda_camara
#' @export
fetch_agenda_camara <- function(initial_date, end_date) {
json_proposicao <- .camara_api(.AGENDA_CAMARA_PATH, query = list(
dataInicio = initial_date,
dataFim = end_date,
ordem = "ASC",
ordenarPor = "dataHoraInicio")
)
descricoes_inuteis <- c('Seminario', 'Diligencia', 'Sessao Nao Deliberativa de Debates', 'Reuniao de Instalacao e Eleicao',
'Outro Evento', 'Mesa Redonda', 'Sessao Nao Deliberativa Solene')
agenda <-
json_proposicao %>%
dplyr::filter(situacao != 'Cancelada' &
!(iconv(c(descricaoTipo), from="UTF-8", to="ASCII//TRANSLIT") %in% descricoes_inuteis)) %>%
tidyr::unnest(cols=c(orgaos), names_repair=tidyr::tidyr_legacy)
agenda %>%
dplyr::rowwise() %>%
dplyr::do(fetch_pauta_camara(
.$id, .$dataHoraInicio, .$dataHoraFim, .$sigla, .$nome) %>%
tibble::as_tibble()) %>%
unique() %>%
.assert_dataframe_completo(.COLNAMES_AGENDA_CAMARA) %>%
.coerce_types(.COLNAMES_AGENDA_CAMARA)
}
#' @title Get the agenda of a meeting
#' @description Return a dataframe with data about the agenda
#' @param id event id
#' @param hora_inicio inital time
#' @param hora_fim end time
#' @param sigla_orgao Acronym of the organ
#' @param nome_orgao Name of the organ
#' @return Dataframe
#' @examples
#' \dontrun{
#' fetch_pauta_camara('53184', '2018-07-03T10:00', '2018-07-03T12:37', 'CVT', 'Comissão de Viação e Transportes VIAÇÃO E TRANSPORTES')
#' }
#' @rdname fetch_pauta_camara
fetch_pauta_camara <- function(id, hora_inicio, hora_fim, sigla_orgao, nome_orgao) {
url <- paste0(.PAUTAS_CAMARA, id, "/pauta")
json_proposicao <- .camara_api(url)
json_proposicao %>%
tibble::as_tibble() %>%
dplyr::mutate(hora_inicio = hora_inicio,
hora_fim = hora_fim,
sigla_orgao = sigla_orgao,
nome_orgao = nome_orgao) %>%
.assert_dataframe_completo(.COLNAMES_PAUTA_CAMARA) %>%
.coerce_types(.COLNAMES_PAUTA_CAMARA)
}
|
8fb2e4b09fd91133e0fcce773969269bc3f8b918
|
f30dbbc23d2d5347b5053c2ac1cd58a9c6464f73
|
/Finalisingmodels.R
|
edf9d3ac465b73030b412c6dfcda9665fb61cb32
|
[] |
no_license
|
FiSykes/Soaydata1
|
3e11feafb1a053fe65b39e12f14d2f870fd7b410
|
6f2fbe4496e4704703d2b8b56e87577f9b82ade7
|
refs/heads/main
| 2023-03-29T13:36:35.534935
| 2021-04-02T14:52:08
| 2021-04-02T14:52:08
| 348,644,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,627
|
r
|
Finalisingmodels.R
|
# Adonis test
library(vegan)
### test out adonis 2
library(devtools)
Sys.setenv("R_REMOTES_NO_ERRORS_FROM_WARNINGS"=TRUE)
install_github("pmartinezarbizu/pairwiseAdonis/pairwiseAdonis")
library(pairwiseAdonis)
adonis.test2 <- adonis2(bray_ps_nemadults ~ Season + Sex + Id,
by="margin",
data = metadf.bxadults)
# reg adonis
adonis.testadultsmulti<- adonis(bray_ps_nemadults ~ Id + Season + Sex, data = metadf.bxadults)
R2 <- adonis.test.soay$aov.tab$R2
Terms <- adonis.test.soay$aov %>% row.names()
# adonis2
R2 <- adonis.test2$R2
Terms <- adonis.test2%>% row.names()
adonisDFadults <- data.frame(R2, Terms) %>%
filter(Terms!="Total")
adonisDFadults %>%
ggplot(aes(x=1, y=R2, fill=Terms)) +
geom_bar(stat="identity") +
labs(x='Variance Explained') +
scale_fill_brewer(palette = "Spectral") +
theme_bw(base_size=14) +
theme(axis.text.x = element_blank())
adonis.test.soay.pair <- pairwise.adonis2(bray_ps.bx.soay ~ Season,
by="margin",
data = metadf.bx.soay)
## this is not working yet, but should once the data can be reformatted - DESeq2 is a good alternative
coef <- coefficients(adonis.testadultssex)["Sex",]
top.coef <- coef[rev(order(abs(coef)))[1:20]] #error in evaluating the argument 'x' in selecting a method for function 'rev': non-numeric argument to mathematical function
par(mar = c(3, 14, 2, 1))
barplot(sort(top.coef), horiz = T, las = 1, main = "Top taxa")
##deseq
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("DESeq2")
library(DESeq2)
ds2 <- phyloseq_to_deseq2(ps_nem_filt_repeatsrem, ~ Sex)
diagdds = DESeq(ds2, test="Wald", fitType="parametric") #alternative to Run DESeq analysis, comes up with same error. Sourced from https://joey711.github.io/phyloseq-extensions/DESeq2.html
#calculate geometric means prior to estimate size factors
#following is sourced here https://bioconductor.org/packages/devel/bioc/vignettes/phyloseq/inst/doc/phyloseq-mixture-models.html
gm_mean = function(x, na.rm=TRUE){
exp(sum(log(x[x > 0]), na.rm=na.rm) / length(x))
}
geoMeans = apply(counts(ds2), 1, gm_mean)
ds2 = estimateSizeFactors(ds2, geoMeans = geoMeans) #comes up with Error in .local(object, ..., value) : all(!is.na(value)) is not TRUE
ds2 = DESeq(ds2, fitType="local")
#comes up with error, in estimateSizeFactorsForMatrix(counts(object), locfunc = locfunc, :
#every gene contains at least one zero, cannot compute log geometric means
#another alternative found here https://support.bioconductor.org/p/62246/#62250
ds2 <- ds2[ rowSums(counts(ds2)) > 5, ]
cts <- counts(ds2)
geoMeans <- apply(cts, 1, function(row) if (all(row == 0)) 0 else exp(mean(log(row[row != 0]))))
ds2 <- estimateSizeFactors(ds2, geoMeans=geoMeans) #Error in .local(object, ..., value) : all(!is.na(value)) is not TRUE
# Run DESeq2 analysis (all taxa at once!)
dds <- DESeq(ds2) # comes up with error Error in estimateSizeFactorsForMatrix(counts(object), locfunc = locfunc, :
#every gene contains at least one zero, cannot compute log geometric means
# Investigate results
deseq.results <- as.data.frame(results(dds))
deseq.results$taxon <- rownames(results(dds))
# Sort (arrange) by pvalue and effect size
library(knitr)
deseq.results <- deseq.results %>%
arrange(pvalue, log2FoldChange)
# Print the result table
# Let us only show significant hits
knitr::kable(deseq.results %>%
filter(pvalue < 0.05 & log2FoldChange > 1.5),
digits = 5)
|
dddc1d61affa0ae5263159d378c19a83d64c9b57
|
0473ced366f12bb1050ee018f74da22f535f607e
|
/1c.Random Forest.R
|
91e5e1ab31602114e5679fe7b601a7643534960a
|
[] |
no_license
|
MadhuTanna/madhutanna
|
9a8227233f56ff04b3f6ad356b358a451b48b412
|
b405cc3fd3a532fac9d74a9b573fcdb9abcc9e87
|
refs/heads/master
| 2020-03-22T17:20:34.320618
| 2019-05-16T18:52:49
| 2019-05-16T18:52:49
| 140,389,613
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,634
|
r
|
1c.Random Forest.R
|
library(dplyr)
library(glmnet)
library(caret)
#setwd("C:/Users/mtanna108360/Downloads/Downloads/Data Science/Proschool/Term 3 Recordings/Project")
setwd("C:/Users/MADHU/Downloads/Data Science/Term 3 Project")
custdata=read.csv("term3.csv")
head(custdata)
glimpse(custdata) ## like str
summary(custdata)
cust_data_table = tbl_df(custdata)
cust_data_table = na.omit(cust_data_table)
cust_data_table = cust_data_table[,-1]
NROW(cust_data_table)
str(cust_data_table)
set.seed(1)
train = sample_frac(cust_data_table, 0.7)
test = setdiff(cust_data_table,train)
str(cust_data_table)
NROW(train)
NROW(test)
predicators_train = model.matrix(train$Reached.on.Time_Y.N~.,train)[,-train$Reached.on.Time_Y.N] ## IV
dependent_train = train$Reached.on.Time_Y.N
predicators_test = model.matrix(test$Reached.on.Time_Y.N~.,test)[,-test$Reached.on.Time_Y.N] ## IV
dependent_test = test$Reached.on.Time_Y.N
#RF
library(randomForest)
modelrf <- randomForest(Reached.on.Time_Y.N ~ . , data = train, do.trace=T)
modelrf
importance(modelrf)
varImpPlot(modelrf)
predrf_test <- predict(modelrf, newdata = test)
predrf = ifelse(predrf_test>0.5,1,0)
cm_rf = confusionMatrix(test$Reached.on.Time_Y.N,predrf)
cm_rf
#XGBOOST
library(xgboost)
set.seed(1)
dependent_train_xgboost <- train$Reached.on.Time_Y.N
dependent_test_xgboost <- test$Reached.on.Time_Y.N
dependent_train_xgboost <- as.numeric(dependent_train_xgboost)
dependent_test_xgboost <- as.numeric(dependent_test_xgboost)
train.mx <- sparse.model.matrix(Reached.on.Time_Y.N ~ ., train)
test.mx <- sparse.model.matrix(Reached.on.Time_Y.N ~ ., test)
dtrain <- xgb.DMatrix(train.mx, label = dependent_train_xgboost)
dtest <- xgb.DMatrix(test.mx, label = dependent_test_xgboost)
train.gdbt <- xgb.train(params = list(objective = "binary:logistic",
#num_class = 2,
eval_metric = "auc",
eta = 0.3,
max_depth = 5,
subsample = 1,
colsample_bytree = 0.5),
data = dtrain,
nrounds = 70,
watchlist = list(train = dtrain, test = dtest))
# Generate predictions on test dataset
preds_xg <- predict(train.gdbt, newdata = dtest)
# Compute AUC on the test set
cvAUC::AUC(predictions = preds_xg, labels = dependent_test_xgboost)
#model prediction
xgbpred <- ifelse(preds_xg > 0.5, 1, 0)
#confusion matrix
cm_xg = confusionMatrix (xgbpred, dependent_test_xgboost)
cm_xg
#view variable importance plot
mat <- xgb.importance (model = train.gdbt)
xgb.plot.importance (importance_matrix = mat[1:20])
#===============
library(FactoMineR)
# to use PCA function
library(factoextra)
str(cust_data_table)
new_cust_table = cust_data_table[,-11]
new_cust_table$Warehouse_block = as.numeric(cust_data_table$Warehouse_block)
new_cust_table$Mode_of_Shipment = as.numeric(cust_data_table$Mode_of_Shipment)
new_cust_table$Product_importance = as.numeric(cust_data_table$Product_importance)
new_cust_table$Gender = as.numeric(cust_data_table$Gender)
str(new_cust_table)
pca = prcomp(new_cust_table)
summary(pca)
#summary of PC1 to PC3 individually
#for a colorful plot
fviz_pca_var(pca,
col.var = "contrib", # Color by contributions to the PC
gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),
repel = TRUE # Avoid text overlapping
)
eig.val <- get_eigenvalue(pca)
eig.val
pca$eig
data(iris)
attach(iris)
## classification mode
library(e1071)   # provides svm()
# default with factor response:
model <- svm(Reached.on.Time_Y.N ~ ., data = train)
# alternatively the traditional interface:
x <- subset(train, select = -Reached.on.Time_Y.N)
y <- train$Reached.on.Time_Y.N
model <- svm(x, y)
print(model)
summary(model)
# test with train data
pred <- predict(model, x)
# (same as:)
pred <- fitted(model)
NROW(pred)
# Check accuracy:
table(pred, y)
# compute decision values and probabilities:
pred <- predict(model, x, decision.values = TRUE)
attr(pred, "decision.values")[1:4,]
# visualize (classes by color, SV by crosses):
plot(cmdscale(dist(train[,-11])),
col = as.integer(train[,11]),
pch = c("o","+")[1:150 %in% model$index + 1])
newdata = data.frame(cust_data_table$Reached.on.Time_Y.N,pca$x)
head(newdata)
svm_mod <- svm(cust_data_table.Reached.on.Time_Y.N ~ PC1+PC2, data = newdata)
func = predict(svm_mod,xgrid,decision.values = T)
#func = predict(svm_mod,predicators_train,decision.values = T)
func=attributes(func)$decision
x=seq(from = min(predicators_train[,14]), to = max(predicators_train[,14]), length = 100)
y=seq(from = min(predicators_train[,15]), to = max(predicators_train[,15]), length = 100)
contour(x,y,z=matrix(func,length(x),length(y)), add = TRUE)
dat_new_pred = data.frame(predicators_test, dependent_test = as.factor(dependent_test))
svm_pred = predict(svm_mod, newdata = dat_new_pred)
cm_svm = confusionMatrix(as.factor(dependent_test),svm_pred)
cm_svm
beta = drop(t(svm_mod$coefs)%*%pca$x[svm_mod$index,c(1,2)])
beta0 = svm_mod$rho
# two most big coefficients are Discount offered (positive) and weight in gms (-negative)
beta=sort(beta)
abline(beta0/beta[2], -beta[2]/beta[1])
abline((beta0 - 1) / beta[15], -beta[1] / beta[15], lty = 2)
abline((beta0 + 1) / beta[15], -beta[1] / beta[15], lty = 2)
plot(xgrid[,c(1,2)],col = c("red","green")[as.numeric(ygrid)], pch = 20, cex = .2)
points(pca$x[,c(1,2)], col = c("red","green")[as.numeric(predtrain)], pch = 18)
#Below shows the support vector
points(predicators_train[svm_mod$index,c(14,15)], pch = 20, cex = .2,col="yellow")
# ============================
data(iris)
attach(iris)
str(iris)
## classification mode
# default with factor response:
model <- svm(Species ~ ., data = iris)
# alternatively the traditional interface:
x <- subset(iris, select = -Species)
y <- Species
model <- svm(x, y)
print(model)
summary(model)
# test with train data
pred <- predict(model, x)
# (same as:)
pred <- fitted(model)
# Check accuracy:
table(pred, y)
# compute decision values and probabilities:
pred <- predict(model, x, decision.values = TRUE)
attr(pred, "decision.values")[1:4,]
# visualize (classes by color, SV by crosses):
plot(cmdscale(dist(iris[,-5])),
col = as.integer(iris[,5]),
pch = c("o","+")[1:150 %in% model$index + 1])
|
cd958de0f42f0b5033610fce91d697e4b55f250a
|
d2eda24acceb35dc11263d2fa47421c812c8f9f6
|
/create_data/make.eez.data.R
|
aba252e825e6811cdca5eeba1b873b411ea3bb84
|
[] |
no_license
|
tbrycekelly/TheSource
|
3ddfb6d5df7eef119a6333a6a02dcddad6fb51f0
|
461d97f6a259b18a29b62d9f7bce99eed5c175b5
|
refs/heads/master
| 2023-08-24T05:05:11.773442
| 2023-08-12T20:23:51
| 2023-08-12T20:23:51
| 209,631,718
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,185
|
r
|
make.eez.data.R
|
library(rgdal)
eez = readOGR('inst/extdata/eez_boundaries_v11.shp')
eez@data$SOVEREIGN1[is.na(eez@data$SOVEREIGN1)] = 'Other'
eez@data$SOVEREIGN2[is.na(eez@data$SOVEREIGN2)] = 'Other'
highseas = readOGR('inst/extdata/High_Seas_v1.shp')
eez.data = list()
for (name in unique(c(eez@data$SOVEREIGN1, eez@data$SOVEREIGN2))) {
if (!is.na(name)) {
l = which((name == eez@data$SOVEREIGN1 | name == eez@data$SOVEREIGN2) & eez@data$LINE_TYPE != 'Straight Baseline')
eez.data[[name]] = list()
for (i in 1:length(l)) {
k = l[i]
eez.data[[name]][[i]] = data.frame(lon = eez@lines[[k]]@Lines[[1]]@coords[,1],
lat = eez@lines[[k]]@Lines[[1]]@coords[,2])
}
}
}
eez.data[['Highseas']] = list()
for (i in 1:length(highseas@polygons[[1]]@Polygons)) {
for (j in 1:length(highseas@polygons[[1]]@Polygons[[i]])) {
eez.data[['Highseas']][[length(eez.data[['Highseas']]) + 1]] = data.frame(lon = highseas@polygons[[1]]@Polygons[[i]]@coords[,1],
lat = highseas@polygons[[1]]@Polygons[[i]]@coords[,2])
}
}
save(eez.data, file = 'data/eez.rdata')
|
5e4e5fbcbdfd73e5691b126391ccf7cfba1d501b
|
5cdfa6f09b7b1722d3b9610591d80edd79a0e999
|
/R/survival-tidiers.R
|
54b00d779eaae7e0ed1deb48f8ac8464ab871183
|
[] |
no_license
|
AndreMikulec/broom
|
2cf2527007682652526ba766f8352d82a5a80dac
|
d13b86da72c77f93ac0bf8dcda5a0f6531555455
|
refs/heads/master
| 2021-01-14T08:59:25.563995
| 2014-09-24T17:37:04
| 2014-09-24T17:38:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,928
|
r
|
survival-tidiers.R
|
# # tidying functions for the survival package
# # http://cran.r-project.org/web/packages/survival/index.html
#
# afit <- aareg(Surv(time, status) ~ age + sex + ph.ecog, data=lung,
# dfbeta=TRUE)
# summary(afit)
#
# tidy.aareg <- function(x, ...) {
# nn <- c("estimate", "statistic", "stderror", "robust.se", "z", "p.value")
# fix_data_frame(summary(x)$table, nn)
# }
#
#
# fit <- coxph(Surv(time, status) ~ age + sex, lung)
#
# tidy.coxph <- function(x, ...) {
# # decided not to include the exp(coef) values
# co <- coef(summary(fit))
# nn <- c("estimate", "stderror", "statistic", "p.value")
# fix_data_frame(co[, -2], nn)
# }
#
# fit1 <- survexp(futime ~ 1, rmap=list(sex="male", year=accept.dt,
# age=(accept.dt-birth.dt)), method='conditional', data=jasa)
#
# summary(fit1)
#
# tidy.survexp <- function(x, ...) {
# as.data.frame(summary(x)[c("time", "surv", "n.risk")])
# }
#
#
# fit <- coxph(Surv(time, status) ~ age + sex, lung)
# sfit <- survfit(fit)
#
# library(ggplot2)
# ggplot(tidy(sfit), aes(time, estimate)) + geom_line() + geom_ribbon(aes(ymin=conf.low, ymax=conf.high), alpha=.25)
#
# tidy.survfit <- function(x, ...) {
# ret <- as.data.frame(unclass(x)[c("time", "n.risk", "n.event",
# "n.censor", "cumhaz")])
# # give it names consistent with broom style
# ret <- cbind(ret, estimate=x$surv, stderror=x$std.err,
# conf.high=x$upper, conf.low=x$lower)
# ret
# }
#
#
# temp.yr <- tcut(mgus$dxyr, 55:92, labels=as.character(55:91))
# temp.age <- tcut(mgus$age, 34:101, labels=as.character(34:100))
# ptime <- ifelse(is.na(mgus$pctime), mgus$futime, mgus$pctime)
# pstat <- ifelse(is.na(mgus$pctime), 0, 1)
# pfit <- pyears(Surv(ptime/365.25, pstat) ~ temp.yr + temp.age + sex, mgus,
# data.frame=TRUE)
#
#
#
# tidy.pyears <-
|
633a0446ab89c82ad972f5bc0f452b2824b83747
|
3a73255a30f04b4e7eb7f31f698fbd771ebecd6b
|
/tests/testthat/test-h5.R
|
d845331c28342a69f52e320f4da4cac33840b5cf
|
[
"MIT"
] |
permissive
|
dynverse/dynutils
|
799786859c52128bb6140a063c05fa2ace563e67
|
f26939ec83ebe148bde519472ed353d8fdec76a8
|
refs/heads/master
| 2022-10-19T22:52:17.697595
| 2022-10-06T06:49:12
| 2022-10-06T06:49:12
| 101,953,647
| 1
| 3
|
NOASSERTION
| 2022-09-23T02:51:00
| 2017-08-31T03:11:04
|
R
|
UTF-8
|
R
| false
| false
| 1,313
|
r
|
test-h5.R
|
context("Testing write_h5 and read_h5")
if (require("hdf5r")) {
obj <- get_h5_test_data()
test_that("write_h5 and read_h5 works", {
file <- tempfile()
on.exit(file.remove(file))
write_h5(obj, file)
obj2 <- read_h5(file)
testthat::expect_equivalent(obj2, obj)
})
test_that("test_h5_installation works", {
expect_true(test_h5_installation())
expect_message(test_h5_installation(detailed = TRUE), "HDF5 test successful")
expect_output(expect_error(test_h5_installation_write(detailed = TRUE, obj = list(x = print))))
expect_output(expect_error(test_h5_installation_read(detailed = TRUE, file = tempfile())))
expect_output(expect_error(test_h5_installation_equal(detailed = TRUE, obj = 1, obj2 = 2)))
})
test_that("is_sparse works", {
expect_false(is_sparse(matrix(c(1:10))))
m <- Matrix::Matrix(matrix(c(1:10)), sparse = FALSE)
expect_false(is_sparse(m))
expect_true(is_sparse(methods::as(m, "CsparseMatrix")))
expect_false(is_sparse(methods::as(m, "denseMatrix")))
})
test_that("errors gracefully", {
file <- tempfile()
on.exit(file.remove(file))
h5file <- hdf5r::H5File$new(file, mode = "w")
h5file[["a"]] <- 1
h5file$close_all()
expect_error(read_h5(file), regexp = "Object class not found")
})
}
|
51ab122460a191a74ed919449707c8b04d5e6ac8
|
f153381432a864aa0f1cf789d27aa2e0aba00614
|
/man/make_sampling_table.Rd
|
47f4728c5d46ea326bce265d92c9770fe58a9491
|
[] |
no_license
|
rdrr1990/keras
|
0f997cf8632f6db623afcdb376ea8c258923e094
|
72b510456f15f5570388d0e610aa4917f1f1674b
|
refs/heads/master
| 2021-05-06T06:42:08.086819
| 2017-12-30T00:11:11
| 2017-12-30T00:11:11
| 113,892,962
| 2
| 0
| null | 2017-12-11T18:19:25
| 2017-12-11T18:19:24
| null |
UTF-8
|
R
| false
| true
| 1,381
|
rd
|
make_sampling_table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{make_sampling_table}
\alias{make_sampling_table}
\title{Generates a word rank-based probabilistic sampling table.}
\usage{
make_sampling_table(size, sampling_factor = 1e-05)
}
\arguments{
\item{size}{int, number of possible words to sample.}
\item{sampling_factor}{the sampling factor in the word2vec formula.}
}
\value{
An array of length \code{size} where the ith entry is the
probability that a word of rank i should be sampled.
}
\description{
This generates an array where the ith element is the probability that a word
of rank i would be sampled, according to the sampling distribution used in
word2vec. The word2vec formula is: p(word) = min(1,
sqrt(word.frequency/sampling_factor) / (word.frequency/sampling_factor)) We
assume that the word frequencies follow Zipf's law (s=1) to derive a
numerical approximation of frequency(rank): frequency(rank) ~ 1/(rank *
(log(rank) + gamma) + 1/2 - 1/(12*rank)) where gamma is the Euler-Mascheroni
constant.
}
\note{
The word2vec formula is: p(word) = min(1,
sqrt(word.frequency/sampling_factor) / (word.frequency/sampling_factor))
}
\seealso{
Other text preprocessing: \code{\link{pad_sequences}},
\code{\link{skipgrams}},
\code{\link{text_hashing_trick}},
\code{\link{text_one_hot}},
\code{\link{text_to_word_sequence}}
}
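\examples{
# Illustrative sketch added for clarity (not generated from the package
# sources); the vocabulary size of 1000 is an arbitrary assumption.
\dontrun{
sampling_table <- make_sampling_table(size = 1000)
head(sampling_table)
}
}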
|
5bfee103af1d96bd9fc61f9b2be3172a2ce92558
|
cf606e7a3f06c0666e0ca38e32247fef9f090778
|
/test/integration/example-models/ARM/Ch.21/21.6_SummarizingtheAmmountofPartialPooling.R
|
7e4f471a5c55107c75579e3818854df7b6415573
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
nhuurre/stanc3
|
32599a71d5f82c759fd6768b8b699fb5f2b2d072
|
5612b357c1cd5a08cf2a57db97ce0e789bb87018
|
refs/heads/master
| 2023-07-05T02:27:08.083259
| 2020-11-12T15:37:42
| 2020-11-12T15:37:42
| 222,684,189
| 0
| 0
|
BSD-3-Clause
| 2019-11-19T11:50:39
| 2019-11-19T11:50:38
| null |
UTF-8
|
R
| false
| false
| 2,372
|
r
|
21.6_SummarizingtheAmmountofPartialPooling.R
|
library(rstan)
library(ggplot2)
## Read the data
# Data are at http://www.stat.columbia.edu/~gelman/arm/examples/radon
# The R codes & data files should be saved in the same directory for
# the source command to work
srrs2 <- read.table ("srrs2.dat", header=T, sep=",")
mn <- srrs2$state=="MN"
radon <- srrs2$activity[mn]
log.radon <- log (ifelse (radon==0, .1, radon))
floor <- srrs2$floor[mn] # 0 for basement, 1 for first floor
n <- length(radon)
y <- log.radon
x <- floor
# get county index variable
county.name <- as.vector(srrs2$county[mn])
uniq <- unique(county.name)
J <- length(uniq)
county <- rep (NA, J)
for (i in 1:J){
county[county.name==uniq[i]] <- i
}
# no predictors
ybarbar = mean(y)
sample.size <- as.vector (table (county))
sample.size.jittered <- sample.size*exp (runif (J, -.1, .1))
cty.mns = tapply(y,county,mean)
cty.vars = tapply(y,county,var)
cty.sds = mean(sqrt(cty.vars[!is.na(cty.vars)]))/sqrt(sample.size)
cty.sds.sep = sqrt(tapply(y,county,var)/sample.size)
## Get the county-level predictor
srrs2.fips <- srrs2$stfips*1000 + srrs2$cntyfips
cty <- read.table ("cty.dat", header=T, sep=",")
usa.fips <- 1000*cty[,"stfips"] + cty[,"ctfips"]
usa.rows <- match (unique(srrs2.fips[mn]), usa.fips)
uranium <- cty[usa.rows,"Uppm"]
u <- log (uranium)
u.full <- u[county]
## Fit the model
dataList.1 <- list(N=n,J=85,y=y,u=u,x=x,county=county)
radon_vary_intercept_a.sf1 <- stan(file='radon_vary_intercept_a.stan',
data=dataList.1, iter=1000, chains=4)
print(radon_vary_intercept_a.sf1,pars = c("a","b","sigma_y", "lp__"))
post <- extract(radon_vary_intercept_a.sf1)
e.a <- colMeans(post$e_a)
omega <- (sd(e.a)/mean(post$sigma_a))^2
omega <- pmin (omega, 1)
## Summary pooling factor for each batch of parameters
dataList.1 <- list(N=n,J=85,y=y,u=u,x=x,county=county)
radon_vary_intercept_b.sf1 <- stan(file='radon_vary_intercept_b.stan',
data=dataList.1, iter=1000, chains=4)
print(radon_vary_intercept_b.sf1,pars = c("a","b","sigma_y", "lp__"))
post <- extract(radon_vary_intercept_b.sf1)
e.y <- (post$e_y)
e.a <- (post$e_a)
lambda.y <- 1 - var (apply (e.y, 2, mean))/ mean (apply (e.y, 1, var))
lambda.a <- 1 - var (apply (e.a, 2, mean))/ mean (apply (e.a, 1, var))
# if slope varies
lambda.b <- 1 - var (apply (e.b, 2, mean))/ mean (apply (e.b, 1, var))
|
f832b8bbfe4c67ad897c2ab15c1d969ca8cfb6cf
|
3a94d8d3d791b06252406a4c50fa2183627fc41d
|
/Microarray_Roopali.R
|
527f3b411ef02f003fb049c2b6bacf01e02ad33e
|
[] |
no_license
|
Roopali87/Microarray-analysis
|
95148baf2ae1e4d3d2ceea42d333cb0da2061d25
|
1600b2f1913baf571bca1f16f7de339669b7bfa5
|
refs/heads/main
| 2023-01-22T11:30:29.176166
| 2020-12-05T03:03:15
| 2020-12-05T03:03:15
| 318,688,698
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,057
|
r
|
Microarray_Roopali.R
|
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
#Package for human ref and annotation
#BiocManager::install("pd.hugene.1.0.st.v1")
#library("pd.hugene.1.0.st.v1")
#BiocManager::install("hugene10stv1cdf")
#library("hugene10stv1cdf")
###########################################
BiocManager::install("limma")
library("limma")
## For wheat reference genome
BiocManager::install("pd.wheat")
library("pd.wheat")
## For wheat affymetrix genome array annotation
BiocManager::install("wheatcdf")
library("wheatcdf")
##
BiocManager::install("affyPLM")
library("affyPLM")
##
BiocManager::install("affy")
library("affy")
BiocManager::install("IRanges")
library("IRanges")
BiocManager::install("RColorBrewer")
library("RColorBrewer")
BiocManager::install("methods")
library("methods")
BiocManager::install("S4Vectors")
library("S4Vectors")
BiocManager::install("Hmisc")
library("Hmisc")
#To get Boxplot for Pre-Normalized Expression
targets <- readTargets("Target.txt")
#Read CEL Files
dat <- ReadAffy(filenames = targets$FileName) ###FileName is the first column name in txt
dat
eset<-rma(dat)
eset
normset<-eset
pData(normset)
###Oligo package, if required.
#BiocManager::install("oligo", version = "3.8")
#library("oligo")
#Oligo Read in the CEL files in directory
#celFiles<- list.celfiles()
#affyRaw<-read.celfiles(celFiles)
#affyRaw
#exprset <-affyRaw
#exprset
#pData(exprset)
#RMA Normalization
#BiocManager::install("gcrma")
#library("gcrma")
#exprset <- gcrma(exprset)
#Finally, save the data to an output file to be used by other programs, etc (Data will be log2 transformed and normalized)
write.exprs(eset,file="PostNormalisedData.txt")
#Boxplot Before Normalization ##############################################################
tiff(file="Control-treatment Pre-Normalization [BoxPlot].tiff", bg="transparent", width=600, height=600)
par(mar = c(7, 5, 3, 2) + 0.1); # This sets the plot margins #bottom,left,top,right
boxplot(dat,col="red", main="Pre-Normalization", las=2, cex.axis=0.74, ylab="Intensities")#, ylim=c(2,14))
title(xlab = "Sample Array", line = 6); # Add x axis title
dev.off()
#Boxplot After Normalization
tiff(file="Control-treatment Post-Normalization [BoxPlot].tiff", bg="transparent", width=600, height=600)
par(mar = c(7, 5, 3, 2) + 0.1); # This sets the plot margins #bottom,left,top,right
boxplot(eset, col="blue",main="Post-Normalization", las=2, cex.axis=0.74, ylab="Intensities")#, ylim=c(2,14))
title(xlab = "Sample Array", line = 6); # Add x axis title
dev.off()
###################################################################################
#https://rpubs.com/ge600/limma
data<-read.table(file = "PostNormalisedData.txt", header = T, row.names=1)
groups = gsub("_.*", "", colnames(data)) #clip sample name after underscore(_)
groups <- factor(groups, levels = c("Control","Treatment") )
design <- model.matrix( ~ 0 + groups )
colnames(design) <- c("Control","Treatment")
design
library(limma)
# Fits a linear model for each gene based on the given series of arrays.
fit <- lmFit(data, design)
#write.csv(fit, "lmFit.csv", quote = F)
#Matrix
#design<-model.matrix(~factor(c("Control", "Control", "Treatment", "Treatment")))
#colnames(design)<-c("Control","Treatment")
head(data)
#Contrast Matrix Design
cont.matrix <- makeContrasts(contrasts = "Treatment-Control", levels=design)
fit2 <- contrasts.fit(fit, cont.matrix)
# Computes moderated t-statistics and log-odds of differential expression by empirical Bayes shrinkage of the standard errors towards a common value.
fit2 <- eBayes(fit2, trend=FALSE)
# calls differential gene expression 1 for up, -1 for down
#results <- decideTests(fit2, p.value = 0.05, lfc= log2(2) )
topGenes =topTable(fit2, number = 1e12,sort.by="M" )
head(topGenes)
write.csv(topGenes, "Result Top Table Final.csv", quote = F)
############################################################################
|
4996901831c712a80d8a8e2aac86f3a9becf201e
|
e4af769514afb803dba67125a4394befa7312a46
|
/scripts/rprop2.R
|
8be750a56f0fe5197737f6700627d623e0dd57bf
|
[] |
no_license
|
juliusgoth/fstrain
|
6e8c46a547f2cf45c6a55fd2c537a5f9a5ec034b
|
6d0340e12c6cdcf31676052d4e4d20f7937454df
|
refs/heads/master
| 2021-05-27T04:53:01.899410
| 2013-05-30T17:59:26
| 2013-05-30T18:00:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,294
|
r
|
rprop2.R
|
#!/usr/bin/rscript
rprop <- function(params, func, grad, makeConverge, abstol=1e-3) {
maxiter <- 100
updates <- rep(0.5, length(params))
prevGradients <- rep(0.0, length(params))
prevGradientsMean <- 0.0
eta.minus <- 0.5
eta.plus <- 1.2
updateMin <- 1e-6
updateMax <- 50
funval <- Inf
for(iter in 1:maxiter) {
write(cat("rprop iter", iter), stderr())
funval <- func(params)
gradients <- grad(params)
len <- length(gradients)
for(i in 1:len) {
gradientProduct <- prevGradients[i] * gradients[i]
if(gradientProduct > 0){ # no sign change
updates[i] <- min(updates[i] * eta.plus, updateMax)
delta <- -sign(gradients[i]) * updates[i]
params[i] <- params[i] + delta
prevGradients[i] <- gradients[i]
} else if(gradientProduct < 0) {
updates[i] <- max(updates[i] * eta.minus, updateMin)
prevGradients[i] <- 0
} else {
delta <- -sign(gradients[i]) * updates[i]
params[i] <- params[i] + delta
prevGradients[i] <- gradients[i]
}
}
updatesMean <- mean(updates)
write(cat("rprop updates mean=", updatesMean), stderr())
if(updatesMean < abstol){
write("converged", stderr())
break
}
}
return(list(par=params, value=funval))
}
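# Usage sketch (illustrative only): minimize a simple quadratic. The target
# function, its gradient and the tolerance below are assumptions chosen for
# demonstration; makeConverge is not used inside rprop() and is passed as NULL.
quad  <- function(p) sum((p - c(1, -2))^2)
gquad <- function(p) 2 * (p - c(1, -2))
res <- rprop(params = c(10, 10), func = quad, grad = gquad,
             makeConverge = NULL, abstol = 1e-4)
res$par   # should be close to c(1, -2)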
|
aa5bab96c0276e910aac6436013b435c27637bb2
|
13c6306ae730acf0d3a204edff967976933262c0
|
/man/rollback-RangoPostgresConnection-method.Rd
|
f929e035eaa47175d18185f777158ec35c2019f8
|
[] |
no_license
|
AndreMikulec/Rango
|
f3b3ae51e5187594a4ee74654056b8a1b874fc45
|
326f43d6a46cad569e01f53d7503a03835c4d570
|
refs/heads/master
| 2021-01-21T06:09:58.321170
| 2018-10-04T00:42:13
| 2018-10-04T00:42:13
| 47,441,839
| 0
| 0
| null | 2015-12-05T04:12:15
| 2015-12-05T04:12:15
| null |
UTF-8
|
R
| false
| true
| 416
|
rd
|
rollback-RangoPostgresConnection-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/postgres.R
\docType{methods}
\name{rollback,RangoPostgresConnection-method}
\alias{rollback,RangoPostgresConnection-method}
\title{Roll back the changes in the current transaction}
\usage{
\S4method{rollback}{RangoPostgresConnection}(object)
}
\description{
Roll back the changes in the current transaction
}
\author{
Willem Ligtenberg
}
|
e236a3794a8ff10b611caad55df0ab52b1acfc78
|
799e44f7c7600b2f0a6023be2a3342212e68c3ad
|
/R/make.ix.mat.R
|
f05a00f04401819686a08875334c72fa47f38227
|
[] |
no_license
|
cran/MLDS
|
4cf92e7f4fd2d24ab48c8f40a53315e201d9ea0b
|
6ad8eb2921c22d5c57faea74a134ce810306acdd
|
refs/heads/master
| 2022-02-17T05:52:25.157134
| 2022-02-04T10:40:02
| 2022-02-04T10:40:02
| 17,691,875
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 486
|
r
|
make.ix.mat.R
|
`make.ix.mat` <-
function(data, xi = NULL, ...) {
# data, basic data.frame from diff scale experiment
# xi, in case some values not included as from 6pt ana.
if ( missing(xi) ) xi <- max(data)
nr <- nrow(data)
wts <- rep(c(1, -1, -1, 1), each = nr)
ix.mat <- matrix(0, ncol = xi, nrow = nr)
ix.mat[matrix(c(rep(1:nr, 4),
as.vector(unlist(data[, -data$resp]))), ncol = 2)] <- wts
dsInc.df <- data.frame(resp = data$resp, stim = ix.mat)
dsInc.df <- dsInc.df[, -2]
dsInc.df
}
|
81237eeded615a2b0511071bae597c3d877af9df
|
8a32ea28dc84c6423221dc0d3d2d44dc38d05cfa
|
/R/readFragpipeFile.R
|
2fb10476df8f23d6bb49e4039a1b0ab27e64bdd3
|
[] |
no_license
|
cran/wrProteo
|
d5932a2f48d8e1ff0397691b0e8027ca360b9b7f
|
348376e6931d279200da2cc1ed85f8c57ec516f4
|
refs/heads/master
| 2023-09-05T12:04:30.055590
| 2023-08-18T10:10:02
| 2023-08-18T11:31:04
| 236,959,541
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,258
|
r
|
readFragpipeFile.R
|
#' Read Tabulated Files Exported by FragPipe At Protein Level
#'
#' This function allows importing protein identification and quantification results from \href{https://fragpipe.nesvilab.org/}{Fragpipe}
#' which were previously exported as tabulated text (tsv). Quantification data and other relevant information will be extracted in a similar way as with the other import-functions from this package.
#' The final output is a list containing the elements: \code{$annot}, \code{$raw} and \code{$quant}, or a data.frame with the quantification data and a part of the annotation if argument \code{separateAnnot=FALSE}.
#'
#' @details
#' This function has been developed using Fragpipe versions 18.0 and 19.0.
#'
#' Using the argument \code{suplAnnotFile} it is possible to specify a specific file (or search for default file) to read for extracting file-names as sample-names and other experiment related information.
#'
#' @param fileName (character) name of file to be read
#' @param path (character) path of file to be read
#' @param normalizeMeth (character) normalization method, defaults to \code{median}, for more details see \code{\link[wrMisc]{normalizeThis}})
#' @param sampleNames (character) custom column-names for quantification data; this argument has priority over \code{suplAnnotFile}
#' @param read0asNA (logical) decide if initial quantifications at 0 should be transformed to NA (thus avoid -Inf in log2 results)
#' @param quantCol (character or integer) exact col-names, or if length=1 content of \code{quantCol} will be used as pattern to search among column-names for $quant using \code{grep}
#' @param refLi (character or integer) custom specify which line of data is main species, if character (eg 'mainSpe'), the column 'SpecType' in $annot will be searched for exact match of the (single) term given
#' @param separateAnnot (logical) if \code{TRUE} output will be organized as list with \code{$annot}, \code{$abund} for initial/raw abundance values and \code{$quant} with final log2 (normalized) quantitations
#' @param annotCol (character) column names to be read/extracted for the annotation section (default c("Accession","Description","Gene","Contaminant","Sum.PEP.Score","Coverage....","X..Peptides","X..PSMs","X..Unique.Peptides", "X..AAs","MW..kDa.") )
#' @param FDRCol (list) optional indication to search for protein FDR information
#' @param wex (integer) relative expansion factor of the violin-plot (will be passed to \code{\link[wrGraph]{vioplotW}})
#' @param specPref (character or list) define characteristic text for recognizing (main) groups of species (1st for contaminants - will be marked as 'conta', 2nd for main species - marked as 'mainSpe',
#'  and optional following ones for supplemental tags/species - marked as 'species2','species3',...);
#' if list and list-element has multiple values they will be used for exact matching of accessions (ie 2nd of argument \code{annotCol})
#' @param gr (character or factor) custom defined pattern of replicate association, will override final grouping of replicates from \code{sdrf} and/or \code{suplAnnotFile} (if provided)
#' @param sdrf (character, list or data.frame) optional extraction and adding of experimental meta-data: if character, this may be the ID at ProteomeExchange,
#'   the second element may give further indications for automatic organization of groups of replicates.
#' Besides, the output from \code{readSdrf} or a list from \code{defineSamples} may be provided; if \code{gr} is provided, \code{gr} gets priority for grouping of replicates
#' @param suplAnnotFile (logical or character) optional reading of supplemental files; however, if \code{gr} is provided, \code{gr} gets priority for grouping of replicates;
#' if \code{character} the respective file-name (relative or absolute path)
#' @param groupPref (list) additional parameters for interpreting meta-data to identify structure of groups (replicates), will be passed to \code{readSampleMetaData}.
#' May contain \code{lowNumberOfGroups=FALSE} for automatically choosing a rather elevated number of groups if possible (defaults to low number of groups, ie higher number of samples per group)
#' @param plotGraph (logical or integer) optional plot of type vioplot of initial and normalized data (using \code{normalizeMeth}); if integer, it will be passed to \code{layout} when plotting
#' @param titGraph (character) custom title to plot of distribution of quantitation values
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a list with \code{$raw} (initial/raw abundance values), \code{$quant} with final normalized quantitations, \code{$annot}, \code{$counts} an array with number of peptides, \code{$quantNotes}
#' and \code{$notes}; or if \code{separateAnnot=FALSE} the function returns a data.frame with annotation and quantitation only
#' @seealso \code{\link[utils]{read.table}}, \code{\link[wrMisc]{normalizeThis}}) , \code{\link{readMaxQuantFile}}, \code{\link{readProtDiscovFile}}, \code{\link{readProlineFile}}
#' @examples
#' FPproFi1 <- "tinyFragpipe1.tsv.gz"
#' path1 <- system.file("extdata", package="wrProteo")
#' ## let's define the main species and allow tagging some contaminants
#' specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="MOUSE")
#' dataFP <- readFragpipeFile(path1, file=FPproFi1, specPref=specPref1, tit="Tiny Fragpipe Data")
#' summary(dataFP$quant)
#'
#' @export
readFragpipeFile <- function(fileName, path=NULL, normalizeMeth="median", sampleNames=NULL, read0asNA=TRUE, quantCol="Intensity$",
annotCol=NULL, refLi=NULL, separateAnnot=TRUE, FDRCol=list("Protein.Probability", lim=0.99), # contamCol="Contaminant",
groupPref=list(lowNumberOfGroups=TRUE), plotGraph=TRUE, titGraph="FragPipe", wex=1.6, specPref=c(conta="CON_|LYSC_CHICK", mainSpecies="OS=Homo sapiens"),
gr=NULL, sdrf=NULL, suplAnnotFile=FALSE, silent=FALSE, debug=FALSE, callFrom=NULL) {
## read Fragpipe exported txt
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readFragpipeFile")
oparMar <- if(plotGraph) graphics::par("mar") else NULL # only if figure might be drawn
reqPa <- c("utils","wrMisc")
chPa <- sapply(reqPa, requireNamespace, quietly=TRUE)
if(any(!chPa)) stop("package(s) '",paste(reqPa[which(!chPa)], collapse="','"),"' not found ! Please install first from CRAN")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
excluCol <- "^Abundances.Count" # exclude this from quantifications columns
cleanDescription <- TRUE # clean 'Description' for artifacts of truncated text (tailing ';' etc)
infoDat <- infoFi <- setupSd <- parametersD <- NULL # initialize
## check if path & file exist
if(!grepl("\\.tsv$|\\.tsv\\.gz$", fileName)) message(fxNa,"Trouble ahead, expecting tabulated text file (the file'",fileName,"' might not be right format) !!")
paFi <- wrMisc::checkFilePath(fileName, path, expectExt="tsv", compressedOption=TRUE, stopIfNothing=TRUE, callFrom=fxNa, silent=silent,debug=debug)
if(debug) message(fxNa,"rfp0a ..")
## note : reading sample-setup from 'suplAnnotFile' at this place won't allow comparing if number of samples/columns corresponds to data; do after reading main data
if(debug) message(fxNa,"rfp0 .. Ready to read", if(length(path) >0) c(" from path ",path[1])," the file ",fileName[1])
## read (main) file
## future: look for fast reading of files
tmp <- try(utils::read.delim(file.path(paFi), stringsAsFactors=FALSE), silent=TRUE)
if(length(tmp) <1 || inherits(tmp, "try-error") || length(dim(tmp)) <2) {
if(inherits(tmp, "try-error")) warning("Unable to read input file ('",paFi,"')! (check if rights to read)") else {
if(!silent) message(fxNa,"Content of file '",paFi,"' seeps empty or non-conform ! Returning NULL; check if this is really a Fragpipe-file") }
NULL
} else {
if(debug) { message(fxNa,"rfp1 .. dims of initial data : ", nrow(tmp)," li and ",ncol(tmp)," col "); rfp1 <- list(fileName=fileName,path=path,paFi=paFi,tmp=tmp,normalizeMeth=normalizeMeth,sampleNames=sampleNames,read0asNA=read0asNA,quantCol=quantCol,
annotCol=annotCol,refLi=refLi,separateAnnot=separateAnnot,FDRCol=FDRCol )}
## locate & extract annotation
## note : space (' ') in orig colnames are transformed to '.'
if(length(annotCol) <1) annotCol <- c("Protein","Protein.ID","Entry.Name","Description","Gene","Organism", "Protein.Length","Protein.Existence","Protein.Probability",
"Top.Peptide.Probability", "Combined.Total.Peptides","Combined.Spectral.Count","Combined.Unique.Spectral.Count")
## note cols 2-6 are part to common format wrProteo
PSMCol <- "\\.Spectral\\.Count$" # pattern searching tag for PSM-data
PepCol <- "Unique\\.Spectral\\.Count$" # pattern searching tag for Number of peptides
## future option : lateron rename columns called as "Description" to annotCol[2]
## below use explicit colnames "Accession","Description", rename if tolower() fits
.chColNa <- function(x, mat, renameTo=NULL, silent=FALSE, fxNa=NULL){
## check in 'matr' for column-name 'x', if required rename best hit (if no direct hit look using grep, then grep wo case); return corrected mat
chX <- x %in% colnames(mat)
if(all(chX)) {
if(is.character(renameTo) && length(renameTo) ==1) colnames(mat)[match(x, colnames(mat))] <- renameTo # juste simple rename (single col only)
} else { # try to localize column to use
chX <- grep(x, colnames(mat))
if(length(chX) >0) {
if(is.character(renameTo) && length(renameTo) ==1) colnames(mat)[chX[1]] <- renameTo else x
if(!silent && length(chX) >1) message(fxNa,"Found multiple columns containing '",x,"' : ",wrMisc::pasteC(colnames(mat)[chX], quoteC="'"),", using 1st")
} else {
chX <- grep(tolower(x), tolower(colnames(mat)))
if(length(chX) >0) {
if(is.character(renameTo) && length(renameTo) ==1) colnames(mat)[chX[1]] <- renameTo else x
if(!silent && length(chX) >1) message(fxNa,"Found multiple columns containing '",tolower(x),"' : ",wrMisc::pasteC(colnames(mat)[chX], quoteC="'"),", using 1st")
} else stop("Could NOT find column '",x,"' !!\n (available columns ",wrMisc::pasteC(colnames(mat), quoteC="'"),")") }
}
mat }
## check for essential colnames !
if(is.character(annotCol)) annotColNo <- match(annotCol, colnames(tmp))
chNa <- is.na(annotColNo)
if(any(chNa) & silent) message(fxNa,"Missing ",sum(chNa)," annotation columns: ",wrMisc::pasteC(annotCol[chNa], quoteC="'"))
## rename to wrProteo format
tmp <- .chColNa(annotCol[2], tmp, renameTo="Accession", silent=silent, fxNa=fxNa) # rename 'Protein ID' to 'Accession' (Uniprot ID)
tmp <- .chColNa(annotCol[3], tmp, renameTo="EntryName", silent=silent, fxNa=fxNa) # like THOC2_MOUSE
tmp <- .chColNa(annotCol[4], tmp, renameTo="Description", silent=silent, fxNa=fxNa) # full (long) name
annot <- cbind(Accession=tmp[,"Accession"], EntryName=tmp[,"EntryName"], GeneName=NA, Species=NA, Contam=NA, SpecType=NA,
Description=tmp[,"Description"], tmp[,wrMisc::naOmit(annotColNo[-(1:6)])]) # may be better to name column 'species'
if(debug) { message(fxNa,"rfp2 .. annotColNo : ", wrMisc::pasteC(annotColNo)); rfp2 <- list(annot=annot,annotCol=annotCol,tmp=tmp,specPref=specPref )}
## Species (need to run before reparsing badly parsed)
if(!is.na(annotColNo[6])) { spec <- tmp[,annotColNo[6]]
spec <- sub("^\ +|\ +$","", spec) # remove heading or tailing (white) space
chOX <- grep(" OX=", spec)
if(length(chOX) >0) { OX <- sub(" OX=", "", spec[chOX])
spec[chOX] <- sub(" OX=[[:digit:]]+[[:print:]]*","", spec[chOX])
chO2 <- nchar(spec[chOX]) <3 & nchar(OX) >1
if(any(chO2)) spec[chOX[which(chO2)]] <- OX[which(chO2)] # use OX=.. in case no other information available
}
if(TRUE) spec <- sub(" \\([[:alpha:]][[:print:]]+\\).*", "", spec) # remove ' (..)'
annot[,"Species"] <- spec
}
## look for not well parsed (use separator '|' as indicator)
chPa <- grep("\\|", annot[,"Accession"])
if(length(chPa) >0) {
chSp <- grep(" ", annot[chPa,"Accession"])
if(length(chSp) >0) {
# extract species
chOS <- grep("[[:print:]]+ OS=[[:alpha:]]", annot[chPa[chSp],"Accession"])
if(length(chOS) >0) annot[chPa[chSp[chOS]],"Species"] <- sub(" [[:upper:]]{2}=.+","", sub("[[:print:]]+ OS=","", annot[chPa[chSp[chOS]],"Accession"])) # extract species
## extract GeneName
chGn <- grep("[[:print:]]+ GN=", annot[chPa[chSp],"Accession"])
if(length(chGn) >0) annot[chPa[chSp[chGn]],"GeneName"] <- sub(" [[:upper:]]{2}=.+","", sub("[[:print:]]+ GN=","", annot[chPa[chSp[chGn]],"Accession"]))
## extract Description
annot[chPa[chSp],"Description"] <- sub(".*? ", "", sub(" [[:upper:]]{2}=.+","", annot[chPa[chSp],"Accession"]))
## extract EntryName (option 1)
annot[chPa[chSp],"EntryName"] <- gsub(".*\\|","", sub(" .+","", annot[chPa,"Accession"]))
} else {
annot[chPa,"EntryName"] <- gsub(".*\\|","", annot[chPa,"Accession"]) ## extract EntryName (option 2)
}
## extract Accession
annot[chPa,"Accession"] <- sapply(strsplit(annot[chPa,"Accession"], "\\|"), function(x) if(length(x) >1) x[2] else NA)
}
## clean 'Description' entries: remove tailing punctuation or open brackets (ie not closed) at end of (truncated) fasta header
if(cleanDescription) {
if(debug) { message(fxNa,"rfp3a") }
annot[,"Description"] <- sub("[[:punct:]]+$","", sub("\\ +$", "", annot[,"Description"])) # tailing ';' and/or tailing space
annot[,"Description"] <- sub(" \\([[:alpha:]]*$", "", annot[,"Description"]) # tailing (ie truncated) open '(xxx'
}
if(debug) { message(fxNa,"rfp3b"); rfp3b <- list() }
if(debug) {message(fxNa,"rfp4 .. dim annot: ", nrow(annot)," li and ",ncol(annot)," cols; colnames : ",wrMisc::pasteC(colnames(annot))," ")}
.MultGrep <- function(pat, y) if(length(pat)==1) grep(pat, y) else unlist(sapply(pat, grep, y)) # (multiple) grep() when length of pattern 'pat' >0
## Contam
if("Contaminant" %in% colnames(annot)) { # just in case there is a column called 'Contaminant' (so far not seen)
      useLi <- which(nchar(annot[,"Contaminant"]) >0 & !is.na(annot[,"Contaminant"]))
if(length(useLi) >0) annot[useLi,"Contam"] <- toupper(gsub(" ","",annot[useLi,"Contaminant"]))}
chConta <- grep("^contam", tmp[,annotCol[1]]) # specific to Fragpipe
if(length(chConta) >0) annot[chConta,"Contam"] <- TRUE
## get more species annot; separate multi-species (create columns 'Accession','GeneName','Species','SpecType')
chSp <- is.na(annot[,"Species"]) | nchar(annot[,"Species"]) <2
if(any(chSp)) { chSep <- grep("_", annot[which(chSp),"EntryName"]) # look for eg 'TRY1_BOVIN'
if(length(chSep) >0) { chSep <- which(chSp)[chSep]
spe2 <- sub("[[:alnum:]]+_", "", annot[chSep,"EntryName"])
if(debug) message(fxNa,"Recover Species name for ",length(chSep)," entries based on 'EntryName'")
commonSpec <- .commonSpecies()
chSp3 <- which(sub("^_","",commonSpec[,1]) %in% spe2)
if(length(chSp3) >0) for(i in chSp3) annot[chSep,"Species"] <- commonSpec[i,2]
}
chSp <- is.na(annot[,"Species"]) | nchar(annot[,"Species"]) <2 } # update
if(debug) {message(fxNa,"rfp6d .. "); rfp6d <- list(annot=annot,tmp=tmp,chSp=chSp,specPref=specPref,annotCol=annotCol,PSMCol=PSMCol,PepCol=PepCol)}
## look for tags from specPref
if(length(specPref) >0) {
## set annot[,"specPref"] according to specPref
annot <- .extrSpecPref(specPref, annot, silent=silent, debug=debug, callFrom=fxNa)
} else if(debug) message(fxNa,"Note: Argument 'specPref' not specifed (empty)")
if(debug) {message(fxNa,"rfp6b .. ")}
if(!silent) {
if(any(chSp, na.rm=TRUE) && !all(chSp)) message(fxNa,"Note: ",sum(chSp)," (out of ",nrow(tmp),") lines with unrecognized species")
if(!all(chSp)) { tab <- table(annot[,"Species"])
tab <- rbind(names(tab), paste0(": ",tab," ; "))
if(!silent) message(fxNa,"Count by 'specPref' : ",apply(tab, 2, paste)) }} # all lines assigned
if(debug) {message(fxNa,"rfp6e .. ")}
## check for unique annot[,"Accession"]
chDu <- duplicated(annot[,"Accession"], fromLast=FALSE)
if(any(chDu)) { warning(fxNa," NOTE : ",sum(chDu)," entries have same '",annotCol[2],"' (ie Accession) - correcting to UNIQUE !")
rownames(tmp) <- rownames(annot) <- wrMisc::correctToUnique(annot[,"Accession"], sep="_", atEnd=TRUE, callFrom=fxNa)
} else { rownames(annot) <- rownames(tmp) <- annot[,"Accession"] }
if(debug) { message(fxNa,"rfp7 .. dim annot ",nrow(annot)," and ",ncol(annot)); rfp7 <- list() }
## locate & extract abundance/quantitation data
msg <- " CANNOT find ANY quantification columns"
if(length(quantCol) >1) {
## explicit columns (for abundance/quantitation data)
if(is.character(quantCol)) quantCol <- match(quantCol, colnames(tmp))
} else {
## pattern search (for abundance/quantitation data)
## problem : extract 'xx1.Intensity' but NOT 'xx.MaxLFQ.Intensity'
useMaxLFQItens <- FALSE
quantColIni <- quantCol <- grep(quantCol, colnames(tmp))
chLFQ <- grep("MaxLFQ\\.", colnames(tmp)[quantCol])
if(length(chLFQ) >0) { if(!silent && length(chLFQ)==length(quantCol)) message(fxNa,"All quantification columns are MaxLFQ !")
if(length(chLFQ) < length(quantCol)) quantCol <- quantCol[(if(useMaxLFQItens) 1 else -1) *chLFQ] else warning("No non-MaxLFQ data available, using MaxLFQ.Intensity instead !") }
}
if(length(quantCol) <1) stop(msg," ('",quantCol,"')")
abund <- as.matrix(tmp[, quantCol])
rownames(abund) <- annot[,"Accession"]
if(debug) { message(fxNa,"rfp8 .. dim abund ",nrow(abund)," and ",ncol(abund)) ; rfp8 <- list(abund=abund,sampleNames=sampleNames,annot=annot,tmp=tmp,annot=annot,specPref=specPref)}
## check & clean abundances
## add custom sample names (if provided)
if(length(sampleNames) ==ncol(abund) && ncol(abund) >0) {
if(debug) { message(fxNa,"Valid 'sampleNames' were provided rfp8b") }
if(length(unique(sampleNames)) < length(sampleNames)) {
if(!silent) message(fxNa,"Custom sample names not unique, correcting to unique")
sampleNames <- wrMisc::correctToUnique(sampleNames, callFrom=fxNa) }
colnames(abund) <- sampleNames
}
if(debug) { message(fxNa,"rfp9"); rfp9 <- list(abund=abund,sampleNames=sampleNames,annot=annot,tmp=tmp,annot=annot,specPref=specPref,FDRCol=FDRCol)}
## (optional) filter by FDR (so far use 1st of list where matches are found from argument FDRCol)
if(length(FDRCol) >0) {
if(FDRCol[[1]] %in% colnames(tmp)) {
if(length(FDRCol[[2]]) >0 && is.numeric(FDRCol[[2]])) FdrLim <- FDRCol[[2]][1] else {
if(!silent) message(fxNa,"No valid FDR limit found, using default 0.95 (ie 5% filter)")
FdrLim <- 0.95 }
rmLi <- which(as.numeric(tmp[,FDRCol[[1]]]) < FdrLim) # default 5% 'FDR' filter
if(length(rmLi) == nrow(abund)) warning(fxNa,"Omitting FDR-filter; otherwise NO MORE LINES/proteins remaining !!!") else {
if(length(rmLi) >0) {
if(!silent) message(fxNa,"Removing ",length(rmLi)," lines/proteins removed as NOT passing protein identification filter at ",FdrLim, if(debug) " rfp9b")
abund <- abund[-rmLi,]
if(length(dim(abund)) <2) abund <- matrix(abund, nrow=1, dimnames=list(rownames(annot)[-rmLi], names(abund)))
annot <- if(nrow(abund) ==1) matrix(annot[-rmLi,], nrow=1, dimnames=list(rownames(abund), colnames(annot))) else annot[-rmLi,]
tmp <- if(nrow(abund) ==1) matrix(tmp[-rmLi,], nrow=1, dimnames=list(rownames(abund), colnames(tmp))) else tmp[-rmLi,]}
}
}
}
if(debug) { message(fxNa,"rfp11 .. length(FDRCol) ",length(FDRCol)," dim annot ",nrow(annot)," and ",ncol(annot)); rfp11 <- list()}
PSMCol <- "\\.Spectral\\.Count$" # pattern searching tag for PSM-data
PepCol <- "Unique\\.Spectral\\.Count$" # pattern searching tag for Number of peptides
PSMColExcl <- "Total\\.Spectral\\.Count$" # exclude this pattern searching tag for PSM
usTy <- c("PSM", "UniquePeptides")
## optional/additional counting results (PSM, no of peptides)
PSMExl <- grep(paste0("Combined",PSMCol), colnames(tmp))
PepExl <- grep(paste0("Combined\\.",PepCol), colnames(tmp))
PSMCol <- if(length(PSMCol) ==1) grep(PSMCol, colnames(tmp)) else NULL
PepCol <- if(length(PepCol) ==1) grep(PepCol, colnames(tmp)) else NULL
if(any(c(length(PSMExl), length(PSMColExcl)) >0)) PSMCol <- PSMCol[-which(PSMCol %in% c(PepCol, PSMExl, grep(PSMColExcl, colnames(tmp))))] # remove unwanted columns
if(length(PepExl) >0) PepCol <- PepCol[-which(PepCol %in% PepExl)]
if(any(c(length(PSMCol), length(PepCol)) >0)) {
counts <- array(NA, dim=c(nrow(abund), ncol(abund), length(usTy)), dimnames=list(rownames(abund),colnames(abund), usTy))
if(length(PSMCol) >0) counts[,,"PSM"] <- as.matrix(tmp[,PSMCol])
if(length(PepCol) >0) counts[,,"UniquePeptides"] <- as.matrix(tmp[,PepCol])
} else counts <- NULL
if(debug) {message(fxNa,"rfp12 .. ");
rfp12 <- list(tmp=tmp,abund=abund,annot=annot,sdrf=sdrf, fileName=fileName,path=path,paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,
refLi=refLi,specPref=specPref,read0asNA=read0asNA,quantCol=quantCol,annotCol=annotCol,refLi=refLi,separateAnnot=separateAnnot,FDRCol=FDRCol,gr=gr) }
## correct colnames from 'Xabc_1.Intensity' to 'abc_1'
ch1 <- grepl("^X[[:digit:]]", colnames(abund))
if(any(ch1)) colnames(abund)[which(ch1)] <- sub("^X","", colnames(abund)[which(ch1)])
colnames(abund) <- sub("\\.Intensity$","", colnames(abund))
## check for reference for normalization
refLiIni <- refLi
if(is.character(refLi) && length(refLi)==1) {
refLi <- which(annot[,"SpecType"]==refLi)
if(length(refLi) <1 ) { refLi <- 1:nrow(abund)
if(!silent) message(fxNa,"Could not find any proteins matching argument 'refLi=",refLiIni,"', ignoring ...")
} else {
if(!silent) message(fxNa,"Normalize using (custom) subset of ",length(refLi)," lines specified as '",refLiIni,"'")}} # may be "mainSpe"
## set 0 values to NA (avoid -Inf at log2)
if(!isFALSE(read0asNA)) { ch0 <- abund ==0
if(any(ch0, na.rm=TRUE)) abund[which(ch0)] <- NA }
## take log2 & normalize
quant <- try(wrMisc::normalizeThis(log2(abund), method=normalizeMeth, mode="additive", refLines=refLi, silent=silent, callFrom=fxNa), silent=TRUE)
if(debug) { message(fxNa,"rfp13 .. dim quant: ", nrow(quant)," li and ",ncol(quant)," cols; colnames : ",wrMisc::pasteC(colnames(quant))," ")
rfp13 <- list(tmp=tmp,quant=quant,abund=abund,annot=annot,sdrf=sdrf, fileName=fileName,path=path,paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,groupPref=groupPref,
refLi=refLi,refLiIni=refLiIni,specPref=specPref,read0asNA=read0asNA,quantCol=quantCol,annotCol=annotCol,separateAnnot=separateAnnot,FDRCol=FDRCol,gr=gr,silent=silent,debug=debug) }
### GROUPING OF REPLICATES AND SAMPLE META-DATA
if(length(suplAnnotFile) >0 || length(sdrf) >0) {
      setupSd <- readSampleMetaData(sdrf=sdrf, suplAnnotFile=suplAnnotFile, quantMeth="FP", path=path, abund=utils::head(quant), groupPref=groupPref, silent=silent, debug=debug, callFrom=fxNa)
}
if(debug) {message(fxNa,"rfp13b .."); rfp13b <- list()}
## finish groups of replicates & annotation setupSd
setupSd <- .checkSetupGroups(abund=abund, setupSd=setupSd, gr=gr, sampleNames=sampleNames, quantMeth="FP", silent=silent, debug=debug, callFrom=fxNa)
colNa <- if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames else setupSd$groups
chGr <- grepl("^X[[:digit:]]", colNa) # check & remove heading 'X' from initial column-names starting with digits
if(any(chGr)) colNa[which(chGr)] <- sub("^X","", colNa[which(chGr)]) #
colnames(quant) <- colnames(abund) <- colNa
if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames <- colNa else setupSd$groups <- colNa
if(length(dim(counts)) >1 && length(counts) >0) colnames(counts) <- setupSd$sampleNames
if(debug) {message(fxNa,"Read sample-meta data, rfp14"); rfp14 <- list(setupSd=setupSd, sdrf=sdrf, suplAnnotFile=suplAnnotFile,quant=quant,abund=abund,plotGraph=plotGraph)}
## main plotting of distribution of intensities
custLay <- NULL
if(is.numeric(plotGraph) && length(plotGraph) >0) {custLay <- as.integer(plotGraph); plotGraph <- TRUE} else {
if(!isTRUE(plotGraph)) plotGraph <- FALSE}
if(plotGraph) .plotQuantDistr(abund=abund, quant=quant, custLay=custLay, normalizeMeth=normalizeMeth, softNa="FragPipe",
refLi=refLi, refLiIni=refLiIni, tit=titGraph, las=NULL, silent=silent, callFrom=fxNa, debug=debug)
if(debug) {message(fxNa,"Read sample-meta data, rfp15"); rfp15 <- list()}
## meta-data
notes <- c(inpFile=paFi, qmethod="FragPipe", qMethVersion=if(length(infoDat) >0) unique(infoDat$Software.Revision) else NA,
rawFilePath= if(length(infoDat) >0) infoDat$File.Name[1] else NA, normalizeMeth=normalizeMeth, call=match.call(),
created=as.character(Sys.time()), wrProteo.version=utils::packageVersion("wrProteo"), machine=Sys.info()["nodename"])
## final output
if(isTRUE(separateAnnot)) list(raw=abund, quant=quant, annot=annot, counts=counts, sampleSetup=setupSd, quantNotes=parametersD, notes=notes) else data.frame(quant,annot) }
}
|
10354402a5e3cd0f731014c58d6fd0f839ef8dcd
|
b945a76daddff0988dccaf4dcf4af8c296c296d7
|
/plot4.R
|
ba5c963cd22f1fd08ca1cd923ab570513f514abc
|
[] |
no_license
|
pooimun/coursera-01_exploratory_data_analysis_course_project_1
|
f5cc0e21a81eedbd16d61565b33bccbbd8b01912
|
d18742628e777dc0990853adc27a2d21ddd202ce
|
refs/heads/master
| 2020-05-20T10:08:15.058561
| 2019-05-08T03:22:09
| 2019-05-08T03:22:09
| 185,519,181
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,252
|
r
|
plot4.R
|
#Read data
data <- read.csv('household_power_consumption.txt', header = TRUE, sep=';',stringsAsFactors = FALSE,dec = '.')
data2 <- subset(data,data$Date == '1/2/2007'|data$Date == '2/2/2007')
data3 <- subset(data2,data2$Voltage !='?')
#Plot 4
global_active_power <- as.numeric(data3$Global_active_power)
global_reactive_power <- as.numeric(data3$Global_reactive_power)
voltage <- as.numeric(data3$Voltage)
Sub_metering_1 <- as.numeric(data3$Sub_metering_1)
Sub_metering_2 <- as.numeric(data3$Sub_metering_2)
Sub_metering_3 <- as.numeric(data3$Sub_metering_3)
datetime <- strptime(paste(data3$Date, data3$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
png("plot4.png",width = 480,height = 480)
par(mfrow = c(2, 2))
plot(datetime, global_active_power,type='l',xlab='',ylab='Global Active Power')
plot(datetime, voltage,type='l',xlab='datetime',ylab='Voltage')
plot(datetime, Sub_metering_1,type='l',xlab='',ylab='Energy sub metering')
lines(datetime, Sub_metering_2,type='l',col='red')
lines(datetime, Sub_metering_3,type='l',col='blue')
legend('topright',c('Sub_metering_1','Sub_metering_2','Sub_metering_3'),lty=1,lwd=2.5,col=c('black','red','blue'))
plot(datetime, global_reactive_power,type='l',xlab='datetime',ylab='Global_reactive_power')
dev.off()
|
0cacb04d6e007486d0f8e0e66bcb027e24780bae
|
e0e2c3f01cb102e65ec8f704206e74616f757633
|
/04-ExploratoryDataAnalysis/Project_2/plot3.R
|
b6d55bbd56e6b3ad584498cd5db4d81523a58aca
|
[] |
no_license
|
MoonJeounghoon/Data-Science-Specialization
|
50a8ca52898e062041d51d331a0661fc8694a413
|
89a528d856c63e6606ba9ac4c70136ceb9f1f6cc
|
refs/heads/master
| 2021-04-29T02:11:45.113985
| 2015-01-26T12:21:44
| 2015-01-26T12:21:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 596
|
r
|
plot3.R
|
library(ggplot2)
## load two data file
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## subset the data by choosing fips == 24510 (Baltimore City)
NEI_Baltimore <- NEI[which(NEI$fips == 24510), ]
NEI_final <- aggregate(NEI_Baltimore["Emissions"], list(type = NEI_Baltimore$type, year = NEI_Baltimore$year), sum)
## plot using ggplot2
g <- ggplot(NEI_final, aes(year, Emissions, colour = type))
g + geom_line() + geom_point() + labs(title = "Total Emissions by Type in Baltimore City")
## save graph
dev.copy(png, file = "plot3.png", height = 480, width = 480)
dev.off()
|
92d011dae4117c49bbf114ef853178287a067dda
|
84c3e18e0e28d7494d2a3aa34cf2b62376368157
|
/man/geom_abline.rd
|
512e6334a36058a8a2930394c10db48257b74f8a
|
[] |
no_license
|
genome-vendor/r-cran-ggplot2
|
01859b20e506b1b4e282b3d321afd92fb5628a86
|
6430a6765bc27a9d68a4c2e70530f1cd4718aebc
|
refs/heads/master
| 2021-01-01T20:05:23.558551
| 2012-07-17T19:48:36
| 2012-07-17T19:48:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,041
|
rd
|
geom_abline.rd
|
\name{geom_abline}
\alias{geom_abline}
\alias{GeomAbline}
\title{geom\_abline}
\description{Line, specified by slope and intercept}
\details{
The abline geom adds a line with specified slope and intercept to the plot.
With its siblings geom\_hline and geom\_vline, it's useful for annotating plots. You can supply the parameters for geom\_abline, intercept and slope, in two ways: either explicitly as fixed values, or stored in the data set. If you specify the fixed values (\code{geom\_abline(intercept=0, slope=1)}) then the line will be the same in all panels, but if the intercept and slope are stored in the data, they can vary from panel to panel. See the examples for more ideas.
This page describes geom\_abline, see \code{\link{layer}} and \code{\link{qplot}} for how to create a complete plot from individual components.
}
\section{Aesthetics}{
The following aesthetics can be used with geom\_abline. Aesthetics are mapped to variables in the data with the aes function: \code{geom\_abline(aes(x = var))}
\itemize{
\item \code{colour}: border colour
\item \code{size}: size
\item \code{linetype}: line type
\item \code{alpha}: transparency
}
}
\usage{geom_abline(mapping = NULL, data = NULL, stat = "abline", position = "identity",
...)}
\arguments{
\item{mapping}{mapping between variables and aesthetics generated by aes}
\item{data}{dataset used in this layer, if not specified uses plot dataset}
\item{stat}{statistic used by this layer}
\item{position}{position adjustment used by this layer}
\item{...}{ignored }
}
\seealso{\itemize{
\item \code{\link{stat_smooth}}: To add lines derived from the data
\item \code{\link{geom_hline}}: for horizontal lines
\item \code{\link{geom_vline}}: for vertical lines
\item \code{\link{geom_segment}}: for a more general approach
\item \url{http://had.co.nz/ggplot2/geom_abline.html}
}}
\value{A \code{\link{layer}}}
\examples{\dontrun{
p <- qplot(wt, mpg, data = mtcars)
# Fixed slopes and intercepts
p + geom_abline() # Can't see it - outside the range of the data
p + geom_abline(intercept = 20)
# Calculate slope and intercept of line of best fit
coef(lm(mpg ~ wt, data = mtcars))
p + geom_abline(intercept = 37, slope = -5)
p + geom_abline(intercept = 10, colour = "red", size = 2)
# See ?stat_smooth for fitting smooth models to data
p + stat_smooth(method="lm", se=FALSE)
# Slopes and intercepts as data
p <- ggplot(mtcars, aes(x = wt, y=mpg), . ~ cyl) + geom_point()
df <- data.frame(a=rnorm(10, 25), b=rnorm(10, 0))
p + geom_abline(aes(intercept=a, slope=b), data=df)
# Slopes and intercepts from linear model
coefs <- ddply(mtcars, .(cyl), function(df) {
m <- lm(mpg ~ wt, data=df)
data.frame(a = coef(m)[1], b = coef(m)[2])
})
str(coefs)
p + geom_abline(data=coefs, aes(intercept=a, slope=b))
# It's actually a bit easier to do this with stat_smooth
p + geom_smooth(aes(group=cyl), method="lm")
p + geom_smooth(aes(group=cyl), method="lm", fullrange=TRUE)
}}
\author{Hadley Wickham, \url{http://had.co.nz/}}
\keyword{hplot}
|
a1dc87609081f1b103abf7d9528a1e8dcac571be
|
4913c9ca1d164582f935048c3d425e733539d3ce
|
/plotDepthVsN.R
|
cf68e1728b639353fd3b3514ad4ae8189d4b4f89
|
[] |
no_license
|
kant/pydt
|
a59960de4ce18121ed781cb9b7a1a26be498c059
|
8772c230134f015874beb288919e13b2cb3e0c1f
|
refs/heads/master
| 2020-09-10T00:24:30.613581
| 2014-01-29T10:07:07
| 2014-01-29T10:07:07
| 221,604,689
| 0
| 0
| null | 2019-11-14T03:35:37
| 2019-11-14T03:35:36
| null |
UTF-8
|
R
| false
| false
| 828
|
r
|
plotDepthVsN.R
|
rm(list = ls(all = TRUE))
fn="depth_vs_n.txt"
system(paste("rm",fn))
# theta alpha c
theta=0.0
alpha=0.5
c=1.0
system(paste("./func_of_n.bin",theta,alpha,c,">>",fn))
a=read.table(fn)
#a=a[100:nrow(a),]
attach(a)
plot(V1,V2,"b",xlab="n",ylab="mean depth",main=paste("100 repeats, theta=",theta,", alpha=",alpha,", c=",c,sep=""))
dev.print(pdf,paste("depth_vs_n_plots/rep100_theta=",theta,"_alpha=",alpha,"_c=",c,".pdf",sep=""))
a=a[100:nrow(a),]
colnames(a)=c("n","d")
a$logn=log(a$n)
a$logd=log(a$d)
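# the slope of this log-log fit estimates the exponent k in depth ~ n^k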
l=lm(logd~logn,data=a)
s=summary(l)
print(s)
attach(a)
plot(logn,logd,"b",xlab="log n",ylab="log mean depth",main=paste("100 repeats, theta=",theta,", alpha=",alpha,", c=",c,", slope=",format(s[[4]][2,1],digits=2),sep=""))
dev.print(pdf,paste("depth_vs_n_plots/loglog_rep100_theta=",theta,"_alpha=",alpha,"_c=",c,".pdf",sep=""))
|
35a68b0ae74cdd05869a26d53f465d012078c662
|
ec5db8e0e525c5198b59a14ac0c7ac00864fde28
|
/scripts/R-scripts/ggplot_bar.r
|
6a5ba9249f81f078dc95f227389d52836010bbb4
|
[] |
no_license
|
ChromatinCardiff/DanielPassProcessingPipelines
|
bdc1a02f6ec0fb99387db85574cdeefe7c247525
|
c3b02255a69f81c888f5cba14fb1b031ce99dc2e
|
refs/heads/master
| 2021-06-02T15:13:18.958935
| 2020-01-29T01:07:27
| 2020-01-29T01:07:27
| 32,514,237
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 378
|
r
|
ggplot_bar.r
|
library(ggplot2)
library(scales)
library(reshape2)
x <- read.table("Dropbox/Work/Manuscripts/Worm/Voxel/Data/L1raw.csv", header=TRUE, sep=",", check.names=FALSE)
x
x.melt <- melt(x, id=c("ID"))
head(x.melt)
p <- ggplot(x.melt, aes(x=ID, y = value))
p +
geom_bar(stat = 'identity') +
scale_x_continuous(breaks=seq(1,19))+
coord_flip()+
facet_wrap(~ variable, ncol=6)
|
cdeeec5cb5422fea83f9ea5419fc8a9bb4560bf9
|
0967f001a1b725978d8708778ca491b535bbb549
|
/R/tree_traversal_util.R
|
38362208f539b1170d119aa1a0c059780ef7244f
|
[] |
no_license
|
cran/climbeR
|
60a458e1c01ade4c272608d2b812d0bdbd98b9bb
|
21cfce2fd96981898e4de2d70def6d26e03ee869
|
refs/heads/master
| 2020-07-03T03:19:02.709090
| 2016-11-19T09:13:21
| 2016-11-19T09:13:21
| 74,199,250
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,476
|
r
|
tree_traversal_util.R
|
#' Recursive Depth Binning
#'
#' Function to recursively traverse depths of a tree.
#'
#' @param node_list Must be in the format of elements in the Ranger package's
#' forest$split.varIDs, which represents one tree in the forest.
#' Recursion is done by counting the number of non-terminal (split) nodes at the
#' current depth to anticipate the correct number of child nodes at the next depth.
#' @param depth Each recursive call must know the current depth in the tree.
#' @param expected_num_children The number of nodes in the current depth must
#' be anticipated, given the number of terminal nodes at the previous depth.
#' @param binned_depths A list passed between recursive calls, to store
#' results.
#' @return A list of vectors, where elements correspond to depths,
#' and vectors contain variable ID's of variables used to split at that depth.
recursiveDepthBinning <- function(node_list,
depth,
expected_num_children,
binned_depths) {
# take expected number of children at current depth
children <- node_list[1:expected_num_children]
# nodelist becomes all nodes at depths lower than current depth
node_list <- node_list[(expected_num_children + 1):length(node_list)]
# store and count the # of non-leaves at the current depth
nodes <- children[children != 0]
# calculate expected number of children in the next recursion
expected_num_children <- 2 * length(nodes)
### BASE CASE ### base case (no nodes at current depth)
if (expected_num_children == 0) {
return(binned_depths)
}
# update binned depths
binned_depths[[depth + 1]] <- nodes
### RECURSION ### recursive call on next depth
binned_depths <- recursiveDepthBinning(node_list,
depth + 1,
expected_num_children,
binned_depths)
return(binned_depths)
}
#' Start Recursive Depth Binning
#'
#' The starter function for the recursion in recursiveDepthBinning.
#'
#' @param tree_split_varIDs Given one element of a 'split.varIDs' list, this
#' function will pass it to the recursiveDepthBinning function to bin the tree
#' by depth, starting at the root.
#' @return A list with an element per depth encountered. Each
#' element is a vector of variable IDs
startRecursiveDepthBinning <- function(tree_split_varIDs) {
binned_depths <- list()
binned_depths <- recursiveDepthBinning(tree_split_varIDs,
0, 1, binned_depths)
return(binned_depths)
}
#' Bin Forest by Depth
#'
#' Given a forest object from the ranger package, this function will bin the
#' forest into depths. This is a helper function for the 'calculateAMDMS'
#' function.
#'
#' @param ranger_obj A ranger object from the ranger package, which was created
#' with param write.forest set to TRUE. In other words, it must have a
#' 'forest' property.
#' @return A list with 3 elements. The first is a list of vectors -
#' one for each independent variable occurring in the forest (this may not
#' be the complete set of independent variables, but we will account for any
#' variables that do not occur in the forest later). Each vector contains all
#' minimal depths of maximal subtrees in the forest, for the corresponding
#' independent variable. The second element is a vector of tree heights
#' 'forest_depths'. The third element is a set of variable id's for matching to
#' independent variable names.
binForestByDepth <- function(ranger_obj) {
# forest properties
trees <- ranger_obj$split.varIDs
num_trees <- ranger_obj$num.trees
# return these data structures, once populated
depth_bins <- list()
forest_depths <- c()
# get all non-zero var ID's, (0 represents a leaf node)
var_id_dump <- unlist(ranger_obj$split.varIDs)
var_id_set <- unique(var_id_dump[var_id_dump != 0])
# number of vars that occur in the forest
num_vars <- length(var_id_set)
# preallocate an array for each var's list of subtree depths
for (var in 1:num_vars) {
depth_bins[[var]] <- vector(mode = "list", length = num_trees)
}
# iterate over the forest
for (tree_idx in 1:num_trees) {
# bin each tree
tree_depth_bins <- startRecursiveDepthBinning(trees[[tree_idx]])
# remember the depth of each tree
forest_depths <- c(forest_depths, length(tree_depth_bins))
# add results to depth_bins structure
for (depth in 1:length(tree_depth_bins)) {
for (var_id in tree_depth_bins[[depth]]) {
# find variable index
var_idx <- match(var_id, var_id_set)
depth_bins[[var_idx]][[tree_idx]] <-
c(depth_bins[[var_idx]][[tree_idx]], depth)
}
}
}
return(list(depth_bins = depth_bins,
forest_depths = forest_depths,
variable_ids_used = var_id_set))
}
#' Count Splits Per Variable
#'
#' This function counts the number of times each variable was used to split a
#' tree.
#'
#' @param ranger_obj_forest A ranger object from the ranger package, which was created
#' with param write.forest set to TRUE. In other words, it must have a
#' 'forest' property.
#' @return A dataframe with one column of counts, and one column of
#' normalized counts. Rows are labeled by variable names.
countSplitsPerVar <- function(ranger_obj_forest) {
trees <- ranger_obj_forest$split.varIDs
counts <- c()
# check this, to see if we need to offset var id's by 1
status_var_exists <- ("status.varID" %in% attributes(ranger_obj_forest)$names)
# we need a count for every independent var (some may be 0)
num_ind_vars <- length(ranger_obj_forest$independent.variable.names)
# dump all the split ID's into one container
dump_split_IDs <- unlist(trees)
dump_split_IDs <- dump_split_IDs[dump_split_IDs != 0]
# get the list of var id's that occurred in the forest
sorted_var_id <- sort(unique(dump_split_IDs))
# it's possible for a var ID to be absent from the forest because
# it was never used to split. In this case, we need to build the complete
# set of var ID's, as anticipated in binForestByDepth
# create a vector of the range of these ID's.
vars_used <- min(sorted_var_id):max(sorted_var_id)
# (this may find some of the var ID's that were not used to split in the
# forest, but it may still be incomplete, which will be fixed below)
# exclude the status var ID, if it's in the list of var ID's
if (status_var_exists) {
if (ranger_obj_forest$status.varID %in% vars_used) {
vars_used <- vars_used[c(-ranger_obj_forest$status.varID)]
}
}
# tally counts if var was used, otherwise, it gets a count of 0
for (i in vars_used) {
if (!(i %in% sorted_var_id)) {
counts <- c(counts, 0)
} else {
counts <- c(counts, length(dump_split_IDs[dump_split_IDs == i]))
}
}
# call helper to look for missed variables
counts <- lookForVarsAbsentInForest(counts, vars_used,
num_ind_vars, ranger_obj_forest)
# normalize the counts
normalized_counts <- counts / sum(counts)
# ready to return
result <- data.frame(normalized_counts = normalized_counts,
counts = counts,
var_ids = vars_used)
rownames(result) <- ranger_obj_forest$independent.variable.names
return(result)
}
#' Look for Variable ID's that didn't occur in the Forest.
#'
#' Find any remaining vars, if missing. Vars can be absent in the forest, if
#' they were never used to split. This function does some bookkeeping, to find
#' elements in the count vector that should be 0. If there weren't enough vars
#' observed, their indeces must be either at the end of vars_used, or the
#' beginning.
#'
#' @param counts A vector of split counts in the forest. This may need to be
#' updated with 0's for variables that didn't occur in the forest.
#' @param vars_used The current list of varID's that have been found in the
#' forest.
#' @param num_ind_vars The number of independent vars. Counts must have this
#' many elements.
#' @param forest Pass this to access the 'status.varID' if necessary.
#' @return updated counts vector.
lookForVarsAbsentInForest <- function(counts, vars_used,
num_ind_vars, forest) {
# get the number missing
num_missing <- num_ind_vars - length(counts)
if (num_missing > 0) {
# find where to start
ifelse(min(vars_used) == 1, missing_at_start <- c(),
missing_at_start <- 1:(min(vars_used) - 1))
# check for missing vars at the end
ifelse(length(missing_at_start) < num_missing,
missing_at_end <- (max(vars_used) + 1):num_ind_vars,
missing_at_end <- c())
# combine missing indexes
missing_idxs <- c(missing_at_start, missing_at_end)
# check to exclude status variable (not a possible covariate)
status_var_exists <- ("status.varID" %in% attributes(forest)$names)
if (status_var_exists && forest$status.varID %in% missing_idxs) {
missing_idxs <- missing_idxs[c(-forest$status.varID)]
}
# depending on variable ID, add it to the start/end
for (i in missing_idxs) {
if (i < min(vars_used)) {
counts <- c(0, counts)
} else {
counts <- c(counts, 0)
}
}
}
return(counts)
}
#' Forest Averaged Minimal Depth of a Maximal Subtree (AMDMS)
#'
#' Given a result from the Ranger package (write.forest must
#' be set to TRUE), this function will traverse the trees and calculate the
#' first and second order average minimal depth of a maximal subtree.
#'
#' @param ranger_obj A ranger object from the ranger package, which was created
#' with param write.forest set to TRUE. In other words, it must have a
#' 'forest' property.
#' @return A data.frame with two columns: averaged first and second order
#' minimal depth of a maximal subtree.
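#' @examples
#' ## Illustrative sketch (not part of the original sources): it assumes the
#' ## 'ranger' package is installed; any ranger result built with
#' ## write.forest = TRUE should work as input.
#' \dontrun{
#' rf <- ranger::ranger(Species ~ ., data = iris, num.trees = 50,
#'                      write.forest = TRUE)
#' calculateAMDMS(rf)
#' }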
#' @export
calculateAMDMS <- function(ranger_obj) {
variable_id <- NULL;
if(!("forest" %in% names(ranger_obj))){
stop("no forest attribute present in ranger result.
Please run Ranger with write_forest set to TRUE")
}
forest <- ranger_obj$forest
binned_forest <- binForestByDepth(forest)
# retrieve variable ID's for matching
var_ids <- binned_forest$variable_ids_used
# forest averaged First and Second Order Minimal Depth
avg_fom_depths <- c(); avg_som_depths <- c()
# iterate over depth_bins to calculate first and second order minimal
# depth of maximal subtrees
for (var_depth_bins in binned_forest[[1]]) {
var_fom_depths <- c()
var_som_depths <- c()
for (tree_depths in var_depth_bins) {
var_fom_depths <- c(var_fom_depths, tree_depths[1])
if (length(tree_depths) > 1) {
var_som_depths <- c(var_som_depths, tree_depths[2])
}
}
# assign first order max depth mean
fom_depth <- mean(var_fom_depths)
# assign second order max depth mean
if (length(var_som_depths) > 0) {
som_depth <- mean(var_som_depths)
} else {
# in the case where there is no second order depth, populate
# with a -1
som_depth <- -1
}
avg_fom_depths <- c(avg_fom_depths, fom_depth)
avg_som_depths <- c(avg_som_depths, som_depth)
}
# combine the results
result <- data.frame(avg_fom_depths, avg_som_depths, var_ids)
names(result) <- c("first_order", "second_order", "variable_id")
# count number of times that each variable was split
splits_per_var <- countSplitsPerVar(forest)
# assign the rownames
# match splits per variable to the df with variable ID key
splits_per_var <- splits_per_var[match(result[["variable_id"]],
splits_per_var[["var_ids"]]), ]
result <- cbind(result, splits_per_var)
# sort by first order
result <- result[order(result$first_order), ]
result <- subset(result, select = -c(var_ids, variable_id))
return(result)
}
|
0de9db59b0761923d7a4517778f392abf2ee232b
|
2347dce12c59970ebb46578c85a17ffdf031733b
|
/data_processing/covariates/covariates_data_final.R
|
593880ffbe061f46029c3bcdb285642a84bc6c1f
|
[] |
no_license
|
eleonore-schneeg/tds_7
|
d9a3b0e5cad762725c31ce28aeb80daee8610395
|
c49b57f6b01d52b337337b92912039de9db1aab1
|
refs/heads/main
| 2023-03-27T18:10:07.130142
| 2021-03-24T10:50:11
| 2021-03-24T10:50:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,290
|
r
|
covariates_data_final.R
|
data <- data.frame(fread("ukb26390.csv"))
mycoding <- data.frame(fread("Codings_Showcase.csv"))
withdrawn=as.character(read.csv("w19266_20200204.csv")[,1])
data <-filter(data,!(eid %in% withdrawn)) # 502506 observations
# add all names of columns that we want to extract
columns_names_to_extract=unname(unlist(read.table("/rds/general/project/hda_students_data/live/Group7/General/Demetris/List_field_ids_to_extract-copy.txt", header=FALSE)))
exact_column_names = c()
# Find the exact name of the columns (i.e "X21022.0.0") and create a new vector with them
# Don't forget that we need to extract "eid" as well
for (i in (columns_names_to_extract)){
exact_column_names = c(exact_column_names,(colnames(data)[grep(paste0("X",i,".0"),fixed = TRUE,colnames(data))]))
}
# It's time to add "eid" to the vector and it must be the 1st column
exact_column_names <- c("eid",exact_column_names)
# filter the data set to subset rows
covariates_data <- data %>% select(all_of(exact_column_names))
# Check for missing values
for (i in 1:ncol(covariates_data)){
print(c(exact_column_names[i],sum(is.na(covariates_data[,i]))))
}
#saveRDS("/rds/general/project/hda_students_data/live/Group7/General/Demetris")
# Rename columns
covariates_data <- covariates_data %>%
  rename(
    age_at_baseline = X21022.0.0
  )
|
ae470d87b6eb9b7a69fc901f102d4cec657b0900
|
2d106850736d5d003e3ba0aa2d0b7142ddecf697
|
/man/stacked_bar_with_right_labels.Rd
|
9305b2aa889b000c7c2aa4d3af05b8cf6c249d00
|
[] |
no_license
|
HughParsonage/grattanCharts
|
cd7bc4c154a9200a45bb1e259adfe0286b417a37
|
5fd242ae4c35d7762c16aabfa2d1632ac413a9b2
|
refs/heads/master
| 2021-01-11T06:59:37.823484
| 2019-02-21T03:21:12
| 2019-02-21T03:21:12
| 72,337,562
| 1
| 2
| null | 2018-07-28T11:37:08
| 2016-10-30T08:21:52
|
R
|
UTF-8
|
R
| false
| true
| 2,842
|
rd
|
stacked_bar_with_right_labels.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stacked_with_right_labels.R
\name{stacked_bar_with_right_labels}
\alias{stacked_bar_with_right_labels}
\title{Stacked charts with labels at right}
\usage{
stacked_bar_with_right_labels(.data, geom = "bar", barwidth,
verbose = FALSE, right_margin = 0.5, reverse = FALSE,
scale_fill_manual_args, scale_y_args, x_continuous = FALSE,
scale_x_args, coord_cartesian_args, text_family = NULL, Annotate_Args,
theme_grattan.args, theme.args, nudge_up = 0, nudge_right = 0.5,
extra_left_spaces = 0L)
}
\arguments{
\item{.data}{A data frame, containing entries for \code{x}, \code{y}, and \code{fill}. \code{x} and \code{fill} must be ordered factors.}
\item{geom}{The type of chart ("bar", "area").}
\item{barwidth}{Passed to the \code{width} argument of \code{geom_bar}}
\item{verbose}{Report the margin used (in grid:: 'lines').}
\item{right_margin}{The amount of padding at right to use. The whole point of this function is to select a good right margin to allow space. But if the margin provided is wrong, it can be changed manually here.}
\item{reverse}{(logical) Use the reverse palette.}
\item{scale_fill_manual_args}{Arguments passed to \code{ggplot2::scale_fill_manual}.}
\item{scale_y_args}{A list of arguments passed to r \code{ggplot2::scale_y_continuous}.}
\item{x_continuous}{Should the x axis be continuous?}
\item{scale_x_args}{A list of arguments passed to \code{ggplot2::scale_x_discrete}. If \code{x_continuous}, then the arguments passed to \code{ggplot2::scale_x_continuous}.}
\item{coord_cartesian_args}{A list of arguments passed to \code{ggplot2::coord_cartesian}.}
\item{text_family}{Text family for theme and geom text.}
\item{Annotate_Args}{A list of list of arguments passed to \code{ggplot2::annotate}. Each element of the top-level list is an additional layer of \code{annotate}.}
\item{theme_grattan.args}{Arguments passed to \code{theme_hugh}, an alias for \code{theme_grattan}. (For example, the \code{base_size}.)}
\item{theme.args}{A list of arguments passed to \code{ggplot2::theme}.}
\item{nudge_up}{A numeric vector to be added every text y-coordinate.}
\item{nudge_right}{Move text right in units of \code{x}.}
\item{extra_left_spaces}{Number of space characters \code{" "} preceding the text labels. Extra space characters are added before every newline.}
}
\value{
A chart with the labels in the right gutter
}
\description{
Stacked charts with labels at right
}
\examples{
library(data.table)
dat <- data.table::CJ(
x = factor(1:10, ordered = TRUE),
fill = factor(c("A long but not\\ntoo long label", letters[2:3]),
levels = c("A long but not\\ntoo long label", letters[2:3]),
ordered = TRUE)
)
dat$y <- abs(rnorm(1:nrow(dat)))
stacked_bar_with_right_labels(dat)
}
|
bc30638c906a620c15f15f53497e0350ad2b7ef4
|
93f8e4312d4de70c0fe8012d0cf3e0f56451982d
|
/ques_carbon.rsx
|
73d865bdccfec2a2e86cbaf899bc3ba1af81fe07
|
[] |
no_license
|
alfanugraha/lumens_scripts
|
c312ce23e690c3e561c34443071befa5f0269649
|
dba6a7ad48bc21aaeb9f021f4cc354d72b144840
|
refs/heads/master
| 2023-07-27T19:16:14.244002
| 2019-07-16T10:44:01
| 2019-07-16T10:44:01
| 103,492,658
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 52,594
|
rsx
|
ques_carbon.rsx
|
##QUES-PostgreSQL=group
##proj.file=string
##landuse_1=string
##landuse_2=string
##planning_unit=string
##lookup_c=string
##raster.nodata=number 0
#include_peat=selection Yes;No
#peatmap=string
#lookup_c_peat=string
##resultoutput=output table
##statusoutput=output table
#=Load library
library(tiff)
library(foreign)
library(rasterVis)
library(reshape2)
library(plyr)
library(lattice)
library(latticeExtra)
library(RColorBrewer)
library(grid)
library(ggplot2)
library(spatial.tools)
library(rtf)
library(jsonlite)
library(splitstackshape)
library(stringr)
library(DBI)
library(RPostgreSQL)
library(rpostgis)
library(magick)
time_start<-paste(Sys.time(), sep="")
#=Load active project
load(proj.file)
# set driver connection
driver <- dbDriver('PostgreSQL')
project <- as.character(proj_descr[1,2])
DB <- dbConnect(
driver, dbname=project, host=as.character(pgconf$host), port=as.character(pgconf$port),
user=as.character(pgconf$user), password=as.character(pgconf$pass)
)
#=Retrieve all list of data that are going to be used
# list_of_data_luc ==> list of data land use/cover
# list_of_data_pu ==> list of data planning unit
# list_of_data_f ==> list of data factor
# list_of_data_lut ==> list of data lookup table
list_of_data_luc<-dbReadTable(DB, c("public", "list_of_data_luc"))
list_of_data_pu<-dbReadTable(DB, c("public", "list_of_data_pu"))
list_of_data_lut<-dbReadTable(DB, c("public", "list_of_data_lut"))
# return the selected data from the list
data_luc1<-list_of_data_luc[which(list_of_data_luc$RST_NAME==landuse_1),]
data_luc2<-list_of_data_luc[which(list_of_data_luc$RST_NAME==landuse_2),]
data_pu<-list_of_data_pu[which(list_of_data_pu$RST_NAME==planning_unit),]
data_lut<-list_of_data_lut[which(list_of_data_lut$TBL_NAME==lookup_c),]
T1<-data_luc1$PERIOD
T2<-data_luc2$PERIOD
#=Set Working Directory
pu_name<-data_pu$RST_DATA
idx_QUESC<-idx_QUESC+1
dirQUESC<-paste(dirname(proj.file), "/QUES/QUES-C/", idx_QUESC, "_QUESC_", T1, "_", T2, "_", pu_name, sep="")
dir.create(dirQUESC, mode="0777")
# create temp directory
dir.create(LUMENS_path_user, mode="0777")
setwd(LUMENS_path_user)
#=Set initial variables
# reference map
ref.obj<-exists('ref')
ref.path<-paste(dirname(proj.file), '/ref.tif', sep='')
if(!ref.obj){
if(file.exists(ref.path)){
ref<-raster(ref.path)
} else {
ref<-getRasterFromPG(pgconf, project, 'ref_map', 'ref.tif')
}
}
# peat
# if (include_peat == 1){
# data_peat<-list_of_data_pu[which(list_of_data_pu$RST_NAME==peatmap),]
# peat<-getRasterFromPG(pgconf, project, data_peat$RST_DATA, paste(data_peat$RST_DATA, '.tif', sep=''))
# lookup_peat<-dbReadTable(DB, c("public", data_peat$LUT_NAME))
# }
# planning unit
if (data_pu$RST_DATA=="ref") {
zone<-ref
count_ref<-as.data.frame(freq(ref))
count_ref<-na.omit(count_ref)
colnames(count_ref)<-c("IDADM", "COUNT")
ref_table<-dbReadTable(DB, c("public", data_pu$LUT_NAME))
lookup_z<-merge(count_ref, ref_table, by="IDADM")
} else {
zone<-getRasterFromPG(pgconf, project, data_pu$RST_DATA, paste(data_pu$RST_DATA, '.tif', sep=''))
lookup_z<-dbReadTable(DB, c("public", data_pu$LUT_NAME))
}
# landuse first time period
landuse1<-getRasterFromPG(pgconf, project, data_luc1$RST_DATA, paste(data_luc1$RST_DATA, '.tif', sep=''))
# landuse second time period
landuse2<-getRasterFromPG(pgconf, project, data_luc2$RST_DATA, paste(data_luc2$RST_DATA, '.tif', sep=''))
# landcover lookup table
lookup_c<-dbReadTable(DB, c("public", data_lut$TBL_DATA))
# set lookup table
lookup_c<-lookup_c[which(lookup_c[1] != raster.nodata),]
lookup_lc<-lookup_c
lookup_ref<-lut_ref
colnames(lookup_lc)<-c("ID","LC","CARBON")
colnames(lookup_z)<-c("ID", "COUNT_ZONE", "ZONE")
colnames(lookup_ref)<-c("REF", "REF_NAME")
nLandCoverId<-nrow(lookup_lc)
nPlanningUnitId<-nrow(lookup_z)
nRefId<-nrow(lookup_ref)
#=Projection handling
if (grepl("+units=m", as.character(ref@crs))){
print("Raster maps have projection in meter unit")
Spat_res<-res(ref)[1]*res(ref)[2]/10000
paste("Raster maps have ", Spat_res, " Ha spatial resolution, QuES-C will automatically generate data in Ha unit")
} else if (grepl("+proj=longlat", as.character(ref@crs))){
print("Raster maps have projection in degree unit")
Spat_res<-res(ref)[1]*res(ref)[2]*(111319.9^2)/10000
paste("Raster maps have ", Spat_res, " Ha spatial resolution, QuES-C will automatically generate data in Ha unit")
} else{
statuscode<-0
statusmessage<-"Raster map projection is unknown"
statusoutput<-data.frame(statuscode=statuscode, statusmessage=statusmessage)
quit()
}
#=Set project properties
title=location
tab_title<-as.data.frame(title)
period1=T1
period2=T2
period=period2-period1
proj_prop<-as.data.frame(title)
proj_prop$period1<-period1
proj_prop$period2<-period2
proj_prop$period <- do.call(paste, c(proj_prop[c("period1", "period2")], sep = " - "))
#=Create land use change data dummy
#=Create cross-tabulation for reference
dummy1<-data.frame(nPU=lookup_ref$REF, divider=nLandCoverId*nLandCoverId)
dummy1<-expandRows(dummy1, 'divider')
dummy2<-data.frame(nT1=lookup_lc$ID, divider=nLandCoverId)
dummy2<-expandRows(dummy2, 'divider')
dummy2<-data.frame(nT1=rep(dummy2$nT1, nRefId))
dummy3<-data.frame(nT2=rep(rep(lookup_lc$ID, nLandCoverId), nRefId))
landUseChangeRefDummy<-cbind(dummy1, dummy2, dummy3)
colnames(landUseChangeRefDummy)<-c('REF', 'ID_LC1', 'ID_LC2')
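# encode the three rasters into a single value per cell: ref occupies the lowest
# two decimal digits, landuse1 the next two (x100), landuse2 the next two (x100^2);
# this assumes all class IDs are smaller than 100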
R1<-(ref*1) + (landuse1*100^1)+ (landuse2*100^2)
ref.db<-as.data.frame(freq(R1))
ref.db<-na.omit(ref.db)
n<-3
k<-0
ref.db$value_temp<-ref.db$value
while(k < n) {
eval(parse(text=(paste("ref.db$Var", n-k, "<-ref.db$value_temp %% 100", sep=""))))
ref.db$value_temp<-floor(ref.db$value_temp/100)
k=k+1
}
ref.db$value_temp<-NULL
colnames(ref.db) = c("ID_CHG", "COUNT", "REF", "ID_LC1", "ID_LC2")
ref.db<-merge(landUseChangeRefDummy, ref.db, by=c('REF', 'ID_LC1', 'ID_LC2'), all=TRUE)
ref.db$ID_CHG<-ref.db$REF*1 + ref.db$ID_LC1*100^1 + ref.db$ID_LC2*100^2
ref.db<-replace(ref.db, is.na(ref.db), 0)
#=Create cross-tabulation for zone
xtab<-tolower(paste('xtab_', pu_name, T1, T2, sep=''))
data_xtab<-list_of_data_lut[which(list_of_data_lut$TBL_NAME==xtab),]
if(nrow(data_xtab)==0){
dummy1<-data.frame(nPU=lookup_z$ID, divider=nLandCoverId*nLandCoverId)
dummy1<-expandRows(dummy1, 'divider')
dummy2<-data.frame(nT1=lookup_lc$ID, divider=nLandCoverId)
dummy2<-expandRows(dummy2, 'divider')
dummy2<-data.frame(nT1=rep(dummy2$nT1, nPlanningUnitId))
dummy3<-data.frame(nT2=rep(rep(lookup_lc$ID, nLandCoverId), nPlanningUnitId))
landUseChangeMapDummy<-cbind(dummy1, dummy2, dummy3)
colnames(landUseChangeMapDummy)<-c('ZONE', 'ID_LC1', 'ID_LC2')
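  # same base-100 encoding as for the reference cross-tabulation above, this time
  # combining the planning-unit (zone) raster with the two land-cover maps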
R2<-(zone*1) + (landuse1*100^1)+ (landuse2*100^2)
lu.db<-as.data.frame(freq(R2))
lu.db<-na.omit(lu.db)
n<-3
k<-0
lu.db$value_temp<-lu.db$value
while(k < n) {
eval(parse(text=(paste("lu.db$Var", n-k, "<-lu.db$value_temp %% 100", sep=""))))
lu.db$value_temp<-floor(lu.db$value_temp/100)
k=k+1
}
lu.db$value_temp<-NULL
colnames(lu.db) = c("ID_CHG", "COUNT", "ZONE", "ID_LC1", "ID_LC2")
lu.db<-merge(landUseChangeMapDummy, lu.db, by=c('ZONE', 'ID_LC1', 'ID_LC2'), all=TRUE)
lu.db$ID_CHG<-lu.db$ZONE*1 + lu.db$ID_LC1*100^1 + lu.db$ID_LC2*100^2
lu.db<-replace(lu.db, is.na(lu.db), 0)
idx_lut<-idx_lut+1
eval(parse(text=(paste("in_lut", idx_lut, " <- lu.db", sep=""))))
eval(parse(text=(paste("list_of_data_lut<-data.frame(TBL_DATA='in_lut", idx_lut,"', TBL_NAME='", xtab, "', row.names=NULL)", sep=""))))
# save to PostgreSQL
InLUT_i <- paste('in_lut', idx_lut, sep="")
dbWriteTable(DB, InLUT_i, eval(parse(text=(paste(InLUT_i, sep="" )))), append=TRUE, row.names=FALSE)
dbWriteTable(DB, "list_of_data_lut", list_of_data_lut, append=TRUE, row.names=FALSE)
setwd(dirQUESC)
idx_factor<-idx_factor+1
chg_map<-tolower(paste('chgmap_', pu_name, T1, T2, sep=''))
eval(parse(text=(paste("writeRaster(R2, filename='", chg_map, ".tif', format='GTiff', overwrite=TRUE)", sep=""))))
eval(parse(text=(paste("factor", idx_factor, "<-'", chg_map, "'", sep=''))))
eval(parse(text=(paste("list_of_data_f<-data.frame(RST_DATA='factor", idx_factor,"', RST_NAME='", chg_map, "', row.names=NULL)", sep=""))))
InFactor_i <- paste("factor", idx_factor, sep="")
dbWriteTable(DB, "list_of_data_f", list_of_data_f, append=TRUE, row.names=FALSE)
#write to csv
list_of_data_f<-dbReadTable(DB, c("public", "list_of_data_f"))
csv_file<-paste(dirname(proj.file),"/csv_factor_data.csv", sep="")
write.table(list_of_data_f, csv_file, quote=FALSE, row.names=FALSE, sep=",")
addRasterToPG(project, paste0(chg_map, '.tif'), InFactor_i, srid)
unlink(paste0(chg_map, '.tif'))
} else {
lu.db<-dbReadTable(DB, c("public", data_xtab$TBL_DATA))
}
# rename column
colnames(lookup_c) = c("ID_LC1", "LC_t1", "CARBON_t1")
data_merge <- merge(lu.db,lookup_c,by="ID_LC1")
colnames(lookup_c) = c("ID_LC2", "LC_t2", "CARBON_t2")
data_merge <- as.data.frame(merge(data_merge,lookup_c,by="ID_LC2"))
colnames(lookup_z)[1]="ZONE"
colnames(lookup_z)[3]="Z_NAME"
data_merge <- as.data.frame(merge(data_merge,lookup_z,by="ZONE"))
#data_merge <- as.data.frame(merge(data_merge,lookup_ref,by="REF"))
data_merge$COUNT<-data_merge$COUNT*Spat_res
data_merge$COUNT_ZONE<-data_merge$COUNT_ZONE*Spat_res
#save crosstab
# original_data<-subset(data_merge, select=-c(CARBON_t1, CARBON_t2))
# eval(parse(text=(paste("write.dbf(original_data, 'lu.db_", pu_name ,"_", T1, "_", T2, ".dbf')", sep=""))))
# rm(lu.db, original_data)
#calculate area based on reference/administrative data
refMelt<-melt(data = ref.db, id.vars=c('REF'), measure.vars=c('COUNT'))
refArea<-dcast(data = refMelt, formula = REF ~ ., fun.aggregate = sum)
#=Carbon accounting process
NAvalue(landuse1)<-raster.nodata
NAvalue(landuse2)<-raster.nodata
rcl.m.c1<-as.matrix(lookup_lc[,1])
rcl.m.c2<-as.matrix(lookup_lc[,3])
rcl.m<-cbind(rcl.m.c1,rcl.m.c2)
rcl.m<-rbind(rcl.m, c(0, NA))
carbon1<-reclassify(landuse1, rcl.m)
carbon2<-reclassify(landuse2, rcl.m)
chk_em<-carbon1>carbon2
chk_sq<-carbon1<carbon2
emission<-((carbon1-carbon2)*3.67)*chk_em
sequestration<-((carbon2-carbon1)*3.67)*chk_sq
#=Modify carbon stock density for each time series
data_merge$ck_em<-data_merge$CARBON_t1>data_merge$CARBON_t2
data_merge$ck_sq<-data_merge$CARBON_t1<data_merge$CARBON_t2
data_merge$em<-(data_merge$CARBON_t1-data_merge$CARBON_t2)*data_merge$ck_em*data_merge$COUNT*3.67
data_merge$sq<-(data_merge$CARBON_t2-data_merge$CARBON_t1)*data_merge$ck_sq*data_merge$COUNT*3.67
data_merge$LU_CHG <- do.call(paste, c(data_merge[c("LC_t1", "LC_t2")], sep = " to "))
data_merge$null<-0
data_merge$nullCek<-data_merge$em+data_merge$sq
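# Illustrative worked example (added; numbers are made up): a 10 Ha patch changing
# from a class with CARBON_t1 = 200 ton C/Ha to one with CARBON_t2 = 50 ton C/Ha is
# an emission source because CARBON_t1 > CARBON_t2, so
# em = (200 - 50) * 10 * 3.67 # = 5505 ton CO2-eq (3.67 ~ 44/12 converts C to CO2)
# sq = 0 # ck_sq is FALSE for this row
# The opposite change (50 -> 200 ton C/Ha) would instead give sq = 5505 and em = 0.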
#=Generate area_zone lookup and calculate min area
area_zone<-melt(data = data_merge, id.vars=c('ZONE'), measure.vars=c('COUNT'))
area_zone<-dcast(data = area_zone, formula = ZONE ~ ., fun.aggregate = sum)
colnames(area_zone)[1]<-"ID"
colnames(area_zone)[2]<-"COUNT"
area_zone$ID<-as.numeric(as.character(area_zone$ID))
area_zone<-area_zone[with(area_zone, order(ID)),]
colnames(lookup_z)[1]<-"ID"
area_zone<-merge(area_zone, lookup_z, by="ID")
area<-min(sum(area_zone$COUNT), sum(data_merge$COUNT))
#=Generate administrative unit
colnames(refArea)[1]<-"ID"
colnames(refArea)[2]<-"COUNT"
colnames(lookup_ref)[1]<-"ID"
colnames(lookup_ref)[2]<-"KABKOT"
area_admin<-merge(refArea, lookup_ref, by="ID")
#=Calculate emission for each planning unit
zone_emission <- as.data.frame(zonal((Spat_res*emission),zone,'sum')) #adjust emission by actual raster area
zone_sequestration <- as.data.frame(zonal((Spat_res*sequestration),zone,'sum'))#adjust sequestration by actual raster area
colnames(zone_emission)[1] = "ID"
colnames(zone_emission)[2] = "Em_tot"
colnames(zone_sequestration)[1] = "ID"
colnames(zone_sequestration)[2]="Sq_tot"
zone_emission<-merge(area_zone,zone_emission,by="ID")
zone_carbon<-merge(zone_emission,zone_sequestration,by="ID")
zone_carbon$COUNT_ZONE<-NULL
zone_carbon$Net_em<-zone_carbon$Em_tot-zone_carbon$Sq_tot
zone_carbon$Net_em_rate<-round((zone_carbon$Net_em/zone_carbon$COUNT/period), digits=2)
zone_carbon[,4:7]<-round(zone_carbon[,4:7], digits=2)
#=Calculate emission for each administrative unit
admin_emission <- as.data.frame(zonal((Spat_res*emission),ref,'sum')) #adjust emission by actual raster area
admin_sequestration <- as.data.frame(zonal((Spat_res*sequestration),ref,'sum'))#adjust sequestration by actual raster area
colnames(admin_emission)[1] = "ID"
colnames(admin_emission)[2] = "Em_tot"
colnames(admin_sequestration)[1] = "ID"
colnames(admin_sequestration)[2]="Sq_tot"
admin_emission<-merge(area_admin,admin_emission,by="ID")
admin_carbon<-merge(admin_emission,admin_sequestration,by="ID")
admin_carbon$Net_em<-admin_carbon$Em_tot-admin_carbon$Sq_tot
admin_carbon$Net_em_rate<-round((admin_carbon$Net_em/admin_carbon$COUNT/period), digits=2)
admin_carbon[,4:7]<-round(admin_carbon[,4:7], digits=2)
#=Create final summary of emission calculation at landscape level
fs_id<-c(1,2,3,4,5,6,7)
fs_cat<-c("Period", "Total area", "Total Emisi (Ton CO2-eq)", "Total Sequestrasi (Ton CO2-eq)", "Emisi Bersih (Ton CO2-eq)", "Laju Emisi (Ton CO2-eq/tahun)","Laju emisi per-unit area (Ton CO2-eq/ha.tahun)")
fs_em<-sum(zone_carbon$Em_tot)
fs_sq<-sum(zone_carbon$Sq_tot)
fs_Nem<-fs_em-fs_sq
fs_Rem<-fs_Nem/period
fs_ARem<-fs_Rem/area
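# Illustrative example of the summary rates (added; made-up numbers): a net emission
# of 1000000 ton CO2-eq over a 5-year period on a 200000 Ha landscape gives
# fs_Rem = 1000000 / 5 # = 200000 ton CO2-eq per year
# fs_ARem = 200000 / 200000 # = 1 ton CO2-eq per Ha per year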
fs_summary<-c(proj_prop$period, area,round(fs_em, digits=2),round(fs_sq, digits=2),round(fs_Nem, digits=2),round(fs_Rem, digits=2),round(fs_ARem, digits=2))
fs_table<-data.frame(fs_id,fs_cat,fs_summary)
fs_table$fs_summary<-as.character(fs_table$fs_summary)
colnames(fs_table)<-c("ID", "Kategori", "Ringkasan")
#=Create QUES-C database
#=Zonal statistics database
lg<-length(unique(data_merge$ZONE))
zone_lookup<-area_zone
data_zone<-area_zone
data_zone$Z_CODE<-toupper(abbreviate(data_zone$Z_NAME))
data_zone$Rate_seq<-data_zone$Rate_em<-data_zone$Avg_C_t2<-data_zone$Avg_C_t1<-0
for(a in 1:lg){
i<-unique(data_merge$ZONE)[a]
data_z<-data_merge[which(data_merge$ZONE == i),]
data_zone<-within(data_zone, {Avg_C_t1<-ifelse(data_zone$ID == i, sum(data_z$CARBON_t1*data_z$COUNT)/sum(data_z$COUNT),Avg_C_t1)})
data_zone<-within(data_zone, {Avg_C_t2<-ifelse(data_zone$ID == i, sum(data_z$CARBON_t2*data_z$COUNT)/sum(data_z$COUNT),Avg_C_t2)})
data_zone<-within(data_zone, {Rate_em<-ifelse(data_zone$ID == i, sum(data_z$em)/(sum(data_z$COUNT)*period),Rate_em)})
data_zone<-within(data_zone, {Rate_seq<-ifelse(data_zone$ID == i, sum(data_z$sq)/(sum(data_z$COUNT)*period),Rate_seq)})
}
data_zone$COUNT_ZONE<-NULL
data_zone[,5:8]<-round(data_zone[,5:8],digits=2)
#=Emission
# calculate largest source of emission
data_merge_sel <- data_merge[ which(data_merge$nullCek > data_merge$null),]
order_sq <- as.data.frame(data_merge[order(-data_merge$sq),])
order_em <- as.data.frame(data_merge[order(-data_merge$em),])
# total emission
tb_em_total<-as.data.frame(cbind(order_em$LU_CHG, as.data.frame(round(order_em$em, digits=3))))
colnames(tb_em_total)<-c("LU_CHG", "em")
tb_em_total<-aggregate(em~LU_CHG,data=tb_em_total,FUN=sum)
tb_em_total$LU_CODE<-as.factor(toupper(abbreviate(tb_em_total$LU_CHG, minlength=5, strict=FALSE, method="both")))
tb_em_total<-tb_em_total[order(-tb_em_total$em),]
tb_em_total<-tb_em_total[c(3,1,2)]
tb_em_total$Percentage<-as.numeric(format(round((tb_em_total$em / sum(tb_em_total$em) * 100),2), nsmall=2))
tb_em_total_10<-head(tb_em_total,n=10)
# zonal emission
tb_em_zonal<-as.data.frame(NULL)
for (i in 1:length(zone_lookup$ID)){
tryCatch({
a<-(zone_lookup$ID)[i]
tb_em<-as.data.frame(cbind(order_em$ZONE, order_em$LU_CHG, as.data.frame(round(order_em$em, digits=3))))
colnames(tb_em)<-c("ZONE","LU_CHG", "em")
tb_em_z<-as.data.frame(tb_em[which(tb_em$ZONE == a),])
tb_em_z<-aggregate(em~ZONE+LU_CHG,data=tb_em_z,FUN=sum)
tb_em_z$LU_CODE<-as.factor(toupper(abbreviate(tb_em_z$LU_CHG, minlength=5, strict=FALSE, method="both")))
tb_em_z<-tb_em_z[order(-tb_em_z$em),]
tb_em_z<-tb_em_z[c(1,4,2,3)]
tb_em_z$Percentage<-as.numeric(format(round((tb_em_z$em / sum(tb_em_z$em) * 100),2), nsmall=2))
tb_em_z_10<-head(tb_em_z,n=10)
tb_em_zonal<-rbind(tb_em_zonal,tb_em_z_10)
},error=function(e){cat("ERROR :",conditionMessage(e), "\n")})
}
# rm(tb_em, tb_em_total, tb_em_z, tb_em_z_10)
#=Sequestration
# total sequestration
tb_seq_total<-as.data.frame(cbind(order_sq$LU_CHG, as.data.frame(round(order_sq$sq, digits=3))))
colnames(tb_seq_total)<-c("LU_CHG", "seq")
tb_seq_total<-aggregate(seq~LU_CHG,data=tb_seq_total,FUN=sum)
tb_seq_total$LU_CODE<-as.factor(toupper(abbreviate(tb_seq_total$LU_CHG, minlength=5, strict=FALSE, method="both")))
tb_seq_total<-tb_seq_total[order(-tb_seq_total$seq),]
tb_seq_total<-tb_seq_total[c(3,1,2)]
tb_seq_total$Percentage<-as.numeric(format(round((tb_seq_total$seq / sum(tb_seq_total$seq) * 100),2), nsmall=2))
tb_seq_total_10<-head(tb_seq_total,n=10)
# zonal sequestration
tb_seq_zonal<-as.data.frame(NULL)
for (i in 1:length(zone_lookup$ID)){
tryCatch({
a<-(zone_lookup$ID)[i]
tb_seq<-as.data.frame(cbind(order_sq$ZONE, order_sq$LU_CHG, as.data.frame(round(order_sq$sq, digits=3))))
colnames(tb_seq)<-c("ZONE","LU_CHG", "seq")
tb_seq_z<-as.data.frame(tb_seq[which(tb_seq$ZONE == a),]) # filter by the zone ID 'a', matching the emission loop above
tb_seq_z<-aggregate(seq~ZONE+LU_CHG,data=tb_seq_z,FUN=sum)
tb_seq_z$LU_CODE<-as.factor(toupper(abbreviate(tb_seq_z$LU_CHG, minlength=5, strict=FALSE, method="both")))
tb_seq_z<-tb_seq_z[order(-tb_seq_z$seq),]
tb_seq_z<-tb_seq_z[c(1,4,2,3)]
tb_seq_z$Percentage<-as.numeric(format(round((tb_seq_z$seq / sum(tb_seq_z$seq) * 100),2), nsmall=2))
tb_seq_z_10<-head(tb_seq_z,n=10)
tb_seq_zonal<-rbind(tb_seq_zonal,tb_seq_z_10)
},error=function(e){cat("ERROR :",conditionMessage(e), "\n")})
}
# rm(tb_seq, tb_seq_total, tb_seq_z, tb_seq_z_10)
#=Zonal additional statistics
if (((length(unique(data_merge$ID_LC1)))>(length(unique(data_merge$ID_LC2))))){
dimention<-length(unique(data_merge$ID_LC1))
name.matrix<-cbind(as.data.frame(data_merge$ID_LC1), as.data.frame(data_merge$LC_t1))
name.matrix<-unique(name.matrix)
colnames(name.matrix)<-c("ID","LC")
name.matrix<-name.matrix[order(name.matrix$ID),]
name.matrix$LC_CODE<-toupper(abbreviate(name.matrix$LC, minlength=4, method="both"))
} else{
dimention<-length(unique(data_merge$ID_LC2))
name.matrix<-cbind(as.data.frame(data_merge$ID_LC2), as.data.frame(data_merge$LC_t2))
name.matrix<-unique(name.matrix)
colnames(name.matrix)<-c("ID","LC")
name.matrix<-name.matrix[order(name.matrix$ID),]
name.matrix$LC_CODE<-toupper(abbreviate(name.matrix$LC, minlength=4, method="both"))
}
#=Transition matrix
# zonal emission matrix
e.m.z<-matrix(0, nrow=dimention, ncol=dimention)
em.matrix.zonal<-as.data.frame(NULL)
for (k in 1:length(zone_lookup$ID)){
for (i in 1:nrow(e.m.z)){
for (j in 1:ncol(e.m.z)){
em.data<-data_merge_sel[which(data_merge_sel$ID_LC1==i & data_merge_sel$ID_LC2==j & data_merge_sel$ZONE==k),]
e.m.z[i,j]<-as.numeric(round(sum(em.data$em), 2))
}
}
e.m.z<-as.data.frame(e.m.z)
e.m.z.c<-as.data.frame(cbind(name.matrix$LC_CODE,e.m.z))
e.m.z.c<-cbind(rep(k,nrow(e.m.z)),e.m.z.c)
em.matrix.zonal<-rbind(em.matrix.zonal,e.m.z.c)
}
colnames(em.matrix.zonal)<-c("ZONE","LC_CODE",as.vector(name.matrix$LC_CODE))
# rm(em.data, e.m.z, e.m.z.c)
# total emission matrix
e.m<-matrix(0, nrow=dimention, ncol=dimention)
for (i in 1:nrow(e.m)){
for (j in 1:ncol(e.m)){
em.data<-data_merge_sel[which(data_merge_sel$ID_LC1==i & data_merge_sel$ID_LC2==j),]
e.m[i,j]<-round(sum(em.data$em), digits=2)
}
}
e.m<-as.data.frame(e.m)
em.matrix.total<-as.data.frame(cbind(name.matrix$LC_CODE,e.m))
colnames(em.matrix.total)<-c("LC_CODE",as.vector(name.matrix$LC_CODE))
# rm(em.data, e.m)
# zonal sequestration matrix
s.m.z<-matrix(0, nrow=dimention, ncol=dimention)
seq.matrix.zonal<-as.data.frame(NULL)
for (k in 1:length(zone_lookup$ID)){
for (i in 1:nrow(s.m.z)){
for (j in 1:ncol(s.m.z)){
seq.data<-data_merge_sel[which(data_merge_sel$ID_LC1==i & data_merge_sel$ID_LC2==j & data_merge_sel$ZONE==k),]
s.m.z[i,j]<-round(sum(seq.data$sq), digits=2)
}
}
s.m.z<-as.data.frame(s.m.z)
s.m.z.c<-as.data.frame(cbind(name.matrix$LC_CODE,s.m.z))
s.m.z.c<-cbind(rep(k,nrow(s.m.z)),s.m.z.c)
seq.matrix.zonal<-rbind(seq.matrix.zonal,s.m.z.c)
}
colnames(seq.matrix.zonal)<-c("ZONE","LC_CODE",as.vector(name.matrix$LC_CODE))
# rm(seq.data, s.m.z, s.m.z.c)
# total sequestration matrix
s.m<-matrix(0, nrow=dimention, ncol=dimention)
for (i in 1:nrow(s.m)){
for (j in 1:ncol(s.m)){
seq.data<-data_merge_sel[which(data_merge_sel$ID_LC1==i & data_merge_sel$ID_LC2==j),]
s.m[i,j]<-round(sum(seq.data$sq), digits=2)
}
}
s.m<-as.data.frame(s.m)
seq.matrix.total<-as.data.frame(cbind(name.matrix$LC_CODE,s.m))
colnames(seq.matrix.total)<-c("LC_CODE",as.vector(name.matrix$LC_CODE))
# rm(seq.data, s.m, order_em, order_sq)
#=Save database
write.dbf(data_merge, paste0('QUESC_database_', T1, '-', T2, '.dbf'))
idx_lut<-idx_lut+1
eval(parse(text=(paste("in_lut", idx_lut, " <- data_merge", sep=""))))
eval(parse(text=(paste("list_of_data_lut<-data.frame(TBL_DATA='in_lut", idx_lut,"', TBL_NAME='out_hist_quesc_", tolower(pu_name), T1, T2, "', row.names=NULL)", sep=""))))
# save to PostgreSQL
InLUT_i <- paste('in_lut', idx_lut, sep="")
dbWriteTable(DB, InLUT_i, eval(parse(text=(paste(InLUT_i, sep="" )))), append=TRUE, row.names=FALSE)
dbWriteTable(DB, "list_of_data_lut", list_of_data_lut, append=TRUE, row.names=FALSE)
#=Rearrange zone carbon
zone_carbon_pub<-zone_carbon
colnames(zone_carbon_pub) <- c("ID", "Luas (Ha)", "Unit Perencanaan", "Total emisi (Ton CO2-eq)", "Total sekuestrasi (Ton CO2-eq)", "Emisi bersih (Ton CO2-eq)", "Laju emisi (Ton CO2-eq/Ha.yr)")
admin_carbon_pub<-admin_carbon
colnames(admin_carbon_pub) <- c("ID", "Luas (Ha)", "Wil. Administratif", "Total emisi (Ton CO2-eq)", "Total sekuestrasi (Ton CO2-eq)", "Emisi bersih (Ton CO2-eq)", "Laju emisi (Ton CO2-eq/Ha.yr)")
data_zone_pub<-data_zone
data_zone_pub$Z_CODE<-NULL
colnames(data_zone_pub) <- c("ID", "Luas (Ha)", "Unit Perencanaan", "Rerata Karbon Periode 1 (Ton C/Ha)", "Rerata Karbon Periode 2 (Ton C/Ha)", "Laju emisi (Ton CO2-eq/Ha.yr)", "Laju sequestrasi (Ton CO2-eq/Ha.yr)")
#=Create QUES-C Report (.doc)
# create maps and charts for report
# arrange numerous colors with RColorBrewer
myColors1 <- brewer.pal(9,"Set1")
myColors2 <- brewer.pal(8,"Accent")
myColors3 <- brewer.pal(12,"Paired")
myColors4 <- brewer.pal(9, "Pastel1")
myColors5 <- brewer.pal(8, "Set2")
myColors6 <- brewer.pal(8, "Dark2")
myColors7 <- brewer.pal(11, "Spectral")
myColors8 <- rev(brewer.pal(11, "RdYlGn"))
myColors <- c(myColors8,myColors5,myColors1, myColors2, myColors3, myColors4, myColors7, myColors8)
# land use/cover map first period
myColors.lu <- myColors[1:length(unique(lookup_lc$ID))]
lookup_lc$Colors<-myColors.lu
lu1<-as.data.frame(unique(lu.db$ID_LC1))
colnames(lu1)<-"ID"
# lu1<-merge(lu1,lookup_lc, by="ID", all=TRUE)
# lu1<-within(lu1, {Colors<-ifelse(is.na(Colors), "#FF0000", Colors)})
lu1<-merge(lu1,lookup_lc, by="ID")
lu1$ID<-as.numeric(as.character(lu1$ID))
lu1<-lu1[order(lu1$ID),]
lu1<-rbind(lu1, c(0, NA, NA, '#FFFFFF')) # new line
ColScale.lu1<-scale_fill_manual(name="Tipe tutupan lahan t1", breaks=lu1$ID, labels=lu1$LC, values=lu1$Colors)
plot.LU1<-gplot(landuse1, maxpixels=100000) + geom_raster(aes(fill=as.factor(value))) +
coord_equal() + ColScale.lu1 +
theme(plot.title = element_text(lineheight= 5, face="bold")) +
theme( axis.title.x=element_blank(),axis.title.y=element_blank(),
panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
legend.title = element_text(size=8),
legend.text = element_text(size = 6),
legend.key.height = unit(0.25, "cm"),
legend.key.width = unit(0.25, "cm"))
# land use/cover map next period
lu2<-as.data.frame(unique(lu.db$ID_LC2))
colnames(lu2)<-"ID"
# lu2<-merge(lu2,lookup_lc, by="ID", all=TRUE)
# lu2<-within(lu2, {Colors<-ifelse(is.na(Colors), "#FFFFFF", Colors)})
lu2<-merge(lu2,lookup_lc, by="ID")
lu2$ID<-as.numeric(as.character(lu2$ID))
lu2<-lu2[order(lu2$ID),]
lu2<-rbind(lu2, c(0, NA, NA, '#FFFFFF')) # new line
ColScale.lu2<-scale_fill_manual(name="Tipe tutupan lahan t2", breaks=lu2$ID, labels=lu2$LC, values=lu2$Colors)
plot.LU2<-gplot(landuse2, maxpixels=100000) + geom_raster(aes(fill=as.factor(value))) +
coord_equal() + ColScale.lu2 +
theme(plot.title = element_text(lineheight= 5, face="bold")) +
theme( axis.title.x=element_blank(),axis.title.y=element_blank(),
panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
legend.title = element_text(size=8),
legend.text = element_text(size = 6),
legend.key.height = unit(0.25, "cm"),
legend.key.width = unit(0.25, "cm"))
myColors <-c(myColors5,myColors1, myColors2, myColors3, myColors4, myColors7, myColors6, myColors8)
# zone
myColors.Z <- myColors[1:length(unique(lookup_z$ID))]
lookup_z$Colors<-myColors.Z
pu<-as.data.frame(unique(lu.db$ZONE))
colnames(pu)<-"ID"
pu<-merge(pu,lookup_z, by="ID", all=TRUE)
pu<-within(pu, {Colors<-ifelse(is.na(Colors), "#FFFFFF", Colors)})
pu$ID<-as.numeric(as.character(pu$ID))
pu<-pu[order(pu$ID),]
# pu<-rbind(pu, c(0, NA, NA, '#FFFFFF'))
ColScale.Z<-scale_fill_manual(name="Kelas Unit Perencanaan", breaks=pu$ID, labels=pu$Z_NAME, values=pu$Colors)
plot.Z<-gplot(zone, maxpixels=100000) + geom_raster(aes(fill=as.factor(value))) +
coord_equal() + ColScale.Z +
theme(plot.title = element_text(lineheight= 5, face="bold")) +
theme( axis.title.x=element_blank(),axis.title.y=element_blank(),
panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
legend.title = element_text(size=8),
legend.text = element_text(size = 6),
legend.key.height = unit(0.25, "cm"),
legend.key.width = unit(0.25, "cm"))
# administrative
myColors.Admin <- myColors[1:(length(unique(lookup_ref$ID))+1)]
ColScale.Admin<-scale_fill_manual(name="Wilayah Administratif", breaks=lookup_ref$ID, labels=lookup_ref$KABKOT, values=myColors.Admin)
plot.Admin<-gplot(ref, maxpixels=100000) + geom_raster(aes(fill=as.factor(value))) +
coord_equal() + ColScale.Admin +
theme(plot.title = element_text(lineheight= 5, face="bold")) +
theme( axis.title.x=element_blank(),axis.title.y=element_blank(),
panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
legend.title = element_text(size=8),
legend.text = element_text(size = 6),
legend.key.height = unit(0.25, "cm"),
legend.key.width = unit(0.25, "cm"))
# rm(myColors7,myColors1, myColors2, myColors3, myColors4, myColors5, myColors6,myColors8)
# save carbon, emission, and sequestration maps
setwd(dirQUESC)
color_pallete_cat <- c("#FFCC66", "#A5C663")
color_pallete_cont <- c("#62D849", "#0000f5", "#6B54D3")
writeRastFile(carbon1, paste0('carbon_', T1, '.tif'), cat = TRUE, colorpal = color_pallete_cat, lookup = lookup_lc)
writeRastFile(carbon2, paste0('carbon_', T2, '.tif'), cat = TRUE, colorpal = color_pallete_cat, lookup = lookup_lc)
writeRastFile(emission, paste0('emission_', T1, '-', T2, '.tif'), colorpal = color_pallete_cont)
writeRastFile(sequestration, paste0('sequestration_', T1, '-', T2, '.tif'), colorpal = color_pallete_cont)
# analysis_map=c('carbon1', 'carbon2', 'emission', 'sequestration')
# for(i in 1:length(analysis_map)){
# idx_factor<-idx_factor+1
# eval(parse(text=(paste('factor', idx_factor, '<-', analysis_map[i], sep=''))))
# eval(parse(text=(paste("list_of_data_f<-data.frame(RST_DATA='factor", idx_factor,"', RST_NAME='", analysis_map[i], "_", T1, T2, "', row.names=NULL)", sep=""))))
# InFactor_i <- paste("factor", idx_factor, sep="")
# dbWriteTable(DB, "list_of_data_f", list_of_data_f, append=TRUE, row.names=FALSE)
# #write to csv
# list_of_data_f<-dbReadTable(DB, c("public", "list_of_data_f"))
# csv_file<-paste(dirname(proj.file),"/csv_factor_data.csv", sep="")
# write.table(list_of_data_f, csv_file, quote=FALSE, row.names=FALSE, sep=",")
# eval(parse(text=(paste("addRasterToPG(project, '", analysis_map[i], ".tif', InFactor_i, srid)", sep=''))))
# }
# unlink(list.files(pattern = ".tif"))
resave(idx_QUESC, idx_lut, idx_factor, file=proj.file)
# carbon t1 map
y<-ceiling( maxValue(carbon1)/100)
y<-y*100
plot.C1 <- gplot(carbon1, maxpixels=100000) + geom_raster(aes(fill=value)) + coord_equal() +
scale_fill_gradient(name="Kerapatan karbon",low = "#FFCC66", high="#003300",limits=c(0,y), breaks=c(0,10,20,50,100,200,300), guide="colourbar") +
theme(plot.title = element_text(lineheight= 5, face="bold")) +
theme( axis.title.x=element_blank(),axis.title.y=element_blank(),
panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
legend.title = element_text(size=8),
legend.text = element_text(size = 7),
legend.key.height = unit(1.5, "cm"),
legend.key.width = unit(0.375, "cm"))
# carbon t2 map
plot.C2 <- gplot(carbon2, maxpixels=100000) + geom_raster(aes(fill=value)) + coord_equal() +
scale_fill_gradient(name="Kerapatan karbon",low = "#FFCC66", high="#003300",limits=c(0,y), breaks=c(0,10,20,50,100,200,300), guide="colourbar") +
theme(plot.title = element_text(lineheight= 5, face="bold")) +
theme( axis.title.x=element_blank(),axis.title.y=element_blank(),
panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
legend.title = element_text(size=8),
legend.text = element_text(size = 7),
legend.key.height = unit(1.5, "cm"),
legend.key.width = unit(0.375, "cm"))
# carbon emission map
plot.E <- gplot(emission, maxpixels=100000) + geom_raster(aes(fill=value)) + coord_equal() +
scale_fill_gradient(name="Emisi (ton CO2-eq)",low = "#FFCC66", high="#FF0000", guide="colourbar") +
theme(plot.title = element_text(lineheight= 5, face="bold")) +
theme( axis.title.x=element_blank(),axis.title.y=element_blank(),
panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
legend.title = element_text(size=8),
legend.text = element_text(size = 8),
legend.key.height = unit(0.375, "cm"),
legend.key.width = unit(0.375, "cm"))
# carbon sequestration map
plot.S <- gplot(sequestration, maxpixels=100000) + geom_raster(aes(fill=value)) + coord_equal() +
scale_fill_gradient(name="Sequestrasi (ton CO2-eq)",low = "#FFCC66", high="#000033", guide="colourbar") +
theme(plot.title = element_text(lineheight= 5, face="bold")) +
theme( axis.title.x=element_blank(),axis.title.y=element_blank(),
panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
legend.title = element_text(size=8),
legend.text = element_text(size = 8),
legend.key.height = unit(0.375, "cm"),
legend.key.width = unit(0.375, "cm"))
# average zonal carbon rate t1
rcl.m.c1<-as.matrix(data_zone[,1])
rcl.m.c2<-as.matrix(data_zone[,5])
rcl.m<-cbind(rcl.m.c1,rcl.m.c2)
rcl.m<-rbind(rcl.m, c(0, NA))
Z.Avg.C.t1<-reclassify(zone, rcl.m)
plot.Z.Avg.C.t1<-gplot(Z.Avg.C.t1, maxpixels=100000) + geom_raster(aes(fill=value)) +
coord_equal() + scale_fill_gradient(name="Carbon Density Level",low = "#FFCC66", high="#003300", guide="colourbar") +
ggtitle(paste("Rerata Kerapatan Karbon", location, period1 )) +
theme(plot.title = element_text(lineheight= 5, face="bold")) +
theme( axis.title.x=element_blank(),axis.title.y=element_blank(),
panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
legend.title = element_text(size=8),
legend.text = element_text(size = 8),
legend.key.height = unit(0.375, "cm"),
legend.key.width = unit(0.375, "cm"))
# average zonal carbon rate t2
rcl.m.c1<-as.matrix(data_zone[,1])
rcl.m.c2<-as.matrix(data_zone[,6])
rcl.m<-cbind(rcl.m.c1,rcl.m.c2)
rcl.m<-rbind(rcl.m, c(0, NA))
Z.Avg.C.t2<-reclassify(zone, rcl.m)
plot.Z.Avg.C.t2<-gplot(Z.Avg.C.t2, maxpixels=100000) + geom_raster(aes(fill=value)) +
coord_equal() + scale_fill_gradient(name="Carbon Density Level",low = "#FFCC66", high="#003300", guide="colourbar") +
ggtitle(paste("Rerata Kerapatan Karbon", location, period2 )) +
theme(plot.title = element_text(lineheight= 5, face="bold")) +
theme( axis.title.x=element_blank(),axis.title.y=element_blank(),
panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
legend.title = element_text(size=8),
legend.text = element_text(size = 8),
legend.key.height = unit(0.375, "cm"),
legend.key.width = unit(0.375, "cm"))
# average zonal emission rate
rcl.m.c1<-as.matrix(data_zone[,1])
rcl.m.c2<-as.matrix(data_zone[,7])
rcl.m<-cbind(rcl.m.c1,rcl.m.c2)
rcl.m<-rbind(rcl.m, c(0, NA))
Z.Avg.em<-reclassify(zone, rcl.m)
plot.Z.Avg.em<-gplot(Z.Avg.em, maxpixels=100000) + geom_raster(aes(fill=value)) +
coord_equal() + scale_fill_gradient(name="Tingkat Emisi",low = "#fff5f0", high="#67000d", guide="colourbar") +
ggtitle(paste(" Rerata laju emisi", location, period1, "-", period2 )) +
theme(plot.title = element_text(lineheight= 5, face="bold")) +
theme( axis.title.x=element_blank(),axis.title.y=element_blank(),
panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
legend.title = element_text(size=8),
legend.text = element_text(size = 8),
legend.key.height = unit(0.375, "cm"),
legend.key.width = unit(0.375, "cm"))
# average zonal sequestration rate
rcl.m.c1<-as.matrix(data_zone[,1])
rcl.m.c2<-as.matrix(data_zone[,8])
rcl.m<-cbind(rcl.m.c1,rcl.m.c2)
rcl.m<-rbind(rcl.m, c(0, NA))
Z.Avg.sq<-reclassify(zone,rcl.m)
plot.Z.Avg.sq<-gplot(Z.Avg.sq, maxpixels=100000) + geom_raster(aes(fill=value)) +
coord_equal() + scale_fill_gradient(name="Tingkat Sequestrasi",low = "#fff5f0", high="#67000d", guide="colourbar") +
ggtitle(paste("Rerata laju sequestrasi", location, period1, "-", period2 )) +
theme(plot.title = element_text(lineheight= 5, face="bold")) +
theme( axis.title.x=element_blank(),axis.title.y=element_blank(),
panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
legend.title = element_text(size=8),
legend.text = element_text(size = 8),
legend.key.height = unit(0.375, "cm"),
legend.key.width = unit(0.375, "cm"))
# emission rate
emissionRate<-ggplot(data=zone_carbon, aes(x=reorder(Z_NAME, -Net_em_rate), y=(zone_carbon$Net_em_rate))) + geom_bar(stat="identity", fill="Red") +
geom_text(data=zone_carbon, aes(label=round(Net_em_rate, 1)),size=4) +
ggtitle(paste("Rerata laju emisi bersih", location, period1,"-", period2 )) + guides(fill=FALSE) + ylab("CO2-eq/ha.yr") +
theme(plot.title = element_text(lineheight= 5, face="bold")) +
theme(axis.title.x=element_blank(), axis.text.x = element_text(angle=20),
panel.grid.major=element_blank(), panel.grid.minor=element_blank())
# largest emission
largestEmission<-ggplot(data=tb_em_total_10, aes(x=reorder(LU_CODE, -em), y=(em))) + geom_bar(stat="identity", fill="blue") +
geom_text(data=tb_em_total_10, aes(x=LU_CODE, y=em, label=round(em, 1)),size=3, vjust=0.1) +
ggtitle(paste("Sumber emisi terbesar", location )) + guides(fill=FALSE) + ylab("CO2-eq") +
theme(plot.title = element_text(lineheight= 5, face="bold")) + scale_y_continuous() +
theme(axis.title.x=element_blank(), axis.text.x = element_text(size=8),
panel.grid.major=element_blank(), panel.grid.minor=element_blank())
# largest sequestration
largestSeq<-ggplot(data=tb_seq_total_10, aes(x=reorder(LU_CODE, -seq), y=(seq))) + geom_bar(stat="identity", fill="green") +
geom_text(data=tb_seq_total_10, aes(x=LU_CODE, y=seq, label=round(seq, 1)),size=3, vjust=0.1) +
ggtitle(paste("Sumber sequestrasi terbesar", location )) + guides(fill=FALSE) + ylab("CO2-eq") +
theme(plot.title = element_text(lineheight= 5, face="bold")) + scale_y_continuous() +
theme(axis.title.x=element_blank(), axis.text.x = element_text(size=8),
panel.grid.major=element_blank(), panel.grid.minor=element_blank())
printArea <- function(x){
format(x, digits=15, big.mark=",")
}
printRate <- function(x){
format(x, digits=15, nsmall=2, decimal.mark=".", big.mark=",")
}
tabel_ket<-proj_descr
row.names(tabel_ket)<-NULL
tabel_ket$Type<-as.character(tabel_ket$Type)
colnames(tabel_ket)<-c("Tipe", "Keterangan")
tabel_ket[1,1]<-"Proyek"
tabel_ket[2,1]<-"Deskripsi"
tabel_ket[3,1]<-"Direktori"
tabel_ket[4,1]<-"Wilayah Analisis"
tabel_ket[5,1]<-"Provinsi"
tabel_ket[6,1]<-"Negara"
# write report
title1<-"{\\colortbl;\\red0\\green0\\blue0;\\red255\\green0\\blue0;\\red146\\green208\\blue80;\\red0\\green176\\blue240;\\red140\\green175\\blue71;\\red0\\green112\\blue192;\\red79\\green98\\blue40;} \\pard\\qr\\b\\fs70\\cf2 L\\cf3U\\cf4M\\cf5E\\cf6N\\cf7S \\cf1HASIL ANALISIS \\par\\b0\\fs20\\ql\\cf1"
title2<-paste("\\pard\\qr\\b\\fs40\\cf1 Modul QUES-C - Analisis Dinamika Cadangan Karbon \\par\\b0\\fs20\\ql\\cf1", sep="")
sub_title<-"\\cf2\\b\\fs32 ANALISIS DINAMIKA CADANGAN KARBON\\cf1\\b0\\fs20"
#rad_grk<-"\\pard\\qr\\b\\fs40\\cf1 Dokumen RAD GRK - Bab 2.3. Permasalahan Emisi GRK \\par\\b0\\fs20\\ql\\cf1"
test<-as.character(Sys.Date())
date<-paste("Date : ", test, sep="")
time_start<-paste("Proses dimulai : ", time_start, sep="")
time_end<-paste("Proses selesai : ", Sys.time(), sep="")
line<-paste("------------------------------------------------------------------------------------------------------------------------------------------------")
area_name_rep<-paste("\\b", "\\fs20", location, "\\b0","\\fs20")
I_O_period_1_rep<-paste("\\b","\\fs20", period1)
I_O_period_2_rep<-paste("\\b","\\fs20", period2)
chapter1<-"\\b\\fs32 DATA YANG DIGUNAKAN \\b0\\fs20"
chapter2<-"\\b\\fs32 ANALISIS PADA TINGKAT BENTANG LAHAN \\b0\\fs20"
chapter3<-"\\b\\fs32 ANALISIS PADA TINGKAT UNIT PERENCANAAN \\b0\\fs20"
# ==== Report 0. Cover=====
rtffile <- RTF("QUES-C_report.doc", font.size=11, width = 8.267, height = 11.692, omi = c(0,0,0,0))
# INPUT
file.copy(paste0(LUMENS_path, "/ques_cover.png"), dirQUESC, recursive = FALSE)
img_location<-paste0(dirQUESC, "/ques_cover.png")
# loading the .png image to be edited
cover <- image_read(img_location)
# to display the annotated image interactively, simply evaluate the variable name, e.g. `cover`
# adding text at the desired location
text_submodule <- paste("Sub-Modul Karbon\n\nAnalisis Dinamika Cadangan Karbon\n", location, ", ", "Periode ", T1, "-", T2, sep="")
cover_image <- image_annotate(cover, text_submodule, size = 23, gravity = "southwest", color = "white", location = "+46+220", font = "Arial")
cover_image <- image_write(cover_image, img_location) # write the annotated cover back to disk so addPng() below receives a file path
# 'gravity' defines the 'baseline' anchor of annotation. "southwest" means the text should be anchored at the bottom left of the image
# 'location' defines the relative location of the text to the anchor defined in 'gravity'
# configure font type
addPng(rtffile, cover_image, width = 8.267, height = 11.692)
addPageBreak(rtffile, width = 8.267, height = 11.692, omi = c(1,1,1,1))
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addNewLine(rtffile)
addParagraph(rtffile, title1)
addParagraph(rtffile, title2)
#addNewLine(rtffile)
#addParagraph(rtffile, rad_grk)
addNewLine(rtffile)
addParagraph(rtffile, line)
addParagraph(rtffile, time_start)
addParagraph(rtffile, time_end)
addParagraph(rtffile, line)
addNewLine(rtffile)
width<-as.vector(c(1.34,3.1))
addTable(rtffile,tabel_ket,font.size=8,col.widths=width)
addPageBreak(rtffile)
addParagraph(rtffile, sub_title)
addNewLine(rtffile)
addParagraph(rtffile, line)
addParagraph(rtffile, date)
addParagraph(rtffile, time_start)
addParagraph(rtffile, time_end)
addParagraph(rtffile, line)
addNewLine(rtffile)
addParagraph(rtffile, "Analisis dinamika cadangan karbon dilakukan untuk perubahan cadangan karbon di suatu daerah pada satu kurun waktu. Metode yang digunakan adalah metode Stock Difference. Emisi dihitung sebagai jumlah penurunan cadangan karbon akibat perubahan tutupan lahan terjadi apabila cadangan karbon awal lebih tinggi dari cadangan karbon setelah terjadinya perubahan penggunaan lahan. Sebaliknya, sequestrasi dihitung sebagai jumlah penambahan cadangan karbon akibat perubahan tutupan lahan (cadangan karbon pada penggunaan lahan awal lebih rendah dari cadangan karbon setelah terjadinya perubahan penggunaan lahan).. Analisis ini dilakukan dengan menggunakan data peta tutupan lahan pada dua periode waktu yang berbeda dan tabel acuan kerapatan karbon untuk masing-masing tipe tutupan lahan. Selain itu, dengan memasukkan data unit perencanaan kedalam analisis, dapat diketahui tingkat perubahan cadangan karbon pada masing-masing kelas unit perencanaan yang ada. Informasi yang dihasilkan melalui analisis ini dapat digunakan dalam proses perencanaan untuk berbagai hal, diantaranya menentukan prioritas aksi mitigasi perubahan iklim, mengetahui faktor pemicu terjadinya emisi, dan merencanakan skenario pembangunan di masa yang akan datang.")
addNewLine(rtffile)
addParagraph(rtffile, chapter1)
addParagraph(rtffile, line)
addNewLine(rtffile)
addParagraph(rtffile, "Data yang digunakan dalam analisis ini adalah data peta penggunaan lahan dan data peta unit perencanaan daerah. Data pendukung yang digunakan adalah peta acuan tipe penggunaan lahan, data acuan kerapatan karbon masing-masing tipe tutupan lahan dan data acuan kelas unit perencanaan.")
addNewLine(rtffile)
text <- paste("\\b \\fs20 Peta penutupan lahan \\b0 \\fs20 ", area_name_rep, "\\b \\fs20 tahun \\b0 \\fs20 ", I_O_period_1_rep, sep="")
addParagraph(rtffile, text)
addNewLine(rtffile, n=1)
addPlot.RTF(rtffile, plot.fun=plot, width=6.4, height=4, res=150, plot.LU1 )
#rm(plot.LU1)
text <- paste("\\b \\fs20 Peta penutupan lahan \\b0 \\fs20 ", area_name_rep, "\\b \\fs20 tahun \\b0 \\fs20 ", I_O_period_2_rep, sep="")
addParagraph(rtffile, text)
addNewLine(rtffile, n=1)
addPlot.RTF(rtffile, plot.fun=plot, width=6.7, height=4, res=150, plot.LU2 )
#rm(plot.LU2)
text <- paste("\\b \\fs20 Peta unit perencanaan \\b0 \\fs20 ", area_name_rep, sep="")
addParagraph(rtffile, text)
addNewLine(rtffile, n=1)
addPlot.RTF(rtffile, plot.fun=plot, width=6.7, height=4, res=150, plot.Z )
#rm(plot.Z)
text <- paste("\\b \\fs20 Peta wilayah administratif \\b0 \\fs20 ", area_name_rep, sep="")
addParagraph(rtffile, text)
addNewLine(rtffile, n=1)
addPlot.RTF(rtffile, plot.fun=plot, width=6.7, height=4, res=150, plot.Admin )
#rm(plot.Admin)
addNewLine(rtffile, n=1)
addNewLine(rtffile, n=1)
addNewLine(rtffile, n=1)
addNewLine(rtffile, n=1)
addParagraph(rtffile, chapter2)
addParagraph(rtffile, line)
addNewLine(rtffile)
addParagraph(rtffile, "Pada bagian ini disajikan hasil analisis dinamika cadangan karbon untuk keseluruhan bentang lahan yang dianalisis. Beberapa bentuk analisis yang dilakukan antara lain: tingkat emisi, tingkat sequestrasi, laju emisi dan tipe perubahan penggunaan lahan yang paling banyak menyebabkan emisi/sequestrasi.")
addNewLine(rtffile)
text <- paste("\\b \\fs20 Peta kerapatan karbon \\b0 \\fs20 ", area_name_rep, "\\b \\fs20 tahun \\b0 \\fs20 ", I_O_period_1_rep, " \\b \\fs20 (dalam Ton C/Ha)\\b0 \\fs20", sep="")
addParagraph(rtffile, text)
addPlot.RTF(rtffile, plot.fun=plot, width=6.7, height=4, res=150, plot.C1 )
#rm(plot.C1)
text <- paste("\\b \\fs20 Peta kerapatan karbon \\b0 \\fs20 ", area_name_rep, "\\b \\fs20 tahun \\b0 \\fs20 ", I_O_period_2_rep, " \\b \\fs20 (dalam Ton C/Ha)\\b0 \\fs20", sep="")
addParagraph(rtffile, text)
addNewLine(rtffile, n=1)
addPlot.RTF(rtffile, plot.fun=plot, width=6.7, height=4, res=150, plot.C2 )
addNewLine(rtffile, n=1)
#rm(plot.C2)
text <- paste("\\b \\fs20 Peta emisi karbon \\b0 \\fs20 ", area_name_rep, "\\b \\fs20 tahun \\b0 \\fs20 ", I_O_period_1_rep, "\\b \\fs20 - \\b0 \\fs20 ", I_O_period_2_rep, sep="")
addParagraph(rtffile, text)
addNewLine(rtffile, n=1)
addPlot.RTF(rtffile, plot.fun=plot, width=6.7, height=4, res=150, plot.E )
addNewLine(rtffile, n=1)
#rm(plot.E)
text <- paste("\\b \\fs20 Peta penyerapan karbon \\b0 \\fs20 ", area_name_rep, "\\b \\fs20 tahun \\b0 \\fs20 ", I_O_period_1_rep, "\\b \\fs20 - \\b0 \\fs20 ", I_O_period_2_rep, sep="")
addParagraph(rtffile, text)
addPlot.RTF(rtffile, plot.fun=plot, width=6.7, height=4, res=150, plot.S )
#rm(plot.S)
addNewLine(rtffile, n=1)
addNewLine(rtffile, n=1)
addParagraph(rtffile, "\\b \\fs20 Intisari perhitungan emisi\\b0 \\fs20")
addNewLine(rtffile, n=1)
fs_table[2,3]<-printArea(as.numeric(as.character(fs_table[2,3])))
fs_table[3,3]<-printRate(as.numeric(as.character(fs_table[3,3])))
fs_table[4,3]<-printRate(as.numeric(as.character(fs_table[4,3])))
fs_table[5,3]<-printRate(as.numeric(as.character(fs_table[5,3])))
fs_table[6,3]<-printRate(as.numeric(as.character(fs_table[6,3])))
fs_table[7,3]<-printRate(as.numeric(as.character(fs_table[7,3])))
addTable(rtffile, fs_table)
addNewLine(rtffile, n=1)
addParagraph(rtffile, "\\b \\fs20 Intisari perhitungan emisi per unit perencanaan\\b0 \\fs20")
addNewLine(rtffile, n=1)
data_zone_pub[2]<-printArea(data_zone_pub[2])
addTable(rtffile, data_zone_pub)
addNewLine(rtffile, n=1)
addNewLine(rtffile, n=1)
zone_carbon_pub[2]<-printArea(zone_carbon_pub[2])
zone_carbon_pub[4]<-printRate(zone_carbon_pub[4])
zone_carbon_pub[5]<-printRate(zone_carbon_pub[5])
zone_carbon_pub[6]<-printRate(zone_carbon_pub[6])
addTable(rtffile, zone_carbon_pub)
addNewLine(rtffile, n=1)
addParagraph(rtffile, "\\b \\fs20 Intisari perhitungan emisi per wilayah administrasi\\b0 \\fs20")
addNewLine(rtffile, n=1)
admin_carbon_pub[2]<-printArea(admin_carbon_pub[2])
admin_carbon_pub[4]<-printRate(admin_carbon_pub[4])
admin_carbon_pub[5]<-printRate(admin_carbon_pub[5])
admin_carbon_pub[6]<-printRate(admin_carbon_pub[6])
addTable(rtffile, admin_carbon_pub)
addParagraph(rtffile, "Keterangan : ")
addParagraph(rtffile, "Emisi bersih = Total emisi - Total sequestrasi ")
addParagraph(rtffile, "Laju emisi = (Total Emisi - Total Sequestrasi) / (luas * periode waktu) ")
addNewLine(rtffile, n=1)
addPlot.RTF(rtffile, plot.fun=plot, width=6.7, height=3, res=150, emissionRate )
addNewLine(rtffile, n=1)
# rm(emissionRate)
addPlot.RTF(rtffile, plot.fun=plot, width=6.7, height=4, res=150, plot.Z.Avg.C.t1 )
addNewLine(rtffile, n=1)
#rm(plot.Z.Avg.C.t1)
addPlot.RTF(rtffile, plot.fun=plot, width=6.7, height=4, res=150, plot.Z.Avg.C.t2 )
addNewLine(rtffile, n=1)
#rm(plot.Z.Avg.C.t2)
addPlot.RTF(rtffile, plot.fun=plot, width=6.7, height=4, res=150, plot.Z.Avg.em )
addNewLine(rtffile, n=1)
#rm(plot.Z.Avg.em)
addPlot.RTF(rtffile, plot.fun=plot, width=6.7, height=4, res=150, plot.Z.Avg.sq )
#rm(plot.Z.Avg.sq)
addNewLine(rtffile, n=1)
addNewLine(rtffile, n=1)
addNewLine(rtffile, n=1)
addNewLine(rtffile, n=1)
addNewLine(rtffile, n=1)
addParagraph(rtffile, "\\b \\fs20 Sumber Emisi Terbesar\\b0 \\fs20")
addNewLine(rtffile, n=1)
tb_em_total_10[3]<-printRate(tb_em_total_10[3])
addTable(rtffile, tb_em_total_10)
addNewLine(rtffile, n=1)
addPlot.RTF(rtffile, plot.fun=plot, width=6.7, height=3, res=150, largestEmission )
addNewLine(rtffile, n=1)
# rm(largestEmission)
addParagraph(rtffile, "\\b \\fs20 Sumber sequestrasi terbesar\\b0 \\fs20")
addNewLine(rtffile, n=1)
tb_seq_total_10[3]<-printRate(tb_seq_total_10[3])
addTable(rtffile, tb_seq_total_10)
addNewLine(rtffile, n=1)
addPlot.RTF(rtffile, plot.fun=plot, width=6.7, height=3, res=150, largestSeq )
addNewLine(rtffile, n=1)
# rm(largestSeq)
addNewLine(rtffile, n=1)
addNewLine(rtffile, n=1)
addParagraph(rtffile, chapter3)
addParagraph(rtffile, line)
addNewLine(rtffile)
addParagraph(rtffile, "Pada bagian ini disajikan hasil analisis dinamika cadangan karbon untuk masing-masing kelas unit perencanaan yang dianalisis. Beberapa bentuk analisis yang dilakukan antara lain: tingkat emisi, tingkat sequestrasi, laju emisi dan tipe perubahan penggunaan lahan yang paling banyak menyebabkan emisi/sequestrasi.")
addNewLine(rtffile)
#z.emission.name<-as.vector(NULL)
#z.seq.name<-as.vector(NULL)
for(i in 1:length(zone_lookup$ID)){
tryCatch({
a<-zone_lookup$ID[i]
zona<-paste("\\b", "\\fs20", i, "\\b0","\\fs20")
zona_nm<-paste("\\b", "\\fs20", data_zone$Z_NAME[i], "\\b0","\\fs20")
zona_ab<-paste("\\b", "\\fs20", data_zone$Z_CODE[i], "\\b0","\\fs20")
addParagraph(rtffile, "\\b \\fs20 Sumber Emisi terbesar pada \\b0 \\fs20", zona,"\\b \\fs20 - \\b0 \\fs20", zona_nm, "\\b \\fs20 (\\b0 \\fs20", zona_ab, "\\b \\fs20)\\b0 \\fs20" )
addNewLine(rtffile, n=1)
tb_em_zon<-tb_em_zonal[which(tb_em_zonal$ZONE == a),]
tb_em_zon$ZONE<-NULL
tabel_em_zon<-tb_em_zon
tabel_em_zon[3]<-printRate(tabel_em_zon[3])
addTable(rtffile, tabel_em_zon)
addNewLine(rtffile, n=1)
#largest emission
largestE.Z<-ggplot(data=tb_em_zon, aes(x=reorder(LU_CODE, -em), y=(em))) + geom_bar(stat="identity", fill="blue") +
geom_text(data=tb_em_zon, aes(x=LU_CODE, y=em, label=round(em, 1)),size=3, vjust=0.1) +
ggtitle(paste("Sumber Emisi Terbesar Pada",i, "-", data_zone$Z_CODE[i] )) + guides(fill=FALSE) + ylab("CO2-eq") +
theme(plot.title = element_text(lineheight= 5, face="bold")) + scale_y_continuous() +
theme(axis.title.x=element_blank(), axis.text.x = element_text(size=8),
panel.grid.major=element_blank(), panel.grid.minor=element_blank())
#png(filename=paste("Largest_Emission_Z_",a,".png", sep=""),type="cairo",units="in",width=6.7,height=4,res=125)
#print(largestE.Z)
#dev.off()
#z.emission.name<-c(z.emission.name, paste("Largest_Emission_Z_",a,".png", sep=""))
addPlot.RTF(rtffile, plot.fun=plot, width=6.7, height=3, res=150, largestE.Z )
addNewLine(rtffile, n=1)
addParagraph(rtffile, "\\b \\fs20 Sumber Sequestrasi Terbesar Pada \\b0 \\fs20", zona,"\\b \\fs20 - \\b0 \\fs20", zona_nm, "\\b \\fs20 (\\b0 \\fs20", zona_ab, "\\b \\fs20)\\b0 \\fs20" )
addNewLine(rtffile, n=1)
tb_seq_zon<-tb_seq_zonal[which(tb_seq_zonal$ZONE == a),]
tb_seq_zon$ZONE<-NULL
tabel_seq_zon<-tb_seq_zon
tabel_seq_zon[3]<-printRate(tabel_seq_zon[3])
addTable(rtffile, tabel_seq_zon)
addNewLine(rtffile, n=1)
#largest sequestration
largestS.Z<-ggplot(data=tb_seq_zon, aes(x=reorder(LU_CODE, -seq), y=(seq))) + geom_bar(stat="identity", fill="green") +
geom_text(data=tb_seq_zon, aes(x=LU_CODE, y=seq, label=round(seq, 1)),size=3, vjust=0.1) +
ggtitle(paste("Sumber Sequestrasi Terbesar Pada",i, "-", data_zone$Z_CODE[i] )) + guides(fill=FALSE) + ylab("CO2-eq") +
theme(plot.title = element_text(lineheight= 5, face="bold")) + scale_y_continuous() +
theme(axis.title.x=element_blank(), axis.text.x = element_text(size=8),
panel.grid.major=element_blank(), panel.grid.minor=element_blank())
#png(filename=paste("Largest_Seq_Z_",a,".png", sep=""),type="cairo",units="in",width=6.7,height=4,res=125)
#print(largestS.Z)
#dev.off()
#z.seq.name<-c(z.seq.name, paste("Largest_Seq_Z_",a,".png", sep=""))
addPlot.RTF(rtffile, plot.fun=plot, width=6.7, height=3, res=150, largestS.Z )
addNewLine(rtffile, n=1)
},error=function(e){cat("Nice try pal! ~ please re-check your input data :",conditionMessage(e), "\n"); addParagraph(rtffile, "no data");addNewLine(rtffile)})
}
# rm(largestE.Z, largestS.Z)
addNewLine(rtffile)
done(rtffile)
unlink(img_location)
eval(parse(text=(paste('rtf_QUESC_', T1, '_', T2, '_', pu_name, '<-rtffile', sep=''))))
eval(parse(text=(paste('resave(rtf_QUESC_', T1, '_', T2, '_', pu_name, ', file=proj.file)', sep=''))))
# command<-paste("start ", "winword ", dirQUESC, "/LUMENS_QUES-C_report.doc", sep="" )
# shell(command)
resultoutput<-data.frame(PATH=c(paste0(dirQUESC, '/carbon_', T1, '.tif'),
paste0(dirQUESC, '/carbon_', T2, '.tif'),
paste0(dirQUESC, '/emission_', T1, '-', T2, '.tif'),
paste0(dirQUESC, '/sequestration_', T1, '-', T2, '.tif'),
paste0(dirQUESC, '/QUESC_database_', T1, '-', T2, '.dbf')))
dbDisconnect(DB)
#=Writing final status message (code, message)
statuscode<-1
statusmessage<-"QUES-C analysis successfully completed!"
statusoutput<-data.frame(statuscode=statuscode, statusmessage=statusmessage)
|
6ca8191746e0936ecfc9cb326298d69f18d6a5d7
|
e68e99f52f3869c60d6488f0492905af4165aa64
|
/tests/testthat/test-nn-rnn.R
|
2309a5e0ef82fbddc7e27164eff99f7832ef5939
|
[
"MIT"
] |
permissive
|
mlverse/torch
|
a6a47e1defe44b9c041bc66504125ad6ee9c6db3
|
f957d601c0295d31df96f8be7732b95917371acd
|
refs/heads/main
| 2023-09-01T00:06:13.550381
| 2023-08-30T17:44:46
| 2023-08-30T17:44:46
| 232,347,878
| 448
| 86
|
NOASSERTION
| 2023-09-11T15:22:22
| 2020-01-07T14:56:32
|
C++
|
UTF-8
|
R
| false
| false
| 8,368
|
r
|
test-nn-rnn.R
|
context("nn-rnn")
test_that("rnn nonlinearity", {
rnn <- nn_rnn(1, 10)
expect_equal(rnn$nonlinearity, "tanh")
rnn <- nn_rnn(1, 10, nonlinearity = "relu")
expect_equal(rnn$nonlinearity, "relu")
expect_error(
rnn <- nn_rnn(1, 10, nonlinearity = "garbage"),
class = "value_error"
)
})
test_that("rnn dropout", {
for (p in c(0., .276, .731, 1)) {
for (train in c(TRUE, FALSE)) {
rnn <- nn_rnn(10, 1000, 2, bias = FALSE, dropout = p, nonlinearity = "relu")
with_no_grad({
rnn$weight_ih_l1$fill_(1)
rnn$weight_hh_l1$fill_(1)
rnn$weight_ih_l2$fill_(1)
rnn$weight_hh_l2$fill_(1)
})
if (train) {
rnn$train()
} else {
rnn$eval()
}
input <- torch_ones(1, 1, 10)
hx <- torch_zeros(2, 1, 1000)
out <- rnn(input, hx)
output <- out[[1]]
hy <- out[[2]]
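      # Added explanatory comment: with every weight fixed at 1, each of the 1000
      # first-layer hidden units sees relu(sum of ten 1s) = 10. The second layer sums
      # those 1000 activations, so without dropout (p == 0 or eval mode) every output
      # unit equals 10 * 1000 = 10000; inverted dropout preserves that expectation,
      # which is why the stochastic case is only required to fall between 8000 and 12000.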
expect_equal_to_tensor(output$min(), output$max(), tolerance = 1e-2)
output_val <- output[1, 1, 1]
if (p == 0 || !train) {
expect_equal_to_r(output_val, 10000)
} else if (p == 1) {
expect_equal_to_r(output_val, 0)
} else {
expect_equal_to_r(output_val > 8000, TRUE)
expect_equal_to_r(output_val < 12000, TRUE)
}
expect_equal_to_tensor(hy[1, , ]$min(), hy[1, , ]$max(), tolerance = 1e-2)
expect_equal_to_tensor(hy[2, , ]$min(), hy[2, , ]$max(), tolerance = 1e-2)
expect_equal_to_r(hy[1, 1, 1], 10)
expect_equal_to_tensor(hy[2, 1, 1], output_val, tolerance = 1e-2)
}
}
})
test_that("rnn packed sequence", {
x <- torch_tensor(rbind(
c(1, 2, 0, 0),
c(1, 2, 3, 0),
c(1, 2, 3, 4)
), dtype = torch_float())
x <- x[, , newaxis]
lens <- torch_tensor(c(2, 3, 4), dtype = torch_long())
p <- nn_utils_rnn_pack_padded_sequence(x, lens,
batch_first = TRUE,
enforce_sorted = FALSE
)
rnn <- nn_rnn(1, 4, nonlinearity = "relu")
out <- rnn(p)
unpack <- nn_utils_rnn_pad_packed_sequence(out[[1]])
expect_tensor_shape(unpack[[1]], c(4, 3, 4))
expect_equal_to_r(unpack[[2]]$to(dtype = torch_int()), c(2, 3, 4))
})
test_that("lstm", {
lstm <- nn_lstm(10, 5)
expect_equal(lstm$mode, "LSTM")
input <- torch_ones(1, 1, 10)
o <- lstm(input)
expect_length(o, 2)
expect_tensor_shape(o[[1]], c(1, 1, 5))
expect_tensor_shape(o[[2]][[1]], c(1, 1, 5))
expect_tensor_shape(o[[2]][[2]], c(1, 1, 5))
expect_tensor_shape(lstm$weight_ih_l1, c(20, 10))
expect_tensor_shape(lstm$weight_hh_l1, c(20, 5))
expect_tensor_shape(lstm$bias_ih_l1, c(20))
expect_tensor_shape(lstm$bias_hh_l1, c(20))
expect_length(lstm$parameters, 4)
with_no_grad({
lstm$weight_ih_l1$fill_(1)
lstm$weight_hh_l1$fill_(1)
lstm$bias_ih_l1$fill_(1)
lstm$bias_hh_l1$fill_(1)
})
z <- lstm(input)
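  # Added explanatory comment: with all parameters equal to 1 and an all-ones input,
  # every LSTM gate saturates near sigmoid(12) ~ 1 and the candidate near tanh(12) ~ 1,
  # so the cell state is ~1 and the hidden output is approximately tanh(1) ~ 0.7616.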
expect_equal_to_tensor(z[[1]], torch_ones(1, 1, 5) * 0.7615868, tolerance = 1e-5)
expect_equal_to_tensor(z[[2]][[1]], torch_ones(1, 1, 5) * 0.7615868, tolerance = 1e-5)
expect_equal_to_tensor(z[[2]][[2]], torch_ones(1, 1, 5), tolerance = 1e-5)
lstm <- nn_lstm(10, 5, bias = FALSE)
expect_tensor_shape(lstm$weight_ih_l1, c(20, 10))
expect_tensor_shape(lstm$weight_hh_l1, c(20, 5))
expect_null(lstm$bias_ih_l1)
  expect_null(lstm$bias_hh_l1)
with_no_grad({
lstm$weight_ih_l1$fill_(1)
lstm$weight_hh_l1$fill_(1)
})
z <- lstm(input)
expect_equal_to_tensor(z[[1]], torch_ones(1, 1, 5) * 0.7615405, tolerance = 1e-5)
expect_equal_to_tensor(z[[2]][[1]], torch_ones(1, 1, 5) * 0.7615405, tolerance = 1e-4)
expect_equal_to_tensor(z[[2]][[2]], torch_ones(1, 1, 5), tolerance = 1e-4)
lstm <- nn_lstm(10, 5, num_layers = 2)
expect_length(lstm$parameters, 8)
lstm <- nn_lstm(10, 5, num_layers = 3)
expect_length(lstm$parameters, 12)
with_no_grad({
for (p in lstm$parameters) {
p$fill_(1)
}
})
z <- lstm(input)
expect_equal_to_tensor(z[[1]], torch_ones(1, 1, 5) * 0.7580, tolerance = 1e-4)
expect_equal_to_tensor(z[[2]][[1]][1, , ], torch_ones(1, 5) * 0.7616, tolerance = 1e-4)
expect_equal_to_tensor(z[[2]][[1]][2, , ], torch_ones(1, 5) * 0.7580, tolerance = 1e-4)
expect_equal_to_tensor(z[[2]][[1]][3, , ], torch_ones(1, 5) * 0.7580, tolerance = 1e-4)
expect_equal_to_tensor(z[[2]][[2]][1, , ], torch_ones(1, 5), tolerance = 1e-4)
expect_equal_to_tensor(z[[2]][[2]][2, , ], torch_ones(1, 5) * 0.9970, tolerance = 1e-4)
expect_equal_to_tensor(z[[2]][[2]][3, , ], torch_ones(1, 5) * 0.9969, tolerance = 1e-4)
})
test_that("gru", {
gru <- nn_gru(10, 5)
expect_equal(gru$mode, "GRU")
input <- torch_ones(1, 1, 10)
o <- gru(input)
expect_length(o, 2)
expect_tensor_shape(o[[1]], c(1, 1, 5))
expect_tensor_shape(o[[2]], c(1, 1, 5))
expect_tensor_shape(gru$weight_ih_l1, c(15, 10))
expect_tensor_shape(gru$weight_hh_l1, c(15, 5))
expect_tensor_shape(gru$bias_ih_l1, c(15))
expect_tensor_shape(gru$bias_hh_l1, c(15))
expect_length(gru$parameters, 4)
with_no_grad({
gru$weight_ih_l1$fill_(1)
gru$weight_hh_l1$fill_(1)
gru$bias_ih_l1$fill_(1)
gru$bias_hh_l1$fill_(1)
})
z <- gru(input)
expect_equal_to_tensor(z[[1]], torch_ones(1, 1, 5) * 6.1989e-06, tolerance = 1e-5)
expect_equal_to_tensor(z[[2]], torch_ones(1, 1, 5) * 6.1989e-06, tolerance = 1e-5)
gru <- nn_gru(10, 5, bias = FALSE)
expect_tensor_shape(gru$weight_ih_l1, c(15, 10))
expect_tensor_shape(gru$weight_hh_l1, c(15, 5))
expect_null(gru$bias_ih_l1)
  expect_null(gru$bias_hh_l1)
with_no_grad({
gru$weight_ih_l1$fill_(1)
gru$weight_hh_l1$fill_(1)
})
z <- gru(input)
expect_equal_to_tensor(z[[1]], torch_ones(1, 1, 5) * 4.5419e-05, tolerance = 1e-5)
expect_equal_to_tensor(z[[2]], torch_ones(1, 1, 5) * 4.5419e-05, tolerance = 1e-4)
gru <- nn_gru(10, 5, num_layers = 2)
expect_length(gru$parameters, 8)
gru <- nn_gru(10, 5, num_layers = 3)
expect_length(gru$parameters, 12)
with_no_grad({
for (p in gru$parameters) {
p$fill_(1)
}
})
z <- gru(input)
expect_equal_to_tensor(z[[1]], torch_ones(1, 1, 5) * 0.0702, tolerance = 1e-4)
expect_equal_to_tensor(z[[2]][1, , ], torch_ones(1, 5) * 6.1989e-06, tolerance = 1e-4)
expect_equal_to_tensor(z[[2]][2, , ], torch_ones(1, 5) * 1.1378e-01, tolerance = 1e-4)
expect_equal_to_tensor(z[[2]][3, , ], torch_ones(1, 5) * 7.0209e-02, tolerance = 1e-4)
})
test_that("rnn gpu", {
skip_if_cuda_not_available()
rnn <- nn_rnn(10, 1)
rnn$to(device = "cuda")
input <- torch_ones(1, 1, 10, device = "cuda")
expect_message(out <- rnn(input), regexp = NA)
expect_length(out, 2)
expect_tensor_shape(out[[1]], c(1, 1, 1))
expect_tensor_shape(out[[2]], c(1, 1, 1))
})
test_that("GRU on the GPU keeps its parameters", {
skip_if_cuda_not_available()
model <- nn_module(
initialize = function(input_size, hidden_size) {
self$rnn <- nn_gru(
input_size = input_size,
hidden_size = hidden_size,
batch_first = TRUE
)
self$output <- nn_linear(hidden_size, 1)
},
forward = function(x) {
# list of [output, hidden]
# we are interested in the final timestep only, so we can directly use [[2]]
# but we want to remove the un-needed singleton dimension on the left
x <- self$rnn(x)[[2]]$squeeze(1)
x %>% self$output()
}
)
m <- model(1, 64)
e_pars <- names(m$parameters)
m$cuda()
r_pars <- names(m$parameters)
expect_equal(r_pars, e_pars)
})
test_that("lstm and gru works with packed sequences", {
# regression test for https://github.com/mlverse/torch/issues/499
x <- torch_tensor(rbind(
c(1, 2, 0, 0),
c(1, 2, 3, 0),
c(1, 2, 3, 4)
), dtype = torch_float())
x <- x[, , newaxis]
lens <- torch_tensor(c(2, 3, 4), dtype = torch_long())
p <- nn_utils_rnn_pack_padded_sequence(x, lens,
batch_first = TRUE,
enforce_sorted = FALSE
)
rnn <- nn_lstm(1, 4)
out <- rnn(p)
unpack <- nn_utils_rnn_pad_packed_sequence(out[[1]])
expect_tensor_shape(unpack[[1]], c(4, 3, 4))
rnn <- nn_gru(1, 4)
out <- rnn(p)
unpack <- nn_utils_rnn_pad_packed_sequence(out[[1]])
expect_tensor_shape(unpack[[1]], c(4, 3, 4))
})
test_that("gru can be traced", {
x <- nn_gru(10, 10)
tr <- jit_trace(x, torch_randn(10, 10, 10))
v <- torch_randn(10, 10, 10)
expect_equal_to_tensor(
x(v)[[1]],
tr(v)[[1]]
)
})
|
337f8e47a056f324f18ef6bac2a69fceb8b4a2da
|
4a6647ad7e52a85c9dc5de06613a8f4e3c9696da
|
/man/get_song_meta.Rd
|
4490c7eb3885504aab7dc8c72cf0fefcdfdb5b58
|
[] |
no_license
|
MicahJackson/geniusr
|
23f0ac16d2d8b8c8148b016f0537922df2c20588
|
a1cf5c904c2592f5082d73071923d1f6d787efc9
|
refs/heads/master
| 2020-03-17T12:03:05.155078
| 2018-09-20T18:07:38
| 2018-09-20T18:07:38
| 131,529,681
| 0
| 0
| null | 2018-04-29T20:59:20
| 2018-04-29T20:59:20
| null |
UTF-8
|
R
| false
| true
| 556
|
rd
|
get_song_meta.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meta.R
\name{get_song_meta}
\alias{get_song_meta}
\title{Retrieve meta data for a song}
\usage{
get_song_meta(song_id, access_token = genius_token())
}
\arguments{
\item{song_id}{A song ID (\code{song_id} returned in \code{\link{search_song}})}
\item{access_token}{Genius' client access token, defaults to \code{genius_token}}
}
\description{
The Genius API lets you search for meta data for a song, given a song ID.
}
\examples{
\dontrun{
get_song_meta(song_id = 3039923)
}
}
|
f68452f57fdfe2d11278ed700eb87f8fd47abfe9
|
8d418db208008a6693168f7d4ee47d2587e6a197
|
/cachematrix.R
|
4936b74516babe69a86e44410dc38ad64dd4c31a
|
[] |
no_license
|
Pbailon/ProgrammingAssignment2
|
1c993856bad6f57cab1648af14c03a633a5ac3f9
|
49274e8ec29075851bcbedb328d7c25f7edeab07
|
refs/heads/master
| 2020-04-05T23:33:58.537632
| 2015-03-22T01:42:24
| 2015-03-22T01:42:24
| 32,656,148
| 0
| 0
| null | 2015-03-22T00:45:29
| 2015-03-22T00:45:28
| null |
UTF-8
|
R
| false
| false
| 1,297
|
r
|
cachematrix.R
|
## These functions cache the inverse of a matrix so that it only needs to be
## computed once.
## makeCacheMatrix() creates a special "matrix" object: a list of four functions
## to set/get the value of the matrix and to set/get the value of its inverse.
## The matrix supplied is assumed to be square and invertible, so calling
## solve() with a single argument returns its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(solve) m <<- solve
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function checks if the inverse of the matrix has already been calculated
## and is cached. If it is, the function gets the inverse from the cache, if it
## isn't, it calculates the inverse and caches it.
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
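## Illustrative usage sketch (added; the matrix values are arbitrary):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 4), nrow = 2))
## cacheSolve(m) # computes and caches the inverse: diag(c(0.5, 0.25))
## cacheSolve(m) # prints "getting cached data" and returns the cached inverse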
|