blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c5c67ec72aa95c2cf9fd434274b5ac71be51d57c | a816bcd3416d393ef0b933c968ed100124dee404 | /BG_population_model/param_sanity_check.R | b1ed32c6d5e21d92bbe905d9ea7f61039259bd33 | [] | no_license | ShaunCoutts/BG_herb_res | c97ec21f07d864faaaed2a1243a99d8b74a39113 | 8056b1320d262efe8208753b9c15922494a4e99b | refs/heads/master | 2021-01-17T04:02:14.378459 | 2018-07-31T15:43:15 | 2018-07-31T15:43:15 | 42,309,872 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,604 | r | param_sanity_check.R | # sanity check script that looks for parameter combinations that produce population numbers
# that are too low or too high compared to field observations.
#install.packages('gbm', repos = 'http://cran.us.r-project.org')
library(gbm)
library(gridExtra)
library(gtable)
library(grid)
# get the data produced by the model runs
all_dat = read.csv("/home/shauncoutts/Dropbox/projects/MHR_blackgrass/BG_population_model/model_output/param_filtering_out.csv",
header = TRUE, stringsAsFactors = FALSE)
# use post-herb above ground population to select the parameter values that
# result in populations in the range 16,348 - 132,003, which comes from
# Queenborough et al 2011, Figure 3 after some processing to get from the counts used in
# that study to plants per hectare.
all_dat$final_pop = NA
all_dat$final_pop[all_dat$num_ab_ph_tot < 16348] = 'low'
all_dat$final_pop[all_dat$num_ab_ph_tot > 132000] = 'high'
all_dat$final_pop[is.na(all_dat$final_pop)] = 'in'
all_dat$in_out = ifelse(all_dat$final_pop == 'in', 1, 0)
# histograms of parameters for in vs out parameter values
par(mfrow = c(6, 3))
pred_names = names(all_dat)[1:30]
plot_inds = c(13, 15:30)
for(i in plot_inds){
hist(all_dat[, i], main = pred_names[i], breaks = seq(min(all_dat[,i]), max(all_dat[,i]), length = 50))
hist(all_dat[all_dat$in_out == 1, i], col = grey(0.5), border = grey(0.50), add = TRUE,
breaks = seq(min(all_dat[,i]), max(all_dat[,i]), length = 50))
}
# looks like it is almost all fec_max and fec_dd that controls if a population goes through the
# sanity check or not
# Use a BRT to look for more complicated relationships that need interactions to explain them.
BRT_bi = gbm(in_out ~ int_Rr + germ_prob + fec0 + fec_cost + fec_max + dd_fec + herb_effect + g_prot + seed_sur +
pro_exposed + scale_pollen + shape_pollen + seed_pro_short + seed_mean_dist_short + pro_seeds_to_mean_short +
seed_mean_dist_long + pro_seeds_to_mean_long, distribution = 'bernoulli', interaction.depth = 4, shrinkage = 0.05,
n.trees = 10000, cv.folds = 10, class.stratify.cv = TRUE, data = all_dat, n.cores = 3, verbose = TRUE)
# setwd("/home/shauncoutts/Dropbox/projects/MHR_blackgrass/BG_population_model/model_output")
# save(BRT_bi, file = 'BRT_pop_filter.Rdata')
# load('BRT_pop_filter.Rdata')
# extract useful info from the trees
op_trees = gbm.perf(BRT_bi, oobag.curve = TRUE, method = 'cv')
# get relative influence
rel_inf = summary(BRT_bi, n.trees = op_trees)
# var rel.inf
# fec_max fec_max 41.9977878
# dd_fec dd_fec 34.3847773
# seed_sur seed_sur 7.2111341
# fec_cost fec_cost 4.7544122
# fec0 fec0 1.5901951
# germ_prob germ_prob 1.5785734
# seed_pro_short seed_pro_short 1.2707816
# herb_effect herb_effect 1.1272816
# int_Rr int_Rr 1.1073332
# g_prot g_prot 1.0508439
# pro_exposed pro_exposed 0.9997911
# shape_pollen shape_pollen 0.9864782
# scale_pollen scale_pollen 0.9747225
# seed_mean_dist_short seed_mean_dist_short 0.9658881
#
# looks like there are 2 important variables, fec_max and dd_fec, while 2 other variables,
# seed_sur and fec_cost, have some influence
plot_inds = c(5, 6, 9, 4)
plot_list = list()
count = 1
for(i in 1:(length(plot_inds) - 1)){
for(j in (i + 1):length(plot_inds)){
plot_list[[count]] = plot(BRT_bi, i.var = c(plot_inds[i], plot_inds[j]), type = 'response', n.trees = op_trees)
count = count + 1
}
}
# looks like the dd_fec term could be a bit bigger, maybe up to 0.15
setwd("/home/shauncoutts/Dropbox/projects/MHR_blackgrass/BG_population_model/model_output")
pdf(file = 'sanity_check_PDP.pdf', width = 10, height = 15)
grid.arrange(plot_list[[1]], plot_list[[2]], plot_list[[3]], plot_list[[4]], plot_list[[5]], plot_list[[6]], ncol = 2)
dev.off()
passed_dat = all_dat[all_dat$in_out == 1, ]
write.csv(passed_dat, file = 'sanity_check_pars_passed.csv')
#make a table and plots of the relative influence and PDP for the 4 most important parameters in predicting in or out of the sanity check
par_sym_str = c('int[Rr]', 'phi[e]', 'f[0]', 'f[r]', 'f[max]', 'f[d]', 'xi', 'rho', 'phi[b]', 'varsigma', 'a', 'c', 'alpha', 'mu[1]',
'omega[1]', 'mu[2]', 'omega[2]')
par_sym = c(expression(int[Rr]), expression(phi[e]), expression(f[0]), expression(f[r]), expression(f[max]), expression(f[d]),
expression(xi), expression(rho), expression(phi[b]), expression(varsigma), 'a', 'c', expression(alpha), expression(mu[1]),
expression(omega[1]), expression(mu[2]), expression(omega[2]))
par_names = strsplit('int_Rr + germ_prob + fec0 + fec_cost + fec_max + dd_fec + herb_effect + g_prot + seed_sur + pro_exposed + scale_pollen + shape_pollen + seed_pro_short + seed_mean_dist_short + pro_seeds_to_mean_short + seed_mean_dist_long + pro_seeds_to_mean_long',
split = ' + ', fixed = TRUE)[[1]]
padding <- unit(5,"mm")
par_order = as.numeric(sapply(as.character(rel_inf$var), FUN = function(x) which(par_names == x)))
rel_inf_df = data.frame(parameter = par_sym_str[par_order], rel_inf = rel_inf$rel.inf)
table = tableGrob(rel_inf_df, cols = c("parameter", "rel. inf."), theme = ttheme_default(base_size = 6, parse = TRUE))
pdf(file = 'sanity_check_rel_inf.pdf', width = 2, height = 4)
grid.draw(table)
dev.off()
plot_list = list()
plot_inds = c(5, 6, 9, 4)
count = 1
for(i in 1:(length(plot_inds) - 1)){
for(j in (i + 1):length(plot_inds)){
pg = plot(BRT_bi, i.var = c(plot_inds[i], plot_inds[j]), n.trees = op_trees, return.grid = TRUE, type = 'response')
preds = names(pg)
form = paste0(preds[3], '~', preds[1], '+', preds[2])
plot_list[[count]] = levelplot(as.formula(form), data = pg, xlab = list(label = par_sym[plot_inds[i]], cex = 1.7),
ylab = list(label = par_sym[plot_inds[j]], cex = 1.7), scales = list(x = list(cex = 1.5), y = list(cex = 1.5)),
colorkey = list(labels = list(cex = 1.5)))
count = count + 1
}
}
setwd("/home/shauncoutts/Dropbox/projects/MHR_blackgrass/BG_population_model/model_output")
pdf(file = 'sanity_check_PDP.pdf', width = 12, height = 15)
grid.arrange(plot_list[[1]], plot_list[[2]], plot_list[[3]], plot_list[[4]], plot_list[[5]], plot_list[[6]], ncol = 2)
grid.text(label = paste0(letters[1:6], ')'), x = c(0.05, 0.55), y = c(0.99, 0.99, 0.666, 0.666, 0.333, 0.333), gp = gpar(fontsize = 20))
dev.off()
|
f084d5799408f0155829d760ad881fd6bb586aec | 19eef6be2aafe216144bfe860fdfe07495a06de3 | /tests/testthat/test_interaction.R | fce5f27c49340afd41c1dab02e4e27fe2c72e538 | [] | no_license | cran/dexter | a634eb5ba9497c33e177d7d197e4e4ecf2f987e0 | a9d696898f137ffc124ca3a6132f5c03494bf802 | refs/heads/master | 2023-04-09T20:47:18.898845 | 2022-11-08T13:10:08 | 2022-11-08T13:10:08 | 81,347,103 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 616 | r | test_interaction.R | context('check interaction model')
library(dplyr)
test_that('interaction model parameters are stable over simulation',{
set.seed(123)
db = open_project('../verbAggression.db')
f = fit_inter(db)
ts = get_testscores(db)
#close_project(db)
simdat = r_score_IM(f, rep(ts$booklet_score,10))
g = fit_inter(simdat)
f = coef(f)
g = coef(g)
expect_gt(cor(f$beta_IM,g$beta_IM), 0.95, label='IM sim beta correlates >.95 true beta')
i = seq(1,nrow(f),2)
expect_gt(cor(f$sigma[i],g$sigma[i]), 0.9, label='IM sim sigma correlates >.9 true sigma')
}) |
0ed8f33be40bb1f7a7555c1dc025109f8f2475e9 | 876af37331d71acd4c42f4ab170e82bbf07872cc | /src/water_quality_classification.R | 1a1f837f0dbcbc6b76914c077a70bc88bf8c72c6 | [] | no_license | lreyp/Water-Quality-Classification | 5396cf514e61847007e568bd58c5ca8959dab308 | 91d8f8e226ac6ef185d3466c282d56c8af4f71b6 | refs/heads/main | 2023-05-10T06:42:54.231791 | 2021-06-08T09:38:16 | 2021-06-08T09:38:16 | 374,730,041 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 109,092 | r | water_quality_classification.R | ## ----setup, include=FALSE-----------------------------------------------------------------------------------------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
data <- read.csv("water_potability.csv", sep = ",", strip.white = TRUE, header = TRUE, na.strings = "")
#data_plot <- read.csv("water_potability.csv", sep = ",", strip.white = TRUE, header = TRUE, na.strings = "")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
library(skimr)
library(Hmisc)
skim(data)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
describe(as.factor(data$Potability))
## ----fig.height=12, fig.width=12, message=FALSE, warning=FALSE----------------------------------------------------------------------------------------------
library(GGally)
ggpairs(data, columns = 1:9, ggplot2::aes(colour=as.factor(Potability)), progress = FALSE)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
library(reshape2)
data_m <- melt(data, id.vars = "Potability")
## ----fig.height=12, fig.width=12, message=FALSE, warning=FALSE----------------------------------------------------------------------------------------------
ggplot(data = data_m, aes(x=variable, y=value, fill=as.factor(Potability))) +
geom_boxplot() + facet_wrap(~variable, scales="free")
## ----fig.height=12, fig.width=12----------------------------------------------------------------------------------------------------------------------------
boxplot(scale(data[,1:9]))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
cor_mat <- cor(data[1:9], use="complete.obs")
cor_mat
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
library(ggcorrplot)
ggcorrplot(cor_mat, hc.order = TRUE, type = "lower", lab = TRUE, insig = "blank")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
ggplot(data, aes(y = Potability, fill = factor(Potability))) + geom_bar() + labs(title = "Proporción de registros por clase") + geom_text(stat = "count", aes(label = scales::percent(..count../sum(..count..))), nudge_x = 40)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
library(plyr)
for (i in names(data)){
plt <- ggplot(data, aes_string(x=i)) +
geom_histogram(aes(y=..density..), colour="black", fill="white")+
geom_density(alpha=.2, fill="#FF6666") + facet_wrap(~Potability, scales="free")
print(plt)
}
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
max(rowSums(is.na(data)))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
sum(!complete.cases(data))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
library(dplyr)
data_na_count <- data
data_na_count$na_count <- apply(data, 1, function(x) sum(is.na(x)))
head(data_na_count %>% slice_max(na_count, n = 20), 20)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
library(VIM)
marginplot(data[c(1,5)])
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
marginplot(data[c(1,8)])
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
marginplot(data[c(5,8)])
## ----fig.width=14-------------------------------------------------------------------------------------------------------------------------------------------
library(mice)
md.pattern(data)
## ----fig.width=10-------------------------------------------------------------------------------------------------------------------------------------------
mice_plot <- aggr(data, col=c('navyblue','yellow'),
numbers=TRUE, sortVars=TRUE,
labels=names(data), cex.axis=.7,
gap=3, ylab=c("Missing data","Pattern"))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
library(QuantPsyc)
mult.norm(data)$mult.test
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
library(energy)
complete_data <- data[complete.cases(data),]
mvnorm.etest(complete_data, R=100)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
library(MVN)
result <- mvn(complete_data, multivariatePlot = "qq", showOutliers = TRUE)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
result$multivariateNormality
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
result$univariateNormality
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
result$Descriptives
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Flag Tukey (boxplot-rule) outliers in a numeric vector.
# A value is an outlier when it lies more than 1.5 * IQR below Q1 or above Q3.
# NA values are never flagged. (The original version returned NA for NA inputs
# and let quantile() fail on vectors containing NA; subsetting a data frame
# with a logical vector containing NA silently injects NA-filled rows.)
outliers_Tukey <- function(x) {
  q1 <- quantile(x, probs = 0.25, na.rm = TRUE)
  q3 <- quantile(x, probs = 0.75, na.rm = TRUE)
  iqr <- q3 - q1
  flag <- x > q3 + 1.5 * iqr | x < q1 - 1.5 * iqr
  # NA comparisons propagate NA; treat those as "not an outlier"
  flag & !is.na(flag)
}
# Drop every row of `df` that is a Tukey outlier in any of `cols`.
# Columns are filtered sequentially, so each later column's quantiles are
# computed on the already reduced data frame (same behaviour as before).
remove_outliers_Tukey <- function(df, cols = names(df)) {
  for (col in cols) {
    df <- df[!outliers_Tukey(df[[col]]), ]
  }
  df
}
# Flag outliers by the three-sigma rule: values more than 3 standard
# deviations away from the mean. NAs are ignored when estimating the mean
# and sd, and are never flagged themselves. (The original returned NA for
# NA inputs, which corrupted row subsetting with NA-filled rows, and
# mean()/sd() returned NA for vectors containing NA.)
outliers_sd <- function(x) {
  centre <- mean(x, na.rm = TRUE)
  spread <- sd(x, na.rm = TRUE)
  flag <- x > centre + 3 * spread | x < centre - 3 * spread
  # NA comparisons propagate NA; treat those as "not an outlier"
  flag & !is.na(flag)
}
# Remove rows that are three-sigma outliers in any of `cols`; columns are
# processed sequentially on the progressively filtered data frame.
remove_outliers_sd <- function(df, cols = names(df)) {
  for (col in cols) {
    df <- df[!outliers_sd(df[[col]]), ]
  }
  df
}
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
data_Tukey_outliers <- remove_outliers_Tukey(complete_data)
data_sd_outliers <- remove_outliers_sd(complete_data)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
result <- mvn(data_Tukey_outliers, multivariatePlot = "qq", showOutliers = TRUE)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
result$multivariateNormality
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
summary(data_Tukey_outliers)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
boxplot(scale(data_Tukey_outliers[,1:9]))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
result <- mvn(data_sd_outliers, multivariatePlot = "qq", showOutliers = TRUE)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
result$multivariateNormality
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
summary(data_sd_outliers)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
boxplot(scale(data_sd_outliers[,1:9]))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
cat("data_sd_outliers: ",nrow(data_sd_outliers), "\n")
cat("data_Tukey_outliers: ",nrow(data_Tukey_outliers))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
library(plyr)
library(ggplot2)
data_test <- data
data_test$na_count <- apply(data, 1, function(x) sum(is.na(x)))
data_test$na_count[data_test$na_count>0] <- 1
for (i in names(data)){
plt <- ggplot(data_test, aes_string(x=i)) +
geom_histogram(aes(y=..density..), colour="black", fill="white")+
geom_density(alpha=.2, fill="#FF6666") + facet_wrap(~na_count, scales="free")
print(plt)
}
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
summary(data_test[data_test$na_count==0,])
summary(data_test[data_test$na_count==1,])
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
apply(data, 2, function(col)sum(is.na(col))/length(col))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
library(missForest)
library(tidyverse)
set.seed(564165)
complete_data <- data[complete.cases(data),]
head(complete_data)
complete_data_ph <- complete_data[,c("ph")]
complete_data_Sulfate <- complete_data[,c("Sulfate")]
complete_data_Triha <- complete_data[,c("Trihalomethanes")]
complete_data_noNAcols <- complete_data[,c("Hardness", "Solids", "Chloramines", "Conductivity", "Organic_carbon", "Turbidity", "Potability")]
complete_data_ph <- prodNA(as.data.frame(complete_data_ph), noNA = 0.14987790)
complete_data_Sulfate <- prodNA(as.data.frame(complete_data_Sulfate), noNA = 0.23840049)
complete_data_Triha <- prodNA(as.data.frame(complete_data_Triha), noNA = 0.04945055)
complete_data_NAcols <- cbind(complete_data_ph, complete_data_Sulfate)
complete_data_NAcols <- cbind(complete_data_NAcols, complete_data_Triha)
complete_data_NAs <- cbind(complete_data_NAcols, complete_data_noNAcols)
complete_data_NAs <- complete_data_NAs[, c(1, 4, 5, 6, 2, 7, 8, 3, 9, 10)]
names(complete_data_NAs)[names(complete_data_NAs) == "complete_data_ph"] <- "ph"
names(complete_data_NAs)[names(complete_data_NAs) == "complete_data_Sulfate"] <- "Sulfate"
names(complete_data_NAs)[names(complete_data_NAs) == "complete_data_Triha"] <- "Trihalomethanes"
head(complete_data_NAs)
## ----fig.height=12, fig.width=12----------------------------------------------------------------------------------------------------------------------------
md.pattern(complete_data_NAs)
## ----fig.width=10-------------------------------------------------------------------------------------------------------------------------------------------
library(VIM)
library(mice)
set.seed(100)
mice_plot <- aggr(complete_data_NAs, col=c('navyblue','yellow'),
numbers=TRUE, sortVars=TRUE,
labels=names(complete_data_NAs), cex.axis=.7,
gap=3, ylab=c("Missing data","Pattern"))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
library(VIM)
marginplot(complete_data_NAs[c(1,5)])
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
marginplot(complete_data_NAs[c(1,8)])
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
marginplot(complete_data_NAs[c(5,8)])
## ----echo=TRUE, results='hide', include=FALSE---------------------------------------------------------------------------------------------------------------
library(mice)
set.seed(100)
complete_data_NAs_MICE <- mice(complete_data_NAs,m=5, maxit = 100, method="pmm",seed=245435, print=FALSE)
## ----fig.width=20, fig.height=12----------------------------------------------------------------------------------------------------------------------------
plot(complete_data_NAs_MICE)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
densityplot(complete_data_NAs_MICE)
## ----fig.height=12, fig.width=12----------------------------------------------------------------------------------------------------------------------------
stripplot(complete_data_NAs_MICE, pch = 20, cex = 1.2)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
summary(data)
summary(mice::complete(complete_data_NAs_MICE, "long"))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
MICE_test <- mice::complete(complete_data_NAs_MICE, "long")
fit <- with(complete_data_NAs_MICE, lm(Sulfate~ Hardness+Solids+Chloramines+Conductivity+Organic_carbon+Turbidity+Trihalomethanes+Potability))
summary(mice::pool(fit))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
imp_2 <- mice(complete_data_NAs[,-c(4,6,9,10)],m=5, maxit = 100, method="pmm",seed=245435, print=FALSE)
fit_2 <- with(imp_2, lm(Sulfate~ Hardness+Solids+Organic_carbon+Trihalomethanes))
summary(mice::pool(fit_2))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
imp_3 <- mice(complete_data_NAs[,-c(4,6,8,9,10)],m=5, maxit = 100, method="pmm",seed=245435, print=FALSE)
fit_3 <- with(imp_3, lm(Sulfate~ Hardness+Solids+Organic_carbon))
summary(mice::pool(fit_3))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
mice::pool(fit)
mice::pool(fit_2)
mice::pool(fit_3)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
library(Metrics)
# Report the per-imputation RMSE (imputed vs. true values) for one variable
# of one fitted `mice` model, then the mean RMSE over all imputations.
#   imp      : a `mids` object returned by mice() (imp$m = number of imputations)
#   variable : name of the column whose imputations are evaluated
# Only rows where `complete_data_NAs[[variable]]` was artificially set to NA
# are compared, so the "actual" values come from the original complete cases
# held in `complete_data`.
report_mean_rmse <- function(imp, variable) {
  missing_mask <- is.na(complete_data_NAs[[variable]])
  actual <- complete_data[missing_mask, ][[variable]]
  total <- 0
  for (k in seq_len(imp$m)) {
    predicted <- mice::complete(imp, k)[missing_mask, ][[variable]]
    # compute the RMSE once per imputation (the original code computed it twice)
    r <- Metrics::rmse(actual, predicted)
    cat(r, "\n")
    total <- total + r
  }
  cat("mean RMSE: ", total / imp$m, "\n")
}
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Sulfate: full MICE model, reduced model (imp_2) and minimal model (imp_3)
report_mean_rmse(complete_data_NAs_MICE, "Sulfate")
report_mean_rmse(imp_2, "Sulfate")
report_mean_rmse(imp_3, "Sulfate")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# ph: all three models retain the ph column
report_mean_rmse(complete_data_NAs_MICE, "ph")
report_mean_rmse(imp_2, "ph")
report_mean_rmse(imp_3, "ph")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Trihalomethanes: imp_3 dropped this column, so only two models apply
report_mean_rmse(complete_data_NAs_MICE, "Trihalomethanes")
report_mean_rmse(imp_2, "Trihalomethanes")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Global accumulators used to build a summary table of imputation RMSEs.
# `res` holds the RMSE values (coerced to character because the vector is
# initialised with a string header) and `name` holds the matching labels.
res <- "Resultado"
name <- "Model"
# Append one (RMSE, label) pair to the global accumulators.
# NOTE: `<<-` mutates the global `res`/`name` vectors as a side effect,
# so the order of add_value() calls determines the table row order.
add_value <- function(result, new_name) {
  res <<- c(res, result)
  name <<- c(name, new_name)
}
# Record the mean RMSEs obtained above for the MICE imputation model.
add_value(2.183171, "result_MICE_ph")
add_value(55.8299, "result_MICE_Sulfate")
add_value(22.33897 , "result_MICE_Trihalomethanes")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
library(missForest)
set.seed(100)
complete_data_NAs_MISSFOREST_1 <- missForest(complete_data_NAs, verbose=TRUE)
complete_data_NAs_MISSFOREST <- complete_data_NAs_MISSFOREST_1$ximp
head(complete_data_NAs_MISSFOREST)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
set.seed(100)
complete_data_NAs_MISSFOREST_2 <- missForest(complete_data_NAs, verbose=TRUE, ntree=500)
complete_data_NAs_MISSFOREST_test <- complete_data_NAs_MISSFOREST_2$ximp
head(complete_data_NAs_MISSFOREST)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Realizamos los mismos pasos que con el modelo de imputación MICE para comparar los vectores de cada variable.
actual = complete_data[is.na(complete_data_NAs$ph), ]$ph
predicted = complete_data_NAs_MISSFOREST_test[is.na(complete_data_NAs$ph), ]$ph
result_MISSFOREST_ph = Metrics::rmse(actual, predicted)
actual = complete_data[is.na(complete_data_NAs$Sulfate), ]$Sulfate
predicted = complete_data_NAs_MISSFOREST_test[is.na(complete_data_NAs$Sulfate), ]$Sulfate
result_MISSFOREST_Sulfate = Metrics::rmse(actual, predicted)
actual = complete_data[is.na(complete_data_NAs$Trihalomethanes), ]$Trihalomethanes
predicted = complete_data_NAs_MISSFOREST_test[is.na(complete_data_NAs$Trihalomethanes), ]$Trihalomethanes
result_MISSFOREST_Trihalomethanes = Metrics::rmse(actual, predicted)
result_MISSFOREST_ph
result_MISSFOREST_Sulfate
result_MISSFOREST_Trihalomethanes
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Realizamos los mismos pasos que con el modelo de imputación MICE para comparar los vectores de cada variable.
actual = complete_data[is.na(complete_data_NAs$ph), ]$ph
predicted = complete_data_NAs_MISSFOREST[is.na(complete_data_NAs$ph), ]$ph
result_MISSFOREST_2_ph = Metrics::rmse(actual, predicted)
actual = complete_data[is.na(complete_data_NAs$Sulfate), ]$Sulfate
predicted = complete_data_NAs_MISSFOREST[is.na(complete_data_NAs$Sulfate), ]$Sulfate
result_MISSFOREST_2_Sulfate = Metrics::rmse(actual, predicted)
actual = complete_data[is.na(complete_data_NAs$Trihalomethanes), ]$Trihalomethanes
predicted = complete_data_NAs_MISSFOREST[is.na(complete_data_NAs$Trihalomethanes), ]$Trihalomethanes
result_MISSFOREST_2_Trihalomethanes = Metrics::rmse(actual, predicted)
result_MISSFOREST_2_ph
result_MISSFOREST_2_Sulfate
result_MISSFOREST_2_Trihalomethanes
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
add_value(result_MISSFOREST_ph, "result_MISSFOREST_ph")
add_value(result_MISSFOREST_Sulfate, "result_MISSFOREST_Sulfate")
add_value(result_MISSFOREST_Trihalomethanes, "result_MISSFOREST_Trihalomethanes")
res
name
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Multiple imputation with Hmisc::aregImpute: additive regression with
# predictive mean matching (pmm), 5 imputations, 3-knot splines, 10 burn-in
# iterations.
library(Hmisc)
set.seed(100)
complete_data_NAs_HMISC_areg <- aregImpute(~ Sulfate + Hardness + Solids + Chloramines + ph + Conductivity + Organic_carbon + Trihalomethanes + Turbidity + Potability, data = complete_data_NAs, n.impute = 5, type= "pmm", nk=3, burnin=10)
print(complete_data_NAs_HMISC_areg)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Second aregImpute configuration: 10 imputations, knot counts chosen from
# c(0, 3:5), non-linear target transformations (tlinear = FALSE).
# NOTE(review): Conductivity is omitted from this formula although it was
# present in the first model -- confirm this is intentional.
library(Hmisc)
set.seed(100)
complete_data_NAs_HMISC_areg_test <- aregImpute(~ Sulfate + Hardness + Solids + Chloramines + ph + Organic_carbon + Trihalomethanes + Turbidity + Potability, data = complete_data_NAs, n.impute = 10, type= "pmm", nk=c(0,3:5), tlinear = FALSE)
print(complete_data_NAs_HMISC_areg_test)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Evaluate each of the 10 aregImpute imputations against the ground truth.
# The true values and the NA masks do not change across imputations, so
# compute them once outside the loop instead of on every iteration.
na_ph <- is.na(complete_data_NAs$ph)
na_sulfate <- is.na(complete_data_NAs$Sulfate)
na_tri <- is.na(complete_data_NAs$Trihalomethanes)
actual_ph <- complete_data$ph[na_ph]
actual_sulfate <- complete_data$Sulfate[na_sulfate]
actual_tri <- complete_data$Trihalomethanes[na_tri]
for (i in 1:10){
  # Extract the i-th completed dataset from the aregImpute fit.
  temp_data <- impute.transcan(complete_data_NAs_HMISC_areg_test, imputation = i, data = complete_data_NAs, list.out = TRUE,
                               pr = FALSE, check = FALSE)
  temp_data <- data.frame(temp_data)
  result_HMISC_ph <- Metrics::rmse(actual_ph, temp_data$ph[na_ph])
  result_HMISC_Sulfate <- Metrics::rmse(actual_sulfate, temp_data$Sulfate[na_sulfate])
  result_HMISC_Trihalomethanes <- Metrics::rmse(actual_tri, temp_data$Trihalomethanes[na_tri])
  cat("ph RMSE:\t\t", result_HMISC_ph, "\n")
  cat("Sulfate RMSE:\t\t", result_HMISC_Sulfate, "\n")
  cat("Trihalomethanes RMSE: ", result_HMISC_Trihalomethanes, "\n")
  cat("----------------------------------\n")
}
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# As with the MICE and missForest models, compare true vs. imputed values for
# each variable, using the 10th (last) aregImpute imputation.
complete_data_NAs_HMISC <- impute.transcan(complete_data_NAs_HMISC_areg_test, imputation = 10, data = complete_data_NAs, list.out = TRUE,
                       pr = FALSE, check = FALSE)
complete_data_NAs_HMISC <- data.frame(complete_data_NAs_HMISC)
actual <- complete_data$ph[is.na(complete_data_NAs$ph)]
predicted <- complete_data_NAs_HMISC$ph[is.na(complete_data_NAs$ph)]
result_HMISC_ph <- Metrics::rmse(actual, predicted)
actual <- complete_data$Sulfate[is.na(complete_data_NAs$Sulfate)]
predicted <- complete_data_NAs_HMISC$Sulfate[is.na(complete_data_NAs$Sulfate)]
result_HMISC_Sulfate <- Metrics::rmse(actual, predicted)
actual <- complete_data$Trihalomethanes[is.na(complete_data_NAs$Trihalomethanes)]
predicted <- complete_data_NAs_HMISC$Trihalomethanes[is.na(complete_data_NAs$Trihalomethanes)]
result_HMISC_Trihalomethanes <- Metrics::rmse(actual, predicted)
# Print the per-variable RMSEs.
result_HMISC_ph
result_HMISC_Sulfate
result_HMISC_Trihalomethanes
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Accumulate the Hmisc RMSE results.
add_value(result_HMISC_ph, "result_HMISC_ph")
add_value(result_HMISC_Sulfate, "result_HMISC_Sulfate")
add_value(result_HMISC_Trihalomethanes, "result_HMISC_Trihalomethanes")
# Show the accumulated values and their labels so far.
res
name
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Multiple imputation with the mi package (Bayesian chained models, default
# settings).
library(mi)
set.seed(100)
complete_data_NAs_MI_mi <- mi(complete_data_NAs)
summary(complete_data_NAs_MI_mi)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Diagnostic plots of the mi fit (convergence / imputation distributions).
plot(complete_data_NAs_MI_mi, ask=FALSE)
# Extract the first completed dataset from the mi fit, then -- as with the
# previous imputation models -- compare true vs. imputed values per variable.
complete_data_NAs_MI <- mi::complete(complete_data_NAs_MI_mi, m=1)
actual <- complete_data$ph[is.na(complete_data_NAs$ph)]
predicted <- complete_data_NAs_MI$ph[is.na(complete_data_NAs$ph)]
result_MI_ph <- Metrics::rmse(actual, predicted)
actual <- complete_data$Sulfate[is.na(complete_data_NAs$Sulfate)]
predicted <- complete_data_NAs_MI$Sulfate[is.na(complete_data_NAs$Sulfate)]
result_MI_Sulfate <- Metrics::rmse(actual, predicted)
actual <- complete_data$Trihalomethanes[is.na(complete_data_NAs$Trihalomethanes)]
predicted <- complete_data_NAs_MI$Trihalomethanes[is.na(complete_data_NAs$Trihalomethanes)]
result_MI_Trihalomethanes <- Metrics::rmse(actual, predicted)
# Print the per-variable RMSEs.
result_MI_ph
result_MI_Sulfate
result_MI_Trihalomethanes
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Accumulate the mi RMSE results.
add_value(result_MI_ph, "result_MI_ph")
add_value(result_MI_Sulfate, "result_MI_Sulfate")
add_value(result_MI_Trihalomethanes, "result_MI_Trihalomethanes")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# k-nearest-neighbour imputation (VIM::kNN) with k = 3.  kNN() appends one
# logical indicator column per variable, so keep only the first 10 (original)
# columns.
set.seed(100)
complete_data_NAs_kNN <- kNN(complete_data_NAs, k=3)
complete_data_NAs_kNN <- complete_data_NAs_kNN[, 1:10]
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Repeat the same steps as with the previous models to compare the true and
# imputed vectors for each variable.
actual = complete_data[is.na(complete_data_NAs$ph), ]$ph
predicted = complete_data_NAs_kNN[is.na(complete_data_NAs$ph), ]$ph
result_kNN_ph = Metrics::rmse(actual, predicted)
actual = complete_data[is.na(complete_data_NAs$Sulfate), ]$Sulfate
predicted = complete_data_NAs_kNN[is.na(complete_data_NAs$Sulfate), ]$Sulfate
result_kNN_Sulfate = Metrics::rmse(actual, predicted)
actual = complete_data[is.na(complete_data_NAs$Trihalomethanes), ]$Trihalomethanes
predicted = complete_data_NAs_kNN[is.na(complete_data_NAs$Trihalomethanes), ]$Trihalomethanes
result_kNN_Trihalomethanes = Metrics::rmse(actual, predicted)
# Print the per-variable RMSEs for kNN with k = 3.
result_kNN_ph
result_kNN_Sulfate
result_kNN_Trihalomethanes
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Re-run VIM::kNN with a larger neighbourhood (k = 100).
set.seed(100)
complete_data_NAs_kNN <- kNN(complete_data_NAs, k=100)
# Drop the logical indicator columns that kNN() appends, keeping only the ten
# original variables.  (The previous self-assignment here was a no-op and
# left the indicator columns in the data frame, unlike the k = 3 and scaled
# runs which subset to [, 1:10].)
complete_data_NAs_kNN <- complete_data_NAs_kNN[, 1:10]
head(complete_data_NAs_kNN)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Repeat the same steps as with the previous models to compare the true and
# imputed vectors for each variable (kNN, k = 100).  These assignments
# overwrite the k = 3 results computed above.
actual = complete_data[is.na(complete_data_NAs$ph), ]$ph
predicted = complete_data_NAs_kNN[is.na(complete_data_NAs$ph), ]$ph
result_kNN_ph = Metrics::rmse(actual, predicted)
actual = complete_data[is.na(complete_data_NAs$Sulfate), ]$Sulfate
predicted = complete_data_NAs_kNN[is.na(complete_data_NAs$Sulfate), ]$Sulfate
result_kNN_Sulfate = Metrics::rmse(actual, predicted)
actual = complete_data[is.na(complete_data_NAs$Trihalomethanes), ]$Trihalomethanes
predicted = complete_data_NAs_kNN[is.na(complete_data_NAs$Trihalomethanes), ]$Trihalomethanes
result_kNN_Trihalomethanes = Metrics::rmse(actual, predicted)
# Print the per-variable RMSEs for kNN with k = 100.
result_kNN_ph
result_kNN_Sulfate
result_kNN_Trihalomethanes
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
library(caret)
# Standardise all columns of the incomplete data.
# NOTE(review): scale() is applied to every column, apparently including the
# Potability target -- confirm this is intended before imputing on it.
scaled_test <- scale(complete_data_NAs)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# kNN (k = 100) on the standardised data; keep only the ten original columns.
set.seed(100)
complete_data_NAs_kNN <- kNN(scaled_test, k=100)
complete_data_NAs_kNN <- as.data.frame(complete_data_NAs_kNN[, 1:10])
# Repeat the same steps as with the previous models to compare the true and
# imputed vectors for each variable, on the standardised scale.
# NOTE(review): scale() is applied to each NA-row subset separately, i.e. it
# standardises using the subset's own mean/sd, while the predictions were
# standardised with the full-data parameters -- these scaled RMSEs may not be
# directly comparable; verify.
actual = scale(complete_data[is.na(complete_data_NAs$ph), ]$ph)
predicted = complete_data_NAs_kNN[is.na(complete_data_NAs$ph), ]$ph
result_kNN_ph_scale = Metrics::rmse(actual, predicted)
actual = scale(complete_data[is.na(complete_data_NAs$Sulfate), ]$Sulfate)
predicted = complete_data_NAs_kNN[is.na(complete_data_NAs$Sulfate), ]$Sulfate
result_kNN_Sulfate_scale = Metrics::rmse(actual, predicted)
actual = scale(complete_data[is.na(complete_data_NAs$Trihalomethanes), ]$Trihalomethanes)
predicted = complete_data_NAs_kNN[is.na(complete_data_NAs$Trihalomethanes), ]$Trihalomethanes
result_kNN_Trihalomethanes_scale = Metrics::rmse(actual, predicted)
# Print the scaled per-variable RMSEs.
result_kNN_ph_scale
result_kNN_Sulfate_scale
result_kNN_Trihalomethanes_scale
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# missForest imputation on the standardised data.
library(missForest)
set.seed(100)
complete_data_NAs_MISSFOREST_scale <- missForest(scale(complete_data_NAs), verbose=TRUE)
complete_data_NAs_MISSFOREST_scaled <- as.data.frame(complete_data_NAs_MISSFOREST_scale$ximp)
# Repeat the same steps as with the MICE imputation model to compare the
# vectors of each variable, on the standardised scale.
# NOTE(review): as in the scaled kNN comparison above, scale() standardises
# each NA-row subset with its own mean/sd while predictions use full-data
# scaling -- verify the comparison is meaningful.
actual = scale(complete_data[is.na(complete_data_NAs$ph), ]$ph)
predicted = complete_data_NAs_MISSFOREST_scaled[is.na(complete_data_NAs$ph), ]$ph
result_MISSFOREST_ph_scale = Metrics::rmse(actual, predicted)
actual = scale(complete_data[is.na(complete_data_NAs$Sulfate), ]$Sulfate)
predicted = complete_data_NAs_MISSFOREST_scaled[is.na(complete_data_NAs$Sulfate), ]$Sulfate
result_MISSFOREST_Sulfate_scale = Metrics::rmse(actual, predicted)
actual = scale(complete_data[is.na(complete_data_NAs$Trihalomethanes), ]$Trihalomethanes)
predicted = complete_data_NAs_MISSFOREST_scaled[is.na(complete_data_NAs$Trihalomethanes), ]$Trihalomethanes
result_MISSFOREST_Trihalomethanes_scale = Metrics::rmse(actual, predicted)
# Print the scaled per-variable RMSEs.
result_MISSFOREST_ph_scale
result_MISSFOREST_Sulfate_scale
result_MISSFOREST_Trihalomethanes_scale
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Accumulate the kNN and scaled-model RMSE results.
add_value(result_kNN_ph, "result_kNN_ph")
add_value(result_kNN_Sulfate, "result_kNN_Sulfate")
add_value(result_kNN_Trihalomethanes, "result_kNN_Trihalomethanes")
add_value(result_kNN_ph_scale, "result_kNN_ph_scale")
add_value(result_kNN_Sulfate_scale, "result_kNN_Sulfate_scale")
add_value(result_kNN_Trihalomethanes_scale, "result_kNN_Trihalomethanes_scale")
add_value(result_MISSFOREST_ph_scale, "result_MISSFOREST_ph_scale")
add_value(result_MISSFOREST_Sulfate_scale, "result_MISSFOREST_Sulfate_scale")
add_value(result_MISSFOREST_Trihalomethanes_scale, "result_MISSFOREST_Trihalomethanes_scale")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Print a summary table of all accumulated RMSE results.  Build the
# name/value data frame once instead of re-creating it on every access.
summary_tbl <- data.frame(cbind(name, res))
cat(paste0(summary_tbl[1,1], "\t\t\t\t"))
cat(paste0(summary_tbl[1,2], "\n"))
cat("--------------------------------------------------------------\n")
# Rows come in groups of three (ph / Sulfate / Trihalomethanes per method),
# starting at row 2.
for (i in c(2,5,8,11,14,17,20)){
  cat(paste0(summary_tbl[i,1], ": "))
  cat(paste0(summary_tbl[i,2], "\n"))
  cat(paste0(summary_tbl[i+1,1], ": "))
  cat(paste0(summary_tbl[i+1,2], "\n"))
  cat(paste0(summary_tbl[i+2,1], ": "))
  cat(paste0(summary_tbl[i+2,2], "\n"))
  cat("--------------------------------------------------------------\n")
}
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Print the Tukey (1.5 * IQR) outlier limits for a numeric vector.
#
# x: numeric vector; NAs should already have been removed (quantile() errors
#    on NA input without na.rm).
# Prints "upper <tab> lower" and invisibly returns c(lower = ..., upper = ...)
# so callers can also use the limits programmatically.  Printed output is
# unchanged from the original version.
outliers_Tukey_na <- function(x) {
  Q1 <- quantile(x, probs=.25)
  Q3 <- quantile(x, probs=.75)
  iqr = Q3-Q1
  upper_limit = Q3 + (iqr*1.5)
  lower_limit = Q1 - (iqr*1.5)
  cat(upper_limit,"\t",lower_limit,"\n")
  invisible(c(lower = unname(lower_limit), upper = unname(upper_limit)))
}
# Print the 3-standard-deviation outlier limits for a numeric vector.
#
# x: numeric vector; NAs should already have been removed (mean()/sd()
#    propagate NA otherwise).
# Prints "upper <tab> lower" and invisibly returns c(lower = ..., upper = ...)
# so callers can also use the limits programmatically.  Printed output is
# unchanged from the original version.
outliers_sd_na <- function(x) {
  upper_limit = mean(x) + 3*sd(x)
  lower_limit = mean(x) - 3*sd(x)
  cat(upper_limit,"\t",lower_limit,"\n")
  invisible(c(lower = lower_limit, upper = upper_limit))
}
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Print the Tukey outlier limits (upper <tab> lower) for each numeric
# variable, using only the rows where that variable is observed.
outliers_Tukey_na(data[complete.cases(data$ph),]$ph)
outliers_Tukey_na(data[complete.cases(data$Hardness),]$Hardness)
outliers_Tukey_na(data[complete.cases(data$Solids),]$Solids)
outliers_Tukey_na(data[complete.cases(data$Chloramines),]$Chloramines)
outliers_Tukey_na(data[complete.cases(data$Sulfate),]$Sulfate)
outliers_Tukey_na(data[complete.cases(data$Conductivity),]$Conductivity)
outliers_Tukey_na(data[complete.cases(data$Organic_carbon),]$Organic_carbon)
outliers_Tukey_na(data[complete.cases(data$Trihalomethanes),]$Trihalomethanes)
outliers_Tukey_na(data[complete.cases(data$Turbidity),]$Turbidity)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
test <- data
head(test)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
summary(test)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
test <- subset(test, (ph < 11.01553 & ph > 3.139631) | is.na(ph))
test <- subset(test, Hardness < 276.3928 & Hardness > 117.1252)
test <- subset(test, Solids < 44831.87 & Solids > -1832.417)
test <- subset(test, Chloramines < 11.09609 & Chloramines > 3.146221)
test <- subset(test, (Sulfate < 438.3262 & Sulfate > 229.3235) | is.na(Sulfate))
test <- subset(test, Conductivity < 655.8791 & Conductivity > 191.6476)
test <- subset(test, Organic_carbon < 23.29543 & Organic_carbon > 5.328026)
test <- subset(test, (Trihalomethanes < 109.5769 & Trihalomethanes > 23.60513) | is.na(Trihalomethanes))
test <- subset(test, Turbidity < 6.091233 & Turbidity > 1.848797)
no_ouliers_incomplete_Tukey <- test
head(no_ouliers_incomplete_Tukey)
nrow(no_ouliers_incomplete_Tukey)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
summary(no_ouliers_incomplete_Tukey)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Print the mean +/- 3*sd outlier limits for each numeric variable, using only
# the rows where that variable is observed.
outliers_sd_na(data[complete.cases(data$ph),]$ph)
outliers_sd_na(data[complete.cases(data$Hardness),]$Hardness)
outliers_sd_na(data[complete.cases(data$Solids),]$Solids)
outliers_sd_na(data[complete.cases(data$Chloramines),]$Chloramines)
outliers_sd_na(data[complete.cases(data$Sulfate),]$Sulfate)
outliers_sd_na(data[complete.cases(data$Conductivity),]$Conductivity)
outliers_sd_na(data[complete.cases(data$Organic_carbon),]$Organic_carbon)
outliers_sd_na(data[complete.cases(data$Trihalomethanes),]$Trihalomethanes)
outliers_sd_na(data[complete.cases(data$Turbidity),]$Turbidity)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Drop rows outside the 3-sigma limits; NA rows for the imputable variables
# are kept.  NOTE(review): thresholds are hard-coded transcriptions of the
# printed (rounded) limits, same caveat as the Tukey filtering above.
test <- data
test <- subset(test, (ph < 11.86375 & ph > 2.297836) | is.na(ph))
test <- subset(test, Hardness < 295.0088 & Hardness > 97.73021)
test <- subset(test, Solids < 48319.81 & Solids > -4291.62)
test <- subset(test, Chloramines < 11.87153 & Chloramines > 2.373022)
test <- subset(test, (Sulfate < 458.0263 & Sulfate > 209.5253) | is.na(Sulfate))
test <- subset(test, Conductivity < 668.6773 & Conductivity > 183.7329)
test <- subset(test, Organic_carbon < 24.20946 & Organic_carbon > 4.360484)
test <- subset(test, (Trihalomethanes < 114.9213 & Trihalomethanes > 17.87127) | is.na(Trihalomethanes))
test <- subset(test, Turbidity < 6.307933 & Turbidity > 1.625639)
no_ouliers_incomplete_sd <- test
head(no_ouliers_incomplete_sd)
nrow(no_ouliers_incomplete_sd)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
summary(no_ouliers_incomplete_sd)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Boxplots of the standardised variables: raw data vs. the two filtered sets,
# to visualise how much each outlier rule removed.
boxplot(scale(data[,1:9]))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
boxplot(scale(no_ouliers_incomplete_Tukey[,1:9]))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
boxplot(scale(no_ouliers_incomplete_sd[,1:9]))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# List the boxplot-rule outliers (points beyond the whiskers, as reported by
# boxplot.stats()$out) for every numeric variable, sorted, with a divider
# line after each variable.
for (column in c("ph", "Hardness", "Solids", "Chloramines", "Sulfate",
                 "Conductivity", "Organic_carbon", "Trihalomethanes",
                 "Turbidity")) {
  print(sort(boxplot.stats(data[[column]])$out))
  cat("--------------------------------------------------------------------------------------------------------------------\n")
}
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Third filtering variant: drop rows outside limits derived from the
# boxplot.stats() whiskers above; NA rows for the imputable variables kept.
# NOTE(review): thresholds are hard-coded from printed output, same caveat as
# the previous two filtering blocks.
test <- data
test <- subset(test, (ph < 11.0278799 & ph > 3.1020756) | is.na(ph))
test <- subset(test, Hardness < 276.69976 & Hardness > 117.05731)
test <- subset(test, Solids < 44868.46 & Solids > -4291.62)
test <- subset(test, Chloramines < 11.1016281 & Chloramines > 3.1395527)
test <- subset(test, (Sulfate < 439.7879 & Sulfate > 227.6656) | is.na(Sulfate))
test <- subset(test, Conductivity < 656.9241 & Conductivity > 181.4838)
test <- subset(test, Organic_carbon < 23.317699 & Organic_carbon > 5.315287)
test <- subset(test, (Trihalomethanes < 110.431080 & Trihalomethanes > 23.136611) | is.na(Trihalomethanes))
test <- subset(test, Turbidity < 6.099632 & Turbidity > 1.844372)
no_ouliers_incomplete_boxplot <- test
head(no_ouliers_incomplete_boxplot)
nrow(no_ouliers_incomplete_boxplot)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
boxplot(scale(no_ouliers_incomplete_boxplot[,1:9]))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Impute the raw data and the two outlier-filtered sets with missForest.
set.seed(100)
imp_1 <- missForest(data, verbose=TRUE)
mF_complete <- imp_1$ximp
imp_2 <- missForest(no_ouliers_incomplete_Tukey, verbose=TRUE)
mF_Tukey <- imp_2$ximp
imp_3 <- missForest(no_ouliers_incomplete_sd, verbose=TRUE)
mF_sd <- imp_3$ximp
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Row counts of each imputed dataset.
nrow(mF_complete)
nrow(mF_Tukey)
nrow(mF_sd)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Row counts after post-imputation outlier removal (remove_outliers_Tukey /
# remove_outliers_sd are helpers defined elsewhere in this file).
nrow(remove_outliers_Tukey(mF_complete))
nrow(remove_outliers_sd(mF_complete))
nrow(remove_outliers_Tukey(mF_Tukey))
nrow(remove_outliers_sd(mF_sd))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Build the missForest-imputed modelling datasets.
mF_complete_Tuk <- remove_outliers_Tukey(mF_complete)
# NOTE(review): unlike every sibling dataset, no remove_outliers_sd() call
# here -- mF_complete_sd is just a copy of mF_complete.  The bare parentheses
# suggest a removed function call; confirm whether this is intentional.
mF_complete_sd <- (mF_complete)
mF_Tukey_Tuk <- remove_outliers_Tukey(mF_Tukey)
mF_sd_sd <- remove_outliers_sd(mF_sd)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Standardise the 9 predictors of each dataset and re-attach the (unscaled)
# Potability column; cbind() produces a matrix whose last column is named
# "V10" after conversion, hence the rename below.
set.seed(100)
data_scaled = as.data.frame(cbind(scale(data[,1:9]), data[,10]))
names(data_scaled)[names(data_scaled) == "V10"] <- "Potability"
data_scaled_tuk = as.data.frame(cbind(scale(no_ouliers_incomplete_Tukey[,1:9]), no_ouliers_incomplete_Tukey[,10]))
names(data_scaled_tuk)[names(data_scaled_tuk) == "V10"] <- "Potability"
data_scaled_sd = as.data.frame(cbind(scale(no_ouliers_incomplete_sd[,1:9]), no_ouliers_incomplete_sd[,10]))
names(data_scaled_sd)[names(data_scaled_sd) == "V10"] <- "Potability"
# kNN-impute each scaled dataset (k = 100); keep only the original columns.
imp_1 <- kNN(data_scaled, k=100)
KNN_complete <- imp_1[,1:10]
imp_2 <- kNN(data_scaled_tuk, k=100)
KNN_Tukey <- imp_2[,1:10]
imp_3 <- kNN(data_scaled_sd, k=100)
KNN_sd <- imp_3[,1:10]
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Row counts of the kNN-imputed datasets.
nrow(KNN_complete)
nrow(KNN_Tukey)
nrow(KNN_sd)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Row counts after post-imputation outlier removal.
nrow(remove_outliers_Tukey(KNN_complete))
nrow(remove_outliers_sd(KNN_complete))
nrow(remove_outliers_Tukey(KNN_Tukey))
nrow(remove_outliers_sd(KNN_sd))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Build the kNN-imputed modelling datasets.
KNN_complete_Tuk <- remove_outliers_Tukey(KNN_complete)
# NOTE(review): as with mF_complete_sd above, no remove_outliers_sd() call --
# KNN_complete_sd is a plain copy of KNN_complete.  Confirm this is intended.
KNN_complete_sd <- (KNN_complete)
KNN_Tukey_Tuk <- remove_outliers_Tukey(KNN_Tukey)
KNN_sd_sd <- remove_outliers_sd(KNN_sd)
## ----fig.height=12, fig.width=12, message=FALSE, warning=FALSE----------------------------------------------------------------------------------------------
# Pairwise plot matrix of the missForest/Tukey dataset, coloured by class.
library(GGally)
ggpairs(mF_Tukey_Tuk, columns = 1:9, ggplot2::aes(colour=as.factor(Potability)), progress = FALSE)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
boxplot(scale(mF_Tukey_Tuk[,1:9]))
## ----fig.height=12, fig.width=12, message=FALSE, warning=FALSE----------------------------------------------------------------------------------------------
# Same pairwise matrix for the kNN/Tukey dataset.
library(GGally)
ggpairs(KNN_Tukey_Tuk, columns = 1:9, ggplot2::aes(colour=as.factor(Potability)), progress = FALSE)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
boxplot(scale(KNN_Tukey_Tuk[,1:9]))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Multivariate normality assessment (MVN::mvn) with a chi-square Q-Q plot.
# NOTE(review): the whole data frame is passed, so the Potability column is
# included in the normality test -- confirm that is intended.
library(MVN)
result <- mvn(mF_sd_sd, multivariatePlot = "qq", showOutliers = TRUE)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
result$multivariateNormality
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
result$univariateNormality
library(stats)
fligner.test(ph ~ as.factor(Potability), data=mF_sd_sd)
fligner.test(Hardness ~ as.factor(Potability), data=mF_sd_sd)
fligner.test(Solids ~ as.factor(Potability), data=mF_sd_sd)
fligner.test(Chloramines ~ as.factor(Potability), data=mF_sd_sd)
fligner.test(Sulfate ~ as.factor(Potability), data=mF_sd_sd)
fligner.test(Conductivity ~ as.factor(Potability), data=mF_sd_sd)
fligner.test(Organic_carbon ~ as.factor(Potability), data=mF_sd_sd)
fligner.test(Trihalomethanes ~ as.factor(Potability), data=mF_sd_sd)
fligner.test(Turbidity ~ as.factor(Potability), data=mF_sd_sd)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Boxplot + jittered points of ph by potability class, colour legend hidden.
# guides(col = FALSE) is deprecated since ggplot2 3.3.4; "none" is the
# supported equivalent and produces the same plot without the warning.
ggplot(mF_sd_sd,aes(x=as.factor(Potability),y=ph, col=ph)) + geom_boxplot() +
  geom_jitter(position=position_jitter(0.1)) + guides(col = "none")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Same plot for Trihalomethanes.
ggplot(mF_sd_sd,aes(x=as.factor(Potability),y=Trihalomethanes, col=Trihalomethanes)) + geom_boxplot() +
  geom_jitter(position=position_jitter(0.1)) + guides(col = "none")
## ----fig.width=12-------------------------------------------------------------------------------------------------------------------------------------------
# Strip charts of ph by potability class: first with a narrow symbol, then
# jittered to mitigate overplotting.
par(mfrow=c(2,1)) # enable two panels per plot
stripchart(ph ~ Potability, data=mF_sd_sd, pch="|", ylim=c(.5, 2.5)) # narrow plotting symbol
stripchart(ph ~ Potability, data=mF_sd_sd, meth="j", ylim=c(.5, 2.5)) # jittered to mitigate overplotting
par(mfrow=c(1,1)) # return to single-panel plotting
## ----fig.width=12-------------------------------------------------------------------------------------------------------------------------------------------
# Same pair of strip charts for Trihalomethanes.
par(mfrow=c(2,1)) # enable two panels per plot
stripchart(Trihalomethanes ~ Potability, data=mF_sd_sd, pch="|", ylim=c(.5, 2.5)) # narrow plotting symbol
stripchart(Trihalomethanes ~ Potability, data=mF_sd_sd, meth="j", ylim=c(.5, 2.5)) # jittered to mitigate overplotting
par(mfrow=c(1,1))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Box-Cox transform Chloramines (DescTools) using the automatically estimated
# lambda, then compare original vs. transformed via Q-Q plots and histograms.
library(DescTools)
x.norm <- BoxCox(mF_sd_sd$Chloramines, lambda = BoxCoxLambda(mF_sd_sd$Chloramines))
par(mfrow=c(2,2))
qqnorm(mF_sd_sd$Chloramines, main="Original")
qqline(mF_sd_sd$Chloramines,col=2)
qqnorm(x.norm, main="Box-Cox")
qqline(x.norm,col=2)
hist(mF_sd_sd$Chloramines,main="Original")
hist(x.norm, main="Box-Cox")
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Normality before/after the transform.
shapiro.test(mF_sd_sd$Chloramines)
shapiro.test(x.norm)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Variance homogeneity before/after the transform.
fligner.test(Chloramines ~ as.factor(Potability), data=mF_sd_sd)
fligner.test(x.norm ~ as.factor(mF_sd_sd$Potability))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Alternative Box-Cox via MASS::boxcox: grid-search lambda over [-6, 6], pick
# the lambda with the highest log-likelihood, apply the transform manually.
library(MASS)
library(rcompanion)
Box = boxcox(mF_sd_sd$Chloramines ~ 1,
             lambda = seq(-6,6,0.1)
             )
Cox = data.frame(Box$x, Box$y)
# Sort by descending log-likelihood; the first row holds the best lambda.
Cox2 = Cox[with(Cox, order(-Cox$Box.y)),]
Cox2[1,]
lambda = Cox2[1, "Box.x"]
# Standard Box-Cox transform with the selected lambda.
T_box = (mF_sd_sd$Chloramines ^ lambda - 1)/lambda
plotNormalHistogram(mF_sd_sd$Chloramines)
plotNormalHistogram(T_box)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Normality and variance homogeneity of the transformed variable.
shapiro.test(T_box)
fligner.test(T_box ~ as.factor(mF_sd_sd$Potability))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Stratified 2/3 train / 1/3 test splits (rminer::holdout) for each of the
# eight imputed-and-filtered datasets, printing the class balance of every
# split.  A single set.seed(100) at the top means each split depends on the
# order of the holdout() calls below.
library(rminer)
set.seed(100)
# missForest imputation, Tukey filtering on the full data.
h<-holdout(mF_complete_Tuk$Potability,ratio=2/3,mode="stratified")
mF_complete_Tuk_train<-mF_complete_Tuk[h$tr,]
mF_complete_Tuk_test<-mF_complete_Tuk[h$ts,]
print(table(mF_complete_Tuk_train$Potability))
print(table(mF_complete_Tuk_test$Potability))
# missForest imputation, "sd" variant (see earlier NOTE: currently unfiltered).
h<-holdout(mF_complete_sd$Potability,ratio=2/3,mode="stratified")
mF_complete_sd_train<-mF_complete_sd[h$tr,]
mF_complete_sd_test<-mF_complete_sd[h$ts,]
print(table(mF_complete_sd_train$Potability))
print(table(mF_complete_sd_test$Potability))
# missForest imputation, Tukey pre- and post-filtering.
h<-holdout(mF_Tukey_Tuk$Potability,ratio=2/3,mode="stratified")
mF_Tukey_Tuk_train<-mF_Tukey_Tuk[h$tr,]
mF_Tukey_Tuk_test<-mF_Tukey_Tuk[h$ts,]
print(table(mF_Tukey_Tuk_train$Potability))
print(table(mF_Tukey_Tuk_test$Potability))
# missForest imputation, 3-sigma pre- and post-filtering.
h<-holdout(mF_sd_sd$Potability,ratio=2/3,mode="stratified")
mF_sd_sd_train<-mF_sd_sd[h$tr,]
mF_sd_sd_test<-mF_sd_sd[h$ts,]
print(table(mF_sd_sd_train$Potability))
print(table(mF_sd_sd_test$Potability))
# kNN imputation, Tukey filtering on the full data.
h<-holdout(KNN_complete_Tuk$Potability,ratio=2/3,mode="stratified")
KNN_complete_Tuk_train<-KNN_complete_Tuk[h$tr,]
KNN_complete_Tuk_test<-KNN_complete_Tuk[h$ts,]
print(table(KNN_complete_Tuk_train$Potability))
print(table(KNN_complete_Tuk_test$Potability))
# kNN imputation, "sd" variant (see earlier NOTE: currently unfiltered).
h<-holdout(KNN_complete_sd$Potability,ratio=2/3,mode="stratified")
KNN_complete_sd_train<-KNN_complete_sd[h$tr,]
KNN_complete_sd_test<-KNN_complete_sd[h$ts,]
print(table(KNN_complete_sd_train$Potability))
print(table(KNN_complete_sd_test$Potability))
# kNN imputation, Tukey pre- and post-filtering.
h<-holdout(KNN_Tukey_Tuk$Potability,ratio=2/3,mode="stratified")
KNN_Tukey_Tuk_train<-KNN_Tukey_Tuk[h$tr,]
KNN_Tukey_Tuk_test<-KNN_Tukey_Tuk[h$ts,]
print(table(KNN_Tukey_Tuk_train$Potability))
print(table(KNN_Tukey_Tuk_test$Potability))
# kNN imputation, 3-sigma pre- and post-filtering.
h<-holdout(KNN_sd_sd$Potability,ratio=2/3,mode="stratified")
KNN_sd_sd_train<-KNN_sd_sd[h$tr,]
KNN_sd_sd_test<-KNN_sd_sd[h$ts,]
print(table(KNN_sd_sd_train$Potability))
print(table(KNN_sd_sd_test$Potability))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Random forest via caret with 5-fold CV, trained in parallel on all cores
# but one.  NOTE(review): the `p = 0.8` argument to trainControl only applies
# to leave-group-out resampling (method = "LGOCV"), not "cv" -- it is
# harmless here but misleading.
library(caret)
library(parallel)
library(doParallel)
set.seed(100)
cl <- makePSOCKcluster(parallel::detectCores() - 1)
registerDoParallel(cl)
model_1 <- caret::train(mF_complete_Tuk_train[, -10], as.factor(mF_complete_Tuk_train$Potability),
                 method = "rf", trControl = caret::trainControl(method = "cv", p = 0.8, number = 5))
stopCluster(cl)
model_1
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Evaluate the caret model on the held-out test set.
predict <- predict(model_1, mF_complete_Tuk_test)
confusionMatrix(predict, as.factor(mF_complete_Tuk_test$Potability))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Accuracy across the tuned mtry values.
plot(model_1)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Variable importance.
plot(varImp(model_1, scale = FALSE))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Plain randomForest (10000 trees) on the same missForest/Tukey training set.
library(randomForest)
set.seed(100)
model_1.1<-randomForest(as.factor(Potability)~.,mF_complete_Tuk_train,ntree=10000)
model_1.1
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Test-set confusion matrix.
predict_rf_2 <- predict(model_1.1, mF_complete_Tuk_test)
cm <- confusionMatrix(predict_rf_2, as.factor(mF_complete_Tuk_test$Potability))
cm
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Start the accuracy comparison table with this model's test accuracy.
overall <- cm$overall
overall.accuracy <- overall['Accuracy']
accuracy_data <- data.frame("Nombre"="model_1", "accuracy"=c(overall.accuracy))
accuracy_data
## -------------------------------------------------------------------------
## model_2: random forest on the complete-case data with sd-based outlier
## treatment.
set.seed(100)
model_2 <- randomForest(as.factor(Potability) ~ ., mF_complete_sd_train, ntree = 10000)
model_2
## -------------------------------------------------------------------------
## Hold-out evaluation.
m2_preds <- predict(model_2, mF_complete_sd_test)
m2_cm <- confusionMatrix(m2_preds, as.factor(mF_complete_sd_test$Potability))
m2_cm
## -------------------------------------------------------------------------
## Append the hold-out accuracy to the comparison table.
accuracy_data <- rbind(
  accuracy_data,
  data.frame("Nombre" = "model_2", "accuracy" = c(m2_cm$overall["Accuracy"]))
)
## -------------------------------------------------------------------------
## model_3: random forest on the Tukey-imputed, Tukey-outlier-treated data.
set.seed(100)
model_3 <- randomForest(as.factor(Potability) ~ ., mF_Tukey_Tuk_train, ntree = 10000)
model_3
## -------------------------------------------------------------------------
## Hold-out evaluation.
m3_preds <- predict(model_3, mF_Tukey_Tuk_test)
m3_cm <- confusionMatrix(m3_preds, as.factor(mF_Tukey_Tuk_test$Potability))
m3_cm
## -------------------------------------------------------------------------
## Append the hold-out accuracy to the comparison table.
accuracy_data <- rbind(
  accuracy_data,
  data.frame("Nombre" = "model_3", "accuracy" = c(m3_cm$overall["Accuracy"]))
)
## -------------------------------------------------------------------------
## model_4: random forest on the sd-imputed, sd-outlier-treated data.
set.seed(100)
model_4 <- randomForest(as.factor(Potability) ~ ., mF_sd_sd_train, ntree = 10000)
model_4
## -------------------------------------------------------------------------
## Hold-out evaluation.
m4_preds <- predict(model_4, mF_sd_sd_test)
m4_cm <- confusionMatrix(m4_preds, as.factor(mF_sd_sd_test$Potability))
m4_cm
## -------------------------------------------------------------------------
## Append the hold-out accuracy to the comparison table.
accuracy_data <- rbind(
  accuracy_data,
  data.frame("Nombre" = "model_4", "accuracy" = c(m4_cm$overall["Accuracy"]))
)
## -------------------------------------------------------------------------
## model_5: random forest on the KNN-imputed, complete, Tukey-treated data.
set.seed(100)
model_5 <- randomForest(as.factor(Potability) ~ ., KNN_complete_Tuk_train, ntree = 10000)
model_5
## -------------------------------------------------------------------------
## Hold-out evaluation.
m5_preds <- predict(model_5, KNN_complete_Tuk_test)
m5_cm <- confusionMatrix(m5_preds, as.factor(KNN_complete_Tuk_test$Potability))
m5_cm
## -------------------------------------------------------------------------
## Append the hold-out accuracy to the comparison table.
accuracy_data <- rbind(
  accuracy_data,
  data.frame("Nombre" = "model_5", "accuracy" = c(m5_cm$overall["Accuracy"]))
)
## -------------------------------------------------------------------------
## model_6: random forest on the KNN-imputed, sd-treated data; this one also
## fixes mtry at 3.
set.seed(100)
model_6 <- randomForest(as.factor(Potability) ~ ., KNN_complete_sd_train, ntree = 10000, mtry = 3)
model_6
## -------------------------------------------------------------------------
## Hold-out evaluation.
m6_preds <- predict(model_6, KNN_complete_sd_test)
m6_cm <- confusionMatrix(m6_preds, as.factor(KNN_complete_sd_test$Potability))
m6_cm
## -------------------------------------------------------------------------
## Append the hold-out accuracy to the comparison table.
accuracy_data <- rbind(
  accuracy_data,
  data.frame("Nombre" = "model_6", "accuracy" = c(m6_cm$overall["Accuracy"]))
)
## -------------------------------------------------------------------------
## model_7: random forest on the KNN-imputed, Tukey-outlier-treated data.
set.seed(100)
model_7 <- randomForest(as.factor(Potability) ~ ., KNN_Tukey_Tuk_train, ntree = 10000)
model_7
## -------------------------------------------------------------------------
## Hold-out evaluation.
m7_preds <- predict(model_7, KNN_Tukey_Tuk_test)
m7_cm <- confusionMatrix(m7_preds, as.factor(KNN_Tukey_Tuk_test$Potability))
m7_cm
## -------------------------------------------------------------------------
## Append the hold-out accuracy to the comparison table.
accuracy_data <- rbind(
  accuracy_data,
  data.frame("Nombre" = "model_7", "accuracy" = c(m7_cm$overall["Accuracy"]))
)
## -------------------------------------------------------------------------
## model_8: random forest on the KNN-imputed, sd-outlier-treated data.
set.seed(100)
model_8 <- randomForest(as.factor(Potability) ~ ., KNN_sd_sd_train, ntree = 10000)
model_8
## -------------------------------------------------------------------------
## Hold-out evaluation.
m8_preds <- predict(model_8, KNN_sd_sd_test)
m8_cm <- confusionMatrix(m8_preds, as.factor(KNN_sd_sd_test$Potability))
m8_cm
## -------------------------------------------------------------------------
## Append the hold-out accuracy to the comparison table.
accuracy_data <- rbind(
  accuracy_data,
  data.frame("Nombre" = "model_8", "accuracy" = c(m8_cm$overall["Accuracy"]))
)
## -------------------------------------------------------------------------
# Extra stratified 2/3 hold-out splits on the datasets that only received ONE
# outlier treatment (Tukey-only / sd-only).
# NOTE(review): unlike the earlier splits, no set.seed() precedes these
# holdout() calls, so the partitions depend on whatever RNG state the
# preceding code left behind -- confirm this is intentional if these results
# need to be reproducible in isolation.
h<-holdout(mF_Tukey$Potability,ratio=2/3,mode="stratified")
mF_Tukey_train<-mF_Tukey[h$tr,]
mF_Tukey_test<-mF_Tukey[h$ts,]
# Class-balance check on both partitions.
print(table(mF_Tukey_train$Potability))
print(table(mF_Tukey_test$Potability))
h<-holdout(mF_sd$Potability,ratio=2/3,mode="stratified")
mF_sd_train<-mF_sd[h$tr,]
mF_sd_test<-mF_sd[h$ts,]
print(table(mF_sd_train$Potability))
print(table(mF_sd_test$Potability))
## -------------------------------------------------------------------------
## Exploratory random forest on the Tukey-only dataset (accuracy is not
## recorded in the comparison table).
set.seed(100)
model_test_1 <- randomForest(as.factor(Potability) ~ ., mF_Tukey_train, ntree = 10000)
model_test_1
## -------------------------------------------------------------------------
## Hold-out evaluation.
test1_preds <- predict(model_test_1, mF_Tukey_test)
confusionMatrix(test1_preds, as.factor(mF_Tukey_test$Potability))
## -------------------------------------------------------------------------
## Exploratory random forest on the sd-only dataset (accuracy is not recorded
## in the comparison table).
set.seed(100)
model_test_2 <- randomForest(as.factor(Potability) ~ ., mF_sd_train, ntree = 10000)
model_test_2
## -------------------------------------------------------------------------
## Hold-out evaluation.
test2_preds <- predict(model_test_2, mF_sd_test)
confusionMatrix(test2_preds, as.factor(mF_sd_test$Potability))
## -------------------------------------------------------------------------
# Variable importance for model_6 (the random forest examined further below).
varImpPlot(model_6)
## -------------------------------------------------------------------------
# Convert the response to a factor up front so the tidymodels pipeline below
# treats the task as classification.
KNN_complete_sd_train$Potability <- as.factor(KNN_complete_sd_train$Potability)
KNN_complete_sd_test$Potability <- as.factor(KNN_complete_sd_test$Potability)
## -------------------------------------------------------------------------
library(ranger)
library(tidymodels)
library(parallel)
library(doParallel)
# MODEL DEFINITION AND HYPERPARAMETERS TO OPTIMISE
# ==============================================================================
# Random forest with the ranger engine; mtry, number of trees and max depth
# are left as tune() placeholders to be filled by the grid search below.
modelo <- rand_forest(
mode = "classification",
mtry = tune(),
trees = tune()
) %>%
set_engine(
engine = "ranger",
max.depth = tune(),
importance = "none",
seed = 100
)
# Keep per-fold predictions so they can be inspected after tuning.
control <- control_resamples(save_pred = TRUE)
# PREPROCESSING DEFINITION
# ==============================================================================
# No preprocessing steps here: the recipe only carries the model formula and
# the training data.
transformer <- recipe(
formula = Potability ~ .,
data = KNN_complete_sd_train
)
# VALIDATION STRATEGY AND FOLD CREATION
# ==============================================================================
# 5-fold cross-validation, stratified on the response.
set.seed(100)
cv_folds <- vfold_cv(
data = KNN_complete_sd_train,
v = 5,
strata = Potability
)
# WORKFLOW
# ==============================================================================
workflow_modelado <- workflow() %>%
add_recipe(transformer) %>%
add_model(modelo)
# HYPERPARAMETER GRID
# ==============================================================================
# Full cartesian grid: 4 tree counts x 2 mtry values x 8 depths = 64 fits
# per fold.
hiperpar_grid <- expand_grid(
'trees' = c(100, 500, 1000, 2000),
'mtry' = c( 4, 5),
'max.depth' = c(1, 3, 10, 20, 40, 60, 80, 100)
)
# HYPERPARAMETER OPTIMISATION
# ==============================================================================
# Parallelised grid search scored on accuracy only.
cl <- makePSOCKcluster(parallel::detectCores() - 1)
registerDoParallel(cl)
grid_fit <- tune_grid(
object = workflow_modelado,
resamples = cv_folds,
metrics = metric_set(yardstick::accuracy),
grid = hiperpar_grid,
control = control
)
stopCluster(cl)
## -------------------------------------------------------------------------
## Inspect the raw (per-fold, per-candidate) cross-validation metrics.
cv_metrics <- grid_fit %>% collect_metrics(summarize = FALSE)
head(cv_metrics)
## -------------------------------------------------------------------------
library(ggpubr)
## Density of the per-fold accuracy estimates.
dens_plot <- ggplot(cv_metrics, aes(x = .estimate, fill = .metric)) +
  geom_density(alpha = 0.5) +
  theme_bw()
## Boxplot + jitter of the same estimates, one row per metric.
box_plot <- ggplot(cv_metrics,
                   aes(x = .metric, y = .estimate, fill = .metric, color = .metric)) +
  geom_boxplot(outlier.shape = NA, alpha = 0.1) +
  geom_jitter(width = 0.05, alpha = 0.3) +
  coord_flip() +
  theme_bw() +
  theme(axis.text.x = element_blank(), axis.ticks.x = element_blank())
## Stack both panels with a shared legend.
ggarrange(dens_plot, box_plot, nrow = 2, common.legend = TRUE, align = "v") %>%
  annotate_figure(
    top = text_grob("Distribución errores de validación cruzada", size = 15)
  )
## -------------------------------------------------------------------------
## Top hyperparameter combinations by CV accuracy.
show_best(grid_fit, metric="accuracy")
## -------------------------------------------------------------------------
## Finalise the workflow with the best CV hyperparameters, refit on the full
## training set, and evaluate on the hold-out test set.
mejores_hiperpar <- select_best(grid_fit, metric="accuracy")
# NOTE(review): pull_workflow_fit() is deprecated in the workflows package;
# extract_fit_parsnip() is its direct, behaviour-identical replacement.
modelo_final_fit <- finalize_workflow(
x = workflow_modelado,
parameters = mejores_hiperpar
) %>%
fit(
data = KNN_complete_sd_train
) %>%
extract_fit_parsnip()
## -------------------------------------------------------------------------
# Test-set predictions paired with the observed labels.
predicciones <- modelo_final_fit %>%
predict(new_data = KNN_complete_sd_test)
predicciones <- predicciones %>%
bind_cols(KNN_complete_sd_test %>% dplyr::select(Potability))
# Hold-out accuracy of the finalised model.
accuracy_test <- accuracy(
data = predicciones,
truth = Potability,
estimate = .pred_class,
na_rm = TRUE
)
accuracy_test
## -------------------------------------------------------------------------
## Confusion matrix for the finalised tidymodels random forest.
mat_confusion <- conf_mat(predicciones, truth = Potability, estimate = .pred_class)
mat_confusion
## -------------------------------------------------------------------------
## model_6 variant: refit with the tuned settings (2000 trees, mtry = 4)
## using six selected predictors (Turbidity variant).
library(randomForest)
set.seed(100)
model_6_1 <- randomForest(
  as.factor(Potability) ~ Sulfate + ph + Hardness + Solids + Chloramines + Turbidity,
  KNN_complete_sd_train, ntree = 2000, mtry = 4
)
model_6_1
## -------------------------------------------------------------------------
## Hold-out evaluation.
m61_preds <- predict(model_6_1, KNN_complete_sd_test)
confusionMatrix(m61_preds, as.factor(KNN_complete_sd_test$Potability))
## -------------------------------------------------------------------------
## model_6 variant: same tuned settings, swapping Turbidity for
## Organic_carbon; its accuracy is recorded as "model_6_mod".
library(randomForest)
set.seed(100)
model_6_2 <- randomForest(
  as.factor(Potability) ~ Sulfate + ph + Hardness + Solids + Chloramines + Organic_carbon,
  KNN_complete_sd_train, ntree = 2000, mtry = 4
)
model_6_2
## -------------------------------------------------------------------------
## Hold-out evaluation.
m62_preds <- predict(model_6_2, KNN_complete_sd_test)
m62_cm <- confusionMatrix(m62_preds, as.factor(KNN_complete_sd_test$Potability))
m62_cm
## -------------------------------------------------------------------------
## Append the hold-out accuracy to the comparison table.
accuracy_data <- rbind(
  accuracy_data,
  data.frame("Nombre" = "model_6_mod", "accuracy" = c(m62_cm$overall["Accuracy"]))
)
## -------------------------------------------------------------------------
## model_6 variant: tuned settings with only the five strongest predictors.
library(randomForest)
set.seed(100)
model_6_3 <- randomForest(
  as.factor(Potability) ~ Sulfate + ph + Hardness + Solids + Chloramines,
  KNN_complete_sd_train, ntree = 2000, mtry = 4
)
model_6_3
## -------------------------------------------------------------------------
## Hold-out evaluation.
m63_preds <- predict(model_6_3, KNN_complete_sd_test)
confusionMatrix(m63_preds, as.factor(KNN_complete_sd_test$Potability))
## ----fig.width=10-----------------------------------------------------------
## Bar chart comparing hold-out accuracy across all random-forest models,
## labelled with the accuracy as a percentage.
ggplot(accuracy_data, aes(x = as.factor(Nombre), y = accuracy, fill = as.factor(Nombre))) +
  geom_col() +
  geom_text(aes(label = round(accuracy * 100, digits = 2)),
            position = position_dodge(width = 0.9), vjust = -0.25)
## -------------------------------------------------------------------------
# Stratified 2/3 hold-out splits for all eight imputation/outlier-treatment
# dataset variants, with class-balance checks.
# NOTE(review): this re-creates (and overwrites) *_train/*_test objects that
# earlier code already used -- looks like a duplicated setup chunk from the
# purled notebook; confirm the intended chunk order. Also note set.seed(100)
# only precedes the FIRST holdout() call; the later splits consume the same
# RNG stream, so they are reproducible only when the whole sequence is rerun.
library(rminer)
set.seed(100)
h<-holdout(mF_complete_Tuk$Potability,ratio=2/3,mode="stratified")
mF_complete_Tuk_train<-mF_complete_Tuk[h$tr,]
mF_complete_Tuk_test<-mF_complete_Tuk[h$ts,]
print(table(mF_complete_Tuk_train$Potability))
print(table(mF_complete_Tuk_test$Potability))
h<-holdout(mF_complete_sd$Potability,ratio=2/3,mode="stratified")
mF_complete_sd_train<-mF_complete_sd[h$tr,]
mF_complete_sd_test<-mF_complete_sd[h$ts,]
print(table(mF_complete_sd_train$Potability))
print(table(mF_complete_sd_test$Potability))
h<-holdout(mF_Tukey_Tuk$Potability,ratio=2/3,mode="stratified")
mF_Tukey_Tuk_train<-mF_Tukey_Tuk[h$tr,]
mF_Tukey_Tuk_test<-mF_Tukey_Tuk[h$ts,]
print(table(mF_Tukey_Tuk_train$Potability))
print(table(mF_Tukey_Tuk_test$Potability))
h<-holdout(mF_sd_sd$Potability,ratio=2/3,mode="stratified")
mF_sd_sd_train<-mF_sd_sd[h$tr,]
mF_sd_sd_test<-mF_sd_sd[h$ts,]
print(table(mF_sd_sd_train$Potability))
print(table(mF_sd_sd_test$Potability))
h<-holdout(KNN_complete_Tuk$Potability,ratio=2/3,mode="stratified")
KNN_complete_Tuk_train<-KNN_complete_Tuk[h$tr,]
KNN_complete_Tuk_test<-KNN_complete_Tuk[h$ts,]
print(table(KNN_complete_Tuk_train$Potability))
print(table(KNN_complete_Tuk_test$Potability))
h<-holdout(KNN_complete_sd$Potability,ratio=2/3,mode="stratified")
KNN_complete_sd_train<-KNN_complete_sd[h$tr,]
KNN_complete_sd_test<-KNN_complete_sd[h$ts,]
print(table(KNN_complete_sd_train$Potability))
print(table(KNN_complete_sd_test$Potability))
h<-holdout(KNN_Tukey_Tuk$Potability,ratio=2/3,mode="stratified")
KNN_Tukey_Tuk_train<-KNN_Tukey_Tuk[h$tr,]
KNN_Tukey_Tuk_test<-KNN_Tukey_Tuk[h$ts,]
print(table(KNN_Tukey_Tuk_train$Potability))
print(table(KNN_Tukey_Tuk_test$Potability))
h<-holdout(KNN_sd_sd$Potability,ratio=2/3,mode="stratified")
KNN_sd_sd_train<-KNN_sd_sd[h$tr,]
KNN_sd_sd_test<-KNN_sd_sd[h$ts,]
print(table(KNN_sd_sd_train$Potability))
print(table(KNN_sd_sd_test$Potability))
## -------------------------------------------------------------------------
## model_xgb_1: XGBoost via caret on mF_complete_Tuk, grid search over
## nrounds / depth / eta / gamma with 5-fold CV, parallelised.
library(caret)
trctrl <- trainControl(method = "cv", number = 5)
set.seed(100)
cl <- makePSOCKcluster(parallel::detectCores() - 1)
registerDoParallel(cl)
# NOTE(review): the original named this grid `tune_grid`, masking the
# tune::tune_grid() function loaded via tidymodels above; renamed.
xgb_grid <- expand.grid(nrounds = c(100, 300, 500),
                        max_depth = c(5, 10, 15),
                        eta = c(0.05, 0.2),
                        gamma = c(0.01, 1),
                        colsample_bytree = c(1),
                        subsample = c(1),
                        min_child_weight = c(1))
# NOTE(review): `tuneLength` is ignored by caret::train() when an explicit
# tuneGrid is supplied, so it has been dropped.
model_xgb_1 <- train(as.factor(Potability) ~., data = mF_complete_Tuk_train,
                     method = "xgbTree",
                     trControl = trctrl,
                     tuneGrid = xgb_grid)
stopCluster(cl)
# CV accuracy across the grid, used to pick parameters for the manual fit.
plot(model_xgb_1)
# Hold-out evaluation.
test_predict <- predict(model_xgb_1, mF_complete_Tuk_test)
confusionMatrix(test_predict, as.factor(mF_complete_Tuk_test$Potability))
## -------------------------------------------------------------------------
# Manual XGBoost fit for mF_complete_Tuk with the hyperparameters chosen
# from the caret grid search above.
library(xgboost)
set.seed(100)
# Column 10 is the response (Potability); the rest are features.
train.data = as.matrix(mF_complete_Tuk_train[, -10])
train.label = mF_complete_Tuk_train[, 10]
test.data = as.matrix(mF_complete_Tuk_test[, -10])
test.label = mF_complete_Tuk_test[, 10]
xgb.train = xgb.DMatrix(data=train.data,label=(train.label))
# NOTE(review): xgb.test is built but never passed to xgb.train() (no
# watchlist), so it is currently unused.
xgb.test = xgb.DMatrix(data=test.data,label=(test.label))
# Selected hyperparameters. NOTE(review): the task is binary, so
# multi:softprob works but binary:logistic would be the conventional choice.
num_class = length(unique(mF_complete_Tuk_train$Potability))
params = list(
booster="gbtree",
eta = 0.05,
max_depth = 15,
gamma = 1,
subsample = 1,
colsample_bytree = 1,
objective="multi:softprob",
eval_metric="mlogloss",
num_class=num_class
)
# Train the model.
xgb.fit=xgb.train(
params=params,
data=xgb.train,
nrounds = 100
)
# Fitted model summary.
xgb.fit
# Prediction: one probability column per class.
xgb.pred = predict(xgb.fit,test.data,reshape=T)
xgb.pred = as.data.frame(xgb.pred)
# NOTE(review): unique() returns labels in order of first appearance, so the
# column naming here (and the label lookup below) silently depends on the
# row order of the training data -- verify it maps 0/1 correctly.
colnames(xgb.pred) = unique(mF_complete_Tuk_train$Potability)
# Use the predicted label with the highest probability
xgb.pred$prediction = apply(xgb.pred,1,function(x) colnames(xgb.pred)[which.max(x)])
xgb.pred$label = unique(mF_complete_Tuk_train$Potability)[test.label+1]
# Calculate the final accuracy
result_xgb1 = sum(xgb.pred$prediction==xgb.pred$label)/nrow(xgb.pred)
print(paste("Final Accuracy =",sprintf("%1.2f%%", 100*result_xgb1)))
# Start the XGBoost accuracy comparison table.
accuracy_data2<- data.frame("Nombre"="model_xgb_1", "accuracy"=result_xgb1)
## -------------------------------------------------------------------------
## model_xgb_2: same caret XGBoost grid search, applied to mF_complete_sd.
trctrl <- trainControl(method = "cv", number = 5)
set.seed(100)
cl <- makePSOCKcluster(parallel::detectCores() - 1)
registerDoParallel(cl)
# NOTE(review): grid renamed from `tune_grid` (masked tune::tune_grid()).
xgb_grid <- expand.grid(nrounds = c(100, 300, 500),
                        max_depth = c(5, 10, 15),
                        eta = c(0.05, 0.2),
                        gamma = c(0.01, 1),
                        colsample_bytree = c(1),
                        subsample = c(1),
                        min_child_weight = c(1))
# NOTE(review): `tuneLength` dropped -- ignored when tuneGrid is supplied.
model_xgb_2 <- train(as.factor(Potability) ~., data = mF_complete_sd_train,
                     method = "xgbTree",
                     trControl = trctrl,
                     tuneGrid = xgb_grid)
stopCluster(cl)
# CV accuracy across the grid.
plot(model_xgb_2)
# Hold-out evaluation.
test_predict <- predict(model_xgb_2, mF_complete_sd_test)
confusionMatrix(test_predict, as.factor(mF_complete_sd_test$Potability))
## -------------------------------------------------------------------------
# Manual XGBoost fit, analogous to the model_xgb_1 chunk above, for the
# mF_complete_sd dataset (eta 0.05, depth 15, gamma 0.01, 100 rounds).
set.seed(100)
# Column 10 is the response (Potability); the rest are features.
train.data = as.matrix(mF_complete_sd_train[, -10])
train.label = mF_complete_sd_train[, 10]
test.data = as.matrix(mF_complete_sd_test[, -10])
test.label = mF_complete_sd_test[, 10]
xgb.train = xgb.DMatrix(data=train.data,label=(train.label))
# NOTE(review): xgb.test is built but never used (no watchlist).
xgb.test = xgb.DMatrix(data=test.data,label=(test.label))
# Selected hyperparameters.
num_class = length(unique(mF_complete_sd_train$Potability))
params = list(
booster="gbtree",
eta = 0.05,
max_depth = 15,
gamma = 0.01,
subsample = 1,
colsample_bytree = 1,
objective="multi:softprob",
eval_metric="mlogloss",
num_class=num_class
)
# Train the model.
xgb.fit=xgb.train(
params=params,
data=xgb.train,
nrounds = 100
)
# Fitted model summary.
xgb.fit
# Prediction: one probability column per class.
xgb.pred = predict(xgb.fit,test.data,reshape=T)
xgb.pred = as.data.frame(xgb.pred)
# NOTE(review): column naming / label lookup via unique() depends on training
# row order -- verify it maps 0/1 correctly.
colnames(xgb.pred) = unique(mF_complete_sd_train$Potability)
# Use the predicted label with the highest probability
xgb.pred$prediction = apply(xgb.pred,1,function(x) colnames(xgb.pred)[which.max(x)])
xgb.pred$label = unique(mF_complete_sd_train$Potability)[test.label+1]
# Calculate the final accuracy
result_xgb2 = sum(xgb.pred$prediction==xgb.pred$label)/nrow(xgb.pred)
print(paste("Final Accuracy =",sprintf("%1.2f%%", 100*result_xgb2)))
# Append to the XGBoost accuracy comparison table.
temp <- data.frame("Nombre"="model_xgb_2", "accuracy"=c(result_xgb2))
accuracy_data2 <- rbind(accuracy_data2, temp)
## -------------------------------------------------------------------------
## model_xgb_3: same caret XGBoost grid search, applied to mF_Tukey_Tuk.
trctrl <- trainControl(method = "cv", number = 5)
set.seed(100)
cl <- makePSOCKcluster(parallel::detectCores() - 1)
registerDoParallel(cl)
# NOTE(review): grid renamed from `tune_grid` (masked tune::tune_grid()).
xgb_grid <- expand.grid(nrounds = c(100, 300, 500),
                        max_depth = c(5, 10, 15),
                        eta = c(0.05, 0.2),
                        gamma = c(0.01, 1),
                        colsample_bytree = c(1),
                        subsample = c(1),
                        min_child_weight = c(1))
# NOTE(review): `tuneLength` dropped -- ignored when tuneGrid is supplied.
model_xgb_3 <- train(as.factor(Potability) ~., data = mF_Tukey_Tuk_train,
                     method = "xgbTree",
                     trControl = trctrl,
                     tuneGrid = xgb_grid)
stopCluster(cl)
# CV accuracy across the grid.
plot(model_xgb_3)
# Hold-out evaluation.
test_predict <- predict(model_xgb_3, mF_Tukey_Tuk_test)
confusionMatrix(test_predict, as.factor(mF_Tukey_Tuk_test$Potability))
## -------------------------------------------------------------------------
# Manual XGBoost fit, analogous to the model_xgb_1 chunk above, for the
# mF_Tukey_Tuk dataset (eta 0.05, depth 15, gamma 0.01, 500 rounds).
set.seed(100)
# Column 10 is the response (Potability); the rest are features.
train.data = as.matrix(mF_Tukey_Tuk_train[, -10])
train.label = mF_Tukey_Tuk_train[, 10]
test.data = as.matrix(mF_Tukey_Tuk_test[, -10])
test.label = mF_Tukey_Tuk_test[, 10]
xgb.train = xgb.DMatrix(data=train.data,label=(train.label))
# NOTE(review): xgb.test is built but never used (no watchlist).
xgb.test = xgb.DMatrix(data=test.data,label=(test.label))
# Selected hyperparameters.
num_class = length(unique(mF_Tukey_Tuk_train$Potability))
params = list(
booster="gbtree",
eta = 0.05,
max_depth = 15,
gamma = 0.01,
subsample = 1,
colsample_bytree = 1,
objective="multi:softprob",
eval_metric="mlogloss",
num_class=num_class
)
# Train the model.
xgb.fit=xgb.train(
params=params,
data=xgb.train,
nrounds = 500
)
# Fitted model summary.
xgb.fit
# Prediction: one probability column per class.
xgb.pred = predict(xgb.fit,test.data,reshape=T)
xgb.pred = as.data.frame(xgb.pred)
# NOTE(review): column naming / label lookup via unique() depends on training
# row order -- verify it maps 0/1 correctly.
colnames(xgb.pred) = unique(mF_Tukey_Tuk_train$Potability)
# Use the predicted label with the highest probability
xgb.pred$prediction = apply(xgb.pred,1,function(x) colnames(xgb.pred)[which.max(x)])
xgb.pred$label = unique(mF_Tukey_Tuk_train$Potability)[test.label+1]
# Calculate the final accuracy
result_xgb3 = sum(xgb.pred$prediction==xgb.pred$label)/nrow(xgb.pred)
print(paste("Final Accuracy =",sprintf("%1.2f%%", 100*result_xgb3)))
# Append to the XGBoost accuracy comparison table.
temp <- data.frame("Nombre"="model_xgb_3", "accuracy"=c(result_xgb3))
accuracy_data2 <- rbind(accuracy_data2, temp)
## -------------------------------------------------------------------------
## model_xgb_4: same caret XGBoost grid search, applied to mF_sd_sd.
trctrl <- trainControl(method = "cv", number = 5)
set.seed(100)
cl <- makePSOCKcluster(parallel::detectCores() - 1)
registerDoParallel(cl)
# NOTE(review): grid renamed from `tune_grid` (masked tune::tune_grid()).
xgb_grid <- expand.grid(nrounds = c(100, 300, 500),
                        max_depth = c(5, 10, 15),
                        eta = c(0.05, 0.2),
                        gamma = c(0.01, 1),
                        colsample_bytree = c(1),
                        subsample = c(1),
                        min_child_weight = c(1))
# NOTE(review): `tuneLength` dropped -- ignored when tuneGrid is supplied.
model_xgb_4 <- train(as.factor(Potability) ~., data = mF_sd_sd_train,
                     method = "xgbTree",
                     trControl = trctrl,
                     tuneGrid = xgb_grid)
stopCluster(cl)
# CV accuracy across the grid.
plot(model_xgb_4)
# Hold-out evaluation.
test_predict <- predict(model_xgb_4, mF_sd_sd_test)
confusionMatrix(test_predict, as.factor(mF_sd_sd_test$Potability))
## -------------------------------------------------------------------------
# Manual XGBoost fit, analogous to the model_xgb_1 chunk above, for the
# mF_sd_sd dataset (eta 0.2, depth 15, gamma 1, 500 rounds).
set.seed(100)
# Column 10 is the response (Potability); the rest are features.
train.data = as.matrix(mF_sd_sd_train[, -10])
train.label = mF_sd_sd_train[, 10]
test.data = as.matrix(mF_sd_sd_test[, -10])
test.label = mF_sd_sd_test[, 10]
xgb.train = xgb.DMatrix(data=train.data,label=(train.label))
# NOTE(review): xgb.test is built but never used (no watchlist).
xgb.test = xgb.DMatrix(data=test.data,label=(test.label))
# Selected hyperparameters.
num_class = length(unique(mF_sd_sd_train$Potability))
params = list(
booster="gbtree",
eta = 0.2,
max_depth = 15,
gamma = 1,
subsample = 1,
colsample_bytree = 1,
objective="multi:softprob",
eval_metric="mlogloss",
num_class=num_class
)
# Train the model.
xgb.fit=xgb.train(
params=params,
data=xgb.train,
nrounds = 500
)
# Fitted model summary.
xgb.fit
# Prediction: one probability column per class.
xgb.pred = predict(xgb.fit,test.data,reshape=T)
xgb.pred = as.data.frame(xgb.pred)
# NOTE(review): column naming / label lookup via unique() depends on training
# row order -- verify it maps 0/1 correctly.
colnames(xgb.pred) = unique(mF_sd_sd_train$Potability)
# Use the predicted label with the highest probability
xgb.pred$prediction = apply(xgb.pred,1,function(x) colnames(xgb.pred)[which.max(x)])
xgb.pred$label = unique(mF_sd_sd_train$Potability)[test.label+1]
# Calculate the final accuracy
result_xgb4 = sum(xgb.pred$prediction==xgb.pred$label)/nrow(xgb.pred)
print(paste("Final Accuracy =",sprintf("%1.2f%%", 100*result_xgb4)))
# Append to the XGBoost accuracy comparison table.
temp <- data.frame("Nombre"="model_xgb_4", "accuracy"=c(result_xgb4))
accuracy_data2 <- rbind(accuracy_data2, temp)
## -------------------------------------------------------------------------
## model_xgb_5: same caret XGBoost grid search, applied to KNN_complete_Tuk.
trctrl <- trainControl(method = "cv", number = 5)
set.seed(100)
cl <- makePSOCKcluster(parallel::detectCores() - 1)
registerDoParallel(cl)
# NOTE(review): grid renamed from `tune_grid` (masked tune::tune_grid()).
xgb_grid <- expand.grid(nrounds = c(100, 300, 500),
                        max_depth = c(5, 10, 15),
                        eta = c(0.05, 0.2),
                        gamma = c(0.01, 1),
                        colsample_bytree = c(1),
                        subsample = c(1),
                        min_child_weight = c(1))
# NOTE(review): `tuneLength` dropped -- ignored when tuneGrid is supplied.
model_xgb_5 <- train(as.factor(Potability) ~., data = KNN_complete_Tuk_train,
                     method = "xgbTree",
                     trControl = trctrl,
                     tuneGrid = xgb_grid)
stopCluster(cl)
# CV accuracy across the grid.
plot(model_xgb_5)
# Hold-out evaluation.
test_predict <- predict(model_xgb_5, KNN_complete_Tuk_test)
confusionMatrix(test_predict, as.factor(KNN_complete_Tuk_test$Potability))
## -------------------------------------------------------------------------
# Manual XGBoost fit, analogous to the model_xgb_1 chunk above, for the
# KNN_complete_Tuk dataset (eta 0.05, depth 10, gamma 1, 500 rounds).
set.seed(100)
# Column 10 is the response (Potability); the rest are features.
train.data = as.matrix(KNN_complete_Tuk_train[, -10])
train.label = KNN_complete_Tuk_train[, 10]
test.data = as.matrix(KNN_complete_Tuk_test[, -10])
test.label = KNN_complete_Tuk_test[, 10]
xgb.train = xgb.DMatrix(data=train.data,label=(train.label))
# NOTE(review): xgb.test is built but never used (no watchlist).
xgb.test = xgb.DMatrix(data=test.data,label=(test.label))
# Selected hyperparameters.
num_class = length(unique(KNN_complete_Tuk_train$Potability))
params = list(
booster="gbtree",
eta = 0.05,
max_depth = 10,
gamma = 1,
subsample = 1,
colsample_bytree = 1,
objective="multi:softprob",
eval_metric="mlogloss",
num_class=num_class
)
# Train the model.
xgb.fit=xgb.train(
params=params,
data=xgb.train,
nrounds = 500
)
# Fitted model summary.
xgb.fit
# Prediction: one probability column per class.
xgb.pred = predict(xgb.fit,test.data,reshape=T)
xgb.pred = as.data.frame(xgb.pred)
# NOTE(review): column naming / label lookup via unique() depends on training
# row order -- verify it maps 0/1 correctly.
colnames(xgb.pred) = unique(KNN_complete_Tuk_train$Potability)
# Use the predicted label with the highest probability
xgb.pred$prediction = apply(xgb.pred,1,function(x) colnames(xgb.pred)[which.max(x)])
xgb.pred$label = unique(KNN_complete_Tuk_train$Potability)[test.label+1]
# Calculate the final accuracy
result_xgb5 = sum(xgb.pred$prediction==xgb.pred$label)/nrow(xgb.pred)
print(paste("Final Accuracy =",sprintf("%1.2f%%", 100*result_xgb5)))
# Append to the XGBoost accuracy comparison table.
temp <- data.frame("Nombre"="model_xgb_5", "accuracy"=c(result_xgb5))
accuracy_data2 <- rbind(accuracy_data2, temp)
## -------------------------------------------------------------------------
## model_xgb_6: same caret XGBoost grid search, applied to KNN_complete_sd.
trctrl <- trainControl(method = "cv", number = 5)
set.seed(100)
cl <- makePSOCKcluster(parallel::detectCores() - 1)
registerDoParallel(cl)
# NOTE(review): grid renamed from `tune_grid` (masked tune::tune_grid()).
xgb_grid <- expand.grid(nrounds = c(100, 300, 500),
                        max_depth = c(5, 10, 15),
                        eta = c(0.05, 0.2),
                        gamma = c(0.01, 1),
                        colsample_bytree = c(1),
                        subsample = c(1),
                        min_child_weight = c(1))
# NOTE(review): `tuneLength` dropped -- ignored when tuneGrid is supplied.
model_xgb_6 <- train(as.factor(Potability) ~., data = KNN_complete_sd_train,
                     method = "xgbTree",
                     trControl = trctrl,
                     tuneGrid = xgb_grid)
stopCluster(cl)
# CV accuracy across the grid.
plot(model_xgb_6)
# Hold-out evaluation.
test_predict <- predict(model_xgb_6, KNN_complete_sd_test)
confusionMatrix(test_predict, as.factor(KNN_complete_sd_test$Potability))
## -------------------------------------------------------------------------
# Manual XGBoost fit, analogous to the model_xgb_1 chunk above, for the
# KNN_complete_sd dataset (eta 0.05, depth 10, gamma 0.01, 500 rounds).
set.seed(100)
# Column 10 is the response (Potability); the rest are features.
train.data = as.matrix(KNN_complete_sd_train[, -10])
train.label = KNN_complete_sd_train[, 10]
test.data = as.matrix(KNN_complete_sd_test[, -10])
test.label = KNN_complete_sd_test[, 10]
xgb.train = xgb.DMatrix(data=train.data,label=(train.label))
# NOTE(review): xgb.test is built but never used (no watchlist).
xgb.test = xgb.DMatrix(data=test.data,label=(test.label))
# Selected hyperparameters.
num_class = length(unique(KNN_complete_sd_train$Potability))
params = list(
booster="gbtree",
eta = 0.05,
max_depth = 10,
gamma = 0.01,
subsample = 1,
colsample_bytree = 1,
objective="multi:softprob",
eval_metric="mlogloss",
num_class=num_class
)
# Train the model.
xgb.fit=xgb.train(
params=params,
data=xgb.train,
nrounds = 500
)
# Fitted model summary.
xgb.fit
# Prediction: one probability column per class.
xgb.pred = predict(xgb.fit,test.data,reshape=T)
xgb.pred = as.data.frame(xgb.pred)
# NOTE(review): column naming / label lookup via unique() depends on training
# row order -- verify it maps 0/1 correctly.
colnames(xgb.pred) = unique(KNN_complete_sd_train$Potability)
# Use the predicted label with the highest probability
xgb.pred$prediction = apply(xgb.pred,1,function(x) colnames(xgb.pred)[which.max(x)])
xgb.pred$label = unique(KNN_complete_sd_train$Potability)[test.label+1]
# Calculate the final accuracy
result_xgb6 = sum(xgb.pred$prediction==xgb.pred$label)/nrow(xgb.pred)
print(paste("Final Accuracy =",sprintf("%1.2f%%", 100*result_xgb6)))
# Append to the XGBoost accuracy comparison table.
temp <- data.frame("Nombre"="model_xgb_6", "accuracy"=c(result_xgb6))
accuracy_data2 <- rbind(accuracy_data2, temp)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Seleccionamos el método, Cross Validation y el número de folds, 5.
trctrl <- trainControl(method = "cv", number = 5)
# Lanzamos múltiples modelos con distintos valores para cada uno de los parámetros.
set.seed(100)
cl <- makePSOCKcluster(parallel::detectCores() - 1)
registerDoParallel(cl)
tune_grid <- expand.grid(nrounds=c(100,300,500),
max_depth = c(5, 10, 15),
eta = c(0.05, 0.2),
gamma = c(0.01, 1),
colsample_bytree = c(1),
subsample = c(1),
min_child_weight = c(1))
model_xgb_7 <- train(as.factor(Potability) ~., data = KNN_Tukey_Tuk_train, method = "xgbTree",
trControl=trctrl,
tuneGrid = tune_grid,
tuneLength = 10)
stopCluster(cl)
# Resultados de los modelos.
plot(model_xgb_7)
# Test y matriz de confusión
test_predict <- predict(model_xgb_7, KNN_Tukey_Tuk_test)
confusionMatrix(test_predict, as.factor(KNN_Tukey_Tuk_test$Potability))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
set.seed(100)
train.data = as.matrix(KNN_Tukey_Tuk_train[, -10])
train.label = KNN_Tukey_Tuk_train[, 10]
test.data = as.matrix(KNN_Tukey_Tuk_test[, -10])
test.label = KNN_Tukey_Tuk_test[, 10]
xgb.train = xgb.DMatrix(data=train.data,label=(train.label))
xgb.test = xgb.DMatrix(data=test.data,label=(test.label))
# Definición de los parámetros seleccionados
num_class = length(unique(KNN_Tukey_Tuk_train$Potability))
params = list(
booster="gbtree",
eta = 0.05,
max_depth = 15,
gamma = 0.01,
subsample = 1,
colsample_bytree = 1,
objective="multi:softprob",
eval_metric="mlogloss",
num_class=num_class
)
# Entrenamiento del modelo
xgb.fit=xgb.train(
params=params,
data=xgb.train,
nrounds = 500
)
# Resultados
xgb.fit
# Predicción
xgb.pred = predict(xgb.fit,test.data,reshape=T)
xgb.pred = as.data.frame(xgb.pred)
colnames(xgb.pred) = unique(KNN_Tukey_Tuk_train$Potability)
# Use the predicted label with the highest probability
xgb.pred$prediction = apply(xgb.pred,1,function(x) colnames(xgb.pred)[which.max(x)])
xgb.pred$label = unique(KNN_Tukey_Tuk_train$Potability)[test.label+1]
# Calculate the final accuracy
result_xgb7 = sum(xgb.pred$prediction==xgb.pred$label)/nrow(xgb.pred)
print(paste("Final Accuracy =",sprintf("%1.2f%%", 100*result_xgb7)))
temp <- data.frame("Nombre"="model_xgb_7", "accuracy"=c(result_xgb7))
accuracy_data2 <- rbind(accuracy_data2, temp)
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
# Seleccionamos el método, Cross Validation y el número de folds, 5.
trctrl <- trainControl(method = "cv", number = 5)
# Lanzamos múltiples modelos con distintos valores para cada uno de los parámetros.
set.seed(100)
cl <- makePSOCKcluster(parallel::detectCores() - 1)
registerDoParallel(cl)
tune_grid <- expand.grid(nrounds=c(100,300,500),
max_depth = c(5, 10, 15),
eta = c(0.05, 0.2),
gamma = c(0.01, 1),
colsample_bytree = c(1),
subsample = c(1),
min_child_weight = c(1))
model_xgb_8 <- train(as.factor(Potability) ~., data = KNN_sd_sd_train, method = "xgbTree",
trControl=trctrl,
tuneGrid = tune_grid,
tuneLength = 10)
stopCluster(cl)
# Resultados de los modelos.
plot(model_xgb_8)
# Test y matriz de confusión
test_predict <- predict(model_xgb_8, KNN_sd_sd_test)
confusionMatrix(test_predict, as.factor(KNN_sd_sd_test$Potability))
## -----------------------------------------------------------------------------------------------------------------------------------------------------------
set.seed(100)
train.data = as.matrix(KNN_sd_sd_train[, -10])
train.label = KNN_sd_sd_train[, 10]
test.data = as.matrix(KNN_sd_sd_test[, -10])
test.label = KNN_sd_sd_test[, 10]
xgb.train = xgb.DMatrix(data=train.data,label=(train.label))
xgb.test = xgb.DMatrix(data=test.data,label=(test.label))
# Definición de los parámetros seleccionados
num_class = length(unique(KNN_sd_sd_train$Potability))
params = list(
booster="gbtree",
eta = 0.05,
max_depth = 15,
gamma = 1,
subsample = 1,
colsample_bytree = 1,
objective="multi:softprob",
eval_metric="mlogloss",
num_class=num_class
)
# Entrenamiento del modelo
xgb.fit=xgb.train(
params=params,
data=xgb.train,
nrounds = 100
)
# Resultados
xgb.fit
# Predicción
xgb.pred = predict(xgb.fit,test.data,reshape=T)
xgb.pred = as.data.frame(xgb.pred)
colnames(xgb.pred) = unique(KNN_sd_sd_train$Potability)
# Use the predicted label with the highest probability
xgb.pred$prediction = apply(xgb.pred,1,function(x) colnames(xgb.pred)[which.max(x)])
xgb.pred$label = unique(KNN_sd_sd_train$Potability)[test.label+1]
# Calculate the final accuracy
result_xgb8 = sum(xgb.pred$prediction==xgb.pred$label)/nrow(xgb.pred)
print(paste("Final Accuracy =",sprintf("%1.2f%%", 100*result_xgb8)))
temp <- data.frame("Nombre"="model_xgb_8", "accuracy"=c(result_xgb8))
accuracy_data2 <- rbind(accuracy_data2, temp)
## ----fig.width=10-------------------------------------------------------------------------------------------------------------------------------------------
# Bar chart comparing the test accuracy of every XGBoost variant collected in
# `accuracy_data2`; bar labels show the accuracy as a percentage.
ggplot(accuracy_data2, aes(x=as.factor(Nombre), y=accuracy, fill=as.factor(Nombre))) +
  geom_bar(stat = "identity") + geom_text(aes(label=round(accuracy*100, digits = 2)), position=position_dodge(width=0.9), vjust=-0.25)
## -----------------------------------------------------------------------------
## Logistic-regression baselines. The original code repeated the same
## fit / predict / score block eight times, once per imputation/outlier
## variant of the data; it is factored into a helper plus a named dataset
## table. `accuracy_data_3` ends up with the same eight rows, in the same
## order (model_1_reg first), as the original rbind chain produced.
## -----------------------------------------------------------------------------
library(dplyr)

# Fit a binomial GLM on `train_df`, print its summary, and return (and echo)
# the accuracy of the 0.5-threshold predictions on `test_df`.
evaluate_glm <- function(train_df, test_df) {
  model <- glm(Potability ~ ., data = train_df, family = binomial)
  print(summary(model))
  probabilities <- predict(model, test_df, type = "response")
  predicted.classes <- ifelse(probabilities > 0.5, 1, 0)
  accuracy <- mean(predicted.classes == test_df$Potability)
  print(accuracy)
  accuracy
}

# Train/test pairs, in the same order as the original model_1_reg..model_8_reg.
glm_datasets <- list(
  model_1_reg = list(train = mF_complete_Tuk_train,  test = mF_complete_Tuk_test),
  model_2_reg = list(train = mF_complete_sd_train,   test = mF_complete_sd_test),
  model_3_reg = list(train = mF_Tukey_Tuk_train,     test = mF_Tukey_Tuk_test),
  model_4_reg = list(train = mF_sd_sd_train,         test = mF_sd_sd_test),
  model_5_reg = list(train = KNN_complete_Tuk_train, test = KNN_complete_Tuk_test),
  model_6_reg = list(train = KNN_complete_sd_train,  test = KNN_complete_sd_test),
  model_7_reg = list(train = KNN_Tukey_Tuk_train,    test = KNN_Tukey_Tuk_test),
  model_8_reg = list(train = KNN_sd_sd_train,        test = KNN_sd_sd_test)
)

# One accuracy row per dataset variant, bound into a single comparison table.
accuracy_data_3 <- do.call(rbind, lapply(names(glm_datasets), function(nm) {
  pair <- glm_datasets[[nm]]
  data.frame("Nombre" = nm, "accuracy" = evaluate_glm(pair$train, pair$test))
}))
## ----fig.width=10-------------------------------------------------------------
# Accuracy bar chart (labels = accuracy in %). Factored out of four verbatim
# ggplot copies; the helper returns the ggplot object, so calling it at top
# level still auto-prints exactly as the original inline calls did.
plot_accuracy <- function(df) {
  ggplot(df, aes(x = as.factor(Nombre), y = accuracy, fill = as.factor(Nombre))) +
    geom_bar(stat = "identity") +
    geom_text(aes(label = round(accuracy * 100, digits = 2)),
              position = position_dodge(width = 0.9), vjust = -0.25)
}
# Recap of the three accuracy tables collected above (accuracy_data_3 was
# plotted twice in the original; the repetition is preserved).
plot_accuracy(accuracy_data_3)
plot_accuracy(accuracy_data)
plot_accuracy(accuracy_data2)
plot_accuracy(accuracy_data_3)
# Best model of each family side by side; rows picked exactly as the original
# did: accuracy_data[9, ], accuracy_data2[6, ], accuracy_data_3[1, ].
accuracy_data_final <- data.frame(
  "Nombre" = c(accuracy_data[9, 1], accuracy_data2[6, 1], accuracy_data_3[1, 1]),
  "accuracy" = c(accuracy_data[9, 2], accuracy_data2[6, 2], accuracy_data_3[1, 2])
)
plot_accuracy(accuracy_data_final)
|
5238223c3a504bea410e1e2b4638206bbbd7f22e | 6cba97c8bcfb3071adf3ecbcdeb83f33c300a564 | /man/ease_swanping.Rd | 0e89b692567e25a153986e50ec2cd6776fc2aff5 | [] | no_license | GreenwoodLab/hidetify-1 | 33963a85cd76616b121e9a73efa9b1225e825c1d | f56c1657e858d85f486d8b9d5b80d8e193f3dccd | refs/heads/master | 2023-07-14T18:27:40.592036 | 2021-08-16T15:33:08 | 2021-08-16T15:33:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,033 | rd | ease_swanping.Rd | \name{ease_swanping}
\alias{ease_swanping}
\title{Compute the minimum of a sequence of asymmetric influence measures}
\description{
This function is part of the algorithm which identifies multiple influential observations in high-dimensional linear regression. It computes the minimum of the minima of a sequence of asymmetric influence measures in order to ease the swamping effect.}
\usage{
ease_swamping(x,y,xquant, yquant, inv_rob_sdx, rob_sdy,number_subset,size_subset,est_clean_set,asymvec,ep=0.1,alpha)
}
\arguments{
\item{x}{ Matrix with the values of the predictors.}
\item{y}{ Numeric vector of the response variable. }
\item{xquant }{ Matrix with the quantiles of the predictors. }
\item{yquant }{ Numeric vector of the quantiles of the response variable.}
\item{inv_rob_sdx}{ Numeric vector of the inverse of the median absolute deviation of the predictors. }
\item{rob_sdy}{ Median absolute deviation of the response variable. }
\item{number_subset}{ Number of random subsets.}
\item{size_subset}{ Size of the random subsets. The default is half of the initial sample size.}
\item{est_clean_set}{ The subject ids of the estimated clean set. The default is the initial sample.}
\item{asymvec}{ Numeric vector of the asymmetric values.}
\item{ep}{ Threshold value to ensure that the estimated clean set is not empty. The default value is 0.1.}
\item{alpha}{Significance level. }
}
\value{
An index vector identifying the estimated non-influential observations, obtained using a conservative approach
}
\author{
Amadou Barry \email{barryhafia@gmail.com}}
\examples{
## Simulate a dataset where the first 10 observations are influentials
require("MASS")
asymvec <- c(0.25,0.5,0.75)
beta_param <- c(3,1.5,0,0,2,rep(0,1000-5))
gama_param <- c(0,0,1,1,0,rep(1,1000-5))
# Covariance matrice for the predictors distribution
sigmain <- diag(rep(1,1000))
for (i in 1:1000)
{
for (j in i:1000)
{
sigmain[i,j] <- 0.5^(abs(j-i))
sigmain[j,i] <- sigmain[i,j]
}
}
set.seed(13)
x <- mvrnorm(100, rep(0, 1000), sigmain)
error_var <- rnorm(100)
y <- x %*% beta_param + error_var
# Generate influential observations
youtlier = y
youtlier[1:10] <- x[1:10,]%*%(beta_param + 1.2*gama_param) + error_var[1:10]
xquant <- apply(x,2,quantile,asymvec)
yquant <- quantile(y,asymvec)
inv_rob_sdx <- 1/apply(x,2,mad)
rob_sdy = mad(y)
number_subset = 5
size_subset = 100/2
est_clean_set = 1:100
alpha = 0.05
est_clean_set_ease_swamping = ease_swamping(x,youtlier,xquant, yquant, inv_rob_sdx, rob_sdy,number_subset,size_subset,est_clean_set,asymvec,ep=0.1,alpha)
}
\references{
Barry, A., Bhagwat, N., Misic, B., Poline, J.-B., and Greenwood, C. M. T. (2020). \emph{Asymmetric
influence measure for high dimensional regression}. Communications in Statistics - Theory and
Methods.
Barry, A., Bhagwat, N., Misic, B., Poline, J.-B., and Greenwood, C. M. T. (2021). \emph{An algorithm-based multiple detection influence measure for high dimensional regression using expectile}.
arXiv: 2105.12286 [stat]. arXiv: 2105.12286.
}
|
71dbb4463ae84d331ac61b85f9bfedd1ac792e02 | 671731b8b81379fd2f4617c309ba989d6aa5489d | /man/radio.Rd | a4c6579140f80ce030d504d017f42d74f3e7c077 | [
"MIT"
] | permissive | rpodcast/truelle | 959b9373633565b4a9a0f4eed034b5e3f7394452 | 3e4b797779d91fe2795f1f7339ff1157f748289c | refs/heads/main | 2023-05-31T05:23:40.995738 | 2021-07-09T10:35:44 | 2021-07-09T10:35:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 476 | rd | radio.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_helpers.R
\name{f7Radio}
\alias{f7Radio}
\title{Modified Framework7 radio input}
\usage{
f7Radio(inputId, label, choices = NULL, selected = NULL)
}
\arguments{
\item{inputId}{Radio input id.}
\item{label}{Radio label}
\item{choices}{List of choices. Must be a nested list.}
\item{selected}{Selected element. NULL by default.}
}
\description{
\code{f7Radio} creates a radio button input.
}
|
00c28c0b9106dce5b88560077ae6436b2b56af59 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /chickn/man/COMPR.Rd | 564e44fe6803fb2a48fbc282ec8f60bdd1282bbe | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,160 | rd | COMPR.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/COMPR.R
\name{COMPR}
\alias{COMPR}
\title{Compressive Orthogonal Matching Pursuit with Replacement}
\usage{
COMPR(
Data,
ind.col = 1:ncol(Data),
K,
Frequencies,
lower_b,
upper_b,
SK_Data,
maxIter = 300,
HardThreshold = TRUE,
options = list(tol_centroid = 1e-08, nIterCentroid = 1500, min_weight = 0, max_weight
= Inf, nIterLast = 1000, tol_global = 1e-12)
)
}
\arguments{
\item{Data}{A Filebacked Big Matrix n x N, data vectors are stored in the matrix columns.}
\item{ind.col}{Column indices, which indicate which data vectors are considered for clustering. By default the entire \code{Data} matrix.}
\item{K}{Number of clusters.}
\item{Frequencies}{A frequency matrix m x n with frequency vectors in rows.}
\item{lower_b}{A vector of the lower boundary of data.}
\item{upper_b}{A vector of the upper boundary.}
\item{SK_Data}{Data sketch vector of the length 2m. It can be computed using \code{\link{Sketch}}.}
\item{maxIter}{Maximum number of iterations in the global optimization with respect to cluster centroid vectors and their weights. Default is 300.}
\item{HardThreshold}{logical that indicates whether to perform the replacement. Default is TRUE.}
\item{options}{List of optimization parameters:
\itemize{
\item \code{tol_centroid} is a tolerance value for the centroid optimization. Default is 1e-8.
\item \code{nIterCentroid} is a maximum number of iterations in the centroid optimization (default is 1500).
\item \code{min_weight} is a lower bound for centroid weights (default is 0).
\item \code{max_weight} is an upper bound for centroids weights (default is Inf)
\item \code{nIterLast} is a number of iteration in the global optimization at the last algorithm iteration. Default is 1000.
\item \code{tol_global} is a tolerance value for the global optimization. Default is 1e-12.
}}
}
\value{
A matrix n x K with cluster centroid vectors in columns.
}
\description{
An implementation of the Compressive Orthogonal Matching Pursuit with Replacement algorithm
}
\details{
COMPR is an iterative greedy method, which alternates between expanding
the cluster centroid set \eqn{C} with a new element \eqn{c_i}, whose sketch is the most correlated to the residue and
the global minimization with respect to cluster centroids \eqn{c_1, \dots, c_K} and their weights \eqn{w_1, \dots, w_K}.
It clusters the data collection into K groups
by minimizing the difference between the compressed data version (data sketch) and
a linear combination of cluster centroid sketches, \emph{i.e.} \eqn{\|Sk(Data) - \sum_{i=1}^K w_i \cdot Sk(c_i)\|}.
}
\note{
This method is also referred to as Compressive K-means and it has been published in
\insertRef{DBLP:journals/corr/KerivenTTG16}{chickn}.
}
\examples{
X = matrix(rnorm(1e5), ncol=1000, nrow = 100)
lb = apply(X, 1, min)
ub = apply(X, 1, max)
X_FBM = bigstatsr::FBM(init = X, ncol=1000, nrow = 100)
out = GenerateFrequencies(Data = X_FBM, m = 20, N0 = ncol(X_FBM))
SK = Sketch(Data = X_FBM, W = out$W)
C <- COMPR(Data = X_FBM, K = 2, Frequencies = out$W, lower_b = lb, upper_b = ub, SK_Data = SK)
}
|
7cedc3364738d39cf7ecad6b9cb98d4e48507f08 | 51bdc2a36490541ae18ccfb146b428a26fc9e1ea | /data/nutrients-data.R | 1a90555cf0a843c3971e0ebff60186af02cf79ce | [] | no_license | ssmufer/nutritionR | fa0301831e2300f1d721ba368cb113f6fac9f760 | 6312a18273b483a9590aea3db7514f5a7d90ce3f | refs/heads/master | 2023-04-15T11:05:35.163095 | 2019-09-22T13:09:51 | 2019-09-22T13:09:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 592 | r | nutrients-data.R | #' Nutrients list for use in `nutrient_single()``
#'
#' Use this list to convert between plain language and attributes.
#'
#'
#' @docType data
#' @usage data(nutrients)
#' @format An object of class \code{"list"}
#' @keywords datasets
#'
#' @source \href{https://docs.google.com/document/d/1_q-K-ObMTZvO0qUEAxROrN3bwMujwAN25sLHwJzliK0/edit#}{Nutritionix API v2 - Documentation}
#' @source \href{https://docs.google.com/spreadsheets/d/14ssR3_vFYrVAidDLJoio07guZM80SMR5nxdGpAX-1-A/edit#gid=0}{Nutritionix API v2 - Full Nutrient USDA Field Mapping}
#'
#' @examples
#' data(nutrients)
"nutrients"
|
a1f25ddee3fb87b72d27b5e33cb0d29e9eb571f4 | c30ee92ad91f1c7d94ff9ae0e61e73f12455edae | /R/catn.R | 0475263f93ff9f93c5ccf116fd9dbc4283419af5 | [
"MIT"
] | permissive | nelsonroque/RM2C2_dev | 41ee60ca597cdd8e432f72a4d919a47996c1b414 | 56e7a1388321da587e7c0333e440585bd4301186 | refs/heads/master | 2021-06-24T03:28:56.920524 | 2021-05-13T15:22:19 | 2021-05-13T15:22:19 | 227,636,776 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 110 | r | catn.R | #' RM2C2dev
#' Print a value followed by `n` newlines
#'
#' Writes `x` with `cat()`, then emits `n` newline characters.
#' Fix: the original used `for (i in 1:n)`, which printed TWO newlines when
#' `n = 0` because `1:0` is `c(1, 0)`; `strrep()` handles `n = 0` correctly
#' (it yields the empty string).
#'
#' @param x Object passed to `cat()`.
#' @param n Number of trailing newlines; defaults to 1. Must be non-negative.
#' @return `NULL`, invisibly.
#' @name catn
#' @export
catn <- function(x, n = 1) {
  cat(x)
  cat(strrep("\n", n))
  invisible(NULL)
}
b99fe8441dc80385f784058fbe672cff2510ec51 | a4139960f74b1012fa130c4bb604c1bb09882059 | /script.R | 08261b25d0653f1921bcfc1a40c5629468ff1dc5 | [] | no_license | yanellynobrega/AprendizajeNoSupervisado | f88b6a56a47a6baab077a7cf5b1a4c3dbaebe579 | 0c42e27a5acb90d359c63e58bcdce9a3a6178584 | refs/heads/master | 2021-01-22T17:17:49.441120 | 2016-04-11T04:25:41 | 2016-04-11T04:25:41 | 55,326,582 | 0 | 0 | null | 2016-04-03T02:50:50 | 2016-04-03T02:50:49 | null | UTF-8 | R | false | false | 15,055 | r | script.R | install.packages('scatterplot3d')
library(scatterplot3d)
################# a.csv ############################
# Load the 2-D toy dataset; the third column is the true cluster label
# (0-based on disk, shifted to 1-based to match R cluster ids).
data_a = read.csv('a.csv', header = F)
colnames(data_a) <- c("X", "Y", "CLASS")
data_a$CLASS <- data_a$CLASS + 1
data_a <- data_a[order(data_a$CLASS), ]
#################################################
############ k-means on a.csv ######################
km_a <- kmeans(data_a[, 1:2], 3)
plot(data_a$X, data_a$Y, col = km_a$cluster)
points(km_a$centers[, c("X", "Y")], col = 1:3, pch = 19, cex = 2)
MatrizConfusionK_a <- table(true = data_a$CLASS, predicted = km_a$cluster)
MatrizConfusionK_a
# Hit rate from the diagonal of the confusion matrix. sum(diag()) replaces the
# original element-by-element loop. NOTE(review): this assumes the k-means
# cluster ids happen to align with the true labels -- confirm per run.
aciertos_ka <- (sum(diag(MatrizConfusionK_a)) / nrow(data_a)) * 100
aciertos_ka
####################################################
############ Hierarchical clustering (a.csv) #######
# Distance matrix on the two coordinates only (true-label column removed).
data_a1 = data_a
data_a1$CLASS <- NULL
data_a1 = as.matrix(data_a1)
distancia = dist(data_a1)
plot(distancia)
####################################################
############ single linkage (a.csv) ################
# Fix applied in each plot below: the original passed `y <- data_a[,2]`
# (a global assignment) as the second argument; a plain named argument
# avoids creating a stray global `y`.
cluster_as <- hclust(distancia, method = "single")
corte_as <- cutree(cluster_as, k = 3)
plot(x = data_a[, 1], y = data_a[, 2], col = corte_as)
MatrizConfusion_as <- table(true = data_a$CLASS, predicted = corte_as)
MatrizConfusion_as
# sum(diag()) also removes the original accumulator `as`, which shadowed base::as.
aciertos_as <- (sum(diag(MatrizConfusion_as)) / nrow(data_a)) * 100
aciertos_as
######################################################
############ complete linkage (a.csv) ##############
cluster_ac <- hclust(distancia, method = "complete")
corte_ac <- cutree(cluster_ac, k = 3)
plot(x = data_a[, 1], y = data_a[, 2], col = corte_ac)
MatrizConfusion_ac <- table(true = data_a$CLASS, predicted = corte_ac)
MatrizConfusion_ac
aciertos_ac <- (sum(diag(MatrizConfusion_ac)) / nrow(data_a)) * 100
# Fix: every other section echoes its hit rate; this one previously did not.
aciertos_ac
####################################################
############ average linkage (a.csv) ###############
cluster_aa <- hclust(distancia, method = "average")
corte_aa <- cutree(cluster_aa, k = 3)
plot(x = data_a[, 1], y = data_a[, 2], col = corte_aa)
MatrizConfusion_aa <- table(true = data_a$CLASS, predicted = corte_aa)
MatrizConfusion_aa
aciertos_aa <- (sum(diag(MatrizConfusion_aa)) / nrow(data_a)) * 100
aciertos_aa
###################################################
########### Mejor modelo #####################
#############################################
######a_big.csv
# Larger version of a.csv. NOTE(review): unlike the other datasets, only
# k-means is run here -- no hierarchical clustering or hit-rate computation.
data_a_big = read.csv('a_big.csv', header = F);
colnames(data_a_big) <- c("X","Y","CLASS");
# Shift the 0-based labels to 1-based so they compare with R cluster ids.
data_a_big$CLASS <- data_a_big$CLASS + 1;
plot(data_a_big$X, data_a_big$Y);
data_a_big <- data_a_big[order(data_a_big$CLASS), ]
# k-means with 3 clusters, matching the structure seen in a.csv; centers are
# drawn over the colored scatter plot.
k <- kmeans(data_a_big[,1:2], 3)
plot(data_a_big$X, data_a_big$Y, col = k$cluster)
points(k$centers[, c("X","Y")], col = 1:3, pch = 19, cex = 2)
# Confusion matrix of true labels vs. k-means assignments (auto-printed).
MatrixConfusionk_a_big <- table(data_a_big$CLASS,k$cluster)
MatrixConfusionk_a_big
################ moon.csv ########################
# Two-class 2-D dataset; third column is the true label (0-based on disk,
# shifted to 1-based). The raw data is plotted before the label shift, as in
# the original.
data_moon = read.csv('moon.csv', header = F)
colnames(data_moon) <- c("X", "Y", "CLASS")
plot(data_moon$X, data_moon$Y)
data_moon$CLASS <- data_moon$CLASS + 1
##############################################
############ k-means on moon.csv ###################
km_moon <- kmeans(data_moon[, 1:2], 2)
plot(data_moon$X, data_moon$Y, col = km_moon$cluster)
points(km_moon$centers[, c("X", "Y")], col = 1:2, pch = 19, cex = 2)
MatrizConfusionK_moon <- table(true = data_moon$CLASS, predicted = km_moon$cluster)
MatrizConfusionK_moon
# sum(diag()) replaces the original accumulation loop over the diagonal.
aciertos_km <- (sum(diag(MatrizConfusionK_moon)) / nrow(data_moon)) * 100
aciertos_km
####################################################
############ Hierarchical clustering (moon.csv) ####
# Distance matrix on the coordinates only (true-label column removed).
data_moon1 = data_moon
data_moon1$CLASS <- NULL
data_moon1 = as.matrix(data_moon1)
distancia = dist(data_moon1)
####################################################
############ single linkage (moon.csv) #############
# Fix applied to each plot below: named `y =` argument instead of the
# original `y <- ...` global-assignment-as-argument.
cluster_ms <- hclust(distancia, method = "single")
corte_ms <- cutree(cluster_ms, k = 2)
plot(x = data_moon[, 1], y = data_moon[, 2], col = corte_ms)
MatrizConfusion_ms <- table(true = data_moon$CLASS, predicted = corte_ms)
MatrizConfusion_ms
aciertos_ms <- (sum(diag(MatrizConfusion_ms)) / nrow(data_moon)) * 100
aciertos_ms
######################################################
############ complete linkage (moon.csv) ###########
cluster_mc <- hclust(distancia, method = "complete")
corte_mc <- cutree(cluster_mc, k = 2)
plot(x = data_moon[, 1], y = data_moon[, 2], col = corte_mc)
MatrizConfusion_mc <- table(true = data_moon$CLASS, predicted = corte_mc)
MatrizConfusion_mc
aciertos_mc <- (sum(diag(MatrizConfusion_mc)) / nrow(data_moon)) * 100
aciertos_mc
####################################################
############ average linkage (moon.csv) ############
# (Header fixed: the original comment said "a.csv" here.)
cluster_ma <- hclust(distancia, method = "average")
corte_ma <- cutree(cluster_ma, k = 2)
plot(x = data_moon[, 1], y = data_moon[, 2], col = corte_ma)
MatrizConfusion_ma <- table(true = data_moon$CLASS, predicted = corte_ma)
MatrizConfusion_ma
aciertos_ma <- (sum(diag(MatrizConfusion_ma)) / nrow(data_moon)) * 100
aciertos_ma
###################################################
###### good_luck.csv ###############################
# 10-dimensional dataset; column V11 holds the true class label (0-based on
# disk, shifted to 1-based).
data_good_luck = read.csv('good_luck.csv', header = F)
data_good_luck$V11 <- data_good_luck$V11 + 1
##################################################
########## k-means on good_luck.csv ################
km_gl <- kmeans(data_good_luck[, 1:10], 2)
plot(data_good_luck[, 1:10], col = km_gl$cluster)
MatrizConfusionK_gl <- table(true = data_good_luck$V11, predicted = km_gl$cluster)
MatrizConfusionK_gl
# Fix: the original summed the diagonal of MatrizConfusionK_moon and divided
# by nrow(data_moon) (copy/paste from the moon section). Use this dataset's
# own confusion matrix and row count.
aciertos_kgl <- (sum(diag(MatrizConfusionK_gl)) / nrow(data_good_luck)) * 100
aciertos_kgl
##################################################
############ Hierarchical clustering (good_luck.csv) ###########
# Fix: the label column here is V11, not CLASS. The original removed a
# non-existent CLASS column, so the distance matrix silently included the
# true labels as an eleventh coordinate.
data_good_luck1 = data_good_luck
data_good_luck1$V11 <- NULL
data_good_luck1 = as.matrix(data_good_luck1)
distancia = dist(data_good_luck1)
####################################################
############ single linkage (good_luck.csv) ########
cluster_gls <- hclust(distancia, method = "single")
corte_gls <- cutree(cluster_gls, k = 2)
# Only the first two of the ten dimensions are plotted.
plot(x = data_good_luck[, 1], y = data_good_luck[, 2], col = corte_gls)
MatrizConfusion_gls <- table(true = data_good_luck$V11, predicted = corte_gls)
MatrizConfusion_gls
# Fix: divide by this dataset's size, not nrow(data_moon).
aciertos_gls <- (sum(diag(MatrizConfusion_gls)) / nrow(data_good_luck)) * 100
aciertos_gls
######################################################
############ complete linkage (good_luck.csv) ######
cluster_glc <- hclust(distancia, method = "complete")
corte_glc <- cutree(cluster_glc, k = 2)
plot(x = data_good_luck[, 1], y = data_good_luck[, 2], col = corte_glc)
MatrizConfusion_glc <- table(true = data_good_luck$V11, predicted = corte_glc)
MatrizConfusion_glc
# Fix: divide by this dataset's size, not nrow(data_moon).
aciertos_glc <- (sum(diag(MatrizConfusion_glc)) / nrow(data_good_luck)) * 100
aciertos_glc
####################################################
############ average linkage (good_luck.csv) #######
cluster_gla <- hclust(distancia, method = "average")
corte_gla <- cutree(cluster_gla, k = 2)
plot(x = data_good_luck[, 1], y = data_good_luck[, 2], col = corte_gla)
MatrizConfusion_gla <- table(true = data_good_luck$V11, predicted = corte_gla)
MatrizConfusion_gla
aciertos_gla <- (sum(diag(MatrizConfusion_gla)) / nrow(data_good_luck)) * 100
aciertos_gla
######################################################
#############h.csv#####################################
data_h = read.csv('h.csv', header = F)
data_h$V5 = floor(data_h$V4)-3
scatterplot3d(data_h$V1, data_h$V2, data_h$V3, color = data_h$V5)
#########################################################
############ kmedias h.csv #########################
km_h <- kmeans(data_h[,1:3], 11)
scatterplot3d(data_h$V1, data_h$V2, data_h$V3, color = km_h$cluster)
points(km_h$centers[, c("X","Y")], col = 1:3, pch = 19, cex = 2)
MatrizConfusionK_h <- table(true = data_h$V5, predicted = km_h$cluster)
MatrizConfusionK_h
kh = 0;
for (i in 1:ncol(MatrizConfusionK_h)){
kh = kh + MatrizConfusionK_h[i,i]
}
aciertos_kh <- (kh / nrow(data_h))*100
aciertos_kh
####################################################
############ Cluster jerarquicos (h.csv) ###########
data_h1 = data_h
data_h1$V5 <- NULL
data_h1$V4 <- NULL
data_h1= as.matrix(data_h1)
distancia = dist(data_h1)
####################################################
############single method (h.csv) ##################
cluster_hs <- hclust(distancia, method="single")
corte_hs <- cutree(cluster_hs, k=11)
scatterplot3d(data_h$V1, data_h$V2, data_h$V3, color = corte_hs)
MatrizConfusion_hs <- table(true = data_h$V5, predicted = corte_hs)
MatrizConfusion_hs
hs = 0
for (i in 1:ncol(MatrizConfusion_hs)){
hs = hs + MatrizConfusion_hs[i,i]
}
aciertos_hs <- (hs / nrow(data_h))*100
aciertos_hs
######################################################
############complete method (h.csv) ##################
cluster_hc <- hclust(distancia, method="complete")
corte_hc <- cutree(cluster_hc, k=11)
scatterplot3d(data_h$V1, data_h$V2, data_h$V3, color = corte_hc)
MatrizConfusion_hc <- table(true = data_h$V5, predicted = corte_hc)
MatrizConfusion_hc
hc = 0
for (i in 1:ncol(MatrizConfusion_hc)){
hc = hc + MatrizConfusion_hc[i,i]
}
aciertos_hc <- (hc / nrow(data_h))*100
aciertos_hc
######################################################
############average method (h.csv) ##################
cluster_ha <- hclust(distancia, method="complete")
corte_ha <- cutree(cluster_ha, k=11)
scatterplot3d(data_h$V1, data_h$V2, data_h$V3, color = corte_ha)
MatrizConfusion_ha <- table(true = data_h$V5, predicted = corte_ha)
MatrizConfusion_ha
ha = 0
for (i in 1:ncol(MatrizConfusion_ha)){
ha = ha + MatrizConfusion_ha[i,i]
}
aciertos_ha <- (ha / nrow(data_h))*100
aciertos_ha
######################################################
#################s.csv######################
data_s = read.csv('s.csv', header = F)
data_s$V5 = ceiling(data_s$V4) + 5
scatterplot3d(data_s$V1, data_s$V2, data_s$V3, color = data_s$V5)
#########################################################
############ kmedias s.csv #########################
km_s <- kmeans(data_s[,1:3], 10)
scatterplot3d(data_s$V1, data_s$V2, data_s$V3, color = km_s$cluster)
points(km_s$centers[, c("X","Y")], col = 1:3, pch = 19, cex = 2)
MatrizConfusionK_s <- table(true = data_s$V5, predicted = km_s$cluster)
MatrizConfusionK_s
ks = 0;
for (i in 1:ncol(MatrizConfusionK_s)){
ks = ks + MatrizConfusionK_s[i,i]
}
aciertos_ks <- (ks / nrow(data_s))*100
aciertos_ks
####################################################
############ Cluster jerarquicos (s.csv) ###########
data_s1 = data_s
data_s1$V5 <- NULL
data_s1$V4 <- NULL
data_s1= as.matrix(data_s1)
distancia = dist(data_s1)
####################################################
############single method (s.csv) ##################
cluster_ss <- hclust(distancia, method="single")
corte_ss <- cutree(cluster_ss, k=10)
scatterplot3d(data_s$V1, data_s$V2, data_s$V3, color = corte_ss)
MatrizConfusion_ss <- table(true = data_s$V5, predicted = corte_ss)
MatrizConfusion_ss
ss = 0
for (i in 1:ncol(MatrizConfusion_ss)){
ss = ss + MatrizConfusion_ss[i,i]
}
aciertos_ss <- (ss / nrow(data_s))*100
aciertos_ss
######################################################
############complete method (s.csv) ##################
cluster_sc <- hclust(distancia, method="single")
corte_sc <- cutree(cluster_sc, k=11)
scatterplot3d(data_s$V1, data_s$V2, data_s$V3, color = corte_sc)
MatrizConfusion_sc <- table(true = data_s$V5, predicted = corte_sc)
MatrizConfusion_sc
sc = 0
for (i in 1:ncol(MatrizConfusion_sc)){
sc = sc + MatrizConfusion_sc[i,i]
}
aciertos_sc <- (sc / nrow(data_s))*100
aciertos_sc
######################################################
############average method (s.csv) ##################
cluster_sa <- hclust(distancia, method="average")
corte_sa <- cutree(cluster_sa, k=11)
saatterplot3d(data_s$V1, data_s$V2, data_s$V3, color = corte_sa)
MatrizConfusion_sa <- table(true = data_s$V5, predicted = corte_sa)
MatrizConfusion_sa
sa = 0
for (i in 1:ncol(MatrizConfusion_sa)){
sa = sa + MatrizConfusion_sa[i,i]
}
aciertos_sa <- (sa / nrow(data_s))*100
aciertos_sa
######################################################
###############guess.csv##########################
data_guess = read.csv('guess.csv', header = F)
aux = rep(0, 30)
for (k in 1:30) {
grupos = kmeans(data_guess, k)
aux[k] = grupos$tot.withinss
}
plot(aux, type = "b", main = "Codo de Jambu")
####################################################
############ kmedias guess.csv #########################
km_guess <- kmeans(data_guess[,1:2], 5)
plot(data_guess$V1, data_guess$V2, col = km_guess$cluster)
####################################################
############ Cluster jerarquicos (guess.csv) ###########
data_guess1 = data_guess
data_guess1= as.matrix(data_guess1)
distancia = dist(data_guess1)
####################################################
############single method (guess.csv) ##################
cluster_guess_s <- hclust(distancia, method="single")
corte_guess_s <- cutree(cluster_guess_s, k=5)
plot(x = data_guess[,1], y <- data_guess[,2],col = corte_guess_s)
######################################################
############complete method (guess.csv) ##################
cluster_guess_c <- hclust(distancia, method="complete")
corte_guess_c <- cutree(cluster_guess_c, k=5)
plot(x = data_guess[,1], y <- data_guess[,2],col = corte_guess_c)
######################################################
############average method (guess.csv) ##################
cluster_guess_a <- hclust(distancia, method="average")
corte_guess_a <- cutree(cluster_guess_a, k=5)
plot(x = data_guess[,1], y <- data_guess[,2],col = corte_guess_a)
######################################################
###############help.csv###########################
data_help = read.csv('help.csv', header = F)
data_help$V5 = ceiling(data_help$V4) + 5
scatterplot3d(data_help$V1, data_help$V2, data_help$V3, color = data_help$V5)
table(data_help$V5)
|
60f8e43e306380252faf2879e98504b509c8432d | f8bff081c75336cc4fbd65ee6dfcfdaab9c98646 | /ui.R | 09dc66f6c3fff405f9e3e988e305bc5b4b6135ee | [] | no_license | EEPlei/LaQuinta-Denny-s | 340f373930b69bc5f73d465fca50b64236e01682 | ade7e8e6df9090ba89dd8d99ead0be0aa7318687 | refs/heads/master | 2021-01-18T13:17:13.457876 | 2017-02-02T22:49:49 | 2017-02-02T22:49:49 | 80,748,504 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,930 | r | ui.R | library(shiny)
total_priors = c("Poisson" = "pois", "Negative Binomial" = "nbinom")
prop_priors = c("Beta" = "beta", "Truncated Normal"="tnorm")
shinyUI(
fluidPage(
titlePanel(
"Karl Broman's Socks"
),
sidebarPanel(
numericInput("n_sims", h4("Simulations:"),value = 1000, min = 100, step = 1),
hr(),
h4("Data:"),
sliderInput("n_pairs", "Number of picked sock pairs:", min=0, max=30, value=0, step=1),
sliderInput("n_odd", "Number of picked odd socks:", min=0, max=30, value=11, step=1),
hr(),
h4("Priors:"),
selectInput("total_prior", "Prior for total", total_priors),
selectInput("prop_prior", "Prior for proportion", prop_priors),
hr(),
h4("Hyperparameters:"),
conditionalPanel(
condition="input.total_prior == 'pois'",
sliderInput("total_lambda",HTML("Total prior - λ"), value = 50, min=1, max=120)
),
conditionalPanel(
condition="input.total_prior == 'nbinom'",
numericInput("total_r",HTML("Total prior - r"), value = 50, min=1, max=120),
numericInput("total_p",HTML("Total prior - p"), value = 0.5, min=0, max=1)
),
conditionalPanel(
condition="input.prop_prior == 'beta'",
numericInput("prop_alpha",HTML("Total prior - α"), value = 1/3 , min=0, max=NA),
numericInput("prop_beta",HTML("Total prior - β"), value = 1, min=0, max=NA)
),
conditionalPanel(
condition="input.prop_prior == 'tnorm'",
numericInput("prop_mu",HTML("Proportion prior - μ"), value = 0.5, min=0.0000001, max=0.99999999),
numericInput("prop_sigma",HTML("Proportion prior - σ"), value = 0.1, min=0, max=NA)
)
),
mainPanel(
h4("Results:"),
#Total Socks in Laundry
plotOutput("total_plot"),
br(),
#Proportion of Socks in Pairs
plotOutput("prop_plot")
)
)
) |
ce931b698dd46185dff1c4bcc462fe8cb541364e | 71db7f612a3d6e3848bc747d942f43b17a9325ca | /server.R | 0528068b91c97f6b0eed47faf7d2963b15e16cc2 | [] | no_license | sathms/predictapp | 33d3ce6e950fccc4f66e55d7a6dbb507f4ea89e8 | 0310a09678ee56595ae8e0884e21d717666b1bd1 | refs/heads/master | 2022-11-21T13:19:18.834149 | 2020-07-27T16:01:41 | 2020-07-27T16:01:41 | 282,946,202 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,648 | r | server.R | library(shiny)
library(ggplot2)
library(caret)
library(lattice)
library(randomForest)
library(e1071)
data(mtcars)
mdata <- mtcars
mdata$am <- factor(mdata$am, labels = c("Automatic", "Manual"))
set.seed(7826)
inTrain <- createDataPartition(mdata$am, p = 0.7, list = FALSE)
train <- mdata[inTrain, ]
valid <- mdata[-inTrain, ]
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
k <- reactive({
k <- input$kfoldn
})
formulaText <- reactive({
if(length(input$checkGroup) > 0){
paste("am ~", paste(input$checkGroup, collapse = " + "), collapse = " ")
} else {
paste("am ~", "mpg", collapse = " ")
}
})
fit_model <- reactive({
control <- trainControl(method = "cv", number = k())
train(as.formula(formulaText()),
data = train,
method = "rf",
trControl = control)
})
predict_rf <- reactive({
predict_rf <- predict(results, valid)
conf_rf <- confusionMatrix(valid$cyl, predict_rf)
print(conf_rf)
})
observeEvent(input$do,{
removeUI({
selector = "div#welcome"
}
)
results <- fit_model()
predict_rf <- predict(results, valid)
conf_rf <- confusionMatrix(valid$am, predict_rf)
if (results$method == 'rf')
output$fitmethod <- renderText("Random Forest")
output$methodHeadings <- renderUI({
h4("Training Method:")
})
output$methodControls <- renderUI({
textOutput("fitmethod")
})
output$coeffs <- renderText(input$checkGroup)
output$coeffsHeadings <- renderUI({
h4("Predictors used for Training:")
})
output$coeffsControls <- renderUI({
textOutput("coeffs")
})
output$fitresult <-renderTable(results$results)
output$fitHeadings <- renderUI({
h4("Results from Training:")
})
output$fitControls <- renderUI({
tableOutput("fitresult")
})
output$predtable <-renderTable(conf_rf$table)
output$predHeadings <- renderUI({
h4("Results from Predictions on validation data:")
})
output$predControls <- renderUI({
tableOutput("predtable")
})
output$byclass <-renderPrint(conf_rf$byClass)
output$classHeadings <- renderUI({
h4("Predictions on Validation data: By Class:")
})
output$classControls <- renderUI({
textOutput("byclass")
})
})
})
|
4ee16269ce07a4eb4333f80126ee1f15bb8d12a7 | 77157987168fc6a0827df2ecdd55104813be77b1 | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615773241-test.R | 8dcb649de100e074b9d4b368568713a73b432ae2 | [] | no_license | akhikolla/updatedatatype-list2 | e8758b374f9a18fd3ef07664f1150e14a2e4c3d8 | a3a519440e02d89640c75207c73c1456cf86487d | refs/heads/master | 2023-03-21T13:17:13.762823 | 2021-03-20T15:46:49 | 2021-03-20T15:46:49 | 349,766,184 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 345 | r | 1615773241-test.R | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307390365e+77, 1.50066211734794e+225, 1.61662656971842e+213, 2.16452887909903e+294, 2.10747668061271e+101, 5.78517196954443e+98, 2.02410200510026e-79, 0, 0, 0), .Dim = c(5L, 2L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
70d4e99d2ec9afe18457a8c26e2496b568a9268a | d614aa32f52b4307af5a35945a3641a23d8b535b | /R/feOx.R | ba89bb5860d51316e275cdf8b84a71d3cb97c6ce | [] | no_license | kennybolster/kbolster | 6c664fd0eeccd2a8562822f5294cc88f56e97d8a | 953d4648777d1eeb590a772adde227788791aaca | refs/heads/master | 2020-09-06T05:32:18.083205 | 2020-04-03T21:20:45 | 2020-04-03T21:20:45 | 220,339,438 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 883 | r | feOx.R | #' Iron(II) oxidation kinetics
#'
#' Given temperature, pH, O2 concentrations, and ionic strength, calculate
#' the first order reaction constant for iron(II) oxidation by oxygen. Based
#' on the equations of Millero et al. GCA. 1987.
#' @param temp Temperature in degrees C
#' @param ph pH
#' @param o2 Dissolved oxygen concentration, in umol/kg
#' @param I Ionic strength. Defaults to 0.7 (35 psu seawater)
#' @return Oxidation rate constant (kg H20 / mol Fe(II) / min)
#' @export
#' @examples feOx(temp = 20, ph = 8.2, o2 = 100)
feOx <- function(temp, ph, o2, I = 0.7){
# temp should be in celsius
# o2 should be in umol / kg
tkelvin <- temp + 273.15
poh <- 14 - ph
concohmolar <- 10^-poh
concoh <- concohmolar / 1.025
logk0 <- 21.56 - 1545 / tkelvin
logk <- logk0 - (3.29 * I^0.5) + (1.52 * I)
conco2 <- o2 / 1e6
k <- 10^logk
k1 <- k * concoh^2 * conco2
} |
a385debe81fcaa186dd34a10859cbc24bbbf94e6 | 3c52fb22c575a0d62cb86ac929e38fc46f174f74 | /code/fit_granger_causality.R | 5f5b9681322ee0dc2e794c11f422a8aef138c070 | [] | no_license | rmtrane/directed_causal_graphs | c4c4a1697e8f7659622fd61352b1e2de601c8865 | d2035902c07cf0b9f4e73a55a3a6e44792eca6a7 | refs/heads/master | 2020-09-03T03:56:15.000158 | 2019-11-28T02:29:03 | 2019-11-28T02:29:03 | 219,379,459 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,291 | r | fit_granger_causality.R | library(tidyverse)
source(paste0(here::here(), "/code/granger_causality.R"))
input_folder <- ifelse(interactive(), "data/large_from_sparse", commandArgs(TRUE)[1])
network_type <- ifelse(interactive(), "large_from_sparse", str_split(input_folder, "/", simplify = T)[,2])
output_folder <- paste(here::here(), "results",
ifelse(interactive(), "large_from_sparse", network_type), sep = "/")
input_datafile <- paste(here::here(), input_folder, "sim_data_normalized.csv", sep = "/")
#input_networkfile <- paste(input_folder, "sparse_network.tsv", sep = "/")
sim_data <- read_csv(file = input_datafile)
lm_fits <- granger_causality(sim_data %>% filter(as.numeric(ID) < 501),
gene_cols = colnames(sim_data[,-c(1,2)]), ID_col = "ID") %>%
unnest(cols = lms)
network <- lm_fits %>%
mutate(adj.p.value = p.adjust(p.value, method = "BH")) %>%
filter(adj.p.value < 0.05,
!str_detect(term, "ID")) %>%
select(node2 = child_node, node1 = term, estimate) %>%
mutate(node1 = str_remove_all(node1, pattern = "lag\\(|\\)"),
edges = paste(node1, node2, sep = "-"))
if(!dir.exists(output_folder))
dir.create(output_folder, recursive = TRUE)
write_tsv(network, path = paste0(output_folder, "/granger_causal_network.tsv"))
|
acad878d8092d706e8db96f02b2d49e8067d8a7a | c7c9a9753e068d93b32f90a9bafff236c0c85c11 | /Expt2_EmotionStudies/generate_emo_stims.r | 5fc69eef4d67a5c7bb425ed28746f6d520645319 | [] | no_license | hughrabagliati/CFS_Compositionality | bc1af660df540d43fc73414a029a1a0f941521ca | 364270a3a61289f54dffa4e1468b3c3afc158dca | refs/heads/master | 2021-01-17T15:18:20.755866 | 2017-07-04T10:31:45 | 2017-07-04T10:31:45 | 52,354,314 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 802 | r | generate_emo_stims.r | library(doBy)
emo.stims <- read.csv("all_data_with_w1w2_ratings.csv")
sklar.emo.stims <- subset(emo.stims, prime_semantics %in% c("Negative phrase","Neutral phrase"))
sklar.emo.stims <- summaryBy(MeanAffectivity + W1.score + W2.score ~ Condition + prime + prime_semantics, data = sklar.emo.stims, keep.names = T,na.rm = T)
mean(c(sklar.emo.stims$W1.score,sklar.emo.stims$W2.score))
sd(c(sklar.emo.stims$W1.score,sklar.emo.stims$W2.score))
new.emo.stims <- subset(emo.stims, prime_semantics %in% c("Negative sentence","Neutral sentence"))
new.emo.stims <- summaryBy(MeanAffectivity + W1.score + W2.score ~ Condition + prime + prime_semantics, data = new.emo.stims, keep.names = T,na.rm = T)
mean(c(new.emo.stims$W1.score,new.emo.stims$W2.score))
sd(c(new.emo.stims$W1.score,new.emo.stims$W2.score)) |
2b2f7042bd569a4de478c715dcd69069859a6faf | b2757d8cca182148d664e55a5d29aa1b82e453e5 | /script/process_modelling/rprom/smartcoat/test.R | 40d72bd0c49b83c7d71f52749b1dde4f24912b76 | [] | no_license | genpack/tutorials | 99c5efb6b0c81a6ffeda0878a43ad13b1987ceeb | c1fe717272f06b81ca7a4855a0b874e98fde4c76 | refs/heads/master | 2023-06-10T21:18:27.491378 | 2023-06-06T00:15:14 | 2023-06-06T00:15:14 | 163,214,555 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,392 | r | test.R | ##### Setup ####
library(magrittr)
library(dplyr)
library(viser)
path.data = paste(getwd(), 'script', 'process_modelling', 'promer', 'smartcoat', 'data', sep = '/')
path.promer = paste(getwd(), '..', '..', 'packages', 'promer', 'R', sep = '/')
path.gener = paste(getwd(), '..', '..', 'packages', 'gener', 'R', sep = '/')
source(path.promer %>% paste('tsvis.R', sep = '/'))
source(path.promer %>% paste('transys.R', sep = '/'))
source(path.gener %>% paste('gener.R', sep = '/'))
source(path.gener %>% paste('linalg.R', sep = '/'))
source(path.gener %>% paste('io.R', sep = '/'))
##### Read Data ####
read.csv(path.data %>% paste('SmartCoat_Eventlog_March_2016.csv', sep = '/')) %>%
mutate(Timestamp = as.character(Timestamp) %>% lubridate::dmy_hm()) -> data
x = TRANSYS()
x$feed.eventlog(dataset = data, caseID_col = 'Case.ID', status_col = 'Activity.description', startTime_col = 'Timestamp',
caseStartFlag_col = NULL, caseEndFlag_col = NULL, caseStartTags = NULL, caseEndTags = NULL,
sort_startTime = T, add_start = T, remove_sst = F,
extra_col = c("Resource", "Location", "Brand", "Customer", "Estimated.activity.cost"))
cfg = list(direction = 'left.right')
x$plot.process.map(config = cfg, width = "800px")
######
x$history$caseID[1]
x$filter.case(IDs = "Phone 3651")
plot_case_timeline(x)
pm4py::discovery_alpha(el) -> petrin
|
bca8ecdcb01f11b959305b757d9e1fa60047004c | 6af2f9b88a5e98f1cbeb496e5e8bc86ff6b5790f | /Archive/CS03_CreateDB_MAIAC_terra.R | f5d8d1277606af99fae785a05b5a85600509bb92 | [] | no_license | alexandrashtein/Model-code | d1e063fdfa9be298e9985c94f5feda5da336260f | d9d3fc5db5ed6e5958faf2f449aa63edc3be9e94 | refs/heads/master | 2021-01-18T16:37:24.367196 | 2017-08-17T07:26:17 | 2017-08-17T07:26:17 | 100,463,627 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 63,868 | r | CS03_CreateDB_MAIAC_terra.R | ### This code builds the 3 input databases (mod1, mod2, mod3) for the model from the new MAIAC version (08.2016)
rm(list=ls())
#load libraries
library(lme4)
library(reshape)
library(foreign)
library(ggplot2)
library(plyr)
library(data.table)
library(Hmisc)
library(mgcv)
library(gdata)
library(car)
library(dplyr)
library(ggmap)
library(broom)
library(splines)
library(DataCombine)
library(readr)
library(bit64)
library(devtools)
install_github("allanjust/aodlur", dependencies = TRUE)
library("aodlur")
library(sp)
library(rgdal)
library(stringi)
#load aod data from new MAIAC data (08.2016)
maiac=fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/MAIAC_data_082016/MAIACTAOT_Israel_2006.csv")
# maiac=fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/MAIAC_data_082016/MAIACAAOT_Israel_2006.csv")
# cutting the data according to the project area
pol=readOGR(dsn="/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Qgis/General/Project_border/Project_aoi","Project_border_latlon")
# pol=readOGR(dsn="/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Qgis/General/Project_border/Project_aoi","Tel_aviv")
# Convert to data.frame
maiac = as.data.frame(maiac)
# Spatial subset
coordinates(maiac) = ~ lon + lat
proj4string(maiac) = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
maiac = maiac[pol, ]
# Convert back to data.table
maiac = as.data.table(maiac)
# creat id field
maiac$aodid=paste(formatC(round(maiac$lon,3),format='f',3),formatC(round(maiac$lat,3),format='f',3),sep="-")
# set names abbriviations
setnames(maiac,"AOT_Uncertainty","UN")
setnames(maiac,"AOT_QA","QA")
setnames(maiac,"Optical_Depth_047","aod_047")
setnames(maiac,"Optical_Depth_055","aod_055")
setnames(maiac,"date","day")
setnames(maiac,"lon","long_aod")
setnames(maiac,"lat","lat_aod")
# saveRDS(maiac,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/raw_data/AOD.AQ.2006.RDS")
saveRDS(maiac,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/raw_data/AOD.TR.2006.RDS")
# maiac=readRDS("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/raw_data/AOD.TR.2006.RDS")
# Use the QA data to remove problematic observations
# The QA have to be used bofore the next stage of creating a single aod point per aodid per day
system.time(maiac[, CloudMask := as.factor(sapply(QA, function(x){paste(rev(as.integer(intToBits(x))[1:3]), collapse = "")}))])
system.time(maiac[, MaskLandWaterSnow := as.factor(sapply(QA, function(x){paste(rev(as.integer(intToBits(x))[4:5]), collapse = "")}))])
system.time(maiac[, MaskAdjacency := as.factor(sapply(QA, function(x){paste(rev(as.integer(intToBits(x))[6:8]), collapse = "")}))])
system.time(maiac[, CloudDetection := as.factor(sapply(QA, function(x){paste(rev(as.integer(intToBits(x))[9:12]), collapse = "")}))])
system.time(maiac[, GlintMask := as.factor(sapply(QA, function(x){paste(rev(as.integer(intToBits(x))[13]), collapse = "")}))])
system.time(maiac[, AerosolModel := as.factor(sapply(QA, function(x){paste(rev(as.integer(intToBits(x))[14:15]), collapse = "")}))])
summary(maiac$CloudMask)
summary(maiac$MaskLandWaterSnow)
summary(maiac$CloudDetection)
summary(maiac$AerosolModel)
summary(maiac$GlintMask)
summary(maiac$MaskAdjacency)
# remove cloudy QA
maiac=filter(maiac,CloudMask!="011")
maiac=filter(maiac,CloudMask!="010")
# remove observatiobs surrounded by more than 8 cloudy pixels QA
maiac=filter(maiac,MaskAdjacency!="010")
# remove water QA
maiac=filter(maiac,MaskLandWaterSnow!="01")
# remove Adjacent to snow QA
maiac=filter(maiac,MaskAdjacency!="100")
# create single aod point per aodid per day
maiac <-maiac %>%
group_by(aodid,day) %>%
summarise(long_aod=mean(long_aod,na.rm=TRUE),lat_aod=mean(lat_aod,na.rm=TRUE),aod_047=mean(aod_047,na.rm=TRUE),aod_055=mean(aod_055,na.rm=TRUE),UN=mean(UN,na.rm=TRUE),RelAZ=mean(RelAZ,na.rm=TRUE))
# saving as shapefile only unique id
# maiac=as.data.table(maiac)
# setkey(maiac,aodid)
# maiac_grid=maiac[!duplicated(aodid)]
# coordinates(maiac_grid) = ~ long_aod + lat_aod
# proj4string(maiac_grid) = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
# # writeOGR(maiac_grid,dsn="/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Qgis/IL_maiac.grid.unique.north/MAIAC_grid_082016", layer="1km_maiac_grid_latlon", driver="ESRI Shapefile")
# converting to ITM crs and adding X and Y columns
# maiac = as.data.frame(maiac)
# coordinates(maiac) = ~ long_aod + lat_aod
# proj4string(maiac) = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
# maiac$long_aod2 <- coordinates(maiac)[,1]
# maiac$lat_aod2 <- coordinates(maiac)[,2]
# maiac2 = spTransform(maiac, "+proj=tmerc +lat_0=31.7343936111111 +lon_0=35.2045169444445 +k=1.0000067 +x_0=219529.584 +y_0=626907.39 +ellps=GRS80 +towgs84=-24.002400,-17.103200,-17.844400,-0.330090,-1.852690,1.669690,5.424800 +units=m +no_defs")
# maiac2$x_aod_ITM <- coordinates(maiac2)[,1]
# maiac2$y_aod_ITM <- coordinates(maiac2)[,2]
# maiac2=as.data.table(maiac2)
# maiac2[,c("long_aod","lat_aod"):=NULL]
# setnames(maiac2,"long_aod2","long_aod")
# setnames(maiac2,"lat_aod2","lat_aod")
# maiac=maiac2
# rm(maiac2)
maiac$day=as.Date(maiac$day)
maiac=as.data.table(maiac)
## Add General variables
# creating a filter field of the forward scattering (FS=1) and the backward scaterring (BS=0 or else)
# maiac$FS_BS=1
# # First option for data devision be Azimuth angle:
# maiac <- maiac[RelAZ> 90, FS_BS := 0]
# maiac$FS_BS_1=0
# # First option for data devision be Azimuth angle:
# maiac <- maiac[RelAZ< 90, FS_BS_1 := 1]
# maiac <- maiac[RelAZ>= 90 & RelAZ< 100, FS_BS_1 := 2]
# maiac <- maiac[RelAZ>= 100 & RelAZ< 110, FS_BS_1 := 3]
# maiac <- maiac[RelAZ>= 110 & RelAZ< 120, FS_BS_1 := 4]
# maiac <- maiac[RelAZ>= 120 & RelAZ< 130, FS_BS_1 := 5]
# maiac <- maiac[RelAZ>= 130 & RelAZ< 140, FS_BS_1 := 6]
# maiac <- maiac[RelAZ>= 140 & RelAZ< 150, FS_BS_1 := 7]
# maiac <- maiac[RelAZ>= 150 & RelAZ< 160, FS_BS_1 := 8]
# maiac <- maiac[RelAZ>= 160 & RelAZ< 170, FS_BS_1 := 9]
# maiac <- maiac[RelAZ>= 170 & RelAZ< 180, FS_BS_1 := 10]
#
# # Second option for data devision be Zenit angle:
# maiac$FS_BS_2=0
# maiac <- maiac[RelAZ< 90, FS_BS_2 := 1]
# maiac <- maiac[RelAZ>= 90 & RelAZ< 120, FS_BS_2:= 2]
# maiac <- maiac[RelAZ>= 120 & RelAZ< 150, FS_BS_2 := 3]
# maiac <- maiac[RelAZ>= 150 & RelAZ< 180, FS_BS_2 := 4]
# maiac <- maiac[is.na(RelAZ), FS_BS_2 := 5]
#add season
maiac$month <- as.numeric(format(maiac$day, "%m"))
#1-winter, 2-spring,3-summer,4-autum
maiac$season<-car::recode(maiac$month,"1=1;2=1;3=2;4=2;5=2;6=3;7=3;8=3;9=4;10=4;11=4;12=1")
#1-winter, 2-summer
maiac$seasonSW<-car::recode(maiac$month,"1=1;2=1;3=1;4=2;5=2;6=2;7=2;8=2;9=2;10=1;11=1;12=1")
################ add Spatial Variables
#load clipped/LU grid
lu<-fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Keytables/1km_grid/1km_MAIAC_grid_fixed.csv")
lu$V1=NULL
maiac$V1=NULL
all(lu$aodid %in% maiac$aodid)
mean(lu$aodid %in% maiac$aodid)
maiac <- maiac[maiac$aodid %in% lu$aodid, ]
#create full LU-aod TS
days<-seq.Date(from = as.Date("2006-01-01"), to = as.Date("2006-12-31"), 1)
#create date range
days2006 <- data.table(expand.grid(aodid = lu[, unique(aodid)], day = days))
days2006$aodid<-as.character(days2006$aodid)
#merge maiac data
setkey(maiac,aodid,day)
setkey(days2006 ,aodid,day)
db2006 <- merge(days2006,maiac, all.x = T)
# precentage of NA in the data
sum(is.na(db2006$aod_055))*100/length(db2006$aod_055)
#add land use data
setkey(db2006,aodid)
setkey(lu,aodid)
db2006 <- merge(db2006, lu, all.x = T)
head(db2006)
gc()
#get rid of duplicate names to cut down on DB size
# db2006[,c("lat","lon"):=NULL]
# gc()
# setnames(db2006,"long_aod.x","long_aod")
# setnames(db2006,"lat_aod.x","lat_aod")
# setnames(db2006,"x_aod_ITM.y","x_aod_ITM")
# setnames(db2006,"y_aod_ITM.y","y_aod_ITM")
db2006[,c("V1","lon","lat"):=NULL]
#save maiac aod data for 2006
# saveRDS(db2006,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/maiac_aod/AQ.AOD.2006.data.rds")
saveRDS(db2006,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/maiac_aod/TR.AOD.2006.data.rds")
# db2006=readRDS("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/maiac_aod/TR.AOD.2006.data.rds")
# db2006=readRDS("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/maiac_aod/AQ.AOD.2006.data.rds")
################ add TEMPORAL Variables
#### add ndvi
#import NDVI
ndvi<-readRDS("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Qgis/General/NDVI/MODIS/ndvi_2000_2015.rds")
ndvi=as.data.table(ndvi)
ndvi=filter(ndvi,c=="2006")
ndvi$m<-stri_pad_left(str=ndvi$m, 2, pad="0")
db2006$m=substr(db2006$day,6,7)
# add ndviid to db2006
#join actual NDVI to aod
setkey(ndvi, ndviid, m)
setkey(db2006,ndviid, m)
db2006<- merge(db2006, ndvi,all.x = T)
#delete unnecessery columns
db2006[,c("lat_ndvi","long_ndvi","c.y"):=NULL]
###### Add Pbl
# add daily average PBL
pblid=fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Qgis/Joins/1km_grid_PBLid/1km_grid_PBLid.csv")
mean(unique(pblid$aodid) %in% unique(db2006$aodid))
hpbl=fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/HPBL_Israel/newmodel.2004_2015_dailyavg.csv")
hpbl$year=substr(hpbl$date,1,4)
hpbl=filter(hpbl,hpbl$year=="2006")
hpbl=as.data.table(hpbl)
setnames(hpbl,"PBLid","pblid")
setnames(hpbl,"date","day")
hpbl$day=as.Date(hpbl$day)
hpbl[,c("V1","year"):=NULL]
setkey(db2006,pblid,day)
setkey(hpbl,pblid,day)
db2006 <- merge(db2006,hpbl,all.x = T)
#
# ## ADD ventilation coefficient
db2006$vc_D=c(db2006$WS_D/(db2006$daily_hpbl*1000))
# mod1$vc_H=c(mod1$WS_H/(mod1$pbl_11*1000))
#add overpass PBL
# Overpass-time PBL: 11:00-12:00 file is for AQUA, 08:00-09:00 for TERRA (active).
# pbl<-fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/HPBL_Israel/newmodel.2004_2015_11_12am.csv")
pbl<-fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/HPBL_Israel/newmodel.2004_2015_8_9am.csv")
# Rebuild the pblid key as "lon-lat" rounded to 3 decimals, matching the key table format.
pbl$pblid=paste(formatC(round(pbl$lon,3),format='f',3),formatC(round(pbl$lat,3),format='f',3),sep="-")
setnames(pbl,"date","day")
pbl$day=as.Date(pbl$day)
#create single pbl point per day
pbl <-pbl %>%
group_by(pblid,day) %>%
summarise(lon_pbl=mean(lon),lat_pbl=mean(lat),pbl=mean(hpbl) )
pbl=as.data.table(pbl)
#join pbl to aod
setkey(pbl, pblid, day )
setkey(db2006, pblid, day)
db2006 <- merge(db2006, pbl, all.x = T)
db2006[,c("lon_pbl","lat_pbl"):=NULL]
db2006=as.data.table(db2006)
summary(db2006$pbl)
# TERRA naming; the commented line is the AQUA (11:00) equivalent.
setnames(db2006,"pbl","pbl_08")
# setnames(db2006,"pbl","pbl_11")
#add night PBL
# Night-time (02:00-03:00) PBL, same aggregation and join as above.
pbl<-fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/HPBL_Israel/newmodel.2003_2015_2_3am.csv")
pbl$pblid=paste(formatC(round(pbl$lon,3),format='f',3),formatC(round(pbl$lat,3),format='f',3),sep="-")
setnames(pbl,"date","day")
pbl$day=as.Date(pbl$day)
#create single pbl point per day
pbl <-pbl %>%
group_by(pblid,day) %>%
summarise(lon_pbl=mean(lon),lat_pbl=mean(lat),pbl=mean(hpbl))
pbl=as.data.table(pbl)
#join pbl to aod
setkey(pbl, pblid, day )
setkey(db2006, pblid, day)
db2006 <- merge(db2006, pbl, all.x = T)
db2006=as.data.table(db2006)
db2006[,c("lon_pbl","lat_pbl"):=NULL]
setnames(db2006,"pbl","pbl_02")
summary(db2006$pbl_02)
#### Add daily average PBL
hpbl=fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/HPBL_Israel/newmodel.2004_2015_dailyavg.csv")
hpbl$year=substr(hpbl$date,1,4)
# BUG FIX: this script assembles the 2006 database (db2006; every other section
# filters 2006) but the original filtered year=="2014" — a copy-paste leftover
# from another year's script that left all daily-PBL values NA after the merge.
hpbl=filter(hpbl,hpbl$year=="2006")
hpbl=as.data.table(hpbl)
setnames(hpbl,"PBLid","pblid")
setnames(hpbl,"date","day")
hpbl$day=as.Date(hpbl$day)
hpbl[,c("V1","year"):=NULL]
setkey(db2006,pblid,day)
setkey(hpbl,pblid,day)
# NOTE(review): the result is assigned to `mod1`, not back to `db2006`, and
# `db2006` keeps being used below — confirm `mod1` is actually consumed later,
# otherwise this whole section is dead code.
mod1 <- merge(db2006,hpbl,all.x = T)
###### Add Temperature
# Join station temperature to the AOD grid: mean of the 15 nearest stations
# within 60 km, matched by day (project helper `nearestbyday`).
## Hourly Temperature (for AQUA- average of 11:30 to 14:30)
# Temp <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/Hourly_Temp_aqua_IMS_Pollution_stn.csv")
## Hourly Temperature (for TERRA- average of 09:00 to 11:30)
Temp <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/Hourly_Temp_terra_IMS_Pollution_stn.csv")
Temp$date<-paste(Temp$Day,Temp$Month,Temp$Year,sep="/")
Temp[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
# `c` holds the calendar year extracted from `day`.
Temp[, c := as.numeric(format(day, "%Y")) ]
Temp[,c("Year","Month","Day","date"):=NULL]
# Drop rows with 'NaN' coordinates or readings (inside [], Temp refers to the column).
Temp <- Temp[X != 'NaN']
Temp <- Temp[Temp != 'NaN']
Temp <- Temp[c == 2006]
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = Temp,
xvar = "X", yvar = "Y", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = Temp,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "Temp",
knearest = 15, maxdistance = 60000,
nearestmean = TRUE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,Temp,aodid)], all.x = T)
head(db2006)
summary(db2006$Temp)
setnames(db2006,"Temp","Temp_H") # Hourly temperature
## Daily Temperature
# Same join repeated with the daily-mean station file.
Temp <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Daily_Data/Daily_Data_Yuval/Daily_Temp_data_IMS_Pollution.csv")
Temp$date<-paste(Temp$Day,Temp$Month,Temp$Year,sep="/")
Temp[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
Temp[, c := as.numeric(format(day, "%Y")) ]
Temp[,c("Year","Month","Day","date"):=NULL]
Temp <- Temp[X != 'NaN']
Temp <- Temp[Temp != 'NaN']
Temp <- Temp[c == 2006]
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = Temp,
xvar = "X", yvar = "Y", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = Temp,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "Temp",
knearest = 15, maxdistance = 60000,
nearestmean = TRUE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,Temp,aodid)], all.x = T)
head(db2006)
summary(db2006$Temp)
setnames(db2006,"Temp","Temp_D") # Daily temperature
###### Add Hourly average WD
# Entire wind-direction block is disabled; kept for reference.
# WD <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/AQUA_Hourly_data_July16/WD_H.csv")
# WD$date<-paste(WD$Day,WD$Month,WD$Year,sep="/")
# WD[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
# WD[, c := as.numeric(format(day, "%Y")) ]
# WD[,c("Year","Month","Day","date"):=NULL]
# WD <- WD[X != 'NaN']
# WD <- WD[WD != 'NaN']
# WD <- WD[c == 2006]
#
# jointo.pt <- makepointsmatrix(datatable = db2006,
# xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
#
# joinfrom.pt <- makepointsmatrix(datatable = WD,
# xvar = "X", yvar = "Y", idvar = "stn")
#
# joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
# jointo = db2006, joinfrom = WD,
# jointovarname = "aodid", joinfromvarname = "stn",
# joinprefix = "nearest", valuefield = "WD",
# knearest = 15, maxdistance = 60000,
# nearestmean = FALSE, verbose = T)
#
# setkey(db2006,aodid,day)
# setkey(joinout,aodid,day)
# db2006 <- merge(db2006, joinout[,list(day,WD,aodid)], all.x = T)
# head(db2006)
# summary(db2006$WD)
# setnames(db2006,"WD","WD_H")
###### Add Hourly WS
# Wind speed at satellite overpass time; TERRA file is active, AQUA commented out.
# for AQUA
# WS <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/AQUA_Hourly_data_July16/WS_H.csv")
# for TERRA
WS <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/Terra_Hourly_data_July16/WS_H.csv")
WS$date<-paste(WS$Day,WS$Month,WS$Year,sep="/")
WS[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
WS[, c := as.numeric(format(day, "%Y")) ]
WS[,c("Year","Month","Day","date"):=NULL]
WS <- WS[X != 'NaN']
WS <- WS[WS != 'NaN']
WS <- WS[c == 2006]
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = WS,
xvar = "X", yvar = "Y", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = WS,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "WS",
knearest = 15, maxdistance = 60000,
nearestmean = TRUE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,WS,aodid)], all.x = T)
head(db2006)
summary(db2006$WS)
setnames(db2006,"WS","WS_H")
###### Add daily WS
# Daily-mean wind speed, same 15-nearest/60 km station join.
WS <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Daily_Data/Daily_Data_Yuval/IMS_stn_July16/WS_D.csv")
WS$date<-paste(WS$Day,WS$Month,WS$Year,sep="/")
WS[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
WS[, c := as.numeric(format(day, "%Y")) ]
WS[,c("Year","Month","Day","date"):=NULL]
WS <- WS[X != 'NaN']
WS <- WS[WS != 'NaN']
WS <- WS[c == 2006]
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = WS,
xvar = "X", yvar = "Y", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = WS,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "WS",
knearest = 15, maxdistance = 60000,
nearestmean = TRUE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,WS,aodid)], all.x = T)
head(db2006)
summary(db2006$WS)
setnames(db2006,"WS","WS_D")
###### Add Hourly RH
# Relative humidity: nearest single station (nearestmean = FALSE), 60 km cutoff.
# for AQUA
# RH <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/AQUA_Hourly_data_July16/RH_H.csv")
# for TERRA
RH <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/Terra_Hourly_data_July16/RH_H.csv")
RH$date<-paste(RH$Day,RH$Month,RH$Year,sep="/")
RH[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
RH[, c := as.numeric(format(day, "%Y")) ]
RH[,c("Year","Month","Day","date"):=NULL]
RH <- RH[X != 'NaN']
RH <- RH[RH != 'NaN']
RH <- RH[c == 2006]
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = RH,
xvar = "X", yvar = "Y", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = RH,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "RH",
knearest = 15, maxdistance = 60000,
nearestmean = FALSE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,RH,aodid)], all.x = T)
head(db2006)
summary(db2006$RH)
setnames(db2006,"RH","RH_H")
###### Add Daily RH
RH <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Daily_Data/Daily_Data_Yuval/IMS_stn_July16/RH_D.csv")
RH$date<-paste(RH$Day,RH$Month,RH$Year,sep="/")
RH[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
RH[, c := as.numeric(format(day, "%Y")) ]
RH[,c("Year","Month","Day","date"):=NULL]
RH <- RH[X != 'NaN']
RH <- RH[RH != 'NaN']
RH <- RH[c == 2006]
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = RH,
xvar = "X", yvar = "Y", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = RH,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "RH",
knearest = 15, maxdistance = 60000,
nearestmean = FALSE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,RH,aodid)], all.x = T)
head(db2006)
summary(db2006$RH)
setnames(db2006,"RH","RH_D")
###### Add hourly Rain
# Rainfall: mean of 15 nearest stations within 60 km (nearestmean = TRUE).
## for AQUA
# Rain <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/Hourly_Rain_AQUA_IMS_Pollution_stn.csv")
## for TERRA
Rain <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/Hourly_Rain_terra_IMS_Pollution_stn.csv")
Rain$date<-paste(Rain$Day,Rain$Month,Rain$Year,sep="/")
Rain[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
Rain[, c := as.numeric(format(day, "%Y")) ]
Rain[,c("Year","Month","Day","date"):=NULL]
Rain <- Rain[X != 'NaN']
Rain<- Rain[Rain != 'NaN']
Rain<- Rain[c == 2006]
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = Rain,
xvar = "X", yvar = "Y", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = Rain,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "Rain",
knearest = 15, maxdistance = 60000,
nearestmean = TRUE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,Rain,aodid)], all.x = T)
head(db2006)
summary(db2006$Rain)
setnames(db2006,"Rain","Rain_H")
## Daily Rain
# Daily rain sums, same join pattern.
Rain <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Daily_Data/Daily_Data_Yuval/Daily_Rain_Sum_IMS_Pollutants.csv")
Rain$date<-paste(Rain$Day,Rain$Month,Rain$Year,sep="/")
Rain[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
Rain[, c := as.numeric(format(day, "%Y")) ]
Rain[,c("Year","Month","Day","date"):=NULL]
Rain <- Rain[X != 'NaN']
Rain<- Rain[Rain != 'NaN']
Rain<- Rain[c == 2006]
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = Rain,
xvar = "X", yvar = "Y", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = Rain,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "Rain",
knearest = 15, maxdistance = 60000,
nearestmean = TRUE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,Rain,aodid)], all.x = T)
head(db2006)
summary(db2006$Rain)
setnames(db2006,"Rain","Rain_D")
###### Add Hourly NO2
# NO2: hourly uses nearest single station (FALSE), daily uses 15-station mean (TRUE).
# for AQUA
# NO2 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/AQUA_Hourly_data_May16/NO2_H.csv")
# for TERRA
NO2 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/TERRA_Hourly_data_May16/NO2_H.csv")
NO2$date<-paste(NO2$Day,NO2$Month,NO2$Year,sep="/")
NO2[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
NO2[, c := as.numeric(format(day, "%Y")) ]
NO2[,c("Year","Month","Day","date"):=NULL]
NO2 <- NO2[X != 'NaN']
NO2<- NO2[NO2 != 'NaN']
NO2<- NO2[c == 2006]
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = NO2,
xvar = "X", yvar = "Y", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = NO2,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "NO2",
knearest = 15, maxdistance = 60000,
nearestmean = FALSE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,NO2,aodid)], all.x = T)
head(db2006)
summary(db2006$NO2)
setnames(db2006,"NO2","NO2_H")
###### Add Daily NO2
NO2 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Daily_Data/Daily_Data_Yuval/Pollution_stn_May16/NO2_D.csv")
NO2$date<-paste(NO2$Day,NO2$Month,NO2$Year,sep="/")
NO2[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
NO2[, c := as.numeric(format(day, "%Y")) ]
NO2[,c("Year","Month","Day","date"):=NULL]
NO2 <- NO2[X != 'NaN']
NO2<- NO2[NO2 != 'NaN']
NO2<- NO2[c == 2006]
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = NO2,
xvar = "X", yvar = "Y", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = NO2,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "NO2",
knearest = 15, maxdistance = 60000,
nearestmean = TRUE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,NO2,aodid)], all.x = T)
head(db2006)
summary(db2006$NO2)
setnames(db2006,"NO2","NO2_D")
###### Add Hourly SO2
# SO2: both hourly and daily use the 15-station mean within 60 km.
# for AQUA
# SO2 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/AQUA_Hourly_data_May16/SO2_H.csv")
# for TERRA
SO2 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/TERRA_Hourly_data_May16/SO2_H.csv")
SO2$date<-paste(SO2$Day,SO2$Month,SO2$Year,sep="/")
SO2[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
SO2[, c := as.numeric(format(day, "%Y")) ]
SO2[,c("Year","Month","Day","date"):=NULL]
SO2 <- SO2[X != 'NaN']
SO2<- SO2[SO2 != 'NaN']
SO2<- SO2[c == 2006]
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = SO2,
xvar = "X", yvar = "Y", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = SO2,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "SO2",
knearest = 15, maxdistance = 60000,
nearestmean = TRUE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,SO2,aodid)], all.x = T)
head(db2006)
summary(db2006$SO2)
setnames(db2006,"SO2","SO2_H")
###### Add Daily SO2
SO2 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Daily_Data/Daily_Data_Yuval/Pollution_stn_May16/SO2_D.csv")
SO2$date<-paste(SO2$Day,SO2$Month,SO2$Year,sep="/")
SO2[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
SO2[, c := as.numeric(format(day, "%Y")) ]
SO2[,c("Year","Month","Day","date"):=NULL]
SO2 <- SO2[X != 'NaN']
SO2<- SO2[SO2 != 'NaN']
SO2<- SO2[c == 2006]
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = SO2,
xvar = "X", yvar = "Y", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = SO2,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "SO2",
knearest = 15, maxdistance = 60000,
nearestmean = TRUE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,SO2,aodid)], all.x = T)
head(db2006)
summary(db2006$SO2)
setnames(db2006,"SO2","SO2_D")
#### Add Hourly mean PM2.5
# Overpass-hour PM2.5: mean of the 9 nearest monitors within 60 km.
# for aqua
# PM25 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/AQUA_Hourly_data_May16/PM25_H.csv")
# for terra
PM25 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/TERRA_Hourly_data_May16/PM25_H.csv")
PM25$date<-paste(PM25$Day,PM25$Month,PM25$Year,sep="/")
PM25[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
PM25[, c := as.numeric(format(day, "%Y")) ]
PM25[,c("Year","Month","Day","date"):=NULL]
PM25 <- PM25[X != 'NaN']
PM25<-PM25[!is.na(PM25)]
#clear non continuous stations
setnames(PM25,"X","x_stn_ITM")
setnames(PM25,"Y","y_stn_ITM")
# `pmall2006` is reused by the IDW section further below.
pmall2006<- PM25[c==2006]
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = pmall2006,
xvar = "x_stn_ITM", yvar = "y_stn_ITM", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = pmall2006,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "PM25",
knearest = 9, maxdistance = 60000,
nearestmean = TRUE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,PM25,aodid)], all.x = T)
head(db2006)
summary(db2006$PM25)
setnames(db2006,"PM25","PM25_H_mean")
## Add Daily PM25
# Two joins from the same station file: closest monitor (nearestmean = FALSE)
# and mean of the 9 nearest monitors (nearestmean = TRUE).
# Add Daily closest PM2.5
PM25 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Daily_Data/Daily_Data_Yuval/Pollution_stn_May16/PM25_D.csv")
PM25$date<-paste(PM25$Day,PM25$Month,PM25$Year,sep="/")
PM25[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
PM25[, c := as.numeric(format(day, "%Y")) ]
PM25[,c("Year","Month","Day","date"):=NULL]
PM25 <- PM25[X != 'NaN']
PM25<-PM25[!is.na(PM25)]
#clear non continuous stations
setnames(PM25,"X","x_stn_ITM")
setnames(PM25,"Y","y_stn_ITM")
# Overwrites the hourly `pmall2006`; the PM2.5 IDW loop below uses this daily table.
pmall2006<- PM25[c==2006]
# Join the closest PM2.5 value for each day
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = pmall2006,
xvar = "x_stn_ITM", yvar = "y_stn_ITM", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = pmall2006,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "PM25",
knearest = 9, maxdistance = 60000,
nearestmean = FALSE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,PM25,aodid)], all.x = T)
head(db2006)
summary(db2006$PM25)
setnames(db2006,"PM25","PM25_D_closest")
# Add daily mean PM2.5
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = pmall2006,
xvar = "x_stn_ITM", yvar = "y_stn_ITM", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = pmall2006,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "PM25",
knearest = 9, maxdistance = 60000,
nearestmean = TRUE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,PM25,aodid)], all.x = T)
head(db2006)
summary(db2006$PM25)
setnames(db2006,"PM25","PM25_D_mean")
## Add IDW PM2.5
# Interpolate daily station PM2.5 onto the AOD grid with inverse-distance
# weighting, one day at a time (gstat() with no variogram model defaults to IDW).
library(gstat)  # hoisted out of the loop: load once, not once per iteration
library(sp)
# Preallocate the prediction column so the first iteration does not have to
# create it implicitly via `$<-` on a subset of a not-yet-existing column.
db2006$pred <- NA_real_
for (i in unique(db2006$day)) {
  x <- pmall2006[pmall2006$day == i, ]  # stations with a measurement on day i
  y <- db2006[db2006$day == i, ]        # AOD grid cells for day i
  coordinates(x) = ~ x_stn_ITM + y_stn_ITM
  coordinates(y) = ~ x_aod_ITM + y_aod_ITM
  # location statement unneeded since we defined coordinates
  inter <- gstat(formula = PM25 ~ 1, data = x)
  z <- predict(object = inter, newdata = y)
  # head(z)
  db2006$pred[db2006$day == i] <- z$var1.pred
  # spplot(z, "var1.pred", at = 0:100)
}
setnames(db2006, "pred", "PM25_IDW")
#### ADD Hourly PM10
# Overpass-hour PM10: mean of the 9 nearest monitors within 60 km.
# PM10 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/AQUA_Hourly_data_May16/PM10_H.csv")
PM10 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/TERRA_Hourly_data_May16/PM10_H.csv")
PM10$date<-paste(PM10$Day,PM10$Month,PM10$Year,sep="/")
PM10[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
PM10[, c := as.numeric(format(day, "%Y")) ]
PM10[,c("Year","Month","Day","date"):=NULL]
PM10 <- PM10[X != 'NaN']
PM10<-PM10[!is.na(PM10)]
#clear non continuous stations
setnames(PM10,"X","x_stn_ITM")
setnames(PM10,"Y","y_stn_ITM")
pm_10all2006<- PM10[c==2006]
# Join the closest PM10 value for each day
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = pm_10all2006,
xvar = "x_stn_ITM", yvar = "y_stn_ITM", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = pm_10all2006,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "PM10",
knearest = 9, maxdistance = 60000,
nearestmean = TRUE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,PM10,aodid)], all.x = T)
head(db2006)
summary(db2006$PM10)
setnames(db2006,"PM10","PM10_H_mean")
#### ADD Daily PM10
# BUG FIX: the original ran a single nearest-station join and then renamed the
# same merged "PM10" column twice (first to PM10_D_closest, then to PM10_D_mean);
# the second setnames() errors because "PM10" no longer exists. Mirror the daily
# PM2.5 pattern above: one closest-monitor join (nearestmean = FALSE) and one
# 9-nearest-mean join (nearestmean = TRUE), producing both columns that the
# column-pruning step further below expects.
PM10 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Daily_Data/Daily_Data_Yuval/Pollution_stn_May16/PM10_D.csv")
PM10$date<-paste(PM10$Day,PM10$Month,PM10$Year,sep="/")
PM10[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
PM10[, c := as.numeric(format(day, "%Y")) ]
PM10[,c("Year","Month","Day","date"):=NULL]
PM10 <- PM10[X != 'NaN']
PM10<-PM10[!is.na(PM10)]
#clear non continuous stations
setnames(PM10,"X","x_stn_ITM")
setnames(PM10,"Y","y_stn_ITM")
pm_10all2006<- PM10[c==2006]
# Join the closest PM10 value for each day
jointo.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinfrom.pt <- makepointsmatrix(datatable = pm_10all2006,
xvar = "x_stn_ITM", yvar = "y_stn_ITM", idvar = "stn")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = pm_10all2006,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "PM10",
knearest = 9, maxdistance = 60000,
nearestmean = FALSE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,PM10,aodid)], all.x = T)
summary(db2006$PM10)
setnames(db2006,"PM10","PM10_D_closest")
# Join the mean of the 9 nearest PM10 monitors for each day
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = db2006, joinfrom = pm_10all2006,
jointovarname = "aodid", joinfromvarname = "stn",
joinprefix = "nearest", valuefield = "PM10",
knearest = 9, maxdistance = 60000,
nearestmean = TRUE, verbose = T)
setkey(db2006,aodid,day)
setkey(joinout,aodid,day)
db2006 <- merge(db2006, joinout[,list(day,PM10,aodid)], all.x = T)
head(db2006)
summary(db2006$PM10)
setnames(db2006,"PM10","PM10_D_mean")
## Add daily IDW PM10
# Reload the daily PM10 station file and interpolate onto the AOD grid with
# inverse-distance weighting, one day at a time.
PM10 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Daily_Data/Daily_Data_Yuval/Pollution_stn_May16/PM10_D.csv")
PM10$date<-paste(PM10$Day,PM10$Month,PM10$Year,sep="/")
PM10[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
PM10[, c := as.numeric(format(day, "%Y")) ]
PM10[,c("Year","Month","Day","date"):=NULL]
PM10 <- PM10[X != 'NaN']
PM10<-PM10[!is.na(PM10)]
#clear non continuous stations
setnames(PM10,"X","x_stn_ITM")
setnames(PM10,"Y","y_stn_ITM")
# NOTE(review): this reuses the `pmall2006` name, clobbering the daily PM2.5
# table — intentional in the original; kept as-is.
pmall2006<- PM10[c==2006]
# calculate IDW for PM10
library(gstat)  # hoisted out of the loop: load once, not once per iteration
library(sp)
# Preallocate the prediction column instead of growing it via `$<-` on a subset.
db2006$pred <- NA_real_
for (i in unique(db2006$day)) {
  x <- pmall2006[pmall2006$day == i, ]  # stations with a measurement on day i
  y <- db2006[db2006$day == i, ]        # AOD grid cells for day i
  coordinates(x) = ~ x_stn_ITM + y_stn_ITM
  coordinates(y) = ~ x_aod_ITM + y_aod_ITM
  # location statement unneeded since we defined coordinates
  inter <- gstat(formula = PM10 ~ 1, data = x)
  z <- predict(object = inter, newdata = y)
  db2006$pred[db2006$day == i] <- z$var1.pred
  # spplot(z, "var1.pred", at = 0:100)
}
setnames(db2006, "pred", "PM10_IDW")
## Add ventilation coefficient variable (VC)
# VC = wind speed / boundary-layer height; pbl_08 (TERRA overpass) is in km,
# hence the *1000. The AQUA (pbl_11) variants are kept commented out.
# NOTE(review): this vc_D silently overwrites the vc_D computed from daily_hpbl
# near the top of this section — confirm which definition is intended.
# db2006$vc_H= c(db2006$WS_H/(db2006$pbl_11*1000))
# db2006$vc_D= c(db2006$WS_D/(db2006$pbl_11*1000))
db2006$vc_H= c(db2006$WS_H/(db2006$pbl_08*1000))
db2006$vc_D= c(db2006$WS_D/(db2006$pbl_08*1000))
#take out uneeded
#save mod3
gc()
# mod3 = full grid-day database (TERRA 2006); AQUA paths kept commented.
saveRDS(db2006,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod3/TR.MAIAC.2006.mod3.rds")
# x1db2006<- readRDS("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod3/TR.PM25.2006.mod3.rds")
# saveRDS(db2006,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod3/AQ.PM25.2006.mod3.rds")
# x1db2006<- readRDS("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod3/AQ.PM25.2006.mod3.rds")
x1db2006<- readRDS("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod3/TR.MAIAC.2006.mod3.rds")
#calculate weights
# Inverse-probability weights for AOD availability: model P(AOD observed) with
# a logistic regression on elevation, temperature, PBL and month, then weight
# each grid-day by 1/p.
x1db2006[, m := as.numeric(format(day, "%m")) ]
# obs = 1 when the cell-day has an AOD retrieval, 0 otherwise.
x1db2006<-x1db2006[,obs:=1]
x1db2006[is.na(aod_055), obs:= 0]
ws.2006<-dplyr::select(x1db2006,obs,Elev,pbl_08,m,Temp_D,aodid,day)
#to save memory
gc()
w1 <- glm(obs ~ Elev+Temp_D+pbl_08+as.factor(m),family=binomial,data=ws.2006)
ws.2006$prob <- predict(w1 ,type = c("response"))
ws.2006$wt <- 1/ws.2006$prob
#ws.2006$normwt <- ws.2006$wt/mean(ws.2006$wt)
#try scaled and compare
# NOTE(review): scale() centers to mean 0 / sd 1, so normwt will contain
# negative values and is stored as a 1-column matrix — confirm downstream use;
# the commented mean-normalisation above keeps weights positive.
ws.2006$normwt <- scale(ws.2006$wt)
ws.2006[, c("prob", "wt","obs","Elev", "pbl_08" , "m","Temp_D" ) := NULL]
gc()
setkey(x1db2006,aodid,day)
setkey(ws.2006,aodid,day)
x1db2006 <- merge(x1db2006,ws.2006,all.x = T)
x1db2006[,c("m","obs"):=NULL]
saveRDS(x1db2006,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod3/TR.MAIAC.2006.mod3.rds")
# saveRDS(x1db2006,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod3/AQ.MAIAC.2006.mod3.rds")
# Create MOD2
#SPLIT the DATA
#create mod 2 file
# mod2 = only the grid-days that actually have an AOD retrieval.
db2006.m2 <- db2006[!is.na(aod_055)]
#rm db2006
rm(x1db2006)
gc()
#save mod2
saveRDS(db2006.m2,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod2/TR.MAIAC.2006.mod2.rds")
# saveRDS(db2006.m2,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod2/AQ.MAIAC.2006.mod2.rds")
gc()
####### building model 1 -MOD1
### Create daily PM2.5 mod1
# mod1 = station-day calibration table: PM2.5 monitors with collocated AOD
# (9 nearest AOD cells within 1.1 km of each monitor).
# daily database
PM25 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Daily_Data/Daily_Data_Yuval/Pollution_stn_May16/PM25_D.csv")
PM25$date<-paste(PM25$Day,PM25$Month,PM25$Year,sep="/")
PM25[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
PM25[, c := as.numeric(format(day, "%Y")) ]
PM25[,c("Year","Month","Day","date"):=NULL]
PM25 <- PM25[X != 'NaN']
PM25<-PM25[!is.na(PM25)]
# Add field classification (General or transportation)
PM_Type <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Qgis/Joins/PM_monitors_classification/PM_monitors.csv")
setnames(PM_Type,"Code","stn")
# Station code is characters 2-4 of the quoted code string.
PM_Type$stn=substr(PM_Type$stn, 2, 4)
PM25=dplyr::left_join(PM25,PM_Type,by="stn")
PM25=as.data.table(PM25)
PM25[,c("Name","Region","X.y","Y.y","Long","Lat","HASL","HAGL","Parameters"):=NULL]
# stn_type: 1 = general, 0 = transportation (also the default), 2 = unclassified.
PM25$stn_type<-0
PM25[Type=="'Gener'",stn_type:=1]
PM25[Type=="'Trans'",stn_type:=0]
PM25[Type=="'NaN'",stn_type:=2]
#clear non continuous stations
pmall2006<- PM25[c==2006]
setnames(pmall2006,"X.x","x_stn_ITM")
setnames(pmall2006,"Y.x","y_stn_ITM")
# ADD AOD 055 to PM25 mod 1
# Note the reversed join direction: stations are the targets, AOD cells the source.
jointo.pt <- makepointsmatrix(datatable = pmall2006,
xvar = "x_stn_ITM", yvar = "y_stn_ITM", idvar = "stn")
joinfrom.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = pmall2006 , joinfrom = db2006,
jointovarname = "stn", joinfromvarname = "aodid",
joinprefix = "nearest", valuefield = "aod_055",
knearest = 9, maxdistance = 1100,
nearestmean = TRUE, verbose = T)
setkey(pmall2006,stn,day)
setkey(joinout,stn,day)
PM25.m1 <- merge(pmall2006, joinout, all.x = T)
# Keep only station-days with a collocated AOD retrieval.
PM25.m1<-PM25.m1[!is.na(aod_055)]
setnames(PM25.m1,"nearestmean", "aod_055_mean")
PM25.m1[,nearestknn:=NULL]
PM25.m1[,nearestnobs:=NULL]
PM25.m1[,c.y:=NULL]
setnames(PM25.m1,"c.x", "year")
# ADD AOD 047
# Join the 0.47 um AOD (closest cell + 9-cell mean within 1.1 km) onto the
# daily mod1 station table, then attach 200 m land-use variables and save.
db2006_s=db2006[, c("aodid","x_aod_ITM","y_aod_ITM","aod_047","day"), with = FALSE]
jointo.pt <- makepointsmatrix(datatable = PM25.m1,
xvar = "x_stn_ITM", yvar = "y_stn_ITM", idvar = "stn")
joinfrom.pt <- makepointsmatrix(datatable = db2006_s,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = PM25.m1 , joinfrom = db2006_s,
jointovarname = "stn", joinfromvarname = "aodid",
joinprefix = "nearest", valuefield = "aod_047",
knearest = 9, maxdistance = 1100,
nearestmean = TRUE, verbose = T)
setkey(PM25.m1,stn,day)
setkey(joinout,stn,day)
PM25.m1<- merge(PM25.m1, joinout, all.x = T)
setnames(PM25.m1,"nearestmean", "aod_047_mean")
setnames(PM25.m1,"aod_047.x", "aod_047")
setnames(PM25.m1,"x_aod_ITM.x", "x_aod_ITM")
setnames(PM25.m1,"y_aod_ITM.x", "y_aod_ITM")
PM25.m1[,c("nearest.x","nearestknn","nearestnobs","x_aod_ITM.y", "y_aod_ITM.y", "aod_047.y","nearest.y"):=NULL]
# add variable that excludes aod observations higher than 2.5
# PM25.m1$filt_2.5=0
# PM25.m1<-PM25.m1[aod_055_mean>2.5, filt_2.5:= 1]
# # add variable that excludes aod observations higher than 2.5
# PM25.m1$filt_1.5=0
# PM25.m1<-PM25.m1[aod_055>1.5, filt_1.5:= 1]
# Join 200 m spatial variables
# add 200 m key field to the database
key_field=fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Qgis/Joins/PM25_stn_200m_keytable_id/PM25_stn_200m_keytable_id.csv")
key_field=as.data.table(key_field)
key_field$Key200_id <- paste0(key_field$POINT_X,"-",key_field$POINT_Y)
setnames(key_field, "Code","stn")
key_field$stn=substr(key_field$stn,2,4)
key_field=key_field[,.(stn,Key200_id)]
setkey(PM25.m1,stn)
setkey(key_field,stn)
PM25.m1= merge(PM25.m1,key_field,all.x = T)
lu_200m=fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Keytables/200m_grid/200m_grid_spatial_Data.csv")
setnames(lu_200m,"X_Y","Key200_id")
lu_200m$V1=NULL
# Suffix every land-use column with "_200m", then restore the key name.
colnames(lu_200m) <- paste(colnames(lu_200m),"200m", sep = "_")
setnames(lu_200m,"Key200_id_200m","Key200_id")
setkey(lu_200m,Key200_id)
setkey(PM25.m1,Key200_id)
PM25.m1 <- merge(PM25.m1, lu_200m, all.x = T)
# FIX: "V1_200m" removed from this delete list — V1 was already dropped from
# lu_200m above (before the suffixing), so that column can never exist.
PM25.m1[,c("X_ITM_200m","Y_ITM_200m"):=NULL]
# BUG FIX: removed a duplicate setnames(PM25.m1,"aod_047.x","aod_047") here —
# the column was already renamed right after the AOD-047 merge above, so the
# repeated rename errored on the now-missing "aod_047.x".
PM25.m1_D=PM25.m1
# # delete hourly meteorological variables
# NOTE(review): PM25.m1_D is a data.table reference to PM25.m1, so this := also
# mutates PM25.m1 (harmless here: the hourly section rebuilds PM25.m1 from scratch).
PM25.m1_D[,c("Temp_H","WS_H" ,"RH_H","Rain_H","NO2_H" ,"SO2_H","PM25_D_closest","PM25_D_mean","PM25_IDW","PM10_D_closest","PM10_IDW","PM10_H_mean","vc_H","PM25_H_mean"):=NULL]
# Save RDS files
saveRDS(PM25.m1_D,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod1/mod1.TR.2006.PM25_Daily.rds")
##### Create Hourly PM2.5 mod1
# hourly terra database
# PM25 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/TERRA_Hourly_data_May16/PM25_H.csv")
# hourly aqua database
PM25 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/AQUA_Hourly_data_May16/PM25_H.csv")
# build a Date column (day) and a numeric year column (c), then drop the raw parts
PM25$date<-paste(PM25$Day,PM25$Month,PM25$Year,sep="/")
PM25[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
PM25[, c := as.numeric(format(day, "%Y")) ]
PM25[,c("Year","Month","Day","date"):=NULL]
PM25 <- PM25[X != 'NaN']   # drop monitors with no coordinates
# NOTE: inside the data.table, PM25 refers to the measurement COLUMN of the
# same name, so this drops rows with a missing PM2.5 value.
PM25<-PM25[!is.na(PM25)]
# Add field classification (General or transportation)
PM_Type <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Qgis/Joins/PM_monitors_classification/PM_monitors.csv")
setnames(PM_Type,"Code","stn")
PM_Type$stn=substr(PM_Type$stn, 2, 4)   # strip quoting; keep the 3-char station code
PM25=dplyr::left_join(PM25,PM_Type,by="stn")
PM25=as.data.table(PM25)
PM25[,c("Name","Region","X.y","Y.y","Long","Lat","HASL","HAGL","Parameters"):=NULL]
# station type flag: 1 = general, 0 = transportation, 2 = unclassified
PM25$stn_type<-0
PM25[Type=="'Gener'",stn_type:=1]
PM25[Type=="'Trans'",stn_type:=0]
PM25[Type=="'NaN'",stn_type:=2]
#clear non continous stations
pmall2006<- PM25[c==2006]   # keep the modelling year only
setnames(pmall2006,"X.x","x_stn_ITM")
setnames(pmall2006,"Y.x","y_stn_ITM")
# ADD AOD 055 to PM25 mod 1
# Spatial join via project helpers makepointsmatrix()/nearestbyday():
# presumably matches each station-day to nearby AOD cells (up to 9 within
# 1100 m) and also returns their mean -- confirm against the helper's source.
jointo.pt <- makepointsmatrix(datatable = pmall2006,
xvar = "x_stn_ITM", yvar = "y_stn_ITM", idvar = "stn")
joinfrom.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = pmall2006 , joinfrom = db2006,
jointovarname = "stn", joinfromvarname = "aodid",
joinprefix = "nearest", valuefield = "aod_055",
knearest = 9, maxdistance = 1100,
nearestmean = TRUE, verbose = T)
setkey(pmall2006,stn,day)
setkey(joinout,stn,day)
PM25.m1 <- merge(pmall2006, joinout, all.x = T)
PM25.m1<-PM25.m1[!is.na(aod_055)]   # keep only station-days with an AOD retrieval
setnames(PM25.m1,"nearestmean", "aod_055_mean")
PM25.m1[,nearestknn:=NULL]
PM25.m1[,nearestnobs:=NULL]
PM25.m1[,c.y:=NULL]
setnames(PM25.m1,"c.x", "year")
# ADD AOD 047
# slim copy of the AOD table with just the 0.47 um band for the second join
db2006_s=db2006[, c("aodid","x_aod_ITM","y_aod_ITM","aod_047","day"), with = FALSE]
jointo.pt <- makepointsmatrix(datatable = PM25.m1,
xvar = "x_stn_ITM", yvar = "y_stn_ITM", idvar = "stn")
joinfrom.pt <- makepointsmatrix(datatable = db2006_s,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = PM25.m1 , joinfrom = db2006_s,
jointovarname = "stn", joinfromvarname = "aodid",
joinprefix = "nearest", valuefield = "aod_047",
knearest = 9, maxdistance = 1100,
nearestmean = TRUE, verbose = T)
setkey(PM25.m1,stn,day)
setkey(joinout,stn,day)
PM25.m1<- merge(PM25.m1, joinout, all.x = T)
# tidy the .x/.y suffixes the merge introduced
setnames(PM25.m1,"nearestmean", "aod_047_mean")
setnames(PM25.m1,"aod_047.x", "aod_047")
setnames(PM25.m1,"x_aod_ITM.x", "x_aod_ITM")
setnames(PM25.m1,"y_aod_ITM.x", "y_aod_ITM")
PM25.m1[,c("nearest.x","nearestknn","nearestnobs","x_aod_ITM.y", "y_aod_ITM.y", "aod_047.y","nearest.y"):=NULL]
# Join 200 m spatial variables
# add 200 m key field to the database (station -> 200 m grid-cell id)
key_field=fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Qgis/Joins/PM25_stn_200m_keytable_id/PM25_stn_200m_keytable_id.csv")
key_field=as.data.table(key_field)
# grid-cell id is the "X-Y" coordinate pair, matching lu_200m's key below
key_field$Key200_id <- paste0(key_field$POINT_X,"-",key_field$POINT_Y)
setnames(key_field, "Code","stn")
key_field$stn=substr(key_field$stn,2,4)
key_field=key_field[,.(stn,Key200_id)]
setkey(PM25.m1,stn)
setkey(key_field,stn)
PM25.m1= merge(PM25.m1,key_field,all.x = T)
lu_200m=fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Keytables/200m_grid/200m_grid_spatial_Data.csv")
setnames(lu_200m,"X_Y","Key200_id")
lu_200m$V1=NULL
colnames(lu_200m) <- paste(colnames(lu_200m),"200m", sep = "_")
setnames(lu_200m,"Key200_id_200m","Key200_id")
setkey(lu_200m,Key200_id)
setkey(PM25.m1,Key200_id)
PM25.m1 <- merge(PM25.m1, lu_200m, all.x = T)
PM25.m1[,c("X_ITM_200m","Y_ITM_200m","V1_200m"):=NULL]
# NOTE(review): "aod_047.x" was already renamed to "aod_047" above; confirm
# this column still exists here, otherwise setnames errors.
setnames(PM25.m1,"aod_047.x" ,"aod_047")
PM25.m1_H=PM25.m1   # reference copy: the := deletion below also affects PM25.m1
# delete daily meteorological variables
PM25.m1_H[,c("c.y","Temp_D","WS_D" ,"RH_D","Rain_D","NO2_D","SO2_D","PM25_D_closest","PM25_D_mean","PM25_IDW","PM10_D_closest","PM10_IDW","PM10_H_mean","vc_D"):=NULL ]
# Save RDS files
saveRDS(PM25.m1_H,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod1/mod1.TR.2006.PM25_Hourly.rds")
# saveRDS(PM25.m1,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod1/mod1.TR.2006.PM25_Daily.rds")
# Create mod 1 for PM10
### PM10 mod1 Daily
# daily database
# PM10 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Daily_Data/Daily_Data_Yuval/Pollution_stn_May16/PM10_D.csv")
# NOTE(review): the fread above is commented out, so this section relies on a
# PM10 table already present in the workspace -- confirm before running standalone.
PM10$date<-paste(PM10$Day,PM10$Month,PM10$Year,sep="/")
PM10[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
PM10[, c := as.numeric(format(day, "%Y")) ]
PM10[,c("Year","Month","Day","date"):=NULL]
PM10 <- PM10[X != 'NaN']   # drop monitors with no coordinates
# PM10 here refers to the measurement column, not the table itself
PM10<-PM10[!is.na(PM10)]
# Add field classification (General or transportation)
PM_Type <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Qgis/Joins/PM_monitors_classification/PM_monitors.csv")
setnames(PM_Type,"Code","stn")
PM_Type$stn=substr(PM_Type$stn, 2, 4)
PM10=left_join(PM10,PM_Type,by="stn")
PM10=as.data.table(PM10)
PM10[,c("Name","Region","X.y","Y.y","Long","Lat","HASL","HAGL","Parameters"):=NULL]
# station type flag: 1 = general, 0 = transportation, 2 = unclassified
PM10$stn_type<-0
PM10[Type=="'Gener'",stn_type:=1]
PM10[Type=="'Trans'",stn_type:=0]
PM10[Type=="'NaN'",stn_type:=2]
#clear non continous stations
pmall2006<- PM10[c==2006]   # keep the modelling year only
setnames(pmall2006,"X.x","x_stn_ITM")
setnames(pmall2006,"Y.x","y_stn_ITM")
# ADD AOD 055 to MOD1
# nearest-neighbour spatial join of AOD (0.55 um band) onto station-days,
# same scheme as for PM2.5 above
jointo.pt <- makepointsmatrix(datatable = pmall2006,
xvar = "x_stn_ITM", yvar = "y_stn_ITM", idvar = "stn")
joinfrom.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = pmall2006 , joinfrom = db2006,
jointovarname = "stn", joinfromvarname = "aodid",
joinprefix = "nearest", valuefield = "aod_055",
knearest = 9, maxdistance = 1100,
nearestmean = TRUE, verbose = T)
setkey(pmall2006,stn,day)
setkey(joinout,stn,day)
PM10.m1 <- merge(pmall2006, joinout, all.x = T)
PM10.m1<-PM10.m1[!is.na(aod_055)]   # keep only station-days with an AOD retrieval
setnames(PM10.m1,"nearestmean", "aod_055_mean")
PM10.m1[,nearestknn:=NULL]
PM10.m1[,nearestnobs:=NULL]
PM10.m1[,c.y:=NULL]
setnames(PM10.m1,"c.x", "year")
# ADD AOD 047
db2006_s=db2006[, c("aodid","x_aod_ITM","y_aod_ITM","aod_047","day"), with = FALSE]
jointo.pt <- makepointsmatrix(datatable = PM10.m1,
xvar = "x_stn_ITM", yvar = "y_stn_ITM", idvar = "stn")
joinfrom.pt <- makepointsmatrix(datatable = db2006_s,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = PM10.m1 , joinfrom = db2006_s,
jointovarname = "stn", joinfromvarname = "aodid",
joinprefix = "nearest", valuefield = "aod_047",
knearest = 9, maxdistance = 1100,
nearestmean = TRUE, verbose = T)
setkey(PM10.m1,stn,day)
setkey(joinout,stn,day)
PM10.m1<- merge(PM10.m1, joinout, all.x = T)
setnames(PM10.m1,"nearestmean", "aod_047_mean")
setnames(PM10.m1,"aod_047.x", "aod_047")
PM10.m1[,c("nearest.x","nearestknn","nearestnobs","x_aod_ITM.y", "y_aod_ITM.y", "aod_047.y","nearest.y"):=NULL]
# Join 200 m spatial variables
# add 200 m key field to the database (station -> 200 m grid-cell id)
key_field=fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Qgis/Joins/PM25_stn_200m_keytable_id/PM25_stn_200m_keytable_id.csv")
key_field=as.data.table(key_field)
key_field$Key200_id <- paste0(key_field$POINT_X,"-",key_field$POINT_Y)
setnames(key_field, "Code","stn")
key_field$stn=substr(key_field$stn,2,4)
key_field=key_field[,.(stn,Key200_id)]
setkey(PM10.m1,stn)
setkey(key_field,stn)
PM10.m1= merge(PM10.m1,key_field,all.x = T)
lu_200m=fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Keytables/200m_grid/200m_grid_spatial_Data.csv")
setnames(lu_200m,"X_Y","Key200_id")
lu_200m$V1=NULL
colnames(lu_200m) <- paste(colnames(lu_200m),"200m", sep = "_")
setnames(lu_200m,"Key200_id_200m","Key200_id")
setkey(lu_200m,Key200_id)
setkey(PM10.m1,Key200_id)
PM10.m1 <- merge(PM10.m1, lu_200m, all.x = T)
PM10.m1[,c("X_ITM_200m","Y_ITM_200m"):=NULL]
setnames(PM10.m1,"aod_047.x" ,"aod_047")
PM10.m1_D=PM10.m1   # reference copy: := deletions below also affect PM10.m1
# delete unneeded variables from daily database
# NOTE(review): several names here ("aod_047.x", "c.y", "m") may already have
# been renamed/removed above -- verify against the live column set, as
# data.table errors on deleting a missing column.
PM10.m1_D[,c("aod_047.x","aod_055","nearest.x","Temp_H","WS_H" ,"RH_H","Rain_H","NO2_H" ,"SO2_H","PM25_D_closest","PM25_D_mean","PM25_IDW","PM25_H_mean","PM10_H_mean","PM10_D_closest","PM10_IDW","vc_H","V1_200m","c.y","lon_200m","lat_200m","m"):=NULL]
setnames(PM10.m1_D,"c.x","c")
# Save RDS files
saveRDS(PM10.m1_D,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod1/mod1.TR.2006.PM10_Daily.rds")
# saveRDS(PM10.m1_D,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod1/mod1.TR.2006.PM10_Daily.rds")
### PM10 mod1 Hourly
# hourly terra database
# PM10 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/TERRA_Hourly_data_May16/PM10_H.csv")
# hourly aqua database
PM10 <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Meteorological_Data/Hourly_data/AQUA_Hourly_data_May16/PM10_H.csv")
# build a Date column (day) and numeric year (c), then drop the raw parts
PM10$date<-paste(PM10$Day,PM10$Month,PM10$Year,sep="/")
PM10[, day:=as.Date(strptime(date, "%d/%m/%Y"))]
PM10[, c := as.numeric(format(day, "%Y")) ]
PM10[,c("Year","Month","Day","date"):=NULL]
PM10 <- PM10[X != 'NaN']   # drop monitors with no coordinates
# PM10 here refers to the measurement column, not the table itself
PM10<-PM10[!is.na(PM10)]
# Add field classification (General or transportation)
PM_Type <- fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Qgis/Joins/PM_monitors_classification/PM_monitors.csv")
setnames(PM_Type,"Code","stn")
PM_Type$stn=substr(PM_Type$stn, 2, 4)
PM10=left_join(PM10,PM_Type,by="stn")
PM10=as.data.table(PM10)
PM10[,c("Name","Region","X.y","Y.y","Long","Lat","HASL","HAGL","Parameters"):=NULL]
# station type flag: 1 = general, 0 = transportation, 2 = unclassified
PM10$stn_type<-0
PM10[Type=="'Gener'",stn_type:=1]
PM10[Type=="'Trans'",stn_type:=0]
PM10[Type=="'NaN'",stn_type:=2]
#clear non continous stations
pmall2006<- PM10[c==2006]   # keep the modelling year only
setnames(pmall2006,"X.x","x_stn_ITM")
setnames(pmall2006,"Y.x","y_stn_ITM")
#--------->mod1
#PM10
# ADD AOD 055
# nearest-neighbour spatial join of AOD onto station-days (same helpers as above)
jointo.pt <- makepointsmatrix(datatable = pmall2006,
xvar = "x_stn_ITM", yvar = "y_stn_ITM", idvar = "stn")
joinfrom.pt <- makepointsmatrix(datatable = db2006,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = pmall2006 , joinfrom = db2006,
jointovarname = "stn", joinfromvarname = "aodid",
joinprefix = "nearest", valuefield = "aod_055",
knearest = 9, maxdistance = 1100,
nearestmean = TRUE, verbose = T)
setkey(pmall2006,stn,day)
setkey(joinout,stn,day)
PM10.m1 <- merge(pmall2006, joinout, all.x = T)
PM10.m1<-PM10.m1[!is.na(aod_055)]   # keep only station-days with an AOD retrieval
setnames(PM10.m1,"nearestmean", "aod_055_mean")
PM10.m1[,nearestknn:=NULL]
PM10.m1[,nearestnobs:=NULL]
PM10.m1[,c.y:=NULL]
setnames(PM10.m1,"c.x", "year")
# ADD AOD 047
# slim copy of the AOD table with just the 0.47 um band for the second join
db2006_s=db2006[, c("aodid","x_aod_ITM","y_aod_ITM","aod_047","day"), with = FALSE]
jointo.pt <- makepointsmatrix(datatable = PM10.m1,
xvar = "x_stn_ITM", yvar = "y_stn_ITM", idvar = "stn")
joinfrom.pt <- makepointsmatrix(datatable = db2006_s,
xvar = "x_aod_ITM", yvar = "y_aod_ITM", idvar = "aodid")
joinout <- nearestbyday(jointo.pts = jointo.pt, joinfrom.pts = joinfrom.pt,
jointo = PM10.m1 , joinfrom = db2006_s,
jointovarname = "stn", joinfromvarname = "aodid",
joinprefix = "nearest", valuefield = "aod_047",
knearest = 9, maxdistance = 1100,
nearestmean = TRUE, verbose = T)
setkey(PM10.m1,stn,day)
setkey(joinout,stn,day)
PM10.m1<- merge(PM10.m1, joinout, all.x = T)
# tidy the .x/.y suffixes the merge introduced
setnames(PM10.m1,"nearestmean", "aod_047_mean")
setnames(PM10.m1,"aod_047.x", "aod_047")
PM10.m1[,c("nearest.x","nearestknn","nearestnobs","x_aod_ITM.y", "y_aod_ITM.y", "aod_047.y","nearest.y"):=NULL]
# ---- PM10 hourly mod1: join 200 m spatial covariates and save ----
# add 200 m key field to the database (station -> 200 m grid-cell id)
key_field=fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Qgis/Joins/PM25_stn_200m_keytable_id/PM25_stn_200m_keytable_id.csv")
key_field=as.data.table(key_field)
# grid-cell id is the "X-Y" coordinate pair, matching lu_200m's key below
key_field$Key200_id <- paste0(key_field$POINT_X,"-",key_field$POINT_Y)
setnames(key_field, "Code","stn")
key_field$stn=substr(key_field$stn,2,4)   # strip quoting; keep the 3-char station code
key_field=key_field[,.(stn,Key200_id)]
setkey(PM10.m1,stn)
setkey(key_field,stn)
PM10.m1= merge(PM10.m1,key_field,all.x = T)
# 200 m land-use covariates, suffixed "_200m" to avoid name clashes
lu_200m=fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/Keytables/200m_grid/200m_grid_spatial_Data.csv")
setnames(lu_200m,"X_Y","Key200_id")
lu_200m$V1=NULL
colnames(lu_200m) <- paste(colnames(lu_200m),"200m", sep = "_")
setnames(lu_200m,"Key200_id_200m","Key200_id")   # undo the suffix on the key column
setkey(lu_200m,Key200_id)
setkey(PM10.m1,Key200_id)
PM10.m1 <- merge(PM10.m1, lu_200m, all.x = T)
PM10.m1[,c("X_ITM_200m","Y_ITM_200m"):=NULL]
setnames(PM10.m1,"aod_047.x" ,"aod_047")
PM10.m1_H=PM10.m1   # reference copy: the := deletion below also affects PM10.m1
# delete unneeded variables from hourly database
# FIX: the original used `PM10.m1_H[, c(...)] = NULL`, which is not valid
# data.table syntax for dropping columns; use `:= NULL` as every other
# deletion in this script does.
# NOTE(review): "aod_047.x" was renamed to "aod_047" two statements up, so it
# may no longer exist here -- confirm against the live column set.
PM10.m1_H[,c("aod_047.x","aod_055","nearest.x","Temp_D","WS_D" ,"RH_D","Rain_D","NO2_D","SO2_D","PM25_D_closest","PM25_D_mean","PM25_IDW","PM25_H_mean","PM10_H_mean","PM10_D_closest","PM10_IDW","vc_D"):=NULL]
saveRDS(PM10.m1_H,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod1/mod1.TR.2006.PM10_Hourly.rds")
# saveRDS(PM10.m1_H,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod1/mod1.TR.2006.PM10_Hourly.rds")
# PM10.m1=readRDS("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod1/mod1.TR.2006.PM10_Daily.rds")
# PM10.m1=readRDS("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod1/mod1.TR.2006.PM10_Hourly.rds")
# ## Exploring mod 1
#
# summary(PM25.m1$aod_055_mean)
# PM25.m1<-filter(PM25.m1,aod_055_mean < 1.5)
# # basic correlation
# summary(lm(PM25.m1$PM25~PM25.m1$aod_047_mean))
# summary(lm(PM25.m1$PM25~PM25.m1$aod_055_mean))
# plot(PM25.m1$PM25~PM25.m1$aod_055_mean,type= "p",ylim=c(0,300),ylab="PM2.5",xlab="AOD",main="PM2.5 ~ AOD_055")
#
# # correlation after filtering
# PM25.m1<-filter(PM25.m1,UN >0 & UN <0.04)
# PM25.m1=filter(PM25.m1,aod_055_mean < 1.5)
# PM25.m1_fs=filter(PM25.m1,FS_BS !=0)
# summary(lm(PM25.m1_fs$PM25~PM25.m1_fs$aod_055_mean))
# plot(PM25.m1_fs$PM25~PM25.m1_fs$aod_055_mean,type= "p",ylim=c(0,300),ylab="PM2.5",xlab="AOD",main="PM2.5 ~ AOD_055_FS")
# plot(PM25.m1_fs$PM25~PM25.m1_fs$aod_055_mean,type= "p",ylim=c(0,300),ylab="PM2.5",xlab="AOD",main="PM2.5 ~ AOD_055_BS")
#
# # Correlation between PM~ AOD in each azimuth bin (steps of 10)
# R2=c()
# len=c()
# for (i in 1:10)
# {
# PM25.m1_fs=filter(PM25.m1,FS_BS == i)
# if (nrow(PM25.m1_fs)!=0)
# {
# a=round(summary(lm(PM25.m1_fs$PM25~PM25.m1_fs$aod_055_mean))$r.squared,2)
# R2=c(R2,a)
# b=nrow(PM25.m1_fs)
# len=c(len,b)
# }
# else
# {
# a=NA
# b=0
# R2=c(R2,a)
# len=c(len,b)
# }
# }
#
# print(R2)
# print(len)
# res=data.frame(R2,len)
# plot(R2)
# write.csv(res,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/results/pm_aod_sensetivity_to_BS/2006_sen.csv")
#
# # Correlation between PM~ AOD in each azimuth bin (steps of 30)
# R2=c()
# len=c()
# for (i in 1:4)
# {
# PM25.m1_fs=filter(PM25.m1,FS_BS_2 == i)
# if (nrow(PM25.m1_fs)!=0)
# {
# a=round(summary(lm(PM25.m1_fs$PM25~PM25.m1_fs$aod_055_mean))$r.squared,2)
# R2=c(R2,a)
# b=nrow(PM25.m1_fs)
# len=c(len,b)
# }
# else
# {
# a=NA
# b=0
# R2=c(R2,a)
# len=c(len,b)
# }
# }
#
# print(R2)
# print(len)
# res=data.frame(R2,len)
# plot(R2)
# write.csv(res,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/results/pm_aod_sensetivity_to_BS/2006_sen_2.csv")
#
# sen=fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/results/pm_aod_sensetivity_to_BS/sens_all_years.csv",header = TRUE)
# sen=fread("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/results/pm_aod_sensetivity_to_BS/sens_all_years_2.csv",header = TRUE)
# plot(sen$`2010`, col="RED",ylim=c(0,0.6),xlab="azimuth_bin",ylab="R2",pch=16)
# points(sen$`2006`, col="BLUE",ylim=c(0,0.6),xlab="azimuth_bin",ylab="R2",pch=16)
# points(sen$`2012`, col="GRAY",ylim=c(0,0.6),xlab="azimuth_bin",ylab="R2",pch=16)
# points(sen$`2013`, col="BLACK",ylim=c(0,0.6),xlab="azimuth_bin",ylab="R2",pch=16)
# points(sen$`2006`, col="ORANGE",ylim=c(0,0.6),xlab="azimuth_bin",ylab="R2",pch=16)
# legend(3.5,0.6,c("2010","2006","2012","2013","2006"),pch=16,col=c("RED","BLUE","GRAY","BLACK","ORANGE"))
#
# ################### cleaning mod 1
# mod1=PM25.m1
# mod1=as.data.table(mod1)
# setnames(mod1,"PM25", "pm25")
# setnames(mod1,"aod_055_mean", "aod")
#
# #filter nasa
# mod1<-filter(mod1,UN >0 & UN <0.04)
#
# #massimos thresholds
# x<-select(mod1,aod,stn)
# x$c<-1
# x <- x %>%
# group_by (stn) %>%
# summarise(saod=sum(c))
# #merge back count
# setkey(x,stn)
# setkey(mod1,stn)
# mod1 <- merge(mod1,x, all.x = T)
#
# mod1$exobs<-0
# mod1<-mod1[aod < quantile(aod, c(.50)) & pm25 > quantile(pm25, c(.90)), exobs := 2]
# mod1<-mod1[aod > quantile(aod, c(.90)) & pm25 < quantile(pm25, c(.50)), exobs := 3]
# mod1<-mod1[aod > 1.8 , exobs := 4]
# mod1<-mod1[saod < 30 , exobs := 5]
#
# #take out bad exobs
# mod1<-filter(mod1,exobs==0)
# # saveRDS(mod1,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod1_clean/mod1.AQ.2006.PM25.clean.rds")
# saveRDS(mod1,"/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod1_clean/mod1.TR.2006.PM25.clean.rds")
#
#
# ### simple linear modeling
# summary(lm(mod1$pm25~mod1$aod))
# plot(mod1$pm25~mod1$aod,type= "p",ylim=c(0,300),ylab="PM2.5",xlab="AOD",main="PM2.5 ~ AOD_055_clean")
#
# ### Mixed effect modeling
# m1.formula <- as.formula(pm25~aod+(1+aod|day))
# m1_sc <- lmer(m1.formula,data=mod1)
# mod1$pred.m1 <- predict(m1_sc)
# #check fits of model
# print(summary(lm(pm25~pred.m1,data=mod1))$r.squared)
|
853329b61d398215bbcc8cc2a5148ddeda8af939 | 4178c7382aafec74fee5d49f12312ed0fe61eae3 | /fastai_tabular_learner.R | 160622320162dc89a5369648d6edcf3baa085a97 | [] | no_license | DSOTM-RSA/octagon | f16eb400d44c83369d29b0b605091b5522050987 | 91adf83c2a6453f4bfb1e2cb1976807460c60723 | refs/heads/main | 2023-03-15T20:47:37.887123 | 2021-03-04T22:08:47 | 2021-03-04T22:08:47 | 323,157,234 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,354 | r | fastai_tabular_learner.R | # TREE MBASED MODELS
library(tidyverse)
library(caret)
setwd("C:/Users/eoedd/Desktop/locstore/projects/_octagon")
df_caret = data.table::fread('2021-01-31_modelset_selected.csv',drop = c('V1','Method.x'))
# make a binary problem (near-zero-variance predictors are removed later by
# the "nzv" step of preProcess below)
df_caret %>% filter(Result.x=="win" | Result.x =="loss") %>%
select(-Fighter.x, -Opponent.x) -> df_caret_bin
# split into training and test data (stratified on the outcome)
inTraining <- createDataPartition(df_caret_bin$Result.x, p = .70, list = FALSE)
training <- df_caret_bin[ inTraining,]
testing <- df_caret_bin[-inTraining,]
# basic summary
str(df_caret_bin)
summary(df_caret_bin)
# pre-process for this model
# NOTE(review): preProcess is fitted on the FULL dataset (df_caret_bin), so
# test-set information leaks into the transformation; fitting on `training`
# only would be the cleaner protocol.
preProcessSet <- preProcess(df_caret_bin,
method = c("center","scale","YeoJohnson","nzv"))
preProcessSet
# apply to data
trainTransformed <- predict(preProcessSet,training)
testTransformed <- predict(preProcessSet,testing)
# define training regime: 5-fold CV repeated 5 times, class probabilities
# enabled so the ROC summary can be computed
plsCtrl <- trainControl(
method = "repeatedcv",
repeats = 5,
number=5,
classProbs = TRUE,
summaryFunction = twoClassSummary
)
# train model (partial least squares; tuneLength=5 tries up to 5 components)
set.seed(111)
plsFit <- train(Result.x~.,
data = trainTransformed,
method="pls",
tuneLength=5,
trControl = plsCtrl,
metric="ROC")
plsFit
# predict on the held-out test set
plsClasses <- predict(plsFit, newdata = testTransformed)
plsProbs <- predict(plsFit, newdata = testTransformed, type = "prob")
head(plsProbs)
# check performance
confusionMatrix(data=plsClasses, as.factor(testTransformed$Result.x),positive = "win")
# gbm native
library(gbm)
set.seed(111)
# cast the two categorical columns to factors and derive a 0/1 target
# (Result = 1 for a win) for gbm's default bernoulli-style loss
training %>% mutate(FinishPrevious.x = as.factor(FinishPrevious.x),
FinishPrevious.y = as.factor(FinishPrevious.y),
Result = case_when(Result.x == "win" ~1, TRUE~0)) ->training
# NOTE(review): Result.x (the string outcome Result was derived from) is still
# among the predictors of Result~., and training[,-1] only removes the first
# column -- verify the intended predictor set, as this looks like target leakage.
gbmFit<- gbm(Result~.,
data=training[,-1],
n.trees = 2000,
interaction.depth = 3,
shrinkage = 0.001,
cv.folds = 5,
n.cores = 3)
gbmFit
gbm.perf(gbmFit,method = "cv")   # CV-optimal number of trees
pretty.gbm.tree(gbmFit)
# NOTE(review): `testing` never received the factor conversions applied to
# `training` above -- confirm predict() behaves as intended on raw columns.
predict.gbm(gbmFit, newdata = testing[,-1], type = "response")
# Initialize Libraries
library(reticulate)
use_condaenv("pipenv")   # fastai runs via Python in the "pipenv" conda environment
library(tidyverse)
library(fastai)
# Read in src
df = data.table::fread('rawset.csv')
# binary label: Outcome.x == 1 -> "Win", anything else (incl. NA) -> "Loss"
df %>% dplyr::mutate(Y = case_when(Outcome.x == 1 ~ "Win", TRUE ~ "Loss")) -> df
# URLs_ADULT_SAMPLE()
# df = data.table::fread('adult_sample/adult.csv')
# Define Dependent Var
dep_var = 'Y'
# Define Categorical Vars
# One .x/.y pair per fighter-level feature.
# FIX: the original listed 'FinishPrevious.x' twice; the second entry should be
# 'FinishPrevious.y' to match the .x/.y pairing used for every other feature.
cat_names = c('FinishCount.x','FinishCount.y',
'FinishedCount.x','FinishedCount.y',
'CountFights.x','CountFights.y',
'StreakLength.x','StreakLength.y',
'PreviousOutcome.x','PreviousOutcome.y',
'FinishPrevious.x','FinishPrevious.y')
# Define Continuous Vars
cont_names = c('WinRatio.x','WinRatio.y',
'FinishRatio.x','FinishRatio.y',
'FinishedRatio.x','FinishedRatio.y',
'DamageDiff.x','DamageDiff.y')
# Define Pre-processing
procs = list(FillMissing(),Categorify(),Normalize())
# Define Data Loader
# rows 1:2000 train, 2001:2786 validation; mini-batches of 64
dls = TabularDataTable(df, procs, cat_names, cont_names,
y_names = dep_var,splits = list(c(1:2000),c(2001:2786))) %>%
dataloaders(bs=64)
# Learn Model (two hidden layers of 200 and 100 units)
model = dls %>% tabular_learner(layers=c(200,100), metrics=accuracy)
model %>% summary()
model %>% lr_find()
model %>% plot_lr_find(dpi = 200)
model %>% fit(15,lr = 10^-2)   # 15 epochs at lr = 0.01
model %>% plot_loss(dpi = 200)
# View Perf
model %>% get_confusion_matrix()
interp = ClassificationInterpretation_from_learner(model)
interp %>% plot_confusion_matrix(dpi = 90,figsize = c(6,6))
# Prediction
model %>% predict(df[255:265,])
# score every row (the 0 index is silently dropped by R) and record row numbers
model %>% predict(df[0:2786,]) %>% mutate(RC=row_number()) -> outcomes
# keep only confident predictions; & binds tighter than |, so this reads as
# (Win > 0.9 AND predicted class 1) OR (Loss > 0.9 AND predicted class 0)
outcomes %>% filter(., Win >0.9 & class == 1 | Loss > 0.9 & class == 0) ->outcomes_we
df %>% left_join(outcomes_we,by=c("V1"="RC")) -> df_fit
df_fit %>% na.omit() -> df_fit   # rows without a confident prediction drop out here
df_fit %>% dplyr::mutate(Y = case_when(Outcome.x == 1 ~ "Win", TRUE ~ "Loss")) -> df_fit
####
write.csv(df_fit,"model_validation.csv")
|
dbdc5076de91630f077f963743ce94c2ace41a3b | 2045d6ea6e9f3faa4dd4d2bcc0c6542f0a8454e3 | /hydroinfomatics/data modeling/data_splitting.R | 9bea1528198fee285cf1fb7009a85af5c105ddf9 | [] | no_license | chrimerss/CourseMaterials | 7c1740d4460e5a23b0ab08bc49f3af5d2ec0ce33 | 43fa4187328409a48675e4cfa9c387c640af9009 | refs/heads/master | 2021-07-06T18:30:27.597444 | 2020-10-08T16:07:13 | 2020-10-08T16:07:13 | 192,747,645 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,617 | r | data_splitting.R | # data splitting
# Content below is from https://ragrawal.wordpress.com/2012/01/14/dividing-data-into-training-and-testing-dataset-in-r/
# During machine learning one often needs to divide the two different data sets, namely training and testing datasets.
# While you can't directly use the "sample" command in R, there is a simple workaround for this.
# Essentially, use the "sample" command to randomly select certain index number and then use the selected index numbers to divide the dataset into training and testing dataset.
# Below is the sample code for doing this. In the code below I use 20% of the data for testing and rest of the 80% for training.
# By default R comes with few datasets.
data = mtcars
dim(data) # 32 11
#Sample Indexes
# draw 20% of the row indices at random (without replacement)
indexes = sample(1:nrow(data), size=round(0.2*nrow(data)))
# Split data
test = data[indexes,]
dim(test) # 6 11
train = data[-indexes,]   # complement of the sampled rows
dim(train) # 26 11
# Sometimes, we need to divide the data frames into three groups, trainning data, validation data, and testing data
# say, we need to divide the data into 0.6 trainning data, 0.2 validation data, 0.2 testing data
nr<-dim(data)[1]
# shuffle the data
my_data<-data[sample.int(nr),] # if we just want to select the first 60% as training data, the subsequent 20% and 20% as validation and testing data,respectively, then we do not need to do the shuffling.
train_index=round(nr*0.6); # training index
val_index=round(nr*(0.6+0.2)); # validation index
# the remaining is testing index
# contiguous slices of the shuffled rows give the 60% / 20% / 20% split
train_data<-my_data[1:train_index,]
val_data<-my_data[(train_index+1):val_index,]
test_data<-my_data[(val_index+1):nr,]
|
82f44624d7ddb42d15426322076dfa7c7fb19bf2 | f7e62d90fe45c48f379b3c575bc7dfcfad748b4f | /Moment Drive Trading Strategies/Head and Shoulder Patter.r | 134bd001a81172321035f98b4654de57cc12b266 | [] | no_license | KshitizSharmaV/Computational-Financial-Engineering-in-R | 3531afa4fe04bd43fa1e953b3b3317035e197b57 | 7ab537e861846d039d7e91b1a9662090bc903078 | refs/heads/master | 2021-04-27T10:24:47.422550 | 2018-07-09T15:16:27 | 2018-07-09T15:16:27 | 122,537,704 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,405 | r | Head and Shoulder Patter.r | # Author Kshitiz Sharma
# Permission is granted to use this code henceforth.
# No support or warranty is provided if the code generates errors for you.
# This code is for the head-and-shoulders pattern only; you need to tweak variables for the inverse HS pattern.
######################################################
# Head Shoulder Patterns
# Substitute over here your price put prices = your closing prices
# Prices is your time series for your closing prices
# Attach your closing-price series here; everything below operates on `prices`.
prices <- time.series
# Give your time series above
# Using SM Library to smooth the prices using Kernel Regression estimation
library(sm)
t = seq_along(prices)   # time index 1..n (seq_along is safe even for empty input)
# fit kernel regression with cross-validation
# t/1.3 gives a smaller bandwidth; the smaller the bandwidth, the more
# local maxima and minima the smoothed curve will have.
# Refer Here for more http://faculty.washington.edu/yenchic/17Sp_302/R12.pdf
h = h.select(t/1.3, prices, method = 'cv')
temp = sm.regression(t, prices, h=h, display = 'none')
# Evaluate the smoothed fit back on the original time grid.
# FIX: approx() returns a list with components $x and $y; the original
# extracted $prices, which does not exist and silently yielded NULL.
mhat = approx(temp$eval.points, temp$estimate, t, method='linear')$y
# second step is to find local extrema, tops and bottoms, using the first
# derivative of the kernel regression estimator (sign changes of the slope).
temp = diff(sign(diff(mhat)))
# loc = indices of extrema; loc.dir = +1 at a local maximum, -1 at a local minimum
loc = which( temp != 0 ) + 1
loc.dir = -sign(temp[(loc - 1)])
# ---- Scan the extrema for head-and-shoulders candidates ----
# Each candidate window is five consecutive smoothed extrema
# E1..E5 = left shoulder, left trough, head, right trough, right shoulder.
patterns.price = data.frame(E1=double(),E2=double(),E3=double(),E4=double(),E5=double())
patterns.position = data.frame(E1=double(),E2=double(),E3=double(),E4=double(),E5=double())
# FIX: the original `1:(length(loc)-5)` looped backwards when fewer than six
# extrema were found, and its -5 bound meant the final extremum could never be
# used as E5 (off-by-one). seq_len() with length(loc)-4 handles both.
for(i in seq_len(max(0L, length(loc) - 4L))){
E1=prices[loc[i]]
E2=prices[loc[i+1]]
E3=prices[loc[i+2]]
E4=prices[loc[i+3]]
E5=prices[loc[i+4]]
skip=0   # 0 = window still valid; set by the (disabled) alternation check below
# Check that consecutive extrema alternate max/min; disabled in the original.
# NOTE(review): if re-enabled, the inner body must set `skip=1` (the original
# commented code set a different variable than the flag tested below).
#for(j in 1:4){
# a=loc.dir[j+i]
# b=loc.dir[(j+1)+i]
## if((a*b)>0){
# skip=1
# }
#}
if(skip==0){
avg.top = (E1 + E5) / 2   # shoulder reference level
avg.bot = (E2 + E4) / 2   # neckline reference level
if(E3>E2 & E3 > E4){   # head above both troughs
if(E3>E1 & E3>E5){   # head above both shoulders
print(E1)
print(E2)
print(E3)
print(E4)
print(E5)
print("asas")
# shoulders within 1.5% of their average, troughs within 1.5% of theirs
if((abs(E1 - avg.top) < (1.5/100 * avg.top)) & (abs(E5 - avg.top) < (1.5/100 * avg.top))){
if((abs(E2 - avg.bot) < (1.5/100 * avg.bot)) & ((abs(E4 - avg.bot) < 1.5/100 * avg.bot))){
# record the pattern: prices of E1..E5 and their time indices
patterns.price=rbind(patterns.price,data.frame(E1=E1,E2=E2,E3=E3,E4=E4,E5=E5))
patterns.position=rbind(patterns.position,data.frame(E1=loc[i],E2=loc[i+1],E3=loc[i+2],E4=loc[i+3],E5=loc[i+4]))
}
}
}
}
}
}
patterns.price
patterns.position
# ---- Visualise the first detected pattern and simulate the short trade ----
plot(1:length(prices),prices,type="l")
par(new=TRUE)
# E1..E5 prices (l) and their time indices (f) for the first detected pattern
l=c(patterns.price[1,1],patterns.price[1,2],patterns.price[1,3],patterns.price[1,4],patterns.price[1,5])
f=c(patterns.position[1,1],patterns.position[1,2],patterns.position[1,3],patterns.position[1,4],patterns.position[1,5])
points(f,l,type="o",col="blue")
par(new=TRUE)
# neckline: average of the two troughs (E2 and E4)
cutoff = (patterns.price[1,2]+patterns.price[1,4])/2
abline(h=cutoff,col="red")
par(new=TRUE)
# pattern height: head (E3) minus neckline; used to project the price target
patter_height = patterns.price[1,3] - cutoff
# The Pattern Height is 3.62
# Walk forward from the right shoulder; enter short on a neckline break.
# FIX: the original assigned `prices = prices[t]` inside the loop, destroying
# the price vector on the first iteration (and making `prices[t]` NA for any
# later break). Use a scalar temporary instead.
for(t in patterns.position[1,5]:length(prices)){
p = prices[t]
if(p<cutoff){
print("Enter the short Position")
price_shorted_at=p
break
}
}
# classic measured-move target: entry price minus the pattern height
target_price = price_shorted_at - patter_height
target_price
# The Target Price is 8.96
abline(h=target_price,col="blue")
|
fc20bc12ae6f8e83bf859bee3a331817bca5a213 | 085851f73e9d5a279d17bb080fb3b546acea0f2e | /app.R | 6c0a84e91a2cd8dbb3c2e3a7244e168b5a47c5c3 | [] | no_license | eunheelily/EnergyChoice | 2eb61f6e8df532460d97b92cdb9dbbb8eadc8e87 | 42feb3c2b3f8b4984d75ec02a244b8273ab63f46 | refs/heads/master | 2021-04-28T14:45:54.324188 | 2018-05-02T01:42:47 | 2018-05-02T01:42:47 | 121,974,518 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 37,779 | r | app.R |
library(shiny)
library(readr)
library(shinydashboard)
library(reshape2)
library(ggplot2)
library(bsplus)
# separate normal distribution
# calculate the environmental benefits, health benefits.
#reactive?
# Define UI for app that draws a bar graph ----
# Dashboard UI: three tabs -- "Model Overview" and "User Guide" are static
# documentation pages; "Toolkit" holds all model inputs (sidebarPanel) and
# the results tables/plots (mainPanel).
ui <- dashboardPage(
dashboardHeader(title="EV Incentive Program Toolkit"),
dashboardSidebar(
sidebarMenu(
menuItem("Model Overview", tabName = "tab_1"),
menuItem("User Guide", tabName = "tab_2"),
menuItem("Toolkit", tabName = "tab_3")
)),
dashboardBody(tabItems(
# Tab 1: static description of the model, how to use it, and its outputs.
tabItem(tabName = "tab_1",
fluidPage(h3("Model Overview"),
box(width = 12, h4("Introduction"),p("This is a tool meant to help Community Choice Energy Agencies predict the costs and benefits associated with offering an incentive program to subsidize residents’ purchases of battery electric vehicles (BEVs) or plug-in hybrid electric vehicles (PHEVs). Based on incentive amount, total budget, and a variety of other program and agency specifications, the model predicts the number of vehicle purchases that will be directly caused by an incentive program, then calculates associated greenhouse gas (GHG) emissions reductions and health impacts.")
),
box(width = 12, h4("Using the Toolkit"),p("To use this model, at a minimum, users will need to enter values into the Primary Inputs section of the Toolkit tab. This section includes: Agency (Region), Total Incentives Budget, Year, Electric Vehicele (BEV) Incentive, Plug-in Hybrid (PHEV) Incentive, and Energy mix, and incentive amounts. There are a variety of additional inputs that allow users to users add further program specifications as appropriate. A detailed breakdown of all available input options is included in the User Guide tab.")
),
box(width = 12, h4("Results"), p("Once the user has filled in the appropriate inputs and run the model, results will be displayed on the right-hand side of the Toolkit tab. The main results show the predicted participation in the incentive program for EV and PHEV incentives. These results show the total number of predicted incentives redeemed, and the predicted number of sales directly caused by the incentive. The model then displays predicted health, greenhouse gas, and monetary impacts associated with the incentive program. ")
))) ,
# Tab 2: static explanations of every input group (primary inputs,
# incentive details, program details).
tabItem(tabName = "tab_2",
fluidPage(h3("User Guide"),
box( width = 12, h4("Primary Inputs"),p("These inputs represent the minimum amount of information necessary to run the model. They are:"),
br(),tags$div(tags$ul(tags$li("Agency (Region): Which CCE Agency will be running the program. The model uses this) information to set the correct population level and predict local emissions impacts."),
tags$li("Total Incentives Budget: The total available budget for providing EV and PHEV incentives."),
tags$li("Year: The year that the incentives program will run."),
tags$li("Electric Vehicle (BEV) Incentive: The dollar amount that the agency will offer for each electric vehicle purchase."),
tags$li("Plug-in Hybrid (PHEV) Incentive: The dollar amount that the agency will offer for each plug-in hybrid purchase."),
tags$li("Energy mix: These values specify the composition of the energy mix that is used to charge electric vehicles."), style = "font-size: 13px"))), box( width = 12, h4("Incentive Details"),p("These allow agencies to add further details to their incentive offerings. These are included with
general default values that can be altered if necessary to match the agency’s needs."), br(),tags$div(tags$ul(tags$li("Include incentive for High end BEV and luxury PHEV: These are Yes/No inputs set at No by default. If switched to Yes, the model will include Tesla and luxury plug-in hybrid vehicles among those that receive their respective incentives."),
tags$li("Federal Tax Credit Availability/Clean Vehicle Rebate Project Availability: These are Yes/No inputs set at Yes by default. If switched to No the model will remove that credit or rebate from its calculations of vehicle cost."),
tags$li("Additional Discount EV/Plug-in: These inputs give the user the option to add additional discounts on the cost of BEVs or PHEVs. These are not included in the agency’s overall program costs and may represent discounts offered by vehicle dealers or manufacturers. They benefit the customer but are not costs incurred by the agency.")), style = "font-size: 13px")),
box( width = 12, h4("Program Details"), p("These allow the user to add details about their program, including administrative costs and program length. Defaults are provided based on the pilot incentive program that Sonoma Clean Power ran in 2016. Inputs include:"), br(), tags$div(tags$ul(tags$li("Program length: The number of months that the incentive program will run, with the default set at 12."), tags$li("Number of staff required: The number of full-time employees needed to run the program."), tags$li("Administrative costs per person: The salary and administrative costs per full time employee working on the program."), tags$li("Additional implementation costs: Any additional costs to run the program that the user anticipates. Defaults have been set based on the costs to run Sonoma Clean Power’s pilot EV program."),tags$li("Percent Net Revenue: This input allows the user to set the portion of electricity sales that goes to revenues with a default set at 10%."), tags$li("Marketing effectiveness: This input represents a way to account for the role of marketing on influencing program effectiveness. The user may input the percentage of eligible customers they expect will be aware of the program being offered. This percentage directly modifies the predicted number of rebates redeemed. Because this only modifies the number of people aware of available discounts, it does not take into account marketing that changes the likelihood of customers taking advantage of the discounts (i.e. marketing that is more or less persuasive).", footer = NULL, status = NULL,solidHeader = FALSE, background = NULL, height = NULL, collapsible = FALSE, collapsed = FALSE)), style = "font-size: 13px")
))),
# Tab 3: the interactive toolkit. Every input id used here is read by the
# server's TCM()/BC() reactives (e.g. input$Agency, input$Budget, ...).
tabItem(tabName ="tab_3",
fluidPage(
titlePanel("To get results, click Calculate button and wait"),
sidebarLayout(
sidebarPanel(
selectInput(inputId="Agency", "Agency (region)",
choices = list("Apple Valley" = "Apple Valley", "San Francisco" = "San Francisco", "Lancaster" = "Lancaster", "MCE" ="MCE", "Peninsula"="Peninsula", "Redwood Coast"="Redwood Coast", "Silicon Valley"="Silicon Valley", "Sonoma"="Sonoma"), selected = "Sonoma")%>%
shinyInput_label_embed(
shiny_iconlink() %>%
bs_embed_tooltip(
title = "Which CCE Agency will be running the program. The model uses this information to set the correct population level and predict local emissions impacts.",placement = "right")),
numericInput(inputId ="Budget", "Total Incentive Budget ($)",
value = 1500000)%>%
shinyInput_label_embed(
shiny_iconlink() %>%
bs_embed_tooltip(
title = "The total available budget for providing EV and PHEV incentives.",placement = "right")),
selectInput(inputId="Year","Year from 2016 to 2030",
choices = list(2016,2017,2018,2019,2020,2021,2022,2023, 2024,2025,2026,2027,2028,2029,2030), selected = 2017)%>%
shinyInput_label_embed(
shiny_iconlink() %>%
bs_embed_tooltip(
title = "The year that the incentives program will run.",placement = "right")),
numericInput(inputId ="EV_rebate","Electric Vehicle (BEV) Incentive", value = 2000)%>%
shinyInput_label_embed(
shiny_iconlink() %>%
bs_embed_tooltip(
title = "The dollar amount that the agency will offer for each electric vehicle purchase.",placement = "right")),
numericInput(inputId ="PHEV_rebate", "Electric Vehicle (PHEV) Incentive", value = 1000)%>%
shinyInput_label_embed(
shiny_iconlink() %>%
bs_embed_tooltip(
title = "The dollar amount that the agency will offer for each plug-in hybrid purchase.",placement = "right")),
# Energy-mix percentages (Energymix1..10) should sum to 100; the server
# divides each by 100 and weights emission factors with them.
numericInput(inputId ="Energymix1", "Energy Mix - Coal (%)",
value = 0),
numericInput(inputId ="Energymix2", "Energy Mix - Natural Gas (%)",
value = 0),
numericInput(inputId ="Energymix3","Energy Mix - Geothermal (%)",
value = 8),
numericInput(inputId ="Energymix4","Energy Mix - Petroleum (%)",
value = 0),
numericInput(inputId ="Energymix5","Energy Mix - Large Hydro (%)",
value = 49),
numericInput(inputId ="Energymix6","Energy Mix - Biomass (%)",
value = 0),
numericInput(inputId ="Energymix7", "Energy Mix - Biogas (%)",
value = 0),
numericInput(inputId ="Energymix8", "Energy Mix - Eligible Renewable (%)",
value = 33),
numericInput(inputId ="Energymix9","Energy Mix - Nuclear (%)",
value = 0),
numericInput(inputId ="Energymix10", "Energy Mix - Other (%)",
value = 10),
# Yes/No toggles are encoded as 1 = Yes, 2 = No (the server compares == 1).
selectInput(inputId ="Lux_BEV", "Include incentive for High-end BEV (e.g. Tesla)", choices = list("Yes"=1, "No"=2), selected = 2),
selectInput(inputId ="Lux_PHEV", "Include incentive for Luxury PHEV (e.g. Audi A3 e-tron)", choices = list("Yes"=1, "No"=2), selected = 2),
selectInput(inputId ="Fed", "Federal Tax Credit Availability", choices = list("Yes"=1, "No"=2), selected = 1),
selectInput(inputId ="CVRP", "Clean Vehicle Rebate Project (CVRP) Availability", choices = list("Yes"=1, "No"=2), selected = 1),
numericInput(inputId ="Discount_EV","Additional discount BEV (e.g. dealer discount)",
value = 0),
numericInput(inputId ="Discount_PHEV", "Additional discount PHEV (e.g. dealer discount)",
value = 0),
numericInput(inputId ="Length", "Program Length (month)",
value = 4),
numericInput(inputId ="Staff", "Number of staff reqired",
value = 5),
numericInput(inputId ="Admincost", "Administrative Cost ($/person/year)",
value = 124000),
numericInput(inputId ="Impcost", "Additional Implementation Costs ($)",
value = 80000),
sliderInput(inputId ="Profit", "Profit portion (%)",
min = 0, max = 100, value = 10),
sliderInput(inputId ="Marketing", "Marketing Effectiveness (%)",
min = 0, max = 100, value = 50),
numericInput(inputId ="Gas", "California Average Gasoline Price ($/gallon)",
value = 2.78),
numericInput(inputId ="Elec", "California Average Electricity Rate ($/kwh)",
value = 0.19),
numericInput(inputId ="Rebound", "Rebound Effect (%)", value = 3),
numericInput(inputId ="Trans", "Transmission Losses (%)", value = 5),
numericInput(inputId ="Discount", "Discount rate (%)", value = 5),
numericInput(inputId ="carbon_p", "Carbon Value (dollar per ton CO2e)", value = 13),
# NOTE(review): choices are "Low"/"Mid"/"High" but the server's H_impact
# branch tests Impact=="Med" -- confirm which spelling is intended.
selectInput(inputId="Impact", "Value of Health Impact Estimates", choices = list("Low","Mid","High"), selected = "High")),
# Results panel: sales table/plot (from TCM) and benefit-cost table/plot
# (from BC), all gated behind the "Calculate" action button.
mainPanel(fluidRow(actionButton("go", "Calculate"),br(),br(),
column(12, box(h4("The Estimated Number of Sales"), tableOutput("table1"), height = 150, width = 350)),
column(12, box(plotOutput("plot1"), height = 420, width = 350)),
column(12, box(h4("Total Benefits and Costs"),tableOutput("table2"), height = 370, width = 350)),
column(12, box(plotOutput("plot2"), height = 420, width = 350)))
)))
))))
# Server logic. Two reactives do the modeling:
#   TCM() -- Monte Carlo total-cost-of-ownership model; returns a 2x2 matrix
#            (rows EV/PHEV, cols total sales / sales caused by incentive).
#   BC()  -- benefit-cost model built on top of TCM(); returns a 5x2 matrix
#            of GHG, health, revenue, and cost figures.
# The four output$* renderers format those matrices as tables and plots.
server <- function(input, output) {
# TCM: re-runs only when the "Calculate" button (input$go) is pressed.
TCM <- eventReactive(input$go, {
# Input data files must sit in the app directory.
Data <- read_csv("Database.csv")
Market_Share_Simple <- read_csv("Market_Share_Simple.csv")
Cost_data <- read_csv("Cost.csv")
Projection <- read_csv("Price_Projection.csv")
N = 1000 # Number of simulation
N_car_model = nrow(Data)# count number of car models
Need_col = N_car_model+1
ln_Mean_VMT <- 8.87192821723217 # Mean value of Ln(VTM). THis is because the VTM distribution is lognormal distribution
ln_SD_VMT <- 1.09899648130874 # Standard deviation of Ln(VTM)
VMT <- exp(rnorm(n=N,mean=ln_Mean_VMT, sd=ln_SD_VMT)) # Calulate VTM from normally distributed ln(VMT)
VMT[VMT>150000]=150000 # VMT that is larger than 150,000 is 150,000
# these are the values we use and subject to change
discount = 0.2
years_own = 7
Gas_price <- input$Gas
Elec_price <- input$Elec
Phybrid_Gas_Percentage = 0.6
Uncertainty = 0.3
year <- input$Year
EV_rebate <- input$EV_rebate
PHEV_rebate <- input$PHEV_rebate
Discount_PHEV <- input$Discount_PHEV
Discount_EV <- input$Discount_EV
Agency <- input$Agency
Length <- input$Length
Fed <- input$Fed
CVRP <- input$CVRP
Marketing <- input$Marketing/100
Budget <- input$Budget
Lux_BEV_rebate <- input$Lux_BEV
Lux_PHEV_rebate <- input$Lux_PHEV
lease <- 0.4
# change the total incentive depending on the availability.
# Rows 72-83 of Cost_data are PHEVs, rows 84-95 are BEVs (hard-coded).
# NOTE(review): in both loops below the second ifelse overwrites the
# first, so the Fed-only result is always discarded; when CVRP == 2 the
# value falls back to Cost_data$Incentive regardless of Fed. Probably the
# CVRP amount was meant to be added on top of the Fed-adjusted value
# (Cost_data$incen[i] + 1500/2500) -- confirm intended behavior.
Cost_data$incen <- rep(0, nrow=Cost_data)
for (i in 72:83){
Cost_data$incen[i] <- ifelse(Fed == 1, Cost_data$Incentive[i],0)
Cost_data$incen[i] <- ifelse(CVRP == 1,Cost_data$Incentive[i]+1500,Cost_data$Incentive[i])}
for (i in 84:95){
Cost_data$incen[i] <- ifelse(Fed == 1, Cost_data$Incentive[i],0)
Cost_data$incen[i] <- ifelse(CVRP == 1,Cost_data$Incentive[i]+2500,Cost_data$Incentive[i])}
# Calculate the new PHEV and EV price based on year and subtract the incentive
Cost_data$Year_Pur_Cost[72:76]<-Cost_data$Base_Pur_Cost[72:76]*(1-Projection$PHEV[match(year,Projection$Year)])-Cost_data$incen[72:76]
Cost_data$Year_Pur_Cost[77:83]<-Cost_data$Base_Pur_Cost[77:83]*(1-Projection$PHEV[match(year,Projection$Year)]*0.68)-Cost_data$incen[77:83]
Cost_data$Year_Pur_Cost[84:91] <- Cost_data$Base_Pur_Cost[84:91]*(1-Projection$EV[match(year,Projection$Year)])-Cost_data$incen[84:91]
Cost_data$Year_Pur_Cost[92:93] <- Cost_data$Base_Pur_Cost[92:93]*(1-Projection$EV[match(year,Projection$Year)]*0.681)-Cost_data$incen[92:93]
Cost_data$Year_Pur_Cost[94:95] <- Cost_data$Base_Pur_Cost[94:95]*(1-Projection$EV[match(year,Projection$Year)]*0.96)-Cost_data$incen[94:95]
# Calculate the total purchase price - incentive + owndership cost
Cost_data[,8] <- Cost_data[,4]+Cost_data[,5]
# Generate the data sample based on the proportion of each vehicle market share.
# Each of the 30 columns is one vehicle segment; within a segment, models
# are drawn with probability proportional to their market share (col 6),
# using the per-model cost in col 8. The row ranges are hard-coded to
# match Cost.csv row order.
Cost_matrix <- matrix(rep(0,N*30), nrow=N, ncol=30)
Cost_matrix[,1] <- as.numeric(sample(as.character(unlist(Cost_data[1:5,8])),N, prob=as.character(unlist(Cost_data[1:5,6])),replace=TRUE))
Cost_matrix[,2] <- as.numeric(sample(as.character(unlist(Cost_data[6:10,8])),N, prob=as.character(unlist(Cost_data[6:10,6])),replace=TRUE))
Cost_matrix[,3] <- as.numeric(sample(as.character(unlist(Cost_data[11:15,8])),N, prob=as.character(unlist(Cost_data[11:15,6])),replace=TRUE))
Cost_matrix[,4] <- as.numeric(sample(as.character(unlist(Cost_data[16:20,8])),N, prob=as.character(unlist(Cost_data[16:20,6])),replace=TRUE))
Cost_matrix[,5] <- as.numeric(sample(as.character(unlist(Cost_data[21:25,8])),N, prob=as.character(unlist(Cost_data[21:25,6])),replace=TRUE))
Cost_matrix[,6] <-as.numeric(sample(as.character(unlist(Cost_data[26:29,8])),N, prob=as.character(unlist(Cost_data[26:29,6])),replace=TRUE))
Cost_matrix[,7] <- as.numeric(sample(as.character(unlist(Cost_data[30:34,8])),N, prob=as.character(unlist(Cost_data[30:34,6])),replace=TRUE))
Cost_matrix[,8] <- as.numeric(sample(as.character(unlist(Cost_data[35:39,8])),N, prob=as.character(unlist(Cost_data[35:39,6])),replace=TRUE))
Cost_matrix[,9] <- as.numeric(sample(as.character(unlist(Cost_data[40:44,8])),N, prob=as.character(unlist(Cost_data[40:44,6])),replace=TRUE))
Cost_matrix[,10] <- as.numeric(sample(as.character(unlist(Cost_data[45:49,8])),N, prob=as.character(unlist(Cost_data[45:49,6])),replace=TRUE))
Cost_matrix[,11] <- as.numeric(sample(as.character(unlist(Cost_data[50:54,8])),N, prob=as.character(unlist(Cost_data[50:54,6])),replace=TRUE))
Cost_matrix[,12] <- as.numeric(sample(as.character(unlist(Cost_data[55,8])),N, prob=as.character(unlist(Cost_data[55,6])),replace=TRUE))
Cost_matrix[,13] <- as.numeric(sample(as.character(unlist(Cost_data[56:58,8])),N, prob=as.character(unlist(Cost_data[56:58,6])),replace=TRUE))
Cost_matrix[,14] <- as.numeric(sample(as.character(unlist(Cost_data[59:63,8])),N, prob=as.character(unlist(Cost_data[59:63,6])),replace=TRUE))
Cost_matrix[,15] <- as.numeric(sample(as.character(unlist(Cost_data[64,8])),N, prob=as.character(unlist(Cost_data[64,6])),replace=TRUE))
Cost_matrix[,16] <- as.numeric(sample(as.character(unlist(Cost_data[65:67,8])),N, prob=as.character(unlist(Cost_data[65:67,6])),replace=TRUE))
Cost_matrix[,17] <- as.numeric(sample(as.character(unlist(Cost_data[68,8])),N, prob=as.character(unlist(Cost_data[68,6])),replace=TRUE))
Cost_matrix[,18] <- as.numeric(sample(as.character(unlist(Cost_data[69,8])),N, prob=as.character(unlist(Cost_data[69,6])),replace=TRUE))
Cost_matrix[,19] <- as.numeric(sample(as.character(unlist(Cost_data[70:71,8])),N, prob=as.character(unlist(Cost_data[70:71,6])),replace=TRUE))
Cost_matrix[,20] <- as.numeric(sample(as.character(unlist(Cost_data[72:73,8])),N, prob=as.character(unlist(Cost_data[72:73,6])),replace=TRUE))
Cost_matrix[,21] <- as.numeric(sample(as.character(unlist(Cost_data[74:76,8])),N, prob=as.character(unlist(Cost_data[74:76,6])),replace=TRUE))
Cost_matrix[,22] <- as.numeric(sample(as.character(unlist(Cost_data[77,8])),N, prob=as.character(unlist(Cost_data[77,6])),replace=TRUE))
Cost_matrix[,23] <- as.numeric(sample(as.character(unlist(Cost_data[78:79,8])),N, prob=as.character(unlist(Cost_data[78:79,6])),replace=TRUE))
Cost_matrix[,24] <- as.numeric(sample(as.character(unlist(Cost_data[80,8])),N, prob=as.character(unlist(Cost_data[80,6])),replace=TRUE))
Cost_matrix[,25] <- as.numeric(sample(as.character(unlist(Cost_data[81:83,8])),N, prob=as.character(unlist(Cost_data[81:83,6])),replace=TRUE))
Cost_matrix[,26] <- as.numeric(sample(as.character(unlist(Cost_data[84:87,8])),N, prob=as.character(unlist(Cost_data[84:87,6])),replace=TRUE))
Cost_matrix[,27] <- as.numeric(sample(as.character(unlist(Cost_data[88:91,8])),N, prob=as.character(unlist(Cost_data[88:91,6])),replace=TRUE))
Cost_matrix[,28] <- as.numeric(sample(as.character(unlist(Cost_data[92:93,8])),N, prob=as.character(unlist(Cost_data[92:93,6])),replace=TRUE))
Cost_matrix[,29] <- as.numeric(sample(as.character(unlist(Cost_data[94,8])),N, prob=as.character(unlist(Cost_data[94,6])),replace=TRUE))
Cost_matrix[,30] <- as.numeric(sample(as.character(unlist(Cost_data[95,8])),N, prob=as.character(unlist(Cost_data[95,6])),replace=TRUE))
# make a mtrix to generate normally distributed delta
# (Delta = consumer preference premium per model, with sd = 30% of mean.)
Delta_matrix <- matrix(rep(NA,N*N_car_model),nrow=N, ncol=N_car_model)
for (j in 1:N_car_model){
Delta_matrix[,j] <- rnorm(n=N,mean=Data$Delta[j],sd=Data$Delta[j]*Uncertainty)
}
# Make a matrix for Total life cycle costs by each segment.
TotalCost <- matrix(rep(NA,N*N_car_model),nrow=N, ncol=N_car_model)
N_ICEV <- sum(Data$Fuel_Type=="ICEV")
N_Hy <- sum(Data$Fuel_Type=="Hybrid")
N_PHy <- 2
N_PHy_Lux <- 4
N_EV <- 3
N_EV_Lux <- 2
# the "for" functions below are to calculate total costs by each segment.
# Baseline (no agency rebate): purchase cost + discounted fuel/electricity
# over years_own, minus the preference premium Delta.
for (i in 1:N){
for (j in 1:(N_ICEV+N_Hy)){
TotalCost[i,j] <- Data$Oper[j]*VMT[i]/15000+VMT[i]*Data$Fuel_gas[j]*Gas_price/discount*(1-1/(1+discount)^years_own)-Delta_matrix[i,j]+Cost_matrix[i,j]
}
for (j in (1+N_ICEV+N_Hy):(N_ICEV+N_Hy+N_PHy)){
TotalCost[i,j] <- Data$Oper[j]*VMT[i]/15000+VMT[i]*Data$Fuel_gas[j]*Gas_price*Phybrid_Gas_Percentage/discount*(1-1/(1+discount)^years_own)+VMT[i]*Data$Fuel_Elec[j]*Elec_price*(1-Phybrid_Gas_Percentage)/discount*(1-1/(1+discount)^years_own)-Delta_matrix[i,j]+Cost_matrix[i,j]
}
for (j in (1+N_ICEV+N_Hy+N_PHy):(N_ICEV+N_Hy+N_PHy_Lux)){
TotalCost[i,j] <- Data$Oper[j]*VMT[i]/15000+VMT[i]*Data$Fuel_gas[j]*Gas_price*Phybrid_Gas_Percentage/discount*(1-1/(1+discount)^years_own)+VMT[i]*Data$Fuel_Elec[j]*Elec_price*(1-Phybrid_Gas_Percentage)/discount*(1-1/(1+discount)^years_own)-Delta_matrix[i,j]+Cost_matrix[i,j]
}
for (j in (1+N_ICEV+N_Hy+N_PHy+N_PHy_Lux):(N_ICEV+N_Hy+N_PHy+N_PHy_Lux+N_EV)){
TotalCost[i,j] <- Data$Oper[j]*VMT[i]/15000+VMT[i]*Data$Fuel_Elec[j]*Elec_price/discount*(1-1/(1+discount)^years_own)-Delta_matrix[i,j]+Cost_matrix[i,j]
}
for (j in (1+N_ICEV+N_Hy+N_PHy+N_PHy_Lux+N_EV):(N_ICEV+N_Hy+N_PHy+N_PHy_Lux+N_EV+N_EV_Lux)){
TotalCost[i,j] <- Data$Oper[j]*VMT[i]/15000+VMT[i]*Data$Fuel_Elec[j]*Elec_price/discount*(1-1/(1+discount)^years_own)-Delta_matrix[i,j]+Cost_matrix[i,j]
}
}
# make matrix to choose the minimum cost option.
Decision_Matrix <- matrix(rep(NA,N*N_car_model),nrow=N, ncol=N_car_model)
# if the segment is the lowest cost, put 1, otherwise, put "0"
for (i in 1:N) {
for (j in 1:N_car_model){
Decision_Matrix[i,j] = ifelse(as.numeric(j)==as.numeric(which.min(TotalCost[i,1:N_car_model])),as.numeric(1),as.numeric(0))}}
# Calculate Baseline market share
Marketshare <-matrix(rep(NA,N_car_model),nrow=1, ncol=N_car_model)
for (j in 1:N_car_model){
Marketshare[,j]=sum(Decision_Matrix[1:N,j],na.rm=TRUE)
}
# Rescale simulated choice counts to market shares; the 0.8536... constant
# is the share of total sales covered by the modeled vehicle list.
Marketshare_Table <- Market_Share_Simple
Marketshare_Table[1:2,] <- Market_Share_Simple
Marketshare_Table[3,] <- Marketshare[1,]
Marketshare_Table[4,] <- Marketshare_Table[3,]/Marketshare_Table[1,]
Marketshare_Table[5,] <- Marketshare_Table[4,]*0.853628632417563/sum(Marketshare_Table[4,])
colnames(Marketshare_Table) <- colnames(Market_Share_Simple)
rownames(Marketshare_Table) <- c("Propotion of sales from these models","Real Market Share","Counts from TCM","Recalculated Counts","Estimated Market Share")
###########################################################################
# Second pass: same model, but with the agency rebates / dealer discounts
# applied. Luxury rebates only apply when the Lux_* toggle is "Yes" (== 1).
PHEV_rebate_Lux <-ifelse(Lux_PHEV_rebate==1, PHEV_rebate, 0)
Discount_PHEV_Lux<-ifelse(Lux_PHEV_rebate==1, Discount_PHEV, 0)
EV_rebate_Lux<-ifelse(Lux_BEV_rebate==1, EV_rebate, 0)
Discount_EV_Lux<-ifelse(Lux_BEV_rebate==1, Discount_EV, 0)
# Calculate the total life cycle costs but with rebates
TotalCost2 <- matrix(rep(NA,N*N_car_model),nrow=N, ncol=N_car_model)
for (i in 1:N){
for (j in 1:(N_ICEV+N_Hy)){
TotalCost2[i,j] <- Data$Oper[j]*VMT[i]/15000+VMT[i]*Data$Fuel_gas[j]*Gas_price/discount*(1-1/(1+discount)^years_own)-Delta_matrix[i,j]+Cost_matrix[i,j]
}
for (j in (1+N_ICEV+N_Hy):(N_ICEV+N_Hy+N_PHy)){
TotalCost2[i,j] <- Data$Oper[j]*VMT[i]/15000+VMT[i]*Data$Fuel_gas[j]*Gas_price*Phybrid_Gas_Percentage/discount*(1-1/(1+discount)^years_own)+VMT[i]*Data$Fuel_Elec[j]*Elec_price*(1-Phybrid_Gas_Percentage)/discount*(1-1/(1+discount)^years_own)-Delta_matrix[i,j]+Cost_matrix[i,j]-PHEV_rebate-Discount_PHEV
}
for (j in (1+N_ICEV+N_Hy+N_PHy):(N_ICEV+N_Hy+N_PHy_Lux)){
TotalCost2[i,j] <- Data$Oper[j]*VMT[i]/15000+VMT[i]*Data$Fuel_gas[j]*Gas_price*Phybrid_Gas_Percentage/discount*(1-1/(1+discount)^years_own)+VMT[i]*Data$Fuel_Elec[j]*Elec_price*(1-Phybrid_Gas_Percentage)/discount*(1-1/(1+discount)^years_own)-Delta_matrix[i,j]+Cost_matrix[i,j]-PHEV_rebate_Lux-Discount_PHEV_Lux
}
for (j in (1+N_ICEV+N_Hy+N_PHy+N_PHy_Lux):(N_ICEV+N_Hy+N_PHy+N_PHy_Lux+N_EV)){
TotalCost2[i,j] <- Data$Oper[j]*VMT[i]/15000+VMT[i]*Data$Fuel_Elec[j]*Elec_price/discount*(1-1/(1+discount)^years_own)-Delta_matrix[i,j]+Cost_matrix[i,j]-EV_rebate-Discount_EV
}
for (j in (1+N_ICEV+N_Hy+N_PHy+N_PHy_Lux+N_EV):(N_ICEV+N_Hy+N_PHy+N_PHy_Lux+N_EV+N_EV_Lux)){
TotalCost2[i,j] <- Data$Oper[j]*VMT[i]/15000+VMT[i]*Data$Fuel_Elec[j]*Elec_price/discount*(1-1/(1+discount)^years_own)-Delta_matrix[i,j]+Cost_matrix[i,j]-EV_rebate_Lux-Discount_EV_Lux
}
}
# Make a matrix and choose the minimum cost option.
Decision_Matrix2 <- matrix(rep(NA,N*N_car_model),nrow=N, ncol=N_car_model)
for (i in 1:N) {
for (j in 1:N_car_model){
Decision_Matrix2[i,j] = ifelse(as.numeric(j)==as.numeric(which.min(TotalCost2[i,1:N_car_model])),as.numeric(1),as.numeric(0))}}
# Calculate predicted market share with rebates
Marketshare2 <-matrix(rep(NA,N_car_model),nrow=1, ncol=N_car_model)
for (j in 1:N_car_model){
Marketshare2[,j]=sum(Decision_Matrix2[1:N,j],na.rm=TRUE)
}
Marketshare_Table2 <- Market_Share_Simple
Marketshare_Table2[1:2,] <- Market_Share_Simple
Marketshare_Table2[3,] <- Marketshare2[1,]
Marketshare_Table2[4,] <- Marketshare_Table2[3,]/Marketshare_Table2[1,]
Marketshare_Table2[5,] <- Marketshare_Table2[4,]*0.853628632417563/sum(Marketshare_Table2[4,])
colnames(Marketshare_Table2) <- colnames(Market_Share_Simple)
rownames(Marketshare_Table2) <- c("Propotion of sales from these models","Real Market Share","Counts from TCM","Recalculated Counts","Estimated Market Share")
# Calculate how many number of vehicle would be purchased in the location.
# P_sales scales statewide annual auto sales to the agency's population,
# program length (months), and marketing reach.
Autosale = 2086966
CA_pop = 39250017
Agency_Pop <- ifelse(Agency== "Apple Valley",72553,ifelse(Agency=="San Francisco",870887, ifelse(Agency=="Lancaster", 160106, ifelse(Agency=="MCE",1537944,ifelse(Agency=="Peninsula",764797,ifelse(Agency=="Redwood Coast",136646,ifelse(Agency=="Silicon Valley",1919402,ifelse(Agency=="Sonoma",590698,0))))))))
P_sales <-Autosale*Agency_Pop/CA_pop/12*Length*Marketing
# Calculate the maximum number of vehicles
# Columns 20-30 of the market-share tables are the plug-in segments:
# 20-25 PHEV (22-25 luxury), 26-30 BEV (29-30 luxury/Tesla).
if (Lux_BEV_rebate == 1){
Base_EV <- sum(Marketshare_Table[5,26:30])
Predict_EV <- sum(Marketshare_Table2[5,26:30])
} else {
Base_EV <- sum(Marketshare_Table[5,26:28])
Predict_EV <- sum(Marketshare_Table2[5,26:28])
}
if (Lux_PHEV_rebate == 1){
Base_PHEV <- sum(Marketshare_Table[5,20:25])
Predict_PHEV <- sum(Marketshare_Table2[5,20:25])
} else {
Base_PHEV <- sum(Marketshare_Table[5,20:21])
Predict_PHEV <- sum(Marketshare_Table2[5,20:21])
}
# Split the budget between EV and PHEV rebates in proportion to predicted
# demand, then cap predicted sales at what the budget can fund. The
# (1 + lease/(1-lease)) factor grosses EV sales up for leased vehicles.
Prob_demand_EV <- ifelse((PHEV_rebate==0)&(Discount_PHEV==0), Prob_demand_EV <- 1, Prob_demand_EV <- Predict_EV/(Predict_EV+Predict_PHEV))
Prob_demand_PHEV <- 1-Prob_demand_EV
max_total <- Budget/(EV_rebate*Prob_demand_EV+PHEV_rebate*Prob_demand_PHEV)
max_EV <- Prob_demand_EV*max_total
max_PHEV <- Prob_demand_PHEV*max_total
Final_EV <- ifelse((Predict_EV*P_sales*(1+(lease)/(1-lease)))>max_EV, max_EV, Predict_EV*P_sales*(1+(lease)/(1-lease)))
Final_PHEV <- ifelse((Predict_PHEV*P_sales)>max_PHEV, max_PHEV, Predict_PHEV*P_sales)
# Present the estimated results in the table.
# Column 2 = sales attributable to the incentive (floored at 0).
FinalTable <- matrix(c(Final_EV,Final_PHEV,ifelse((Predict_EV-Base_EV)/Predict_EV*Final_EV>0,(Predict_EV-Base_EV)/Predict_EV*Final_EV, 0),ifelse((Predict_PHEV-Base_PHEV)/Predict_PHEV*Final_PHEV>0,(Predict_PHEV-Base_PHEV)/Predict_PHEV*Final_PHEV,0)), nrow=2, ncol=2)
colnames(FinalTable)<-c("Total Sales","Sales caused by incetive")
rownames(FinalTable)<-c("EV","PHEV")
# print() both displays the table and returns it as the reactive's value.
print(FinalTable)
})
# BC: benefit-cost calculations derived from the TCM() sales matrix.
BC <- reactive({
TCM <- TCM()
Aveg_VTM <- 11244
Lifetime <- 15
agency <-input$Agency
year <- input$Year
# Energy-mix fractions used to weight per-source emission factors.
E1 <-input$Energymix1/100
E2 <- input$Energymix2/100
E3 <- input$Energymix3/100
E4 <- input$Energymix4/100
E5 <- input$Energymix5/100
E6 <- input$Energymix6/100
E7 <- input$Energymix7/100
E8 <- input$Energymix8/100
E9 <- input$Energymix9/100
E10 <- input$Energymix10/100
Rebound <- input$Rebound/100
Trans <- input$Trans/100
discount <-input$Discount/100
carbon_price <- input$carbon_p
Impact <- input$Impact
EV_rebate <- input$EV_rebate
PHEV_rebate <- input$PHEV_rebate
Length <- input$Length
Staff <- input$Staff
Admincost <- input$Admincost
Elec_price <- input$Elec
Aveg_VTM <- 11244
Lifetime <- 15
Efficiency <- 0.3
PHEV_gas_perc <-0.6
G_table <- read_csv("Emission_Gas.csv")
E_table <-read_csv("Emission_Elec.csv")
Health_impact <- read_csv("Health_impact.csv")
# Gasoline-fleet emission factors for this agency/year; electricity
# factors are the mix-weighted averages across the ten sources.
E_gas <- subset(G_table, Year==year & Agency==agency)
Emission_gas <- E_gas$CO2e/10^6
Emission_elec_CO2 <- (E1*E_table$CO2e[1]+E2*E_table$CO2e[2]+E3*E_table$CO2e[3]+E4*E_table$CO2e[4]+E5*E_table$CO2e[5]+E6*E_table$CO2e[6]+E7*E_table$CO2e[7]+E8*E_table$CO2e[8]+E9*E_table$CO2e[9]+E10*E_table$CO2e[10])/1000
Emission_elec_PM <- (E1*E_table$PM[1]+E2*E_table$PM[2]+E3*E_table$PM[3]+E4*E_table$PM[4]+E5*E_table$PM[5]+E6*E_table$PM[6]+E7*E_table$PM[7]+E8*E_table$PM[8]+E9*E_table$PM[9]+E10*E_table$PM[10])/1000
Emission_elec_Nox <- (E1*E_table$Nox[1]+E2*E_table$Nox[2]+E3*E_table$Nox[3]+E4*E_table$Nox[4]+E5*E_table$Nox[5]+E6*E_table$Nox[6]+E7*E_table$Nox[7]+E8*E_table$Nox[8]+E9*E_table$Nox[9]+E10*E_table$Nox[10])/1000
Emission_elec_Sox <- (E1*E_table$Sox[1]+E2*E_table$Sox[2]+E3*E_table$Sox[3]+E4*E_table$Sox[4]+E5*E_table$Sox[5]+E6*E_table$Sox[6]+E7*E_table$Sox[7]+E8*E_table$Sox[8]+E9*E_table$Sox[9]+E10*E_table$Sox[10])/1000
# Annual GHG avoided = gas emissions displaced minus charging emissions,
# scaled by incentive-caused sales (TCM column 2), rebound, and losses.
Annual_GHG_EV <- (Aveg_VTM*Emission_gas-Emission_elec_CO2*Aveg_VTM*Efficiency)*TCM[1,2]*(1+Rebound)/(1+Trans)
Annual_GHG_PHEV <-(Aveg_VTM*Emission_gas*(1-PHEV_gas_perc)-Emission_elec_CO2*Aveg_VTM*Efficiency*PHEV_gas_perc)*TCM[2,2]*(1+Rebound)/(1+Trans)
Disc_GHG_EV <- Annual_GHG_EV/discount*(1-1/(1+discount)^Lifetime)
Disc_GHG_PHEV <- Annual_GHG_PHEV/discount*(1-1/(1+discount)^Lifetime)
Total_GHG <- (Annual_GHG_EV+Annual_GHG_PHEV)*Lifetime
Total_disc_GHG <- Disc_GHG_EV + Disc_GHG_PHEV
GHG_benefits <- carbon_price*Total_disc_GHG
# Tailpipe-pollutant reductions (tons/year) per pollutant and fuel type.
T_EV_PM2.5 <- Aveg_VTM*(E_gas$PM2.5/10^6-Emission_elec_PM/2*Efficiency)*TCM[1,2]*(1+Rebound)/(1+Trans)
T_EV_PM10 <- Aveg_VTM*(E_gas$PM10/10^6-Emission_elec_PM/2*Efficiency)*TCM[1,2]*(1+Rebound)/(1+Trans)
T_EV_Nox <- Aveg_VTM*(E_gas$Nox/10^6-Emission_elec_Nox*Efficiency)*TCM[1,2]*(1+Rebound)/(1+Trans)
T_EV_Sox <- Aveg_VTM*(E_gas$Sox/10^6-Emission_elec_Sox*Efficiency)*TCM[1,2]*(1+Rebound)/(1+Trans)
T_PHEV_PM2.5 <- Aveg_VTM*(E_gas$PM2.5/10^6*(1-PHEV_gas_perc)-Emission_elec_PM/2*Efficiency*PHEV_gas_perc)*TCM[2,2]*(1+Rebound)/(1+Trans)
T_PHEV_PM10 <- Aveg_VTM*(E_gas$PM10/10^6*(1-PHEV_gas_perc)-Emission_elec_PM/2*Efficiency*PHEV_gas_perc)*TCM[2,2]*(1+Rebound)/(1+Trans)
T_PHEV_Nox <- Aveg_VTM*(E_gas$Nox/10^6*(1-PHEV_gas_perc)-Emission_elec_Nox*Efficiency*PHEV_gas_perc)*TCM[2,2]*(1+Rebound)/(1+Trans)
T_PHEV_Sox <- Aveg_VTM*(E_gas$Sox/10^6*(1-PHEV_gas_perc)-Emission_elec_Sox*Efficiency*PHEV_gas_perc)*TCM[2,2]*(1+Rebound)/(1+Trans)
# NOTE(review): "T_EV_PM10+T_EV_PM10" looks like a copy-paste slip --
# T_PHEV_PM10 is computed above but never summed here. Confirm intent.
Annual_total_tail <-matrix(c(T_EV_PM2.5+T_PHEV_PM2.5, T_EV_PM10+T_EV_PM10, T_EV_Nox+T_PHEV_Nox, T_EV_Sox+T_PHEV_Sox), ncol=4)
# NOTE(review): every other discounted sum in this file uses the factor
# (1-1/(1+discount)^Lifetime); the "1-" is missing here. Confirm intent.
Disc_toal_tail <- Annual_total_tail/discount*(1/(1+discount)^Lifetime)
colnames(Disc_toal_tail) <- c("PM2.5","PM10","Nox","Sox")
# NOTE(review): two apparent issues -- (1) the UI choices are
# "Low"/"Mid"/"High" but this tests Impact=="Med", so "Mid" falls through
# to the High branch; (2) Disc_toal_tail columns are (PM2.5, PM10, Nox,
# Sox) yet columns 3/4 are multiplied by Health_impact$Sox/$Nox
# respectively -- the Nox/Sox weights appear crossed. Confirm both.
H_impact <- ifelse(Impact=="Low",Disc_toal_tail[1]*Health_impact$PM2.5[1]+Disc_toal_tail[2]*Health_impact$PM10[1]+Disc_toal_tail[3]*Health_impact$Sox[1]+Disc_toal_tail[4]*Health_impact$Nox[1] ,ifelse(Impact=="Med",Disc_toal_tail[1]*Health_impact$PM2.5[2]+Disc_toal_tail[2]*Health_impact$PM10[2]+Disc_toal_tail[3]*Health_impact$Sox[2]+Disc_toal_tail[4]*Health_impact$Nox[2],Disc_toal_tail[1]*Health_impact$PM2.5[3]+Disc_toal_tail[2]*Health_impact$PM10[3]+Disc_toal_tail[3]*Health_impact$Sox[3]+Disc_toal_tail[4]*Health_impact$Nox[3]))
# Program costs: staff time (pro-rated by program months), implementation,
# and total rebates paid out; plus discounted electricity-sales revenue.
Admin_cost <- Length*Staff*Admincost/12
Imp_cost <- input$Impcost
Total_rebates <- EV_rebate*TCM[1,2]+PHEV_rebate*ifelse(TCM[2,2]<=0, 0, TCM[2,2])
Revenue <- Elec_price*(Aveg_VTM*Efficiency*TCM[1,2]*(1+Rebound)/(1+Trans)+Aveg_VTM*Efficiency*PHEV_gas_perc*TCM[2,2]*(1+Rebound)/(1+Trans))/discount*(1-1/(1+discount)^Lifetime)*input$Profit/100
BCR <- (GHG_benefits+H_impact+Revenue)/(Admin_cost+Imp_cost+Total_rebates)
Cost_GHG <- (Admin_cost+Imp_cost+Total_rebates)/Total_GHG
# Benefit matrix layout (consumed positionally by table2/plot2):
# col 1 = GHG benefit, health benefit, revenue, total GHG tons, BCR;
# col 2 = cost of GHG reduction, admin, implementation, rebates (row 5 NA).
Benefit <- matrix(c(GHG_benefits, H_impact, Revenue,Total_GHG, Cost_GHG, Admin_cost, Imp_cost, Total_rebates, BCR),nrow=5, ncol=2)
colnames(Benefit)<- c("Benefits", "Costs")
rownames(Benefit)<- c("a","b","c","d","e")
return(Benefit)
})
# table1: the raw 2x2 sales matrix from TCM().
output$table1 <- renderTable({
TCM <- TCM()
FinalTable <- as.data.frame(TCM)
}, rownames = TRUE, colnames = TRUE, digits=0)
# plot1: stacked bar of sales, split into incentive-caused vs remaining.
output$plot1 <- renderPlot({
TCM <- TCM()
Finalsale <- as.data.frame(TCM)
Finalsale[,3] <- TCM[,1]-TCM[,2]
Final1 <-Finalsale[1:2,3]
Final2 <-Finalsale[1:2,2]
Final <- as.data.frame(cbind(Final1, Final2))
colnames(Final) <-c("Remaining", "Caused by incentive")
DF <- data.frame(Final)
DF$Type <- c("EV","PHEV")
DF1 <- melt(DF, id.var="Type")
library(ggplot2)
ggplot(DF1, aes(x = Type, y = value, fill = variable)) +
geom_bar(stat = "identity")+
ggtitle("The Number of EV and PHEV Sales through EV Program")+
ylab("Number of Sales")+
xlab("")+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"), plot.title = element_text(hjust = 0.5), legend.title=element_blank(), text = element_text(size=15))
})
# table2: labeled benefit/cost figures pulled positionally out of BC().
output$table2 <- renderTable({
BC <- BC()
Total_Value <- c(BC[4,1],BC[1,1],BC[2,1],BC[3,1],BC[1,2],BC[2,2],BC[3,2],BC[4,2],BC[5,1])
Cost_Benefit <- as.data.frame(Total_Value, row.names = c("GHG Reduction (ton)","GHG reduction benefits (dollar)", "Health Benefits (dollar)","Revenue (dollar)","Administrative Cost (dollar)", "Implementation Cost (dollar)", "Total rebates costs (dollar)","Benefit Cost Ratio","Cost of GHG reduction (dollar/tonCO2e)"))
},rownames = TRUE, colnames=TRUE, digits=2)
# plot2: stacked bars comparing total monetized benefits vs total costs.
output$plot2 <- renderPlot({
BC <-BC()
DF <- matrix(rep(0,12),nrow=2,ncol=6)
DF[1,1:3] <- BC[1:3,1]
DF[2,4:6] <- BC[1:3,2]
colnames(DF) <-c("Benefit:GHG Reduction","Benefit:Health","Benefit:Revenue","Cost:Total Rebates","Cost:Implementation","Cost:Administration")
DF <- data.frame(DF)
DF$Type <- c("Benefits","Costs")
DF1 <- melt(DF, id.var="Type")
ggplot(DF1, aes(x = Type, y = value, fill = variable)) +
geom_bar(stat = "identity")+
ggtitle("Overall Benefits and Costs")+
ylab("Monetary Value (dollar)")+
xlab("")+ theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black"), plot.title = element_text(hjust = 0.5), legend.position="bottom", legend.title=element_blank(), text = element_text(size=15))+guides(fill=guide_legend(nrow=2,byrow=TRUE))
})
}
# Launch the application.
shinyApp(ui, server)
f4234e39b757dc6357c829ea20d24b58ec16d790 | fe64d608db5c55d192d646cccdaa94148b524c56 | /EVT_all/M1_gamma/functions/alphabeta.R | 8aa123336caf1e0bc137680b19aa4d59e6f505af | [] | no_license | juetaoLim/DenVextremeMix | 765d199fd32e19bfcb01ba83bdae31ba83d3223f | 21d542a49b06985600442cd6ade398f2758a0867 | refs/heads/master | 2022-12-18T10:00:13.451584 | 2020-09-24T04:43:34 | 2020-09-24T04:43:34 | 298,165,132 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,122 | r | alphabeta.R |
# ------Sampling alpha and beta------------
# One Metropolis-Hastings update of the bulk-distribution parameters
# (alpha, beta).
#
# param: current parameter vector; param[1] = alpha, param[2] = beta
#        (remaining entries are passed through unchanged to ll()).
# Va,Vb: proposal variances for alpha and beta.
# Ga:    hyperparameters (a, b, c, d) of the joint prior.
# data:  observations, forwarded to the log-likelihood ll().
# Returns c(alpha, beta) -- the (possibly unchanged) sampled values.
#
# Requires rtnorm()/dtnorm() (msm package) and ll() from elsewhere.
new_bulk = function(param, Va, Vb, Ga, data)
{
  alpha = param[1]
  beta = param[2]
  # Random-walk proposals from normals truncated below at 0.1.
  newalpha = rtnorm(1, mean = alpha, sd = sqrt(Va), lower = 0.1)
  newbeta = rtnorm(1, mean = beta, sd = sqrt(Vb), lower = 0.1)
  temp = param
  temp[1] = newalpha
  temp[2] = newbeta
  # Prior: gamma on alpha and gamma on alpha/beta, with Jacobian
  # alpha/beta^2 from the (alpha, alpha/beta) -> (alpha, beta) change of
  # variables.
  prior_bulk = function(alpha, beta)
  {
    a = Ga[1]
    b = Ga[2]
    c = Ga[3]
    d = Ga[4]
    dgamma(alpha, shape = a, rate = b) *
      dgamma(alpha / beta, shape = c, rate = d) *
      (alpha / beta^2)
  }
  # Posterior ratio on the log scale, then exponentiated.
  r_M = log(prior_bulk(newalpha, newbeta) / prior_bulk(alpha, beta)) +
    ll(temp, data) - ll(param, data)
  r_M = exp(r_M)
  # Hastings correction q(old|new)/q(new|old). Fix: the proposal for beta
  # is truncated at 0.1, so its density terms must also use lower = 0.1;
  # the truncation constants depend on the mean and do not cancel, and the
  # original (untruncated dtnorm for beta) targeted the wrong distribution.
  acc_bulk = r_M *
    dtnorm(alpha, mean = newalpha, sd = sqrt(Va), lower = 0.1) *
    dtnorm(beta, mean = newbeta, sd = sqrt(Vb), lower = 0.1) /
    dtnorm(newalpha, mean = alpha, sd = sqrt(Va), lower = 0.1) /
    dtnorm(newbeta, mean = beta, sd = sqrt(Vb), lower = 0.1)
  # Accept the proposal with probability min(1, acc_bulk); otherwise keep
  # the current values.
  if (runif(1) < acc_bulk)
  {
    alpha = newalpha
    beta = newbeta
  }
  return(c(alpha, beta))
}
|
1382504811e0b55446313b01b80a5aa2cb455a6b | 8acb236d0826d4e35bb6dc1ca1816833499a2766 | /tests/testthat/test-idig_top_records.R | 9cd55a956f996f2c7c84970ad7e592f3fae00647 | [
"MIT"
] | permissive | iDigBio/ridigbio | ae2437d8490671804f1675dc4bd260dd475a9c23 | 5d8abbdbc09dfc10616c96037cb757ef9436a21b | refs/heads/master | 2022-08-07T16:28:07.779582 | 2022-07-15T20:23:50 | 2022-07-15T20:23:50 | 26,278,368 | 15 | 13 | NOASSERTION | 2022-07-14T23:34:57 | 2014-11-06T16:17:21 | R | UTF-8 | R | false | false | 1,843 | r | test-idig_top_records.R | context("test idig_top_records")
# Shared fixtures for the idig_top_records() tests below.
field <- "country"
# Expected most-frequent bucket value for the "country" field.
most <- "united states"
# Non-default bucket count used to exercise the count argument.
count <- 11
genus <- "acer"
scientificname <- "acer macrophyllum"
test_that("default list of top 10 scientific names returns", {
  testthat::skip_on_cran()
  top <- idig_top_records()
  # Modernized: expect_that(x, is_a(...))/equals() are deprecated in
  # testthat; use expect_type()/expect_length() instead.
  expect_type(top, "list")
  # Default behavior: ten scientificname buckets.
  expect_length(top$scientificname, 10)
  # All of iDigBio should hold well over 20 million records.
  expect_true(top$itemCount > 20 * 1000 * 1000)
})
test_that("field and number of tops work", {
testthat::skip_on_cran()
top <- idig_top_records(top_fields=c(field), count=count)
expect_that(top, is_a("list"))
expect_that(length(top[[field]]), equals(count))
expect_true(top[[field]][[most]][["itemCount"]] > 1000 * 1000)
# Deprecating this since Alex changed the erorr behavior to tell you when
# JSON is bad or field unknown, no longer just spits out all iDigBio
# Still looking at all of iDigBio, assume things are not changing too fast
#expect_that(abs(top$itemCount - all_count) < 1000, is_true())
})
test_that("record searches work", {
testthat::skip_on_cran()
top <- idig_top_records(rq=list("genus"=genus), top_fields=c(field),
count=count)
expect_that(top, is_a("list"))
expect_true(top$itemCount < 200 * 1000)
# Save the number of genus records for later tests
#genus_count <- top$itemCount
})
test_that("multiple fields return nested results", {
testthat::skip_on_cran()
top <- idig_top_records(rq=list("genus"=genus), top_fields=c(field,
"scientificname"), count=count)
expect_that(top, is_a("list"))
#expect_that(abs(top$itemCount - genus_count) < 100, is_true())
expect_true(top[[field]][[most]][["scientificname"]][[scientificname]][["itemCount"]]
> 1000)
}) |
84ab7fce254d226e42bb7e3bdcb6a88cd3695f04 | fd0622e97276bba2c04d3c2fcba902cdfb65e214 | /packages/nimble/tests/testthat/test-getBound.R | 954caa64dd96820bbfc604e69c055f995f0e390b | [
"BSD-3-Clause",
"CC-BY-4.0",
"GPL-2.0-only",
"GPL-1.0-or-later",
"MPL-2.0",
"GPL-2.0-or-later"
] | permissive | nimble-dev/nimble | 7942cccd73815611e348d4c674a73b2bc113967d | 29f46eb3e7c7091f49b104277502d5c40ce98bf1 | refs/heads/devel | 2023-09-01T06:54:39.252714 | 2023-08-21T00:51:40 | 2023-08-21T00:51:40 | 20,771,527 | 147 | 31 | BSD-3-Clause | 2023-08-12T13:04:54 | 2014-06-12T14:58:42 | C++ | UTF-8 | R | false | false | 7,910 | r | test-getBound.R | source(system.file(file.path('tests', 'testthat', 'test_utils.R'), package = 'nimble'))
# Remember the session's warning level and nimble verbosity so they can be
# restored at the bottom of this file (see the matching calls after the tests).
RwarnLevel <- options('warn')$warn
options(warn = 1)
nimbleVerboseSetting <- nimbleOptions('verbose')
nimbleOptions(verbose = FALSE)
context("Testing of getBound")
test_that("building of model with initial value issues in bounds", {
  # A model exercising every getBound() case: constant / non-constant /
  # functional truncation bounds, dunif bounds, multivariate distributions,
  # and invalid / reversed / stochastically-invalid truncation bounds.
  code <- nimbleCode({
    y1 ~ T(dnorm(mu, sd = sig),b,d) # non-constant trunc bounds
    y2 ~ T(dnorm(0,1), y2l, y2u) # constant trunc bounds
    y3 ~ T(dnorm(0,1), exp(y2l), y3u) # constant trunc bounds
    y2l ~ dnorm(0,1)
    y3u <- 15*cc
    mu ~ dnorm(0, 1)
    sig ~ dunif(0, 5)
    b ~ dunif(a,d) # non-constant unif bounds
    a ~ dunif(al,4) # constant unif bounds
    z[1:2] ~ dmnorm(mu0[1:2], prec0[1:2,1:2]) # mv dist
    alpha[1:3] ~ ddirch(alpha0[1:3]) # mv dist with actual bound
    for(i in 1:3)
      alpha0[i] ~ dgamma(1,1)
    y4 ~ T(dgamma(1, 1), y4l, y4u) # invalid trunc bound
    y5 ~ T(dgamma(1, 1), mu, y5u) # stochastically-invalid trunc bound
    y6 ~ T(dgamma(1, 1), y6l, y6u) # reversed trunc bound
  })
  consts <- list(y2u = 0.8, mu0 = rep(0, 2), prec0 = diag(rep(1,2)),
                 y4l = -2, y4u = 5, y5u = 0.5, y6l = 2, y6u = 1,
                 al = 0.1, dl = 0.15)
  # NOTE(review): `cc` appears twice in this list (1 and 2); name lookup only
  # finds the first entry -- likely unintentional, confirm.
  inits <- list(b = 0.7, mu = 1, sig = 1, d = 5,
                z = rep(1, 2), alpha0 = 1:3, cc = 1, y2l = 0.3, y3u = 15, cc = 2)
  expect_warning(m <- nimbleModel(code, inits = inits,
                                  constants = consts,
                                  data = list(y1 = 0, y2 = 0, y3 = 1, y4 = 1, y5 = 1, y6 = 1)))
  cm <- compileNimble(m)
  # Small nimbleFunction that returns the bound both via the model method and
  # via the free-function form, so test_getBound can compare them.
  test <- nimbleFunction(
    setup = function(model, node, bnd) {},
    run = function() {
      output <- numeric(2)
      output[1] <- model$getBound(node, bnd)
      output[2] <- getBound(model, node, bnd)
      return(output)
      returnType(double(1))
    }
  )
  # All twenty bound checks in one place. Expected values are read from the
  # *current* state of the uncompiled model `m`, so the same checks hold both
  # right after model building (where `m` holds the supplied inits/constants;
  # e.g. m$b[1] == inits$b) and again after simulate() has refreshed the
  # stochastic nodes. This replaces two near-identical copies of the checks.
  checkAllBounds <- function() {
    test_getBound(m, cm, test, 'y1', 'lower', m$b[1], "non-constant lower truncation bound")
    test_getBound(m, cm, test, 'y1', 'upper', m$d[1], "non-constant upper truncation bound")
    test_getBound(m, cm, test, 'y2', 'lower', m$y2l[1], "non-constant lower truncation bound")
    test_getBound(m, cm, test, 'y2', 'upper', consts$y2u, "non-constant upper truncation bound")
    test_getBound(m, cm, test, 'y3', 'lower', exp(m$y2l[1]), "functional lower truncation bound")
    test_getBound(m, cm, test, 'y3', 'upper', m$y3u[1], "functional/deterministic upper truncation bound")
    test_getBound(m, cm, test, 'b', 'lower', m$a[1], "dunif non-constant lower truncation bound")
    test_getBound(m, cm, test, 'b', 'upper', m$d[1], "dunif non-constant upper truncation bound")
    test_getBound(m, cm, test, 'a', 'lower', consts$al, "dunif constant lower truncation bound")
    test_getBound(m, cm, test, 'a', 'upper', 4, "dunif constant upper truncation bound")
    test_getBound(m, cm, test, 'z[1:2]', 'lower', -Inf, "dmnorm constant lower truncation bound")
    test_getBound(m, cm, test, 'z[1:2]', 'upper', Inf, "dmnorm constant upper truncation bound")
    test_getBound(m, cm, test, 'alpha[1:3]', 'lower', 0, "ddirch constant lower truncation bound")
    test_getBound(m, cm, test, 'alpha[1:3]', 'upper', 1, "ddirch constant upper truncation bound")
    test_getBound(m, cm, test, 'y4', 'lower', 0, "invalid constant lower truncation bound")
    test_getBound(m, cm, test, 'y4', 'upper', consts$y4u, "invalid constant upper truncation bound")
    test_getBound(m, cm, test, 'y5', 'lower', m$mu[1], "stochastically-invalid constant lower truncation bound")
    test_getBound(m, cm, test, 'y5', 'upper', consts$y5u, "stochastically-invalid constant upper truncation bound")
    test_getBound(m, cm, test, 'y6', 'lower', consts$y6l, "reversed constant lower truncation bound")
    test_getBound(m, cm, test, 'y6', 'upper', consts$y6u, "reversed constant upper truncation bound")
  }
  checkAllBounds()
  # Re-check after simulating new node values; the same seed is used for both
  # models so `m` and `cm` hold identical states.
  set.seed(1)
  simulate(m)
  set.seed(1)
  simulate(cm)
  checkAllBounds()
})
# getBound() must also work for user-defined distributions registered at run
# time, including discrete ones with integer-valued parameters.
test_that('getBound, user-defined integer-valued', {
# Minimal d/r/p/q nimbleFunction quartet for a fake discrete distribution.
# The bodies are stubs (every one returns 0): only registration and bound
# lookup are exercised here, not the distribution's math.
dtest <- nimbleFunction(
run = function(x = integer(0), thetaInt = integer(0), thetaDbl = double(0), log = integer(0, default = 0)) {
returnType(double(0))
return(0)
})
rtest <- nimbleFunction(
run = function(n = integer(0), thetaInt = integer(0), thetaDbl = double(0)) {
returnType(integer(0))
return(0)
})
ptest <- nimbleFunction(
run = function(q = double(0), thetaInt = integer(0), thetaDbl = double(0), lower.tail = integer(0, default = 1), log.p = integer(0, default = 0)) {
returnType(double(0))
return(0)
})
qtest <- nimbleFunction(
run = function(p = double(0), thetaInt = integer(0), thetaDbl = double(0), lower.tail = integer(0, default = 1), log.p = integer(0, default = 0)) {
returnType(double(0))
return(0)
})
# nimble looks the functions up by name in the global environment; the
# test_utils helper assigns them there and undoes it when the test ends.
temporarilyAssignInGlobalEnv(dtest)
temporarilyAssignInGlobalEnv(rtest)
temporarilyAssignInGlobalEnv(qtest)
temporarilyAssignInGlobalEnv(ptest)
# Register as a discrete distribution with p/q functions available, so the
# T(...) truncation below is legal.
registerDistributions(list(
dtest = list(
BUGSdist = "dtest(thetaInt, thetaDbl)",
pqAvail = TRUE,
discrete = TRUE))
)
# One truncated node using the new distribution; the explicit truncation
# bounds (-1, 7) are what getBound should report.
code <- nimbleCode({
y ~ T(dtest(1, 1.3), -1, 7)
})
m <- nimbleModel(code, data = list(y = 0))
cm <- compileNimble(m)
# Compiled and uncompiled models must agree, and both must return the
# explicit lower truncation bound.
expect_identical(cm$getBound('y','lower'), m$getBound('y', 'lower'), 'issue with getBound with lower')
expect_identical(cm$getBound('y','lower'), -1, 'issue with getBound with lower')
})
# Restore the warning level and nimble verbosity saved at the top of the file.
options(warn = RwarnLevel)
nimbleOptions(verbose = nimbleVerboseSetting)
|
9972883a4b826ec2b795e2ceb6b2bc110e869a97 | 67a7683b1901db9941b6ed61bcf3f7129f657d11 | /man/plotRocsInjectedSignals.Rd | ed84bdac08fc5721cb9d053e1055ea6e4df17de4 | [
"Apache-2.0"
] | permissive | odysseusinc/MethodEvaluation | 5a51796c66622ff7448c2f2fd2cff09052e87dc6 | 2d1d0775744486e10f3f850ed753e155365a94d8 | refs/heads/master | 2023-01-24T06:56:42.437017 | 2020-12-03T12:44:37 | 2020-12-03T12:44:37 | 313,928,398 | 0 | 0 | null | 2020-11-18T12:55:18 | 2020-11-18T12:31:17 | null | UTF-8 | R | false | true | 857 | rd | plotRocsInjectedSignals.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Plots.R
\name{plotRocsInjectedSignals}
\alias{plotRocsInjectedSignals}
\title{Plot the ROC curves for various injected signal sizes}
\usage{
plotRocsInjectedSignals(logRr, trueLogRr, showAucs, fileName = NULL)
}
\arguments{
\item{logRr}{A vector containing the log of the relative risk as estimated by a method.}
\item{trueLogRr}{A vector containing the injected log(relative risk) for each estimate.}
\item{showAucs}{Should the AUCs be shown in the plot?}
\item{fileName}{Name of the file where the plot should be saved, for example 'plot.png'. See the
function \code{ggsave} in the ggplot2 package for supported file formats.}
}
\value{
A Ggplot object. Use the \code{ggsave} function to save to file.
}
\description{
Plot the ROC curves for various injected signal sizes
}
|
e5d31ff3f524d1733587899b84f49d5af6f8d0eb | 5c3f1cb154f702c8a93732ca3c43592177367b62 | /clf2.Rd | 61528cf17dbe3cf0f755cf0e0aaea4c3adcd397d | [] | no_license | tarunsh2424/parijata | b0441c66be9759ed461da30e144630877fd18fee | 33c07e8bdad98be0feeec1f3f24ec29d938eb44a | refs/heads/master | 2022-11-24T04:55:00.384372 | 2020-07-16T05:57:21 | 2020-07-16T05:57:21 | 275,789,069 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 187 | rd | clf2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clf2.R
\name{clf2}
\alias{clf2}
\title{clf2}
\usage{
clf2()
}
\value{
Provides f'o/f'm.
}
\description{
clf2
}
|
c2e0b54329787ae76989d95230fb515accaa836c | c5bc2307bcead541658ccd7f49db4eda9a6a3762 | /man/loptAge.Rd | 90339ae68b8a49a34e90e91fd11dbab724f5c49b | [] | no_license | shfischer/FLife | b2216da5bdf3f463cc7ea354e49115f598ecafe0 | 4979df14be234debeb468d89cf2659bb2f659836 | refs/heads/master | 2021-08-18T07:43:51.998014 | 2020-08-02T18:35:08 | 2020-08-02T18:35:08 | 238,467,826 | 0 | 0 | null | 2020-02-05T14:20:48 | 2020-02-05T14:20:46 | null | UTF-8 | R | false | true | 1,074 | rd | loptAge.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lopt.R
\docType{methods}
\name{loptAge,FLPar-method}
\alias{loptAge,FLPar-method}
\alias{loptAge}
\alias{loptAge-method}
\title{Age at maximum biomass}
\usage{
\S4method{loptAge}{FLPar}(
params,
m = function(length, params) params["m1"] \%*\% (exp(log(length) \%*\% params["m2"])),
growth = vonB,
...
)
}
\arguments{
\item{params}{FLPar}
\item{m}{A function, i.e. gislason}
\item{growth}{A function, i.e. vonB}
\item{...}{any other arguments}
}
\value{
\code{FLPar} with length at maximum biomass of a cohort
}
\description{
Finds length at maximum biomass
}
\details{
There are several ways to calculate \eqn{L_{opt}}, i.e.
i) \eqn{{2/3}^{rds} L_{\infty}}
ii) \eqn{L_{\infty}\frac{3}{3+k/m}}
iii) by maximising the biomass of a cohort
iv) from an FLBRP object by fishing at F=0 and finding age where biomass is a maximum
}
\examples{
\dontrun{
params=lhPar(FLPar(linf=100))
loptAge(params)
}
}
\seealso{
\code{\link{loptAge}}, \code{\link{lhRef}}, \code{\link{lhPar}}, \code{\link{lhEql}},
}
|
7cfdb5a66840861526a719dff2f9052f7683cd91 | be4e8a80ffda6e364cdf71522c1c1caf4bdc26f9 | /scanone_sub_samples.R | e6f2560e994c9c3c23f3182c9f7b81d61db36f43 | [] | no_license | ApurvaChitre/sample_size | 3535bdeabbca6b138a3ec177a8c5e8a9d492dc97 | 6337e3a70246425a77184eb7f4671092d497a1e0 | refs/heads/main | 2023-04-10T07:33:32.146754 | 2023-03-20T17:44:46 | 2023-03-20T17:44:46 | 524,273,121 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,324 | r | scanone_sub_samples.R | #k represents each of the sub-sampled phenotype
# Run a qtl2 genome scan for one sub-sampled phenotype set. The sub-sample id
# `k` (first command-line argument) selects the control file control_<k>.yaml
# and names the output files.
args <- commandArgs(trailingOnly = TRUE)
#print(args)
k <- args[1]

setwd("path/to/sample_size/qtl2") # Replace with the actual directory path

library(qtl2)
library(qtl2fst) # For memory-intensive calc_genoprob with many SNPs
library(yaml)

CORES <- 8          # threads used by scan1()
GRID_SPACING <- 0.1 # Distance between pseudomarkers used for mapping, in centimorgans.

control_filename <- paste0("control_", k, ".yaml")
cross <- read_cross2(control_filename)

# Phenotype name = the control file's pheno entry minus its ".csv" suffix.
# BUG FIX: gsub(".csv", ...) without fixed = TRUE treats "." as a regex
# wildcard and would also strip e.g. "ocsv" anywhere in the name.
yaml_file <- yaml::yaml.load_file(input = control_filename)
pheno_name <- gsub(".csv", "", yaml_file$pheno, fixed = TRUE)

# Pre-computed genotype probabilities on the pseudomarker grid, and kinship.
pr <- readRDS("/path/to/sample_size/qtl2/pr/pr_grid.rds")
## Uncomment this and comment out the above lines to reload previously computed probabilities:
# pr <- readRDS("qtl2/pr/pr_grid.rds")
kinship <- readRDS("/path/to/sample_size/qtl2/kinship/kinship.rds")

cat("Running genome scan...\n")
# NOTE(review): the scanned phenotype is hard-coded to "bmi_bodylength_w_tail"
# even though `pheno_name` is derived from the control file; confirm that all
# sub-samples are meant to scan this single trait.
out <- scan1(pr, cross$pheno[,"bmi_bodylength_w_tail"], kinship, quiet = FALSE, cores = CORES)
out_filename <- paste0("/path/to/sample_size/qtl2/out/scan_", k, "_out.rds")
saveRDS(out, out_filename)

# Locate peaks (LOD >= 2, with 1.5-LOD support intervals) on the grid map.
gmap <- insert_pseudomarkers(cross$gmap, step = GRID_SPACING)
#out<-readRDS(out_filename)
all_lod <- find_peaks(out, gmap, threshold = 2, drop = 1.5)
write.csv(all_lod, paste0("/path/to/sample_size/qtl2/peaks/", pheno_name, ".csv"),
          row.names = FALSE, quote = TRUE)
5e7aaa8d9d478b917fae05fb09a0f0003bd75d3e | bb3ee504bc72fec323766056b622af6cdce60f84 | /adultcode.R | 677fef4840dff4c0642388043c3017fd4471dedb | [] | no_license | anuchowdary1995/Codes1 | e21eba24854e814c50a9928523ada2060d373b6d | 3afeb5d1e3cc2b6c7d7510b14f983d12e2ece642 | refs/heads/master | 2020-03-29T11:13:59.738716 | 2019-09-26T08:37:46 | 2019-09-26T08:37:46 | 149,842,586 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,410 | r | adultcode.R | adult<-read.csv("D:/kaggle/Adult/Adult.csv")
# Quick look at the raw data, then drop the first (index) column.
View(adult)
adult1 <- adult[,-1]
# (A dead vector of replacement column names was dropped here: it was never
# applied via names(adult1) <- ..., and downstream code relies on the
# dot-separated names produced by read.csv, e.g. Capital.gain.)
# Recode missing workclass values ("?") as "Other".
adult1$Workclass <- as.factor(ifelse(adult1$Workclass == "?", "Other", as.character(adult1$Workclass)))
# Recode missing occupation values ("?") as "Other".
adult1$Occupation <- as.factor(ifelse(adult1$Occupation == "?", "Other", as.character(adult1$Occupation)))
# Binarise the target: 0 = income '<=50K', 1 = income '>50K'.
adult1$Class <- as.factor(ifelse(adult1$Class == '<=50K', 0, 1))
summary(adult1)
# ---- Clean up categorical levels -------------------------------------------
# Collapse the fine-grained census categories into broader groups. Each
# pattern is substituted via gsub() in table order, exactly as the original
# one-substitution-per-line code did.
apply_recodes <- function(values, recodes) {
  for (pattern in names(recodes)) {
    values <- gsub(pattern, recodes[[pattern]], values)
  }
  values
}

adult1$Workclass <- apply_recodes(adult1$Workclass, c(
  "Federal-gov"      = "Federal-Govt",
  "Local-gov"        = "Other-Govt",
  "State-gov"        = "Other-Govt",
  "Self-emp-inc"     = "Self-Employed",
  "Self-emp-not-inc" = "Self-Employed",
  "Without-pay"      = "Not-Working",
  "Never-worked"     = "Not-Working"
))

adult1$Occupation <- apply_recodes(adult1$Occupation, c(
  "Adm-clerical"      = "Admin",
  "Armed-Forces"      = "Military",
  "Craft-repair"      = "Blue-Collar",
  "Exec-managerial"   = "White-Collar",
  "Farming-fishing"   = "Blue-Collar",
  "Handlers-cleaners" = "Blue-Collar",
  "Machine-op-inspct" = "Blue-Collar",
  "Other-service"     = "Service",
  "Priv-house-serv"   = "Service",
  "Prof-specialty"    = "Professional",
  "Protective-serv"   = "Other-Occupations",
  "Tech-support"      = "Other-Occupations",
  "Transport-moving"  = "Blue-Collar"
))

adult1$Education <- apply_recodes(adult1$Education, c(
  "10th"         = "Dropout",
  "11th"         = "Dropout",
  "12th"         = "Dropout",
  "1st-4th"      = "Dropout",
  "5th-6th"      = "Dropout",
  "7th-8th"      = "Dropout",
  "9th"          = "Dropout",
  "Assoc-acdm"   = "Associates",
  "Assoc-voc"    = "Associates",
  "Preschool"    = "Dropout",
  "Some-college" = "HS-grad"
))
View(adult1)
class(adult1$Capital.gain)
# ---- Discretise numeric columns into factors -------------------------------
# Capital gain/loss: None (== 0), Low (below the median of the positive
# values), High (at or above that median).
adult1$Capital.gain <- cut(
  adult1$Capital.gain,
  c(-Inf, 0, median(adult1$Capital.gain[adult1$Capital.gain > 0]), Inf),
  labels = c("None", "Low", "High")
)
adult1$Capital.loss <- cut(
  adult1$Capital.loss,
  c(-Inf, 0, median(adult1$Capital.loss[adult1$Capital.loss > 0]), Inf),
  labels = c("None", "Low", "High")
)
# Age in ten-year bands [16,26), ..., [86,96); weekly hours in bands of 10.
adult1$Age <- cut(adult1$Age, seq(16, 96, 10), right = FALSE)
adult1$Hours.per.week <- cut(adult1$Hours.per.week, seq(0, 100, 10), right = FALSE)
names(adult1)
# BUG FIX: was View(adultl) -- `adultl` (lowercase L) is undefined and aborts
# the script; the data frame is `adult1`.
View(adult1)
# gsub() returned character vectors; convert the recoded columns back to factors.
adult1$Workclass <- as.factor(adult1$Workclass)
adult1$Education <- as.factor(adult1$Education)
adult1$Occupation <- as.factor(adult1$Occupation)
str(adult1)
# BUG FIX: sd() errors on factor columns (var() on factors is defunct), and
# most columns are factors by this point; summarise numeric columns only.
sapply(Filter(is.numeric, adult1), sd)
# ---- 70/30 train/test split (seeded for reproducibility) --------------------
set.seed(10)
# seq_len() instead of 1:nrow(); TRUE/FALSE spelled out rather than T/F.
index <- sample(seq_len(nrow(adult1)), nrow(adult1) * 0.7, replace = FALSE)
train <- adult1[index, ]
test <- adult1[-index, ]
# ---- Logistic regression on the training set -------------------------------
Adult_Model <- glm(Class ~ Age + Workclass + Fnlwgt + Education + Education.num +
                     Marital.status + Occupation + Relationship +
                     Race + Sex + Capital.gain + Capital.loss + Hours.per.week,
                   family = 'binomial', data = train)
summary(Adult_Model)
library(MASS)
# Stepwise AIC variable selection. NOTE(review): the result is only printed,
# never assigned, so the selection does not affect the model used below.
stepAIC(Adult_Model)
# Predicted probabilities on the training data.
train$predit <- predict(Adult_Model, type = "response")
library(popbio)
logi.hist.plot(train$predit, train$Class)
# ROC curve for the training predictions.
library(ROCR)
pred <- prediction(train$predit, train$Class)
roc.perf <- performance(pred, measure = "tpr", x.measure = "fpr")
plot(roc.perf, colorize = TRUE)
# Apply a 0.3 probability threshold to obtain class labels.
train$predit <- ifelse(train$predit > 0.3, 1, 0)
View(train)
# Confusion matrix for the training predictions.
table(train$predit, train$Class)
# NOTE(review): somersD() is not provided by the packages loaded in this
# script (MASS/popbio/ROCR); possibly Hmisc::somers2 was intended -- confirm.
somersD(train$predit, train$Class)
# ---- Evaluation on the test set --------------------------------------------
# NOTE(review): this refits a fresh model on the test data instead of scoring
# the train-fitted Adult_Model (i.e. predict(Adult_Model, newdata = test,
# type = "response")); kept as-is pending confirmation of intent.
Adult_Model1 <- glm(Class ~ Age + Workclass + Fnlwgt + Education + Education.num +
                      Marital.status + Occupation + Relationship +
                      Race + Sex + Capital.gain + Capital.loss + Hours.per.week,
                    family = 'binomial', data = test)
summary(Adult_Model1)
# BUG FIX: predict() on a glm defaults to the link (log-odds) scale, so
# thresholding at 0.5 was wrong; request probabilities explicitly.
test$pred <- predict(Adult_Model1, type = "response")
test$pred <- ifelse(test$pred > 0.5, 1, 0)
table(test$Class, test$pred)
|
ebfc1cf091e1ef5a3827d2eeafe18c562a809333 | b9f2e470d568cde6eac087d771f9adbee117dd1a | /man/taxon-class.Rd | 4a0d13b9b6e05286fde62c359d7dc1e66281ded1 | [] | no_license | cran/megaptera | 0e3852c12ab3d2ec0050d94bf82de74cf8ccc71f | 83855c9a44ba3f0280cd39c1bfdde1fccd25478f | refs/heads/master | 2020-05-16T15:12:09.188082 | 2014-10-30T00:00:00 | 2014-10-30T00:00:00 | 26,581,099 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,216 | rd | taxon-class.Rd | \name{taxon-class}
\docType{class}
\alias{taxon-class}
\title{Class "taxon"}
\description{
Class for taxonomic input parameters for \code{\link{megapteraProj}}.
}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{taxon(ingroup, outgroup, kingdom, hybrids = FALSE,
reference.rank = "auto")}.
}
\section{Slots}{
\describe{
\item{ingroup}{
a vector of mode \code{"character"}, giving species names or names of higher taxa that define the focal group.
}
\item{outgroup}{
a vector of mode \code{"character"}, giving species names or names of higher taxa that define the outgroup.
}
\item{kingdom}{
a vector of mode \code{"character"}, currently one of \code{"Fungi"}, \code{"Metazoa"}, or \code{"Viridiplantae"}. The \bold{International Code of Nomenclature for algae, fungi, and plants} (ICN) and the \bold{International Code of Zoological Nomenclature} (ICZN) do not exclude the possibility that genera and species of plants/fungi/algae and animals share the same name (e.g., \emph{Prunella} \emph{vulgaris}). Therefore it is necessary to include the kingdom into the search term when querying GenBank.
}
\item{species.list}{logical indicating if slot \code{ingroup} is a list of species or a higher rank.
}
\item{hybrids}{
logical: if \code{TRUE}, hybrids (as recognized by the regular expression \code{"^x_|_x_"}) will be excluded from the pipeline.
}
\item{reference.rank}{
a vector of mode \code{"character"}, giving the name of the rank to be used to create subsets of the sequences to derive the reference sequence(s). The default (\code{"auto"}) commits the selection of the reference rank to the pipeline and in most cases you should be fine using this option.
}
}
}
\section{Methods}{
\describe{
\item{show}{\code{signature(object = "taxon")}: prints taxonomic parameter setting}
}
}
%\details{}
%\value{an object of class \code{taxon}}
%\references{}
\author{Christoph Heibl}
\seealso{
\code{\link{dbPars}}, \code{\link{locus}}, and \code{\link{megapteraPars}} for defining of database parameters, loci, and the pipeline's parameters, respectively, and \code{\link{megapteraProj}} for the bundling of input data.
}
%\examples{}
\keyword{classes}
|
bb8916e09250eb52f1bf8d0506282ccadd8e701c | f5e3a947ff71a2479ba4ecc12bc2467bbb1353ba | /staphopia/man/antibiotic_data.Rd | 80d342a20475461556666d60f521235d1acc2565 | [] | no_license | rpetit3/staphopia-r | d9dda2fa1fd3ecf108324ea968489df8dfe14e7f | b6424559082aa032e420f9403b6245b716e13f84 | refs/heads/master | 2021-01-19T18:05:17.423280 | 2017-08-22T23:36:23 | 2017-08-22T23:36:23 | 59,863,793 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 429 | rd | antibiotic_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resistance.R
\name{antibiotic_data}
\alias{antibiotic_data}
\title{antibiotic_data}
\usage{
antibiotic_data(no_SNPs = TRUE)
}
\arguments{
\item{no_SNPs}{}
}
\value{
Dataframe of staphopia SNPs associated with known AR or (no_SNP=F) dataframe of antibiotic resistance loci or
}
\description{
antibiotic_data
}
\examples{
AR_data <- antibiotic_data()
}
|
89d4078eb3be9c3b5cf1ee81e1e80d2cf06eebd2 | 77157987168fc6a0827df2ecdd55104813be77b1 | /COMPoissonReg/inst/testfiles/pcmp_cpp/libFuzzer_pcmp_cpp/pcmp_cpp_valgrind_files/1612728402-test.R | 4bc08e40c3a9139989f479ce4ccdd7c3f49cb8de | [] | no_license | akhikolla/updatedatatype-list2 | e8758b374f9a18fd3ef07664f1150e14a2e4c3d8 | a3a519440e02d89640c75207c73c1456cf86487d | refs/heads/master | 2023-03-21T13:17:13.762823 | 2021-03-20T15:46:49 | 2021-03-20T15:46:49 | 349,766,184 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 138 | r | 1612728402-test.R | testlist <- list(lambda = NaN, nu = 0, tol = 0, x = numeric(0), ymax = 0)
result <- do.call(COMPoissonReg:::pcmp_cpp,testlist)
str(result) |
b0d12ecab1b46e3bc3a0668cf45516160902799a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/MSQC/examples/prism.Rd.R | 594ab040c5f8194a0f2581d73093018c3a232c14 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 251 | r | prism.Rd.R | library(MSQC)
### Name: prism
### Title: Draws a rectangular prism
### Aliases: prism
### Keywords: ~kwd1 ~kwd2
### ** Examples
require(rgl)
LSL <- c( 0.60, 0.30, 49.00)
USL <- c(1.40, 1.70, 51.00)
prism(LSL, USL, add = TRUE, col = "#D55E00" )
|
018e78bc4a7272cbe86988140537276d2502366c | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.application.integration/man/eventbridge_list_event_buses.Rd | 54220f3677cac027f9e185aa022b77219d98d94e | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 1,092 | rd | eventbridge_list_event_buses.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eventbridge_operations.R
\name{eventbridge_list_event_buses}
\alias{eventbridge_list_event_buses}
\title{Lists all the event buses in your account, including the default event
bus, custom event buses, and partner event buses}
\usage{
eventbridge_list_event_buses(NamePrefix = NULL, NextToken = NULL, Limit = NULL)
}
\arguments{
\item{NamePrefix}{Specifying this limits the results to only those event buses with names
that start with the specified prefix.}
\item{NextToken}{The token returned by a previous call to retrieve the next set of
results.}
\item{Limit}{Specifying this limits the number of results returned by this operation.
The operation also returns a NextToken which you can use in a subsequent
operation to retrieve the next set of results.}
}
\description{
Lists all the event buses in your account, including the default event bus, custom event buses, and partner event buses.
See \url{https://www.paws-r-sdk.com/docs/eventbridge_list_event_buses/} for full documentation.
}
\keyword{internal}
|
85f85b87e6db20636ee025ab070a7478ee8135ad | 26859bdff30893465b2ab4b9e98bbbbec3970698 | /man/simul.RLQ.Rd | adc0238282b74ab9a02f42d11317e59c255d03da | [] | no_license | zdealveindy/simcom | 7e4119612713083ea971bebe41ce4812472cba5d | 124defad7ca828c7abfba29f489771622dce081d | refs/heads/master | 2020-03-19T06:29:28.696695 | 2018-06-04T13:01:36 | 2018-06-04T13:01:36 | 136,023,080 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 5,788 | rd | simul.RLQ.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simul.RLQ.r
\name{simul.RLQ}
\alias{simul.RLQ}
\title{Simulation model sensu Dray & Legendre (2008), creating \bold{R}, \bold{L} and \bold{Q} matrices}
\usage{
simul.RLQ(n = 100, p = 100, mi.tol = 10, scenario = 1, add.noise = F,
pa = F, prop.noise.speatt = 0, prop.noise.env = 0, set.seed = NULL)
}
\arguments{
\item{n}{Integer. Number of samples.}
\item{p}{Integer. Number of species.}
\item{mi.tol}{Mean species tolerance.}
\item{scenario}{Simulation scenario. Default 1. See details.}
\item{add.noise}{Logical value. Should noise be added to all matrices? (defaults to FALSE). Refers to scenario 1N in Dray & Legendre (2008)}
\item{pa}{Logical value, default FALSE. Should the species composition matrix (\bold{L}) be converted into presence-absence values?}
\item{prop.noise.speatt}{Real value between 0 and 1. Proportion of noise added to matrix of species attributes by permuting given proportion of rows in matrix \bold{R} randomly among each other.}
\item{prop.noise.env}{Real value between 0 and 1. Proportion of noise added to matrix of sample attributes by permuting given proportion of rows in matrix \bold{Q} randomly among each other.}
\item{set.seed}{Integer number to set seed (for reproducibility of results).}
}
\value{
The function returns the list of three items:
\tabular{lll}{
\tab \code{sitspe} \tab Matrix with species composition data.\cr
\tab \code{env} \tab Matrix with sample attributes (environmnetal data).\cr
\tab \code{speatt} \tab Matrix with species attributes.
}
}
\description{
Dray & Legendre (2008) suggested an algorithm for generating three matrices (\bold{R}, \bold{L} and \bold{Q}) to test the performance of different permutation scenarios in the fourht corner analysis. This function reproduces their algorithm according to the description given in their article, and implements some additional functionality, such as adding proportional noise to species attributes and/or sample attributes matrix.
}
\details{
The artificial data are constructed in the following way (simplified): matrix \bold{R} contains one randomly generated variable with uniform distribution, representing sample attributes, while matrix \bold{Q} contains one randomly generated variable with uniform distribution representing species attributes. To make species and sample attributes linked via species composition, matrix \bold{L} is constructed from both sample and species attributes in a way that response of individual species abundances to the gradient is modelled as symmetric Gaussian curve with optima equivalent to species attributes generated in matrix \bold{Q} and species tolerance generated as random value with mean \code{mi.tol}. Abundance of given species in particular sample is then derived as the probability of species occurrence at particular value of environmental gradient (given by sample attribute in \bold{R}) based on constructed species response curve. For technical details, see the original description in Dray & Legendre (2008), pages 3405-3406.
Six scenarios were suggested by Dray & Legendre (2008) (1, 1N, 2, 3, 4, 5), from which only the scenarios 1, 2, 3 and 4 are currently implemented in this function. Additionally, scenario 0 is added.
The definition of scenarios is as follows:
\describe{
\item{\code{scenario = 1}}{All three matrices (\bold{R}, \bold{L} and \bold{Q}) are linked together by the mechanism of simulation model described above.}
\item{\code{scenario = 1N}}{Alternative to \code{scenario = 1} with added random noise. All three matrices are linked together; normal random noise is added to tables \bold{R} and \bold{Q} (mean = 5, sd = 1), and also to matrix \bold{L} (mean = 0, sd = 2).}
\item{\code{scenario = 2}}{Species composition (\bold{L}) is linked to sample attributes (\bold{R}), but not to species attributes (\bold{Q}). Matrices are created as in scenario 1, and afterwards the rows with species values in matrix \bold{Q} are permuted (cancelling the link between \bold{L} and \bold{Q}).}
\item{\code{scenario = 3}}{Species composition (\bold{L}) is linked to species attributes (\bold{Q}), but not to sample attributes (\bold{R}). Matrices are created as in scenario 1, and afterwards rows in matrix \bold{R} are permuted (this cancels link between \bold{L} and \bold{R}).}
\item{\code{scenario = 4}}{There is no link between \bold{L} and \bold{Q}, neither between \bold{L} and \bold{R}. Matrices are created as in scenario 1, and afterwards the rows in both matrices \bold{R} and \bold{Q} are permuted, cancelling all links between matrices.}
\item{\code{scenario = 0}}{This scenario represents the continuous transition between all of the above scenarios (1 to 4). Modifying the proportion of noise in species attributes (argument \code{prop.noise.specatt}) and in sample attributes (argument \code{prop.noise.sampatt}) makes it possible to create intermediate scenarios with a varying degree to which the matrices are connected to each other. The following settings of the arguments are analogous to the scenarios mentioned above:
\itemize{
\item \code{prop.noise.specatt = 0} and \code{prop.noise.sampatt = 0} is analogous to \code{scenario = 1}
\item \code{prop.noise.specatt = 1} and \code{prop.noise.sampatt = 0} is analogous to \code{scenario = 2}
\item \code{prop.noise.specatt = 0} and \code{prop.noise.sampatt = 1} is analogous to \code{scenario = 3}
\item \code{prop.noise.specatt = 1} and \code{prop.noise.sampatt = 1} is analogous to \code{scenario = 4}
}}}
}
\references{
Dray S. & Legendre P. 2008. Testing the species traits-environment relationships: the fourth-corner problem revisited. Ecology, 89:3400-3412.
}
\author{
David Zeleny (zeleny.david@gmail.com), created mostly according to the description in Dray & Legendre (2008).
}
|
06c5a033e5f5b76eb40884773fe7f5f2dc94bb89 | 097d950379f67e0133f6c151306d85f2e1d146c0 | /R/summary.R | 38d69d62f8f7cb3a7b4689088be8f0770097e639 | [] | no_license | kamapu/vegtable | 989b0d9cfa4cbf423f5d9201b454cb5e0478d283 | e57ab2e88ce17e72b787e8395dbe879ea2db5f69 | refs/heads/master | 2023-06-13T03:44:41.828449 | 2023-05-28T00:02:51 | 2023-05-28T00:02:51 | 55,006,983 | 4 | 0 | null | 2023-05-28T00:02:52 | 2016-03-29T20:05:28 | HTML | UTF-8 | R | false | false | 8,453 | r | summary.R | #' @name summary
#'
#' @title Summary method for vegtable objects
#'
#' @description
#' Display summaries for [vegtable-class] objects.
#'
#' Those methods are implemented for objects of the classes [vegtable-class],
#' [coverconvert-class] and [shaker-class].
#'
#' The method for class `vegtable` retrieves the metadata, the size of
#' the object, its validity and additional statistics on the content of input
#' object.
#'
#' For objects of class [shaker-class], the function `summary()` will either
#' retrieve general statistics when `companion` is missing, or a more detailed
#' display when accompained by a [taxlist-class] or [vegtable-class] object.
#'
#' @param object,x Object to be summarized.
#' @param units Units used for object size (passed to [format()]).
#' @param companion Companion object (either a [taxlist-class] or a
#' [vegtable-class] object.
#' @param authority Logical value indicating whether authors should be
#' displayed or not.
#' @param ... further arguments to be passed to or from other methods.
#'
#' @author Miguel Alvarez \email{kamapu78@@gmail.com}
#'
#' @examples
#' ## Summary for 'vegtable' objects
#' summary(Wetlands_veg)
#' @rdname summary
#' @aliases summary,vegtable-method
#'
#' @exportMethod summary
#'
# S4 summary method for 'vegtable' objects: prints the metadata stored in the
# @description slot, the object size and validity, counts of plots, header
# variables and relations, and the size of the embedded taxonomic list.
setMethod(
  "summary", signature(object = "vegtable"),
  function(object, units = "Kb", ...) {
    # Show original attributes (metadata): one "name: value" line per entry
    cat("## Metadata", "\n")
    if (length(object@description) > 0) {
      for (i in names(object@description)) {
        cat(" ", i, ": ", object@description[i], sep = "", "\n")
      }
    }
    # Object size formatted in the requested units (passed on to format())
    cat(" object size:", format(object.size(object), units = units),
      sep = " ", "\n"
    )
    # validObject() returns TRUE invisibly, or raises an error if invalid
    cat(" validity:", validObject(object), "\n")
    cat("\n")
    # Content of some slots: plot counts and table dimensions
    cat("## Content", "\n")
    cat(" number of plots:", nrow(object@header), sep = " ", "\n")
    # Plots that actually have species records in @samples
    cat(" plots with records:",
      length(unique(object@samples$ReleveID)),
      sep = " ", "\n"
    )
    cat(" variables in header:", ncol(object@header), sep = " ", "\n")
    cat(" number of relations:", length(object@relations),
      sep = " ",
      "\n"
    )
    cat("\n")
    # Content of the embedded species list (a 'taxlist' object)
    cat("## Taxonomic List", "\n")
    cat(" taxon names:", nrow(object@species@taxonNames),
      sep = " ",
      "\n"
    )
    cat(" taxon concepts:", nrow(object@species@taxonRelations),
      sep = " ",
      "\n"
    )
    cat(" validity:", validObject(object@species), sep = " ", "\n")
    cat("\n")
  }
)
#' @rdname summary
#' @aliases summary,coverconvert-method
#'
#' @examples
#' ## Summary for 'coverconvert' objects
#' summary(braun_blanquet)
# S4 summary method for 'coverconvert' objects: prints the number of cover
# scales, then one table per scale mapping each level to its numeric range.
setMethod(
  "summary", signature(object = "coverconvert"),
  function(object, ...) {
    cat("## Number of cover scales:", length(object@value), "\n")
    cat("\n")
    for (i in names(object@value)) {
      Levels <- paste(object@value[[i]])
      # Conversion breakpoints: lower bounds drop the last entry, upper
      # bounds drop the first, yielding one interval per level.
      Range_1 <- paste(object@conversion[[i]])[
        -length(object@conversion[[i]])
      ]
      Range_2 <- paste(object@conversion[[i]])[-1]
      # seq_along() (not 1:length()) keeps this loop empty when a scale
      # has no intervals, instead of iterating over c(1, 0).
      for (j in seq_along(Range_2)) {
        # A repeated upper bound reuses the previous interval's lower bound
        if (duplicated(Range_2)[j]) Range_1[j] <- Range_1[j - 1]
      }
      cat(paste0("* scale '", i, "':"), "\n")
      print(data.frame(Levels = Levels, Range = paste(
        Range_1, "-",
        Range_2
      ), stringsAsFactors = FALSE))
      cat("\n")
    }
  }
)
#' Re-writing formulas for print output
#'
#' Mediating between syntax and print format.
#'
#' @param shaker A [shaker-class] object.
#' @param companion A companion data set which is either missing or a
#' [vegtable-class] object.
#'
#' @return A formated output text.
#'
#' @keywords internal
rewrite_formulas <- function(shaker, companion) {
  # Rewrite a single formula string from internal syntax (indices into
  # shaker@dominants) into display syntax (quoted taxon name + cover value).
  rewrite_one <- function(txt) {
    # Detect the quote character used inside the original formula
    if (grepl("'", txt)) quote_chr <- "'"
    if (grepl('"', txt)) quote_chr <- '"'
    # Replace the internal list-access syntax with readable prefixes
    txt <- gsub("groups[[", "groups:", txt, fixed = TRUE)
    txt <- gsub("dominants[[", "species:", txt, fixed = TRUE)
    txt <- gsub("]]", "", txt, fixed = TRUE)
    # Remaining numbers are row indices into shaker@dominants
    dom_rows <- as.numeric(unlist(regmatches(
      txt,
      gregexpr(
        "[[:digit:]]+\\.*[[:digit:]]*",
        txt
      )
    )))
    # Iterating over numeric(0) simply skips the loop, so no guard is needed
    for (row_id in dom_rows) {
      entry <- shaker@dominants[row_id, ]
      # Resolve the taxon concept ID to its accepted name
      entry$TaxonConceptID <- companion[
        match(
          entry$TaxonConceptID,
          companion$TaxonConceptID
        ), "TaxonName"
      ]
      entry <- paste(entry[1, ], collapse = " ")
      txt <- sub(
        paste0("species:", row_id),
        paste0("species:", quote_chr, entry, quote_chr), txt
      )
    }
    txt
  }
  out <- list()
  for (nm in names(shaker@formulas)) {
    out[[nm]] <- rewrite_one(shaker@formulas[[nm]])
  }
  return(out)
}
#' @rdname summary
#' @aliases summary,shaker-method
#'
#' @examples
#' ## Summary for 'shaker' objects (alone and with companion)
#' summary(Wetlands, Wetlands_veg)
# S4 summary method for 'shaker' objects.  Without a companion only counts
# are printed; with a companion ('taxlist' or 'vegtable') the taxon concept
# IDs stored in the shaker are resolved to taxon names for display.
setMethod(
  "summary", signature(object = "shaker"),
  function(object, companion, authority = FALSE, ...) {
    if (missing(companion)) {
      cat("Number of pseudo-species:", length(object@pseudos), "\n")
      cat("Number of species groups:", length(object@groups), "\n")
      cat("Number of formulas:", length(object@formulas), "\n")
    } else {
      # A 'vegtable' companion carries its species list in @species
      if (is(companion, "vegtable")) {
        companion <- companion@species
      }
      companion <- accepted_name(companion)
      if (authority) {
        # Append author names; missing authors become empty strings
        companion$AuthorName[is.na(companion$AuthorName)] <- ""
        companion$TaxonName <- with(companion, paste(
          TaxonName,
          AuthorName
        ))
      }
      if (length(object@pseudos) > 0) {
        cat("## Pseudo-species:", "\n")
        # seq_along() instead of 1:length() throughout: safe for any length
        for (i in seq_along(object@pseudos)) {
          cat(
            "*", paste0(
              "'",
              companion[
                match(
                  object@pseudos[[i]][1],
                  companion$TaxonConceptID
                ),
                "TaxonName"
              ], "'"
            ),
            "contains:", "\n"
          )
          # First element is the pseudo-species itself; the rest are the
          # merged taxa.  seq_along(x)[-1] avoids 2:1 running backwards
          # when an entry contains a single taxon.
          for (j in seq_along(object@pseudos[[i]])[-1]) {
            cat(" ", companion[
              match(
                object@pseudos[[i]][j],
                companion$TaxonConceptID
              ),
              "TaxonName"
            ], "\n")
          }
        }
        cat("\n")
      }
      if (length(object@groups) > 0) {
        cat("## Species groups:", "\n")
        for (i in seq_along(object@groups)) {
          cat("*", paste0(
            "'", names(object@groups)[i],
            "' group:"
          ), "\n")
          for (j in seq_along(object@groups[[i]])) {
            cat(" ", companion[
              match(
                object@groups[[i]][j],
                companion$TaxonConceptID
              ),
              "TaxonName"
            ], "\n")
          }
        }
        cat("\n")
      }
      if (length(object@formulas) > 0) {
        cat("## Formulas:", "\n")
        # Formulas are rewritten with taxon names for readability
        EQ <- rewrite_formulas(object, companion)
        for (i in seq_along(object@formulas)) {
          cat(
            "*", paste0(names(object@formulas)[i], ":"),
            EQ[[i]], "\n"
          )
        }
        cat("\n")
      }
    }
  }
)
################################################################################
#' @rdname summary
#'
#' @aliases show,vegtable-method
#'
#' @exportMethod show
# show() and print() for 'vegtable' both delegate to summary()
setMethod(
  "show", signature(object = "vegtable"),
  function(object) {
    summary(object)
  }
)
#' @rdname summary
#'
#' @aliases print,vegtable-method
setMethod(
  "print", signature(x = "vegtable"),
  function(x, ...) {
    summary(x, ...)
  }
)
################################################################################
#' @rdname summary
#'
#' @aliases show,coverconvert-method
#'
#' @exportMethod show
# show() and print() for 'coverconvert' both delegate to summary()
setMethod(
  "show", signature(object = "coverconvert"),
  function(object) {
    summary(object)
  }
)
#' @rdname summary
#'
#' @aliases print,coverconvert-method
setMethod(
  "print", signature(x = "coverconvert"),
  function(x, ...) {
    summary(x, ...)
  }
)
################################################################################
#' @rdname summary
#'
#' @aliases show,shaker-method
#'
#' @exportMethod show
# show() and print() for 'shaker' both delegate to summary(); note that the
# companion argument of summary() can only be supplied via print(x, ...)
setMethod(
  "show", signature(object = "shaker"),
  function(object) {
    summary(object)
  }
)
#' @rdname summary
#'
#' @aliases print,shaker-method
setMethod(
  "print", signature(x = "shaker"),
  function(x, ...) {
    summary(x, ...)
  }
)
|
351776305fabf314ae88c6188eeefdeebd9bf28d | a57b7b949fa6dba0b7d6f56cbe45c0ad46481537 | /man/se.Rd | b4023a643863e813415e4448e49583aa740dc8b4 | [
"MIT"
] | permissive | davebraze/FDButils | 12cf914b5bb04dd71f4ea60986650e4b608a083d | 7a3c5bb9cd5ef6dbe4dcd522eb1e1772f71bff46 | refs/heads/master | 2023-03-12T02:14:19.146630 | 2023-02-20T19:52:18 | 2023-02-20T19:52:18 | 57,219,505 | 1 | 2 | MIT | 2017-12-28T14:59:29 | 2016-04-27T14:23:14 | R | UTF-8 | R | false | true | 767 | rd | se.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/se.R
\name{se}
\alias{se}
\alias{seM}
\title{Standard Error Of The Mean.}
\usage{
se(x, na.rm = FALSE)
seM(x, na.rm = FALSE)
}
\arguments{
\item{x}{Object to compute SEMs for. Can be vector, matrix or data.frame.}
\item{na.rm}{Specify how to handle missing values.}
}
\value{
Standard error of the mean for x, or each column of x.
}
\description{
Compute standard error of the mean of x.
}
\details{
Returns the standard error of the mean of x, which can be either a vector, matrix or data.frame.
In the latter two cases, SEM is computed column-wise and a vector of values is returned. When x is
a vector, a single value is returned.
}
\author{
David Braze \email{davebraze@gmail.com}
}
|
4ee58f6e1799563bd2fdeb23352d1c167086cb76 | 7aa3377774fbf5b9d8a34ac88d7f06a5184be290 | /extsim-makeplots.R | 5626ad003f9d8b1b3981171bebe66f8e481ca8b3 | [] | no_license | wuhaonn/cbm-rbd | dc9a6903b911e6bfef0b8a73445fecf817c9dc4b | a8d6f2fa54e19c324894de1a50f551ecc3f1bf49 | refs/heads/master | 2021-06-20T07:32:18.067918 | 2017-06-13T11:35:29 | 2017-06-13T11:35:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,840 | r | extsim-makeplots.R | # --------------------------------------------------------------------------- #
# CBM systems paper - extended simulation study for revision - make plots
# --------------------------------------------------------------------------- #
# load packages
library(actuar)
library(ReliabilityTheory)
library(reshape2)
library(ggplot2)
library(gridExtra)
# code files (these contain also the plotting functions)
source("weibull-sys-functions.R")
source("cbm-sim2.R")
# plot definitions and summary functions
source("plotdefs.R")
source("extsim-summaryfunctions.R")
# Case A: failure times as expected
## load simulation objects
load("extsim-a-objects.RData")
## apply summary functions & create joint data.frame
AsimCBMcpuSummary <- simsummary(AsimCBMcpu)
AsimCBMepuSummary <- simsummary(AsimCBMepu)
AsimCBMnpuSummary <- simsummary(AsimCBMnpu)
AsimABMepuSummary <- simsummary(AsimABMepu)
AsimABMnpuSummary <- simsummary(AsimABMnpu)
AsimCMSummary <- simsummary(AsimCM)
AsimRes <- rbind(data.frame(sim = "CBM-cpu", AsimCBMcpuSummary),
data.frame(sim = "CBM-epu", AsimCBMepuSummary),
data.frame(sim = "CBM-npu", AsimCBMnpuSummary),
data.frame(sim = "ABM-epu", AsimABMepuSummary),
data.frame(sim = "ABM-npu", AsimABMnpuSummary),
data.frame(sim = "CM", AsimCMSummary))
names(AsimRes)[3:5] <- c(expression(e[sys]), expression(bar(r)[sys]), expression(bar(g)))
## the plots
### with lines like before (change sizes?)
AsimPlotLines <- ggplot(melt(AsimRes, c("id", "sim")), aes(x = id, y = value)) +
geom_line(aes(group = sim), size = 0.3) + geom_point(aes(group = sim), size = 0.5) +
facet_grid(variable ~ sim, scales = "free_y", labeller = label_parsed) +
xlab("25-cycle repetition number") + theme(axis.title.y = element_blank())
pdf("AsimPlotLines.pdf", width = 6, height = 3)
AsimPlotLines
dev.off()
### one boxplot per policy
AsimPlotBoxplot <- ggplot(melt(AsimRes, c("id", "sim")), aes(x = sim, y = value, group = sim)) + stat_boxplot() +
facet_grid(variable ~ ., scales = "free_y", labeller = label_parsed) + theme(axis.title = element_blank())
pdf("AsimPlotLines.pdf", width = 6, height = 3)
AsimPlotBoxplot
dev.off()
### other way to display the results?
#???
# boxplot test
#ggplot(melt(br1sim1Tt01acsummaryall, c("id", "sim")), aes(x = sim, y = value, group = sim)) + stat_boxplot() +
# facet_grid(variable ~ ., scales = "free_y", labeller = label_parsed) + theme(axis.title = element_blank())
## mean and median of average unit cost
#mean(AsimCBMcpuSummary$meancostrate); median(AsimCBMcpuSummary$meancostrate) # there must be a more clever way
#mean(AsimRes$"bar(g)", by = sim) # something like this
# Case B: failure times earlier than expected
# Case C: failure times later than expected
# Case D: varying failure time behaviour
# |
0cf5c59d501c5409e30bcde4129b26c65f1fdeb2 | b53744576b5c8a4725c5954b8e7f051d0b78f384 | /PH525-StatisticsAndR/populationVsSampleEstimates.R | 59669e1129cf647e70aa90005830f37bd0158a50 | [] | no_license | spiralelucide/R | 58e6b592a5955c63e76ee5343bc921b8d667f3c6 | a2854983c78f1ba5b212f34fbeb2239056098c00 | refs/heads/master | 2021-01-01T17:03:20.653042 | 2017-08-28T17:26:56 | 2017-08-28T17:26:56 | 97,988,535 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,209 | r | populationVsSampleEstimates.R | library(downloader)
url <- "https://raw.githubusercontent.com/genomicsclass/dagdata/master/inst/extdata/mice_pheno.csv"
filename <- basename(url)
download(url, destfile=filename)
dat <- read.csv(filename)
dat <- na.omit(dat)
#population vs sample mean and standard deviation for male mice diets
x <- filter(dat, Sex=="M" & Diet=="chow") %>% select(Bodyweight) %>% unlist
x
mean(x)
popsd(x)
set.seed(1)
samp1 <- sample(x,size = 25)
mean(samp)
y <- filter(dat, Sex=="M" & Diet=="hf") %>% select(Bodyweight) %>% unlist
mean(y)
popsd(y)
set.seed(1)
samp <- sample(y,size = 25)
mean(samp)
#y-(ybar) - x-(xbar)
popMean <- mean(y)-mean(x)
sampleMean <- mean(samp) - mean(samp1)
popMean - sampleMean
#population vs sample mean and sd for female mice diets
## x filters control
x <- dat %>% filter(Sex=="F" & Diet=="chow") %>% select(Bodyweight) %>% unlist
mean(x)
popsd(x)
set.seed(1)
samp1 <- sample(x,size = 25)
mean(samp1)
## y filters experimental group
y <- dat %>% filter(Sex=="F" & Diet=="hf") %>% select(Bodyweight) %>% unlist
mean(y)
popsd(y)
set.seed(1)
samp <- sample(y,size = 25)
mean(samp)
#y-(ybar) - x-(xbar)
popMean <- mean(y)-mean(x)
sampleMean <- mean(samp) - mean(samp1)
popMean - sampleMean
|
e161136cee9bd844a177a5dbbed0580bf01c92cb | 7550bbca8c12d81be101172e80a8b7bcff3b0a89 | /man/comb_name.Rd | 617997f3e01ca8f4752f1658a38f448674c729ab | [
"MIT"
] | permissive | jokergoo/ComplexHeatmap | 57750d235297861c890bb3ab401fde0c16ef47f5 | ae0ec42cd2e4e0446c114d23dcf43cf2c2f585c8 | refs/heads/master | 2023-07-15T03:47:03.239635 | 2023-04-25T08:00:15 | 2023-04-25T08:00:15 | 30,017,750 | 1,155 | 231 | NOASSERTION | 2023-06-15T14:19:52 | 2015-01-29T11:45:58 | R | UTF-8 | R | false | false | 895 | rd | comb_name.Rd | \name{comb_name}
\alias{comb_name}
\title{
Names of the Combination sets
}
\description{
Names of the Combination sets
}
\usage{
comb_name(m, readable = FALSE)
}
\arguments{
\item{m}{A combination matrix returned by \code{\link{make_comb_mat}}.}
\item{readable}{Whether the combination represents as e.g. "A&B&C".}
}
\details{
The name of the combination sets are formatted as a string
of binary bits. E.g. for three sets of "a", "b", "c", the combination
set with name "101" corresponds to select set a, not select set b
and select set c. The definition of "select" depends on the value of
\code{mode} from \code{\link{make_comb_mat}}.
}
\value{
A vector of names of the combination sets.
}
\examples{
set.seed(123)
lt = list(a = sample(letters, 10),
b = sample(letters, 15),
c = sample(letters, 20))
m = make_comb_mat(lt)
comb_name(m)
comb_name(m, readable = TRUE)
}
|
0d1daeaeee8238cf86d39783b365608df1b2bf00 | 4a38335e699774774df1a7b86d3bacbca1d98b0f | /scripts/2alignreads.R | 417c5258c7f07535eac0d51c151b0b04737c4bd3 | [
"MIT"
] | permissive | Jake1Egelberg/DEAR | a50bb510a24bf32850037ee267615f5f1e04243e | fa01d4ea4f5803da6677ae6bbcb70ed9e3344173 | refs/heads/main | 2023-04-12T12:14:23.577879 | 2021-08-21T13:25:46 | 2021-08-21T13:25:46 | 396,860,994 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,106 | r | 2alignreads.R | #***********************************************************
#***********************************************************
#*************************RNA SEQ **************************
#***********************************************************
# Step 2 of the DEAR workflow: align FASTQ reads against an index built in
# step 1 (buildindex/), writing progress marker files into progress/.
#---------------------LOADING PARMS----------------------
library(this.path)
library(stringr)
#Choose primary workflow file path
#(the project root is the script directory with "scripts" stripped off)
file.dir<-this.dir()
# NOTE(review): this shadows base::file.path for the rest of the script
file.path<-str_replace(file.dir,"scripts","")
#Load user-set parms file (colon-separated key/value pairs)
parms<-read.delim(paste(file.path,"parms.txt",sep=""),sep=":")
#Redefine parms for R: look up each key in the first column, take column 2
index.file<-trimws(parms[which(parms$RNA_SEQ_PARAMETERS=="index.file"),2])
paired.end.status<-as.logical(trimws(parms[which(parms$RNA_SEQ_PARAMETERS=="paired.end.status"),2]))
ref.genome<-trimws(parms[which(parms$RNA_SEQ_PARAMETERS=="ref.genome"),2])
#Load packages
library(BiocManager)
library(Rsubread)
library(stringr)
set.seed(42)
#Create text file to update user (written as a progress marker below)
update<-data.frame(Update="Status")
#Remove existing progress files before starting
progress.files<-list.files(path=paste(file.path,"progress",sep=""),full.names = TRUE)
file.remove(progress.files)
setwd(paste(file.path,"progress",sep=""))
write.table(update,"ALIGNING READS.txt")
#---------------------ALIGN READS----------------------
#Identify fastq files in 1fastqfiles folder
fastq.files <- list.files(path = paste(file.path,"1fastqfiles/",sep=""),
pattern = ".fastq.gz$",
full.names = TRUE)
#Paired-end mates are expected in the 1fastqfiles/pair subfolder
if(paired.end.status==TRUE){
fastq.files.pair <- list.files(path = paste(file.path,"1fastqfiles/pair",sep=""),
pattern = ".fastq.gz$",
full.names = TRUE)
}
#Align reads to index (Rsubread::align); single- vs paired-end mode is
#chosen from the user parameter file
if(paired.end.status==FALSE){
align(index = paste(file.path,"buildindex/",index.file,sep=""),
readfile1 = fastq.files)
} else if(paired.end.status==TRUE){
align(index = paste(file.path,"buildindex/",index.file,sep=""),
readfile1 = fastq.files,
readfile2 = fastq.files.pair)
}
setwd(paste(file.path,"progress",sep=""))
write.table(update,"ALIGNMENT COMPLETE.txt")
ba9b655c5e52f4996e86e7ea6ab53ca25fc88e18 | e8665e5b41bec56879ae687174659ece149b828b | /app.R | 00315d4b26360a3399a3f14b28501be1feb540b0 | [] | no_license | Akacker/WRC_dashboard | e65e641536f359ce2237173137cd199cbd46462d | 070879b4cbef2bf35bc8fab2a7dbb3e00676dca5 | refs/heads/master | 2020-07-27T13:43:39.287950 | 2019-02-25T05:29:22 | 2019-02-25T05:29:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,770 | r | app.R | rm(list = ls())
library(shinydashboard)
ui <- dashboardPage(
dashboardHeader(title = "WRC Dashboard"),
dashboardSidebar(sidebarMenu(
menuItem("Industry Water Reuse", tabName = "dashboard", icon = icon("dashboard")),
menuItem("Community Rating Map", tabName = "widgets", icon = icon("th")),
menuItem("Entity Behaviour", tabName = "rate", icon = icon("th")),
menuItem("WRC tokens", tabName = "transaction", icon = icon("th"))
)),
dashboardBody(tabItems(
tabItem(tabName = "dashboard", fluidRow(htmlOutput("frame"))
),
tabItem(tabName = "widgets", sidebarLayout(
sidebarPanel(width = 4,
fluidRow(
column(12, dataTableOutput('dto'))
)),
mainPanel(fluidRow(htmlOutput("hframe"))
)
)),
tabItem(tabName = "rate", fluidRow(dataTableOutput('bto'))),
tabItem(tabName = "transaction", fluidRow(dataTableOutput('tto')))
)
)
)
server <- function(input, output) {
observe({
test <<- paste0("http://localhost:5000/map")
map <<- paste0("http://localhost:5000/heatmap")
})
values <- read.csv("templates/ward.csv", stringsAsFactors = FALSE)
behave <- read.csv("templates/behave.csv", stringsAsFactors = FALSE)
tokens <- read.csv("templates/token.csv", stringsAsFactors = FALSE)
output$frame <- renderUI({
my_test <- tags$iframe(src=test, height=600, width=1130)
print(my_test)
my_test
})
output$hframe <- renderUI({
my_test <- tags$iframe(src=map, height=600, width=720)
print(my_test)
my_test
})
output$dto <- renderDataTable(values, options = list(dom = 'ft'))
output$tto <- renderDataTable(tokens)
output$bto <- renderDataTable(behave)
}
shinyApp(ui, server)
|
8a807e44fae0ed8c506a3904ef04c7f84af67824 | 59093dfdd9784a917581e77701f03871b8c9c80a | /R/server.R | 7d652a510fe3b43396c2f32f7abe6f049eb6d5e1 | [] | no_license | jameshay218/driftSimApp | bc643b2ecdb4a01739f7e4511f776fc2e14a65df | 27854fe29a7c28f2584a72a088a3afa3728627b2 | refs/heads/master | 2021-01-21T13:57:25.010682 | 2016-06-01T13:48:32 | 2016-06-01T13:48:32 | 50,588,661 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 31,243 | r | server.R | library(shiny)
library(ggplot2)
library(reshape2)
library(Rcpp)
library(gridExtra)
library(driftSim)
library(data.table)
library(deSolve)
library(plyr)
options(shiny.maxRequestSize=1000*1024^2)
shinyServer(
function(inputs, output, session){
# Read a headerless CSV of daily S/I/R counts and return a ggplot line chart
# of the three compartments over the simulation duration.
plot_SIR <- function(filename) {
  # y-axis ceiling: initial S + I + R plus fixed headroom of 500
  y_max <- isolate(inputs$s0) + isolate(inputs$i0) + isolate(inputs$r0) + 500
  # One row per day; prepend an explicit day index column
  sir <- read.csv(filename, header = FALSE)
  sir <- cbind(seq_len(nrow(sir)), sir)
  colnames(sir) <- c("t", "S", "I", "R")
  # Long format: one (t, Population, value) row per compartment per day
  sir <- melt(sir, id = "t")
  colnames(sir) <- c("t", "Population", "value")
  ggplot() +
    geom_line(data = sir, aes(x = t, y = value, colour = Population, group = Population)) +
    xlab("Time (days)") +
    ylab("Number Individuals") +
    scale_y_continuous(limits = c(0, y_max), expand = c(0, 0)) +
    scale_x_continuous(limits = c(0, inputs$dur + 1), expand = c(0, 0)) +
    theme(
      text = element_text(colour = "gray20", size = 14),
      plot.title = element_text(size = 28),
      legend.text = element_text(size = 20, colour = "gray20"),
      legend.title = element_text(size = 20, colour = "gray20"),
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank(),
      axis.line = element_line(colour = "gray20"),
      axis.line.x = element_line(colour = "gray20"),
      axis.line.y = element_line(colour = "gray20"),
      axis.text.x = element_text(colour = "gray20"),
      panel.background = element_blank(),
      axis.text.y = element_text(colour = "gray20"))
}
# Pre-compute the expected one-day change in viral binding avidity (deltaV)
# on a grid of current avidities (V, 0..maxV by 0.01) and host immunity
# levels (immK, 0..maxK by 0.1) by numerically integrating the avidity ODE
# with deSolve::ode().  The resulting matrix (rows = immunity levels,
# columns = avidities) is written to outputs/deltaVMat.csv and later read
# back by run_sim.
calculate_deltaVMat <- observeEvent(inputs$dVcalc, {
  print("Calculating deltaV matrix...")
  maxV <- 3        # upper bound of the binding-avidity grid
  maxK <- 80       # upper bound of the host-immunity grid
  time_step <- 1   # integrate over one day
  p <- as.numeric(inputs$p)
  r <- as.numeric(inputs$r)
  q <- as.numeric(inputs$q)
  a <- as.numeric(inputs$a)
  b <- as.numeric(inputs$b)
  kc <- as.numeric(inputs$kc)
  pars <- c(p, r, b, a, kc, q)
  # ODE right-hand side.  params = c(immunity level j, p, r, b, a, kc, q);
  # V is the (scalar) state variable.
  difeq <- function(t, V, params) {
    x <- V
    j <- params[1]
    p <- params[2]
    r <- params[3]
    b <- params[4]
    a <- params[5]
    kc <- params[6]
    q <- params[7]
    immK <- r * j
    if (immK < 0) immK <- 0
    f_x <- (1 - exp(-p * (x + q)))^(immK)   # immune-escape probability
    g_x <- exp(-a * (x^b))                  # within-host replication probability
    f_dx <- p * (immK) * ((1 - exp(-p * (V + q)))^(immK - 1)) * (exp(-p * (x + q)))
    g_dx <- -a * b * x^(b - 1) * exp(-a * (x^b))
    dV <- f_x * g_dx + g_x * f_dx           # product rule on f(x) * g(x)
    return(list(dV * kc))
  }
  V <- seq(0, maxV, by = 0.01)
  immKs <- seq(0, maxK, by = 0.1)
  allV <- matrix(nrow = length(immKs), ncol = length(V))
  # seq_along() replaces the 1:length() anti-pattern in both loops
  for (j in seq_along(immKs)) {
    print(j)  # progress indicator (one line per immunity level)
    for (i in seq_along(V)) {
      deltaV <- ode(y = c(V[i]), seq(0, time_step, by = 1 / 40), difeq, c(immKs[j], pars))
      # The last element of the deSolve result matrix is the final V value
      allV[j, i] <- deltaV[length(deltaV)] - V[i]
    }
  }
  write.table(allV, file = paste(getwd(), "/outputs/deltaVMat.csv", sep = ""),
              row.names = FALSE, col.names = FALSE, sep = ",")
})
# Run the driftSim simulation for every selected scenario when the "run"
# button is pressed.  Collects flags and host/virus parameters from the UI,
# loads the pre-computed deltaV matrix, and shows nested progress bars
# (outer = scenario, inner = simulation day via the C++ callback).
run_sim <- observeEvent(inputs$run, {
  print("Running simulation")
  #' Make sure Rcpp will compile and run
  SIR_flag <- 1 %in% inputs$flags      #' Flag to save SIR dynamics
  voutput1_flag <- 2 %in% inputs$flags #' Flag to save virus information for Sean's phylogenetic tree
  voutput2_flag <- 3 %in% inputs$flags #' Flag to save pairwise distance matrix
  time_flag <- 4 %in% inputs$flags     #' Flag to record time taken for simulation
  VERBOSE <- 7 %in% inputs$flags       #' Outputs in simulation
  save_state <- 5 %in% inputs$flags    #' Flag to save the final state of the simulation
  input_flag <- 6 %in% inputs$flags    #' Flag to use specified file as input for simulation
  flags <- c(SIR_flag, voutput1_flag, voutput2_flag, time_flag, save_state, input_flag)
  flags <- as.numeric(flags)
  print(flags)
  # Host population parameters; rates are converted to per-day
  S0 <- as.numeric(inputs$s0)
  I0 <- as.numeric(inputs$i0)
  R0 <- as.numeric(inputs$r0)
  contactRate <- as.numeric(inputs$contact)
  mu <- 1 / (as.numeric(inputs$mu) * 365)   # birth/death rate (input is years)
  wane <- 1 / as.numeric(inputs$wane)       # waning rate (input is mean duration)
  gamma <- 1 / as.numeric(inputs$gamma)     # recovery rate (input is mean duration)
  iniBind <- as.numeric(inputs$iniBinding)
  meanBoost <- as.numeric(inputs$boost)
  iniDist <- as.numeric(inputs$iniDist)
  hostpars <- c(S0, I0, R0, contactRate, mu, wane, gamma, iniBind, meanBoost, iniDist)
  # deltaV lookup table produced by calculate_deltaVMat
  deltaVMat <- unname(as.matrix(read.csv(paste(getwd(), "/outputs/deltaVMat.csv", sep = ""), header = FALSE)))
  # Virus parameters
  p <- as.numeric(inputs$p)
  r <- as.numeric(inputs$r)
  q <- as.numeric(inputs$q)
  a <- as.numeric(inputs$a)
  b <- as.numeric(inputs$b)
  n <- as.numeric(inputs$n)
  v <- as.numeric(inputs$v)
  probMut <- inputs$probMut
  expDist <- inputs$expDist
  kc <- inputs$kc
  VtoD <- inputs$VtoD
  viruspars <- c(p, r, q, a, b, n, v, probMut, expDist, kc, VtoD)
  print("Host pars:")
  print(hostpars)
  print("Virus pars:")
  print(viruspars)
  # Inner progress bar, advanced once per simulated day by the C++ callback
  progress_within <- shiny::Progress$new()
  on.exit(progress_within$close())
  callback <- function(x) {
    progress_within$set(value = x[[1]] / inputs$dur, detail = x[[1]])
    # isolate(dummy$iter <- dummy$iter + 1)
    ##message(sprintf("day: %d [%d / %d / %d]", x[[1]], x[[2]], x[[3]], x[[4]]))
  }
  # Fall back to default input files when the user uploaded none
  if (is.null(inputs$hostInput) || is.null(inputs$virusInput)) {
    inputFiles <- c("hosts.csv", "viruses.csv")
  } else {
    inputFiles <- c(inputs$hostInput$datapath, inputs$virusInput$datapath)
  }
  if (length(inputs$scenarios) > 0) {
    withProgress(message = "Simulation number", value = 0, detail = 1, {
      for (i in inputs$scenarios) {
        print(i)
        # Fixed misplaced parenthesis: `i` and `sep` were previously passed
        # to print() instead of paste()
        print(paste("Scenario number: ", i, sep = ""))
        progress_within$set(message = "Day", value = 0)
        Sys.sleep(0.1)
        # Per-scenario output file names
        filename1 <- paste("scenario_", i, "_SIR.csv", sep = "")
        filename2 <- paste("voutput1_", i, ".csv", sep = "")
        filename3 <- paste("voutput2_", i, ".csv", sep = "")
        filename4 <- paste("hosts_", i, ".csv", sep = "")
        filename5 <- paste("viruses_", i, ".csv", sep = "")
        filenames <- c(filename1, filename2, filename3, filename4, filename5)
        y <- run_simulation(flags, hostpars, viruspars, deltaVMat, 0, inputs$dur, inputFiles, filenames, VERBOSE, as.numeric(i), callback)
        incProgress(1 / length(inputs$scenarios), detail = (as.numeric(i) + 1))
        # Move any produced files into the outputs/ directory
        for (j in filenames) {
          if (file.exists(j)) file.rename(from = j, to = paste(getwd(), "/outputs/", j, sep = ""))
        }
      }
    })
  }
})
# SIR trajectory panels for scenarios 1-3.  Each renderPlot takes a reactive
# dependency on inputs$run (so it refreshes after every simulation) and only
# draws when its scenario number was selected.
output$sim_main_1<- renderPlot({
inputs$run
if(1 %in% inputs$scenarios){
g <- plot_SIR(paste(getwd(),"/outputs/scenario_1_SIR.csv",sep=""))
g
} else{
NULL
}
})
output$sim_main_2<- renderPlot({
inputs$run
if(2 %in% inputs$scenarios){
g <- plot_SIR(paste(getwd(),"/outputs/scenario_2_SIR.csv",sep=""))
g
}else{
NULL
}
})
output$sim_main_3<- renderPlot({
inputs$run
if(3 %in% inputs$scenarios){
g <- plot_SIR(paste(getwd(),"/outputs/scenario_3_SIR.csv",sep=""))
g
} else{
NULL
}
})
# SIR trajectory panel for scenario 4 (same pattern as scenarios 1-3).
output$sim_main_4 <- renderPlot({
  inputs$run
  if (4 %in% inputs$scenarios) {
    # Fixed path: was "outputs/..." without the leading "/", so the file was
    # looked up relative to a non-existent directory (siblings use "/outputs/")
    g <- plot_SIR(paste(getwd(), "/outputs/scenario_4_SIR.csv", sep = ""))
    g
  } else {
    NULL
  }
})
# Density of the exponential distribution governing mutation sizes
# (rate = inputs$expDist), drawn over [0, N_reinfect].
output$parameterPlots <- renderPlot({
base <- ggplot(data.frame(x=c(0,inputs$N_reinfect)),aes(x)) + stat_function(fun=dexp,geom="line",colour="red",args=list(rate=inputs$expDist)) +
xlab("Size of mutation") +
ylab("Probability (given a mutation did occur)") +
scale_y_continuous(expand=c(0,0))+
scale_x_continuous(expand=c(0,0))+
theme(
text=element_text(colour="gray20",size=14),
plot.title=element_text(size=28),
legend.text=element_text(size=20,colour="gray20"),
legend.title=element_text(size=20,colour="gray20"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line=element_line(colour="gray20"),
axis.line.x = element_line(colour = "gray20"),
axis.line.y=element_line(colour="gray20"),
axis.text.x=element_text(colour="gray20"),
panel.background=element_blank(),
axis.text.y=element_text(colour="gray20"))
base
})
# Poisson pmf of the antibody boost after infection (lambda = inputs$boost),
# drawn as bars over 0..3*boost.
output$boostPlot <- renderPlot({
base <- ggplot(data.frame(x=c(0,3*inputs$boost)),aes(x)) + stat_function(fun=dpois,geom="bar",colour="black",n=(3*inputs$boost + 1),args=list(lambda=inputs$boost))+
xlab("Magnitude of boost following infection") +
ylab("Probability") +
scale_y_continuous(expand=c(0,0))+
scale_x_continuous(expand=c(0,0))+
theme(
text=element_text(colour="gray20",size=14),
plot.title=element_text(size=28),
legend.text=element_text(size=20,colour="gray20"),
legend.title=element_text(size=20,colour="gray20"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line=element_line(colour="gray20"),
axis.line.x = element_line(colour = "gray20"),
axis.line.y=element_line(colour="gray20"),
axis.text.x=element_text(colour="gray20"),
panel.background=element_blank(),
axis.text.y=element_text(colour="gray20"))
base
})
# Download: all four SIR panels stacked into one PNG (see plotAllSIR()).
output$dlPlots <- downloadHandler(
filename = "allSIR.png",
content = function(file){
ggsave(file,plot=plotAllSIR(),device="png",width=8)
}
)
# Downloads: individual SIR plots for scenarios 1 and 2.
output$dlPlot1 <- downloadHandler(
filename = "scenario1SIR.png",
content = function(file){
ggsave(file,plot=plot_SIR(paste(getwd(),"/outputs/scenario_1_SIR.csv",sep="")),device="png", width=10,height=6)
}
)
output$dlPlot2 <- downloadHandler(
filename = "scenario2SIR.png",
content = function(file){
ggsave(file,plot=plot_SIR(paste(getwd(),"/outputs/scenario_2_SIR.csv",sep="")),device="png", width=10,height=6)
}
)
# Download: SIR plot for scenario 3.
output$dlPlot3 <- downloadHandler(
  filename = "scenario3SIR.png",
  content = function(file) {
    # Fixed source path: was "scenario4_3_SIR.csv", a file no scenario
    # ever writes (run_sim produces "scenario_<i>_SIR.csv")
    ggsave(file, plot = plot_SIR(paste(getwd(), "/outputs/scenario_3_SIR.csv", sep = "")),
           device = "png", width = 10, height = 6)
  }
)
# Download: SIR plot for scenario 4.
output$dlPlot4 <- downloadHandler(
filename = "scenario4SIR.png",
content = function(file){
ggsave(file,plot=plot_SIR(paste(getwd(),"/outputs/scenario_4_SIR.csv",sep="")),device="png", width=10,height=6)
}
)
# Download: binding-avidity relationship plots (built by plotDynamics()).
output$dlParPlots <- downloadHandler(
filename = "bindingAvidPlots.png",
content = function(file){
ggsave(file=file,plotDynamics(),device="png")
}
)
# Download: tarball of the whole outputs/ directory.
output$dlOutputsSIR <- downloadHandler(
filename = function(){paste("outputs",".tar",sep="")},
content=function(file){
tar(file,paste(getwd(),"/outputs",sep=""))
}
)
# Download: CSV of the current virus parameter values from the UI.
output$dlPars <- downloadHandler(
filename = "parameters.csv",
content = function(file){
p = as.numeric(inputs$p)
r = as.numeric(inputs$r)
b = as.numeric(inputs$b)
a =as.numeric(inputs$a)
n = as.numeric(inputs$n)
v = as.numeric(inputs$v)
q = as.numeric(inputs$q)
N_reinfect = as.numeric(inputs$N_reinfect)
max_reinfect = N_reinfect
delta = as.numeric(inputs$delta)
allPars <- list("p"=p,"r"=r,"b"=b,"a"=a,"n"=n,"v"=v,"q"=q,"max_titre"=N_reinfect,"delta"=delta)
write.csv(allPars, file,row.names=FALSE)
}
)
# Build one SIR plot per scenario output file and stack the four panels
# into a single-column grob (returned, not drawn).
plotAllSIR <- function() {
  sir_files <- paste(getwd(), "/outputs/scenario_", 1:4, "_SIR.csv", sep = "")
  panels <- lapply(sir_files, plot_SIR)
  do.call(arrangeGrob, c(panels, list(ncol = 1)))
}
plotDynamics <- function(){
p = as.numeric(inputs$p) #' parameter to control degree by which changes in binding avidity affect probability of escape from immune response
r = as.numeric(inputs$r) #' parameter to control degree by which previous exposure reduce probability of immune escape
b = as.numeric(inputs$b) #' parameter to control the shape of the relationship between probability of successful replication and changes in binding avidity
a =as.numeric(inputs$a) #' controls rate of changes of relationship between probability of successful replication and change in binding avidity.
n = as.numeric(inputs$n) #' average number of virus copies: n is number of offspring per virus replication
v = as.numeric(inputs$v) #' v is number of virions initialyl transmitted
nv = n*v
q = as.numeric(inputs$q) #' parameter to control the shape of the relationship between binding avidity and immune escape (shift on the x-axis)
V = seq(0, 2, by = 0.005) #' Binding avidity
N_reinfect = as.numeric(inputs$N_reinfect)
max_reinfect = N_reinfect
delta = as.numeric(inputs$delta)
#' Get a colour scale gradient blue to red
f_array <- NULL #' Survival prob
df_array <- NULL #' Derivative of survival prob
for(k in 0:(N_reinfect-1)){
probTarget = exp(-p*(V+q)) #' probability of being targetted by immune system. As binding avidity increases, this probability decreases
probEscape = 1-probTarget #' probability of escape
immK = r*(k- delta) #' Strength of host immune respone. As k increases, virus must escape more antibodies. As delta increases, this effect diminishes as infecting virus is further from host immunity.
if(immK < 0) immK = 0
f = (probEscape)^(immK) #' probability of escape from immunity
if(k >= 1) f_dash= immK*p*((1-probTarget)^(immK-1))*(probTarget) #' derivative of this. ie. rate of change of relationship between binding avidity and probability of immune escape
else f_dash= rep(0, length(V))
f_array[[k+1]] <- f
df_array[[k+1]] <- f_dash
}
probInf <- exp(-a*(V^b)) #' probability of successful infection within a host. as binding avidity increases in a naive host, chance of successfully replicating decreases
probInf_dash= -a*b*(V^(b-1))*exp(-a*(V^b)) #' rate of change of this relationship
rho_array <- NULL
dV_array <- NULL
probRep_array <- NULL
for(i in 1:length(df_array)){
R0 = f_array[[i]]*probInf*n
rho = 1 - R0^-v
rho[rho < 0] = 0
rho_array[[i]] <- rho
dV = df_array[[i]]*probInf + f_array[[i]]*probInf_dash
dV_array[[i]] <- dV
probReplication = f_array[[i]]*probInf
probRep_array[[i]] = probReplication
}
rho0 = max(rho_array[[1]])
rho1 = max(rho_array[[2]])
rho2 = max(rho_array[[3]])
probRep_data <- NULL
for(i in 1:length(probRep_array)){
data <- data.frame(x=V,y=probRep_array[[i]],z=as.character(i))
probRep_data <- rbind(probRep_data,data)
}
colours <- NULL
blue <- rev(seq(0,1,by=1/N_reinfect))
red <- seq(0,1,by=1/N_reinfect)
for(i in 1:N_reinfect){
colours[i] <- rgb((i-1)/(max_reinfect-1),0,(N_reinfect-i)/(max_reinfect-1))
}
#' Replication
A <- ggplot() + geom_line(data=probRep_data,aes(x=x,y=y,colour=z)) + scale_color_manual(values=colours) +
theme(
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
panel.background=element_blank(),
axis.text.x=element_text(colour="gray20",size=12),
axis.text.y = element_text(colour="gray20",size=12),
text = element_text(colour="gray20",size=14),
axis.line=element_line(colour="gray20"),
axis.line.x =element_line(colour="gray20"),
axis.line.y=element_line(colour="gray20"),
legend.position = "none",
axis.title=element_text(size=12)
) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(limits=c(0,1),expand=c(0,0)) +
ylab("Probability of Successful Replication Within a Host (theta(V))") +
xlab("Binding Avidity")
rho_data<- NULL
for(i in 1:length(rho_array)){
data <- data.frame(x=V,y=rho_array[[i]],z=as.character(i))
rho_data<- rbind(rho_data,data)
}
#' Infection (Rho)
B <- ggplot() + geom_line(data=rho_data,aes(x=x,y=y,colour=z)) + scale_color_manual(values=colours) +
theme(
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
panel.background=element_blank(),
axis.text.x=element_text(colour="gray20",size=12),
axis.text.y = element_text(colour="gray20",size=12),
text = element_text(colour="gray20",size=14),
axis.line=element_line(colour="gray20"),
axis.line.x =element_line(colour="gray20"),
axis.line.y=element_line(colour="gray20"),
legend.position = "none",
axis.title=element_text(size=12)
) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(limits=c(0,1),expand=c(0,0)) +
ylab("Probability of Infection Between Hosts (rho)") +
xlab("Binding Avidity") +
geom_hline(yintercept = rho0,linetype="longdash",colour="dodgerblue4")+
geom_hline(yintercept = rho1,linetype="longdash",colour="dodgerblue4")+
geom_hline(yintercept = rho2,linetype="longdash",colour="dodgerblue4")
df_data<- NULL
for(i in 1:length(df_array)){
data <- data.frame(x=V,y=df_array[[i]],z=as.character(i))
df_data<- rbind(df_data,data)
}
#' Derivative of f
C <- ggplot() + geom_line(data=df_data,aes(x=x,y=y,colour=z)) + scale_color_manual(values=colours) +
theme(
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
panel.background=element_blank(),
axis.text.x=element_text(colour="gray20",size=12),
axis.text.y = element_text(colour="gray20",size=12),
text = element_text(colour="gray20",size=14),
axis.line=element_line(colour="gray20"),
axis.line.x =element_line(colour="gray20"),
axis.line.y=element_line(colour="gray20"),
legend.position = "none",
axis.title=element_text(size=12)
) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
ylab(expression(d*f/d*V)) +
xlab("Binding Avidity")
dV_data<- NULL
for(i in 1:length(dV_array)){
data <- data.frame(x=V,y=dV_array[[i]],z=as.character(i))
dV_data<- rbind(dV_data,data)
}
#' Infection
D <- ggplot() + geom_line(data=dV_data,aes(x=x,y=y,colour=z)) + scale_color_manual(values=colours) +
theme(
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
panel.background=element_blank(),
axis.text.x=element_text(colour="gray20",size=12),
axis.text.y = element_text(colour="gray20",size=12),
text = element_text(colour="gray20",size=12),
axis.line=element_line(colour="gray20"),
axis.line.x =element_line(colour="gray20"),
axis.line.y=element_line(colour="gray20"),
legend.position = "none",
axis.title=element_text(size=12)
) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
ylab(expression(d*beta/d*V)) +
xlab("Binding Avidity") +
geom_hline(yintercept=0,linetype='longdash',colour="gray20")
f_data<- NULL
for(i in 1:length(f_array)){
data <- data.frame(x=V,y=f_array[[i]],z=as.character(i))
f_data <- rbind(f_data,data)
}
E <- ggplot() + geom_line(data=f_data,aes(x=x,y=y,colour=z)) + scale_color_manual(values=colours) +
theme(
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
panel.background=element_blank(),
axis.text.x=element_text(colour="gray20",size=12),
axis.text.y = element_text(colour="gray20",size=12),
text = element_text(colour="gray20",size=14),
axis.line=element_line(colour="gray20"),
axis.line.x =element_line(colour="gray20"),
axis.line.y=element_line(colour="gray20"),
legend.position = "none",
axis.title=element_text(size=12)
) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
ylab("Probability of Evading Immune System, f(k,V)") +
xlab("Binding Avidity")
probRep_dat <- data.frame(x=V,y=probInf)
F <- ggplot() + geom_line(data=probRep_dat,aes(x=x,y=y,colour="red")) +
theme(
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
panel.background=element_blank(),
axis.text.x=element_text(colour="gray20",size=12),
axis.text.y = element_text(colour="gray20",size=12),
text = element_text(colour="gray20",size=14),
axis.line=element_line(colour="gray20"),
axis.line.x =element_line(colour="gray20"),
axis.line.y=element_line(colour="gray20"),
legend.position = "none",
axis.title=element_text(size=12)
) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
ylab("Probability of Successful Replication, g(V)") +
xlab("Binding Avidity")
#' Plot antigenic distance against j at a given binding avidity.
d <- seq(0,N_reinfect,by=0.1)
fixedV <- inputs$bindAvid
delta_array <- NULL
for(k in 0:(N_reinfect-1)){
probT = exp(-p*(fixedV+q))
probE = 1- probT
immK1 = r*(k-d)
immK1[immK1 < 0] <- 0
probSurvival1 = 1 - (n*exp(-a*(fixedV^b))*(probE^immK1))^-v
probSurvival1[probSurvival1 < 0] <- 0
delta_array[[k+1]] <- probSurvival1
}
delta_data<- NULL
for(i in 1:length(delta_array)){
data <- data.frame(x=d,y=delta_array[[i]],z=as.character(i))
delta_data <- rbind(delta_data,data)
}
G <- ggplot() + geom_line(data=delta_data,aes(x=x,y=y,colour=z)) + scale_color_manual(values=colours) +
theme(
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
panel.background=element_blank(),
axis.text.x=element_text(colour="gray20",size=12),
axis.text.y = element_text(colour="gray20",size=12),
text = element_text(colour="gray20",size=14),
axis.line=element_line(colour="gray20"),
axis.line.x =element_line(colour="gray20"),
axis.line.y=element_line(colour="gray20"),
legend.position = "none",
axis.title=element_text(size=12)
) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(limits=c(0,1),expand=c(0,0)) +
ylab("Probability of Infection Between Hosts") +
xlab("Antigenic Distance to Host Immunity")
immK_array <- NULL
immK2 <- seq(0,r*N_reinfect,by=1)
for(k in 0:(N_reinfect-1)){
probT1 = exp(-p*(fixedV+q))
probE1 = 1- probT1
probSurvival2 = 1 - (n*exp(-a*(fixedV^b))*(probE1^immK2))^-v
probSurvival2[probSurvival2 < 0] <- 0
immK_array[[k+1]] <- probSurvival2
}
immK_data<- NULL
for(i in 1:length(immK_array)){
data <- data.frame(x=immK2/r,y=immK_array[[i]],z=as.character(i))
immK_data <- rbind(immK_data,data)
}
H <- ggplot() + geom_line(data=immK_data,aes(x=x,y=y,colour=z)) + scale_color_manual(values=colours) +
theme(
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
panel.background=element_blank(),
axis.text.x=element_text(colour="gray20",size=12),
axis.text.y = element_text(colour="gray20",size=12),
text = element_text(colour="gray20",size=14),
axis.line=element_line(colour="gray20"),
axis.line.x =element_line(colour="gray20"),
axis.line.y=element_line(colour="gray20"),
legend.position = "none",
axis.title=element_text(size=12)
) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(limits=c(0,1),expand=c(0,0)) +
ylab("Probability of Infection Between Hosts") +
xlab("ImmK")
g <- grid.arrange(A,B,E,F,C,D,G,H,ncol=2)
}
output$Main <- renderPlot({
plotDynamics()
},height=1000,width=1000)
output$antigenicDistanceTime <- renderPlot({
cutOff <- 3
noSamples <- 1000
scenario <- inputs$scenarioPlot
filename <- paste("outputs/voutput1_",scenario,".csv",sep="")
if(file.exists(filename)){
dat <- fread(filename,header=T,data.table=FALSE)
tmp <- dat[sample(nrow(dat),noSamples,replace=FALSE),c("birth","distRoot","distance_to_parent")]
tmp$class[tmp$distance_to_parent >= cutOff] <- paste("Antigenic change >= ", cutOff,sep="")
tmp$class[tmp$distance_to_parent < cutOff] <- paste("Antigenic change <",cutOff,sep="")
p <- ggplot(data=tmp,aes(x=birth,y=distRoot,colour=class,group=class)) + geom_point() +
theme(
# panel.grid.major=element_blank(),
# panel.grid.minor=element_blank(),
#panel.background=element_blank(),
axis.text.x=element_text(colour="gray20",size=12),
axis.text.y = element_text(colour="gray20",size=12),
text = element_text(colour="gray20",size=14),
axis.line=element_line(colour="gray20"),
axis.line.x =element_line(colour="gray20"),
axis.line.y=element_line(colour="gray20"),
# legend.position = "none",
axis.title=element_text(size=12)
) +
ylab("Antigenic Distance to Root") +
xlab("Day of birth")
}
else {
p <- NULL
}
return(p)
})
output$hostImmunityHist <- renderPlot({
})
output$immKTime <- renderPlot({
})
output$virusPairwiseDist <- renderPlot({
})
output$deltaRBP <- renderPlot({
})
})
|
e5c44b5e5be68794aa7d4cd5bace7e4991997b45 | 6b0cbaf6c3b8362bf8615ff3268c31842715ac59 | /ThermalConduct_graduation_flow.R | dd936a56d9f8af7af748369ed1ca351a3c4137f5 | [] | no_license | Conan1991/grad_work | e672e9722f94d1366538f550ea778f59e483f322 | 7f9f6eb05b719f237056bff29d4e659807c7a697 | refs/heads/master | 2021-07-10T23:07:43.252285 | 2020-05-28T19:08:45 | 2020-05-28T19:08:45 | 130,038,234 | 0 | 0 | null | 2018-04-23T08:37:07 | 2018-04-18T09:28:16 | R | UTF-8 | R | false | false | 2,983 | r | ThermalConduct_graduation_flow.R | rm(list = ls())
a=1
tau = 10
h = 0.065 #шаг по x
L = 2 #длина
N = round(L/h) #Число шагов
k1=k2=1
k2_U= 0.5786
k1_U= 2.3
c2_U = 4195
c1_U = 2000
# k2_U= 1
# k1_U= 1
#
# c2_U = 1
# c1_U = 1
kU = function(U_i)
{
if( U_i < 0)
return(k1_U)
return(k2_U)
}
CU = function(U_i)
{
if( U_i < 0)
return(c1_U)
return(c2_U)
}
A = function(t)
{
0
#1/(3+t)
}
B = function(t)
{
#0
#2/(1+t)
1
}
n = 10 #Число шагов по времени
tj = numeric(n) #tau j
nj = numeric(n) #ветор моментов времени
x = numeric(N+1)
fij = numeric(N+1) #сила
Ai = numeric(N) #коэффициенты
Bi = numeric(N)
Ci = numeric(N)
Fi = numeric(N-1)
alpha = numeric(N)
beta = numeric(N)
#j=1 0-й слой
for(j in 1:n)
{
tj[j]=(j-1)*tau
nj[j]=j-1
}
Ux0=function(x)
{
#A(tj[1]) + (B(tj[1])-A(tj[1]))*(x/L)^2;
#1
0
}
for(i in 1:N+1)
{
x[i]= (i-1)*h
fij[i]=0
}
U = matrix(data=NA,nrow=n,ncol=N+1)
colnames(U)=x
row.names(U)= c(0:(n-1))
for(i in 0:N+1) #Считаем 0-й слой
{
U[1,i] = Ux0(x[i])
}
IterF = function(j) #Считаем остальные слои
{
alpha[1]=k1
beta[1]=-h*A(tj[j])
#print(beta[1])
for(i in 1:N) # Считаем очередные Ai, Bi , Ci
{
Ai[i] = - kU(U[j-1,i]) / h^2
Bi[i] = - kU(U[j-1,i]) / h^2
Ci[i] = CU(U[j-1,i]) / tau + 2 * kU(U[j-1,i]) / h^2
}
for(i in 1:(N-1)) #Считаем Fi
Fi[i]=CU(U[j-1,i])*U[j-1,i+1]/tau
#print(Fi)
for(i in 2:N) #Считаем альфа и бета коэффициенты
{
alpha[i]=-Bi[i-1]/(Ci[i-1]+Ai[i-1]*alpha[i-1])
beta[i]= (Fi[i-1]-Ai[i-1]*beta[i-1])/(Ci[i-1]+Ai[i-1]*alpha[i-1])
}
print(beta)
print(alpha)
B(tj[j])
U[j,N+1]=(h*B(tj[j])+k2*beta[N])/(1-k2*alpha[N])
for(i in N:1) #Считаем Uj
U[j,i]= alpha[i] * U[j,i+1] + beta[i]
#U[j,1]=k1*U[j,2]-h*A(tj[j])
return(U[j,])
}
for(j in 2:n)
U[j,] = IterF(j)
Uacc = matrix(data=NA,nrow=n,ncol=N+1)
colnames(Uacc)=x
row.names(Uacc)= c(0:(n-1))
summm=function(x,t)
{
summ=0
for(k in 1:10000)
{
#summ= summ + 1/(2*k-1)^3*sin((2*k-1)*pi*x/L) * exp(-(2*k-1)^2*pi^2*a^2*t/L^2)
#if(k==10) print(summ)
summ= summ + (-1)^(k+1)/k^2*exp(-k^2*pi^2*a^2*t/L^2)*cos(k*pi*x/L)
}
return(summ)
}
for(ix in 1:(N+1))
for(it in 1:n) {
#Uacc[it,ix]=A(tj[it]) + (B(tj[it]) - A(tj[it])) * x[ix]/L - 8*(B(tj[it])-A(tj[it]))/pi^3 * summm(x[ix], tj[it])
#Uacc[it,ix]=B(tj[it])*(a^2*tj[it]/L+(3*x[ix]^2-L^2)/(6*L))+2*L/pi^2*summm(x[ix], tj[it])
}
library(animation)
oopt = ani.options(interval = 0.3)
ani.record(reset = TRUE)
for(j in 1:n)
{
plot(U[j,], xaxt="n", xlab = 'X values', ylab = 'U[x,t] values')
lines(U[j,],col="red")
#lines(Uacc[j,],col="blue")
axis(1, at = c(1:(N+1)), labels = x)
ani.pause()
ani.record()
}
print("Численное решение")
print(U)
#print("Точное решение")
#print(Uacc)
while(TRUE)
{
ani.replay()
ani.options(oopt, loop = 100)
} |
e577732aa6c1eb26fe8d71dd04cf115132860e4e | 74c142f447083d6648a6b644ceb606bd4bd650f4 | /code_r/feat_modelling2.R | 925a20015bdc1c3ea7deb55933536ea49d0ad6e8 | [] | no_license | Wenshulou2019/mind_wandering_EEG | f5310014c63b2c2f5a4108ab2cd6d859645f0494 | 969f082d76e570fdaa7756c109e9f60123f6e74c | refs/heads/master | 2023-02-24T06:09:16.047307 | 2021-01-27T15:31:31 | 2021-01-27T15:31:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,214 | r | feat_modelling2.R | library(R.matlab)
library(matrixStats)
library(e1071)
library(lme4)
library(class)
library(caret)
library(DMwR)
library(utils)
library(tcltk)
get.settings <- function(){
print(paste0('Current feature path: ', f_measure_matfile))
print(paste0('Tasks to build models: ', tasks))
print(paste0('Current states of interest: ', states))
print(paste0('Current definition of ', states, ' : ', contentsets))
print(paste0('Subject count: ', length(subs)))
}
grid.search.svm <- function(data, cVec, gVec, validType = 'cv', balanced = 'copy', searchBy = 'acc', checkOn = FALSE) {
mat <- matrix(0,length(cVec), length(gVec))
count <- length(cVec) * length(gVec)
ti <- 1
pb <- tkProgressBar(title = 'Grid search SVM', min = 0, max = count, width = 500)
for (ci in 1:length(cVec)){
for (gi in 1:length(gVec)){
parList <- list(c = cVec[ci], gamma = gVec[gi])
# only for check purpose
if (checkOn) {tic(paste0('Grid search when c=', parList$c, ', gamma=', parList$gamma))}
if(validType == 'loo'){
perf <- leave.one.out(data, mtype = 'svm', parList = parList, balanced = balanced)
} else {
perf <- cross.validation(data, mtype = 'svm', parList = parList, balanced = balanced)
}
# fail to build a model?
if (is.nan(perf$accuracy)){
print('No model being built. Quit grid search of the input data')
close(pb)
return(NaN)
}
if (searchBy == 'acc'){
mat[ci,gi] <- perf$accuracy
} else if (searchBy == 'kappa'){
mat[ci,gi] <- perf$kappa
} else if (searchBy %in% c('sen-spe', 'spe-sen')) {
mat[ci,gi] <- perf$sensitivity + perf$specificity
} else {
warning('Invalid search criteria. Use the highest accuracy.')
mat[ci,gi] <- perf$accuracy
}
if (checkOn){toc()}
setTkProgressBar(pb, ti, label = paste(round(ti/count * 100, 1), "% done"))
ti <- ti + 1
}
}
close(pb)
return(mat)
}
leave.one.out <- function(data, mtype, parList = list(), balanced = 'copy'){
for (i in 1:nrow(data)) {
test <- data[i, ]
train <- data[-i,]
if (is.character(balanced)) {
train <- balance.class(train, balanced)
}
pred <- feat.modeling(train, test, mtype, parList)
if (i == 1){
pred_class <- pred$predictions
obs_class <- pred$observations
} else {
pred_class <- unlist(list(pred_class, pred$predictions))
obs_class <- unlist(list(obs_class, pred$observations))
}
}
performance <- measure.performance(pred_class, obs_class)
return(performance)
}
cross.validation <- function(data, mtype, nfold = 10, parList = list(), balanced = 'copy'){
# balanced indicates the way to balance the training dataset
idx <- split.sample(data, nfold)
# unable split? (because insufficient samples)
if (length(idx) == 0) {
return(list(accuracy = NaN, kappa = NaN,
sensitivity = NaN, specificity = NaN))
}
for (foldi in 1:nfold) {
train <- data[idx$trainIdx[[foldi]],]
test <- data[idx$testIdx[[foldi]], ]
if (is.character(balanced)) {
train <- balance.class(train, balanced)
}
pred <- feat.modeling(train, test, mtype, parList)
if (foldi == 1){
pred_class <- pred$predictions
obs_class <- pred$observations
} else {
pred_class <- unlist(list(pred_class, pred$predictions)) # to remain the factor type
obs_class <- unlist(list(obs_class, pred$observations))
}
}
performance <- measure.performance(pred_class, obs_class)
return(performance)
}
feat.modeling <- function(train, test, mtype, parList = list()){
if (mtype == 'lr'){
m <- glm(state ~ ., family = binomial('logit'), data = train)
p <- predict(m, test, type = "response")
states <- names(summary(train$state))
if (is.factor(train$state)) {
states <- factor(states, levels = states, labels = states)
}
p_class <- ifelse(p>0.5, states[2], states[1])
p_class <- factor(p_class, levels = 1:length(states), labels = states)
#pStrength <- p
} else if (mtype == 'svm') {
if (length(parList) > 0){
m <- svm(state ~ ., train, probability = TRUE, cost = parList$c, gamma = parList$gamma)
} else {
m <- svm(state ~ ., train, probability = TRUE)
}
p_class <- predict(m, test)
#prob <- attr(predict(m, test, probability = TRUE), 'probabilities')
#pStrength <- prob[,2] - prob[,1]
} else if (mtype == 'knn') {
# split feats from lab
trainFeats <- train[, -which(colnames(train) %in% 'state')]
testFeats <- test[, -which(colnames(test) %in% 'state')]
p_class <- knn(trainFeats, testFeats, train$state, k = 5)
#p <- attr(knn(trainFeats, testFeats, train$state, k = 5, prob = TRUE),'prob')
#p[p_class == states[1]] <- -p[p_class == states[1]]
#pStrength <- p
m <- 'No model for KNN'
} else {
stop('Invalid model type!')
}
return(list(predictions = p_class, observations = test$state, model = m))
}
measure.performance <- function(pred_class, obs_class) {
perf <- confusionMatrix(pred_class, obs_class)
if (perf[['positive']] == states[1]){
tnr <- perf[[c('byClass','Sensitivity')]]
tpr <- perf[[c('byClass','Specificity')]]
} else if (perf[['positive']] == states[2]){
tpr <- perf[[c('byClass','Sensitivity')]]
tnr <- perf[[c('byClass','Specificity')]]
}
return(list(accuracy = perf[[c('overall','Accuracy')]], kappa = perf[[c('overall','Kappa')]],
sensitivity = tpr, specificity = tnr))
}
split.sample <- function(data, nfold = 10, mincount = 10){
#set.seed(54)
rawIdx <- list()
count <- c()
ndataPerFold <- c()
for (si in 1:length(states)){
rawIdx[[si]] <- c(1:nrow(data))[data$state == states[si]]
count[si] <- length(rawIdx[[si]])
ndataPerFold[si] <- ceiling(count[si]/nfold)
if (ndataPerFold[si] <= ndataPerFold[si]*nfold - count[si]) {
ndataPerFold[si] <- floor(count[si]/nfold)
}
if (count[si] > 1) {rawIdx[[si]] <- sample(rawIdx[[si]], count[si])} # shuffle idx
}
# enough samples?
if (min(count) < mincount) {
print(paste0('Class size less than ', mincount, '. Fail to split data.'))
return(list())
}
trainIdx <- list()
testIdx <- list()
for (foldi in 1:nfold){
startPosition <- ndataPerFold * (foldi - 1) + 1
if (foldi < nfold) {
endPosition <- ndataPerFold * foldi
} else {
endPosition <- count
}
for (si in 1:length(states)){
testPositions <- startPosition[si]:endPosition[si]
if (si == 1){
testIdx[[foldi]] <- rawIdx[[si]][testPositions]
trainIdx[[foldi]] <- rawIdx[[si]][-testPositions]
} else {
testIdx[[foldi]] <- c(testIdx[[foldi]], rawIdx[[si]][testPositions])
trainIdx[[foldi]] <- c(trainIdx[[foldi]], rawIdx[[si]][-testPositions])
}
}
}
return(list(trainIdx = trainIdx, testIdx = testIdx))
}
balance.class <- function(data, method = 'copy'){
count <- c()
for (si in 1:length(states)){
eval(parse(text = paste0('data', si, ' <- subset(data, state == states[si])')))
eval(parse(text = paste0('count[si] <- nrow(data', si, ')')))
}
# if one class is empty
if (min(count) == 0){
print('One of the classes is empty! Return NaN')
return(NaN)
}
if (max(count) == min(count)) {return(data)}
nCopy <- floor(max(count)/min(count)) - 1
nSelect <- max(count) %% min(count)
if (method == 'copy'){
data2copy <- eval(parse(text = paste0('data', which(count == min(count)))))
if (nCopy > 0){
for (copyi in 1:nCopy){
if (copyi == 1){
copy <- data2copy
} else {
copy <- rbind(copy, data2copy)
}
}
}
if (nSelect > 0){
if (nCopy > 0){
copy <- rbind(copy, data2copy[sample(1:min(count), nSelect),])
} else {
copy <- data2copy[sample(1:min(count), nSelect),]
}
}
newData <- rbind(data, copy)
} else if (toupper(method) == 'SMOTE'){
newData <- SMOTE(state~., data, perc.over = nCopy*100 + 100*(min(1,nSelect)),
k = 5, perc.under = (1 + 1/(nCopy + min(1,nSelect))) * 100)
}
return(newData)
}
normalize <- function(data, algorithm, pars = list()){
# normalize data within each column
# algorithm options are:
# - range
# - z (pars: colMeans, colSds)
# normalize each column (feature)
labs <- data$state
dataMat <- as.matrix(subset(data, select = -state))
nObs <- nrow(dataMat)
if (algorithm == 'range'){
minMat <- matrix(rep(colMins(dataMat), nObs), nrow = nObs, byrow = TRUE)
maxMat <- matrix(rep(colMaxs(dataMat), nObs), nrow = nObs, byrow = TRUE)
dataNorm <- (dataMat - minMat) / (maxMat - minMat)
dataPars <- list(mins = colMins(dataMat), maxs = colMaxs(dataMat))
} else if (algorithm == 'z'){
if (length(pars) == 0) {
meanMat <- matrix(rep(colMeans(dataMat), nObs), nrow = nObs, byrow = TRUE)
sdMat <- matrix(rep(colSds(dataMat), nObs), nrow = nObs, byrow = TRUE)
} else {
meanMat <- matrix(rep(pars$means, nObs), nrow = nObs, byrow = TRUE)
sdMat <- matrix(rep(pars$sds, nObs), nrow = nObs, byrow = TRUE)
}
dataNorm <- (dataMat - meanMat) / sdMat
dataPars <- list(means = colMeans(dataMat), sds = colSds(dataMat))
} else {
return('Invalid algorithm!')
}
dataNorm <- as.data.frame(dataNorm)
dataNorm$state <- labs
return(list(dataNorm = dataNorm, dataPars = dataPars))
}
get.all.data <- function(subs, task, measures, feats, folders, normalize = TRUE){
for (subi in 1:length(subs)){
sub <- subs[subi]
temp <- get.data(sub, task, measures, feats, folders)
if (normalize) {
temp <- normalize(temp, 'z')$dataNorm
}
if (subi == 1){
data <- temp
} else {
data <- rbind(data, temp)
}
}
return(data)
}
get.data.3bk <- function(sub, task, measures, feats, folders) {
load('trialIdList.rdata')
for (feati in 1:length(feats)){
feat <- feats[feati]
folder <- folders[feati]
all <- readMat(paste0(f_measure_matfile, f_sep, folder, f_sep, sub, '.mat'))
for (si in 1:length(states)){
state <- states[si]
eval(parse(text = paste0('temp <- all$', feat, '.', task, '.', state)))
# filter for 3 preceding trials
for (sessioni in 1:2){
temp <- temp[temp[,sessioni] %in% unlist(idList[[sessioni]][[sub]]) | temp[,sessioni] == 0,]
}
if (feat %in% c('alpha', 'theta')){
temp <- cbind(temp[, 3:4], si - 1) # select feats, add lab
} else {
temp <- cbind(temp[, 3:5], si - 1)
}
if (si == 1){
data <- temp
} else {
data <- rbind(data, temp)
}
}
if (feat %in% c('alpha','theta')){
colnames(data) <- c(paste0(measures[feati], '.', c('Base','StimOn')), 'state')
} else {
colnames(data) <- c(paste0(measures[feati], '.', c('Size','Time','Scale')), 'state')
data[data[, 1] == 0, 1] <- NaN # mark detection failure in single trial ERP
}
if (feati == 1){
df <- data
} else {
df <- cbind(df, data[, -ncol(data)])
}
}
df <- as.data.frame(df)
df$state <- factor(df$state, levels = c(1:length(states)) - 1, labels = states)
df <- na.omit(df) # remove feat detection failure trials
return(df)
}
get.trialIdx <- function(sub, task, measures, feats, folders){
for (feati in 1:length(feats)){
feat <- feats[feati]
folder <- folders[feati]
all <- readMat(paste0(f_measure_matfile, f_sep, folder, f_sep, sub, '.mat'))
for (si in 1:length(states)){
state <- states[si]
eval(parse(text = paste0('temp <- all$', feat, '.', task, '.', state)))
if (feati == 1){
if (feat %in% c('alpha', 'theta')){
temp <- cbind(temp[, 1:4], si - 1) # trial idx + feats
} else {
temp <- cbind(temp[, 1:5], si - 1)
}
} else {
if (feat %in% c('alpha', 'theta')){
temp <- cbind(temp[, 3:4], si - 1) # feats
} else {
temp <- cbind(temp[, 3:5], si - 1)
}
}
if (si == 1){
data <- temp
} else {
data <- rbind(data, temp)
}
}
if (feati == 1) {
if (feat %in% c('alpha','theta')){
colnames(data) <- c('ID.s1', 'ID.s2', paste0(measures[feati], '.', c('Base','StimOn')), 'state')
} else {
colnames(data) <- c('ID.s1', 'ID.s2', paste0(measures[feati], '.', c('Size','Time','Scale')), 'state')
data[data[, 3] == 0, 1] <- NaN # mark the detection failure of single trial ERP
}
} else {
if (feat %in% c('alpha','theta')){
colnames(data) <- c(paste0(measures[feati], '.', c('Base','StimOn')), 'state')
} else {
colnames(data) <- c(paste0(measures[feati], '.', c('Size','Time','Scale')), 'state')
data[data[, 1] == 0, 1] <- NaN # mark the detection failure of single trial ERP
}
}
if (feati == 1){
df <- data
} else {
df <- cbind(df, data[, -ncol(data)])
}
}
df <- as.data.frame(df)
df$state <- factor(df$state, levels = c(1:length(states)) - 1, labels = states)
df <- na.omit(df) # remove feat detection failure trials
df.idx <- df[,1:2]
return(df.idx)
}
get.data.content <- function(sub, task, states, contentsets, measures, feats, folders, back3On = FALSE){
if (back3On) {
load('trialIdList.rdata')
print('Filter data for 3 preceding trials.')}
# intialize
df <- matrix(NaN, 0, 0)
for (feati in 1:length(feats)){
feat <- feats[feati]
folder <- folders[feati]
all <- readMat(paste0(f_measure_matfile, f_sep, folder, f_sep, sub, '.mat'))
# intialize
data <- matrix(NaN, 0, 0)
for (si in 1:length(states)){
state <- states[si]
contents <- contentsets[[si]]
# combine lists from different contents
for (ci in 1:length(contents)){
content <- contents[ci]
eval(parse(text = paste0('temp <- all$', feat, '.', task, '.', content)))
# convert to df; easy for case of onse obs
temp <- as.data.frame(temp)
# if temp is not empty
if (nrow(temp) > 0){
# filter for 3 preceding trials
if (back3On) {
for (sessioni in 1:2){
temp <- temp[temp[,sessioni] %in% unlist(idList[[sessioni]][[sub]]) | temp[,sessioni] == 0,]
}
}
# if the filterd temp is not empty
if (nrow(temp) > 0) {
# select feats, add lab
if (feat %in% c('alpha', 'theta')){
temp <- cbind(temp[, 3:4], si - 1)
} else {
temp <- cbind(temp[, 3:5], si - 1)
}
# combined with data in other specified labels
if (nrow(data) == 0){ # if data is empty
data <- temp
} else {
data <- rbind(data, temp)
}
}
}
} # loop over contents
} # loop over states
# if data being extracted in the specified labels are not empty
if (nrow(data) > 0){
# add column names
if (feat %in% c('alpha','theta')){
colnames(data) <- c(paste0(measures[feati], '.', c('Base','StimOn')), 'state')
} else {
colnames(data) <- c(paste0(measures[feati], '.', c('Size','Time','Scale')), 'state')
# mark the detection failure of single trial ERP
data[data[, 1] == 0, 1] <- NaN
}
# combined with data of other specified features
if (feati == 1){
df <- data
} else {
df <- cbind(df, data[, -ncol(data)])
}
} else { # if no data are extracted in the specified label
df <- as.data.frame(df) # output empty
break # exit the iteration because no data will be extracted for other features as well
}
} # loop over feats
# convert label type
df$state <- factor(df$state, levels = c(1:length(states)) - 1, labels = states)
# remove feat detection failure trials
df <- na.omit(df)
return(df)
}
get.data <- function(sub, task, measures, feats, folders, splitSessions = FALSE){
for (feati in 1:length(feats)){
feat <- feats[feati]
folder <- folders[feati]
all <- readMat(paste0(f_measure_matfile, f_sep, folder, f_sep, sub, '.mat'))
if (splitSessions) {sesPos <- c()} # to store the end position of data in each session
for (si in 1:length(states)){
state <- states[si]
eval(parse(text = paste0('temp <- all$', feat, '.', task, '.', state)))
if (splitSessions){sesPos <- c(sesPos, max(which(temp[,1]>0)), nrow(temp))}
if (nrow(temp) > 0){
if (feat %in% c('alpha', 'theta')){
temp <- cbind(temp[, 3:4], si - 1) # select feats, add lab
} else {
temp <- cbind(temp[, 3:5], si - 1)
}
}
if (si == 1){
data <- temp
} else {
if (nrow(data) > 0){
if (nrow(temp) > 0){
data <- rbind(data, temp)
} # else, no act
} else {
data <- temp
}
}
}
if (splitSessions){sesPos[3:4] <- sesPos[3:4] + sesPos[2]}
if (nrow(data) > 0){
if (feat %in% c('alpha','theta')){
colnames(data) <- c(paste0(measures[feati], '.', c('Base','StimOn')), 'state')
} else {
colnames(data) <- c(paste0(measures[feati], '.', c('Size','Time','Scale')), 'state')
data[data[, 1] == 0, 1] <- NaN # mark the detection failure of single trial ERP
}
if (feati == 1){
df <- data
} else {
df <- cbind(df, data[, -ncol(data)])
}
} else {
df <- data
}
}
df <- as.data.frame(df)
df$state <- factor(df$state, levels = c(1:length(states)) - 1, labels = states)
if (splitSessions){
sesOneIdx <- c(1:sesPos[1], (sesPos[2]+1):sesPos[3])
df.split <- list()
df.split[[1]] <- df[sesOneIdx, ]
df.split[[2]] <- df[-sesOneIdx, ]
df <- df.split
}
if (is.data.frame(df)){
df <- na.omit(df) # remove feat detection failure trials
} else {
df[[1]] <- na.omit(df[[1]])
df[[2]] <- na.omit(df[[2]])
}
return(df)
}
simulate.data <- function(sub, task, measures, measureTypes, hIncreaseOn){
load('trialCount.rdata')
centers <- c(0, 1)
sharp <- 1
eval(parse(text = paste0('trialCount <- trialCount.', task)))
for (si in 1:length(states)){
state <- states[si]
# get class size
size <- trialCount[sub, state]
# simlute data
for (mi in 1:length(measures)){
measure <- measures[mi]
measureType <- measureTypes[mi]
increaseOn <- hIncreaseOn[mi]
if (state == 'ot'){
if (increaseOn) {
center <- min(centers)
} else {
center <- max(centers)
}
} else {
if (increaseOn){
center <- max(centers)
} else {
center <- min(centers)
}
}
if (toupper(measureType) == 'ERP'){
temp <- cbind(rnorm(size, mean = center, sd = sharp),
rnorm(size, mean = mean(centers), sd = sharp),
rnorm(size, mean = mean(centers), sd = sharp))
colnames(temp) <- paste0(measure, '.', c('Size','Time','Scale'))
} else {
temp <- cbind(rnorm(size, mean = center, sd = sharp),
rnorm(size, mean = center, sd = sharp))
colnames(temp) <- paste0(measure, '.', c('Base','StimOn'))
}
if (mi == 1){
data <- temp
} else {
data <- cbind(data, temp)
}
}
data <- cbind(data, state = si-1)
if (si == 1){
df <- data
} else {
df <- rbind(df, data)
}
}
df <- as.data.frame(df)
df$state <- factor(df$state, levels = c(1:length(states)) - 1, labels = states)
return(df)
}
simulate.data.random <- function(sub, task, measures, measureTypes, equalSizeOn = FALSE){
  # Simulate label-free baseline data: every state/measure column is drawn
  # from the same N(0, 1) distribution, so the returned data carries no
  # real class structure (useful as a chance-level control).
  #
  # sub          : subject index; selects the row of the trial-count table
  # task         : task name; selects the 'trialCount.<task>' object loaded
  #                from 'trialCount.rdata'
  # measures     : character vector of measure names
  # measureTypes : character vector parallel to 'measures'; "ERP" measures
  #                get 3 columns (Size/Time/Scale), all others get 2
  #                (Base/StimOn)
  # equalSizeOn  : FALSE   -> use observed per-subject/per-state trial counts
  #                TRUE    -> use the fixed default class size (100)
  #                numeric -> use that value as the class size
  #
  # Returns a data.frame with one row per simulated trial and a 'state'
  # factor column. NOTE(review): relies on a 'states' vector defined in the
  # enclosing/global environment -- confirm it is set before calling.
  center <- 0
  sharp <- 1
  size.default <- 100
  if (is.logical(equalSizeOn) && !equalSizeOn) {
    load('trialCount.rdata')
    # Look the per-task object up by name; no need for eval(parse(...))
    trialCount <- get(paste0('trialCount.', task))
  }
  state_blocks <- vector("list", length(states))
  for (si in seq_along(states)) {
    state <- states[si]
    # Resolve the class size for this state
    if (is.logical(equalSizeOn) && !equalSizeOn) {
      size <- trialCount[sub, state]
    } else if (is.logical(equalSizeOn) && equalSizeOn) {
      size <- size.default
    } else if (is.numeric(equalSizeOn)) {
      size <- equalSizeOn
    } else {
      size <- NaN
    }
    # Simulate the columns for each measure, collecting into a list
    # instead of growing a matrix with repeated cbind()
    measure_blocks <- vector("list", length(measures))
    for (mi in seq_along(measures)) {
      measure <- measures[mi]
      measureType <- measureTypes[mi]
      if (toupper(measureType) == 'ERP') {
        temp <- cbind(rnorm(size, mean = center, sd = sharp),
                      rnorm(size, mean = center, sd = sharp),
                      rnorm(size, mean = center, sd = sharp))
        colnames(temp) <- paste0(measure, '.', c('Size','Time','Scale'))
      } else {
        temp <- cbind(rnorm(size, mean = center, sd = sharp),
                      rnorm(size, mean = center, sd = sharp))
        colnames(temp) <- paste0(measure, '.', c('Base','StimOn'))
      }
      measure_blocks[[mi]] <- temp
    }
    data <- do.call(cbind, measure_blocks)
    # Encode the state as a 0-based integer; mapped to a factor below
    state_blocks[[si]] <- cbind(data, state = si - 1)
  }
  df <- as.data.frame(do.call(rbind, state_blocks))
  df$state <- factor(df$state, levels = seq_along(states) - 1, labels = states)
  return(df)
}
59a947c98e709fd4fa7742cd619444236e0a2999 | aaecb836ca2696654fabe505af460dd32fd67e9b | /main.R | 67fda26e2462e8b9d4c88ad7b1899c48f54488eb | [] | no_license | pmdemetrio/A | 8683085da68bb718306352b32fe6d7e332931dbd | 0a6274caf324690583ed538e8d9ccda3aa8ad273 | refs/heads/master | 2021-01-10T06:15:53.033598 | 2015-12-28T20:01:44 | 2015-12-28T20:01:44 | 48,707,787 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 223 | r | main.R | # esqueleto
# Pablo Demetrio
# December 2015
# Project skeleton: entry script that loads packages, sources helper
# functions, and loads input data before the actual analysis commands run.
# Import packages
library()   # TODO(review): placeholder -- fill in the required package name
library()   # TODO(review): placeholder -- fill in the required package name
# Source functions
source('R/function1.R')
source('R/function2.R')
# Load datasets
load('data/input_model.rda')   # loads the saved objects into the workspace
# Then the actual commands
5d3beee4f0b30569169db5b6e1656e9d3880a252 | 58514496a583ee6eeb40ba46085fec3a4b03dd88 | /man/rforcecom.getServerTimestamp.Rd | 9d1d960ad3ecbef6d25b98bd1f91863b5e091216 | [
"MIT"
] | permissive | TonyWhiteSMS/salesforcer | 1dd820dc0e05de346dd8648a04695a72ab46b98d | 764442f713fde22f95f201a1c5601e67f4569515 | refs/heads/master | 2020-09-10T01:48:29.869567 | 2019-11-14T21:32:10 | 2019-11-14T21:32:10 | 221,619,307 | 1 | 0 | NOASSERTION | 2019-11-14T05:31:34 | 2019-11-14T05:31:33 | null | UTF-8 | R | false | true | 784 | rd | rforcecom.getServerTimestamp.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compatibility.R
\name{rforcecom.getServerTimestamp}
\alias{rforcecom.getServerTimestamp}
\title{salesforcer's backwards compatible version of rforcecom.getServerTimestamp}
\usage{
rforcecom.getServerTimestamp(session)
}
\arguments{
\item{session}{\code{list}; a list containing "sessionID", "instanceURL", and "
apiVersion" as returned by \code{RForcecom::rforcecom.login()}. This argument is
ignored in all backward compatible calls because the authorization credentials
are stored in an environment internal to the salesforcer package, so it is no longer
necessary to pass the session in each function call.}
}
\description{
salesforcer's backwards compatible version of rforcecom.getServerTimestamp
}
|
fd3dda9183479b92ae04044c849169acc192f970 | 9dafa6da6d00dd320ea3af618a0f0395d2bd7036 | /2-Exercises/P07/BuildTpo.R | a8f3f2dfa9d0507239c587111e703e24c53abe47 | [] | no_license | hwangpo/ce-5362-webroot | fd3c8fae55d85c3680eb13ecb53b855b6b0bea19 | 5adffce6a342d3ea00b609e8c3cf2d5157d579e7 | refs/heads/main | 2023-05-14T12:12:02.551230 | 2021-06-10T20:09:56 | 2021-06-10T20:09:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 909 | r | BuildTpo.R | # script to generate an xyz .tpo file on a regular geometry
# set formula for z=f(x,y)
# Elevation surface z = f(x, y) for the pond-in-pond geometry:
# a rim of elevation 4 where y < 1 or y > 29, and a flat interior
# of elevation 1 in between. The x coordinate is accepted for a
# uniform f(x, y) interface but does not affect the result.
z <- function(x,y){
 if (y < 1 || y > 29) {
  return(4)
 }
 1
}
numx <- 11 # number x lines
numy <- 11 # number y lines
# Accumulators for the generated point cloud (filled point-by-point below)
elev <- numeric(0)        # elevation z at each grid node
xcrd <- numeric(0)        # x coordinate of each grid node
ycrd <- numeric(0)        # y coordinate of each grid node
xloc <- numeric(0)
yloc <- numeric(0)
outputObj <-character(0)  # formatted "x y z" lines for the .tpo file
dx <- 3                   # grid spacing in x
dy <- 3                   # grid spacing in y
# NOTE(review): seq(0, dx*numx, dx) has numx+1 entries but only the first
# numx are used in the loop below -- confirm the last column/row is meant
# to be dropped.
xloc <- seq(0,dx*numx,dx)
yloc <- seq(0,dy*numy,dy)
pcount <-0                # running point counter
# Walk the grid row by row, evaluating z() at every (x, y) node
for (irow in 1:numy){
 for (jcol in 1:numx) {
  pcount <- pcount +1
  elev[pcount] <- z(xloc[jcol],yloc[irow])
  xcrd[pcount] <- xloc[jcol]
  ycrd[pcount] <- yloc[irow]
  outputObj[pcount] <- paste(xcrd[pcount]," ",ycrd[pcount]," ",elev[pcount])
 }
}
# .tpo layout: first line is the point count, then one "x y z" line per point
write(length(elev),file="pond-in-pond.tpo")
write(outputObj, file = "pond-in-pond.tpo", append = TRUE)
|
97fcede690be8b1d14e4b85bb676203ea2b91b1b | 3d9da0e0416b7dc5226b4918e4b52122dd91de4d | /cachematrix.R | bb2aaee9e36a8a78ac6c22852eca59514d138bad | [] | no_license | pignrooster/ProgrammingAssignment2 | 7b3535f07777f968fb285161f4740710feaf2bfd | bcaf3f43ff569b62c0581be630216a28e68e597c | refs/heads/master | 2020-12-07T13:31:57.614509 | 2015-05-22T22:09:39 | 2015-05-22T22:09:39 | 36,047,158 | 0 | 0 | null | 2015-05-22T02:08:42 | 2015-05-22T02:08:41 | null | UTF-8 | R | false | false | 3,244 | r | cachematrix.R | ## calculating the matrix inverse can be a resource intensive operation. if
## we cached the matrix inverse, then cached matrix inverse can be get and the
## costly matrix inverse calculation can be avoided.
## makeCacheMatrix stores a matrix 'x' and a list of four functions to set
## matrix 'x', get matrix 'x', set the matrix inverse of 'x', and get the
## matrix inverse of 'x'. The set function will only make changes to the
## matrix 'x' and matrix inverse 'mi' if the new matrix does not equal 'x'.
makeCacheMatrix <- function(x = matrix()) {
    # Cache-aware matrix container: stores a matrix 'x' together with a
    # lazily computed cached inverse 'mi', and returns a list of four
    # accessor functions. Intended to be used with cacheSolve().
    mi <- NULL
    ## Replace the stored matrix only when the new one actually differs;
    ## an unchanged matrix keeps the cached inverse valid.
    set <- function(y) {
        # identical() handles matrices of different dimensions and NA
        # entries safely; the previous all(x == y) comparison errored on
        # dimension mismatch and produced NA (-> if() error) on NA entries.
        if (!identical(x, y)) {
            ## new matrix differs: replace 'x' and invalidate the cache
            x <<- y
            mi <<- NULL
        }
    }
    ## get the matrix 'x' fed into makeCacheMatrix(x)
    get <- function() x
    ## store a freshly computed inverse 'inv' in the cache
    setinv <- function(inv) mi <<- inv
    ## get the cached inverse (NULL if not yet computed)
    getinv <- function() mi
    ## return a list of the four accessor functions defined above
    list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve takes a list 'x' of four functions: set, get,
## setinv, getinv which are defined in MakeCacheMatrix. If x$getinv is NULL
## then there is no matrix inverse for the matrix returned by x$get, so
## cacheSolve will recalculate and store the matrix inverse with x$setinv. If
## x$getinv is NOT NULL then cacheSolve will return the cached matrix inverse
## stored in 'mi'
cacheSolve <- function(x, ...) {
    ## Return the inverse of the matrix held by the cache object 'x'
    ## (the accessor list produced by makeCacheMatrix). A previously
    ## cached inverse is reused; otherwise the inverse is computed with
    ## solve(), stored back into 'x', and returned. Extra arguments in
    ## '...' are forwarded to solve().
    cached <- x$getinv()
    if (is.null(cached)) {
        ## cache miss: compute the inverse and remember it for next time
        cached <- solve(x$get(), ...)
        x$setinv(cached)
        return(cached)
    }
    ## cache hit: announce it and return the stored inverse
    message("getting cached data")
    cached
}
## EXAMPLE CODE
##
## create a matrix 'm1'
## m1 <- matrix(rnorm(16,8,2),nrow=4,ncol=4)
##
## create 'matrixObject' containing a special matrix vector containing the four
## makeCacheMatrix functions and the matrix 'm1'
## matObject <- makeCacheMatrix(m1)
##
## cache the matrix inverse of 'm1' within 'matObject' using cacheSolve
## cacheSolve(matObject)
##
## if the 'm1' is replaced with an equal matrix 'm1' using matObject$set(m1)
## then the cached matrix inverse is retained and reused by cacheSolve(matObject)
##
##
## if a new matrix 'm2' is stored within 'matObject' using matObject$set(m2)
## then cached matrix inverse is set to NULL and the matrix inverse will be
## recalculated when cacheSolve(matObject) is called
|
2bfd31618b8afa074951ae0610dd37f90acd8426 | 7dc0116140c9493c4e69b8d89d1d77ac84a37c11 | /R/build_site.R | dcd85be3c9822a5d0d1215df858c27c88ab2742f | [
"MIT"
] | permissive | drakileshr/dataspice | 4d2bcb09f12cb2377bf53c262a0513f1a7c4f50d | 0be2af6e25c746948da65770c6796e13952bca20 | refs/heads/main | 2023-07-14T14:19:11.714663 | 2021-09-01T12:21:52 | 2021-09-01T12:21:52 | 402,013,657 | 0 | 0 | NOASSERTION | 2021-09-01T10:02:44 | 2021-09-01T10:02:43 | null | UTF-8 | R | false | false | 1,137 | r | build_site.R | #' Build a dataspice site
#'
#' @param path (character) Path to a JSON+LD file with dataspice metadata
#' @param template_path (character) Optional. Path to a template for
#' \code{\link[whisker]{whisker.render}}
#' @param out_path (character) Optional. Path to write the site's `index.html`
#' to. Defaults to `docs/index.html`.
#'
#' @return Nothing. Creates/overwrites \code{docs/index.html}
#' @export
#'
#' @examples
#' \dontrun{
#' # Create JSON+LD from a set of metadata templates
#' json <- write_json(biblio, access, attributes, creators)
#' build_site(json)
#' }
build_site <- function(path = file.path("data", "metadata", "dataspice.json"),
                       template_path = system.file("template.html5",
                                                   package = "dataspice"),
                       out_path = file.path("docs", "index.html")) {
  # Render the dataspice JSON-LD metadata into a single-page HTML site:
  # convert the metadata to mustache data, make sure the output directory
  # exists, render the whisker template, and write the page to out_path
  # (overwriting any existing file).
  mustache_data <- jsonld_to_mustache(path)
  target_dir <- dirname(out_path)
  if (!dir.exists(target_dir)) {
    dir.create(target_dir, recursive = TRUE)
  }
  rendered <- whisker::whisker.render(readLines(template_path), mustache_data)
  writeLines(rendered, out_path)
}
|
c1dc3dbe3a19349c0095540a33f2bf7a44684c42 | f79cd4e052c5cbb24e7ef3e4bec1c39f9ce4e413 | /BEMTOOL-ver2.5-2018_0901/src/biol/bmtMTF/fromBMTtoXSAobject.r | af8c1e88a2fc3b2c7e424e3ab95cd9c1ff21f980 | [] | no_license | gresci/BEMTOOL2.5 | 4caf3dca3c67423af327a8ecb1e6ba6eacc8ae14 | 619664981b2863675bde582763c5abf1f8daf34f | refs/heads/master | 2023-01-12T15:04:09.093864 | 2020-06-23T07:00:40 | 2020-06-23T07:00:40 | 282,134,041 | 0 | 0 | null | 2020-07-24T05:47:24 | 2020-07-24T05:47:23 | null | UTF-8 | R | false | false | 39,045 | r | fromBMTtoXSAobject.r | # BEMTOOL - Bio-Economic Model TOOLs - version 2.5
# Authors: G. Lembo, I. Bitetto, M.T. Facchini, M.T. Spedicato 2018
# COISPA Tecnologia & Ricerca, Via dei Trulli 18/20 - (Bari), Italy
# In case of use of the model, the Authors should be cited.
# If you have any comments or suggestions please contact the following e-mail address: facchini@coispa.it
# BEMTOOL is believed to be reliable. However, we disclaim any implied warranty or representation about its accuracy,
# completeness or appropriateness for any particular purpose.
fromBMTtoXSAobject <- function(XSAi) {
if (FALSE) {
XSAi <- XSAinfo
}
for (m_int in 1:length(BMT_SPECIES)) {
associated_fleetsegment <- as.vector(cfg[rownames(cfg) == paste("casestudy.S", m_int, ".associatedFleetsegment", sep=""), ])
associated_fleetsegment <- associated_fleetsegment[!is.na(associated_fleetsegment) & associated_fleetsegment!="" & associated_fleetsegment!="-"]
n_fleet_for_species <- length(associated_fleetsegment)
SAtool <- as.character(cfg[rownames(cfg) == paste("casestudy.S", m_int, ".StockAssessmentTool", sep=""),1])
mortality_constant <- as.logical(cfg[rownames(cfg) == paste("casestudy.S", m_int, ".params", sep=""),18])
lifespan_F <- as.numeric(Populations[[m_int]]@lifespan[2,1])
lifespan_M <- as.numeric(Populations[[m_int]]@lifespan[1,1])
ALADYM_sim <- as.logical(cfg[rownames(cfg) == paste("casestudy.S", m_int, ".AladymSimulation", sep=""),1])
if (exists("mat_slot")) { rm(mat_slot) }
if (exists("mean_weight_slot")) { rm(mean_weight_slot) }
if (exists("stock_mean_weight_slot")) { rm(stock_mean_weight_slot) }
if (exists("catch_mean_weight_slot")) { rm(catch_mean_weight_slot) }
if (exists("catch_n_slot")) { rm(catch_n_slot) }
if (exists("m_slot")) { rm(m_slot) }
if (exists("harvest_slot")) { rm(harvest_slot) }
if (exists("stock_n_slot")) { rm(stock_n_slot) }
if (exists("catch_slot")) { rm(catch_slot) }
if (exists("zero_vector")) { rm(zero_vector) }
if (exists("mat_slot_M")) { rm(mat_slot_M) }
if (exists("mean_weight_slot_M")) { rm(mean_weight_slot_M) }
if (exists("stock_mean_weight_slot_M")) { rm(stock_mean_weight_slot_M) }
if (exists("catch_mean_weight_slot_M")) { rm(catch_mean_weight_slot_M) }
if (exists("catch_n_slot_M")) { rm(catch_n_slot_M) }
if (exists("m_slot_M")) { rm(m_slot_M) }
if (exists("harvest_slot_M")) { rm(harvest_slot_M) }
if (exists("stock_n_slot_M")) { rm(stock_n_slot_M) }
if (exists("catch_slot_M")) { rm(catch_slot_M) }
if (exists("zero_vector_M")) { rm(zero_vector_M) }
SAtool_dummy <- ifelse( (SAtool == "none" | SAtool == "NONE"), "" , SAtool)
if (SAtool_dummy == "VIT") {
print(paste("Trasforming BMT objects (from VIT assessment) in XSA object [",BMT_SPECIES[m_int],"]...", sep=""), quote=FALSE)
# print(paste("(stock assessment tool = ",SAtool, ")", sep=""), quote=FALSE)
minAge <- as.numeric(as.character(cfg[rownames(cfg) == paste("casestudy.S", m_int, ".StockAssessmentTool", sep=""),5]))
maxAge <- as.numeric(as.character(cfg[rownames(cfg) == paste("casestudy.S", m_int, ".StockAssessmentTool", sep=""),6]))
num_classes_all_years <- c(0)
for (y_int in 1:simperiod) {
num_classes_all_years <- c(num_classes_all_years, length(VITinfo[[m_int]][[y_int]]$results[[1]]$age_classes) )
}
num_classes_all_years <- num_classes_all_years[num_classes_all_years !=0]
min_num_classes <- min(num_classes_all_years)
for (y_int in 1:simperiod) {
VIT.sex <- as.logical(cfg[rownames(cfg) == paste("casestudy.S", m_int, ".StockAssessmentTool", sep=""),2])
if (!VIT.sex) {
resultsVit <- VITinfo[[m_int]][[y_int]]$results[[1]]
# num_classes <- max(ages_F, ages_M)
num_classes <- length(VITinfo[[m_int]][[y_int]]$results[[1]]$age_classes)
# maturity ratio
if (!exists("mat_slot")) { mat_slot <- as.numeric(as.character(resultsVit$ age_stocks [1:min_num_classes, 8] ))
} else { mat_slot <- c(mat_slot, as.numeric(as.character(resultsVit$ age_stocks [1:min_num_classes, 8] )) ) }
mat_slot[length(mat_slot)] <- mean(as.numeric(as.character(resultsVit$ age_stocks [min_num_classes:num_classes, 8] )) )
# mean weight
if (!exists("mean_weight_slot")) { mean_weight_slot <- as.numeric(as.character(resultsVit$ age_stocks [1:min_num_classes, 7] )) /1000
} else { mean_weight_slot <- c(mean_weight_slot, as.numeric(as.character(resultsVit$ age_stocks [1:min_num_classes, 7] ))/1000 ) }
mean_weight_slot[length(mean_weight_slot)] <- mean( as.numeric(as.character(resultsVit$ age_stocks [min_num_classes:num_classes, 7] ))/1000 )
# catches in numbers
if (!exists("catch_n_slot")) { catch_n_slot <- as.numeric(as.character(resultsVit$catches_nb[1:min_num_classes, 2])) / 1000
} else { catch_n_slot <- c(catch_n_slot, as.numeric(as.character(resultsVit$catches_nb[1:min_num_classes, 2])) / 1000 ) }
catch_n_slot[length(catch_n_slot)] <- sum( as.numeric(as.character(resultsVit$catches_nb [min_num_classes:num_classes, 2] ))/1000 )
# m
fV_Z <- as.numeric(as.character((resultsVit$VPA_results_mortalities [1:num_classes, 2])))
fV_F <- as.numeric(as.character((resultsVit$VPA_results_mortalities [1:num_classes, 3])))
diff_Z_F <- fV_Z - fV_F
if (!exists("m_slot")) { m_slot <- diff_Z_F[1:min_num_classes]
} else { m_slot <- c(m_slot, diff_Z_F[1:min_num_classes] ) }
m_slot[length(m_slot)] <- mean( diff_Z_F[min_num_classes:num_classes] )
# harvest
if (!exists("harvest_slot")) { harvest_slot <- fV_F[1:min_num_classes]
} else { harvest_slot <- c(harvest_slot, fV_F[1:min_num_classes] ) }
harvest_slot[length(harvest_slot)] <- mean( fV_F[min_num_classes:num_classes] )
# stock in number
if (!exists("stock_n_slot")) { stock_n_slot <- as.numeric(as.character(resultsVit$VPA_results_nb [1:min_num_classes, 2] )) / 1000
} else { stock_n_slot <- c(stock_n_slot, as.numeric(as.character(resultsVit$VPA_results_nb [1:min_num_classes, 2] )) / 1000 ) }
stock_n_slot[length(stock_n_slot)] <- sum( as.numeric(as.character(resultsVit$VPA_results_nb [min_num_classes:num_classes, 2] )) / 1000 )
# catch in weight by year
if (!exists("catch_slot")) { catch_slot <- as.numeric(as.character(resultsVit$catches_w[num_classes+1,2]) ) / 1000
} else { catch_slot <- c(catch_slot, as.numeric(as.character(resultsVit$catches_w[num_classes+1,2]) ) / 1000 ) }
# stock in weight by year
if (!exists("stock_slot")) { stock_slot <- as.numeric(as.character(resultsVit$VPA_results_w [(num_classes+1), 3] ) ) / 1000
} else { stock_slot <- c(stock_slot, as.numeric(as.character(resultsVit$VPA_results_w [(num_classes+1), 3] ) ) / 1000 ) }
if (!exists("zero_vector")) { zero_vector <- rep(0, min_num_classes)
} else { zero_vector <- c(zero_vector, rep(0, min_num_classes) ) }
} else {
# VIT by sex
ages_F <- length(VITinfo[[m_int]][[y_int]]$results[[1]]$age_classes)
ages_M <- length(VITinfo[[m_int]][[y_int]]$results[[2]]$age_classes)
num_classes_all_years <- c(0)
for (y_int in 1:simperiod) {
num_classes_all_years <- c(num_classes_all_years, length(VITinfo[[m_int]][[y_int]]$results[[1]]$age_classes) )
num_classes_all_years <- c(num_classes_all_years, length(VITinfo[[m_int]][[y_int]]$results[[2]]$age_classes) )
}
num_classes_all_years <- num_classes_all_years[num_classes_all_years !=0]
min_num_classes <- min(num_classes_all_years)
num_classes <- max(ages_F, ages_M)
lifespan_MF <- max(lifespan_F, lifespan_M)
# results of females
resultsVit <- VITinfo[[m_int]][[y_int]]$results[[1]]
resultsVit_M <- VITinfo[[m_int]][[y_int]]$results[[2]]
# maturity ratio
if (!exists("mat_slot")) { mat_slot <- as.numeric(as.character(resultsVit$ age_stocks [1:min_num_classes, 8] ))
} else { mat_slot <- c(mat_slot, as.numeric(as.character(resultsVit$ age_stocks [1:min_num_classes, 8] )) ) }
mat_slot[length(mat_slot)] <- mean(as.numeric(as.character(resultsVit$ age_stocks [min_num_classes:num_classes, 8] )) )
if (!exists("mat_slot_M")) { mat_slot_M <- as.numeric(as.character(resultsVit_M$ age_stocks [1:num_classes, 8] ))
} else { mat_slot_M <- c(mat_slot_M, as.numeric(as.character(resultsVit_M$ age_stocks [1:num_classes, 8] )) ) }
mat_slot_M[length(mat_slot_M)] <- mean(as.numeric(as.character(resultsVit_M$ age_stocks [min_num_classes:num_classes, 8] )) )
# mean weight
if (!exists("mean_weight_slot")) { mean_weight_slot <- as.numeric(as.character(resultsVit$ age_stocks [1:min_num_classes, 7] )) /1000
} else { mean_weight_slot <- c(mean_weight_slot, as.numeric(as.character(resultsVit$ age_stocks [1:min_num_classes, 7] ))/1000 ) }
mean_weight_slot[length(mean_weight_slot)] <- mean( as.numeric(as.character(resultsVit$ age_stocks [min_num_classes:num_classes, 7] ))/1000 )
if (!exists("mean_weight_slot_M")) { mean_weight_slot_M <- as.numeric(as.character(resultsVit_M$ age_stocks [1:min_num_classes, 7] )) /1000
} else { mean_weight_slot_M <- c(mean_weight_slot_M, as.numeric(as.character(resultsVit_M$ age_stocks [1:min_num_classes, 7] ))/1000 ) }
mean_weight_slot_M[length(mean_weight_slot_M)] <- mean( as.numeric(as.character(resultsVit_M$ age_stocks [min_num_classes:num_classes, 7] ))/1000 )
# catches in numbers
if (!exists("catch_n_slot")) { catch_n_slot <- as.numeric(as.character(resultsVit$catches_nb[1:min_num_classes, 2])) / 1000
} else { catch_n_slot <- c(catch_n_slot, as.numeric(as.character(resultsVit$catches_nb[1:min_num_classes, 2])) / 1000 ) }
catch_n_slot[length(catch_n_slot)] <- sum( as.numeric(as.character(resultsVit$catches_nb [min_num_classes:num_classes, 2] ))/1000 )
if (!exists("catch_n_slot_M")) { catch_n_slot_M <- as.numeric(as.character(resultsVit_M$catches_nb[1:min_num_classes, 2])) / 1000
} else { catch_n_slot_M <- c(catch_n_slot_M, as.numeric(as.character(resultsVit_M$catches_nb[1:min_num_classes, 2])) / 1000 ) }
catch_n_slot_M[length(catch_n_slot_M)] <- sum( as.numeric(as.character(resultsVit_M$catches_nb [min_num_classes:num_classes, 2] ))/1000 )
# m
fV_Z <- as.numeric(as.character((resultsVit$VPA_results_mortalities [1:num_classes, 2])))
fV_F <- as.numeric(as.character((resultsVit$VPA_results_mortalities [1:num_classes, 3])))
diff_Z_F <- fV_Z - fV_F
if (!exists("m_slot")) { m_slot <- diff_Z_F[1:min_num_classes]
} else { m_slot <- c(m_slot, diff_Z_F[1:min_num_classes] ) }
m_slot[length(m_slot)] <- mean( diff_Z_F[min_num_classes:num_classes] )
fV_Z_M <- as.numeric(as.character((resultsVit_M$VPA_results_mortalities [1:num_classes, 2])))
fV_F_M <- as.numeric(as.character((resultsVit_M$VPA_results_mortalities [1:num_classes, 3])))
diff_Z_F_M <- fV_Z_M - fV_F_M
if (!exists("m_slot_M")) { m_slot_M <- diff_Z_F_M[1:min_num_classes]
} else { m_slot_M <- c(m_slot_M, diff_Z_F_M[1:min_num_classes] ) }
m_slot_M[length(m_slot_M)] <- mean( diff_Z_F_M[min_num_classes:num_classes] )
# harvest
if (!exists("harvest_slot")) { harvest_slot <- fV_F[1:min_num_classes]
} else { harvest_slot <- c(harvest_slot, fV_F[1:min_num_classes] ) }
harvest_slot[length(harvest_slot)] <- mean( fV_F[min_num_classes:num_classes] )
if (!exists("harvest_slot_M")) { harvest_slot_M <- fV_F_M[1:min_num_classes]
} else { harvest_slot_M <- c(harvest_slot_M, fV_F_M[1:min_num_classes] ) }
harvest_slot_M[length(harvest_slot_M)] <- mean( fV_F_M[min_num_classes:num_classes] )
# stock in number
if (!exists("stock_n_slot")) { stock_n_slot <- as.numeric(as.character(resultsVit$VPA_results_nb [1:(min_num_classes), 2] )) / 1000
} else { stock_n_slot <- c(stock_n_slot, as.numeric(as.character(resultsVit$VPA_results_nb [1:(min_num_classes), 2] )) / 1000 ) }
stock_n_slot[length(stock_n_slot)] <- sum( as.numeric(as.character(resultsVit$VPA_results_nb [min_num_classes:num_classes, 2] )) / 1000 )
if (!exists("stock_n_slot_M")) { stock_n_slot_M <- as.numeric(as.character(resultsVit_M$VPA_results_nb [1:(min_num_classes), 2] )) / 1000
} else { stock_n_slot_M <- c(stock_n_slot_M, as.numeric(as.character(resultsVit_M$VPA_results_nb [1:(min_num_classes), 2] )) / 1000 ) }
stock_n_slot_M[length(stock_n_slot_M)] <- sum( as.numeric(as.character(resultsVit_M$VPA_results_nb [min_num_classes:num_classes, 2] )) / 1000 )
# catch in weight by year
if (!exists("catch_slot")) { catch_slot <- as.numeric(as.character(resultsVit$catches_w[num_classes+1,2]) ) / 1000
} else { catch_slot <- c(catch_slot, as.numeric(as.character(resultsVit$catches_w[num_classes+1,2]) ) / 1000 ) }
if (!exists("catch_slot_M")) { catch_slot_M <- as.numeric(as.character(resultsVit_M$catches_w[num_classes+1,2]) ) / 1000
} else { catch_slot_M <- c(catch_slot_M, as.numeric(as.character(resultsVit_M$catches_w[num_classes+1,2]) ) / 1000 ) }
# stock in weight by year
if (!exists("stock_slot")) { stock_slot <- as.numeric(as.character(resultsVit$VPA_results_w [(num_classes+1), 3] ) ) / 1000
} else { stock_slot <- c(stock_slot, as.numeric(as.character(resultsVit$VPA_results_w [(num_classes+1), 3] ) ) / 1000 ) }
if (!exists("stock_slot_M")) { stock_slot_M <- as.numeric(as.character(resultsVit_M$VPA_results_w [(num_classes+1), 3] ) ) / 1000
} else { stock_slot_M <- c(stock_slot_M, as.numeric(as.character(resultsVit_M$VPA_results_w [(num_classes+1), 3] ) ) / 1000 ) }
if (!exists("zero_vector")) { zero_vector <- rep(0, min_num_classes)
} else { zero_vector <- c(zero_vector, rep(0, min_num_classes) ) }
}
}
if (VIT.sex) {
# lifespan <- lifespan_MF
catch_slot <- catch_slot + catch_slot_M
catch_n_slot <- catch_n_slot + catch_n_slot_M
mean_weight_slot <- mean(mean_weight_slot, mean_weight_slot_M)
stock_slot <- stock_slot + stock_slot_M
stock_n_slot <- stock_n_slot + stock_n_slot_M
m_slot <- mean(m_slot, m_slot_M )
mat_slot <- mean(mat_slot, mat_slot_M)
harvest_slot <- mean(mat_slot, mat_slot_M)
}
bmtXSAstock <- new(Class= "FLStock")
bmtXSAstock@catch <- FLQuant(catch_slot, dimnames=list(age="all", year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@catch.n <- FLQuant(catch_n_slot, dimnames=list(age=(0:(min_num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@catch.wt <- FLQuant(mean_weight_slot, dimnames=list(age=(0:(min_num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@discards <- FLQuant(c(0,0,0,0), dimnames=list(age="all", year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@discards.n <- FLQuant(zero_vector, dimnames=list(age=(0:(min_num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@discards.wt <- FLQuant(zero_vector, dimnames=list(age=(0:(min_num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@landings <- FLQuant(catch_slot, dimnames=list(age="all", year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@landings.n <- FLQuant(catch_n_slot, dimnames=list(age=(0:(min_num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@landings.wt <- FLQuant(mean_weight_slot, dimnames=list(age=(0:(min_num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@stock <- FLQuant(stock_slot, dimnames=list(age="all", year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@stock.n <- FLQuant(stock_n_slot, dimnames=list(age=(0:(min_num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@stock.wt <- FLQuant(mean_weight_slot, dimnames=list(age=(0:(min_num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@m <- FLQuant(m_slot, dimnames=list(age=(0:(min_num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@mat <- FLQuant(mat_slot, dimnames=list(age=(0:(min_num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@harvest <- FLQuant(harvest_slot, dimnames=list(age=(0:(min_num_classes-1)), year=years[1]:years[simperiod]), units="f")
bmtXSAstock@harvest.spwn <- FLQuant(zero_vector, dimnames=list(age=(0:(min_num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@m.spwn <- FLQuant(zero_vector, dimnames=list(age=(0:(min_num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@name <- paste("Index File", BMT_SPECIES[m_int], "in GSA", BMT_GSA)
bmtXSAstock@desc <- paste("Generated by BEMTOOL software",Sys.Date() )
range_vector <- c(0,(min_num_classes-1),(min_num_classes-1),years[1],years[simperiod],minAge,maxAge)
names(range_vector) <- c("min", "max", "plusgroup", "minyear", "maxyear", "minfbar", "maxfbar")
bmtXSAstock@range <- range_vector
save_path <- paste(casestudy_path, "/", harvest_rule_id,"/Biological Pressure Impact/MSTF - ", BMT_SPECIES[m_int],"/", casestudy_name, " - ", BMT_SPECIES[m_int], " XSA object FORE", harvest_rule_id,".dat", sep="")
dput(bmtXSAstock, file=save_path)
XSAi[[m_int]]$results <- list(results=bmtXSAstock)
} else if (SAtool_dummy == "Report") {
print(paste("Trasforming BMT objects (from Report assessment) in XSA object [",BMT_SPECIES[m_int],"]...", sep=""), quote=FALSE)
# print(paste("(stock assessment tool = ",SAtool, ")", sep=""), quote=FALSE)
num_classes <- as.numeric(as.character(cfg[rownames(cfg) == paste("casestudy.S", m_int, ".StockAssessmentTool", sep=""),5]))
resultsReport <- ReportINFO[[m_int]]$results
for (yea in 1:simperiod) {
# maturity ratio
if (!exists("mat_slot")) { mat_slot <- as.numeric(as.character(resultsReport$ maturity [, yea] ))
} else { mat_slot <- c(mat_slot, as.numeric(as.character(resultsReport$ maturity [, yea] )) ) }
}
for (yea in 1:simperiod) {
# mean weight for catch
if (!exists("catch_mean_weight_slot")) { catch_mean_weight_slot <- as.numeric(as.character(resultsReport$ catches_wt [, yea] ))
} else { catch_mean_weight_slot <- c(catch_mean_weight_slot, as.numeric(as.character(resultsReport$ catches_wt [, yea] )) ) }
}
for (yea in 1:simperiod) {
# mean weight for stock
if (!exists("stock_mean_weight_slot")) { stock_mean_weight_slot <- as.numeric(as.character(resultsReport$stock_wt [, yea] ))
} else { stock_mean_weight_slot <- c(stock_mean_weight_slot, as.numeric(as.character(resultsReport$stock_wt [, yea] )) ) }
}
for (yea in 1:simperiod) {
# catch in numbers
if (!exists("catch_n_slot")) { catch_n_slot <- as.numeric(as.character(resultsReport$catches_nb [, yea] ))
} else { catch_n_slot <- c(catch_n_slot, as.numeric(as.character(resultsReport$catches_nb [, yea] )) ) }
}
for (yea in 1:simperiod) {
# natural mortality
if (!exists("m_slot")) { m_slot <- as.numeric(as.character(resultsReport$natural_mortality [, yea] ))
} else { m_slot <- c(m_slot, as.numeric(as.character(resultsReport$natural_mortality [, yea] )) ) }
}
for (yea in 1:simperiod) {
# fishing mortality
if (!exists("harvest_slot")) { harvest_slot <- as.numeric(as.character(resultsReport$fishing_mortality [, yea] ))
} else { harvest_slot <- c(harvest_slot, as.numeric(as.character(resultsReport$fishing_mortality [, yea] )) ) }
}
for (yea in 1:simperiod) {
# # stock in number
if (!exists("stock_n_slot")) { stock_n_slot <- as.numeric(as.character(resultsReport$stock_nb [, yea] ))
} else { stock_n_slot <- c(stock_n_slot, as.numeric(as.character(resultsReport$stock_nb [, yea] )) ) }
}
for (yea in 1:simperiod) {
# catch in weight by year
if (!exists("catch_slot")) { catch_slot <- sum(as.numeric(as.character(resultsReport$catches_nb [, yea] )) * as.numeric(as.character(resultsReport$catches_wt [, yea] )) )
} else { catch_slot <- c(catch_slot, sum(as.numeric(as.character(resultsReport$catches_nb [, yea] )) * as.numeric(as.character(resultsReport$catches_wt [, yea] )) ) ) }
}
for (yea in 1:simperiod) {
# stock in weight by year
if (!exists("stock_slot")) { stock_slot <- sum(as.numeric(as.character(resultsReport$stock_nb [, yea] )) * as.numeric(as.character(resultsReport$stock_wt [, yea] )) )
} else { stock_slot <- c(stock_slot, sum(as.numeric(as.character(resultsReport$stock_nb [, yea] )) * as.numeric(as.character(resultsReport$stock_wt [, yea] )) ) ) }
}
for (yea in 1:simperiod) {
if (!exists("zero_vector")) { zero_vector <- rep(0, num_classes)
} else { zero_vector <- c(zero_vector, rep(0, num_classes) ) }
}
bmtXSAstock <- new(Class= "FLStock")
bmtXSAstock@catch <- FLQuant(catch_slot, dimnames=list(age="all", year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@catch.n <- FLQuant(catch_n_slot, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@catch.wt <- FLQuant(catch_mean_weight_slot, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@discards <- FLQuant(c(0,0,0,0), dimnames=list(age="all", year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@discards.n <- FLQuant(zero_vector, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@discards.wt <- FLQuant(zero_vector, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@landings <- FLQuant(catch_slot, dimnames=list(age="all", year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@landings.n <- FLQuant(catch_n_slot, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@landings.wt <- FLQuant(catch_mean_weight_slot, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@stock <- FLQuant(stock_slot, dimnames=list(age="all", year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@stock.n <- FLQuant(stock_n_slot, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@stock.wt <- FLQuant(stock_mean_weight_slot, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@m <- FLQuant(m_slot, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@mat <- FLQuant(mat_slot, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@harvest <- FLQuant(harvest_slot, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@harvest.spwn <- FLQuant(zero_vector, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@m.spwn <- FLQuant(zero_vector, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@name <- paste("Index File", BMT_SPECIES[m_int], "in GSA", BMT_GSA)
bmtXSAstock@desc <- paste("Generated by BEMTOOL software",Sys.Date() )
minAge <- as.numeric(as.character(resultsReport$age_rangeF$min))
maxAge <- as.numeric(as.character(resultsReport$age_rangeF$max))
range_vector <- c(0,(num_classes-1),NA,years[1],years[simperiod],minAge,maxAge)
names(range_vector) <- c("min", "max", "plusgroup", "minyear", "maxyear", "minfbar", "maxfbar")
bmtXSAstock@range <- range_vector
save_path <- paste(casestudy_path, "/", harvest_rule_id,"/Biological Pressure Impact/MSTF - ", BMT_SPECIES[m_int],"/", casestudy_name, " - ", BMT_SPECIES[m_int], " XSA object FORE ", harvest_rule_id,".dat", sep="")
dput(bmtXSAstock, file=save_path)
print(paste("Saving XSA object in", save_path))
XSAi[[m_int]]$results <- bmtXSAstock
} else if ( (SAtool_dummy == "NONE" | SAtool_dummy == "SURBA" | SAtool_dummy == "") & ALADYM_sim ) { # bmt to aladym
phase <<- "SIMULATION"
ALADYM_spe <<- m_int
source(paste(ALADYM_home, "/src/paths.r", sep=""))
phase <<- "FORECAST"
source(paste(getwd(), "/src/biol/bmtALADYM/reloadEnvSpecies.r", sep=""))
ind_weightsM <- data.frame(cbind(BAS$MAge, BAS$MWeight))
colnames(ind_weightsM) <- c("age_fraction","weight")
ind_weightsM$age <- trunc(ind_weightsM$age_fraction)
weights_M <- aggregate(ind_weightsM$weight, by=list(ind_weightsM$age), FUN="mean")
first_age <- trunc(INP$tr/12)
num_classes <- max(as.numeric(as.character(Populations[[m_int]]@lifespan$lifespan)))
weights_M_rep <- rep(NA, num_classes)
weights_M_rep[(first_age+1):num_classes] <- weights_M[1:(nrow(weights_M)-1),2]
weights_M <- weights_M_rep
weights_M[which(is.na(weights_M))] <- weights_M[which(!is.na(weights_M))][1]
# weights_M <- weights_M[1:(nrow(weights_M)-1),2]
ind_weightsF <- data.frame(cbind(BAS$FAge, BAS$FWeight))
colnames(ind_weightsF) <- c("age_fraction","weight")
ind_weightsF$age <- trunc(ind_weightsF$age_fraction)
weights_F <- aggregate(ind_weightsF$weight, by=list(ind_weightsF$age), FUN="mean")
first_age <- trunc(INP$tr/12)
num_classes <- max(as.numeric(as.character(Populations[[m_int]]@lifespan$lifespan)))
weights_F_rep <- rep(NA, num_classes)
weights_F_rep[(first_age+1):num_classes] <- weights_F[1:(nrow(weights_F)-1),2]
weights_F <- weights_F_rep
weights_F[which(is.na(weights_F))] <- weights_F[which(!is.na(weights_F))][1]
# weights_F <- weights_F[1:(nrow(weights_F)-1),2]
all_weights <- data.frame(rbind(weights_M, weights_F) )
cw_sw <- colMeans(all_weights, na.rm=T)
mortM <- rowMeans(Populations[[m_int]]@M.vect$M, na.rm=T)
mortM[which(is.na(mortM))] <- mortM[which(!is.na(mortM))][1]
mortF <- rowMeans(Populations[[m_int]]@M.vect$F, na.rm=T)
mortF[which(is.na(mortF))] <- mortF[which(!is.na(mortF))][1]
all_mort <- data.frame(rbind(mortM, mortF) )
mort_xsa <- colMeans(all_mort, na.rm=T)
all_F <- read.csv(F_BYGEAR_table, sep=";")
all_F_m <- all_F[all_F$sex== "M",]
all_F_f <- all_F[all_F$sex== "F",]
all_F_m[all_F_m == 0] <- NA
all_F_f[all_F_f == 0] <- NA
all_F_allsex <- all_F_m
for (nro in 1:nrow(all_F_allsex)) {
mm <- data.frame(rbind(all_F_m[nro,], all_F_f[nro,] ))
all_F_allsex[nro, colnames(all_F_allsex) != "Year.Age" & colnames(all_F_allsex) != "sex" & colnames(all_F_allsex) != "Gear"] <- colMeans(mm[,(colnames(mm) != "Year.Age" & colnames(mm) != "sex" & colnames(mm) != "Gear")], na.rm=T)
}
unique(all_F_allsex$Gear)
all_F_total <- all_F_allsex[all_F_allsex$Gear == unique(all_F_allsex$Gear)[1],]
for (nro in 1:nrow(all_F_total)) {
mm <- all_F_allsex[all_F_allsex$Year.Age == all_F_total$Year.Age[nro],colnames(all_F_allsex) != "Year.Age" & colnames(all_F_allsex) != "sex" & colnames(all_F_allsex) != "Gear"]
all_F_total[nro, colnames(all_F_total) != "Year.Age" & colnames(all_F_total) != "sex" & colnames(all_F_total) != "Gear"] <- colSums(mm[,(colnames(mm) != "Year.Age" & colnames(mm) != "sex" & colnames(mm) != "Gear")], na.rm=T)
}
print(paste("Trasforming BMT objects (from ALADYM simulation) in XSA object [",BMT_SPECIES[m_int],"]...", sep=""), quote=FALSE)
# print(paste("(stock assessment tool = ",SAtool, ")", sep=""), quote=FALSE)
num_classes <- max(as.numeric(as.character(Populations[[m_int]]@lifespan$lifespan)))
for (yea in 1:simperiod) {
# maturity ratio
if (!exists("mat_slot")) { mat_slot <- as.numeric(as.character(colMeans(Populations[[m_int]]@maturity.vect) ))
} else { mat_slot <- c(mat_slot, as.numeric(as.character(colMeans(Populations[[m_int]]@maturity.vect) )) ) }
}
for (yea in 1:simperiod) {
# mean weight for catch rep(Interactionsyear[[yea]][[m_int]]@totalcatch@meanWeight , num_classes)
if (!exists("catch_mean_weight_slot")) { catch_mean_weight_slot <- as.numeric(as.character(cw_sw ))
} else { catch_mean_weight_slot <- c(catch_mean_weight_slot, as.numeric(as.character(cw_sw )) ) }
}
for (yea in 1:simperiod) {
# mean weight for stock
if (!exists("stock_mean_weight_slot")) { stock_mean_weight_slot <- as.numeric(as.character(cw_sw ))
} else { stock_mean_weight_slot <- c(stock_mean_weight_slot, as.numeric(as.character(cw_sw )) ) }
}
for (yea in 1:simperiod) {
# catch in numbers
if (!exists("catch_n_slot")) { catch_n_slot <- as.numeric(as.character(Interactionsyear[[yea]][[m_int]]@totalcatch@numbers ))
} else { catch_n_slot <- c(catch_n_slot, as.numeric(as.character(Interactionsyear[[yea]][[m_int]]@totalcatch@numbers )) ) }
}
for (yea in 1:simperiod) {
# natural mortality
if (!exists("m_slot")) { m_slot <- as.numeric(as.character(mort_xsa))
} else { m_slot <- c(m_slot, as.numeric(as.character(mort_xsa)) ) }
}
for (yea in 1:simperiod) {
# fishing mortality
if (!exists("harvest_slot")) { harvest_slot <- as.numeric(as.character(all_F_total[yea, colnames(all_F_total) != "Year.Age" & colnames(all_F_total) != "sex" & colnames(all_F_total) != "Gear"] ))
} else { harvest_slot <- c(harvest_slot, as.numeric(as.character(all_F_total[yea, colnames(all_F_total) != "Year.Age" & colnames(all_F_total) != "sex" & colnames(all_F_total) != "Gear"] )) ) }
}
for (yea in 1:simperiod) {
# # stock in number
numb_M <- rowMeans(Interactionsyear[[yea]][[m_int]]@exploitedStock@numbers$M )
numb_F <- rowMeans(Interactionsyear[[yea]][[m_int]]@exploitedStock@numbers$F )
all_numb <- data.frame(rbind(numb_M, numb_F) )
stocknumbers_xsa <- colSums(all_numb, na.rm=T)/1000
if (!exists("stock_n_slot")) { stock_n_slot <- as.numeric(as.character(stocknumbers_xsa ))
} else { stock_n_slot <- c(stock_n_slot, as.numeric(as.character(stocknumbers_xsa )) ) }
}
stock_slot <- stock_n_slot * stock_mean_weight_slot
catch_slot <- catch_n_slot/1000 * stock_mean_weight_slot
# for (yea in 1:simperiod) {
# # catch in weight by year
# if (!exists("catch_slot")) { catch_slot <- sum(as.numeric(as.character(resultsReport$catches_nb[, yea] )) * as.numeric(as.character(resultsReport$catches_wt[, yea] )) )
# } else { catch_slot <- c(catch_slot, sum(as.numeric(as.character(resultsReport$catches_nb[, yea] )) * as.numeric(as.character(resultsReport$catches_wt[, yea] )) ) ) }
# }
# for (yea in 1:simperiod) {
# # stock in weight by year
# if (!exists("stock_slot")) { stock_slot <- sum(as.numeric(as.character(resultsReport$stock_nb [, yea] )) * as.numeric(as.character(resultsReport$stock_wt [, yea] )) )
# } else { stock_slot <- c(stock_slot, sum(as.numeric(as.character(resultsReport$stock_nb [, yea] )) * as.numeric(as.character(resultsReport$stock_wt [, yea] )) ) ) }
# }
for (yea in 1:simperiod) {
if (!exists("zero_vector")) { zero_vector <- rep(0, num_classes)
} else { zero_vector <- c(zero_vector, rep(0, num_classes) ) }
}
bmtXSAstock <- new(Class= "FLStock")
bmtXSAstock@catch <- FLQuant(catch_slot, dimnames=list(age="all", year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@catch.n <- FLQuant(catch_n_slot/1000, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@catch.wt <- FLQuant(catch_mean_weight_slot/1000, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@discards <- FLQuant(c(0,0,0,0), dimnames=list(age="all", year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@discards.n <- FLQuant(zero_vector, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@discards.wt <- FLQuant(zero_vector, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@landings <- FLQuant(catch_slot, dimnames=list(age="all", year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@landings.n <- FLQuant(catch_n_slot, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@landings.wt <- FLQuant(catch_mean_weight_slot/1000, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@stock <- FLQuant(stock_slot, dimnames=list(age="all", year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@stock.n <- FLQuant(stock_n_slot, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@stock.wt <- FLQuant(stock_mean_weight_slot/1000, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@m <- FLQuant(m_slot, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@mat <- FLQuant(mat_slot, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@harvest <- FLQuant(harvest_slot, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@harvest@units <- "f"
bmtXSAstock@harvest.spwn <- FLQuant(zero_vector, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@m.spwn <- FLQuant(zero_vector, dimnames=list(age=(0:(num_classes-1)), year=years[1]:years[simperiod]), units="NA")
bmtXSAstock@name <- paste("Index File", BMT_SPECIES[m_int], "in GSA", BMT_GSA)
bmtXSAstock@desc <- paste("Generated by BEMTOOL software",Sys.Date() )
minAge <- as.numeric(as.character(max(as.numeric(ALADYM_GUI_simulations[[m_int]]@fishingmortality$min))))
maxAge <- as.numeric(as.character(min(as.numeric(ALADYM_GUI_simulations[[m_int]]@fishingmortality$max))))
range_vector <- c(0,(num_classes-1),NA,years[1],years[simperiod],minAge,maxAge)
names(range_vector) <- c("min", "max", "plusgroup", "minyear", "maxyear", "minfbar", "maxfbar")
bmtXSAstock@range <- range_vector
save_path <- paste(casestudy_path, "/", harvest_rule_id,"/Biological Pressure Impact/MSTF - ", BMT_SPECIES[m_int],"/", casestudy_name, " - ", BMT_SPECIES[m_int], " XSA object FORE ", harvest_rule_id,".dat", sep="")
dput(bmtXSAstock, file=save_path)
print(paste("Saving XSA object in", save_path))
XSAi[[m_int]]$results <- bmtXSAstock
} # end Report loop
} # end species loop
return(XSAi)
} |
0d2fbd8472c2f65806890d3c1813b73ab8de590e | 0a9a7915d1cf4ef6f1d85d4d604ea08711be08d9 | /mapas_google.R | 72e7e1f9806be6c5b52fd6a700f99f4f3f82cdd6 | [] | no_license | joleonar/Norte_Maracay | 28c814953ff7152442abce8c32af66c3922ce221 | 4d838fea459371273336144e887b5391765b0043 | refs/heads/master | 2020-05-19T16:20:23.744791 | 2015-05-12T14:43:15 | 2015-05-12T14:43:15 | 23,705,481 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 973 | r | mapas_google.R | library(ggplot2)
library(ggmap)
# creating a sample data.frame with your lat/lon points
# NOTE(review): lon/lat/df are built here but never used below - dead code.
lon <- c(-68,-66)
lat <- c(10, 11)
df <- as.data.frame(cbind(lon,lat))
# Read the earthquake catalogue (whitespace-separated file with a header row);
# the first column is renamed to "ANO" (year).
base <- read.table("collect_2.out",header=T)
names(base)[1]<- "ANO"
# Strip the "Mw" prefix from the magnitude and the "F" flag from the depth
# column so both become numeric.
base$MAG <-as.numeric(sub("Mw","",base$MAG))
base$PROF <-as.numeric(sub("F","",base$PROF))
# Row(s) of the strongest event; the map below is centred on it.
bmax <- base[which(base$MAG==max(base$MAG)),]
# getting the map (downloads a Google/terrain tile around the main shock)
mapsismo <- get_map(location = c(lon = bmax$LON, lat = bmax$LAT), zoom = 9,
maptype = "terrain", scale = 2)
# plotting the map with some points on it
# NOTE(review): `Fallas` (fault-line coordinates) is not created anywhere in
# this script; it must already exist in the workspace or the geom_line layer
# errors - confirm where it is loaded from.
ggmap(mapsismo) + geom_point(data = base, aes(x = LON, y = LAT, fill = "red"), size = 2, shape = 21) +
guides(fill=FALSE, alpha=FALSE, size=FALSE)+labs(title = "Sismo: Agosto 2014") + xlab("Longitud") + ylab("Latitud") +
geom_line(data=Fallas,aes(x=long,y=lat,group=group),colour="red") + geom_point(aes(x=bmax$LON,y=bmax$LAT),shape=8,size=5,color="red")
|
2ad3e6890245f4124d95a4c8aed3f43d8533d2f5 | d69bdac27343896d7781ab4a4da2a47e810ff2ef | /plot2.R | 923e1e00f441b902d14d9f1e7a16625fb00eea4b | [] | no_license | dannyfraser/ExData_Plotting1 | dbec51efdd169ae8f5f51403e2b8ec7aea6e95f6 | 168094b74f917203841a083396456d67fb9b8c46 | refs/heads/master | 2021-01-24T14:18:38.783502 | 2015-10-09T08:02:30 | 2015-10-09T08:02:30 | 43,806,540 | 0 | 0 | null | 2015-10-07T09:34:19 | 2015-10-07T09:34:19 | null | UTF-8 | R | false | false | 533 | r | plot2.R | # create chart 2
# Plot 2: household Global Active Power over 1-2 February 2007, drawn as a
# line chart and written to plot2.png (480 x 480 px).
library(readr)
library(dplyr)
library(lubridate)

# Two-day window of interest (interval is closed at the start of 2007-02-03).
plot_period <- interval(ymd("2007-02-01"), ymd("2007-02-03"))

# Read the raw measurements, build a combined timestamp from the Date and
# Time columns, and keep only the rows inside the plotting window.
raw_power <- read_delim("data/household_power_consumption.txt", delim=";")
stamped <- mutate(raw_power, DateTime=dmy_hms(paste(Date, Time)))
power <- filter(stamped, DateTime %within% plot_period)

# Render the line chart straight into a PNG device.
png(filename="plot2.png", height=480, width=480)
plot(
  x = power$DateTime,
  y = power$Global_active_power,
  ylab = "Global Active Power (kilowatts)",
  xlab = "",
  type = "l"
)
dev.off()
6aea8566a03d2dc2c98dd163c882ac0993f2cec9 | d48427c3eef0bb887f6b54161822e7da74481574 | /server.R | 0ed1bb399ea022da06a29fe8a8c0cd3162e5964d | [] | no_license | SupermercadoEmporium/Mayo2014 | 69c9bf04ec2fa7eefb4e653d7613aef7826472bc | 44ab9990b55ed6d2e55d2130a28cc2c1a3aeead9 | refs/heads/master | 2016-08-12T11:53:59.836885 | 2016-01-15T15:51:02 | 2016-01-15T15:51:02 | 49,673,742 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,633 | r | server.R | #Primer Semestre
#install.packages("shiny")
library(shiny)
# Server logic for the Mayo (May 2014) market-basket dashboard.
#
# Globals expected in the app environment:
#   * a_matrix_Mayo     - square matrix of pairwise category supports, indexed
#                         by category name (defined elsewhere - TODO confirm).
#   * catg_sin_rep_Mayo - de-duplicated (category, subcategory, seq, ticket)
#                         table built at the bottom of this file.
shinyServer(function(input, output) {

  # Rounded support of the category pair (a, b) taken from the support
  # matrix; support(a, a) is the support of the single category a.
  support <- function(a, b) {
    round(a_matrix_Mayo[a, b], digits = 4)
  }

  # Relative frequency of the top `k` CATEGORIA1 values among the rows whose
  # Categoriaf3 equals `cat_f3`.  Returns a named vector sorted by decreasing
  # frequency; positions beyond the number of distinct categories come back
  # as NA, matching the behaviour of the original per-output code.
  top_cat1_freq <- function(cat_f3, k = 5) {
    sub_df <- na.omit(subset(catg_sin_rep_Mayo,
                             Mayo.Categoriaf3 == cat_f3,
                             select = c(Mayo.CATEGORIA1)))
    sub <- sub_df[, 1]
    sort(table(sub), decreasing = TRUE)[seq_len(k)] / length(sub)
  }

  # Support of each selected category on its own.
  output$mayo <- renderPrint({
    y <- input$select
    paste(y, support(y, y))
  })
  output$mayo2 <- renderPrint({
    y <- input$select2
    paste(y, support(y, y))
  })

  # Confidence of the rule select -> select2.
  output$confidencemayo <- renderPrint({
    x <- input$select
    y <- input$select2
    paste("Confidence", round(a_matrix_Mayo[x, y] / a_matrix_Mayo[x, x], digits = 4))
  })

  # Lift of the rule select -> select2 (confidence / support of consequent),
  # rounding at each intermediate step exactly as the original code did.
  output$liftmayo <- renderPrint({
    x <- input$select
    y <- input$select2
    confidence <- round(a_matrix_Mayo[x, y] / a_matrix_Mayo[x, x], digits = 4)
    paste("Lift", round(confidence / round(a_matrix_Mayo[y, y], digits = 4), digits = 4))
  })

  # The original file repeated the same subset/table/sort recipe in twenty
  # near-identical renderText blocks (name and frequency of the i-th most
  # common CATEGORIA1, for both select inputs).  Generate those outputs in a
  # loop instead; local() pins the loop index inside each reactive, otherwise
  # every output would observe the final value of i.
  for (i in 1:5) {
    local({
      idx <- i
      output[[paste0("tablanamecat", idx, "mayo")]] <- renderText({
        t <- top_cat1_freq(input$select)
        paste(names(t[idx]))
      })
      output[[paste0("tablaprobcat", idx, "mayo")]] <- renderText({
        t <- top_cat1_freq(input$select)
        paste(round(t[idx], digits = 4))
      })
      output[[paste0("tabla1namecat", idx, "mayo")]] <- renderText({
        t <- top_cat1_freq(input$select2)
        paste(names(t[idx]))
      })
      output[[paste0("tabla1probcat", idx, "mayo")]] <- renderText({
        t <- top_cat1_freq(input$select2)
        paste(round(t[idx], digits = 4))
      })
    })
  }
})
# Mayo: build the lookup table used by the server's category outputs.
# data.frame() derives the column names "Mayo.CATEGORIA1", "Mayo.Categoriaf3",
# etc. from the expressions below; the subset() calls in the server rely on
# those exact names, so do not rewrite this as e.g. Mayo[, c(...)].
# `Mayo` itself is expected to be loaded elsewhere in the app - TODO confirm.
catg_Mayo<-data.frame(Mayo$CATEGORIA1, Mayo$Categoriaf3,
Mayo$SLSEQ, Mayo$TICKET)
# Keep one row per distinct (category, subcategory, sequence, ticket) tuple.
catg_sin_rep_Mayo<-unique(catg_Mayo)
|
70e2fe284b02807ada7c2047099af7ab4c5255b9 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/imbalance/examples/mwmote.Rd.R | 4b284c142fb9b012cc4b15d029e6c7e093d87a8b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 294 | r | mwmote.Rd.R | library(imbalance)
### Name: mwmote
### Title: Majority weighted minority oversampling technique for imbalance
###   dataset learning
### Aliases: mwmote
### ** Examples
# Auto-extracted example from the mwmote() help page.
data(iris0)  # imbalanced binary-class dataset (presumably shipped with the `imbalance` package - confirm)
# Generates new minority examples
newSamples <- mwmote(iris0, numInstances = 100, classAttr = "Class")
|
442d66180dc7a0157e0ef6714f9c7dbc8a55ce46 | 1bb2de11a09ef71cfeae4e41488ed088dd7663c9 | /man/sigmaPA.Rd | 29374544f2d786a07aea812a3a4cd3f27e313e4f | [] | no_license | cran/icesAdvice | 4a6303a0e6e6a75514f445c3f5d802492e52af65 | 45e5fdb3907eeba51ba3feef94aec26cbab0c852 | refs/heads/master | 2022-02-28T17:54:25.057042 | 2022-02-18T09:00:02 | 2022-02-18T09:00:02 | 59,135,482 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,552 | rd | sigmaPA.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sigmaPA.R
\name{sigmaPA}
\alias{sigmaPA}
\title{Sigma from PA Reference Points}
\usage{
sigmaPA(lim, pa)
}
\arguments{
\item{lim}{the value of the limit reference point, e.g., Blim or Flim.}
\item{pa}{the value of the PA reference point, e.g., Bpa or Fpa.}
}
\value{
Implicit value of sigma.
}
\description{
Calculate the implicit sigma that was used to calculate PA reference points
from limit reference points (Xpa from Xlim).
}
\details{
The order of the parameters does not matter, so \code{sigmaPA(Fpa, Flim)} and
\code{sigmaPA(Flim, Fpa)} are equivalent.
}
\note{
The purpose of PA reference points is to apply a precautionary approach in
fisheries management.
This function is useful for reviewing PA reference points, when the advice
sheet provides the value of Xlim and Xpa but not the value of sigma.
The inference is based on the following relationships:
\deqn{B_\mathrm{pa} = B_\mathrm{lim} \exp(1.645\sigma_B)}{
Bpa = Blim * exp(1.645*sigmaB)}
\deqn{F_\mathrm{pa} = F_\mathrm{lim} \exp(-1.645\sigma_F)}{
Fpa = Flim * exp(-1.645*sigmaF)}
}
\examples{
sigmaPA(100, 120)
}
\seealso{
\code{\link{sigmaCI}} calculates the implicit sigma from a confidence
interval.
\code{\link{Bpa}} and \code{\link{Fpa}} calculate those reference points from
the limit reference points, based on a given sigma.
\code{\link{icesAdvice-package}} gives an overview of the package.
}
\author{
Arni Magnusson.
}
|
2bb16409bd2ad745c6e613d32e654d943c470109 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/springer/R/reformat.R | 1bc33c15e2bfa7b026b7b1fe2aa38c8e7a3f7424 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 772 | r | reformat.R | #' This function changes the format of the longitudinal data from wide format to long format
#'
#' @param k the number of repeated measurements/time points.
#' @param y the longitudinal response.
#' @param x a matrix of predictors, consisting of clinical covariates, genetic and environment factors, as well as gene-environment interactions.
#' @return A list with three components: \code{y}, the stacked response vector
#'   of length n*k (subject-major order); \code{x}, the long-format design
#'   matrix with a leading intercept column; and \code{id}, the subject
#'   identifier for each long-format row.
#' @export
reformat <- function(k, y, x) {
  n <- dim(y)[1]
  response <- y
  # Stack the response row by row: subject i contributes rows (i-1)*k+1..i*k.
  # Vectorized replacement of the original scalar double loop; the as.numeric()
  # coercions keep the double storage mode the loop's rep(0, n*k) template had.
  id <- as.numeric(rep(seq_len(n), each = k))
  y <- as.numeric(t(as.matrix(response)))
  # Repeat each subject's covariate row k times, then rebuild the long-format
  # pieces exactly as before so downstream column handling is unchanged
  # (times = rep(k, n) still errors if nrow(x) != n, as the original did).
  data <- cbind(id = id, y, x[rep(seq_len(nrow(x)), times = rep(k, n)), ])
  data <- as.data.frame(data)
  y <- data[, 2]
  x <- data[, -c(1, 2)]
  x <- cbind(data.frame(rep(1, length(data$id))), x)  # prepend intercept column
  x <- data.matrix(x)
  return(list("y" = y, "x" = x, "id" = id))
}
|
ccbfa3c7d8001345e0894e460585c04b801f07dc | 1a52bdf933e7926a22e831f786e168ff995ffe9e | /App2/Code/plot_num_ER_NOgel_blackColor.R | bd7d61a435fd6bbf08a15bfb586b2a119f65f46b | [] | no_license | lizhu06/MOG_exp | 03d7844a3b86e1868b66fbc9844b66054c4bdf1c | 0ef852a499cc806cddaea337540672474d90a791 | refs/heads/master | 2020-06-01T04:41:01.088791 | 2019-06-06T19:57:17 | 2019-06-06T19:57:17 | 190,640,889 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,343 | r | plot_num_ER_NOgel_blackColor.R | rm(list=ls())
##### duplicate genes belonging to multiple groups
# For each of five runs (file names suggest cross-validation splits), draw a
# scatter/line plot of the number of features selected (x) against the number
# of selected features inside the ER pathway (y) for several penalised
# regression methods (MOG, SOG, Lasso, GL, SGL, TGL), and write one
# black-and-white PDF per run.
setwd("/mnt/glusterfs/liz86/MOG_Regression/Exp_revision1/App2/")
old_dir <- "/mnt/glusterfs/liz86/MOG_Regression/Exp/app1_morePathways/"
### plot all variable selection
# MOG and SOG results are loaded once and used unchanged inside the loop.
load("Results/num_nonzero_MOG_balanced_nodup_lassoInit.RData")
load("Results/num_nonzero_ER_MOG_balanced_nodup_lassoInit.RData")
nonzero_MOG <- num_nonzero_MOG_nodup
nonzero_ER_MOG <- num_nonzero_ER_MOG_nodup
load(paste0(old_dir, "Results/num_nonzero_SOG_balanced.RData"))
load(paste0(old_dir, "Results/num_nonzero_ER_SOG_balanced.RData"))
nonzero_SOG <- num_nonzero_SOG
nonzero_ER_SOG <- num_nonzero_ER_SOG
for(i in 1:5){
  # The per-method objects are reloaded on EVERY iteration on purpose: the
  # assignments below (e.g. nonzero_GL <- nonzero_GL[i,]) overwrite the loaded
  # object with its i-th row, so load() must run again before the next
  # iteration.  Do not hoist these out of the loop.
  load(paste0(old_dir, "Results/Lasso_nonzero_balanced.RData"))
  load(paste0(old_dir, "Results/Lasso_nonzero_ER_balanced.RData"))
  nonzero_lasso <- nonzero[i,]
  nonzero_ER_lasso <- nonzero_ER[i,]
  # NOTE(review): unlike the Lasso files, the GL/SGL paths below have no
  # "Results/" prefix - confirm those .RData files really sit at old_dir root.
  load(paste0(old_dir, "Lasso_nonzero_GL_balanced.RData"))
  load(paste0(old_dir, "Lasso_nonzero_ER_GL_balanced.RData"))
  nonzero_GL <- nonzero_GL[i,]
  nonzero_ER_GL <- nonzero_ER_GL[i,]
  load(paste0(old_dir, "nonzero_SGL_balanced.RData"))
  load(paste0(old_dir, "nonzero_ER_SGL_balanced.RData"))
  nonzero_SGL <- nonzero_SGL[i,]
  nonzero_ER_SGL <- nonzero_ER_SGL[i,]
  # GEL is loaded but its curve is commented out of the plot below.
  load("Results/nonzero_GEL_balanced.RData")
  load("Results/nonzero_ER_GEL_balanced.RData")
  nonzero_GEL <- nonzero_GEL[i,]
  nonzero_ER_GEL <- nonzero_ER_GEL[i,]
  load("Results/nonzero_TSG_balanced.RData")
  load("Results/nonzero_ER_TSG_balanced.RData")
  nonzero_TGL <- nonzero_TSG[i,]
  nonzero_ER_TGL <- nonzero_ER_TSG[i,]
  # One 5x5-inch PDF per run; all methods use black (col=1) and are
  # distinguished only by plotting symbol (pch 1-6).
  pdf(paste("Results/ER_genesInER_cv", i,"_balanced_noGEL_blackColor.pdf",sep=""),width=5,height=5)
  plot(nonzero_MOG,nonzero_ER_MOG,xlim=c(1,200),ylim=c(1,150),
    ylab="Number of features in ER pathway",xlab="Number of features selected",
    col=1,type="b",pch=1, cex=0.5)
  lines(nonzero_SOG,nonzero_ER_SOG,col=1,type="b",pch=2, cex=0.5)
  lines(nonzero_lasso,nonzero_ER_lasso,col=1,type="b",pch=3, cex=0.5)
  lines(nonzero_GL,nonzero_ER_GL,col=1,type="b",pch=4, cex=0.5)
  lines(nonzero_SGL,nonzero_ER_SGL,col=1,type="b",pch=5, cex=0.5)
  #lines(nonzero_GEL,nonzero_ER_GEL,col=6,type="b",pch=6)
  lines(nonzero_TGL,nonzero_ER_TGL,col=1,type="b",pch=6, cex=0.5)
  # y = x reference: every selected feature falling inside the ER pathway.
  abline(0,1,lty=2)
  #abline(0,0.11,lty=2)
  legend("topleft",c("MOG","SOG","Lasso","GL","SGL", "TGL"),col=1,
    lty=1,pch=1:6)
  dev.off()
}
|
150783f23473d5a63f98bff317478cbeea1cdfc3 | 932c5276d167de9c5c5347efd5bb6ead1e1cf422 | /E3/test/scatterTest.R | f2e57032b63ee917100edc5905e599a543bde770 | [] | no_license | Freedomxf01/Rpackage | 72a3c90cc71265715d30cbf28dfb79156e3bfdb9 | 70b8002865d8c469c7ecda8fc5a7fb2e5773ca3b | refs/heads/master | 2023-04-12T04:03:37.877603 | 2021-05-08T03:16:04 | 2021-05-08T03:16:04 | 363,077,268 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,535 | r | scatterTest.R | # setwd('C:\\material\\MBMA\\myPackage\\test\\scatterPlot\\E3')
# devtools::install()
# Manual smoke test for the E3 scatter-plot htmlwidget: starts a one-widget
# shiny app that reads a PSA efficacy CSV, pivots it to one y-column per
# group, and hands the JSON-encoded result to E3Scatter().
library(E3)
library(shiny)
library(jsonlite)
runApp(list(
  ui = bootstrapPage(
    E3ScatterOutput("gauge1")
  ),
  server = function(input, output) {
    # reactive that generates a random value for the gauge
    # example use of the automatically generated render function
    output$gauge1 <- renderE3Scatter({
      # df <- data.frame(xcol = c(1,2,3,4,5),
      #                  ycol = c(6,7,8,9,10),
      #                  gcol = c("A", "A", "C", "B", "B"), stringsAsFactors = F)
      # NOTE(review): hard-coded Windows path - this test only runs on the
      # author's machine.
      file_path <- "C:\\material\\MBMA\\myPackage\\test\\scatterPlot\\20160226_186_CDF-Final_V2_0_PSA_Efficacy_41papers.csv"
      df <- read.csv(file_path,header = TRUE, sep = ",", quote = "\"",stringsAsFactors = FALSE,na.strings = c("NA", "na", "", " "))
      # x = ARM.TIME1, y = RSP.VAL, grouping column taken from ARM.TRT.
      df$xcol <- df$ARM.TIME1
      df$ycol <- df$RSP.VAL
      df$gcol <- df$ARM.TRT
      df <- df[,c("xcol", "ycol", "gcol")]
      unique_group <- unique(df$gcol)
      unique_group <- unique_group[!is.na(unique_group)]
      row_num <- nrow(df)
      # Pivot to wide format: one column per group value, NA wherever the row
      # belongs to a different group.
      for (x in unique_group) {
        v <- rep(NA, row_num)
        v[which(df$gcol == x)] <- df$ycol[which(df$gcol == x)]
        df[[x]] <- v
      }
      # NOTE(review): `-which(...)` selects ZERO columns when which() matches
      # nothing; safe here only because ycol/gcol/xcol are created above.
      df <- df[, -which(names(df) %in% c('ycol', 'gcol'))]
      ycols <- names(df)
      ycols <- ycols[-which(ycols == "xcol")]
      E3Scatter(list(data = toJSON(df), cols = ycols))
    })
  }
))
730680be76a6cbe8735ec8e96004c69d9ad4b2b1 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/StressStrength/examples/SSR.Rd.R | 98c8dd850c3f00945a498903ca7b71c56bb03abd | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 525 | r | SSR.Rd.R | library(StressStrength)
### Name: SSR
### Title: Computation of reliability of stress-strength models
### Aliases: SSR
### Keywords: distribution models
### ** Examples
# let X be a normal r.v. with mean 1 and sd 1;
# and Y a normal r.v. with mean 0 and sd 2
# X and Y independent
parx<-c(1, 1)
pary<-c(0, 2)
# reliability of the stress-strength model (X=strength, Y=stress)
SSR(parx,pary)
# changing the parameters of Y
pary<-c(1.5, 2)
# reliability of the stress-strength model (X=strength, Y=stress)
SSR(parx,pary)
|
a967c65193b59634ceb71baf050432b196a5ef16 | e91d3e01663cea7314679cad9d7fae8e4387253a | /Cross_validation_lm_CDA.R | 72cb41f6e1915f89fb66a311d8838663beea80c1 | [] | no_license | HelloFloor/CaseStudyLineair2018 | 736a95aec14240e19027ac04bb7d724adc4693de | 37b8d1600e45a688f504a30e4a7d58e733f7ea46 | refs/heads/master | 2020-04-07T08:50:36.713519 | 2019-01-21T12:38:03 | 2019-01-21T12:38:03 | 158,229,938 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,099 | r | Cross_validation_lm_CDA.R | ################################################################################
# Run-Time Environment: R version 3.4.2
# Author: Ilse van Beelen
# Script: Model_final.R
# Purpose of script: Cross-validation of final model CDA
# Datafiles used: Clean_data_CDA_2018-12-14.csv;
# Data downloaded: Data downloaded from statline.cbs.nl
#
# Date: 2018-12-17
# Version: V.1.0
################################################################################
#### Libraries  ####
library(car)
#### Set up ####
rm(list = ls()) # empty work space
# NOTE(review): the header above mentions Clean_data_CDA_2018-12-14.csv but
# the file actually read here is the 2018-12-21 version - confirm which.
Data_CDA <- read.csv("1_clean_data/Clean_data_CDA_2018-12-21.csv",
                     stringsAsFactors=F, header = T)
#### Final model  ####
# Final linear model for log(CDA_frac); observation 16 is excluded from the fit.
final_model_lm_CDA <- lm(log(CDA_frac) ~ Urban_index+ Mean_income+
                           Non_west + Frac_60plus,data = Data_CDA[-16,])
summary(final_model_lm_CDA)
#### Make folds ####
K <- 5
# Fold labels 1..K recycled over all rows, then randomly permuted.
index <- rep(1:K, floor(nrow(Data_CDA)/K)+1)[1:nrow(Data_CDA)]
summary(as.factor(index))
fold.index <- sample(index)
# Mean squared prediction error: the average of (x - y)^2 taken over the
# length of the first argument.
Loss <- function(x, y) {
  squared_error <- (x - y)^2
  sum(squared_error) / length(x)
}
loss <- numeric(K)
# NOTE(review): `training` is created but never used, and `training.fit` is
# the model already fitted above on (almost) all rows - so this loop scores
# the SAME fitted model on every fold instead of refitting per fold.  It is
# not a genuine K-fold cross-validation; confirm whether
# lm(formula(final_model_lm_CDA), data = training) was intended.
for (k in 1:K){
  training <- Data_CDA[fold.index!=k, ]
  validation <- Data_CDA[fold.index==k, ]
  training.fit <- final_model_lm_CDA
  validation.predict <- predict(training.fit, newdata=validation, type='response')
  loss[k] <- Loss(log(validation$CDA_frac), validation.predict)
}
#average, with weights equal to the number of objects used to calculate the loss at each fold:
mean(loss)
# In-sample mean squared error of the final model, for comparison.
mean(final_model_lm_CDA$residuals^2)
# Second CV pass: K, index and Loss are redefined here even though equivalent
# definitions already exist above (harmless duplication); new random folds
# are drawn.
K <- 5
index <- rep(1:K, floor(nrow(Data_CDA)/K)+1)[1:nrow(Data_CDA)]
fold.index <- sample(index)
Loss <- function(x, y){
  sum((x-y)^2)/length(x)
}
loss2 <- numeric(K)
# NOTE(review): `lm_CDA_2` is never defined in this script, and the
# rm(list = ls()) at the top clears any copy from the workspace, so this loop
# cannot run as written.  It presumably referred to an alternative model from
# another script - restore that definition or remove this block.
for (k in 1:K){
  training <- Data_CDA[fold.index!=k, ]
  validation <- Data_CDA[fold.index==k, ]
  training.fit <- lm_CDA_2
  validation.predict <- predict(training.fit, newdata=validation, type='response')
  loss2[k] <- Loss(log(validation$CDA_frac), validation.predict)
}
mean(loss2)
mean(loss)
|
c1a7b182c058511f0bdb1e4d6f84441c5e9f4e6d | 6151fe4a5f93601b2164f90167ec9bd266fb331d | /man/get_GO_list_from_ontologies_with_evidence_codes.Rd | 4ba5846d47d853d518f20d3d45b8388c085ef4f4 | [] | no_license | VCCRI/XGSA | 2dd72f583bf118fc32740a5aa84aeb2b2ff5609c | 5f8ddd72c39831939eeb01e25bf3b1af2886a5c9 | refs/heads/master | 2021-06-16T17:51:50.410771 | 2021-03-01T10:05:40 | 2021-03-01T10:05:40 | 54,952,407 | 9 | 4 | null | 2020-05-15T08:52:10 | 2016-03-29T06:44:58 | R | UTF-8 | R | false | true | 1,537 | rd | get_GO_list_from_ontologies_with_evidence_codes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/XGSA.R
\name{get_GO_list_from_ontologies_with_evidence_codes}
\alias{get_GO_list_from_ontologies_with_evidence_codes}
\title{get_GO_list_from_ontologies_with_evidence_codes}
\usage{
get_GO_list_from_ontologies_with_evidence_codes(species,
evidence.codes = c("EXP", "IDA", "IPI", "IMP", "IGI", "IEP", "TAS", "IC"),
ontologies = c("biological_process", "molecular_function",
"cellular_component"))
}
\arguments{
\item{species}{Species name in the form 'hsapiens'}
\item{evidence.codes}{A character vector of evidence codes, defaults to 'direct' evidence = c("EXP","IDA","IPI","IMP","IGI","IEP", "TAS", "IC"). A description of GO evidence codes can be found here: http://geneontology.org/page/guide-go-evidence-codes}
\item{ontologies}{A character vector of the desired ontologies to query, defaults to all three of them - i.e. c("biological_process", "molecular_function", "cellular_component")}
}
\value{
This function returns a named list of GO terms with annotated Ensembl gene IDs within each element.
}
\description{
This function retrieves GO mappings with only the supplied evidence codes and from only the supplied ontologies and converts the result into a list.
}
\details{
This function retrieves GO mappings with only the supplied evidence codes and from only the supplied ontologies and converts the result into a list.
}
\examples{
Human_trim_GO <- get_GO_list_from_ontologies_with_evidence_codes('hsapiens')
summary(Human_trim_GO[1:5])
}
|
52b997cc369e1c1afc238a0aed44921342202ce1 | 750cacb8a12d2ef36c343072f979be48a01e3209 | /rCode/finalScripts/functionSBO.R | 697d5637afc989b1cfb10c2f8d46fb540e537c9d | [] | no_license | mterion/capstoneProj | b192de37c7128677d6bfa5dbf8247f92bf1646f3 | 219f13700db7ced0b0a6331e9a59d217a1847e66 | refs/heads/master | 2023-03-02T09:37:19.990896 | 2021-01-28T06:47:51 | 2021-01-28T06:47:51 | 319,615,507 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,221 | r | functionSBO.R | # ngram creation with ngram FreqDf
# Build an n-gram frequency table from a quanteda tokens object.
#
# tokensOfTheCorpus: quanteda tokens object for the corpus.
# ngramValue:        n-gram order (1 = unigram, 2 = bigram, ...).
#
# Returns a data.frame with columns `featNames` (the n-gram, words joined by
# "_") and `freq` (its corpus-wide count), with default integer row names.
ngramFreqDfFun <- function(tokensOfTheCorpus, ngramValue){
  gc()  # the document-feature matrices below can be large; reclaim memory first
  ngramDfm <- dfm(tokens_ngrams(tokensOfTheCorpus, ngramValue))
  result <- data.frame(featNames = featnames(ngramDfm),
                       freq = colSums(ngramDfm))
  rm(ngramDfm)
  rownames(result) <- c()  # drop the n-gram row names in favour of plain indices
  return(result)
}
#=============================
## Ngram3
#=============================
# get df: freq(wi-2, wi-1, wi)
# Rows of the trigram frequency table whose first two words equal the given
# bigram prefix, i.e. freq(w_{i-2}, w_{i-1}, w_i) for a fixed (w_{i-2}, w_{i-1}).
#
# ngram2Prefix_: bigram prefix in "word1_word2" form.
# ngram3FreqDf_: data.frame with columns `featNames` (trigrams joined by "_")
#                and `freq`.
#
# Returns the matching subset of ngram3FreqDf_, or an empty data.frame with
# the same columns when no trigram starts with the prefix.
getNgram3HitFreqtDf <- function(ngram2Prefix_, ngram3FreqDf_){
  # Anchor at the start and require a trailing separator,
  # e.g. "at_the" -> "^at_the_".
  prefixPattern <- paste0("^", ngram2Prefix_, "_")
  matchIdx <- grep(prefixPattern, ngram3FreqDf_$featNames)
  if (length(matchIdx) == 0) {
    # No hit: return an empty template with the expected column types.
    return(data.frame(featNames = character(), freq = integer()))
  }
  ngram3FreqDf_[matchIdx, ]
}
# get value: freq(wi-2, wi-1) in ngram2
# Frequency of the bigram prefix itself, freq(w_{i-2}, w_{i-1}), looked up in
# the bigram table. Returns NA when the prefix is not present; when it occurs
# more than once, the first occurrence wins (as in the original dplyr lookup).
getNgram2HitFreq <- function(ngram2Prefix_, ngram2FreqDf_){
  ngram2FreqDf_$freq[which(ngram2FreqDf_$featNames == ngram2Prefix_)[1]]
}
# get df sbo freq scores for ngram3: freq(wi-2, wi-1, wi) / freq(wi-2, wi-1)
# Stupid-backoff trigram scores: for every matching trigram row, add
#   score = freq(w_{i-2}, w_{i-1}, w_i) / freq(w_{i-2}, w_{i-1}).
#
# ngram2HitFreq_:  frequency of the shared bigram prefix (the denominator).
# ngram3HitFreqDf_: data.frame of matching trigrams with columns
#                   `featNames` and `freq`.
#
# Returns the input data.frame with a `score` column appended.
getNGram3FreqScoresDf <- function(ngram2HitFreq_, ngram3HitFreqDf_){
  scored <- ngram3HitFreqDf_
  scored$score <- scored$freq / ngram2HitFreq_
  scored
}
# get df sbo scores for ngram3: freq(wi-2, wi-1, wi) / freq(wi-2, wi-1)
# Turn scored trigram rows into candidate next-word (w_i) scores: order by
# descending score, keep only the third word of each trigram, and drop the
# raw frequency column.
#
# ngram3FreqScoresDf_: data.frame with columns `featNames`, `freq`, `score`.
# Returns a data.frame with columns `featNames` (now just w_i) and `score`,
# best candidates first.
getWiScoresDfngram3 <- function(ngram3FreqScoresDf_){
  ranked <- ngram3FreqScoresDf_[order(ngram3FreqScoresDf_$score, decreasing = TRUE), ]
  # str_split_fixed(..., 3)[, 3] keeps everything after the second "_".
  ranked$featNames <- str_split_fixed(ranked$featNames, "_", 3)[, 3]
  ranked$freq <- NULL
  ranked
}
#=============================
## Ngram2
#=============================
# get df: freq(wi-1, wi)
# Backoff step: rows of the bigram table that start with the LAST word of the
# bigram prefix, i.e. freq(w_{i-1}, w_i) for the fixed w_{i-1}.
# (The original comment said "first word", but element [2] of the split is the
# second/last word — which is the correct backoff behaviour.)
#
# ngram2Prefix_: bigram prefix "w_{i-2}_w_{i-1}".
# ngram2FreqDf_: data.frame with columns `featNames` (bigrams) and `freq`.
#
# Returns the matching subset, or an empty data.frame with the same columns.
getNgram2HitFreqtDf <- function(ngram2Prefix_, ngram2FreqDf_){
  # Last word of the prefix, e.g. "at_the" -> "the".
  lastWord <- strsplit(ngram2Prefix_, "_", fixed = TRUE)[[1]][2]
  # Match bigrams beginning with that word: "^the_".
  lastWordPattern <- paste0("^", lastWord, "_")
  matchIdx <- grep(lastWordPattern, ngram2FreqDf_$featNames)
  if (length(matchIdx) == 0) {
    return(data.frame(featNames = character(), freq = integer()))
  }
  ngram2FreqDf_[matchIdx, ]
}
# get value: freq(wi-1) in ngram1
# Unigram frequency of the LAST word of the bigram prefix, freq(w_{i-1}),
# used as the denominator of the backed-off bigram scores. Returns NA when
# the word is absent; the first occurrence wins when duplicated.
getNgram1HitFreq <- function(ngram2Prefix_, ngram1FreqDf_){
  lastWord <- strsplit(ngram2Prefix_, "_", fixed = TRUE)[[1]][2]
  ngram1FreqDf_$freq[which(ngram1FreqDf_$featNames == lastWord)[1]]
}
# get df sbo freq scores for ngram2: freq(wi-1, wi) / freq(wi-1)
# Stupid-backoff bigram scores for candidate continuations:
#   score      = freq(w_{i-1}, w_i) / freq(w_{i-1})
#   alphaScore = score * backoff factor alpha
# sorted so the best (highest alphaScore) candidates come first.
#
# ngram1HitFreq_:      freq(w_{i-1}) — the denominator.
# ngram2HitFreqDf_:    data.frame of matching bigrams (`featNames`, `freq`).
# backOffFactorAlpha_: backoff penalty applied to bigram-level scores.
getNGram2FreqScoresDf <- function(ngram1HitFreq_, ngram2HitFreqDf_, backOffFactorAlpha_){
  scored <- ngram2HitFreqDf_
  scored$score <- scored$freq / ngram1HitFreq_
  # BUG FIX: the original multiplied by the global `backOffFactorAlpha`
  # (missing trailing underscore), silently ignoring the caller-supplied
  # `backOffFactorAlpha_` argument.
  scored$alphaScore <- scored$score * backOffFactorAlpha_
  scored[order(scored$alphaScore, decreasing = TRUE), ]
}
# get df sbo scores for ngram2: freq(wi-1, wi) / freq(wi-1)
# Turn scored bigram rows into candidate next-word (w_i) scores: order by
# descending score, keep only the second word of each bigram, and drop the
# raw frequency column.
#
# ngram2FreqScoresDf_: data.frame with at least `featNames`, `freq`, `score`
#                      (and `alphaScore` when produced by the scoring step).
# Returns the same data.frame minus `freq`, with `featNames` reduced to w_i,
# best candidates first.
getWiScoresDfngram2 <- function(ngram2FreqScoresDf_){
  ranked <- ngram2FreqScoresDf_[order(ngram2FreqScoresDf_$score, decreasing = TRUE), ]
  # str_split_fixed(..., 2)[, 2] keeps everything after the first "_".
  ranked$featNames <- str_split_fixed(ranked$featNames, "_", 2)[, 2]
  ranked$freq <- NULL
  ranked
}
#=============================
## ML ngram1
#=============================
# get df: freq(wi)
# Maximum-likelihood unigram fallback: score every unigram by its relative
# frequency and apply the (doubly backed-off) alpha penalty, sorted best first.
#
#   score      = freq(w_i) / sum of all unigram frequencies
#   alphaScore = score * backOffFactorAlpha_
#
# ngram1FreqDf_:       unigram table (`featNames`, `freq`).
# backOffFactorAlpha_: backoff penalty for unigram-level scores.
getNgram1HitFreqtDf <- function(ngram1FreqDf_, backOffFactorAlpha_){
  scored <- ngram1FreqDf_
  scored$score <- scored$freq / sum(scored$freq)
  # BUG FIX: the original multiplied by the global `backOffFactorAlpha`
  # (missing trailing underscore) instead of the `backOffFactorAlpha_`
  # argument, so the caller's backoff factor was ignored.
  scored$alphaScore <- scored$score * backOffFactorAlpha_
  scored[order(scored$alphaScore, decreasing = TRUE), ]
}
#=============================
## Return best score
#=============================
# wiScoresDfNgram3
# wiScoresDfNgram2
#
# getBestScoreRowNgram3Df <- function(wiScoresDf_){
# bestScoreRowDf <- wiScoresDf_ %>%
# mutate(featNames = str_split_fixed(featNames, "_", 2)[, 2]) %>%
# filter(row_number()==1)
# return(bestScoreRowDf)
# }
#
# # get df sbo scores but just first row
# getBestScoreRowNgram2Df <- function(wiScoresDf_){
# bestScoreRowDf <- wiScoresDf_ %>%
# filter(row_number()==1)
# return(bestScoreRowDf)
# }
|
fb576b354e5097867309e821edc01bd6bb8e1ef7 | 117ec21c68e99e5451bc67327b057cf4d8026a97 | /server.R | 06c3ae07128b209938c5de964738bcd954c06425 | [] | no_license | JonnyPhillips/spatial2_partidos | 4ed341afce8bc1cb61a977f2eae9f5a1ca10d147 | b953c38e98eb195057783200f21fc322d0fe9776 | refs/heads/master | 2020-12-29T04:55:25.023085 | 2020-02-05T20:18:22 | 2020-02-05T20:18:22 | 238,460,888 | 0 | 2 | null | 2020-02-05T17:09:11 | 2020-02-05T13:48:32 | R | UTF-8 | R | false | false | 26,927 | r | server.R | library(plyr)
library(data.table)
library(sp)
library(sf)
library(spdep)
library(scales)
library(leaflet)
library(rgeos)
library(raster)
library(maptools)
library(ggplot2)
library(httr)
library(ape)
library(RCurl)
library(digest)
library(dplyr)
library(DT)
library(magrittr)
library(shinyalert)
library(tidyr)
library(shiny)
#setwd("C:/Users/jonny/Google Drive/Academic/FGV-SP/CEPESPData/Spatial2 Shiny App/New_apps_2018/Spatial2 Oct19/spatial_parties")
source("global.R")
source("database.R")
#input <- c()
#input$State <- "CE"
#input$cargo <- 6
#input$turno_value <- turno <- 1
#input$Party <- "PT"
#input$Year <- 2014
#input$Indicator <- 1
# Shiny server for the CEPESP "spatial parties" map application. Builds a
# leaflet choropleth of a party's municipal vote distribution in one state,
# plus spatial-concentration statistics (G index, Moran's I, LISA clusters).
# Relies on objects created by the sourced helper scripts visible at the top
# of this file (global.R / database.R): `party_template`, `mun` (shapefile),
# and `db_get_party_elections()`.
spatial2Server <- function(input, output, session) {
  ### Turno ###
  # Electoral round: only presidential/gubernatorial races (cargo 1 or 3) can
  # have a second round; everything else is forced to round 1.
  turno <- reactive({
    cargo <- as.numeric(input$cargo)
    if(cargo %in% c(1,3)){
      return(input$turno_value)
    } else {
      return(1)
    }
  })
  # Round selector UI, shown only for positions that have a second round.
  output$turno_UI <- renderUI({
    cargo <- as.numeric(input$cargo)
    if(cargo %in% c(1,3)){
      selectizeInput("turno_value",
                     label = NULL,
                     selected = NULL,
                     choices = list("",
                                    "1º Turno" = 1,
                                    "2º Turno" = 2),
                     options = list(
                       placeholder = "Selecione um turno"
                     ))
    }
  })
  ### Partido ###
  # Parties that actually contested the selected state/position/year/round,
  # looked up in `party_template` (loaded outside this function).
  # NULL until state, year and position are all chosen.
  partidos_escolhas <- reactive({
    cat("Parsing partidos_escolhas\n")
    cargo <- as.numeric(input$cargo)
    ano <- as.numeric(input$Year)
    turno_use <- turno()
    uf <- input$State
    print(cargo)
    if(uf == "" | is.na(ano) | is.na(cargo)){
      cat("Parsing partidos_escolhas. NULL\n")
      return(NULL)
    }
    choices <- (party_template$SIGLA_PARTIDO[party_template$CODIGO_CARGO == cargo &
                                               party_template$SIGLA_UF == uf &
                                               party_template$ANO_ELEICAO == ano &
                                               party_template$NUM_TURNO == turno_use])
    cat("Parsing partidos_escolhas. CHECK!!!\n")
    return(choices)
  })
  # Party dropdown, populated from the lookup above.
  output$party_UI <- renderUI({
    cat("Outputing party_UI.\n")
    partidos <- partidos_escolhas()
    if(is.null(partidos)){
      cat("Outputing party_UI. NULL\n")
      return(NULL)
    }
    UI <- selectizeInput("Party",
                         label = NULL,
                         choices = partidos,
                         selected = 1,
                         options = list(placeholder = 'Escolha um partido:',allowEmptyOption=F))
    cat("Outputing party_UI. CHECK!!!\n")
    return(UI)
  })
  ### Query ###
  # State-level valid-vote totals, preprocessed to .rds files on disk.
  state_totals <- reactive({
    start <- Sys.time()
    cat("Downloading State Totals. ")
    ### Inputs ###
    ano <- input$Year
    cargo <- as.numeric(input$cargo)
    ### Loading State Totals
    state_totals <- readr::read_rds(paste0("data/state_totals/",ano,"_",cargo,".rds"))
    end_start <- difftime(Sys.time(), start, units = "secs")
    cat("CHECK!!! (",end_start, "seconds).\n", sep = "")
    return(state_totals)
  })
  # Municipality-level valid-vote totals for the selected state, from disk.
  mun_totals <- reactive({
    start <- Sys.time()
    cat("Downloading Municipal Totals. \n")
    ### Input ###
    ano <- input$Year
    uf <- input$State
    cargo <- as.numeric(input$cargo)
    ### Load municipal voting totals
    mun_totals <- readr::read_rds(paste0("data/mun_totals/", ano,"_", cargo,"_" , uf, ".rds"))
    end <- Sys.time()
    end_start <- round(difftime(end, start, units = "secs"),2)
    cat("Downloading Municipal Totals. CHECK!!! (",end_start, " seconds).\n", sep = "")
    return(mun_totals)
  })
  # Party vote data from the CEPESP API, fetched only when the user presses
  # the update button. The party acronym is mapped to its TSE ballot number
  # by hand because numbers were reused/renamed across years.
  banco <- eventReactive(input$button, {
    cat("Starting to download banco.\n")
    start <- Sys.time()
    withProgress(message="Por favor, espere...",
                 detail="Download dos dados",
                 value=0.3,{
                   uf <- input$State
                   partido <- switch(input$Party,"PRB"=10,"PP"=11,"PDT"=12,"PT"=13,"PTB"=14,
                                     "PMDB"=15,"PSTU"=16,"PSL"=17,"REDE"=18,"PTN"=19,
                                     "PSC"=20,"PCB"=21,"PR"=22,"PPS"=23,"DEM"=25,
                                     "PSDC"=27,"PRTB"=28,"PCO"=29,"NOVO"=30,"PHS"=31,
                                     "PMN"=33,"PMB"=35,"PTC"=36,"PSB"=40,"PV"=43,
                                     "PRP"=44,"PSDB"=45,"PSOL"=50,"PEN"=51,"PPL"=54,
                                     "PSD"=55,"PC do B"=65,"PT do B"=70,"SD"=77,"PROS"=90,
                                     "PAN"=26,"PGT"=30,"PST"=18,"PL"=22,"PRONA"=56,"PRP"=44,
                                     "PEN"=44,"PPL"=54,"PHS"=31,"MDB"=15,"CDN"=23,"REP"=10,
                                     "PMN"=33,"AVANTE"=70,"DC"=27,"SD"=77,"PODE"=19,"PATRI"=51)
                   #Adjustment because old PSD (up to 2002) is 41 but clashes with new PSD
                   partido <- case_when(input$Party=="PSD" & input$Year<=2002~41,
                                        input$Party=="PSD" & input$Year>=2010~55,
                                        TRUE~partido)
                   cargo <- as.numeric(input$cargo)
                   if(is.null(partidos_escolhas())){
                     cat("Starting to download banco. NULL\n")
                     return(1)
                   }
                   cat("Downloading main data (uf=", uf, "; partido=", partido, "; cargo=", cargo,"; ano=",input$Year,"; position=",cargo,"; turno=",turno(),")\n", sep = "")
                   banco <- db_get_party_elections(year = input$Year,
                                                   position = cargo,
                                                   candidate_or_party_number = partido,
                                                   state = uf,
                                                   turn = turno())
                   end_beginning <- round(difftime(Sys.time(), start, units = "secs"), 2)
                   cat("CHECK!!! (Num rows: ",dim(banco)[1],", ", end_beginning, "seconds)\n", sep = "")
                 })
    return(banco)
  })
  # Merge the downloaded party votes with municipal/state totals and compute
  # vote shares, the G (concentration) index, and the location quotient (LQ).
  # Returns an empty-but-typed data.table when the query matched no rows.
  d <- reactive({
    cat("Calculating 'd' value. \n")
    banco_use <- banco()
    # Guard: before the first button press `banco` has not resolved yet.
    if(any(class(banco_use) == c("reactive"))){
      cat("Calculating 'd' value. NULL\n")
      return(NULL)
    }
    start <- Sys.time()
    withProgress(message="Por favor, espere...",detail="Download dos dados", value=0.3,{
      d <- data.table::as.data.table(banco_use)
      cat("Calculating 'd' value (pre), ",dim(d)[1]," rows. CHECK!!! (")
      if(dim(d)[1] != 0){
        #Ideally will be faster when can request specific state
        setkeyv(d,c('ANO_ELEICAO','COD_MUN_IBGE','NUMERO_PARTIDO'))
        #### Aggregations
        d <- merge(d,isolate(mun_totals()), by=c("COD_MUN_IBGE","NUM_TURNO"))
        #d <- merge(d,isolate(mun_totals), by=c("COD_MUN_IBGE","NUM_TURNO"))
        d <- merge(d,isolate(state_totals()), by=c("UF","NUM_TURNO"))
        #d <- merge(d,isolate(state_totals), by=c("UF","NUM_TURNO"))
        # Party's statewide total and the two share measures (in percent).
        d[,Tot_Partido := sum(QTDE_VOTOS), by=.(ANO_ELEICAO,UF,NUMERO_PARTIDO)]
        d[,Mun_Vote_Share := (QTDE_VOTOS/Tot_Mun)*100]
        d[,Party_Vote_Share := (QTDE_VOTOS/Tot_Partido)*100]
        incProgress(amount = 0.7)
        #### G-Index Calcs
        # G index: sum of squared gaps between the party's municipal vote
        # distribution and the electorate's distribution.
        d[,G_temp := (QTDE_VOTOS/Tot_Partido - Tot_Mun/Tot_State)^2]
        d[,G_Index := sum(G_temp),by=.(ANO_ELEICAO,UF,NUMERO_PARTIDO)] #Correct? CHECK
        #### LQ Calcs
        # Location quotient: party's share of its own votes in a municipality
        # relative to that municipality's share of the state electorate.
        d[,LQ := (QTDE_VOTOS/Tot_Partido)/(Tot_Mun/Tot_State),by=.(ANO_ELEICAO,UF,NUMERO_PARTIDO)] #Correct?
      } else {
        d <- data.table("UF" = character(),
                        "NUMERO_PARTIDO" = integer(),
                        "ANO_ELEICAO" = integer(),
                        "COD_MUN_IBGE" = integer(),
                        "QTDE_VOTOS" = integer())
      }
      end <- Sys.time()
      end_beginning <- round(difftime(end,start, units = "secs"), 2)
      cat("Calculating 'd' value, ",dim(d)[1]," rows. CHECK!!! (", end_beginning, "seconds)\n")
      return(d)
    })
  })
  # Municipal polygons for the selected state, cut from the national
  # shapefile `mun` (loaded outside this function). The UF column is renamed
  # to avoid clashing with the election data's UF column on merge.
  mun_state_contig <- reactive({
    uf <- input$State
    ## Break
    if(uf == ""){
      return(NULL)
    }
    beginning <- Sys.time()
    names(mun)[which(names(mun)=="UF")] <- "UF_shape"
    mun_state <- mun[mun$UF_shape == uf,]
    mun_state_contig <- mun_state
    end <- Sys.time()
    cat("Time for trimming shapefile to state and first screening for neighbours:",end-beginning,".\n")
    return(mun_state_contig)
  })
  # Join vote data onto the state's polygons; municipalities where the party
  # got no votes are filled with zeros (and state/party totals imputed with
  # the mean) so the LISA/Moran calculations below have complete vectors.
  dz3 <- reactive({
    beginning <- Sys.time()
    dz2 <- d()
    if(is.null(dz2)){
      return(NULL)
    }
    year <- isolate(input$Year)
    dz3_temp <- merge(isolate(mun_state_contig()),dz2, by.x="GEOCOD",by.y="COD_MUN_IBGE",all.x=TRUE,all.y=FALSE)
    #dz3_temp <- merge(isolate(mun_state_contig),dz2, by.x="GEOCOD",by.y="COD_MUN_IBGE",all.x=TRUE,all.y=FALSE)
    dz3_temp@data[is.na(dz3_temp@data[,"LQ"])==TRUE,"LQ"] <- 0
    dz3_temp@data[is.na(dz3_temp@data[,"QTDE_VOTOS"])==TRUE,"Mun_Vote_Share"] <- 0
    dz3_temp@data[is.na(dz3_temp@data[,"QTDE_VOTOS"])==TRUE,"Tot_State"] <- mean(dz3_temp@data[,"Tot_State"],na.rm=TRUE)
    dz3_temp@data[is.na(dz3_temp@data[,"QTDE_VOTOS"])==TRUE,"Tot_Partido"] <- mean(dz3_temp@data[,"Tot_Partido"],na.rm=TRUE)
    # Re-attach the full municipal totals so every polygon has Tot_Mun.
    dz3_temp$Tot_Mun <- NULL
    dz3_temp <- merge(dz3_temp,isolate(mun_totals()),by.x="GEOCOD",by.y="COD_MUN_IBGE")
    #dz3_temp <- merge(dz3_temp,isolate(mun_totals),by.x="GEOCOD",by.y="COD_MUN_IBGE")
    dz3_temp@data[is.na(dz3_temp@data[,"QTDE_VOTOS"])==TRUE,"QTDE_VOTOS"] <- 0
    end <- Sys.time()
    cat("Time for merging candidate data with shapefile:",end-beginning,".\n")
    dz3 <- dz3_temp
    return(dz3)
  })
  # k-nearest-neighbour (k = 6) neighbour structure over municipality
  # centroids, used for all spatial autocorrelation statistics.
  state_nb2 <- reactive({
    if(is.null(mun_state_contig())){
      return(NULL)
    }
    state_nb2 <- knn2nb(knearneigh(coordinates(mun_state_contig()), k = 6))
    #state_nb2 <- knn2nb(knearneigh(coordinates(mun_state_contig), k = 6))
    return(state_nb2)
  })
  # Row-standardised spatial weights list built from the kNN neighbours.
  state_nb2listw <- reactive({
    beginning <- Sys.time()
    if(is.null(state_nb2())){
      return(NULL)
    }
    state_nb2listw <- nb2listw(state_nb2(),zero.policy=TRUE)
    #state_nb2listw <- nb2listw(state_nb2,zero.policy=TRUE)
    end <- Sys.time()
    cat("Time for identifying neightbours list: ",end-beginning,".\n")
    return(state_nb2listw)
  })
  # Local Moran (LISA) statistics on the LQ, plus a significance-based
  # High-High / High-Low / Low-High / Low-Low cluster classification.
  # NOTE(review): the four category conditions use >=0 / <=0, so a
  # municipality with a standardised value of exactly 0 matches several
  # branches and the LAST assignment wins — confirm this ordering is intended.
  dz5 <- reactive({
    beginning <- Sys.time()
    ## Reactive events
    dz4 <- dz3()
    ## Break
    if(is.null(dz4)){
      return(NULL)
    }
    state_nb2listw <- isolate(state_nb2listw())
    #dz4 <- dz3
    lisa <- as.data.frame(localmoran(dz4$LQ,state_nb2listw))
    dz4$LISA_I <- lisa[,"Ii"]
    dz4$LISA_p <- lisa[,"Pr(z > 0)"]
    dz4$LQ_stdzd <- as.vector(scale(dz4$LQ))
    dz4$LQ_stdzd_lag <- lag.listw(state_nb2listw,dz4$LQ_stdzd, NAOK=TRUE) #NAOK here helps or hinders?
    dz4$category <- "Insignificant"
    dz4$category[dz4$LISA_p<0.05 & dz4$LQ_stdzd>=0 & dz4$LQ_stdzd_lag>=0] <- "High-High"
    dz4$category[dz4$LISA_p<0.05 & dz4$LQ_stdzd>=0 & dz4$LQ_stdzd_lag<=0] <- "High-Low"
    dz4$category[dz4$LISA_p<0.05 & dz4$LQ_stdzd<=0 & dz4$LQ_stdzd_lag>=0] <- "Low-High"
    dz4$category[dz4$LISA_p<0.05 & dz4$LQ_stdzd<=0 & dz4$LQ_stdzd_lag<=0] <- "Low-Low"
    dz4$category <- as.factor(dz4$category)
    end <- Sys.time()
    print(c("Time to calculate Moran's I and LISA: ",end-beginning))
    dz5 <- dz4
  })
  # Empty base map; polygons are painted onto it later via leafletProxy.
  output$map <- renderLeaflet({
    leaflet(options = leafletOptions(zoomControl = FALSE)) %>%
      addProviderTiles(providers$CartoDB.Positron)
  })
  # State boundary polygon ("br" = whole country until a state is chosen).
  state_shp <- reactive({
    uf <- input$State
    if(uf == "")
      uf <- "br"
    state_shp <- readr::read_rds(paste0("data/shape_states/", uf,".rds"))
  })
  # Redraw the state outline and fly the viewport to it whenever the state
  # (and hence state_shp) changes.
  observe({
    uf <- input$State
    geo <- as.numeric(st_bbox(state_shp()))
    #geo <- as.numeric(st_bbox(state_shp))
    ### Base Map ###
    leafletProxy("map") %>%
      clearShapes() %>%
      clearControls() %>%
      addPolygons(data = state_shp(),
                  fillOpacity = 0,
                  weight = 3,
                  color = "black",
                  fillColor = NULL) %>%
      flyToBounds(geo[3], geo[4], geo[1], geo[2])
  })
  # Main choropleth: colour municipalities by the chosen indicator
  # ("Medida QL", "1" = party vote share, "2" = municipal vote share),
  # attach popups, a legend, and a green outline around High-High clusters.
  observe({
    dz5_use <- dz5()
    if(is.null(dz5_use)){
      return(NULL)
    }
    proxy <- leafletProxy("map")
    proxy %>%
      clearShapes() %>%
      addPolygons(data = state_shp(),
                  color = "black",
                  fillColor = NULL,
                  fillOpacity = 0)
    if (input$Indicator == "Medida QL"){
      pal <- colorBin(palette = c("white","light blue","#fcbba1","#fb6a4a","#ef3b2c","#cb181d"),
                      domain = c(0,1000),
                      bins = c(0,0.01,1,5,10,50,1000),
                      na.color = "white")
    } else if(input$Indicator == "1") {
      pal <- colorNumeric(palette = c("white","red"),
                          domain = c(0,max(dz5_use@data[["Party_Vote_Share"]],na.rm=TRUE)),
                          na.color = "white")
    } else {
      pal <- colorNumeric(palette = c("white","red"),
                          domain = c(0,max(dz5_use@data[["Mun_Vote_Share"]],na.rm=TRUE)),
                          na.color = "white")
    }
    # Per-municipality popup (Portuguese, user-facing — do not translate).
    # NOTE(review): the heading opens with <h4> but closes with </h2>.
    popup_text <- paste0("<h4>", dz5_use@data[,"NOME"], "</h2>",
                         "</br>",
                         dz5_use@data[,"NUMERO_PARTIDO"],
                         " recebeu ",
                         "<strong>", dz5_use@data[,"QTDE_VOTOS"], "</strong>",
                         " votos (",
                         round((dz5_use@data[,"QTDE_VOTOS"] / dz5_use@data[,"Tot_Partido"])*100,1),
                         "% do total recebido pelo partido no estado). </br>",
                         "</br> Votos váliados no município: ",
                         dz5_use@data[,"Tot_Mun"],
                         " (",
                         round((dz5_use@data[,"Tot_Mun"] / dz5_use@data[,"Tot_State"])*100,1),
                         "% do total do Estado).",
                         "<br>",
                         "<br> Medida QL: ", round(dz5_use@data[,"LQ"],3))
    # Popup for High-High cluster polygons only.
    # NOTE(review): `Tot_Deputado` is never created in this file — the other
    # popup divides by `Tot_Partido` here; this looks like a leftover from a
    # candidate-level app and probably makes the percentage NA. Verify.
    popup_text_hihi <- paste0("<h4>", dz5_use@data[dz5_use@data$category=="High-High","NOME"], "</h4>",
                              dz5_use@data[,"NUMERO_PARTIDO"],
                              " recebeu ",
                              dz5_use@data[dz5_use@data$category=="High-High","QTDE_VOTOS"],
                              " votos (",
                              round((dz5_use@data[dz5_use@data$category=="High-High","QTDE_VOTOS"]/dz5_use@data[dz5_use@data$category=="High-High","Tot_Deputado"])*100,1),
                              "% do total recebido pelo partido no estado)",
                              "</br> </br> Votos válidos no município: ",
                              dz5_use@data[dz5_use@data$category=="High-High","Tot_Mun"],
                              " (",
                              round((dz5_use@data[dz5_use@data$category=="High-High","Tot_Mun"]/dz5_use@data[dz5_use@data$category=="High-High","Tot_State"])*100,1),
                              "% do total do Estado)",
                              "<br>",
                              "<br> Medida QL: ",
                              round(dz5_use@data[dz5_use@data$category=="High-High","LQ"],3))
    proxy %>%
      clearControls() %>%
      addPolygons(data = dz5_use,
                  fillOpacity = 0.8,
                  weight = 0.1,
                  color = "black",
                  fillColor = pal(dz5_use@data[[switch(input$Indicator,"2"="Mun_Vote_Share",
                                                       "Medida QL" = "LQ",
                                                       "1" = "Party_Vote_Share")]]),
                  popup = popup_text) %>%
      addLegend(title = switch(input$Indicator,
                               "Medida QL" = "Medida QL",
                               "2" = "% Votos no <br>Município",
                               "1" = "% Votos do(a)<br>Partido"),
                pal = pal,
                values = dz5_use@data[[switch(input$Indicator,"2"="Mun_Vote_Share",
                                              "Medida QL"="LQ",
                                              "1" = "Party_Vote_Share")]],
                opacity = 0.8,
                labFormat = labelFormat(suffix = "%")) %>%
      addPolygons(data = dz5_use[dz5_use@data$category=="High-High",],
                  fillOpacity = 0,
                  weight = 2,
                  color = "green",
                  stroke = TRUE,
                  popup = popup_text_hihi)
  })
  ### Map for Download ###
  # Standalone copy of the map (no popups) rebuilt on button press, used by
  # the PNG download handler below.
  map_reactive <- eventReactive(input$button, {
    dz5_use <- dz5()
    if(is.null(dz5_use)){
      return(NULL)
    }
    geo <- as.numeric(st_bbox(state_shp()))
    if (input$Indicator == "Medida QL"){
      pal <- colorBin(palette = c("white","light blue","#fcbba1","#fb6a4a","#ef3b2c","#cb181d"),
                      domain = c(0,1000),
                      bins = c(0,0.01,1,5,10,50,1000),
                      na.color = "white")
    } else if(input$Indicator == "1") {
      # NOTE(review): the colorNumeric palettes below are immediately
      # overwritten by the quantile-based colorBin palettes — the first
      # assignment in each branch is dead code.
      pal <- colorNumeric(palette = c("white","red"),
                          domain = c(0,max(dz5_use@data[["Party_Vote_Share"]],na.rm=TRUE)),
                          na.color = "white")
      pal <- colorBin(palette = c("white","#fcbba1","#fc9272","#fb6a4a","#ef3b2c"),
                      domain = quantile(dz5_use@data[["Party_Vote_Share"]],probs=c(0,0.2,0.4,0.6,0.8,1),na.rm=T),
                      na.color = "white")
    } else {
      pal <- colorNumeric(palette = c("white","red"),
                          domain = c(0,max(dz5_use@data[["Mun_Vote_Share"]],na.rm=TRUE)),
                          na.color = "white")
      pal <- colorBin(palette = c("white","#fcbba1","#fc9272","#fb6a4a","#ef3b2c"),
                      domain = quantile(dz5_use@data[["Mun_Vote_Share"]],probs=c(0,0.2,0.4,0.6,0.8,1),na.rm=T),
                      na.color = "white")
    }
    leaflet(options = leafletOptions(zoomControl = FALSE)) %>%
      addProviderTiles(providers$CartoDB.Positron) %>%
      addPolygons(data = state_shp(),
                  fillOpacity = 0,
                  weight = 3,
                  color = "black",
                  fillColor = NULL) %>%
      flyToBounds(geo[3], geo[4], geo[1], geo[2]) %>%
      addPolygons(data = dz5_use,
                  fillOpacity = 0.8,
                  weight = 0.1,
                  color = "black",
                  fillColor = pal(dz5_use@data[[switch(input$Indicator,"2"="Mun_Vote_Share",
                                                       "Medida QL" = "LQ",
                                                       "1" = "Party_Vote_Share")]])) %>%
      addLegend(title = switch(input$Indicator,
                               "Medida QL" = "Medida QL",
                               "2" = "% Votos no <br>Município",
                               "1" = "% Votos do(a)<br>Partido"),
                pal = pal,
                values = dz5_use@data[[switch(input$Indicator,"2"="Mun_Vote_Share",
                                              "Medida QL"="LQ",
                                              "1" = "Party_Vote_Share")]],
                opacity = 0.8,
                labFormat = labelFormat(suffix = "%")) %>%
      addPolygons(data = dz5_use[dz5_use@data$category=="High-High",],
                  fillOpacity = 0,
                  weight = 2,
                  color = "green",
                  stroke = TRUE)
  })
  # PNG download of the snapshot map.
  # NOTE(review): mapshot() comes from the mapview package, which is not in
  # the library() calls at the top of this file — confirm it is loaded
  # elsewhere (e.g. global.R).
  output$map_down <- downloadHandler(
    filename = paste0(paste("CepespData",
                            input$Year,
                            input$State,
                            input$cargo,
                            "Turno",
                            input$turno_value,
                            input$Party,
                            sep="_"),
                      ".png")
    , content = function(file) {
      mapshot( x = map_reactive()
               , file = file
               , selfcontained = FALSE # when this was not specified, the function for produced a PDF of two pages: one of the leaflet map, the other a blank page.
      )
    } # end of content() function
  )
  ### End ###
  # Dissolve all High-High municipalities into a single (multi)polygon;
  # NULL when there are no High-High municipalities.
  clusters <- reactive({
    dz5_HH <- dz5()[dz5()$category=="High-High",]
    #dz5_HH <- dz5[dz5$category=="High-High",]
    if (dim(dz5_HH)[1]!=0){
      clusters <- gUnion(dz5_HH,dz5_HH)
    } else {
      clusters <- NULL
    }
  })
  # Split the dissolved geometry back into one SpatialPolygons object per
  # contiguous cluster (holes discarded), IDs "1", "2", ...
  clusters_sp <- reactive({
    if (!(is.null(clusters()))){
      clusters_sep <- slot(clusters()@polygons[[1]],"Polygons")
      #clusters_sep <- slot(clusters@polygons[[1]],"Polygons")
      clusters_sep <- clusters_sep[unlist(lapply(clusters_sep, function(x) x@hole==FALSE))] #Have to make sure aren't picking up holes too!
      polygons_list <- list()
      for (i in 1:length(clusters_sep)){
        polygons_list[[i]] <- Polygons(list(clusters_sep[[i]]),"test")
        polygons_list[[i]]@ID <- paste0(i)
      }
      clusters_sp <- SpatialPolygons(polygons_list)
    }
  })
  # Table of cluster centroids (x, y, Cluster_num); currently unused by the
  # outputs below but kept for downstream use.
  clusters_sp_cent_table <- reactive({
    if (!(is.null(clusters()))){
      clusters_sp_cent <- gCentroid(clusters_sp(),byid=TRUE)
      #clusters_sp_cent <- gCentroid(clusters_sp,byid=TRUE)
      clusters_sp_cent_table_temp <- as.data.frame(clusters_sp_cent@coords)
      clusters_sp_cent_table_temp$Cluster_num <- rownames(clusters_sp_cent_table_temp)
      clusters_sp_cent_table <- clusters_sp_cent_table_temp
    }
  })
  # One SpatialPolygonsDataFrame per cluster: the High-High municipalities
  # intersected with each cluster polygon, tagged with its Cluster_Num.
  clusters_list <- reactive({
    if (!(is.null(clusters()))){
      clusters_list_temp <- list()
      num_clust <- length(clusters_sp())
      #num_clust <- length(clusters_sp)
      for (i in 1:num_clust){
        clusters_list_temp[[i]] <- raster::intersect(dz5()[dz5()$category=="High-High",],clusters_sp()[i])
        #clusters_list_temp[[i]] <- raster::intersect(dz5[dz5$category=="High-High",],clusters_sp[i])
        clusters_list_temp[[i]]@data$Cluster_Num <- i
      }
      clusters_list <- clusters_list_temp
    }
  })
  # Cluster-count text (note: the "closing" tag is literally "<b>" in the
  # original string — user-facing HTML, left untouched).
  output$Num_clusters <- renderUI({
    if (!(is.null(clusters()))){
      Num_clusters <- paste0("<b> Number of High-High Clusters: ",length(clusters_list()),"<b>")
      #Num_clusters <- paste0("<b> Number of High-High Clusters: ",length(clusters_list),"<b>")
      HTML(Num_clusters)
    } else {
      HTML(paste0("<b> No clusters <b>"))
    }
  })
  # Headline result: party's state total and its share of valid votes, or a
  # prompt (in Portuguese) when inputs are incomplete.
  output$Result <- renderUI({
    if(is.null(dz3())){
      output_error <- "Por favor, informe os parâmetros <b>estado</b>, <b>cargo</b>, <b>ano</b> e <b>partido</b> antes atualizar o mapa."
      return(HTML(output_error))
    }
    str_Result <- paste0("<br><b>Votos: </b>",unique(dz3()@data$Tot_Partido[is.na(dz3()@data$Tot_Partido)==FALSE]),
                         "<br><b>Porcentagem dos votos válidos: </b>",round((unique(dz3()@data$Tot_Partido[is.na(dz3()@data$Tot_Partido)==FALSE])/unique(dz3()@data$Tot_State[is.na(dz3()@data$Tot_State)==FALSE]))*100,1), "%")
    #str_Result <- paste0("<br><b>Votos: </b>",unique(dz3@data$Tot_Partido[is.na(dz3@data$Tot_Partido)==FALSE]),
    # "<br><b>Porcentagem dos votos válidos: </b>",round((unique(dz3@data$Tot_Partido[is.na(dz3@data$Tot_Partido)==FALSE])/unique(dz3@data$Tot_State[is.na(dz3@data$Tot_State)==FALSE]))*100,1), "%")
    HTML(str_Result)
  })
  # G index display. NOTE(review): unlike output$Result this has no NULL
  # guard on dz3(), so it errors (greyed output) before the first query.
  output$G_Index <- renderUI({
    str_G_Index <- paste0("<h4>Estatísticas Geoespaciais: </h4><b>Índice G:</b> ",round(unique(dz3()@data$G_Index[is.na(dz3()@data$G_Index)==FALSE]),3))
    HTML(str_G_Index)
  })
  # Global Moran's I of the LQ under the kNN weights.
  moran_I <- reactive({
    dz3_local <- dz3()
    state_nb2_local <- state_nb2()
    state_nb2listw_local <- state_nb2listw()
    moran_I <- moran(dz3_local$LQ,state_nb2listw_local,n=length(state_nb2_local),Szero(state_nb2listw_local),zero.policy=TRUE,NAOK=TRUE)$I
  })
  # Static attribution footer (user-facing Portuguese HTML).
  output$Note <- renderUI({
    note <- paste0("<font size='3'> As mapas eleitorais foram desenvolvidos utilizando os dados coletados e limpos pelo <a href='http://cepesp.io/'> CepespData </a>. Desenvolvido por Jonathan Phillips e Rafael de Castro Coelho Silva com apoio do equipe CEPESP. </font>")
    HTML(note)
  })
  output$moran <- renderUI({
    str_moran <- paste0("<b> Moran's I: </b>", round(moran_I(),3))
    HTML(str_moran)
  })
  # Zoom control - zoom out: recentre on the current viewport and step the
  # zoom level down by one.
  observeEvent(input$map_zoom_out ,{
    leafletProxy("map") %>%
      setView(lat = (input$map_bounds$north + input$map_bounds$south) / 2,
              lng = (input$map_bounds$east + input$map_bounds$west) / 2,
              zoom = input$map_zoom - 1)
  })
  # Zoom control - zoom in
  observeEvent(input$map_zoom_in ,{
    leafletProxy("map") %>%
      setView(lat = (input$map_bounds$north + input$map_bounds$south) / 2,
              lng = (input$map_bounds$east + input$map_bounds$west) / 2,
              zoom = input$map_zoom + 1)
  })
  # Sidebar indicator panels (result summary, G index, Moran's I, cluster
  # count). These rely on renderUI returning the last expression.
  output$Indicators1 <- renderUI({
    str_Result <- HTML(paste0("<b>Resultado: </b>: ",
                              unique(dz3()@data$DESC_SIT_TOT_TURNO[is.na(dz3()@data$DESC_SIT_TOT_TURNO)==FALSE]),
                              "<br><b>Votos válidos: </b>",format(unique(dz3()@data$Tot_Partido[is.na(dz3()@data$Tot_Partido)==FALSE]),big.mark=" "),
                              "<br><b>% dos votos: </b>",round((unique(dz3()@data$Tot_Partido[is.na(dz3()@data$Tot_Partido)==FALSE])/unique(dz3()@data$Tot_State[is.na(dz3()@data$Tot_State)==FALSE]))*100,1), "%",
                              "<h4>Estatísticas Geoespaciais: </h4>"))
  })
  output$Indicators2 <- renderUI({
    str_G_Index <- HTML(paste0("<b>Índice G:</b> ",round(unique(dz3()@data$G_Index[is.na(dz3()@data$G_Index)==FALSE]),3),"<br> </br>"))
  })
  output$Indicators3 <- renderUI({
    str_moran <- HTML(paste0("<b> Moran's I: </b>", round(moran_I(),3),"<br> </br>"))
  })
  output$Indicators4 <- renderUI({
    str_moran <- HTML(paste0("<b> Número de Clusters: ",length(clusters_list()),"<b>"))
  })
}
783f55b4cbd0e8f784a889fb890c15d45e1d39fc | 377c8851390a7e85f2cca30246e0f9f6df7d13cc | /r_scripts/NGS2_WITNESS_Cycle1_confirmatory_exp2.R | 013e0025fdd6ba62fe193c85c97a2a29a3735bf1 | [] | no_license | GallupGovt/ngs2 | bb9eca850dc0d76b39d7aa16aeb1ef59d6d640fb | ed9443400bcd2a46907dae6701a7bd4580499772 | refs/heads/master | 2023-05-26T16:45:47.820293 | 2020-12-17T09:48:56 | 2020-12-17T09:48:56 | 147,573,574 | 4 | 4 | null | 2023-05-22T21:36:06 | 2018-09-05T20:07:10 | R | UTF-8 | R | false | false | 2,160 | r | NGS2_WITNESS_Cycle1_confirmatory_exp2.R | #Created by Pablo Diego Rosell, PhD, for Gallup inc. in March 2017
# Load data for analysis: cooperation decisions and network-rewiring choices
# from NGS2 WITNESS Cycle 1, Experiment 2 (public Gallup GitHub CSVs).
# NOTE(review): glmer() below comes from the lme4 package, which this script
# never loads — lme4 must be attached by the caller before sourcing.
exp2_cooperation <- read.csv(url("https://raw.githubusercontent.com/gallup/NGS2/master/cooperation_exp2.csv"),
                             header = TRUE,
                             sep = ',')
exp2_rewire <- read.csv(url("https://raw.githubusercontent.com/gallup/NGS2/master/rewire_exp2.csv"),
                        header = TRUE,
                        sep = ',')
########################################################################################################################
#Hypothesis tests
# Each hypothesis is tested with a binomial mixed-effects logistic regression
# with a random intercept per player (1|playerid); only the fitted summary is
# stored, so these objects are inspected interactively.
########################################################################################################################
#1.1. Individuals will be more likely to form connections with in-group members than with out-group members
glmm1.1 <- glmer(connect~ingroup + (1|playerid), data = exp2_rewire, family="binomial")
Hypothesis.1.1 <- summary(glmm1.1, corr = FALSE)
#1.2 Overall cooperation level will increase with successive rounds
glmm1.2 <- glmer(decision0d1c~round_num + (1|playerid), data = exp2_cooperation, family="binomial")
Hypothesis.1.2 <- summary(glmm1.2, corr = FALSE)
#Average cooperation by round (exploratory plot, kept disabled)
#plot(aggregate(cooperation$decision0d1c, list(cooperation$round_num), mean))
#2.1 In-group favoritism will be more likely in the biased pairing condition
# (interaction of in-group partner indicator with the biased-pairing condition)
glmm2.1 <- glmer(decision0d1c~ingroup*biased + (1|playerid), data = exp2_cooperation, family="binomial")
Hypothesis.2.1 <- summary(glmm2.1, corr = FALSE)
#3.1 Individuals in the 2 avatar condition will be more likely to form connections with in-group members than those in the 4 avatar condition
glmm3.1 <- glmer(connect~ingroup*identities + (1|playerid), data = exp2_rewire, family="binomial")
Hypothesis.3.1 <- summary(glmm3.1, corr = FALSE)
#3.2 Individuals in the 2 avatar condition will be less likely to cooperate with in-group members than those in the 4 avatar condition
glmm3.2 <- glmer(decision0d1c~ingroup*identities + (1|playerid), data = exp2_cooperation, family="binomial")
Hypothesis.3.2 <- summary(glmm3.2, corr = FALSE)
|
cb01c07963945a873962cb96e5603b588ff22ef8 | c98f04222e96d57ea9e78c7a0dc7bd8147bc64ef | /man/getStockInfo.Rd | 5d5b8d55128abc128cb57a4de7285625973c4882 | [] | no_license | alecthekulak/smif.package | 31913347bfd0fe9add290fc199cb00dabcb477ee | ba781b2a19460c277dfee33b3e016fdf191de7e4 | refs/heads/master | 2021-09-22T17:54:48.427050 | 2018-09-13T02:14:07 | 2018-09-13T02:14:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 4,606 | rd | getStockInfo.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getStockInfo.R
\name{getStockInfo}
\alias{getStockInfo}
\alias{getStockInfo.sector}
\alias{getStockInfo.name}
\alias{getStockInfo.last}
\alias{getStockInfo.industry}
\alias{getStockInfo.exchange}
\alias{getStockInfo.IPO.year}
\alias{getStockInfo.market.cap}
\alias{getStockInfo.last.trade}
\alias{getStockInfo.price}
\alias{getStockInfo.last.price}
\alias{getStockInfo.mcap}
\alias{getStockInfo.IPO}
\alias{getStockInfo.name}
\alias{getStockInfo.sector}
\alias{getStockInfo.exchange}
\alias{getStockInfo.last}
\alias{getStockInfo.industry}
\alias{getStockInfo.IPO.year}
\alias{getStockInfo.market.cap}
\title{Retrieves info for a stock}
\usage{
getStockInfo(ticker, clean.mcap = TRUE, clean.sector = TRUE,
auto.assign = FALSE, env = .GlobalEnv)
getStockInfo.name(ticker)
getStockInfo.sector(ticker, clean.sector = TRUE)
getStockInfo.exchange(ticker)
getStockInfo.last(ticker)
getStockInfo.industry(ticker)
getStockInfo.IPO.year(ticker)
getStockInfo.market.cap(ticker, clean.mcap = TRUE)
}
\arguments{
\item{ticker}{Character; the ticker for the stock you need info about}
\item{clean.mcap}{Logical; should the Market.Cap element be returned cleaned. If \code{TRUE}:
Market.Cap returns as a numeric. If \code{FALSE}: Market.Cap returns as a character string.
Defaults to \code{TRUE}}
\item{clean.sector}{Logical; should the Sector element be returned cleaned. Sector cleaning is done using
\code{\link{cleanSector}}. Defaults to \code{TRUE}}
\item{auto.assign}{Logical; should the results be assigned to \code{env}. If \code{FALSE} returns results.
Defaults to \code{FALSE}}
\item{env}{Environment; where to auto.assign objects. Setting \code{env=NULL} is equal to
\code{auto.assign=FALSE}. Defaults to \code{.GlobalEnv}.}
}
\value{
Returns a list containing the elements:
\item{Name}{Character; name of the company}
\item{Last.Trade}{Numeric; price of the last trade for the company}
\item{Sector}{Character; sector of the company}
\item{Industry}{Character; descriptive industry name}
\item{IPO.Year}{Integer; year of the company's IPO}
\item{Exchange}{Character; exchange that the company currently trades on. One of: \code{"AMEX"},
\code{"NASDAQ"}, \code{NYSE}.}
\item{Market.Cap}{Numeric (or Character); current market capitalization for the company}
}
\description{
Function family to retrieve specific info for a given ticker.
}
\details{
Based off of the \code{TTR} function \code{stockSymbols} it compensates for different data types and difficulty
in accessing specific ticker data. Data retrieval depends on the \code{TTR} package, which retrieves its data
from \href{http://www.nasdaq.com/}{NASDAQ}
}
\section{Functions}{
\itemize{
\item \code{getStockInfo.name}: Loads the company name for the given ticker (if available)
\item \code{getStockInfo.sector}: Loads the sector for the given ticker, allows changing the \code{clean.sector} variable
\item \code{getStockInfo.exchange}: Loads the exchange for the given ticker (if available)
\item \code{getStockInfo.last}: Loads the last trade price for the given ticker
\item \code{getStockInfo.industry}: Loads the last descriptive industry name for the given ticker
\item \code{getStockInfo.IPO.year}: Loads the IPO year for the given ticker (if available)
\item \code{getStockInfo.market.cap}: Loads the current market capitalization for the given ticker (if available), allows changing the \code{clean.mcap} variable
}}
\note{
Values for \code{Sector} may not be identical to sectors listed in \code{\link{getSectorWeights}}.
Values for \code{Sector}, \code{IPO.Year}, and \code{Market.Cap} are occasionally missing and will default to
\emph{NA}. \code{getStockInfo} caches the data for \code{stockSymbols} in a hidden global variable (\code{.ss})
to massively increase speed on subsequent uses of the function.
}
\examples{
getStockInfo("NVDA")
getStockInfo.name("NVDA")
\donttest{getStockInfo.sector("NVDA") }
\donttest{getStockInfo.exchange("NVDA") }
\donttest{getStockInfo.last("NVDA") }
\donttest{getStockInfo.industry("NVDA") }
\donttest{getStockInfo.IPO.year("NVDA") }
\donttest{getStockInfo.market.cap("NVDA", clean.mcap=FALSE) }
}
\seealso{
Other data retrieval functions: \code{\link{getHoldings.SMIF}},
\code{\link{getSectorETF}}, \code{\link{getSymbols.SMIF}}
}
\concept{
getStockInfo.last.trade getStockInfo.price getStockInfo.last.price
}
\keyword{data}
\keyword{internal}
\keyword{misc}
|
469daf0502658b1da81934c7ecc3c16c6b9ad7c4 | d334c1725613be0faae0b9c89aa1ce4d99e5a2dc | /R/tot_plot.R | 8474e2c8c929e5d2f252993aa0f832c1fda635e1 | [] | no_license | abresler/qdap | 6ae712c440c42e8e40a667de8c7ff20cf4f07d29 | c38f2dcd37fa50ec90cbf6bf9c47316edea5fb30 | refs/heads/master | 2021-01-16T20:02:03.742464 | 2013-08-04T16:33:52 | 2013-08-04T16:33:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,044 | r | tot_plot.R | #' Visualize Word Length by Turn of Talk
#'
#' Uses a bar graph to visualize patterns in sentence length and grouping
#' variables by turn of talk.
#'
#' @param dataframe A dataframe that contains the text variable and optionally
#' the grouping.var and tot variables.
#' @param text.var The text variable (character string).
#' @param grouping.var The grouping variables. Default \code{NULL} generates
#' one word list for all text. Also takes a single grouping variable or a list
#' of 1 or more grouping variables.
#' @param facet.vars An optional single vector or list of 1 or 2 to facet by.
#' @param tot The turn of talk variable (character string). May be \code{TRUE}
#' (assumes "tot" is the variable name), \code{FALSE} (use row numbers), or a
#' character string of the turn of talk column.
#' @param ncol if an integer value is passed to this
#' \code{\link[qdap]{gantt_wrap}} uses \code{\link[ggplot2]{facet_wrap}}
#' rather than \code{\link[ggplot2]{facet_grid}}.
#' @param transform logical. If \code{TRUE} the repeated facets will be
#' transformed from stacked to side by side.
#' @param ylab Optional y label.
#' @param xlab Optional x label.
#' @param bar.space The amount space between bars (ranging between 1 and 0).
#' @param scale Should scales be fixed (\code{"fixed"}, the default), free
#' (\code{"free"}), or free in one dimension (\code{"free_x"}, \code{"free_y"})
#' @param space If \code{"fixed"}, the default, all panels have the same size.
#' If \code{"free_y"} their height will be proportional to the length of the y
#' scale; if \code{"free_x"} their width will be proportional to the length of
#' the x scale; or if \code{"free"} both height and width will vary. This
#' setting has no effect unless the appropriate scales also vary.
#' @return Invisibly returns the ggplot2 object.
#' @keywords sentence, split, turn-of-talk
#' @import ggplot2
#' @export
#' @examples
#' \dontrun{
#' dataframe <- sentSplit(DATA, "state")
#' tot_plot(dataframe, "state")
#' tot_plot(DATA, "state", tot=FALSE)
#' tot_plot(dataframe, "state", space=.03)
#' tot_plot(dataframe, "state", "sex")
#' tot_plot(mraja1, "dialogue", "fam.aff", tot=FALSE)
#' tot_plot(mraja1, "dialogue", "died", tot=FALSE)
#' tot_plot(mraja1, "dialogue", c("sex", "fam.aff"), tot=FALSE) +
#' scale_fill_hue(l=40)
#' tot_plot(mraja1, "dialogue", c("sex", "fam.aff"), tot=FALSE)+
#' scale_fill_brewer(palette="Spectral")
#' tot_plot(mraja1, "dialogue", c("sex", "fam.aff"), tot=FALSE)+
#' scale_fill_brewer(palette="Set1")
#' }
tot_plot <- function(dataframe, text.var, grouping.var = NULL, facet.vars = NULL,
    tot = TRUE, transform = FALSE, ncol = NULL, ylab=NULL, xlab=NULL, bar.space=0,
    scale = NULL, space = NULL) {

    ## Dummies so R CMD check does not flag the non-standard-evaluation
    ## variables used inside aes()/labs() below.
    group <- caps <- NULL
    DF <- dataframe
    ## Resolve the turn-of-talk index (`tot2`) and reduce `dataframe` to the
    ## (tot, text) columns used for plotting:
    ##   tot = TRUE   : use the "tot" column if present (fall back to rows);
    ##   tot = "<col>": use the named column if present (fall back to rows);
    ##   tot = FALSE  : each row is its own turn of talk.
    ## FIX: the original tested `isTRUE(tot)`, then `!tot`, then
    ## `is.character(tot)` as three independent `if`s; a character `tot`
    ## raised "invalid argument type" at `!tot` before its branch could run.
    ## The branches are now mutually exclusive.
    if (isTRUE(tot)) {
        if(!any(colnames(dataframe) %in% "tot")) {
            warning("Turn of talk (\"tot\") column not found; using rows instead")
            tot2 <- dataframe[, "tot"] <- 1:nrow(dataframe)
            dataframe <- dataframe[, c("tot", text.var)]
        } else {
            tot2 <- tot <- TOT(dataframe[, "tot"])
            dataframe <- sentCombine(dataframe[, text.var], tot)
            tot <- TRUE
        }
    } else if (is.character(tot)) {
        if(!any(colnames(dataframe) %in% tot)) {
            warning("Turn of talk (", tot, ") column not found; using rows instead")
            tot2 <- dataframe[, "tot"] <- 1:nrow(dataframe)
            dataframe <- dataframe[, c("tot", text.var)]
        } else {
            ## FIX: the original passed the column *name* string itself as the
            ## grouping vector; use the named column's values, mirroring the
            ## `tot = TRUE` branch above.  (This path previously errored at
            ## `!tot`, so no working caller could depend on the old behavior.)
            tot2 <- TOT(dataframe[, tot])
            dataframe <- sentCombine(dataframe[, text.var], tot2)
        }
    } else {
        ## tot = FALSE: plot by row number.
        tot2 <- dataframe[, "tot"] <- 1:nrow(dataframe)
        dataframe <- dataframe[, c("tot", text.var)]
    }
    ## Attach one (constant-within-turn) grouping value per turn of talk.
    if (!is.null(grouping.var)) {
        G <- paste(grouping.var, collapse="&")
        if (ncol(DF[, grouping.var, drop=FALSE]) > 1) {
            dataframe[, "group"] <- sapply(split(paste2(DF[, grouping.var]), tot2), unique)
        } else {
            dataframe[, "group"] <- sapply(split(DF[, grouping.var], tot2), unique)
        }
        colnames(dataframe)[3] <- G
    }
    ## Attach up to two facetting variables ("new2", "new3"), one value per turn.
    if (!is.null(facet.vars)) {
        G2 <- paste(facet.vars, collapse="&")
        if (ncol(DF[, facet.vars, drop=FALSE]) > 1) {
            dataframe[, "new2"] <- sapply(split(paste2(DF[, facet.vars[1]]), tot2), unique)
        } else {
            dataframe[, "new2"] <- sapply(split(DF[, facet.vars[1]], tot2), unique)
        }
        if (length(facet.vars) == 2) {
            if (ncol(DF[, facet.vars, drop=FALSE]) > 1) {
                dataframe[, "new3"] <- sapply(split(paste2(DF[, facet.vars[2]]), tot2), unique)
            } else {
                dataframe[, "new3"] <- sapply(split(DF[, facet.vars[2]], tot2), unique)
            }
        }
    }
    colnames(dataframe)[2] <- "text.var"
    dataframe[, "word.count"] <- wc(dataframe[, "text.var"])
    ## FIX: `Xlab`/`Ylab` were only assigned when `xlab`/`ylab` were NULL, so
    ## supplying either argument caused "object 'Xlab' not found" below.
    if (is.null(xlab)) {
        Xlab <- "Turn of Talk"
    } else {
        Xlab <- xlab
    }
    if (is.null(ylab)) {
        Ylab <- "Word Count"
    } else {
        Ylab <- ylab
    }
    ## Order the bars by numeric turn of talk.
    dataframe <- na.omit(dataframe)
    dataframe[, "tot"] <- factor(dataframe[, "tot"],
        levels=sort(as.numeric(as.character(dataframe[, "tot"]))))
    dataframe <- dataframe[order(dataframe[, "tot"]), ]
    dataframe <- droplevels(dataframe)
    ## Re-number turns within each facet so each panel starts at 1.
    if (!is.null(facet.vars)) {
        ## FIX: was `length(facet.vars == 1)` -- the *length* of a logical
        ## vector, always >= 1 and hence always truthy -- so two facet
        ## variables were never combined for the split.
        if (length(facet.vars) == 1) {
            sdat <- split(dataframe, dataframe[, "new2"])
        } else {
            sdat <- split(dataframe, paste2(dataframe[, c("new2", "new3")]))
        }
        sdat <- lapply(sdat, function(x) {
            x[, "tot"] <- factor(1:nrow(x), levels=1:nrow(x))
            x
        })
        dataframe <- do.call(rbind.data.frame, sdat)
    }
    dataframe["bar.space"] <- rep(bar.space, nrow(dataframe))
    theplot <- ggplot(dataframe, aes(tot, word.count, width=1-bar.space))
    if (!is.null(grouping.var)) {
        GR <- colnames(dataframe)[3]
        colnames(dataframe)[3] <- "group"
        theplot <- theplot + geom_bar(stat="identity", aes(fill=group), data=dataframe) +
            labs(fill = caps(gsub("&", " & ", GR, fixed=TRUE), all=TRUE))
    } else {
        theplot <- theplot + geom_bar(stat="identity")
    }
    theplot <- theplot + ylab(Ylab) + xlab(Xlab) +
        scale_y_continuous(expand = c(0,0)) +
        theme(axis.text.x=element_blank(), axis.ticks.x=element_blank())
    ## Facet layout: facet_wrap when `ncol` is given; otherwise facet_grid,
    ## with `transform` flipping a single facet variable from rows to columns.
    if (!is.null(facet.vars)) {
        if(!is.null(ncol)){
            theplot <- theplot + facet_wrap(~new2, scales = scale, ncol=ncol)
        } else {
            if (length(facet.vars) == 1) {
                if (transform) {
                    theplot <- theplot + facet_grid(.~new2, scales = scale, space = space)
                } else {
                    theplot <- theplot + facet_grid(new2~., scales = scale, space = space)
                }
            } else {
                theplot <- theplot + facet_grid(new2~new3, scales = scale, space = space)
            }
        }
    }
    return(theplot)
}
|
4dec0aa1477a58e6760d7b9aeb71b9f6a2669e12 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/rela/examples/paf.Rd.R | 5ca3157adef504bea3aac3bb13ee04c8d79b662d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 317 | r | paf.Rd.R | library(rela)
### Name: paf
### Title: Principal Axis Factoring
### Aliases: paf
### Keywords: manip misc

### ** Examples

library(rela)

## Principal-axis factoring on the first seven Seatbelts series.
Belts <- Seatbelts[, 1:7]
summary(Belts)
paf.belt <- paf(Belts)
summary(paf.belt)

## Refit after dropping the fifth and sixth original columns
## (equivalent to the two successive single-column drops).
Belts2 <- Belts[, -c(5, 6)]
paf.belt2 <- paf(Belts2)
summary(paf.belt2)
|
daf0d9a57ab84a56dc6efa9e3dcd0bb2ea5cc9e9 | 88db68ad7439180a22df307a6d5d47c646c6af10 | /analysis.R | 625036e9ac24d4ead203c1ebce2e605de211f880 | [] | no_license | jberninger/UCSC_Stat207_COVID | 9946ab87c5585148c79d652ecfc88490d94b083b | 000d3489fe618b8c177e69659ad26aa0bf8c7103 | refs/heads/master | 2023-01-04T06:29:41.043630 | 2020-11-04T03:11:56 | 2020-11-04T03:11:56 | 258,053,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 27,046 | r | analysis.R | ## Stat 207
## Final Take Home
## Covid Analysis
####################################################################################################################################
library(readr)
library(dplyr)
library(truncnorm)
library(MASS)
library(tmvtnorm)
library(invgamma)
library(mvtnorm)
library(janitor)
library(LearnBayes)
set.seed(11)
library(latex2exp)
library(tmvtnorm)
library(LearnBayes)
library(plyr)
library(dplyr)
library(janitor)
library(readr)
library(ggmap)
library(ggplot2)
library(gridExtra)
library(ggmap)
library(maps)
library(mapdata)
library(geoR)
setwd("~/Desktop/Stat 207/Final/")
# dataset
data<-read_csv("CountyData.csv")%>%clean_names()
####################################################################################################################################
# name the fields
# Build the modeling quantities: ny = raw case counts, y = log cases
# (the response), cy = county population, d = population density,
# group = census group label, N = number of counties.
covid<-data%>%dplyr::mutate(ny=total_cases,
                            y=log(total_cases),
                            cy=population_original,
                            county=tolower(county))%>%
  rename(c(group="census.group"))
# NOTE(review): this rename() is the plyr form (old = "new"); whether a
# "group" column exists at this point is not verifiable here -- later code
# reads covid$census.group directly, so confirm the column names line up.
ny<-covid$total_cases
# Bump zero counts to 0.01 so the log below is finite.
ny[ny==0]<-0.01
y<-log(ny) # has some -Inf values that must be handled
#y[y==-Inf]<--9999
cy<-covid$population_original
d<-covid$density
group<-covid$census.group
N<-nrow(covid)
# EDA
## Perform an exploratory data analysis involving
## yij (log cases), the five different groups, the population, and the population density.
## Discuss possible associations and clusters.
# data prep for the map
{
states <- map_data("state")
cali <- subset(states, region == "california")
counties <- map_data("county")
ca_county <- subset(counties, region == "california") %>%
inner_join(covid, by = c("subregion" = "county")) %>%
mutate(death.rate = 100*round(deaths/population_original, 10))
ditch_the_axes <- theme(
axis.text = element_blank(),
axis.line = element_blank(),
axis.ticks = element_blank(),
panel.border = element_blank(),
panel.grid = element_blank(),
axis.title = element_blank()
)
ca_base <- ggplot(data = cali, mapping = aes(x = long, y = lat, group = group)) +
coord_fixed(1.3) +
geom_polygon(color = "black", fill = "gray")
}
# maps
{
# Death rate
ca_base +
geom_polygon(data = ca_county, aes(fill =ordered(round(100*death.rate, 2))), color = "white") +
geom_polygon(color = "black", fill = NA) +
ggtitle("Death Rates by County") +
theme_bw() +
labs(fill = "Deaths / Infected (%)") +
ditch_the_axes
# Regions
ca_base +
geom_polygon(data = ca_county, aes(fill =factor(census.group)), color = 'white') +
geom_polygon(color = "black", fill = NA) +
ggtitle("Census Groups in CA") +
theme_bw() +
labs(fill = "Census Group") +
ditch_the_axes
# Population Density
ca_base +
geom_polygon(data = ca_county, aes(fill =ordered(round(density/100, 0)))) +
geom_polygon(color = "black", fill = NA) +
ggtitle("Population Density by County") +
theme_bw() +
labs(fill = "Scaled Density (Density/100)") +
ditch_the_axes
}
# plots
# Scatterplots of the raw covariates and the response, colored by census group.
# FIX: p2's ggplot chain was broken -- the labs(...) line had no trailing
# `+`, so the following xlab(...) was a free-standing statement and the
# x-axis label was silently dropped.
p1<-ggplot(data=covid)+geom_point(aes(x=population_original,y=density,color=factor(census.group)),size=2) +
  ggtitle("Density vs Population") +
  labs(color = "Census Group") +
  xlab("Population")
p2<-ggplot(data=covid)+geom_point(aes(x=log(population_original),y=log(density),color=factor(census.group)),size=2)+
  ggtitle("log(Density) vs log(Population)") +
  labs(color = "Census Group") +
  xlab("log(Population)")
grid.arrange(p1, p2, nrow=2)
# population vs log cases
plot(cy,y,col=group,type="p",pch=4)
# FIX: the original `legend(group)` passed the group vector as the legend's
# position with no labels, which errors; draw a proper key matching the
# col=group palette indices used in the plot above.
legend("topleft", legend = sort(unique(group)), col = sort(unique(group)), pch = 4)
plot(log(cy),y,col=group,type="p",pch=4)
ggplot(data=covid)+geom_point(aes(x=log(population_original),y=log(total_cases),color=factor(census.group)),size=2) +
  ggtitle("Response vs log(population)") +
  labs(color = "Census Group") +
  xlab("log(population)") +
  ylab("log(cases)")
ggplot(data=covid)+geom_point(aes(x=log(density),y=log(total_cases),color=factor(census.group)),size=2) +
  ggtitle("Response vs log(density)") +
  labs(color = "Census Group") +
  xlab("log(density)") +
  ylab("log(cases)")
# population density vs log cases
plot(d,y,col=group,type="p",pch=4)
plot(log(d),y,col=group,type="p")
# Boxplots of the covariates by census group.
ggplot(data=covid,aes(x=census.group,y=log(density),group=census.group,fill=factor(census.group)))+geom_boxplot()+
  theme(legend.position = "none") +
  ggtitle("Distribution of log(Density) by Group")+
  xlab("Census group")+
  ylab("log(Density)")
ggplot(data=covid,aes(x=census.group,y=log(population_original),group=census.group,fill=factor(census.group)))+geom_boxplot()+
  theme(legend.position = "none") +
  ggtitle("Distribution of log(Population) by Group")+
  xlab("Census group")+
  ylab("log(Population)")
####################################################################################################################################
# model 1 : y = \mu + e
# Intercept-only weighted regression.  Q encodes heteroskedastic errors with
# variance proportional to 10^3 / population (larger counties measured more
# precisely).  Posterior draws use the standard conjugate reference results.
X=matrix(rep(1,N),ncol=1)
Q=diag(10^3/cy)
# GLS estimate and its covariance under the reference prior.
beta.hat<-solve(t(X)%*%solve(Q)%*%X)%*%t(X)%*%solve(Q)%*%y
V.beta<-solve(t(X)%*%solve(Q)%*%X)
# mu
beta.hat.sample<-rmvnorm(n=1000,mean=beta.hat,sigma=V.beta)
hist(beta.hat.sample,main=TeX('Model 1 Posterior Distribution for $\\mu$'),xlab=TeX("$\\mu$"))
abline(v=mean(beta.hat.sample),col="red")
abline(v=quantile(beta.hat.sample,0.05),col="blue")
abline(v=quantile(beta.hat.sample,0.95),col="blue")
# sigma sq
# Scaled inverse chi-squared posterior for the error variance.
k<-1
s.sq<-(1/(N-k))*t(y-X%*%beta.hat)%*%solve(Q)%*%(y-X%*%beta.hat)
sigma.sq.sample<-geoR::rinvchisq(n=1000,df=N-k,scale=s.sq)
hist(sigma.sq.sample,main=TeX('Model 1 Posterior Distribution for $\\sigma^2$'),xlab=TeX("$\\sigma^2$"))
abline(v=mean(sigma.sq.sample),col="red")
abline(v=quantile(sigma.sq.sample,0.05),col="blue")
abline(v=quantile(sigma.sq.sample,0.95),col="blue")
# g prior
# Whiten the model via the Cholesky factor of Q so the errors are iid N(0,1).
L=chol(Q)
W=solve(L)%*%X
Z=solve(L)%*%y
beta.hat<-solve(t(W)%*%W)%*%t(W)%*%Z
V.beta<-solve(t(W)%*%W)
# mu
beta.hat.sample<-rmvnorm(n=10000,mean=beta.hat,sigma=V.beta)
hist(beta.hat.sample[,1])
abline(v=mean(beta.hat.sample[,1]),col="red")
# R squared
Yhat= X %*% beta.hat
# Compute Residuals
Resid=y-Yhat
SSE=t(Resid) %*% Resid
R.sq.1 <- 1-SSE/sum((y-mean(y))^2)
R.sq.1 # it does worse than the mean, it can be negative because it does not have a centering parameter
plot(log(covid$population_original),Resid,xlab="log(population)",main="Model 1 Residuals vs log(Population)")
# clear trend in the residuals, this model is garbage, as expected need to include population
# NOTE(review): with an intercept-only model W has one column, so
# W[,-1] below is a zero-column matrix; beta.hat.g is then empty and the
# rinvgamma rate reduces to 0.5*var(Z).  Harmless here, but fragile.
mu<-scale(W[,1])
X<-scale(W[,-1])
Z<-scale(Z) # doesnt work if Z isnt scaled, but it def should be
G<-N
# Double check this in office hour
beta.hat.g<-solve(t(X)%*%X)%*%t(X)%*%Z
# is this the right beta.hat? think so
# they have shrunk from the other ones
sigma.sq.sample.g<-rinvgamma(n=1000,N/2,0.5*var(Z)+(1/(2*G+1))*t(beta.hat.g)%*%t(X)%*%X%*%beta.hat.g)
# it might not be correct that I had to sample sigma.squared in order to sample beta
# CLT-style approximate posterior for the overall mean.
mu.samples<-rnorm(n=1000,mean=mean(y),sd=sd(y)/sqrt(N))
hist(mu.samples,main=TeX('Model 1 Posterior Distribution for $\\mu$'),xlab=TeX("$\\mu$"))
abline(v=mean(mu.samples),col="red")
abline(v=quantile(mu.samples,0.05),col="blue")
abline(v=quantile(mu.samples,0.95),col="blue")
####################################################################################################################################
# model 2 : y = \mu + B*d + e
# Intercept + population-density slope, same heteroskedastic error model.
X=matrix(c(rep(1,N),d),ncol=2)
Q=diag(10^3/cy)
beta.hat<-solve(t(X)%*%solve(Q)%*%X)%*%t(X)%*%solve(Q)%*%y
V.beta<-solve(t(X)%*%solve(Q)%*%X)
# mu
beta.hat.sample<-rmvnorm(n=1000,mean=beta.hat,sigma=V.beta)
hist(beta.hat.sample[,1])
# Red line: the matching weighted least-squares fit, as a sanity check.
abline(v=lm(y~1+d,weights=1/sqrt(10^3/cy))$coef[1],col="red")
# beta
hist(beta.hat.sample[,2])
abline(v=lm(y~1+d,weights=1/sqrt(10^3/cy))$coef[2],col="red")
# sigma sq
k<-2
s.sq<-(1/(N-k))*t(y-X%*%beta.hat)%*%solve(Q)%*%(y-X%*%beta.hat)
sigma.sq.sample<-geoR::rinvchisq(n=1000,df=N-k,scale=s.sq)
hist(sigma.sq.sample)
# unequal var
# g prior
# Whiten via the Cholesky factor of Q, then fit by OLS on (W, Z).
L=chol(Q)
W=solve(L)%*%X
Z=solve(L)%*%y
beta.hat<-solve(t(W)%*%W)%*%t(W)%*%Z
V.beta<-solve(t(W)%*%W)
# mu
beta.hat.sample<-rmvnorm(n=10000,mean=beta.hat,sigma=V.beta)
hist(beta.hat.sample[,1])
abline(v=mean(beta.hat.sample[,1]),col="red")
# G-prior
# Zellner g-prior posterior: beta | sigma^2 ~ N(g/(1+g) * beta.hat.g, ...),
# with the intercept handled separately via centering/scaling.
{
  # G Prior
  # design matrix needs to have mu removed and colmeans of 0
  # should the mu be scaled?
  mu<-scale(W[,1])
  X<-scale(W[,-1])
  Z<-scale(Z) # doesnt work if Z isnt scaled, but it def should be
  G<-N
  # Double check this in office hour
  beta.hat.g<-solve(t(X)%*%X)%*%t(X)%*%Z
  # is this the right beta.hat? think so
  # they have shrunk from the other ones
  sigma.sq.sample.g<-rinvgamma(n=1000,N/2,0.5*var(Z)+(1/(2*G+1))*t(beta.hat.g)%*%t(X)%*%X%*%beta.hat.g)
  # it might not be correct that I had to sample sigma.squared in order to sample beta
  # Draw beta conditional on each sigma^2 draw (composition sampling).
  beta.samples.g<-matrix(NA,1000,length(beta.hat.g))
  for(i in 1:1000){
    beta.samples.g[i,]<-rmvnorm(n=1,mean=(G/(1+G))*beta.hat.g,sigma=(G/(1+G))*sigma.sq.sample.g[i]*solve(t(X)%*%X))
  }
  mu.samples<-rnorm(n=1000,mean=mean(y),sd=sd(y)/sqrt(N))
  hist(beta.samples.g[,1],main=TeX('Model 2 Posterior Distribution for $\\beta$'),xlab=TeX("$\\beta$"))
  abline(v=quantile(beta.samples.g[,1],0.05),col="blue")
  abline(v=quantile(beta.samples.g[,1],0.95),col="blue")
  abline(v=mean(beta.samples.g[,1]),col="red")
  hist(mu.samples,main=TeX('Model 2 Posterior Distribution for $\\mu$'),xlab=TeX("$\\mu$"))
  abline(v=mean(mu.samples),col="red")
  abline(v=quantile(mu.samples,0.05),col="blue")
  abline(v=quantile(mu.samples,0.95),col="blue")
  # G Prior R^2
  Yhat= X %*% beta.hat.g
  # Compute Residuals
  Resid=Z-Yhat
  SSE=t(Resid) %*% Resid
  R.sq.2 <- 1-SSE/sum((Z-mean(Z))^2)
}
R.sq.2
plot(log(covid$population_original),Resid,xlab="log(population)",main="Model 2 Residuals vs log(Population)")
# residuals increasing wrt log(pop) and are heteroskedastic
plot(Yhat,Resid,xlab=TeX("$\\hat{y}$"),main="Model 2 Residuals vs Fitted Values")
# model 3 : y = \mu + eta + e
# One-way group-effects model with a sum-to-zero constraint: groups 1-4 get
# free effects and group 5 is -(eta1+...+eta4).
# NOTE(review): S below looks like a sum-to-zero contrast matrix but is
# never used afterwards -- the constraint is built into X directly.
S=matrix(0,5,4)
diag(S)=rep(1,4)
S[5,]=rep(-1,4)
k=5
# Define Design Matrix
X=matrix(0,N,k)
X[,1]=1
X[,2]=ifelse(group==1,1,0)
X[,3]=ifelse(group==2,1,0)
X[,4]=ifelse(group==3,1,0)
X[,5]=ifelse(group==4,1,0)
# Group-5 rows carry -1 on all four dummy columns (sum-to-zero coding).
for(i in 1:N){
  if(group[i]==5){X[i,2:5]=-1}
}
Q=diag(10^3/cy)
beta.hat<-solve(t(X)%*%solve(Q)%*%X)%*%t(X)%*%solve(Q)%*%y
V.beta<-solve(t(X)%*%solve(Q)%*%X)
# mu
beta.hat.sample<-rmvnorm(n=10000,mean=beta.hat,sigma=V.beta)
hist(beta.hat.sample[,1])
abline(v=mean(beta.hat.sample[,1]),col="red")
hist(beta.hat.sample[,2])
abline(v=mean(beta.hat.sample[,2]),col="red")
# abline(v=lm(y~1+factor(group),weights=1/sqrt(10^3/cy))$coef[2],col="red")
# regression model puts the group 1 factor into the intercept term
# beta
# unequal var
# Whiten via the Cholesky factor of Q.
L=chol(Q)
W=solve(L)%*%X
Z=solve(L)%*%y
beta.hat<-solve(t(W)%*%W)%*%t(W)%*%Z
V.beta<-solve(t(W)%*%W)
# mu
beta.hat.sample<-rmvnorm(n=10000,mean=beta.hat,sigma=V.beta)
hist(beta.hat.sample[,1])
abline(v=mean(beta.hat.sample[,1]),col="red")
# G prior
{
  # model: L^-1y = L^-1XB + v, v \sim N(0,I)
  # L = cholesky demcomp of the error covariance matrix
  # beta hat is given by
  # 3 steps from slides:
  # (1) cholesky decomp of V (error covaraince)
  # (2) solve LW = X and LZ=y
  # (3) compute LSE
  # G Prior
  # design matrix needs to have mu removed and colmeans of 0
  # should the mu be scaled?
  mu<-scale(W[,1])
  X<-scale(W[,-1])
  Z<-scale(Z) # doesnt work if Z isnt scaled, but it def should be
  # NOTE(review): G is set to 22 and then immediately overwritten with N.
  G<-22
  G<-N
  # Double check this in office hour
  beta.hat.g<-solve(t(X)%*%X)%*%t(X)%*%Z
  # is this the right beta.hat? think so
  # they have shrunk from the other ones
  sigma.sq.sample.g<-rinvgamma(n=1000,N/2,0.5*var(Z)+(1/(2*G+1))*t(beta.hat.g)%*%t(X)%*%X%*%beta.hat.g)
  # it might not be correct that I had to sample sigma.squared in order to sample beta
  # Composition sampling: beta | sigma^2 draw-by-draw.
  beta.samples.g<-matrix(NA,1000,length(beta.hat.g))
  for(i in 1:1000){
    beta.samples.g[i,]<-rmvnorm(n=1,mean=(G/(1+G))*beta.hat.g,sigma=(G/(1+G))*sigma.sq.sample.g[i]*solve(t(X)%*%X))
  }
par(mfrow=c(2,2))
for(i in 1:6){
hist(beta.samples.g[,i])
abline(v=mean(beta.samples.g[,i]),col="red")
abline(v=quantile(beta.samples.g[,i],0.05),col="blue")
abline(v=quantile(beta.samples.g[,i],0.95),col="blue")
}
dev.off()
  # Recover the constrained fifth group effect as minus the sum of the four
  # sampled effects, then show all five posteriors side by side.
  etas<-cbind(beta.samples.g,-rowSums(beta.samples.g))
  boxplot(etas,use.cols=TRUE,
          main=TeX("Model 3 Posterior Distribution of Group Coefficients"),xlab=TeX("$\\eta _i$"))
  abline(h=0,col="red")
mu.samples<-rnorm(n=1000,mean=mean(y),sd=sd(y)/sqrt(N))
hist(mu.samples,main=TeX('Model 2 Posterior Distribution for $\\mu$'),xlab=TeX("$\\mu$"))
abline(v=mean(mu.samples),col="red")
abline(v=quantile(mu.samples,0.05),col="blue")
abline(v=quantile(mu.samples,0.95),col="blue")
abline(v=mean(mu.samples[,1]),col="red")
# distribution for mu is not changing?
  # G Prior R^2
  # R^2 computed on the whitened, scaled data (Z, X).
  Yhat= X %*% beta.hat.g
  # Compute Residuals
  Resid=Z-Yhat
  SSE=t(Resid) %*% Resid
  R.sq.3 <- 1-SSE/sum((Z-mean(Z))^2)
}
R.sq.3
plot(log(covid$population_original),Resid,xlab="log(population)",main="Model 3 Residuals vs log(Population)")
# residuals clearly have a trend and are heteroskedastic
plot(Yhat,Resid,xlab=TeX("$\\hat{y}$"),main="Model 3 Residuals vs Fitted Values")
####################################################################################################################################
# model 4 : y = \mu + B*d + eta_j + e
# Density slope plus sum-to-zero group effects (dummies in columns 3:6).
k=6
# Define Design Matrix
X=matrix(0,N,k)
X[,1]=1
X[,2]=d
X[,3]=ifelse(group==1,1,0)
X[,4]=ifelse(group==2,1,0)
X[,5]=ifelse(group==3,1,0)
X[,6]=ifelse(group==4,1,0)
# Group-5 rows carry -1 on the four dummy columns (sum-to-zero coding).
for(i in 1:N){
  if(group[i]==5){X[i,3:6]=-1}
}
Q=diag(10^3/cy)
beta.hat<-solve(t(X)%*%solve(Q)%*%X)%*%t(X)%*%solve(Q)%*%y
V.beta<-solve(t(X)%*%solve(Q)%*%X)
# mu
beta.hat.sample<-rmvnorm(n=10000,mean=beta.hat,sigma=V.beta)
hist(beta.hat.sample[,1])
abline(v=mean(beta.hat.sample[,1]),col="red")
# Histograms for the five non-intercept reference-prior coefficients.
par(mfrow=c(2,3))
for(i in 1:5){
  hist(beta.hat.sample[,1+i])
  abline(v=mean(beta.hat.sample[,1+i]),col="red")
}
# one of the params post distributions is centered on zero.
# G prior
{
  # model: L^-1y = L^-1XB + v, v \sim N(0,I)
  # L = cholesky demcomp of the error covariance matrix
  # beta hat is given by
  # 3 steps from slides:
  # (1) cholesky decomp of V (error covaraince)
  # (2) solve LW = X and LZ=y
  # (3) compute LSE
  L=chol(Q)
  W=solve(L)%*%X
  Z=solve(L)%*%y
  # ok this seems to work
  # confirmed it re-captures beta.hat
  # G Prior
  # design matrix needs to have mu removed and colmeans of 0
  # should the mu be scaled?
  mu<-scale(W[,1])
  X<-scale(W[,-1])
  Z<-scale(Z) # doesnt work if Z isnt scaled, but it def should be
  # NOTE(review): G = 32 here, unlike G = N in models 2-3; the choice of g
  # differs across model sections, which affects comparability later.
  G<-32
  # Double check this in office hour
  beta.hat.g<-solve(t(X)%*%X)%*%t(X)%*%Z
  # is this the right beta.hat? think so
  # they have shrunk from the other ones
  sigma.sq.sample.g<-rinvgamma(n=1000,N/2,0.5*var(Z)+(1/(2*G+1))*t(beta.hat.g)%*%t(X)%*%X%*%beta.hat.g)
  # it might not be correct that I had to sample sigma.squared in order to sample beta
  # Composition sampling: beta | sigma^2 draw-by-draw.
  beta.samples.g<-matrix(NA,1000,length(beta.hat.g))
  for(i in 1:1000){
    beta.samples.g[i,]<-rmvnorm(n=1,mean=(G/(1+G))*beta.hat.g,sigma=(G/(1+G))*sigma.sq.sample.g[i]*solve(t(X)%*%X))
  }
par(mfrow=c(2,3))
for(i in 1:6){
hist(beta.samples.g[,i])
abline(v=mean(beta.samples.g[,i]),col="red")
}
  # G Prior R^2
  # R^2 computed on the whitened, scaled data (Z, X).
  Yhat= X %*% beta.hat.g
  # Compute Residuals
  Resid=Z-Yhat
  SSE=t(Resid) %*% Resid
  R.sq.4 <- 1-SSE/sum((Z-mean(Z))^2)
}
R.sq.4
# plots
dev.off()
# Columns 2:5 of beta.samples.g are the free group effects; the constrained
# fifth effect is minus their sum.
etas<-cbind(beta.samples.g[,2:5],-rowSums(beta.samples.g[,2:5]))
boxplot(etas,use.cols=TRUE,
        main=TeX("Model 4 Posterior Distribution of Group Coefficients"),xlab=TeX("$\\eta _i$"))
abline(h=0,col="red")
# Posterior of the population-density slope.
# FIX: this previously plotted beta.samples.g[,2], which is the first
# *group* effect (column order after scaling W[,-1] is density, then the
# four group dummies); the density slope labelled beta is column 1, matching
# the convention used in the model 6 section.
hist(beta.samples.g[,1],main=TeX('Model 4 Posterior Distribution for $\\beta$'),xlab=TeX("$\\beta$"))
abline(v=mean(beta.samples.g[,1]),col="red")
abline(v=quantile(beta.samples.g[,1],0.05),col="blue")
abline(v=quantile(beta.samples.g[,1],0.95),col="blue")
# Residual diagnostics on the whitened scale.
plot(log(covid$population_original),Resid,xlab="log(population)",main="Model 4 Residuals vs log(Population)")
# errors have a trend
plot(Yhat,Resid,xlab=TeX("$\\hat{y}$"),main="Model 4 Residuals vs Fitted Values")
####################################################################################################################################
# model 5 : y = \mu + B*d +C*log(d) + e
# Intercept plus density and log-density terms (no group effects).
X=matrix(c(rep(1,N),d,log(d)),ncol=3)
Q=diag(10^3/cy)
beta.hat<-solve(t(X)%*%solve(Q)%*%X)%*%t(X)%*%solve(Q)%*%y
V.beta<-solve(t(X)%*%solve(Q)%*%X)
# mu
beta.hat.sample<-rmvnorm(n=1000,mean=beta.hat,sigma=V.beta)
hist(beta.hat.sample[,1])
# Red line: weighted LS intercept from the density-only fit, as a reference.
abline(v=lm(y~1+d,weights=1/sqrt(10^3/cy))$coef[1],col="red")
# beta
hist(beta.hat.sample[,2])
# C
hist(beta.hat.sample[,3])
# sigma sq
k<-3
s.sq<-(1/(N-k))*t(y-X%*%beta.hat)%*%solve(Q)%*%(y-X%*%beta.hat)
sigma.sq.sample<-geoR::rinvchisq(n=1000,df=N-k,scale=s.sq)
hist(sigma.sq.sample)
# unequal var
# g prior
# Whiten via the Cholesky factor of Q.
L=chol(Q)
W=solve(L)%*%X
Z=solve(L)%*%y
beta.hat<-solve(t(W)%*%W)%*%t(W)%*%Z
V.beta<-solve(t(W)%*%W)
# mu
beta.hat.sample<-rmvnorm(n=10000,mean=beta.hat,sigma=V.beta)
hist(beta.hat.sample[,1])
abline(v=mean(beta.hat.sample[,1]),col="red")
# G-prior
{
  # G Prior
  # design matrix needs to have mu removed and colmeans of 0
  # should the mu be scaled?
  mu<-scale(W[,1])
  X<-scale(W[,-1])
  Z<-scale(Z) # doesnt work if Z isnt scaled, but it def should be
  # NOTE(review): yet another g value (643) -- differs from models 2-4.
  G<-643
  # Double check this in office hour
  beta.hat.g<-solve(t(X)%*%X)%*%t(X)%*%Z
  # is this the right beta.hat? think so
  # they have shrunk from the other ones
  sigma.sq.sample.g<-rinvgamma(n=1000,N/2,0.5*var(Z)+(1/(2*G+1))*t(beta.hat.g)%*%t(X)%*%X%*%beta.hat.g)
  # it might not be correct that I had to sample sigma.squared in order to sample beta
  # Composition sampling: beta | sigma^2 draw-by-draw.
  beta.samples.g<-matrix(NA,1000,length(beta.hat.g))
  for(i in 1:1000){
    beta.samples.g[i,]<-rmvnorm(n=1,mean=(G/(1+G))*beta.hat.g,sigma=(G/(1+G))*sigma.sq.sample.g[i]*solve(t(X)%*%X))
  }
  mu.samples<-rnorm(n=1000,mean=mean(y),sd=sd(y)/sqrt(N))
  # Column 1 = density slope (beta), column 2 = log-density slope (gamma).
  hist(beta.samples.g[,1],main=TeX('Model 5 Posterior Distribution for $\\beta$'),xlab=TeX("$\\beta$"))
  abline(v=quantile(beta.samples.g[,1],0.05),col="blue")
  abline(v=quantile(beta.samples.g[,1],0.95),col="blue")
  abline(v=mean(beta.samples.g[,1]),col="red")
  # quadratic term
  hist(beta.samples.g[,2],main=TeX('Model 5 Posterior Distribution for $\\gamma$'),xlab=TeX("$\\gamma$"))
  abline(v=quantile(beta.samples.g[,2],0.05),col="blue")
  abline(v=quantile(beta.samples.g[,2],0.95),col="blue")
  abline(v=mean(beta.samples.g[,2]),col="red")
hist(mu.samples,main=TeX('Model 2 Posterior Distribution for $\\mu$'),xlab=TeX("$\\mu$"))
abline(v=mean(mu.samples),col="red")
abline(v=quantile(mu.samples,0.05),col="blue")
abline(v=quantile(mu.samples,0.95),col="blue")
  # G Prior R^2
  # R^2 computed on the whitened, scaled data (Z, X).
  Yhat= X %*% beta.hat.g
  # Compute Residuals
  Resid=Z-Yhat
  SSE=t(Resid) %*% Resid
  R.sq.5 <- 1-SSE/sum((Z-mean(Z))^2)
}
R.sq.5
plot(log(covid$population_original),Resid,xlab="log(population)",main="Model 5 Residuals vs log(Population)")
plot(Yhat,Resid,xlab=TeX("$\\hat{y}$"),main="Model 5 Residuals vs Fitted Values")
####################################################################################################################################
# model 6 : y = \mu + Beta*d +gamma*log(d) + eta_j + e
k=7
# Design matrix: intercept, density, log(density), then four free group
# dummies with a sum-to-zero constraint (group 5 = -1 on all four).
X=matrix(0,N,k)
X[,1]=1
X[,2]=d
X[,3]=log(d)
X[,4]=ifelse(group==1,1,0)
X[,5]=ifelse(group==2,1,0)
X[,6]=ifelse(group==3,1,0)
X[,7]=ifelse(group==4,1,0)
for(i in 1:N){
  # FIX: the original set X[i,3:6]=-1 (copied from model 4, where the
  # dummies sit in columns 3:6); here that clobbered the log(density)
  # column 3 and left column 7 untouched.  The group dummies occupy
  # columns 4:7 in this model.
  if(group[i]==5){X[i,4:7]=-1}
}
Q=diag(10^3/cy)
beta.hat<-solve(t(X)%*%solve(Q)%*%X)%*%t(X)%*%solve(Q)%*%y
V.beta<-solve(t(X)%*%solve(Q)%*%X)
# mu
beta.hat.sample<-rmvnorm(n=10000,mean=beta.hat,sigma=V.beta)
hist(beta.hat.sample[,1])
abline(v=mean(beta.hat.sample[,1]),col="red")
# Histograms for coefficients 2:6 (the seventh column is not plotted here).
par(mfrow=c(2,3))
for(i in 1:5){
  hist(beta.hat.sample[,1+i])
  abline(v=mean(beta.hat.sample[,1+i]),col="red")
}
# one of the params post distributions is centered on zero.
# G prior
{
  # model: L^-1y = L^-1XB + v, v \sim N(0,I)
  # L = cholesky demcomp of the error covariance matrix
  # beta hat is given by
  # 3 steps from slides:
  # (1) cholesky decomp of V (error covaraince)
  # (2) solve LW = X and LZ=y
  # (3) compute LSE
  L=chol(Q)
  W=solve(L)%*%X
  Z=solve(L)%*%y
  # ok this seems to work
  # confirmed it re-captures beta.hat
  # G Prior
  # design matrix needs to have mu removed and colmeans of 0
  # should the mu be scaled?
  mu<-scale(W[,1])
  X<-scale(W[,-1])
  Z<-scale(Z) # doesnt work if Z isnt scaled, but it def should be
  # NOTE(review): G = 273 here; this is also the value still in scope when
  # the Bayes-factor section at the bottom of the script runs.
  G<-273
  # Double check this in office hour
  beta.hat.g<-solve(t(X)%*%X)%*%t(X)%*%Z
  # is this the right beta.hat? think so
  # they have shrunk from the other ones
  sigma.sq.sample.g<-rinvgamma(n=1000,N/2,0.5*var(Z)+(1/(2*G+1))*t(beta.hat.g)%*%t(X)%*%X%*%beta.hat.g)
  # it might not be correct that I had to sample sigma.squared in order to sample beta
  # Composition sampling: beta | sigma^2 draw-by-draw.
  beta.samples.g<-matrix(NA,1000,length(beta.hat.g))
  for(i in 1:1000){
    beta.samples.g[i,]<-rmvnorm(n=1,mean=(G/(1+G))*beta.hat.g,sigma=(G/(1+G))*sigma.sq.sample.g[i]*solve(t(X)%*%X))
  }
  # Six columns here (density, log-density, four group effects), so 1:6 is
  # in bounds in this section.
  par(mfrow=c(2,3))
  for(i in 1:6){
    hist(beta.samples.g[,i])
    abline(v=mean(beta.samples.g[,i]),col="red")
  }
  # G Prior R^2
  Yhat= X %*% beta.hat.g
  # Compute Residuals
  Resid=Z-Yhat
  SSE=t(Resid) %*% Resid
  R.sq.6 <- 1-SSE/sum((Z-mean(Z))^2)
}
R.sq.6
# plots
dev.off()
# Column 1 = density slope (beta), column 2 = log-density slope (gamma).
hist(beta.samples.g[,1],main=TeX('Model 6 Posterior Distribution for $\\beta$'),xlab=TeX("$\\beta$"))
abline(v=quantile(beta.samples.g[,1],0.05),col="blue")
abline(v=quantile(beta.samples.g[,1],0.95),col="blue")
abline(v=mean(beta.samples.g[,1]),col="red")
# quadratic term
hist(beta.samples.g[,2],main=TeX('Model 6 Posterior Distribution for $\\gamma$'),xlab=TeX("$\\gamma$"))
abline(v=quantile(beta.samples.g[,2],0.05),col="blue")
abline(v=quantile(beta.samples.g[,2],0.95),col="blue")
abline(v=mean(beta.samples.g[,2]),col="red")
# Columns 3:6 are the free group effects; recover the fifth as minus their sum.
etas<-cbind(beta.samples.g[,3:6],-rowSums(beta.samples.g[,3:6]))
boxplot(etas,use.cols=TRUE,
        main=TeX("Model 6 Posterior Distribution of Group Coefficients"),xlab=TeX("$\\eta _i$"))
abline(h=0,col="red")
# NOTE(review): this repeats the gamma histogram drawn just above.
hist(beta.samples.g[,2],main=TeX('Model 6 Posterior Distribution for $\\gamma$'),xlab=TeX("$\\gamma$"))
abline(v=mean(beta.samples.g[,2]),col="red")
abline(v=quantile(beta.samples.g[,2],0.05),col="blue")
abline(v=quantile(beta.samples.g[,2],0.95),col="blue")
plot(log(covid$population_original),Resid,xlab="log(population)",main="Model 6 Residuals vs log(Population)")
# errors have a trend
plot(Yhat,Resid,xlab=TeX("$\\hat{y}$"),main="Model 6 Residuals vs Fitted Values")
####################################################################################################################################
# Bayes Factor for G-prior model comparison
# change G
# compare models with the reference prior?
# F-test for nested models?
# posterior prediction?
# LaTeX?
# Number of mean parameters in each model (intercept included).
p1<-1
p2<-2
p3<-5
p4<-6
p5<-3
p6<-7
# Bayes factors of each model against the null (intercept-only) model, from
# the g-prior closed form:
#   BF[Mk : M0] = (1+G)^((N-pk-1)/2) / (1 + G*(1-R.sq.k))^((N-1)/2).
# NOTE(review): G is whatever value the last-run model section left in scope
# (273 after the model 6 block); rerun with one common G for a fair comparison.
BF.2.0<-(1+G)^((N-p2-1)/2)/(1+G*(1-R.sq.2))^((N-1)/2)
BF.3.0<-(1+G)^((N-p3-1)/2)/(1+G*(1-R.sq.3))^((N-1)/2)
BF.4.0<-(1+G)^((N-p4-1)/2)/(1+G*(1-R.sq.4))^((N-1)/2)
# FIX: BF.5.0 previously used p4 (model 4's dimension) instead of p5.
BF.5.0<-(1+G)^((N-p5-1)/2)/(1+G*(1-R.sq.5))^((N-1)/2)
# calling model 1 as Model 0 here, since it is the null model w/ intercept only
# formulas on the G-prior slides
BF.2.3<-(1+G)^((p3-p2)/2)*((1+G*(1-R.sq.3))/(1+G*(1-R.sq.2)))^((N-1)/2)
BF.2.4<-(1+G)^((p4-p2)/2)*((1+G*(1-R.sq.4))/(1+G*(1-R.sq.2)))^((N-1)/2)
BF.2.5<-(1+G)^((p5-p2)/2)*((1+G*(1-R.sq.5))/(1+G*(1-R.sq.2)))^((N-1)/2)
BF.2.6<-(1+G)^((p6-p2)/2)*((1+G*(1-R.sq.6))/(1+G*(1-R.sq.2)))^((N-1)/2)
BF.3.4<-(1+G)^((p4-p3)/2)*((1+G*(1-R.sq.4))/(1+G*(1-R.sq.3)))^((N-1)/2)
BF.3.5<-(1+G)^((p5-p3)/2)*((1+G*(1-R.sq.5))/(1+G*(1-R.sq.3)))^((N-1)/2)
BF.3.6<-(1+G)^((p6-p3)/2)*((1+G*(1-R.sq.6))/(1+G*(1-R.sq.3)))^((N-1)/2)
BF.4.5<-(1+G)^((p5-p4)/2)*((1+G*(1-R.sq.5))/(1+G*(1-R.sq.4)))^((N-1)/2)
BF.4.6<-(1+G)^((p6-p4)/2)*((1+G*(1-R.sq.6))/(1+G*(1-R.sq.4)))^((N-1)/2)
BF.5.6<-(1+G)^((p6-p5)/2)*((1+G*(1-R.sq.6))/(1+G*(1-R.sq.5)))^((N-1)/2)
# Print all Bayes factors.
BF.2.0
BF.3.0
BF.4.0
BF.5.0
BF.2.3
BF.2.4
BF.2.5
BF.2.6
BF.3.4
BF.3.5
BF.3.6
BF.4.5
BF.4.6
BF.5.6
# 5 is the best now
R.sq.2
R.sq.3
R.sq.4
R.sq.5
R.sq.6
####################################################################################################################################
# Conclusions
# are there other models worth considering?
# is the grouping significant?
# Is the population density significant?
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Residual plots for all the models
# Bayes Factor for the uninformative reference prior models?
# Posterior distributions of beta - do the credible intervals include 0?
# bayes factor does not include the prior information
# marginal does include the prior information
####################################################################################################################################
# marginal2
m2<-((gamma((N-1)/0.5)/(pi^((N+1)/2)*sqrt(N)))*norm(Z-mean(Z),type="2")^(1-N))*(((1+G)^((N-1-p2))/2)/(1+G*(1-R.sq.2))^((N-1)/2))
m3<-((gamma((N-1)/0.5)/(pi^((N+1)/2)*sqrt(N)))*norm(Z-mean(Z),type="2")^(1-N))*(((1+G)^((N-1-p3))/2)/(1+G*(1-R.sq.3))^((N-1)/2))
m4<-((gamma((N-1)/0.5)/(pi^((N+1)/2)*sqrt(N)))*norm(Z-mean(Z),type="2")^(1-N))*(((1+G)^((N-1-p4))/2)/(1+G*(1-R.sq.4))^((N-1)/2))
m5<-((gamma((N-1)/0.5)/(pi^((N+1)/2)*sqrt(N)))*norm(Z-mean(Z),type="2")^(1-N))*(((1+G)^((N-1-p5))/2)/(1+G*(1-R.sq.5))^((N-1)/2))
m6<-((gamma((N-1)/0.5)/(pi^((N+1)/2)*sqrt(N)))*norm(Z-mean(Z),type="2")^(1-N))*(((1+G)^((N-1-p6))/2)/(1+G*(1-R.sq.6))^((N-1)/2))
m2
m3
m4
m5
m6
# marginal for 5 is the biggest?
####################################################################################################################################
# G chosen from empirical bayes
g.eb.2<-max((R.sq.2/p2)/((1-R.sq.2)/(N-1-p2))-1,0)
g.eb.3<-max((R.sq.3/p3)/((1-R.sq.3)/(N-1-p3))-1,0)
g.eb.4<-max((R.sq.4/p4)/((1-R.sq.4)/(N-1-p4))-1,0)
g.eb.5<-max((R.sq.5/p5)/((1-R.sq.5)/(N-1-p5))-1,0)
g.eb.6<-max((R.sq.6/p6)/((1-R.sq.6)/(N-1-p6))-1,0)
BF.eb.2.0<-(1+g.eb.2)^((N-p2-1)/2)/(1+g.eb.2*(1-R.sq.2))^((N-1)/2)
BF.eb.3.0<-(1+g.eb.3)^((N-p3-1)/2)/(1+g.eb.3*(1-R.sq.3))^((N-1)/2)
BF.eb.4.0<-(1+g.eb.4)^((N-p4-1)/2)/(1+g.eb.4*(1-R.sq.4))^((N-1)/2)
BF.eb.5.0<-(1+g.eb.5)^((N-p5-1)/2)/(1+g.eb.5*(1-R.sq.5))^((N-1)/2)
BF.eb.6.0<-(1+g.eb.6)^((N-p6-1)/2)/(1+g.eb.6*(1-R.sq.6))^((N-1)/2)
BF.eb.2.3<-BF.eb.2.0/BF.eb.3.0
BF.eb.2.4<-BF.eb.2.0/BF.eb.4.0
BF.eb.2.5<-BF.eb.2.0/BF.eb.5.0
BF.eb.2.6<-BF.eb.2.0/BF.eb.6.0
BF.eb.3.4<-BF.eb.3.0/BF.eb.4.0
BF.eb.3.5<-BF.eb.3.0/BF.eb.5.0
BF.eb.3.6<-BF.eb.3.0/BF.eb.6.0
BF.eb.4.5<-BF.eb.4.0/BF.eb.5.0
BF.eb.4.6<-BF.eb.4.0/BF.eb.6.0
BF.eb.5.6<-BF.eb.5.0/BF.eb.6.0
BF.eb.2.3
BF.eb.2.4
BF.eb.2.5
BF.eb.2.6
BF.eb.3.4
BF.eb.3.5
BF.eb.3.6
BF.eb.4.5
BF.eb.4.6
BF.eb.5.6
# model 5 is best by the G-prior chosen by empirical bayes
####################################################################################################################################
# hyper g-prior
a<-4 # how to choose a?
((gamma((N-1)/0.5)/(pi^((N+1)/2)*sqrt(N)))*norm(Z-mean(Z),type="2")^(1-N))*((a-2)/(p2+a-2))*hypergeo((N-1)/2,1,(p2+a)/2,R.sq.2)
((gamma((N-1)/0.5)/(pi^((N+1)/2)*sqrt(N)))*norm(Z-mean(Z),type="2")^(1-N))*((a-2)/(p3+a-2))*hypergeo((N-1)/2,1,(p3+a)/2,R.sq.3)
# marginal for hyper-g
|
1e0e7bd5c68bad7514df9d00d52e0243b0e54242 | 76f7c8520d26012e6d90a798320b263d5dd9a8a1 | /man/render_graph.Rd | 0fc5ab8eb27ccd1da975382eb7041a32859a6031 | [] | no_license | timelyportfolio/DiagrammeR | 0480c5925208f91422152de9595d58001f4c9783 | 1d1a515cd4498eac1b69e48d5c68a4d1440ef243 | refs/heads/master | 2021-01-13T06:49:00.473829 | 2016-08-26T17:14:58 | 2016-08-26T17:14:58 | 28,595,575 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,387 | rd | render_graph.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/render_graph.R
\name{render_graph}
\alias{render_graph}
\title{Render the graph or output in various formats}
\usage{
render_graph(graph, output = NULL, layout = NULL, width = NULL,
height = NULL)
}
\arguments{
\item{graph}{a \code{dgr_graph} object, created
using the \code{create_graph} function.}
\item{output}{a string specifying the output type;
\code{graph} (the default) renders the graph using
the \code{grViz} function, \code{vivagraph}
renders the graph using the \code{vivagraph}
function, \code{visNetwork} renders the graph using
the \code{visnetwork} function, and \code{DOT}
outputs DOT code for the graph.}
\item{layout}{a string specifying a layout type for
a \code{vivagraph} rendering of the graph, either
\code{forceDirected} or \code{constant}.}
\item{width}{an optional parameter for specifying
the width of the resulting graphic in pixels.}
\item{height}{an optional parameter for specifying
the height of the resulting graphic in pixels.}
}
\description{
Using a \code{dgr_graph} object, either
render graph in the Viewer or output in various
formats.
}
\examples{
\dontrun{
# Create a node data frame (ndf)
nodes <-
create_nodes(
nodes = LETTERS,
label = TRUE,
type = "letter",
shape = sample(c("circle", "square"),
length(LETTERS),
replace = TRUE),
fillcolor = sample(c("aqua", "orange",
"pink", "lightgreen",
"black", "yellow"),
length(LETTERS),
replace = TRUE))
# Create an edge data frame (edf)
edges <-
create_edges(
from = sample(LETTERS, replace = TRUE),
to = sample(LETTERS, replace = TRUE),
rel = "letter_to_letter")
# Create a graph object using the ndf and edf, and,
# add a few attributes for the graph appearance
graph <-
create_graph(
nodes_df = nodes,
edges_df = edges,
graph_attrs = "layout = twopi",
node_attrs = c("fontname = Helvetica",
"style = filled"),
edge_attrs = c("color = gray20",
"arrowsize = 0.5"))
# Render the graph using Graphviz
render_graph(graph)
# Render the graph using VivaGraph
render_graph(graph, output = "vivagraph")
# Render the graph using visNetwork
render_graph(graph, output = "visNetwork")
}
}
|
f5fcd8be39cc22405306811e77acfaa4b84fdfe1 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ROI/examples/ROI_registered_reader.Rd.R | 78911ccca86edba5ff600a72f8b0f42c98b25553 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 196 | r | ROI_registered_reader.Rd.R | library(ROI)
### Name: ROI_registered_reader
### Title: List Registered Reader
### Aliases: ROI_registered_reader
### ** Examples
ROI_registered_reader()
ROI_registered_reader("mps_fixed")
|
7e6684cf14302d55d27dea3200458ad05ae10f29 | 875c89121e065a01ffe24d865f549d98463532f8 | /man/combine.Rd | 61d668535a57002519ff21412aeb403d67813de8 | [] | no_license | hugomflavio/actel | ba414a4b16a9c5b4ab61e85d040ec790983fda63 | 2398a01d71c37e615e04607cc538a7c154b79855 | refs/heads/master | 2023-05-12T00:09:57.106062 | 2023-05-07T01:30:19 | 2023-05-07T01:30:19 | 190,181,871 | 25 | 6 | null | 2021-03-31T01:47:24 | 2019-06-04T10:42:27 | R | UTF-8 | R | false | true | 486 | rd | combine.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper.R
\name{combine}
\alias{combine}
\title{Combine a list of vectors}
\usage{
combine(input)
}
\arguments{
\item{input}{a list of vectors with non-overlapping data.}
}
\value{
A vectorized combination of the data in the different list vectors.
}
\description{
Intended to combine vectors where, for each position, only one of the vectors contains data (i.e. the remaining are NA's).
}
\keyword{internal}
|
72b8574d2b604177762db8f0139ed249d2435b1e | 816c6c8f04227713c2b150bcb0b3b228d93f10bc | /RP_Stata.R | 1a920410b5336b2b760e7849530ab24a983fa5ad | [] | no_license | arbiax/STATA | ff5210fc15efd8af037c5a18e8c8ef511b24b840 | 708e1172ba835e76171b3806289b6a71c505bd19 | refs/heads/main | 2023-04-18T23:40:23.680466 | 2021-05-03T14:20:27 | 2021-05-03T14:20:27 | 363,949,272 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,721 | r | RP_Stata.R | # This code was written by Arbian Halilaj for XXX at the
# University of St. Gallen.
# Supervisor: XXX
# For questions contact arbian.halilaj@student.unisg.ch
# Last tested: 23/06/2020
#####################################################################
# Setup and data preparation
library(xtable)
library(naniar) # replace_with_na
library(tidyverse)
library(car) #Multicollinearity test
library(reshape2) #Correlation matrix
library(stargazer) #Output
library(ggpubr)
library(ggfortify) #autoplot diagnostics
library(lmtest) #Breusch-Pagan Test (Heteroskedasticity)
library(mfx) #marginal effects
library(dplyr)
library(plyr)
library(ggplot2)
library(reshape2)
library(quantreg) #quantile regression
rm(list = ls()) # memory cleaning
# Preparing data import from stata
# Loading foreign library
library(foreign)
# Importing data from stata file
#data <- read.dta("/Users/arbiax/Desktop/BA_firsttry/trashba/Albania2019.dta") #Stata
data <- read.csv(file = "/Users/arbiun/Desktop/RP/dataset_clean.csv") #CSV
# Select variables of interest
selected <- c("key",
"Target.Full.Name",
"Target.Macro.Industry",
"Acquiror.Full.Name",
"Acquiror.Macro.Industry",
"Acquiror.Stock.Price.180.Days.After.Announcement..USD.",
"Acquiror.Stock.Price.on.Announcement.Day..USD.",
"EBITDA.Margin",
"EBIT.Margin",
"Deal.Type......",
"Acquiror.Number.of.Employees",
"Acquiror.Employees",
"Target.Number.of.Employees",
"Net.Assets.1.Year.Prior..USD..Millions.",
"Net.Income.1.Year.Prior..USD..Millions.",
"Research...Development.Expense.3.Years.Prior..USD..Millions.",
"Research...Development.Expense.Last.12.Months..USD..Millions.",
"Return.on.Assets.Last.12.Months",
"Return.on.Equity.1.Fiscal.Year.Prior",
"Return.on.Equity.5.Fiscal.Years.Prior",
"Ratio.Of.Firm.Value.To.Sales",
"Net.Sales.1.Year.Prior..USD..Millions.",
"Target.Net.Sales.Last.12.Months..USD..Millions.",
"YEARREUTERS",
"TITLEANN",
"SALARY",
"BONUS",
"AGE",
"YEAREXECUCOMP",
"TITLE",
"REASON",
"GENDER",
"No_Acquisitions"
)
# Subset data
data.subset <- data[selected]
data.subset <- as.data.frame(data.subset)
df <- data.subset
df <- df[apply(df, 1, function(x) !any(is.na(x))),] #Remove nan
str(df)
#####################################################################
# Rename variables
#####################################################################
df<- data.subset %>%
rename(Hallo = x,
Ich = y,
Bims = z,
)
#####################################################################
# Recode variables
#####################################################################
## Dependent variables
df <- df%>%
mutate(GENDER=case_when(
GENDER=="MALE" ~ 1,
GENDER=="FEMALE" ~ 0
))
df$RD <- ifelse(df$Research...Development.Expense.3.Years.Prior..USD..Millions. > 0, 1, 0)
df$ChangeInPrice <- ifelse(df$Acquiror.Stock.Price.on.Announcement.Day..USD. < df$Acquiror.Stock.Price.180.Days.After.Announcement..USD., 1, 0)
df <- df %>% replace_with_na(replace = list(Acquiror.Employees = 0))
df$lnEmployees <- log(df$Acquiror.Employees)
df$Serial <- ifelse(df$No_Acquisitions > 4, 1, 0)
#####################################################################
df_model1 <- c("No_Acquisitions",
"GENDER",
"AGE",
"Return.on.Assets.Last.12.Months",
"RD",
"ChangeInPrice",
"SALARY"
)
df_model1 <- df[df_model1]
xtabs(~ Serial + GENDER, data=df)
cor(df$RD, df$GENDER, use = "complete.obs")
OLS <- lm(No_Acquisitions ~ GENDER + SALARY + AGE + Return.on.Assets.Last.12.Months + RD + ChangeInPrice + EBITDA.Margin + lnEmployees, data=df)
summary(OLS)
OLS <- lm(No_Acquisitions ~ GENDER*AGE + SALARY + Return.on.Assets.Last.12.Months + RD + ChangeInPrice + EBITDA.Margin + lnEmployees, data=df)
summary(OLS)
Logit <- glm(ChangeInPrice ~ GENDER + SALARY + AGE + Return.on.Assets.Last.12.Months + RD + EBITDA.Margin + lnEmployees, data=df, family = binomial(link = "logit"))
Logit <- glm(Serial ~ GENDER + SALARY + AGE + Return.on.Assets.Last.12.Months + RD + ChangeInPrice + EBITDA.Margin + lnEmployees, data=df, family = binomial(link = "logit"))
summary(Logit)
rqfit <- rq(No_Acquisitions ~ GENDER, data = df, tau = 0.99)
summary(rqfit)
#####################################################################
df$SalesGrowth <- 1/3*((df$Sales.1-df$Sales.0)/((df$Sales.1+df$Sales.0)/2))
df$Performance <- (df$Sales.1-df$Costofsales)/df$Employees.1
df$Performance <- ifelse (df$Performance < 0, NA, df$Performance)
x <- 1
df$Index <- as.numeric(df$x %in% x | df$y %in% x | df$z %in% x)
df <- df%>%
mutate(TaxAdmin=case_when(
TaxAdmin=="No obstacle" ~ 0,
TaxAdmin=="Minor obstacle" ~ 1,
TaxAdmin=="Moderate obstacle" ~2,
TaxAdmin=="Major obstacle" ~ 3,
TaxAdmin=="Very severe obstacle" ~ 4
))
df$TaxAdmin <- as.numeric(df$TaxAdmin)
df$PolicyObstacle <- with(df, ifelse(is.na(TaxAdmin), NA, ifelse(is.na(CustomTrade), NA, ifelse(is.na(BusinessPermit), NA, ifelse(is.na(LaborReg), NA, (TaxAdmin+CustomTrade+BusinessPermit+LaborReg)/4)))))
## Control variables
df$Age <- 2021-df$Year.0
df$lnAge <- log(df$Age)
df <- df%>%
mutate(Sector=case_when(
Sector=="Manufacturing" ~ 1,
Sector=="Retail services" ~ 0,
Sector=="Other services" ~ 0
))
df$Size <- as.numeric(df$Size)
df$Small <- ifelse(df$Size <= 2, 1, ifelse((df$Size <= 3) & (df$Size <= 4), 0, 0))
df$Medium <- ifelse(df$Size <= 2, 0, ifelse((df$Size <= 3) & (df$Size <= 4), 1, 0))
df$Large <- ifelse(df$Size <= 2, 0, ifelse((df$Size <= 3) & (df$Size <= 4), 0, 1))
df$Export <- as.numeric(df$Export)
df <- df %>% replace_with_na(replace = list(Export = c(-9,-7)))
df$Experience <- as.numeric(df$Experience)
df <- df %>% replace_with_na(replace = list(Experience = -9))
df$lnExperience <- log(df$Experience)
df <- df %>% replace_with_na(replace = list(lnExperience = "NaN"))
#####################################################################
#####################################################################
# REGRESSION MODELS
###################################################################
# Descriptive Stats
descript <- c("Hallo", "Ich", "Bims")
df_descript <- df[descript]
df_descript <- df_descript[apply(df_descript, 1, function(x) !any(is.na(x))),] #Remove nan
## Summary
summary(df_descript)
stargazer(df_descript)
##Correlation matrix
Modcor1 <- c("SalesGrowth", "Bribes", "RD",
"PolicyObstacle", "Sector", "Small", "Medium", "Large", "lnAge", "lnExperience",
"Foreign", "Export", "TrainingEmployees", "InformalCompetition")
Modcor1 <- df[Modcor1]
Modcor1 <- Modcor1[apply(Modcor1, 1, function(x) !any(is.na(x))),]
Modcor2 <- c("InnovationIndex", "Bribes", "Inspection.Bribe", "BribeIndex",
"PolicyObstacle", "InformalCompetition", "RD", "TechLicense",
"QualityCertificate", "Sector", "Small", "Medium", "Large",
"lnAge", "lnExperience", "Foreign", "Export", "TrainingEmployees")
sds <- df_model1
sds[] <- lapply(sds,as.numeric)
cormatweek <- round(cor(sds, method = "spearman"),2)
### Get upper triangle of the correlation matrix
get_upper_tri_week <- function(cormatweek){
cormatweek[lower.tri(cormatweek)]<- NA
return(cormatweek)
}
upper_tri_week <- get_upper_tri_week(cormatweek)
upper_tri_week
melted_cormat_week <- melt(upper_tri_week, na.rm = TRUE)
ggheatmap <- ggplot(data = melted_cormat_week, aes(Var2, Var1, fill = value))+
geom_tile(color = "white")+
scale_fill_gradient2(low = "blue", high = "red", mid = "white",
midpoint = 0, limit = c(-1,1), space = "Lab",
name="Pearson\nCorrelation") +
theme_minimal()+
theme(axis.text.x = element_text(angle = 45, vjust = 1,
size = 8, hjust = 1))+
coord_fixed()+
theme(axis.text.y = element_text(vjust = 1,
size = 8, hjust = 1))
### add numbers
ggheatmap +
geom_text(aes(Var2, Var1, label = value), color = "black", size = 3) +
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.ticks = element_blank(),
legend.justification = c(1, 0),
legend.position = c(0.6, 0.75),
legend.direction = "horizontal")+
guides(fill = guide_colorbar(barwidth = 7, barheight = 1,
title.position = "top", title.hjust = 0.5))
###################################################################
# MODEL 1: y=SalesGrowth, x=Bribes
model1 <- c("Hallo", "Ich", "Bims")
df_model1 <- df[model1]
df_model1 <- df_model1[apply(df_model1, 1, function(x) !any(is.na(x))),]
str(df_model1)
##Outliers
hist(df_model1$Hallo)
ggboxplot(df_model1, y = "Hallo", width = 0.2)
###Hallo
outliers <- boxplot(df_model1$Hallo, plot=FALSE)$out
df_model1 <- df_model1[-which(df_model1$Hallo %in% outliers),]
ggboxplot(df_model1, y = "Hallo", width = 0.2)
df_model1 <- subset(df_model1,!(df_model1$Hallo > quantile(df_model1$Hallo, probs=c(.15, .85))[2] | df_model1$Hallo < quantile(df_model1$Hallo, probs=c(.1, .9))[1]) )
#####################################################################
##Regression
#####################################################################
OLS <- lm(y ~ x, data=df_model1)
summary(OLS)
stargazer(OLS, title="Results", align=TRUE, no.space=TRUE)
#####################################################################
Logit <- glm(y ~ x, data=df_model4, family = binomial(link = "logit"))
summary(Logit)
stargazer(Logit, title="Results", align=TRUE, no.space=TRUE)
###confinterval
logistic <- Logit
confint(logistic)
plot(logistic)
#### odds
exp(logistic$coefficients)
#### Now calculate the overall "Pseudo R-squared" and its p-value
ll.null <- logistic$null.deviance/-2
ll.proposed <- logistic$deviance/-2
#### McFadden's Pseudo R^2 = [ LL(Null) - LL(Proposed) ] / LL(Null)
(ll.null - ll.proposed) / ll.null
#### chi-square value = 2*(LL(Proposed) - LL(Null))
#### p-value = 1 - pchisq(chi-square value, df = 2-1)
1 - pchisq(2*(ll.proposed - ll.null), df=1)
1 - pchisq((logistic$null.deviance - logistic$deviance), df=1)
#####################################################################
##Tests
#####################################################################
###Multicollinearity
cor(OLS$Hallo, OLS$SalesGrowth, use = "complete.obs")
cor <- cor(OLS)
correlation.matrix <- cor(OLS[, c(1, 2, 5, 6, 7, 8, 9, 11, 12, 13, 14, 18)], use = "complete.obs")
stargazer(correlation.matrix, title="Correlation Matrix")
vif_OLS <- vif(OLS)
vif_OLS <- round(vif_OLS, digits=2)
vif_OLS
###Diagnostics
par(mfrow=c(2,2))
plot(OLS) #or autoplot(model)
plot(OLS, 1) #Linearity
plot(OLS$Hallo, OLS$Ich)
plot(OLS, 2) #Normality
plot(OLS, 3) #Homoskedasticity
plot(OLS, 5) #Outliers
cooksd <- cooks.distance(OLS)
OLS %>%
top_n(3, wt = cooksd)
###Heteroskedasticity
bptest(OLS)
###Normality
shapiro.test(resid(OLS))
###Autocorrelation
Box.test(resid(OLS), lag = 10, type = "Ljung")
#####################################################################
#####################################################################
# Instrumental Variable Approach
#####################################################################
library(AER)
library(ivpack)
ModelIV <- c()
ModelIV <- df[ModelIV]
ModelIV <- ModelIV[apply(ModelIV, 1, function(x) !any(is.na(x))),]
IV = ivreg(y ~ x1 + a1 + a2+ a3 | a1 + a2+ a3 + z1,
data=ModelIV)
IV = ivreg(SalesGrowth ~ x1 | z1, data=ModelIV)
summary(IV, vcov = sandwich, diagnostics = TRUE)
#####################################################################
#####################################################################
# Visualize Interaction Effects
#####################################################################
library(sjPlot)
library(sjmisc)
theme_set(theme_sjplot())
plot_model(Model1.2.4, type = "pred", terms = c("Bribes", "PolicyObstacle"))
tips %>%
ggplot() +
aes(x = Bribes, color = PolicyObstacle, group = PolicyObstacle, y = SalesGrowth, data=df_model1) +
stat_summary(fun.y = mean, geom = "point") +
stat_summary(fun.y = mean, geom = "line")
#####################################################################
library(effects)
#Run the interaction
Inter.HandPick <- effect('Inspection.Bribe*PolicyObstacle', Model1.3.4,
xlevels=list(PolicyObstacle = c(0, 1, 2, 3, 4),
Inspection.Bribe = c(0, 1)),
se=TRUE, confidence.level=.95, typical=mean)
#Put data in data frame
Inter.HandPick <- as.data.frame(Inter.HandPick)
#Check out what the "head" (first 6 rows) of your data looks like
head(Inter.HandPick)
#Create a factor of the IQ variable used in the interaction
Inter.HandPick$PolicyObstacle <- factor(Inter.HandPick$PolicyObstacle,
levels=c(0, 1, 2, 3, 4),
labels=c("No", "Minor", "Moderate", "Major", "Very severe"))
#Create a factor of the Work Ethic variable used in the interaction
Inter.HandPick$Inspection.Bribe <- factor(Inter.HandPick$Inspection.Bribe,
levels=c(0, 1),
labels=c("No", "Yes"))
Plot.HandPick<-ggplot(data=Inter.HandPick, aes(x=Inspection.Bribe, y=fit, group=PolicyObstacle))+
geom_line(size=1, aes(color=PolicyObstacle))+
#scale_color_manual(values=wes_palette(n=5, name="Darjeeling2"))+
scale_color_brewer(palette="RdYlGn", direction = -1)+
ylim(-0.2,0.2)+
ylab("SalesGrowth")+
xlab("Inspection.Bribe")+
ggtitle("Interaction Effect (Model 1.3.4)")+
theme_stata(base_size = 10.7)
Plot.HandPick
#################################
#Run the interaction
Inter.HandPick2 <- effect('Bribes*InformalCompetition', Model1.1.6,
xlevels=list(InformalCompetition = c(1, 0),
Bribes = c(0, 0.2)),
se=TRUE, confidence.level=.95, typical=mean)
#Put data in data frame
Inter.HandPick2 <- as.data.frame(Inter.HandPick2)
#Check out what the "head" (first 6 rows) of your data looks like
head(Inter.HandPick2)
#Create a factor of the IQ variable used in the interaction
Inter.HandPick2$InformalCompetition <- factor(Inter.HandPick2$InformalCompetition,
levels=c(1, 0),
labels=c("Yes", "No"))
#Create a factor of the Work Ethic variable used in the interaction
Inter.HandPick2$Bribes <- factor(Inter.HandPick2$Bribes,
levels=c(0, 0.2),
labels=c("No Bribes", "High Bribes"))
Plot.HandPick2<-ggplot(data=Inter.HandPick2, aes(x=Bribes, y=fit, group=InformalCompetition))+
geom_line(size=1, aes(color=InformalCompetition))+
scale_color_manual(name = "Informal Competition", values=c("red", "darkgreen"), labels = c("Yes", "No"))+
ylim(0,1)+
ylab("Sales Growth")+
xlab("Bribes")+
ggtitle("Interaction Effect (Model 1.1.6)")+
theme_stata(base_size = 10.7)
Plot.HandPick2
|
ff9875d876cd1fae16624070daffabb20b8497f9 | 07ff53239fc01fcda3151dc2505203491dac8656 | /cachematrix.R | e5f2d49af89d047098da2525b476525249cc9bbd | [] | no_license | MKhanMuneer/ProgrammingAssignment2 | cbd3495b3a608df037c83a0f46afeefa4b7b21a2 | 849c7a4c832871dfa1f413ef22a5f0d18ff93bf1 | refs/heads/master | 2022-05-05T08:43:53.128208 | 2015-04-26T19:17:45 | 2015-04-26T19:17:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,188 | r | cachematrix.R | # Inversion of Matrix in R is usually time consuming,
# and caching the inverse of a matrix can be beneficial
# rather than computing it again and again.
# The following two functions are used to
# 1. Retrieve the inverse of a matrix if it has alreay been computed.
# 2. If the inverse of matrix has not been computed previously;
# compute the inverse of the matrix, and
# save the result in the cache for future function calling.
# Function "makeCacheMatrix()" creates a list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
makeCacheMatrix = function(x = matrix()) {
inv = NULL
set = function(y) {
x <<- y
inv <<- NULL
}
get = function() x
setinverse = function(inverse) inv <<- inverse
getinverse = function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
# Function "cacheSolve()" is written is such a way that it will
# return the inverse of the matrix.
# It will first check that if the inverse of the given matrix
# has already been computed. If so, it'll retrieve
# the previous result from the cache via "getinverse()" function
# and skips the computation. If not, it will compute the inverse,
# set the value in the cache via "setinverse()" function.
cacheSolve = function(x, ...) {
inv = x$getinverse()
if(!is.null(inv)) {
message("getting cached data.")
return(inv)
}
data = x$get()
inv = solve(data)
x$setinverse(inv)
inv
}
# For testing of the functions and code:
# > SAMPLE = c(3,0,2,2,0,-2,0,1,1)
# > x = matrix(SAMPLE, 3, 3, byrow = T)
# > m = makeCacheMatrix(x)
# > m$get()
# [,1] [,2] [,3]
# [1,] 3 0 2
# [2,] 2 0 -2
# [3,] 0 1 1
# No cache in the first function call:
# > cacheSolve(m)
# [,1] [,2] [,3]
# [1,] 0.2 0.2 0
# [2,] -0.2 0.3 1
# [3,] 0.2 -0.3 0
# Retrieving from the cache in the second function call:
# > cacheSolve(m)
# getting cached data.
# [,1] [,2] [,3]
# [1,] 0.2 0.2 0
# [2,] -0.2 0.3 1
# [3,] 0.2 -0.3 0
|
133c9f72f2e011ea7560a03455db1157f395d6fc | fe7168587db6ef3c191032b8ed317bd8dd5a23e3 | /Bootcamp-materials/Day2-Bayesian-Modeling/bowling.R | 1375ddc4eaf1aaea00214e383767d6ad8c85d96a | [
"CC0-1.0"
] | permissive | DS-BootCamp-DSI-Columbia/AY2019-2020-Winter-Collaboratory-Faculty | 811371070de4deb4684e57b6a3db2c1b44186b68 | 46d7a8edee14c02db7d78e672d444d35ab84305b | refs/heads/master | 2020-09-06T08:17:04.167732 | 2020-01-18T13:43:11 | 2020-01-18T13:43:11 | 220,372,969 | 12 | 14 | CC0-1.0 | 2020-01-16T06:38:07 | 2019-11-08T02:46:11 | HTML | UTF-8 | R | false | false | 721 | r | bowling.R | # computes the x-th Fibonacci number without recursion and with vectorization
F <- function(x) {
stopifnot(is.numeric(x), all(x == as.integer(x)))
sqrt_5 <- sqrt(5) # defined once, used twice
golden_ratio <- (1 + sqrt_5) / 2
return(round(golden_ratio ^ (x + 1) / sqrt_5))
}
# probability of knocking down x out of n pins
Pr <- function(x, n = 10) return(ifelse(x > n, 0, F(x)) / (-1 + F(n + 2)))
Omega <- 0:10 # 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
names(Omega) <- as.character(Omega)
joint_Pr <- matrix(0, nrow = 11, ncol = 11)
rownames(joint_Pr) <- colnames(joint_Pr) <- as.character(Omega)
for (x1 in Omega) {
Pr_x1 <- Pr(x1)
for (x2 in 0:(10 - x1))
joint_Pr[x1 + 1, x2 + 1] <- Pr_x1 * Pr(x2, 10 - x1)
}
|
0047b68bdaf5bff5eb60ee9f327f6a364399c70e | ab62bbb36ee030e69cecd30c8a7a1c56b4799864 | /man/pollutionhealthdata.Rd | f49c9d1b0d2853ed589f628c09c419e22b1dabb2 | [] | no_license | cran/CARBayesdata | db539deda5c8ce11e84389cc3097ccf2358681b8 | 07b0d6e86a2a5bcbbc4701bc4a620f54a596eef5 | refs/heads/master | 2022-07-03T13:42:02.656857 | 2022-05-12T12:30:01 | 2022-05-12T12:30:01 | 33,190,886 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,776 | rd | pollutionhealthdata.Rd | \name{pollutionhealthdata}
\alias{pollutionhealthdata}
\docType{data}
\title{Respiratory hospitalisation, air pollution and covariate data for the
Greater Glasgow and Clyde health board between 2007 and 2011.}
\description{
A data.frame object containing spatio-temporal data on respiratory
hospitalisations, air pollution concentrations and socio-economic deprivation
covariates for the 271 Intermediate Zones (IZ) that make up the Greater
Glasgow and Clyde health board in Scotland. Yearly data are available between
2007 and 2011 inclusive. These data are used in a worked example in the
vignette accompanying the CARBayesST package.
}
\usage{data(pollutionhealthdata)}
\format{
A data.frame object containing 1355 observations on the following 7 variables.
\describe{
\item{\code{IZ}}{The unique identifier for each IZ.}
\item{\code{year}}{The year the data relate to.}
\item{\code{observed}}{The observed numbers of hospitalisations due to
respiratory disease.}
\item{\code{expected}}{The expected numbers of hospitalisations due to
respiratory disease computed using indirect standardisation
from Scotland-wide respiratory hospitalisation rates.}
\item{\code{pm10}}{Average particulate matter (less than 10 microns)
concentrations.}
\item{\code{jsa}}{The percentage of working age people who are in receipt of
Job Seekers Allowance, a benefit paid to unemployed people looking for work.}
\item{\code{price}}{Average property price (divided by 100,000).}
}
}
\source{
These data were provided by the Scottish Government via http://statistics.gov.scot.
}
\examples{
data(pollutionhealthdata)
}
\keyword{datasets}
|
1b1c40518f74e92e7e1141afd6cbd711f1a5fe1d | 44a71491f4ebc032aaabf236f9740d149c84cafd | /Chapter_4/Chp_4_Example_5.R | 25867d5da12512d69c300b9bf36c16f2f9bd082a | [] | no_license | artofstat/RCode | 82ae8f7b3319888d3a5774fe2bcafbae3ed17419 | 8e8d55d1ac4bc111e5d14798f59033d755959ae5 | refs/heads/main | 2023-03-22T03:17:45.284671 | 2022-08-15T18:09:08 | 2022-08-15T18:09:08 | 503,484,584 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 829 | r | Chp_4_Example_5.R | #############################################################
## R code to reproduce statistical analysis in the textbook:
## Agresti, Franklin, Klingenberg
## Statistics: The Art & Science of Learning from Data
## 5th Edition, Pearson 2021
## Web: ArtofStat.com
## Copyright: Bernhard Klingenberg
############################################################
###################
### Chapter 4 ###
### Example 5 ###
###################
#########################################
## Selecting a Simple Random Sample ##
#########################################
# To draw a sample of size 10 accounts from 67 accounts (without replacement)
sample(67, 10)
# Note that `replace = FALSE` is a default argument in the `sample()` function
# To draw with replacement, you can set `replace = TRUE`
sample(67, 10, replace = TRUE) |
0617f3d7952e4a7a9e2674590df3b707c834a89f | c8c6fa007193c26b4a93cab4dc40b81350a5a23c | /3x.Get-data-for-percent-cover.R | a3d4f9350f6efe5629084cd116f90f467f4a1110 | [] | no_license | anitas-giraldo/GB_Habitat_Classification | ba15fa35deae813f6bdb787e76965962eb9579bc | e3def2c2d6fefe84af436e629dc8de771f92e16c | refs/heads/master | 2023-07-13T05:08:24.823385 | 2021-08-15T22:54:33 | 2021-08-15T22:54:33 | 253,971,022 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 19,804 | r | 3x.Get-data-for-percent-cover.R | ### ### ### ### ###
### Script to GET THE DATA to calculate percent cover of habitat classes ###
### Load libraries ----
library(ggplot2)
library(ggthemes)
library(cowplot)
library(sp)
library(spDta)
library(sf)
library(rgdal)
library(raster)
library(rgeos)
library(mapview)
library(tmap)
library(mapdata)
library(leaflet)
library(caTools)
library(reshape2)
library(tidyr)
library(car)
library(lattice)
library(latticeExtra)
library(dplyr)
library(raster)
library(rasterVis)
library(zoo)
library(sf)
library(fields)
library(geoR)
library(gstat)
library(ggsn)
library(ggspatial)
library(ggrepel)
library(patchwork)
#library(elsa)
#install.packages("corrplot")
#library(corrplot)
library(broman)
# Clear memory ----
rm(list=ls())
### Set directories ----
# All paths are relative to the directory containing this script (RStudio only).
w.dir<- dirname(rstudioapi::getActiveDocumentContext()$path)
d.dir <- paste(w.dir, "data", sep='/')
dt.dir <- paste(w.dir, "data/tidy", sep='/')
s.dir <- paste(w.dir, "spatial_data", sep='/')
p.dir <- paste(w.dir, "plots", sep='/')
o.dir <- paste(w.dir, "outputs", sep='/')
# http://oswaldosantos.github.io/ggsn/
# Read gb cmr poly ----
# NOTE(review): the st_read() result is immediately overwritten by readOGR()
# below, so `gb` ends up as a Spatial* object, not an sf object.
# NOTE(review): these are machine-specific absolute paths — the script will
# only run on the original author's machines.
gb <- st_read(dsn="C:/Users/00093391/Dropbox/UWA/Research Associate/PowAnalysis_for1sParksMeeting/Desktop/shapefiles")
gb <- readOGR(dsn="C:/Users/00093391/Dropbox/UWA/Research Associate/PowAnalysis_for1sParksMeeting/Desktop/shapefiles")
plot(gb)
levels(gb$ZoneName)
crs1 <- proj4string(gb) # "+proj=longlat +ellps=GRS80 +no_defs"
# for transformations --
# UTM-projected copy of the marine-park polygons; its CRS (crs2) is the target
# projection for all raster reprojection below.
gbu <- readOGR(dsn="G:/My Drive/Anita/Shapefiles/GeoBay_CMR_UTM.shp")
crs2 <- proj4string(gbu) # "+proj=utm +zone=50 +south +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"
# get poly for each zone --
# One SpatialPolygons* subset per Commonwealth Marine Reserve zone.
NPZ <- gbu[gbu$ZoneName=="National Park Zone",]
plot(NPZ)
HPZ <- gbu[gbu$ZoneName=="Habitat Protection Zone",]
MUZ <- gbu[gbu$ZoneName=="Multiple Use Zone",]
plot(MUZ)
SPZ <- gbu[gbu$ZoneName=="Special Purpose Zone (Mining Exclusion)",]
# Pick colors ----
# NOTE(review): the hex codes in the trailing comments do not look like the
# named crayon colours ("Fern" is green, not "#78dbe2") — confirm before
# relying on them.
sg <- brocolors("crayons")["Fern"] # "#78dbe2"
alg <- brocolors("crayons")["Raw Umber"] # "#1dacd6"
sand <- brocolors("crayons")["Unmellow Yellow"] # "#f75394"
pal1 <- c(sand, sg, alg )
## ## ## ## ## ##
#### AREAL COVERAGE BY RESOLUTION, SURVEY METHOD AND ZONE ####
# Refactored from 32 near-identical copy-pasted sections
# (2 bathymetry resolutions x 4 survey methods x 4 CMR zones).
#
# Fixes relative to the original sections:
# * projectRaster() is always called with method = "ngb". These are
#   categorical habitat rasters (1 = algae, 2 = seagrass, 3 = unvegetated);
#   the default bilinear interpolation would create invalid fractional
#   class codes. The original used "ngb" only for the BRUVs rasters.
# * Removed the stray `pred@data@values = 1` debug line that overwrote
#   every cell of the coarse BRUVs raster with class 1 before tallying.
# * Diagnostic plot() calls and the per-zone intermediate data frames
#   (fine.npz, coarse.muz, ...) are dropped; the combined tables and the
#   two CSV outputs are produced exactly as before.

# Zones in the same order as the original output (NPZ, HPZ, MUZ, SPZ).
zone_polys <- list(NPZ = NPZ, HPZ = HPZ, MUZ = MUZ, SPZ = SPZ)

# File-name suffix -> method label used in the output tables,
# in the original row order (BRUVs, AUV, FTV, DTV).
method_labels <- c(Bruvs = "Stereo-BRUVs", AUV = "AUV", FTV = "FTV", DTV = "DTV")

# Count raster cells per habitat class inside one zone and convert the
# counts to areal coverage (cell count x cell area, in map units^2).
tally_zone <- function(r, zone_poly) {
  masked <- mask(r, zone_poly)
  as.data.frame(masked) %>%
    group_by(category) %>%
    tally() %>%
    mutate(area = n * res(masked)[1] * res(masked)[2])
}

# Build the combined coverage table for one bathymetry resolution
# ("Fine" or "Coarse"): columns category, n, area, method, zone.
coverage_for_resolution <- function(resolution) {
  # Load and reproject each prediction raster once (the original re-read
  # and re-projected the same file for every zone).
  rasters <- lapply(names(method_labels), function(m) {
    pred <- raster(paste(o.dir, paste0("GBpred-", resolution, "-", m, ".tif"), sep = "/"))
    rat <- levels(pred[[1]])   # raster attribute table (habitat class labels)
    predu <- projectRaster(pred, crs = crs2, method = "ngb")  # categorical -> nearest neighbour
    levels(predu) <- rat
    predu
  })
  names(rasters) <- names(method_labels)

  bind_rows(lapply(names(zone_polys), function(zone_name) {
    zone_df <- bind_rows(lapply(names(method_labels), function(m) {
      out <- tally_zone(rasters[[m]], zone_polys[[zone_name]])
      out$method <- method_labels[[m]]
      out
    }))
    zone_df$zone <- zone_name
    zone_df
  }))
}

# Fine-resolution bathymetry ----
fineall <- coverage_for_resolution("Fine")
fineall
# save --
write.csv(fineall, paste(dt.dir, "Areal-coverage-fine.csv", sep = "/"))

# Coarse-resolution bathymetry ----
coarseall <- coverage_for_resolution("Coarse")
coarseall
# save --
write.csv(coarseall, paste(dt.dir, "Areal-coverage-coarse.csv", sep = "/"))
|
1f1c2622a8c6e495c35eb8d06aee937840db9a8a | 9db2fc31add11c86b1712421ee0c85f41120092f | /R/create_simulate_norm.R | 7cd17d8334349eda346ce4110e11c0aa07df2a67 | [] | no_license | Sayani07/hakear-drake | 21be865e8a7872772a79cc8dc6b2123000601c90 | 4b4fe52f69c2489e1dc0958f022c20a60d1c2d53 | refs/heads/master | 2023-01-18T21:55:01.400684 | 2020-11-19T07:05:30 | 2020-11-19T07:05:30 | 294,099,407 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 767 | r | create_simulate_norm.R | ##' .. content for \description{} (no empty lines) ..
##'
##' .. content for \details{} ..
##'
##' @title
##' @param nlevel
##' @param ncol
##' @param sim_dist
##' @param nsim
##' @return
##' @author Sayani07
##' @export
create_simulate_norm <- function(nlevel = 6,
ncol = 3,
nrow = 2,
sim_dist = dist_normal(5, 10),
nperm = 500,
nsim = 200) {
(1:nsim) %>%
map_df(function(i) {
sim_data <- create_data_sim1(nlevel, sim_dist) %>%
create_panel(ncol,nrow) %>%
boot_panel(nperm) %>%
compute_norm()
bind_cols(sim_id = i, sim_data = sim_data)
})
}
|
bd5a10e5aa5e663503ed4945102d9d2c8203217d | 1d85ea0fd495bbb892175f20676ae38f61baa475 | /R/readctval.R | 626a588f75c095e02698145be922891d3cd5e255 | [] | no_license | steingod/R-mipolsat | e6a3ddedd31f0eaf26f6f56bb5b30219cc63968a | a19c0c34557cb81faa4f9297c44413af8e59488b | refs/heads/master | 2021-01-19T20:29:57.560832 | 2013-05-28T20:33:58 | 2013-05-28T20:33:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,610 | r | readctval.R | #
# NAME:
# readctval
#
# PURPOSE:
# To read collocate HDF4 files containing validation data for SAFNWC CM.
#
# NOTES:
# Not fully finished...
#
# BUGS:
# Probably in the underlying C-function...
#   C functions do not transfer strings back properly...
#
# CVS_ID:
# $Id: readctval.R,v 1.3 2013-04-11 20:29:04 steingod Exp $
#
# AUTHOR:
# Øystein Godøy, MET/FOU, 30.01.2003
#
# MODIFIED:
# Øystein Godøy, METNO/FOU, 2013-04-11
#
# Read collocated HDF4 validation data for the SAFNWC cloud mask (CM).
#
# Two compiled C routines are invoked via .C():
#   "checkrec"  - counts the stored records matching the request
#   "readctval" - extracts the records into preallocated vectors
# The shared objects must already be loaded (see the commented-out
# dyn.load()/dyn.unload() calls), otherwise .C() will fail.
#
# Args:
#   filename:  path to the collocated HDF4 file (required).
#   classname: class to extract (default "cloud").
#   station:   station identifier, or "NA" for all stations.
#   start,end: period limits as "%d%b%Y" strings (e.g. "01Jan2003"),
#              or "NA" for no limit (encoded as 0 for the C side).
# Returns:
#   A data.frame with one row per extracted pixel, or NULL (invisibly)
#   when the filename is missing or no records are found.
readctval <- function(filename,classname="cloud",station="NA",start="NA",end="NA") {
    if (missing(filename)) {
        cat("Husk at filnavn må oppgis...\n")
        # Bug fix: the original used a bare `return;`, which evaluates the
        # `return` primitive as a value and does NOT exit the function.
        return(invisible(NULL))
    }
    # "NA" strings (and unparseable dates) become NA after strptime() and
    # are mapped to 0, the C-side sentinel for "no time limit".
    start <- as.POSIXct(strptime(start,format="%d%b%Y"))
    end <- as.POSIXct(strptime(end,format="%d%b%Y"))
    if (is.na(start)) start <- 0
    if (is.na(end)) end <- 0
    # First pass: count matching records so the output vectors can be
    # preallocated with the right size.
    noobs <- 0
    tmp <- .C("checkrec",
        filename=as.character(filename),
        noobs=as.integer(noobs),
        classname=as.character(classname),
        station=as.character(station),
        start=as.integer(start),end=as.integer(end))
    #dyn.unload("/home/steingod/software/R-functions/dumpcol/checkrec.so")
    # Bug fix: the original `if (tmp$noobs <= 0) return` did not exit
    # (bare `return` is just a value); now it does.
    if (tmp$noobs <= 0) return(invisible(NULL))
    cat(paste("Fant:",tmp$noobs,"lagrede enheter\n"))
    # Preallocate one slot per pixel of the 13x13 neighbourhood around
    # each observation.
    nopix <- 13*13
    size <- tmp$noobs*nopix
    stid <- vector(mode="integer",length=size)
    tid <- vector(mode="integer",length=size)
    N <- vector(mode="integer",length=size)
    CL <- vector(mode="integer",length=size)
    CM <- vector(mode="integer",length=size)
    CH <- vector(mode="integer",length=size)
    E <- vector(mode="integer",length=size)
    sss <- vector(mode="integer",length=size)
    cm <- vector(mode="integer",length=size)
    #dyn.load("/home/steingod/software/R-functions/dumpcol/readctval.so")
    # Second pass: fill the preallocated vectors with the actual data.
    tmp <- .C("readctval",
        filename=as.character(filename),
        noobs=as.integer(tmp$noobs),nopix=as.integer(nopix),
        station=as.character(station),
        start=as.integer(start),end=as.integer(end),
        stid=as.integer(stid),tid=as.integer(tid),
        N=as.integer(N),
        CL=as.integer(CL), CM=as.integer(CM), CH=as.integer(CH),
        E=as.integer(E), sss=as.integer(sss),
        cm=as.integer(cm))
    cat(paste("Number of records found:",tmp$noobs,"\n"))
    if (tmp$noobs <= 0) {
        return(cat("Bailing out...\n"))
    }
    cat(paste("Fant:",tmp$noobs,"lagrede enheter etter forkastning\n"))
    # 169 = 13*13 pixels per retained observation.
    validata <- tmp$noobs*169
    # Convert seconds-since-epoch to POSIXct.
    tmp$tid <- ISOdate(1970,1,1)+tmp$tid
    return(data.frame(
        stid=tmp$stid[1:validata],
        noobs=tmp$noobs[1:validata],
        tid=tmp$tid[1:validata],
        N=tmp$N[1:validata],
        CL=tmp$CL[1:validata],
        CM=tmp$CM[1:validata],
        CH=tmp$CH[1:validata],
        E=tmp$E[1:validata],
        sss=tmp$sss[1:validata],
        cm=tmp$cm[1:validata]
    ))
}
|
1f6bc9851951c7ea8a092b1ad68ba2da21fd487d | 36d3b6f2349ebdad12a996acfc21090130695a1b | /R/Venn.R | 70da26717b8556af1755575d6f33b626fb87d929 | [] | no_license | flajole/MApckg | da7be5c41e13cbf5c03b100e40ac0a1521306d34 | 229959e1b9e76034411dc8513cd5f7e9e63c3ef0 | refs/heads/master | 2021-01-18T16:36:38.247786 | 2016-02-10T14:11:23 | 2016-02-10T14:11:23 | 41,876,214 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,213 | r | Venn.R | # VENN DIAGRAM COUNTS AND PLOTS
getVennCounts <- function(x,include="both") {
x <- as.matrix(x)
include <- match.arg(include,c("both","up","down"))
x <- sign(switch(include,
both = abs(x),
up = x > 0,
down = x < 0
))
nprobes <- nrow(x)
ncontrasts <- ncol(x)
names <- colnames(x)
if(is.null(names)) names <- paste("Group",1:ncontrasts)
noutcomes <- 2^ncontrasts
outcomes <- matrix(0,noutcomes,ncontrasts)
colnames(outcomes) <- names
for (j in 1:ncontrasts)
outcomes[,j] <- rep(0:1,times=2^(j-1),each=2^(ncontrasts-j))
xlist <- list()
for (i in 1:ncontrasts) xlist[[i]] <- factor(x[,ncontrasts-i+1],levels=c(0,1))
counts <- as.vector(table(xlist))
structure(cbind(outcomes,Counts=counts),class="VennCounts")
}
# Plot Venn diagram
# Gordon Smyth, James Wettenhall.
# Capabilities for multiple counts and colors by Francois Pepin.
# 4 July 2003. Last modified 12 March 2010.
plotVennDiagram <- function(object,include="both",names,mar=rep(0,4),cex=1.2,lwd=1,circle.col,counts.col,show.include,...)
{
if (!is(object, "VennCounts")){
if (length(include)>2) stop("Cannot plot Venn diagram for more than 2 sets of counts")
if (length(include)==2) object.2 <- getVennCounts(object, include = include[2])
object <- getVennCounts(object, include = include[1])
}
else if(length(include==2)) include <- include[1]
nsets <- ncol(object)-1
if(nsets > 3) stop("Can't plot Venn diagram for more than 3 sets")
if(missing(names)) names <- colnames(object)[1:nsets]
counts <- object[,"Counts"]
if(length(include)==2) counts.2 <- object.2[, "Counts"]
if(missing(circle.col)) circle.col <- par('col')
if(length(circle.col)<nsets) circle.col <- rep(circle.col,length.out=nsets)
if(missing(counts.col)) counts.col <- par('col')
if(length(counts.col)<length(include)) counts.col <- rep(counts.col,length.out=length(include))
if(missing(show.include)) show.include <- as.logical(length(include)-1)
theta <- 2*pi*(0:360)/360
xcentres <- list(0,c(-1,1),c(-1,1,0))[[nsets]]
ycentres <- list(0,c(0,0),c(1/sqrt(3),1/sqrt(3),-2/sqrt(3)))[[nsets]]
r <- c(1.5,1.5,1.5)[nsets]
xtext <- list(-1.2,c(-1.2,1.2),c(-1.2,1.2,0))[[nsets]]
ytext <- list(1.8,c(1.8,1.8),c(2.4,2.4,-3))[[nsets]]
old.par <- par(mar=mar)
on.exit(par(old.par))
plot(x=0,y=0,type="n",xlim=c(-4,4),ylim=c(-4,4),xlab="",ylab="",axes=FALSE,...);
circle.col <- col2rgb(circle.col) / 255
circle.col <- rgb(circle.col[1,], circle.col[2,], circle.col[3,], 0.3)
for(i in 1:nsets) {
lines(xcentres[i]+r*cos(theta),ycentres[i]+r*sin(theta),lwd=lwd,col=circle.col[i])
polygon(xcentres[i] + r*cos(theta), ycentres[i] + r*sin(theta), col = circle.col[i], border = NULL)
text(xtext[i],ytext[i],names[i],cex=cex)
}
switch(nsets,
{
rect(-3,-2.5,3,2.5)
printing <- function(counts, cex, adj,col,leg){
text(2.3,-2.1,counts[1],cex=cex,col=col,adj=adj)
text(0,0,counts[2],cex=cex,col=col,adj=adj)
if(show.include) text(-2.3,-2.1,leg,cex=cex,col=col,adj=adj)
}
}, {
rect(-3,-2.5,3,2.5)
printing <- function(counts, cex, adj,col,leg){
text(2.3,-2.1,counts[1],cex=cex,col=col,adj=adj)
text(1.5,0.1,counts[2],cex=cex,col=col,adj=adj)
text(-1.5,0.1,counts[3],cex=cex,col=col,adj=adj)
text(0,0.1,counts[4],cex=cex,col=col,adj=adj)
if(show.include) text(-2.3,-2.1,leg,cex=cex,col=col,adj=adj)
}
}, {
rect(-3,-3.5,3,3.3)
printing <- function(counts, cex, adj,col,leg){
text(2.5,-3,counts[1],cex=cex,col=col,adj=adj)
text(0,-1.7,counts[2],cex=cex,col=col,adj=adj)
text(1.5,1,counts[3],cex=cex,col=col,adj=adj)
text(.75,-.35,counts[4],cex=cex,col=col,adj=adj)
text(-1.5,1,counts[5],cex=cex,col=col,adj=adj)
text(-.75,-.35,counts[6],cex=cex,col=col,adj=adj)
text(0,.9,counts[7],cex=cex,col=col,adj=adj)
text(0,0,counts[8],cex=cex,col=col,adj=adj)
if(show.include) text(-2.5,-3,leg,cex=cex,col=col,adj=adj)
}
}
)
adj <- c(0.5,0.5)
if (length(include)==2)
adj <- c(0.5,0)
printing(counts,cex,adj,counts.col[1],include[1])
if (length(include)==2) printing(counts.2,cex,c(0.5,1),counts.col[2],include[2])
invisible()
} |
e9903493a758f6f2ee81a8cf4e504dbf8aef220e | 62d445e283e7a0d5f5980572d2fb592e72eb7e4a | /ML_Ass_1.R | bb49d3526384f6a0b6afa91e95cd7fe144711280 | [] | no_license | clustersdata/MachineLearning-25 | 1d94b621fca8153848fcd43ff4a09eb54201d063 | 75199fb728f75bfbf10951856e7ae0fb0c4cfccb | refs/heads/master | 2022-06-01T09:56:59.188050 | 2014-04-15T22:55:52 | 2014-04-15T22:55:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,250 | r | ML_Ass_1.R | data1 <- read.table("ex1data1.txt", sep =",")
colnames(data1) <- c("Population","Profit")
plot(data1$Population, data1$Profit, pch=19, col="blue",
ylab = "Profit in $10,000s",
xlab = "Population of City in 10,000s")
data1.m <- as.matrix(data1)
dim(data1.m)
X.ini <- data1.m[, 1]
y <- data1.m[, 2]
plot(X.ini,y, pch=19, col="blue")
m = length(y.ini);
ones <- rep(1, m)
zeros <- rep(0, 2)
X <- cbind(ones, X.ini)
theta <- matrix(zeros, nrow = 2, ncol = 1)
iterations = 1500
alpha = 0.01
Cost_Function(X, y, theta)
theta <- GradientDescent(X, y, theta, alpha, iterations)[[1]]
predict <- matrix(c(1,3.5), nrow = 2, ncol =1)
t(predict) %*% theta
lines(X.ini, X %*% theta, type = "l", col="red")
abline(lm(y ~ X.ini))
########################################################
# Multi
data2 <- read.table("ex1data2.txt", sep =",")
data2.m <- as.matrix(data2)
X <- data2.m[, c(1,2)]
y = data2.m[, 3]
m = length(y);
X <- featureNormalize(X)
ones <- rep(1, m)
X <- cbind(ones, X)
alpha = 0.01;
iterations = 400
theta <- matrix(0, nrow = ncol(X), ncol = 1)
J_hist <- GradientDescent(X, y, theta, alpha, iterations)[[2]]
theta <- GradientDescent(X, y, theta, alpha, iterations)[[1]]
plot(J_hist[,1], type = "l")
|
f6616421a5d24c168db4460ac419b09fae67738d | 0919cfe03fb33cc4f2f59167536d948198d1635c | /man/occ_count.Rd | eea05b27bc43556de867041f1b8ec03c301e8529 | [] | no_license | cran/rgbif | d15490edf1df820891bc7022ecaa311c6d6aab9e | adfddbb05959bb6d549eca5369c5c3da644a395e | refs/heads/master | 2023-06-24T13:57:39.429513 | 2023-04-03T14:00:02 | 2023-04-03T14:00:02 | 17,699,179 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 4,438 | rd | occ_count.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/occ_count.r
\name{occ_count}
\alias{occ_count}
\title{Get number of occurrence records.}
\usage{
occ_count(..., occurrenceStatus = "PRESENT", curlopts = list())
}
\arguments{
\item{...}{parameters passed to \code{occ_search()}.}
\item{occurrenceStatus}{(character) Default is "PRESENT". Specify whether
search should return "PRESENT" or "ABSENT" data.}
\item{curlopts}{(list) curl options.}
}
\value{
The occurrence count of the \code{occ_search()} query.
}
\description{
Get number of occurrence records.
}
\details{
\code{occ_count()} is a short convenience wrapper for
\code{occ_search(limit=0)$meta$count}.
The current version (since rgbif 3.7.6) of \code{occ_count()} uses a different
GBIF API endpoint from previous versions. This change greatly improves
the usability of \code{occ_count()}. Legacy parameters \code{georeferenced}, \code{type},
\code{date}, \code{to}, \code{from} are no longer supported and not guaranteed to work
correctly.
Multiple values of the type \code{c("a","b")} will give an error,
but \code{"a;b"} will work.
}
\examples{
\dontrun{
# total occurrences mediated by GBIF
occ_count() # should be > 2 billion!
# number of plant occurrences
occ_count(kingdomKey=name_backbone("Plantea")$usageKey)
occ_count(scientificName = 'Ursus americanus')
occ_count(country="DK") # found in Denmark
occ_count(country="DK;US") # found in Denmark and United States
occ_count(publishingCountry="US") # published by the United States
# number of repatriated eBird records in India
occ_count(repatriated = TRUE,country="IN")
occ_count(taxonKey=212) # number of bird occurrences
# between years 1800-1900
occ_count(basisOfRecord="PRESERVED_SPECIMEN", year="1800,1900")
occ_count(recordedBy="John Waller") # recorded by John Waller
occ_count(decimalLatitude=0, decimalLongitude=0) # exactly on 0,0
# close to a known iso2 centroid
occ_count(distanceFromCentroidInMeters="0,2000")
# close to a known iso2 centroid in Sweden
occ_count(distanceFromCentroidInMeters="0,2000",country="SE")
occ_count(hasCoordinate=TRUE) # with coordinates
occ_count(protocol = "DIGIR") # published using DIGIR format
occ_count(mediaType = 'StillImage') # with images
# number of occurrences iucn status "critically endangered"
occ_count(iucnRedListCategory="CR")
occ_count(verbatimScientificName="Calopteryx splendens;Calopteryx virgo")
occ_count(
geometry="POLYGON((24.70938 48.9221,24.71056 48.92175,24.71107
48.92296,24.71002 48.92318,24.70938 48.9221))")
# getting a table of counts using the facets interface
# occurrence counts by year
occ_count(facet="year")
occ_count(facet="year",facetLimit=400)
# top scientificNames from Japan
occ_count(facet="scientificName",country="JP")
# top countries publishing specimen bird records between 1850 and 1880
occ_count(facet="scientificName",taxonKey=212,basisOfRecord="PRESERVED_SPECIMEN"
,year="1850,1880")
# Number of present or absence records of Elephants
occ_count(facet="occurrenceStatus",scientificName="Elephantidae")
# top 100 datasets publshing occurrences to GBIF
occ_count(facet="datasetKey",facetLimit=100)
# top datasets publishing country centroids on GBIF
occ_count(facet="datasetKey",distanceFromCentroidInMeters="0")
# common values for coordinateUncertaintyInMeters for museum specimens
occ_count(facet="coordinateUncertaintyInMeters",basisOfRecord="PRESERVED_SPECIMEN")
# number of iucn listed bird and insect occurrences in Mexico
occ_count(facet="iucnRedListCategory",taxonKey="212;216",country="MX")
# most common latitude values mediated by GBIF
occ_count(facet="decimalLatitude")
# top iNaturalist users publishing research-grade obs to GBIF
occ_count(facet="recordedBy",datasetKey="50c9509d-22c7-4a22-a47d-8c48425ef4a7")
# top 100 iNaturalist users from Ukraine
occ_count(facet="recordedBy",datasetKey="50c9509d-22c7-4a22-a47d-8c48425ef4a7"
,country="UA",facetLimit=100)
# top institutions publishing specimen occurrences to GBIF
occ_count(facet="institutionCode",basisOfRecord="PRESERVED_SPECIMEN")
}
}
\seealso{
\code{\link[=occ_count_year]{occ_count_year()}}, \code{\link[=occ_count_country]{occ_count_country()}}, \code{\link[=occ_count_pub_country]{occ_count_pub_country()}},
\code{\link[=occ_count_basis_of_record]{occ_count_basis_of_record()}}
}
|
3b1161c770bfd2127b4863bfed27cf3f7797dfd8 | 40c79c40ba4eecf9406203e19e6add03970caa81 | /Example2/Frank_m1/BF_optX.R | 9ee58c71528481612525a3476d7741a0cea882d5 | [] | no_license | SenarathneSGJ/Design_for_Copula | a8286a1622d2aad0069e71c67790e11e898aa3b2 | 5cf41d6a734ea23bdc63568158bacd2c82cffe57 | refs/heads/master | 2020-04-22T02:07:07.054722 | 2020-01-31T04:51:05 | 2020-01-31T04:51:05 | 170,037,003 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 697 | r | BF_optX.R | BF_optX=function(theta,W,post_model_probs,LogZs) # Next design point via ACE
# Body of BF_optX(theta, W, post_model_probs, LogZs): selects the next design
# point for the Frank copula model by maximising an entropy-based utility
# over a fixed grid of candidate design points (ACE approach).
{
# candidate design points: regular grid on (0.05, 2] with step 0.05
tempX <- data.frame(X=seq(0.05,2,by=0.05))
print(tempX)
# evaluate the utility of every candidate in parallel with B = 5000 draws;
# u_Entropy, Sigma and the PredictY_* / likelihood helpers are defined
# elsewhere in the project and are exported explicitly because %dopar%
# workers do not share the master's global environment
utes=foreach(i = 1:nrow(tempX),.packages= c("mvtnorm","rootSolve","stats","Rcpp"),.noexport = c("LogLike_m2_Gum","LogLike_m3_Cl","LogLike_m4_Pr"),.export=c("Sigma","u_Entropy","PredictY_Fr","PredictY_Gum","fn2","integrand","PredictY_Cl","PredictY_Pr","likelihood_m1_Fr"),.verbose=TRUE,.combine = c) %dopar%
{
u_Entropy(design_all=tempX[i,],theta=theta,W=W,post_model_probs=post_model_probs,LogZs=LogZs,B=5000)
}
print(utes)
# pick the candidate with the highest utility
item=which.max(utes)
OptX=tempX[item,]
# return the chosen design point, its utility and the full utility profile
out=list(X=OptX,utility=utes[item],utes_t=utes)
return(out)
} |
2ce1813e8453dcc9845f56f87ea5fe9b3a1e89e7 | 995a119204394a02ea5829e8aafba3755f61b306 | /Rcwl/tl_lancet.R | 9af2b0cfcc6d03cd733ac4ae4508d2e9fba9b900 | [] | no_license | truwl/roswellpark | de18b994dac583e339e1061bcd3e84aa52e1d7c1 | 3a0de3ee9995779cb29e07230e3d8e8f153567ca | refs/heads/master | 2023-03-04T08:41:09.670907 | 2020-12-16T23:02:08 | 2020-12-16T23:02:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 934 | r | tl_lancet.R | ## https://github.com/nygenome/lancet
# CWL tool definition for Lancet (somatic variant caller, nygenome/lancet).
# Builds the `lancet` cwlParam object: inputs, docker requirement and stdout
# capture (the caller's VCF is written to standard output).

# tumor / normal BAM files; their .bai indexes travel along as secondary files
tumor_bam <- InputParam(id = "tbam", type = "File", prefix = "--tumor",
                        secondaryFiles = ".bai")
normal_bam <- InputParam(id = "nbam", type = "File", prefix = "--normal",
                         secondaryFiles = ".bai")

# indexed reference genome (FASTA + .fai)
reference <- InputParam(id = "ref", type = "File",
                        prefix = "--ref", secondaryFiles = ".fai")

# regions to call over (BED) and degree of parallelism
regions_bed <- InputParam(id = "region", type = "File", prefix = "--bed")
num_threads <- InputParam(id = "threads", type = "int", prefix = "--num-threads")

# stdout is captured into a VCF named after both input BAMs
vcf_out <- OutputParam(id = "vcf", type = "stdout")

docker_req <- list(class = "DockerRequirement",
                   dockerPull = "kfdrc/lancet:1.0.7")

lancet <- cwlParam(baseCommand = "/lancet-1.0.7/lancet",
                   requirements = list(docker_req),
                   inputs = InputParamList(tumor_bam, normal_bam, reference,
                                           regions_bed, num_threads),
                   outputs = OutputParamList(vcf_out),
                   stdout = "$(inputs.tbam.nameroot)_$(inputs.nbam.nameroot).vcf")
|
4869e3308a10635f54d1d7f5d1ee7f4a6a4e194c | c1034eb8f34b18105acf3244bf9a0b0339d6ca8d | /R/mdaplotg.R | 750c907d540040f9cd142a419b4c9b3d11fe0e88 | [
"MIT"
] | permissive | svkucheryavski/mdatools | f8d4eafbb34d57283ee753eceea1584aed6da3b9 | 2e3d262e8ac272c254325a0a56e067ebf02beb59 | refs/heads/master | 2023-08-17T16:11:14.122769 | 2023-08-12T16:58:49 | 2023-08-12T16:58:49 | 11,718,739 | 31 | 11 | NOASSERTION | 2020-07-23T18:50:22 | 2013-07-28T11:10:36 | R | UTF-8 | R | false | false | 16,671 | r | mdaplotg.R | #' Show legend for mdaplotg
#'
#' @description
#' Shows a legend for plot elements or their groups.
#'
#' @param legend
#' vector with text elements for the legend items
#' @param col
#' vector with color values for the legend items
#' @param pt.bg
#' vector with background colors for the legend items (e.g. for pch = 21:25)
#' @param pch
#' vector with marker symbols for the legend items
#' @param lty
#' vector with line types for the legend items
#' @param lwd
#' vector with line width values for the legend items
#' @param cex
#' vector with cex factor for the points
#' @param bty
#' border type for the legend
#' @param position
#' legend position ("topright", "topleft', "bottomright", "bottomleft", "top", "bottom")
#' @param plot
#' logical, show legend or just calculate and return its size
#' @param ...
#' other parameters
#'
mdaplotg.showLegend <- function(legend, col, pt.bg = NA, pch = NULL, lty = NULL, lwd = NULL,
  cex = 1, bty = "o", position = "topright", plot = TRUE, ...) {

  # positions that keep all legend items in one column vs. those that
  # spread them horizontally (one column per item)
  single.col.positions <- c("topright", "topleft", "bottomright", "bottomleft", "right", "left")
  multi.col.positions <- c("top", "bottom")

  if (!(position %in% c(single.col.positions, multi.col.positions))) {
    stop("Wrong values for 'legend.position' argument!")
  }

  ncol <- if (position %in% single.col.positions) 1 else length(legend)

  # scale the vertical inset by the width/height ratio of the plotting
  # region so the legend keeps the same visual offset from both axes
  plt <- par("plt")
  aspect <- (plt[2] - plt[1]) / (plt[4] - plt[3])
  inset <- c(0.02, 0.02 * aspect)

  # NOTE: the `bty` argument is accepted for backward compatibility but is
  # not forwarded to legend() (same as in the original implementation)
  legend(position, legend, col = col, pt.bg = pt.bg, pch = pch, lty = lty, pt.cex = cex,
    lwd = lwd, cex = 0.85, plot = plot, inset = inset, bg = "white", box.lwd = 0.75,
    box.col = "gray", ncol = ncol, ...)
}
#' Prepare data for mdaplotg
#'
#' @param data
#' datasets (in form of list, matrix or data frame)
#' @param type
#' vector with type for dataset
#' @param groupby
#' factor or data frame with factors - used to split data matrix into groups
#'
#' @return
#' list of datasets
#'
#' The method should prepare data as a list of datasets (matrices or data frames). One list
#' element will be used to create one plot series.
#'
#' If `data` is matrix or data frame and not `groupby` parameter is provided, then every row
#' will be taken as separate set. This option is available only for line or bar plots.
#'
mdaplotg.prepareData <- function(data, type, groupby) {

  # if already a list - remove NULL elements and return
  if (is.list(data) && !is.data.frame(data)) return(data[!sapply(data, is.null)])

  if (is.null(groupby)) {
    # take every row of matrix or data frame as separate group
    # (only meaningful for bar, line and line-with-points plots)
    if (!all(type %in% c("h", "l", "b"))) {
      stop("Group plot with matrix or data frame can be made only for types 'h', 'l' and 'b'.")
    }

    # add fake row names so the list elements below get usable names
    if (is.null(rownames(data))) {
      rownames(data) <- seq_len(nrow(data))
    }

    # split data into a list of subsets for each group
    # (mda.subset is a package helper that preserves mdatools attributes)
    data_list <- list()
    for (i in seq_len(nrow(data))) {
      data_list[[rownames(data)[i]]] <- mda.subset(data, subset = i)
    }

    # redefine the data with list
    return(data_list)
  }

  # if groupby is provided - use it to split rows into groups
  ## check that groupby is a factor or data frame with factor columns
  if (!is.data.frame(groupby)) groupby <- as.data.frame(groupby)
  if (!all(unlist(lapply(groupby, is.factor)))) {
    stop("Parameter 'groupby' should be a factor or data frame with several factors.")
  }

  # remember mdatools attributes (exclrows, labels, etc.) so they can be
  # restored on each subset after the split
  attrs <- mda.getattr(data)
  data <- as.data.frame(data)

  # in this case if labels = indices generate labels for each case:
  # keep the original row index in a temporary column so it survives split()
  data$row.ind <- seq_len(nrow(data))
  data_list <- split(data, groupby)

  for (i in seq_len(length(data_list))) {
    row.ind <- data_list[[i]]$row.ind
    # drop the temporary column (subset() resolves row.ind as the column name here)
    data_list[[i]] <- subset(data_list[[i]], select = -row.ind)
    data_list[[i]] <- mda.setattr(data_list[[i]], attrs)
    # translate global excluded-row indices into positions within this subset
    attr(data_list[[i]], "exclrows") <- which(row.ind %in% attrs$exclrows)
    attr(data_list[[i]], "labels") <- row.ind
  }

  return(data_list)
}
#' Check mdaplotg parameters and replicate them if necessary
#'
#' @param param
#' A parameter to check
#' @param name
#' name of the parameter (needed for error message)
#' @param is.type
#' function to use for checking parameter type
#' @param ngroups
#' number of groups (plot series)
#'
mdaplotg.processParam <- function(param, name, is.type, ngroups) {

  # a single value is replicated so every group gets one
  param <- if (length(param) == 1) rep(param, ngroups) else param

  # the old message hardcoded "numeric", which was wrong when the function is
  # called with is.character (for "type") or mdaplot.areColors (for colors) -
  # report a generic type mismatch instead
  if (!all(is.type(param))) {
    stop(paste0('Parameter "', name, '" has values of a wrong type!'))
  }

  if (length(param) != ngroups) {
    stop(paste0('Parameter "', name, '" should be specified for each group or one for all!'))
  }

  return(param)
}
#' Create and return vector with legend values
#'
#' @param ps
#' list with plot series
#' @param data.names
#' names of the data sets
#' @param legend
#' legend values provided by user
#'
#' @return
#' vector of text values for the legend
#'
mdaplotg.getLegend <- function(ps, data.names, legend = NULL) {

  # no user supplied values: take the data set names first, otherwise
  # collect the "name" attribute stored with every plot series
  if (is.null(legend)) {
    legend <- data.names
    if (is.null(legend)) {
      legend <- unlist(lapply(ps, function(p) p$data_attrs$name))
    }
  }

  if (is.null(legend)) {
    stop("Can not find values for the legend items.")
  }

  # one legend item per plot series is required
  if (length(legend) != length(ps)) {
    stop("Number of values for 'legend' is not the same as number of plot series.")
  }

  return(legend)
}
#' Compute x-axis limits for mdaplotg
#'
#' @param ps
#' list with plotseries
#' @param xlim
#' limits provided by user
#' @param show.excluded
#' logical, will excluded values also be shown
#' @param show.legend
#' will legend be shown on the plot
#' @param show.labels
#' will labels be shown on the plot
#' @param legend.position
#' position of legend on the plot (if shown)
#' @param bwd
#' size of bar for bar plot
#'
#' @return
#' vector with two values
#'
mdaplotg.getXLim <- function(ps, xlim, show.excluded, show.legend, show.labels,
  legend.position, bwd = NULL) {

  # user defined limits always win
  if (!is.null(xlim)) return(xlim)

  # limits for a single plot series (lower, upper)
  getLimits <- function(p) {
    mdaplot.getXAxisLim(p, xlim = NULL, show.labels = show.labels,
      show.excluded = show.excluded, bwd = bwd)
  }

  # 2 x n matrix: first row - lower limits, second row - upper limits
  lims <- vapply(ps, getLimits, numeric(2))
  xlim <- c(min(lims[1, ]), max(lims[2, ]))

  # reserve 10% of the axis range on the side where the legend will appear
  if (show.legend) {
    margin <- c(
      -0.1 * (regexpr("left", legend.position) > 0),
      0.1 * (regexpr("right", legend.position) > 0)
    )
    xlim <- xlim + diff(xlim) * margin
  }

  return(xlim)
}
#' Compute y-axis limits for mdaplotg
#'
#' @param ps
#' list with plotseries
#' @param ylim
#' limits provided by user
#' @param show.excluded
#' logical, will excluded values also be shown
#' @param show.legend
#' will legend be shown on the plot
#' @param legend.position
#' position of legend on the plot (if shown)
#' @param show.labels
#' logical, will data ponit labels also be shown
#'
#' @return
#' vector with two values
#'
mdaplotg.getYLim <- function(ps, ylim, show.excluded, show.legend, legend.position, show.labels) {

  # user defined limits always win
  if (!is.null(ylim)) return(ylim)

  # limits for a single plot series (lower, upper)
  getLimits <- function(p) {
    mdaplot.getYAxisLim(p, ylim = NULL, show.excluded = show.excluded,
      show.labels = show.labels)
  }

  # 2 x n matrix: first row - lower limits, second row - upper limits
  lims <- vapply(ps, getLimits, numeric(2))
  ylim <- c(min(lims[1, ]), max(lims[2, ]))

  # reserve 10% of the axis range on the side where the legend will appear
  if (show.legend) {
    margin <- c(
      -0.1 * (regexpr("bottom", legend.position) > 0),
      0.1 * (regexpr("top", legend.position) > 0)
    )
    ylim <- ylim + diff(ylim) * margin
  }

  return(ylim)
}
#' Plotting function for several plot series
#'
#' @description
#' \code{mdaplotg} is used to make different kinds of plots or their combination for several sets
#' of objects.
#'
#' @param data
#' a matrix, data frame or a list with data values (see details below).
#' @param type
#' type of the plot ('p', 'l', 'b', 'h', 'e').
#' @param pch
#' a character for markers (same as \code{plot} parameter).
#' @param lty
#' the line type (same as \code{plot} parameter).
#' @param lwd
#' the line width (thickness) (same as \code{plot} parameter).
#' @param cex
#' the cex factor for the markers (same as \code{plot} parameter).
#' @param bwd
#' a width of a bar as a percent of a maximum space available for each bar.
#' @param legend
#' a vector with legend elements (if NULL, no legend will be shown).
#' @param xlab
#' a title for the x axis (same as \code{plot} parameter).
#' @param ylab
#' a title for the y axis (same as \code{plot} parameter).
#' @param main
#' an overall title for the plot (same as \code{plot} parameter).
#' @param labels
#' what to use as labels ('names' - row names, 'indices' - row indices, 'values' - values).
#' @param ylim
#' limits for the y axis (if NULL, will be calculated automatically).
#' @param xlim
#' limits for the x axis (if NULL, will be calculated automatically).
#' @param col
#' colors for the plot series
#' @param colmap
#' a colormap to generate colors if \code{col} is not provided
#' @param legend.position
#' position of the legend ('topleft', 'topright', 'top', 'bottomleft', 'bottomright', 'bottom').
#' @param show.legend
#' logical, show or not legend for the data objects.
#' @param show.labels
#' logical, show or not labels for the data objects.
#' @param show.lines
#' vector with two coordinates (x, y) to show horizontal and vertical line cross the point.
#' @param show.grid
#' logical, show or not a grid for the plot.
#' @param grid.lwd
#' line thinckness (width) for the grid
#' @param grid.col
#' line color for the grid
#' @param xticks
#' tick values for x axis.
#' @param xticklabels
#' labels for x ticks.
#' @param yticks
#' tick values for y axis.
#' @param yticklabels
#' labels for y ticks.
#' @param xlas
#' orientation of xticklabels
#' @param ylas
#' orientation of yticklabels
#' @param lab.col
#' color for data point labels.
#' @param lab.cex
#' size for data point labels.
#' @param show.excluded
#' logical, show or hide rows marked as excluded (attribute `exclrows`)
#' @param groupby
#' one or several factors used to create groups of data matrix rows (works if data is a matrix)
#' @param opacity
#' opacity for plot colors (value between 0 and 1)
#' @param ...
#' other plotting arguments.
#'
#' @details
#' The \code{mdaplotg} function is used to make a plot with several sets of objects. Simply
#' speaking, use it when you need a plot with legend. For example to show line plot with spectra
#' from calibration and test set, scatter plot with height and weight values for women and men, and
#' so on.
#'
#' Most of the parameters are similar to \code{\link{mdaplot}}, the difference is described below.
#'
#' The data should be organized as a list, every item is a matrix (or data frame) with data for one
#' set of objects. Alternatively you can provide data as a matrix and use parameter
#' \code{groupby} to create the groups. See tutorial for more details.
#'
#' There is no color grouping option, because color is used to separate the sets. Marker symbol,
#' line style and type, etc. can be defined as a single value (one for all sets) and as a vector
#' with one value for each set.
#'
#' @author
#' Sergey Kucheryavskiy (svkucheryavski@@gmail.com)
#'
#' @export
mdaplotg <- function(
  data, groupby = NULL, type = "p", pch = 16, lty = 1, lwd = 1, cex = 1,
  col = NULL, bwd = 0.8, legend = NULL, xlab = NULL, ylab = NULL, main = NULL, labels = NULL,
  ylim = NULL, xlim = NULL, colmap = "default", legend.position = "topright",
  show.legend = TRUE, show.labels = FALSE, show.lines = FALSE, show.grid = TRUE, grid.lwd = 0.5,
  grid.col = "lightgray", xticks = NULL, xticklabels = NULL, yticks = NULL, yticklabels = NULL,
  show.excluded = FALSE, lab.col = "darkgray", lab.cex = 0.65, xlas = 1,
  ylas = 1, opacity = 1, ...) {

  # split data into groups - one list element per plot series
  name <- attr(data, "name", exact = TRUE)
  data <- mdaplotg.prepareData(data, type, groupby)
  ngroups <- length(data)

  # check if plot.new() should be called first (no device open yet)
  if (dev.cur() == 1) plot.new()

  # replicate scalar graphical parameters so there is one value per group
  type <- mdaplotg.processParam(type, "type", is.character, ngroups)
  pch <- mdaplotg.processParam(pch, "pch", is.numeric, ngroups)
  lty <- mdaplotg.processParam(lty, "lty", is.numeric, ngroups)
  lwd <- mdaplotg.processParam(lwd, "lwd", is.numeric, ngroups)
  cex <- mdaplotg.processParam(cex, "cex", is.numeric, ngroups)
  opacity <- mdaplotg.processParam(opacity, "opacity", is.numeric, ngroups)
  lab.col <- mdaplotg.processParam(lab.col, "lab.col", mdaplot.areColors, ngroups)

  # check and define colors if necessary (one color per group)
  if (is.null(col)) col <- mdaplot.getColors(ngroups = ngroups, colmap = colmap)
  col <- mdaplotg.processParam(col, "col", mdaplot.areColors, ngroups)

  # get plot data (plotseries objects) for each group
  ps <- vector("list", ngroups)
  for (i in seq_len(ngroups)) {
    ps[[i]] <- plotseries(data[[i]], type = type[i], col = col[i], opacity = opacity[i],
      labels = labels)
  }

  # get axis limits across all series (extended with extra room for the
  # legend and for labels when they are shown)
  ylim <- mdaplotg.getYLim(ps, ylim, show.excluded, show.legend, legend.position, show.labels)
  xlim <- mdaplotg.getXLim(ps, xlim, show.excluded, show.legend, show.labels, legend.position, bwd)

  # check and prepare xticklabels
  xticklabels <- mdaplot.getXTickLabels(xticklabels, xticks, NULL)
  xticks <- mdaplot.getXTicks(xticks, xlim = xlim)

  # check and prepare yticklabels
  yticklabels <- mdaplot.getYTickLabels(yticklabels, yticks, NULL)
  yticks <- mdaplot.getYTicks(yticks, ylim = ylim)

  # define main title if not provided (either as "name" or as "name" attr of first dataset)
  main <- if (is.null(main)) name else main
  main <- if (is.null(main)) ps[[1]]$name else main

  # define labels for axes (taken from the first plot series if not provided)
  xlab <- if (is.null(xlab)) attr(ps[[1]]$x_values, "name", exact = TRUE) else xlab
  ylab <- if (is.null(ylab)) attr(ps[[1]]$y_values, "name", exact = TRUE) else ylab

  # make an empty plot with proper limits and axis labels
  mdaplot.plotAxes(xticklabels = xticklabels, yticklabels = yticklabels, xticks = xticks,
    yticks = yticks, xlim = xlim, ylim = ylim, main = main, xlab = xlab, ylab = ylab,
    xlas = xlas, ylas = ylas, show.grid = show.grid,
    grid.lwd = grid.lwd, grid.col = grid.col
  )

  # show lines if needed (two coordinates: vertical and horizontal line)
  if (is.numeric(show.lines) && length(show.lines) == 2) {
    mdaplot.showLines(show.lines)
  }

  # count how many plots are bar plots (needed to offset the bars per group)
  nbarplots <- sum(type == "h")

  # make a plot for each group on top of the prepared axes
  for (i in seq_len(ngroups)) {

    # decide if x values should be forced as group index (bar plots only)
    force.x.values <- if (type[i] == "h") c(i, nbarplots) else NA

    # if error bars are shown and i > 1 do not show labels
    show.labels <- if (i > 1 && type[i] == "e") FALSE else show.labels

    # use mdaplot with show.axes = FALSE to create the plot
    mdaplot(ps = ps[[i]], type = type[i], col = col[i], pch = pch[i], lty = lty[i],
      lwd = lwd[i], cex = cex[i], force.x.values = force.x.values, bwd = bwd,
      show.grid = FALSE, show.labels = show.labels, opacity = opacity[i],
      lab.col = lab.col[i], lab.cex = lab.cex, show.axes = FALSE,
      show.excluded = show.excluded, ...
    )
  }

  # show legend if required
  if (show.legend == TRUE) {
    legend <- mdaplotg.getLegend(ps, names(data), legend)
    if (length(legend) != ngroups) {
      stop("Number of values for 'legend' is not the same as number of plot series.")
    }

    # hide the legend key parts that do not apply to the series type:
    # no line for scatter/bar series, no marker for line series,
    # filled square (pch 15) for bar series
    lty[type == "p" | type == "h"] <- 0
    pch[type == "l"] <- NA_integer_
    pch[type == "h"] <- 15

    mdaplotg.showLegend(
      legend, col = col, pch = pch, lty = lty, lwd = lwd, cex = 0.85,
      position = legend.position
    )
  }

  return(invisible(ps))
}
|
f6e5fba617b94ffbb9e9f9f2f36b1ae718d8827b | a660e9053827afa29f27e2bbd3c324dc6d9cfc9d | /SD1/deseq_T1.R | 1cd238402f83e8f570744ae53d20a060633b0741 | [] | no_license | sagw/R-scripts | f755edd159f5c990044f0a6c6ed5546f3b32626a | 8f3db4c8ebe59259bb733ec042a257f49c61073f | refs/heads/master | 2021-01-18T23:20:49.793457 | 2016-03-21T13:50:37 | 2016-03-21T13:50:37 | 29,606,098 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,473 | r | deseq_T1.R | rm(list=ls())
# DESeq2 differential-abundance analysis of 16S OTU counts (SD1 experiment),
# subset to timepoint "two", contrasting dose/disease state across sites.
# NOTE(review): interactive analysis script; `c`, `t` and `all` shadow base
# functions of the same name (calls like t(c) still resolve to base::t because
# R skips non-function bindings in call position, but it is confusing).
require(vegan)
require(DESeq2)
setwd("~/Documents/SD1/SD1_analyses/")
# OTU count table: rows = OTUs, columns = samples
c=read.csv("input_files//SD1_OTU_table_97.csv", h=T, row.names=1)
#information about samples
s=read.csv("input_files//SD1_map.csv", h=T, as.is=T)
# NOTE(review): `c6` is never defined in this script - presumably a filtered
# version of `c` left over from an earlier session; this line fails as written.
b=t(c6)
d=cbind.data.frame(s,b)
#check order of samples
check<-cbind(row.names(d), d[1:1])
# taxonomy assignment per OTU (OTUID, Tax, ... columns)
t=read.csv("input_files//rep_set_tax_assignments.csv", header=T)
# Remove first column
d=d[2:(NCOL(d))]
#filter out chloroplast DNA
# NOTE(review): `c6` again - see note above.
dtax<-merge(t, c6, by.x="OTUID", by.y="row.names", all.y=T)
dtax1<-dtax[- grep("Chloroplast", dtax$Tax),]
row.names(dtax1)<-dtax1$OTUID
# keep only the count columns (first columns hold taxonomy metadata)
c<-dtax1[5:NCOL(dtax1)]
write.csv(c, file="dtax_test.csv")
b=t(c)
d=cbind.data.frame(s,b)
# keep only timepoint "two" samples
timetwo<-subset(d, Timepoint=="two")
d<-timetwo
# split back into counts (columns 10+) and sample metadata (columns 1-9)
c<-d[10:NCOL(d)]
# NOTE(review): rows here are samples, so this filters *samples* with total
# count <= 1; if OTU filtering was intended it happens later on counts(dds).
rs = rowSums (c)
use = (rs > 1)
c = c [ use, ]
# transpose so OTUs are rows, as DESeq2 expects for countData
c<-t(c)
s<-d[0:9]
dds <-DESeqDataSetFromMatrix(countData = c, colData=s,
  design= ~ Site*Dose_disease_state*Dose.Site)
# fix factor level order so "Healthy" / "CK4" are the reference levels
colData(dds)$Dose_disease_state <- factor(colData(dds)$Dose_disease_state, levels=c("Healthy","Diseased"))
colData(dds)$Site <- factor(colData(dds)$Site, levels=c("CK4","CK14"))
colData(dds)$Dose.Site <- factor(colData(dds)$Dose.Site, levels=c("CK4","CK14"))
dds<-estimateSizeFactors(dds)
# drop OTUs with total count <= 1 across the retained samples
rs = rowSums (counts(dds))
use = (rs > 1)
dds = dds [ use, ]
dds<-DESeq(dds)
dds<-estimateDispersions(dds)
plotDispEsts(dds)
# rerun the Wald test with more iterations to help convergence
dds <- nbinomWaldTest(dds, maxit=100)
resultsNames(dds)
# NOTE(review): passing a results name positionally plus `pvalCutoff` does not
# match current DESeq2 plotMA signatures - presumably written for an older
# API version; verify against the installed DESeq2.
plotMA(dds, ylim=c(-10,10), "Dose_disease_stateDiseased.Dose.SiteCK14", pvalCutoff=0.05)
plotMA(dds, ylim=c(-10,10), "Dose_disease_state_Diseased_vs_Healthy", pvalCutoff=0.05)
comm.normalized=(counts(dds, normalized=TRUE))
# main effect: diseased vs healthy; keep significant OTUs, attach taxonomy
res<-results(dds, name="Dose_disease_state_Diseased_vs_Healthy")
ressig <- subset(res, padj<0.05)
ressig<-as.data.frame(ressig)
ressigtax<-merge(ressig, t, by.x="row.names", by.y="OTUID", all.x=T)
write.csv(as.data.frame(ressigtax), file="Output_files/T10_Dose_disease_state.csv")
# also attach normalized counts for the significant OTUs
ressigotu<-merge(ressigtax, comm.normalized, by.x="Row.names", by.y="row.names")
write.csv(as.data.frame(ressigotu), file="Output_files/T10_Dose_disease_state_otutable.csv")
# interaction term: disease effect differing between dose sites
resdosesite<-results(dds, name="Dose_disease_stateDiseased.Dose.SiteCK14")
resdosesitesig <- subset(resdosesite, padj<0.05)
resdosesitesig<-as.data.frame(resdosesitesig)
resdosesitetaxsig<-merge(resdosesitesig, t, by.x="row.names", by.y="OTUID", all.x=T)
# OTUs significant for both the main effect and the interaction
all<-merge(resdosesitesig, ressig, by="row.names")
alltax<-merge(all, t, by.x="Row.names", by.y="OTUID") |
0791f1fa1f8b208355135d4c5223243c0b06d080 | b9856976c90b24d4e6266b5e54af7751eba926c0 | /January_tessellated_menagerie/rstudioconf_logo.R | 99bb29ded34287f15d59d98cb88bf524c85e7573 | [] | no_license | batpigandme/aRt | 8033326cfa68fab8a4c07d3f5e329a55405465a2 | 2bd6e3183159fe646cd3673402fcb1a7816d1656 | refs/heads/master | 2020-04-20T15:35:58.645494 | 2019-02-02T16:36:00 | 2019-02-02T16:36:00 | 168,935,371 | 1 | 0 | null | 2019-02-03T10:53:16 | 2019-02-03T10:53:16 | null | UTF-8 | R | false | false | 11,768 | r | rstudioconf_logo.R | library(tidyverse)
library(gganimate)
library(colourlovers)
library(scales)
##sorry, this code is a dumpster fire and I don't have time to clean it up
##DM me on twitter @W_R_Chase if you need to know something
# Load pre-tessellated polygon data (x, y, id, hex per polygon vertex),
# produced elsewhere and stored as RDS.
armadillo <- readRDS("rds_files/armadillo_points.rds")
longhorn <- readRDS("rds_files/longhorn_points.rds")
rstudio <- readRDS("rds_files/rstudio_points.rds")
austin <- readRDS("rds_files/austin_points.rds")
# hand-picked color palettes for the longhorn and the Austin skyline
longhorn_pal <- c("#BC4E29", "#7F563C", "#B3805A", "#F6F7BD", "#B4B676", "#88BB9D", "#A29C7B",
                  "#BD5832", "#A2B888", "#ECBB4B", "#E4AD2D", "#81563C", "#AA906D", "#D4B04C",
                  "#B4502C", "#95AB8E", "#76563F", "#925437", "#9D5333", "#BFB469", "#B86F48",
                  "#AA512F", "#B47C56", "#B54F2C", "#8B5539", "#B74F2B", "#965436")
austin_pal <- c("#8fb9ff", "#204374", "#B22234", "#b21616", "#7C0F0F", "#1833B5", "#011f4b", "#03396c", "#0000ff", "#0e68ce", "#0c457d", "#13066d", "#060a47", "#05acc8")
# NOTE(review): sample() without a visible set.seed(), so the random color
# assignments differ between runs.
longhorn$hex <- sample(longhorn_pal, nrow(longhorn), replace = TRUE)
# NOTE(review): these two lines are dead - austin2 is recomputed below using
# the full `whites` list instead of only "#FEFEFE".
austin2 <- austin %>% filter(hex == "#FEFEFE")
austin2$hex <- sample(austin_pal, nrow(austin2), replace = TRUE)
# near-white fills to strip from the source images (background polygons)
whites <- c("#FEFEFE", "#FFFFFF", "#FDFDFD", "#FCFCFC", "#F5F5F5", "#EBEBEB",
            "#F4F4F4", "#FAFAFA", "#F7F7F7", "#F9F9F9", "#F8F8F8", "#F6F6F6",
            "#FBFBFB", "#F1F1F1", "#E8E8E8", "#E9E9E9", "#F3F3F3", "#EAEAEA",
            "#E6E6E6", "#EDEDED", "#F2F2F2", "#DADADA", "#E5E5E5", "#F0F0F0",
            "#EFEFEF", "#E7E7E7", "#E4E4E4", "#ECECEC")
# drop background polygons and recolor the Austin skyline
austin2 <- austin %>% filter(!(hex %in% whites))
austin2$hex <- sample(austin_pal, nrow(austin2), replace = TRUE)
armadillo2 <- armadillo %>% filter(!(hex %in% whites))
# RStudio logo: single blue fill, flip y so it faces the right way
rstudio$hex <- "#005b96"
rstudio$y <- -rstudio$y
# interactive preview plots of the four shapes
ggplot(rstudio, aes(x=x,y=y,group=id)) +
  geom_polygon(aes(fill=hex)) +
  scale_fill_identity() +
  coord_equal() +
  scale_y_reverse() +
  theme_void()
ggplot(austin2, aes(x=x,y=y,group=id)) +
  geom_polygon(aes(fill=hex)) +
  scale_fill_identity() +
  coord_equal() +
  scale_y_reverse() + ## you need to reverse y-axis
  theme_void()
ggplot(longhorn, aes(x = x, y = y)) +
  geom_polygon(aes(group = id, fill = hex),
               show.legend = FALSE, size=0)+
  scale_fill_identity() +
  coord_equal() +
  theme_void() +
  scale_y_reverse()
ggplot(armadillo2, aes(x=x,y=y,group=id)) +
  geom_polygon(aes(fill=hex)) +
  scale_fill_identity() +
  coord_equal() +
  scale_y_reverse() + ## you need to reverse y-axis
  theme_void()
# bounding boxes of the four shapes, collected for comparison
maxes_x <- c(max(rstudio$x), max(armadillo$x), max(austin$x), max(longhorn$x))
maxes_y <- c(max(rstudio$y), max(armadillo$y), max(austin$y), max(longhorn$y))
mins_x <- c(min(rstudio$x), min(armadillo$x), min(austin$x), min(longhorn$x))
mins_y <- c(min(rstudio$y), min(armadillo$y), min(austin$y), min(longhorn$y))
names <- c("rstudio", "armadillo", "austin", "longhorn")
dims <- data.frame(names, maxes_x, maxes_y, mins_x, mins_y)
#try rescaling a couple
# Rescale every shape into a common coordinate box (y to [0,1], x widths
# chosen per shape) so they can be laid out next to each other later.
rstudio$x_new <- rescale(rstudio$x, c(0,1.5))
rstudio$y_new <- rescale(rstudio$y, c(0,1))
ggplot(rstudio, aes(x=x_new,y=y_new,group=id)) +
  geom_polygon(aes(fill=hex, size = size), color = "black") +
  scale_fill_identity() +
  scale_size_identity() +
  coord_equal() +
  scale_y_reverse() +
  theme_void()
armadillo2$x_new <- rescale(armadillo2$x, c(0,1.2))
armadillo2$y_new <- rescale(armadillo2$y, c(0,1))
ggplot(armadillo2, aes(x=x_new,y=y_new,group=id)) +
  geom_polygon(aes(fill=hex)) +
  scale_fill_identity() +
  coord_equal() +
  scale_y_reverse() +
  theme_void()
austin2$x_new <- rescale(austin2$x, c(0,1.5))
austin2$y_new <- rescale(austin2$y, c(0,1))
ggplot(austin2, aes(x=x_new,y=y_new,group=id)) +
  geom_polygon(aes(fill=hex)) +
  scale_fill_identity() +
  coord_equal() +
  scale_y_reverse() +
  theme_void()
longhorn$x_new <- rescale(longhorn$x, c(0,1.5))
longhorn$y_new <- rescale(longhorn$y, c(0,1))
ggplot(longhorn, aes(x=x_new,y=y_new,group=id)) +
  geom_polygon(aes(fill=hex)) +
  scale_fill_identity() +
  coord_equal() +
  scale_y_reverse() +
  theme_void()
# #adding border
# rstudio$size <- 0.5
# austin2$size <- 0
# armadillo2$size <- 0
# longhorn$size <- 0
#add states
# animation state 1 = rstudio + armadillo, state 2 = austin + longhorn
rstudio$state <- 1
armadillo2$state <- 1
austin2$state <- 2
longhorn$state <- 2
#add ids
# tag each data frame with its picture name
rstudio$pic <- "rstudio"
armadillo2$pic <- "armadillo"
austin2$pic <- "austin"
longhorn$pic <- "longhorn"
# #adding color
# rstudio$col <- "#666666"
# austin2$col <- austin2$hex
# armadillo2$col <- armadillo2$hex
# longhorn$col <- longhorn$hex
# preview with a black polygon border
ggplot(longhorn, aes(x=x_new,y=y_new,group=id)) +
  geom_polygon(aes(fill=hex), color = "black") +
  scale_fill_identity() +
  scale_size_identity() +
  coord_equal() +
  scale_y_reverse() +
  theme_void()
#arrange one below the other
# State 1 layout: armadillo placed below (and slightly right of) the rstudio
# logo; then renumber polygon ids consecutively across both shapes so
# gganimate can match polygons between the two animation states.
armadillo2$y_new <- armadillo2$y_new + max(rstudio$y_new) + 0.2
armadillo2$x_new <- armadillo2$x_new + 0.2
armadillo3 <- armadillo2 %>% select(x_new, y_new, id, hex, state, pic)
rstudio2 <- rstudio %>% select(x_new, y_new, id, hex, state, pic)
# renumber armadillo polygons 1..n (one group per original id)
armadillo_split <- split(armadillo3, armadillo3$id)
newid1 <- 1:length(armadillo_split)
y1 <- 1:length(armadillo_split)
armadillo4 <- map2_dfr(armadillo_split, y1, ~mutate(.x, new_id = newid1[.y]))
# renumber rstudio polygons 1..m
rstudio_split <- split(rstudio2, rstudio2$id)
newid2 <- 1:length(rstudio_split)
y2 <- 1:length(rstudio_split)
rstudio3 <- map2_dfr(rstudio_split, y2, ~mutate(.x, new_id = newid2[.y]))
rstudio3$id <- rstudio3$new_id
rstudio3 <- rstudio3 %>% select(-c("new_id"))
# offset armadillo ids so they continue after the rstudio ids
armadillo4$id <- armadillo4$new_id + max(rstudio3$id)
armadillo4 <- armadillo4 %>% select(-c("new_id"))
state1 <- rbind(rstudio3, armadillo4)
ggplot(state1, aes(x=x_new,y=y_new,group=id)) +
  geom_polygon(aes(fill=hex)) +
  scale_fill_identity() +
  coord_equal() +
  scale_y_reverse() +
  theme_void()
#arrange one below the other
# State 2 layout: longhorn below the Austin skyline, same renumbering scheme.
longhorn$y_new <- longhorn$y_new + max(austin2$y_new) + 0.3
longhorn$x_new <- longhorn$x_new + 0.1
longhorn2 <- longhorn %>% select(x_new, y_new, id, hex, state, pic)
longhorn2$id <- as.numeric(longhorn2$id)
austin3 <- austin2 %>% select(x_new, y_new, id, hex, state, pic)
austin_split <- split(austin3, austin3$id)
newid3 <- 1:length(austin_split)
y3 <- 1:length(austin_split)
austin4 <- map2_dfr(austin_split, y3, ~mutate(.x, new_id = newid3[.y]))
austin4$id <- austin4$new_id
austin4 <- austin4 %>% select(-c("new_id"))
longhorn_split <- split(longhorn2, longhorn2$id)
newid4 <- 1:length(longhorn_split)
y4 <- 1:length(longhorn_split)
longhorn3 <- map2_dfr(longhorn_split, y4, ~mutate(.x, new_id = newid4[.y]))
longhorn3$id <- longhorn3$new_id
longhorn3 <- longhorn3 %>% select(-c("new_id"))
longhorn3$id <- longhorn3$id + max(austin4$id)
state2 <- rbind(austin4, longhorn3)
ggplot(state2, aes(x=x_new,y=y_new,group=id)) +
  geom_polygon(aes(fill=hex)) +
  scale_fill_identity() +
  coord_equal() +
  scale_y_reverse() +
  theme_void()
# place state 2 to the right of state 1 and stack both states
state2$x_new <- state2$x_new + max(state1$x_new)
all_states <- rbind(state1, state2)
# sanity checks: ids in each state should be exactly 1..N
# (1642 and 1741 are the expected polygon counts - see checks further below)
state1_ids <- state1 %>% distinct(id) %>% pull(id)
state1_ids <- as.integer(state1_ids)
sanity <- 1:1642
identical(state1_ids, sanity)
state2_ids <- state2 %>% distinct(id) %>% pull(id)
state2_ids <- as.integer(state2_ids)
sanity <- 1:1741
identical(state2_ids, sanity)
# ids present in both states / only in state 2 (the mismatch causes
# gganimate pairing problems, handled with dummy polygons below)
state1_in_state2 <- state1_ids[state1_ids %in% state2_ids]
state2_not_state1 <- state2_ids[!(state2_ids %in% state1_ids)]
# tiny invisible triangles in state 1 so every state-2 id has a partner
dummys <- data.frame(x_new = rep(c(0.5, 0.5000002, 0.5000001)), y_new = rep(c(1, 1, 1.000002)), id = rep(state2_not_state1, each = 3), hex = "#FFFFFF", state = 1, pic = "rstudio")
ggplot(dummys, aes(x = x_new, y = y_new, group = id)) +geom_polygon()
all_plus_dummys <- rbind(all_states, dummys)
# first animation attempt: morph between the two states
anim <-
  ggplot(all_plus_dummys, aes(x=x_new,y=y_new,group=id)) +
  geom_polygon(aes(fill=hex)) +
  scale_fill_identity() +
  coord_equal() +
  scale_y_reverse() +
  theme_void() +
  transition_states(state, transition_length = 2, state_length = 2) +
  ease_aes('cubic-in-out')
animate(anim, nframes = 100, fps = 10, detail = 2, type = "cairo", height = 1600, width = 1900)
anim_save("logo_test.gif")
#testing
#trying
state1_split <- split(state1, state1$id)
newid5 <- sample(1:length(state1_split), length(state1_split))
y5 <- 1:length(state1_split)
state1_randomized <- map2_dfr(state1_split, y5, ~mutate(.x, new_id = newid5[.y]))
state2_split <- split(state2, state2$id)
newid6 <- sample(1:length(state2_split), length(state2_split))
y6 <- 1:length(state2_split)
state2_randomized <- map2_dfr(state2_split, y6, ~mutate(.x, new_id = newid6[.y]))
all_states_randomized <- rbind(state1_randomized, state2_randomized)
ggplot(all_states_randomized, aes(x=x_new,y=y_new,group=new_id)) +
geom_polygon(aes(fill=hex)) +
scale_fill_identity() +
coord_equal() +
scale_y_reverse() +
theme_void()
state1_split <- split(state1, state1$id)
newid1 <- 1:1642
y1 <- 1:length(state1_split)
state1_randomized <- map2_dfr(state1_split, y1, ~mutate(.x, new_id = newid1[.y]))
state2_split <- split(state2, state2$id)
newid2 <- 1:1741
y2 <- 1:length(state2_split)
state2_randomized <- map2_dfr(state2_split, y2, ~mutate(.x, new_id = newid2[.y]))
all_states_randomized <- rbind(state1_randomized, state2_randomized)
ggplot(all_states, aes(x=x_new,y=y_new,group=id)) +
geom_polygon(aes(fill=hex)) +
scale_fill_identity() +
coord_equal() +
scale_y_reverse() +
theme_void()
#does this work? sort of, but doesn't look great
anim_random <-
ggplot(all_states_randomized, aes(x=x_new,y=y_new,group=new_id)) +
geom_polygon(aes(fill=hex)) +
scale_fill_identity() +
coord_equal() +
scale_y_reverse() +
theme_void() +
transition_states(state, transition_length = 2, state_length = 2) +
ease_aes('cubic-in-out') +
enter_grow() +
enter_fade() +
exit_shrink() +
exit_fade()
animate(anim_random, nframes = 100, fps = 10, type = "cairo")
#checking stuff
# Sanity-check that polygon ids in each data set are exactly 1..n with no gaps;
# identical() returns FALSE when ids are missing or out of order.
rstudio_ids <- all_states %>% filter(pic == "rstudio") %>% distinct(id) %>% pull(id)
rstudio_ids <- as.integer(rstudio_ids)
sanity <- 1:338
identical(rstudio_ids, sanity)
state1_ids <- state1 %>% distinct(id) %>% pull(id)
state1_ids <- as.integer(state1_ids)
sanity <- 1:1642
identical(state1_ids, sanity)
state2_ids <- state2 %>% distinct(id) %>% pull(id)
state2_ids <- as.integer(state2_ids)
sanity <- 1:1741
identical(state2_ids, sanity)
# Which state2 ids fall outside the expected 1..1741 range?
state2_ids[!(state2_ids%in%sanity)]
state2_ids
austin_ids <- austin4 %>% distinct(id) %>% pull(id)
austin_ids <- as.integer(austin_ids)
sanity <- 1:1567
identical(austin_ids, sanity)
max(austin_ids)
length(austin_ids)
longhorn_ids <- longhorn3 %>% distinct(id) %>% pull(id)
longhorn_ids <- as.integer(longhorn_ids)
sanity <- 1:174
identical(longhorn_ids, sanity)
max(longhorn_ids)
length(longhorn_ids)
# Ids present in both states vs ids only in state2 (the mismatch that broke
# tweening: gganimate needs each group to exist in every state).
state1_in_state2 <- state1_ids[state1_ids %in% state2_ids]
state2_not_state1 <- state2_ids[!(state2_ids %in% state1_ids)]
#figured it out, some not in state 1 that are in state 2
all_states <- rbind(state1, state2) %>% filter(!(id %in% state2_not_state1))
#try adding dummy points
# One degenerate (near-invisible) white triangle per missing id, tagged as
# state 1, so every group has a polygon in both states.
dummys <- data.frame(x_new = rep(c(0.5, 0.5000002, 0.5000001)), y_new = rep(c(1, 1, 1.000002)), id = rep(state2_not_state1, each = 3), hex = "#FFFFFF", state = 1, pic = "rstudio")
ggplot(dummys, aes(x = x_new, y = y_new, group = id)) +geom_polygon()
all_plus_dummys <- rbind(all_states, dummys)
# Final animation over the padded data set.
anim <-
ggplot(all_plus_dummys, aes(x=x_new,y=y_new,group=id)) +
geom_polygon(aes(fill=hex)) +
scale_fill_identity() +
coord_equal() +
scale_y_reverse() +
theme_void() +
transition_states(state, transition_length = 2, state_length = 2) +
ease_aes('cubic-in-out') +
enter_grow() +
enter_fade() +
exit_shrink() +
exit_fade()
animate(anim, nframes = 100, fps = 10, detail = 2, type = "cairo", height = 10, width = 12, units = "in")
anim_save("logo_test.gif")
|
656dff2e876da896a44bf7578dbf35ecea61ca57 | 175eb946c83222de59c138bc3352ce41f1b13fd3 | /Created_code/RawZT8_ZT12_combine.R | 7c92177a03243db4cf4d740d189f668adbb3f66a | [] | no_license | Kfalash/CVP-Testing | bc12e3469f5a680102aca9105d0cf0dcd8ca6e20 | 9412f69c22ddb839d7c7501c628a20aba3b2877e | refs/heads/main | 2023-04-22T04:38:50.216992 | 2021-04-24T15:31:21 | 2021-04-24T15:31:21 | 301,788,908 | 0 | 0 | null | 2020-12-04T17:19:09 | 2020-10-06T16:29:00 | R | UTF-8 | R | false | false | 12,063 | r | RawZT8_ZT12_combine.R | #Preparing data to combine
# Build one raw-count matrix for the WT seedling samples at ZT8, ZT10 and
# ZT12 and write it to lab/RawData/finalcountZT8_ZT12.txt.
#
# Each GEO supplementary file is a tab-separated table whose LAST column
# holds the raw read counts; everything else is dropped.  All files share
# the same long suffix, so every sample is identified below only by its
# "GSM accession + sample label" stem.
#
# Fixes relative to the original script:
#   * ZT10/ZT12 were concatenated with c() (flattening to one long vector)
#     while ZT8 used cbind(); all three are now column-bound.
#   * write.table() referenced an undefined `finalcount` object; the
#     combined matrix is now actually assigned to `finalcount`.

FILE_SUFFIX <- "_trimmo_paired_2_10_5_1_tophat_ensembl_TAIR10_nomixed_unstranded_sorted_rmdup_picard_combined_read.txt"

# Read one sample file and return its raw-count (last) column.
read_raw_counts <- function(stem) {
  counts <- read.table(paste0("lab/RawData/", stem, FILE_SUFFIX),
                       header = TRUE, sep = "\t")
  counts[, ncol(counts)]
}

#---------------------------ZT8 seedlings----------------------------------
# Sample numbering is not contiguous (e.g. no seedling 8 or 15 for ZT8).
ZT8_stems <- c(
  ZT8_1  = "GSM3184429_WT-ZT8-seedling-1",
  ZT8_2  = "GSM3184436_WT-ZT8-seedling-2",
  ZT8_3  = "GSM3184437_WT-ZT8-seedling-3",
  ZT8_4  = "GSM3184438_WT-ZT8-seedling-4-bis_S14_L001",
  ZT8_5  = "GSM3184439_WT-ZT8-seedling-5",
  ZT8_6  = "GSM3184440_WT-ZT8-seedling-6",
  ZT8_7  = "GSM3184441_WT-ZT8-seedling-7",
  ZT8_9  = "GSM3184442_WT-ZT8-seedling-9",
  ZT8_10 = "GSM3184430_WT-ZT8-seedling-10",
  ZT8_11 = "GSM3184431_WT-ZT8-seedling-11",
  ZT8_12 = "GSM3184432_WT-ZT8-seedling-12",
  ZT8_13 = "GSM3184433_WT-ZT8-seedling-13",
  ZT8_14 = "GSM3184434_WT-ZT8-seedling-14",
  ZT8_16 = "GSM3184435_WT-ZT8-seedling-16"
)

#--------------------------ZT10 seedlings----------------------------------
ZT10_stems <- c(
  ZT10_1  = "GSM3184443_WT-ZT10-seedling-1",
  ZT10_2  = "GSM3184449_WT-ZT10-seedling-2",
  ZT10_3  = "GSM3184450_WT-ZT10-seedling-3",
  ZT10_4  = "GSM3184451_WT-ZT10-seedling-4",
  ZT10_5  = "GSM3184452_WT-ZT10-seedling-5",
  ZT10_6  = "GSM3184453_WT-ZT10-seedling-6",
  ZT10_7  = "GSM3184454_WT-ZT10-seedling-7",
  ZT10_8  = "GSM3184455_WT-ZT10-seedling-8",
  ZT10_9  = "GSM3184456_WT-ZT10-seedling-9",
  ZT10_10 = "GSM3184444_WT-ZT10-seedling-10",
  ZT10_11 = "GSM3184445_WT-ZT10-seedling-11",
  ZT10_13 = "GSM3184446_WT-ZT10-seedling-13",
  ZT10_14 = "GSM3184447_WT-ZT10-seedling-14",
  ZT10_16 = "GSM3184448_WT-ZT10-seedling-16"
)

#--------------------------ZT12 seedlings----------------------------------
# NOTE(review): the original script read GSM3184458 (seedling-10) for BOTH
# ZT12 sample 1 and sample 10.  That duplication is preserved here to keep
# behavior identical, but it looks like a copy/paste slip -- confirm the
# intended accession for ZT12_1.
ZT12_stems <- c(
  ZT12_1  = "GSM3184458_WT-ZT12-seedling-10",
  ZT12_2  = "GSM3184465_WT-ZT12-seedling-2",
  ZT12_5  = "GSM3184466_WT-ZT12-seedling-5",
  ZT12_6  = "GSM3184467_WT-ZT12-seedling-6",
  ZT12_7  = "GSM3184468_WT-ZT12-seedling-7",
  ZT12_8  = "GSM3184469_WT-ZT12-seedling-8",
  ZT12_9  = "GSM3184470_WT-ZT12-seedling-9",
  ZT12_10 = "GSM3184458_WT-ZT12-seedling-10",
  ZT12_11 = "GSM3184459_WT-ZT12-seedling-11",
  ZT12_12 = "GSM3184460_WT-ZT12-seedling-12",
  ZT12_13 = "GSM3184461_WT-ZT12-seedling-13",
  ZT12_14 = "GSM3184462_WT-ZT12-seedling-14",
  ZT12_15 = "GSM3184463_WT-ZT12-seedling-15",
  ZT12_16 = "GSM3184464_WT-ZT12-seedling-16"
)

#-----------------------------Combine Count Data---------------------------
# One column per sample; column names come from the stem-vector names.
ZT8  <- do.call(cbind, lapply(ZT8_stems,  read_raw_counts))
ZT10 <- do.call(cbind, lapply(ZT10_stems, read_raw_counts))
ZT12 <- do.call(cbind, lapply(ZT12_stems, read_raw_counts))
finalcount <- cbind(ZT8, ZT10, ZT12)

# Creates a new file with only the count data for each time point.
write.table(finalcount, file = "lab/RawData/finalcountZT8_ZT12.txt", sep = "\t")
|
ef8375e55a1bab03356db841970086a48e927b96 | 44e79ae499e5da613c7fc765277cc42cb0c463e6 | /program/count_AT_GC_gene_trait.R | 8911bc8a9f6edaac948c738655f416da97428a73 | [] | no_license | wang-q/pars | 9ff3520ff85aa596adb41d49b5d6ad567d99992e | e673977d8ae6728890320af040add0a1c5dda9ad | refs/heads/master | 2022-07-28T00:28:43.264655 | 2022-07-04T06:37:23 | 2022-07-04T06:37:23 | 45,329,678 | 0 | 4 | null | 2022-07-03T09:49:55 | 2015-11-01T05:43:15 | R | UTF-8 | R | false | false | 7,722 | r | count_AT_GC_gene_trait.R | #!/usr/bin/env Rscript
library(getopt)
library(ape)
library(ggplot2)
library(scales)
library(reshape)
library(pander)
library(gridExtra)
library(plyr)
library(dplyr)
library(proto)
library(gsubfn)
library(RSQLite)
library(sqldf)
# Command-line interface.  Each row of the getopt spec is:
#   long name, short flag, argument flag (0 = none, 1 = required), type, help.
# NOTE(review): the "outfile" option is parsed but never used below --
# confirm whether output paths were meant to be configurable.
spec = matrix(
c(
"help",
"h",
0,
"logical",
"brief help message",
"name",
"n",
1,
"character",
"input name",
"outfile",
"o",
1,
"character",
"output filename"
),
byrow = TRUE,
ncol = 5
)
opt = getopt(spec)
# All inputs/outputs live under ~/data/mrna-structure/result/<name>.
name <- opt$name
path <- paste0("~/data/mrna-structure/result/", name, collapse = NULL)
# NOTE(review): setwd() changes global state for the whole session; every
# relative path used later in this script depends on it.
setwd(path)
#stem_length
# Load the per-SNP PARS annotation for mRNA (one row per SNP).
file_SNPs_PARS_mRNA <- paste0(path, '/data_SNPs_PARS_mRNA_pos.csv', collapse = NULL)
data_SNPs_PARS_mRNA <- read.csv(file_SNPs_PARS_mRNA, header = TRUE, sep = ",")

# Stem-only subset.  NOTE(review): not used further in this script; kept for
# parity with the companion scripts -- confirm it can be dropped.
data_SNPs_PARS_mRNA_stem <- subset(data_SNPs_PARS_mRNA, structure == "stem")

# One subset per island length 1..15, replacing 15 copy-pasted subset()
# calls.  Variables are named data_SNPs_PARS_mRNA_<len> because the main
# loop below looks them up with get(paste0('data_SNPs_PARS_', g)).
# which() drops rows with a missing island_length, matching subset()'s
# NA handling.
for (len in 1:15) {
  assign(paste0("data_SNPs_PARS_mRNA_", len),
         data_SNPs_PARS_mRNA[which(data_SNPs_PARS_mRNA$island_length == len), ])
}
# For every island-length group, bin SNPs into 10 equal-width bins of the
# (maximum) derived-allele frequency and tally, per bin: total SNPs, distinct
# genes, and stem/loop SNP counts split by mutation direction (AT->GC vs
# GC->AT).  Each tally is written to its own CSV under freq_10/stem_length/.
group = c("mRNA_1","mRNA_2","mRNA_3","mRNA_4","mRNA_5","mRNA_6","mRNA_7","mRNA_8","mRNA_9","mRNA_10","mRNA_11","mRNA_12","mRNA_13","mRNA_14","mRNA_15")
for (g in group){
t <- get(paste0('data_SNPs_PARS_',g,collapse = NULL))
# Only process groups with enough frequency spread for 10 bins.
if(max(t$freq)>=10){
for(i in 1:10){
# Count total SNPs and distinct genes in this freq bin.  Bins are tenths of
# the MAXIMUM frequency (equal-width), not quantiles.
# NOTE(review): dd_* frames are grown with rbind() inside the loop -- an
# O(n^2) anti-pattern, tolerable here for only 10 iterations.
n <- assign(paste0('data_SNPs_PARS_',g,'_',i,collapse = NULL),subset(t,freq <= max(freq)*(i/10) & freq > max(freq)*((i-1)/10)))
if(i==1){
dd_SNPs_freq <- data.frame(name = paste0("0-",i,"0%",collapse = NULL), SNPs = c(nrow(n)))
}else{
dd_SNPs_freq <- rbind(dd_SNPs_freq, data.frame(name = paste0(i-1,"0","-",i,"0%",collapse = NULL), SNPs = nrow(n)))
}
data_gene_process <- n["gene"]
data_gene_process <- unique(data_gene_process,fromLast=TRUE)
if(i==1){
dd_gene_freq <- data.frame(name = paste0("0-",i,"0%",collapse = NULL), gene = c(nrow(data_gene_process)))
}else{
dd_gene_freq <- rbind(dd_gene_freq, data.frame(name = paste0(i-1,"0","-",i,"0%",collapse = NULL), gene = nrow(data_gene_process) ))
}
# Per-bin SNP counts split by structure (stem vs loop) and mutation direction.
# (The bare 'stem' / 'loop' strings below are no-op section markers.)
'stem'
data_stem <- subset(n,(structure == "stem"))
if(i==1){
dd_SNPs_freq_stem <- data.frame(name = paste0("0-",i,"0%",collapse = NULL), SNPs = c(nrow(data_stem)))
}else{
dd_SNPs_freq_stem <- rbind(dd_SNPs_freq_stem, data.frame(name = paste0(i-1,"0","-",i,"0%",collapse = NULL), SNPs = nrow(data_stem)))
}
# AT->GC: mutations that increase GC content.
data_stem_AT_GC <- sqldf('SELECT * FROM [data_stem] where mutant_to == "A->G" OR mutant_to == "A->C" OR mutant_to == "T->G" OR mutant_to == "T->C"' )
if(i==1){
dd_SNPs_freq_stem_AT_GC <- data.frame(name = paste0("0-",i,"0%",collapse = NULL), SNPs = c(nrow(data_stem_AT_GC)))
}else{
dd_SNPs_freq_stem_AT_GC <- rbind(dd_SNPs_freq_stem_AT_GC, data.frame(name = paste0(i-1,"0","-",i,"0%",collapse = NULL), SNPs = nrow(data_stem_AT_GC)))
}
# GC->AT: mutations that decrease GC content.
data_stem_GC_AT <- sqldf('SELECT * FROM [data_stem] where mutant_to == "G->A" OR mutant_to == "C->A" OR mutant_to == "G->T" OR mutant_to == "C->T"')
if(i==1){
dd_SNPs_freq_stem_GC_AT <- data.frame(name = paste0("0-",i,"0%",collapse = NULL), SNPs = c(nrow(data_stem_GC_AT)))
}else{
dd_SNPs_freq_stem_GC_AT <- rbind(dd_SNPs_freq_stem_GC_AT, data.frame(name = paste0(i-1,"0","-",i,"0%",collapse = NULL), SNPs = nrow(data_stem_GC_AT)))
}
'loop'
data_loop <- subset(n,(structure == "loop"))
if(i==1){
dd_SNPs_freq_loop <- data.frame(name = paste0("0-",i,"0%",collapse = NULL), SNPs = c(nrow(data_loop)))
}else{
dd_SNPs_freq_loop <- rbind(dd_SNPs_freq_loop, data.frame(name = paste0(i-1,"0","-",i,"0%",collapse = NULL), SNPs = nrow(data_loop)))
}
data_loop_AT_GC <- sqldf('SELECT * FROM [data_loop] where mutant_to == "A->G" OR mutant_to == "A->C" OR mutant_to == "T->G" OR mutant_to == "T->C"' )
if(i==1){
dd_SNPs_freq_loop_AT_GC <- data.frame(name = paste0("0-",i,"0%",collapse = NULL), SNPs = c(nrow(data_loop_AT_GC)))
}else{
dd_SNPs_freq_loop_AT_GC <- rbind(dd_SNPs_freq_loop_AT_GC, data.frame(name = paste0(i-1,"0","-",i,"0%",collapse = NULL), SNPs = nrow(data_loop_AT_GC)))
}
data_loop_GC_AT <- sqldf('SELECT * FROM [data_loop] where mutant_to == "G->A" OR mutant_to == "C->A" OR mutant_to == "G->T" OR mutant_to == "C->T"')
if(i==1){
dd_SNPs_freq_loop_GC_AT <- data.frame(name = paste0("0-",i,"0%",collapse = NULL), SNPs = c(nrow(data_loop_GC_AT)))
}else{
dd_SNPs_freq_loop_GC_AT <- rbind(dd_SNPs_freq_loop_GC_AT, data.frame(name = paste0(i-1,"0","-",i,"0%",collapse = NULL), SNPs = nrow(data_loop_GC_AT)))
}
# Per-bin 2x2 summary (stem/loop x AT_GC/GC_AT), stored as data_stat_<i>.
assign(paste0('data_stat_',i),data.frame(structure=c("stem","loop"),AT_GC=c(nrow(data_stem_AT_GC),nrow(data_loop_AT_GC)),GC_AT=c(nrow(data_stem_GC_AT),nrow(data_loop_GC_AT))))
}
# Combine the 10 per-bin summaries into one data frame.
data <- lapply(paste0('data_stat_',1:10), function(data_stat_) eval(as.name(data_stat_)))
data_stat <- do.call("rbind", data)
write.csv(data_stat, file=paste0(path,'/freq_10/stem_length/PARS_',g,'_stat_freq_10.csv'), row.names = FALSE)
write.csv(dd_SNPs_freq, file=paste0(path,'/freq_10/stem_length/PARS_',g,'_stat_SNPs_freq_10.csv'), row.names = FALSE)
write.csv(dd_gene_freq, file=paste0(path,'/freq_10/stem_length/PARS_',g,'_stat_gene_freq_10.csv'), row.names = FALSE)
write.csv(dd_SNPs_freq_stem, file=paste0(path,'/freq_10/stem_length/PARS_',g,'_stat_stem_freq_10.csv'), row.names = FALSE)
write.csv(dd_SNPs_freq_loop, file=paste0(path,'/freq_10/stem_length/PARS_',g,'_stat_loop_freq_10.csv'), row.names = FALSE)
write.csv(dd_SNPs_freq_stem_AT_GC, file=paste0(path,'/freq_10/stem_length/PARS_',g,'_stat_stem_AT_GC_freq_10.csv'), row.names = FALSE)
write.csv(dd_SNPs_freq_loop_AT_GC, file=paste0(path,'/freq_10/stem_length/PARS_',g,'_stat_loop_AT_GC_freq_10.csv'), row.names = FALSE)
write.csv(dd_SNPs_freq_stem_GC_AT, file=paste0(path,'/freq_10/stem_length/PARS_',g,'_stat_stem_GC_AT_freq_10.csv'), row.names = FALSE)
write.csv(dd_SNPs_freq_loop_GC_AT, file=paste0(path,'/freq_10/stem_length/PARS_',g,'_stat_loop_GC_AT_freq_10.csv'), row.names = FALSE)
}
}
|
1c4e34503018925edaf6de63ebadc2f97e685b0f | ead209f46dfb817bb7cd3acd9ebaf5d9b4c97b7e | /R/plot.R | 7ceeba0bcece82c1d56ebe58ae3fdcc3b2c9d815 | [
"CC-BY-4.0"
] | permissive | igankevich/subordination-benchmarks | 9ee7df3c34528ea17093acbf0ce110bc235bdf73 | 859722fccef8bec4fd00b8f91af68122479001f7 | refs/heads/master | 2021-09-01T11:59:26.240570 | 2017-12-26T21:27:34 | 2017-12-26T21:27:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 284 | r | plot.R | #!/usr/bin/Rscript
# Read each table passed on the command line and draw one plot per file,
# arranged on a roughly square n x m grid in a single PDF.
args <- commandArgs(trailingOnly = TRUE)

# With zero files the original computed m == 0 and par(mfrow = c(1, 0))
# errored; fail early with a usage message instead.
if (length(args) == 0) {
  stop("usage: plot.R FILE [FILE ...]", call. = FALSE)
}

# Grid dimensions: as close to square as possible.
n <- max(1, floor(sqrt(length(args))))
m <- ceiling(length(args) / n)
print(m)
print(n)
print(length(args))

pdf(width = 20, height = 20)
par(mfrow = c(n, m))
for (file in args) {
  data <- read.table(file)
  plot(data)
  title(file)
}
# Close the device explicitly so the PDF is fully written even when the
# script is sourced rather than run to process exit.
dev.off()
|
cf15f6d4cb944363f92303dbaf7b3e4736518e5c | 5f4a6f8d0eb0e22669a2e79dd3d66588077f7d0e | /08-qqplot.R | 924402394cf90b3f5b86cfa8ec77054139239080 | [] | no_license | agibilis/r-course | 7f8e46068b45e0b4c4441e58a6ea3c43ec530dea | b54dca78d1650c5c2ed8ab6d7afc7358068bdac5 | refs/heads/master | 2020-03-09T01:15:06.830033 | 2018-06-03T17:39:28 | 2018-06-03T17:39:28 | 128,509,209 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,482 | r | 08-qqplot.R | s <- seq (0.01, 0.99, 0.01)
# Q-Q plot demo: comparing sample quantiles against theoretical
# distributions.  (Comments translated from Spanish; `s` was defined above
# as seq(0.01, 0.99, 0.01), i.e. the 1%..99% probability grid.)
s
# Theoretical standard-normal quantiles at those probabilities.
qn <-qnorm(s)
qn
df <- data.frame (p = s, q = qn)
df
# We could also draw a random sample from a normal distribution.
# NOTE(review): `sample` shadows base::sample() for the rest of the session.
sample <- rnorm(200)
sample
quantile(sample,probs=s)
# The sample quantiles line up with the theoretical ones.
# A Q-Q plot sorts the sample and plots it against the normal quantiles.
# Base R provides qqnorm() for this.
trees # built-in R dataset
# Can we assume tree height is normally distributed?
qqnorm(trees$Height)
# Fairly straight line, so normality looks plausible.
# Base R also provides qqplot() for arbitrary theoretical quantiles.
randu # RANDU pseudo-random numbers (uniform, not normal)
n <- length(randu$x)
n
y <- qunif(ppoints(n)) # theoretical uniform quantiles at n probability points
# ppoints(n) returns n evenly spaced probabilities.
# Q-Q plot of the theoretical uniform quantiles against the sample.
qqplot(y,randu$x)
# For example:
qqnorm(randu$x)
# Clearly not normal.
# Now chi-squared quantiles with 3 degrees of freedom.
chi3 <- qchisq(ppoints(30),df =3)
n30 <- qnorm(ppoints(30))
qqplot(n30,chi3) # right-skewed relative to the normal
# Not a normal distribution.
# A heavy-tailed distribution: the Cauchy.
cauchy <- qcauchy(ppoints(30))
qqplot(n30,cauchy)
# The center matches the normal, but the tails are much heavier.
# Side-by-side density and CDF plots for each distribution.
par(mfrow=c(1,2))
x <-seq(-3,3,0.01)
plot(x,dnorm(x))
plot(x,pnorm(x))
plot(x,dchisq(x,df=3))
plot(x,pchisq(x,df=3))
plot(x,dcauchy(x))
plot(x,pcauchy(x))
|
d40b8d69cd51cfb096d94b6f8d051a7307d95533 | b39a3af719704b59bf6be20494efa511115f153e | /man/is.ratetable.Rd | f0ecdb44452ddccbc4c76968573f51a1a0a4e600 | [] | no_license | cran/survival4 | c41403b105fa506d99893987ed578eba02d9c839 | 70ff67f5153bc21daeff7b27d3ef94c82e50c755 | refs/heads/master | 2016-09-06T09:44:16.772359 | 1977-08-08T00:00:00 | 1977-08-08T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 968 | rd | is.ratetable.Rd | \name{is.ratetable}
\title{Is an Object of Class ``ratetable''?}
\usage{
is.ratetable(x, verbose=F)
}
\arguments{
\item{x}{the object to be verified.}
\item{verbose}{
If \code{TRUE} and the object is not a ratetable, then return a
character string describing the way(s) in which \code{x} fails to be
a proper ratetable object.}
}
\description{
Verify that an object is of class \code{ratetable}. The function
verifies not only the \code{class} attribute, but the structure of the
object.
}
\value{
Returns \code{TRUE} if \code{x} is a ratetable, and \code{FALSE} or a
description if it is not. Rate tables are used by the \code{pyears}
and \code{survexp} functions, and normally contain death rates for
some population, categorized by age, sex, or other variables. They
have a fairly rigid structure, and the \code{verbose} option can help
in creating a new rate table.
}
\seealso{
\code{\link{pyears}}, \code{\link{survexp}}
}
|
61546d3758bd20a1b3dca2cf773a697b5433fd92 | 670575bc7aadd3c1bd7e206851908314c1a0ec78 | /final_shiny.R | 0b08c9fbbea0a14a8b3ded7b5dd6ef60890e0514 | [] | no_license | henriksergoyan/Team-builder | a3f419e0b3ff3517c5c3a91d115748f850c6d870 | 9b242fe14505bbdeaca6403265d92e1b11c3174e | refs/heads/master | 2020-04-22T07:16:44.809988 | 2019-02-11T22:44:57 | 2019-02-11T22:44:57 | 170,214,380 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,443 | r | final_shiny.R |
# install.packages("readxl")
library(ggplot2)
library(shiny)
library(shinydashboard)
library(rsconnect)
library(lpSolve)
options(shiny.maxRequestSize=30*1024^2)
# Dashboard layout: a sidebar with two tool tabs and a main body that shows
# the resulting player table plus a savings summary.
ui <- dashboardPage(
  dashboardHeader(),
  dashboardSidebar(
    tabsetPanel(id = 'first_ch',
      # Tab 1: filter the player database by league / club / age / position.
      tabPanel("Search Engine",
        # NOTE(review): "Seria A" matches the spelling the server compares
        # against the data file's League column -- do not "correct" it here
        # without also fixing the data.
        selectInput(inputId = "League", label = "Select the league",
                    choices = c("Any", "Premier League", "La Liga", "Bundesliga", "Seria A")),
        # Club choices are filled in server-side once a league is picked.
        selectizeInput("Club", "Select the club", choices = c("Any")),
        sliderInput("Age", label = "Choose the range of age", min = 16,
                    max = 42, value = c(16, 42)),
        selectInput(inputId = "Position", label = "Select the Position of the player",
                    choices = c("Any", "GK", "Defender", "Midfielder", "Forward"))
      ),
      # Tab 2: budget-constrained squad optimization.
      tabPanel("Squad Builder",
        textInput("money", label = "How much is your budget? (Mill. €)"),
        sliderInput("gk", label = "Choose number of goalkeepers you want to buy", min = 0,
                    max = 2, value = 0),
        sliderInput("def", label = "Choose number of defenders you want to buy", min = 0,
                    max = 5, value = 0),
        sliderInput("mid", label = "Choose number of midfielders you want to buy", min = 0,
                    max = 5, value = 0),
        # Fixed: the original built this label with paste0() and no
        # separators, rendering "Choose number offorwardsyou want to buy".
        sliderInput("fw", label = "Choose number of forwards you want to buy", min = 0,
                    max = 4, value = 0),
        actionButton(
          inputId = "submit_loc",
          label = "Submit"
        )))
    # br(),
    # br(),
    # downloadButton("downloadData", "Download",class="butt1")
  ),
  dashboardBody(
    h3(textOutput("text")),
    br(),
    br(),
    dataTableOutput("table")
  ))
server <- shinyServer(function(input, output,session) {
# Select the subset of players that maximizes total Rating subject to:
#   (1) total Price <= cap, and (2) exactly `num` players chosen.
# Solved as a 0/1 knapsack with an equality cardinality constraint via
# lpSolve::lp(); returns the chosen rows of `df`.
lpsolver <- function(df, num, cap){
  ratings <- df$Rating
  prices <- df$Price
  model <- lp(
    direction = "max",
    objective.in = ratings,
    const.mat = rbind(prices, rep(1, length(ratings))),
    const.dir = c("<=", "="),
    const.rhs = c(cap, num),
    all.bin = TRUE
  )
  # Binary decision variables come back as doubles; treat >= 0.999 as chosen.
  df[which(model$solution >= 0.999), ]
}
# Club dropdown options for the currently selected league ("Any" first).
choice <- reactive({
df <- readxl::read_xlsx("final_data3.xlsx", col_names = TRUE) #Reading the file
if (input$League != "Any"){
df <- df[df$League == input$League,]
}
choices <- c("Any",sort(unique(df$Clubs)))
return (choices)
})
# Keep the Club selectize widget in sync with the league choice.
observe({
updateSelectizeInput(session,"Club", choices=choice())
})
# Search Engine tab: player table filtered by league, club, age range and
# position.
agg_data <- reactive({
keep_cols <- c("Players","Clubs","Mins", "Age","Values","Rating","Position")
df <- readxl::read_xlsx("final_data3.xlsx", col_names = TRUE) #Reading the file
if (input$League != "Any"){
df <- df[df$League == input$League,]
}
if (input$Club != "Any"){
df <- df[df$Clubs == input$Club,]
}
# print(input$Age)
ages = input$Age
# print(ages)
# print(ages[1])
# Keep players inside the selected [min, max] age range (inclusive).
df <- df[(df$Age >= ages[1]) & (df$Age <= ages[2]),]
# Map the human-readable UI position labels to the short codes used in the
# data's Position1 column.
choices= c("Any","GK","Defender","Midfielder","Forward")
real_choices = c("Any","GK","D","M","FW")
ind = match(input$Position, choices)
real_position = real_choices[ind]
# NOTE(review): grepl() does substring matching, so any Position1 value
# containing the code (e.g. "D" anywhere) matches -- confirm the encoding.
if (real_position != "Any"){
df <- df[grepl(real_position,df$Position1),]
}
return (df[keep_cols])
})
# Squad Builder tab: split the budget across positions in proportion to the
# average market price of each position, then solve one knapsack per
# position (via lpsolver) to pick the best-rated affordable players.
squad_builder <- reactive({
keep_cols <- c("Players","Clubs","Mins", "Age","Values","Rating","Position","Price")
final <- readxl::read_xlsx("final_data3.xlsx", col_names = TRUE) #Reading the file
final <- final[order(-final$Rating),]
# Average price per position, in millions of euro.
GK <- mean(final[grepl("GK",final$Position1),]$Price)/10**6
defenders <- mean(final[grepl("D",final$Position1),]$Price)/10**6
midfields <- mean(final[grepl("M",final$Position1),]$Price)/10**6
forwards <- mean(final[grepl("FW",final$Position1),]$Price)/10**6
# Requested squad composition from the sliders.
num_GK = input$gk
num_DEF = input$def
num_MID = input$mid
num_FW = input$fw
# Budget entered in Mill. EUR; prices in the data are in plain EUR.
total = as.numeric(input$money)*10**6
# Per-position budget share, proportional to (avg price x requested count).
GK_val <- total * GK * num_GK/(GK * num_GK + defenders*num_DEF + midfields*num_MID + forwards*num_FW)
DEF_val <- total * defenders * num_DEF/(GK * num_GK + defenders*num_DEF + midfields*num_MID + forwards*num_FW)
MID_val <- total * midfields * num_MID/(GK * num_GK + defenders*num_DEF + midfields*num_MID + forwards*num_FW)
FW_val <- total * forwards * num_FW/(GK * num_GK + defenders*num_DEF + midfields*num_MID + forwards*num_FW)
# Candidate pools: players affordable within each positional budget.
final_gk <- final[(final$Price <= GK_val) & grepl("GK",final$Position1) ,]
final_def <- final[(final$Price <= DEF_val) & grepl("D",final$Position1),]
final_mid <- final[(final$Price <= MID_val) & grepl("M",final$Position1),]
final_fw <- final[(final$Price <= FW_val) & grepl("FW",final$Position1),]
# paste(length(final_gk),length(final_def),length(final_mid), length(final_fw))
# Multi-position players would otherwise appear in two pools and could be
# bought twice; keep them only in the more attack-minded pool.
# NOTE(review): goalkeepers are never deduplicated against the other pools
# -- presumably "GK" never co-occurs with D/M/FW in Position1; confirm.
common_mid_fw <- intersect(final_mid$Players,final_fw$Players)
common_mid_def <- intersect(final_mid$Players,final_def$Players)
common_fw_def <- intersect(final_def$Players,final_fw$Players)
final_mid <- final_mid[!(final_mid$Players %in% c(common_mid_fw,common_mid_def)), ]
final_def <- final_def[!(final_def$Players %in% common_fw_def),]
# One knapsack per position: maximize rating, exact player count, budget cap.
final_gk <- lpsolver(final_gk, num_GK, GK_val)
final_def <- lpsolver(final_def, num_DEF, DEF_val)
final_mid <- lpsolver(final_mid, num_MID, MID_val)
final_fw <- lpsolver(final_fw, num_FW, FW_val)
final_dt <- rbind(final_gk,final_def, final_mid,final_fw)
return (final_dt[keep_cols])
})
# When the user switches to the Search Engine tab, show the filtered table.
observeEvent(input$first_ch,
if (input$first_ch == "Search Engine"){
output$table <- renderDataTable({
agg_data()
})
}
)
# Squad Builder: only recompute when the Submit button is pressed.
observeEvent(
eventExpr = input[["submit_loc"]],
handlerExpr = {
# Headline: leftover budget after buying the selected squad.
output$text <- renderText({
if(input$money != ""){
df <- squad_builder()
saving <- as.numeric(input$money)*10**6 - sum(df["Price"])
paste("Total savings are:",saving/10**6, "Mill. Euro")
}
})
# Selected squad, with the internal Price column (8th) hidden.
output$table <- renderDataTable({
if(input$money != ""){
squad_builder()[-8]
}
})
}
)
})
shinyApp(ui, server)
|
1e892cf6d5457b3b90ffb6deef5e45a1e06917ac | 5b064eff76b6751755574e5ec72b1c4fe7327c86 | /Country Data/plotsv2.R | 4a5d49b5e09e83162bedd916ded91c2d83e434c8 | [] | no_license | brachunok/sa-migration | 6accf7aa383d5c72ab57b0c5d266a1df47d15e38 | 91373757eeb16e92dc931cdc086150cac468f370 | refs/heads/master | 2021-01-03T23:38:16.094607 | 2020-03-06T23:27:23 | 2020-03-06T23:27:23 | 240,284,935 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,560 | r | plotsv2.R | library(tidyverse)
# Compare each South American country's World Bank indicators against
# Venezuela: one facet grid of indicator x country, blue = country,
# red = Venezuela.
data = read_csv("Country Data/API_Download_DS2_en_csv_v2_792501.csv")
# NOTE(review): assumes the CSV has exactly 50 columns -- country,
# indicator, then one column per year 1960..2007; confirm against the file.
colnames(data) = c("country", "indicator", c(1960:2007))
countries = unique(data$country)
indicators = unique(data$indicator)
# The five indicators we want to plot, in display order.
important = c(
"Population growth (annual %)",
"Official exchange rate (LCU per US$, period average)",
"Fertility rate, total (births per woman)",
"Life expectancy at birth, total (years)",
"GDP per capita (constant LCU)"
)
# Drop Ecuador.  NOTE(review): reason not recorded -- presumably missing data.
countries = countries[!countries %in% "ECU"]
total = data[0,]
# Keep only the `important` indicator rows for each country.
# NOTE(review): match(important, indicators) assumes every country's block
# lists indicators in the same order as the global `indicators` vector --
# confirm; also, growing `total` with rbind() in a loop is O(n^2).
for (country in countries) {
df = data[which(data$country == country),]
df = df[match(important, indicators),]
total = rbind(total, df)
}
library(reshape2)
# Wide (one column per year) -> long (year in `variable`, value in `value`).
df = melt(total)
df$variable = as.numeric(as.character(df$variable))
# Attach Venezuela's value for the same year/indicator to every row.
ven_data = df[df$country == "VEN",]
df = merge(df, ven_data, by = c("variable", 'indicator'))
colnames(df) = c('year', 'indicator', 'country', 'val', 'VEN', 'valVEN')
library(ggplot2)
# Full grid: every country vs Venezuela, free scales per panel.
p = ggplot() +
geom_line(data = df, aes(x = year, y = val), color = 'blue') +
geom_line(data = df, aes(x = year, y = valVEN), color = 'red') +
facet_grid(
cols = vars(country),
rows = vars(indicator),
scales = 'free'
) +
theme_bw()
p
# Smaller test plot: Argentina and Venezuela only.
dataSubset = df[union(which(df$country %in% 'ARG'), which(df$country %in% 'VEN')), ]
testPlot = ggplot() +
geom_line(data = dataSubset, aes(x = year, y = val), color = 'blue') +
geom_line(data = dataSubset, aes(x = year, y = valVEN), color = 'red') +
facet_grid(
cols = vars(country),
rows = vars(indicator),
scales = 'free'
) +
theme_bw()
testPlot
e6a79178b49ca696ec8142d07cb6c0cf8197bd6b | 580b95b4425537c55e0972e761b7c6d2b7180d5f | /man/Xy_task.Rd | 5e42c619018cdffbd75a5eb56d732e72fe3470c9 | [] | no_license | waith/Xy | 556f479a8591e65759d61103390377b50fee47d8 | 955039e8f13e80b5a489b436c45d78573f766cc0 | refs/heads/master | 2020-06-01T07:04:34.708088 | 2019-03-20T22:38:41 | 2019-03-20T22:38:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,069 | rd | Xy_task.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Xy_task.R
\name{Xy_task}
\alias{Xy_task}
\title{Xy Task}
\usage{
Xy_task(name = "regression", link = NULL, cutoff = NULL)
}
\arguments{
\item{name}{the name of the learning task}
\item{link}{a link function to be used to transform the target}
\item{cutoff}{a cutoff function. This function is applied after the link function.}
}
\value{
The function returns a list with a learning task name and two transformation functions.
}
\description{
This function creates a task list. Functions within this list are used to
transform the target variable.
}
\details{
The cutoff function is applied after transforming the target with the link function.
It can be any function; however, in a classification learning task this function is used
to classify the link-transformed target into zeroes and ones.
}
\examples{
# Regression
# In the regression case the link function is the identity link which
# means no transformation at all
my_regression_task <- Xy_task(name = "regression")
}
|
aee16b1741b9e3b1ee33f3207d741cc86148eff4 | e9ba3e4f528a1242997622d10111bd3fa2c9f956 | /munge/Munge.R | 72549d9d46885bbef3b3349b54b78f7c0bfb8cfe | [
"MIT"
] | permissive | NanisTe/EnergySystemVisualisationTool | b7fabcc9efd4348147806413494d2cdf681861bc | 41015e7997a9844b2fc3f192889347f44dc9f08f | refs/heads/master | 2020-06-18T20:00:09.011671 | 2020-01-06T12:55:35 | 2020-01-06T12:55:35 | 196,428,243 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,554 | r | Munge.R | # DATA MUNGING
#
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# Notes / ReadMe ----
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# load project and raw data ----
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# rename and delete raw data ----
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# select columns ----
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# filter rows ----
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# change variable types (e.g. as.numeric) ----
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# adjust coordinates (projection) ----
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# do this and that 1 ----
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# do this and that 2 ----
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# create new data manually ----
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# remove tmp variables ----
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# save munged data to cache ----
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
|
ede4269af2b812d586aa1238c545cb9481937581 | a8f4eb91dec71624c5ca79383ed62217c1eb95c6 | /R Programming/Week 1 and week 3/Hospital Data analysis/rankall.R | 59d025e102420995d4eff562b0e96a9b9a6765d3 | [] | no_license | RaunakKochar/Data-Science-Coursera | 99d088004a0a87b374b5bc812e3b78254910cf26 | 4ca73f2693fd8a70562ed83b3659ff89c940b7cb | refs/heads/master | 2021-01-10T16:14:51.853538 | 2015-11-24T13:03:43 | 2015-11-24T13:03:43 | 46,789,421 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,208 | r | rankall.R | rankall <- function(outcome, num = "best"){
df <- read.csv("rprog-data-ProgAssignment3-data (1)//outcome-of-care-measures.csv")
dffinal <- data.frame(hospital = character(), state = character())
s <- split(df, df$State)
if(outcome == "heart attack"){
for(i in 1:50){
dff <- as.data.frame(s[[i]])
dff <- dff[order(as.numeric(as.character(dff$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack)), dff$Hospital.Name),]
stt <- as.character(dff[1, 7])
if(i == 1){
if(num == "best"){
dffinal <- dff[1, c(2, 7)]
}
else if(num == "worst"){
dffinal <- dff[nrow(dff),c(2,7)]
}
else if(num <= nrow(dff)){
dffinal <- dff[num,c(2,7)]
}
else{
dffinal <- c("NA", stt)
}
}
else{
if(num == "best"){
dffinal <- rbind(dffinal, dff[1,c(2,7)])
}
else if(num == "worst"){
dffinal <- rbind(dffinal, dff[nrow(dff),c(2,7)])
}
else if(num <= nrow(dff)){
dffinal <- rbind(dffinal, dff[num,c(2,7)])
}
else{
dffinal <- rbind(dffinal, c("NA", stt))
}
}
}
}
if(outcome == "heart failure"){
for(i in 1:50){
dff <- as.data.frame(s[[i]])
dff <- dff[order(as.numeric(as.character(dff$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure)), dff$Hospital.Name),]
stt <- as.character(dff[1, 7])
if(i == 1){
if(num == "best"){
dffinal <- dff[1, c(2, 7)]
}
else if(num == "worst"){
dffinal <- dff[nrow(dff),c(2,7)]
}
else if(num <= nrow(dff)){
dffinal <- dff[num,c(2,7)]
}
else{
dffinal <- c("NA", stt)
}
}
else{
if(num == "best"){
dffinal <- rbind(dffinal, dff[1,c(2,7)])
}
else if(num == "worst"){
dffinal <- rbind(dffinal, dff[nrow(dff),c(2,7)])
}
else if(num <= nrow(dff)){
dffinal <- rbind(dffinal, dff[num,c(2,7)])
}
else{
dffinal <- rbind(dffinal, c("NA", stt))
}
}
}
}
if(outcome == "pneumonia"){
for(i in 1:50){
dff <- as.data.frame(s[[i]])
dff <- dff[order(as.numeric(as.character(dff$Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia)), dff$Hospital.Name),]
stt <- as.character(dff[1, 7])
if(i == 1){
if(num == "best"){
dffinal <- dff[1, c(2, 7)]
}
else if(num == "worst"){
dffinal <- dff[nrow(dff),c(2,7)]
}
else if(num <= nrow(dff)){
dffinal <- dff[num,c(2,7)]
}
else{
dffinal <- c("NA", stt)
}
}
else{
if(num == "best"){
dffinal <- rbind(dffinal, dff[1,c(2,7)])
}
else if(num == "worst"){
dffinal <- rbind(dffinal, dff[nrow(dff),c(2,7)])
}
else if(num <= nrow(dff)){
dffinal <- rbind(dffinal, dff[num,c(2,7)])
}
else{
dffinal <- rbind(dffinal, c("NA", stt))
}
}
}
}
colnames(dffinal) <- c("hospital", "state")
dffinal
} |
7ac51ba33de33eb0aa815e38860a934dd70e0b57 | e4a365932c1bed3b02dbf078d21bed165264bc9b | /linear_regression.R | 0650f0113f63fef7cc5d3192a56127c82ea255be | [] | no_license | nwtuck/Algorithm-Recipes-in-R | 951db9dd3ad399c1f5ee1f9dd6ae74cc67e71525 | 23f0797e0f4e3307393aba8fb0547f65e96c5d88 | refs/heads/master | 2020-07-14T09:52:26.744069 | 2016-09-07T09:24:08 | 2016-09-07T09:24:08 | 67,592,075 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,068 | r | linear_regression.R | ## Linear Regression
## Ordinary Least Square Regression
## linear model that seeks to find a set of coefficients for a line/hyper-plane
## that minimise the sum of the squared errors.
# load data (longley: classic highly-collinear macroeconomic data set)
data(longley)
# fit model
fit <- lm(Employed ~ ., longley)
# summarize the fit
summary(fit)
# make predictions on the training data
predictions <- predict(fit, longley)
# summarize accuracy
# FIX: mean((y - yhat)^2) is the MSE; take the square root so the value
# stored (and printed) really is the root mean squared error.
rmse <- sqrt(mean((longley$Employed - predictions)^2))
print(rmse)
## Stepwise Linear Regression
## each iteration of the method makes a change to the set of attributes
## and creates a model to evaluate the performance of the set.
# load data
data(longley)
# fit the full model as the starting point for the search
base <- lm(Employed ~ ., longley)
# summarize the fit
summary(base)
# perform step-wise feature selection (guided by AIC)
fit <- step(base)
# summarize the selected model
summary(fit)
# make predictions
predictions <- predict(fit, longley)
# summarize accuracy
# FIX: take the square root so this is the RMSE rather than the MSE.
rmse <- sqrt(mean((longley$Employed - predictions)^2))
print(rmse)
## Principal Component Regression
## creates a linear regression model using the outputs of a
## Principal Component Analysis (PCA) to estimate the coefficients of the model.
## useful when the data has highly-correlated predictors.
# load the package
#install.packages("pls")
library(pls)
# load data
data(longley)
# fit model (cross-validation reported for each number of components)
fit <- pcr(Employed ~ ., data = longley, validation = "CV")
# summarize the fit
summary(fit)
# make predictions using the first 6 principal components
predictions <- predict(fit, longley, ncomp = 6)
# summarize accuracy
# FIX: take the square root so this is the RMSE rather than the MSE.
rmse <- sqrt(mean((longley$Employed - predictions)^2))
print(rmse)
## Partial Least Square Regression
## creates a linear model of the data in a transformed projection of problem space.
## appropriate for data with highly-correlated predictors.
# load the package
library(pls)
# load data
data(longley)
# fit model (cross-validation reported for each number of components)
fit <- plsr(Employed ~ ., data = longley, validation = "CV")
# summarize the fit
summary(fit)
# make predictions using the first 6 latent components
predictions <- predict(fit, longley, ncomp = 6)
# summarize accuracy
# FIX: take the square root so this is the RMSE rather than the MSE.
rmse <- sqrt(mean((longley$Employed - predictions)^2))
print(rmse)
|
4e5ae79dfaa2769afca7d6f29762c02ed22a4451 | 84420d9b3363f42a69b744d408f2de96aa843abf | /CodePortfolio/week11.R | ab1746d00b81b0b020c37107a50c439dbfd21ac1 | [] | no_license | asabade/ANLY_506_EDA | b23afdd1410798990b2afdb762599c16ecef4e82 | ad39a8cbdab306564ef5f24702cf45ad0031cf2a | refs/heads/master | 2020-05-31T14:24:08.591117 | 2019-06-17T04:04:52 | 2019-06-17T04:04:52 | 190,326,655 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 481 | r | week11.R | #LIBRARY IMPORT
# Library imports
library(tidyverse)
library(cluster)
library(factoextra)
library(dendextend)

# Data: the built-in USArrests set, standardised so each variable
# contributes on a comparable scale
df <- scale(USArrests)
head(df)

# Pairwise Euclidean distance matrix between states
d <- dist(df, method = "euclidean")

# Agglomerative hierarchical clustering (complete linkage) via hclust
HCL1 <- hclust(d, method = "complete")

# Dendrogram of the clustering
plot(HCL1, cex = 0.6, hang = -1)

# The same clustering computed with cluster::agnes
HCL2 <- agnes(df, method = "complete")

# Agglomerative coefficient (values nearer 1 indicate stronger structure)
HCL2$ac
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.