content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
rm(list=ls(all=TRUE))
data <- read.csv("customer.csv", header=T, sep=",")
summary(data)
data$region = factor(data$region)
data$gender = factor(data$gender)
data$edcat = factor(data$edcat)
data$age = factor(data$age)
table(data$gender, data$region)
# Question 1
Q1 = select(data, region, card2spent) %>%
group_by(region) %>%
summarise(card2spent = mean(card2spent))
Result1 = Q1$region[which(Q1$card2spent == max(Q1$card2spent))]
# Question 2
data$spentRate = (1+data$card2spent) / data$cardspent
Q2 = select(data, region, spentRate, gender, edcat, age) %>%
group_by(region, gender, edcat) %>%
summarise(spentRate = mean(spentRate))
Result2 = Q2[which(Q2$spentRate == min(Q2$spentRate)),]
| /customer.R | no_license | NTU-CSX-DataScience/pecu | R | false | false | 723 | r | rm(list=ls(all=TRUE))
data <- read.csv("customer.csv", header=T, sep=",")
summary(data)
data$region = factor(data$region)
data$gender = factor(data$gender)
data$edcat = factor(data$edcat)
data$age = factor(data$age)
table(data$gender, data$region)
# Question 1
Q1 = select(data, region, card2spent) %>%
group_by(region) %>%
summarise(card2spent = mean(card2spent))
Result1 = Q1$region[which(Q1$card2spent == max(Q1$card2spent))]
# Question 2
data$spentRate = (1+data$card2spent) / data$cardspent
Q2 = select(data, region, spentRate, gender, edcat, age) %>%
group_by(region, gender, edcat) %>%
summarise(spentRate = mean(spentRate))
Result2 = Q2[which(Q2$spentRate == min(Q2$spentRate)),]
|
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
## 1. Loading Required Libraries
library(readxl)
library(tidyverse)
library(xgboost)
library(caret)
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
## 2. Loading required train and test data
data = read.csv('R_calculated_v2_11_Day_Interval_train.csv')
set.seed(122)
dt = sort(sample(nrow(data), nrow(data)*0.7))
train = data[dt,]
test = data[-dt,]
test_data = readxl::read_excel('R_calculated_v2_11_Day_Interval_test.xlsx')
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
## 3. Xtreme Gradient Boosting Model
### 3.1 Configuring the hyperparameter grid with range of values
set.seed(122)
xgbGrid <- expand.grid(nrounds = c(400,600,800), #no of iterations
max_depth = c(5, 10, 15, 20, 25), #maximum tree depth
colsample_bytree = seq(0.2, 0.9, length.out = 5), #column sampling
eta = seq(0.1, 0.9), #learning rate for adjusting weights at each step
gamma=c(0, 0.05, 0.1, 0.5), #regularization parameter
min_child_weight = c(1,2,3), #minimum number of instances needed to be in each node
subsample = c(0.5, 0.75, 1.0) #row sampling
)
### 3.2 Fitting/Training multiple XGBoost Models on complete train data for each set of hyperparameter value from the hyperparameter grid. Test MSE will be computed for each model, and at the end model with least Test MSE will be chosen.
xgbGrid$Test_MSE <- 1 #Adding a new column in xgbGrid for storing Test MSE for each row (i.e. each set of hyperparameter values)
set.seed(122)
for (i in 1:nrow(xgbGrid)){
set.seed(122)
xgb_model_full_grid <- xgboost(
data.matrix(data[,-1]),
label = data$Mean.R.,
nround = xgbGrid$nrounds[i],
max_depth = xgbGrid$max_depth[i],
colsample_bytree = xgbGrid$colsample_bytree[i],
eta = xgbGrid$eta[i],
subsample = xgbGrid$subsample[i]
)
pred_values <- predict(xgb_model_full_grid, data.matrix(test_data[,-1]))
Test_MSE <- mean((pred_values - test_data$`Mean(R)`)^2)
xgbGrid$Test_MSE[i] <- Test_MSE
print(i)
}
### 3.3 Fitting/Training a XGBoost Model on complete train data for hyperparameters are as chosen by the above grid for lowest Test MSE
set.seed(122)
xgb_model_best_grid <- xgboost(
data.matrix(data[,-1]),
label = data$Mean.R.,
nround = xgbGrid$nrounds[which.min(xgbGrid$Test_MSE)],
max_depth = xgbGrid$max_depth[which.min(xgbGrid$Test_MSE)],
colsample_bytree = xgbGrid$colsample_bytree[which.min(xgbGrid$Test_MSE)],
eta = xgbGrid$eta[which.min(xgbGrid$Test_MSE)],
subsample = xgbGrid$subsample[which.min(xgbGrid$Test_MSE)]
)
### 3.4 Final Test MSE using the best model as fitted above.
set.seed(122)
predicted_test_data = predict(xgb_model_best_grid, data.matrix(test_data[,-1]))
test_mse_xgb = mean((predicted_test_data - test_data$`Mean(R)`)^2)
print(paste("The Final Test MSE for Xtreme Gradient Boosting is:",test_mse_xgb))
| /Best_Model.R | no_license | shreya-apte/Prediction-of-rate-of-spread-of-Covid--19 | R | false | false | 3,117 | r | #$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
## 1. Loading Required Libraries
library(readxl)
library(tidyverse)
library(xgboost)
library(caret)
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
## 2. Loading required train and test data
data = read.csv('R_calculated_v2_11_Day_Interval_train.csv')
set.seed(122)
dt = sort(sample(nrow(data), nrow(data)*0.7))
train = data[dt,]
test = data[-dt,]
test_data = readxl::read_excel('R_calculated_v2_11_Day_Interval_test.xlsx')
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#
## 3. Xtreme Gradient Boosting Model
### 3.1 Configuring the hyperparameter grid with range of values
set.seed(122)
xgbGrid <- expand.grid(nrounds = c(400,600,800), #no of iterations
max_depth = c(5, 10, 15, 20, 25), #maximum tree depth
colsample_bytree = seq(0.2, 0.9, length.out = 5), #column sampling
eta = seq(0.1, 0.9), #learning rate for adjusting weights at each step
gamma=c(0, 0.05, 0.1, 0.5), #regularization parameter
min_child_weight = c(1,2,3), #minimum number of instances needed to be in each node
subsample = c(0.5, 0.75, 1.0) #row sampling
)
### 3.2 Fitting/Training multiple XGBoost Models on complete train data for each set of hyperparameter value from the hyperparameter grid. Test MSE will be computed for each model, and at the end model with least Test MSE will be chosen.
xgbGrid$Test_MSE <- 1 #Adding a new column in xgbGrid for storing Test MSE for each row (i.e. each set of hyperparameter values)
set.seed(122)
for (i in 1:nrow(xgbGrid)){
set.seed(122)
xgb_model_full_grid <- xgboost(
data.matrix(data[,-1]),
label = data$Mean.R.,
nround = xgbGrid$nrounds[i],
max_depth = xgbGrid$max_depth[i],
colsample_bytree = xgbGrid$colsample_bytree[i],
eta = xgbGrid$eta[i],
subsample = xgbGrid$subsample[i]
)
pred_values <- predict(xgb_model_full_grid, data.matrix(test_data[,-1]))
Test_MSE <- mean((pred_values - test_data$`Mean(R)`)^2)
xgbGrid$Test_MSE[i] <- Test_MSE
print(i)
}
### 3.3 Fitting/Training a XGBoost Model on complete train data for hyperparameters are as chosen by the above grid for lowest Test MSE
set.seed(122)
xgb_model_best_grid <- xgboost(
data.matrix(data[,-1]),
label = data$Mean.R.,
nround = xgbGrid$nrounds[which.min(xgbGrid$Test_MSE)],
max_depth = xgbGrid$max_depth[which.min(xgbGrid$Test_MSE)],
colsample_bytree = xgbGrid$colsample_bytree[which.min(xgbGrid$Test_MSE)],
eta = xgbGrid$eta[which.min(xgbGrid$Test_MSE)],
subsample = xgbGrid$subsample[which.min(xgbGrid$Test_MSE)]
)
### 3.4 Final Test MSE using the best model as fitted above.
set.seed(122)
predicted_test_data = predict(xgb_model_best_grid, data.matrix(test_data[,-1]))
test_mse_xgb = mean((predicted_test_data - test_data$`Mean(R)`)^2)
print(paste("The Final Test MSE for Xtreme Gradient Boosting is:",test_mse_xgb))
|
#!/usr/bin/env Rscript
library(methylKit)
library(graphics)
library(tools)
#install.packages("dendextend")
library(dendextend)
setwd("/home/stenger/stenger_data/EPIGENETIC_DATA/05_02_bismark")
load("Allmeth.norm.rda")
hc2 <- clusterSamples(Allmeth.norm, dist="correlation", method="ward", plot=TRUE)
#dat <- read.table("genolike.beagle_04.txt", header=T, row.names=1)
#dat2 <- dat[ , -c(1:2)]
#colnames(dat2) <- c("Acclimation_1_31_5a", "Acclimation_1_31_5b", "Acclimation_1_31_5c", "Acclimation_3_30a", "Acclimation_3_30b", "Acclimation_3_30c", "Control_1_30a", "Control_1_30b", "Control_1_30c", "Control_3_30a", "Control_3_30b", "Control_3_30c")
#dat3 <- t(dat2)
# Euclidean distance
#dist <- dist(dat3 , diag=TRUE)
# Hierarchical Clustering with hclust
#hc <- hclust(dist)
dat <- read.table("all.ibs", header=T, row.names=1)
#dat
#tail(dat)
head(dat)
row.names(dat) <- c("Acclimation_1_31_5a",
"Acclimation_1_31_5b",
"Acclimation_1_31_5c",
"Acclimation_3_30a",
"Acclimation_3_30b",
"Acclimation_3_30c",
"Control_1_30a",
"Control_1_30b",
"Control_1_30c",
"Control_3_30a",
"Control_3_30b",
"Control_3_30c")
head(dat)
str(dat)
# Test for only A/G Snps
dat2 <- data.frame(dat$nSites, dat$Llike, dat$nAG)
head(dat2)
row.names(dat2) <- c("Acclimation_1_31_5a",
"Acclimation_1_31_5b",
"Acclimation_1_31_5c",
"Acclimation_3_30a",
"Acclimation_3_30b",
"Acclimation_3_30c",
"Control_1_30a",
"Control_1_30b",
"Control_1_30c",
"Control_3_30a",
"Control_3_30b",
"Control_3_30c")
head(dat2)
# Euclidean distance
dist <- dist(dat2 , diag=TRUE)/10000000
# Hierarchical Clustering with hclust
hc <- hclust(dist, method = "average")
# Create two dendrograms
d1 <- as.dendrogram (hc)
d2 <- as.dendrogram (hc2)
# Create a list to hold dendrograms
dend_list <- dendlist(d1, d2)
pdf(file= 'Comparative_dendrogram_02_01.pdf' ,onefile=T,paper='A4')
# Align and plot two dendrograms side by side
dendlist(d1, d2) %>%
untangle(method = "step1side") %>% # Find the best alignment layout
tanglegram() # Draw the two dendrograms
dev.off()
pdf(file= 'Comparative_dendrogram_02_03_kmeans4.pdf', onefile=T, paper='a4r')
dl <- dendlist(
d1 %>%
set("labels_col", value = c("skyblue", "orange"), k=2) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange"), k = 2),
d2 %>%
set("labels_col", value = c("skyblue", "orange", "red", "green"), k=4) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange", "red", "green"), k = 4)
) %>%
untangle(method = "step1side") # Find the best alignment layout
# Plot them together
tanglegram(dl,
common_subtrees_color_lines = TRUE,
highlight_distinct_edges = TRUE,
highlight_branches_lwd = FALSE,
margin_inner=7,
lwd=2
)
dev.off()
pdf(file= 'Comparative_dendrogram_02_03_kmeans2_2.pdf', onefile=T, paper='a4r')
dl <- dendlist(
d1 %>%
set("labels_col", value = c("skyblue", "orange"), k=2) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange"), k = 2),
d2 %>%
set("labels_col", value = c("skyblue", "orange", "red", "green"), k=2) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange", "red", "green"), k = 2)
) %>%
untangle(method = "step1side") # Find the best alignment layout
# Plot them together
tanglegram(dl,
common_subtrees_color_lines = TRUE,
highlight_distinct_edges = TRUE,
highlight_branches_lwd = FALSE,
margin_inner=7,
lwd=2
)
dev.off()
pdf(file= 'Comparative_dendrogram_02_03_kmeans3_2.pdf', onefile=T, paper='a4r')
dl <- dendlist(
d1 %>%
set("labels_col", value = c("skyblue", "orange"), k=3) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange"), k = 3),
d2 %>%
set("labels_col", value = c("skyblue", "orange", "red", "green"), k=2) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange", "red", "green"), k = 2)
) %>%
untangle(method = "step1side") # Find the best alignment layout
# Plot them together
tanglegram(dl,
common_subtrees_color_lines = TRUE,
highlight_distinct_edges = TRUE,
highlight_branches_lwd = FALSE,
margin_inner=7,
lwd=2
)
dev.off()
pdf(file= 'Comparative_dendrogram_02_03_kmeans5.pdf', onefile=T, paper='a4r')
# Custom these kendo, and place them in a list
#dl <- dendlist(
# d1 %>%
# set("labels_col", value = c("skyblue", "orange", "grey"), k=3) %>%
# set("branches_lty", 1) %>%
# set("branches_k_color", value = c("skyblue", "orange", "grey"), k = 3),
# d2 %>%
# set("labels_col", value = c("skyblue", "orange", "grey"), k=3) %>%
# set("branches_lty", 1) %>%
# set("branches_k_color", value = c("skyblue", "orange", "grey"), k = 3)
#)
dl <- dendlist(
d1 %>%
set("labels_col", value = c("skyblue", "orange"), k=2) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange"), k = 2),
d2 %>%
set("labels_col", value = c("skyblue", "orange", "red", "green", "grey"), k=5) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange", "red", "green", "grey"), k = 5)
) %>%
untangle(method = "step1side") # Find the best alignment layout
# Plot them together
tanglegram(dl,
common_subtrees_color_lines = TRUE,
highlight_distinct_edges = TRUE,
highlight_branches_lwd = FALSE,
margin_inner=7,
lwd=2
)
dev.off()
pdf(file= 'Comparative_dendrogram_02_03_kmeans6.pdf', onefile=T, paper='a4r')
# Custom these kendo, and place them in a list
#dl <- dendlist(
# d1 %>%
# set("labels_col", value = c("skyblue", "orange", "grey"), k=3) %>%
# set("branches_lty", 1) %>%
# set("branches_k_color", value = c("skyblue", "orange", "grey"), k = 3),
# d2 %>%
# set("labels_col", value = c("skyblue", "orange", "grey"), k=3) %>%
# set("branches_lty", 1) %>%
# set("branches_k_color", value = c("skyblue", "orange", "grey"), k = 3)
#)
dl <- dendlist(
d1 %>%
set("labels_col", value = c("skyblue", "orange"), k=2) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange"), k = 2),
d2 %>%
set("labels_col", value = c("skyblue", "orange", "red", "green", "grey", "blue"), k=6) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange", "red", "green", "grey", "blue"), k = 6)
) %>%
untangle(method = "step1side") # Find the best alignment layout
# Plot them together
tanglegram(dl,
common_subtrees_color_lines = TRUE,
highlight_distinct_edges = TRUE,
highlight_branches_lwd = FALSE,
margin_inner=7,
lwd=2
)
dev.off()
pdf(file= 'Comparative_dendrogram_02_03_kmeans3.pdf', onefile=T, paper='a4r')
# Custom these kendo, and place them in a list
#dl <- dendlist(
# d1 %>%
# set("labels_col", value = c("skyblue", "orange", "grey"), k=3) %>%
# set("branches_lty", 1) %>%
# set("branches_k_color", value = c("skyblue", "orange", "grey"), k = 3),
# d2 %>%
# set("labels_col", value = c("skyblue", "orange", "grey"), k=3) %>%
# set("branches_lty", 1) %>%
# set("branches_k_color", value = c("skyblue", "orange", "grey"), k = 3)
#)
dl <- dendlist(
d1 %>%
set("labels_col", value = c("skyblue", "orange"), k=2) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange"), k = 2),
d2 %>%
set("labels_col", value = c("skyblue", "orange", "red"), k=3) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange", "red"), k = 3)
) %>%
untangle(method = "step1side") # Find the best alignment layout
# Plot them together
tanglegram(dl,
common_subtrees_color_lines = TRUE,
highlight_distinct_edges = TRUE,
highlight_branches_lwd = FALSE,
margin_inner=7,
lwd=2
)
dev.off()
| /00_scripts/21_comparative_hclust_02.R | no_license | PLStenger/Acropora_digitifera_BS_Seq | R | false | false | 8,163 | r | #!/usr/bin/env Rscript
library(methylKit)
library(graphics)
library(tools)
#install.packages("dendextend")
library(dendextend)
setwd("/home/stenger/stenger_data/EPIGENETIC_DATA/05_02_bismark")
load("Allmeth.norm.rda")
hc2 <- clusterSamples(Allmeth.norm, dist="correlation", method="ward", plot=TRUE)
#dat <- read.table("genolike.beagle_04.txt", header=T, row.names=1)
#dat2 <- dat[ , -c(1:2)]
#colnames(dat2) <- c("Acclimation_1_31_5a", "Acclimation_1_31_5b", "Acclimation_1_31_5c", "Acclimation_3_30a", "Acclimation_3_30b", "Acclimation_3_30c", "Control_1_30a", "Control_1_30b", "Control_1_30c", "Control_3_30a", "Control_3_30b", "Control_3_30c")
#dat3 <- t(dat2)
# Euclidean distance
#dist <- dist(dat3 , diag=TRUE)
# Hierarchical Clustering with hclust
#hc <- hclust(dist)
dat <- read.table("all.ibs", header=T, row.names=1)
#dat
#tail(dat)
head(dat)
row.names(dat) <- c("Acclimation_1_31_5a",
"Acclimation_1_31_5b",
"Acclimation_1_31_5c",
"Acclimation_3_30a",
"Acclimation_3_30b",
"Acclimation_3_30c",
"Control_1_30a",
"Control_1_30b",
"Control_1_30c",
"Control_3_30a",
"Control_3_30b",
"Control_3_30c")
head(dat)
str(dat)
# Test for only A/G Snps
dat2 <- data.frame(dat$nSites, dat$Llike, dat$nAG)
head(dat2)
row.names(dat2) <- c("Acclimation_1_31_5a",
"Acclimation_1_31_5b",
"Acclimation_1_31_5c",
"Acclimation_3_30a",
"Acclimation_3_30b",
"Acclimation_3_30c",
"Control_1_30a",
"Control_1_30b",
"Control_1_30c",
"Control_3_30a",
"Control_3_30b",
"Control_3_30c")
head(dat2)
# Euclidean distance
dist <- dist(dat2 , diag=TRUE)/10000000
# Hierarchical Clustering with hclust
hc <- hclust(dist, method = "average")
# Create two dendrograms
d1 <- as.dendrogram (hc)
d2 <- as.dendrogram (hc2)
# Create a list to hold dendrograms
dend_list <- dendlist(d1, d2)
pdf(file= 'Comparative_dendrogram_02_01.pdf' ,onefile=T,paper='A4')
# Align and plot two dendrograms side by side
dendlist(d1, d2) %>%
untangle(method = "step1side") %>% # Find the best alignment layout
tanglegram() # Draw the two dendrograms
dev.off()
pdf(file= 'Comparative_dendrogram_02_03_kmeans4.pdf', onefile=T, paper='a4r')
dl <- dendlist(
d1 %>%
set("labels_col", value = c("skyblue", "orange"), k=2) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange"), k = 2),
d2 %>%
set("labels_col", value = c("skyblue", "orange", "red", "green"), k=4) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange", "red", "green"), k = 4)
) %>%
untangle(method = "step1side") # Find the best alignment layout
# Plot them together
tanglegram(dl,
common_subtrees_color_lines = TRUE,
highlight_distinct_edges = TRUE,
highlight_branches_lwd = FALSE,
margin_inner=7,
lwd=2
)
dev.off()
pdf(file= 'Comparative_dendrogram_02_03_kmeans2_2.pdf', onefile=T, paper='a4r')
dl <- dendlist(
d1 %>%
set("labels_col", value = c("skyblue", "orange"), k=2) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange"), k = 2),
d2 %>%
set("labels_col", value = c("skyblue", "orange", "red", "green"), k=2) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange", "red", "green"), k = 2)
) %>%
untangle(method = "step1side") # Find the best alignment layout
# Plot them together
tanglegram(dl,
common_subtrees_color_lines = TRUE,
highlight_distinct_edges = TRUE,
highlight_branches_lwd = FALSE,
margin_inner=7,
lwd=2
)
dev.off()
pdf(file= 'Comparative_dendrogram_02_03_kmeans3_2.pdf', onefile=T, paper='a4r')
dl <- dendlist(
d1 %>%
set("labels_col", value = c("skyblue", "orange"), k=3) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange"), k = 3),
d2 %>%
set("labels_col", value = c("skyblue", "orange", "red", "green"), k=2) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange", "red", "green"), k = 2)
) %>%
untangle(method = "step1side") # Find the best alignment layout
# Plot them together
tanglegram(dl,
common_subtrees_color_lines = TRUE,
highlight_distinct_edges = TRUE,
highlight_branches_lwd = FALSE,
margin_inner=7,
lwd=2
)
dev.off()
pdf(file= 'Comparative_dendrogram_02_03_kmeans5.pdf', onefile=T, paper='a4r')
# Custom these kendo, and place them in a list
#dl <- dendlist(
# d1 %>%
# set("labels_col", value = c("skyblue", "orange", "grey"), k=3) %>%
# set("branches_lty", 1) %>%
# set("branches_k_color", value = c("skyblue", "orange", "grey"), k = 3),
# d2 %>%
# set("labels_col", value = c("skyblue", "orange", "grey"), k=3) %>%
# set("branches_lty", 1) %>%
# set("branches_k_color", value = c("skyblue", "orange", "grey"), k = 3)
#)
dl <- dendlist(
d1 %>%
set("labels_col", value = c("skyblue", "orange"), k=2) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange"), k = 2),
d2 %>%
set("labels_col", value = c("skyblue", "orange", "red", "green", "grey"), k=5) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange", "red", "green", "grey"), k = 5)
) %>%
untangle(method = "step1side") # Find the best alignment layout
# Plot them together
tanglegram(dl,
common_subtrees_color_lines = TRUE,
highlight_distinct_edges = TRUE,
highlight_branches_lwd = FALSE,
margin_inner=7,
lwd=2
)
dev.off()
pdf(file= 'Comparative_dendrogram_02_03_kmeans6.pdf', onefile=T, paper='a4r')
# Custom these kendo, and place them in a list
#dl <- dendlist(
# d1 %>%
# set("labels_col", value = c("skyblue", "orange", "grey"), k=3) %>%
# set("branches_lty", 1) %>%
# set("branches_k_color", value = c("skyblue", "orange", "grey"), k = 3),
# d2 %>%
# set("labels_col", value = c("skyblue", "orange", "grey"), k=3) %>%
# set("branches_lty", 1) %>%
# set("branches_k_color", value = c("skyblue", "orange", "grey"), k = 3)
#)
dl <- dendlist(
d1 %>%
set("labels_col", value = c("skyblue", "orange"), k=2) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange"), k = 2),
d2 %>%
set("labels_col", value = c("skyblue", "orange", "red", "green", "grey", "blue"), k=6) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange", "red", "green", "grey", "blue"), k = 6)
) %>%
untangle(method = "step1side") # Find the best alignment layout
# Plot them together
tanglegram(dl,
common_subtrees_color_lines = TRUE,
highlight_distinct_edges = TRUE,
highlight_branches_lwd = FALSE,
margin_inner=7,
lwd=2
)
dev.off()
pdf(file= 'Comparative_dendrogram_02_03_kmeans3.pdf', onefile=T, paper='a4r')
# Custom these kendo, and place them in a list
#dl <- dendlist(
# d1 %>%
# set("labels_col", value = c("skyblue", "orange", "grey"), k=3) %>%
# set("branches_lty", 1) %>%
# set("branches_k_color", value = c("skyblue", "orange", "grey"), k = 3),
# d2 %>%
# set("labels_col", value = c("skyblue", "orange", "grey"), k=3) %>%
# set("branches_lty", 1) %>%
# set("branches_k_color", value = c("skyblue", "orange", "grey"), k = 3)
#)
dl <- dendlist(
d1 %>%
set("labels_col", value = c("skyblue", "orange"), k=2) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange"), k = 2),
d2 %>%
set("labels_col", value = c("skyblue", "orange", "red"), k=3) %>%
set("branches_lty", 1) %>%
set("branches_k_color", value = c("skyblue", "orange", "red"), k = 3)
) %>%
untangle(method = "step1side") # Find the best alignment layout
# Plot them together
tanglegram(dl,
common_subtrees_color_lines = TRUE,
highlight_distinct_edges = TRUE,
highlight_branches_lwd = FALSE,
margin_inner=7,
lwd=2
)
dev.off()
|
if (!file.exists("./project1"))
dir.create("./project1")
# setwd("./project1")
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",destfile="./project1/data.zip",method='curl')
file <- as.character(unzip("./project1/data.zip", list=TRUE)[1])
unzip("./project1/data.zip", files=file,exdir="./project1")
# cols <- read.table(paste0("./project1/",file), sep=";", nrows=1);
df <- read.table(paste0("./project1/",file), sep=";", skip=66637, nrows=2880, na.strings="?")
colnames(df) <- c("Date", "Time", "Global_active_power",
"Global_reactive_power", "Voltage", "Global_intensity",
"Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
df$Date <- as.Date(df$Date, format = "%d/%m/%Y")
tmp <- paste(df$Date,df$Time)
df$Time <- strptime(tmp, format="%Y-%m-%d %H:%M:%S")
#head(df,5)
#tail(df,5)
#plot 2
png(file="./plot2.png",width=480,height=480)
plot(y=df$Global_active_power, x=df$Time,
ylab = "Global Active Power (kilowatts)",
xlab = "",
type="l")
dev.off()
| /plot2.R | no_license | daghan/ExData_Plotting1 | R | false | false | 1,062 | r | if (!file.exists("./project1"))
dir.create("./project1")
# setwd("./project1")
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",destfile="./project1/data.zip",method='curl')
file <- as.character(unzip("./project1/data.zip", list=TRUE)[1])
unzip("./project1/data.zip", files=file,exdir="./project1")
# cols <- read.table(paste0("./project1/",file), sep=";", nrows=1);
df <- read.table(paste0("./project1/",file), sep=";", skip=66637, nrows=2880, na.strings="?")
colnames(df) <- c("Date", "Time", "Global_active_power",
"Global_reactive_power", "Voltage", "Global_intensity",
"Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
df$Date <- as.Date(df$Date, format = "%d/%m/%Y")
tmp <- paste(df$Date,df$Time)
df$Time <- strptime(tmp, format="%Y-%m-%d %H:%M:%S")
#head(df,5)
#tail(df,5)
#plot 2
png(file="./plot2.png",width=480,height=480)
plot(y=df$Global_active_power, x=df$Time,
ylab = "Global Active Power (kilowatts)",
xlab = "",
type="l")
dev.off()
|
#install.packages("text2vec")
#install.packages("tm")
#install.packages("magrittr")
#install.packages("wordcloud")
#install.packages("stopwords", dependencies = TRUE)
library("stopwords")
library(text2vec)
library(tm)
library(magrittr)
library(wordcloud)
#EDIT this row
my_file <- "my_Scopus_TSE_articles_clean_data.RData"
#draw_myWordCloud = function(my_file){
my_temp_file = paste(my_data_dir, "/", sep="")
my_temp_file = paste(my_temp_file, my_file, sep="")
load(my_temp_file)
print(paste("Creating Word cloud, my_file: ", my_file))
my_text <- paste(my_articles$Title, my_articles$Abstract_clean)
my_text = tolower(my_text)
#remove more words that we do not care about
my_stopwords = c(stopwords::stopwords(language = "en", source = "snowball"),"myStopword1", "myStopword2")
my_text = removeWords(my_text, my_stopwords)
wordcloud(my_text, max.words=50, min.freq=5, random.order=FALSE, rot.per=0)
rm(my_text)
print("Finished Word cloud")
#} | /WordCloud.R | no_license | Skippari/TrendMining | R | false | false | 993 | r | #install.packages("text2vec")
#install.packages("tm")
#install.packages("magrittr")
#install.packages("wordcloud")
#install.packages("stopwords", dependencies = TRUE)
library("stopwords")
library(text2vec)
library(tm)
library(magrittr)
library(wordcloud)
#EDIT this row
my_file <- "my_Scopus_TSE_articles_clean_data.RData"
#draw_myWordCloud = function(my_file){
my_temp_file = paste(my_data_dir, "/", sep="")
my_temp_file = paste(my_temp_file, my_file, sep="")
load(my_temp_file)
print(paste("Creating Word cloud, my_file: ", my_file))
my_text <- paste(my_articles$Title, my_articles$Abstract_clean)
my_text = tolower(my_text)
#remove more words that we do not care about
my_stopwords = c(stopwords::stopwords(language = "en", source = "snowball"),"myStopword1", "myStopword2")
my_text = removeWords(my_text, my_stopwords)
wordcloud(my_text, max.words=50, min.freq=5, random.order=FALSE, rot.per=0)
rm(my_text)
print("Finished Word cloud")
#} |
#install.packages("rdrobust")
library("rdrobust")
data(rdrobust_RDsenate)
attach(rdrobust_RDsenate)
rdbwselect(vote,margin)
rdrobust(vote,margin)
rdplot(vote,margin,x.label = "Violence",y.label = "Arboviral Risk",title = "RD Arboviral risk vs Violence",c=0)
| /k01/rd model.R | no_license | amyrobyn/LaBeaud_Lab | R | false | false | 258 | r | #install.packages("rdrobust")
library("rdrobust")
data(rdrobust_RDsenate)
attach(rdrobust_RDsenate)
rdbwselect(vote,margin)
rdrobust(vote,margin)
rdplot(vote,margin,x.label = "Violence",y.label = "Arboviral Risk",title = "RD Arboviral risk vs Violence",c=0)
|
# STUDY 2: YOUNGER CHILDREN
# read in & tidy data
d2_46 <- read.csv("./anonymized_data/study2_children46_anonymized.csv") %>%
mutate(age = as.numeric(as.character(age))) %>%
filter(((age >= 4 & age < 7) | is.na(age)),
# character %in% c("beetle", "robot"),
!grepl("metal", capWording),
!grepl("turned on", capWording)) %>%
select(subid, age, gender, ethnicity,
character, capWording, response, rt, sessionDuration) %>%
rename(duration = sessionDuration) %>%
mutate(age_group = "children46") %>%
mutate(response_num = case_when(
tolower(response) == "no" ~ 0,
tolower(response) %in% c("kinda", "kida") ~ 0.5,
tolower(response) == "yes" ~ 1)) %>%
mutate(capWording = as.character(trimws(capWording)),
capacity = case_when(
# grepl("\\--", capWording) ~ gsub(" \\--.*$", "...", capWording),
grepl("close by or far away", capWording) ~ "sense...far away",
grepl("understand how somebody else is feeling", capWording) ~
"understand how someone...feeling",
grepl("pleasure", capWording) ~ "feel pleasure...",
grepl("sick", capWording) ~ "feel sick...",
grepl("desires", capWording) ~ "have desires...",
grepl("self-control", capWording) ~ "have self-control...",
grepl("goals", capWording) ~ "have goals...",
grepl("personality", capWording) ~ "have a personality...",
grepl("beliefs", capWording) ~ "have beliefs...",
TRUE ~ capWording)) %>%
mutate(ethnicity = tolower(as.character(ethnicity)),
ethnicity = gsub("sn", "", ethnicity),
ethnicity = trimws(ethnicity),
ethnicity = case_when(
grepl(" ", ethnicity) |
(grepl("\\/", ethnicity) & !grepl("hisp", ethnicity)) ~ "multi",
ethnicity %in% c("a", "chinese", "east asian") ~ "east asian",
ethnicity %in% c("af", "ethiopian american") ~ "black",
ethnicity %in% c("c", "cj") ~ "white",
ethnicity == "h" ~ "hispanic latino",
ethnicity == "i" ~ "south or southeast asian",
ethnicity == "me" ~ "middle eastern",
ethnicity == "na" ~ "native american",
TRUE ~ ethnicity)) %>%
distinct()
# clean data
d2_46 <- d2_46 %>%
filter(rt >= 250 | is.na(rt))
# make wideform
d2_46_wide <- d2_46 %>%
mutate(subid_char = paste(subid, character, sep = "_")) %>%
select(subid_char, capacity, response_num) %>%
spread(capacity, response_num) %>%
column_to_rownames("subid_char")
# impute missing values using the mean by character and capacity
d2_46_wide_i <- d2_46_wide %>%
rownames_to_column("subid_char") %>%
mutate(subid = gsub("_.*$", "", subid_char),
character = gsub("^.*_", "", subid_char)) %>%
group_by(character) %>%
mutate_at(vars(-c(subid, character, subid_char)),
funs(replace(., which(is.na(.)), mean(., na.rm = T)))) %>%
ungroup() %>%
select(-subid, -character) %>%
column_to_rownames("subid_char")
| /manuscript/scripts/data_s2_46.R | no_license | kgweisman/dimkid | R | false | false | 3,037 | r | # STUDY 2: YOUNGER CHILDREN
# read in & tidy data
d2_46 <- read.csv("./anonymized_data/study2_children46_anonymized.csv") %>%
mutate(age = as.numeric(as.character(age))) %>%
filter(((age >= 4 & age < 7) | is.na(age)),
# character %in% c("beetle", "robot"),
!grepl("metal", capWording),
!grepl("turned on", capWording)) %>%
select(subid, age, gender, ethnicity,
character, capWording, response, rt, sessionDuration) %>%
rename(duration = sessionDuration) %>%
mutate(age_group = "children46") %>%
mutate(response_num = case_when(
tolower(response) == "no" ~ 0,
tolower(response) %in% c("kinda", "kida") ~ 0.5,
tolower(response) == "yes" ~ 1)) %>%
mutate(capWording = as.character(trimws(capWording)),
capacity = case_when(
# grepl("\\--", capWording) ~ gsub(" \\--.*$", "...", capWording),
grepl("close by or far away", capWording) ~ "sense...far away",
grepl("understand how somebody else is feeling", capWording) ~
"understand how someone...feeling",
grepl("pleasure", capWording) ~ "feel pleasure...",
grepl("sick", capWording) ~ "feel sick...",
grepl("desires", capWording) ~ "have desires...",
grepl("self-control", capWording) ~ "have self-control...",
grepl("goals", capWording) ~ "have goals...",
grepl("personality", capWording) ~ "have a personality...",
grepl("beliefs", capWording) ~ "have beliefs...",
TRUE ~ capWording)) %>%
mutate(ethnicity = tolower(as.character(ethnicity)),
ethnicity = gsub("sn", "", ethnicity),
ethnicity = trimws(ethnicity),
ethnicity = case_when(
grepl(" ", ethnicity) |
(grepl("\\/", ethnicity) & !grepl("hisp", ethnicity)) ~ "multi",
ethnicity %in% c("a", "chinese", "east asian") ~ "east asian",
ethnicity %in% c("af", "ethiopian american") ~ "black",
ethnicity %in% c("c", "cj") ~ "white",
ethnicity == "h" ~ "hispanic latino",
ethnicity == "i" ~ "south or southeast asian",
ethnicity == "me" ~ "middle eastern",
ethnicity == "na" ~ "native american",
TRUE ~ ethnicity)) %>%
distinct()
# clean data
d2_46 <- d2_46 %>%
filter(rt >= 250 | is.na(rt))
# make wideform
d2_46_wide <- d2_46 %>%
mutate(subid_char = paste(subid, character, sep = "_")) %>%
select(subid_char, capacity, response_num) %>%
spread(capacity, response_num) %>%
column_to_rownames("subid_char")
# impute missing values using the mean by character and capacity
d2_46_wide_i <- d2_46_wide %>%
rownames_to_column("subid_char") %>%
mutate(subid = gsub("_.*$", "", subid_char),
character = gsub("^.*_", "", subid_char)) %>%
group_by(character) %>%
mutate_at(vars(-c(subid, character, subid_char)),
funs(replace(., which(is.na(.)), mean(., na.rm = T)))) %>%
ungroup() %>%
select(-subid, -character) %>%
column_to_rownames("subid_char")
|
# Benchmark three CSV readers on the same file:
#   data.table::fread  -- reputed to be the fastest
#   readr::read_csv
#   base::read.csv
library(data.table)
# Use <- for assignment (idiomatic R; `=` was used originally).
b <- fread("BigDiamonds.csv")
head(b)
library(readr)
# NOTE(review): the file was already read once above, so OS-level file
# caching may favor the later timings -- repeat runs if the exact
# comparison matters.
system.time(read_csv("BigDiamonds.csv"))
system.time(fread("BigDiamonds.csv"))
system.time(read.csv("BigDiamonds.csv"))
| /R/fread.R | no_license | Decision-Stats/s15_codes | R | false | false | 236 | r | #fread
#suppose to be the fastest, will see.
library(data.table)
b=fread("BigDiamonds.csv")
head(b)
library(readr)
system.time(read_csv("BigDiamonds.csv"))
system.time(fread("BigDiamonds.csv"))
system.time(read.csv("BigDiamonds.csv"))
|
\name{Multinomial regression}
\alias{multinom.reg}
\title{
Multinomial regression
}
\description{
Multinomial regression.
}
\usage{
multinom.reg(y, x, tol = 1e-07, maxiters = 50)
}
\arguments{
\item{y}{
The response variable. A numerical or a factor type vector.
}
\item{x}{
A matrix or a data.frame with the predictor variables.
}
\item{tol}{
The tolerance value used to terminate the Newton-Raphson algorithm.
}
\item{maxiters}{
The maximum number of iterations Newton-Raphson will perform.
}
}
\value{
A list including:
\item{iters}{
The number of iterations required by the Newton-Raphson.
}
\item{loglik}{
The value of the maximised log-likelihood.
}
\item{be}{
A matrix with the estimated regression coefficients.
}
}
\references{
Bohning, D. (1992). Multinomial logistic regression algorithm. Annals of the
Institute of Statistical Mathematics, 44(1): 197-200.
}
\author{
Michail Tsagris
R implementation and documentation: Michail Tsagris <mtsagris@yahoo.gr> and Manos
Papadakis <papadakm95@gmail.com>.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{ \link{glm_logistic}, \link{score.multinomregs} \link{logistic_only}
}
}
\examples{
\dontrun{
y <- iris[, 5]
x <- matrnorm(150, 3)
res <- multinom.reg(y, x)
}
}
\keyword{ Multinomial distribution }
\keyword{ regression } | /fuzzedpackages/Rfast/man/multinom.reg.Rd | no_license | akhikolla/testpackages | R | false | false | 1,362 | rd | \name{Multinomial regression}
\alias{multinom.reg}
\title{
Multinomial regression
}
\description{
Multinomial regression.
}
\usage{
multinom.reg(y, x, tol = 1e-07, maxiters = 50)
}
\arguments{
\item{y}{
The response variable. A numerical or a factor type vector.
}
\item{x}{
A matrix or a data.frame with the predictor variables.
}
\item{tol}{
This tolerance value to terminate the Newton-Raphson algorithm.
}
\item{maxiters}{
The maximum number of iterations Newton-Raphson will perform.
}
}
\value{
A list including:
\item{iters}{
The number of iterations required by the Newton-Raphson.
}
\item{loglik}{
The value of the maximised log-likelihood.
}
\item{be}{
A matrix with the estimated regression coefficients.
}
}
\references{
Bohning, D. (1992). Multinomial logistic regression algorithm. Annals of the
Institute of Statistical Mathematics, 44(1): 197-200.
}
\author{
Michail Tsagris
R implementation and documentation: Michail Tsagris <mtsagris@yahoo.gr> and Manos
Papadakis <papadakm95@gmail.com>.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{ \link{glm_logistic}, \link{score.multinomregs} \link{logistic_only}
}
}
\examples{
\dontrun{
y <- iris[, 5]
x <- matrnorm(150, 3)
res <- multinom.reg(y, x)
}
}
\keyword{ Multinomial distribution }
\keyword{ regression } |
# Predict home ownership (Rent vs Own) from demographic factors with a
# single-hidden-layer neural network, evaluated on a held-out 30% test set.
library(nnet)

data <- read.csv("customers.csv")

# Drop rows where any predictor holds the literal string 'NULL'
# (the export encodes missing values that way).
data <- data[
  data$Gender != 'NULL' &
  data$Marital.Status != 'NULL' &
  data$Home.Ownership != 'NULL' &
  data$Education.Level != 'NULL',
]

data$Gender <- factor(data$Gender)
data$Marital.Status <- factor(data$Marital.Status)
data$Home.Ownership <- factor(data$Home.Ownership)
data$Education.Level <- factor(data$Education.Level)

# Dummy-code all factors (drop the intercept column). After this step the
# response is the 0/1 column 'Home.OwnershipRent' (1 = Rent, 0 = Own).
mmFormula <- ~ Home.Ownership + Education.Level + Gender + Marital.Status
modelData <- model.matrix(mmFormula, data)
colnames(modelData)
data <- as.data.frame(modelData[, -1])

# 70/30 train/test split (floor() makes the implicit size truncation of
# sample() explicit; seq_len() is safe for empty data).
total <- nrow(data)
index <- sample(seq_len(total), floor(total * 0.7))
data.train <- data[index, ]
data.test <- data[-index, ]
names(data)

data.train.nnet <- nnet(
  formula = Home.OwnershipRent ~ .,
  data = data.train,
  size = 10,      # hidden units
  decay = 0.1,    # weight decay (regularization)
  linout = TRUE,  # linear output unit; thresholded at 0.5 below
  trace = FALSE
)

# Score the test set and threshold at 0.5 into class labels.
data.test.predict <- predict(data.train.nnet, newdata = data.test)
data.test.predict <- ifelse(data.test.predict > 0.5, "Rent", "Own")

# BUG FIX: the dummy-coded frame has no column named 'Home.Ownership';
# the original `data.test$Home.Ownership` only worked via `$` partial
# matching to 'Home.OwnershipRent'. Use the exact name and relabel so the
# confusion-matrix rows read "Own"/"Rent" like the predictions.
actual <- ifelse(data.test$Home.OwnershipRent == 1, "Rent", "Own")
table(actual, data.test.predict)
| /3.5/code1.R | no_license | adam077/data_mining_by_R | R | false | false | 1,077 | r | library(nnet)
data <- read.csv("customers.csv");
data <- data[
data$Gender!='NULL' &
data$Marital.Status!='NULL' &
data$Home.Ownership!='NULL' &
data$Education.Level!='NULL',
];
data$Gender <- factor(data$Gender)
data$Marital.Status <- factor(data$Marital.Status)
data$Home.Ownership <- factor(data$Home.Ownership)
data$Education.Level <- factor(data$Education.Level)
mmFormula <- ~ Home.Ownership + Education.Level + Gender + Marital.Status;
modelData <- model.matrix(mmFormula, data)
colnames(modelData)
data <- as.data.frame(modelData[, -1])
total <- nrow(data)
index <- sample(1:total, total*0.7)
data.train <- data[index, ]
data.test <- data[-index, ]
names(data)
data.train.nnet = nnet(
formula = Home.OwnershipRent ~ .,
data = data.train,
size = 10,
decay = 0.1,
linout = T,
trace = F
)
data.test.predict <- predict(
data.train.nnet,
newdata = data.test
)
data.test.predict <- ifelse(data.test.predict>0.5, "Rent", "Own")
table(data.test$Home.Ownership, data.test.predict)
|
#' Search PLoS Journals by article views.
#'
#' Queries the PLoS search API and returns article identifiers together
#' with their usage counters, sorted (ascending) by the view count.
#'
#' @export
#' @import httr
#' @param search search terms (character)
#' @param byfield field to search by, e.g., subject, author, etc. (character)
#' @param views views all time (alltime) or views last 30 days (last30)
#'    (character)
#' @param limit number of results to return (integer)
#' @param ... Optional additional curl options (debugging tools mostly)
#' @examples \dontrun{
#' plosviews('10.1371/journal.pone.0002154', 'id', 'alltime')
#' plosviews('10.1371/journal.pone.0002154', 'id', 'last30')
#' plosviews('10.1371/journal.pone.0002154', 'id', 'alltime,last30')
#' plosviews(search='marine ecology', byfield='subject', limit=50)
#' plosviews(search='evolution', views = 'alltime', limit = 99)
#' plosviews('bird', views = 'alltime', limit = 99)
#' }
plosviews <- function(search, byfield = NULL, views = 'alltime', limit = NULL, ...)
{
  # Base Solr query arguments; ploscompact() drops NULL entries (e.g. limit).
  args <- ploscompact(list(wt = "json", fq = "doc_type:full", rows = limit))
  # Optional "field:" prefix for the quoted query term; paste0() drops NULL.
  prefix <- if (is.null(byfield)) byfield else paste0(byfield, ":")
  if (!is.na(search)) {
    args$q <- paste0(prefix, '"', search, '"')
  }
  # Select which counter fields the API should return.
  if (!is.na(views)) {
    args$fl <- switch(views,
      alltime = 'id,counter_total_all',
      last30  = 'id,counter_total_month',
      'id,counter_total_all,counter_total_month'  # default: both counters
    )
  }
  resp <- GET(pbase(), query = args, ...)
  stop_for_status(resp)
  docs <- content(resp)$response$docs
  out <- do.call(rbind, lapply(docs, function(x) data.frame(x)))
  # Order ascending by the view-count column (column 2).
  out[order(out[, 2]), ]
}
| /R/plosviews.R | permissive | akram-mohammed/rplos | R | false | false | 1,553 | r | #' Search PLoS Journals by article views.
#'
#' @export
#' @import httr
#' @param search search terms (character)
#' @param byfield field to search by, e.g., subject, author, etc. (character)
#' @param views views all time (alltime) or views last 30 days (last30)
#' (character)
#' @param limit number of results to return (integer)
#' @param ... Optional additional curl options (debugging tools mostly)
#' @examples \dontrun{
#' plosviews('10.1371/journal.pone.0002154', 'id', 'alltime')
#' plosviews('10.1371/journal.pone.0002154', 'id', 'last30')
#' plosviews('10.1371/journal.pone.0002154', 'id', 'alltime,last30')
#' plosviews(search='marine ecology', byfield='subject', limit=50)
#' plosviews(search='evolution', views = 'alltime', limit = 99)
#' plosviews('bird', views = 'alltime', limit = 99)
#' }
plosviews <- function(search, byfield = NULL, views = 'alltime', limit = NULL, ...)
{
args <- ploscompact(list(wt = "json", fq = "doc_type:full", rows = limit))
if(is.null(byfield)) {byfield_ <- byfield} else
{byfield_ <- paste(byfield, ":", sep="")}
if(!is.na(search))
args$q <- paste(byfield_, '"', search, '"', sep="")
if(!is.na(views))
if (views == 'alltime') {args$fl <- 'id,counter_total_all'} else
if (views == 'last30') {args$fl <- 'id,counter_total_month'} else
{args$fl <- 'id,counter_total_all,counter_total_month'}
tt <- GET(pbase(), query=args, ...)
stop_for_status(tt)
temp <- content(tt)$response$docs
df <- do.call(rbind, lapply(temp, function(x) data.frame(x)))
df[order(df[,2]), ]
}
|
##' Split a Vector of Strings Following a Regular Structure
##'
##' Splits each string in a vector on a delimiter (or regular expression)
##' and returns the pieces as the columns of a \code{data.frame} -- a
##' convenience wrapper around \code{\link{strsplit}} that transposes and
##' coerces the result for you.
##'
##' For a single one-character delimiter, \code{read.table(text=...)} or
##' \code{data.table::fread} is usually preferable; this function is most
##' useful when the delimiter is irregular and must be given as a regex.
##'
##' @param x a vector of strings.
##' @param sep the delimiter / \code{\link{regex}} you wish to split your strings on.
##' @param fixed logical. If \code{TRUE}, we match \code{sep} exactly;
##'   otherwise, we use regular expressions. Has priority over \code{perl}.
##' @param perl logical. Should perl-compatible regexps be used? Ignored when
##'   \code{fixed} is \code{TRUE}.
##' @param useBytes logical. If \code{TRUE}, matching is done byte-by-byte rather than
##'   character-by-character.
##' @param names optional: a vector of names to pass to the returned \code{data.frame}.
##' @seealso \code{\link{strsplit}}
##' @export
##' @examples
##' str_split(
##'   c("regular_structure", "in_my", "data_here"),
##'   sep="_",
##'   names=c("apple", "banana")
##' )
##' x <- c("somewhat_different.structure", "in_this.guy")
##' str_split( x, "[_\\.]", names=c("first", "second", "third") )
str_split <- function(x, sep, fixed=FALSE, perl=TRUE, useBytes=FALSE, names=NULL) {
  # 'fixed' takes priority: strsplit() warns if both fixed and perl are set.
  if (fixed) {
    perl <- FALSE
  }
  pieces <- strsplit(as.character(x), sep, fixed = fixed, perl = perl,
                     useBytes = useBytes)
  # Transpose the list of split pieces into data.frame columns (C helper).
  .Call(Ccharlist_transpose_to_df, pieces, names)
}

##' @rdname str_split
##' @export
split2df <- str_split
| /R/str_split.R | no_license | dpastoor/Kmisc | R | false | false | 1,862 | r | ##' Split a Vector of Strings Following a Regular Structure
##'
##' This function takes a vector of strings following a regular
##' structure, and converts that vector into a \code{data.frame}, split
##' on that delimiter. A nice wrapper to \code{\link{strsplit}}, essentially
##' - the primary bonus is the automatic coersion to a \code{data.frame}.
##'
##' Note that the preferred method for reading text data with a single, one
##' character delimiter is through \code{read.table(text=...)} or
##' \code{data.table::fread}; however, this function is helpful in the case of
##' non-regular delimiters (that you wish to specify with a regex)
##'
##' @param x a vector of strings.
##' @param sep the delimiter / \code{\link{regex}} you wish to split your strings on.
##' @param fixed logical. If \code{TRUE}, we match \code{sep} exactly;
##' otherwise, we use regular expressions. Has priority over \code{perl}.
##' @param perl logical. Should perl-compatible regexps be used? Ignored when
##' \code{fixed} is \code{TRUE}.
##' @param useBytes logical. If \code{TRUE}, matching is done byte-by-byte rather than
##' character-by-character.
##' @param names optional: a vector of names to pass to the returned \code{data.frame}.
##' @seealso \code{\link{strsplit}}
##' @export
##' @examples
##' str_split(
##' c("regular_structure", "in_my", "data_here"),
##' sep="_",
##' names=c("apple", "banana")
##' )
##' x <- c("somewhat_different.structure", "in_this.guy")
##' str_split( x, "[_\\.]", names=c("first", "second", "third") )
str_split <- function(x, sep, fixed=FALSE, perl=TRUE, useBytes=FALSE, names=NULL) {
if (fixed)
perl <- FALSE
return( .Call( Ccharlist_transpose_to_df,
strsplit(as.character(x), sep, fixed=fixed, perl=perl, useBytes=useBytes),
names
) )
}
##' @rdname str_split
##' @export
split2df <- str_split
|
# taken from EM_function_v6.R , and adapted
fitEM <- function(y, X.t, Z.t, K = NULL, Vg = NULL, Ve = NULL,
cov.error = TRUE, stop.if.significant = FALSE,
null.dev = NULL, alpha = 0.01, max.iter = 100,
Vg.start = NULL, Ve.start = NULL, cov.gen = TRUE) {
#y = em.vec; K = NULL; null.dev = NULL; Vg.start = as.numeric(Vg.manova)[c(1,4,2)]; stop.if.significant= F; Vg = NULL; Ve = NULL; Ve.start = c(as.numeric(Ve.manova)[c(1,4)], 0); cov.error = FALSE; max.iter = 5
if (stop.if.significant == TRUE & is.null(null.dev)) {stop('No null.dev given')}
if (!is.null(Vg.start) & !is.null(Vg)) {
warning('Vg set to NULL')
Vg <- NULL
}
if (!is.null(Ve.start) & !is.null(Ve)) {
warning('Ve set to NULL')
Ve <- NULL
}
####################################################
# Remove missing data: according to y, X.t and Z.t
####################################################
na.X <- apply(X.t, 1, function(x) any(is.na(x)))
na.Z <- apply(Z.t, 1, function(z) any(is.na(z)))
is.na <- is.na(y[1:nrow(X.t)]) | is.na(y[(nrow(X.t)+1):(2*nrow(X.t))]) | na.X | na.Z
y <- y[rep(!is.na,2)]
X.t <- X.t[!is.na,]
Z.t <- Z.t[!is.na,]
Ve.aux <- Ve
Vg.aux <- Vg
#start <- proc.time()[3]
X.t <- Matrix(X.t)
if(!is.null(K)) {
eig <- eigen(K)
U <- eig$vectors
d <- eig$values
K.trans <- U %*% diag(1/sqrt(d))
Z.t <- Matrix(Z.t%*% K.trans)
#Z.t <- Matrix(Z.t%*%U%*%diag(1/sqrt(d)))
} else {
Z.t <- Matrix(Z.t)
}
# Design matrices
Z <- Diagonal(2)%x%Z.t
X <- Diagonal(2)%x%X.t
# Extract some information
ngeno <- ncol(Z.t)
N <- nrow(Z.t)
# Needed matrices: we take the advantage of the kronecker product
MM <- cbind(X,Z)
XtX. <- crossprod(X.t) #t(X.t)%*%X.t
XtZ. <- crossprod(X.t, Z.t) #t(X.t)%*%Z.t
ZtZ. <- crossprod(Z.t) #t(Z.t)%*%Z.t
# Weights: for the moment all ones
w <- rep(1, length(y))
# Number of coefficients (fixed and random, per trait)
np <- c(ncol(X), ngeno, ngeno)
# Initial values
devold <- 1e10
thr <- 1e-3
###########################
if (is.null(Vg)) {
est.Vg <- TRUE
est.Vg.var <- TRUE
if (is.null(Vg.start)) {
Vg <- c(1,1,0.1)
} else {
Vg <- Vg.start
}
} else {
if (length(Vg.aux) == 2) {
est.Vg <- TRUE
est.Vg.var <- FALSE
Vg <- c(Vg.aux[1], Vg.aux[2], 0.1)
} else {
if (length(Vg.aux) == 3) {
est.Vg <- FALSE
} else {
stop('The specified variance/covariance matrix for the error component is not correct')
}
}
}
###########################
if (is.null(Ve)) {
est.Ve <- TRUE
est.Ve.var <- TRUE
if (cov.error) {
if (is.null(Ve.start)) {
Ve <- c(1,1,0.1)
} else {
Ve <- Ve.start
}
} else {
if (is.null(Ve.start)) {
Ve <- c(1,1,0)
} else {
Ve <- Ve.start
Ve[3] <- 0
}
}
} else {
if (length(Ve.aux) == 2) {
est.Ve <- TRUE
cov.error <- TRUE
est.Ve.var <- FALSE
Ve <- c(Ve.aux[1], Ve.aux[2], 0.1)
} else if (length(Ve.aux) == 3) {
est.Ve <- FALSE
} else {
stop('The specified variance/covariance matrix for the error component is not correct')
}
}
# Precision matrices for the genetic variances (needed for SAP/Schall algorithm)
g1 <- rep(c(Vg[1], 0), each = ngeno)
g2 <- rep(c(0, Vg[2]), each = ngeno)
for (it in 1:max.iter) {
# Genotypic covariance matrix # it=1
Gi <- matrix(c(Vg[1], Vg[3], Vg[3], Vg[2]), ncol = 2)
G <- Gi%x%Diagonal(ngeno)
Giinv <- solve(Gi)
Ginv <- Giinv%x%Diagonal(ngeno)
# Error covariance matrix
Ri <- matrix(c(Ve[1], Ve[3], Ve[3], Ve[2]), ncol = 2)
R <- Ri%x%Diagonal(N)
Riinv <- solve(Ri)
Rinv <- Riinv%x%Diagonal(N)
# X'X X'Z
# Z'X Z'Z
# We take the advantage of the kronecker product
XtRinvX. <- Riinv%x%XtX.
XtRinvZ. <- Riinv%x%XtZ.
ZtRinvZ. <- Riinv%x%ZtZ.
XtRinvy. <- ((Riinv%x%t(X.t))%*%y)[,1]
ZtRinvy. <- ((Riinv%x%t(Z.t))%*%y)[,1]
u <- c(XtRinvy.,ZtRinvy.)
V <- construct.block(XtRinvX., XtRinvZ., t(XtRinvZ.), ZtRinvZ.)
#D <- Matrix:::bdiag(diag(rep(0,np[1])), Ginv)
D <- bdiag(diag(rep(0,np[1])), Ginv)
# Henderson system of equations
H <- V + D
Hinv <- try(solve(H))
if(class(Hinv) == "try-error") {
Hinv <- ginv(as.matrix(H))
}
# Fixed and random coefficients
b <- Hinv%*%u
b.fixed <- b[1:np[1]]
b.random <- b[-(1:np[1])]
# Compute the deviance
res <- (y - MM%*%b) # residuals
dev <- deviance2(H, Gi, ngeno, Ri, N, Rinv, res, t(b.random)%*%Ginv%*%b.random)[1]
#dev <- deviance(H, G, R, Rinv, res, t(b.random)%*%Ginv%*%b.random)[1]
if(!est.Ve & !est.Vg) {
break
}
#########################################################
# Genotypic variance components
#########################################################
if (est.Vg) {
#########################################################
# EM algorithm
#########################################################
Ak <- Hinv[-(1:np[1]),-(1:np[1])]
#########################################################
# Schall: only for the variances (apparently faster)
#########################################################
if (est.Vg.var) {
aux <- diag(G) - diag(Hinv[-(1:np[1]),-(1:np[1])])
# First trait
g.inv.d <- (1/Vg[1])*g1
ed1 <- sum(aux*g.inv.d)
ssv1 <- sum(b.random^2*g1)
tau1 <- ssv1/ed1
# Second trait
g.inv.d <- (1/Vg[2])*g2
ed2 <- sum(aux*g.inv.d)
ssv2 <- sum(b.random^2*g2)
tau2 <- ssv2/ed2
} else {
tau1 <- Vg[1]
tau2 <- Vg[2]
}
# covariance
if (cov.gen) {
A <- Ak[1:ngeno, (ngeno+1):(2*ngeno)]
tau3 <- (1/ngeno)*(sum(diag(A)) + sum(b.random[1:ngeno]*b.random[(ngeno+1):(2*ngeno)]))
} else {
tau3 <- 0
}
Vg.new <- c(tau1, tau2, tau3)
} else {
Vg.new = Vg
}
#########################################################
# Error variance components
#########################################################
if (est.Ve) {
#########################################################
# EM algorithm
#########################################################
resm <- matrix(res, ncol = 2) # One column per trait
ress <- t(resm)%*%resm
aux <- MM%*%Hinv #####time consuming try crossprod {base}
if(est.Ve.var) {
diag.var <- rowSums(aux*MM)
# First trait
aux1 <- sum(diag.var[1:N])
aux2 <- ress[1,1]
sig21 <- (1/N)*(aux1 + aux2)
# Second trait
aux1 <- sum(diag.var[(N+1):(2*N)])
aux2 <- ress[2,2]
sig22 <- (1/N)*(aux1 + aux2)
# To be further studied
#sig21 <- ress[1,1]/(N - ncol(X.t) - ed1)
#sig22 <- ress[2,2]/(N - ncol(X.t) - ed2)
} else {
sig21 <- Ve[1]
sig22 <- Ve[2]
}
if (cov.error) {
#aux <- MM%*%Hinv
diag.covar <- rowSums(aux[1:N,]*MM[(N+1):(2*N),])
# Covariance
aux1 <- sum(diag.covar)
aux2 <- ress[1,2]
sig212 = (1/N)*(aux1 + aux2)
} else {
sig212 = 0
}
Ve.new <- c(sig21, sig22, sig212)
} else {
Ve.new <- Ve
}
dla <- abs(devold - dev)
#cat(sprintf("%1$3d %2$10.6f", it, dev))
#cat(sprintf("%8.3f", c(Ve, Vg)), '\n')
Ve <- Ve.new #!
Vg <- Vg.new
devold <- dev
if (stop.if.significant) {
loglik_Full <- -0.5 * dev
loglik_Reduced <- -0.5 * null.dev
REMLLRT <- 2 * max(loglik_Full - loglik_Reduced, 0)
pvalue <- (1 - pchisq(REMLLRT, df = 1))
if (pvalue < alpha) break
if (it > 20 & pvalue > 0.1) break
}
if (dla < thr) break
}
#end <- proc.time()[3]
#cat(paste('Computing time:', end - start, '\n'))
if(!is.null(K)) {
b.random <- (Diagonal(2)%x%K.trans)%*%b.random
}
res <- list()
res$coeff <- list(fixed = b.fixed, random = b.random)
res$variances <- list(Ve = Ve, Vg = Vg)
res$deviance <- dev
res$it <- it
if (stop.if.significant) {res$pvalue <- pvalue}
res
} | /R/fitEM.R | no_license | cran/pcgen | R | false | false | 8,217 | r | # taken from EM_function_v6.R , and adapted
fitEM <- function(y, X.t, Z.t, K = NULL, Vg = NULL, Ve = NULL,
cov.error = TRUE, stop.if.significant = FALSE,
null.dev = NULL, alpha = 0.01, max.iter = 100,
Vg.start = NULL, Ve.start = NULL, cov.gen = TRUE) {
#y = em.vec; K = NULL; null.dev = NULL; Vg.start = as.numeric(Vg.manova)[c(1,4,2)]; stop.if.significant= F; Vg = NULL; Ve = NULL; Ve.start = c(as.numeric(Ve.manova)[c(1,4)], 0); cov.error = FALSE; max.iter = 5
if (stop.if.significant == TRUE & is.null(null.dev)) {stop('No null.dev given')}
if (!is.null(Vg.start) & !is.null(Vg)) {
warning('Vg set to NULL')
Vg <- NULL
}
if (!is.null(Ve.start) & !is.null(Ve)) {
warning('Ve set to NULL')
Ve <- NULL
}
####################################################
# Remove missing data: according to y, X.t and Z.t
####################################################
na.X <- apply(X.t, 1, function(x) any(is.na(x)))
na.Z <- apply(Z.t, 1, function(z) any(is.na(z)))
is.na <- is.na(y[1:nrow(X.t)]) | is.na(y[(nrow(X.t)+1):(2*nrow(X.t))]) | na.X | na.Z
y <- y[rep(!is.na,2)]
X.t <- X.t[!is.na,]
Z.t <- Z.t[!is.na,]
Ve.aux <- Ve
Vg.aux <- Vg
#start <- proc.time()[3]
X.t <- Matrix(X.t)
if(!is.null(K)) {
eig <- eigen(K)
U <- eig$vectors
d <- eig$values
K.trans <- U %*% diag(1/sqrt(d))
Z.t <- Matrix(Z.t%*% K.trans)
#Z.t <- Matrix(Z.t%*%U%*%diag(1/sqrt(d)))
} else {
Z.t <- Matrix(Z.t)
}
# Design matrices
Z <- Diagonal(2)%x%Z.t
X <- Diagonal(2)%x%X.t
# Extract some information
ngeno <- ncol(Z.t)
N <- nrow(Z.t)
# Needed matrices: we take the advantage of the kronecker product
MM <- cbind(X,Z)
XtX. <- crossprod(X.t) #t(X.t)%*%X.t
XtZ. <- crossprod(X.t, Z.t) #t(X.t)%*%Z.t
ZtZ. <- crossprod(Z.t) #t(Z.t)%*%Z.t
# Weights: for the moment all ones
w <- rep(1, length(y))
# Number of coefficients (fixed and random, per trait)
np <- c(ncol(X), ngeno, ngeno)
# Initial values
devold <- 1e10
thr <- 1e-3
###########################
if (is.null(Vg)) {
est.Vg <- TRUE
est.Vg.var <- TRUE
if (is.null(Vg.start)) {
Vg <- c(1,1,0.1)
} else {
Vg <- Vg.start
}
} else {
if (length(Vg.aux) == 2) {
est.Vg <- TRUE
est.Vg.var <- FALSE
Vg <- c(Vg.aux[1], Vg.aux[2], 0.1)
} else {
if (length(Vg.aux) == 3) {
est.Vg <- FALSE
} else {
stop('The specified variance/covariance matrix for the error component is not correct')
}
}
}
###########################
if (is.null(Ve)) {
est.Ve <- TRUE
est.Ve.var <- TRUE
if (cov.error) {
if (is.null(Ve.start)) {
Ve <- c(1,1,0.1)
} else {
Ve <- Ve.start
}
} else {
if (is.null(Ve.start)) {
Ve <- c(1,1,0)
} else {
Ve <- Ve.start
Ve[3] <- 0
}
}
} else {
if (length(Ve.aux) == 2) {
est.Ve <- TRUE
cov.error <- TRUE
est.Ve.var <- FALSE
Ve <- c(Ve.aux[1], Ve.aux[2], 0.1)
} else if (length(Ve.aux) == 3) {
est.Ve <- FALSE
} else {
stop('The specified variance/covariance matrix for the error component is not correct')
}
}
# Precision matrices for the genetic variances (needed for SAP/Schall algorithm)
g1 <- rep(c(Vg[1], 0), each = ngeno)
g2 <- rep(c(0, Vg[2]), each = ngeno)
for (it in 1:max.iter) {
# Genotypic covariance matrix # it=1
Gi <- matrix(c(Vg[1], Vg[3], Vg[3], Vg[2]), ncol = 2)
G <- Gi%x%Diagonal(ngeno)
Giinv <- solve(Gi)
Ginv <- Giinv%x%Diagonal(ngeno)
# Error covariance matrix
Ri <- matrix(c(Ve[1], Ve[3], Ve[3], Ve[2]), ncol = 2)
R <- Ri%x%Diagonal(N)
Riinv <- solve(Ri)
Rinv <- Riinv%x%Diagonal(N)
# X'X X'Z
# Z'X Z'Z
# We take the advantage of the kronecker product
XtRinvX. <- Riinv%x%XtX.
XtRinvZ. <- Riinv%x%XtZ.
ZtRinvZ. <- Riinv%x%ZtZ.
XtRinvy. <- ((Riinv%x%t(X.t))%*%y)[,1]
ZtRinvy. <- ((Riinv%x%t(Z.t))%*%y)[,1]
u <- c(XtRinvy.,ZtRinvy.)
V <- construct.block(XtRinvX., XtRinvZ., t(XtRinvZ.), ZtRinvZ.)
#D <- Matrix:::bdiag(diag(rep(0,np[1])), Ginv)
D <- bdiag(diag(rep(0,np[1])), Ginv)
# Henderson system of equations
H <- V + D
Hinv <- try(solve(H))
if(class(Hinv) == "try-error") {
Hinv <- ginv(as.matrix(H))
}
# Fixed and random coefficients
b <- Hinv%*%u
b.fixed <- b[1:np[1]]
b.random <- b[-(1:np[1])]
# Compute the deviance
res <- (y - MM%*%b) # residuals
dev <- deviance2(H, Gi, ngeno, Ri, N, Rinv, res, t(b.random)%*%Ginv%*%b.random)[1]
#dev <- deviance(H, G, R, Rinv, res, t(b.random)%*%Ginv%*%b.random)[1]
if(!est.Ve & !est.Vg) {
break
}
#########################################################
# Genotypic variance components
#########################################################
if (est.Vg) {
#########################################################
# EM algorithm
#########################################################
Ak <- Hinv[-(1:np[1]),-(1:np[1])]
#########################################################
# Schall: only for the variances (apparently faster)
#########################################################
if (est.Vg.var) {
aux <- diag(G) - diag(Hinv[-(1:np[1]),-(1:np[1])])
# First trait
g.inv.d <- (1/Vg[1])*g1
ed1 <- sum(aux*g.inv.d)
ssv1 <- sum(b.random^2*g1)
tau1 <- ssv1/ed1
# Second trait
g.inv.d <- (1/Vg[2])*g2
ed2 <- sum(aux*g.inv.d)
ssv2 <- sum(b.random^2*g2)
tau2 <- ssv2/ed2
} else {
tau1 <- Vg[1]
tau2 <- Vg[2]
}
# covariance
if (cov.gen) {
A <- Ak[1:ngeno, (ngeno+1):(2*ngeno)]
tau3 <- (1/ngeno)*(sum(diag(A)) + sum(b.random[1:ngeno]*b.random[(ngeno+1):(2*ngeno)]))
} else {
tau3 <- 0
}
Vg.new <- c(tau1, tau2, tau3)
} else {
Vg.new = Vg
}
#########################################################
# Error variance components
#########################################################
if (est.Ve) {
#########################################################
# EM algorithm
#########################################################
resm <- matrix(res, ncol = 2) # One column per trait
ress <- t(resm)%*%resm
aux <- MM%*%Hinv #####time consuming try crossprod {base}
if(est.Ve.var) {
diag.var <- rowSums(aux*MM)
# First trait
aux1 <- sum(diag.var[1:N])
aux2 <- ress[1,1]
sig21 <- (1/N)*(aux1 + aux2)
# Second trait
aux1 <- sum(diag.var[(N+1):(2*N)])
aux2 <- ress[2,2]
sig22 <- (1/N)*(aux1 + aux2)
# To be further studied
#sig21 <- ress[1,1]/(N - ncol(X.t) - ed1)
#sig22 <- ress[2,2]/(N - ncol(X.t) - ed2)
} else {
sig21 <- Ve[1]
sig22 <- Ve[2]
}
if (cov.error) {
#aux <- MM%*%Hinv
diag.covar <- rowSums(aux[1:N,]*MM[(N+1):(2*N),])
# Covariance
aux1 <- sum(diag.covar)
aux2 <- ress[1,2]
sig212 = (1/N)*(aux1 + aux2)
} else {
sig212 = 0
}
Ve.new <- c(sig21, sig22, sig212)
} else {
Ve.new <- Ve
}
dla <- abs(devold - dev)
#cat(sprintf("%1$3d %2$10.6f", it, dev))
#cat(sprintf("%8.3f", c(Ve, Vg)), '\n')
Ve <- Ve.new #!
Vg <- Vg.new
devold <- dev
if (stop.if.significant) {
loglik_Full <- -0.5 * dev
loglik_Reduced <- -0.5 * null.dev
REMLLRT <- 2 * max(loglik_Full - loglik_Reduced, 0)
pvalue <- (1 - pchisq(REMLLRT, df = 1))
if (pvalue < alpha) break
if (it > 20 & pvalue > 0.1) break
}
if (dla < thr) break
}
#end <- proc.time()[3]
#cat(paste('Computing time:', end - start, '\n'))
if(!is.null(K)) {
b.random <- (Diagonal(2)%x%K.trans)%*%b.random
}
res <- list()
res$coeff <- list(fixed = b.fixed, random = b.random)
res$variances <- list(Ve = Ve, Vg = Vg)
res$deviance <- dev
res$it <- it
if (stop.if.significant) {res$pvalue <- pvalue}
res
} |
\name{predict}
\alias{predict.JMbayes}
\title{Predictions for Joint Models}
\description{
Calculates predicted values for the longitudinal part of a joint model.
}
\usage{
\method{predict}{JMbayes}(object, newdata, type = c("Marginal", "Subject"),
interval = c("none", "confidence", "prediction"), level = 0.95, idVar = "id",
FtTimes = NULL, last.time = NULL, LeftTrunc_var = NULL,
M = 300, returnData = FALSE, scale = 1.6,
weight = rep(1, nrow(newdata)), invlink = NULL, seed = 1, \dots)
}
\arguments{
\item{object}{an object inheriting from class \code{JMBayes}.}
\item{newdata}{a data frame in which to look for variables with which to predict.}
\item{type}{a character string indicating the type of predictions to compute,
marginal or subject-specific. See \bold{Details}.}
\item{interval}{a character string indicating what type of intervals should be computed.}
\item{level}{a numeric scalar denoting the tolerance/confidence level.}
\item{idVar}{a character string indicating the name of the variable in
\code{newdata} that corresponds to the subject identifier; required
when \code{type = "Subject"}.}
\item{FtTimes}{a list with components numeric vectors denoting the time points
for which we wish to compute subject-specific predictions after the last
available measurement provided in \code{newdata}. For each subject in
\code{newdata} the default is a sequence of 25 equally spaced time points
from the last available measurement to the maximum follow-up time of all
subjects (plus a small quantity). This argument is only used when
\code{type = "Subject"}.}
\item{last.time}{a numeric vector. This specifies the known time at which each of the subjects in \code{newdata}
was known to be alive. If \code{NULL}, then this is automatically taken as the last time each subject provided a longitudinal
measurement. If a numeric vector, then it is assumed to contain this last time point for each subject.}
\item{LeftTrunc_var}{character string indicating the name of the variable in \code{newdata} that denotes the left-truncation
time.}
\item{M}{numeric scalar denoting the number of Monte Carlo samples.
See \bold{Details}.}
\item{returnData}{logical; if \code{TRUE} the data frame supplied in
\code{newdata} is returned augmented with the outputs of the function.}
\item{scale}{a numeric value setting the scaling of the covariance matrix
of the empirical Bayes estimates in the Metropolis step during the
Monte Carlo sampling.}
\item{weight}{a numeric vector of weights to be applied to the predictions of each subject.}
\item{invlink}{a function to transform the linear predictor of the mixed model to fitted means;
relevant when the user has specified her own density for the longitudinal outcome in
\code{\link{jointModelBayes}}.}
\item{seed}{numeric scalar, the random seed used to produce the results.}
\item{\dots}{additional arguments; currently none is used.}
}
\details{
When \code{type = "Marginal"}, this function computes predicted values for the
fixed-effects part of the longitudinal submodel. In particular,
let \eqn{X} denote the fixed-effects design matrix calculated using
\code{newdata}. The \code{predict()} calculates \eqn{\hat{y} = X \hat{\beta}},
and if \code{interval = "confidence"}, then it calculates the confidence intervals
based on the percentiles of the MCMC sample for \eqn{\beta}.
When \code{type = "Subject"}, this function computes subject-specific
predictions for the longitudinal outcome based on the joint model.
This accomplished with a Monte Carlo simulation scheme, similar to the one
described in \code{\link{survfitJM}}. The only difference is in Step 3, where
for \code{interval = "confidence"} \eqn{y_i^* = X_i \beta^* + Z_i b_i^*}, whereas
for \code{interval = "prediction"} \eqn{y_i^*} is a random vector from a normal
distribution with mean \eqn{X_i \beta^* + Z_i b_i^*} and standard deviation
\eqn{\sigma^*}. Based on this Monte Carlo simulation scheme we take as
estimate of \eqn{\hat{y}_i} the average of the \code{M} estimates \eqn{y_i^*}
from each Monte Carlo sample. Confidence intervals are constructed using the
percentiles of \eqn{y_i^*} from the Monte Carlo samples.
}
\value{
If \code{se.fit = FALSE} a numeric vector of predicted values, otherwise a
list with components \code{pred} the predicted values, \code{se.fit} the
standard error for the fitted values, and \code{low} and \code{upp} the lower
and upper limits of the confidence interval. If \code{returnData = TRUE}, it
returns the data frame \code{newdata} with the previously mentioned components
added.
}
\note{
The user is responsible for appropriately setting the \code{invlink} argument when a user-specified
mixed effects model has been fitted.
}
\author{Dimitris Rizopoulos \email{d.rizopoulos@erasmusmc.nl}}
\references{
Rizopoulos, D. (2012) \emph{Joint Models for Longitudinal and Time-to-Event Data: with
Applications in R}. Boca Raton: Chapman and Hall/CRC.
}
\seealso{\code{\link{survfitJM.JMbayes}}, \code{\link{jointModelBayes}}}
\examples{
\dontrun{
# linear mixed model fit
fitLME <- lme(log(serBilir) ~ drug * year, data = pbc2,
random = ~ year | id)
# survival regression fit
fitSURV <- coxph(Surv(years, status2) ~ drug, data = pbc2.id,
x = TRUE)
# joint model fit, under the (default) Weibull model
fitJOINT <- jointModelBayes(fitLME, fitSURV, timeVar = "year")
DF <- with(pbc2, expand.grid(drug = levels(drug),
year = seq(min(year), max(year), len = 100)))
Ps <- predict(fitJOINT, DF, interval = "confidence", return = TRUE)
require(lattice)
xyplot(pred + low + upp ~ year | drug, data = Ps,
type = "l", col = c(2,1,1), lty = c(1,2,2), lwd = 2,
ylab = "Average log serum Bilirubin")
# Subject-specific predictions
ND <- pbc2[pbc2$id == 2, ]
Ps.ss <- predict(fitJOINT, ND, type = "Subject",
interval = "confidence", return = TRUE)
xyplot(pred + low + upp ~ year | id, data = Ps.ss,
type = "l", col = c(2,1,1), lty = c(1,2,2), lwd = 2,
ylab = "Average log serum Bilirubin")
}
}
\keyword{methods}
| /man/predict.Rd | no_license | TobiasPolak/JMbayes | R | false | false | 6,244 | rd | \name{predict}
\alias{predict.JMbayes}
\title{Predictions for Joint Models}
\description{
Calculates predicted values for the longitudinal part of a joint model.
}
\usage{
\method{predict}{JMbayes}(object, newdata, type = c("Marginal", "Subject"),
interval = c("none", "confidence", "prediction"), level = 0.95, idVar = "id",
FtTimes = NULL, last.time = NULL, LeftTrunc_var = NULL,
M = 300, returnData = FALSE, scale = 1.6,
weight = rep(1, nrow(newdata)), invlink = NULL, seed = 1, \dots)
}
\arguments{
\item{object}{an object inheriting from class \code{JMBayes}.}
\item{newdata}{a data frame in which to look for variables with which to predict.}
\item{type}{a character string indicating the type of predictions to compute,
marginal or subject-specific. See \bold{Details}.}
\item{interval}{a character string indicating what type of intervals should be computed.}
\item{level}{a numeric scalar denoting the tolerance/confidence level.}
\item{idVar}{a character string indicating the name of the variable in
\code{newdata} that corresponds to the subject identifier; required
when \code{type = "Subject"}.}
\item{FtTimes}{a list with components numeric vectors denoting the time points
for which we wish to compute subject-specific predictions after the last
available measurement provided in \code{newdata}. For each subject in
\code{newdata} the default is a sequence of 25 equally spaced time points
from the last available measurement to the maximum follow-up time of all
subjects (plus a small quantity). This argument is only used when
\code{type = "Subject"}.}
\item{last.time}{a numeric vector. This specifies the known time at which each of the subjects in \code{newdata}
was known to be alive. If \code{NULL}, then this is automatically taken as the last time each subject provided a longitudinal
measurement. If a numeric vector, then it is assumed to contain this last time point for each subject.}
\item{LeftTrunc_var}{character string indicating the name of the variable in \code{newdata} that denotes the left-truncation
time.}
\item{M}{numeric scalar denoting the number of Monte Carlo samples.
See \bold{Details}.}
\item{returnData}{logical; if \code{TRUE} the data frame supplied in
\code{newdata} is returned augmented with the outputs of the function.}
\item{scale}{a numeric value setting the scaling of the covariance matrix
of the empirical Bayes estimates in the Metropolis step during the
Monte Carlo sampling.}
\item{weight}{a numeric vector of weights to be applied to the predictions of each subject.}
\item{invlink}{a function to tranform the linear predictor of the mixed model to fitted means;
relevant when the user has specified her own density for the longitudinal outcome in
\code{\link{jointModelBayes}}.}
\item{seed}{numeric scalar, the random seed used to produce the results.}
\item{\dots}{additional arguments; currently none is used.}
}
\details{
When \code{type = "Marginal"}, this function computes predicted values for the
fixed-effects part of the longitudinal submodel. In particular,
let \eqn{X} denote the fixed-effects design matrix calculated using
\code{newdata}. The \code{predict()} calculates \eqn{\hat{y} = X \hat{\beta}},
and if \code{interval = "confidence"}, then it calculates the confidence intervals
based on the percentiles of the MCMC sample for \eqn{\beta}.
When \code{type = "Subject"}, this functions computes subject-specific
predictions for the longitudinal outcome based on the joint model.
This accomplished with a Monte Carlo simulation scheme, similar to the one
described in \code{\link{survfitJM}}. The only difference is in Step 3, where
for \code{interval = "confidence"} \eqn{y_i^* = X_i \beta^* + Z_i b_i^*}, whereas
for \code{interval = "prediction"} \eqn{y_i^*} is a random vector from a normal
distribution with mean \eqn{X_i \beta^* + Z_i b_i^*} and standard deviation
\eqn{\sigma^*}. Based on this Monte Carlo simulation scheme we take as
estimate of \eqn{\hat{y}_i} the average of the \code{M} estimates \eqn{y_i^*}
from each Monte Carlo sample. Confidence intervals are constructed using the
percentiles of \eqn{y_i^*} from the Monte Carlo samples.
}
\value{
If \code{se.fit = FALSE} a numeric vector of predicted values, otherwise a
list with components \code{pred} the predicted values, \code{se.fit} the
standard error for the fitted values, and \code{low} and \code{upp} the lower
and upper limits of the confidence interval. If \code{returnData = TRUE}, it
returns the data frame \code{newdata} with the previously mentioned components
added.
}
\note{
The user is responsible for appropriately setting the \code{invlink} argument when a user-specified
mixed effects model has been fitted.
}
\author{Dimitris Rizopoulos \email{d.rizopoulos@erasmusmc.nl}}
\references{
Rizopoulos, D. (2012) \emph{Joint Models for Longitudinal and Time-to-Event Data: with
Applications in R}. Boca Raton: Chapman and Hall/CRC.
}
\seealso{\code{\link{survfitJM.JMbayes}}, \code{\link{jointModelBayes}}}
\examples{
\dontrun{
# linear mixed model fit
fitLME <- lme(log(serBilir) ~ drug * year, data = pbc2,
random = ~ year | id)
# survival regression fit
fitSURV <- coxph(Surv(years, status2) ~ drug, data = pbc2.id,
x = TRUE)
# joint model fit, under the (default) Weibull model
fitJOINT <- jointModelBayes(fitLME, fitSURV, timeVar = "year")
DF <- with(pbc2, expand.grid(drug = levels(drug),
year = seq(min(year), max(year), len = 100)))
Ps <- predict(fitJOINT, DF, interval = "confidence", return = TRUE)
require(lattice)
xyplot(pred + low + upp ~ year | drug, data = Ps,
type = "l", col = c(2,1,1), lty = c(1,2,2), lwd = 2,
ylab = "Average log serum Bilirubin")
# Subject-specific predictions
ND <- pbc2[pbc2$id == 2, ]
Ps.ss <- predict(fitJOINT, ND, type = "Subject",
interval = "confidence", return = TRUE)
xyplot(pred + low + upp ~ year | id, data = Ps.ss,
type = "l", col = c(2,1,1), lty = c(1,2,2), lwd = 2,
ylab = "Average log serum Bilirubin")
}
}
\keyword{methods}
|
# script to examine efficacy of surface sterilization procedure
# init: 2019-AUG-28
# PTH
# ------ #
# header #
# ------ #
library(dplyr)
library(tidyr)
library(ggplot2)
library(lme4)
library(lmerTest)
# ---------------- #
# data preparation #
# ---------------- #
# first, load up output from DADA2 which contains samples from Diversity plots
data.path <- "/Users/phumph/Dropbox/Phyllosphere_project/analysis_phy/16S_seq/" # change to local data path for repeating this analysis.
outdir <- "/Users/phumph/Dropbox/Phyllosphere_project/analysis_phy/coinfection/figs/"
TAB <- read.csv(paste0(data.path,"seqs_through_pipe_v1.csv"),T) # output from 00_DADA2_pipeline
ASV <- read.csv(paste0(data.path,"dada2_ASV_table_v3.csv"),T, row.names = 1) # output from 00_DADA2_pipeline
TAX <- read.csv(paste0(data.path,"dada2_taxa_Silva_wSpp_uniques_v1.csv"),T) # output from 00_DADA2_pipeline
# grab subset of ASV table corresponding to DIV plots
# this is the set which got the non-sterilization control treatment
# for testing efficacy of this procedure
DIV <- ASV[grep('D',row.names(ASV)),]
DIV <- DIV[!grepl('NPB',row.names(DIV)), ]
DIV$CONTROL <- 0
DIV$CONTROL[grep('C',row.names(DIV))] <- 1
DIV$sample_id <- sapply(row.names(DIV), function(x) gsub('C','',x))
DIV$row.sum <- rowSums(DIV[, !names(DIV) %in% c('CONTROL','sample_id')])
DIVC <- DIV[DIV$sample_id %in% (DIV$sample_id[DIV$CONTROL==1]), ]
# select only those with paired data:
# count how many rows (sterilized + control) each sample contributes
paired_samples <- rowSums(table(DIVC$sample_id, DIVC$CONTROL))
to_exclude <- names(paired_samples)[paired_samples < 2]
# 'to_exclude' is a character vector: the old test 'to_exclude > 0' did a
# lexical string comparison, and errored ("argument is of length zero")
# whenever no samples needed excluding. Test its length instead.
if (length(to_exclude) > 0) {
  DIVC <- DIVC[!DIVC$sample_id %in% to_exclude, ]
}
divc_hits <- DIVC %>%
dplyr::select(-CONTROL, -row.sum) %>%
dplyr::summarise_if(is.numeric, sum)
# get those with no hits
no_hits <- names(divc_hits)[divc_hits==0]
TAB2 <- dplyr::filter(TAB, X %in% row.names(DIVC))
TAX2 <- dplyr::filter(TAX, !X %in% no_hits)
rownames(TAX2) <- TAX2[,'X']
# find cp and mt:
cp <- grep("Chloroplast", TAX2[,'unique.id'])
mt <- grep("Mitochondria", TAX2[,'unique.id'])
###
### APPROACH 1: assume cp and mt seqs are correct; sum and calculate gamma
###
# sum host-derived reads in DIVC
DIVC$host <- rowSums(DIVC[, names(DIVC) %in% paste0(TAX2[cp,'X']) | names(DIVC) %in% TAX2[mt,'X']])
DIVC$all <- rowSums(DIVC[, !names(DIVC) %in% tail(names(DIVC), 4)])
DIVC$bact <- DIVC$all - DIVC$host
DIVC$gamma <- log(DIVC$bact / DIVC$host)
# do Bayesian model to report posterior of coefficient estimate;
# re-sample logCFU model to estimate median effect size of surface sterilization
DIVC$CONTROL <- factor(DIVC$CONTROL)
library(brms)
gamma_m2 <- brm(bf(gamma ~ CONTROL + (1 | sample_id)),
data = DIVC,
iter = 8000,
warmup = 4000,
cores = 4)
summary(gamma_m2)
# preparing data for plotting
DIVCb <-
DIVC %>%
dplyr::select(sample_id, CONTROL, gamma) %>%
tidyr::spread(key = 'CONTROL', value = 'gamma', fill = NA) %>%
tidyr::gather(key = 'CONTROL', value = 'gamma', -sample_id) %>%
tidyr::spread(key = 'CONTROL', value = 'gamma') %>%
dplyr::mutate(diff = `1` - `0`)
pp_diff <- data.frame(coef = as.matrix(gamma_m2)[,2])
# generate plots for supplement:
# panel (a): paired gamma
ss_p1 <- ggplot(DIVC, aes(x = CONTROL, y = gamma, group = sample_id)) +
geom_line(alpha = 0.5) +
geom_point(alpha = 0.5) +
theme_minimal() +
xlab('surface sterilized') +
ylab(expression(gamma)) +
theme(panel.grid = element_blank(),
axis.line = element_line(),
plot.margin = unit(c(1,0,1,1),'lines')) +
scale_y_continuous(limits = c(-10,5), breaks = seq(-10,5,2.5)) +
scale_x_discrete(labels = c('yes','no'))
# panel (b): distribution of differences over-plotted with marginal effect
ss_p2 <- ggplot(DIVCb, aes(x = 1, y = diff)) +
geom_jitter(width = 0.05, alpha = 0.5) +
geom_boxplot(width = 0.2, alpha = 0.5) +
theme_minimal() +
theme(panel.grid = element_blank(),
axis.line = element_line(),
axis.text.x = element_blank(),
plot.margin = unit(c(1,0,0,0),'lines')) +
xlab('') +
ylab(expression(Delta ~ gamma)) +
geom_hline(yintercept = 0, lty = 2, col = 'black') +
scale_y_continuous(limits = c(-2, 5), breaks = seq(-2,5,1)) +
scale_x_continuous(limits = c(0.8,1.2))
ss_p3 <- ggplot(pp_diff, aes(x = coef)) +
geom_density(fill = 'gray40', col = NA, alpha = 0.5) +
theme_minimal() +
coord_flip() +
theme(panel.grid = element_blank(),
axis.line = element_blank(),
axis.text = element_blank(),
plot.margin = unit(c(0,1,0,-1),'lines')) +
xlab('') +
ylab('') +
scale_x_continuous(limits = c(-2, 5), breaks = seq(-2,5,1)) +
geom_vline(xintercept = quantile(pp_diff$coef, probs = c(0.025, 0.975)), lty = 3, col = 'gray40') +
geom_vline(xintercept = quantile(pp_diff$coef, probs = c(0.5)), lty = 1, col = 'gray40')
ss_plot_all <- ggpubr::ggarrange(plotlist = list(ss_p1, ss_p2, ss_p3), ncol = 3,
align = 'h',
widths = c(1,0.4,0.25), labels = c('a','b',''))
ggsave(ss_plot_all, file = file.path(outdir,'ss_plot_all.pdf'),
width = 4,
height = 2.5,
device = 'pdf',
units = 'in')
###
### APPROACH 2: manually inspect cp and mt sequences
###
# export .fasta of cp and mt sequences
cp_seq_name <- paste0('>seq',rownames(TAX2)[cp])
cp_seqs <- rownames(TAX2)[cp]
mt_seq_name <- paste0('>seq',rownames(TAX2)[mt])
mt_seqs <- rownames(TAX2)[mt]
# create interleaved character vectors of sequences and names:
MT <- character(length(mt_seqs) * 2)
MT[c(TRUE, FALSE)] <- mt_seq_name
MT[c(FALSE, TRUE)] <- mt_seqs
writeLines(MT, con = paste0(data.path,"mtASV_seqs_DIVC.fasta"), sep = "\n")
CP <- character(length(cp_seqs) * 2)
CP[c(TRUE, FALSE)] <- cp_seq_name
CP[c(FALSE, TRUE)] <- cp_seqs
writeLines(CP, con = paste0(data.path,"cpASV_seqs_DIVC.fasta"), sep = "\n")
library(taxize)
# import blastn hit tables of results
mtbr <- read.csv(paste0(data.path,"mt_hit_table_DIVC.csv"), F)
names(mtbr) <- c("seqid", "subject", "identity", "coverage", "mismatches", "gaps", "seq_start", "seq_end", "sub_start", "sub_end", "e", "score")
cpbr <- read.csv(paste0(data.path,"cp_hit_table_DIVC.csv"), F)
names(cpbr) <- c("seqid", "subject", "identity", "coverage", "mismatches", "gaps", "seq_start", "seq_end", "sub_start", "sub_end", "e", "score")
write.hit.num <- function(df){
  # Append a 'hit.num' column ranking BLAST hits within each query sequence:
  # the i-th row for a given 'seqid' (in order of appearance) gets
  # hit.num == i. Used downstream to rank taxonomic matches.
  #
  # df: data frame with a 'seqid' column (one row per BLAST hit).
  # Returns: df with an integer 'hit.num' column added.
  #
  # ave() applies seq_along() within each seqid group while preserving the
  # original row order -- equivalent to the old per-group 1:len loop, but
  # O(n) and safe for a zero-row input (the old 'for (s in 1:length(uniques))'
  # loop misbehaved when there were no rows).
  df[['hit.num']] <- ave(seq_len(nrow(df)), df[['seqid']], FUN = seq_along)
  return(df)
}
# write hit number in order for later ranking taxonomic matches
mtbr <- write.hit.num(mtbr)
cpbr <- write.hit.num(cpbr)
# define function to assign taxonomy to the GI numbers of the hits
# Look up the NCBI taxonomy ID for a GenBank GI/accession number via
# taxize::genbank2uid(), returning it as a character scalar.
# NOTE(review): relies on a global ENTREZ_KEY (NCBI Entrez API key); the
# assignment below is commented out, so this fails unless ENTREZ_KEY is
# defined elsewhere -- confirm. Also note an apparent real API key is
# committed in the comment below; it should be moved to an environment
# variable (e.g. Sys.getenv("ENTREZ_KEY")) and revoked.
# Each call makes one network request to NCBI, so mapping many IDs is slow.
get_taxonomy <- function(x) {
  #ENTREZ_KEY="90cc1824a010404743ee8240935b44464207"
  paste0(genbank2uid(x, key = ENTREZ_KEY)[[1]][1]) # taxonomy ID
}
# now get taxID from NCBI using taxize function genbank2uid
# grab unique gi numbers:
mtbr_gi <- data.frame(gi = unique(mtbr[,'subject']), tax_ID = NA) # n = 593
cpbr_gi <- data.frame(gi = unique(cpbr[,'subject']), tax_ID = NA) # n = 243
mtbr_gi[,'tax_ID'] <- sapply(mtbr_gi[,'gi'], get_taxonomy) # this is slow, since function makes n queries to ncbi. ~3 min.
cpbr_gi[,'tax_ID'] <- sapply(cpbr_gi[,'gi'], get_taxonomy) # this is slow, since function makes n queries to ncbi. ~2 min.
mt_class.all <- classification(unique(mtbr_gi[,'tax_ID']), callopts = list(), return_id = TRUE, db = 'ncbi')
#cpbr_gi2 <- cpbr_gi[cpbr_gi[,'tax_ID']!="NA",]
cp_class.all <- classification(unique(cpbr_gi[,'tax_ID']), callopts = list(), return_id = TRUE, db = 'ncbi')
# turn list into data.frame
mt_df.all <- do.call(rbind, mt_class.all)
cp_df.all <- do.call(rbind, cp_class.all)
# merge to match GI number with tax_ID, and then grab taxonomic name that matches tax_ID
# mtbr.all <- merge(mtbr,mtbr_gi, by = 'gi', sort = F)
mtbr[,'tax_ID'] <- sapply(mtbr[,'subject'], function(x) mtbr_gi[,'tax_ID'][match(x, mtbr_gi[,'gi'])])
mtbr[,'name'] <- sapply(mtbr[,'tax_ID'], function(x) mt_df.all[,'name'][match(x, mt_df.all[,'id'])])
cpbr[,'tax_ID'] <- sapply(cpbr[,'subject'], function(x) cpbr_gi[,'tax_ID'][match(x, cpbr_gi[,'gi'])])
cpbr[,'name'] <- sapply(cpbr[,'tax_ID'], function(x) cp_df.all[,'name'][match(x, cp_df.all[,'id'])])
# ------------------------------------------ #
# analyzing effects of surface sterilization #
# ------------------------------------------ #
# plotting library size differences
lib_size_diffs <- ggplot(DIVC, aes(x = factor(CONTROL), y = log(row.sum,10), group = sample_id)) +
geom_point() +
geom_line()
a1 <- lmerTest::lmer(log(row.sum,10) ~ CONTROL + (1 | sample_id), data = DIVC)
summary(a1)
# OK so first finding: sterilization decreases number of reads. That's fairly clear from these data.
# Next I need to determine gamma by identifying cp + mt sequences in this set.
# I'll basically copy down the routine from the post-processing script
# and re-apply it here to these samples.
#
| /scripts/run_surface_steril_analysis.R | permissive | xiaotuxiaotu520/coinfection | R | false | false | 9,076 | r | # script to examine efficacy of surface sterilization procedure
# init: 2019-AUG-28
# PTH
# ------ #
# header #
# ------ #
library(dplyr)
library(tidyr)
library(ggplot2)
library(lme4)
library(lmerTest)
# ---------------- #
# data preparation #
# ---------------- #
# first, load up output from DADA2 which contains samples from Diversity plots
data.path <- "/Users/phumph/Dropbox/Phyllosphere_project/analysis_phy/16S_seq/" # change to local data path for repeating this analysis.
outdir <- "/Users/phumph/Dropbox/Phyllosphere_project/analysis_phy/coinfection/figs/"
TAB <- read.csv(paste0(data.path,"seqs_through_pipe_v1.csv"),T) # output from 00_DADA2_pipeline
ASV <- read.csv(paste0(data.path,"dada2_ASV_table_v3.csv"),T, row.names = 1) # output from 00_DADA2_pipeline
TAX <- read.csv(paste0(data.path,"dada2_taxa_Silva_wSpp_uniques_v1.csv"),T) # output from 00_DADA2_pipeline
# grab subset of ASV table corresponding to DIV plots
# this is the set which got the non-sterilization control treatment
# for testing efficacy of this procedure
DIV <- ASV[grep('D',row.names(ASV)),]
DIV <- DIV[!grepl('NPB',row.names(DIV)), ]
DIV$CONTROL <- 0
DIV$CONTROL[grep('C',row.names(DIV))] <- 1
DIV$sample_id <- sapply(row.names(DIV), function(x) gsub('C','',x))
DIV$row.sum <- rowSums(DIV[, !names(DIV) %in% c('CONTROL','sample_id')])
DIVC <- DIV[DIV$sample_id %in% (DIV$sample_id[DIV$CONTROL==1]), ]
# select only those with paired data:
# count how many rows (sterilized + control) each sample contributes
paired_samples <- rowSums(table(DIVC$sample_id, DIVC$CONTROL))
to_exclude <- names(paired_samples)[paired_samples < 2]
# 'to_exclude' is a character vector: the old test 'to_exclude > 0' did a
# lexical string comparison, and errored ("argument is of length zero")
# whenever no samples needed excluding. Test its length instead.
if (length(to_exclude) > 0) {
  DIVC <- DIVC[!DIVC$sample_id %in% to_exclude, ]
}
divc_hits <- DIVC %>%
dplyr::select(-CONTROL, -row.sum) %>%
dplyr::summarise_if(is.numeric, sum)
# get those with no hits
no_hits <- names(divc_hits)[divc_hits==0]
TAB2 <- dplyr::filter(TAB, X %in% row.names(DIVC))
TAX2 <- dplyr::filter(TAX, !X %in% no_hits)
rownames(TAX2) <- TAX2[,'X']
# find cp and mt:
cp <- grep("Chloroplast", TAX2[,'unique.id'])
mt <- grep("Mitochondria", TAX2[,'unique.id'])
###
### APPROACH 1: assume cp and mt seqs are correct; sum and calculate gamma
###
# sum host-derived reads in DIVC
DIVC$host <- rowSums(DIVC[, names(DIVC) %in% paste0(TAX2[cp,'X']) | names(DIVC) %in% TAX2[mt,'X']])
DIVC$all <- rowSums(DIVC[, !names(DIVC) %in% tail(names(DIVC), 4)])
DIVC$bact <- DIVC$all - DIVC$host
DIVC$gamma <- log(DIVC$bact / DIVC$host)
# do Bayesian model to report posterior of coefficient estimate;
# re-sample logCFU model to estimate median effect size of surface sterilization
DIVC$CONTROL <- factor(DIVC$CONTROL)
library(brms)
gamma_m2 <- brm(bf(gamma ~ CONTROL + (1 | sample_id)),
data = DIVC,
iter = 8000,
warmup = 4000,
cores = 4)
summary(gamma_m2)
# preparing data for plotting
DIVCb <-
DIVC %>%
dplyr::select(sample_id, CONTROL, gamma) %>%
tidyr::spread(key = 'CONTROL', value = 'gamma', fill = NA) %>%
tidyr::gather(key = 'CONTROL', value = 'gamma', -sample_id) %>%
tidyr::spread(key = 'CONTROL', value = 'gamma') %>%
dplyr::mutate(diff = `1` - `0`)
pp_diff <- data.frame(coef = as.matrix(gamma_m2)[,2])
# generate plots for supplement:
# panel (a): paired gamma
ss_p1 <- ggplot(DIVC, aes(x = CONTROL, y = gamma, group = sample_id)) +
geom_line(alpha = 0.5) +
geom_point(alpha = 0.5) +
theme_minimal() +
xlab('surface sterilized') +
ylab(expression(gamma)) +
theme(panel.grid = element_blank(),
axis.line = element_line(),
plot.margin = unit(c(1,0,1,1),'lines')) +
scale_y_continuous(limits = c(-10,5), breaks = seq(-10,5,2.5)) +
scale_x_discrete(labels = c('yes','no'))
# panel (b): distribution of differences over-plotted with marginal effect
ss_p2 <- ggplot(DIVCb, aes(x = 1, y = diff)) +
geom_jitter(width = 0.05, alpha = 0.5) +
geom_boxplot(width = 0.2, alpha = 0.5) +
theme_minimal() +
theme(panel.grid = element_blank(),
axis.line = element_line(),
axis.text.x = element_blank(),
plot.margin = unit(c(1,0,0,0),'lines')) +
xlab('') +
ylab(expression(Delta ~ gamma)) +
geom_hline(yintercept = 0, lty = 2, col = 'black') +
scale_y_continuous(limits = c(-2, 5), breaks = seq(-2,5,1)) +
scale_x_continuous(limits = c(0.8,1.2))
ss_p3 <- ggplot(pp_diff, aes(x = coef)) +
geom_density(fill = 'gray40', col = NA, alpha = 0.5) +
theme_minimal() +
coord_flip() +
theme(panel.grid = element_blank(),
axis.line = element_blank(),
axis.text = element_blank(),
plot.margin = unit(c(0,1,0,-1),'lines')) +
xlab('') +
ylab('') +
scale_x_continuous(limits = c(-2, 5), breaks = seq(-2,5,1)) +
geom_vline(xintercept = quantile(pp_diff$coef, probs = c(0.025, 0.975)), lty = 3, col = 'gray40') +
geom_vline(xintercept = quantile(pp_diff$coef, probs = c(0.5)), lty = 1, col = 'gray40')
ss_plot_all <- ggpubr::ggarrange(plotlist = list(ss_p1, ss_p2, ss_p3), ncol = 3,
align = 'h',
widths = c(1,0.4,0.25), labels = c('a','b',''))
ggsave(ss_plot_all, file = file.path(outdir,'ss_plot_all.pdf'),
width = 4,
height = 2.5,
device = 'pdf',
units = 'in')
###
### APPROACH 2: manually inspect cp and mt sequences
###
# export .fasta of cp and mt sequences
cp_seq_name <- paste0('>seq',rownames(TAX2)[cp])
cp_seqs <- rownames(TAX2)[cp]
mt_seq_name <- paste0('>seq',rownames(TAX2)[mt])
mt_seqs <- rownames(TAX2)[mt]
# create interleaved character vectors of sequences and names:
MT <- character(length(mt_seqs) * 2)
MT[c(TRUE, FALSE)] <- mt_seq_name
MT[c(FALSE, TRUE)] <- mt_seqs
writeLines(MT, con = paste0(data.path,"mtASV_seqs_DIVC.fasta"), sep = "\n")
CP <- character(length(cp_seqs) * 2)
CP[c(TRUE, FALSE)] <- cp_seq_name
CP[c(FALSE, TRUE)] <- cp_seqs
writeLines(CP, con = paste0(data.path,"cpASV_seqs_DIVC.fasta"), sep = "\n")
library(taxize)
# import blastn hit tables of results
mtbr <- read.csv(paste0(data.path,"mt_hit_table_DIVC.csv"), F)
names(mtbr) <- c("seqid", "subject", "identity", "coverage", "mismatches", "gaps", "seq_start", "seq_end", "sub_start", "sub_end", "e", "score")
cpbr <- read.csv(paste0(data.path,"cp_hit_table_DIVC.csv"), F)
names(cpbr) <- c("seqid", "subject", "identity", "coverage", "mismatches", "gaps", "seq_start", "seq_end", "sub_start", "sub_end", "e", "score")
write.hit.num <- function(df){
  # Append a 'hit.num' column ranking BLAST hits within each query sequence:
  # the i-th row for a given 'seqid' (in order of appearance) gets
  # hit.num == i. Used downstream to rank taxonomic matches.
  #
  # df: data frame with a 'seqid' column (one row per BLAST hit).
  # Returns: df with an integer 'hit.num' column added.
  #
  # ave() applies seq_along() within each seqid group while preserving the
  # original row order -- equivalent to the old per-group 1:len loop, but
  # O(n) and safe for a zero-row input (the old 'for (s in 1:length(uniques))'
  # loop misbehaved when there were no rows).
  df[['hit.num']] <- ave(seq_len(nrow(df)), df[['seqid']], FUN = seq_along)
  return(df)
}
# write hit number in order for later ranking taxonomic matches
mtbr <- write.hit.num(mtbr)
cpbr <- write.hit.num(cpbr)
# define function to assign taxonomy to the GI numbers of the hits
# Look up the NCBI taxonomy ID for a GenBank GI/accession number via
# taxize::genbank2uid(), returning it as a character scalar.
# NOTE(review): relies on a global ENTREZ_KEY (NCBI Entrez API key); the
# assignment below is commented out, so this fails unless ENTREZ_KEY is
# defined elsewhere -- confirm. Also note an apparent real API key is
# committed in the comment below; it should be moved to an environment
# variable (e.g. Sys.getenv("ENTREZ_KEY")) and revoked.
# Each call makes one network request to NCBI, so mapping many IDs is slow.
get_taxonomy <- function(x) {
  #ENTREZ_KEY="90cc1824a010404743ee8240935b44464207"
  paste0(genbank2uid(x, key = ENTREZ_KEY)[[1]][1]) # taxonomy ID
}
# now get taxID from NCBI using taxize function genbank2uid
# grab unique gi numbers:
mtbr_gi <- data.frame(gi = unique(mtbr[,'subject']), tax_ID = NA) # n = 593
cpbr_gi <- data.frame(gi = unique(cpbr[,'subject']), tax_ID = NA) # n = 243
mtbr_gi[,'tax_ID'] <- sapply(mtbr_gi[,'gi'], get_taxonomy) # this is slow, since function makes n queries to ncbi. ~3 min.
cpbr_gi[,'tax_ID'] <- sapply(cpbr_gi[,'gi'], get_taxonomy) # this is slow, since function makes n queries to ncbi. ~2 min.
mt_class.all <- classification(unique(mtbr_gi[,'tax_ID']), callopts = list(), return_id = TRUE, db = 'ncbi')
#cpbr_gi2 <- cpbr_gi[cpbr_gi[,'tax_ID']!="NA",]
cp_class.all <- classification(unique(cpbr_gi[,'tax_ID']), callopts = list(), return_id = TRUE, db = 'ncbi')
# turn list into data.frame
mt_df.all <- do.call(rbind, mt_class.all)
cp_df.all <- do.call(rbind, cp_class.all)
# merge to match GI number with tax_ID, and then grab taxonomic name that matches tax_ID
# mtbr.all <- merge(mtbr,mtbr_gi, by = 'gi', sort = F)
mtbr[,'tax_ID'] <- sapply(mtbr[,'subject'], function(x) mtbr_gi[,'tax_ID'][match(x, mtbr_gi[,'gi'])])
mtbr[,'name'] <- sapply(mtbr[,'tax_ID'], function(x) mt_df.all[,'name'][match(x, mt_df.all[,'id'])])
cpbr[,'tax_ID'] <- sapply(cpbr[,'subject'], function(x) cpbr_gi[,'tax_ID'][match(x, cpbr_gi[,'gi'])])
cpbr[,'name'] <- sapply(cpbr[,'tax_ID'], function(x) cp_df.all[,'name'][match(x, cp_df.all[,'id'])])
# ------------------------------------------ #
# analyzing effects of surface sterilization #
# ------------------------------------------ #
# plotting library size differences
lib_size_diffs <- ggplot(DIVC, aes(x = factor(CONTROL), y = log(row.sum,10), group = sample_id)) +
geom_point() +
geom_line()
a1 <- lmerTest::lmer(log(row.sum,10) ~ CONTROL + (1 | sample_id), data = DIVC)
summary(a1)
# OK so first finding: sterilization decreases number of reads. That's fairly clear from these data.
# Next I need to determine gamma by identifying cp + mt sequences in this set.
# I'll basically copy down the routine from the post-processing script
# and re-apply it here to these samples.
#
|
# Author: Robert J. Hijmans
# Date : June 2008
# Version 0.9
# Licence GPL v3
setMethod("plot", signature(x='Raster', y='Raster'),
	function(x, y, maxpixels=100000, cex, xlab, ylab, nc, nr, maxnl=16, main, add=FALSE, gridded=FALSE, ncol=25, nrow=25, ...) {

		# Scatterplot of the cell values of one Raster object against another.
		# A regular sample of at most 'maxpixels' cells is used. With
		# gridded=TRUE a 2D density grid (ncol x nrow) is drawn via .plotdens
		# instead of points. Multi-layer objects produce one panel per layer
		# (up to 'maxnl'), recycling values when x and y differ in nlayers.

		# x and y must match cell-for-cell
		compareRaster(c(x, y), extent=TRUE, rowcol=TRUE, crs=FALSE, stopiffalse=TRUE)

		nlx <- nlayers(x)
		nly <- nlayers(y)
		maxnl <- max(1, round(maxnl))
		nl <- max(nlx, nly)
		# drop layers beyond the maximum number of panels
		if (nl > maxnl) {
			nl <- maxnl
			if (nlx > maxnl) {
				x <- x[[1:maxnl]]
				nlx <- maxnl
			}
			if (nly > maxnl) {
				y <- y[[1:maxnl]]
				nly <- maxnl
			}
		}

		if (missing(main)) {
			main <- ''
		}

		# axis labels default to layer names; a single user-supplied label
		# is recycled across layers
		if (missing(xlab)) {
			ln1 <- names(x)
		} else {
			ln1 <- xlab
			if (length(ln1) == 1) {
				ln1 <- rep(ln1, nlx)
			}
		}
		if (missing(ylab)) {
			ln2 <- names(y)
		} else {
			ln2 <- ylab
			# bug fix: this previously tested length(ln1) (the x labels),
			# so a scalar ylab was only recycled if xlab was also scalar
			if (length(ln2) == 1) {
				ln2 <- rep(ln2, nly)
			}
		}

		cells <- ncell(x)
		# gdal selects a slightly different set of cells than raster does for other formats.
		# using gdal directly to subsample is faster.
		if (gridded) {
			if ((ncell(x) * (nlx + nly)) < .maxmemory()) {
				maxpixels <- ncell(x)
			}
		}
		dx <- .driver(x, warn=FALSE)
		dy <- .driver(y, warn=FALSE)
		# scalar condition: use && (short-circuit) rather than elementwise &
		if (all(dx == 'gdal') && all(dy == 'gdal')) {
			x <- sampleRegular(x, size=maxpixels, useGDAL=TRUE)
			y <- sampleRegular(y, size=maxpixels, useGDAL=TRUE)
		} else {
			x <- sampleRegular(x, size=maxpixels)
			y <- sampleRegular(y, size=maxpixels)
		}
		if (NROW(x) < cells) {
			# (message fix: removed a stray closing parenthesis)
			warning(paste('plot used a sample of ', round(100*NROW(x)/cells, 1), '% of the cells. You can use "maxpixels" to increase the sample', sep=""))
		}

		# smaller plotting symbols for larger samples
		if (missing(cex)) {
			if (NROW(x) < 100) {
				cex <- 1
			} else if (NROW(x) < 1000) {
				cex <- 0.5
			} else {
				cex <- 0.2
			}
		}

		if (nlx != nly) {
			# recycle the shorter object's columns (and labels) across panels
			d <- cbind(as.vector(x), as.vector(y))
			x <- matrix(d[,1], ncol=nl)
			y <- matrix(d[,2], ncol=nl)
			lab <- vector(length=nl)
			lab[] <- ln1
			ln1 <- lab
			lab[] <- ln2
			ln2 <- lab
		}

		if (nl > 1) {
			# work out the nr x nc panel layout and set par(mfrow),
			# restoring graphics state on exit
			if (missing(nc)) {
				nc <- ceiling(sqrt(nl))
			} else {
				nc <- max(1, min(nl, round(nc)))
			}
			if (missing(nr)) {
				nr <- ceiling(nl / nc)
			} else {
				nr <- max(1, min(nl, round(nr)))
				nc <- ceiling(nl / nr)
			}
			old.par <- graphics::par(no.readonly = TRUE)
			on.exit(graphics::par(old.par))
			graphics::par(mfrow=c(nr, nc), mar=c(4, 4, 2, 2))

			if (!gridded) {
				if (add) {
					for (i in seq_len(nl)) {
						points(x[,i], y[,i], cex=cex, ...)
					}
				} else {
					for (i in seq_len(nl)) {
						plot(x[,i], y[,i], cex=cex, xlab=ln1[i], ylab=ln2[i], main=main[i], ...)
					}
				}
			} else {
				for (i in seq_len(nl)) {
					.plotdens(x[,i], y[,i], nc=ncol, nr=nrow, main=main[i], xlab=ln1[i], ylab=ln2[i], add=add, ...)
				}
			}
		} else {
			if (!gridded) {
				if (add) {
					points(x, y, cex=cex, ...)
				} else {
					plot(x, y, cex=cex, xlab=ln1[1], ylab=ln2[1], main=main[1], ...)
				}
			} else {
				.plotdens(x, y, nc=ncol, nr=nrow, main=main[1], xlab=ln1[1], ylab=ln2[1], ...)
			}
		}
	}
)
# Gridded 2D density ("heat map") scatterplot helper: bins the (x, y)
# pairs into an nc x nr raster of counts and plots that raster via
# .plotraster2. Called by the Raster,Raster plot method when gridded=TRUE.
.plotdens <- function(x, y, nc, nr, asp=NULL, xlim=NULL, ylim=NULL, ...) {
	# drop pairs with any NA; error if nothing remains
	xy <- stats::na.omit(cbind(x,y))
	if (nrow(xy) == 0) {
		stop('only NA values (in this sample?)')
	}
	# data ranges per column; widen a degenerate (constant) range by +/- 0.5
	# so the raster extent is non-empty
	r <- apply(xy, 2, range)
	rx <- r[,1]
	if (rx[1] == rx[2]) {
		rx[1] <- rx[1] - 0.5
		rx[2] <- rx[2] + 0.5
	}
	ry <- r[,2]
	if (ry[1] == ry[2]) {
		ry[1] <- ry[1] - 0.5
		ry[2] <- ry[2] + 0.5
	}
	# count points per cell (empty cells get 0)
	out <- raster(xmn=rx[1], xmx=rx[2], ymn=ry[1], ymx=ry[2], ncol=nc, nrow=nr)
	out <- rasterize(xy, out, fun=function(x, ...) length(x), background=0)
	# optionally crop/extend the count raster to user-supplied limits
	if (!is.null(xlim) | !is.null(ylim)) {
		# NOTE(review): at this point 'x' and 'y' are numeric vectors, yet
		# xmin()/xmax()/ymin()/ymax() are raster generics, and ylim is
		# derived from 'x' rather than 'y'. This branch looks broken for
		# the default NULL limits -- presumably range(x) / range(y) was
		# intended; confirm before relying on it.
		if (is.null(xlim)) xlim <- c(xmin(x), xmax(x))
		if (is.null(ylim)) ylim <- c(ymin(x), ymax(x))
		e <- extent(xlim, ylim)
		out <- extend(crop(out, e), e, value=0)
	}
	.plotraster2(out, maxpixels=nc*nr, asp=asp, ...)
}
| /R/plot2rasters.R | no_license | cran/raster | R | false | false | 4,106 | r | # Author: Robert J. Hijmans
# Date : June 2008
# Version 0.9
# Licence GPL v3
setMethod("plot", signature(x='Raster', y='Raster'),
	function(x, y, maxpixels=100000, cex, xlab, ylab, nc, nr, maxnl=16, main, add=FALSE, gridded=FALSE, ncol=25, nrow=25, ...) {

		# Scatterplot of the cell values of one Raster object against another.
		# A regular sample of at most 'maxpixels' cells is used. With
		# gridded=TRUE a 2D density grid (ncol x nrow) is drawn via .plotdens
		# instead of points. Multi-layer objects produce one panel per layer
		# (up to 'maxnl'), recycling values when x and y differ in nlayers.

		# x and y must match cell-for-cell
		compareRaster(c(x, y), extent=TRUE, rowcol=TRUE, crs=FALSE, stopiffalse=TRUE)

		nlx <- nlayers(x)
		nly <- nlayers(y)
		maxnl <- max(1, round(maxnl))
		nl <- max(nlx, nly)
		# drop layers beyond the maximum number of panels
		if (nl > maxnl) {
			nl <- maxnl
			if (nlx > maxnl) {
				x <- x[[1:maxnl]]
				nlx <- maxnl
			}
			if (nly > maxnl) {
				y <- y[[1:maxnl]]
				nly <- maxnl
			}
		}

		if (missing(main)) {
			main <- ''
		}

		# axis labels default to layer names; a single user-supplied label
		# is recycled across layers
		if (missing(xlab)) {
			ln1 <- names(x)
		} else {
			ln1 <- xlab
			if (length(ln1) == 1) {
				ln1 <- rep(ln1, nlx)
			}
		}
		if (missing(ylab)) {
			ln2 <- names(y)
		} else {
			ln2 <- ylab
			# bug fix: this previously tested length(ln1) (the x labels),
			# so a scalar ylab was only recycled if xlab was also scalar
			if (length(ln2) == 1) {
				ln2 <- rep(ln2, nly)
			}
		}

		cells <- ncell(x)
		# gdal selects a slightly different set of cells than raster does for other formats.
		# using gdal directly to subsample is faster.
		if (gridded) {
			if ((ncell(x) * (nlx + nly)) < .maxmemory()) {
				maxpixels <- ncell(x)
			}
		}
		dx <- .driver(x, warn=FALSE)
		dy <- .driver(y, warn=FALSE)
		# scalar condition: use && (short-circuit) rather than elementwise &
		if (all(dx == 'gdal') && all(dy == 'gdal')) {
			x <- sampleRegular(x, size=maxpixels, useGDAL=TRUE)
			y <- sampleRegular(y, size=maxpixels, useGDAL=TRUE)
		} else {
			x <- sampleRegular(x, size=maxpixels)
			y <- sampleRegular(y, size=maxpixels)
		}
		if (NROW(x) < cells) {
			# (message fix: removed a stray closing parenthesis)
			warning(paste('plot used a sample of ', round(100*NROW(x)/cells, 1), '% of the cells. You can use "maxpixels" to increase the sample', sep=""))
		}

		# smaller plotting symbols for larger samples
		if (missing(cex)) {
			if (NROW(x) < 100) {
				cex <- 1
			} else if (NROW(x) < 1000) {
				cex <- 0.5
			} else {
				cex <- 0.2
			}
		}

		if (nlx != nly) {
			# recycle the shorter object's columns (and labels) across panels
			d <- cbind(as.vector(x), as.vector(y))
			x <- matrix(d[,1], ncol=nl)
			y <- matrix(d[,2], ncol=nl)
			lab <- vector(length=nl)
			lab[] <- ln1
			ln1 <- lab
			lab[] <- ln2
			ln2 <- lab
		}

		if (nl > 1) {
			# work out the nr x nc panel layout and set par(mfrow),
			# restoring graphics state on exit
			if (missing(nc)) {
				nc <- ceiling(sqrt(nl))
			} else {
				nc <- max(1, min(nl, round(nc)))
			}
			if (missing(nr)) {
				nr <- ceiling(nl / nc)
			} else {
				nr <- max(1, min(nl, round(nr)))
				nc <- ceiling(nl / nr)
			}
			old.par <- graphics::par(no.readonly = TRUE)
			on.exit(graphics::par(old.par))
			graphics::par(mfrow=c(nr, nc), mar=c(4, 4, 2, 2))

			if (!gridded) {
				if (add) {
					for (i in seq_len(nl)) {
						points(x[,i], y[,i], cex=cex, ...)
					}
				} else {
					for (i in seq_len(nl)) {
						plot(x[,i], y[,i], cex=cex, xlab=ln1[i], ylab=ln2[i], main=main[i], ...)
					}
				}
			} else {
				for (i in seq_len(nl)) {
					.plotdens(x[,i], y[,i], nc=ncol, nr=nrow, main=main[i], xlab=ln1[i], ylab=ln2[i], add=add, ...)
				}
			}
		} else {
			if (!gridded) {
				if (add) {
					points(x, y, cex=cex, ...)
				} else {
					plot(x, y, cex=cex, xlab=ln1[1], ylab=ln2[1], main=main[1], ...)
				}
			} else {
				.plotdens(x, y, nc=ncol, nr=nrow, main=main[1], xlab=ln1[1], ylab=ln2[1], ...)
			}
		}
	}
)
# Gridded 2D density ("heat map") scatterplot helper: bins the (x, y)
# pairs into an nc x nr raster of counts and plots that raster via
# .plotraster2. Called by the Raster,Raster plot method when gridded=TRUE.
.plotdens <- function(x, y, nc, nr, asp=NULL, xlim=NULL, ylim=NULL, ...) {
	# drop pairs with any NA; error if nothing remains
	xy <- stats::na.omit(cbind(x,y))
	if (nrow(xy) == 0) {
		stop('only NA values (in this sample?)')
	}
	# data ranges per column; widen a degenerate (constant) range by +/- 0.5
	# so the raster extent is non-empty
	r <- apply(xy, 2, range)
	rx <- r[,1]
	if (rx[1] == rx[2]) {
		rx[1] <- rx[1] - 0.5
		rx[2] <- rx[2] + 0.5
	}
	ry <- r[,2]
	if (ry[1] == ry[2]) {
		ry[1] <- ry[1] - 0.5
		ry[2] <- ry[2] + 0.5
	}
	# count points per cell (empty cells get 0)
	out <- raster(xmn=rx[1], xmx=rx[2], ymn=ry[1], ymx=ry[2], ncol=nc, nrow=nr)
	out <- rasterize(xy, out, fun=function(x, ...) length(x), background=0)
	# optionally crop/extend the count raster to user-supplied limits
	if (!is.null(xlim) | !is.null(ylim)) {
		# NOTE(review): at this point 'x' and 'y' are numeric vectors, yet
		# xmin()/xmax()/ymin()/ymax() are raster generics, and ylim is
		# derived from 'x' rather than 'y'. This branch looks broken for
		# the default NULL limits -- presumably range(x) / range(y) was
		# intended; confirm before relying on it.
		if (is.null(xlim)) xlim <- c(xmin(x), xmax(x))
		if (is.null(ylim)) ylim <- c(ymin(x), ymax(x))
		e <- extent(xlim, ylim)
		out <- extend(crop(out, e), e, value=0)
	}
	.plotraster2(out, maxpixels=nc*nr, asp=asp, ...)
}
|
\name{readTableNoRowNames}
\alias{readTableNoRowNames}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Quickly read a rectangular table that has column names.
}
\description{
Quickly read a rectangular table that has column names.
}
\usage{
readTableNoRowNames(tableFile, what = character(), setNumeric = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{tableFile}{
If \code{tableFile} ends in '.gz', then this file will be read as a gzip-ed text file.
}
\item{what}{
%% ~~Describe \code{what} here~~
}
\item{setNumeric}{
%% ~~Describe \code{what} here~~
}
}
\examples{
\dontrun{readTableNoRowNames("tableWithHeader.txt")}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /reference/scripts/xhmmScripts/man/readTableNoRowNames.Rd | no_license | jixuan-wang/jxcnv | R | false | false | 856 | rd | \name{readTableNoRowNames}
\alias{readTableNoRowNames}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Quickly read a rectangular table that has column names.
}
\description{
Quickly read a rectangular table that has column names.
}
\usage{
readTableNoRowNames(tableFile, what = character(), setNumeric = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{tableFile}{
If \code{tableFile} ends in '.gz', then this file will be read as a gzip-ed text file.
}
\item{what}{
%% ~~Describe \code{what} here~~
}
\item{setNumeric}{
%% ~~Describe \code{what} here~~
}
}
\examples{
\dontrun{readTableNoRowNames("tableWithHeader.txt")}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# Loading packages
library(ranger)
library(caret)
library(ROCR)
library(e1071)
library(doParallel)
library(randomForest)
# Register a parallel backend with 4 workers.
# NOTE(review): the original comment said "using all cores" but the cluster
# size is hard-coded to 4 -- confirm the intended number of workers.
cl <- makePSOCKcluster(4)
registerDoParallel(cl)
##########################################################
# Split the data into training (80 %) and test (20 %) sets,
# stratified on the outcome 'Survived'.
split_index <- createDataPartition(titan_clean$Survived, p = 0.8, list = FALSE)
testing_set <- titan_clean[-split_index, ]
training_set <- titan_clean[split_index, ]
# Tune the random forest: grid search over 'nodesize' and 'ntree'
# with a fixed 'mtry' of 3.
nodesize.vals <- c(2, 3, 4, 5)
ntree.vals <- c(200, 500, 1000, 2000)
tuning.results <- tune.randomForest(Survived ~ .,
                                    data = training_set,
                                    mtry = 3,
                                    nodesize = nodesize.vals,
                                    ntree = ntree.vals)
# Save the tuning result as RDS and show it
saveRDS(tuning.results, "./tuning.results.rds")
print(tuning.results)
# Train a random forest with caret using repeated 10-fold cross-validation.
# BUG FIX: 'control' was defined but never passed to train(), so caret
# silently used its default bootstrap resampling instead of repeated CV.
control <- trainControl(method = "repeatedcv", number = 10, repeats = 3)
randfor_tuned <- train(Survived ~ ., data = training_set, method = "rf",
                       metric = "Accuracy", ntree = 2000, trControl = control)
# Print the trained random forest
print(randfor_tuned)
# Get details of the final model
print(randfor_tuned$finalModel)
randfor_tuned$results
# Save the trained model
saveRDS(randfor_tuned, "./randfor_tuned.rds")
# Use the trained-model recipe result to fit a final random forest
set.seed(127)
final_modelN <- randomForest(Survived ~ ., training_set, mtry = 2, ntree = 2000)
# Confusion matrix on the held-out test set
predictions <- predict(final_modelN, newdata = testing_set)
confusionMatrix(predictions, testing_set$Survived)
########################################################################
# Fit a random forest with different mtry/ntree values
set.seed(127)
final_model3 <- randomForest(Survived ~ ., training_set, mtry = 1, ntree = 1800)
# BUG FIX: the model file was saved/loaded as "./inal_model_rf.rds"
# (missing leading 'f'); both occurrences are corrected consistently.
saveRDS(final_model3, "./final_model_rf.rds")
rf.model <- readRDS("./final_model_rf.rds")
print(rf.model)
predictions <- predict(rf.model, newdata = testing_set)
confusionMatrix(predictions, testing_set$Survived)
# Plot ROC and precision/recall curves
rf.preds.values <- predict(rf.model, testing_set[, -1], type = "prob")
rf.predictions.values <- rf.preds.values[, 2]
predictions <- prediction(rf.predictions.values, testing_set$Survived)
par(mfrow = c(1, 2))
plot.roc.curve(predictions, title.text = "Random Forest ROC Curve")
plot.pr.curve(predictions, title.text = "Random Forest Precision/Recall Curve")
# BUG FIX: release the parallel workers; the cluster was never stopped.
stopCluster(cl)
registerDoSEQ()
| /R_CODE_PROJECT_FILES/randomForest.R | no_license | musanyaks/Titanic-Machine-Learning | R | false | false | 2,494 | r | #loading packages
library(ranger)
library(caret)
library(ROCR)
library(e1071)
library(doParallel)
library(randomForest)
# Register a parallel backend with 4 workers.
# NOTE(review): the original comment said "using all cores" but the cluster
# size is hard-coded to 4 -- confirm the intended number of workers.
cl <- makePSOCKcluster(4)
registerDoParallel(cl)
##########################################################
# Split the data into training (80 %) and test (20 %) sets,
# stratified on the outcome 'Survived'.
split_index <- createDataPartition(titan_clean$Survived, p = 0.8, list = FALSE)
testing_set <- titan_clean[-split_index, ]
training_set <- titan_clean[split_index, ]
# Tune the random forest: grid search over 'nodesize' and 'ntree'
# with a fixed 'mtry' of 3.
nodesize.vals <- c(2, 3, 4, 5)
ntree.vals <- c(200, 500, 1000, 2000)
tuning.results <- tune.randomForest(Survived ~ .,
                                    data = training_set,
                                    mtry = 3,
                                    nodesize = nodesize.vals,
                                    ntree = ntree.vals)
# Save the tuning result as RDS and show it
saveRDS(tuning.results, "./tuning.results.rds")
print(tuning.results)
# Train a random forest with caret using repeated 10-fold cross-validation.
# BUG FIX: 'control' was defined but never passed to train(), so caret
# silently used its default bootstrap resampling instead of repeated CV.
control <- trainControl(method = "repeatedcv", number = 10, repeats = 3)
randfor_tuned <- train(Survived ~ ., data = training_set, method = "rf",
                       metric = "Accuracy", ntree = 2000, trControl = control)
# Print the trained random forest
print(randfor_tuned)
# Get details of the final model
print(randfor_tuned$finalModel)
randfor_tuned$results
# Save the trained model
saveRDS(randfor_tuned, "./randfor_tuned.rds")
# Use the trained-model recipe result to fit a final random forest
set.seed(127)
final_modelN <- randomForest(Survived ~ ., training_set, mtry = 2, ntree = 2000)
# Confusion matrix on the held-out test set
predictions <- predict(final_modelN, newdata = testing_set)
confusionMatrix(predictions, testing_set$Survived)
########################################################################
# Fit a random forest with different mtry/ntree values
set.seed(127)
final_model3 <- randomForest(Survived ~ ., training_set, mtry = 1, ntree = 1800)
# BUG FIX: the model file was saved/loaded as "./inal_model_rf.rds"
# (missing leading 'f'); both occurrences are corrected consistently.
saveRDS(final_model3, "./final_model_rf.rds")
rf.model <- readRDS("./final_model_rf.rds")
print(rf.model)
predictions <- predict(rf.model, newdata = testing_set)
confusionMatrix(predictions, testing_set$Survived)
# Plot ROC and precision/recall curves
rf.preds.values <- predict(rf.model, testing_set[, -1], type = "prob")
rf.predictions.values <- rf.preds.values[, 2]
predictions <- prediction(rf.predictions.values, testing_set$Survived)
par(mfrow = c(1, 2))
plot.roc.curve(predictions, title.text = "Random Forest ROC Curve")
plot.pr.curve(predictions, title.text = "Random Forest Precision/Recall Curve")
# BUG FIX: release the parallel workers; the cluster was never stopped.
stopCluster(cl)
registerDoSEQ()
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subsetByCNVprofile.R
\name{subsetByCNVprofile}
\alias{subsetByCNVprofile}
\title{Get IDs of a subset of models}
\usage{
subsetByCNVprofile(hmms, profile)
}
\arguments{
\item{hmms}{A list of \code{\link{aneuHMM}} objects or a character vector with files that contain such objects.}
\item{profile}{A \code{\link{GRanges-class}} object with metadata column 'expected.state' and optionally columns 'expected.mstate' and 'expected.pstate'.}
}
\value{
A named logical vector with \code{TRUE} for all models that are concordant with the given \code{profile}.
}
\description{
Get the IDs of models that have a certain CNV profile. The result will be \code{TRUE} for models that overlap all specified ranges in \code{profile} by at least one base pair with the correct state.
}
\examples{
## Get results from a small-cell-lung-cancer sample
lung.folder <- system.file("extdata", "primary-lung", "hmms", package="AneuFinderData")
lung.files <- list.files(lung.folder, full.names=TRUE)
## Get all files that have a 3-somy on chromosome 1 and 4-somy on chromosome 2
profile <- GRanges(seqnames=c('1','2'), ranges=IRanges(start=c(1,1), end=c(195471971,182113224)),
expected.state=c('3-somy','4-somy'))
ids <- subsetByCNVprofile(lung.files, profile)
print(which(ids))
}
| /man/subsetByCNVprofile.Rd | no_license | ataudt/aneufinder | R | false | true | 1,347 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subsetByCNVprofile.R
\name{subsetByCNVprofile}
\alias{subsetByCNVprofile}
\title{Get IDs of a subset of models}
\usage{
subsetByCNVprofile(hmms, profile)
}
\arguments{
\item{hmms}{A list of \code{\link{aneuHMM}} objects or a character vector with files that contain such objects.}
\item{profile}{A \code{\link{GRanges-class}} object with metadata column 'expected.state' and optionally columns 'expected.mstate' and 'expected.pstate'.}
}
\value{
A named logical vector with \code{TRUE} for all models that are concordant with the given \code{profile}.
}
\description{
Get the IDs of models that have a certain CNV profile. The result will be \code{TRUE} for models that overlap all specified ranges in \code{profile} by at least one base pair with the correct state.
}
\examples{
## Get results from a small-cell-lung-cancer
lung.folder <- system.file("extdata", "primary-lung", "hmms", package="AneuFinderData")
lung.files <- list.files(lung.folder, full.names=TRUE)
## Get all files that have a 3-somy on chromosome 1 and 4-somy on chromosome 2
profile <- GRanges(seqnames=c('1','2'), ranges=IRanges(start=c(1,1), end=c(195471971,182113224)),
expected.state=c('3-somy','4-somy'))
ids <- subsetByCNVprofile(lung.files, profile)
print(which(ids))
}
|
\name{extract.deltas}
\alias{extract.deltas}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Extract Master's Delta parameters from a TAM model.
}
\description{
This function takes as its input a TAM object. It reads the TAM item parameters and organizes them into a matrix that can be used as input in the \code{\link{CCCfit}} function.
}
\usage{
extract.deltas(tamObject)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{tamObject}{
TAM object containing the results of a Rasch model or Partial Credit model.
}
}
\details{
This function organizes the item parameter results into a matrix where each row contains the parameters associated with an item and each column contains the parameters associated with a specific step (score 0 vs score 1, score 1 vs score 2, etc.). The resulting matrix will have as many rows as items and as many columns as the maximum number of steps among the items.
}
\value{
A matrix in which each row is an item and each column is a step
}
\references{
Masters, G. N. (1982). A Rasch model for partial credit scoring. \emph{Psychometrika}, 47(2), 149-174.
}
\author{
David Torres Irribarra
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{CCCfit}}
\code{\link{make.thresholds}}
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (tamObject)
{
delta.long <- tamObject$xsi
n.deltas <- apply(tamObject$B, 1, max)
delta.mat <- matrix(NA, nrow = length(n.deltas), ncol = max(n.deltas))
matCoords.row <- rep(1:length(n.deltas), n.deltas)
matCoords.col <- c()
for (i in 1:length(n.deltas)) {
for (j in 1:n.deltas[i]) {
matCoords.col <- c(matCoords.col, j)
}
}
delta.long$matCoords.row <- matCoords.row
delta.long$matCoords.col <- matCoords.col
for (k in 1:nrow(delta.long)) {
delta.mat[delta.long$matCoords.row[k], delta.long$matCoords.col[k]] <- delta.long$xsi[k]
}
delta.mat
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory (show via RShowDoc("KEYWORDS")):
% \keyword{ ~kwd1 }
% \keyword{ ~kwd2 }
% Use only one keyword per line.
% For non-standard keywords, use \concept instead of \keyword:
% \concept{ ~cpt1 }
% \concept{ ~cpt2 }
% Use only one concept per line.
| /man/extract.deltas.Rd | permissive | david-ti/wrightmap | R | false | false | 2,526 | rd | \name{extract.deltas}
\alias{extract.deltas}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Extract Master's Delta parameters from a TAM model.
}
\description{
This function takes as its input a TAM object. It reads the TAM item parameters and organizes them into a matrix that can be used as input in the \code{\link{CCCfit}} function.
}
\usage{
extract.deltas(tamObject)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{tamObject}{
TAM object containing the results of a Rasch model or Partial Credit model.
}
}
\details{
This function organizes the item parameter results into a matrix where each row contains the parameters associated with an item and each column contains the parameters associated with a specific step (score 0 vs score 1, score 1 vs score 2, etc.). The resulting matrix will have as many rows as items and as many columns as the maximum number of steps among the items.
}
\value{
A matrix in which each row is an item and each column is a step
}
\references{
Masters, G. N. (1982). A Rasch model for partial credit scoring. \emph{Psychometrika}, 47(2), 149-174.
}
\author{
David Torres Irribarra
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{CCCfit}}
\code{\link{make.thresholds}}
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (tamObject)
{
delta.long <- tamObject$xsi
n.deltas <- apply(tamObject$B, 1, max)
delta.mat <- matrix(NA, nrow = length(n.deltas), ncol = max(n.deltas))
matCoords.row <- rep(1:length(n.deltas), n.deltas)
matCoords.col <- c()
for (i in 1:length(n.deltas)) {
for (j in 1:n.deltas[i]) {
matCoords.col <- c(matCoords.col, j)
}
}
delta.long$matCoords.row <- matCoords.row
delta.long$matCoords.col <- matCoords.col
for (k in 1:nrow(delta.long)) {
delta.mat[delta.long$matCoords.row[k], delta.long$matCoords.col[k]] <- delta.long$xsi[k]
}
delta.mat
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory (show via RShowDoc("KEYWORDS")):
% \keyword{ ~kwd1 }
% \keyword{ ~kwd2 }
% Use only one keyword per line.
% For non-standard keywords, use \concept instead of \keyword:
% \concept{ ~cpt1 }
% \concept{ ~cpt2 }
% Use only one concept per line.
|
## Read the power-consumption data. "?" marks missing values; without
## na.strings = "?" the numeric columns of the full file are read as
## character (BUG FIX). skip = 1 skips the header line, so names are set
## manually below.
powerConsumption <- read.table("household_power_consumption.txt", skip = 1,
                               sep = ";", na.strings = "?")
## Giving heading names to the data
names(powerConsumption) <- c("Date", "Time", "Global_active_power",
                             "Global_reactive_power", "Voltage",
                             "Global_intensity", "Sub_metering_1",
                             "Sub_metering_2", "Sub_metering_3")
## Keep only the two relevant dates (2007-02-01 and 2007-02-02)
febPowerConsumption <- subset(powerConsumption,
                              Date == "1/2/2007" | Date == "2/2/2007")
## Build a proper datetime column from Date + Time.
## BUG FIX: the original converted Time to POSIXlt and then overwrote it
## with character strings using hard-coded row ranges (1:1440, 1441:2880),
## which breaks the time axis and silently assumes exact row counts.
febPowerConsumption$DateTime <- as.POSIXct(
  paste(febPowerConsumption$Date, febPowerConsumption$Time),
  format = "%d/%m/%Y %H:%M:%S")
## Open the PNG device with the required 480 x 480 size
png("plot3.png", width = 480, height = 480)
## Plot the first sub-metering series against time
plot(febPowerConsumption$DateTime, febPowerConsumption$Sub_metering_1,
     type = "l", xlab = "", ylab = "Energy sub metering")
## Overlay the other two sub-metering series
points(febPowerConsumption$DateTime, febPowerConsumption$Sub_metering_2,
       type = "l", col = "red")
points(febPowerConsumption$DateTime, febPowerConsumption$Sub_metering_3,
       type = "l", col = "blue")
## Add the key for the lines
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Close the device
dev.off()
| /plot3.R | no_license | dkchacha/ExData_Plotting1 | R | false | false | 1,601 | r | ## Storing the data
## Read the power-consumption data. "?" marks missing values; without
## na.strings = "?" the numeric columns of the full file are read as
## character (BUG FIX). skip = 1 skips the header line, so names are set
## manually below.
powerConsumption <- read.table("household_power_consumption.txt", skip = 1,
                               sep = ";", na.strings = "?")
## Giving heading names to the data
names(powerConsumption) <- c("Date", "Time", "Global_active_power",
                             "Global_reactive_power", "Voltage",
                             "Global_intensity", "Sub_metering_1",
                             "Sub_metering_2", "Sub_metering_3")
## Keep only the two relevant dates (2007-02-01 and 2007-02-02)
febPowerConsumption <- subset(powerConsumption,
                              Date == "1/2/2007" | Date == "2/2/2007")
## Build a proper datetime column from Date + Time.
## BUG FIX: the original converted Time to POSIXlt and then overwrote it
## with character strings using hard-coded row ranges (1:1440, 1441:2880),
## which breaks the time axis and silently assumes exact row counts.
febPowerConsumption$DateTime <- as.POSIXct(
  paste(febPowerConsumption$Date, febPowerConsumption$Time),
  format = "%d/%m/%Y %H:%M:%S")
## Open the PNG device with the required 480 x 480 size
png("plot3.png", width = 480, height = 480)
## Plot the first sub-metering series against time
plot(febPowerConsumption$DateTime, febPowerConsumption$Sub_metering_1,
     type = "l", xlab = "", ylab = "Energy sub metering")
## Overlay the other two sub-metering series
points(febPowerConsumption$DateTime, febPowerConsumption$Sub_metering_2,
       type = "l", col = "red")
points(febPowerConsumption$DateTime, febPowerConsumption$Sub_metering_3,
       type = "l", col = "blue")
## Add the key for the lines
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Close the device
dev.off()
|
#
# This test file has been generated by kwb.test::create_test_files()
# (manually refined: the expected error message is now asserted explicitly)
#
test_that("import_table_metadata() works", {
  # Calling without the mandatory 'file' argument must fail with R's
  # missing-argument error (message recorded by the generator above).
  expect_error(
    kwb.readxl:::import_table_metadata(),
    regexp = "\"file\" is missing"
  )
})
| /tests/testthat/test-function-import_table_metadata.R | permissive | KWB-R/kwb.readxl | R | false | false | 236 | r | #
# This test file has been generated by kwb.test::create_test_files()
# (manually refined: the expected error message is now asserted explicitly)
#
test_that("import_table_metadata() works", {
  # Calling without the mandatory 'file' argument must fail with R's
  # missing-argument error (message recorded by the generator above).
  expect_error(
    kwb.readxl:::import_table_metadata(),
    regexp = "\"file\" is missing"
  )
})
|
#' thredds: Crawler for Navigating THREDDS Catalogs
#'
#' A limited crawler for programmatically navigating THREDDS catalogs.
#'
#' @name thredds
#' @docType package
#' @import R6
#' @import rlang
#' @import httr
#' @importFrom magrittr %>%
NULL # roxygen attaches the package-level help page to this NULL placeholder
# NOTE(review): recent roxygen2 versions prefer the "_PACKAGE" sentinel over
# "@docType package" + NULL for package-level documentation.
| /R/thredds-package.R | permissive | BigelowLab/thredds | R | false | false | 246 | r | #' thredds: Crawler for Navigating THREDDS Catalogs
#'
#' A limited crawler for programmatically navigating THREDDS catalogs.
#'
#' @name thredds
#' @docType package
#' @import R6
#' @import rlang
#' @import httr
#' @importFrom magrittr %>%
NULL # roxygen attaches the package-level help page to this NULL placeholder
# NOTE(review): recent roxygen2 versions prefer the "_PACKAGE" sentinel over
# "@docType package" + NULL for package-level documentation.
|
# Building a Prod-Ready, Robust Shiny Application.
#
# README: each step of the dev files is optional, and you don't have to
# fill every dev scripts before getting started.
# 01_start.R should be filled at start.
# 02_dev.R should be used to keep track of your development during the project.
# 03_deploy.R should be used once you need to deploy your app.
#
#
########################################
#### CURRENT FILE: ON START SCRIPT #####
########################################
## Fill the DESCRIPTION ----
## Add meta data about your application
##
## /!\ Note: if you want to change the name of your app during development,
## either re-run this function, call golem::set_golem_name(), or don't forget
## to change the name in the app_sys() function in app_config.R /!\
##
golem::fill_desc(
  pkg_name = "shinymde", # The Name of the package containing the App
  pkg_title = "A shiny interface to mde: the missing data explorer.", # The Title of the package containing the App
  pkg_description = "Missing data exploration using mde, via a GUI.", # The Description of the package containing the App
  author_first_name = "Nelson", # Your First Name
  author_last_name = "Gonzabato", # Your Last Name
  author_email = "gonzabato@hotmail.com", # Your Email
  repo_url = "https://github.com/Nelson-Gon/shinymde"
)
usethis::use_pipe()
## Set {golem} options ----
golem::set_golem_options()
## Create Common Files ----
## See ?usethis for more information
## BUG FIX: the license holder was left as the template default "Golem User";
## it now matches the author declared in fill_desc() above.
usethis::use_mit_license("Nelson Gonzabato")
usethis::use_readme_rmd(open = FALSE)
usethis::use_code_of_conduct()
usethis::use_lifecycle_badge("Experimental")
usethis::use_news_md(open = FALSE)
## Use git ----
usethis::use_git()
## Init Testing Infrastructure ----
## Create a template for tests
golem::use_recommended_tests()
## Use Recommended Packages ----
golem::use_recommended_deps()
## Favicon ----
# If you want to change the favicon (default is golem's one)
golem::use_favicon() # path = "path/to/ico". Can be an online file.
golem::remove_favicon()
## Add helper functions ----
golem::use_utils_ui()
golem::use_utils_server()
# You're now set! ----
# go to dev/02_dev.R
rstudioapi::navigateToFile("dev/02_dev.R")
| /dev/01_start.R | permissive | ChrisBeeley/shinymde | R | false | false | 2,252 | r | # Building a Prod-Ready, Robust Shiny Application.
#
# README: each step of the dev files is optional, and you don't have to
# fill every dev scripts before getting started.
# 01_start.R should be filled at start.
# 02_dev.R should be used to keep track of your development during the project.
# 03_deploy.R should be used once you need to deploy your app.
#
#
########################################
#### CURRENT FILE: ON START SCRIPT #####
########################################
## Fill the DESCRIPTION ----
## Add meta data about your application
##
## /!\ Note: if you want to change the name of your app during development,
## either re-run this function, call golem::set_golem_name(), or don't forget
## to change the name in the app_sys() function in app_config.R /!\
##
golem::fill_desc(
  pkg_name = "shinymde", # The Name of the package containing the App
  pkg_title = "A shiny interface to mde: the missing data explorer.", # The Title of the package containing the App
  pkg_description = "Missing data exploration using mde, via a GUI.", # The Description of the package containing the App
  author_first_name = "Nelson", # Your First Name
  author_last_name = "Gonzabato", # Your Last Name
  author_email = "gonzabato@hotmail.com", # Your Email
  repo_url = "https://github.com/Nelson-Gon/shinymde"
)
usethis::use_pipe()
## Set {golem} options ----
golem::set_golem_options()
## Create Common Files ----
## See ?usethis for more information
## BUG FIX: the license holder was left as the template default "Golem User";
## it now matches the author declared in fill_desc() above.
usethis::use_mit_license("Nelson Gonzabato")
usethis::use_readme_rmd(open = FALSE)
usethis::use_code_of_conduct()
usethis::use_lifecycle_badge("Experimental")
usethis::use_news_md(open = FALSE)
## Use git ----
usethis::use_git()
## Init Testing Infrastructure ----
## Create a template for tests
golem::use_recommended_tests()
## Use Recommended Packages ----
golem::use_recommended_deps()
## Favicon ----
# If you want to change the favicon (default is golem's one)
golem::use_favicon() # path = "path/to/ico". Can be an online file.
golem::remove_favicon()
## Add helper functions ----
golem::use_utils_ui()
golem::use_utils_server()
# You're now set! ----
# go to dev/02_dev.R
rstudioapi::navigateToFile("dev/02_dev.R")
|
#######################
### Introduction and Review (KliPPs)
# by Kai Nehler
#### Review of R ----
1 + 2 # addition
3 == 4 # logical test for equality
## Functions and arguments
sum(1, 2) # addition via a function
log(x = 23, base = 10) # naming arguments explicitly
## Help
?log
## Objects and the environment
my_num <- sum(3, 4, 1, 2) # assign to an object
my_num # display the object
sqrt(my_num) # use the object inside a function
sqrt(sum(3, 4, 1, 2)) # nested function calls
sum(3, 4, 1, 2) |> sqrt() # using the native pipe
my_vec <- c(1, 2, 3, 4) # create a vector
## Reading and processing data
load(url("https://pandar.netlify.app/post/Depression.rda"))
### Data screening
head(Depression) # first 6 rows
names(Depression) # variable names
dim(Depression) # number of rows and columns
str(Depression) # information on variable types
is.factor(Depression$Geschlecht) # check whether the object is a factor
levels(Depression$Geschlecht) # distinct factor levels
levels(Depression$Geschlecht) <- c("maennlich", "weiblich") # attach meaningful labels to the factor levels
### Data extraction
Depression$Depressivitaet[5] # extract a single element
Depression$Depressivitaet[c(1, 3:5)] # extract several elements
Depression[c(1:2), c(1:2)] # extract rows/columns from the data frame
Depression[1, ] # 1st row, all columns
### Modifying data
Depression[5, 6] # query the current content
Depression[5, 6] <- "maennlich" # overwrite the current content
Depression[, 6] # query all gender values
## Simple analyses
### Simple descriptive statistics
mean(Depression$Depressivitaet) # mean
var(Depression$Depressivitaet) # variance
summary(Depression$Depressivitaet) # summary of a numeric variable
summary(Depression$Geschlecht) # summary of a factor
colMeans(Depression[1:4]) # column means
### Association and linear regression
plot(Depression$Lebenszufriedenheit, Depression$Depressivitaet, xlab = "Lebenszufriedenheit", ylab = "Depressivitaet")
lm(Depressivitaet ~ Lebenszufriedenheit, Depression) # linear regression
model <- lm(Depressivitaet ~ Lebenszufriedenheit, Depression) # assign result to an object
summary(model)
names(model) # other elements of the result list
### The t-test
t.test(Depressivitaet ~ Geschlecht, # dependent variable ~ independent variable
       data = Depression, # data set
       paired = FALSE, # samples are independent
       alternative = "two.sided", # two-sided test (default)
       var.equal = TRUE, # homoscedasticity holds (-> Levene test)
       conf.level = .95) # alpha = .05 (default)
ttest <- t.test(Depressivitaet ~ Geschlecht, # dependent variable ~ independent variable
                data = Depression, # data set
                paired = FALSE, # samples are independent
                alternative = "two.sided", # two-sided test (default)
                var.equal = TRUE, # homoscedasticity holds (-> Levene test)
                conf.level = .95) # alpha = .05 (default)
names(ttest) # all elements we can extract from this object
ttest$statistic # (empirical) t value
ttest$p.value # corresponding p value
| /content/post/KliPPs_MSc5a_R_Files/1_einleitung-und-wiederholung_RCode.R | no_license | martscht/projekte | R | false | false | 3,256 | r | #######################
### Einleitung und Wiederholung KliPPs
# von Kai Nehler
#### Wiederholung in R ----
1 + 2 # Addition
3 == 4 # Logische Abfrage auf Gleichheit
## Funktionen und Argumente
sum(1, 2) # Addition durch Funktion
log(x = 23, base = 10) # Benennung von Argumenten
## Hilfe
?log
## Objekte und das Environment
my_num <- sum(3, 4, 1, 2) # Objekt zuweisen
my_num # Objekt anzeigen
sqrt(my_num) # Objekt in Funktion einbinden
sqrt(sum(3, 4, 1, 2)) # Verschachtelte Funktionen
sum(3, 4, 1, 2) |> sqrt() # Nutzung Pipe
my_vec <- c(1, 2, 3, 4) # Erstellung Vektor
## Daten einlesen und verarbeiten
load(url("https://pandar.netlify.app/post/Depression.rda"))
### Datenscreening
head(Depression) # ersten 6 Zeilen
names(Depression) # Namen der Variablen
dim(Depression) # Anzahl der Zeilen und Spalten
str(Depression) # Informationen zu Variablentypen
is.factor(Depression$Geschlecht) # überprüfen, ob das Objekt ein Faktor ist
levels(Depression$Geschlecht) # verschiedene Stufen
levels(Depression$Geschlecht) <- c("maennlich", "weiblich") # Faktorstufen Bedeutung zuordnen
### Datenextraktion
Depression$Depressivitaet[5] # Extrahieren
Depression$Depressivitaet[c(1, 3:5)] # Mehrfach Extrahieren
Depression[c(1:2), c(1:2)] # Extrahieren aus Matrix
Depression[1, ] # 1. Zeile, alle Spalten
### Daten verändern
Depression[5, 6] # Aktuellen Inhalt abfragen
Depression[5, 6] <- "maennlich" # Aktuellen Inhalt überschreiben
Depression[, 6] # Alle Geschlechter abfragen
## Einfache Analysen
### Einfache Deskriptivstatistiken
mean(Depression$Depressivitaet) # Mittwelert
var(Depression$Depressivitaet) # Varianz
summary(Depression$Depressivitaet) # Zusammenfassung numerisch
summary(Depression$Geschlecht) # Zusammenfassung factor
colMeans(Depression[1:4]) # Spaltenmittelwerte
### Zusammenhang und lineare Regression
plot(Depression$Lebenszufriedenheit, Depression$Depressivitaet, xlab = "Lebenszufriedenheit", ylab = "Depressivitaet")
lm(Depressivitaet ~ Lebenszufriedenheit, Depression) # lineare Regression
model <- lm(Depressivitaet ~ Lebenszufriedenheit, Depression) # Objektzuweisung
summary(model)
names(model) #andere Inhalte der Liste
### Der t-Test
t.test(Depressivitaet ~ Geschlecht, # abhängige Variable ~ unabhängige Variable
data = Depression, # Datensatz
paired = FALSE, # Stichproben sind unabhängig
alternative = "two.sided", # zweiseitige Testung (Default)
var.equal = TRUE, # Homoskedastizität liegt vor (-> Levene-Test)
conf.level = .95) # alpha = .05 (Default)
ttest <- t.test(Depressivitaet ~ Geschlecht, # abhängige Variable ~ unabhängige Variable
data = Depression, # Datensatz
paired = FALSE, # Stichproben sind unabhängig
alternative = "two.sided", # zweiseitige Testung (Default)
var.equal = TRUE, # Homoskedastizität liegt vor (-> Levene-Test)
conf.level = .95) # alpha = .05 (Default)
names(ttest) # alle möglichen Argumente, die wir diesem Objekt entlocken können
ttest$statistic # (empirischer) t-Wert
ttest$p.value # zugehöriger p-Wert
|
#' Generate IndX and mR charts.
#'
#' @references Engineering Statistics Handbook 6.3.2, NIST/SEMATECH e-Handbook of Statistical Methods National Institute of Standards and Technology, Dec 2006
#' @references https://en.wikipedia.org/wiki/Nelson_rules
#' @references Lloyd S. Nelson, "Technical Aids," Journal of Quality Technology 16, no. 4 (October 1984), 238-239.
#' @references The 8 rules are:
#' 1 One point is more than 3 standard deviations from the mean.
#' 2 Nine (or more) points in a row are on the same side of the mean.
#' 3 Six (or more) points in a row are continually increasing (or decreasing).
#' 4 Fourteen (or more) points in a row alternate in direction, increasing then decreasing.
#' 5 Two (or three) out of three points in a row are more than 2 standard deviations from the mean in the same direction.
#' 6 Four (or five) out of five points in a row are more than 1 standard deviation from the mean in the same direction.
#' 7 Fifteen points in a row are all within 1 standard deviation of the mean on either side of the mean.
#' 8 Eight points in a row exist with none within 1 standard deviation of the mean and the points are in both directions from the mean.
#' @param x (mandatory) A data frame with the individual values in the first column and the time in the second column. It can be either a factor, a date or a string and it will be ordered automatically. \cr See \code{?spcTimeSeries}
#' @param linesColors (optional) A vector with 7 colors in order from the average + 3 standard deviations to the average - 3 standard deviations, including the average itself in the center. \cr Default value is \code{c("gray50", "gray65", "gray85", "black", "gray85", "gray65", "gray50")}
#' @param applyRules (optional) A vector with 8 boolean values indicating which rules must be applied. \cr Default value is \code{c(TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE)}
#' @param rulesColors (optional) A vector with colors, one for each rule. The last point of each violating run will be colored indicating the violation and the corresponding rule. \cr Default value is \code{c("red", "yellow2", "green", "magenta", "blue", "orange", "brown", "cyan")}
#' @param seg (optional) A vector with the positions of the points where there should be breaks and another pair of charts should be plotted. It may be used for better visualization when the series is too long. \cr Default value is \code{c()}
#' @param keepStats (optional) A boolean indicating if each segment's plot should be considered as part of the same series or independelty, as different series. If TRUE, it will be considered as part of the same series. If FALSE, each plot will have it's limits calculated independently, as well as the application of the rules. It's useful to compare different scenarios. \cr Default value is \code{TRUE}
#' @param verbose (optional) A boolean indicating if mean, standard deviation/UCL and number of violations should be printed. \cr Default value is \code{FALSE}
#' @examples
#' data("spcTimeSeries")
#' six_sigma_ctrl_chart(spcTimeSeries)
#' six_sigma_ctrl_chart(spcTimeSeries, verbose=TRUE)
#' six_sigma_ctrl_chart(spcTimeSeries, seg=c(25, 50, 75))
#' six_sigma_ctrl_chart(spcTimeSeries, seg=c(25, 50, 75), keepStats=FALSE, verbose=TRUE)
#' @return None
#' @export
six_sigma_ctrl_chart <- function(x,
                                 linesColors = c("gray50", "gray65", "gray85", "black", "gray85", "gray65", "gray50"),
                                 applyRules = c(TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE),
                                 rulesColors = c("red", "yellow2", "green", "magenta", "blue", "orange", "brown", "cyan"),
                                 seg = c(), keepStats = TRUE, verbose = FALSE) {
  # Order observations chronologically by the time column (2nd column)
  x <- x[order(x[, 2]), ]
  x[, 2] <- as.factor(x[, 2])
  if (length(seg) == 0) {
    # Single pair of charts over the whole series.
    # mR is the moving range: |x_i - x_{i+1}| for consecutive observations.
    mR <- abs(diff(x[, 1]))
    .plotSixSigma(x, mR, linesColors, rulesColors, applyRules,
                  mean(mR), mean(x[, 1]), sd(x[, 1]), verbose)
  } else {
    # Normalise the segment boundaries so the series starts at the first
    # observation and ends at the last one.
    if (seg[1] != 1) {
      seg <- append(seg, 1, after = 0)
    }
    # BUG FIX: the original compared against length(x), which for a data
    # frame is the number of COLUMNS (2), not the number of rows. That made
    # the check meaningless and could append a duplicate boundary, creating
    # an empty (reversed-range) segment.
    if (seg[length(seg)] != nrow(x)) {
      seg <- append(seg, nrow(x))
    }
    seg[1] <- 0 # lower bounds are exclusive: the first segment starts at row 1
    for (i in seq_len(length(seg) - 1)) {
      grDevices::dev.new()
      xseg <- x[(seg[i] + 1):seg[i + 1], ]
      if (keepStats) {
        # Control limits and center line come from the WHOLE series; only
        # the points of the current segment are drawn.
        mR <- abs(diff(x[, 1]))
        mRseg <- abs(diff(xseg[, 1]))
        .plotSixSigma(xseg, mRseg, linesColors, rulesColors, applyRules,
                      mean(mR), mean(x[, 1]), sd(x[, 1]), verbose)
      } else {
        # Each segment is treated as an independent series with its own
        # limits and rule evaluation.
        mRseg <- abs(diff(xseg[, 1]))
        .plotSixSigma(xseg, mRseg, linesColors, rulesColors, applyRules,
                      mean(mRseg), mean(xseg[, 1]), sd(xseg[, 1]), verbose)
      }
    }
  }
}
# Draw one page with the IndX (individuals) chart on top and the moving-range
# (mR) chart below it, colouring and labelling every rule-violating point.
# mean/sd/meanmR are supplied by the caller so segment plots can reuse
# full-series statistics (keepStats = TRUE in six_sigma_ctrl_chart).
.plotSixSigma <- function (x, mR, linesColors, rulesColors, applyRules, meanmR, mean, sd, verbose) {
  # n x 9 matrix of zone boundaries: mean + k*sd for k = -4..4, one row per point.
  zonesBoundaries = .findZones(x[,1], mean, sd)
  graphics::par(mfrow=c(2,1))
  graphics::plot(x[,1], xlim=c(x[,2][1],x[,2][length(x[,2])]), ylab="IndX", xlab="", cex.lab=0.8, yaxt="n", xaxt="n", ylim=c(min(zonesBoundaries), max(zonesBoundaries)), type="n", pch=16, cex=0.7)
  # NOTE(review): side/at are named, so the positional x[,2] is matched to the
  # `labels` argument of axis() — presumably intentional (time labels).
  graphics::axis(x[,2],side=1, las=2, at=x[,2], cex.axis=0.8)
  graphics::axis(side=2, cex.axis=0.8)
  # Columns 1..7 shifted up by one sd give the seven control lines at
  # mean-3sd .. mean+3sd (the matrix itself spans -4sd..+4sd).
  for (i in 1:7) {
    graphics::abline(zonesBoundaries[,i][1] + sd, b=0, col=linesColors[i])
    graphics::mtext(at=zonesBoundaries[,8-i][1] + sd,text=paste(" ",round(zonesBoundaries[,8-i][1] + sd,digits=2)), side=4, cex = 0.8, las=1)
  }
  # Data frame: one logical column per rule plus a $color column per point.
  colors <- .paintViolators(x[,1], rulesColors, applyRules, zonesBoundaries)
  graphics::points(y=x[,1], x=x[,2], col=colors$color, pch=16, cex=0.7)
  # Label every point that violates any rule, in that rule's colour.
  for (i in 1:(length(colors)-1)) {
    for (j in 1:length(colors$color)) {
      if (colors[j,i]) {
        graphics::text(x=x[,2][j], y=x[,1][j], col = rulesColors[i], labels = round(x[,1][j],2), pos=1, cex=0.8)
      }
    }
  }
  graphics::mtext(text=paste("Mean = ", round(mean,2)), side=1, padj=7, adj=0, cex = 0.8)
  graphics::mtext(text=paste("Std. dev. = ", round(sd,2)), side=1, padj=8, adj=0, cex = 0.8)
  # NOTE(review): this counts rule hits over all rule columns, so a point
  # violating several rules is counted once per rule.
  graphics::mtext(text=paste("Violations = ", sum(colors == TRUE)), side=1, padj=9, adj=0, cex = 0.8)
  if (verbose) {
    print(paste("IndX Mean = ", mean))
    print(paste("IndX Std. dev. = ", sd))
    print(paste("IndX Violations = ", sum(colors == TRUE)))
  }
  # mR chart: UCL = 3.267 * mean moving range (D4 constant for n = 2).
  ucl = 3.267*meanmR
  violations <- ifelse(mR > ucl, rulesColors[1], "black")
  graphics::plot(ylab="mR", xlab="", mR, yaxt="n", ylim=c(-1, max(ucl, max(mR)+1)), xaxt="n", type="n", cex.lab = 0.8)
  graphics::axis(side=2, cex.axis=0.8)
  graphics::axis(side=1, las=2, at=c(1:length(mR)), cex.axis=0.8)
  graphics::abline(a=ucl,b=0, col=linesColors[1])
  graphics::mtext(at=ucl,text=paste(" ",round(ucl,digits=2)), side=4, cex = 0.8, las=1)
  graphics::points(mR, col=violations, pch=16, cex=0.7)
  # Label moving-range points beyond the UCL.
  for (i in 1:length(violations)) {
    if (violations[i] != "black") {
      graphics::text(x=i, y=mR[i], col = rulesColors[1], labels = round(mR[i],2), pos=1, cex=0.8)
    }
  }
  graphics::mtext(text=paste("Mean = ", round(mean(mR),2)), side=1, padj=3, adj=0, cex = 0.8)
  graphics::mtext(text=paste("UCL = ", round(ucl,2)), side=1, padj=4, adj=0, cex = 0.8)
  graphics::mtext(text=paste("Violations = ", sum(violations == rulesColors[1])), side=1, padj=5, adj=0, cex = 0.8)
  if (verbose) {
    print(paste("mR Mean = ", mean(mR)))
    print(paste("mR UCL = ", ucl))
    print(paste("mR Violations = ", sum(violations == rulesColors[1])))
  }
}
# Classify every point of x against the eight (Nelson-style) rules and attach
# the colour of the first violated rule; rule 1 has the highest priority and
# unflagged points are painted "black".
.paintViolators <- function(x, rulesColors, applyRules, zones) {
  # Collapse the n x 9 boundary matrix to one integer zone index per point:
  # count how many boundaries (mean + k*sd, k = -4..4) each point reaches,
  # minus 4. The resulting encoding is asymmetric (see .runTests).
  zones <- rowSums(x >= zones) - 4
  # One row of logical rule flags per point.
  results <- plyr::ldply(1:length(x), function(i) {
    .runTests(x, zones, i, applyRules)
  })
  results$color <- ifelse(results$rule1!=0, rulesColors[1],
                   ifelse(results$rule2!=0, rulesColors[2],
                   ifelse(results$rule3!=0, rulesColors[3],
                   ifelse(results$rule4!=0, rulesColors[4],
                   ifelse(results$rule5!=0, rulesColors[5],
                   ifelse(results$rule6!=0, rulesColors[6],
                   ifelse(results$rule7!=0, rulesColors[7],
                   ifelse(results$rule8!=0, rulesColors[8],
                   "black"))))))))
  results
}
# Build the zone-boundary matrix for a series: one row per observation and
# nine columns holding mean + k*sd for k = -4..4. Every column is constant;
# the data values of x only determine the number of rows.
.findZones <- function(x, mean, sd) {
  n_obs <- length(x)
  sapply(seq(-4, 4), function(k) rep(mean + k * sd, n_obs))
}
# Evaluate the eight run rules for observation i. `zones` is the per-point
# integer zone index produced in .paintViolators (rowSums(x >= boundaries) - 4),
# which is asymmetric: zone k > 0 means x is in [(k-1)*sd, k*sd] above the
# mean, while zone k <= 0 means x is in [(k-1)*sd, k*sd] below it. That is why
# the positive/negative thresholds below differ by one (e.g. "> 3" vs "< -2").
.runTests <- function(x, zones, i, applyRules) {
  # Rule 1: the point is beyond 3 sd of the mean (either side).
  if (applyRules[1]) {
    values <- zones[i]
    rule1 <- any(values > 3) || any(values < -2)
  } else {
    rule1 = FALSE
  }
  # Rule 2: nine points in a row on the same side of the mean.
  if (applyRules[2]) {
    values <- zones[max(i-8, 1):i] # Rule 2
    rule2 <- length(values) == 9 && (all(values > 0) || all(values < 1))
  } else {
    rule2 = FALSE
  }
  # Rule 3: six points in a row steadily increasing or decreasing
  # (monotone run detected via cummax/cummin on the raw values).
  if (applyRules[3]) {
    values <- x[max((i-5),1):i] # Rule 3
    rule3 <- length(values) == 6 && (all(values == cummax(values)) || all(values == cummin(values)))
  } else {
    rule3 = FALSE
  }
  # Rule 4: fourteen points in a row alternating up and down (the sign of
  # consecutive differences must strictly alternate).
  if (applyRules[4]) {
    values <- x[max(i-13, 1):i] # Rule 4
    rule4 <- length(values) == 14 && (all((values[1:(length(values)-1)] - values[2:length(values)] < 0) == rep(c(TRUE, FALSE), length.out=13)) || all((values[1:(length(values)-1)] - values[2:length(values)] < 0) == rep(c(FALSE, TRUE), length.out=13)))
  } else {
    rule4 = FALSE
  }
  # Rule 5: two (or three) of three points beyond 2 sd on the same side.
  if (applyRules[5]) {
    values <- zones[max(i-2, 1):i] # Rule 5
    rule5 <- length(values) == 3 && (sum(values >= 3) >= 2 || sum(values <= -2) >= 2)
  } else {
    rule5 = FALSE
  }
  # Rule 6: four (or five) of five points beyond 1 sd on the same side.
  if (applyRules[6]) {
    values <- zones[max(i-4, 1):i] # Rule 6
    rule6 <- length(values) == 5 && (sum(values >= 2) >= 4 || sum(values <= -1) >= 4)
  } else {
    rule6 = FALSE
  }
  # Rule 7: fifteen points in a row all within 1 sd of the mean (zones 0/1).
  if (applyRules[7]) {
    values <- zones[max(i-14, 1):i] # Rule 7
    rule7 <- length(values) == 15 && sum(values == 0) + sum( values == 1) == 15
  } else {
    rule7 = FALSE
  }
  # Rule 8: eight points in a row all beyond 1 sd (no length check needed —
  # a shorter window cannot reach a count of 8).
  if (applyRules[8]) {
    values <- zones[max(i-7, 1):i] # Rule 8
    rule8 <- sum(values < 0) + sum(values > 1) == 8
  } else {
    rule8 = FALSE
  }
  # Named logical vector; plyr::ldply stacks these into a data frame.
  c("rule1"=rule1, "rule2"=rule2, "rule3"=rule3, "rule4"=rule4, "rule5"=rule5, "rule6"=rule6, "rule7"=rule7, "rule8"=rule8)
}
| /R/spcRules.R | no_license | pplupo/processcontrol | R | false | false | 10,206 | r | #' Generate IndX and mR charts.
#'
#' @references Engineering Statistics Handbook 6.3.2, NIST/SEMATECH e-Handbook of Statistical Methods National Institute of Standards and Technology, Dec 2006
#' @references https://en.wikipedia.org/wiki/Nelson_rules
#' @references Lloyd S. Nelson, "Technical Aids," Journal of Quality Technology 16, no. 4 (October 1984), 238-239.
#' @references The 8 rules are:
#' 1 One point is more than 3 standard deviations from the mean.
#' 2 Nine (or more) points in a row are on the same side of the mean.
#' 3 Six (or more) points in a row are continually increasing (or decreasing).
#' 4 Fourteen (or more) points in a row alternate in direction, increasing then decreasing.
#' 5 Two (or three) out of three points in a row are more than 2 standard deviations from the mean in the same direction.
#' 6 Four (or five) out of five points in a row are more than 1 standard deviation from the mean in the same direction.
#' 7 Fifteen points in a row are all within 1 standard deviation of the mean on either side of the mean.
#' 8 Eight points in a row exist with none within 1 standard deviation of the mean and the points are in both directions from the mean.
#' @param x (mandatory) A data frame with the individual values in the first column and the time in the second column. It can be either a factor, a date or a string and it will be ordered automatically. \cr See \code{?spcTimeSeries}
#' @param linesColors (optional) A vector with 7 colors in order from the average + 3 standard deviations to the average - 3 standard deviations, including the average itself in the center. \cr Default value is \code{c("gray50", "gray65", "gray85", "black", "gray85", "gray65", "gray50")}
#' @param applyRules (optional) A vector with 8 boolean values indicating which rules must be applied. \cr Default value is \code{c(TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE)}
#' @param rulesColors (optional) A vector with colors, one for each rule. The last point of each violating run will be colored indicating the violation and the corresponding rule. \cr Default value is \code{c("red", "yellow2", "green", "magenta", "blue", "orange", "brown", "cyan")}
#' @param seg (optional) A vector with the positions of the points where there should be breaks and another pair of charts should be plotted. It may be used for better visualization when the series is too long. \cr Default value is \code{c()}
#' @param keepStats (optional) A boolean indicating if each segment's plot should be considered as part of the same series or independently, as different series. If TRUE, it will be considered as part of the same series. If FALSE, each plot will have its limits calculated independently, as well as the application of the rules. It's useful to compare different scenarios. \cr Default value is \code{TRUE}
#' @param verbose (optional) A boolean indicating if mean, standard deviation/UCL and number of violations should be printed. \cr Default value is \code{FALSE}
#' @examples
#' data("spcTimeSeries")
#' six_sigma_ctrl_chart(spcTimeSeries)
#' six_sigma_ctrl_chart(spcTimeSeries, verbose=TRUE)
#' six_sigma_ctrl_chart(spcTimeSeries, seg=c(25, 50, 75))
#' six_sigma_ctrl_chart(spcTimeSeries, seg=c(25, 50, 75), keepStats=FALSE, verbose=TRUE)
#' @return None
#' @export
# Duplicate of the six_sigma_ctrl_chart definition earlier in this dump.
# Sorts the series by time, then draws one IndX/mR chart pair (or one pair per
# requested segment). NOTE(review): `length(x)` on a data frame is the number
# of COLUMNS, not rows; `length(x[,1])` (nrow) was probably intended in the
# seg-boundary check below.
six_sigma_ctrl_chart <- function(x,
                 linesColors = c("gray50", "gray65", "gray85", "black", "gray85", "gray65", "gray50"),
                 applyRules = c(TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE),
                 rulesColors = c("red", "yellow2", "green", "magenta", "blue", "orange", "brown", "cyan"),
                 seg = c(), keepStats=TRUE, verbose=FALSE) {
  x = x[order(x[2]),]
  x[,2] <- as.factor(x[,2])
  if (length(seg) == 0) {
    # Whole-series chart: moving range of consecutive individual values.
    mR = abs(x[,1][1:(length(x[,1])-1)] - x[,1][2:length(x[,1])])
    meanmR = mean(mR)
    mean = mean(x[,1])
    sd = sd(x[,1])
    .plotSixSigma(x, mR, linesColors, rulesColors, applyRules, meanmR, mean, sd, verbose)
  } else {
    if (seg[1] != 1) {
      seg = append(seg, 1, after=0)
    }
    if (seg[length(seg)] != length(x)) {
      seg = append(seg, length(x[,1]))
    }
    seg[1] <- -1  # seg[1] + 1 == 0, and R drops 0 indices, so rows 1..seg[2] are kept
    for (i in 1:(length(seg)-1)) {
      grDevices::dev.new()
      xseg = x[c((seg[i]+1):seg[i+1]),]
      if (keepStats) {
        # keepStats = TRUE: limits come from the full series, only the plotted
        # window is restricted to the segment.
        mR = abs(x[,1][1:(length(x[,1])-1)] - x[,1][2:length(x[,1])])
        meanmR = mean(mR)
        mean = mean(x[,1])
        sd = sd(x[,1])
        mRseg = abs(xseg[,1][1:(length(xseg[,1])-1)] - xseg[,1][2:length(xseg[,1])])
        .plotSixSigma(xseg, mRseg, linesColors, rulesColors, applyRules, meanmR, mean, sd, verbose)
      } else {
        # keepStats = FALSE: each segment is an independent series.
        mR = abs(xseg[,1][1:(length(xseg[,1])-1)] - xseg[,1][2:length(xseg[,1])])
        meanmR = mean(mR)
        mean = mean(xseg[,1])
        sd = sd(xseg[,1])
        .plotSixSigma(xseg, mR, linesColors, rulesColors, applyRules, meanmR, mean, sd, verbose)
      }
    }
  }
}
# Duplicate of the .plotSixSigma definition earlier in this dump: renders the
# IndX chart (top) and mR chart (bottom), labelling rule-violating points.
.plotSixSigma <- function (x, mR, linesColors, rulesColors, applyRules, meanmR, mean, sd, verbose) {
  # n x 9 boundary matrix: mean + k*sd, k = -4..4.
  zonesBoundaries = .findZones(x[,1], mean, sd)
  graphics::par(mfrow=c(2,1))
  graphics::plot(x[,1], xlim=c(x[,2][1],x[,2][length(x[,2])]), ylab="IndX", xlab="", cex.lab=0.8, yaxt="n", xaxt="n", ylim=c(min(zonesBoundaries), max(zonesBoundaries)), type="n", pch=16, cex=0.7)
  graphics::axis(x[,2],side=1, las=2, at=x[,2], cex.axis=0.8)
  graphics::axis(side=2, cex.axis=0.8)
  # Columns 1..7 shifted by +sd give the mean-3sd .. mean+3sd control lines.
  for (i in 1:7) {
    graphics::abline(zonesBoundaries[,i][1] + sd, b=0, col=linesColors[i])
    graphics::mtext(at=zonesBoundaries[,8-i][1] + sd,text=paste(" ",round(zonesBoundaries[,8-i][1] + sd,digits=2)), side=4, cex = 0.8, las=1)
  }
  colors <- .paintViolators(x[,1], rulesColors, applyRules, zonesBoundaries)
  graphics::points(y=x[,1], x=x[,2], col=colors$color, pch=16, cex=0.7)
  for (i in 1:(length(colors)-1)) {
    for (j in 1:length(colors$color)) {
      if (colors[j,i]) {
        graphics::text(x=x[,2][j], y=x[,1][j], col = rulesColors[i], labels = round(x[,1][j],2), pos=1, cex=0.8)
      }
    }
  }
  graphics::mtext(text=paste("Mean = ", round(mean,2)), side=1, padj=7, adj=0, cex = 0.8)
  graphics::mtext(text=paste("Std. dev. = ", round(sd,2)), side=1, padj=8, adj=0, cex = 0.8)
  graphics::mtext(text=paste("Violations = ", sum(colors == TRUE)), side=1, padj=9, adj=0, cex = 0.8)
  if (verbose) {
    print(paste("IndX Mean = ", mean))
    print(paste("IndX Std. dev. = ", sd))
    print(paste("IndX Violations = ", sum(colors == TRUE)))
  }
  # mR chart; UCL = 3.267 * mean moving range.
  ucl = 3.267*meanmR
  violations <- ifelse(mR > ucl, rulesColors[1], "black")
  graphics::plot(ylab="mR", xlab="", mR, yaxt="n", ylim=c(-1, max(ucl, max(mR)+1)), xaxt="n", type="n", cex.lab = 0.8)
  graphics::axis(side=2, cex.axis=0.8)
  graphics::axis(side=1, las=2, at=c(1:length(mR)), cex.axis=0.8)
  graphics::abline(a=ucl,b=0, col=linesColors[1])
  graphics::mtext(at=ucl,text=paste(" ",round(ucl,digits=2)), side=4, cex = 0.8, las=1)
  graphics::points(mR, col=violations, pch=16, cex=0.7)
  for (i in 1:length(violations)) {
    if (violations[i] != "black") {
      graphics::text(x=i, y=mR[i], col = rulesColors[1], labels = round(mR[i],2), pos=1, cex=0.8)
    }
  }
  graphics::mtext(text=paste("Mean = ", round(mean(mR),2)), side=1, padj=3, adj=0, cex = 0.8)
  graphics::mtext(text=paste("UCL = ", round(ucl,2)), side=1, padj=4, adj=0, cex = 0.8)
  graphics::mtext(text=paste("Violations = ", sum(violations == rulesColors[1])), side=1, padj=5, adj=0, cex = 0.8)
  if (verbose) {
    print(paste("mR Mean = ", mean(mR)))
    print(paste("mR UCL = ", ucl))
    print(paste("mR Violations = ", sum(violations == rulesColors[1])))
  }
}
# Duplicate of the .paintViolators definition earlier in this dump: flags each
# point per rule and attaches the colour of the highest-priority violated rule.
.paintViolators <- function(x, rulesColors, applyRules, zones) {
  # Collapse the boundary matrix to one integer zone index per point.
  zones <- rowSums(x >= zones) - 4
  results <- plyr::ldply(1:length(x), function(i) {
    .runTests(x, zones, i, applyRules)
  })
  results$color <- ifelse(results$rule1!=0, rulesColors[1],
                   ifelse(results$rule2!=0, rulesColors[2],
                   ifelse(results$rule3!=0, rulesColors[3],
                   ifelse(results$rule4!=0, rulesColors[4],
                   ifelse(results$rule5!=0, rulesColors[5],
                   ifelse(results$rule6!=0, rulesColors[6],
                   ifelse(results$rule7!=0, rulesColors[7],
                   ifelse(results$rule8!=0, rulesColors[8],
                   "black"))))))))
  results
}
# Zone boundaries for a series: an n x 9 matrix whose column for multiplier m
# (m = -4..4) is the constant m * sd + mean repeated once per observation.
.findZones <- function(x, mean, sd) {
  zone_multipliers <- seq.int(-4L, 4L)
  sapply(zone_multipliers, function(m) rep_len(m * sd + mean, length(x)))
}
# Duplicate of the .runTests definition earlier in this dump: evaluates the
# eight run rules for observation i against the asymmetric integer zone
# encoding produced by .paintViolators (zone k > 0 lies above the mean,
# zone k <= 0 below it — hence thresholds such as "> 3" vs "< -2").
.runTests <- function(x, zones, i, applyRules) {
  # Rule 1: point beyond 3 sd of the mean.
  if (applyRules[1]) {
    values <- zones[i]
    rule1 <- any(values > 3) || any(values < -2)
  } else {
    rule1 = FALSE
  }
  # Rule 2: nine in a row on the same side of the mean.
  if (applyRules[2]) {
    values <- zones[max(i-8, 1):i] # Rule 2
    rule2 <- length(values) == 9 && (all(values > 0) || all(values < 1))
  } else {
    rule2 = FALSE
  }
  # Rule 3: six in a row monotonically increasing or decreasing.
  if (applyRules[3]) {
    values <- x[max((i-5),1):i] # Rule 3
    rule3 <- length(values) == 6 && (all(values == cummax(values)) || all(values == cummin(values)))
  } else {
    rule3 = FALSE
  }
  # Rule 4: fourteen in a row strictly alternating up/down.
  if (applyRules[4]) {
    values <- x[max(i-13, 1):i] # Rule 4
    rule4 <- length(values) == 14 && (all((values[1:(length(values)-1)] - values[2:length(values)] < 0) == rep(c(TRUE, FALSE), length.out=13)) || all((values[1:(length(values)-1)] - values[2:length(values)] < 0) == rep(c(FALSE, TRUE), length.out=13)))
  } else {
    rule4 = FALSE
  }
  # Rule 5: two of three beyond 2 sd on the same side.
  if (applyRules[5]) {
    values <- zones[max(i-2, 1):i] # Rule 5
    rule5 <- length(values) == 3 && (sum(values >= 3) >= 2 || sum(values <= -2) >= 2)
  } else {
    rule5 = FALSE
  }
  # Rule 6: four of five beyond 1 sd on the same side.
  if (applyRules[6]) {
    values <- zones[max(i-4, 1):i] # Rule 6
    rule6 <- length(values) == 5 && (sum(values >= 2) >= 4 || sum(values <= -1) >= 4)
  } else {
    rule6 = FALSE
  }
  # Rule 7: fifteen in a row within 1 sd (zones 0 and 1 only).
  if (applyRules[7]) {
    values <- zones[max(i-14, 1):i] # Rule 7
    rule7 <- length(values) == 15 && sum(values == 0) + sum( values == 1) == 15
  } else {
    rule7 = FALSE
  }
  # Rule 8: eight in a row all beyond 1 sd of the mean.
  if (applyRules[8]) {
    values <- zones[max(i-7, 1):i] # Rule 8
    rule8 <- sum(values < 0) + sum(values > 1) == 8
  } else {
    rule8 = FALSE
  }
  c("rule1"=rule1, "rule2"=rule2, "rule3"=rule3, "rule4"=rule4, "rule5"=rule5, "rule6"=rule6, "rule7"=rule7, "rule8"=rule8)
}
|
##' cohort2pool function
##'Calculates total biomass using veg cohort file.
##'
##' @export
##' @param veg_file path to standard cohort veg_file
##' @param allom_param parameters for allometric equation, a and b. Based on base-10 log-log linear model (power law)
##' @author Saloni Shah
##' @examples
##' \dontrun{
##' veg_file <- "~/downloads/FFT_site_1-25665/FFT.2008.veg.rds"
##' cohort2pool(veg_file = veg_file, allom_param = NULL)
##' }
cohort2pool <- function(veg_file, allom_param = NULL) {
  ## Build the site ID from the parent directory name, which is expected to
  ## end in "<suffix><non-digits><base_id>" (e.g. "FFT_site_1-25665").
  path <- dirname(veg_file)
  last_dir <- basename(path)
  nums_id <- strsplit(last_dir, "[^[:digit:]]")
  base_id <- nums_id[[1]][length(nums_id[[1]])]
  suffix <- nums_id[[1]][(length(nums_id[[1]]) - 1)]
  ## siteid packs the two numeric fragments into a single identifier.
  siteid <- as.numeric(suffix) * 1e9 + as.numeric(base_id)
  ## Load the standard cohort veg file (a list; element 2 holds the tree table).
  dat <- readRDS(veg_file)
  ## Grab DBH (diameter at breast height) per tree.
  dbh <- dat[[2]]$DBH
  ## Allometry coefficients for log10(biomass) = a + b * log10(DBH).
  ## User-supplied parameters are not supported yet.
  if (is.null(allom_param)) {
    a <- 2
    b <- 0.3
  } else {
    print("user provided allometry parameters not yet supported")
    return(NULL)
  }
  ## Above-ground biomass per tree; trees with missing DBH contribute zero.
  biomass <- 10^(a + b * log10(dbh))
  biomass[is.na(biomass)] <- 0
  tot_biomass <- sum(biomass)
  AGB <- tot_biomass
  ## Prepare arguments for the pool initial-condition writer.
  dims <- list(time = 1) # time dimension may be irrelevant
  variables <- list(AGB = tot_biomass)
  input <- list(dims = dims,
                vals = variables)
  ## Write the pool IC netCDF next to the veg file and return its record.
  result <- PEcAn.data.land::pool_ic_list2netcdf(input = input, outdir = path, siteid = siteid)
  return(result)
}
##' Calculates total biomass using veg cohort file.
##'
##' @export
##' @param veg_file path to standard cohort veg_file
##' @param allom_param parameters for allometric equation, a and b. Based on base-10 log-log linear model (power law)
##' @author Saloni Shah
##' @examples
##' \dontrun{
##' veg_file <- "~/downloads/FFT_site_1-25665/FFT.2008.veg.rds"
##' cohort2pool(veg_file = veg_file, allom_param = NULL)
##' }
cohort2pool <- function(veg_file, allom_param = NULL) {
  ## Building Site ID from past directories
  ## (parent directory name is split on non-digit runs; the last fragment is
  ## the base id and the one before it the suffix)
  path <- dirname(veg_file)
  last_dir <- basename(path)
  nums_id <- strsplit(last_dir,"[^[:digit:]]")
  base_id <- nums_id[[1]][length(nums_id[[1]])]
  suffix <- nums_id[[1]][(length(nums_id[[1]])-1)]
  siteid = as.numeric(suffix)*1e9 + as.numeric(base_id)
  ## load data
  dat <- readRDS(veg_file)
  ## Grab DBH (diameter at breast height; element 2 of the list holds the tree table)
  dbh <- dat[[2]]$DBH
  ## Grab allometry: log10(biomass) = a + b * log10(DBH); user-supplied
  ## parameters are not supported yet.
  if(is.null(allom_param)){
    a <- 2
    b <- 0.3
  } else {
    print("user provided allometry parameters not yet supported")
    return(NULL)
  }
  #Calculate AGB (trees with missing DBH contribute zero)
  biomass = 10^(a + b*log10(dbh))
  biomass[is.na(biomass)] <- 0
  tot_biomass <- sum(biomass)
  AGB <- tot_biomass
  #Prep Arguments for pool_ic function
  dims <- list(time =1) #Time dimension may be irrelevant
  variables <-list(AGB = tot_biomass)
  input <- list(dims = dims,
                vals = variables)
  # Execute pool_ic function (writes the netCDF next to the veg file)
  result <- PEcAn.data.land::pool_ic_list2netcdf(input = input, outdir = path, siteid = siteid)
  return(result)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/asDataFrame.R
\name{as.data.frame}
\alias{as.data.frame}
\alias{as.data.frame.listOfBlueprints}
\alias{as.data.frame.listOfFeaturelists}
\alias{as.data.frame.listOfModels}
\alias{as.data.frame.projectSummaryList}
\alias{as.data.frame.listOfDataRobotPredictionDatasets}
\title{DataRobot S3 object methods for R's generic as.data.frame function}
\usage{
\method{as.data.frame}{listOfBlueprints}(x, row.names = NULL,
optional = FALSE, ...)
\method{as.data.frame}{listOfFeaturelists}(x, row.names = NULL,
optional = FALSE, ...)
\method{as.data.frame}{listOfModels}(x, row.names = NULL,
optional = FALSE, simple = TRUE, ...)
\method{as.data.frame}{projectSummaryList}(x, row.names = NULL,
optional = FALSE, simple = TRUE, ...)
\method{as.data.frame}{listOfDataRobotPredictionDatasets}(x,
row.names = NULL, optional = FALSE, ...)
}
\arguments{
\item{x}{S3 object to be converted into a dataframe.}
\item{row.names}{character. Optional. Row names for the dataframe returned by
the method.}
\item{optional}{logical. Optional. If TRUE, setting row
names and converting column names to syntactic names is optional: see help
for the \code{make.names} function.}
\item{\dots}{list. Additional optional parameters to be passed to the
generic as.data.frame function (not used at present).}
\item{simple}{logical. Optional. if TRUE (the default), a
simplified dataframe is returned for objects of class listOfModels
or projectSummaryList.}
}
\value{
A dataframe containing some or all of the data from the
original S3 object; see Details.
}
\description{
These functions extend R's generic as.data.frame function to the
DataRobot S3 object classes listOfBlueprints, listOfFeaturelists,
listOfModels, and projectSummaryList.
If simple = TRUE (the default), this method returns a dataframe with
one row for each model and the following columns: projectName, projectId,
created, fileName, target, targetType, positiveClass, metric,
autopilotMode, stage, maxTrainPct, and holdoutUnlocked.
If simple = FALSE, a dataframe is constructed from all elements of
projectSummaryList.
}
\details{
All of the DataRobot S3 `listOf' class objects have relatively
complex structures and are often easier to work with as dataframes.
The methods described here extend R's generic as.data.frame function
to convert objects of these classes to convenient dataframes. For
objects of class listOfBlueprints and listOfFeaturelists or objects
of class listOfModels and projectSummaryList with simple = FALSE,
the dataframes contain all information from the original S3 object.
The default value simple = TRUE provides simpler dataframes for
objects of class listOfModels and projectSummaryList.
If simple = TRUE (the default), this method returns a dataframe with
one row for each model and the following columns: modelType, expandedModel
(constructed from modelType and processes from the listOfModels elements),
modelId, blueprintId, featurelistName, featurelistId, samplePct, and the
metrics validation value for projectMetric. If simple = FALSE, the method
returns a complete dataframe with one row for each model and columns
constructed from all fields in the original listOfModels object
}
| /man/as.data.frame.Rd | no_license | anno526/datarobot | R | false | true | 3,248 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/asDataFrame.R
\name{as.data.frame}
\alias{as.data.frame}
\alias{as.data.frame.listOfBlueprints}
\alias{as.data.frame.listOfFeaturelists}
\alias{as.data.frame.listOfModels}
\alias{as.data.frame.projectSummaryList}
\alias{as.data.frame.listOfDataRobotPredictionDatasets}
\title{DataRobot S3 object methods for R's generic as.data.frame function}
\usage{
\method{as.data.frame}{listOfBlueprints}(x, row.names = NULL,
optional = FALSE, ...)
\method{as.data.frame}{listOfFeaturelists}(x, row.names = NULL,
optional = FALSE, ...)
\method{as.data.frame}{listOfModels}(x, row.names = NULL,
optional = FALSE, simple = TRUE, ...)
\method{as.data.frame}{projectSummaryList}(x, row.names = NULL,
optional = FALSE, simple = TRUE, ...)
\method{as.data.frame}{listOfDataRobotPredictionDatasets}(x,
row.names = NULL, optional = FALSE, ...)
}
\arguments{
\item{x}{S3 object to be converted into a dataframe.}
\item{row.names}{character. Optional. Row names for the dataframe returned by
the method.}
\item{optional}{logical. Optional. If TRUE, setting row
names and converting column names to syntactic names: see help for
\code{make.names} function.}
\item{\dots}{list. Additional optional parameters to be passed to the
generic as.data.frame function (not used at present).}
\item{simple}{logical. Optional. if TRUE (the default), a
simplified dataframe is returned for objects of class listOfModels
or projectSummaryList.}
}
\value{
A dataframe containing some or all of the data from the
original S3 object; see Details.
}
\description{
These functions extend R's generic as.data.frame function to the
DataRobot S3 object classes listOfBlueprints, listOfFeaturelists,
listOfModels, and projectSummaryList.
If simple = TRUE (the default), this method returns a dataframe with
one row for each model and the following columns: projectName, projectId,
created, fileName, target, targetType, positiveClass, metric,
autopilotMode, stage, maxTrainPct, and holdoutUnlocked.
If simple = FALSE, a dataframe is constructed from all elements of
projectSummaryList.
}
\details{
All of the DataRobot S3 `listOf' class objects have relatively
complex structures and are often easier to work with as dataframes.
The methods described here extend R's generic as.data.frame function
to convert objects of these classes to convenient dataframes. For
objects of class listOfBlueprints and listOfFeaturelists or objects
of class listOfModels and projectSummaryList with simple = FALSE,
the dataframes contain all information from the original S3 object.
The default value simple = TRUE provides simpler dataframes for
objects of class listOfModels and projectSummaryList.
If simple = TRUE (the default), this method returns a dataframe with
one row for each model and the following columns: modelType, expandedModel
(constructed from modelType and processes from the listOfModels elements),
modelId, blueprintId, featurelistName, featurelistId, samplePct, and the
metrics validation value for projectMetric. If simple = FALSE, the method
returns a complete dataframe with one row for each model and columns
constructed from all fields in the original listOfModels object
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/svd_cov.R
\name{postprocess_preds}
\alias{postprocess_preds}
\title{Extract predictions from X_hat matrix}
\usage{
postprocess_preds(X_hat, newdata)
}
\arguments{
\item{X_hat}{A user x track matrix with predictions.}
\item{newdata}{A list containing the following elements, paralleling the
input provided by Kaggle,\cr
$train: A data.frame giving the artist, track, user, and time info. \cr
$words: A matrix giving word indicators for artist-user pairs. \cr
 $users: A matrix giving survey results for each user. \cr}
}
\value{
A vector of ratings corresponding to the user x track pairs in $train
from the newdata.
}
\description{
Extract predictions from X_hat matrix
}
| /emi/man/postprocess_preds.Rd | no_license | krisrs1128/multitable_emi | R | false | false | 762 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/svd_cov.R
\name{postprocess_preds}
\alias{postprocess_preds}
\title{Extract predictions from X_hat matrix}
\usage{
postprocess_preds(X_hat, newdata)
}
\arguments{
\item{X_hat}{A user x track matrix with predictions.}
\item{newdata}{A list containing the following elements, paralleling the
input provided by Kaggle,\cr
$train: A data.frame giving the artist, track, user, and time info. \cr
$words: A matrix giving word indicators for artist-user pairs. \cr
$users: A matrix givin gsurvey results for each user. \cr}
}
\value{
A vector of ratings corresponding to the user x track pairs in $train
from the newdata.
}
\description{
Extract predictions from X_hat matrix
}
|
###################################################
#  ecological functions, graphed                  #
#  adapted from B. Bolker, 2008                   #
#                                                 #
#  0. print directory, parameter values, and par()#
#  1. piecewise polynomial functions              #
#  2. rational functions                          #
#  3. exponential functions                       #
#  4. power-law functions                         #
#                                                 #
#  *note to run 0 before anything                 #
#                                                 #
#  ~written by isadore nabi~                      #
###################################################
# 0. print directory, parameter values, and par()
# NOTE(review): setwd() mutates global state and the path is machine-specific;
# acceptable for a personal script but not portable.
print.dir <- "~/Dropbox/Projects/Ecological functions"
setwd(print.dir)
# Shared parameters reused by the curve panels below.
a1 <- 1
a2 <- 2
s <- 5
a <- 1
b <- 1
c <- -1   # NOTE(review): shadows base::c as a *variable*; calls to c() still resolve to the function
set.seed(6174)   # reproducible random points for the spline panel
# All panels are drawn onto one PDF page, in a 4 x 4 grid.
pdf(paste(print.dir, "/EcoFun_onepage.pdf", sep = ""), width = 11, height = 8.5)
par(mfrow = c(4, 4), mar = c(1,1,1,1), oma = c(1,5,1,0))
# 1. piecewise polynomial functions
# threshold: constant a1 below the breakpoint s, constant a2 above it
threshold <- function(a1 , a2 , s , x){
  ifelse(x < s , a1 , a2)
}
curve(threshold(a1 = a1 , a2 = a2 , s = s , x = x) , from = 0 , to = 10 , n = 10000 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2)
mtext("Threshold" , side = 3)
abline(v = s , col = "grey" , lty = 2)
mtext("s" , side = 1 , at = s , line = -1, adj = c(-0.5))
mtext(expression(a[1]) , side = 2 , at = a1 , line = 0.25 , las = 1)
mtext(expression(a[2]) , side = 4 , at = a2 , line = 0.25 , las = 1)
legend("bottomright" , legend = expression(f(x)==bgroup("{",atop(a[1]~", if x < s",a[2]~", if x > s"),"")) , bty = "n" , adj = c(0,-.25))
mtext(side = 2, text = "Piecewise\nfunctions", cex = 1.25, line = 1.75, outer = F)
# hockey stick: linear with slope a up to s, then flat at a*s
hockeystick <- function(a , s , x){
  ifelse(x < s , a*x , a*s)
}
curve(hockeystick(a = a , s = s , x = x) , from = 0 , to = 10 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2)
mtext("Hockey stick" , side = 3)
abline(v = s , col = "grey" , lty = 2)
mtext("s" , side = 1 , at = s , line = -1)
mtext(expression(a*s) , side = 4 , at = a*s , line = 0.25 , las = 1)
legend("bottomright" , legend = expression(f(x)==bgroup("{",atop(a*x~", if x < s",a*s~", if x > s"),"")) , bty = "n" , adj = c(0,-.25))
# general piecewise linear: slope a up to s, then slope -b afterwards
b <- 0.5
genpiecelin <- function(a , s , b , x){
  ifelse(x < s , a*x , a*s-b*(x-s))
}
curve(genpiecelin(a = a , s = s , b = b , x = x) , from = 0 , to = 15 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , ylim = c(0 , a*s*1.25))
mtext("General piecewise linear" , side = 3)
abline(v = s , col = "grey" , lty = 2)
mtext("s" , side = 1 , at = s , line = -1)
legend("topright" , legend = expression(f(x)==bgroup("{",atop(a*x~", if x < s",a*s-b*(x-s)~", if x > s"),"")) , bty = "n" , adj = c(0,.2))
# spline: smooth interpolation through random points (seed set above)
x <- c(runif(8 , 0 , 10))
y <- c(runif(8 , 0 , 10))
plot(x,y , ylim = c(-10,20) , pch = 16 , xaxt = "n" , yaxt = "n", frame = T , xlab = "" , ylab = "n")
mtext("Splines" , side = 3)
lines(spline(x,y))
legend("topright" , legend = "f(x) is complicated" , bty = "n" , adj = c(0,.2))
mtext("Piecewise polynomial functions" , side = 3 , outer = T , line = 1)
# 2. rational functions
# hyperbolic: f(x) = a / (b + x), decreasing from a/b toward 0
b <- 1
# Hyperbolic decay: f(x) = a / (b + x).
hyperbolic <- function(a , b , x){
  # Return the expression directly; the original assigned it to an unused
  # local `y` (the value was only returned invisibly as the assignment result).
  a/(b + x)
}
curve(hyperbolic(a = a , b = b , x = x) , from = 0 , to = 10 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2)
mtext("Hyperbolic" , side = 3)
abline(v = 0 , col = "grey")
abline(v = b , col = "grey" , lty = 2)
abline(h = c(a/2 , a/b) , col = "grey" , lty = 2)
mtext("b" , side = 1 , at = b , line = -1)
mtext(expression(over(a,2*b)) , side = 2 , at = a/2*b , las = 1 , line = 0.25)
mtext(expression(over(a,b)) , side = 2 , at = a/b , las = 1 , line = 0.25)
legend("topright" , legend = expression(f(x) == frac(a, b + x)) , bty = "n")
mtext(side = 2, text = "Rational\nfunctions", cex = 1.25, line = 1.75, outer = F)
# michaelis-menten / Holling II: saturating, half-maximum at x = b, asymptote a
michaelismenton <- function(a , b , x){
  y <- (a*x)/(b + x)
}
curve(michaelismenton(a = a , b = b , x = x) , from = 0 , to = 20 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , ylim = c(0,a) )
mtext("Michaelis-Menton (Holling type II)" , side = 3)
abline(v = 0 , col = "grey")
abline(v = b , col = "grey" , lty = 2)
abline(h = c(a/2 , a) , col = "grey" , lty = 2)
mtext("b" , side = 1 , at = b , line = -1)
mtext(expression(over(a,2)) , side = 2 , at = a/2 , las = 1 , line = 0.25)
mtext("a" , side = 2 , at = a , las = 1 , line = 0.25)
legend("topright" , legend = expression(f(x) == frac(ax, b + x)) , bty = "n" , adj = c(0,1))
# holling type iii: sigmoid saturating response
# Holling type III functional response: f(x) = a*x^2 / (b^2 + x^2),
# sigmoid with half-maximum at x = b and asymptote a.
hollingIII <- function(a , b , x){
  # BUG FIX: the original computed (a*x)^2/(b^2 + x^2), whose asymptote is a^2,
  # contradicting the panel's legend (f(x) = ax^2/(b^2 + x^2)) and the "a"
  # asymptote label. With a = 1 (this script's default) the plots are identical.
  a*x^2/(b^2 + x^2)
}
curve(hollingIII(a = a , b = b , x = x) , from = 0 , to = 10 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , ylim = c(0,a))
mtext("Holling type III" , side = 3)
abline(v = 0 , col = "grey")
abline(v = b , col = "grey" , lty = 2)
abline(h = c(a/2 , a) , col = "grey" , lty = 2)
mtext("b" , side = 1 , at = b , line = -1)
mtext(expression(over(a,2)) , side = 2 , at = a/2 , las = 1 , line = 0.25)
mtext("a" , side = 2 , at = a , las = 1 , line = 0.25)
legend("topright" , legend = expression(f(x) == frac(ax^2, b^2 + x^2)) , bty = "n" , adj = c(0,1))
# holling type iv: humped response with maximum at x = -2b/c (c < 0 here)
hollingIV <- function(a , b , c , x){
  y <- a*x^2/(b + c*x + x^2)
}
curve(hollingIV(a = a , b = b , c = c , x = x) , from = 0 , to = 20 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , ylim = c(0 , 1.5*a))
mtext("Holling type IV" , side = 3)
abline(v = 0 , col = "grey")
abline(v = (-2*b/c) , col = "grey" , lty = 2)
abline(h = c(a) , col = "grey" , lty = 2)
mtext(text = expression(over("-2b",c)) , side = 1 , at = (-2*b/c) , line = -1, cex = 0.75)
mtext("a" , side = 2 , at = a , las = 1 , line = 0.25)
legend("topright" , legend = expression(f(x) == frac(ax^2, b + cx + x^2)) , bty = "n" , adj = c(0,.25))
mtext("Rational functions" , side = 3 , outer = T , line = 1)
# 3. exponential functions
b <- 1
#negative expoential
# Negative exponential decay: f(x) = a * e^(-b*x); starts at `a`, decays to 0.
negativeexponential <- function(a, b, x) {
  decay <- exp(-b * x)
  y <- a * decay
}
curve(negativeexponential(a = a , b = b , x = x) , from = 0 , to = 5 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2)
mtext("Negative exponential" , side = 3)
abline(v = 0 , col = "grey")
abline(v = (1/b) , col = "grey" , lty = 2)
abline(h = c(a , (a/exp(1))) , col = "grey" , lty = 2)
mtext(expression(frac(1,b)) , side = 1 , at = (1/b) , line = -1, cex = 0.75)
mtext(expression(frac(a,italic(e))) , side = 2 , at = (a/exp(1)) , las = 1 , line = 0.25)
mtext("a" , side = 2 , at = a , las = 1 , line = 0.25)
legend("right" , legend = expression(f(x) == a*italic(e)^-bx) , bty = "n")
mtext(side = 2, text = "Exponential\nfunctions", cex = 1.25, line = 1.75, outer = F)
# monomolecular
# Monomolecular saturating curve: f(x) = a * (1 - e^(-b*x));
# starts at 0 and approaches the asymptote `a`.
monomolecular <- function(a, b, x) {
  remaining <- exp(-b * x)
  y <- a * (1 - remaining)
}
curve(monomolecular(a = a , b = b , x = x) , from = 0 , to = 10 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , ylim = c(0,a) )
mtext("Monomolecular" , side = 3)
abline(v = 0 , col = "grey")
abline(h = a , col = "grey" , lty = 2)
mtext("a" , side = 2 , at = a , las = 1 , line = -1)
legend("right" , legend = expression(f(x) == a*(1-italic(e)^-bx)) , bty = "n" , adj = c(0,1))
# ricker
# Ricker curve: f(x) = a * x * e^(-b*x); peaks at x = 1/b with height (a/b)*e^-1.
ricker <- function(a, b, x) {
  scaled <- a * x
  y <- scaled * exp(-b * x)
}
curve(ricker(a = a , b = b , x = x) , from = 0 , to = 10 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 )
mtext("Ricker" , side = 3)
abline(v = 0 , col = "grey")
abline(v = 1/b , col = "grey" , lty = 2)
abline(h = a/b/exp(1) , col = "grey" , lty = 2)
mtext(expression(frac(1,b)) , side = 1 , at = 1/b , line = -1, cex = 0.75)
mtext(expression(over(a,b)~italic(e)^-1) , side = 2 , at = a/b/exp(1) , las = 1 , line = 0.25)
legend("right" , legend = expression(f(x) == a*x*italic(e)^-bx) , bty = "n" , adj = c(0,1))
# logistic
# Logistic curve: f(x) = e^(a + b*x) / (1 + e^(a + b*x)).
# (The name keeps the file's original spelling so the existing curve() call
# still resolves.)
logisitic <- function(a, b, x) {
  odds <- exp(a + b * x)
  y <- odds / (1 + odds)
}
b <- 1
curve(logisitic(a = a , b = b , x = x) , from = -5 , to = 5 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , ylim = c(0,1))
mtext("Logistic" , side = 3)
abline(v = (-a/b) , col = "grey" , lty = 2)
abline(h = c(0.5,1) , col = "grey" , lty = 2)
mtext(text = expression(over("-a",b)) , side = 1 , at = (-a/b) , line = -1, cex = 0.75)
mtext(expression(frac(1,2)) , side = 2 , at = 0.5 , las = 1 , line = 0.25)
mtext(1 , side = 2 , at = 1 , line = 0.25 , las = 1)
legend("bottomright" , legend = expression(f(x) == frac(italic(e)^"a+bx",1+italic(e)^"a+bx")) , bty = "n" , adj = c(0,.25))
mtext("Exponential functions" , side = 3 , outer = T , line = 1)
# 4. power-law functions
# power-law
# Power law: f(x) = a * x^b. Decreasing for b < 0, concave increasing for
# 0 < b < 1, convex increasing for b > 1 (the three line types plotted below).
powerlaw <- function(a, b, x) {
  shape <- x^b
  y <- a * shape
}
b <- -2
curve(powerlaw(a = a , b = b , x = x) , from = 0 , to = 5 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , lty = 3 , ylim = c(0,5))
b <- 0.5
curve(powerlaw(a = a , b = b , x = x) , from = 0 , to = 5 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , add = T , lty = 2)
b <- 2
curve(powerlaw(a = a , b = b , x = x) , from = 0 , to = 5 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , add = T)
mtext("Power laws" , side = 3)
abline(v = 0 , col = "grey")
abline(v = 1 , col = "grey" , lty = 2)
abline(h = a , col = "grey" , lty = 2)
mtext(1 , side = 1 , at = 1 , line = -1)
mtext("a" , side = 2 , at = a , las = 1 , line = 0.25)
legend("topright" , legend = c("b<0","0<b<1","b>1") , bty = "n" , lty = c(3,2,1))
legend("right" , legend = expression(f(x) == a*x^b) , bty = "n" , adj = c(0.25,0))
mtext(side = 2, text = "Power-law\nfunctions", cex = 1.25, line = 1.75, outer = F)
# von bertalanffy
# Generalized von Bertalanffy growth: f(x) = a * (1 - e^(-k*(a-d)*x))^(1/(1-d));
# saturates at the asymptote `a`.
vonbertalanffy <- function(a, k, d, x) {
  growth <- 1 - exp(-k * (a - d) * x)
  y <- a * growth^(1 / (1 - d))
}
a <-1
d <- 2/3
k <- .5
curve(vonbertalanffy(a = a , k = k , d = d , x = x) , from = 0 , to = 30 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , ylim = c(0,a))
mtext("Von Bertalanffy" , side = 3)
abline(v = 0 , col = "grey")
abline(h = a , col = "grey" , lty = 2)
mtext("a" , side = 2 , at = a , las = 1 , line = 0.25)
legend("bottomright" , legend = expression(f(x) == a(1-italic(e)^-k(a-d))^frac(1,1-d)) , bty = "n" , adj = c(0,0))
# shepherd, hassell
# Shepherd function: f(x) = a*x / (b + x^c); the exponent applies to x only
# (compare hassel(), where it applies to the whole denominator).
shepherd <- function(a, b, c, x) {
  denom <- b + x^c
  y <- (a * x) / denom
}
# Hassell function: f(x) = a*x / (b + x)^c; the exponent applies to the whole
# denominator (compare shepherd(), where it applies to x only).
hassel <- function(a, b, c, x) {
  denom <- (b + x)^c
  y <- (a * x) / denom
}
c <- 1.5
curve(shepherd(a = a , b = b , c = c , x = x) , from = 0 , to = 10 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2)
curve(hassel(a = a , b = b , c = c , x = x) , from = 0 , to = 10 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , add = T , lty = 2)
mtext("Shepherd, Hassell" , side = 3)
abline(v = 0 , col = "grey")
legend("bottomright" , legend = c(expression(f(x) == frac(a*x , b + x^c)) , expression(f(x) == frac(a*x , (b + x)^c))) , bty = "n" , y.intersp = 1.5)
# non-rectangular hyperbola
# Non-rectangular hyperbola (photosynthesis light-response curve):
#   f(x) = (1 / (2*theta)) * (a*x + p - sqrt((a*x + p)^2 - 4*theta*a*x*p))
# Bug fix: the leading factor was written `(1/2*theta)`, which R's operator
# precedence evaluates as theta/2, not 1/(2*theta). The legend drawn for this
# panel states frac(1, 2*theta), i.e. 1/(2*theta); with the script's
# theta = 0.5 the old code scaled the whole curve by 0.25 instead of 1.
nonrectangularhyperbola <- function(a, theta, p, x) {
  discriminant <- (a * x + p)^2 - 4 * theta * a * x * p
  (1 / (2 * theta)) * (a * x + p - sqrt(discriminant))
}
p <- 1
theta <- 0.5
curve(nonrectangularhyperbola(a = a , theta = theta , p = p , x = x) , from = 0 , to = 100 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2)
mtext("Non-rectangular hyperbola" , side = 3)
abline(v = 0 , col = "grey")
legend("bottomright" , legend = expression(f(x) == frac(1,2~theta)~(alpha*x + p[max] - sqrt((alpha*x+p[max])^2-4*theta*alpha*x*p[max]))) , bty = "n" , adj = c(0,-0.1) , cex = 0.85)
mtext("Power-law functions" , side = 3 , outer = T , line = 1)
mtext("Modified from Bolker, 2008, Ecological Models and Data in R, Princeton University Press" , side = 1 , outer = T , line = -1/3, cex =0.5, col = "grey70", at = 0.825)
dev.off() | /R/EcologicalFunctions_OnePage.R | no_license | dispersing/dispersing.github.io_old | R | false | false | 11,811 | r | ###################################################
# ecological functions, graphed #
# adapted from B. Bolker, 2008 #
# #
# 0. print directory, parameter values, and par() #
# 1. piecewise polynomial functions #
# 2. rational functions #
# 3. exponential functions #
# 4. power-law functions #
# #
# *note to run 0 before anything #
# #
# ~written by isadore nabi~#
###################################################
# 0. print directory, parameter values, and par()
# Output location for the PDF.
# NOTE(review): setwd() on a hard-coded personal path makes the script
# non-portable -- consider a relative path or a project-root helper.
print.dir <- "~/Dropbox/Projects/Ecological functions"
setwd(print.dir)
# Shared curve parameters reused by the panels below (some panels reassign
# b and c before plotting).
a1 <- 1
a2 <- 2
s <- 5
a <- 1
b <- 1
c <- -1
# The seed only affects the random points drawn in the splines panel.
set.seed(6174)
# One landscape letter page holding a 4x4 grid of panels; closed by the
# dev.off() at the end of the script.
pdf(paste(print.dir, "/EcoFun_onepage.pdf", sep = ""), width = 11, height = 8.5)
par(mfrow = c(4, 4), mar = c(1,1,1,1), oma = c(1,5,1,0))
# 1. piecewise polynomial functions
# threshold
# Step (threshold) function: a1 below the breakpoint s, a2 at or above it.
# Vectorized over x via ifelse().
threshold <- function(a1, a2, s, x) {
  below <- x < s
  ifelse(below, a1, a2)
}
curve(threshold(a1 = a1 , a2 = a2 , s = s , x = x) , from = 0 , to = 10 , n = 10000 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2)
mtext("Threshold" , side = 3)
abline(v = s , col = "grey" , lty = 2)
mtext("s" , side = 1 , at = s , line = -1, adj = c(-0.5))
mtext(expression(a[1]) , side = 2 , at = a1 , line = 0.25 , las = 1)
mtext(expression(a[2]) , side = 4 , at = a2 , line = 0.25 , las = 1)
legend("bottomright" , legend = expression(f(x)==bgroup("{",atop(a[1]~", if x < s",a[2]~", if x > s"),"")) , bty = "n" , adj = c(0,-.25))
mtext(side = 2, text = "Piecewise\nfunctions", cex = 1.25, line = 1.75, outer = F)
#hockey stick
# Hockey-stick function: rises linearly with slope a until x = s,
# then stays flat at a*s.
hockeystick <- function(a, s, x) {
  rising <- a * x
  plateau <- a * s
  ifelse(x < s, rising, plateau)
}
curve(hockeystick(a = a , s = s , x = x) , from = 0 , to = 10 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2)
mtext("Hockey stick" , side = 3)
abline(v = s , col = "grey" , lty = 2)
mtext("s" , side = 1 , at = s , line = -1)
mtext(expression(a*s) , side = 4 , at = a*s , line = 0.25 , las = 1)
legend("bottomright" , legend = expression(f(x)==bgroup("{",atop(a*x~", if x < s",a*s~", if x > s"),"")) , bty = "n" , adj = c(0,-.25))
#general piecewise linear
b <- 0.5
# General piecewise linear: slope a up to the breakpoint s, then slope -b.
genpiecelin <- function(a, s, b, x) {
  ascent <- a * x
  descent <- a * s - b * (x - s)
  ifelse(x < s, ascent, descent)
}
curve(genpiecelin(a = a , s = s , b = b , x = x) , from = 0 , to = 15 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , ylim = c(0 , a*s*1.25))
mtext("General piecewise linear" , side = 3)
abline(v = s , col = "grey" , lty = 2)
mtext("s" , side = 1 , at = s , line = -1)
legend("topright" , legend = expression(f(x)==bgroup("{",atop(a*x~", if x < s",a*s-b*(x-s)~", if x > s"),"")) , bty = "n" , adj = c(0,.2))
# spline
x <- c(runif(8 , 0 , 10))
y <- c(runif(8 , 0 , 10))
plot(x,y , ylim = c(-10,20) , pch = 16 , xaxt = "n" , yaxt = "n", frame = T , xlab = "" , ylab = "n")
mtext("Splines" , side = 3)
lines(spline(x,y))
legend("topright" , legend = "f(x) is complicated" , bty = "n" , adj = c(0,.2))
mtext("Piecewise polynomial functions" , side = 3 , outer = T , line = 1)
# 2. rational functions
# hyperbolic
b <- 1
# Hyperbolic decay: f(x) = a / (b + x); starts at a/b and decays toward 0.
hyperbolic <- function(a, b, x) {
  shifted <- b + x
  y <- a / shifted
}
curve(hyperbolic(a = a , b = b , x = x) , from = 0 , to = 10 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2)
mtext("Hyperbolic" , side = 3)
abline(v = 0 , col = "grey")
abline(v = b , col = "grey" , lty = 2)
abline(h = c(a/2 , a/b) , col = "grey" , lty = 2)
mtext("b" , side = 1 , at = b , line = -1)
mtext(expression(over(a,2*b)) , side = 2 , at = a/2*b , las = 1 , line = 0.25)
mtext(expression(over(a,b)) , side = 2 , at = a/b , las = 1 , line = 0.25)
legend("topright" , legend = expression(f(x) == frac(a, b + x)) , bty = "n")
mtext(side = 2, text = "Rational\nfunctions", cex = 1.25, line = 1.75, outer = F)
#michaelis-menton
# Michaelis-Menten / Holling type II: f(x) = a*x / (b + x);
# half-maximum a/2 at x = b, asymptote a.
michaelismenton <- function(a, b, x) {
  saturating <- b + x
  y <- (a * x) / saturating
}
curve(michaelismenton(a = a , b = b , x = x) , from = 0 , to = 20 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , ylim = c(0,a) )
mtext("Michaelis-Menton (Holling type II)" , side = 3)
abline(v = 0 , col = "grey")
abline(v = b , col = "grey" , lty = 2)
abline(h = c(a/2 , a) , col = "grey" , lty = 2)
mtext("b" , side = 1 , at = b , line = -1)
mtext(expression(over(a,2)) , side = 2 , at = a/2 , las = 1 , line = 0.25)
mtext("a" , side = 2 , at = a , las = 1 , line = 0.25)
legend("topright" , legend = expression(f(x) == frac(ax, b + x)) , bty = "n" , adj = c(0,1))
# holling type iii
# Holling type III functional response: f(x) = a*x^2 / (b^2 + x^2).
# Sigmoid rise from 0; half-maximum a/2 at x = b; horizontal asymptote a.
# Bug fix: the numerator was written (a*x)^2, which squares `a` as well and
# makes the asymptote a^2 instead of the `a` annotated on the plot (the
# legend drawn for this panel states f(x) = ax^2 / (b^2 + x^2)). The error
# was masked in this script because a = 1.
hollingIII <- function(a, b, x) {
  (a * x^2) / (b^2 + x^2)
}
curve(hollingIII(a = a , b = b , x = x) , from = 0 , to = 10 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , ylim = c(0,a))
mtext("Holling type III" , side = 3)
abline(v = 0 , col = "grey")
abline(v = b , col = "grey" , lty = 2)
abline(h = c(a/2 , a) , col = "grey" , lty = 2)
mtext("b" , side = 1 , at = b , line = -1)
mtext(expression(over(a,2)) , side = 2 , at = a/2 , las = 1 , line = 0.25)
mtext("a" , side = 2 , at = a , las = 1 , line = 0.25)
legend("topright" , legend = expression(f(x) == frac(ax^2, b^2 + x^2)) , bty = "n" , adj = c(0,1))
# holling type iv
hollingIV <- function(a , b , c , x){
y <- a*x^2/(b + c*x + x^2)
}
curve(hollingIV(a = a , b = b , c = c , x = x) , from = 0 , to = 20 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , ylim = c(0 , 1.5*a))
mtext("Holling type IV" , side = 3)
abline(v = 0 , col = "grey")
abline(v = (-2*b/c) , col = "grey" , lty = 2)
abline(h = c(a) , col = "grey" , lty = 2)
mtext(text = expression(over("-2b",c)) , side = 1 , at = (-2*b/c) , line = -1, cex = 0.75)
mtext("a" , side = 2 , at = a , las = 1 , line = 0.25)
legend("topright" , legend = expression(f(x) == frac(ax^2, b + cx + x^2)) , bty = "n" , adj = c(0,.25))
mtext("Rational functions" , side = 3 , outer = T , line = 1)
# 3. exponential functions
b <- 1
#negative expoential
negativeexponential <- function(a , b , x){
y <- a*exp(-b*x)
}
curve(negativeexponential(a = a , b = b , x = x) , from = 0 , to = 5 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2)
mtext("Negative exponential" , side = 3)
abline(v = 0 , col = "grey")
abline(v = (1/b) , col = "grey" , lty = 2)
abline(h = c(a , (a/exp(1))) , col = "grey" , lty = 2)
mtext(expression(frac(1,b)) , side = 1 , at = (1/b) , line = -1, cex = 0.75)
mtext(expression(frac(a,italic(e))) , side = 2 , at = (a/exp(1)) , las = 1 , line = 0.25)
mtext("a" , side = 2 , at = a , las = 1 , line = 0.25)
legend("right" , legend = expression(f(x) == a*italic(e)^-bx) , bty = "n")
mtext(side = 2, text = "Exponential\nfunctions", cex = 1.25, line = 1.75, outer = F)
# monomolecular
monomolecular <- function(a , b , x){
y <- a*(1-exp(-b*x))
}
curve(monomolecular(a = a , b = b , x = x) , from = 0 , to = 10 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , ylim = c(0,a) )
mtext("Monomolecular" , side = 3)
abline(v = 0 , col = "grey")
abline(h = a , col = "grey" , lty = 2)
mtext("a" , side = 2 , at = a , las = 1 , line = -1)
legend("right" , legend = expression(f(x) == a*(1-italic(e)^-bx)) , bty = "n" , adj = c(0,1))
# ricker
ricker <- function(a , b , x){
y <- a*x*exp(-b*x)
}
curve(ricker(a = a , b = b , x = x) , from = 0 , to = 10 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 )
mtext("Ricker" , side = 3)
abline(v = 0 , col = "grey")
abline(v = 1/b , col = "grey" , lty = 2)
abline(h = a/b/exp(1) , col = "grey" , lty = 2)
mtext(expression(frac(1,b)) , side = 1 , at = 1/b , line = -1, cex = 0.75)
mtext(expression(over(a,b)~italic(e)^-1) , side = 2 , at = a/b/exp(1) , las = 1 , line = 0.25)
legend("right" , legend = expression(f(x) == a*x*italic(e)^-bx) , bty = "n" , adj = c(0,1))
# logistic
logisitic <- function(a , b , x){
y <- exp(a+(b*x))/(1 + exp(a+(b*x)))
}
b <- 1
curve(logisitic(a = a , b = b , x = x) , from = -5 , to = 5 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , ylim = c(0,1))
mtext("Logistic" , side = 3)
abline(v = (-a/b) , col = "grey" , lty = 2)
abline(h = c(0.5,1) , col = "grey" , lty = 2)
mtext(text = expression(over("-a",b)) , side = 1 , at = (-a/b) , line = -1, cex = 0.75)
mtext(expression(frac(1,2)) , side = 2 , at = 0.5 , las = 1 , line = 0.25)
mtext(1 , side = 2 , at = 1 , line = 0.25 , las = 1)
legend("bottomright" , legend = expression(f(x) == frac(italic(e)^"a+bx",1+italic(e)^"a+bx")) , bty = "n" , adj = c(0,.25))
mtext("Exponential functions" , side = 3 , outer = T , line = 1)
# 4. power-law functions
# power-law
powerlaw <- function(a , b , x){
y <- a*x^b
}
b <- -2
curve(powerlaw(a = a , b = b , x = x) , from = 0 , to = 5 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , lty = 3 , ylim = c(0,5))
b <- 0.5
curve(powerlaw(a = a , b = b , x = x) , from = 0 , to = 5 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , add = T , lty = 2)
b <- 2
curve(powerlaw(a = a , b = b , x = x) , from = 0 , to = 5 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , add = T)
mtext("Power laws" , side = 3)
abline(v = 0 , col = "grey")
abline(v = 1 , col = "grey" , lty = 2)
abline(h = a , col = "grey" , lty = 2)
mtext(1 , side = 1 , at = 1 , line = -1)
mtext("a" , side = 2 , at = a , las = 1 , line = 0.25)
legend("topright" , legend = c("b<0","0<b<1","b>1") , bty = "n" , lty = c(3,2,1))
legend("right" , legend = expression(f(x) == a*x^b) , bty = "n" , adj = c(0.25,0))
mtext(side = 2, text = "Power-law\nfunctions", cex = 1.25, line = 1.75, outer = F)
# von bertalanffy
vonbertalanffy <- function(a , k , d , x){
y <- a*(1-exp(-k*(a-d)*x))^(1/(1-d))
}
a <-1
d <- 2/3
k <- .5
curve(vonbertalanffy(a = a , k = k , d = d , x = x) , from = 0 , to = 30 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , ylim = c(0,a))
mtext("Von Bertalanffy" , side = 3)
abline(v = 0 , col = "grey")
abline(h = a , col = "grey" , lty = 2)
mtext("a" , side = 2 , at = a , las = 1 , line = 0.25)
legend("bottomright" , legend = expression(f(x) == a(1-italic(e)^-k(a-d))^frac(1,1-d)) , bty = "n" , adj = c(0,0))
# shepherd, hassell
shepherd <- function(a , b , c , x){
y <- (a*x) / (b+x^c)
}
hassel <- function(a , b , c , x){
y <- (a*x) / (b+x)^c
}
c <- 1.5
curve(shepherd(a = a , b = b , c = c , x = x) , from = 0 , to = 10 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2)
curve(hassel(a = a , b = b , c = c , x = x) , from = 0 , to = 10 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2 , add = T , lty = 2)
mtext("Shepherd, Hassell" , side = 3)
abline(v = 0 , col = "grey")
legend("bottomright" , legend = c(expression(f(x) == frac(a*x , b + x^c)) , expression(f(x) == frac(a*x , (b + x)^c))) , bty = "n" , y.intersp = 1.5)
# non-rectangular hyperbola
# Non-rectangular hyperbola (photosynthesis light-response curve):
#   f(x) = (1 / (2*theta)) * (a*x + p - sqrt((a*x + p)^2 - 4*theta*a*x*p))
# Bug fix: the leading factor was written `(1/2*theta)`, which R's operator
# precedence evaluates as theta/2, not 1/(2*theta). The legend drawn for this
# panel states frac(1, 2*theta), i.e. 1/(2*theta); with the script's
# theta = 0.5 the old code scaled the whole curve by 0.25 instead of 1.
nonrectangularhyperbola <- function(a, theta, p, x) {
  discriminant <- (a * x + p)^2 - 4 * theta * a * x * p
  (1 / (2 * theta)) * (a * x + p - sqrt(discriminant))
}
p <- 1
theta <- 0.5
curve(nonrectangularhyperbola(a = a , theta = theta , p = p , x = x) , from = 0 , to = 100 , xaxt = "n" , xlab = "" , yaxt = "n" , ylab = "" , lwd = 2)
mtext("Non-rectangular hyperbola" , side = 3)
abline(v = 0 , col = "grey")
legend("bottomright" , legend = expression(f(x) == frac(1,2~theta)~(alpha*x + p[max] - sqrt((alpha*x+p[max])^2-4*theta*alpha*x*p[max]))) , bty = "n" , adj = c(0,-0.1) , cex = 0.85)
mtext("Power-law functions" , side = 3 , outer = T , line = 1)
mtext("Modified from Bolker, 2008, Ecological Models and Data in R, Princeton University Press" , side = 1 , outer = T , line = -1/3, cex =0.5, col = "grey70", at = 0.825)
dev.off() |
#' Add variant effect prediction
#'
#' Experimental, use with caution
#'
#' @param cds Coding sequence coordinates
#' @param vcf Data frame of variants from a variant call format file
#' @param genome A genome reference compatible with [get_genomic_sequence]
#'
#' @details
#'
#' `cds`, `vcf`, and `genome` should have matching chromosome naming
#' conventions.
#'
#' `cds` must contain the following columns (rows represent exon coordinates):
#' `tx`, `exon`, `chr`, `strand`, `start`, and `end`
#'
#' `vcf` must contain the following standard VCF columns (rows represent variants):
#' `CHROM`, `POS`, `REF`, `ALT`.
#'
#' @examples
#' if (requireNamespace('BSgenome.Hsapiens.UCSC.hg38')) {
#' library(tidyverse)
#' library(mutagenesis)
#'
#' # Example files provided in this package
#' cds_file <- system.file('extdata/CDS.csv', package = 'mutagenesis')
#' vcf_file <- system.file('extdata/VCF.vcf', package = 'mutagenesis')
#'
#' # Coding sequence coordinates, variants, and a reference genome
#' # are required to predict variant effects
#' cds <- read_csv(cds_file)
#' vcf <- read_vcf(vcf_file)
#' genome <- BSgenome.Hsapiens.UCSC.hg38::Hsapiens
#' vep <- predict_variant_effect(cds, vcf, genome)
#'
#' # An example summary of effects
#' vep %>%
#' select(
#' gene, ID:INFO, ref_cds, alt_cds, ref_aa, alt_aa, mutation_type,
#' exon_boundary_dist, vcf_start, vcf_end, exon_start, exon_end
#' ) %>%
#' distinct() %>%
#' count(ref_aa, mutation_type) %>%
#' arrange(mutation_type, -n)
#' }
#'
#' @export
#' @md
predict_variant_effect <- function(cds, vcf, genome) {
  # Prepare cds and vcf tables
  # NOTE(review): add_exon_details() is assumed to add the exon_* columns
  # (exon_frame, exon_strand, exon_start, exon_end) consumed below -- confirm.
  if (!hasName(cds, 'exon_frame')) cds <- add_exon_details(cds)
  vcf <-
    vcf %>%
    mutate(
      # Used to track variants after join
      vcf_id = 1:n(),
      # Width of ref used to determine CDS join (i.e. start/end)
      ref_length = str_length(REF),
      # Define VCF end and start based on ref_length
      # Caveats: fails for complex variants and missing should be encoded with
      # an empty string.
      start = POS,
      end = start + (ref_length - 1L)
    )
  # Join vcf and cds exons by overlapping coordinates
  cds_keys <- c('chromosome' = 'chr', 'start' = 'start', 'end' = 'end')
  vcf_keys <- c('chromosome' = 'CHROM' , 'start' = 'start', 'end' = 'end')
  joined <- mutagenesis::inner_join_cds_vcf(cds, vcf, cds_keys, vcf_keys)
  # Annotate variants
  joined %>%
    mutate(
      # Must calculate ALT length after separating alleles (done by inner_join_cds_vcf)
      alt_length = str_length(ALT),
      # Determine distance of Ref and alt boundaries to 5' of exon
      # (5'/3' are transcript-relative, so they swap with strand)
      exon_5prime = ifelse(exon_strand == '+', exon_start, exon_end),
      vcf_5prime = ifelse(exon_strand == '+', vcf_start, vcf_end),
      vcf_3prime = ifelse(exon_strand == '+', vcf_end, vcf_start),
      alt_5prime = vcf_5prime,
      # used only to calculate frame of 3' end relative to exon 5' end
      alt_3prime = ifelse(exon_strand == '+', vcf_start + (alt_length - 1L), vcf_end - (alt_length - 1L)),
      # Given distance to exon's 5' end determine frame for REF and ALT boundaries
      # NOTE(review): the `- 1L` offset assumes exon_frame is 1-based (frame of
      # the exon's first base) -- confirm against add_exon_details().
      ref_5prime_frame = get_frame(abs(vcf_5prime - exon_5prime) + (exon_frame - 1L)),
      ref_3prime_frame = get_frame(abs(vcf_3prime - exon_5prime) + (exon_frame - 1L)),
      alt_5prime_frame = ref_5prime_frame,
      alt_3prime_frame = get_frame(abs(alt_3prime - exon_5prime) + (exon_frame - 1L)),
      # Given frame of boundaries, determine range for REF and ALT with complete codons
      # (the c(...)[frame] lookups pad each end outward to the nearest codon boundary)
      inframe_ref_start = ifelse(exon_strand == '+', vcf_start + c(0, -1, -2)[ref_5prime_frame], vcf_start + c(-2, -1, 0)[ref_3prime_frame]),
      inframe_ref_end   = ifelse(exon_strand == '+', vcf_end   + c(2,  1,  0)[ref_3prime_frame], vcf_end   + c( 0,  1, 2)[ref_5prime_frame]),
      inframe_alt_start = ifelse(exon_strand == '+', vcf_start + c(0, -1, -2)[alt_5prime_frame], vcf_start + c(-2, -1, 0)[alt_3prime_frame]),
      inframe_alt_end   = ifelse(exon_strand == '+', vcf_end   + c(2,  1,  0)[alt_3prime_frame], vcf_end   + c( 0,  1, 2)[alt_5prime_frame]),
      # Extract boundary genomic sequence for REF and ALT
      # (empty string when the variant already begins/ends on a codon boundary)
      ref_up  = ifelse(vcf_start - 1L < inframe_ref_start, '', get_genomic_sequence(CHROM, '+', inframe_ref_start, vcf_start - 1L, genome)),
      ref_dn  = ifelse(vcf_end   + 1L > inframe_ref_end,   '', get_genomic_sequence(CHROM, '+', vcf_end + 1L,     inframe_ref_end, genome)),
      alt_up  = ifelse(vcf_start - 1L < inframe_alt_start, '', get_genomic_sequence(CHROM, '+', inframe_alt_start, vcf_start - 1L, genome)),
      alt_dn  = ifelse(vcf_end   + 1L > inframe_alt_end,   '', get_genomic_sequence(CHROM, '+', vcf_end + 1L,     inframe_alt_end, genome)),
      # Lowercase flanks distinguish codon-padding context from the variant itself
      ref_seq = str_c(tolower(ref_up), REF, tolower(ref_dn)),
      alt_seq = str_c(tolower(alt_up), ALT, tolower(alt_dn)),
      # Variant types describe the general change in genomic sequence
      variant_type = case_when(
        str_detect(ALT, '[<>\\[\\]]') ~ 'Complex',
        ref_length == 1L & alt_length == 1L ~ 'Single nucleotide',
        ref_length == 2L & alt_length == 2L ~ 'Dinucleotide',
        ref_length == 3L & alt_length == 3L ~ 'Trinucleotide',
        ref_length == alt_length ~ 'Substitution',
        ref_length != alt_length ~ 'Indel'
      ),
      # Minimum distance to exon boundary, -1 if crosses junction, missing if complex
      exon_boundary_dist = case_when(
        # No attempt to determine distance for complex variants
        variant_type == 'Complex' ~ NA_integer_,
        vcf_start < exon_start | vcf_end > exon_end ~ -1L, # overlaps a junction
        TRUE ~ pmin(
          abs(vcf_start - exon_start),
          abs(vcf_start - exon_end),
          abs(vcf_end - exon_start),
          abs(vcf_end - exon_end)
        )
      ),
      splicing_type = case_when(
        variant_type == 'Complex' ~ NA_character_,
        exon_boundary_dist < 0L ~ 'Overlaps splice junction',
        exon_boundary_dist < 2L ~ 'Adjacent to splice junction',
        TRUE ~ 'Within exon'
      ),
      # Orient sequences to the coding strand before translation
      ref_cds = ifelse(exon_strand == '+', ref_seq, reverse_complement(ref_seq)),
      alt_cds = ifelse(exon_strand == '+', alt_seq, reverse_complement(alt_seq)),
      # The inner ifelse() feeds translate() an empty string for junction-
      # spanning rows (ifelse evaluates both branches for the full vector);
      # the outer one blanks the result to NA for those rows.
      ref_aa = ifelse(splicing_type == 'Within exon', mutagenesis::translate(ifelse(splicing_type == 'Within exon', ref_cds, '')), NA_character_),
      alt_aa = ifelse(splicing_type == 'Within exon', mutagenesis::translate(ifelse(splicing_type == 'Within exon', alt_cds, '')), NA_character_),
      mutation_type = case_when(
        variant_type == 'Complex' ~ 'Complex',
        splicing_type != 'Within exon' ~ 'Splicing',
        variant_type == 'Indel' & (ref_length - alt_length) %% 3 != 0 ~ 'Frameshift',
        ref_aa == alt_aa ~ 'Silent',
        ref_aa != alt_aa & str_detect(alt_aa, '[*]') ~ 'Nonsense',
        ref_aa != alt_aa ~ 'Missense'
      )
    ) %>%
    # Drop intermediate bookkeeping columns from the returned table
    select(
      -exon_5prime, -vcf_5prime, -vcf_3prime, -alt_5prime, -alt_3prime,
      -ref_5prime_frame, -ref_3prime_frame, -alt_5prime_frame, -alt_3prime_frame,
      -ref_up, -ref_dn, -alt_up, -alt_dn
    )
}
# Map a 0-based offset within the coding sequence to its codon frame (1, 2, 3).
# An offset that is a multiple of 3 is frame 3; the next two positions are
# frames 1 and 2. Vectorized over `x`.
get_frame <- function(x) {
  lookup <- c(3, 1, 2)
  lookup[(x %% 3) + 1L]
}
| /R/predict-variant-effect.R | no_license | EricBryantPhD/mutagenesis | R | false | false | 7,588 | r | #' Add variant effect prediction
#'
#' Experimental, use with caution
#'
#' @param cds Coding sequence coordinates
#' @param vcf Data frame of variants from a variant call format file
#' @param genome A genome reference compatible with [get_genomic_sequence]
#'
#' @details
#'
#' `cds`, `vcf`, and `genome` should have matching chromosome naming
#' conventions.
#'
#' `cds` must contain the following columns (rows represent exon coordinates):
#' `tx`, `exon`, `chr`, `strand`, `start`, and `end`
#'
#' `vcf` must contain the following standard VCF columns (rows represent variants):
#' `CHROM`, `POS`, `REF`, `ALT`.
#'
#' @examples
#' if (requireNamespace('BSgenome.Hsapiens.UCSC.hg38')) {
#' library(tidyverse)
#' library(mutagenesis)
#'
#' # Example files provided in this package
#' cds_file <- system.file('extdata/CDS.csv', package = 'mutagenesis')
#' vcf_file <- system.file('extdata/VCF.vcf', package = 'mutagenesis')
#'
#' # Coding sequence coordinates, variants, and a reference genome
#' # are required to predict variant effects
#' cds <- read_csv(cds_file)
#' vcf <- read_vcf(vcf_file)
#' genome <- BSgenome.Hsapiens.UCSC.hg38::Hsapiens
#' vep <- predict_variant_effect(cds, vcf, genome)
#'
#' # An example summary of effects
#' vep %>%
#' select(
#' gene, ID:INFO, ref_cds, alt_cds, ref_aa, alt_aa, mutation_type,
#' exon_boundary_dist, vcf_start, vcf_end, exon_start, exon_end
#' ) %>%
#' distinct() %>%
#' count(ref_aa, mutation_type) %>%
#' arrange(mutation_type, -n)
#' }
#'
#' @export
#' @md
predict_variant_effect <- function(cds, vcf, genome) {
# Prepare cds and vcf tables
if (!hasName(cds, 'exon_frame')) cds <- add_exon_details(cds)
vcf <-
vcf %>%
mutate(
# Used to track variants after join
vcf_id = 1:n(),
# Width of ref used to determine CDS join (i.e. start/end)
ref_length = str_length(REF),
# Define VCF end and start based on ref_length
# Caveats: fails for complex variants and missing should be encoded with
# an empty string.
start = POS,
end = start + (ref_length - 1L)
)
# Join vcf and cds exons by overlapping coordinates
cds_keys <- c('chromosome' = 'chr', 'start' = 'start', 'end' = 'end')
vcf_keys <- c('chromosome' = 'CHROM' , 'start' = 'start', 'end' = 'end')
joined <- mutagenesis::inner_join_cds_vcf(cds, vcf, cds_keys, vcf_keys)
# Annotate variants
joined %>%
mutate(
# Must calculate ALT length after separating alleles (done by inner_join_cds_vcf)
alt_length = str_length(ALT),
# Determine distance of Ref and alt boundaries to 5' of exon
exon_5prime = ifelse(exon_strand == '+', exon_start, exon_end),
vcf_5prime = ifelse(exon_strand == '+', vcf_start, vcf_end),
vcf_3prime = ifelse(exon_strand == '+', vcf_end, vcf_start),
alt_5prime = vcf_5prime,
# used only to calculate frame of 3' end relative to exon 5' end
alt_3prime = ifelse(exon_strand == '+', vcf_start + (alt_length - 1L), vcf_end - (alt_length - 1L)),
# Given distance to exon's 5' end determine frame for REF and ALT boundaries
ref_5prime_frame = get_frame(abs(vcf_5prime - exon_5prime) + (exon_frame - 1L)),
ref_3prime_frame = get_frame(abs(vcf_3prime - exon_5prime) + (exon_frame - 1L)),
alt_5prime_frame = ref_5prime_frame,
alt_3prime_frame = get_frame(abs(alt_3prime - exon_5prime) + (exon_frame - 1L)),
# Given frame of boundaries, determine range for REF and ALT with complete codons
inframe_ref_start = ifelse(exon_strand == '+', vcf_start + c(0, -1, -2)[ref_5prime_frame], vcf_start + c(-2, -1, 0)[ref_3prime_frame]),
inframe_ref_end = ifelse(exon_strand == '+', vcf_end + c(2, 1, 0)[ref_3prime_frame], vcf_end + c( 0, 1, 2)[ref_5prime_frame]),
inframe_alt_start = ifelse(exon_strand == '+', vcf_start + c(0, -1, -2)[alt_5prime_frame], vcf_start + c(-2, -1, 0)[alt_3prime_frame]),
inframe_alt_end = ifelse(exon_strand == '+', vcf_end + c(2, 1, 0)[alt_3prime_frame], vcf_end + c( 0, 1, 2)[alt_5prime_frame]),
# Extract boundary genomic sequence for REF and ALT
ref_up = ifelse(vcf_start - 1L < inframe_ref_start, '', get_genomic_sequence(CHROM, '+', inframe_ref_start, vcf_start - 1L, genome)),
ref_dn = ifelse(vcf_end + 1L > inframe_ref_end, '', get_genomic_sequence(CHROM, '+', vcf_end + 1L, inframe_ref_end, genome)),
alt_up = ifelse(vcf_start - 1L < inframe_alt_start, '', get_genomic_sequence(CHROM, '+', inframe_alt_start, vcf_start - 1L, genome)),
alt_dn = ifelse(vcf_end + 1L > inframe_alt_end, '', get_genomic_sequence(CHROM, '+', vcf_end + 1L, inframe_alt_end, genome)),
ref_seq = str_c(tolower(ref_up), REF, tolower(ref_dn)),
alt_seq = str_c(tolower(alt_up), ALT, tolower(alt_dn)),
# Variant types describe the general change in genomic sequence
variant_type = case_when(
str_detect(ALT, '[<>\\[\\]]') ~ 'Complex',
ref_length == 1L & alt_length == 1L ~ 'Single nucleotide',
ref_length == 2L & alt_length == 2L ~ 'Dinucleotide',
ref_length == 3L & alt_length == 3L ~ 'Trinucleotide',
ref_length == alt_length ~ 'Substitution',
ref_length != alt_length ~ 'Indel'
),
# Minimum distance to exon boundary, -1 if crosses junction, missing if complex
exon_boundary_dist = case_when(
# No attempt to determine distance for complex variants
variant_type == 'Complex' ~ NA_integer_,
vcf_start < exon_start | vcf_end > exon_end ~ -1L, # overlaps a junction
TRUE ~ pmin(
abs(vcf_start - exon_start),
abs(vcf_start - exon_end),
abs(vcf_end - exon_start),
abs(vcf_end - exon_end)
)
),
splicing_type = case_when(
variant_type == 'Complex' ~ NA_character_,
exon_boundary_dist < 0L ~ 'Overlaps splice junction',
exon_boundary_dist < 2L ~ 'Adjacent to splice junction',
TRUE ~ 'Within exon'
),
ref_cds = ifelse(exon_strand == '+', ref_seq, reverse_complement(ref_seq)),
alt_cds = ifelse(exon_strand == '+', alt_seq, reverse_complement(alt_seq)),
ref_aa = ifelse(splicing_type == 'Within exon', mutagenesis::translate(ifelse(splicing_type == 'Within exon', ref_cds, '')), NA_character_),
alt_aa = ifelse(splicing_type == 'Within exon', mutagenesis::translate(ifelse(splicing_type == 'Within exon', alt_cds, '')), NA_character_),
mutation_type = case_when(
variant_type == 'Complex' ~ 'Complex',
splicing_type != 'Within exon' ~ 'Splicing',
variant_type == 'Indel' & (ref_length - alt_length) %% 3 != 0 ~ 'Frameshift',
ref_aa == alt_aa ~ 'Silent',
ref_aa != alt_aa & str_detect(alt_aa, '[*]') ~ 'Nonsense',
ref_aa != alt_aa ~ 'Missense'
)
) %>%
select(
-exon_5prime, -vcf_5prime, -vcf_3prime, -alt_5prime, -alt_3prime,
-ref_5prime_frame, -ref_3prime_frame, -alt_5prime_frame, -alt_3prime_frame,
-ref_up, -ref_dn, -alt_up, -alt_dn
)
}
get_frame <- function(x) {
frame <- c(3, 1, 2) # Multiple of 3 is frame 3 followed by frame 1 then 2
frame[(x %% 3) + 1L] # 1 is added to allow indexing of `frame`
}
|
## These functions calculate the inverse of a matrix; if the inverse has
## already been calculated, the result is returned from the cache (where the
## result of the previous calculation is stored).
## makeCacheMatrix stores the inverse of matrix x in the variable 'xinv'.
## It provides a 'set' function to assign a new matrix to the object created
## by 'makeCacheMatrix', a 'get' function which returns the matrix, and the
## functions 'setInv' and 'getInv' to set and get the inverse accordingly.
# Create a matrix wrapper that can cache its inverse.
# Returns a list of accessors:
#   set(z)    - store a new matrix and drop any cached inverse
#   get()     - return the stored matrix
#   setInv(i) - cache a computed inverse
#   getInv()  - return the cached inverse, or NULL if none has been stored
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  list(
    set = function(z) {
      x <<- z
      cached <<- NULL  # invalidate the cache whenever the matrix changes
    },
    get = function() x,
    setInv = function(inv) cached <<- inv,
    getInv = function() cached
  )
}
## This function gets the inverse matrix from x; if it is not NULL, the
## inverse has already been calculated and the cached result is returned.
## Otherwise the inverse is calculated, cached, and returned.
cacheSolve <- function(x, ...) {
  # Return the cached inverse when one already exists.
  n <- x$getInv()
  if(!is.null(n)) {
    message("get data from cache")
    return(n)
  }
  # Cache miss: fetch the matrix, invert it with solve(), store the result,
  # and return it (the value of the final expression).
  # NOTE(review): `...` is accepted but not forwarded to solve() -- confirm
  # whether extra solve() arguments were meant to be supported.
  data <- x$get()
  n <- solve(data)
  x$setInv(n)
  n
} | /cachematrix.R | no_license | kate19/ProgrammingAssignment2 | R | false | false | 1,256 | r | ## This function culculates the inverse of a matrix and if the inversed of this matrix has already ## been culculated previously returns the results from the cache (where has store the results
## from previous calculation).
## This function stores an inverse of matrix x in the variable 'xinv'.
## Then uses a set function to set a new matrix to the object has been created by the function ##'makeCacheMatrix'. Also uses the function 'get' which returns the matrix and the functions
## 'setInv' and 'getInv' to set and get the inversed matrix accordingly.
makeCacheMatrix <- function(x = matrix()) {
xinv <- NULL
set <- function(z) {
x <<- z
xinv <<- NULL
}
get <- function() x
setInv <- function(inv) xinv <<- inv
getInv <- function() xinv
list(set = set, get = get,
setInv = setInv,
getInv = getInv)
}
## This function gets the inversed matrix from x and if is not null that means has been culculated ## previously, will return the result from cache. Otherwise will culculated the inverse and ##returns the result.
cacheSolve <- function(x, ...) {
n <- x$getInv()
if(!is.null(n)) {
message("get data from cache")
return(n)
}
data <- x$get()
n <- solve(data)
x$setInv(n)
n
} |
#' Minimal TMB example, MLE estimation to find the mean and variance of a
#' normally distributed variable
#'
#' Fits a normal distribution to the built-in `rivers` data set by maximum
#' likelihood using the compiled TMB template "minimal".
#'
#' @return The summary of the TMB `sdreport` object: estimates of `mu` and
#'   `logSigma` with their standard errors (the roxygen previously said
#'   "Nothing", but `summary(sdr)` is returned).
#' @importFrom stats nlminb
#' @importFrom TMB sdreport
#' @export
run_minimal <- function(){
  data <- list(x = rivers)                 # observations passed to the template
  parameters <- list(mu = 0, logSigma = 0) # optimiser starting values
  # Build the AD objective from the compiled "minimal" DLL
  # (presumably loaded elsewhere in the package -- TODO confirm).
  obj <- MakeADFun(data, parameters, DLL = "minimal")
  obj$env$tracemgc <- FALSE                # silence max-gradient tracing output
  fit <- nlminb(obj$par, obj$fn, obj$gr)   # gradient-based MLE
  # sdreport() appears to use the parameter values left in `obj` by the
  # optimiser; `fit` itself is not referenced again.
  sdr <- sdreport(obj)
  summary(sdr)
}
| /R/minimal.R | no_license | cgrandin/tmbcompilationmwe | R | false | false | 453 | r | #' Minimal TMB example, MLE estimation to find the mean and variance of a normally distributed variable
#'
#' @return Nothing
#' @importFrom stats nlminb
#' @importFrom TMB sdreport
#' @export
run_minimal <- function(){
data <- list(x = rivers)
parameters <- list(mu = 0, logSigma = 0)
obj <- MakeADFun(data, parameters, DLL = "minimal")
obj$env$tracemgc <- FALSE
fit <- nlminb(obj$par, obj$fn, obj$gr)
sdr <- sdreport(obj)
summary(sdr)
}
|
### Note: the likes of a post_root include all the likes of its nested replies.
## get_likes, get_reply_names and get_replies_and_type are helper functions used by get_one_page.
## Extract the number of likes from a single post node.
## Returns the text of the .post__count element when the post has both a
## .post__actions and a .post__count marker, 0L when it has actions but no
## count, and NA when it has no actions block at all.
get_likes <- function(posts, post_action, post_count) {
  if (!post_action) {
    return(NA)
  }
  if (!post_count) {
    return(0L)
  }
  rvest::html_node(posts, ".post__actions") %>%
    rvest::html_node(".post__count") %>%
    rvest::html_text()
}
## Extract the "in reply to" author name from a post node, or NA when the
## post is not addressed to anyone (no .author__recipient element).
get_reply_names <- function(posts, author_recipient) {
  if (!author_recipient) {
    return(NA)
  }
  posts %>%
    rvest::html_node(".author__recipient") %>%
    rvest::html_text()
}
## Classify a post node and count its replies.
## Returns a length-2 character vector c(reply_count, post_type):
## "reply" for a thread-root reply (count of embedded <article class="post"
## openings), "main_post" for the opening post (count parsed from its stats
## text), and "nested_reply" otherwise (nested replies carry no counts).
get_replies_and_type <- function(posts, class_post_root, class_post) {
  if (class_post_root) {
    # Count how many '<article class="post"' openings the raw HTML contains.
    hits <- gregexpr('<article class="post"', posts, fixed = TRUE)[[1]]
    n_replies <- if (hits[1] > 0L) length(hits) else 0L
    post_type <- "reply"
  } else if (class_post) {
    # Parse "... N replies" out of the second .post__stats element.
    stats_text <- rvest::html_text(rvest::html_nodes(posts, ".post__stats"))[2]
    n_replies <- as.numeric(sub(".*\\s(\\d+)\\sreplies", "\\1", stats_text))
    post_type <- "main_post"
  } else {
    n_replies <- 0L
    post_type <- "nested_reply"
  }
  c(n_replies, post_type)
}
## get users' information
## Scrape a user's public profile page (join date, post count, profile text
## and forum group memberships) into a one-row data frame. Performs one
## network request per call.
get_users_information <- function(user_profile_url) {
  profile_page <- xml2::read_html(user_profile_url)   # network fetch
  # The masthead links hold "Joined <date>" and "<n> posts", in that order.
  date_posts <- rvest::html_nodes(profile_page, ".masthead__actions__link") %>%
    rvest::html_text(trim = TRUE)
  join_date <- sub("Joined ", "", date_posts[1], fixed = TRUE) %>%
    as.POSIXct(tryFormats = "%d-%m-%Y")
  posts_num <- as.numeric(sub(" posts", "", date_posts[2], fixed = TRUE))
  profile_text <- rvest::html_node(profile_page, ".my-profile__row__summary") %>%
    rvest::html_text(trim = TRUE)
  group_names <- rvest::html_nodes(profile_page, ".groups-row") %>%
    rvest::html_text(trim = TRUE)
  # Each .groups-row may contain several names separated by CRLF; split and trim.
  group_names <- unlist(strsplit(group_names, "\r\n")) %>%
    stringr::str_trim(side = "both")
  user_profile <- data.frame(join_date = join_date,
                             posts_num = posts_num,
                             profile_text = profile_text,
                             group_names = paste(group_names, collapse = ", "),
                             stringsAsFactors = FALSE)
  return(user_profile)
}
## scrape data from the first page of one post
## Scrape one page of a discussion thread into a data frame with one row per
## post: id, timestamp, type, author, addressee, likes, reply count, cleaned
## text and the thread title. When get_user_info = TRUE each poster's profile
## page is also fetched (one extra network request per post) and the profile
## columns are bound on.
get_one_page <- function(url, get_user_info = TRUE) {
  page <- xml2::read_html(url)
  ## get all the posts
  posts <- rvest::html_nodes(page, ".post")
  # Second attribute of each .post node is taken as its id -- TODO confirm
  # the attribute order is stable across pages.
  posts_id <- unlist(purrr::map(rvest::html_attrs(posts), 2))
  ## likes
  post_action = grepl('post__actions', posts, fixed = TRUE)
  post_count = grepl('post__count', posts, fixed = TRUE)
  likes <- base::mapply(get_likes, posts, post_action, post_count) # get_likes is a function
  ## get user names
  names <- rvest::html_text(rvest::html_nodes(page, ".author__name"))
  ## reply to names
  author_recipient <- grepl('author__recipient', posts, fixed = TRUE)
  reply_names <- base::mapply(get_reply_names, posts, author_recipient) # get_reply_names is a function
  ## date and time (ISO "T" separator and "+00" offset stripped before parsing)
  post_time <- rvest::html_attr(rvest::html_nodes(page, "time"), "datetime")
  post_time <- gsub("T|\\+00", " ", post_time) %>%
    as.POSIXct(tryFormats = "%Y-%m-%d %H:%M")
  ## reply count and type (detected from the raw article class attribute)
  class_post_root <- grepl('<article class=\"post post__root\"', posts, fixed = TRUE)
  class_post <- grepl('<article class=\"post mb-0"', posts, fixed = TRUE)
  replies_and_type <- base::mapply(get_replies_and_type, posts, class_post_root, class_post)
  ## post title and text
  post_title <- rvest::html_text(rvest::html_node(posts, ".post__title"), trim = TRUE)
  text <- rvest::html_text(rvest::html_nodes(posts, ".post__content"), trim = TRUE)
  ### clean the text: collapse whitespace, then drop like/reply counters and
  ### the "Report"/"Reply" button captions embedded in the content.
  text <- text %>%
    gsub(pattern = "\n|\r|[\\^]|\\s+", replacement = " ") %>%
    gsub(pattern = "(\\d+ likes)|(\\d+ replies)|Report|Reply", replacement = "") %>%
    stringr::str_trim(side = "both")
  ## combine to a dataframe
  df <- data.frame(posts_id = posts_id,
                   post_time = post_time,
                   types = as.character(replies_and_type[2, ]),
                   user_names = names,
                   reply_names = reply_names,
                   likes = as.numeric(likes),
                   replies = as.numeric(replies_and_type[1, ]),
                   text = text,
                   stringsAsFactors = FALSE,
                   check.names = F, fix.empty.names = F)
  df$post_title <- post_title[1]   # thread title taken from the opening post
  ## get users' information (one profile request per author on the page)
  if (get_user_info) {
    user_profile_urls <- rvest::html_nodes(page, ".author__name") %>% rvest::html_attr("href")
    user_profile_urls <- paste0("https://patient.info", user_profile_urls)
    users_profile <- lapply(user_profile_urls, get_users_information)
    users_profile <- do.call("rbind", users_profile)
    df <- cbind(df, users_profile)
    return(df)
  } else {
    return(df)
  }
}
## scrape the total page numbers
## Read the pagination control of a parsed thread page and return, as a
## numeric vector, every run of digits that is immediately followed by a "/".
## Returns numeric(0) when the control is absent or contains no such numbers.
get_page_numbers <- function(x) {
  pagination_text <- rvest::html_text(
    rvest::html_node(x, ".reply__control.reply-pagination")
  )
  number_spans <- gregexpr("\\d+(?=/)", pagination_text, perl = TRUE)
  as.numeric(regmatches(pagination_text, number_spans)[[1]])
}
## scrape the post urls from the first page of one topic group
## Collect the relative URLs of every post listed on one topic page.
get_posts_urls_in_one_topic_page <- function(topic_url) {
  titles <- rvest::html_nodes(xml2::read_html(topic_url), ".post__title")
  anchors <- rvest::html_nodes(titles, "a")
  rvest::html_attr(anchors, "href")
}
## scrape all the post urls from a topic group
## Collect the URLs of all posts in a topic group, following pagination.
## `n1`/`n2` select which pagination pages to visit. Returns a list of
## absolute URL strings in both branches.
## NOTE(review): the default `n2 = length(topic_urls)` refers to a variable
## that is only created inside the body; it works because R evaluates default
## arguments lazily, but forcing `n2` on a single-page group would error --
## confirm this is intentional.
get_posts_urls <- function(topic_url, n1=1, n2=length(topic_urls)) {
  topic_page <- xml2::read_html(topic_url)
  page_numbers<- get_page_numbers(topic_page)
  if (length(page_numbers) == 0L) {
    # Single-page group: read the post links straight off this page.
    post_urls <- rvest::html_nodes(topic_page, ".post__title") %>% rvest::html_nodes("a") %>%
      rvest::html_attr("href")
    post_urls <- lapply(post_urls, function (x) paste0("https://patient.info", x))
  } else {
    # Multi-page group: the site uses 0-based ?page= indices.
    topic_urls <- sprintf("%s?page=%s", topic_url, page_numbers-1)
    post_urls <- lapply(topic_urls[n1:n2], get_posts_urls_in_one_topic_page)
    post_urls <- unlist(post_urls)
    post_urls <- lapply(post_urls, function (x) paste0("https://patient.info", x))
  }
  return(post_urls)
}
## get all groups urls in one index page
## Collect the absolute URLs of every forum group linked from one index page.
get_group_urls_in_one_index_page <- function(index_url) {
  index_page <- xml2::read_html(index_url)
  links <- rvest::html_nodes(rvest::html_nodes(index_page, ".row-0"), "a")
  paste0("https://patient.info", rvest::html_attr(links, "href"))
}
## get all groups urls of one or more innitial letter
## Build a data frame of forum group names and absolute URLs for the index
## pages of the given initial letters (defaults to all of a-z; one network
## request per letter).
get_group_urls_by_initial_letter <- function(index = letters) {
  index_list <- paste0("https://patient.info/forums/index-", index)
  group_urls <- lapply(index_list, get_group_urls_in_one_index_page)
  group_urls <- unlist(group_urls)
  # The group name is the slug between "browse/" and the trailing numeric id.
  group_names <- sub(".*browse/(.+)-\\d+", "\\1", group_urls)
  groups <- data.frame(group_names = group_names,
                       group_urls = group_urls,
                       stringsAsFactors = FALSE)
  return(groups)
}
## get all groups urls in one category
## Collect the absolute URLs of every forum group listed under one category
## page (a ".../forums/categories/<name>-<id>" URL). One network request.
get_group_urls_in_one_category <- function(cat_url) {
  group_urls <- xml2::read_html(cat_url) %>% rvest::html_nodes(".title") %>%
    rvest::html_nodes("a") %>% rvest::html_attr("href")
  group_urls <- paste0("https://patient.info", group_urls)
  # The former `cat_name`/`group_names` locals were computed here but never
  # used or returned, so they have been removed.
  return(group_urls)
}
## get category urls
## Scrape the forum landing page and return a data frame of category names
## (the slug between "categories/" and the numeric id) and their absolute
## URLs. One network request.
get_category_urls <- function() {
  cat_urls <- xml2::read_html("https://patient.info/forums") %>%
    rvest::html_nodes(".con-meds-lnk") %>%
    rvest::html_attr("href")
  cat_urls <- paste0("https://patient.info", cat_urls)
  cat_names <- sub(".*categories/(.+)-\\d+", "\\1", cat_urls)
  categories <- data.frame(cat_names = cat_names,
                           cat_urls = cat_urls,
                           stringsAsFactors = FALSE)
  return(categories)
}
## get a user's reply information from one topic post
## Scrape a single reply (identified by the commentid query parameter in
## `re_url`) together with summary information about the topic post it
## belongs to. Returns a one-row data frame.
get_user_reply <- function(re_url) {
  page <- xml2::read_html(re_url)
  # The reply's DOM id equals the commentid query parameter of the URL.
  content_id <- sub(".*commentid=(\\d+)", "\\1", re_url)
  content_id <- sprintf('[id="%s"]', content_id)
  ## get this user's content
  this_user <- rvest::html_node(page, content_id)
  ## get topic post content
  topic_post <- rvest::html_node(page, ".post__main")
  ## get user names
  name <- rvest::html_text(rvest::html_nodes(this_user, ".author__name"))
  ## reply to name
  reply_name <- rvest::html_text(rvest::html_nodes(this_user, ".author__recipient"))
  ## time (ISO "T" separator and "+00" offset stripped before parsing)
  time <- rvest::html_attr(rvest::html_node(this_user, "time"), "datetime")
  time <- gsub("T|\\+00", " ", time) %>%
    as.POSIXct(tryFormats = "%Y-%m-%d %H:%M")
  ## topic post title
  topic_title <- rvest::html_text(rvest::html_node(page, ".post__title"), trim = TRUE)
  ## topic post author
  topic_author <- rvest::html_node(topic_post, ".author__name") %>% rvest::html_text()
  ## topic post time
  topic_post_time <- rvest::html_attr(rvest::html_node(topic_post, "time"), "datetime")
  topic_post_time <- gsub("T|\\+00", " ", topic_post_time) %>%
    as.POSIXct(tryFormats = "%Y-%m-%d %H:%M")
  ## number of topic post likes and replies (the last <p> appears to hold the
  ## "N likes ... M replies" stats line -- confirm against live markup)
  topic_post_content <- rvest::html_node(topic_post, ".post__content") %>%
    rvest::html_nodes("p") %>% rvest::html_text(trim = TRUE)
  topic_post_likes <- sub("^(\\d+)\\slikes.*", "\\1", utils::tail(topic_post_content, n = 1))
  topic_post_replies <- sub(".*\\s(\\d+)\\sreplies", "\\1", utils::tail(topic_post_content, n = 1))
  ## topic post text (all paragraphs except the trailing stats line)
  topic_post_text <- paste(utils::head(topic_post_content, -1), sep = ' ', collapse = ' ')
  ## number of likes of the reply post
  post_action = grepl('post__actions', this_user, fixed = TRUE)
  post_count = grepl('post__count', this_user, fixed = TRUE)
  likes <- get_likes(posts = this_user, post_action, post_count)
  ## number of replies of the reply post and the type of reply post
  class_post_root <- grepl('<article class=\"post post__root\"', this_user, fixed = TRUE)
  replies_and_type <- get_replies_and_type(posts = this_user,
                                           class_post_root, class_post = FALSE)
  ## text of the reply itself, paragraphs joined with spaces
  text <- this_user %>%
    rvest::html_node(".post__content") %>%
    rvest::html_nodes("p") %>% rvest::html_text(trim = TRUE)
  text <- paste(text, sep = ' ', collapse = ' ')
  df_user_reply <- data.frame(user = name,
                              reply_name = reply_name,
                              time = time,
                              likes = likes,
                              replies = replies_and_type[1],
                              text = text,
                              type = replies_and_type[2],
                              topic_title = topic_title,
                              topic_author = topic_author,
                              topic_post_time = topic_post_time,
                              topic_post_likes = topic_post_likes,
                              topic_post_replies = topic_post_replies,
                              topic_post_text = topic_post_text,
                              stringsAsFactors = FALSE)
  return(df_user_reply)
}
## get a user's recent reply urls (re_urls)
## Collect the absolute URLs of the discussion pages a user recently replied
## in, read from the "/replies" listing of their profile. The commented-out
## lines are a disabled attempt at also reading the "following" listing.
get_re_urls <- function(user_profile_url) {
  replies_list_url <- paste0(user_profile_url, "/replies")
  #following_list_url <- paste0(user_profile_url, "/discussions/following")
  page1 <- xml2::read_html(replies_list_url)
  #page2 <- xml2::read_html(following_list_url)
  re_urls <- rvest::html_nodes(page1, ".recent-list") %>%
    rvest::html_nodes("a") %>% rvest::html_attr("href")
  # Keep only discussion links; the recent list also carries other hrefs.
  re_urls <- re_urls[grepl(".*discuss.*", re_urls)]
  re_urls <- paste0("https://patient.info", re_urls)
  #re_urls2 <- rvest::html_nodes(page2, "h3") %>%
  # rvest::html_nodes("a") %>% rvest::html_attr("href")
  return(re_urls)
}
## get a user's topic posts information
## Scrape the opening ("main") post of a discussion into a one-row data frame
## with the same columns as get_user_reply(); reply-specific fields are NA.
get_user_topic_post <- function(tp_url) {
  page <- xml2::read_html(tp_url)
  ## post type
  type <- "main_post"
  ## get topic post content
  topic_post <- rvest::html_node(page, ".post__main")
  ## topic post title
  topic_title <- rvest::html_text(rvest::html_node(page, ".post__title"), trim = TRUE)
  ## topic post author
  topic_author <- rvest::html_node(topic_post, ".author__name") %>% rvest::html_text()
  ## topic post time (ISO "T" separator and "+00" offset stripped)
  topic_post_time <- rvest::html_attr(rvest::html_node(topic_post, "time"), "datetime")
  topic_post_time <- gsub("T|\\+00", " ", topic_post_time) %>%
    as.POSIXct(tryFormats = "%Y-%m-%d %H:%M")
  ## number of topic post likes and replies (last <p> holds the stats line)
  topic_post_content <- rvest::html_node(topic_post, ".post__content") %>%
    rvest::html_nodes("p") %>% rvest::html_text(trim = TRUE)
  topic_post_likes <- sub("^(\\d+)\\slikes.*", "\\1", utils::tail(topic_post_content, n = 1))
  topic_post_replies <- sub(".*\\s(\\d+)\\sreplies", "\\1", utils::tail(topic_post_content, n = 1))
  ## topic post text (all paragraphs except the trailing stats line)
  topic_post_text <- paste(utils::head(topic_post_content, -1), sep = ' ', collapse = ' ')
  df_user_tpost <- data.frame(user = topic_author,
                              reply_name = NA,
                              time = NA,
                              likes = NA,
                              replies = NA,
                              text = NA,
                              type = type,
                              topic_title = topic_title,
                              topic_author = topic_author,
                              topic_post_time = topic_post_time,
                              topic_post_likes = topic_post_likes,
                              topic_post_replies = topic_post_replies,
                              topic_post_text = topic_post_text,
                              stringsAsFactors = FALSE)
  return(df_user_tpost)
}
## get a user's recent topic post urls (re_urls)
## Collect the absolute URLs of the discussions a user has started, read from
## the "/discussions/startedbyme" listing on their profile page.
get_tp_urls <- function(user_profile_url) {
  started_page <- xml2::read_html(paste0(user_profile_url, "/discussions/startedbyme"))
  headings <- rvest::html_nodes(started_page, "h3")
  relative_urls <- rvest::html_attr(rvest::html_node(headings, "a"), "href")
  paste0("https://patient.info", relative_urls)
}
## function to count words matches in a dictionary
## Count, for each document in `x`, how many of its tokens match the
## dictionary `dict` (a data frame with columns `word` and `value`).
##
## `x` may be a character vector (each element is tokenized with the
## `tokenizers` package after URLs and @mentions are stripped) or a list of
## pre-tokenized character vectors.
##
## Returns a data frame with one row per document and columns
## `total_words_count`, `med_words_count` (sum of matched `value`s) and
## `med_words_ratio` (NaN for empty documents, since it is 0/0).
word_match <- function(x, dict) {
  if (is.character(x)) {
    ## this removes URLs and @mentions before tokenizing
    x <- gsub("https?://\\S+|@\\S+", "", x)
    x <- tokenizers::tokenize_words(
      x, lowercase = TRUE, strip_punct = TRUE, strip_numeric = FALSE
    )
  }
  word_count <- function(token) {
    total_words_count <- length(token)
    # Unmatched tokens yield NA from match(); na.rm = TRUE discards them.
    med_words_count <- sum(dict$value[match(token, dict$word)], na.rm = TRUE)
    med_words_ratio <- med_words_count/total_words_count
    data.frame(total_words_count = total_words_count,
               med_words_count = med_words_count,
               med_words_ratio = med_words_ratio,
               stringsAsFactors = FALSE)
  }
  # Return the combined rows visibly (previously this was the result of a
  # trailing assignment, which R returns invisibly).
  do.call("rbind", lapply(x, word_count))
}
| /R/hidden_functions.R | permissive | mkearney/patientforum | R | false | false | 15,121 | r |
### likes of post_root get all the likes of nested replies.
## get_likes, get_reply_names, get_replies_and_type are the functions for get_one_page
get_likes <- function(posts, post_action, post_count) {
if (post_action & post_count) {
likes <- rvest::html_node(posts, ".post__actions") %>%
rvest::html_node(".post__count") %>% rvest::html_text()
} else if (post_action & !post_count) {
likes <- 0L
} else {
likes <- NA
}
}
get_reply_names <- function(posts, author_recipient) {
if (author_recipient) {
reply_names <- rvest::html_node(posts, ".author__recipient") %>% rvest::html_text()
} else {
reply_names <- NA
}
}
get_replies_and_type <- function(posts, class_post_root, class_post) {
if (class_post_root) {
# count how many "<article class="post" are in a post
matches <- gregexpr('<article class="post"', posts, fixed = TRUE)[[1]]
if (matches[1] > 0L) {
replies <- length(matches)
} else {
replies <- 0L
}
types <- "reply"
} else if (class_post) {
replies <- as.numeric(sub(".*\\s(\\d+)\\sreplies", "\\1",
rvest::html_text(
rvest::html_nodes(posts, ".post__stats")
)[2]
)
)
types <- "main_post"
} else {
replies <- 0L
types <- "nested_reply"
}
return(c(replies, types))
}
## get users' information
get_users_information <- function(user_profile_url) {
profile_page <- xml2::read_html(user_profile_url)
date_posts <- rvest::html_nodes(profile_page, ".masthead__actions__link") %>%
rvest::html_text(trim = TRUE)
join_date <- sub("Joined ", "", date_posts[1], fixed = TRUE) %>%
as.POSIXct(tryFormats = "%d-%m-%Y")
posts_num <- as.numeric(sub(" posts", "", date_posts[2], fixed = TRUE))
profile_text <- rvest::html_node(profile_page, ".my-profile__row__summary") %>%
rvest::html_text(trim = TRUE)
group_names <- rvest::html_nodes(profile_page, ".groups-row") %>%
rvest::html_text(trim = TRUE)
group_names <- unlist(strsplit(group_names, "\r\n")) %>%
stringr::str_trim(side = "both")
user_profile <- data.frame(join_date = join_date,
posts_num = posts_num,
profile_text = profile_text,
group_names = paste(group_names, collapse = ", "),
stringsAsFactors = FALSE)
return(user_profile)
}
## scrape data from the first page of one post
get_one_page <- function(url, get_user_info = TRUE) {
page <- xml2::read_html(url)
## get all the posts
posts <- rvest::html_nodes(page, ".post")
posts_id <- unlist(purrr::map(rvest::html_attrs(posts), 2))
## likes
post_action = grepl('post__actions', posts, fixed = TRUE)
post_count = grepl('post__count', posts, fixed = TRUE)
likes <- base::mapply(get_likes, posts, post_action, post_count) # get_likes is a function
## get user names
names <- rvest::html_text(rvest::html_nodes(page, ".author__name"))
## reply to names
author_recipient <- grepl('author__recipient', posts, fixed = TRUE)
reply_names <- base::mapply(get_reply_names, posts, author_recipient) # get_reply_names is a function
## date and time
post_time <- rvest::html_attr(rvest::html_nodes(page, "time"), "datetime")
post_time <- gsub("T|\\+00", " ", post_time) %>%
as.POSIXct(tryFormats = "%Y-%m-%d %H:%M")
## reply count and type
class_post_root <- grepl('<article class=\"post post__root\"', posts, fixed = TRUE)
class_post <- grepl('<article class=\"post mb-0"', posts, fixed = TRUE)
replies_and_type <- base::mapply(get_replies_and_type, posts, class_post_root, class_post)
## post title and text
post_title <- rvest::html_text(rvest::html_node(posts, ".post__title"), trim = TRUE)
text <- rvest::html_text(rvest::html_nodes(posts, ".post__content"), trim = TRUE)
### clean the text
text <- text %>%
gsub(pattern = "\n|\r|[\\^]|\\s+", replacement = " ") %>%
gsub(pattern = "(\\d+ likes)|(\\d+ replies)|Report|Reply", replacement = "") %>%
stringr::str_trim(side = "both")
## combine to a dataframe
df <- data.frame(posts_id = posts_id,
post_time = post_time,
types = as.character(replies_and_type[2, ]),
user_names = names,
reply_names = reply_names,
likes = as.numeric(likes),
replies = as.numeric(replies_and_type[1, ]),
text = text,
stringsAsFactors = FALSE,
check.names = F, fix.empty.names = F)
df$post_title <- post_title[1]
## get users' information
if (get_user_info) {
user_profile_urls <- rvest::html_nodes(page, ".author__name") %>% rvest::html_attr("href")
user_profile_urls <- paste0("https://patient.info", user_profile_urls)
users_profile <- lapply(user_profile_urls, get_users_information)
users_profile <- do.call("rbind", users_profile)
df <- cbind(df, users_profile)
return(df)
} else {
return(df)
}
}
## scrape the total page numbers
get_page_numbers <- function(x) {
p <- rvest::html_node(x, ".reply__control.reply-pagination") %>%
rvest::html_text()
m <- gregexpr("\\d+(?=/)", p, perl = TRUE)
as.numeric(regmatches(p, m)[[1]])
}
## scrape the post urls from the first page of one topic group
get_posts_urls_in_one_topic_page <- function(topic_url) {
post_urls <- xml2::read_html(topic_url) %>% rvest::html_nodes(".post__title") %>%
rvest::html_nodes("a") %>% rvest::html_attr("href")
return(post_urls)
}
## scrape all the post urls from a topic group
get_posts_urls <- function(topic_url, n1=1, n2=length(topic_urls)) {
topic_page <- xml2::read_html(topic_url)
page_numbers<- get_page_numbers(topic_page)
if (length(page_numbers) == 0L) {
post_urls <- rvest::html_nodes(topic_page, ".post__title") %>% rvest::html_nodes("a") %>%
rvest::html_attr("href")
post_urls <- lapply(post_urls, function (x) paste0("https://patient.info", x))
} else {
topic_urls <- sprintf("%s?page=%s", topic_url, page_numbers-1)
post_urls <- lapply(topic_urls[n1:n2], get_posts_urls_in_one_topic_page)
post_urls <- unlist(post_urls)
post_urls <- lapply(post_urls, function (x) paste0("https://patient.info", x))
}
return(post_urls)
}
## get all groups urls in one index page
get_group_urls_in_one_index_page <- function(index_url) {
group_urls <- xml2::read_html(index_url) %>% rvest::html_nodes(".row-0") %>%
rvest::html_nodes("a") %>% rvest::html_attr("href")
group_urls <- paste0("https://patient.info", group_urls)
return(group_urls)
}
## get all groups urls of one or more innitial letter
get_group_urls_by_initial_letter <- function(index = letters) {
index_list <- paste0("https://patient.info/forums/index-", index)
group_urls <- lapply(index_list, get_group_urls_in_one_index_page)
group_urls <- unlist(group_urls)
group_names <- sub(".*browse/(.+)-\\d+", "\\1", group_urls)
groups <- data.frame(group_names = group_names,
group_urls = group_urls,
stringsAsFactors = FALSE)
return(groups)
}
## get all groups urls in one category
get_group_urls_in_one_category <- function(cat_url) {
group_urls <- xml2::read_html(cat_url) %>% rvest::html_nodes(".title") %>%
rvest::html_nodes("a") %>% rvest::html_attr("href")
group_urls <- paste0("https://patient.info", group_urls)
cat_name <- sub(".*categories/(.+)-\\d+", "\\1", cat_url)
group_names <- sub(".*categories/(.+)-\\d+", "\\1", cat_url)
return(group_urls)
}
## get category urls
get_category_urls <- function() {
cat_urls <- xml2::read_html("https://patient.info/forums") %>%
rvest::html_nodes(".con-meds-lnk") %>%
rvest::html_attr("href")
cat_urls <- paste0("https://patient.info", cat_urls)
cat_names <- sub(".*categories/(.+)-\\d+", "\\1", cat_urls)
categories <- data.frame(cat_names = cat_names,
cat_urls = cat_urls,
stringsAsFactors = FALSE)
return(categories)
}
## get a user's reply information from one topic post
get_user_reply <- function(re_url) {
page <- xml2::read_html(re_url)
content_id <- sub(".*commentid=(\\d+)", "\\1", re_url)
content_id <- sprintf('[id="%s"]', content_id)
## get this user's content
this_user <- rvest::html_node(page, content_id)
## get topic post content
topic_post <- rvest::html_node(page, ".post__main")
## get user names
name <- rvest::html_text(rvest::html_nodes(this_user, ".author__name"))
## reply to name
reply_name <- rvest::html_text(rvest::html_nodes(this_user, ".author__recipient"))
## time
time <- rvest::html_attr(rvest::html_node(this_user, "time"), "datetime")
time <- gsub("T|\\+00", " ", time) %>%
as.POSIXct(tryFormats = "%Y-%m-%d %H:%M")
## topic post title
topic_title <- rvest::html_text(rvest::html_node(page, ".post__title"), trim = TRUE)
## topic post author
topic_author <- rvest::html_node(topic_post, ".author__name") %>% rvest::html_text()
## topic post time
topic_post_time <- rvest::html_attr(rvest::html_node(topic_post, "time"), "datetime")
topic_post_time <- gsub("T|\\+00", " ", topic_post_time) %>%
as.POSIXct(tryFormats = "%Y-%m-%d %H:%M")
## number of topic post likes and replies
topic_post_content <- rvest::html_node(topic_post, ".post__content") %>%
rvest::html_nodes("p") %>% rvest::html_text(trim = TRUE)
topic_post_likes <- sub("^(\\d+)\\slikes.*", "\\1", utils::tail(topic_post_content, n = 1))
topic_post_replies <- sub(".*\\s(\\d+)\\sreplies", "\\1", utils::tail(topic_post_content, n = 1))
## topic post text
topic_post_text <- paste(utils::head(topic_post_content, -1), sep = ' ', collapse = ' ')
## number of likes of the reply post
post_action = grepl('post__actions', this_user, fixed = TRUE)
post_count = grepl('post__count', this_user, fixed = TRUE)
likes <- get_likes(posts = this_user, post_action, post_count)
## number of replies of the reply post and the type of reply post
class_post_root <- grepl('<article class=\"post post__root\"', this_user, fixed = TRUE)
replies_and_type <- get_replies_and_type(posts = this_user,
class_post_root, class_post = FALSE)
## text
text <- this_user %>%
rvest::html_node(".post__content") %>%
rvest::html_nodes("p") %>% rvest::html_text(trim = TRUE)
text <- paste(text, sep = ' ', collapse = ' ')
df_user_reply <- data.frame(user = name,
reply_name = reply_name,
time = time,
likes = likes,
replies = replies_and_type[1],
text = text,
type = replies_and_type[2],
topic_title = topic_title,
topic_author = topic_author,
topic_post_time = topic_post_time,
topic_post_likes = topic_post_likes,
topic_post_replies = topic_post_replies,
topic_post_text = topic_post_text,
stringsAsFactors = FALSE)
return(df_user_reply)
}
## get a user's recent reply urls (re_urls)
get_re_urls <- function(user_profile_url) {
replies_list_url <- paste0(user_profile_url, "/replies")
#following_list_url <- paste0(user_profile_url, "/discussions/following")
page1 <- xml2::read_html(replies_list_url)
#page2 <- xml2::read_html(following_list_url)
re_urls <- rvest::html_nodes(page1, ".recent-list") %>%
rvest::html_nodes("a") %>% rvest::html_attr("href")
re_urls <- re_urls[grepl(".*discuss.*", re_urls)]
re_urls <- paste0("https://patient.info", re_urls)
#re_urls2 <- rvest::html_nodes(page2, "h3") %>%
# rvest::html_nodes("a") %>% rvest::html_attr("href")
return(re_urls)
}
## get a user's topic posts information
get_user_topic_post <- function(tp_url) {
page <- xml2::read_html(tp_url)
## post type
type <- "main_post"
## get topic post content
topic_post <- rvest::html_node(page, ".post__main")
## topic post title
topic_title <- rvest::html_text(rvest::html_node(page, ".post__title"), trim = TRUE)
## topic post author
topic_author <- rvest::html_node(topic_post, ".author__name") %>% rvest::html_text()
## topic post time
topic_post_time <- rvest::html_attr(rvest::html_node(topic_post, "time"), "datetime")
topic_post_time <- gsub("T|\\+00", " ", topic_post_time) %>%
as.POSIXct(tryFormats = "%Y-%m-%d %H:%M")
## number of topic post likes and replies
topic_post_content <- rvest::html_node(topic_post, ".post__content") %>%
rvest::html_nodes("p") %>% rvest::html_text(trim = TRUE)
topic_post_likes <- sub("^(\\d+)\\slikes.*", "\\1", utils::tail(topic_post_content, n = 1))
topic_post_replies <- sub(".*\\s(\\d+)\\sreplies", "\\1", utils::tail(topic_post_content, n = 1))
## topic post text
topic_post_text <- paste(utils::head(topic_post_content, -1), sep = ' ', collapse = ' ')
df_user_tpost <- data.frame(user = topic_author,
reply_name = NA,
time = NA,
likes = NA,
replies = NA,
text = NA,
type = type,
topic_title = topic_title,
topic_author = topic_author,
topic_post_time = topic_post_time,
topic_post_likes = topic_post_likes,
topic_post_replies = topic_post_replies,
topic_post_text = topic_post_text,
stringsAsFactors = FALSE)
return(df_user_tpost)
}
## get a user's recent topic post urls (re_urls)
get_tp_urls <- function(user_profile_url) {
tp_list_url <- paste0(user_profile_url, "/discussions/startedbyme")
page <- xml2::read_html(tp_list_url)
tp_urls <- rvest::html_nodes(page, "h3") %>%
rvest::html_node("a") %>% rvest::html_attr("href")
tp_urls <- paste0("https://patient.info", tp_urls)
return(tp_urls)
}
## function to count words matches in a dictionary
word_match <- function(x, dict) {
if (is.character(x)) {
## this removes URLs
x <- gsub("https?://\\S+|@\\S+", "", x)
x <- tokenizers::tokenize_words(
x, lowercase = TRUE, strip_punct = TRUE, strip_numeric = FALSE
)
}
word_count <- function(token) {
total_words_count <- length(token)
med_words_count <- sum(dict$value[match(token, dict$word)], na.rm = TRUE)
med_words_ratio <- med_words_count/total_words_count
data.frame(total_words_count = total_words_count,
med_words_count = med_words_count,
med_words_ratio = med_words_ratio,
stringsAsFactors = FALSE)
}
count <- lapply(x, word_count)
count <- do.call("rbind", count)
}
|
## Validation post-processing: extract ensemble surface (wtr_1) and bottom
## (wtr_30, written to the "33m" file) temperatures from the LakeEnsemblR
## output netCDF, reshape them to wide form (one column per model plus the
## multi-model mean), and write them to CSV for later skill calculations.
library(gotmtools)
library(LakeEnsemblR)  # was loaded twice; the duplicate call has been removed
library(ggplot2)
library(ggpubr)
library(dplyr)
library(rLakeAnalyzer)
library(reshape)
library(reshape2)
library(RColorBrewer)
library(lubridate)
library(Metrics)
library(plotrix)
library(here)
setwd(here())  # project root; previously `paste0(here())`, which is equivalent
ncdf <- "./LER_validation/output/ensemble_output.nc"
out <- load_var(ncdf = ncdf, var = "temp")
## ---- Surface layer (wtr_1) ----
df <- melt(out, id.vars = 1)
colnames(df)[4] <- "model"
df$yday <- yday(df$datetime)
df$year <- year(df$datetime)
df <- filter(df, variable == "wtr_1")
wideform <- dcast(df, datetime~model, value.var = "value")
# Keep only timestamps where the observation and all five models are present.
wideform <- filter(wideform, is.na(Obs) == FALSE & is.na(GLM) == FALSE &
                     is.na(GOTM) == FALSE & is.na(FLake) == FALSE &
                     is.na(Simstrat) == FALSE & is.na(MyLake) == FALSE)
wideformmean <- (wideform$FLake + wideform$GLM + wideform$GOTM + wideform$MyLake + wideform$Simstrat)/5
wideform$mean <- wideformmean
write.csv(wideform ,"./LER_validation/vali_calcs/surface_1m_wideform_vali.csv", row.names = FALSE)
## ---- Bottom layer (wtr_30; FLake excluded -- presumably it does not
## resolve this depth, confirm with the model configuration) ----
df <- melt(out, id.vars = 1)
colnames(df)[4] <- "model"
df$yday <- yday(df$datetime)
df$year <- year(df$datetime)
df <- filter(df, variable == "wtr_30", model != "FLake")
wideform <- dcast(df, datetime~model, value.var = "value")
wideform <- filter(wideform, is.na(Obs) == FALSE & is.na(GLM) == FALSE &
                     is.na(GOTM) == FALSE &
                     is.na(Simstrat) == FALSE & is.na(MyLake) == FALSE)
wideformmean <- (wideform$GLM + wideform$GOTM + wideform$MyLake + wideform$Simstrat)/4
wideform$mean <- wideformmean
write.csv(wideform ,"./LER_validation/vali_calcs/bottom_33m_wideform_vali.csv", row.names = FALSE)
| /scripts/stepthrough/step_3_validation/s3.2_surface_bottom_temp_vali.R | no_license | jacob8776/sunapee_LER_projections | R | false | false | 1,692 | r | library(gotmtools)
library(LakeEnsemblR)
library(ggplot2)
library(LakeEnsemblR)
library(ggpubr)
library(dplyr)
library(rLakeAnalyzer)
library(reshape)
library(reshape2)
library(RColorBrewer)
library(lubridate)
library(Metrics)
library(plotrix)
library(here)
setwd(paste0(here()))
ncdf <- "./LER_validation/output/ensemble_output.nc"
out <- load_var(ncdf = ncdf, var = "temp")
df <- melt(out, id.vars = 1)
colnames(df)[4] <- "model"
df$yday <- yday(df$datetime)
df$year <- year(df$datetime)
df <- filter(df, variable == "wtr_1")
wideform <- dcast(df, datetime~model, value.var = "value")
wideform <- filter(wideform, is.na(Obs) == FALSE & is.na(GLM) == FALSE &
is.na(GOTM) == FALSE & is.na(FLake) == FALSE &
is.na(Simstrat) == FALSE & is.na(MyLake) == FALSE)
wideformmean <- (wideform$FLake + wideform$GLM + wideform$GOTM + wideform$MyLake + wideform$Simstrat)/5
wideform$mean <- wideformmean
write.csv(wideform ,"./LER_validation/vali_calcs/surface_1m_wideform_vali.csv", row.names = FALSE)
df <- melt(out, id.vars = 1)
colnames(df)[4] <- "model"
df$yday <- yday(df$datetime)
df$year <- year(df$datetime)
df <- filter(df, variable == "wtr_30", model != "FLake")
wideform <- dcast(df, datetime~model, value.var = "value")
wideform <- filter(wideform, is.na(Obs) == FALSE & is.na(GLM) == FALSE &
is.na(GOTM) == FALSE &
is.na(Simstrat) == FALSE & is.na(MyLake) == FALSE)
wideformmean <- (wideform$GLM + wideform$GOTM + wideform$MyLake + wideform$Simstrat)/4
wideform$mean <- wideformmean
write.csv(wideform ,"./LER_validation/vali_calcs/bottom_33m_wideform_vali.csv", row.names = FALSE)
|
#' CSWTransaction
#'
#' @docType class
#' @export
#' @keywords OGC CSW Transaction
#' @return Object of \code{\link{R6Class}} for modelling a CSW Transaction request
#' @format \code{\link{R6Class}} object.
#'
#' @section Methods:
#' \describe{
#'  \item{\code{new(op, url, serviceVersion, type, user, pwd, record, recordProperty, constraint, logger, ...)}}{
#'    This method is used to instantiate a CSWTransaction object. It builds the
#'    transaction payload for the operation selected by \code{type} and sends
#'    the request to the catalogue service on construction.
#'  }
#' }
#'
#' @note Class used internally by \pkg{ows4R} to trigger a CSW Transaction request
#'
#' @author Emmanuel Blondel <emmanuel.blondel1@@gmail.com>
#'
CSWTransaction <- R6Class("CSWTransaction",
  lock_objects = FALSE,
  inherit = OWSRequest,
  private = list(
    xmlElement = "Transaction",
    # Base namespace; the CSW version suffix is appended in initialize().
    xmlNamespace = c(csw = "http://www.opengis.net/cat/csw")
  ),
  public = list(
    initialize = function(op, url, serviceVersion, type, user = NULL, pwd = NULL,
                          record = NULL, recordProperty = NULL, constraint = NULL,
                          logger = NULL, ...) {
      # CSW 3.0.0 uses the "3.0" namespace suffix and the "csw30" prefix;
      # other versions use their own number with the "csw" prefix.
      nsVersion <- ifelse(serviceVersion=="3.0.0", "3.0", serviceVersion)
      private$xmlNamespace = paste(private$xmlNamespace, nsVersion, sep="/")
      names(private$xmlNamespace) <- ifelse(serviceVersion=="3.0.0", "csw30", "csw")
      # Store the operation payload under the transaction type (e.g. "Insert").
      self[[type]] = list(
        record = record,
        recordProperty = recordProperty,
        constraint = constraint
      )
      super$initialize(op, "POST", url, request = private$xmlElement,
                       user = user, pwd = pwd,
                       contentType = "text/xml", mimeType = "text/xml",
                       logger = logger, ...)
      self$wrap <- TRUE
      self$attrs <- list(service = "CSW", version = serviceVersion)
      # NOTE: the request is executed immediately on construction.
      self$execute()
    }
  )
) | /R/CSWTransaction.R | no_license | SIBeckers/ows4R | R | false | false | 1,693 | r |
#'
#' @docType class
#' @export
#' @keywords OGC CSW Transaction
#' @return Object of \code{\link{R6Class}} for modelling a CSW Transaction request
#' @format \code{\link{R6Class}} object.
#'
#' @section Methods:
#' \describe{
#' \item{\code{new(url, version, id)}}{
#' This method is used to instantiate a CSWTransaction object
#' }
#' }
#'
#' @note Class used internally by \pkg{ows4R} to trigger a CSW Transaction request
#'
#' @author Emmanuel Blondel <emmanuel.blondel1@@gmail.com>
#'
CSWTransaction <- R6Class("CSWTransaction",
lock_objects = FALSE,
inherit = OWSRequest,
private = list(
xmlElement = "Transaction",
xmlNamespace = c(csw = "http://www.opengis.net/cat/csw")
),
public = list(
initialize = function(op, url, serviceVersion, type, user = NULL, pwd = NULL,
record = NULL, recordProperty = NULL, constraint = NULL,
logger = NULL, ...) {
nsVersion <- ifelse(serviceVersion=="3.0.0", "3.0", serviceVersion)
private$xmlNamespace = paste(private$xmlNamespace, nsVersion, sep="/")
names(private$xmlNamespace) <- ifelse(serviceVersion=="3.0.0", "csw30", "csw")
self[[type]] = list(
record = record,
recordProperty = recordProperty,
constraint = constraint
)
super$initialize(op, "POST", url, request = private$xmlElement,
user = user, pwd = pwd,
contentType = "text/xml", mimeType = "text/xml",
logger = logger, ...)
self$wrap <- TRUE
self$attrs <- list(service = "CSW", version = serviceVersion)
self$execute()
}
)
) |
# Getting & Cleaning Data course project: download the UCI HAR dataset and
# build (1) a tidy mean/std dataset and (2) a per-subject/activity summary.

# Download the raw archive. method = "curl" kept from the original script;
# note it requires a curl binary and is not portable to all platforms.
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
if (!file.exists("data")) {
  dir.create("data")
}
download.file(fileUrl, destfile = "./data/data.zip", method = "curl")
dateDownloaded <- date()
print(dateDownloaded)

# Unzip the archive next to the download.
unzip("./data/data.zip", exdir = "./data")
base_dir <- "./data/UCI HAR Dataset"

# Feature names from features.txt, used as column names for both splits.
mynames <- read.table(file.path(base_dir, "features.txt"))[[2]]

# Load one split ("test" or "train"): the feature matrix with columns named
# after features.txt, plus the activity id and subject id per observation.
# (Factored out because the original script duplicated this block verbatim.)
load_split <- function(split) {
  x <- read.table(file.path(base_dir, split, paste0("X_", split, ".txt")))
  names(x) <- mynames
  x$activity <- read.table(file.path(base_dir, split, paste0("y_", split, ".txt")))[[1]]
  x$subject <- read.table(file.path(base_dir, split, paste0("subject_", split, ".txt")))[[1]]
  x
}

# Step 1: merge the training and the test sets into one data set
# (test first, preserving the original row order).
step1 <- rbind(load_split("test"), load_split("train"))

# Step 2: extract only the mean() and std() measurements.
# activity and subject are retained because later steps need them.
step2 <- step1[, grep("activity|subject|mean\\(\\)|std\\(\\)", names(step1))]

# Step 3: use descriptive activity names from activity_labels.txt.
activity <- read.table(file.path(base_dir, "activity_labels.txt"))
names(activity) <- c("id", "activityDescription")
step3 <- merge(step2, activity, by.x = "activity", by.y = "id")
# The numeric activity id is now redundant with activityDescription.
step3$activity <- NULL

# Step 4: label the data set with descriptive variable names.
# Labels were already applied from features.txt in load_split(), so the
# data set is fully labelled at this point.
step4 <- step3

# Step 5: average of each variable for each activity and each subject.
# The last two columns of step4 are subject and activityDescription.
library(dplyr)
step5 <- summarize_at(group_by(step4, subject, activityDescription),
                      vars(1:(length(step4) - 2)),
                      list(mean))

# Persist both datasets for further use.
write.table(step4, "tidy.txt", row.names = FALSE)
write.table(step5, "summary.txt", row.names = FALSE)
| /run_analysis.R | no_license | jorditejedor/gettingandcleaningdata | R | false | false | 2,659 | r | # Download the file
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
if(!file.exists("data")) {
dir.create("data")
}
download.file(fileUrl, destfile = "./data/data.zip", method = "curl")
dateDownloaded <- date()
print(dateDownloaded)
# Unzip the file
unzip("./data/data.zip",exdir="./data")
base_dir <- "./data/UCI HAR Dataset"
mynames <- read.table(file.path(base_dir,"features.txt"))[[2]]
## step 1 Merges the training and the test sets to create one data set
# load x test data into a data.frame (mytable1).
# set column names to the ones in the features.txt (prev. loaded in mynames)
# add y (activity) and subject test data into the data.frame
mytable1 <- read.table(file.path(base_dir,"test/X_test.txt"))
names(mytable1)<-mynames
mytable1$activity <- read.table(file.path(base_dir,"test/y_test.txt"))[[1]]
mytable1$subject <- read.table(file.path(base_dir,"test/subject_test.txt"))[[1]]
# load train data in a new data.frame (mytable2)
# using the same steps than test
mytable2 <- read.table(file.path(base_dir,"train/X_train.txt"))
names(mytable2)<-mynames
mytable2$activity <- read.table(file.path(base_dir,"train/y_train.txt"))[[1]]
mytable2$subject <- read.table(file.path(base_dir,"train/subject_train.txt"))[[1]]
# merge both tables in a new data.frame called mytable
step1<-rbind(mytable1,mytable2)
# step 2 Extracts only the measurements on the mean and standard deviation
# for each measurement.
# note: also activity and subject as they are required in further steps
step2<-step1[,grep("activity|subject|mean\\(\\)|std\\(\\)",names(step1))]
# step 3 Uses descriptive activity names to name the activities in the data set
activity<-read.table(file.path(base_dir,"activity_labels.txt"))
names(activity)<-c("id","activityDescription")
step3<-merge(step2,activity,by.x = "activity", by.y = "id")
# note: I decided to remove the activity column as now is redundant with
# activityDescription
step3$activity<-NULL
# step 4 Appropriately labels the data set with descriptive variable names.
# note: as part of the step 1 names in features.txt where added, that
# already are descriptive nothing to do in this step
step4<-step3
# step 5 From the data set in step 4, creates a second, independent tidy data
# set with the average of each variable for each activity and each subject.
library(dplyr)
step5<-summarize_at(group_by(step4,subject,activityDescription),
vars(1:(length(step4)-2)),
list(mean))
# final save datasets for further use
write.table(step4,"tidy.txt",row.names = FALSE)
write.table(step5,"summary.txt",row.names = FALSE)
|
# Internal helper: parse "x1 y1 x2 y2" strings (comma- and/or space-
# separated) into an n x 4 numeric matrix, one row per bounding box.
bbox_to_matrix <- function(bbox){
  tokens <- strsplit(bbox, ",\\s?|\\s+")
  rows <- lapply(tokens, as.numeric)
  do.call(rbind, rows)
}
#' Convert bbox to magick-geometry
#'
#' @param x character vector with bounding boxes (top-left coordinates). Expects four integers separated by comma or space
#'
#' @return character vector of magick-compliant geometry strings describing
#'   the bounding box area in the format "widthxheight+x_off+y_off"
#'   (https://www.imagemagick.org/Magick++/Geometry.html)
#' @seealso [magick::geometry] for converting integers (four individual columns) to geometry
#' @export
#'
#' @examples
#' # area from c(0,0) to c(100,200) counting from top left
#' bbox_to_geometry("0 0 100 200")
#' bbox_to_geometry(c("0 0 100 200", "100,100,200, 200"))
#'
bbox_to_geometry <- function(x){
  b <- bbox_to_matrix(x)
  width <- b[, 3] - b[, 1]
  height <- b[, 4] - b[, 2]
  paste0(width, "x", height, "+", b[, 1], "+", b[, 2])
}
#' Functions for horizontal and vertical slicing of bbox columns
#'
#' Split each bounding box at a given x and/or y coordinate. These functions
#' are vectorized over `bbox` and the slicing coordinates. The coordinate
#' itself belongs to the right/bottom part; the left/top part ends one unit
#' before it. Slices with zero or negative extent come back as `NA`.
#' @param bbox string or character vector of bbox'es
#' @param x,y coordinates to slice the bounding box at
#'
#' @return list of bbox'es applicable for particular split
#' @export
#'
#' @examples
#' bbox_slice_x("0 0 100 200", 80)
#' bbox_slice_x(c("0 0 100 200", "100 100 200 200"), c(80, 150))
#'
#' @rdname bbox_slice
#'
bbox_slice_x <- function(bbox, x){
  b <- bbox_to_matrix(bbox)
  left_part <- paste(b[, 1], b[, 2], x - 1, b[, 4])
  right_part <- paste(x, b[, 2], b[, 3], b[, 4])
  list(left = bbox_validate(left_part),
       right = bbox_validate(right_part))
}
#' @export
#'
#' @examples
#' bbox_slice_y("0 0 100 200", 120)
#' bbox_slice_y(c("0 0 100 200", "100 100 200 200"), c(120,150))
#' @rdname bbox_slice
bbox_slice_y <- function(bbox, y){
  b <- bbox_to_matrix(bbox)
  top_part <- paste(b[, 1], b[, 2], b[, 3], y - 1)
  bottom_part <- paste(b[, 1], y, b[, 3], b[, 4])
  list(top = bbox_validate(top_part),
       bottom = bbox_validate(bottom_part))
}
#' @export
#'
#' @examples
#' bbox_slice_xy("0 0 100 200", 50, 100)
#' bbox_slice_xy(c("0 0 100 200", "100,100, 200, 200"), c(50, 150), c(100, 150))
#' @rdname bbox_slice
bbox_slice_xy <- function(bbox, x, y){
  b <- bbox_to_matrix(bbox)
  # Four quadrants around the split point (x, y).
  list(top_left = bbox_validate(paste(b[, 1], b[, 2], x - 1, y - 1)),
       top_right = bbox_validate(paste(x, b[, 2], b[, 3], y - 1)),
       bottom_left = bbox_validate(paste(b[, 1], y, x - 1, b[, 4])),
       bottom_right = bbox_validate(paste(x, y, b[, 3], b[, 4])))
}
#' Functions for aggregating bbox objects
#'
#' These functions perform union and intersection operations on bbox objects.
#' `bbox_union()` collapses a whole vector of boxes into one enclosing box;
#' `bbox_union2()` and `bbox_intersect()` (with `bbox2` supplied) operate
#' element-wise on pairs, recycling length-1 arguments.
#' @param bbox character vector of bounding boxes to perform operation on
#' @param fx1,fy1,fx2,fy2 functions to use for aggregating x1, y1, x2, y2, respectively. Defaults to `min` for x1,y1 and `max` for x2,y2
#' @param bbox2 optional character vector of bounding boxes for element-wise aggregation with `bbox`.
#' If specified, needs to be length 1 or equal in length to `bbox`.
#'
#' @return bbox or missing value, if result is invalid bounding box
#' @export
#'
#' @examples
#' bbox_union(c("5 1 7 3", "2 4 6 8"))
#' bbox_union2(c("5 1 7 3", "2 4 6 8"), c("1 1 1 1"))
#' bbox_intersect(c("5 1 7 3", "2 4 6 8")) # should return NA
#' bbox_intersect("5 1 7 3", "2 2 6 8")
#' @rdname bbox_aggregate
#'
bbox_union <- function(bbox, fx1=min, fy1=min, fx2=max, fy2=max){
  m <- bbox_to_matrix(bbox)
  bbox_validate(paste(fx1(m[, 1]), fy1(m[, 2]), fx2(m[, 3]), fy2(m[, 4])))
}
#' @rdname bbox_aggregate
#' @export
bbox_union2 <- function(bbox, bbox2){
  # Recycle length-1 inputs so the operation is element-wise.
  if(length(bbox)==1L && !is.null(bbox2))
    bbox <- rep.int(bbox, times=length(bbox2))
  if(length(bbox2)==1L)
    bbox2 <- rep.int(bbox2, times=length(bbox))
  stopifnot(length(bbox)==length(bbox2))
  m <- bbox_to_matrix(bbox)
  m2 <- bbox_to_matrix(bbox2)
  bbox_validate(paste(pmin(m[, 1], m2[, 1]), pmin(m[, 2], m2[, 2]),
                      pmax(m[, 3], m2[, 3]), pmax(m[, 4], m2[, 4])))
}
#' @rdname bbox_aggregate
#' @export
bbox_intersect <- function(bbox, bbox2=NULL){
  if(length(bbox)==1L && !is.null(bbox2))
    bbox <- rep.int(bbox, times=length(bbox2))
  if(length(bbox2)==1L)
    bbox2 <- rep.int(bbox2, times=length(bbox))
  m <- bbox_to_matrix(bbox)
  if(!is.null(bbox2)){
    # Same length contract as bbox_union2(); previously unchecked here,
    # which let mismatched lengths recycle silently.
    stopifnot(length(bbox)==length(bbox2))
    m2 <- bbox_to_matrix(bbox2)
    return(bbox_validate(paste(pmax(m[, 1], m2[, 1]), pmax(m[, 2], m2[, 2]),
                               pmin(m[, 3], m2[, 3]), pmin(m[, 4], m2[, 4]))))
  }
  # No bbox2: intersect all boxes in `bbox` into a single box.
  bbox_validate(paste(max(m[, 1]), max(m[, 2]), min(m[, 3]), min(m[, 4])))
}
#' Predicate functions for matching bboxes
#'
#' Tests whether bounding boxes overlap, i.e. whether `bbox_intersect()`
#' yields a non-`NA` result for them.
#' @param bbox character vector of bounding boxes to perform operation on
#' @param bbox2 optional character vector of bounding boxes for element-wise comparison with `bbox`.
#' If specified, needs to be length 1 or equal in length to `bbox`.
#'
#' @return logical vector: `TRUE` where the boxes intersect
#' @export
#'
#' @examples
#' bbox_intersects(c("5 1 7 3", "2 4 6 8")) # should return FALSE
#' bbox_intersects("5 1 7 3", "2 2 6 8") # should return TRUE
#' @rdname bbox_predicates
#'
bbox_intersects <- function(bbox, bbox2=NULL){
  !is.na(bbox_intersect(bbox, bbox2))
}
#' Functions for validating bbox
#'
#' A bounding box is valid when its extent is non-negative in both
#' dimensions, i.e. x1 <= x2 and y1 <= y2.
#' @param bbox character vector bounding boxes to validate
#'
#' @return a vector of logical values
#' @export
#'
#' @examples
#' bbox_is_valid("0 0 100 200")
#' bbox_validate(c("5,4,6,3", "1,1,5,6"))
#' @rdname bbox_valid
#'
bbox_is_valid <- function(bbox){
  b <- bbox_to_matrix(bbox)
  (b[, 3] - b[, 1] >= 0) & (b[, 4] - b[, 2] >= 0)
}
#' @rdname bbox_valid
#' @return original vector with NA for invalid bboxes
#' @export
bbox_validate <- function(bbox){
  ok <- bbox_is_valid(bbox)
  ifelse(ok, bbox, NA_character_)
}
#' Functions for padding bbox
#'
#' "Pad" (enlarge) bounding boxes by a number of character widths
#' (`bbox_pad_width()`) or line heights (`bbox_pad_height()`) estimated from
#' the word each box contains.
#' @param bbox character vector of bounding boxes to pad
#' @param word character vector of words contained in bboxes
#' @param n number of character widths / line heights to add on each padded side
#' @param side "left", "right" (for `bbox_pad_width()`), "up", "down" (for `bbox_pad_height()`) or "both" which side to pad
#'
#' @return a vector of validated bboxes
#' @rdname bbox_pad
#'
#' @examples
#' bbox_pad_width("0 0 10 20", "There")
#' bbox_pad_height("0 0 10 20", "There")
#' @export
bbox_pad_width <- function(bbox, word, n=1, side="both"){
  m <- bbox_to_matrix(bbox)
  # Average glyph width; yields Inf for empty words (pass non-empty words).
  p <- (m[, 3] - m[, 1]) / nchar(word)
  if (side %in% c("left", "both"))
    m[, 1] <- m[, 1] - p * n
  if (side %in% c("right", "both"))
    m[, 3] <- m[, 3] + p * n
  bbox_validate(paste(m[, 1], m[, 2], m[, 3], m[, 4]))
}
#' @rdname bbox_pad
#' @export
bbox_pad_height <- function(bbox, word, n=0.5, side="both"){
  m <- bbox_to_matrix(bbox)
  # Line height = box height divided by the number of newline-separated lines.
  n_lines <- lengths(regmatches(word, gregexpr("\n", word))) + 1
  p <- (m[, 4] - m[, 2]) / n_lines
  if (side %in% c("up", "both"))
    m[, 2] <- m[, 2] - p * n
  if (side %in% c("down", "both"))
    m[, 4] <- m[, 4] + p * n
  bbox_validate(paste(m[, 1], m[, 2], m[, 3], m[, 4]))
}
#' Functions for calculating with bbox
#'
#' Computes the area (width times height) of each bounding box.
#' @param bbox character vector of bounding boxes
#'
#' @return numeric vector of areas
#' @rdname bbox_math
#'
#' @examples
#' bbox_area("100 100 200 200")
#'
#' @export
bbox_area <- function(bbox){
  b <- bbox_to_matrix(bbox)
  width <- b[, 3] - b[, 1]
  height <- b[, 4] - b[, 2]
  width * height
}
#' Functions for updating certain coordinates of bbox
#'
#' Overwrites selected coordinates (x1, y1, x2, y2) of each bounding box.
#' Arguments left `NULL` keep the original coordinate; scalars are recycled
#' across all boxes, vectors are applied element-wise.
#'
#' As a shorthand, a length-4 `x1` with all other coordinates `NULL` is
#' interpreted as a complete `c(x1, y1, x2, y2)` replacement.
#' NOTE(review): this shorthand is ambiguous when `bbox` itself has length 4
#' and the caller intends a per-box `x1` vector -- confirm callers rely on it.
#' @param bbox character vector of bounding boxes to update
#' @param x1 scalar or numeric vector to update bbox with (or a length-4
#'   vector giving all four coordinates, see above)
#' @param y1 scalar or numeric vector to update bbox with
#' @param x2 scalar or numeric vector to update bbox with
#' @param y2 scalar or numeric vector to update bbox with
#'
#' @return a vector of updated and validated bboxes
#' @rdname bbox_modify
#'
#' @examples
#' bbox_reset(bbox=c("100 100 200 200", "300 400 500 600"), x2=800)
#'
#' @export
bbox_reset <- function(bbox, x1=NULL, y1=NULL, x2=NULL, y2=NULL){
  # Nothing to change: return the input untouched.
  if(is.null(x1) && is.null(y1) && is.null(x2) && is.null(y2))
    return(bbox)
  # Length-4 shorthand: x1 carries all four coordinates.
  if(length(x1)==4 && is.null(y1) && is.null(x2) && is.null(y2)){
    y1 <- x1[[2]]; x2 <- x1[[3]]; y2 <- x1[[4]]; x1 <- x1[[1]]}
  # Recycle scalar coordinates to one value per box.
  if(length(x1)==1) x1 <- rep.int(x1, times = length(bbox))
  if(length(y1)==1) y1 <- rep.int(y1, times = length(bbox))
  if(length(x2)==1) x2 <- rep.int(x2, times = length(bbox))
  if(length(y2)==1) y2 <- rep.int(y2, times = length(bbox))
  m <- bbox_to_matrix(bbox)
  # Overwrite only the coordinates that were supplied.
  if(!is.null(x1))
    m[,1] <- x1
  if(!is.null(y1))
    m[,2] <- y1
  if(!is.null(x2))
    m[,3] <- x2
  if(!is.null(y2))
    m[,4] <- y2
  bbox_validate(paste(m[,1], m[,2], m[,3],m[,4]))
}
| /R/bbox.R | permissive | dmi3kno/hocr | R | false | false | 8,646 | r | bbox_to_matrix <- function(bbox){
bb_lst <- lapply(strsplit(bbox, ",\\s?|\\s+"), as.numeric)
do.call(rbind, bb_lst)
}
#' Convert bbox to magick-geometry
#'
#' @param x character vector with bounding boxes (top-left coordinates). Expects four integers separated by comma or space
#'
#' @return character vector of magick-compliant geometry string representing bounding box area in the format "width x height +x_off +y_off" (https://www.imagemagick.org/Magick++/Geometry.html)
#' @seealso [magick::geometry] for converting integers (four individual columns) to geometry
#' @export
#'
#'
#' @examples
#' # area from c(0,0) to c(100,200) counting from top left
#' bbox_to_geometry("0 0 100 200")
#' bbox_to_geometry(c("0 0 100 200", "100,100,200, 200"))
#'
bbox_to_geometry <- function(x){
m <- bbox_to_matrix(x)
paste0(m[,3] - m[,1], "x", m[,4] - m[,2], "+", m[,1], "+", m[,2])
}
#' Functions for horizontal and vertical slicing of bbox columns
#' These functions are not vectorized and should be used to compute
#' @param bbox string or character vector of bbox'es
#' @param x,y coordinates to sclice the bouding box at
#'
#' @return list of bbox'es appliccable for particular split
#' @export
#'
#' @examples
#' bbox_slice_x("0 0 100 200", 80)
#' bbox_slice_x(c("0 0 100 200", "100 100 200 200"), c(80, 150))
#'
#' @rdname bbox_slice
#'
bbox_slice_x <- function(bbox, x){
m <- bbox_to_matrix(bbox)
list(left=bbox_validate(paste(m[,1], m[,2], x-1, m[,4])),
right=bbox_validate(paste(x, m[,2], m[,3], m[,4])))
}
#' @export
#'
#' @examples
#' bbox_slice_y("0 0 100 200", 120)
#' bbox_slice_y(c("0 0 100 200", "100 100 200 200"), c(120,150))
#' @rdname bbox_slice
bbox_slice_y <- function(bbox, y){
m <- bbox_to_matrix(bbox)
list(top=bbox_validate(paste(m[,1], m[,2], m[,3], y-1)),
bottom=bbox_validate(paste(m[,1], y, m[,3], m[,4])))
}
#' @export
#'
#' @examples
#' bbox_slice_xy("0 0 100 200", 50, 100)
#' bbox_slice_xy(c("0 0 100 200", "100,100, 200, 200"), c(50, 150), c(100, 150))
#' @rdname bbox_slice
bbox_slice_xy <- function(bbox, x, y){
m <- bbox_to_matrix(bbox)
list(top_left =bbox_validate(paste(m[,1], m[,2], x-1, y-1)),
top_right =bbox_validate(paste(x, m[,2], m[,3], y-1)),
bottom_left=bbox_validate(paste(m[,1], y, x-1, m[,4])),
bottom_right=bbox_validate(paste(x, y, m[,3], m[,4])))
}
#' Functions for aggregating bbox objects
#' These functions can perform union and intersection operations on bbox objects
#' @param bbox character vector of bounding boxes to perform operation on
#' @param fx1,fy1,fx2,fy2 functions to use for aggregating x1, y1, x2, y2, respectively. Defaults to `min` for x1,y1 and `max` for x2,y2
#' @param bbox2 optional character vector of bounding boxes to element-wise aggregation with `bbox`.
#' If specified, needs to be length 1 or equal in length to `bbox`.
#'
#' @return bbox or missing value, if result is invalid bounding box
#' @export
#'
#' @examples
#' bbox_union(c("5 1 7 3", "2 4 6 8"))
#' bbox_union2(c("5 1 7 3", "2 4 6 8"), c("1 1 1 1"))
#' bbox_intersect(c("5 1 7 3", "2 4 6 8")) # should return NA
#' bbox_intersect("5 1 7 3", "2 2 6 8")
#' @rdname bbox_aggregate
#'
bbox_union <- function(bbox, fx1=min, fy1=min, fx2=max, fy2=max){
m <- bbox_to_matrix(bbox)
bbox_validate(paste(fx1(m[,1]), fy1(m[,2]), fx2(m[,3]), fy2(m[,4])))
}
#' @rdname bbox_aggregate
#' @export
bbox_union2 <- function(bbox, bbox2){
if(length(bbox)==1L && !is.null(bbox2))
bbox <- rep.int(bbox, times=length(bbox2))
if(length(bbox2)==1L)
bbox2 <- rep.int(bbox2, times=length(bbox))
stopifnot(length(bbox)==length(bbox2))
m <- bbox_to_matrix(bbox)
m2 <- bbox_to_matrix(bbox2)
return(bbox_validate(paste(pmin(m[,1], m2[,1]), pmin(m[,2], m2[,2]),
pmax(m[,3], m2[,3]), pmax(m[,4], m2[,4]))))
}
#' @rdname bbox_aggregate
#' @export
bbox_intersect <- function(bbox, bbox2=NULL){
if(length(bbox)==1L && !is.null(bbox2))
bbox <- rep.int(bbox, times=length(bbox2))
if(length(bbox2)==1L)
bbox2 <- rep.int(bbox2, times=length(bbox))
m <- bbox_to_matrix(bbox)
if(!is.null(bbox2)){
m2 <- bbox_to_matrix(bbox2)
return(bbox_validate(paste(pmax(m[,1], m2[,1]), pmax(m[,2], m2[,2]),
pmin(m[,3], m2[,3]), pmin(m[,4], m2[,4]))))
}
bbox_validate(paste(max(m[,1]), max(m[,2]), min(m[,3]), min(m[,4])))
}
#' Predicate functions for matching bboxes
#' These functions can check whether intersection operation on bbox objects returns non-NA result
#' @param bbox character vector of bounding boxes to perform operation on
#' @param bbox2 optional character vector of bounding boxes to element-wise aggregation with `bbox`.
#' If specified, needs to be length 1 or equal in length to `bbox`.
#'
#' @return logical value of whether or not the pair of bboxes intersect
#' @export
#'
#' @examples
#' bbox_intersects(c("5 1 7 3", "2 4 6 8")) # should return FALSE
#' bbox_intersects("5 1 7 3", "2 2 6 8") # should return TRUE
#' @rdname bbox_predicates
#'
bbox_intersects <- function(bbox, bbox2=NULL){
bbox_i <- bbox_intersect(bbox, bbox2)
!is.na(bbox_i)
}
#' Functions for validating bbox
#' These functions can check whether specified bbox is valid, i.e. x1 <= x2 and y1 <= y2
#' @param bbox character vector bounding boxes to validate
#'
#' @return a vector of logical values
#' @export
#'
#' @examples
#' bbox_is_valid("0 0 100 200")
#' bbox_validate(c("5,4,6,3", "1,1,5,6"))
#' @rdname bbox_valid
#'
bbox_is_valid <- function(bbox){
m <- bbox_to_matrix(bbox)
m[,3]>=m[,1] & m[,4]>=m[,2]
}
#' @rdname bbox_valid
#' @return original vector with NA for invalid bboxes
#' @export
bbox_validate <- function(bbox){
ifelse(bbox_is_valid(bbox), bbox, NA_character_)
}
#' Functions for padding bbox
#' These functions can "pad" (increase size of) bbox
#' @param bbox character vector of bounding boxes to pad
#' @param word character vector of words contained in bboxes
#' @param n integer number of symbols to add
#' @param side "left", "right" (for `bbox_pad_width()`), "up", "down" (for `bbox_pad_height()`) or "both" which side to pad
#'
#' @return a vector of validated bboxes
#' @rdname bbox_pad
#'
#' @examples
#' bbox_pad_width("0 0 10 20", "There")
#' bbox_pad_height("0 0 10 20", "There")
#' @export
bbox_pad_width <- function(bbox, word, n=1, side="both"){
m <- bbox_to_matrix(bbox)
p <- (m[,3]-m[,1])/nchar(word)
if(side=="left"|side=="both")
m[,1] <- m[,1]-p*n
if(side=="right"|side=="both")
m[,3] <- m[,3]+p*n
bbox_validate(paste(m[,1], m[,2], m[,3], m[,4]))
}
#' @rdname bbox_pad
#' @export
bbox_pad_height <- function(bbox, word, n=0.5, side="both"){
m <- bbox_to_matrix(bbox)
n_lines <- lengths(regmatches(word, gregexpr("\n", word)))+1
p <- (m[,4]-m[,2])/n_lines
if(side=="up"|side=="both")
m[,2] <- m[,2]-p*n
if(side=="down"|side=="both")
m[,4] <- m[,4]+p*n
bbox_validate(paste(m[,1], m[,2], m[,3],m[,4]))
}
#' Functions for calculating with bbox
#' These functions can calculate various metrix of bbox
#' @param bbox character vector of bounding boxes to pad
#'
#' @return a vector of validated bboxes
#' @rdname bbox_math
#'
#' @examples
#' bbox_area("100 100 200 200")
#'
#' @export
bbox_area <- function(bbox){
m <- bbox_to_matrix(bbox)
(m[,3]-m[,1])*(m[,4]-m[,2])
}
#' Functions for updating certain coordinates of bbox
#' These functions can modify a vector of bbox
#' @param bbox character vector of bounding boxes to update
#' @param x1 scalar or numeric vector to update bbox with
#' @param y1 scalar or numeric vector to update bbox with
#' @param x2 scalar or numeric vector to update bbox with
#' @param y2 scalar or numeric vector to update bbox with
#'
#' @return a vector of updated and validated bboxes
#' @rdname bbox_modify
#'
#' @examples
#' bbox_reset(bbox=c("100 100 200 200", "300 400 500 600"), x2=800)
#'
#' @export
bbox_reset <- function(bbox, x1=NULL, y1=NULL, x2=NULL, y2=NULL){
if(is.null(x1) && is.null(y1) && is.null(x2) && is.null(y2))
return(bbox)
if(length(x1)==4 && is.null(y1) && is.null(x2) && is.null(y2)){
y1 <- x1[[2]]; x2 <- x1[[3]]; y2 <- x1[[4]]; x1 <- x1[[1]]}
if(length(x1)==1) x1 <- rep.int(x1, times = length(bbox))
if(length(y1)==1) y1 <- rep.int(y1, times = length(bbox))
if(length(x2)==1) x2 <- rep.int(x2, times = length(bbox))
if(length(y2)==1) y2 <- rep.int(y2, times = length(bbox))
m <- bbox_to_matrix(bbox)
if(!is.null(x1))
m[,1] <- x1
if(!is.null(y1))
m[,2] <- y1
if(!is.null(x2))
m[,3] <- x2
if(!is.null(y2))
m[,4] <- y2
bbox_validate(paste(m[,1], m[,2], m[,3],m[,4]))
}
|
<?xml version="1.0" encoding="utf-8"?>
<serviceModel xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" name="BlobSample" generation="1" functional="0" release="0" Id="f9a7f00a-1169-43b7-bf2e-99af528e7b80" dslVersion="1.2.0.0" xmlns="http://schemas.microsoft.com/dsltools/RDSM">
<groups>
<group name="BlobSampleGroup" generation="1" functional="0" release="0">
<componentports>
<inPort name="MvcWebRole1:Endpoint1" protocol="http">
<inToChannel>
<lBChannelMoniker name="/BlobSample/BlobSampleGroup/LB:MvcWebRole1:Endpoint1" />
</inToChannel>
</inPort>
<inPort name="MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" protocol="tcp">
<inToChannel>
<lBChannelMoniker name="/BlobSample/BlobSampleGroup/LB:MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" />
</inToChannel>
</inPort>
</componentports>
<settings>
<aCS name="Certificate|MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" defaultValue="">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapCertificate|MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" />
</maps>
</aCS>
<aCS name="MvcWebRole1:ImageStorageAccountConn" defaultValue="">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapMvcWebRole1:ImageStorageAccountConn" />
</maps>
</aCS>
<aCS name="MvcWebRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</maps>
</aCS>
<aCS name="MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" defaultValue="">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" />
</maps>
</aCS>
<aCS name="MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" defaultValue="">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" />
</maps>
</aCS>
<aCS name="MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" defaultValue="">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" />
</maps>
</aCS>
<aCS name="MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" defaultValue="">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" />
</maps>
</aCS>
<aCS name="MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" defaultValue="">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" />
</maps>
</aCS>
<aCS name="MvcWebRole1Instances" defaultValue="[1,1,1]">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapMvcWebRole1Instances" />
</maps>
</aCS>
</settings>
<channels>
<lBChannel name="LB:MvcWebRole1:Endpoint1">
<toPorts>
<inPortMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Endpoint1" />
</toPorts>
</lBChannel>
<lBChannel name="LB:MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput">
<toPorts>
<inPortMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" />
</toPorts>
</lBChannel>
<sFSwitchChannel name="SW:MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp">
<toPorts>
<inPortMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" />
</toPorts>
</sFSwitchChannel>
</channels>
<maps>
<map name="MapCertificate|MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" kind="Identity">
<certificate>
<certificateMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" />
</certificate>
</map>
<map name="MapMvcWebRole1:ImageStorageAccountConn" kind="Identity">
<setting>
<aCSMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/ImageStorageAccountConn" />
</setting>
</map>
<map name="MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" kind="Identity">
<setting>
<aCSMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</setting>
</map>
<map name="MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" kind="Identity">
<setting>
<aCSMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" />
</setting>
</map>
<map name="MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" kind="Identity">
<setting>
<aCSMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" />
</setting>
</map>
<map name="MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" kind="Identity">
<setting>
<aCSMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" />
</setting>
</map>
<map name="MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" kind="Identity">
<setting>
<aCSMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" />
</setting>
</map>
<map name="MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" kind="Identity">
<setting>
<aCSMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" />
</setting>
</map>
<map name="MapMvcWebRole1Instances" kind="Identity">
<setting>
<sCSPolicyIDMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1Instances" />
</setting>
</map>
</maps>
<components>
<groupHascomponents>
<role name="MvcWebRole1" generation="1" functional="0" release="0" software="C:\Users\suvin\Desktop\Work\Learning\BlobSample\BlobSample\csx\Debug\roles\MvcWebRole1" entryPoint="base\x64\WaHostBootstrapper.exe" parameters="base\x64\WaIISHost.exe " memIndex="1792" hostingEnvironment="frontendadmin" hostingEnvironmentVersion="2">
<componentports>
<inPort name="Endpoint1" protocol="http" portRanges="80" />
<inPort name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" protocol="tcp" />
<inPort name="Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" protocol="tcp" portRanges="3389" />
<outPort name="MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" protocol="tcp">
<outToChannel>
<sFSwitchChannelMoniker name="/BlobSample/BlobSampleGroup/SW:MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" />
</outToChannel>
</outPort>
</componentports>
<settings>
<aCS name="ImageStorageAccountConn" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" defaultValue="" />
<aCS name="__ModelData" defaultValue="<m role="MvcWebRole1" xmlns="urn:azure:m:v1"><r name="MvcWebRole1"><e name="Endpoint1" /><e name="Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" /><e name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" /></r></m>" />
</settings>
<resourcereferences>
<resourceReference name="DiagnosticStore" defaultAmount="[4096,4096,4096]" defaultSticky="true" kind="Directory" />
<resourceReference name="EventStore" defaultAmount="[1000,1000,1000]" defaultSticky="false" kind="LogStore" />
</resourcereferences>
<storedcertificates>
<storedCertificate name="Stored0Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" certificateStore="My" certificateLocation="System">
<certificate>
<certificateMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" />
</certificate>
</storedCertificate>
</storedcertificates>
<certificates>
<certificate name="Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" />
</certificates>
</role>
<sCSPolicy>
<sCSPolicyIDMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1Instances" />
<sCSPolicyUpdateDomainMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1UpgradeDomains" />
<sCSPolicyFaultDomainMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1FaultDomains" />
</sCSPolicy>
</groupHascomponents>
</components>
<sCSPolicy>
<sCSPolicyUpdateDomain name="MvcWebRole1UpgradeDomains" defaultPolicy="[5,5,5]" />
<sCSPolicyFaultDomain name="MvcWebRole1FaultDomains" defaultPolicy="[2,2,2]" />
<sCSPolicyID name="MvcWebRole1Instances" defaultPolicy="[1,1,1]" />
</sCSPolicy>
</group>
</groups>
<implements>
<implementation Id="228974c1-3871-4302-b555-f349608c31fe" ref="Microsoft.RedDog.Contract\ServiceContract\BlobSampleContract@ServiceDefinition">
<interfacereferences>
<interfaceReference Id="852d7ec4-59d4-4604-a48f-6ea2ad03b53e" ref="Microsoft.RedDog.Contract\Interface\MvcWebRole1:Endpoint1@ServiceDefinition">
<inPort>
<inPortMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1:Endpoint1" />
</inPort>
</interfaceReference>
<interfaceReference Id="c18c6647-2149-4bba-b5b7-618cbd4170dd" ref="Microsoft.RedDog.Contract\Interface\MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput@ServiceDefinition">
<inPort>
<inPortMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" />
</inPort>
</interfaceReference>
</interfacereferences>
</implementation>
</implements>
</serviceModel> | /BlobSample/BlobSample/csx/Debug/ServiceDefinition.rd | no_license | SudhirVinjamuri/BlobStore | R | false | false | 11,852 | rd | <?xml version="1.0" encoding="utf-8"?>
<serviceModel xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" name="BlobSample" generation="1" functional="0" release="0" Id="f9a7f00a-1169-43b7-bf2e-99af528e7b80" dslVersion="1.2.0.0" xmlns="http://schemas.microsoft.com/dsltools/RDSM">
<groups>
<group name="BlobSampleGroup" generation="1" functional="0" release="0">
<componentports>
<inPort name="MvcWebRole1:Endpoint1" protocol="http">
<inToChannel>
<lBChannelMoniker name="/BlobSample/BlobSampleGroup/LB:MvcWebRole1:Endpoint1" />
</inToChannel>
</inPort>
<inPort name="MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" protocol="tcp">
<inToChannel>
<lBChannelMoniker name="/BlobSample/BlobSampleGroup/LB:MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" />
</inToChannel>
</inPort>
</componentports>
<settings>
<aCS name="Certificate|MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" defaultValue="">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapCertificate|MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" />
</maps>
</aCS>
<aCS name="MvcWebRole1:ImageStorageAccountConn" defaultValue="">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapMvcWebRole1:ImageStorageAccountConn" />
</maps>
</aCS>
<aCS name="MvcWebRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</maps>
</aCS>
<aCS name="MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" defaultValue="">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" />
</maps>
</aCS>
<aCS name="MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" defaultValue="">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" />
</maps>
</aCS>
<aCS name="MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" defaultValue="">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" />
</maps>
</aCS>
<aCS name="MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" defaultValue="">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" />
</maps>
</aCS>
<aCS name="MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" defaultValue="">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" />
</maps>
</aCS>
<aCS name="MvcWebRole1Instances" defaultValue="[1,1,1]">
<maps>
<mapMoniker name="/BlobSample/BlobSampleGroup/MapMvcWebRole1Instances" />
</maps>
</aCS>
</settings>
<channels>
<lBChannel name="LB:MvcWebRole1:Endpoint1">
<toPorts>
<inPortMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Endpoint1" />
</toPorts>
</lBChannel>
<lBChannel name="LB:MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput">
<toPorts>
<inPortMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" />
</toPorts>
</lBChannel>
<sFSwitchChannel name="SW:MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp">
<toPorts>
<inPortMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" />
</toPorts>
</sFSwitchChannel>
</channels>
<maps>
<map name="MapCertificate|MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" kind="Identity">
<certificate>
<certificateMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" />
</certificate>
</map>
<map name="MapMvcWebRole1:ImageStorageAccountConn" kind="Identity">
<setting>
<aCSMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/ImageStorageAccountConn" />
</setting>
</map>
<map name="MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" kind="Identity">
<setting>
<aCSMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</setting>
</map>
<map name="MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" kind="Identity">
<setting>
<aCSMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" />
</setting>
</map>
<map name="MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" kind="Identity">
<setting>
<aCSMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" />
</setting>
</map>
<map name="MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" kind="Identity">
<setting>
<aCSMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" />
</setting>
</map>
<map name="MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" kind="Identity">
<setting>
<aCSMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" />
</setting>
</map>
<map name="MapMvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" kind="Identity">
<setting>
<aCSMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" />
</setting>
</map>
<map name="MapMvcWebRole1Instances" kind="Identity">
<setting>
<sCSPolicyIDMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1Instances" />
</setting>
</map>
</maps>
<components>
<groupHascomponents>
<role name="MvcWebRole1" generation="1" functional="0" release="0" software="C:\Users\suvin\Desktop\Work\Learning\BlobSample\BlobSample\csx\Debug\roles\MvcWebRole1" entryPoint="base\x64\WaHostBootstrapper.exe" parameters="base\x64\WaIISHost.exe " memIndex="1792" hostingEnvironment="frontendadmin" hostingEnvironmentVersion="2">
<componentports>
<inPort name="Endpoint1" protocol="http" portRanges="80" />
<inPort name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" protocol="tcp" />
<inPort name="Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" protocol="tcp" portRanges="3389" />
<outPort name="MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" protocol="tcp">
<outToChannel>
<sFSwitchChannelMoniker name="/BlobSample/BlobSampleGroup/SW:MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" />
</outToChannel>
</outPort>
</componentports>
<settings>
<aCS name="ImageStorageAccountConn" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" defaultValue="" />
<aCS name="__ModelData" defaultValue="<m role="MvcWebRole1" xmlns="urn:azure:m:v1"><r name="MvcWebRole1"><e name="Endpoint1" /><e name="Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" /><e name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" /></r></m>" />
</settings>
<resourcereferences>
<resourceReference name="DiagnosticStore" defaultAmount="[4096,4096,4096]" defaultSticky="true" kind="Directory" />
<resourceReference name="EventStore" defaultAmount="[1000,1000,1000]" defaultSticky="false" kind="LogStore" />
</resourcereferences>
<storedcertificates>
<storedCertificate name="Stored0Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" certificateStore="My" certificateLocation="System">
<certificate>
<certificateMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1/Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" />
</certificate>
</storedCertificate>
</storedcertificates>
<certificates>
<certificate name="Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" />
</certificates>
</role>
<sCSPolicy>
<sCSPolicyIDMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1Instances" />
<sCSPolicyUpdateDomainMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1UpgradeDomains" />
<sCSPolicyFaultDomainMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1FaultDomains" />
</sCSPolicy>
</groupHascomponents>
</components>
<sCSPolicy>
<sCSPolicyUpdateDomain name="MvcWebRole1UpgradeDomains" defaultPolicy="[5,5,5]" />
<sCSPolicyFaultDomain name="MvcWebRole1FaultDomains" defaultPolicy="[2,2,2]" />
<sCSPolicyID name="MvcWebRole1Instances" defaultPolicy="[1,1,1]" />
</sCSPolicy>
</group>
</groups>
<implements>
<implementation Id="228974c1-3871-4302-b555-f349608c31fe" ref="Microsoft.RedDog.Contract\ServiceContract\BlobSampleContract@ServiceDefinition">
<interfacereferences>
<interfaceReference Id="852d7ec4-59d4-4604-a48f-6ea2ad03b53e" ref="Microsoft.RedDog.Contract\Interface\MvcWebRole1:Endpoint1@ServiceDefinition">
<inPort>
<inPortMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1:Endpoint1" />
</inPort>
</interfaceReference>
<interfaceReference Id="c18c6647-2149-4bba-b5b7-618cbd4170dd" ref="Microsoft.RedDog.Contract\Interface\MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput@ServiceDefinition">
<inPort>
<inPortMoniker name="/BlobSample/BlobSampleGroup/MvcWebRole1:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" />
</inPort>
</interfaceReference>
</interfacereferences>
</implementation>
</implements>
</serviceModel> |
rm(list = ls(all = TRUE))  # NOTE(review): wipes the whole workspace; acceptable for a standalone script. Fixed `T` -> `TRUE` (`T` is reassignable).
library(zoo)
library(longitudinal)
library(foreign)
library(nlme)
library(plyr)
library(lattice)
#library(lme4)
library(mgcv)
library(ggplot2)
library(reshape2)
# Machine-specific working directory -- adjust when running on another machine.
setwd("/home/evaliliane/Documents/PhD/Codes")
# Read in the children dataset (the adult dataset line is kept for reference)
#addata <- read.csv("/home/evaliliane/Documents/PhD/Codes/NewData/CD4Cat_Adults2015-04-20.csv")
chdata <- read.csv("/home/evaliliane/Documents/PhD/Codes/NewData/CD4Cat_Children2015-04-20.csv")
# str(chdata)
# rel.columns <- c("patient", "diff", "zscore", "lab_v","height","weight","viral", "fhv_who_stage", "gender")
# ddat2red <- chdata[chdata$base == 1, c("patient", "height","weight","viral", "fhv_stage_who", "gender")]
# names(ddat2red) <- c("patient", "baseheight","baseweight","baseviral", "basefhv_stage_who", "basegender")
# Cap very negative z-scores at -10 (code below replaces values < -10 with -10)
chdata$zscore[chdata$zscore < -10] <- -10
# Remove rows with time on follow-up (TOFU) of 15 years or more (diff >= 5476 days)
chdata <- chdata[chdata$diff < 5476,]
dat <- chdata[,c(2,35,44,48,50)]
dat <- dat[!(is.na(dat$z.categ)),]
# Select a subgroup of people
useSubset <- T
subselect <- function(addata, n) {
  # Keep only the rows belonging to `n` randomly chosen patients
  # (used to cut computing time during exploratory analyses).
  chosen <- sample(unique(addata$patient), n)
  addata[addata$patient %in% chosen, ]
}
datt <- subselect(dat, 1000)
#subchdata <- dat
currentDate <- Sys.Date()
# ##############################################################################################
# ################ Model Building #################################################
# ##############################################################################################
# ===================== Children per CD4 Categories ==========================
# Build a nested groupedData object (patients nested within z-score category)
# for the nlme/nlsList fits below.
testchdata <- datt #[addata$suppress == 1,] #subselect(addata,2000)
# Number of z-score categories (used later for plotting/legends).
nn <- nlevels(testchdata$z.categ)
# NOTE(review): the grouping formula uses `lab_v` as the response, but every
# model below fits `zscore` -- confirm `lab_v` is intended here (the formula
# only drives default plots of the groupedData object, not the model fits).
mydataa <- groupedData(lab_v ~ diff | z.categ/patient, data = testchdata,order.groups=F) # Only On-ART period
# Order the data and get initial values for the parameter estimates
mydataa <- mydataa[order(mydataa$z.categ, mydataa$patient,mydataa$diff),]
# model1<-nls(zscore ~ SSasymp(diff,Asym,R0,lrc), data=mydataa,na.action="na.omit",
# start=c(Asym=-2,R0=-3,lrc=-5), control = nls.control(maxiter = 50, tol = 1e-05, minFactor = 1/4096,
# printEval = FALSE, warnOnly = FALSE))
# Individual fits for Old Model.
# Five separate nlsList fits of the self-starting asymptotic regression model
# SSasymp(diff, Asym, R0, lrc); the fits differ only in the grouping variable
# (z-score category vs patient), the starting values, and residual pooling.
# Fit per z-score category, without pooling the residual standard error.
model1.lis <- nlsList(zscore ~ SSasymp(diff,Asym,R0,lrc)|z.categ, data=mydataa,
                      start=c(Asym=-2,R0=-4,lrc=-1),
                      pool = FALSE)
# Same per-category fit with the default pooled residual variance.
model2.lis <- nlsList(zscore ~ SSasymp(diff,Asym,R0,lrc)|z.categ, data=mydataa,
                      start=c(Asym=-2,R0=-4,lrc=-1))
# Per-patient fits; models 3-5 probe sensitivity to the starting values
# (individual fits that fail to converge yield NA coefficient rows).
model3.lis <- nlsList(zscore ~ SSasymp(diff,Asym,R0,lrc)|patient, data=mydataa,
                      start=c(Asym=-2,R0=-4,lrc=-1))
model4.lis <- nlsList(zscore ~ SSasymp(diff,Asym,R0,lrc)|patient, data=mydataa,
                      start=c(Asym=-1,R0=-3,lrc=-1))
model5.lis <- nlsList(zscore ~ SSasymp(diff,Asym,R0,lrc)|patient, data=mydataa,
                      start=c(Asym=-1.8,R0=-3.9,lrc=-1.7))
# Comments : Pool does not help
# Plot confidence intervals of the per-group / per-patient parameter estimates.
# Fixed: `modeltest` is only defined in commented-out code further down, so the
# original call plot(intervals(modeltest), ...) raised "object not found";
# kept here disabled until that model is actually fitted.
# plot(intervals(modeltest), layout = c(5,1), na.rm = TRUE)
plot(intervals(model5.lis), layout = c(3,1), na.rm = TRUE)
plot(intervals(model4.lis), layout = c(3,1), na.rm = TRUE)
plot(intervals(model3.lis), layout = c(3,1))
plot(intervals(model2.lis), layout = c(3,1))
#anova(model2.lis,model1.lis)
# Results show that random effects are needed for all three parameters.
# Correlation between the per-patient parameter estimates (pairwise-complete
# to cope with patients whose individual fit did not converge).
cor(coef(model3.lis), use = "pairwise.complete.obs")
# Residual standard errors of the per-category fits.
# Fixed typo: `sappply` -> `sapply`. NOTE(review): `sigmaHat` is not exported
# by nlme/stats -- confirm it is defined elsewhere; otherwise use
# sapply(model2.lis, function(m) summary(m)$sigma).
sds <- sapply(model2.lis, sigmaHat)
# Patient IDs whose individual nls fit converged (non-NA coefficients),
# under the two sets of starting values (model5.lis / model4.lis).
tt <- coef(model5.lis)
tt <- cbind(patient = rownames(tt), tt)
convt <- tt$patient[!(is.na(tt$R0))]
pp <- coef(model4.lis)
pp <- cbind(patient = rownames(pp), pp)
convp <- pp$patient[!(is.na(pp$R0))]
convdata <- mydataa[mydataa$patient %in% convt,]
myconv <- groupedData(zscore ~ diff | z.categ/patient, data = convdata,order.groups=F)
plot(myconv)
notconvdata <- mydataa[!(mydataa$patient %in% convp),]
mynotconv <- groupedData(zscore ~ diff | z.categ/patient, data = notconvdata,order.groups=F)
plot(mynotconv[1:100,])
# Look at the residual per z.categ
plot(model2.lis, z.categ ~ resid(.), abline = 0)
# Make individual plots
plot(mydataa[550:600,])
plot(mydataa, outer = ~ diff * z.categ)
plot(augPred(model2.lis[580:600,]))
# Individual sum of LogLik
logLik(model2.lis) # Does not work in presence of NULL fits
# Scatter plot of the estimated parameters relationships
pairs(model2.lis)
pairs(model3.lis)
cof <- coef(summary(model1))
init = cof[1:3] #c(Asym = 450, R0 = 380, lrc = -6)
# # ======================================================================
# # Single level models
# #Random effect defined for CD4 categories level
#model2a <- nlme(zscore ~ SSasymp(diff,Asym,R0,lrc),
# data = mydataa,
# na.action="na.omit",
# random = Asym + R0 ~ 1|cd4a.categ,
# fixed = list(Asym ~ 1, R0 ~ 1, lrc ~ 1),
# start= init, verbose = FALSE )
#
##Random effect defined for patient level
#model2b <- nlme(zscore ~ SSasymp(diff,Asym,R0,lrc),
# data = mydataa,
# na.action="na.omit",
# random = Asym + R0 ~ 1|patient,
# fixed = list(Asym ~ 1, R0 ~ 1, lrc ~ 1),
# start= init, verbose = FALSE )
#
# # Look at both outputs.
# summary(model2a)
# # StdDev Corr
# # Asym 0.5431035 Asym
# # R0 2.4207631 0.989
# # Residual 1.9394735
# summary(model2b)
# # StdDev Corr
# # Asym 1.748278 Asym
# # R0 2.847446 0.482
# # Residual 1.369344
# ======================================================================
# Multiple levels models
# Random effect defined for CD4 categories and patient levels
model3 <- nlme(zscore ~ SSasymp(diff,Asym,R0,lrc),
data = mydataa,
na.action="na.omit",
random = Asym + R0 ~ 1|z.categ/patient,
fixed = list(Asym ~ 1, R0 ~ 1, lrc ~ 1),
start= init, verbose = FALSE )
print("Model 3 with fixed effect and a two levels random effect.")
summary(model3)
plot(model3)
# High correlation between the parametres Asym & R0
# Plot shows an increase in the variability of the residuals
# modeltest <- nlme(zscore ~ SSasymp(diff,Asym,R0,lrc),
# data = mydataa,
# na.action="na.omit",
# random = Asym + R0 + lrc ~ 1 , #|z.categ/patient, lrc ~ 1 ),#|z.categ/patient),
# fixed = list(Asym ~ 1, R0 ~ 1, lrc ~ 1),
# start= init, verbose = TRUE )
#
# Error in nlme.formula(zscore ~ SSasymp(diff, Asym, R0, lrc), data = mydataa, :
# Singularity in backsolve at level 0, block 1
# In addition: Warning message:
# In nlme.formula(zscore ~ SSasymp(diff, Asym, R0, lrc), data = mydataa, :
# Singular precision matrix in level -1, block 4
# --------------------------------------
# Model 3 corrected for independant random effects on both levels
model4 <- update(model3, random = list(z.categ = pdDiag(Asym + R0 ~ 1),patient = pdDiag(Asym + R0 ~ 1)))
print("Model3 corrected for independant random effects on both levels.")
summary(model4)
#Compare bothe model 3 & model4
anova(model3, model4)
# Results: According to the AIC values, there is no improvement from model3 to model4. However, addressing the
# correlation problem between the parameters seem of higher priority than reducing the AIC
# ----------------------------
# Inclusion of a covarience matrix in Model 4
# !!!!!!! NOT WORKING
# Error in Initialize.corARMA(X[[2L]], ...) :
# covariate must have unique values within groups for "corARMA" objects
# model5 <- update(model4, corr = corARMA(c(0.5,0.5),form = ~ diff |z.categ/patient , p=1,q=1))
# # print("Inclusion of a covarience matrix in Model 4.")
# summary(model5)
# # Compare model 4 & model 5
# anova(model4, model5)
# results:
# ----------------------------
# Model 5 corrected for heteroscedasticity
model66 <- update(model3, weights = varPower())
model6 <- update(model3, weights = varIdent( 0.2, ~ 1|z.categ))
print("Model5 corrected for heteroscedasticity.")
summary(model6)
# Compare model 5 & model 6
anova(model4, model6)
# results:
# -------------------------------------------------------
# Try Univariate models for different covariates
# Age at baseline
new.cof <- model3$coeff
new.init1 <- c(as.numeric(new.cof$fixed[1:3]),0,0,0)
model6age1 <- update(model3, fixed = list(Asym ~ age, R0 ~ age, lrc ~ age), start = new.init1)
new.init <- c(as.numeric(new.cof$fixed[1:3]),0,0)
model6age2 <- update(model3, fixed = list(Asym ~ age, R0 ~ age, lrc ~ 1), start = new.init)
print("Model6 with Age covariate.")
# Compare model 6 & model.Age
#anova(model3, model6age)
anova(model6age1, model6age2)
summary(model6age1)
# Results: There is a significant improvement when including Age as a confounder for the Fixed parameters
# # Gender
model6gender <- update(model3, fixed = list(Asym + R0 ~ gender, lrc ~ 1), start = new.init)
print("Model6 with Gender covariate.")
summary(model6gender)
# Compare model 6 & model.gender
anova(model3, model6gender)
# # Results:
#
# # Baseline WHO stage
# model6who <- update(model3, fixed = list(Asym + R0 ~ fhv_stage_who, lrc ~ 1), start = new.init)
# print("Model6 with Who stage covariate.")
# summary(model6who)
# # Compare model 6 & model.who
# anova(model3, model6who) # Not comparable bcz of missing values in WHO stage variable
# Results:
# # Baseline BMI
# model6bmi <- update(model3, fixed = list(Asym + R0 ~ bmi, lrc ~ 1), start = new.init)
# print("Model6 with bmi covariate.")
# summary(model6bmi)
# # Compare model 6 & model.bmi
# anova(model3, model6bmi)
# # Results:
# # Baseline RNA
# model6rna <- update(model3, fixed = list(Asym + R0 ~ viral, lrc ~ 1), start = new.init)
# print("Model6 with RNA covariate.")
# summary(model6rna)
# # Compare model 6 & model.rna
# anova(model3, model6rna)
# # Results:
# # Baseline OIs
# model6rna <- update(model3, fixed = list(Asym + R0 ~ oi, lrc ~ 1), start = new.init)
# print("Model6 with Ois covariate.")
# summary(model6oi)
# # Compare model 6 & model.oi
# anova(model3, model6oi)
# # Results:
# -------------------------------------------------------
# Try full model of covariates: age and gender on Asym and R0, age on lrc.
# Starting values: the 3 fixed effects from model3 plus zeros for the 5 new
# covariate coefficients (2 extra each for Asym and R0, 1 for lrc).
new.init <- c(as.numeric(new.cof$fixed[1:3]), 0, 0, 0, 0, 0)
model6two <- update(model3, fixed = list(Asym ~ age + gender, R0 ~ age + gender, lrc ~ age), start = new.init)
print("Model6 with Age and gender covariates.")
summary(model6two)
# Compare the age-only model with the age+gender model.
# Fixed: `model6age` was never defined; the age-only fit above is `model6age1`.
anova(model6age1, model6two)
# results: No improvement by correcting for gender
# EXIT
#
# model33 <- nlme(zscore ~ SSasymp(diff,Asym,R0,lrc),
# data = mydataa, #we want this to be mydataa right? not mydata?
# na.action="na.omit",
# random = Asym + R0 ~ 1|cd4a.categ,
# fixed = list(Asym ~ age + gender, R0 ~ age + gender, lrc ~ 1),#,+ fhv_stage_who + gender + suppress + weight + height, R0 ~ 1, lrc ~ 1),
# start= c(Asym=-2,5,1,R0=-4,5,1,lrc=-5), verbose = FALSE )
#
# # ================================ Asy .vs. Int ===============================
# r <- ranef(model3)
# f <- fixef(model3)
# r<- c( -2.840674, -1.049151, -2.377164, -1.908371, -1.602952)
# i <- c( -8.298243, -0.790337, -4.767319, -3.075800, -2.160361)
#
# png("Output/Asym_Int.png", w=480,h = 480)
# plot(i,r, xlab = "Baseline z-score",pch = 15, ylab = "Long term z-score", col=1:6, main = "Relation between Asy & Int")
# #legend("toplef", levels(testchdata)[-6], col = 1:6, lty = 1, cex=0.7)
# dev.off()
#
# # =============================================================================
#
# barplot(r$Asym, names.arg = rownames(r))
# barplot(r$R0, names.arg = rownames(r))
# png("Output/Rebound_Int-Asym.png", w=480,h = 480)
# barplot(r$Asym - r$R0, names.arg = rownames(r))
# dev.off
# # =============================================================================
#
# # Plot residualss
# png("Output/Residuals.png", w=480,h = 480)
# plot(fitted(model33),residuals(model33),main="Residuals vs Fitted", cex=0.5, xlab = "Predictions", ylab= "Standardized Residuals")
# dev.off()
#
# # ##############################################################################
#
#
# # ===================== Children per CD4 Categories ==========================
#
# testchdata <- modchdata #[addata$suppress == 1,] #subselect(addata,2000)
# # #testchdata <- groupedData(zscore ~ diff | patient, data = testchdata,order.groups=F)
# #
# #
# nn <- nlevels(testchdata$cd4a.categ)
# mydataa <- groupedData(zscore ~ diff |patient, data = testchdata,order.groups=F) # Only On-ART period
# mydataa <- mydataa[order(mydataa$patient,mydataa$diff),]
# #print(paste0(length(unique(mydataa$patient)),' ',sd(mydataa$lab_v), " patients count, std of CD4 counts, used for LME models"))
# model1<-nls(zscore ~ SSasymp(diff,Asym,R0,lrc), data=mydataa,na.action="na.omit",
# start=c(Asym=-2,R0=-4,lrc=-5), control = nls.control(maxiter = 50, tol = 1e-05, minFactor = 1/4096,
# printEval = FALSE, warnOnly = FALSE))
# cof <- coef(summary(model1))
# #print(paste("Starting with category : ", curr.cat))
# print(cof)
# init = cof[1:3] #c(Asym = 450, R0 = 380, lrc = -6)
#
# model1 <- nlme(zscore ~ SSasymp(diff,Asym,R0,lrc),
# data = mydataa, #we want this to be mydataa right? not mydata?
# na.action="na.omit",
# random = Asym + R0 ~ 1,
# fixed = list(Asym ~ 1, R0 ~ 1, lrc ~ 1),#,+ fhv_stage_who + gender + suppress + weight + height, R0 ~ 1, lrc ~ 1),
# start= c(Asym=-2,R0=-4,lrc=-5), verbose = FALSE )
#
#
# model2 <- nlme(zscore ~ SSasymp(diff,Asym,R0,lrc),
# data = mydataa, #we want this to be mydataa right? not mydata?
# na.action="na.omit",
# random = Asym + R0 ~ 1,
# fixed = list(Asym ~ age + gender, R0 ~ age + gender, lrc ~ 1),#,+ fhv_stage_who + gender + suppress + weight + height, R0 ~ 1, lrc ~ 1),
# start= c(Asym=-2,5,1,R0=-4,5,1,lrc=-5), verbose = FALSE )
#
# # ================================ Asy .vs. Int ===============================
# r <- ranef(model2)
# f <- fixef(model2)
# plot(r$R0,r$Asym, xlab = "Baseline z-score", ylab = "Long term z-score", main = "Relation between Asy & Int")
# legend("toplef", rownames(r), lty = 1, cex=0.7)
#
# # =============================================================================
#
# barplot(r$Asym, names.arg = rownames(r))
# barplot(r$R0, names.arg = rownames(r))
# barplot(r$Asym - r$R0, names.arg = rownames(r))
#
# # =============================================================================
# #plot(model33, resid(., type = "p") ~ fitted(.) | cd4a.categ, abline = 0)
# plot(fitted(model33),residuals(model33),main="Residuals vs Fitted", cex=0.5)
# plot(model33) # Same as above. This looks better
#
# ####################################################
# # COMMENTS:
# # By comparing all nlme models, model5 sems to be the best fit to our data.
# # For the model formula, see refs Beaudrap (2008) and Lewis et al. (2011) in our Mendeley folder
# # KR: this formula is the same as the default SSasym. The only difference between model 4 and 5 is that
# # model 5 does not have a random R0 (intercept) which seems like it would be important in our
# # model since people start ART at many different starting CD4 counts and this has been shown to be
# # fairly important in determining what CD4 count people level off at
#
#
# ## Plot Model results - working
# modelpred.c1 <-as.numeric(predict(model2))
# modelpred.c2 <-labels(predict(model2))
# modelpred.c3 <-mydataa$diff
# modelpred <- data.frame(modelpred.c1,modelpred.c2, modelpred.c3)
#
# nam <- c("predictions","Ids","years")
# names(modelpred) <- nam
#
# # ====================================================== Functions
#
# # Defined functions
# med_CI <- function (x, ci = 0.90) {
# a <- median(x)
# s <- sd(x)
# n <- length(x)
# if (n == 1){
# s <- 0
# n <- 2
# }
# error <- qt(ci + (1 - ci)/2, df = n - 1) * s/sqrt(n)
# return(c(upper = a + error, med = a, lower = a - error))
# }
#
# myCI_u <- function(x){
# #print(length(x))
# # x <- x[!is.na(x)]
# bb <- med_CI(x, ci = 0.90)
# return(as.numeric(bb[1]))
# }
#
# myCI_l <- function(x){
# #print(length(x))
# # x <- x[!is.na(x)]
# bb <- med_CI(x, ci = 0.90)
# return(as.numeric(bb[3]))
# }
#
#
# ln.fxn <- function(xx, yy, pid, order.by.xx=T) {
# #browser()
# missing.data <- is.na(xx) | is.na(yy)
# xx <- xx[!missing.data]
# yy <- yy[!missing.data]
# if(order.by.xx) {
# ord <- order(xx)
# xx <- xx[ord]
# yy <- yy[ord]
# }
# #lines(xx, yy, col = pid[1])
# }
#
# # =================================================================================
#
# ## Figures 1 - Model Output
# pdf('Ch_Modelpredictions.pdf', w =10, h = 7)
# xs <- seq(0, 6, by = .1)
# xlim <- range(xs)
# par('ps' = 16, mfrow = c(1,1)) ## graphical parameters
# plot(0,0, type='n', xlab = 'Years since ART initiation', ylab = 'CD4 z-scores',xaxt='n', yaxt='n', bty = 'n',
# xlim = c(0,6000), ylim = c(-8,2), cex.main =0.9) # , main = "Suppressed viral load")
# axis(2, at = seq(-8,2, by = 2), las = 2) ## x axis
# axis(1, at = seq(0,5110, by = 730), labels = seq(0,14, by= 2) ) #,las = 2) ## x axis
# ## line for each pid
# print("Start ddply")
# #test <- ddply(modelpred, .(Ids), with, ln.fxn(years, predictions, Ids, order.by.xx=T))
# test <- modelpred
# test <- test[order(test$years),] # Order with resepct to time since HAART initiation
# dat2 <- data.matrix(test) # Transform dataframe into a Matrix
# length(test$lab_v) == length(dat2[,3]) # (diff variable)
#
# # Transform matrix into longitudinal object's class
# print("Build longitudinal object")
# dat <- as.longitudinal(dat2 , repeats = as.numeric(table(as.numeric(dat2[,3]))), unique(as.numeric(dat2[,3])))
# #is.longitudinal(dat)
#
# # Calculate the medians
# med <- condense.longitudinal(dat, 1, median)
# confIntu <- condense.longitudinal(dat, 1, myCI_u) #CI(dat[,33],ci = 0.95)
# confIntl <- condense.longitudinal(dat, 1, myCI_l) #CI(dat[,33],ci = 0.95)
# tim <- get.time.repeats(dat)
# # sp <- smooth.spline(tim$time, med, spar=0.35)
# # lines(sp, col = col.vec[ii])
# lines(tim$time[180:length(tim$time)], rollmean(med, 180))#, col = col.vec[ii])
# # if (ii == nn){
# # lines(tim$time[180:length(tim$time)], rollmean(confIntu,180), col = gray(0.7), lty = 2)
# # }
# # if (ii == 1){
# # lines(tim$time[180:length(tim$time)], rollmean(confIntl,180), col = gray(0.7), lty = 2)
# # }
# # }
# title("CD4 z-scores medians' trajectory" ) #, outer=FALSE)
# #legend("topright", levels(testchdata$cd4a.categ), col = 1:nn, lty = 1)
# dev.off()
#
#
# # AIC
# anova(model1)
# anova(model1,model2)
# anova(model33,model3) # Not comparable
# qqnorm(model1)
# qqnorm(model2)
# qqnorm(model3)
# qqnorm(model33)
# # ============================================================================
#
| /NlmeModels.R | no_license | EvaLiliane/Data-Preparation | R | false | false | 19,630 | r | rm(list=ls(all=T))
library(zoo)
library(longitudinal)
library(foreign)
library(nlme)
library(plyr)
library(lattice)
#library(lme4)
library(mgcv)
library(ggplot2)
library(reshape2)
setwd("/home/evaliliane/Documents/PhD/Codes")
# Read in the two children dataset
#addata <- read.csv("/home/evaliliane/Documents/PhD/Codes/NewData/CD4Cat_Adults2015-04-20.csv")
chdata <- read.csv("/home/evaliliane/Documents/PhD/Codes/NewData/CD4Cat_Children2015-04-20.csv")
# str(chdata)
# rel.columns <- c("patient", "diff", "zscore", "lab_v","height","weight","viral", "fhv_who_stage", "gender")
# ddat2red <- chdata[chdata$base == 1, c("patient", "height","weight","viral", "fhv_stage_who", "gender")]
# names(ddat2red) <- c("patient", "baseheight","baseweight","baseviral", "basefhv_stage_who", "basegender")
# Cap very negative z-scores at -10 (code below replaces values < -10 with -10)
chdata$zscore[chdata$zscore < -10] <- -10
# Remove rows with time on follow-up (TOFU) of 15 years or more (diff >= 5476 days)
chdata <- chdata[chdata$diff < 5476,]
dat <- chdata[,c(2,35,44,48,50)]
dat <- dat[!(is.na(dat$z.categ)),]
# Select a subgroup of people
useSubset <- T
subselect <- function(addata, n) {
  # Draw a random subsample of `n` patients to save computing time
  # when playing with the analyses; returns only their rows.
  keep_ids <- sample(unique(addata$patient), n)
  keep_rows <- addata$patient %in% keep_ids
  addata[keep_rows, ]
}
datt <- subselect(dat, 1000)   # work on a random 1000-patient subsample
#subchdata <- dat
currentDate <- Sys.Date()
# ##############################################################################################
# ################ Model Building #################################################
# ##############################################################################################
# ===================== Children per CD4 Categories ==========================
testchdata <- datt #[addata$suppress == 1,] #subselect(addata,2000)
# Number of CD4 z-score categories (used later for plotting/legends).
nn <- nlevels(testchdata$z.categ)
# Two-level grouping: patients nested within z-score category.
mydataa <- groupedData(lab_v ~ diff | z.categ/patient, data = testchdata,order.groups=F) # Only On-ART period
# Order the data and get intial values for the parameter estimates
mydataa <- mydataa[order(mydataa$z.categ, mydataa$patient,mydataa$diff),]
# model1<-nls(zscore ~ SSasymp(diff,Asym,R0,lrc), data=mydataa,na.action="na.omit",
#            start=c(Asym=-2,R0=-4,lrc=-5), control = nls.control(maxiter = 50, tol = 1e-05, minFactor = 1/4096,
#            printEval = FALSE, warnOnly = FALSE))
#Individual fits for Old Model
# Separate three-parameter asymptotic-regression (SSasymp) fits per z-score
# category (model1/2.lis) and per patient (model3/4/5.lis), varying the
# starting values and pooling.
model1.lis <- nlsList(zscore ~ SSasymp(diff,Asym,R0,lrc)|z.categ, data=mydataa,
                     start=c(Asym=-2,R0=-4,lrc=-1),
                     pool = FALSE)
model2.lis <- nlsList(zscore ~ SSasymp(diff,Asym,R0,lrc)|z.categ, data=mydataa,
                     start=c(Asym=-2,R0=-4,lrc=-1))
model3.lis <- nlsList(zscore ~ SSasymp(diff,Asym,R0,lrc)|patient, data=mydataa,
                     start=c(Asym=-2,R0=-4,lrc=-1))
model4.lis <- nlsList(zscore ~ SSasymp(diff,Asym,R0,lrc)|patient, data=mydataa,
                     start=c(Asym=-1,R0=-3,lrc=-1))
model5.lis <- nlsList(zscore ~ SSasymp(diff,Asym,R0,lrc)|patient, data=mydataa,
                     start=c(Asym=-1.8,R0=-3.9,lrc=-1.7))
# Comments : Pool does not help
# Plot confidence intervals
# NOTE(review): `modeltest` is not defined anywhere in the live code (it only
# appears in a commented-out block further down), so this first plot() errors.
plot(intervals(modeltest), layout = c(5,1), na.rm = TRUE)
plot(intervals(model5.lis), layout = c(3,1), na.rm = TRUE)
plot(intervals(model4.lis), layout = c(3,1), na.rm = TRUE)
plot(intervals(model3.lis), layout = c(3,1))
plot(intervals(model2.lis), layout = c(3,1))
#anova(model2.lis,model1.lis)
# Results show that random effects are needed for all three parameters.
# Pairwise correlations of the per-group parameter estimates.
cor(coef(model3.lis), use = "pairwise.complete.obs")
# FIX: "sappply" was a typo for sapply() and would have stopped the script here.
# NOTE(review): sigmaHat is not exported by base R or nlme; confirm where it is
# defined (possibly intended: function(m) summary(m)$sigma).
sds <- sapply(model2.lis, sigmaHat)
# Per-patient coefficients from the individual nlsList fits; rows with NA
# estimates mark patients whose individual fit failed to converge.
tt <- coef(model5.lis)
tt <- cbind(patient = rownames(tt), tt)
convt <- tt$patient[!(is.na(tt$R0))]
pp <- coef(model4.lis)
pp <- cbind(patient = rownames(pp), pp)
convp <- pp$patient[!(is.na(pp$R0))]
# Patients whose individual fit converged (non-NA R0 under the model5 starts).
convdata <- mydataa[mydataa$patient %in% convt,]
myconv <- groupedData(zscore ~ diff | z.categ/patient, data = convdata,order.groups=F)
plot(myconv)
# Patients whose individual fit did NOT converge under the model4 starts.
notconvdata <- mydataa[!(mydataa$patient %in% convp),]
mynotconv <- groupedData(zscore ~ diff | z.categ/patient, data = notconvdata,order.groups=F)
plot(mynotconv[1:100,])
# Look at the residual per z.categ
plot(model2.lis, z.categ ~ resid(.), abline = 0)
# Make individual plots
plot(mydataa[550:600,])
plot(mydataa, outer = ~ diff * z.categ)
plot(augPred(model2.lis[580:600,]))
# Individual sum of LogLik
logLik(model2.lis) # Does not work in presence of NULL fits
# Scatter plot of the estimated parameters relationships
pairs(model2.lis)
pairs(model3.lis)
# NOTE(review): `model1` (the pooled nls fit) is commented out above, so this
# line will error; cof/init are nevertheless used by the nlme calls below.
cof <- coef(summary(model1))
init = cof[1:3] #c(Asym = 450, R0 = 380, lrc = -6)
# # ======================================================================
# # Single level models
# #Random effect defined for CD4 categories level
#model2a <- nlme(zscore ~ SSasymp(diff,Asym,R0,lrc),
# data = mydataa,
# na.action="na.omit",
# random = Asym + R0 ~ 1|cd4a.categ,
# fixed = list(Asym ~ 1, R0 ~ 1, lrc ~ 1),
# start= init, verbose = FALSE )
#
##Random effect defined for patient level
#model2b <- nlme(zscore ~ SSasymp(diff,Asym,R0,lrc),
# data = mydataa,
# na.action="na.omit",
# random = Asym + R0 ~ 1|patient,
# fixed = list(Asym ~ 1, R0 ~ 1, lrc ~ 1),
# start= init, verbose = FALSE )
#
# # Look at both outputs.
# summary(model2a)
# # StdDev Corr
# # Asym 0.5431035 Asym
# # R0 2.4207631 0.989
# # Residual 1.9394735
# summary(model2b)
# # StdDev Corr
# # Asym 1.748278 Asym
# # R0 2.847446 0.482
# # Residual 1.369344
# ======================================================================
# Multiple levels models
# Random effect defined for CD4 categories and patient levels
# Two-level NLME: fixed asymptotic-regression parameters, with Asym and R0
# random at both the z-score-category and patient levels.
model3 <- nlme(zscore ~ SSasymp(diff,Asym,R0,lrc),
               data = mydataa,
               na.action="na.omit",
               random = Asym + R0 ~ 1|z.categ/patient,
               fixed = list(Asym ~ 1, R0 ~ 1, lrc ~ 1),
               start= init, verbose = FALSE )
print("Model 3 with fixed effect and a two levels random effect.")
summary(model3)
plot(model3)
# High correlation between the parametres Asym & R0
# Plot shows an increase in the variability of the residuals
# modeltest <- nlme(zscore ~ SSasymp(diff,Asym,R0,lrc),
# data = mydataa,
# na.action="na.omit",
# random = Asym + R0 + lrc ~ 1 , #|z.categ/patient, lrc ~ 1 ),#|z.categ/patient),
# fixed = list(Asym ~ 1, R0 ~ 1, lrc ~ 1),
# start= init, verbose = TRUE )
#
# Error in nlme.formula(zscore ~ SSasymp(diff, Asym, R0, lrc), data = mydataa, :
# Singularity in backsolve at level 0, block 1
# In addition: Warning message:
# In nlme.formula(zscore ~ SSasymp(diff, Asym, R0, lrc), data = mydataa, :
# Singular precision matrix in level -1, block 4
# --------------------------------------
# Model 3 corrected for independant random effects on both levels
# (pdDiag -> diagonal covariance for the Asym/R0 random effects at each level).
model4 <- update(model3, random = list(z.categ = pdDiag(Asym + R0 ~ 1),patient = pdDiag(Asym + R0 ~ 1)))
print("Model3 corrected for independant random effects on both levels.")
summary(model4)
#Compare both model 3 & model4
anova(model3, model4)
# Results: According to the AIC values, there is no improvement from model3 to model4. However, addressing the
# correlation problem between the parameters seem of higher priority than reducing the AIC
# ----------------------------
# Inclusion of a covarience matrix in Model 4
# !!!!!!! NOT WORKING
# Error in Initialize.corARMA(X[[2L]], ...) :
# covariate must have unique values within groups for "corARMA" objects
# model5 <- update(model4, corr = corARMA(c(0.5,0.5),form = ~ diff |z.categ/patient , p=1,q=1))
# # print("Inclusion of a covarience matrix in Model 4.")
# summary(model5)
# # Compare model 4 & model 5
# anova(model4, model5)
# results:
# ----------------------------
# Model 5 corrected for heteroscedasticity
# NOTE(review): despite the label, both updates below start from model3,
# not from a model5 object.
model66 <- update(model3, weights = varPower())
model6 <- update(model3, weights = varIdent( 0.2, ~ 1|z.categ))
print("Model5 corrected for heteroscedasticity.")
summary(model6)
# Compare model 5 & model 6
# NOTE(review): this actually compares model4 with model6.
anova(model4, model6)
# results:
# -------------------------------------------------------
# Try Univariate models for different covariates
# Age at baseline
new.cof <- model3$coeff
# Three extra fixed-effect terms -> three appended zero starting values.
new.init1 <- c(as.numeric(new.cof$fixed[1:3]),0,0,0)
model6age1 <- update(model3, fixed = list(Asym ~ age, R0 ~ age, lrc ~ age), start = new.init1)
new.init <- c(as.numeric(new.cof$fixed[1:3]),0,0)
model6age2 <- update(model3, fixed = list(Asym ~ age, R0 ~ age, lrc ~ 1), start = new.init)
print("Model6 with Age covariate.")
# Compare model 6 & model.Age
#anova(model3, model6age)
anova(model6age1, model6age2)
summary(model6age1)
# Results: There is a significant improvement when including Age as a confounder for the Fixed parameters
# # Gender
# Univariate gender model; reuses new.init (3 fixed params + 2 extra terms).
model6gender <- update(model3, fixed = list(Asym + R0 ~ gender, lrc ~ 1), start = new.init)
print("Model6 with Gender covariate.")
summary(model6gender)
# Compare model 6 & model.gender
anova(model3, model6gender)
# # Results:
#
# # Baseline WHO stage
# model6who <- update(model3, fixed = list(Asym + R0 ~ fhv_stage_who, lrc ~ 1), start = new.init)
# print("Model6 with Who stage covariate.")
# summary(model6who)
# # Compare model 6 & model.who
# anova(model3, model6who) # Not comparable bcz of missing values in WHO stage variable
# Results:
# # Baseline BMI
# model6bmi <- update(model3, fixed = list(Asym + R0 ~ bmi, lrc ~ 1), start = new.init)
# print("Model6 with bmi covariate.")
# summary(model6bmi)
# # Compare model 6 & model.bmi
# anova(model3, model6bmi)
# # Results:
# # Baseline RNA
# model6rna <- update(model3, fixed = list(Asym + R0 ~ viral, lrc ~ 1), start = new.init)
# print("Model6 with RNA covariate.")
# summary(model6rna)
# # Compare model 6 & model.rna
# anova(model3, model6rna)
# # Results:
# # Baseline OIs
# model6rna <- update(model3, fixed = list(Asym + R0 ~ oi, lrc ~ 1), start = new.init)
# print("Model6 with Ois covariate.")
# summary(model6oi)
# # Compare model 6 & model.oi
# anova(model3, model6oi)
# # Results:
# -------------------------------------------------------
# Try full model of covariates: age + gender on Asym and R0, age on lrc.
# Six extra fixed-effect coefficients, hence six appended zero starts.
new.init <- c(as.numeric(new.cof$fixed[1:3]),0,0,0,0,0)
model6two <- update(model3, fixed = list(Asym ~ age + gender, R0 ~ age + gender, lrc ~ age), start = new.init)
print("Model6 with Age and gender covariates.")
summary(model6two)
# Compare model one cov & model with two
# FIX: "model6age" is never defined (the age-only fits are model6age1 and
# model6age2); compare the richer age-only model against age+gender instead.
anova(model6age1, model6two)
# results: No improvement by correcting for gender
# EXIT
#
# model33 <- nlme(zscore ~ SSasymp(diff,Asym,R0,lrc),
# data = mydataa, #we want this to be mydataa right? not mydata?
# na.action="na.omit",
# random = Asym + R0 ~ 1|cd4a.categ,
# fixed = list(Asym ~ age + gender, R0 ~ age + gender, lrc ~ 1),#,+ fhv_stage_who + gender + suppress + weight + height, R0 ~ 1, lrc ~ 1),
# start= c(Asym=-2,5,1,R0=-4,5,1,lrc=-5), verbose = FALSE )
#
# # ================================ Asy .vs. Int ===============================
# r <- ranef(model3)
# f <- fixef(model3)
# r<- c( -2.840674, -1.049151, -2.377164, -1.908371, -1.602952)
# i <- c( -8.298243, -0.790337, -4.767319, -3.075800, -2.160361)
#
# png("Output/Asym_Int.png", w=480,h = 480)
# plot(i,r, xlab = "Baseline z-score",pch = 15, ylab = "Long term z-score", col=1:6, main = "Relation between Asy & Int")
# #legend("topleft", levels(testchdata)[-6], col = 1:6, lty = 1, cex=0.7)
# dev.off()
#
# # =============================================================================
#
# barplot(r$Asym, names.arg = rownames(r))
# barplot(r$R0, names.arg = rownames(r))
# png("Output/Rebound_Int-Asym.png", w=480,h = 480)
# barplot(r$Asym - r$R0, names.arg = rownames(r))
# dev.off
# # =============================================================================
#
# # Plot residualss
# png("Output/Residuals.png", w=480,h = 480)
# plot(fitted(model33),residuals(model33),main="Residuals vs Fitted", cex=0.5, xlab = "Predictions", ylab= "Standardized Residuals")
# dev.off()
#
# # ##############################################################################
#
#
# # ===================== Children per CD4 Categories ==========================
#
# testchdata <- modchdata #[addata$suppress == 1,] #subselect(addata,2000)
# # #testchdata <- groupedData(zscore ~ diff | patient, data = testchdata,order.groups=F)
# #
# #
# nn <- nlevels(testchdata$cd4a.categ)
# mydataa <- groupedData(zscore ~ diff |patient, data = testchdata,order.groups=F) # Only On-ART period
# mydataa <- mydataa[order(mydataa$patient,mydataa$diff),]
# #print(paste0(length(unique(mydataa$patient)),' ',sd(mydataa$lab_v), " patients count, std of CD4 counts, used for LME models"))
# model1<-nls(zscore ~ SSasymp(diff,Asym,R0,lrc), data=mydataa,na.action="na.omit",
# start=c(Asym=-2,R0=-4,lrc=-5), control = nls.control(maxiter = 50, tol = 1e-05, minFactor = 1/4096,
# printEval = FALSE, warnOnly = FALSE))
# cof <- coef(summary(model1))
# #print(paste("Starting with category : ", curr.cat))
# print(cof)
# init = cof[1:3] #c(Asym = 450, R0 = 380, lrc = -6)
#
# model1 <- nlme(zscore ~ SSasymp(diff,Asym,R0,lrc),
# data = mydataa, #we want this to be mydataa right? not mydata?
# na.action="na.omit",
# random = Asym + R0 ~ 1,
# fixed = list(Asym ~ 1, R0 ~ 1, lrc ~ 1),#,+ fhv_stage_who + gender + suppress + weight + height, R0 ~ 1, lrc ~ 1),
# start= c(Asym=-2,R0=-4,lrc=-5), verbose = FALSE )
#
#
# model2 <- nlme(zscore ~ SSasymp(diff,Asym,R0,lrc),
# data = mydataa, #we want this to be mydataa right? not mydata?
# na.action="na.omit",
# random = Asym + R0 ~ 1,
# fixed = list(Asym ~ age + gender, R0 ~ age + gender, lrc ~ 1),#,+ fhv_stage_who + gender + suppress + weight + height, R0 ~ 1, lrc ~ 1),
# start= c(Asym=-2,5,1,R0=-4,5,1,lrc=-5), verbose = FALSE )
#
# # ================================ Asy .vs. Int ===============================
# r <- ranef(model2)
# f <- fixef(model2)
# plot(r$R0,r$Asym, xlab = "Baseline z-score", ylab = "Long term z-score", main = "Relation between Asy & Int")
# legend("topleft", rownames(r), lty = 1, cex=0.7)
#
# # =============================================================================
#
# barplot(r$Asym, names.arg = rownames(r))
# barplot(r$R0, names.arg = rownames(r))
# barplot(r$Asym - r$R0, names.arg = rownames(r))
#
# # =============================================================================
# #plot(model33, resid(., type = "p") ~ fitted(.) | cd4a.categ, abline = 0)
# plot(fitted(model33),residuals(model33),main="Residuals vs Fitted", cex=0.5)
# plot(model33) # Same as above. This looks better
#
# ####################################################
# # COMMENTS:
# # By comparing all nlme models, model5 sems to be the best fit to our data.
# # For the model formula, see refs Beaudrap (2008) and Lewis et al. (2011) in our Mendeley folder
# # KR: this formula is the same as the default SSasym. The only difference between model 4 and 5 is that
# # model 5 does not have a random R0 (intercept) which seems like it would be important in our
# # model since people start ART at many different starting CD4 counts and this has been shown to be
# # fairly important in determining what CD4 count people level off at
#
#
# ## Plot Model results - working
# modelpred.c1 <-as.numeric(predict(model2))
# modelpred.c2 <-labels(predict(model2))
# modelpred.c3 <-mydataa$diff
# modelpred <- data.frame(modelpred.c1,modelpred.c2, modelpred.c3)
#
# nam <- c("predictions","Ids","years")
# names(modelpred) <- nam
#
# # ====================================================== Functions
#
# # Defined functions
# med_CI <- function (x, ci = 0.90) {
# a <- median(x)
# s <- sd(x)
# n <- length(x)
# if (n == 1){
# s <- 0
# n <- 2
# }
# error <- qt(ci + (1 - ci)/2, df = n - 1) * s/sqrt(n)
# return(c(upper = a + error, med = a, lower = a - error))
# }
#
# myCI_u <- function(x){
# #print(length(x))
# # x <- x[!is.na(x)]
# bb <- med_CI(x, ci = 0.90)
# return(as.numeric(bb[1]))
# }
#
# myCI_l <- function(x){
# #print(length(x))
# # x <- x[!is.na(x)]
# bb <- med_CI(x, ci = 0.90)
# return(as.numeric(bb[3]))
# }
#
#
# ln.fxn <- function(xx, yy, pid, order.by.xx=T) {
# #browser()
# missing.data <- is.na(xx) | is.na(yy)
# xx <- xx[!missing.data]
# yy <- yy[!missing.data]
# if(order.by.xx) {
# ord <- order(xx)
# xx <- xx[ord]
# yy <- yy[ord]
# }
# #lines(xx, yy, col = pid[1])
# }
#
# # =================================================================================
#
# ## Figures 1 - Model Output
# pdf('Ch_Modelpredictions.pdf', w =10, h = 7)
# xs <- seq(0, 6, by = .1)
# xlim <- range(xs)
# par('ps' = 16, mfrow = c(1,1)) ## graphical parameters
# plot(0,0, type='n', xlab = 'Years since ART initiation', ylab = 'CD4 z-scores',xaxt='n', yaxt='n', bty = 'n',
# xlim = c(0,6000), ylim = c(-8,2), cex.main =0.9) # , main = "Suppressed viral load")
# axis(2, at = seq(-8,2, by = 2), las = 2) ## x axis
# axis(1, at = seq(0,5110, by = 730), labels = seq(0,14, by= 2) ) #,las = 2) ## x axis
# ## line for each pid
# print("Start ddply")
# #test <- ddply(modelpred, .(Ids), with, ln.fxn(years, predictions, Ids, order.by.xx=T))
# test <- modelpred
# test <- test[order(test$years),] # Order with respect to time since HAART initiation
# dat2 <- data.matrix(test) # Transform dataframe into a Matrix
# length(test$lab_v) == length(dat2[,3]) # (diff variable)
#
# # Transform matrix into longitudinal object's class
# print("Build longitudinal object")
# dat <- as.longitudinal(dat2 , repeats = as.numeric(table(as.numeric(dat2[,3]))), unique(as.numeric(dat2[,3])))
# #is.longitudinal(dat)
#
# # Calculate the medians
# med <- condense.longitudinal(dat, 1, median)
# confIntu <- condense.longitudinal(dat, 1, myCI_u) #CI(dat[,33],ci = 0.95)
# confIntl <- condense.longitudinal(dat, 1, myCI_l) #CI(dat[,33],ci = 0.95)
# tim <- get.time.repeats(dat)
# # sp <- smooth.spline(tim$time, med, spar=0.35)
# # lines(sp, col = col.vec[ii])
# lines(tim$time[180:length(tim$time)], rollmean(med, 180))#, col = col.vec[ii])
# # if (ii == nn){
# # lines(tim$time[180:length(tim$time)], rollmean(confIntu,180), col = gray(0.7), lty = 2)
# # }
# # if (ii == 1){
# # lines(tim$time[180:length(tim$time)], rollmean(confIntl,180), col = gray(0.7), lty = 2)
# # }
# # }
# title("CD4 z-scores medians' trajectory" ) #, outer=FALSE)
# #legend("topright", levels(testchdata$cd4a.categ), col = 1:nn, lty = 1)
# dev.off()
#
#
# # AIC
# anova(model1)
# anova(model1,model2)
# anova(model33,model3) # Not comparable
# qqnorm(model1)
# qqnorm(model2)
# qqnorm(model3)
# qqnorm(model33)
# # ============================================================================
#
|
# Demonstrates the `col` graphical parameter: plot Weight against Height
# using colour code 4 (blue).
# Use `<-` for assignment (R style) instead of `=`.
Height <- c(170, 169, 182, 175, 173)
Weight <- c(66, 59, 72, 78, 90)
# choose color
# 1:black 2:red 3:green 4:blue 5:pink
plot(Height, Weight, col = 4)
Weight = c(66,59,72,78,90)
# choose color
# 1:black 2:red 3:green 4:blue 5:pink
plot(Height,Weight,col=4) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/texreg.R
\name{knitreg}
\alias{knitreg}
\title{Flexibly choose the right table output format for use with \pkg{knitr}}
\usage{
knitreg(...)
}
\arguments{
\item{...}{Arguments to be handed over to the \link{texreg}, \link{htmlreg},
\link{screenreg}, or \link{matrixreg} function. See the respective help
page for details.}
}
\value{
A table as a \code{character} string in the respective output format.
}
\description{
Flexibly choose the right table output format for use with \pkg{knitr}.
}
\details{
This function automatically selects the right function (\link{texreg},
\link{screenreg}, \link{htmlreg}, or \link{matrixreg}) with the right set of
arguments for use with the \pkg{knitr} package, for example in RStudio. The
advantage of using this function with \pkg{knitr} is that the user does not
need to replace the \link{texreg}, \link{htmlreg} etc. function call in the
document when a different output format is selected.
\link{knitreg} works with...
\itemize{
\item \R HTML documents (\code{.Rhtml} extension)
\item \R Sweave documents (\code{.Rnw} extension) for PDF output via LaTeX,
rendered using...
\itemize{
\item the \pkg{knitr} package
\item the \pkg{Sweave} package
}
\item \R Markdown documents (\code{.Rmd} extension), rendered as...
\itemize{
\item HTML documents
\item PDF documents
\item Word documents
\item Powerpoint presentations
\item Presentations (\code{.Rpres} extension, not \code{.Rmd})
}
\item \R Notebooks, including preview
}
If Markdown and HTML rendering are selected, \link{htmlreg} arguments
\code{doctype = FALSE} and \code{star.symbol = "\\*"} are set to enable
compatibility with Markdown. With \R HTML documents (but not Markdown) or
presentations (\code{.Rpres} extension), only \code{doctype = FALSE} is set.
For PDF/LaTeX documents, the \link{texreg} argument
\code{use.packages = FALSE} is set to suppress any package loading
instructions in the preamble. The user must load any packages manually in the
preamble of the document.
The \pkg{knitr} and \pkg{rmarkdown} packages must be installed for this
function to work.
}
\examples{
require("nlme")
model.1 <- lme(distance ~ age, data = Orthodont, random = ~ 1)
model.2 <- lme(distance ~ age + Sex, data = Orthodont, random = ~ 1)
knitreg(list(model.1, model.2), center = FALSE, caption = "", table = FALSE)
}
\seealso{
\code{\link{texreg-package}} \code{\link{extract}}
Other texreg:
\code{\link{htmlreg}()},
\code{\link{huxtablereg}()},
\code{\link{matrixreg}()},
\code{\link{plotreg}()},
\code{\link{screenreg}()},
\code{\link{texreg}},
\code{\link{wordreg}()}
}
\author{
Philip Leifeld, with input from David Hugh-Jones
}
\concept{texreg}
| /man/knitreg.Rd | no_license | fmerhout/texreg | R | false | true | 2,797 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/texreg.R
\name{knitreg}
\alias{knitreg}
\title{Flexibly choose the right table output format for use with \pkg{knitr}}
\usage{
knitreg(...)
}
\arguments{
\item{...}{Arguments to be handed over to the \link{texreg}, \link{htmlreg},
\link{screenreg}, or \link{matrixreg} function. See the respective help
page for details.}
}
\value{
A table as a \code{character} string in the respective output format.
}
\description{
Flexibly choose the right table output format for use with \pkg{knitr}.
}
\details{
This function automatically selects the right function (\link{texreg},
\link{screenreg}, \link{htmlreg}, or \link{matrixreg}) with the right set of
arguments for use with the \pkg{knitr} package, for example in RStudio. The
advantage of using this function with \pkg{knitr} is that the user does not
need to replace the \link{texreg}, \link{htmlreg} etc. function call in the
document when a different output format is selected.
\link{knitreg} works with...
\itemize{
\item \R HTML documents (\code{.Rhtml} extension)
\item \R Sweave documents (\code{.Rnw} extension) for PDF output via LaTeX,
rendered using...
\itemize{
\item the \pkg{knitr} package
\item the \pkg{Sweave} package
}
\item \R Markdown documents (\code{.Rmd} extension), rendered as...
\itemize{
\item HTML documents
\item PDF documents
\item Word documents
\item Powerpoint presentations
\item Presentations (\code{.Rpres} extension, not \code{.Rmd})
}
\item \R Notebooks, including preview
}
If Markdown and HTML rendering are selected, \link{htmlreg} arguments
\code{doctype = FALSE} and \code{star.symbol = "\\*"} are set to enable
compatibility with Markdown. With \R HTML documents (but not Markdown) or
presentations (\code{.Rpres} extension), only \code{doctype = FALSE} is set.
For PDF/LaTeX documents, the \link{texreg} argument
\code{use.packages = FALSE} is set to suppress any package loading
instructions in the preamble. The user must load any packages manually in the
preamble of the document.
The \pkg{knitr} and \pkg{rmarkdown} packages must be installed for this
function to work.
}
\examples{
require("nlme")
model.1 <- lme(distance ~ age, data = Orthodont, random = ~ 1)
model.2 <- lme(distance ~ age + Sex, data = Orthodont, random = ~ 1)
knitreg(list(model.1, model.2), center = FALSE, caption = "", table = FALSE)
}
\seealso{
\code{\link{texreg-package}} \code{\link{extract}}
Other texreg:
\code{\link{htmlreg}()},
\code{\link{huxtablereg}()},
\code{\link{matrixreg}()},
\code{\link{plotreg}()},
\code{\link{screenreg}()},
\code{\link{texreg}},
\code{\link{wordreg}()}
}
\author{
Philip Leifeld, with input from David Hugh-Jones
}
\concept{texreg}
|
#! /usr/bin/env Rscript
#
#-------------------------------------------------------------------#
# Run the entire pipeline
# ! Transform metabolome data into a computable format !
# Store in MongoDB
#-------------------------------------------------------------------#
# Script metadata.
.author     <- "Aaron Brooks"
.copyright  <- "Copyright 2015"
.credits    <- "Aaron Brooks"
.license    <- "GPL"
.version    <- "0.0.1"
.maintainer <- "Aaron Brooks"
.email      <- "aaron.brooks@embl.de"
.status     <- "Development"
library(devtools)
# Definitions ---------------------------------------------------
# Data
DATADIR <- "/g/steinmetz/project/GenPhen/data/"
# MongoDB
# NOTE(review): "127.1.1.1" is an unusual loopback address -- confirm it is
# not meant to be 127.0.0.1.
HOST   <- "127.1.1.1:27017"
DBNAME <- "genphen"
# Load components ---------------------------------------------------
source_url("https://raw.githubusercontent.com/scalefreegan/steinmetz-lab/master/genphen/metabolome/pipeline/readXLS.R")
source_url("https://raw.githubusercontent.com/scalefreegan/steinmetz-lab/master/genphen/metabolome/pipeline/insertMongoDB.R")
# Load and process data from xls ---------------------------------------------------
f1 <- paste0(DATADIR, "endometabolome/data/Endometabolome_1B_46B_sorted by cultivation phase.xlsx")
f2 <- paste0(DATADIR, "endometabolome/data/Endometabolome_46B_sorted by cultivation time.xlsx")
thisdata <- processData(f1, f2)
mongo    <- mongoConnect(HOST, DBNAME)
| /genphen/metabolome/pipeline/main.R | no_license | scalefreegan/steinmetz-lab | R | false | false | 1,348 | r | #! /usr/bin/env Rscript
#
#-------------------------------------------------------------------#
# Run the entire pipeline
# ! Transform metabolome data into a computable format !
# Store in MongoDB
#-------------------------------------------------------------------#
.author = "Aaron Brooks"
.copyright = "Copyright 2015"
.credits = "Aaron Brooks"
.license = "GPL"
.version = "0.0.1"
.maintainer = "Aaron Brooks"
.email = "aaron.brooks@embl.de"
.status = "Development"
library(devtools)
# Definitions ---------------------------------------------------
# Data
DATADIR = "/g/steinmetz/project/GenPhen/data/"
# MongoDB
HOST = "127.1.1.1:27017"
DBNAME = "genphen"
# Load components ---------------------------------------------------
source_url("https://raw.githubusercontent.com/scalefreegan/steinmetz-lab/master/genphen/metabolome/pipeline/readXLS.R")
source_url("https://raw.githubusercontent.com/scalefreegan/steinmetz-lab/master/genphen/metabolome/pipeline/insertMongoDB.R")
# Load and process data from xls ---------------------------------------------------
f1 = paste(DATADIR, "endometabolome/data/Endometabolome_1B_46B_sorted by cultivation phase.xlsx", sep="")
f2 = paste(DATADIR, "endometabolome/data/Endometabolome_46B_sorted by cultivation time.xlsx", sep="")
thisdata = processData(f1, f2)
mongo = mongoConnect(HOST, DBNAME)
|
## Plot4: 2x2 panel of household power-consumption plots for 2007-02-01 and
## 2007-02-02, written to plot4.png.
## Loading Data from 2/1/2007 to 2/2/2007
# NOTE(review): hard-coded Windows path; the script only runs where this
# file exists.
filePower <- "C:/Users/Johnny/Desktop/course/1-R Programming/R/data/household_power_consumption.txt"
data <- read.table(filePower, header = TRUE,sep=";", stringsAsFactors=FALSE, dec=".")
# Keep only the two target days (dates are stored as d/m/Y strings).
data1 <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
## Plot4
#str(subSetData)
# Combine Date and Time columns into timestamps for the x axis.
DateTime <- strptime(paste(data1$Date, data1$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(data1$Global_active_power)
globalReactivePower <- as.numeric(data1$Global_reactive_power)
voltage <- as.numeric(data1$Voltage)
Sub_metering_1 <- as.numeric(data1$Sub_metering_1)
Sub_metering_2 <- as.numeric(data1$Sub_metering_2)
Sub_metering_3 <- as.numeric(data1$Sub_metering_3)
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))   # four panels, filled row by row
plot(DateTime, globalActivePower,
     type="l", xlab="", ylab="Global Active Power")
plot(DateTime, voltage, type="l",
     xlab="DateTime", ylab="Voltage")
plot(DateTime, Sub_metering_1, type="l",
     xlab="", ylab="Energy Submetering")
lines(DateTime, Sub_metering_2,
      type="l",
      col="red")
lines(DateTime,
      Sub_metering_3,
      type="l",
      col="blue")
# FIX: the legend call previously passed an empty "lty=" argument (a missing
# value); supply the solid line type explicitly so key lines are drawn.
legend("topright",
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd=2.5,
       col=c("black", "red", "blue"),
       bty="o")
plot(DateTime, globalReactivePower, type="l",
     xlab="DateTime", ylab="Global_reactive_power")
dev.off()
| /plot4.R | no_license | macauclay/ExData_Plotting1 | R | false | false | 1,444 | r | ## Loading Data from 2/1/2007 to 2/2/2007
filePower <- "C:/Users/Johnny/Desktop/course/1-R Programming/R/data/household_power_consumption.txt"
data <- read.table(filePower, header = TRUE,sep=";", stringsAsFactors=FALSE, dec=".")
data1 <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
## Plot4
#str(subSetData)
DateTime <- strptime(paste(data1$Date, data1$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(data1$Global_active_power)
globalReactivePower <- as.numeric(data1$Global_reactive_power)
voltage <- as.numeric(data1$Voltage)
Sub_metering_1 <- as.numeric(data1$Sub_metering_1)
Sub_metering_2 <- as.numeric(data1$Sub_metering_2)
Sub_metering_3 <- as.numeric(data1$Sub_metering_3)
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(DateTime, globalActivePower,
type="l", xlab="", ylab="Global Active Power")
plot(DateTime, voltage, type="l",
xlab="DateTime", ylab="Voltage")
plot(DateTime, Sub_metering_1, type="l",
xlab="", ylab="Energy Submetering")
lines(DateTime, Sub_metering_2,
type="l",
col="red")
lines(DateTime,
Sub_metering_3,
type="l",
col="blue")
legend("topright",
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty=, lwd=2.5,
col=c("black", "red", "blue"),
bty="o")
plot(DateTime, globalReactivePower, type="l",
xlab="DateTime", ylab="Global_reactive_power")
dev.off()
|
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024047131L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result) | /IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609873678-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 729 | r | testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024047131L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result) |
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## The following function makecachematrix creates a special matrix that can caches its inverse
## Create a "cache matrix": a list of accessor closures wrapping a matrix
## together with a cached copy of its inverse. The cache lives in the
## enclosing environment (written via <<-) and is invalidated whenever the
## matrix is replaced with set().
##
## Args:
##   x: a matrix (defaults to an empty 1x1 NA matrix).
## Returns:
##   A list with elements set, get, setinverse, getinverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    inv <<- NULL  # new matrix, so drop the stale cached inverse
  }
  get <- function() x
  # Parameter renamed from `solve` (which shadowed base::solve inside the
  # closure) to `value` for clarity; callers pass it positionally.
  setinverse <- function(value) inv <<- value
  getinverse <- function() inv
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Write a short comment describing this function
## This function computes the inverse of the special matrix returned by makeCacheMatrix, if the inverse has already been calculated, then cachesolve will
## retrieve the inverse from the cache
## Return the inverse of the special matrix produced by makeCacheMatrix().
## On a cache hit the stored inverse is returned immediately (with a
## message); on a miss it is computed with solve(), stored via
## x$setinverse(), and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("Getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
| /cachematrix.R | no_license | Abdulrahman-Saleh/ProgrammingAssignment2 | R | false | false | 1,024 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## The following function makecachematrix creates a special matrix that can caches its inverse
## Build a matrix wrapper whose inverse can be cached.
## The returned list exposes set/get for the matrix itself and
## setinverse/getinverse for the cached inverse; replacing the matrix
## with set() clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL  # memoised inverse, NULL until setinverse() is called
  set <- function(y) {
    x <<- y
    cached <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(solve) {
    cached <<- solve
  }
  getinverse <- function() {
    cached
  }
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Write a short comment describing this function
## This function computes the inverse of the special matrix returned by makeCacheMatrix, if the inverse has already been calculated, then cachesolve will
## retrieve the inverse from the cache
## Inverse of a makeCacheMatrix() object, computed lazily and memoised:
## a cached inverse is reused (with a message); otherwise solve() runs
## once and the result is stored for subsequent calls.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  } else {
    message("Getting cached data")
  }
  inv
}
|
rm(list=ls())
setwd("/srv/ccrc/data34/z3478332/CMIP-WRF-ECLs/")
library(ggplot2)
library(reshape2)
library(abind)
library(RNetCDF)
wrf=c("R1","R2","R3")
namelist=rep("aaa",12)
events<-fixes<-list()
n=1
for(proj in c(100,240))
for(r in 1:3)
{
if(proj==100) cv=1 else cv=1.35
events[[n]]=read.csv(paste("outputUM/proj",proj,"/outputUM_ncep_WRF",wrf[r],"_50_rad2cv1/ECLevents_umelb_ncep_wrf",wrf[r],"_proj",proj,"_rad2cv",cv,"_9009.csv",sep=""))
events[[n]]$Year=floor(events[[n]]$Date1/10000)
events[[n]]$Month=floor(events[[n]]$Date1/100)%%100
fixes[[n]]=read.csv(paste("outputUM/proj",proj,"/outputUM_ncep_WRF",wrf[r],"_50_rad2cv1/ECLfixes_umelb_ncep_wrf",wrf[r],"_proj",proj,"_rad2cv",cv,"_9009.csv",sep=""))
fixes[[n]]$Year=floor(fixes[[n]]$Date/10000)
fixes[[n]]$Month=floor(fixes[[n]]$Date/100)%%100
fixes[[n]]$Date2=as.POSIXct(paste(as.character(fixes[[n]]$Date),substr(fixes[[n]]$Time,1,2),sep=""),format="%Y%m%d%H",tz="GMT")
n=n+1
}
proj=100
eventsN=read.csv(paste("outputUM/proj",proj,"/outputUM_ncep_rad2cv1/ECLevents_umelb_ncep_proj",proj,"_rad2cv1_9009.csv",sep=""))
eventsN$Year=floor(eventsN$Date1/10000)
eventsN$Month=floor(eventsN$Date1/100)%%100
fixesN=read.csv(paste("outputUM/proj",proj,"/outputUM_ncep_rad2cv1/ECLfixes_umelb_ncep_proj",proj,"_rad2cv1_9009.csv",sep=""))
fixesN$Year=floor(fixesN$Date/10000)
fixesN$Month=floor(fixesN$Date/100)%%100
fixesN$Date2=as.POSIXct(paste(as.character(fixesN$Date),substr(fixesN$Time,1,2),sep=""),format="%Y%m%d%H",tz="GMT")
eventsE=read.csv("/srv/ccrc/data34/z3478332/ECLtracks/outputUM_erai_150_topo_rad2_proj100/ECLevents_umelb_erai_150_topo_rad2_proj100.csv",stringsAsFactors = F)
eventsE$Year=floor(eventsE$Date1/10000)
eventsE$Month=floor(eventsE$Date1/100)%%100
eventsE=eventsE[eventsE$Year>=1990 & eventsE$Year<=2009,]
fixesE=read.csv("/srv/ccrc/data34/z3478332/ECLtracks/outputUM_erai_150_topo_rad2_proj100/ECLfixes_umelb_erai_150_topo_rad2_proj100.csv",stringsAsFactors = F)
fixesE$Year=floor(fixesE$Date/10000)
fixesE$Month=floor(fixesE$Date/100)%%100
fixesE$Date2=as.POSIXct(paste(as.character(fixesE$Date),substr(fixesE$Time,1,2),sep=""),format="%Y%m%d%H",tz="GMT")
###### Step 1 - Count by year/month
years=cbind(1990:2009,matrix(0,20,8))
colnames(years)<-c("Year","NCEP p100","ERAI p100","R1 p100","R2 p100","R3 p100","R1 p240","R2 p240","R3 p240")
months=cbind(1:12,matrix(0,12,8))
colnames(months)<-c("Month","NCEP p100","ERAI p100","R1 p100","R2 p100","R3 p100","R1 p240","R2 p240","R3 p240")
for(i in 1:20)
{
I=which(eventsN$Year==years[i,1])
years[i,2]=length(I)
I=which(eventsE$Year==years[i,1])
years[i,3]=length(I)
for(j in 1:6)
{
I=which(events[[j]]$Year==years[i,1])
years[i,j+3]=length(I)
}
}
apply(years,2,mean)
for(i in 1:6) print(cor(years[,2],years[,i+2]))
for(i in 1:12)
{
I=which(eventsN$Month==months[i,1])
months[i,2]=length(I)
I=which(eventsE$Month==months[i,1])
months[i,3]=length(I)
for(j in 1:6)
{
I=which(events[[j]]$Month==months[i,1])
months[i,j+3]=length(I)
}
}
months2=months
for(i in 1:12) months2[i,2:9]=100*months[i,2:9]/apply(months[,2:9],2,sum)
apply(months2[5:10,],2,sum)
plot(months2[,1],months2[,2],col=1,lwd=3,type="l",xlab="Month",ylab="% of ECLs",ylim=c(0,max(months2)))
for(i in 2:8) lines(months2[,1],months2[,i+1],col=i,lwd=3)
##########
yearsA=years
monthsA=months
for(i in 4:6) events[[i]]$CV2=events[[i]]$CVmax
thresh=22
thresh2=rep(0,6)
for(i in 1:6)
{
data=events[[i]]
b=order(data$CV2,decreasing=T)
thresh2[i]=data$CV2[b[20*thresh]]
if(is.na(thresh2[i])) thresh2[i]=min(data$CV2,na.rm=T)
for(y in 1:20) yearsA[y,i+3]=length(which(data$Year==years[y,1] & data$CV2>=thresh2[i]))
for(m in 1:12) monthsA[m,i+3]=length(which(data$Month==m & data$CV2>=thresh2[i]))
}
corrs=matrix(0,8,8)
for(i in 1:8)
for(j in 1:8)
corrs[i,j]=cor(yearsA[,i+1],yearsA[,j+1])
corrs[corrs==1]=NaN
months2=monthsA
for(i in 1:12) months2[i,2:9]=100*monthsA[i,2:9]/apply(monthsA[,2:9],2,sum)
apply(months2[5:10,],2,sum)
clist=c("black","grey","red","blue","purple")
plot(months2[,1],months2[,2],col=1,lwd=3,type="l",xlab="Month",ylab="% of ECLs",ylim=c(0,max(months2)))
for(i in 2:5) lines(months2[,1],months2[,i+1],col=clist[i],lwd=3)
###################### ECL matching?
matches=matrix(0,length(eventsN$ID),7)
for(i in 1:length(eventsN$ID))
{
tmp=fixesN[(fixesN$ID==eventsN$ID[i] & fixesN$Location==1),]
rn=range(tmp$Date2)
I=which(fixesE$Date2<=rn[2]+(60*60*6) & fixesE$Date2>=rn[1]-(60*60*6) & fixesE$Location==1)
if(length(I)>0) matches[i,1]=1
for(j in 1:6)
{
J=which(events[[j]]$CV2>=thresh2[j])
I=which(fixes[[j]]$Date2<=rn[2]+(60*60*6) & fixes[[j]]$Date2>=rn[1]-(60*60*6) &
fixes[[j]]$Location==1 & fixes[[j]]$ID%in%events[[j]]$ID[J])
if(length(I)>0) matches[i,1+j]=1
}
}
apply(matches,2,mean)
### Same version, different projection
for(n in 1:3)
{
K=which(events[[n]]$CV2>=thresh2[n])
matches=rep(0,length(K))
for(i in 1:length(K))
{
tmp=fixes[[n]][(fixes[[n]]$ID==events[[n]]$ID[K[i]] & fixes[[n]]$Location==1),]
rn=range(tmp$Date2)
j=n+3
J=which(events[[j]]$CV2>=thresh2[j])
I=which(fixes[[j]]$Date2<=rn[2]+(60*60*6) & fixes[[j]]$Date2>=rn[1]-(60*60*6) &
fixes[[j]]$Location==1 & fixes[[j]]$ID%in%events[[j]]$ID[J])
if(length(I)>0) matches[i]=1
}
print(mean(matches))
}
########## Locations
lat=seq(-40,-24,2)
lon=seq(148,160,2)
loc=array(0,c(9,7,8))
for(y in 1:length(lat))
for(x in 1:length(lon))
{
I=which(fixesN$Lon>=lon[x]-1 & fixesN$Lon<lon[x]+1 & fixesN$Lat>=lat[y]-1 & fixesN$Lat<lat[y]+1 & fixesN$Location==1)
loc[y,x,1]=length(I)
I=which(fixesE$Lon>=lon[x]-1 & fixesE$Lon<lon[x]+1 & fixesE$Lat>=lat[y]-1 & fixesE$Lat<lat[y]+1 & fixesE$Location==1)
loc[y,x,2]=length(I)
for(n in 1:6)
{
J=which(events[[n]]$CV2>=thresh2[n])
I=which(fixes[[n]]$Lon>=lon[x]-1 & fixes[[n]]$Lon<lon[x]+1 & fixes[[n]]$Lat>=lat[y]-1 & fixes[[n]]$Lat<lat[y]+1 &
fixes[[n]]$Location==1 & fixes[[n]]$ID%in%events[[n]]$ID[J] & fixes[[n]]$CV>=thresh2[n])
loc[y,x,n+2]=length(I)
}
}
loc=loc/20
names=c("NCEP_p100","ERAI_p100","NCEP-R1_p100","NCEP-R2_p100","NCEP-R3_p100","NCEP-R1_p240","NCEP-R2_p240","NCEP-R3_p240")
library(maps)
# Draw a vertical colour-key panel: a one-column image with one cell per
# colour in `cols`, boxed, with break labels on the right-hand axis.
# `brks` are the numeric breakpoints delimiting the colour classes; if no
# `labels` are supplied the interior breakpoints are used. Changes par()
# margins for the current device (intended for use inside layout()).
ColorBar <- function(brks, cols, labels = NA)
{
  par(mar = c(3, 1, 3, 4), mgp = c(1, 1, 0), las = 1, cex = 1.2)
  n_cells <- length(cols)
  image(1, seq_len(n_cells), t(seq_len(n_cells)), axes = FALSE, col = cols,
        xlab = "", ylab = "")
  box()
  if (is.na(labels[1])) {
    labels <- brks[seq(2, length(brks) - 1)]  # interior break values
  }
  axis(4, at = seq(1.5, length(brks) - 1.5), tick = TRUE, labels = labels)
}
cm2=gray(seq(1,0.1,-0.15))
bb2=c(-0.5,0,1,2,3,4,5,100)
for(i in 1:8)
{
pdf(file=paste("outputUM/ECL_locations_",names[i],"_top22_d01.pdf",sep=""),width=6,height=4.5,pointsize=12)
layout(cbind(1,2),c(1,0.3))
par(mar=c(3,3,3,2))
image(lon,lat,t(loc[,,i]),xlab="",ylab="",breaks=bb2,col=cm2,zlim=c(-Inf,Inf),xlim=c(142.5,162.5),ylim=c(-42.5,-22.5),main=names[i],cex.axis=1.5,cex.main=1.5)
map(xlim=c(142.5,162.5),ylim=c(-42.5,-22.5),add=T,lwd=2)
ColorBar(bb2,cm2)
dev.off()
}
########## Whole CORDEX domain
fixesALL<-list()
fixesALL[[1]]=read.csv(paste("outputUM/proj",proj,"/outputUM_ncep_rad2cv1/ECLfixes_umelb_ncep_rad2cv1_9009_ALL.csv",sep=""))
fixesALL[[2]]=read.csv("/srv/ccrc/data34/z3478332/ECLtracks/outputUM_erai_150_topo_rad2_proj100/ECLfixes_umelb_erai_rad2cv1_9009_ALL.csv",stringsAsFactors = F)
n=3
for(proj in c(100,240))
for(r in 1:3)
{
tmp=read.csv(paste("outputUM/proj",proj,"/outputUM_ncep_WRF",wrf[r],"_50_rad2cv1/ECLfixes_umelb_ncep_wrf",wrf[r],"_rad2cv1_9009_ALL.csv",sep=""))
fixesALL[[n]]=tmp[tmp$CV>=thresh2[n-2],]
n=n+1
}
lat=seq(-50,0,2.5)
lon=seq(100,180,2.5)
loc2=array(0,c(21,33,8))
for(y in 1:length(lat))
for(x in 1:length(lon))
for(n in 1:8)
{
I=which(fixesALL[[n]]$Lon>=lon[x]-1.25 & fixesALL[[n]]$Lon<lon[x]+1.25 & fixesALL[[n]]$Lat>=lat[y]-1.25 & fixesALL[[n]]$Lat<lat[y]+1.25)
loc2[y,x,n]=length(I)
}
loc2=loc2/20
names=c("NCEP_p100","ERAI_p100","NCEP-R1_p100","NCEP-R2_p100","NCEP-R3_p100","NCEP-R1_p240","NCEP-R2_p240","NCEP-R3_p240")
for(i in 1:8)
{
pdf(file=paste("outputUM/ECL_locations_CORDEX_",names[i],"_top22_d01_v2.pdf",sep=""),width=8,height=5,pointsize=12)
layout(cbind(1,2),c(1,0.2))
par(mar=c(3,3,3,2))
image(lon,lat,t(loc2[,,i]),xlab="",ylab="",breaks=bb2,col=cm2,zlim=c(-Inf,Inf),main=names[i],cex.axis=1.5,cex.main=1.5,
xlim=c(110,172.5),ylim=c(-45,-10))
map(add=T,lwd=2)
ColorBar(bb2,cm2)
dev.off()
}
pdf(file=paste("outputUM/ECL_locations_CORDEX_NCEP-WRF_proj100_top22_d01_v2.pdf",sep=""),width=8,height=5,pointsize=12)
layout(cbind(1,2),c(1,0.2))
par(mar=c(3,3,3,2))
image(lon,lat,t(apply(loc2[,,3:5],c(1,2),mean)),xlab="",ylab="",breaks=bb2,
col=cm2,zlim=c(-Inf,Inf),main="NCEP-WRF",cex.axis=1.5,cex.main=1.5,
xlim=c(110,172.5),ylim=c(-45,-10))
map(add=T,lwd=2)
ColorBar(bb2,cm2)
dev.off()
####### Compare to merra
eventsM=read.csv("/srv/ccrc/data34/z3478332/ECLtracks/outputUM_merraN_rad2cv1_proj100_diff2/ECLevents_umelb_merra_rad2cv1_9009.csv")
eventsM=eventsM[eventsM$Date1>=19900000 & eventsM$Date2<=20100000,]
fixesM=read.csv("/srv/ccrc/data34/z3478332/ECLtracks/outputUM_merraN_rad2cv1_proj100_diff2/ECLfixes_umelb_merra_rad2cv1_9009_ALL.csv",stringsAsFactors = F)
fixesM=fixesM[fixesM$Date>=19900000 & fixesM$Date<=20100000,]
locM=array(0,c(21,33))
for(y in 1:length(lat))
for(x in 1:length(lon))
{
I=which(fixesM$Lon>=lon[x]-1.25 & fixesM$Lon<lon[x]+1.25 & fixesM$Lat>=lat[y]-1.25 & fixesM$Lat<lat[y]+1.25)
locM[y,x]=length(I)
}
locM=locM/20
pdf(file=paste("outputUM/ECL_locations_CORDEX_MERRA_proj100_d01_v2.pdf",sep=""),width=8,height=5,pointsize=12)
layout(cbind(1,2),c(1,0.2))
par(mar=c(3,3,3,2))
image(lon,lat,t(locM),xlab="",ylab="",breaks=bb2,
col=cm2,zlim=c(-Inf,Inf),main="MERRA",cex.axis=1.5,cex.main=1.5,
xlim=c(110,172.5),ylim=c(-45,-10))
map(add=T,lwd=2)
ColorBar(bb2,cm2)
dev.off()
###############
##########
## Histograms of ECL intensity, duration, etc.
fixesM=read.csv("/srv/ccrc/data34/z3478332/ECLtracks/outputUM_merraN_rad2cv1_proj100_diff2/ECLfixes_umelb_merra_rad2cv1_9009.csv",stringsAsFactors = F)
fixesM=fixesM[fixesM$Date>=19900000 & fixesM$Date<=20100000,]
cvthresh=c(seq(1,4,0.25),Inf)
cvhist=matrix(0,13,6)
rownames(cvhist)=cvthresh[1:13]
colnames(cvhist)=c("NCEP","ERAI","MERRA","R1","R2","R3")
for(i in 1:length(cvhist[,1]))
{
cvhist[i,1]=length(which(eventsN$CV2>=cvthresh[i] & eventsN$CV2<cvthresh[i+1]))
cvhist[i,2]=length(which(eventsE$CV2>=cvthresh[i] & eventsE$CV2<cvthresh[i+1]))
cvhist[i,3]=length(which(eventsM$CV2>=cvthresh[i] & eventsM$CV2<cvthresh[i+1]))
for(n in 1:3) cvhist[i,n+3]=length(which(events[[n]]$CV2>=cvthresh[i] & events[[n]]$CV2<cvthresh[i+1]))
}
clist=c("black","grey","blue","red","darkgreen","purple")
plot(cvthresh[1:13],cvhist[,1],lwd=2,type="l",
xlab="Intensity",ylab="Number of events",ylim=c(0,250))
for(i in 2:6) lines(cvthresh[1:13],cvhist[,i],lwd=2,col=clist[i])
lenthresh=c(1,5,9,13,17,21,Inf)
lenhist=matrix(0,6,6)
rownames(lenhist)=lenthresh[1:6]
colnames(lenhist)=c("NCEP","ERAI","MERRA","R1","R2","R3")
for(i in 1:length(lenhist[,1]))
{
lenhist[i,1]=length(which(eventsN$Length2>=lenthresh[i] & eventsN$Length2<lenthresh[i+1]))
lenhist[i,2]=length(which(eventsE$Length2>=lenthresh[i] & eventsE$Length2<lenthresh[i+1]))
lenhist[i,3]=length(which(eventsM$Length2>=lenthresh[i] & eventsM$Length2<lenthresh[i+1]))
for(n in 1:3) lenhist[i,n+3]=length(which(events[[n]]$Length2>=lenthresh[i] & events[[n]]$Length2<lenthresh[i+1] & events[[n]]$CV2>=thresh2[n]))
}
###### That's odd - is it because these events move slower on average?
fixesN$Movement=NaN
library(sp)
for(i in 2:length(fixesN[,1]))
if(fixesN$ID[i]==fixesN$ID[i-1])
fixesN$Movement[i]=spDistsN1(as.matrix(cbind(fixesN$Lon[i],fixesN$Lat[i])),c(fixesN$Lon[i-1],fixesN$Lat[i-1]),longlat=T)
eventsN$Move2<-eventsN$Move<-rep(NaN,length(eventsN$ID))
for(i in 1:length(eventsN$ID))
{
I=which(fixesN$ID==eventsN$ID[i])
eventsN$Move[i]=mean(fixesN$Movement[I],na.rm=T)
I=which(fixesN$ID==eventsN$ID[i] & fixesN$Location==1)
eventsN$Move2[i]=mean(fixesN$Movement[I],na.rm=T)
}
fixesE$Movement=NaN
for(i in 2:length(fixesE[,1]))
if(fixesE$ID[i]==fixesE$ID[i-1])
fixesE$Movement[i]=spDistsN1(as.matrix(cbind(fixesE$Lon[i],fixesE$Lat[i])),c(fixesE$Lon[i-1],fixesE$Lat[i-1]),longlat=T)
eventsE$Move2<-eventsE$Move<-rep(NaN,length(eventsE$ID))
for(i in 1:length(eventsE$ID))
{
I=which(fixesE$ID==eventsE$ID[i])
eventsE$Move[i]=mean(fixesE$Movement[I],na.rm=T)
I=which(fixesE$ID==eventsE$ID[i] & fixesE$Location==1)
eventsE$Move2[i]=mean(fixesE$Movement[I],na.rm=T)
}
for(n in 1:6)
{
fixes[[n]]$Movement=NaN
for(i in 2:length(fixes[[n]][,1]))
if(fixes[[n]]$ID[i]==fixes[[n]]$ID[i-1])
fixes[[n]]$Movement[i]=spDistsN1(as.matrix(cbind(fixes[[n]]$Lon[i],fixes[[n]]$Lat[i])),c(fixes[[n]]$Lon[i-1],fixes[[n]]$Lat[i-1]),longlat=T)
events[[n]]$Move2<-events[[n]]$Move<-rep(NaN,length(events[[n]]$ID))
for(i in 1:length(events[[n]]$ID))
{
I=which(fixes[[n]]$ID==events[[n]]$ID[i])
events[[n]]$Move[i]=mean(fixes[[n]]$Movement[I],na.rm=T)
I=which(fixes[[n]]$ID==events[[n]]$ID[i] & fixes[[n]]$Location==1)
events[[n]]$Move2[i]=mean(fixes[[n]]$Movement[I],na.rm=T)
}
}
mthresh=c(seq(0,1200,60),Inf)
mhist=matrix(0,21,5)
rownames(mhist)=mthresh[1:21]
colnames(mhist)=c("NCEP","ERAI","R1","R2","R3")
for(i in 1:length(mhist[,1]))
{
mhist[i,1]=length(which(eventsN$Move2>=mthresh[i] & eventsN$Move2<mthresh[i+1]))
mhist[i,2]=length(which(eventsE$Move2>=mthresh[i] & eventsE$Move2<mthresh[i+1]))
for(n in 1:3) mhist[i,n+2]=length(which(events[[n]]$Move2>=mthresh[i] & events[[n]]$Move2<mthresh[i+1] & events[[n]]$CV2>=thresh2[n]))
}
clist=c("black","grey","red","darkgreen","purple")
plot(mthresh[1:21],mhist[,1],lwd=2,type="l",
xlab="Intensity",ylab="Number of events",ylim=c(0,250))
for(i in 2:5) lines(mthresh[1:21],mhist[,i],lwd=2,col=clist[i])
| /NCEP-WRF_skill.R | no_license | apepler/Code-R | R | false | false | 14,181 | r | rm(list=ls())
setwd("/srv/ccrc/data34/z3478332/CMIP-WRF-ECLs/")
library(ggplot2)
library(reshape2)
library(abind)
library(RNetCDF)
wrf=c("R1","R2","R3")
namelist=rep("aaa",12)
events<-fixes<-list()
n=1
for(proj in c(100,240))
for(r in 1:3)
{
if(proj==100) cv=1 else cv=1.35
events[[n]]=read.csv(paste("outputUM/proj",proj,"/outputUM_ncep_WRF",wrf[r],"_50_rad2cv1/ECLevents_umelb_ncep_wrf",wrf[r],"_proj",proj,"_rad2cv",cv,"_9009.csv",sep=""))
events[[n]]$Year=floor(events[[n]]$Date1/10000)
events[[n]]$Month=floor(events[[n]]$Date1/100)%%100
fixes[[n]]=read.csv(paste("outputUM/proj",proj,"/outputUM_ncep_WRF",wrf[r],"_50_rad2cv1/ECLfixes_umelb_ncep_wrf",wrf[r],"_proj",proj,"_rad2cv",cv,"_9009.csv",sep=""))
fixes[[n]]$Year=floor(fixes[[n]]$Date/10000)
fixes[[n]]$Month=floor(fixes[[n]]$Date/100)%%100
fixes[[n]]$Date2=as.POSIXct(paste(as.character(fixes[[n]]$Date),substr(fixes[[n]]$Time,1,2),sep=""),format="%Y%m%d%H",tz="GMT")
n=n+1
}
proj=100
eventsN=read.csv(paste("outputUM/proj",proj,"/outputUM_ncep_rad2cv1/ECLevents_umelb_ncep_proj",proj,"_rad2cv1_9009.csv",sep=""))
eventsN$Year=floor(eventsN$Date1/10000)
eventsN$Month=floor(eventsN$Date1/100)%%100
fixesN=read.csv(paste("outputUM/proj",proj,"/outputUM_ncep_rad2cv1/ECLfixes_umelb_ncep_proj",proj,"_rad2cv1_9009.csv",sep=""))
fixesN$Year=floor(fixesN$Date/10000)
fixesN$Month=floor(fixesN$Date/100)%%100
fixesN$Date2=as.POSIXct(paste(as.character(fixesN$Date),substr(fixesN$Time,1,2),sep=""),format="%Y%m%d%H",tz="GMT")
eventsE=read.csv("/srv/ccrc/data34/z3478332/ECLtracks/outputUM_erai_150_topo_rad2_proj100/ECLevents_umelb_erai_150_topo_rad2_proj100.csv",stringsAsFactors = F)
eventsE$Year=floor(eventsE$Date1/10000)
eventsE$Month=floor(eventsE$Date1/100)%%100
eventsE=eventsE[eventsE$Year>=1990 & eventsE$Year<=2009,]
fixesE=read.csv("/srv/ccrc/data34/z3478332/ECLtracks/outputUM_erai_150_topo_rad2_proj100/ECLfixes_umelb_erai_150_topo_rad2_proj100.csv",stringsAsFactors = F)
fixesE$Year=floor(fixesE$Date/10000)
fixesE$Month=floor(fixesE$Date/100)%%100
fixesE$Date2=as.POSIXct(paste(as.character(fixesE$Date),substr(fixesE$Time,1,2),sep=""),format="%Y%m%d%H",tz="GMT")
###### Step 1 - Count by year/month
years=cbind(1990:2009,matrix(0,20,8))
colnames(years)<-c("Year","NCEP p100","ERAI p100","R1 p100","R2 p100","R3 p100","R1 p240","R2 p240","R3 p240")
months=cbind(1:12,matrix(0,12,8))
colnames(months)<-c("Month","NCEP p100","ERAI p100","R1 p100","R2 p100","R3 p100","R1 p240","R2 p240","R3 p240")
for(i in 1:20)
{
I=which(eventsN$Year==years[i,1])
years[i,2]=length(I)
I=which(eventsE$Year==years[i,1])
years[i,3]=length(I)
for(j in 1:6)
{
I=which(events[[j]]$Year==years[i,1])
years[i,j+3]=length(I)
}
}
apply(years,2,mean)
for(i in 1:6) print(cor(years[,2],years[,i+2]))
for(i in 1:12)
{
I=which(eventsN$Month==months[i,1])
months[i,2]=length(I)
I=which(eventsE$Month==months[i,1])
months[i,3]=length(I)
for(j in 1:6)
{
I=which(events[[j]]$Month==months[i,1])
months[i,j+3]=length(I)
}
}
months2=months
for(i in 1:12) months2[i,2:9]=100*months[i,2:9]/apply(months[,2:9],2,sum)
apply(months2[5:10,],2,sum)
plot(months2[,1],months2[,2],col=1,lwd=3,type="l",xlab="Month",ylab="% of ECLs",ylim=c(0,max(months2)))
for(i in 2:8) lines(months2[,1],months2[,i+1],col=i,lwd=3)
##########
yearsA=years
monthsA=months
for(i in 4:6) events[[i]]$CV2=events[[i]]$CVmax
thresh=22
thresh2=rep(0,6)
for(i in 1:6)
{
data=events[[i]]
b=order(data$CV2,decreasing=T)
thresh2[i]=data$CV2[b[20*thresh]]
if(is.na(thresh2[i])) thresh2[i]=min(data$CV2,na.rm=T)
for(y in 1:20) yearsA[y,i+3]=length(which(data$Year==years[y,1] & data$CV2>=thresh2[i]))
for(m in 1:12) monthsA[m,i+3]=length(which(data$Month==m & data$CV2>=thresh2[i]))
}
corrs=matrix(0,8,8)
for(i in 1:8)
for(j in 1:8)
corrs[i,j]=cor(yearsA[,i+1],yearsA[,j+1])
corrs[corrs==1]=NaN
months2=monthsA
for(i in 1:12) months2[i,2:9]=100*monthsA[i,2:9]/apply(monthsA[,2:9],2,sum)
apply(months2[5:10,],2,sum)
clist=c("black","grey","red","blue","purple")
plot(months2[,1],months2[,2],col=1,lwd=3,type="l",xlab="Month",ylab="% of ECLs",ylim=c(0,max(months2)))
for(i in 2:5) lines(months2[,1],months2[,i+1],col=clist[i],lwd=3)
###################### ECL matching?
matches=matrix(0,length(eventsN$ID),7)
for(i in 1:length(eventsN$ID))
{
tmp=fixesN[(fixesN$ID==eventsN$ID[i] & fixesN$Location==1),]
rn=range(tmp$Date2)
I=which(fixesE$Date2<=rn[2]+(60*60*6) & fixesE$Date2>=rn[1]-(60*60*6) & fixesE$Location==1)
if(length(I)>0) matches[i,1]=1
for(j in 1:6)
{
J=which(events[[j]]$CV2>=thresh2[j])
I=which(fixes[[j]]$Date2<=rn[2]+(60*60*6) & fixes[[j]]$Date2>=rn[1]-(60*60*6) &
fixes[[j]]$Location==1 & fixes[[j]]$ID%in%events[[j]]$ID[J])
if(length(I)>0) matches[i,1+j]=1
}
}
apply(matches,2,mean)
### Same version, different projection
for(n in 1:3)
{
K=which(events[[n]]$CV2>=thresh2[n])
matches=rep(0,length(K))
for(i in 1:length(K))
{
tmp=fixes[[n]][(fixes[[n]]$ID==events[[n]]$ID[K[i]] & fixes[[n]]$Location==1),]
rn=range(tmp$Date2)
j=n+3
J=which(events[[j]]$CV2>=thresh2[j])
I=which(fixes[[j]]$Date2<=rn[2]+(60*60*6) & fixes[[j]]$Date2>=rn[1]-(60*60*6) &
fixes[[j]]$Location==1 & fixes[[j]]$ID%in%events[[j]]$ID[J])
if(length(I)>0) matches[i]=1
}
print(mean(matches))
}
########## Locations
lat=seq(-40,-24,2)
lon=seq(148,160,2)
loc=array(0,c(9,7,8))
for(y in 1:length(lat))
for(x in 1:length(lon))
{
I=which(fixesN$Lon>=lon[x]-1 & fixesN$Lon<lon[x]+1 & fixesN$Lat>=lat[y]-1 & fixesN$Lat<lat[y]+1 & fixesN$Location==1)
loc[y,x,1]=length(I)
I=which(fixesE$Lon>=lon[x]-1 & fixesE$Lon<lon[x]+1 & fixesE$Lat>=lat[y]-1 & fixesE$Lat<lat[y]+1 & fixesE$Location==1)
loc[y,x,2]=length(I)
for(n in 1:6)
{
J=which(events[[n]]$CV2>=thresh2[n])
I=which(fixes[[n]]$Lon>=lon[x]-1 & fixes[[n]]$Lon<lon[x]+1 & fixes[[n]]$Lat>=lat[y]-1 & fixes[[n]]$Lat<lat[y]+1 &
fixes[[n]]$Location==1 & fixes[[n]]$ID%in%events[[n]]$ID[J] & fixes[[n]]$CV>=thresh2[n])
loc[y,x,n+2]=length(I)
}
}
loc=loc/20
names=c("NCEP_p100","ERAI_p100","NCEP-R1_p100","NCEP-R2_p100","NCEP-R3_p100","NCEP-R1_p240","NCEP-R2_p240","NCEP-R3_p240")
library(maps)
# Vertical colour-bar legend drawn into the current plot region.
# Renders one image cell per entry of `cols`, frames it with box(), and
# labels the right axis at the class boundaries. When `labels` is NA the
# interior values of `brks` are used. Note: mutates par() for the device.
ColorBar <- function(brks, cols, labels = NA)
{
  par(mar = c(3, 1, 3, 4), mgp = c(1, 1, 0), las = 1, cex = 1.2)
  cell_idx <- seq_len(length(cols))
  image(1, cell_idx, t(cell_idx), axes = FALSE, col = cols,
        xlab = "", ylab = "")
  box()
  if (is.na(labels[1]))
    labels <- brks[seq(2, length(brks) - 1)]
  axis(4, at = seq(1.5, length(brks) - 1.5), tick = TRUE,
       labels = labels)
}
cm2=gray(seq(1,0.1,-0.15))
bb2=c(-0.5,0,1,2,3,4,5,100)
for(i in 1:8)
{
pdf(file=paste("outputUM/ECL_locations_",names[i],"_top22_d01.pdf",sep=""),width=6,height=4.5,pointsize=12)
layout(cbind(1,2),c(1,0.3))
par(mar=c(3,3,3,2))
image(lon,lat,t(loc[,,i]),xlab="",ylab="",breaks=bb2,col=cm2,zlim=c(-Inf,Inf),xlim=c(142.5,162.5),ylim=c(-42.5,-22.5),main=names[i],cex.axis=1.5,cex.main=1.5)
map(xlim=c(142.5,162.5),ylim=c(-42.5,-22.5),add=T,lwd=2)
ColorBar(bb2,cm2)
dev.off()
}
########## Whole CORDEX domain
fixesALL<-list()
fixesALL[[1]]=read.csv(paste("outputUM/proj",proj,"/outputUM_ncep_rad2cv1/ECLfixes_umelb_ncep_rad2cv1_9009_ALL.csv",sep=""))
fixesALL[[2]]=read.csv("/srv/ccrc/data34/z3478332/ECLtracks/outputUM_erai_150_topo_rad2_proj100/ECLfixes_umelb_erai_rad2cv1_9009_ALL.csv",stringsAsFactors = F)
n=3
for(proj in c(100,240))
for(r in 1:3)
{
tmp=read.csv(paste("outputUM/proj",proj,"/outputUM_ncep_WRF",wrf[r],"_50_rad2cv1/ECLfixes_umelb_ncep_wrf",wrf[r],"_rad2cv1_9009_ALL.csv",sep=""))
fixesALL[[n]]=tmp[tmp$CV>=thresh2[n-2],]
n=n+1
}
lat=seq(-50,0,2.5)
lon=seq(100,180,2.5)
loc2=array(0,c(21,33,8))
for(y in 1:length(lat))
for(x in 1:length(lon))
for(n in 1:8)
{
I=which(fixesALL[[n]]$Lon>=lon[x]-1.25 & fixesALL[[n]]$Lon<lon[x]+1.25 & fixesALL[[n]]$Lat>=lat[y]-1.25 & fixesALL[[n]]$Lat<lat[y]+1.25)
loc2[y,x,n]=length(I)
}
loc2=loc2/20
names=c("NCEP_p100","ERAI_p100","NCEP-R1_p100","NCEP-R2_p100","NCEP-R3_p100","NCEP-R1_p240","NCEP-R2_p240","NCEP-R3_p240")
for(i in 1:8)
{
pdf(file=paste("outputUM/ECL_locations_CORDEX_",names[i],"_top22_d01_v2.pdf",sep=""),width=8,height=5,pointsize=12)
layout(cbind(1,2),c(1,0.2))
par(mar=c(3,3,3,2))
image(lon,lat,t(loc2[,,i]),xlab="",ylab="",breaks=bb2,col=cm2,zlim=c(-Inf,Inf),main=names[i],cex.axis=1.5,cex.main=1.5,
xlim=c(110,172.5),ylim=c(-45,-10))
map(add=T,lwd=2)
ColorBar(bb2,cm2)
dev.off()
}
pdf(file=paste("outputUM/ECL_locations_CORDEX_NCEP-WRF_proj100_top22_d01_v2.pdf",sep=""),width=8,height=5,pointsize=12)
layout(cbind(1,2),c(1,0.2))
par(mar=c(3,3,3,2))
image(lon,lat,t(apply(loc2[,,3:5],c(1,2),mean)),xlab="",ylab="",breaks=bb2,
col=cm2,zlim=c(-Inf,Inf),main="NCEP-WRF",cex.axis=1.5,cex.main=1.5,
xlim=c(110,172.5),ylim=c(-45,-10))
map(add=T,lwd=2)
ColorBar(bb2,cm2)
dev.off()
####### Compare to merra
eventsM=read.csv("/srv/ccrc/data34/z3478332/ECLtracks/outputUM_merraN_rad2cv1_proj100_diff2/ECLevents_umelb_merra_rad2cv1_9009.csv")
eventsM=eventsM[eventsM$Date1>=19900000 & eventsM$Date2<=20100000,]
fixesM=read.csv("/srv/ccrc/data34/z3478332/ECLtracks/outputUM_merraN_rad2cv1_proj100_diff2/ECLfixes_umelb_merra_rad2cv1_9009_ALL.csv",stringsAsFactors = F)
fixesM=fixesM[fixesM$Date>=19900000 & fixesM$Date<=20100000,]
locM=array(0,c(21,33))
for(y in 1:length(lat))
for(x in 1:length(lon))
{
I=which(fixesM$Lon>=lon[x]-1.25 & fixesM$Lon<lon[x]+1.25 & fixesM$Lat>=lat[y]-1.25 & fixesM$Lat<lat[y]+1.25)
locM[y,x]=length(I)
}
locM=locM/20
pdf(file=paste("outputUM/ECL_locations_CORDEX_MERRA_proj100_d01_v2.pdf",sep=""),width=8,height=5,pointsize=12)
layout(cbind(1,2),c(1,0.2))
par(mar=c(3,3,3,2))
image(lon,lat,t(locM),xlab="",ylab="",breaks=bb2,
col=cm2,zlim=c(-Inf,Inf),main="MERRA",cex.axis=1.5,cex.main=1.5,
xlim=c(110,172.5),ylim=c(-45,-10))
map(add=T,lwd=2)
ColorBar(bb2,cm2)
dev.off()
###############
##########
## Histograms of ECL intensity, duration, etc.
fixesM=read.csv("/srv/ccrc/data34/z3478332/ECLtracks/outputUM_merraN_rad2cv1_proj100_diff2/ECLfixes_umelb_merra_rad2cv1_9009.csv",stringsAsFactors = F)
fixesM=fixesM[fixesM$Date>=19900000 & fixesM$Date<=20100000,]
cvthresh=c(seq(1,4,0.25),Inf)
cvhist=matrix(0,13,6)
rownames(cvhist)=cvthresh[1:13]
colnames(cvhist)=c("NCEP","ERAI","MERRA","R1","R2","R3")
for(i in 1:length(cvhist[,1]))
{
cvhist[i,1]=length(which(eventsN$CV2>=cvthresh[i] & eventsN$CV2<cvthresh[i+1]))
cvhist[i,2]=length(which(eventsE$CV2>=cvthresh[i] & eventsE$CV2<cvthresh[i+1]))
cvhist[i,3]=length(which(eventsM$CV2>=cvthresh[i] & eventsM$CV2<cvthresh[i+1]))
for(n in 1:3) cvhist[i,n+3]=length(which(events[[n]]$CV2>=cvthresh[i] & events[[n]]$CV2<cvthresh[i+1]))
}
clist=c("black","grey","blue","red","darkgreen","purple")
plot(cvthresh[1:13],cvhist[,1],lwd=2,type="l",
xlab="Intensity",ylab="Number of events",ylim=c(0,250))
for(i in 2:6) lines(cvthresh[1:13],cvhist[,i],lwd=2,col=clist[i])
lenthresh=c(1,5,9,13,17,21,Inf)
lenhist=matrix(0,6,6)
rownames(lenhist)=lenthresh[1:6]
colnames(lenhist)=c("NCEP","ERAI","MERRA","R1","R2","R3")
for(i in 1:length(lenhist[,1]))
{
lenhist[i,1]=length(which(eventsN$Length2>=lenthresh[i] & eventsN$Length2<lenthresh[i+1]))
lenhist[i,2]=length(which(eventsE$Length2>=lenthresh[i] & eventsE$Length2<lenthresh[i+1]))
lenhist[i,3]=length(which(eventsM$Length2>=lenthresh[i] & eventsM$Length2<lenthresh[i+1]))
for(n in 1:3) lenhist[i,n+3]=length(which(events[[n]]$Length2>=lenthresh[i] & events[[n]]$Length2<lenthresh[i+1] & events[[n]]$CV2>=thresh2[n]))
}
###### That's odd - is it because these events move slower on average?
fixesN$Movement=NaN
library(sp)
for(i in 2:length(fixesN[,1]))
if(fixesN$ID[i]==fixesN$ID[i-1])
fixesN$Movement[i]=spDistsN1(as.matrix(cbind(fixesN$Lon[i],fixesN$Lat[i])),c(fixesN$Lon[i-1],fixesN$Lat[i-1]),longlat=T)
eventsN$Move2<-eventsN$Move<-rep(NaN,length(eventsN$ID))
for(i in 1:length(eventsN$ID))
{
I=which(fixesN$ID==eventsN$ID[i])
eventsN$Move[i]=mean(fixesN$Movement[I],na.rm=T)
I=which(fixesN$ID==eventsN$ID[i] & fixesN$Location==1)
eventsN$Move2[i]=mean(fixesN$Movement[I],na.rm=T)
}
fixesE$Movement=NaN
for(i in 2:length(fixesE[,1]))
if(fixesE$ID[i]==fixesE$ID[i-1])
fixesE$Movement[i]=spDistsN1(as.matrix(cbind(fixesE$Lon[i],fixesE$Lat[i])),c(fixesE$Lon[i-1],fixesE$Lat[i-1]),longlat=T)
eventsE$Move2<-eventsE$Move<-rep(NaN,length(eventsE$ID))
for(i in 1:length(eventsE$ID))
{
I=which(fixesE$ID==eventsE$ID[i])
eventsE$Move[i]=mean(fixesE$Movement[I],na.rm=T)
I=which(fixesE$ID==eventsE$ID[i] & fixesE$Location==1)
eventsE$Move2[i]=mean(fixesE$Movement[I],na.rm=T)
}
for(n in 1:6)
{
fixes[[n]]$Movement=NaN
for(i in 2:length(fixes[[n]][,1]))
if(fixes[[n]]$ID[i]==fixes[[n]]$ID[i-1])
fixes[[n]]$Movement[i]=spDistsN1(as.matrix(cbind(fixes[[n]]$Lon[i],fixes[[n]]$Lat[i])),c(fixes[[n]]$Lon[i-1],fixes[[n]]$Lat[i-1]),longlat=T)
events[[n]]$Move2<-events[[n]]$Move<-rep(NaN,length(events[[n]]$ID))
for(i in 1:length(events[[n]]$ID))
{
I=which(fixes[[n]]$ID==events[[n]]$ID[i])
events[[n]]$Move[i]=mean(fixes[[n]]$Movement[I],na.rm=T)
I=which(fixes[[n]]$ID==events[[n]]$ID[i] & fixes[[n]]$Location==1)
events[[n]]$Move2[i]=mean(fixes[[n]]$Movement[I],na.rm=T)
}
}
mthresh=c(seq(0,1200,60),Inf)
mhist=matrix(0,21,5)
rownames(mhist)=mthresh[1:21]
colnames(mhist)=c("NCEP","ERAI","R1","R2","R3")
for(i in 1:length(mhist[,1]))
{
mhist[i,1]=length(which(eventsN$Move2>=mthresh[i] & eventsN$Move2<mthresh[i+1]))
mhist[i,2]=length(which(eventsE$Move2>=mthresh[i] & eventsE$Move2<mthresh[i+1]))
for(n in 1:3) mhist[i,n+2]=length(which(events[[n]]$Move2>=mthresh[i] & events[[n]]$Move2<mthresh[i+1] & events[[n]]$CV2>=thresh2[n]))
}
clist=c("black","grey","red","darkgreen","purple")
plot(mthresh[1:21],mhist[,1],lwd=2,type="l",
xlab="Intensity",ylab="Number of events",ylim=c(0,250))
for(i in 2:5) lines(mthresh[1:21],mhist[,i],lwd=2,col=clist[i])
|
# Utilities for this package
# EnsurePackage(x) - Installs and loads a package if necessary.
# Args:
#   x: name of package (coerced to character)
# Called for its side effect of attaching the package; if the package is
# not already loadable it is installed from CRAN first, then attached.
EnsurePackage <- function(x) {
  pkg <- as.character(x)
  if (!require(pkg, character.only = TRUE)) {
    # Not yet installed (or not loadable): fetch from CRAN, then attach.
    install.packages(pkgs = pkg, repos = "http://cran.r-project.org")
    require(pkg, character.only = TRUE)
  }
}
TrimAt <- function(x) {
  # Strip the "@" character from each element of `x`.
  # NOTE(review): sub() (not gsub) removes only the FIRST "@" per element --
  # confirm that is intended.
  sub(pattern = "@", replacement = "", x = x)
}
TrimHead <- function(x) {
  # Drop a leading prefix ending in "@" (e.g. "RT @", ".@", "MT @").
  # NOTE(review): "^(.*)?@" is greedy, so when several "@" are present
  # everything up to the LAST one is removed -- confirm intended.
  sub(pattern = "^(.*)?@", replacement = "", x = x)
}
TrimUsers <- function(x) {
  # Remove "@user" mentions from a tweet.
  # Base gsub() replaces stringr::str_replace_all so this helper also works
  # when stringr is not attached; the pattern is unchanged.
  gsub("(@[[:alnum:]_]*)", "", x)
}
TrimHashtags <- function(x) {
  # Remove "#tag" hashtags from a tweet.
  # Base gsub() replaces stringr::str_replace_all so this helper also works
  # when stringr is not attached; the pattern is unchanged.
  gsub("(#[[:alnum:]_]*)", "", x)
}
TrimUrls <- function(x) {
  # Remove URLs (any "http..." run up to the next blank) from a tweet.
  # Base gsub() replaces stringr::str_replace_all so this helper also works
  # when stringr is not attached; the pattern is unchanged.
  gsub("http[^[:blank:]]+", "", x)
}
TrimOddChar <- function(x) {
  # Re-encode `x` from the native encoding into UTF-8; elements that cannot
  # be converted become NA (iconv's default behaviour).
  iconv(x, from = "", to = "UTF-8")
}
CosineSimilarity <- function(va, vb) {
  # Cosine similarity between two numeric vectors of the same length.
  # The result is a 1x1 matrix, as produced by crossprod().
  dot_product <- crossprod(va, vb)
  norm_product <- crossprod(va) * crossprod(vb)
  dot_product / sqrt(norm_product)
}
| /twitteR/utilities.R | no_license | KartikKannapur/R-Statistical-Computing | R | false | false | 1,122 | r | # Utilities for this package
EnsurePackage <- function(x) {
  # EnsurePackage(x) - Installs and loads a package if necessary.
  # Args:
  #   x: name of package (character scalar)
  # Returns: TRUE invisibly once the package is attached; errors otherwise.
  x <- as.character(x)
  if (!require(x, character.only=TRUE)) {
    # CRAN requires https nowadays; the original used the plain-http URL.
    install.packages(pkgs=x, repos="https://cran.r-project.org")
    # The original ignored the result of this second attempt and silently
    # continued when installation failed; fail loudly instead.
    if (!require(x, character.only=TRUE)) {
      stop("Package '", x, "' could not be installed and loaded.", call.=FALSE)
    }
  }
  invisible(TRUE)
}
TrimAt <- function(x) {
  # Strip the "@" character from each element of `x`.
  # NOTE(review): sub() (not gsub) removes only the FIRST "@" per element --
  # confirm that is intended.
  sub(pattern = "@", replacement = "", x = x)
}
TrimHead <- function(x) {
  # Drop a leading prefix ending in "@" (e.g. "RT @", ".@", "MT @").
  # NOTE(review): "^(.*)?@" is greedy, so when several "@" are present
  # everything up to the LAST one is removed -- confirm intended.
  sub(pattern = "^(.*)?@", replacement = "", x = x)
}
TrimUsers <- function(x) {
  # Remove "@user" mentions from a tweet.
  # Base gsub() replaces stringr::str_replace_all so this helper also works
  # when stringr is not attached; the pattern is unchanged.
  gsub("(@[[:alnum:]_]*)", "", x)
}
TrimHashtags <- function(x) {
  # Remove "#tag" hashtags from a tweet.
  # Base gsub() replaces stringr::str_replace_all so this helper also works
  # when stringr is not attached; the pattern is unchanged.
  gsub("(#[[:alnum:]_]*)", "", x)
}
TrimUrls <- function(x) {
  # Remove URLs (any "http..." run up to the next blank) from a tweet.
  # Base gsub() replaces stringr::str_replace_all so this helper also works
  # when stringr is not attached; the pattern is unchanged.
  gsub("http[^[:blank:]]+", "", x)
}
TrimOddChar <- function(x) {
  # Re-encode `x` from the native encoding into UTF-8; elements that cannot
  # be converted become NA (iconv's default behaviour).
  iconv(x, from = "", to = "UTF-8")
}
CosineSimilarity <- function(va, vb) {
  # Cosine similarity between two numeric vectors of the same length.
  # The result is a 1x1 matrix, as produced by crossprod().
  dot_product <- crossprod(va, vb)
  norm_product <- crossprod(va) * crossprod(vb)
  dot_product / sqrt(norm_product)
}
|
### predict.slm.R (2017-03-28)
###
### Prediction from linear model
###
### Copyright 2011-17 Korbinian Strimmer
###
###
### This file is part of the `sda' library for R and related languages.
### It is made available under the terms of the GNU General Public
### License, version 3, or at your option, any later version,
### incorporated herein by reference.
###
### This program is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied
### warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
### PURPOSE. See the GNU General Public License for more
### details.
###
### You should have received a copy of the GNU General Public
### License along with this program; if not, write to the Free
### Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
### MA 02111-1307, USA
predict.slm = function(object, Xtest, verbose=TRUE, ...)
{
  # Predict responses for new data from a fitted `slm` object.
  #
  # Args:
  #   object:  slm fit; uses $coefficients (one row per fitted model, first
  #            column = intercept), $numpred (named vector with the number
  #            of predictors per model) and $sd.resid (residual sd per model).
  #   Xtest:   numeric matrix of test samples (rows) by variables (columns).
  #   verbose: if TRUE, report how many variables each model uses.
  #
  # Returns:
  #   nrow(Xtest) x length(object$numpred) matrix of predictions, one column
  #   per model, with residual standard deviations attached as attribute "sd".
  if ( missing(object) ) {
    stop("An slm object must be supplied.")
  }
  if ( missing(Xtest) ) {
    stop("A test data set must be supplied.")
  }
  if (!is.matrix(Xtest)) stop("Test data must be given as matrix!")

  ntest = nrow(Xtest)   # number of test samples
  nvtest = ncol(Xtest)  # number of variables in test data set
  ncoeff = ncol(object$coefficients)-1 # number of coefficients (w/o intercept)

  # stop() has no `sep` argument; the original passed sep="" into `...`,
  # where it was silently pasted into the message. stop() concatenates its
  # arguments without a separator anyway, so the message is unchanged.
  if (ncoeff != nvtest)
    stop("Incompatible number of variables in test data set (", nvtest,
       ") and number of coefficients in slm object (", ncoeff, ")")

  m = length(object$numpred)  # number of fitted models

  yhat = matrix(0, nrow=ntest, ncol=m)
  colnames(yhat) = names(object$numpred)
  rownames(yhat) = rownames(Xtest)

  predsd = matrix(0, nrow=1, ncol=m)
  colnames(predsd) = names(object$numpred)
  rownames(predsd) = NULL

  # seq_len() is safe for m == 0 (1:m would iterate over c(1, 0))
  for (i in seq_len(m))
  {
    if (verbose) cat("Prediction uses", object$numpred[i], "variables.\n")

    b = matrix(object$coefficients[i, -1])  # regression weights
    b0 = object$coefficients[i, 1]          # intercept
    yhat[,i] = b0 + Xtest %*% b
    predsd[,i] = object$sd.resid[i]
  }

  attr(yhat, "sd") = predsd
  return( yhat )
}
| /R/predict.slm.R | no_license | cran/care | R | false | false | 2,039 | r | ### predict.slm.R (2017-03-28)
###
### Prediction from linear model
###
### Copyright 2011-17 Korbinian Strimmer
###
###
### This file is part of the `sda' library for R and related languages.
### It is made available under the terms of the GNU General Public
### License, version 3, or at your option, any later version,
### incorporated herein by reference.
###
### This program is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied
### warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
### PURPOSE. See the GNU General Public License for more
### details.
###
### You should have received a copy of the GNU General Public
### License along with this program; if not, write to the Free
### Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
### MA 02111-1307, USA
predict.slm = function(object, Xtest, verbose=TRUE, ...)
{
  # Predict responses for new samples from a fitted `slm` object.
  #
  # object:  slm fit with $coefficients (one row per fitted model; first
  #          column is the intercept), $numpred and $sd.resid.
  # Xtest:   numeric matrix, samples in rows and variables in columns.
  # verbose: report the number of variables each model uses.
  #
  # Returns an nrow(Xtest) x (number of models) prediction matrix with the
  # per-model residual standard deviations attached as attribute "sd".
  if ( missing(object) ) {
    stop("An slm object must be supplied.")
  }
  if ( missing(Xtest) ) {
    stop("A test data set must be supplied.")
  }
  if (!is.matrix(Xtest)) stop("Test data must be given as matrix!")

  n.obs  = nrow(Xtest)                    # test samples
  n.vars = ncol(Xtest)                    # variables in the test data
  n.coef = ncol(object$coefficients) - 1  # fitted coefficients (no intercept)

  if (n.coef != n.vars)
    stop("Incompatible number of variables in test data set (", n.vars,
       ") and number of coefficients in slm object (", n.coef, ")", sep="")

  model.names = names(object$numpred)
  n.models = length(object$numpred)

  yhat = matrix(0, nrow=n.obs, ncol=n.models,
                dimnames=list(rownames(Xtest), model.names))
  predsd = matrix(0, nrow=1, ncol=n.models,
                  dimnames=list(NULL, model.names))

  for (i in 1:n.models)
  {
    if (verbose) cat("Prediction uses", object$numpred[i], "variables.\n")

    intercept = object$coefficients[i, 1]
    weights = matrix(object$coefficients[i, -1])
    yhat[, i] = intercept + Xtest %*% weights
    predsd[, i] = object$sd.resid[i]
  }

  attr(yhat, "sd") = predsd
  return( yhat )
}
|
#######################################
# This R script called run_analysis.R does the following:
#
# 1.- Merges the training and the test sets to create one data set.
# 2.- Extracts only the measurements on the mean and standard deviation for each measurement.
# 3.- Uses descriptive activity names to name the activities in the data set
# 4.- Appropriately labels the data set with descriptive variable names.
# 5.- From the data set in step 4, creates a second, independent tidy data set
# with the average of each variable for each activity and each subject.
#
# README.md for details.
#######################################
library(dplyr)
#######################################
# Download and unzip data
#######################################
fileName <- "finalProjectDataSet.zip"
# Checking if archieve already exists.
if (!file.exists(fileName)){
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileURL, fileName, method="curl")
}
# Checking if folder exists
if (!file.exists("UCI HAR Dataset")) {
unzip(fileName)
}
dataPath <- "UCI HAR Dataset"
#######################################
# Read data
#######################################
# Training data
trainingSubjects <- read.table(file.path(dataPath, "train", "subject_train.txt"))
trainingValues <- read.table(file.path(dataPath, "train", "X_train.txt"))
trainingActivity <- read.table(file.path(dataPath, "train", "y_train.txt"))
bodyAccXTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "body_acc_x_train.txt"))
bodyAccYTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "body_acc_y_train.txt"))
bodyAccZTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "body_acc_z_train.txt"))
bodyGyroXTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "body_gyro_x_train.txt"))
bodyGyroYTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "body_gyro_y_train.txt"))
bodyGyroZTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "body_gyro_z_train.txt"))
totalAccXTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "total_acc_x_train.txt"))
totalAccYTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "total_acc_y_train.txt"))
totalAccZTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "total_acc_z_train.txt"))
# Test data
testSubjects <- read.table(file.path(dataPath, "test", "subject_test.txt"))
testValues <- read.table(file.path(dataPath, "test", "X_test.txt"))
testActivity <- read.table(file.path(dataPath, "test", "y_test.txt"))
bodyAccXTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "body_acc_x_test.txt"))
bodyAccYTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "body_acc_y_test.txt"))
bodyAccZTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "body_acc_z_test.txt"))
bodyGyroXTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "body_gyro_x_test.txt"))
bodyGyroYTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "body_gyro_y_test.txt"))
bodyGyroZTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "body_gyro_z_test.txt"))
totalAccXTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "total_acc_x_test.txt"))
totalAccYTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "total_acc_y_test.txt"))
totalAccZTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "total_acc_z_test.txt"))
# Activity labels
activity <- read.table(file.path(dataPath, "activity_labels.txt"))
colnames(activity) <- c("activityId", "activityLabel")
# Features
features <- read.table(file.path(dataPath, "features.txt"), as.is = TRUE)
#######################################
# Step 1: Merges the training and the test sets to create one data set.
#######################################
xDataSet <- rbind(trainingValues, testValues)
yDataSet <- rbind(trainingActivity, testActivity)
subjectDataSet <- rbind(trainingSubjects, testSubjects)
mergedDataSet <- cbind(subjectDataSet, yDataSet, xDataSet)
# Remove data tables to save memory
rm(trainingSubjects, trainingValues, trainingActivity, bodyAccXTrainData, bodyAccYTrainData, bodyAccZTrainData, bodyGyroXTrainData, bodyGyroYTrainData, bodyGyroZTrainData, totalAccXTrainData, totalAccYTrainData, totalAccZTrainData,
testSubjects, testValues, testActivity, bodyAccXTestData, bodyAccYTestData, bodyAccZTestData, bodyGyroXTestData, bodyGyroYTestData, bodyGyroZTestData, totalAccXTestData, totalAccYTestData, totalAccZTestData,
xDataSet, yDataSet, subjectDataSet)
# Assign column names
colnames(mergedDataSet) <- c("subject", features[, 2], "activity")
#######################################
# Step 2: Extracts only the measurements on the mean and standard deviation for each measurement.
#######################################
# Determine columns of interest
columnsOfInterest <- grepl("subject|code|activity|mean|std", colnames(mergedDataSet))
mergedDataSet <- mergedDataSet[, columnsOfInterest]
#######################################
# Step 3: Uses descriptive activity names to name the activities in the data set.
#######################################
# Replace activity values with named factor levels
mergedDataSet$activity <- factor(mergedDataSet$activity, levels = activity[, 1], labels = activity[, 2])
#######################################
# Step 4: Appropriately labels the data set with descriptive variable names.
#######################################
# Column names
mergedDataSetCols <- colnames(mergedDataSet)
# No special characters
mergedDataSetCols <- gsub("[\\(\\)-]", "", mergedDataSetCols)
# Clean names
mergedDataSetCols <- gsub("Acc", "Accelerometer", mergedDataSetCols)
mergedDataSetCols <- gsub("angle", "Angle", mergedDataSetCols)
mergedDataSetCols <- gsub("gravity", "Gravity", mergedDataSetCols)
mergedDataSetCols <- gsub("Gyro", "Gyroscope", mergedDataSetCols)
mergedDataSetCols <- gsub("Mag", "Magnitude", mergedDataSetCols)
mergedDataSetCols <- gsub("^f", "frequencyDomain", mergedDataSetCols)
mergedDataSetCols <- gsub("Freq", "Frequency", mergedDataSetCols)
mergedDataSetCols <- gsub("-freq()", "Frequency", mergedDataSetCols)
mergedDataSetCols <- gsub("BodyBody", "Body", mergedDataSetCols)
mergedDataSetCols <- gsub("tBody", "TimeBody", mergedDataSetCols)
mergedDataSetCols <- gsub("^t", "timeDomain", mergedDataSetCols)
mergedDataSetCols <- gsub("mean", "Mean", mergedDataSetCols)
mergedDataSetCols <- gsub("-mean()", "Mean", mergedDataSetCols)
mergedDataSetCols <- gsub("std", "StandardDeviation", mergedDataSetCols)
mergedDataSetCols <- gsub("-std()", "StandardDeviation", mergedDataSetCols)
# Assing labels as column names
colnames(mergedDataSet) <- mergedDataSetCols
#######################################
# Step 5: From the data set in step 4, creates a second, independent tidy data set with the
# average of each variable for each activity and each subject.
#######################################
tidyDataSet <- mergedDataSet %>% group_by(subject, activity) %>% summarise_each(funs(mean))
# Output to file "tidyData.txt"
write.table(tidyDataSet, "tidyData.txt", row.name=FALSE, quote = FALSE)
#######################################
# Final Check
#######################################
# Checking variable names
str(tidyDataSet)
tidyDataSet
| /run_analysis.R | no_license | neucast/Getting_And_Cleaning_Data_Final_Project | R | false | false | 7,451 | r | #######################################
# This R script called run_analysis.R does the following:
#
# 1.- Merges the training and the test sets to create one data set.
# 2.- Extracts only the measurements on the mean and standard deviation for each measurement.
# 3.- Uses descriptive activity names to name the activities in the data set
# 4.- Appropriately labels the data set with descriptive variable names.
# 5.- From the data set in step 4, creates a second, independent tidy data set
# with the average of each variable for each activity and each subject.
#
# README.md for details.
#######################################
library(dplyr)
#######################################
# Download and unzip data
#######################################
fileName <- "finalProjectDataSet.zip"
# Checking if archieve already exists.
if (!file.exists(fileName)){
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileURL, fileName, method="curl")
}
# Checking if folder exists
if (!file.exists("UCI HAR Dataset")) {
unzip(fileName)
}
dataPath <- "UCI HAR Dataset"
#######################################
# Read data
#######################################
# Training data
trainingSubjects <- read.table(file.path(dataPath, "train", "subject_train.txt"))
trainingValues <- read.table(file.path(dataPath, "train", "X_train.txt"))
trainingActivity <- read.table(file.path(dataPath, "train", "y_train.txt"))
bodyAccXTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "body_acc_x_train.txt"))
bodyAccYTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "body_acc_y_train.txt"))
bodyAccZTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "body_acc_z_train.txt"))
bodyGyroXTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "body_gyro_x_train.txt"))
bodyGyroYTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "body_gyro_y_train.txt"))
bodyGyroZTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "body_gyro_z_train.txt"))
totalAccXTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "total_acc_x_train.txt"))
totalAccYTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "total_acc_y_train.txt"))
totalAccZTrainData <- read.table(file.path(dataPath, "train/Inertial Signals", "total_acc_z_train.txt"))
# Test data
testSubjects <- read.table(file.path(dataPath, "test", "subject_test.txt"))
testValues <- read.table(file.path(dataPath, "test", "X_test.txt"))
testActivity <- read.table(file.path(dataPath, "test", "y_test.txt"))
bodyAccXTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "body_acc_x_test.txt"))
bodyAccYTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "body_acc_y_test.txt"))
bodyAccZTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "body_acc_z_test.txt"))
bodyGyroXTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "body_gyro_x_test.txt"))
bodyGyroYTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "body_gyro_y_test.txt"))
bodyGyroZTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "body_gyro_z_test.txt"))
totalAccXTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "total_acc_x_test.txt"))
totalAccYTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "total_acc_y_test.txt"))
totalAccZTestData <- read.table(file.path(dataPath, "test/Inertial Signals", "total_acc_z_test.txt"))
# Activity labels
activity <- read.table(file.path(dataPath, "activity_labels.txt"))
colnames(activity) <- c("activityId", "activityLabel")
# Features
features <- read.table(file.path(dataPath, "features.txt"), as.is = TRUE)
#######################################
# Step 1: Merges the training and the test sets to create one data set.
#######################################
xDataSet <- rbind(trainingValues, testValues)
yDataSet <- rbind(trainingActivity, testActivity)
subjectDataSet <- rbind(trainingSubjects, testSubjects)
mergedDataSet <- cbind(subjectDataSet, yDataSet, xDataSet)
# Remove data tables to save memory
rm(trainingSubjects, trainingValues, trainingActivity, bodyAccXTrainData, bodyAccYTrainData, bodyAccZTrainData, bodyGyroXTrainData, bodyGyroYTrainData, bodyGyroZTrainData, totalAccXTrainData, totalAccYTrainData, totalAccZTrainData,
testSubjects, testValues, testActivity, bodyAccXTestData, bodyAccYTestData, bodyAccZTestData, bodyGyroXTestData, bodyGyroYTestData, bodyGyroZTestData, totalAccXTestData, totalAccYTestData, totalAccZTestData,
xDataSet, yDataSet, subjectDataSet)
# Assign column names
colnames(mergedDataSet) <- c("subject", features[, 2], "activity")
#######################################
# Step 2: Extracts only the measurements on the mean and standard deviation for each measurement.
#######################################
# Determine columns of interest
columnsOfInterest <- grepl("subject|code|activity|mean|std", colnames(mergedDataSet))
mergedDataSet <- mergedDataSet[, columnsOfInterest]
#######################################
# Step 3: Uses descriptive activity names to name the activities in the data set.
#######################################
# Replace activity values with named factor levels
mergedDataSet$activity <- factor(mergedDataSet$activity, levels = activity[, 1], labels = activity[, 2])
#######################################
# Step 4: Appropriately labels the data set with descriptive variable names.
#######################################
# Column names
mergedDataSetCols <- colnames(mergedDataSet)
# No special characters
mergedDataSetCols <- gsub("[\\(\\)-]", "", mergedDataSetCols)
# Clean names
mergedDataSetCols <- gsub("Acc", "Accelerometer", mergedDataSetCols)
mergedDataSetCols <- gsub("angle", "Angle", mergedDataSetCols)
mergedDataSetCols <- gsub("gravity", "Gravity", mergedDataSetCols)
mergedDataSetCols <- gsub("Gyro", "Gyroscope", mergedDataSetCols)
mergedDataSetCols <- gsub("Mag", "Magnitude", mergedDataSetCols)
mergedDataSetCols <- gsub("^f", "frequencyDomain", mergedDataSetCols)
mergedDataSetCols <- gsub("Freq", "Frequency", mergedDataSetCols)
mergedDataSetCols <- gsub("-freq()", "Frequency", mergedDataSetCols)
mergedDataSetCols <- gsub("BodyBody", "Body", mergedDataSetCols)
mergedDataSetCols <- gsub("tBody", "TimeBody", mergedDataSetCols)
mergedDataSetCols <- gsub("^t", "timeDomain", mergedDataSetCols)
mergedDataSetCols <- gsub("mean", "Mean", mergedDataSetCols)
mergedDataSetCols <- gsub("-mean()", "Mean", mergedDataSetCols)
mergedDataSetCols <- gsub("std", "StandardDeviation", mergedDataSetCols)
mergedDataSetCols <- gsub("-std()", "StandardDeviation", mergedDataSetCols)
# Assing labels as column names
colnames(mergedDataSet) <- mergedDataSetCols
#######################################
# Step 5: From the data set in step 4, creates a second, independent tidy data set with the
# average of each variable for each activity and each subject.
#######################################
tidyDataSet <- mergedDataSet %>% group_by(subject, activity) %>% summarise_each(funs(mean))
# Output to file "tidyData.txt"
write.table(tidyDataSet, "tidyData.txt", row.name=FALSE, quote = FALSE)
#######################################
# Final Check
#######################################
# Checking variable names
str(tidyDataSet)
tidyDataSet
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 523
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 523
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query24_ntrivil_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 193
c no.of clauses 523
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 523
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query24_ntrivil_1344n.qdimacs 193 523 E1 [] 0 35 158 523 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query24_ntrivil_1344n/query24_ntrivil_1344n.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 704 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 523
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 523
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query24_ntrivil_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 193
c no.of clauses 523
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 523
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query24_ntrivil_1344n.qdimacs 193 523 E1 [] 0 35 158 523 NONE
|
# Extracted Rd example for stuart::holdout() (data selection for holdout
# validation); requires the `stuart` package and its `fairplayer` data set.
library(stuart)
### Name: holdout
### Title: Data selection for holdout validation.
### Aliases: holdout
### ** Examples
# seeded selection, 25% validation sample
data(fairplayer)
# `split` is presumably a list of calibration/validation subsamples --
# see ?stuart::holdout to confirm.
split <- holdout(fairplayer, .75, seed = 55635)
lapply(split, nrow) # check size of samples
| /data/genthat_extracted_code/stuart/examples/holdout.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 281 | r | library(stuart)
### Name: holdout
### Title: Data selection for holdout validation.
### Aliases: holdout
### ** Examples
# seeded selection, 25% validation sample
# (extracted Rd example for stuart::holdout(); requires the `stuart`
# package and its `fairplayer` data set)
data(fairplayer)
split <- holdout(fairplayer, .75, seed = 55635)
lapply(split, nrow) # check size of samples
|
# Extracted Rd example for biomod2::full_suffling(); requires the
# `biomod2` package.
library(biomod2)
### Name: full_suffling
### Title: data set shuffling tool
### Aliases: full_suffling
### Keywords: suffle random importance
### ** Examples
# 10 x 3 matrix; each column contains 1:10
xx <- matrix(rep(1:10,3),10,3)
# presumably shuffles columns 1 and 2 of xx -- see ?biomod2::full_suffling
full_suffling(xx,c(1,2))
| /data/genthat_extracted_code/biomod2/examples/full_shuffling.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 221 | r | library(biomod2)
### Name: full_suffling
### Title: data set shuffling tool
### Aliases: full_suffling
### Keywords: suffle random importance
### ** Examples
# Extracted Rd example for biomod2::full_suffling(); 10 x 3 matrix where
# each column contains 1:10, then shuffling applied to columns 1 and 2.
xx <- matrix(rep(1:10,3),10,3)
full_suffling(xx,c(1,2))
|
message("R/data/process_data.R")
# packages ----------------------------------------------------------------
library(dplyr)
library(stringr)
# data input --------------------------------------------------------------
# Script that processes the MOVID survey export.
# NOTE(review): the original comment said "movid18.csv" but the file read
# below is "movid19.csv" -- confirm which export is intended.
mv <- data.table::fread("data/movid19.csv", encoding = "UTF-8")
mv <- as_tibble(mv)
# Exploratory checks kept (commented out) from the original script:
# mv %>%
# filter(fecha_obs %>% as.Date() >= lubridate::ymd(20200803)) %>%
# {
# table(.$fecha_obs %>% as.Date(), .$semana)
# }
#
# table(mv$fecha_obs %>% as.Date(), mv$semana)
# mv$r5_educ
# glimpse(mv)
# renaming ----------------------------------------------------------------
# MOVID: general recodings. Raw survey codes are renamed to readable
# column names; commented-out entries were excluded in the original.
mv <- mv %>%
rename(
fecha = fecha_obs,
sexo = r2_sexo,
# firstName = r1_nombre
region = u1_region,
comuna = u2_comuna,
# calle = u3_calle,
educ = r5_educ,
tra_salud = pr1_wrk_salud,
prev = pr2_prevision,
# pob_id = X.U.FEFF.X.U.FEFF.pob_id
)
# recoding ----------------------------------------------------------------
# Observation date as a Date column. The original assigned `fecha_ymd`
# twice (once via mutate(), once via `$<-` with the identical expression);
# the redundant second assignment was removed.
mv <- mv %>%
  mutate(
    fecha_ymd = as.Date(fecha)
  )
# Binary indicator: health worker (1 = "Sí", 0 = "No", NA otherwise).
mv$tra_salud_dic <- ifelse(mv$tra_salud=="Sí", 1,
ifelse(mv$tra_salud=="No", 0, NA))
# Sex x health-worker combination (4 categories).
mv$sexo_trasalud <- ifelse(mv$sexo=="Femenino" & mv$tra_salud=="Sí", "Mujer trabajadora de salud",
ifelse(mv$sexo=="Femenino" & mv$tra_salud=="No", "Mujer no trabajadora de salud",
ifelse(mv$sexo=="Masculino" & mv$tra_salud=="Sí", "Hombre trabajador de salud",
ifelse(mv$sexo=="Masculino" & mv$tra_salud=="No", "Hombre no trabajador de salud", NA))))
# Education collapsed to 4, 3 and 2 categories from the raw `educ` labels.
mv$educ_4cat <- ifelse(mv$educ=="Sin estudios" | mv$educ=="Educación Básica (primaria o preparatoria)", "Basica o sin estudios",
ifelse(mv$educ == "Educación Media (Humanidades)", "Media",
ifelse(mv$educ == "Educación Profesional (Carreras de 4 o más años)", "Profesional",
ifelse(mv$educ == "Educación Técnica Nivel Superior (Carreras de 1 a 3 años)", "Tecnica", NA))))
mv$educ_3cat <- ifelse(mv$educ=="Sin estudios" | mv$educ=="Educación Básica (primaria o preparatoria)" | mv$educ == "Educación Media (Humanidades)", "Media o menos",
ifelse(mv$educ == "Educación Profesional (Carreras de 4 o más años)", "Profesional",
ifelse(mv$educ == "Educación Técnica Nivel Superior (Carreras de 1 a 3 años)", "Técnica", NA)))
mv$educ_2cat <- ifelse(mv$educ=="Sin estudios" | mv$educ=="Educación Básica (primaria o preparatoria)" | mv$educ == "Educación Media (Humanidades)", "Media o menos",
ifelse(mv$educ == "Educación Profesional (Carreras de 4 o más años)" | mv$educ == "Educación Técnica Nivel Superior (Carreras de 1 a 3 años)", "Más que media", NA))
# Income-tertile recode kept commented out, as in the original script:
# mv$tertil_ingre_c <- ifelse(mv$tertil_ingre==1, "Ingresos bajos",
# ifelse(mv$tertil_ingre==2, "Ingresos medios",
# ifelse(mv$tertil_ingre==3, "Ingresos altos", NA)))
# Sex x age group, with 65 as the cut-off.
mv$sexo_edad <- ifelse(mv$sexo=="Masculino" & mv$edad<65, "Hombre menor a 65",
ifelse(mv$sexo=="Masculino" & mv$edad>64, "Hombre mayor a 65",
ifelse(mv$sexo=="Femenino" & mv$edad<65, "Mujer menor a 65",
ifelse(mv$sexo=="Femenino" & mv$edad>64, "Mujer mayor a 65", NA))))
# Dichotomise each weekly outing count: 0 = none, 1 = at least one,
# NA for anything else (including negative values).
mv$dic_trabajo <- ifelse(mv$p1_pra_trabajo==0, 0,
ifelse(mv$p1_pra_trabajo>0, 1, NA))
mv$dic_tramite <- ifelse(mv$p1_pra_tramite==0, 0,
ifelse(mv$p1_pra_tramite>0, 1, NA))
mv$dic_visita <- ifelse(mv$p1_pra_visita==0, 0,
ifelse(mv$p1_pra_visita>0, 1, NA))
mv$dic_recrea <- ifelse(mv$p1_pra_recrea==0, 0,
ifelse(mv$p1_pra_recrea>0, 1, NA))
mv$dic_transporte <- ifelse(mv$p1_pra_transporte==0, 0,
ifelse(mv$p1_pra_transporte>0, 1, NA))
mv$dic_invitado <- ifelse(mv$p1_pra_invitado==0, 0,
ifelse(mv$p1_pra_invitado>0, 1, NA))
mv$dic_otro <- ifelse(mv$p1_pra_otro==0, 0,
ifelse(mv$p1_pra_otro>0, 1, NA))
# Any outing at all across six of the indicators.
# NOTE(review): dic_otro is computed above but excluded here and from
# n_salidas below -- confirm that is intended.
mv$dic_practicas <- ifelse((mv$dic_trabajo==0 & mv$dic_tramite==0 & mv$dic_invitado==0 &
mv$dic_recrea==0 & mv$dic_transporte==0 & mv$dic_visita==0), 0,
ifelse((mv$dic_trabajo>0 | mv$dic_tramite>0 | mv$dic_invitado>0 |
mv$dic_recrea>0 | mv$dic_transporte>0 | mv$dic_visita>0), 1, NA))
# Total outings over four activity types (work, recreation, errands, transport).
mv$n_salidas <- (mv$p1_pra_trabajo+mv$p1_pra_recrea+mv$p1_pra_tramite+mv$p1_pra_transporte)
# Any-symptom flag: 1 if any of the ten symptom indicators is 1,
# 0 if all ten are 0, NA otherwise.
mv$sintoma <- ifelse((mv$s1_snt_fiebre==1 | mv$s1_snt_anosmia==1 | mv$s1_snt_disnea==1 | mv$s1_snt_tos==1 |
mv$s1_snt_mialgias==1 | mv$s1_snt_odinofagia==1 | mv$s1_snt_dol_torax==1 |
mv$s1_snt_cefalea==1 | mv$s1_snt_diarrea==1 | mv$s1_snt_disgeusia==1), 1,
ifelse((mv$s1_snt_fiebre==0 & mv$s1_snt_anosmia==0 & mv$s1_snt_disnea==0 & mv$s1_snt_tos==0 &
mv$s1_snt_mialgias==0 & mv$s1_snt_odinofagia==0 & mv$s1_snt_dol_torax==0 &
mv$s1_snt_cefalea==0 & mv$s1_snt_diarrea==0 & mv$s1_snt_disgeusia==0), 0, NA))
# "No symptoms" answer (s1_snt_null) overrides the flag to 0.
mv$sintoma <- ifelse(mv$s1_snt_null==1, 0, mv$sintoma)
# Three age groups.
mv$edad_3cat <- ifelse(mv$edad<40, "18 a 39",
ifelse(mv$edad<65 & mv$edad>39, "40 a 64",
ifelse(mv$edad>64, "65 y más", NA)))
# Merge survey week 15 into week 16.
mv$semana <- ifelse(mv$semana==15, 16, mv$semana)
# Activity: combines last-week activity (pr4_wrk_lastw) with the usual past
# occupation (pr6_ocup_normal). case_when() evaluates conditions in order,
# so the branch order below is load-bearing (first match wins).
mv <- mv %>%
mutate(actividad = case_when(
str_detect(pr4_wrk_lastw, "No realicé ") & educ_2cat == "Media o menos" &
str_detect(pr6_ocup_normal, "cuenta propia") ~ "Cuenta propia baja",
str_detect(pr4_wrk_lastw, "No realicé ") & educ_2cat == "Más que media" &
str_detect(pr6_ocup_normal, "cuenta propia") ~ "Cuenta propia alta",
educ_2cat == "Media o menos" &
str_detect(pr4_wrk_lastw, "cuenta propia") ~ "Cuenta propia baja",
educ_2cat == "Más que media" &
str_detect(pr4_wrk_lastw, "cuenta propia") ~ "Cuenta propia alta",
is.na(educ_2cat) & str_detect(pr4_wrk_lastw, "cuenta propia") ~ NA_character_,
str_detect(pr4_wrk_lastw, "doméstico") |
str_detect(pr6_ocup_normal, "doméstico") ~ "Casa particular",
str_detect(pr4_wrk_lastw, "público") |
str_detect(pr6_ocup_normal, "público") ~ "Asalariado/a público",
str_detect(pr4_wrk_lastw, "privada") |
str_detect(pr6_ocup_normal, "privada") ~ "Asalariado/a privado",
str_detect(pr4_wrk_lastw, "propia empresa") |
str_detect(pr6_ocup_normal, "propia empresa") ~ "Empleador",
# fall-backs: keep the raw answer when no pattern matched
!is.na(pr4_wrk_lastw) ~ pr4_wrk_lastw,
!is.na(pr6_ocup_normal) ~ pr6_ocup_normal,
TRUE ~ pr6_ocup_normal),
# actividad2: split private-sector employees by education level
actividad2 = case_when(
actividad == "Asalariado/a privado" &
educ_2cat == "Media o menos" ~ "Privado baja",
actividad == "Asalariado/a privado" &
educ_2cat == "Más que media" ~ "Privado alta",
actividad == "Asalariado/a privado" ~ NA_character_,
TRUE ~ actividad),
# actividad3: like actividad, but the unemployed form their own category
actividad3 = case_when(
pr3_ocupacion == "Desempleado o desempleada" ~ "Desempleado",
TRUE ~ actividad
)
)
# 20200805 ----------------------------------------------------------------
# Wednesday of the previous week.
# mv$fecha_ultima_obs
# table( mv$fecha_ultima_obs %>% as.Date(), mv$semana )
#
# lubridate::wday(Sys.Date(), week_start = 1)
# Earlier (discarded) variant kept commented out in the original:
# mv <- mv %>%
# mutate(
# semana_fecha_miercoles = as.Date(paste(2020, semana - 1, 3, sep="-"), "%Y-%U-%u"),
# semana_fecha_miercoles = if_else(
# lubridate::wday(as.Date(fecha), week_start = 1) > 3,
# semana_fecha_miercoles + lubridate::days(7),
# semana_fecha_miercoles
# )
# ) %>%
# rename(semana_fecha = semana_fecha_miercoles)
# semana_fecha = Wednesday (weekday 3) of week `semana - 1` of 2020, parsed
# with "%Y-%U-%u".
# NOTE(review): the year 2020 is hard-coded, and %U/%u parsing is
# platform/locale sensitive -- confirm it behaves as expected in deployment.
mv <- mv %>%
mutate(
semana_fecha = as.Date(paste(2020, semana - 1, 3, sep="-"), "%Y-%U-%u"),
fecha_date = as.Date(fecha)
)
# 20200827 ----------------------------------------------------------------
# Keep only complete survey weeks: a week is incomplete when it has fewer
# than 7 distinct observation dates; its rows are removed via anti_join().
semanas_incompletas <- mv %>%
count(semana, fecha_date) %>%
group_by(semana) %>%
mutate(dias = n()) %>%
filter(dias < 7) %>%
distinct(semana)
mv <- mv %>%
anti_join(semanas_incompletas, by = "semana")
# Exploratory check kept commented out from the original:
# mv %>%
# count(as.Date(fecha)) %>%
# tail(10)
# 20200810 ----------------------------------------------------------------
# caso_probable2: TRUE when both the contact flag and the suspicion flag
# (sosp_minsal0530) equal 1 -- presumably the MINSAL 2020-05-30 suspect-case
# definition; confirm against the survey codebook.
mv <- mv %>%
mutate(
caso_probable2 = contacto == 1 & sosp_minsal0530 == 1
)
# 20200811 ----------------------------------------------------------------
# Auxiliary constant column for the disaggregation selector (UI).
mv <- mv %>%
mutate(
todo = "Total"
)
# 20200814 ----------------------------------------------------------------
# Collapse every prevision category containing "Otra" into a single "Otra".
mv <- mv %>%
mutate(
prev = ifelse(stringr::str_detect(prev, "Otra"), "Otra", prev)
)
# percepcion de legitimidad (perceived legitimacy) -------------------------
# The four soc* Likert items share the same 1-5 coding; recode them in one
# loop instead of four copy-pasted car::recode() calls (also TRUE, not T).
likert_codes <- "1='Muy de acuerdo';2='De acuerdo';3='Ni de acuerdo ni en desacuerdo';4= 'En desacuerdo'; 5='Muy en desacuerdo'"
likert_niveles <- c('Muy de acuerdo', 'De acuerdo', 'Ni de acuerdo ni en desacuerdo', 'En desacuerdo', 'Muy en desacuerdo')
for (soc_var in c("soc1_bienestar", "soc2_obedecer", "soc3_desigualdad", "soc4_represion")) {
  mv[[soc_var]] <- car::recode(mv[[soc_var]], likert_codes,
                               as.factor = TRUE, levels = likert_niveles)
}
# Quick interactive checks (print to console; nothing is assigned).
mv %>%
  select(pob_id, semana_fecha, soc1_bienestar)
mv %>%
  count(soc1_bienestar)
# dmensual: the last survey-week date of each calendar month; used to stamp
# monthly aggregates with a concrete date.
dmensual <- mv %>%
  distinct(anio = lubridate::year(semana_fecha), mes = lubridate::month(semana_fecha), semana_fecha) %>%
  arrange(anio, mes, semana_fecha) %>%
  group_by(anio, mes) %>%
  filter(semana_fecha == max(semana_fecha)) %>%
  ungroup()
# dsoc: per respondent and month, the last non-missing answer to each of the
# four legitimacy items.
dsoc <- mv %>%
  group_by(pob_id, anio = lubridate::year(semana_fecha), mes = lubridate::month(semana_fecha)) %>%
  summarise_at(
    vars(soc1_bienestar, soc2_obedecer, soc3_desigualdad, soc4_represion),
    ~last(na.omit(.x))
  )
# Stamp each monthly record with the month's closing week date. The join
# keys are now explicit (the original relied on a natural join by anio/mes,
# which is fragile and prints a "Joining, by = ..." message).
dsoc <- left_join(dsoc, dmensual, by = c("anio", "mes")) %>%
  ungroup() %>%
  select(-anio, -mes) %>%
  filter(semana_fecha >= lubridate::ymd(20200901))
dsoc <- dsoc %>%
  arrange(pob_id, semana_fecha)
# Exploratory snippets kept for reference (not executed):
# dsoc %>%
#   # group_by(pob_id) %>%
#   filter(semana_fecha >= lubridate::ymd(20201001)) %>%
#   filter(!is.na(soc1_bienestar))
#
# dsoc %>%
#   group_by(pob_id) %>%
#   filter(length(na.omit(soc1_bienestar)) >= 2)
#   filter(pob_id == "0000daa20a9d6b34502b0e9c80318cd1a9d8fe456b3ef5c9e3c7d18121b65c6b") %>%
#   View()
#
#
# mv %>%
#   filter(pob_id == "0000daa20a9d6b34502b0e9c80318cd1a9d8fe456b3ef5c9e3c7d18121b65c6b") %>%
#   select(fecha, semana_fecha, starts_with("soc")) %>%
#   View()
# -------------------------------------------------------------------------
# Column pruning previously applied before export (currently disabled):
# mv <- mv %>%
#   select(
#     -contains("TEXT"),
#     -starts_with("sa3"),
#     -starts_with("sa2"),
#     -starts_with("c7"),
#     -starts_with("c6"),
#     -starts_with("pr"),
#     -edad_65,
#     -RM,
#     -comuna,
#     -semana,
#     -semana0,
#     -region
#   )
# exportar ----------------------------------------------------------------
# Persist the processed microdata (mv) and the monthly legitimacy panel
# (dsoc) for downstream use (e.g. the Shiny app).
saveRDS(mv, "data/movid.rds")
saveRDS(dsoc, "data/dsoc.rds")
| /R/data/movid_data.R | no_license | MOVID19/movid19-shiny | R | false | false | 12,353 | r | message("R/data/process_data.R")
# paquetes (packages) ------------------------------------------------------
library(dplyr)
library(stringr)
# lectura de datos (data load) ---------------------------------------------
# Script that processes the MOVID survey export.
# NOTE(review): the original comment said "movid18.csv" but the file read is
# movid19.csv -- presumably a stale comment; confirm.
mv <- data.table::fread("data/movid19.csv", encoding = "UTF-8")
mv <- as_tibble(mv)
# Exploratory checks kept for reference (not executed):
# mv %>%
#   filter(fecha_obs %>% as.Date() >= lubridate::ymd(20200803)) %>%
#   {
#     table(.$fecha_obs %>% as.Date(), .$semana)
#   }
#
# table(mv$fecha_obs %>% as.Date(), mv$semana)
# mv$r5_educ
# glimpse(mv)
# Movid: Recodificaciones generales (Monica)
mv <- mv %>%
rename(
fecha = fecha_obs,
sexo = r2_sexo,
# firstName = r1_nombre
region = u1_region,
comuna = u2_comuna,
# calle = u3_calle,
educ = r5_educ,
tra_salud = pr1_wrk_salud,
prev = pr2_prevision,
# pob_id = X.U.FEFF.X.U.FEFF.pob_id
)
# recodificacion (recoding) -------------------------------------------------
# fecha_ymd: observation date as Date. (A duplicated base-R reassignment of
# the exact same value was removed.)
mv <- mv %>%
  mutate(
    fecha_ymd = as.Date(fecha)
  )
# tra_salud_dic: health-worker indicator (1 = "Sí", 0 = "No", NA otherwise).
mv$tra_salud_dic <- ifelse(mv$tra_salud=="Sí", 1,
                           ifelse(mv$tra_salud=="No", 0, NA))
# sexo_trasalud: sex crossed with health-worker status (four labels; NA when
# either input is missing or takes another value).
mv$sexo_trasalud <- ifelse(mv$sexo=="Femenino" & mv$tra_salud=="Sí", "Mujer trabajadora de salud",
                           ifelse(mv$sexo=="Femenino" & mv$tra_salud=="No", "Mujer no trabajadora de salud",
                                  ifelse(mv$sexo=="Masculino" & mv$tra_salud=="Sí", "Hombre trabajador de salud",
                                         ifelse(mv$sexo=="Masculino" & mv$tra_salud=="No", "Hombre no trabajador de salud", NA))))
# Education collapsed to 4 / 3 / 2 categories. The exact Spanish labels must
# match the raw `educ` strings, so these chains are kept verbatim.
mv$educ_4cat <- ifelse(mv$educ=="Sin estudios" | mv$educ=="Educación Básica (primaria o preparatoria)", "Basica o sin estudios",
                       ifelse(mv$educ == "Educación Media (Humanidades)", "Media",
                              ifelse(mv$educ == "Educación Profesional (Carreras de 4 o más años)", "Profesional",
                                     ifelse(mv$educ == "Educación Técnica Nivel Superior (Carreras de 1 a 3 años)", "Tecnica", NA))))
mv$educ_3cat <- ifelse(mv$educ=="Sin estudios" | mv$educ=="Educación Básica (primaria o preparatoria)" | mv$educ == "Educación Media (Humanidades)", "Media o menos",
                       ifelse(mv$educ == "Educación Profesional (Carreras de 4 o más años)", "Profesional",
                              ifelse(mv$educ == "Educación Técnica Nivel Superior (Carreras de 1 a 3 años)", "Técnica", NA)))
mv$educ_2cat <- ifelse(mv$educ=="Sin estudios" | mv$educ=="Educación Básica (primaria o preparatoria)" | mv$educ == "Educación Media (Humanidades)", "Media o menos",
                       ifelse(mv$educ == "Educación Profesional (Carreras de 4 o más años)" | mv$educ == "Educación Técnica Nivel Superior (Carreras de 1 a 3 años)", "Más que media", NA))
# Income tertile labels (disabled; tertil_ingre not recoded here):
# mv$tertil_ingre_c <- ifelse(mv$tertil_ingre==1, "Ingresos bajos",
#                             ifelse(mv$tertil_ingre==2, "Ingresos medios",
#                                    ifelse(mv$tertil_ingre==3, "Ingresos altos", NA)))
# sexo_edad: sex crossed with the 65-year age cutoff (four labels).
mv$sexo_edad <- ifelse(mv$sexo=="Masculino" & mv$edad<65, "Hombre menor a 65",
                       ifelse(mv$sexo=="Masculino" & mv$edad>64, "Hombre mayor a 65",
                              ifelse(mv$sexo=="Femenino" & mv$edad<65, "Mujer menor a 65",
                                     ifelse(mv$sexo=="Femenino" & mv$edad>64, "Mujer mayor a 65", NA))))
# Dichotomize the weekly out-of-home practice counts: 0 stays 0, any
# positive count becomes 1, anything else (NA / negative codes) becomes NA.
# One helper replaces seven copy-pasted nested ifelse() chains.
dicotomizar <- function(x) ifelse(x == 0, 0, ifelse(x > 0, 1, NA))
mv$dic_trabajo    <- dicotomizar(mv$p1_pra_trabajo)
mv$dic_tramite    <- dicotomizar(mv$p1_pra_tramite)
mv$dic_visita     <- dicotomizar(mv$p1_pra_visita)
mv$dic_recrea     <- dicotomizar(mv$p1_pra_recrea)
mv$dic_transporte <- dicotomizar(mv$p1_pra_transporte)
mv$dic_invitado   <- dicotomizar(mv$p1_pra_invitado)
mv$dic_otro       <- dicotomizar(mv$p1_pra_otro)
# dic_practicas: any practice among trabajo/tramite/invitado/recrea/
# transporte/visita. dic_otro is not included -- TODO confirm intentional.
mv$dic_practicas <- ifelse((mv$dic_trabajo==0 & mv$dic_tramite==0 & mv$dic_invitado==0 &
                              mv$dic_recrea==0 & mv$dic_transporte==0 & mv$dic_visita==0), 0,
                           ifelse((mv$dic_trabajo>0 | mv$dic_tramite>0 | mv$dic_invitado>0 |
                                     mv$dic_recrea>0 | mv$dic_transporte>0 | mv$dic_visita>0), 1, NA))
# n_salidas: total outings = work + recreation + errands + transport trips.
# NOTE(review): visita/invitado/otro are excluded here -- confirm intended.
mv$n_salidas <- (mv$p1_pra_trabajo+mv$p1_pra_recrea+mv$p1_pra_tramite+mv$p1_pra_transporte)
# sintoma: 1 if any of the ten listed symptoms reported, 0 if all ten are
# explicitly 0, NA otherwise.
mv$sintoma <- ifelse((mv$s1_snt_fiebre==1 | mv$s1_snt_anosmia==1 | mv$s1_snt_disnea==1 | mv$s1_snt_tos==1 |
                        mv$s1_snt_mialgias==1 | mv$s1_snt_odinofagia==1 | mv$s1_snt_dol_torax==1 |
                        mv$s1_snt_cefalea==1 | mv$s1_snt_diarrea==1 | mv$s1_snt_disgeusia==1), 1,
                     ifelse((mv$s1_snt_fiebre==0 & mv$s1_snt_anosmia==0 & mv$s1_snt_disnea==0 & mv$s1_snt_tos==0 &
                               mv$s1_snt_mialgias==0 & mv$s1_snt_odinofagia==0 & mv$s1_snt_dol_torax==0 &
                               mv$s1_snt_cefalea==0 & mv$s1_snt_diarrea==0 & mv$s1_snt_disgeusia==0), 0, NA))
# The explicit "no symptoms" checkbox overrides everything to 0.
mv$sintoma <- ifelse(mv$s1_snt_null==1, 0, mv$sintoma)
# Age in three brackets.
mv$edad_3cat <- ifelse(mv$edad<40, "18 a 39",
                       ifelse(mv$edad<65 & mv$edad>39, "40 a 64",
                              ifelse(mv$edad>64, "65 y más", NA)))
# Week 15 merged into week 16 -- presumably a partial/start-up week; confirm.
mv$semana <- ifelse(mv$semana==15, 16, mv$semana)
# Actividad mezclando actividad de semana referencia y normalmente en el pasado
# (activity combining the reference week and the "normally" question).
# case_when evaluates top-down: self-employed split by education first (using
# the "normal" occupation when nothing was done last week), then domestic
# work, public/private employees, employers; finally fall back to the raw
# answers.
mv <- mv %>%
  mutate(actividad = case_when(
    str_detect(pr4_wrk_lastw, "No realicé ") & educ_2cat == "Media o menos" &
      str_detect(pr6_ocup_normal, "cuenta propia") ~ "Cuenta propia baja",
    str_detect(pr4_wrk_lastw, "No realicé ") & educ_2cat == "Más que media" &
      str_detect(pr6_ocup_normal, "cuenta propia") ~ "Cuenta propia alta",
    educ_2cat == "Media o menos" &
      str_detect(pr4_wrk_lastw, "cuenta propia") ~ "Cuenta propia baja",
    educ_2cat == "Más que media" &
      str_detect(pr4_wrk_lastw, "cuenta propia") ~ "Cuenta propia alta",
    is.na(educ_2cat) & str_detect(pr4_wrk_lastw, "cuenta propia") ~ NA_character_,
    str_detect(pr4_wrk_lastw, "doméstico") |
      str_detect(pr6_ocup_normal, "doméstico") ~ "Casa particular",
    str_detect(pr4_wrk_lastw, "público") |
      str_detect(pr6_ocup_normal, "público") ~ "Asalariado/a público",
    str_detect(pr4_wrk_lastw, "privada") |
      str_detect(pr6_ocup_normal, "privada") ~ "Asalariado/a privado",
    str_detect(pr4_wrk_lastw, "propia empresa") |
      str_detect(pr6_ocup_normal, "propia empresa") ~ "Empleador",
    !is.na(pr4_wrk_lastw) ~ pr4_wrk_lastw,
    !is.na(pr6_ocup_normal) ~ pr6_ocup_normal,
    TRUE ~ pr6_ocup_normal),
    # actividad2: private-sector employees split by education.
    actividad2 = case_when(
      actividad == "Asalariado/a privado" &
        educ_2cat == "Media o menos" ~ "Privado baja",
      actividad == "Asalariado/a privado" &
        educ_2cat == "Más que media" ~ "Privado alta",
      actividad == "Asalariado/a privado" ~ NA_character_,
      TRUE ~ actividad),
    # actividad3: like actividad but with an explicit unemployed category.
    actividad3 = case_when(
      pr3_ocupacion == "Desempleado o desempleada" ~ "Desempleado",
      TRUE ~ actividad
    )
  )
# 20200805 ----------------------------------------------------------------
# Week label as a date: Wednesday (%u = 3) of week `semana - 1` in 2020.
# NOTE(review): the year 2020 is hard-coded -- confirm if data spans years.
# Earlier approach kept for reference (shift post-Wednesday responses):
# mv$fecha_ultima_obs
# table( mv$fecha_ultima_obs %>% as.Date(), mv$semana )
#
# lubridate::wday(Sys.Date(), week_start = 1)
# mv <- mv %>%
#   mutate(
#     semana_fecha_miercoles = as.Date(paste(2020, semana - 1, 3, sep="-"), "%Y-%U-%u"),
#     semana_fecha_miercoles = if_else(
#       lubridate::wday(as.Date(fecha), week_start = 1) > 3,
#       semana_fecha_miercoles + lubridate::days(7),
#       semana_fecha_miercoles
#     )
#   ) %>%
#   rename(semana_fecha = semana_fecha_miercoles)
mv <- mv %>%
  mutate(
    semana_fecha = as.Date(paste(2020, semana - 1, 3, sep="-"), "%Y-%U-%u"),
    fecha_date = as.Date(fecha)  # plain Date version of the timestamp
  )
# 20200827 ----------------------------------------------------------------
# Keep only complete weeks: a week qualifies when observations exist on all
# 7 distinct dates; weeks with fewer distinct dates are removed from mv.
semanas_incompletas <- mv %>%
  distinct(semana, fecha_date) %>%
  count(semana, name = "dias") %>%
  filter(dias < 7) %>%
  select(semana)
mv <- mv %>%
  anti_join(semanas_incompletas, by = "semana")
# mv %>%
#   count(as.Date(fecha)) %>%
#   tail(10)
# 20200810 ----------------------------------------------------------------
# Probable case: reported a contact AND met the MINSAL (2020-05-30) symptom
# suspicion definition.
mv$caso_probable2 <- mv$contacto == 1 & mv$sosp_minsal0530 == 1
# 20200811 ----------------------------------------------------------------
# Auxiliary constant column used by the disaggregation selector in the app.
mv$todo <- "Total"
# 20200814 ----------------------------------------------------------------
# Collapse any insurance category containing "Otra" into a single "Otra".
mv$prev <- ifelse(stringr::str_detect(mv$prev, "Otra"), "Otra", mv$prev)
# percepcion de legitimidad (perceived legitimacy) -------------------------
# The four soc* Likert items share the same 1-5 coding; recode them in one
# loop instead of four copy-pasted car::recode() calls (also TRUE, not T).
likert_codes <- "1='Muy de acuerdo';2='De acuerdo';3='Ni de acuerdo ni en desacuerdo';4= 'En desacuerdo'; 5='Muy en desacuerdo'"
likert_niveles <- c('Muy de acuerdo', 'De acuerdo', 'Ni de acuerdo ni en desacuerdo', 'En desacuerdo', 'Muy en desacuerdo')
for (soc_var in c("soc1_bienestar", "soc2_obedecer", "soc3_desigualdad", "soc4_represion")) {
  mv[[soc_var]] <- car::recode(mv[[soc_var]], likert_codes,
                               as.factor = TRUE, levels = likert_niveles)
}
# Quick interactive checks (print to console; nothing is assigned).
mv %>%
  select(pob_id, semana_fecha, soc1_bienestar)
mv %>%
  count(soc1_bienestar)
# dmensual: the last survey-week date of each calendar month; used to stamp
# monthly aggregates with a concrete date.
dmensual <- mv %>%
  distinct(anio = lubridate::year(semana_fecha), mes = lubridate::month(semana_fecha), semana_fecha) %>%
  arrange(anio, mes, semana_fecha) %>%
  group_by(anio, mes) %>%
  filter(semana_fecha == max(semana_fecha)) %>%
  ungroup()
# dsoc: per respondent and month, the last non-missing answer to each of the
# four legitimacy items.
dsoc <- mv %>%
  group_by(pob_id, anio = lubridate::year(semana_fecha), mes = lubridate::month(semana_fecha)) %>%
  summarise_at(
    vars(soc1_bienestar, soc2_obedecer, soc3_desigualdad, soc4_represion),
    ~last(na.omit(.x))
  )
# Stamp each monthly record with the month's closing week date. The join
# keys are now explicit (the original relied on a natural join by anio/mes,
# which is fragile and prints a "Joining, by = ..." message).
dsoc <- left_join(dsoc, dmensual, by = c("anio", "mes")) %>%
  ungroup() %>%
  select(-anio, -mes) %>%
  filter(semana_fecha >= lubridate::ymd(20200901))
dsoc <- dsoc %>%
  arrange(pob_id, semana_fecha)
# Exploratory snippets kept for reference (not executed):
# dsoc %>%
#   # group_by(pob_id) %>%
#   filter(semana_fecha >= lubridate::ymd(20201001)) %>%
#   filter(!is.na(soc1_bienestar))
#
# dsoc %>%
#   group_by(pob_id) %>%
#   filter(length(na.omit(soc1_bienestar)) >= 2)
#   filter(pob_id == "0000daa20a9d6b34502b0e9c80318cd1a9d8fe456b3ef5c9e3c7d18121b65c6b") %>%
#   View()
#
#
# mv %>%
#   filter(pob_id == "0000daa20a9d6b34502b0e9c80318cd1a9d8fe456b3ef5c9e3c7d18121b65c6b") %>%
#   select(fecha, semana_fecha, starts_with("soc")) %>%
#   View()
# -------------------------------------------------------------------------
# Column pruning previously applied before export (currently disabled):
# mv <- mv %>%
#   select(
#     -contains("TEXT"),
#     -starts_with("sa3"),
#     -starts_with("sa2"),
#     -starts_with("c7"),
#     -starts_with("c6"),
#     -starts_with("pr"),
#     -edad_65,
#     -RM,
#     -comuna,
#     -semana,
#     -semana0,
#     -region
#   )
# exportar ----------------------------------------------------------------
# Persist the processed microdata (mv) and the monthly legitimacy panel
# (dsoc) for downstream use (e.g. the Shiny app).
saveRDS(mv, "data/movid.rds")
saveRDS(dsoc, "data/dsoc.rds")
|
\name{summarise.size.frq.bet}
\alias{summarise.size.frq.bet}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Summarise and Compare Size Frequency Data from Bigeye Tuna frq Files
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Takes two frq files from the bigeye assessment and compares the length and weight data on an annual basis for the range of years.
Currently limited to the last 15 years. Written specifically for the 2009 BET assessment and included as an example.
}
\usage{
summarise.size.frq.bet(frq1, fishery = 5)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{frq1}{
An frq file from the bigeye assessment containing the length- and weight-frequency data to be summarised.
}
\item{fishery}{
The number of the fishery whose size data are summarised; defaults to 5.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
Shelton Harley
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/summarise.size.frq.bet.Rd | no_license | PacificCommunity/ofp-sam-r4mfcl | R | false | false | 1,552 | rd | \name{summarise.size.frq.bet}
\alias{summarise.size.frq.bet}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Summarise and Compare Size Frequency Data from Bigeye Tuna frq Files
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Takes two frq files from the bigeye assessment and compares the length and weight data on an annual basis for the range of years.
Currently limited to the last 15 years. Written specifically for the 2009 BET assessment and included as an example.
}
\usage{
summarise.size.frq.bet(frq1, fishery = 5)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{frq1}{
An frq file from the bigeye assessment containing the length- and weight-frequency data to be summarised.
}
\item{fishery}{
The number of the fishery whose size data are summarised; defaults to 5.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
Shelton Harley
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# load packages -----------------------------------------------------------
# Switched require() -> library(): require() returns FALSE on a missing
# package instead of stopping, which hides setup failures until much later
# in the script.
library(ggplot2)
library(plotly)
library(stringi)
library(tidyr)
library(purrr)
library(dplyr)
library(rio)
library(lubridate)
library(readxl)
library(stringdist)
library(xlsx)
library(reldist)
library(ggthemes)
library(ggalt)        # for lollipop charts (geom_lollipop)
library(highcharter)
# theme_set(theme_bw())
theme_set(theme_minimal())
library(tidyquant)    # provides theme_tq() used further below
# library(XLConnect)
wdr <- getwd()
# >> load dataset ---------------------------------------------------------
# setwd("C:/Users/Roland/Downloads")
Sys.setlocale("LC_CTYPE", "russian") # allows displaying party names in Cyrillic
laws <- readxl::read_xlsx(paste0(wdr, "/data/1997_2014_Laws.xlsx"), sheet="BiHLaws")
laws <- readxl::read_xlsx(paste0(wdr, "/data/1997_2014_Laws.xlsx"), sheet="BiHLaws")
# recode type of law ------------------------------------------------------
laws$category[laws$category==1] <- "under consideration"
laws$category[laws$category==2] <- "adopted"
#laws$category[laws$category==3] does not exist
laws$category[laws$category==4] <- "rejected" #Odbijeni zakoni
laws$category[laws$category==5] <- "disputed" #Povuceni zakoni
laws$category[laws$category==6] <- "suspended"
laws$category[laws$category==7] <- "expired"
laws$category[laws$category==8] <- "previously adopted"
laws$date.start2 <- as.Date(laws$date.start, "%d.%m.%Y")
#category 3 is unrelated; link leads somewhere else on the page
laws <- laws %>% filter(category!=3)
# laws$year <- year(laws$date.start2)
# class(laws$year)
laws$year <- as.numeric(strftime(laws$date.start2, format="%Y"))
class(laws$year)
# Plot --------------------------------------------------------------------
# Daily bar chart of laws by procedure start date, faceted by category.
laws.bar <- laws %>%
  #filter(area %in% filter.area) %>%
  ggplot(.,aes(date.start2))+
  geom_bar(aes(fill=category)) +
  labs(y="Number of Laws",
       x=NULL,
       title="Number of Laws",
       subtitle="selected categories")+
  theme(legend.position = "none",
        panel.grid.major.x=element_blank(),
        panel.grid.minor.x = element_blank())+
  scale_fill_brewer(palette="Set1")+
  scale_x_date(date_labels="%Y", date_breaks = "1 year")+
  scale_y_continuous(limits=c(0,10))+
  facet_grid(category~.)#+
  # Optional overlay of High Representative terms (needs an OHR data frame):
  #geom_vline(data=OHR, aes(xintercept=as.numeric(start.date)), linetype=4)+
  #geom_text(data=OHR, aes(label=HR, x=start.date, y=10), size=3 ,angle = 60, hjust = 0)
print(laws.bar)
# Save a timestamped PDF into graphs/draft/.
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-laws.bar.pdf"
ggsave(paste(folder,time,filename, sep=""), width=10, height=5)
# Yearly summs ------------------------------------------------------------
# Annual counts per category, plotted as stacked bars.
laws.year <- laws %>%
  group_by(category, year) %>%
  summarise(freq=n()) %>%
  ungroup()
class(laws.year$freq)  # console check
laws.year <- as.data.frame(laws.year)
laws.year.bar <- laws.year %>%
  #filter(area %in% filter.area) %>%
  ggplot(.,aes(year, freq))+
  geom_col(aes(fill=category)) +
  labs(y="Number of Laws per year",
       x=NULL,
       title="Number of Laws",
       subtitle="selected categories")+
  theme(legend.position = "bottom",
        panel.grid.major.x=element_blank(),
        panel.grid.minor.x = element_blank())+
  scale_fill_brewer(palette="Set1")+
  scale_x_continuous(breaks=seq(min(laws.year$year),max(laws.year$year),1)) #+
# scale_y_continuous(limits=c(0,10))+
#facet_grid(category~.)#+
#geom_vline(data=OHR, aes(xintercept=as.numeric(start.date)), linetype=4)+
#geom_text(data=OHR, aes(label=HR, x=start.date, y=10), size=3 ,angle = 60, hjust = 0)
print(laws.year.bar)
# Save a timestamped PDF into graphs/draft/.
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-laws.year.bar.pdf"
ggsave(paste(folder,time,filename, sep=""), width=10, height=5)
# Laws 1996 - 2006: per-legislature sheets ---------------------------------
library(stringr)

# The four legislature sheets in graphs.xlsx share the same structure, with
# dates embedded between delimiters: "(dd.mm.yyyy)" in the first three
# sheets and "/dd.mm.yyyy/" in the 2002-06 sheet. One helper replaces four
# copy-pasted parsing blocks.
#
# str_extract_all() returns a list; substring()/nchar() coerce it with
# as.character() exactly as the original code did, so single-match cells
# behave identically (strip first/last delimiter char, then parse as Date).
extract_delim_date <- function(x, pattern) {
  hits <- str_extract_all(x, pattern)
  as.Date(substring(hits, 2, nchar(hits) - 2), "%d.%m.%Y")
}

# Read one legislature sheet, normalise column names, parse the House of
# Peoples / House of Representatives dates, and derive the later of the two
# (a law is final only after both houses) plus its year.
load_laws_sheet <- function(sheet, pattern) {
  d <- read_excel("graphs.xlsx", sheet = sheet)
  names(d) <- c("No", "law", "date.HoP", "date.HoR", "session")
  d$date.HoP <- extract_delim_date(d$date.HoP, pattern)
  d$date.HoR <- extract_delim_date(d$date.HoR, pattern)
  d$date <- pmax(d$date.HoP, d$date.HoR)
  d$year <- year(d$date)
  d
}

laws9698 <- load_laws_sheet("BiHLaws96-98", "\\([^()]+\\)")
laws9800 <- load_laws_sheet("BiHLaws98-00", "\\([^()]+\\)")
laws0002 <- load_laws_sheet("BiHLaws00-02", "\\([^()]+\\)")
laws0206 <- load_laws_sheet("BiHLaws02-06", "\\/[^()]+\\/")
# Laws 96 - 2006 ----------------------------------------------------------
# Stack the four legislature tables into a single 1996-2006 table.
laws9606 <- rbind(laws9698, laws9800, laws0002, laws0206)
# Laws 96 - 2014 monthly --------------------------------------------------
# Harmonise both sources to (date, law, category), then combine and keep
# everything before 2015.
laws0614 <- laws %>%
  select(date = date.start2, law = law.title, category)
laws9606 <- laws9606 %>%
  transmute(date, law, category = "adopted")  # pre-2006 sheets list adopted laws only
laws9614 <- rbind(laws9606, laws0614) %>%
  filter(date < "2015-01-01")
# dichotomy passed / not passed / under consideration ---------------------
# Collapse the detailed outcomes into three statuses. Note: categories not
# listed in `adopted` or "under consideration" (incl. "expired") fall into
# the "disputed, rejected, suspended" bucket via the ifelse default.
adopted <- c("adopted","previously adopted")
laws9614$category2<- ifelse(laws9614$category %in% adopted, "passed","disputed, rejected, suspended")
laws9614$category2[laws9614$category=="under consideration"] <- "under consideration"
# Monthly counts per (year-month, original category); as.yearmon() from zoo.
library(zoo)
laws9614.m <- laws9614 %>%
  mutate(yearmonth=as.yearmon(date)) %>%
  group_by(yearmonth, category) %>%
  summarise(n=n())%>%
  ungroup()
# plot laws monthly -------------------------------------------------------
# BUG FIX: the original chain ended in a dangling "+", which made
# print(laws.month.bar) part of the ggplot expression and referenced the
# object before it existed (error: object not found). Completed the plot
# with a stacked monthly bar chart of law counts by category.
laws.month.bar <- laws9614.m %>%
  ggplot(., aes(yearmonth, n)) +
  geom_col(aes(fill = category))
print(laws.month.bar)
# Save a timestamped PDF into graphs/draft/.
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-laws.month.bar.pdf"
ggsave(paste(folder,time,filename, sep=""), width=10, height=5)
# Plot laws 96 - 14 yearly ------------------------------------------------
# Year as numeric (strftime returns character).
laws9614$year <- as.numeric(strftime(laws9614$date, format="%Y"))
class(laws9614$year)  # console check
# Annual counts per detailed category, stacked bars.
laws9614.y <- laws9614 %>%
  group_by(year, category) %>%
  summarise(freq=n())
laws9614.year.bar <- laws9614.y %>%
  #filter(area %in% filter.area) %>%
  ggplot(.,aes(year, freq))+
  geom_col(aes(fill=category)) +
  labs(y="Number of Laws per year",
       x=NULL,
       title="Number of Laws",
       subtitle="selected categories")+
  theme(legend.position = "bottom",
        panel.grid.major.x=element_blank(),
        panel.grid.minor.x = element_blank())+
  scale_fill_brewer(palette="Set1")+
  scale_x_continuous(breaks=seq(min(laws9614.y$year),max(laws9614.y$year),1)) #+
print(laws9614.year.bar)
# Save a timestamped PDF into graphs/draft/.
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-laws9614.year.bar.pdf"
ggsave(paste(folder,time,filename, sep=""), width=10, height=5)
# Plot laws 96 - 14 yearly Passed vs Non-Passed ------------------------------------------------
library(padr)
library(scales)
class(laws9614$year)  # console check
# Earlier padr-based approach kept for reference (not executed):
# laws9614.y <- laws9614 %>%
#   select(-year)%>%
#   group_by(category2) %>%
#   arrange(date)%>%
#   thicken(interval="year",colname="year", by="date")%>%
#   group_by(category2,year)%>%
#   summarize(freq=n())%>%
#   #ungroup()%>%
#   pad(group=c("category2"), interval="year", start=as.Date(min(laws9614.y$year))) %>%
#   fill_by_value(value=0)
class(laws9614.y$year)  # console check
# Annual counts per 3-way status; spread+gather fills missing year/status
# combinations with 0 so every year has all three statuses.
# NOTE(review): spread()/gather() are superseded by pivot_wider()/longer().
laws9614.y <- laws9614%>%
  group_by(year,category2)%>%
  summarise(freq=n())%>%
  spread(key=category2, value=freq, fill=0, drop=FALSE)%>%
  gather(key=category2, value=freq, 2:4)%>%
  mutate(category2=factor(category2, levels=c("passed", "disputed, rejected, suspended", "under consideration")))
# Dodged annual bars: passed vs non-passed vs under-consideration laws.
# Dashed vlines mark Dec 1997 (Bonn powers) and the 2006 legislature change.
passed.nonpassed.laws9614.year.bar <- laws9614.y %>%
  #filter(area %in% filter.area) %>%
  ggplot(.,aes(year, freq))+
  geom_col(aes(fill=category2), position="dodge") +
  labs(y=NULL,
       x=NULL,
       title="Number of laws per year in BiH Parliament",
       subtitle="",
       caption="Data: www.parlament.ba")+
  theme(legend.position = "bottom",
        legend.title = element_blank(),
        panel.grid.major.x=element_blank(),
        panel.grid.minor.x = element_blank(),
        plot.caption = element_text(size=8))+
  guides(fill=guide_legend(keywidth = 2, keyheight = 0.7))+
  scale_y_continuous(limit=c(0,100))+
  geom_vline(xintercept = 1998, linetype="dashed", color="black")+ #Dez 1997 = Bonn Powers
  geom_vline(xintercept = 2006, linetype="dashed", color="black")+
  scale_fill_manual(values=c("#008B00", "#CD2626","#FF8C00"))+  # green / red / orange
  scale_x_continuous(limit=c(1995, 2015), breaks=seq(1995,2015), labels = paste(substring(as.character((seq(1995,2015))),3)))
#in case x axis is of type date
# scale_x_date(limits=as.Date(c('1995-05-01','2015-01-01')), date_breaks="1 year", labels=date_format("%Y"))+
# geom_vline(xintercept = as.Date('1998-01-01'), linetype="dashed", color="black")+ #Dez 1997 = Bonn Powers
# geom_vline(xintercept = as.Date('2006-01-01'), linetype="dashed", color="black")
#creates 2 character long label: paste(substring(as.character((seq(1995,2015))),3))
print(passed.nonpassed.laws9614.year.bar)
# Save a timestamped PNG into graphs/draft/.
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-passed.nonpassed.laws9614.year.bar.png"
ggsave(paste(folder,time,filename, sep=""), width=16, height=7, unit="cm")
# Bar Plot - only passed laws ---------------------------------------------
# Annual count of passed laws in long format (one row per year) -- the shape
# the plot below needs.
# BUG FIX: the original immediately overwrote this with a wide
# spread(year, freq) table, so ggplot(aes(year, freq)) had no such columns
# and the plot errored. The wide table is now kept under its own name.
passed.laws9614.year.bar <- laws9614 %>%
  filter(category %in% c("adopted","previously adopted")) %>%
  group_by(year)%>%
  summarise(freq=n())
# One-row wide table (years as columns); previously exported via write.table.
passed.laws9614.year.wide <- passed.laws9614.year.bar %>%
  spread(year, freq)
#write.table(passed.laws9614.year.wide, file = "passedlaws.txt", sep = ",", quote = FALSE, row.names = F)
# Line chart of passed laws per year.
passed.laws9614.year.bar.plot <- passed.laws9614.year.bar %>%
  ggplot(.,aes(year, freq))+
  #geom_col(aes(fill=category))+
  geom_line()+
  labs(title="Number of passed Laws",
       subtitle="adopted and previously adopted laws")+
  theme(legend.position = "none",
        panel.grid.major.x=element_blank(),
        panel.grid.minor.x = element_blank())+
  scale_fill_brewer(palette="Set1")+
  scale_x_continuous(limit=c(1995, 2015), breaks=seq(1995,2015))
print(passed.laws9614.year.bar.plot)
# Save a timestamped PDF into graphs/draft/.
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-passed.laws9614.year.bar..plot.pdf"
ggsave(paste(folder,time,filename, sep=""), width=10, height=5)
# [pending] Bar Plot - only non-passed laws ---------------------------------------------
# filter.category <- c("adopted","previously adopted","under consideration")
#
# not.passed.laws9614.year.bar <- laws9614.y %>%
# filter(!category %in% filter.category) %>%
# ggplot(.,aes(year, freq))+
# geom_col() +
# #geom_col(aes(fill=category)) +
# labs(y=NULL,
# x=NULL,
# title="Number of not-passed Laws",
# subtitle="laws which are disputed, rejected, or suspended")+
# theme(legend.position = "none",
# panel.grid.major.x=element_blank(),
# panel.grid.minor.x = element_blank())+
# scale_fill_brewer(palette="Set1")+
# #scale_x_continuous(breaks=seq(min(laws9614.y$year),max(laws9614.y$year),1)) #+
# scale_y_continuous(limit=c(0,100))+
# scale_x_continuous(limit=c(1995, 2015), breaks=seq(1995,2015))
#
# print(not.passed.laws9614.year.bar)
#
# folder <-"graphs/draft/"
# time <- format(Sys.time(),"%Y%m%d-%H%M%S")
# filename <-"-not.passed.laws9614.year.bar.pdf"
# ggsave(paste(folder,time,filename, sep=""), width=10, height=5)
# Laws per Government -----------------------------------------------------
# >> load government periods ----------------------------------------------
Gov <- read_excel("graphs.xlsx",sheet="BiHgovs")
# Term of each government as a lubridate interval (start..end).
Gov$interval <- interval(Gov$start, Gov$end)
# "no - chair" label, e.g. "8 - Terzic".
Gov$name <- paste(Gov$no, Gov$chair, sep = " - ")
# Compact term label, e.g. "02/12-07/01" (yy/mm).
Gov$interval2<- paste(strftime(Gov$start, "%y/%m"), strftime(Gov$end, "%y/%m"),sep="-")
class(Gov$interval2)  # console check
# Two-line axis label: name over term.
Gov$name2 <- paste(Gov$name,Gov$interval2,sep="\n")
# >> Gov duration ---------------------------------------------------------
Gov$duration <- difftime(Gov$end, Gov$start, units = "days")
# >> loop to match government period with date of law ---------------------
# NOTE(review): this assumes every law date falls inside exactly one
# government interval; zero or multiple matches would error ("replacement
# has length zero" / "more elements supplied"). seq_len(nrow(...)) would
# also be safer than 1:nrow(...) for an empty table.
laws9614$gov <- as.character("")
for(i in 1:nrow(laws9614)) {
  laws9614$gov[i] <- Gov$name2[laws9614$date[i] %within% Gov$interval]
}
# Order governments chronologically (factor level order drives plot order).
laws9614$gov <- as.factor(laws9614$gov)
laws9614$gov <- factor(laws9614$gov,
                       levels=c("1 - Bosic/Silajdzic\n97/01-99/02",
                                "2 - Silajdzic/Mihajlovic\n99/02-00/06",
                                "3 - Tusevljak\n00/06-00/10",
                                "4 - Raguz\n00/10-01/02",
                                "5 - Matic\n01/02-01/07",
                                "6 - Lagumdzija\n01/07-02/03",
                                "7 - Mikerevic\n02/03-02/12",
                                "8 - Terzic\n02/12-07/01",
                                "9 - Spric\n07/01-07/12",
                                "10 - Spiric\n07/12-12/01",
                                "11 - Bevanda\n12/01-15/02",
                                "12 - Zvizdic\n15/02-17/05"))
# >> dichotomy passed vs non-passed laws ----------------------------------
# Three-way status. NOTE(review): "expired" has no branch here and becomes
# NA (case_when default) -- confirm whether that is intended.
laws9614 <- laws9614 %>%
  mutate(status=case_when(category=="adopted" ~ "passed",
                          category=="previously adopted" ~ "passed",
                          category=="rejected" ~ "not passed",
                          category=="disputed" ~ "not passed",
                          category=="suspended" ~ "not passed",
                          category=="under consideration" ~ "under consideration"))
# >> sum passed laws per government ---------------------------------------
laws.gov <- laws9614 %>%
  group_by(gov, status) %>%
  summarise(freq=n())
# >> plot laws per government ------------------------------------------------------------------------
# Passed laws per government as plain bars (theme_tq from tidyquant).
laws.gov.plot <- laws.gov %>%
  filter(status=="passed")%>%
  ggplot(.,aes(gov, freq))+
  geom_col(aes(),fill="darkblue", position="dodge") +
  labs(y=NULL,
       x=NULL,
       title="Number of Laws",
       subtitle="selected categories")+
  theme_tq()+
  theme(legend.position = "bottom",
        legend.title = element_blank(),
        panel.grid.major.x=element_blank(),
        panel.grid.minor.x = element_blank())
print(laws.gov.plot)
# >> Laws per day ---------------------------------------------------------
# Normalise legislative output by term length: laws per day and per week.
laws.day <- laws9614 %>%
  filter(status=="passed") %>%
  group_by(gov)%>%
  summarise(n.laws=n())
laws.day<- full_join(laws.day, Gov[c("name2","duration")], by=c("gov"="name2"))
laws.day <- laws.day %>%
  mutate(duration.day=as.numeric(duration),
         duration.week=duration.day/7,
         ratio.day=n.laws/duration.day,
         ratio.week=n.laws/duration.week)
levels(laws9614$gov)  # console check of government ordering
# >> plot laws per week ---------------------------------------------------
# Re-impose chronological ordering on the government factor after the join
# (full_join returns plain character order otherwise).
laws.day$gov <- factor(laws.day$gov,
                       levels=c("1 - Bosic/Silajdzic\n97/01-99/02",
                                "2 - Silajdzic/Mihajlovic\n99/02-00/06",
                                "3 - Tusevljak\n00/06-00/10",
                                "4 - Raguz\n00/10-01/02",
                                "5 - Matic\n01/02-01/07",
                                "6 - Lagumdzija\n01/07-02/03",
                                "7 - Mikerevic\n02/03-02/12",
                                "8 - Terzic\n02/12-07/01",
                                "9 - Spric\n07/01-07/12",
                                "10 - Spiric\n07/12-12/01",
                                "11 - Bevanda\n12/01-15/02",
                                "12 - Zvizdic\n15/02-17/05"))
# Governments of the "Alliance of Change" coalition, highlighted below.
AllianceOfChange <- c("5 - Matic\n01/02-01/07",
                      "6 - Lagumdzija\n01/07-02/03",
                      "7 - Mikerevic\n02/03-02/12")
#geom_rect over adjacent factor levels on x-axis;
#https://stackoverflow.com/questions/31381053/changing-the-background-color-of-a-ggplot-chart-with-a-factor-variable?noredirect=1&lq=1
# One rect per factor level, spanning +/- 0.5 around its integer position.
rects <- data.frame(xstart=seq(0.5,11.5,1),xend=seq(1.5,12.5,1),col=levels(laws.day$gov))
# Lollipop chart of laws/week per government; current (incomplete) mandate
# of Zvizdic is excluded so its ratio does not mislead.
laws.week.plot <- laws.day %>%
  filter(gov!="12 - Zvizdic\n15/02-17/05") %>%
  mutate(AoF=ifelse(gov %in% AllianceOfChange, "AoF",""))%>%
  ggplot(.,aes(gov, ratio.week))+
  geom_lollipop(color="#20B2AA", size=2)+
  geom_rect(data=rects[rects$col %in% AllianceOfChange,], aes(xmin=xstart, xmax=xend,
                                                              ymin=0, ymax=Inf),
            fill="grey", alpha=0.3, inherit.aes = FALSE)+
  geom_label(aes(x=5, y=1,label="'Alliance of Change'"), label.size=0, fill="lightgrey",
             size=3, fontface="italic", hjust="left",inherit.aes=FALSE)+
  labs(title="Legislative Output per Government",
       subtitle="Average number of laws passed per week",
       caption="Data: Parlament.ba",
       y="laws per week", x="")+
  theme_minimal()+
  theme(legend.position="bottom",
        legend.title = element_blank(),
        plot.caption = element_text(size=8),
        panel.grid.major.x = element_blank(),
        axis.text.x = element_text(angle = 45, hjust = 1))+
  # scale_color_discrete(breaks="AoF",name="", labels=c("Alliance of Change"))+
  scale_y_continuous(limits = c(0,1.2))
print(laws.week.plot)
# Save under the "<folder><timestamp>-<name>" pattern used by every other
# ggsave() in this file. The original paste(..., sep="-") also inserted a
# stray "-" between directory and timestamp ("graphs/draft/-2017...").
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
scriptname <- "BiHLawAnalysis"
plotname <-"laws.week.plot.png"
ggsave(paste0(folder, time, "-", scriptname, "-", plotname), width=15, height=9, unit="cm")
# >> Ratio Passed - Non-Passed Laws ---------------------------------------
# Annual ratio (%) of passed to not-passed laws, 2006-2014 only (no reliable
# outcome data before 2006). The ratio now refers to the spread() columns by
# NAME instead of position (.[[3]]/.[[2]]): a change in column order can no
# longer silently flip the ratio.
passed.ratio <- laws9614 %>%
  group_by(year, category2)%>%
  summarise(freq=n())%>%
  filter(year>2005)%>%
  filter(!year>2014)%>%
  filter(category2!="under consideration")%>%
  spread(category2, freq)%>%
  ungroup()%>%
  mutate(ratio=round(passed / `disputed, rejected, suspended` * 100, 2))%>%
  gather("category2","n", 2:4)
# >>> ratio Plot ----------------------------------------------------------
# Line chart of the passed/not-passed ratio; the grey band below 100 % marks
# years in which more laws failed than passed.
# NOTE(review): "adoped" in the subtitle string is a typo ("adopted") --
# left untouched here since it is runtime output, not a comment.
plot.passed.ratio <- passed.ratio %>%
  filter(category2=="ratio")%>%
  ggplot(.,aes(year,n))+
  geom_rect(aes(xmin=-Inf,xmax=+Inf,ymin=0, ymax=100), fill="grey", alpha=0.3)+
  annotate("text",2009, 51, label="more laws are rejected than passed")+
  geom_line()+
  labs(title="Ratio of Passed to Non-Passed Laws",
       subtitle="Number of adoped to disputed, rejected or suspended laws;\nno data prior to 2006 available",
       caption="Data: Parlament.ba",
       y="%", x="")+
  #geom_hline(yintercept=100)+
  theme_minimal()+
  theme(legend.position = "none",
        plot.caption = element_text(size=8),
        panel.grid.major.x=element_blank(),
        panel.grid.minor.x = element_blank(),
        panel.grid.minor.y=element_blank())+
  scale_x_continuous(limit=c(2006, 2014), breaks=seq(2006,2014))+
  scale_y_continuous(breaks=seq(0,600,100))
print(plot.passed.ratio)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-plot.passed.ratio.png"
ggsave(paste(folder,time,filename, sep=""), width=15, height=9, unit="cm")
# >>> ratio plot 2 - passed vs non-passed ----------------------------------
###
# Dodged bar chart of the annual passed vs not-passed counts (same data,
# absolute numbers instead of the ratio).
plot.passed.ratio2 <- passed.ratio %>%
  filter(category2!="ratio")%>%
  ggplot(.,aes(year,n))+
  geom_col(aes(fill=category2), position="dodge")+
  labs(y="number of laws",
       x="",
       title="Annual totals of passed and not-passed laws",
       subtitle="no data prior to 2006",
       caption="plot.passed.ratio2" )+
  theme_tq()+
  theme(legend.position = "bottom",
        legend.title = element_blank(),
        panel.grid.major.x=element_blank(),
        panel.grid.minor.x = element_blank())+
  scale_fill_brewer(palette="Set1")+
  scale_x_continuous(breaks=seq(2006,2014))
print(plot.passed.ratio2)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-plot.passed.ratio2.png"
ggsave(paste(folder,time,filename, sep=""), width=10, height=5)
# >> Annual Laws vs Annual Bonn Powers ---------------------------------------------------------
# >>> Bonn Power decisions per year ----------------------------------------
# Load the scraped OHR "Bonn Power" decisions and tag each with its year.
BP <- read_excel("graphs.xlsx", sheet="BonnPowersScrapped")
nrow(BP)  # row count before the split/re-bind below (sanity check)
BP$year <- year(BP$date.publish)
# >>> mark lifts of removals ----------------------------------------
# Decisions whose name contains "lift" undo an earlier removal; relabel them
# so later totals can exclude them.
removal_rows <- BP %>%
  filter(area=="Removals and Suspensions from Office") %>%
  mutate(area=ifelse(grepl("lift", decision.name, ignore.case = TRUE),
                     "Lifted Removals and Suspensions from Office",
                     "Removals and Suspensions from Office"))
other_rows <- BP %>%
  filter(area!="Removals and Suspensions from Office")
# Re-assemble: removals (relabelled) first, then everything else.
BP <- bind_rows(removal_rows, other_rows)
nrow(BP)  # must equal the pre-split count
# >> Bonn Powers per year ---------------------------------------------------------------------
# Annual Bonn-Power decision counts per area; BP.y is the annual total
# EXCLUDING "lifted" decisions.
BP.year <- BP %>%
  group_by(year,area)%>%
  summarise(freq=n())%>%
  group_by(year)%>%
  mutate(BP.y=sum(freq[area!="Lifted Removals and Suspensions from Office"]))%>% #not to include in total!
  spread(key=area,value=freq, fill=0)
# >> Laws per year ---------------------------------------------------------------------
# Annual law counts in wide format (one column per category2 outcome).
laws.year <- laws9614 %>%
  group_by(year, category2)%>%
  summarise(freq=n())%>%
  spread(key=category2, value=freq, fill=0)%>%
  filter(year<2015)
laws.bonn.year <- inner_join(BP.year, laws.year, by="year")
# >> Plot Bonn Powers vs Laws ---------------------------------------------------------------------
# Line chart: annual Bonn-Power decisions vs passed / not-passed laws;
# not-passed counts are only shown from 2006 (no data before).
laws.bonn.year.plot <- laws.bonn.year %>%
  select(year,BP.y, passed, `disputed, rejected, suspended`) %>%
  gather(key=bonn.laws, value=freq, BP.y, passed,`disputed, rejected, suspended`)%>%
  #filter(bonn.laws!="disputed, rejected, suspended" & year>2005)
  filter(bonn.laws!="disputed, rejected, suspended" |
           bonn.laws=="disputed, rejected, suspended" & year > 2005) %>%
  #filter(bonn.laws!="Removals and Suspensions from Office") %>%
  ggplot(.,aes(year,freq))+
  geom_line(aes(color=bonn.laws))+
  labs(y="annual total",
       x="",
       title="Annual total of Bonn Power decisions and passed laws",
       subtitle="excluding decisions to lift previous Bonn Power decisions",
       caption="laws.bonn.year.plot")+
  theme_tq()+
  theme(legend.position = "bottom",
        legend.title = element_blank(),
        panel.grid.major.x=element_blank(),
        panel.grid.minor.x = element_blank())+
  # scale_fill_brewer(palette="Set1")+
  scale_x_continuous(breaks=seq(1996,2014))+
  # labels follow the alphabetical order of the gathered key values
  scale_color_discrete(labels=c("annual total of Bonn Power decisions",
                                "annual total of not-passed laws",
                                "annual total of passed laws"))
print(laws.bonn.year.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-laws.bonn.year.plot.png"
ggsave(paste(folder,time,filename, sep=""), width=10, height=5)
# >> (pending )Scatter Plot Bonn Powers vs Laws ---------------------------------------------------------------------
#Interpetation: there seems to be a lag between Bonn Powers decisions and laws being passed
# Exploratory scatter, annual Bonn-Power decisions vs passed laws.
scatter.laws.bonn <- laws.bonn.year %>%
  select(year,BP.y, passed) %>%
  ggplot(.,aes(BP.y,passed))+
  geom_point()
print(scatter.laws.bonn)
# Laws per OHR ------------------------------------------------------------
# High Representative mandates: one row per HR with start/end date.
OHR.mandate <- read_excel("graphs.xlsx",sheet="OHRMandates")
OHR.mandate$interval <- interval(OHR.mandate$start.date,OHR.mandate$end.date)
# mandate length in whole weeks
OHR.mandate$length.w <- as.numeric(round(difftime(OHR.mandate$end.date, OHR.mandate$start.date, units=c("weeks")),0))
# > assign OHR to each law ------------------------------------------------
# Look up which HR's mandate each law date falls into. seq_len() (not 1:nrow)
# is safe for an empty table. NOTE(review): the lookup still assumes every
# law date matches exactly one mandate interval; a date matching zero or
# several intervals would error on this assignment.
laws9614$HR <- as.character("")
for(i in seq_len(nrow(laws9614))) {
  laws9614$HR[i] <- OHR.mandate$HR[laws9614$date[i] %within% OHR.mandate$interval]
}
# > legislative output per OHR, per week ----------------------------------------------
# Passed laws per High Representative (data only through 2014).
laws.ohr <- laws9614 %>%
  filter(year<2015)%>%
  filter(category2=="passed") %>%
  group_by(HR) %>%
  summarise(laws.n=n())
# join mandate metadata (drop the Interval column -- joins can't handle it)
x <- OHR.mandate %>%
  select(-interval)
laws.ohr <- inner_join(laws.ohr, x, by=c("HR"))
# laws per week of mandate
laws.ohr <- laws.ohr %>%
  mutate(laws.w=laws.n/length.w)
# !! ERROR: --------------------------------------------------------------
#length.w for inzko is misleading; we have laws only until the end of 2014, but Inzko's tenure is calculated
#up until May 2017; Bonn Power decisions after 2014 have to be removed; length has to be corrected
# axis label: "HR\nYY/MM-YY/MM", ordered chronologically below
laws.ohr$label <- paste(laws.ohr$HR,"\n",format(laws.ohr$start.date, "%y/%m"),"-",format(laws.ohr$end.date, "%y/%m"), sep="")
laws.ohr$label <- factor(laws.ohr$label,
                         levels=c("Westendorp\n97/06-99/08",
                                  "Petritsch\n99/08-02/05",
                                  "Ashdown\n02/05-06/01",
                                  "Schwarz-Schilling\n06/02-07/06",
                                  "Lajcak\n07/07-09/02",
                                  "Inzko\n09/03-14/12"))
# > graph legislative output per OHR ----------------------------------------------
# Two-facet lollipop: total passed laws and weekly average per HR.
laws.ohr.plot <- laws.ohr %>%
  select(label, laws.n, laws.w) %>%
  gather(key="laws",value="number",laws.n, laws.w)%>%
  mutate(number=round(number,2),
         laws=case_when(laws=="laws.n" ~ "total",
                        laws=="laws.w" ~ "avg. week"))%>%
  ggplot(.,aes(label,number))+
  geom_lollipop(aes(color=laws), size=1)+
  labs(title="Passed Laws per HR",
       subtitle="Total and average per week",
       caption="Data: OHR.int, Parlament.ba",
       y="number of laws", x="")+
  theme_minimal()+
  theme(legend.position = "none",
        legend.title = element_blank(),
        panel.grid.major.x=element_blank(),
        panel.grid.minor.x = element_blank(),
        panel.grid.minor.y=element_blank(),
        axis.title = element_text(size=8),
        plot.caption = element_text(size=8))+
  scale_color_manual(values=c("lightblue","darkblue"))+
  facet_wrap(~laws, scales="free",nrow=2)
print(laws.ohr.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-laws.ohr.plot.png"
ggsave(paste(folder,time,filename, sep=""), width=15, height=10, unit="cm")
# Laws and Bonn Power Decisions per OHR per week --------------------------
# Weekly Bonn-Power decision averages per HR (prepared in the spreadsheet).
BonnWeek <- read_excel("graphs.xlsx",sheet="BonnWeek",
                       col_names = TRUE)
Bonn.laws.w <- inner_join(BonnWeek, laws.ohr, by="HR")
# Scatter of weekly laws vs weekly Bonn-Power decisions, labelled per HR.
# NOTE(review): "Paralment.ba" in the caption string is a typo ("Parlament.ba")
# -- left as-is, it is runtime output.
Bonn.laws.w.plot <- Bonn.laws.w %>%
  select(label, laws.w, BP.w)%>%
  ggplot(.,aes(BP.w, laws.w, label=label))+
  #geom_point()+
  geom_label(size=2, fill="grey", alpha=0.1)+
  labs(title="Laws and 'Bonn Power' decisions per HR",
       subtitle="Average per week; decisions lifting previous 'Bonn Power' decisions excluded; \nonly passed laws",
       caption="Data: OHR.int, Paralment.ba",
       y="avg. weekly number of laws", x="avg. weekly number of 'Bonn Power' decisions")+
  theme_minimal()+
  theme(legend.position = "none",
        legend.title = element_blank(),
        panel.grid.major.x=element_blank(),
        panel.grid.minor.x = element_blank(),
        panel.grid.minor.y=element_blank(),
        axis.title = element_text(size=8),
        plot.caption = element_text(size=6))+
  scale_y_continuous(limits=c(0,1.25), breaks=seq(0,1.25, 0.25))+
  scale_x_continuous(limits=c(0,1.75), breaks=seq(0,1.75, 0.25))
print(Bonn.laws.w.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-Bonn.laws.w.plot.png"
ggsave(paste(folder,time,filename, sep=""), width=15, height=10, unit="cm")
| /BiHLaws/BiHLawAnalysis.R | no_license | rs2903/BiH | R | false | false | 29,984 | r | # load packages -----------------------------------------------------------
# Use library() rather than require() for mandatory dependencies: require()
# only returns FALSE when a package is missing, so the script would fail much
# later with a confusing error instead of failing fast here.
library(ggplot2)
library(plotly)
library(stringi)
library(tidyr)
library(purrr)
library(dplyr)
library(rio)
library(lubridate)
library(readxl)
library(stringdist)
library(xlsx) #
library(reldist)
library(ggthemes)
library(ggalt) #for lollipop charts
library(highcharter)
#theme_set(theme_bw())
theme_set(theme_minimal())  # global ggplot theme for every plot below
library(tidyquant)
#library(XLConnect)
wdr <- getwd()  # project root; data paths below are built relative to this
# >> load dataset ---------------------------------------------------------
#setwd("C:/Users/Roland/Downloads")
Sys.setlocale("LC_CTYPE", "russian") #allows displaying party names in cyrillic
laws <- readxl::read_xlsx(paste0(wdr, "/data/1997_2014_Laws.xlsx"), sheet="BiHLaws")
# recode type of law ------------------------------------------------------
# Map the numeric parlament.ba category codes to labels. NOTE: the first
# assignment coerces the column to character; the remaining numeric
# comparisons still match because the untouched values become "2", "4", etc.
laws$category[laws$category==1] <- "under consideration"
laws$category[laws$category==2] <- "adopted"
#laws$category[laws$category==3] does not exist
laws$category[laws$category==4] <- "rejected" # "Odbijeni zakoni" = rejected laws
laws$category[laws$category==5] <- "disputed" # "Povuceni zakoni" = withdrawn laws
laws$category[laws$category==6] <- "suspended"
laws$category[laws$category==7] <- "expired"
laws$category[laws$category==8] <- "previously adopted"
laws$date.start2 <- as.Date(laws$date.start, "%d.%m.%Y")
#category 3 is unrelated; link leads somewhere else on the page
laws <- laws %>% filter(category!=3)
# laws$year <- year(laws$date.start2)
# class(laws$year)
laws$year <- as.numeric(strftime(laws$date.start2, format="%Y"))
class(laws$year)  # interactive check: should be numeric
# Plot --------------------------------------------------------------------
# Daily bar chart of laws by date, faceted per category.
laws.bar <- laws %>%
  #filter(area %in% filter.area) %>%
  ggplot(.,aes(date.start2))+
  geom_bar(aes(fill=category)) +
  labs(y="Number of Laws",
       x=NULL,
       title="Number of Laws",
       subtitle="selected categories")+
  theme(legend.position = "none",
        panel.grid.major.x=element_blank(),
        panel.grid.minor.x = element_blank())+
  scale_fill_brewer(palette="Set1")+
  scale_x_date(date_labels="%Y", date_breaks = "1 year")+
  scale_y_continuous(limits=c(0,10))+
  facet_grid(category~.)#+
  #geom_vline(data=OHR, aes(xintercept=as.numeric(start.date)), linetype=4)+
  #geom_text(data=OHR, aes(label=HR, x=start.date, y=10), size=3 ,angle = 60, hjust = 0)
print(laws.bar)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-laws.bar.pdf"
ggsave(paste(folder,time,filename, sep=""), width=10, height=5)
# Yearly sums --------------------------------------------------------------
# Annual law counts per category, stacked bar chart.
laws.year <- laws %>%
  group_by(category, year) %>%
  summarise(freq=n()) %>%
  ungroup()
class(laws.year$freq)  # interactive check
laws.year <- as.data.frame(laws.year)
laws.year.bar <- laws.year %>%
  #filter(area %in% filter.area) %>%
  ggplot(.,aes(year, freq))+
  geom_col(aes(fill=category)) +
  labs(y="Number of Laws per year",
       x=NULL,
       title="Number of Laws",
       subtitle="selected categories")+
  theme(legend.position = "bottom",
        panel.grid.major.x=element_blank(),
        panel.grid.minor.x = element_blank())+
  scale_fill_brewer(palette="Set1")+
  scale_x_continuous(breaks=seq(min(laws.year$year),max(laws.year$year),1)) #+
  # scale_y_continuous(limits=c(0,10))+
  #facet_grid(category~.)#+
  #geom_vline(data=OHR, aes(xintercept=as.numeric(start.date)), linetype=4)+
  #geom_text(data=OHR, aes(label=HR, x=start.date, y=10), size=3 ,angle = 60, hjust = 0)
print(laws.year.bar)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-laws.year.bar.pdf"
ggsave(paste(folder,time,filename, sep=""), width=10, height=5)
# Laws 1996 - 2006 ---------------------------------------------------------
# The four legislature sheets share one layout; parse them with a single
# helper instead of four copy-pasted blocks (as before, the 02-06 sheet wraps
# its dates in slashes instead of parentheses).
library(stringr)

# Read one "BiHLaws..." sheet from graphs.xlsx and derive per-law dates.
#   sheet        -- sheet name in graphs.xlsx
#   wrap_pattern -- regex matching the delimiter-wrapped date token,
#                   e.g. "\\([^()]+\\)" for "(dd.mm.yyyy.)" or
#                   "\\/[^()]+\\/" for "/dd.mm.yyyy./"
# Returns the sheet with parsed date.HoP / date.HoR, `date` (the later of the
# two chamber dates) and `year`.
parse_law_sheet <- function(sheet, wrap_pattern) {
  df <- read_excel("graphs.xlsx", sheet = sheet)
  names(df) <- c("No", "law", "date.HoP", "date.HoR", "session")
  # Extract the wrapped date and strip the leading delimiter plus the two
  # trailing characters (closing delimiter + trailing dot of the local date
  # format) -- identical arithmetic to the original per-sheet code.
  strip_date <- function(x) {
    wrapped <- str_extract_all(x, wrap_pattern)
    as.Date(substring(wrapped, 2, nchar(wrapped) - 2), "%d.%m.%Y")
  }
  df$date.HoP <- strip_date(df$date.HoP)
  df$date.HoR <- strip_date(df$date.HoR)
  df$date <- pmax(df$date.HoP, df$date.HoR)  # later of HoP/HoR adoption dates
  df$year <- year(df$date)
  df
}

laws9698 <- parse_law_sheet("BiHLaws96-98", "\\([^()]+\\)")
laws9800 <- parse_law_sheet("BiHLaws98-00", "\\([^()]+\\)")
laws0002 <- parse_law_sheet("BiHLaws00-02", "\\([^()]+\\)")
laws0206 <- parse_law_sheet("BiHLaws02-06", "\\/[^()]+\\/")

# Laws 96 - 2006 ----------------------------------------------------------
laws9606 <- rbind(laws9698, laws9800, laws0002, laws0206)
# Laws 96 - 2014 monthly --------------------------------------------------
# Harmonise the 2006-14 scrape to the 96-06 column layout, then stack.
laws0614 <- laws %>%
  select(date.start2, law.title, category)%>%
  rename(date=date.start2,
         law=law.title)
# The 96-06 sheets only list laws that were adopted.
laws9606 <- laws9606 %>%
  select(date, law) %>%
  mutate(category="adopted")
laws9614 <- rbind(laws9606, laws0614)
laws9614 <- laws9614 %>%
  filter(date < "2015-01-01")
# dichotomy passed / not passed / under consideration ---------------------
adopted <- c("adopted","previously adopted")
laws9614$category2<- ifelse(laws9614$category %in% adopted, "passed","disputed, rejected, suspended")
# fix-up: pending laws get their own bucket, not "not passed"
laws9614$category2[laws9614$category=="under consideration"] <- "under consideration"
library(zoo)
# monthly counts per raw category (zoo::as.yearmon for calendar months)
laws9614.m <- laws9614 %>%
  mutate(yearmonth=as.yearmon(date)) %>%
  group_by(yearmonth, category) %>%
  summarise(n=n())%>%
  ungroup()
# plot laws monthly -------------------------------------------------------
# BUG FIX: the original chain ended in a dangling "+" which pulled the
# print() call into the ggplot sum and errored ("object 'laws.month.bar'
# not found"); complete the plot with a column geom before printing.
laws.month.bar <- laws9614.m %>%
  #filter(area %in% filter.area) %>%
  ggplot(.,aes(yearmonth, n))+
  geom_col(aes(fill=category))
print(laws.month.bar)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-laws.month.bar.pdf"
ggsave(paste(folder,time,filename, sep=""), width=10, height=5)
# Plot laws 96 - 14 yearly ------------------------------------------------
laws9614$year <- as.numeric(strftime(laws9614$date, format="%Y"))
class(laws9614$year)  # interactive check: should be numeric
# annual counts per raw category, stacked bars
laws9614.y <- laws9614 %>%
  group_by(year, category) %>%
  summarise(freq=n())
laws9614.year.bar <- laws9614.y %>%
  #filter(area %in% filter.area) %>%
  ggplot(.,aes(year, freq))+
  geom_col(aes(fill=category)) +
  labs(y="Number of Laws per year",
       x=NULL,
       title="Number of Laws",
       subtitle="selected categories")+
  theme(legend.position = "bottom",
        panel.grid.major.x=element_blank(),
        panel.grid.minor.x = element_blank())+
  scale_fill_brewer(palette="Set1")+
  scale_x_continuous(breaks=seq(min(laws9614.y$year),max(laws9614.y$year),1)) #+
print(laws9614.year.bar)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-laws9614.year.bar.pdf"
ggsave(paste(folder,time,filename, sep=""), width=10, height=5)
# Plot laws 96 - 14 yearly Passed vs Non-Passed ------------------------------------------------
library(padr)
library(scales)
class(laws9614$year)
# laws9614.y <- laws9614 %>%
#   select(-year)%>%
#   group_by(category2) %>%
#   arrange(date)%>%
#   thicken(interval="year",colname="year", by="date")%>%
#   group_by(category2,year)%>%
#   summarize(freq=n())%>%
#   #ungroup()%>%
#   pad(group=c("category2"), interval="year", start=as.Date(min(laws9614.y$year))) %>%
#   fill_by_value(value=0)
class(laws9614.y$year)
# Annual counts per category2; spread/gather round-trip with fill=0 pads
# missing year/category combinations with zero so the dodged bars align.
laws9614.y <- laws9614%>%
  group_by(year,category2)%>%
  summarise(freq=n())%>%
  spread(key=category2, value=freq, fill=0, drop=FALSE)%>%
  gather(key=category2, value=freq, 2:4)%>%
  mutate(category2=factor(category2, levels=c("passed", "disputed, rejected, suspended", "under consideration")))
# Dodged bars; dashed verticals mark the Bonn Powers (Dec 1997) and the
# start of reliable non-passed data (2006).
passed.nonpassed.laws9614.year.bar <- laws9614.y %>%
  #filter(area %in% filter.area) %>%
  ggplot(.,aes(year, freq))+
  geom_col(aes(fill=category2), position="dodge") +
  labs(y=NULL,
       x=NULL,
       title="Number of laws per year in BiH Parliament",
       subtitle="",
       caption="Data: www.parlament.ba")+
  theme(legend.position = "bottom",
        legend.title = element_blank(),
        panel.grid.major.x=element_blank(),
        panel.grid.minor.x = element_blank(),
        plot.caption = element_text(size=8))+
  guides(fill=guide_legend(keywidth = 2, keyheight = 0.7))+
  scale_y_continuous(limit=c(0,100))+
  geom_vline(xintercept = 1998, linetype="dashed", color="black")+ #Dec 1997 = Bonn Powers
  geom_vline(xintercept = 2006, linetype="dashed", color="black")+
  scale_fill_manual(values=c("#008B00", "#CD2626","#FF8C00"))+
  scale_x_continuous(limit=c(1995, 2015), breaks=seq(1995,2015), labels = paste(substring(as.character((seq(1995,2015))),3)))
#in case x axis is of type date
# scale_x_date(limits=as.Date(c('1995-05-01','2015-01-01')), date_breaks="1 year", labels=date_format("%Y"))+
# geom_vline(xintercept = as.Date('1998-01-01'), linetype="dashed", color="black")+ #Dez 1997 = Bonn Powers
# geom_vline(xintercept = as.Date('2006-01-01'), linetype="dashed", color="black")
#creates 2 character long label: paste(substring(as.character((seq(1995,2015))),3))
print(passed.nonpassed.laws9614.year.bar)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-passed.nonpassed.laws9614.year.bar.png"
ggsave(paste(folder,time,filename, sep=""), width=16, height=7, unit="cm")
# Bar Plot - only passed laws ---------------------------------------------
# Per-year totals of passed laws (long format) -- this is what the line plot
# below expects (columns `year` and `freq`).
passed.laws9614.year.bar <- laws9614 %>%
  filter(category %in% c("adopted","previously adopted")) %>%
  group_by(year)%>%
  summarise(freq=n())
# BUG FIX: the original overwrote passed.laws9614.year.bar with this wide
# spread() export table, so the ggplot pipeline below could no longer find
# `year`/`freq`. The export table now has its own name. (A dead first
# assignment grouped by year+category was removed as well.)
passed.laws9614.year.wide <- passed.laws9614.year.bar %>%
  spread(year,freq)
#write.table(passed.laws9614.year.wide, file = "passedlaws.txt", sep = ",", quote = FALSE, row.names = F)
passed.laws9614.year.bar.plot <- passed.laws9614.year.bar %>%
  ggplot(.,aes(year, freq))+
  #geom_col(aes(fill=category))+
  geom_line()+
  labs(title="Number of passed Laws",
       subtitle="adopted and previously adopted laws")+   # typo "previoiusly" fixed
  theme(legend.position = "none",
       panel.grid.major.x=element_blank(),
        panel.grid.minor.x = element_blank())+
  scale_fill_brewer(palette="Set1")+
  scale_x_continuous(limit=c(1995, 2015), breaks=seq(1995,2015))
print(passed.laws9614.year.bar.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-passed.laws9614.year.bar..plot.pdf"
ggsave(paste(folder,time,filename, sep=""), width=10, height=5)
# [pending] Bar Plot - only non-passed laws ---------------------------------------------
# filter.category <- c("adopted","previously adopted","under consideration")
#
# not.passed.laws9614.year.bar <- laws9614.y %>%
# filter(!category %in% filter.category) %>%
# ggplot(.,aes(year, freq))+
# geom_col() +
# #geom_col(aes(fill=category)) +
# labs(y=NULL,
# x=NULL,
# title="Number of not-passed Laws",
# subtitle="laws which are disputed, rejected, or suspended")+
# theme(legend.position = "none",
# panel.grid.major.x=element_blank(),
# panel.grid.minor.x = element_blank())+
# scale_fill_brewer(palette="Set1")+
# #scale_x_continuous(breaks=seq(min(laws9614.y$year),max(laws9614.y$year),1)) #+
# scale_y_continuous(limit=c(0,100))+
# scale_x_continuous(limit=c(1995, 2015), breaks=seq(1995,2015))
#
# print(not.passed.laws9614.year.bar)
#
# folder <-"graphs/draft/"
# time <- format(Sys.time(),"%Y%m%d-%H%M%S")
# filename <-"-not.passed.laws9614.year.bar.pdf"
# ggsave(paste(folder,time,filename, sep=""), width=10, height=5)
# Laws per Government -----------------------------------------------------
# >> load government periods ----------------------------------------------
Gov <- read_excel("graphs.xlsx",sheet="BiHgovs")
Gov$interval <- interval(Gov$start, Gov$end)
Gov$name <- paste(Gov$no, Gov$chair, sep = " - ")
# "YY/MM-YY/MM" mandate span used in axis labels
Gov$interval2<- paste(strftime(Gov$start, "%y/%m"), strftime(Gov$end, "%y/%m"),sep="-")
class(Gov$interval2)  # interactive check
Gov$name2 <- paste(Gov$name,Gov$interval2,sep="\n")
# >> Gov duration ---------------------------------------------------------
Gov$duration <- difftime(Gov$end, Gov$start, units = "days")
# >> loop to match government period with date of law ---------------------
# seq_len() (not 1:nrow) is safe for an empty table. NOTE(review): the lookup
# assumes every law date falls into exactly one mandate interval; zero or
# overlapping matches would error here.
laws9614$gov <- as.character("")
for(i in seq_len(nrow(laws9614))) {
  laws9614$gov[i] <- Gov$name2[laws9614$date[i] %within% Gov$interval]
}
laws9614$gov <- as.factor(laws9614$gov)
# chronological ordering of the governments for plotting
laws9614$gov <- factor(laws9614$gov,
                       levels=c("1 - Bosic/Silajdzic\n97/01-99/02",
                                "2 - Silajdzic/Mihajlovic\n99/02-00/06",
                                "3 - Tusevljak\n00/06-00/10",
                                "4 - Raguz\n00/10-01/02",
                                "5 - Matic\n01/02-01/07",
                                "6 - Lagumdzija\n01/07-02/03",
                                "7 - Mikerevic\n02/03-02/12",
                                "8 - Terzic\n02/12-07/01",
                                "9 - Spric\n07/01-07/12",
                                "10 - Spiric\n07/12-12/01",
                                "11 - Bevanda\n12/01-15/02",
                                "12 - Zvizdic\n15/02-17/05"))
# >> dichotomy passed vs non-passed laws ----------------------------------
# Collapse the raw parlament.ba categories into passed / not passed /
# under consideration. Using %in% keeps one branch per outcome instead of
# one branch per raw category; "expired" is intentionally unmatched -> NA,
# exactly as in the branch-per-category version.
laws9614 <- laws9614 %>%
  mutate(status=case_when(category %in% c("adopted", "previously adopted") ~ "passed",
                          category %in% c("rejected", "disputed", "suspended") ~ "not passed",
                          category=="under consideration" ~ "under consideration"))
# >> sum passed laws per government ---------------------------------------
laws.gov <- laws9614 %>%
group_by(gov, status) %>%
summarise(freq=n())
# >> plot laws per government ------------------------------------------------------------------------
laws.gov.plot <- laws.gov %>%
filter(status=="passed")%>%
ggplot(.,aes(gov, freq))+
geom_col(aes(),fill="darkblue", position="dodge") +
labs(y=NULL,
x=NULL,
title="Number of Laws",
subtitle="selected categories")+
theme_tq()+
theme(legend.position = "bottom",
legend.title = element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.minor.x = element_blank())
print(laws.gov.plot)
# >> Laws per day ---------------------------------------------------------
laws.day <- laws9614 %>%
filter(status=="passed") %>%
group_by(gov)%>%
summarise(n.laws=n())
laws.day<- full_join(laws.day, Gov[c("name2","duration")], by=c("gov"="name2"))
laws.day <- laws.day %>%
mutate(duration.day=as.numeric(duration),
duration.week=duration.day/7,
ratio.day=n.laws/duration.day,
ratio.week=n.laws/duration.week)
levels(laws9614$gov)
# >> plot laws per week ---------------------------------------------------
laws.day$gov <- factor(laws.day$gov,
levels=c("1 - Bosic/Silajdzic\n97/01-99/02",
"2 - Silajdzic/Mihajlovic\n99/02-00/06",
"3 - Tusevljak\n00/06-00/10",
"4 - Raguz\n00/10-01/02",
"5 - Matic\n01/02-01/07",
"6 - Lagumdzija\n01/07-02/03",
"7 - Mikerevic\n02/03-02/12",
"8 - Terzic\n02/12-07/01",
"9 - Spric\n07/01-07/12",
"10 - Spiric\n07/12-12/01",
"11 - Bevanda\n12/01-15/02",
"12 - Zvizdic\n15/02-17/05"))
AllianceOfChange <- c("5 - Matic\n01/02-01/07",
"6 - Lagumdzija\n01/07-02/03",
"7 - Mikerevic\n02/03-02/12")
#geom_rect over adjacent factor levels on x-axis;
#https://stackoverflow.com/questions/31381053/changing-the-background-color-of-a-ggplot-chart-with-a-factor-variable?noredirect=1&lq=1
rects <- data.frame(xstart=seq(0.5,11.5,1),xend=seq(1.5,12.5,1),col=levels(laws.day$gov))
laws.week.plot <- laws.day %>%
filter(gov!="12 - Zvizdic\n15/02-17/05") %>%
mutate(AoF=ifelse(gov %in% AllianceOfChange, "AoF",""))%>%
ggplot(.,aes(gov, ratio.week))+
geom_lollipop(color="#20B2AA", size=2)+
geom_rect(data=rects[rects$col %in% AllianceOfChange,], aes(xmin=xstart, xmax=xend,
ymin=0, ymax=Inf),
fill="grey", alpha=0.3, inherit.aes = FALSE)+
geom_label(aes(x=5, y=1,label="'Alliance of Change'"), label.size=0, fill="lightgrey",
size=3, fontface="italic", hjust="left",inherit.aes=FALSE)+
labs(title="Legislative Output per Government",
subtitle="Average number of laws passed per week",
caption="Data: Parlament.ba",
y="laws per week", x="")+
theme_minimal()+
theme(legend.position="bottom",
legend.title = element_blank(),
plot.caption = element_text(size=8),
panel.grid.major.x = element_blank(),
axis.text.x = element_text(angle = 45, hjust = 1))+
# scale_color_discrete(breaks="AoF",name="", labels=c("Alliance of Change"))+
scale_y_continuous(limits = c(0,1.2))
print(laws.week.plot)
# Save under the "<folder><timestamp>-<name>" pattern used by every other
# ggsave() in this file. The original paste(..., sep="-") also inserted a
# stray "-" between directory and timestamp ("graphs/draft/-2017...").
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
scriptname <- "BiHLawAnalysis"
plotname <-"laws.week.plot.png"
ggsave(paste0(folder, time, "-", scriptname, "-", plotname), width=15, height=9, unit="cm")
# >> Ratio Passed - Non-Passed Laws ---------------------------------------
# Annual ratio (%) of passed to not-passed laws, 2006-2014 only (no reliable
# outcome data before 2006). The ratio now refers to the spread() columns by
# NAME instead of position (.[[3]]/.[[2]]): a change in column order can no
# longer silently flip the ratio.
passed.ratio <- laws9614 %>%
  group_by(year, category2)%>%
  summarise(freq=n())%>%
  filter(year>2005)%>%
  filter(!year>2014)%>%
  filter(category2!="under consideration")%>%
  spread(category2, freq)%>%
  ungroup()%>%
  mutate(ratio=round(passed / `disputed, rejected, suspended` * 100, 2))%>%
  gather("category2","n", 2:4)
# >>> ratio Plot ----------------------------------------------------------
plot.passed.ratio <- passed.ratio %>%
filter(category2=="ratio")%>%
ggplot(.,aes(year,n))+
geom_rect(aes(xmin=-Inf,xmax=+Inf,ymin=0, ymax=100), fill="grey", alpha=0.3)+
annotate("text",2009, 51, label="more laws are rejected than passed")+
geom_line()+
labs(title="Ratio of Passed to Non-Passed Laws",
subtitle="Number of adoped to disputed, rejected or suspended laws;\nno data prior to 2006 available",
caption="Data: Parlament.ba",
y="%", x="")+
#geom_hline(yintercept=100)+
theme_minimal()+
theme(legend.position = "none",
plot.caption = element_text(size=8),
panel.grid.major.x=element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y=element_blank())+
scale_x_continuous(limit=c(2006, 2014), breaks=seq(2006,2014))+
scale_y_continuous(breaks=seq(0,600,100))
print(plot.passed.ratio)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-plot.passed.ratio.png"
ggsave(paste(folder,time,filename, sep=""), width=15, height=9, unit="cm")
# >>> ratio plot 2 - passed vs non-passed ----------------------------------
###
plot.passed.ratio2 <- passed.ratio %>%
filter(category2!="ratio")%>%
ggplot(.,aes(year,n))+
geom_col(aes(fill=category2), position="dodge")+
labs(y="number of laws",
x="",
title="Annual totals of passed and not-passed laws",
subtitle="no data prior to 2006",
caption="plot.passed.ratio2" )+
theme_tq()+
theme(legend.position = "bottom",
legend.title = element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.minor.x = element_blank())+
scale_fill_brewer(palette="Set1")+
scale_x_continuous(breaks=seq(2006,2014))
print(plot.passed.ratio2)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-plot.passed.ratio2.png"
ggsave(paste(folder,time,filename, sep=""), width=10, height=5)
# >> Annual Laws vs Annual Bonn Powers ---------------------------------------------------------
# >>> Bonn Power decisions per year ----------------------------------------
# Load the scraped OHR "Bonn Power" decisions and tag each with its year.
BP <- read_excel("graphs.xlsx", sheet="BonnPowersScrapped")
nrow(BP)  # row count before the split/re-bind below (sanity check)
BP$year <- year(BP$date.publish)
# >>> mark lifts of removals ----------------------------------------
# Decisions whose name contains "lift" undo an earlier removal; relabel them
# so later totals can exclude them.
removal_rows <- BP %>%
  filter(area=="Removals and Suspensions from Office") %>%
  mutate(area=ifelse(grepl("lift", decision.name, ignore.case = TRUE),
                     "Lifted Removals and Suspensions from Office",
                     "Removals and Suspensions from Office"))
other_rows <- BP %>%
  filter(area!="Removals and Suspensions from Office")
# Re-assemble: removals (relabelled) first, then everything else.
BP <- bind_rows(removal_rows, other_rows)
nrow(BP)  # must equal the pre-split count
# >> Bonn Powers per year ---------------------------------------------------------------------
BP.year <- BP %>%
group_by(year,area)%>%
summarise(freq=n())%>%
group_by(year)%>%
mutate(BP.y=sum(freq[area!="Lifted Removals and Suspensions from Office"]))%>% #not to include in total!
spread(key=area,value=freq, fill=0)
# >> Laws per year ---------------------------------------------------------------------
laws.year <- laws9614 %>%
group_by(year, category2)%>%
summarise(freq=n())%>%
spread(key=category2, value=freq, fill=0)%>%
filter(year<2015)
laws.bonn.year <- inner_join(BP.year, laws.year, by="year")
# >> Plot Bonn Powers vs Laws ---------------------------------------------------------------------
# Line plot of annual Bonn Power decisions vs. passed / not-passed laws.
# Not-passed laws are only shown from 2006 onward (see filter below).
laws.bonn.year.plot <- laws.bonn.year %>%
select(year,BP.y, passed, `disputed, rejected, suspended`) %>%
gather(key=bonn.laws, value=freq, BP.y, passed,`disputed, rejected, suspended`)%>%
#filter(bonn.laws!="disputed, rejected, suspended" & year>2005)
filter(bonn.laws!="disputed, rejected, suspended" |
bonn.laws=="disputed, rejected, suspended" & year > 2005) %>%
#filter(bonn.laws!="Removals and Suspensions from Office") %>%
ggplot(.,aes(year,freq))+
geom_line(aes(color=bonn.laws))+
labs(y="annual total",
x="",
title="Annual total of Bonn Power decisions and passed laws",
subtitle="excluding decisions to lift previous Bonn Power decisions",
caption="laws.bonn.year.plot")+
theme_tq()+
theme(legend.position = "bottom",
legend.title = element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.minor.x = element_blank())+
# scale_fill_brewer(palette="Set1")+
scale_x_continuous(breaks=seq(1996,2014))+
# Legend labels rely on the alphabetical order of the gathered keys
# (BP.y, disputed..., passed) -- TODO confirm if categories change.
scale_color_discrete(labels=c("annual total of Bonn Power decisions",
"annual total of not-passed laws",
"annual total of passed laws"))
print(laws.bonn.year.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-laws.bonn.year.plot.png"
ggsave(paste(folder,time,filename, sep=""), width=10, height=5)
# >> (pending )Scatter Plot Bonn Powers vs Laws ---------------------------------------------------------------------
# Interpretation: there seems to be a lag between Bonn Powers decisions and laws being passed
scatter.laws.bonn <- laws.bonn.year %>%
select(year,BP.y, passed) %>%
ggplot(.,aes(BP.y,passed))+
geom_point()
print(scatter.laws.bonn)
# Laws per OHR ------------------------------------------------------------
# Read the High Representative mandate table and derive, per mandate, its
# lubridate interval and its length in whole weeks.
OHR.mandate <- read_excel("graphs.xlsx",sheet="OHRMandates")
OHR.mandate$interval <- interval(OHR.mandate$start.date,OHR.mandate$end.date)
OHR.mandate$length.w <- as.numeric(round(difftime(OHR.mandate$end.date, OHR.mandate$start.date, units=c("weeks")),0))
# > assign OHR to each law ------------------------------------------------
# For each law, record the name of the HR whose mandate interval contains the
# law's date. seq_len() instead of 1:nrow() so the loop body is skipped
# (rather than iterating over c(1, 0)) if laws9614 were empty.
# NOTE(review): assumes each law date falls inside exactly one mandate
# interval; overlapping or uncovered dates would make the RHS length != 1.
laws9614$HR <- as.character("")
for(i in seq_len(nrow(laws9614))) {
laws9614$HR[i] <- OHR.mandate$HR[laws9614$date[i] %within% OHR.mandate$interval]
}
# > legislative output per OHR, per week ----------------------------------------------
# Count passed laws per HR (years < 2015 only), join the mandate metadata,
# and compute the average number of laws per mandate week.
laws.ohr <- laws9614 %>%
filter(year<2015)%>%
filter(category2=="passed") %>%
group_by(HR) %>%
summarise(laws.n=n())
# Drop the interval column before joining (Interval objects do not join well).
x <- OHR.mandate %>%
select(-interval)
laws.ohr <- inner_join(laws.ohr, x, by=c("HR"))
laws.ohr <- laws.ohr %>%
mutate(laws.w=laws.n/length.w)
# !! ERROR: --------------------------------------------------------------
#length.w for inzko is misleading; we have laws only until the end of 2014, but Inzko's tenure is calculated
#up until May 2017; Bonn Power decisions after 2014 have to be removed; length has to be corrected
# Build chronological facet labels of the form "Name\nYY/MM-YY/MM"; the
# factor levels below pin the chronological order for plotting.
laws.ohr$label <- paste(laws.ohr$HR,"\n",format(laws.ohr$start.date, "%y/%m"),"-",format(laws.ohr$end.date, "%y/%m"), sep="")
laws.ohr$label <- factor(laws.ohr$label,
levels=c("Westendorp\n97/06-99/08",
"Petritsch\n99/08-02/05",
"Ashdown\n02/05-06/01",
"Schwarz-Schilling\n06/02-07/06",
"Lajcak\n07/07-09/02",
"Inzko\n09/03-14/12"))
# > graph legislative output per OHR ----------------------------------------------
# Lollipop chart of passed laws per HR, faceted into total count and weekly
# average; saved as a timestamped PNG.
laws.ohr.plot <- laws.ohr %>%
select(label, laws.n, laws.w) %>%
gather(key="laws",value="number",laws.n, laws.w)%>%
mutate(number=round(number,2),
laws=case_when(laws=="laws.n" ~ "total",
laws=="laws.w" ~ "avg. week"))%>%
ggplot(.,aes(label,number))+
geom_lollipop(aes(color=laws), size=1)+
labs(title="Passed Laws per HR",
subtitle="Total and average per week",
caption="Data: OHR.int, Parlament.ba",
y="number of laws", x="")+
theme_minimal()+
theme(legend.position = "none",
legend.title = element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y=element_blank(),
axis.title = element_text(size=8),
plot.caption = element_text(size=8))+
scale_color_manual(values=c("lightblue","darkblue"))+
facet_wrap(~laws, scales="free",nrow=2)
print(laws.ohr.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-laws.ohr.plot.png"
ggsave(paste(folder,time,filename, sep=""), width=15, height=10, unit="cm")
# Laws and Bonn Power Decisions per OHR per week --------------------------
# Scatter of weekly averages: Bonn Power decisions (x) vs. passed laws (y)
# per HR, drawn as labelled boxes instead of points.
# NOTE(review): BP.w is read pre-computed from the BonnWeek sheet -- verify it
# excludes post-2014 decisions (see the ERROR note above about Inzko's tenure).
BonnWeek <- read_excel("graphs.xlsx",sheet="BonnWeek",
col_names = TRUE)
Bonn.laws.w <- inner_join(BonnWeek, laws.ohr, by="HR")
Bonn.laws.w.plot <- Bonn.laws.w %>%
select(label, laws.w, BP.w)%>%
ggplot(.,aes(BP.w, laws.w, label=label))+
#geom_point()+
geom_label(size=2, fill="grey", alpha=0.1)+
labs(title="Laws and 'Bonn Power' decisions per HR",
subtitle="Average per week; decisions lifting previous 'Bonn Power' decisions excluded; \nonly passed laws",
caption="Data: OHR.int, Paralment.ba",
y="avg. weekly number of laws", x="avg. weekly number of 'Bonn Power' decisions")+
theme_minimal()+
theme(legend.position = "none",
legend.title = element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y=element_blank(),
axis.title = element_text(size=8),
plot.caption = element_text(size=6))+
scale_y_continuous(limits=c(0,1.25), breaks=seq(0,1.25, 0.25))+
scale_x_continuous(limits=c(0,1.75), breaks=seq(0,1.75, 0.25))
print(Bonn.laws.w.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
filename <-"-Bonn.laws.w.plot.png"
ggsave(paste(folder,time,filename, sep=""), width=15, height=10, unit="cm")
|
#' Create a data cube from an image collection
#'
#' Create a proxy data cube, which loads data from a given image collection according to a data cube view
#'
#' @param image_collection Source image collection as from \code{image_collection} or \code{create_image_collection}
#' @param view A data cube view defining the shape (spatiotemporal extent, resolution, and spatial reference), if missing, a default overview is used
#' @param mask mask pixels of images based on band values, see \code{\link{image_mask}}
#' @param chunking length-3 vector or a function returning a vector of length 3, defining the size of data cube chunks in the order time, y, x.
#' @return A proxy data cube object
#' @details
#' The following steps will be performed when the data cube is requested to read data of a chunk:
#'
#' 1. Find images from the input collection that intersect with the spatiotemporal extent of the chunk
#' 2. For all resulting images, apply gdalwarp to reproject, resize, and resample to an in-memory GDAL dataset
#' 3. Read the resulting data to the chunk buffer and optionally apply a mask on the result
#' 4. Update pixel-wise aggregator (as defined in the data cube view) to combine values of multiple images within the same data cube pixels
#'
#' If chunking is provided as a function, it must accept exactly three arguments for the total size of the cube in t, y, and x axes (in this order).
#'
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-01", t1="2018-12"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' raster_cube(L8.col, v)
#'
#' # using a mask on the Landsat quality bit band to filter out clouds
#' raster_cube(L8.col, v, mask=image_mask("BQA", bits=4, values=16))
#'
#' @note This function returns a proxy object, i.e., it will not start any computations besides deriving the shape of the result.
#' @export
raster_cube <- function(image_collection, view, mask=NULL, chunking=.pkgenv$default_chunksize) {
  stopifnot(is.image_collection(image_collection))
  if (is.function(chunking)) {
    if (missing(view)) {
      # Without a view, the cube shape is unknown, so a size-deriving function
      # cannot be evaluated; fall back to a fixed default chunk size.
      warning("Function to derive chunk sizes is not supported when the data cube view is missing, using fixed chunk size (1, 512, 512)")
      chunking = c(1, 512, 512)
    }
    else {
      chunking = chunking(view$time$nt, view$space$ny, view$space$nx)
    }
  }
  # Validate chunk sizes: exactly three strictly positive integers (t, y, x).
  stopifnot(length(chunking) == 3)
  chunking = as.integer(chunking)
  stopifnot(chunking[1] > 0, chunking[2] > 0, chunking[3] > 0)
  if (!is.null(mask)) {
    stopifnot(is.image_mask(mask))
  }
  x = NULL
  if (!missing(view)) {
    stopifnot(is.cube_view(view))
    # chunking is already an integer vector here (coerced above)
    x = gc_create_image_collection_cube(image_collection, chunking, mask, view)
  }
  else {
    x = gc_create_image_collection_cube(image_collection, chunking, mask)
  }
  class(x) <- c("image_collection_cube", "cube", "xptr")
  return(x)
}
#' Create a data cube from a set of images with the same spatial extent and spatial reference system
#'
#' Create a spatiotemporal data cube directly from images with identical spatial extent and spatial reference system, similar
#' to a raster stack with an additional dimension supporting both, time and multiple bands / variables.
#'
#' @details
#' This function creates a four-dimensional (space, time, bands / variables) raster data cube from a
#' set of provided files without the need to create an image collection before. This is possible if all images
#' have the same spatial extent and spatial reference system and can be used for two different file organizations:
#'
#' 1. If all image files share the same bands / variables, the \code{bands} argument can be ignored (default NULL) and
#' names of the bands can be specified using the \code{band_names} argument.
#'
#' 2. If image files represent different band / variable (e.g. individual files for red, green, and blue channels), the \code{bands}
#' argument must be used to define the corresponding band / variable. Notice that in this case all files are expected to
#' represent exactly one variable / band at one point in datetime. It is not possible to combine files with different
#' numbers of variables / bands. If image files for different bands have different pixel sizes, the smallest size is used
#' by default.
#'
#' Notice that to avoid opening all image files in advance, no automatic check whether all images share the
#' spatial extent and spatial reference system is performed.
#'
#' @param x character vector where items point to image files
#' @param datetime_values vector of type character, Date, or POSIXct with recording date of images
#' @param bands optional character vector defining the band or spectral band of each item in x, if files relate to different spectral bands or variables
#' @param band_names name of bands, only used if bands is NULL, i.e., if all files contain the same spectral band(s) / variable(s)
#' @param chunking vector of length 3 defining the size of data cube chunks in the order time, y, x.
#' @param dx optional target pixel size in x direction, by default (NULL) the original or highest resolution of images is used
#' @param dy optional target pixel size in y direction, by default (NULL) the original or highest resolution of images is used
#' @return A proxy data cube object
#' @examples
#' # toy example, repeating the same image as a daily time series
#' L8_file_nir <-
#'   system.file("L8NY18/LC08_L1TP_014032_20181122_20181129_01_T1/LC08_L1TP_014032_20181122_B5.TIF",
#'               package = "gdalcubes")
#' files = rep(L8_file_nir, 10)
#' datetime = as.Date("2018-11-22") + 1:10
#' stack_cube(files, datetime, band_names = "B05")
#'
#' # using a second band from different files
#' L8_file_red <-
#'   system.file("L8NY18/LC08_L1TP_014032_20181122_20181129_01_T1/LC08_L1TP_014032_20181122_B4.TIF",
#'               package = "gdalcubes")
#' files = rep(c(L8_file_nir, L8_file_red), each = 10)
#' datetime = rep(as.Date("2018-11-22") + 1:10, 2)
#' bands = rep(c("B5","B4"), each = 10)
#' stack_cube(files, datetime, bands = bands)
#'
#' @note This function returns a proxy object, i.e., it will not start any computations besides deriving the shape of the result.
#' @export
stack_cube <- function(x, datetime_values, bands = NULL, band_names = NULL, chunking = c(1, 256, 256), dx=NULL, dy=NULL) {
  # Every file needs exactly one recording datetime (and band, if given).
  if (length(datetime_values) != length(x)) {
    stop("x and datetime_values have different length")
  }
  if (!is.null(bands) && length(bands) != length(x)) {
    stop("x and bands have different length")
  }
  stopifnot(length(chunking) == 3)
  # Date / POSIXct inputs are forwarded to the native side as strings.
  if (!is.character(datetime_values)) {
    datetime_values = as.character(datetime_values)
  }
  # && (scalar, short-circuiting) instead of elementwise & in this if condition
  if (!is.null(bands) && !is.null(band_names)) {
    warning("Ignoring band_names because bands have been defined per file")
  }
  # The native interface expects empty character vectors rather than NULL.
  if (is.null(bands)) {
    bands = character(0)
  }
  if (is.null(band_names)) {
    band_names = character(0)
  }
  # Negative pixel sizes are the sentinel for "use the image resolution"
  # (see @param dx / @param dy).
  if (is.null(dx)) {
    dx = -1.0
  }
  if (is.null(dy)) {
    dy = -1.0
  }
  x = gc_create_simple_cube(x, datetime_values, bands, band_names, dx, dy, as.integer(chunking))
  class(x) <- c("simple_cube", "cube", "xptr")
  return(x)
}
#' Create a data cube proxy object copy
#'
#' Copy a data cube proxy object without copying any data
#'
#' @param cube source data cube proxy object
#' @return copied data cube proxy object
#' @details
#' This internal function duplicates the complete processing chain / graph of a
#' data cube while leaving the underlying data untouched. It is used internally
#' to avoid in-place modification for operations with potential side effects on
#' source data cubes.
.copy_cube <- function(cube) {
  # Copy the native object and carry over the original's class vector.
  structure(gc_copy_cube(cube), class = class(cube))
}
#' Read a data cube from a json description file
#'
#' @param json length-one character vector with a valid json data cube description
#' @param path source data cube proxy object
#' @return data cube proxy object
#' @details
#' Data cubes can be stored as JSON description files. These files do not store any data but the recipe
#' how a data cube is constructed, i.e., the chain (or graph) of processes involved.
#'
#' Since data cube objects (as returned from \code{\link{raster_cube}}) cannot be saved with normal R methods,
#' the combination of \code{\link{as_json}} and \code{\link{json_cube}} provides a cheap way to save data cube
#' objects across several R sessions, as in the examples.
#'
#' @examples{
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-01", t1="2018-12"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' cube = raster_cube(L8.col, v)
#'
#' # save
#' fname = tempfile()
#' writeLines(as_json(cube), fname)
#'
#' # load
#' json_cube(path = fname)
#' }
#'
#' @export
json_cube <- function(json, path = NULL) {
  # Prefer the in-memory JSON string if provided; path is a fallback.
  if (!missing(json)) {
    if (!is.null(path)) {
      warning("Expected only one of arguments 'json' and 'path'; path will be ignored")
    }
    cube = gc_from_json_string(json)
  }
  else {
    if (!is.null(path)) {
      cube = gc_from_json_file(path)
    }
    else {
      stop("Missing argument, please provide either a JSON string, or a path to a JSON file")
    }
  }
  # Restored cubes only carry the generic class, not the specific subtype.
  class(cube) <- "cube" # TODO: any way to derive exact cube type here?
  return(cube)
}
#' Create a mask for images in a raster data cube
#'
#' Create an image mask based on a band and provided values to filter pixels of images
#' read by \code{\link{raster_cube}}
#'
#' @details
#' Values of the selected mask band can be based on a range (by passing \code{min} and \code{max}) or on a set of values (by passing \code{values}). By default
#' pixels with mask values contained in the range or in the values are masked out, i.e. set to NA. Setting \code{invert = TRUE} will invert the masking behavior.
#' Passing \code{values} will override \code{min} and \code{max}.
#'
#' @note
#' Notice that masks are applied per image while reading images as a raster cube. They can be useful to eliminate e.g. cloudy pixels before applying the temporal aggregation to
#' merge multiple values for the same data cube pixel.
#'
#' @examples
#' image_mask("SCL", values = c(3,8,9)) # Sentinel 2 L2A: mask cloud and cloud shadows
#' image_mask("BQA", bits=4, values=16) # Landsat 8: mask clouds
#' image_mask("B10", min = 8000, max=65000)
#'
#' @param band name of the mask band
#' @param min minimum value, values between \code{min} and \code{max} will be masked
#' @param max maximum value, values between \code{min} and \code{max} will be masked
#' @param values numeric vector; specific values that will be masked.
#' @param bits for bitmasks, extract the given bits (integer vector) with a bitwise AND before filtering the mask values, bit indexes are zero-based
#' @param invert logical; invert mask
#' @export
image_mask <- function(band, min=NULL, max=NULL, values=NULL, bits=NULL, invert=FALSE) {
  # Either a set of values, or BOTH ends of a range, must be given
  # (single check replaces three near-duplicate branches with
  # inconsistently worded messages).
  if (is.null(values) && (is.null(min) || is.null(max))) {
    stop("either values or min and max must be provided")
  }
  if (!is.null(values)) {
    # values takes precedence over a simultaneously given range
    if (!is.null(min) || !is.null(max)) {
      warning("using values instead of min / max")
    }
    out = list(band = band, values = values, invert = invert, bits = bits)
  }
  else {
    out = list(band = band, min = min, max = max, invert = invert, bits = bits)
  }
  class(out) <- "image_mask"
  return(out)
}
# Test whether an object is an image mask as created by image_mask().
# inherits() is the idiomatic (and S4-safe) replacement for
# '"image_mask" %in% class(obj)' and returns the same TRUE/FALSE.
is.image_mask <- function(obj) {
  inherits(obj, "image_mask")
}
# Test whether an object is a valid image collection cube proxy:
# it must carry the image_collection_cube class AND its underlying
# external pointer must still be alive (warns and returns FALSE otherwise).
is.image_collection_cube <- function(obj) {
  # inherits() instead of '%in% class(obj)' (idiomatic class check)
  if (!inherits(obj, "image_collection_cube")) {
    return(FALSE)
  }
  if (gc_is_null(obj)) {
    warning("GDAL data cube proxy object is invalid")
    return(FALSE)
  }
  return(TRUE)
}
# Test whether an object is a valid data cube proxy: it must carry the
# cube class AND its underlying external pointer must still be alive
# (warns and returns FALSE otherwise).
is.cube <- function(obj) {
  # inherits() instead of '%in% class(obj)' (idiomatic class check)
  if (!inherits(obj, "cube")) {
    return(FALSE)
  }
  if (gc_is_null(obj)) {
    warning("GDAL data cube proxy object is invalid")
    return(FALSE)
  }
  return(TRUE)
}
#' Print data cube information
#'
#' Prints information about the dimensions and bands of a data cube.
#'
#' @param x Object of class "cube"
#' @param ... Further arguments passed to the generic print function
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-01", t1="2018-12"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' print(raster_cube(L8.col, v))
#' @export
print.cube <- function(x, ...) {
  if (gc_is_null(x)) {
    stop("GDAL data cube proxy object is invalid")
  }
  y = gc_cube_info(x)
  cat("A GDAL data cube proxy object\n")
  cat("\n")
  cat("Dimensions:\n")
  # One row per dimension (t, y, x), one column per dimension property.
  dimensions = data.frame(
    #name = c("time","y","x"),
    low = sapply(y$dimensions, function(z) z$low),
    high = sapply(y$dimensions, function(z) z$high),
    count = sapply(y$dimensions, function(z) z$count),
    pixel_size = sapply(y$dimensions, function(z) z$pixel_size),
    chunk_size = sapply(y$dimensions, function(z) z$chunk_size)
  )
  # If the time dimension carries labeled values, show at most the first
  # five in an extra column (the y / x rows get empty strings).
  if (!is.null(y$dimensions$t$values)) {
    nmax = 5
    str = paste(head(y$dimensions$t$values,nmax), collapse=",")
    if (length(y$dimensions$t$values) > nmax)
      str = paste0(str, ",...")
    dimensions$values = c(str, "","")
  }
  rownames(dimensions) = c("t","y","x")
  print(dimensions)
  cat("\n")
  cat("Bands:\n")
  print(y$bands)
  cat("\n")
}
#' Query data cube properties
#'
#' @return size of a data cube (number of cells) as integer vector in the order t, y, x
#' @seealso \code{\link{dim.cube}}
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' size(raster_cube(L8.col, v))
#' @export
size <- function(obj) {
  # Fail early if the external pointer behind the proxy is no longer valid.
  if (gc_is_null(obj)) {
    stop("GDAL data cube proxy object is invalid")
  }
  # The full size vector also contains the band count at position 1; only
  # the spatiotemporal part (t, y, x) is returned here.
  gc_cube_info(obj)$size[2:4]
}
#' Query data cube properties
#'
#' @return size of a data cube (number of cells) as integer vector in the order t, y, x
#' @seealso \code{\link{size}}
#' @param x a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' dim(raster_cube(L8.col, v))
#' @export
dim.cube <- function(x) {
  # S3 method for dim(); simply delegates to size().
  size(x)
}
#' Query data cube properties
#'
#' @return Band names as character vector
#'
#' @param x a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' names(raster_cube(L8.col, v))
#' @export
names.cube <- function(x) {
  # Fail early if the external pointer behind the proxy is no longer valid.
  if (gc_is_null(x)) {
    stop("GDAL data cube proxy object is invalid")
  }
  as.character(gc_cube_info(x)$bands$name)
}
#' Query data cube properties
#'
#' @return Dimension information as a list
#'
#' @details Elements of the returned list represent individual dimensions with properties such as dimension boundaries, names, and chunk size stored as inner lists
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' dimensions(raster_cube(L8.col, v))
#' @export
dimensions <- function(obj) {
  # Fail early if the external pointer behind the proxy is no longer valid.
  if (gc_is_null(obj)) {
    stop("GDAL data cube proxy object is invalid")
  }
  gc_cube_info(obj)$dimensions
}
#' Query data cube properties
#'
#' @return A data.frame with rows representing the bands and columns representing properties of a band (name, type, scale, offset, unit)
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' bands(raster_cube(L8.col, v))
#' @export
bands <- function(obj) {
  # Fail early if the external pointer behind the proxy is no longer valid.
  if (gc_is_null(obj)) {
    stop("GDAL data cube proxy object is invalid")
  }
  gc_cube_info(obj)$bands
}
#' Query data cube properties
#'
#' @return The spatial reference system expressed as a string readable by GDAL
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' srs(raster_cube(L8.col, v))
#' @export
srs <- function(obj) {
  stopifnot(is.cube(obj))
  gc_cube_info(obj)$srs
}
#' Query data cube properties
#'
#' @return The spatial reference system expressed as proj4 string
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' proj4(raster_cube(L8.col, v))
#' @export
proj4 <- function(obj) {
  stopifnot(is.cube(obj))
  gc_cube_info(obj)$proj4
}
#' Query data cube properties
#'
#' @return Total data size of data cube values expressed in the given unit
#'
#' @param obj a data cube proxy object (class cube)
#' @param unit Unit of data size, can be "B", "KB", "KiB", "MB", "MiB", "GB", "GiB", "TB", "TiB", "PB", "PiB"
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' memsize(raster_cube(L8.col, v))
#' @export
memsize <- function(obj, unit="MiB") {
  stopifnot(is.cube(obj))
  x = gc_cube_info(obj)
  size_bytes = prod(x$size) * 8 # assuming everything is double
  # Divisor per unit: decimal (KB, MB, ...) vs binary (KiB, MiB, ...) prefixes.
  # An unknown unit now raises an error; previously switch() fell through
  # and the function silently returned NULL.
  div = switch(unit,
               B = 1,
               KB = 1000,
               KiB = 1024,
               MB = 1000^2,
               MiB = 1024^2,
               GB = 1000^3,
               GiB = 1024^3,
               TB = 1000^4,
               TiB = 1024^4,
               PB = 1000^5,
               PiB = 1024^5,
               stop("invalid unit: ", unit))
  return(size_bytes / div)
}
#' Query data cube properties
#'
#' @return Number of bands
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' nbands(raster_cube(L8.col, v))
#' @export
nbands <- function(obj) {
  stopifnot(is.cube(obj))
  # size vector order: bands, t, y, x
  gc_cube_info(obj)$size[1]
}
#' Query data cube properties
#'
#' @return Number of pixels in the time dimension
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' nt(raster_cube(L8.col, v))
#' @export
nt <- function(obj) {
  stopifnot(is.cube(obj))
  # size vector order: bands, t, y, x
  gc_cube_info(obj)$size[2]
}
#' Query data cube properties
#'
#' @return Number of pixels in the y dimension
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' ny(raster_cube(L8.col, v))
#' @export
ny <- function(obj) {
  stopifnot(is.cube(obj))
  # size vector order: bands, t, y, x
  gc_cube_info(obj)$size[3]
}
#' Query data cube properties
#'
#' @return Number of pixels in the x dimension
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' nx(raster_cube(L8.col, v))
#' @export
nx <- function(obj) {
  stopifnot(is.cube(obj))
  # size vector order: bands, t, y, x
  gc_cube_info(obj)$size[4]
}
#' Query data cube properties
#'
#' gdalcubes uses a graph (currently a tree) to serialize data cubes (including chains of cubes). This function gives a JSON
#' representation, which will be communicated to gdalcubes_server instances to create identical cube instances
#' remotely.
#'
#' @return A JSON string representing a graph (currently a tree) that can be used to create the same
#' chain of gdalcubes operations.
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-04"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' cat(as_json(select_bands(raster_cube(L8.col, v), c("B04", "B05"))))
#' @export
as_json <- function(obj) {
  stopifnot(is.cube(obj))
  info = gc_cube_info(obj)
  # Pretty-print the serialized process graph for readability.
  jsonlite::prettify(info$graph)
}
#' Helper function to define packed data exports by min / max values
#'
#' This function can be used to define packed exports in \code{\link{write_ncdf}}
#' and \code{\link{write_tif}}. It will generate scale and offset values with maximum precision (unless simplify=TRUE).
#'
#' @details 
#' Nodata values will be mapped to the lowest value of the target data type.
#' 
#' Arguments min and max must have length 1 or length equal to the number of bands of the data cube to be exported. In the former
#' case, the same values are used for all bands of the exported target cube, whereas the latter case allows to use different 
#' ranges for different bands.
#' 
#' @note 
#' Using simplify=TRUE will round scale values to the next smaller power of 10. The offset is
#' \emph{not} recomputed after simplification.
#' 
#' @examples 
#' ndvi_packing = pack_minmax(type="int16", min=-1, max=1)
#' ndvi_packing
#' 
#' @param type target data type of packed values (one of "uint8", "uint16", "uint32", "int16", or "int32")
#' @param min numeric; minimum value(s) of original values, will be packed to the 2nd lowest value of the target data type
#' @param max numeric; maximum value(s) in original scale, will be packed to the highest value of the target data type
#' @param simplify logical; round resulting scale and offset to power of 10 values
#' @export
pack_minmax <- function(type="int16", min, max, simplify=FALSE) {
  stopifnot(length(min) == length(max))
  # Per-type packing parameters: nodata is the lowest representable value,
  # [low, high] is the range usable for actual data (nodata is excluded).
  ranges = list(
    "int16"  = list(nodata = -2^15, low = -2^15 + 1, high = 2^15 - 1),
    "int32"  = list(nodata = -2^31, low = -2^31 + 1, high = 2^31 - 1),
    "uint8"  = list(nodata = 0, low = 1, high = 2^8 - 1),
    "uint16" = list(nodata = 0, low = 1, high = 2^16 - 1),
    "uint32" = list(nodata = 0, low = 1, high = 2^32 - 1))
  r = ranges[[type]]
  if (is.null(r)) {
    stop("Invalid data type for packed export.")
  }
  # Map [min, max] linearly to [low, high]: original = packed * scale + offset
  scale = (max - min) / (r$high - r$low)
  offset = min - r$low * scale
  if (simplify) {
    # round scale down to the next smaller power of 10 (offset is kept as is)
    scale = 10^floor(log10(scale))
  }
  return(list(type = type, offset = offset, scale = scale, nodata = r$nodata))
}
#' Export a data cube as netCDF file(s)
#'
#' This function will read chunks of a data cube and write them to a single (the default) or multiple (if \code{chunked = TRUE}) netCDF file(s). The resulting
#' file(s) uses the enhanced netCDF-4 format, supporting chunking and compression.
#' 
#' @seealso \code{\link{gdalcubes_options}}
#' @param x a data cube proxy object (class cube)
#' @param fname output file name
#' @param overwrite logical; overwrite output file if it already exists
#' @param write_json_descr logical; write a JSON description of x as additional file
#' @param with_VRT logical; write additional VRT datasets (one per time slice)
#' @param pack reduce output file size by packing values (see Details), defaults to no packing
#' @param chunked logical; if TRUE, write one netCDF file per chunk; defaults to FALSE
#' 
#' @seealso \code{\link{pack_minmax}}
#' 
#' @details 
#' The resulting netCDF file(s) contain three dimensions (t, y, x) and bands as variables.
#' 
#' If \code{write_json_descr} is TRUE, the function will write an additional file with the same name as the NetCDF file but 
#' ".json" suffix. This file includes a serialized description of the input data cube, including all chained data cube operations.
#' 
#' To reduce the size of created files, values can be packed by applying a scale factor and an offset value and using a smaller
#' integer data type for storage (only supported if \code{chunked = FALSE}). The \code{pack} argument can be either NULL (the default), or a list with elements \code{type}, \code{scale}, \code{offset}, 
#' and \code{nodata}. \code{type} can be any of "uint8", "uint16" , "uint32", "int16", or "int32". \code{scale}, \code{offset}, and 
#' \code{nodata} must be numeric vectors with length one or length equal to the number of data cube bands (to use different values for different bands). 
#' The helper function  \code{\link{pack_minmax}} can be used to derive offset and scale values with maximum precision from minimum and maximum data values on
#' original scale.
#' 
#' If \code{chunked = TRUE}, names of the produced files will start with \code{name} (with removed extension), followed by an underscore and the internal integer chunk number.
#' 
#' @note Packing is currently ignored if \code{chunked = TRUE}
#' 
#' @return returns (invisibly) the path of the created netCDF file(s)  
#' 
#' @examples 
#' # create image collection from example Landsat data only 
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db")) 
#' }
#' 
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4, 
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-04"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' write_ncdf(select_bands(raster_cube(L8.col, v), c("B04", "B05")), fname=tempfile(fileext = ".nc"))
#' @export
write_ncdf <- function(x, fname = tempfile(pattern = "gdalcubes", fileext = ".nc"), overwrite = FALSE, 
                       write_json_descr = FALSE, with_VRT = FALSE, pack = NULL, chunked = FALSE) {
  stopifnot(is.cube(x))
  fname = path.expand(fname)
  if (!overwrite && file.exists(fname)) {
    stop("File already exists, please change the output filename or set overwrite = TRUE")
  }
  if (!is.null(pack)) {
    # packing parameters must be scalars or one value per band, and
    # offset / scale / nodata must all have the same length
    stopifnot(is.list(pack))
    stopifnot(length(pack$offset) == 1 || length(pack$offset) == nbands(x))
    stopifnot(length(pack$scale) == 1 || length(pack$scale) == nbands(x))
    stopifnot(length(pack$nodata) == 1 || length(pack$nodata) == nbands(x))
    stopifnot(length(pack$offset) == length(pack$scale))
    stopifnot(length(pack$offset) == length(pack$nodata))
  }
  if (!is.null(pack) && chunked) {
    warning("Since chunked = TRUE, packing will be ignored (data type will remain 8 byte double)")
  }
  if (.pkgenv$ncdf_write_bounds && chunked) {
    warning("Since chunked = TRUE, resulting netCDF files will not include bounds variables.")
  }
  if (!chunked) {
    if (.pkgenv$use_cube_cache) {
      # reuse a previously materialized result for an identical cube, keyed
      # by the hash of its serialized process graph
      j = gc_simple_hash(as_json(x))
      if (!is.null(.pkgenv$cube_cache[[j]]) 
          && file.exists(.pkgenv$cube_cache[[j]])) {
        file.copy(from=.pkgenv$cube_cache[[j]], to = fname, overwrite=TRUE)
      }
      else {
        gc_eval_cube(x, fname, .pkgenv$compression_level, with_VRT, .pkgenv$ncdf_write_bounds, pack)
      }
    }
    else {
      gc_eval_cube(x, fname, .pkgenv$compression_level, with_VRT, .pkgenv$ncdf_write_bounds, pack)
    }
  }
  else {
    # one netCDF file per chunk; files are named <fname without extension>_<chunkid>.nc
    gc_write_chunks_ncdf(x, dirname(fname),  tools::file_path_sans_ext(basename(fname)), .pkgenv$compression_level)
  }
  if (write_json_descr) {
    writeLines(as_json(x), paste0(fname, ".json"))
  }
  if (!chunked) {
    invisible(fname)
  }
  else {
    # escape the dot and anchor the pattern so only <name>_<chunkid>.nc files match;
    # return invisibly, consistent with the non-chunked branch and the documentation
    invisible(list.files(dirname(fname), pattern=paste0(tools::file_path_sans_ext(basename(fname)), "_[0-9]+\\.nc$"), full.names = TRUE))
  }
}
#' Export a data cube as a collection of GeoTIFF files
#'
#' This function will write time slices of a data cube as GeoTIFF files
#' in a given directory. 
#' 
#' @param x a data cube proxy object (class cube)
#' @param dir destination directory
#' @param prefix output file name
#' @param overviews logical; generate overview images
#' @param COG logical; create cloud-optimized GeoTIFF files (forces overviews=TRUE)
#' @param rsmpl_overview resampling method for overviews (image pyramid) generation (see \url{https://gdal.org/programs/gdaladdo.html} for available methods) 
#' @param creation_options additional creation options for resulting GeoTIFF files, e.g. to define compression (see \url{https://gdal.org/drivers/raster/gtiff.html#creation-options})
#' @param write_json_descr logical; write a JSON description of x as additional file
#' @param pack reduce output file size by packing values (see Details), defaults to no packing
#' 
#' @seealso \code{\link{pack_minmax}}
#' 
#' @return returns (invisibly) a vector of paths pointing to the created GeoTIFF files
#' 
#' @details
#' 
#' If \code{write_json_descr} is TRUE, the function will write an additional file with name according to prefix (if not missing) or simply cube.json 
#' This file includes a serialized description of the input data cube, including all chained data cube operations.
#' 
#' Additional GDAL creation options for resulting GeoTIFF files must be passed as a named list of simple strings, where element names refer to the key. For example,
#' \code{creation_options = list("COMPRESS" = "DEFLATE", "ZLEVEL" = "5")} would enable deflate compression at level 5.
#'
#' To reduce the size of created files, values can be packed by applying a scale factor and an offset value and using a smaller
#' integer data type for storage. The \code{pack} argument can be either NULL (the default), or a list with elements \code{type}, \code{scale}, \code{offset}, 
#' and \code{nodata}. \code{type} can be any of "uint8", "uint16" , "uint32", "int16", or "int32". \code{scale}, \code{offset}, and 
#' \code{nodata} must be numeric vectors with length one or length equal to the number of data cube bands (to use different values for different bands). 
#' The helper function  \code{\link{pack_minmax}} can be used to derive offset and scale values with maximum precision from minimum and maximum data values on
#' original scale.
#' 
#' If \code{overviews=TRUE}, the numbers of pixels are halved until the longer spatial dimensions counts less than 256 pixels.
#' Setting \code{COG=TRUE} automatically sets \code{overviews=TRUE}.
#' 
#' @examples 
#' # create image collection from example Landsat data only 
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db")) 
#' }
#' 
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4, 
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-04"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' write_tif(select_bands(raster_cube(L8.col, v), c("B04", "B05")), dir=tempdir())
#' @export
write_tif <- function(x, dir = tempfile(pattern=""),  prefix = basename(tempfile(pattern = "cube_")), overviews = FALSE, 
                      COG = FALSE, rsmpl_overview="nearest", creation_options = NULL , write_json_descr=FALSE, pack = NULL) {
  stopifnot(is.cube(x))
  dir = path.expand(dir)
  # if (dir.exists(dir) && prefix == "" && length(list.files(dir, include.dirs = TRUE) > 0)) {
  #   stop("Directory already exists and is not empty, please either")
  # }
  if (!(is.null(creation_options) || is.list(creation_options))) {
    stop("Expected either NULL or a list as creation_options argument.")
  }
  if (!is.character(rsmpl_overview)) {
    stop("Expected a character as rsmpl_overview argument.")
  }
  if (!overviews && COG) {
    # cloud-optimized GeoTIFFs require internal overviews
    overviews = TRUE
  }
  if (!is.null(pack)) {
    # packing parameters must be scalars or one value per band, and
    # offset / scale / nodata must all have the same length
    stopifnot(is.list(pack))
    stopifnot(length(pack$offset) == 1 || length(pack$offset) == nbands(x))
    stopifnot(length(pack$scale) == 1 || length(pack$scale) == nbands(x))
    stopifnot(length(pack$nodata) == 1 || length(pack$nodata) == nbands(x))
    stopifnot(length(pack$offset) == length(pack$scale))
    stopifnot(length(pack$offset) == length(pack$nodata))
  }
  # TODO: find out how to enable caching
  gc_write_tif(x, dir, prefix, overviews, COG, creation_options, rsmpl_overview, pack)
  if (write_json_descr) {
    if (prefix == "") {
      writeLines(as_json(x), file.path(dir, "cube.json"))
    }
    else {
      writeLines(as_json(x), file.path(dir, paste0(prefix, ".json")))
    }
  }
  return(invisible(list.files(path = dir,pattern = paste0(prefix, ".*\\.tif"), full.names = TRUE)))
}
#' Query coordinate values for all dimensions of a data cube 
#' 
#' Dimension values give the coordinates along the spatial and temporal axes of a data cube.
#'
#' @param obj a data cube proxy (class cube), or a data cube view object
#' @param datetime_unit unit used to format values in the datetime dimension, one of "Y", "m", "d", "H", "M", "S", defaults to the unit of the cube.
#' @return list with elements t,y,x
#' @examples 
#' # create image collection from example Landsat data only 
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db")) 
#' }
#' 
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4, 
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' dimension_values(raster_cube(L8.col, v))
#' @export
dimension_values <- function(obj, datetime_unit=NULL) {
  # empty string tells the backend to use the cube's native datetime unit
  if (is.null(datetime_unit)) {
    datetime_unit = ""
  }
  if (is.cube(obj)) {
    return(gc_dimension_values(obj, datetime_unit))
  }
  if (is.cube_view(obj)) {
    return(gc_dimension_values_from_view(obj, datetime_unit))
  }
  stop("obj must be either from class cube or from class cube_view")
}
#' Query coordinate bounds for all dimensions of a data cube 
#' 
#' Dimension bounds give the lower and upper coordinate bounds along the spatial and temporal axes of a data cube.
#'
#' @param obj a data cube proxy (class cube)
#' @param datetime_unit unit used to format values in the datetime dimension, one of "Y", "m", "d", "H", "M", "S", defaults to the unit of the cube.
#' @return list with elements t,y,x, each a list with two elements, start and end
#' @examples 
#' # create image collection from example Landsat data only 
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db")) 
#' }
#' 
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4, 
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' dimension_bounds(raster_cube(L8.col, v))
#' @export
dimension_bounds <- function(obj, datetime_unit=NULL) {
  stopifnot(is.cube(obj))
  if (is.null(datetime_unit)) {
    datetime_unit = ""  # empty string: use the cube's native datetime unit
  }
  bnds = gc_dimension_bounds(obj, datetime_unit)
  # bounds arrive interleaved per dimension as (start, end, start, end, ...);
  # split odd positions into start and even positions into end
  split_bounds <- function(v) {
    list(start = v[seq(1, length(v), by = 2)],
         end   = v[seq(2, length(v), by = 2)])
  }
  return(list(t = split_bounds(bnds$t),
              y = split_bounds(bnds$y),
              x = split_bounds(bnds$x)))
}
# /R/cube.R | permissive | rsbivand/gdalcubes_R | R | false | false | 45,236 | r |
#' Create a data cube from an image collection
#' 
#' Create a proxy data cube, which loads data from a given image collection according to a data cube view
#'
#' @param image_collection Source image collection as from \code{image_collection} or \code{create_image_collection}
#' @param view A data cube view defining the shape (spatiotemporal extent, resolution, and spatial reference), if missing, a default overview is used
#' @param mask mask pixels of images based on band values, see \code{\link{image_mask}}
#' @param chunking length-3 vector or a function returning a vector of length 3, defining the size of data cube chunks in the order time, y, x.
#' @return A proxy data cube object
#' @details 
#' The following steps will be performed when the data cube is requested to read data of a chunk:
#' 
#'  1. Find images from the input collection that intersect with the spatiotemporal extent of the chunk
#'  2. For all resulting images, apply gdalwarp to reproject, resize, and resample to an in-memory GDAL dataset
#'  3. Read the resulting data to the chunk buffer and optionally apply a mask on the result
#'  4. Update pixel-wise aggregator (as defined in the data cube view) to combine values of multiple images within the same data cube pixels
#'  
#' If chunking is provided as a function, it must accept exactly three arguments for the total size of the cube in t, y, and x axes (in this order).
#'  
#' @examples 
#' # create image collection from example Landsat data only 
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db")) 
#' }
#' 
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4, 
#'               bottom=4345299, top=4744931, t0="2018-01", t1="2018-12"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' raster_cube(L8.col, v)
#' 
#' # using a mask on the Landsat quality bit band to filter out clouds
#' raster_cube(L8.col, v, mask=image_mask("BQA", bits=4, values=16))
#' 
#' @note This function returns a proxy object, i.e., it will not start any computations besides deriving the shape of the result.
#' @export
raster_cube <- function(image_collection, view, mask=NULL, chunking=.pkgenv$default_chunksize) {
  stopifnot(is.image_collection(image_collection))
  if (is.function(chunking)) {
    if (missing(view)) {
      # without a view, the cube shape is unknown, so the chunk-size
      # function cannot be evaluated
      warning("Function to derive chunk sizes is not supported when data cube view is missing, using fixed chunk size (1, 512, 512)")
      chunking = c(1, 512, 512)
    }
    else {
      chunking = chunking(view$time$nt, view$space$ny, view$space$nx)
    }
  }
  stopifnot(length(chunking) == 3)
  chunking = as.integer(chunking)
  stopifnot(chunking[1] > 0 && chunking[2] > 0 && chunking[3] > 0)
  if (!is.null(mask)) {
    stopifnot(is.image_mask(mask))
  }
  x = NULL
  if (!missing(view)) {
    stopifnot(is.cube_view(view))
    x = gc_create_image_collection_cube(image_collection, chunking, mask, view)
  }
  else {
    x = gc_create_image_collection_cube(image_collection, chunking, mask)
  }
  class(x) <- c("image_collection_cube", "cube", "xptr")
  return(x)
}
#' Create a data cube from a set of images with the same spatial extent and spatial reference system
#' 
#' Create a spatiotemporal data cube directly from images with identical spatial extent and spatial reference system, similar
#' to a raster stack with an additional dimension supporting both, time and multiple bands / variables.
#'
#' @details
#' This function creates a four-dimensional (space, time, bands / variables) raster data cube from a 
#' set of provided files without the need to create an image collection before. This is possible if all images 
#' have the same spatial extent and spatial reference system and can be used for two different file organizations: 
#' 
#' 1. If all image files share the same bands / variables, the \code{bands} argument can be ignored (default NULL) and 
#' names of the bands can be specified using the \code{band_names} argument.
#' 
#' 2. If image files represent different band / variable (e.g. individual files for red, green, and blue channels), the \code{bands}
#' argument must be used to define the corresponding band / variable. Notice that in this case all files are expected to 
#' represent exactly one variable / band at one point in datetime. It is not possible to combine files with different 
#' numbers of variables / bands. If image files for different bands have different pixel sizes, the smallest size is used 
#' by default.
#' 
#' Notice that to avoid opening all image files in advance, no automatic check whether all images share the 
#' spatial extent and spatial reference system is performed. 
#'
#' @param x character vector where items point to image files
#' @param datetime_values vector of type character, Date, or POSIXct with recording date of images
#' @param bands optional character vector defining the band or spectral band of each item in x, if files relate to different spectral bands or variables  
#' @param band_names name of bands, only used if bands is NULL, i.e., if all files contain the same spectral band(s) / variable(s)
#' @param chunking vector of length 3 defining the size of data cube chunks in the order time, y, x.
#' @param dx optional target pixel size in x direction, by default (NULL) the original or highest resolution of images is used
#' @param dy optional target pixel size in y direction, by default (NULL) the original or highest resolution of images is used
#' @return A proxy data cube object
#' @examples 
#' # toy example, repeating the same image as a daily time series
#' L8_file_nir <- 
#'  system.file("L8NY18/LC08_L1TP_014032_20181122_20181129_01_T1/LC08_L1TP_014032_20181122_B5.TIF", 
#'              package = "gdalcubes")
#' files = rep(L8_file_nir, 10)
#' datetime = as.Date("2018-11-22") + 1:10
#' stack_cube(files, datetime, band_names = "B05")
#' 
#' # using a second band from different files
#' L8_file_red <- 
#'  system.file("L8NY18/LC08_L1TP_014032_20181122_20181129_01_T1/LC08_L1TP_014032_20181122_B4.TIF", 
#'              package = "gdalcubes")
#' files = rep(c(L8_file_nir, L8_file_red), each = 10)
#' datetime = rep(as.Date("2018-11-22") + 1:10, 2)
#' bands = rep(c("B5","B4"), each = 10)
#' stack_cube(files, datetime, bands = bands)
#' 
#' @note This function returns a proxy object, i.e., it will not start any computations besides deriving the shape of the result.
#' @export
stack_cube <- function(x, datetime_values, bands = NULL, band_names = NULL, chunking = c(1, 256, 256), dx=NULL, dy=NULL) {
  if (length(datetime_values) != length(x)) {
    stop("x and datetime_values have different length")
  }
  if (!is.null(bands)) {
    if (length(bands) != length(x)) {
      stop("x and bands have different length")
    }
  }
  stopifnot(length(chunking) == 3)
  if (!is.character(datetime_values)) {
    # Date / POSIXct inputs are passed to the backend as character strings
    datetime_values = as.character(datetime_values)
  }
  # use scalar && here: both operands are length-one conditions
  if (!is.null(bands) && !is.null(band_names)) {
    warning("Ignoring band_names because bands have been defined per file")
  }
  # the backend expects empty character vectors instead of NULL,
  # and -1.0 as a sentinel for "derive pixel size from the images"
  if (is.null(bands)) {
    bands = character(0)
  }
  if (is.null(band_names)) {
    band_names = character(0)
  }
  if (is.null(dx)) {
    dx = -1.0
  }
  if (is.null(dy)) {
    dy = -1.0
  }
  x = gc_create_simple_cube(x, datetime_values, bands, band_names, dx, dy, as.integer(chunking))
  class(x) <- c("simple_cube", "cube", "xptr")
  return(x)
}
#' Create a data cube proxy object copy
#'
#' Copy a data cube proxy object without copying any data
#' 
#' @param cube source data cube proxy object
#' @return copied data cube proxy object
#' @details 
#' This internal function copies the complete processing chain / graph of a data cube but does not copy any data.
#' It is used internally to avoid in-place modification for operations with potential side effects on source data cubes.
.copy_cube <- function(cube) {
  original_class = class(cube)
  duplicate = gc_copy_cube(cube)
  # gc_copy_cube returns a bare external pointer; restore the S3 class chain
  class(duplicate) <- original_class
  return(duplicate)
}
#' Read a data cube from a json description file
#'
#' @param json length-one character vector with a valid json data cube description
#' @param path path to a file containing a valid json data cube description
#' @return data cube proxy object
#' @details 
#' Data cubes can be stored as JSON description files. These files do not store any data but the recipe
#' how a data cube is constructed, i.e., the chain (or graph) of processes involved.
#' 
#' Since data cube objects (as returned from \code{\link{raster_cube}}) cannot be saved with normal R methods, 
#' the combination of \code{\link{as_json}} and \code{\link{json_cube}} provides a cheap way to save data cube 
#' objects across several R sessions, as in the examples. 
#' 
#' @examples{
#' # create image collection from example Landsat data only 
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db")) 
#' }
#' 
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4, 
#'               bottom=4345299, top=4744931, t0="2018-01", t1="2018-12"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' cube = raster_cube(L8.col, v)
#' 
#' # save
#' fname = tempfile()
#' writeLines(as_json(cube), fname)
#' 
#' # load
#' json_cube(path = fname)
#' }
#' 
#' @export
json_cube <- function(json, path = NULL) {
  if (!missing(json)) {
    # the inline JSON string wins if both arguments are given
    if (!is.null(path)) {
      warning("Expected only one of arguments 'json' and 'path'; path will be ignored")
    }
    cube = gc_from_json_string(json)
  }
  else {
    if (!is.null(path)) {
      cube = gc_from_json_file(path)
    }
    else {
      stop("Missing argument, please provide either a JSON string, or a path to a JSON file")
    }
  }
  class(cube) <- "cube" # TODO: any way to derive exact cube type here?
  return(cube)
}
#' Create a mask for images in a raster data cube 
#'
#' Create an image mask based on a band and provided values to filter pixels of images 
#' read by \code{\link{raster_cube}}
#'
#' @details 
#' Values of the selected mask band can be based on a range (by passing \code{min} and \code{max}) or on a set of values (by passing \code{values}). By default
#' pixels with mask values contained in the range or in the values are masked out, i.e. set to NA. Setting \code{invert = TRUE} will invert the masking behavior.
#' Passing \code{values} will override \code{min} and \code{max}.
#' 
#' @note 
#' Notice that masks are applied per image while reading images as a raster cube. They can be useful to eliminate e.g. cloudy pixels before applying the temporal aggregation to
#' merge multiple values for the same data cube pixel.
#' 
#' @examples 
#' image_mask("SCL", values = c(3,8,9)) # Sentinel 2 L2A: mask cloud and cloud shadows
#' image_mask("BQA", bits=4, values=16) # Landsat 8: mask clouds
#' image_mask("B10", min = 8000, max=65000) 
#' 
#' @param band name of the mask band
#' @param min minimum value, values between \code{min} and \code{max} will be masked
#' @param max maximum value, values between \code{min} and \code{max} will be masked
#' @param values numeric vector; specific values that will be masked. 
#' @param bits for bitmasks, extract the given bits (integer vector) with a bitwise AND before filtering the mask values, bit indexes are zero-based
#' @param invert logical; invert mask
#' @export
image_mask <- function(band, min=NULL, max=NULL, values=NULL, bits=NULL, invert=FALSE) {
  # Either a set of values, or a complete [min, max] range must be given;
  # a single range endpoint alone is not sufficient.
  if (is.null(values) && (is.null(min) || is.null(max))) {
    stop("either values or min and max must be provided")
  }
  if (!is.null(values)) {
    # values take precedence over a (partially or fully) specified range
    if (!is.null(min) || !is.null(max)) {
      warning("using values instead of min / max")
    }
    out = list(band = band, values = values, invert = invert, bits = bits)
  }
  else {
    out = list(band = band, min = min, max = max, invert = invert, bits = bits)
  }
  class(out) <- "image_mask"
  return(out)
}
# TRUE if obj carries the S3 class "image_mask" (as created by image_mask())
is.image_mask <- function(obj) {
  # inherits() is the idiomatic class check, equivalent to "image_mask" %in% class(obj)
  inherits(obj, "image_mask")
}
# TRUE if obj is a valid image_collection_cube proxy object; warns and returns
# FALSE if the underlying external pointer has become invalid (e.g. after
# loading from a previous R session).
is.image_collection_cube <- function(obj) {
  if (!inherits(obj, "image_collection_cube")) {
    return(FALSE)
  }
  if (gc_is_null(obj)) {
    warning("GDAL data cube proxy object is invalid")
    return(FALSE)
  }
  return(TRUE)
}
# TRUE if obj is a valid data cube proxy object; warns and returns FALSE if
# the underlying external pointer has become invalid (e.g. after loading from
# a previous R session).
is.cube <- function(obj) {
  if (!inherits(obj, "cube")) {
    return(FALSE)
  }
  if (gc_is_null(obj)) {
    warning("GDAL data cube proxy object is invalid")
    return(FALSE)
  }
  return(TRUE)
}
#' Print data cube information
#' 
#' Prints information about the dimensions and bands of a data cube.
#' 
#' @param x Object of class "cube"
#' @param ... Further arguments passed to the generic print function
#' @examples 
#' # create image collection from example Landsat data only 
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db")) 
#' }
#' 
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4, 
#'               bottom=4345299, top=4744931, t0="2018-01", t1="2018-12"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' print(raster_cube(L8.col, v))
#' @export
print.cube <- function(x, ...) {
  # guard against stale external pointers (e.g. objects restored from a saved session)
  if (gc_is_null(x)) {
    stop("GDAL data cube proxy object is invalid")
  }
  y = gc_cube_info(x)
  cat("A GDAL data cube proxy object\n")
  cat("\n")
  cat("Dimensions:\n")
  # one row per dimension; each element of y$dimensions holds low/high
  # coordinates, cell count, pixel size, and chunk size for one axis
  dimensions = data.frame(
    #name = c("time","y","x"),
    low = sapply(y$dimensions, function(z) z$low),
    high = sapply(y$dimensions, function(z) z$high),
    count  = sapply(y$dimensions, function(z) z$count),
    pixel_size = sapply(y$dimensions, function(z) z$pixel_size),
    chunk_size = sapply(y$dimensions, function(z) z$chunk_size)
  )
  # labeled time dimension: show at most the first 5 datetime labels,
  # appending ",..." when more labels exist; the values column applies to t only
  if (!is.null(y$dimensions$t$values)) {
    nmax = 5
    str = paste(head(y$dimensions$t$values,nmax), collapse=",")
    if (length(y$dimensions$t$values) > nmax)
      str = paste0(str, ",...")
    dimensions$values = c(str, "","")
  }
  rownames(dimensions) = c("t","y","x")
  print(dimensions)
  cat("\n")
  cat("Bands:\n")
  print(y$bands)
  cat("\n")
}
#' Query data cube properties
#'
#' @return size of a data cube (number of cells per dimension) as integer vector in the order t, y, x
#' @seealso \code{\link{dim.cube}}
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' size(raster_cube(L8.col, v))
#' @export
size <- function(obj) {
  # Validate via is.cube(), which also checks that the underlying C++ proxy
  # is alive; this matches the validation used by srs(), proj4(), nbands().
  stopifnot(is.cube(obj))
  x = gc_cube_info(obj)
  # x$size is (nbands, nt, ny, nx); drop the band count, keep (t, y, x)
  return(x$size[2:4])
}
#' Query data cube properties
#'
#' @return size of a data cube (number of cells per dimension) as integer vector in the order t, y, x
#' @seealso \code{\link{size}}
#' @param x a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' dim(raster_cube(L8.col, v))
#' @export
dim.cube <- function(x) {
  # dim() S3 method: simply delegate to size(), which already returns the
  # pixel counts in (t, y, x) order.
  size(x)
}
#' Query data cube properties
#'
#' @return Band names as character vector
#'
#' @param x a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' names(raster_cube(L8.col, v))
#' @export
names.cube <- function(x) {
  # Validate via is.cube() (class + live proxy), consistent with the other
  # property-query functions such as srs() and nbands().
  stopifnot(is.cube(x))
  y = gc_cube_info(x)
  # Return band names as a plain character vector
  return(as.character(y$bands$name))
}
#' Query data cube properties
#'
#' @return Dimension information as a list
#'
#' @details Elements of the returned list represent individual dimensions with properties such as dimension boundaries, names, and chunk size stored as inner lists
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' dimensions(raster_cube(L8.col, v))
#' @export
dimensions <- function(obj) {
  # Validate via is.cube() (class + live proxy), consistent with the other
  # property-query functions.
  stopifnot(is.cube(obj))
  y = gc_cube_info(obj)
  # List of per-dimension property lists (boundaries, counts, chunk sizes, ...)
  return(y$dimensions)
}
#' Query data cube properties
#'
#' @return A data.frame with rows representing the bands and columns representing properties of a band (name, type, scale, offset, unit)
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' bands(raster_cube(L8.col, v))
#' @export
bands <- function(obj) {
  # Validate via is.cube() (class + live proxy), consistent with the other
  # property-query functions.
  stopifnot(is.cube(obj))
  x = gc_cube_info(obj)
  # data.frame with one row per band (name, type, scale, offset, unit)
  return(x$bands)
}
#' Query data cube properties
#'
#' @return The spatial reference system expressed as a string readable by GDAL
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' srs(raster_cube(L8.col, v))
#' @export
srs <- function(obj) {
  # Fail early if obj is not a valid, live data cube proxy
  stopifnot(is.cube(obj))
  # The spatial reference system string as reported by the C++ proxy
  gc_cube_info(obj)$srs
}
#' Query data cube properties
#'
#' @return The spatial reference system expressed as proj4 string
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' proj4(raster_cube(L8.col, v))
#' @export
proj4 <- function(obj) {
  # Fail early if obj is not a valid, live data cube proxy
  stopifnot(is.cube(obj))
  # The spatial reference system as a proj4 string
  gc_cube_info(obj)$proj4
}
#' Query data cube properties
#'
#' @return Total data size of data cube values expressed in the given unit
#'
#' @param obj a data cube proxy object (class cube)
#' @param unit Unit of data size, can be "B", "KB", "KiB", "MB", "MiB", "GB", "GiB", "TB", "TiB", "PB", "PiB"
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' memsize(raster_cube(L8.col, v))
#' @export
memsize <- function(obj, unit="MiB") {
  stopifnot(is.cube(obj))
  x = gc_cube_info(obj)
  # total number of cells times 8 bytes (values are assumed to be double)
  size_bytes = prod(x$size) * 8
  # bytes per unit; decimal prefixes (KB, MB, ...) use powers of 1000,
  # binary prefixes (KiB, MiB, ...) use powers of 1024
  bytes_per_unit = switch(unit,
    B   = 1,
    KB  = 1000,
    KiB = 1024,
    MB  = 1000^2,
    MiB = 1024^2,
    GB  = 1000^3,
    GiB = 1024^3,
    TB  = 1000^4,
    TiB = 1024^4,
    PB  = 1000^5,
    PiB = 1024^5,
    # previously an unknown unit silently returned NULL (switch had no default)
    stop("Invalid unit, expected one of B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, PB, PiB"))
  return(size_bytes / bytes_per_unit)
}
#' Query data cube properties
#'
#' @return Number of bands
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' nbands(raster_cube(L8.col, v))
#' @export
nbands <- function(obj) {
  # Fail early if obj is not a valid, live data cube proxy
  stopifnot(is.cube(obj))
  # size vector is ordered (nbands, nt, ny, nx); the band count is first
  gc_cube_info(obj)$size[1]
}
#' Query data cube properties
#'
#' @return Number of pixels in the time dimension
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' nt(raster_cube(L8.col, v))
#' @export
nt <- function(obj) {
  # Fail early if obj is not a valid, live data cube proxy
  stopifnot(is.cube(obj))
  # size vector is ordered (nbands, nt, ny, nx); time count is second
  gc_cube_info(obj)$size[2]
}
#' Query data cube properties
#'
#' @return Number of pixels in the y dimension
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' ny(raster_cube(L8.col, v))
#' @export
ny <- function(obj) {
  # Fail early if obj is not a valid, live data cube proxy
  stopifnot(is.cube(obj))
  # size vector is ordered (nbands, nt, ny, nx); y count is third
  gc_cube_info(obj)$size[3]
}
#' Query data cube properties
#'
#' @return Number of pixels in the x dimension
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' nx(raster_cube(L8.col, v))
#' @export
nx <- function(obj) {
  # Fail early if obj is not a valid, live data cube proxy
  stopifnot(is.cube(obj))
  # size vector is ordered (nbands, nt, ny, nx); x count is fourth
  gc_cube_info(obj)$size[4]
}
#' Query data cube properties
#'
#' gdalcubes uses a graph (currently a tree) to serialize data cubes (including chains of cubes). This function gives a JSON
#' representation, which will be communicated to gdalcubes_server instances to create identical cube instances
#' remotely.
#'
#' @return A JSON string representing a graph (currently a tree) that can be used to create the same
#' chain of gdalcubes operations.
#'
#' @param obj a data cube proxy object (class cube)
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-04"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' cat(as_json(select_bands(raster_cube(L8.col, v), c("B04", "B05"))))
#' @export
as_json <- function(obj) {
  # Fail early if obj is not a valid, live data cube proxy
  stopifnot(is.cube(obj))
  info = gc_cube_info(obj)
  # Pretty-print the serialized process graph for readability
  jsonlite::prettify(info$graph)
}
#' Helper function to define packed data exports by min / max values
#'
#' This function can be used to define packed exports in \code{\link{write_ncdf}}
#' and \code{\link{write_tif}}. It will generate scale and offset values with maximum precision (unless simplify=TRUE).
#'
#' @details
#' Nodata values will be mapped to the lowest value of the target data type.
#'
#' Arguments min and max must have length 1 or length equal to the number of bands of the data cube to be exported. In the former
#' case, the same values are used for all bands of the exported target cube, whereas the latter case allows to use different
#' ranges for different bands.
#'
#' @note
#' Using simplify=TRUE will round scale values to the next smaller power of 10.
#'
#' @examples
#' ndvi_packing = pack_minmax(type="int16", min=-1, max=1)
#' ndvi_packing
#'
#' @param type target data type of packed values (one of "uint8", "uint16", "uint32", "int16", or "int32")
#' @param min numeric; minimum value(s) of original values, will be packed to the 2nd lowest value of the target data type
#' @param max numeric; maximum value(s) in original scale, will be packed to the highest value of the target data type
#' @param simplify logical; round resulting scale and offset to power of 10 values
#' @export
pack_minmax <- function(type="int16", min, max, simplify=FALSE) {
  stopifnot(length(min) == length(max))
  # Per-type packing parameters: the lowest representable value is reserved as
  # nodata; valid data maps onto [low, high]. This replaces five copy-pasted
  # branches that differed only in these three constants.
  params = switch(type,
    int16  = list(nodata = -2^15, low = -2^15 + 1, high = 2^15 - 1),
    int32  = list(nodata = -2^31, low = -2^31 + 1, high = 2^31 - 1),
    uint8  = list(nodata = 0, low = 1, high = 2^8 - 1),
    uint16 = list(nodata = 0, low = 1, high = 2^16 - 1),
    uint32 = list(nodata = 0, low = 1, high = 2^32 - 1),
    stop("Invalid data type for packed export."))
  # Linear mapping: min -> low, max -> high
  scale = (max - min) / (params$high - params$low)
  offset = min - params$low * scale
  out = list(type = type, offset = offset, scale = scale, nodata = params$nodata)
  if (simplify) {
    # round scale down to the next smaller power of 10 (see @note in the docs)
    floor_10 <- function(x) 10^floor(log10(x))
    out$scale = floor_10(out$scale)
  }
  return(out)
}
#' Export a data cube as netCDF file(s)
#'
#' This function will read chunks of a data cube and write them to a single (the default) or multiple (if \code{chunked = TRUE}) netCDF file(s). The resulting
#' file(s) uses the enhanced netCDF-4 format, supporting chunking and compression.
#'
#' @seealso \code{\link{gdalcubes_options}}
#' @param x a data cube proxy object (class cube)
#' @param fname output file name
#' @param overwrite logical; overwrite output file if it already exists
#' @param write_json_descr logical; write a JSON description of x as additional file
#' @param with_VRT logical; write additional VRT datasets (one per time slice)
#' @param pack reduce output file size by packing values (see Details), defaults to no packing
#' @param chunked logical; if TRUE, write one netCDF file per chunk; defaults to FALSE
#'
#' @seealso \code{\link{pack_minmax}}
#'
#' @details
#' The resulting netCDF file(s) contain three dimensions (t, y, x) and bands as variables.
#'
#' If \code{write_json_descr} is TRUE, the function will write an additional file with the same name as the NetCDF file but
#' ".json" suffix. This file includes a serialized description of the input data cube, including all chained data cube operations.
#'
#' To reduce the size of created files, values can be packed by applying a scale factor and an offset value and using a smaller
#' integer data type for storage (only supported if \code{chunked = TRUE}). The \code{pack} argument can be either NULL (the default), or a list with elements \code{type}, \code{scale}, \code{offset},
#' and \code{nodata}. \code{type} can be any of "uint8", "uint16" , "uint32", "int16", or "int32". \code{scale}, \code{offset}, and
#' \code{nodata} must be numeric vectors with length one or length equal to the number of data cube bands (to use different values for different bands).
#' The helper function \code{\link{pack_minmax}} can be used to derive offset and scale values with maximum precision from minimum and maximum data values on
#' original scale.
#'
#' If \code{chunked = TRUE}, names of the produced files will start with \code{name} (with removed extension), followed by an underscore and the internal integer chunk number.
#'
#' @note Packing is currently ignored if \code{chunked = TRUE}
#'
#' @return returns (invisibly) the path of the created netCDF file(s)
#'
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-04"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' write_ncdf(select_bands(raster_cube(L8.col, v), c("B04", "B05")), fname=tempfile(fileext = ".nc"))
#' @export
write_ncdf <- function(x, fname = tempfile(pattern = "gdalcubes", fileext = ".nc"), overwrite = FALSE,
                       write_json_descr = FALSE, with_VRT = FALSE, pack = NULL, chunked = FALSE) {
  stopifnot(is.cube(x))
  fname = path.expand(fname)
  if (!overwrite && file.exists(fname)) {
    stop("File already exists, please change the output filename or set overwrite = TRUE")
  }
  # pack, if given, must provide offset/scale/nodata either as scalars or as
  # one value per band, and all three must have the same length
  if (!is.null(pack)) {
    stopifnot(is.list(pack))
    stopifnot(length(pack$offset) == 1 || length(pack$offset) == nbands(x))
    stopifnot(length(pack$scale) == 1 || length(pack$scale) == nbands(x))
    stopifnot(length(pack$nodata) == 1 || length(pack$nodata) == nbands(x))
    stopifnot(length(pack$offset) == length(pack$scale))
    stopifnot(length(pack$offset) == length(pack$nodata))
  }
  # chunked export does not support packing or bounds variables; warn but continue
  if (!is.null(pack) && chunked) {
    warning("Since chunked = TRUE, packing will be ignored (data type will remain 8 byte double)")
  }
  if (.pkgenv$ncdf_write_bounds && chunked) {
    warning("Since chunked = TRUE, resulting netCDF files will not include bounds variables.")
  }
  if (!chunked) {
    # Optional result cache: the cube's serialized graph is hashed and used as
    # a key into .pkgenv$cube_cache (a map from hash to a previously written file)
    if (.pkgenv$use_cube_cache) {
      j = gc_simple_hash(as_json(x))
      if (!is.null(.pkgenv$cube_cache[[j]])
          && file.exists(.pkgenv$cube_cache[[j]])) {
        # cache hit: reuse the previously computed netCDF file
        file.copy(from=.pkgenv$cube_cache[[j]], to = fname, overwrite=TRUE)
      }
      else {
        # cache miss: evaluate the cube
        # NOTE(review): the result is not inserted into .pkgenv$cube_cache here;
        # presumably cache population happens elsewhere -- confirm
        gc_eval_cube(x, fname, .pkgenv$compression_level, with_VRT, .pkgenv$ncdf_write_bounds, pack)
      }
    }
    else {
      gc_eval_cube(x, fname, .pkgenv$compression_level, with_VRT, .pkgenv$ncdf_write_bounds, pack)
    }
  }
  else {
    # chunked export: one netCDF file per chunk, named <fname-sans-ext>_<chunkid>.nc
    gc_write_chunks_ncdf(x, dirname(fname), tools::file_path_sans_ext(basename(fname)), .pkgenv$compression_level)
  }
  if (write_json_descr) {
    # sidecar JSON file describing the full cube operation chain
    writeLines(as_json(x), paste(fname, ".json", sep=""))
  }
  # Return the created path(s); invisible in the single-file case
  if (!chunked) {
    invisible(fname)
  }
  else {
    list.files(dirname(fname), pattern=paste(tools::file_path_sans_ext(basename(fname)), "_[0-9]+.nc", sep=""), full.names = TRUE)
  }
}
#' Export a data cube as a collection of GeoTIFF files
#'
#' This function will write time slices of a data cube as GeoTIFF files
#' in a given directory.
#'
#' @param x a data cube proxy object (class cube)
#' @param dir destination directory
#' @param prefix output file name
#' @param overviews logical; generate overview images
#' @param COG logical; create cloud-optimized GeoTIFF files (forces overviews=TRUE)
#' @param rsmpl_overview resampling method for overviews (image pyramid) generation (see \url{https://gdal.org/programs/gdaladdo.html} for available methods)
#' @param creation_options additional creation options for resulting GeoTIFF files, e.g. to define compression (see \url{https://gdal.org/drivers/raster/gtiff.html#creation-options})
#' @param write_json_descr logical; write a JSON description of x as additional file
#' @param pack reduce output file size by packing values (see Details), defaults to no packing
#'
#' @seealso \code{\link{pack_minmax}}
#'
#' @return returns (invisibly) a vector of paths pointing to the created GeoTIFF files
#'
#' @details
#'
#' If \code{write_json_descr} is TRUE, the function will write an additional file with name according to prefix (if not missing) or simply cube.json
#' This file includes a serialized description of the input data cube, including all chained data cube operations.
#'
#' Additional GDAL creation options for resulting GeoTIFF files must be passed as a named list of simple strings, where element names refer to the key. For example,
#' \code{creation_options = list("COMPRESS" = "DEFLATE", "ZLEVEL" = "5")} would enable deflate compression at level 5.
#'
#' To reduce the size of created files, values can be packed by applying a scale factor and an offset value and using a smaller
#' integer data type for storage. The \code{pack} argument can be either NULL (the default), or a list with elements \code{type}, \code{scale}, \code{offset},
#' and \code{nodata}. \code{type} can be any of "uint8", "uint16" , "uint32", "int16", or "int32". \code{scale}, \code{offset}, and
#' \code{nodata} must be numeric vectors with length one or length equal to the number of data cube bands (to use different values for different bands).
#' The helper function \code{\link{pack_minmax}} can be used to derive offset and scale values with maximum precision from minimum and maximum data values on
#' original scale.
#'
#' If \code{overviews=TRUE}, the numbers of pixels are halved until the longer spatial dimensions counts less than 256 pixels.
#' Setting \code{COG=TRUE} automatically sets \code{overviews=TRUE}.
#'
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-04"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' write_tif(select_bands(raster_cube(L8.col, v), c("B04", "B05")), dir=tempdir())
#' @export
write_tif <- function(x, dir = tempfile(pattern=""), prefix = basename(tempfile(pattern = "cube_")), overviews = FALSE,
                      COG = FALSE, rsmpl_overview="nearest", creation_options = NULL , write_json_descr=FALSE, pack = NULL) {
  stopifnot(is.cube(x))
  dir = path.expand(dir)
  if (!(is.null(creation_options) || is.list(creation_options))) {
    stop("Expected either NULL or a list as creation_options argument.")
  }
  if (!is.character(rsmpl_overview)) {
    stop("Expected a character string as rsmpl_overview argument.")  # typo "chracte" fixed
  }
  # cloud-optimized GeoTIFFs always require overview images
  if (!overviews && COG) {
    overviews = TRUE
  }
  # pack, if given, must provide offset/scale/nodata either as scalars or as
  # one value per band, and all three must have the same length
  if (!is.null(pack)) {
    stopifnot(is.list(pack))
    stopifnot(length(pack$offset) == 1 || length(pack$offset) == nbands(x))
    stopifnot(length(pack$scale) == 1 || length(pack$scale) == nbands(x))
    stopifnot(length(pack$nodata) == 1 || length(pack$nodata) == nbands(x))
    stopifnot(length(pack$offset) == length(pack$scale))
    stopifnot(length(pack$offset) == length(pack$nodata))
  }
  # TODO: find out how to enable caching
  gc_write_tif(x, dir, prefix, overviews, COG, creation_options, rsmpl_overview, pack)
  if (write_json_descr) {
    # sidecar JSON description of the cube's operation chain
    if (prefix == "") {
      writeLines(as_json(x), file.path(dir, "cube.json"))
    }
    else {
      writeLines(as_json(x), file.path(dir, paste(prefix, ".json", sep="")))
    }
  }
  # Invisibly return the paths of all GeoTIFF files that match the prefix
  return(invisible(list.files(path = dir, pattern = paste(prefix, ".*\\.tif", sep=""), full.names = TRUE)))
}
#' Query coordinate values for all dimensions of a data cube
#'
#' Dimension values give the coordinates along the spatial and temporal axes of a data cube.
#'
#' @param obj a data cube proxy (class cube), or a data cube view object
#' @param datetime_unit unit used to format values in the datetime dimension, one of "Y", "m", "d", "H", "M", "S", defaults to the unit of the cube.
#' @return list with elements t,y,x
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' dimension_values(raster_cube(L8.col, v))
#' @export
dimension_values <- function(obj, datetime_unit=NULL) {
  # Default to the cube's native datetime unit; the empty string tells the
  # C++ API to use it. Hoisted out of the branches (was duplicated in both).
  if (is.null(datetime_unit)) {
    datetime_unit = ""
  }
  if (is.cube(obj)) {
    return(gc_dimension_values(obj, datetime_unit))
  }
  else if (is.cube_view(obj)) {
    return(gc_dimension_values_from_view(obj, datetime_unit))
  }
  else {
    stop("obj must be either from class cube or from class cube_view")
  }
}
#' Query coordinate bounds for all dimensions of a data cube
#'
#' Dimension bounds give the coordinate bounds along the spatial and temporal axes of a data cube.
#'
#' @param obj a data cube proxy (class cube)
#' @param datetime_unit unit used to format values in the datetime dimension, one of "Y", "m", "d", "H", "M", "S", defaults to the unit of the cube.
#' @return list with elements t,y,x, each a list with two elements, start and end
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-06"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' dimension_bounds(raster_cube(L8.col, v))
#' @export
dimension_bounds <- function(obj, datetime_unit=NULL) {
  stopifnot(is.cube(obj))
  # The empty string tells the C++ API to use the cube's native datetime unit
  if (is.null(datetime_unit)) {
    datetime_unit = ""
  }
  bnds = gc_dimension_bounds(obj, datetime_unit)
  # Each bounds vector interleaves (start, end) pairs; split odd positions
  # into start and even positions into end (was written out three times).
  split_pairs <- function(v) {
    list(start = v[seq(1, length(v), by = 2)],
         end = v[seq(2, length(v), by = 2)])
  }
  return(list(t = split_pairs(bnds$t),
              y = split_pairs(bnds$y),
              x = split_pairs(bnds$x)))
}
|
# Load the package
# fixed: installed.packages() only LISTS installed packages; install.packages()
# actually installs from CRAN, which is what this script needs.
install.packages("eurostat")
library(devtools)
install_github("ropengov/eurostat")
library(eurostat)
library(rvest)
# Get Eurostat data listing
toc <- get_eurostat_toc()
# Check the first items
library(knitr)
kable(head(toc))
# Search for themes matching "cars"
kable(head(search_eurostat("cars"), n = 15))
# Look up the dataset id by its full title
id <- search_eurostat("Passenger cars, by alternative motor energy and by power of vehicles",
                      type = "dataset")$code[1]
# Download the dataset (numeric time, labelled categories)
dat <- get_eurostat(id, time_format = "num", type = "label")
# Check data
str(dat)
head(dat)
df <- data.frame(dat)
table(df$prod_nrg); table(df$unit); table(df$time); table(df$geo)
# Subset by type of energy
elec <- subset(df, prod_nrg == "Electrical Energy", select = c(geo, time, values))
gas <- subset(df, prod_nrg == "Natural Gas", select = c(geo, time, values))
lpg <- subset(df, prod_nrg == "LPG", select = c(geo, time, values))
other <- subset(df, prod_nrg == "Other products", select = c(geo, time, values))
total <- subset(df, prod_nrg == "Total", select = c(geo, time, values))
# Aggregate totals per year
el.date <- aggregate(values ~ time, elec, sum)
gas.date <- aggregate(values ~ time, gas, sum)
lpg.date <- aggregate(values ~ time, lpg, sum)
other.date <- aggregate(values ~ time, other, sum)
total.date <- aggregate(values ~ time, total, sum)
# Plot by sector/years
p <- plot(el.date, type = 'o',
          col = "red",
          ylab = "Amount of Eur(millions)",
          xlab = "Years",
          ylim = c(0, 10000))
points(values ~ time, data = gas.date, type = 'b', col = "green")
points(values ~ time, data = lpg.date, type = 'l', lty = 2, col = "blue")
points(values ~ time, data = other.date, type = 'l', col = "orange")
title(main = "Investment in Automotive Sector in EU", col.main = "black", font.main = 4)
# NOTE(review): legend coordinates (100, 9.5) look off for a time axis in years
# and a y range of 0-10000 -- confirm intended placement (possibly 9500 on y).
legend(100, 9.5,
       c("Electric", "Gas", "Lpg", "Other"),
       lty = c(1, 1, 1, 1),
       col = c("red", "green", "blue", "orange"))
# Analysis by country: total investment per country
total.geo <- aggregate(values ~ geo, total, sum)
boxplot(total.geo$values)
# Dendrogram: hierarchical clustering of scaled country totals
dd <- dist(scale(total.geo$values), method = "euclidean")
name <- as.factor(total.geo$geo)
hc <- hclust(dd, method = "ward.D2")
plot(hc)
| /Eurostat/eurostat.R | no_license | fabiorcampos/Innovation-Analysis | R | false | false | 2,195 | r | # Load the package
installed.packages("eurostat")
library(devtools)
install_github("ropengov/eurostat")
library(eurostat)
library(rvest)
# Get Eurostat data listing
toc <-get_eurostat_toc()
# Check the first items
library(knitr)
kable(head(toc))
# Search for Themes
kable(head(search_eurostat("cars"), n = 15))
# Check Id
id <- search_eurostat("Passenger cars, by alternative motor energy and by power of vehicles",
type = "dataset")$code[1]
# Download Dataset
dat <-get_eurostat(id, time_format = "num", type = "label")
# Check data
str(dat)
head(dat)
df <- data.frame(dat)
table(df$prod_nrg); table(df$unit); table(df$time); table(df$geo)
# Subset by type of energy
elec <- subset(df, prod_nrg == "Electrical Energy", select = c(geo, time, values))
gas <- subset(df, prod_nrg == "Natural Gas", select = c(geo, time, values))
lpg <- subset(df, prod_nrg == "LPG", select = c(geo, time, values))
other <- subset(df, prod_nrg == "Other products", select = c(geo, time, values))
total <- subset(df, prod_nrg == "Total", select = c(geo, time, values))
# Total analysis
el.date <- aggregate(values ~ time, elec, sum)
gas.date <- aggregate(values ~ time, gas, sum)
lpg.date <- aggregate(values ~ time, lpg, sum)
other.date <- aggregate(values ~ time, other, sum)
total.date <- aggregate(values ~ time, total, sum)
# Plot by Sector/Years
p <- plot(el.date, type='o',
col="red",
ylab="Amount of Eur(millions)",
xlab="Years",
ylim = c(0, 10000))
points(values ~ time, data=gas.date, type='b', col="green")
points(values ~ time, data=lpg.date, type='l',lty=2, col="blue")
points(values ~ time, data=other.date, type='l', col="orange")
title(main="Investiment in Automotive Sector in UE", col.main="black", font.main=4)
legend(100,9.5,
c("Electric", "Gas", "Lpg", "Other"),
lty = c(1,1,1,1),
col = c("red", "green", "blue", "orange"))
# Analysis by Country
# Total analysis
total.geo <- aggregate(values ~ geo, total, sum)
boxplot(total.geo$values)
# Dendogram
dd <- dist(scale(total.geo$values), method = "euclidean")
name <- as.factor(total.geo$geo)
hc <- hclust(dd, method = "ward.D2")
plot(hc)
|
\name{maxlof}
\alias{maxlof}
\title{ Detection of multivariate outliers using the LOF algorithm}
\description{
A function that detects multivariate outliers using the local outlier factor for a matrix
over a range of neighbors called minpts.
}
\usage{
maxlof(data, name = "", minptsl = 10, minptsu = 20)
}
\arguments{
\item{data}{ Dataset for outlier detection}
\item{name}{ Name of dataset used in the graph title.}
\item{minptsl}{ Lower bound for the number of neighbors}
\item{minptsu}{ Upper bound for the number of neighbors}
}
\details{
Calls on the function "lofactor" to compute the local
outlier factor for each integer number of neighbors in the range
[minptsl, minptsu]. Also displays a plot of the factors for
each observation of the dataset. In the plot, the user should
seek to identify observations with large gaps between outlyingness
measures. These would be candidates for outliers.
}
\value{
\item{maxlofactor}{ A vector containing the index of each observation
of the dataset and the corresponding local outlier factor.}
}
\references{Breunig, M., Kriegel, H., Ng, R.T., and Sander, J. (2000).
LOF: Identifying density-based local outliers. In Proceedings of the
ACM SIGMOD International Conference on Management of Data.
}
\author{Caroline Rodriguez}
\examples{
#Detecting top 10 outliers in class number 1 of Breastw using the LOF algorithm
data(breastw)
breastw1.lof=maxlof(breastw[breastw[,10]==1,],name="Breast-Wisconsin",30,40)
breastw1.lof[order(breastw1.lof,decreasing=TRUE)][1:10]
}
\keyword{methods}
| /man/maxlof.Rd | no_license | a704261687/dprep | R | false | false | 1,590 | rd | \name{maxlof}
\alias{maxlof}
\title{ Detection of multivariate outliers using the LOF algorithm}
\description{
A function that detects multivariate outliers using the local outlier factor for a matrix
over a range of neighbors called minpts.
}
\usage{
maxlof(data, name = "", minptsl = 10, minptsu = 20)
}
\arguments{
\item{data}{ Dataset for outlier detection}
\item{name}{ Name of dataset used in the graph title.}
\item{minptsl}{ Lower bound for the number of neighbors}
\item{minptsu}{ Upper bound for the number of neighbors}
}
\details{
Calls on the function "lofactor" to compute the local
outlier factor for each integer number of neighbors in the range
[minptsl, minptsu]. Also displays a plot of the factors for
each observation of the dataset. In the plot, the user should
seek to identify observations with large gaps between outlyingness
measures. These would be candidates for outliers.
}
\value{
\item{maxlofactor}{ A vector containing the index of each observation
of the dataset and the corresponding local outlier factor.}
}
\references{Breunig, M., Kriegel, H., Ng, R.T., and Sander, J. (2000).
LOF: Identifying density-based local outliers. In Proceedings of the
ACM SIGMOD International Conference on Management of Data.
}
\author{Caroline Rodriguez}
\examples{
#Detecting top 10 outliers in class number 1 of Breastw using the LOF algorithm
data(breastw)
breastw1.lof=maxlof(breastw[breastw[,10]==1,],name="Breast-Wisconsin",30,40)
breastw1.lof[order(breastw1.lof,decreasing=TRUE)][1:10]
}
\keyword{methods}
|
library(ggplot2)
library(tidyr)
library(dplyr)
library(vegan)
# Load the Oehl et al. (2003) AMF abundance table and tidy the species names
Oehl2003<-read.csv("Oehl_etal2003.csv",header = TRUE,stringsAsFactors = FALSE)
# "Genus species" -> "Genus_species" (sub() replaces only the first space)
Oehl2003$Species<-sub("\\s","_",Oehl2003$Species)
# Drop undetermined morphotypes whose name ends in "BR<digit>"
Oehl2003<-Oehl2003[-grep("BR\\d$",Oehl2003$Species),]
# Fix misspelled species names
Oehl2003$Species[which(Oehl2003$Species=="Acaulospora_scrobiculta")]<-
"Acaulospora_scrobiculata"
Oehl2003$Species[which(Oehl2003$Species=="Scutellopora_pellucida")]<-
"Scutellospora_pellucida"
Oehl2003$Species[which(Oehl2003$Species=="Archeospora_leptoticha")]<-
"Archaeospora_leptoticha"
#1. Correct the names against the reference taxonomy
#   (AMF_Taxonomy and secondTrial() are defined elsewhere in this project)
Oehl2003[!Oehl2003[,1]%in%AMF_Taxonomy[,1],1]
Oehl2003<-secondTrial(AMF_Taxonomy,Oehl2003)
#2. Check which new spore records still need to be entered
AMF_All_Copy[which(!is.na(match(AMF_All_Copy[,1],
Oehl2003[,1]))),
c(1,13)]
#3. Reshape to long format (one row per species x site) for ggplot
Oehl2003_df<-gather(Oehl2003,key=sites,
value=abundance,W:R)
names(Oehl2003_df)[1]<-"good.names"
#4. Attach trait data (column 13 is presumably SporeArea -- TODO confirm)
Oehl2003_df<-left_join(Oehl2003_df,AMF_All_Copy[,c(1,13)])
Oehl2003_df$sites<-factor(Oehl2003_df$sites,
levels = c("W","V","G","O","L","F","S","R"))
#5. Plot spore area per site; point size encodes abundance
Oehl2003_df %>%
filter(abundance!=0) %>%
ggplot(aes(x=sites,y=SporeArea,size=abundance,col=good.names))+
geom_point(alpha=0.5)+scale_y_log10()+theme(legend.position = "none")+
theme(axis.text.x = element_text(size = 5))+
ggtitle("Oehl2003 abundance with singletons")
# Bray-Curtis distances among sites, community-weighted mean spore size,
# and spread measures (IQR / interdecile range)
transposed<-t(Oehl2003[,-1]);
transposed<-transposed[match(levels(Oehl2003_df$sites),row.names(transposed)),]
# Distance of every site to the first site (column 1 of the distance matrix)
dists <- as.matrix(vegdist(transposed,
method='bray'))[, 1];
dists <- data.frame(habitat=names(dists), bray.dist=dists);
row.names(dists)<-NULL
# Abundance-weighted spore area (abundance rescaled to proportions within each site)
Oehl2003_df$Abund_Area<-
Oehl2003_df$SporeArea*
ave(Oehl2003_df$abundance,Oehl2003_df$sites,FUN = function(x){x/sum(x)})
Oehl2003_df$Abund_Area[Oehl2003_df$Abund_Area==0]<-NA
# Per-site summary: community-weighted mean, interquartile range, interdecile range
CommTraits<-cbind(
data.frame(CWMean=
tapply(Oehl2003_df$Abund_Area,
Oehl2003_df$sites,sum,na.rm=TRUE)),
data.frame(IQR=
tapply(Oehl2003_df$Abund_Area,
Oehl2003_df$sites,IQR,na.rm=TRUE)),
data.frame(IDR=
tapply(Oehl2003_df$Abund_Area,
Oehl2003_df$sites,function(x){diff(quantile(x,c(0.1,0.9),na.rm = TRUE,names = FALSE))})
))
CommTraits$habitat<-row.names(CommTraits)
row.names(CommTraits)<-NULL
CommTraits<-CommTraits[,c(4,1,2,3)]
CommTraits<-merge(dists,CommTraits,by="habitat")
CommTraits$habitat<-factor(CommTraits$habitat,
levels = levels(Oehl2003_df$sites))
#plot(CommTraits$bray.dist,CommTraits$CWMean)
CommTraitsOehl2003<-CommTraits
CommTraitsOehl2003$Study<-"Oehl2003"
CommTraitsOehl2003
CommTraitsOehl2003%>%
ggplot(aes(x=bray.dist,y=CWMean,size=IQR,col=habitat))+
geom_point(alpha=0.5)#+scale_y_log10()+
# NOTE(review): the "+" before scale_y_log10() is commented out above, so the
# theme() call on the next line is a free-standing statement and is NOT added
# to the plot.
theme(axis.text.x = element_text(size = 5))
rm(CommTraits,dists,transposed,Oehl2003_df)
| /Scripts/Oehl_etal2003.R | no_license | aguilart/SporeSize | R | false | false | 3,207 | r | library(ggplot2)
library(tidyr)
library(dplyr)
library(vegan)
# Load the Oehl et al. (2003) AMF abundance table and tidy the species names
Oehl2003<-read.csv("Oehl_etal2003.csv",header = TRUE,stringsAsFactors = FALSE)
# "Genus species" -> "Genus_species" (sub() replaces only the first space)
Oehl2003$Species<-sub("\\s","_",Oehl2003$Species)
# Drop undetermined morphotypes whose name ends in "BR<digit>"
Oehl2003<-Oehl2003[-grep("BR\\d$",Oehl2003$Species),]
# Fix misspelled species names
Oehl2003$Species[which(Oehl2003$Species=="Acaulospora_scrobiculta")]<-
"Acaulospora_scrobiculata"
Oehl2003$Species[which(Oehl2003$Species=="Scutellopora_pellucida")]<-
"Scutellospora_pellucida"
Oehl2003$Species[which(Oehl2003$Species=="Archeospora_leptoticha")]<-
"Archaeospora_leptoticha"
#1. Correct the names against the reference taxonomy
#   (AMF_Taxonomy and secondTrial() are defined elsewhere in this project)
Oehl2003[!Oehl2003[,1]%in%AMF_Taxonomy[,1],1]
Oehl2003<-secondTrial(AMF_Taxonomy,Oehl2003)
#2. Check which new spore records still need to be entered
AMF_All_Copy[which(!is.na(match(AMF_All_Copy[,1],
Oehl2003[,1]))),
c(1,13)]
#3. Reshape to long format (one row per species x site) for ggplot
Oehl2003_df<-gather(Oehl2003,key=sites,
value=abundance,W:R)
names(Oehl2003_df)[1]<-"good.names"
#4. Attach trait data (column 13 is presumably SporeArea -- TODO confirm)
Oehl2003_df<-left_join(Oehl2003_df,AMF_All_Copy[,c(1,13)])
Oehl2003_df$sites<-factor(Oehl2003_df$sites,
levels = c("W","V","G","O","L","F","S","R"))
#5. Plot spore area per site; point size encodes abundance
Oehl2003_df %>%
filter(abundance!=0) %>%
ggplot(aes(x=sites,y=SporeArea,size=abundance,col=good.names))+
geom_point(alpha=0.5)+scale_y_log10()+theme(legend.position = "none")+
theme(axis.text.x = element_text(size = 5))+
ggtitle("Oehl2003 abundance with singletons")
# Bray-Curtis distances among sites, community-weighted mean spore size,
# and spread measures (IQR / interdecile range)
transposed<-t(Oehl2003[,-1]);
transposed<-transposed[match(levels(Oehl2003_df$sites),row.names(transposed)),]
# Distance of every site to the first site (column 1 of the distance matrix)
dists <- as.matrix(vegdist(transposed,
method='bray'))[, 1];
dists <- data.frame(habitat=names(dists), bray.dist=dists);
row.names(dists)<-NULL
# Abundance-weighted spore area (abundance rescaled to proportions within each site)
Oehl2003_df$Abund_Area<-
Oehl2003_df$SporeArea*
ave(Oehl2003_df$abundance,Oehl2003_df$sites,FUN = function(x){x/sum(x)})
Oehl2003_df$Abund_Area[Oehl2003_df$Abund_Area==0]<-NA
# Per-site summary: community-weighted mean, interquartile range, interdecile range
CommTraits<-cbind(
data.frame(CWMean=
tapply(Oehl2003_df$Abund_Area,
Oehl2003_df$sites,sum,na.rm=TRUE)),
data.frame(IQR=
tapply(Oehl2003_df$Abund_Area,
Oehl2003_df$sites,IQR,na.rm=TRUE)),
data.frame(IDR=
tapply(Oehl2003_df$Abund_Area,
Oehl2003_df$sites,function(x){diff(quantile(x,c(0.1,0.9),na.rm = TRUE,names = FALSE))})
))
CommTraits$habitat<-row.names(CommTraits)
row.names(CommTraits)<-NULL
CommTraits<-CommTraits[,c(4,1,2,3)]
CommTraits<-merge(dists,CommTraits,by="habitat")
CommTraits$habitat<-factor(CommTraits$habitat,
levels = levels(Oehl2003_df$sites))
#plot(CommTraits$bray.dist,CommTraits$CWMean)
CommTraitsOehl2003<-CommTraits
CommTraitsOehl2003$Study<-"Oehl2003"
CommTraitsOehl2003
CommTraitsOehl2003%>%
ggplot(aes(x=bray.dist,y=CWMean,size=IQR,col=habitat))+
geom_point(alpha=0.5)#+scale_y_log10()+
# NOTE(review): the "+" before scale_y_log10() is commented out above, so the
# theme() call on the next line is a free-standing statement and is NOT added
# to the plot.
theme(axis.text.x = element_text(size = 5))
rm(CommTraits,dists,transposed,Oehl2003_df)
|
## data path
setwd("data")  # NOTE(review): setwd() makes the script depend on the launch directory
# libraries
library(raster)
library(rgdal)
# Inspect a shapefile: list its layers, print the metadata of the first
# layer, then load and return that layer.
#
# fileName: path to a .shp file.
# Returns NULL (invisibly, with a warning) when the file does not exist;
# otherwise the first layer as loaded by readOGR().
getShapefileInfo <- function(fileName){
  # Bail out early on a missing file.  (The original used a bare `return;`,
  # which in R merely evaluates the `return` function object and does NOT
  # exit the function.)
  if (!file.exists(fileName)) {
    warning("File not found: ", fileName, call. = FALSE)
    return(invisible(NULL))
  }
  # Get the layer names
  fileLayers <- ogrListLayers(dsn = fileName)
  # Get and display the file information for the first layer
  # (a bare `fileInfo` inside a function prints nothing; print() is needed)
  fileInfo <- ogrInfo(dsn = fileName, fileLayers[1])
  print(fileInfo)
  # Load and return the first layer
  fileData <- readOGR(dsn = fileName, fileLayers[1])
  fileData
}
# Rasterize the first layer of a shapefile onto a regular grid and write
# the result to a GeoTIFF.
#
# fileName:    path to a .shp file.
# extentValue: numeric c(xmin, xmax, ymin, ymax) for the output grid.
#              (The original ignored this argument and hard-coded the
#              South-America extent; it is now honoured, with the old
#              hard-coded values kept as the default.)
# cellsPerDeg: grid resolution in cells per degree (was hard-coded to 5).
# outFile:     output GeoTIFF path.
# Returns the RasterLayer invisibly, or NULL (with a warning) if fileName
# does not exist.
shp2raster <- function(fileName, extentValue = c(-74, -34.5, -34, 5),
                       cellsPerDeg = 5, outFile = "raster_from_shp.tif"){
  # Bare `return;` in the original never actually exited the function.
  if (!file.exists(fileName)) {
    warning("File not found: ", fileName, call. = FALSE)
    return(invisible(NULL))
  }
  # Read the first layer of the shapefile
  fileLayers <- ogrListLayers(dsn = fileName)
  teow <- readOGR(dsn = fileName, fileLayers[1])
  ## Set up a raster "template" to use in rasterize()
  ext <- extent(extentValue[1], extentValue[2], extentValue[3], extentValue[4])
  xy <- abs(apply(as.matrix(bbox(ext)), 1, diff))  # extent size in degrees (x, y)
  r <- raster(ext, ncol = xy[1] * cellsPerDeg, nrow = xy[2] * cellsPerDeg)
  ## Rasterize the shapefile onto the template grid
  rr <- rasterize(teow, r)
  ## Preview and write out as an unsigned 16-bit GeoTIFF
  plot(rr)
  writeRaster(rr, outFile, format = 'GTiff', datatype = 'INT2U', overwrite = TRUE)
  invisible(rr)
}
#
# RUN
#
# Entry point: inspect the Caraguatatuba 2010 land-use shapefile.
# The vegtype_2000 workflow is kept below for reference but disabled.
void_main <- function() {
  # ---- caragua 2010 classes ----
  caragua_shp <- "ignore_data/urbano_caragua_coluna_classes2010/caragua_classes2010.shp"
  getShapefileInfo(caragua_shp)
  # ---- vegtype_2000 (disabled) ----
  # extent: xmin = -74, xmax = -34.5, ymin = -34, ymax = 5
  # extentValue <- c(-74, -34.5, -34, 5)
  # getShapefileInfo("vegtype_2000/vegtype_2000.shp")
  # shp2raster("vegtype_2000/vegtype_2000.shp", extentValue)
}
void_main()
| /shp_to_raster.r | no_license | hguerra/geospatial_conversion_tools | R | false | false | 1,651 | r | ## data path
setwd("data")  # NOTE(review): setwd() makes the script depend on the launch directory
# libraries
library(raster)
library(rgdal)
# Inspect a shapefile: list its layers, print the metadata of the first
# layer, then load and return that layer.
#
# fileName: path to a .shp file.
# Returns NULL (invisibly, with a warning) when the file does not exist;
# otherwise the first layer as loaded by readOGR().
getShapefileInfo <- function(fileName){
  # Bail out early on a missing file.  (The original used a bare `return;`,
  # which in R merely evaluates the `return` function object and does NOT
  # exit the function.)
  if (!file.exists(fileName)) {
    warning("File not found: ", fileName, call. = FALSE)
    return(invisible(NULL))
  }
  # Get the layer names
  fileLayers <- ogrListLayers(dsn = fileName)
  # Get and display the file information for the first layer
  # (a bare `fileInfo` inside a function prints nothing; print() is needed)
  fileInfo <- ogrInfo(dsn = fileName, fileLayers[1])
  print(fileInfo)
  # Load and return the first layer
  fileData <- readOGR(dsn = fileName, fileLayers[1])
  fileData
}
# Rasterize the first layer of a shapefile onto a regular grid and write
# the result to a GeoTIFF.
#
# fileName:    path to a .shp file.
# extentValue: numeric c(xmin, xmax, ymin, ymax) for the output grid.
#              (The original ignored this argument and hard-coded the
#              South-America extent; it is now honoured, with the old
#              hard-coded values kept as the default.)
# cellsPerDeg: grid resolution in cells per degree (was hard-coded to 5).
# outFile:     output GeoTIFF path.
# Returns the RasterLayer invisibly, or NULL (with a warning) if fileName
# does not exist.
shp2raster <- function(fileName, extentValue = c(-74, -34.5, -34, 5),
                       cellsPerDeg = 5, outFile = "raster_from_shp.tif"){
  # Bare `return;` in the original never actually exited the function.
  if (!file.exists(fileName)) {
    warning("File not found: ", fileName, call. = FALSE)
    return(invisible(NULL))
  }
  # Read the first layer of the shapefile
  fileLayers <- ogrListLayers(dsn = fileName)
  teow <- readOGR(dsn = fileName, fileLayers[1])
  ## Set up a raster "template" to use in rasterize()
  ext <- extent(extentValue[1], extentValue[2], extentValue[3], extentValue[4])
  xy <- abs(apply(as.matrix(bbox(ext)), 1, diff))  # extent size in degrees (x, y)
  r <- raster(ext, ncol = xy[1] * cellsPerDeg, nrow = xy[2] * cellsPerDeg)
  ## Rasterize the shapefile onto the template grid
  rr <- rasterize(teow, r)
  ## Preview and write out as an unsigned 16-bit GeoTIFF
  plot(rr)
  writeRaster(rr, outFile, format = 'GTiff', datatype = 'INT2U', overwrite = TRUE)
  invisible(rr)
}
#
# RUN
#
# Entry point: inspect the Caraguatatuba 2010 land-use shapefile.
# The vegtype_2000 workflow is kept below for reference but disabled.
void_main <- function() {
  # ---- caragua 2010 classes ----
  caragua_shp <- "ignore_data/urbano_caragua_coluna_classes2010/caragua_classes2010.shp"
  getShapefileInfo(caragua_shp)
  # ---- vegtype_2000 (disabled) ----
  # extent: xmin = -74, xmax = -34.5, ymin = -34, ymax = 5
  # extentValue <- c(-74, -34.5, -34, 5)
  # getShapefileInfo("vegtype_2000/vegtype_2000.shp")
  # shp2raster("vegtype_2000/vegtype_2000.shp", extentValue)
}
void_main()
|
library(shiny)
#setwd("C:/Users/Charles/Desktop")
shinyUI(
fluidPage(
titlePanel("An Overview of the Federalist Papers"),
sidebarPanel(width = 3,
sliderInput("fedPaper", "Range of Federalist Papers:", min=1, max=85, value=c(5,13)),
selectInput("colorScheme", "Color Scheme:",
choices = c("Default", "Accent", "Set1", "Set2","Set3", "Dark2", "Pastel1", "Pastel2", "None"))
),
mainPanel(
tabsetPanel(
tabPanel("Bar Multiples",
fluidRow(
column(4,offset = 2,
radioButtons("barType", "Sort By:", c("Most Frequent", "Least Frequent", "Highest TF-IDF", "LOWEST TF-IDF"), selected="Most Frequent")
),
column(4,
sliderInput("barFreq", "Number of Words:", min=1, max=10, value=5)
)
)
),
tabPanel("Cloud",
fluidRow(
column(4,offset = 2,
checkboxGroupInput("cloudAuthor", "Authors to Compare:",
choices = list("MADISON" = 1, "HAMILTON" = 2, "JAY" = 3, "UNKNOWN"=4))
),
column(4,
radioButtons("cloudType", "Word Size Based On:", c("Word Count", "TF-IDF"), selected="Word Count")
)
)
),
tabPanel("Lines",
fluidRow(
column(4,
dateRangeInput("lineDates", "Date of Publish", start=as.Date("1787-10-27"), min =as.Date("1787-10-27"),
end = as.Date("1788-08-13"), max = as.Date("1788-08-13"), format = "M-d-yyyy")
),
column(3,
sliderInput("lineFreq", "Number of Words:", min=1, max=10, value=3)
),
column(3,
checkboxGroupInput("lineAuthor", "Authors to Compare:",
choices = list("MADISON" = 1, "HAMILTON" = 2, "JAY" = 3, "UNKNOWN"=4))
),
column(2,
radioButtons("lineFacet", "Compare:", c("Separately", "Same Plot"), selected="Separately")
)
)
),
tabPanel("Network",
fluidRow(
column(2,
selectInput("networkLink", "Words Linked By:",
choices = c("and", "to", "for", "of", "from"), selected = "and")
),
column(5, offset = 1,
sliderInput("networkNodes", "Maximum Number of Nodes:", min=5, max=30, value=10)
),
column(3, offset = 1,
radioButtons("networkAuthor", "Authors To Network:",
choices = c("MADISON", "HAMILTON", "JAY", "UNKNOWN", "ALL"), selected = "ALL")
)
)
)
)
)
)
)
| /project-prototype/ui.R | no_license | chrono721/msan622 | R | false | false | 3,265 | r |
library(shiny)
#setwd("C:/Users/Charles/Desktop")
shinyUI(
fluidPage(
titlePanel("An Overview of the Federalist Papers"),
sidebarPanel(width = 3,
sliderInput("fedPaper", "Range of Federalist Papers:", min=1, max=85, value=c(5,13)),
selectInput("colorScheme", "Color Scheme:",
choices = c("Default", "Accent", "Set1", "Set2","Set3", "Dark2", "Pastel1", "Pastel2", "None"))
),
mainPanel(
tabsetPanel(
tabPanel("Bar Multiples",
fluidRow(
column(4,offset = 2,
radioButtons("barType", "Sort By:", c("Most Frequent", "Least Frequent", "Highest TF-IDF", "LOWEST TF-IDF"), selected="Most Frequent")
),
column(4,
sliderInput("barFreq", "Number of Words:", min=1, max=10, value=5)
)
)
),
tabPanel("Cloud",
fluidRow(
column(4,offset = 2,
checkboxGroupInput("cloudAuthor", "Authors to Compare:",
choices = list("MADISON" = 1, "HAMILTON" = 2, "JAY" = 3, "UNKNOWN"=4))
),
column(4,
radioButtons("cloudType", "Word Size Based On:", c("Word Count", "TF-IDF"), selected="Word Count")
)
)
),
tabPanel("Lines",
fluidRow(
column(4,
dateRangeInput("lineDates", "Date of Publish", start=as.Date("1787-10-27"), min =as.Date("1787-10-27"),
end = as.Date("1788-08-13"), max = as.Date("1788-08-13"), format = "M-d-yyyy")
),
column(3,
sliderInput("lineFreq", "Number of Words:", min=1, max=10, value=3)
),
column(3,
checkboxGroupInput("lineAuthor", "Authors to Compare:",
choices = list("MADISON" = 1, "HAMILTON" = 2, "JAY" = 3, "UNKNOWN"=4))
),
column(2,
radioButtons("lineFacet", "Compare:", c("Separately", "Same Plot"), selected="Separately")
)
)
),
tabPanel("Network",
fluidRow(
column(2,
selectInput("networkLink", "Words Linked By:",
choices = c("and", "to", "for", "of", "from"), selected = "and")
),
column(5, offset = 1,
sliderInput("networkNodes", "Maximum Number of Nodes:", min=5, max=30, value=10)
),
column(3, offset = 1,
radioButtons("networkAuthor", "Authors To Network:",
choices = c("MADISON", "HAMILTON", "JAY", "UNKNOWN", "ALL"), selected = "ALL")
)
)
)
)
)
)
)
|
# -*- tab-width:2;indent-tabs-mode:t;show-trailing-whitespace:t;rm-trailing-spaces:t -*-
# vi: set ts=2 noet:
#
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
library(ggplot2)
library(plyr)
library(dplyr)
library(viridis)
# Polar density plots of the (chi, BAH) hydrogen-bond angles for bonds
# donated by hydroxyl groups (hbdon_AXL / hbdon_HXL), one plot per acceptor
# hybridization class and per sample source.
feature_analyses <- c(feature_analyses, methods::new("FeaturesAnalysis",
id = "OHacceptor_chi",
author = "Matthew O'Meara",
brief_description = "",
feature_reporter_dependencies = c("HBondFeatures"),
run=function(self, sample_sources, output_dir, output_formats){

sele <-"
SELECT
	geom.cosBAH,
	geom.chi,
	CASE acc_site.HBChemType
		WHEN 'hbacc_IMD' THEN 'ring' WHEN 'hbacc_IME' THEN 'ring'
		WHEN 'hbacc_AHX' THEN 'sp3' WHEN 'hbacc_HXL' THEN 'sp3'
		WHEN 'hbacc_CXA' THEN 'sp2' WHEN 'hbacc_CXL' THEN 'sp2'
		WHEN 'hbacc_PBA' THEN 'bb_sp2' END AS hybrid,
	acc_site.HBChemType AS acc_chem_type, don_site.HBChemType AS don_chem_type,
	acc_atoms.base2_x AS ab2x, acc_atoms.base2_y AS ab2y, acc_atoms.base2_z AS ab2z, -- acceptor base 2 atom
	acc_atoms.base_x AS abx, acc_atoms.base_y AS aby, acc_atoms.base_z AS abz, -- acceptor base atom
	acc_atoms.atm_x AS ax, acc_atoms.atm_y AS ay, acc_atoms.atm_z AS az, -- acceptor atom
	don_atoms.atm_x AS hx, don_atoms.atm_y AS hy, don_atoms.atm_z AS hz -- hydrogen atom
FROM
	hbond_geom_coords AS geom,
	hbonds AS hbond,
	hbond_sites AS don_site, hbond_sites AS acc_site,
	hbond_site_atoms AS don_atoms, hbond_site_atoms AS acc_atoms
WHERE
	hbond.struct_id = geom.struct_id AND hbond.hbond_id = geom.hbond_id AND
	hbond.struct_id = don_site.struct_id AND hbond.don_id = don_site.site_id AND
	hbond.struct_id = acc_site.struct_id AND hbond.acc_id = acc_site.site_id AND
	don_atoms.struct_id = hbond.struct_id AND don_atoms.site_id = hbond.don_id AND
	acc_atoms.struct_id = hbond.struct_id AND acc_atoms.site_id = hbond.acc_id AND
	(don_site.HBChemType = 'hbdon_AXL' OR don_site.HBChemType = 'hbdon_HXL');"
f <- query_sample_sources(sample_sources, sele)

# chi for sp3/ring acceptors is measured about a pseudo-base built from the
# two base atoms; recompute it for those rows.
alt_chi_dihedral_angle <- function(ab2, ab, a, h){
	alt_ab <- (ab + ab2)/2
	alt_ab2 <- vector_crossprod(ab - ab2, a - ab) - alt_ab
	vector_dihedral(alt_ab2, alt_ab, a, h)
}

f[f$hybrid %in% c("sp3", "ring"), "chi"] <-
	with(f[f$hybrid %in% c("sp3", "ring"),], alt_chi_dihedral_angle(
		cbind(ab2x, ab2y, ab2z), cbind(abx, aby, abz),
		cbind(ax, ay, az), cbind(hx, hy, hz)))

# For sp3 acceptors the BAH angle is measured from the midpoint of the two
# base atoms rather than from the base atom itself.
alt_sp3_cosBAH <- function(ab2, ab, a, h){
	alt_ab <- (ab + ab2)/2
	vector_dotprod(vector_normalize(a-alt_ab), vector_normalize(h-a))
}

f <- f %>%
	dplyr::mutate(
		cosBAH = ifelse(hybrid != "sp3",
			cosBAH,
			alt_sp3_cosBAH(
				cbind(ab2x, ab2y, ab2z),
				cbind(abx, aby, abz),
				cbind(ax, ay, az),
				cbind(hx, hy, hz))))

# Lambert azimuthal equal-area projection of the (BAH, chi) sphere cap
f <- transform(f,
	capx = 2*sin(acos(cosBAH)/2)*cos(chi),
	capy = 2*sin(acos(cosBAH)/2)*sin(chi))

capx_limits <- c(-1.5,1.5); capy_limits <- capx_limits;

# BUG FIX: f$hybrid is a plain character column coming back from SQL, so the
# original levels(f$hybrid) was NULL and this loop never ran; iterate over
# the observed values instead.
l_ply(unique(f$hybrid), function(hyb){
	# BUG FIX: the original split with .("sample_sources"), a constant string,
	# so the data was never actually partitioned per sample source.
	d_ply(sample_sources, .(sample_source), function(sample_source){
		ss_id <- sample_source$sample_source[1]
		plot_id <- paste("hbond_chi_BAH_polar_density_", hyb, "_by_don_chem_type_", ss_id, sep="")
		# BUG FIX: the original subset() filter used `hybrid == hybrid`, which
		# compares the column with itself (always TRUE); filter explicitly.
		plot_data <- f[f$sample_source == ss_id & f$hybrid == hyb,]
		ggplot(data=plot_data) + theme_bw() +
			polar_equal_area_grids_bw() +
			stat_bin2d(aes(x=capx, y=capy, fill=log(..density..)), binwidth=c(.06,.06)) +
			# The query exposes don_chem_type/acc_chem_type; don_ss/acc_ss are never selected.
			facet_grid(don_chem_type ~ acc_chem_type) +
			ggtitle(paste("Hydroxyl Donor Hydrogen Bonds chi vs cosBAH Angles by Chemical Type\nLambert Azimuthal Projection Sample Source: ", ss_id, sep="")) +
			scale_x_continuous('', limits=capx_limits, breaks=c(-1, 0, 1)) +
			scale_y_continuous('', limits=capy_limits, breaks=c(-1, 0, 1)) +
			scale_fill_viridis('log(Normalized\nDensity)')
		save_plots(self, plot_id, sample_source, output_dir, output_formats)
	})
})

})) # end FeaturesAnalysis
| /inst/scripts/analysis/plots/hbonds/hydroxyl_sites/OHacceptor_chi.R | no_license | momeara/RosettaFeatures | R | false | false | 4,208 | r | # -*- tab-width:2;indent-tabs-mode:t;show-trailing-whitespace:t;rm-trailing-spaces:t -*-
# vi: set ts=2 noet:
#
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
library(ggplot2)
library(plyr)
library(dplyr)
library(viridis)
# Polar density plots of the (chi, BAH) hydrogen-bond angles for bonds
# donated by hydroxyl groups (hbdon_AXL / hbdon_HXL), one plot per acceptor
# hybridization class and per sample source.
feature_analyses <- c(feature_analyses, methods::new("FeaturesAnalysis",
id = "OHacceptor_chi",
author = "Matthew O'Meara",
brief_description = "",
feature_reporter_dependencies = c("HBondFeatures"),
run=function(self, sample_sources, output_dir, output_formats){

sele <-"
SELECT
	geom.cosBAH,
	geom.chi,
	CASE acc_site.HBChemType
		WHEN 'hbacc_IMD' THEN 'ring' WHEN 'hbacc_IME' THEN 'ring'
		WHEN 'hbacc_AHX' THEN 'sp3' WHEN 'hbacc_HXL' THEN 'sp3'
		WHEN 'hbacc_CXA' THEN 'sp2' WHEN 'hbacc_CXL' THEN 'sp2'
		WHEN 'hbacc_PBA' THEN 'bb_sp2' END AS hybrid,
	acc_site.HBChemType AS acc_chem_type, don_site.HBChemType AS don_chem_type,
	acc_atoms.base2_x AS ab2x, acc_atoms.base2_y AS ab2y, acc_atoms.base2_z AS ab2z, -- acceptor base 2 atom
	acc_atoms.base_x AS abx, acc_atoms.base_y AS aby, acc_atoms.base_z AS abz, -- acceptor base atom
	acc_atoms.atm_x AS ax, acc_atoms.atm_y AS ay, acc_atoms.atm_z AS az, -- acceptor atom
	don_atoms.atm_x AS hx, don_atoms.atm_y AS hy, don_atoms.atm_z AS hz -- hydrogen atom
FROM
	hbond_geom_coords AS geom,
	hbonds AS hbond,
	hbond_sites AS don_site, hbond_sites AS acc_site,
	hbond_site_atoms AS don_atoms, hbond_site_atoms AS acc_atoms
WHERE
	hbond.struct_id = geom.struct_id AND hbond.hbond_id = geom.hbond_id AND
	hbond.struct_id = don_site.struct_id AND hbond.don_id = don_site.site_id AND
	hbond.struct_id = acc_site.struct_id AND hbond.acc_id = acc_site.site_id AND
	don_atoms.struct_id = hbond.struct_id AND don_atoms.site_id = hbond.don_id AND
	acc_atoms.struct_id = hbond.struct_id AND acc_atoms.site_id = hbond.acc_id AND
	(don_site.HBChemType = 'hbdon_AXL' OR don_site.HBChemType = 'hbdon_HXL');"
f <- query_sample_sources(sample_sources, sele)

# chi for sp3/ring acceptors is measured about a pseudo-base built from the
# two base atoms; recompute it for those rows.
alt_chi_dihedral_angle <- function(ab2, ab, a, h){
	alt_ab <- (ab + ab2)/2
	alt_ab2 <- vector_crossprod(ab - ab2, a - ab) - alt_ab
	vector_dihedral(alt_ab2, alt_ab, a, h)
}

f[f$hybrid %in% c("sp3", "ring"), "chi"] <-
	with(f[f$hybrid %in% c("sp3", "ring"),], alt_chi_dihedral_angle(
		cbind(ab2x, ab2y, ab2z), cbind(abx, aby, abz),
		cbind(ax, ay, az), cbind(hx, hy, hz)))

# For sp3 acceptors the BAH angle is measured from the midpoint of the two
# base atoms rather than from the base atom itself.
alt_sp3_cosBAH <- function(ab2, ab, a, h){
	alt_ab <- (ab + ab2)/2
	vector_dotprod(vector_normalize(a-alt_ab), vector_normalize(h-a))
}

f <- f %>%
	dplyr::mutate(
		cosBAH = ifelse(hybrid != "sp3",
			cosBAH,
			alt_sp3_cosBAH(
				cbind(ab2x, ab2y, ab2z),
				cbind(abx, aby, abz),
				cbind(ax, ay, az),
				cbind(hx, hy, hz))))

# Lambert azimuthal equal-area projection of the (BAH, chi) sphere cap
f <- transform(f,
	capx = 2*sin(acos(cosBAH)/2)*cos(chi),
	capy = 2*sin(acos(cosBAH)/2)*sin(chi))

capx_limits <- c(-1.5,1.5); capy_limits <- capx_limits;

# BUG FIX: f$hybrid is a plain character column coming back from SQL, so the
# original levels(f$hybrid) was NULL and this loop never ran; iterate over
# the observed values instead.
l_ply(unique(f$hybrid), function(hyb){
	# BUG FIX: the original split with .("sample_sources"), a constant string,
	# so the data was never actually partitioned per sample source.
	d_ply(sample_sources, .(sample_source), function(sample_source){
		ss_id <- sample_source$sample_source[1]
		plot_id <- paste("hbond_chi_BAH_polar_density_", hyb, "_by_don_chem_type_", ss_id, sep="")
		# BUG FIX: the original subset() filter used `hybrid == hybrid`, which
		# compares the column with itself (always TRUE); filter explicitly.
		plot_data <- f[f$sample_source == ss_id & f$hybrid == hyb,]
		ggplot(data=plot_data) + theme_bw() +
			polar_equal_area_grids_bw() +
			stat_bin2d(aes(x=capx, y=capy, fill=log(..density..)), binwidth=c(.06,.06)) +
			# The query exposes don_chem_type/acc_chem_type; don_ss/acc_ss are never selected.
			facet_grid(don_chem_type ~ acc_chem_type) +
			ggtitle(paste("Hydroxyl Donor Hydrogen Bonds chi vs cosBAH Angles by Chemical Type\nLambert Azimuthal Projection Sample Source: ", ss_id, sep="")) +
			scale_x_continuous('', limits=capx_limits, breaks=c(-1, 0, 1)) +
			scale_y_continuous('', limits=capy_limits, breaks=c(-1, 0, 1)) +
			scale_fill_viridis('log(Normalized\nDensity)')
		save_plots(self, plot_id, sample_source, output_dir, output_formats)
	})
})

})) # end FeaturesAnalysis
|
# drawprofile.R -- per-residue fitness profiles.
# For each epitope/condition column in fitness_residue.txt, write a
# high-resolution PNG bar profile of log10 fitness along the sequence.
library(RColorBrewer)
mycol <- brewer.pal(8,'Accent')
b1 <- read.table('../analysis2/fitness_residue.txt',header=1)

# Render one fitness profile to ../figures/Fitness_profile_<name>.png.
#
# name: label used in the output file name.
# colu: column index of the data frame holding the log10 fitness values.
# data: data frame with a Pos column plus fitness columns (default: b1).
# col:  bar colour (default: first Accent palette colour).
drawpic <- function(name, colu, data = b1, col = mycol[1]){
  png(paste0('../figures/Fitness_profile_', name, '.png'),
      res = 600, height = 2500, width = 4500)
  # Close the device even if plotting fails part-way (the original leaked
  # the device on error because dev.off() was only reached on success).
  on.exit(dev.off(), add = TRUE)
  plot(data$Pos, data[, colu], type = 'h', lwd = 4, col = col,
       ylab = 'log10 fitness', ylim = c(-0.7, 0.5), xaxt = 'n', yaxt = 'n')
  box(lwd = 4)
  axis(1, at = c(0, 20, 40, 60, 80), lwd = 4)
  axis(2, at = c(-0.5, 0, 0.5), lwd = 4)
}
drawpic('SL9', 2)
drawpic('SL9No', 3)
drawpic('KF11', 4)
drawpic('KF11No', 5)
| /drawprofile.R | no_license | Tian-hao/Nef | R | false | false | 518 | r | #Rcode
# drawprofile.R -- per-residue fitness profiles.
# For each epitope/condition column in fitness_residue.txt, write a
# high-resolution PNG bar profile of log10 fitness along the sequence.
library(RColorBrewer)
mycol <- brewer.pal(8,'Accent')
b1 <- read.table('../analysis2/fitness_residue.txt',header=1)

# Render one fitness profile to ../figures/Fitness_profile_<name>.png.
#
# name: label used in the output file name.
# colu: column index of the data frame holding the log10 fitness values.
# data: data frame with a Pos column plus fitness columns (default: b1).
# col:  bar colour (default: first Accent palette colour).
drawpic <- function(name, colu, data = b1, col = mycol[1]){
  png(paste0('../figures/Fitness_profile_', name, '.png'),
      res = 600, height = 2500, width = 4500)
  # Close the device even if plotting fails part-way (the original leaked
  # the device on error because dev.off() was only reached on success).
  on.exit(dev.off(), add = TRUE)
  plot(data$Pos, data[, colu], type = 'h', lwd = 4, col = col,
       ylab = 'log10 fitness', ylim = c(-0.7, 0.5), xaxt = 'n', yaxt = 'n')
  box(lwd = 4)
  axis(1, at = c(0, 20, 40, 60, 80), lwd = 4)
  axis(2, at = c(-0.5, 0, 0.5), lwd = 4)
}
drawpic('SL9', 2)
drawpic('SL9No', 3)
drawpic('KF11', 4)
drawpic('KF11No', 5)
|
# Spect.R
# by Shuce Zhang May 4, 2018
# This script analyzes spectrum data from plate readers: it background-
# subtracts the raw traces, finds and labels peaks, and can optionally
# write out normalized spectra.
# Should you have any questions please contact Shuce: shuce@ualberta.ca
rm(list = ls())   # NOTE(review): clearing the global workspace in a script is discouraged
library(plyr)
library(ggplot2)
library(reshape2)
setwd("G:/Wet_Data_Analysis/Campbell/Spectrum/new/test")   # NOTE(review): machine-specific path
#raw <- read.table("raw.txt", header = TRUE) # input file
# First column = wavelength; remaining columns = one well each
init.df <- read.table("raw.txt", header = TRUE)
blank <- init.df$H7 # identify the background well
wav <- init.df[,1]
# Background-subtract every well, then re-attach the wavelength column
raw <- init.df[,2:ncol(init.df)] - blank
raw <- data.frame(wav, raw)
raw$A7 <- NULL # Remove the background wells
raw$H7 <- NULL # Remove the background wells
colNum <- ncol(raw)
reorg <- 0 # 0 if replications are in the same row, 1 if in the same column
reorg.rep <- 3 # number of samples
norm_wav_range <- c(350,390) # range of peaks you want to label
do_norm <- FALSE # TRUE if you want to output normalized data
# Normalization factor for each sample.
# Measurements of each sample will be divided by these values, respectively.
norm_vec <- c(0.083, 0.200, 0.043, 0.152, 0.169, 0.192,
              0.210, 0.213, 0.216, 0.206, 0.193, 0.216,
              0.144, 0.207, 0.227, 0.196, 0.189, 0.204,
              0.253, 0.240, 0.217, 0.117, 0.194, 0.193,
              0.182, 0.191, 0.196, 0.210, 0.185, 0.196,
              0.168, 0.195, 0.086, 0.204, 0.177, 0.225,
              0.216, 0.163, 0.521, 0.210, 0.278, 0.230,
              0.274, 0.246, 0.234, 0.232, 0.252, 0.310)
#0.199, 0.183, 0.183, 1, 0.281, 0.291, 1)
#norm_update <- t(raw[11,2:colNum])
#rownames(norm_update) <- c()
#norm_vec <- norm_update # Comment if you want to use the old coefficients
##################Defining functions###########################
# Locate local maxima (default) or minima of a numeric series.
#
# x:          numeric vector.
# partial:    if TRUE, series endpoints may also count as peaks.
# decreasing: if TRUE, return minima instead of maxima.
# Returns the integer indices of the detected peaks.
which.peaks <- function(x, partial = TRUE, decreasing = FALSE) {
  if (decreasing) {
    rising <- diff(x) > 0
    if (partial) {
      which(diff(c(FALSE, rising, TRUE)) > 0)
    } else {
      which(diff(rising) > 0) + 1
    }
  } else {
    rising <- diff(x) >= 0
    if (partial) {
      which(diff(c(TRUE, rising, FALSE)) < 0)
    } else {
      which(diff(rising) < 0) + 1
    }
  }
}
# Tabulate the peaks of one spectrum: index, wavelength and intensity,
# with "***" flagging peaks inside the wavelength window of interest.
#
# x:      intensity values of a single sample.
# wav:    wavelength axis (defaults to column 1 of the global `raw`).
# wavRan: c(low, high) window whose peaks are flagged (global norm_wav_range).
summ.peaks <- function(x, partial=TRUE, decreasing=FALSE, wav = raw[,1], wavRan = norm_wav_range) {
  peak_idx <- which.peaks(x, partial = partial, decreasing = decreasing)
  peak_wav <- wav[peak_idx]
  peak_int <- x[peak_idx]
  flagged <- rep("", length(peak_idx))
  flagged[peak_wav >= wavRan[1] & peak_wav <= wavRan[2]] <- "***"
  data.frame(maInd = peak_idx, wavLen = peak_wav, Int = peak_int, star = flagged)
}
#############################Reorganizing data####################################
# If replicates of one sample arrive in consecutive columns (reorg == 1),
# regroup the columns so that all replicates of a sample end up adjacent.
if (reorg) {
  df <- data.frame(raw[,1])
  raw[,1] <- NULL
  sel <- seq(from = 1, to = colNum - 1, by = reorg.rep)
  for (i in 1:reorg.rep) {
    df <- data.frame(df, raw[, sel+i-1])
  }
  raw <- df
}
names(raw) <- c('Wavelength', paste('sample', seq(colNum-1), sep = '.'))
#############################Visualization initial figure#########################
# One plot per sample: the trace plus its local maxima (red) and minima (blue).
p <- ggplot(data = NULL, aes(x=wav))
for (i in 2:colNum) {
  print(i)
  randomwalk <- raw[,i]
  # Generate Data
  tops <- which.peaks(randomwalk, decreasing = FALSE)
  bottoms <- which.peaks(randomwalk, decreasing = TRUE)
  # Color functions
  cf.top <- grDevices::colorRampPalette("red")
  cf.bottom <- grDevices::colorRampPalette("blue")
  #plot(randomwalk, type = 'l', main = "Minima & Maxima\nVariable Thresholds")
  #lines(randomwalk, raw[,1])
  # NOTE(review): ggplot aes() is evaluated lazily at print time and layers
  # keep accumulating on `p`, so on each iteration every earlier layer is
  # re-rendered with the *current* randomwalk/tops/bottoms values.
  p <- p + geom_line(aes(x=wav, y=randomwalk), color="#0072B2")
  p <- p + geom_point(aes(x=wav[tops], y=randomwalk[tops]), color='red') +
    geom_text(aes(x=wav[tops], y=randomwalk[tops], label = wav[tops]), nudge_y = 0.03, check_overlap = TRUE)
  p <- p + geom_point(aes(x=wav[bottoms], y=randomwalk[bottoms]), color='blue')
  p <- p + labs(x = 'Wavelength / nm', y = names(raw)[i])
  print(p)
}
# Peak summary per sample (index, wavelength, intensity, "***" inside norm_wav_range)
raw.summ <- apply(raw[,2:colNum], 2, summ.peaks)
print(raw.summ)
sink("00summ.txt"); print(raw.summ); sink()
#write.csv(raw.summ, file = "00summ.txt", row.names = FALSE, col.names = FALSE)
###############################Normalization##########################
# Divide each sample trace by its entry in norm_vec and plot/save the result.
if (do_norm) {
  newdf <- data.frame(raw[1])
  for (i in 2:colNum) {
    temp <- raw[,i]/norm_vec[i-1]
    newdf <- data.frame(newdf, temp)
  }
  names(newdf) <- names(raw)
  long.df <- melt(newdf,id.vars = 1)
  names(long.df) <- c('Wavelength', 'Sample', 'Intensity')
  q <- ggplot(data = long.df, aes(x = Wavelength, y= Intensity))
  q <- q + geom_line(aes(color = Sample))
  print(q)
  write.table(newdf, file = "00signal.txt", sep = ",", row.names = FALSE, col.names = FALSE)
}
| /spect.R | no_license | shucez/Spectrum | R | false | false | 4,798 | r | # Spect.R
# by Shuce Zhang May 4, 2018
# This script analyzes spectrum data from plate readers: it background-
# subtracts the raw traces, finds and labels peaks, and can optionally
# write out normalized spectra.
# Should you have any questions please contact Shuce: shuce@ualberta.ca
rm(list = ls())   # NOTE(review): clearing the global workspace in a script is discouraged
library(plyr)
library(ggplot2)
library(reshape2)
setwd("G:/Wet_Data_Analysis/Campbell/Spectrum/new/test")   # NOTE(review): machine-specific path
#raw <- read.table("raw.txt", header = TRUE) # input file
# First column = wavelength; remaining columns = one well each
init.df <- read.table("raw.txt", header = TRUE)
blank <- init.df$H7 # identify the background well
wav <- init.df[,1]
# Background-subtract every well, then re-attach the wavelength column
raw <- init.df[,2:ncol(init.df)] - blank
raw <- data.frame(wav, raw)
raw$A7 <- NULL # Remove the background wells
raw$H7 <- NULL # Remove the background wells
colNum <- ncol(raw)
reorg <- 0 # 0 if replications are in the same row, 1 if in the same column
reorg.rep <- 3 # number of samples
norm_wav_range <- c(350,390) # range of peaks you want to label
do_norm <- FALSE # TRUE if you want to output normalized data
# Normalization factor for each sample.
# Measurements of each sample will be divided by these values, respectively.
norm_vec <- c(0.083, 0.200, 0.043, 0.152, 0.169, 0.192,
              0.210, 0.213, 0.216, 0.206, 0.193, 0.216,
              0.144, 0.207, 0.227, 0.196, 0.189, 0.204,
              0.253, 0.240, 0.217, 0.117, 0.194, 0.193,
              0.182, 0.191, 0.196, 0.210, 0.185, 0.196,
              0.168, 0.195, 0.086, 0.204, 0.177, 0.225,
              0.216, 0.163, 0.521, 0.210, 0.278, 0.230,
              0.274, 0.246, 0.234, 0.232, 0.252, 0.310)
#0.199, 0.183, 0.183, 1, 0.281, 0.291, 1)
#norm_update <- t(raw[11,2:colNum])
#rownames(norm_update) <- c()
#norm_vec <- norm_update # Comment if you want to use the old coefficients
##################Defining functions###########################
which.peaks <- function(x, partial = TRUE, decreasing = FALSE) {
  # Return the indices of the local extrema of the numeric vector x.
  # decreasing = FALSE locates local maxima, decreasing = TRUE local minima.
  # With partial = TRUE a run that is still rising (falling) at either end
  # of the vector also counts as an extremum.
  if (decreasing) {
    rising <- diff(x) > 0
    if (partial) {
      which(diff(c(FALSE, rising, TRUE)) > 0)
    } else {
      which(diff(rising) > 0) + 1
    }
  } else {
    non.falling <- diff(x) >= 0
    if (partial) {
      which(diff(c(TRUE, non.falling, FALSE)) < 0)
    } else {
      which(diff(non.falling) < 0) + 1
    }
  }
}
summ.peaks <- function(x, partial=TRUE, decreasing=FALSE, wav = raw[,1], wavRan = norm_wav_range) {
  # Summarise one sample trace's peaks as a data frame with columns:
  #   maInd  - index of each peak within x
  #   wavLen - wavelength at that index
  #   Int    - intensity at that index
  #   star   - "***" when the wavelength lies inside wavRan, "" otherwise
  # Defaults pull wav / wavRan from the script globals (raw, norm_wav_range).
  maInd <- which.peaks(x, partial = partial, decreasing = decreasing)
  wavLen <- wav[maInd]
  Int <- x[maInd]
  flag <- rep("", length(maInd))
  flag[wavLen >= wavRan[1] & wavLen <= wavRan[2]] <- "***"
  data.frame(maInd, wavLen, Int, star = flag)
}
#############################Reorganizing data####################################
# Optional column shuffle (reorg = 1): regroups interleaved measurement
# columns so that columns belonging to the same sample end up adjacent.
# NOTE(review): assumes (colNum - 1) is a multiple of reorg.rep -- confirm
# for each plate layout.
if (reorg) {
# Keep the wavelength axis aside, then drop it from raw while reshuffling.
df <- data.frame(raw[,1])
raw[,1] <- NULL
# Indices of every reorg.rep-th measurement column.
sel <- seq(from = 1, to = colNum - 1, by = reorg.rep)
for (i in 1:reorg.rep) {
df <- data.frame(df, raw[, sel+i-1])
}
raw <- df
}
names(raw) <- c('Wavelength', paste('sample', seq(colNum-1), sep = '.'))
#############################Visualization initial figure#########################
# One figure per sample: the spectrum trace (blue line) with local maxima
# marked in red (labelled with their wavelength) and local minima in blue.
#
# BUG FIX: the previous version accumulated layers onto a single ggplot
# object while mapping the loop variables (randomwalk, tops, bottoms)
# inside aes(). ggplot2 evaluates aes() mappings lazily, at print() time,
# so every previously added layer re-read the *current* iteration's data
# and each printed figure effectively showed only the last sample.
# Each layer now receives its own data frame, freezing the values per
# iteration. (The unused colorRampPalette helpers were dropped.)
for (i in 2:colNum) {
  print(i)  # progress indicator: current sample column
  randomwalk <- raw[,i]
  tops <- which.peaks(randomwalk, decreasing = FALSE)     # local maxima
  bottoms <- which.peaks(randomwalk, decreasing = TRUE)   # local minima
  trace.df <- data.frame(wav = wav, intensity = randomwalk)
  tops.df <- data.frame(wav = wav[tops], intensity = randomwalk[tops])
  bottoms.df <- data.frame(wav = wav[bottoms], intensity = randomwalk[bottoms])
  p <- ggplot(trace.df, aes(x = wav, y = intensity)) +
    geom_line(color = "#0072B2") +
    geom_point(data = tops.df, color = 'red') +
    geom_text(data = tops.df, aes(label = wav), nudge_y = 0.03, check_overlap = TRUE) +
    geom_point(data = bottoms.df, color = 'blue') +
    labs(x = 'Wavelength / nm', y = names(raw)[i])
  print(p)
}
# Summarise the detected peaks of every sample column: one data frame per
# sample (index / wavelength / intensity / "***" flag inside norm_wav_range).
raw.summ <- apply(raw[,2:colNum], 2, summ.peaks)
print(raw.summ)
# Persist the printed summaries to 00summ.txt by temporarily redirecting stdout.
sink("00summ.txt"); print(raw.summ); sink()
#write.csv(raw.summ, file = "00summ.txt", row.names = FALSE, col.names = FALSE)
###############################Normalization##########################
# Optionally divide every sample trace by its per-sample factor in norm_vec,
# overlay all normalised spectra in one plot, and write the normalised
# table to 00signal.txt.
if (do_norm) {
# Start from the wavelength column, then append one normalised column per sample.
newdf <- data.frame(raw[1])
for (i in 2:colNum) {
# norm_vec is indexed per sample (no entry for the wavelength column).
temp <- raw[,i]/norm_vec[i-1]
newdf <- data.frame(newdf, temp)
}
names(newdf) <- names(raw)
# Wide -> long so every sample can be drawn as its own coloured line.
long.df <- melt(newdf,id.vars = 1)
names(long.df) <- c('Wavelength', 'Sample', 'Intensity')
q <- ggplot(data = long.df, aes(x = Wavelength, y= Intensity))
q <- q + geom_line(aes(color = Sample))
print(q)
write.table(newdf, file = "00signal.txt", sep = ",", row.names = FALSE, col.names = FALSE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_eccentricity.R
\name{get_eccentricity}
\alias{get_eccentricity}
\title{Get node eccentricities}
\usage{
get_eccentricity(graph)
}
\arguments{
\item{graph}{a graph object of class
\code{dgr_graph}.}
}
\value{
a data frame of eccentricity values,
one row per node ID.
}
\description{
Get a data frame with node
eccentricity values.
}
\examples{
# Get the eccentricities for all nodes
# in a randomly-created graph
get_eccentricity(
graph = create_random_graph(
5, 7, set_seed = 23))
#> id eccentricity
#> 1 1 2
#> 2 2 3
#> 3 3 2
#> 4 4 1
#> 5 5 0
}
| /man/get_eccentricity.Rd | no_license | pcuthbert/DiagrammeR | R | false | true | 709 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_eccentricity.R
\name{get_eccentricity}
\alias{get_eccentricity}
\title{Get node eccentricities}
\usage{
get_eccentricity(graph)
}
\arguments{
\item{graph}{a graph object of class
\code{dgr_graph}.}
}
\value{
a data frame of eccentricity values,
one row per node ID.
}
\description{
Get a data frame with node
eccentricity values.
}
\examples{
# Get the eccentricities for all nodes
# in a randomly-created graph
get_eccentricity(
graph = create_random_graph(
5, 7, set_seed = 23))
#> id eccentricity
#> 1 1 2
#> 2 2 3
#> 3 3 2
#> 4 4 1
#> 5 5 0
}
|
# Book recommendation demo using recommenderlab.
# NOTE(review): `book` is never loaded in this script -- it is assumed to
# already exist in the workspace (a ratings table with a Book.Rating
# column); confirm where it comes from.
install.packages("recommenderlab", dependencies=TRUE)
library(Matrix)
library(recommenderlab)
library(caTools)
str(book)
View(book)
hist(book$Book.Rating) # distribution of the raw ratings
# Coerce the ratings into recommenderlab's sparse real-valued rating matrix.
book_data_matrix <- as(book, 'realRatingMatrix')
# Popularity-based recommender: suggests globally popular books.
book_recomm_model <- Recommender(book_data_matrix, method = "POPULAR")
recomm_items1 <- predict(book_recomm_model, book_data_matrix[1]) # recommendations for user 1
as(recomm_items1, "list")
# User-based collaborative filtering; top 4 suggestions for user 1.
book_recomm_model2 <- Recommender(book_data_matrix, method = "UBCF")
recomm_items2 <- predict(book_recomm_model2, book_data_matrix[1], n=4)
as(recomm_items2, "list")
| /Recommandation System/Solution (Book).R | no_license | Abhishek012345/Data-Science-Assignment | R | false | false | 566 | r | install.packages("recommenderlab", dependencies=TRUE)
# Book recommendation demo using recommenderlab.
# NOTE(review): `book` is never loaded in this script -- it is assumed to
# already exist in the workspace (a ratings table with a Book.Rating
# column); confirm where it comes from.
library(Matrix)
library(recommenderlab)
library(caTools)
str(book)
View(book)
hist(book$Book.Rating) # distribution of the raw ratings
# Coerce the ratings into recommenderlab's sparse real-valued rating matrix.
book_data_matrix <- as(book, 'realRatingMatrix')
# Popularity-based recommender: suggests globally popular books.
book_recomm_model <- Recommender(book_data_matrix, method = "POPULAR")
recomm_items1 <- predict(book_recomm_model, book_data_matrix[1]) # recommendations for user 1
as(recomm_items1, "list")
# User-based collaborative filtering; top 4 suggestions for user 1.
book_recomm_model2 <- Recommender(book_data_matrix, method = "UBCF")
recomm_items2 <- predict(book_recomm_model2, book_data_matrix[1], n=4)
as(recomm_items2, "list")
|
#' List all the observable partial capture histories
#'
#' This function returns a list of all the observable partial capture histories which can be recorded in a discrete-time capture-recapture setting with \eqn{t} consecutive trapping occasions. The observable partial capture histories are \eqn{2^t-1}
#'
#' @usage list.historylabels(t,t.max=15)
#'
#' @param t a positive integer representing the total number of trapping occasions
#' @param t.max a positive integer representing upper bound on the total number of trapping occasions allowed.
#' @details For obvious computing/memory reasons t is not allowed to be arbitrarily large. With \code{t.max=15} there are 32767 possible partial capture histories. If \code{t>t.max} the function stops with an error message.
#'
#' @return
#'
#' A list of all the observable partial capture histories which can be recorded in a discrete-time capture-recapture setting with t consecutive trapping occasions. If \code{t>t.max} the function stops with an error message.
#'
#' @author Danilo Alunni Fegatelli and Luca Tardella
#'
#' @seealso \code{\link{partition.ch}}
#'
#' @examples
#'
#' list.historylabels(t=4)
#'
#' @export
list.historylabels <- function(t, t.max = 15) {
  # List every observable partial capture history for t trapping occasions:
  # the empty history "" followed by all binary strings of length 1..t-1,
  # with the first occasion varying fastest (same order as expand.grid).
  #
  # Improvements over the previous revision:
  #  * builds the strings directly instead of round-tripping through
  #    factors, t() and repeated data.frame coercions;
  #  * seq_len() makes t = 1 return just the empty history instead of
  #    iterating over the backwards range 1:0;
  #  * the result no longer carries meaningless "V1", "V2", ... names
  #    inherited from intermediate data-frame columns.
  if (t > t.max) stop(paste("argument t cannot be greater than t.max=", t.max, sep = ""))
  histories <- list("")
  for (j in seq_len(t - 1)) {
    # All 0/1 combinations of length j, first position varying fastest.
    grid <- expand.grid(rep(list(c("0", "1")), j),
                        KEEP.OUT.ATTRS = FALSE, stringsAsFactors = FALSE)
    combos <- do.call(paste0, unname(as.list(grid)))
    histories <- c(histories, as.list(combos))
  }
  histories
}
| /R/list.historylabels.R | no_license | lucatardella/BBRecapture | R | false | false | 1,766 | r | #' List all the observable partial capture histories
#'
#' This function returns a list of all the observable partial capture histories which can be recorded in a discrete-time capture-recapture setting with \eqn{t} consecutive trapping occasions. The observable partial capture histories are \eqn{2^t-1}
#'
#' @usage list.historylabels(t,t.max=15)
#'
#' @param t a positive integer representing the total number of trapping occasions
#' @param t.max a positive integer representing upper bound on the total number of trapping occasions allowed.
#' @details For obvious computing/memory reasons t is not allowed to be arbitrarily large. With \code{t.max=15} there are 32767 possible partial capture histories. If \code{t>t.max} the function stops with an error message.
#'
#' @return
#'
#' A list of all the observable partial capture histories which can be recorded in a discrete-time capture-recapture setting with t consecutive trapping occasions. If \code{t>t.max} the function stops with an error message.
#'
#' @author Danilo Alunni Fegatelli and Luca Tardella
#'
#' @seealso \code{\link{partition.ch}}
#'
#' @examples
#'
#' list.historylabels(t=4)
#'
#' @export
list.historylabels = function(t, t.max=15){
# Guard: the number of histories grows as 2^t, so refuse t above t.max.
if(t>t.max) stop(paste("argument t cannot be greater than t.max=",t.max,sep=""))
ph.list=vector(mode="list",length=0)
# For each history length j = 1 .. t-1, enumerate every 0/1 combination.
for(j in 1:(t-1)){
# j two-level factors (0/1), one per occasion of the partial history.
fac=vector(mode="list",length=j)
lapply(fac[1:j], function(x) factor(c(0,1))) -> fac[1:j]
# expand.grid() lists all combinations (first occasion varies fastest);
# transposing makes each combination a column of the data frame.
matindex=expand.grid(fac)
matindex=as.data.frame(t(matindex))
# Re-coerce to 0/1 factors: t() went through a character matrix.
lapply(matindex[1:ncol(matindex)],function(x) factor(x,levels=c("0","1"))) -> matindex[1:ncol(matindex)]
ph.list=c(ph.list,as.list(matindex))
}
# Prepend the empty history, then collapse each combination to one string.
ph.list=c("",ph.list)
out=lapply(ph.list,paste,collapse="")
# NOTE(review): for t = 1 the range 1:(t-1) is 1:0 and the loop still runs;
# confirm t >= 2 is always intended.
return(out)
}
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58789179781426e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615831291-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,048 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58789179781426e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
# Render the Elo calibration figure to a PNG file.
png("elograph.png", height=400, width=800);
# Load the winner csv file
# NOTE(review): the first column appears to hold the model's predicted win
# probability for each match, in [0, 1] -- confirm the CSV schema.
mydata <- read.csv("c:\\python34\\winner_scores_2.csv");
# Pick the first column in vector form for input into histogram
mydata1 <- mydata[,1];
# Create the histogram
# Bucket the predicted probabilities into 20 bins of width 0.05.
histdata1 <- hist(mydata1, xlim=c(0,1), breaks=c(seq(0,1,0.05)));
print("average");
mean(mydata1);
# Bin edges and per-bin match counts from the histogram object.
histvalues1 <- histdata1$breaks;
histcounts1 <- histdata1$counts;
# Per-bucket accumulators, filled by the loop below.
headers <- vector(mode="numeric", length=0);
values <- vector(mode="numeric", length=0);
diffs <- vector(mode="numeric", length=0);
totals <- vector(mode="numeric", length=0);
# For each predicted-probability bucket above 0.5, pair it with its mirror
# bucket below 0.5 and measure how often the favourite actually won:
#   headers - expected win probability (bucket midpoint, from the top down)
#   values  - observed win fraction for that bucket pair
#   diffs   - (observed - expected) weighted by the pair's match count
#   totals  - number of matches in the bucket pair
# Improvements: vectors are preallocated instead of grown with append()
# inside the loop (quadratic copying), and the loop bound is an explicit
# integer rather than the fractional length(histcounts1)/2.
n.buckets <- floor(length(histcounts1) / 2)
headers <- numeric(n.buckets)
values <- numeric(n.buckets)
diffs <- numeric(n.buckets)
totals <- numeric(n.buckets)
for (i in seq_len(n.buckets))
{
numRight <- histcounts1[length(histcounts1) + 1 - i]   # wins in the i-th top bucket
header <- (histvalues1[length(histvalues1) - i] + histvalues1[length(histvalues1) - i + 1]) / 2
total <- histcounts1[i] + numRight                     # matches in the mirrored pair
percentCorrect <- numRight / total
headers[i] <- header
values[i] <- percentCorrect
diffs[i] <- (percentCorrect - header) * total
totals[i] <- total
}
datatable <- rbind(headers, values, totals)
datatable
# Two-panel summary: observed vs expected win rate per bucket (left) and
# the number of matches per bucket (right).
par(mfrow = c(1, 2))
df.bar <- barplot(values, names.arg=headers, ylim=c(0.4,1), xpd=FALSE, col=c("darkblue"), main="Actual Win Percentage per Bucket vs. Expected", beside=TRUE)
# Overlay the expected (perfectly calibrated) win probability per bucket.
lines(x = df.bar, y = headers)
points(x = df.bar, y = headers, col="black", bg="yellow", pch=21)
barplot(totals, names.arg=headers, ylim=c(0, 5000), col=c("darkblue"), main="Number of Matches per Bucket", beside=TRUE)
# BUG FIX: length(mydata) is the number of data-frame *columns*, not the
# number of matches; the match count is the number of rows.
totalMatches <- nrow(mydata)
totalMatches
sum(diffs) / totalMatches   # mean calibration error per match
# output histogram to disk and open it up
garbage <- dev.off()
browseURL("elograph.png")
| /r/elohistogram.R | no_license | KaiserKyle/Kayfabermetrics | R | false | false | 1,888 | r | png("elograph.png", height=400, width=800);
# Load the winner csv file
mydata <- read.csv("c:\\python34\\winner_scores_2.csv");
# Pick the first column in vector form for input into histogram
mydata1 <- mydata[,1];
# Create the histogram
histdata1 <- hist(mydata1, xlim=c(0,1), breaks=c(seq(0,1,0.05)));
print("average");
mean(mydata1);
histvalues1 <- histdata1$breaks;
histcounts1 <- histdata1$counts;
headers <- vector(mode="numeric", length=0);
values <- vector(mode="numeric", length=0);
diffs <- vector(mode="numeric", length=0);
totals <- vector(mode="numeric", length=0);
for (i in 1:(length(histcounts1)/2))
{
numRight <- histcounts1[length(histcounts1) + 1 - i];
header = (histvalues1[length(histvalues1) - i] + histvalues1[length(histvalues1) - i + 1]) / 2;
total <- histcounts1[i] + numRight;
percentCorrect <- numRight / total;
headers <- append(headers, header);
values <- append(values, percentCorrect);
diffs <- append(diffs, (percentCorrect - header) * total);
totals <- append(totals, total);
}
datatable = rbind(headers, values, totals);
datatable;
#layout(rbind(c(1),c(2)), heights=c(1,1));
#layout(matrix(c(1,1), 2, 1, byrow=TRUE));
par(mfrow = c(1, 2))
df.bar <- barplot(values, names.arg=headers, ylim=c(0.4,1), xpd=FALSE, col=c("darkblue"), main="Actual Win Percentage per Bucket vs. Expected", beside=TRUE);
#legend("topright", legend=c("Old","New"), fill=c("darkred","darkgreen"));
lines(x = df.bar, y = headers);
points(x = df.bar, y = headers, col="black", bg="yellow", pch=21);
barplot(totals, names.arg=headers, ylim=c(0, 5000), col=c("darkblue"), main="Number of Matches per Bucket", beside=TRUE);
#legend("topleft", legend=c("Old","New"), fill=c("darkred","darkgreen"));
# BUG FIX: length(mydata) counts the data frame's columns, not its rows;
# the number of matches is the number of rows.
totalMatches <- nrow(mydata)
totalMatches
sum(diffs) / totalMatches   # mean calibration error per match
# output histogram to disk and open it up
garbage <- dev.off();
browseURL("elograph.png");
|
\name{affy.mgu74bv2}
\alias{affy.mgu74bv2}
\docType{data}
\title{Mouse (Affymetrix): affy.mgu74bv2}
\description{
Information about accession ID and affy ID
}
\usage{data(affy.mgu74bv2)}
\examples{
data(affy.mgu74bv2)
}
\keyword{datasets}
| /man/affy.mgu74bv2.Rd | no_license | cran/BootCL | R | false | false | 255 | rd | \name{affy.mgu74bv2}
\alias{affy.mgu74bv2}
\docType{data}
\title{Mouse (Affymetrix): affy.mgu74bv2}
\description{
Information about accession ID and affy ID
}
\usage{data(affy.mgu74bv2)}
\examples{
data(affy.mgu74bv2)
}
\keyword{datasets}
|
#################################################################
############# BAR GRAPH: Represent discrete categories of data
# length- represents the magnitude or frequencies of data
head(mtcars)
c <- table(mtcars$gear)
barplot(c, main="Car Distribution",
xlab="Number of Gears")
t=tapply(iris$Sepal.Length, iris$Species, mean)
barplot(t, main="Average Sepal Length",
xlab="Species",ylab="Mean")
library(ggplot2)
data(diamonds)
head(diamonds)
table(diamonds$color, diamonds$clarity)
barplot(table(diamonds$color, diamonds$clarity),
legend = levels(diamonds$color),
beside = TRUE)
barplot( table(diamonds$color, diamonds$clarity),
legend = levels(diamonds$color),
beside = TRUE,
xlab = "Diamond Clarity", # Add a label to the X-axis
ylab = "Diamond Count", # Add a label to the Y-axis
main = "Diamond Clarity, Grouped by Color", # Add a plot title
col = c("#FFFFFF","#F5FCC2","#E0ED87","#CCDE57", # Add color*
"#B3C732","#94A813","#718200") )
d=table(diamonds$color, diamonds$clarity)
######## USe GGPLOT for better graphs
# Very basic bar graph
qplot(factor(cyl), data=mtcars) #plot factor variables
#or
ggplot(mtcars, aes(x=factor(cyl))) + geom_bar()
qplot(color, data=diamonds, geom="bar") #specify bar
#stacked bars
head(diamonds)
ggplot(diamonds, aes(clarity, fill=cut)) + geom_bar(position="dodge")
ggplot(diamonds, aes(cut, fill=cut)) + geom_bar() +
facet_grid(. ~ clarity) #seperate panels on the basis of clarity
ship=as.data.frame(Titanic)
head(ship)
ggplot(aes(x=Age, weight=Freq), data=ship) +
geom_bar()
ggplot(aes(x=Age, weight=Freq), data=ship) +
geom_bar()+
facet_grid(Sex~Class)
## Error bars show the uncertainty of a reported measurement (here the mean),
## typically one standard deviation or one standard error.
# Per-species summary: mean and standard deviation of petal length.
library(dplyr)
isum= iris %>% # the names of the new data frame and the data frame to be summarised
group_by(Species) %>% # the grouping variable
summarise(avg = mean(Petal.Length), # calculates the mean of each group
sdpl = sd(Petal.Length)) # ...and its standard deviation
# Bar chart of the means with +/- 1 standard deviation error bars.
ggplot(isum, aes(Species, avg)) + geom_bar(stat="identity") + geom_errorbar(aes(ymin=avg-sdpl, ymax=avg+sdpl),width=0.2)
| /r/Lecture19_barplt1.r | permissive | praveentn/hgwxx7 | R | false | false | 2,396 | r | #################################################################
############# BAR GRAPH: Represent discrete categories of data
# length- represents the magnitude or frequencies of data
head(mtcars)
c <- table(mtcars$gear)
barplot(c, main="Car Distribution",
xlab="Number of Gears")
t=tapply(iris$Sepal.Length, iris$Species, mean)
barplot(t, main="Average Sepal Length",
xlab="Species",ylab="Mean")
library(ggplot2)
data(diamonds)
head(diamonds)
table(diamonds$color, diamonds$clarity)
barplot(table(diamonds$color, diamonds$clarity),
legend = levels(diamonds$color),
beside = TRUE)
barplot( table(diamonds$color, diamonds$clarity),
legend = levels(diamonds$color),
beside = TRUE,
xlab = "Diamond Clarity", # Add a label to the X-axis
ylab = "Diamond Count", # Add a label to the Y-axis
main = "Diamond Clarity, Grouped by Color", # Add a plot title
col = c("#FFFFFF","#F5FCC2","#E0ED87","#CCDE57", # Add color*
"#B3C732","#94A813","#718200") )
d=table(diamonds$color, diamonds$clarity)
######## USe GGPLOT for better graphs
# Very basic bar graph
qplot(factor(cyl), data=mtcars) #plot factor variables
#or
ggplot(mtcars, aes(x=factor(cyl))) + geom_bar()
qplot(color, data=diamonds, geom="bar") #specify bar
#stacked bars
head(diamonds)
ggplot(diamonds, aes(clarity, fill=cut)) + geom_bar(position="dodge")
ggplot(diamonds, aes(cut, fill=cut)) + geom_bar() +
facet_grid(. ~ clarity) #seperate panels on the basis of clarity
ship=as.data.frame(Titanic)
head(ship)
ggplot(aes(x=Age, weight=Freq), data=ship) +
geom_bar()
ggplot(aes(x=Age, weight=Freq), data=ship) +
geom_bar()+
facet_grid(Sex~Class)
## Error bars show the uncertainty of a reported measurement (here the mean),
## typically one standard deviation or one standard error.
library(dplyr)
isum= iris %>% # the names of the new data frame and the data frame to be summarised
group_by(Species) %>% # the grouping variable
summarise(avg = mean(Petal.Length), # calculates the mean of each group
sdpl = sd(Petal.Length))
ggplot(isum, aes(Species, avg)) + geom_bar(stat="identity") + geom_errorbar(aes(ymin=avg-sdpl, ymax=avg+sdpl),width=0.2)
|
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------- 170612 - redirect STDOUT ---------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------------------------
m = system('ls /Users', intern = T)
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------- 170430 - hclust() test ---------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------------------------
x = c(1,1.5,2,2.5,2.5,4,4,5,6,6,7)
y = c(1,2,1,3,4,1,4,5,4,6,1)
m = matrix(c(x,y), ncol=2)
rownames(m) = letters[1:11]
plot(m)
d = dist(m, method="euclidean")
fit <- hclust(d, method="ward.D")
plot(fit, main="ward.D")
fit <- hclust(d, method="ward.D2")
plot(fit, main="ward.D2")
fit <- hclust(d, method="single")
plot(fit, main="single")
fit <- hclust(d, method="complete")
plot(fit, main="complete")
fit <- hclust(d, method="average") # UPGMA
plot(fit, main="average")
fit <- hclust(d, method="mcquitty") # WPGMA
plot(fit, main="mcquitty")
fit <- hclust(d, method="median") # WPGMC
plot(fit, main="median")
fit <- hclust(d, method="centroid") # UPGMC
plot(fit, main="centroid")
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------------- dgv sources stats ----------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------------------------
# load and edit file
setwd("/Users/Eugenio/Desktop/Lavoro/Seragnoli/dgv")
dgv_papers = read.csv("dgv_papers.csv", header=T, sep=",", dec=".", quote = "'", stringsAsFactors = F, check.names = F)
colnames(dgv_papers)[2] = "var"
# transform counts to proportions and barplot
dgv_papers$proportions = dgv_papers$var/sum(dgv_papers$var)
prop = dgv_papers$proportions
jpeg(filename="barplot_DGVvariant_study.jpeg", width=9, height=6, units="in", res=175, quality=100)
par(mar=c(5,10,0,1)) # set margins
barplot(prop, names.arg = dgv_papers$study, horiz=T, las=1, cex.names = 0.5, xlab="Proportion of DGV variants per study")
dev.off()
# proportion of first 7 sources together? ---> 0.987
sum(prop[1:7])/sum(prop)
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------------- PARALLEL PROGRAMMING ----------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------------------------
library(parallel)
# Worker function evaluated on the cluster: 3 raised to the given exponent.
prova <- function(exponent){
  base <- 3
  base^exponent
}
cores <- detectCores() - 1      # leave one core free for the OS
cluster <- makeCluster(cores)   # build cluster on "cores" number of cores
# syntax to call a function in parallel is:
# parLapply(cluster, levels, function())
# BUG FIX: the cluster was previously stopped *before* parLapply() ran,
# and the final stopCluster() call referenced an undefined object `cl`.
parLapply(cluster, 2:4, prova)  # returns list(3^2, 3^3, 3^4)
stopCluster(cluster)            # always release the workers when done
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------ OTHER -----------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------------------------
# in case of error such as: "Error in plot.new() : figure margins too large"
# to check the current margins, usually "[1] 5.1 4.1 4.1 2.1"
par("mar")
# to change the margins as desired
# the number indicate margins in clockwise order, startin from lower margin
par(mar=c(3,3,2,1))
# binomial test
# null: probability of success is equal to "p"
# binom.test(number of successes, total number of trials, p)
binom.test(24,1000,p=0.015)
## calculate probability in poisson distribution
## p(x;λ) = (λ^x * e^-λ)/x!
# λ is the expected value (mean) and x is a specific observation
# ex: we expect 6 customers (this is λ) every 30 minutes (fixed amount of time); what is the probability (p) of
# having 3 customers (this is x) in 30 minutes?
# in R terms ---> ((λ^x) * (exp(1)^-λ)) / factorial(x)
# redirect STDIN to R from command line
basename(commandArgs()[6])
dirname(commandArgs()[6])
# df and matrices for trial
dat1 <- data.frame(var1 = rnorm(10), var2 = (1:10), var3 = gl(2, 5, labels = c("red", "blue")))
dat1[,1] = as.character(dat1[,1])
class(dat1[,1])
a = matrix(c(1, 2, 3, 4, 5, 6), nrow=3, ncol=2)
b = matrix(c(7, 8, 9, 10, 11, 12), nrow=3, ncol=2)
c = matrix(c(0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0), nrow=5, ncol=5)
m = matrix(runif(100),10,10)
image(c, breaks=1,2,3)
# counts how many zeroes and ones(or higher) are in each row of a matrix and summarize it in a data.frame
zeroesOnesGainAll = data.frame(matrix(rep(0, dim(gainAll)[1]*2), ncol=2))
colnames(zeroesOnesGainAll) = c("zeroes", "ones")
rownames(zeroesOnesGainAll) = rownames(gainAll)
for(i in 1:dim(gainAll)[1]){
trueFalse = table(as.numeric(gainAll[i,]) > 0)
zeroesOnesGainAll[i,1] = as.numeric(trueFalse)[1]
zeroesOnesGainAll[i,2] = as.numeric(trueFalse)[2]
}
# useful for trials when filtering out specific genes from gene lists
head(sort(colnames(countTable)[grep("[-]", colnames(countTable))]), n=50)
tail(sort(colnames(countTable)[grep("[.]IT", colnames(countTable))]), n=50)
length(sort(colnames(countTable)[grep("[-]", colnames(countTable))]))
table(grepl("AS7", sort(colnames(countTable)[grep("[.]AS", colnames(countTable))])))
false = which(grepl("AS", sort(colnames(countTable)[grep("[.]AS", colnames(countTable))])) == FALSE)
sort(colnames(countTable)[grep("AS", colnames(countTable))])[false]
# ?? anna tp53?
min(1-cumsum(dhyper(0:(3-1),40,19960,300)))
| /old_scripts/prova.R | no_license | efonzi/my_functions | R | false | false | 6,407 | r | #-------------------------------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------- 170612 - redirect STDOUT ---------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------------------------
m = system('ls /Users', intern = T)
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------- 170430 - hclust() test ---------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------------------------
x = c(1,1.5,2,2.5,2.5,4,4,5,6,6,7)
y = c(1,2,1,3,4,1,4,5,4,6,1)
m = matrix(c(x,y), ncol=2)
rownames(m) = letters[1:11]
plot(m)
d = dist(m, method="euclidean")
fit <- hclust(d, method="ward.D")
plot(fit, main="ward.D")
fit <- hclust(d, method="ward.D2")
plot(fit, main="ward.D2")
fit <- hclust(d, method="single")
plot(fit, main="single")
fit <- hclust(d, method="complete")
plot(fit, main="complete")
fit <- hclust(d, method="average") # UPGMA
plot(fit, main="average")
fit <- hclust(d, method="mcquitty") # WPGMA
plot(fit, main="mcquitty")
fit <- hclust(d, method="median") # WPGMC
plot(fit, main="median")
fit <- hclust(d, method="centroid") # UPGMC
plot(fit, main="centroid")
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------------- dgv sources stats ----------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------------------------
# load and edit file
setwd("/Users/Eugenio/Desktop/Lavoro/Seragnoli/dgv")
dgv_papers = read.csv("dgv_papers.csv", header=T, sep=",", dec=".", quote = "'", stringsAsFactors = F, check.names = F)
colnames(dgv_papers)[2] = "var"
# transform counts to proportions and barplot
dgv_papers$proportions = dgv_papers$var/sum(dgv_papers$var)
prop = dgv_papers$proportions
jpeg(filename="barplot_DGVvariant_study.jpeg", width=9, height=6, units="in", res=175, quality=100)
par(mar=c(5,10,0,1)) # set margins
barplot(prop, names.arg = dgv_papers$study, horiz=T, las=1, cex.names = 0.5, xlab="Proportion of DGV variants per study")
dev.off()
# proportion of first 7 sources together? ---> 0.987
sum(prop[1:7])/sum(prop)
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------------- PARALLEL PROGRAMMING ----------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------------------------
library(parallel)
# Worker function evaluated on the cluster: 3 raised to the given exponent.
prova <- function(exponent){
  base <- 3
  base^exponent
}
cores <- detectCores() - 1      # leave one core free for the OS
cluster <- makeCluster(cores)   # build cluster on "cores" number of cores
# syntax to call a function in parallel is:
# parLapply(cluster, levels, function())
# BUG FIX: the cluster was previously stopped *before* parLapply() ran,
# and the final stopCluster() call referenced an undefined object `cl`.
parLapply(cluster, 2:4, prova)  # returns list(3^2, 3^3, 3^4)
stopCluster(cluster)            # always release the workers when done
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------ OTHER -----------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------------------------
# in case of error such as: "Error in plot.new() : figure margins too large"
# to check the current margins, usually "[1] 5.1 4.1 4.1 2.1"
par("mar")
# to change the margins as desired
# the number indicate margins in clockwise order, startin from lower margin
par(mar=c(3,3,2,1))
# binomial test
# null: probability of success is equal to "p"
# binom.test(number of successes, total number of trials, p)
binom.test(24,1000,p=0.015)
## calculate probability in poisson distribution
## p(x;λ) = (λ^x * e^-λ)/x!
# λ is the expected value (mean) and x is a specific observation
# ex: we expect 6 customers (this is λ) every 30 minutes (fixed amount of time); what is the probability (p) of
# having 3 customers (this is x) in 30 minutes?
# in R terms ---> ((λ^x) * (exp(1)^-λ)) / factorial(x)
# redirect STDIN to R from command line
basename(commandArgs()[6])
dirname(commandArgs()[6])
# df and matrices for trial
dat1 <- data.frame(var1 = rnorm(10), var2 = (1:10), var3 = gl(2, 5, labels = c("red", "blue")))
dat1[,1] = as.character(dat1[,1])
class(dat1[,1])
a = matrix(c(1, 2, 3, 4, 5, 6), nrow=3, ncol=2)
b = matrix(c(7, 8, 9, 10, 11, 12), nrow=3, ncol=2)
c = matrix(c(0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0), nrow=5, ncol=5)
m = matrix(runif(100),10,10)
image(c, breaks=1,2,3)
# counts how many zeroes and ones(or higher) are in each row of a matrix and summarize it in a data.frame
zeroesOnesGainAll = data.frame(matrix(rep(0, dim(gainAll)[1]*2), ncol=2))
colnames(zeroesOnesGainAll) = c("zeroes", "ones")
rownames(zeroesOnesGainAll) = rownames(gainAll)
for(i in 1:dim(gainAll)[1]){
trueFalse = table(as.numeric(gainAll[i,]) > 0)
zeroesOnesGainAll[i,1] = as.numeric(trueFalse)[1]
zeroesOnesGainAll[i,2] = as.numeric(trueFalse)[2]
}
# useful for trials when filtering out specific genes from gene lists
head(sort(colnames(countTable)[grep("[-]", colnames(countTable))]), n=50)
tail(sort(colnames(countTable)[grep("[.]IT", colnames(countTable))]), n=50)
length(sort(colnames(countTable)[grep("[-]", colnames(countTable))]))
table(grepl("AS7", sort(colnames(countTable)[grep("[.]AS", colnames(countTable))])))
false = which(grepl("AS", sort(colnames(countTable)[grep("[.]AS", colnames(countTable))])) == FALSE)
sort(colnames(countTable)[grep("AS", colnames(countTable))])[false]
# ?? anna tp53?
min(1-cumsum(dhyper(0:(3-1),40,19960,300)))
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{example.spacemix.map.list}
\alias{example.spacemix.map.list}
\title{Example spacemix.map.list object}
\format{A list with 15 elements,
using \code{K} as the number of samples
\describe{
\item{MCMC.output}{
This is a list of the output of the SpaceMix analysis,
containing all the elements of the output .Robj file.}
\item{geographic.locations}{
This is a \code{K} x 2 matrix in which the ith row
gives the geographic coordinates (i.e., longitude and
latitude) of the ith sample.}
\item{name.vector}{
This is a character vector of length \code{K} in which each
element gives the name of the corresponding sample.}
\item{color.vector}{
This is a vector of colors of length \code{K}
in which each element gives the color in which
the corresponding sample should be plotted.}
\item{quantile}{
This value determines the size of the credible
interval calculated for model parameters.}
\item{best.iter}{
This is the index of the sampled MCMC iteration with the largest
posterior probability. We refer to parameter estimates in that iteration as
the maximum a posteriori (MAP) estimates.}
\item{admix.source.color.vector}{
This is a vector of faded colors (the same as given
in \code{color.vector}), for which the extent of fading is determined by the
admixture proportion. These colors, for which the opacity is proportional
to the estimated admixture proportion, are used in plotting the admixture
sources and admixture arrows.}
\item{k}{
This is the number of samples in the analysis.}
\item{MAPP.geogen.coords}{
This is the Procrustes-transformed MAP geogenetic location
coordinates.}
\item{MAPP.admix.source.coords}{
This is the Procrustes-transformed MAP admixture source
location coordinates.}
\item{procrustes.coord.posterior.lists}{
This is a list of the Procrustes-transformed
location parameter coordinates.}
\itemize{
\item geogen.coords.list A list of length N, where N is the number of sampled
MCMC iterations. The ith element of the list contains the Procrustes-
transformed geogenetic location coordinates in the ith sampled iteration
of the MCMC. As a whole, this list represents the posterior distribution
of geogenetic location parameters for all samples.
\item admix.source.coords.list A list of length N, where N is the number of sampled
MCMC iterations. The ith element of the list contains the Procrustes-
transformed admixture source location coordinates in the ith sampled iteration
of the MCMC. As a whole, this list represents the posterior distribution
of admixture source location parameters for all samples.
}
\item{pp.geogen.location.matrices}{
A list of length \code{K} in which the ith element is the Procrustes-
transformed posterior distribution of geogenetic location coordinates for the ith sample.}
\item{pp.admix.source.location.matrices}{
A list of length \code{K} in which the ith element is the Procrustes-
transformed posterior distribution of admixture source location coordinates for the ith sample.}
\item{pp.geogen.ellipses}{
A list of length \code{K} in which the ith element gives the boundaries of the
95\% credible ellipse of the Procrustes-transformed posterior distribution of geogenetic
location coordinates of the ith sample.}
\item{pp.admix.source.ellipses}{
A list of length \code{K} in which the ith element gives the boundaries of the
95\% credible ellipse of the Procrustes-transformed posterior distribution of admixture source
location coordinates of the ith sample.}
}}
\usage{
example.spacemix.map.list
}
\description{
Example list generated by \code{make.spacemix.map.list}
to be used in visualizing the output of a SpaceMix analysis
}
\keyword{datasets}
| /man/example.spacemix.map.list.Rd | no_license | HansonMenghan/SpaceMix | R | false | false | 3,861 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{example.spacemix.map.list}
\alias{example.spacemix.map.list}
\title{Example spacemix.map.list object}
\format{A list with 15 elements,
using \code{K} as the number of samples
\describe{
\item{MCMC.output}{
This is a list of the output of the SpaceMix analysis,
containing all the elements of the output .Robj file.}
\item{geographic.locations}{
This is a \code{K} x 2 matrix in which the ith row
gives the geographic coordinates (i.e., longitude and
latitude) of the ith sample.}
\item{name.vector}{
This is a character vector of length \code{K} in which each
element gives the name of the corresponding sample.}
\item{color.vector}{
This is a vector of colors of length \code{K}
in which each element gives the color in which
the corresponding sample should be plotted.}
\item{quantile}{
This value determines the size of the credible
interval calculated for model parameters.}
\item{best.iter}{
This is the index of the sampled MCMC iteration with the largest
posterior probability. We refer to parameter estimates in that iteration as
the maximum a posteriori (MAP) estimates.}
\item{admix.source.color.vector}{
This is a vector of faded colors (the same as given
in \code{color.vector}), for which the extent of fading is determined by the
admixture proportion. These colors, for which the opacity is proportional
to the estimated admixture proportion, are used in plotting the admixture
sources and admixture arrows.}
\item{k}{
This is the number of samples in the analysis.}
\item{MAPP.geogen.coords}{
This is the Procrustes-transformed MAP geogenetic location
coordinates.}
\item{MAPP.admix.source.coords}{
This is the Procrustes-transformed MAP admixture source
location coordinates.}
\item{procrustes.coord.posterior.lists}{
This is a list of the Procrustes-transformed
location parameter coordinates.}
\itemize{
\item geogen.coords.list A list of length N, where N is the number of sampled
MCMC iterations. The ith element of the list contains the Procrustes-
transformed geogenetic location coordinates in the ith sampled iteration
of the MCMC. As a whole, this list represents the posterior distribution
of geogenetic location parameters for all samples.
\item admix.source.coords.list A list of length N, where N is the number of sampled
MCMC iterations. The ith element of the list contains the Procrustes-
transformed admixture source location coordinates in the ith sampled iteration
of the MCMC. As a whole, this list represents the posterior distribution
of admixture source location parameters for all samples.
}
\item{pp.geogen.location.matrices}{
A list of length \code{K} in which the ith element is the Procrustes-
transformed posterior distribution of geogenetic location coordinates for the ith sample.}
\item{pp.admix.source.location.matrices}{
A list of length \code{K} in which the ith element is the Procrustes-
transformed posterior distribution of admixture source location coordinates for the ith sample.}
\item{pp.geogen.ellipses}{
A list of length \code{K} in which the ith element gives the boundaries of the
95\% credible ellipse of the Procrustes-transformed posterior distribution of geogenetic
location coordinates of the ith sample.}
\item{pp.admix.source.ellipses}{
A list of length \code{K} in which the ith element gives the boundaries of the
95\% credible ellipse of the Procrustes-transformed posterior distribution of admixture source
location coordinates of the ith sample.}
}}
\usage{
example.spacemix.map.list
}
\description{
Example list generated by \code{make.spacemix.map.list}
to be used in visualizing the output of a SpaceMix analysis
}
\keyword{datasets}
|
# Tests for dummy_rows(): it should append exactly the rows needed so that
# every combination of the selected columns exists, and (optionally) add a
# dummy_indicator column that is 1 for the appended rows and 0 otherwise.
# The fixture data frames (no_dummies_needed, fastDummies_example,
# fastDummies_full, crime, crime_full) come from the package's bundled
# testdata .rda file loaded below.
context("dummy_rows_returns right data set")
load(system.file("testdata", "fastDummies_data.rda",
                 package = "fastDummies"))
test_that("dummy_rows return expected data.frame", {
  # no_dummies_needed already contains every combination, so the input must
  # come back unchanged, whatever subset of columns is selected.
  expect_equal(dummy_rows(no_dummies_needed),
               no_dummies_needed)
  expect_equal(dummy_rows(no_dummies_needed,
                          select_columns = "animals"),
               no_dummies_needed)
  expect_equal(dummy_rows(no_dummies_needed,
                          select_columns = "food"),
               no_dummies_needed)
  expect_equal(dummy_rows(no_dummies_needed,
                          select_columns = c("animals", "food")),
               no_dummies_needed)
  # With dummy_indicator = TRUE and no rows added, the indicator column is
  # all zeroes.
  expect_equal(dummy_rows(no_dummies_needed, dummy_indicator = TRUE),
               cbind(no_dummies_needed, dummy_indicator = rep(0, 4)))
  expect_equal(dummy_rows(no_dummies_needed, dummy_indicator = TRUE,
                          select_columns = "animals"),
               cbind(no_dummies_needed, dummy_indicator = rep(0, 4)))
  expect_equal(dummy_rows(no_dummies_needed, dummy_indicator = TRUE,
                          select_columns = "food"),
               cbind(no_dummies_needed, dummy_indicator = rep(0, 4)))
  expect_equal(dummy_rows(no_dummies_needed, dummy_indicator = TRUE,
                          select_columns = c("animals", "food")),
               cbind(no_dummies_needed, dummy_indicator = rep(0, 4)))
  # fastDummies_example data - FULL
  # (all three columns selected -> fastDummies_full, which has the five
  # appended combination rows)
  expect_equal(dummy_rows(fastDummies_example),
               fastDummies_full)
  expect_equal(dummy_rows(fastDummies_example,
                          select_columns = c("gender", "animals", "dates")),
               fastDummies_full)
  expect_equal(dummy_rows(fastDummies_example, dummy_indicator = TRUE),
               cbind(fastDummies_full, dummy_indicator = c(0, 0, 0, 1,
                                                           1, 1, 1, 1)))
  expect_equal(dummy_rows(fastDummies_example, dummy_indicator = TRUE,
                          select_columns = c("gender", "animals", "dates")),
               cbind(fastDummies_full, dummy_indicator = c(0, 0, 0, 1,
                                                           1, 1, 1, 1)))
  # fastDummies_example data - not full
  # (any single column already covers its own values -> input unchanged)
  expect_equal(dummy_rows(fastDummies_example, select_columns = "animals"),
               fastDummies_example)
  expect_equal(dummy_rows(fastDummies_example, select_columns = "gender"),
               fastDummies_example)
  expect_equal(dummy_rows(fastDummies_example, select_columns = "dates"),
               fastDummies_example)
  expect_equal(dummy_rows(fastDummies_example, select_columns = "animals",
                          dummy_indicator = TRUE),
               cbind(fastDummies_example, dummy_indicator = rep(0, 3)))
  expect_equal(dummy_rows(fastDummies_example, select_columns = "gender",
                          dummy_indicator = TRUE),
               cbind(fastDummies_example, dummy_indicator = rep(0, 3)))
  expect_equal(dummy_rows(fastDummies_example, select_columns = "dates",
                          dummy_indicator = TRUE),
               cbind(fastDummies_example, dummy_indicator = rep(0, 3)))
  # Crime dataset
  # (column order in select_columns must not matter)
  expect_equal(dummy_rows(crime, select_columns = c("city", "year")),
               crime_full)
  expect_equal(dummy_rows(crime),
               crime)
  expect_equal(dummy_rows(crime, select_columns = c("year", "city")),
               crime_full)
  expect_equal(dummy_rows(crime, select_columns = "city"),
               crime)
  expect_equal(dummy_rows(crime, select_columns = "year"),
               crime)
})
| /tests/testthat/test-rows-right-values.R | permissive | jacobkap/fastDummies | R | false | false | 3,615 | r | context("dummy_rows_returns right data set")
# Tests for dummy_rows(): it should append exactly the rows needed so that
# every combination of the selected columns exists, and (optionally) add a
# dummy_indicator column that is 1 for the appended rows and 0 otherwise.
# The fixture data frames (no_dummies_needed, fastDummies_example,
# fastDummies_full, crime, crime_full) come from the package's bundled
# testdata .rda file loaded below.
load(system.file("testdata", "fastDummies_data.rda",
                 package = "fastDummies"))
test_that("dummy_rows return expected data.frame", {
  # no_dummies_needed already contains every combination, so the input must
  # come back unchanged, whatever subset of columns is selected.
  expect_equal(dummy_rows(no_dummies_needed),
               no_dummies_needed)
  expect_equal(dummy_rows(no_dummies_needed,
                          select_columns = "animals"),
               no_dummies_needed)
  expect_equal(dummy_rows(no_dummies_needed,
                          select_columns = "food"),
               no_dummies_needed)
  expect_equal(dummy_rows(no_dummies_needed,
                          select_columns = c("animals", "food")),
               no_dummies_needed)
  # With dummy_indicator = TRUE and no rows added, the indicator column is
  # all zeroes.
  expect_equal(dummy_rows(no_dummies_needed, dummy_indicator = TRUE),
               cbind(no_dummies_needed, dummy_indicator = rep(0, 4)))
  expect_equal(dummy_rows(no_dummies_needed, dummy_indicator = TRUE,
                          select_columns = "animals"),
               cbind(no_dummies_needed, dummy_indicator = rep(0, 4)))
  expect_equal(dummy_rows(no_dummies_needed, dummy_indicator = TRUE,
                          select_columns = "food"),
               cbind(no_dummies_needed, dummy_indicator = rep(0, 4)))
  expect_equal(dummy_rows(no_dummies_needed, dummy_indicator = TRUE,
                          select_columns = c("animals", "food")),
               cbind(no_dummies_needed, dummy_indicator = rep(0, 4)))
  # fastDummies_example data - FULL
  # (all three columns selected -> fastDummies_full, which has the five
  # appended combination rows)
  expect_equal(dummy_rows(fastDummies_example),
               fastDummies_full)
  expect_equal(dummy_rows(fastDummies_example,
                          select_columns = c("gender", "animals", "dates")),
               fastDummies_full)
  expect_equal(dummy_rows(fastDummies_example, dummy_indicator = TRUE),
               cbind(fastDummies_full, dummy_indicator = c(0, 0, 0, 1,
                                                           1, 1, 1, 1)))
  expect_equal(dummy_rows(fastDummies_example, dummy_indicator = TRUE,
                          select_columns = c("gender", "animals", "dates")),
               cbind(fastDummies_full, dummy_indicator = c(0, 0, 0, 1,
                                                           1, 1, 1, 1)))
  # fastDummies_example data - not full
  # (any single column already covers its own values -> input unchanged)
  expect_equal(dummy_rows(fastDummies_example, select_columns = "animals"),
               fastDummies_example)
  expect_equal(dummy_rows(fastDummies_example, select_columns = "gender"),
               fastDummies_example)
  expect_equal(dummy_rows(fastDummies_example, select_columns = "dates"),
               fastDummies_example)
  expect_equal(dummy_rows(fastDummies_example, select_columns = "animals",
                          dummy_indicator = TRUE),
               cbind(fastDummies_example, dummy_indicator = rep(0, 3)))
  expect_equal(dummy_rows(fastDummies_example, select_columns = "gender",
                          dummy_indicator = TRUE),
               cbind(fastDummies_example, dummy_indicator = rep(0, 3)))
  expect_equal(dummy_rows(fastDummies_example, select_columns = "dates",
                          dummy_indicator = TRUE),
               cbind(fastDummies_example, dummy_indicator = rep(0, 3)))
  # Crime dataset
  # (column order in select_columns must not matter)
  expect_equal(dummy_rows(crime, select_columns = c("city", "year")),
               crime_full)
  expect_equal(dummy_rows(crime),
               crime)
  expect_equal(dummy_rows(crime, select_columns = c("year", "city")),
               crime_full)
  expect_equal(dummy_rows(crime, select_columns = "city"),
               crime)
  expect_equal(dummy_rows(crime, select_columns = "year"),
               crime)
})
|
library(foreach)
library(doMC)
registerDoMC(10)

# Root directory holding the per-run simulation output folders
# ("<prefix>.run.<i>/matrix.txt") and receiving the output PDFs.
curDir <- "~/Dropbox/sxoli/ptixiaki/evo3"
# Number of independent simulation runs per scenario.
n_runs <- 25
# Rows per generation in matrix.txt (population size).
pop_size <- 500

# Mean interaction power per generation.
#
# mat:        data.frame/matrix with `pop_size` rows per generation.
# pop_size:   number of rows that make up one generation.
# drop_zeros: when TRUE, zero entries are excluded from each generation's
#             mean (the "_NA" variant of the plots).
# Returns a numeric vector with one mean per generation; the last
# (possibly partial) generation is skipped, as in the original script.
#
# NOTE: the original loop was `for (l in 1:num_of_gens-1)`, which by `:`
# precedence is `(1:num_of_gens)-1` = `0:(num_of_gens-1)`. The l = 0
# iteration computed a mean over negative row indices and assigned it to
# mean_power[0] — a silent no-op — so it only wasted work. seq_len()
# produces the intended 1..(num_of_gens-1) range with identical results.
generation_means <- function(mat, pop_size = 500, drop_zeros = FALSE) {
  num_of_gens <- nrow(mat) / pop_size
  vapply(seq_len(num_of_gens - 1), function(l) {
    lower <- (l - 1) * pop_size + 1
    upper <- l * pop_size
    gen <- as.matrix(mat[lower:upper, , drop = FALSE])
    if (drop_zeros) {
      gen <- gen[gen != 0]
    }
    mean(gen)
  }, numeric(1))
}

# Plot the per-generation mean power for every run of one scenario
# ("selection" or "neutral"), writing one PNG per run into curDir, then
# bundle the PNGs into a single PDF with ImageMagick and remove them.
plot_power <- function(prefix, out_pdf, drop_zeros) {
  foreach(i = 1:n_runs) %dopar% {
    run_dir <- file.path(curDir, paste0(prefix, ".run.", i))
    mydata <- read.table(file.path(run_dir, "matrix.txt"))
    mean_power <- generation_means(mydata, pop_size, drop_zeros)
    png(file.path(curDir, paste0(prefix, sprintf("%04d", i), ".png")))
    plot(mean_power, type = "l", main = "Power of Interaction",
         xlab = "Generations", ylab = "Power")
    dev.off()
  }
  setwd(curDir)
  system(paste("convert", paste0(prefix, "*.png"), out_pdf))
  system(paste("rm", paste0(prefix, "*.png")))
}

# Means including zero entries.
plot_power("selection", "selection_power_0.pdf", drop_zeros = FALSE)
plot_power("neutral", "neutral_power_0.pdf", drop_zeros = FALSE)
# Means with zero entries excluded.
plot_power("selection", "selection_power_NA.pdf", drop_zeros = TRUE)
plot_power("neutral", "neutral_power_NA.pdf", drop_zeros = TRUE)
system("rm neutral*.png") | /r_plots/power_interaction.R | no_license | antokioukis/evonet | R | false | false | 3,462 | r | library(foreach)
library(foreach)
library(doMC)
registerDoMC(10)

# Root directory holding the per-run simulation output folders
# ("<prefix>.run.<i>/matrix.txt") and receiving the output PDFs.
curDir <- "~/Dropbox/sxoli/ptixiaki/evo3"
# Number of independent simulation runs per scenario.
n_runs <- 25
# Rows per generation in matrix.txt (population size).
pop_size <- 500

# Mean interaction power per generation.
#
# mat:        data.frame/matrix with `pop_size` rows per generation.
# pop_size:   number of rows that make up one generation.
# drop_zeros: when TRUE, zero entries are excluded from each generation's
#             mean (the "_NA" variant of the plots).
# Returns a numeric vector with one mean per generation; the last
# (possibly partial) generation is skipped, as in the original script.
#
# NOTE: the original loop was `for (l in 1:num_of_gens-1)`, which by `:`
# precedence is `(1:num_of_gens)-1` = `0:(num_of_gens-1)`. The l = 0
# iteration computed a mean over negative row indices and assigned it to
# mean_power[0] — a silent no-op — so it only wasted work. seq_len()
# produces the intended 1..(num_of_gens-1) range with identical results.
generation_means <- function(mat, pop_size = 500, drop_zeros = FALSE) {
  num_of_gens <- nrow(mat) / pop_size
  vapply(seq_len(num_of_gens - 1), function(l) {
    lower <- (l - 1) * pop_size + 1
    upper <- l * pop_size
    gen <- as.matrix(mat[lower:upper, , drop = FALSE])
    if (drop_zeros) {
      gen <- gen[gen != 0]
    }
    mean(gen)
  }, numeric(1))
}

# Plot the per-generation mean power for every run of one scenario
# ("selection" or "neutral"), writing one PNG per run into curDir, then
# bundle the PNGs into a single PDF with ImageMagick and remove them.
plot_power <- function(prefix, out_pdf, drop_zeros) {
  foreach(i = 1:n_runs) %dopar% {
    run_dir <- file.path(curDir, paste0(prefix, ".run.", i))
    mydata <- read.table(file.path(run_dir, "matrix.txt"))
    mean_power <- generation_means(mydata, pop_size, drop_zeros)
    png(file.path(curDir, paste0(prefix, sprintf("%04d", i), ".png")))
    plot(mean_power, type = "l", main = "Power of Interaction",
         xlab = "Generations", ylab = "Power")
    dev.off()
  }
  setwd(curDir)
  system(paste("convert", paste0(prefix, "*.png"), out_pdf))
  system(paste("rm", paste0(prefix, "*.png")))
}

# Means including zero entries.
plot_power("selection", "selection_power_0.pdf", drop_zeros = FALSE)
plot_power("neutral", "neutral_power_0.pdf", drop_zeros = FALSE)
# Means with zero entries excluded.
plot_power("selection", "selection_power_NA.pdf", drop_zeros = TRUE)
plot_power("neutral", "neutral_power_NA.pdf", drop_zeros = TRUE)
# Exercise list - Graphs
# Note: if you have problems with character encoding/accents, see this link:
# https://support.rstudio.com/hc/en-us/articles/200532197-Character-Encoding
# Setting the working directory
# Put (in quotes) the working directory you are using on your computer
# Do not use directories with spaces in the name
setwd("C:/FCD/BigDataRAzure/Cap04")
getwd()
# Exercise 1 - Write a function that receives the two vectors below as
# parameters, converts them into a data frame and prints it to the console => OK!
vec1 <- (10:13)
vec2 <- c("a", "b", "c", "d")
# NOTE(review): cbind() builds a character matrix first, so both resulting
# columns are character; data.frame(x = x, y = y) would preserve the types.
myfunc <- function(x, y) {
df1 = data.frame(cbind(x, y))
print(df1)
}
myfunc(vec1, vec2)
# Exercise 2 - Create a 4x4 matrix filled with integers and compute
# the mean of each row => OK!
mat1 <- matrix(1:16, nrow = 4, ncol = 4)
mat1
apply(mat1, 1, mean)
# Exercise 3 - Consider the data frame below.
# Compute the mean per subject and then the mean of a single subject => OK
escola <- data.frame(Aluno = c('Alan', 'Alice', 'Alana', 'Aline', 'Alex', 'Ajay'),
                     Matematica = c(90, 80, 85, 87, 56, 79),
                     Geografia = c(100, 78, 86, 90, 98, 67),
                     Quimica = c(76, 56, 89, 90, 100, 87))
escola
?apply
media_disciplinas = apply(escola[, c(2,3,4)], 2, mean)
media_disciplinas
media_matematica <- mean(escola$Matematica)
media_matematica
# Solution using apply (drop = F keeps the one-column data.frame shape)
apply(escola[, c(2), drop = F], 2, mean)
# Exercise 4 - Create a list with 3 elements, all numeric,
# and compute the sum of all elements of the list => OK
?list
nova_lista <- list(156, 387, 298)
nova_lista
do.call(sum, nova_lista)
# Exercise 5 - Turn the previous list into a vector => OK
?as.vector
novo_vetor <- as.vector(nova_lista, mode = "integer")
novo_vetor
# Another solution => unlist
unlist(nova_lista)
# Exercise 6 - Consider the string below. Replace the word "textos" with "frases" => OK
# NOTE(review): this variable masks utils::str for the rest of the session.
str <- c("Expressoes", "regulares", "em linguagem R",
         "permitem a busca de padroes", "e exploracao de textos",
         "podemos buscar padroes em digitos",
         "como por exemplo",
         "10992451280")
str
gsub("textos", "frases", str)
# Exercise 7 - Using the mtcars dataset, create a scatter plot with ggplot.
# Use the disp and mpg columns on the x and y axes respectively => OK
?plot
plot(mtcars$disp, mtcars$mpg, main = "Scatter mtcars",
     xlab = "disp", ylab = "mpg")
# Using ggplot2
library(ggplot2)
ggplot(data = mtcars, aes(x = disp, y = mpg)) + geom_point()
# Exercise 8 - Consider the matrix below.
# Create a bar plot that shows the data as individual bars. => OK
mat1 <- matrix(c(652,1537,598,242,36,46,38,21,218,327,106,67), nrow = 3, byrow = T)
mat1
barplot(mat1, beside = T)
# Exercise 9 - What is the error in the code below? => OK
# (the next ggplot call is intentionally broken: `fill` is not a column)
data(diamonds)
View(diamonds)
ggplot(data = diamonds, aes(x = price, group = fill, fill = cut)) +
  geom_density(adjust = 1.5)
# Correct code - the error was `group = fill`
ggplot(data = diamonds, aes(x = price, group = cut, fill = cut)) +
  geom_density(adjust = 1.5)
# Exercise 10 - What is the error in the code below? => OK
# (the next ggplot call is intentionally broken: geom_barplot() does not exist)
ggplot(mtcars, aes(x = as.factor(cyl), fill = as.factor(cyl))) +
  geom_barplot() +
  labs(fill = "cyl")
# Correct code - there is no geom_barplot
ggplot(mtcars, aes(x = as.factor(cyl), fill = as.factor(cyl))) +
  geom_bar() +
  labs(fill = "cyl")
| /Graficos.R | no_license | NelioMuniz/exerciciosRFundamentos | R | false | false | 3,450 | r | # Lista de Exercícios - Gráficos
# Note: if you have problems with character encoding/accents, see this link:
# https://support.rstudio.com/hc/en-us/articles/200532197-Character-Encoding
# Setting the working directory
# Put (in quotes) the working directory you are using on your computer
# Do not use directories with spaces in the name
setwd("C:/FCD/BigDataRAzure/Cap04")
getwd()
# Exercise 1 - Write a function that receives the two vectors below as
# parameters, converts them into a data frame and prints it to the console => OK!
vec1 <- (10:13)
vec2 <- c("a", "b", "c", "d")
# NOTE(review): cbind() builds a character matrix first, so both resulting
# columns are character; data.frame(x = x, y = y) would preserve the types.
myfunc <- function(x, y) {
df1 = data.frame(cbind(x, y))
print(df1)
}
myfunc(vec1, vec2)
# Exercise 2 - Create a 4x4 matrix filled with integers and compute
# the mean of each row => OK!
mat1 <- matrix(1:16, nrow = 4, ncol = 4)
mat1
apply(mat1, 1, mean)
# Exercise 3 - Consider the data frame below.
# Compute the mean per subject and then the mean of a single subject => OK
escola <- data.frame(Aluno = c('Alan', 'Alice', 'Alana', 'Aline', 'Alex', 'Ajay'),
                     Matematica = c(90, 80, 85, 87, 56, 79),
                     Geografia = c(100, 78, 86, 90, 98, 67),
                     Quimica = c(76, 56, 89, 90, 100, 87))
escola
?apply
media_disciplinas = apply(escola[, c(2,3,4)], 2, mean)
media_disciplinas
media_matematica <- mean(escola$Matematica)
media_matematica
# Solution using apply (drop = F keeps the one-column data.frame shape)
apply(escola[, c(2), drop = F], 2, mean)
# Exercise 4 - Create a list with 3 elements, all numeric,
# and compute the sum of all elements of the list => OK
?list
nova_lista <- list(156, 387, 298)
nova_lista
do.call(sum, nova_lista)
# Exercise 5 - Turn the previous list into a vector => OK
?as.vector
novo_vetor <- as.vector(nova_lista, mode = "integer")
novo_vetor
# Another solution => unlist
unlist(nova_lista)
# Exercise 6 - Consider the string below. Replace the word "textos" with "frases" => OK
# NOTE(review): this variable masks utils::str for the rest of the session.
str <- c("Expressoes", "regulares", "em linguagem R",
         "permitem a busca de padroes", "e exploracao de textos",
         "podemos buscar padroes em digitos",
         "como por exemplo",
         "10992451280")
str
gsub("textos", "frases", str)
# Exercise 7 - Using the mtcars dataset, create a scatter plot with ggplot.
# Use the disp and mpg columns on the x and y axes respectively => OK
?plot
plot(mtcars$disp, mtcars$mpg, main = "Scatter mtcars",
     xlab = "disp", ylab = "mpg")
# Using ggplot2
library(ggplot2)
ggplot(data = mtcars, aes(x = disp, y = mpg)) + geom_point()
# Exercise 8 - Consider the matrix below.
# Create a bar plot that shows the data as individual bars. => OK
mat1 <- matrix(c(652,1537,598,242,36,46,38,21,218,327,106,67), nrow = 3, byrow = T)
mat1
barplot(mat1, beside = T)
# Exercise 9 - What is the error in the code below? => OK
# (the next ggplot call is intentionally broken: `fill` is not a column)
data(diamonds)
View(diamonds)
ggplot(data = diamonds, aes(x = price, group = fill, fill = cut)) +
  geom_density(adjust = 1.5)
# Correct code - the error was `group = fill`
ggplot(data = diamonds, aes(x = price, group = cut, fill = cut)) +
  geom_density(adjust = 1.5)
# Exercise 10 - What is the error in the code below? => OK
# (the next ggplot call is intentionally broken: geom_barplot() does not exist)
ggplot(mtcars, aes(x = as.factor(cyl), fill = as.factor(cyl))) +
  geom_barplot() +
  labs(fill = "cyl")
# Correct code - there is no geom_barplot
ggplot(mtcars, aes(x = as.factor(cyl), fill = as.factor(cyl))) +
  geom_bar() +
  labs(fill = "cyl")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class_definitions.R, R/cover_methods.R
\docType{class}
\name{cover-class}
\alias{cover-class}
\alias{patch-class}
\alias{show,cover-method}
\alias{show,patch-method}
\title{An S4 class to represent a cover.}
\usage{
\S4method{show}{cover}(object)
\S4method{show}{patch}(object)
}
\arguments{
\item{object}{A cover}
}
\description{
An S4 class to represent a cover.
An S4 class to represent a patch.
}
\section{Slots}{
\describe{
\item{\code{data}}{A data.frame}
\item{\code{subsets}}{A list of patches}
\item{\code{internal_nodes}}{internal nodes}
\item{\code{external_nodes}}{external nodes}
\item{\code{data_filter_values}}{the filter value of the entire data set}
\item{\code{parameter}}{list of parameters}
\item{\code{type}}{type of cover}
\item{\code{id}}{Patch id}
\item{\code{indices}}{Indices of data points in patch}
\item{\code{predicted}}{Indices of predicted points}
\item{\code{anchor_points}}{Anchor points}
\item{\code{children}}{Indices of children}
\item{\code{filter_value}}{Filter value}
\item{\code{parent}}{Index of parent}
\item{\code{parent_filter}}{Filter value of parent}
}}
| /man/cover-class.Rd | no_license | blasern/dcph | R | false | true | 1,197 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class_definitions.R, R/cover_methods.R
\docType{class}
\name{cover-class}
\alias{cover-class}
\alias{patch-class}
\alias{show,cover-method}
\alias{show,patch-method}
\title{An S4 class to represent a cover.}
\usage{
\S4method{show}{cover}(object)
\S4method{show}{patch}(object)
}
\arguments{
\item{object}{A cover}
}
\description{
An S4 class to represent a cover.
An S4 class to represent a patch.
}
\section{Slots}{
\describe{
\item{\code{data}}{A data.frame}
\item{\code{subsets}}{A list of patches}
\item{\code{internal_nodes}}{internal nodes}
\item{\code{external_nodes}}{external nodes}
\item{\code{data_filter_values}}{the filter value of the entire data set}
\item{\code{parameter}}{list of parameters}
\item{\code{type}}{type of cover}
\item{\code{id}}{Patch id}
\item{\code{indices}}{Indices of data points in patch}
\item{\code{predicted}}{Indices of predicted points}
\item{\code{anchor_points}}{Anchor points}
\item{\code{children}}{Indices of children}
\item{\code{filter_value}}{Filter value}
\item{\code{parent}}{Index of parent}
\item{\code{parent_filter}}{Filter value of parent}
}}
|
#!/usr/bin/env Rscript
# Plot 4: a 2x2 panel of time-series plots built from the household power
# consumption data loaded via data_loader.R, written to plot4.png.
# FIX: the shebang was `rscript` (lowercase), which is not a valid
# interpreter name on case-sensitive systems; corrected to `Rscript`.
library(lubridate)
source('data_loader.R')

# `d` holds the loaded data; column `d` is the plotted datetime axis and the
# remaining columns are the power-consumption series (see data_loader.R).
d <- load_data()

png("plot4.png", width = 480, height = 480)
par(mfrow=c(2,2))

# Top-left: global active power over time
plot(
  x = d$d,
  y = as.numeric(d[,"Global_active_power"]),
  xlab="",
  ylab="Global Active Power",
  col="black",
  type="l")

# Top-right: voltage over time
plot(
  x = d$d,
  y = as.numeric(d[,"Voltage"]),
  xlab="datetime",
  ylab="Voltage",
  col="black",
  type="l")

# Bottom-left: the three sub-metering series overlaid in one panel
plot(
  x = d$d,
  y = as.numeric(d[,"Sub_metering_1"]),
  xlab="",
  ylab="Energy sub metering",
  col="black",
  type="l")
lines(
  x = d$d,
  y = as.numeric(d[,"Sub_metering_2"]),
  col="red")
lines(
  x = d$d,
  y = as.numeric(d[,"Sub_metering_3"]),
  col="blue")
legend(
  "topright",
  legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
  col=c("black", "red", "blue"),
  lty=c(1,1,1),
  bty="n",  # no box around the legend, matching the reference figure
  cex=0.9
)

# Bottom-right: global reactive power over time
plot(
  x = d$d,
  y = as.numeric(d[,"Global_reactive_power"]),
  xlab="datetime",
  ylab="Global_reactive_power",
  col="black",
  type="l")
dev.off() | /plot4.R | no_license | learnerconstant/ExData_Plotting1 | R | false | false | 1,319 | r | #!/usr/bin/env rscript
# Plot 4: a 2x2 panel of time-series plots built from the household power
# consumption data loaded via data_loader.R, written to plot4.png.
library(lubridate)
source('data_loader.R')
# `d` holds the loaded data; column `d` is the plotted datetime axis and the
# remaining columns are the power-consumption series (see data_loader.R).
d <- load_data()
png("plot4.png", width = 480, height = 480)
par(mfrow=c(2,2))
# First plot (top-left): global active power over time
plot(
  x = d$d,
  y = as.numeric(d[,"Global_active_power"]),
  xlab="",
  ylab="Global Active Power",
  col="black",
  type="l")
# Second plot (top-right): voltage over time
plot(
  x = d$d,
  y = as.numeric(d[,"Voltage"]),
  xlab="datetime",
  ylab="Voltage",
  col="black",
  type="l")
# Third plot (bottom-left): the three sub-metering series overlaid
plot(
  x = d$d,
  y = as.numeric(d[,"Sub_metering_1"]),
  xlab="",
  ylab="Energy sub metering",
  col="black",
  type="l")
lines(
  x = d$d,
  y = as.numeric(d[,"Sub_metering_2"]),
  col="red")
lines(
  x = d$d,
  y = as.numeric(d[,"Sub_metering_3"]),
  col="blue")
legend(
  "topright",
  legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
  col=c("black", "red", "blue"),
  lty=c(1,1,1),
  bty="n",
  cex=0.9
)
# Fourth plot (bottom-right): global reactive power over time
plot(
  x = d$d,
  y = as.numeric(d[,"Global_reactive_power"]),
  xlab="datetime",
  ylab="Global_reactive_power",
  col="black",
  type="l")
dev.off()
/Code/createTable.R | no_license | mywanuo/PLPPS-pipeline | R | false | false | 530 | r | ||
require(httr)
require(jsonlite)
require(magrittr)
require(stringr)
#' A TessituraService class
#'
#' This is a more useable Tessitura Service class that can be implemented by
#' further objects to create requests.
#'
#' @docType class
#' @title TessituraService
#' @field tessituraUrl Base Tessitura REST API url
#' @field credentials Base64 encoded credentials - can be created with createCredentials()
#' @field defaultHeaders Default headers for all requests
#' @field timeout The maximum timeout for a request
#' @field maxRetryAttempts The maximum number of times to retry a request before failing
#' @export
#'
#' @examples
#' TessituraService$new(
#'   tessituraUrl = "https://mytessi.tessituranetwork.com",
#'   credentials = createCredentials(username="creif", usergroup="admin", location="MET95", password="impresario")
#' )
TessituraService <- R6::R6Class(
  classname = "TessituraService",
  public = list(
    tessituraUrl = NULL,
    credentials = NULL,
    defaultHeaders = NULL,
    timeout = NULL,
    maxRetryAttempts = NULL,
    userAgent = "TessituraUser/1.0.0/r",
    #' @description Constructor
    #'
    #' @param tessituraUrl Base Tessitura REST API url
    #' @param credentials Base64 encoded credentials
    #' @param defaultHeaders Default headers for all requests
    #' @param timeout The maximum timeout for a request. Defaults to 5000.
    #' @param maxRetryAttempts The maximum number of times to retry a request before failing. Defaults to 3.
    #' @param userAgent The user agent to use in requests
    #'
    initialize = function(tessituraUrl, credentials, defaultHeaders = NULL, userAgent = NULL, timeout = 5000, maxRetryAttempts = 3) {
      if(!is.null(tessituraUrl)) {
        self$tessituraUrl <- tessituraUrl
      }
      if(!is.null(credentials)) {
        self$credentials <- credentials
      }
      if(!is.null(defaultHeaders)) {
        self$defaultHeaders <- defaultHeaders
      }
      if(!is.null(userAgent)) {
        self$userAgent <- userAgent
      }
      if (!is.null(timeout)) {
        self$timeout <- timeout
      }
      if(!is.null(maxRetryAttempts)) {
        self$maxRetryAttempts <- maxRetryAttempts
      }
    },
    #'
    #' @description Call a Tessitura endpoint with retries
    #'
    #' @param url The endpoint to request relative to the tessituraUrl
    #' @param method The HTTP method
    #' @param queryParams The query parameters for the request
    #' @param headerParams Any additional headers to attach to the request
    #' @param body The request body for POST and PUT methods
    #' @param ... Any other parameters for the httr methods
    #'
    CallTessi = function(url, method, queryParams = list(), headerParams = c(), body = NULL, ...) {
      # Only HTTP 500 responses are treated as transient and retried.
      retryStatusCodes <- c(500)
      resp <- self$Execute(url, method, queryParams, headerParams, body, ...)
      statusCode <- httr::status_code(resp)
      if (is.null(self$maxRetryAttempts)) {
        self$maxRetryAttempts <- 3
      }
      for (i in seq_len(self$maxRetryAttempts)) {
        if (statusCode %in% retryStatusCodes) {
          # Exponential backoff with jitter before each retry attempt.
          Sys.sleep((2 ^ i) + stats::runif(n = 1, min = 0, max = 1))
          resp <- self$Execute(url, method, queryParams, headerParams, body, ...)
          statusCode <- httr::status_code(resp)
        } else {
          break
        }
      }
      resp
    },
    #'
    #' @description Execute a request
    #'
    #' @param url The endpoint to request relative to the tessituraUrl
    #' @param method The HTTP method
    #' @param queryParams The query parameters for the request
    #' @param headerParams Any additional headers to attach to the request
    #' @param body The request body for POST and PUT methods
    #' @param ... Any other parameters for the httr methods
    #'
    Execute = function(url, method, queryParams, headerParams, body, ...) {
      headers <- httr::add_headers(
        c(headerParams, self$defaultHeaders, Authorization = self$credentials)
      )
      path <- paste0(self$tessituraUrl, url)
      if (method == "GET") {
        httr::GET(path, query = queryParams, headers, httr::timeout(self$timeout),
                  httr::user_agent(self$`userAgent`), ...)
      } else if (method == "POST") {
        # Fixed: the dots were previously misspelled as `....`, which raised
        # "object '....' not found" whenever a POST request was executed.
        httr::POST(path, query = queryParams, headers, body = body,
                   httr::content_type("application/json"), httr::timeout(self$timeout),
                   httr::user_agent(self$`userAgent`), ...)
      } else if (method == "PUT") {
        # Fixed: same `....` typo as in the POST branch.
        httr::PUT(path, query = queryParams, headers, body = body,
                  httr::content_type("application/json"), httr::timeout(self$timeout),
                  httr::user_agent(self$`userAgent`), ...)
      } else if (method == "DELETE") {
        httr::DELETE(path, query = queryParams, headers, httr::timeout(self$timeout),
                     httr::user_agent(self$`userAgent`), ...)
      } else {
        error <- "HTTP Method must be GET, POST, PUT or DELETE"
        stop(error)
      }
    },
    #' @description Deserialize the content of api response to the given type.
    #'
    #' @param resp The response object
    #' @param returnType The type of object to return
    #' @param pkgEnv The environment to find the type in
    #'
    deserialize = function(resp, returnType, pkgEnv) {
      respObj <- jsonlite::fromJSON(httr::content(resp, "text", encoding = "UTF-8"))
      self$deserializeObj(respObj, returnType, pkgEnv)
    },
    #' @description Deserialize the response from jsonlite object based on the given type
    #' by handling complex and nested types by iterating recursively
    #' Example returnTypes will be like "array[integer]", "map(Performance)", "array[map(Performance)]", etc.,
    #'
    #' @param obj The object to deserialize
    #' @param returnType The type of object to return
    #' @param pkgEnv The environment to find the type in
    #'
    deserializeObj = function(obj, returnType, pkgEnv) {
      returnObj <- NULL
      primitiveTypes <- c("character", "numeric", "integer", "logical", "complex")
      if (startsWith(returnType, "map(")) {
        # Recurse into each named element of the map.
        innerReturnType <- regmatches(returnType, regexec(pattern = "map\\((.*)\\)", returnType))[[1]][2]
        returnObj <- lapply(names(obj), function(name) {
          self$deserializeObj(obj[[name]], innerReturnType, pkgEnv)
        })
        names(returnObj) <- names(obj)
      }
      else if (startsWith(returnType, "array[")) {
        innerReturnType <- regmatches(returnType, regexec(pattern = "array\\[(.*)\\]", returnType))[[1]][2]
        if (c(innerReturnType) %in% primitiveTypes) {
          returnObj <- vector("list", length = length(obj))
          if (length(obj) > 0) {
            for (row in 1:length(obj)) {
              returnObj[[row]] <- self$deserializeObj(obj[row], innerReturnType, pkgEnv)
            }
          }
        } else {
          # Complex element types arrive as data.frame rows from jsonlite.
          if(!is.null(nrow(obj))){
            returnObj <- vector("list", length = nrow(obj))
            if (nrow(obj) > 0) {
              for (row in 1:nrow(obj)) {
                returnObj[[row]] <- self$deserializeObj(obj[row, , drop = FALSE], innerReturnType, pkgEnv)
              }
            }
          }
        }
      }
      else if (exists(returnType, pkgEnv) && !(c(returnType) %in% primitiveTypes)) {
        # Named model class: instantiate it and populate it from JSON.
        returnType <- get(returnType, envir = as.environment(pkgEnv))
        returnObj <- returnType$new()
        returnObj$fromJSON(
          jsonlite::toJSON(obj, digits = NA, auto_unbox = TRUE)
        )
      }
      else {
        # Primitive (or unknown) type: pass the value through unchanged.
        returnObj <- obj
      }
      returnObj
    },
    #' @description Flatten and unwrap a response to a data frame
    #'
    #' @param response The HTTP response object
    #' @returns A flattened result data frame
    #'
    flattenResponse = function(response) {
      flatResult <- response %>%
        httr::content("text") %>%
        jsonlite::fromJSON(flatten=TRUE)
      if(inherits(flatResult, "data.frame")){
        return(flatResult)
      } else {
        # Non-tabular payloads are unwrapped element-wise into one-row
        # tibbles and bound together.
        return(
          purrr::map_dfr(
            unlist(flatResult),
            magrittr::extract
          )
        )
      }
    }
  )
)
#' Create a new Tessitura Service object
#'
#' Convenience wrapper around `TessituraService$new()`.
#'
#' @param tessituraUrl The base url for your Tessitura REST API
#' @param credentials A base64 encded character string. Can be created with createCredentials()
#'
#' @return a TessituraService S4 object
#' @export
#'
createTessituraService <- function(tessituraUrl, credentials) {
  TessituraService$new(tessituraUrl, credentials)
}
#' Send a Request to the Tessitura API - Deprecated
#'
#' Builds the request URL from its parts, performs the request with basic
#' authorization, raises an informative error on non-200 responses and
#' either returns the raw `httr` response or a flattened data frame of the
#' JSON body.
#'
#' @param host The host name for your Tessitura API
#' @param basePath The base path for your Tessitura API. eg., TessituraService
#' @param resource The resource to be request from the API. eg., Diagnostics/Status
#' @param credentials A base64 encoded character string. Can be created with createCredentials()
#' @param request_type An http verb. Currently on GET and POST are supported
#' @param data The data object for POST.
#' @param flatten If true, a data frame will be returned of the results. If false, the result object will be returned.
#'
#' @return A data frame or list of the result
#' @export
#'
#' @examples
#'host <- 'mytessi.tessituranetwork.com/'
#'basePath <- 'TessituraService'
#'resource <- '/Diagnostics/Status'
#'credentials <- 'mybase64credentials'
#'#' callTessi(host, basePath, resource, credentials)
callTessi <- function(host, basePath, resource, credentials, request_type = "GET", data, flatten=TRUE){
  url <- paste0(host, basePath, resource)
  result <- list()
  if(request_type == "GET"){
    result <- httr::GET(
      url,
      httr::add_headers(.headers = c('Authorization' = credentials))
    )
  } else if(request_type == "POST") {
    # A data.frame is also a list, so typeof() == "list" accepts both.
    if(typeof(data) != "list"){stop("Data must be a data frame")}
    # Fixed: removed a dead loop that passed the vector result of
    # stringr::str_detect(names(data), ".") to if(), which errors for
    # multi-column data on R >= 4.2 and did nothing in any case.
    tryCatch({
      result <- httr::POST(
        url,
        httr::add_headers(.headers = c('Authorization' = credentials)),
        encode = "json",
        body=data
      )}, warning = function(war) {
      print(paste("Warning: ", war))
    }, error = function(err) {
      print(paste("Error: ", err))
    }
    )
  } else if(request_type %in% c("PUT", "DELETE")){
    stop("This type of request is not yet supported.")
  } else {
    stop("There was a problem with your request type.")
  }
  if(httr::status_code(result) != 200){
    # The explanation is passed to stop() directly so it becomes the
    # condition message (previously it was printed via writeLines() and the
    # error itself carried no message).
    stop(
      paste0("Your request returned status ", httr::status_code(result), "\n",
             "For more information, visit http://en.wikipedia.org/wiki/Http_error_codes\n",
             "The message from the server was '", strtrim(httr::content(result), 100), "'"
      ),
      call. = FALSE
    )
  }
  if(flatten == TRUE){
    flatResult <- result %>%
      httr::content("text") %>%
      jsonlite::fromJSON(flatten=TRUE)
    if(inherits(flatResult, "data.frame")){
      return(flatResult)
    } else {
      # Non-tabular payloads are unwrapped element-wise into a data frame.
      return(
        purrr::map_dfr(
          unlist(flatResult),
          magrittr::extract
        )
      )
    }
  }
  return(result)
}
#' @title Format a Tessitura Post Request
#'
#' @description Since Tessitura requires nested JSON requests in most routes, this function
#' will transform a data frame containing columns with names separated by a point
#' into a nested list or JSON string. Each column should be named like "Keyword.Category.Id"
#' as the function will nest at each subsequent . character.
#'
#' @param data A data frame to transform
#' @param returnJSON Return data in JSON or list format. Default is JSON.
#'
#' @return A nested JSON string (or nested list) for POST requests to the Tessitura API
#' @export
#'
#' @examples
#' data <- tribble(
#'  ~Keyword.Description, ~Keyword.Id, ~Keyword.Category.Id, ~Constituent.Id, ~Id, ~Value,
#'  "sample string 1", 2, 1, 1, 1, "sample string 3"
#' )
#' formattedRequest <- formatTessiPostRequest(data)
#'
formatTessiPostRequest <- function(data, returnJSON = TRUE) {
  nestedData <- list()
  # NOTE(review): only one level of nesting is produced; for names with more
  # than two components (e.g. "Keyword.Category.Id") intermediate levels are
  # collapsed. This should be updated to work recursively for each level.
  for(name in names(data)){
    splitColumnName <- unlist(strsplit(name, "[.]"))
    if(length(splitColumnName) > 1){
      for(level in splitColumnName){
        innerList <- list()
        if((which(level == splitColumnName) == 1))
        {
          # The first component is the key under which the rest is nested.
          next
        }
        else
        {
          innerList[[level]] <- data[[name]]
          nestedData[[splitColumnName[1]]] <- innerList
        }
      }
    }
    else
    {
      # Un-dotted names are copied over as-is.
      # Fixed: this previously referenced the undefined object `data2`,
      # which raised "object 'data2' not found" for any plain column name.
      nestedData[[name]] <- data[[name]]
    }
  }
  if(returnJSON){
    return(jsonlite::toJSON(nestedData, pretty=TRUE))
  } else {
    return(nestedData)
  }
}
#' Create Tessitura API Credentials
#'
#' Joins the four login components with ":" and base64-encodes them into a
#' "Basic" authorization header value.
#'
#' @param username Your Tessitura user name
#' @param usergroup Your Tessitura user group
#' @param location Your Tessitura machine location
#' @param password Your Tessitura password
#'
#' @return A base64 encoded character string for authorization
#' @export
#'
#' @examples
#' #' createCredentials(username="creif", usergroup="admin", location="MET95", password="impresario")
createCredentials <- function(username, usergroup, location, password) {
  raw_credentials <- paste(username, usergroup, location, password, sep = ":")
  paste0("Basic ", jsonlite::base64_enc(raw_credentials))
}
| /R/control.R | no_license | gdgkirkley/tessituraR | R | false | false | 13,417 | r | require(httr)
require(jsonlite)
require(magrittr)
require(stringr)
#' A TessituraService class
#'
#' This is a more useable Tessitura Service class that can be implemented by
#' further objects to create requests.
#'
#' @docType class
#' @title TessituraService
#' @field tessituraUrl Base Tessitura REST API url
#' @field credentials Base64 encoded credentials - can be created with createCredentials()
#' @field defaultHeaders Default headers for all requests
#' @field timeout The maximum timeout for a request
#' @field maxRetryAttempts The maximum number of times to retry a request before failing
#' @export
#'
#' @examples
#' TessituraService$new(
#' tessituraUrl = "https://mytessi.tessituranetwork.com",
#' credentials = createCredentials(username="creif", usergroup="admin", location="MET95", password="impresario")
#' )
TessituraService <- R6::R6Class(
  classname = "TessituraService",
  public = list(
    tessituraUrl = NULL,
    credentials = NULL,
    defaultHeaders = NULL,
    timeout = NULL,
    maxRetryAttempts = NULL,
    userAgent = "TessituraUser/1.0.0/r",
    #' @description Constructor
    #'
    #' @param tessituraUrl Base Tessitura REST API url
    #' @param credentials Base64 encoded credentials
    #' @param defaultHeaders Default headers for all requests
    #' @param timeout The maximum timeout for a request. Defaults to 5000.
    #' @param maxRetryAttempts The maximum number of times to retry a request before failing. Defaults to 3.
    #' @param userAgent The user agent to use in requests
    #'
    initialize = function(tessituraUrl, credentials, defaultHeaders = NULL, userAgent = NULL, timeout = 5000, maxRetryAttempts = 3) {
      if(!is.null(tessituraUrl)) {
        self$tessituraUrl <- tessituraUrl
      }
      if(!is.null(credentials)) {
        self$credentials <- credentials
      }
      if(!is.null(defaultHeaders)) {
        self$defaultHeaders <- defaultHeaders
      }
      if(!is.null(userAgent)) {
        self$userAgent <- userAgent
      }
      if (!is.null(timeout)) {
        self$timeout <- timeout
      }
      if(!is.null(maxRetryAttempts)) {
        self$maxRetryAttempts <- maxRetryAttempts
      }
    },
    #'
    #' @description Call a Tessitura endpoint with retries
    #'
    #' @param url The endpoint to request relative to the tessituraUrl
    #' @param method The HTTP method
    #' @param queryParams The query parameters for the request
    #' @param headerParams Any additional headers to attach to the request
    #' @param body The request body for POST and PUT methods
    #' @param ... Any other parameters for the httr methods
    #'
    CallTessi = function(url, method, queryParams = list(), headerParams = c(), body = NULL, ...) {
      # Only HTTP 500 responses are treated as transient and retried.
      retryStatusCodes <- c(500)
      resp <- self$Execute(url, method, queryParams, headerParams, body, ...)
      statusCode <- httr::status_code(resp)
      if (is.null(self$maxRetryAttempts)) {
        self$maxRetryAttempts <- 3
      }
      for (i in seq_len(self$maxRetryAttempts)) {
        if (statusCode %in% retryStatusCodes) {
          # Exponential backoff with jitter before each retry attempt.
          Sys.sleep((2 ^ i) + stats::runif(n = 1, min = 0, max = 1))
          resp <- self$Execute(url, method, queryParams, headerParams, body, ...)
          statusCode <- httr::status_code(resp)
        } else {
          break
        }
      }
      resp
    },
    #'
    #' @description Execute a request
    #'
    #' @param url The endpoint to request relative to the tessituraUrl
    #' @param method The HTTP method
    #' @param queryParams The query parameters for the request
    #' @param headerParams Any additional headers to attach to the request
    #' @param body The request body for POST and PUT methods
    #' @param ... Any other parameters for the httr methods
    #'
    Execute = function(url, method, queryParams, headerParams, body, ...) {
      headers <- httr::add_headers(
        c(headerParams, self$defaultHeaders, Authorization = self$credentials)
      )
      path <- paste0(self$tessituraUrl, url)
      if (method == "GET") {
        httr::GET(path, query = queryParams, headers, httr::timeout(self$timeout),
                  httr::user_agent(self$`userAgent`), ...)
      } else if (method == "POST") {
        # Fixed: the dots were previously misspelled as `....`, which raised
        # "object '....' not found" whenever a POST request was executed.
        httr::POST(path, query = queryParams, headers, body = body,
                   httr::content_type("application/json"), httr::timeout(self$timeout),
                   httr::user_agent(self$`userAgent`), ...)
      } else if (method == "PUT") {
        # Fixed: same `....` typo as in the POST branch.
        httr::PUT(path, query = queryParams, headers, body = body,
                  httr::content_type("application/json"), httr::timeout(self$timeout),
                  httr::user_agent(self$`userAgent`), ...)
      } else if (method == "DELETE") {
        httr::DELETE(path, query = queryParams, headers, httr::timeout(self$timeout),
                     httr::user_agent(self$`userAgent`), ...)
      } else {
        error <- "HTTP Method must be GET, POST, PUT or DELETE"
        stop(error)
      }
    },
    #' @description Deserialize the content of api response to the given type.
    #'
    #' @param resp The response object
    #' @param returnType The type of object to return
    #' @param pkgEnv The environment to find the type in
    #'
    deserialize = function(resp, returnType, pkgEnv) {
      respObj <- jsonlite::fromJSON(httr::content(resp, "text", encoding = "UTF-8"))
      self$deserializeObj(respObj, returnType, pkgEnv)
    },
    #' @description Deserialize the response from jsonlite object based on the given type
    #' by handling complex and nested types by iterating recursively
    #' Example returnTypes will be like "array[integer]", "map(Performance)", "array[map(Performance)]", etc.,
    #'
    #' @param obj The object to deserialize
    #' @param returnType The type of object to return
    #' @param pkgEnv The environment to find the type in
    #'
    deserializeObj = function(obj, returnType, pkgEnv) {
      returnObj <- NULL
      primitiveTypes <- c("character", "numeric", "integer", "logical", "complex")
      if (startsWith(returnType, "map(")) {
        # Recurse into each named element of the map.
        innerReturnType <- regmatches(returnType, regexec(pattern = "map\\((.*)\\)", returnType))[[1]][2]
        returnObj <- lapply(names(obj), function(name) {
          self$deserializeObj(obj[[name]], innerReturnType, pkgEnv)
        })
        names(returnObj) <- names(obj)
      }
      else if (startsWith(returnType, "array[")) {
        innerReturnType <- regmatches(returnType, regexec(pattern = "array\\[(.*)\\]", returnType))[[1]][2]
        if (c(innerReturnType) %in% primitiveTypes) {
          returnObj <- vector("list", length = length(obj))
          if (length(obj) > 0) {
            for (row in 1:length(obj)) {
              returnObj[[row]] <- self$deserializeObj(obj[row], innerReturnType, pkgEnv)
            }
          }
        } else {
          # Complex element types arrive as data.frame rows from jsonlite.
          if(!is.null(nrow(obj))){
            returnObj <- vector("list", length = nrow(obj))
            if (nrow(obj) > 0) {
              for (row in 1:nrow(obj)) {
                returnObj[[row]] <- self$deserializeObj(obj[row, , drop = FALSE], innerReturnType, pkgEnv)
              }
            }
          }
        }
      }
      else if (exists(returnType, pkgEnv) && !(c(returnType) %in% primitiveTypes)) {
        # Named model class: instantiate it and populate it from JSON.
        returnType <- get(returnType, envir = as.environment(pkgEnv))
        returnObj <- returnType$new()
        returnObj$fromJSON(
          jsonlite::toJSON(obj, digits = NA, auto_unbox = TRUE)
        )
      }
      else {
        # Primitive (or unknown) type: pass the value through unchanged.
        returnObj <- obj
      }
      returnObj
    },
    #' @description Flatten and unwrap a response to a data frame
    #'
    #' @param response The HTTP response object
    #' @returns A flattened result data frame
    #'
    flattenResponse = function(response) {
      flatResult <- response %>%
        httr::content("text") %>%
        jsonlite::fromJSON(flatten=TRUE)
      if(inherits(flatResult, "data.frame")){
        return(flatResult)
      } else {
        # Non-tabular payloads are unwrapped element-wise into one-row
        # tibbles and bound together.
        return(
          purrr::map_dfr(
            unlist(flatResult),
            magrittr::extract
          )
        )
      }
    }
  )
)
#' Create a new Tessitura Service object
#'
#' Convenience wrapper around `TessituraService$new()`.
#'
#' @param tessituraUrl The base url for your Tessitura REST API
#' @param credentials A base64 encded character string. Can be created with createCredentials()
#'
#' @return a TessituraService S4 object
#' @export
#'
createTessituraService <- function(tessituraUrl, credentials) {
  TessituraService$new(tessituraUrl, credentials)
}
#' Send a Request to the Tessitura API - Deprecated
#'
#' Builds the request URL from its parts, performs the request with basic
#' authorization, raises an informative error on non-200 responses and
#' either returns the raw `httr` response or a flattened data frame of the
#' JSON body.
#'
#' @param host The host name for your Tessitura API
#' @param basePath The base path for your Tessitura API. eg., TessituraService
#' @param resource The resource to be request from the API. eg., Diagnostics/Status
#' @param credentials A base64 encoded character string. Can be created with createCredentials()
#' @param request_type An http verb. Currently on GET and POST are supported
#' @param data The data object for POST.
#' @param flatten If true, a data frame will be returned of the results. If false, the result object will be returned.
#'
#' @return A data frame or list of the result
#' @export
#'
#' @examples
#'host <- 'mytessi.tessituranetwork.com/'
#'basePath <- 'TessituraService'
#'resource <- '/Diagnostics/Status'
#'credentials <- 'mybase64credentials'
#'#' callTessi(host, basePath, resource, credentials)
callTessi <- function(host, basePath, resource, credentials, request_type = "GET", data, flatten=TRUE){
  url <- paste0(host, basePath, resource)
  result <- list()
  if(request_type == "GET"){
    result <- httr::GET(
      url,
      httr::add_headers(.headers = c('Authorization' = credentials))
    )
  } else if(request_type == "POST") {
    # A data.frame is also a list, so typeof() == "list" accepts both.
    if(typeof(data) != "list"){stop("Data must be a data frame")}
    # Fixed: removed a dead loop that passed the vector result of
    # stringr::str_detect(names(data), ".") to if(), which errors for
    # multi-column data on R >= 4.2 and did nothing in any case.
    tryCatch({
      result <- httr::POST(
        url,
        httr::add_headers(.headers = c('Authorization' = credentials)),
        encode = "json",
        body=data
      )}, warning = function(war) {
      print(paste("Warning: ", war))
    }, error = function(err) {
      print(paste("Error: ", err))
    }
    )
  } else if(request_type %in% c("PUT", "DELETE")){
    stop("This type of request is not yet supported.")
  } else {
    stop("There was a problem with your request type.")
  }
  if(httr::status_code(result) != 200){
    # The explanation is passed to stop() directly so it becomes the
    # condition message (previously it was printed via writeLines() and the
    # error itself carried no message).
    stop(
      paste0("Your request returned status ", httr::status_code(result), "\n",
             "For more information, visit http://en.wikipedia.org/wiki/Http_error_codes\n",
             "The message from the server was '", strtrim(httr::content(result), 100), "'"
      ),
      call. = FALSE
    )
  }
  if(flatten == TRUE){
    flatResult <- result %>%
      httr::content("text") %>%
      jsonlite::fromJSON(flatten=TRUE)
    if(inherits(flatResult, "data.frame")){
      return(flatResult)
    } else {
      # Non-tabular payloads are unwrapped element-wise into a data frame.
      return(
        purrr::map_dfr(
          unlist(flatResult),
          magrittr::extract
        )
      )
    }
  }
  return(result)
}
#' @title Format a Tessitura Post Request
#'
#' @description Since Tessitura requires nested JSON requests in most routes, this function
#' will transform a data frame containing columns with names separated by a point
#' into a nested list or JSON string. Each column should be named like "Keyword.Category.Id"
#' as the function will nest at each subsequent . character.
#'
#' @param data A data frame to transform
#' @param returnJSON Return data in JSON or list format. Default is JSON.
#'
#' @return A nested JSON string (or nested list) for POST requests to the Tessitura API
#' @export
#'
#' @examples
#' data <- tribble(
#'  ~Keyword.Description, ~Keyword.Id, ~Keyword.Category.Id, ~Constituent.Id, ~Id, ~Value,
#'  "sample string 1", 2, 1, 1, 1, "sample string 3"
#' )
#' formattedRequest <- formatTessiPostRequest(data)
#'
formatTessiPostRequest <- function(data, returnJSON = TRUE) {
  nestedData <- list()
  # NOTE(review): only one level of nesting is produced; for names with more
  # than two components (e.g. "Keyword.Category.Id") intermediate levels are
  # collapsed. This should be updated to work recursively for each level.
  for(name in names(data)){
    splitColumnName <- unlist(strsplit(name, "[.]"))
    if(length(splitColumnName) > 1){
      for(level in splitColumnName){
        innerList <- list()
        if((which(level == splitColumnName) == 1))
        {
          # The first component is the key under which the rest is nested.
          next
        }
        else
        {
          innerList[[level]] <- data[[name]]
          nestedData[[splitColumnName[1]]] <- innerList
        }
      }
    }
    else
    {
      # Un-dotted names are copied over as-is.
      # Fixed: this previously referenced the undefined object `data2`,
      # which raised "object 'data2' not found" for any plain column name.
      nestedData[[name]] <- data[[name]]
    }
  }
  if(returnJSON){
    return(jsonlite::toJSON(nestedData, pretty=TRUE))
  } else {
    return(nestedData)
  }
}
#' Create Tessitura API Credentials
#'
#' Joins the four login components with ":" and base64-encodes them into a
#' "Basic" authorization header value.
#'
#' @param username Your Tessitura user name
#' @param usergroup Your Tessitura user group
#' @param location Your Tessitura machine location
#' @param password Your Tessitura password
#'
#' @return A base64 encoded character string for authorization
#' @export
#'
#' @examples
#' #' createCredentials(username="creif", usergroup="admin", location="MET95", password="impresario")
createCredentials <- function(username, usergroup, location, password) {
  raw_credentials <- paste(username, usergroup, location, password, sep = ":")
  paste0("Basic ", jsonlite::base64_enc(raw_credentials))
}
|
\name{groupRatio}
%
\alias{groupRatio}
\alias{groupRatio-methods}
\alias{groupRatio,exonLoessModel-method}
%
\title{Calculates group-wise ratios of alignment depth (AD)}
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - %
% Description
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - %
\description{\code{groupRatio} takes an \code{exonLoessModel} object and
quantifies how consistently the alignment depth (AD) differs between
sample groups across a genomic region. For each genomic position the
group-wise AD accumulates (by default the group means, see argument
\code{f}) are compared, and the function returns the signed fraction of
positions on which the AD ratio between subsequent groups exceeds the
given limit \code{lim} (or falls below \code{1/lim}).}
%
\usage{groupRatio(object, lim=1.2, cut=0, order=NULL, f=mean)}
\arguments{
\item{object}{exonLoessModel}
%
\item{lim}{numeric. Limit ratio. Must be > 1. The function returns
the fraction of genetic position where AD-ratio between groups is > lim
or the fraction of positions where AD-Ratio is < 1/lim
(i.e the larger ratio).}
%
\item{cut}{numeric. When >0 , the function uses \code{cutFlatAlignDepth}
for cutting out low alignment depth regions before calculating.
alignment depth ratio.}
%
\item{order}{numeric. When given, the function reorders the sample groups.
Can be used to provide ascending (or descending) group ordering, e.g.
group1 < group2 < group3.}
%
\item{f}{function. Function for calculation of group accumulates.
Defaults to \code{mean}. Alternatively \code{median} may also
be used.
}
%
}
\value{numeric}
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - %
% Details
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - %
\details{The size of the returned value (abs(groupRatio)) indicates on which
proportion of the genetic region, AD ratio between subsequent
groups exceeds the given limit. For lim=1.1, group1<group2<group3, a
returned value of 0.8 says that the AD ratios group2:group1 and group3:group2
are at least 1.1 (> 1) on 80 percent of the contained genomic positions.
Negative values say that the relation is group1>group2>group3. This
allows discrimination of up- and down-regulated genes.}
\author{Wolfgang Kaisers}
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - %
% Examples
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - %
\examples{
## - - - - - - - - - - - - - - - - - - - - - - ##
# Construct sampleBamFiles object
bam <- system.file("extdata", "accepted_hits.bam", package="rbamtools")
bs <- sampleBamFiles(1)
bamFiles(bs) <- bam
sampleLabels(bs) <- "s1"
sampleGroups(bs) <- "g1"
checkBamFiles(bs)
nAligns(bs) <- bamCountAll(bs)
bs
## - - - - - - - - - - - - - - - - - - - - - - ##
# Construct geneModel object
library(refGenome)
ucfile <- system.file("extdata", "hs.ucsc.small.RData", package="refGenome")
uc <- loadGenome(ucfile)
gt <- getGeneTable(uc)
gene_id <- as.character(gt$gene_id[1])
gm <- geneModel(uc, gene_id)
## - - - - - - - - - - - - - - - - - - - - - - ##
# Construct geneAlignDepth object
gad <- geneAlignDepth(bs, gm)
## - - - - - - - - - - - - - - - - - - - - - - ##
# Extract exonLoessModel object
ead <- exonAlignDepth(gad, ratioLim=5, infVal=1000)
elm <- exonLoessModel(ead)
celm <- cutFlatAlignDepth(elm, ratio=0.1)
groupRatio(celm, lim=1.2, cut=0, order=1)
}
\keyword{groupRatio}
\keyword{bamRange}
| /man/groupRatio.Rd | no_license | cran/rbamtools | R | false | false | 3,680 | rd | \name{groupRatio}
%
\alias{groupRatio}
\alias{groupRatio-methods}
\alias{groupRatio,exonLoessModel-method}
%
\title{Calculates group-wise ratios of alignment depth (AD)}
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - %
% Description
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - %
\description{\code{groupRatio} takes an \code{exonLoessModel} object and
quantifies how consistently the alignment depth (AD) differs between
sample groups across a genomic region. For each genomic position the
group-wise AD accumulates (by default the group means, see argument
\code{f}) are compared, and the function returns the signed fraction of
positions on which the AD ratio between subsequent groups exceeds the
given limit \code{lim} (or falls below \code{1/lim}).}
%
\usage{groupRatio(object, lim=1.2, cut=0, order=NULL, f=mean)}
\arguments{
\item{object}{exonLoessModel}
%
\item{lim}{numeric. Limit ratio. Must be > 1. The function returns
the fraction of genetic position where AD-ratio between groups is > lim
or the fraction of positions where AD-Ratio is < 1/lim
(i.e the larger ratio).}
%
\item{cut}{numeric. When >0 , the function uses \code{cutFlatAlignDepth}
for cutting out low alignment depth regions before calculating.
alignment depth ratio.}
%
\item{order}{numeric. When given, the function reorders the sample groups.
Can be used to provide ascending (or descending) group ordering, e.g.
group1 < group2 < group3.}
%
\item{f}{function. Function for calculation of group accumulates.
Defaults to \code{mean}. Alternatively \code{median} may also
be used.
}
%
}
\value{numeric}
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - %
% Details
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - %
\details{The size of the returned value (abs(groupRatio)) indicates on which
proportion of the genetic region, AD ratio between subsequent
groups exceeds the given limit. For lim=1.1, group1<group2<group3, a
returned value of 0.8 says that the AD ratios group2:group1 and group3:group2
are at least 1.1 (> 1) on 80 percent of the contained genomic positions.
Negative values say that the relation is group1>group2>group3. This
allows discrimination of up- and down-regulated genes.}
\author{Wolfgang Kaisers}
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - %
% Examples
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - %
\examples{
## - - - - - - - - - - - - - - - - - - - - - - ##
# Construct sampleBamFiles object
bam <- system.file("extdata", "accepted_hits.bam", package="rbamtools")
bs <- sampleBamFiles(1)
bamFiles(bs) <- bam
sampleLabels(bs) <- "s1"
sampleGroups(bs) <- "g1"
checkBamFiles(bs)
nAligns(bs) <- bamCountAll(bs)
bs
## - - - - - - - - - - - - - - - - - - - - - - ##
# Construct geneModel object
library(refGenome)
ucfile <- system.file("extdata", "hs.ucsc.small.RData", package="refGenome")
uc <- loadGenome(ucfile)
gt <- getGeneTable(uc)
gene_id <- as.character(gt$gene_id[1])
gm <- geneModel(uc, gene_id)
## - - - - - - - - - - - - - - - - - - - - - - ##
# Construct geneAlignDepth object
gad <- geneAlignDepth(bs, gm)
## - - - - - - - - - - - - - - - - - - - - - - ##
# Extract exonLoessModel object
ead <- exonAlignDepth(gad, ratioLim=5, infVal=1000)
elm <- exonLoessModel(ead)
celm <- cutFlatAlignDepth(elm, ratio=0.1)
groupRatio(celm, lim=1.2, cut=0, order=1)
}
\keyword{groupRatio}
\keyword{bamRange}
|
# Extracted example for table1::render.varlabel: renders the display label
# for a continuous variable (with units) and for a factor.
library(table1)
### Name: render.varlabel
### Title: Render variable labels for table output.
### Aliases: render.varlabel
### Keywords: utilities
### ** Examples
# Continuous variable: log-normal "weights", labelled and given units.
x <- exp(rnorm(100, 1, 1))
label(x) <- "Weight"
units(x) <- "kg"
render.varlabel(x)
# Categorical variable with some missing values introduced.
y <- factor(sample(0:1, 99, replace=TRUE), labels=c("Female", "Male"))
y[1:10] <- NA
label(y) <- "Sex"
render.varlabel(y)
| /data/genthat_extracted_code/table1/examples/render.varlabel.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 377 | r | library(table1)
### Name: render.varlabel
### Title: Render variable labels for table output.
### Aliases: render.varlabel
### Keywords: utilities
### ** Examples
x <- exp(rnorm(100, 1, 1))
label(x) <- "Weight"
units(x) <- "kg"
render.varlabel(x)
y <- factor(sample(0:1, 99, replace=TRUE), labels=c("Female", "Male"))
y[1:10] <- NA
label(y) <- "Sex"
render.varlabel(y)
|
# mixmodel_jags.R: Bayesian deconvolution of plasma fragmentation profiles.
# Builds per-sample matrices of corrected short/long fragment counts per
# 500kb bin, then fits the JAGS mixture model in mixmodel.jag to the sample
# with the highest estimated tumor fraction.
library(tidyverse)
library(rjags)
library(ggmcmc)
library(devtools)
load_all("deconvolution.data")
extdir <- system.file("extdata", package="deconvolution.data")
fname <- file.path(extdir, "500kb_deconvolution.rds")
dat <- readRDS(fname) %>%
  as_tibble() %>%
  rename(tissue_source=sample) %>%
  ## I'm guessing here
  # NOTE(review): assumes exactly 5008 bins per sample and 79 samples in
  # row order -- verify against the dimensions of 500kb_deconvolution.rds.
  mutate(bin=rep(seq_len(5008), 79))
##
## Most convenient format would be a matrix for each sample
## rows are bins, columns indicate tissue source
## - make for total coverage and short/long
##
id <- unique(dat$id)
J <- length(id)
rlist <- vector("list", length(id))
clist <- rlist
# For each sample: spread short/long corrected counts to bin-by-tissue
# matrices, then store the short/long ratio and the short+long total.
for(j in seq_len(J)){
  short <- filter(dat, id==id[j]) %>%
    select(tissue_source, bin, arm, short.cor) %>%
    spread(tissue_source, short.cor)
  shortm <- short %>%
    select(-c(bin, arm)) %>%
    as.matrix()
  long <- filter(dat, id==id[j]) %>%
    select(tissue_source, bin, arm, long.cor) %>%
    spread(tissue_source, long.cor)
  longm <- long %>%
    select(-c(bin, arm)) %>%
    as.matrix()
  ratios <- (shortm/longm) %>%
    as_tibble() %>%
    mutate(bin=short$bin,
           arm=short$arm)
  total <- (shortm + longm) %>%
    as_tibble() %>%
    mutate(bin=short$bin,
           arm=short$arm)
  rlist[[j]] <- ratios
  clist[[j]] <- total
}
names(rlist) <- names(clist) <- id
##
## First pass. Start with sample with high maf
##
# Order samples by decreasing ichorCNA tumor fraction so the first element
# of rlist/clist is the highest-fraction sample.
mafs <- readRDS(file.path(extdir, "tumor_fractions.rds")) %>%
  arrange(-ichor.tumor.fraction)
rlist <- rlist[mafs$id]
clist <- clist[mafs$id]
s1 <- clist[[1]]
# NOTE(review): wbc and tumor counts are log-transformed but `normal` is
# not -- confirm against the likelihood in mixmodel.jag.
dat <- list(y=s1$plasma,
            wbc=log(s1$buffy),
            tumor=log(s1$tumor),
            normal=(s1$normal))
fit <- jags.model("mixmodel.jag",
                  data=dat,
                  n.chains=3)
# Draw 1000 posterior samples of the mixing proportion theta per chain and
# convert to ggmcmc's long format for plotting/diagnostics.
samples <- coda.samples(fit, variable.names="theta", n.iter=1000) %>%
  ggs()
| /code/mixmodel_jags.R | no_license | cancer-genomics/deconvolution | R | false | false | 1,887 | r | library(tidyverse)
library(rjags)
library(ggmcmc)
library(devtools)
load_all("deconvolution.data")
extdir <- system.file("extdata", package="deconvolution.data")
fname <- file.path(extdir, "500kb_deconvolution.rds")
dat <- readRDS(fname) %>%
as_tibble() %>%
rename(tissue_source=sample) %>%
## I'm guessing here
mutate(bin=rep(seq_len(5008), 79))
##
## Most convenient format would be a matrix for each sample
## rows are bins, columns indicate tissue source
## - make for total coverage and short/long
##
id <- unique(dat$id)
J <- length(id)
rlist <- vector("list", length(id))
clist <- rlist
for(j in seq_len(J)){
short <- filter(dat, id==id[j]) %>%
select(tissue_source, bin, arm, short.cor) %>%
spread(tissue_source, short.cor)
shortm <- short %>%
select(-c(bin, arm)) %>%
as.matrix()
long <- filter(dat, id==id[j]) %>%
select(tissue_source, bin, arm, long.cor) %>%
spread(tissue_source, long.cor)
longm <- long %>%
select(-c(bin, arm)) %>%
as.matrix()
ratios <- (shortm/longm) %>%
as_tibble() %>%
mutate(bin=short$bin,
arm=short$arm)
total <- (shortm + longm) %>%
as_tibble() %>%
mutate(bin=short$bin,
arm=short$arm)
rlist[[j]] <- ratios
clist[[j]] <- total
}
names(rlist) <- names(clist) <- id
##
## First pass. Start with sample with high maf
##
mafs <- readRDS(file.path(extdir, "tumor_fractions.rds")) %>%
arrange(-ichor.tumor.fraction)
rlist <- rlist[mafs$id]
clist <- clist[mafs$id]
s1 <- clist[[1]]
dat <- list(y=s1$plasma,
wbc=log(s1$buffy),
tumor=log(s1$tumor),
normal=(s1$normal))
fit <- jags.model("mixmodel.jag",
data=dat,
n.chains=3)
samples <- coda.samples(fit, variable.names="theta", n.iter=1000) %>%
ggs()
|
ntests <- 15
## 15 evenly spaced regularization strengths on [3, 15], scaled by 1/5.
muList <- seq(from=3, to = 15, by=(15-3)/(ntests-1))/5
## One SNR value per mu, preallocated instead of grown with c() in a loop.
errR <- numeric(ntests)
for (i in seq_len(ntests)){
  mu <- muList[i]
  ## Solve (I + mu*L) x = b for each of the three coordinate rows.
  ## NOTE(review): assumes `n`, `L`, `X`, `X0` and a 3 x n matrix `Xmu`
  ## were defined by the preceding steps of the exercise.
  A <- .sparseDiagonal(x=1, n) + mu*L
  for (k in (1:3)){
    Xmu[k,] <- t(pcg(as.matrix(A),X[k,]))
  }
  ## BUG FIX: the SNR used to be appended inside the k-loop, recording
  ## partially denoised meshes (3 values per mu, 45 in total) even though
  ## the result is plotted against "mu".  It is now computed once per mu,
  ## after all three coordinate rows have been updated.
  errR[i] <- snr(X0,Xmu)
}
plot(errR, type="b", col="blue", xlab = "mu", ylab = "SNR") | /r/nt_solutions/meshproc_3_denoising/exo4.R | no_license | akn264/numerical-tours | R | false | false | 341 | r | ntests <- 15
muList <- seq(from=3, to = 15, by=(15-3)/(ntests-1))/5
errR <- c()
for (i in (1:ntests)){
mu <- muList[i]
A <- .sparseDiagonal(x=1, n) + mu*L
for (k in (1:3)){
Xmu[k,] <- t(pcg(as.matrix(A),X[k,]))
errR <- c(errR,snr(X0,Xmu))
}
}
plot(errR, type="b", col="blue", xlab = "mu", ylab = "SNR") |
## Libraries
library(pROC)
library(dplyr)
library(ggplot2)
library(gridExtra)
## Load the data
train <- read.csv("../motodata/train.csv", header = TRUE)
test  <- read.csv("../motodata/test.csv", header = TRUE)
## Split off the "old" customers: pdays > -1 means the customer was
## contacted in a previous campaign.
train_old <- train %>%
  dplyr::filter(pdays > -1)
test_old <- test %>%
  dplyr::filter(pdays > -1)
## Derive the previous-campaign contact date.  Month abbreviations are
## parsed in the "C" locale; the original locale is restored afterwards.
## NOTE(review): the year 2017 is hard-coded and `day`/`month` are assumed
## to encode the previous campaign's contact -- confirm against the data
## dictionary.
lct <- Sys.getlocale("LC_TIME"); Sys.setlocale("LC_TIME", "C")
train_old <- train_old %>%
  mutate(pdate = as.Date(paste(day, month, "2017", sep=""), "%d%b%Y"))
test_old <- test_old %>%
  mutate(pdate = as.Date(paste(day, month, "2017", sep=""), "%d%b%Y"))
Sys.setlocale("LC_TIME", lct)
str(train_old$pdate)
table(train_old$pdate, train_old$y)
table(train_old$pdate, train_old$poutcome)
table(train_old$pdate, train_old$job)
## Contacts per date and job
train_old %>%
  dplyr::select(c('pdate','job')) %>%
  dplyr::group_by(pdate, job) %>%
  dplyr::summarise(count = n()) %>%
  dplyr::ungroup(.) %>%
  ggplot(., aes(x = pdate, y = count, colour = job)) + geom_line()
## Number of customers approached in the previous campaign
g <- train_old %>%
  dplyr::select(c('pdate','job','poutcome')) %>%
  dplyr::group_by(pdate, job) %>%
  dplyr::summarise(count = n()) %>%
  dplyr::ungroup(.) %>%
  glimpse
## Number of previous-campaign successes (overwrites `g`)
g <- train_old %>%
  dplyr::filter(poutcome == 'success') %>%
  dplyr::select(c('pdate','job','poutcome')) %>%
  dplyr::group_by(pdate, job) %>%
  dplyr::summarise(count = n()) %>%
  dplyr::ungroup(.) %>%
  glimpse
## Line plot of `g` restricted to a date window, optionally faceted by job.
## Replaces six copy-pasted ggplot blocks over the same three windows.
plot_window <- function(g, from, to, facet = FALSE) {
  p <- ggplot(data = g, aes(x = pdate, y = count, colour = job)) +
    geom_line() +
    xlim(as.Date(from), as.Date(to))
  if (facet) p <- p + facet_grid(~job)
  p
}
ggplot(data = g, aes(x = pdate, y = count, colour = job)) +
  geom_line()
plot_window(g, "2017-01-15", "2017-02-20")
plot_window(g, "2017-04-10", "2017-05-25")
plot_window(g, "2017-11-10", "2017-12-01")
chart1 <- plot_window(g, "2017-01-15", "2017-02-20", facet = TRUE)
chart2 <- plot_window(g, "2017-04-10", "2017-05-25", facet = TRUE)
chart3 <- plot_window(g, "2017-11-10", "2017-12-01", facet = TRUE)
## All three windows on one page
grid.arrange(chart1, chart2, chart3, ncol = 1)
## Customer profile
p <- ggplot(train, aes(x = age, y = balance))
p + geom_point(aes(colour = y))
## Mean response and count by job/contact/housing, best segments first.
## BUG FIX: in the original, `dplyr::arrange(desc(m))` sat on its own line
## AFTER the pipeline had already ended at `ungroup(.)`, leaving a dangling
## `%>%` that piped into an assignment -- a syntax error.  The arrange step
## now lives inside the pipeline.
g <- train %>%
  dplyr::group_by(job, contact, housing) %>%
  dplyr::summarise(m = mean(y), count = n()) %>%
  dplyr::ungroup(.) %>%
  dplyr::arrange(desc(m))
df <- as.data.frame(g)
| /pgm-r/PreviousCampagin.R | no_license | zoe3/bank | R | false | false | 3,099 | r | ##使用ライブラリ
library(pROC)
library(dplyr)
library(ggplot2)
library(gridExtra)
##データ読込
train<-read.csv("../motodata/train.csv", header=T)
test<-read.csv("../motodata/test.csv", header=T)
## Customerを分割. Old
train_old <- train %>%
dplyr::filter(pdays > -1)
test_old <- test %>%
dplyr::filter(pdays > -1)
## 前キャンペーンの日付求める
lct <- Sys.getlocale("LC_TIME"); Sys.setlocale("LC_TIME", "C")
train_old <- train_old %>%
mutate(pdate = as.Date(paste(day,month,"2017",sep=""),"%d%b%Y"))
## mutate(pdate = as.integer(as.Date(paste(day,month,"2017",sep=""),"%d%b%Y")))
test_old <- test_old %>%
mutate(pdate = as.Date(paste(day,month,"2017",sep=""),"%d%b%Y"))
## mutate(pdate = as.integer(as.Date(paste(day,month,"2017",sep=""),"%d%b%Y")))
Sys.setlocale("LC_TIME", lct)
str(train_old$pdate)
table(train_old$pdate, train_old$y)
table(train_old$pdate, train_old$poutcome)
table(train_old$pdate, train_old$job)
train_old %>%
dplyr::select(c('pdate','job')) %>%
dplyr::group_by(pdate,job) %>%
dplyr::summarise(count=n()) %>%
dplyr::ungroup(.) %>%
ggplot(., aes(x=pdate, y=count, colour=job)) + geom_line()
## 前キャンペーンにアプローチした数
g <- train_old %>%
dplyr::select(c('pdate','job','poutcome')) %>%
dplyr::group_by(pdate,job) %>%
dplyr::summarise(count=n()) %>%
dplyr::ungroup(.) %>%
glimpse
## 前キャンペーンでsuccessだった数
g <- train_old %>%
dplyr::filter(poutcome == 'success') %>%
dplyr::select(c('pdate','job','poutcome')) %>%
dplyr::group_by(pdate,job) %>%
dplyr::summarise(count=n()) %>%
dplyr::ungroup(.) %>%
glimpse
ggplot(data=g, aes(x=pdate, y=count, colour=job)) +
geom_line()
ggplot(data=g, aes(x=pdate, y=count, colour=job)) +
xlim(as.Date("2017-01-15"),as.Date("2017-02-20")) +
geom_line()
ggplot(data=g, aes(x=pdate, y=count, colour=job)) +
xlim(as.Date("2017-04-10"),as.Date("2017-05-25")) +
geom_line()
ggplot(data=g, aes(x=pdate, y=count, colour=job)) +
xlim(as.Date("2017-11-10"),as.Date("2017-12-01")) +
geom_line()
chart1 <- ggplot(data=g, aes(x=pdate, y=count, colour=job)) +
geom_line() +
xlim(as.Date("2017-01-15"),as.Date("2017-02-20")) +
facet_grid(~job)
chart2 <- ggplot(data=g, aes(x=pdate, y=count, colour=job)) +
geom_line() +
xlim(as.Date("2017-04-10"),as.Date("2017-05-25")) +
facet_grid(~job)
chart3 <- ggplot(data=g, aes(x=pdate, y=count, colour=job)) +
geom_line() +
xlim(as.Date("2017-11-10"),as.Date("2017-12-01")) +
facet_grid(~job)
## まとめて1枚に出力
grid.arrange(chart1, chart2, chart3, ncol = 1)
## 顧客情報の観察
p <- ggplot(train, aes(x=age, y=balance))
p + geom_point(aes(colour=y))
g <- train %>%
dplyr::group_by(job, contact, housing) %>%
dplyr::summarise(m=mean(y), count=n()) %>%
dplyr::ungroup(.)
dplyr::arrange(desc(m)) %>%
df = as.data.frame(g)
|
# Scratch test: what does `.Last.value` hold after a subassignment?
# The `## value` comments record the expected results: they suggest a
# complex assignment `x[i] <- value` evaluates to its right-hand side,
# so `.Last.value` reflects the RHS, not the updated `x`.
x <- 1:10
x[1:2] <- 3:4
.Last.value ## 3:4
x[1:2] <- 1
.Last.value ## 1
x[1:2] = 2
.Last.value ## 2
x[1:2] <- x[1:2] * 2
.Last.value ## 4:8
# Unrelated check: recycling a length-2 vector against length 10.
1:10 * 1:2
| /tmp-tests/pb-with-subset.R | no_license | privefl/inplace | R | false | false | 158 | r | x <- 1:10
x[1:2] <- 3:4
.Last.value ## 3:4
x[1:2] <- 1
.Last.value ## 1
x[1:2] = 2
.Last.value ## 2
x[1:2] <- x[1:2] * 2
.Last.value ## 4:8
1:10 * 1:2
|
######################
# Pre-processing 2014
######################
#install.packages("XLConnect")
options(java.parameters = "-Xmx7000m")
library(XLConnect)
basePath = file.path("D:/Monash Study/Winter Research/")
input = file.path(basePath, "source/Podes_Survey_2014trim.xlsx")
data.raw = readWorksheetFromFile(input,sheet = 1)
# Retrive class information for vaiable types
class=sapply(data.raw,class)
input = file.path(basePath, "source/Podes_Survey_2014.csv") #saved from original xlsx file with population added
data.raw= read.csv(input, header=TRUE, colClass=class,na.strings=c("NA", ""))
# Edit electricit and PLN variable
data.raw$electricity = 0
data.raw$electricity[which(data.raw$R501A1 !=0)] = "PLN"
data.raw$electricity[which(data.raw$R501A2 !=0 & data.raw$R501A1 ==0)] = "nonPLN"
data.raw$electricity[which(data.raw$R501A2 ==0 & data.raw$R501A1 ==0)] = "noE"
data.raw$PLN = 0
data.raw$PLN[which(data.raw$electricity == "PLN")] = 1
data.raw$PLN = as.factor(data.raw$PLN)
data.14 = data.raw
rm(data.raw)
#######################
# Read in 2011 and 2008
#######################
#11 data first#
input = file.path(basePath, "source/2011_trim.xlsx")
data.class = readWorksheetFromFile(input,sheet = 1)
# Retrive class information for vaiable types
class=sapply(data.class,class)
input = file.path(basePath, "source/podes_desa_2011.csv") #saved from original dataset file with villageID added
data.11 = read.csv(input, header=TRUE, colClass=class,na.strings=c("NA", ""))
#08 data second#
# Treatment done in excel to correct all connect2008.csv data type
input = file.path(basePath, "source/2008_trim.xlsx")
data.class = readWorksheetFromFile(input,sheet = 1)
class=sapply(data.class,class)
input = file.path(basePath, "source/PODES2008.csv") #saved from original dataset file with villageID added
data.08 = read.csv(input, header=TRUE, colClass=class,na.strings=c("NA", ""))
dim(data.11)
dim(data.08)
######################################################
# find out villages that changed PLN status from 11-14
######################################################
library(dplyr)
data.08=tbl_df(data.08)
data.11=tbl_df(data.11)
data.14=tbl_df(data.14)
# edit PLN variable for 11 survey data, no info provided for villages that have no power supply
data.08=mutate(data.08,PLN = ifelse(R501A==2,0,ifelse(R501B1 %in%c(0,NA),0,1))) #R501B1 asks for number of PLN-connected families
data.11=mutate(data.11,PLN = ifelse(R501A!=0,1,0))
nonPLN11= filter(data.11,PLN==0)
yesPLN14= filter(data.14,PLN==1)
nonPLN08 =filter(data.08,PLN==0)
yesPLN11 = filter(data.11,PLN==1)
data.join11_14 = inner_join(nonPLN11,yesPLN14,by=c("id2011"="id2013"))
data.join08_11 = inner_join(nonPLN08,yesPLN11,by=c("id2008"="id2011"))
village_id11 = select(data.join11_14, id2011)
village_id08 =select(data.join08_11,id2008)
# update data.11 and 08 to specify which villages were recently connected
data.11 =mutate(data.11, connect = ifelse((data.11$id2011 %in% village_id11$id2011),1,
ifelse((PLN ==1 ),"PLN",0)))
data.08 =mutate(data.08, connect = ifelse((data.08$id2008 %in% village_id08$id2008),1,
ifelse((PLN ==1 ),"PLN",0)))
# Add population
data.11$POP = data.11$R401A + data.11$R401B
data.08$POP = data.08$R401A + data.08$R401B
table(data.08$connect,useNA = "always")
table(data.11$connect,useNA = "always")
dim(data.11)
dim(data.08)
#rm(data.join11_14, data.join08_11, nonPLN11, yesPLN14, village_id11, village_id08, nonPLN08, yesPLN11)
na = sort(sapply(data.11,function(x){mean(is.na(x)==TRUE)}),decreasing = TRUE)
na = na[na!=0]
na = as.data.frame(na)
na = na[order(row.names(na)),,drop=FALSE]
write.csv(na, "na.csv")
#################
# Imputation 2011
#################
varName = sapply(data.11, is.numeric)
nominalVar=colnames(data.11)[!varName]
numericVar=colnames(data.11)[varName]
# All nominial variables with NAs equal to unique level "NA"
na.nom=sapply(data.11[,nominalVar],function(df){sum(is.na(df))})
na.nom=na.nom[na.nom!=0]
data.11[,nominalVar][is.na(data.11[,nominalVar])==TRUE]="NA"
# Recode all numerical NAs
na.num=sapply(data.11[,numericVar],function(df){sum(is.na(df))})
na.num=na.num[na.num!=0]
data.11[,numericVar][is.na(data.11[,numericVar])==TRUE]=0
# Village Head Age should not be 0 when NA
data.11$R1501AK3[data.11$R1501AK3==0] = NA
data.11$R1501BK3[data.11$R1501BK3==0] = NA
#################
# Imputation 2008
#################
varName = sapply(data.08, is.numeric)
nominalVar=colnames(data.08)[!varName]
numericVar=colnames(data.08)[varName]
# All nominial variables with NAs equal to unique level "NA"
na.nom=sapply(data.08[,nominalVar],function(df){sum(is.na(df))})
na.nom=na.nom[na.nom!=0]
data.08[,nominalVar][is.na(data.08[,nominalVar])==TRUE]="NA"
# Recode all numerical NAs
na.num=sapply(data.08[,numericVar],function(df){sum(is.na(df))})
na.num=na.num[na.num!=0]
data.08[,numericVar][is.na(data.08[,numericVar])==TRUE]=0
data.08$R1401A_3[data.08$R1401A_3==0] = NA
data.08$R1401B_3[data.08$R1401B_3==0] = NA
#################
# Imputation 2014
#################
varName = sapply(data.14, is.numeric)
nominalVar=colnames(data.14)[!varName]
numericVar=colnames(data.14)[varName]
# All nominial variables with NAs equal to unique level "NA"
na.nom=sapply(data.14[,nominalVar],function(df){sum(is.na(df))})
na.nom=na.nom[na.nom!=0]
data.14[,nominalVar][is.na(data.14[,nominalVar])==TRUE]="NA"
# Recode all numerical NAs
na.num=sapply(data.14[,numericVar],function(df){sum(is.na(df))})
na.num=na.num[na.num!=0]
data.14[,numericVar][is.na(data.14[,numericVar])==TRUE]=0
data.14$R1601A_K3[data.14$R1601A_K3==0] = NA
data.14$R1601B_K3[data.14$R1601B_K3==0] = NA
######################
# Save to connect file
######################
write.csv(data.11,"connect2011.csv",row.names=FALSE)
write.csv(data.08,"connect2008.csv",row.names=FALSE)
write.csv(data.14,"connect2014.csv",row.names=FALSE)
save(data.11,file = "data11.RData")
save(data.08,file = "data08.RData")
save(data.14,file = "data14.RData")
env = new.env()
load("data11.RData",env )
data.11 = env$data.11
env = new.env()
load("data08.RData",env )
data.08 = env$data.08
dim(data.11)
dim(data.08)
data.11=filter(data.11,data.11$connect != "PLN")
data.08=filter(data.08,data.08$connect != "PLN")
dim(data.11)
dim(data.08)
#####################################
# Information gain for 08 and 11 data
######################################
library(glmnet)
library(dplyr)
# Information gain for 11
library(FSelector)
info.connect_11 = information.gain(connect~., data.11)
info.connect_11 = info.connect_11[order(-info.connect_11$attr_importance),,drop=FALSE]
# info.gain for 08
info.connect_08 = information.gain(connect~., data.08)
info.connect_08 = info.connect_08[order(-info.connect_08$attr_importance),,drop=FALSE]
infogain08 = data.frame(rownames(info.connect_08),info.connect_08)
infogain11 = data.frame(rownames(info.connect_11),info.connect_11)
head(info.connect_08,10)
head(info.connect_11,10)
write.csv(infogain08,"infogain08.csv", row.names = FALSE)
write.csv(infogain11,"infogain11.csv", row.names = FALSE)
table(data.11$R1501AK3,useNA = 'always')
##########
#LASSO
##########
library(glmnet)
library(doMC)
registerDoMC(cores=2)
# Delete all unessary variables
data.11=data.11[,!names(data.11) %in%
c('X',"id2011","KODE_PROV", "NAMA_PROV", "KODE_KAB", "NAMA_KAB",
"KODE_KEC", "NAMA_KEC", "KODE_DESA", "NAMA_DESA", "R106",
"R301", "R302A" ,"R303B","NAMA_PULAU")]
#Problematic R1004A(B,C)K3, R1401A(B)K4, R1402A(B,C)K4(5),
#they have abnormal levels as factor variables
data.11=data.11[,!names(data.11) %in%
c('R1004AK3', 'R1004BK3', 'R1004CK3', 'R1401AK4',
'R1401B1K4','R1401B2K4','R1401B3K4','R1401B4K4','R1401B5K4','R1401B6K4',
'R1402A1K4','R1402A2K4','R1402A3K4','R1402A4K4','R1402A1K5','R1402A2K5',
'R1402A3K5','R1402A4K5','R1402B1K5','R1402B2K5','R1402B3K5','R1402C1K5',
'R1402C1K5','R1402C2K5','R1402C3K5')]
# R807(Majority ethnithity) contains three variables, delete two of them
data.11 = data.11[,!names(data.11) %in% c('R807','R807_2')]
# Delete all level-1 variables to avoid complete separation
level1 = sapply(data.11,function(x){length(unique(x))})
level1 = level1[level1==1]
level1
data.11=data.11[,!names(data.11) %in% c('R501A','PLN','R60108K4')]
# There should be no NAs except for R1501AK3 and R1501BK3 (age of village head and secretary)
# We delete these two variables as retaining the rows is more important than retaining these columns
data.11=data.11[,!names(data.11) %in% c('R1501AK3','R1501BK3')]
dim(data.11)
# check if there are NAs, not allowed for regression!
sapply(data.11,function(df){mean(is.na(df))})
# LASSO Model
options(warn=-1)
x.11 = sparse.model.matrix(connect~.,data.11)[,-1]
y.11 = data.11$connect
fit.11 = cv.glmnet(x.11,y.11,alpha = 1, type.measure = "class", family = "binomial", parallel=TRUE)
plot(fit.11)
print (fit.11)
library(AUC)
pred.11 = predict(fit.11, x.11, type='response')
auc(roc(pred.11, factor(y.11)))
plot(roc(pred.11, factor(y.11)), main = "ROC",col="red")
coef.11 = predict(fit.11, newx = x.11, s = "lambda.1se", type = "coefficients",exact = TRUE)
coef.11 = as.matrix(coef.11)
coef.11 = coef.11[coef.11!=0,,drop=FALSE]
write.csv(coef.11,"coef11.csv")
coef.11
length(coef.11)
| /1.PODES+Pre-processing.r | no_license | robertf99/PODES | R | false | false | 9,491 | r |
######################
# Pre-processing 2014
######################
#install.packages("XLConnect")
options(java.parameters = "-Xmx7000m")
library(XLConnect)
basePath = file.path("D:/Monash Study/Winter Research/")
input = file.path(basePath, "source/Podes_Survey_2014trim.xlsx")
data.raw = readWorksheetFromFile(input,sheet = 1)
# Retrive class information for vaiable types
class=sapply(data.raw,class)
input = file.path(basePath, "source/Podes_Survey_2014.csv") #saved from original xlsx file with population added
data.raw= read.csv(input, header=TRUE, colClass=class,na.strings=c("NA", ""))
# Edit electricit and PLN variable
data.raw$electricity = 0
data.raw$electricity[which(data.raw$R501A1 !=0)] = "PLN"
data.raw$electricity[which(data.raw$R501A2 !=0 & data.raw$R501A1 ==0)] = "nonPLN"
data.raw$electricity[which(data.raw$R501A2 ==0 & data.raw$R501A1 ==0)] = "noE"
data.raw$PLN = 0
data.raw$PLN[which(data.raw$electricity == "PLN")] = 1
data.raw$PLN = as.factor(data.raw$PLN)
data.14 = data.raw
rm(data.raw)
#######################
# Read in 2011 and 2008
#######################
#11 data first#
input = file.path(basePath, "source/2011_trim.xlsx")
data.class = readWorksheetFromFile(input,sheet = 1)
# Retrive class information for vaiable types
class=sapply(data.class,class)
input = file.path(basePath, "source/podes_desa_2011.csv") #saved from original dataset file with villageID added
data.11 = read.csv(input, header=TRUE, colClass=class,na.strings=c("NA", ""))
#08 data second#
# Treatment done in excel to correct all connect2008.csv data type
input = file.path(basePath, "source/2008_trim.xlsx")
data.class = readWorksheetFromFile(input,sheet = 1)
class=sapply(data.class,class)
input = file.path(basePath, "source/PODES2008.csv") #saved from original dataset file with villageID added
data.08 = read.csv(input, header=TRUE, colClass=class,na.strings=c("NA", ""))
dim(data.11)
dim(data.08)
######################################################
# find out villages that changed PLN status from 11-14
######################################################
library(dplyr)
data.08=tbl_df(data.08)
data.11=tbl_df(data.11)
data.14=tbl_df(data.14)
# edit PLN variable for 11 survey data, no info provided for villages that have no power supply
data.08=mutate(data.08,PLN = ifelse(R501A==2,0,ifelse(R501B1 %in%c(0,NA),0,1))) #R501B1 asks for number of PLN-connected families
data.11=mutate(data.11,PLN = ifelse(R501A!=0,1,0))
nonPLN11= filter(data.11,PLN==0)
yesPLN14= filter(data.14,PLN==1)
nonPLN08 =filter(data.08,PLN==0)
yesPLN11 = filter(data.11,PLN==1)
data.join11_14 = inner_join(nonPLN11,yesPLN14,by=c("id2011"="id2013"))
data.join08_11 = inner_join(nonPLN08,yesPLN11,by=c("id2008"="id2011"))
village_id11 = select(data.join11_14, id2011)
village_id08 =select(data.join08_11,id2008)
# update data.11 and 08 to specify which villages were recently connected
data.11 =mutate(data.11, connect = ifelse((data.11$id2011 %in% village_id11$id2011),1,
ifelse((PLN ==1 ),"PLN",0)))
data.08 =mutate(data.08, connect = ifelse((data.08$id2008 %in% village_id08$id2008),1,
ifelse((PLN ==1 ),"PLN",0)))
# Add population
data.11$POP = data.11$R401A + data.11$R401B
data.08$POP = data.08$R401A + data.08$R401B
table(data.08$connect,useNA = "always")
table(data.11$connect,useNA = "always")
dim(data.11)
dim(data.08)
#rm(data.join11_14, data.join08_11, nonPLN11, yesPLN14, village_id11, village_id08, nonPLN08, yesPLN11)
na = sort(sapply(data.11,function(x){mean(is.na(x)==TRUE)}),decreasing = TRUE)
na = na[na!=0]
na = as.data.frame(na)
na = na[order(row.names(na)),,drop=FALSE]
write.csv(na, "na.csv")
#################
# Imputation 2011
#################
varName = sapply(data.11, is.numeric)
nominalVar=colnames(data.11)[!varName]
numericVar=colnames(data.11)[varName]
# All nominial variables with NAs equal to unique level "NA"
na.nom=sapply(data.11[,nominalVar],function(df){sum(is.na(df))})
na.nom=na.nom[na.nom!=0]
data.11[,nominalVar][is.na(data.11[,nominalVar])==TRUE]="NA"
# Recode all numerical NAs
na.num=sapply(data.11[,numericVar],function(df){sum(is.na(df))})
na.num=na.num[na.num!=0]
data.11[,numericVar][is.na(data.11[,numericVar])==TRUE]=0
# Village Head Age should not be 0 when NA
data.11$R1501AK3[data.11$R1501AK3==0] = NA
data.11$R1501BK3[data.11$R1501BK3==0] = NA
#################
# Imputation 2008
#################
varName = sapply(data.08, is.numeric)
nominalVar=colnames(data.08)[!varName]
numericVar=colnames(data.08)[varName]
# All nominial variables with NAs equal to unique level "NA"
na.nom=sapply(data.08[,nominalVar],function(df){sum(is.na(df))})
na.nom=na.nom[na.nom!=0]
data.08[,nominalVar][is.na(data.08[,nominalVar])==TRUE]="NA"
# Recode all numerical NAs
na.num=sapply(data.08[,numericVar],function(df){sum(is.na(df))})
na.num=na.num[na.num!=0]
data.08[,numericVar][is.na(data.08[,numericVar])==TRUE]=0
data.08$R1401A_3[data.08$R1401A_3==0] = NA
data.08$R1401B_3[data.08$R1401B_3==0] = NA
#################
# Imputation 2014
#################
varName = sapply(data.14, is.numeric)
nominalVar=colnames(data.14)[!varName]
numericVar=colnames(data.14)[varName]
# All nominial variables with NAs equal to unique level "NA"
na.nom=sapply(data.14[,nominalVar],function(df){sum(is.na(df))})
na.nom=na.nom[na.nom!=0]
data.14[,nominalVar][is.na(data.14[,nominalVar])==TRUE]="NA"
# Recode all numerical NAs
na.num=sapply(data.14[,numericVar],function(df){sum(is.na(df))})
na.num=na.num[na.num!=0]
data.14[,numericVar][is.na(data.14[,numericVar])==TRUE]=0
data.14$R1601A_K3[data.14$R1601A_K3==0] = NA
data.14$R1601B_K3[data.14$R1601B_K3==0] = NA
######################
# Save to connect file
######################
write.csv(data.11,"connect2011.csv",row.names=FALSE)
write.csv(data.08,"connect2008.csv",row.names=FALSE)
write.csv(data.14,"connect2014.csv",row.names=FALSE)
save(data.11,file = "data11.RData")
save(data.08,file = "data08.RData")
save(data.14,file = "data14.RData")
env = new.env()
load("data11.RData",env )
data.11 = env$data.11
env = new.env()
load("data08.RData",env )
data.08 = env$data.08
dim(data.11)
dim(data.08)
data.11=filter(data.11,data.11$connect != "PLN")
data.08=filter(data.08,data.08$connect != "PLN")
dim(data.11)
dim(data.08)
#####################################
# Information gain for 08 and 11 data
######################################
library(glmnet)
library(dplyr)
# Information gain for 11
library(FSelector)
info.connect_11 = information.gain(connect~., data.11)
info.connect_11 = info.connect_11[order(-info.connect_11$attr_importance),,drop=FALSE]
# info.gain for 08
info.connect_08 = information.gain(connect~., data.08)
info.connect_08 = info.connect_08[order(-info.connect_08$attr_importance),,drop=FALSE]
infogain08 = data.frame(rownames(info.connect_08),info.connect_08)
infogain11 = data.frame(rownames(info.connect_11),info.connect_11)
head(info.connect_08,10)
head(info.connect_11,10)
write.csv(infogain08,"infogain08.csv", row.names = FALSE)
write.csv(infogain11,"infogain11.csv", row.names = FALSE)
table(data.11$R1501AK3,useNA = 'always')
##########
#LASSO
##########
library(glmnet)
library(doMC)
registerDoMC(cores=2)
# Delete all unessary variables
data.11=data.11[,!names(data.11) %in%
c('X',"id2011","KODE_PROV", "NAMA_PROV", "KODE_KAB", "NAMA_KAB",
"KODE_KEC", "NAMA_KEC", "KODE_DESA", "NAMA_DESA", "R106",
"R301", "R302A" ,"R303B","NAMA_PULAU")]
#Problematic R1004A(B,C)K3, R1401A(B)K4, R1402A(B,C)K4(5),
#they have abnormal levels as factor variables
data.11=data.11[,!names(data.11) %in%
c('R1004AK3', 'R1004BK3', 'R1004CK3', 'R1401AK4',
'R1401B1K4','R1401B2K4','R1401B3K4','R1401B4K4','R1401B5K4','R1401B6K4',
'R1402A1K4','R1402A2K4','R1402A3K4','R1402A4K4','R1402A1K5','R1402A2K5',
'R1402A3K5','R1402A4K5','R1402B1K5','R1402B2K5','R1402B3K5','R1402C1K5',
'R1402C1K5','R1402C2K5','R1402C3K5')]
# R807(Majority ethnithity) contains three variables, delete two of them
data.11 = data.11[,!names(data.11) %in% c('R807','R807_2')]
# Delete all level-1 variables to avoid complete separation
level1 = sapply(data.11,function(x){length(unique(x))})
level1 = level1[level1==1]
level1
data.11=data.11[,!names(data.11) %in% c('R501A','PLN','R60108K4')]
# There should be no NAs except for R1501AK3 and R1501BK3 (age of village head and secretary)
# We delete these two variables as retaining the rows is more important than retaining these columns
data.11=data.11[,!names(data.11) %in% c('R1501AK3','R1501BK3')]
dim(data.11)
# check if there are NAs, not allowed for regression!
sapply(data.11,function(df){mean(is.na(df))})
# LASSO Model
options(warn=-1)
x.11 = sparse.model.matrix(connect~.,data.11)[,-1]
y.11 = data.11$connect
fit.11 = cv.glmnet(x.11,y.11,alpha = 1, type.measure = "class", family = "binomial", parallel=TRUE)
plot(fit.11)
print (fit.11)
library(AUC)
pred.11 = predict(fit.11, x.11, type='response')
auc(roc(pred.11, factor(y.11)))
plot(roc(pred.11, factor(y.11)), main = "ROC",col="red")
coef.11 = predict(fit.11, newx = x.11, s = "lambda.1se", type = "coefficients",exact = TRUE)
coef.11 = as.matrix(coef.11)
coef.11 = coef.11[coef.11!=0,,drop=FALSE]
write.csv(coef.11,"coef11.csv")
coef.11
length(coef.11)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crm_fit.R
\name{summary.crm_fit}
\alias{summary.crm_fit}
\title{Obtain summary of a crm_fit}
\usage{
\method{summary}{crm_fit}(object, ...)
}
\arguments{
\item{object}{\code{\link{crm_fit}} object to summarise.}
\item{...}{Extra parameters, passed onwards.}
}
\value{
A summary object.
}
\description{
Obtain summary of a crm_fit
}
\seealso{
\code{\link{stan_crm}}
}
| /man/summary.crm_fit.Rd | no_license | brockk/trialr | R | false | true | 448 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crm_fit.R
\name{summary.crm_fit}
\alias{summary.crm_fit}
\title{Obtain summary of an crm_fit}
\usage{
\method{summary}{crm_fit}(object, ...)
}
\arguments{
\item{object}{\code{\link{crm_fit}} object to summarise.}
\item{...}{Extra parameters, passed onwards.}
}
\value{
A summary object.
}
\description{
Obtain summary of an crm_fit
}
\seealso{
\code{\link{stan_crm}}
}
|
### Generating gamma-distributed random numbers
set.seed(123) # fix the RNG seed for reproducibility
rgamma(10, # draw 10 gamma-distributed random numbers
       shape=3, rate=1)
## Statistical properties
nu <- 4
alpha <- 2
x <- rgamma(10^5, # draw 10^5 (= 100,000) gamma random numbers
            shape=nu, rate=alpha)
mean(x) # close to nu/alpha = 2 (law of large numbers)
## Histogram of the data (density scale)
if(Sys.info()["sysname"]=="Darwin") { # on macOS,
    par(family="HiraginoSans-W4")} # use a Japanese-capable font
hist(x, freq=FALSE, breaks=50,
     border="white", col="lightblue",
     main=bquote(paste("ガンマ分布 ", # title: "gamma distribution" (runtime string kept)
                       Gamma(.(nu),.(alpha)))))
curve(dgamma(x, shape=nu, rate=alpha), add=TRUE,
      col="red", lwd=3) # theoretical probability density function
legend("topright", inset=.05, # add a legend
       legend=c("観測値", "理論値"), # "observed" / "theoretical" (runtime strings kept)
       col=c("lightblue", "red"), lwd=3)
| /docs/code/sreg-gamma.r | no_license | noboru-murata/sda | R | false | false | 915 | r | ### ガンマ分布に従う乱数の生成
set.seed(123) # 乱数の初期値を指定
rgamma(10, # ガンマ分布に従う乱数を10個発生
shape=3, rate=1)
## 統計的性質
nu <- 4
alpha <- 2
x <- rgamma(10^5, # ガンマ乱数を10000個発生
shape=nu, rate=alpha)
mean(x) # nu/alpha=2 に近い(大数の法則)
## データのヒストグラム(密度表示)
if(Sys.info()["sysname"]=="Darwin") { # MacOSの場合
par(family="HiraginoSans-W4")} # 日本語フォント
hist(x, freq=FALSE, breaks=50,
border="white", col="lightblue",
main=bquote(paste("ガンマ分布 ",
Gamma(.(nu),.(alpha)))))
curve(dgamma(x, shape=nu, rate=alpha), add=TRUE,
col="red", lwd=3) # 理論上の確率密度関数
legend("topright", inset=.05, # 凡例を作成
legend=c("観測値", "理論値"),
col=c("lightblue", "red"), lwd=3)
|
#' GiveAction
#'
#' The act of transferring ownership of an object to a destination.
#' Reciprocal of TakeAction. Related actions:
#' * [[TakeAction]]: Reciprocal of GiveAction.
#' * [[SendAction]]: Unlike SendAction, GiveAction implies that ownership is
#'   being transferred (e.g. I may send my laptop to you, but that doesn't
#'   mean I'm giving it to you).
#'
#' @param id Identifier for the object (URI).
#' @param target (EntryPoint type.) A target EntryPoint for the Action.
#' @param startTime (DateTime type.) When the action started, or was performed.
#' @param result (Thing type.) The result produced in the action.
#' @param participant (Person or Organization type.) Other co-agents that
#'   participated in the action indirectly.
#' @param object (Thing type.) The object upon which the action is carried out.
#' @param location (Text, PostalAddress or Place type.) Where the action
#'   takes place.
#' @param instrument (Thing type.) The object that helped the agent perform
#'   the action.
#' @param error (Thing type.) For failed actions, more information on the
#'   cause of the failure.
#' @param endTime (DateTime type.) When the action ended.
#' @param agent (Person or Organization type.) The direct performer or driver
#'   of the action.
#' @param actionStatus (ActionStatusType type.) The current disposition of
#'   the Action.
#' @param recipient (Person, Organization, ContactPoint or Audience type.)
#'   The participant at the receiving end of the action.
#' @param url (URL type.) URL of the item.
#' @param sameAs (URL type.) URL of a reference Web page that unambiguously
#'   indicates the item's identity.
#' @param potentialAction (Action type.) A potential Action in which this
#'   thing would play an 'object' role.
#' @param name (Text type.) The name of the item.
#' @param mainEntityOfPage (URL or CreativeWork type.) A page (or other
#'   CreativeWork) for which this thing is the main entity being described.
#' @param image (URL or ImageObject type.) An image of the item.
#' @param identifier (URL, Text or PropertyValue type.) Any kind of
#'   identifier for the item (ISBN, GTIN, UUID, ...).
#' @param disambiguatingDescription (Text type.) A short description used to
#'   disambiguate this item from similar ones.
#' @param description (Text type.) A description of the item.
#' @param alternateName (Text type.) An alias for the item.
#' @param additionalType (URL type.) An additional type for the item,
#'   typically from an external vocabulary.
#' @param toLocation (Place type.) The final location of the object or the
#'   agent after the action.
#' @param fromLocation (Place type.) The original location of the object or
#'   the agent before the action.
#'
#' @return a list object corresponding to a schema:GiveAction
#'
#' @export
GiveAction <- function(id = NULL,
                       target = NULL,
                       startTime = NULL,
                       result = NULL,
                       participant = NULL,
                       object = NULL,
                       location = NULL,
                       instrument = NULL,
                       error = NULL,
                       endTime = NULL,
                       agent = NULL,
                       actionStatus = NULL,
                       recipient = NULL,
                       url = NULL,
                       sameAs = NULL,
                       potentialAction = NULL,
                       name = NULL,
                       mainEntityOfPage = NULL,
                       image = NULL,
                       identifier = NULL,
                       disambiguatingDescription = NULL,
                       description = NULL,
                       alternateName = NULL,
                       additionalType = NULL,
                       toLocation = NULL,
                       fromLocation = NULL) {
  # Assemble every schema.org property in declaration order; `type` is
  # always present and always first.
  fields <- list(
    type = "GiveAction",
    id = id,
    target = target,
    startTime = startTime,
    result = result,
    participant = participant,
    object = object,
    location = location,
    instrument = instrument,
    error = error,
    endTime = endTime,
    agent = agent,
    actionStatus = actionStatus,
    recipient = recipient,
    url = url,
    sameAs = sameAs,
    potentialAction = potentialAction,
    name = name,
    mainEntityOfPage = mainEntityOfPage,
    image = image,
    identifier = identifier,
    disambiguatingDescription = disambiguatingDescription,
    description = description,
    alternateName = alternateName,
    additionalType = additionalType,
    toLocation = toLocation,
    fromLocation = fromLocation)
  # Keep only the properties that were actually supplied (drop the NULLs).
  fields[!vapply(fields, is.null, logical(1))]
}
| /R/GiveAction.R | no_license | cboettig/schemar | R | false | false | 6,817 | r | #' GiveAction
#'
#' The act of transferring ownership of an object to a destination. Reciprocal of TakeAction.Related actions:* [[TakeAction]]: Reciprocal of GiveAction.* [[SendAction]]: Unlike SendAction, GiveAction implies that ownership is being transferred (e.g. I may send my laptop to you, but that doesn't mean I'm giving it to you).
#'
#'
#' @param id identifier for the object (URI)
#' @param target (EntryPoint type.) Indicates a target EntryPoint for an Action.
#' @param startTime (DateTime or DateTime type.) The startTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to start. For actions that span a period of time, when the action was performed. e.g. John wrote a book from *January* to December.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.
#' @param result (Thing type.) The result produced in the action. e.g. John wrote *a book*.
#' @param participant (Person or Organization type.) Other co-agents that participated in the action indirectly. e.g. John wrote a book with *Steve*.
#' @param object (Thing type.) The object upon which the action is carried out, whose state is kept intact or changed. Also known as the semantic roles patient, affected or undergoer (which change their state) or theme (which doesn't). e.g. John read *a book*.
#' @param location (Text or PostalAddress or Place or Text or PostalAddress or Place or Text or PostalAddress or Place type.) The location of for example where the event is happening, an organization is located, or where an action takes place.
#' @param instrument (Thing type.) The object that helped the agent perform the action. e.g. John wrote a book with *a pen*.
#' @param error (Thing type.) For failed actions, more information on the cause of the failure.
#' @param endTime (DateTime or DateTime type.) The endTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to end. For actions that span a period of time, when the action was performed. e.g. John wrote a book from January to *December*.Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.
#' @param agent (Person or Organization type.) The direct performer or driver of the action (animate or inanimate). e.g. *John* wrote a book.
#' @param actionStatus (ActionStatusType type.) Indicates the current disposition of the Action.
#' @param recipient (Person or Organization or ContactPoint or Audience or Person or Organization or ContactPoint or Audience or Person or Organization or ContactPoint or Audience or Person or Organization or ContactPoint or Audience or Person or Organization or ContactPoint or Audience or Person or Organization or ContactPoint or Audience or Person or Organization or ContactPoint or Audience or Person or Organization or ContactPoint or Audience or Person or Organization or ContactPoint or Audience type.) A sub property of participant. The participant who is at the receiving end of the action.
#' @param url (URL type.) URL of the item.
#' @param sameAs (URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.
#' @param potentialAction (Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.
#' @param name (Text type.) The name of the item.
#' @param mainEntityOfPage (URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.
#' @param image (URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].
#' @param identifier (URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.
#' @param disambiguatingDescription (Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.
#' @param description (Text type.) A description of the item.
#' @param alternateName (Text type.) An alias for the item.
#' @param additionalType (URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.
#' @param toLocation (Place or Place or Place or Place type.) A sub property of location. The final location of the object or the agent after the action.
#' @param fromLocation (Place or Place or Place type.) A sub property of location. The original location of the object or the agent before the action.
#'
#' @return a list object corresponding to a schema:GiveAction
#'
#' @export
GiveAction <- function(id = NULL,
target = NULL,
startTime = NULL,
result = NULL,
participant = NULL,
object = NULL,
location = NULL,
instrument = NULL,
error = NULL,
endTime = NULL,
agent = NULL,
actionStatus = NULL,
recipient = NULL,
url = NULL,
sameAs = NULL,
potentialAction = NULL,
name = NULL,
mainEntityOfPage = NULL,
image = NULL,
identifier = NULL,
disambiguatingDescription = NULL,
description = NULL,
alternateName = NULL,
additionalType = NULL,
toLocation = NULL,
fromLocation = NULL){
Filter(Negate(is.null),
list(
type = "GiveAction",
id = id,
target = target,
startTime = startTime,
result = result,
participant = participant,
object = object,
location = location,
instrument = instrument,
error = error,
endTime = endTime,
agent = agent,
actionStatus = actionStatus,
recipient = recipient,
url = url,
sameAs = sameAs,
potentialAction = potentialAction,
name = name,
mainEntityOfPage = mainEntityOfPage,
image = image,
identifier = identifier,
disambiguatingDescription = disambiguatingDescription,
description = description,
alternateName = alternateName,
additionalType = additionalType,
toLocation = toLocation,
fromLocation = fromLocation))}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/embed.R
\name{embed_data}
\alias{embed_data}
\title{embed a dataframe into a set of model specifications}
\usage{
embed_data(mspecs, data, ...)
}
\arguments{
\item{mspecs}{a set of model specifications, e.g. built from a
two-sided formula with outcome(s) on the left hand side and
exposure(s) on the right hand side.}

\item{data}{a dataframe containing the variables referenced by the
model specifications.}

\item{...}{key and value pairs specifying labels for variables in data.
For example, a variable called sbp may have the label of systolic blood
pressure. This could be set by writing sbp = 'systolic blood pressure'}
}
\description{
embed a dataframe into a set of model specifications
}
| /man/embed_data.Rd | permissive | bcjaeger/rpriori | R | false | true | 635 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/embed.R
\name{embed_data}
\alias{embed_data}
\title{embed a dataframe into a set of model specifications}
\usage{
embed_data(mspecs, data, ...)
}
\arguments{
\item{mspecs}{two-sided formula with outcome(s)
on the left hand side and exposure(s) on the
right hand side.}
\item{...}{key and value pairs specifying labels for variables in data.
For example, a variable called sbp may have the label of systolic blood
pressure. This could be set by writing sbp = 'systolic blood pressure'}
}
\description{
embed a dataframe into a set of model specifications
}
|
#_____________________________________________________________________________
# Introduction
#_____________________________________________________________________________
# Welcome to the LOBSTER R demo.
# http://www.lobsterdata.com
#___________________________________________________________________________
#The code provided below might help you get started with your LOBSTER data.
#The demo focuses on the two LOBSTER output files 'orderbook' and 'message'.
#You can find a detailed description of the LOBSTER data structure
#at http://LOBSTER.wiwi.hu-berlin.de
#Data used: AMZN - 2012-June-21 - 10 Levels
#_____________________________________________________________________________
#_____________________________________________________________________________
#
#Set up the Basics
#load the libraries, the package gplots and graphics need to be installed
#_____________________________________________________________________________
rm(list=ls(all=TRUE))
library(graphics)
# lobster.R supplies lobster.readMessage(), lobster.readOrderBook(),
# lobster.plot() and the S4 "OrderBook" class used below.
source('lobster.R')
## set the working directory
# setwd('C:/path/to/your/data/directory')
# Note: The files must be in the same working directory as the LOBSTER_demo.r file.
ticker <- "AMZN" #TICKER
# DATE for which data is downloaded , the file name you downloaded contains this string , say if you downloaded from 1st july 2009 , type here 2009-07-01
demodate = "2012-06-21"
# Sample window in milliseconds after midnight:
# 34200000 ms = 09:30:00 (open), 57600000 ms = 16:00:00 (close).
starttime <- 34200000
endtime <- 57600000
# Levels
lvl = 10;
# Load data
# LOBSTER file naming scheme: TICKER_DATE_START_END_{orderbook|message}_LEVELS.csv
filenameBook <- paste(paste(ticker , demodate ,starttime,endtime,"orderbook" ,lvl ,sep = "_"),"csv",sep = ".")
filenameMess <- paste(paste(ticker , demodate ,starttime,endtime,"message" ,lvl ,sep = "_"),"csv",sep = ".")
mess <- lobster.readMessage(filenameMess);
book <- lobster.readOrderBook(filenameBook)
# Visualize order book
# convert prices into dollars
# (the odd-numbered book columns are prices quoted in units of 1/10000 dollar;
#  the even-numbered volume columns are left untouched)
book[,seq(1,(4*lvl),by=2)] <- book[,seq(1,(4*lvl),by=2)]/10000;
# mess[,1] holds event timestamps in seconds after midnight;
# 10.50*3600 = 37800 s = 10:30:00.
idx <- which(mess[,1]<10.50*3600)
idx <- idx[length(idx)] # the index of the order book immediately before 10:30
obk <- new("OrderBook",ticker=ticker,timeStamp=mess[idx,1],bookData=book[idx,])
lobster.plot(obk)
| /demo.R | permissive | lobsterdata/r4lobster | R | false | false | 2,149 | r | #_____________________________________________________________________________
# Introduction
#_____________________________________________________________________________
# Welcome to the LOBSTER R demo.
# http://www.lobsterdata.com
#___________________________________________________________________________
#The code provided below might help you get started with your LOBSTER data.
#The demo focuses on the two LOBSTER output files 'orderbook' and 'message'.
#You can find a detailed description of the LOBSTER data structure
#at http://LOBSTER.wiwi.hu-berlin.de
#Data used: AMZN - 2012-June-21 - 10 Levels
#_____________________________________________________________________________
#_____________________________________________________________________________
#
#Set up the Basics
#load the libraries, the package gplots and graphics need to be installed
#_____________________________________________________________________________
rm(list=ls(all=TRUE))
library(graphics)
source('lobster.R')
## set the working directory
# setwd('C:/path/to/your/data/directory')
# Note: The files must be in the same working directory as the LOBSTER_demo.r file.
ticker <- "AMZN" #TICKER
# DATE for which data is downloaded , the file name you downloaded contains this string , say if you downloaded from 1st july 2009 , type here 2009-07-01
demodate = "2012-06-21"
starttime <- 34200000
endtime <- 57600000
# Levels
lvl = 10;
# Load data
filenameBook <- paste(paste(ticker , demodate ,starttime,endtime,"orderbook" ,lvl ,sep = "_"),"csv",sep = ".")
filenameMess <- paste(paste(ticker , demodate ,starttime,endtime,"message" ,lvl ,sep = "_"),"csv",sep = ".")
mess <- lobster.readMessage(filenameMess);
book <- lobster.readOrderBook(filenameBook)
# Visualize order book
# convert prices into dollars
book[,seq(1,(4*lvl),by=2)] <- book[,seq(1,(4*lvl),by=2)]/10000;
idx <- which(mess[,1]<10.50*3600)
idx <- idx[length(idx)] # the index of the order book immediately before 10:30
obk <- new("OrderBook",ticker=ticker,timeStamp=mess[idx,1],bookData=book[idx,])
lobster.plot(obk)
|
# Compute the fitted conditional means of y for each posterior draw of an
# autoregressive tsbugs model.
#
# Args:
#   bug:     object of class "tsbugs" (provides the data y, series length n,
#            and the first fitted time point beg).
#   sims:    matrix of posterior draws; columns must be named after parameters.
#   ysim:    simulated y values used to fill in missing observations
#            (required only when y contains NA within the fitted range).
#   pre.beg: if TRUE, keep the (all-NA) columns for time points before beg;
#            if FALSE (default) those columns are dropped.
#
# Returns: a k x n (pre.beg = TRUE) or k x (n - beg + 1) matrix of fitted
# means, one row per posterior draw.
y.fit <-
function(bug,sims,ysim=NULL,pre.beg=FALSE){
  if(is.null(colnames(sims)))
    stop("columns of sims can not be NULL. names should correspond to parameters")
  # inherits() is the correct S3 class test: unlike class(bug) != "tsbugs"
  # it also accepts objects whose class vector contains "tsbugs" among
  # other classes, and never compares a length > 1 class vector.
  if(!inherits(bug, "tsbugs"))
    stop("bug must be an object of class tsbugs")
  k<-nrow(sims)            # number of posterior draws
  y<-bug$data$y            # observed series
  n<-bug$info$n            # series length
  beg<-bug$info$args$beg   # first time point with enough lags to fit
  temp<-theta.it(bug,sims) # transform raw draws into model parameters
  phi<-temp$phi            # col 1 = mean level, cols 2+ = AR coefficients
  max.phi<-ncol(phi)-1     # AR order
  ymean<-matrix(NA,k,n)
  ylag<-matrix(0,k,max.phi)
  for(t in beg:n){
    # Fill ylag column-wise: column j holds y[t-j], replicated across the
    # k draws.
    ylag[]<-rep(y[(t-1):(t-max.phi)],each=k)
    # Missing y: substitute the corresponding simulated values from ysim.
    if(anyNA(ylag)){
      if(is.null(ysim))
        stop("missing y in data, need some ysim")
      ysimlag<-ysim[(t-1):(t-max.phi)-beg+1]
      # NOTE(review): ysimlag has length max.phi while is.na(ylag) is a
      # k x max.phi logical matrix; this relies on recycling behaviour and
      # on the NA pattern being constant within each column -- worth
      # confirming for k > 1 (kept as-is to preserve existing behaviour).
      ylag[is.na(ylag)]<-ysimlag[is.na(ylag)]
    }
    # AR mean for time t: phi0 + sum_j phi_j * (y[t-j] - phi0), per draw.
    ymean[,t]<-phi[,1]+rowSums(phi[,-1]*(ylag-phi[,1]))
  }
  temp<-ymean
  if(!pre.beg) temp<-ymean[,-(1:(beg-1))]
  return(temp)
}
| /tsbridge/R/y.fit.R | no_license | ingted/R-Examples | R | false | false | 935 | r | y.fit <-
function(bug,sims,ysim=NULL,pre.beg=FALSE){
if(is.null(colnames(sims)))
stop("columns of sims can not be NULL. names should correspond to parameters")
if(class(bug)!="tsbugs")
stop("bug must be a object of class tsbugs")
k<-nrow(sims)
y<-bug$data$y
n<-bug$info$n
beg<-bug$info$args$beg
temp<-theta.it(bug,sims)
phi<-temp$phi
max.phi<-ncol(phi)-1
ymean<-matrix(NA,k,n)
ylag<-matrix(0,k,max.phi)
for(t in beg:n){
ylag[]<-rep(y[(t-1):(t-max.phi)],each=k)
#missing y use ysim. asked about this on stack exchange
if(sum(is.na(ylag))>0){
if(is.null(ysim))
stop("missing y in data, need some ysim")
ysimlag<-ysim[(t-1):(t-max.phi)-beg+1]
ylag[is.na(ylag)]<-ysimlag[is.na(ylag)]
}
ymean[,t]<-phi[,1]+rowSums(phi[,-1]*(ylag-phi[,1]))
}
temp<-ymean
if(pre.beg==FALSE) temp<-ymean[,-(1:(beg-1))]
return(temp)
}
|
######################################################################
### Luxor Strategy
######################################################################
# Dual moving-average (5-day vs 60-day SMA) crossover demo on the SOX index.
library(quantmod)
Xt = getSymbols("^SOX",auto.assign = F)
chartSeries(Xt,TA=NULL, theme = "white")
addSMA(n = 5,col = 2)
addSMA(n = 60, col = 3)
Cl5SMA = SMA(Cl(Xt),n = 5)    # fast SMA of the close
Cl60SMA = SMA(Cl(Xt),n = 60)  # slow SMA of the close
# Spread and its sign; diff(sign(...)) is non-zero exactly at crossovers.
addTA(Cl5SMA - Cl60SMA,col=4)
addTA(sign(Cl5SMA - Cl60SMA),col=5)
# re-draw with two SMAs
chartSeries(Cl5SMA, theme = "white")
addTA(Cl60SMA,col=5,on = 1)
addTA(Cl5SMA - Cl60SMA,col=6)
addTA(sign(Cl5SMA - Cl60SMA),col=7)
addTA(diff(sign(Cl5SMA - Cl60SMA)),col=8)
# (interactive inspection only -- values are printed, not stored)
diff(sign(Cl5SMA - Cl60SMA)) > 0
which(diff(sign(Cl5SMA - Cl60SMA)) > 0 )
# NOTE(review): diff(sign(fast - slow)) > 0 marks the fast SMA crossing
# ABOVE the slow SMA, yet it is stored as "TDnDates" (and < 0 as
# "TUpDates") -- the naming looks inverted; confirm intent.
TDnDates <- index(Cl5SMA)[which(diff(sign(Cl5SMA - Cl60SMA)) > 0 )]
TUpDates <- index(Cl5SMA)[which(diff(sign(Cl5SMA - Cl60SMA)) < 0 )]
# Offset markers 2% above the high / below the low so they stay readable.
liftRatio = 0.02
addTA(Hi(Xt)[TDnDates]*(1+liftRatio),on=1,type="p",col=3,pch=25,bg="green")
addTA(Lo(Xt)[TUpDates]*(1-liftRatio),on=1,type="p",col=2,pch=24,bg="red")
######################################################################
### Crossover Events Detection
######################################################################
# Price crossing the rolling k-day min/max channel on the TWII index.
library(quantmod)
Xt = getSymbols("^TWII",auto.assign = F)
chartSeries(Xt,TA=NULL, theme = "white")
k=60
Xt_kMin = runMin(Xt, n = k, cumulative = FALSE)  # rolling k-day low  (channel bottom)
Xt_kMax = runMax(Xt, n = k, cumulative = FALSE)  # rolling k-day high (channel top)
addTA(Xt_kMin,on=1,type="l",col=2)
addTA(Xt_kMax,on=1,type="l",col=4)
# re-draw with close and channel bottom
chartSeries(Cl(Xt), theme = "white")
addTA(Xt_kMin,on=1,type="l",col=2)
addTA(Cl(Xt) - Xt_kMin,type="l",col=3)
addTA(sign(Cl(Xt) - Xt_kMin),type="l",col=4)
addTA(diff(sign(Cl(Xt) - Xt_kMin)),type="l",col=5)
# Dates where the close crosses above (Gt) / below (Lt) the channel bottom.
ClCrossMinGtDates <- index(Xt)[which(diff(sign(Cl(Xt) - Xt_kMin)) > 0 )]
ClCrossMinLtDates <- index(Xt)[which(diff(sign(Cl(Xt) - Xt_kMin)) < 0 )]
liftRatio = 0.02
chartSeries(Cl(Xt), theme = "white")
addTA(Xt_kMin,on=1,type="l",col=2)
addTA(Cl(Xt)[ClCrossMinLtDates]*(1+liftRatio),on=1,type="p",col=3,pch=25,bg="green")
addTA(Cl(Xt)[ClCrossMinGtDates]*(1-liftRatio),on=1,type="p",col=2,pch=24,bg="red")
# re-draw with close and channel top
chartSeries(Cl(Xt), theme = "white")
addTA(Xt_kMax,on=1,type="l",col=2)
addTA(Cl(Xt) - Xt_kMax,type="l",col=3)
addTA(sign(Cl(Xt) - Xt_kMax),type="l",col=4)
addTA(diff(sign(Cl(Xt) - Xt_kMax)),type="l",col=5)
# Dates where the close crosses above / below the channel top.
ClCrossMaxGtDates <- index(Xt)[which(diff(sign(Cl(Xt) - Xt_kMax)) > 0 )]
ClCrossMaxLtDates <- index(Xt)[which(diff(sign(Cl(Xt) - Xt_kMax)) < 0 )]
liftRatio = 0.02
chartSeries(Cl(Xt), theme = "white")
addTA(Xt_kMax,on=1,type="l",col=2)
addTA(Cl(Xt)[ClCrossMaxLtDates]*(1+liftRatio),on=1,type="p",col=3,pch=25,bg="green")
addTA(Cl(Xt)[ClCrossMaxGtDates]*(1-liftRatio),on=1,type="p",col=2,pch=24,bg="red")
# Summary
chartSeries(Cl(Xt), theme = "white")
addTA(Xt_kMin,on=1,type="l",col=2)
addTA(Xt_kMax,on=1,type="l",col=4)
# NOTE(review): this summary marker uses Hi(Xt) while the earlier plots used
# Cl(Xt) for the same event dates -- confirm which series is intended.
addTA(Hi(Xt)[ClCrossMaxLtDates]*(1+liftRatio),on=1,type="p",col=3,pch=25,bg="green")
addTA(Cl(Xt)[ClCrossMinGtDates]*(1-liftRatio),on=1,type="p",col=2,pch=24,bg="red")
######################################################################
### Volatility Thresholds Breaking
######################################################################
# Channel width relative to the channel bottom as a volatility measure.
library(quantmod)
Xt = getSymbols("^TWII",auto.assign = F)
chartSeries(Xt,TA=NULL, theme = "white")
k=30
Xt_kMin = runMin(Xt, n = k, cumulative = FALSE)
Xt_kMax = runMax(Xt, n = k, cumulative = FALSE)
addTA(Xt_kMin,on=1,type="l",col=2)
addTA(Xt_kMax,on=1,type="l",col=4)
volatility = (Xt_kMax - Xt_kMin)/Xt_kMin  # relative 30-day channel width
addTA( volatility ,type="l",col=4)
hist(volatility,breaks =100)
addTA( diff(volatility) ,type="l",col=5)
######################################################################
### Volatility Thresholds Breaking
######################################################################
# Same idea using Bollinger Band width, compared against the channel measure.
# NOTE(review): this section reuses Xt_kMin/Xt_kMax/volatility computed above
# (k = 30) -- it is not self-contained when run in isolation.
library(quantmod)
Xt = getSymbols("^TWII",auto.assign = F)
chartSeries(Xt,TA="addBBands(n=30)", theme = "white")
addTA(Xt_kMin,on=1,type="l",col=9)
addTA(Xt_kMax,on=1,type="l",col=9)
bb = BBands(HLC(Xt),n=30)
tail(bb)
BBvolatility = (bb$up - bb$dn)/bb$dn  # relative Bollinger band width
addTA( BBvolatility ,type="l",col=4)
# hist(BBvolatility,breaks = 200)
addTA( volatility ,type="l",col=5)
addTA( diff(BBvolatility) ,type="l",col=6)
addTA( diff(volatility) ,type="l",col=7)
# Clean up
chartSeries(Xt,TA="addBBands(n=60)", theme = "white")
bb = BBands(HLC(Xt),n=60)
BBvolatility = (bb$up - bb$dn)/bb$dn
hist(BBvolatility,breaks = 200)
quantile(BBvolatility,probs = 0.8,na.rm = T)
addTA( BBvolatility ,type="l",col=4)
# Threshold signal: +1 when band width exceeds 0.2, -1 otherwise.
addTA( sign(BBvolatility - 0.2) ,type="l",col=4)
addTA(1- BBvolatility ,type="l",col=4)
| /part1/quantmod101/Example3_eventDetection.R | no_license | datasci-info/ms-partner-training-20160308 | R | false | false | 4,616 | r | ######################################################################
### Luxor Strategy
######################################################################
library(quantmod)
Xt = getSymbols("^SOX",auto.assign = F)
chartSeries(Xt,TA=NULL, theme = "white")
addSMA(n = 5,col = 2)
addSMA(n = 60, col = 3)
Cl5SMA = SMA(Cl(Xt),n = 5)
Cl60SMA = SMA(Cl(Xt),n = 60)
addTA(Cl5SMA - Cl60SMA,col=4)
addTA(sign(Cl5SMA - Cl60SMA),col=5)
# re-draw with two SMAs
chartSeries(Cl5SMA, theme = "white")
addTA(Cl60SMA,col=5,on = 1)
addTA(Cl5SMA - Cl60SMA,col=6)
addTA(sign(Cl5SMA - Cl60SMA),col=7)
addTA(diff(sign(Cl5SMA - Cl60SMA)),col=8)
diff(sign(Cl5SMA - Cl60SMA)) > 0
which(diff(sign(Cl5SMA - Cl60SMA)) > 0 )
TDnDates <- index(Cl5SMA)[which(diff(sign(Cl5SMA - Cl60SMA)) > 0 )]
TUpDates <- index(Cl5SMA)[which(diff(sign(Cl5SMA - Cl60SMA)) < 0 )]
liftRatio = 0.02
addTA(Hi(Xt)[TDnDates]*(1+liftRatio),on=1,type="p",col=3,pch=25,bg="green")
addTA(Lo(Xt)[TUpDates]*(1-liftRatio),on=1,type="p",col=2,pch=24,bg="red")
######################################################################
### Crossover Events Detection
######################################################################
library(quantmod)
Xt = getSymbols("^TWII",auto.assign = F)
chartSeries(Xt,TA=NULL, theme = "white")
k=60
Xt_kMin = runMin(Xt, n = k, cumulative = FALSE)
Xt_kMax = runMax(Xt, n = k, cumulative = FALSE)
addTA(Xt_kMin,on=1,type="l",col=2)
addTA(Xt_kMax,on=1,type="l",col=4)
# re-draw with close and channel bottom
chartSeries(Cl(Xt), theme = "white")
addTA(Xt_kMin,on=1,type="l",col=2)
addTA(Cl(Xt) - Xt_kMin,type="l",col=3)
addTA(sign(Cl(Xt) - Xt_kMin),type="l",col=4)
addTA(diff(sign(Cl(Xt) - Xt_kMin)),type="l",col=5)
ClCrossMinGtDates <- index(Xt)[which(diff(sign(Cl(Xt) - Xt_kMin)) > 0 )]
ClCrossMinLtDates <- index(Xt)[which(diff(sign(Cl(Xt) - Xt_kMin)) < 0 )]
liftRatio = 0.02
chartSeries(Cl(Xt), theme = "white")
addTA(Xt_kMin,on=1,type="l",col=2)
addTA(Cl(Xt)[ClCrossMinLtDates]*(1+liftRatio),on=1,type="p",col=3,pch=25,bg="green")
addTA(Cl(Xt)[ClCrossMinGtDates]*(1-liftRatio),on=1,type="p",col=2,pch=24,bg="red")
# re-draw with close and channel bottom
chartSeries(Cl(Xt), theme = "white")
addTA(Xt_kMax,on=1,type="l",col=2)
addTA(Cl(Xt) - Xt_kMax,type="l",col=3)
addTA(sign(Cl(Xt) - Xt_kMax),type="l",col=4)
addTA(diff(sign(Cl(Xt) - Xt_kMax)),type="l",col=5)
ClCrossMaxGtDates <- index(Xt)[which(diff(sign(Cl(Xt) - Xt_kMax)) > 0 )]
ClCrossMaxLtDates <- index(Xt)[which(diff(sign(Cl(Xt) - Xt_kMax)) < 0 )]
liftRatio = 0.02
chartSeries(Cl(Xt), theme = "white")
addTA(Xt_kMax,on=1,type="l",col=2)
addTA(Cl(Xt)[ClCrossMaxLtDates]*(1+liftRatio),on=1,type="p",col=3,pch=25,bg="green")
addTA(Cl(Xt)[ClCrossMaxGtDates]*(1-liftRatio),on=1,type="p",col=2,pch=24,bg="red")
# Summary
chartSeries(Cl(Xt), theme = "white")
addTA(Xt_kMin,on=1,type="l",col=2)
addTA(Xt_kMax,on=1,type="l",col=4)
addTA(Hi(Xt)[ClCrossMaxLtDates]*(1+liftRatio),on=1,type="p",col=3,pch=25,bg="green")
addTA(Cl(Xt)[ClCrossMinGtDates]*(1-liftRatio),on=1,type="p",col=2,pch=24,bg="red")
######################################################################
### Volatility Thresholds Breaking
######################################################################
library(quantmod)
Xt = getSymbols("^TWII",auto.assign = F)
chartSeries(Xt,TA=NULL, theme = "white")
k=30
Xt_kMin = runMin(Xt, n = k, cumulative = FALSE)
Xt_kMax = runMax(Xt, n = k, cumulative = FALSE)
addTA(Xt_kMin,on=1,type="l",col=2)
addTA(Xt_kMax,on=1,type="l",col=4)
volatility = (Xt_kMax - Xt_kMin)/Xt_kMin
addTA( volatility ,type="l",col=4)
hist(volatility,breaks =100)
addTA( diff(volatility) ,type="l",col=5)
######################################################################
### Volatility Thresholds Breaking
######################################################################
library(quantmod)
Xt = getSymbols("^TWII",auto.assign = F)
chartSeries(Xt,TA="addBBands(n=30)", theme = "white")
addTA(Xt_kMin,on=1,type="l",col=9)
addTA(Xt_kMax,on=1,type="l",col=9)
bb = BBands(HLC(Xt),n=30)
tail(bb)
BBvolatility = (bb$up - bb$dn)/bb$dn
addTA( BBvolatility ,type="l",col=4)
# hist(BBvolatility,breaks = 200)
addTA( volatility ,type="l",col=5)
addTA( diff(BBvolatility) ,type="l",col=6)
addTA( diff(volatility) ,type="l",col=7)
# Clean up
chartSeries(Xt,TA="addBBands(n=60)", theme = "white")
bb = BBands(HLC(Xt),n=60)
BBvolatility = (bb$up - bb$dn)/bb$dn
hist(BBvolatility,breaks = 200)
quantile(BBvolatility,probs = 0.8,na.rm = T)
addTA( BBvolatility ,type="l",col=4)
addTA( sign(BBvolatility - 0.2) ,type="l",col=4)
addTA(1- BBvolatility ,type="l",col=4)
|
# Fit a GBM click-through-rate classifier with caret on a pre-split CTR sample.
# Remove all variables from the global environment for a clean run.
rm(list = ls(all.names = TRUE))
# Load the required modelling packages.
library(caret)
library(pROC)
library(IDPmisc)   # provides NaRV.omit() used below (was called but never loaded)
set.seed(100)
CTR_SD_Data <- read.csv("/home/raghunandangupta/Downloads/splits/sub-testtaa")
# Convert categorical columns to their numeric codes.
# NOTE(review): as.numeric() on these columns assumes they were read in as
# factors; on R >= 4.0 read.csv() returns character columns by default, for
# which as.numeric() yields NA -- confirm the R version / read options.
CTR_SD_Data$site_id <- as.numeric(CTR_SD_Data$site_id)
CTR_SD_Data$site_domain <- as.numeric(CTR_SD_Data$site_domain)
CTR_SD_Data$site_category <- as.numeric(CTR_SD_Data$site_category)
CTR_SD_Data$app_id <- as.numeric(CTR_SD_Data$app_id)
CTR_SD_Data$app_domain <- as.numeric(CTR_SD_Data$app_domain)
CTR_SD_Data$app_category <- as.numeric(CTR_SD_Data$app_category)
CTR_SD_Data$device_id <- as.numeric(CTR_SD_Data$device_id)
CTR_SD_Data$device_ip <- as.numeric(CTR_SD_Data$device_ip)
CTR_SD_Data$device_model <- as.numeric(CTR_SD_Data$device_model)
# Class balance of the click/no-click outcome.
prop.table(table(CTR_SD_Data$click))
# Split the data into 70% training / 30% test rows (column 3 is dropped).
rows <- seq_len(nrow(CTR_SD_Data))   # seq_len is safe when the data is empty
train_rows <- sample(x = rows, size = (0.7 * nrow(CTR_SD_Data))) #selecting 70% random sample no of row no as training data
# Training data: all randomly selected rows.
training <- CTR_SD_Data[train_rows, -3]
# BUG FIX: the results of these cleaning steps were previously discarded
# (the expressions were evaluated but never assigned back), so NA/NaN/Inf
# rows were never actually removed. Assign the cleaned data back.
training <- training[complete.cases(training), ]
training <- NaRV.omit(training)
# Test data: all rows not chosen for training.
testing <- CTR_SD_Data[-train_rows, -3]
testing <- testing[complete.cases(testing), ]
testing <- NaRV.omit(testing)
set.seed(33)
objControl <- trainControl(method='cv', number=3, returnResamp='none', summaryFunction = twoClassSummary, classProbs = TRUE)
# NOTE(review): `training` still contains the `click` column, so the outcome
# is also passed in as a predictor (target leakage) -- confirm intended columns.
objModel <- train(training, as.factor(training$click),
                  method='gbm',
                  trControl=objControl,
                  metric = "ROC",
                  preProc = c("center", "scale"))
summary(objModel)
| /ctr_analysis/ctr_gbm_alternate.R | no_license | ragnar-lothbrok/data-analytics | R | false | false | 1,828 | r | #Remove all env variables
rm(list = ls(all.names = T))
#Load the data from packages
library(caret)
library(pROC)
set.seed(100)
CTR_SD_Data <- read.csv("/home/raghunandangupta/Downloads/splits/sub-testtaa")
#Convert categorical values to numeric
CTR_SD_Data$site_id = as.numeric(CTR_SD_Data$site_id)
CTR_SD_Data$site_domain = as.numeric(CTR_SD_Data$site_domain)
CTR_SD_Data$site_category = as.numeric(CTR_SD_Data$site_category)
CTR_SD_Data$app_id = as.numeric(CTR_SD_Data$app_id)
CTR_SD_Data$app_domain = as.numeric(CTR_SD_Data$app_domain)
CTR_SD_Data$app_category = as.numeric(CTR_SD_Data$app_category)
CTR_SD_Data$device_id = as.numeric(CTR_SD_Data$device_id)
CTR_SD_Data$device_ip = as.numeric(CTR_SD_Data$device_ip)
CTR_SD_Data$device_model = as.numeric(CTR_SD_Data$device_model)
prop.table(table(CTR_SD_Data$click))
#Split the data in training and test data
rows = seq(from=1,to=nrow(CTR_SD_Data), by = 1)
train_rows = sample(x=rows, size=(0.7 * nrow(CTR_SD_Data))) #selecting 70% random sample no of row no as training data
#Getting training data i.e. selecting all rows that we had randomly selected from rows
training = CTR_SD_Data[train_rows,-3]
training[complete.cases(training), ]
NaRV.omit(training)
#Getting TEST data i.e. all rows not mentioned in train rows
testing = CTR_SD_Data[-train_rows,-3]
testing[complete.cases(testing), ]
NaRV.omit(testing)
set.seed(33)
objControl <- trainControl(method='cv', number=3, returnResamp='none', summaryFunction = twoClassSummary, classProbs = TRUE)
objModel <- train(training, as.factor(training$click),
method='gbm',
trControl=objControl,
metric = "ROC",
preProc = c("center", "scale"))
summary(objModel)
|
# Plot marginal SPEI (drought-index) effect curves reconstructed from the
# coefficients of saved glmnet fits.  The SPEI effect is encoded as a linear
# term plus three hinge terms pmax(spei + 1, 0), pmax(spei - 0, 0) and
# pmax(spei - 1, 0) (a linear spline with knots at -1, 0, 1), so each curve
# is rebuilt by summing coefficient * basis over a grid of SPEI values.
# NOTE(review): each .Rdata file is assumed to define `mod`, a glmnet fit
# whose single-lambda coefficients sit in column s0 -- confirm.
library(tidyverse)
library(glmnet)
setwd('~/mortalityblob/glmnet/')
#############################
# Continent No-AEZ
#############################
fs <- list.files(pattern='noloc.Rdata$|afr.Rdata')
# One row per SPEI grid value; one column per model file is appended below.
resdf <- data.frame(spei=seq(-2.5, 2.5, 0.1))
for (f in fs){
  load(f)   # brings `mod` into the global environment
  # Coefficient table restricted to SPEI-related terms.
  df <- data.frame(as.matrix(mod$beta)) %>%
    mutate(var = row.names(.)) %>%
    filter(grepl('spei', var))
  # Coefficients of the linear term and of the three hinge (pmax) terms.
  spei <- df$s0[grepl('^spei', df$var)]
  spei_p1 <- df$s0[grepl('\\+ 1', df$var)]
  spei_m0 <- df$s0[grepl('- 0', df$var)]
  spei_m1 <- df$s0[grepl('- 1', df$var)]
  # Evaluate the spline over the SPEI grid.
  res <- resdf$spei*spei
  res <- res + pmax(resdf$spei + 1, 0)*spei_p1
  res <- res + pmax(resdf$spei - 0, 0)*spei_m0
  res <- res + pmax(resdf$spei - 1, 0)*spei_m1
  resdf[ , f] <- res
}
# Long format; decode SPEI window / precipitation variable from the filename.
# (gather() is superseded by pivot_longer() but retained for compatibility.)
plotdf <- resdf %>%
  gather(model, value, -spei) %>%
  mutate(speiwindow = substr(model, 1, 6),
         precipvar = substr(model, 8, 9),
         allloc = !grepl('noloc', model))
ggplot(plotdf) +
  geom_line(aes(x=spei, y=value, color=allloc)) +
  facet_grid(speiwindow ~ precipvar)
#############################
# AEZs
#############################
# As above, but with agro-ecological-zone (AEZ) interaction terms: Desert
# (absent from the inner loop) acts as the reference zone; every other zone
# adds its own spline adjustment on top of the base curve.
fs <- list.files(pattern='spei(03|24)\\....afr.aez')
resdf <- expand.grid(list(spei=seq(-2.5, 2.5, 0.1),
                     aez=c('Forest', 'Highlands', 'Mediterranean', 'Savanna',
                           'SemiForest', 'Desert')))
for (f in fs){
  load(f)
  df <- data.frame(as.matrix(mod$beta)) %>%
    mutate(var = row.names(.)) %>%
    filter(grepl('spei', var))
  # Base (reference-zone) spline coefficients; here the hinge terms are
  # matched with a leading "I(" anchor, unlike the first section.
  spei <- df$s0[grepl('^spei', df$var)]
  spei_p1 <- df$s0[grepl('^I.*\\+ 1', df$var)]
  spei_m0 <- df$s0[grepl('^I.*- 0', df$var)]
  spei_m1 <- df$s0[grepl('^I.*- 1', df$var)]
  res <- resdf$spei*spei
  res <- res + pmax(resdf$spei + 1, 0)*spei_p1
  res <- res + pmax(resdf$spei - 0, 0)*spei_m0
  res <- res + pmax(resdf$spei - 1, 0)*spei_m1
  # Add each zone's interaction adjustment to the rows of that zone only.
  for (a in c('Forest', 'Highlands', 'Mediterranean', 'Savanna','SemiForest')){
    df <- data.frame(as.matrix(mod$beta)) %>%
      mutate(var = row.names(.)) %>%
      filter(grepl(paste0('aez', a, '.*spei'), var))
    ix <- resdf$aez == a
    spei_a <- df$s0[grepl(':spei', df$var)]
    spei_p1_a <- df$s0[grepl('\\+ 1', df$var)]
    spei_m0_a <- df$s0[grepl('- 0', df$var)]
    spei_m1_a <- df$s0[grepl('- 1', df$var)]
    res[ix] <- res[ix] + resdf$spei[ix]*spei_a
    res[ix] <- res[ix] + pmax(resdf$spei[ix] + 1, 0)*spei_p1_a
    res[ix] <- res[ix] + pmax(resdf$spei[ix] - 0, 0)*spei_m0_a
    res[ix] <- res[ix] + pmax(resdf$spei[ix] - 1, 0)*spei_m1_a
  }
  resdf[ , f] <- res
}
plotdf <- resdf %>%
  gather(model, value, -spei, -aez) %>%
  mutate(speiwindow = substr(model, 1, 6),
         precipvar = substr(model, 8, 9)) %>%
  filter(aez != 'Mediterranean')
#Standardize
# Shift every curve so its value at spei == 0 is zero, making zones
# comparable.  (plotdf remains grouped afterwards -- harmless for the
# ggplot below, but ungroup() would be cleaner.)
plotdf <- plotdf %>%
  group_by(model, aez) %>%
  mutate(value = value - value[spei == 0])
ggplot(plotdf) +
  geom_line(aes(x=spei, y=value, color=aez)) +
  facet_grid(speiwindow ~ precipvar)
#############################
# Combined Model With AEZs
#############################
# Models containing both SPEI windows (presumably the 3- and 24-month
# accumulations, matching the spei03/spei24 file names); the inner loop over
# s extracts each window's curve separately from the same fit.
fs <- list.files(pattern='0324')
resdf <- expand.grid(list(spei=seq(-2.5, 2.5, 0.1),
                     aez=c('Forest', 'Highlands', 'Mediterranean', 'Savanna',
                           'SemiForest', 'Desert')))
for (f in fs){
  load(f)
  for (s in c('3', '24')){
    # 'spei....3' / 'spei....24' match the window-specific coefficient names.
    df <- data.frame(as.matrix(mod$beta)) %>%
      mutate(var = row.names(.)) %>%
      filter(grepl(paste0('spei....', s), var))
    spei <- df$s0[grepl('^spei', df$var)]
    spei_p1 <- df$s0[grepl('^I.*\\+ 1', df$var)]
    spei_m0 <- df$s0[grepl('^I.*- 0', df$var)]
    spei_m1 <- df$s0[grepl('^I.*- 1', df$var)]
    res <- resdf$spei*spei
    res <- res + pmax(resdf$spei + 1, 0)*spei_p1
    res <- res + pmax(resdf$spei - 0, 0)*spei_m0
    res <- res + pmax(resdf$spei - 1, 0)*spei_m1
    for (a in c('Forest', 'Highlands', 'Mediterranean', 'Savanna','SemiForest')){
      df <- data.frame(as.matrix(mod$beta)) %>%
        mutate(var = row.names(.)) %>%
        filter(grepl(paste0('aez', a, '.*spei....', s), var))
      ix <- resdf$aez == a
      spei_a <- df$s0[grepl(':spei', df$var)]
      spei_p1_a <- df$s0[grepl('\\+ 1', df$var)]
      spei_m0_a <- df$s0[grepl('- 0', df$var)]
      spei_m1_a <- df$s0[grepl('- 1', df$var)]
      res[ix] <- res[ix] + resdf$spei[ix]*spei_a
      res[ix] <- res[ix] + pmax(resdf$spei[ix] + 1, 0)*spei_p1_a
      res[ix] <- res[ix] + pmax(resdf$spei[ix] - 0, 0)*spei_m0_a
      res[ix] <- res[ix] + pmax(resdf$spei[ix] - 1, 0)*spei_m1_a
    }
    resdf[ , paste0(f, s)] <- res   # one result column per file x window
  }
}
plotdf <- resdf %>%
  gather(model, value, -spei, -aez) %>%
  mutate(speiwindow = substr(model, 26, nchar(model)),
         precipvar = substr(model, 10, 11)) %>%
  filter(aez != 'Mediterranean')
#Standardize
# Same zero-at-spei==0 normalisation as above.
plotdf <- plotdf %>%
  group_by(model, aez) %>%
  mutate(value = value - value[spei == 0])
ggplot(plotdf) +
  geom_line(aes(x=spei, y=value, color=aez)) +
  facet_grid(speiwindow ~ precipvar)
| /visualize/Plot_Glmnet_Res.R | no_license | mcooper/mortality | R | false | false | 4,938 | r | library(tidyverse)
library(glmnet)
setwd('~/mortalityblob/glmnet/')
#############################
# Continent No-AEZ
#############################
fs <- list.files(pattern='noloc.Rdata$|afr.Rdata')
resdf <- data.frame(spei=seq(-2.5, 2.5, 0.1))
for (f in fs){
load(f)
df <- data.frame(as.matrix(mod$beta)) %>%
mutate(var = row.names(.)) %>%
filter(grepl('spei', var))
spei <- df$s0[grepl('^spei', df$var)]
spei_p1 <- df$s0[grepl('\\+ 1', df$var)]
spei_m0 <- df$s0[grepl('- 0', df$var)]
spei_m1 <- df$s0[grepl('- 1', df$var)]
res <- resdf$spei*spei
res <- res + pmax(resdf$spei + 1, 0)*spei_p1
res <- res + pmax(resdf$spei - 0, 0)*spei_m0
res <- res + pmax(resdf$spei - 1, 0)*spei_m1
resdf[ , f] <- res
}
plotdf <- resdf %>%
gather(model, value, -spei) %>%
mutate(speiwindow = substr(model, 1, 6),
precipvar = substr(model, 8, 9),
allloc = !grepl('noloc', model))
ggplot(plotdf) +
geom_line(aes(x=spei, y=value, color=allloc)) +
facet_grid(speiwindow ~ precipvar)
#############################
# AEZs
#############################
fs <- list.files(pattern='spei(03|24)\\....afr.aez')
resdf <- expand.grid(list(spei=seq(-2.5, 2.5, 0.1),
aez=c('Forest', 'Highlands', 'Mediterranean', 'Savanna',
'SemiForest', 'Desert')))
for (f in fs){
load(f)
df <- data.frame(as.matrix(mod$beta)) %>%
mutate(var = row.names(.)) %>%
filter(grepl('spei', var))
spei <- df$s0[grepl('^spei', df$var)]
spei_p1 <- df$s0[grepl('^I.*\\+ 1', df$var)]
spei_m0 <- df$s0[grepl('^I.*- 0', df$var)]
spei_m1 <- df$s0[grepl('^I.*- 1', df$var)]
res <- resdf$spei*spei
res <- res + pmax(resdf$spei + 1, 0)*spei_p1
res <- res + pmax(resdf$spei - 0, 0)*spei_m0
res <- res + pmax(resdf$spei - 1, 0)*spei_m1
for (a in c('Forest', 'Highlands', 'Mediterranean', 'Savanna','SemiForest')){
df <- data.frame(as.matrix(mod$beta)) %>%
mutate(var = row.names(.)) %>%
filter(grepl(paste0('aez', a, '.*spei'), var))
ix <- resdf$aez == a
spei_a <- df$s0[grepl(':spei', df$var)]
spei_p1_a <- df$s0[grepl('\\+ 1', df$var)]
spei_m0_a <- df$s0[grepl('- 0', df$var)]
spei_m1_a <- df$s0[grepl('- 1', df$var)]
res[ix] <- res[ix] + resdf$spei[ix]*spei_a
res[ix] <- res[ix] + pmax(resdf$spei[ix] + 1, 0)*spei_p1_a
res[ix] <- res[ix] + pmax(resdf$spei[ix] - 0, 0)*spei_m0_a
res[ix] <- res[ix] + pmax(resdf$spei[ix] - 1, 0)*spei_m1_a
}
resdf[ , f] <- res
}
plotdf <- resdf %>%
gather(model, value, -spei, -aez) %>%
mutate(speiwindow = substr(model, 1, 6),
precipvar = substr(model, 8, 9)) %>%
filter(aez != 'Mediterranean')
#Standardize
plotdf <- plotdf %>%
group_by(model, aez) %>%
mutate(value = value - value[spei == 0])
ggplot(plotdf) +
geom_line(aes(x=spei, y=value, color=aez)) +
facet_grid(speiwindow ~ precipvar)
#############################
# Combined Model With AEZs
#############################
fs <- list.files(pattern='0324')
resdf <- expand.grid(list(spei=seq(-2.5, 2.5, 0.1),
aez=c('Forest', 'Highlands', 'Mediterranean', 'Savanna',
'SemiForest', 'Desert')))
for (f in fs){
load(f)
for (s in c('3', '24')){
df <- data.frame(as.matrix(mod$beta)) %>%
mutate(var = row.names(.)) %>%
filter(grepl(paste0('spei....', s), var))
spei <- df$s0[grepl('^spei', df$var)]
spei_p1 <- df$s0[grepl('^I.*\\+ 1', df$var)]
spei_m0 <- df$s0[grepl('^I.*- 0', df$var)]
spei_m1 <- df$s0[grepl('^I.*- 1', df$var)]
res <- resdf$spei*spei
res <- res + pmax(resdf$spei + 1, 0)*spei_p1
res <- res + pmax(resdf$spei - 0, 0)*spei_m0
res <- res + pmax(resdf$spei - 1, 0)*spei_m1
for (a in c('Forest', 'Highlands', 'Mediterranean', 'Savanna','SemiForest')){
df <- data.frame(as.matrix(mod$beta)) %>%
mutate(var = row.names(.)) %>%
filter(grepl(paste0('aez', a, '.*spei....', s), var))
ix <- resdf$aez == a
spei_a <- df$s0[grepl(':spei', df$var)]
spei_p1_a <- df$s0[grepl('\\+ 1', df$var)]
spei_m0_a <- df$s0[grepl('- 0', df$var)]
spei_m1_a <- df$s0[grepl('- 1', df$var)]
res[ix] <- res[ix] + resdf$spei[ix]*spei_a
res[ix] <- res[ix] + pmax(resdf$spei[ix] + 1, 0)*spei_p1_a
res[ix] <- res[ix] + pmax(resdf$spei[ix] - 0, 0)*spei_m0_a
res[ix] <- res[ix] + pmax(resdf$spei[ix] - 1, 0)*spei_m1_a
}
resdf[ , paste0(f, s)] <- res
}
}
plotdf <- resdf %>%
gather(model, value, -spei, -aez) %>%
mutate(speiwindow = substr(model, 26, nchar(model)),
precipvar = substr(model, 10, 11)) %>%
filter(aez != 'Mediterranean')
#Standardize
plotdf <- plotdf %>%
group_by(model, aez) %>%
mutate(value = value - value[spei == 0])
ggplot(plotdf) +
geom_line(aes(x=spei, y=value, color=aez)) +
facet_grid(speiwindow ~ precipvar)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/viz.R
\name{plot_affinity_matrix}
\alias{plot_affinity_matrix}
\title{Plot clustered affinity matrix}
\usage{
plot_affinity_matrix(affinity_matrix, partition)
}
\arguments{
\item{affinity_matrix}{An affinity matrix.}
\item{partition}{A partition of the samples.}
}
\description{
To be used with SNF and ANF affinity matrices for example.
}
| /man/plot_affinity_matrix.Rd | permissive | agapow/subtypr | R | false | true | 419 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/viz.R
\name{plot_affinity_matrix}
\alias{plot_affinity_matrix}
\title{Plot clustered affinity matrix}
\usage{
plot_affinity_matrix(affinity_matrix, partition)
}
\arguments{
\item{affinity_matrix}{An affinity matrix.}
\item{partition}{A partition of the samples.}
}
\description{
To be used with SNF and ANF affinity matrices for example.
}
|
################################################################################
########################## GSEA1 PLOTS ############################
################################################################################
##------------------------------------------------------------------------------
##Plot enrichment analysis from TNA objects.
## tna.plot.gsea1: plot one-tailed GSEA results stored in a TNA object.
## 'object' must carry 'tna.gsea1' results; 'ntop'/'tfs' subset the regulons
## to plot; the remaining arguments control panel layout and the PDF device.
## Side effect: writes '<file>.pdf' under 'filepath' when plotpdf=TRUE.
tna.plot.gsea1<-function(object, labPheno="", file="tna_gsea1", filepath=".", regulon.order="size",
                         ntop=NULL, tfs=NULL, ylimPanels=c(0.0,3.5,0.0,0.8), heightPanels=c(1,1,3),
                         width=4.4, height=4, ylabPanels=c("Phenotype","Regulon","Enrichment score"),
                         xlab="Position in the ranked list of genes", alpha=0.5, sparsity=10,
                         autoformat=TRUE, plotpdf = TRUE, ...) {
  #checks
  # is() respects S4 inheritance; comparing class(object) with "!=" would
  # reject valid TNA subclasses and is fragile if the class attribute ever
  # has length > 1.
  if(!is(object, "TNA") || object@status$analysis["GSEA1"]!="[x]"){
    cat("-invalid 'GSEA1' status! \n")
    stop("NOTE: gsea plot requires results from 'tna.gsea1' analysis!")
  }
  tnai.checks(name="labPheno",labPheno)
  tnai.checks(name="file",file)
  tnai.checks(name="filepath",filepath)
  tnai.checks(name="ntop",ntop)
  tnai.checks(name="tfs",tfs)
  tnai.checks(name="ylimPanels",ylimPanels)
  tnai.checks(name="heightPanels",heightPanels)
  tnai.checks(name="width",width)
  tnai.checks(name="height",height)
  tnai.checks(name="ylabPanels",ylabPanels)
  tnai.checks(name="xlab",xlab)
  tnai.checks(name="alpha",alpha)
  tnai.checks(name="autoformat",autoformat)
  ##-----get gsea1 results
  # when 'tfs' is given, keep only regulons matched by row name or by the
  # 'Regulon' column; otherwise take the top 'ntop' results
  if(!is.null(tfs)){
    resgsea<-tna.get(object, what="gsea1", reportNames=TRUE)
    idx<-(rownames(resgsea)%in%tfs+resgsea$Regulon%in%tfs)>0
    if(all(!idx)){
      stop("one or more input 'tfs' not found in the 'gsea1' results!")
    }
    resgsea<-resgsea[idx,]
  } else {
    resgsea<-tna.get(object, what="gsea1", ntop=ntop, reportNames=TRUE)
  }
  ##-----get gene sets used in the gsea1 analysis
  if(object@para$gsea1$tnet=="cdt"){
    rgcs<-object@listOfModulators
    if(ylabPanels[2]=="Regulon")ylabPanels[2]<-"Modulators"
  } else if(object@para$gsea1$tnet=="ref"){
    rgcs<-tna.get(object,what="refregulons")
  } else {
    rgcs<-tna.get(object,what="regulons")
  }
  ##-----get args used in the gsea1 analysis
  phenotype<-object@phenotype
  orderAbsValue<-object@para$gsea1$orderAbsValue
  nPermutations<-object@para$gsea1$nPermutations
  exponent<-object@para$gsea1$exponent
  ##-----send to a common plot function
  plot.gsea1(resgsea=resgsea, rgcs=rgcs, phenotype=phenotype,
             orderAbsValue=orderAbsValue, nPermutations=nPermutations,
             exponent=exponent, labPheno=labPheno, file=file, filepath=filepath,
             regulon.order=regulon.order,ylimPanels=ylimPanels,
             heightPanels=heightPanels, width=width, height=height,
             ylabPanels=ylabPanels,xlab=xlab,alpha=alpha, sparsity=sparsity,
             autoformat=autoformat, plotpdf=plotpdf, ...=...)
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea1
# Shared worker for tna.plot.gsea1: orders the regulons, ranks the phenotype,
# merges the running-score data and dispatches to make.plot1.
# 'resgsea' is the gsea1 result table; 'rgcs' the list of gene sets used in
# the analysis; 'phenotype' the named score vector the GSEA was run on.
plot.gsea1<-function(resgsea, rgcs, phenotype, orderAbsValue, nPermutations, exponent,
                     labPheno="tna", file=labPheno, filepath=".", regulon.order="size",
                     ylimPanels=c(0.0,3.5,0.0,0.8), heightPanels=c(1,1,3), width=6,
                     height=5, ylabPanels=c("Phenotype","Regulon","Enrichment score"),
                     xlab="Position in the ranked list of genes", alpha=0.5, sparsity=10,
                     autoformat=TRUE, plotpdf = TRUE, ...) {
  ##return valid arg
  regulon.order=tnai.checks(name="regulon.order",regulon.order)
  ##-----check available results
  # order regulons by the requested column; only 'Observed.Score' sorts in
  # decreasing order
  if(!is.null(resgsea) && nrow(resgsea)>0){
    if(regulon.order!='none'){
      decreasing<-ifelse(regulon.order=='Observed.Score',TRUE,FALSE)
      resgsea<-resgsea[sort.list(resgsea[,regulon.order],decreasing=decreasing),]
    }
    gs.names<-rownames(resgsea)
    names(gs.names)<-resgsea$Regulon
  } else {
    stop("gsea1 is empty or null!")
  }
  ##-----get ordered phenotype
  # rank the phenotype high-to-low (on absolute values when requested)
  if(orderAbsValue)phenotype<-abs(phenotype)
  phenotype<-phenotype[order(phenotype,decreasing=TRUE)]
  ##----get stat resolution
  # smallest p-value the permutation test can resolve; reported as "< x"
  pvresolu<-signif(1/(nPermutations+1), digits=5)
  pvcutoff<-paste("< ",as.character(format(pvresolu,scientific=TRUE,digits=2)),collapse="",sep="")
  ##-----get merged data
  tests<-get.merged.data1(gs.names,phenotype,rgcs,resgsea,exponent)
  ##-----fix pvalue report for the resolution
  # p-values at the permutation floor are shown as "< resolution"
  idx<-tests$pv==pvresolu
  tests$adjpv[idx]<-pvcutoff
  ##-----check format
  if(autoformat)ylimPanels<-check.format1(tests)
  ##-----make plot
  make.plot1(tests,labPheno,file,filepath,heightPanels,ylimPanels,ylabPanels,xlab,width,
             height,alpha,sparsity,plotpdf, ...=...)
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea1
# Compute, per gene set, the GSEA running scores / enrichment scores /
# hit positions (via gseaScores4RTN) and merge them with the p-values from
# the gsea1 result table into a single list consumed by the plot routines.
get.merged.data1<-function(gs.names,phenotype,rgcs,resgsea,exponent){
  res<-list()
  for(gs.name in gs.names){
    test<-gseaScores4RTN(geneList=phenotype,geneSet=rgcs[[gs.name]],
                         exponent=exponent,mode="graph")
    res$enrichmentScores[[gs.name]]<-test$enrichmentScore
    res$runningScores[[gs.name]]<-test$runningScore
    res$positions[[gs.name]]<-test$positions
    res$pvals[[gs.name]]<-resgsea[gs.name,][["Pvalue"]]
    res$adjpvals[[gs.name]]<-resgsea[gs.name,][["Adjusted.Pvalue"]]
  }
  # assemble the plotting payload; running scores and positions become
  # data frames with one column per gene set
  tests<-list()
  tests[["enrichmentScores"]]<-res$enrichmentScores
  tests[["runningScores"]]<-as.data.frame(res$runningScores,stringsAsFactors=FALSE)
  tests[["positions"]]<-as.data.frame(res$positions,stringsAsFactors=FALSE)
  tests[["pv"]]<-res$pvals
  # adjusted p-values are pre-formatted as "= x" display strings
  tests[["adjpv"]]<-paste("= ",as.character(format(res$adjpvals,scientific=TRUE,digits=2)),sep="")
  tests[["geneList"]]<-phenotype
  tests[["labels"]]<-names(gs.names)
  tests
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea1
# Derive y-axis limits for the gsea1 plot panels from the data.
# Panels 1-2 (phenotype): integer limits fully containing the ranked scores.
# Panels 3-4 (running score): joint range over all regulons, rounded outwards
# to the nearest 0.1; the lower bound is clamped to 0 when everything is
# non-negative. Returns a numeric vector of length 4.
check.format1<-function(tests){
  ylimPanels<-rep(0,4)
  # integer bounds for the phenotype panel (the original applied min() twice;
  # range() gives the same c(min, max) pair)
  tp<-range(tests$geneList)
  tpp<-as.integer(tp)
  if(tp[1]<tpp[1])tpp[1]=tpp[1]-1
  if(tp[2]>tpp[2])tpp[2]=tpp[2]+1
  ylimPanels[1:2]<-tpp
  # per-regulon running-score ranges; vapply (unlike sapply) guarantees a
  # 2-row numeric matrix even for edge-case input
  tp<-vapply(seq_along(tests$labels),function(i){
    range(tests$runningScores[,i])
  },numeric(2))
  tp<-c(min(tp),max(tp))
  if(min(tests$geneList)>=0 && tp[1]>=0)tp[1]=0
  tpp<-round(tp,digits=1)
  if(tp[1]<tpp[1])tpp[1]=tpp[1]-0.1
  if(tp[2]>tpp[2])tpp[2]=tpp[2]+0.1
  ylimPanels[3:4]<-tpp
  ylimPanels
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea1
# Open the PDF device (when requested), draw the gsea1 figure via gsplot1,
# and close the device. NOTE(review): 'tests$runningScore' and
# 'tests$enrichmentScore' rely on R's '$' partial matching -- the list
# elements are actually named 'runningScores'/'enrichmentScores'
# (see get.merged.data1).
make.plot1 <- function(tests, labPheno, file, filepath, heightPanels, ylimPanels, ylabPanels,
                       xlab, width, height, alpha, sparsity, plotpdf, ...) {
  if (plotpdf){
    pdf(file=file.path(filepath, paste(file,".pdf", sep="")), width=width, height=height)
  }
  gsplot1(tests$runningScore, tests$enrichmentScore, tests$positions, tests$adjpv, tests$geneList,
          tests$labels, heightPanels, ylimPanels, ylabPanels, xlab, labPheno, alpha, sparsity,
          ...=... )
  if (plotpdf)dev.off()
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea1
# Draw the three-panel gsea1 figure with base graphics:
#   panel 1 - the ranked phenotype curve;
#   panel 2 - hit positions (barcode) of each regulon's targets;
#   panel 3 - the GSEA running scores with enrichment-score markers.
# Panels with heightPanels[i] == 0 are skipped. 'sparsity' thins the points
# drawn for the long curves (every sparsity-th point plus neighbors).
gsplot1 <- function(runningScore, enrichmentScore, positions, adjpv,
                    geneList, labels, heightPanels, ylimPanels, ylabPanels, xlab,
                    labPheno, alpha, sparsity, ...) {
  #-------------------------------------------------
  #set text size levels
  cexlev=c(1.3,1.2,1.1,0.9,0.8)
  #set colors: one semi-transparent color per regulon, recycling the current
  #palette through colorRampPalette when there are more regulons than colors
  rsc<-positions
  ng<-ncol(rsc)
  get.alpha<-function (colour,alpha=1.0) {
    col <- col2rgb(colour, TRUE)/255
    alpha <- rep(alpha, length.out = length(colour))
    rgb(col[1, ], col[2, ], col[3, ], alpha)
  }
  rsc.colors<-get.alpha(palette(), alpha)
  if(ng>length(rsc.colors)){
    rsc.colors<-get.alpha(colorRampPalette(rsc.colors)(ng),alpha)
  } else if(ng<length(rsc.colors)){
    rsc.colors<-rsc.colors[1:ng]
  }
  #-------------------------------------------------
  #set positions that hits will appear in the plot
  #(regulon i's hits are drawn on row ng-i+1, so the first regulon is on top)
  for(i in 1:ng){
    idx<-rsc[,i]!=0
    rsc[idx,i]<-c(ng:1)[i]
  }
  rsc<-as.matrix(rsc)
  # two-column matrix of (rank position, row) pairs for every hit
  rsc.vec<-cbind(rep(1:nrow(rsc),ng),as.vector(rsc))
  rsc.vec<-rsc.vec[rsc.vec[,2]!=0,]
  #-------------------------------------------------
  # layout: stack the enabled panels vertically with the requested heights
  np<-sum(heightPanels>0)
  ht<-heightPanels
  ht<-ht[ht>0]
  layout(matrix(1:np, np, 1, byrow=TRUE), heights=ht)
  # plot1: ranked phenotype
  par(family="sans")
  if(heightPanels[1]>0){
    par(mar=c(0.5, 6.5, 1.5, 1.0),mgp=c(4.5,0.5,0),tcl=-0.2)
    plot(x=c(1,max(rsc.vec[,1])),y=c(min(geneList),max(geneList)), type="n",
         axes= FALSE,xlab="", ylab=ylabPanels[1], cex.lab=cexlev[1], ylim=ylimPanels[1:2], ...=...)
    if(min(geneList)<0)abline(h=0,lwd=0.6)
    # thin the curve: keep every sparsity-th point and its neighbor
    sq<-c(1:length(geneList))%%sparsity;sq[sq>1]<-0
    sq<-as.logical(sq)
    lines(x=c(1:length(geneList))[sq],y=geneList[sq],col="grey75",lwd=1.4)
    nn=ifelse(min(geneList)<0,4,2)
    pp<-pretty(c(geneList,ylimPanels[1:2]),n=nn)
    axis(2,line=0, cex.axis=cexlev[2], las=2, at=pp, lwd=1.2, labels=pp,...=...)
    if(!is.null(labPheno)){
      legend("topright", legend=labPheno, col="grey75", pch="---",
             bty="n",cex=cexlev[3], pt.cex=1.5, ...=...)
    }
  }
  #-------------------------------------------------
  # plot2: hit barcode, one row per regulon
  if(heightPanels[2]>0){
    par(mar=c(0.0, 6.5, 0.0, 1.0),mgp=c(4.5,0.5,0),tcl=-0.2)
    plot(x=c(1,max(rsc.vec[,1])),y=c(0,max(rsc.vec[,2])), type="n", axes=FALSE, xlab="",
         ylab=ylabPanels[2],cex.lab=cexlev[1], ...=...)
    for(i in 1:ng){
      idx<-rsc.vec[,2]==i
      xx<-rsc.vec[idx,1]
      yy<-rsc.vec[idx,2]
      segments(xx,yy-0.9,xx, yy-0.1, col=rev(rsc.colors)[i],lwd=0.2)
    }
    axis(2,las=2, at=c(1:ng)-0.5,labels=rev(labels), line=0, cex.axis=cexlev[5],lwd=1.2 , ...=...)
  }
  #-------------------------------------------------
  # plot3: running enrichment scores, with a dotted drop line at each
  # regulon's enrichment-score position
  if(heightPanels[3]>0){
    rsc.colors<-get.alpha(rsc.colors,1.0)
    par(mar=c(5, 6.5, 0.0, 1.0),mgp=c(4.5,0.5,0),tcl=-0.2)
    cc<-as.matrix(runningScore)
    plot(x=c(1,nrow(cc)),y=c(min(cc),max(cc)), type="n", axes=FALSE, xlab="",
         ylim=ylimPanels[c(3,4)],ylab=ylabPanels[3], cex.lab=cexlev[1], ...=...)
    par(mgp=c(3.0,0.5,0))
    title(xlab=xlab, cex.lab=cexlev[1], ...=...)
    if(min(cc)<0)abline(h=0,lwd=1.2)
    for(i in 1:ng){
      yy<-cc[,i]
      xx<-c(1:nrow(cc))
      # NOTE(review): the assignment above is immediately overridden; xx ends
      # up holding the position(s) where the running score hits the ES
      xx<-which(yy==enrichmentScore[[i]])
      segments(xx,0, xx, yy[xx], col=rsc.colors[i],lwd=1, lty=3)
    }
    for(i in 1:ng){
      yy<-cc[,i]
      xx<-c(1:nrow(cc))
      sq<-c(1:length(xx))%%sparsity;sq[sq>1]<-0
      sq<-as.logical(sq)
      lines(x=xx[sq],y=yy[sq],col=rsc.colors[i],lwd=0.7)
    }
    axis(1,cex.axis=cexlev[2], lwd=1.2, ...=...)
    axis(2,las=2,cex.axis=cexlev[2], lwd=1.2, ...=...)
    labels<-paste(labels," (adj.p ",format(adjpv,scientific=TRUE,digits=2),")",sep="")
    #labels=sub("=","<",labels)
    legend("topright", legend=labels, col=rsc.colors, pch="---", bty="n",cex=cexlev[4],
           pt.cex=1.2, title=ylabPanels[2], title.adj = 0, ...=...)
  }
}
################################################################################
########################## GSEA2 PLOTS ############################
################################################################################
##------------------------------------------------------------------------------
##Plot 2-tailed enrichment analysis from TNA objects.
## tna.plot.gsea2: plot two-tailed GSEA results stored in a TNA object
## (one PDF per regulon). 'object' must carry 'tna.gsea2' results;
## 'ntop'/'tfs' subset the regulons; the remaining arguments control panel
## layout and the PDF device.
tna.plot.gsea2<-function(object, labPheno="", file="tna_gsea2", filepath=".", regulon.order="size",
                         ntop=NULL, tfs=NULL, ylimPanels=c(-3.0,3.0,-0.5,0.5), heightPanels=c(2.0,0.8,5.0),
                         width=2.8, height=3.0, ylabPanels=c("Phenotype","Regulon","Enrichment score"),
                         xlab="Position in the ranked list of genes",
                         alpha=1.0, sparsity=10, autoformat=TRUE, plotpdf = TRUE, ...) {
  #checks
  # is() respects S4 inheritance; comparing class(object) with "!=" would
  # reject valid TNA subclasses and is fragile if the class attribute ever
  # has length > 1.
  if(!is(object, "TNA") || object@status$analysis["GSEA2"]!="[x]"){
    cat("-invalid 'GSEA2' status! \n")
    stop("NOTE: gsea plot requires results from 'tna.gsea2' analysis!")
  }
  tnai.checks(name="labPheno",labPheno)
  tnai.checks(name="file",file)
  tnai.checks(name="filepath",filepath)
  tnai.checks(name="ntop",ntop)
  tnai.checks(name="tfs",tfs)
  tnai.checks(name="ylimPanels",ylimPanels)
  tnai.checks(name="heightPanels",heightPanels)
  tnai.checks(name="width",width)
  tnai.checks(name="height",height)
  tnai.checks(name="ylabPanels",ylabPanels)
  tnai.checks(name="xlab",xlab)
  tnai.checks(name="alpha",alpha)
  tnai.checks(name="autoformat",autoformat)
  ##-----get gsea2 results
  # when 'tfs' is given, fetch all results and keep matched regulons across
  # the three parallel tables (differential/positive/negative)
  if(!is.null(tfs)){
    resgsea<-tna.get(object, what="gsea2", ntop=-1, reportNames=TRUE)
    idx<-(rownames(resgsea$differential)%in%tfs+resgsea$differential$Regulon%in%tfs)>0
    if(all(!idx)){
      stop("one or more input 'tfs' not found in the 'gsea2' results!")
    }
    resgsea$differential<-resgsea$differential[idx,]
    resgsea$positive<-resgsea$positive[idx,]
    resgsea$negative<-resgsea$negative[idx,]
  } else {
    resgsea<-tna.get(object, what="gsea2", ntop=ntop, reportNames=TRUE)
  }
  ##-----get gene sets used in the current gsea analysis
  if(object@para$gsea2$tnet=="cdt"){
    rgcs<-object@listOfModulators
    if(ylabPanels[2]=="Regulon")ylabPanels[2]<-"Modulators"
  } else if(object@para$gsea2$tnet=="ref"){
    rgcs<-tna.get(object,what="refregulons.and.mode")
  } else if(object@para$gsea2$tnet=="nondpi"){
    rgcs<-tna.get(object,what="nondpiregulons.and.mode")
  } else {
    rgcs<-tna.get(object,what="regulons.and.mode")
  }
  ##-----get args used in the gsea2 analysis
  phenotype<-object@phenotype
  nPermutations<-object@para$gsea2$nPermutations
  exponent<-object@para$gsea2$exponent
  ##-----send to a common plot function
  plot.gsea2(resgsea=resgsea, rgcs=rgcs, phenotype=phenotype,
             nPermutations=nPermutations, exponent=exponent,
             labPheno=labPheno, file=file, filepath=filepath,
             regulon.order=regulon.order, ntop=ntop, tfs=tfs,
             ylimPanels=ylimPanels, heightPanels=heightPanels,
             width=width, height=height, ylabPanels=ylabPanels,
             xlab=xlab, alpha=alpha, sparsity=sparsity,
             autoformat=autoformat, plotpdf=plotpdf, ...=...)
}
##------------------------------------------------------------------------------
##Plot 2-tailed enrichment analysis from TNA objects.
# Shared worker for tna.plot.gsea2: orders the regulons, ranks the phenotype
# and produces one figure per regulon via make.plot2. 'resgsea' is a list
# with the parallel 'differential', 'positive' and 'negative' result tables.
plot.gsea2<-function(resgsea, rgcs, phenotype, nPermutations, exponent,
                     labPheno="tna", file=labPheno, filepath=".", regulon.order="Regulon.Size",
                     ntop=NULL, tfs=NULL, ylimPanels=c(-3.0,3.0,-0.5,0.5), heightPanels=c(1.5,0.7,5.0),
                     width=4, height=3.5, ylabPanels=c("Phenotype","Regulon","Enrichment score"),
                     xlab="Position in the ranked list of genes", alpha=1.0,
                     sparsity=10, autoformat=TRUE, plotpdf=TRUE, ...) {
  ##return valid arg
  regulon.order=tnai.checks(name="regulon.order",regulon.order)
  ##-----check available results
  # order the three parallel tables with the same row permutation; only
  # 'Observed.Score' sorts in decreasing order
  if(!is.null(resgsea) && nrow(resgsea$differential)>0){
    if(regulon.order!='none'){
      decreasing<-ifelse(regulon.order=='Observed.Score',TRUE,FALSE)
      idx<-sort.list(resgsea$differential[,regulon.order],decreasing=decreasing)
      resgsea$differential<-resgsea$differential[idx,]
      resgsea$positive<-resgsea$positive[idx,]
      resgsea$negative<-resgsea$negative[idx,]
    }
    gs.names<-rownames(resgsea$differential)
    names(gs.names)<-resgsea$differential$Regulon
  } else {
    stop("gsea2 is empty or null!")
  }
  ##-----get ordered phenotype
  phenotype<-phenotype[order(phenotype,decreasing=TRUE)]
  ##----get stat resolution
  # smallest p-value the permutation test can resolve; reported as "< x"
  pvresolu<-signif(1/(nPermutations+1), digits=5)
  pvcutoff<-paste("< ",as.character(format(pvresolu,scientific=TRUE,digits=2)),collapse="",sep="")
  ##----plot: one figure per regulon
  for(i in 1:length(gs.names)){
    ##-----get merged data
    tests<-get.merged.data2(gs.names[i],phenotype,rgcs,resgsea$differential[i,,drop=FALSE],exponent)
    # attach the one-tailed (positive/negative target) statistics
    tests$pv[["up"]]<-resgsea$positive[i,"Pvalue"]
    tests$adjpv[["up"]]<-resgsea$positive[i,"Adjusted.Pvalue"]
    tests$pv[["down"]]<-resgsea$negative[i,"Pvalue"]
    tests$adjpv[["down"]]<-resgsea$negative[i,"Adjusted.Pvalue"]
    tests$adjpv[]<-paste("= ",as.character(format(tests$adjpv,scientific=TRUE,digits=2)),sep="")
    tests$dES<-resgsea$differential[i,"Observed.Score"]
    ##-----fix pvalue report for the resolution
    # p-values at the permutation floor are shown as "< resolution"
    idx<-tests$pv==pvresolu
    tests$adjpv[idx]<-pvcutoff
    ##-----check format
    if(autoformat)ylimPanels<-check.format2(tests)
    ##-----make plot
    make.plot2(tests,labPheno,file,filepath,heightPanels,ylimPanels,ylabPanels,
               xlab,width,height,alpha,sparsity, plotpdf, ...=...)
  }
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea2
# Compute the two-tailed GSEA data for a single gene set: hit positions for
# the whole set, plus separate running/enrichment scores for the positive
# (gs > 0) and negative (gs < 0) targets, merged with the p-values from the
# differential result table.
get.merged.data2<-function(gs.name,phenotype,rgcs,resgsea,exponent){
  res<-list()
  gs<-rgcs[[gs.name]]
  # full set: only the hit positions are used downstream
  test<-gseaScores4RTN(geneList=phenotype, geneSet=gs,
                       exponent=exponent, mode="graph")
  res$positions[[gs.name]]<-test$positions
  res$pvals[[gs.name]]<-resgsea[gs.name,][["Pvalue"]]
  res$adjpvals[[gs.name]]<-resgsea[gs.name,][["Adjusted.Pvalue"]]
  # split the set by the sign of the mode of action and score each tail
  testup<-gseaScores4RTN(geneList=phenotype, geneSet=gs[gs>0],
                         exponent=exponent, mode="graph")
  testdown<-gseaScores4RTN(geneList=phenotype, geneSet=gs[gs<0],
                           exponent=exponent, mode="graph")
  res$testup$enrichmentScores[[gs.name]]<-testup$enrichmentScore
  res$testup$runningScores[[gs.name]]<-testup$runningScore
  res$testdown$enrichmentScores[[gs.name]]<-testdown$enrichmentScore
  res$testdown$runningScores[[gs.name]]<-testdown$runningScore
  # assemble the plotting payload consumed by check.format2/make.plot2
  tests<-list()
  tests$testup[["enrichmentScores"]]<-res$testup$enrichmentScores
  tests$testup[["runningScores"]]<-as.data.frame(res$testup$runningScores,stringsAsFactors=FALSE)
  tests$testdown[["enrichmentScores"]]<-res$testdown$enrichmentScores
  tests$testdown[["runningScores"]]<-as.data.frame(res$testdown$runningScores,stringsAsFactors=FALSE)
  tests[["positions"]]<-as.data.frame(res$positions,stringsAsFactors=FALSE)
  tests[["geneList"]]<-phenotype
  tests[["label"]]<-names(gs.name)
  tests$pv[["pv"]]<-res$pvals
  tests$adjpv[["pv"]]<-res$adjpvals
  tests
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea2
# Derive y-axis limits for the gsea2 (two-tailed) plot panels.
# Panels 1-2 (phenotype): integer limits fully containing the ranked scores.
# Panels 3-4 (running scores): joint range of the up/down running sums,
# rounded outwards to the nearest 0.1 and widened to at least [-0.5, 0.5].
# Returns a numeric vector of length 4.
check.format2<-function(tests){
  ylimPanels<-rep(0,4)
  # integer bounds for the phenotype panel
  tp<-range(tests$geneList)
  tpp<-as.integer(tp)
  if(tp[1]<tpp[1])tpp[1]=tpp[1]-1
  if(tp[2]>tpp[2])tpp[2]=tpp[2]+1
  ylimPanels[1:2]<-tpp
  # joint up/down running-score ranges; vapply (unlike sapply) guarantees a
  # 2-row numeric matrix even for edge-case input
  tp<-vapply(seq_along(tests$label),function(i){
    range(c(tests$testup$runningScores[,i],tests$testdown$runningScores[,i]))
  },numeric(2))
  tp<-c(min(tp),max(tp))
  if(min(tests$geneList)>=0 && tp[1]>=0)tp[1]=0
  tpp<-round(tp,digits=1)
  if(tp[1]<tpp[1])tpp[1]=tpp[1]-0.1
  if(tp[2]>tpp[2])tpp[2]=tpp[2]+0.1
  # widen to at least [-0.5, 0.5] (scalar min/max replaces the scalar
  # ifelse() clamps of the original)
  tpp[1]<-min(tpp[1],-0.5)
  tpp[2]<-max(tpp[2],0.5)
  ylimPanels[3:4]<-tpp
  ylimPanels
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea2
# Open the PDF device (when requested; the regulon label is appended to the
# file name, so one PDF per regulon), draw the gsea2 figure via gsplot2, and
# close the device. NOTE(review): 'tests$testup$runningScore' etc. rely on
# R's '$' partial matching -- the list elements are actually named
# 'runningScores'/'enrichmentScores' (see get.merged.data2).
make.plot2 <- function(tests, labPheno, file, filepath, heightPanels, ylimPanels, ylabPanels,
                       xlab, width, height, alpha, sparsity, plotpdf,...) {
  if (plotpdf){
    pdf(file=file.path(filepath, paste(file,"_",tests$label,".pdf", sep="")),
        width=width, height=height)
  }
  gsplot2(tests$testup$runningScore, tests$testup$enrichmentScore,
          tests$testdown$runningScore, tests$testdown$enrichmentScore,tests$dES,
          tests$positions, tests$adjpv, tests$geneList, tests$label, heightPanels,
          ylimPanels, ylabPanels, xlab, labPheno, alpha, sparsity, ...=... )
  if (plotpdf)dev.off()
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea2
# Draw the three-panel two-tailed GSEA figure with base graphics:
#   panel 1 - the ranked phenotype curve;
#   panel 2 - hit barcode, colored by the sign of the target's mode of action;
#   panel 3 - up/down running scores with enrichment-score markers, plus a
#             legend of adjusted p-values and the dES statistic.
# Panels with heightPanels[i] == 0 are skipped.
gsplot2 <- function(runningScoreUp, enrichmentScoreUp, runningScoreDown, enrichmentScoreDown,
                    dES, positions, adjpv, geneList, label, heightPanels, ylimPanels, ylabPanels,
                    xlab, labPheno, alpha, sparsity, ...) {
  #-------------------------------------------------
  # recode positions: +1 (positive targets) -> 2, -1 (negative targets) -> 1,
  # so the value indexes into the two-color palette below
  positions<-as.matrix(positions)
  positions[positions==1]=2
  positions[positions==-1]=1
  #set text size levels
  # NOTE(review): the first assignment is immediately overridden by the
  # second; only the second vector is effective (kept as-is)
  cexlev=c(1.3,1.2,1.1,0.9,0.8)
  cexlev=c(1.1,1.0,1.0,1.0,0.9)
  #set colors: blue for negative targets, red for positive targets
  ng<-ncol(positions)
  get.alpha<-function (colour,alpha=1.0) {
    col <- col2rgb(colour, TRUE)/255
    alpha <- rep(alpha, length.out = length(colour))
    rgb(col[1, ], col[2, ], col[3, ], alpha)
  }
  rsc.colors<-get.alpha(c("#96D1FF","#FF8E91"), alpha)
  #-------------------------------------------------
  #set hits: (rank position, row, color index) triplets for every hit
  rsc1<-rsc2<-positions
  for(i in 1:ng){
    idx<-rsc1[,i]!=0
    rsc1[idx,i]<-i
  }
  rsc.vec<-cbind(rep(1:nrow(rsc1),ng),as.vector(rsc1),as.vector(rsc2))
  rsc.vec<-rsc.vec[rsc.vec[,2]!=0,]
  #-------------------------------------------------
  # layout: stack the enabled panels vertically; graphics state is saved and
  # restored at the end
  op <- par(no.readonly = TRUE)
  np<-sum(heightPanels>0)
  ht<-heightPanels
  ht<-ht[ht>0]
  layout(matrix(1:np, np, 1, byrow=TRUE), heights=ht)
  # plot1: ranked phenotype
  par(family="sans")
  if(heightPanels[1]>0){
    xlim<-c(0,length(geneList))
    nn<-ifelse(min(geneList)<0,4,3)
    ylim<-ylimPanels[1:2]
    par(mar=c(0.1, 5.0, 1.5, 1.5),mgp=c(2.2,0.6,0),tcl=-0.2,family="sans")
    plot(x=c(1,max(rsc.vec[,1])),y=c(min(geneList),max(geneList)), type="n",
         axes= FALSE,xlab="", ylab=ylabPanels[1], cex.lab=cexlev[1], ylim=ylim,xlim=xlim, ...=...)
    if(min(geneList)<0)abline(h=0,lwd=1.1) #segments(0, 0, length(geneList), 0,col="grey70")
    # thin the curve: keep every sparsity-th point and its neighbor
    sq<-c(1:length(geneList))%%sparsity;sq[sq>1]<-0
    sq<-as.logical(sq)
    lines(x=c(1:length(geneList))[sq],y=geneList[sq],col="#008080",lwd=1.5)
    pp<-pretty(c(geneList,ylimPanels[1:2]),n=nn)
    pp<-pp[(pp >= ylim[1] & pp <= ylim[2])]
    axis(2,line=0, cex.axis=cexlev[2], las=2, at=pp, labels=pp, lwd=1.3,...=...)
    if(!is.null(labPheno)){
      legend("topright", legend=labPheno, col="#008080", pch="", x.intersp=0.5,
             bty="n",cex=cexlev[3], seg.len=1, lwd=2, ...=...)
    }
  }
  #-------------------------------------------------
  # plot2: hit barcode, colored by mode of action (blue = negative targets,
  # red = positive targets)
  if(heightPanels[2]>0){
    par(mar=c(0.0, 5.0, 0, 1.5),mgp=c(2.1,0.25,0),tcl=-0.1,tck=0.15)
    plot(x=c(1,max(rsc.vec[,1])),y=c(0,max(rsc.vec[,2])+1), type="n", axes=FALSE, xlab="",
         ylab="",cex.lab=cexlev[1], ...=...)
    for(i in 1:ng){
      idx<-rsc.vec[,2]==i
      xx<-rsc.vec[idx,1]
      yy<-rsc.vec[idx,2]
      cc<-rsc.vec[idx,3]
      segments(xx,yy-0.9,xx, yy-0.1, col=rsc.colors[cc],lwd=0.35)
    }
    axis(2,las=2, at=c(1:ng)-0.5,labels=ylabPanels[2], line=0, cex.axis=cexlev[3],lwd=1.3, ...=...)
    legend("top", legend=c("negative","positive"), col=rsc.colors, bty="n",cex=cexlev[4],
           horiz=TRUE,seg.len=1,lwd=2,title=NULL,title.adj = 0,inset=-0.1,x.intersp=0.5, ...=...)
  }
  #-------------------------------------------------
  # plot3: up/down running scores. Dotted drop lines mark each tail's
  # enrichment-score position (last match for the down tail, first match for
  # the up tail); dots are drawn last so they sit on top of the curves.
  if(heightPanels[3]>0){
    rsc.colors<-get.alpha(rsc.colors,1.0)
    par(mar=c(5, 5.0, 0.0, 1.5),mgp=c(2.2,0.5,0),tcl=-0.2)
    cc<-as.matrix(runningScoreDown)
    plot(x=c(1,nrow(cc)),y=c(min(cc),max(cc)), type="n", axes=FALSE, xlab="",
         ylim=ylimPanels[c(3,4)],ylab=ylabPanels[3], cex.lab=cexlev[1], ...=...)
    par(mgp=c(2.0,0.5,0))
    title(xlab=xlab, cex.lab=cexlev[1], ...=...)
    if(min(cc)<0)abline(h=0,lwd=1.1)
    #--- down tail: drop lines then thinned curve
    cc<-as.matrix(runningScoreDown)
    for(i in 1:ng){
      yy<-cc[,i]
      xx<-which(yy==enrichmentScoreDown[[i]])
      xx<-xx[length(xx)]
      segments(xx,0, xx, yy[xx], col=rsc.colors[1],lwd=1.5, lty=3)
    }
    for(i in 1:ng){
      yy<-cc[,i]
      xx<-c(1:nrow(cc))
      sq<-c(1:length(xx))%%sparsity;sq[sq>1]<-0
      sq<-as.logical(sq)
      lines(x=xx[sq],y=yy[sq],col=rsc.colors[1],lwd=1.5)
    }
    #--- up tail: drop lines then thinned curve
    cc<-as.matrix(runningScoreUp)
    for(i in 1:ng){
      yy<-cc[,i]
      xx<-which(yy==enrichmentScoreUp[[i]])[1]
      segments(xx,0, xx, yy[xx], col=rsc.colors[2],lwd=1.5, lty=3)
    }
    for(i in 1:ng){
      yy<-cc[,i]
      xx<-c(1:nrow(cc))
      sq<-c(1:length(xx))%%sparsity;sq[sq>1]<-0
      sq<-as.logical(sq)
      lines(x=xx[sq],y=yy[sq],col=rsc.colors[2],lwd=1.5)
    }
    # enrichment-score markers on top of the curves
    cc<-as.matrix(runningScoreDown)
    for(i in 1:ng){
      yy<-cc[,i]
      xx<-which(yy==enrichmentScoreDown[[i]])
      xx<-xx[length(xx)]
      points(xx, yy[xx], bg=rsc.colors[1],col=rsc.colors[1], lwd=1, cex=1, pch=21)
    }
    cc<-as.matrix(runningScoreUp)
    for(i in 1:ng){
      yy<-cc[,i]
      xx<-which(yy==enrichmentScoreUp[[i]])[1]
      points(xx, yy[xx], bg=rsc.colors[2],col=rsc.colors[2], lwd=1, cex=1, pch=21)
    }
    #--- axes and legends (p-values, regulon label, dES statistic)
    pp<-pretty(c(0,nrow(cc)))
    axis(1,cex.axis=cexlev[2], lwd=1.3,at=pp, labels=pp, ...=...)
    axis(2,las=2,cex.axis=cexlev[2], lwd=1.3, ...=...)
    adjpv<-c(adjpv["down"],adjpv["up"],adjpv["pv"])
    lbstat<-paste(c("neg ","pos ","diff "),adjpv,sep="")
    legend("bottomleft", legend=lbstat, col=c(rsc.colors[1],rsc.colors[2],NA), pch=20, bty="n",cex=cexlev[5],
           pt.cex=1.2, title=" Adj. p-value", title.adj = 0, y.intersp=0.85,x.intersp=0.6, ...=...)
    legend("topright", legend=label, col=NA, pch=NA, bty="n",cex=cexlev[1]*1.3, pt.cex=1.2, title=NULL, ...=...)
    legend("bottomright", legend=paste("dES = ",dES,sep=""), col=NA, pch=NA, bty="n",cex=cexlev[1]*0.6, title=NULL, ...=...)
  }
  # restore the graphics state captured at entry
  par(op)
}
| /Alexis_work/project_6/dataset/RTN/R/AllPlotsTNA.R | no_license | yann-zhong/Project_resys_YZ | R | false | false | 25,777 | r | ################################################################################
########################## GSEA1 PLOTS ############################
################################################################################
##------------------------------------------------------------------------------
##Plot enrichment analysis from TNA objects.
##Plot one-tailed enrichment analysis (GSEA-1) results stored in a TNA object.
##'object' must carry results from 'tna.gsea1'; 'tfs' optionally restricts the
##plot to selected regulons, 'ntop' to the top-ranked ones. Graphical settings
##(panel heights/limits/labels, pdf size) are forwarded to 'plot.gsea1'.
tna.plot.gsea1<-function(object, labPheno="", file="tna_gsea1", filepath=".", regulon.order="size",
                         ntop=NULL, tfs=NULL, ylimPanels=c(0.0,3.5,0.0,0.8), heightPanels=c(1,1,3),
                         width=4.4, height=4, ylabPanels=c("Phenotype","Regulon","Enrichment score"),
                         xlab="Position in the ranked list of genes", alpha=0.5, sparsity=10,
                         autoformat=TRUE, plotpdf = TRUE, ...) {
  #checks
  # use is() rather than class(object)!="TNA": a non-TNA input with a
  # multi-class attribute would otherwise give a length>1 condition to '||'
  # (an error in R >= 4.3); is() also respects S4 inheritance
  if(!is(object, "TNA") || object@status$analysis["GSEA1"]!="[x]"){
    cat("-invalid 'GSEA1' status! \n")
    stop("NOTE: gsea plot requires results from 'tna.gsea1' analysis!")
  }
  tnai.checks(name="labPheno",labPheno)
  tnai.checks(name="file",file)
  tnai.checks(name="filepath",filepath)
  tnai.checks(name="ntop",ntop)
  tnai.checks(name="tfs",tfs)
  tnai.checks(name="ylimPanels",ylimPanels)
  tnai.checks(name="heightPanels",heightPanels)
  tnai.checks(name="width",width)
  tnai.checks(name="height",height)
  tnai.checks(name="ylabPanels",ylabPanels)
  tnai.checks(name="xlab",xlab)
  tnai.checks(name="alpha",alpha)
  tnai.checks(name="autoformat",autoformat)
  ##-----get gsea1 results
  if(!is.null(tfs)){
    # keep only rows whose id OR display name matches the requested tfs
    resgsea<-tna.get(object, what="gsea1", reportNames=TRUE)
    idx<-(rownames(resgsea)%in%tfs+resgsea$Regulon%in%tfs)>0
    if(all(!idx)){
      stop("one or more input 'tfs' not found in the 'gsea1' results!")
    }
    resgsea<-resgsea[idx,]
  } else {
    resgsea<-tna.get(object, what="gsea1", ntop=ntop, reportNames=TRUE)
  }
  ##-----get gene sets used in the gsea1 analysis
  if(object@para$gsea1$tnet=="cdt"){
    rgcs<-object@listOfModulators
    if(ylabPanels[2]=="Regulon")ylabPanels[2]<-"Modulators"
  } else if(object@para$gsea1$tnet=="ref"){
    rgcs<-tna.get(object,what="refregulons")
  } else {
    rgcs<-tna.get(object,what="regulons")
  }
  ##-----get args used in the gsea1 analysis
  phenotype<-object@phenotype
  orderAbsValue<-object@para$gsea1$orderAbsValue
  nPermutations<-object@para$gsea1$nPermutations
  exponent<-object@para$gsea1$exponent
  ##-----send to a common plot function
  plot.gsea1(resgsea=resgsea, rgcs=rgcs, phenotype=phenotype,
             orderAbsValue=orderAbsValue, nPermutations=nPermutations,
             exponent=exponent, labPheno=labPheno, file=file, filepath=filepath,
             regulon.order=regulon.order,ylimPanels=ylimPanels,
             heightPanels=heightPanels, width=width, height=height,
             ylabPanels=ylabPanels,xlab=xlab,alpha=alpha, sparsity=sparsity,
             autoformat=autoformat, plotpdf=plotpdf, ...=...)
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea1
# Shared worker behind tna.plot.gsea1: orders the GSEA-1 result table, ranks
# the phenotype, merges the per-regulon running scores and renders the figure.
# 'rgcs' is the list of gene sets (regulons) used in the enrichment;
# 'nPermutations'/'exponent' are the settings of the original GSEA run.
plot.gsea1<-function(resgsea, rgcs, phenotype, orderAbsValue, nPermutations, exponent,
                     labPheno="tna", file=labPheno, filepath=".", regulon.order="size",
                     ylimPanels=c(0.0,3.5,0.0,0.8), heightPanels=c(1,1,3), width=6,
                     height=5, ylabPanels=c("Phenotype","Regulon","Enrichment score"),
                     xlab="Position in the ranked list of genes", alpha=0.5, sparsity=10,
                     autoformat=TRUE, plotpdf = TRUE, ...) {
  ##return valid arg
  regulon.order=tnai.checks(name="regulon.order",regulon.order)
  ##-----check available results
  if(!is.null(resgsea) && nrow(resgsea)>0){
    # sort regulons by the requested column; only 'Observed.Score' is ordered
    # decreasingly
    if(regulon.order!='none'){
      decreasing<-ifelse(regulon.order=='Observed.Score',TRUE,FALSE)
      resgsea<-resgsea[sort.list(resgsea[,regulon.order],decreasing=decreasing),]
    }
    # gs.names: regulon ids (rownames), labelled with their display names
    gs.names<-rownames(resgsea)
    names(gs.names)<-resgsea$Regulon
  } else {
    stop("gsea1 is empty or null!")
  }
  ##-----get ordered phenotype
  if(orderAbsValue)phenotype<-abs(phenotype)
  phenotype<-phenotype[order(phenotype,decreasing=TRUE)]
  ##----get stat resolution
  # the smallest p-value the permutation test can resolve
  pvresolu<-signif(1/(nPermutations+1), digits=5)
  pvcutoff<-paste("< ",as.character(format(pvresolu,scientific=TRUE,digits=2)),collapse="",sep="")
  ##-----get merged data
  tests<-get.merged.data1(gs.names,phenotype,rgcs,resgsea,exponent)
  ##-----fix pvalue report for the resolution
  # p-values at the resolution limit are reported as "< limit", not "= limit"
  idx<-tests$pv==pvresolu
  tests$adjpv[idx]<-pvcutoff
  ##-----check format
  if(autoformat)ylimPanels<-check.format1(tests)
  ##-----make plot
  make.plot1(tests,labPheno,file,filepath,heightPanels,ylimPanels,ylabPanels,xlab,width,
             height,alpha,sparsity,plotpdf, ...=...)
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea1
# Runs gseaScores4RTN for each regulon and assembles all plotting inputs into
# one list: per-regulon enrichment/running scores, hit positions, (adjusted)
# p-values from 'resgsea', the ranked phenotype, and the regulon labels.
get.merged.data1<-function(gs.names,phenotype,rgcs,resgsea,exponent){
  res<-list()
  for(gs.name in gs.names){
    # mode="graph" returns the running score profile and hit positions
    test<-gseaScores4RTN(geneList=phenotype,geneSet=rgcs[[gs.name]],
                         exponent=exponent,mode="graph")
    res$enrichmentScores[[gs.name]]<-test$enrichmentScore
    res$runningScores[[gs.name]]<-test$runningScore
    res$positions[[gs.name]]<-test$positions
    res$pvals[[gs.name]]<-resgsea[gs.name,][["Pvalue"]]
    res$adjpvals[[gs.name]]<-resgsea[gs.name,][["Adjusted.Pvalue"]]
  }
  tests<-list()
  tests[["enrichmentScores"]]<-res$enrichmentScores
  # one column per regulon; rows follow the ranked phenotype
  tests[["runningScores"]]<-as.data.frame(res$runningScores,stringsAsFactors=FALSE)
  tests[["positions"]]<-as.data.frame(res$positions,stringsAsFactors=FALSE)
  tests[["pv"]]<-res$pvals
  # pre-formatted "= <value>" strings used in the plot legend
  tests[["adjpv"]]<-paste("= ",as.character(format(res$adjpvals,scientific=TRUE,digits=2)),sep="")
  tests[["geneList"]]<-phenotype
  tests[["labels"]]<-names(gs.names)
  tests
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea1
# Derives 'ylimPanels' (4 values) from the merged gsea1 data: positions 1:2
# bound the phenotype panel, 3:4 bound the running-score panel.
check.format1<-function(tests){
  ylimPanels<-rep(0,4)
  # phenotype limits, expanded outwards to the nearest integer
  # (the redundant min(min(...)) of the original was simplified; same value)
  tp<-c(min(tests$geneList),max(tests$geneList))
  tpp<-as.integer(tp)
  if(tp[1]<tpp[1])tpp[1]=tpp[1]-1
  if(tp[2]>tpp[2])tpp[2]=tpp[2]+1
  ylimPanels[1:2]<-tpp
  # running-score limits across all regulons; seq_along() replaces 1:length()
  # (safe when 'labels' is empty)
  tp<-sapply(seq_along(tests$labels),function(i){
    c(min(tests$runningScores[,i]),max(tests$runningScores[,i]))
  })
  tp<-c(min(tp),max(tp))
  # anchor the score axis at zero when everything is non-negative
  if(min(tests$geneList)>=0 && tp[1]>=0)tp[1]=0
  # expand outwards to one decimal
  tpp<-round(tp,digits=1)
  if(tp[1]<tpp[1])tpp[1]=tpp[1]-0.1
  if(tp[2]>tpp[2])tpp[2]=tpp[2]+0.1
  ylimPanels[3:4]<-tpp
  ylimPanels
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea1
# Opens the pdf device (when 'plotpdf' is TRUE), renders the 3-panel gsea1
# figure via gsplot1, then closes the device.
make.plot1 <- function(tests, labPheno, file, filepath, heightPanels, ylimPanels, ylabPanels,
                       xlab, width, height, alpha, sparsity, plotpdf, ...) {
  if (plotpdf){
    pdf(file=file.path(filepath, paste(file,".pdf", sep="")), width=width, height=height)
  }
  # use the full element names set by get.merged.data1 ('runningScores',
  # 'enrichmentScores'); the original relied on R's partial '$' matching
  # ('tests$runningScore'), which silently returns NULL if names change
  gsplot1(tests$runningScores, tests$enrichmentScores, tests$positions, tests$adjpv, tests$geneList,
          tests$labels, heightPanels, ylimPanels, ylabPanels, xlab, labPheno, alpha, sparsity,
          ...=... )
  if (plotpdf)dev.off()
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea1
# Low-level renderer for the gsea1 figure, drawn with base graphics in up to
# three stacked panels: (1) ranked phenotype, (2) one row of hit marks per
# regulon, (3) running enrichment scores. A panel is skipped when its entry in
# 'heightPanels' is 0. 'sparsity' thins dense line plots by keeping only every
# sparsity-th point. '...=...' forwards the caller's graphical parameters.
gsplot1 <- function(runningScore, enrichmentScore, positions, adjpv,
                    geneList, labels, heightPanels, ylimPanels, ylabPanels, xlab,
                    labPheno, alpha, sparsity, ...) {
  #-------------------------------------------------
  #set text size levels
  cexlev=c(1.3,1.2,1.1,0.9,0.8)
  #set colors
  rsc<-positions
  ng<-ncol(rsc)
  # helper: apply an alpha channel to a vector of colors
  get.alpha<-function (colour,alpha=1.0) {
    col <- col2rgb(colour, TRUE)/255
    alpha <- rep(alpha, length.out = length(colour))
    rgb(col[1, ], col[2, ], col[3, ], alpha)
  }
  # one color per regulon; interpolate when more regulons than palette colors
  rsc.colors<-get.alpha(palette(), alpha)
  if(ng>length(rsc.colors)){
    rsc.colors<-get.alpha(colorRampPalette(rsc.colors)(ng),alpha)
  } else if(ng<length(rsc.colors)){
    rsc.colors<-rsc.colors[1:ng]
  }
  #-------------------------------------------------
  #set positions that hits will appear in the plot
  # recode nonzero hits of column i to the row index (ng..1) used in panel 2
  for(i in 1:ng){
    idx<-rsc[,i]!=0
    rsc[idx,i]<-c(ng:1)[i]
  }
  rsc<-as.matrix(rsc)
  # rsc.vec: (rank position, row index) pairs for every hit, zeros dropped
  rsc.vec<-cbind(rep(1:nrow(rsc),ng),as.vector(rsc))
  rsc.vec<-rsc.vec[rsc.vec[,2]!=0,]
  #-------------------------------------------------
  # layout
  np<-sum(heightPanels>0)
  ht<-heightPanels
  ht<-ht[ht>0]
  layout(matrix(1:np, np, 1, byrow=TRUE), heights=ht)
  # plot1: ranked phenotype profile
  par(family="sans")
  if(heightPanels[1]>0){
    par(mar=c(0.5, 6.5, 1.5, 1.0),mgp=c(4.5,0.5,0),tcl=-0.2)
    plot(x=c(1,max(rsc.vec[,1])),y=c(min(geneList),max(geneList)), type="n",
         axes= FALSE,xlab="", ylab=ylabPanels[1], cex.lab=cexlev[1], ylim=ylimPanels[1:2], ...=...)
    if(min(geneList)<0)abline(h=0,lwd=0.6)
    # keep every sparsity-th point to thin the phenotype line
    sq<-c(1:length(geneList))%%sparsity;sq[sq>1]<-0
    sq<-as.logical(sq)
    lines(x=c(1:length(geneList))[sq],y=geneList[sq],col="grey75",lwd=1.4)
    nn=ifelse(min(geneList)<0,4,2)
    pp<-pretty(c(geneList,ylimPanels[1:2]),n=nn)
    axis(2,line=0, cex.axis=cexlev[2], las=2, at=pp, lwd=1.2, labels=pp,...=...)
    if(!is.null(labPheno)){
      legend("topright", legend=labPheno, col="grey75", pch="---",
             bty="n",cex=cexlev[3], pt.cex=1.5, ...=...)
    }
  }
  #-------------------------------------------------
  # plot2: one row of hit tick-marks per regulon
  if(heightPanels[2]>0){
    par(mar=c(0.0, 6.5, 0.0, 1.0),mgp=c(4.5,0.5,0),tcl=-0.2)
    plot(x=c(1,max(rsc.vec[,1])),y=c(0,max(rsc.vec[,2])), type="n", axes=FALSE, xlab="",
         ylab=ylabPanels[2],cex.lab=cexlev[1], ...=...)
    for(i in 1:ng){
      idx<-rsc.vec[,2]==i
      xx<-rsc.vec[idx,1]
      yy<-rsc.vec[idx,2]
      segments(xx,yy-0.9,xx, yy-0.1, col=rev(rsc.colors)[i],lwd=0.2)
    }
    axis(2,las=2, at=c(1:ng)-0.5,labels=rev(labels), line=0, cex.axis=cexlev[5],lwd=1.2 , ...=...)
  }
  #-------------------------------------------------
  # plot3: running enrichment scores with peak markers and legend
  if(heightPanels[3]>0){
    # panel 3 uses fully opaque colors
    rsc.colors<-get.alpha(rsc.colors,1.0)
    par(mar=c(5, 6.5, 0.0, 1.0),mgp=c(4.5,0.5,0),tcl=-0.2)
    cc<-as.matrix(runningScore)
    plot(x=c(1,nrow(cc)),y=c(min(cc),max(cc)), type="n", axes=FALSE, xlab="",
         ylim=ylimPanels[c(3,4)],ylab=ylabPanels[3], cex.lab=cexlev[1], ...=...)
    par(mgp=c(3.0,0.5,0))
    title(xlab=xlab, cex.lab=cexlev[1], ...=...)
    if(min(cc)<0)abline(h=0,lwd=1.2)
    # dotted drop-line at the position where the running score hits its peak
    for(i in 1:ng){
      yy<-cc[,i]
      xx<-c(1:nrow(cc))
      xx<-which(yy==enrichmentScore[[i]])
      segments(xx,0, xx, yy[xx], col=rsc.colors[i],lwd=1, lty=3)
    }
    for(i in 1:ng){
      yy<-cc[,i]
      xx<-c(1:nrow(cc))
      sq<-c(1:length(xx))%%sparsity;sq[sq>1]<-0
      sq<-as.logical(sq)
      lines(x=xx[sq],y=yy[sq],col=rsc.colors[i],lwd=0.7)
    }
    axis(1,cex.axis=cexlev[2], lwd=1.2, ...=...)
    axis(2,las=2,cex.axis=cexlev[2], lwd=1.2, ...=...)
    # append the pre-formatted adjusted p-value to each regulon label
    labels<-paste(labels," (adj.p ",format(adjpv,scientific=TRUE,digits=2),")",sep="")
    #labels=sub("=","<",labels)
    legend("topright", legend=labels, col=rsc.colors, pch="---", bty="n",cex=cexlev[4],
           pt.cex=1.2, title=ylabPanels[2], title.adj = 0, ...=...)
  }
}
################################################################################
########################## GSEA2 PLOTS ############################
################################################################################
##------------------------------------------------------------------------------
##Plot 2-tailed enrichment analysis from TNA objects.
##Plot two-tailed enrichment analysis (GSEA-2) results stored in a TNA object.
##'object' must carry results from 'tna.gsea2'; one figure is produced per
##regulon. 'tfs'/'ntop' restrict which regulons are plotted; graphical
##settings are forwarded to 'plot.gsea2'.
tna.plot.gsea2<-function(object, labPheno="", file="tna_gsea2", filepath=".", regulon.order="size",
                         ntop=NULL, tfs=NULL, ylimPanels=c(-3.0,3.0,-0.5,0.5), heightPanels=c(2.0,0.8,5.0),
                         width=2.8, height=3.0, ylabPanels=c("Phenotype","Regulon","Enrichment score"),
                         xlab="Position in the ranked list of genes",
                         alpha=1.0, sparsity=10, autoformat=TRUE, plotpdf = TRUE, ...) {
  #checks
  # use is() rather than class(object)!="TNA": a non-TNA input with a
  # multi-class attribute would otherwise give a length>1 condition to '||'
  # (an error in R >= 4.3); is() also respects S4 inheritance
  if(!is(object, "TNA") || object@status$analysis["GSEA2"]!="[x]"){
    cat("-invalid 'GSEA2' status! \n")
    stop("NOTE: gsea plot requires results from 'tna.gsea2' analysis!")
  }
  tnai.checks(name="labPheno",labPheno)
  tnai.checks(name="file",file)
  tnai.checks(name="filepath",filepath)
  tnai.checks(name="ntop",ntop)
  tnai.checks(name="tfs",tfs)
  tnai.checks(name="ylimPanels",ylimPanels)
  tnai.checks(name="heightPanels",heightPanels)
  tnai.checks(name="width",width)
  tnai.checks(name="height",height)
  tnai.checks(name="ylabPanels",ylabPanels)
  tnai.checks(name="xlab",xlab)
  tnai.checks(name="alpha",alpha)
  tnai.checks(name="autoformat",autoformat)
  ##-----get gsea2 results
  if(!is.null(tfs)){
    # fetch all results, then keep rows whose id OR display name matches 'tfs';
    # the three tables (differential/positive/negative) stay row-aligned
    resgsea<-tna.get(object, what="gsea2", ntop=-1, reportNames=TRUE)
    idx<-(rownames(resgsea$differential)%in%tfs+resgsea$differential$Regulon%in%tfs)>0
    if(all(!idx)){
      stop("one or more input 'tfs' not found in the 'gsea2' results!")
    }
    resgsea$differential<-resgsea$differential[idx,]
    resgsea$positive<-resgsea$positive[idx,]
    resgsea$negative<-resgsea$negative[idx,]
  } else {
    resgsea<-tna.get(object, what="gsea2", ntop=ntop, reportNames=TRUE)
  }
  ##-----get gene sets used in the current gsea analysis
  if(object@para$gsea2$tnet=="cdt"){
    rgcs<-object@listOfModulators
    if(ylabPanels[2]=="Regulon")ylabPanels[2]<-"Modulators"
  } else if(object@para$gsea2$tnet=="ref"){
    rgcs<-tna.get(object,what="refregulons.and.mode")
  } else if(object@para$gsea2$tnet=="nondpi"){
    rgcs<-tna.get(object,what="nondpiregulons.and.mode")
  } else {
    rgcs<-tna.get(object,what="regulons.and.mode")
  }
  ##-----get args used in the gsea2 analysis
  phenotype<-object@phenotype
  nPermutations<-object@para$gsea2$nPermutations
  exponent<-object@para$gsea2$exponent
  ##-----send to a common plot function
  plot.gsea2(resgsea=resgsea, rgcs=rgcs, phenotype=phenotype,
             nPermutations=nPermutations, exponent=exponent,
             labPheno=labPheno, file=file, filepath=filepath,
             regulon.order=regulon.order, ntop=ntop, tfs=tfs,
             ylimPanels=ylimPanels, heightPanels=heightPanels,
             width=width, height=height, ylabPanels=ylabPanels,
             xlab=xlab, alpha=alpha, sparsity=sparsity,
             autoformat=autoformat, plotpdf=plotpdf, ...=...)
}
##------------------------------------------------------------------------------
##Plot 2-tailed enrichment analysis from TNA objects.
# Shared worker behind tna.plot.gsea2: orders the three aligned result tables
# (differential/positive/negative), ranks the phenotype and produces ONE
# figure per regulon, each carrying the up/down/differential statistics.
plot.gsea2<-function(resgsea, rgcs, phenotype, nPermutations, exponent,
                     labPheno="tna", file=labPheno, filepath=".", regulon.order="Regulon.Size",
                     ntop=NULL, tfs=NULL, ylimPanels=c(-3.0,3.0,-0.5,0.5), heightPanels=c(1.5,0.7,5.0),
                     width=4, height=3.5, ylabPanels=c("Phenotype","Regulon","Enrichment score"),
                     xlab="Position in the ranked list of genes", alpha=1.0,
                     sparsity=10, autoformat=TRUE, plotpdf=TRUE, ...) {
  ##return valid arg
  regulon.order=tnai.checks(name="regulon.order",regulon.order)
  ##-----check available results
  if(!is.null(resgsea) && nrow(resgsea$differential)>0){
    # sort by the requested column; keep the three tables row-aligned
    if(regulon.order!='none'){
      decreasing<-ifelse(regulon.order=='Observed.Score',TRUE,FALSE)
      idx<-sort.list(resgsea$differential[,regulon.order],decreasing=decreasing)
      resgsea$differential<-resgsea$differential[idx,]
      resgsea$positive<-resgsea$positive[idx,]
      resgsea$negative<-resgsea$negative[idx,]
    }
    # gs.names: regulon ids (rownames), labelled with their display names
    gs.names<-rownames(resgsea$differential)
    names(gs.names)<-resgsea$differential$Regulon
  } else {
    stop("gsea2 is empty or null!")
  }
  ##-----get ordered phenotype
  phenotype<-phenotype[order(phenotype,decreasing=TRUE)]
  ##----get stat resolution
  # the smallest p-value the permutation test can resolve
  pvresolu<-signif(1/(nPermutations+1), digits=5)
  pvcutoff<-paste("< ",as.character(format(pvresolu,scientific=TRUE,digits=2)),collapse="",sep="")
  ##----plot
  # one figure per regulon
  for(i in 1:length(gs.names)){
    ##-----get merged data
    tests<-get.merged.data2(gs.names[i],phenotype,rgcs,resgsea$differential[i,,drop=FALSE],exponent)
    # attach the one-tailed (up/down) statistics for this regulon
    tests$pv[["up"]]<-resgsea$positive[i,"Pvalue"]
    tests$adjpv[["up"]]<-resgsea$positive[i,"Adjusted.Pvalue"]
    tests$pv[["down"]]<-resgsea$negative[i,"Pvalue"]
    tests$adjpv[["down"]]<-resgsea$negative[i,"Adjusted.Pvalue"]
    tests$adjpv[]<-paste("= ",as.character(format(tests$adjpv,scientific=TRUE,digits=2)),sep="")
    tests$dES<-resgsea$differential[i,"Observed.Score"]
    ##-----fix pvalue report for the resolution
    # p-values at the resolution limit are reported as "< limit", not "= limit"
    idx<-tests$pv==pvresolu
    tests$adjpv[idx]<-pvcutoff
    ##-----check format
    if(autoformat)ylimPanels<-check.format2(tests)
    ##-----make plot
    make.plot2(tests,labPheno,file,filepath,heightPanels,ylimPanels,ylabPanels,
               xlab,width,height,alpha,sparsity, plotpdf, ...=...)
  }
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea2
# Assembles the plotting inputs for ONE regulon in the 2-tailed analysis:
# runs gseaScores4RTN on the full gene set (hit positions) and separately on
# its positive (gs>0) and negative (gs<0) targets (running scores).
get.merged.data2<-function(gs.name,phenotype,rgcs,resgsea,exponent){
  res<-list()
  gs<-rgcs[[gs.name]]
  # full gene set: used for the hit-position track
  test<-gseaScores4RTN(geneList=phenotype, geneSet=gs,
                       exponent=exponent, mode="graph")
  res$positions[[gs.name]]<-test$positions
  res$pvals[[gs.name]]<-resgsea[gs.name,][["Pvalue"]]
  res$adjpvals[[gs.name]]<-resgsea[gs.name,][["Adjusted.Pvalue"]]
  # split by the sign of the target mode: positive vs negative targets
  testup<-gseaScores4RTN(geneList=phenotype, geneSet=gs[gs>0],
                         exponent=exponent, mode="graph")
  testdown<-gseaScores4RTN(geneList=phenotype, geneSet=gs[gs<0],
                           exponent=exponent, mode="graph")
  res$testup$enrichmentScores[[gs.name]]<-testup$enrichmentScore
  res$testup$runningScores[[gs.name]]<-testup$runningScore
  res$testdown$enrichmentScores[[gs.name]]<-testdown$enrichmentScore
  res$testdown$runningScores[[gs.name]]<-testdown$runningScore
  tests<-list()
  tests$testup[["enrichmentScores"]]<-res$testup$enrichmentScores
  tests$testup[["runningScores"]]<-as.data.frame(res$testup$runningScores,stringsAsFactors=FALSE)
  tests$testdown[["enrichmentScores"]]<-res$testdown$enrichmentScores
  tests$testdown[["runningScores"]]<-as.data.frame(res$testdown$runningScores,stringsAsFactors=FALSE)
  tests[["positions"]]<-as.data.frame(res$positions,stringsAsFactors=FALSE)
  tests[["geneList"]]<-phenotype
  tests[["label"]]<-names(gs.name)
  tests$pv[["pv"]]<-res$pvals
  tests$adjpv[["pv"]]<-res$adjpvals
  tests
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea2
# Derives 'ylimPanels' (4 values) for the 2-tailed plot: positions 1:2 bound
# the phenotype panel, 3:4 bound the running-score panel (with a guaranteed
# minimum visible range of [-0.5, 0.5]).
check.format2<-function(tests){
  ylimPanels<-rep(0,4)
  # phenotype limits, expanded outwards to the nearest integer
  tp<-c(min(tests$geneList),max(tests$geneList))
  tpp<-as.integer(tp)
  if(tp[1]<tpp[1])tpp[1]=tpp[1]-1
  if(tp[2]>tpp[2])tpp[2]=tpp[2]+1
  ylimPanels[1:2]<-tpp
  # score limits pooled over the up- and down-target running scores;
  # seq_along() replaces 1:length() (safe when 'label' is empty)
  tp<-sapply(seq_along(tests$label),function(i){
    tp1<-min(c(tests$testup$runningScores[,i],tests$testdown$runningScores[,i]))
    tp2<-max(c(tests$testup$runningScores[,i],tests$testdown$runningScores[,i]))
    c(tp1,tp2)
  })
  tp<-c(min(tp),max(tp))
  # anchor the score axis at zero when everything is non-negative
  if(min(tests$geneList)>=0 && tp[1]>=0)tp[1]=0
  # expand outwards to one decimal
  tpp<-round(tp,digits=1)
  if(tp[1]<tpp[1])tpp[1]=tpp[1]-0.1
  if(tp[2]>tpp[2])tpp[2]=tpp[2]+0.1
  # enforce at least [-0.5, 0.5] so the score panel never collapses
  tpp[1]<-ifelse(tpp[1] < (-0.5),tpp[1],-0.5)
  tpp[2]<-ifelse(tpp[2] > ( 0.5),tpp[2], 0.5)
  ylimPanels[3:4]<-tpp
  ylimPanels
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea2
# Opens one pdf per regulon (the file name is suffixed with the regulon
# label), renders the 3-panel gsea2 figure via gsplot2, closes the device.
make.plot2 <- function(tests, labPheno, file, filepath, heightPanels, ylimPanels, ylabPanels,
                       xlab, width, height, alpha, sparsity, plotpdf,...) {
  if (plotpdf){
    pdf(file=file.path(filepath, paste(file,"_",tests$label,".pdf", sep="")),
        width=width, height=height)
  }
  # use the full element names set by get.merged.data2 ('runningScores',
  # 'enrichmentScores'); the original relied on R's partial '$' matching
  # ('tests$testup$runningScore'), which silently returns NULL if names change
  gsplot2(tests$testup$runningScores, tests$testup$enrichmentScores,
          tests$testdown$runningScores, tests$testdown$enrichmentScores,tests$dES,
          tests$positions, tests$adjpv, tests$geneList, tests$label, heightPanels,
          ylimPanels, ylabPanels, xlab, labPheno, alpha, sparsity, ...=... )
  if (plotpdf)dev.off()
}
#-------------------------------------------------------------------------------------
#--subfunction for tna.plot.gsea2
# Low-level renderer for the 2-tailed gsea figure: (1) ranked phenotype,
# (2) a single hit track colored by target sign (negative=blue, positive=red),
# (3) the two running-score curves with peak markers and p-value legend.
# A panel is skipped when its entry in 'heightPanels' is 0; 'sparsity' thins
# dense line plots. Graphical state is saved and restored via par(op).
gsplot2 <- function(runningScoreUp, enrichmentScoreUp, runningScoreDown, enrichmentScoreDown,
                    dES, positions, adjpv, geneList, label, heightPanels, ylimPanels, ylabPanels,
                    xlab, labPheno, alpha, sparsity, ...) {
  #-------------------------------------------------
  # recode hit signs to color indices: +1 -> 2 (positive), -1 -> 1 (negative)
  positions<-as.matrix(positions)
  positions[positions==1]=2
  positions[positions==-1]=1
  #set text size levels
  # NOTE(review): the first assignment is immediately overwritten; only the
  # second cexlev vector is effective
  cexlev=c(1.3,1.2,1.1,0.9,0.8)
  cexlev=c(1.1,1.0,1.0,1.0,0.9)
  #set colors
  ng<-ncol(positions)
  # helper: apply an alpha channel to a vector of colors
  get.alpha<-function (colour,alpha=1.0) {
    col <- col2rgb(colour, TRUE)/255
    alpha <- rep(alpha, length.out = length(colour))
    rgb(col[1, ], col[2, ], col[3, ], alpha)
  }
  # fixed two-color scheme: [1] light blue = negative, [2] light red = positive
  rsc.colors<-get.alpha(c("#96D1FF","#FF8E91"), alpha)
  #-------------------------------------------------
  #set hits
  # rsc1 holds the track row index, rsc2 the color index (sign)
  rsc1<-rsc2<-positions
  for(i in 1:ng){
    idx<-rsc1[,i]!=0
    rsc1[idx,i]<-i
  }
  # rsc.vec: (rank position, track row, color index) per hit, zeros dropped
  rsc.vec<-cbind(rep(1:nrow(rsc1),ng),as.vector(rsc1),as.vector(rsc2))
  rsc.vec<-rsc.vec[rsc.vec[,2]!=0,]
  #-------------------------------------------------
  # layout
  op <- par(no.readonly = TRUE)
  np<-sum(heightPanels>0)
  ht<-heightPanels
  ht<-ht[ht>0]
  layout(matrix(1:np, np, 1, byrow=TRUE), heights=ht)
  # plot1: ranked phenotype profile
  par(family="sans")
  if(heightPanels[1]>0){
    xlim<-c(0,length(geneList))
    nn<-ifelse(min(geneList)<0,4,3)
    ylim<-ylimPanels[1:2]
    par(mar=c(0.1, 5.0, 1.5, 1.5),mgp=c(2.2,0.6,0),tcl=-0.2,family="sans")
    plot(x=c(1,max(rsc.vec[,1])),y=c(min(geneList),max(geneList)), type="n",
         axes= FALSE,xlab="", ylab=ylabPanels[1], cex.lab=cexlev[1], ylim=ylim,xlim=xlim, ...=...)
    if(min(geneList)<0)abline(h=0,lwd=1.1) #segments(0, 0, length(geneList), 0,col="grey70")
    # keep every sparsity-th point to thin the phenotype line
    sq<-c(1:length(geneList))%%sparsity;sq[sq>1]<-0
    sq<-as.logical(sq)
    lines(x=c(1:length(geneList))[sq],y=geneList[sq],col="#008080",lwd=1.5)
    pp<-pretty(c(geneList,ylimPanels[1:2]),n=nn)
    pp<-pp[(pp >= ylim[1] & pp <= ylim[2])]
    axis(2,line=0, cex.axis=cexlev[2], las=2, at=pp, labels=pp, lwd=1.3,...=...)
    if(!is.null(labPheno)){
      legend("topright", legend=labPheno, col="#008080", pch="", x.intersp=0.5,
             bty="n",cex=cexlev[3], seg.len=1, lwd=2, ...=...)
    }
  }
  #-------------------------------------------------
  # plot2: single hit track, tick marks colored by target sign
  if(heightPanels[2]>0){
    par(mar=c(0.0, 5.0, 0, 1.5),mgp=c(2.1,0.25,0),tcl=-0.1,tck=0.15)
    plot(x=c(1,max(rsc.vec[,1])),y=c(0,max(rsc.vec[,2])+1), type="n", axes=FALSE, xlab="",
         ylab="",cex.lab=cexlev[1], ...=...)
    for(i in 1:ng){
      idx<-rsc.vec[,2]==i
      xx<-rsc.vec[idx,1]
      yy<-rsc.vec[idx,2]
      cc<-rsc.vec[idx,3]
      segments(xx,yy-0.9,xx, yy-0.1, col=rsc.colors[cc],lwd=0.35)
    }
    axis(2,las=2, at=c(1:ng)-0.5,labels=ylabPanels[2], line=0, cex.axis=cexlev[3],lwd=1.3, ...=...)
    legend("top", legend=c("negative","positive"), col=rsc.colors, bty="n",cex=cexlev[4],
           horiz=TRUE,seg.len=1,lwd=2,title=NULL,title.adj = 0,inset=-0.1,x.intersp=0.5, ...=...)
  }
  #-------------------------------------------------
  # plot3: running scores for down- (blue) and up- (red) targets
  if(heightPanels[3]>0){
    # panel 3 uses fully opaque colors
    rsc.colors<-get.alpha(rsc.colors,1.0)
    par(mar=c(5, 5.0, 0.0, 1.5),mgp=c(2.2,0.5,0),tcl=-0.2)
    cc<-as.matrix(runningScoreDown)
    plot(x=c(1,nrow(cc)),y=c(min(cc),max(cc)), type="n", axes=FALSE, xlab="",
         ylim=ylimPanels[c(3,4)],ylab=ylabPanels[3], cex.lab=cexlev[1], ...=...)
    par(mgp=c(2.0,0.5,0))
    title(xlab=xlab, cex.lab=cexlev[1], ...=...)
    if(min(cc)<0)abline(h=0,lwd=1.1)
    #---
    # down-target curve: dotted drop-line at the LAST position matching the
    # enrichment score, then the thinned curve itself
    cc<-as.matrix(runningScoreDown)
    for(i in 1:ng){
      yy<-cc[,i]
      xx<-which(yy==enrichmentScoreDown[[i]])
      xx<-xx[length(xx)]
      segments(xx,0, xx, yy[xx], col=rsc.colors[1],lwd=1.5, lty=3)
    }
    for(i in 1:ng){
      yy<-cc[,i]
      xx<-c(1:nrow(cc))
      sq<-c(1:length(xx))%%sparsity;sq[sq>1]<-0
      sq<-as.logical(sq)
      lines(x=xx[sq],y=yy[sq],col=rsc.colors[1],lwd=1.5)
    }
    #---
    # up-target curve: drop-line at the FIRST position matching the score
    cc<-as.matrix(runningScoreUp)
    for(i in 1:ng){
      yy<-cc[,i]
      xx<-which(yy==enrichmentScoreUp[[i]])[1]
      segments(xx,0, xx, yy[xx], col=rsc.colors[2],lwd=1.5, lty=3)
    }
    for(i in 1:ng){
      yy<-cc[,i]
      xx<-c(1:nrow(cc))
      sq<-c(1:length(xx))%%sparsity;sq[sq>1]<-0
      sq<-as.logical(sq)
      lines(x=xx[sq],y=yy[sq],col=rsc.colors[2],lwd=1.5)
    }
    # peak markers drawn last so they sit on top of the curves
    cc<-as.matrix(runningScoreDown)
    for(i in 1:ng){
      yy<-cc[,i]
      xx<-which(yy==enrichmentScoreDown[[i]])
      xx<-xx[length(xx)]
      points(xx, yy[xx], bg=rsc.colors[1],col=rsc.colors[1], lwd=1, cex=1, pch=21)
    }
    cc<-as.matrix(runningScoreUp)
    for(i in 1:ng){
      yy<-cc[,i]
      xx<-which(yy==enrichmentScoreUp[[i]])[1]
      points(xx, yy[xx], bg=rsc.colors[2],col=rsc.colors[2], lwd=1, cex=1, pch=21)
    }
    #---
    pp<-pretty(c(0,nrow(cc)))
    axis(1,cex.axis=cexlev[2], lwd=1.3,at=pp, labels=pp, ...=...)
    axis(2,las=2,cex.axis=cexlev[2], lwd=1.3, ...=...)
    # legend: down/up/differential adjusted p-values, then label and dES
    adjpv<-c(adjpv["down"],adjpv["up"],adjpv["pv"])
    lbstat<-paste(c("neg ","pos ","diff "),adjpv,sep="")
    legend("bottomleft", legend=lbstat, col=c(rsc.colors[1],rsc.colors[2],NA), pch=20, bty="n",cex=cexlev[5],
           pt.cex=1.2, title=" Adj. p-value", title.adj = 0, y.intersp=0.85,x.intersp=0.6, ...=...)
    legend("topright", legend=label, col=NA, pch=NA, bty="n",cex=cexlev[1]*1.3, pt.cex=1.2, title=NULL, ...=...)
    legend("bottomright", legend=paste("dES = ",dES,sep=""), col=NA, pch=NA, bty="n",cex=cexlev[1]*0.6, title=NULL, ...=...)
  }
  # restore the caller's graphical parameters
  par(op)
}
|
# title: ps3.R
# author: nadia lucas
# updated: march 2021
#================================================#
# set up
#================================================#
# packages
library(tidyverse)
library(tigris)
library(sf)
library(RColorBrewer)
library(ggmap)
library(ggthemes)
library(mapview)
library(webshot)
library(ggpubr)
library(Hmisc)
library(knitr)
library(raster)
library(sf)
library(sp)
library(rgdal)
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged
# (they clobber the caller's session); kept to preserve the original workflow.
rm(list = ls())
# set directory
ddir <- "/Users/nadialucas/Documents/ercot"
setwd(ddir)
datadir <- "/Users/nadialucas/Dropbox/research/ercot"
# BUG FIX: "\ " is not a valid escape in an R string literal (parse error);
# the space needs no escaping. file.path() replaces the manual paste().
prodtable <- read.csv(file.path(datadir, "raw_data", "Production Table.CSV"))
county_names <- unique(prodtable$County.Parish)  # '<-' for assignment, not '='
# match each county to corresponding load zone
# read in tiffs
#lz_raster <- raster(paste(datadir, "/raw_data/maps/Load-Zone-Map_2020_full.tiff", sep = ""))
#lz_raster <- setMinMax(lz_raster)
#crs(lz_raster) <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
prodtable <- prodtable %>% rename(county = County.Parish) %>%
  mutate(lz = "")
#template
# first get at the border counties
# Border counties: map each county name to its ERCOT load zone with a named
# lookup vector instead of one mutate(ifelse(...)) per county — same result,
# far less repetition. Counties not in the table keep their current lz.
border_lz <- c(
  "TRAVIS (TX)" = "AEN", "BEXAR (TX)" = "H", "CHAMBERS (TX)" = "H",
  "HARRIS (TX)" = "H", "MONTGOMERY (TX)" = "H", "GALVESTON-LB (TX)" = "H",
  "GALVESTON-SB (TX)" = "H", "GALVESTON (TX)" = "H", "FORT BEND (TX)" = "H",
  "MATAGORDA (TX)" = "H", "WHARTON (TX)" = "S", "COLORADO (TX)" = "S",
  "AUSTIN (TX)" = "S", "WASHINGTON (TX)" = "S", "GRIMES (TX)" = "N",
  "BRAZOS (TX)" = "N", "BRAZOS-SB (TX)" = "N", "BRAZOS-LB (TX)" = "N",
  "BURLESON (TX)" = "N", "BRAZORIA (TX)" = "H", "MILAM (TX)" = "S",
  "ROBERTSON (TX)" = "N", "FALLS (TX)" = "N", "MCLENNAN (TX)" = "N",
  "BOSQUE (TX)" = "N", "CORYELL (TX)" = "N", "HAMILTON (TX)" = "N",
  "WILLIAMSON (TX)" = "S", "MILLS (TX)" = "N", "LAMPASAS (TX)" = "S",
  "BROWN (TX)" = "N", "COMANCHE (TX)" = "N", "ERATH (TX)" = "N",
  "EASTLAND (TX)" = "N", "STEPHENS (TX)" = "N", "YOUNG (TX)" = "N",
  "ARCHER (TX)" = "W", "CLAY (TX)" = "W", "MONTAGUE (TX)" = "N",
  "WISE (TX)" = "N", "JACK (TX)" = "N", "MCCULLOCH (TX)" = "S",
  "MENARD (TX)" = "W", "KIMBLE (TX)" = "S", "EDWARDS (TX)" = "S",
  "VAL VERDE (TX)" = "W"
)
prodtable <- prodtable %>%
  mutate(lz = ifelse(county %in% names(border_lz),
                     unname(border_lz[county]), lz))
# now time for the
# West / Panhandle counties: same lookup-vector approach. "NA" is kept as the
# literal string the original assigned (a non-ERCOT marker), NOT the missing
# value NA. Counties not in the table keep their current lz.
west_lz <- c(
  "PECOS (TX)" = "W", "REEVES (TX)" = "W", "LOVING (TX)" = "W",
  "CULBERSON (TX)" = "W", "JEFF DAVIS (TX)" = "W", "PRESIDIO (TX)" = "W",
  "TERRELL (TX)" = "W", "CROCKETT (TX)" = "W", "WARD (TX)" = "W",
  "WINKLER (TX)" = "W", "ECTOR (TX)" = "W", "CRANE (TX)" = "W",
  "ANDREWS (TX)" = "W", "MIDLAND (TX)" = "W", "UPTON (TX)" = "W",
  "REAGAN (TX)" = "W", "GLASSCOCK (TX)" = "W", "GAINES (TX)" = "NA",
  "YOAKUM (TX)" = "NA", "TERRY (TX)" = "NA", "COCHRAN (TX)" = "NA",
  "LYNN (TX)" = "W", "GARZA (TX)" = "W", "BORDEN (TX)" = "W",
  "DAWSON (TX)" = "W", "HOCKLEY (TX)" = "NA", "LUBBOCK (TX)" = "NA",
  "LAMB (TX)" = "NA", "HALE (TX)" = "W", "FLOYD (TX)" = "W",
  "MOTLEY (TX)" = "W", "COTTLE (TX)" = "W", "CROSBY (TX)" = "W",
  "DICKENS (TX)" = "W", "KENT (TX)" = "W", "STONEWALL (TX)" = "W",
  "SCURRY (TX)" = "W", "MARTIN (TX)" = "W", "HOWARD (TX)" = "W",
  "MITCHELL (TX)" = "W", "NOLAN (TX)" = "W", "TAYLOR (TX)" = "W",
  "STERLING (TX)" = "W", "IRION (TX)" = "W", "TOM GREEN (TX)" = "W",
  "COKE (TX)" = "W", "SCHLEICHER (TX)" = "W", "SUTTON (TX)" = "W",
  "CONCHO (TX)" = "W", "RUNNELS (TX)" = "W", "COLEMAN (TX)" = "W",
  "CALLAHAN (TX)" = "W", "SHACKELFORD (TX)" = "W", "DALLAM (TX)" = "NA",
  "SHERMAN (TX)" = "NA", "HANSFORD (TX)" = "NA", "OCHILTREE (TX)" = "NA",
  "LIPSCOMB (TX)" = "NA", "HARTLEY (TX)" = "NA", "MOORE (TX)" = "NA",
  "HUTCHINSON (TX)" = "NA", "ROBERTS (TX)" = "W", "HEMPHILL (TX)" = "NA",
  "OLDHAM (TX)" = "W", "POTTER (TX)" = "W", "CARSON (TX)" = "W",
  "GRAY (TX)" = "W", "WHEELER (TX)" = "W", "DONLEY (TX)" = "W",
  "COLLINGSWORTH (TX)" = "W", "BRISCOE (TX)" = "W", "CHILDRESS (TX)" = "W",
  "HARDEMAN (TX)" = "W", "FOARD (TX)" = "W", "KING (TX)" = "W",
  "KNOX (TX)" = "W", "HASKELL (TX)" = "W", "FISHER (TX)" = "W",
  "JONES (TX)" = "W", "THROCKMORTON (TX)" = "W", "BAYLOR (TX)" = "W",
  "WILBARGER (TX)" = "W", "WICHITA (TX)" = "W"
)
prodtable <- prodtable %>%
  mutate(lz = ifelse(county %in% names(west_lz),
                     unname(west_lz[county]), lz))
# now North area
# Lookup table: North-area county -> load-zone code.  NOTE: "NA" is the
# literal string this script uses for counties outside ERCOT, not a
# missing value.
north_lz <- c(
  "COOKE (TX)" = "N", "GRAYSON (TX)" = "N", "FANNIN (TX)" = "N",
  "RED RIVER (TX)" = "N", "DENTON (TX)" = "N", "PALO PINTO (TX)" = "N",
  "PARKER (TX)" = "N", "TARRANT (TX)" = "N", "DALLAS (TX)" = "N",
  "HUNT (TX)" = "N", "HOPKINS (TX)" = "N", "WOOD (TX)" = "N",
  "FRANKLIN (TX)" = "N", "TITUS (TX)" = "N", "CAMP (TX)" = "NA",
  "BOWIE (TX)" = "NA", "MORRIS (TX)" = "NA", "MARION (TX)" = "NA",
  "HARRISON (TX)" = "NA", "PANOLA (TX)" = "NA", "GREGG (TX)" = "NA",
  "UPSHUR (TX)" = "NA", "RUSK (TX)" = "N", "SHELBY (TX)" = "NA",
  "SABINE (TX)" = "NA", "SAN AUGUSTINE (TX)" = "NA", "NEWTON (TX)" = "NA",
  "JASPER (TX)" = "NA", "TYLER (TX)" = "NA", "POLK (TX)" = "NA",
  "HARDIN (TX)" = "NA", "ORANGE (TX)" = "NA", "JEFFERSON (TX)" = "NA",
  "SAN JACINTO (TX)" = "NA", "LIBERTY (TX)" = "NA", "WALKER (TX)" = "NA",
  "ANGELINA (TX)" = "N", "NACOGDOCHES (TX)" = "N", "SMITH (TX)" = "N",
  "CHEROKEE (TX)" = "N", "HOUSTON (TX)" = "N", "ANDERSON (TX)" = "N",
  "MADISON (TX)" = "N", "LEON (TX)" = "N", "FREESTONE (TX)" = "N",
  "HENDERSON (TX)" = "N", "VAN ZANDT (TX)" = "N", "KAUFMAN (TX)" = "N",
  "ELLIS (TX)" = "N", "NAVARRO (TX)" = "N", "LIMESTONE (TX)" = "N",
  "JOHNSON (TX)" = "N", "HOOD (TX)" = "N", "HILL (TX)" = "N"
)
# One vectorized pass replaces 54 repeated mutate(ifelse(...)) steps;
# counties absent from the lookup keep their current lz value.
# as.character() guards against county being a factor (pre-R-4.0 read.csv
# default), where bare vector indexing would use integer codes.
prodtable <- prodtable %>%
  mutate(lz = ifelse(county %in% names(north_lz),
                     unname(north_lz[as.character(county)]), lz))
# time to code the south
# Lookup table: South-area counties plus a handful of stragglers the
# original chain patched here as well ("H" = Houston, "N" = North, and
# the literal string "NA" for counties outside ERCOT).
south_lz <- c(
  "CAMERON (TX)" = "S", "WILLACY (TX)" = "S", "HIDALGO (TX)" = "S",
  "KENEDY (TX)" = "S", "STARR (TX)" = "S", "JIM HOGG (TX)" = "S",
  "BROOKS (TX)" = "S", "KLEBERG (TX)" = "S", "JIM WELLS (TX)" = "S",
  "NUECES (TX)" = "S", "DUVAL (TX)" = "S", "WEBB (TX)" = "S",
  "LA SALLE (TX)" = "S", "ZAPATA (TX)" = "S", "SAN PATRICIO (TX)" = "S",
  "REFUGIO (TX)" = "S", "ARANSAS (TX)" = "S", "BEE (TX)" = "S",
  "GOLIAD (TX)" = "S", "MCMULLEN (TX)" = "S", "LIVE OAK (TX)" = "S",
  "DIMMIT (TX)" = "S", "ZAVALA (TX)" = "S", "FRIO (TX)" = "S",
  "KARNES (TX)" = "S", "ATASCOSA (TX)" = "S", "WILSON (TX)" = "S",
  "DEWITT (TX)" = "S", "GONZALES (TX)" = "S", "GUADALUPE (TX)" = "S",
  "CALDWELL (TX)" = "S", "BASTROP (TX)" = "S", "FAYETTE (TX)" = "S",
  "JACKSON (TX)" = "S", "VICTORIA (TX)" = "S", "CALHOUN (TX)" = "S",
  "KERR (TX)" = "S", "UVALDE (TX)" = "S", "REAL (TX)" = "S",
  "BANDERA (TX)" = "S", "MEDINA (TX)" = "S", "HUDSPETH (TX)" = "NA",
  "LAVACA (TX)" = "S", "MAVERICK (TX)" = "S", "CASS (TX)" = "NA",
  "LEE (TX)" = "S", "WALLER (TX)" = "H", "SOMERVELL (TX)" = "N",
  "MATGRDA IS-LB (TX)" = "H", "MATGRDA IS-SB (TX)" = "H",
  "RAINS (TX)" = "N", "TRINITY (TX)" = "NA"
)
# Single vectorized assignment; unmatched counties keep their lz value.
prodtable <- prodtable %>%
  mutate(lz = ifelse(county %in% names(south_lz),
                     unname(south_lz[as.character(county)]), lz))
# Write the cleaned production table with its load-zone assignments.
# paste0() is the idiomatic form of paste(..., sep = "").
write.csv(prodtable, paste0(datadir, "/intermediate_data/prod_data_cleaned.csv"))
| /data/production_cleaning.R | no_license | nadialucas/ercot | R | false | false | 15,704 | r | # title: ps3.R
# author: nadia lucas
# updated: march 2021
#================================================#
# set up
#================================================#
# packages
library(tidyverse)
library(tigris)
library(sf)
library(RColorBrewer)
library(ggmap)
library(ggthemes)
library(mapview)
library(webshot)
library(ggpubr)
library(Hmisc)
library(knitr)
library(raster)  # NOTE(review): raster masks dplyr::select; qualify dplyr::select() if used later
library(sp)
library(rgdal)
# (a second library(sf) call was removed -- the package was attached twice)
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged
# (they surprise anyone sourcing this file); kept to preserve behavior.
rm(list = ls())
# set directory
ddir <- "/Users/nadialucas/Documents/ercot"
setwd(ddir)
datadir <- "/Users/nadialucas/Dropbox/research/ercot"
# Fix: the path previously contained "\ ", which is an unrecognized escape
# inside an R string (a parse error); a space needs no escaping here.
prodtable <- read.csv(paste0(datadir, "/raw_data/Production Table.CSV"))
county_names <- unique(prodtable$County.Parish)
# match each county to corresponding load zone
# read in tiffs
#lz_raster <- raster(paste(datadir, "/raw_data/maps/Load-Zone-Map_2020_full.tiff", sep = ""))
#lz_raster <- setMinMax(lz_raster)
#crs(lz_raster) <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
# standardize the county column name and initialize the load-zone column
prodtable <- prodtable %>%
  rename(county = County.Parish) %>%
  mutate(lz = "")
#template
# first get at the border counties
# Lookup table: county -> load-zone code for counties on or near zone
# borders ("AEN" = Austin Energy, "H" = Houston, "N" = North,
# "S" = South, "W" = West).
border_lz <- c(
  "TRAVIS (TX)" = "AEN", "BEXAR (TX)" = "H", "CHAMBERS (TX)" = "H",
  "HARRIS (TX)" = "H", "MONTGOMERY (TX)" = "H", "GALVESTON-LB (TX)" = "H",
  "GALVESTON-SB (TX)" = "H", "GALVESTON (TX)" = "H", "FORT BEND (TX)" = "H",
  "MATAGORDA (TX)" = "H", "WHARTON (TX)" = "S", "COLORADO (TX)" = "S",
  "AUSTIN (TX)" = "S", "WASHINGTON (TX)" = "S", "GRIMES (TX)" = "N",
  "BRAZOS (TX)" = "N", "BRAZOS-SB (TX)" = "N", "BRAZOS-LB (TX)" = "N",
  "BURLESON (TX)" = "N", "BRAZORIA (TX)" = "H", "MILAM (TX)" = "S",
  "ROBERTSON (TX)" = "N", "FALLS (TX)" = "N", "MCLENNAN (TX)" = "N",
  "BOSQUE (TX)" = "N", "CORYELL (TX)" = "N", "HAMILTON (TX)" = "N",
  "WILLIAMSON (TX)" = "S", "MILLS (TX)" = "N", "LAMPASAS (TX)" = "S",
  "BROWN (TX)" = "N", "COMANCHE (TX)" = "N", "ERATH (TX)" = "N",
  "EASTLAND (TX)" = "N", "STEPHENS (TX)" = "N", "YOUNG (TX)" = "N",
  "ARCHER (TX)" = "W", "CLAY (TX)" = "W", "MONTAGUE (TX)" = "N",
  "WISE (TX)" = "N", "JACK (TX)" = "N", "MCCULLOCH (TX)" = "S",
  "MENARD (TX)" = "W", "KIMBLE (TX)" = "S", "EDWARDS (TX)" = "S",
  "VAL VERDE (TX)" = "W"
)
# One vectorized pass replaces 46 repeated mutate(ifelse(...)) steps;
# as.character() guards against county being read in as a factor.
prodtable <- prodtable %>%
  mutate(lz = ifelse(county %in% names(border_lz),
                     unname(border_lz[as.character(county)]), lz))
# now time for the West-area counties (far-west Texas and the Panhandle)
# Lookup table: county -> load-zone code; the literal string "NA" marks
# counties outside ERCOT, not a missing value.
west_lz <- c(
  "PECOS (TX)" = "W", "REEVES (TX)" = "W", "LOVING (TX)" = "W",
  "CULBERSON (TX)" = "W", "JEFF DAVIS (TX)" = "W", "PRESIDIO (TX)" = "W",
  "TERRELL (TX)" = "W", "CROCKETT (TX)" = "W", "WARD (TX)" = "W",
  "WINKLER (TX)" = "W", "ECTOR (TX)" = "W", "CRANE (TX)" = "W",
  "ANDREWS (TX)" = "W", "MIDLAND (TX)" = "W", "UPTON (TX)" = "W",
  "REAGAN (TX)" = "W", "GLASSCOCK (TX)" = "W", "GAINES (TX)" = "NA",
  "YOAKUM (TX)" = "NA", "TERRY (TX)" = "NA", "COCHRAN (TX)" = "NA",
  "LYNN (TX)" = "W", "GARZA (TX)" = "W", "BORDEN (TX)" = "W",
  "DAWSON (TX)" = "W", "HOCKLEY (TX)" = "NA", "LUBBOCK (TX)" = "NA",
  "LAMB (TX)" = "NA", "HALE (TX)" = "W", "FLOYD (TX)" = "W",
  "MOTLEY (TX)" = "W", "COTTLE (TX)" = "W", "CROSBY (TX)" = "W",
  "DICKENS (TX)" = "W", "KENT (TX)" = "W", "STONEWALL (TX)" = "W",
  "SCURRY (TX)" = "W", "MARTIN (TX)" = "W", "HOWARD (TX)" = "W",
  "MITCHELL (TX)" = "W", "NOLAN (TX)" = "W", "TAYLOR (TX)" = "W",
  "STERLING (TX)" = "W", "IRION (TX)" = "W", "TOM GREEN (TX)" = "W",
  "COKE (TX)" = "W", "SCHLEICHER (TX)" = "W", "SUTTON (TX)" = "W",
  "CONCHO (TX)" = "W", "RUNNELS (TX)" = "W", "COLEMAN (TX)" = "W",
  "CALLAHAN (TX)" = "W", "SHACKELFORD (TX)" = "W", "DALLAM (TX)" = "NA",
  "SHERMAN (TX)" = "NA", "HANSFORD (TX)" = "NA", "OCHILTREE (TX)" = "NA",
  "LIPSCOMB (TX)" = "NA", "HARTLEY (TX)" = "NA", "MOORE (TX)" = "NA",
  "HUTCHINSON (TX)" = "NA", "ROBERTS (TX)" = "W", "HEMPHILL (TX)" = "NA",
  "OLDHAM (TX)" = "W", "POTTER (TX)" = "W", "CARSON (TX)" = "W",
  "GRAY (TX)" = "W", "WHEELER (TX)" = "W", "DONLEY (TX)" = "W",
  "COLLINGSWORTH (TX)" = "W", "BRISCOE (TX)" = "W", "CHILDRESS (TX)" = "W",
  "HARDEMAN (TX)" = "W", "FOARD (TX)" = "W", "KING (TX)" = "W",
  "KNOX (TX)" = "W", "HASKELL (TX)" = "W", "FISHER (TX)" = "W",
  "JONES (TX)" = "W", "THROCKMORTON (TX)" = "W", "BAYLOR (TX)" = "W",
  "WILBARGER (TX)" = "W", "WICHITA (TX)" = "W"
)
# One vectorized pass replaces 83 repeated mutate(ifelse(...)) steps;
# unmatched counties keep their current lz value.
prodtable <- prodtable %>%
  mutate(lz = ifelse(county %in% names(west_lz),
                     unname(west_lz[as.character(county)]), lz))
# now North area
# Lookup table: North-area county -> load-zone code.  NOTE: "NA" is the
# literal string this script uses for counties outside ERCOT, not a
# missing value.
north_lz <- c(
  "COOKE (TX)" = "N", "GRAYSON (TX)" = "N", "FANNIN (TX)" = "N",
  "RED RIVER (TX)" = "N", "DENTON (TX)" = "N", "PALO PINTO (TX)" = "N",
  "PARKER (TX)" = "N", "TARRANT (TX)" = "N", "DALLAS (TX)" = "N",
  "HUNT (TX)" = "N", "HOPKINS (TX)" = "N", "WOOD (TX)" = "N",
  "FRANKLIN (TX)" = "N", "TITUS (TX)" = "N", "CAMP (TX)" = "NA",
  "BOWIE (TX)" = "NA", "MORRIS (TX)" = "NA", "MARION (TX)" = "NA",
  "HARRISON (TX)" = "NA", "PANOLA (TX)" = "NA", "GREGG (TX)" = "NA",
  "UPSHUR (TX)" = "NA", "RUSK (TX)" = "N", "SHELBY (TX)" = "NA",
  "SABINE (TX)" = "NA", "SAN AUGUSTINE (TX)" = "NA", "NEWTON (TX)" = "NA",
  "JASPER (TX)" = "NA", "TYLER (TX)" = "NA", "POLK (TX)" = "NA",
  "HARDIN (TX)" = "NA", "ORANGE (TX)" = "NA", "JEFFERSON (TX)" = "NA",
  "SAN JACINTO (TX)" = "NA", "LIBERTY (TX)" = "NA", "WALKER (TX)" = "NA",
  "ANGELINA (TX)" = "N", "NACOGDOCHES (TX)" = "N", "SMITH (TX)" = "N",
  "CHEROKEE (TX)" = "N", "HOUSTON (TX)" = "N", "ANDERSON (TX)" = "N",
  "MADISON (TX)" = "N", "LEON (TX)" = "N", "FREESTONE (TX)" = "N",
  "HENDERSON (TX)" = "N", "VAN ZANDT (TX)" = "N", "KAUFMAN (TX)" = "N",
  "ELLIS (TX)" = "N", "NAVARRO (TX)" = "N", "LIMESTONE (TX)" = "N",
  "JOHNSON (TX)" = "N", "HOOD (TX)" = "N", "HILL (TX)" = "N"
)
# One vectorized pass replaces 54 repeated mutate(ifelse(...)) steps;
# counties absent from the lookup keep their current lz value.
# as.character() guards against county being a factor (pre-R-4.0 read.csv
# default), where bare vector indexing would use integer codes.
prodtable <- prodtable %>%
  mutate(lz = ifelse(county %in% names(north_lz),
                     unname(north_lz[as.character(county)]), lz))
# time to code the south
# Lookup table: South-area counties plus a handful of stragglers the
# original chain patched here as well ("H" = Houston, "N" = North, and
# the literal string "NA" for counties outside ERCOT).
south_lz <- c(
  "CAMERON (TX)" = "S", "WILLACY (TX)" = "S", "HIDALGO (TX)" = "S",
  "KENEDY (TX)" = "S", "STARR (TX)" = "S", "JIM HOGG (TX)" = "S",
  "BROOKS (TX)" = "S", "KLEBERG (TX)" = "S", "JIM WELLS (TX)" = "S",
  "NUECES (TX)" = "S", "DUVAL (TX)" = "S", "WEBB (TX)" = "S",
  "LA SALLE (TX)" = "S", "ZAPATA (TX)" = "S", "SAN PATRICIO (TX)" = "S",
  "REFUGIO (TX)" = "S", "ARANSAS (TX)" = "S", "BEE (TX)" = "S",
  "GOLIAD (TX)" = "S", "MCMULLEN (TX)" = "S", "LIVE OAK (TX)" = "S",
  "DIMMIT (TX)" = "S", "ZAVALA (TX)" = "S", "FRIO (TX)" = "S",
  "KARNES (TX)" = "S", "ATASCOSA (TX)" = "S", "WILSON (TX)" = "S",
  "DEWITT (TX)" = "S", "GONZALES (TX)" = "S", "GUADALUPE (TX)" = "S",
  "CALDWELL (TX)" = "S", "BASTROP (TX)" = "S", "FAYETTE (TX)" = "S",
  "JACKSON (TX)" = "S", "VICTORIA (TX)" = "S", "CALHOUN (TX)" = "S",
  "KERR (TX)" = "S", "UVALDE (TX)" = "S", "REAL (TX)" = "S",
  "BANDERA (TX)" = "S", "MEDINA (TX)" = "S", "HUDSPETH (TX)" = "NA",
  "LAVACA (TX)" = "S", "MAVERICK (TX)" = "S", "CASS (TX)" = "NA",
  "LEE (TX)" = "S", "WALLER (TX)" = "H", "SOMERVELL (TX)" = "N",
  "MATGRDA IS-LB (TX)" = "H", "MATGRDA IS-SB (TX)" = "H",
  "RAINS (TX)" = "N", "TRINITY (TX)" = "NA"
)
# Single vectorized assignment; unmatched counties keep their lz value.
prodtable <- prodtable %>%
  mutate(lz = ifelse(county %in% names(south_lz),
                     unname(south_lz[as.character(county)]), lz))
# Write the cleaned production table with its load-zone assignments.
# paste0() is the idiomatic form of paste(..., sep = "").
write.csv(prodtable, paste0(datadir, "/intermediate_data/prod_data_cleaned.csv"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.