content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Project setup: working directory, parallel backend, and libraries.
# NOTE(review): setwd() with a hard-coded user path makes the script
# non-portable — consider an RStudio project / here::here() instead.
setwd("~/R Projects/lectures-ml/competition/titanic-classification/titanic-kaggle")
library(doParallel)
# Register a PSOCK cluster on all cores so caret/foreach can train in parallel.
cl <- makeCluster(detectCores(), type='PSOCK')
registerDoParallel(cl)
library(ggplot2)
library(plyr)
library(dplyr)
library(pROC)
library(zoo)
library(caret)
# Read Kaggle train/test CSVs; treat empty and blank strings as NA.
training <- read.csv("./data/train.csv", stringsAsFactors = FALSE, na.strings=c(""," ","NA"))
testing <- read.csv("./data/test.csv", stringsAsFactors = FALSE, na.strings=c(""," ","NA"))
# Test set has no label; add an NA Survived column so rbind() aligns,
# and so the combined frame can later be split back on is.na(Survived).
testing$Survived <- NA
data <- rbind(training, testing)
# Keep the test-set ids for the submission file built at the end.
PassengerId <- testing$PassengerId
# FEATURE ENGINEERING
# get social status title of person from their name
# Extract the social-status title from the passenger name.
# BUGFIX: "Mrs" must be tested before "Mr" — grepl("Mr", ...) also matches
# the substring "Mr" inside "Mrs", so the original ordering labelled every
# "Mrs" passenger as "Mr". ("Miss" contains no "Mr", so it is unaffected.)
data$Title <- ifelse(grepl("Miss", data$Name), "Miss",
              ifelse(grepl("Mrs", data$Name), "Mrs",
              ifelse(grepl("Mr", data$Name), "Mr", "nothing")))
# fill NAs of Age with decision tree
library(rpart)
rpartFit_age <- rpart(Age ~ Survived + Sex + Pclass + Title + Fare, data = data[!is.na(data$Age), ],
method = "anova", control = rpart.control(cp = 0.001))
data$Age[is.na(data$Age)] <- predict(rpartFit_age, data[is.na(data$Age), ])
# fill NAs of Embarked with decision tree
rpartFit_Embarked <- rpart(Embarked ~ Survived + Sex + Pclass + Title + Fare, data = data[!is.na(data$Embarked), ],
control = rpart.control(cp = 0.001))
data$Embarked[is.na(data$Embarked)] <- as.character(predict(rpartFit_Embarked, data[is.na(data$Embarked), ], type = "class"))
# fill NAs of Fare with the median fare
data$Fare[is.na(data$Fare)] <- median(data$Fare, na.rm = TRUE)
# convert variables to correct class
data$Pclass <- as.ordered(data$Pclass) # will make hot encoding work
# combine sex and passenger class
data$PclassSex[data$Pclass == 1 & data$Sex == "female"] <- "P1Female"
data$PclassSex[data$Pclass == 2 & data$Sex == "female"] <- "P2Female"
data$PclassSex[data$Pclass == 3 & data$Sex == "female"] <- "P3Female"
data$PclassSex[data$Pclass == 1 & data$Sex == "male"] <- "P1Male"
data$PclassSex[data$Pclass == 2 & data$Sex == "male"] <- "P2Male"
data$PclassSex[data$Pclass == 3 & data$Sex == "male"] <- "P3Male"
# categorical age
data$Age_group[data$Age <= 10] <- "child"
data$Age_group[data$Age > 10 & data$Age <= 50] <- "adult"
data$Age_group[data$Age > 50] <- "elder"
# categorical age and sex
# Combine the age group with sex into a single categorical feature.
# BUGFIX: adult females were assigned "adult_male" in the original,
# collapsing the two adult levels into one and losing the sex signal
# for the largest age group.
data$Age_sex[data$Age_group == "child" & data$Sex == "male"] <- "child_male"
data$Age_sex[data$Age_group == "child" & data$Sex == "female"] <- "child_female"
data$Age_sex[data$Age_group == "adult" & data$Sex == "male"] <- "adult_male"
data$Age_sex[data$Age_group == "adult" & data$Sex == "female"] <- "adult_female"
data$Age_sex[data$Age_group == "elder" & data$Sex == "male"] <- "elder_male"
data$Age_sex[data$Age_group == "elder" & data$Sex == "female"] <- "elder_female"
# embarked and sex
data$Sex_embarked[data$Sex == "male" & data$Embarked == "Q"] <- "male_Q"
data$Sex_embarked[data$Sex == "female" & data$Embarked == "Q"] <- "female_Q"
data$Sex_embarked[data$Sex == "male" & data$Embarked == "S"] <- "male_S"
data$Sex_embarked[data$Sex == "female" & data$Embarked == "S"] <- "female_S"
data$Sex_embarked[data$Sex == "male" & data$Embarked == "C"] <- "male_C"
data$Sex_embarked[data$Sex == "female" & data$Embarked == "C"] <- "female_C"
# fare cat
data$Fare_cat[data$Fare == 0] <- "free"
data$Fare_cat[data$Fare > 0 & data$Fare <= 100] <- "normal"
data$Fare_cat[data$Fare > 100] <- "expensive"
# log of numeric
data$Age <- log(data$Age +1)
data$Fare <- log(data$Fare +1)
# group of people by ticket
ticket_group <- ddply(data, ~ Ticket, function(x) c(Ticket_group_size = length(x$Ticket)))
# merge
data <- left_join(data, ticket_group, by = "Ticket")
data$Ticket_group[data$Ticket_group_size == 1] <- "Alone"
data$Ticket_group[data$Ticket_group_size == 2] <- "Couple"
data$Ticket_group[data$Ticket_group_size >= 3 & data$Ticket_group_size <= 5] <- "Group"
data$Ticket_group[data$Ticket_group_size >5] <- "LargeGroup"
# select data
data <- data %>% select(Pclass, Age, Sex, Title, Survived, SibSp, Parch, Fare, Embarked, PclassSex, Age_group, Age_sex,
Fare_cat, Sex_embarked, Ticket_group_size, Ticket_group)
# data$Pclass <- as.factor(data$Pclass) # 1st is upper and 3rd is lower class
# data$Title <- as.factor(data$Title)
# data$Sex <- as.factor(data$Sex)
# near zero variance here? select by hand with variable importance analysis?
# nzv <- nearZeroVar(data.clean, saveMetrics = TRUE)
# nzvNames <- row.names(nzv[nzv$nzv == TRUE, ])
# data.clean <- data.clean[, !nzv$nzv]
# create dummy variables from levels of factors
Pclass <- data$Pclass
data.dummy <- dummyVars(~ ., data = data[, -1], fullRank = FALSE)
data <- as.data.frame(predict(data.dummy, data)) # no more levels as text
data$Pclass <- Pclass
# convert response to factor class
data$Survived <- as.factor(ifelse(data$Survived == 1, "survived", "died"))
prop.table(table(data$Survived)) # 61.8% died
# unbind testing and training data (Now none have NAs)
testing <- data[is.na(data$Survived), ]
training <- data[!is.na(data$Survived), ]
# split training into train and test (to get quality metric estimation before sending to Kaggle)
set.seed(42)
inTrain <- createDataPartition(training$Survived, p = 0.6, list = FALSE)
training.train <- training[inTrain, ]
training.test <- training[-inTrain, ]
# train control with tuned parameters
folds <- 5
cv_folds <- createMultiFolds(training.train$Survived, k = folds, times = 1)
trControl_tuned <- trainControl(
method = "repeatedcv", number = 5, repeats = 1, search = "grid",
index = cv_folds,
# summaryFunction = twoClassSummary, # add for ROC
classProbs = TRUE, # Important for classification
verboseIter = TRUE
)
# train control for searching parameter
trControl_search <- trainControl(
method = "repeatedcv", number = folds, repeats = 2, search = "random",
index = cv_folds,
classProbs = TRUE, # Important for classification
verboseIter = TRUE
)
# TRAINING-TRAIN
# LASSO (FINAL)
myGrid_lasso <- expand.grid(
alpha = 1,
lambda = 0.012
)
set.seed(42)
lassoFit <- train(Survived ~ ., data = training.train,
method = "glmnet",
tuneGrid = myGrid_lasso,
# tuneLength = 10,
metric = "Accuracy",
trControl = trControl_tuned)
plot(lassoFit$finalModel, label = TRUE)
plot(varImp(lassoFit))
# training.train Accuracy 0.83027
train_pred <- predict(lassoFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8282
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.8078
# bag of lasso
predictorNames <- names(predictors)
length_divisor <- 1
predictions <- 0
predictions <- foreach(i=1:10,.combine=cbind) %dopar% {
set.seed(i)
sampleRows <- sample(nrow(training.train), size = floor((nrow(training.train)/length_divisor)), replace = TRUE)
fit <- train(Survived ~ ., data = training.train[sampleRows, ],
method = "glmnet",
tuneGrid = myGrid_lasso,
# tuneLength = 10,
metric = "Accuracy",
trControl = trControl_tuned)
predictions[i] <- data.frame(predict(fit, newdata = training.test, type = "prob")[1]) # pred > .5 died
}
auc <- roc(training.test$Survived, rowMeans(predictions), plot = TRUE)
print(auc)
bag_mean_pred <- rowMeans(predictions) # prob of dying
bag_mean_pred <- ifelse(bag_mean_pred < .5, "survived", "died")
confusionMatrix(training.test$Survived, bag_mean_pred)
# accuracy 0.8028
# Elastic Net
myGrid_glmnet <- expand.grid(
alpha = 0,
lambda = seq(0.001, 0.2, 0.001)
)
set.seed(42)
glmnetFit <- train(Survived ~ ., data = training.train,
method = "glmnet",
tuneGrid = myGrid_glmnet,
# tuneLength = 1000,
metric = "Accuracy",
trControl = trControl_search)
plot(glmnetFit$finalModel, label = TRUE)
plot(varImp(glmnetFit))
# training.train Accuracy 0.8241789
train_pred <- predict(glmnetFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8394
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.8183
# Support Vector Machines with Radial Basis Function Kernel
myGrid_svmRadial <- expand.grid(
sigma = 0.062,
C = 2.7
)
set.seed(42)
svmRadialFit <- train(Survived ~ ., data = training.train,
method = "svmRadial",
tuneGrid = myGrid_svmRadial,
# tuneLength = 100,
metric = "Accuracy",
trControl = trControl_tuned)
plot(svmRadialFit, label = TRUE)
plot(varImp(svmRadialFit))
# training.train Accuracy
train_pred <- predict(svmRadialFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8056
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.7853
# Decision tree with rpart
myGrid_rpart <- expand.grid(
cp = seq(0.001, 0.01, 0.001)
)
set.seed(42)
rpartFit <- train(Survived ~ ., data = training.train,
method = "rpart",
tuneGrid = myGrid_rpart,
# tuneLength = 100,
metric = "Accuracy",
trControl = trControl_tuned)
plot(rpartFit) # while tuning
library(rattle)
library(rpart.plot)
rpart.plot(rpartFit$finalModel)
# training.train Accuracy 0.8041191
train_pred <- predict(rpartFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8141
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.7946
# kernel k-nearest neighbors
myGrid_kknn <- expand.grid(
kmax = 11,
distance = 2,
kernel = "optimal")
set.seed(42)
kknnFit <- train(Survived ~ ., data = training.train,
method = "kknn",
tuneGrid = myGrid_kknn,
# tuneLength = 100,
metric = "Accuracy",
trControl = trControl_tuned)
plot(kknnFit$finalModel)
# training.train Accuracy
train_pred <- predict(kknnFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.7863
# random forest with ranger
myGrid_ranger <- expand.grid(
mtry = 8,
splitrule = "extratrees",
min.node.size = 8
)
set.seed(42)
rangerFit <- train(Survived ~ ., data = training.train,
method = "ranger",
tuneGrid = myGrid_ranger,
# tuneLength = 10,
metric = "Accuracy",
trControl = trControl_tuned)
plot(rangerFit$finalModel)
# training.train Accuracy 0.8059536
train_pred <- predict(rangerFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8394
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.8225
# random forest with rf
myGrid_rf <- expand.grid(
mtry = c(2:20)
)
set.seed(42)
rfFit <- train(Survived ~ ., data = training.train,
method = "rf",
tuneGrid = myGrid_rf,
# tuneLength = 10,
metric = "Accuracy",
trControl = trControl_tuned)
plot(rfFit$finalModel)
# training.train Accuracy 0.8059536
train_pred <- predict(rfFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8423
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.8178
# stochastic gradient boosting with gbm
myGrid_gbm <- expand.grid(
n.trees = 500,
interaction.depth = 7,
shrinkage = 0.01,
n.minobsinnode = 10
)
set.seed(42)
gbmFit <- train(Survived ~ ., data = training.train,
method = "gbm",
tuneGrid = myGrid_gbm,
# tuneLength = 100,
metric = "Accuracy",
trControl = trControl_tuned)
plot(gbmFit$finalModel)
# training.train Accuracy 0.7965758
train_pred <- predict(gbmFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8423
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.8192
# Xtreme Gradient Boosting with xgbLinear
myGrid_xgbLinear <- expand.grid(
nrounds = 100,
lambda = 0.1,
alpha = 1,
eta = 0.5
)
set.seed(42)
xgbLinearFit <- train(Survived ~ ., data = training.train,
method = "xgbLinear",
tuneGrid = myGrid_xgbLinear,
# tuneLength = 20,
metric = "Accuracy",
trControl = trControl_tuned)
plot(xgbLinearFit$finalModel)
# training.train Accuracy 0.8197
train_pred <- predict(xgbLinearFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
#
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.7954
# validation is not correlated to test set. How to split/validate this dataset?
# extreme regularized gradient boosting with xgbTree (xboost)
myGrid_xgbTree <- expand.grid(
nrounds = 600,
eta = 0.15,
gamma = 0.3,
colsample_bytree = 0.04,
min_child_weight = 3,
subsample = 0.9,
max_depth = 6
)
set.seed(42)
xgbTreeFit <- train(Survived ~ ., data = training.train,
method = "xgbTree",
tuneGrid = myGrid_xgbTree,
# tuneLength = 20,
metric = "Accuracy",
trControl = trControl_tuned)
plot(xgbTreeFit$finalModel)
# training.train Accuracy 0.8307943
train_pred <- predict(xgbTreeFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8254 | Tune 2 0.8423, overfitted on test
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.7999 | Tune 2 0.8192
# MODEL COMPARISONS
resamps <- resamples(list(XBOOST = xgbTreeFit,
GBM = gbmFit))
summary(resamps)
trellis.par.set(caretTheme())
dotplot(resamps, metric = "Accuracy")
# FINAL TRAINING
set.seed(42)
finalFit <- train(Survived ~ ., data = training,
method = "glmnet",
tuneGrid = myGrid_lasso,
# tuneLength = 100,
metric = "Accuracy",
trControl = trControl_tuned)
pred <- predict(finalFit, newdata = testing)
pred <- ifelse(pred == "survived", 1, 0)
# ranger 0.8290637
# xgbTree 0.8342908
# lasso 0.8327961 - best so far Kaggle
# ridge 0.8294124
# svmRadial 0.8175871
# gbm 0.831067
# rf 0.8303788
# xgbLinear 0.8260479
submit <- data.frame(PassengerId = PassengerId, Survived = pred)
# submission glmnet
write.csv(submit, file = "submission.glmnet.06.oneHotEncoding.41var.alpha1.lambda0012.csv", row.names = FALSE)
# submission xgbTree
write.csv(submit, file = "submission.03.oneHotEncoding.xgbLinear.nrounds100.lambda01.alpha1.eta05.csv", row.names = FALSE)
|
/titanic-classification-model-tuning.R
|
no_license
|
designervga/titanic-kaggle
|
R
| false
| false
| 15,611
|
r
|
setwd("~/R Projects/lectures-ml/competition/titanic-classification/titanic-kaggle")
library(doParallel)
cl <- makeCluster(detectCores(), type='PSOCK')
registerDoParallel(cl)
library(ggplot2)
library(plyr)
library(dplyr)
library(pROC)
library(zoo)
library(caret)
training <- read.csv("./data/train.csv", stringsAsFactors = FALSE, na.strings=c(""," ","NA"))
testing <- read.csv("./data/test.csv", stringsAsFactors = FALSE, na.strings=c(""," ","NA"))
testing$Survived <- NA
data <- rbind(training, testing)
PassengerId <- testing$PassengerId
# FEATURE ENGINEERING
# get social status title of person from their name
# Extract the social-status title from the passenger name.
# BUGFIX: "Mrs" must be tested before "Mr" — grepl("Mr", ...) also matches
# the substring "Mr" inside "Mrs", so the original ordering labelled every
# "Mrs" passenger as "Mr". ("Miss" contains no "Mr", so it is unaffected.)
data$Title <- ifelse(grepl("Miss", data$Name), "Miss",
              ifelse(grepl("Mrs", data$Name), "Mrs",
              ifelse(grepl("Mr", data$Name), "Mr", "nothing")))
# fill NAs of Age with decision tree
library(rpart)
rpartFit_age <- rpart(Age ~ Survived + Sex + Pclass + Title + Fare, data = data[!is.na(data$Age), ],
method = "anova", control = rpart.control(cp = 0.001))
data$Age[is.na(data$Age)] <- predict(rpartFit_age, data[is.na(data$Age), ])
# fill NAs of Embarked with decision tree
rpartFit_Embarked <- rpart(Embarked ~ Survived + Sex + Pclass + Title + Fare, data = data[!is.na(data$Embarked), ],
control = rpart.control(cp = 0.001))
data$Embarked[is.na(data$Embarked)] <- as.character(predict(rpartFit_Embarked, data[is.na(data$Embarked), ], type = "class"))
# fill NAs of Fare with the median fare
data$Fare[is.na(data$Fare)] <- median(data$Fare, na.rm = TRUE)
# convert variables to correct class
data$Pclass <- as.ordered(data$Pclass) # will make hot encoding work
# combine sex and passenger class
data$PclassSex[data$Pclass == 1 & data$Sex == "female"] <- "P1Female"
data$PclassSex[data$Pclass == 2 & data$Sex == "female"] <- "P2Female"
data$PclassSex[data$Pclass == 3 & data$Sex == "female"] <- "P3Female"
data$PclassSex[data$Pclass == 1 & data$Sex == "male"] <- "P1Male"
data$PclassSex[data$Pclass == 2 & data$Sex == "male"] <- "P2Male"
data$PclassSex[data$Pclass == 3 & data$Sex == "male"] <- "P3Male"
# categorical age
data$Age_group[data$Age <= 10] <- "child"
data$Age_group[data$Age > 10 & data$Age <= 50] <- "adult"
data$Age_group[data$Age > 50] <- "elder"
# categorical age and sex
# Combine the age group with sex into a single categorical feature.
# BUGFIX: adult females were assigned "adult_male" in the original,
# collapsing the two adult levels into one and losing the sex signal
# for the largest age group.
data$Age_sex[data$Age_group == "child" & data$Sex == "male"] <- "child_male"
data$Age_sex[data$Age_group == "child" & data$Sex == "female"] <- "child_female"
data$Age_sex[data$Age_group == "adult" & data$Sex == "male"] <- "adult_male"
data$Age_sex[data$Age_group == "adult" & data$Sex == "female"] <- "adult_female"
data$Age_sex[data$Age_group == "elder" & data$Sex == "male"] <- "elder_male"
data$Age_sex[data$Age_group == "elder" & data$Sex == "female"] <- "elder_female"
# embarked and sex
data$Sex_embarked[data$Sex == "male" & data$Embarked == "Q"] <- "male_Q"
data$Sex_embarked[data$Sex == "female" & data$Embarked == "Q"] <- "female_Q"
data$Sex_embarked[data$Sex == "male" & data$Embarked == "S"] <- "male_S"
data$Sex_embarked[data$Sex == "female" & data$Embarked == "S"] <- "female_S"
data$Sex_embarked[data$Sex == "male" & data$Embarked == "C"] <- "male_C"
data$Sex_embarked[data$Sex == "female" & data$Embarked == "C"] <- "female_C"
# fare cat
data$Fare_cat[data$Fare == 0] <- "free"
data$Fare_cat[data$Fare > 0 & data$Fare <= 100] <- "normal"
data$Fare_cat[data$Fare > 100] <- "expensive"
# log of numeric
data$Age <- log(data$Age +1)
data$Fare <- log(data$Fare +1)
# group of people by ticket
ticket_group <- ddply(data, ~ Ticket, function(x) c(Ticket_group_size = length(x$Ticket)))
# merge
data <- left_join(data, ticket_group, by = "Ticket")
data$Ticket_group[data$Ticket_group_size == 1] <- "Alone"
data$Ticket_group[data$Ticket_group_size == 2] <- "Couple"
data$Ticket_group[data$Ticket_group_size >= 3 & data$Ticket_group_size <= 5] <- "Group"
data$Ticket_group[data$Ticket_group_size >5] <- "LargeGroup"
# select data
data <- data %>% select(Pclass, Age, Sex, Title, Survived, SibSp, Parch, Fare, Embarked, PclassSex, Age_group, Age_sex,
Fare_cat, Sex_embarked, Ticket_group_size, Ticket_group)
# data$Pclass <- as.factor(data$Pclass) # 1st is upper and 3rd is lower class
# data$Title <- as.factor(data$Title)
# data$Sex <- as.factor(data$Sex)
# near zero variance here? select by hand with variable importance analysis?
# nzv <- nearZeroVar(data.clean, saveMetrics = TRUE)
# nzvNames <- row.names(nzv[nzv$nzv == TRUE, ])
# data.clean <- data.clean[, !nzv$nzv]
# create dummy variables from levels of factors
Pclass <- data$Pclass
data.dummy <- dummyVars(~ ., data = data[, -1], fullRank = FALSE)
data <- as.data.frame(predict(data.dummy, data)) # no more levels as text
data$Pclass <- Pclass
# convert response to factor class
data$Survived <- as.factor(ifelse(data$Survived == 1, "survived", "died"))
prop.table(table(data$Survived)) # 61.8% died
# unbind testing and training data (Now none have NAs)
testing <- data[is.na(data$Survived), ]
training <- data[!is.na(data$Survived), ]
# split training into train and test (to get quality metric estimation before sending to Kaggle)
set.seed(42)
inTrain <- createDataPartition(training$Survived, p = 0.6, list = FALSE)
training.train <- training[inTrain, ]
training.test <- training[-inTrain, ]
# train control with tuned parameters
folds <- 5
cv_folds <- createMultiFolds(training.train$Survived, k = folds, times = 1)
trControl_tuned <- trainControl(
method = "repeatedcv", number = 5, repeats = 1, search = "grid",
index = cv_folds,
# summaryFunction = twoClassSummary, # add for ROC
classProbs = TRUE, # Important for classification
verboseIter = TRUE
)
# train control for searching parameter
trControl_search <- trainControl(
method = "repeatedcv", number = folds, repeats = 2, search = "random",
index = cv_folds,
classProbs = TRUE, # Important for classification
verboseIter = TRUE
)
# TRAINING-TRAIN
# LASSO (FINAL)
myGrid_lasso <- expand.grid(
alpha = 1,
lambda = 0.012
)
set.seed(42)
lassoFit <- train(Survived ~ ., data = training.train,
method = "glmnet",
tuneGrid = myGrid_lasso,
# tuneLength = 10,
metric = "Accuracy",
trControl = trControl_tuned)
plot(lassoFit$finalModel, label = TRUE)
plot(varImp(lassoFit))
# training.train Accuracy 0.83027
train_pred <- predict(lassoFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8282
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.8078
# bag of lasso
predictorNames <- names(predictors)
length_divisor <- 1
predictions <- 0
predictions <- foreach(i=1:10,.combine=cbind) %dopar% {
set.seed(i)
sampleRows <- sample(nrow(training.train), size = floor((nrow(training.train)/length_divisor)), replace = TRUE)
fit <- train(Survived ~ ., data = training.train[sampleRows, ],
method = "glmnet",
tuneGrid = myGrid_lasso,
# tuneLength = 10,
metric = "Accuracy",
trControl = trControl_tuned)
predictions[i] <- data.frame(predict(fit, newdata = training.test, type = "prob")[1]) # pred > .5 died
}
auc <- roc(training.test$Survived, rowMeans(predictions), plot = TRUE)
print(auc)
bag_mean_pred <- rowMeans(predictions) # prob of dying
bag_mean_pred <- ifelse(bag_mean_pred < .5, "survived", "died")
confusionMatrix(training.test$Survived, bag_mean_pred)
# accuracy 0.8028
# Elastic Net
myGrid_glmnet <- expand.grid(
alpha = 0,
lambda = seq(0.001, 0.2, 0.001)
)
set.seed(42)
glmnetFit <- train(Survived ~ ., data = training.train,
method = "glmnet",
tuneGrid = myGrid_glmnet,
# tuneLength = 1000,
metric = "Accuracy",
trControl = trControl_search)
plot(glmnetFit$finalModel, label = TRUE)
plot(varImp(glmnetFit))
# training.train Accuracy 0.8241789
train_pred <- predict(glmnetFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8394
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.8183
# Support Vector Machines with Radial Basis Function Kernel
myGrid_svmRadial <- expand.grid(
sigma = 0.062,
C = 2.7
)
set.seed(42)
svmRadialFit <- train(Survived ~ ., data = training.train,
method = "svmRadial",
tuneGrid = myGrid_svmRadial,
# tuneLength = 100,
metric = "Accuracy",
trControl = trControl_tuned)
plot(svmRadialFit, label = TRUE)
plot(varImp(svmRadialFit))
# training.train Accuracy
train_pred <- predict(svmRadialFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8056
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.7853
# Decision tree with rpart
myGrid_rpart <- expand.grid(
cp = seq(0.001, 0.01, 0.001)
)
set.seed(42)
rpartFit <- train(Survived ~ ., data = training.train,
method = "rpart",
tuneGrid = myGrid_rpart,
# tuneLength = 100,
metric = "Accuracy",
trControl = trControl_tuned)
plot(rpartFit) # while tuning
library(rattle)
library(rpart.plot)
rpart.plot(rpartFit$finalModel)
# training.train Accuracy 0.8041191
train_pred <- predict(rpartFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8141
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.7946
# kernel k-nearest neighbors
myGrid_kknn <- expand.grid(
kmax = 11,
distance = 2,
kernel = "optimal")
set.seed(42)
kknnFit <- train(Survived ~ ., data = training.train,
method = "kknn",
tuneGrid = myGrid_kknn,
# tuneLength = 100,
metric = "Accuracy",
trControl = trControl_tuned)
plot(kknnFit$finalModel)
# training.train Accuracy
train_pred <- predict(kknnFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.7863
# random forest with ranger
myGrid_ranger <- expand.grid(
mtry = 8,
splitrule = "extratrees",
min.node.size = 8
)
set.seed(42)
rangerFit <- train(Survived ~ ., data = training.train,
method = "ranger",
tuneGrid = myGrid_ranger,
# tuneLength = 10,
metric = "Accuracy",
trControl = trControl_tuned)
plot(rangerFit$finalModel)
# training.train Accuracy 0.8059536
train_pred <- predict(rangerFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8394
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.8225
# random forest with rf
myGrid_rf <- expand.grid(
mtry = c(2:20)
)
set.seed(42)
rfFit <- train(Survived ~ ., data = training.train,
method = "rf",
tuneGrid = myGrid_rf,
# tuneLength = 10,
metric = "Accuracy",
trControl = trControl_tuned)
plot(rfFit$finalModel)
# training.train Accuracy 0.8059536
train_pred <- predict(rfFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8423
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.8178
# stochastic gradient boosting with gbm
myGrid_gbm <- expand.grid(
n.trees = 500,
interaction.depth = 7,
shrinkage = 0.01,
n.minobsinnode = 10
)
set.seed(42)
gbmFit <- train(Survived ~ ., data = training.train,
method = "gbm",
tuneGrid = myGrid_gbm,
# tuneLength = 100,
metric = "Accuracy",
trControl = trControl_tuned)
plot(gbmFit$finalModel)
# training.train Accuracy 0.7965758
train_pred <- predict(gbmFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8423
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.8192
# Xtreme Gradient Boosting with xgbLinear
myGrid_xgbLinear <- expand.grid(
nrounds = 100,
lambda = 0.1,
alpha = 1,
eta = 0.5
)
set.seed(42)
xgbLinearFit <- train(Survived ~ ., data = training.train,
method = "xgbLinear",
tuneGrid = myGrid_xgbLinear,
# tuneLength = 20,
metric = "Accuracy",
trControl = trControl_tuned)
plot(xgbLinearFit$finalModel)
# training.train Accuracy 0.8197
train_pred <- predict(xgbLinearFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
#
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.7954
# validation is not correlated to test set. How to split/validate this dataset?
# extreme regularized gradient boosting with xgbTree (xboost)
myGrid_xgbTree <- expand.grid(
nrounds = 600,
eta = 0.15,
gamma = 0.3,
colsample_bytree = 0.04,
min_child_weight = 3,
subsample = 0.9,
max_depth = 6
)
set.seed(42)
xgbTreeFit <- train(Survived ~ ., data = training.train,
method = "xgbTree",
tuneGrid = myGrid_xgbTree,
# tuneLength = 20,
metric = "Accuracy",
trControl = trControl_tuned)
plot(xgbTreeFit$finalModel)
# training.train Accuracy 0.8307943
train_pred <- predict(xgbTreeFit, newdata = training.test)
confusionMatrix(training.test$Survived, train_pred)
# 0.8254 | Tune 2 0.8423, overfitted on test
train_pred <- ifelse(train_pred == "survived", 1, 0)
auc <- roc(training.test$Survived, train_pred, plot = TRUE)
print(auc)
# training.test AUC 0.7999 | Tune 2 0.8192
# MODEL COMPARISONS
resamps <- resamples(list(XBOOST = xgbTreeFit,
GBM = gbmFit))
summary(resamps)
trellis.par.set(caretTheme())
dotplot(resamps, metric = "Accuracy")
# FINAL TRAINING
set.seed(42)
finalFit <- train(Survived ~ ., data = training,
method = "glmnet",
tuneGrid = myGrid_lasso,
# tuneLength = 100,
metric = "Accuracy",
trControl = trControl_tuned)
pred <- predict(finalFit, newdata = testing)
pred <- ifelse(pred == "survived", 1, 0)
# ranger 0.8290637
# xgbTree 0.8342908
# lasso 0.8327961 - best so far Kaggle
# ridge 0.8294124
# svmRadial 0.8175871
# gbm 0.831067
# rf 0.8303788
# xgbLinear 0.8260479
submit <- data.frame(PassengerId = PassengerId, Survived = pred)
# submission glmnet
write.csv(submit, file = "submission.glmnet.06.oneHotEncoding.41var.alpha1.lambda0012.csv", row.names = FALSE)
# submission xgbTree
write.csv(submit, file = "submission.03.oneHotEncoding.xgbLinear.nrounds100.lambda01.alpha1.eta05.csv", row.names = FALSE)
|
# Plot state murder rates as a horizontal bar chart and save it to figs/.
# BUGFIX: the original first line read `ibrary(tidyverse)` (missing "l"),
# which is a call to a nonexistent function and aborts the script.
library(tidyverse)
load("rdas/murders.rda")
# Reorder the state abbreviation factor by rate so bars appear sorted.
murders %>% mutate(abb = reorder(abb, rate)) %>%
  ggplot(aes(abb, rate)) +
  geom_bar(width = 0.5, stat = "identity", color = "black") +
  coord_flip()
ggsave("figs/barplot.png")
|
/analysis.R
|
no_license
|
Shaolin-Mei/murders
|
R
| false
| false
| 230
|
r
|
# Plot state murder rates as a horizontal bar chart and save it to figs/.
# BUGFIX: the original first line read `ibrary(tidyverse)` (missing "l"),
# which is a call to a nonexistent function and aborts the script.
library(tidyverse)
load("rdas/murders.rda")
# Reorder the state abbreviation factor by rate so bars appear sorted.
murders %>% mutate(abb = reorder(abb, rate)) %>%
  ggplot(aes(abb, rate)) +
  geom_bar(width = 0.5, stat = "identity", color = "black") +
  coord_flip()
ggsave("figs/barplot.png")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function_prepare.R
\name{stat_segm}
\alias{stat_segm}
\title{Calculate statistics on a given segmentation}
\usage{
stat_segm(
data,
diag.var,
order.var = NULL,
param = NULL,
seg.type = NULL,
nseg
)
}
\arguments{
\item{data}{the data.frame with the different variable}
\item{diag.var}{names of the variables on which statistics are calculated}
\item{order.var}{names of the variable with which states are ordered}
\item{param}{parameters of output segmentation}
\item{seg.type}{either 'hybrid' or 'dynprog'}
\item{nseg}{number of segment chosen}
}
\value{
a list which first element is a data.frame with states of the
different segments and which second element is a data.frame with mean and
variance of the different states
}
\description{
\code{stat_segm} calculates statistics of a given segmentation : mean and
variance of the different states. it also creates standard objects for plot.
}
\examples{
\dontrun{
#res.segclust is a result of a segmentation-clustering algorithm
param <- res.segclust$param[["3 class"]]
nseg = 10
out <- stat_segm(data, diag.var = c("dist","angle"),
order.var = "dist", param = param, nseg=nseg, seg.type = "segclust")
}
}
|
/man/stat_segm.Rd
|
no_license
|
rpatin/segclust2d
|
R
| false
| true
| 1,304
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function_prepare.R
\name{stat_segm}
\alias{stat_segm}
\title{Calculate statistics on a given segmentation}
\usage{
stat_segm(
data,
diag.var,
order.var = NULL,
param = NULL,
seg.type = NULL,
nseg
)
}
\arguments{
\item{data}{the data.frame with the different variable}
\item{diag.var}{names of the variables on which statistics are calculated}
\item{order.var}{names of the variable with which states are ordered}
\item{param}{parameters of output segmentation}
\item{seg.type}{either 'hybrid' or 'dynprog'}
\item{nseg}{number of segment chosen}
}
\value{
a list which first element is a data.frame with states of the
different segments and which second element is a data.frame with mean and
variance of the different states
}
\description{
\code{stat_segm} calculates statistics of a given segmentation : mean and
variance of the different states. it also creates standard objects for plot.
}
\examples{
\dontrun{
#res.segclust is a result of a segmentation-clustering algorithm
param <- res.segclust$param[["3 class"]]
nseg = 10
out <- stat_segm(data, diag.var = c("dist","angle"),
order.var = "dist", param = param, nseg=nseg, seg.type = "segclust")
}
}
|
shinyServer(function(input, output) {
# Reactive flag: set TRUE when the user presses "save", consumed by output$raw.
value<-reactiveValues(click=FALSE)
# Central reactive: reads the uploaded CSV and derives everything the outputs
# need (raw table, melted long table, subject names, per-subject means/maxima).
Data <- reactive({
# input$file1 will be NULL initially. After the user selects and uploads a
# file, it will be a data frame with 'name', 'size', 'type', and 'datapath'
# columns. The 'datapath' column will contain the local filenames where the
# data can be found.
inFile <- input$file1
if (is.null(inFile))
return(NULL)
df.raw <- read.csv(inFile$datapath, header=TRUE, sep=',', quote='"')
#mydata<-read.csv(inFile$datapath, header=input$header, sep=input$sep, quote=input$quote)
# calculate term and pupil averages
number<-ncol(df.raw)
tot<-nrow(df.raw)
# subj starts numeric; assigning column names below coerces it to character.
subj<-array(1:number,dim=c(number))
# zz<-array(1:number,dim=c(number))
zz<-0
# Trim trailing summary columns: 3 if a "Class"/"CLASS" column is among the
# last three columns, otherwise 2 (assumed Total/Percentage style columns).
if((colnames(df.raw[number])=="Class") | (colnames(df.raw[number-1])=="Class")| (colnames(df.raw[number-2])=="Class") |(colnames(df.raw[number])=="CLASS") | (colnames(df.raw[number-1])=="CLASS")| (colnames(df.raw[number-2])=="CLASS"))
{
number<-number-3
}
else
{
number<-number-2
}
# Columns 1-2 are assumed to be Roll and Name; 3..number are subjects.
for(i in 3:number)
{
subj[i]<-colnames(df.raw)[i]
}
name<-colnames(df.raw[3:number])
subavg<-array(1:number,dim=c(number-2))
# NOTE(review): hh is hard-coded to length 15 — sheets with more than 15
# subject columns would overflow it. TODO confirm the expected sheet width.
hh<-array(1:15,dim=c(15))
# NOTE(review): `1:number-2` parses as `(1:number) - 2`, so i runs over
# -1, 0, 1, ..., number-2. The i = -1 step assigns all-but-first elements and
# i = 0 is a no-op; the final values coincide with the intended loop over
# 1:(number-2), but this relies on operator precedence by accident.
for(i in 1:number-2)
{
subavg[i]<-colMeans(df.raw[3:number])[i]
}
# Per-column maximum (used to guess whether a subject is out of 100/50/25).
colMax <- function(df.raw) sapply(df.raw, max, na.rm = TRUE)
abcde<-colMax(df.raw[3:number])[2]
# Same `(1:number)-2` precedence quirk as above.
for(i in 1:number-2)
{
hh[i]<-colMax(df.raw[3:number])[i]
}
# df.raw$Av <- round(rowMeans(df.raw[3:12]),1)
# reshape th data.frame for further analysis
# melt() comes from reshape2/reshape — presumably loaded elsewhere (global.R);
# verify, as it is not attached in this file.
df.melt <- melt(df.raw, id.vars=c("Roll","Name"))
colnames(df.melt) <- c("Roll","Name","Subject","Mark")
# Uploaded file name without its 4-character extension (e.g. ".csv").
filename<-basename(inFile$name)
filename<-substr(filename,1,nchar(filename)-4)
# create a list of data for use in rendering
info <- list(df.raw=df.raw,df.melt=df.melt,name=name,subj=subj,number=number,subavg=subavg,tot=tot,zz=zz,hh=hh,filename=filename,abcde=abcde)
return(info)
})
# allows pageability and number of rows setting
# Shared options for the gvisTable outputs; only the page size is
# user-configurable at the moment.
myOptions <- reactive({
  list(pageSize = input$pagesize)
})
# Subject selector built dynamically from the uploaded sheet's column names;
# defaults to the first subject column (index 3).
output$dropoptions <- renderUI({
  info <- Data()
  selectInput(
    "select",
    label = h3("Plot for"),
    choices = info$name,
    selected = info$subj[3]
  )
})
# Arm the "save to database" flag once the save button has been pressed
# (action buttons start at 0 and increment on each click).
observe({
  if (input$save != 0) {
    value$click <- TRUE
  }
})
# observe({
# if (is.null(input$save))
# return()
# value$click = FALSE
# })
# Renders the raw marks table (optionally filtered by percentage class) and,
# as a side effect, mirrors the upload into a local MySQL database and saves
# it under a user-chosen sheet name when the save flag is armed.
output$raw <- renderGvis({
if (is.null(input$file1)) { return() }
file<-Data()$filename
# NOTE(review): hard-coded local credentials; connection is not closed on an
# early error because dbDisconnect is not registered via on.exit.
con<-dbConnect(MySQL(),user='root',password='',dbname='analysis',host='localhost')
if(dbExistsTable(con,"name"))
{
sql1<-dbSendQuery(con,"drop table name")
sql2<-dbSendQuery(con,"create table name(Name varchar(255))")
# SECURITY(review): SQL built by string pasting the uploaded file name —
# injection risk; should use a parameterized query or dbQuoteString.
sql3<-paste("insert into name(Name) values('",file,"')",sep="")
sql4<-dbSendQuery(con,sql3)
}
if(dbExistsTable(con,"test"))
sql<-dbSendQuery(con,"drop table test")
# NOTE(review): append="TRUE" passes a character string where dbWriteTable
# expects a logical — TODO confirm this behaves as intended with RMySQL.
dbWriteTable(con,"test",Data()$df.raw,append="TRUE")
if(value$click)
{
# Sheet names are required to be exactly 10 characters long.
if(input$sheetname=="" | nchar(input$sheetname)!=10){
showModal(modalDialog(
title = "invalid name",
"Enter a valid name"
))
}
else
{
if(dbExistsTable(con,input$sheetname))
{
# SECURITY(review): table name interpolated directly into SQL — injection risk.
sql11<-as.character(input$sheetname)
sql11<-paste("drop table",sql11,sep=" ")
sql22<-dbSendQuery(con,sql11)
dbWriteTable(con,input$sheetname,Data()$df.raw,append="TRUE")
}
else
{
dbWriteTable(con,input$sheetname,Data()$df.raw,append="TRUE")
}
}
# Disarm the flag so the save runs only once per button press.
value$click=FALSE
}
dbDisconnect(con)
# Percentage-class filters: a=all, d=distinction, f=first, h=higher second,
# s=second, p=pass, fail=below 40.
if(is.null(Data()$number)){return()}
else if(input$filter=='a'){
gvisTable(Data()$df.raw,options=myOptions())
}
else if(input$filter=='d'){
df.temp <- subset(Data()$df.raw,Percentage>=66)
gvisTable(df.temp,options=myOptions())
}
else if(input$filter=='f'){
df.temp <- subset(Data()$df.raw,Percentage>=60 & Percentage<66 )
gvisTable(df.temp,options=myOptions())
}
else if(input$filter=='h'){
df.temp <- subset(Data()$df.raw,Percentage>=55 & Percentage<60)
gvisTable(df.temp,options=myOptions())
}
else if(input$filter=='s'){
df.temp <- subset(Data()$df.raw,Percentage>=50 & Percentage<55)
gvisTable(df.temp,options=myOptions())
}
else if(input$filter=='p'){
df.temp <- subset(Data()$df.raw,Percentage>=40 & Percentage<50)
gvisTable(df.temp,options=myOptions())
}
else if(input$filter=='fail'){
df.temp <- subset(Data()$df.raw,Percentage<40)
gvisTable(df.temp,options=myOptions())
}
})
# Whole-semester plot: density or histogram of the Total/Grand.Total marks,
# chosen by input$rr.
output$reportall<-renderPlot({
if (is.null(input$file1)) { return() }
else
{
# df.graph2 (per-subject marks) is computed but never used below.
df.graph2 <- subset(Data()$df.melt,Subject!="Total" & Subject!="Percentage" & Subject!= "Av" & Subject!= "Grand.Total" &Subject!= "Class")
df.graph3 <- subset(Data()$df.melt,Subject=="Total" | Subject=="Grand.Total")
df.graph2[4] <- sapply(df.graph2[4],as.numeric)
df.graph3[4] <- sapply(df.graph3[4],as.numeric)
if(input$rr=="g")
print(ggplot(df.graph3, aes(x=Mark, fill=Subject)) + geom_density(alpha=.5)+ scale_fill_manual( values = c("blue")))
else
print(ggplot(df.graph3, aes(x=Mark, fill=Subject)) + geom_histogram(alpha=.5)+ scale_fill_manual( values = c("blue")))
# for(i in 1:number-2)
# {
# subavg[i]<-colMeans(df.raw[3:number])[i]
#}
# NOTE(review): the call below appears malformed — print() is nested inside
# the ggplot() argument list, and Percentage/response are not columns of
# df.graph3. The assigned p is never printed, so this looks like dead or
# broken leftover code; TODO confirm and remove or fix.
p = ggplot(data=df.graph3,
aes(x=factor(1),
y=Percentage,
fill = factor(response)
),
print(p=p + geom_bar(width = 1) )
)
}
})
# Heading shown above the whole-semester report once a file has been uploaded.
output$textall <- renderText({
  if (!is.null(input$file1)) {
    "Report For Semester"
  }
})
# Per-subject plot: density or histogram (input$ss) of the marks for the
# subject currently chosen in input$select.
output$density <- renderPlot({
if (is.null(input$file1)) { return() }
# df.graph <- subset(Data()$df.melt,Subject!="Total" & Subject!="Percentage" & Subject!= "Av")
else if(is.null(Data()$number) | is.null(input$select)){return()}
else
{
# Find the selected subject among columns 3..number and take its rows.
# NOTE(review): if no subject matches, df.graph is never assigned and the
# line below errors — TODO confirm input$select is always a valid subject.
for(i in 3:Data()$number)
{
if(input$select==Data()$subj[i])
{
df.graph <- subset(Data()$df.melt,Subject==Data()$subj[i])
break
}
}
}
df.graph[4] <- sapply(df.graph[4],as.numeric)
# str() only prints to the server console; useful for debugging, no UI effect.
str(df.graph)
if(input$ss=='g')
print(ggplot(df.graph, aes(x=Mark, fill=Subject)) + geom_density(alpha=.5) + scale_fill_manual( values = c("blue")))
else
print(ggplot(df.graph, aes(x=Mark, fill=Subject)) + geom_histogram(alpha=.5) + scale_fill_manual( values = c("blue")))
# print(hist(Data()$df.raw$t1Av))
})
# Bar chart of one student's per-subject marks, looked up by roll number
# (input$ch == 'r') or by name; summary columns are excluded.
output$performance <- renderPlot({
if (is.null(input$file1)) { return() }
if(input$ch=='r')
df.graph <- subset(Data()$df.melt,Roll==input$c & Subject!="Percentage" & Subject!="Av" & Subject!="Total" & Subject!="Grand.Total" & Subject!="Class")
else
# Names are assumed to be stored upper-case in the sheet.
df.graph <- subset(Data()$df.melt,Name==toupper(input$c) & Subject!="Percentage" & Subject!="Av" & Subject!="Total" & Subject!="Grand.Total" & Subject!="Class")
df.graph[4] <- sapply(df.graph[4],as.numeric)
print(ggplot(df.graph, aes(x=Subject,y=Mark)) +
scale_fill_gradient("Count", low = "cornflowerblue", high = "blue")+
geom_bar(aes(fill=Mark),stat="identity",width = 1))
})
# Placeholder output: the gender ANOVA it once produced is commented out,
# so this currently renders nothing.
output$report <- renderPrint({
if (is.null(input$file1)) { return() }
#df.gender<- subset(Data()$df.melt,Subject!="Av")
# aov.by.gender <- aov(Mark ~ Gender, data=df.gender)
#summary(aov.by.gender)
})
# Knits rep.Rmd on every render and embeds the resulting HTML; note this runs
# even before a file is uploaded (no input$file1 guard). knit() comes from
# knitr — presumably loaded elsewhere; verify.
output$markdown <- renderUI({
HTML(markdown::markdownToHTML(knit('rep.Rmd', quiet = TRUE)))
#a("test", href="http://google.com", target="_blank")
})
# Raw-table view for a single student, selected by roll number or name
# (same lookup convention as output$performance).
output$performancetable<-renderGvis(
{
if (is.null(input$file1)) { return() }
else
{
if(input$ch=='r')
df.temp <- subset(Data()$df.raw,Roll==input$c)
else
df.temp <- subset(Data()$df.raw,Name==toupper(input$c))
gvisTable(df.temp,options=myOptions())
}
}
)
#output$myTable1 <- renderTable({
# data.frame(Sr.No. =(length.out=5), ExamNo.=(""), Student="" ,Marks="",Percentage="")
# }, include.rownames = FALSE)
# output$myTable2 <- renderTable({
# data.frame(No.ofStudents=(length.out=5), Pass=(""), failwithATKT= "" ,fail="",PassPercentage="")
# }, include.rownames = FALSE)
# output$myTable3 <- renderTable({
# data.frame(No.ofStudentsAppearedPercentage=(length.out=5), DISTINCTIONonwards="", FirstClass="" ,HigherSecondClass="",SecondClass="",PassClass="")
# }, include.rownames = FALSE)
# output$myTable4 <- renderTable({
# data.frame(Subject=(length.out=5), Appeared=(""), pass="" ,fail="",percentage="")
# }, include.rownames = FALSE)
# Caption for the raw marks table, shown only after an upload.
output$caption1 <- renderText({
  if (!is.null(input$file1)) {
    "Student Marks"
  }
})
# Caption: average mark of the currently selected subject.
output$caption2 <- renderText( {
if (is.null(input$file1)) { return() }
# paste0("Average Mark DS : ", Data()$t1Av," CO : ", Data()$t2Av," DELD :", Data()$t3Av)
else if(is.null(Data()$number)|is.null(input$select)){return()}
else
{
for(i in 3:Data()$number)
{
if(input$select==Data()$subj[i])
{
# NOTE(review): store is assigned but never used; the paste0 below relies
# on the loop index i persisting after break instead.
store<-i
break
}
}
# subavg is indexed relative to the first subject column (i - 2).
paste0("Average Marks : ", Data()$subavg[i-2])
}
})
# output$caption3 <- renderText( {
# if (is.null(input$file1)) { return() }
# paste0("Analysis of Variance by Gender - Boys Average Mark:",Data()$boys, " Girls Average Mark:",Data()$girls)
# })
# NOTE(review): the HTML(...) value below is discarded — only the final
# paste0 string is returned by renderUI. If the paragraph is meant to show,
# both need to be combined (e.g. tagList); TODO confirm intent.
output$notes2 <- renderUI( {
if (is.null(input$file1)) { return() }
HTML("The above graph shows the variation in pupils' marks by term. The annual spread
will normally be greater as the example data is random and normally some pupils will
tend to be better than others over each term")
paste0("The marks for subject ", input$select,":")
# paste0("tempmax is",Data()$abcde)
})
# Static explanatory note about the (currently disabled) gender ANOVA.
output$notes3 <- renderUI( {
if (is.null(input$file1)) { return() }
HTML("The Analysis of Variance indicates whether there is a statistically significant
difference between boys and girls in the class. With this 'fixed' data, there is a
significant difference at the 5% level")
})
# One-line "Subject:Mark" summary for the student with roll number input$c.
# NOTE(review): hard-coded to exactly 10 subjects (rows 1..10 of the subset);
# sheets with fewer subjects will paste NAs — TODO confirm.
output$caption4 <- renderText( {
if (is.null(input$file1)) { return() }
if(is.null(input$c)){return()}
else
{
df.rollno <- subset(Data()$df.melt,Roll==input$c & Subject!="Percentage" & Subject!="Av" & Subject!="Total" & Name!="Name" & Roll!= "Roll")
# The trailing empty [] subscripts are no-ops.
paste0(df.rollno[1,3][],":",df.rollno[1,4][]," ",df.rollno[2,3][],":",df.rollno[2,4][]," ",df.rollno[3,3][],":",df.rollno[3,4][]," ",df.rollno[4,3][],":",df.rollno[4,4][]," ",df.rollno[5,3][],":",df.rollno[5,4][]," ",df.rollno[6,3][],":",df.rollno[6,4][]," ",df.rollno[7,3][],":",df.rollno[7,4][]," ",df.rollno[8,3][],":",df.rollno[8,4][]," ",df.rollno[9,3][],":",df.rollno[9,4][]," ",df.rollno[10,3][],":",df.rollno[10,4][])
}
})
# Table of students in a chosen grade band (input$filter2) for the selected
# subject. Band thresholds are scaled by the subject's maximum mark
# (Data()$hh): out of 100 (>50), out of 50 (25..50), or out of 25.
# NOTE(review): band boundaries overlap (e.g. Mark>=66 and Mark<=66 both
# include 66), and if no branch assigns df.sel the final gvisTable errors.
output$newtable<-renderGvis({
if (is.null(input$file1)) { return() }
else if(is.null(input$select)){return()}
else
if(input$filter2=='a')
{
df.sel<-subset(Data()$df.melt,Subject==input$select)
}
else if(input$filter2=='d')
{
for(i in 3:Data()$number){
if(input$select==Data()$subj[i])
{
if(Data()$hh[i-2]>50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=66)
else if(Data()$hh[i-2]>=25 & Data()$hh[i-2]<50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=0.66*50)
else
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=0.66*25)
}
}
}
else if(input$filter2=='f')
{
for(i in 3:Data()$number)
if(input$select==Data()$subj[i])
if(Data()$hh[i-2]>50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=60 & Mark<=66)
else if(Data()$hh[i-2]>=25 & Data()$hh[i-2]<50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=50*0.60 & Mark<=50*0.66)
else
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=0.60*25 & Mark<=25*0.66)
}
else if(input$filter2=='h')
{
for(i in 3:Data()$number)
if(input$select==Data()$subj[i])
if(Data()$hh[i-2]>50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=55 & Mark<=60)
else if(Data()$hh[i-2]>=25 & Data()$hh[i-2]<50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=50*0.55 & Mark<=50*0.60)
else
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=0.55*25 & Mark<=25*0.60)
}
else if(input$filter2=='s')
{
for(i in 3:Data()$number)
if(input$select==Data()$subj[i])
if(Data()$hh[i-2]>50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=50 & Mark<=55)
else if(Data()$hh[i-2]>=25 & Data()$hh[i-2]<50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=50*0.50 & Mark<=50*0.55)
else
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=0.50*25 & Mark<=25*0.55)
}
else if(input$filter2=='p')
{
for(i in 3:Data()$number)
if(input$select==Data()$subj[i])
if(Data()$hh[i-2]>50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=40 & Mark<=50)
else if(Data()$hh[i-2]>=25 & Data()$hh[i-2]<50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=50*0.40 & Mark<=50*0.50)
else
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=0.40*25 & Mark<=25*0.50)
}
else if(input$filter2=='fail')
{
#if(Data()$subavg[input$select]>=40)
for(i in 3:Data()$number)
{
if(input$select==Data()$subj[i])
{
if(Data()$hh[i-2]>50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark<40)
else if(Data()$hh[i-2]>=25 & Data()$hh[i-2]<50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark<20)
else
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark<10)
}
}
}
gvisTable(df.sel,options=list(width="920px",height="400px"))
})
# Pie chart of grade-band counts for the selected subject, with thresholds
# scaled by the subject's maximum mark as in output$newtable.
output$chart<-renderPlot(
{
# if(input$filter2=='d')
for(i in 3:Data()$number)
if(input$select==Data()$subj[i])
if(Data()$hh[i-2]>50)
{
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=66)
df.sel2<-subset(Data()$df.melt,Subject==input$select & Mark>=60 & Mark<=66)
df.sel3<-subset(Data()$df.melt,Subject==input$select & Mark>=55 & Mark<=60)
df.sel4<-subset(Data()$df.melt,Subject==input$select & Mark>=50 & Mark<=55)
df.sel5<-subset(Data()$df.melt,Subject==input$select & Mark<40)
}
else if(Data()$hh[i-2]>25 & Data()$hh[i-2]<=50)
{
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=0.66*50)
df.sel2<-subset(Data()$df.melt,Subject==input$select & Mark>=50*0.60 & Mark<=50*0.66)
df.sel3<-subset(Data()$df.melt,Subject==input$select & Mark>=50*0.55 & Mark<=50*0.60)
df.sel4<-subset(Data()$df.melt,Subject==input$select & Mark>=50*0.50 & Mark<=50*0.55)
df.sel5<-subset(Data()$df.melt,Subject==input$select & Mark<50*0.40)
}
else if(Data()$hh[i-2]<=25)
{
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=0.66*25)
df.sel2<-subset(Data()$df.melt,Subject==input$select & Mark>=25*0.60 & Mark<=25*0.66)
df.sel3<-subset(Data()$df.melt,Subject==input$select & Mark>=25*0.55 & Mark<=25*0.60)
df.sel4<-subset(Data()$df.melt,Subject==input$select & Mark>=25*0.50 & Mark<=25*0.55)
df.sel5<-subset(Data()$df.melt,Subject==input$select & Mark<25*0.40)
}
counttemp<-1
# number<-ncol(df.raw)
# NOTE(review): the break below is NOT inside the if — it executes
# unconditionally on the first iteration (i = 3), so the line assigning
# counttemp is unreachable; counttemp/tempdata are effectively unused.
for(i in 3:Data()$number)
{
if(input$select==Data()$subj[i])
tempdata<-df.sel[5,4]
# if(Data()$tempdata>40)
break
counttemp<-tempdata
}
# Band sizes -> percentages of the whole class (Data()$tot rows).
cc<-nrow(df.sel)
cc2<-nrow(df.sel2)
cc3<-nrow(df.sel3)
cc4<-nrow(df.sel4)
cc5<-nrow(df.sel5)
# df.sel<-rbind(df.sel,newRow)
# variable<-c("Distinction", "First Class","Higher Second Class"," Second Class","Fail")
x<-c(cc/Data()$tot*100,cc2/Data()$tot*100,cc3/Data()$tot*100,cc4/Data()$tot*100,cc5/Data()$tot*100)
count<-c(paste("Distinction:",cc),paste("First Class:",cc2),paste("Higher Second Class:",cc3),paste("Second Class:",cc4),paste("Fail:",cc5))
cols<-c("brown2","cadetblue3","hotpink2","chartreuse3","peru")
pie(x, labels = count, main = "Distribution of Students According to Class",col=cols,init.angle =90,radius = 1)
#legend("topright",variable, cex = 1.2,
# fill = cols)
}
)
# Pie chart of the overall class distribution, based on the Percentage rows
# of the melted table (fixed 100-mark thresholds).
# NOTE(review): boundary values (50/55/60/66) fall into two adjacent bands
# because the ranges use >= and <= on both sides — such students are
# double-counted; TODO confirm intended boundaries.
output$allchart<-renderPlot(
{
# for(i in 3:Data()$number)
#if(input$select==Data()$subj[i])
#if(Data()$subavg[i-2]>=40)
{
df.sel<-subset(Data()$df.melt,Subject=="Percentage" & Mark>=66)
df.sel2<-subset(Data()$df.melt,Subject=="Percentage" & Mark>=60 & Mark<=66)
df.sel3<-subset(Data()$df.melt,Subject=="Percentage" & Mark>=55 & Mark<=60)
df.sel4<-subset(Data()$df.melt,Subject=="Percentage" & Mark>=50 & Mark<=55)
df.sel5<-subset(Data()$df.melt,Subject=="Percentage" & Mark>=40 & Mark<=50)
df.sel6<-subset(Data()$df.melt,Subject=="Percentage" & Mark<40)
}
# counttemp and zz below are local leftovers, never used for output.
counttemp<-1
cc<-nrow(df.sel)
cc2<-nrow(df.sel2)
cc3<-nrow(df.sel3)
cc4<-nrow(df.sel4)
cc5<-nrow(df.sel5)
cc6<-nrow(df.sel6)
zz<-cc
# zz[1]<-nrow(df.sel)
#df11 <- data.frame(
# variable = c("Distinction", "First Class","Higher Second Class"," Second Class","Pass Class","Fail"),
# percent=c(cc/Data()$tot*100,cc2/Data()$tot*100,cc3/Data()$tot*100,cc4/Data()$tot*100,cc5/Data()$tot*100,cc6/Data()$tot*100)
# )
#variable<-c("Distinction", "First Class","Higher Second Class"," Second Class","Pass Class","Fail")
x<-c(cc/Data()$tot*100,cc2/Data()$tot*100,cc3/Data()$tot*100,cc4/Data()$tot*100,cc5/Data()$tot*100,cc6/Data()$tot*100)
count<-c(paste("Distinction:",cc),paste("First Class:",cc2),paste("Higher Second Class:",cc3),paste("Second Class:",cc4),paste("Pass Class:",cc5),paste("Fail:",cc6))
# print(ggplot(df11, aes(x = "tttt", y = percent, fill = variable,label=variable)) +
# geom_bar(width = 1, stat = "identity") +
#scale_fill_manual(values = c("red", "yellow")) +
# coord_polar("y", start = pi / 3) )
#labs(title = ""))
cols<-c("brown2","cadetblue3","hotpink2","chartreuse3","darkseagreen4","peru")
pie(x, labels = count, main = "Distribution of Students According to Class",col=cols,init.angle =90,radius = 1)
#legend("topright",variable, cex = 1.2,
# fill = cols)
# pie <- ggplot(df11, aes(x="",y=percent,fill=(variable),label=percent)) +
# geom_bar(width = 1,stat = "identity")+
# geom_text(aes( y =percent , label = percent), size = 6)
# pie + coord_polar("y",start=0)+labs(title = "Distribution of Students According to Class")
}
)
# Placeholder caption: its content is commented out, so it renders nothing.
output$repend <- renderText( {
if (is.null(input$file1)) { return() }
#paste0("Distinction")
#,"First Class",cc2,"Higher Second Class",cc3," Second Class",cc4,"Pass Class",cc5,"Fail",cc6)
})
})
|
/admin/server.R
|
no_license
|
shubham414/Analytics-in-Academics
|
R
| false
| false
| 20,964
|
r
|
shinyServer(function(input, output) {
value<-reactiveValues(click=FALSE)
Data <- reactive({
# input$file1 will be NULL initially. After the user selects and uploads a
# file, it will be a data frame with 'name', 'size', 'type', and 'datapath'
# columns. The 'datapath' column will contain the local filenames where the
# data can be found.
inFile <- input$file1
if (is.null(inFile))
return(NULL)
df.raw <- read.csv(inFile$datapath, header=TRUE, sep=',', quote='"')
#mydata<-read.csv(inFile$datapath, header=input$header, sep=input$sep, quote=input$quote)
# calculate term and pupil averages
number<-ncol(df.raw)
tot<-nrow(df.raw)
subj<-array(1:number,dim=c(number))
# zz<-array(1:number,dim=c(number))
zz<-0
if((colnames(df.raw[number])=="Class") | (colnames(df.raw[number-1])=="Class")| (colnames(df.raw[number-2])=="Class") |(colnames(df.raw[number])=="CLASS") | (colnames(df.raw[number-1])=="CLASS")| (colnames(df.raw[number-2])=="CLASS"))
{
number<-number-3
}
else
{
number<-number-2
}
for(i in 3:number)
{
subj[i]<-colnames(df.raw)[i]
}
name<-colnames(df.raw[3:number])
subavg<-array(1:number,dim=c(number-2))
hh<-array(1:15,dim=c(15))
for(i in 1:number-2)
{
subavg[i]<-colMeans(df.raw[3:number])[i]
}
colMax <- function(df.raw) sapply(df.raw, max, na.rm = TRUE)
abcde<-colMax(df.raw[3:number])[2]
for(i in 1:number-2)
{
hh[i]<-colMax(df.raw[3:number])[i]
}
# df.raw$Av <- round(rowMeans(df.raw[3:12]),1)
# reshape th data.frame for further analysis
df.melt <- melt(df.raw, id.vars=c("Roll","Name"))
colnames(df.melt) <- c("Roll","Name","Subject","Mark")
filename<-basename(inFile$name)
filename<-substr(filename,1,nchar(filename)-4)
# create a list of data for use in rendering
info <- list(df.raw=df.raw,df.melt=df.melt,name=name,subj=subj,number=number,subavg=subavg,tot=tot,zz=zz,hh=hh,filename=filename,abcde=abcde)
return(info)
})
# allows pageability and number of rows setting
myOptions <- reactive({
list(
#page=ifelse(input$pageable==TRUE,'enable','disable'),
pageSize=input$pagesize
)
} )
output$dropoptions<- renderUI({
selectInput("select",label=h3("Plot for"),
choices = Data()$name,
selected=Data()$subj[3]
)
})
observe({
if (input$save == 0) return()
value$click = TRUE
})
# observe({
# if (is.null(input$save))
# return()
# value$click = FALSE
# })
output$raw <- renderGvis({
if (is.null(input$file1)) { return() }
file<-Data()$filename
con<-dbConnect(MySQL(),user='root',password='',dbname='analysis',host='localhost')
if(dbExistsTable(con,"name"))
{
sql1<-dbSendQuery(con,"drop table name")
sql2<-dbSendQuery(con,"create table name(Name varchar(255))")
sql3<-paste("insert into name(Name) values('",file,"')",sep="")
sql4<-dbSendQuery(con,sql3)
}
if(dbExistsTable(con,"test"))
sql<-dbSendQuery(con,"drop table test")
dbWriteTable(con,"test",Data()$df.raw,append="TRUE")
if(value$click)
{
if(input$sheetname=="" | nchar(input$sheetname)!=10){
showModal(modalDialog(
title = "invalid name",
"Enter a valid name"
))
}
else
{
if(dbExistsTable(con,input$sheetname))
{
sql11<-as.character(input$sheetname)
sql11<-paste("drop table",sql11,sep=" ")
sql22<-dbSendQuery(con,sql11)
dbWriteTable(con,input$sheetname,Data()$df.raw,append="TRUE")
}
else
{
dbWriteTable(con,input$sheetname,Data()$df.raw,append="TRUE")
}
}
value$click=FALSE
}
dbDisconnect(con)
if(is.null(Data()$number)){return()}
else if(input$filter=='a'){
gvisTable(Data()$df.raw,options=myOptions())
}
else if(input$filter=='d'){
df.temp <- subset(Data()$df.raw,Percentage>=66)
gvisTable(df.temp,options=myOptions())
}
else if(input$filter=='f'){
df.temp <- subset(Data()$df.raw,Percentage>=60 & Percentage<66 )
gvisTable(df.temp,options=myOptions())
}
else if(input$filter=='h'){
df.temp <- subset(Data()$df.raw,Percentage>=55 & Percentage<60)
gvisTable(df.temp,options=myOptions())
}
else if(input$filter=='s'){
df.temp <- subset(Data()$df.raw,Percentage>=50 & Percentage<55)
gvisTable(df.temp,options=myOptions())
}
else if(input$filter=='p'){
df.temp <- subset(Data()$df.raw,Percentage>=40 & Percentage<50)
gvisTable(df.temp,options=myOptions())
}
else if(input$filter=='fail'){
df.temp <- subset(Data()$df.raw,Percentage<40)
gvisTable(df.temp,options=myOptions())
}
})
output$reportall<-renderPlot({
if (is.null(input$file1)) { return() }
else
{
df.graph2 <- subset(Data()$df.melt,Subject!="Total" & Subject!="Percentage" & Subject!= "Av" & Subject!= "Grand.Total" &Subject!= "Class")
df.graph3 <- subset(Data()$df.melt,Subject=="Total" | Subject=="Grand.Total")
df.graph2[4] <- sapply(df.graph2[4],as.numeric)
df.graph3[4] <- sapply(df.graph3[4],as.numeric)
if(input$rr=="g")
print(ggplot(df.graph3, aes(x=Mark, fill=Subject)) + geom_density(alpha=.5)+ scale_fill_manual( values = c("blue")))
else
print(ggplot(df.graph3, aes(x=Mark, fill=Subject)) + geom_histogram(alpha=.5)+ scale_fill_manual( values = c("blue")))
# for(i in 1:number-2)
# {
# subavg[i]<-colMeans(df.raw[3:number])[i]
#}
p = ggplot(data=df.graph3,
aes(x=factor(1),
y=Percentage,
fill = factor(response)
),
print(p=p + geom_bar(width = 1) )
)
}
})
output$textall <- renderText( {
if (is.null(input$file1)) { return() }
"Report For Semester"
})
output$density <- renderPlot({
if (is.null(input$file1)) { return() }
# df.graph <- subset(Data()$df.melt,Subject!="Total" & Subject!="Percentage" & Subject!= "Av")
else if(is.null(Data()$number) | is.null(input$select)){return()}
else
{
for(i in 3:Data()$number)
{
if(input$select==Data()$subj[i])
{
df.graph <- subset(Data()$df.melt,Subject==Data()$subj[i])
break
}
}
}
df.graph[4] <- sapply(df.graph[4],as.numeric)
str(df.graph)
if(input$ss=='g')
print(ggplot(df.graph, aes(x=Mark, fill=Subject)) + geom_density(alpha=.5) + scale_fill_manual( values = c("blue")))
else
print(ggplot(df.graph, aes(x=Mark, fill=Subject)) + geom_histogram(alpha=.5) + scale_fill_manual( values = c("blue")))
# print(hist(Data()$df.raw$t1Av))
})
output$performance <- renderPlot({
if (is.null(input$file1)) { return() }
if(input$ch=='r')
df.graph <- subset(Data()$df.melt,Roll==input$c & Subject!="Percentage" & Subject!="Av" & Subject!="Total" & Subject!="Grand.Total" & Subject!="Class")
else
df.graph <- subset(Data()$df.melt,Name==toupper(input$c) & Subject!="Percentage" & Subject!="Av" & Subject!="Total" & Subject!="Grand.Total" & Subject!="Class")
df.graph[4] <- sapply(df.graph[4],as.numeric)
print(ggplot(df.graph, aes(x=Subject,y=Mark)) +
scale_fill_gradient("Count", low = "cornflowerblue", high = "blue")+
geom_bar(aes(fill=Mark),stat="identity",width = 1))
})
output$report <- renderPrint({
if (is.null(input$file1)) { return() }
#df.gender<- subset(Data()$df.melt,Subject!="Av")
# aov.by.gender <- aov(Mark ~ Gender, data=df.gender)
#summary(aov.by.gender)
})
output$markdown <- renderUI({
HTML(markdown::markdownToHTML(knit('rep.Rmd', quiet = TRUE)))
#a("test", href="http://google.com", target="_blank")
})
output$performancetable<-renderGvis(
{
if (is.null(input$file1)) { return() }
else
{
if(input$ch=='r')
df.temp <- subset(Data()$df.raw,Roll==input$c)
else
df.temp <- subset(Data()$df.raw,Name==toupper(input$c))
gvisTable(df.temp,options=myOptions())
}
}
)
#output$myTable1 <- renderTable({
# data.frame(Sr.No. =(length.out=5), ExamNo.=(""), Student="" ,Marks="",Percentage="")
# }, include.rownames = FALSE)
# output$myTable2 <- renderTable({
# data.frame(No.ofStudents=(length.out=5), Pass=(""), failwithATKT= "" ,fail="",PassPercentage="")
# }, include.rownames = FALSE)
# output$myTable3 <- renderTable({
# data.frame(No.ofStudentsAppearedPercentage=(length.out=5), DISTINCTIONonwards="", FirstClass="" ,HigherSecondClass="",SecondClass="",PassClass="")
# }, include.rownames = FALSE)
# output$myTable4 <- renderTable({
# data.frame(Subject=(length.out=5), Appeared=(""), pass="" ,fail="",percentage="")
# }, include.rownames = FALSE)
output$caption1 <- renderText( {
if (is.null(input$file1)) { return() }
"Student Marks"
})
output$caption2 <- renderText( {
if (is.null(input$file1)) { return() }
# paste0("Average Mark DS : ", Data()$t1Av," CO : ", Data()$t2Av," DELD :", Data()$t3Av)
else if(is.null(Data()$number)|is.null(input$select)){return()}
else
{
for(i in 3:Data()$number)
{
if(input$select==Data()$subj[i])
{
store<-i
break
}
}
paste0("Average Marks : ", Data()$subavg[i-2])
}
})
# output$caption3 <- renderText( {
# if (is.null(input$file1)) { return() }
# paste0("Analysis of Variance by Gender - Boys Average Mark:",Data()$boys, " Girls Average Mark:",Data()$girls)
# })
output$notes2 <- renderUI( {
if (is.null(input$file1)) { return() }
HTML("The above graph shows the variation in pupils' marks by term. The annual spread
will normally be greater as the example data is random and normally some pupils will
tend to be better than others over each term")
paste0("The marks for subject ", input$select,":")
# paste0("tempmax is",Data()$abcde)
})
output$notes3 <- renderUI( {
if (is.null(input$file1)) { return() }
HTML("The Analysis of Variance indicates whether there is a statistically significant
difference between boys and girls in the class. With this 'fixed' data, there is a
significant difference at the 5% level")
})
output$caption4 <- renderText( {
if (is.null(input$file1)) { return() }
if(is.null(input$c)){return()}
else
{
df.rollno <- subset(Data()$df.melt,Roll==input$c & Subject!="Percentage" & Subject!="Av" & Subject!="Total" & Name!="Name" & Roll!= "Roll")
paste0(df.rollno[1,3][],":",df.rollno[1,4][]," ",df.rollno[2,3][],":",df.rollno[2,4][]," ",df.rollno[3,3][],":",df.rollno[3,4][]," ",df.rollno[4,3][],":",df.rollno[4,4][]," ",df.rollno[5,3][],":",df.rollno[5,4][]," ",df.rollno[6,3][],":",df.rollno[6,4][]," ",df.rollno[7,3][],":",df.rollno[7,4][]," ",df.rollno[8,3][],":",df.rollno[8,4][]," ",df.rollno[9,3][],":",df.rollno[9,4][]," ",df.rollno[10,3][],":",df.rollno[10,4][])
}
})
output$newtable<-renderGvis({
if (is.null(input$file1)) { return() }
else if(is.null(input$select)){return()}
else
if(input$filter2=='a')
{
df.sel<-subset(Data()$df.melt,Subject==input$select)
}
else if(input$filter2=='d')
{
for(i in 3:Data()$number){
if(input$select==Data()$subj[i])
{
if(Data()$hh[i-2]>50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=66)
else if(Data()$hh[i-2]>=25 & Data()$hh[i-2]<50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=0.66*50)
else
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=0.66*25)
}
}
}
else if(input$filter2=='f')
{
for(i in 3:Data()$number)
if(input$select==Data()$subj[i])
if(Data()$hh[i-2]>50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=60 & Mark<=66)
else if(Data()$hh[i-2]>=25 & Data()$hh[i-2]<50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=50*0.60 & Mark<=50*0.66)
else
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=0.60*25 & Mark<=25*0.66)
}
else if(input$filter2=='h')
{
for(i in 3:Data()$number)
if(input$select==Data()$subj[i])
if(Data()$hh[i-2]>50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=55 & Mark<=60)
else if(Data()$hh[i-2]>=25 & Data()$hh[i-2]<50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=50*0.55 & Mark<=50*0.60)
else
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=0.55*25 & Mark<=25*0.60)
}
else if(input$filter2=='s')
{
for(i in 3:Data()$number)
if(input$select==Data()$subj[i])
if(Data()$hh[i-2]>50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=50 & Mark<=55)
else if(Data()$hh[i-2]>=25 & Data()$hh[i-2]<50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=50*0.50 & Mark<=50*0.55)
else
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=0.50*25 & Mark<=25*0.55)
}
else if(input$filter2=='p')
{
for(i in 3:Data()$number)
if(input$select==Data()$subj[i])
if(Data()$hh[i-2]>50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=40 & Mark<=50)
else if(Data()$hh[i-2]>=25 & Data()$hh[i-2]<50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=50*0.40 & Mark<=50*0.50)
else
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=0.40*25 & Mark<=25*0.50)
}
else if(input$filter2=='fail')
{
#if(Data()$subavg[input$select]>=40)
for(i in 3:Data()$number)
{
if(input$select==Data()$subj[i])
{
if(Data()$hh[i-2]>50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark<40)
else if(Data()$hh[i-2]>=25 & Data()$hh[i-2]<50)
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark<20)
else
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark<10)
}
}
}
gvisTable(df.sel,options=list(width="920px",height="400px"))
})
output$chart<-renderPlot(
{
# if(input$filter2=='d')
for(i in 3:Data()$number)
if(input$select==Data()$subj[i])
if(Data()$hh[i-2]>50)
{
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=66)
df.sel2<-subset(Data()$df.melt,Subject==input$select & Mark>=60 & Mark<=66)
df.sel3<-subset(Data()$df.melt,Subject==input$select & Mark>=55 & Mark<=60)
df.sel4<-subset(Data()$df.melt,Subject==input$select & Mark>=50 & Mark<=55)
df.sel5<-subset(Data()$df.melt,Subject==input$select & Mark<40)
}
else if(Data()$hh[i-2]>25 & Data()$hh[i-2]<=50)
{
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=0.66*50)
df.sel2<-subset(Data()$df.melt,Subject==input$select & Mark>=50*0.60 & Mark<=50*0.66)
df.sel3<-subset(Data()$df.melt,Subject==input$select & Mark>=50*0.55 & Mark<=50*0.60)
df.sel4<-subset(Data()$df.melt,Subject==input$select & Mark>=50*0.50 & Mark<=50*0.55)
df.sel5<-subset(Data()$df.melt,Subject==input$select & Mark<50*0.40)
}
else if(Data()$hh[i-2]<=25)
{
df.sel<-subset(Data()$df.melt,Subject==input$select & Mark>=0.66*25)
df.sel2<-subset(Data()$df.melt,Subject==input$select & Mark>=25*0.60 & Mark<=25*0.66)
df.sel3<-subset(Data()$df.melt,Subject==input$select & Mark>=25*0.55 & Mark<=25*0.60)
df.sel4<-subset(Data()$df.melt,Subject==input$select & Mark>=25*0.50 & Mark<=25*0.55)
df.sel5<-subset(Data()$df.melt,Subject==input$select & Mark<25*0.40)
}
counttemp<-1
# number<-ncol(df.raw)
for(i in 3:Data()$number)
{
if(input$select==Data()$subj[i])
tempdata<-df.sel[5,4]
# if(Data()$tempdata>40)
break
counttemp<-tempdata
}
cc<-nrow(df.sel)
cc2<-nrow(df.sel2)
cc3<-nrow(df.sel3)
cc4<-nrow(df.sel4)
cc5<-nrow(df.sel5)
# df.sel<-rbind(df.sel,newRow)
# variable<-c("Distinction", "First Class","Higher Second Class"," Second Class","Fail")
x<-c(cc/Data()$tot*100,cc2/Data()$tot*100,cc3/Data()$tot*100,cc4/Data()$tot*100,cc5/Data()$tot*100)
count<-c(paste("Distinction:",cc),paste("First Class:",cc2),paste("Higher Second Class:",cc3),paste("Second Class:",cc4),paste("Fail:",cc5))
cols<-c("brown2","cadetblue3","hotpink2","chartreuse3","peru")
pie(x, labels = count, main = "Distribution of Students According to Class",col=cols,init.angle =90,radius = 1)
#legend("topright",variable, cex = 1.2,
# fill = cols)
}
)
output$allchart <- renderPlot({
  # Pie chart of the whole class, bucketed by overall percentage.
  # FIX: the original bands used closed intervals on both ends
  # (e.g. Mark >= 60 & Mark <= 66 next to Mark >= 66), so a student
  # sitting exactly on a boundary (66, 60, 55 or 50) was counted in
  # two bands at once. The bands below are half-open [lower, upper),
  # so every student falls into exactly one band.
  df.melt <- Data()$df.melt
  df.sel  <- subset(df.melt, Subject == "Percentage" & Mark >= 66)
  df.sel2 <- subset(df.melt, Subject == "Percentage" & Mark >= 60 & Mark < 66)
  df.sel3 <- subset(df.melt, Subject == "Percentage" & Mark >= 55 & Mark < 60)
  df.sel4 <- subset(df.melt, Subject == "Percentage" & Mark >= 50 & Mark < 55)
  df.sel5 <- subset(df.melt, Subject == "Percentage" & Mark >= 40 & Mark < 50)
  df.sel6 <- subset(df.melt, Subject == "Percentage" & Mark < 40)
  # Head count per band.
  cc  <- nrow(df.sel)
  cc2 <- nrow(df.sel2)
  cc3 <- nrow(df.sel3)
  cc4 <- nrow(df.sel4)
  cc5 <- nrow(df.sel5)
  cc6 <- nrow(df.sel6)
  # Slice sizes as a percentage of the total number of students
  # (Data()$tot is assumed to be that total — TODO confirm upstream).
  x <- c(cc, cc2, cc3, cc4, cc5, cc6) / Data()$tot * 100
  count <- c(paste("Distinction:", cc),
             paste("First Class:", cc2),
             paste("Higher Second Class:", cc3),
             paste("Second Class:", cc4),
             paste("Pass Class:", cc5),
             paste("Fail:", cc6))
  cols <- c("brown2", "cadetblue3", "hotpink2", "chartreuse3",
            "darkseagreen4", "peru")
  pie(x, labels = count,
      main = "Distribution of Students According to Class",
      col = cols, init.angle = 90, radius = 1)
})
output$repend <- renderText({
  # Render nothing until a file has been uploaded.
  if (is.null(input$file1)) {
    return(NULL)
  }
})
})
|
library(MESS)
### Name: clotting
### Title: Blood clotting for 158 rats
### Aliases: clotting
### Keywords: datasets
### ** Examples
# Load the example data set and inspect its shape and first rows.
data(clotting)
dim(clotting)
head(clotting)
# Stack the two measurement days into long format: one row per rat-day.
day0 <- transform(clotting, day = 0, pca = PCA0)
day4 <- transform(clotting, day = 4, pca = PCA4)
day.both <- rbind(day0, day4)
# Full model: rat effect plus day-by-locality and day-by-sex interactions.
m1 <- lm(pca ~ rat + day * locality + day * sex, data = day.both)
anova(m1)
summary(m1)
# Reduced model with only rat and day main effects.
m2 <- lm(pca ~ rat + day, data = day.both)
anova(m2)
## Log transformation suggested.
## Random effect of rat.
## maybe str(clotting) ; plot(clotting) ...
|
/data/genthat_extracted_code/MESS/examples/clotting.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 541
|
r
|
library(MESS)
### Name: clotting
### Title: Blood clotting for 158 rats
### Aliases: clotting
### Keywords: datasets
### ** Examples
# Load the example data set and inspect its shape and first rows.
data(clotting)
dim(clotting)
head(clotting)
# Stack the two measurement days into long format: one row per rat-day.
day0 <- transform(clotting, day = 0, pca = PCA0)
day4 <- transform(clotting, day = 4, pca = PCA4)
day.both <- rbind(day0, day4)
# Full model: rat effect plus day-by-locality and day-by-sex interactions.
m1 <- lm(pca ~ rat + day * locality + day * sex, data = day.both)
anova(m1)
summary(m1)
# Reduced model with only rat and day main effects.
m2 <- lm(pca ~ rat + day, data = day.both)
anova(m2)
## Log transformation suggested.
## Random effect of rat.
## maybe str(clotting) ; plot(clotting) ...
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/facet_gridh_paginate.R
\docType{data}
\name{facet_gridh_paginate}
\alias{facet_gridh_paginate}
\alias{FacetGridhPaginate}
\title{Split facet_grid from boxploth over multiple plots}
\usage{
facet_gridh_paginate(
facets,
margins = FALSE,
scales = "fixed",
space = "fixed",
shrink = TRUE,
labeller = "label_value",
as.table = TRUE,
switch = NULL,
drop = TRUE,
ncol = NULL,
nrow = NULL,
page = 1,
byrow = TRUE,
x.variable = "variable"
)
}
\arguments{
\item{facets}{This argument is soft-deprecated, please use \code{rows}
and \code{cols} instead.}
\item{margins}{Either a logical value or a character
vector. Margins are additional facets which contain all the data
for each of the possible values of the faceting variables. If
\code{FALSE}, no additional facets are included (the
default). If \code{TRUE}, margins are included for all faceting
variables. If specified as a character vector, it is the names of
variables for which margins are to be created.}
\item{scales}{Are scales shared across all facets (the default,
\code{"fixed"}), or do they vary across rows (\code{"free_x"}),
columns (\code{"free_y"}), or both rows and columns (\code{"free"})?}
\item{space}{If \code{"fixed"}, the default, all panels have the same size.
If \code{"free_y"} their height will be proportional to the length of the
y scale; if \code{"free_x"} their width will be proportional to the
length of the x scale; or if \code{"free"} both height and width will
vary. This setting has no effect unless the appropriate scales also vary.}
\item{shrink}{If \code{TRUE}, will shrink scales to fit output of
statistics, not raw data. If \code{FALSE}, will be range of raw data
before statistical summary.}
\item{labeller}{A function that takes one data frame of labels and
returns a list or data frame of character vectors. Each input
column corresponds to one factor. Thus there will be more than
one with \code{vars(cyl, am)}. Each output
column gets displayed as one separate line in the strip
label. This function should inherit from the "labeller" S3 class
for compatibility with \code{\link[ggplot2:labeller]{labeller()}}. You can use different labeling
functions for different kind of labels, for example use \code{\link[ggplot2:labellers]{label_parsed()}} for
formatting facet labels. \code{\link[ggplot2:labellers]{label_value()}} is used by default,
check it for more details and pointers to other options.}
\item{as.table}{If \code{TRUE}, the default, the facets are laid out like
a table with highest values at the bottom-right. If \code{FALSE}, the
facets are laid out like a plot with the highest value at the top-right.}
\item{switch}{By default, the labels are displayed on the top and
right of the plot. If \code{"x"}, the top labels will be
displayed to the bottom. If \code{"y"}, the right-hand side
labels will be displayed to the left. Can also be set to
\code{"both"}.}
\item{drop}{If \code{TRUE}, the default, all factor levels not used in the
data will automatically be dropped. If \code{FALSE}, all factor levels
will be shown, regardless of whether or not they appear in the data.}
\item{ncol}{Number of columns per page}
\item{nrow}{Number of rows per page}
\item{page}{The page to draw}
\item{byrow}{Should the pages be created row-wise or column wise}
\item{x.variable}{Specify the x variable used to calculate how many boxes each panel contains}
}
\description{
This extension to [ggplot2::facet_grid()] will allow you to split
a facetted plot over multiple pages. You define a number of rows and columns
per page as well as the page number to plot, and the function will
automatically only plot the correct panels. Usually this will be put in a
loop to render all pages one by one.
}
\note{
If either `ncol` or `nrow` is `NULL` this function will
fall back to the standard `facet_grid` functionality.
}
\examples{
# Draw a small section of the grid
ggplot(diamonds) +
geom_point(aes(carat, price), alpha = 0.1) +
facet_gridh_paginate(color~cut:clarity, ncol = 1, nrow = 3, page = 4, space="free_y", scales="free_y", x.variable="variable")
}
\concept{ggfacet}
\keyword{datasets}
|
/man/facet_gridh_paginate.Rd
|
no_license
|
crotoc/ggfaceth
|
R
| false
| true
| 4,197
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/facet_gridh_paginate.R
\docType{data}
\name{facet_gridh_paginate}
\alias{facet_gridh_paginate}
\alias{FacetGridhPaginate}
\title{Split facet_grid from boxploth over multiple plots}
\usage{
facet_gridh_paginate(
facets,
margins = FALSE,
scales = "fixed",
space = "fixed",
shrink = TRUE,
labeller = "label_value",
as.table = TRUE,
switch = NULL,
drop = TRUE,
ncol = NULL,
nrow = NULL,
page = 1,
byrow = TRUE,
x.variable = "variable"
)
}
\arguments{
\item{facets}{This argument is soft-deprecated, please use \code{rows}
and \code{cols} instead.}
\item{margins}{Either a logical value or a character
vector. Margins are additional facets which contain all the data
for each of the possible values of the faceting variables. If
\code{FALSE}, no additional facets are included (the
default). If \code{TRUE}, margins are included for all faceting
variables. If specified as a character vector, it is the names of
variables for which margins are to be created.}
\item{scales}{Are scales shared across all facets (the default,
\code{"fixed"}), or do they vary across rows (\code{"free_x"}),
columns (\code{"free_y"}), or both rows and columns (\code{"free"})?}
\item{space}{If \code{"fixed"}, the default, all panels have the same size.
If \code{"free_y"} their height will be proportional to the length of the
y scale; if \code{"free_x"} their width will be proportional to the
length of the x scale; or if \code{"free"} both height and width will
vary. This setting has no effect unless the appropriate scales also vary.}
\item{shrink}{If \code{TRUE}, will shrink scales to fit output of
statistics, not raw data. If \code{FALSE}, will be range of raw data
before statistical summary.}
\item{labeller}{A function that takes one data frame of labels and
returns a list or data frame of character vectors. Each input
column corresponds to one factor. Thus there will be more than
one with \code{vars(cyl, am)}. Each output
column gets displayed as one separate line in the strip
label. This function should inherit from the "labeller" S3 class
for compatibility with \code{\link[ggplot2:labeller]{labeller()}}. You can use different labeling
functions for different kind of labels, for example use \code{\link[ggplot2:labellers]{label_parsed()}} for
formatting facet labels. \code{\link[ggplot2:labellers]{label_value()}} is used by default,
check it for more details and pointers to other options.}
\item{as.table}{If \code{TRUE}, the default, the facets are laid out like
a table with highest values at the bottom-right. If \code{FALSE}, the
facets are laid out like a plot with the highest value at the top-right.}
\item{switch}{By default, the labels are displayed on the top and
right of the plot. If \code{"x"}, the top labels will be
displayed to the bottom. If \code{"y"}, the right-hand side
labels will be displayed to the left. Can also be set to
\code{"both"}.}
\item{drop}{If \code{TRUE}, the default, all factor levels not used in the
data will automatically be dropped. If \code{FALSE}, all factor levels
will be shown, regardless of whether or not they appear in the data.}
\item{ncol}{Number of columns per page}
\item{nrow}{Number of rows per page}
\item{page}{The page to draw}
\item{byrow}{Should the pages be created row-wise or column wise}
\item{x.variable}{Specify the x variable used to calculate how many boxes each panel contains}
}
\description{
This extension to [ggplot2::facet_grid()] will allow you to split
a facetted plot over multiple pages. You define a number of rows and columns
per page as well as the page number to plot, and the function will
automatically only plot the correct panels. Usually this will be put in a
loop to render all pages one by one.
}
\note{
If either `ncol` or `nrow` is `NULL` this function will
fall back to the standard `facet_grid` functionality.
}
\examples{
# Draw a small section of the grid
ggplot(diamonds) +
geom_point(aes(carat, price), alpha = 0.1) +
facet_gridh_paginate(color~cut:clarity, ncol = 1, nrow = 3, page = 4, space="free_y", scales="free_y", x.variable="variable")
}
\concept{ggfacet}
\keyword{datasets}
|
\name{california.blkgrp}
\Rdversion{1.1}
\alias{california.blkgrp}
\docType{data}
\title{
california.blkgrp
}
\description{
california.blkgrp is a \code{\link[sp:SpatialPolygonsDataFrame]{SpatialPolygonsDataFrame}} with polygons made from the 2000 US Census tiger/line boundary files (\url{http://www.census.gov/geo/www/tiger/}) for Census Block Groups. It also contains 86 variables from the Summary File 1 (SF 1) which contains the 100-percent data (\url{http://www.census.gov/prod/cen2000/doc/sf1.pdf}).
All polygons are projected in CRS("+proj=longlat +datum=NAD83")
}
\usage{data(california.blkgrp)}
%\format{
%}
\details{
\bold{ID Variables} \cr
\tabular{ll}{
data field name \tab Full Description \cr
state \tab State FIPS code \cr
county \tab County FIPS code \cr
tract \tab Tract FIPS code \cr
blkgrp \tab Blockgroup FIPS code \cr
}
\bold{Census Variables} \cr
\tabular{lll}{
Census SF1 Field Name \tab data field name \tab Full Description \cr
(P007001) \tab pop2000 \tab population 2000 \cr
(P007002) \tab white \tab white alone \cr
(P007003) \tab black \tab black or african american alone \cr
(P007004) \tab ameri.es \tab american indian and alaska native alone \cr
(P007005) \tab asian \tab asian alone \cr
(P007006) \tab hawn.pi \tab native hawaiian and other pacific islander alone \cr
(P007007) \tab other \tab some other race alone \cr
(P007008) \tab mult.race \tab 2 or more races \cr
(P011001) \tab hispanic \tab people who are hispanic or latino \cr
(P008002) \tab not.hispanic.t \tab Not Hispanic or Latino \cr
(P008003) \tab nh.white \tab White alone \cr
(P008004) \tab nh.black \tab Black or African American alone \cr
(P008005) \tab nh.ameri.es \tab American Indian and Alaska Native alone \cr
(P008006) \tab nh.asian \tab Asian alone \cr
(P008007) \tab nh.hawn.pi \tab Native Hawaiian and Other Pacific Islander alone \cr
(P008008) \tab nh.other \tab Some other race alone \cr
(P008010) \tab hispanic.t \tab Hispanic or Latino \cr
(P008011) \tab h.white \tab White alone \cr
(P008012) \tab h.black \tab Black or African American alone \cr
(P008013) \tab h.american.es \tab American Indian and Alaska Native alone \cr
(P008014) \tab h.asian \tab Asian alone \cr
(P008015) \tab h.hawn.pi \tab Native Hawaiian and Other Pacific Islander alone \cr
(P008016) \tab h.other \tab Some other race alone \cr
(P012002) \tab males \tab males \cr
(P012026) \tab females \tab females \cr
(P012003 + P012027) \tab age.under5 \tab male and female under 5 yrs \cr
(P012004-006 + P012028-030) \tab age.5.17 \tab male and female 5 to 17 yrs \cr
(P012007-009 + P012031-033) \tab age.18.21 \tab male and female 18 to 21 yrs \cr
(P012010-011 + P012034-035) \tab age.22.29 \tab male and female 22 to 29 yrs \cr
(P012012-013 + P012036-037) \tab age.30.39 \tab male and female 30 to 39 yrs \cr
(P012014-015 + P012038-039) \tab age.40.49 \tab male and female 40 to 49 yrs \cr
(P012016-019 + P012040-043) \tab age.50.64 \tab male and female 50 to 64 yrs \cr
(P012020-025 + P012044-049) \tab age.65.up \tab male and female 65 yrs and over \cr
(P013001) \tab med.age \tab median age, both sexes \cr
(P013002) \tab med.age.m \tab median age, males \cr
(P013003) \tab med.age.f \tab median age, females \cr
(P015001) \tab households \tab households \cr
(P017001) \tab ave.hh.sz \tab average household size \cr
(P018003) \tab hsehld.1.m \tab 1-person household, male householder \cr
(P018004) \tab hsehld.1.f \tab 1-person household, female householder \cr
(P018008) \tab marhh.chd \tab family households, married-couple family, w/ own children under 18 yrs \cr
(P018009) \tab marhh.no.c \tab family households, married-couple family, no own children under 18 yrs \cr
(P018012) \tab mhh.child \tab family households, other family, male householder, no wife present, w/ own children under 18 yrs \cr
(P018015) \tab fhh.child \tab family households, other family, female householder, no husband present, w/ own children under 18 yrs \cr
(H001001) \tab hh.units \tab housing units total \cr
(H002002) \tab hh.urban \tab urban housing units \cr
(H002005) \tab hh.rural \tab rural housing units \cr
(H003002) \tab hh.occupied \tab occupied housing units \cr
(H003003) \tab hh.vacant \tab vacant housing units \cr
(H004002) \tab hh.owner \tab owner occupied housing units \cr
(H004003) \tab hh.renter \tab renter occupied housing units \cr
(H013002) \tab hh.1person \tab 1-person household \cr
(H013003) \tab hh.2person \tab 2-person household \cr
(H013004) \tab hh.3person \tab 3-person household \cr
(H013005) \tab hh.4person \tab 4-person household \cr
(H013006) \tab hh.5person \tab 5-person household \cr
(H013007) \tab hh.6person \tab 6-person household \cr
(H013008) \tab hh.7person \tab 7-person household \cr
(H015I003)+(H015I011) \tab hh.nh.white.1p \tab (white only, not hispanic ) 1-person household \cr
(H015I004)+(H015I012) \tab hh.nh.white.2p \tab (white only, not hispanic ) 2-person household \cr
(H015I005)+(H015I013) \tab hh.nh.white.3p \tab (white only, not hispanic ) 3-person household \cr
(H015I006)+(H015I014) \tab hh.nh.white.4p \tab (white only, not hispanic ) 4-person household \cr
(H015I007)+(H015I015) \tab hh.nh.white.5p \tab (white only, not hispanic ) 5-person household \cr
(H015I008)+(H015I016) \tab hh.nh.white.6p \tab (white only, not hispanic ) 6-person household \cr
(H015I009)+(H015I017) \tab hh.nh.white.7p \tab (white only, not hispanic ) 7-person household \cr
(H015H003)+(H015H011) \tab hh.hisp.1p \tab (hispanic) 1-person household \cr
(H015H004)+(H015H012) \tab hh.hisp.2p \tab (hispanic) 2-person household \cr
(H015H005)+(H015H013) \tab hh.hisp.3p \tab (hispanic) 3-person household \cr
(H015H006)+(H015H014) \tab hh.hisp.4p \tab (hispanic) 4-person household \cr
(H015H007)+(H015H015) \tab hh.hisp.5p \tab (hispanic) 5-person household \cr
(H015H008)+(H015H016) \tab hh.hisp.6p \tab (hispanic) 6-person household \cr
(H015H009)+(H015H017) \tab hh.hisp.7p \tab (hispanic) 7-person household \cr
(H015B003)+(H015B011) \tab hh.black.1p \tab (black) 1-person household \cr
(H015B004)+(H015B012) \tab hh.black.2p \tab (black) 2-person household \cr
(H015B005)+(H015B013) \tab hh.black.3p \tab (black) 3-person household \cr
(H015B006)+(H015B014) \tab hh.black.4p \tab (black) 4-person household \cr
(H015B007)+(H015B015) \tab hh.black.5p \tab (black) 5-person household \cr
(H015B008)+(H015B016) \tab hh.black.6p \tab (black) 6-person household \cr
(H015B009)+(H015B017) \tab hh.black.7p \tab (black) 7-person household \cr
(H015D003)+(H015D011) \tab hh.asian.1p \tab (asian) 1-person household \cr
(H015D004)+(H015D012) \tab hh.asian.2p \tab (asian) 2-person household \cr
(H015D005)+(H015D013) \tab hh.asian.3p \tab (asian) 3-person household \cr
(H015D006)+(H015D014) \tab hh.asian.4p \tab (asian) 4-person household \cr
(H015D007)+(H015D015) \tab hh.asian.5p \tab (asian) 5-person household \cr
(H015D008)+(H015D016) \tab hh.asian.6p \tab (asian) 6-person household \cr
(H015D009)+(H015D017) \tab hh.asian.7p \tab (asian) 7-person household \cr
}
}
\source{
Census 2000 Summary File 1 [name of state1 or United States]/prepared by the U.S. Census
Bureau, 2001.
}
\references{
\url{http://www.census.gov/ }\cr
\url{http://www2.census.gov/cgi-bin/shapefiles/national-files} \cr
\url{http://www.census.gov/prod/cen2000/doc/sf1.pdf} \cr
}
\examples{
data(california.blkgrp)
############################################
## Helper function for handling coloring of the map
############################################
# Assign a fill color to each polygon in `x` according to which quantile
# bucket its value of `dem` falls in.
#
# x:   object with an @polygons slot (e.g. a SpatialPolygonsDataFrame);
#      only length(x@polygons) is used.
# dem: numeric vector of values, one per polygon.
# y:   optional vector of bucket colors; defaults to four heat colors.
#      NOTE(review): the default palette has exactly 4 entries, matching
#      the quartile breaks — supply `y` if the data yields another count.
# Returns a list: per-polygon colors, the bucket labels, and bucket colors.
color.map <- function(x, dem, y = NULL) {
  l.poly <- length(x@polygons)
  # Bucket values at the (rounded-up) quantile breaks; values at or below
  # the lowest break come back NA and are folded into the first bucket.
  dem.num <- cut(as.numeric(dem), breaks = ceiling(quantile(dem)), dig.lab = 6)
  dem.num[is.na(dem.num)] <- levels(dem.num)[1]
  l.uc <- length(table(dem.num))
  if (is.null(y)) {
    ## using runif instead creates different color schemes, e.g.:
    ## col.heat <- rgb(runif(l.uc, 0, 1), runif(l.uc, 0, 1), runif(l.uc, 0, 1))
    col.heat <- heat.colors(16)[c(14, 8, 4, 1)]  # fixed set of four colors
  } else {
    col.heat <- y
  }
  # One (color, bucket-label) row per bucket.
  dem.col <- cbind(col.heat, names(table(dem.num)))
  colors.dem <- vector(length = l.poly)
  for (i in seq_len(l.uc)) {
    colors.dem[which(dem.num == dem.col[i, 2])] <- dem.col[i, 1]
  }
  out <- list(colors = colors.dem, dem.cut = dem.col[, 2],
              table.colors = dem.col[, 1])
  return(out)
}
############################################
## Helper function for handling coloring of the map
############################################
colors.use<-color.map(california.blkgrp,as.numeric(california.blkgrp@data$pop2000))
plot(california.blkgrp,col=colors.use$colors)
#text(coordinates(california.blkgrp),california.blkgrp@data$name,cex=.3)
title(main="Census Block Groups \n of California, 2000", sub="Quantiles (equal frequency)")
legend("bottomright",legend=colors.use$dem.cut,fill=colors.use$table.colors,bty="o",title="Population Count",bg="white")
###############################
### Alternative way to do the above
###############################
\dontrun{
####This example requires the following additional libraries
library(RColorBrewer)
library(classInt)
library(maps)
####This example requires the following additional libraries
data(california.blkgrp)
map('state',region='california')
plotvar <- as.numeric(california.blkgrp@data$pop2000)
nclr <- 4
#BuPu
plotclr <- brewer.pal(nclr,"BuPu")
class <- classIntervals(plotvar, nclr, style="quantile")
colcode <- findColours(class, plotclr)
plot(california.blkgrp, col=colcode, border="transparent",add=TRUE)
#transparent
title(main="Census Block Groups \n of California, 2000", sub="Quantiles (equal frequency)")
map.text("county", "california",cex=.7,add=TRUE)
map('county','california',add=TRUE)
legend("bottomright","(x,y)", legend=names(attr(colcode, "table")),fill=attr(colcode, "palette"),
cex=0.9, bty="o", title="Population Count",bg="white")
}
}
\keyword{datasets}
|
/man/california.blkgrp.Rd
|
no_license
|
cran/UScensus2000blkgrp
|
R
| false
| false
| 9,851
|
rd
|
\name{california.blkgrp}
\Rdversion{1.1}
\alias{california.blkgrp}
\docType{data}
\title{
california.blkgrp
}
\description{
california.blkgrp is a \code{\link[sp:SpatialPolygonsDataFrame]{SpatialPolygonsDataFrame}} with polygons made from the 2000 US Census tiger/line boundary files (\url{http://www.census.gov/geo/www/tiger/}) for Census Block Groups. It also contains 86 variables from the Summary File 1 (SF 1) which contains the 100-percent data (\url{http://www.census.gov/prod/cen2000/doc/sf1.pdf}).
All polygons are projected in CRS("+proj=longlat +datum=NAD83")
}
\usage{data(california.blkgrp)}
%\format{
%}
\details{
\bold{ID Variables} \cr
\tabular{ll}{
data field name \tab Full Description \cr
state \tab State FIPS code \cr
county \tab County FIPS code \cr
tract \tab Tract FIPS code \cr
blkgrp \tab Blockgroup FIPS code \cr
}
\bold{Census Variables} \cr
\tabular{lll}{
Census SF1 Field Name \tab data field name \tab Full Description \cr
(P007001) \tab pop2000 \tab population 2000 \cr
(P007002) \tab white \tab white alone \cr
(P007003) \tab black \tab black or african american alone \cr
(P007004) \tab ameri.es \tab american indian and alaska native alone \cr
(P007005) \tab asian \tab asian alone \cr
(P007006) \tab hawn.pi \tab native hawaiian and other pacific islander alone \cr
(P007007) \tab other \tab some other race alone \cr
(P007008) \tab mult.race \tab 2 or more races \cr
(P011001) \tab hispanic \tab people who are hispanic or latino \cr
(P008002) \tab not.hispanic.t \tab Not Hispanic or Latino \cr
(P008003) \tab nh.white \tab White alone \cr
(P008004) \tab nh.black \tab Black or African American alone \cr
(P008005) \tab nh.ameri.es \tab American Indian and Alaska Native alone \cr
(P008006) \tab nh.asian \tab Asian alone \cr
(P008007) \tab nh.hawn.pi \tab Native Hawaiian and Other Pacific Islander alone \cr
(P008008) \tab nh.other \tab Some other race alone \cr
(P008010) \tab hispanic.t \tab Hispanic or Latino \cr
(P008011) \tab h.white \tab White alone \cr
(P008012) \tab h.black \tab Black or African American alone \cr
(P008013) \tab h.american.es \tab American Indian and Alaska Native alone \cr
(P008014) \tab h.asian \tab Asian alone \cr
(P008015) \tab h.hawn.pi \tab Native Hawaiian and Other Pacific Islander alone \cr
(P008016) \tab h.other \tab Some other race alone \cr
(P012002) \tab males \tab males \cr
(P012026) \tab females \tab females \cr
(P012003 + P012027) \tab age.under5 \tab male and female under 5 yrs \cr
(P012004-006 + P012028-030) \tab age.5.17 \tab male and female 5 to 17 yrs \cr
(P012007-009 + P012031-033) \tab age.18.21 \tab male and female 18 to 21 yrs \cr
(P012010-011 + P012034-035) \tab age.22.29 \tab male and female 22 to 29 yrs \cr
(P012012-013 + P012036-037) \tab age.30.39 \tab male and female 30 to 39 yrs \cr
(P012014-015 + P012038-039) \tab age.40.49 \tab male and female 40 to 49 yrs \cr
(P012016-019 + P012040-043) \tab age.50.64 \tab male and female 50 to 64 yrs \cr
(P012020-025 + P012044-049) \tab age.65.up \tab male and female 65 yrs and over \cr
(P013001) \tab med.age \tab median age, both sexes \cr
(P013002) \tab med.age.m \tab median age, males \cr
(P013003) \tab med.age.f \tab median age, females \cr
(P015001) \tab households \tab households \cr
(P017001) \tab ave.hh.sz \tab average household size \cr
(P018003) \tab hsehld.1.m \tab 1-person household, male householder \cr
(P018004) \tab hsehld.1.f \tab 1-person household, female householder \cr
(P018008) \tab marhh.chd \tab family households, married-couple family, w/ own children under 18 yrs \cr
(P018009) \tab marhh.no.c \tab family households, married-couple family, no own children under 18 yrs \cr
(P018012) \tab mhh.child \tab family households, other family, male householder, no wife present, w/ own children under 18 yrs \cr
(P018015) \tab fhh.child \tab family households, other family, female householder, no husband present, w/ own children under 18 yrs \cr
(H001001) \tab hh.units \tab housing units total \cr
(H002002) \tab hh.urban \tab urban housing units \cr
(H002005) \tab hh.rural \tab rural housing units \cr
(H003002) \tab hh.occupied \tab occupied housing units \cr
(H003003) \tab hh.vacant \tab vacant housing units \cr
(H004002) \tab hh.owner \tab owner occupied housing units \cr
(H004003) \tab hh.renter \tab renter occupied housing units \cr
(H013002) \tab hh.1person \tab 1-person household \cr
(H013003) \tab hh.2person \tab 2-person household \cr
(H013004) \tab hh.3person \tab 3-person household \cr
(H013005) \tab hh.4person \tab 4-person household \cr
(H013006) \tab hh.5person \tab 5-person household \cr
(H013007) \tab hh.6person \tab 6-person household \cr
(H013008) \tab hh.7person \tab 7-person household \cr
(H015I003)+(H015I011) \tab hh.nh.white.1p \tab (white only, not hispanic ) 1-person household \cr
(H015I004)+(H015I012) \tab hh.nh.white.2p \tab (white only, not hispanic ) 2-person household \cr
(H015I005)+(H015I013) \tab hh.nh.white.3p \tab (white only, not hispanic ) 3-person household \cr
(H015I006)+(H015I014) \tab hh.nh.white.4p \tab (white only, not hispanic ) 4-person household \cr
(H015I007)+(H015I015) \tab hh.nh.white.5p \tab (white only, not hispanic ) 5-person household \cr
(H015I008)+(H015I016) \tab hh.nh.white.6p \tab (white only, not hispanic ) 6-person household \cr
(H015I009)+(H015I017) \tab hh.nh.white.7p \tab (white only, not hispanic ) 7-person household \cr
(H015H003)+(H015H011) \tab hh.hisp.1p \tab (hispanic) 1-person household \cr
(H015H004)+(H015H012) \tab hh.hisp.2p \tab (hispanic) 2-person household \cr
(H015H005)+(H015H013) \tab hh.hisp.3p \tab (hispanic) 3-person household \cr
(H015H006)+(H015H014) \tab hh.hisp.4p \tab (hispanic) 4-person household \cr
(H015H007)+(H015H015) \tab hh.hisp.5p \tab (hispanic) 5-person household \cr
(H015H008)+(H015H016) \tab hh.hisp.6p \tab (hispanic) 6-person household \cr
(H015H009)+(H015H017) \tab hh.hisp.7p \tab (hispanic) 7-person household \cr
(H015B003)+(H015B011) \tab hh.black.1p \tab (black) 1-person household \cr
(H015B004)+(H015B012) \tab hh.black.2p \tab (black) 2-person household \cr
(H015B005)+(H015B013) \tab hh.black.3p \tab (black) 3-person household \cr
(H015B006)+(H015B014) \tab hh.black.4p \tab (black) 4-person household \cr
(H015B007)+(H015B015) \tab hh.black.5p \tab (black) 5-person household \cr
(H015B008)+(H015B016) \tab hh.black.6p \tab (black) 6-person household \cr
(H015B009)+(H015B017) \tab hh.black.7p \tab (black) 7-person household \cr
(H015D003)+(H015D011) \tab hh.asian.1p \tab (asian) 1-person household \cr
(H015D004)+(H015D012) \tab hh.asian.2p \tab (asian) 2-person household \cr
(H015D005)+(H015D013) \tab hh.asian.3p \tab (asian) 3-person household \cr
(H015D006)+(H015D014) \tab hh.asian.4p \tab (asian) 4-person household \cr
(H015D007)+(H015D015) \tab hh.asian.5p \tab (asian) 5-person household \cr
(H015D008)+(H015D016) \tab hh.asian.6p \tab (asian) 6-person household \cr
(H015D009)+(H015D017) \tab hh.asian.7p \tab (asian) 7-person household \cr
}
}
\source{
Census 2000 Summary File 1 [name of state1 or United States]/prepared by the U.S. Census
Bureau, 2001.
}
\references{
\url{http://www.census.gov/ }\cr
\url{http://www2.census.gov/cgi-bin/shapefiles/national-files} \cr
\url{http://www.census.gov/prod/cen2000/doc/sf1.pdf} \cr
}
\examples{
data(california.blkgrp)
############################################
## Helper function for handling coloring of the map
############################################
# Assign a fill color to each polygon in `x` according to which quantile
# bucket its value of `dem` falls in.
#
# x:   object with an @polygons slot (e.g. a SpatialPolygonsDataFrame);
#      only length(x@polygons) is used.
# dem: numeric vector of values, one per polygon.
# y:   optional vector of bucket colors; defaults to four heat colors.
#      NOTE(review): the default palette has exactly 4 entries, matching
#      the quartile breaks — supply `y` if the data yields another count.
# Returns a list: per-polygon colors, the bucket labels, and bucket colors.
color.map <- function(x, dem, y = NULL) {
  l.poly <- length(x@polygons)
  # Bucket values at the (rounded-up) quantile breaks; values at or below
  # the lowest break come back NA and are folded into the first bucket.
  dem.num <- cut(as.numeric(dem), breaks = ceiling(quantile(dem)), dig.lab = 6)
  dem.num[is.na(dem.num)] <- levels(dem.num)[1]
  l.uc <- length(table(dem.num))
  if (is.null(y)) {
    ## using runif instead creates different color schemes, e.g.:
    ## col.heat <- rgb(runif(l.uc, 0, 1), runif(l.uc, 0, 1), runif(l.uc, 0, 1))
    col.heat <- heat.colors(16)[c(14, 8, 4, 1)]  # fixed set of four colors
  } else {
    col.heat <- y
  }
  # One (color, bucket-label) row per bucket.
  dem.col <- cbind(col.heat, names(table(dem.num)))
  colors.dem <- vector(length = l.poly)
  for (i in seq_len(l.uc)) {
    colors.dem[which(dem.num == dem.col[i, 2])] <- dem.col[i, 1]
  }
  out <- list(colors = colors.dem, dem.cut = dem.col[, 2],
              table.colors = dem.col[, 1])
  return(out)
}
############################################
## Helper function for handling coloring of the map
############################################
colors.use<-color.map(california.blkgrp,as.numeric(california.blkgrp@data$pop2000))
plot(california.blkgrp,col=colors.use$colors)
#text(coordinates(california.blkgrp),california.blkgrp@data$name,cex=.3)
title(main="Census Block Groups \n of California, 2000", sub="Quantiles (equal frequency)")
legend("bottomright",legend=colors.use$dem.cut,fill=colors.use$table.colors,bty="o",title="Population Count",bg="white")
###############################
### Alternative way to do the above
###############################
\dontrun{
####This example requires the following additional libraries
library(RColorBrewer)
library(classInt)
library(maps)
####This example requires the following additional libraries
data(california.blkgrp)
map('state',region='california')
plotvar <- as.numeric(california.blkgrp@data$pop2000)
nclr <- 4
#BuPu
plotclr <- brewer.pal(nclr,"BuPu")
class <- classIntervals(plotvar, nclr, style="quantile")
colcode <- findColours(class, plotclr)
plot(california.blkgrp, col=colcode, border="transparent",add=TRUE)
#transparent
title(main="Census Block Groups \n of California, 2000", sub="Quantiles (equal frequency)")
map.text("county", "california",cex=.7,add=TRUE)
map('county','california',add=TRUE)
legend("bottomright","(x,y)", legend=names(attr(colcode, "table")),fill=attr(colcode, "palette"),
cex=0.9, bty="o", title="Population Count",bg="white")
}
}
\keyword{datasets}
|
############################### Calculate beta diversity for datasets #########
# R function to script
# R script (09/13) for analyses in Weissbecker et al. 2018
# version: August 2018
######################
library(vegan) #vegan_2.5-2
library(gdata) #gdata_2.18.0
#sessionInfo()
#R version 3.5.1 (2018-07-02)
######################
# The function needs three arguments:
# 1) data, if phyloseq it will extract the OTU table
# 2) type of transformation ("none" or decostand options)
# 3) type of distance measure ("none" or vegdist options)
# Example: Fungi_beta<-create_beta(fungi_phyloseq,"transform-method", "distance-measure")
#################################
# Compute pairwise beta-diversity values for a community data set.
#
# mydata: a samples-x-OTU matrix/data frame, or a phyloseq object (its
#         OTU table is extracted and transposed so rows are samples).
# trans: "none", "log(x+1)", or any method accepted by vegan::decostand().
# distance_measure: "none" or any method accepted by vegan::vegdist().
# Returns the lower triangle (excluding the diagonal) of the resulting
# matrix, as a plain vector of pairwise values.
create_beta <- function(mydata, trans, distance_measure) {
  # inherits() is the robust class test (handles multi-class objects),
  # unlike comparing class(mydata)[1] against a string.
  if (inherits(mydata, "phyloseq")) {
    print("data recognized as phyloseq object")
    mydata <- t(otu_table(mydata))
  }
  if (trans != "none") {
    # "log(x+1)" is handled directly; everything else is delegated to
    # vegan::decostand().
    if (trans == "log(x+1)") {
      mydata <- log(mydata + 1)
    } else {
      mydata <- decostand(mydata, method = trans)
    }
    print("data transformation")
    print(trans)
  }
  if (distance_measure != "none") {
    print("dissimilarity calculation")
    print(distance_measure)
    mydata <- as.matrix(vegdist(mydata, method = distance_measure))
  }
  # Drop the redundant upper triangle and diagonal of the (symmetric) matrix.
  mydata_dist <- lowerTriangle(mydata, diag = FALSE)
  return(mydata_dist)
}
|
/09_Plot_beta_fungi_localtree_function.R
|
no_license
|
cw-ufz/BEFChina_fungaldiversity
|
R
| false
| false
| 1,369
|
r
|
############################### Calculate beta diversity for datasets #########
# R function to script
# R script (09/13) for analyses in Weissbecker et al. 2018
# version: August 2018
######################
library(vegan) #vegan_2.5-2
library(gdata) #gdata_2.18.0
#sessionInfo()
#R version 3.5.1 (2018-07-02)
######################
# The function needs three arguments:
# 1) data, if phyloseq it will extract the OTU table
# 2) type of transformation ("none" or decostand options)
# 3) type of distance measure ("none" or vergdist options)
# Example: Fungi_beta<-create_beta(fungi_phyloseq,"transform-method", "distance-measure")
#################################
# create_beta: compute beta diversity as a vector of pairwise distances.
# mydata: samples-x-OTU table, or a phyloseq object (its OTU table is
#   extracted and transposed so rows are samples).
# trans: "none", "log(x+1)", or a vegan::decostand() method name.
# distance_measure: "none" or a vegan::vegdist() method name.
# Returns: numeric vector of the lower triangle (diagonal excluded) of the
#   resulting (dis)similarity matrix.
create_beta <- function(mydata, trans, distance_measure) {
# phyloseq stores samples in columns; transpose so rows are samples.
if (class(mydata)[1]=="phyloseq") {
print("data recognized as phyloseq object"); mydata<-t(otu_table(mydata))
}
# Optional transformation; "log(x+1)" is not a decostand() method, so it
# is handled explicitly.
if (trans != "none"){
if(trans == "log(x+1)"){
mydata<-log(mydata+1)
} else {
mydata<-decostand(mydata, method=trans)
}
print("data transformation")
print(trans)
}
# Optional dissimilarity calculation (skipped when mydata is already a
# distance matrix).
if (distance_measure != "none"){
print("dissimilarity calculation")
print(distance_measure)
mydata<-as.matrix(vegdist(mydata, method=distance_measure))
}
# Keep only the unique pairwise values (lower triangle, no diagonal).
mydata_dist<-lowerTriangle(mydata, diag=FALSE)
return(mydata_dist)
}
|
# Scrape per-tag problem statistics from acmicpc.net (BOJ) with RSelenium.
#
# Prerequisite (run in a terminal from the project folder):
#   java -Dwebdriver.chrome.driver="chromedriver.exe" \
#        -jar selenium-server-standalone-4.0.0-alpha-1.jar -port 4445
library(RSelenium)

# Connect to the local Selenium server and open Chrome.
remDr <- remoteDriver(remoteServerAddr = "localhost",
                      port = 4445, browserName = "chrome")
remDr$open()
site <- 'https://www.acmicpc.net/problem/tags'
remDr$navigate(site)

# Folder inside the working directory where the per-tag CSV files go.
dir.create('BAEKJOON')

for (n in 1:30) {
  pageLink <- NULL
  algo_title <- NULL
  problem_num <- NULL
  answer_percent <- NULL
  # Locate the n-th algorithm-tag link in the tag table.
  Sys.sleep(5)
  pageLink <- remDr$findElements(using='xpath',
                                 value= paste0('/html/body/div[3]/div[2]/div[5]/div/div/table/tbody/tr[', n, ']/td[1]/a'))
  # Algorithm tag name.
  algo_titles <- sapply(pageLink, function(x) {x$getElementText()})
  print(algo_titles)
  algo_title <- append(algo_title, unlist(algo_titles))
  # Number of problems carrying this tag.
  algo_node <- remDr$findElements(using='xpath',
                                  value= paste0('/html/body/div[3]/div[2]/div[5]/div/div/table/tbody/tr[', n, ']/td[3]'))
  problem_nums <- sapply(algo_node, function(x) {x$getElementText()})
  print(problem_nums)
  problem_num <- append(problem_num, unlist(problem_nums))
  # Click the tag to open its problem list.
  remDr$executeScript("arguments[0].click();", pageLink)
  Sys.sleep(3)
  pageLink_next <- NULL
  curr_PageOldNum <- 0
  repeat {
    # Collect the answer-rate column (6th td) of the current page.
    problem_nodes <- remDr$findElements(using='xpath',
                                        value= paste0('//*[@id="problemset"]/tbody/tr/td[6]'))
    answer_percents <- sapply(problem_nodes, function(x) {x$getElementText()})
    answer_percent <- append(answer_percent, unlist(answer_percents))
    # Advance to the next page of the problem list.
    pageLink_next <- remDr$findElements(using='css', "#next_page")
    remDr$executeScript("arguments[0].click();", pageLink_next)
    Sys.sleep(1)
    curr_PageElem <- remDr$findElement(using='css',
                                       'div.wrapper > div.container.content > div:nth-child(6) > div:nth-child(2) > div > ul > li.active')
    curr_PageNewNum <- as.numeric(curr_PageElem$getElementText())
    # The active page number did not change -> last page reached.
    if (curr_PageNewNum == curr_PageOldNum) {
      cat("종료\n")
      # Done with this tag: go back to the tag list page.
      site <- 'https://www.acmicpc.net/problem/tags'
      remDr$navigate(site)
      df <- data.frame(problem_num, answer_percent, check.rows = FALSE)
      # Build the output file name from the tag title collected above.
      # (BUG FIX: df has no "algo_title" column, so df[n, "algo_title"]
      # never produced a usable name.)
      file_name <- paste0(algo_title, ".csv")
      # (BUG FIX: the path previously said "BEAKJOON", which does not match
      # the "BAEKJOON" folder created above, so write.csv failed.)
      save_name <- paste0("./BAEKJOON/", n, "_", file_name)
      write.csv(df, save_name)
      break
    }
    curr_PageOldNum <- curr_PageNewNum
  }
}
|
/s_project.R
|
no_license
|
lee-dk/R
|
R
| false
| false
| 2,812
|
r
|
# cmd -> change to the project folder, then start the Selenium server:
# -> java -Dwebdriver.chrome.driver="chromedriver.exe" -jar selenium-server-standalone-4.0.0-alpha-1.jar -port 4445
library(RSelenium)
# Connect to the local Selenium server and open Chrome.
remDr <- remoteDriver(remoteServerAddr = "localhost" ,
port = 4445, browserName = "chrome")
remDr$open()
site <- 'https://www.acmicpc.net/problem/tags'
remDr$navigate(site)
# Create a new folder in the working directory (CSV output folder).
dir.create('BAEKJOON')
for (n in 1:30) {
pageLink <- NULL
algo_title <- NULL
problem_num <- NULL
answer_percent <- NULL
# Go to the n-th tag (tags with 100+ problems).
Sys.sleep(5)
pageLink <- remDr$findElements(using='xpath',
value= paste0('/html/body/div[3]/div[2]/div[5]/div/div/table/tbody/tr[', n, ']/td[1]/a'))
# Algorithm tag name.
algo_titles <- sapply(pageLink, function(x) {x$getElementText()})
print(algo_titles)
algo_title <- append(algo_title, unlist(algo_titles))
# Number of problems per tag.
algo_node <- remDr$findElements(using='xpath',
value= paste0('/html/body/div[3]/div[2]/div[5]/div/div/table/tbody/tr[', n, ']/td[3]'))
problem_nums <- sapply(algo_node, function(x) {x$getElementText()})
print(problem_nums)
problem_num <- append(problem_num, unlist(problem_nums))
# Click the tag to open its problem list.
remDr$executeScript("arguments[0].click();",pageLink)
Sys.sleep(3)
pageLink_next <- NULL
curr_PageOldNum <- 0
repeat{
# Answer rate (6th column of the problem table).
problem_nodes <- remDr$findElements(using='xpath',
value= paste0('//*[@id="problemset"]/tbody/tr/td[6]'))
answer_percents <- sapply(problem_nodes, function(x) {x$getElementText()})
answer_percent <- append(answer_percent, unlist(answer_percents))
# Next page.
pageLink_next <- remDr$findElements(using='css',"#next_page")
remDr$executeScript("arguments[0].click();",pageLink_next)
Sys.sleep(1)
curr_PageElem <- remDr$findElement(using='css',
'div.wrapper > div.container.content > div:nth-child(6) > div:nth-child(2) > div > ul > li.active')
curr_PageNewNum <- as.numeric(curr_PageElem$getElementText())
# Active page number unchanged -> last page reached for this tag.
if(curr_PageNewNum == curr_PageOldNum) {
cat("종료\n")
# When one tag is finished, return to the first screen (tag list).
site <- 'https://www.acmicpc.net/problem/tags'
remDr$navigate(site)
df <- data.frame(problem_num, answer_percent, check.rows = FALSE)
# Build the file name.
# NOTE(review): df has no "algo_title" column, so df[n,"algo_title"] is
# NULL here -- presumably algo_title itself was intended; confirm.
file_name <- paste0(df[n,"algo_title"],".csv")
# Save path + order + file name.
# NOTE(review): path says "BEAKJOON" but the folder created above is
# "BAEKJOON" -- this write will fail; confirm intended folder.
save_name <- paste0("./BEAKJOON/", n, "_", file_name)
# Save the file.
write.csv(df, save_name)
break;
}
curr_PageOldNum <- curr_PageNewNum;
}
}
|
# Packages for stream-metabolism data handling.
library(powstreams)
library(streamMetabolizer)
library(plyr)
library(fBasics)
# Elevation files required a little clean-up ahead of importing into R.
# The following sites caused problems importing data, so they were deleted:
# 02231254, 03220510, 295554095095093402, 46093912....
# Directory holding the raw data files.
workingDir <- "c:/Users/estets/Documents/R/workSpaces/POWELL_CENTER/stream_metab_usa/sandbox_ted/data/"
# The four elevation tables (suffixes a-d) share one column layout; read
# them with a single helper and stack them into one data frame.
elev_colClasses <- c("NULL", "character", "NULL", "NULL", "NULL", "NULL",
                     "NULL", "numeric", "character", "character", "character")
elev_parts <- lapply(c("a", "b", "c", "d"), function(suffix) {
  read.table(paste0(workingDir, "ancillary_data/site_elevation_", suffix, ".txt"),
             colClasses = elev_colClasses, sep = "\t", header = TRUE,
             comment.char = "#", fill = TRUE)
})
site_elev <- do.call(rbind, elev_parts)
# List of site IDs from Science Base for Powell Center work.
sb_sites <- read.csv(paste0(workingDir, "sb_site_names2.csv"), header = TRUE)
# Harmonize the site identifier, then join elevations onto the site list,
# dropping rows with missing values.
site_elev$site_id <- paste0("nwis_", site_elev$site_no)
diel_sites <- na.omit(merge(x = sb_sites, y = site_elev, by = "site_id", row.names = FALSE))
# For every candidate site, compute daily diel dissolved-oxygen (DO)
# saturation statistics and write one "<site_id>_range.csv" file per site.
for (i in seq_len(nrow(diel_sites))) {
  oneSiteName <- paste0(diel_sites$site_id[i])
  # Check if the data file exists, skip the site if it does not.
  # (FIX: file.exists() already returns a logical; comparing it to the
  # string "TRUE" relied on implicit logical-to-character coercion.)
  if (file.exists(paste0(workingDir, oneSiteName, ".csv"))) {
    oneSiteData <- read.csv(paste0(workingDir, oneSiteName, ".csv"), header = TRUE)
    # Convert GMT timestamps to apparent-solar dates for daily grouping.
    oneSiteData$DatePos <- as.POSIXct(oneSiteData$DateTime, tz="GMT")
    oneSiteData$DateSol <- as.Date(convert_GMT_to_solartime(oneSiteData$DatePos,
        diel_sites$dec_long_va[i], time.type="apparent solar"))
    # Barometric pressure (mm Hg) estimated from site elevation.
    bp <- ((1-(2.25577e-5*diel_sites$alt_va[i]))^5.25588)*760
    # O2 saturation from water temperature (polynomial in scaled log
    # temperature), corrected for barometric pressure.
    oneSiteData$o2Sat <- (exp(2.00907 + 3.22014 * (log((298.15-oneSiteData$ts_wtr) /
        (273.15 + oneSiteData$ts_wtr))) + 4.0501 * (log((298.15 - oneSiteData$ts_wtr) /
        (273.15 + oneSiteData$ts_wtr))) ^ 2 + 4.94457 * (log((298.15 - oneSiteData$ts_wtr)/
        (273.15 + oneSiteData$ts_wtr))) ^ 3 - 0.256847 * (log((298.15 - oneSiteData$ts_wtr)/
        (273.15 + oneSiteData$ts_wtr))) ^ 4 + 3.88767 * (log((298.15 - oneSiteData$ts_wtr)/
        (273.15 + oneSiteData$ts_wtr))) ^ 5)) * 1.4276 * bp / 760
    # DO departure from saturation, in mmol/m^3 (divide mg/L by 32 g/mol).
    oneSiteData$dO2 <- (1000*oneSiteData$ts_doobs/32) - (1000*oneSiteData$o2Sat/32)
    # Daily min/max DO departure plus daily median discharge and temperature.
    dailyO2Min <- aggregate(oneSiteData$dO2, list(as.Date(oneSiteData$DateSol)), min, na.rm=T)
    names(dailyO2Min) <- c("solDate","dO2Min")
    dailyO2Max <- aggregate(oneSiteData$dO2, list(as.Date(oneSiteData$DateSol)), max, na.rm=T)
    names(dailyO2Max) <- c("solDate","dO2Max")
    dailyQ <- aggregate(oneSiteData$ts_disch, list(as.Date(oneSiteData$DateSol)), median, na.rm=T)
    names(dailyQ) <- c("solDate","dayQ")
    dailyT <- aggregate(oneSiteData$ts_wtr, list(as.Date(oneSiteData$DateSol)), median, na.rm=T)
    names(dailyT) <- c("solDate","dayT")
    # Join the daily summaries and compute the diel O2 range.
    dielSaturation <- merge(x = dailyO2Max, y = dailyO2Min, by = "solDate")
    dielSaturation <- merge(x = dailyQ, y = dielSaturation, by = "solDate")
    dielSaturation <- merge(x = dailyT, y = dielSaturation, by = "solDate")
    dielSaturation$O2Range <- dielSaturation$dO2Max - dielSaturation$dO2Min
    output <- data.frame(diel_sites$site_id[i], dielSaturation)
    #plot(x=log(dielSaturation$dayQ), y=dielSaturation$O2Range)
    #plot(x=dielSaturation$dayT, y=dielSaturation$O2Range)
    write.table(output, paste0(workingDir, "diel_saturation/", diel_sites$site_id[i], "_range.csv"), sep=",", row.names=FALSE, col.names=TRUE)
  } else {
    # Remember the last missing site name (diagnostic only).
    notExist <- oneSiteName
  }
}
# Summarize the per-site diel DO range files into one statistics table.
dielStats <- NULL
dielDir <- "c:/Users/estets/Documents/R/workSpaces/POWELL_CENTER/stream_metab_usa/sandbox_ted/data/diel_saturation/"
for (i in seq_len(nrow(diel_sites))) {
  dielName <- paste0(diel_sites$site_id[i], "_range.csv")
  # (FIX: file.exists() returns a logical; the former == "TRUE" comparison
  # relied on implicit coercion.)
  if (file.exists(paste0(dielDir, dielName))) {
    dielData <- read.csv(paste0(dielDir, dielName), header = TRUE)
    # 90th / 50th percentile and mean of the daily diel O2 range.
    diel90 <- quantile(dielData$O2Range, .90, names = FALSE)
    diel50 <- quantile(dielData$O2Range, .50, names = FALSE)
    dielMean <- mean(dielData$O2Range, na.rm = TRUE)
    dielStatsLine <- data.frame(site_id = diel_sites$site_id[i], diel90 = diel90,
                                diel50 = diel50, dielMean = dielMean)
  } else {
    # No range file for this site: keep the row with NA statistics.
    dielStatsLine <- data.frame(site_id = diel_sites$site_id[i], diel90 = NA,
                                diel50 = NA, dielMean = NA)
  }
  dielStats <- rbind(dielStats, dielStatsLine)
}
# Drop sites whose mean could not be computed (NA/NaN/Inf).
dielStats <- subset(dielStats, is.finite(dielStats$dielMean))
par(new = FALSE)
# Rank sites by each statistic and plot statistic versus rank.
dielStats$rank90 <- rank(dielStats$diel90)
dielStats$rank50 <- rank(dielStats$diel50)
dielStats$rankMean <- rank(dielStats$dielMean)
plot(x = dielStats$rank90, y = dielStats$diel90)
plot(x = dielStats$rank50, y = dielStats$diel50)
plot(x = dielStats$rankMean, y = dielStats$dielMean)
# output = data.frame(metadata, flux.co2.calc, flux.ch4.calc, GTV.co2.calc, GTV.ch4.calc,mean.r2.co2, mean.r2.ch4)
# str(output)
# write.table(output, output.file, sep=",", row.names=FALSE, col.names=TRUE)
#dielSaturation <- function(infile,longitude,elevation){
# bp <- ((1-(2.25577e-5*elevation))^5.25588)*760
# ddply(
# group_by(mutate(infile,
# Date=as.Date(convert_GMT_to_solartime(as.POSIXct(infile$DateTime,
# tz = "GMT"),longitude,
# time.type="apparent solar")),
# o2Sat = (exp(2.00907 + 3.22014 * (log((298.15-infile$ts_wtr) /
# (273.15 + infile$ts_wtr))) + 4.0501 * (log((298.15 - infile$ts_wtr) /
# (273.15 + infile$ts_wtr))) ^ 2 + 4.94457 * (log((298.15 - infile$ts_wtr) /
# (273.15 + infile$ts_wtr))) ^ 3 - 0.256847 * (log((298.15 - infile$ts_wtr) /
# (273.15 + infile$ts_wtr))) ^ 4 + 3.88767 * (log((298.15 - infile$ts_wtr) /
# (273.15 + infile$ts_wtr))) ^ 5)) * 1.4276 * bp / 760,
# dO2 = (1000*infile$ts_doobs/32)-(1000*o2Sat/32)), Date),
# "Date",
# summarise,diel_range = max(dO2) - min(dO2), ndays=length(unique(Date)))
#group_by(infile,Date)
#ddply(infile,"Date",summarise,diel_range = max(dO2)-min(dO2),ndays=length(unique(Date)))
#}
|
/sandbox_ted/dielSaturation06022015.R
|
permissive
|
tedstets-usgs/stream_metab_usa
|
R
| false
| false
| 6,824
|
r
|
library(powstreams)
library(streamMetabolizer)
library(plyr)
library(fBasics)
#Elevation files required a little clean-up ahead of importing into R.
#Following sites caused problems importing data, so they were deleted:
#02231254, 03220510, 295554095095093402, 46093912....
#Set working directory
workingDir <- "c:/Users/estets/Documents/R/workSpaces/POWELL_CENTER/stream_metab_usa/sandbox_ted/data/"
#Read elevation data (four files a-d with identical column layouts)
pre_elev_a <- read.table(paste0(workingDir,"ancillary_data/site_elevation_a.txt"),
colClasses=c("NULL","character","NULL","NULL","NULL","NULL","NULL","numeric",
"character","character","character"),sep="\t",header=TRUE,comment.char="#",fill=TRUE)
pre_elev_b <- read.table(paste0(workingDir,"ancillary_data/site_elevation_b.txt"),
colClasses=c("NULL","character","NULL","NULL","NULL","NULL","NULL","numeric",
"character","character","character"),sep="\t",header=TRUE,comment.char="#",fill=TRUE)
pre_elev_c <- read.table(paste0(workingDir,"ancillary_data/site_elevation_c.txt"),
colClasses=c("NULL","character","NULL","NULL","NULL","NULL","NULL","numeric",
"character","character","character"),sep="\t",header=TRUE,comment.char="#",fill=TRUE)
pre_elev_d <- read.table(paste0(workingDir,"ancillary_data/site_elevation_d.txt"),
colClasses=c("NULL","character","NULL","NULL","NULL","NULL","NULL","numeric",
"character","character","character"),sep="\t",header=TRUE,comment.char="#",fill=TRUE)
# Stack the four partial elevation tables into one data frame.
site_elev <- rbind(pre_elev_a,pre_elev_b,pre_elev_c,pre_elev_d)
#List of site IDs from Science Base for Powell Center work.
sb_sites <- read.csv(paste0(workingDir,"sb_site_names2.csv"),header=TRUE)
# Harmonize the site identifier, then join elevations onto the site list,
# dropping rows with missing values.
site_elev$site_id <- paste0("nwis_",site_elev$site_no)
diel_sites <- na.omit(merge(x=sb_sites, y=site_elev, by="site_id",row.names=FALSE))
# For every site: compute daily diel dissolved-oxygen statistics and write
# one "<site_id>_range.csv" file.
for (i in 1:nrow(diel_sites)) {
oneSiteName <- paste0(diel_sites$site_id[i])
#Check if data file exists, skip if it does not
if (file.exists(paste0(workingDir,oneSiteName,".csv")) == "TRUE"){
oneSiteData <- read.csv(paste0(workingDir,oneSiteName,".csv"), header = TRUE)
# Convert GMT timestamps to apparent-solar dates for daily grouping.
oneSiteData$DatePos <- as.POSIXct(oneSiteData$DateTime, tz="GMT")
oneSiteData$DateSol <- as.Date(convert_GMT_to_solartime(oneSiteData$DatePos,
diel_sites$dec_long_va[i],time.type="apparent solar"))
# Barometric pressure (mm Hg) estimated from site elevation.
bp <- ((1-(2.25577e-5*diel_sites$alt_va[i]))^5.25588)*760
# O2 saturation from water temperature (polynomial in scaled log
# temperature), corrected for barometric pressure.
oneSiteData$o2Sat <- (exp(2.00907 + 3.22014 * (log((298.15-oneSiteData$ts_wtr) /
(273.15 + oneSiteData$ts_wtr))) + 4.0501 * (log((298.15 - oneSiteData$ts_wtr) /
(273.15 + oneSiteData$ts_wtr))) ^ 2 + 4.94457 * (log((298.15 - oneSiteData$ts_wtr)/
(273.15 + oneSiteData$ts_wtr))) ^ 3 - 0.256847 * (log((298.15 - oneSiteData$ts_wtr)/
(273.15 + oneSiteData$ts_wtr))) ^ 4 + 3.88767 * (log((298.15 - oneSiteData$ts_wtr)/
(273.15 + oneSiteData$ts_wtr))) ^ 5)) * 1.4276 * bp / 760
# DO departure from saturation (mg/L divided by 32 g/mol, times 1000).
oneSiteData$dO2 <- (1000*oneSiteData$ts_doobs/32) - (1000*oneSiteData$o2Sat/32)
# Daily min/max DO departure plus daily median discharge and temperature.
dailyO2Min <- aggregate(oneSiteData$dO2,list(as.Date(oneSiteData$DateSol)),min,na.rm=T)
names(dailyO2Min) <- c("solDate","dO2Min")
dailyO2Max <- aggregate(oneSiteData$dO2,list(as.Date(oneSiteData$DateSol)),max,na.rm=T)
names(dailyO2Max) <- c("solDate","dO2Max")
dailyQ <- aggregate(oneSiteData$ts_disch,list(as.Date(oneSiteData$DateSol)),median,na.rm=T)
names(dailyQ) <- c("solDate","dayQ")
dailyT <- aggregate(oneSiteData$ts_wtr,list(as.Date(oneSiteData$DateSol)),median,na.rm=T)
names(dailyT) <- c("solDate","dayT")
# Join the daily summaries and compute the diel O2 range.
dielSaturation <- merge(x = dailyO2Max, y = dailyO2Min, by = "solDate")
dielSaturation <- merge(x = dailyQ, y = dielSaturation, by = "solDate")
dielSaturation <- merge(x = dailyT, y = dielSaturation, by = "solDate")
dielSaturation$O2Range <- dielSaturation$dO2Max - dielSaturation$dO2Min
output <- data.frame(diel_sites$site_id[i],dielSaturation)
#plot(x=log(dielSaturation$dayQ), y=dielSaturation$O2Range)
#plot(x=dielSaturation$dayT, y=dielSaturation$O2Range)
write.table(output, paste0(workingDir,"diel_saturation/",diel_sites$site_id[i],"_range.csv"), sep=",", row.names=FALSE, col.names=TRUE)
}
# Remember the last missing site name (diagnostic only).
else {notExist <- oneSiteName}
}
# Summarize the per-site range files into one statistics table.
dielStats <- NULL
dielDir <- "c:/Users/estets/Documents/R/workSpaces/POWELL_CENTER/stream_metab_usa/sandbox_ted/data/diel_saturation/"
for (i in 1:nrow(diel_sites)) {
dielName <- paste0(diel_sites$site_id[i],"_range.csv")
if (file.exists(paste0(dielDir,dielName)) == "TRUE"){
dielData <- read.csv(paste0(dielDir,dielName), header = TRUE)
# 90th / 50th percentile and mean of the daily diel O2 range.
diel90 <- quantile(dielData$O2Range,.90,names=FALSE)
diel50 <- quantile(dielData$O2Range,.50,names=FALSE)
#dielSkew <- skewness(dielData$O2Range)
dielMean <- mean(dielData$O2Range,na.rm=TRUE)
dielStatsLine <- data.frame(site_id=diel_sites$site_id[i],diel90=diel90,diel50=diel50,
dielMean=dielMean)
#diel90 <- aggregate(dielData$O2Range,quantile,probs=c(0.90))
#dielStatsLine <- cbind(diel_sites$site_id[i],diel90)
}
# No range file for this site: keep the row with NA statistics.
else {
dielStatsLine <- data.frame(site_id=diel_sites$site_id[i],diel90=NA,diel50=NA,dielMean=NA)
}
dielStats <- rbind(dielStats,dielStatsLine)
}
# Drop sites whose mean could not be computed (NA/NaN/Inf).
dielStats <- subset(dielStats,is.finite(dielStats$dielMean))
par(new=FALSE)
# Rank sites by each statistic and plot statistic versus rank.
dielStats$rank90 <- rank(dielStats$diel90)
dielStats$rank50 <- rank(dielStats$diel50)
dielStats$rankMean <- rank(dielStats$dielMean)
plot(x=dielStats$rank90,y=dielStats$diel90)
plot(x=dielStats$rank50,y=dielStats$diel50)
plot(x=dielStats$rankMean,y=dielStats$dielMean)
# output = data.frame(metadata, flux.co2.calc, flux.ch4.calc, GTV.co2.calc, GTV.ch4.calc,mean.r2.co2, mean.r2.ch4)
# str(output)
# write.table(output, output.file, sep=",", row.names=FALSE, col.names=TRUE)
#dielSaturation <- function(infile,longitude,elevation){
# bp <- ((1-(2.25577e-5*elevation))^5.25588)*760
# ddply(
# group_by(mutate(infile,
# Date=as.Date(convert_GMT_to_solartime(as.POSIXct(infile$DateTime,
# tz = "GMT"),longitude,
# time.type="apparent solar")),
# o2Sat = (exp(2.00907 + 3.22014 * (log((298.15-infile$ts_wtr) /
# (273.15 + infile$ts_wtr))) + 4.0501 * (log((298.15 - infile$ts_wtr) /
# (273.15 + infile$ts_wtr))) ^ 2 + 4.94457 * (log((298.15 - infile$ts_wtr) /
# (273.15 + infile$ts_wtr))) ^ 3 - 0.256847 * (log((298.15 - infile$ts_wtr) /
# (273.15 + infile$ts_wtr))) ^ 4 + 3.88767 * (log((298.15 - infile$ts_wtr) /
# (273.15 + infile$ts_wtr))) ^ 5)) * 1.4276 * bp / 760,
# dO2 = (1000*infile$ts_doobs/32)-(1000*o2Sat/32)), Date),
# "Date",
# summarise,diel_range = max(dO2) - min(dO2), ndays=length(unique(Date)))
#group_by(infile,Date)
#ddply(infile,"Date",summarise,diel_range = max(dO2)-min(dO2),ndays=length(unique(Date)))
#}
|
# Harmony batch correction + Seurat clustering/embedding for scRNA-seq data.
suppressMessages(library("arrow"))
suppressMessages(library("Seurat"))
suppressMessages(library("harmony"))
source("../../scripts/cluster_tools.r")
samplingPos <- "."
OUT <- paste0("03-expression/merged/cellAlign/", samplingPos)
dir.create(OUT, showWarnings = F, recursive = T)
#load(file = paste0(OUT, "/cellAlign.RData"))
# 1. preprocess ----
# Load expression matrix (feather stores no row names; genes come separately).
expr_data <- read_feather("03-expression/merged/filtering/UMIcount_filtered.feather")
expr_data <- as.data.frame(expr_data)
expr_data_gene <- read.table("03-expression/merged/filtering/UMIcount_filtered.gene", header = F, sep = "\t", stringsAsFactors = F)
rownames(expr_data) <- expr_data_gene$V1
# Load cell meta.
cellMeta <- read.table("cell_metatable_filtered_plus.txt", header = T, sep = "\t", stringsAsFactors = F)
# (FIX: this consistency check was computed but its result was discarded;
# enforce that cells in the matrix and the meta table line up.)
stopifnot(all(colnames(expr_data) == cellMeta$cell))
# 2. harmony ----
# Counts-per-million normalization (per cell).
expr_norm <- sweep(expr_data, 2, colSums(expr_data), "/") * 1e6
expr_norm_MT <- as.matrix(expr_norm)
# Batch-correct across "stage" with Harmony.
harmony_embeddings <- HarmonyMatrix(expr_norm_MT, cellMeta, "stage")
rownames(harmony_embeddings) <- cellMeta$cell
colnames(harmony_embeddings) <- paste0("PC", 1:ncol(harmony_embeddings))
# 3. Seurat ----
# Initialize the Seurat object with the raw (non-normalized data).
expr <- CreateSeuratObject(raw.data = expr_data, min.cells = 3, min.genes = 500, project = samplingPos, names.delim = "/")
dim(expr@raw.data)
# Attach stage/ident metadata, matched by cell name.
expr@meta.data <- cbind(expr@meta.data, cellMeta[match(rownames(expr@meta.data), cellMeta$cell), c("stage", "ident")])
expr@meta.data$cluster <- expr@meta.data$ident
# Fraction of counts from mitochondrial genes (names starting with "MT-").
mito.genes <- grep(pattern = "^MT-", x = rownames(expr@raw.data))
length(mito.genes)
percent.mito <- Matrix::colSums(expr@raw.data[mito.genes, ])/Matrix::colSums(expr@raw.data)
expr <- do_addMeta(expr)
# Standard Seurat v2 pipeline: filter, normalize, find variable genes, scale.
expr <- FilterCells(object = expr, subset.names = c("nGene", "percent.mito"), low.thresholds = c(500, -Inf), high.thresholds = c(Inf, Inf))
expr <- NormalizeData(object = expr, normalization.method = "LogNormalize", scale.factor = 10000)
expr <- FindVariableGenes(object = expr, mean.function = ExpMean, dispersion.function = LogVMR, x.low.cutoff = 0.25, x.high.cutoff = 5, y.cutoff = 0.5, do.plot = F)
expr <- ScaleData(object = expr, vars.to.regress = c("nUMI", "percent.mito"), num.cores = 20, do.par = T)
# Run PCA (needed so a dr slot exists to clone for Harmony below).
expr <- RunPCA(object = expr, pc.genes = expr@var.genes, pcs.compute = 20, do.print = F)
# Graft the Harmony embeddings into a copy of the PCA dr slot.
expr@dr$harmony <- expr@dr$pca
expr@dr$harmony@cell.embeddings <- harmony_embeddings
expr@dr$harmony@gene.loadings <- matrix()
expr@dr$harmony@sdev <- numeric()
# Run tSNE/UMAP on the Harmony embeddings.
expr <- RunTSNE(object = expr, reduction.use = "harmony", dims.use = 1:20, nthreads = 20, do.fast = T)
expr <- RunUMAP(object = expr, reduction.use = "harmony", dims.use = 1:20, min_dist = 1)
# plot
# DimPlot(object = expr, reduction.use = "pca", pt.size = 2, do.label = T, no.legend = T, plot.title = "PCA", group.by = "stage")
# DimPlot(object = expr, reduction.use = "pca", pt.size = 2, do.label = T, no.legend = T, plot.title = "PCA", group.by = "cluster")
# DimPlot(object = expr, reduction.use = "harmony", pt.size = 2, do.label = T, no.legend = T, plot.title = "Harmony", group.by = "stage")
# DimPlot(object = expr, reduction.use = "harmony", pt.size = 2, do.label = T, no.legend = T, plot.title = "Harmony", group.by = "cluster")
# DimPlot(object = expr, reduction.use = "tsne", pt.size = 2, do.label = T, no.legend = T, plot.title = "tSNE", group.by = "stage")
# DimPlot(object = expr, reduction.use = "tsne", pt.size = 2, do.label = T, no.legend = T, plot.title = "tSNE", group.by = "cluster")
# DimPlot(object = expr, reduction.use = "umap", pt.size = 2, do.label = T, no.legend = T, plot.title = "tSNE", group.by = "stage")
# DimPlot(object = expr, reduction.use = "umap", pt.size = 2, do.label = T, no.legend = T, plot.title = "tSNE", group.by = "cluster")
# Assemble the per-cell meta table with tSNE and UMAP coordinates.
cellMetaData <- merge(expr@meta.data, cbind(expr@dr$tsne@cell.embeddings, expr@dr$umap@cell.embeddings), by = 0, sort = F)
colnames(cellMetaData)[1] <- "cell"
colnames(cellMetaData)[grep("^UMAP", colnames(cellMetaData))] <- c("UMAP_1", "UMAP_2")
# 4. save and write meta table ----
write.table(x = cellMetaData, file = paste0(OUT, "/Seurat_metaData.txt"), row.names = F, col.names = T, quote = F,sep = "\t")
save.image(file = paste0(OUT, "/cellAlign.RData"))
|
/scRNA-seq/pooled_data_all/17_thymus/do_cellAlign.r
|
permissive
|
shunsunsun/GeACT
|
R
| false
| false
| 4,350
|
r
|
# harmony: batch correction + Seurat clustering/embedding for scRNA-seq data.
suppressMessages(library("arrow"))
suppressMessages(library("Seurat"))
suppressMessages(library("harmony"))
source("../../scripts/cluster_tools.r")
samplingPos <- "."
OUT <- paste0("03-expression/merged/cellAlign/", samplingPos)
dir.create(OUT, showWarnings = F, recursive = T)
#load(file = paste0(OUT, "/cellAlign.RData"))
# 1. preprocess ----
# load expression matrix (feather stores no row names; genes come separately)
expr_data <- read_feather("03-expression/merged/filtering/UMIcount_filtered.feather")
expr_data <- as.data.frame(expr_data)
expr_data_gene <- read.table("03-expression/merged/filtering/UMIcount_filtered.gene", header = F, sep = "\t", stringsAsFactors = F)
rownames(expr_data) <- expr_data_gene$V1
# load cell meta
cellMeta <- read.table("cell_metatable_filtered_plus.txt", header = T, sep = "\t", stringsAsFactors = F)
# NOTE(review): the result of this consistency check is not enforced (and is
# only auto-printed at an interactive prompt); consider wrapping it in
# stopifnot() -- confirm intent.
all(colnames(expr_data) == cellMeta$cell)
# 2. harmony ----
# counts-per-million normalization (per cell)
expr_norm <- sweep(expr_data, 2, colSums(expr_data), "/") * 1e6
expr_norm_MT <- as.matrix(expr_norm)
# alignment: batch-correct across "stage" with Harmony
harmony_embeddings <- HarmonyMatrix(expr_norm_MT, cellMeta, "stage")
rownames(harmony_embeddings) <- cellMeta$cell
colnames(harmony_embeddings) <- paste0("PC", 1:ncol(harmony_embeddings))
# 3. Seurat ----
# Initialize the Seurat object with the raw (non-normalized data).
expr <- CreateSeuratObject(raw.data = expr_data, min.cells = 3, min.genes = 500, project = samplingPos, names.delim = "/")
dim(expr@raw.data)
# add meta (stage/ident matched by cell name)
expr@meta.data <- cbind(expr@meta.data, cellMeta[match(rownames(expr@meta.data), cellMeta$cell), c("stage", "ident")])
expr@meta.data$cluster <- expr@meta.data$ident
# fraction of counts from mitochondrial genes (names starting with "MT-")
mito.genes <- grep(pattern = "^MT-", x = rownames(expr@raw.data))
length(mito.genes)
percent.mito <- Matrix::colSums(expr@raw.data[mito.genes, ])/Matrix::colSums(expr@raw.data)
expr <- do_addMeta(expr)
# standard Seurat v2 pipeline: filter, normalize, find variable genes, scale
expr <- FilterCells(object = expr, subset.names = c("nGene", "percent.mito"), low.thresholds = c(500, -Inf), high.thresholds = c(Inf, Inf))
expr <- NormalizeData(object = expr, normalization.method = "LogNormalize", scale.factor = 10000)
expr <- FindVariableGenes(object = expr, mean.function = ExpMean, dispersion.function = LogVMR, x.low.cutoff = 0.25, x.high.cutoff = 5, y.cutoff = 0.5, do.plot = F)
expr <- ScaleData(object = expr, vars.to.regress = c("nUMI", "percent.mito"), num.cores = 20, do.par = T)
# run PCA (needed so a dr slot exists to clone for Harmony below)
expr <- RunPCA(object = expr, pc.genes = expr@var.genes, pcs.compute = 20, do.print = F)
# create harmony embeddings by grafting them into a copy of the PCA slot
expr@dr$harmony <- expr@dr$pca
expr@dr$harmony@cell.embeddings <- harmony_embeddings
expr@dr$harmony@gene.loadings <- matrix()
expr@dr$harmony@sdev <- numeric()
# run tSNE/UMAP (based on Harmony)
expr <- RunTSNE(object = expr, reduction.use = "harmony", dims.use = 1:20, nthreads = 20, do.fast = T)
expr <- RunUMAP(object = expr, reduction.use = "harmony", dims.use = 1:20, min_dist = 1)
# plot
# DimPlot(object = expr, reduction.use = "pca", pt.size = 2, do.label = T, no.legend = T, plot.title = "PCA", group.by = "stage")
# DimPlot(object = expr, reduction.use = "pca", pt.size = 2, do.label = T, no.legend = T, plot.title = "PCA", group.by = "cluster")
# DimPlot(object = expr, reduction.use = "harmony", pt.size = 2, do.label = T, no.legend = T, plot.title = "Harmony", group.by = "stage")
# DimPlot(object = expr, reduction.use = "harmony", pt.size = 2, do.label = T, no.legend = T, plot.title = "Harmony", group.by = "cluster")
# DimPlot(object = expr, reduction.use = "tsne", pt.size = 2, do.label = T, no.legend = T, plot.title = "tSNE", group.by = "stage")
# DimPlot(object = expr, reduction.use = "tsne", pt.size = 2, do.label = T, no.legend = T, plot.title = "tSNE", group.by = "cluster")
# DimPlot(object = expr, reduction.use = "umap", pt.size = 2, do.label = T, no.legend = T, plot.title = "tSNE", group.by = "stage")
# DimPlot(object = expr, reduction.use = "umap", pt.size = 2, do.label = T, no.legend = T, plot.title = "tSNE", group.by = "cluster")
# assemble the per-cell meta table with tSNE and UMAP coordinates
cellMetaData <- merge(expr@meta.data, cbind(expr@dr$tsne@cell.embeddings, expr@dr$umap@cell.embeddings), by = 0, sort = F)
colnames(cellMetaData)[1] <- "cell"
colnames(cellMetaData)[grep("^UMAP", colnames(cellMetaData))] <- c("UMAP_1", "UMAP_2")
# 4. save and write meta table ----
write.table(x = cellMetaData, file = paste0(OUT, "/Seurat_metaData.txt"), row.names = F, col.names = T, quote = F,sep = "\t")
save.image(file = paste0(OUT, "/cellAlign.RData"))
|
#' mkhist(): draw a faceted histogram for every column of a data frame.
#'
#' Prints the head of the input (debug preview), then returns a ggplot with
#' one 20-bin histogram per column and free x-axis scales.
#'
#' @param x A data frame; each column becomes one facet.
#' @return A ggplot object.
#' @export
mkhist <- function(x){
  print(x %>% head())
  # (FIX: a dead `x %>% gather() %>% head()` expression, whose value was
  # silently discarded inside the function body, has been removed.)
  ggplot(gather(x), aes(value)) +
    geom_histogram(bins=20) +
    facet_wrap(~key, scales='free_x')
}
|
/R/mkhist.R
|
no_license
|
wyocompbio/mkhist
|
R
| false
| false
| 248
|
r
|
#' mkhist() function to create
#' histograms from df variables
#'
#' @param x A data frame; each column is plotted as one histogram facet.
#' @return A ggplot object: one 20-bin histogram per column, with free
#'   x-axis scales.
#' @export
mkhist <- function(x){
print(x %>% head())
# NOTE(review): the value of the next expression is silently discarded
# inside the function body (dead code) -- confirm whether it can be removed.
x %>% gather() %>% head()
ggplot(gather(x), aes(value)) +
geom_histogram(bins=20) +
facet_wrap(~key, scales='free_x')
}
|
# Plot method: draw a genome-wide Manhattan plot of -log10 p-values and
# highlight SNPs passing the Bonferroni-wide significance threshold.
#
# Args:
#   GWAS          data frame with SNP id (column 1) and columns chr,
#                 position, Neg_logP.
#   col.snps      two colors alternated between adjacent chromosomes.
#   col.detected  color used for significant SNPs.
#   col.text      color used for significant-SNP labels.
#   title         main title of the plot.
#   display.text  if TRUE, label significant SNPs with their identifier.
GWAS_Manhattan<-
function(GWAS,col.snps=c("black","gray"),col.detected=c("blue"),col.text="black",title="GWAS Tutorial Manhattan Plot",display.text=TRUE){
#keep autosomes only (chromosome labels containing letters are dropped)
manhat<-GWAS[!grepl("[A-z]",GWAS$chr),]
#sort the data by chromosome and then location
manhat.ord<-manhat[order(as.numeric(manhat$chr),manhat$position),]
manhat.ord<-manhat.ord[!is.na(manhat.ord$position),]
##Find the maximum position for chromosomes 1-21; the axis offset of
##chromosome i only needs the maxima of chromosomes 1..(i-1).
max.pos<-NULL
for (i in 1:21){
max.pos[i]<-max(manhat.ord$position[manhat.ord$chr==i])}
max.pos1<-c(0,max.pos)
max.pos2<-NULL
for (i in 1:22){max.pos2[i]<-sum(max.pos1[1:i])}
#Add spacing between chromosomes
max.pos2<-max.pos2+c(0:21)*10000000
#defining the position of each SNP on the concatenated genome axis
manhat.ord$pos<-manhat.ord$position+max.pos2[as.numeric(manhat.ord$chr)]
manhat.ord$Neg_logP<-abs(manhat.ord$Neg_logP)
#alternate point colors between even and odd chromosomes
manhat.ord$col[as.numeric(manhat.ord$chr)%%2==0]<-col.snps[1]
manhat.ord$col[as.numeric(manhat.ord$chr)%%2==1]<-col.snps[2]
#mid-chromosome positions used for the axis labels
text.pos<-rep(NA,22)
for (i in 1:22){text.pos[i]<-mean(manhat.ord$pos[manhat.ord$chr==i])}
#plot the data (FIX: the `title` argument was previously ignored)
plot(manhat.ord$pos/1000000,manhat.ord$Neg_logP,pch=20,cex=.3,col=manhat.ord$col,xlab="Chromosome",ylab="Negative Log P-Value",axes=F,ylim=c(0,max(manhat$Neg_logP)+1),main=title)
axis(2)
abline(h=0)
#SNPs above the Bonferroni-wide threshold (alpha = 0.05 over 1e6 tests)
SigNifSNPs<-as.character(GWAS[GWAS$Neg_logP>-log10(0.05/1000000),1])
#Add legend
legend("topright",c("Bonferroni-wide Significant", "Bonferroni-wide Significance Threshold*"),border=col.detected,col=c(col.detected, "gray60"),pch=c(15, 0),lwd=c(0,1),pt.cex=c(0.5,0), bty="o", cex=0.7)
#Add chromosome number
text(text.pos/1000000,-.3,seq(1,22,by=1),xpd=TRUE,cex=1)
abline(h=-log10(0.05/1000000), untf = FALSE,col = "gray60")
#Plotting detected genes
#Were any genes detected?
if (length(SigNifSNPs)>0){
points(manhat.ord$pos[manhat.ord[,1]%in%SigNifSNPs]/1000000,manhat.ord$Neg_logP[manhat.ord[,1]%in%SigNifSNPs],pch=15,col=col.detected, bg=col.detected,cex=0.5)
#label them only when requested (FIX: display.text was previously ignored)
if (display.text){
text(manhat.ord$pos[manhat.ord[,1]%in%SigNifSNPs]/1000000,manhat.ord$Neg_logP[manhat.ord[,1]%in%SigNifSNPs],as.character(manhat.ord[manhat.ord[,1]%in%SigNifSNPs,1]),col=col.text,offset=1,adj=-.1, cex=.7)
}
}}
|
/GWAS_ManhattanFunction.R
|
no_license
|
foulkes/GWAS-Tutorial
|
R
| false
| false
| 2,414
|
r
|
#Plot method: genome-wide Manhattan plot of -log10 p-values, highlighting
#SNPs passing the Bonferroni-wide threshold.
# GWAS: data frame with SNP id (column 1) and columns chr, position, Neg_logP.
# col.snps: two colors alternated between adjacent chromosomes.
# col.detected: color for significant SNPs; col.text: color for their labels.
# NOTE(review): the `title` and `display.text` arguments are accepted but
# never used in the body -- confirm whether they should be wired in.
GWAS_Manhattan<-
function(GWAS,col.snps=c("black","gray"),col.detected=c("blue"),col.text="black",title="GWAS Tutorial Manhattan Plot",display.text=TRUE){
#keep autosomes only (chromosome labels containing letters are dropped)
manhat<-GWAS[!grepl("[A-z]",GWAS$chr),]
#sort the data by chromosome and then location
manhat.ord<-manhat[order(as.numeric(manhat$chr),manhat$position),]
manhat.ord<-manhat.ord[!is.na(manhat.ord$position),]
##Finding the maximum position for each chromosome; only chromosomes 1-21
##are needed because the offset of chromosome i uses maxima of 1..(i-1).
max.pos<-NULL
for (i in 1:21){
max.pos[i]<-max(manhat.ord$position[manhat.ord$chr==i])}
max.pos1<-c(0,max.pos)
max.pos2<-NULL
for (i in 1:22){max.pos2[i]<-sum(max.pos1[1:i])}
#Add spacing between chromosomes
max.pos2<-max.pos2+c(0:21)*10000000
#defining the postitions of each snp in the plot (concatenated genome axis)
manhat.ord$pos<-manhat.ord$position+max.pos2[as.numeric(manhat.ord$chr)]
manhat.ord$Neg_logP<-abs(manhat.ord$Neg_logP)
#defining the coloring for the Manhattan plot (alternate by chromosome)
manhat.ord$col[as.numeric(manhat.ord$chr)%%2==0]<-col.snps[1]
manhat.ord$col[as.numeric(manhat.ord$chr)%%2==1]<-col.snps[2]
#mid-chromosome positions used for the chromosome-number labels
text.pos<-rep(NA,22)
for (i in 1:22){text.pos[i]<-mean(manhat.ord$pos[manhat.ord$chr==i])}
#plot the data
plot(manhat.ord$pos/1000000,manhat.ord$Neg_logP,pch=20,cex=.3,col=manhat.ord$col,xlab="Chromosome",ylab="Negative Log P-Value",axes=F,ylim=c(0,max(manhat$Neg_logP)+1))
axis(2)
abline(h=0)
#SNPs above the Bonferroni-wide threshold (alpha = 0.05 over 1e6 tests)
SigNifSNPs<-as.character(GWAS[GWAS$Neg_logP>-log10(0.05/1000000),1])
#Add legend
legend("topright",c("Bonferroni-wide Significant", "Bonferroni-wide Significance Threshold*"),border=col.detected,col=c(col.detected, "gray60"),pch=c(15, 0),lwd=c(0,1),pt.cex=c(0.5,0), bty="o", cex=0.7)
#Add chromosome number
text(text.pos/1000000,-.3,seq(1,22,by=1),xpd=TRUE,cex=1)
abline(h=-log10(0.05/1000000), untf = FALSE,col = "gray60")
#Plotting detected genes
#Were any genes detected?
if (length(SigNifSNPs)>0){
points(manhat.ord$pos[manhat.ord[,1]%in%SigNifSNPs]/1000000,manhat.ord$Neg_logP[manhat.ord[,1]%in%SigNifSNPs],pch=15,col=col.detected, bg=col.detected,cex=0.5)
text(manhat.ord$pos[manhat.ord[,1]%in%SigNifSNPs]/1000000,manhat.ord$Neg_logP[manhat.ord[,1]%in%SigNifSNPs],as.character(manhat.ord[manhat.ord[,1]%in%SigNifSNPs,1]),col=col.text,offset=1,adj=-.1, cex=.7)
}}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kpDataBackground.R
\name{kpDataBackground}
\alias{kpDataBackground}
\title{kpDataBackground}
\usage{
kpDataBackground(karyoplot, r0=NULL, r1=NULL, data.panel=1, color="gray90", ...)
}
\arguments{
\item{karyoplot}{(a \code{KaryoPlot} object) This is the first argument to all data plotting functions of \code{karyoploteR}. A KaryoPlot object referring to the currently active plot.}
\item{r0}{(numeric) r0 and r1 define the vertical range of the data panel to be used to draw this plot. They can be used to split the data panel in different vertical ranges (similar to tracks in a genome browser) to plot different data. If NULL, they are set to the min and max of the data panel, that is, to use all the available space. (defaults to NULL)}
\item{r1}{(numeric) r0 and r1 define the vertical range of the data panel to be used to draw this plot. They can be used to split the data panel in different vertical ranges (similar to tracks in a genome browser) to plot different data. If NULL, they are set to the min and max of the data panel, that is, to use all the available space. (defaults to NULL)}
\item{data.panel}{(numeric) The identifier of the data panel where the data is to be plotted. The available data panels depend on the plot type selected in the call to \code{\link{plotKaryotype}}. (defaults to 1)}
\item{color}{(color) a valid color specification}
\item{...}{The ellipsis operator can be used to specify any additional graphical parameters. Any additional parameter will be passed to the internal calls to the R base plotting functions.}
}
\value{
Returns the original karyoplot object, unchanged.
}
\description{
Draws a solid rectangle delimiting the plotting area
}
\details{
This function is used to add a background color to delimit the plotting area.
It can either delimit the whole plotting area or part of it so different data plotting
regions can be seen.
}
\examples{
kp <- plotKaryotype("hg19", plot.type=2, chromosomes=c("chr1", "chr2"))
#Prepare data panel 1
kpDataBackground(kp, data.panel=1)
kpAxis(kp, data.panel = 1)
kpAxis(kp, data.panel = 1, ymin = 0, ymax=10, numticks = 11, side = 2, cex = 0.4, col="red")
#Prepare data panel 2
#Data panel 2 is conceptually split into two parts and the second part is "inverted"
kpDataBackground(kp, data.panel=2, r0 = 0, r1 = 0.45, color = "#EEEEFF")
kpAxis(kp, data.panel = 2, r0=0, r1=0.45, ymin = 0, ymax = 1, cex=0.5,
tick.pos = c(0.3, 0.5, 0.7), labels = c("-1 sd", "mean", "+1 sd"))
kpAxis(kp, data.panel = 2, r0=0, r1=0.45, ymin = 0, ymax = 1, cex=0.5, side=2)
kpDataBackground(kp, data.panel=2, r0 = 0.55, r1 = 1, color = "#EEFFEE")
kpAxis(kp, data.panel = 2, r0=1, r1=0.55, ymin = 0, ymax = 1, side=1, cex=0.5)
kpAxis(kp, data.panel = 2, r0=1, r1=0.55, ymin = 0, ymax = 1, side=2, cex=0.5)
}
\seealso{
\code{\link{plotKaryotype}}, \code{\link{kpAxis}}
}
|
/man/kpDataBackground.Rd
|
no_license
|
YTLogos/karyoploteR
|
R
| false
| true
| 2,933
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kpDataBackground.R
\name{kpDataBackground}
\alias{kpDataBackground}
\title{kpDataBackground}
\usage{
kpDataBackground(karyoplot, r0=NULL, r1=NULL, data.panel=1, color="gray90", ...)
}
\arguments{
\item{karyoplot}{(a \code{KaryoPlot} object) This is the first argument to all data plotting functions of \code{karyoploteR}. A KaryoPlot object referring to the currently active plot.}
\item{r0}{(numeric) r0 and r1 define the vertical range of the data panel to be used to draw this plot. They can be used to split the data panel in different vertical ranges (similar to tracks in a genome browser) to plot different data. If NULL, they are set to the min and max of the data panel, that is, to use all the available space. (defaults to NULL)}
\item{r1}{(numeric) r0 and r1 define the vertical range of the data panel to be used to draw this plot. They can be used to split the data panel in different vertical ranges (similar to tracks in a genome browser) to plot different data. If NULL, they are set to the min and max of the data panel, that is, to use all the available space. (defaults to NULL)}
\item{data.panel}{(numeric) The identifier of the data panel where the data is to be plotted. The available data panels depend on the plot type selected in the call to \code{\link{plotKaryotype}}. (defaults to 1)}
\item{color}{(color) a valid color specification}
\item{...}{The ellipsis operator can be used to specify any additional graphical parameters. Any additional parameter will be passed to the internal calls to the R base plotting functions.}
}
\value{
Returns the original karyoplot object, unchanged.
}
\description{
Draws a solid rectangle delimiting the plotting area
}
\details{
This function is used to add a background color to delimit the plotting area.
It can either delimit the whole plotting area or part of it so different data plotting
regions can be seen.
}
\examples{
kp <- plotKaryotype("hg19", plot.type=2, chromosomes=c("chr1", "chr2"))
#Prepare data panel 1
kpDataBackground(kp, data.panel=1)
kpAxis(kp, data.panel = 1)
kpAxis(kp, data.panel = 1, ymin = 0, ymax=10, numticks = 11, side = 2, cex = 0.4, col="red")
#Prepare data panel 2
#Data panel 2 is conceptually split into two parts and the second part is "inverted"
kpDataBackground(kp, data.panel=2, r0 = 0, r1 = 0.45, color = "#EEEEFF")
kpAxis(kp, data.panel = 2, r0=0, r1=0.45, ymin = 0, ymax = 1, cex=0.5,
tick.pos = c(0.3, 0.5, 0.7), labels = c("-1 sd", "mean", "+1 sd"))
kpAxis(kp, data.panel = 2, r0=0, r1=0.45, ymin = 0, ymax = 1, cex=0.5, side=2)
kpDataBackground(kp, data.panel=2, r0 = 0.55, r1 = 1, color = "#EEFFEE")
kpAxis(kp, data.panel = 2, r0=1, r1=0.55, ymin = 0, ymax = 1, side=1, cex=0.5)
kpAxis(kp, data.panel = 2, r0=1, r1=0.55, ymin = 0, ymax = 1, side=2, cex=0.5)
}
\seealso{
\code{\link{plotKaryotype}}, \code{\link{kpAxis}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chem_scatter.R
\name{chem_scatter}
\alias{chem_scatter}
\title{Creates a scatterplot with regression line}
\usage{
chem_scatter(data, xvar, yvar, xlab = "Insert X-axis label",
ylab = "Insert Y-axis label", intercept = NULL, reg_line = TRUE)
}
\arguments{
\item{data}{A data frame containing variables to be plotted}
\item{xvar}{The name of the x-variable}
\item{yvar}{The name of the y-variable}
\item{xlab}{A string containing the x-axis label}
\item{ylab}{A string containing the y-axis label}
\item{intercept}{Set to NULL by default, but can be changed to a numeric value
to force the regression line to go through a specified y-value. For example,
\code{intercept = 0} forces the regression line to go through the origin}
\item{reg_line}{Set to TRUE by default to show the regression line. If set to
FALSE no regression line will appear showing only the points}
}
\value{
A plot
}
\description{
Creates a scatterplot with regression line
}
\examples{
\dontrun{
chem_scatter(iris, Sepal.Width, Sepal.Length)
}
}
|
/man/chem_scatter.Rd
|
no_license
|
ismayc/chemistr
|
R
| false
| true
| 1,092
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chem_scatter.R
\name{chem_scatter}
\alias{chem_scatter}
\title{Creates a scatterplot with regression line}
\usage{
chem_scatter(data, xvar, yvar, xlab = "Insert X-axis label",
ylab = "Insert Y-axis label", intercept = NULL, reg_line = TRUE)
}
\arguments{
\item{data}{A data frame containing variables to be plotted}
\item{xvar}{The name of the x-variable}
\item{yvar}{The name of the y-variable}
\item{xlab}{A string containing the x-axis label}
\item{ylab}{A string containing the y-axis label}
\item{intercept}{Set to NULL by default, but can be changed to a numeric value
to force the regression line to go through a specified y-value. For example,
\code{intercept = 0} forces the regression line to go through the origin}
\item{reg_line}{Set to TRUE by default to show the regression line. If set to
FALSE no regression line will appear showing only the points}
}
\value{
A plot
}
\description{
Creates a scatterplot with regression line
}
\examples{
\dontrun{
chem_scatter(iris, Sepal.Width, Sepal.Length)
}
}
|
#!/usr/bin/env Rscript
#' ----------------------------------------------------------------------------
#' title: run_tests.R
#' description:
#' Script to run all unittests for the distributed Cox Proportional Hazards
#' algorithm.
#' author:
#' Melle Sieswerda <m.sieswerda@iknl.nl>
#' Anna van der Zalm <a.vanderzalm@iknl.nl>
#' Gijs Geleijnse <g.geleijnse@iknl.nl>
#' date: 09-may-2018
#' license: MIT License
#' ----------------------------------------------------------------------------
# Load the unit-testing framework.
library(testthat)
# Load the implementation under test so its functions are available to the
# test files in this directory.
source("dl_coxph.R")
# Find and run the tests in the current directory; the "summary" reporter
# prints a condensed per-context report.  Results are kept for inspection.
test_results <- test_dir("./", reporter="summary")
|
/run_tests.R
|
permissive
|
AnanyaCN/d_coxph
|
R
| false
| false
| 645
|
r
|
#!/usr/bin/env Rscript
#' ----------------------------------------------------------------------------
#' title: run_tests.R
#' description:
#' Script to run all unittests for the distributed Cox Proportional Hazards
#' algorithm.
#' author:
#' Melle Sieswerda <m.sieswerda@iknl.nl>
#' Anna van der Zalm <a.vanderzalm@iknl.nl>
#' Gijs Geleijnse <g.geleijnse@iknl.nl>
#' date: 09-may-2018
#' license: MIT License
#' ----------------------------------------------------------------------------
# Load the unit-testing framework.
library(testthat)
# Load the implementation under test so its functions are available to the
# test files in this directory.
source("dl_coxph.R")
# Find and run the tests in the current directory; the "summary" reporter
# prints a condensed per-context report.  Results are kept for inspection.
test_results <- test_dir("./", reporter="summary")
|
# Data manipulation
library(tidyverse)
# Time-series manipulation
library(tsibble)
# Forecasting functions
library(fable)
# Time-series graphics and statistics
library(feasts)
# Tidy time-series data sets
library(tsibbledata)
# All of the above and more
library(fpp3)
# Plotting
library(ggplot2)
# A tsibble allows storage and manipulation of time-series data.
# It contains an index (time information) and the measured variables.
# Read a csv file and convert it to a tsibble.
url_data <- "https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-states.csv"
covid <- readr::read_csv(url_data)
covid
covid = readr::read_csv("https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-states.csv") %>%
select(date, state, newDeaths, newCases )%>% # keep only these columns
as_tsibble( # convert to a time series
index = date, # the index references time
key = state # one series per state
) %>%
group_by(state) %>% # group by state
mutate(MM_mortes = zoo::rollmean(newDeaths, k = 7, fill = NA, align = "right"), # 7-day moving average of deaths
MM_casos = zoo::rollmean(newCases, k = 7, fill = NA, align = "right")) # 7-day moving average of cases
# Data visualisation
# Plot each state's moving average in a separate panel
#covid %>%
# filter(state != "TOTAL") %>% # filter the covid dataset down to the individual states only
# autoplot(MM_mortes) + # plot the deaths
# facet_wrap(~state, scales = "free") + # facet_wrap puts the state variable (via the ~ operator) in its own panel and scales each panel independently
# labs(x = "Dia", y = "Mortes", title = "Media Movel (7 dias)") # change the plot labels
# Plot showing the influence of seasonality
covid %>%
filter(state == "TOTAL") %>%
gg_season(MM_mortes, period ="year") + # seasonal plot, i.e. comparing the same period (month) across years
labs(x="Dia", y="Mortes (M_Movel)[7 dias]")
# Components of a time series
# Trend: whether the series tends to increase, decrease, or remain stationary
# Seasonality: when a series is influenced by fixed periods of time, e.g. does January behave the same in every year?
# Cycle: when the series shows repeating cyclic patterns, e.g. sunspots repeat a pattern over variable periods
# Seasonality vs cycle: the timing of peaks is easier to predict for seasonality than for a cycle
# TS
# Check whether the plot shows seasonality and trend
covid %>%
filter(state == "TOTAL") %>%
autoplot(newCases) +
labs(x = "Dia", y = "Casos", title = "Numero de casos por dia no Brasil")
# For the case above there is weekly seasonality and a trend, partly due to under-reporting
# Autocorrelation function (ACF): correlation
# Measures how strongly the series relates to its own past values, for this specific case
covid %>%
filter(state == "TOTAL") %>%
ACF(newCases, lag_max = 100) # ACF = autocorrelation function; a lag is a small offset in time
# from this output we know how much each value is correlated with the preceding days
# for example, every 7 days there is a strong correlation with the previous days
covid %>%
filter(state == "TOTAL") %>%
ACF(newCases, lag_max = 50) %>% # ACF = autocorrelation function; a lag is a small offset in time
autoplot()
# The bars in the plot decay very slowly, which indicates a series with a trend
# The large spikes occur every 7 days, i.e. weekly seasonality
|
/covid.R
|
no_license
|
eduardo92005-debug/Cienc.Dados
|
R
| false
| false
| 3,714
|
r
|
# Data manipulation
library(tidyverse)
# Time-series manipulation
library(tsibble)
# Forecasting functions
library(fable)
# Time-series graphics and statistics
library(feasts)
# Tidy time-series data sets
library(tsibbledata)
# All of the above and more
library(fpp3)
# Plotting
library(ggplot2)
# A tsibble allows storage and manipulation of time-series data.
# It contains an index (time information) and the measured variables.
# Read a csv file and convert it to a tsibble.
url_data <- "https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-states.csv"
covid <- readr::read_csv(url_data)
covid
covid = readr::read_csv("https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-states.csv") %>%
select(date, state, newDeaths, newCases )%>% # keep only these columns
as_tsibble( # convert to a time series
index = date, # the index references time
key = state # one series per state
) %>%
group_by(state) %>% # group by state
mutate(MM_mortes = zoo::rollmean(newDeaths, k = 7, fill = NA, align = "right"), # 7-day moving average of deaths
MM_casos = zoo::rollmean(newCases, k = 7, fill = NA, align = "right")) # 7-day moving average of cases
# Data visualisation
# Plot each state's moving average in a separate panel
#covid %>%
# filter(state != "TOTAL") %>% # filter the covid dataset down to the individual states only
# autoplot(MM_mortes) + # plot the deaths
# facet_wrap(~state, scales = "free") + # facet_wrap puts the state variable (via the ~ operator) in its own panel and scales each panel independently
# labs(x = "Dia", y = "Mortes", title = "Media Movel (7 dias)") # change the plot labels
# Plot showing the influence of seasonality
covid %>%
filter(state == "TOTAL") %>%
gg_season(MM_mortes, period ="year") + # seasonal plot, i.e. comparing the same period (month) across years
labs(x="Dia", y="Mortes (M_Movel)[7 dias]")
# Components of a time series
# Trend: whether the series tends to increase, decrease, or remain stationary
# Seasonality: when a series is influenced by fixed periods of time, e.g. does January behave the same in every year?
# Cycle: when the series shows repeating cyclic patterns, e.g. sunspots repeat a pattern over variable periods
# Seasonality vs cycle: the timing of peaks is easier to predict for seasonality than for a cycle
# TS
# Check whether the plot shows seasonality and trend
covid %>%
filter(state == "TOTAL") %>%
autoplot(newCases) +
labs(x = "Dia", y = "Casos", title = "Numero de casos por dia no Brasil")
# For the case above there is weekly seasonality and a trend, partly due to under-reporting
# Autocorrelation function (ACF): correlation
# Measures how strongly the series relates to its own past values, for this specific case
covid %>%
filter(state == "TOTAL") %>%
ACF(newCases, lag_max = 100) # ACF = autocorrelation function; a lag is a small offset in time
# from this output we know how much each value is correlated with the preceding days
# for example, every 7 days there is a strong correlation with the previous days
covid %>%
filter(state == "TOTAL") %>%
ACF(newCases, lag_max = 50) %>% # ACF = autocorrelation function; a lag is a small offset in time
autoplot()
# The bars in the plot decay very slowly, which indicates a series with a trend
# The large spikes occur every 7 days, i.e. weekly seasonality
|
# Shiny dashboard tab for pathway analysis and gene-ontology enrichment of
# DESeq2 differential-expression results.
# FIX: T/F replaced by TRUE/FALSE (T and F are ordinary, reassignable
# variables in R and should not be relied on).
DESeq2_gprofiler <- tabItem(tabName = "DESeq2_gprofiler", br(), br(),
  h2("Pathway and Gene Ontology"), br()
  # Collapsible box displaying the DGE results table (server-side output
  # "DESeq2_dge_res2").
  ,boxPlus(collapsible = TRUE, closable = FALSE, width = 10, title = "DGE results table"
    ,status = "primary", solidHeader = TRUE
    ,dataTableOutput("DESeq2_dge_res2")
  )
  # Collapsible box with the gene-filter control and the g:Profiler parameter
  # controls, both rendered dynamically by the server.
  ,boxPlus(collapsible = TRUE, closable = FALSE, width = 10, title = "Pathway Analysis and Gene Ontology"
    ,status = "primary", solidHeader = TRUE
    ,fluidRow(column(10, uiOutput("filterGenes"))
      ,column(10, uiOutput("DESeq2_gprofile_par"))
    )
  )
)
|
/shared/DESeq2_gprofilerUI.R
|
no_license
|
rosericazondekon/irnaa
|
R
| false
| false
| 863
|
r
|
# Shiny dashboard tab for pathway analysis and gene-ontology enrichment of
# DESeq2 differential-expression results.
# FIX: T/F replaced by TRUE/FALSE (T and F are ordinary, reassignable
# variables in R and should not be relied on).
DESeq2_gprofiler <- tabItem(tabName = "DESeq2_gprofiler", br(), br(),
  h2("Pathway and Gene Ontology"), br()
  # Collapsible box displaying the DGE results table (server-side output
  # "DESeq2_dge_res2").
  ,boxPlus(collapsible = TRUE, closable = FALSE, width = 10, title = "DGE results table"
    ,status = "primary", solidHeader = TRUE
    ,dataTableOutput("DESeq2_dge_res2")
  )
  # Collapsible box with the gene-filter control and the g:Profiler parameter
  # controls, both rendered dynamically by the server.
  ,boxPlus(collapsible = TRUE, closable = FALSE, width = 10, title = "Pathway Analysis and Gene Ontology"
    ,status = "primary", solidHeader = TRUE
    ,fluidRow(column(10, uiOutput("filterGenes"))
      ,column(10, uiOutput("DESeq2_gprofile_par"))
    )
  )
)
|
#'@section Standard descriptive variables (generated by this package):
#' \describe{
#'
#' \item{extended_country_name}{The name of the country in the Gleditsch-Ward
#' system of states, or the official name of the
#' entity (for non-sovereign entities and states not in the Gleditsch and Ward
#' system of states) or else a common name for disputed cases that do not have
#' an official name (e.g., Western Sahara, Hyderabad). The Gleditsch and Ward
#' scheme sometimes indicates the common name of the country and (in
#' parentheses) the name of an earlier incarnation of the state: thus, they
#' have Germany (Prussia), Russia (Soviet Union), Madagascar (Malagasy), etc.
#' For details, see Gleditsch, Kristian S. & Michael D. Ward. 1999. "Interstate
#' System Membership: A Revised List of the Independent States since 1816."
#' International Interactions 25: 393-413. The list can be found at
#' \url{http://privatewww.essex.ac.uk/~ksg/statelist.html}.}
#'
#' \item{GWn}{Gleditsch and Ward's numeric country code, from the Gleditsch and
#' Ward list of independent states.}
#'
#' \item{cown}{The Correlates of War numeric country code, 2016 version. This
#' differs from Gleditsch and Ward's numeric country code in a few cases. See
#' \url{http://www.correlatesofwar.org/data-sets/state-system-membership} for
#' the full list.}
#'
#' \item{in_GW_system}{Whether the state is "in system" (that is, is
#' independent and sovereign), according to Gleditsch and Ward, for this
#' particular date. Matches at the end of the year; so, for example South
#' Vietnam 1975 is \code{FALSE} because, according to Gleditsch and Ward, the
#' country ended on April 1975 (being absorbed by North Vietnam). It is also
#' \code{TRUE} for dates beyond 2012 for countries that did not end by then, despite
#' the fact that the Gleditsch and Ward list has not been updated since.} }
|
/man-roxygen/standard-variables.R
|
no_license
|
Vittoriabrown/democracyData
|
R
| false
| false
| 1,896
|
r
|
#'@section Standard descriptive variables (generated by this package):
#' \describe{
#'
#' \item{extended_country_name}{The name of the country in the Gleditsch-Ward
#' system of states, or the official name of the
#' entity (for non-sovereign entities and states not in the Gleditsch and Ward
#' system of states) or else a common name for disputed cases that do not have
#' an official name (e.g., Western Sahara, Hyderabad). The Gleditsch and Ward
#' scheme sometimes indicates the common name of the country and (in
#' parentheses) the name of an earlier incarnation of the state: thus, they
#' have Germany (Prussia), Russia (Soviet Union), Madagascar (Malagasy), etc.
#' For details, see Gleditsch, Kristian S. & Michael D. Ward. 1999. "Interstate
#' System Membership: A Revised List of the Independent States since 1816."
#' International Interactions 25: 393-413. The list can be found at
#' \url{http://privatewww.essex.ac.uk/~ksg/statelist.html}.}
#'
#' \item{GWn}{Gleditsch and Ward's numeric country code, from the Gleditsch and
#' Ward list of independent states.}
#'
#' \item{cown}{The Correlates of War numeric country code, 2016 version. This
#' differs from Gleditsch and Ward's numeric country code in a few cases. See
#' \url{http://www.correlatesofwar.org/data-sets/state-system-membership} for
#' the full list.}
#'
#' \item{in_GW_system}{Whether the state is "in system" (that is, is
#' independent and sovereign), according to Gleditsch and Ward, for this
#' particular date. Matches at the end of the year; so, for example South
#' Vietnam 1975 is \code{FALSE} because, according to Gleditsch and Ward, the
#' country ended on April 1975 (being absorbed by North Vietnam). It is also
#' \code{TRUE} for dates beyond 2012 for countries that did not end by then, despite
#' the fact that the Gleditsch and Ward list has not been updated since.} }
|
library(ape)
# Read the Newick tree, strip its root, and write the unrooted tree back out.
phylo_tree <- read.tree("4265_0.txt")
write.tree(unroot(phylo_tree), file = "4265_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/4265_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 135
|
r
|
library(ape)
# Read the Newick tree, strip its root, and write the unrooted tree back out.
phylo_tree <- read.tree("4265_0.txt")
write.tree(unroot(phylo_tree), file = "4265_0_unrooted.txt")
|
library(imager)
library(shiny)
library(ggplot2)
library(ggpubr)
# img <- load.image("03-Rscripts/zoom-plots/www/Captura.PNG")
# img <- load.image("03-Rscripts/zoom-plots/www/jaime_pino.png")
# img <- load.image("03-Rscripts/zoom-plots/www/Pinu_gorria_02_Asier.png")
# OPEN THE IMAGE WITH IMAGER FIRST
# NOTE(review): every load.image() call above is commented out, so `img` is
# undefined when the UI below evaluates dim(img) -- uncomment the one matching
# the tags$img src before running the app.
#### UI ####
ui <- fluidPage(
# Overlapping images in 2 divs inside a "container"
fluidRow(
div(id="container",
height = dim(img)[1],
width = dim(img)[2],
style="position:relative;",
# Bottom layer: the static image served from the www/ directory.
div(tags$img(src='Pinu_gorria_02_Asier.png',
style=paste0("width:",dim(img)[2],";height:",dim(img)[2],";")),
style="position:absolute; top:0; left:0;"),
# Top layer: a transparent plot that also captures click events.
div(plotOutput("plot1",
height = dim(img)[2],
width = dim(img)[1],
click = "image_click"),
style="position:absolute; top:0; left:0;")
)
)
)
### SERVER ####
server <- function(input, output, session) {
# Render the scatterplot with a fully transparent background so the image
# underneath stays visible.
output$plot1 <- renderPlot({
ggplot(USArrests, aes(UrbanPop, Murder)) + # learn to use ggplot, it's about time!
geom_point() +
theme(
panel.background = element_rect(fill = "transparent", colour = NA), # bg of the panel
plot.background = element_rect(fill = "transparent", colour = NA), # bg of the plot
panel.grid.major = element_blank(), # get rid of major grid
panel.grid.minor = element_blank(), # get rid of minor grid
legend.background = element_rect(fill = "transparent", colour = NA), # get rid of legend bg
legend.box.background = element_rect(fill = "transparent", colour = NA) # get rid of legend panel bg
)
},
bg="transparent")
}
# Run the application
shinyApp(ui = ui, server = server)
|
/03-Rscripts/zoom-plots/app-imager-ggplot.R
|
no_license
|
Joacala/trini
|
R
| false
| false
| 1,809
|
r
|
library(imager)
library(shiny)
library(ggplot2)
library(ggpubr)
# img <- load.image("03-Rscripts/zoom-plots/www/Captura.PNG")
# img <- load.image("03-Rscripts/zoom-plots/www/jaime_pino.png")
# img <- load.image("03-Rscripts/zoom-plots/www/Pinu_gorria_02_Asier.png")
# OPEN THE IMAGE WITH IMAGER FIRST
# NOTE(review): every load.image() call above is commented out, so `img` is
# undefined when the UI below evaluates dim(img) -- uncomment the one matching
# the tags$img src before running the app.
#### UI ####
ui <- fluidPage(
# Overlapping images in 2 divs inside a "container"
fluidRow(
div(id="container",
height = dim(img)[1],
width = dim(img)[2],
style="position:relative;",
# Bottom layer: the static image served from the www/ directory.
div(tags$img(src='Pinu_gorria_02_Asier.png',
style=paste0("width:",dim(img)[2],";height:",dim(img)[2],";")),
style="position:absolute; top:0; left:0;"),
# Top layer: a transparent plot that also captures click events.
div(plotOutput("plot1",
height = dim(img)[2],
width = dim(img)[1],
click = "image_click"),
style="position:absolute; top:0; left:0;")
)
)
)
### SERVER ####
server <- function(input, output, session) {
# Render the scatterplot with a fully transparent background so the image
# underneath stays visible.
output$plot1 <- renderPlot({
ggplot(USArrests, aes(UrbanPop, Murder)) + # learn to use ggplot, it's about time!
geom_point() +
theme(
panel.background = element_rect(fill = "transparent", colour = NA), # bg of the panel
plot.background = element_rect(fill = "transparent", colour = NA), # bg of the plot
panel.grid.major = element_blank(), # get rid of major grid
panel.grid.minor = element_blank(), # get rid of minor grid
legend.background = element_rect(fill = "transparent", colour = NA), # get rid of legend bg
legend.box.background = element_rect(fill = "transparent", colour = NA) # get rid of legend panel bg
)
},
bg="transparent")
}
# Run the application
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coef.CBFM.R
\name{coef.CBFM}
\alias{coef.CBFM}
\title{Extract model coefficients from a CBFM fit}
\usage{
\method{coef}{CBFM}(object, ...)
}
\arguments{
\item{object}{An object of class \code{CBFM}.}
\item{...}{Not used in this case.}
}
\value{
A matrix of estimated species-specific regression coefficients corresponding to the model matrix created, where the number of rows is equal to the number of species. For zero-inflated distributions, it returns a list containing both the matrix of estimated species-specific regression coefficients corresponding to the model matrix created, and a vector of estimated species-specific probabilities of zero-inflation on the logit scale.
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#stable}{\figure{lifecycle-stable.svg}{options: alt='[Stable]'}}}{\strong{[Stable]}}
Extracts the estimated regression coefficients associated with the covariates from a fitted \code{CBFM} object.
}
\details{
For the purposes of the package, the CBFM is characterized by the following mean regression model: for observational unit \eqn{i=1,\ldots,N} and species \eqn{j=1,\ldots,m}, we have
\deqn{g(\mu_{ij}) = \eta_{ij} = x_i^\top\beta_j + b_i^\top a_j,}
where \eqn{g(.)} is a known link function, \eqn{x_i} denotes a vector of predictors for unit \eqn{i} i.e., the \eqn{i}-th row from the created model matrix, \eqn{\beta_j} denotes the corresponding regression coefficients for species \eqn{j}, \eqn{b_i} denotes a vector of spatial, temporal, and/or spatio-temporal basis functions for unit \eqn{i} , and \eqn{a_j} denotes the corresponding regression coefficients for species \eqn{j}.
This function will extract the estimated coefficients \eqn{\hat{\beta}_j}'s from the fitted CBFM, noting that this may include the estimated smoothing coefficients if any smoothers were included. For zero-inflated distributions, it will also return the estimated coefficients associated with modeling the probability of zero-inflation, noting that this may include the estimated smoothing coefficients if any smoothers were included.
This function does \emph{not} return the estimated regression coefficients associated with the basis functions i.e., the \eqn{\hat{a}_j}'s. These can be obtained from \code{object$basis_effects_mat}.
}
\examples{
\dontrun{
library(autoFRK)
library(FRK)
library(MASS)
library(mvabund)
library(mvtnorm)
library(ROCR)
library(sp)
library(RandomFields)
library(tidyverse)
##------------------------------
## **Example 1: Fitting a CBFM to spatial multivariate presence-absence data**
## simulated from a spatial latent variable model
## Please note the data generation process (thus) differs from CBFM.
##------------------------------
set.seed(2021)
num_sites <- 500 # 500 (units) sites
num_spp <- 50 # Number of species
num_X <- 4 # Number of regression slopes
spp_slopes <- matrix(runif(num_spp * num_X, -1, 1), nrow = num_spp)
spp_intercepts <- runif(num_spp, -2, 0)
# Simulate spatial coordinates and environmental covariate components
xy <- data.frame(x = runif(num_sites, 0, 5), y = runif(num_sites, 0, 5))
X <- rmvnorm(num_sites, mean = rep(0,4))
colnames(X) <- c("temp", "depth", "chla", "O2")
dat <- data.frame(xy, X)
mm <- model.matrix(~ temp + depth + chla + O2 - 1, data = dat) \%>\%
scale \%>\%
as.matrix
# Simulate latent variable component
true_lvs <- RFsimulate(model = RMexp(var=1, scale=2),
x = xy$x, y = xy$y, n = 2)@data \%>\%
as.matrix
spp_loadings <- matrix(runif(num_spp * 2, -1, 1), nrow = num_spp)
set.seed(NULL)
# Simulate spatial multivariate abundance data (presence-absence)
eta <- tcrossprod(cbind(1,mm), cbind(spp_intercepts,spp_slopes)) +
tcrossprod(true_lvs, spp_loadings)
simy <- matrix(rbinom(num_sites * num_spp, size = 1,
prob = plogis(eta)), nrow = num_sites)
rm(X, mm, spp_loadings, true_lvs, xy, eta)
# Set up spatial basis functions for CBFM -- Most users will start here!
num_basisfunctions <- 25 # Number of spatial basis functions to use
basisfunctions <- mrts(dat[,c("x","y")], num_basisfunctions) \%>\%
as.matrix \%>\%
{.[,-(1)]} # Remove the first intercept column
# Fit CBFM
useformula <- ~ temp + depth + chla + O2
fitcbfm <- CBFM(y = simy, formula = useformula, data = dat,
B_space = basisfunctions, family = binomial(), control = list(trace = 1))
coef(fitcbfm)
}
}
\seealso{
\code{\link[=CBFM]{CBFM()}} for fitting CBFMs.
}
\author{
Francis K.C. Hui \href{mailto:fhui28@gmail.com}{fhui28@gmail.com}, Chris Haak
}
|
/man/coef.CBFM.Rd
|
no_license
|
fhui28/CBFM
|
R
| false
| true
| 4,559
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coef.CBFM.R
\name{coef.CBFM}
\alias{coef.CBFM}
\title{Extract model coefficients from a CBFM fit}
\usage{
\method{coef}{CBFM}(object, ...)
}
\arguments{
\item{object}{An object of class \code{CBFM}.}
\item{...}{Not used in this case.}
}
\value{
A matrix of estimated species-specific regression coefficients corresponding to the model matrix created, where the number of rows is equal to the number of species. For zero-inflated distributions, it returns a list containing both the matrix of estimated species-specific regression coefficients corresponding to the model matrix created, and a vector of estimated species-specific probabilities of zero-inflation on the logit scale.
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#stable}{\figure{lifecycle-stable.svg}{options: alt='[Stable]'}}}{\strong{[Stable]}}
Extracts the estimated regression coefficients associated with the covariates from a fitted \code{CBFM} object.
}
\details{
For the purposes of the package, the CBFM is characterized by the following mean regression model: for observational unit \eqn{i=1,\ldots,N} and species \eqn{j=1,\ldots,m}, we have
\deqn{g(\mu_{ij}) = \eta_{ij} = x_i^\top\beta_j + b_i^\top a_j,}
where \eqn{g(.)} is a known link function, \eqn{x_i} denotes a vector of predictors for unit \eqn{i} i.e., the \eqn{i}-th row from the created model matrix, \eqn{\beta_j} denotes the corresponding regression coefficients for species \eqn{j}, \eqn{b_i} denotes a vector of spatial, temporal, and/or spatio-temporal basis functions for unit \eqn{i} , and \eqn{a_j} denotes the corresponding regression coefficients for species \eqn{j}.
This function will extract the estimated coefficients \eqn{\hat{\beta}_j}'s from the fitted CBFM, noting that this may include the estimated smoothing coefficients if any smoothers were included. For zero-inflated distributions, it will also return the estimated coefficients associated with modeling the probability of zero-inflation, noting that this may include the estimated smoothing coefficients if any smoothers were included.
This function does \emph{not} return the estimated regression coefficients associated with the basis functions i.e., the \eqn{\hat{a}_j}'s. These can be obtained from \code{object$basis_effects_mat}.
}
\examples{
\dontrun{
library(autoFRK)
library(FRK)
library(MASS)
library(mvabund)
library(mvtnorm)
library(ROCR)
library(sp)
library(RandomFields)
library(tidyverse)
##------------------------------
## **Example 1: Fitting a CBFM to spatial multivariate presence-absence data**
## simulated from a spatial latent variable model
## Please note the data generation process therefore differs from the model assumed by CBFM.
##------------------------------
set.seed(2021)
num_sites <- 500 # 500 (units) sites
num_spp <- 50 # Number of species
num_X <- 4 # Number of regression slopes
spp_slopes <- matrix(runif(num_spp * num_X, -1, 1), nrow = num_spp)
spp_intercepts <- runif(num_spp, -2, 0)
# Simulate spatial coordinates and environmental covariate components
xy <- data.frame(x = runif(num_sites, 0, 5), y = runif(num_sites, 0, 5))
X <- rmvnorm(num_sites, mean = rep(0,4))
colnames(X) <- c("temp", "depth", "chla", "O2")
dat <- data.frame(xy, X)
mm <- model.matrix(~ temp + depth + chla + O2 - 1, data = dat) \%>\%
scale \%>\%
as.matrix
# Simulate latent variable component
true_lvs <- RFsimulate(model = RMexp(var=1, scale=2),
x = xy$x, y = xy$y, n = 2)@data \%>\%
as.matrix
spp_loadings <- matrix(runif(num_spp * 2, -1, 1), nrow = num_spp)
set.seed(NULL)
# Simulate spatial multivariate abundance data (presence-absence)
eta <- tcrossprod(cbind(1,mm), cbind(spp_intercepts,spp_slopes)) +
tcrossprod(true_lvs, spp_loadings)
simy <- matrix(rbinom(num_sites * num_spp, size = 1,
prob = plogis(eta)), nrow = num_sites)
rm(X, mm, spp_loadings, true_lvs, xy, eta)
# Set up spatial basis functions for CBFM -- Most users will start here!
num_basisfunctions <- 25 # Number of spatial basis functions to use
basisfunctions <- mrts(dat[,c("x","y")], num_basisfunctions) \%>\%
as.matrix \%>\%
{.[,-(1)]} # Remove the first intercept column
# Fit CBFM
useformula <- ~ temp + depth + chla + O2
fitcbfm <- CBFM(y = simy, formula = useformula, data = dat,
B_space = basisfunctions, family = binomial(), control = list(trace = 1))
coef(fitcbfm)
}
}
\seealso{
\code{\link[=CBFM]{CBFM()}} for fitting CBFMs.
}
\author{
Francis K.C. Hui \href{mailto:fhui28@gmail.com}{fhui28@gmail.com}, Chris Haak
}
|
library(rvest)

# Scrape the accommodation-type filter labels from a TripAdvisor county
# hotel-listing page. The labels are the text of nodes matching
# '.prw_filters_hsx_radio_resp .label' (tied to TripAdvisor's markup;
# returns character(0) if the page structure changes -- verify output).
#
# Args:
#   url: TripAdvisor "Hotels" listing URL for a county.
# Returns:
#   Character vector of accommodation filter labels.
fetch_accommodation_labels <- function(url) {
  read_html(url) %>%
    html_nodes('.prw_filters_hsx_radio_resp .label') %>%
    html_text()
}

#-- Clare Accommodation --
ClareAccommodation <- fetch_accommodation_labels("https://www.tripadvisor.ie/Hotels-g186595-County_Clare-Hotels.html")
ClareAccommodation
#-- Cork Accommodation --
CorkAccommodation <- fetch_accommodation_labels("https://www.tripadvisor.ie/Hotels-g186598-County_Cork-Hotels.html")
CorkAccommodation
#-- Kerry Accommodation --
KerryAccommodation <- fetch_accommodation_labels("https://www.tripadvisor.ie/Hotels-g186610-County_Kerry-Hotels.html")
KerryAccommodation
#-- Waterford Accommodation --
WaterfordAccommodation <- fetch_accommodation_labels("https://www.tripadvisor.ie/Hotels-g186637-County_Waterford-Hotels.html")
WaterfordAccommodation
#-- Limerick Accommodation --
LimerickAccommodation <- fetch_accommodation_labels("https://www.tripadvisor.ie/Hotels-g186619-County_Limerick-Hotels.html")
LimerickAccommodation
#-- Tipperary Accommodation --
TipperaryAccommodation <- fetch_accommodation_labels("https://www.tripadvisor.ie/Hotels-g186636-County_Tipperary-Hotels.html")
TipperaryAccommodation
|
/SoftwareProject/DataCollection/Accomodation/MunsterAccomodation.R
|
no_license
|
robbiejenkinson/SoftwareProject
|
R
| false
| false
| 1,696
|
r
|
# Scrapes each Munster county's TripAdvisor hotel-listing page and prints the
# accommodation filter labels found in '.prw_filters_hsx_radio_resp .label'
# nodes. NOTE(review): the CSS selector is tied to TripAdvisor's markup and
# silently yields character(0) if the page layout changes -- verify output.
library(rvest)
#-- Clare Accommodation --
ClareAccommodation <- read_html("https://www.tripadvisor.ie/Hotels-g186595-County_Clare-Hotels.html")
ClareAccommodation <- ClareAccommodation %>% html_nodes('.prw_filters_hsx_radio_resp .label') %>% html_text()
ClareAccommodation # top-level name auto-prints the scraped labels
#-- Cork Accommodation --
CorkAccommodation <- read_html("https://www.tripadvisor.ie/Hotels-g186598-County_Cork-Hotels.html")
CorkAccommodation <- CorkAccommodation %>% html_nodes('.prw_filters_hsx_radio_resp .label') %>% html_text()
CorkAccommodation
#-- Kerry Accommodation --
KerryAccommodation <- read_html("https://www.tripadvisor.ie/Hotels-g186610-County_Kerry-Hotels.html")
KerryAccommodation <- KerryAccommodation %>% html_nodes('.prw_filters_hsx_radio_resp .label') %>% html_text()
KerryAccommodation
#-- Waterford Accommodation --
WaterfordAccommodation <- read_html("https://www.tripadvisor.ie/Hotels-g186637-County_Waterford-Hotels.html")
WaterfordAccommodation <- WaterfordAccommodation %>% html_nodes('.prw_filters_hsx_radio_resp .label') %>% html_text()
WaterfordAccommodation
#-- Limerick Accommodation --
LimerickAccommodation <- read_html("https://www.tripadvisor.ie/Hotels-g186619-County_Limerick-Hotels.html")
LimerickAccommodation <- LimerickAccommodation %>% html_nodes('.prw_filters_hsx_radio_resp .label') %>% html_text()
LimerickAccommodation
#-- Tipperary Accommodation --
TipperaryAccommodation <- read_html("https://www.tripadvisor.ie/Hotels-g186636-County_Tipperary-Hotels.html")
TipperaryAccommodation <- TipperaryAccommodation %>% html_nodes('.prw_filters_hsx_radio_resp .label') %>% html_text()
TipperaryAccommodation
|
# Find the t1 (swing-low) index preceding an upward move that ends at `now`.
#
# Relies on globals defined by the caller: `points` (price vector) and
# `move.duration` (maximum allowed t1-to-now distance, in points) --
# TODO confirm both are set before calling.
#
# Args:
#   patterns: data.frame of previously detected patterns with a t2.x column.
#   now: Current index into the global `points` vector.
#
# Returns:
#   Index of the lowest price between the last t2 before `now` and `now`,
#   advanced toward `now` until it lies within `move.duration`.
FindT1Up <- function(patterns, now) {
  # Start from the most recent t2 before `now`; take the minimum over [t2, now].
  t1 <- tail(patterns$t2.x[patterns$t2.x < now], 1)
  t1 <- which.min(points[t1:now]) + t1 - 1
  # Bug fix: the original recomputed which.min over the same [t1, now] window,
  # whose minimum is at t1 itself, so t1 never advanced and the loop spun
  # forever whenever t1 was too old. Search strictly after t1 instead, which
  # strictly increases t1 each pass and guarantees termination.
  while (now - t1 >= move.duration && t1 < now) {
    t1 <- which.min(points[(t1 + 1):now]) + t1
  }
  t1
}
# Find the t1 (swing-high) index preceding a downward move that ends at `now`.
# Mirror image of FindT1Up (uses which.max instead of which.min).
#
# Relies on globals defined by the caller: `points` and `move.duration` --
# TODO confirm both are set before calling.
#
# Args:
#   patterns: data.frame of previously detected patterns with a t2.x column.
#   now: Current index into the global `points` vector.
#
# Returns:
#   Index of the highest price between the last t2 before `now` and `now`,
#   advanced toward `now` until it lies within `move.duration`.
FindT1Down <- function(patterns, now) {
  t1 <- tail(patterns$t2.x[patterns$t2.x < now], 1)
  t1 <- which.max(points[t1:now]) + t1 - 1
  # Bug fix: recomputing which.max over the unchanged [t1, now] window never
  # advanced t1 (its maximum sits at t1), so the loop never terminated.
  # Search strictly after t1 so the index always moves toward `now`.
  while (now - t1 >= move.duration && t1 < now) {
    t1 <- which.max(points[(t1 + 1):now]) + t1
  }
  t1
}
FindT1T2 <- function(now) {
  # Look back from index `now` and find the latest (t1, t2) leg ending before
  # it; the last qualifying pattern found in the scan window is returned.
  #
  # Relies on globals defined by the caller: `points`, `interval`, `min.basis`,
  # `move.duration`, `noise.basis`, `noise.duration`, `retrace.percent`,
  # `retrace.min`, `retrace.duration`, `vol.max` -- TODO confirm all are set.
  #
  # Args:
  #   now: Current index into the global `points` vector.
  #
  # Returns:
  #   A one-row data.frame pattern (t1/t2[/t3] columns) or NULL if none found.
  matched <- NULL
  # NOTE(review): 60 / interval * 24 * 10 is ten days' worth of points if
  # `interval` is in minutes -- the original comment here said "two weeks".
  start <- now - 60 / interval * 24 * 10 # look back ~10 days of points
  i <- ifelse(start < 1, 1, start)
  # traverse through all values of the vector
  while (i < now) {
    local.points <- points[i:now]
    pattern <- FindSingleBack(local.points, interval, (i - 1),
                              min.basis = min.basis, move.duration = move.duration,
                              noise.basis = noise.basis, noise.duration = noise.duration,
                              retrace.percent = retrace.percent,
                              retrace.min = retrace.min, retrace.duration = retrace.duration)
    if (is.data.frame(pattern)
        && (pattern$t2.x - pattern$t1.x) > 2) { # filter jumps with small gap (<=10mins)
      t12 <- pattern$t2.x - pattern$t1.x
      # Filter by volatility: per-point standard deviation of the t1..t2 leg.
      vol12 <- sd(points[pattern$t1.x : pattern$t2.x]) / t12 * 100
      if (vol12 <= vol.max) {
        # When a pattern is accepted, jump the scan index to just before its t2
        # (the trailing i + 1 below lands the next scan at the t2 position).
        i <- pattern$t2.x - 1
        matched <- pattern
      }
    }
    i <- i + 1
  }
  return(matched)
}
FindSingleBack <- function(points, interval, x.offset = 0,
                           min.basis, move.duration,
                           noise.basis, noise.duration,
                           retrace.percent,
                           retrace.min, retrace.duration) {
  # Finds the t2 and t3 point for a given t1 and data values.
  # The only difference from FindSingle() is that after a potential t2 is found,
  # it will output t1 t2 when it reaches the endpoint no matter whether a t3
  # has been found.
  #
  # Args:
  #   points: Price vector; points[1] is taken as the t1 candidate.
  #   interval: Sampling interval of `points`; every duration-style argument
  #     below is converted to a point count via integer division by it.
  #   x.offset: Offset added to every reported x index (t1.x, t2.x, t3.x) so
  #     indices refer to the caller's full series rather than this slice.
  #   min.basis: Minimum absolute price move for the t1 -> t2 leg.
  #   move.duration: Maximum duration of the t1 -> t2 leg.
  #   noise.basis, noise.duration: Size/length thresholds under which a
  #     counter-move is treated as noise rather than a reversal.
  #   retrace.percent: Minimum retracement, as a percent of the t1 -> t2 move,
  #     required for a t3.
  #   retrace.min, retrace.duration: Minimum start offset and maximum duration
  #     of the t2 -> t3 retracement.
  #
  # Returns:
  #   Pattern of t1, t2, t3 for starting point of "points" or null if none
  points.length <- length(points)
  # Convert time-based durations into point counts.
  move.duration <- move.duration %/% interval
  noise.duration <- noise.duration %/% interval
  retrace.min <- retrace.min %/% interval
  retrace.duration <- retrace.duration %/% interval
  i <- 2
  # Search from next point for move larger than min.basis.
  while (abs(points[i] - points[1]) < min.basis
         && (i - 1) < move.duration) {
    i <- i + 1
    if (i > points.length) {
      return(NULL)
    }
  }
  # If there is no large move within move.duration, return null.
  if (i - 1 >= move.duration) {
    return(NULL)
  }
  # Otherwise i is the first point that is beyond min.basis, which could be t2.
  # Then we try to determine whether it is a t2.
  # If price still moves in the same direction, continue to next point.
  if ((i+1) > points.length) {
    pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                          t2.x = i + x.offset, t2.y = points[i])
    return(pattern)
  }
  while ((points[i+1] - points[i])*(points[i] - points[1]) >= 0
         && (i+1) - 1 < move.duration) {
    i <- i + 1
    if ((i+1) > points.length) {
      pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                            t2.x = i + x.offset, t2.y = points[i])
      return(pattern)
    }
  }
  # If large move doesn't stop within move.duration, return null.
  if ((points[i+1] - points[i])*(points[i] - points[1]) >= 0
      && (i+1) - 1 >= move.duration) {
    return(NULL)
  }
  # Otherwise price starts to retrace in the next point.
  # Check if it is just noise. `k` tracks the extremum seen inside the noise
  # window; `max.move` its distance from points[i].
  j <- i + 1
  k <- i
  max.move <- 0
  while (abs(points[j] - points[i]) < noise.basis
         && (j - i) < noise.duration) {
    if ((points[j] - points[i])*(points[i] - points[1]) > 0
        && abs(points[j] - points[i]) > max.move) {
      k <- j
      max.move <- abs(points[j] - points[i])
    }
    j <- j + 1
    if (j > points.length) {
      for (l in 2:(k-1)) {
        if ((points[l] - points[1])*(points[k] - points[1]) < 0)
          return(NULL)
      }
      pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                            t2.x = k + x.offset, t2.y = points[k])
      return(pattern)
    }
  }
  # If small move doesn't stop within noise.duration, return null.
  if (abs(points[j] - points[i]) < noise.basis
      && j - i >= noise.duration) {
    return(NULL)
  }
  # Otherwise if price continues to move in the same direction after noise
  while ((points[j] - points[i])*(points[i] - points[1]) >= 0
         && (j - 1) < move.duration) {
    # Consider it as continuous move.
    i <- j
    # Repeat the whole t2-detection process above.
    # If price still moves in the same direction, continue to next point.
    if ((i+1) > points.length) {
      for (l in 2:(i-1)) {
        if ((points[l] - points[1])*(points[i] - points[1]) < 0)
          return(NULL)
      }
      pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                            t2.x = i + x.offset, t2.y = points[i])
      return(pattern)
    }
    while ((points[i+1] - points[i])*(points[i] - points[1]) >= 0
           && (i+1) - 1 < move.duration) {
      i <- i + 1
      if ((i+1) > points.length) {
        for (l in 2:(i-1)) {
          if ((points[l] - points[1])*(points[i] - points[1]) < 0)
            return(NULL)
        }
        pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                              t2.x = i + x.offset, t2.y = points[i])
        return(pattern)
      }
    }
    # If large move doesn't stop within move.duration, return null.
    if ((points[i+1] - points[i])*(points[i] - points[1]) >= 0
        && (i+1) - 1 >= move.duration) {
      return(NULL)
    }
    # Otherwise price starts to retrace in the next point.
    # Check if it is just noise.
    j <- i + 1
    k <- i
    max.move <- 0
    while (abs(points[j] - points[i]) < noise.basis
           && (j - i) < noise.duration) {
      if ((points[j] - points[i])*(points[i] - points[1]) > 0
          && abs(points[j] - points[i]) > max.move) {
        k <- j
        max.move <- abs(points[j] - points[i])
      }
      j <- j + 1
      if (j > points.length) {
        for (l in 2:(k-1)) {
          if ((points[l] - points[1])*(points[k] - points[1]) < 0)
            return(NULL)
        }
        pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                              t2.x = k + x.offset, t2.y = points[k])
        return(pattern)
      }
    }
    # If small move doesn't stop within noise.duration, return null.
    if (abs(points[j] - points[i]) < noise.basis
        && j - i >= noise.duration) {
      return(NULL)
    }
  }
  # If price continues to move in the same direction but out of move.duration,
  # return null.
  if ((points[j] - points[i])*(points[i] - points[1]) >= 0
      && (j - 1) >= move.duration) {
    return(NULL)
  }
  # Or price starts to retrace and move out of noise range, record extremum
  # point as t2. Check if it will retrace to a t3.
  i <- k
  # If extremum point is out of move.duration, return null.
  if ((i - 1) >= move.duration) {
    return(NULL)
  }
  # If between t1 and i there is move that is in opposite direction to point i,
  # return null.
  for (l in 2:(i-1)) {
    if ((points[l] - points[1])*(points[i] - points[1]) < 0)
      return(NULL)
  }
  # Search from next point at least retrace.min away
  # for retracement larger than retrace.percent.
  j <- i + retrace.min
  if (j > points.length) {
    pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                          t2.x = i + x.offset, t2.y = points[i])
    return(pattern)
  }
  while (abs(points[j] - points[i]) < retrace.percent*0.01*abs(points[i] - points[1])
         && (j - i) < retrace.duration
         && (points[j] - points[i])*(points[i] - points[1]) < 0) {
    j <- j + 1
    if (j > points.length) {
      pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                            t2.x = i + x.offset, t2.y = points[i])
      return(pattern)
    }
  }
  # If price moves back again or out of retrace duration, return null.
  if ((j - i) >= retrace.duration
      || (points[j] - points[i])*(points[i] - points[1]) >= 0) {
    return(NULL)
  }
  # Otherwise j is the first point that is beyond retrace.percent, which could
  # be t3. Then we try to determine whether it is a t3.
  # If price still retraces in the same direction, continue to next point.
  if ((j+1) > points.length) {
    pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                          t2.x = i + x.offset, t2.y = points[i],
                          t3.x = j + x.offset, t3.y = points[j])
    return(pattern)
  }
  while ((points[j+1] - points[j])*(points[j] - points[i]) >= 0
         && (j+1) - i < retrace.duration) {
    j <- j + 1
    if ((j+1) > points.length) {
      pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                            t2.x = i + x.offset, t2.y = points[i],
                            t3.x = j + x.offset, t3.y = points[j])
      return(pattern)
    }
  }
  # If large retrace doesn't stop within retrace.duration, return null.
  if ((points[j+1] - points[j])*(points[j] - points[i]) >= 0
      && (j+1) - i >= retrace.duration) {
    return(NULL)
  }
  # Otherwise price starts to move back in the next point.
  # Check if it is just noise (similar to checking for t2, replace j->l i->j 1->i).
  l <- j + 1
  k <- j
  max.move <- 0
  while (abs(points[l] - points[j]) < noise.basis
         && (l - j) < noise.duration) {
    if ((points[l] - points[j])*(points[j] - points[i]) > 0
        && abs(points[l] - points[j]) > max.move) {
      k <- l
      max.move <- abs(points[l] - points[j])
    }
    l <- l + 1
    if (l > points.length) {
      pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                            t2.x = i + x.offset, t2.y = points[i],
                            t3.x = k + x.offset, t3.y = points[k])
      return(pattern)
    }
  }
  # If small move doesn't stop within noise.duration, return i and j as t2 t3.
  if (abs(points[l] - points[j]) < noise.basis
      && l - j >= noise.duration) {
    pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                          t2.x = i + x.offset, t2.y = points[i],
                          t3.x = k + x.offset, t3.y = points[k])
    return(pattern)
  }
  # Otherwise if price continues to retrace in the same direction after noise
  while ((points[l] - points[j])*(points[j] - points[i]) >= 0
         && (l - i) < retrace.duration) {
    # Consider it as continuous move.
    j <- l
    # Repeat the whole t3-detection process above.
    # If price still retraces in the same direction, continue to next point.
    if ((j+1) > points.length) {
      pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                            t2.x = i + x.offset, t2.y = points[i],
                            t3.x = j + x.offset, t3.y = points[j])
      return(pattern)
    }
    while ((points[j+1] - points[j])*(points[j] - points[i]) >= 0
           && (j+1) - i < retrace.duration) {
      j <- j + 1
      if ((j+1) > points.length) {
        pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                              t2.x = i + x.offset, t2.y = points[i],
                              t3.x = j + x.offset, t3.y = points[j])
        return(pattern)
      }
    }
    # If large retrace doesn't stop within retrace.duration, return null.
    if ((points[j+1] - points[j])*(points[j] - points[i]) >= 0
        && (j+1) - i >= retrace.duration) {
      return(NULL)
    }
    # Otherwise price starts to move back in the next point.
    # Check if it is just noise (similar to checking for t2, replace j->l i->j 1->i).
    l <- j + 1
    k <- j
    max.move <- 0
    while (abs(points[l] - points[j]) < noise.basis
           && (l - j) < noise.duration) {
      if ((points[l] - points[j])*(points[j] - points[i]) > 0
          && abs(points[l] - points[j]) > max.move) {
        k <- l
        max.move <- abs(points[l] - points[j])
      }
      l <- l + 1
      if (l > points.length) {
        pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                              t2.x = i + x.offset, t2.y = points[i],
                              t3.x = k + x.offset, t3.y = points[k])
        return(pattern)
      }
    }
    # If small move doesn't stop within noise.duration, return i and j as t2 t3.
    if (abs(points[l] - points[j]) < noise.basis
        && l - j >= noise.duration) {
      pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                            t2.x = i + x.offset, t2.y = points[i],
                            t3.x = k + x.offset, t3.y = points[k])
      return(pattern)
    }
  }
  # If price continues to retrace in the same direction but out of
  # retrace.duration, return null.
  if ((points[l] - points[j])*(points[j] - points[i]) >= 0
      && (l - i) >= retrace.duration) {
    return(NULL)
  }
  # Or price starts to move back and out of noise range.
  # Return i and j as t2 t3.
  j <- k
  pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                        t2.x = i + x.offset, t2.y = points[i],
                        t3.x = j + x.offset, t3.y = points[j])
  return(pattern)
}
|
/global/R/lookback_global.R
|
no_license
|
prashantg123/PMnPred
|
R
| false
| false
| 13,848
|
r
|
# Find the t1 (swing-low) index preceding an upward move that ends at `now`.
#
# Relies on globals defined by the caller: `points` (price vector) and
# `move.duration` (maximum allowed t1-to-now distance, in points) --
# TODO confirm both are set before calling.
#
# Args:
#   patterns: data.frame of previously detected patterns with a t2.x column.
#   now: Current index into the global `points` vector.
#
# Returns:
#   Index of the lowest price between the last t2 before `now` and `now`,
#   advanced toward `now` until it lies within `move.duration`.
FindT1Up <- function(patterns, now) {
  # Start from the most recent t2 before `now`; take the minimum over [t2, now].
  t1 <- tail(patterns$t2.x[patterns$t2.x < now], 1)
  t1 <- which.min(points[t1:now]) + t1 - 1
  # Bug fix: the original recomputed which.min over the same [t1, now] window,
  # whose minimum is at t1 itself, so t1 never advanced and the loop spun
  # forever whenever t1 was too old. Search strictly after t1 instead, which
  # strictly increases t1 each pass and guarantees termination.
  while (now - t1 >= move.duration && t1 < now) {
    t1 <- which.min(points[(t1 + 1):now]) + t1
  }
  t1
}
# Find the t1 (swing-high) index preceding a downward move that ends at `now`.
# Mirror image of FindT1Up (uses which.max instead of which.min).
#
# Relies on globals defined by the caller: `points` and `move.duration` --
# TODO confirm both are set before calling.
#
# Args:
#   patterns: data.frame of previously detected patterns with a t2.x column.
#   now: Current index into the global `points` vector.
#
# Returns:
#   Index of the highest price between the last t2 before `now` and `now`,
#   advanced toward `now` until it lies within `move.duration`.
FindT1Down <- function(patterns, now) {
  t1 <- tail(patterns$t2.x[patterns$t2.x < now], 1)
  t1 <- which.max(points[t1:now]) + t1 - 1
  # Bug fix: recomputing which.max over the unchanged [t1, now] window never
  # advanced t1 (its maximum sits at t1), so the loop never terminated.
  # Search strictly after t1 so the index always moves toward `now`.
  while (now - t1 >= move.duration && t1 < now) {
    t1 <- which.max(points[(t1 + 1):now]) + t1
  }
  t1
}
FindT1T2 <- function(now) {
  # Look back from index `now` and find the latest (t1, t2) leg ending before
  # it; the last qualifying pattern found in the scan window is returned.
  #
  # Relies on globals defined by the caller: `points`, `interval`, `min.basis`,
  # `move.duration`, `noise.basis`, `noise.duration`, `retrace.percent`,
  # `retrace.min`, `retrace.duration`, `vol.max` -- TODO confirm all are set.
  #
  # Args:
  #   now: Current index into the global `points` vector.
  #
  # Returns:
  #   A one-row data.frame pattern (t1/t2[/t3] columns) or NULL if none found.
  matched <- NULL
  # NOTE(review): 60 / interval * 24 * 10 is ten days' worth of points if
  # `interval` is in minutes -- the original comment here said "two weeks".
  start <- now - 60 / interval * 24 * 10 # look back ~10 days of points
  i <- ifelse(start < 1, 1, start)
  # traverse through all values of the vector
  while (i < now) {
    local.points <- points[i:now]
    pattern <- FindSingleBack(local.points, interval, (i - 1),
                              min.basis = min.basis, move.duration = move.duration,
                              noise.basis = noise.basis, noise.duration = noise.duration,
                              retrace.percent = retrace.percent,
                              retrace.min = retrace.min, retrace.duration = retrace.duration)
    if (is.data.frame(pattern)
        && (pattern$t2.x - pattern$t1.x) > 2) { # filter jumps with small gap (<=10mins)
      t12 <- pattern$t2.x - pattern$t1.x
      # Filter by volatility: per-point standard deviation of the t1..t2 leg.
      vol12 <- sd(points[pattern$t1.x : pattern$t2.x]) / t12 * 100
      if (vol12 <= vol.max) {
        # When a pattern is accepted, jump the scan index to just before its t2
        # (the trailing i + 1 below lands the next scan at the t2 position).
        i <- pattern$t2.x - 1
        matched <- pattern
      }
    }
    i <- i + 1
  }
  return(matched)
}
FindSingleBack <- function(points, interval, x.offset = 0,
                           min.basis, move.duration,
                           noise.basis, noise.duration,
                           retrace.percent,
                           retrace.min, retrace.duration) {
  # Finds the t2 and t3 point for a given t1 and data values.
  # The only difference from FindSingle() is that after a potential t2 is found,
  # it will output t1 t2 when it reaches the endpoint no matter whether a t3
  # has been found.
  #
  # Args:
  #   points: Price vector; points[1] is taken as the t1 candidate.
  #   interval: Sampling interval of `points`; every duration-style argument
  #     below is converted to a point count via integer division by it.
  #   x.offset: Offset added to every reported x index (t1.x, t2.x, t3.x) so
  #     indices refer to the caller's full series rather than this slice.
  #   min.basis: Minimum absolute price move for the t1 -> t2 leg.
  #   move.duration: Maximum duration of the t1 -> t2 leg.
  #   noise.basis, noise.duration: Size/length thresholds under which a
  #     counter-move is treated as noise rather than a reversal.
  #   retrace.percent: Minimum retracement, as a percent of the t1 -> t2 move,
  #     required for a t3.
  #   retrace.min, retrace.duration: Minimum start offset and maximum duration
  #     of the t2 -> t3 retracement.
  #
  # Returns:
  #   Pattern of t1, t2, t3 for starting point of "points" or null if none
  points.length <- length(points)
  # Convert time-based durations into point counts.
  move.duration <- move.duration %/% interval
  noise.duration <- noise.duration %/% interval
  retrace.min <- retrace.min %/% interval
  retrace.duration <- retrace.duration %/% interval
  i <- 2
  # Search from next point for move larger than min.basis.
  while (abs(points[i] - points[1]) < min.basis
         && (i - 1) < move.duration) {
    i <- i + 1
    if (i > points.length) {
      return(NULL)
    }
  }
  # If there is no large move within move.duration, return null.
  if (i - 1 >= move.duration) {
    return(NULL)
  }
  # Otherwise i is the first point that is beyond min.basis, which could be t2.
  # Then we try to determine whether it is a t2.
  # If price still moves in the same direction, continue to next point.
  if ((i+1) > points.length) {
    pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                          t2.x = i + x.offset, t2.y = points[i])
    return(pattern)
  }
  while ((points[i+1] - points[i])*(points[i] - points[1]) >= 0
         && (i+1) - 1 < move.duration) {
    i <- i + 1
    if ((i+1) > points.length) {
      pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                            t2.x = i + x.offset, t2.y = points[i])
      return(pattern)
    }
  }
  # If large move doesn't stop within move.duration, return null.
  if ((points[i+1] - points[i])*(points[i] - points[1]) >= 0
      && (i+1) - 1 >= move.duration) {
    return(NULL)
  }
  # Otherwise price starts to retrace in the next point.
  # Check if it is just noise. `k` tracks the extremum seen inside the noise
  # window; `max.move` its distance from points[i].
  j <- i + 1
  k <- i
  max.move <- 0
  while (abs(points[j] - points[i]) < noise.basis
         && (j - i) < noise.duration) {
    if ((points[j] - points[i])*(points[i] - points[1]) > 0
        && abs(points[j] - points[i]) > max.move) {
      k <- j
      max.move <- abs(points[j] - points[i])
    }
    j <- j + 1
    if (j > points.length) {
      for (l in 2:(k-1)) {
        if ((points[l] - points[1])*(points[k] - points[1]) < 0)
          return(NULL)
      }
      pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                            t2.x = k + x.offset, t2.y = points[k])
      return(pattern)
    }
  }
  # If small move doesn't stop within noise.duration, return null.
  if (abs(points[j] - points[i]) < noise.basis
      && j - i >= noise.duration) {
    return(NULL)
  }
  # Otherwise if price continues to move in the same direction after noise
  while ((points[j] - points[i])*(points[i] - points[1]) >= 0
         && (j - 1) < move.duration) {
    # Consider it as continuous move.
    i <- j
    # Repeat the whole t2-detection process above.
    # If price still moves in the same direction, continue to next point.
    if ((i+1) > points.length) {
      for (l in 2:(i-1)) {
        if ((points[l] - points[1])*(points[i] - points[1]) < 0)
          return(NULL)
      }
      pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                            t2.x = i + x.offset, t2.y = points[i])
      return(pattern)
    }
    while ((points[i+1] - points[i])*(points[i] - points[1]) >= 0
           && (i+1) - 1 < move.duration) {
      i <- i + 1
      if ((i+1) > points.length) {
        for (l in 2:(i-1)) {
          if ((points[l] - points[1])*(points[i] - points[1]) < 0)
            return(NULL)
        }
        pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                              t2.x = i + x.offset, t2.y = points[i])
        return(pattern)
      }
    }
    # If large move doesn't stop within move.duration, return null.
    if ((points[i+1] - points[i])*(points[i] - points[1]) >= 0
        && (i+1) - 1 >= move.duration) {
      return(NULL)
    }
    # Otherwise price starts to retrace in the next point.
    # Check if it is just noise.
    j <- i + 1
    k <- i
    max.move <- 0
    while (abs(points[j] - points[i]) < noise.basis
           && (j - i) < noise.duration) {
      if ((points[j] - points[i])*(points[i] - points[1]) > 0
          && abs(points[j] - points[i]) > max.move) {
        k <- j
        max.move <- abs(points[j] - points[i])
      }
      j <- j + 1
      if (j > points.length) {
        for (l in 2:(k-1)) {
          if ((points[l] - points[1])*(points[k] - points[1]) < 0)
            return(NULL)
        }
        pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                              t2.x = k + x.offset, t2.y = points[k])
        return(pattern)
      }
    }
    # If small move doesn't stop within noise.duration, return null.
    if (abs(points[j] - points[i]) < noise.basis
        && j - i >= noise.duration) {
      return(NULL)
    }
  }
  # If price continues to move in the same direction but out of move.duration,
  # return null.
  if ((points[j] - points[i])*(points[i] - points[1]) >= 0
      && (j - 1) >= move.duration) {
    return(NULL)
  }
  # Or price starts to retrace and move out of noise range, record extremum
  # point as t2. Check if it will retrace to a t3.
  i <- k
  # If extremum point is out of move.duration, return null.
  if ((i - 1) >= move.duration) {
    return(NULL)
  }
  # If between t1 and i there is move that is in opposite direction to point i,
  # return null.
  for (l in 2:(i-1)) {
    if ((points[l] - points[1])*(points[i] - points[1]) < 0)
      return(NULL)
  }
  # Search from next point at least retrace.min away
  # for retracement larger than retrace.percent.
  j <- i + retrace.min
  if (j > points.length) {
    pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                          t2.x = i + x.offset, t2.y = points[i])
    return(pattern)
  }
  while (abs(points[j] - points[i]) < retrace.percent*0.01*abs(points[i] - points[1])
         && (j - i) < retrace.duration
         && (points[j] - points[i])*(points[i] - points[1]) < 0) {
    j <- j + 1
    if (j > points.length) {
      pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                            t2.x = i + x.offset, t2.y = points[i])
      return(pattern)
    }
  }
  # If price moves back again or out of retrace duration, return null.
  if ((j - i) >= retrace.duration
      || (points[j] - points[i])*(points[i] - points[1]) >= 0) {
    return(NULL)
  }
  # Otherwise j is the first point that is beyond retrace.percent, which could
  # be t3. Then we try to determine whether it is a t3.
  # If price still retraces in the same direction, continue to next point.
  if ((j+1) > points.length) {
    pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                          t2.x = i + x.offset, t2.y = points[i],
                          t3.x = j + x.offset, t3.y = points[j])
    return(pattern)
  }
  while ((points[j+1] - points[j])*(points[j] - points[i]) >= 0
         && (j+1) - i < retrace.duration) {
    j <- j + 1
    if ((j+1) > points.length) {
      pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                            t2.x = i + x.offset, t2.y = points[i],
                            t3.x = j + x.offset, t3.y = points[j])
      return(pattern)
    }
  }
  # If large retrace doesn't stop within retrace.duration, return null.
  if ((points[j+1] - points[j])*(points[j] - points[i]) >= 0
      && (j+1) - i >= retrace.duration) {
    return(NULL)
  }
  # Otherwise price starts to move back in the next point.
  # Check if it is just noise (similar to checking for t2, replace j->l i->j 1->i).
  l <- j + 1
  k <- j
  max.move <- 0
  while (abs(points[l] - points[j]) < noise.basis
         && (l - j) < noise.duration) {
    if ((points[l] - points[j])*(points[j] - points[i]) > 0
        && abs(points[l] - points[j]) > max.move) {
      k <- l
      max.move <- abs(points[l] - points[j])
    }
    l <- l + 1
    if (l > points.length) {
      pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                            t2.x = i + x.offset, t2.y = points[i],
                            t3.x = k + x.offset, t3.y = points[k])
      return(pattern)
    }
  }
  # If small move doesn't stop within noise.duration, return i and j as t2 t3.
  if (abs(points[l] - points[j]) < noise.basis
      && l - j >= noise.duration) {
    pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                          t2.x = i + x.offset, t2.y = points[i],
                          t3.x = k + x.offset, t3.y = points[k])
    return(pattern)
  }
  # Otherwise if price continues to retrace in the same direction after noise
  while ((points[l] - points[j])*(points[j] - points[i]) >= 0
         && (l - i) < retrace.duration) {
    # Consider it as continuous move.
    j <- l
    # Repeat the whole t3-detection process above.
    # If price still retraces in the same direction, continue to next point.
    if ((j+1) > points.length) {
      pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                            t2.x = i + x.offset, t2.y = points[i],
                            t3.x = j + x.offset, t3.y = points[j])
      return(pattern)
    }
    while ((points[j+1] - points[j])*(points[j] - points[i]) >= 0
           && (j+1) - i < retrace.duration) {
      j <- j + 1
      if ((j+1) > points.length) {
        pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                              t2.x = i + x.offset, t2.y = points[i],
                              t3.x = j + x.offset, t3.y = points[j])
        return(pattern)
      }
    }
    # If large retrace doesn't stop within retrace.duration, return null.
    if ((points[j+1] - points[j])*(points[j] - points[i]) >= 0
        && (j+1) - i >= retrace.duration) {
      return(NULL)
    }
    # Otherwise price starts to move back in the next point.
    # Check if it is just noise (similar to checking for t2, replace j->l i->j 1->i).
    l <- j + 1
    k <- j
    max.move <- 0
    while (abs(points[l] - points[j]) < noise.basis
           && (l - j) < noise.duration) {
      if ((points[l] - points[j])*(points[j] - points[i]) > 0
          && abs(points[l] - points[j]) > max.move) {
        k <- l
        max.move <- abs(points[l] - points[j])
      }
      l <- l + 1
      if (l > points.length) {
        pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                              t2.x = i + x.offset, t2.y = points[i],
                              t3.x = k + x.offset, t3.y = points[k])
        return(pattern)
      }
    }
    # If small move doesn't stop within noise.duration, return i and j as t2 t3.
    if (abs(points[l] - points[j]) < noise.basis
        && l - j >= noise.duration) {
      pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                            t2.x = i + x.offset, t2.y = points[i],
                            t3.x = k + x.offset, t3.y = points[k])
      return(pattern)
    }
  }
  # If price continues to retrace in the same direction but out of
  # retrace.duration, return null.
  if ((points[l] - points[j])*(points[j] - points[i]) >= 0
      && (l - i) >= retrace.duration) {
    return(NULL)
  }
  # Or price starts to move back and out of noise range.
  # Return i and j as t2 t3.
  j <- k
  pattern <- data.frame(t1.x = 1 + x.offset, t1.y = points[1],
                        t2.x = i + x.offset, t2.y = points[i],
                        t3.x = j + x.offset, t3.y = points[j])
  return(pattern)
}
|
# Auto-generated crash/valgrind regression input for the unexported
# meteor:::ET0_Makkink routine -- presumably Makkink reference
# evapotranspiration (ET0); TODO confirm against the meteor package sources.
# Rs/atmp/relh are zero-length while temp mixes extreme magnitudes and zeros,
# exercising argument-length and bounds handling in the underlying code.
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161, -1.51345790188863e+21, 1.44942408802595e-285, -1.72131968218895e+83, -7.88781071482504e+93, 1.0823131123826e-105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Forward the raw argument list to the internal function unchanged.
result <- do.call(meteor:::ET0_Makkink,testlist)
# Print the structure of whatever the routine returns.
str(result)
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615854999-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 883
|
r
|
# Auto-generated crash/valgrind regression input for the unexported
# meteor:::ET0_Makkink routine -- presumably Makkink reference
# evapotranspiration (ET0); TODO confirm against the meteor package sources.
# Rs/atmp/relh are zero-length while temp mixes extreme magnitudes and zeros,
# exercising argument-length and bounds handling in the underlying code.
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161, -1.51345790188863e+21, 1.44942408802595e-285, -1.72131968218895e+83, -7.88781071482504e+93, 1.0823131123826e-105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Forward the raw argument list to the internal function unchanged.
result <- do.call(meteor:::ET0_Makkink,testlist)
# Print the structure of whatever the routine returns.
str(result)
|
library(survival)
library(cmprsk)
# sample size
ndim = 1000
nsim = 1000
### true parameters ###
beta11=0.2
beta12=-0.5
beta13=1
p=0.65
T0 = c(20,21,22)
### Monte-Carlo loop: each replicate simulates competing-risks data, fits
### two Fine-Gray models (with and without z3), classifies every subject to
### its most probable state at each landmark time, and appends the state
### counts and net-reclassification counts to per-landmark text files.
for(m in 1:nsim)
{
### generate the covariates ###
# z1, z3: standard normals truncated to [-3.5, 3.5]; z2: Bernoulli(0.7).
z1=rnorm(ndim)
z1=pmax(pmin(z1,3.5),-3.5)
z2=ifelse(runif(ndim)<0.7,1,0)
z3=rnorm(ndim)
z3=pmax(pmin(z3,3.5),-3.5)
# Per-subject long-run cause-1 probability (Fine-Gray subdistribution form).
F1=1-(1-p)^(exp(z1*beta11+z2*beta12+z3*beta13))
# Latent uniform decides the cause: W < F1 -> cause 1, otherwise cause 2.
W=runif(ndim)
epsilon=ifelse(W<F1,1,2)
TT=rep(0,ndim)
# Cause-1 event times: inverse-transform sampling from the cause-1
# subdistribution implied by F1 above.
TT[W<F1]=-log(1-(1-(1-W[W<F1])^(1/exp(z1[W<F1]*beta11+z2[W<F1]*beta12+z3[W<F1]*beta13)))/p)
# Cause-2 event times: exponential with a weakened (coefficients / 10)
# covariate-dependent rate.
zz=exp(z1[W>=F1]*beta11/10+z2[W>=F1]*beta12/10+z3[W>=F1]*beta13/10)
TT[W>=F1]=sapply(zz,function(o) return(rexp(1,o)))
# Monotone rescaling of the times onto [0, 100] so the landmarks in T0
# fall in the interior of the observed range.
TT=TT^0.2
TT=pmin(TT*20,100)
hist(TT)
# Record, for each landmark t0, how many subjects are in each of the three
# states by t0: cause-1 event (n1), cause-2 event (n2), event-free (n3).
for(t0 in T0){
n1 = sum(ifelse(TT<=t0 & epsilon==1,1,0))
n2 = sum(ifelse(TT<=t0 & epsilon==2,1,0))
n3 = ndim - n1 - n2
write.table(t(c(n1,n2,n3)),paste("NRI_n_",t0,".txt",sep=""), sep="\t", row.names = FALSE,col.names = FALSE,append = TRUE)
}
#############################################################################
### Fit the cox proportional hazard regression model without z3 (model 1) ###
#############################################################################
# calculate the cumulative baseline hazard function for cause 1 #
cov = cbind(z1,z2)
crr1 = crr(TT,epsilon,cov)
pred1 = predict(crr1,cov)
# summary(crr1)
# index of the last jump time strictly below each prespecified t0 #
t0.index= c(max(which(pred1[,1]<T0[1])),max(which(pred1[,1]<T0[2])),max(which(pred1[,1]<T0[3])))
# predicted cause-1 probabilities for every subject at each landmark
# (predict.crr output: column 1 is time, remaining columns are subjects) #
p1.hat.m1 = (pred1[t0.index,-1])
crr2 = crr(TT,epsilon,cov,failcode=2)
pred2 = predict(crr2,cov)
# summary(crr2)
# index of the last jump time strictly below each prespecified t0 #
t0.index= c(max(which(pred2[,1]<T0[1])),max(which(pred2[,1]<T0[2])),max(which(pred2[,1]<T0[3])))
# predicted cause-2 probabilities for every subject at each landmark #
p2.hat.m1 = (pred2[t0.index,-1])
p3.hat.m1 = 1-p1.hat.m1-p2.hat.m1
# Classify each subject to the most probable of the three states at each
# landmark; when probabilities tie, the higher-numbered state wins
# (max over the indices matching the row maximum).
pind.m1=NULL
for(i in 1:3){
pp = cbind(p1.hat.m1[i,],p2.hat.m1[i,], p3.hat.m1[i,])
pmax = apply(pp,1,max)
pmax.matrix = cbind(pmax,pmax,pmax)
ind.matrix = cbind(rep(1,ndim),rep(2,ndim),rep(3,ndim))
pind.m1 = cbind(pind.m1,apply((pp>=pmax.matrix)*ind.matrix,1,max))
}
#######################################################################################
### Fit the cox proportional hazard regression model using all covariates (model 2) ###
#######################################################################################
# calculate the cumulative baseline hazard function for cause 1 #
cov = cbind(z1,z2,z3)
crr1 = crr(TT,epsilon,cov,failcode=1)
pred1 = predict(crr1,cov)
# summary(crr1)
# index of the last jump time strictly below each prespecified t0 #
t0.index= c(max(which(pred1[,1]<T0[1])),max(which(pred1[,1]<T0[2])),max(which(pred1[,1]<T0[3])))
# predicted cause-1 probabilities for every subject at each landmark #
p1.hat.m2 = (pred1[t0.index,-1])
crr2 = crr(TT,epsilon,cov,failcode=2)
pred2 = predict(crr2,cov)
# summary(crr2)
# index of the last jump time strictly below each prespecified t0 #
t0.index= c(max(which(pred2[,1]<T0[1])),max(which(pred2[,1]<T0[2])),max(which(pred2[,1]<T0[3])))
# predicted cause-2 probabilities for every subject at each landmark #
p2.hat.m2 = (pred2[t0.index,-1])
p3.hat.m2 = 1-p1.hat.m2-p2.hat.m2
# Same most-probable-state classification for model 2.
pind.m2=NULL
for(i in 1:3){
pp = cbind(p1.hat.m2[i,],p2.hat.m2[i,], p3.hat.m2[i,])
pmax = apply(pp,1,max)
pmax.matrix = cbind(pmax,pmax,pmax)
ind.matrix = cbind(rep(1,ndim),rep(2,ndim),rep(3,ndim))
pind.m2 = cbind(pind.m2,apply((pp>=pmax.matrix)*ind.matrix,1,max))
}
# Net reclassification counts at each landmark: subjects moved INTO the
# correct category by model 2 minus subjects moved OUT of it, computed
# separately for cause-1 events, cause-2 events, and event-free subjects.
for(i in 1:3){
t0=T0[i]
sum1 = sum(ifelse(pind.m2[,i]==1 & pind.m1[,i]!=1 & TT<=t0 & epsilon ==1, 1, 0)) - sum(ifelse(pind.m2[,i]!=1 & pind.m1[,i]==1 & TT<=t0 & epsilon ==1, 1, 0))
sum2 = sum(ifelse(pind.m2[,i]==2 & pind.m1[,i]!=2 & TT<=t0 & epsilon ==2, 1, 0)) - sum(ifelse(pind.m2[,i]!=2 & pind.m1[,i]==2 & TT<=t0 & epsilon ==2, 1, 0))
sum3 = sum(ifelse(pind.m2[,i]==3 & pind.m1[,i]!=3 & TT>t0, 1, 0)) - sum(ifelse(pind.m2[,i]!=3 & pind.m1[,i]==3 & TT>t0, 1, 0))
write.table(t(c(sum1,sum2,sum3)),paste("NRI_sum_",t0,".txt",sep=""), sep="\t", row.names = FALSE,col.names = FALSE,append = TRUE)
}
}
# --- Aggregate the per-replicate counts into the event-specific NRI ---
#
# For each landmark time t0, "NRI_n_<t0>.txt" holds one row per simulation
# replicate with the number of subjects in each of the three states
# (cause-1 event, cause-2 event, event-free) by t0, and "NRI_sum_<t0>.txt"
# holds the matching net-reclassification counts of model 2 vs model 1.
# The reported statistic is the average of the three category-wise
# net-reclassification proportions.
#
# The original code repeated the same 7-line stanza once per landmark; a
# single loop over T0 is equivalent.  print() is used explicitly so the
# diagnostics also appear when the script is run via source().
dir2 <- ""
for (t0 in T0) {
  ns <- read.table(paste0(dir2, "NRI_n_", t0, ".txt"), header = FALSE)
  print(dim(ns))                 # sanity check: one row per replicate, 3 cols
  ns <- apply(ns, 2, sum)        # total subjects per state, pooled over replicates
  sums <- read.table(paste0(dir2, "NRI_sum_", t0, ".txt"), header = FALSE)
  print(dim(sums))
  sums <- apply(sums, 2, sum)    # total net reclassifications per state
  print(sum(sums / ns) / 3)      # event-specific NRI at this landmark
}
|
/Simulation/Fine/Under Alternative/TrueFineNRI1.R
|
permissive
|
WangandYu/NRIandIDI
|
R
| false
| false
| 4,866
|
r
|
# Simulation of the event-specific Net Reclassification Improvement (NRI)
# for competing-risks (Fine-Gray) models, under the alternative: covariate
# z3 truly affects the cause-1 subdistribution, so model 2 (with z3) should
# reclassify subjects better than model 1 (without z3).
library(survival)
library(cmprsk)
# sample size
ndim = 1000
# number of Monte-Carlo replicates
nsim = 1000
### true parameters ###
# regression coefficients for covariates z1, z2, z3 in the cause-1 model
beta11=0.2
beta12=-0.5
beta13=1
# baseline cause-1 probability: F1 reduces to p when all covariates are 0
p=0.65
# landmark times at which the NRI is evaluated
T0 = c(20,21,22)
### Monte-Carlo loop: each replicate simulates competing-risks data, fits
### two Fine-Gray models (with and without z3), classifies every subject to
### its most probable state at each landmark time, and appends the state
### counts and net-reclassification counts to per-landmark text files.
for(m in 1:nsim)
{
### generate the covariates ###
# z1, z3: standard normals truncated to [-3.5, 3.5]; z2: Bernoulli(0.7).
z1=rnorm(ndim)
z1=pmax(pmin(z1,3.5),-3.5)
z2=ifelse(runif(ndim)<0.7,1,0)
z3=rnorm(ndim)
z3=pmax(pmin(z3,3.5),-3.5)
# Per-subject long-run cause-1 probability (Fine-Gray subdistribution form).
F1=1-(1-p)^(exp(z1*beta11+z2*beta12+z3*beta13))
# Latent uniform decides the cause: W < F1 -> cause 1, otherwise cause 2.
W=runif(ndim)
epsilon=ifelse(W<F1,1,2)
TT=rep(0,ndim)
# Cause-1 event times: inverse-transform sampling from the cause-1
# subdistribution implied by F1 above.
TT[W<F1]=-log(1-(1-(1-W[W<F1])^(1/exp(z1[W<F1]*beta11+z2[W<F1]*beta12+z3[W<F1]*beta13)))/p)
# Cause-2 event times: exponential with a weakened (coefficients / 10)
# covariate-dependent rate.
zz=exp(z1[W>=F1]*beta11/10+z2[W>=F1]*beta12/10+z3[W>=F1]*beta13/10)
TT[W>=F1]=sapply(zz,function(o) return(rexp(1,o)))
# Monotone rescaling of the times onto [0, 100] so the landmarks in T0
# fall in the interior of the observed range.
TT=TT^0.2
TT=pmin(TT*20,100)
hist(TT)
# Record, for each landmark t0, how many subjects are in each of the three
# states by t0: cause-1 event (n1), cause-2 event (n2), event-free (n3).
for(t0 in T0){
n1 = sum(ifelse(TT<=t0 & epsilon==1,1,0))
n2 = sum(ifelse(TT<=t0 & epsilon==2,1,0))
n3 = ndim - n1 - n2
write.table(t(c(n1,n2,n3)),paste("NRI_n_",t0,".txt",sep=""), sep="\t", row.names = FALSE,col.names = FALSE,append = TRUE)
}
#############################################################################
### Fit the cox proportional hazard regression model without z3 (model 1) ###
#############################################################################
# calculate the cumulative baseline hazard function for cause 1 #
cov = cbind(z1,z2)
crr1 = crr(TT,epsilon,cov)
pred1 = predict(crr1,cov)
# summary(crr1)
# index of the last jump time strictly below each prespecified t0 #
t0.index= c(max(which(pred1[,1]<T0[1])),max(which(pred1[,1]<T0[2])),max(which(pred1[,1]<T0[3])))
# predicted cause-1 probabilities for every subject at each landmark
# (predict.crr output: column 1 is time, remaining columns are subjects) #
p1.hat.m1 = (pred1[t0.index,-1])
crr2 = crr(TT,epsilon,cov,failcode=2)
pred2 = predict(crr2,cov)
# summary(crr2)
# index of the last jump time strictly below each prespecified t0 #
t0.index= c(max(which(pred2[,1]<T0[1])),max(which(pred2[,1]<T0[2])),max(which(pred2[,1]<T0[3])))
# predicted cause-2 probabilities for every subject at each landmark #
p2.hat.m1 = (pred2[t0.index,-1])
p3.hat.m1 = 1-p1.hat.m1-p2.hat.m1
# Classify each subject to the most probable of the three states at each
# landmark; when probabilities tie, the higher-numbered state wins
# (max over the indices matching the row maximum).
pind.m1=NULL
for(i in 1:3){
pp = cbind(p1.hat.m1[i,],p2.hat.m1[i,], p3.hat.m1[i,])
pmax = apply(pp,1,max)
pmax.matrix = cbind(pmax,pmax,pmax)
ind.matrix = cbind(rep(1,ndim),rep(2,ndim),rep(3,ndim))
pind.m1 = cbind(pind.m1,apply((pp>=pmax.matrix)*ind.matrix,1,max))
}
#######################################################################################
### Fit the cox proportional hazard regression model using all covariates (model 2) ###
#######################################################################################
# calculate the cumulative baseline hazard function for cause 1 #
cov = cbind(z1,z2,z3)
crr1 = crr(TT,epsilon,cov,failcode=1)
pred1 = predict(crr1,cov)
# summary(crr1)
# index of the last jump time strictly below each prespecified t0 #
t0.index= c(max(which(pred1[,1]<T0[1])),max(which(pred1[,1]<T0[2])),max(which(pred1[,1]<T0[3])))
# predicted cause-1 probabilities for every subject at each landmark #
p1.hat.m2 = (pred1[t0.index,-1])
crr2 = crr(TT,epsilon,cov,failcode=2)
pred2 = predict(crr2,cov)
# summary(crr2)
# index of the last jump time strictly below each prespecified t0 #
t0.index= c(max(which(pred2[,1]<T0[1])),max(which(pred2[,1]<T0[2])),max(which(pred2[,1]<T0[3])))
# predicted cause-2 probabilities for every subject at each landmark #
p2.hat.m2 = (pred2[t0.index,-1])
p3.hat.m2 = 1-p1.hat.m2-p2.hat.m2
# Same most-probable-state classification for model 2.
pind.m2=NULL
for(i in 1:3){
pp = cbind(p1.hat.m2[i,],p2.hat.m2[i,], p3.hat.m2[i,])
pmax = apply(pp,1,max)
pmax.matrix = cbind(pmax,pmax,pmax)
ind.matrix = cbind(rep(1,ndim),rep(2,ndim),rep(3,ndim))
pind.m2 = cbind(pind.m2,apply((pp>=pmax.matrix)*ind.matrix,1,max))
}
# Net reclassification counts at each landmark: subjects moved INTO the
# correct category by model 2 minus subjects moved OUT of it, computed
# separately for cause-1 events, cause-2 events, and event-free subjects.
for(i in 1:3){
t0=T0[i]
sum1 = sum(ifelse(pind.m2[,i]==1 & pind.m1[,i]!=1 & TT<=t0 & epsilon ==1, 1, 0)) - sum(ifelse(pind.m2[,i]!=1 & pind.m1[,i]==1 & TT<=t0 & epsilon ==1, 1, 0))
sum2 = sum(ifelse(pind.m2[,i]==2 & pind.m1[,i]!=2 & TT<=t0 & epsilon ==2, 1, 0)) - sum(ifelse(pind.m2[,i]!=2 & pind.m1[,i]==2 & TT<=t0 & epsilon ==2, 1, 0))
sum3 = sum(ifelse(pind.m2[,i]==3 & pind.m1[,i]!=3 & TT>t0, 1, 0)) - sum(ifelse(pind.m2[,i]!=3 & pind.m1[,i]==3 & TT>t0, 1, 0))
write.table(t(c(sum1,sum2,sum3)),paste("NRI_sum_",t0,".txt",sep=""), sep="\t", row.names = FALSE,col.names = FALSE,append = TRUE)
}
}
# --- Aggregate the per-replicate counts into the event-specific NRI ---
#
# For each landmark time t0, "NRI_n_<t0>.txt" holds one row per simulation
# replicate with the number of subjects in each of the three states
# (cause-1 event, cause-2 event, event-free) by t0, and "NRI_sum_<t0>.txt"
# holds the matching net-reclassification counts of model 2 vs model 1.
# The reported statistic is the average of the three category-wise
# net-reclassification proportions.
#
# The original code repeated the same 7-line stanza once per landmark; a
# single loop over T0 is equivalent.  print() is used explicitly so the
# diagnostics also appear when the script is run via source().
dir2 <- ""
for (t0 in T0) {
  ns <- read.table(paste0(dir2, "NRI_n_", t0, ".txt"), header = FALSE)
  print(dim(ns))                 # sanity check: one row per replicate, 3 cols
  ns <- apply(ns, 2, sum)        # total subjects per state, pooled over replicates
  sums <- read.table(paste0(dir2, "NRI_sum_", t0, ".txt"), header = FALSE)
  print(dim(sums))
  sums <- apply(sums, 2, sum)    # total net reclassifications per state
  print(sum(sums / ns) / 3)      # event-specific NRI at this landmark
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Train.R
\name{train_generic}
\alias{train_generic}
\title{Train an h2o model using the generic architecture}
\usage{
train_generic(model, info)
}
\arguments{
\item{model}{the name of the function to run}
\item{info}{the data for use with the model}
}
\description{
Not currently developed
}
|
/man/train_generic.Rd
|
no_license
|
NSAPH/airpred
|
R
| false
| true
| 370
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Train.R
\name{train_generic}
\alias{train_generic}
\title{Train an h2o model using the generic architecture}
\usage{
train_generic(model, info)
}
\arguments{
\item{model}{the name of the function to run}
\item{info}{the data for use with the model}
}
\description{
Not currently developed
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elb_service.R
\name{elb}
\alias{elb}
\title{Elastic Load Balancing}
\usage{
elb(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.
\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
\item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
\item{\strong{region}:} {The AWS Region used in instantiating the client.}
\item{\strong{close_connection}:} {Immediately close all HTTP connections.}
\item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
\item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e., \verb{http://s3.amazonaws.com/BUCKET/KEY}.}
}}
}
\value{
A client for the service. You can call the service's operations using
syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned
to the client. The available operations are listed in the
Operations section.
}
\description{
A load balancer can distribute incoming traffic across your EC2
instances. This enables you to increase the availability of your
application. The load balancer also monitors the health of its
registered instances and ensures that it routes traffic only to healthy
instances. You configure your load balancer to accept incoming traffic
by specifying one or more listeners, which are configured with a
protocol and port number for connections from clients to the load
balancer and a protocol and port number for connections from the load
balancer to the instances.
Elastic Load Balancing supports three types of load balancers:
Application Load Balancers, Network Load Balancers, and Classic Load
Balancers. You can select a load balancer based on your application
needs. For more information, see the \href{https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/}{Elastic Load Balancing User Guide}.
This reference covers the 2012-06-01 API, which supports Classic Load
Balancers. The 2015-12-01 API supports Application Load Balancers and
Network Load Balancers.
To get started, create a load balancer with one or more listeners using
\code{\link[=elb_create_load_balancer]{create_load_balancer}}. Register your
instances with the load balancer using
\code{\link[=elb_register_instances_with_load_balancer]{register_instances_with_load_balancer}}.
All Elastic Load Balancing operations are \emph{idempotent}, which means that
they complete at most one time. If you repeat an operation, it succeeds
with a 200 OK response code.
}
\section{Service syntax}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- elb(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string",
close_connection = "logical",
timeout = "numeric",
s3_force_path_style = "logical"
)
)
}\if{html}{\out{</div>}}
}
\section{Operations}{
\tabular{ll}{
\link[=elb_add_tags]{add_tags} \tab Adds the specified tags to the specified load balancer\cr
\link[=elb_apply_security_groups_to_load_balancer]{apply_security_groups_to_load_balancer} \tab Associates one or more security groups with your load balancer in a virtual private cloud (VPC)\cr
\link[=elb_attach_load_balancer_to_subnets]{attach_load_balancer_to_subnets} \tab Adds one or more subnets to the set of configured subnets for the specified load balancer\cr
\link[=elb_configure_health_check]{configure_health_check} \tab Specifies the health check settings to use when evaluating the health state of your EC2 instances\cr
\link[=elb_create_app_cookie_stickiness_policy]{create_app_cookie_stickiness_policy} \tab Generates a stickiness policy with sticky session lifetimes that follow that of an application-generated cookie\cr
\link[=elb_create_lb_cookie_stickiness_policy]{create_lb_cookie_stickiness_policy} \tab Generates a stickiness policy with sticky session lifetimes controlled by the lifetime of the browser (user-agent) or a specified expiration period\cr
\link[=elb_create_load_balancer]{create_load_balancer} \tab Creates a Classic Load Balancer\cr
\link[=elb_create_load_balancer_listeners]{create_load_balancer_listeners} \tab Creates one or more listeners for the specified load balancer\cr
\link[=elb_create_load_balancer_policy]{create_load_balancer_policy} \tab Creates a policy with the specified attributes for the specified load balancer\cr
\link[=elb_delete_load_balancer]{delete_load_balancer} \tab Deletes the specified load balancer\cr
\link[=elb_delete_load_balancer_listeners]{delete_load_balancer_listeners} \tab Deletes the specified listeners from the specified load balancer\cr
\link[=elb_delete_load_balancer_policy]{delete_load_balancer_policy} \tab Deletes the specified policy from the specified load balancer\cr
\link[=elb_deregister_instances_from_load_balancer]{deregister_instances_from_load_balancer} \tab Deregisters the specified instances from the specified load balancer\cr
\link[=elb_describe_account_limits]{describe_account_limits} \tab Describes the current Elastic Load Balancing resource limits for your AWS account\cr
\link[=elb_describe_instance_health]{describe_instance_health} \tab Describes the state of the specified instances with respect to the specified load balancer\cr
\link[=elb_describe_load_balancer_attributes]{describe_load_balancer_attributes} \tab Describes the attributes for the specified load balancer\cr
\link[=elb_describe_load_balancer_policies]{describe_load_balancer_policies} \tab Describes the specified policies\cr
\link[=elb_describe_load_balancer_policy_types]{describe_load_balancer_policy_types} \tab Describes the specified load balancer policy types or all load balancer policy types\cr
\link[=elb_describe_load_balancers]{describe_load_balancers} \tab Describes the specified load balancers\cr
\link[=elb_describe_tags]{describe_tags} \tab Describes the tags associated with the specified load balancers\cr
\link[=elb_detach_load_balancer_from_subnets]{detach_load_balancer_from_subnets} \tab Removes the specified subnets from the set of configured subnets for the load balancer\cr
\link[=elb_disable_availability_zones_for_load_balancer]{disable_availability_zones_for_load_balancer} \tab Removes the specified Availability Zones from the set of Availability Zones for the specified load balancer in EC2-Classic or a default VPC\cr
\link[=elb_enable_availability_zones_for_load_balancer]{enable_availability_zones_for_load_balancer} \tab Adds the specified Availability Zones to the set of Availability Zones for the specified load balancer in EC2-Classic or a default VPC\cr
\link[=elb_modify_load_balancer_attributes]{modify_load_balancer_attributes} \tab Modifies the attributes of the specified load balancer\cr
\link[=elb_register_instances_with_load_balancer]{register_instances_with_load_balancer} \tab Adds the specified instances to the specified load balancer\cr
\link[=elb_remove_tags]{remove_tags} \tab Removes one or more tags from the specified load balancer\cr
\link[=elb_set_load_balancer_listener_ssl_certificate]{set_load_balancer_listener_ssl_certificate} \tab Sets the certificate that terminates the specified listener's SSL connections\cr
\link[=elb_set_load_balancer_policies_for_backend_server]{set_load_balancer_policies_for_backend_server} \tab Replaces the set of policies associated with the specified port on which the EC2 instance is listening with a new set of policies\cr
\link[=elb_set_load_balancer_policies_of_listener]{set_load_balancer_policies_of_listener} \tab Replaces the current set of policies for the specified load balancer port with the specified set of policies
}
}
\examples{
\dontrun{
svc <- elb()
# This example adds two tags to the specified load balancer.
svc$add_tags(
LoadBalancerNames = list(
"my-load-balancer"
),
Tags = list(
list(
Key = "project",
Value = "lima"
),
list(
Key = "department",
Value = "digital-media"
)
)
)
}
}
|
/man/elb.Rd
|
no_license
|
cran/paws.networking
|
R
| false
| true
| 8,538
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elb_service.R
\name{elb}
\alias{elb}
\title{Elastic Load Balancing}
\usage{
elb(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.
\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
\item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
\item{\strong{region}:} {The AWS Region used in instantiating the client.}
\item{\strong{close_connection}:} {Immediately close all HTTP connections.}
\item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
\item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e., \verb{http://s3.amazonaws.com/BUCKET/KEY}.}
}}
}
\value{
A client for the service. You can call the service's operations using
syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned
to the client. The available operations are listed in the
Operations section.
}
\description{
A load balancer can distribute incoming traffic across your EC2
instances. This enables you to increase the availability of your
application. The load balancer also monitors the health of its
registered instances and ensures that it routes traffic only to healthy
instances. You configure your load balancer to accept incoming traffic
by specifying one or more listeners, which are configured with a
protocol and port number for connections from clients to the load
balancer and a protocol and port number for connections from the load
balancer to the instances.
Elastic Load Balancing supports three types of load balancers:
Application Load Balancers, Network Load Balancers, and Classic Load
Balancers. You can select a load balancer based on your application
needs. For more information, see the \href{https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/}{Elastic Load Balancing User Guide}.
This reference covers the 2012-06-01 API, which supports Classic Load
Balancers. The 2015-12-01 API supports Application Load Balancers and
Network Load Balancers.
To get started, create a load balancer with one or more listeners using
\code{\link[=elb_create_load_balancer]{create_load_balancer}}. Register your
instances with the load balancer using
\code{\link[=elb_register_instances_with_load_balancer]{register_instances_with_load_balancer}}.
All Elastic Load Balancing operations are \emph{idempotent}, which means that
they complete at most one time. If you repeat an operation, it succeeds
with a 200 OK response code.
}
\section{Service syntax}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- elb(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string",
close_connection = "logical",
timeout = "numeric",
s3_force_path_style = "logical"
)
)
}\if{html}{\out{</div>}}
}
\section{Operations}{
\tabular{ll}{
\link[=elb_add_tags]{add_tags} \tab Adds the specified tags to the specified load balancer\cr
\link[=elb_apply_security_groups_to_load_balancer]{apply_security_groups_to_load_balancer} \tab Associates one or more security groups with your load balancer in a virtual private cloud (VPC)\cr
\link[=elb_attach_load_balancer_to_subnets]{attach_load_balancer_to_subnets} \tab Adds one or more subnets to the set of configured subnets for the specified load balancer\cr
\link[=elb_configure_health_check]{configure_health_check} \tab Specifies the health check settings to use when evaluating the health state of your EC2 instances\cr
\link[=elb_create_app_cookie_stickiness_policy]{create_app_cookie_stickiness_policy} \tab Generates a stickiness policy with sticky session lifetimes that follow that of an application-generated cookie\cr
\link[=elb_create_lb_cookie_stickiness_policy]{create_lb_cookie_stickiness_policy} \tab Generates a stickiness policy with sticky session lifetimes controlled by the lifetime of the browser (user-agent) or a specified expiration period\cr
\link[=elb_create_load_balancer]{create_load_balancer} \tab Creates a Classic Load Balancer\cr
\link[=elb_create_load_balancer_listeners]{create_load_balancer_listeners} \tab Creates one or more listeners for the specified load balancer\cr
\link[=elb_create_load_balancer_policy]{create_load_balancer_policy} \tab Creates a policy with the specified attributes for the specified load balancer\cr
\link[=elb_delete_load_balancer]{delete_load_balancer} \tab Deletes the specified load balancer\cr
\link[=elb_delete_load_balancer_listeners]{delete_load_balancer_listeners} \tab Deletes the specified listeners from the specified load balancer\cr
\link[=elb_delete_load_balancer_policy]{delete_load_balancer_policy} \tab Deletes the specified policy from the specified load balancer\cr
\link[=elb_deregister_instances_from_load_balancer]{deregister_instances_from_load_balancer} \tab Deregisters the specified instances from the specified load balancer\cr
\link[=elb_describe_account_limits]{describe_account_limits} \tab Describes the current Elastic Load Balancing resource limits for your AWS account\cr
\link[=elb_describe_instance_health]{describe_instance_health} \tab Describes the state of the specified instances with respect to the specified load balancer\cr
\link[=elb_describe_load_balancer_attributes]{describe_load_balancer_attributes} \tab Describes the attributes for the specified load balancer\cr
\link[=elb_describe_load_balancer_policies]{describe_load_balancer_policies} \tab Describes the specified policies\cr
\link[=elb_describe_load_balancer_policy_types]{describe_load_balancer_policy_types} \tab Describes the specified load balancer policy types or all load balancer policy types\cr
\link[=elb_describe_load_balancers]{describe_load_balancers} \tab Describes the specified load balancers\cr
\link[=elb_describe_tags]{describe_tags} \tab Describes the tags associated with the specified load balancers\cr
\link[=elb_detach_load_balancer_from_subnets]{detach_load_balancer_from_subnets} \tab Removes the specified subnets from the set of configured subnets for the load balancer\cr
\link[=elb_disable_availability_zones_for_load_balancer]{disable_availability_zones_for_load_balancer} \tab Removes the specified Availability Zones from the set of Availability Zones for the specified load balancer in EC2-Classic or a default VPC\cr
\link[=elb_enable_availability_zones_for_load_balancer]{enable_availability_zones_for_load_balancer} \tab Adds the specified Availability Zones to the set of Availability Zones for the specified load balancer in EC2-Classic or a default VPC\cr
\link[=elb_modify_load_balancer_attributes]{modify_load_balancer_attributes} \tab Modifies the attributes of the specified load balancer\cr
\link[=elb_register_instances_with_load_balancer]{register_instances_with_load_balancer} \tab Adds the specified instances to the specified load balancer\cr
\link[=elb_remove_tags]{remove_tags} \tab Removes one or more tags from the specified load balancer\cr
\link[=elb_set_load_balancer_listener_ssl_certificate]{set_load_balancer_listener_ssl_certificate} \tab Sets the certificate that terminates the specified listener's SSL connections\cr
\link[=elb_set_load_balancer_policies_for_backend_server]{set_load_balancer_policies_for_backend_server} \tab Replaces the set of policies associated with the specified port on which the EC2 instance is listening with a new set of policies\cr
\link[=elb_set_load_balancer_policies_of_listener]{set_load_balancer_policies_of_listener} \tab Replaces the current set of policies for the specified load balancer port with the specified set of policies
}
}
\examples{
\dontrun{
svc <- elb()
# This example adds two tags to the specified load balancer.
svc$add_tags(
LoadBalancerNames = list(
"my-load-balancer"
),
Tags = list(
list(
Key = "project",
Value = "lima"
),
list(
Key = "department",
Value = "digital-media"
)
)
)
}
}
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
#
# Reads the UCI "household power consumption" data set (semicolon-separated,
# "?" marks missing values), restricts it to the two target days, and saves
# the histogram to plot1.png.
#
# Fixes vs the original: `header=` spelled out (was the partial match
# `head=`); Date and Time are joined with an explicit space so the input
# matches the "%d/%m/%Y %T" format instead of relying on strptime's
# whitespace leniency; the lower bound starts at midnight (the original
# 00:00:01 bound silently dropped the first record of 2007-02-01); and the
# `endtInterval` typo is corrected.
data <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";",
                 na.strings = "?")

# Combine the Date and Time columns into a single POSIXlt timestamp.
data2 <- transform(data,
                   MyTime = strptime(paste(Date, Time, sep = " "),
                                     format = "%d/%m/%Y %T"))

# Keep the full two days, midnight included.
startInterval <- strptime("2007-02-01 00:00:00", format = "%Y-%m-%d %T")
endInterval <- strptime("2007-02-02 23:59:59", format = "%Y-%m-%d %T")
data3 <- subset(data2, MyTime >= startInterval & MyTime <= endInterval)

# Slightly shrink all text elements; single plot on the device.
par(cex.lab = 0.8, cex.axis = 0.8, cex.main = 0.8, cex.sub = 0.8)
par(mfrow = c(1, 1))
hist(data3$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)", main = "")

# Copy the screen device to a PNG file, then close the PNG device.
dev.copy(png, "plot1.png")
dev.off()
|
/plot1.R
|
no_license
|
pacosoft2000/ExData_Plotting1
|
R
| false
| false
| 589
|
r
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
#
# Reads the UCI "household power consumption" data set (semicolon-separated,
# "?" marks missing values), restricts it to the two target days, and saves
# the histogram to plot1.png.
#
# Fixes vs the original: `header=` spelled out (was the partial match
# `head=`); Date and Time are joined with an explicit space so the input
# matches the "%d/%m/%Y %T" format instead of relying on strptime's
# whitespace leniency; the lower bound starts at midnight (the original
# 00:00:01 bound silently dropped the first record of 2007-02-01); and the
# `endtInterval` typo is corrected.
data <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";",
                 na.strings = "?")

# Combine the Date and Time columns into a single POSIXlt timestamp.
data2 <- transform(data,
                   MyTime = strptime(paste(Date, Time, sep = " "),
                                     format = "%d/%m/%Y %T"))

# Keep the full two days, midnight included.
startInterval <- strptime("2007-02-01 00:00:00", format = "%Y-%m-%d %T")
endInterval <- strptime("2007-02-02 23:59:59", format = "%Y-%m-%d %T")
data3 <- subset(data2, MyTime >= startInterval & MyTime <= endInterval)

# Slightly shrink all text elements; single plot on the device.
par(cex.lab = 0.8, cex.axis = 0.8, cex.main = 0.8, cex.sub = 0.8)
par(mfrow = c(1, 1))
hist(data3$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)", main = "")

# Copy the screen device to a PNG file, then close the PNG device.
dev.copy(png, "plot1.png")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pastef.r
\name{pastef}
\alias{pastef}
\title{Paste with "/" as the separator}
\usage{
pastef(...)
}
\arguments{
\item{...}{Objects to paste together}
}
\description{
Paste with "/" as the separator
}
|
/man/pastef.Rd
|
no_license
|
James-Thorson/ss3sim
|
R
| false
| true
| 279
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pastef.r
\name{pastef}
\alias{pastef}
\title{Paste with "/" as the separator}
\usage{
pastef(...)
}
\arguments{
\item{...}{Objects to paste together}
}
\description{
Paste with "/" as the separator
}
|
context("SQLDataFrame-methods")

# Open the SQLite database shipped with the package and build the
# SQLDataFrame shared by every test below ("sampleID" is the key column).
# RSQLite::SQLite() replaces the deprecated, non-namespaced
# dbDriver("SQLite"), which only worked with RSQLite attached.
test.db <- system.file("extdata", "test.db", package = "SQLDataFrame")
conn <- DBI::dbConnect(RSQLite::SQLite(), dbname = test.db)
obj <- SQLDataFrame(conn = conn, dbtable = "colData",
                    dbkey = "sampleID")
## methods
# Double-bracket / $ extraction: the key column returns its values; other
# columns can be pulled by position or name; multi-element [[ must error.
test_that("[[,SQLDataFrame works",
{
## key values
exp <- letters
expect_identical(exp, obj[["sampleID"]])
expect_identical(exp, obj$sampleID)
exp <- rep(c("ChIP", "Input"), 13)
expect_identical(exp, obj[[1]])
exp <- tblData(obj) %>% pull(Ages)
expect_identical(exp, obj[[2]])
expect_identical(obj[[2]], obj[["Ages"]])
expect_error(obj[[2:3]], "attempt to extract more than one element")
})
# Single-bracket subsetting: empty, list-style, column, row/column, and
# out-of-bounds forms, checking class, dimensions, and the @indexes slot.
test_that("[,SQLDataFrame works",
{
obj0 <- obj[]
expect_true(validObject(obj0))
expect_identical(obj0, obj)
## list_style_subsetting
obj1 <- obj[1]
expect_s4_class(obj1, "SQLDataFrame")
obj2 <- obj[c("sampleID", "Treatment")] ## have key column doesn't affect results.
expect_identical(obj1, obj2)
## 1-col subsetting, drop=TRUE by default
obj3 <- obj[, 1]
expect_false(is(obj3, "SQLDataFrame"))
## 1-col subsetting with key column, equivalent to 1-col subsetting and drop = FALSE.
obj3 <- obj[, "Treatment", drop = FALSE]
obj4 <- obj[, c("sampleID", "Treatment")]
expect_identical(obj3, obj4)
## multi-col subsetting
obj5 <- obj[, 1:2]
expect_identical(obj, obj5)
expect_identical(NULL, obj5@indexes[[2]])
## row&col subsetting
obj6 <- obj[1:5, 1:2]
expect_s4_class(obj6, "SQLDataFrame")
expect_identical(dim(obj6), c(5L, 2L))
expect_identical(colnames(obj6), colnames(obj))
expect_identical(list(1:5, NULL), obj6@indexes)
## out-of-bounds indices
expect_error(obj[1:100, ],
"subscript contains out-of-bounds indices")
expect_error(obj[, 4:5],
"subscript contains out-of-bounds indices")
})
# extractROWS keeps the backing table and records the row index.
test_that("'extractROWS,SQLDataFrame' works",
{
obj1 <- extractROWS(obj, 1:5)
expect_s4_class(obj1, "SQLDataFrame")
expect_identical(tblData(obj), tblData(obj1))
expect_identical(1:5, obj1@indexes[[1]])
expect_identical(dim(obj1), c(5L, 2L))
})
# .extractCOLS keeps the backing table and records the column index
# (NULL when every column is selected).
test_that("'.extractCOLS_SQLDataFrame' works",
{
obj1 <- .extractCOLS_SQLDataFrame(obj, 1:2)
expect_s4_class(obj1, "SQLDataFrame")
expect_identical(tblData(obj), tblData(obj1))
expect_identical(NULL, obj1@indexes[[2]])
expect_identical(dim(obj1), c(26L, 2L))
})
# dplyr::select on ranges, single columns, and the key column alone.
test_that("select.SQLDataFrame works",
{
obj1 <- obj %>% select(Treatment:Ages)
expect_s4_class(obj1, "SQLDataFrame")
expect_identical(tblData(obj), tblData(obj1))
expect_identical(NULL, obj1@indexes[[2]])
expect_identical(dim(obj1), c(26L, 2L))
obj1 <- obj %>% select(Ages)
expect_s4_class(obj1, "SQLDataFrame")
expect_identical(2L, obj1@indexes[[2]])
expect_identical(dim(obj1), c(26L, 1L))
obj1 <- obj %>% select("sampleID") ## key column.
expect_s4_class(obj1, "SQLDataFrame")
expect_identical(dim(obj1), c(26L, 0L))
})
# dplyr::filter keeps the backing table and records matching row indices.
test_that("filter.SQLDataFrame works",
{
obj1 <- obj %>% filter(Treatment == "ChIP")
expect_identical(dim(obj1), c(13L, 2L))
expect_equal(ridx(obj1), seq(1, 26, by=2))
expect_identical(tblData(obj), tblData(obj1))
})
# dplyr::mutate appends a column, alone and after a filter.
test_that("mutate.SQLDataFrame works",
{
obj1 <- obj %>% mutate(Age1 = ifelse(Ages <= 30, "30th", "40th"))
expect_identical(dim(obj1), c(26L, 3L))
expect_null(ridx(obj1))
obj1 <- obj %>% filter(Treatment == "ChIP") %>%
mutate(Age1 = ifelse(Ages <= 30, "30th", "40th"))
expect_identical(dim(obj1), c(13L, 3L))
})
|
/tests/testthat/test_SQLDataFrame-methods.R
|
no_license
|
Bioconductor/SQLDataFrame
|
R
| false
| false
| 3,662
|
r
|
context("SQLDataFrame-methods")

# Open the SQLite database shipped with the package and build the
# SQLDataFrame shared by every test below ("sampleID" is the key column).
# RSQLite::SQLite() replaces the deprecated, non-namespaced
# dbDriver("SQLite"), which only worked with RSQLite attached.
test.db <- system.file("extdata", "test.db", package = "SQLDataFrame")
conn <- DBI::dbConnect(RSQLite::SQLite(), dbname = test.db)
obj <- SQLDataFrame(conn = conn, dbtable = "colData",
                    dbkey = "sampleID")
## methods
test_that("[[,SQLDataFrame works",
{
## key values
exp <- letters
expect_identical(exp, obj[["sampleID"]])
expect_identical(exp, obj$sampleID)
exp <- rep(c("ChIP", "Input"), 13)
expect_identical(exp, obj[[1]])
exp <- tblData(obj) %>% pull(Ages)
expect_identical(exp, obj[[2]])
expect_identical(obj[[2]], obj[["Ages"]])
expect_error(obj[[2:3]], "attempt to extract more than one element")
})
test_that("[,SQLDataFrame works",
{
## Empty subscript returns the object unchanged.
obj0 <- obj[]
expect_true(validObject(obj0))
expect_identical(obj0, obj)
## list_style_subsetting
obj1 <- obj[1]
expect_s4_class(obj1, "SQLDataFrame")
obj2 <- obj[c("sampleID", "Treatment")] ## have key column doesn't affect results.
expect_identical(obj1, obj2)
## 1-col subsetting, drop=TRUE by default: result is no longer a SQLDataFrame.
obj3 <- obj[, 1]
expect_false(is(obj3, "SQLDataFrame"))
## 1-col subsetting with key column, equivalent to 1-col subsetting and drop = FALSE.
obj3 <- obj[, "Treatment", drop = FALSE]
obj4 <- obj[, c("sampleID", "Treatment")]
expect_identical(obj3, obj4)
## multi-col subsetting of every column is a no-op; column index stays NULL.
obj5 <- obj[, 1:2]
expect_identical(obj, obj5)
expect_identical(NULL, obj5@indexes[[2]])
## row&col subsetting records the row index lazily in @indexes.
obj6 <- obj[1:5, 1:2]
expect_s4_class(obj6, "SQLDataFrame")
expect_identical(dim(obj6), c(5L, 2L))
expect_identical(colnames(obj6), colnames(obj))
expect_identical(list(1:5, NULL), obj6@indexes)
## out-of-bounds indices raise rather than recycle or pad with NA.
expect_error(obj[1:100, ],
"subscript contains out-of-bounds indices")
expect_error(obj[, 4:5],
"subscript contains out-of-bounds indices")
})
test_that("'extractROWS,SQLDataFrame' works",
{
    ## Row extraction is lazy: the row index is recorded in @indexes
    ## while the backing table is shared, not copied.
    first5 <- extractROWS(obj, 1:5)
    expect_s4_class(first5, "SQLDataFrame")
    expect_identical(dim(first5), c(5L, 2L))
    expect_identical(1:5, first5@indexes[[1]])
    expect_identical(tblData(obj), tblData(first5))
})
test_that("'.extractCOLS_SQLDataFrame' works",
{
    ## Selecting every column leaves the column index unset (NULL) and
    ## keeps the same backing table and dimensions.
    allcols <- .extractCOLS_SQLDataFrame(obj, 1:2)
    expect_s4_class(allcols, "SQLDataFrame")
    expect_identical(dim(allcols), c(26L, 2L))
    expect_identical(NULL, allcols@indexes[[2]])
    expect_identical(tblData(obj), tblData(allcols))
})
test_that("select.SQLDataFrame works",
{
## Selecting every non-key column keeps the column index unset (NULL)
## and shares the backing table with the original object.
obj1 <- obj %>% select(Treatment:Ages)
expect_s4_class(obj1, "SQLDataFrame")
expect_identical(tblData(obj), tblData(obj1))
expect_identical(NULL, obj1@indexes[[2]])
expect_identical(dim(obj1), c(26L, 2L))
## A single-column selection is recorded as that column's position (2L).
obj1 <- obj %>% select(Ages)
expect_s4_class(obj1, "SQLDataFrame")
expect_identical(2L, obj1@indexes[[2]])
expect_identical(dim(obj1), c(26L, 1L))
obj1 <- obj %>% select("sampleID") ## key column only: zero data columns remain.
expect_s4_class(obj1, "SQLDataFrame")
expect_identical(dim(obj1), c(26L, 0L))
})
test_that("filter.SQLDataFrame works",
{
    ## Keep only the "ChIP" rows and confirm the result is a lazy
    ## row subset: half the rows, the odd row indexes, and the same
    ## backing table (no copy of the data).
    filtered <- obj %>% filter(Treatment == "ChIP")
    expect_identical(dim(filtered), c(13L, 2L))
    expect_equal(ridx(filtered), seq(1, 26, by = 2))
    expect_identical(tblData(obj), tblData(filtered))
})
test_that("mutate.SQLDataFrame works",
{
    ## Adding a derived column grows ncol by one and leaves rows untouched.
    augmented <- obj %>% mutate(Age1 = ifelse(Ages <= 30, "30th", "40th"))
    expect_identical(dim(augmented), c(26L, 3L))
    expect_null(ridx(augmented))
    ## mutate() after filter() keeps the filtered row count.
    augmented <- obj %>%
        filter(Treatment == "ChIP") %>%
        mutate(Age1 = ifelse(Ages <= 30, "30th", "40th"))
    expect_identical(dim(augmented), c(13L, 3L))
})
|
## Auto-generated fuzz/valgrind regression input for the internal
## meteor:::ET0_Makkink entry point. The argument vectors are deliberately
## degenerate (empty Rs; huge magnitudes and denormals in atmp/relh) to
## exercise the compiled code; str() prints the result for the log.
testlist <- list(Rs = numeric(0), atmp = c(-6.73292524882432e+44, 1.25561609525069e+163, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(7.64681398433536e-304, -4.29227809743625e-307, 1.81037701089217e+87, -2.93112217825115e-158, 9.03412394302482e-46, 7.31195213563656e+256, -1.93925524631599e-68, 2.08343441298214e-168, 1.39098956557385e-309), temp = 1.11231963688461e-307)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615864065-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 608
|
r
|
## Auto-generated fuzz/valgrind regression input for the internal
## meteor:::ET0_Makkink entry point. The argument vectors are deliberately
## degenerate (empty Rs; huge magnitudes and denormals in atmp/relh) to
## exercise the compiled code; str() prints the result for the log.
testlist <- list(Rs = numeric(0), atmp = c(-6.73292524882432e+44, 1.25561609525069e+163, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(7.64681398433536e-304, -4.29227809743625e-307, 1.81037701089217e+87, -2.93112217825115e-158, 9.03412394302482e-46, 7.31195213563656e+256, -1.93925524631599e-68, 2.08343441298214e-168, 1.39098956557385e-309), temp = 1.11231963688461e-307)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lightsail_operations.R
\name{lightsail_get_relational_database_master_user_password}
\alias{lightsail_get_relational_database_master_user_password}
\title{Returns the current, previous, or pending versions of the master user
password for a Lightsail database}
\usage{
lightsail_get_relational_database_master_user_password(
relationalDatabaseName, passwordVersion)
}
\arguments{
\item{relationalDatabaseName}{[required] The name of your database for which to get the master user password.}
\item{passwordVersion}{The password version to return.
Specifying \code{CURRENT} or \code{PREVIOUS} returns the current or previous
passwords respectively. Specifying \code{PENDING} returns the newest version
of the password that will rotate to \code{CURRENT}. After the \code{PENDING}
password rotates to \code{CURRENT}, the \code{PENDING} password is no longer
available.
Default: \code{CURRENT}}
}
\description{
Returns the current, previous, or pending versions of the master user
password for a Lightsail database.
}
\details{
The \code{GetRelationalDatabaseMasterUserPassword} operation supports
tag-based access control via resource tags applied to the resource
identified by relationalDatabaseName.
}
\section{Request syntax}{
\preformatted{svc$get_relational_database_master_user_password(
relationalDatabaseName = "string",
passwordVersion = "CURRENT"|"PREVIOUS"|"PENDING"
)
}
}
\keyword{internal}
|
/cran/paws.compute/man/lightsail_get_relational_database_master_user_password.Rd
|
permissive
|
peoplecure/paws
|
R
| false
| true
| 1,491
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lightsail_operations.R
\name{lightsail_get_relational_database_master_user_password}
\alias{lightsail_get_relational_database_master_user_password}
\title{Returns the current, previous, or pending versions of the master user
password for a Lightsail database}
\usage{
lightsail_get_relational_database_master_user_password(
relationalDatabaseName, passwordVersion)
}
\arguments{
\item{relationalDatabaseName}{[required] The name of your database for which to get the master user password.}
\item{passwordVersion}{The password version to return.
Specifying \code{CURRENT} or \code{PREVIOUS} returns the current or previous
passwords respectively. Specifying \code{PENDING} returns the newest version
of the password that will rotate to \code{CURRENT}. After the \code{PENDING}
password rotates to \code{CURRENT}, the \code{PENDING} password is no longer
available.
Default: \code{CURRENT}}
}
\description{
Returns the current, previous, or pending versions of the master user
password for a Lightsail database.
}
\details{
The \code{GetRelationalDatabaseMasterUserPassword} operation supports
tag-based access control via resource tags applied to the resource
identified by relationalDatabaseName.
}
\section{Request syntax}{
\preformatted{svc$get_relational_database_master_user_password(
relationalDatabaseName = "string",
passwordVersion = "CURRENT"|"PREVIOUS"|"PENDING"
)
}
}
\keyword{internal}
|
rm(list = ls())
library(tools)
hydat_tecplot <- function(workdirectory, filename_list, summary_file, save_filename) {
  ## Convert HYDAT monthly mean-flow CSV exports into a single Tecplot .dat
  ## file with one zone per gauging station.
  ##
  ## workdirectory : directory holding the input CSV files
  ## filename_list : character vector of monthly mean-flow CSV file names
  ## summary_file  : CSV mapping station numbers (col 1) to names (col 3)
  ## save_filename : output Tecplot file, written inside `workdirectory`
  ##
  ## Fixes vs. original: the caller's working directory is now restored on
  ## exit (on.exit); the station summary is read once instead of once per
  ## input file; `1:length()` is replaced by seq_along(); and the unused
  ## `zone_name` local was removed. The output format is unchanged.
  old_wd <- setwd(workdirectory)
  on.exit(setwd(old_wd), add = TRUE)

  time_origin <- ('19000101') # reference date, 1900-01-01 (Excel epoch)
  n <- read.csv(summary_file, header = FALSE, stringsAsFactors = FALSE) # station summary

  for (i in seq_along(filename_list)) {
    filename <- filename_list[i]
    # Widest row determines the column count; name columns V1..Vn.
    ncol <- max(count.fields(filename, sep = ","))
    d <- read.csv(filename, header = TRUE,
                  col.names = paste0("V", seq_len(ncol)),
                  stringsAsFactors = FALSE) # gauge readings

    # Assemble flow rate and date; convert dates to day numbers since origin.
    d_V3 <- gsub(pattern = "--", replacement = "", d$V3, fixed = TRUE) # remove --
    flow_rate <- d$V4
    d_V3_day <- paste(d_V3, '01', sep = "") # assume first day of the month
    d_V3_date <- as.Date(as.character(d_V3_day), format = "%m%Y%d") -
      as.Date(as.character(time_origin), format = "%Y%m%d") + 1 # add one more day

    # Look up the station name from the station number.
    station_num <- d$V1[1]
    station_name <- n$V3[which(n$V1 == station_num)]

    if (i == 1) {
      # Tecplot variable header, written once at the top of the file.
      variable_name <- c('variables=\"date\"\"flow rate\"')
      write(c(variable_name, "\n"), file = save_filename, append = FALSE)
    }
    write(paste("zone t=\"", station_name, "\"\n"), file = save_filename, append = TRUE)
    write.table(data.frame(d_V3_date, flow_rate), file = save_filename,
                quote = FALSE, sep = "\t", row.names = FALSE,
                col.names = FALSE, append = TRUE)
  }
  # X-AXIS: Date   Y-AXIS: Flow rate
}
## Driver: convert the listed ORB monthly mean-flow exports into one
## Tecplot file. NOTE(review): the work directory is a hard-coded local
## Windows path; adjust before running elsewhere.
workdirectory<-("D:/ORB/hydrograph/HYDAT Hydrograph")
filename=c(
"05AA024_Monthly_MeanFlow_ts.csv",
"05AC003_Monthly_MeanFlow_ts.csv",
"05AD007_Monthly_MeanFlow_ts.csv",
"05AD028_Monthly_MeanFlow_ts.csv",
"05AE006_Monthly_MeanFlow_ts.csv",
"05AE027_Monthly_MeanFlow_ts.csv",
"05AG006_Monthly_MeanFlow_ts.csv",
"05AJ001_Monthly_MeanFlow_ts.csv",
"05BB001_Monthly_MeanFlow_ts.csv",
"05BH004_Monthly_MeanFlow_ts.csv",
"05BJ001_Monthly_MeanFlow_ts.csv",
"05BL024_Monthly_MeanFlow_ts.csv",
"05BN012_Monthly_MeanFlow_ts.csv",
"05CA009_Monthly_MeanFlow_ts.csv",
"05CC002_Monthly_MeanFlow_ts.csv",
"05CE001_Monthly_MeanFlow_ts.csv",
"05CK004_Monthly_MeanFlow_ts.csv",
"05HD039_Monthly_MeanFlow_ts.csv",
"05HG001_Monthly_MeanFlow_ts.csv"
)
summary_file=("All Stations.csv")
save_filename=( "ORB_HYDAT.dat")
hydat_tecplot(workdirectory,filename,summary_file,save_filename)
|
/Hydat_Tecplot_R/Hydat_Tecplot_monthly.R
|
no_license
|
namedyangfan/R_Aquanty
|
R
| false
| false
| 2,767
|
r
|
rm(list = ls())
library(tools)
hydat_tecplot <- function(workdirectory, filename_list, summary_file, save_filename) {
  ## Convert HYDAT monthly mean-flow CSV exports into a single Tecplot .dat
  ## file with one zone per gauging station.
  ##
  ## workdirectory : directory holding the input CSV files
  ## filename_list : character vector of monthly mean-flow CSV file names
  ## summary_file  : CSV mapping station numbers (col 1) to names (col 3)
  ## save_filename : output Tecplot file, written inside `workdirectory`
  ##
  ## Fixes vs. original: the caller's working directory is now restored on
  ## exit (on.exit); the station summary is read once instead of once per
  ## input file; `1:length()` is replaced by seq_along(); and the unused
  ## `zone_name` local was removed. The output format is unchanged.
  old_wd <- setwd(workdirectory)
  on.exit(setwd(old_wd), add = TRUE)

  time_origin <- ('19000101') # reference date, 1900-01-01 (Excel epoch)
  n <- read.csv(summary_file, header = FALSE, stringsAsFactors = FALSE) # station summary

  for (i in seq_along(filename_list)) {
    filename <- filename_list[i]
    # Widest row determines the column count; name columns V1..Vn.
    ncol <- max(count.fields(filename, sep = ","))
    d <- read.csv(filename, header = TRUE,
                  col.names = paste0("V", seq_len(ncol)),
                  stringsAsFactors = FALSE) # gauge readings

    # Assemble flow rate and date; convert dates to day numbers since origin.
    d_V3 <- gsub(pattern = "--", replacement = "", d$V3, fixed = TRUE) # remove --
    flow_rate <- d$V4
    d_V3_day <- paste(d_V3, '01', sep = "") # assume first day of the month
    d_V3_date <- as.Date(as.character(d_V3_day), format = "%m%Y%d") -
      as.Date(as.character(time_origin), format = "%Y%m%d") + 1 # add one more day

    # Look up the station name from the station number.
    station_num <- d$V1[1]
    station_name <- n$V3[which(n$V1 == station_num)]

    if (i == 1) {
      # Tecplot variable header, written once at the top of the file.
      variable_name <- c('variables=\"date\"\"flow rate\"')
      write(c(variable_name, "\n"), file = save_filename, append = FALSE)
    }
    write(paste("zone t=\"", station_name, "\"\n"), file = save_filename, append = TRUE)
    write.table(data.frame(d_V3_date, flow_rate), file = save_filename,
                quote = FALSE, sep = "\t", row.names = FALSE,
                col.names = FALSE, append = TRUE)
  }
  # X-AXIS: Date   Y-AXIS: Flow rate
}
## Driver: convert the listed ORB monthly mean-flow exports into one
## Tecplot file. NOTE(review): the work directory is a hard-coded local
## Windows path; adjust before running elsewhere.
workdirectory<-("D:/ORB/hydrograph/HYDAT Hydrograph")
filename=c(
"05AA024_Monthly_MeanFlow_ts.csv",
"05AC003_Monthly_MeanFlow_ts.csv",
"05AD007_Monthly_MeanFlow_ts.csv",
"05AD028_Monthly_MeanFlow_ts.csv",
"05AE006_Monthly_MeanFlow_ts.csv",
"05AE027_Monthly_MeanFlow_ts.csv",
"05AG006_Monthly_MeanFlow_ts.csv",
"05AJ001_Monthly_MeanFlow_ts.csv",
"05BB001_Monthly_MeanFlow_ts.csv",
"05BH004_Monthly_MeanFlow_ts.csv",
"05BJ001_Monthly_MeanFlow_ts.csv",
"05BL024_Monthly_MeanFlow_ts.csv",
"05BN012_Monthly_MeanFlow_ts.csv",
"05CA009_Monthly_MeanFlow_ts.csv",
"05CC002_Monthly_MeanFlow_ts.csv",
"05CE001_Monthly_MeanFlow_ts.csv",
"05CK004_Monthly_MeanFlow_ts.csv",
"05HD039_Monthly_MeanFlow_ts.csv",
"05HG001_Monthly_MeanFlow_ts.csv"
)
summary_file=("All Stations.csv")
save_filename=( "ORB_HYDAT.dat")
hydat_tecplot(workdirectory,filename,summary_file,save_filename)
|
context("npn_phenophases")
## Integration tests for the rnpn phenophase endpoints. All HTTP traffic is
## replayed from pre-recorded vcr cassettes, so the tests run offline and
## deterministically against the environment chosen by get_test_env().
test_that("npn_phenophases works", {
npn_set_env(get_test_env())
vcr::use_cassette("npn_phenophases_1", {
pp <- npn_phenophases()
})
expect_is(pp, "data.frame")
expect_is(pp$phenophase_name, "character")
expect_equal(trimws(pp[1,"phenophase_name"]),"First leaf")
expect_gt(nrow(pp),100)
})
test_that("npn_phenophase_definitions works", {
npn_set_env(get_test_env())
vcr::use_cassette("npn_phenophase_definitions_1", {
pp <- npn_phenophase_definitions()
})
expect_is(pp, "data.frame")
expect_is(pp$phenophase_name, "character")
expect_equal(trimws(pp[1,"phenophase_name"]),"First leaf")
expect_gt(nrow(pp),100)
})
test_that("npn_phenophase_details works", {
npn_set_env(get_test_env())
vcr::use_cassette("npn_phenophase_details_1", {
pd <- npn_phenophase_details(56)
})
expect_is(pd, "data.frame")
expect_is(pd$phenophase_names, "character")
expect_equal(trimws(pd[1,"phenophase_names"]),"First leaf")
## Multiple ids are accepted as a single comma-separated string.
vcr::use_cassette("npn_phenophase_details_2", {
pd <- npn_phenophase_details("56,61")
})
expect_is(pd, "data.frame")
expect_is(pd$phenophase_names, "character")
expect_equal(trimws(pd[1,"phenophase_names"]),"First leaf")
## No id, or two ids passed as separate positional args, is an error.
expect_error(npn_phenophase_details())
expect_error(npn_phenophase_details(56,61))
})
test_that("npn_phenophases_by_species works",{
npn_set_env(get_test_env())
vcr::use_cassette("npn_phenophases_by_species_1", {
pp <- npn_phenophases_by_species(3,"2018-05-05")
})
expect_is(pp,"data.frame")
expect_is(pp$species_name,"character")
})
test_that("npn_pheno_classes works",{
npn_set_env(get_test_env())
vcr::use_cassette("npn_pheno_classes_1", {
pc <- npn_pheno_classes()
})
expect_is(pc,"data.frame")
expect_is(pc$name,"character")
expect_gt(nrow(pc),50)
})
test_that("npn_abundance_categories works",{
npn_set_env(get_test_env())
vcr::use_cassette("npn_abundance_categories_1", {
ac <- npn_abundance_categories()
})
expect_is(ac,"data.frame")
expect_is(ac$category_name,"character")
expect_gt(nrow(ac),50)
})
test_that("npn_get_phenophases_for_taxon works", {
npn_set_env(get_test_env())
## One class id yields a one-element list of class records.
vcr::use_cassette("npn_get_phenophases_for_taxon_1", {
pp <- npn_get_phenophases_for_taxon(class_ids=5,date="2018-05-05")
})
expect_is(pp,"list")
expect_is(pp[[1]]$class_name, "character")
expect_length(pp,1)
vcr::use_cassette("npn_get_phenophases_for_taxon_2", {
pp <- npn_get_phenophases_for_taxon(class_ids=c(5,6),date="2018-05-05")
})
expect_is(pp,"list")
expect_is(pp[[1]]$class_name, "character")
expect_gt(length(pp),1)
vcr::use_cassette("npn_get_phenophases_for_taxon_3", {
pp <- npn_get_phenophases_for_taxon(family_ids=c(267,268),date="2018-05-05")
})
expect_is(pp,"list")
expect_is(pp[[1]]$family_name, "character")
expect_gt(length(pp),1)
vcr::use_cassette("npn_get_phenophases_for_taxon_4", {
pp <- npn_get_phenophases_for_taxon(order_ids=c(74,75),date="2018-05-05", return_all = 0)
})
expect_is(pp,"list")
expect_is(pp[[1]]$order_name, "character")
expect_gt(length(pp),1)
vcr::use_cassette("npn_get_phenophases_for_taxon_5", {
pp <- npn_get_phenophases_for_taxon(order_ids=c(74,75),return_all = 1)
})
expect_is(pp,"list")
expect_is(pp[[1]]$order_name, "character")
expect_gt(length(pp),1)
## NOTE(review): return_all = TRUE (logical, not 0/1) yields an empty
## list per this recorded cassette — presumably the API expects 0/1;
## confirm against the endpoint documentation.
vcr::use_cassette("npn_get_phenophases_for_taxon_6", {
pp <- npn_get_phenophases_for_taxon(order_ids=c(74,75),return_all = TRUE)
})
expect_is(pp,"list")
expect_length(pp,0)
})
|
/tests/testthat/test-npn-phenophases.R
|
permissive
|
hulaba/rnpn
|
R
| false
| false
| 3,573
|
r
|
context("npn_phenophases")
## Integration tests for the rnpn phenophase endpoints. All HTTP traffic is
## replayed from pre-recorded vcr cassettes, so the tests run offline and
## deterministically against the environment chosen by get_test_env().
test_that("npn_phenophases works", {
npn_set_env(get_test_env())
vcr::use_cassette("npn_phenophases_1", {
pp <- npn_phenophases()
})
expect_is(pp, "data.frame")
expect_is(pp$phenophase_name, "character")
expect_equal(trimws(pp[1,"phenophase_name"]),"First leaf")
expect_gt(nrow(pp),100)
})
test_that("npn_phenophase_definitions works", {
npn_set_env(get_test_env())
vcr::use_cassette("npn_phenophase_definitions_1", {
pp <- npn_phenophase_definitions()
})
expect_is(pp, "data.frame")
expect_is(pp$phenophase_name, "character")
expect_equal(trimws(pp[1,"phenophase_name"]),"First leaf")
expect_gt(nrow(pp),100)
})
test_that("npn_phenophase_details works", {
npn_set_env(get_test_env())
vcr::use_cassette("npn_phenophase_details_1", {
pd <- npn_phenophase_details(56)
})
expect_is(pd, "data.frame")
expect_is(pd$phenophase_names, "character")
expect_equal(trimws(pd[1,"phenophase_names"]),"First leaf")
## Multiple ids are accepted as a single comma-separated string.
vcr::use_cassette("npn_phenophase_details_2", {
pd <- npn_phenophase_details("56,61")
})
expect_is(pd, "data.frame")
expect_is(pd$phenophase_names, "character")
expect_equal(trimws(pd[1,"phenophase_names"]),"First leaf")
## No id, or two ids passed as separate positional args, is an error.
expect_error(npn_phenophase_details())
expect_error(npn_phenophase_details(56,61))
})
test_that("npn_phenophases_by_species works",{
npn_set_env(get_test_env())
vcr::use_cassette("npn_phenophases_by_species_1", {
pp <- npn_phenophases_by_species(3,"2018-05-05")
})
expect_is(pp,"data.frame")
expect_is(pp$species_name,"character")
})
test_that("npn_pheno_classes works",{
npn_set_env(get_test_env())
vcr::use_cassette("npn_pheno_classes_1", {
pc <- npn_pheno_classes()
})
expect_is(pc,"data.frame")
expect_is(pc$name,"character")
expect_gt(nrow(pc),50)
})
test_that("npn_abundance_categories works",{
npn_set_env(get_test_env())
vcr::use_cassette("npn_abundance_categories_1", {
ac <- npn_abundance_categories()
})
expect_is(ac,"data.frame")
expect_is(ac$category_name,"character")
expect_gt(nrow(ac),50)
})
test_that("npn_get_phenophases_for_taxon works", {
npn_set_env(get_test_env())
## One class id yields a one-element list of class records.
vcr::use_cassette("npn_get_phenophases_for_taxon_1", {
pp <- npn_get_phenophases_for_taxon(class_ids=5,date="2018-05-05")
})
expect_is(pp,"list")
expect_is(pp[[1]]$class_name, "character")
expect_length(pp,1)
vcr::use_cassette("npn_get_phenophases_for_taxon_2", {
pp <- npn_get_phenophases_for_taxon(class_ids=c(5,6),date="2018-05-05")
})
expect_is(pp,"list")
expect_is(pp[[1]]$class_name, "character")
expect_gt(length(pp),1)
vcr::use_cassette("npn_get_phenophases_for_taxon_3", {
pp <- npn_get_phenophases_for_taxon(family_ids=c(267,268),date="2018-05-05")
})
expect_is(pp,"list")
expect_is(pp[[1]]$family_name, "character")
expect_gt(length(pp),1)
vcr::use_cassette("npn_get_phenophases_for_taxon_4", {
pp <- npn_get_phenophases_for_taxon(order_ids=c(74,75),date="2018-05-05", return_all = 0)
})
expect_is(pp,"list")
expect_is(pp[[1]]$order_name, "character")
expect_gt(length(pp),1)
vcr::use_cassette("npn_get_phenophases_for_taxon_5", {
pp <- npn_get_phenophases_for_taxon(order_ids=c(74,75),return_all = 1)
})
expect_is(pp,"list")
expect_is(pp[[1]]$order_name, "character")
expect_gt(length(pp),1)
## NOTE(review): return_all = TRUE (logical, not 0/1) yields an empty
## list per this recorded cassette — presumably the API expects 0/1;
## confirm against the endpoint documentation.
vcr::use_cassette("npn_get_phenophases_for_taxon_6", {
pp <- npn_get_phenophases_for_taxon(order_ids=c(74,75),return_all = TRUE)
})
expect_is(pp,"list")
expect_length(pp,0)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metR_ArrowGrob.R
\name{makeContent.arrow2}
\alias{makeContent.arrow2}
\title{Make content for an arrow}
\usage{
\method{makeContent}{arrow2}(x)
}
\arguments{
\item{x}{: a grid grob}
}
\value{
a grid grob or gTree
}
\description{
Function to make content for an arrow.
}
\details{
This function will be used by grid.draw when called on an arrowGrob. It
provides a specific implementation for the generic \code{\link[grid:makeContent]{grid::makeContent()}}.
}
|
/man/makeContent.arrow2.Rd
|
permissive
|
wStockhausen/wtsGIS
|
R
| false
| true
| 536
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metR_ArrowGrob.R
\name{makeContent.arrow2}
\alias{makeContent.arrow2}
\title{Make content for an arrow}
\usage{
\method{makeContent}{arrow2}(x)
}
\arguments{
\item{x}{: a grid grob}
}
\value{
a grid grob or gTree
}
\description{
Function to make content for an arrow.
}
\details{
This function will be used by grid.draw when called on an arrowGrob. It
provides a specific implementation for the generic \code{\link[grid:makeContent]{grid::makeContent()}}.
}
|
## Example script extracted from the 'tester' package documentation.
## is_single_negative_decimal() is TRUE only for a length-1 negative
## non-integer numeric value; strings and vectors return FALSE.
library(tester)
### Name: is_single_negative_decimal
### Title: Is single negative decimal
### Aliases: is_single_negative_decimal
### ** Examples
is_single_negative_decimal(-3/4) # TRUE
is_single_negative_decimal(0.01) # FALSE
is_single_negative_decimal("hoskdflksfd") # FALSE
is_single_negative_decimal("1.0") # FALSE
is_single_negative_decimal(1:5) # FALSE
|
/data/genthat_extracted_code/tester/examples/is_single_negative_decimal.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 372
|
r
|
## Example script extracted from the 'tester' package documentation.
## is_single_negative_decimal() is TRUE only for a length-1 negative
## non-integer numeric value; strings and vectors return FALSE.
library(tester)
### Name: is_single_negative_decimal
### Title: Is single negative decimal
### Aliases: is_single_negative_decimal
### ** Examples
is_single_negative_decimal(-3/4) # TRUE
is_single_negative_decimal(0.01) # FALSE
is_single_negative_decimal("hoskdflksfd") # FALSE
is_single_negative_decimal("1.0") # FALSE
is_single_negative_decimal(1:5) # FALSE
|
### Exploratory Data Analysis: Course Project 2
### Plot 4
# Across the United States, how have emissions from coal combustion-related
# sources changed from 1999-2008?
library(plyr)
library(ggplot2)

## Read in data
NEI <- readRDS("./exploratory analysis/Course Project 2/exdata-data-NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("./exploratory analysis/Course Project 2/exdata-data-NEI_data/Source_Classification_Code.rds")

## Flag combustion sources (SCC.Level.One) and coal sources (SCC.Level.Four).
comb <- grepl("comb", SCC$SCC.Level.One, ignore.case = TRUE)   # logical vector
coal <- grepl("coal", SCC$SCC.Level.Four, ignore.case = TRUE)  # logical vector
comb_coal_extract <- comb & coal                  # TRUE only when both match
SCC_combcoal <- SCC[comb_coal_extract, ]$SCC      # relevant SCC codes
NEI_combcoal <- NEI[NEI$SCC %in% SCC_combcoal, ]  # matching NEI rows

## Total coal-combustion emissions per year.
df_NEIcombcoal <- aggregate(NEI_combcoal$Emissions,
                            list(year = NEI_combcoal$year), FUN = sum)

## Bar plot.
## Bug fixes vs. original:
##  - names.arg referenced the undefined object `df_Baltimore`; use this
##    data frame's own year column instead.
##  - the divisor was 10e3 (= 10,000) while the axis label says 10^3 tons;
##    use 1e3 so the label matches the numbers.
barplot(t(as.matrix(df_NEIcombcoal$x)) / 1e3, beside = FALSE,
        xlab = "year", names.arg = df_NEIcombcoal$year,
        ylab = "PM2.5 emissions (10^3 Tons)",
        main = "PM 2.5 Emissions from Coal Combustion")
dev.copy(png, file = "./exploratory analysis/Course project 2/plot4.png",
         height = 480, width = 480)
dev.off()
|
/Course Project 2/plot4.R
|
no_license
|
jonneyliu/Exploratory-Data-Analysis
|
R
| false
| false
| 1,473
|
r
|
### Exploratory Data Analysis: Course Project 2
### Plot 4
# Across the United States, how have emissions from coal combustion-related
# sources changed from 1999-2008?
library(plyr)
library(ggplot2)

## Read in data
NEI <- readRDS("./exploratory analysis/Course Project 2/exdata-data-NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("./exploratory analysis/Course Project 2/exdata-data-NEI_data/Source_Classification_Code.rds")

## Flag combustion sources (SCC.Level.One) and coal sources (SCC.Level.Four).
comb <- grepl("comb", SCC$SCC.Level.One, ignore.case = TRUE)   # logical vector
coal <- grepl("coal", SCC$SCC.Level.Four, ignore.case = TRUE)  # logical vector
comb_coal_extract <- comb & coal                  # TRUE only when both match
SCC_combcoal <- SCC[comb_coal_extract, ]$SCC      # relevant SCC codes
NEI_combcoal <- NEI[NEI$SCC %in% SCC_combcoal, ]  # matching NEI rows

## Total coal-combustion emissions per year.
df_NEIcombcoal <- aggregate(NEI_combcoal$Emissions,
                            list(year = NEI_combcoal$year), FUN = sum)

## Bar plot.
## Bug fixes vs. original:
##  - names.arg referenced the undefined object `df_Baltimore`; use this
##    data frame's own year column instead.
##  - the divisor was 10e3 (= 10,000) while the axis label says 10^3 tons;
##    use 1e3 so the label matches the numbers.
barplot(t(as.matrix(df_NEIcombcoal$x)) / 1e3, beside = FALSE,
        xlab = "year", names.arg = df_NEIcombcoal$year,
        ylab = "PM2.5 emissions (10^3 Tons)",
        main = "PM 2.5 Emissions from Coal Combustion")
dev.copy(png, file = "./exploratory analysis/Course project 2/plot4.png",
         height = 480, width = 480)
dev.off()
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(dplyr)
library(tidyr)
library(ggplot2)
library(ggthemes)
library(animation)
library(latticeExtra)
library(lubridate)
library(ggbeeswarm)
library(gridExtra)
library(data.table)
library(viridis)
library(ggbeeswarm)
library(choroplethr)
library(choroplethrMaps)
# Define UI for application that draws a histogram
## UI: state selector, a single-year slider, and a year-range slider feed
## seven tab panels of plots rendered by the server below.
ui <- shinyUI(fluidPage(
# Application title
titlePanel("Climate change in US continent"),
# Sidebar with the input controls
sidebarLayout(
sidebarPanel(
helpText("Choose your interested States and Year"),
selectInput("State_name",
label="Your interested State",
choices = c("New York","California", "Florida", "Illinois", "Texas"),
selected = "New York"),
sliderInput("Year_num",
"Interested Year",
min = 1743,
max = 2013,
value = 2000,
step = 1),
sliderInput("Year_range", "Range of year:",
min = 1743, max = 2013, value = c(1850,2012), step = 1),
helpText("Note: while the plot view will show only be updated"),
# submitButton defers all reactivity until the user clicks Update.
submitButton("Update View")
),
# Main panel: one tab per plot produced in the server function.
mainPanel(
tabsetPanel(
tabPanel("Ave by Year", plotOutput("aveTplot_1")),
tabPanel("Ave by Month", plotOutput("aveTplot_2")),
tabPanel("cool and hot", plotOutput("aveTplot_3")),
tabPanel("Temp variation", plotOutput("aveTplot_4")),
tabPanel("Heat map", plotOutput("aveTplot_5")),
tabPanel("Temp change", plotOutput("aveTplot_6")),
tabPanel("Spatial Temp", plotOutput("aveTplot_7"))
)
)
)
)
)
# ---- Data loading and preparation (runs once at app startup) ----
# NOTE(review): hard-coded local Windows path; adjust before deploying.
setwd("E:\\Study\\Applied analytics\\4336\\ToolBox Assignment")
## Main data treatment
cData = read.csv("climate-change-earth-surface-temperature-data/GlobalLandTemperaturesByState.csv")
##Then I choose the United States, remove Hawaii and Alaska, and I separate the date into Year, Month and Day.
cData %>%
filter(Country=="United States") %>%
separate(col = dt, into = c("Year", "Month", "Day"), convert = TRUE) ->cData
cData<-na.omit(cData)
## cData1: continental US only (Hawaii and Alaska dropped).
cData %>%
filter(State!="Hawaii" & State!="Alaska") -> cData1
# Remove na's
cData1 = na.omit(cData1)
## Server: each output renders one tab. All plots re-filter cData1/cData
## from the inputs on every invalidation.
## NOTE(review): the Year_range filters below use strict inequalities, so
## the endpoint years themselves are excluded — confirm that is intended.
server <- shinyServer(function(input, output) {
#1. PLOT 1
# show the temperature variation of different states
output$aveTplot_1 <- renderPlot({
cData1 %>%
filter(State==input$State_name) %>%
filter(Year> input$Year_range[1] & Year<input$Year_range[2]) %>%
group_by(Year) %>%
summarise(Temp = mean(AverageTemperature)) ->cData2
# draw the plots of state temperature
ggplot(data=cData2,aes(x=Year, y=Temp))+ geom_point(aes(colour=Temp))+stat_smooth(method = "loess")+ggtitle(paste("Average Temperature 1743-2013 in", input$State_name))
})
#2. PLOT 2
# extracting US's average temperature month-wise & year_wise
# month-on-month change in US's average temperature from 1743 to 2013
output$aveTplot_2 <- renderPlot({
US_temp_1<- cData1 %>%
filter(State==input$State_name) %>%
filter(Year> input$Year_range[1] & Year<input$Year_range[2]) %>%
group_by(Year,Month) %>%
summarise(ave_Temp=mean(AverageTemperature))
# Build a first-of-month Date column so the x axis is continuous.
US_temp_1$date = paste0("01-",as.character(US_temp_1$Month),"-",as.character(US_temp_1$Year))
US_temp_1$date = as.Date(US_temp_1$date, "%d-%m-%Y")
ggplot(data=US_temp_1, aes(x = date, y = ave_Temp)) + geom_line() + geom_smooth(method="lm",size = 2) +
xlab("Years") + ylab("Average Monthly Temperature") +
theme(axis.title = element_text(size = 15), axis.text = element_text(size = 13)) +
ggtitle(paste("Average Monthly Temperature Trend of the hottest and coolest states in",input$State_name, "in Year", input$Year_range[1], "-",input$Year_range[2]))
})
#3. PLOT3
output$aveTplot_3 <- renderPlot({
# State hottest and coolest temperature trend (January averages only)
state_temp<-cData1 %>%
group_by(State, Year, Month) %>%
summarise(avg_Temp = mean(AverageTemperature)) %>%
filter(Month ==1)
state_temp= as.data.table(state_temp)
state_temp= state_temp[!is.na(avg_Temp),]
# State-wise comparison of average January temperature for the chosen year,
# states ordered coldest to hottest along the x axis.
ggplot(data=state_temp[state_temp$Year == input$Year_num,], aes(x = reorder(State, avg_Temp), y = avg_Temp, group = as.factor(Year), colour = as.factor(Year))) +
labs(colour = "Year") +
theme(legend.title = element_text(size = 13, face="bold")) +
theme(legend.text = element_text(size = 13)) +
geom_line(size = 1) + xlab("Coldest to Hottest States") + ylab("Average Temperature") +
theme(axis.title = element_text(size = 15, face = "bold"), axis.text.y = element_text(size = 13), axis.text.x = element_blank()) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major.y = element_line(colour = "gray25"),
panel.border = element_blank())+
ggtitle(paste("State-wise Change in Temperature in", input$Year_num)) +
theme(plot.title = element_text(size = 13, lineheight=.8, face="bold"))
})
#4. PLOT 4
# Temperature variation for the selected year vs. fixed reference years.
output$aveTplot_4 <- renderPlot({
cData7<-cData1 %>%
filter( Year==input$Year_num | Year==1854 | Year==1900 | Year==1950 | Year==2013)
cData7$Year<-as.factor(cData7$Year)
ggplot(data=cData7, aes(x=Year, y=AverageTemperature,color=AverageTemperature)) +
geom_quasirandom(size=5) + scale_colour_viridis(option = "C")+
ggtitle(paste("Temperature variation in", input$Year_num)) +
theme(plot.title = element_text(size = 13, lineheight=.8, face="bold"))
})
#5. PLOT 5
# Monthly average heat map for the selected state over the chosen range.
output$aveTplot_5 <- renderPlot({
cData8 <- cData1 %>%
filter(State ==input$State_name) %>%
filter(Year> input$Year_range[1] & Year<input$Year_range[2]) %>%
group_by(State, Month) %>%
summarise(avg_Temp = mean(AverageTemperature))
ggplot(cData8, aes(x = Month, y = State, fill = avg_Temp, frame = State)) +
geom_tile(color = "white", size = 0.1) +
scale_fill_gradient(name = "Average Temperature",low = "white", high = "red") +
coord_equal() +
labs(x = "Months", y = "", title = "Average Temp in the selected year range") +
theme(axis.ticks = element_blank()) +
theme(axis.text = element_text(size = 14)) +
theme(plot.title = element_text(size = 15)) +
theme(legend.title = element_text(size = 15)) +
theme(legend.text = element_text(size = 10))+
ggtitle(paste("Heat map for", input$State_name))+
theme(plot.title = element_text(size = 13, lineheight=.8, face="bold"))
})
#6. PLOT 6
# Temperature variation by month for the selected state and range.
output$aveTplot_6 <- renderPlot({
cData6<-cData1 %>%
filter(Year> input$Year_range[1] & Year<input$Year_range[2]) %>%
filter(State==input$State_name)
cData6$Month<-as.factor(cData6$Month)
ggplot(data=cData6, aes(x=Month, y=AverageTemperature,color=AverageTemperature))+
geom_quasirandom() + scale_colour_viridis()+
ggtitle(paste("Temperature variation in", input$State_name, "in Year", input$Year_range[1], "-",input$Year_range[2])) +
theme(plot.title = element_text(size = 13, lineheight=.8, face="bold"))
})
#7. PLOT 7
# Spatial temperature distribution (choropleth of state means for one year).
# Note: uses cData (all states incl. Hawaii/Alaska), not cData1.
output$aveTplot_7 <- renderPlot({
## I organize the data to get a data frame for a state choropleth and print the map for the chosen year.
# Changing Georgia (State) to match choroplethr's region naming
cData$State <- as.character(cData$State)
cData$State[cData$State=="Georgia (State)"] <- "Georgia"
cData$State<- as.factor(cData$State)
# select columns of interest
cData %>%
select(Year,AverageTemperature,State) %>%
group_by(Year,State) %>%
summarise(value=mean(AverageTemperature))-> cData4
#Data frame must have a column named region (all lower case) and another one value.
colnames(cData4)[2]<- "region"
cData4$region<-tolower(cData4$region)
cData4 %>%
filter(Year==input$Year_num) -> cData4_1
cData4_1<-cData4_1[,2:3]
# NOTE(review): reference_map is passed to print(), not to
# state_choropleth() — it looks like it was meant to be an argument of
# state_choropleth(); confirm against the choroplethr documentation.
print(state_choropleth(cData4_1,
title=paste("Land Temperature in Year", input$Year_num),
num_colors = 8,
legend="Degrees"),reference_map=TRUE)
})
}) # For input output main function
# Run the application: launch Shiny with the UI and server defined above
shinyApp(ui = ui, server = server)
|
/app.R
|
no_license
|
tanghd/climate-change
|
R
| false
| false
| 9,180
|
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(dplyr)
library(tidyr)
library(ggplot2)
library(ggthemes)
library(animation)
library(latticeExtra)
library(lubridate)
library(ggbeeswarm)
library(gridExtra)
library(data.table)
library(viridis)
library(ggbeeswarm)
library(choroplethr)
library(choroplethrMaps)
# Define the UI: a state selector, a single-year slider, a year-range
# slider, and one tab per rendered plot.
ui <- shinyUI(fluidPage(
  # Application title
  titlePanel("Climate change in US continent"),
  # Sidebar with the user inputs
  sidebarLayout(
    sidebarPanel(
      helpText("Choose your interested States and Year"),
      selectInput("State_name",
                  label = "Your interested State",
                  choices = c("New York", "California", "Florida", "Illinois", "Texas"),
                  selected = "New York"),
      sliderInput("Year_num",
                  "Interested Year",
                  min = 1743,
                  max = 2013,
                  value = 2000,
                  step = 1),
      sliderInput("Year_range", "Range of year:",
                  min = 1743, max = 2013, value = c(1850, 2012), step = 1),
      # Fix: the original note was garbled ("while the plot view will show
      # only be updated"). Plots refresh only on the button press because
      # submitButton() suspends automatic reactivity.
      helpText("Note: the plot view will only be updated after you press 'Update View'"),
      submitButton("Update View")
    ),
    # Show the generated plots, one per tab
    mainPanel(
      tabsetPanel(
        tabPanel("Ave by Year", plotOutput("aveTplot_1")),
        tabPanel("Ave by Month", plotOutput("aveTplot_2")),
        tabPanel("cool and hot", plotOutput("aveTplot_3")),
        tabPanel("Temp variation", plotOutput("aveTplot_4")),
        tabPanel("Heat map", plotOutput("aveTplot_5")),
        tabPanel("Temp change", plotOutput("aveTplot_6")),
        tabPanel("Spatial Temp", plotOutput("aveTplot_7"))
      )
    )
  )
))
# Load and prepare the temperature data (runs once at app start-up).
# NOTE(review): setwd() with an absolute path makes the app machine-specific;
# consider paths relative to the app directory instead.
setwd("E:\\Study\\Applied analytics\\4336\\ToolBox Assignment")
## Main data treatment
cData = read.csv("climate-change-earth-surface-temperature-data/GlobalLandTemperaturesByState.csv")
## Keep the United States and split the "dt" date column into Year/Month/Day.
cData %>%
filter(Country=="United States") %>%
separate(col = dt, into = c("Year", "Month", "Day"), convert = TRUE) ->cData
cData<-na.omit(cData)
# cData1: continental US only (Hawaii and Alaska removed)
cData %>%
filter(State!="Hawaii" & State!="Alaska") -> cData1
# Remove na's
cData1 = na.omit(cData1)
# Server: each output renders one plot from the pre-loaded cData/cData1.
server <- shinyServer(function(input, output) {
#1. PLOT 1
# show the temperature variation of different states
output$aveTplot_1 <- renderPlot({
cData1 %>%
filter(State==input$State_name) %>%
# NOTE(review): the strict >/< excludes the slider's endpoint years —
# confirm whether the range should be inclusive (same pattern in plots
# 2, 5 and 6).
filter(Year> input$Year_range[1] & Year<input$Year_range[2]) %>%
group_by(Year) %>%
summarise(Temp = mean(AverageTemperature)) ->cData2
# draw the plots of state temperature
ggplot(data=cData2,aes(x=Year, y=Temp))+ geom_point(aes(colour=Temp))+stat_smooth(method = "loess")+ggtitle(paste("Average Temperature 1743-2013 in", input$State_name))
})
#2. PLOT 2
# Monthly mean temperature over time for the selected state, with a
# linear trend line.
output$aveTplot_2 <- renderPlot({
US_temp_1<- cData1 %>%
filter(State==input$State_name) %>%
filter(Year> input$Year_range[1] & Year<input$Year_range[2]) %>%
group_by(Year,Month) %>%
summarise(ave_Temp=mean(AverageTemperature))
# Build a Date column (first day of each month) for the x axis.
US_temp_1$date = paste0("01-",as.character(US_temp_1$Month),"-",as.character(US_temp_1$Year))
US_temp_1$date = as.Date(US_temp_1$date, "%d-%m-%Y")
ggplot(data=US_temp_1, aes(x = date, y = ave_Temp)) + geom_line() + geom_smooth(method="lm",size = 2) +
xlab("Years") + ylab("Average Monthly Temperature") +
theme(axis.title = element_text(size = 15), axis.text = element_text(size = 13)) +
ggtitle(paste("Average Monthly Temperature Trend of the hottest and coolest states in",input$State_name, "in Year", input$Year_range[1], "-",input$Year_range[2]))
})
#3. PLOT3
# All states ordered coldest-to-hottest by January mean temperature for
# the selected year.
output$aveTplot_3 <- renderPlot({
# State hottest and coolest temperature trend (January only)
state_temp<-cData1 %>%
group_by(State, Year, Month) %>%
summarise(avg_Temp = mean(AverageTemperature)) %>%
filter(Month ==1)
state_temp= as.data.table(state_temp)
state_temp= state_temp[!is.na(avg_Temp),]
# State-wise comparison of average January month average temperature, for the years 1950 and 2013
ggplot(data=state_temp[state_temp$Year == input$Year_num,], aes(x = reorder(State, avg_Temp), y = avg_Temp, group = as.factor(Year), colour = as.factor(Year))) +
labs(colour = "Year") +
theme(legend.title = element_text(size = 13, face="bold")) +
theme(legend.text = element_text(size = 13)) +
geom_line(size = 1) + xlab("Coldest to Hottest States") + ylab("Average Temperature") +
theme(axis.title = element_text(size = 15, face = "bold"), axis.text.y = element_text(size = 13), axis.text.x = element_blank()) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major.y = element_line(colour = "gray25"),
panel.border = element_blank())+
ggtitle(paste("State-wise Change in Temperature in", input$Year_num)) +
theme(plot.title = element_text(size = 13, lineheight=.8, face="bold"))
})
#4. PLOT 4
# Temperature variation for the selected year against fixed reference
# years (1854, 1900, 1950, 2013).
output$aveTplot_4 <- renderPlot({
cData7<-cData1 %>%
filter( Year==input$Year_num | Year==1854 | Year==1900 | Year==1950 | Year==2013)
cData7$Year<-as.factor(cData7$Year)
ggplot(data=cData7, aes(x=Year, y=AverageTemperature,color=AverageTemperature)) +
geom_quasirandom(size=5) + scale_colour_viridis(option = "C")+
ggtitle(paste("Temperature variation in", input$Year_num)) +
theme(plot.title = element_text(size = 13, lineheight=.8, face="bold"))
})
#5. PLOT 5
# Monthly heat map of mean temperature for the selected state and range.
output$aveTplot_5 <- renderPlot({
cData8 <- cData1 %>%
filter(State ==input$State_name) %>%
filter(Year> input$Year_range[1] & Year<input$Year_range[2]) %>%
group_by(State, Month) %>%
summarise(avg_Temp = mean(AverageTemperature))
ggplot(cData8, aes(x = Month, y = State, fill = avg_Temp, frame = State)) +
geom_tile(color = "white", size = 0.1) +
scale_fill_gradient(name = "Average Temperature",low = "white", high = "red") +
coord_equal() +
labs(x = "Months", y = "", title = "Average Temp in the selected year range") +
theme(axis.ticks = element_blank()) +
theme(axis.text = element_text(size = 14)) +
# NOTE(review): this plot.title setting is overridden by the one two
# lines below; it has no effect.
theme(plot.title = element_text(size = 15)) +
theme(legend.title = element_text(size = 15)) +
theme(legend.text = element_text(size = 10))+
ggtitle(paste("Heat map for", input$State_name))+
theme(plot.title = element_text(size = 13, lineheight=.8, face="bold"))
})
#6. PLOT 6
# Temperature variation by month (beeswarm) for the selected state/range.
output$aveTplot_6 <- renderPlot({
cData6<-cData1 %>%
filter(Year> input$Year_range[1] & Year<input$Year_range[2]) %>%
filter(State==input$State_name)
cData6$Month<-as.factor(cData6$Month)
ggplot(data=cData6, aes(x=Month, y=AverageTemperature,color=AverageTemperature))+
geom_quasirandom() + scale_colour_viridis()+
ggtitle(paste("Temperature variation in", input$State_name, "in Year", input$Year_range[1], "-",input$Year_range[2])) +
theme(plot.title = element_text(size = 13, lineheight=.8, face="bold"))
})
#7. PLOT 7
# Spatial temperature distribution (state choropleth for the chosen year)
output$aveTplot_7 <- renderPlot({
## Build a data frame suitable for choroplethr from the full cData.
# Changing Georgia (State) to "Georgia" to match choroplethr region names.
# (Assignments to cData here modify a function-local copy only.)
cData$State <- as.character(cData$State)
cData$State[cData$State=="Georgia (State)"] <- "Georgia"
cData$State<- as.factor(cData$State)
# select columns of interest
cData %>%
select(Year,AverageTemperature,State) %>%
group_by(Year,State) %>%
summarise(value=mean(AverageTemperature))-> cData4
#Data frame must have a column named region (all lower case) and another one value.
colnames(cData4)[2]<- "region"
cData4$region<-tolower(cData4$region)
cData4 %>%
filter(Year==input$Year_num) -> cData4_1
cData4_1<-cData4_1[,2:3]
# NOTE(review): reference_map=TRUE is passed to print(), which ignores
# it; if a reference map is wanted it must be an argument of
# state_choropleth() itself — verify intent.
print(state_choropleth(cData4_1,
title=paste("Land Temperature in Year", input$Year_num),
num_colors = 8,
legend="Degrees"),reference_map=TRUE)
})
}) # For input output main function
# Run the application: launch Shiny with the UI and server defined above
shinyApp(ui = ui, server = server)
|
#Karoliina Suonpää, 7.2.2017
#RStudio Exercise 3, data wrangling (data: UCI Machine Learning Repository, Student Alcohol consumption)
# Set working directory (NOTE(review): machine-specific absolute path)
setwd("Z:/Jatko-opinnot/Tilastotiede 2016/IODS/IODS-project/Data")
#check working directory
getwd()
#read the data from portuguese class
por<-read.csv("student-por.csv", sep=";", header=T)
#read the data from math class
mat<-read.csv("student-mat.csv", sep=";", header=T)
# Inspect both datasets (summaries, structure, dimensions)
summary(por)
str(por)
dim(por)
summary(mat)
str(mat)
dim(mat)
#Join the two datasets
# access the dplyr library
library(dplyr)
# Join mat and por on the identifier columns; a student present in both
# classes yields one row, with duplicated non-join columns suffixed
# .mat / .por.
join_by <- c("school","sex","age","address","famsize","Pstatus","Medu","Fedu","Mjob","Fjob","reason","nursery","internet")
mat_por <- inner_join(mat, por, by = join_by, suffix=c(".mat", ".por"))
# see the new column names
colnames(mat_por)
# check the new dataset
glimpse(mat_por)
str(mat_por)
dim(mat_por)
# create a new data frame with only the joined columns
alc <- select(mat_por, one_of(join_by))
# the columns in the datasets which were not used for joining the data
notjoined_columns <- colnames(mat)[!colnames(mat) %in% join_by]
# print out the columns not used for joining
notjoined_columns
# For every non-join column, collapse its .mat/.por duplicate pair into a
# single column of alc.
for(column_name in notjoined_columns) {
# select the two columns from 'mat_por' with the same original name
two_columns <- select(mat_por, starts_with(column_name))
# select the first column vector of those two columns
first_column <- select(two_columns, 1)[[1]]
###
# if that first column vector is numeric...
if(is.numeric(first_column)) {
# take a rounded average of each row of the two columns and
# add the resulting vector to the alc data frame
alc[column_name] <- round(rowMeans(two_columns))
} else { # else if it's not numeric...
# add the first column vector (the math-class answer) to alc
alc[column_name] <- first_column
}
}
# glimpse at the new combined data
glimpse(alc)
dim(alc)
summary(alc)
#Everything seems OK
# 'alc_use': average of weekday (Dalc) and weekend (Walc) consumption.
alc <- mutate(alc, alc_use = (Dalc + Walc) / 2)
# 'high_use' is TRUE when average consumption is greater than 2.
alc <- mutate(alc, high_use = alc_use > 2)
#check the new variable
summary(alc$high_use)
#check the new combined dataset
dim(alc)
glimpse(alc)
summary(alc)
# Write CSV and save to the Data folder.
# Fix: row.names = FALSE — the default writes a row-number column that
# reappears as a spurious unnamed "X" column when the file is read back.
write.csv(alc, file = "alc.csv", row.names = FALSE)
#check that everything is OK
read.table("alc.csv", sep=",", header=T)
summary(read.table("alc.csv", sep=",", header=T))
|
/Data/create_alc.R
|
no_license
|
karosuon/IODS-project
|
R
| false
| false
| 2,733
|
r
|
#Karoliina Suonpää, 7.2.2017
#RStudio Exercise 3, data wrangling (data: UCI Machine Learning Repository, Student Alcohol consumption)
# Set working directory (NOTE(review): machine-specific absolute path)
setwd("Z:/Jatko-opinnot/Tilastotiede 2016/IODS/IODS-project/Data")
#check working directory
getwd()
#read the data from portuguese class
por<-read.csv("student-por.csv", sep=";", header=T)
#read the data from math class
mat<-read.csv("student-mat.csv", sep=";", header=T)
# Inspect both datasets (summaries, structure, dimensions)
summary(por)
str(por)
dim(por)
summary(mat)
str(mat)
dim(mat)
#Join the two datasets
# access the dplyr library
library(dplyr)
# Join mat and por on the identifier columns; a student present in both
# classes yields one row, with duplicated non-join columns suffixed
# .mat / .por.
join_by <- c("school","sex","age","address","famsize","Pstatus","Medu","Fedu","Mjob","Fjob","reason","nursery","internet")
mat_por <- inner_join(mat, por, by = join_by, suffix=c(".mat", ".por"))
# see the new column names
colnames(mat_por)
# check the new dataset
glimpse(mat_por)
str(mat_por)
dim(mat_por)
# create a new data frame with only the joined columns
alc <- select(mat_por, one_of(join_by))
# the columns in the datasets which were not used for joining the data
notjoined_columns <- colnames(mat)[!colnames(mat) %in% join_by]
# print out the columns not used for joining
notjoined_columns
# For every non-join column, collapse its .mat/.por duplicate pair into a
# single column of alc.
for(column_name in notjoined_columns) {
# select the two columns from 'mat_por' with the same original name
two_columns <- select(mat_por, starts_with(column_name))
# select the first column vector of those two columns
first_column <- select(two_columns, 1)[[1]]
###
# if that first column vector is numeric...
if(is.numeric(first_column)) {
# take a rounded average of each row of the two columns and
# add the resulting vector to the alc data frame
alc[column_name] <- round(rowMeans(two_columns))
} else { # else if it's not numeric...
# add the first column vector (the math-class answer) to alc
alc[column_name] <- first_column
}
}
# glimpse at the new combined data
glimpse(alc)
dim(alc)
summary(alc)
#Everything seems OK
# 'alc_use': average of weekday (Dalc) and weekend (Walc) consumption.
alc <- mutate(alc, alc_use = (Dalc + Walc) / 2)
# 'high_use' is TRUE when average consumption is greater than 2.
alc <- mutate(alc, high_use = alc_use > 2)
#check the new variable
summary(alc$high_use)
#check the new combined dataset
dim(alc)
glimpse(alc)
summary(alc)
# Write CSV and save to the Data folder.
# Fix: row.names = FALSE — the default writes a row-number column that
# reappears as a spurious unnamed "X" column when the file is read back.
write.csv(alc, file = "alc.csv", row.names = FALSE)
#check that everything is OK
read.table("alc.csv", sep=",", header=T)
summary(read.table("alc.csv", sep=",", header=T))
|
#!/usr/bin/env Rscript
## Plot breed LMER (linear mixed-effects) results and compare them with
## survey-permutation results.
args = commandArgs(trailingOnly=TRUE)
library(tidyverse)
library(forcats)
library(ggrepel)
library(wesanderson)
library(rstatix)
# Input/output locations
outdir <- "plots"
indir <- "../../data_release/"
outroot<- "Fig_LMER"
# Breed reference table; fall back to the full breed name when no short
# name is given.
breednames <- as_tibble(read.csv(paste(indir,"/ReferenceData_breeds.csv",sep=""))) %>% rename(breed=breed_name)
breednames <- breednames %>% mutate(breed_name_short=if_else(is.na(breed_name_short),breed,breed_name_short))
# Input file paths
permIn <- paste(indir,"DarwinsArk_20191115_survey_permutations.csv",sep="")
LMERIn <- paste(indir,"DarwinsArk_20191115_LMER_models.csv",sep="")
facIn <- paste(indir,"DarwinsArk_20191115_factors.csv",sep="")
quesIn <- paste(indir,"DarwinsArk_20191115_questions.csv",sep="")
herIn <- paste(indir,"DarwinsArk_20191115_heritability_all-SNPs.csv",sep="")
validIn <- paste(indir,"ReferenceData_breed_standards_byQuestion.csv",sep="")
sizeValidIn <- paste(indir,"ReferenceData_breed_stature.csv",sep="")
# LMER model results; qf is a "Q<id>"/"F<id>" key for questions/factors.
d <- as_tibble(read.csv(LMERIn,header=T))
d <- d %>% mutate(idtype=if_else(str_detect(type,"factor"),"factor","question"))
d <- d %>% mutate(qf=if_else(idtype=="question",paste("Q",id,sep=""),paste("F",id,sep="")))
d <- d %>% mutate(breed=str_replace_all(breed,"\\_"," "))
d <- breednames %>% select(breed,breed_name_short) %>% distinct() %>% inner_join(d)
# Build human-readable labels for factors (F#) and questions (Q#),
# including the negative/positive ends of each scale.
facInfo <- as_tibble(read.csv(facIn)) %>% mutate(idtype="factor")
facInfo <- facInfo %>% select(idtype,factor,name,negative,positive) %>% distinct() %>% rename(id=factor,string=name)
facInfo <- facInfo %>% filter(id <= 8) %>% mutate(plotname=paste(string," (",negative," to ",positive,")",sep="")) %>% mutate(plotname=str_replace(plotname,"Factor ","F")) #%>% mutate(ylabel=paste(string," (",negative," to ",positive,")",sep=""),ylabel_long=paste(plotname," (",negative," to ",positive,")",sep=""))
facInfo <- facInfo %>% select(idtype,id,plotname,string,negative,positive)
facInfo <- facInfo %>% mutate(string=str_replace(string,"Factor ","F"))
facInfo <- facInfo %>% mutate(qf=paste("F",id,sep=""))
facInfo <- facInfo %>% mutate(question.type="factor")
quesInfo <- as_tibble(read.csv(quesIn,header=T)) %>% mutate(idtype="question")
quesInfo <- quesInfo %>% select(idtype,id,string,abbr_text,negative,positive,question.type) %>% distinct()
quesInfo <- quesInfo %>% mutate(plotname=paste("Q",id,": ",abbr_text," (",negative," to ",positive,")",sep="")) #%>% mutate(ylabel=paste(plotname_order,": ",abbr_text," (",negative," to ",positive,")",sep=""),ylabel_long=paste(plotname," (",negative," to ",positive,")",sep=""))
quesInfo <- quesInfo %>% mutate(string=paste("Q",id,": ",abbr_text,sep="")) #%>% mutate(ylabel=paste(plotname_order,": ",abbr_text," (",negative," to ",positive,")",sep=""),ylabel_long=paste(plotname," (",negative," to ",positive,")",sep=""))
quesInfo <- quesInfo %>% select(idtype,id,plotname,question.type,string,negative,positive) %>% mutate(question.type=str_replace(question.type,"aging related surveys","other behavior"))
quesInfo <- quesInfo %>% mutate(qf=paste("Q",id,sep=""))
plotinfo <- facInfo %>% bind_rows(quesInfo) %>% mutate(id=qf)
d <- plotinfo %>% select(qf,question.type,plotname) %>% distinct() %>% right_join(d)
# Mark breeds that have survey-permutation results ("surveys") vs those
# that do not ("new"); new breeds are starred in the plot labels below.
new <- as_tibble(read.csv(permIn,header=T)) %>% filter(str_detect(type,"breed")) %>% select(set,id,idtype) %>% distinct() %>% rename(breed=set) %>% mutate(breedtype="surveys")
new <- new %>% mutate(breed=str_replace_all(breed,"\\_"," ")) %>% filter(breed %in% breednames$breed)
new <- d %>% select(id,idtype,breed) %>% distinct() %>% left_join(new) %>% replace_na(list(breedtype="new"))
d <- new %>% right_join(d)
d <- d %>% mutate(breedtype=if_else(str_detect(breed,"cocker"),"surveys",breedtype))
d <- d %>% group_by(qf) %>% summarize(minp=min(ML.anova.p.adj_benjhoch_FDR)) %>% inner_join(d)
# Page assignment: pg1 = factors/physical/motor traits; pg2 = "other
# behavior" traits with at least one significant breed; insig = the rest.
d <- d %>% mutate(page=if_else(question.type!="other behavior","pg1",if_else(minp<0.05,"pg2","insig")))
pd <- d
pd <- pd %>% mutate(sig=if_else(ML.anova.p.adj_benjhoch_FDR<=0.05,TRUE,FALSE))
#pd <- pd %>% mutate(colorset=if_else(sig,paste("sig",breedtype),paste("nonsig",breedtype)))
pdtext <- pd %>% filter(sig) %>% mutate(breed_name_short=if_else(breedtype=="new",paste(breed_name_short,"*",sep=""),breed_name_short))
#order <- cor %>% filter(qf %in% pd$qf) %>% group_by(idtype,plotname,qf) %>% summarize(correlation=max(correlation)) %>% arrange(idtype,desc(correlation)) %>% pull(plotname)
order <- pd %>% select(idtype,id,plotname) %>% distinct() %>% arrange(idtype,id) %>% pull(plotname)
# Fix facet and y-axis ordering for plotting.
pd$question.type <- factor(pd$question.type,levels=c("factor","physical trait","physical trait related","motor pattern","other behavior"))
pdtext$question.type <- factor(pdtext$question.type,levels=c("factor","physical trait","physical trait related","motor pattern","other behavior"))
pd$plotname <- factor(pd$plotname,levels=order)
pdtext$plotname <- factor(pdtext$plotname,levels=order)
# Overview figure: one row per trait, breed LMER t-values as points,
# significant breeds labelled by (short) name.
p <- ggplot(pd,aes(x=REML.t.val,y=plotname))
p <- p + geom_vline(xintercept=0,color="grey50",size=0.2)
p <- p + geom_point(aes(color=sig),size=0.75,shape=16,alpha=0.5)
p <- p + geom_text_repel(aes(label=breed_name_short),color="#A41E22",direction="both",max.overlaps=50,segment.size=0.1,size=1.25,min.segment.length = 0,data=pdtext)
p <- p + theme_minimal()
p <- p + facet_grid(question.type~.,scales="free_y",space="free_y")
p <- p + scale_color_manual(values=c("#000000","#a50f15"))
#p <- p + scale_alpha_manual(values=c(0.5,1))
# NOTE(review): axis label has a grammar slip ("does ... influences") —
# consider fixing the wording in a follow-up.
p <- p + scale_x_continuous("What the mutts say - does breed ancestry influences trait? (REML.t.val)")
p <- p + scale_y_discrete(limits=rev)
#p <- p + ggtitle(title)
p <- p + theme(legend.position="none",strip.text=element_text(size=6,face="bold"),
plot.title=element_text(size=6,face="bold"),panel.grid.minor=element_blank(),
plot.subtitle=element_text(size=5),
axis.title.y = element_blank(),
axis.text.x = element_text(hjust=0.5,size=5),
axis.text.y = element_text(hjust=1,size=5),
axis.title.x=element_text(hjust=0.5,size=6,face="bold"),
strip.text.x=element_text(hjust=0.5,size=6))
# Figure height scales with the number of traits shown.
nrow <- length(unique(pd$plotname))
ggsave(plot=p,filename=paste(outroot,"_all.pdf",sep=""),limitsize = FALSE,width=6.5,height=nrow*0.5)
##nrow <- length(unique((pdsig %>% filter(type!="physical trait"))$plotname))
# Re-draw the same figure per page: pg1 = all results for factors,
# physical traits and motor patterns; pg2 = only significant "other
# behavior" results; the "insig" page is skipped entirely.
for (inPage in unique(d$page)){
if (inPage != "insig"){
title <- "all other questions; only those with significant results"
if (inPage=="pg1"){
title <- "factors, physical traits, and motor patterns; all results"
}
pd <- d %>% filter(page==inPage)
pd <- pd %>% mutate(sig=if_else(ML.anova.p.adj_benjhoch_FDR<=0.05,TRUE,FALSE))
pd <- pd %>% mutate(colorset=if_else(sig,paste("sig",breedtype),paste("nonsig",breedtype)))
pdtext <- pd %>% filter(sig) %>% mutate(breed_name_short=if_else(breedtype=="new",paste(breed_name_short,"*",sep=""),breed_name_short))
#order <- cor %>% filter(qf %in% pd$qf) %>% group_by(idtype,plotname,qf) %>% summarize(correlation=max(correlation)) %>% arrange(idtype,desc(correlation)) %>% pull(plotname)
order <- pd %>% select(idtype,id,plotname) %>% distinct() %>% arrange(idtype,id) %>% pull(plotname)
pd$question.type <- factor(pd$question.type,levels=c("factor","physical trait","physical trait related","motor pattern","other behavior"))
pdtext$question.type <- factor(pdtext$question.type,levels=c("factor","physical trait","physical trait related","motor pattern","other behavior"))
pd$plotname <- factor(pd$plotname,levels=order)
pdtext$plotname <- factor(pdtext$plotname,levels=order)
# Same plot construction as the overview figure above, plus a title.
p <- ggplot(pd,aes(x=REML.t.val,y=plotname))
p <- p + geom_vline(xintercept=0,color="grey50",size=0.2)
p <- p + geom_point(aes(color=sig),size=0.75,shape=16,alpha=0.5)
p <- p + geom_text_repel(aes(label=breed_name_short),color="#A41E22",direction="both",max.overlaps=50,segment.size=0.1,size=1.25,min.segment.length = 0,data=pdtext)
p <- p + theme_minimal()
p <- p + facet_grid(question.type~.,scales="free_y",space="free_y")
p <- p + scale_color_manual(values=c("#000000","#a50f15"))
#p <- p + scale_alpha_manual(values=c(0.5,1))
p <- p + scale_x_continuous("What the mutts say - does breed ancestry influences trait? (REML.t.val)")
p <- p + scale_y_discrete(limits=rev)
p <- p + ggtitle(title)
p <- p + theme(legend.position="none",strip.text=element_text(size=6,face="bold"),
plot.title=element_text(size=6,face="bold"),panel.grid.minor=element_blank(),
plot.subtitle=element_text(size=5),
#panel.grid.major.x=element_blank(),
axis.title.y = element_blank(),
axis.text.x = element_text(hjust=0.5,size=5),
axis.text.y = element_text(hjust=1,size=5),
axis.title.x=element_text(hjust=0.5,size=6,face="bold"),
#legend.title = element_blank(),
#legend.text=element_text(hjust=0.5,size=6),
#legend.key.size = unit(0.1, 'in'),
strip.text.x=element_text(hjust=0.5,size=6))
nrow <- length(unique(pd$plotname))
ggsave(plot=p,filename=paste(outroot,".LMER_only.",inPage,".pdf",sep=""),limitsize = FALSE,width=6.5,height=9)
}
}
# SNP heritability (h2SNP) per trait, restricted to the all-dogs variance
# estimates, joined onto the LMER results.
her <- as_tibble(read.csv(herIn,header=T,na.strings=c("NA","#N/A",""," ","N/A")))
her <- her %>% mutate(idtype=if_else(str_detect(class,"factor"),"factor","question")) %>% rename(id=index)
her <- her %>% filter(type=="Variance"&set=="all dogs") %>% filter(trait!="Size (Tiny)"&trait!="Size (Giant)")
her <- her %>% select(id,idtype,h2SNP)
d <- her %>% inner_join(d)
# Survey permutation results (matched breeds only).
perm <- as_tibble(read.csv(permIn)) %>% select(-ndogs) %>% rename(cand_or_conf=type)
perm <- perm %>% rename(breed=set) %>% mutate(breed=str_replace_all(breed,"\\_"," "))
perm <- perm %>% filter(breed %in% breednames$breed)
pd <- perm %>% inner_join(d)
pd <- plotinfo %>% select(qf,question.type) %>% distinct() %>% right_join(pd)
# NOTE(review): pd is immediately rebuilt below from the candidate-breed
# n=25 permutations, so the two joins above look like dead code — verify.
perm <- as_tibble(read.csv(permIn)) %>% filter(type=="candidate_breed") %>% filter(ndogs==25) %>% select(-type,-ndogs)
perm <- perm %>% rename(breed=set) %>% mutate(breed=str_replace_all(breed,"\\_"," "))
perm <- perm %>% filter(breed %in% breednames$breed)
pd <- perm %>% inner_join(d)
## MAKE VALIDATION TABLES
# Fraction of significant LMER hits whose permutation z agrees in sign,
# split into physical traits vs behavior.
pd <- pd %>% mutate(qtype2=if_else(question.type=="physical trait",question.type,"behavior"))
valid <- pd %>% select(qf,qtype2,id,idtype,breed,z,p,pcorr,REML.t.val,ML.anova.p.adj_benjhoch_FDR) %>% filter(ML.anova.p.adj_benjhoch_FDR<0.05&(p<0.025|p>0.975)) %>% filter((REML.t.val<0&z<0)|(REML.t.val>0&z>0)) %>% group_by(qtype2) %>% count() %>% rename(nmatch=n)
valid <- pd %>% select(qf,qtype2,id,idtype,breed,z,p,pcorr,REML.t.val,ML.anova.p.adj_benjhoch_FDR) %>% filter(ML.anova.p.adj_benjhoch_FDR<0.05&(p<0.025|p>0.975)) %>% group_by(qtype2) %>% count() %>% rename(ntot=n) %>% full_join(valid)
valid <- valid %>% mutate(frac=nmatch/ntot)
print(valid)
# Validate the size trait (id 121) against breed-standard heights.
breedsizes <- as_tibble(read.csv(sizeValidIn,header=T) %>% mutate(breed=tolower(breed)))
breedsizes <- d %>% filter(id==121) %>% inner_join(breedsizes)
cor.test(breedsizes$height.cm,breedsizes$REML.t.val)
breedsizes <- breedsizes %>% filter(ML.anova.p.adj_benjhoch_FDR<0.05)
cor.test(breedsizes$height.cm,breedsizes$REML.t.val)
# Validate significant predictions against written breed standards:
# prediction = the scale end implied by the sign of the LMER t-value.
standards <- as_tibble(read.csv(validIn,header=T,na.strings=c("NA","","#N/A","na")))
standards <- d %>% inner_join(standards) %>% filter(ML.anova.p.adj_benjhoch_FDR<=0.05)
standards <- quesInfo %>% select(idtype,id,string,negative,positive) %>% right_join(standards)
standards <- standards %>% mutate(prediction=if_else(REML.t.val<0,negative,positive))
standards <- standards %>% select(breed,idtype,id,plotname,REML.t.val,ML.anova.p.adj_benjhoch_FDR,direction.of.change,prediction)
valid2 <- standards %>% group_by(plotname) %>% count() %>% rename(ntot=n)
valid2 <- standards %>% filter(direction.of.change==prediction) %>% group_by(plotname) %>% count() %>% rename(nmatch=n) %>% full_join(valid2)
valid2 <- standards %>% filter(direction.of.change=="other") %>% group_by(plotname) %>% count() %>% rename(nother=n) %>% full_join(valid2)
valid2 <- standards %>% group_by(plotname,direction.of.change) %>% count() %>% mutate(string=paste(n,direction.of.change)) %>% group_by(plotname) %>% summarize(string=paste(string,collapse="; ")) %>% full_join(valid2)
valid2 <- valid2 %>% replace_na(list(nother=0))
valid2 <- valid2 %>% summarize(ntot=sum(ntot),nother=sum(nother),nmatch=sum(nmatch)) %>% mutate(plotname="all") %>% bind_rows(valid2)
valid2 <- valid2 %>% mutate(percent.correct=nmatch/(ntot-nother))
print(valid2)
# Correlate permutation z with LMER t per trait, with BH-adjusted p-values
# and Pearson confidence intervals, then draw a forest-style summary plot.
pd <- plotinfo %>% select(qf,question.type) %>% distinct() %>% right_join(pd)
cor <- pd %>% group_by(plotname,idtype,qf,question.type) %>% summarize(n=n(),correlation = cor(z,REML.t.val,method="pearson"),p = cor.test(z,REML.t.val,method="pearson")$p.value,ciL=cor.test(z,REML.t.val,method="pearson")$conf.int[1],ciH=cor.test(z,REML.t.val,method="pearson")$conf.int[2])
cor <- cor %>% adjust_pvalue(method="BH") %>% add_significance("p.adj")
cor <- cor %>% mutate(sig=if_else(p.adj<=0.05,TRUE,FALSE))
cor <- as_tibble(cor)
# y-axis labels with per-trait sample sizes.
xlabels <- pd %>% group_by(qf) %>% count()
xlabels <- plotinfo %>% select(qf,string ) %>% distinct() %>% inner_join(xlabels)
xlabels <- xlabels %>% mutate(label=paste(string," (N=",n,")",sep=""))
sigpd <- cor %>% filter(p.adj<=0.05) %>% select(question.type,idtype,qf,ciH,correlation,p.adj,p.adj.signif,sig) %>% mutate(xpos=ciH+0.02)
sigpd <- sigpd %>% mutate(pstr=paste(p.adj.signif," p=",format(p.adj,digits=2),sep=""))
# NOTE(review): sig is recomputed here with a strict "<" after being set
# with "<=" above; a p.adj of exactly 0.05 would flip — confirm intent.
cor <- cor %>% mutate(sig=if_else(p.adj<0.05,TRUE,FALSE))
limits = c(min(cor$ciL)-0.01,1.2)
order <- cor %>% group_by(idtype,qf) %>% summarize(correlation=max(correlation)) %>% arrange(idtype,correlation) %>% pull(qf)
#order <- cor %>% group_by(idtype,qf) %>% summarize(her=max(her)) %>% arrange(idtype,her) %>% pull(qf)
cor$qf <- factor(cor$qf,levels=order)
sigpd$qf <- factor(sigpd$qf,levels=order)
cor$question.type <- factor(cor$question.type,levels=c("factor","physical trait","physical trait related","motor pattern","other behavior"))
sigpd$question.type <- factor(sigpd$question.type,levels=c("factor","physical trait","physical trait related","motor pattern","other behavior"))
# Correlation per trait with CI segments; significant traits annotated.
p <- ggplot(cor,aes(y=qf,x=correlation))
p <- p + geom_vline(xintercept = 0,color="grey40",size=0.2)
p <- p + geom_point(aes(color=sig),size=1,shape=16)
p <- p + geom_segment(aes(color=sig,yend=qf,x=ciL,xend=ciH),size=0.2)
p <- p + geom_text(aes(y=qf,x=xpos,label=pstr),size=1.25,data=sigpd,vjust=0.5,hjust=0) # $color=sig,hjust=0.05))
p <- p + scale_color_manual(values=c("#878787","#b2182b"))
p <- p + scale_y_discrete("",breaks=xlabels$qf,labels=xlabels$label)
p <- p + scale_x_continuous("correlation between survey permutation z and LMMR t",limits=limits,breaks=c(-2:2)/2)
p <- p + facet_grid(question.type~.,scales="free_y",space="free_y")
p <- p + theme_minimal()
p <- p + theme(legend.position="none",strip.text=element_text(size=3,face="bold"),
plot.title=element_text(size=8,face="bold"),panel.grid.minor=element_blank(),
plot.subtitle=element_text(size=5),axis.text.y = element_text(hjust=1,size=4.5),
#panel.grid.major.x=element_blank(),
axis.title.y = element_text(hjust=0.5,size=5,face="bold"),
axis.text.x = element_text(hjust=0.5,size=5),
axis.title.x=element_text(hjust=0.5,size=5,face="bold"),
legend.title = element_blank(),
legend.text=element_text(hjust=0.5,size=5),
legend.key.size = unit(0.1, 'in'))
ggsave(plot=p,filename=paste(outroot,".cor.pdf",sep=""),width=3.5,height=16)
|
/plot/plot_LMER.R
|
no_license
|
DarwinsArk/muttomics
|
R
| false
| false
| 15,670
|
r
|
#!/usr/bin/env Rscript
## Plot breed LMER (linear mixed-effects) results and compare them with
## survey-permutation results.
args = commandArgs(trailingOnly=TRUE)
library(tidyverse)
library(forcats)
library(ggrepel)
library(wesanderson)
library(rstatix)
# Input/output locations
outdir <- "plots"
indir <- "../../data_release/"
outroot<- "Fig_LMER"
# Breed reference table; fall back to the full breed name when no short
# name is given.
breednames <- as_tibble(read.csv(paste(indir,"/ReferenceData_breeds.csv",sep=""))) %>% rename(breed=breed_name)
breednames <- breednames %>% mutate(breed_name_short=if_else(is.na(breed_name_short),breed,breed_name_short))
# Input file paths
permIn <- paste(indir,"DarwinsArk_20191115_survey_permutations.csv",sep="")
LMERIn <- paste(indir,"DarwinsArk_20191115_LMER_models.csv",sep="")
facIn <- paste(indir,"DarwinsArk_20191115_factors.csv",sep="")
quesIn <- paste(indir,"DarwinsArk_20191115_questions.csv",sep="")
herIn <- paste(indir,"DarwinsArk_20191115_heritability_all-SNPs.csv",sep="")
validIn <- paste(indir,"ReferenceData_breed_standards_byQuestion.csv",sep="")
sizeValidIn <- paste(indir,"ReferenceData_breed_stature.csv",sep="")
# LMER model results; qf is a "Q<id>"/"F<id>" key for questions/factors.
d <- as_tibble(read.csv(LMERIn,header=T))
d <- d %>% mutate(idtype=if_else(str_detect(type,"factor"),"factor","question"))
d <- d %>% mutate(qf=if_else(idtype=="question",paste("Q",id,sep=""),paste("F",id,sep="")))
d <- d %>% mutate(breed=str_replace_all(breed,"\\_"," "))
d <- breednames %>% select(breed,breed_name_short) %>% distinct() %>% inner_join(d)
# Build human-readable labels for factors (F#) and questions (Q#),
# including the negative/positive ends of each scale.
facInfo <- as_tibble(read.csv(facIn)) %>% mutate(idtype="factor")
facInfo <- facInfo %>% select(idtype,factor,name,negative,positive) %>% distinct() %>% rename(id=factor,string=name)
facInfo <- facInfo %>% filter(id <= 8) %>% mutate(plotname=paste(string," (",negative," to ",positive,")",sep="")) %>% mutate(plotname=str_replace(plotname,"Factor ","F")) #%>% mutate(ylabel=paste(string," (",negative," to ",positive,")",sep=""),ylabel_long=paste(plotname," (",negative," to ",positive,")",sep=""))
facInfo <- facInfo %>% select(idtype,id,plotname,string,negative,positive)
facInfo <- facInfo %>% mutate(string=str_replace(string,"Factor ","F"))
facInfo <- facInfo %>% mutate(qf=paste("F",id,sep=""))
facInfo <- facInfo %>% mutate(question.type="factor")
quesInfo <- as_tibble(read.csv(quesIn,header=T)) %>% mutate(idtype="question")
quesInfo <- quesInfo %>% select(idtype,id,string,abbr_text,negative,positive,question.type) %>% distinct()
quesInfo <- quesInfo %>% mutate(plotname=paste("Q",id,": ",abbr_text," (",negative," to ",positive,")",sep="")) #%>% mutate(ylabel=paste(plotname_order,": ",abbr_text," (",negative," to ",positive,")",sep=""),ylabel_long=paste(plotname," (",negative," to ",positive,")",sep=""))
quesInfo <- quesInfo %>% mutate(string=paste("Q",id,": ",abbr_text,sep="")) #%>% mutate(ylabel=paste(plotname_order,": ",abbr_text," (",negative," to ",positive,")",sep=""),ylabel_long=paste(plotname," (",negative," to ",positive,")",sep=""))
quesInfo <- quesInfo %>% select(idtype,id,plotname,question.type,string,negative,positive) %>% mutate(question.type=str_replace(question.type,"aging related surveys","other behavior"))
quesInfo <- quesInfo %>% mutate(qf=paste("Q",id,sep=""))
plotinfo <- facInfo %>% bind_rows(quesInfo) %>% mutate(id=qf)
d <- plotinfo %>% select(qf,question.type,plotname) %>% distinct() %>% right_join(d)
# Mark breeds that have survey-permutation results ("surveys") vs those
# that do not ("new"); new breeds are starred in the plot labels below.
new <- as_tibble(read.csv(permIn,header=T)) %>% filter(str_detect(type,"breed")) %>% select(set,id,idtype) %>% distinct() %>% rename(breed=set) %>% mutate(breedtype="surveys")
new <- new %>% mutate(breed=str_replace_all(breed,"\\_"," ")) %>% filter(breed %in% breednames$breed)
new <- d %>% select(id,idtype,breed) %>% distinct() %>% left_join(new) %>% replace_na(list(breedtype="new"))
d <- new %>% right_join(d)
d <- d %>% mutate(breedtype=if_else(str_detect(breed,"cocker"),"surveys",breedtype))
d <- d %>% group_by(qf) %>% summarize(minp=min(ML.anova.p.adj_benjhoch_FDR)) %>% inner_join(d)
# Page assignment: pg1 = factors/physical/motor traits; pg2 = "other
# behavior" traits with at least one significant breed; insig = the rest.
d <- d %>% mutate(page=if_else(question.type!="other behavior","pg1",if_else(minp<0.05,"pg2","insig")))
pd <- d
pd <- pd %>% mutate(sig=if_else(ML.anova.p.adj_benjhoch_FDR<=0.05,TRUE,FALSE))
#pd <- pd %>% mutate(colorset=if_else(sig,paste("sig",breedtype),paste("nonsig",breedtype)))
pdtext <- pd %>% filter(sig) %>% mutate(breed_name_short=if_else(breedtype=="new",paste(breed_name_short,"*",sep=""),breed_name_short))
#order <- cor %>% filter(qf %in% pd$qf) %>% group_by(idtype,plotname,qf) %>% summarize(correlation=max(correlation)) %>% arrange(idtype,desc(correlation)) %>% pull(plotname)
order <- pd %>% select(idtype,id,plotname) %>% distinct() %>% arrange(idtype,id) %>% pull(plotname)
# Fix facet and y-axis ordering for plotting.
pd$question.type <- factor(pd$question.type,levels=c("factor","physical trait","physical trait related","motor pattern","other behavior"))
pdtext$question.type <- factor(pdtext$question.type,levels=c("factor","physical trait","physical trait related","motor pattern","other behavior"))
pd$plotname <- factor(pd$plotname,levels=order)
pdtext$plotname <- factor(pdtext$plotname,levels=order)
p <- ggplot(pd,aes(x=REML.t.val,y=plotname))
p <- p + geom_vline(xintercept=0,color="grey50",size=0.2)
p <- p + geom_point(aes(color=sig),size=0.75,shape=16,alpha=0.5)
p <- p + geom_text_repel(aes(label=breed_name_short),color="#A41E22",direction="both",max.overlaps=50,segment.size=0.1,size=1.25,min.segment.length = 0,data=pdtext)
p <- p + theme_minimal()
p <- p + facet_grid(question.type~.,scales="free_y",space="free_y")
p <- p + scale_color_manual(values=c("#000000","#a50f15"))
#p <- p + scale_alpha_manual(values=c(0.5,1))
p <- p + scale_x_continuous("What the mutts say - does breed ancestry influences trait? (REML.t.val)")
p <- p + scale_y_discrete(limits=rev)
#p <- p + ggtitle(title)
p <- p + theme(legend.position="none",strip.text=element_text(size=6,face="bold"),
plot.title=element_text(size=6,face="bold"),panel.grid.minor=element_blank(),
plot.subtitle=element_text(size=5),
axis.title.y = element_blank(),
axis.text.x = element_text(hjust=0.5,size=5),
axis.text.y = element_text(hjust=1,size=5),
axis.title.x=element_text(hjust=0.5,size=6,face="bold"),
strip.text.x=element_text(hjust=0.5,size=6))
nrow <- length(unique(pd$plotname))
ggsave(plot=p,filename=paste(outroot,"_all.pdf",sep=""),limitsize = FALSE,width=6.5,height=nrow*0.5)
##nrow <- length(unique((pdsig %>% filter(type!="physical trait"))$plotname))
for (inPage in unique(d$page)){
if (inPage != "insig"){
title <- "all other questions; only those with significant results"
if (inPage=="pg1"){
title <- "factors, physical traits, and motor patterns; all results"
}
pd <- d %>% filter(page==inPage)
pd <- pd %>% mutate(sig=if_else(ML.anova.p.adj_benjhoch_FDR<=0.05,TRUE,FALSE))
pd <- pd %>% mutate(colorset=if_else(sig,paste("sig",breedtype),paste("nonsig",breedtype)))
pdtext <- pd %>% filter(sig) %>% mutate(breed_name_short=if_else(breedtype=="new",paste(breed_name_short,"*",sep=""),breed_name_short))
#order <- cor %>% filter(qf %in% pd$qf) %>% group_by(idtype,plotname,qf) %>% summarize(correlation=max(correlation)) %>% arrange(idtype,desc(correlation)) %>% pull(plotname)
order <- pd %>% select(idtype,id,plotname) %>% distinct() %>% arrange(idtype,id) %>% pull(plotname)
pd$question.type <- factor(pd$question.type,levels=c("factor","physical trait","physical trait related","motor pattern","other behavior"))
pdtext$question.type <- factor(pdtext$question.type,levels=c("factor","physical trait","physical trait related","motor pattern","other behavior"))
pd$plotname <- factor(pd$plotname,levels=order)
pdtext$plotname <- factor(pdtext$plotname,levels=order)
p <- ggplot(pd,aes(x=REML.t.val,y=plotname))
p <- p + geom_vline(xintercept=0,color="grey50",size=0.2)
p <- p + geom_point(aes(color=sig),size=0.75,shape=16,alpha=0.5)
p <- p + geom_text_repel(aes(label=breed_name_short),color="#A41E22",direction="both",max.overlaps=50,segment.size=0.1,size=1.25,min.segment.length = 0,data=pdtext)
p <- p + theme_minimal()
p <- p + facet_grid(question.type~.,scales="free_y",space="free_y")
p <- p + scale_color_manual(values=c("#000000","#a50f15"))
#p <- p + scale_alpha_manual(values=c(0.5,1))
p <- p + scale_x_continuous("What the mutts say - does breed ancestry influences trait? (REML.t.val)")
p <- p + scale_y_discrete(limits=rev)
p <- p + ggtitle(title)
p <- p + theme(legend.position="none",strip.text=element_text(size=6,face="bold"),
plot.title=element_text(size=6,face="bold"),panel.grid.minor=element_blank(),
plot.subtitle=element_text(size=5),
#panel.grid.major.x=element_blank(),
axis.title.y = element_blank(),
axis.text.x = element_text(hjust=0.5,size=5),
axis.text.y = element_text(hjust=1,size=5),
axis.title.x=element_text(hjust=0.5,size=6,face="bold"),
#legend.title = element_blank(),
#legend.text=element_text(hjust=0.5,size=6),
#legend.key.size = unit(0.1, 'in'),
strip.text.x=element_text(hjust=0.5,size=6))
nrow <- length(unique(pd$plotname))
ggsave(plot=p,filename=paste(outroot,".LMER_only.",inPage,".pdf",sep=""),limitsize = FALSE,width=6.5,height=9)
}
}
her <- as_tibble(read.csv(herIn,header=T,na.strings=c("NA","#N/A",""," ","N/A")))
her <- her %>% mutate(idtype=if_else(str_detect(class,"factor"),"factor","question")) %>% rename(id=index)
her <- her %>% filter(type=="Variance"&set=="all dogs") %>% filter(trait!="Size (Tiny)"&trait!="Size (Giant)")
her <- her %>% select(id,idtype,h2SNP)
d <- her %>% inner_join(d)
perm <- as_tibble(read.csv(permIn)) %>% select(-ndogs) %>% rename(cand_or_conf=type)
perm <- perm %>% rename(breed=set) %>% mutate(breed=str_replace_all(breed,"\\_"," "))
perm <- perm %>% filter(breed %in% breednames$breed)
pd <- perm %>% inner_join(d)
pd <- plotinfo %>% select(qf,question.type) %>% distinct() %>% right_join(pd)
perm <- as_tibble(read.csv(permIn)) %>% filter(type=="candidate_breed") %>% filter(ndogs==25) %>% select(-type,-ndogs)
perm <- perm %>% rename(breed=set) %>% mutate(breed=str_replace_all(breed,"\\_"," "))
perm <- perm %>% filter(breed %in% breednames$breed)
pd <- perm %>% inner_join(d)
## MAKE VALIDATION TABLES
pd <- pd %>% mutate(qtype2=if_else(question.type=="physical trait",question.type,"behavior"))
valid <- pd %>% select(qf,qtype2,id,idtype,breed,z,p,pcorr,REML.t.val,ML.anova.p.adj_benjhoch_FDR) %>% filter(ML.anova.p.adj_benjhoch_FDR<0.05&(p<0.025|p>0.975)) %>% filter((REML.t.val<0&z<0)|(REML.t.val>0&z>0)) %>% group_by(qtype2) %>% count() %>% rename(nmatch=n)
valid <- pd %>% select(qf,qtype2,id,idtype,breed,z,p,pcorr,REML.t.val,ML.anova.p.adj_benjhoch_FDR) %>% filter(ML.anova.p.adj_benjhoch_FDR<0.05&(p<0.025|p>0.975)) %>% group_by(qtype2) %>% count() %>% rename(ntot=n) %>% full_join(valid)
valid <- valid %>% mutate(frac=nmatch/ntot)
print(valid)
breedsizes <- as_tibble(read.csv(sizeValidIn,header=T) %>% mutate(breed=tolower(breed)))
breedsizes <- d %>% filter(id==121) %>% inner_join(breedsizes)
cor.test(breedsizes$height.cm,breedsizes$REML.t.val)
breedsizes <- breedsizes %>% filter(ML.anova.p.adj_benjhoch_FDR<0.05)
cor.test(breedsizes$height.cm,breedsizes$REML.t.val)
standards <- as_tibble(read.csv(validIn,header=T,na.strings=c("NA","","#N/A","na")))
standards <- d %>% inner_join(standards) %>% filter(ML.anova.p.adj_benjhoch_FDR<=0.05)
standards <- quesInfo %>% select(idtype,id,string,negative,positive) %>% right_join(standards)
standards <- standards %>% mutate(prediction=if_else(REML.t.val<0,negative,positive))
standards <- standards %>% select(breed,idtype,id,plotname,REML.t.val,ML.anova.p.adj_benjhoch_FDR,direction.of.change,prediction)
valid2 <- standards %>% group_by(plotname) %>% count() %>% rename(ntot=n)
valid2 <- standards %>% filter(direction.of.change==prediction) %>% group_by(plotname) %>% count() %>% rename(nmatch=n) %>% full_join(valid2)
valid2 <- standards %>% filter(direction.of.change=="other") %>% group_by(plotname) %>% count() %>% rename(nother=n) %>% full_join(valid2)
valid2 <- standards %>% group_by(plotname,direction.of.change) %>% count() %>% mutate(string=paste(n,direction.of.change)) %>% group_by(plotname) %>% summarize(string=paste(string,collapse="; ")) %>% full_join(valid2)
valid2 <- valid2 %>% replace_na(list(nother=0))
valid2 <- valid2 %>% summarize(ntot=sum(ntot),nother=sum(nother),nmatch=sum(nmatch)) %>% mutate(plotname="all") %>% bind_rows(valid2)
valid2 <- valid2 %>% mutate(percent.correct=nmatch/(ntot-nother))
print(valid2)
pd <- plotinfo %>% select(qf,question.type) %>% distinct() %>% right_join(pd)
cor <- pd %>% group_by(plotname,idtype,qf,question.type) %>% summarize(n=n(),correlation = cor(z,REML.t.val,method="pearson"),p = cor.test(z,REML.t.val,method="pearson")$p.value,ciL=cor.test(z,REML.t.val,method="pearson")$conf.int[1],ciH=cor.test(z,REML.t.val,method="pearson")$conf.int[2])
cor <- cor %>% adjust_pvalue(method="BH") %>% add_significance("p.adj")
cor <- cor %>% mutate(sig=if_else(p.adj<=0.05,TRUE,FALSE))
cor <- as_tibble(cor)
xlabels <- pd %>% group_by(qf) %>% count()
xlabels <- plotinfo %>% select(qf,string ) %>% distinct() %>% inner_join(xlabels)
xlabels <- xlabels %>% mutate(label=paste(string," (N=",n,")",sep=""))
sigpd <- cor %>% filter(p.adj<=0.05) %>% select(question.type,idtype,qf,ciH,correlation,p.adj,p.adj.signif,sig) %>% mutate(xpos=ciH+0.02)
sigpd <- sigpd %>% mutate(pstr=paste(p.adj.signif," p=",format(p.adj,digits=2),sep=""))
cor <- cor %>% mutate(sig=if_else(p.adj<0.05,TRUE,FALSE))
limits = c(min(cor$ciL)-0.01,1.2)
order <- cor %>% group_by(idtype,qf) %>% summarize(correlation=max(correlation)) %>% arrange(idtype,correlation) %>% pull(qf)
#order <- cor %>% group_by(idtype,qf) %>% summarize(her=max(her)) %>% arrange(idtype,her) %>% pull(qf)
cor$qf <- factor(cor$qf,levels=order)
sigpd$qf <- factor(sigpd$qf,levels=order)
cor$question.type <- factor(cor$question.type,levels=c("factor","physical trait","physical trait related","motor pattern","other behavior"))
sigpd$question.type <- factor(sigpd$question.type,levels=c("factor","physical trait","physical trait related","motor pattern","other behavior"))
p <- ggplot(cor,aes(y=qf,x=correlation))
p <- p + geom_vline(xintercept = 0,color="grey40",size=0.2)
p <- p + geom_point(aes(color=sig),size=1,shape=16)
p <- p + geom_segment(aes(color=sig,yend=qf,x=ciL,xend=ciH),size=0.2)
p <- p + geom_text(aes(y=qf,x=xpos,label=pstr),size=1.25,data=sigpd,vjust=0.5,hjust=0) # $color=sig,hjust=0.05))
p <- p + scale_color_manual(values=c("#878787","#b2182b"))
p <- p + scale_y_discrete("",breaks=xlabels$qf,labels=xlabels$label)
p <- p + scale_x_continuous("correlation between survey permutation z and LMMR t",limits=limits,breaks=c(-2:2)/2)
p <- p + facet_grid(question.type~.,scales="free_y",space="free_y")
p <- p + theme_minimal()
p <- p + theme(legend.position="none",strip.text=element_text(size=3,face="bold"),
plot.title=element_text(size=8,face="bold"),panel.grid.minor=element_blank(),
plot.subtitle=element_text(size=5),axis.text.y = element_text(hjust=1,size=4.5),
#panel.grid.major.x=element_blank(),
axis.title.y = element_text(hjust=0.5,size=5,face="bold"),
axis.text.x = element_text(hjust=0.5,size=5),
axis.title.x=element_text(hjust=0.5,size=5,face="bold"),
legend.title = element_blank(),
legend.text=element_text(hjust=0.5,size=5),
legend.key.size = unit(0.1, 'in'))
ggsave(plot=p,filename=paste(outroot,".cor.pdf",sep=""),width=3.5,height=16)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dtable-bind_order_prune_subset.R
\name{dtable_subset}
\alias{dtable_subset}
\title{subset a dtable}
\usage{
dtable_subset(x, ..., all.attr = FALSE)
}
\arguments{
\item{x}{a dtable}
\item{...}{arguments passed to \code{subset}}
\item{all.attr}{keep more than just the essential attributes?}
}
\description{
select rows in a dtable
}
|
/man/dtable_subset.Rd
|
no_license
|
renlund/descripteur
|
R
| false
| true
| 412
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dtable-bind_order_prune_subset.R
\name{dtable_subset}
\alias{dtable_subset}
\title{subset a dtable}
\usage{
dtable_subset(x, ..., all.attr = FALSE)
}
\arguments{
\item{x}{a dtable}
\item{...}{arguments passed to \code{subset}}
\item{all.attr}{keep more than just the essential attributes?}
}
\description{
select rows in a dtable
}
|
# Auto-extracted example for featurefinder::parseSplits.
library(featurefinder)
### Name: parseSplits
### Title: parseSplits
### Aliases: parseSplits
### Keywords: saveTree
### ** Examples
# NOTE(review): require() here is redundant — featurefinder is already
# attached by library() above.
require(featurefinder)
# `examples` provides the treesAll list used below.
data(examples)
parseSplits(treesAll[[1]][[2]])
|
/data/genthat_extracted_code/featurefinder/examples/parseSplits.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 210
|
r
|
# Duplicate auto-extracted example for featurefinder::parseSplits.
library(featurefinder)
### Name: parseSplits
### Title: parseSplits
### Aliases: parseSplits
### Keywords: saveTree
### ** Examples
# NOTE(review): require() here is redundant — featurefinder is already
# attached by library() above.
require(featurefinder)
# `examples` provides the treesAll list used below.
data(examples)
parseSplits(treesAll[[1]][[2]])
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{fixed.intervals}
\alias{fixed.intervals}
\title{Fixed intervals}
\usage{
fixed.intervals(n, M)
}
\arguments{
\item{n}{a number of endpoints to choose from}
\item{M}{a number of intervals to generate}
}
\value{
a 2-column matrix with start (first column) and end (second column) points of an interval in each row
}
\description{
The function generates approximately \code{M} intervals with endpoints in \code{1},\code{2},...,\code{n}, without random drawing. This routine
can be used inside \code{\link{wbs}} function and is typically not called directly by the user.
}
\details{
Function finds the minimal \code{m} such that \eqn{M\leq \frac{m(m-1)}{2}}{\code{M} <= \code{m(m-1)/2}}.
Then it generates \code{m} approximately equally-spaced positive integers lower than \code{n} and returns all possible intervals consisting of any two of these points.
}
\examples{
fixed.intervals(10,100)
}
\seealso{
\code{\link{random.intervals}} \code{\link{wbs}}
}
|
/man/fixed.intervals.Rd
|
no_license
|
pra1981/wbs
|
R
| false
| false
| 1,013
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{fixed.intervals}
\alias{fixed.intervals}
\title{Fixed intervals}
\usage{
fixed.intervals(n, M)
}
\arguments{
\item{n}{a number of endpoints to choose from}
\item{M}{a number of intervals to generate}
}
\value{
a 2-column matrix with start (first column) and end (second column) points of an interval in each row
}
\description{
The function generates approximately \code{M} intervals with endpoints in \code{1},\code{2},...,\code{n}, without random drawing. This routine
can be used inside \code{\link{wbs}} function and is typically not called directly by the user.
}
\details{
Function finds the minimal \code{m} such that \eqn{M\leq \frac{m(m-1)}{2}}{\code{M} <= \code{m(m-1)/2}}.
Then it generates \code{m} approximately equally-spaced positive integers lower than \code{n} and returns all possible intervals consisting of any two of these points.
}
\examples{
fixed.intervals(10,100)
}
\seealso{
\code{\link{random.intervals}} \code{\link{wbs}}
}
|
# Print (via message()) a human-readable summary of the design-search setup:
# the hypothesis test, its error constraints, the search restrictions, and
# (for two-stage designs) the optimisation criterion.
#
# Arguments:
#   J               - number of stages (1 or 2).
#   type            - "barnard", "binomial", "fisher", or anything else for
#                     the single/double-arm ("sat") decision design.
#   alpha, beta     - type-I / type-II error rates.
#   delta           - treatment effect used in the type-II constraint.
#   ratio           - allocation ratio r = nE / nC.
#   Pi0, Pi1        - scalar or length-2 interval of null / alternative
#                     response rates.
#   nCmax           - maximal control-arm sample size.
#   equal           - two-stage only: force n1C == n2C?
#   w               - length-5 weight vector for the optimality criterion.
#   piO             - response rate at which ESS terms are evaluated.
#   efficacy, futility            - allow early stopping (non-Fisher types)?
#   efficacy_type, efficacy_param - Fisher-type efficacy boundary spec.
#   futility_type, futility_param - Fisher-type futility boundary spec.
#
# Returns NULL invisibly; all output is via message().
#
# Fixes relative to the original:
#   - "will be se\n to:" -> "will be set\n to:" (typo in message text);
#   - "fS1 = fT1 = -inf" -> "fS1 = fT1 = -Inf" (consistent with "f1 = -Inf").
summary_des <- function(J, type, alpha, beta, delta, ratio, Pi0, Pi1,
                        nCmax, equal, w, piO, efficacy, futility,
                        efficacy_type, efficacy_param, futility_type,
                        futility_param) {
  if (J == 1) {
    stage <- "one-stage"
  } else {
    stage <- "two-stage"
  }
  # Per-type description and width of the dashed header rule.
  if (type == "barnard") {
    dashes <- 57
    design <- "Barnard's exact test"
  } else if (type == "binomial") {
    dashes <- 59
    design <- "an exact binomial test"
  } else if (type == "fisher") {
    dashes <- 56
    design <- "Fisher's exact test"
  } else {
    dashes <- 74
    design <- "one-arm and two-arm testing decisions"
  }
  # message() collapses the character vector from rep() into one dashed line.
  message(" ", rep("-", dashes))
  message(" Design of a ", stage, " trial based on ", design)
  message(" ", rep("-", dashes))
  message("\n ---------------")
  message(" Hypothesis test")
  message(" ---------------")
  message(" You have chosen to test the following hypothesis")
  message(" H0 : piE <= piC")
  message(" with the following type-I error constraint")
  # Scalar Pi0/Pi1 give a point constraint; length-2 gives an interval.
  if (length(Pi0) == 1) {
    message(" P(", Pi0, ",", Pi0, ") <= alpha = ", alpha)
  } else {
    message(" max_{pi in Pi0} P(pi,pi) <= alpha = ", alpha, ", Pi0 = [",
            Pi0[1], ",", Pi0[2], "]")
  }
  message(" and the following type-II error constraint")
  if (length(Pi1) == 1) {
    message(" P(", Pi1, ",", Pi1 + delta, ") >= 1 - beta = ", 1 - beta)
  } else {
    message(" max_{pi in Pi1} P(pi,pi+delta) >= 1 - beta = ", 1 - beta,
            ", Pi1 = [", Pi1[1], ",", Pi1[2], "], delta = ", delta)
  }
  message("\n ------------")
  message(" Restrictions")
  message(" ------------")
  message(" - You have chosen to limit the allowed maximal sample ",
          "size in the control arm, nC,\n to: ", nCmax)
  if (J == 1) {
    message(" - The sample size in the experimental arm, nE, will be set",
            " to: r x nC, with r = ", ratio)
  } else {
    if (equal) {
      message(" - You have chosen to restrict the sample sizes in the",
              " control arm in each stage, n1C\n and n2C, such that: n1C = ",
              "n2C")
    } else {
      message(" - You have chosen to allow the sample sizes in the",
              " control arm in each stage, n1C\n and n2C, to take different",
              " values")
    }
    # Typo fix: "se" -> "set".
    message(" - The sample sizes in the experimental arm in each ",
            "stage, n1E and n2E, will be set\n to: r x n1C and r x n2C ",
            "respectively, with r = ", ratio)
    if (type == "fisher") {
      # Fisher designs use per-z1 boundaries e1z1/f1z1 with three spec modes:
      # 0 = no stopping, 1 = fixed value, otherwise error-spending.
      if (efficacy_type == 0) {
        message(" - You have chosen to prevent early stopping for ",
                "efficacy. Thus e1z1 = Inf, for all z1, in all",
                "\n considered designs")
      } else if (efficacy_type == 1) {
        # -0.5 is a sentinel requesting the default delta-based boundary.
        if (efficacy_param == -0.5) {
          message(" - You have chosen to include early stopping for ",
                  "efficacy, with e1z1 = [0.5*(n1C + n1E)*delta]_* + 1, for ",
                  "all z1, in all considered designs")
        } else {
          message(" - You have chosen to include early stopping for ",
                  "efficacy, with e1z1 = ", efficacy_param, ", for all z1, in ",
                  "all considered designs.")
        }
      } else {
        message(" - You have chosen to include early stopping for ",
                "efficacy, with e1z1 chosen for each z1, in each considered ",
                "design, to control the probability of committing a type-I ",
                "error at the end of stage one to ", efficacy_param)
      }
      if (futility_type == 0) {
        message(" - You have chosen to prevent early stopping for ",
                "futility. Thus f1z1 = -Inf, for all z1, in all considered ",
                "designs")
      } else if (futility_type == 1) {
        message(" - You have chosen to include early stopping for ",
                "futility, with f1z1 = ", futility_param, ", for all z1, in ",
                "all considered designs.")
      } else {
        message(" - You have chosen to include early stopping for ",
                "futility, with f1z1 chosen for each z1, in each considered ",
                "design, to control the probability of committing a type-II ",
                "error at the end of stage one to ", futility_param)
      }
    } else {
      # Non-Fisher designs use simple logical flags for early stopping.
      if (futility) {
        message(" - You have chosen to allow early stopping for ",
                "futility")
      } else {
        if (type != "sat") {
          message(" - You have chosen to prevent early stopping for ",
                  "futility. Thus f1 = -Inf in all considered designs")
        } else {
          # Consistency fix: "-inf" -> "-Inf".
          message(" - You have chosen to prevent early stopping for ",
                  "futility. Thus fS1 = fT1 = -Inf in all considered ",
                  "designs")
        }
      }
      if (efficacy) {
        message(" - You have chosen to allow early stopping for ",
                "efficacy")
      } else {
        if (type != "sat") {
          message(" - You have chosen to prevent early stopping for ",
                  "efficacy. Thus e1 = Inf in all considered\n designs")
        } else {
          message(" - You have chosen to prevent early stopping for ",
                  "efficacy. Thus eS1 = eT1 = Inf in all\n considered ",
                  "designs")
        }
      }
    }
    # NOTE(review): this optimisation summary is only printed for two-stage
    # designs (it sits inside the J != 1 branch) — confirm that is intended.
    message("\n The design will be optimised for:")
    message(" w1ESS(piO,piO) + w2ESS(piO,piO + delta) + w3max_pi ESS(pi,",
            "pi)\n + w4max_{piC,piE} ESS(piC,piE) + w5max N")
    message(" with:")
    message(" w1 = ", w[1], ", w2 = ", w[2], ", w3 = ", w[3], ", w4 = ",
            w[4], ", w5 = ", w[5])
    message(" and piO = ", piO)
  }
}
# Print (via message()) a header describing the design whose operating
# characteristics are being determined analytically, followed by the grid of
# response-probability pairs evaluated.
#
# Arguments:
#   des - design object: a list with J (stages), type, nC, nE (per-stage
#         sample sizes) and boundaries (stopping rules), as read below.
#   pi  - matrix with one (piC, piE) pair per row.
#   k   - accepted but never referenced in this function body.
#
# Returns NULL invisibly; output is via message() only.
summary_opchar <- function(des, pi, k) {
  if (des$J == 1) {
    stage <- "one-stage"
  } else {
    stage <- "two-stage"
  }
  # Per-type description and dashed-rule width.
  # NOTE(review): "barnard's" is lower-case here but "Barnard's" in
  # summary_des — inconsistent user-facing capitalisation.
  if (des$type == "barnard") {
    design <- "barnard's exact test"
    dashes <- 77
  } else if (des$type == "binomial") {
    design <- "an exact binomial test"
    dashes <- 79
  } else if (des$type == "fisher") {
    design <- "Fisher's exact test"
    dashes <- 76
  } else if (des$type == "sat") {
    design <- "one-arm and two-arm testing\n decisions"
    dashes <- 84
  }
  message(" ", rep("-", dashes))
  message(" Operating characteristics of a ", stage, " design based on ",
          design)
  message(" ", rep("-", dashes))
  message("\n You have chosen to analytically determine the operating ",
          "characteristics of a design with")
  if (des$J == 1) {
    message(" - n1C = ", des$nC)
    message(" - n1E = ", des$nE)
    if (des$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", round(des$boundaries$e1, 3))
    } else if (des$type == "fisher") {
      # Fisher boundaries are per-z1 vectors; print first, second, and last.
      message(" - e10 = ", des$boundaries$e1[1], ", ..., e11 = ",
              des$boundaries$e1[2], ", ..., e1", des$nC + des$nE, " = ",
              des$boundaries$e1[des$nC + des$nE + 1])
    } else if (des$type == "sat") {
      message(" - eS1 = ", des$boundaries$eS1)
      message(" - eT1 = ", des$boundaries$eT1)
    }
  } else if (des$J == 2) {
    message(" - n1C = ", des$nC[1])
    message(" - n2C = ", des$nC[2])
    message(" - n1E = ", des$nE[1])
    message(" - n2E = ", des$nE[2])
    if (des$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", round(des$boundaries$e1, 3))
      message(" - f1 = ", round(des$boundaries$f1, 3))
      message(" - e2 = ", round(des$boundaries$e2, 3))
    } else if (des$type == "fisher") {
      # NOTE(review): for J == 2, des$nC/des$nE are length-2, so
      # des$nC + des$nE is a length-2 vector; e1[des$nC + des$nE + 1] then
      # indexes two elements. Possibly should be des$nC[1] + des$nE[1],
      # as in the e200 line below — confirm against ph2rand upstream.
      message(" - e10 = ", des$boundaries$e1[1], ", ..., e11 = ",
              des$boundaries$e1[2], ", ..., e1", des$nC + des$nE, " = ",
              des$boundaries$e1[des$nC + des$nE + 1])
      message(" - f10 = ", des$boundaries$f1[1], ", ..., f11 = ",
              des$boundaries$f1[2], ", ..., f1", des$nC + des$nE, " = ",
              des$boundaries$f1[des$nC + des$nE + 1])
      message(" - e200 = ", des$boundaries$e2[1, 1], ", ..., ",
              "e2", des$nC[1] + des$nE[1], des$nC[2] + des$nE[2], " = ",
              des$boundaries$e2[des$nC[1] + des$nE[1], des$nC[2] + des$nE[2]])
    } else if (des$type == "sat") {
      message(" - eS1 = ", des$boundaries$eS1)
      message(" - eT1 = ", des$boundaries$eT1)
      message(" - fS1 = ", des$boundaries$fS1)
      message(" - fT1 = ", des$boundaries$fT1)
      message(" - eS2 = ", des$boundaries$eS2)
      message(" - eT2 = ", des$boundaries$eT2)
    }
  }
  # Describe the pi grid: a single pair, both of two pairs, or first ... last.
  if (nrow(pi) == 1) {
    message(" when pi = (", pi[1, 1], ", ", pi[1, 2], ")'.")
  } else if (nrow(pi) == 2) {
    message(" when pi in {(", pi[1, 1], ", ", pi[1, 2], ")', (", pi[2, 1],
            ", ", pi[2, 2], ")'}.")
  } else {
    message(" when pi in {(", pi[1, 1], ", ", pi[1, 2], ")', ..., (",
            pi[nrow(pi), 1], ", ", pi[nrow(pi), 2], ")'}.")
  }
}
# Print (via message()) a full summary of a ph2rand design object x: the
# hypothesis test and its error constraints, the chosen design parameters
# (sample sizes and stopping boundaries), and key operating characteristics.
#
# Arguments:
#   x - design object: a list with J, type, alpha, beta, delta, Pi0, Pi1,
#       nC, nE, boundaries and opchar, as read below.
#
# Returns NULL invisibly; output is via message()/print() only.
summary_ph2rand_des <- function(x) {
  J <- x$J
  type <- x$type
  alpha <- x$alpha
  beta <- x$beta
  delta <- x$delta
  Pi0 <- x$Pi0
  Pi1 <- x$Pi1
  # NOTE(review): "single-stage" here vs "one-stage" in the other summary_*
  # helpers — inconsistent wording in user-facing output.
  if (J == 1) {
    stage <- "single-stage"
  } else {
    stage <- "two-stage"
  }
  # Per-type description and dashed-rule width.
  if (type == "barnard") {
    dashes <- 47
    design <- "Barnard's exact test"
  } else if (type == "binomial") {
    dashes <- 49
    design <- "an exact binomial test"
  } else if (type == "fisher") {
    dashes <- 46
    design <- "Fisher's exact test"
  } else {
    dashes <- 57
    design <- "one-arm and two-arm testing decisions"
  }
  message(" ", rep("-", dashes))
  message(" A ", stage, " trial based on ", design)
  message(" ", rep("-", dashes))
  message("\n ---------------")
  message(" Hypothesis test")
  message(" ---------------")
  message(" You have chosen to test the following hypothesis")
  message(" H0 : piE <= piC")
  message(" with the following type-I error constraint")
  # Scalar Pi0/Pi1 give a point constraint; length-2 gives an interval.
  if (length(Pi0) == 1) {
    message(" P(", Pi0, ",", Pi0, ") <= alpha = ", alpha)
  } else {
    message(" max_{pi in Pi0} P(pi,pi) <= alpha = ", alpha, ", Pi0 = [",
            Pi0[1], ",", Pi0[2], "]")
  }
  message(" and the following type-II error constraint")
  if (length(Pi1) == 1) {
    message(" P(", Pi1, ",", Pi1 + delta, ") >= 1 - beta = ", 1 - beta)
  } else {
    message(" max_{pi in Pi1} P(pi,pi+delta) >= 1 - beta = ", 1 - beta,
            ", Pi1 = [", Pi1[1], ",", Pi1[2], "], delta = ", delta)
  }
  message("\n -----------------")
  message(" Design parameters")
  message(" -----------------")
  message(" The design has:")
  if (x$J == 1) {
    message(" - n1C = ", x$nC)
    message(" - n1E = ", x$nE)
    if (x$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", round(x$boundaries$e1, 3))
    } else if (x$type == "fisher") {
      # Fisher boundaries are per-z1 vectors; print first, second, and last.
      message(" - e10 = ", x$boundaries$e1[1], ", ..., e11 = ",
              x$boundaries$e1[2], ", ..., e1", x$nC + x$nE, " = ",
              x$boundaries$e1[x$nC + x$nE + 1])
    } else if (x$type == "sat") {
      message(" - eS1 = ", x$boundaries$eS1)
      message(" - eT1 = ", x$boundaries$eT1)
    }
  } else if (x$J == 2) {
    message(" - n1C = ", x$nC[1])
    message(" - n2C = ", x$nC[2])
    message(" - n1E = ", x$nE[1])
    message(" - n2E = ", x$nE[2])
    if (x$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", round(x$boundaries$e1, 3))
      message(" - f1 = ", round(x$boundaries$f1, 3))
      message(" - e2 = ", round(x$boundaries$e2, 3))
    } else if (x$type == "fisher") {
      # NOTE(review): x$nC + x$nE is length-2 when J == 2, so these index
      # expressions select two elements — confirm against ph2rand upstream.
      message(" - e10 = ", x$boundaries$e1[1], ", ..., e11 = ",
              x$boundaries$e1[2], ", ..., e1", x$nC + x$nE, " = ",
              x$boundaries$e1[x$nC + x$nE + 1])
      message(" - f10 = ", x$boundaries$f1[1], ", ..., f11 = ",
              x$boundaries$f1[2], ", ..., f1", x$nC + x$nE, " = ",
              x$boundaries$f1[x$nC + x$nE + 1])
      message(" - e200 = ", x$boundaries$e2[1, 1], ", ..., ",
              "e2", x$nC[1] + x$nE[1], x$nC[2] + x$nE[2], " = ",
              x$boundaries$e2[x$nC[1] + x$nE[1], x$nC[2] + x$nE[2]])
    } else if (x$type == "sat") {
      message(" - eS1 = ", x$boundaries$eS1)
      message(" - eT1 = ", x$boundaries$eT1)
      message(" - fS1 = ", x$boundaries$fS1)
      message(" - fT1 = ", x$boundaries$fT1)
      message(" - eS2 = ", x$boundaries$eS2)
      message(" - eT2 = ", x$boundaries$eT2)
    }
  }
  message("\n -------------------------")
  message(" Operating Characteristics")
  message(" -------------------------")
  message(" Key operating characteristics include")
  if (x$J == 1) {
    print(x$opchar)
  } else {
    # Columns 11-13 are dropped from the two-stage opchar display.
    print(x$opchar[, -(11:13)])
  }
}
# Print (via message()) a header describing the design whose outcome PMF is
# being computed, followed by the grid of response-probability pairs.
#
# Arguments:
#   des - design object: a list with J, type, nC, nE and boundaries.
#   pi  - matrix with one (piC, piE) pair per row.
#   k   - accepted but never referenced in this function body.
#
# Returns NULL invisibly; output is via message() only.
summary_pmf <- function(des, pi, k) {
  if (des$J == 1) {
    stage <- "one-stage"
  } else {
    stage <- "two-stage"
  }
  # Per-type description and dashed-rule width.
  # NOTE(review): lower-case "barnard's" is inconsistent with "Barnard's"
  # in summary_des.
  if (des$type == "barnard") {
    design <- "barnard's exact test"
    dashes <- 55
  } else if (des$type == "binomial") {
    design <- "an exact binomial test"
    dashes <- 57
  } else if (des$type == "fisher") {
    design <- "Fisher's exact test"
    dashes <- 54
  } else if (des$type == "sat") {
    design <- "single-arm and two-arm testing\n decisions"
    dashes <- 65
  }
  message(" ", rep("-", dashes))
  message(" PMF of a ", stage, " design based on ", design)
  message(" ", rep("-", dashes))
  message("\n You have chosen to find the PMF of a design with")
  if (des$J == 1) {
    message(" - n1C = ", des$nC)
    message(" - n1E = ", des$nE)
    if (des$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", round(des$boundaries$e1, 3))
    } else if (des$type == "fisher") {
      # Fisher boundaries are per-z1 vectors; print first, second, and last.
      message(" - e10 = ", des$boundaries$e1[1], ", ..., e11 = ",
              des$boundaries$e1[2], ", ..., e1", des$nC + des$nE, " = ",
              des$boundaries$e1[des$nC + des$nE + 1])
    } else if (des$type == "sat") {
      message(" - eS1 = ", des$boundaries$eS1)
      message(" - eT1 = ", des$boundaries$eT1)
    }
  } else if (des$J == 2) {
    message(" - n1C = ", des$nC[1])
    message(" - n2C = ", des$nC[2])
    message(" - n1E = ", des$nE[1])
    message(" - n2E = ", des$nE[2])
    if (des$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", round(des$boundaries$e1, 3))
      message(" - f1 = ", round(des$boundaries$f1, 3))
      message(" - e2 = ", round(des$boundaries$e2, 3))
    } else if (des$type == "fisher") {
      # NOTE(review): des$nC + des$nE is length-2 when J == 2 — these lines
      # index two elements; confirm against ph2rand upstream.
      message(" - e10 = ", des$boundaries$e1[1], ", ..., e11 = ",
              des$boundaries$e1[2], ", ..., e1", des$nC + des$nE, " = ",
              des$boundaries$e1[des$nC + des$nE + 1])
      message(" - f10 = ", des$boundaries$f1[1], ", ..., f11 = ",
              des$boundaries$f1[2], ", ..., f1", des$nC + des$nE, " = ",
              des$boundaries$f1[des$nC + des$nE + 1])
      message(" - e200 = ", des$boundaries$e2[1, 1], ", ..., ",
              "e2", des$nC[1] + des$nE[1], des$nC[2] + des$nE[2], " = ",
              des$boundaries$e2[des$nC[1] + des$nE[1], des$nC[2] + des$nE[2]])
    } else if (des$type == "sat") {
      message(" - eS1 = ", des$boundaries$eS1)
      message(" - eT1 = ", des$boundaries$eT1)
      message(" - fS1 = ", des$boundaries$fS1)
      message(" - fT1 = ", des$boundaries$fT1)
      message(" - eS2 = ", des$boundaries$eS2)
      message(" - eT2 = ", des$boundaries$eT2)
    }
  }
  # Describe the pi grid: one pair, two pairs, or first ... last.
  if (nrow(pi) == 1) {
    message(" when pi = (", pi[1, 1], ", ", pi[1, 2], ")'.")
  } else if (nrow(pi) == 2) {
    message(" when pi in {(", pi[1, 1], ", ", pi[1, 2], ")', (", pi[2, 1],
            ", ", pi[2, 2], ")'}.")
  } else {
    message(" when pi in {(", pi[1, 1], ", ", pi[1, 2], ")', ..., (",
            pi[nrow(pi), 1], ", ", pi[nrow(pi), 2], ")'}.")
  }
}
# Print (via message()) a header describing the design whose operating
# characteristics will be estimated by simulation, the grid of
# response-probability pairs, and the number of simulation replicates.
#
# Arguments:
#   des        - design object: a list with J, type, nC, nE and boundaries.
#   pi         - matrix with one (piC, piE) pair per row.
#   k          - accepted but never referenced in this function body.
#   replicates - number of simulation replicates per pi row.
#
# Returns NULL invisibly; output is via message() only.
summary_sim <- function(des, pi, k, replicates) {
  if (des$J == 1) {
    stage <- "one-stage"
  } else {
    stage <- "two-stage"
  }
  # Per-type description and dashed-rule width (same table as summary_opchar).
  # NOTE(review): lower-case "barnard's" is inconsistent with "Barnard's"
  # in summary_des.
  if (des$type == "barnard") {
    design <- "barnard's exact test"
    dashes <- 77
  } else if (des$type == "binomial") {
    design <- "an exact binomial test"
    dashes <- 79
  } else if (des$type == "fisher") {
    design <- "Fisher's exact test"
    dashes <- 76
  } else if (des$type == "sat") {
    design <- "one-arm and two-arm testing\n decisions"
    dashes <- 84
  }
  message(" ", rep("-", dashes))
  message(" Operating characteristics of a ", stage, " design based on ",
          design)
  message(" ", rep("-", dashes))
  message("\n You have chosen to estimate via simulation the operating ",
          "characteristics of a design\n with")
  if (des$J == 1) {
    message(" - n1C = ", des$nC)
    message(" - n1E = ", des$nE)
    if (des$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", round(des$boundaries$e1, 3))
    } else if (des$type == "fisher") {
      # Fisher boundaries are per-z1 vectors; print first, second, and last.
      message(" - e10 = ", des$boundaries$e1[1], ", ..., e11 = ",
              des$boundaries$e1[2], ", ..., e1", des$nC + des$nE, " = ",
              des$boundaries$e1[des$nC + des$nE + 1])
    } else if (des$type == "sat") {
      message(" - eS1 = ", des$boundaries$eS1)
      message(" - eT1 = ", des$boundaries$eT1)
    }
  } else if (des$J == 2) {
    message(" - n1C = ", des$nC[1])
    message(" - n2C = ", des$nC[2])
    message(" - n1E = ", des$nE[1])
    message(" - n2E = ", des$nE[2])
    if (des$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", round(des$boundaries$e1, 3))
      message(" - f1 = ", round(des$boundaries$f1, 3))
      message(" - e2 = ", round(des$boundaries$e2, 3))
    } else if (des$type == "fisher") {
      # NOTE(review): des$nC + des$nE is length-2 when J == 2 — these lines
      # index two elements; confirm against ph2rand upstream.
      message(" - e10 = ", des$boundaries$e1[1], ", ..., e11 = ",
              des$boundaries$e1[2], ", ..., e1", des$nC + des$nE, " = ",
              des$boundaries$e1[des$nC + des$nE + 1])
      message(" - f10 = ", des$boundaries$f1[1], ", ..., f11 = ",
              des$boundaries$f1[2], ", ..., f1", des$nC + des$nE, " = ",
              des$boundaries$f1[des$nC + des$nE + 1])
      message(" - e200 = ", des$boundaries$e2[1, 1], ", ..., ",
              "e2", des$nC[1] + des$nE[1], des$nC[2] + des$nE[2], " = ",
              des$boundaries$e2[des$nC[1] + des$nE[1], des$nC[2] + des$nE[2]])
    } else if (des$type == "sat") {
      message(" - eS1 = ", des$boundaries$eS1)
      message(" - eT1 = ", des$boundaries$eT1)
      message(" - fS1 = ", des$boundaries$fS1)
      message(" - fT1 = ", des$boundaries$fT1)
      message(" - eS2 = ", des$boundaries$eS2)
      message(" - eT2 = ", des$boundaries$eT2)
    }
  }
  # Describe the pi grid: one pair, two pairs, or first ... last.
  if (nrow(pi) == 1) {
    message(" when pi = (", pi[1, 1], ", ", pi[1, 2], ")'.")
  } else if (nrow(pi) == 2) {
    message(" when pi in {(", pi[1, 1], ", ", pi[1, 2], ")', (", pi[2, 1],
            ", ", pi[2, 2], ")'}.")
  } else {
    message(" when pi in {(", pi[1, 1], ", ", pi[1, 2], ")', ..., (",
            pi[nrow(pi), 1], ", ", pi[nrow(pi), 2], ")'}.")
  }
  message("\n ", replicates, " simulations will be used for each value of pi.")
}
# Print (via message()) a summary of the design whose terminal points are
# being enumerated: per-stage sample sizes and stopping boundaries.
#
# Arguments:
#   des - design object: a list with J (stages), type ("barnard",
#         "binomial", "fisher" or "sat"), nC, nE (per-stage sample sizes)
#         and boundaries (stopping rules).
#   k   - accepted for interface compatibility with the other summary_*
#         helpers; not used in this function body.
#
# Returns NULL invisibly; output is via message() only.
summary_terminal <- function(des, k) {
  if (des$J == 1) {
    stage <- "one-stage"
  } else {
    stage <- "two-stage"
  }
  # Per-type description and width of the dashed rule; the widths (67/69/66)
  # equal the title line's length once "design" is included in it.
  if (des$type == "barnard") {
    design <- "barnard's exact test"
    dashes <- 67
  } else if (des$type == "binomial") {
    design <- "an exact binomial test"
    dashes <- 69
  } else if (des$type == "fisher") {
    design <- "Fisher's exact test"
    dashes <- 66
  } else if (des$type == "sat") {
    design <- "single-arm and two-arm testing\n decisions"
    dashes <- 77
  }
  message(" ", rep("-", dashes))
  # Bug fix: the title previously read "... a one-stage based on ..."; the
  # word "design" was missing (the dash widths above confirm it belongs).
  message(" Terminal points of a ", stage, " design based on ", design)
  message(" ", rep("-", dashes))
  message("\n You have chosen to find the terminal points of a design with")
  if (des$J == 1) {
    message(" - n1C = ", des$nC)
    message(" - n1E = ", des$nE)
    if (des$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", des$boundaries$e1)
    } else if (des$type == "fisher") {
      # Fisher boundaries are per-z1 vectors; print first, second, and last.
      message(" - e10 = ", des$boundaries$e1[1], ", ..., e11 = ",
              des$boundaries$e1[2], ", ..., e1", des$nC + des$nE, " = ",
              des$boundaries$e1[des$nC + des$nE + 1])
    } else if (des$type == "sat") {
      message(" - eS1 = ", des$boundaries$eS1)
      message(" - eT1 = ", des$boundaries$eT1)
    }
  } else if (des$J == 2) {
    message(" - n1C = ", des$nC[1])
    message(" - n2C = ", des$nC[2])
    message(" - n1E = ", des$nE[1])
    message(" - n2E = ", des$nE[2])
    if (des$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", des$boundaries$e1)
      message(" - f1 = ", des$boundaries$f1)
      message(" - e2 = ", des$boundaries$e2)
    } else if (des$type == "fisher") {
      # NOTE(review): des$nC + des$nE is length-2 when J == 2 — these lines
      # index two elements; confirm against ph2rand upstream.
      message(" - e10 = ", des$boundaries$e1[1], ", ..., e11 = ",
              des$boundaries$e1[2], ", ..., e1", des$nC + des$nE, " = ",
              des$boundaries$e1[des$nC + des$nE + 1])
      message(" - f10 = ", des$boundaries$f1[1], ", ..., f11 = ",
              des$boundaries$f1[2], ", ..., f1", des$nC + des$nE, " = ",
              des$boundaries$f1[des$nC + des$nE + 1])
      message(" - e200 = ", des$boundaries$e2[1, 1], ", ..., ",
              "e2", des$nC[1] + des$nE[1], des$nC[2] + des$nE[2], " = ",
              des$boundaries$e2[des$nC[1] + des$nE[1], des$nC[2] + des$nE[2]])
    } else if (des$type == "sat") {
      message(" - eS1 = ", des$boundaries$eS1)
      message(" - eT1 = ", des$boundaries$eT1)
      message(" - fS1 = ", des$boundaries$fS1)
      message(" - fT1 = ", des$boundaries$fT1)
      message(" - eS2 = ", des$boundaries$eS2)
      message(" - eT2 = ", des$boundaries$eT2)
    }
  }
}
|
/R/summaries.R
|
no_license
|
cran/ph2rand
|
R
| false
| false
| 21,759
|
r
|
## Print (via message()) a description of the inputs to a design search.
##
## Arguments mirror those of the corresponding design functions: J (number
## of stages), type ("barnard", "binomial", "fisher" or otherwise), error
## rates alpha/beta, treatment effect delta, allocation ratio, null and
## alternative probability ranges Pi0/Pi1, maximal control-arm sample size
## nCmax, whether stage sizes are equal, optimality weights w, optimisation
## probability piO, and the early-stopping settings.
## Returns: NULL invisibly; called for its printing side effect.
summary_des <- function(J, type, alpha, beta, delta, ratio, Pi0, Pi1,
                        nCmax, equal, w, piO, efficacy, futility,
                        efficacy_type, efficacy_param, futility_type,
                        futility_param) {
  if (J == 1) {
    stage <- "one-stage"
  } else {
    stage <- "two-stage"
  }
  ## Width of the dashed rule and the test description.
  if (type == "barnard") {
    dashes <- 57
    design <- "Barnard's exact test"
  } else if (type == "binomial") {
    dashes <- 59
    design <- "an exact binomial test"
  } else if (type == "fisher") {
    dashes <- 56
    design <- "Fisher's exact test"
  } else {
    dashes <- 74
    design <- "one-arm and two-arm testing decisions"
  }
  message(" ", rep("-", dashes))
  message(" Design of a ", stage, " trial based on ", design)
  message(" ", rep("-", dashes))
  message("\n ---------------")
  message(" Hypothesis test")
  message(" ---------------")
  message(" You have chosen to test the following hypothesis")
  message("     H0 : piE <= piC")
  message(" with the following type-I error constraint")
  if (length(Pi0) == 1) {
    message("     P(", Pi0, ",", Pi0, ") <= alpha = ", alpha)
  } else {
    message("     max_{pi in Pi0} P(pi,pi) <= alpha = ", alpha, ", Pi0 = [",
            Pi0[1], ",", Pi0[2], "]")
  }
  message(" and the following type-II error constraint")
  if (length(Pi1) == 1) {
    message("     P(", Pi1, ",", Pi1 + delta, ") >= 1 - beta = ", 1 - beta)
  } else {
    message("     max_{pi in Pi1} P(pi,pi+delta) >= 1 - beta = ", 1 - beta,
            ", Pi1 = [", Pi1[1], ",", Pi1[2], "], delta = ", delta)
  }
  message("\n ------------")
  message(" Restrictions")
  message(" ------------")
  message(" - You have chosen to limit the allowed maximal sample ",
          "size in the control arm, nC,\n   to: ", nCmax)
  if (J == 1) {
    message(" - The sample size in the experimental arm, nE, will be set",
            " to: r x nC, with r = ", ratio)
  } else {
    if (equal) {
      message(" - You have chosen to restrict the sample sizes in the",
              " control arm in each stage, n1C\n   and n2C, such that: n1C = ",
              "n2C")
    } else {
      message(" - You have chosen to allow the sample sizes in the",
              " control arm in each stage, n1C\n   and n2C, to take different",
              " values")
    }
    ## Fixed typo: "will be se" -> "will be set" (cf. the J == 1 branch).
    message(" - The sample sizes in the experimental arm in each ",
            "stage, n1E and n2E, will be set\n   to: r x n1C and r x n2C ",
            "respectively, with r = ", ratio)
    if (type == "fisher") {
      if (efficacy_type == 0) {
        message(" - You have chosen to prevent early stopping for ",
                "efficacy. Thus e1z1 = Inf, for all z1, in all",
                "\n   considered designs")
      } else if (efficacy_type == 1) {
        if (efficacy_param == -0.5) {
          message(" - You have chosen to include early stopping for ",
                  "efficacy, with e1z1 = [0.5*(n1C + n1E)*delta]_* + 1, for ",
                  "all z1, in all considered designs")
        } else {
          message(" - You have chosen to include early stopping for ",
                  "efficacy, with e1z1 = ", efficacy_param, ", for all z1, in ",
                  "all considered designs.")
        }
      } else {
        message(" - You have chosen to include early stopping for ",
                "efficacy, with e1z1 chosen for each z1, in each considered ",
                "design, to control the probability of committing a type-I ",
                "error at the end of stage one to ", efficacy_param)
      }
      if (futility_type == 0) {
        message(" - You have chosen to prevent early stopping for ",
                "futility. Thus f1z1 = -Inf, for all z1, in all considered ",
                "designs")
      } else if (futility_type == 1) {
        message(" - You have chosen to include early stopping for ",
                "futility, with f1z1 = ", futility_param, ", for all z1, in ",
                "all considered designs.")
      } else {
        message(" - You have chosen to include early stopping for ",
                "futility, with f1z1 chosen for each z1, in each considered ",
                "design, to control the probability of committing a type-II ",
                "error at the end of stage one to ", futility_param)
      }
    } else {
      if (futility) {
        message(" - You have chosen to allow early stopping for ",
                "futility")
      } else {
        if (type != "sat") {
          message(" - You have chosen to prevent early stopping for ",
                  "futility. Thus f1 = -Inf in all considered designs")
        } else {
          message(" - You have chosen to prevent early stopping for ",
                  "futility. Thus fS1 = fT1 = -inf in all considered ",
                  "designs")
        }
      }
      if (efficacy) {
        message(" - You have chosen to allow early stopping for ",
                "efficacy")
      } else {
        if (type != "sat") {
          message(" - You have chosen to prevent early stopping for ",
                  "efficacy. Thus e1 = Inf in all considered\n   designs")
        } else {
          message(" - You have chosen to prevent early stopping for ",
                  "efficacy. Thus eS1 = eT1 = Inf in all\n   considered ",
                  "designs")
        }
      }
    }
    ## NOTE(review): this optimisation summary only prints for two-stage
    ## designs (it sits inside the J != 1 branch) -- confirm one-stage
    ## designs should omit it.
    message("\n The design will be optimised for:")
    message("   w1ESS(piO,piO) + w2ESS(piO,piO + delta) + w3max_pi ESS(pi,",
            "pi)\n     + w4max_{piC,piE} ESS(piC,piE) + w5max N")
    message(" with:")
    message("   w1 = ", w[1], ", w2 = ", w[2], ", w3 = ", w[3], ", w4 = ",
            w[4], ", w5 = ", w[5])
    message(" and piO = ", piO)
  }
}
## Print (via message()) a summary of the inputs to an analytical
## operating-characteristics calculation.
##
## Arguments:
##   des: Design object with elements J, type ("barnard", "binomial",
##        "fisher" or "sat"), nC, nE and boundaries.
##   pi:  Matrix of response-probability scenarios; each row is a
##        (piC, piE) pair. (Deliberately shadows base pi, as in the
##        package's other helpers.)
##   k:   Stages under consideration (unused here; retained for a
##        signature consistent with the other summary_*() helpers).
## Returns: NULL invisibly; called for its printing side effect.
summary_opchar <- function(des, pi, k) {
  if (des$J == 1) {
    stage <- "one-stage"
  } else {
    stage <- "two-stage"
  }
  if (des$type == "barnard") {
    design <- "barnard's exact test"
    dashes <- 77
  } else if (des$type == "binomial") {
    design <- "an exact binomial test"
    dashes <- 79
  } else if (des$type == "fisher") {
    design <- "Fisher's exact test"
    dashes <- 76
  } else if (des$type == "sat") {
    design <- "one-arm and two-arm testing\n decisions"
    dashes <- 84
  }
  message(" ", rep("-", dashes))
  message(" Operating characteristics of a ", stage, " design based on ",
          design)
  message(" ", rep("-", dashes))
  message("\n You have chosen to analytically determine the operating ",
          "characteristics of a design with")
  if (des$J == 1) {
    message(" - n1C = ", des$nC)
    message(" - n1E = ", des$nE)
    if (des$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", round(des$boundaries$e1, 3))
    } else if (des$type == "fisher") {
      message(" - e10 = ", des$boundaries$e1[1], ", ..., e11 = ",
              des$boundaries$e1[2], ", ..., e1", des$nC + des$nE, " = ",
              des$boundaries$e1[des$nC + des$nE + 1])
    } else if (des$type == "sat") {
      message(" - eS1 = ", des$boundaries$eS1)
      message(" - eT1 = ", des$boundaries$eT1)
    }
  } else if (des$J == 2) {
    message(" - n1C = ", des$nC[1])
    message(" - n2C = ", des$nC[2])
    message(" - n1E = ", des$nE[1])
    message(" - n2E = ", des$nE[2])
    if (des$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", round(des$boundaries$e1, 3))
      message(" - f1 = ", round(des$boundaries$f1, 3))
      message(" - e2 = ", round(des$boundaries$e2, 3))
    } else if (des$type == "fisher") {
      ## Stage-1 Fisher boundaries run over z1 = 0, ..., n1C + n1E, so use
      ## the stage-1 sample sizes only (nC and nE are length-2 vectors in
      ## two-stage designs; the previous unindexed sums pasted both stage
      ## totals into the label and extracted two elements).
      message(" - e10 = ", des$boundaries$e1[1], ", ..., e11 = ",
              des$boundaries$e1[2], ", ..., e1", des$nC[1] + des$nE[1],
              " = ", des$boundaries$e1[des$nC[1] + des$nE[1] + 1])
      message(" - f10 = ", des$boundaries$f1[1], ", ..., f11 = ",
              des$boundaries$f1[2], ", ..., f1", des$nC[1] + des$nE[1],
              " = ", des$boundaries$f1[des$nC[1] + des$nE[1] + 1])
      message(" - e200 = ", des$boundaries$e2[1, 1], ", ..., ",
              "e2", des$nC[1] + des$nE[1], des$nC[2] + des$nE[2], " = ",
              des$boundaries$e2[des$nC[1] + des$nE[1], des$nC[2] + des$nE[2]])
    } else if (des$type == "sat") {
      message(" - eS1 = ", des$boundaries$eS1)
      message(" - eT1 = ", des$boundaries$eT1)
      message(" - fS1 = ", des$boundaries$fS1)
      message(" - fT1 = ", des$boundaries$fT1)
      message(" - eS2 = ", des$boundaries$eS2)
      message(" - eT2 = ", des$boundaries$eT2)
    }
  }
  ## Describe the pi scenarios (one, two, or many rows).
  if (nrow(pi) == 1) {
    message(" when pi = (", pi[1, 1], ", ", pi[1, 2], ")'.")
  } else if (nrow(pi) == 2) {
    message(" when pi in {(", pi[1, 1], ", ", pi[1, 2], ")', (", pi[2, 1],
            ", ", pi[2, 2], ")'}.")
  } else {
    message(" when pi in {(", pi[1, 1], ", ", pi[1, 2], ")', ..., (",
            pi[nrow(pi), 1], ", ", pi[nrow(pi), 2], ")'}.")
  }
}
## Print (via message()) a full description of a ph2rand design object:
## the hypothesis test it addresses, its design parameters/boundaries and
## its key operating characteristics.
##
## Arguments:
##   x: Design object with elements J, type, alpha, beta, delta, Pi0, Pi1,
##      nC, nE, boundaries and opchar.
## Returns: NULL invisibly; called for its printing side effect.
summary_ph2rand_des <- function(x) {
  J     <- x$J
  type  <- x$type
  alpha <- x$alpha
  beta  <- x$beta
  delta <- x$delta
  Pi0   <- x$Pi0
  Pi1   <- x$Pi1
  if (J == 1) {
    stage <- "single-stage"
  } else {
    stage <- "two-stage"
  }
  if (type == "barnard") {
    dashes <- 47
    design <- "Barnard's exact test"
  } else if (type == "binomial") {
    dashes <- 49
    design <- "an exact binomial test"
  } else if (type == "fisher") {
    dashes <- 46
    design <- "Fisher's exact test"
  } else {
    dashes <- 57
    design <- "one-arm and two-arm testing decisions"
  }
  message(" ", rep("-", dashes))
  message(" A ", stage, " trial based on ", design)
  message(" ", rep("-", dashes))
  message("\n ---------------")
  message(" Hypothesis test")
  message(" ---------------")
  message(" You have chosen to test the following hypothesis")
  message("     H0 : piE <= piC")
  message(" with the following type-I error constraint")
  if (length(Pi0) == 1) {
    message("     P(", Pi0, ",", Pi0, ") <= alpha = ", alpha)
  } else {
    message("     max_{pi in Pi0} P(pi,pi) <= alpha = ", alpha, ", Pi0 = [",
            Pi0[1], ",", Pi0[2], "]")
  }
  message(" and the following type-II error constraint")
  if (length(Pi1) == 1) {
    message("     P(", Pi1, ",", Pi1 + delta, ") >= 1 - beta = ", 1 - beta)
  } else {
    message("     max_{pi in Pi1} P(pi,pi+delta) >= 1 - beta = ", 1 - beta,
            ", Pi1 = [", Pi1[1], ",", Pi1[2], "], delta = ", delta)
  }
  message("\n -----------------")
  message(" Design parameters")
  message(" -----------------")
  message(" The design has:")
  if (x$J == 1) {
    message(" - n1C = ", x$nC)
    message(" - n1E = ", x$nE)
    if (x$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", round(x$boundaries$e1, 3))
    } else if (x$type == "fisher") {
      message(" - e10 = ", x$boundaries$e1[1], ", ..., e11 = ",
              x$boundaries$e1[2], ", ..., e1", x$nC + x$nE, " = ",
              x$boundaries$e1[x$nC + x$nE + 1])
    } else if (x$type == "sat") {
      message(" - eS1 = ", x$boundaries$eS1)
      message(" - eT1 = ", x$boundaries$eT1)
    }
  } else if (x$J == 2) {
    message(" - n1C = ", x$nC[1])
    message(" - n2C = ", x$nC[2])
    message(" - n1E = ", x$nE[1])
    message(" - n2E = ", x$nE[2])
    if (x$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", round(x$boundaries$e1, 3))
      message(" - f1 = ", round(x$boundaries$f1, 3))
      message(" - e2 = ", round(x$boundaries$e2, 3))
    } else if (x$type == "fisher") {
      ## Stage-1 Fisher boundaries run over z1 = 0, ..., n1C + n1E: use
      ## stage-1 sample sizes only (nC and nE are length-2 vectors here;
      ## the previous unindexed sums garbled the label and extracted two
      ## elements).
      message(" - e10 = ", x$boundaries$e1[1], ", ..., e11 = ",
              x$boundaries$e1[2], ", ..., e1", x$nC[1] + x$nE[1], " = ",
              x$boundaries$e1[x$nC[1] + x$nE[1] + 1])
      message(" - f10 = ", x$boundaries$f1[1], ", ..., f11 = ",
              x$boundaries$f1[2], ", ..., f1", x$nC[1] + x$nE[1], " = ",
              x$boundaries$f1[x$nC[1] + x$nE[1] + 1])
      message(" - e200 = ", x$boundaries$e2[1, 1], ", ..., ",
              "e2", x$nC[1] + x$nE[1], x$nC[2] + x$nE[2], " = ",
              x$boundaries$e2[x$nC[1] + x$nE[1], x$nC[2] + x$nE[2]])
    } else if (x$type == "sat") {
      message(" - eS1 = ", x$boundaries$eS1)
      message(" - eT1 = ", x$boundaries$eT1)
      message(" - fS1 = ", x$boundaries$fS1)
      message(" - fT1 = ", x$boundaries$fT1)
      message(" - eS2 = ", x$boundaries$eS2)
      message(" - eT2 = ", x$boundaries$eT2)
    }
  }
  message("\n -------------------------")
  message(" Operating Characteristics")
  message(" -------------------------")
  message(" Key operating characteristics include")
  if (x$J == 1) {
    print(x$opchar)
  } else {
    ## Two-stage designs: drop columns 11-13 of opchar from the printout.
    print(x$opchar[, -(11:13)])
  }
}
## Print (via message()) a summary of the inputs to a PMF calculation.
##
## Arguments:
##   des: Design object with elements J, type ("barnard", "binomial",
##        "fisher" or "sat"), nC, nE and boundaries.
##   pi:  Matrix of response-probability scenarios; each row is a
##        (piC, piE) pair.
##   k:   Stages under consideration (unused here; retained for a
##        signature consistent with the other summary_*() helpers).
## Returns: NULL invisibly; called for its printing side effect.
summary_pmf <- function(des, pi, k) {
  if (des$J == 1) {
    stage <- "one-stage"
  } else {
    stage <- "two-stage"
  }
  if (des$type == "barnard") {
    design <- "barnard's exact test"
    dashes <- 55
  } else if (des$type == "binomial") {
    design <- "an exact binomial test"
    dashes <- 57
  } else if (des$type == "fisher") {
    design <- "Fisher's exact test"
    dashes <- 54
  } else if (des$type == "sat") {
    design <- "single-arm and two-arm testing\n decisions"
    dashes <- 65
  }
  message(" ", rep("-", dashes))
  message(" PMF of a ", stage, " design based on ", design)
  message(" ", rep("-", dashes))
  message("\n You have chosen to find the PMF of a design with")
  if (des$J == 1) {
    message(" - n1C = ", des$nC)
    message(" - n1E = ", des$nE)
    if (des$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", round(des$boundaries$e1, 3))
    } else if (des$type == "fisher") {
      message(" - e10 = ", des$boundaries$e1[1], ", ..., e11 = ",
              des$boundaries$e1[2], ", ..., e1", des$nC + des$nE, " = ",
              des$boundaries$e1[des$nC + des$nE + 1])
    } else if (des$type == "sat") {
      message(" - eS1 = ", des$boundaries$eS1)
      message(" - eT1 = ", des$boundaries$eT1)
    }
  } else if (des$J == 2) {
    message(" - n1C = ", des$nC[1])
    message(" - n2C = ", des$nC[2])
    message(" - n1E = ", des$nE[1])
    message(" - n2E = ", des$nE[2])
    if (des$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", round(des$boundaries$e1, 3))
      message(" - f1 = ", round(des$boundaries$f1, 3))
      message(" - e2 = ", round(des$boundaries$e2, 3))
    } else if (des$type == "fisher") {
      ## Stage-1 Fisher boundaries run over z1 = 0, ..., n1C + n1E: use
      ## stage-1 sample sizes only (nC and nE are length-2 vectors here;
      ## the previous unindexed sums pasted both stage totals into the
      ## label and extracted two elements).
      message(" - e10 = ", des$boundaries$e1[1], ", ..., e11 = ",
              des$boundaries$e1[2], ", ..., e1", des$nC[1] + des$nE[1],
              " = ", des$boundaries$e1[des$nC[1] + des$nE[1] + 1])
      message(" - f10 = ", des$boundaries$f1[1], ", ..., f11 = ",
              des$boundaries$f1[2], ", ..., f1", des$nC[1] + des$nE[1],
              " = ", des$boundaries$f1[des$nC[1] + des$nE[1] + 1])
      message(" - e200 = ", des$boundaries$e2[1, 1], ", ..., ",
              "e2", des$nC[1] + des$nE[1], des$nC[2] + des$nE[2], " = ",
              des$boundaries$e2[des$nC[1] + des$nE[1], des$nC[2] + des$nE[2]])
    } else if (des$type == "sat") {
      message(" - eS1 = ", des$boundaries$eS1)
      message(" - eT1 = ", des$boundaries$eT1)
      message(" - fS1 = ", des$boundaries$fS1)
      message(" - fT1 = ", des$boundaries$fT1)
      message(" - eS2 = ", des$boundaries$eS2)
      message(" - eT2 = ", des$boundaries$eT2)
    }
  }
  ## Describe the pi scenarios (one, two, or many rows).
  if (nrow(pi) == 1) {
    message(" when pi = (", pi[1, 1], ", ", pi[1, 2], ")'.")
  } else if (nrow(pi) == 2) {
    message(" when pi in {(", pi[1, 1], ", ", pi[1, 2], ")', (", pi[2, 1],
            ", ", pi[2, 2], ")'}.")
  } else {
    message(" when pi in {(", pi[1, 1], ", ", pi[1, 2], ")', ..., (",
            pi[nrow(pi), 1], ", ", pi[nrow(pi), 2], ")'}.")
  }
}
## Print (via message()) a summary of the inputs to a simulation-based
## operating-characteristics estimation.
##
## Arguments:
##   des:        Design object with elements J, type ("barnard",
##               "binomial", "fisher" or "sat"), nC, nE and boundaries.
##   pi:         Matrix of response-probability scenarios; each row is a
##               (piC, piE) pair.
##   k:          Stages under consideration (unused here; retained for a
##               signature consistent with the other summary_*() helpers).
##   replicates: Number of simulation replicates per scenario.
## Returns: NULL invisibly; called for its printing side effect.
summary_sim <- function(des, pi, k, replicates) {
  if (des$J == 1) {
    stage <- "one-stage"
  } else {
    stage <- "two-stage"
  }
  if (des$type == "barnard") {
    design <- "barnard's exact test"
    dashes <- 77
  } else if (des$type == "binomial") {
    design <- "an exact binomial test"
    dashes <- 79
  } else if (des$type == "fisher") {
    design <- "Fisher's exact test"
    dashes <- 76
  } else if (des$type == "sat") {
    design <- "one-arm and two-arm testing\n decisions"
    dashes <- 84
  }
  message(" ", rep("-", dashes))
  message(" Operating characteristics of a ", stage, " design based on ",
          design)
  message(" ", rep("-", dashes))
  message("\n You have chosen to estimate via simulation the operating ",
          "characteristics of a design\n with")
  if (des$J == 1) {
    message(" - n1C = ", des$nC)
    message(" - n1E = ", des$nE)
    if (des$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", round(des$boundaries$e1, 3))
    } else if (des$type == "fisher") {
      message(" - e10 = ", des$boundaries$e1[1], ", ..., e11 = ",
              des$boundaries$e1[2], ", ..., e1", des$nC + des$nE, " = ",
              des$boundaries$e1[des$nC + des$nE + 1])
    } else if (des$type == "sat") {
      message(" - eS1 = ", des$boundaries$eS1)
      message(" - eT1 = ", des$boundaries$eT1)
    }
  } else if (des$J == 2) {
    message(" - n1C = ", des$nC[1])
    message(" - n2C = ", des$nC[2])
    message(" - n1E = ", des$nE[1])
    message(" - n2E = ", des$nE[2])
    if (des$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", round(des$boundaries$e1, 3))
      message(" - f1 = ", round(des$boundaries$f1, 3))
      message(" - e2 = ", round(des$boundaries$e2, 3))
    } else if (des$type == "fisher") {
      ## Stage-1 Fisher boundaries run over z1 = 0, ..., n1C + n1E: use
      ## stage-1 sample sizes only (nC and nE are length-2 vectors here;
      ## the previous unindexed sums pasted both stage totals into the
      ## label and extracted two elements).
      message(" - e10 = ", des$boundaries$e1[1], ", ..., e11 = ",
              des$boundaries$e1[2], ", ..., e1", des$nC[1] + des$nE[1],
              " = ", des$boundaries$e1[des$nC[1] + des$nE[1] + 1])
      message(" - f10 = ", des$boundaries$f1[1], ", ..., f11 = ",
              des$boundaries$f1[2], ", ..., f1", des$nC[1] + des$nE[1],
              " = ", des$boundaries$f1[des$nC[1] + des$nE[1] + 1])
      message(" - e200 = ", des$boundaries$e2[1, 1], ", ..., ",
              "e2", des$nC[1] + des$nE[1], des$nC[2] + des$nE[2], " = ",
              des$boundaries$e2[des$nC[1] + des$nE[1], des$nC[2] + des$nE[2]])
    } else if (des$type == "sat") {
      message(" - eS1 = ", des$boundaries$eS1)
      message(" - eT1 = ", des$boundaries$eT1)
      message(" - fS1 = ", des$boundaries$fS1)
      message(" - fT1 = ", des$boundaries$fT1)
      message(" - eS2 = ", des$boundaries$eS2)
      message(" - eT2 = ", des$boundaries$eT2)
    }
  }
  ## Describe the pi scenarios (one, two, or many rows).
  if (nrow(pi) == 1) {
    message(" when pi = (", pi[1, 1], ", ", pi[1, 2], ")'.")
  } else if (nrow(pi) == 2) {
    message(" when pi in {(", pi[1, 1], ", ", pi[1, 2], ")', (", pi[2, 1],
            ", ", pi[2, 2], ")'}.")
  } else {
    message(" when pi in {(", pi[1, 1], ", ", pi[1, 2], ")', ..., (",
            pi[nrow(pi), 1], ", ", pi[nrow(pi), 2], ")'}.")
  }
  message("\n ", replicates, " simulations will be used for each value of pi.")
}
## Print (via message()) a summary of the inputs to a terminal-points
## calculation for a design object.
##
## Arguments:
##   des: Design object with elements J (number of stages), type (one of
##        "barnard", "binomial", "fisher", "sat"), nC and nE (per-arm
##        sample sizes; length-J vectors) and boundaries.
##   k:   Stages under consideration (unused here; retained so the
##        signature matches the other summary_*() helpers).
## Returns: NULL invisibly; called for its printing side effect.
summary_terminal <- function(des, k) {
  if (des$J == 1) {
    stage <- "one-stage"
  } else {
    stage <- "two-stage"
  }
  ## Description of the test and the width of the dashed rule.
  if (des$type == "barnard") {
    design <- "barnard's exact test"
    dashes <- 67
  } else if (des$type == "binomial") {
    design <- "an exact binomial test"
    dashes <- 69
  } else if (des$type == "fisher") {
    design <- "Fisher's exact test"
    dashes <- 66
  } else if (des$type == "sat") {
    design <- "single-arm and two-arm testing\n decisions"
    dashes <- 77
  }
  message(" ", rep("-", dashes))
  message(" Terminal points of a ", stage, " based on ", design)
  message(" ", rep("-", dashes))
  message("\n You have chosen to find the terminal points of a design with")
  if (des$J == 1) {
    message(" - n1C = ", des$nC)
    message(" - n1E = ", des$nE)
    if (des$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", des$boundaries$e1)
    } else if (des$type == "fisher") {
      message(" - e10 = ", des$boundaries$e1[1], ", ..., e11 = ",
              des$boundaries$e1[2], ", ..., e1", des$nC + des$nE, " = ",
              des$boundaries$e1[des$nC + des$nE + 1])
    } else if (des$type == "sat") {
      message(" - eS1 = ", des$boundaries$eS1)
      message(" - eT1 = ", des$boundaries$eT1)
    }
  } else if (des$J == 2) {
    message(" - n1C = ", des$nC[1])
    message(" - n2C = ", des$nC[2])
    message(" - n1E = ", des$nE[1])
    message(" - n2E = ", des$nE[2])
    if (des$type %in% c("barnard", "binomial")) {
      message(" - e1 = ", des$boundaries$e1)
      message(" - f1 = ", des$boundaries$f1)
      message(" - e2 = ", des$boundaries$e2)
    } else if (des$type == "fisher") {
      ## Stage-1 Fisher boundaries are indexed by z1 = 0, ..., n1C + n1E,
      ## so only the stage-1 sample sizes apply. (Previously the unindexed
      ## sums des$nC + des$nE were used here, which for two-stage designs
      ## are length-2 vectors: both stages' totals were pasted into the
      ## label and two boundary elements were extracted.)
      message(" - e10 = ", des$boundaries$e1[1], ", ..., e11 = ",
              des$boundaries$e1[2], ", ..., e1", des$nC[1] + des$nE[1],
              " = ", des$boundaries$e1[des$nC[1] + des$nE[1] + 1])
      message(" - f10 = ", des$boundaries$f1[1], ", ..., f11 = ",
              des$boundaries$f1[2], ", ..., f1", des$nC[1] + des$nE[1],
              " = ", des$boundaries$f1[des$nC[1] + des$nE[1] + 1])
      ## NOTE(review): the e2 extraction below indexes [n1, n2] while the
      ## label suggests the boundary for the maximal (z1, z2); if e2's rows
      ## and columns start at z = 0 this may be off by one -- confirm
      ## against the construction of boundaries$e2.
      message(" - e200 = ", des$boundaries$e2[1, 1], ", ..., ",
              "e2", des$nC[1] + des$nE[1], des$nC[2] + des$nE[2], " = ",
              des$boundaries$e2[des$nC[1] + des$nE[1], des$nC[2] + des$nE[2]])
    } else if (des$type == "sat") {
      message(" - eS1 = ", des$boundaries$eS1)
      message(" - eT1 = ", des$boundaries$eT1)
      message(" - fS1 = ", des$boundaries$fS1)
      message(" - fT1 = ", des$boundaries$fT1)
      message(" - eS2 = ", des$boundaries$eS2)
      message(" - eT2 = ", des$boundaries$eT2)
    }
  }
}
|
## Plot 4: a 2x2 panel of household power-consumption plots for
## 1-2 February 2007, written to plot4.png (480x480 px).
## NOTE(review): expects household_power_consumption.txt (';'-separated,
## '?' marking missing values) in the working directory.
hpc <- read.csv('household_power_consumption.txt', sep=";", na.strings="?", as.is=TRUE)  # TRUE, not the reassignable alias T
## Keep only the two target dates (Date is stored as d/m/Y text).
feb_hpc <- hpc[hpc$Date == '1/2/2007' | hpc$Date == '2/2/2007', ]
## Combine the date and time columns into a single timestamp.
feb_hpc$DateTime <- strptime(paste(feb_hpc$Date, feb_hpc$Time, sep=" "), format="%d/%m/%Y %H:%M:%S")
png(file="plot4.png",width=480,height=480)
## Fill the 2x2 grid column-first: active power and sub-metering on the
## left, voltage and reactive power on the right.
par(mfcol=c(2,2))
plot(feb_hpc$DateTime,feb_hpc$Global_active_power, type="l", ylab="Global Active Power", xlab="")
plot(feb_hpc$DateTime,feb_hpc$Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
lines(feb_hpc$DateTime, feb_hpc$Sub_metering_2, col="red")
lines(feb_hpc$DateTime, feb_hpc$Sub_metering_3, col="blue")
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black", "red", "blue"),lwd=1,bty="n")
with(feb_hpc, plot(DateTime, Voltage, type="l", xlab="datetime"))
with(feb_hpc, plot(DateTime, Global_reactive_power, type="l", xlab="datetime"))
dev.off()
|
/plot4.R
|
no_license
|
retrofactor/ExData_Plotting1
|
R
| false
| false
| 897
|
r
|
## Plot 4: a 2x2 panel of household power-consumption plots for
## 1-2 February 2007, written to plot4.png (480x480 px).
## NOTE(review): expects household_power_consumption.txt (';'-separated,
## '?' marking missing values) in the working directory.
hpc <- read.csv('household_power_consumption.txt', sep=";", na.strings="?", as.is=TRUE)  # TRUE, not the reassignable alias T
## Keep only the two target dates (Date is stored as d/m/Y text).
feb_hpc <- hpc[hpc$Date == '1/2/2007' | hpc$Date == '2/2/2007', ]
## Combine the date and time columns into a single timestamp.
feb_hpc$DateTime <- strptime(paste(feb_hpc$Date, feb_hpc$Time, sep=" "), format="%d/%m/%Y %H:%M:%S")
png(file="plot4.png",width=480,height=480)
## Fill the 2x2 grid column-first: active power and sub-metering on the
## left, voltage and reactive power on the right.
par(mfcol=c(2,2))
plot(feb_hpc$DateTime,feb_hpc$Global_active_power, type="l", ylab="Global Active Power", xlab="")
plot(feb_hpc$DateTime,feb_hpc$Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
lines(feb_hpc$DateTime, feb_hpc$Sub_metering_2, col="red")
lines(feb_hpc$DateTime, feb_hpc$Sub_metering_3, col="blue")
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black", "red", "blue"),lwd=1,bty="n")
with(feb_hpc, plot(DateTime, Voltage, type="l", xlab="datetime"))
with(feb_hpc, plot(DateTime, Global_reactive_power, type="l", xlab="datetime"))
dev.off()
|
## NOTE(review): clearing the workspace, hard-coding setwd() and running
## Sweave() as side effects at the top of a script are fragile -- this only
## runs on the original author's machine.
rm( list = ls( all.names = TRUE) ) ; invisible( gc( ) )
setwd( "/users/user/Desktop/studies 2014-2015/Robust methods/assign02/tex" )
Sweave( file = "../assign02.Stex" )
## Constants of the piecewise target distribution.
## NOTE(review): B and C mask nothing critical, but F below masks base R's
## F (the FALSE shorthand) -- any later bare use of F in this session would
## call this function instead.
B <- pnorm( .5 )
C <- 8 * ( 1 - B ) / ( 3 * pi )
## Define functions
## The actual distribution function
## Piecewise CDF: Cauchy-type tail for x < 0, a scaled normal piece on
## [0, 4], and an atan piece beyond 4. suppressWarnings() hides NaN
## warnings from out-of-branch evaluations inside ifelse().
F <- function( x ) suppressWarnings(
	C * pi / 2 + ifelse( x < 0, C * atan( x ),
		B - 1 + ifelse( x <= 4, pnorm( ( x - 2 ) / 4 ),
			B + C/2 * atan( ( x - 4 )^2 ) ) ) )
## The inverse of the distribution function
## Inverse CDF (quantile function) of F, branch by branch; used for
## inverse-transform sampling in sim_study().
F_inv <- function( u ) suppressWarnings(
	ifelse( u < C * pi / 2, tan( u / C - pi / 2 ),
		ifelse( u <= C * pi / 2 + 2 * B - 1,
			4 * qnorm( u - C * pi / 2 + 1 - B ) + 2,
			4 + sqrt( tan( 2 * ( u - C * pi / 2 - 2 * B + 1 ) / C ) ) ) ) )
## Pearson--Tukey five-point estimate of the mean: the 1/16, 4/16, 8/16,
## 12/16 and 15/16 order statistics, weighted 1:1:2:1:1 (normalised by 6).
pt.mean <- function( x ) {
	## Target quantile levels and their weights.
	probs   <- c( 1, 4, 8, 12, 15 ) / 16
	weights <- c( 1, 1, 2, 1, 1 ) / 6
	## Pick the corresponding order statistics from the sorted sample.
	ordered <- sort( x )
	picks   <- ordered[ round( probs * length( x ) ) ]
	sum( weights * picks )
}
## Hodges--Lehmann estimator: the median of all pairwise (Walsh) averages
## (i >= j, i.e. including each observation averaged with itself).
hl.mean <- function( x ) {
	## Matrix of all pairwise averages.
	averages <- outer( x, x, "+" ) / 2
	## Keep the lower triangle including the diagonal (each unordered
	## pair counted once).
	keep <- lower.tri( averages, diag = TRUE )
	median( averages[ keep ] )
}
## Run a Monte Carlo study of several location estimators.
##
## Draws `samples` independent datasets of `nobs` observations from the
## distribution with inverse CDF F_inv() (inverse-transform sampling) and
## applies every estimator in `fun` to each dataset.
##
## Arguments:
##   nobs:    number of observations per simulated sample.
##   samples: number of replications.
##   seed:    optional RNG seed; only used when numeric.
##   fun:     estimators to apply. NOTE(review): column names are recovered
##            with substitute(), which only works when `fun` is supplied as
##            a literal c(f1, f2, ...) expression (as the default is) --
##            confirm all callers do this.
## Returns: a data.frame with one column per estimator and one row per
## simulated sample. (Now returned visibly; the original function ended on
## an assignment, so its value was only returned invisibly.)
sim_study <- function( nobs, samples,
		seed = NULL, fun = c( mean, pt.mean, hl.mean ) ) {
	## Run the simulation
	if( is.numeric( seed ) ) set.seed( seed )
	generated <- replicate( n = samples, {
		F_inv( runif( n = nobs ) )
	}, simplify = FALSE )
	## Compute the supplied functions of each replication
	names( fun ) <- as.character( substitute( fun )[-1] )
	result <- do.call( data.frame, args =
		lapply( fun, function( fn ) sapply( generated, fn ) ) )
	result
}
## Estimated risk of a set of estimates: the mean of the loss function
## `risk` evaluated over `data`. (The loss argument deliberately shadows
## the function's own name, matching the existing call sites.)
risk <- function( data, risk ) {
	per_sample <- do.call( risk, args = list( data ) )
	mean( per_sample )
}
## Loss functions; the default null value theta_0 = 2 matches the
## hypothesised normal distribution with mean 2 and standard deviation 4.
## Squared-error loss.
r1 <- function( theta, theta_0 = 2 ) {
	deviation <- abs( theta - theta_0 )
	deviation^2
}
## Bounded ("well") loss: absolute error capped at 1.
r2 <- function( theta, theta_0 = 2 ) {
	deviation <- abs( theta - theta_0 )
	pmin( 1, deviation )
}
## L^1 loss with an indistinguishability region of radius 0.3.
r3 <- function( theta, theta_0 = 2 ) {
	deviation <- abs( theta - theta_0 )
	ifelse( deviation >= .3, deviation, 0 )
}
## Run the study over a grid of sample sizes (10 to 640), estimating the
## r1/r2/r3 risk of each estimator (mean, pt.mean, hl.mean) from 50
## replications per sample size.
##
ss <- lapply( c( 1, 2, 4, 8, 16, 32, 64 ) * 10, function( nobs ) {
	mean_est <- sim_study( nobs = nobs, samples = 50 )
	sapply( c( r1 = r1, r2 = r2, r3 = r3 ),
		function( r ) sapply( mean_est, risk, risk = r ) )
} )
## NOTE(review): the two calls below will fail as written -- qplot() comes
## from ggplot2, which is never loaded in this script, and no objects named
## pt_mean / hl_mean exist (the estimator columns inside the elements of ss
## are named mean, pt.mean and hl.mean). Presumably plots of the estimates
## were intended -- confirm and fix.
qplot( pt_mean )
qplot( hl_mean )
|
/year_14_15/fall_2014/robust_methods/assignments/assign02/problem_3.R
|
permissive
|
ivannz/study_notes
|
R
| false
| false
| 2,585
|
r
|
## NOTE(review): clearing the workspace, hard-coding setwd() and running
## Sweave() as side effects at the top of a script are fragile -- this only
## runs on the original author's machine.
rm( list = ls( all.names = TRUE) ) ; invisible( gc( ) )
setwd( "/users/user/Desktop/studies 2014-2015/Robust methods/assign02/tex" )
Sweave( file = "../assign02.Stex" )
## Constants of the piecewise target distribution.
## NOTE(review): B and C mask nothing critical, but F below masks base R's
## F (the FALSE shorthand) -- any later bare use of F in this session would
## call this function instead.
B <- pnorm( .5 )
C <- 8 * ( 1 - B ) / ( 3 * pi )
## Define functions
## The actual distribution function
## Piecewise CDF: Cauchy-type tail for x < 0, a scaled normal piece on
## [0, 4], and an atan piece beyond 4. suppressWarnings() hides NaN
## warnings from out-of-branch evaluations inside ifelse().
F <- function( x ) suppressWarnings(
	C * pi / 2 + ifelse( x < 0, C * atan( x ),
		B - 1 + ifelse( x <= 4, pnorm( ( x - 2 ) / 4 ),
			B + C/2 * atan( ( x - 4 )^2 ) ) ) )
## The inverse of the distribution function
## Inverse CDF (quantile function) of F, branch by branch; used for
## inverse-transform sampling in sim_study().
F_inv <- function( u ) suppressWarnings(
	ifelse( u < C * pi / 2, tan( u / C - pi / 2 ),
		ifelse( u <= C * pi / 2 + 2 * B - 1,
			4 * qnorm( u - C * pi / 2 + 1 - B ) + 2,
			4 + sqrt( tan( 2 * ( u - C * pi / 2 - 2 * B + 1 ) / C ) ) ) ) )
## Pearson--Tukey five-point estimate of the mean: the 1/16, 4/16, 8/16,
## 12/16 and 15/16 order statistics, weighted 1:1:2:1:1 (normalised by 6).
pt.mean <- function( x ) {
	## Target quantile levels and their weights.
	probs   <- c( 1, 4, 8, 12, 15 ) / 16
	weights <- c( 1, 1, 2, 1, 1 ) / 6
	## Pick the corresponding order statistics from the sorted sample.
	ordered <- sort( x )
	picks   <- ordered[ round( probs * length( x ) ) ]
	sum( weights * picks )
}
## Hodges--Lehmann estimator: the median of all pairwise (Walsh) averages
## (i >= j, i.e. including each observation averaged with itself).
hl.mean <- function( x ) {
	## Matrix of all pairwise averages.
	averages <- outer( x, x, "+" ) / 2
	## Keep the lower triangle including the diagonal (each unordered
	## pair counted once).
	keep <- lower.tri( averages, diag = TRUE )
	median( averages[ keep ] )
}
## Run a Monte Carlo study of several location estimators.
##
## Draws `samples` independent datasets of `nobs` observations from the
## distribution with inverse CDF F_inv() (inverse-transform sampling) and
## applies every estimator in `fun` to each dataset.
##
## Arguments:
##   nobs:    number of observations per simulated sample.
##   samples: number of replications.
##   seed:    optional RNG seed; only used when numeric.
##   fun:     estimators to apply. NOTE(review): column names are recovered
##            with substitute(), which only works when `fun` is supplied as
##            a literal c(f1, f2, ...) expression (as the default is) --
##            confirm all callers do this.
## Returns: a data.frame with one column per estimator and one row per
## simulated sample. (Now returned visibly; the original function ended on
## an assignment, so its value was only returned invisibly.)
sim_study <- function( nobs, samples,
		seed = NULL, fun = c( mean, pt.mean, hl.mean ) ) {
	## Run the simulation
	if( is.numeric( seed ) ) set.seed( seed )
	generated <- replicate( n = samples, {
		F_inv( runif( n = nobs ) )
	}, simplify = FALSE )
	## Compute the supplied functions of each replication
	names( fun ) <- as.character( substitute( fun )[-1] )
	result <- do.call( data.frame, args =
		lapply( fun, function( fn ) sapply( generated, fn ) ) )
	result
}
## Estimated risk of a set of estimates: the mean of the loss function
## `risk` evaluated over `data`. (The loss argument deliberately shadows
## the function's own name, matching the existing call sites.)
risk <- function( data, risk ) {
	per_sample <- do.call( risk, args = list( data ) )
	mean( per_sample )
}
## Loss functions; the default null value theta_0 = 2 matches the
## hypothesised normal distribution with mean 2 and standard deviation 4.
## Squared-error loss.
r1 <- function( theta, theta_0 = 2 ) {
	deviation <- abs( theta - theta_0 )
	deviation^2
}
## Bounded ("well") loss: absolute error capped at 1.
r2 <- function( theta, theta_0 = 2 ) {
	deviation <- abs( theta - theta_0 )
	pmin( 1, deviation )
}
## L^1 loss with an indistinguishability region of radius 0.3.
r3 <- function( theta, theta_0 = 2 ) {
	deviation <- abs( theta - theta_0 )
	ifelse( deviation >= .3, deviation, 0 )
}
## Run the study over a grid of sample sizes (10 to 640), estimating the
## r1/r2/r3 risk of each estimator (mean, pt.mean, hl.mean) from 50
## replications per sample size.
##
ss <- lapply( c( 1, 2, 4, 8, 16, 32, 64 ) * 10, function( nobs ) {
	mean_est <- sim_study( nobs = nobs, samples = 50 )
	sapply( c( r1 = r1, r2 = r2, r3 = r3 ),
		function( r ) sapply( mean_est, risk, risk = r ) )
} )
## NOTE(review): the two calls below will fail as written -- qplot() comes
## from ggplot2, which is never loaded in this script, and no objects named
## pt_mean / hl_mean exist (the estimator columns inside the elements of ss
## are named mean, pt.mean and hl.mean). Presumably plots of the estimates
## were intended -- confirm and fix.
qplot( pt_mean )
qplot( hl_mean )
|
# Sentiment lexicons
# Load dplyr and tidytext
library(dplyr)
library(tidytext)
# Choose the bing lexicon
get_sentiments("bing")
# Choose the nrc lexicon
get_sentiments("nrc") %>%
count(sentiment) # Count words by sentiment
###############################################################
## Inner join to implement sentiment analysis
# geocoded_tweets has been pre-defined
geocoded_tweets
# Access bing lexicon: bing
bing <- get_sentiments("bing")
# Use data frame with text data
geocoded_tweets %>%
# With inner join, implement sentiment analysis using `bing`
inner_join(bing)
###############################################################
# Find most common sadness words
tweets_nrc
tweets_nrc %>%
# Filter to only choose the words associated with sadness
filter(sentiment == 'sadness') %>%
# Group by word
group_by(word) %>%
# Use the summarize verb to find the mean frequency
summarize(freq = mean(freq)) %>%
# Arrange to sort in order of descending frequency
arrange(desc(freq))
########################################################
# Find most common joy words
# tweets_nrc has been pre-defined
tweets_nrc
joy_words <- tweets_nrc %>%
# Filter to choose only words associated with joy
filter(sentiment == 'joy') %>%
# Group by each word
group_by(word) %>%
# Use the summarize verb to find the mean frequency
summarize(freq = mean(freq)) %>%
# Arrange to sort in order of descending frequency
arrange(desc(freq))
# Load ggplot2
library(ggplot2)
joy_words %>%
top_n(20) %>%
mutate(word = reorder(word, freq)) %>%
# Use aes() to put words on the x-axis and frequency on the y-axis
ggplot(aes(word, freq)) +
# Make a bar chart with geom_col()
geom_col() +
coord_flip()
########################################################
# common words in different states
# tweets_nrc has been pre-defined
tweets_nrc
tweets_nrc %>%
# Find only the words for the state of Utah and associated with joy
filter(state == "utah",
sentiment == 'joy') %>%
# Arrange to sort in order of descending frequency
arrange(desc(freq))
tweets_nrc %>%
# Find only the words for the state of Louisiana and associated with joy
filter(state == "louisiana",
sentiment == 'joy') %>%
# Arrange to sort in order of descending frequency
arrange(desc(freq))
########################################################
# States with most positive twitter users
# tweets_bing has been pre-defined
tweets_bing
tweets_bing %>%
# Group by two columns: state and sentiment
group_by(state, sentiment) %>%
# Use summarize to calculate the mean frequency for these groups
summarize(freq = mean(freq)) %>%
spread(sentiment, freq) %>%
ungroup() %>%
# Calculate the ratio of positive to negative words
mutate(ratio = positive / negative,
state = reorder(state, ratio)) %>%
# Use aes() to put state on the x-axis and ratio on the y-axis
ggplot(aes(state, ratio)) +
# Make a plot with points using geom_point()
geom_point()+
coord_flip()
#######################################################################################################
# Shakespeare
# Pipe the shakespeare data frame to the next line
shakespeare %>%
# Use count to find out how many titles/types there are
count(title,type)
##############################################################
# Unnesting text to words
# Load tidytext
library(tidytext)
tidy_shakespeare <- shakespeare %>%
# Group by the titles of the plays
group_by(title) %>%
# Define a new column linenumber
mutate(linenumber = row_number()) %>%
# Transform the non-tidy text data to tidy text data
unnest_tokens(word, text) %>%
ungroup()
# Pipe the tidy Shakespeare data frame to the next line
tidy_shakespeare %>%
# Use count to find out how many times each word is used
count(word, sort = TRUE)
#############################################################
# Sentiment analysis of Shakespeare
shakespeare_sentiment <- tidy_shakespeare %>%
# Implement sentiment analysis with the "bing" lexicon
inner_join(get_sentiments('bing') )
shakespeare_sentiment %>%
# Find how many positive/negative words each play has
count(title,sentiment)
##########################################
# Tragedy or Comedy?
# NOTE(review): everything from here through the song-lyrics modeling section
# below is a duplicate of the "Tidy Text Mining" script that appears again in
# full later in this file. It depends on objects defined in the earlier part
# of that script (`tidy_shakespeare`, `climate_text`, `song_lyrics`) and on
# dplyr/tidytext being attached -- confirm before running this fragment alone.
sentiment_counts <- tidy_shakespeare %>%
  # Implement sentiment analysis using the "bing" lexicon
  inner_join(get_sentiments('bing')) %>%
  # Count the number of words by title, type, and sentiment
  count(title,type,sentiment)
sentiment_counts %>%
  # Group by the titles of the plays
  group_by(title) %>%
  # Find the total number of words in each play
  mutate(total = sum(n),
         # Calculate the number of words divided by the total
         percent = n/total) %>%
  # Filter the results for only negative sentiment
  filter(sentiment == 'negative') %>%
  arrange(percent)
#####################################################
# Most common positive and negative words
word_counts <- tidy_shakespeare %>%
  # Implement sentiment analysis using the "bing" lexicon
  inner_join(get_sentiments('bing')) %>%
  # Count by word and sentiment
  count(word,sentiment)
top_words <- word_counts %>%
  # Group by sentiment
  group_by(sentiment) %>%
  # Take the top 10 for each sentiment
  # (top_n() with no `wt` uses the last column, `n`, as the ranking variable)
  top_n(10) %>%
  ungroup() %>%
  # Make word a factor in order of n
  mutate(word = reorder(word, n))
# Use aes() to put words on the x-axis and n on the y-axis
ggplot(top_words, aes(word, n, fill = sentiment)) +
  # Make a bar chart with geom_col()
  geom_col(show.legend = FALSE) +
  facet_wrap(~sentiment, scales = "free") +
  coord_flip()
################################################################
# Word contributions by play
tidy_shakespeare %>%
  # Count by title and word
  count(title, word, sort = TRUE) %>%
  # Implement sentiment analysis using the "afinn" lexicon
  inner_join(get_sentiments('afinn')) %>%
  # Filter to only examine the scores for Macbeth that are negative
  filter(title == 'The Tragedy of Macbeth', score < 0)
################################################################
# Calculating a contribution score
sentiment_contributions <- tidy_shakespeare %>%
  # Count by title and word
  count(title, word, sort = TRUE) %>%
  # Implement sentiment analysis using the "afinn" lexicon
  inner_join(get_sentiments("afinn")) %>%
  # Group by title
  group_by(title) %>%
  # Calculate a contribution for each word in each title
  mutate(contribution = score * n / sum(n)) %>%
  ungroup()
sentiment_contributions
sentiment_contributions %>%
  # Filter for Hamlet
  filter(title == 'Hamlet, Prince of Denmark') %>%
  # Arrange to see the most negative words
  arrange(contribution)
sentiment_contributions %>%
  # Filter for The Merchant of Venice
  filter(title == 'The Merchant of Venice') %>%
  # Arrange to see the most positive words
  arrange(desc(contribution))
##########################################################
# Sentiment changes through a play
tidy_shakespeare %>%
  # Implement sentiment analysis using "bing" lexicon
  inner_join(get_sentiments('bing')) %>%
  # Count using four arguments; index buckets the text into 70-line sections
  count(title,type,index = linenumber %/% 70,sentiment)
#######################################################
# Calculating net sentiment
# Load the tidyr package
library(tidyr)
tidy_shakespeare %>%
  inner_join(get_sentiments("bing")) %>%
  count(title, type, index = linenumber %/% 70, sentiment) %>%
  # Spread sentiment and n across multiple columns
  spread(sentiment, n, fill = 0) %>%
  # Use mutate to find net sentiment
  mutate(sentiment = positive - negative)
##############################################################
# Visualizing Narrative Arcs
library(tidyr)
# Load the ggplot2 package
library(ggplot2)
tidy_shakespeare %>%
  inner_join(get_sentiments("bing")) %>%
  count(title, type, index = linenumber %/% 70, sentiment) %>%
  spread(sentiment, n, fill = 0) %>%
  mutate(sentiment = positive - negative) %>%
  # Put index on x-axis, sentiment on y-axis, and map comedy/tragedy to fill
  ggplot(aes(index,sentiment,fill = type)) +
  # Make a bar chart with geom_col()
  geom_col() +
  # Separate panels for each title with facet_wrap()
  facet_wrap(~ title,scales = 'free_x')
###################################################################################################################
######### Analyzing TV News ##############
# Tidy the text
# Load the tidytext package
library(tidytext)
# Pipe the climate_text dataset to the next line
tidy_tv <- climate_text %>%
  # Transform the non-tidy text data to tidy text data
  unnest_tokens(word,text)
######## Counting totals
tidy_tv %>%
  anti_join(stop_words) %>%
  # Count by word with sort = TRUE
  count(word,sort = TRUE)
tidy_tv %>%
  # Count by station
  count(station) %>%
  # Rename the new column station_total
  rename(station_total = n)
###################### Sentiment analysis
tv_sentiment <- tidy_tv %>%
  # Group by station
  group_by(station) %>%
  # Define a new column station_total
  mutate(station_total = n()) %>%
  ungroup() %>%
  # Implement sentiment analysis with the NRC lexicon
  inner_join(get_sentiments('nrc'))
######################################
# Which stations use the most negative words?
tv_sentiment %>%
  count(station, sentiment, station_total) %>%
  # Define a new column percent
  mutate(percent = n / station_total) %>%
  # Filter only for negative words
  filter(sentiment == 'negative') %>%
  # Arrange by percent
  arrange(percent)
# Now do the same but for positive words
tv_sentiment %>%
  count(station, sentiment, station_total) %>%
  # Define a new column percent
  mutate(percent = n / station_total) %>%
  # Filter only for positive words (comment previously mislabeled "negative")
  filter(sentiment == 'positive') %>%
  # Arrange by percent
  arrange(percent)
#########################
## Contribution to sentiment score
tv_sentiment %>%
  # Count by word and sentiment
  count(word,sentiment) %>%
  # Group by sentiment
  group_by(sentiment) %>%
  # Take the top 10 words for each sentiment
  top_n(10) %>%
  ungroup() %>%
  mutate(word = reorder(word, n)) %>%
  # Set up the plot with aes()
  ggplot(aes(word,n,fill = sentiment)) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~ sentiment, scales = "free") +
  coord_flip()
############## Word choice and TV station
tv_sentiment %>%
  # Filter for only negative words
  filter(sentiment == 'negative') %>%
  # Count by word and station
  count(word,station) %>%
  # Group by station
  group_by(station) %>%
  # Take the top 10 words for each station
  top_n(10) %>%
  ungroup() %>%
  # word__station keys keep per-facet ordering distinct; the "__station"
  # suffix is stripped from axis labels below
  mutate(word = reorder(paste(word, station, sep = "__"), n)) %>%
  # Set up the plot with aes()
  ggplot(aes(word,n,fill = station)) +
  geom_col(show.legend = FALSE) +
  scale_x_discrete(labels = function(x) gsub("__.+$", "", x)) +
  facet_wrap(~ station, nrow = 2, scales = "free") +
  coord_flip()
######################
# Visualizing sentiment over time
# Load the lubridate package
library(lubridate)
sentiment_by_time <- tidy_tv %>%
  # Define a new column using floor_date()
  mutate(date = floor_date(show_date, unit = "6 months")) %>%
  # Group by date
  group_by(date) %>%
  mutate(total_words = n()) %>%
  ungroup() %>%
  # Implement sentiment analysis using the NRC lexicon
  inner_join(get_sentiments('nrc'))
sentiment_by_time %>%
  # Filter for positive and negative words
  filter(sentiment == 'positive' | sentiment == 'negative') %>%
  # Count by date, sentiment, and total_words
  count(date,sentiment,total_words) %>%
  ungroup() %>%
  mutate(percent = n / total_words) %>%
  # Set up the plot with aes()
  ggplot(aes(date,percent,col = sentiment)) +
  geom_line(size = 1.5) +
  geom_smooth(method = "lm", se = FALSE, lty = 2) +
  expand_limits(y = 0)
###########################
# Word changes over time
tidy_tv %>%
  # Define a new column that rounds each date to the nearest 1 month
  mutate(date = floor_date(show_date,unit = 'month')) %>%
  filter(word %in% c("threat", "hoax", "denier",
                     "real", "warming", "hurricane")) %>%
  # Count by date and word
  count(date,word) %>%
  ungroup() %>%
  # Set up your plot with aes()
  ggplot(aes(date,n,col = word)) +
  # Make facets by word
  facet_wrap(~word) +
  geom_line(size = 1.5, show.legend = FALSE) +
  expand_limits(y = 0)
##################################################################################################################
# Tidying song lyrics
# Load the tidytext package
library(tidytext)
# Pipe song_lyrics to the next line
tidy_lyrics <- song_lyrics %>%
  # Transform the lyrics column to a word column
  unnest_tokens(word,lyrics)
# Print tidy_lyrics
tidy_lyrics
####################################################
# Calculating word totals per song
totals <- tidy_lyrics %>%
  # Count by song to find the word totals for each song
  count(song) %>%
  # Rename the new column
  rename(total_words = n)
# Print totals
totals
lyric_counts <- tidy_lyrics %>%
  # Combine totals with tidy_lyrics using the "song" column
  left_join(totals, by = "song")
######################################
# Sentiment analysis of lyrics
lyric_sentiment <- lyric_counts %>%
  # Implement sentiment analysis with the "nrc" lexicon
  inner_join(get_sentiments('nrc'))
lyric_sentiment %>%
  # Find how many sentiment words each song has
  count(song, sentiment, sort = TRUE)
##############################################
# Most positive and negative songs
# What songs have the highest proportion of negative words?
lyric_sentiment %>%
  # Count using three arguments
  count(song,sentiment,total_words) %>%
  ungroup() %>%
  # Make a new percent column with mutate
  mutate(percent = n / total_words) %>%
  # Filter for only negative words
  filter(sentiment == 'negative') %>%
  # Arrange by descending percent
  arrange(desc(percent))
# What songs have the highest proportion of positive words?
lyric_sentiment %>%
  # Count using three arguments
  count(song,sentiment,total_words) %>%
  ungroup() %>%
  # Make a new percent column with mutate
  mutate(percent = n / total_words) %>%
  # Filter for only positive words
  filter(sentiment == 'positive') %>%
  # Arrange by descending percent
  arrange(desc(percent))
#############################################
# Relationship between sentiment and Billboard rank
lyric_sentiment %>%
  filter(sentiment == "positive") %>%
  # Count by song, Billboard rank, and the total number of words
  count(song,rank,total_words) %>%
  ungroup() %>%
  # Use the correct dplyr verb to make two new columns
  mutate(percent = n / total_words,
         # bucket Billboard ranks into bands of 10 for the boxplot
         rank = 10 * floor(rank / 10)) %>%
  ggplot(aes(as.factor(rank), percent)) +
  # Make a boxplot
  geom_boxplot()
#########################################################
# More on Billboard and sentiment
lyric_sentiment %>%
  # Filter for only negative words
  filter(sentiment == 'negative') %>%
  # Count by song, Billboard rank, and the total number of words
  count(song,rank,total_words) %>%
  ungroup() %>%
  # Mutate to make a percent column
  mutate(percent = n / total_words,
         rank = 10 * floor(rank / 10)) %>%
  # Use ggplot to set up a plot with rank and percent
  ggplot(aes(as.factor(rank), percent)) +
  # Make a boxplot
  geom_boxplot()
###########################################################
# Sentiment scores by year
# How is negative sentiment changing over time?
lyric_sentiment %>%
  # Filter for only negative words
  filter(sentiment == 'negative') %>%
  # Count by song, year, and the total number of words
  count(song,year,total_words) %>%
  ungroup() %>%
  mutate(percent = n / total_words,
         # bucket release years into decades
         year = 10 * floor(year / 10)) %>%
  # Use ggplot to set up a plot with year and percent
  ggplot(aes(as.factor(year),percent)) +
  geom_boxplot()
# How is positive sentiment changing over time?
lyric_sentiment %>%
  # Filter for only positive words (comment previously mislabeled "negative")
  filter(sentiment == 'positive') %>%
  # Count by song, year, and the total number of words
  count(song,year,total_words) %>%
  ungroup() %>%
  mutate(percent = n / total_words,
         year = 10 * floor(year / 10)) %>%
  # Use ggplot to set up a plot with year and percent
  ggplot(aes(as.factor(year),percent)) +
  geom_boxplot()
##########################################
# Modeling negative sentiment
negative_by_year <- lyric_sentiment %>%
  # Filter for negative words
  filter(sentiment =='negative') %>%
  count(song, year, total_words) %>%
  ungroup() %>%
  # Define a new column: percent
  mutate(percent = n / total_words)
# Specify the model with percent as the response and year as the predictor
model_negative <- lm(percent ~ year, data = negative_by_year)
# Use summary to see the results of the model fitting
summary(model_negative)
########################################################
# Modeling positive sentiment
positive_by_year <- lyric_sentiment %>%
  filter(sentiment == "positive") %>%
  # Count by song, year, and total number of words
  count(song,year, total_words) %>%
  ungroup() %>%
  # Define a new column: percent
  mutate(percent = n / total_words)
# Fit a linear model with percent as the response and year as the predictor
model_positive <- lm(percent ~ year, data = positive_by_year)
# Use summary to see the results of the model fitting
summary(model_positive)
|
/Tidy Text Mining.R
|
no_license
|
spensorflow/Utilities
|
R
| false
| false
| 17,902
|
r
|
# Sentiment lexicons
# NOTE(review): this is an exercise transcript (DataCamp-style). Several data
# objects (`geocoded_tweets`, `tweets_nrc`, `tweets_bing`, `shakespeare`,
# `climate_text`, `song_lyrics`) are pre-defined by the course environment and
# are not created anywhere in this file -- confirm their source before reuse.
# Load dplyr and tidytext
library(dplyr)
library(tidytext)
# Choose the bing lexicon
get_sentiments("bing")
# Choose the nrc lexicon
get_sentiments("nrc") %>%
  count(sentiment) # Count words by sentiment
###############################################################
## Inner join to implement sentiment analysis
# geocoded_tweets has been pre-defined
geocoded_tweets
# Access bing lexicon: bing
bing <- get_sentiments("bing")
# Use data frame with text data
geocoded_tweets %>%
  # With inner join, implement sentiment analysis using `bing`
  inner_join(bing)
###############################################################
# Find most common sadness words
tweets_nrc
tweets_nrc %>%
  # Filter to only choose the words associated with sadness
  filter(sentiment == 'sadness') %>%
  # Group by word
  group_by(word) %>%
  # Use the summarize verb to find the mean frequency
  summarize(freq = mean(freq)) %>%
  # Arrange to sort in order of descending frequency
  arrange(desc(freq))
########################################################
# Find most common joy words
# tweets_nrc has been pre-defined
tweets_nrc
joy_words <- tweets_nrc %>%
  # Filter to choose only words associated with joy
  filter(sentiment == 'joy') %>%
  # Group by each word
  group_by(word) %>%
  # Use the summarize verb to find the mean frequency
  summarize(freq = mean(freq)) %>%
  # Arrange to sort in order of descending frequency
  arrange(desc(freq))
# Load ggplot2
library(ggplot2)
joy_words %>%
  # top_n() with no `wt` ranks by the last column, `freq`
  top_n(20) %>%
  mutate(word = reorder(word, freq)) %>%
  # Use aes() to put words on the x-axis and frequency on the y-axis
  ggplot(aes(word, freq)) +
  # Make a bar chart with geom_col()
  geom_col() +
  coord_flip()
########################################################
# common words in different states
# tweets_nrc has been pre-defined
tweets_nrc
tweets_nrc %>%
  # Find only the words for the state of Utah and associated with joy
  filter(state == "utah",
         sentiment == 'joy') %>%
  # Arrange to sort in order of descending frequency
  arrange(desc(freq))
tweets_nrc %>%
  # Find only the words for the state of Louisiana and associated with joy
  filter(state == "louisiana",
         sentiment == 'joy') %>%
  # Arrange to sort in order of descending frequency
  arrange(desc(freq))
########################################################
# States with most positive twitter users
# tweets_bing has been pre-defined
tweets_bing
tweets_bing %>%
  # Group by two columns: state and sentiment
  group_by(state, sentiment) %>%
  # Use summarize to calculate the mean frequency for these groups
  summarize(freq = mean(freq)) %>%
  # spread() needs tidyr attached -- loaded further down in this script;
  # TODO(review): confirm tidyr is attached before running this section alone
  spread(sentiment, freq) %>%
  ungroup() %>%
  # Calculate the ratio of positive to negative words
  mutate(ratio = positive / negative,
         state = reorder(state, ratio)) %>%
  # Use aes() to put state on the x-axis and ratio on the y-axis
  ggplot(aes(state, ratio)) +
  # Make a plot with points using geom_point()
  geom_point()+
  coord_flip()
#######################################################################################################
# Shakespeare
# Pipe the shakespeare data frame to the next line
shakespeare %>%
  # Use count to find out how many titles/types there are
  count(title,type)
##############################################################
# Unnesting text to words
# Load tidytext
library(tidytext)
tidy_shakespeare <- shakespeare %>%
  # Group by the titles of the plays
  group_by(title) %>%
  # Define a new column linenumber
  mutate(linenumber = row_number()) %>%
  # Transform the non-tidy text data to tidy text data
  unnest_tokens(word, text) %>%
  ungroup()
# Pipe the tidy Shakespeare data frame to the next line
tidy_shakespeare %>%
  # Use count to find out how many times each word is used
  count(word, sort = TRUE)
#############################################################
# Sentiment analysis of Shakespeare
shakespeare_sentiment <- tidy_shakespeare %>%
  # Implement sentiment analysis with the "bing" lexicon
  inner_join(get_sentiments('bing') )
shakespeare_sentiment %>%
  # Find how many positive/negative words each play has
  count(title,sentiment)
##########################################
# Tragedy or Comedy?
sentiment_counts <- tidy_shakespeare %>%
  # Implement sentiment analysis using the "bing" lexicon
  inner_join(get_sentiments('bing')) %>%
  # Count the number of words by title, type, and sentiment
  count(title,type,sentiment)
sentiment_counts %>%
  # Group by the titles of the plays
  group_by(title) %>%
  # Find the total number of words in each play
  mutate(total = sum(n),
         # Calculate the number of words divided by the total
         percent = n/total) %>%
  # Filter the results for only negative sentiment
  filter(sentiment == 'negative') %>%
  arrange(percent)
#####################################################
# Most common positive and negative words
word_counts <- tidy_shakespeare %>%
  # Implement sentiment analysis using the "bing" lexicon
  inner_join(get_sentiments('bing')) %>%
  # Count by word and sentiment
  count(word,sentiment)
top_words <- word_counts %>%
  # Group by sentiment
  group_by(sentiment) %>%
  # Take the top 10 for each sentiment (ranked by the last column, n)
  top_n(10) %>%
  ungroup() %>%
  # Make word a factor in order of n
  mutate(word = reorder(word, n))
# Use aes() to put words on the x-axis and n on the y-axis
ggplot(top_words, aes(word, n, fill = sentiment)) +
  # Make a bar chart with geom_col()
  geom_col(show.legend = FALSE) +
  facet_wrap(~sentiment, scales = "free") +
  coord_flip()
################################################################
# Word contributions by play
tidy_shakespeare %>%
  # Count by title and word
  count(title, word, sort = TRUE) %>%
  # Implement sentiment analysis using the "afinn" lexicon
  inner_join(get_sentiments('afinn')) %>%
  # Filter to only examine the scores for Macbeth that are negative
  filter(title == 'The Tragedy of Macbeth', score < 0)
################################################################
# Calculating a contribution score
sentiment_contributions <- tidy_shakespeare %>%
  # Count by title and word
  count(title, word, sort = TRUE) %>%
  # Implement sentiment analysis using the "afinn" lexicon
  inner_join(get_sentiments("afinn")) %>%
  # Group by title
  group_by(title) %>%
  # Calculate a contribution for each word in each title
  mutate(contribution = score * n / sum(n)) %>%
  ungroup()
sentiment_contributions
sentiment_contributions %>%
  # Filter for Hamlet
  filter(title == 'Hamlet, Prince of Denmark') %>%
  # Arrange to see the most negative words
  arrange(contribution)
sentiment_contributions %>%
  # Filter for The Merchant of Venice
  filter(title == 'The Merchant of Venice') %>%
  # Arrange to see the most positive words
  arrange(desc(contribution))
##########################################################
# Sentiment changes through a play
tidy_shakespeare %>%
  # Implement sentiment analysis using "bing" lexicon
  inner_join(get_sentiments('bing')) %>%
  # Count using four arguments; index buckets the text into 70-line sections
  count(title,type,index = linenumber %/% 70,sentiment)
#######################################################
# Calculating net sentiment
# Load the tidyr package
library(tidyr)
tidy_shakespeare %>%
  inner_join(get_sentiments("bing")) %>%
  count(title, type, index = linenumber %/% 70, sentiment) %>%
  # Spread sentiment and n across multiple columns
  spread(sentiment, n, fill = 0) %>%
  # Use mutate to find net sentiment
  mutate(sentiment = positive - negative)
##############################################################
# Visualizing Narrative Arcs
library(tidyr)
# Load the ggplot2 package
library(ggplot2)
tidy_shakespeare %>%
  inner_join(get_sentiments("bing")) %>%
  count(title, type, index = linenumber %/% 70, sentiment) %>%
  spread(sentiment, n, fill = 0) %>%
  mutate(sentiment = positive - negative) %>%
  # Put index on x-axis, sentiment on y-axis, and map comedy/tragedy to fill
  ggplot(aes(index,sentiment,fill = type)) +
  # Make a bar chart with geom_col()
  geom_col() +
  # Separate panels for each title with facet_wrap()
  facet_wrap(~ title,scales = 'free_x')
###################################################################################################################
######### Analyzing TV News ##############
# Tidy the text
# Load the tidytext package
library(tidytext)
# Pipe the climate_text dataset to the next line
tidy_tv <- climate_text %>%
  # Transform the non-tidy text data to tidy text data
  unnest_tokens(word,text)
######## Counting totals
tidy_tv %>%
  anti_join(stop_words) %>%
  # Count by word with sort = TRUE
  count(word,sort = TRUE)
tidy_tv %>%
  # Count by station
  count(station) %>%
  # Rename the new column station_total
  rename(station_total = n)
###################### Sentiment analysis
tv_sentiment <- tidy_tv %>%
  # Group by station
  group_by(station) %>%
  # Define a new column station_total
  mutate(station_total = n()) %>%
  ungroup() %>%
  # Implement sentiment analysis with the NRC lexicon
  inner_join(get_sentiments('nrc'))
######################################
# Which stations use the most negative words?
tv_sentiment %>%
  count(station, sentiment, station_total) %>%
  # Define a new column percent
  mutate(percent = n / station_total) %>%
  # Filter only for negative words
  filter(sentiment == 'negative') %>%
  # Arrange by percent
  arrange(percent)
# Now do the same but for positive words
tv_sentiment %>%
  count(station, sentiment, station_total) %>%
  # Define a new column percent
  mutate(percent = n / station_total) %>%
  # Filter only for positive words (comment previously mislabeled "negative")
  filter(sentiment == 'positive') %>%
  # Arrange by percent
  arrange(percent)
#########################
## Contribution to sentiment score
tv_sentiment %>%
  # Count by word and sentiment
  count(word,sentiment) %>%
  # Group by sentiment
  group_by(sentiment) %>%
  # Take the top 10 words for each sentiment
  top_n(10) %>%
  ungroup() %>%
  mutate(word = reorder(word, n)) %>%
  # Set up the plot with aes()
  ggplot(aes(word,n,fill = sentiment)) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~ sentiment, scales = "free") +
  coord_flip()
############## Word choice and TV station
tv_sentiment %>%
  # Filter for only negative words
  filter(sentiment == 'negative') %>%
  # Count by word and station
  count(word,station) %>%
  # Group by station
  group_by(station) %>%
  # Take the top 10 words for each station
  top_n(10) %>%
  ungroup() %>%
  # word__station keys keep per-facet ordering distinct; the "__station"
  # suffix is stripped from axis labels below
  mutate(word = reorder(paste(word, station, sep = "__"), n)) %>%
  # Set up the plot with aes()
  ggplot(aes(word,n,fill = station)) +
  geom_col(show.legend = FALSE) +
  scale_x_discrete(labels = function(x) gsub("__.+$", "", x)) +
  facet_wrap(~ station, nrow = 2, scales = "free") +
  coord_flip()
######################
# Visualizing sentiment over time
# Load the lubridate package
library(lubridate)
sentiment_by_time <- tidy_tv %>%
  # Define a new column using floor_date()
  mutate(date = floor_date(show_date, unit = "6 months")) %>%
  # Group by date
  group_by(date) %>%
  mutate(total_words = n()) %>%
  ungroup() %>%
  # Implement sentiment analysis using the NRC lexicon
  inner_join(get_sentiments('nrc'))
sentiment_by_time %>%
  # Filter for positive and negative words
  filter(sentiment == 'positive' | sentiment == 'negative') %>%
  # Count by date, sentiment, and total_words
  count(date,sentiment,total_words) %>%
  ungroup() %>%
  mutate(percent = n / total_words) %>%
  # Set up the plot with aes()
  ggplot(aes(date,percent,col = sentiment)) +
  geom_line(size = 1.5) +
  geom_smooth(method = "lm", se = FALSE, lty = 2) +
  expand_limits(y = 0)
###########################
# Word changes over time
tidy_tv %>%
  # Define a new column that rounds each date to the nearest 1 month
  mutate(date = floor_date(show_date,unit = 'month')) %>%
  filter(word %in% c("threat", "hoax", "denier",
                     "real", "warming", "hurricane")) %>%
  # Count by date and word
  count(date,word) %>%
  ungroup() %>%
  # Set up your plot with aes()
  ggplot(aes(date,n,col = word)) +
  # Make facets by word
  facet_wrap(~word) +
  geom_line(size = 1.5, show.legend = FALSE) +
  expand_limits(y = 0)
##################################################################################################################
# Tidying song lyrics
# Load the tidytext package
library(tidytext)
# Pipe song_lyrics to the next line
tidy_lyrics <- song_lyrics %>%
  # Transform the lyrics column to a word column
  unnest_tokens(word,lyrics)
# Print tidy_lyrics
tidy_lyrics
####################################################
# Calculating word totals per song
totals <- tidy_lyrics %>%
  # Count by song to find the word totals for each song
  count(song) %>%
  # Rename the new column
  rename(total_words = n)
# Print totals
totals
lyric_counts <- tidy_lyrics %>%
  # Combine totals with tidy_lyrics using the "song" column
  left_join(totals, by = "song")
######################################
# Sentiment analysis of lyrics
lyric_sentiment <- lyric_counts %>%
  # Implement sentiment analysis with the "nrc" lexicon
  inner_join(get_sentiments('nrc'))
lyric_sentiment %>%
  # Find how many sentiment words each song has
  count(song, sentiment, sort = TRUE)
##############################################
# Most positive and negative songs
# What songs have the highest proportion of negative words?
lyric_sentiment %>%
  # Count using three arguments
  count(song,sentiment,total_words) %>%
  ungroup() %>%
  # Make a new percent column with mutate
  mutate(percent = n / total_words) %>%
  # Filter for only negative words
  filter(sentiment == 'negative') %>%
  # Arrange by descending percent
  arrange(desc(percent))
# What songs have the highest proportion of positive words?
lyric_sentiment %>%
  # Count using three arguments
  count(song,sentiment,total_words) %>%
  ungroup() %>%
  # Make a new percent column with mutate
  mutate(percent = n / total_words) %>%
  # Filter for only positive words
  filter(sentiment == 'positive') %>%
  # Arrange by descending percent
  arrange(desc(percent))
#############################################
# Relationship between sentiment and Billboard rank
lyric_sentiment %>%
  filter(sentiment == "positive") %>%
  # Count by song, Billboard rank, and the total number of words
  count(song,rank,total_words) %>%
  ungroup() %>%
  # Use the correct dplyr verb to make two new columns
  mutate(percent = n / total_words,
         # bucket Billboard ranks into bands of 10 for the boxplot
         rank = 10 * floor(rank / 10)) %>%
  ggplot(aes(as.factor(rank), percent)) +
  # Make a boxplot
  geom_boxplot()
#########################################################
# More on Billboard and sentiment
lyric_sentiment %>%
  # Filter for only negative words
  filter(sentiment == 'negative') %>%
  # Count by song, Billboard rank, and the total number of words
  count(song,rank,total_words) %>%
  ungroup() %>%
  # Mutate to make a percent column
  mutate(percent = n / total_words,
         rank = 10 * floor(rank / 10)) %>%
  # Use ggplot to set up a plot with rank and percent
  ggplot(aes(as.factor(rank), percent)) +
  # Make a boxplot
  geom_boxplot()
###########################################################
# Sentiment scores by year
# How is negative sentiment changing over time?
lyric_sentiment %>%
  # Filter for only negative words
  filter(sentiment == 'negative') %>%
  # Count by song, year, and the total number of words
  count(song,year,total_words) %>%
  ungroup() %>%
  mutate(percent = n / total_words,
         # bucket release years into decades
         year = 10 * floor(year / 10)) %>%
  # Use ggplot to set up a plot with year and percent
  ggplot(aes(as.factor(year),percent)) +
  geom_boxplot()
# How is positive sentiment changing over time?
lyric_sentiment %>%
  # Filter for only positive words (comment previously mislabeled "negative")
  filter(sentiment == 'positive') %>%
  # Count by song, year, and the total number of words
  count(song,year,total_words) %>%
  ungroup() %>%
  mutate(percent = n / total_words,
         year = 10 * floor(year / 10)) %>%
  # Use ggplot to set up a plot with year and percent
  ggplot(aes(as.factor(year),percent)) +
  geom_boxplot()
##########################################
# Modeling negative sentiment
negative_by_year <- lyric_sentiment %>%
  # Filter for negative words
  filter(sentiment =='negative') %>%
  count(song, year, total_words) %>%
  ungroup() %>%
  # Define a new column: percent
  mutate(percent = n / total_words)
# Specify the model with percent as the response and year as the predictor
model_negative <- lm(percent ~ year, data = negative_by_year)
# Use summary to see the results of the model fitting
summary(model_negative)
########################################################
# Modeling positive sentiment
positive_by_year <- lyric_sentiment %>%
  filter(sentiment == "positive") %>%
  # Count by song, year, and total number of words
  count(song,year, total_words) %>%
  ungroup() %>%
  # Define a new column: percent
  mutate(percent = n / total_words)
# Fit a linear model with percent as the response and year as the predictor
model_positive <- lm(percent ~ year, data = positive_by_year)
# Use summary to see the results of the model fitting
summary(model_positive)
|
/Operaciones de matrices.R
|
no_license
|
Katherine-Ramirez-Cubillos/Introducci-n-Programaci-n
|
R
| false
| false
| 2,268
|
r
| ||
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/lfda.R
\name{\%^\%}
\alias{\%^\%}
\title{Negative One Half Matrix Power Operator}
\usage{
x \%^\% n
}
\arguments{
\item{x}{the matrix we want to operate on}
\item{n}{the exponent}
}
\value{
the matrix after negative one half power
}
\description{
This function defines operation for negative one half matrix
power operator
}
|
/pkg/caret/man/grapes-pow-grapes.Rd
|
no_license
|
wwbrannon/caret
|
R
| false
| false
| 413
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/lfda.R
\name{\%^\%}
\alias{\%^\%}
\title{Negative One Half Matrix Power Operator}
\usage{
x \%^\% n
}
\arguments{
\item{x}{the matrix we want to operate on}
\item{n}{the exponent}
}
\value{
the matrix after negative one half power
}
\description{
This function defines operation for negative one half matrix
power operator
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/http-browse.r
\name{BROWSE}
\alias{BROWSE}
\title{Open specified url in browser.}
\usage{
BROWSE(url = NULL, config = list(), ..., handle = NULL)
}
\arguments{
\item{url}{the url of the page to retrieve}
\item{config}{All configuration options are ignored because the request
is handled by the browser, not \pkg{RCurl}.}
\item{...}{Further named parameters, such as \code{query}, \code{path}, etc,
passed on to \code{\link[=modify_url]{modify_url()}}. Unnamed parameters will be combined
with \code{\link[=config]{config()}}.}
\item{handle}{The handle to use with this request. If not
supplied, will be retrieved and reused from the \code{\link[=handle_pool]{handle_pool()}}
based on the scheme, hostname and port of the url. By default \pkg{httr}
requests to the same scheme/host/port combo. This substantially reduces
connection time, and ensures that cookies are maintained over multiple
requests to the same host. See \code{\link[=handle_pool]{handle_pool()}} for more
details.}
}
\value{
A \code{\link[=response]{response()}} object.
}
\description{
(This isn't really a http verb, but it seems to follow the same format).
}
\details{
Only works in interactive sessions.
}
\examples{
BROWSE("http://google.com")
BROWSE("http://had.co.nz")
}
\seealso{
Other http methods:
\code{\link{DELETE}()},
\code{\link{GET}()},
\code{\link{HEAD}()},
\code{\link{PATCH}()},
\code{\link{POST}()},
\code{\link{PUT}()},
\code{\link{VERB}()}
}
\concept{http methods}
|
/man/BROWSE.Rd
|
permissive
|
r-lib/httr
|
R
| false
| true
| 1,537
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/http-browse.r
\name{BROWSE}
\alias{BROWSE}
\title{Open specified url in browser.}
\usage{
BROWSE(url = NULL, config = list(), ..., handle = NULL)
}
\arguments{
\item{url}{the url of the page to retrieve}
\item{config}{All configuration options are ignored because the request
is handled by the browser, not \pkg{RCurl}.}
\item{...}{Further named parameters, such as \code{query}, \code{path}, etc,
passed on to \code{\link[=modify_url]{modify_url()}}. Unnamed parameters will be combined
with \code{\link[=config]{config()}}.}
\item{handle}{The handle to use with this request. If not
supplied, will be retrieved and reused from the \code{\link[=handle_pool]{handle_pool()}}
based on the scheme, hostname and port of the url. By default \pkg{httr}
requests to the same scheme/host/port combo. This substantially reduces
connection time, and ensures that cookies are maintained over multiple
requests to the same host. See \code{\link[=handle_pool]{handle_pool()}} for more
details.}
}
\value{
A \code{\link[=response]{response()}} object.
}
\description{
(This isn't really a http verb, but it seems to follow the same format).
}
\details{
Only works in interactive sessions.
}
\examples{
BROWSE("http://google.com")
BROWSE("http://had.co.nz")
}
\seealso{
Other http methods:
\code{\link{DELETE}()},
\code{\link{GET}()},
\code{\link{HEAD}()},
\code{\link{PATCH}()},
\code{\link{POST}()},
\code{\link{PUT}()},
\code{\link{VERB}()}
}
\concept{http methods}
|
# Extracted example for BiodiversityR::accumresult (species accumulation curves).
library(BiodiversityR)
### Name: accumresult
### Title: Alternative Species Accumulation Curve Results
### Aliases: accumresult accumplot accumcomp
### Keywords: multivariate
### ** Examples
library(vegan)
data(dune.env)
data(dune)
# Per-site abundance totals; rowSums() avoids the matrix coercion that
# apply(dune, 1, sum) performs on a data frame and is the idiomatic form.
dune.env$site.totals <- rowSums(dune)
Accum.1 <- accumresult(dune, y=dune.env, scale='site.totals', method='exact', conditioned=TRUE)
Accum.1
accumplot(Accum.1)
# Compare accumulation curves across the Management factor levels.
accumcomp(dune, y=dune.env, factor='Management', method='exact', legend=FALSE, conditioned=TRUE)
## CLICK IN THE GRAPH TO INDICATE WHERE THE LEGEND NEEDS TO BE PLACED FOR
## OPTION WHERE LEGEND=TRUE (DEFAULT).
|
/data/genthat_extracted_code/BiodiversityR/examples/accumresult.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 614
|
r
|
# Extracted example for BiodiversityR::accumresult (duplicate copy in this dump).
library(BiodiversityR)
### Name: accumresult
### Title: Alternative Species Accumulation Curve Results
### Aliases: accumresult accumplot accumcomp
### Keywords: multivariate
### ** Examples
library(vegan)
data(dune.env)
data(dune)
# Per-site abundance totals; rowSums() avoids the matrix coercion that
# apply(dune, 1, sum) performs on a data frame and is the idiomatic form.
dune.env$site.totals <- rowSums(dune)
Accum.1 <- accumresult(dune, y=dune.env, scale='site.totals', method='exact', conditioned=TRUE)
Accum.1
accumplot(Accum.1)
# Compare accumulation curves across the Management factor levels.
accumcomp(dune, y=dune.env, factor='Management', method='exact', legend=FALSE, conditioned=TRUE)
## CLICK IN THE GRAPH TO INDICATE WHERE THE LEGEND NEEDS TO BE PLACED FOR
## OPTION WHERE LEGEND=TRUE (DEFAULT).
|
# Fit hierarchical Bayesian linear models (bayesm::rhierLinearModel) of
# per-minute player boxscore production as a function of opponent strength,
# approximate each posterior coefficient with a parametric density, and
# persist the approximations to the `mcmc_densities` table.
# Relies on helpers from load.R: dbGetData, density.approximate,
# serialize.strip, and the DB connection `con`.
source('load.R')
library(bayesm)
# Column totals of a boxscore table (stat columns assumed to start at 5);
# not referenced below, kept as a utility.
col.boxscores <- function(x) { colSums( x[ , 5:ncol(x)], na.rm = TRUE ) }
# Pull the schedule, ratings and team-strength tables from the database.
espn.schedule <- dbGetData( "SELECT * FROM `schedule`" )
ratings <- dbGetData("SELECT * FROM `ratings`")
strength <- dbGetData("SELECT * FROM `strength`")
# Strength metrics come back as character; coerce columns 3+ to numeric.
for(i in 3:ncol(strength))
{
strength[ , i ] <- as.numeric( strength[ , i ] )
}
# whether or not to skip team/stat combos whose densities are already stored
overwrite.output <- FALSE
# number of MCMC draws (all kept: keep = 1)
n.mcmc <- 10000
# columns in the boxscore -- scale these by minute later
boxscores.index <- 6:19
# the only ones worth predicting (the ones that contribute to the actual score);
# name -> numeric code written to the `boxscore` column of `mcmc_densities`
boxscores.predict <- list('ftm' = 1, 'fgm' = 2, 'threem' = 3)
# espn.seasons <- 2009:2013
espn.seasons <- c(2009)
for( espn.season in espn.seasons )
{
cat( paste("\n\n NEW SEASON: ", espn.season, sep = "") )
# restrict the schedule and strength tables to this season
season.schedule <- espn.schedule[ espn.schedule$year == espn.season, ]
season.strength <- strength[ strength$season == espn.season, ]
# every team appearing in the season, home or away
season.teams <- sort( unique( c(season.schedule$homeid, season.schedule$visitid) ) )
for( teamid in season.teams )
{
cat( paste("\n\t", teamid, sep = "") )
team.schedule <- season.schedule[ season.schedule$homeid == teamid | season.schedule$visitid == teamid, ]
home <- team.schedule$homeid == teamid
# extract the opponents from the schedule
opp.ids <- team.schedule$homeid
opp.ids[ home ] <- team.schedule$visitid[ home ]
# get team's own boxscores
team.boxscores <- dbGetData( paste( "SELECT * FROM `boxscores` WHERE `gameid` != 0 AND `teamid` = ", teamid, " AND `playerid` != 0 AND `year` = ", espn.season, sep = "" ) )
# skip if there aren't any boxscores
if( nrow(team.boxscores) == 0 )
next
# scale per minute of play
# NOTE(review): rows with min == 0 yield Inf/NaN here; they are dropped by
# the min > 0 filter on the next statement, so the ordering still works.
team.boxscores[ , boxscores.index ] <- team.boxscores[ , boxscores.index ] / team.boxscores$min
# get rid of 0 minute contributors
team.boxscores <- team.boxscores[ team.boxscores$min > 0, ]
# append strength stats to boxscores
matched <- match( team.boxscores$gameid, team.schedule$gameid )
team.boxscores$oppid <- opp.ids[ matched ]
matched <- match( team.boxscores$oppid, season.strength$teamid )
team.boxscores$opp.strength_off <- season.strength$strength_off[ matched ]
team.boxscores$opp.strength_def <- season.strength$strength_def[ matched ]
playerids <- sort(unique( team.boxscores$playerid ))
# fit one hierarchical model per predicted stat (ftm / fgm / threem)
for( boxscore.predict in names(boxscores.predict) )
{
cat( paste("\n\t\t", boxscore.predict, "\n\n", sep = "") )
boxscore.number <- boxscores.predict[[ boxscore.predict ]]
# try() keeps one failing team/stat from aborting the whole run
try
({
# unless overwriting, skip this stat if densities are already in the DB
if( !overwrite.output )
{
already.exists <- dbGetData( paste( "SELECT COUNT(*) AS `count` FROM `mcmc_densities` WHERE `season` = ", espn.season, " AND `teamid` = ", teamid, " AND `boxscore` = ", boxscore.number, sep = "") )
if( already.exists$count[1] > 0 )
{
# NOTE(review): `next` inside try()'s argument may not be evaluated in
# the loop's own context; it appears to signal an error that try()
# swallows, which happens to skip the rest of this block -- confirm
# this skip behavior is intentional.
next
}
}
# build the per-player regression data: y = per-minute stat,
# X = intercept + opponent defensive/offensive strength
i <- 1
reg.data <- list()
for( playerid in playerids )
{
player.boxscores <- team.boxscores[ team.boxscores$playerid == playerid, ]
# see if the player actually contributed to that boxscore
if( sum( player.boxscores[[ boxscore.predict ]] ) > 0 )
{
y <- player.boxscores[[ boxscore.predict ]]
X <- as.matrix(cbind( 1, player.boxscores[ , c('opp.strength_def', 'opp.strength_off') ] ))
reg.data[[ i ]] <- list( y = y, X = X, playerid = playerid )
i <- i + 1
}
}
# some stupid tiny edge teams require checking to see if there's any data for the model
if( length( reg.data ) > 0 )
{
# run it! (read up on Z stuff later)
mcmc <- list( R = n.mcmc, keep = 1 )
out <- rhierLinearModel( Data = list(regdata = reg.data), Mcmc = mcmc )
#colnames(out$betadraw) <- c('int', 'opp.strength_def', 'opp.strength_off')
model.params <- c('int', 'def', 'off')
# write the coefficients out
for( i in 1:length(reg.data) )
{
for( j in 1:length(model.params) )
{
model.param <- model.params[j]
# approximate the posterior draws for coefficient j of player i with a
# parametric density (helper presumably defined in load.R)
approx <- density.approximate( out$betadraw[i,j,] )
# assemble one REPLACE row for `mcmc_densities`; string fields are
# single-quoted by hand before being pasted into the SQL
insert <- list()
insert[[ 'season' ]] <- espn.season
insert[[ 'teamid' ]] <- teamid
insert[[ 'boxscore' ]] <- boxscore.number
insert[[ 'playerid' ]] <- reg.data[[ i ]]$playerid
insert[[ 'model' ]] <- paste("'", model.param, "'", sep = "")
insert[[ 'density' ]] <- paste("'", approx$dist, "'", sep = "")
insert[[ 'loss' ]] <- approx$loss
insert[[ 'params' ]] <- paste("'", serialize.strip(toJSON(approx$params)), "'", sep = "")
sql <- paste( "REPLACE INTO `mcmc_densities` (`", paste(names(insert), collapse = "`,`"), "`) VALUES (", paste(insert, collapse = ","), ")", sep = "")
print(sql)
dbSendQuery( con, sql )
}
}
}
})
}
}
}
|
/mcmc_approx.R
|
no_license
|
drewlanenga/jackboot-firebase
|
R
| false
| false
| 4,828
|
r
|
# Fit hierarchical Bayesian linear models (bayesm::rhierLinearModel) of
# per-minute player boxscore production as a function of opponent strength,
# approximate each posterior coefficient with a parametric density, and
# persist the approximations to the `mcmc_densities` table.
# Relies on helpers from load.R: dbGetData, density.approximate,
# serialize.strip, and the DB connection `con`.
source('load.R')
library(bayesm)
# Column totals of a boxscore table (stat columns assumed to start at 5);
# not referenced below, kept as a utility.
col.boxscores <- function(x) { colSums( x[ , 5:ncol(x)], na.rm = TRUE ) }
# Pull the schedule, ratings and team-strength tables from the database.
espn.schedule <- dbGetData( "SELECT * FROM `schedule`" )
ratings <- dbGetData("SELECT * FROM `ratings`")
strength <- dbGetData("SELECT * FROM `strength`")
# Strength metrics come back as character; coerce columns 3+ to numeric.
for(i in 3:ncol(strength))
{
strength[ , i ] <- as.numeric( strength[ , i ] )
}
# whether or not to skip team/stat combos whose densities are already stored
overwrite.output <- FALSE
# number of MCMC draws (all kept: keep = 1)
n.mcmc <- 10000
# columns in the boxscore -- scale these by minute later
boxscores.index <- 6:19
# the only ones worth predicting (the ones that contribute to the actual score);
# name -> numeric code written to the `boxscore` column of `mcmc_densities`
boxscores.predict <- list('ftm' = 1, 'fgm' = 2, 'threem' = 3)
# espn.seasons <- 2009:2013
espn.seasons <- c(2009)
for( espn.season in espn.seasons )
{
cat( paste("\n\n NEW SEASON: ", espn.season, sep = "") )
# restrict the schedule and strength tables to this season
season.schedule <- espn.schedule[ espn.schedule$year == espn.season, ]
season.strength <- strength[ strength$season == espn.season, ]
# every team appearing in the season, home or away
season.teams <- sort( unique( c(season.schedule$homeid, season.schedule$visitid) ) )
for( teamid in season.teams )
{
cat( paste("\n\t", teamid, sep = "") )
team.schedule <- season.schedule[ season.schedule$homeid == teamid | season.schedule$visitid == teamid, ]
home <- team.schedule$homeid == teamid
# extract the opponents from the schedule
opp.ids <- team.schedule$homeid
opp.ids[ home ] <- team.schedule$visitid[ home ]
# get team's own boxscores
team.boxscores <- dbGetData( paste( "SELECT * FROM `boxscores` WHERE `gameid` != 0 AND `teamid` = ", teamid, " AND `playerid` != 0 AND `year` = ", espn.season, sep = "" ) )
# skip if there aren't any boxscores
if( nrow(team.boxscores) == 0 )
next
# scale per minute of play
# NOTE(review): rows with min == 0 yield Inf/NaN here; they are dropped by
# the min > 0 filter on the next statement, so the ordering still works.
team.boxscores[ , boxscores.index ] <- team.boxscores[ , boxscores.index ] / team.boxscores$min
# get rid of 0 minute contributors
team.boxscores <- team.boxscores[ team.boxscores$min > 0, ]
# append strength stats to boxscores
matched <- match( team.boxscores$gameid, team.schedule$gameid )
team.boxscores$oppid <- opp.ids[ matched ]
matched <- match( team.boxscores$oppid, season.strength$teamid )
team.boxscores$opp.strength_off <- season.strength$strength_off[ matched ]
team.boxscores$opp.strength_def <- season.strength$strength_def[ matched ]
playerids <- sort(unique( team.boxscores$playerid ))
# fit one hierarchical model per predicted stat (ftm / fgm / threem)
for( boxscore.predict in names(boxscores.predict) )
{
cat( paste("\n\t\t", boxscore.predict, "\n\n", sep = "") )
boxscore.number <- boxscores.predict[[ boxscore.predict ]]
# try() keeps one failing team/stat from aborting the whole run
try
({
# unless overwriting, skip this stat if densities are already in the DB
if( !overwrite.output )
{
already.exists <- dbGetData( paste( "SELECT COUNT(*) AS `count` FROM `mcmc_densities` WHERE `season` = ", espn.season, " AND `teamid` = ", teamid, " AND `boxscore` = ", boxscore.number, sep = "") )
if( already.exists$count[1] > 0 )
{
# NOTE(review): `next` inside try()'s argument may not be evaluated in
# the loop's own context; it appears to signal an error that try()
# swallows, which happens to skip the rest of this block -- confirm
# this skip behavior is intentional.
next
}
}
# build the per-player regression data: y = per-minute stat,
# X = intercept + opponent defensive/offensive strength
i <- 1
reg.data <- list()
for( playerid in playerids )
{
player.boxscores <- team.boxscores[ team.boxscores$playerid == playerid, ]
# see if the player actually contributed to that boxscore
if( sum( player.boxscores[[ boxscore.predict ]] ) > 0 )
{
y <- player.boxscores[[ boxscore.predict ]]
X <- as.matrix(cbind( 1, player.boxscores[ , c('opp.strength_def', 'opp.strength_off') ] ))
reg.data[[ i ]] <- list( y = y, X = X, playerid = playerid )
i <- i + 1
}
}
# some stupid tiny edge teams require checking to see if there's any data for the model
if( length( reg.data ) > 0 )
{
# run it! (read up on Z stuff later)
mcmc <- list( R = n.mcmc, keep = 1 )
out <- rhierLinearModel( Data = list(regdata = reg.data), Mcmc = mcmc )
#colnames(out$betadraw) <- c('int', 'opp.strength_def', 'opp.strength_off')
model.params <- c('int', 'def', 'off')
# write the coefficients out
for( i in 1:length(reg.data) )
{
for( j in 1:length(model.params) )
{
model.param <- model.params[j]
# approximate the posterior draws for coefficient j of player i with a
# parametric density (helper presumably defined in load.R)
approx <- density.approximate( out$betadraw[i,j,] )
# assemble one REPLACE row for `mcmc_densities`; string fields are
# single-quoted by hand before being pasted into the SQL
insert <- list()
insert[[ 'season' ]] <- espn.season
insert[[ 'teamid' ]] <- teamid
insert[[ 'boxscore' ]] <- boxscore.number
insert[[ 'playerid' ]] <- reg.data[[ i ]]$playerid
insert[[ 'model' ]] <- paste("'", model.param, "'", sep = "")
insert[[ 'density' ]] <- paste("'", approx$dist, "'", sep = "")
insert[[ 'loss' ]] <- approx$loss
insert[[ 'params' ]] <- paste("'", serialize.strip(toJSON(approx$params)), "'", sep = "")
sql <- paste( "REPLACE INTO `mcmc_densities` (`", paste(names(insert), collapse = "`,`"), "`) VALUES (", paste(insert, collapse = ","), ")", sep = "")
print(sql)
dbSendQuery( con, sql )
}
}
}
})
}
}
}
|
\name{NextTable}
\alias{NextTable}
\title{Calculates the table of Kendall distances in (N+1)! space, given those in N!
space.}
\usage{
NextTable(last.table, N.last)
}
\arguments{
\item{last.table}{Table of distances in N! space.}
  \item{N.last}{The value of N to which \code{last.table} corresponds.}
}
\value{
Table of distances in (N+1)! space.
}
\description{
This is identical to counting the number of fully-ordered
vectors at each bubble sort distance in (N+1)! space.
}
\author{
Erik Gregory
}
\keyword{bubblesort}
\keyword{Kendall}
|
/man/NextTable.Rd
|
no_license
|
cran/RMallow
|
R
| false
| false
| 502
|
rd
|
\name{NextTable}
\alias{NextTable}
\title{Calculates the table of Kendall distances in (N+1)! space, given those in N!
space.}
\usage{
NextTable(last.table, N.last)
}
\arguments{
\item{last.table}{Table of distances in N! space.}
  \item{N.last}{The value of N to which \code{last.table} corresponds.}
}
\value{
Table of distances in (N+1)! space.
}
\description{
This is identical to counting the number of fully-ordered
vectors at each bubble sort distance in (N+1)! space.
}
\author{
Erik Gregory
}
\keyword{bubblesort}
\keyword{Kendall}
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Aggregate the Metrics of a GeoTimeseries.
#'
#' @param x a GeoTimeseries object.
#' @param by (character vector) name(s) of column(s) by which to group.
#' @param FUN (function) function to apply to each metric column.
#' @param metrics (character vector) metrics to aggregate. Default is all
#' metrics.
#' @param ... optional arguments passed to FUN.
#'
#' @return A data.frame object.
#'
#' @note
#' Uses \code{aggregate.data.frame} to do the aggregation. This function
#' omits rows that have missing values in the '\code{by}' columns.
#'
#' @seealso AggregateTimeseries.
aggregate.GeoTimeseries <- function(x, by=kGeo, FUN=base::sum,
                                    metrics=NULL, ...) {
  SetMessageContextString("aggregate.GeoTimeseries")
  on.exit(SetMessageContextString())
  assert_that(is.function(FUN))
  # Default to aggregating every metric column registered on the object.
  all.metrics <- GetInfo(x, "metrics")
  if (is.null(metrics)) {
    metrics <- all.metrics
  }
  # Both the metric and the grouping columns must exist in the data,
  # and a column cannot serve as both a metric and a grouping key.
  CheckForMissingColumns(metrics, dataframe=x)
  CheckForMissingColumns(by, dataframe=x)
  assert_that(length(intersect(metrics, by)) == 0L,
              msg=Message("'metrics' and 'by' cannot intersect"))
  # Delegate to aggregate.data.frame: apply FUN to each metric column
  # within each combination of the 'by' columns.
  df <- as.data.frame(x)
  return(aggregate(df[metrics], by=df[by], FUN=FUN, ...))
}
|
/R/aggregate.R
|
permissive
|
ovative-group/GeoexperimentsResearch
|
R
| false
| false
| 1,978
|
r
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Aggregate the Metrics of a GeoTimeseries.
#'
#' @param x a GeoTimeseries object.
#' @param by (character vector) name(s) of column(s) by which to group.
#' @param FUN (function) function to apply to each metric column.
#' @param metrics (character vector) metrics to aggregate. Default is all
#' metrics.
#' @param ... optional arguments passed to FUN.
#'
#' @return A data.frame object.
#'
#' @note
#' Uses \code{aggregate.data.frame} to do the aggregation. This function
#' omits rows that have missing values in the '\code{by}' columns.
#'
#' @seealso AggregateTimeseries.
aggregate.GeoTimeseries <- function(x, by=kGeo, FUN=base::sum,
                                    metrics=NULL, ...) {
  SetMessageContextString("aggregate.GeoTimeseries")
  on.exit(SetMessageContextString())
  assert_that(is.function(FUN))
  # Default to aggregating every metric column registered on the object.
  all.metrics <- GetInfo(x, "metrics")
  if (is.null(metrics)) {
    metrics <- all.metrics
  }
  # Both the metric and the grouping columns must exist in the data,
  # and a column cannot serve as both a metric and a grouping key.
  CheckForMissingColumns(metrics, dataframe=x)
  CheckForMissingColumns(by, dataframe=x)
  assert_that(length(intersect(metrics, by)) == 0L,
              msg=Message("'metrics' and 'by' cannot intersect"))
  # Delegate to aggregate.data.frame: apply FUN to each metric column
  # within each combination of the 'by' columns.
  df <- as.data.frame(x)
  return(aggregate(df[metrics], by=df[by], FUN=FUN, ...))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary_sc.R
\name{summary_sc}
\alias{summary_sc}
\title{Summary of indicator performance scores}
\usage{
summary_sc(scores_tbl, crit_scores = INDperform::crit_scores_tmpl)
}
\arguments{
\item{scores_tbl}{The output tibble from the \code{\link{scoring}}
function.}
\item{crit_scores}{The (un)modified criterion-scoring template
\code{crit_scores_tmpl}; required to calculate the scores in
percentage. Has to be the same as used in \code{scoring}. Default
is the unmodified template \code{crit_scores_tmpl}.}
}
\value{
The function returns a list of 2 data frames
\describe{
\item{\code{overview}}{IND-specific scores and percentages from
max. score for all criteria (crit 9 and 10 averaged across
all sign. pressures and the number of significant pressures).}
\item{\code{subcriteria_per_press}}{IND- and pressure-specific scores for
all (sub-)criteria and the percentages from max. criterion score.}
}
}
\description{
Summarizes the scoring output tibble so that IND-specific scores for each
criterion as well as the pressure-specific sub-criteria scores (in crit.
9 and 10) can be easily compared.
}
\examples{
# Using the Baltic Sea demo data in this package
scores_tbl <- scoring(trend_tbl = model_trend_ex, mod_tbl = all_results_ex,
press_type = press_type_ex)
summary_sc(scores_tbl)
}
\seealso{
Other score-based IND performance functions: \code{\link{clust_sc}},
\code{\link{dist_sc}}, \code{\link{expect_resp}},
\code{\link{plot_clust_sc}},
\code{\link{plot_spiechart}}, \code{\link{scoring}}
}
|
/man/summary_sc.Rd
|
no_license
|
romainfrancois/INDperform
|
R
| false
| true
| 1,620
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary_sc.R
\name{summary_sc}
\alias{summary_sc}
\title{Summary of indicator performance scores}
\usage{
summary_sc(scores_tbl, crit_scores = INDperform::crit_scores_tmpl)
}
\arguments{
\item{scores_tbl}{The output tibble from the \code{\link{scoring}}
function.}
\item{crit_scores}{The (un)modified criterion-scoring template
\code{crit_scores_tmpl}; required to calculate the scores in
percentage. Has to be the same as used in \code{scoring}. Default
is the unmodified template \code{crit_scores_tmpl}.}
}
\value{
The function returns a list of 2 data frames
\describe{
\item{\code{overview}}{IND-specific scores and percentages from
max. score for all criteria (crit 9 and 10 averaged across
all sign. pressures and the number of significant pressures).}
\item{\code{subcriteria_per_press}}{IND- and pressure-specific scores for
all (sub-)criteria and the percentages from max. criterion score.}
}
}
\description{
Summarizes the scoring output tibble so that IND-specific scores for each
criterion as well as the pressure-specific sub-criteria scores (in crit.
9 and 10) can be easily compared.
}
\examples{
# Using the Baltic Sea demo data in this package
scores_tbl <- scoring(trend_tbl = model_trend_ex, mod_tbl = all_results_ex,
press_type = press_type_ex)
summary_sc(scores_tbl)
}
\seealso{
Other score-based IND performance functions: \code{\link{clust_sc}},
\code{\link{dist_sc}}, \code{\link{expect_resp}},
\code{\link{plot_clust_sc}},
\code{\link{plot_spiechart}}, \code{\link{scoring}}
}
|
\name{HyperGParams-class}
\docType{class}
%% I don't know where these really belong :-(
\alias{conditional}
\alias{conditional<-}
\alias{geneIds<-}
\alias{ontology}
\alias{ontology<-}
\alias{pvalueCutoff<-}
\alias{testDirection<-}
\alias{universeGeneIds}
%% end :-(
\alias{HyperGParams-class}
\alias{initialize,HyperGParams-method}
\alias{geneIds,HyperGParams-method}
\alias{geneIds<-,HyperGParams,logical-method}
\alias{geneIds<-,HyperGParams,ANY-method}
\alias{annotation,HyperGParams-method}
\alias{annotation<-,HyperGParams,character-method}
\alias{conditional,HyperGParams-method}
\alias{ontology,HyperGParams-method}
\alias{pvalueCutoff,HyperGParams-method}
\alias{pvalueCutoff<-,HyperGParams-method}
\alias{testDirection,HyperGParams-method}
\alias{testDirection<-,HyperGParams-method}
\alias{universeGeneIds,HyperGParams-method}
\alias{categoryName}
\alias{categoryName,HyperGParams-method}
\alias{organism,HyperGParams-method}
\alias{makeValidParams,HyperGParams-method}
\alias{show,HyperGParams-method}
\title{Class "HyperGParams"}
\description{
An abstract (VIRTUAL) parameter class for representing all parameters
needed by a method specializing the \code{hyperGTest}
generic. You should only use subclasses of this class directly.
}
\section{Objects from the Class}{
Objects of this class cannot be instantiated directly.
}
\section{Slots}{
\describe{
\item{\code{geneIds}:}{Object of class \code{"ANY"}: A vector of
gene identifiers. Numeric and character vectors are probably the
only things that make sense. These are the gene ids for the
selected gene set.}
\item{\code{universeGeneIds}:}{Object of class \code{"ANY"}: A
vector of gene ids in the same format as \code{geneIds} defining a
subset of the gene ids on the chip that will be used as the
universe for the hypergeometric calculation. If this is
\code{NULL} or has length zero, then all gene ids on the chip will
be used.}
\item{\code{annotation}:}{Object of class
\code{"ANY"}. Functionally, this is either a string giving the name of the
annotation data package for the chip used to generate the data, or
the name of an annotation object downloaded using AnnotationHub.}
\item{\code{categorySubsetIds}:}{Object of class \code{"ANY"}:
If the test method supports it, can be used to specify a subset of
category ids to include in the test instead of all possible
category ids.}
\item{\code{categoryName}:}{A string describing the category.
Usually set automatically by subclasses. For example "GO".}
\item{\code{pvalueCutoff}:}{The p-value to use as a cutoff for
significance for testing methods that require it. This value
will also be passed on to the result instance and used for
display and counting of significant results. The default is
0.01.}
\item{\code{testDirection}:}{A string indicating whether the test
should be for overrepresentation (\code{"over"}) or
underrepresentation (\code{"under"}).}
\item{\code{datPkg}:}{Holds a DatPkg object which is of a
particular type that in turn varies with the kind of annotation
package this is.}
}
}
\section{Methods}{
\describe{
\item{hyperGTest}{\code{signature(p =
"HyperGParams")}: Perform hypergeometric tests to
assess overrepresentation of category ids in the gene set. See the
documentation for the generic function for details. This method
must be called with a proper subclass of
\code{HyperGParams}.}
\item{\code{geneIds(object)}, \code{geneIds(object) <- value}}{Accessors for
the gene identifiers that will be used as the selected gene
list.}
    \item{\code{annotation(object)}}{Accessor for annotation. If you want
to change the annotation for an existing instance, use the
replacement form.}
\item{\code{ontology(object)}}{Accessor for GO ontology.}
\item{\code{organism(object)}}{Accessor for the organism character
string used as an identifier in \code{DatPkg}.}
\item{\code{pvalueCutoff(r)}, \code{pvalueCutoff(r) <-
value}}{Accessor for the p-value cutoff. When setting,
\code{value} should be a numeric value between zero and one.}
\item{\code{testDirection}}{Accessor for the test direction. When setting,
\code{value} must be either "over" or "under".}
\item{\code{universeGeneIds(r)}}{accessor for vector of gene
identifiers.}
}
}
\author{S. Falcon}
\seealso{
\code{\link{HyperGResult-class}}
\code{\link{GOHyperGParams-class}}
\code{\link{KEGGHyperGParams-class}}
\code{\link{hyperGTest}}
}
\keyword{classes}
|
/man/HyperGParams-class.Rd
|
no_license
|
Bioconductor/Category
|
R
| false
| false
| 4,698
|
rd
|
\name{HyperGParams-class}
\docType{class}
%% I don't know where these really belong :-(
\alias{conditional}
\alias{conditional<-}
\alias{geneIds<-}
\alias{ontology}
\alias{ontology<-}
\alias{pvalueCutoff<-}
\alias{testDirection<-}
\alias{universeGeneIds}
%% end :-(
\alias{HyperGParams-class}
\alias{initialize,HyperGParams-method}
\alias{geneIds,HyperGParams-method}
\alias{geneIds<-,HyperGParams,logical-method}
\alias{geneIds<-,HyperGParams,ANY-method}
\alias{annotation,HyperGParams-method}
\alias{annotation<-,HyperGParams,character-method}
\alias{conditional,HyperGParams-method}
\alias{ontology,HyperGParams-method}
\alias{pvalueCutoff,HyperGParams-method}
\alias{pvalueCutoff<-,HyperGParams-method}
\alias{testDirection,HyperGParams-method}
\alias{testDirection<-,HyperGParams-method}
\alias{universeGeneIds,HyperGParams-method}
\alias{categoryName}
\alias{categoryName,HyperGParams-method}
\alias{organism,HyperGParams-method}
\alias{makeValidParams,HyperGParams-method}
\alias{show,HyperGParams-method}
\title{Class "HyperGParams"}
\description{
An abstract (VIRTUAL) parameter class for representing all parameters
needed by a method specializing the \code{hyperGTest}
generic. You should only use subclasses of this class directly.
}
\section{Objects from the Class}{
Objects of this class cannot be instantiated directly.
}
\section{Slots}{
\describe{
\item{\code{geneIds}:}{Object of class \code{"ANY"}: A vector of
gene identifiers. Numeric and character vectors are probably the
only things that make sense. These are the gene ids for the
selected gene set.}
\item{\code{universeGeneIds}:}{Object of class \code{"ANY"}: A
vector of gene ids in the same format as \code{geneIds} defining a
subset of the gene ids on the chip that will be used as the
universe for the hypergeometric calculation. If this is
\code{NULL} or has length zero, then all gene ids on the chip will
be used.}
\item{\code{annotation}:}{Object of class
\code{"ANY"}. Functionally, this is either a string giving the name of the
annotation data package for the chip used to generate the data, or
the name of an annotation object downloaded using AnnotationHub.}
\item{\code{categorySubsetIds}:}{Object of class \code{"ANY"}:
If the test method supports it, can be used to specify a subset of
category ids to include in the test instead of all possible
category ids.}
\item{\code{categoryName}:}{A string describing the category.
Usually set automatically by subclasses. For example "GO".}
\item{\code{pvalueCutoff}:}{The p-value to use as a cutoff for
significance for testing methods that require it. This value
will also be passed on to the result instance and used for
display and counting of significant results. The default is
0.01.}
\item{\code{testDirection}:}{A string indicating whether the test
should be for overrepresentation (\code{"over"}) or
underrepresentation (\code{"under"}).}
\item{\code{datPkg}:}{Holds a DatPkg object which is of a
particular type that in turn varies with the kind of annotation
package this is.}
}
}
\section{Methods}{
\describe{
\item{hyperGTest}{\code{signature(p =
"HyperGParams")}: Perform hypergeometric tests to
assess overrepresentation of category ids in the gene set. See the
documentation for the generic function for details. This method
must be called with a proper subclass of
\code{HyperGParams}.}
\item{\code{geneIds(object)}, \code{geneIds(object) <- value}}{Accessors for
the gene identifiers that will be used as the selected gene
list.}
    \item{\code{annotation(object)}}{Accessor for annotation. If you want
to change the annotation for an existing instance, use the
replacement form.}
\item{\code{ontology(object)}}{Accessor for GO ontology.}
\item{\code{organism(object)}}{Accessor for the organism character
string used as an identifier in \code{DatPkg}.}
\item{\code{pvalueCutoff(r)}, \code{pvalueCutoff(r) <-
value}}{Accessor for the p-value cutoff. When setting,
\code{value} should be a numeric value between zero and one.}
\item{\code{testDirection}}{Accessor for the test direction. When setting,
\code{value} must be either "over" or "under".}
    \item{\code{universeGeneIds(r)}}{Accessor for the vector of gene
      identifiers.}
}
}
\author{S. Falcon}
\seealso{
\code{\link{HyperGResult-class}}
\code{\link{GOHyperGParams-class}}
\code{\link{KEGGHyperGParams-class}}
\code{\link{hyperGTest}}
}
\keyword{classes}
|
#' Microaggregation
#'
#' Function to perform various methods of microaggregation.
#'
#' On \url{http://neon.vb.cbs.nl/casc/Glossary.htm} one can find the
#' \dQuote{official} definition of microaggregation:
#'
#' Records are grouped based on a proximity measure of variables of interest,
#' and the same small groups of records are used in calculating aggregates for
#' those variables. The aggregates are released instead of the individual
#' record values.
#'
#' The recommended method is \dQuote{rmd} which forms the proximity using
#' multivariate distances based on robust methods. It is an extension of the
#' well-known method \dQuote{mdav}. However, when computational speed is
#' important, method \dQuote{mdav} is the preferable choice.
#'
#' While for the proximity measure very different concepts can be used, the
#' aggregation itself is naturally done with the arithmetic mean.
#' Nevertheless, other measures of location can be used for aggregation,
#' especially when the group size for aggregation has been taken higher than 3.
#' Since the median seems to be unsuitable for microaggregation because of
#' being highly robust, other measures which are included can be chosen. If a
#' complex sample survey is microaggregated, the corresponding sampling weights
#' should be determined to either aggregate the values by the weighted
#' arithmetic mean or the weighted median.
#'
#' This function contains also a method with which the data can be clustered
#' with a variety of different clustering algorithms. Clustering observations
#' before applying microaggregation might be useful. Note, that the data are
#' automatically standardised before clustering.
#'
#' The usage of clustering method \sQuote{Mclust} requires package mclust02,
#' which must be loaded first. The package is not loaded automatically, since
#' the package is not under GPL but comes with a different licence.
#'
#' There are also some projection methods for microaggregation included. The
#' robust version \sQuote{pppca} or \sQuote{clustpppca} (clustering at first)
#' are fast implementations and provide the best results almost every time.
#'
#' Univariate statistics are preserved best with the individual ranking method
#' (we called them \sQuote{onedims}, however, often this method is named
#' \sQuote{individual ranking}), but multivariate statistics are strongly
#' affected.
#'
#' With method \sQuote{simple} one can apply microaggregation directly on the
#' (unsorted) data. It is useful for the comparison with other methods as a
#' benchmark, i.e. replies the question how much better is a sorting of the
#' data before aggregation.
#'
#' @name microaggregation
#' @docType methods
#' @param obj either an object of class \code{\link{sdcMicroObj-class}} or a \code{data.frame}
#' @param variables variables to microaggregate. For \code{NULL}: If obj is of class
#' sdcMicroObj, all numerical key variables are chosen per default. For
#' \code{data.frames}, all columns are chosen per default.
#' @param aggr aggregation level (default=3)
#' @param strata_variables for \code{data.frames}, by-variables for applying microaggregation only
#' within strata defined by the variables. For \code{\link{sdcMicroObj-class}}-objects, the
#' stratification-variable defined in slot \code{@strataVar} is used. This slot can be changed any
#' time using \code{strataVar<-}.
#' @param method pca, rmd, onedims, single, simple, clustpca, pppca,
#' clustpppca, mdav, clustmcdpca, influence, mcdpca
#' @param nc number of cluster, if the chosen method performs cluster analysis
#' @param weights sampling weights. If obj is of class sdcMicroObj the vector
#' of sampling weights is chosen automatically. If determined, a weighted
#' version of the aggregation measure is chosen automatically, e.g. weighted
#' median or weighted mean.
#' @param clustermethod clustermethod, if necessary
#' @param measure aggregation statistic, mean, median, trim, onestep (default=mean)
#' @param trim trimming percentage, if measure=trim
#' @param varsort variable for sorting, if method=single
#' @param transf transformation for data x
#' @return If \sQuote{obj} was of class \code{\link{sdcMicroObj-class}} the corresponding
#' slots are filled, like manipNumVars, risk and utility. If \sQuote{obj} was
#' of class \dQuote{data.frame}, an object of class \dQuote{micro} with following entities is returned:
#' \itemize{
#' \item{\code{x}: }{original data}
#' \item{\code{mx}: }{the microaggregated dataset}
#' \item{\code{method}: }{method}
#' \item{\code{aggr}: }{aggregation level}
#' \item{\code{measure}: }{proximity measure for aggregation}}
#' @note if only one variable is specified, \code{\link{mafast}} is applied and argument \code{method} is ignored.
#' Parameters \code{measure} are ignored for methods \code{mdav} and \code{rmd}.
#' @author Matthias Templ, Bernhard Meindl
#'
#' For method \dQuote{mdav}: This work is being supported by the International
#' Household Survey Network (IHSN) and funded by a DGF Grant provided by the
#' World Bank to the PARIS21 Secretariat at the Organisation for Economic
#' Co-operation and Development (OECD). This work builds on previous work
#' which is elsewhere acknowledged.
#'
#' Author for the integration of the code for mdav in R: Alexander Kowarik.
#' @seealso \code{\link{summary.micro}}, \code{\link{plotMicro}},
#' \code{\link{valTable}}
#' @references
#' Templ, M. and Meindl, B., \emph{Robust Statistics Meets SDC: New Disclosure
#' Risk Measures for Continuous Microdata Masking}, Lecture Notes in Computer
#' Science, Privacy in Statistical Databases, vol. 5262, pp. 113-126, 2008.
#'
#' Templ, M. \emph{Statistical Disclosure Control for Microdata Using the
#' R-Package sdcMicro}, Transactions on Data Privacy, vol. 1, number 2, pp.
#' 67-85, 2008. \url{http://www.tdp.cat/issues/abs.a004a08.php}
#'
#' Templ, M. \emph{New Developments in Statistical Disclosure Control and
#' Imputation: Robust Statistics Applied to Official Statistics},
#' Suedwestdeutscher Verlag fuer Hochschulschriften, 2009, ISBN: 3838108280,
#' 264 pages.
#'
#' Templ, M. Statistical Disclosure Control for Microdata: Methods and Applications in R.
#' \emph{Springer International Publishing}, 287 pages, 2017. ISBN 978-3-319-50272-4. \doi{10.1007/978-3-319-50272-4}
#'
#' Templ, M. and Meindl, B. and Kowarik, A.: \emph{Statistical Disclosure Control for
#' Micro-Data Using the R Package sdcMicro}, Journal of Statistical Software,
#' 67 (4), 1--36, 2015.
#' @keywords manip
#' @rdname microaggregation
#' @export
#' @examples
#' data(Tarragona)
#' m1 <- microaggregation(Tarragona, method='onedims', aggr=3)
#' ## summary(m1)
#' data(testdata)
#' m2 <- microaggregation(testdata[1:100,c('expend','income','savings')],
#' method='mdav', aggr=4)
#' summary(m2)
#'
#' ## for objects of class sdcMicro:
#' ## no stratification because @strataVar is NULL
#' data(testdata2)
#' sdc <- createSdcObj(testdata2,
#' keyVars=c('urbrur','roof','walls','water','electcon','sex'),
#' numVars=c('expend','income','savings'), w='sampling_weight')
#' sdc <- microaggregation(sdc, variables=c("expend","income"))
#'
#' ## with stratification by 'relat'
#' strataVar(sdc) <- "relat"
#' sdc <- microaggregation(sdc, variables=c("savings"))
microaggregation <- function(obj, variables=NULL, aggr=3, strata_variables=NULL,
  method="mdav", weights=NULL, nc=8, clustermethod="clara",
  measure="mean", trim=0, varsort=1, transf="log") {
  # 'strata_variables' only applies to data.frame input; sdcMicroObj input
  # takes its stratification from slot 'strataVar', so warn when it is set.
  strata_arg_ignored <- !is.data.frame(obj) && !is.null(strata_variables)
  if (strata_arg_ignored) {
    message("Argument 'strata_variables' is ignored. Only variables specified in slot 'strataVar' (if any) of the input object are used!\n")
  }
  # Delegate to the internal S4 generic, which dispatches on class(obj).
  microaggregationX(obj=obj, variables=variables, aggr=aggr,
    strata_variables=strata_variables, method=method, weights=weights,
    nc=nc, clustermethod=clustermethod, measure=measure, trim=trim,
    varsort=varsort, transf=transf)
}
# Internal S4 generic behind microaggregation(); methods are defined below for
# sdcMicroObj and data.frame input. Defaults mirror microaggregation() itself.
setGeneric("microaggregationX", function(obj, variables=NULL, aggr=3, strata_variables=NULL,
  method="mdav", weights=NULL, nc=8, clustermethod="clara",
  measure="mean", trim=0, varsort=1, transf="log") {
  standardGeneric("microaggregationX")
})
# sdcMicroObj method: pulls the (already manipulated) numeric key variables,
# the stratification variable(s) and the sampling weights out of the object,
# runs microaggregationWORK() and writes the result back into the object,
# recomputing risk and utility measures afterwards.
setMethod(f="microaggregationX", signature=c("sdcMicroObj"), definition=function(obj,
  variables=NULL, aggr=3, method="mdav", nc=8, clustermethod="clara",
  measure="mean", trim=0, varsort=1, transf="log") {
  x <- get.sdcMicroObj(obj, type="manipNumVars")
  # default: microaggregate all numerical key variables
  if (is.null(variables)) {
    variables <- colnames(x)
  }
  strataVars <- get.sdcMicroObj(obj, type="strataVar")
  if (length(strataVars) > 0) {
    # append the stratification column(s) from the original data
    sx <- get.sdcMicroObj(obj, type="origData")[, strataVars, drop=FALSE]
    x <- cbind(x, sx)
    # NOTE(review): only the last appended column name is kept here, so with
    # more than one strata variable the others are silently dropped -- confirm
    # whether multi-variable stratification is intended to work for this class.
    strataVars <- utils::tail(colnames(x), 1)
  }
  weights <- get.sdcMicroObj(obj, type="weightVar")
  if (!is.null(weights)) {
    # resolve the weight-variable index to the actual weight vector
    weights <- get.sdcMicroObj(obj, type="origData")[, weights]
  }
  if (any(weights < 0)) {
    # negative weights are recorded as a warning on the object, not an error
    warnMsg <- "negative weights have been detected!\n"
    obj <- addWarning(obj, warnMsg=warnMsg, method="microaggregation", variable=NA)
  }
  res <- microaggregationWORK(x, variables=variables, aggr=aggr, strata_variables=strataVars,
    method=method, weights=weights, nc=nc, clustermethod=clustermethod,
    measure=measure, trim=trim, varsort=varsort, transf=transf)
  obj <- nextSdcObj(obj)
  # replace only the microaggregated columns; keep the rest untouched
  x[, variables] <- res$mx[, variables]
  obj <- set.sdcMicroObj(obj, type="manipNumVars", input=list(as.data.frame(x[,
    colnames(obj@origData)[obj@numVars], drop=FALSE])))
  # refresh disclosure-risk and data-utility measures after the manipulation
  obj <- dRisk(obj)
  obj <- dUtility(obj)
  obj
})
# data.frame method: every column is microaggregated when 'variables' is NULL.
setMethod(f="microaggregationX", signature=c("data.frame"), definition=function(obj,
  variables=NULL, aggr=3, strata_variables=NULL, method="mdav", weights=NULL,
  nc=8, clustermethod="clara", measure="mean", trim=0, varsort=1,
  transf="log") {
  # default to all columns of the input
  vars <- if (is.null(variables)) colnames(obj) else variables
  microaggregationWORK(x=obj, variables=vars, aggr=aggr,
    strata_variables=strata_variables, method=method, weights=weights,
    nc=nc, clustermethod=clustermethod, measure=measure, trim=trim,
    varsort=varsort, transf=transf)
})
microaggregationWORK <- function(x, variables=colnames(x), method="mdav", aggr=3,
weights=NULL, nc=8, clustermethod="clara", measure="mean",
trim=0, varsort=1, transf="log", strata_variables=NULL) {
## helper-functions used in several micro_xxx() functions
factorOfTotals <- function(x, aggr) {
n <- dim(x)[1]
abgerundet <- floor(n/aggr)
fot <- n/abgerundet
return(fot)
}
weightedQuantile <- function(x, weights=NULL, probs=seq(0, 1, 0.25), sorted=FALSE, na.rm=FALSE) {
if (!is.numeric(x)) {
stop("'x' must be a numeric vector")
}
n <- length(x)
if (n == 0 || (!isTRUE(na.rm) && any(is.na(x)))) {
return(rep.int(NA, length(probs)))
}
if (!is.null(weights)) {
if (!is.numeric(weights)) {
stop("'weights' must be a numeric vector")
} else if (length(weights) != n) {
stop("'weights' must have the same length as 'x'")
} else if (!all(is.finite(weights))) {
stop("missing or infinite weights")
}
if (any(weights < 0)) {
warnMsg <- "negative weights have been detected!\n"
warning(warnMsg)
}
if (!is.numeric(probs) || all(is.na(probs)) || isTRUE(any(probs < 0 | probs > 1))) {
stop("'probs' must be a numeric vector with values in [0,1]")
}
if (all(weights == 0)) {
warnMsg <- "all weights equal 0!\n"
obj <- addWarning(obj, warnMsg=warnMsg, method="microaggregation", variable=NA)
warning(warnMsg)
return(rep.int(0, length(probs)))
}
}
if (isTRUE(na.rm)) {
indices <- !is.na(x)
x <- x[indices]
if (!is.null(weights))
weights <- weights[indices]
}
if (!isTRUE(sorted)) {
order <- order(x)
x <- x[order]
weights <- weights[order]
}
if (is.null(weights))
rw <- (1:n)/n else rw <- cumsum(weights)/sum(weights)
q <- sapply(probs, function(p) {
if (p == 0)
return(x[1]) else if (p == 1)
return(x[n])
select <- min(which(rw >= p))
if (rw[select] == p)
mean(x[select:(select + 1)]) else x[select]
})
return(unname(q))
}
  # Weighted median: convenience wrapper around weightedQuantile() with p = 0.5.
  weightedMedian <- function(x, weights=NULL, sorted=FALSE, na.rm=FALSE) {
    weightedQuantile(x, weights, probs=0.5, sorted=sorted, na.rm=na.rm)
  }
indexMicro <- function(x, aggr) {
n <- dim(x)[1]
if (n < 2 * aggr) {
stop(paste0("Too less observations (", n, ") for aggregate =", aggr,"\n"))
}
aa <- seq(1, n, aggr)
j <- 1
teiler <- n/aggr
d1 <- 1:n
index <- list()
if (teiler %in% 1:n) {
for (i in 1:length(aa)) {
index[[i]] <- d1[j:(j + aggr - 1)]
j <- j + aggr
}
} else {
for (i in 1:(length(aa) - 2)) {
index[[i]] <- d1[j:(j + aggr - 1)]
j <- j + aggr
}
index[[i + 1]] <- d1[(j):n]
}
index
}
  # Compute one aggregate row per group in 'index' (a list of row-index
  # vectors, as returned by indexMicro()). The aggregation statistic is chosen
  # via 'measure' ("mean", "median", "trim", "onestep"); the free variable
  # 'weights' is captured from the enclosing microaggregationWORK() scope and
  # switches "mean"/"median" to their weighted counterparts when non-NULL.
  # Returns a matrix with length(index) rows and the column names of 'x'.
  means <- function(x, index, measure, trim=0) {
    m <- matrix(ncol=ncol(x), nrow=length(index))
    if (measure == "mean" & is.null(weights)) {
      for (i in 1:length(index)) {
        m[i, ] <- colMeans(x[index[[i]], ])
      }
    }
    if (measure == "median" & is.null(weights)) {
      for (i in 1:length(index)) {
        m[i, ] <- apply(x[index[[i]], ], 2, median)
      }
    }
    if (measure == "mean" & !is.null(weights)) {
      for (i in 1:length(index)) {
        m[i, ] <- apply(x[index[[i]], ], 2, function(x) stats::weighted.mean(x,
          w=weights[index[[i]]]))
      }
    }
    if (measure == "median" & !is.null(weights)) {
      for (i in 1:length(index)) {
        m[i, ] <- apply(x[index[[i]], ], 2, function(x) weightedMedian(x,
          weights=weights[index[[i]]]))
      }
    }
    if (measure == "trim") {
      # NOTE(review): the inner loop over j recomputes the identical trimmed
      # means length(index[[i]]) times -- looks redundant; confirm before
      # simplifying.
      for (i in 1:length(index)) {
        for (j in 1:length(index[[i]])) {
          m[i, ] <- apply(x[index[[i]], ], 2, mean, trim=trim)
        }
      }
    }
    if (measure == "onestep") {
      # one-step estimator: winsorize each group at median +/- c * mad,
      # then average the winsorized values
      y <- x
      constant <- 3/1.486
      for (i in 1:length(index)) {
        m1 <- apply(x[index[[i]], ], 2, median)
        m2 <- apply(x[index[[i]], ], 2, mad)
        limit1 <- m1 + constant * m2
        limit2 <- m1 - constant * m2
        for (ii in 1:length(index[[i]])) {
          if (any(x[index[[i]][ii], ] > limit1)) {
            w <- which(x[index[[i]][ii], ] > limit1)
            le <- length(w)
            y[index[[i]][ii], w] <- limit1[w]
          }
          if (any(x[index[[i]][ii], ] < limit2)) {
            w <- which(x[index[[i]][ii], ] < limit2)
            le <- length(w)
            y[index[[i]][ii], w] <- limit2[w]
          }
          m[i, ] <- colMeans(y[index[[i]], ])
        }
      }
    }
    colnames(m) <- colnames(x)
    return(m)
  }
blowup <- function(x, mr, aggr) {
n <- dim(x)[1]
aa <- seq(1, n, aggr)
j <- 1
teiler <- n/aggr
d1 <- 1:n
xx <- matrix(0, ncol=ncol(x), nrow=nrow(x))
if (teiler %in% 1:n) {
for (i in 1:length(aa)) {
for (s in j:(j + aggr - 1)) {
xx[s, ] <- as.matrix(mr[i, , drop=FALSE])
}
j <- j + aggr
}
} else {
for (i in 1:(length(aa) - 2)) {
for (s in j:(j + aggr - 1)) {
xx[s, ] <- as.matrix(mr[i, , drop=FALSE])
}
j <- j + aggr
}
for (s in j:n) {
xx[s, ] <- mr[i + 1, ]
}
}
rownames(xx) <- rownames(x)
xx
}
clust <- function(x, nc, clustermethod="clara", transf="log") {
if (transf == "none") {
y <- x
}
if (transf == "log") {
y <- scale(log(x))
}
if (transf == "boxcox") {
lambda <- car::powerTransform(x)$lambda
y <- scale(car::bcPower(x, lambda))
}
if (clustermethod == "clara") {
a <- clara(x, nc)
clustresult <- a$clust
centers <- a$med
size <- a$clusinfo[, 1]
}
if (clustermethod == "pam") {
a <- pam(x, nc)
clustresult <- a$clust
centers <- a$med
size <- a$clusinfo[, 1]
}
if (clustermethod == "kmeans") {
a <- stats::kmeans(x, nc)
centers <- a$centers
clustresult <- a$cluster
size <- a$size
}
if (clustermethod == "cmeans") {
a <- e1071::cmeans(x, nc)
centers <- a$centers
clustresult <- a$cluster
size <- a$size
res@mem <- a$mem
}
if (clustermethod == "bclust") {
a <- e1071::bclust(x, nc)
centers <- a$centers
groesse <- rep(0, nc)
for (i in seq(nc)) {
groesse[i] <- length(which(a$cluster == i))
}
size <- groesse
clustresult <- a$cluster
}
list(centers=centers, clustresult=clustresult, nc=nc)
}
  # Robust PCA via projection pursuit (Croux/Filzmoser-style algorithm):
  # successively finds the direction maximising a robust scale (MAD) of the
  # projected, median-centered data, deflating the search space after each
  # component. X: data matrix; k: number of components (0 = min(n, p));
  # sca: robust scale estimator (only "mad" is implemented); scores: also
  # return the projected data? Returns a list with scale, loadings and
  # (optionally) scores.
  prcompRob <- function(X, k=0, sca="mad", scores=TRUE) {
    n <- nrow(X)
    p <- ncol(X)
    if (k == 0) {
      p1 <- min(n, p)
    } else {
      p1 <- k
    }
    S <- rep(1, p1)
    V <- matrix(1:(p * p1), ncol=p1, nrow=p)
    P <- diag(p)
    # center robustly at the columnwise median
    m <- apply(X, 2, median)
    Xcentr <- scale(X, center=m, scale=FALSE)
    for (k in 1:p1) {
      # candidate directions: the (deflated) centered observations, normalised
      B <- Xcentr %*% P
      Bnorm <- sqrt(apply(B^2, 1, sum))
      A <- diag(1/Bnorm) %*% B
      Y <- A %*% P %*% t(X)
      if (sca == "mad")
        s <- apply(Y, 1, mad)
      # if (sca == 'tau') s <- apply(Y, 1, scale.tau) if (sca == 'A') s <- apply(Y, 1,
      # scale.a)
      # pick the direction with the largest robust scale of the projections
      j <- order(s)[n]
      S[k] <- s[j]
      V[, k] <- A[j, ]
      # fix the sign so that the first loading is non-negative
      if (V[1, k] < 0)
        V[, k] <- (-1) * V[, k]
      # deflate: remove the found direction from the search space
      P <- P - (V[, k] %*% t(V[, k]))
    }
    if (scores) {
      list(scale=S, loadings=V, scores=Xcentr %*% V)
    } else list(scale=S, loadings=V)
  }
# implementations of microaggregation methods
micro_simple <- function(x, aggr, measure, trim) {
index <- indexMicro(x, aggr)
m <- means(x=x, index=index, measure=measure, trim=trim)
mr <- round(m)
blowxm <- blowup(x, m, aggr)
return(list(x=x, method="simple", clustering=FALSE, aggr=aggr, nc=NULL,
xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
varsort=NULL, transf=NULL, blowup=TRUE, blowxm=blowxm, fot=0))
}
micro_single <- function(x, aggr, measure, trim, varsort) {
sortvec <- sort(x[, varsort], index.return=TRUE)$ix
xx <- x[sortvec, ]
index <- indexMicro(xx, aggr)
m <- means(x=xx, index=index, measure=measure, trim=trim)
mr <- round(m)
blowxm <- blowup(x, m, aggr)
rownames(blowxm) <- rownames(xx)
return(list(x=x, method="single", clustering=FALSE, aggr=aggr, nc=NULL,
xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
varsort=varsort, transf=NULL, blowup=TRUE, blowxm=blowxm, fot=0))
}
micro_onedims <- function(x, aggr, measure, trim) {
i <- dim(x)[2]
xx <- sapply(1:i, function(i) {
x[order(x[, i]), i]
})
xxx <- sapply(1:i, function(i) {
rank(x[, i], ties.method="first")
})
index <- indexMicro(xx, aggr)
m <- means(x=xx, index=index, measure=measure, trim=trim)
mr <- round(m)
b <- blowup(x, m, aggr)
y <- x
for (i in 1:dim(x)[2]) {
y[, i] <- b[xxx[, i], i]
}
return(list(x=x, method="onedims", clustering=FALSE, aggr=aggr, nc=NULL,
xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
varsort=NULL, transf=NULL, blowup=TRUE, blowxm=y, fot=0))
}
micro_pca <- function(x, aggr, measure, trim) {
p <- stats::princomp(scale(x))
s1 <- sort(p$scores[, 1], index.return=TRUE)$ix
xx <- x[s1, ]
index <- indexMicro(xx, aggr)
m <- means(x=xx, index=index, measure=measure, trim=trim)
mr <- round(m)
blowxm <- blowup(x, m, aggr)
rownames(blowxm) <- rownames(xx)
return(list(x=x, method="pca", clustering=FALSE, aggr=aggr, nc=NULL,
xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
varsort=NULL, transf=NULL, blowup=TRUE, blowxm=blowxm, fot=0))
}
micro_mcdpca <- function(x, aggr, measure, trim) {
x.mcd <- cov.mcd(x, cor=TRUE)
x.scale <- scale(x, x.mcd$center, sqrt(diag(x.mcd$cor)))
p <- stats::princomp(x.scale, covmat=x.mcd)
s1 <- sort(p$scores[, 1], index.return=TRUE)$ix
xx <- x[s1, ]
index <- indexMicro(xx, aggr)
m <- means(x=xx, index=index, measure=measure, trim=trim)
mr <- round(m)
blowxm <- blowup(x, m, aggr)
rownames(blowxm) <- rownames(xx)
return(list(x=x, method="mcdpca", clustering=FALSE, aggr=aggr, nc=NULL,
xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
varsort=NULL, transf=NULL, blowup=TRUE, blowxm=blowxm, fot=0))
}
micro_pppca <- function(x, aggr, measure, trim) {
p <- prcompRob(x)
s1 <- sort(p$scores[, 1], index.return=TRUE)$ix
xx <- x[s1, ]
index <- indexMicro(xx, aggr)
m <- means(x=xx, index=index, measure=measure, trim=trim)
mr <- round(m)
blowxm <- blowup(x, m, aggr)
rownames(blowxm) <- rownames(xx)
return(list(x=x, method="pppca", clustering=FALSE, aggr=aggr, nc=NULL,
xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
varsort=NULL, transf=NULL, blowup=TRUE, blowxm=blowxm, fot=0))
}
  # Cluster the data first, then sort each cluster by its most "influential"
  # variable (the variable with the largest center value for that cluster)
  # and aggregate within the concatenated, cluster-sorted data.
  micro_influence <- function(x, aggr, measure, trim, clustermethod, transf, nc) {
    ac.scale <- clust(x=x, nc=nc, clustermethod=clustermethod, transf=transf)
    cent <- matrix(ac.scale$centers, ncol=nc, byrow=TRUE)
    j <- matrix(ncol=1, nrow=nc)
    vmax <- matrix(ncol=1, nrow=nc)
    for (i in 1:nc) {
      # variable index with the maximal center value for cluster i
      j[i, ] <- max(cent[, i])
      vmax[i, ] <- which(cent[, i] == j[i, ])
    }
    ncols <- c(1:ncol(x))
    xx <- list()
    for (i in 1:nc) {
      # rows of cluster i, sorted by the cluster's dominant variable
      w <- which(ac.scale$clustresult == i)
      s <- x[w, , drop=FALSE]
      xx[[i]] <- s[order(s[, vmax[i]]), ]
    }
    yy <- NULL
    for (i in 1:nc) {
      # stack the sorted clusters back into one matrix, keeping row names
      yy <- rbind(yy, matrix(unlist(xx[[i]]), ncol=ncol(x), dimnames=list(rownames(xx[[i]]),
        colnames(xx[[i]]))))
    }
    xx <- yy
    index <- indexMicro(xx, aggr)
    m <- means(x=xx, index=index, measure=measure, trim=trim)
    mr <- round(m)
    blowxm <- blowup(x, m, aggr)
    rownames(blowxm) <- rownames(yy)
    return(list(x=x, method="influence", clustering=TRUE, aggr=aggr,
      nc=ac.scale$nc, xm=m, roundxm=mr, clustermethod=clustermethod,
      measure=measure, trim=trim, varsort=NULL, transf=transf, blowup=TRUE,
      blowxm=blowxm, fot=0))
  }
  # Cluster first, then sort each cluster by its first principal component
  # (falling back to a single-variable sort for clusters smaller than the
  # number of columns) and aggregate within the concatenated data.
  # NOTE: 'varsort' is a free variable captured from microaggregationWORK().
  micro_clustpca <- function(x, aggr, measure, trim, clustermethod, transf, nc) {
    ac.scale <- clust(x=x, nc=nc, clustermethod=clustermethod, transf=transf)
    cent <- matrix(ac.scale$centers, ncol=nc, byrow=TRUE)
    xx <- list()
    for (i in 1:nc) {
      w <- which(ac.scale$clustresult == i)
      if (length(w) < dim(x)[2]) {
        # too few rows for a PCA: sort by the 'varsort' column instead
        y <- x[w, , drop=FALSE]
        xx[[i]] <- y[order(y[, varsort]), ]
      } else {
        p <- stats::princomp(scale(x[w, , drop=FALSE]))$scores[, 1]
        psortind <- sort(p, index.return=TRUE)$ix
        y <- x[w, , drop=FALSE]
        xx[[i]] <- y[psortind, ]
      }
    }
    yy <- NULL
    for (i in 1:nc) {
      # stack the sorted clusters back into one matrix, keeping row names
      yy <- rbind(yy, matrix(unlist(xx[[i]]), ncol=ncol(x), dimnames=list(rownames(xx[[i]]),
        colnames(xx[[i]]))))
    }
    xx <- yy
    index <- indexMicro(xx, aggr)
    m <- means(x=xx, index=index, measure=measure, trim=trim)
    mr <- round(m)
    blowxm <- blowup(x, m, aggr)
    rownames(blowxm) <- rownames(xx)
    return(list(x=x, method="clustpca", clustering=TRUE, aggr=aggr, nc=ac.scale$nc,
      xm=m, roundxm=mr, clustermethod=clustermethod, measure=measure,
      trim=trim, varsort=NULL, transf=transf, blowup=TRUE, blowxm=blowxm,
      fot=0))
  }
  # Like micro_clustpca(), but each sufficiently large cluster is sorted by
  # its first robust (MCD-based) principal component.
  # NOTE: 'varsort' is a free variable captured from microaggregationWORK().
  micro_clustmcdpca <- function(x, aggr, measure, trim, clustermethod, transf, nc) {
    ac.scale <- clust(x=x, nc=nc, clustermethod=clustermethod, transf=transf)
    cent <- matrix(ac.scale$centers, ncol=nc, byrow=TRUE)
    xx <- list()
    for (i in 1:nc) {
      w <- which(ac.scale$clustresult == i)
      if (length(w) < dim(x)[2]) {
        # too few rows for MCD/PCA: sort by the 'varsort' column instead
        y <- x[w, , drop=FALSE]
        xx[[i]] <- y[order(y[, varsort]), ]
      } else {
        # NOTE(review): leftover debug output -- consider removing
        message("length(w):", length(w), "\n")
        x.mcd <- cov.mcd(x[w, ], cor=TRUE)
        x.scale <- scale(x[w, ], x.mcd$center, sqrt(diag(x.mcd$cor)))
        p <- stats::princomp(x.scale, covmat=x.mcd)$scores[, 1]
        psortind <- sort(p, index.return=TRUE)$ix
        y <- x[w, , drop=FALSE]
        xx[[i]] <- y[psortind, ]
      }
    }
    yy <- NULL
    for (i in 1:nc) {
      # stack the sorted clusters back into one matrix, keeping row names
      yy <- rbind(yy, matrix(unlist(xx[[i]]), ncol=ncol(x), dimnames=list(rownames(xx[[i]]),
        colnames(xx[[i]]))))
    }
    xx <- yy
    index <- indexMicro(xx, aggr)
    m <- means(x=xx, index=index, measure=measure, trim=trim)
    mr <- round(m)
    blowxm <- blowup(x, m, aggr)
    rownames(blowxm) <- rownames(xx)
    return(list(x=x, method="clustmcdpca", clustering=TRUE, aggr=aggr,
      nc=ac.scale$nc, xm=m, roundxm=mr, clustermethod=clustermethod,
      measure=measure, trim=trim, varsort=NULL, transf=transf, blowup=TRUE,
      blowxm=blowxm, fot=0))
  }
  # Like micro_clustpca(), but each sufficiently large cluster is sorted by
  # its first projection-pursuit (robust) principal component.
  # NOTE: 'varsort' is a free variable captured from microaggregationWORK().
  micro_clustpppca <- function(x, aggr, measure, trim, clustermethod, transf, nc) {
    ac.scale <- clust(x=x, nc=nc, clustermethod=clustermethod, transf=transf)
    cent <- matrix(ac.scale$centers, ncol=nc, byrow=TRUE)
    xx <- list()
    for (i in 1:nc) {
      w <- which(ac.scale$clustresult == i)
      if (length(w) < dim(x)[2]) {
        # too few rows for a PCA: sort by the 'varsort' column instead
        y <- x[w, , drop=FALSE]
        xx[[i]] <- y[order(y[, varsort]), ]
      } else {
        p <- prcompRob(x[w, , drop=FALSE], 1)$scores
        psortind <- sort(p, index.return=TRUE)$ix
        y <- x[w, , drop=FALSE]
        xx[[i]] <- y[psortind, ]
      }
    }
    yy <- NULL
    for (i in 1:nc) {
      # stack the sorted clusters back into one matrix, keeping row names
      yy <- rbind(yy, matrix(unlist(xx[[i]]), ncol=ncol(x), dimnames=list(rownames(xx[[i]]),
        colnames(xx[[i]]))))
    }
    xx <- yy
    index <- indexMicro(xx, aggr)
    m <- means(x=xx, index=index, measure=measure, trim=trim)
    mr <- round(m)
    blowxm <- blowup(x, m, aggr)
    rownames(blowxm) <- rownames(xx)
    return(list(x=x, method="clustpppca", clustering=TRUE, aggr=aggr,
      nc=ac.scale$nc, xm=m, roundxm=mr, clustermethod=clustermethod,
      measure=measure, trim=trim, varsort=NULL, transf=transf, blowup=TRUE,
      blowxm=blowxm, fot=0))
  }
  # Robust-Mahalanobis-distance microaggregation ("rmd", extension of MDAV):
  # repeatedly take the not-yet-aggregated observation with the largest robust
  # Mahalanobis distance, group it with its aggr-1 nearest (Euclidean, on
  # standardised data) neighbours and replace the group by its mean; the
  # remaining observations form the last group.
  micro_rmd <- function(x, aggr) {
    # indices of the 'aggr' smallest entries of the distance vector 'ds'
    kn <- function(ds, aggr) {
      w <- rep(0, aggr)
      for (i in 1:aggr) {
        w[i] <- which.min(ds)
        ds[w[i]] <- NA
      }
      return(w)
    }
    y <- x
    # remember column means/sds so the standardisation can be undone at the end
    cm <- colMeans(x, na.rm=TRUE)
    csd <- apply(x, 2, sd, na.rm=TRUE)
    len <- nrow(y)
    y <- apply(y, 2, function(x) (x - mean(x, na.rm=TRUE))/sd(x, na.rm=TRUE))
    d <- as.matrix(stats::dist(y))
    # NOTE(review): set.seed() here makes covMcd deterministic but clobbers the
    # caller's RNG state as a side effect -- confirm before changing, since
    # removing it would alter results.
    set.seed(123)
    rr <- covMcd(y)
    md <- stats::mahalanobis(y, center=rr$center, cov=rr$cov)
    diag(d) <- 0
    for (i in 1:(floor(dim(x)[1]/aggr) - 1)) {
      # most outlying remaining observation and its aggr-1 nearest neighbours
      s <- which.max(md)
      w <- kn(d[, s], aggr)
      d[w, ] <- NA
      md[w] <- NA
      y[w, ] <- rep(colMeans(y[w, ]), each=aggr)
    }
    # all remaining (unassigned) rows form the final group
    w <- which(!is.na(d[, 1]))
    y[w, ] <- rep(colMeans(y[w, ]), each=length(w))
    # transform back to the original location/scale
    for (i in 1:dim(x)[2]) {
      y[, i] <- as.numeric((y[, i] * csd[i]) + cm[i])
    }
    return(list(x=x, method="rmd", clustering=FALSE, aggr=aggr, nc=NULL,
      xm=y, roundxm=round(y), clustermethod=NULL, measure=NULL, trim=NULL,
      varsort=NULL, transf=NULL, blowup=TRUE, blowxm=y, fot=0))
  }
micro_mdav <- function(x, aggr) {
resX <- mdav(x, variables=NULL, weights=NULL, K=aggr, missing=-999)
return(list(x=x, method="mdav", clustering=FALSE, aggr=aggr, nc=NULL,
xm=NULL, roundxm=NULL, clustermethod=NULL, measure="mean", trim=NULL,
varsort=NULL, transf=NULL, blowup=FALSE, blowxm=resX, fot=0))
}
  # ---- dispatch and recombination (body of microaggregationWORK) ----
  stopifnot(method %in% c("simple", "single", "onedims", "pca", "mcdpca", "pppca",
    "clustmcdpca", "clustpppca", "clustpca", "rmd", "mdav", "influence"))
  # numeric row names are used later to restore the original row order
  rownames(x) <- 1:nrow(x)
  # shortcut: a single variable is handled by mafast(); 'method' is ignored
  if (length(variables) == 1) {
    res <- list()
    # NOTE(review): eval(parse(...)) maps the measure name ("mean", ...) to
    # the function of that name; match.fun(measure) would be the safer idiom
    res$mx <- mafast(x, variables=variables, by=strata_variables, aggr=aggr,
      measure=eval(parse(text=measure)))
    res$x <- x
    res$method <- "mafast"
    res$aggr <- aggr
    res$measure <- measure
    res$fot <- factorOfTotals(x, aggr)
    class(res) <- "micro"
    return(res)
  }
  xall <- x
  # split the data into strata (one combined factor over all strata variables)
  if (!is.null(strata_variables)) {
    if (!all(strata_variables %in% colnames(x))) {
      stop("strata_variables are not found in the data set!")
    }
    byvar <- rep("", nrow(x))
    for (i in 1:length(strata_variables)) {
      byvar <- paste(byvar, x[, strata_variables[i]], sep="-")
    }
    xsp <- split(x, as.factor(byvar))
  } else {
    xsp <- list(dataset=x)
  }
  # apply the chosen method independently within each stratum
  reslist <- list()
  for (spind in 1:length(xsp)) {
    x <- xsp[[spind]][, variables, drop=FALSE]
    if (method == "simple") {
      res <- micro_simple(x=x, aggr=aggr, measure=measure, trim=trim)
    }
    if (method == "single") {
      res <- micro_single(x=x, aggr=aggr, measure=measure, trim=trim, varsort=varsort)
    }
    if (method == "onedims") {
      res <- micro_onedims(x=x, aggr=aggr, measure=measure, trim=trim)
    }
    if (method == "pca") {
      res <- micro_pca(x=x, aggr=aggr, measure=measure, trim=trim)
    }
    if (method == "mcdpca") {
      res <- micro_mcdpca(x=x, aggr=aggr, measure=measure, trim=trim)
    }
    if (method == "pppca") {
      res <- micro_pppca(x=x, aggr=aggr, measure=measure, trim=trim)
    }
    if (method == "influence") {
      res <- micro_influence(x=x, aggr=aggr, measure=measure, trim=trim,
        clustermethod=clustermethod, transf=transf, nc=nc)
    }
    if (method == "clustpca") {
      res <- micro_clustpca(x=x, aggr=aggr, measure=measure, trim=trim,
        clustermethod=clustermethod, transf=transf, nc=nc)
    }
    if (method == "clustmcdpca") {
      res <- micro_clustmcdpca(x=x, aggr=aggr, measure=measure, trim=trim,
        clustermethod=clustermethod, transf=transf, nc=nc)
    }
    if (method == "clustpppca") {
      res <- micro_clustpppca(x=x, aggr=aggr, measure=measure, trim=trim,
        clustermethod=clustermethod, transf=transf, nc=nc)
    }
    if (method == "rmd") {
      res <- micro_rmd(x=x, aggr=aggr)
    }
    if (method == "mdav") {
      res <- micro_mdav(x, aggr)
    }
    res$fot <- factorOfTotals(x, aggr)
    reslist[[spind]] <- res
  }
  # recombine per-stratum results (rbind the microaggregated blocks)
  res <- reslist[[1]]
  if (length(reslist) > 1) {
    blowxm <- vector()
    fot <- vector()
    for (i in 1:length(reslist)) {
      blowxm <- rbind(blowxm, reslist[[i]]$blowxm)
      fot <- c(fot, reslist[[i]]$fot)
    }
    res$x <- xall
    res$blowxm <- blowxm
    # strip the leading "-" that paste(..., sep="-") put before each level
    names(fot) <- substring(names(xsp), 2)
    res$fot <- fot
  }
  # restore the original row order via the numeric row names assigned above
  res$x <- res$x[order(as.numeric(rownames(res$x))), ]
  res$blowxm <- res$blowxm[order(as.numeric(rownames(res$blowxm))), ]
  res$blowxm <- res$blowxm[1:nrow(xall), ]
  class(res) <- "micro"
  res$mx <- as.data.frame(res$blowxm)
  colnames(res$mx) <- variables
  # expose only the documented entities of a "micro" object
  resv <- c("x", "mx", "method", "aggr", "measure")
  res1 <- list()
  for (v in resv) {
    res1[[v]] <- res[[v]]
  }
  class(res1) <- "micro"
  return(res1)
}
#' Print method for objects from class micro
#'
#' printing an object of class \code{micro}
#'
#' @param x object from class micro
#' @param \dots Additional arguments passed through.
#' @return information about method and aggregation level from objects of class
#' micro.
#' @author Matthias Templ
#' @seealso \code{\link{microaggregation}}
#' @keywords print
#' @method print micro
#' @export
#' @examples
#'
#' data(free1)
#' free1 <- as.data.frame(free1)
#' m1 <- microaggregation(free1[, 31:34], method='onedims', aggr=3)
#' m1
#'
print.micro <- function(x, ...) {
  # Header: the method and aggregation level that produced the object
  message(paste("\n Object created with method", x$method, "and aggregation level",
    x$aggr))
  message("\n -------------------------\n")
  message("x ... original values \n")
  print(summary(x$x))
  message("\n -------------------------\n")
  message("mx ... microaggregated values\n")
  print(summary(x$mx))
  message("\n -------------------------\n")
  message("Try names(your object from class micro) for more details")
  message("\n")
  # fix: S3 print methods must return their argument invisibly so piped and
  # nested calls keep working (previously returned message()'s NULL)
  invisible(x)
}
#' Summary method for objects from class micro
#'
#' Summary method for objects from class \sQuote{micro}.
#'
#' This function computes several measures of information loss, such as
#'
#' @param object objects from class micro
#' @param \dots Additional arguments passed through.
#' @return
#' \item{meanx }{A conventional summary of the original data}
#' \item{meanxm }{A conventional summary of the microaggregated data}
#' \item{amean }{average relative absolute deviation of means}
#' \item{amedian}{average relative absolute deviation of medians}
#' \item{aonestep }{average relative absolute deviation of onestep from median}
#' \item{devvar }{average relative absolute deviation of variances}
#' \item{amad }{average relative absolute deviation of the mad}
#' \item{acov }{average relative absolute deviation of covariances}
#' \item{arcov }{average relative absolute deviation of robust (with mcd) covariances}
#' \item{acor }{average relative absolute deviation of correlations}
#' \item{arcor }{average relative absolute deviation of robust (with mcd) correlations}
#' \item{acors }{average relative absolute deviation of rank-correlations}
#' \item{adlm }{average absolute deviation of lm regression coefficients (without intercept)}
#' \item{adlts}{average absolute deviation of lts regression coefficients (without intercept)}
#' \item{apcaload }{average absolute deviation of pca loadings}
#' \item{apppacaload }{average absolute deviation of robust (with projection pursuit approach) pca loadings}
#' \item{atotals }{average relative absolute deviation of totals}
#' \item{pmtotals }{average relative deviation of totals}
#' @author Matthias Templ
#' @seealso \code{\link{microaggregation}}, \code{\link{valTable}}
#' @references Templ, M. \emph{Statistical Disclosure Control for Microdata
#' Using the R-Package sdcMicro}, Transactions on Data Privacy, vol. 1, number
#' 2, pp. 67-85, 2008. \url{http://www.tdp.cat/issues/abs.a004a08.php}
#' @keywords print
#' @method summary micro
#' @export
#' @examples
#'
#' data(Tarragona)
#' m1 <- microaggregation(Tarragona, method='onedims', aggr=3)
#' ## summary(m1)
summary.micro <- function(object, ...) {
  # Compares the original data (object$x) with the microaggregated data
  # (object$mx) via a battery of information-loss measures: deviations of
  # means, medians, scales, covariance/correlation structure, regression
  # coefficients, PCA loadings and column totals, plus utility/risk measures
  # from the package (dUtility/dRisk/dRiskRMD -- defined elsewhere in sdcMicro).
  #
  # Robust PCA via projection pursuit: sequentially finds the direction
  # maximising a robust scale (mad) of the projected data, then deflates.
  prcompRob <- function(X, k=0, sca="mad", scores=TRUE) {
    ## Copyright: Croux and Filzmoser
    n <- nrow(X)
    p <- ncol(X)
    if (k == 0) {
      p1 <- min(n, p)  # default: extract as many components as possible
    } else {
      p1 <- k
    }
    S <- rep(1, p1)
    V <- matrix(1:(p * p1), ncol=p1, nrow=p)
    P <- diag(p)
    m <- apply(X, 2, median)
    # center at columnwise medians (robust centering)
    Xcentr <- scale(X, center=m, scale=FALSE)
    for (k in 1:p1) {
      B <- Xcentr %*% P
      Bnorm <- sqrt(apply(B^2, 1, sum))
      A <- diag(1/Bnorm) %*% B          # candidate directions: normed data rows
      Y <- A %*% P %*% t(X)             # projections of the data on each candidate
      if (sca == "mad")
        s <- apply(Y, 1, mad)
      # if (sca == 'tau') s <- apply(Y, 1, scale.tau) if (sca == 'A') s <- apply(Y, 1,
      # scale.a)
      j <- order(s)[n]                  # index of the direction with maximal robust scale
      S[k] <- s[j]
      V[, k] <- A[j, ]
      if (V[1, k] < 0)
        V[, k] <- (-1) * V[, k]         # sign convention: first loading non-negative
      P <- P - (V[, k] %*% t(V[, k]))   # deflation: remove found direction
    }
    if (scores) {
      list(scale=S, loadings=V, scores=Xcentr %*% V)
    } else list(scale=S, loadings=V)
  }
  x1 <- as.data.frame(object$x)   # original values
  x2 <- as.data.frame(object$mx)  # microaggregated values
  colnames(x2) <- colnames(x1)
  # sum of relative absolute deviations of the column means
  # NOTE(review): unlike 'amedian' below, no na.rm here -- confirm intent.
  amx <- mapply(mean, x1)
  amxn <- mapply(mean, x2)
  amean <- sum(abs(amx - amxn)/(abs(amx)))
  # same for the column medians
  meds1 <- mapply(median, x1)
  meds2 <- mapply(median, x2)
  amedian <- sum(abs(meds1 - meds2) / abs(meds1), na.rm = TRUE)
  # winsorise each column at median +/- (3/1.486)*mad ("one-step" location),
  # then compare the means of the winsorised data
  onestep <- function(x) {
    y <- x
    constant <- 3/1.486
    m1 <- mapply(median, x)
    m2 <- mapply(mad, x)
    limit1 <- m1 + constant * m2
    limit2 <- m1 - constant * m2
    for (i in 1:dim(x)[2]) {
      if (any(x[, i] > limit1[i])) {
        w <- which(x[, i] > limit1[i])
        le <- length(w)  # NOTE(review): 'le' is assigned but never used
        y[w, i] <- limit1[i]
      }
      if (any(x[, i] < limit2[i])) {
        w <- which(x[, i] < limit2[i])
        le <- length(w)
        y[w, i] <- limit2[i]
      }
    }
    y
  }
  aox <- onestep(x1)
  aox <- mapply(mean, aox)
  aoxm <- onestep(x2)
  aoxm <- mapply(mean, aoxm)
  aonestep <- sum(abs(aox - aoxm) / abs(aox), na.rm = TRUE)
  # deviation of variances/covariances/correlations, averaged over the
  # number of columns (length() of a data.frame is its column count)
  devvar <- sum(abs(var(x1) - var(x2))/abs(var(x1)))/length(x1)
  amx <- mapply(mad, x1)
  amxn <- mapply(mad, x2)
  amad <- sum(abs(amx - amxn) / (abs(amx)), na.rm = TRUE)
  acov <- sum(abs(cov(x1) - cov(x2))/abs(cov(x1)))/(2 * length(x1))
  arcov <- NA  # robust covariance deviation: not computed (placeholder)
  acor <- sum(abs(cor(x1) - cor(x2))/abs(cor(x1)))/(2 * length(x2))
  arcor <- NA  # robust correlation deviation: not computed (placeholder)
  acors <- sum(abs(cor(x1, method = "spearman") - cor(x2, method = "spearman")) /
    abs(cor(x1, method = "spearman"))) / (2 * length(x1))
  # regression of the first column on all others, before vs. after;
  # intercept (first coefficient) is excluded from the comparison
  l1 <- lm(as.matrix(x1[, 1]) ~ as.matrix(x1[, -1]))$coeff
  l2 <- lm(as.matrix(x2[, 1]) ~ as.matrix(x2[, -1]))$coeff
  adlm <- sum(abs(l1[2:length(l1)] - l2[2:length(l2)]), na.rm = TRUE)
  adlts <- NA  # lts regression deviation: not computed (placeholder)
  # classical PCA loadings (princomp needs more rows than columns)
  if (dim(x1)[1] > dim(x1)[2] && dim(x2)[1] > dim(x2)[2]) {
    p1 <- stats::princomp(x1)
    p2 <- stats::princomp(x2)
    cp1 <- colMeans(p1$load)
    cp2 <- colMeans(p2$load)
    apcaload <- sum(abs(cp1 - cp2)/abs(cp1))
  } else {
    apcaload <- "too less observations"  # sentinel string returned instead of a number
  }
  # robust (projection-pursuit) PCA loadings
  if (dim(x1)[1] > dim(x1)[2] && dim(x2)[1] > dim(x2)[2]) {
    p1 <- prcompRob(x1)
    p2 <- prcompRob(x2)
    cp1 <- colMeans(p1$load)
    cp2 <- colMeans(p2$load)
    apppcaload <- sum(abs(cp1 - cp2)/abs(cp1))
  } else {
    apppcaload <- "too less observations"
  }
  # column totals; the microaggregated totals are rescaled by object$fot
  # (factor of totals computed during microaggregation)
  cmx1 <- apply(x1, 2, sum)
  cmx2 <- apply(x2, 2, sum) * object$fot
  atotals <- sum(abs((cmx1 - cmx2)/cmx1))
  pmtotals <- sum((cmx2 - cmx1)/cmx1)
  # package-level utility/risk measures -- defined elsewhere in sdcMicro
  util1 <- dUtility(x1, x2)
  deigenvalues <- dUtility(x1, x2, method = "eigen")
  risk0 <- dRisk(x1, x2)
  r <- dRiskRMD(x1, x2, k = 0.7)
  risk1 <- r$risk1
  risk2 <- r$risk2
  wrisk1 <- r$wrisk1
  wrisk2 <- r$wrisk2
  list(
    meansx = summary(x1),
    meansxm = summary(x2),
    amean = amean,
    amedian = amedian,
    aonestep = aonestep,
    devvar = devvar,
    amad = amad,
    acov = acov,
    arcov = arcov,
    acor = acor,
    arcor = arcor,
    acors = acors,
    adlm = adlm,
    adlts = adlts,
    apcaload = apcaload,
    apppcaload = apppcaload,
    totalsOrig = cmx1,
    totalsMicro = cmx2,
    atotals = atotals,
    pmtotals = pmtotals,
    util1 = util1,
    deigenvalues = deigenvalues,
    risk0 = risk0,
    risk1 = risk1,
    risk2 = risk2,
    wrisk1 = wrisk1,
    wrisk2 = wrisk2)
}
|
/R/microaggregation.R
|
no_license
|
thijsbenschop/sdcMicro
|
R
| false
| false
| 38,121
|
r
|
#' Microaggregation
#'
#' Function to perform various methods of microaggregation.
#'
#' On \url{http://neon.vb.cbs.nl/casc/Glossary.htm} one can find the
#' \dQuote{official} definition of microaggregation:
#'
#' Records are grouped based on a proximity measure of variables of interest,
#' and the same small groups of records are used in calculating aggregates for
#' those variables. The aggregates are released instead of the individual
#' record values.
#'
#' The recommended method is \dQuote{rmd} which forms the proximity using
#' multivariate distances based on robust methods. It is an extension of the
#' well-known method \dQuote{mdav}. However, when computational speed is
#' important, method \dQuote{mdav} is the preferable choice.
#'
#' While for the proximity measure very different concepts can be used, the
#' aggregation itself is naturally done with the arithmetic mean.
#' Nevertheless, other measures of location can be used for aggregation,
#' especially when the group size for aggregation has been taken higher than 3.
#' Since the median seems to be unsuitable for microaggregation because of
#' being highly robust, other measures which are included can be chosen. If a
#' complex sample survey is microaggregated, the corresponding sampling weights
#' should be determined to either aggregate the values by the weighted
#' arithmetic mean or the weighted median.
#'
#' This function contains also a method with which the data can be clustered
#' with a variety of different clustering algorithms. Clustering observations
#' before applying microaggregation might be useful. Note, that the data are
#' automatically standardised before clustering.
#'
#' The usage of clustering method \sQuote{Mclust} requires package mclust02,
#' which must be loaded first. The package is not loaded automatically, since
#' the package is not under GPL but comes with a different licence.
#'
#' There are also some projection methods for microaggregation included. The
#' robust version \sQuote{pppca} or \sQuote{clustpppca} (clustering at first)
#' are fast implementations and almost always provide the best results.
#'
#' Univariate statistics are preserved best with the individual ranking method
#' (we called them \sQuote{onedims}, however, often this method is named
#' \sQuote{individual ranking}), but multivariate statistics are strongly
#' affected.
#'
#' With method \sQuote{simple} one can apply microaggregation directly on the
#' (unsorted) data. It is useful for the comparison with other methods as a
#' benchmark, i.e. it answers the question of how much better a sorting of the
#' data before aggregation.
#'
#' @name microaggregation
#' @docType methods
#' @param obj either an object of class \code{\link{sdcMicroObj-class}} or a \code{data.frame}
#' @param variables variables to microaggregate. For \code{NULL}: If obj is of class
#' sdcMicroObj, all numerical key variables are chosen per default. For
#' \code{data.frames}, all columns are chosen per default.
#' @param aggr aggregation level (default=3)
#' @param strata_variables for \code{data.frames}, by-variables for applying microaggregation only
#' within strata defined by the variables. For \code{\link{sdcMicroObj-class}}-objects, the
#' stratification-variable defined in slot \code{@strataVar} is used. This slot can be changed any
#' time using \code{strataVar<-}.
#' @param method pca, rmd, onedims, single, simple, clustpca, pppca,
#' clustpppca, mdav, clustmcdpca, influence, mcdpca
#' @param nc number of clusters, if the chosen method performs cluster analysis
#' @param weights sampling weights. If obj is of class sdcMicroObj the vector
#' of sampling weights is chosen automatically. If determined, a weighted
#' version of the aggregation measure is chosen automatically, e.g. weighted
#' median or weighted mean.
#' @param clustermethod clustermethod, if necessary
#' @param measure aggregation statistic, mean, median, trim, onestep (default=mean)
#' @param trim trimming percentage, if measure=trim
#' @param varsort variable for sorting, if method=single
#' @param transf transformation for data x
#' @return If \sQuote{obj} was of class \code{\link{sdcMicroObj-class}} the corresponding
#' slots are filled, like manipNumVars, risk and utility. If \sQuote{obj} was
#' of class \dQuote{data.frame}, an object of class \dQuote{micro} with following entities is returned:
#' \itemize{
#' \item{\code{x}: }{original data}
#' \item{\code{mx}: }{the microaggregated dataset}
#' \item{\code{method}: }{method}
#' \item{\code{aggr}: }{aggregation level}
#' \item{\code{measure}: }{proximity measure for aggregation}}
#' @note if only one variable is specified, \code{\link{mafast}} is applied and argument \code{method} is ignored.
#' Parameters \code{measure} are ignored for methods \code{mdav} and \code{rmd}.
#' @author Matthias Templ, Bernhard Meindl
#'
#' For method \dQuote{mdav}: This work is being supported by the International
#' Household Survey Network (IHSN) and funded by a DGF Grant provided by the
#' World Bank to the PARIS21 Secretariat at the Organisation for Economic
#' Co-operation and Development (OECD). This work builds on previous work
#' which is elsewhere acknowledged.
#'
#' Author for the integration of the code for mdav in R: Alexander Kowarik.
#' @seealso \code{\link{summary.micro}}, \code{\link{plotMicro}},
#' \code{\link{valTable}}
#' @references
#' Templ, M. and Meindl, B., \emph{Robust Statistics Meets SDC: New Disclosure
#' Risk Measures for Continuous Microdata Masking}, Lecture Notes in Computer
#' Science, Privacy in Statistical Databases, vol. 5262, pp. 113-126, 2008.
#'
#' Templ, M. \emph{Statistical Disclosure Control for Microdata Using the
#' R-Package sdcMicro}, Transactions on Data Privacy, vol. 1, number 2, pp.
#' 67-85, 2008. \url{http://www.tdp.cat/issues/abs.a004a08.php}
#'
#' Templ, M. \emph{New Developments in Statistical Disclosure Control and
#' Imputation: Robust Statistics Applied to Official Statistics},
#' Suedwestdeutscher Verlag fuer Hochschulschriften, 2009, ISBN: 3838108280,
#' 264 pages.
#'
#' Templ, M. Statistical Disclosure Control for Microdata: Methods and Applications in R.
#' \emph{Springer International Publishing}, 287 pages, 2017. ISBN 978-3-319-50272-4. \doi{10.1007/978-3-319-50272-4}
#' \doi{10.1007/978-3-319-50272-4}
#'
#' Templ, M. and Meindl, B. and Kowarik, A.: \emph{Statistical Disclosure Control for
#' Micro-Data Using the R Package sdcMicro}, Journal of Statistical Software,
#' 67 (4), 1--36, 2015.
#' @keywords manip
#' @rdname microaggregation
#' @export
#' @examples
#' data(Tarragona)
#' m1 <- microaggregation(Tarragona, method='onedims', aggr=3)
#' ## summary(m1)
#' data(testdata)
#' m2 <- microaggregation(testdata[1:100,c('expend','income','savings')],
#' method='mdav', aggr=4)
#' summary(m2)
#'
#' ## for objects of class sdcMicro:
#' ## no stratification because @strataVar is NULL
#' data(testdata2)
#' sdc <- createSdcObj(testdata2,
#' keyVars=c('urbrur','roof','walls','water','electcon','sex'),
#' numVars=c('expend','income','savings'), w='sampling_weight')
#' sdc <- microaggregation(sdc, variables=c("expend","income"))
#'
#' ## with stratification by 'relat'
#' strataVar(sdc) <- "relat"
#' sdc <- microaggregation(sdc, variables=c("savings"))
microaggregation <- function(obj, variables=NULL, aggr=3, strata_variables=NULL,
  method="mdav", weights=NULL, nc=8, clustermethod="clara",
  measure="mean", trim=0, varsort=1, transf="log") {
  # Strata given via 'strata_variables' are only honoured for data.frame
  # input; for sdcMicroObj input stratification comes from slot 'strataVar',
  # so inform the user that the argument has no effect there.
  strata_supplied <- !is.null(strata_variables)
  if (strata_supplied && !is.data.frame(obj)) {
    message("Argument 'strata_variables' is ignored. Only variables specified in slot 'strataVar' (if any) of the input object are used!\n")
  }
  # Delegate to the S4 generic, which dispatches on the class of 'obj'.
  microaggregationX(
    obj=obj, variables=variables, aggr=aggr,
    strata_variables=strata_variables, method=method, weights=weights,
    nc=nc, clustermethod=clustermethod, measure=measure, trim=trim,
    varsort=varsort, transf=transf)
}
# S4 generic for the internal dispatcher behind microaggregation(); methods
# exist for 'sdcMicroObj' (slot-aware) and plain 'data.frame' input.
# The formals mirror microaggregation() exactly.
setGeneric("microaggregationX", function(obj, variables=NULL, aggr=3, strata_variables=NULL,
  method="mdav", weights=NULL, nc=8, clustermethod="clara",
  measure="mean", trim=0, varsort=1, transf="log") {
  standardGeneric("microaggregationX")
})
# Method for sdcMicroObj input: pulls the numeric key variables, the strata
# variable and the sampling weights out of the object's slots, runs the
# workhorse, then writes the manipulated variables plus refreshed risk and
# utility measures back into the object.
# NOTE(review): the method formals omit 'strata_variables' and 'weights'
# (present in the generic) because both are always taken from the slots;
# R's conformMethod machinery accepts this -- verify no warning at load time.
setMethod(f="microaggregationX", signature=c("sdcMicroObj"), definition=function(obj,
  variables=NULL, aggr=3, method="mdav", nc=8, clustermethod="clara",
  measure="mean", trim=0, varsort=1, transf="log") {
  x <- get.sdcMicroObj(obj, type="manipNumVars")
  if (is.null(variables)) {
    variables <- colnames(x)  # default: all numerical key variables
  }
  strataVars <- get.sdcMicroObj(obj, type="strataVar")
  if (length(strataVars) > 0) {
    # append the stratification column(s) from the untouched original data
    sx <- get.sdcMicroObj(obj, type="origData")[, strataVars, drop=FALSE]
    x <- cbind(x, sx)
    # NOTE(review): only the last appended column name is kept as the
    # stratum -- if several strata variables exist, the earlier ones are
    # silently dropped here; confirm single-stratum intent.
    strataVars <- utils::tail(colnames(x), 1)
  }
  weights <- get.sdcMicroObj(obj, type="weightVar")
  if (!is.null(weights)) {
    # slot stores the column index/name; resolve to the actual weight vector
    weights <- get.sdcMicroObj(obj, type="origData")[, weights]
  }
  # any(NULL < 0) is FALSE, so this is safe when no weight variable is set
  if (any(weights < 0)) {
    warnMsg <- "negative weights have been detected!\n"
    obj <- addWarning(obj, warnMsg=warnMsg, method="microaggregation", variable=NA)
  }
  res <- microaggregationWORK(x, variables=variables, aggr=aggr, strata_variables=strataVars,
    method=method, weights=weights, nc=nc, clustermethod=clustermethod,
    measure=measure, trim=trim, varsort=varsort, transf=transf)
  obj <- nextSdcObj(obj)
  # overwrite only the microaggregated columns; other columns stay as-is
  x[, variables] <- res$mx[, variables]
  obj <- set.sdcMicroObj(obj, type="manipNumVars", input=list(as.data.frame(x[,
    colnames(obj@origData)[obj@numVars], drop=FALSE])))
  # recompute disclosure risk and data utility on the updated object
  obj <- dRisk(obj)
  obj <- dUtility(obj)
  obj
})
# Method for data.frame input: defaults 'variables' to all columns and
# delegates straight to the workhorse, which returns an object of class
# 'micro' (see microaggregationWORK below).
setMethod(f="microaggregationX", signature=c("data.frame"), definition=function(obj,
  variables=NULL, aggr=3, strata_variables=NULL, method="mdav", weights=NULL,
  nc=8, clustermethod="clara", measure="mean", trim=0, varsort=1,
  transf="log") {
  if (is.null(variables)) {
    variables <- colnames(obj)
  }
  microaggregationWORK(x=obj, variables=variables, aggr=aggr, strata_variables=strata_variables,
    method=method, weights=weights, nc=nc, clustermethod=clustermethod,
    measure=measure, trim=trim, varsort=varsort, transf=transf)
})
# Internal workhorse for all microaggregation variants.
# Common strategy of every micro_*() helper: sort/group the rows by some
# proximity criterion, aggregate each group of size 'aggr' via means(), and
# "blow up" the group aggregates back to the original number of rows.
# Returns an object of class 'micro' with elements x, mx, method, aggr,
# measure. Helpers capture 'weights' and 'varsort' from this enclosing
# environment (closure), not via arguments.
microaggregationWORK <- function(x, variables=colnames(x), method="mdav", aggr=3,
  weights=NULL, nc=8, clustermethod="clara", measure="mean",
  trim=0, varsort=1, transf="log", strata_variables=NULL) {
  ## helper-functions used in several micro_xxx() functions
  # Correction factor for totals: n / floor(n/aggr). Stored in the result so
  # summary.micro() can rescale aggregated column sums.
  factorOfTotals <- function(x, aggr) {
    n <- dim(x)[1]
    abgerundet <- floor(n/aggr)  # number of complete groups ("abgerundet" = rounded down)
    fot <- n/abgerundet
    return(fot)
  }
  # Weighted quantiles of 'x' (inverse-CDF with averaging at jump points).
  # weights=NULL falls back to the unweighted empirical CDF.
  weightedQuantile <- function(x, weights=NULL, probs=seq(0, 1, 0.25), sorted=FALSE, na.rm=FALSE) {
    if (!is.numeric(x)) {
      stop("'x' must be a numeric vector")
    }
    n <- length(x)
    if (n == 0 || (!isTRUE(na.rm) && any(is.na(x)))) {
      return(rep.int(NA, length(probs)))
    }
    if (!is.null(weights)) {
      if (!is.numeric(weights)) {
        stop("'weights' must be a numeric vector")
      } else if (length(weights) != n) {
        stop("'weights' must have the same length as 'x'")
      } else if (!all(is.finite(weights))) {
        stop("missing or infinite weights")
      }
      if (any(weights < 0)) {
        warnMsg <- "negative weights have been detected!\n"
        warning(warnMsg)
      }
      if (!is.numeric(probs) || all(is.na(probs)) || isTRUE(any(probs < 0 | probs > 1))) {
        stop("'probs' must be a numeric vector with values in [0,1]")
      }
      if (all(weights == 0)) {
        warnMsg <- "all weights equal 0!\n"
        # NOTE(review): 'obj' is not defined in this scope -- reaching this
        # branch raises "object 'obj' not found" before the warning fires.
        # Looks like a copy/paste from the sdcMicroObj method; confirm & fix.
        obj <- addWarning(obj, warnMsg=warnMsg, method="microaggregation", variable=NA)
        warning(warnMsg)
        return(rep.int(0, length(probs)))
      }
    }
    if (isTRUE(na.rm)) {
      indices <- !is.na(x)
      x <- x[indices]
      if (!is.null(weights))
        weights <- weights[indices]
    }
    if (!isTRUE(sorted)) {
      order <- order(x)
      x <- x[order]
      weights <- weights[order]  # NULL[order] stays NULL when unweighted
    }
    # rw: (weighted) cumulative distribution evaluated at the sorted values
    if (is.null(weights))
      rw <- (1:n)/n else rw <- cumsum(weights)/sum(weights)
    q <- sapply(probs, function(p) {
      if (p == 0)
        return(x[1]) else if (p == 1)
        return(x[n])
      select <- min(which(rw >= p))
      # average at exact jump points, otherwise take the first value at/after p
      if (rw[select] == p)
        mean(x[select:(select + 1)]) else x[select]
    })
    return(unname(q))
  }
  # Weighted median as the 0.5 weighted quantile.
  weightedMedian <- function(x, weights=NULL, sorted=FALSE, na.rm=FALSE) {
    weightedQuantile(x, weights, probs=0.5, sorted=sorted, na.rm=na.rm)
  }
  # Partition row indices 1..n into consecutive groups of size 'aggr'; when
  # aggr does not divide n, the last group absorbs the remainder.
  indexMicro <- function(x, aggr) {
    n <- dim(x)[1]
    if (n < 2 * aggr) {
      stop(paste0("Too less observations (", n, ") for aggregate =", aggr,"\n"))
    }
    aa <- seq(1, n, aggr)
    j <- 1
    teiler <- n/aggr  # "teiler" = divisor; integer iff aggr divides n evenly
    d1 <- 1:n
    index <- list()
    if (teiler %in% 1:n) {
      for (i in 1:length(aa)) {
        index[[i]] <- d1[j:(j + aggr - 1)]
        j <- j + aggr
      }
    } else {
      for (i in 1:(length(aa) - 2)) {
        index[[i]] <- d1[j:(j + aggr - 1)]
        j <- j + aggr
      }
      index[[i + 1]] <- d1[(j):n]  # last group takes all remaining rows
    }
    index
  }
  # Aggregate every index group with the chosen statistic. 'weights' comes
  # from the enclosing microaggregationWORK() environment (closure).
  means <- function(x, index, measure, trim=0) {
    m <- matrix(ncol=ncol(x), nrow=length(index))
    if (measure == "mean" & is.null(weights)) {
      for (i in 1:length(index)) {
        m[i, ] <- colMeans(x[index[[i]], ])
      }
    }
    if (measure == "median" & is.null(weights)) {
      for (i in 1:length(index)) {
        m[i, ] <- apply(x[index[[i]], ], 2, median)
      }
    }
    if (measure == "mean" & !is.null(weights)) {
      for (i in 1:length(index)) {
        m[i, ] <- apply(x[index[[i]], ], 2, function(x) stats::weighted.mean(x,
          w=weights[index[[i]]]))
      }
    }
    if (measure == "median" & !is.null(weights)) {
      for (i in 1:length(index)) {
        m[i, ] <- apply(x[index[[i]], ], 2, function(x) weightedMedian(x,
          weights=weights[index[[i]]]))
      }
    }
    if (measure == "trim") {
      for (i in 1:length(index)) {
        # NOTE(review): the inner loop over j recomputes the same trimmed
        # mean length(index[[i]]) times -- redundant but harmless.
        for (j in 1:length(index[[i]])) {
          m[i, ] <- apply(x[index[[i]], ], 2, mean, trim=trim)
        }
      }
    }
    if (measure == "onestep") {
      # winsorise each group at median +/- (3/1.486)*mad, then average
      y <- x
      constant <- 3/1.486
      for (i in 1:length(index)) {
        m1 <- apply(x[index[[i]], ], 2, median)
        m2 <- apply(x[index[[i]], ], 2, mad)
        limit1 <- m1 + constant * m2
        limit2 <- m1 - constant * m2
        for (ii in 1:length(index[[i]])) {
          if (any(x[index[[i]][ii], ] > limit1)) {
            w <- which(x[index[[i]][ii], ] > limit1)
            le <- length(w)  # NOTE(review): 'le' assigned but never used
            y[index[[i]][ii], w] <- limit1[w]
          }
          if (any(x[index[[i]][ii], ] < limit2)) {
            w <- which(x[index[[i]][ii], ] < limit2)
            le <- length(w)
            y[index[[i]][ii], w] <- limit2[w]
          }
          m[i, ] <- colMeans(y[index[[i]], ])
        }
      }
    }
    colnames(m) <- colnames(x)
    return(m)
  }
  # Replicate each group aggregate (row of 'mr') back over the rows of the
  # corresponding group, mirroring the grouping convention of indexMicro().
  blowup <- function(x, mr, aggr) {
    n <- dim(x)[1]
    aa <- seq(1, n, aggr)
    j <- 1
    teiler <- n/aggr
    d1 <- 1:n
    xx <- matrix(0, ncol=ncol(x), nrow=nrow(x))
    if (teiler %in% 1:n) {
      for (i in 1:length(aa)) {
        for (s in j:(j + aggr - 1)) {
          xx[s, ] <- as.matrix(mr[i, , drop=FALSE])
        }
        j <- j + aggr
      }
    } else {
      for (i in 1:(length(aa) - 2)) {
        for (s in j:(j + aggr - 1)) {
          xx[s, ] <- as.matrix(mr[i, , drop=FALSE])
        }
        j <- j + aggr
      }
      # remaining rows all get the last aggregate
      for (s in j:n) {
        xx[s, ] <- mr[i + 1, ]
      }
    }
    rownames(xx) <- rownames(x)
    xx
  }
  # Cluster the data with one of several algorithms; returns centers,
  # cluster assignment and the number of clusters.
  # NOTE(review): the transformed data 'y' computed below is never used --
  # every clustering call runs on the raw 'x', despite the roxygen docs
  # saying the data are standardised before clustering. Verify intent.
  clust <- function(x, nc, clustermethod="clara", transf="log") {
    if (transf == "none") {
      y <- x
    }
    if (transf == "log") {
      y <- scale(log(x))
    }
    if (transf == "boxcox") {
      lambda <- car::powerTransform(x)$lambda
      y <- scale(car::bcPower(x, lambda))
    }
    if (clustermethod == "clara") {
      a <- clara(x, nc)
      clustresult <- a$clust
      centers <- a$med
      size <- a$clusinfo[, 1]
    }
    if (clustermethod == "pam") {
      a <- pam(x, nc)
      clustresult <- a$clust
      centers <- a$med
      size <- a$clusinfo[, 1]
    }
    if (clustermethod == "kmeans") {
      a <- stats::kmeans(x, nc)
      centers <- a$centers
      clustresult <- a$cluster
      size <- a$size
    }
    if (clustermethod == "cmeans") {
      a <- e1071::cmeans(x, nc)
      centers <- a$centers
      clustresult <- a$cluster
      size <- a$size
      # NOTE(review): 'res' does not exist in this scope -- this line errors
      # whenever clustermethod == "cmeans" is used. Confirm and fix.
      res@mem <- a$mem
    }
    if (clustermethod == "bclust") {
      a <- e1071::bclust(x, nc)
      centers <- a$centers
      groesse <- rep(0, nc)  # "groesse" = size: cluster cardinalities
      for (i in seq(nc)) {
        groesse[i] <- length(which(a$cluster == i))
      }
      size <- groesse
      clustresult <- a$cluster
    }
    list(centers=centers, clustresult=clustresult, nc=nc)
  }
  # Projection-pursuit robust PCA (duplicate of the helper embedded in
  # summary.micro; algorithm by Croux and Filzmoser).
  prcompRob <- function(X, k=0, sca="mad", scores=TRUE) {
    n <- nrow(X)
    p <- ncol(X)
    if (k == 0) {
      p1 <- min(n, p)
    } else {
      p1 <- k
    }
    S <- rep(1, p1)
    V <- matrix(1:(p * p1), ncol=p1, nrow=p)
    P <- diag(p)
    m <- apply(X, 2, median)
    Xcentr <- scale(X, center=m, scale=FALSE)  # robust (median) centering
    for (k in 1:p1) {
      B <- Xcentr %*% P
      Bnorm <- sqrt(apply(B^2, 1, sum))
      A <- diag(1/Bnorm) %*% B
      Y <- A %*% P %*% t(X)
      if (sca == "mad")
        s <- apply(Y, 1, mad)
      # if (sca == 'tau') s <- apply(Y, 1, scale.tau) if (sca == 'A') s <- apply(Y, 1,
      # scale.a)
      j <- order(s)[n]  # direction with maximal robust scale
      S[k] <- s[j]
      V[, k] <- A[j, ]
      if (V[1, k] < 0)
        V[, k] <- (-1) * V[, k]
      P <- P - (V[, k] %*% t(V[, k]))  # deflation
    }
    if (scores) {
      list(scale=S, loadings=V, scores=Xcentr %*% V)
    } else list(scale=S, loadings=V)
  }
  # implementations of microaggregation methods
  # "simple": aggregate the data in its given (unsorted) row order.
  micro_simple <- function(x, aggr, measure, trim) {
    index <- indexMicro(x, aggr)
    m <- means(x=x, index=index, measure=measure, trim=trim)
    mr <- round(m)
    blowxm <- blowup(x, m, aggr)
    return(list(x=x, method="simple", clustering=FALSE, aggr=aggr, nc=NULL,
      xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
      varsort=NULL, transf=NULL, blowup=TRUE, blowxm=blowxm, fot=0))
  }
  # "single": sort by one chosen variable, then aggregate.
  micro_single <- function(x, aggr, measure, trim, varsort) {
    sortvec <- sort(x[, varsort], index.return=TRUE)$ix
    xx <- x[sortvec, ]
    index <- indexMicro(xx, aggr)
    m <- means(x=xx, index=index, measure=measure, trim=trim)
    mr <- round(m)
    blowxm <- blowup(x, m, aggr)
    rownames(blowxm) <- rownames(xx)
    return(list(x=x, method="single", clustering=FALSE, aggr=aggr, nc=NULL,
      xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
      varsort=varsort, transf=NULL, blowup=TRUE, blowxm=blowxm, fot=0))
  }
  # "onedims" (individual ranking): sort and aggregate every column
  # independently, then map aggregates back via each column's ranks.
  micro_onedims <- function(x, aggr, measure, trim) {
    i <- dim(x)[2]
    xx <- sapply(1:i, function(i) {
      x[order(x[, i]), i]
    })
    xxx <- sapply(1:i, function(i) {
      rank(x[, i], ties.method="first")
    })
    index <- indexMicro(xx, aggr)
    m <- means(x=xx, index=index, measure=measure, trim=trim)
    mr <- round(m)
    b <- blowup(x, m, aggr)
    y <- x
    for (i in 1:dim(x)[2]) {
      y[, i] <- b[xxx[, i], i]  # place aggregate by original rank position
    }
    return(list(x=x, method="onedims", clustering=FALSE, aggr=aggr, nc=NULL,
      xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
      varsort=NULL, transf=NULL, blowup=TRUE, blowxm=y, fot=0))
  }
  # "pca": sort by the first classical principal component.
  micro_pca <- function(x, aggr, measure, trim) {
    p <- stats::princomp(scale(x))
    s1 <- sort(p$scores[, 1], index.return=TRUE)$ix
    xx <- x[s1, ]
    index <- indexMicro(xx, aggr)
    m <- means(x=xx, index=index, measure=measure, trim=trim)
    mr <- round(m)
    blowxm <- blowup(x, m, aggr)
    rownames(blowxm) <- rownames(xx)
    return(list(x=x, method="pca", clustering=FALSE, aggr=aggr, nc=NULL,
      xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
      varsort=NULL, transf=NULL, blowup=TRUE, blowxm=blowxm, fot=0))
  }
  # "mcdpca": sort by the first PC of an MCD-robustified PCA (cov.mcd is
  # presumably MASS::cov.mcd -- package-level import, not visible here).
  micro_mcdpca <- function(x, aggr, measure, trim) {
    x.mcd <- cov.mcd(x, cor=TRUE)
    x.scale <- scale(x, x.mcd$center, sqrt(diag(x.mcd$cor)))
    p <- stats::princomp(x.scale, covmat=x.mcd)
    s1 <- sort(p$scores[, 1], index.return=TRUE)$ix
    xx <- x[s1, ]
    index <- indexMicro(xx, aggr)
    m <- means(x=xx, index=index, measure=measure, trim=trim)
    mr <- round(m)
    blowxm <- blowup(x, m, aggr)
    rownames(blowxm) <- rownames(xx)
    return(list(x=x, method="mcdpca", clustering=FALSE, aggr=aggr, nc=NULL,
      xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
      varsort=NULL, transf=NULL, blowup=TRUE, blowxm=blowxm, fot=0))
  }
  # "pppca": sort by the first projection-pursuit robust PC.
  micro_pppca <- function(x, aggr, measure, trim) {
    p <- prcompRob(x)
    s1 <- sort(p$scores[, 1], index.return=TRUE)$ix
    xx <- x[s1, ]
    index <- indexMicro(xx, aggr)
    m <- means(x=xx, index=index, measure=measure, trim=trim)
    mr <- round(m)
    blowxm <- blowup(x, m, aggr)
    rownames(blowxm) <- rownames(xx)
    return(list(x=x, method="pppca", clustering=FALSE, aggr=aggr, nc=NULL,
      xm=m, roundxm=mr, clustermethod=NULL, measure=measure, trim=trim,
      varsort=NULL, transf=NULL, blowup=TRUE, blowxm=blowxm, fot=0))
  }
  # "influence": cluster first, then inside each cluster sort by the
  # variable whose cluster center is largest.
  micro_influence <- function(x, aggr, measure, trim, clustermethod, transf, nc) {
    ac.scale <- clust(x=x, nc=nc, clustermethod=clustermethod, transf=transf)
    cent <- matrix(ac.scale$centers, ncol=nc, byrow=TRUE)
    j <- matrix(ncol=1, nrow=nc)
    vmax <- matrix(ncol=1, nrow=nc)
    for (i in 1:nc) {
      j[i, ] <- max(cent[, i])
      vmax[i, ] <- which(cent[, i] == j[i, ])
    }
    ncols <- c(1:ncol(x))  # NOTE(review): 'ncols' is never used
    xx <- list()
    for (i in 1:nc) {
      w <- which(ac.scale$clustresult == i)
      s <- x[w, , drop=FALSE]
      xx[[i]] <- s[order(s[, vmax[i]]), ]
    }
    # stack the per-cluster sorted blocks back into one matrix
    yy <- NULL
    for (i in 1:nc) {
      yy <- rbind(yy, matrix(unlist(xx[[i]]), ncol=ncol(x), dimnames=list(rownames(xx[[i]]),
        colnames(xx[[i]]))))
    }
    xx <- yy
    index <- indexMicro(xx, aggr)
    m <- means(x=xx, index=index, measure=measure, trim=trim)
    mr <- round(m)
    blowxm <- blowup(x, m, aggr)
    rownames(blowxm) <- rownames(yy)
    return(list(x=x, method="influence", clustering=TRUE, aggr=aggr,
      nc=ac.scale$nc, xm=m, roundxm=mr, clustermethod=clustermethod,
      measure=measure, trim=trim, varsort=NULL, transf=transf, blowup=TRUE,
      blowxm=blowxm, fot=0))
  }
  # "clustpca": cluster, then sort each cluster by its first classical PC;
  # clusters smaller than the dimension fall back to sorting by 'varsort'
  # (taken from the enclosing environment, not a parameter).
  micro_clustpca <- function(x, aggr, measure, trim, clustermethod, transf, nc) {
    ac.scale <- clust(x=x, nc=nc, clustermethod=clustermethod, transf=transf)
    cent <- matrix(ac.scale$centers, ncol=nc, byrow=TRUE)
    xx <- list()
    for (i in 1:nc) {
      w <- which(ac.scale$clustresult == i)
      if (length(w) < dim(x)[2]) {
        y <- x[w, , drop=FALSE]
        xx[[i]] <- y[order(y[, varsort]), ]
      } else {
        p <- stats::princomp(scale(x[w, , drop=FALSE]))$scores[, 1]
        psortind <- sort(p, index.return=TRUE)$ix
        y <- x[w, , drop=FALSE]
        xx[[i]] <- y[psortind, ]
      }
    }
    yy <- NULL
    for (i in 1:nc) {
      yy <- rbind(yy, matrix(unlist(xx[[i]]), ncol=ncol(x), dimnames=list(rownames(xx[[i]]),
        colnames(xx[[i]]))))
    }
    xx <- yy
    index <- indexMicro(xx, aggr)
    m <- means(x=xx, index=index, measure=measure, trim=trim)
    mr <- round(m)
    blowxm <- blowup(x, m, aggr)
    rownames(blowxm) <- rownames(xx)
    return(list(x=x, method="clustpca", clustering=TRUE, aggr=aggr, nc=ac.scale$nc,
      xm=m, roundxm=mr, clustermethod=clustermethod, measure=measure,
      trim=trim, varsort=NULL, transf=transf, blowup=TRUE, blowxm=blowxm,
      fot=0))
  }
  # "clustmcdpca": like clustpca but sorts each cluster by the first PC of
  # an MCD-robustified PCA.
  micro_clustmcdpca <- function(x, aggr, measure, trim, clustermethod, transf, nc) {
    ac.scale <- clust(x=x, nc=nc, clustermethod=clustermethod, transf=transf)
    cent <- matrix(ac.scale$centers, ncol=nc, byrow=TRUE)
    xx <- list()
    for (i in 1:nc) {
      w <- which(ac.scale$clustresult == i)
      if (length(w) < dim(x)[2]) {
        y <- x[w, , drop=FALSE]
        xx[[i]] <- y[order(y[, varsort]), ]
      } else {
        message("length(w):", length(w), "\n")
        x.mcd <- cov.mcd(x[w, ], cor=TRUE)
        x.scale <- scale(x[w, ], x.mcd$center, sqrt(diag(x.mcd$cor)))
        p <- stats::princomp(x.scale, covmat=x.mcd)$scores[, 1]
        psortind <- sort(p, index.return=TRUE)$ix
        y <- x[w, , drop=FALSE]
        xx[[i]] <- y[psortind, ]
      }
    }
    yy <- NULL
    for (i in 1:nc) {
      yy <- rbind(yy, matrix(unlist(xx[[i]]), ncol=ncol(x), dimnames=list(rownames(xx[[i]]),
        colnames(xx[[i]]))))
    }
    xx <- yy
    index <- indexMicro(xx, aggr)
    m <- means(x=xx, index=index, measure=measure, trim=trim)
    mr <- round(m)
    blowxm <- blowup(x, m, aggr)
    rownames(blowxm) <- rownames(xx)
    return(list(x=x, method="clustmcdpca", clustering=TRUE, aggr=aggr,
      nc=ac.scale$nc, xm=m, roundxm=mr, clustermethod=clustermethod,
      measure=measure, trim=trim, varsort=NULL, transf=transf, blowup=TRUE,
      blowxm=blowxm, fot=0))
  }
  # "clustpppca": like clustpca but sorts each cluster by the first
  # projection-pursuit robust PC.
  micro_clustpppca <- function(x, aggr, measure, trim, clustermethod, transf, nc) {
    ac.scale <- clust(x=x, nc=nc, clustermethod=clustermethod, transf=transf)
    cent <- matrix(ac.scale$centers, ncol=nc, byrow=TRUE)
    xx <- list()
    for (i in 1:nc) {
      w <- which(ac.scale$clustresult == i)
      if (length(w) < dim(x)[2]) {
        y <- x[w, , drop=FALSE]
        xx[[i]] <- y[order(y[, varsort]), ]
      } else {
        p <- prcompRob(x[w, , drop=FALSE], 1)$scores
        psortind <- sort(p, index.return=TRUE)$ix
        y <- x[w, , drop=FALSE]
        xx[[i]] <- y[psortind, ]
      }
    }
    yy <- NULL
    for (i in 1:nc) {
      yy <- rbind(yy, matrix(unlist(xx[[i]]), ncol=ncol(x), dimnames=list(rownames(xx[[i]]),
        colnames(xx[[i]]))))
    }
    xx <- yy
    index <- indexMicro(xx, aggr)
    m <- means(x=xx, index=index, measure=measure, trim=trim)
    mr <- round(m)
    blowxm <- blowup(x, m, aggr)
    rownames(blowxm) <- rownames(xx)
    return(list(x=x, method="clustpppca", clustering=TRUE, aggr=aggr,
      nc=ac.scale$nc, xm=m, roundxm=mr, clustermethod=clustermethod,
      measure=measure, trim=trim, varsort=NULL, transf=transf, blowup=TRUE,
      blowxm=blowxm, fot=0))
  }
  # "rmd": robust multivariate distances. Repeatedly take the observation
  # with the largest robust Mahalanobis distance, group it with its
  # aggr-1 nearest neighbours, and replace the group by its mean.
  micro_rmd <- function(x, aggr) {
    # indices of the 'aggr' smallest entries of the distance vector 'ds'
    kn <- function(ds, aggr) {
      w <- rep(0, aggr)
      for (i in 1:aggr) {
        w[i] <- which.min(ds)
        ds[w[i]] <- NA
      }
      return(w)
    }
    y <- x
    cm <- colMeans(x, na.rm=TRUE)
    csd <- apply(x, 2, sd, na.rm=TRUE)
    len <- nrow(y)  # NOTE(review): 'len' is never used
    # standardise column-wise; cm/csd kept to back-transform at the end
    y <- apply(y, 2, function(x) (x - mean(x, na.rm=TRUE))/sd(x, na.rm=TRUE))
    d <- as.matrix(stats::dist(y))
    # fixed seed so covMcd's subsampling is reproducible
    # NOTE(review): this mutates the caller's global RNG state -- confirm ok.
    set.seed(123)
    rr <- covMcd(y)
    md <- stats::mahalanobis(y, center=rr$center, cov=rr$cov)
    diag(d) <- 0
    for (i in 1:(floor(dim(x)[1]/aggr) - 1)) {
      s <- which.max(md)        # most outlying remaining observation
      w <- kn(d[, s], aggr)     # it and its aggr-1 nearest neighbours
      d[w, ] <- NA              # mark rows as consumed
      md[w] <- NA
      y[w, ] <- rep(colMeans(y[w, ]), each=aggr)
    }
    # remaining (unconsumed) rows form the last group
    w <- which(!is.na(d[, 1]))
    y[w, ] <- rep(colMeans(y[w, ]), each=length(w))
    # back-transform to the original scale
    for (i in 1:dim(x)[2]) {
      y[, i] <- as.numeric((y[, i] * csd[i]) + cm[i])
    }
    return(list(x=x, method="rmd", clustering=FALSE, aggr=aggr, nc=NULL,
      xm=y, roundxm=round(y), clustermethod=NULL, measure=NULL, trim=NULL,
      varsort=NULL, transf=NULL, blowup=TRUE, blowxm=y, fot=0))
  }
  # "mdav": delegate to the package's mdav() implementation (defined
  # elsewhere); -999 is its missing-value sentinel.
  micro_mdav <- function(x, aggr) {
    resX <- mdav(x, variables=NULL, weights=NULL, K=aggr, missing=-999)
    return(list(x=x, method="mdav", clustering=FALSE, aggr=aggr, nc=NULL,
      xm=NULL, roundxm=NULL, clustermethod=NULL, measure="mean", trim=NULL,
      varsort=NULL, transf=NULL, blowup=FALSE, blowxm=resX, fot=0))
  }
  # ---- dispatch ----
  stopifnot(method %in% c("simple", "single", "onedims", "pca", "mcdpca", "pppca",
    "clustmcdpca", "clustpppca", "clustpca", "rmd", "mdav", "influence"))
  # remember the original row order so results can be re-sorted at the end
  rownames(x) <- 1:nrow(x)
  # univariate shortcut: use the fast implementation; 'method' is ignored
  if (length(variables) == 1) {
    res <- list()
    # eval(parse(...)) turns the measure name (e.g. "mean") into the function
    res$mx <- mafast(x, variables=variables, by=strata_variables, aggr=aggr,
      measure=eval(parse(text=measure)))
    res$x <- x
    res$method <- "mafast"
    res$aggr <- aggr
    res$measure <- measure
    res$fot <- factorOfTotals(x, aggr)
    class(res) <- "micro"
    return(res)
  }
  xall <- x
  # split the data into strata (if requested); byvar glues all strata
  # columns together with "-" separators
  if (!is.null(strata_variables)) {
    if (!all(strata_variables %in% colnames(x))) {
      stop("strata_variables are not found in the data set!")
    }
    byvar <- rep("", nrow(x))
    for (i in 1:length(strata_variables)) {
      byvar <- paste(byvar, x[, strata_variables[i]], sep="-")
    }
    xsp <- split(x, as.factor(byvar))
  } else {
    xsp <- list(dataset=x)
  }
  # microaggregate each stratum independently
  reslist <- list()
  for (spind in 1:length(xsp)) {
    x <- xsp[[spind]][, variables, drop=FALSE]
    if (method == "simple") {
      res <- micro_simple(x=x, aggr=aggr, measure=measure, trim=trim)
    }
    if (method == "single") {
      res <- micro_single(x=x, aggr=aggr, measure=measure, trim=trim, varsort=varsort)
    }
    if (method == "onedims") {
      res <- micro_onedims(x=x, aggr=aggr, measure=measure, trim=trim)
    }
    if (method == "pca") {
      res <- micro_pca(x=x, aggr=aggr, measure=measure, trim=trim)
    }
    if (method == "mcdpca") {
      res <- micro_mcdpca(x=x, aggr=aggr, measure=measure, trim=trim)
    }
    if (method == "pppca") {
      res <- micro_pppca(x=x, aggr=aggr, measure=measure, trim=trim)
    }
    if (method == "influence") {
      res <- micro_influence(x=x, aggr=aggr, measure=measure, trim=trim,
        clustermethod=clustermethod, transf=transf, nc=nc)
    }
    if (method == "clustpca") {
      res <- micro_clustpca(x=x, aggr=aggr, measure=measure, trim=trim,
        clustermethod=clustermethod, transf=transf, nc=nc)
    }
    if (method == "clustmcdpca") {
      res <- micro_clustmcdpca(x=x, aggr=aggr, measure=measure, trim=trim,
        clustermethod=clustermethod, transf=transf, nc=nc)
    }
    if (method == "clustpppca") {
      res <- micro_clustpppca(x=x, aggr=aggr, measure=measure, trim=trim,
        clustermethod=clustermethod, transf=transf, nc=nc)
    }
    if (method == "rmd") {
      res <- micro_rmd(x=x, aggr=aggr)
    }
    if (method == "mdav") {
      res <- micro_mdav(x, aggr)
    }
    res$fot <- factorOfTotals(x, aggr)
    reslist[[spind]] <- res
  }
  # recombine the strata results into one 'micro' result
  res <- reslist[[1]]
  if (length(reslist) > 1) {
    blowxm <- vector()
    fot <- vector()
    for (i in 1:length(reslist)) {
      blowxm <- rbind(blowxm, reslist[[i]]$blowxm)
      fot <- c(fot, reslist[[i]]$fot)
    }
    res$x <- xall
    res$blowxm <- blowxm
    # drop the leading "-" that the byvar paste() above introduced
    names(fot) <- substring(names(xsp), 2)
    res$fot <- fot
  }
  # restore the original row order (rownames were set to 1..n above)
  res$x <- res$x[order(as.numeric(rownames(res$x))), ]
  res$blowxm <- res$blowxm[order(as.numeric(rownames(res$blowxm))), ]
  res$blowxm <- res$blowxm[1:nrow(xall), ]
  class(res) <- "micro"
  res$mx <- as.data.frame(res$blowxm)
  colnames(res$mx) <- variables
  # keep only the public entries of the 'micro' result
  resv <- c("x", "mx", "method", "aggr", "measure")
  res1 <- list()
  for (v in resv) {
    res1[[v]] <- res[[v]]
  }
  class(res1) <- "micro"
  return(res1)
}
#' Print method for objects from class micro
#'
#' printing an object of class \code{micro}
#'
#' @param x object from class micro
#' @param \dots Additional arguments passed through.
#' @return information about method and aggregation level from objects of class
#' micro.
#' @author Matthias Templ
#' @seealso \code{\link{microaggregation}}
#' @keywords print
#' @method print micro
#' @export
#' @examples
#'
#' data(free1)
#' free1 <- as.data.frame(free1)
#' m1 <- microaggregation(free1[, 31:34], method='onedims', aggr=3)
#' m1
#'
print.micro <- function(x, ...) {
  # Print a short report for a 'micro' object: the method and aggregation
  # level used, followed by summaries of the original (x$x) and the
  # microaggregated (x$mx) values. Status text goes through message()
  # (stderr); the summaries are printed via print() (stdout).
  divider <- function() {
    message("\n -------------------------\n")
  }
  message(paste("\n Object created with method", x$method, "and aggregation level",
    x$aggr))
  divider()
  message("x ... original values \n")
  print(summary(x$x))
  divider()
  message("mx ... microaggregated values\n")
  print(summary(x$mx))
  divider()
  message("Try names(your object from class micro) for more details")
  message("\n")
}
#' Summary method for objects from class micro
#'
#' Summary method for objects from class \sQuote{micro}.
#'
#' This function computes several measures of information loss, such as
#'
#' @param object objects from class micro
#' @param \dots Additional arguments passed through.
#' @return
#' \item{meanx }{A conventional summary of the original data}
#' \item{meanxm }{A conventional summary of the microaggregated data}
#' \item{amean }{average relative absolute deviation of means}
#' \item{amedian}{average relative absolute deviation of medians}
#' \item{aonestep }{average relative absolute deviation of onestep from median}
#' \item{devvar }{average relative absolute deviation of variances}
#' \item{amad }{average relative absolute deviation of the mad}
#' \item{acov }{average relative absolute deviation of covariances}
#' \item{arcov }{average relative absolute deviation of robust (with mcd) covariances}
#' \item{acor }{average relative absolute deviation of correlations}
#' \item{arcor }{average relative absolute deviation of robust (with mcd) correlations}
#' \item{acors }{average relative absolute deviation of rank-correlations}
#' \item{adlm }{average absolute deviation of lm regression coefficients (without intercept)}
#' \item{adlts}{average absolute deviation of lts regression coefficients (without intercept)}
#' \item{apcaload }{average absolute deviation of pca loadings}
#' \item{apppacaload }{average absolute deviation of robust (with projection pursuit approach) pca loadings}
#' \item{atotals }{average relative absolute deviation of totals}
#' \item{pmtotals }{average relative deviation of totals}
#' @author Matthias Templ
#' @seealso \code{\link{microaggregation}}, \code{\link{valTable}}
#' @references Templ, M. \emph{Statistical Disclosure Control for Microdata
#' Using the R-Package sdcMicro}, Transactions on Data Privacy, vol. 1, number
#' 2, pp. 67-85, 2008. \url{http://www.tdp.cat/issues/abs.a004a08.php}
#' @keywords print
#' @method summary micro
#' @export
#' @examples
#'
#' data(Tarragona)
#' m1 <- microaggregation(Tarragona, method='onedims', aggr=3)
#' ## summary(m1)
# Compute a battery of information-loss and disclosure-risk measures by
# comparing the original data (object$x) against the microaggregated data
# (object$mx).  See the roxygen block above for the meaning of each returned
# component.  dUtility, dRisk and dRiskRMD are defined elsewhere in the
# package.
summary.micro <- function(object, ...) {
  # Robust principal components via projection pursuit.
  prcompRob <- function(X, k=0, sca="mad", scores=TRUE) {
    ## Copyright: Croux and Filzmoser
    n <- nrow(X)
    p <- ncol(X)
    # k == 0 means "extract all min(n, p) components".
    if (k == 0) {
      p1 <- min(n, p)
    } else {
      p1 <- k
    }
    S <- rep(1, p1)
    V <- matrix(1:(p * p1), ncol=p1, nrow=p)
    P <- diag(p)
    m <- apply(X, 2, median)
    Xcentr <- scale(X, center=m, scale=FALSE)
    # Sequentially find the direction maximising the robust scale of the
    # projected data, then deflate P to search the orthogonal complement.
    for (k in 1:p1) {
      B <- Xcentr %*% P
      Bnorm <- sqrt(apply(B^2, 1, sum))
      A <- diag(1/Bnorm) %*% B
      Y <- A %*% P %*% t(X)
      if (sca == "mad")
        s <- apply(Y, 1, mad)
      # if (sca == 'tau') s <- apply(Y, 1, scale.tau) if (sca == 'A') s <- apply(Y, 1,
      # scale.a)
      j <- order(s)[n]
      S[k] <- s[j]
      V[, k] <- A[j, ]
      # Fix the sign convention so loadings are reproducible.
      if (V[1, k] < 0)
        V[, k] <- (-1) * V[, k]
      P <- P - (V[, k] %*% t(V[, k]))
    }
    if (scores) {
      list(scale=S, loadings=V, scores=Xcentr %*% V)
    } else list(scale=S, loadings=V)
  }
  x1 <- as.data.frame(object$x)
  x2 <- as.data.frame(object$mx)
  colnames(x2) <- colnames(x1)
  # Average relative absolute deviation of the column means.
  # NOTE(review): unlike amedian/amad below, no na.rm is used here, so a
  # single NA makes amean NA -- confirm whether that is intended.
  amx <- mapply(mean, x1)
  amxn <- mapply(mean, x2)
  amean <- sum(abs(amx - amxn)/(abs(amx)))
  # Average relative absolute deviation of the column medians.
  meds1 <- mapply(median, x1)
  meds2 <- mapply(median, x2)
  amedian <- sum(abs(meds1 - meds2) / abs(meds1), na.rm = TRUE)
  # One-step winsorisation: clamp each column to median +/- (3/1.486) * mad.
  onestep <- function(x) {
    y <- x
    constant <- 3/1.486
    m1 <- mapply(median, x)
    m2 <- mapply(mad, x)
    limit1 <- m1 + constant * m2
    limit2 <- m1 - constant * m2
    for (i in 1:dim(x)[2]) {
      if (any(x[, i] > limit1[i])) {
        w <- which(x[, i] > limit1[i])
        le <- length(w)
        y[w, i] <- limit1[i]
      }
      if (any(x[, i] < limit2[i])) {
        w <- which(x[, i] < limit2[i])
        le <- length(w)
        y[w, i] <- limit2[i]
      }
    }
    y
  }
  # Deviation of the one-step (winsorised) means.
  aox <- onestep(x1)
  aox <- mapply(mean, aox)
  aoxm <- onestep(x2)
  aoxm <- mapply(mean, aoxm)
  aonestep <- sum(abs(aox - aoxm) / abs(aox), na.rm = TRUE)
  # Deviations of variances, mads, covariances and correlations.
  devvar <- sum(abs(var(x1) - var(x2))/abs(var(x1)))/length(x1)
  amx <- mapply(mad, x1)
  amxn <- mapply(mad, x2)
  amad <- sum(abs(amx - amxn) / (abs(amx)), na.rm = TRUE)
  acov <- sum(abs(cov(x1) - cov(x2))/abs(cov(x1)))/(2 * length(x1))
  # Robust covariance/correlation deviations are currently not computed.
  arcov <- NA
  acor <- sum(abs(cor(x1) - cor(x2))/abs(cor(x1)))/(2 * length(x2))
  arcor <- NA
  acors <- sum(abs(cor(x1, method = "spearman") - cor(x2, method = "spearman")) /
    abs(cor(x1, method = "spearman"))) / (2 * length(x1))
  # Deviation of linear-model coefficients (first column regressed on the
  # rest); the intercept (element 1) is deliberately excluded.
  l1 <- lm(as.matrix(x1[, 1]) ~ as.matrix(x1[, -1]))$coeff
  l2 <- lm(as.matrix(x2[, 1]) ~ as.matrix(x2[, -1]))$coeff
  adlm <- sum(abs(l1[2:length(l1)] - l2[2:length(l2)]), na.rm = TRUE)
  adlts <- NA
  # Classical PCA loading deviations; requires more rows than columns.
  if (dim(x1)[1] > dim(x1)[2] && dim(x2)[1] > dim(x2)[2]) {
    p1 <- stats::princomp(x1)
    p2 <- stats::princomp(x2)
    cp1 <- colMeans(p1$load)
    cp2 <- colMeans(p2$load)
    apcaload <- sum(abs(cp1 - cp2)/abs(cp1))
  } else {
    # (sic) -- kept verbatim, callers may compare against this string.
    apcaload <- "too less observations"
  }
  # Robust (projection pursuit) PCA loading deviations.
  if (dim(x1)[1] > dim(x1)[2] && dim(x2)[1] > dim(x2)[2]) {
    p1 <- prcompRob(x1)
    p2 <- prcompRob(x2)
    cp1 <- colMeans(p1$load)
    cp2 <- colMeans(p2$load)
    apppcaload <- sum(abs(cp1 - cp2)/abs(cp1))
  } else {
    apppcaload <- "too less observations"
  }
  # Column totals; microaggregated totals are rescaled by the factor-of-totals
  # stored on the object (see factorOfTotals in this package).
  cmx1 <- apply(x1, 2, sum)
  cmx2 <- apply(x2, 2, sum) * object$fot
  atotals <- sum(abs((cmx1 - cmx2)/cmx1))
  pmtotals <- sum((cmx2 - cmx1)/cmx1)
  # Utility and disclosure-risk measures from companion package functions.
  util1 <- dUtility(x1, x2)
  deigenvalues <- dUtility(x1, x2, method = "eigen")
  risk0 <- dRisk(x1, x2)
  r <- dRiskRMD(x1, x2, k = 0.7)
  risk1 <- r$risk1
  risk2 <- r$risk2
  wrisk1 <- r$wrisk1
  wrisk2 <- r$wrisk2
  list(
    meansx = summary(x1),
    meansxm = summary(x2),
    amean = amean,
    amedian = amedian,
    aonestep = aonestep,
    devvar = devvar,
    amad = amad,
    acov = acov,
    arcov = arcov,
    acor = acor,
    arcor = arcor,
    acors = acors,
    adlm = adlm,
    adlts = adlts,
    apcaload = apcaload,
    apppcaload = apppcaload,
    totalsOrig = cmx1,
    totalsMicro = cmx2,
    atotals = atotals,
    pmtotals = pmtotals,
    util1 = util1,
    deigenvalues = deigenvalues,
    risk0 = risk0,
    risk1 = risk1,
    risk2 = risk2,
    wrisk1 = wrisk1,
    wrisk2 = wrisk2)
}
|
# NOTE(review): rm(list = ls()) destroys every object in the caller's global
# environment.  Kept for script compatibility, but this is generally
# discouraged in scripts.
rm(list = ls())

# Upper-tail Poisson probability P(X >= n) for X ~ Poisson(lambda).
#
# Args:
#   n:      non-negative count threshold.
#   lambda: Poisson rate; defaults to pi to preserve the original behaviour.
#
# Returns: P(X >= n), a single numeric value in [0, 1].
#
# The original computed 1 - sum(lambda^y * exp(-lambda) / factorial(y)) by
# hand, which returned NaN for n = 0 (0:(n-1) runs backwards) and loses
# precision for large n; stats::ppois handles both correctly.
# NOTE(review): this shadows stats::poisson (the GLM family constructor);
# name kept for compatibility with the rest of the exercise.
poisson <- function(n, lambda = pi) {
  stopifnot(is.numeric(n), n >= 0)
  ppois(n - 1, lambda, lower.tail = FALSE)
}

n <- 5
poisson(n)
|
/ex7.R
|
permissive
|
LAntoine/DEMI2E2-TPs-R
|
R
| false
| false
| 119
|
r
|
# NOTE(review): rm(list = ls()) destroys every object in the caller's global
# environment.  Kept for script compatibility, but this is generally
# discouraged in scripts.
rm(list = ls())

# Upper-tail Poisson probability P(X >= n) for X ~ Poisson(lambda).
#
# Args:
#   n:      non-negative count threshold.
#   lambda: Poisson rate; defaults to pi to preserve the original behaviour.
#
# Returns: P(X >= n), a single numeric value in [0, 1].
#
# The original computed 1 - sum(lambda^y * exp(-lambda) / factorial(y)) by
# hand, which returned NaN for n = 0 (0:(n-1) runs backwards) and loses
# precision for large n; stats::ppois handles both correctly.
# NOTE(review): this shadows stats::poisson (the GLM family constructor);
# name kept for compatibility with the rest of the exercise.
poisson <- function(n, lambda = pi) {
  stopifnot(is.numeric(n), n >= 0)
  ppois(n - 1, lambda, lower.tail = FALSE)
}

n <- 5
poisson(n)
|
Test translator definition with union output
|
/test/unittest/translators/tst.UnionOutputTrans.r
|
permissive
|
oracle/dtrace-utils
|
R
| false
| false
| 45
|
r
|
Test translator definition with union output
|
findNum <- function(data, alpha = 0.95, min.frac = 0.05, nlam = 20, type = "linear", num = 5, del = 0.9){
  # Locate a sparse-group-lasso penalty at which the fit keeps `num` non-zero
  # coefficient mass, by multiplicative search on the penalty, and return the
  # coefficients found there.
  #
  # Args:
  #   data:  list with $x (n x p design matrix) and $y (length-n response).
  #   alpha: mixing weight between the lasso (alpha) and group-lasso
  #          (1 - alpha) penalties.
  #   min.frac, nlam: retained for interface compatibility (nlam only sized an
  #          unused array in the original; neither is used here).
  #   type:  only "linear" is implemented.
  #   num:   target size for the non-zero part of the solution.
  #   del:   damping factor applied to the step when the search overshoots.
  #
  # Returns: the coefficient vector produced by the compiled routine
  #   "linNest" at the located penalty.
  #
  # NOTE(review): `index` (group membership of the columns of X) is neither an
  # argument nor defined locally; it is resolved from the calling/global
  # environment.  Confirm callers define it before use.
  if (type != "linear") {
    # The original fell through to return(junk$beta) and died with an opaque
    # "object 'junk' not found" error; fail early and clearly instead.
    stop("findNum only supports type = \"linear\"", call. = FALSE)
  }
  reset <- 10
  step <- 1
  gamma <- 0.8
  inner.iter <- 1000
  outer.iter <- 1000
  thresh <- 10^(-3)
  outer.thresh <- thresh
  X <- data$x
  y <- data$y
  n <- nrow(X)
  p <- ncol(X)
  ## Setting up group lasso stuff ##
  # Reorder columns so each group occupies a contiguous block; unOrd maps
  # back to the original column order.
  ord <- order(index)
  index <- index[ord]
  X <- X[, ord]
  unOrd <- match(seq_along(ord), ord)
  ## Coming up with other C info for the compiled routine ##
  groups <- unique(index)
  num.groups <- length(groups)
  # 0-based start offset of each group plus a trailing sentinel (= ncol(X)).
  range.group.ind <- rep(0, num.groups + 1)
  for (i in seq_len(num.groups)) {
    range.group.ind[i] <- min(which(index == groups[i])) - 1
  }
  range.group.ind[num.groups + 1] <- ncol(X)
  group.length <- diff(range.group.ind)
  beta.is.zero <- rep(1, num.groups)
  beta.old <- rep(0, ncol(X))
  eta <- rep(0, n)
  # Smallest penalty at which every coefficient shrinks to zero; the search
  # walks down (and back up) from here.
  max.lam <- max(t(X) %*% y)/n
  is.nonzero <- 0
  above <- 0
  change <- 0
  move <- 0.99
  # BUG FIX: the loop previously tested `is.nonzero != 5` (hard-coded 5), so
  # any call with num != 5 never terminated.  Compare against `num` instead;
  # behaviour is unchanged for the default num = 5.
  # NOTE(review): is.nonzero is sum(abs(beta)), not a count of non-zero
  # entries; kept as-is to preserve behaviour, but confirm intent upstream.
  while (is.nonzero != num) {
    junk <- .C("linNest", X = as.double(as.vector(X)), y = as.double(y), index = as.integer(index), nrow = as.integer(nrow(X)), ncol = as.integer(ncol(X)), numGroup = as.integer(num.groups), rangeGroupInd = as.integer(range.group.ind), groupLen = as.integer(group.length), lambda1 = as.double(alpha*max.lam), lambda2 = as.double((1-alpha)*max.lam), beta = as.double(beta.old), innerIter = as.integer(inner.iter), outerIter = as.integer(outer.iter), thresh = as.double(thresh), outerThresh = as.double(outer.thresh), eta = as.double(eta), gamma = as.double(gamma), betaIsZero = as.integer(beta.is.zero), step = as.double(step), reset = as.integer(reset))
    is.nonzero <- sum(abs(junk$beta))
    if (is.nonzero < num) {
      # Too sparse: lower the penalty.
      change <- above
      max.lam <- max.lam * move
      above <- 0
    }
    if (is.nonzero > num) {
      # Too dense: raise the penalty.
      change <- 1 - above
      max.lam <- max.lam/move
      above <- 1
    }
    if (change == 1) {
      # Direction flipped: damp the step so the search converges.
      move <- move * del
    }
  }
  return(junk$beta)
}
|
/Experiments/Tibshirani2013/SGL/R/zPathCalcExact.r
|
no_license
|
adityagc/MS-Research
|
R
| false
| false
| 2,087
|
r
|
# Locate a sparse-group-lasso penalty at which the fitted model keeps `num`
# non-zero coefficient mass (via the compiled routine "linNest") and return
# the coefficients found there.
#
# NOTE(review): three issues in this copy, preserved verbatim:
#  - the while condition compares against a literal 5 instead of `num`, so
#    any call with num != 5 never terminates;
#  - `index` (group membership) is neither an argument nor defined locally
#    and must exist in the calling/global environment;
#  - for type != "linear" the final return fails with "object 'junk' not
#    found" because junk is only created inside the linear branch.
findNum <- function(data, alpha = 0.95, min.frac = 0.05, nlam = 20, type = "linear", num = 5, del = 0.9){
  reset <- 10
  step <- 1
  gamma <- 0.8
  inner.iter <- 1000
  outer.iter <- 1000
  thresh = 10^(-3)
  outer.thresh = thresh
  n <- nrow(data$x)
  if(type == "linear"){
    X <- data$x
    y <- data$y
    n <- nrow(X)
    p <- ncol(X)
    ## Setting up group lasso stuff ##
    # Reorder columns so each group occupies a contiguous block.
    ord <- order(index)
    index <- index[ord]
    X <- X[,ord]
    unOrd <- match(1:length(ord),ord)
    ## Coming up with other C++ info ##
    groups <- unique(index)
    num.groups <- length(groups)
    # 0-based start offset of each group plus a trailing sentinel.
    range.group.ind <- rep(0,(num.groups+1))
    for(i in 1:num.groups){
      range.group.ind[i] <- min(which(index == groups[i])) - 1
    }
    range.group.ind[num.groups + 1] <- ncol(X)
    group.length <- diff(range.group.ind)
    beta.naught <- rep(0,ncol(X))
    beta <- beta.naught
    beta.is.zero <- rep(1, num.groups)
    beta.old <- rep(0, ncol(X))
    beta <- array(0, c(ncol(X),nlam,nlam))
    #matrix(0, nrow = ncol(X), ncol = nlam)
    eta <- rep(0,n)
    # Smallest penalty at which every coefficient is shrunk to zero.
    max.lam <- max(t(X)%*%y)/n
    is.nonzero <- 0
    above <- 0
    change <- 0
    move <- 0.99
    # NOTE(review): literal 5 here should almost certainly be `num`.
    while(is.nonzero != 5){
      junk <- .C("linNest", X = as.double(as.vector(X)), y = as.double(y), index = as.integer(index), nrow = as.integer(nrow(X)), ncol = as.integer(ncol(X)), numGroup = as.integer(num.groups), rangeGroupInd = as.integer(range.group.ind), groupLen = as.integer(group.length), lambda1 = as.double(alpha*max.lam), lambda2 = as.double((1-alpha)*max.lam), beta = as.double(beta.old), innerIter = as.integer(inner.iter), outerIter = as.integer(outer.iter), thresh = as.double(thresh), outerThresh = as.double(outer.thresh), eta = as.double(eta), gamma = as.double(gamma), betaIsZero = as.integer(beta.is.zero), step = as.double(step), reset = as.integer(reset))
      is.nonzero <- sum(abs(junk$beta))
      if(is.nonzero < num){
        # Too sparse: lower the penalty.
        change <- above
        max.lam <- max.lam * move
        above <- 0
      }
      if(is.nonzero > num){
        # Too dense: raise the penalty.
        change <- 1 - above
        max.lam <- max.lam /move
        above <- 1
      }
      if(change == 1){
        # Direction flipped: damp the step size.
        move <- move * del
      }
    }
  }
  return(junk$beta)
}
|
# Stratified Fisher exact test: exact p-value for a treatment effect across
# several 2x2 tables, computed by enumerating the joint support of the
# per-stratum case-arm responder counts (each hypergeometric given the
# margins) and summing the probability of outcomes at least as extreme as
# the observed total.
#
# Args:
#   strata, treatment, nsubjects, nresponders: per-arm summary vectors
#     (or names of columns of `data` when `data` is supplied).
#   case: value of `treatment` identifying the case/treated arm.
#   data: optional data frame supplying the four columns above.
#   prec: total probability mass that may be pruned from the support
#     (accuracy/speed trade-off for the enumeration).
#   side: "up", "lower" or "both".
#
# Returns: a single exact p-value.
#
# NOTE(review): relies on dplyr/tibble (%>%, tibble, group_by, ...) and
# magrittr (%$%) being attached by the caller; nothing is loaded here.
fast_SFET <- function(strata, treatment, nsubjects, nresponders,
                      case, data = NULL, prec = 1e-4, side = c("up")){
  nstrata <- strata %>% unique() %>% length()
  if (data %>% is.null()) {
    indata <- tibble(strata = strata, treatment = treatment, nsubjects = nsubjects,
                     nresponders = nresponders) %>% arrange(strata)
  } else {
    indata <- data %$% tibble(strata = strata, treatment = treatment, nsubjects = nsubjects,
                              nresponders = nresponders) %>% arrange(strata)
  }
  # Observed total responders in the case arm (the test statistic).
  ss <- nresponders %>% subset(treatment == case) %>% sum()
  # Per-stratum margins: zz = total responders, mm = case-arm subjects,
  # nn = total subjects.
  zz <- indata %>%
    group_by(strata) %>%
    summarise(tot_responders = sum(nresponders)) %>%
    .$tot_responders
  mm <- indata %>%
    filter(treatment == case) %>%
    .$nsubjects
  nn <- indata %>%
    group_by(strata) %>%
    summarise(tot_subjects = sum(nsubjects)) %>%
    .$tot_subjects
  # Hypergeometric support bounds for each stratum's case-arm count.
  mlower <- pmax(0, zz-nn+mm)
  mupper <- pmin(zz, mm)
  mcounts <- mupper - mlower + 1 ## all plausible counts
  # x_range <- vector(mode = "list", length = nstrata)
  # for(i in 1:nstrata){
  #   x_range[[i]] <- m_lower[i]:m_upper[i]
  # }
  ## imaginary combination of underlying table: expand.grid(x_range)
  x_prob <- x_range <- vector(mode = "list", length = nstrata)
  for(i in 1:nstrata){
    x_range[[i]] <- mlower[i]:mupper[i]
  }
  ## calculate the hypergeometric density in each 2*2 table
  for(i in 1:nstrata){
    x_prob[[i]] <- dhyper(x_range[[i]], m = mm[i], n = nn[i] - mm[i], k = zz[i], log=TRUE)
  }
  ## acceleration, reduce the sample space for computation
  ## compute the maximum log probability that can be reduced from each stratum
  log_pthresh <- log(1 - (1-prec)^(1 / nstrata))
  ## for each stratum, remove the points that are unlikely
  for (i in 1:nstrata) {
    remained <- x_prob[[i]] >= log_pthresh - log(mcounts[i])
    mcounts[i] <- sum(remained)
    x_range[[i]] <- x_range[[i]][remained]
    x_prob[[i]] <- x_prob[[i]][remained]
    mlower[i] <- x_range[[i]][1]
    mupper[i] <- x_range[[i]][mcounts[i]]
  }
  ## initialize pvalue and the iterating machine
  pvalue <- pvalue_up <- pvalue_lower <- 0
  current <- rep(1, nstrata)
  # Odometer-style enumeration: `current` indexes one support point per
  # stratum; incrementing position 1 carries overflow into later strata.
  while(1) {
    # print(current)
    # print(pvalue)
    ## iterating over all combinations of indices
    for (i in 1:(nstrata - 1)) {
      if (current[i] > mcounts[i]) {
        current[i] <- 1
        current[i+1] <- current[i+1] +1
      }
    }
    # Overflow of the last stratum means the enumeration is exhausted.
    if (current[nstrata] > mcounts[nstrata]) {
      if (side == "up"| side =="lower") {
        return(pvalue)
      } else {
        # Two-sided: twice the smaller tail, capped at 1.
        pvalue <- min(1, 2 * min(pvalue_up, pvalue_lower))
        return(pvalue)
      }
    }
    if (side == "up") {
      # Accumulate P(sum of counts >= observed total ss).
      if (sum(current) + sum(mlower) - nstrata >= ss) {
        x_samp_prob <- rep(NA, nstrata)
        for (j in 1:nstrata) {
          x_samp_prob[j] <- x_prob[[j]][current[j]]
        }
        pvalue <- pvalue + exp(sum(x_samp_prob))
      }
    } else if (side == "lower") {
      if (sum(current) + sum(mlower) - nstrata <= ss) {
        x_samp_prob <- rep(NA, nstrata)
        for (j in 1:nstrata) {
          x_samp_prob[j] <- x_prob[[j]][current[j]]
        }
        pvalue <- pvalue + exp(sum(x_samp_prob))
      }
    } else if (side == "both") {
      ## up-tail probability
      if (sum(current) + sum(mlower) - nstrata >= ss) {
        x_samp_prob <- rep(NA, nstrata)
        for (j in 1:nstrata) {
          x_samp_prob[j] <- x_prob[[j]][current[j]]
        }
        pvalue_up <- pvalue_up + exp(sum(x_samp_prob))
      }
      ## lower-tail probability
      if (sum(current) + sum(mlower) - nstrata <= ss) {
        x_samp_prob <- rep(NA, nstrata)
        for (j in 1:nstrata) {
          x_samp_prob[j] <- x_prob[[j]][current[j]]
        }
        pvalue_lower <- pvalue_lower + exp(sum(x_samp_prob))
      }
    } else {
      stop("side should be one of \"up\", \"lower\" or \"both\".")
    }
    current[1] <- current[1] + 1
  }
}
# strata <- rep(1:3, each = 2)
# treatment <- rep(1:2, 3)
# nsubjects <- c(670, 530, 500, 710, 490, 610)
# nresponders <- c(21, 22, 19, 16, 20, 24)
# case <- 1
# prec <- 1e-4
#
# tt1 <- system.time(test.p.exact.up <- fast_SFET(strata, treatment, nsubjects, nresponders, case, side = "up"))
# # p-value: 0.395
# # user system elapsed
# # 0.43 0.00 0.44
#
# tt2 <- system.time(test.p.exact.lower <- fast_SFET(strata, treatment, nsubjects, nresponders, case, side = "lower"))
# # p-value: 0.675
# # user system elapsed
# # 0.22 0.00 0.22
#
# tt3 <- system.time(test.p.exact.both <- fast_SFET(strata, treatment, nsubjects, nresponders, case, side = "both"))
# # p-value: 0.79
# # user system elapsed
# # 0.36 0.00 0.36
|
/SFET/R/fast_SFET.R
|
no_license
|
KenLi93/Stratified-Fisher-Exact-Test
|
R
| false
| false
| 4,713
|
r
|
# Stratified Fisher exact test: exact p-value over several 2x2 tables by
# enumerating the joint hypergeometric support of the per-stratum case-arm
# responder counts.  `prec` bounds the probability mass pruned from the
# support; `side` selects the "up", "lower" or "both" tail.
# NOTE(review): requires dplyr/tibble (%>%) and magrittr (%$%) to be attached
# by the caller; nothing is loaded here.
fast_SFET <- function(strata, treatment, nsubjects, nresponders,
                      case, data = NULL, prec = 1e-4, side = c("up")){
  nstrata <- strata %>% unique() %>% length()
  if (data %>% is.null()) {
    indata <- tibble(strata = strata, treatment = treatment, nsubjects = nsubjects,
                     nresponders = nresponders) %>% arrange(strata)
  } else {
    indata <- data %$% tibble(strata = strata, treatment = treatment, nsubjects = nsubjects,
                              nresponders = nresponders) %>% arrange(strata)
  }
  # Observed case-arm responder total (the test statistic).
  ss <- nresponders %>% subset(treatment == case) %>% sum()
  zz <- indata %>%
    group_by(strata) %>%
    summarise(tot_responders = sum(nresponders)) %>%
    .$tot_responders
  mm <- indata %>%
    filter(treatment == case) %>%
    .$nsubjects
  nn <- indata %>%
    group_by(strata) %>%
    summarise(tot_subjects = sum(nsubjects)) %>%
    .$tot_subjects
  mlower <- pmax(0, zz-nn+mm)
  mupper <- pmin(zz, mm)
  mcounts <- mupper - mlower + 1 ## all plausible counts
  # x_range <- vector(mode = "list", length = nstrata)
  # for(i in 1:nstrata){
  #   x_range[[i]] <- m_lower[i]:m_upper[i]
  # }
  ## imaginary combination of underlying table: expand.grid(x_range)
  x_prob <- x_range <- vector(mode = "list", length = nstrata)
  for(i in 1:nstrata){
    x_range[[i]] <- mlower[i]:mupper[i]
  }
  ## calculate the hypergeometric density in each 2*2 table
  for(i in 1:nstrata){
    x_prob[[i]] <- dhyper(x_range[[i]], m = mm[i], n = nn[i] - mm[i], k = zz[i], log=TRUE)
  }
  ## acceleration, reduce the sample space for computation
  ## compute the maximum log probability that can be reduced from each stratum
  log_pthresh <- log(1 - (1-prec)^(1 / nstrata))
  ## for each stratum, remove the points that are unlikely
  for (i in 1:nstrata) {
    remained <- x_prob[[i]] >= log_pthresh - log(mcounts[i])
    mcounts[i] <- sum(remained)
    x_range[[i]] <- x_range[[i]][remained]
    x_prob[[i]] <- x_prob[[i]][remained]
    mlower[i] <- x_range[[i]][1]
    mupper[i] <- x_range[[i]][mcounts[i]]
  }
  ## initialize pvalue and the iterating machine
  pvalue <- pvalue_up <- pvalue_lower <- 0
  current <- rep(1, nstrata)
  # Odometer-style enumeration over all combinations of support points.
  while(1) {
    # print(current)
    # print(pvalue)
    ## iterating over all combinations of indices
    for (i in 1:(nstrata - 1)) {
      if (current[i] > mcounts[i]) {
        current[i] <- 1
        current[i+1] <- current[i+1] +1
      }
    }
    if (current[nstrata] > mcounts[nstrata]) {
      if (side == "up"| side =="lower") {
        return(pvalue)
      } else {
        pvalue <- min(1, 2 * min(pvalue_up, pvalue_lower))
        return(pvalue)
      }
    }
    if (side == "up") {
      if (sum(current) + sum(mlower) - nstrata >= ss) {
        x_samp_prob <- rep(NA, nstrata)
        for (j in 1:nstrata) {
          x_samp_prob[j] <- x_prob[[j]][current[j]]
        }
        pvalue <- pvalue + exp(sum(x_samp_prob))
      }
    } else if (side == "lower") {
      if (sum(current) + sum(mlower) - nstrata <= ss) {
        x_samp_prob <- rep(NA, nstrata)
        for (j in 1:nstrata) {
          x_samp_prob[j] <- x_prob[[j]][current[j]]
        }
        pvalue <- pvalue + exp(sum(x_samp_prob))
      }
    } else if (side == "both") {
      ## up-tail probability
      if (sum(current) + sum(mlower) - nstrata >= ss) {
        x_samp_prob <- rep(NA, nstrata)
        for (j in 1:nstrata) {
          x_samp_prob[j] <- x_prob[[j]][current[j]]
        }
        pvalue_up <- pvalue_up + exp(sum(x_samp_prob))
      }
      ## lower-tail probability
      if (sum(current) + sum(mlower) - nstrata <= ss) {
        x_samp_prob <- rep(NA, nstrata)
        for (j in 1:nstrata) {
          x_samp_prob[j] <- x_prob[[j]][current[j]]
        }
        pvalue_lower <- pvalue_lower + exp(sum(x_samp_prob))
      }
    } else {
      stop("side should be one of \"up\", \"lower\" or \"both\".")
    }
    current[1] <- current[1] + 1
  }
}
# strata <- rep(1:3, each = 2)
# treatment <- rep(1:2, 3)
# nsubjects <- c(670, 530, 500, 710, 490, 610)
# nresponders <- c(21, 22, 19, 16, 20, 24)
# case <- 1
# prec <- 1e-4
#
# tt1 <- system.time(test.p.exact.up <- fast_SFET(strata, treatment, nsubjects, nresponders, case, side = "up"))
# # p-value: 0.395
# # user system elapsed
# # 0.43 0.00 0.44
#
# tt2 <- system.time(test.p.exact.lower <- fast_SFET(strata, treatment, nsubjects, nresponders, case, side = "lower"))
# # p-value: 0.675
# # user system elapsed
# # 0.22 0.00 0.22
#
# tt3 <- system.time(test.p.exact.both <- fast_SFET(strata, treatment, nsubjects, nresponders, case, side = "both"))
# # p-value: 0.79
# # user system elapsed
# # 0.36 0.00 0.36
|
117984e2d590475c0e94c743c3c5d79d query64_query51_1344n.qdimacs 1143 2701
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query64_query51_1344n/query64_query51_1344n.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 72
|
r
|
117984e2d590475c0e94c743c3c5d79d query64_query51_1344n.qdimacs 1143 2701
|
#########
## Experiment
#########
# Synthetic-correlation experiment: for each filtering level, build synthetic
# asset pairs at a grid of target correlations from filtered EURUSD/EURGBP
# series, then compute prediction errors in parallel and write them plus the
# realised correlations to CSV files under `resdir`.
# Requires functions.R / models.R (lpDataFromDir, gaussianFilter, synthAssets,
# predictionMSE) which are sourced from the working directory.
resdir = 'res/'
assets = c("EURUSD","EURGBP")
filterings = c(6,9,12)
correlations = seq(from=-0.95,to=0.95,by=0.05)
omega0 = 144
source('functions.R')
library(doParallel)
# NOTE(review): worker count is hard-coded to 40; consider detectCores().
cl <- makeCluster(40)
registerDoParallel(cl)
for(filtering in filterings){
  # load data (reloaded on every iteration although it does not depend on
  # `filtering` -- loop-invariant work)
  x1=lpDataFromDir('data/filtered','EURUSD')
  x2=lpDataFromDir('data/filtered','EURGBP')
  # refilter
  xf1 = gaussianFilter(x1,filtering);
  xf2 = gaussianFilter(x2,filtering);
  # generate synthetic data for each level of correlation; each column of
  # `synth` stacks the two synthetic series for one target correlation
  synthlength = length(xf1)-(4*omega0)
  synth = matrix(0,2*synthlength,length(correlations))
  for(i in 1:length(correlations)){
    s=synthAssets(xf1,xf2,correlations[i],omega0);
    synth[,i]=c(s[,1],s[,2])
  }
  # realised (effective) correlation of the differenced synthetic series
  rhoeff=c()
  for(j in 1:length(correlations)){
    x1s=synth[1:synthlength,j];
    x2s=synth[(synthlength+1):(2*synthlength),j];
    rhoeff = append(rhoeff,cor(diff(x1s),diff(x2s)))
  }
  # compute perfs in //
  # estimated computing time : for t \in [1:100] at filtering=6 : tau = 160s
  # length(x1s)=6000 -> tautot = tau*60 ~ 3h !
  # filtering = 12 -> tautot ~ 1,5h
  #
  res <- foreach(j=1:ncol(synth)) %dopar% {
    source('functions.R');source('models.R')
    # NOTE(review): base::sample(x, filtering/2) draws a random subsample of
    # only filtering/2 points (3, 4 or 6) with shuffled order -- possibly a
    # shadowing `sample` helper was intended; confirm against functions.R.
    x1s=sample(synth[1:synthlength,j],filtering/2);x2s=sample(synth[(synthlength+1):(2*synthlength),j],filtering/2);
    #t=system.time(predictionMSE(x1s[1:200],x2s[1:200],288*2/filtering,2));t
    m=predictionMSE(x1s,x2s,288*2/filtering,2)
    error=(m$expected-m$pred)^2
    res=c(error[,1],error[,2])
    res
  }
  # get results into data frame
  vals_mat = matrix(0,length(res),length(res[[1]]))
  for(a in 1:length(res)){vals_mat[a,]=res[[a]]}
  v = data.frame(vals_mat);
  write.table(v,file=paste0(resdir,"all_",filtering,".csv"),sep=";",row.names=FALSE,col.names=FALSE)
  write.table(rhoeff,file=paste0(resdir,"rhoeff_",filtering,".csv"),sep=";",row.names=FALSE,col.names=FALSE)
}
stopCluster(cl)
|
/Models/experiment.R
|
no_license
|
zone86/synthetic_data
|
R
| false
| false
| 2,040
|
r
|
#########
## Experiment
#########
# Synthetic-correlation experiment: builds synthetic EURUSD/EURGBP pairs at a
# grid of target correlations for several filtering levels, evaluates
# prediction errors in parallel and writes results to CSV.  Depends on
# functions.R / models.R sourced from the working directory.
resdir = 'res/'
assets = c("EURUSD","EURGBP")
filterings = c(6,9,12)
correlations = seq(from=-0.95,to=0.95,by=0.05)
omega0 = 144
source('functions.R')
library(doParallel)
# NOTE(review): worker count hard-coded to 40.
cl <- makeCluster(40)
registerDoParallel(cl)
for(filtering in filterings){
  # load data
  x1=lpDataFromDir('data/filtered','EURUSD')
  x2=lpDataFromDir('data/filtered','EURGBP')
  # refilter
  xf1 = gaussianFilter(x1,filtering);
  xf2 = gaussianFilter(x2,filtering);
  # generate synthetic data for each level of correlation
  synthlength = length(xf1)-(4*omega0)
  synth = matrix(0,2*synthlength,length(correlations))
  for(i in 1:length(correlations)){
    s=synthAssets(xf1,xf2,correlations[i],omega0);
    synth[,i]=c(s[,1],s[,2])
  }
  rhoeff=c()
  for(j in 1:length(correlations)){
    x1s=synth[1:synthlength,j];
    x2s=synth[(synthlength+1):(2*synthlength),j];
    rhoeff = append(rhoeff,cor(diff(x1s),diff(x2s)))
  }
  # compute perfs in //
  # estimated computing time : for t \in [1:100] at filtering=6 : tau = 160s
  # length(x1s)=6000 -> tautot = tau*60 ~ 3h !
  # filtering = 12 -> tautot ~ 1,5h
  #
  res <- foreach(j=1:ncol(synth)) %dopar% {
    source('functions.R');source('models.R')
    # NOTE(review): sample(x, filtering/2) subsamples only a handful of
    # shuffled points -- confirm this is intentional (see functions.R).
    x1s=sample(synth[1:synthlength,j],filtering/2);x2s=sample(synth[(synthlength+1):(2*synthlength),j],filtering/2);
    #t=system.time(predictionMSE(x1s[1:200],x2s[1:200],288*2/filtering,2));t
    m=predictionMSE(x1s,x2s,288*2/filtering,2)
    error=(m$expected-m$pred)^2
    res=c(error[,1],error[,2])
    res
  }
  # get results into data frame
  vals_mat = matrix(0,length(res),length(res[[1]]))
  for(a in 1:length(res)){vals_mat[a,]=res[[a]]}
  v = data.frame(vals_mat);
  write.table(v,file=paste0(resdir,"all_",filtering,".csv"),sep=";",row.names=FALSE,col.names=FALSE)
  write.table(rhoeff,file=paste0(resdir,"rhoeff_",filtering,".csv"),sep=";",row.names=FALSE,col.names=FALSE)
}
stopCluster(cl)
|
# Regression tests for asremlPlus prediction/presentation helpers, run
# against the proprietary ASReml-R 4.1 package.  Every test is skipped
# unless asreml is installed, and skipped on CRAN.
#devtools::test("asremlPlus")
context("prediction_presentation")
# NOTE(review): hard-coded local Windows library path; these tests only run
# on a machine with ASReml-R 4.1 installed at this location.
asr41.lib <- "D:\\Analyses\\R ASReml4.1"
cat("#### Test for Intercept prediction on Oats with asreml41\n")
test_that("predict_Intercept4", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  library(dae)
  data(Oats.dat)
  m1.asr <- asreml(Yield ~ Nitrogen*Variety,
                   random=~Blocks/Wplots,
                   data=Oats.dat)
  testthat::expect_equal(length(m1.asr$vparameters),3)
  current.asrt <- as.asrtests(m1.asr)
  #Test for Intercept predict
  Int.pred <- predict(m1.asr, classify="(Intercept)")$pvals
  testthat::expect_equal(nrow(Int.pred), 1)
  testthat::expect_true(abs( Int.pred$predicted.value - 103.9722) < 1e-04)
  Int.diffs <- predictPlus(m1.asr, classify="(Intercept)")
  testthat::expect_equal(length(Int.diffs),7)
  testthat::expect_equal(nrow(Int.diffs$predictions), 1)
  testthat::expect_true(abs( Int.diffs$predictions$predicted.value - 103.9722) < 1e-04)
  xtitl <- "Overall mean"
  names(xtitl) <- "Intercept"
  testthat::expect_silent(plotPredictions(classify="(Intercept)", y = "predicted.value",
                                          data = Int.diffs$predictions,
                                          y.title = "Yield", titles = xtitl,
                                          error.intervals = "Conf"))
})
cat("#### Test for predictPlus.asreml41\n")
test_that("predictPlus.asreml41", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  library(dae)
  data(WaterRunoff.dat)
  asreml.options(keep.order = TRUE) #required for asreml4 only
  testthat::expect_warning(current.asr <- asreml(fixed = pH ~ Benches + (Sources * (Type + Species)),
                                                 random = ~ Benches:MainPlots,
                                                 data= WaterRunoff.dat))
  testthat::expect_output(current.asrt <- as.asrtests(current.asr, NULL, NULL),
                          regexp = "Calculating denominator DF")
  testthat::expect_silent(diffs <- predictPlus(classify = "Sources:Type",
                                               asreml.obj = current.asr, tables = "none",
                                               wald.tab = current.asrt$wald.tab,
                                               present = c("Type","Species","Sources")))
  testthat::expect_is(diffs, "alldiffs")
  #### Get the observed combinations of the factors and variables in classify
  class.facs <- c("Species","Date","xDay")
  levs <- as.data.frame(table(WaterRunoff.dat[class.facs]))
  levs <- levs[do.call(order, levs), ]
  levs <- as.list(levs[levs$Freq != 0, class.facs])
  levs$xDay <- as.numfac(levs$xDay)
  current.asr <- asreml(fixed = log.Turbidity ~ Benches + Sources + Type + Species +
                          Sources:Type + Sources:Species +
                          Sources:xDay + Species:xDay + Species:Date,
                        data = WaterRunoff.dat)
  current.asrt <- as.asrtests(current.asr, NULL, NULL)
  # parallel = TRUE with explicit `levels` restricts predictions to observed
  # factor/covariate combinations.
  diffs.p <- predictPlus(asreml.obj = current.asr,
                         classify="Species:Date:xDay",
                         term = "Species:Date",
                         parallel = TRUE, levels=levs,
                         present=c("Type","Species","Sources"),
                         x.num = "xDay", x.fac = "Date",
                         x.plot.values=c(0,28,56,84), tables = "none",
                         wald.tab = current.asrt$wald.tab)
  testthat::expect_is(diffs.p, "alldiffs")
})
cat("#### Test for plotPredictions.asreml41\n")
test_that("plotPredictions.asreml41", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  library(ggplot2)
  library(dae)
  data(WaterRunoff.dat)
  #### Get the observed combinations of the factors and variables in classify
  class.facs <- c("Species","Date","xDay")
  levs <- as.data.frame(table(WaterRunoff.dat[class.facs]))
  levs <- levs[do.call(order, levs), ]
  levs <- as.list(levs[levs$Freq != 0, class.facs])
  levs$xDay <- as.numfac(levs$xDay)
  asreml.options(keep.order = TRUE) #required for asreml4 only
  current.asr <- asreml(fixed = log.Turbidity ~ Benches + Sources + Type + Species +
                          Sources:Type + Sources:Species +
                          Sources:xDay + Species:xDay + Species:Date,
                        data = WaterRunoff.dat)
  current.asrt <- as.asrtests(current.asr, NULL, NULL)
  predictions <- predict(current.asr, class="Species:Date:xDay",
                         parallel = TRUE, levels = levs,
                         present = c("Type","Species","Sources"))$pvals
  predictions <- predictions[predictions$status == "Estimable",]
  x.title <- "Days since first observation"
  names(x.title) <- "xDay"
  #Get predictions without specifying levels
  plotPredictions(classify="Species:Date:xDay", y = "predicted.value",
                  data = predictions, wald.tab = current.asrt$wald.tab,
                  x.num = "xDay", x.fac = "Date",
                  titles = x.title,
                  y.title = "Predicted log(Turbidity)",
                  present = c("Type","Species","Sources"),
                  error.intervals = "none",
                  ggplotFuncs = list(ggtitle("Transformed turbidity over time")))
  #Specify the levs and parallel = TRUE
  diffs <- predictPlus(asreml.obj = current.asr,
                       classify="Species:Date:xDay",
                       term = "Species:Date",
                       present=c("Type","Species","Sources"),
                       x.num = "xDay", x.fac = "Date",
                       parallel = TRUE, levels = levs,
                       x.plot.values=c(0,28,56,84),
                       wald.tab = current.asrt$wald.tab)
  plotPredictions(classify="Species:Date:xDay", y = "predicted.value",
                  data = diffs$predictions, wald.tab = current.asrt$wald.tab,
                  x.num = "xDay", x.fac = "Date",
                  titles = x.title,
                  y.title = "Predicted log(Turbidity)")
  # Dummy expectation so the test records a pass when the plots succeed.
  testthat::expect_silent("dummy")
})
cat("#### Test for predictPresent.asreml41\n")
test_that("predictPresent.asreml41", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(dae)
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  data(WaterRunoff.dat)
  #### Get the observed combinations of the factors and variables in classify
  class.facs <- c("Species","Date","xDay")
  levs <- as.data.frame(table(WaterRunoff.dat[class.facs]))
  levs <- levs[do.call(order, levs), ]
  levs <- as.list(levs[levs$Freq != 0, class.facs])
  levs$xDay <- as.numfac(levs$xDay)
  titles <- list("Days since first observation", "Days since first observation", "pH", "Turbidity (NTU)")
  names(titles) <- names(WaterRunoff.dat)[c(5,7,11:12)]
  asreml.options(keep.order = TRUE) #required for asreml4 only
  current.asr <- asreml(fixed = log.Turbidity ~ Benches + Sources + Type + Species +
                          Sources:Type + Sources:Species + Sources:Species:xDay +
                          Sources:Species:Date,
                        data = WaterRunoff.dat)
  current.asrt <- as.asrtests(current.asr, NULL, NULL)
  #Example that fails because Date has levels that are not numeric in nature
  testthat::expect_error(diff.list <- predictPresent(terms = "Date:Sources:Species",
                                                     asreml.obj = current.asrt$asreml.obj,
                                                     wald.tab = current.asrt$wald.tab,
                                                     x.fac = "Date",
                                                     plots = "predictions",
                                                     error.intervals = "StandardError",
                                                     titles = titles,
                                                     transform.power = 0,
                                                     present = c("Type","Species","Sources"),
                                                     tables = "differences",
                                                     level.length = 6))
  #Example that does not produce predictions because has Date but not xDay
  testthat::expect_error(diff.list <- predictPresent(terms = "Date:Sources:Species",
                                                     asreml.obj = current.asrt$asreml.obj,
                                                     wald.tab = current.asrt$wald.tab,
                                                     plots = "predictions",
                                                     error.intervals = "StandardError",
                                                     titles = titles,
                                                     transform.power = 0,
                                                     present = c("Type","Species","Sources","Date"),
                                                     tables = "differences",
                                                     level.length = 6))
  #### Get the observed combinations of the factors and variables in classify
  class.facs <- c("Sources","Species","Date","xDay")
  levs <- as.data.frame(table(WaterRunoff.dat[class.facs]))
  levs <- levs[do.call(order, levs), ]
  levs <- as.list(levs[levs$Freq != 0, class.facs])
  levs$xDay <- as.numfac(levs$xDay)
  # parallel and levels are arguments from predict.asreml
  diff.list <- predictPresent.asreml(asreml.obj = current.asrt$asreml.obj,
                                     terms = "Date:Sources:Species:xDay",
                                     x.num = "xDay", x.fac = "Date",
                                     parallel = TRUE, levels = levs,
                                     wald.tab = current.asrt$wald.tab,
                                     plots = "predictions",
                                     error.intervals = "StandardError",
                                     titles = titles,
                                     transform.power = 0,
                                     present = c("Type","Species","Sources"),
                                     tables = "none",
                                     level.length = 6)
  testthat::expect_equal(length(diff.list), 1)
  testthat::expect_match(names(diff.list), "Date.Sources.Species.xDay")
  # test that backtransforms have halfLSD intervals
  diff.list <- predictPresent.asreml(asreml.obj = current.asrt$asreml.obj,
                                     terms = "Date:Sources:Species:xDay",
                                     x.num = "xDay", x.fac = "Date",
                                     parallel = TRUE, levels = levs,
                                     wald.tab = current.asrt$wald.tab,
                                     plots = "backtransforms",
                                     error.intervals = "halfLeast",
                                     avsed.tolerance = 1,
                                     titles = titles,
                                     transform.power = 0,
                                     present = c("Type","Species","Sources"),
                                     tables = "none",
                                     level.length = 6)
  testthat::expect_equal(length(diff.list), 1)
  testthat::expect_match(names(diff.list), "Date.Sources.Species.xDay")
  testthat::expect_true(all(c("upper.halfLeastSignificant.limit",
                              "lower.halfLeastSignificant.limit") %in%
                              names(diff.list$Date.Sources.Species.xDay$backtransforms)))
})
#### This test is not relevant to asreml3 because its saving of sed and vcov are different
cat("#### Test for error when no predictions.asreml41\n")
test_that("noPredictions.asreml41", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  data(gw.dat)
  current.asr <- do.call(asreml,
                         args=list(fixed = y ~ Species*Substrate*Irrigation,
                                   random = ~ Row + Column,
                                   keep.order=TRUE, data = gw.dat,
                                   maxit=50, workspace = 1e08, stepsize = 0.0001))
  current.asrt <- as.asrtests(current.asr, NULL, NULL)
  current.asrt <- rmboundary(current.asrt)
  # Both calls must fail because predict.asreml does not return the sed /
  # vcov components requested by predictPresent.
  testthat::expect_error(diffs <- predictPresent(current.asrt$asreml.obj,
                                                 terms = "Irrigation",
                                                 error.intervals = "Conf",
                                                 wald.tab = current.asrt$wald.tab,
                                                 tables = "none")[[1]],
                         regexp = "predict.asreml has not returned the sed component for the predictions as requested",
                         fixed = TRUE)
  testthat::expect_error(diffs <- predictPresent(current.asrt$asreml.obj,
                                                 terms = "Irrigation",
                                                 linear.transformation = ~ Irrigation,
                                                 error.intervals = "Conf",
                                                 wald.tab = current.asrt$wald.tab,
                                                 tables = "none")[[1]],
                         regexp = "predict.asreml has not returned the variance matrix of the predictions as requested",
                         fixed = TRUE)
})
#Test plotPvalues, both on a melted data.frame of p-values and on an alldiffs
#object, for the WaterRunoff pH analysis
cat("#### Test for plotPvalues.asreml41\n")
test_that("plotPvalues.asreml41", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  library(dae)
  library(reshape2)
  data(WaterRunoff.dat)
  asreml.options(keep.order = TRUE) #required for asreml4 only
  testthat::expect_output(current.asr <- asreml(fixed = pH ~ Benches + (Sources * (Type + Species)),
                                                random = ~ Benches:MainPlots,
                                                data= WaterRunoff.dat))
  current.asrt <- as.asrtests(current.asr, NULL, NULL)
  diffs <- predictPlus.asreml(classify = "Sources:Type",
                              asreml.obj = current.asr, tables = "none",
                              wald.tab = current.asrt$wald.tab,
                              present = c("Type","Species","Sources"))
  testthat::expect_is(diffs, "alldiffs")
  #Melt the p.differences matrix into a data.frame for the data.frame method
  p <- diffs$p.differences
  p <- within(reshape2::melt(p),
              {
                Var1 <- factor(Var1, levels=dimnames(diffs$p.differences)[[1]])
                Var2 <- factor(Var2, levels=levels(Var1))
              })
  names(p) <- c("Rows","Columns","p")
  testthat::expect_silent(plotPvalues(p, x = "Rows", y = "Columns",
                                      gridspacing = rep(c(3,4), c(4,2)),
                                      show.sig = TRUE))
  #Test different size, face and colour
  testthat::expect_silent(plotPvalues(p, x = "Rows", y = "Columns",
                                      gridspacing = rep(c(3,4), c(4,2)),
                                      show.sig = TRUE, sig.size = 5, sig.colour = "blue"))
  testthat::expect_silent(plotPvalues(p, x = "Rows", y = "Columns",
                                      gridspacing = rep(c(3,4), c(4,2)),
                                      show.sig = TRUE, sig.size = 5, sig.face = "bold",
                                      sig.family = "serif"))
  #Plot with sections
  pdata <- plotPvalues(diffs, sections = "Sources", show.sig = TRUE)
  testthat::expect_equal(nrow(pdata$pvalues), 400)
  testthat::expect_equal(ncol(pdata$pvalues), 5)
  testthat::expect_true(all(c("Rows","Columns","p","sections1","sections2") %in% names(pdata$pvalues)))
  testthat::expect_equal(length(pdata$plots), 6)
  testthat::expect_equal(names(pdata$plots), c("Rainwater","Recycled water","Tap water",
                                               "Rain+Basalt","Rain+Dolomite","Rain+Quartzite"))
  #Plot without sections, but automatic gridspacing
  pupdata <- plotPvalues(diffs, show.sig = TRUE, factors.per.grid = 1)
  testthat::expect_equal(nrow(pupdata$pvalues), 400)
  testthat::expect_equal(ncol(pupdata$pvalues), 3)
  testthat::expect_true(all(c("Rows","Columns","p") %in% names(pupdata$pvalues)))
  testthat::expect_equal(sum(!is.na(pupdata$pvalues$p)), 380)
  testthat::expect_equal(length(pupdata$plots), 1)
  #Plot without sections, but automatic gridspacing and upper triangle
  pupdata <- plotPvalues(diffs, show.sig = TRUE, factors.per.grid = 1,
                         triangles = "upper")
  testthat::expect_equal(nrow(pupdata$pvalues), 400)
  testthat::expect_equal(ncol(pupdata$pvalues), 3)
  testthat::expect_true(all(c("Rows","Columns","p") %in% names(pupdata$pvalues)))
  testthat::expect_equal(sum(!is.na(pupdata$pvalues$p)), 190)
  #Plot without sections, but manual gridspacing and upper triangle
  pupdata <- plotPvalues(diffs, show.sig = TRUE, gridspacing = rep(c(3,4), c(4,2)),
                         triangles = "upper")
  testthat::expect_equal(nrow(pupdata$pvalues), 400)
  testthat::expect_equal(ncol(pupdata$pvalues), 3)
  testthat::expect_true(all(c("Rows","Columns","p") %in% names(pupdata$pvalues)))
  testthat::expect_equal(sum(!is.na(pupdata$pvalues$p)), 190)
  #Plot with sections and upper triangle (the 190 non-NA p-values after na.omit
  #are the upper-triangle entries across the section plots)
  pupdata <- plotPvalues(diffs, sections = "Sources", show.sig = TRUE, triangles = "upper")
  pupdata$pvalues <- na.omit(pupdata$pvalues)
  testthat::expect_equal(nrow(pupdata$pvalues), 190)
  testthat::expect_equal(ncol(pupdata$pvalues), 5)
  testthat::expect_true(all(c("Rows","Columns","p","sections1","sections2") %in%
                              names(pupdata$pvalues)))
})
#Test plotPvalues on a stored alldiffs object (LeafSucculence). Renamed from
#"plotPvalues.asreml41" so failures are not confused with the WaterRunoff
#plotPvalues test, which had the identical description.
cat("#### Test for plotPvalues LeafSucculence asreml41\n")
test_that("plotPvalues_LeafSucculence.asreml41", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  library(dae)
  #Load a previously saved alldiffs object
  LeafSucculence.diff <- readRDS("./data/LeafSucculence.diff")
  LeafSucculence.diff <- LeafSucculence.diff[[1]]
  #Plot with manual gridspacing
  pdata <- plotPvalues(LeafSucculence.diff, gridspacing = 3, show.sig = TRUE,
                       axis.labels = TRUE)
  #Was pdata$pvalue, which only worked via partial matching of $; use the full
  #component name, consistent with the other expectations
  testthat::expect_equal(nrow(pdata$pvalues), 144)
  testthat::expect_equal(ncol(pdata$pvalues), 3)
  testthat::expect_true(all(c("Rows","Columns","p") %in% names(pdata$pvalues)))
  #Plot with automatic gridspacing
  pdata <- plotPvalues(LeafSucculence.diff, factors.per.grid = 2, show.sig = TRUE,
                       axis.labels = TRUE)
  testthat::expect_equal(nrow(pdata$pvalues), 144)
  testthat::expect_equal(ncol(pdata$pvalues), 3)
  testthat::expect_true(all(c("Rows","Columns","p") %in% names(pdata$pvalues)))
  #Plot with two section factors
  pdata <- plotPvalues(LeafSucculence.diff, sections = c("Depths","Slope"),
                       show.sig = TRUE, axis.labels = TRUE)
  testthat::expect_equal(nrow(pdata$pvalues), 144)
  testthat::expect_equal(ncol(pdata$pvalues), 5)
  testthat::expect_true(all(c("Rows","Columns","p","sections1","sections2") %in% names(pdata$pvalues)))
})
#Test LSDs computed for each combination of the levels of the LSDby factor(s)
cat("#### Test for factor combinations asreml41\n")
test_that("factor.combinations.asreml41", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  library(dae)
  #Load a previously saved alldiffs object
  LeafSucculence.diff <- readRDS("./data/LeafSucculence.diff")
  LeafSucculence.diff <- LeafSucculence.diff[[1]]
  LeafSucculence.diff <- recalcLSD(LeafSucculence.diff, LSDtype = "factor.combinations",
                                   LSDby = "Species")
  #"half" abbreviates "halfLeastSignificant"; a warning is expected here
  testthat::expect_warning(LeafSucculence.diff <- redoErrorIntervals(LeafSucculence.diff,
                                                                     error.intervals = "half"))
  #One row of LSD values per Species level (P1, P2, P3)
  testthat::expect_equal(nrow(LeafSucculence.diff$LSD), 3)
  testthat::expect_equal(ncol(LeafSucculence.diff$LSD), 8)
  testthat::expect_true(all(c("P1","P2","P3") %in% rownames(LeafSucculence.diff$LSD)))
  #No half-LSD limit columns should have been added to the predictions;
  #est.status remains the last column
  testthat::expect_false("lower.halfLeastSignificant.limit" %in% names(LeafSucculence.diff$predictions))
  testthat::expect_true(names(LeafSucculence.diff$predictions)[length(names(
    LeafSucculence.diff$predictions))] == "est.status")
})
#Test recalcLSD.alldiffs followed by redoErrorIntervals on the WaterRunoff analysis
cat("#### Test for recalcLSD.alldiffs4\n")
test_that("recalcLSD.alldiffs4", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  library(dae)
  data(WaterRunoff.dat)
  asreml.options(keep.order = TRUE) #required for asreml4 only
  testthat::expect_output(current.asr <- asreml(fixed = pH ~ Benches + (Sources * (Type + Species)),
                                                random = ~ Benches:MainPlots,
                                                data= WaterRunoff.dat))
  current.asrt <- as.asrtests(current.asr, NULL, NULL)
  diffs <- predictPlus.asreml(classify = "Sources:Type",
                              asreml.obj = current.asr, tables = "none",
                              wald.tab = current.asrt$wald.tab,
                              present = c("Type","Species","Sources"))
  testthat::expect_is(diffs, "alldiffs")
  #One row of LSD values per Sources level
  diffs <- recalcLSD.alldiffs(diffs, LSDtype = "factor.combinations", LSDby = "Sources")
  testthat::expect_equal(nrow(diffs$LSD), 6)
  testthat::expect_equal(ncol(diffs$LSD), 8)
  testthat::expect_warning(diffs <- redoErrorIntervals(diffs,
                                                       error.intervals = "halfLeastSignificant"))
  #The warning indicates half-LSD limits were not added to the predictions
  testthat::expect_false("upper.halfLeastSignificant.limit" %in% names(diffs$predictions))
})
#Test the LSDtype/LSDby options of recalcLSD, redoErrorIntervals and predictPlus,
#using example 9-1 from Montgomery, Design and Analysis of Experiments, 5th edn
cat("#### Test for LSDby4\n")
test_that("LSDby4", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  library(dae)
  #example 9-1 from Montgomery 5 edn
  #Set up data.frame
  Pressure.lev <- c(10,15,20)
  Speed.lev <- c(100,120,140)
  Nozzle.lev <- c("A", "B", "C")
  Fac3Syrup.dat <- fac.gen(generate=list(Nozzle = Nozzle.lev,
                                         Pressure = Pressure.lev, Speed = Speed.lev),
                           each=2)
  Fac3Syrup.dat <- within(Fac3Syrup.dat,
                          {
                            SpeedPress <- fac.combine(list(Speed,Pressure),
                                                      combine.levels = TRUE)
                            WSpeedPress <- fac.nested(SpeedPress)
                          })
  Fac3Syrup.dat <- data.frame(Test = factor(1:54), Fac3Syrup.dat)
  Fac3Syrup.dat$Loss <- c(-35,-25,-45,-60,-40,15, 110,75,-10,30,80,54,
                          4,5,-40,-30,31,36, 17,24,-65,-58,20,4,
                          55,120,-55,-44,110,44, -23,-5,-64,-62,-20,-31,
                          -39,-35,-55,-67,15,-30, 90,113,-28,-26,110,135,
                          -30,-55,-61,-52,54,4)+70
  Fac3Syrup.dat <- with(Fac3Syrup.dat, Fac3Syrup.dat[order(SpeedPress, WSpeedPress),])
  #Analysis
  interaction.ABC.plot(Loss, Pressure, Speed, Nozzle, data=Fac3Syrup.dat)
  Fac3Syrup.aov <- aov(Loss ~ Nozzle * Pressure * Speed + Error(Test), Fac3Syrup.dat)
  summary(Fac3Syrup.aov)
  #Fit with heterogeneous residual variances across SpeedPress combinations
  m1 <- do.call("asreml",
                args = list(Loss ~ Nozzle * Pressure * Speed,
                            residual = ~idh(SpeedPress):WSpeedPress,
                            data = Fac3Syrup.dat))
  testthat::expect_true(abs(summary(m1)$varcomp$component[2] - 27.5) < 1e-05)
  wald.tab <- wald.asreml(m1, denDF = "numeric")$Wald
  testthat::expect_equal(nrow(wald.tab), 8)
  #Default LSDtype is "overall" with no LSDby
  diffs <- predictPlus(m1, classify = "Nozzle:Pressure:Speed",
                       #linear.transformation = ~(Nozzle + Pressure):Speed,
                       wald.tab = wald.tab,
                       tables = "none")
  testthat::expect_true("upper.Confidence.limit" %in% names(diffs$predictions))
  testthat::expect_true(all(c( "LSDtype", "LSDstatistic") %in% names(attributes(diffs))))
  testthat::expect_true(is.null(attr(diffs, which = "LSDby")))
  testthat::expect_true((attr(diffs, which = "LSDtype") == "overall"))
  #Calculate LSD, but leave as CIs
  diffs.LSD <- recalcLSD(diffs, LSDtype = "factor",
                         LSDby = c("Speed","Pressure"))
  #One LSD row per Speed x Pressure combination (3 x 3 = 9)
  testthat::expect_equal(nrow(diffs.LSD$LSD), 9)
  testthat::expect_true(abs(diffs.LSD$LSD$minLSD[1]- 11.92550) < 1e-05)
  testthat::expect_true(all(abs(diffs.LSD$LSD$minLSD- diffs.LSD$LSD$maxLSD) < 1e-05))
  testthat::expect_true(all(c( "LSDtype", "LSDby", "LSDstatistic") %in% names(attributes(diffs.LSD))))
  testthat::expect_true((attr(diffs.LSD, which = "LSDtype") == "factor.combinations"))
  testthat::expect_true("upper.Confidence.limit" %in% names(diffs$predictions))
  #Convert from CI to LSI
  diffs.LSI <- redoErrorIntervals(diffs.LSD, error.intervals = "half")
  testthat::expect_true("upper.halfLeastSignificant.limit" %in% names(diffs.LSI$predictions))
  testthat::expect_equal(nrow(diffs.LSI$LSD), 9)
  diffs <- redoErrorIntervals(diffs, error.intervals = "half", LSDtype = "factor",
                              LSDby = c("Speed","Pressure"), wald.tab = wald.tab,
                              tables = "none")
  testthat::expect_true("upper.halfLeastSignificant.limit" %in% names(diffs$predictions))
  testthat::expect_equal(nrow(diffs$LSD), 9)
  testthat::expect_true(abs(diffs$LSD$minLSD[1]- 11.92550) < 1e-05)
  testthat::expect_true(all(abs(diffs$LSD$minLSD- diffs$LSD$maxLSD) < 1e-05))
  #Test changing the LSDby
  testthat::expect_warning(diff.Press <-
                             redoErrorIntervals(diffs, error.intervals = "half",
                                                LSDtype = "factor",
                                                LSDby = "Pressure", wald.tab = wald.tab,
                                                tables = "none"))
  diff.Press$LSD
  testthat::expect_equal(nrow(diff.Press$LSD), 3)
  testthat::expect_true(abs(diff.Press$LSD$minLSD[1]- 11.92550) < 1e-05)
  testthat::expect_true(abs(diff.Press$LSD$meanLSD[1]- 41.13342) < 1e-05)
  testthat::expect_true(abs(diff.Press$LSD$maxLSD[1]- 67.62672) < 1e-05)
  #No LSDtype - the LSDby alone should give the same three LSD rows
  testthat::expect_warning(diff.Press <-
                             redoErrorIntervals(diffs, error.intervals = "half",
                                                LSDby = "Pressure", wald.tab = wald.tab,
                                                tables = "none"))
  testthat::expect_equal(nrow(diff.Press$LSD), 3)
  testthat::expect_true(abs(diff.Press$LSD$minLSD[1]- 11.92550) < 1e-05)
  testthat::expect_true(abs(diff.Press$LSD$meanLSD[1]- 41.13342) < 1e-05)
  testthat::expect_true(abs(diff.Press$LSD$maxLSD[1]- 67.62672) < 1e-05)
  #LSDtype = overall with explicit LSDby = NULL
  testthat::expect_warning(diff.all <-
                             redoErrorIntervals(diffs, error.intervals = "half",
                                                LSDtype = "overall",
                                                LSDby = NULL, wald.tab = wald.tab,
                                                tables = "none"))
  testthat::expect_equal(nrow(diff.all$LSD), 1)
  testthat::expect_true(rownames(diff.all$LSD) == "overall")
  testthat::expect_true(abs(diff.all$LSD$minLSD[1]- 11.92550) < 1e-05)
  #LSDtype = overall only
  testthat::expect_warning(diff.all <-
                             redoErrorIntervals(diffs, error.intervals = "half",
                                                LSDtype = "overall",
                                                wald.tab = wald.tab,
                                                tables = "none"))
  testthat::expect_equal(nrow(diff.all$LSD), 1)
  testthat::expect_true(abs(diff.all$LSD$minLSD[1]- 11.92550) < 1e-05)
  #Test predictPlus with LSD options
  #With linear transformation and LSDtype = "factor combinations"
  diffs.LSD <- predictPlus(m1, classify = "Nozzle:Pressure:Speed",
                           linear.transformation = ~(Nozzle + Pressure):Speed,
                           error.intervals = "half", LSDtype = "factor", LSDby = c("Speed", "Pressure"),
                           wald.tab = wald.tab,
                           tables = "none")
  testthat::expect_true("upper.halfLeastSignificant.limit" %in% names(diffs.LSD$predictions))
  testthat::expect_true(all(c( "LSDtype", "LSDby", "LSDstatistic") %in% names(attributes(diffs.LSD))))
  testthat::expect_true((attr(diffs.LSD, which = "LSDtype") == "factor.combinations"))
  #LSD attributes should also be attached to the predictions component
  testthat::expect_true(all(c( "LSDtype", "LSDby", "LSDstatistic", "LSDvalues") %in%
                              names(attributes(diffs.LSD$predictions))))
  testthat::expect_true(attr(diffs.LSD$predictions, which = "LSDtype") == "factor.combinations")
  testthat::expect_true(attr(diffs.LSD$predictions, which = "LSDstatistic") == "mean")
})
|
/tests/testthat/test41PredictionsPresentation.r
|
no_license
|
cran/asremlPlus
|
R
| false
| false
| 29,280
|
r
|
#Tests of the prediction-presentation functions of asremlPlus under asreml 4.1
#devtools::test("asremlPlus")
context("prediction_presentation")
#NOTE(review): hard-coded local path to the asreml 4.1 library on the
#development machine; all tests below skip unless asreml is installed
asr41.lib <- "D:\\Analyses\\R ASReml4.1"
#Test prediction of the grand mean via classify = "(Intercept)" on the Oats data
cat("#### Test for Intercept prediction on Oats with asreml41\n")
test_that("predict_Intercept4", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  library(dae)
  data(Oats.dat)
  m1.asr <- asreml(Yield ~ Nitrogen*Variety,
                   random=~Blocks/Wplots,
                   data=Oats.dat)
  testthat::expect_equal(length(m1.asr$vparameters),3)
  current.asrt <- as.asrtests(m1.asr)
  #Test for Intercept predict
  Int.pred <- predict(m1.asr, classify="(Intercept)")$pvals
  testthat::expect_equal(nrow(Int.pred), 1)
  testthat::expect_true(abs( Int.pred$predicted.value - 103.9722) < 1e-04)
  #predictPlus should give the same single grand-mean prediction
  Int.diffs <- predictPlus(m1.asr, classify="(Intercept)")
  testthat::expect_equal(length(Int.diffs),7)
  testthat::expect_equal(nrow(Int.diffs$predictions), 1)
  testthat::expect_true(abs( Int.diffs$predictions$predicted.value - 103.9722) < 1e-04)
  #Supply a named axis title for the Intercept 'factor'
  xtitl <- "Overall mean"
  names(xtitl) <- "Intercept"
  testthat::expect_silent(plotPredictions(classify="(Intercept)", y = "predicted.value",
                                          data = Int.diffs$predictions,
                                          y.title = "Yield", titles = xtitl,
                                          error.intervals = "Conf"))
})
#Test predictPlus, including parallel prediction over only the observed
#factor/covariate combinations
cat("#### Test for predictPlus.asreml41\n")
test_that("predictPlus.asreml41", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  library(dae)
  data(WaterRunoff.dat)
  asreml.options(keep.order = TRUE) #required for asreml4 only
  testthat::expect_warning(current.asr <- asreml(fixed = pH ~ Benches + (Sources * (Type + Species)),
                                                 random = ~ Benches:MainPlots,
                                                 data= WaterRunoff.dat))
  testthat::expect_output(current.asrt <- as.asrtests(current.asr, NULL, NULL),
                          regexp = "Calculating denominator DF")
  testthat::expect_silent(diffs <- predictPlus(classify = "Sources:Type",
                                               asreml.obj = current.asr, tables = "none",
                                               wald.tab = current.asrt$wald.tab,
                                               present = c("Type","Species","Sources")))
  testthat::expect_is(diffs, "alldiffs")
  #### Get the observed combinations of the factors and variables in classify
  class.facs <- c("Species","Date","xDay")
  levs <- as.data.frame(table(WaterRunoff.dat[class.facs]))
  levs <- levs[do.call(order, levs), ]
  levs <- as.list(levs[levs$Freq != 0, class.facs])
  #xDay must be numeric, not a factor, when used as the x.num covariate
  levs$xDay <- as.numfac(levs$xDay)
  current.asr <- asreml(fixed = log.Turbidity ~ Benches + Sources + Type + Species +
                          Sources:Type + Sources:Species +
                          Sources:xDay + Species:xDay + Species:Date,
                        data = WaterRunoff.dat)
  current.asrt <- as.asrtests(current.asr, NULL, NULL)
  #parallel = TRUE restricts predictions to the supplied combinations in levs
  diffs.p <- predictPlus(asreml.obj = current.asr,
                         classify="Species:Date:xDay",
                         term = "Species:Date",
                         parallel = TRUE, levels=levs,
                         present=c("Type","Species","Sources"),
                         x.num = "xDay", x.fac = "Date",
                         x.plot.values=c(0,28,56,84), tables = "none",
                         wald.tab = current.asrt$wald.tab)
  testthat::expect_is(diffs.p, "alldiffs")
})
#Test plotPredictions, both from a raw predict.asreml pvals data.frame and
#from a predictPlus alldiffs object
cat("#### Test for plotPredictions.asreml41\n")
test_that("plotPredictions.asreml41", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  library(ggplot2)
  library(dae)
  data(WaterRunoff.dat)
  #### Get the observed combinations of the factors and variables in classify
  class.facs <- c("Species","Date","xDay")
  levs <- as.data.frame(table(WaterRunoff.dat[class.facs]))
  levs <- levs[do.call(order, levs), ]
  levs <- as.list(levs[levs$Freq != 0, class.facs])
  #xDay must be numeric, not a factor, when used as the x.num covariate
  levs$xDay <- as.numfac(levs$xDay)
  asreml.options(keep.order = TRUE) #required for asreml4 only
  current.asr <- asreml(fixed = log.Turbidity ~ Benches + Sources + Type + Species +
                          Sources:Type + Sources:Species +
                          Sources:xDay + Species:xDay + Species:Date,
                        data = WaterRunoff.dat)
  current.asrt <- as.asrtests(current.asr, NULL, NULL)
  predictions <- predict(current.asr, class="Species:Date:xDay",
                         parallel = TRUE, levels = levs,
                         present = c("Type","Species","Sources"))$pvals
  #Only estimable predictions can be plotted
  predictions <- predictions[predictions$status == "Estimable",]
  x.title <- "Days since first observation"
  names(x.title) <- "xDay"
  #Get predictions without specifying levels
  plotPredictions(classify="Species:Date:xDay", y = "predicted.value",
                  data = predictions, wald.tab = current.asrt$wald.tab,
                  x.num = "xDay", x.fac = "Date",
                  titles = x.title,
                  y.title = "Predicted log(Turbidity)",
                  present = c("Type","Species","Sources"),
                  error.intervals = "none",
                  ggplotFuncs = list(ggtitle("Transformed turbidity over time")))
  #Specify the levs and parallel = TRUE
  diffs <- predictPlus(asreml.obj = current.asr,
                       classify="Species:Date:xDay",
                       term = "Species:Date",
                       present=c("Type","Species","Sources"),
                       x.num = "xDay", x.fac = "Date",
                       parallel = TRUE, levels = levs,
                       x.plot.values=c(0,28,56,84),
                       wald.tab = current.asrt$wald.tab)
  plotPredictions(classify="Species:Date:xDay", y = "predicted.value",
                  data = diffs$predictions, wald.tab = current.asrt$wald.tab,
                  x.num = "xDay", x.fac = "Date",
                  titles = x.title,
                  y.title = "Predicted log(Turbidity)")
  #Dummy expectation so testthat does not report an empty test
  testthat::expect_silent("dummy")
})
#Test predictPresent, including two expected failures and parallel prediction
#with back-transformation of log.Turbidity (transform.power = 0)
cat("#### Test for predictPresent.asreml41\n")
test_that("predictPresent.asreml41", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(dae)
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  data(WaterRunoff.dat)
  #### Get the observed combinations of the factors and variables in classify
  class.facs <- c("Species","Date","xDay")
  levs <- as.data.frame(table(WaterRunoff.dat[class.facs]))
  levs <- levs[do.call(order, levs), ]
  levs <- as.list(levs[levs$Freq != 0, class.facs])
  levs$xDay <- as.numfac(levs$xDay)
  #Axis titles keyed by the corresponding column names of WaterRunoff.dat
  titles <- list("Days since first observation", "Days since first observation", "pH", "Turbidity (NTU)")
  names(titles) <- names(WaterRunoff.dat)[c(5,7,11:12)]
  asreml.options(keep.order = TRUE) #required for asreml4 only
  current.asr <- asreml(fixed = log.Turbidity ~ Benches + Sources + Type + Species +
                          Sources:Type + Sources:Species + Sources:Species:xDay +
                          Sources:Species:Date,
                        data = WaterRunoff.dat)
  current.asrt <- as.asrtests(current.asr, NULL, NULL)
  #Example that fails because Date has levels that are not numeric in nature
  testthat::expect_error(diff.list <- predictPresent(terms = "Date:Sources:Species",
                                                     asreml.obj = current.asrt$asreml.obj,
                                                     wald.tab = current.asrt$wald.tab,
                                                     x.fac = "Date",
                                                     plots = "predictions",
                                                     error.intervals = "StandardError",
                                                     titles = titles,
                                                     transform.power = 0,
                                                     present = c("Type","Species","Sources"),
                                                     tables = "differences",
                                                     level.length = 6))
  #Example that does not produce predictions because has Date but not xDay
  testthat::expect_error(diff.list <- predictPresent(terms = "Date:Sources:Species",
                                                     asreml.obj = current.asrt$asreml.obj,
                                                     wald.tab = current.asrt$wald.tab,
                                                     plots = "predictions",
                                                     error.intervals = "StandardError",
                                                     titles = titles,
                                                     transform.power = 0,
                                                     present = c("Type","Species","Sources","Date"),
                                                     tables = "differences",
                                                     level.length = 6))
  #### Get the observed combinations of the factors and variables in classify
  class.facs <- c("Sources","Species","Date","xDay")
  levs <- as.data.frame(table(WaterRunoff.dat[class.facs]))
  levs <- levs[do.call(order, levs), ]
  levs <- as.list(levs[levs$Freq != 0, class.facs])
  levs$xDay <- as.numfac(levs$xDay)
  # parallel and levels are arguments from predict.asreml
  diff.list <- predictPresent.asreml(asreml.obj = current.asrt$asreml.obj,
                                     terms = "Date:Sources:Species:xDay",
                                     x.num = "xDay", x.fac = "Date",
                                     parallel = TRUE, levels = levs,
                                     wald.tab = current.asrt$wald.tab,
                                     plots = "predictions",
                                     error.intervals = "StandardError",
                                     titles = titles,
                                     transform.power = 0,
                                     present = c("Type","Species","Sources"),
                                     tables = "none",
                                     level.length = 6)
  testthat::expect_equal(length(diff.list), 1)
  testthat::expect_match(names(diff.list), "Date.Sources.Species.xDay")
  # test that backtransforms have halfLSD intervals
  diff.list <- predictPresent.asreml(asreml.obj = current.asrt$asreml.obj,
                                     terms = "Date:Sources:Species:xDay",
                                     x.num = "xDay", x.fac = "Date",
                                     parallel = TRUE, levels = levs,
                                     wald.tab = current.asrt$wald.tab,
                                     plots = "backtransforms",
                                     error.intervals = "halfLeast",
                                     avsed.tolerance = 1,
                                     titles = titles,
                                     transform.power = 0,
                                     present = c("Type","Species","Sources"),
                                     tables = "none",
                                     level.length = 6)
  testthat::expect_equal(length(diff.list), 1)
  testthat::expect_match(names(diff.list), "Date.Sources.Species.xDay")
  testthat::expect_true(all(c("upper.halfLeastSignificant.limit",
                              "lower.halfLeastSignificant.limit") %in%
                              names(diff.list$Date.Sources.Species.xDay$backtransforms)))
})
#### This test is not relevant to asreml3 because its saving of sed and vcov are different
#Check the error messages produced when predict.asreml does not return the sed
#or vcov components that predictPresent needs
cat("#### Test for error when no predictions.asreml41\n")
test_that("noPredictions.asreml41", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  data(gw.dat)
  current.asr <- do.call(asreml,
                         args=list(fixed = y ~ Species*Substrate*Irrigation,
                                   random = ~ Row + Column,
                                   keep.order=TRUE, data = gw.dat,
                                   maxit=50, workspace = 1e08, stepsize = 0.0001))
  current.asrt <- as.asrtests(current.asr, NULL, NULL)
  current.asrt <- rmboundary(current.asrt)
  #Confidence intervals require the sed component
  testthat::expect_error(diffs <- predictPresent(current.asrt$asreml.obj,
                                                 terms = "Irrigation",
                                                 error.intervals = "Conf",
                                                 wald.tab = current.asrt$wald.tab,
                                                 tables = "none")[[1]],
                         regexp = "predict.asreml has not returned the sed component for the predictions as requested",
                         fixed = TRUE)
  #Linear transformations require the variance matrix (vcov) component
  testthat::expect_error(diffs <- predictPresent(current.asrt$asreml.obj,
                                                 terms = "Irrigation",
                                                 linear.transformation = ~ Irrigation,
                                                 error.intervals = "Conf",
                                                 wald.tab = current.asrt$wald.tab,
                                                 tables = "none")[[1]],
                         regexp = "predict.asreml has not returned the variance matrix of the predictions as requested",
                         fixed = TRUE)
})
#Test plotPvalues, both on a melted data.frame of p-values and on an alldiffs
#object, for the WaterRunoff pH analysis
cat("#### Test for plotPvalues.asreml41\n")
test_that("plotPvalues.asreml41", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  library(dae)
  library(reshape2)
  data(WaterRunoff.dat)
  asreml.options(keep.order = TRUE) #required for asreml4 only
  testthat::expect_output(current.asr <- asreml(fixed = pH ~ Benches + (Sources * (Type + Species)),
                                                random = ~ Benches:MainPlots,
                                                data= WaterRunoff.dat))
  current.asrt <- as.asrtests(current.asr, NULL, NULL)
  diffs <- predictPlus.asreml(classify = "Sources:Type",
                              asreml.obj = current.asr, tables = "none",
                              wald.tab = current.asrt$wald.tab,
                              present = c("Type","Species","Sources"))
  testthat::expect_is(diffs, "alldiffs")
  #Melt the p.differences matrix into a data.frame for the data.frame method
  p <- diffs$p.differences
  p <- within(reshape2::melt(p),
              {
                Var1 <- factor(Var1, levels=dimnames(diffs$p.differences)[[1]])
                Var2 <- factor(Var2, levels=levels(Var1))
              })
  names(p) <- c("Rows","Columns","p")
  testthat::expect_silent(plotPvalues(p, x = "Rows", y = "Columns",
                                      gridspacing = rep(c(3,4), c(4,2)),
                                      show.sig = TRUE))
  #Test different size, face and colour
  testthat::expect_silent(plotPvalues(p, x = "Rows", y = "Columns",
                                      gridspacing = rep(c(3,4), c(4,2)),
                                      show.sig = TRUE, sig.size = 5, sig.colour = "blue"))
  testthat::expect_silent(plotPvalues(p, x = "Rows", y = "Columns",
                                      gridspacing = rep(c(3,4), c(4,2)),
                                      show.sig = TRUE, sig.size = 5, sig.face = "bold",
                                      sig.family = "serif"))
  #Plot with sections
  pdata <- plotPvalues(diffs, sections = "Sources", show.sig = TRUE)
  testthat::expect_equal(nrow(pdata$pvalues), 400)
  testthat::expect_equal(ncol(pdata$pvalues), 5)
  testthat::expect_true(all(c("Rows","Columns","p","sections1","sections2") %in% names(pdata$pvalues)))
  testthat::expect_equal(length(pdata$plots), 6)
  testthat::expect_equal(names(pdata$plots), c("Rainwater","Recycled water","Tap water",
                                               "Rain+Basalt","Rain+Dolomite","Rain+Quartzite"))
  #Plot without sections, but automatic gridspacing
  pupdata <- plotPvalues(diffs, show.sig = TRUE, factors.per.grid = 1)
  testthat::expect_equal(nrow(pupdata$pvalues), 400)
  testthat::expect_equal(ncol(pupdata$pvalues), 3)
  testthat::expect_true(all(c("Rows","Columns","p") %in% names(pupdata$pvalues)))
  testthat::expect_equal(sum(!is.na(pupdata$pvalues$p)), 380)
  testthat::expect_equal(length(pupdata$plots), 1)
  #Plot without sections, but automatic gridspacing and upper triangle
  pupdata <- plotPvalues(diffs, show.sig = TRUE, factors.per.grid = 1,
                         triangles = "upper")
  testthat::expect_equal(nrow(pupdata$pvalues), 400)
  testthat::expect_equal(ncol(pupdata$pvalues), 3)
  testthat::expect_true(all(c("Rows","Columns","p") %in% names(pupdata$pvalues)))
  testthat::expect_equal(sum(!is.na(pupdata$pvalues$p)), 190)
  #Plot without sections, but manual gridspacing and upper triangle
  pupdata <- plotPvalues(diffs, show.sig = TRUE, gridspacing = rep(c(3,4), c(4,2)),
                         triangles = "upper")
  testthat::expect_equal(nrow(pupdata$pvalues), 400)
  testthat::expect_equal(ncol(pupdata$pvalues), 3)
  testthat::expect_true(all(c("Rows","Columns","p") %in% names(pupdata$pvalues)))
  testthat::expect_equal(sum(!is.na(pupdata$pvalues$p)), 190)
  #Plot with sections and upper triangle (the 190 non-NA p-values after na.omit
  #are the upper-triangle entries across the section plots)
  pupdata <- plotPvalues(diffs, sections = "Sources", show.sig = TRUE, triangles = "upper")
  pupdata$pvalues <- na.omit(pupdata$pvalues)
  testthat::expect_equal(nrow(pupdata$pvalues), 190)
  testthat::expect_equal(ncol(pupdata$pvalues), 5)
  testthat::expect_true(all(c("Rows","Columns","p","sections1","sections2") %in%
                              names(pupdata$pvalues)))
})
#Test plotPvalues on a stored alldiffs object (LeafSucculence). Renamed from
#"plotPvalues.asreml41" so failures are not confused with the WaterRunoff
#plotPvalues test, which had the identical description.
cat("#### Test for plotPvalues LeafSucculence asreml41\n")
test_that("plotPvalues_LeafSucculence.asreml41", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  library(dae)
  #Load a previously saved alldiffs object
  LeafSucculence.diff <- readRDS("./data/LeafSucculence.diff")
  LeafSucculence.diff <- LeafSucculence.diff[[1]]
  #Plot with manual gridspacing
  pdata <- plotPvalues(LeafSucculence.diff, gridspacing = 3, show.sig = TRUE,
                       axis.labels = TRUE)
  #Was pdata$pvalue, which only worked via partial matching of $; use the full
  #component name, consistent with the other expectations
  testthat::expect_equal(nrow(pdata$pvalues), 144)
  testthat::expect_equal(ncol(pdata$pvalues), 3)
  testthat::expect_true(all(c("Rows","Columns","p") %in% names(pdata$pvalues)))
  #Plot with automatic gridspacing
  pdata <- plotPvalues(LeafSucculence.diff, factors.per.grid = 2, show.sig = TRUE,
                       axis.labels = TRUE)
  testthat::expect_equal(nrow(pdata$pvalues), 144)
  testthat::expect_equal(ncol(pdata$pvalues), 3)
  testthat::expect_true(all(c("Rows","Columns","p") %in% names(pdata$pvalues)))
  #Plot with two section factors
  pdata <- plotPvalues(LeafSucculence.diff, sections = c("Depths","Slope"),
                       show.sig = TRUE, axis.labels = TRUE)
  testthat::expect_equal(nrow(pdata$pvalues), 144)
  testthat::expect_equal(ncol(pdata$pvalues), 5)
  testthat::expect_true(all(c("Rows","Columns","p","sections1","sections2") %in% names(pdata$pvalues)))
})
#Test LSDs computed for each combination of the levels of the LSDby factor(s)
cat("#### Test for factor combinations asreml41\n")
test_that("factor.combinations.asreml41", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  library(dae)
  #Load a previously saved alldiffs object
  LeafSucculence.diff <- readRDS("./data/LeafSucculence.diff")
  LeafSucculence.diff <- LeafSucculence.diff[[1]]
  LeafSucculence.diff <- recalcLSD(LeafSucculence.diff, LSDtype = "factor.combinations",
                                   LSDby = "Species")
  #"half" abbreviates "halfLeastSignificant"; a warning is expected here
  testthat::expect_warning(LeafSucculence.diff <- redoErrorIntervals(LeafSucculence.diff,
                                                                     error.intervals = "half"))
  #One row of LSD values per Species level (P1, P2, P3)
  testthat::expect_equal(nrow(LeafSucculence.diff$LSD), 3)
  testthat::expect_equal(ncol(LeafSucculence.diff$LSD), 8)
  testthat::expect_true(all(c("P1","P2","P3") %in% rownames(LeafSucculence.diff$LSD)))
  #No half-LSD limit columns should have been added to the predictions;
  #est.status remains the last column
  testthat::expect_false("lower.halfLeastSignificant.limit" %in% names(LeafSucculence.diff$predictions))
  testthat::expect_true(names(LeafSucculence.diff$predictions)[length(names(
    LeafSucculence.diff$predictions))] == "est.status")
})
#Test recalcLSD.alldiffs followed by redoErrorIntervals on the WaterRunoff analysis
cat("#### Test for recalcLSD.alldiffs4\n")
test_that("recalcLSD.alldiffs4", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  library(dae)
  data(WaterRunoff.dat)
  asreml.options(keep.order = TRUE) #required for asreml4 only
  testthat::expect_output(current.asr <- asreml(fixed = pH ~ Benches + (Sources * (Type + Species)),
                                                random = ~ Benches:MainPlots,
                                                data= WaterRunoff.dat))
  current.asrt <- as.asrtests(current.asr, NULL, NULL)
  diffs <- predictPlus.asreml(classify = "Sources:Type",
                              asreml.obj = current.asr, tables = "none",
                              wald.tab = current.asrt$wald.tab,
                              present = c("Type","Species","Sources"))
  testthat::expect_is(diffs, "alldiffs")
  #One row of LSD values per Sources level
  diffs <- recalcLSD.alldiffs(diffs, LSDtype = "factor.combinations", LSDby = "Sources")
  testthat::expect_equal(nrow(diffs$LSD), 6)
  testthat::expect_equal(ncol(diffs$LSD), 8)
  testthat::expect_warning(diffs <- redoErrorIntervals(diffs,
                                                       error.intervals = "halfLeastSignificant"))
  #The warning indicates half-LSD limits were not added to the predictions
  testthat::expect_false("upper.halfLeastSignificant.limit" %in% names(diffs$predictions))
})
#Test the LSDtype/LSDby options of recalcLSD, redoErrorIntervals and predictPlus,
#using example 9-1 from Montgomery, Design and Analysis of Experiments, 5th edn
cat("#### Test for LSDby4\n")
test_that("LSDby4", {
  skip_if_not_installed("asreml")
  skip_on_cran()
  library(asreml, lib.loc = asr41.lib)
  library(asremlPlus)
  library(dae)
  #example 9-1 from Montgomery 5 edn
  #Set up data.frame
  Pressure.lev <- c(10,15,20)
  Speed.lev <- c(100,120,140)
  Nozzle.lev <- c("A", "B", "C")
  Fac3Syrup.dat <- fac.gen(generate=list(Nozzle = Nozzle.lev,
                                         Pressure = Pressure.lev, Speed = Speed.lev),
                           each=2)
  Fac3Syrup.dat <- within(Fac3Syrup.dat,
                          {
                            SpeedPress <- fac.combine(list(Speed,Pressure),
                                                      combine.levels = TRUE)
                            WSpeedPress <- fac.nested(SpeedPress)
                          })
  Fac3Syrup.dat <- data.frame(Test = factor(1:54), Fac3Syrup.dat)
  Fac3Syrup.dat$Loss <- c(-35,-25,-45,-60,-40,15, 110,75,-10,30,80,54,
                          4,5,-40,-30,31,36, 17,24,-65,-58,20,4,
                          55,120,-55,-44,110,44, -23,-5,-64,-62,-20,-31,
                          -39,-35,-55,-67,15,-30, 90,113,-28,-26,110,135,
                          -30,-55,-61,-52,54,4)+70
  Fac3Syrup.dat <- with(Fac3Syrup.dat, Fac3Syrup.dat[order(SpeedPress, WSpeedPress),])
  #Analysis
  interaction.ABC.plot(Loss, Pressure, Speed, Nozzle, data=Fac3Syrup.dat)
  Fac3Syrup.aov <- aov(Loss ~ Nozzle * Pressure * Speed + Error(Test), Fac3Syrup.dat)
  summary(Fac3Syrup.aov)
  #Fit with heterogeneous residual variances across SpeedPress combinations
  m1 <- do.call("asreml",
                args = list(Loss ~ Nozzle * Pressure * Speed,
                            residual = ~idh(SpeedPress):WSpeedPress,
                            data = Fac3Syrup.dat))
  testthat::expect_true(abs(summary(m1)$varcomp$component[2] - 27.5) < 1e-05)
  wald.tab <- wald.asreml(m1, denDF = "numeric")$Wald
  testthat::expect_equal(nrow(wald.tab), 8)
  #Default LSDtype is "overall" with no LSDby
  diffs <- predictPlus(m1, classify = "Nozzle:Pressure:Speed",
                       #linear.transformation = ~(Nozzle + Pressure):Speed,
                       wald.tab = wald.tab,
                       tables = "none")
  testthat::expect_true("upper.Confidence.limit" %in% names(diffs$predictions))
  testthat::expect_true(all(c( "LSDtype", "LSDstatistic") %in% names(attributes(diffs))))
  testthat::expect_true(is.null(attr(diffs, which = "LSDby")))
  testthat::expect_true((attr(diffs, which = "LSDtype") == "overall"))
  #Calculate LSD, but leave as CIs
  diffs.LSD <- recalcLSD(diffs, LSDtype = "factor",
                         LSDby = c("Speed","Pressure"))
  #One LSD row per Speed x Pressure combination (3 x 3 = 9)
  testthat::expect_equal(nrow(diffs.LSD$LSD), 9)
  testthat::expect_true(abs(diffs.LSD$LSD$minLSD[1]- 11.92550) < 1e-05)
  testthat::expect_true(all(abs(diffs.LSD$LSD$minLSD- diffs.LSD$LSD$maxLSD) < 1e-05))
  testthat::expect_true(all(c( "LSDtype", "LSDby", "LSDstatistic") %in% names(attributes(diffs.LSD))))
  testthat::expect_true((attr(diffs.LSD, which = "LSDtype") == "factor.combinations"))
  testthat::expect_true("upper.Confidence.limit" %in% names(diffs$predictions))
  #Convert from CI to LSI
  diffs.LSI <- redoErrorIntervals(diffs.LSD, error.intervals = "half")
  testthat::expect_true("upper.halfLeastSignificant.limit" %in% names(diffs.LSI$predictions))
  testthat::expect_equal(nrow(diffs.LSI$LSD), 9)
  diffs <- redoErrorIntervals(diffs, error.intervals = "half", LSDtype = "factor",
                              LSDby = c("Speed","Pressure"), wald.tab = wald.tab,
                              tables = "none")
  testthat::expect_true("upper.halfLeastSignificant.limit" %in% names(diffs$predictions))
  testthat::expect_equal(nrow(diffs$LSD), 9)
  testthat::expect_true(abs(diffs$LSD$minLSD[1]- 11.92550) < 1e-05)
  testthat::expect_true(all(abs(diffs$LSD$minLSD- diffs$LSD$maxLSD) < 1e-05))
  #Test changing the LSDby
  testthat::expect_warning(diff.Press <-
                             redoErrorIntervals(diffs, error.intervals = "half",
                                                LSDtype = "factor",
                                                LSDby = "Pressure", wald.tab = wald.tab,
                                                tables = "none"))
  diff.Press$LSD
  testthat::expect_equal(nrow(diff.Press$LSD), 3)
  testthat::expect_true(abs(diff.Press$LSD$minLSD[1]- 11.92550) < 1e-05)
  testthat::expect_true(abs(diff.Press$LSD$meanLSD[1]- 41.13342) < 1e-05)
  testthat::expect_true(abs(diff.Press$LSD$maxLSD[1]- 67.62672) < 1e-05)
  #No LSDtype - the LSDby alone should give the same three LSD rows
  testthat::expect_warning(diff.Press <-
                             redoErrorIntervals(diffs, error.intervals = "half",
                                                LSDby = "Pressure", wald.tab = wald.tab,
                                                tables = "none"))
  testthat::expect_equal(nrow(diff.Press$LSD), 3)
  testthat::expect_true(abs(diff.Press$LSD$minLSD[1]- 11.92550) < 1e-05)
  testthat::expect_true(abs(diff.Press$LSD$meanLSD[1]- 41.13342) < 1e-05)
  testthat::expect_true(abs(diff.Press$LSD$maxLSD[1]- 67.62672) < 1e-05)
  #LSDtype = overall with explicit LSDby = NULL
  testthat::expect_warning(diff.all <-
                             redoErrorIntervals(diffs, error.intervals = "half",
                                                LSDtype = "overall",
                                                LSDby = NULL, wald.tab = wald.tab,
                                                tables = "none"))
  testthat::expect_equal(nrow(diff.all$LSD), 1)
  testthat::expect_true(rownames(diff.all$LSD) == "overall")
  testthat::expect_true(abs(diff.all$LSD$minLSD[1]- 11.92550) < 1e-05)
  #LSDtype = overall only
  testthat::expect_warning(diff.all <-
                             redoErrorIntervals(diffs, error.intervals = "half",
                                                LSDtype = "overall",
                                                wald.tab = wald.tab,
                                                tables = "none"))
  testthat::expect_equal(nrow(diff.all$LSD), 1)
  testthat::expect_true(abs(diff.all$LSD$minLSD[1]- 11.92550) < 1e-05)
  #Test predictPlus with LSD options
  #With linear transformation and LSDtype = "factor combinations"
  diffs.LSD <- predictPlus(m1, classify = "Nozzle:Pressure:Speed",
                           linear.transformation = ~(Nozzle + Pressure):Speed,
                           error.intervals = "half", LSDtype = "factor", LSDby = c("Speed", "Pressure"),
                           wald.tab = wald.tab,
                           tables = "none")
  testthat::expect_true("upper.halfLeastSignificant.limit" %in% names(diffs.LSD$predictions))
  testthat::expect_true(all(c( "LSDtype", "LSDby", "LSDstatistic") %in% names(attributes(diffs.LSD))))
  testthat::expect_true((attr(diffs.LSD, which = "LSDtype") == "factor.combinations"))
  #LSD attributes should also be attached to the predictions component
  testthat::expect_true(all(c( "LSDtype", "LSDby", "LSDstatistic", "LSDvalues") %in%
                              names(attributes(diffs.LSD$predictions))))
  testthat::expect_true(attr(diffs.LSD$predictions, which = "LSDtype") == "factor.combinations")
  testthat::expect_true(attr(diffs.LSD$predictions, which = "LSDstatistic") == "mean")
})
|
\name{richards}
\alias{richards}
\alias{richards.deriv}
\alias{richards.deriv.x}
\alias{richards.inv}
\alias{solveB}
\alias{solveE}
\title{ Richards functions: Five parameter (5PL) logistic curves }
\description{ The Richards function, the inverse of
the function, and the derivative of \code{richards(x)}
(and the derivative of \code{richards(x)} with
respect to \code{log(x)} in the point \code{x},
that is derivative of \code{richards(exp(u))} with
respect to \code{u} in the point \code{x = exp(u)}).
In other implementation of Richards functions and
four parameter logistic functions the argument is
\code{log(x)} of the here used argument \code{x}.
Here we have a positive concentration in mind,
where value depending on the concentration can
be described by Richards functions
in the logarithm of the concentration.
}
\usage{
richards(x, a = 0.1, d = 2.4,
e = solveE(x50, b, ny), x50 = 100,
b = solveB(x = x50, a = a, d = d, ny = ny,
x50 = x50, b4 = b4), b4 = 1,
ny = k - 1, k = 2)
richards.deriv(x, a = 0.1, d = 2.4,
e = solveE(x50, b, ny), x50 = 100,
b = solveB(x = x50, a = a, d = d, ny = ny,
x50 = x50, b4 = b4), b4 = 1,
ny = k - 1, k = 2)
richards.deriv.x(x, a = 0.1, d = 2.4,
e = solveE(x50, b, ny), x50 = 100,
b = solveB(x = x50, a = a, d = d, ny = ny,
x50 = x50, b4 = b4), b4 = 1,
ny = k - 1, k = 2)
richards.inv(y, a = 0.1, d = 2.4,
e = solveE(x50, b, ny), x50 = 100,
b = solveB(x = x50, a = a, d = d, ny = ny,
x50 = x50, b4 = b4), b4 = 1,
ny = k - 1, k = 2)
solveE(x50, b, ny = k - 1, k = 2)
solveB(x = x50, a = 0.1, d = 2.4, x50 = 100, b4 = 1, ny = k - 1, k = 2)
}
\arguments{
\item{x}{ a numeric vector of values at which to evaluate the function. }
\item{y}{ a numeric vector of values at which to evaluate the function. }
\item{a}{ a numeric parameter representing the horizontal asymptote on the
left side (very small values of input) for \code{b} positive,
else the horizontal asymptote on the right side:
Zero concentration response. }
\item{d}{ a numeric parameter representing the horizontal asymptote on the
right side (very large values of input) for \code{b} positive,
else the horizontal asymptote on the left side:
Infinite concentration response. }
\item{b}{ a numeric (reciprocal) scale parameter on the input axis,
'growth rate': slope factor.
(The 'growth' parameter \code{b} should be negative, and \code{a}
is thus the right side (larger) asymptote, for Bertalanffy models.)
}
\item{x50}{ a numeric parameter representing the input value at
the center of the curve: \eqn{IC_{50}}{IC_50}.
The value of the \code{Richards} function will be midway
between \code{a} and \code{d} at \code{x50}. }
\item{e}{ a parameter determining the input value at
the inflection point of the curve. }
\item{ny}{ a numeric shape parameter that affects near
which asymptote maximum 'growth' occurs. }
\item{k}{ an alternative numeric parameter for \code{ny}.
(The \code{m} of Richards (1959) is \code{k}
with \code{ny = k - 1}.) }
\item{b4}{ a numeric parameter giving the (reciprocal) scale parameter of
the four parameter logistic curve with slope as the 'richards'
function at \code{x50}, and same asymptotes. }
}
\details{
\code{richards(x)} computes \code{d + (a - d)/z^(1/ny)} with
\code{z = (1 + ny * (x/e)^b) = (1 + (2^ny-1) * (x/x50)^b)}
for \code{ny} different from 0.
For \code{ny} equal to 0 the limiting Gompertz curve
\code{d + (a - d) * exp(-z)} with \code{z = (x/e)^b} is found.
\code{solveE} converts the parameter \code{x50} into the parameter \code{e},
the inflection point of the curve: \code{e = x50/((2^(ny) - 1)/ny)^(1/b)}.
By \code{(2^ny-1) * (x/x50)^b = ny * (x/e)^b = ny * exp(b * (log(x)-log(e)))}
we recognize the notation of, e.g., Yin et al., 2003, and also see, how
these parameters of the above Richards function relates to the parameters
'xmid' (\code{log(x50)}) and 'scal' (\code{1/b}) of \link{SSfpl}
for \code{ny} equal 1 and the argument input the logarithm of \code{x},
\code{log(x)}.
\code{solveB} finds the parameter \code{b} for the Richard curve such
that the slope of the Richard curve is the same as the slope of
the four parameter logistic curve with parameter \code{b4} at \code{x}
(where the asymptotes \code{a} and \code{d} (and location \code{x50})
of the two curves are identical).
See \link{SSny1} for some examples on the same value found by
different functions.
We observe that to compute \code{z^(1/ny) = (1 + (2^ny-1)*(x/x50)^b)^(1/ny)}
for some values of \code{ny} the base \code{z = 1 + (2^ny-1)*(x/x50)^b}
should be positive. For \code{ny > 0} this is always the case
(since \code{x} and \code{x50} are positive).
For \code{ny < 0} the power function is undefined for \code{z} negative,
unless \code{1/ny} is an integer.
The here given Richards function will for \code{ny < 0} and \code{1/ny}
an even integer have a local extreme at the asymptote \code{d},
the right hand side asymptote (\code{b} positive),
and will for \code{ny < 0} and \code{1/ny} an odd integer
just pass \code{d} for infinity.
}
% In the demo \code{sourceModified} some \code{selfStart} methods
% are presented with the functions constant the upper asymptote for
% \code{z^(1/ny) = (1 + (2^ny-1)*(x/x50)^b)^(1/ny)} negative.
\value{ A vector with function values. }
\author{ Jens Henrik Badsberg }
\references{
Richards, F.J. 1959: A flexible growth function for empirical use.
J. Exp. Bot. 10: 290-300.
Xinyou Yin, Jan Goudriaan, Egbert A. Lantinga, Jan Vos And Huub J. Spiertz:
A Flexible Sigmoid Function of Determinate Growth.
2003; Annals of Botany 91: 361-371.
Gottschalk, Paul G., Dunn, John R.:
The five-parameter logistic:
a characterization and comparison with the four-parameter logistic.
2005; Anal Biochem. Aug 1;343:54-65.
}
\keyword{ package }
\examples{
demo(showNy)
par(mfrow = c(1, 2))
# pdf("Relevant.pdf")
richardsLines
backFitFpl
relevant()
derivatives
derivatives(xlim = c(0.5, 20000), ylim = c(0, 0.1), ylim.x = c(0, 1.2), f = 4)
par(mfrow = c(2, 4))
# pdf("All.pdf")
# pdf("BelowE.pdf")
belowE(outer = FALSE, line = 1)
# pdf("Extra.pdf")
extra(outer = FALSE, line = 1)
# pdf("Below.pdf")
below(outer = FALSE, line = 1)
# pdf("Above.pdf")
above(outer = FALSE, line = 1)
for (i in 2:16) print(richards(10000, ny = -1/i))
}
|
/man/richards.Rd
|
no_license
|
cran/richards
|
R
| false
| false
| 7,016
|
rd
|
\name{richards}
\alias{richards}
\alias{richards.deriv}
\alias{richards.deriv.x}
\alias{richards.inv}
\alias{solveB}
\alias{solveE}
\title{ Richards functions: Five parameter (5PL) logistic curves }
\description{ The Richards function, the inverse of
the function, and the derivative of \code{richards(x)}
(and the derivative of \code{richards(x)} with
respect to \code{log(x)} in the point \code{x},
that is derivative of \code{richards(exp(u))} with
respect to \code{u} in the point \code{x = exp(u)}).
In other implementation of Richards functions and
four parameter logistic functions the argument is
\code{log(x)} of the here used argument \code{x}.
Here we have a positive concentration in mind,
where value depending on the concentration can
be described by Richards functions
in the logarithm of the concentration.
}
\usage{
richards(x, a = 0.1, d = 2.4,
e = solveE(x50, b, ny), x50 = 100,
b = solveB(x = x50, a = a, d = d, ny = ny,
x50 = x50, b4 = b4), b4 = 1,
ny = k - 1, k = 2)
richards.deriv(x, a = 0.1, d = 2.4,
e = solveE(x50, b, ny), x50 = 100,
b = solveB(x = x50, a = a, d = d, ny = ny,
x50 = x50, b4 = b4), b4 = 1,
ny = k - 1, k = 2)
richards.deriv.x(x, a = 0.1, d = 2.4,
e = solveE(x50, b, ny), x50 = 100,
b = solveB(x = x50, a = a, d = d, ny = ny,
x50 = x50, b4 = b4), b4 = 1,
ny = k - 1, k = 2)
richards.inv(y, a = 0.1, d = 2.4,
e = solveE(x50, b, ny), x50 = 100,
b = solveB(x = x50, a = a, d = d, ny = ny,
x50 = x50, b4 = b4), b4 = 1,
ny = k - 1, k = 2)
solveE(x50, b, ny = k - 1, k = 2)
solveB(x = x50, a = 0.1, d = 2.4, x50 = 100, b4 = 1, ny = k - 1, k = 2)
}
\arguments{
\item{x}{ a numeric vector of values at which to evaluate the function. }
\item{y}{ a numeric vector of values at which to evaluate the function. }
\item{a}{ a numeric parameter representing the horizontal asymptote on the
left side (very small values of input) for \code{b} positive,
else the horizontal asymptote on the right side:
Zero concentration response. }
\item{d}{ a numeric parameter representing the horizontal asymptote on the
right side (very large values of input) for \code{b} positive,
else the horizontal asymptote on the left side:
Infinite concentration response. }
\item{b}{ a numeric (reciprocal) scale parameter on the input axis,
'growth rate': slope factor.
(The 'growth' parameter \code{b} should be negative, and \code{a}
is thus the right side (larger) asymptote, for Bertalanffy models.)
}
\item{x50}{ a numeric parameter representing the input value at
the center of the curve: \eqn{IC_{50}}{IC_50}.
The value of the \code{Richards} function will be midway
between \code{a} and \code{d} at \code{x50}. }
\item{e}{ a parameter determining the input value at
the inflection point of the curve. }
\item{ny}{ a numeric shape parameter that affects near
which asymptote maximum 'growth' occurs. }
\item{k}{ an alternative numeric parameter for \code{ny}.
(The \code{m} of Richards (1959) is \code{k}
with \code{ny = k - 1}.) }
\item{b4}{ a numeric parameter giving the (reciprocal) scale parameter of
the four parameter logistic curve with slope as the 'richards'
function at \code{x50}, and same asymptotes. }
}
\details{
\code{richards(x)} computes \code{d + (a - d)/z^(1/ny)} with
\code{z = (1 + ny * (x/e)^b) = (1 + (2^ny-1) * (x/x50)^b)}
for \code{ny} different from 0.
For \code{ny} equal to 0 the limiting Gompertz curve
\code{d + (a - d) * exp(-z)} with \code{z = (x/e)^b} is found.
\code{solveE} converts the parameter \code{x50} into the parameter \code{e},
the inflection point of the curve: \code{e = x50/((2^(ny) - 1)/ny)^(1/b)}.
By \code{(2^ny-1) * (x/x50)^b = ny * (x/e)^b = ny * exp(b * (log(x)-log(e)))}
we recognize the notation of, e.g., Yin et al., 2003, and also see, how
these parameters of the above Richards function relates to the parameters
'xmid' (\code{log(x50)}) and 'scal' (\code{1/b}) of \link{SSfpl}
for \code{ny} equal 1 and the argument input the logarithm of \code{x},
\code{log(x)}.
\code{solveB} finds the parameter \code{b} for the Richard curve such
that the slope of the Richard curve is the same as the slope of
the four parameter logistic curve with parameter \code{b4} at \code{x}
(where the asymptotes \code{a} and \code{d} (and location \code{x50})
of the two curves are identical).
See \link{SSny1} for some examples on the same value found by
different functions.
We observe that to compute \code{z^(1/ny) = (1 + (2^ny-1)*(x/x50)^b)^(1/ny)}
for some values of \code{ny} the base \code{z = 1 + (2^ny-1)*(x/x50)^b}
should be positive. For \code{ny > 0} this is always the case
(since \code{x} and \code{x50} are positive).
For \code{ny < 0} the power function is undefined for \code{z} negative,
unless \code{1/ny} is an integer.
The here given Richards function will for \code{ny < 0} and \code{1/ny}
an even integer have a local extreme at the asymptote \code{d},
the right hand side asymptote (\code{b} positive),
and will for \code{ny < 0} and \code{1/ny} an odd integer
just pass \code{d} for infinity.
}
% In the demo \code{sourceModified} some \code{selfStart} methods
% are presented with the functions constant the upper asymptote for
% \code{z^(1/ny) = (1 + (2^ny-1)*(x/x50)^b)^(1/ny)} negative.
\value{ A vector with function values. }
\author{ Jens Henrik Badsberg }
\references{
Richards, F.J. 1959: A flexible growth function for empirical use.
J. Exp. Bot. 10: 290-300.
Xinyou Yin, Jan Goudriaan, Egbert A. Lantinga, Jan Vos And Huub J. Spiertz:
A Flexible Sigmoid Function of Determinate Growth.
2003; Annals of Botany 91: 361-371.
Gottschalk, Paul G., Dunn, John R.:
The five-parameter logistic:
a characterization and comparison with the four-parameter logistic.
2005; Anal Biochem. Aug 1;343:54-65.
}
\keyword{ package }
\examples{
demo(showNy)
par(mfrow = c(1, 2))
# pdf("Relevant.pdf")
richardsLines
backFitFpl
relevant()
derivatives
derivatives(xlim = c(0.5, 20000), ylim = c(0, 0.1), ylim.x = c(0, 1.2), f = 4)
par(mfrow = c(2, 4))
# pdf("All.pdf")
# pdf("BelowE.pdf")
belowE(outer = FALSE, line = 1)
# pdf("Extra.pdf")
extra(outer = FALSE, line = 1)
# pdf("Below.pdf")
below(outer = FALSE, line = 1)
# pdf("Above.pdf")
above(outer = FALSE, line = 1)
for (i in 2:16) print(richards(10000, ny = -1/i))
}
|
## This script needs to be run in the same folder as
## "exdata-data-household_power_consumption.zip".
## It outputs a png file, plot4.png, for Exploratory Data Analysis
## Course Project 1.

## Unzip the raw data archive
unzip("exdata-data-household_power_consumption.zip")

## Load the full data set ("?" marks missing values in the text file)
electricPowerConsumption <- read.table("household_power_consumption.txt",
                                       sep = ";", header = TRUE,
                                       na.strings = "?")

## Reduce data to 2007-02-01 and 2007-02-02
## (dates are stored as d/m/Y strings in the raw file)
electricPowerConsumption <- subset(electricPowerConsumption,
                                   Date == "1/2/2007" | Date == "2/2/2007")

## Create a combined DateTime column; as.POSIXct avoids storing a
## POSIXlt list-column inside the data.frame (a known footgun)
electricPowerConsumption$DateTime <-
  as.POSIXct(paste(electricPowerConsumption$Date,
                   electricPowerConsumption$Time),
             format = "%d/%m/%Y %H:%M:%S")

## Set Date as class Date
electricPowerConsumption$Date <- as.Date(electricPowerConsumption$Date,
                                         format = "%d/%m/%Y")

## Open png device (480 x 480 as required by the assignment)
png(filename = "plot4.png", width = 480, height = 480)

## 2 x 2 layout, panels filled column-wise
par(mfcol = c(2, 2))

## Plot 1 (top left): global active power over time
plot(electricPowerConsumption$DateTime,
     electricPowerConsumption$Global_active_power,
     ylab = "Global Active Power (kilowatts)", type = "l", xlab = "")

## Plot 2 (bottom left): the three sub-metering series
plot(electricPowerConsumption$DateTime,
     electricPowerConsumption$Sub_metering_1,
     ylab = "Energy sub metering", type = "l", xlab = "")
points(electricPowerConsumption$DateTime,
       electricPowerConsumption$Sub_metering_2, type = "l", col = "red")
points(electricPowerConsumption$DateTime,
       electricPowerConsumption$Sub_metering_3, type = "l", col = "blue")
## Legend colours must follow the plotted series order: 1 = black,
## 2 = red, 3 = blue. (The original listed c("black", "blue", "red"),
## mislabelling series 2 and 3.)
legend("topright", col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, bty = "n")

## Plot 3 (top right): voltage over time
plot(electricPowerConsumption$DateTime, electricPowerConsumption$Voltage,
     ylab = "Voltage", type = "l", xlab = "datetime")

## Plot 4 (bottom right): global reactive power over time
plot(electricPowerConsumption$DateTime,
     electricPowerConsumption$Global_reactive_power,
     ylab = "Global_reactive_power", type = "l", xlab = "datetime")

## Close the device, writing plot4.png to disk
dev.off()
|
/plot4.R
|
no_license
|
Luciferase/ExData_Plotting1
|
R
| false
| false
| 2,337
|
r
|
## This script needs to be run in the same folder as "exdata-data-household_power_consumption.zip"
## This script outputs a png file, plot4.png for Exploratory Data Analysis Course Project 1
## Unzip Data
unzip("exdata-data-household_power_consumption.zip")
## load files ("?" marks missing values in the raw text file)
electricPowerConsumption <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")
## reduce data to 2007-02-01 and 2007-02-02 (dates are d/m/Y strings in the raw file)
electricPowerConsumption <- subset(electricPowerConsumption, Date == "1/2/2007"| Date == "2/2/2007")
## create new variable DateTime in electricPowerConsumption data set
## by pasting the Date and Time strings and parsing them together
dates <- electricPowerConsumption$Date
times <- electricPowerConsumption$Time
x <- paste(dates, times)
electricPowerConsumption$DateTime <- x
## NOTE(review): strptime() returns POSIXlt; storing a POSIXlt column in a
## data.frame is fragile -- consider as.POSIXct() here and for Time below
electricPowerConsumption$DateTime <- strptime(electricPowerConsumption$DateTime, format = "%d/%m/%Y %H:%M:%S")
## set Date as class date
electricPowerConsumption$Date <- as.Date(electricPowerConsumption$Date, format = "%d/%m/%Y")
## set time as class time
electricPowerConsumption$Time <- strptime(electricPowerConsumption$Time, format = "%H:%M:%S")
## query the current graphics device (informational only, no side effect)
dev.cur()
## Open png device, 480 x 480 pixels
png(filename = "plot4.png", width = 480, height = 480)
## Set par for 4 plots in a 2 x 2 grid, filled column-wise
par(mfcol = c(2, 2))
## Draw plot 1: global active power over time
plot(electricPowerConsumption$DateTime, electricPowerConsumption$Global_active_power, ylab = "Global Active Power (kilowatts)", type = "l", xlab = "")
## Draw plot 2: Sub_metering_1 in black
plot(electricPowerConsumption$DateTime, electricPowerConsumption$Sub_metering_1, ylab = "Energy sub metering", type = "l", xlab = "")
## Add Sub_metering_2 to plot 2 (red)
points(electricPowerConsumption$DateTime, electricPowerConsumption$Sub_metering_2, type = "l", col = "red")
## Add Sub_metering_3 to plot 2 (blue)
points(electricPowerConsumption$DateTime, electricPowerConsumption$Sub_metering_3, type = "l", col = "blue")
## Add legend to plot 2
## NOTE(review): col = c("black", "blue", "red") does not match the series
## colours drawn above (series 2 = red, series 3 = blue); should likely be
## c("black", "red", "blue") -- confirm against the reference plot
legend("topright", col = c("black", "blue", "red"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty =1, bty = "n")
## Draw plot 3: voltage over time
plot(electricPowerConsumption$DateTime, electricPowerConsumption$Voltage, ylab = "Voltage", type = "l", xlab = "datetime")
## Draw plot 4: global reactive power over time
plot(electricPowerConsumption$DateTime, electricPowerConsumption$Global_reactive_power, ylab = "Global_reactive_power", type = "l", xlab = "datetime")
## Close dev, writing plot4.png to disk
dev.off()
|
## Build ligand/receptor candidate tables for the SC and EC compartments
## from expressed genes mapped to human UniProt IDs.
## NOTE(review): `humanIDfromMouse` (high-expression gene names with human
## UniProt IDs mapped from mouse names) must already exist in the workspace
## before this script is sourced -- confirm the upstream mapping script.

## Load UniProt reference tables (human cell-membrane and secreted
## proteins, Aug 2017) and index them by UniProt accession
total_human_membrane <- read.delim("index/uniprot_human_cellmembrane_Aug2017.tab")
row.names(total_human_membrane) <- total_human_membrane$Entry
total_human_secreted <- read.delim("index/uniprot_human_secreted_Aug2017.tab")
row.names(total_human_secreted) <- total_human_secreted$Entry

## Rename columns so the UniProt ID column is called "Entry" for merging
colnames(humanIDfromMouse) <- c("Gene.names", "Entry")

## Merge expressed genes with the reference tables; merging on "Entry"
## keeps the UniProt ID unique and drops duplicated gene names.
## The SC and EC merges were identical expressions in the original, so
## each merge is computed once and assigned to both compartments.
expressed_human_secreted_SC <- merge(humanIDfromMouse, total_human_secreted, by = "Entry")
expressed_human_secreted_EC <- expressed_human_secreted_SC
expressed_human_membrane_SC <- merge(humanIDfromMouse, total_human_membrane, by = "Entry")
expressed_human_membrane_EC <- expressed_human_membrane_SC

## Specify the ligand and receptor tables for each compartment:
## ligands may be membrane-bound or secreted, receptors are membrane-bound
SC_ligand <- rbind(expressed_human_membrane_SC, expressed_human_secreted_SC)
EC_ligand <- rbind(expressed_human_membrane_EC, expressed_human_secreted_EC)
SC_receptor <- expressed_human_membrane_SC
EC_receptor <- expressed_human_membrane_EC
|
/ExpressedGenesinCompartment.R
|
no_license
|
verma014/Cell-Cell-interaction
|
R
| false
| false
| 1,401
|
r
|
## Build ligand/receptor candidate tables for the SC and EC compartments.
## NOTE(review): `humanIDfromMouse` is not defined in this script -- it must
## exist in the workspace before sourcing; confirm the upstream script.
#load the gene names with the high expression with the mouse names that are not the human names.
total_human_membrane<-read.delim("index/uniprot_human_cellmembrane_Aug2017.tab")
row.names(total_human_membrane)<-total_human_membrane$Entry
total_human_secreted<-read.delim("index/uniprot_human_secreted_Aug2017.tab")
row.names(total_human_secreted)<-total_human_secreted$Entry
#Change the labels so that the UniProt ID column has the name "Entry"
colnames(humanIDfromMouse)<-c("Gene.names","Entry")
#Merge the two tables. This makes sure that all the gene names are there.
#NOTE(review): the SC and EC assignments below are identical expressions;
#presumably each compartment should use its own gene list -- confirm.
expressed_human_secreted_SC<- merge(humanIDfromMouse,total_human_secreted,by="Entry")
expressed_human_secreted_EC<- merge(humanIDfromMouse,total_human_secreted,by="Entry")
#Merge the two tables. This gets rid of the duplicated gene names and keeps the uniprot ID as unique.#expressed_human_secreted<-subset(total_human_secreted, Entry %in% humanIDfromMouse$Entry)
expressed_human_membrane_SC<- merge(humanIDfromMouse,total_human_membrane,by="Entry")
expressed_human_membrane_EC<- merge(humanIDfromMouse,total_human_membrane,by="Entry")
# Specify the ligand and receptor for each subset:
# ligands may be membrane-bound or secreted, receptors are membrane-bound
SC_ligand<- rbind(expressed_human_membrane_SC,expressed_human_secreted_SC)
EC_ligand<- rbind(expressed_human_membrane_EC,expressed_human_secreted_EC)
SC_receptor<- expressed_human_membrane_SC
EC_receptor<- expressed_human_membrane_EC
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pit.R
\name{pit_df}
\alias{pit_df}
\title{Probability Integral Transformation (data.frame Format)}
\usage{
pit_df(
data,
plot = TRUE,
full_output = FALSE,
n_replicates = 100,
num_bins = NULL,
verbose = FALSE
)
}
\arguments{
\item{data}{a data.frame with the following columns: `true_value`,
`prediction`, `sample`}
\item{plot}{logical. If TRUE, a histogram of the PIT values will be returned
as well}
\item{full_output}{return all individual p_values and computed u_t values
for the randomised PIT. Usually not needed.}
\item{n_replicates}{the number of tests to perform,
each time re-randomising the PIT}
\item{num_bins}{the number of bins in the PIT histogram (if plot == TRUE)
If not given, the square root of n will be used}
\item{verbose}{if TRUE (default is FALSE) more error messages are printed.
Usually, this should not be needed, but may help with debugging.}
}
\value{
a list with the following components:
\itemize{
\item \code{data}: the input data.frame (not including rows where prediction is `NA`),
with added columns `pit_p_val` and `pit_sd`
\item \code{hist_PIT} a plot object with the PIT histogram. Only returned
if \code{plot == TRUE}. Call
\code{plot(PIT(...)$hist_PIT)} to display the histogram.
\item \code{p_values}: all p_values generated from the Anderson-Darling tests on the
(randomised) PIT. Only returned if \code{full_output = TRUE}
\item \code{u}: the u_t values internally computed. Only returned if
\code{full_output = TRUE}
}
}
\description{
Wrapper around `pit()` for use in data.frames
}
\details{
see \code{\link{pit}}
}
\examples{
example <- scoringutils::continuous_example_data
result <- pit_df(example, full_output = TRUE)
}
\references{
Sebastian Funk, Anton Camacho, Adam J. Kucharski, Rachel Lowe,
Rosalind M. Eggo, W. John Edmunds (2019) Assessing the performance of
real-time epidemic forecasts: A case study of Ebola in the Western Area
region of Sierra Leone, 2014-15, <doi:10.1371/journal.pcbi.1006785>
}
|
/man/pit_df.Rd
|
permissive
|
elray1/scoringutils
|
R
| false
| true
| 2,052
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pit.R
\name{pit_df}
\alias{pit_df}
\title{Probability Integral Transformation (data.frame Format)}
\usage{
pit_df(
data,
plot = TRUE,
full_output = FALSE,
n_replicates = 100,
num_bins = NULL,
verbose = FALSE
)
}
\arguments{
\item{data}{a data.frame with the following columns: `true_value`,
`prediction`, `sample`}
\item{plot}{logical. If TRUE, a histogram of the PIT values will be returned
as well}
\item{full_output}{return all individual p_values and computed u_t values
for the randomised PIT. Usually not needed.}
\item{n_replicates}{the number of tests to perform,
each time re-randomising the PIT}
\item{num_bins}{the number of bins in the PIT histogram (if plot == TRUE)
If not given, the square root of n will be used}
\item{verbose}{if TRUE (default is FALSE) more error messages are printed.
Usually, this should not be needed, but may help with debugging.}
}
\value{
a list with the following components:
\itemize{
\item \code{data}: the input data.frame (not including rows where prediction is `NA`),
with added columns `pit_p_val` and `pit_sd`
\item \code{hist_PIT} a plot object with the PIT histogram. Only returned
if \code{plot == TRUE}. Call
\code{plot(PIT(...)$hist_PIT)} to display the histogram.
\item \code{p_values}: all p_values generated from the Anderson-Darling tests on the
(randomised) PIT. Only returned if \code{full_output = TRUE}
\item \code{u}: the u_t values internally computed. Only returned if
\code{full_output = TRUE}
}
}
\description{
Wrapper around `pit()` for use in data.frames
}
\details{
see \code{\link{pit}}
}
\examples{
example <- scoringutils::continuous_example_data
result <- pit_df(example, full_output = TRUE)
}
\references{
Sebastian Funk, Anton Camacho, Adam J. Kucharski, Rachel Lowe,
Rosalind M. Eggo, W. John Edmunds (2019) Assessing the performance of
real-time epidemic forecasts: A case study of Ebola in the Western Area
region of Sierra Leone, 2014-15, <doi:10.1371/journal.pcbi.1006785>
}
|
## GENOME UTILS
#' Tag duplicate names by increasing numbers.
#'
#' Makes the elements of \code{names} unique by appending ".2", ".3", ...
#' to repeated entries; the first occurrence of each name is left
#' unchanged. NOTE(review): the counter is substituted at the first "."
#' of the working tag, so input names that already contain dots may be
#' truncated at that dot when de-duplicated -- TODO confirm this is the
#' intended behavior for dotted names.
#' @param names a vector of characters
#' @return a character vector of the same length as \code{names}
#' @export
tagDuplicates <- function(names) {
    ## tag every name with ".1" so a counter suffix can be substituted in
    tagged <- paste0(names, ".1")
    cnt <- 2
    ## bump the counter on still-duplicated entries until all are unique
    while ( any(duplicated(tagged)) ) {
        dups <- duplicated(tagged)
        tagged[dups] <- sub("\\..*", paste0(".", cnt), tagged[dups])
        cnt <- cnt + 1
    }
    ## strip the helper ".1" tag from the (unique) first occurrences
    sub("\\.1$", "", tagged)
}
#' Generate chromosome index \code{chrS} from lengths
#'
#' Prepends 0 to the cumulative sums of the chromosome lengths, yielding
#' the genome-wide offset of each chromosome start in a continuous index.
#' @param chrL an ordered vector of chromosome lengths; where the
#' order must correspond to chromosome numbering in feature tables
#' for which chrS is used
#' @return a numeric vector of length \code{length(chrL) + 1}
#' @export
getChrSum <- function(chrL) {
    c(0, cumsum(chrL))
}
## Insert a single row `newrow` into data.frame `existingDF` at position
## `r`; rows at and below `r` are shifted down by one. After a suggestion
## by user Ari B. Friedman at \url{https://stackoverflow.com/a/11562428}
insertRow <- function(existingDF, newrow, r) {
    n <- nrow(existingDF)
    if ( r==n+1 ) {
        ## appending after the last row: plain rbind suffices
        existingDF <- rbind(existingDF, newrow)
    } else if ( r<=n ) {
        ## inserting in between: shift rows r..n down one, overwrite row r
        existingDF <- as.data.frame(existingDF,stringsAsFactors=FALSE)
        shift <- seq(r, n)
        existingDF[shift+1,] <- existingDF[shift,]
        existingDF[r,] <- newrow
    } else {
        stop("wrong index, can't be >nrow(<existing data.frame>)")
    }
    existingDF
}
#' Insert rows at specified positions
#'
#' Util to insert multiple rows at specified positions in a
#' \code{data.frame}, expanding single-row code by user
#' Ari B. Friedman at
#' \url{https://stackoverflow.com/a/11562428}
#' @param existingDF existing \code{data.frame}
#' @param newrows rows to add to \code{existingDF}
#' @param r positions in the existing data.frame at which rows are to
#' be inserted; \code{length(r)} must equal \code{nrow(newrows)}, and
#' all indices \code{r<=nrow(existingDF)+1}.
#' @export
insertRows <- function(existingDF, newrows, r ) {
    ## each target position must pair with exactly one new row
    stopifnot(length(r) == nrow(newrows))
    ## sort target positions AND reorder newrows in step: the previous
    ## version sorted r alone, silently pairing rows with the wrong
    ## positions whenever r was passed unsorted
    ord <- order(r)
    r <- r[ord]
    newrows <- newrows[ord, , drop = FALSE]
    ## all indices must refer to the existing data.frame (or append slot)
    if ( any(r>nrow(existingDF)+1) )
        stop("row indices must refer to existing data.frame and",
             " can not be >nrow(<existing data.frame>)+1")
    new <- existingDF
    ## insert one row at a time; each prior insertion shifts subsequent
    ## target positions down by one, hence the + i - 1 offset
    ## (seq_len also handles zero-row newrows, where 1:0 would fail;
    ## drop = FALSE keeps a one-column newrows from collapsing to a vector)
    for ( i in seq_len(nrow(newrows)) )
        new <- insertRow(new, newrows[i, , drop = FALSE], r[i]+i-1)
    new
}
#' Splits genome features spanning annotated ends of circular chromosomes.
#'
#' Splits genome features that span start/end coordinates of circular
#' chromosomes, and adds the downstream half with optional modification
#' of ID, and type values. Circular features are recognized here by
#' start > end, in left->right direction of genome annotation.
#' Strand information MUST NOT BE ENCODED IN start/end coordinate direction,
#' but explicitly provided via a strand column!
#' Note that only the upstream half retains
#' all column information (exceptions: see argument \code{copyCols}),
#' the downstream half will only carry information on coordinates, and
#' optionally updated feature type and ID.
#' The update will only happen if the passed table contains type and ID
#' information (see argument \code{idCols}). The split can be reversed
#' by function \code{removeCircularFeatures}.
#' @param features a list of genomic features with coordinates
#' @param chrL obligatory list of chromosome lengths, in the order used
#' in the chromosome column in \code{features} (see argument \code{coorCols})
#' @param coorCols ordered string vector providing the column names
#' of coordinate columns to be used; must be of length 4 and provide in
#' order: chromosome number (referring to argument \code{chrL}), start, end,
#' and strand (see argument \code{reverse})
#' @param reverse allowed indicators of reverse strand features
#' in strand column (see argument \code{coorCols})
#' @param idTag tag to add to downstream ID and type
#' @param idCols named vector of column names for feature ID, type,
#' and feature parent; note that a "parent" column will be added if not present
#' to refer the downstream half to its upstream feature, which retains
#' all other information
#' @param copyCols copy values to circular feature copy; either logical
#' \code{TRUE} to copy all columns, or a vector of column indices or names
#' @param insertRows insert the circular features below their parent,
#' if set to \code{FALSE} circular features will just be appended; this
#' saves a lot of time for large datasets
#' @seealso \code{removeCircularFeatures}
#' @export
expandCircularFeatures <- function(features, chrL,
                                   coorCols=c("chr","start","end","strand"),
                                   reverse=c("-",-1),
                                   idTag="-circ2", idCols=c(ID="ID",type="type",
                                                            parent="parent"),
                                   copyCols=FALSE,
                                   insertRows=TRUE) {
    ## chromosome index - revert from chrL
    ## add parent column if not present, so downstream halves can point
    ## back to their upstream feature
    ## NOTE(review): elementwise & on these scalar conditions works, but
    ## && would be the conventional choice inside if()
    if ( idCols["ID"]%in%colnames(features) &
         !idCols["parent"] %in% colnames(features) ) {
        features <- cbind(features,parent=rep(NA,nrow(features)))
    }
    ## normalize copyCols: TRUE -> all columns; integer indices -> names
    ## NOTE(review): numeric indices given as doubles (e.g. c(1, 2)) have
    ## typeof "double", not "integer", and would pass through unconverted
    ## -- confirm callers use integer or name vectors
    if ( typeof(copyCols)=="logical" ) {
        if ( copyCols )
            copyCols <- colnames(features)
    } else if ( typeof(copyCols)=="integer" )
        copyCols <- colnames(features)[copyCols]
    ## get all coordinates
    start <- features[,coorCols[2]] # "start"
    end <- features[,coorCols[3]] # "end"
    strand <- features[,coorCols[4]] # "strand"
    rev <- strand%in%reverse
    ## get circular: features whose left->right span wraps the origin
    circ <- start>end # ASSUMES ORDERED START/END
    cidx <- which(circ) # index in original
    if ( sum(circ)==0 )
        return(features) # nothing to split
    ## copy and rename (ID_circ#, type: type_circular, parent: ID);
    ## start from an all-NA frame so non-copied columns stay empty
    cfeat <- as.data.frame(matrix(NA,ncol=ncol(features),nrow=length(cidx)))
    colnames(cfeat) <- colnames(features)
    ## copy requested columns
    cfeat[,copyCols] <- features[cidx,copyCols]
    ## set up ID/parent: copy gets "<ID><idTag>", parent points to original
    if ( idCols["ID"]%in%colnames(features) ) {
        cfeat[,idCols["parent"]] <- features[cidx,idCols["ID"]]
        cfeat[,idCols["ID"]] <- paste(features[cidx,idCols["ID"]],idTag,sep="")
    }
    ## set up type: copy gets "<type><idTag>"
    if ( idCols["type"]%in%colnames(features) )
        cfeat[,idCols["type"]] <- paste(features[cidx,idCols["type"]],
                                        idTag,sep="")
    crev <- rev[cidx]
    ## set up coordinates
    ## c("chr","start","end","strand")
    cfeat[,coorCols] <- features[cidx,coorCols]
    ## reverse-strand features:
    ## copy: end becomes chromosome length
    cfeat[crev,coorCols[3]] <- chrL[cfeat[crev,coorCols[1]]]
    ## original: start becomes 1
    features[circ&rev,coorCols[2]] <- 1
    ## forward-strand features:
    ## copy: start becomes 1
    cfeat[!crev,coorCols[2]] <- 1
    ## original: end becomes chromosome length
    features[circ&!rev,coorCols[3]] <- chrL[features[circ&!rev,coorCols[1]]]
    ## insert below original & return
    ## TODO: find faster version via ID mapping!
    ## TODO: without insertRows table seems to have an empty first line!?
    if ( insertRows )
        res <- insertRows(features,cfeat,cidx+1)
    else
        res <- rbind(features,cfeat)
    res
}
#' NOT WORKING - Undo \code{expandCircularFeatures}
#'
#' Searches for circular features by the \code{idTag} added
#' to ID and type columns in \code{expandCircularFeatures},
#' and maps downstream coordinates back to original features.
#' NOTE: this function is incomplete (see body) and currently only
#' computes the row indices of tagged features.
#' @param features list of genomic features with coordinates
#' @param coorCols ordered string vector providing the column names
#' of coordinate columns to be used; must be of length 4 and provide in
#' order: chromosome number (referring to argument \code{chrL}), start, end,
#' and strand
#' @param idTag tag used for tagging downstream halves
#' @param idCols named vector of column names for feature ID, type,
#' and feature parent; note that a "parent" column will be removed if
#' it is (a) empty and (b) argument \code{rmParent==TRUE}
#' @param rmParent rm the parent column
#' @seealso \code{expandCircularFeatures}
#' @export
removeCircularFeatures <- function(features,
                                   coorCols=c("chr","start","end","strand"),
                                   idTag="-circ2",
                                   idCols=c(ID="ID",type="type",
                                            parent="parent"),
                                   rmParent=TRUE) {
    ## keep only the id columns actually present in the feature table
    idCols <- idCols[idCols%in%colnames(features)]
    if ( length(idCols)==0 )
        stop("no columns present to scan for idTag, use argument idCols")
    ## NOTE(review): implementation stops here -- cidx (row indices of the
    ## tagged downstream halves) is computed and returned invisibly, but the
    ## merge back into upstream features (coordinate restoration, row
    ## removal, optional parent-column drop via rmParent) is not implemented;
    ## the coorCols and rmParent arguments are currently unused.
    cidx <- grep(idTag, features[,idCols[1]])
}
#' convert chromosome coordinates to continuous index
#'
#' Maps per-chromosome coordinates to one continuous index over all
#' chromosomes; reverse-strand positions are additionally shifted by the
#' total genome length \code{max(chrS)} so both strands share one axis.
#' Reverse of \code{\link{index2coor}}.
#' @param features a table of chromosome features that must contain
#' the chromosome number (option \code{chrCol}), one or more chromosome
#' positions (option \code{cols}) and strand information (column
#' \code{strandCol}).
#' @param chrS the chromosome index, indicating the start position
#' of each chromosome in the continuous index, derived from chromosome length
#' information; simply the cumulative lengths of ordered chrosomes,
#' see function \code{\link{getChrSum}}
#' @param chrMap a vector of chromosome names using \code{features}' chromosome
#' column, in the same order as \code{chrS}
#' @param cols name of the columns giving coordinates that will be mapped
#' to continuous index
#' @param chrCol name of the column that gives the chromosome number
#' @param strandCol name of the column that gives forward/reverse strand
#' information
#' @param reverse a vector of possible reverse strand indicators
#' @param circular suppresses re-sorting to start < end for circular chromosomes
#' @return the input table with coordinate columns mapped to the
#' continuous index and the chromosome column set to 1 (NA where the
#' chromosome could not be mapped)
#' @export
coor2index <- function(features, chrS, chrMap,
                       cols=c("start","end","coor"),
                       chrCol="chr", strandCol="strand",
                       reverse=c("-",-1), circular=FALSE) {
    ## coordinate columns actually present in the table
    cols <- cols[cols%in%colnames(features)]
    ## strand column - if not present, infer from start>end
    if ( strandCol%in%colnames(features) ) {
        strand <- as.character(features[,strandCol])
    } else {
        strand <- rep("+", nrow(features))
        ## if start/end are available, infer from start>end
        if ( sum(c("start","end")%in%colnames(features))==2 )
            strand[features[,"start"]>features[,"end"]] <- "-"
    }
    ## re-order start>end; only for non-circular chromosomes
    ## TODO: add circular info to chrS
    ## NOTE: scalar && - both operands are length-1 conditions
    if ( sum(c("start","end")%in%colnames(features))==2 && !circular ) {
        flipped <- features[,"start"]>features[,"end"]
        ends <- features[flipped,"start"]
        features[flipped,"start"] <- features[flipped,"end"]
        features[flipped,"end"] <- ends
    }
    ## chromosome of each feature
    chr <- features[,chrCol]
    ## map chromosome names to their numeric index in chrS order
    ## TODO: automate, if chromosomes are not numeric!?
    if ( !missing(chrMap) ) {
        chrIdx <- seq_along(chrMap)
        names(chrIdx) <- chrMap
        chr <- chrIdx[as.character(chr)]
    }
    ## is.numeric() returns a single logical, no any() required
    if ( !is.numeric(chr) )
        stop("chromosomes must be a numeric index; use chromosome name map with argument `chrMap'!")
    ## check for missing chromosome info and issue warning;
    ## remember indices to set the chromosome column to NA below
    ## TODO: check for missing info in other functions as well
    nachr <- numeric()
    if ( any(is.na(chr)) ) {
        nachr <- which(is.na(chr))
        warning("some chromosomes are not available (NA)")
    }
    ## convert to index: offset by chromosome start, shift reverse
    ## strand by total genome length; minus is loop-invariant
    minus <- strand%in%reverse
    for ( col in cols ) {
        features[,col] <- features[,col]+chrS[chr]
        features[minus,col] <- features[minus,col]+max(chrS)
    }
    ## all features now live on the single continuous "chromosome" 1
    features[,chrCol] <- 1
    if ( length(nachr)>0 )
        features[nachr,chrCol] <- NA
    features
}
#' Simple version of \code{\link{index2coor}} for single values
#'
#' Maps continuous index positions back to chromosome number,
#' chromosome-local coordinate and strand. Positions beyond the total
#' genome length \code{max(chrS)} are reverse-strand positions.
#' @param pos the continuous index position that will be mapped to
#' chromosome coordinates
#' @param chrS the chromosome index, indicating the start position
#' of each chromosome in the continuous index, derived from chromosome length
#' information
#' @param strands forward/reverse strand indicators
#' @return a matrix with columns \code{chr}, \code{coor} and \code{strand}
#' @export
idx2coor <- function(pos, chrS, strands=c(1,-1)) {
    coor <- cbind(chr=rep(1,length(pos)),coor=pos,strand=rep(NA,length(pos)))
    genome <- max(chrS) # total length; reverse strand is offset by this
    for ( i in 1:(length(chrS)-1) ) {
        ## frw strand: position falls in chromosome i's window
        current <- pos>chrS[i] & pos<=chrS[i+1]
        coor[current,"coor"] <- pos[current] - chrS[i]
        coor[current,"chr"] <- i
        coor[current,"strand"] <- strands[1]
        ## rev strand: same window, shifted by total genome length
        current <- pos>(chrS[i]+genome) & pos<=(chrS[i+1]+genome)
        ## FIX: assign the "coor" column explicitly; previously
        ## coor[current] <- ... indexed the whole matrix with a
        ## recycled logical vector, transiently clobbering all columns
        coor[current,"coor"] <- pos[current] - chrS[i] - genome
        coor[current,"chr"] <- i
        coor[current,"strand"] <- strands[2]
    }
    coor
}
#' get the chromosome from continuous index
#'
#' Reports the chromosome a continuous index position falls on;
#' positions beyond \code{max(chrS)} (reverse strand) are handled by a
#' second, genome-length-shifted lookup.
#' @param idx index position for which chromosome information is reported
#' @param chrS the chromosome index, indicating the start position
#' of each chromosome in the continuous index, derived from chromosome length
#' information
#' @return returns the chromosome number
#' @export
idx2chr <- function(idx,chrS) {
    ## vapply instead of sapply: type-stable, also for empty input;
    ## which(...)[1]-1 is always double (integer - double literal)
    chr <- vapply(idx, function(x) which(chrS>=x)[1]-1, numeric(1))
    if ( any(is.na(chr)) ) # not found: retry with reverse-strand offset
        chr[is.na(chr)] <- vapply(idx[is.na(chr)], function(x)
            which((chrS+max(chrS))>=x)[1]-1, numeric(1))
    chr
}
#' get the strand from continuous index
#'
#' Positions within the total genome length \code{max(chrS)} are on the
#' forward strand (1), positions beyond it on the reverse strand (-1).
#' @param idx index position for which strand information is reported
#' @param chrS the chromosome index, indicating the start position
#' of each chromosome in the continuous index, derived from chromosome length
#' information
#' @return returns the strand
#' @export
idx2str <- function(idx,chrS) {
    ## TRUE (beyond genome length) -> -1, FALSE -> 1; NA propagates
    1 - 2*(idx > max(chrS))
}
#' convert continuous index to chromosome coordinates (reverse of
#' \code{\link{coor2index}})
#'
#' Positions beyond the total genome length \code{max(chrS)} are
#' interpreted as reverse-strand positions (as produced by
#' \code{\link{coor2index}}) and mapped back accordingly.
#' @param features a table of chromosome features that must contain
#' the chromosome number (option \code{chrCol}), one or more chromosome
#' positions (option \code{cols}) and strand information (column
#' \code{strandCol}).
#' @param chrS the chromosome index, indicating the start position
#' of each chromosome in the continuous index, derived from chromosome length
#' information
#' @param chrMap a vector of chromosome names, in the same order as
#' \code{chrS}; if provided chromosome index will be mapped back to
#' chromosome name
#' @param cols names of the columns giving coordinates that will be mapped
#' to continuous index
#' @param chrCol name of the column that gives the chromosome number
#' @param relCol relative position mapping left/right -> upstream/downstream,
#' depending on strand
#' @param strandCol name of the column that gives forward/reverse strand
#' information
#' @param strands forward/reverse strand indicators
#' @return the input table with coordinate columns mapped back to
#' per-chromosome coordinates, and chromosome/strand columns filled in
#' @export
index2coor <- function(features, chrS, chrMap,
                       cols=c("start","end","coor"),
                       chrCol="chr", strandCol="strand", relCol,
                       strands=c(1,-1)) {
    ## restrict to coordinate columns present in the table
    cols <- cols[cols%in%colnames(features)]
    ## add relative position column:
    ## left -> upstream/downstream, right -> downstream/upstream
    cpcols <- cols
    rel2factor <- FALSE # stores whether a relative position column was factor
    if ( !missing(relCol) ) {
        if ( relCol%in%colnames(features) ) {
            cpcols <- c(cpcols, relCol)
            ## CONVERT TO CHARACTER (restored to factor at the end)
            if ( class(features[,relCol])=="factor" ) {
                features[,relCol] <- as.character(features[,relCol])
                rel2factor <- TRUE
            }
        } else
            warning("relative position column 'relCol' passed as, ",relCol,
                    "but not present in columns.")
    }
    ## keep an untouched copy of the index values: the loop below reads
    ## from `orig` and writes remapped values into `features`
    orig <- features[,cpcols,drop=FALSE]
    ## add chromosome and strand columns, if not present
    if ( !chrCol%in%colnames(features) )
        features <- cbind(chr=rep(NA,nrow(features)),features)
    if ( !strandCol%in%colnames(features) )
        features <- cbind(features,strand=rep(NA,nrow(features)))
    ## remap values back to original coordinates, one chromosome at a
    ## time; a feature's chromosome is decided by its first coordinate
    ## column (cols[1])
    for ( i in 1:(length(chrS)-1) ) {
        ## forward strand: index falls in chromosome i's window
        current <- orig[,cols[1]]>chrS[i] & orig[,cols[1]]<=chrS[i+1]
        for ( col in cols )
            features[current,col] <- orig[current,col] - chrS[i]
        features[current,chrCol] <- i
        features[current,strandCol] <- strands[1]
        ## relative position mapping left/right -> upstream/downstream
        if ( !missing(relCol) ) {
            tmpcol <- orig[current,relCol]
            tmpcol <- gsub("left","upstream", tmpcol)
            tmpcol <- gsub("right","downstream", tmpcol)
            features[current,relCol] <- tmpcol
        }
        ## reverse strand: same window, shifted by total genome length
        current <- orig[,cols[1]]>(chrS[i]+max(chrS)) &
            orig[,cols[1]]<=(chrS[i+1]+max(chrS))
        for ( col in cols )
            features[current,col] <- orig[current,col] - chrS[i] - max(chrS)
        features[current,chrCol] <- i
        features[current,strandCol] <- strands[2]
        ## relative position mapping left/right -> downstream/upstream
        ## (mirrored relative to the forward strand)
        if ( !missing(relCol) ) {
            tmpcol <- orig[current,relCol]
            tmpcol <- gsub("left","downstream", tmpcol)
            tmpcol <- gsub("right","upstream", tmpcol)
            features[current,relCol] <- tmpcol
        }
    }
    ## restore relative positions to factor if they came in as factor
    if ( rel2factor)
        features[,relCol] <- factor(features[,relCol])
    ## return to chromosome names
    if ( !missing(chrMap) ) {
        chrIdx <- 1:length(chrMap)
        names(chrIdx) <- chrMap
        features[,chrCol] <- chrMap[features[,chrCol]]
    }
    features
}
#' switches the strand information (reverse<->forward) of genomic
#' features with continuously indexed chromosome coordinates
#'
#' In the continuous index, the two strands are separated by the total
#' genome length \code{max(chrS)}: forward positions are shifted up by
#' that amount, reverse positions shifted back down.
#' @param features genomic features with continuously indexed
#' chromosome coordinates
#' @param chrS the chromosome index, indicating the start position
#' of each chromosome in the continuous index, derived from chromosome length
#' information
#' @param cols names of the columns holding the continuous index
#' @return the input table with strand-switched index columns
#' @export
switchStrand <- function(features,chrS, cols=c("start","end","coor")) {
    cols <- cols[cols%in%colnames(features)]
    ## FIX: drop=FALSE keeps a two-dimensional object even when only a
    ## single index column is present (plain [,cols] would drop to a
    ## vector and break orig[,col] below)
    orig <- features[,cols,drop=FALSE]
    genome <- max(chrS) # total genome length separating the strands
    for ( col in cols ) {
        ## forward -> reverse
        current <- orig[,col] <= genome
        features[current,col] <- orig[current,col] + genome
        ## reverse -> forward
        current <- orig[,col] > genome
        features[current,col] <- orig[current,col] - genome
    }
    features
}
#' align genome data at specified coordinates (e.g. TSS)
#' @param coors genome positions (chromosome, coordinate, strand)
#' at which data will be aligned
#' (TODO: allow start/end coors and set NA if beyond)
#' @param data genome data to be aligned; NOTE, that currently this
#' is required to be fully expanded matrix covering each chromosome position,
#' i.e. \code{nrow(data)==max(chrS)}
#' (TODO: allow non-expanded data)
#' @param dst upstream/downstream length to be aligned
## (TODO: allow different upstream and downstream ranges)
## (TODO: allow individual ranges)
#' @param chrS the chromosome index, indicating the start position
#' of each chromosome in \code{data}, derived from chromosome length
#' information, see function \code{\link{getChrSum}}
#' @param coorCols ordered string vector providing the column names
#' of coordinate columns to be used; must be of length 3 and provide in
#' order: chromosome number (refering to argument \code{chrS}), position
#' and strand (see argument \code{reverse})
#' @param reverse a vector of possible reverse strand indicators, all other
#' values in the strand column will be taken as forward strand!
#' @return a list with one matrix per aligned data column; each matrix
#' has one row per row of \code{coors} and columns named by the
#' relative position \code{-dst:dst} (NA beyond chromosome ends)
## TODO: generalize for not fully expanded data w/o chrS
## TODO: allow different downstream and upstream ranges
#' @export
alignData <- function(coors, data, dst=500, chrS,
                      coorCols=c(chr="chr", position="coor", strand="strand"),
                      reverse=c("-",-1)) {
    ## get coordinates
    starts <- as.numeric(coors[,coorCols["position"]])
    chrs <- as.numeric(coors[,coorCols["chr"]])
    strands <- as.character(coors[,coorCols["strand"]])
    ## catch wrong coordinates, eg. due to use of coor2index(coors)
    if ( any(starts>max(chrS) ) )
        stop(sum(starts>max(chrS)),
             " start coordinates are beyond chromosome length in index `chrS`")
    ## catch wrong data dimension
    if ( nrow(data)!=max(chrS) )
        stop("`data` rows (", nrow(data),
             ") do not cover the full chromosome length in index `chrS`")
    ## add chromosome lengths to get the index (row) in data
    starts <- chrS[chrs] + starts
    ## ranges in full data: one row per coordinate, covering the window
    ## of +/- dst positions around each aligned (indexed) position
    rng <- t(apply(t(starts), 2, function(x) (x-dst):(x+dst)))
    ## reverse window order for reverse strand; the prepended first
    ## column is a forward-strand flag consumed by the next apply
    rng <- cbind(!strands%in%reverse,rng)
    rng <- t(apply(rng, 1, function(x) {
        if (x[1]==1) return(x[2:length(x)])
        else return(rev(x[2:length(x)]))
    }))
    ## cut chromosome ends: positions outside the feature's own
    ## chromosome (min/max prepended as helper columns) are set to NA
    ## TODO: implement circular chromsomes!
    ## TODO: add warning, eg. if icoors were passed negative
    ## strand values are beyond chromosome ends
    rng <- cbind(min=chrS[chrs],max=chrS[chrs+1],rng)
    rng <- t(apply(rng, 1, function(x) {
        rm <- x[3:length(x)] <= x[1] | x[3:length(x)] >= x[2];
        x <- x[3:length(x)]; x[rm] <- NA;return(x)
    }))
    ## split off coordinate columns, if not separately supplied
    firstcol <- 1
    if ( sum(c("chr","coor")%in%colnames(data))==2 )
        firstcol <- 3
    ## get aligned data for each data column; NA indices yield NA values
    geneData <- list()
    for ( i in firstcol:ncol(data) )
        geneData <- append(geneData,
                           list(t(apply(rng, 1, function(x) data[x,i]))))
    names(geneData) <- colnames(data)[firstcol:ncol(data)]
    ## relative coordinates as colnames
    xax <- -dst:dst
    geneData <- lapply(geneData, function(x) {colnames(x) <- xax; x})
    ## copy rownames
    geneData <- lapply(geneData, function(x) {rownames(x) <- rownames(coors);x})
    return(geneData)
}
## TODO: alignment on relative segment length
## NOTE(review): unexported draft. It reads `ends` but never uses them:
## the alignment below is still centered on `start` only, i.e. it
## currently mirrors alignData() rather than scaling by segment length.
## Verify before use.
alignData_relative <- function(coors, data, dst=500, chrS,
                               coorCols=c(chr="chr", start="start", end="end",
                                          strand="strand"),
                               reverse=c("-",-1)) {
    ## TODO: sort start<ends?
    starts <- as.numeric(coors[,coorCols["start"]])
    ends <- as.numeric(coors[,coorCols["end"]])
    chrs <- as.numeric(coors[,coorCols["chr"]])
    strands <- as.character(coors[,coorCols["strand"]])
    ## get length and relative coordinates for each segment -1 to 2 rel.length
    ## then summarize in bins
    ## add chromosome lengths to get direct index
    starts <- chrS[chrs] + starts
    ## TODO: should rev.strand be shifted by one?
    ## window of +/- dst positions around each (indexed) start
    rng <- t(apply(t(starts), 2, function(x) (x-dst):(x+dst)))
    rng <- cbind(!strands%in%reverse,rng)
    ## reverse window order for reverse strand (first column is the
    ## forward-strand flag)
    rng <- t(apply(rng,1,function(x)
        if (x[1]==1) return(x[2:length(x)])
        else return(rev(x[2:length(x)]))))
    ## cut chromosome ends: positions outside the feature's own
    ## chromosome (min/max prepended as helper columns) are set to NA
    ## TODO: implement circular chromsomes!
    ##chr <- feats[,"chr"] ## THIS NOT PASSED!?
    rng <- cbind(min=chrS[chrs],max=chrS[chrs+1],rng)
    rng <- t(apply(rng,1,function(x) {
        rm <- x[3:length(x)] <= x[1] | x[3:length(x)] >= x[2];
        x <- x[3:length(x)]; x[rm] <- NA;return(x)}))
    ## get data!
    ## split off coordinate columns, if not separately supplied
    firstcol <- 1
    if ( sum(c("chr","coor")%in%colnames(data))==2 )
        firstcol <- 3
    ## collect aligned rows for each data column; NA indices yield NA
    geneData <- list()
    for ( i in firstcol:ncol(data) )
        geneData <- append(geneData,
                           list(t(apply(rng, 1, function(x) data[x,i]))))
    names(geneData) <- colnames(data)[firstcol:ncol(data)]
    ## relative coordinates as colnames
    xax <- -dst:dst
    geneData <- lapply(geneData, function(x) {colnames(x) <- xax; x})
    ## copy rownames
    geneData <- lapply(geneData, function(x) {rownames(x) <- rownames(coors);x})
    return(geneData)
}
#' export internal coordinate format to bed file format
#'
#' Converts a coordinate table to the 6-column bed format
#' (chrom, chromStart, chromEnd, name, score, strand): start/end are
#' sorted, starts converted to 0-based (bed ends are non-inclusive and
#' stay untouched), strands mapped to +/- and chromosomes prefixed
#' with "chr" and zero-padded.
#' @param coor genomic coordinates
#' @param file optional name for bed output file
#' @param coors column names of coordinate columns
#' @param reverse reverse strand characters
#' @param name column name for bed file name column (column 4)
#' @param score column name for bed file score column (column 5)
#' @param prefix prefix to be added to the name and score columns (column 4,5),
#' used by segmenTools interface to bedtools for unique names.
#' @param verb verbosity level, 0: silent
#' @return invisibly returns the bed-formatted table
#' @seealso bed2coor
#' @export
coor2bed <- function(coor, file,
                     coors=c(chr="chr", start="start", end="end",
                             strand="strand"),
                     reverse=c("-",-1), name, score, prefix, verb=1) {
    ## add missing name column (auto-generated ids)
    if ( missing(name) ) {
        dropcol <- which(colnames(coor)=="name") # rm existing name column
        if ( length(dropcol)>0 ) coor <- coor[,-dropcol]
        coor <- cbind(coor, name=paste0("id",1:nrow(coor)))
        name <- "name"
    }
    ## add missing score column (all 0)
    if ( missing(score) ) {
        dropcol <- which(colnames(coor)=="score") # rm existing score column
        if ( length(dropcol)>0 ) coor <- coor[,-dropcol]
        coor <- cbind(coor, score=rep(0, nrow(coor)))
        score <- "score"
    }
    ## sort start/end such that start <= end
    starts <- apply(coor[,coors[c("start","end")]], 1, min)
    ends <- apply(coor[,coors[c("start","end")]], 1, max)
    coor[,coors["start"]] <- starts
    coor[,coors["end"]] <- ends
    ## convert strand column to bed +/-
    ## FIX: use the mapped strand column name (coors) instead of the
    ## literal "strand", and do not overwrite the `reverse` argument
    isrev <- coor[,coors["strand"]] %in% reverse
    coor[ isrev,coors["strand"]] <- "-"
    coor[!isrev,coors["strand"]] <- "+"
    ## order by starts, then by chromosome (stable: chromosome primary)
    coor <- coor[order(coor[,coors["start"]]), ]
    coor <- coor[order(coor[,coors["chr"]]), ]
    ## convert starts to 0-based
    ## NOTE: end is non-inclusive in bed-format, and
    ## thus not corrected!
    ## FIX: use the mapped start column name instead of literal "start"
    coor[,coors["start"]] <- coor[,coors["start"]]-1
    ## add prefix
    if ( !missing(prefix) ) {
        coor[,name] <- paste0(prefix, coor[,name])
        coor[,score] <- paste0(prefix, coor[,score])
    }
    ## chromosomes must begin with chr; zero-padded numeric index
    coor[,coors["chr"]] <- paste0("chr", sprintf("%02d",coor[,coors["chr"]]))
    bed <- coor[,c(coors[c("chr","start","end")], name, score,coors["strand"])]
    if( !missing(file) ) {
        if ( verb>0 )
            cat(paste("writing bed file:", file,"\n"))
        ## FIX: restore user options on exit rather than leaking the
        ## scipen change into the caller's session
        oldopt <- options(scipen=999) ## avoid scientific notation
        on.exit(options(oldopt), add=TRUE)
        write.table(x=bed, file=file, sep="\t",
                    quote=FALSE, row.names=FALSE, col.names=FALSE)
    }
    invisible(bed)
}
|
/R/coor2index.R
|
no_license
|
raim/segmenTools
|
R
| false
| false
| 26,982
|
r
|
## GENOME UTILS
#' Tag duplicate names by increasing numbers.
#'
#' Appends ".2", ".3", ... to the second, third, ... occurrence of each
#' duplicated name; unique names and first occurrences are returned
#' unchanged.
#' @param names a vector of characters
#' @export
tagDuplicates <- function(names) {
    ## tag every name with a helper ".1" suffix, then bump the suffix
    ## of remaining duplicates until all tagged names are unique
    tagged <- paste0(names, ".1")
    suffix <- 1
    repeat {
        dups <- duplicated(tagged)
        if ( !any(dups) ) break
        suffix <- suffix + 1
        tagged[dups] <- sub("\\..*", paste0(".", suffix), tagged[dups])
    }
    ## strip the helper ".1" from names that never needed a tag
    sub("\\.1$", "", tagged)
}
#' Generate chromosome index \code{chrS} from lengths
#'
#' Returns the cumulative start offsets of the ordered chromosomes,
#' beginning with 0, for use as continuous-index anchors.
#' @param chrL an ordered vector of chromosome lengths; where the
#' order must correspond to chromosome numbering in feature tables
#' for which chrS is used
#' @export
getChrSum <- function(chrL) {
    cumsum(c(0, chrL))
}
## util to insert a single row at position r, after suggestion by user
## Ari B. Friedman at \url{https://stackoverflow.com/a/11562428};
## r == nrow+1 appends, r <= nrow shifts rows r..n down by one
insertRow <- function(existingDF, newrow, r) {
    if ( r==nrow(existingDF)+1 ) { # append as last row
        existingDF <- rbind(existingDF, newrow)
    } else if ( r<=nrow(existingDF) ) { # insert in between
        existingDF <- as.data.frame(existingDF,stringsAsFactors=FALSE)
        shift <- seq(r, nrow(existingDF)) # rows to move down by one
        existingDF[shift+1,] <- existingDF[shift,]
        existingDF[r,] <- newrow
    } else {
        stop("wrong index, can't be >nrow(<existing data.frame>)")
    }
    existingDF
}
#' insert rows as specified positions
#'
#' Util to insert multiple rows at specified positions
#' in a \code{data.frame}, expanding single-row code by user
#' Ari B. Friedman at
#' \url{https://stackoverflow.com/questions/11561856/add-new-row-to-dataframe-at-specific-row-index-not-appendedlooping through new rows}
#' @param existingDF existing \code{data.frame}
#' @param newrows rows to add to \code{existingDF}
#' @param r positions in the existing data.frame at which rows are to
#' be inserted; \code{length(r)} must equal \code{nrow(newrows)}, and
#' all indices \code{r<=nrow(existingDF)+1}.
#' @return \code{existingDF} with \code{newrows} inserted; returned
#' unchanged when \code{newrows} is empty
#' @export
insertRows <- function(existingDF, newrows, r ) {
    ## indices must be sorted so the per-insertion offset below is valid
    r <- sort(r) # SORT!
    if ( any(r>nrow(existingDF)+1) )
        stop("row indices must refer to existing data.frame and",
             " can not be >nrow(<existing data.frame>)+1")
    ## insert one row at a time; each prior insertion shifts subsequent
    ## target positions down by one, compensated by +i-1;
    ## seq_len (not 1:nrow) makes empty newrows a safe no-op
    new <- existingDF
    for ( i in seq_len(nrow(newrows)) )
        new <- insertRow(new, newrows[i,], r[i]+i-1)
    new
}
#' Splits genome features spanning annotated ends of circular chromosomes.
#'
#' Splits genome features that span start/end coordinates of circular
#' chromosomes, and adds the downstream half with optional modification
#' of ID, and type values. Circular features are recognized here by
#' start > end, in left->right direction of genome annotation.
#' Strand information MUST NOT BE ENCODED IN start/end coordinate direction,
#' but explicitly provided via a strand column!
#' Note that only the upstream half retains
#' all column information (exceptions: see argument \code{copyCols}),
#' the downstream half will only carry information on coordinates, and
#' optionally updated feature type and ID.
#' The update will only happen if the passed table contains type and ID
#' information (see argument \code{idCols}). The split can be reversed
#' by function \code{removeCircularFeatures}.
#' @param features a list of genomic features with coordinates
#' @param chrL obligatory list of chromosome lengths, in order used
#' in chromosome column in \code{features} (see argument \code{coorCols})
#' @param coorCols ordered string vector providing the column names
#' of coordinate columns to be used; must be of length 4 and provide in
#' order: chromosome number (refering to argument \code{chrL}), start, end,
#' and strand (see argument \code{reverse})
#' @param reverse allowed indicators of reverse strand features
#' in strand column (see argument \code{coorCols})
#' @param idTag tag to add to downstream ID and type
#' @param idCols named vector of column names for feature ID, type,
#' and feature parent; note that a "parent" column will be added if not present
#' to refer the downstream half to its upstream feature, which retains
#' all other information
#' @param copyCols copy values to circular feature copy; either logical
#' \code{TRUE} to copy all columns, or a vector of column indices or names
#' @param insertRows insert the circular features below their parent,
#' if set to \code{FALSE} circular features will just be appended; this
#' saves a lot of time for large datasets
#' @seealso \code{removeCircularFeatures}
#' @export
expandCircularFeatures <- function(features, chrL,
                                   coorCols=c("chr","start","end","strand"),
                                   reverse=c("-",-1),
                                   idTag="-circ2", idCols=c(ID="ID",type="type",
                                                            parent="parent"),
                                   copyCols=FALSE,
                                   insertRows=TRUE) {
    ## chromosome index - revert from chrL
    ## add parent column if not present (links downstream half to parent)
    if ( idCols["ID"]%in%colnames(features) &
         !idCols["parent"] %in% colnames(features) ) {
        features <- cbind(features,parent=rep(NA,nrow(features)))
    }
    ## normalize copyCols: TRUE -> all columns, integer -> column names
    if ( typeof(copyCols)=="logical" ) {
        if ( copyCols )
            copyCols <- colnames(features)
    } else if ( typeof(copyCols)=="integer" )
        copyCols <- colnames(features)[copyCols]
    ## get all coordinates
    start <- features[,coorCols[2]] # "start"
    end <- features[,coorCols[3]] # "end"
    strand <- features[,coorCols[4]] # "strand" (NOTE: shadows base::rev below)
    rev <- strand%in%reverse
    ## get circular: features spanning the chromosome origin
    circ <- start>end # ASSUMES ORDERED START/END
    cidx <- which(circ) # index in original
    if ( sum(circ)==0 )
        return(features)
    ## copy and rename (ID_circ#, type: type_circular, parent: ID);
    ## cfeat holds the downstream halves, all-NA except requested columns
    cfeat <- as.data.frame(matrix(NA,ncol=ncol(features),nrow=length(cidx)))
    colnames(cfeat) <- colnames(features)
    ## copy requested columns
    cfeat[,copyCols] <- features[cidx,copyCols]
    ## set up ID/parent: downstream half gets tagged ID, parent = original ID
    if ( idCols["ID"]%in%colnames(features) ) {
        cfeat[,idCols["parent"]] <- features[cidx,idCols["ID"]]
        cfeat[,idCols["ID"]] <- paste(features[cidx,idCols["ID"]],idTag,sep="")
    }
    if ( idCols["type"]%in%colnames(features) )
        cfeat[,idCols["type"]] <- paste(features[cidx,idCols["type"]],
                                        idTag,sep="")
    crev <- rev[cidx]
    ## set up coordinates
    ## c("chr","start","end","strand")
    cfeat[,coorCols] <- features[cidx,coorCols]
    ## reverse coordinates
    ## copy: end becomes chromosome length
    cfeat[crev,coorCols[3]] <- chrL[cfeat[crev,coorCols[1]]]
    ## original: start becomes 1
    features[circ&rev,coorCols[2]] <- 1
    ## forward coordinates
    ## copy: start becomes 1
    cfeat[!crev,coorCols[2]] <- 1
    ## original: end becomes chromosome length
    features[circ&!rev,coorCols[3]] <- chrL[features[circ&!rev,coorCols[1]]]
    ## insert below original & return
    ## TODO: find faster version via ID mapping!
    ## TODO: without insertRows table seems to have an empty first line!?
    if ( insertRows )
        res <- insertRows(features,cfeat,cidx+1)
    else
        res <- rbind(features,cfeat)
    res
}
#' NOT WORKING - Undo \code{expandCircularFeatures}
#'
#' Intended to search for circular features by the \code{idTag} added
#' to ID and type columns in \code{expandCircularFeatures}, and map
#' downstream coordinates back to original features.
#' NOTE: implementation is unfinished; it currently only locates the
#' tagged rows and returns their indices invisibly.
#' @param features list of genomic features with coordinates
#' @param coorCols ordered string vector providing the column names
#' of coordinate columns to be used; must be of length 4 and provide in
#' order: chromosome number (refering to argument \code{chrL}), start, end,
#' and strand
#' @param idTag tag used for tagging downstream halves
#' @param idCols named vector of column names for feature ID, type,
#' and feature parent; note that a "parent" column will be removed if
#' it is (a) empty and (b) argument \code{rmParent==TRUE}
#' @param rmParent rm the parent column
#' @seealso \code{expandCircularFeatures}
#' @export
removeCircularFeatures <- function(features,
                                   coorCols=c("chr","start","end","strand"),
                                   idTag="-circ2",
                                   idCols=c(ID="ID",type="type",
                                            parent="parent"),
                                   rmParent=TRUE) {
    ## restrict to the ID/type/parent columns actually present
    idCols <- idCols[idCols %in% colnames(features)]
    if ( length(idCols)==0 )
        stop("no columns present to scan for idTag, use argument idCols")
    ## locate rows whose first available id column carries the circular tag
    tagged <- grep(idTag, features[, idCols[1]])
    invisible(tagged)
}
#' convert chromosome coordinates to continuous index
#'
#' Maps per-chromosome coordinates to one continuous index over all
#' chromosomes; reverse-strand positions are additionally shifted by the
#' total genome length \code{max(chrS)} so both strands share one axis.
#' Reverse of \code{\link{index2coor}}.
#' @param features a table of chromosome features that must contain
#' the chromosome number (option \code{chrCol}), one or more chromosome
#' positions (option \code{cols}) and strand information (column
#' \code{strandCol}).
#' @param chrS the chromosome index, indicating the start position
#' of each chromosome in the continuous index, derived from chromosome length
#' information; simply the cumulative lengths of ordered chrosomes,
#' see function \code{\link{getChrSum}}
#' @param chrMap a vector of chromosome names using \code{features}' chromosome
#' column, in the same order as \code{chrS}
#' @param cols name of the columns giving coordinates that will be mapped
#' to continuous index
#' @param chrCol name of the column that gives the chromosome number
#' @param strandCol name of the column that gives forward/reverse strand
#' information
#' @param reverse a vector of possible reverse strand indicators
#' @param circular suppresses re-sorting to start < end for circular chromosomes
#' @return the input table with coordinate columns mapped to the
#' continuous index and the chromosome column set to 1 (NA where the
#' chromosome could not be mapped)
#' @export
coor2index <- function(features, chrS, chrMap,
                       cols=c("start","end","coor"),
                       chrCol="chr", strandCol="strand",
                       reverse=c("-",-1), circular=FALSE) {
    ## coordinate columns actually present in the table
    cols <- cols[cols%in%colnames(features)]
    ## strand column - if not present, infer from start>end
    if ( strandCol%in%colnames(features) ) {
        strand <- as.character(features[,strandCol])
    } else {
        strand <- rep("+", nrow(features))
        ## if start/end are available, infer from start>end
        if ( sum(c("start","end")%in%colnames(features))==2 )
            strand[features[,"start"]>features[,"end"]] <- "-"
    }
    ## re-order start>end; only for non-circular chromosomes
    ## TODO: add circular info to chrS
    ## NOTE: scalar && - both operands are length-1 conditions
    if ( sum(c("start","end")%in%colnames(features))==2 && !circular ) {
        flipped <- features[,"start"]>features[,"end"]
        ends <- features[flipped,"start"]
        features[flipped,"start"] <- features[flipped,"end"]
        features[flipped,"end"] <- ends
    }
    ## chromosome of each feature
    chr <- features[,chrCol]
    ## map chromosome names to their numeric index in chrS order
    ## TODO: automate, if chromosomes are not numeric!?
    if ( !missing(chrMap) ) {
        chrIdx <- seq_along(chrMap)
        names(chrIdx) <- chrMap
        chr <- chrIdx[as.character(chr)]
    }
    ## is.numeric() returns a single logical, no any() required
    if ( !is.numeric(chr) )
        stop("chromosomes must be a numeric index; use chromosome name map with argument `chrMap'!")
    ## check for missing chromosome info and issue warning;
    ## remember indices to set the chromosome column to NA below
    ## TODO: check for missing info in other functions as well
    nachr <- numeric()
    if ( any(is.na(chr)) ) {
        nachr <- which(is.na(chr))
        warning("some chromosomes are not available (NA)")
    }
    ## convert to index: offset by chromosome start, shift reverse
    ## strand by total genome length; minus is loop-invariant
    minus <- strand%in%reverse
    for ( col in cols ) {
        features[,col] <- features[,col]+chrS[chr]
        features[minus,col] <- features[minus,col]+max(chrS)
    }
    ## all features now live on the single continuous "chromosome" 1
    features[,chrCol] <- 1
    if ( length(nachr)>0 )
        features[nachr,chrCol] <- NA
    features
}
#' Simple version of \code{\link{index2coor}} for single values
#'
#' Maps continuous index positions back to chromosome number,
#' chromosome-local coordinate and strand. Positions beyond the total
#' genome length \code{max(chrS)} are reverse-strand positions.
#' @param pos the continuous index position that will be mapped to
#' chromosome coordinates
#' @param chrS the chromosome index, indicating the start position
#' of each chromosome in the continuous index, derived from chromosome length
#' information
#' @param strands forward/reverse strand indicators
#' @return a matrix with columns \code{chr}, \code{coor} and \code{strand}
#' @export
idx2coor <- function(pos, chrS, strands=c(1,-1)) {
    coor <- cbind(chr=rep(1,length(pos)),coor=pos,strand=rep(NA,length(pos)))
    genome <- max(chrS) # total length; reverse strand is offset by this
    for ( i in 1:(length(chrS)-1) ) {
        ## frw strand: position falls in chromosome i's window
        current <- pos>chrS[i] & pos<=chrS[i+1]
        coor[current,"coor"] <- pos[current] - chrS[i]
        coor[current,"chr"] <- i
        coor[current,"strand"] <- strands[1]
        ## rev strand: same window, shifted by total genome length
        current <- pos>(chrS[i]+genome) & pos<=(chrS[i+1]+genome)
        ## FIX: assign the "coor" column explicitly; previously
        ## coor[current] <- ... indexed the whole matrix with a
        ## recycled logical vector, transiently clobbering all columns
        coor[current,"coor"] <- pos[current] - chrS[i] - genome
        coor[current,"chr"] <- i
        coor[current,"strand"] <- strands[2]
    }
    coor
}
#' get the chromosome from continuous index
#'
#' Reports the chromosome a continuous index position falls on;
#' positions beyond \code{max(chrS)} (reverse strand) are handled by a
#' second, genome-length-shifted lookup.
#' @param idx index position for which chromosome information is reported
#' @param chrS the chromosome index, indicating the start position
#' of each chromosome in the continuous index, derived from chromosome length
#' information
#' @return returns the chromosome number
#' @export
idx2chr <- function(idx,chrS) {
    ## vapply instead of sapply: type-stable, also for empty input;
    ## which(...)[1]-1 is always double (integer - double literal)
    chr <- vapply(idx, function(x) which(chrS>=x)[1]-1, numeric(1))
    if ( any(is.na(chr)) ) # not found: retry with reverse-strand offset
        chr[is.na(chr)] <- vapply(idx[is.na(chr)], function(x)
            which((chrS+max(chrS))>=x)[1]-1, numeric(1))
    chr
}
#' get the strand from continuous index
#'
#' Positions within the total genome length \code{max(chrS)} are on the
#' forward strand (1), positions beyond it on the reverse strand (-1).
#' @param idx index position for which strand information is reported
#' @param chrS the chromosome index, indicating the start position
#' of each chromosome in the continuous index, derived from chromosome length
#' information
#' @return returns the strand
#' @export
idx2str <- function(idx,chrS) {
    ## TRUE (beyond genome length) -> -1, FALSE -> 1; NA propagates
    1 - 2*(idx > max(chrS))
}
#' convert continuous index to chromosome coordinates (reverse of
#' \code{\link{coor2index}})
#'
#' Positions beyond the total genome length \code{max(chrS)} are
#' interpreted as reverse-strand positions (as produced by
#' \code{\link{coor2index}}) and mapped back accordingly.
#' @param features a table of chromosome features that must contain
#' the chromosome number (option \code{chrCol}), one or more chromosome
#' positions (option \code{cols}) and strand information (column
#' \code{strandCol}).
#' @param chrS the chromosome index, indicating the start position
#' of each chromosome in the continuous index, derived from chromosome length
#' information
#' @param chrMap a vector of chromosome names, in the same order as
#' \code{chrS}; if provided chromosome index will be mapped back to
#' chromosome name
#' @param cols names of the columns giving coordinates that will be mapped
#' to continuous index
#' @param chrCol name of the column that gives the chromosome number
#' @param relCol relative position mapping left/right -> upstream/downstream,
#' depending on strand
#' @param strandCol name of the column that gives forward/reverse strand
#' information
#' @param strands forward/reverse strand indicators
#' @return the input table with coordinate columns mapped back to
#' per-chromosome coordinates, and chromosome/strand columns filled in
#' @export
index2coor <- function(features, chrS, chrMap,
                       cols=c("start","end","coor"),
                       chrCol="chr", strandCol="strand", relCol,
                       strands=c(1,-1)) {
    ## restrict to coordinate columns present in the table
    cols <- cols[cols%in%colnames(features)]
    ## add relative position column:
    ## left -> upstream/downstream, right -> downstream/upstream
    cpcols <- cols
    rel2factor <- FALSE # stores whether a relative position column was factor
    if ( !missing(relCol) ) {
        if ( relCol%in%colnames(features) ) {
            cpcols <- c(cpcols, relCol)
            ## CONVERT TO CHARACTER (restored to factor at the end)
            if ( class(features[,relCol])=="factor" ) {
                features[,relCol] <- as.character(features[,relCol])
                rel2factor <- TRUE
            }
        } else
            warning("relative position column 'relCol' passed as, ",relCol,
                    "but not present in columns.")
    }
    ## keep an untouched copy of the index values: the loop below reads
    ## from `orig` and writes remapped values into `features`
    orig <- features[,cpcols,drop=FALSE]
    ## add chromosome and strand columns, if not present
    if ( !chrCol%in%colnames(features) )
        features <- cbind(chr=rep(NA,nrow(features)),features)
    if ( !strandCol%in%colnames(features) )
        features <- cbind(features,strand=rep(NA,nrow(features)))
    ## remap values back to original coordinates, one chromosome at a
    ## time; a feature's chromosome is decided by its first coordinate
    ## column (cols[1])
    for ( i in 1:(length(chrS)-1) ) {
        ## forward strand: index falls in chromosome i's window
        current <- orig[,cols[1]]>chrS[i] & orig[,cols[1]]<=chrS[i+1]
        for ( col in cols )
            features[current,col] <- orig[current,col] - chrS[i]
        features[current,chrCol] <- i
        features[current,strandCol] <- strands[1]
        ## relative position mapping left/right -> upstream/downstream
        if ( !missing(relCol) ) {
            tmpcol <- orig[current,relCol]
            tmpcol <- gsub("left","upstream", tmpcol)
            tmpcol <- gsub("right","downstream", tmpcol)
            features[current,relCol] <- tmpcol
        }
        ## reverse strand: same window, shifted by total genome length
        current <- orig[,cols[1]]>(chrS[i]+max(chrS)) &
            orig[,cols[1]]<=(chrS[i+1]+max(chrS))
        for ( col in cols )
            features[current,col] <- orig[current,col] - chrS[i] - max(chrS)
        features[current,chrCol] <- i
        features[current,strandCol] <- strands[2]
        ## relative position mapping left/right -> downstream/upstream
        ## (mirrored relative to the forward strand)
        if ( !missing(relCol) ) {
            tmpcol <- orig[current,relCol]
            tmpcol <- gsub("left","downstream", tmpcol)
            tmpcol <- gsub("right","upstream", tmpcol)
            features[current,relCol] <- tmpcol
        }
    }
    ## restore relative positions to factor if they came in as factor
    if ( rel2factor)
        features[,relCol] <- factor(features[,relCol])
    ## return to chromosome names
    if ( !missing(chrMap) ) {
        chrIdx <- 1:length(chrMap)
        names(chrIdx) <- chrMap
        features[,chrCol] <- chrMap[features[,chrCol]]
    }
    features
}
#' switches the strand information (reverse<->forward) of genomic
#' features with continuously indexed chromosome coordinates
#' @param features genomic features with continuously indexed
#' chromosome coordinates
#' @param chrS the chromosome index, indicating the start position
#' of each chromosome in the continuous index, derived from chromosome length
#' information
#' @param cols names of the columns holding the continuous index
#' @export
switchStrand <- function(features,chrS, cols=c("start","end","coor")) {
  ## keep only index columns present in `features`
  cols <- cols[cols%in%colnames(features)]
  ## snapshot of the original indices; drop=FALSE keeps a data.frame
  ## even when only a single index column is present (the unguarded
  ## `features[,cols]` collapsed to a vector and broke `orig[,col]`)
  orig <- features[,cols,drop=FALSE]
  ## forward-strand indices lie in (0, max(chrS)]; reverse-strand
  ## indices are offset by max(chrS)
  maxS <- max(chrS)
  for ( col in cols ) {
    ## forward -> reverse
    current <- orig[,col] <= maxS
    features[current,col] <- orig[current,col] + maxS
    ## reverse -> forward
    current <- orig[,col] > maxS
    features[current,col] <- orig[current,col] - maxS
  }
  features
}
#' align genome data at specified coordinates (e.g. TSS)
#' @param coors genome positions (chromosome, coordinate, strand)
#' at which data will be aligned
#' (TODO: allow start/end coors and set NA if beyond)
#' @param data genome data to be aligned; NOTE, that currently this
#' is required to be fully expanded matrix covering each chromosome position,
#' i.e. \code{nrow(data)==max(chrS)}
#' (TODO: allow non-expanded data)
#' @param dst upstream/downstream length to be aligned
## (TODO: allow different upstream and downstream ranges)
## (TODO: allow individual ranges)
#' @param chrS the chromosome index, indicating the start position
#' of each chromosome in \code{data}, derived from chromosome length
#' information, see function \code{\link{getChrSum}}
#' @param coorCols ordered string vector providing the column names
#' of coordinate columns to be used; must be of length 3 and provide in
#' order: chromosome number (refering to argument \code{chrS}), position
#' and strand (see argument \code{reverse})
#' @param reverse a vector of possible reverse strand indicators, all other
#' values in the strand column will be taken as forward strand!
## TODO: generalize for not fully expanded data w/o chrS
## TODO: allow different downstream and upstream ranges
#' @export
alignData <- function(coors, data, dst=500, chrS,
                      coorCols=c(chr="chr", position="coor", strand="strand"),
                      reverse=c("-",-1)) {
  ## pull the alignment coordinates out of `coors`
  pos  <- as.numeric(coors[,coorCols["position"]])
  chrn <- as.numeric(coors[,coorCols["chr"]])
  strn <- as.character(coors[,coorCols["strand"]])
  ## guard against already-indexed coordinates, eg. from coor2index(coors)
  if ( any(pos>max(chrS)) )
    stop(sum(pos>max(chrS)),
         " start coordinates are beyond chromosome length in index `chrS`")
  ## guard against data that does not cover the full genome
  if ( nrow(data)!=max(chrS) )
    stop("`data` rows (", nrow(data),
         ") do not cover the full chromosome length in index `chrS`")
  ## continuous index (row of `data`) of each alignment center
  center <- chrS[chrn] + pos
  ## one window of row indices per coordinate: center-dst .. center+dst
  idx <- outer(center, -dst:dst, FUN="+")
  ## flip windows that lie on the reverse strand
  isrev <- strn %in% reverse
  if ( any(isrev) )
    idx[isrev,] <- idx[isrev, ncol(idx):1, drop=FALSE]
  ## mask indices that run off the ends of their chromosome
  ## TODO: implement circular chromosomes!
  ## TODO: add warning, eg. if icoors were passed negative
  ## strand values are beyond chromosome ends
  lo <- chrS[chrn]
  hi <- chrS[chrn+1]
  for ( j in seq_len(ncol(idx)) ) {
    off <- idx[,j] <= lo | idx[,j] >= hi
    idx[off,j] <- NA
  }
  ## skip leading coordinate columns if `data` carries them
  firstcol <- if ( all(c("chr","coor") %in% colnames(data)) ) 3 else 1
  ## collect one aligned matrix per data column
  geneData <- list()
  for ( i in firstcol:ncol(data) )
    geneData[[length(geneData)+1]] <- t(apply(idx, 1, function(k) data[k,i]))
  names(geneData) <- colnames(data)[firstcol:ncol(data)]
  ## relative positions as column names, coordinate names as row names
  geneData <- lapply(geneData, function(m) {
    dimnames(m) <- list(rownames(coors), -dst:dst)
    m
  })
  geneData
}
## TODO: alignment on relative segment length
alignData_relative <- function(coors, data, dst=500, chrS,
                               coorCols=c(chr="chr", start="start", end="end",
                                          strand="strand"),
                               reverse=c("-",-1)) {
  ## segment coordinates; TODO: sort start<ends?
  starts <- as.numeric(coors[,coorCols["start"]])
  ## NOTE: segment ends are not used yet; the plan (see TODO above) is
  ## to derive relative coordinates over segment length and bin them
  ends <- as.numeric(coors[,coorCols["end"]])
  chrn <- as.numeric(coors[,coorCols["chr"]])
  strn <- as.character(coors[,coorCols["strand"]])
  ## continuous index (row of `data`) of each segment start
  center <- chrS[chrn] + starts
  ## one window of row indices per segment start
  ## TODO: should rev.strand be shifted by one?
  idx <- outer(center, -dst:dst, FUN="+")
  ## flip windows on the reverse strand
  isrev <- strn %in% reverse
  if ( any(isrev) )
    idx[isrev,] <- idx[isrev, ncol(idx):1, drop=FALSE]
  ## mask indices beyond chromosome ends
  ## TODO: implement circular chromosomes!
  lo <- chrS[chrn]
  hi <- chrS[chrn+1]
  for ( j in seq_len(ncol(idx)) ) {
    off <- idx[,j] <= lo | idx[,j] >= hi
    idx[off,j] <- NA
  }
  ## skip leading coordinate columns if `data` carries them
  firstcol <- if ( all(c("chr","coor") %in% colnames(data)) ) 3 else 1
  ## collect one aligned matrix per data column
  geneData <- list()
  for ( i in firstcol:ncol(data) )
    geneData[[length(geneData)+1]] <- t(apply(idx, 1, function(k) data[k,i]))
  names(geneData) <- colnames(data)[firstcol:ncol(data)]
  ## relative positions as column names, coordinate names as row names
  geneData <- lapply(geneData, function(m) {
    dimnames(m) <- list(rownames(coors), -dst:dst)
    m
  })
  geneData
}
#' export internal coordinate format to bed file format
#' @param coor genomic coordinates
#' @param file optional name for bed output file
#' @param coors column names of coordinate columns
#' @param reverse reverse strand characters
#' @param name column name for bed file name column (column 4)
#' @param score column name for bed file score column (column 5)
#' @param prefix prefix to be added to the name and score columns (column 4,5),
#' used by segmenTools interface to bedtools for unique names.
#' @param verb verbosity level, 0: silent
#' @seealso bed2coor
#' @export
coor2bed <- function(coor, file,
                     coors=c(chr="chr", start="start", end="end",
                             strand="strand"),
                     reverse=c("-",-1), name, score, prefix, verb=1) {
  ## add a default name column (id<row>) if none was specified
  if ( missing(name) ) {
    rm <- which(colnames(coor)=="name") # rm existing name column
    if ( length(rm)>0 ) coor <- coor[,-rm]
    coor <- cbind(coor, name=paste0("id",seq_len(nrow(coor))))
    name <- "name"
  }
  ## add a default score column (all 0) if none was specified
  if ( missing(score) ) {
    rm <- which(colnames(coor)=="score") # rm existing score column
    if ( length(rm)>0 ) coor <- coor[,-rm]
    coor <- cbind(coor, score=rep(0, nrow(coor)))
    score <- "score"
  }
  ## sort start/end such that start <= end
  starts <- apply(coor[,coors[c("start","end")]], 1, min)
  ends <- apply(coor[,coors[c("start","end")]], 1, max)
  coor[,coors["start"]] <- starts
  coor[,coors["end"]] <- ends
  ## convert strand column to bed +/-; use the configured strand column
  ## (previously hard-coded "strand", breaking custom column names)
  isrev <- coor[,coors["strand"]] %in% reverse
  coor[ isrev,coors["strand"]] <- "-"
  coor[!isrev,coors["strand"]] <- "+"
  ## order by start within chromosome
  coor <- coor[order(coor[,coors["start"]]), ]
  coor <- coor[order(coor[,coors["chr"]]), ]
  ## convert starts to 0-based
  ## NOTE: end is non-inclusive in bed-format, and
  ## thus not corrected! Use the configured start column
  ## (previously hard-coded "start").
  coor[,coors["start"]] <- coor[,coors["start"]]-1
  ## add prefix, used by the bedtools interface for unique names
  if ( !missing(prefix) ) {
    coor[,name] <- paste0(prefix, coor[,name])
    coor[,score] <- paste0(prefix, coor[,score])
  }
  ## chromosomes must begin with chr; zero-pad numeric chromosome IDs
  ## (character IDs previously made sprintf("%02d",...) error)
  chrv <- coor[,coors["chr"]]
  if ( is.numeric(chrv) ) chrv <- sprintf("%02d", chrv)
  coor[,coors["chr"]] <- ifelse(grepl("^chr", chrv), chrv, paste0("chr", chrv))
  bed <- coor[,c(coors[c("chr","start","end")], name, score,coors["strand"])]
  if( !missing(file) ) {
    if ( verb>0 )
      cat(paste("writing bed file:", file,"\n"))
    ## avoid scientific notation in coordinates; restore the caller's
    ## scipen option on exit instead of clobbering it globally
    oopt <- options(scipen=999)
    on.exit(options(oopt), add=TRUE)
    write.table(x=bed, file=file, sep="\t",
                quote=FALSE, row.names=FALSE, col.names=FALSE)
  }
  invisible(bed)
}
|
#' R Markdown format for Distill articles
#'
#' Scientific and technical writing, native to the web.
#'
#' Distill articles feature attractive, reader-friendly typography, flexible
#' layout options for visualizations, and full support for footnotes and
#' citations.
#'
#' @inheritParams rmarkdown::html_document
#'
#' @param toc_float Float the table of contents to the left when the article
#' is displayed at widths > 1000px. If set to `FALSE` or the width is less
#' than 1000px the table of contents will be placed above the article body.
#' @param smart Produce typographically correct output, converting straight
#' quotes to curly quotes, `---` to em-dashes, `--` to en-dashes, and
#' `...` to ellipses.
#' @param code_folding Include code blocks hidden, and allow users to
#' optionally display the code by clicking a "Show code" button just above
#' the output. Pass a character vector to customize the text of the
#' "Show code" button. You can also specify `code_folding` as chunk option
#' for per-chunk folding behavior.
#' @param highlight Syntax highlighting style. Supported styles include
#' "default", "rstudio", "tango", "pygments", "kate", "monochrome", "espresso",
#' "zenburn", "breezedark", and "haddock". Pass NULL to prevent syntax
#' highlighting.
#' @param highlight_downlit Use the \pkg{downlit} package to highlight
#' R code (including providing hyperlinks to function documentation).
#' @param theme CSS file with theme variable definitions
#'
#' @import rmarkdown
#' @import htmltools
#' @import downlit
#'
#' @export
distill_article <- function(toc = FALSE,
                            toc_depth = 3,
                            toc_float = TRUE,
                            fig_width = 6.5,
                            fig_height = 4,
                            fig_retina = 2,
                            fig_caption = TRUE,
                            dev = "png",
                            smart = TRUE,
                            code_folding = FALSE,
                            self_contained = TRUE,
                            highlight = "default",
                            highlight_downlit = TRUE,
                            mathjax = "default",
                            extra_dependencies = NULL,
                            theme = NULL,
                            css = NULL,
                            includes = NULL,
                            keep_md = FALSE,
                            lib_dir = NULL,
                            md_extensions = NULL,
                            pandoc_args = NULL,
                            ...) {
  # validate that we have pandoc 2
  validate_pandoc_version()
  # build pandoc args
  args <- c("--standalone")
  # table of contents
  args <- c(args, pandoc_toc_args(toc, toc_depth))
  # toc_float
  if (toc_float) {
    args <- c(args, pandoc_variable_arg("toc-float", "1"))
  }
  # add highlighting
  args <- c(args, distill_highlighting_args(highlight))
  # turn off downlit if there is no highlighting at all
  if (is.null(highlight))
    highlight_downlit <- FALSE
  # add template
  args <- c(args, "--template",
            pandoc_path_arg(distill_resource("default.html")))
  # use link citations (so we can do citation conversion)
  args <- c(args, "--metadata=link-citations:true")
  # establish knitr options
  knitr_options <- knitr_options_html(fig_width = fig_width,
                                      fig_height = fig_height,
                                      fig_retina = fig_retina,
                                      keep_md = keep_md,
                                      dev = dev)
  # echo code only when code folding is off; folded code is re-enabled
  # per-chunk by the code_folding opts hook below
  knitr_options$opts_chunk$echo <- identical(code_folding, FALSE)
  knitr_options$opts_chunk$warning <- FALSE
  knitr_options$opts_chunk$message <- FALSE
  knitr_options$opts_chunk$comment <- NA
  knitr_options$opts_chunk$R.options <- list(width = 70)
  knitr_options$opts_chunk$code_folding <- code_folding
  knitr_options$opts_knit$bookdown.internal.label <- TRUE
  knitr_options$opts_hooks <- list()
  knitr_options$opts_hooks$preview <- knitr_preview_hook
  # chunks with a code_folding option must still echo their source so
  # there is something to fold
  knitr_options$opts_hooks$code_folding <- function(options) {
    if (!identical(code_folding, FALSE)) {
      options[["echo"]] <- TRUE
    }
    options
  }
  knitr_options$knit_hooks <- knit_hooks(downlit = highlight_downlit)
  # shared variables
  # (assigned from within post_knit() via `<<-`, then read later by
  # pre_processor and the post-processor)
  site_config <- NULL
  encoding <- NULL
  # metadata_includes are includes derived from this file's metadata
  # (as opposed to site level includes which we already process)
  metadata_includes <- list()
  # post-knit
  post_knit <- function(metadata, input_file, runtime, encoding, ...) {
    # save encoding
    encoding <<- encoding
    # run R code in metadata
    metadata <- eval_metadata(metadata)
    # determine metadata_includes
    metadata_output <- metadata[["output"]]
    if (is.list(metadata_output)) {
      metadata_distill <- metadata_output[["distill::distill_article"]]
      if (is.list(metadata_distill)) {
        metadata_includes <<- metadata_distill[["includes"]]
      }
    }
    # pandoc args
    args <- c()
    # additional user css
    for (css_file in css)
      args <- c(args, "--css", pandoc_path_arg(css_file))
    # compute knitr output file
    output_file <- file_with_meta_ext(input_file, "knit", "md")
    # normalize site config and see if we are in a collection
    in_collection <- FALSE
    # NOTE: the closure variable `site_config` is assigned the result of
    # calling the `site_config()` function of the same name
    site_config <<- site_config(input_file, encoding)
    if (is.null(site_config)) {
      # default site_config to empty
      site_config <<- list()
      # set in_collection flag
      in_collection <- !is.null(find_site_dir(input_file))
    }
    # provide a default date of today for in_collection
    if (is.null(metadata[["date"]]) && in_collection) {
      metadata$date <- date_today()
      args <- c(args, pandoc_variable_arg("date", metadata$date))
    }
    # make copy of metdata before transforming
    embedable_metadata <- metadata
    # fixup author for embedding
    embedable_metadata$author <- fixup_author(embedable_metadata$author)
    # transform configuration
    transformed <- transform_configuration(
      file = output_file,
      site_config = site_config,
      collection_config = NULL,
      metadata = metadata,
      auto_preview = !self_contained
    )
    # NOTE(review): plain `<-` creates post_knit-local bindings here and
    # does not update the shared `site_config` above (unlike the `<<-`
    # assignments earlier) -- confirm this is intended
    site_config <- transformed$site_config
    metadata <- transformed$metadata
    # pickup canonical and citation urls
    # NOTE(review): these two lines are self-assignment no-ops;
    # presumably they were meant to copy from `metadata` -- verify
    embedable_metadata$citation_url <- embedable_metadata$citation_url
    embedable_metadata$canonical_url <- embedable_metadata$canonical_url
    # create metadata json
    metadata_json <- embedded_metadata(embedable_metadata)
    # list of html dependencies (if we navigation then we get jquery
    # from site dependencies so don't include it here)
    html_deps <- list()
    if (!have_navigation(site_config)) {
      html_deps <- list(html_dependency_jquery())
    } else {
      html_deps <- list()
    }
    html_deps <- append(html_deps, list(
      html_dependency_popper(),
      html_dependency_tippy(),
      html_dependency_anchor(),
      html_dependency_bowser(),
      html_dependency_webcomponents(),
      html_dependency_distill()
    ))
    # resolve listing
    listing <- list()
    # special handling for listing pages
    if (!is.null(metadata$listing)) {
      # can be either a character vector with a collection name or a list
      # of articles by collection
      if (is.list(metadata$listing))
        listing <- resolve_yaml_listing(input_file, site_config, metadata, metadata$listing)
      else
        listing <- resolve_listing(input_file, site_config, metadata)
    }
    if (length(listing) > 0) {
      # indicate we are are using a listing layout
      args <- c(args, pandoc_variable_arg("layout", "listing"))
      # forward feed_url if we generated a feed
      if (!is.null(listing$feed))
        args <- c(args,
          pandoc_variable_arg("feed", url_path(site_config$base_url, listing$feed))
        )
    }
    # add html dependencies
    knitr::knit_meta_add(html_deps)
    # add site related dependencies
    ensure_site_dependencies(site_config, dirname(input_file))
    # resolve theme from site if it's not specified in the article
    if ((is.null(theme) || !file.exists(theme))) {
      theme <- theme_from_site_config(find_site_dir(input_file), site_config)
    }
    # header includes: distill then user
    in_header <- c(metadata_in_header(site_config, metadata, self_contained),
                   citation_references_in_header(input_file, metadata$bibliography),
                   metadata_json,
                   manifest_in_header(site_config, input_file, metadata, self_contained),
                   navigation_in_header_file(site_config),
                   distill_in_header_file(theme))
    # before body includes: distill then user
    before_body <- c(front_matter_before_body(metadata),
                     navigation_before_body_file(dirname(input_file), site_config),
                     site_before_body_file(site_config),
                     metadata_includes$before_body,
                     listing$html)
    # after body includes: user then distill
    after_body <- c(metadata_includes$after_body,
                    site_after_body_file(site_config),
                    appendices_after_body_file(input_file, site_config, metadata),
                    navigation_after_body_file(dirname(input_file), site_config))
    # populate args
    args <- c(args, pandoc_include_args(
      in_header = in_header,
      before_body = before_body,
      after_body = after_body
    ))
    # return args
    args
  }
  # pre-processor: contributes site-level and metadata-level header
  # includes (reads the shared state captured by post_knit above)
  pre_processor <- function(yaml_front_matter, utf8_input, runtime, knit_meta,
                            files_dir, output_dir, ...) {
    pandoc_include_args(in_header = c(site_in_header_file(site_config),
                                      metadata_includes$in_header))
  }
  on_exit <- function() {
    validate_rstudio_version()
  }
  # return format
  output_format(
    knitr = knitr_options,
    pandoc = pandoc_options(to = "html5",
                            from = from_rmarkdown(fig_caption, md_extensions),
                            args = args),
    keep_md = keep_md,
    clean_supporting = self_contained,
    post_knit = post_knit,
    pre_processor = pre_processor,
    post_processor = distill_article_post_processor(function() encoding, self_contained),
    on_exit = on_exit,
    base_format = html_document_base(
      smart = smart,
      self_contained = self_contained,
      lib_dir = lib_dir,
      mathjax = mathjax,
      template = "default",
      pandoc_args = pandoc_args,
      bootstrap_compatible = FALSE,
      extra_dependencies = extra_dependencies,
      ...
    )
  )
}
distill_highlighting_args <- function(highlight) {
  # The default highlighting is a custom pandoc JSON theme file (see
  # https://pandoc.org/MANUAL.html#syntax-highlighting) based on
  # https://github.com/ericwbailey/a11y-syntax-highlighting
  #
  # It started from `pandoc --print-highlight-style haddock` (the
  # closest pandoc theme to textmate), with these changes applied to
  # create the RStudio textmate version:
  #
  # https://github.com/rstudio/distill/compare/02b241083b8ca5cda90954c6c37e9f11bf830b2c...13fb0f6b34e9d04df0bd24a02980e29105a8f68d#diff-f088084fe658ee281215b486b2f18dab
  #
  # All available pandoc highlighting tokens are enumerated here:
  #
  # https://github.com/jgm/skylighting/blob/a1d02a0db6260c73aaf04aae2e6e18b569caacdc/skylighting-core/src/Skylighting/Format/HTML.hs#L117-L147
  #
  fallback <- distill_resource("arrow.theme")
  # "rstudio" is an alias for the bundled RStudio textmate theme file
  style <- if (identical(highlight, "rstudio")) {
    distill_resource("rstudio.theme")
  } else {
    highlight
  }
  rmarkdown::pandoc_highlight_args(style, fallback)
}
knitr_preview_hook <- function(options) {
  # chunks marked preview=TRUE get their output tagged so it can be
  # picked up as the article preview
  if (!isTRUE(options$preview)) {
    return(options)
  }
  options$out.extra <- c(options$out.extra, "data-distill-preview=1")
  options
}
knit_hooks <- function(downlit) {
  # snapshot knitr's current hooks and restore them on exit, so that
  # grabbing the markdown defaults below has no lasting side effect
  previous_hooks <- knitr::knit_hooks$get()
  on.exit(knitr::knit_hooks$restore(previous_hooks), add = TRUE)
  knitr::render_markdown()
  default_chunk_hook <- knitr::knit_hooks$get("chunk")
  default_source_hook <- knitr::knit_hooks$get("source")
  # chunk hook: wrap the default chunk output in a layout div
  chunk_hook <- function(x, options) {
    # apply default layout
    if (is.null(options$layout)) {
      options$layout <- "l-body"
    }
    # render with the default hook, then mirror its leading
    # indentation on the enclosing div
    output <- default_chunk_hook(x, options)
    pad_chars <- nchar(output) - nchar(sub("^ +", "", output))
    padding <- paste(rep(' ', pad_chars), collapse = '')
    paste0(
      padding, '<div class="layout-chunk" data-layout="', options$layout, '">\n',
      output, '\n',
      padding, '\n',
      padding, '</div>\n'
    )
  }
  # source hook: downlit highlighting plus optional code folding
  source_hook <- function(x, options) {
    code_folding <- not_null(options[["code_folding"]], FALSE)
    if (downlit && options$engine == "R") {
      code <- highlight(paste0(x, "\n", collapse = ""),
                        classes_pandoc(),
                        pre_class = NULL)
      if (is.na(code)) {
        # downlit could not highlight this chunk; fall back
        x <- default_source_hook(x, options)
      } else {
        x <- paste0("<div class=\"sourceCode\">",
                    "<pre class=\"sourceCode r\">",
                    "<code class=\"sourceCode r\">",
                    code,
                    "</code></pre></div>")
        x <- paste0(x, "\n")
      }
    } else {
      x <- default_source_hook(x, options)
    }
    if (!identical(code_folding, FALSE)) {
      # TRUE means default button text; anything else is custom text
      if (identical(code_folding, TRUE)) {
        code_folding <- "Show code"
      } else {
        code_folding <- as.character(code_folding)
      }
      x <- paste0("<details>\n<summary>", code_folding ,"</summary>\n", x, "\n</details>")
    }
    x
  }
  hooks <- list(chunk = chunk_hook, source = source_hook)
  if (downlit) {
    # inject a fake empty code block at the end of the document so
    # pandoc emits its highlighting css even when downlit handled
    # every chunk
    hooks$document <- function(x, options) {
      c(x, "```{.r .distill-force-highlighting-css}", "```")
    }
  }
  hooks
}
validate_pandoc_version <- function() {
  # distill relies on pandoc 2.x features; bail out early when present
  if (pandoc_available("2.0")) {
    return(invisible(NULL))
  }
  # point RStudio users at an IDE update (which bundles pandoc),
  # everyone else at pandoc itself
  msg <- if (!is.null(rstudio_version())) {
    paste("Distill requires RStudio v1.2 or greater.",
          "Please update at:",
          "https://www.rstudio.com/rstudio/download/preview/")
  } else {
    paste("Distill requires Pandoc v2.0 or greater",
          "Please update at:",
          "https://github.com/jgm/pandoc/releases/latest")
  }
  stop(msg, call. = FALSE)
}
distill_in_header <- function(theme = NULL) {
  # render the distill in-header HTML (plus optional theme) to text
  header_html <- distill_in_header_html(theme)
  doRenderTags(header_html)
}
distill_in_header_file <- function(theme = NULL) {
  # write the distill in-header HTML (plus optional theme) to a file
  header_html <- distill_in_header_html(theme)
  html_file(header_html)
}
distill_in_header_html <- function(theme = NULL) {
  # distill template html bundled with the package
  template_path <- system.file(
    "rmarkdown/templates/distill_article/resources/distill.html",
    package = "distill"
  )
  distill_html <- html_from_file(template_path)
  # theme css (if any) follows the distill html
  placeholder_html("distill", distill_html, theme_in_header_html(theme))
}
theme_in_header_html <- function(theme) {
  # no theme, no css
  if (is.null(theme)) {
    return(NULL)
  }
  # base variable defaults, then the theme's overrides, then base styles
  tagList(
    includeCSS(distill_resource("base-variables.css")),
    includeCSS(theme),
    includeCSS(distill_resource("base-style.css"))
  )
}
theme_in_header_file <- function(theme) {
  # write the theme header HTML (NULL when no theme) to a file
  theme_html <- theme_in_header_html(theme)
  html_file(theme_html)
}
|
/R/distill_article.R
|
permissive
|
rmflight/distill
|
R
| false
| false
| 15,831
|
r
|
#' R Markdown format for Distill articles
#'
#' Scientific and technical writing, native to the web.
#'
#' Distill articles feature attractive, reader-friendly typography, flexible
#' layout options for visualizations, and full support for footnotes and
#' citations.
#'
#' @inheritParams rmarkdown::html_document
#'
#' @param toc_float Float the table of contents to the left when the article
#' is displayed at widths > 1000px. If set to `FALSE` or the width is less
#' than 1000px the table of contents will be placed above the article body.
#' @param smart Produce typographically correct output, converting straight
#' quotes to curly quotes, `---` to em-dashes, `--` to en-dashes, and
#' `...` to ellipses.
#' @param code_folding Include code blocks hidden, and allow users to
#' optionally display the code by clicking a "Show code" button just above
#' the output. Pass a character vector to customize the text of the
#' "Show code" button. You can also specify `code_folding` as chunk option
#' for per-chunk folding behavior.
#' @param highlight Syntax highlighting style. Supported styles include
#' "default", "rstudio", "tango", "pygments", "kate", "monochrome", "espresso",
#' "zenburn", "breezedark", and "haddock". Pass NULL to prevent syntax
#' highlighting.
#' @param highlight_downlit Use the \pkg{downlit} package to highlight
#' R code (including providing hyperlinks to function documentation).
#' @param theme CSS file with theme variable definitions
#'
#' @import rmarkdown
#' @import htmltools
#' @import downlit
#'
#' @export
distill_article <- function(toc = FALSE,
                            toc_depth = 3,
                            toc_float = TRUE,
                            fig_width = 6.5,
                            fig_height = 4,
                            fig_retina = 2,
                            fig_caption = TRUE,
                            dev = "png",
                            smart = TRUE,
                            code_folding = FALSE,
                            self_contained = TRUE,
                            highlight = "default",
                            highlight_downlit = TRUE,
                            mathjax = "default",
                            extra_dependencies = NULL,
                            theme = NULL,
                            css = NULL,
                            includes = NULL,
                            keep_md = FALSE,
                            lib_dir = NULL,
                            md_extensions = NULL,
                            pandoc_args = NULL,
                            ...) {
  # validate that we have pandoc 2
  validate_pandoc_version()
  # build pandoc args
  args <- c("--standalone")
  # table of contents
  args <- c(args, pandoc_toc_args(toc, toc_depth))
  # toc_float
  if (toc_float) {
    args <- c(args, pandoc_variable_arg("toc-float", "1"))
  }
  # add highlighting
  args <- c(args, distill_highlighting_args(highlight))
  # turn off downlit if there is no highlighting at all
  if (is.null(highlight))
    highlight_downlit <- FALSE
  # add template
  args <- c(args, "--template",
            pandoc_path_arg(distill_resource("default.html")))
  # use link citations (so we can do citation conversion)
  args <- c(args, "--metadata=link-citations:true")
  # establish knitr options
  knitr_options <- knitr_options_html(fig_width = fig_width,
                                      fig_height = fig_height,
                                      fig_retina = fig_retina,
                                      keep_md = keep_md,
                                      dev = dev)
  # echo code only when code folding is off; folded code is re-enabled
  # per-chunk by the code_folding opts hook below
  knitr_options$opts_chunk$echo <- identical(code_folding, FALSE)
  knitr_options$opts_chunk$warning <- FALSE
  knitr_options$opts_chunk$message <- FALSE
  knitr_options$opts_chunk$comment <- NA
  knitr_options$opts_chunk$R.options <- list(width = 70)
  knitr_options$opts_chunk$code_folding <- code_folding
  knitr_options$opts_knit$bookdown.internal.label <- TRUE
  knitr_options$opts_hooks <- list()
  knitr_options$opts_hooks$preview <- knitr_preview_hook
  # chunks with a code_folding option must still echo their source so
  # there is something to fold
  knitr_options$opts_hooks$code_folding <- function(options) {
    if (!identical(code_folding, FALSE)) {
      options[["echo"]] <- TRUE
    }
    options
  }
  knitr_options$knit_hooks <- knit_hooks(downlit = highlight_downlit)
  # shared variables
  # (assigned from within post_knit() via `<<-`, then read later by
  # pre_processor and the post-processor)
  site_config <- NULL
  encoding <- NULL
  # metadata_includes are includes derived from this file's metadata
  # (as opposed to site level includes which we already process)
  metadata_includes <- list()
  # post-knit
  post_knit <- function(metadata, input_file, runtime, encoding, ...) {
    # save encoding
    encoding <<- encoding
    # run R code in metadata
    metadata <- eval_metadata(metadata)
    # determine metadata_includes
    metadata_output <- metadata[["output"]]
    if (is.list(metadata_output)) {
      metadata_distill <- metadata_output[["distill::distill_article"]]
      if (is.list(metadata_distill)) {
        metadata_includes <<- metadata_distill[["includes"]]
      }
    }
    # pandoc args
    args <- c()
    # additional user css
    for (css_file in css)
      args <- c(args, "--css", pandoc_path_arg(css_file))
    # compute knitr output file
    output_file <- file_with_meta_ext(input_file, "knit", "md")
    # normalize site config and see if we are in a collection
    in_collection <- FALSE
    # NOTE: the closure variable `site_config` is assigned the result of
    # calling the `site_config()` function of the same name
    site_config <<- site_config(input_file, encoding)
    if (is.null(site_config)) {
      # default site_config to empty
      site_config <<- list()
      # set in_collection flag
      in_collection <- !is.null(find_site_dir(input_file))
    }
    # provide a default date of today for in_collection
    if (is.null(metadata[["date"]]) && in_collection) {
      metadata$date <- date_today()
      args <- c(args, pandoc_variable_arg("date", metadata$date))
    }
    # make copy of metdata before transforming
    embedable_metadata <- metadata
    # fixup author for embedding
    embedable_metadata$author <- fixup_author(embedable_metadata$author)
    # transform configuration
    transformed <- transform_configuration(
      file = output_file,
      site_config = site_config,
      collection_config = NULL,
      metadata = metadata,
      auto_preview = !self_contained
    )
    # NOTE(review): plain `<-` creates post_knit-local bindings here and
    # does not update the shared `site_config` above (unlike the `<<-`
    # assignments earlier) -- confirm this is intended
    site_config <- transformed$site_config
    metadata <- transformed$metadata
    # pickup canonical and citation urls
    # NOTE(review): these two lines are self-assignment no-ops;
    # presumably they were meant to copy from `metadata` -- verify
    embedable_metadata$citation_url <- embedable_metadata$citation_url
    embedable_metadata$canonical_url <- embedable_metadata$canonical_url
    # create metadata json
    metadata_json <- embedded_metadata(embedable_metadata)
    # list of html dependencies (if we navigation then we get jquery
    # from site dependencies so don't include it here)
    html_deps <- list()
    if (!have_navigation(site_config)) {
      html_deps <- list(html_dependency_jquery())
    } else {
      html_deps <- list()
    }
    html_deps <- append(html_deps, list(
      html_dependency_popper(),
      html_dependency_tippy(),
      html_dependency_anchor(),
      html_dependency_bowser(),
      html_dependency_webcomponents(),
      html_dependency_distill()
    ))
    # resolve listing
    listing <- list()
    # special handling for listing pages
    if (!is.null(metadata$listing)) {
      # can be either a character vector with a collection name or a list
      # of articles by collection
      if (is.list(metadata$listing))
        listing <- resolve_yaml_listing(input_file, site_config, metadata, metadata$listing)
      else
        listing <- resolve_listing(input_file, site_config, metadata)
    }
    if (length(listing) > 0) {
      # indicate we are are using a listing layout
      args <- c(args, pandoc_variable_arg("layout", "listing"))
      # forward feed_url if we generated a feed
      if (!is.null(listing$feed))
        args <- c(args,
          pandoc_variable_arg("feed", url_path(site_config$base_url, listing$feed))
        )
    }
    # add html dependencies
    knitr::knit_meta_add(html_deps)
    # add site related dependencies
    ensure_site_dependencies(site_config, dirname(input_file))
    # resolve theme from site if it's not specified in the article
    if ((is.null(theme) || !file.exists(theme))) {
      theme <- theme_from_site_config(find_site_dir(input_file), site_config)
    }
    # header includes: distill then user
    in_header <- c(metadata_in_header(site_config, metadata, self_contained),
                   citation_references_in_header(input_file, metadata$bibliography),
                   metadata_json,
                   manifest_in_header(site_config, input_file, metadata, self_contained),
                   navigation_in_header_file(site_config),
                   distill_in_header_file(theme))
    # before body includes: distill then user
    before_body <- c(front_matter_before_body(metadata),
                     navigation_before_body_file(dirname(input_file), site_config),
                     site_before_body_file(site_config),
                     metadata_includes$before_body,
                     listing$html)
    # after body includes: user then distill
    after_body <- c(metadata_includes$after_body,
                    site_after_body_file(site_config),
                    appendices_after_body_file(input_file, site_config, metadata),
                    navigation_after_body_file(dirname(input_file), site_config))
    # populate args
    args <- c(args, pandoc_include_args(
      in_header = in_header,
      before_body = before_body,
      after_body = after_body
    ))
    # return args
    args
  }
  # pre-processor: contributes site-level and metadata-level header
  # includes (reads the shared state captured by post_knit above)
  pre_processor <- function(yaml_front_matter, utf8_input, runtime, knit_meta,
                            files_dir, output_dir, ...) {
    pandoc_include_args(in_header = c(site_in_header_file(site_config),
                                      metadata_includes$in_header))
  }
  on_exit <- function() {
    validate_rstudio_version()
  }
  # return format
  output_format(
    knitr = knitr_options,
    pandoc = pandoc_options(to = "html5",
                            from = from_rmarkdown(fig_caption, md_extensions),
                            args = args),
    keep_md = keep_md,
    clean_supporting = self_contained,
    post_knit = post_knit,
    pre_processor = pre_processor,
    post_processor = distill_article_post_processor(function() encoding, self_contained),
    on_exit = on_exit,
    base_format = html_document_base(
      smart = smart,
      self_contained = self_contained,
      lib_dir = lib_dir,
      mathjax = mathjax,
      template = "default",
      pandoc_args = pandoc_args,
      bootstrap_compatible = FALSE,
      extra_dependencies = extra_dependencies,
      ...
    )
  )
}
distill_highlighting_args <- function(highlight) {
  # The default highlighting style is a custom pandoc theme based on
  # https://github.com/ericwbailey/a11y-syntax-highlighting
  # It's a JSON theme file as described here:
  #
  #   https://pandoc.org/MANUAL.html#syntax-highlighting
  #
  # The theme was created by starting from
  # `pandoc --print-highlight-style haddock` (the closest pandoc theme to
  # textmate) and then applying these changes for the RStudio textmate look:
  #
  #   https://github.com/rstudio/distill/compare/02b241083b8ca5cda90954c6c37e9f11bf830b2c...13fb0f6b34e9d04df0bd24a02980e29105a8f68d#diff-f088084fe658ee281215b486b2f18dab
  #
  # All available pandoc highlighting tokens are enumerated here:
  #
  #   https://github.com/jgm/skylighting/blob/a1d02a0db6260c73aaf04aae2e6e18b569caacdc/skylighting-core/src/Skylighting/Format/HTML.hs#L117-L147
  #
  default_theme <- distill_resource("arrow.theme")
  # "rstudio" is shorthand for the theme file embedded in the package
  if (identical(highlight, "rstudio")) {
    highlight <- distill_resource("rstudio.theme")
  }
  rmarkdown::pandoc_highlight_args(highlight, default_theme)
}
knitr_preview_hook <- function(options) {
  # Mark preview chunks in the rendered output: when a chunk sets
  # `preview = TRUE`, append a `data-distill-preview=1` attribute to its
  # `out.extra` chunk options. All other chunks pass through unchanged.
  if (!isTRUE(options$preview)) {
    return(options)
  }
  options$out.extra <- c(options$out.extra, "data-distill-preview=1")
  options
}
# Build the knitr hooks used when rendering a distill article.
#
# Returns a list with:
#   - `chunk`: wraps the default chunk output in a
#     <div class="layout-chunk" data-layout="..."> whose attribute carries
#     the chunk's `layout` option (defaulting to "l-body").
#   - `source`: when `downlit` is TRUE and the chunk engine is R, runs
#     downlit's highlight() on the source; also implements the
#     `code_folding` chunk option by wrapping output in <details>/<summary>.
#   - `document` (only when `downlit` is TRUE): appends an empty code block
#     so pandoc still emits its highlighting CSS.
#
# downlit: flag -- use downlit for syntax highlighting of R chunks.
knit_hooks <- function(downlit) {
  # capture the default chunk and source hooks; restore them on exit so the
  # render_markdown() call above has no lasting side effect on knitr state
  previous_hooks <- knitr::knit_hooks$get()
  on.exit(knitr::knit_hooks$restore(previous_hooks), add = TRUE)
  knitr::render_markdown()
  default_chunk_hook <- knitr::knit_hooks$get("chunk")
  default_source_hook <- knitr::knit_hooks$get("source")
  # apply chunk hook
  hooks <- list(
    chunk = function(x, options) {
      # apply default layout
      if (is.null(options$layout))
        options$layout <- "l-body"
      # apply default hook and determine padding (number of leading spaces
      # on the default hook's output, mirrored onto the wrapper div)
      output <- default_chunk_hook(x, options)
      pad_chars <- nchar(output) - nchar(sub("^ +", "", output))
      padding <- paste(rep(' ', pad_chars), collapse = '')
      # enclose default output in div (with appropriate padding)
      paste0(
        padding, '<div class="layout-chunk" data-layout="', options$layout, '">\n',
        output, '\n',
        padding, '\n',
        padding, '</div>\n'
      )
    }
  )
  # source hook to do downlit processing and code_folding
  hooks$source <- function(x, options) {
    code_folding <- not_null(options[["code_folding"]], FALSE)
    if (downlit && options$engine == "R") {
      code <- highlight(paste0(x, "\n", collapse = ""),
                        classes_pandoc(),
                        pre_class = NULL)
      # highlight() returns NA when it cannot process the code; fall back
      # to the default source hook in that case
      if (is.na(code)) {
        x <- default_source_hook(x, options)
      } else {
        x <- paste0("<div class=\"sourceCode\">",
                    "<pre class=\"sourceCode r\">",
                    "<code class=\"sourceCode r\">",
                    code,
                    "</code></pre></div>")
        x <- paste0(x, "\n")
      }
    } else {
      x <- default_source_hook(x, options)
    }
    # code_folding: TRUE uses the default "Show code" label, any other
    # non-FALSE value is used as the label text itself
    if (!identical(code_folding, FALSE)) {
      if (identical(code_folding, TRUE)) {
        code_folding <- "Show code"
      } else {
        code_folding <- as.character(code_folding)
      }
      x <- paste0("<details>\n<summary>", code_folding, "</summary>\n", x, "\n</details>")
    }
    x
  }
  if (downlit) {
    # document hook to inject a fake empty code block at the end of the
    # document (to force pandoc to include highlighting CSS, which it
    # might not do if all chunks are handled by downlit)
    hooks$document <- function(x, options) {
      c(x, "```{.r .distill-force-highlighting-css}", "```")
    }
  }
  # return hooks
  hooks
}
# Ensure pandoc >= 2.0 is available, stopping with an installation hint
# otherwise. The hint points at an RStudio update when running inside
# RStudio (which bundles pandoc), and at the pandoc releases page otherwise.
#
# Fix: the original assigned `msg` redundantly both outside and inside each
# `if`/`else` branch; the branch value is now assigned exactly once.
validate_pandoc_version <- function() {
  if (!pandoc_available("2.0")) {
    msg <- if (!is.null(rstudio_version())) {
      paste("Distill requires RStudio v1.2 or greater.",
            "Please update at:",
            "https://www.rstudio.com/rstudio/download/preview/")
    } else {
      paste("Distill requires Pandoc v2.0 or greater",
            "Please update at:",
            "https://github.com/jgm/pandoc/releases/latest")
    }
    stop(msg, call. = FALSE)
  }
}
# Render the distill in-header HTML (framework HTML plus optional theme CSS)
# to a character string via htmltools' doRenderTags().
distill_in_header <- function(theme = NULL) {
  doRenderTags(distill_in_header_html(theme))
}
# Materialize the distill in-header HTML via html_file() -- presumably a
# temp file whose path is returned for pandoc's --include-in-header
# (confirm against html_file's definition).
distill_in_header_file <- function(theme = NULL) {
  html_file(distill_in_header_html(theme))
}
distill_in_header_html <- function(theme = NULL) {
  # Assemble the HTML injected into the document head: the distill
  # framework HTML shipped with the package, combined with any theme CSS
  # via the "distill" placeholder.
  framework_path <- system.file(
    "rmarkdown/templates/distill_article/resources/distill.html",
    package = "distill"
  )
  framework_html <- html_from_file(framework_path)
  placeholder_html("distill", framework_html, theme_in_header_html(theme))
}
theme_in_header_html <- function(theme) {
  # No theme file -> nothing to inject into the header.
  if (is.null(theme)) {
    return(NULL)
  }
  # The theme's CSS is loaded between the distill base variable definitions
  # and the base styles (load order is significant for the CSS cascade).
  tagList(
    includeCSS(distill_resource("base-variables.css")),
    includeCSS(theme),
    includeCSS(distill_resource("base-style.css"))
  )
}
# Materialize the theme in-header HTML via html_file() (presumably returns a
# file path for pandoc includes -- confirm against html_file's definition).
theme_in_header_file <- function(theme) {
  html_file(theme_in_header_html(theme))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prediction_objects.R
\name{List}
\alias{List}
\title{List Object}
\usage{
List(items = NULL, nextPageToken = NULL, selfLink = NULL)
}
\arguments{
\item{items}{List of models}
\item{nextPageToken}{Pagination token to fetch the next page, if one exists}
\item{selfLink}{A URL to re-request this resource}
}
\value{
List object
}
\description{
List Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
|
/googlepredictionv16.auto/man/List.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false
| true
| 531
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prediction_objects.R
\name{List}
\alias{List}
\title{List Object}
\usage{
List(items = NULL, nextPageToken = NULL, selfLink = NULL)
}
\arguments{
\item{items}{List of models}
\item{nextPageToken}{Pagination token to fetch the next page, if one exists}
\item{selfLink}{A URL to re-request this resource}
}
\value{
List object
}
\description{
List Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
|
##################################################################################
## ADDS "LAND BEAR" STATUS TO ALL.V2 DATABASE                     ################
##                                                                ################
##################################################################################
# CREATES TABLE FOR REPRODUCTIVE STATUS
rm(list = ls())  # NOTE(review): clearing the global env in a script is risky;
                 # prefer running in a fresh session
library(dplyr)
# Load the data objects -- TODO confirm which objects each .RData provides
# (ows = open-water-season observations, lb = land-bear ids, presumably)
load('all_v2.RData')
load('land_bears_ows.RData')
ows$repro <- 0 # 242 bears in ows
# Flag bears whose id appears in the land-bear table
ows$land_bear <- ifelse(ows$id %in% lb$id, 1, 0)
test <- subset(ows, land_bear == 1); unique(test$id) # 23 land bears
# Encode reproductive status: 0 unknown, 1 denning, 2 cub-of-year, 3 yearling
repro <- ows %>% # 242 bears in repro
  mutate(repro = replace(repro, DenYr == 1, 1)) %>%
  mutate(repro = replace(repro, coy == 1, 2)) %>%
  mutate(repro = replace(repro, yearling == 1, 3)) %>%
  dplyr::select(id, land_bear, repro)
repro$repro <- factor(repro$repro, labels = c("Unknown", "Denning", "COY", "Yearling"))
# One row per bear (first observation wins); ungroup so downstream verbs
# operate on the whole frame
repro1 <- repro %>%
  group_by(id) %>%
  slice_head() %>%
  ungroup()
sub.land.repro1 <- subset(repro1, land_bear == 1) # 23 land bears
# BUG FIX: the column is `land_bear`, not `land_bear_ows` -- the original
# referenced a nonexistent column, hence the "Not correct numbers" comment
table(repro1$land_bear, repro1$repro)
repro1 <- repro1 %>%
  mutate(bearType = if_else(land_bear == 1, "land", "ice"))
table(repro1$bearType, repro1$repro)
#save(all.v2, file = "all_v2.RData")
####
|
/add_land_bear_to_allv2_and_repro_table.R
|
no_license
|
anniekellner/ch1_landing
|
R
| false
| false
| 1,247
|
r
|
##################################################################################
## ADDS "LAND BEAR" STATUS TO ALL.V2 DATABASE                     ################
##                                                                ################
##################################################################################
# CREATES TABLE FOR REPRODUCTIVE STATUS
rm(list = ls())  # NOTE(review): clearing the global env in a script is risky;
                 # prefer running in a fresh session
library(dplyr)
# Load the data objects -- TODO confirm which objects each .RData provides
# (ows = open-water-season observations, lb = land-bear ids, presumably)
load('all_v2.RData')
load('land_bears_ows.RData')
ows$repro <- 0 # 242 bears in ows
# Flag bears whose id appears in the land-bear table
ows$land_bear <- ifelse(ows$id %in% lb$id, 1, 0)
test <- subset(ows, land_bear == 1); unique(test$id) # 23 land bears
# Encode reproductive status: 0 unknown, 1 denning, 2 cub-of-year, 3 yearling
repro <- ows %>% # 242 bears in repro
  mutate(repro = replace(repro, DenYr == 1, 1)) %>%
  mutate(repro = replace(repro, coy == 1, 2)) %>%
  mutate(repro = replace(repro, yearling == 1, 3)) %>%
  dplyr::select(id, land_bear, repro)
repro$repro <- factor(repro$repro, labels = c("Unknown", "Denning", "COY", "Yearling"))
# One row per bear (first observation wins); ungroup so downstream verbs
# operate on the whole frame
repro1 <- repro %>%
  group_by(id) %>%
  slice_head() %>%
  ungroup()
sub.land.repro1 <- subset(repro1, land_bear == 1) # 23 land bears
# BUG FIX: the column is `land_bear`, not `land_bear_ows` -- the original
# referenced a nonexistent column, hence the "Not correct numbers" comment
table(repro1$land_bear, repro1$repro)
repro1 <- repro1 %>%
  mutate(bearType = if_else(land_bear == 1, "land", "ice"))
table(repro1$bearType, repro1$repro)
#save(all.v2, file = "all_v2.RData")
####
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SegmentedHypTestEngine.R
\name{run_scenario.SegmentedHypTestEngine}
\alias{run_scenario.SegmentedHypTestEngine}
\title{run_scenario.SegmentedHypTestEngine}
\usage{
\method{run_scenario}{SegmentedHypTestEngine}(
segmented_hyp_test_engine,
segmented_researcher,
stat_procedure,
effects_list
)
}
\arguments{
\item{segmented_hyp_test_engine}{A SegmentedHypTestEngine instance for method
dispatch}
\item{segmented_researcher}{Correctly initialised instance of
SegmentedResearcher}
\item{stat_procedure}{Correctly initialised instance of any class descended
from StatProcedureBase, e.g. OneSampleT, PearsonR, etc.}
\item{effects_list}{A list of TrueEffect instances. This must be a list, not
simply a vector. Cast with list() if necessary. Each TrueEffect instance
holds fields effect_size and effect_size_probability. A TrueEffect with
effect_size 0 (i.e. H0 is true) may be included. Probabilities across all effect
sizes must sum to 1 or an exception is thrown.}
}
\value{
A SegmentedHypTestResult instance which holds the average expected outcomes
across all effect sizes, weighted by associated probability.
}
\description{
\code{run_scenario.SegmentedHypTestEngine} numerically determines expected
outcomes for a complete study scenario. It wraps various internal methods of
class SegmentedHypTestEngine (developers can cf. source code).
}
|
/R/segHT v1.4.0/man/run_scenario.SegmentedHypTestEngine.Rd
|
no_license
|
milleratotago/Independent_Segments_R
|
R
| false
| true
| 1,429
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SegmentedHypTestEngine.R
\name{run_scenario.SegmentedHypTestEngine}
\alias{run_scenario.SegmentedHypTestEngine}
\title{run_scenario.SegmentedHypTestEngine}
\usage{
\method{run_scenario}{SegmentedHypTestEngine}(
segmented_hyp_test_engine,
segmented_researcher,
stat_procedure,
effects_list
)
}
\arguments{
\item{segmented_hyp_test_engine}{A SegmentedHypTestEngine instance for method
dispatch}
\item{segmented_researcher}{Correctly initialised instance of
SegmentedResearcher}
\item{stat_procedure}{Correctly initialised instance of any class descended
from StatProcedureBase, e.g. OneSampleT, PearsonR, etc.}
\item{effects_list}{A list of TrueEffect instances. This must be a list, not
simply a vector. Cast with list() if necessary. Each TrueEffect instance
holds fields effect_size and effect_size_probability. A TrueEffect with
effect_size 0 (i.e. H0 is true) may be included. Probabilities across all effect
sizes must sum to 1 or an exception is thrown.}
}
\value{
A SegmentedHypTestResult instance which holds the average expected outcomes
across all effect sizes, weighted by associated probability.
}
\description{
\code{run_scenario.SegmentedHypTestEngine} numerically determines expected
outcomes for a complete study scenario. It wraps various internal methods of
class SegmentedHypTestEngine (developers can cf. source code).
}
|
# Derive one row of the genotype matrix from expression data.
#
# For row `x` of the children's expression matrix, look up the parental
# expression values for the same marker (matched by rowname) and call each
# child's genotype according to which parent's value it is closest to
# (via parentalSplitSub).
#
# x                  - row index into expressionChildren
# expressionChildren - offspring expression matrix (markers x individuals)
# expressionParental - parental expression matrix (markers x 2 parents)
# verbose, debugMode - progress/timing diagnostics, printed every 100 rows
#                      (debugMode 1 = start checkpoint, 2 = timing)
#
# Returns (invisibly) a list of 0/1 genotype calls for row x.
# Fix: corrected "withour" typo in the diagnostic message.
parentalSplit <- function(x, expressionChildren, expressionParental, verbose = FALSE, debugMode = 0) {
  if (verbose && debugMode == 1) if (x %% 100 == 0) cat("parentalSplit starting without errors in checkpoint for row:", x, "\n")
  s <- proc.time()
  expressionParentalRow <- expressionParental[which(rownames(expressionParental) %in% rownames(expressionChildren)[x]), ]
  genotypeMatrixRow <- lapply(expressionChildren[x, ], parentalSplitSub, expressionParentalRow)
  e <- proc.time()
  if (verbose && debugMode == 2) if (x %% 100 == 0) cat("parentalSplit for row:", x, "done in:", (e - s)[3], "seconds.\n")
  invisible(genotypeMatrixRow)
}
# Call a single genotype: 0 if the child's expression value is at least as
# close to the first parental value as to the second, otherwise 1
# (ties go to parent 1). Result is returned invisibly.
parentalSplitSub <- function(expressionChildrenElement, expressionParentalRow) {
  closer_to_first <-
    abs(expressionChildrenElement - expressionParentalRow[1]) <=
    abs(expressionChildrenElement - expressionParentalRow[2])
  invisible(if (closer_to_first) 0 else 1)
}
# Compare two genotype matrices after aligning them with mapMarkers()
# (applied in both directions with mapMode 1 and 2 -- presumably matching
# rows and columns; confirm against mapMarkers' definition).
# Prints both matrices' dimensions and the fraction of identical calls.
# NOTE(review): `correct` is a proportion in [0, 1] but is printed with a
# "%" label -- confirm whether it should be multiplied by 100.
checkGeno <- function(genoMatrix1, genoMatrix2) {
  genoMatrix1 <- mapMarkers(genoMatrix1, genoMatrix2, mapMode = 1)
  genoMatrix1 <- mapMarkers(genoMatrix1, genoMatrix2, mapMode = 2)
  genoMatrix2 <- mapMarkers(genoMatrix2, genoMatrix1, mapMode = 1)
  genoMatrix2 <- mapMarkers(genoMatrix2, genoMatrix1, mapMode = 2)
  print(dim(genoMatrix2))
  print(dim(genoMatrix1))
  # element-wise agreement rate after alignment
  correct <- sum(as.numeric(genoMatrix1) == as.numeric(genoMatrix2)) / length(genoMatrix1)
  cat("Matrices are the same in:", correct, "%\n")
}
|
/backup/parentalSplit.r
|
no_license
|
KonradZych/GBIC2011
|
R
| false
| false
| 1,431
|
r
|
# Derive one row of the genotype matrix from expression data.
#
# For row `x` of the children's expression matrix, look up the parental
# expression values for the same marker (matched by rowname) and call each
# child's genotype according to which parent's value it is closest to
# (via parentalSplitSub).
#
# x                  - row index into expressionChildren
# expressionChildren - offspring expression matrix (markers x individuals)
# expressionParental - parental expression matrix (markers x 2 parents)
# verbose, debugMode - progress/timing diagnostics, printed every 100 rows
#                      (debugMode 1 = start checkpoint, 2 = timing)
#
# Returns (invisibly) a list of 0/1 genotype calls for row x.
# Fix: corrected "withour" typo in the diagnostic message.
parentalSplit <- function(x, expressionChildren, expressionParental, verbose = FALSE, debugMode = 0) {
  if (verbose && debugMode == 1) if (x %% 100 == 0) cat("parentalSplit starting without errors in checkpoint for row:", x, "\n")
  s <- proc.time()
  expressionParentalRow <- expressionParental[which(rownames(expressionParental) %in% rownames(expressionChildren)[x]), ]
  genotypeMatrixRow <- lapply(expressionChildren[x, ], parentalSplitSub, expressionParentalRow)
  e <- proc.time()
  if (verbose && debugMode == 2) if (x %% 100 == 0) cat("parentalSplit for row:", x, "done in:", (e - s)[3], "seconds.\n")
  invisible(genotypeMatrixRow)
}
# Call a single genotype: 0 if the child's expression value is at least as
# close to the first parental value as to the second, otherwise 1
# (ties go to parent 1). Result is returned invisibly.
parentalSplitSub <- function(expressionChildrenElement, expressionParentalRow) {
  closer_to_first <-
    abs(expressionChildrenElement - expressionParentalRow[1]) <=
    abs(expressionChildrenElement - expressionParentalRow[2])
  invisible(if (closer_to_first) 0 else 1)
}
# Compare two genotype matrices after aligning them with mapMarkers()
# (applied in both directions with mapMode 1 and 2 -- presumably matching
# rows and columns; confirm against mapMarkers' definition).
# Prints both matrices' dimensions and the fraction of identical calls.
# NOTE(review): `correct` is a proportion in [0, 1] but is printed with a
# "%" label -- confirm whether it should be multiplied by 100.
checkGeno <- function(genoMatrix1, genoMatrix2) {
  genoMatrix1 <- mapMarkers(genoMatrix1, genoMatrix2, mapMode = 1)
  genoMatrix1 <- mapMarkers(genoMatrix1, genoMatrix2, mapMode = 2)
  genoMatrix2 <- mapMarkers(genoMatrix2, genoMatrix1, mapMode = 1)
  genoMatrix2 <- mapMarkers(genoMatrix2, genoMatrix1, mapMode = 2)
  print(dim(genoMatrix2))
  print(dim(genoMatrix1))
  # element-wise agreement rate after alignment
  correct <- sum(as.numeric(genoMatrix1) == as.numeric(genoMatrix2)) / length(genoMatrix1)
  cat("Matrices are the same in:", correct, "%\n")
}
|
# This program for confirmed and deaths percentages over USA:
# draws a US state map and annotates each state with its COVID-19 death and
# recovery percentages (data through 13 July 2020), saving the plot as PNG.
# NOTE(review): map_data() additionally requires the 'maps' package to be
# installed -- confirm it is available on the target machine.
library(ggplot2)
library(formattable)
prgname = "usa_states_percentages.r"  # printed on the plot for provenance
# # reading the external dataset (covid=19 confirmed and deaths up to 12 June 2020)
# NOTE(review): hard-coded absolute input/output paths -- adjust per machine
cvdata <- read.csv("/home/jayanthikishore/Desktop/SASUniversityEdition/myfolders/COVID-19/USAcovid_results/USA_13july2020_statewise_per.csv")
dim(cvdata)
head(cvdata)
summary(cvdata)
nn <- nrow(cvdata)
nn
# state plot background and COVID-19 pie chart
# # https://eriqande.github.io/rep-res-web/lectures/making-maps-with-R.html
# NOTE(review): `states` is computed but never used (`us` below holds the
# same data) -- candidate for removal
states <- map_data("state")
# dim(states)
# head(states)    #print(states)
# tail(states)
png(file="~/Downloads/R_programs/Results/USA_confirmed_deathspercentage_13july2020_piemap1.png", width=800, height=600)
us <- map_data('state')
# base map: light grey state polygons
gg1 <- ggplot(us, aes(long, lat)) +
  geom_map(map=us, fill="grey97", color="grey") +
  coord_quickmap()+
  guides(fill=FALSE)
# format the percentage columns for display
deathper <- (formattable(cvdata$Death_per, digits = 1, format = "f"))
# NOTE(review): `ded` is computed but never used -- candidate for removal
ded <- (formattable(percent(deathper/100.),digits=2,format="f"))
recvrper <- (formattable(cvdata$Recover_per, digits = 1, format = "f"))
# overlay per-state labels: death % (red) above, recovery % (blue) below
gg1 + geom_point(aes(x=Long_,y=Lat+0.6),data=cvdata,size=1) +
  geom_text(data=cvdata,aes(x=Long_,y=Lat,label=paste(deathper)),color="red",size=3,parse=TRUE) +
  geom_text(data=cvdata,aes(x=Long_,y=Lat-0.6,label=paste(recvrper)),color="blue",size=3,parse=TRUE) +
  annotate("text", label="Death percentage",x=-112.,y=28,size=5,color="red") +
  annotate("text", label="Recover percentage",x=-112.,y=27,size=5,color="blue") +
  annotate("text", label=prgname,x=-112.,y=23,size=3,color="black") +
  labs(title = "Each state COVID-19 cases: Death percentage",
       subtitle = "22 Jan 2020 - 13 July 2020",
       caption = "Data source: WHO_ICTRP",
       FILL = NULL) + labs(size='Deaths') +
  theme_bw() +
  theme(legend.position = c(0.9,0.08),
        legend.justification = c(1,0),
        panel.grid = element_blank(),
        panel.border = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank())
dev.off()
|
/R_prg8.r
|
no_license
|
Kishore1818/R_programs
|
R
| false
| false
| 2,104
|
r
|
# This program for confirmed and deaths percentages over USA:
# draws a US state map and annotates each state with its COVID-19 death and
# recovery percentages (data through 13 July 2020), saving the plot as PNG.
# NOTE(review): map_data() additionally requires the 'maps' package to be
# installed -- confirm it is available on the target machine.
library(ggplot2)
library(formattable)
prgname = "usa_states_percentages.r"  # printed on the plot for provenance
# # reading the external dataset (covid=19 confirmed and deaths up to 12 June 2020)
# NOTE(review): hard-coded absolute input/output paths -- adjust per machine
cvdata <- read.csv("/home/jayanthikishore/Desktop/SASUniversityEdition/myfolders/COVID-19/USAcovid_results/USA_13july2020_statewise_per.csv")
dim(cvdata)
head(cvdata)
summary(cvdata)
nn <- nrow(cvdata)
nn
# state plot background and COVID-19 pie chart
# # https://eriqande.github.io/rep-res-web/lectures/making-maps-with-R.html
# NOTE(review): `states` is computed but never used (`us` below holds the
# same data) -- candidate for removal
states <- map_data("state")
# dim(states)
# head(states)    #print(states)
# tail(states)
png(file="~/Downloads/R_programs/Results/USA_confirmed_deathspercentage_13july2020_piemap1.png", width=800, height=600)
us <- map_data('state')
# base map: light grey state polygons
gg1 <- ggplot(us, aes(long, lat)) +
  geom_map(map=us, fill="grey97", color="grey") +
  coord_quickmap()+
  guides(fill=FALSE)
# format the percentage columns for display
deathper <- (formattable(cvdata$Death_per, digits = 1, format = "f"))
# NOTE(review): `ded` is computed but never used -- candidate for removal
ded <- (formattable(percent(deathper/100.),digits=2,format="f"))
recvrper <- (formattable(cvdata$Recover_per, digits = 1, format = "f"))
# overlay per-state labels: death % (red) above, recovery % (blue) below
gg1 + geom_point(aes(x=Long_,y=Lat+0.6),data=cvdata,size=1) +
  geom_text(data=cvdata,aes(x=Long_,y=Lat,label=paste(deathper)),color="red",size=3,parse=TRUE) +
  geom_text(data=cvdata,aes(x=Long_,y=Lat-0.6,label=paste(recvrper)),color="blue",size=3,parse=TRUE) +
  annotate("text", label="Death percentage",x=-112.,y=28,size=5,color="red") +
  annotate("text", label="Recover percentage",x=-112.,y=27,size=5,color="blue") +
  annotate("text", label=prgname,x=-112.,y=23,size=3,color="black") +
  labs(title = "Each state COVID-19 cases: Death percentage",
       subtitle = "22 Jan 2020 - 13 July 2020",
       caption = "Data source: WHO_ICTRP",
       FILL = NULL) + labs(size='Deaths') +
  theme_bw() +
  theme(legend.position = c(0.9,0.08),
        legend.justification = c(1,0),
        panel.grid = element_blank(),
        panel.border = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank())
dev.off()
|
# Sam Welch
# A simple script to determine the possible 1,2,4 and 8 combinations of 8 stressors across 4 categories.
# 7th May 2018
# NOTE(review): hard-coded absolute working directory -- adjust per machine
setwd("C:/Users/Sam Welch/Google Drive/ICL Ecological Applications/Project/Scripts")
stressors <- c("Chloramphenicol", "Amoxycillin", "Atrazine", "Metaldehyde", "Copper", "Cadmium", "Benzo[a]pyrene", "Benzene")
# All unordered combinations of 1, 2, 4 and 8 stressors; combn() returns a
# matrix with one combination per column
mixture_one <- combn(stressors, 1)
mixture_two <- combn(stressors, 2)
mixture_four <- combn(stressors, 4)
mixture_eight <- combn(stressors, 8)
mixture_four
# BUG FIX: write.csv() ignores `col.names` (with a warning) and always
# writes a header row; use write.table() with sep = "," to actually honor
# the headerless output the original intended.
write.table(mixture_two, file = "Results/mixture_two.csv", sep = ",", row.names = FALSE, col.names = FALSE)
write.table(mixture_four, file = "Results/mixture_four.csv", sep = ",", row.names = FALSE, col.names = FALSE)
write.table(mixture_eight, file = "Results/mixture_eight.csv", sep = ",", row.names = FALSE, col.names = FALSE)
|
/Scripts/Code/Archive/Combination_Calculator.R
|
no_license
|
samawelch/MScProject
|
R
| false
| false
| 795
|
r
|
# Sam Welch
# A simple script to determine the possible 1,2,4 and 8 combinations of 8 stressors across 4 categories.
# 7th May 2018
# NOTE(review): hard-coded absolute working directory -- adjust per machine
setwd("C:/Users/Sam Welch/Google Drive/ICL Ecological Applications/Project/Scripts")
stressors <- c("Chloramphenicol", "Amoxycillin", "Atrazine", "Metaldehyde", "Copper", "Cadmium", "Benzo[a]pyrene", "Benzene")
# All unordered combinations of 1, 2, 4 and 8 stressors; combn() returns a
# matrix with one combination per column
mixture_one <- combn(stressors, 1)
mixture_two <- combn(stressors, 2)
mixture_four <- combn(stressors, 4)
mixture_eight <- combn(stressors, 8)
mixture_four
# BUG FIX: write.csv() ignores `col.names` (with a warning) and always
# writes a header row; use write.table() with sep = "," to actually honor
# the headerless output the original intended.
write.table(mixture_two, file = "Results/mixture_two.csv", sep = ",", row.names = FALSE, col.names = FALSE)
write.table(mixture_four, file = "Results/mixture_four.csv", sep = ",", row.names = FALSE, col.names = FALSE)
write.table(mixture_eight, file = "Results/mixture_eight.csv", sep = ",", row.names = FALSE, col.names = FALSE)
|
\name{YearEntrez-methods}
\docType{methods}
\alias{YearEntrez-methods}
\alias{YearEntrez,Medline-method}
\title{ ~~ Methods for Function \code{YearEntrez} in Package \pkg{RISmed} ~~}
\description{
~~ Methods for function \code{YearEntrez} in package \pkg{RISmed} ~~
}
\section{Methods}{
\describe{
\item{\code{signature(object = "Medline")}}{
%% ~~describe this method here~~
}
}}
\keyword{methods}
|
/man/YearEntrez-methods.Rd
|
no_license
|
mayunlong89/RISmed
|
R
| false
| false
| 403
|
rd
|
\name{YearEntrez-methods}
\docType{methods}
\alias{YearEntrez-methods}
\alias{YearEntrez,Medline-method}
\title{ ~~ Methods for Function \code{YearEntrez} in Package \pkg{RISmed} ~~}
\description{
~~ Methods for function \code{YearEntrez} in package \pkg{RISmed} ~~
}
\section{Methods}{
\describe{
\item{\code{signature(object = "Medline")}}{
%% ~~describe this method here~~
}
}}
\keyword{methods}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iterators.R
\name{igraph-vs-indexing2}
\alias{igraph-vs-indexing2}
\alias{[[.igraph.vs}
\title{Select vertices and show their metadata}
\usage{
\method{[[}{igraph.vs}(x, ...)
}
\arguments{
\item{x}{A vertex sequence.}
\item{...}{Additional arguments, passed to \code{[}.}
}
\value{
The double bracket operator returns another vertex sequence,
with meta-data (attribute) printing turned on. See details below.
}
\description{
The double bracket operator can be used on vertex sequences, to print
the meta-data (vertex attributes) of the vertices in the sequence.
}
\details{
Technically, when used with vertex sequences, the double bracket
operator does exactly the same as the single bracket operator,
but the resulting vertex sequence is printed differently: all
attributes of the vertices in the sequence are printed as well.
See \code{\link{[.igraph.vs}} for more about indexing vertex sequences.
}
\examples{
g <- make_ring(10) \%>\%
set_vertex_attr("color", value = "red") \%>\%
set_vertex_attr("name", value = LETTERS[1:10])
V(g)
V(g)[[]]
V(g)[1:5]
V(g)[[1:5]]
}
\seealso{
Other vertex and edge sequences: \code{\link{E}},
\code{\link{V}}, \code{\link{igraph-es-attributes}},
\code{\link{igraph-es-indexing2}},
\code{\link{igraph-es-indexing}},
\code{\link{igraph-vs-attributes}},
\code{\link{igraph-vs-indexing}},
\code{\link{print.igraph.es}},
\code{\link{print.igraph.vs}}
Other vertex and edge sequence operations: \code{\link{c.igraph.es}},
\code{\link{c.igraph.vs}},
\code{\link{difference.igraph.es}},
\code{\link{difference.igraph.vs}},
\code{\link{igraph-es-indexing2}},
\code{\link{igraph-es-indexing}},
\code{\link{igraph-vs-indexing}},
\code{\link{intersection.igraph.es}},
\code{\link{intersection.igraph.vs}},
\code{\link{rev.igraph.es}}, \code{\link{rev.igraph.vs}},
\code{\link{union.igraph.es}},
\code{\link{union.igraph.vs}},
\code{\link{unique.igraph.es}},
\code{\link{unique.igraph.vs}}
}
\concept{vertex and edge sequence operations}
\concept{vertex and edge sequences}
|
/man/igraph-vs-indexing2.Rd
|
no_license
|
andresrabinovich/igraph
|
R
| false
| true
| 2,124
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iterators.R
\name{igraph-vs-indexing2}
\alias{igraph-vs-indexing2}
\alias{[[.igraph.vs}
\title{Select vertices and show their metadata}
\usage{
\method{[[}{igraph.vs}(x, ...)
}
\arguments{
\item{x}{A vertex sequence.}
\item{...}{Additional arguments, passed to \code{[}.}
}
\value{
The double bracket operator returns another vertex sequence,
with meta-data (attribute) printing turned on. See details below.
}
\description{
The double bracket operator can be used on vertex sequences, to print
the meta-data (vertex attributes) of the vertices in the sequence.
}
\details{
Technically, when used with vertex sequences, the double bracket
operator does exactly the same as the single bracket operator,
but the resulting vertex sequence is printed differently: all
attributes of the vertices in the sequence are printed as well.
See \code{\link{[.igraph.vs}} for more about indexing vertex sequences.
}
\examples{
g <- make_ring(10) \%>\%
set_vertex_attr("color", value = "red") \%>\%
set_vertex_attr("name", value = LETTERS[1:10])
V(g)
V(g)[[]]
V(g)[1:5]
V(g)[[1:5]]
}
\seealso{
Other vertex and edge sequences: \code{\link{E}},
\code{\link{V}}, \code{\link{igraph-es-attributes}},
\code{\link{igraph-es-indexing2}},
\code{\link{igraph-es-indexing}},
\code{\link{igraph-vs-attributes}},
\code{\link{igraph-vs-indexing}},
\code{\link{print.igraph.es}},
\code{\link{print.igraph.vs}}
Other vertex and edge sequence operations: \code{\link{c.igraph.es}},
\code{\link{c.igraph.vs}},
\code{\link{difference.igraph.es}},
\code{\link{difference.igraph.vs}},
\code{\link{igraph-es-indexing2}},
\code{\link{igraph-es-indexing}},
\code{\link{igraph-vs-indexing}},
\code{\link{intersection.igraph.es}},
\code{\link{intersection.igraph.vs}},
\code{\link{rev.igraph.es}}, \code{\link{rev.igraph.vs}},
\code{\link{union.igraph.es}},
\code{\link{union.igraph.vs}},
\code{\link{unique.igraph.es}},
\code{\link{unique.igraph.vs}}
}
\concept{vertex and edge sequence operations}
\concept{vertex and edge sequences}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit.R
\name{ssd_fit_dists}
\alias{ssd_fit_dists}
\title{Fit Distributions}
\usage{
ssd_fit_dists(data, left = "Conc", right = left, weight = NULL,
dists = c("lnorm", "llog", "gompertz", "lgumbel", "gamma", "weibull"),
silent = FALSE)
}
\arguments{
\item{data}{A data frame.}
\item{left}{A string of the column in data with the left concentration values.}
\item{right}{A string of the column in data with the right concentration values.}
\item{weight}{A string of the column in data with the weightings (or NULL)}
\item{dists}{A character vector of the distributions to fit.}
\item{silent}{A flag indicating whether fits should fail silently.}
}
\value{
An object of class fitdists (a list of \code{\link[fitdistrplus]{fitdist}} objects).
}
\description{
Fits one or more distributions to species sensitivity data.
}
\details{
By default the 'lnorm', 'llog', 'gompertz', 'lgumbel', 'gamma' and 'weibull'
distributions are fitted to the data.
The ssd_fit_dists function has also been
tested with the 'pareto' distribution.
If weight specifies a column in the data frame with positive integers,
weighted estimation occurs.
However, currently only the resultant parameter estimates are available (via coef).
If the `right` argument is different to the `left` argument then the data are considered to be censored.
It may be possible to use artificial censoring to improve the estimates in the extreme tails
(Liu et al 2018).
}
\examples{
ssd_fit_dists(boron_data)
data(fluazinam, package = "fitdistrplus")
ssd_fit_dists(fluazinam, left = "left", right = "right")
}
\references{
Liu, Y., Salibián-Barrera, M., Zamar, R.H., and Zidek, J.V. 2018. Using artificial censoring to improve extreme tail quantile estimates. Journal of the Royal Statistical Society: Series C (Applied Statistics).
}
|
/man/ssd_fit_dists.Rd
|
permissive
|
flor14/ssdtools
|
R
| false
| true
| 1,875
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit.R
\name{ssd_fit_dists}
\alias{ssd_fit_dists}
\title{Fit Distributions}
\usage{
ssd_fit_dists(data, left = "Conc", right = left, weight = NULL,
dists = c("lnorm", "llog", "gompertz", "lgumbel", "gamma", "weibull"),
silent = FALSE)
}
\arguments{
\item{data}{A data frame.}
\item{left}{A string of the column in data with the left concentration values.}
\item{right}{A string of the column in data with the right concentration values.}
\item{weight}{A string of the column in data with the weightings (or NULL)}
\item{dists}{A character vector of the distributions to fit.}
\item{silent}{A flag indicating whether fits should fail silently.}
}
\value{
An object of class fitdists (a list of \code{\link[fitdistrplus]{fitdist}} objects).
}
\description{
Fits one or more distributions to species sensitivity data.
}
\details{
By default the 'lnorm', 'llog', 'gompertz', 'lgumbel', 'gamma' and 'weibull'
distributions are fitted to the data.
The ssd_fit_dists function has also been
tested with the 'pareto' distribution.
If weight specifies a column in the data frame with positive integers,
weighted estimation occurs.
However, currently only the resultant parameter estimates are available (via coef).
If the `right` argument is different to the `left` argument then the data are considered to be censored.
It may be possible to use artificial censoring to improve the estimates in the extreme tails
(Liu et al 2018).
}
\examples{
ssd_fit_dists(boron_data)
data(fluazinam, package = "fitdistrplus")
ssd_fit_dists(fluazinam, left = "left", right = "right")
}
\references{
Liu, Y., Salibián-Barrera, M., Zamar, R.H., and Zidek, J.V. 2018. Using artificial censoring to improve extreme tail quantile estimates. Journal of the Royal Statistical Society: Series C (Applied Statistics).
}
|
# Script to compile the RegMap data.
#
# Before running this script, download the RegMap data from
# bergelson.uchicago.edu/wp-content/uploads/2015/04/call_method_75.tar.gz
# and extract the file call_method_75_TAIR9.csv into the "data" directory.
#
library(methods)
library(data.table)
# provides the read.regmap.* helpers and regmap.geno.as.binary used below
# (presumably -- confirm in functions.R)
source("functions.R")
# LOAD SAMPLE INFO
# ----------------
# Retrieve the following sample info: array id, ecotype id, genotyping
# intensity, geographic co-ordinates (latitude and longitude), region
# label, and country label. This should produce a data frame with
# 1,307 rows (samples) and 6 columns.
cat("Reading sample info.\n")
regmap.info <- read.regmap.info("../data/call_method_75_info.tsv")
# LOAD PHENOTYPE DATA
# -------------------
# This should produce a data frame, "regmap.pheno" , with 948 rows
# (samples) and 48 columns (phenotypes).
cat("Reading phenotype data.\n")
regmap.pheno <- read.regmap.pheno("../data/allvars948_notnormd_011311.txt")
# LOAD GENOTYPE DATA
# ------------------
# Load the genotype data and information about the genetic markers.
cat("Reading genotype data.\n")
out <- read.regmap.geno("../data/call_method_75_TAIR9.csv")
regmap.markers <- out$markers
regmap.geno <- out$geno
rm(out)
# Convert the genotype data to a binary matrix.
cat("Converting genotype data to a binary matrix.\n")
regmap.geno <- regmap.geno.as.binary(regmap.geno)
# Reorder the rows of regmap.info so that they match up with the rows
# of regmap.geno.
# NOTE(review): match() yields NA for any genotype row absent from
# regmap.info -- assumed not to happen for this dataset; verify.
rows <- match(rownames(regmap.geno),rownames(regmap.info))
regmap.info <- regmap.info[rows,]
# SUMMARIZE DATA
# --------------
cat("regmap.info:",paste(dim(regmap.info),collapse = " x "),"\n")
cat("regmap.pheno:",paste(dim(regmap.pheno),collapse = " x "),"\n")
cat("regmap.markers:",paste(dim(regmap.markers),collapse = " x "),"\n")
cat("regmap.geno:",paste(dim(regmap.geno),collapse = " x "),"\n")
# SAVE DATA TO FILE
# -----------------
save(list = c("regmap.info","regmap.pheno","regmap.markers","regmap.geno"),
     file = "regmap.RData")
|
/code/compile.regmap.R
|
no_license
|
argdata/R-large-scale
|
R
| false
| false
| 2,017
|
r
|
# Script to compile the RegMap data.
#
# Before running this script, download the RegMap data from
# bergelson.uchicago.edu/wp-content/uploads/2015/04/call_method_75.tar.gz
# and extract the file call_method_75_TAIR9.csv into the "data" directory.
#
library(methods)
library(data.table)
# provides the read.regmap.* helpers and regmap.geno.as.binary used below
# (presumably -- confirm in functions.R)
source("functions.R")
# LOAD SAMPLE INFO
# ----------------
# Retrieve the following sample info: array id, ecotype id, genotyping
# intensity, geographic co-ordinates (latitude and longitude), region
# label, and country label. This should produce a data frame with
# 1,307 rows (samples) and 6 columns.
cat("Reading sample info.\n")
regmap.info <- read.regmap.info("../data/call_method_75_info.tsv")
# LOAD PHENOTYPE DATA
# -------------------
# This should produce a data frame, "regmap.pheno" , with 948 rows
# (samples) and 48 columns (phenotypes).
cat("Reading phenotype data.\n")
regmap.pheno <- read.regmap.pheno("../data/allvars948_notnormd_011311.txt")
# LOAD GENOTYPE DATA
# ------------------
# Load the genotype data and information about the genetic markers.
cat("Reading genotype data.\n")
out <- read.regmap.geno("../data/call_method_75_TAIR9.csv")
regmap.markers <- out$markers
regmap.geno <- out$geno
rm(out)
# Convert the genotype data to a binary matrix.
cat("Converting genotype data to a binary matrix.\n")
regmap.geno <- regmap.geno.as.binary(regmap.geno)
# Reorder the rows of regmap.info so that they match up with the rows
# of regmap.geno.
# NOTE(review): match() yields NA for any genotype row absent from
# regmap.info -- assumed not to happen for this dataset; verify.
rows <- match(rownames(regmap.geno),rownames(regmap.info))
regmap.info <- regmap.info[rows,]
# SUMMARIZE DATA
# --------------
cat("regmap.info:",paste(dim(regmap.info),collapse = " x "),"\n")
cat("regmap.pheno:",paste(dim(regmap.pheno),collapse = " x "),"\n")
cat("regmap.markers:",paste(dim(regmap.markers),collapse = " x "),"\n")
cat("regmap.geno:",paste(dim(regmap.geno),collapse = " x "),"\n")
# SAVE DATA TO FILE
# -----------------
save(list = c("regmap.info","regmap.pheno","regmap.markers","regmap.geno"),
     file = "regmap.RData")
|
# 03-layout.R
# Minimal shiny app demonstrating the fluidRow()/column() grid layout:
# a slider offset to the right on one row, and the histogram it drives
# offset on the next row.
library(shiny)

num_slider <- sliderInput(
  "num",
  label = "Choose a number",
  value = 25, min = 1, max = 100
)

ui <- fluidPage(
  fluidRow(
    column(3),
    column(5, num_slider)
  ),
  fluidRow(
    column(4, offset = 8, plotOutput("hist"))
  )
)

# Redraw the histogram of `num` random normals whenever the slider changes.
server <- function(input, output) {
  output$hist <- renderPlot({
    hist(rnorm(input$num))
  })
}

shinyApp(ui = ui, server = server)
|
/ch04-shiny/shiny.rstudio.com-tutorial/part-3-code/03-layout.R
|
no_license
|
spylu/sta141b-notes
|
R
| false
| false
| 403
|
r
|
# 03-layout.R
# Minimal shiny app demonstrating the fluidRow()/column() grid layout:
# a slider offset to the right on one row, and the histogram it drives
# offset on the next row.
library(shiny)

num_slider <- sliderInput(
  "num",
  label = "Choose a number",
  value = 25, min = 1, max = 100
)

ui <- fluidPage(
  fluidRow(
    column(3),
    column(5, num_slider)
  ),
  fluidRow(
    column(4, offset = 8, plotOutput("hist"))
  )
)

# Redraw the histogram of `num` random normals whenever the slider changes.
server <- function(input, output) {
  output$hist <- renderPlot({
    hist(rnorm(input$num))
  })
}

shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/components.R
\name{list_group_386}
\alias{list_group_386}
\title{Create a Bootstrap 386 list group container}
\usage{
list_group_386(..., width = 4)
}
\arguments{
\item{...}{Slot for \link{list_group_item_386}.}
\item{width}{List group width. 4 by default. Between 1 and 12.}
}
\description{
Create a Bootstrap 386 list group container
}
\examples{
if(interactive()){
library(shiny)
library(shiny386)
shinyApp(
ui = page_386(
fluidRow(
list_group_386(
list_group_item_386(
type = "basic",
"Cras justo odio"
),
list_group_item_386(
type = "basic",
"Dapibus ac facilisis in"
),
list_group_item_386(
type = "basic",
"Morbi leo risus"
)
),
list_group_386(
list_group_item_386(
"Cras justo odio",
active = TRUE,
disabled = FALSE,
type = "action",
src = "http://www.google.fr"
),
list_group_item_386(
active = FALSE,
disabled = FALSE,
type = "action",
"Dapibus ac facilisis in",
src = "http://www.google.fr"
),
list_group_item_386(
"Morbi leo risus",
active = FALSE,
disabled = TRUE,
type = "action",
src = "http://www.google.fr"
)
),
list_group_386(
list_group_item_386(
"Donec id elit non mi porta gravida at eget metus.
Maecenas sed diam eget risus varius blandit.",
active = TRUE,
disabled = FALSE,
type = "heading",
title = "List group item heading",
subtitle = "3 days ago",
footer = "Donec id elit non mi porta."
),
list_group_item_386(
"Donec id elit non mi porta gravida at eget metus.
Maecenas sed diam eget risus varius blandit.",
active = FALSE,
disabled = FALSE,
type = "heading",
title = "List group item heading",
subtitle = "3 days ago",
footer = "Donec id elit non mi porta."
)
)
)
),
server = function(input, output) {}
)
}
}
\author{
David Granjon, \email{dgranjon@ymail.com}
}
|
/man/list_group_386.Rd
|
permissive
|
bright-spark/shiny386
|
R
| false
| true
| 2,241
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/components.R
\name{list_group_386}
\alias{list_group_386}
\title{Create a Bootstrap 386 list group container}
\usage{
list_group_386(..., width = 4)
}
\arguments{
\item{...}{Slot for \link{list_group_item_386}.}
\item{width}{List group width. 4 by default. Between 1 and 12.}
}
\description{
Create a Bootstrap 386 list group container
}
\examples{
if(interactive()){
library(shiny)
library(shiny386)
shinyApp(
ui = page_386(
fluidRow(
list_group_386(
list_group_item_386(
type = "basic",
"Cras justo odio"
),
list_group_item_386(
type = "basic",
"Dapibus ac facilisis in"
),
list_group_item_386(
type = "basic",
"Morbi leo risus"
)
),
list_group_386(
list_group_item_386(
"Cras justo odio",
active = TRUE,
disabled = FALSE,
type = "action",
src = "http://www.google.fr"
),
list_group_item_386(
active = FALSE,
disabled = FALSE,
type = "action",
"Dapibus ac facilisis in",
src = "http://www.google.fr"
),
list_group_item_386(
"Morbi leo risus",
active = FALSE,
disabled = TRUE,
type = "action",
src = "http://www.google.fr"
)
),
list_group_386(
list_group_item_386(
"Donec id elit non mi porta gravida at eget metus.
Maecenas sed diam eget risus varius blandit.",
active = TRUE,
disabled = FALSE,
type = "heading",
title = "List group item heading",
subtitle = "3 days ago",
footer = "Donec id elit non mi porta."
),
list_group_item_386(
"Donec id elit non mi porta gravida at eget metus.
Maecenas sed diam eget risus varius blandit.",
active = FALSE,
disabled = FALSE,
type = "heading",
title = "List group item heading",
subtitle = "3 days ago",
footer = "Donec id elit non mi porta."
)
)
)
),
server = function(input, output) {}
)
}
}
\author{
David Granjon, \email{dgranjon@ymail.com}
}
|
# @file OhdsiRTools.R
#
# Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of OhdsiRTools
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' @keywords internal
"_PACKAGE"
#' @importFrom methods getFunction is
#' @importFrom utils capture.output help memory.limit setTxtProgressBar txtProgressBar
#' installed.packages packageDescription sessionInfo write.csv read.csv install.packages menu
#' @importFrom stats aggregate
#' @importFrom httr GET POST content set_config config add_headers
#' @import openxlsx
NULL
|
/R/OhdsiRTools.R
|
permissive
|
rachita-c/OhdsiRTools
|
R
| false
| false
| 1,052
|
r
|
# @file OhdsiRTools.R
#
# Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of OhdsiRTools
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' @keywords internal
"_PACKAGE"
#' @importFrom methods getFunction is
#' @importFrom utils capture.output help memory.limit setTxtProgressBar txtProgressBar
#' installed.packages packageDescription sessionInfo write.csv read.csv install.packages menu
#' @importFrom stats aggregate
#' @importFrom httr GET POST content set_config config add_headers
#' @import openxlsx
NULL
|
# format-funs.R
# Number-formatting helpers. These rely on magrittr's `%>%`,
# scales::dollar_format(), and stringr::str_replace() being available
# through the package's imports.
#' Format a numeric vector as currency
#'
#' Rounds to `digits` significant figures, applies dollar formatting, and
#' rewrites "$-1" style negatives as "-$1".
#'
#' @param x A numeric vector of dollar amounts.
#' @param digits Number of significant digits to keep (default 2).
#' @return A character vector of formatted currency strings.
#'
#' @export
currency <- function(x, digits = 2) {
signif(x, digits = digits) %>% {
scales::dollar_format()(.)
} %>%
str_replace("\\$-", "-$")
}
#' Format a numeric vector as millions of dollars
#'
#' Divides by 1e6, rounds to `digits` significant figures, dollar-formats,
#' right-justifies, and fixes negative-sign placement. The " M" suffix is
#' currently disabled (commented out).
#'
#' @param x A numeric vector of dollar amounts.
#' @param digits Number of significant digits to keep (default 2).
#' @return A character vector of formatted strings, in millions of dollars.
#'
#' @export
Mdollar <- function(x, digits = 2) {
signif(x / 10^6, digits = digits) %>% {
scales::dollar_format()(.)
} %>%
format(justify = "right") %>%
str_replace("\\$-", "-$") #%>%
#str_c(" M")
}
#' Format a numeric vector rounded to sigfigs and apply bigmark
#'
#' @param x A numeric vector.
#' @param digits Number of significant digits to keep (required, no default).
#' @return A character vector with comma thousands separators.
#'
#' @export
rndmark <- function(x, digits) {
signif(x, digits) %>%
format(big.mark = ",")
}
|
/R/format-funs.R
|
permissive
|
USEPA/ONG-NSPS-OOOOa-Policy-Review-Analysis
|
R
| false
| false
| 652
|
r
|
# format-funs.R
# Number-formatting helpers. These rely on magrittr's `%>%`,
# scales::dollar_format(), and stringr::str_replace() being available
# through the package's imports.
#' Format a numeric vector as currency
#'
#' Rounds to `digits` significant figures, applies dollar formatting, and
#' rewrites "$-1" style negatives as "-$1".
#'
#' @param x A numeric vector of dollar amounts.
#' @param digits Number of significant digits to keep (default 2).
#' @return A character vector of formatted currency strings.
#'
#' @export
currency <- function(x, digits = 2) {
signif(x, digits = digits) %>% {
scales::dollar_format()(.)
} %>%
str_replace("\\$-", "-$")
}
#' Format a numeric vector as millions of dollars
#'
#' Divides by 1e6, rounds to `digits` significant figures, dollar-formats,
#' right-justifies, and fixes negative-sign placement. The " M" suffix is
#' currently disabled (commented out).
#'
#' @param x A numeric vector of dollar amounts.
#' @param digits Number of significant digits to keep (default 2).
#' @return A character vector of formatted strings, in millions of dollars.
#'
#' @export
Mdollar <- function(x, digits = 2) {
signif(x / 10^6, digits = digits) %>% {
scales::dollar_format()(.)
} %>%
format(justify = "right") %>%
str_replace("\\$-", "-$") #%>%
#str_c(" M")
}
#' Format a numeric vector rounded to sigfigs and apply bigmark
#'
#' @param x A numeric vector.
#' @param digits Number of significant digits to keep (required, no default).
#' @return A character vector with comma thousands separators.
#'
#' @export
rndmark <- function(x, digits) {
signif(x, digits) %>%
format(big.mark = ",")
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/ripal.R
\name{topPasswords}
\alias{topPasswords}
\title{Extract top "n" passwords from a password list}
\usage{
topPasswords(passwords, n = 10)
}
\arguments{
\item{passwords}{a data frame of passwords}
\item{n}{how many most prevalent passwords to include in the resultant data frame (default: 10)}
}
\value{
data frame of count & ratio of top 'n' passwords
}
\description{
Returns the top "n" (i.e. most prevalent) passwords in the passwords
data frame
}
\examples{
passwords <- readPass("singles.org.txt")
top.20 <- topPasswords(passwords, 20)
}
|
/man/topPasswords.Rd
|
permissive
|
stevecoward/ripal
|
R
| false
| false
| 637
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/ripal.R
\name{topPasswords}
\alias{topPasswords}
\title{Extract top "n" passwords from a password list}
\usage{
topPasswords(passwords, n = 10)
}
\arguments{
\item{passwords}{a data frame of passwords}
\item{n}{how many most prevalent passwords to include in the resultant data frame (default: 10)}
}
\value{
data frame of count & ratio of top 'n' passwords
}
\description{
Returns the top "n" (i.e. most prevalent) passwords in the passwords
data frame
}
\examples{
passwords <- readPass("singles.org.txt")
top.20 <- topPasswords(passwords, 20)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/InfinitySparseMatrix.R
\name{as.InfinitySparseMatrix}
\alias{as.InfinitySparseMatrix}
\title{Convert an object to InfinitySparseMatrix}
\usage{
as.InfinitySparseMatrix(x)
}
\arguments{
\item{x}{An object which can be coerced into InfinitySparseMatrix, typically a matrix.}
}
\value{
An InfinitySparseMatrix
}
\description{
Convert an object to InfinitySparseMatrix
}
|
/man/as.InfinitySparseMatrix.Rd
|
permissive
|
markmfredrickson/optmatch
|
R
| false
| true
| 445
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/InfinitySparseMatrix.R
\name{as.InfinitySparseMatrix}
\alias{as.InfinitySparseMatrix}
\title{Convert an object to InfinitySparseMatrix}
\usage{
as.InfinitySparseMatrix(x)
}
\arguments{
\item{x}{An object which can be coerced into InfinitySparseMatrix, typically a matrix.}
}
\value{
An InfinitySparseMatrix
}
\description{
Convert an object to InfinitySparseMatrix
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/selection_module.R
\name{selection_module}
\alias{selection_module}
\title{Feature Selection Module}
\usage{
selection_module(CBRMSR, method = c("BIRF", "rknn"))
}
\arguments{
\item{CBRMSR}{A CBRMSR object}
\item{method}{The method for feature selection. Options are BIRF (Balanced Iterative Random Forest) and rknn (random KNN)}
}
\description{
Feature Selection Module
}
\examples{
\dontrun{
# Feature selection with Balanced Iterative Random Forest
CBRMSR <- selection_module(CBRMSR, method = "BIRF")
# Feature selection with random KNN
CBRMSR <- selection_module(CBRMSR, method = "rknn")
}
}
|
/man/selection_module.Rd
|
no_license
|
bhioswego/CBRMSR
|
R
| false
| true
| 675
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/selection_module.R
\name{selection_module}
\alias{selection_module}
\title{Feature Selection Module}
\usage{
selection_module(CBRMSR, method = c("BIRF", "rknn"))
}
\arguments{
\item{CBRMSR}{A CBRMSR object}
\item{method}{The method for feature selection. Options are BIRF (Balanced Iterative Random Forest) and rknn (random KNN)}
}
\description{
Feature Selection Module
}
\examples{
\dontrun{
# Feature selection with Balanced Iterative Random Forest
CBRMSR <- selection_module(CBRMSR, method = "BIRF")
# Feature selection with random KNN
CBRMSR <- selection_module(CBRMSR, method = "rknn")
}
}
|
\name{geneSim}
\alias{geneSim}
\title{Semantic Similarity Between two Genes}
\usage{
geneSim(gene1, gene2, ont = "MF", organism = "human",
measure = "Wang", drop = "IEA", combine = "BMA")
}
\arguments{
\item{gene1}{Entrez gene id.}
\item{gene2}{Another entrez gene id.}
\item{ont}{One of "MF", "BP", and "CC" subontologies.}
\item{organism}{One of "anopheles", "arabidopsis",
"bovine", "canine", "chicken", "chimp", "coelicolor",
"ecolik12","ecsakai", "fly", "human", "malaria", "mouse",
"pig", "rat","rhesus", "worm", "xenopus", "yeast" and
"zebrafish".}
\item{measure}{One of "Resnik", "Lin", "Rel", "Jiang" and
"Wang" methods.}
\item{drop}{A set of evidence codes based on which
certain annotations are dropped. Use NULL to keep all GO
annotations.}
\item{combine}{One of "max", "average", "rcmax", "BMA"
methods, for combining semantic similarity scores of
multiple GO terms associated with protein or multiple
proteins associated with a protein cluster.}
}
\value{
list of similarity value and corresponding GO.
}
\description{
Given two genes, this function will calculate the
semantic similarity between them, and return their
semantic similarity and the corresponding GO terms
}
\examples{
geneSim("241", "251", ont="MF", organism="human", measure="Wang")
}
\references{
Yu et al. (2010) GOSemSim: an R package for measuring
semantic similarity among GO terms and gene products
\emph{Bioinformatics} (Oxford, England), 26:7 976--978,
April 2010. ISSN 1367-4803
\url{http://bioinformatics.oxfordjournals.org/cgi/content/abstract/26/7/976}
PMID: 20179076
}
\seealso{
\code{\link{goSim}} \code{\link{mgoSim}}
\code{\link{mgeneSim}} \code{\link{clusterSim}}
\code{\link{mclusterSim}}
}
\keyword{manip}
|
/2X/2.14/GOSemSim/man/geneSim.Rd
|
no_license
|
GuangchuangYu/bioc-release
|
R
| false
| false
| 1,777
|
rd
|
\name{geneSim}
\alias{geneSim}
\title{Semantic Similarity Between two Genes}
\usage{
geneSim(gene1, gene2, ont = "MF", organism = "human",
measure = "Wang", drop = "IEA", combine = "BMA")
}
\arguments{
\item{gene1}{Entrez gene id.}
\item{gene2}{Another entrez gene id.}
\item{ont}{One of "MF", "BP", and "CC" subontologies.}
\item{organism}{One of "anopheles", "arabidopsis",
"bovine", "canine", "chicken", "chimp", "coelicolor",
"ecolik12","ecsakai", "fly", "human", "malaria", "mouse",
"pig", "rat","rhesus", "worm", "xenopus", "yeast" and
"zebrafish".}
\item{measure}{One of "Resnik", "Lin", "Rel", "Jiang" and
"Wang" methods.}
\item{drop}{A set of evidence codes based on which
certain annotations are dropped. Use NULL to keep all GO
annotations.}
\item{combine}{One of "max", "average", "rcmax", "BMA"
methods, for combining semantic similarity scores of
multiple GO terms associated with protein or multiple
proteins associated with a protein cluster.}
}
\value{
list of similarity value and corresponding GO.
}
\description{
Given two genes, this function will calculate the
semantic similarity between them, and return their
semantic similarity and the corresponding GO terms
}
\examples{
geneSim("241", "251", ont="MF", organism="human", measure="Wang")
}
\references{
Yu et al. (2010) GOSemSim: an R package for measuring
semantic similarity among GO terms and gene products
\emph{Bioinformatics} (Oxford, England), 26:7 976--978,
April 2010. ISSN 1367-4803
\url{http://bioinformatics.oxfordjournals.org/cgi/content/abstract/26/7/976}
PMID: 20179076
}
\seealso{
\code{\link{goSim}} \code{\link{mgoSim}}
\code{\link{mgeneSim}} \code{\link{clusterSim}}
\code{\link{mclusterSim}}
}
\keyword{manip}
|
# Session 2 -- text mining "The Office" transcripts with tidytext.
pacman::p_load(tidyverse,tidytext,schrute,tm,ggthemes,pals)
mydata <- schrute::theoffice
mydata
################# Frequencies
# English stop words plus conversational fillers specific to this corpus.
stop_words <- stopwords(kind="en")
other_stop <- c("oh","ok","okay","uh","yeah","hey","well")
# Zipf's law:
# the words that matter least are the ones that occur most frequently
tokens.office <- mydata %>%
  select(season,episode,text,imdb_rating) %>%
  unnest_tokens(word,text) %>% # tokenize (one row per unique word occurrence)
  filter(!word %in% stop_words) %>%
  filter(!word %in% other_stop) %>%
  filter(!str_detect(word,"[[:digit:]]")) # PERL-style regular expression: drop tokens containing digits
# Build word frequencies per season.
freq.office <- tokens.office %>%
  count(word,season) %>%
  arrange(-n)
# Top 5 most frequent words per season.
top.office <- freq.office %>%
  group_by(season,word)%>%
  summarise(max = max(n)) %>%
  top_n(5) %>%
  arrange(season,-max)
# Faceted bar chart: top-5 words per season, one panel per season.
top.office %>%
  arrange(-max) %>%
  group_by(season) %>%
  top_n(5) %>%
  ungroup() %>%
  ggplot(aes(x=reorder( word,max),
             y=max,
             fill = word))+
  geom_col(show.legend = FALSE)+
  facet_wrap(~season, scales = "free")+
  coord_flip()+
  labs(title="Palabras más representativas en The Office",
       subtitle = "Top 5 por temporada",
       y = "Repeticiones",
       x = "Palabras",
       caption = "Ejercicio de @nerudista para el Curso Virtual de Text Mining de @jmtoral")+
  scale_fill_manual(values=as.vector(stepped(22)))+
  theme_clean()+
  #theme_solarized_2()+
  theme(
    strip.text.x = element_text(size = 12,
                                face="bold.italic"),
    plot.title = element_text(size = 18),
    plot.subtitle = element_text(size = 15),
    axis.title.x = element_text(size = 14),
    axis.title.y = element_text(size = 14),
    plot.caption = element_text(face="italic")
  )
################# TF-IDF
tfidf.office <- freq.office %>%
  bind_tf_idf(word,season,n) # this argument order is very important and must be respected
################## N-GRAMS
# Tokenize episode text into 4-grams. (The column name "bigrama" is kept
# from the original even though n = 4.)
ngramas.office <- mydata %>%
  #filter(season,episode,text) %>%
  unnest_tokens(bigrama,text,token="ngrams", n=4) %>%
  select(season,bigrama)
# Fix: the script originally ended with a dangling `bind_tf_idf(bigrama,
# libro, n)` call that referenced `libro` (undefined here, apparently copied
# from a books exercise) and was not connected to any data, so it errored.
# bind_tf_idf() needs per-document counts, so count n-grams by season first.
tfidf.ngramas <- ngramas.office %>%
  count(season, bigrama) %>%
  bind_tf_idf(bigrama, season, n)
|
/EjerciciosClases/theOffice-bigrams.R
|
no_license
|
nerudista/Curso_Virtual_Text_Mining
|
R
| false
| false
| 2,138
|
r
|
# Session 2 -- text mining "The Office" transcripts with tidytext.
pacman::p_load(tidyverse,tidytext,schrute,tm,ggthemes,pals)
mydata <- schrute::theoffice
mydata
################# Frequencies
# English stop words plus conversational fillers specific to this corpus.
stop_words <- stopwords(kind="en")
other_stop <- c("oh","ok","okay","uh","yeah","hey","well")
# Zipf's law:
# the words that matter least are the ones that occur most frequently
tokens.office <- mydata %>%
  select(season,episode,text,imdb_rating) %>%
  unnest_tokens(word,text) %>% # tokenize (one row per unique word occurrence)
  filter(!word %in% stop_words) %>%
  filter(!word %in% other_stop) %>%
  filter(!str_detect(word,"[[:digit:]]")) # PERL-style regular expression: drop tokens containing digits
# Build word frequencies per season.
freq.office <- tokens.office %>%
  count(word,season) %>%
  arrange(-n)
# Top 5 most frequent words per season.
top.office <- freq.office %>%
  group_by(season,word)%>%
  summarise(max = max(n)) %>%
  top_n(5) %>%
  arrange(season,-max)
# Faceted bar chart: top-5 words per season, one panel per season.
top.office %>%
  arrange(-max) %>%
  group_by(season) %>%
  top_n(5) %>%
  ungroup() %>%
  ggplot(aes(x=reorder( word,max),
             y=max,
             fill = word))+
  geom_col(show.legend = FALSE)+
  facet_wrap(~season, scales = "free")+
  coord_flip()+
  labs(title="Palabras más representativas en The Office",
       subtitle = "Top 5 por temporada",
       y = "Repeticiones",
       x = "Palabras",
       caption = "Ejercicio de @nerudista para el Curso Virtual de Text Mining de @jmtoral")+
  scale_fill_manual(values=as.vector(stepped(22)))+
  theme_clean()+
  #theme_solarized_2()+
  theme(
    strip.text.x = element_text(size = 12,
                                face="bold.italic"),
    plot.title = element_text(size = 18),
    plot.subtitle = element_text(size = 15),
    axis.title.x = element_text(size = 14),
    axis.title.y = element_text(size = 14),
    plot.caption = element_text(face="italic")
  )
################# TF-IDF
tfidf.office <- freq.office %>%
  bind_tf_idf(word,season,n) # this argument order is very important and must be respected
################## N-GRAMS
ngramas.office <- mydata %>%
  #filter(season,episode,text) %>%
  unnest_tokens(bigrama,text,token="ngrams", n=4) %>%
  select(season,bigrama)
# FIXME(review): `libro` is undefined in this script (likely copied from a
# books exercise) and this call is not piped from `ngramas.office`, so the
# line errors as written; tf-idf would need per-season n-gram counts first.
bind_tf_idf(bigrama,libro,n)
|
# Plot a histogram of 30-day death rates after heart attack across hospitals.
#
# NOTE(review): depends on a global data frame `OOCM` existing in the calling
# environment and assumes column 11 holds the heart-attack mortality rate --
# confirm against the data-loading script; consider passing the data as an
# argument instead of relying on a global.
createHist <- function(){
  hist(as.numeric(OOCM[,11]), main = "Frequency of Death Rates after Heart Attack", xlab = "% of Heart Attack Patients who Die", ylab = "Number of Hospitals")
}
|
/2 - R Programming/R Programming Week 4 - Final Project/CreateHist.R
|
no_license
|
sandyjaugust/Data-Science-Specialization-Coursera
|
R
| false
| false
| 190
|
r
|
# Plot a histogram of 30-day death rates after heart attack across hospitals.
#
# NOTE(review): depends on a global data frame `OOCM` existing in the calling
# environment and assumes column 11 holds the heart-attack mortality rate --
# confirm against the data-loading script; consider passing the data as an
# argument instead of relying on a global.
createHist <- function(){
  hist(as.numeric(OOCM[,11]), main = "Frequency of Death Rates after Heart Attack", xlab = "% of Heart Attack Patients who Die", ylab = "Number of Hospitals")
}
|
library(deSolve)
# Time grid for the ODE solver and the initial state vector.
t <- seq(0, 300, by = 0.01)
init <- c(0, 0.5, 0, 0, 1)
# Accumulates one row per inflorescence created during integration
# (filled via global assignment inside create_inf()).
inf_info <- data.frame()
# Protein Stuff
# X[1] = SFT
# X[2] = SP
# X[3] = FA
# X[4] = SFT/SP Ratio
# X[5] = Number of leaves
# X[6] = Change in number of leaves
# Model parameters. Fix: the original built this list with `<-` inside
# list(), which produced an *unnamed* list; with(as.list(parms), ...) in
# tomato_model() only worked because `<-` also leaked each parameter into
# the global environment. Using `=` names the elements properly so the
# model no longer depends on accidental globals.
parms_list <- list(
  k_1 = 0.05,
  k_2 = 0.075,
  k_3 = 0.075,
  # k_SFT = 0.1,
  # k_SP = 0.1,
  # k_FA = 0.1,
  K_13 = 0.25,
  h_13 = 2,
  beta = 0.33,
  gamma = 0.2,
  delta = c(0.1, 0.1, 0.1, 0, 0),   # per-state decay rates
  mutants = c(0, 0, 0)              # knock-out flags for SFT/SP/FA
)
parms_list$init <- init
# Protein concentrations of SFT, SP, FA, change in leaves, leaf number
# Classify the meristem state from the SFT concentration.
#
# Returns a length-2 numeric vector c(flower, vegetative): c(0, 1) while
# SFT_conc < 0.75 (vegetative) and c(1, 0) once the 0.75 threshold is
# reached (flowering). The two elements are always complementary.
# Fix: removed the unused local `threshold` and collapsed the redundant
# flag bookkeeping into direct returns.
SFT_threshold <- function(SFT_conc) {
  if (SFT_conc < 0.75) {
    c(0, 1)   # not flowering, vegetative
  } else {
    c(1, 0)   # flowering, not vegetative
  }
}
#creates an inflorescence (row) in data frame whenever SFT exceeds the arbitrary threshold 0.3
# Side effect only: appends c(0, "NOT FLOWERED") to the *global* `inf_info`
# via `<<-`; returns nothing useful. NOTE(review): rbind() of a mixed
# numeric/character pair coerces the first column to character and yields
# default column names -- confirm this is acceptable wherever inf_info is
# later used numerically.
create_inf <- function(SFT_conc) {
  if(SFT_conc > 0.3) {
    data <- c(0, "NOT FLOWERED")
    inf_info <<- rbind(inf_info, data)
  }
}
#not used right now but when the flowering meristem gets modeled (individual flowers), this will tell when a inflorescence meristem stop being a sink
# Mark inflorescences as "FLOWERED" once column 1 exceeds 0.3.
#
# Fix: the original loop `for(i in 1:inf_data[i,])` indexed with `i` before
# it was defined and iterated over a data-frame row rather than row indices,
# so it errored. It also mutated only a local copy and returned NULL, so
# callers could never observe the update; the modified data frame is now
# returned and should be reassigned by the caller.
inflorescence_check <- function(inf_data) {
  for (i in seq_len(nrow(inf_data))) {
    if (inf_data[i, 1] > 0.3) {
      inf_data[i, 2] <- "FLOWERED"
    }
  }
  inf_data
}
# Derivative function for the tomato flowering ODE system (deSolve interface).
# X is the state vector documented in the header (SFT, SP, FA, SFT/SP ratio,
# leaf number). NOTE(review): this function is not a pure derivative --
# create_inf() appends to the global `inf_info` on every solver evaluation,
# so results depend on how often the integrator calls it; confirm intended.
tomato_model <- function(t, X, parms=NULL,...) {
  with(as.list(parms),{
    #state variables
    # Hill-type activation term driven by SFT concentration.
    p_13 <- X[1]/(K_13^h_13 + X[1]^h_13)
    SFT <- X[1]
    SFT <- k_1*X[4] - beta*nrow(inf_info)*SFT #needs some work because it doesn't make physical sense with no cap; proportional
    # SFT_meristem <- k_SFT*SFT*(1 - SFT)
    # SP production is gated off once SFT crosses the flowering threshold.
    SP <- k_2*X[4]*SFT_threshold(SFT)[2]
    # SP_meristem <- k_SP*SP*(1 - SP)
    FA <- k_3*X[4] + gamma*p_13 - beta*nrow(inf_info)
    # FA_cap <- k_FA*FA*(1 - FA)
    # flower_ratio <- SFT/SP
    # Side effect: may append a row to the global inf_info.
    create_inf(SFT)
    # inflorescence_check(inf_infO)
    derivatives <- rep(0, length(X))
    derivatives[1:3] <- c(SFT, SP, FA)#, flower_ratio)
    # Zero out derivatives for knocked-out genes (mutants recycles over 1:3).
    derivatives[1:3] <- derivatives[1:3] * (1 - mutants)
    derivatives[4] <- X[5]
    # First-order decay applied to every state variable.
    derivatives <- derivatives - delta*X
    return(list(
      Derivatives <- derivatives
    ))
  })
}
# Integrate the tomato protein model over the global time grid `t` using
# deSolve's lsoda solver; returns the ode() output matrix (time + states).
fit_model <- function(parms) {
  solution <- ode(
    y = c(parms$init),
    times = t,
    func = tomato_model,
    parms = parms,
    method = 'lsoda'
  )
  # (Root/event handling was sketched here originally but never enabled.)
  return(solution)
}
# Run the model and overlay the first three state variables (SFT, SP, FA)
# against time on a single plot.
s1 <- fit_model(parms_list)
cols = c('red','blue','black')#'green','gray')
# Multiplier to convert solver time into plotted time units (1 = no change).
time_scale <- 1
x=t
x=x*time_scale
# Fixed y-range 0..5; the commented-out range(s1[,-1]) would autoscale.
plot(NA,NA,xlim = range(x),ylim = c(0,5))#range(s1[,-1]))
for(i in 2:4){
  lines(s1[,1]*time_scale,s1[,i],col=cols[i-1])
}
|
/project_marinara/Tomato_Models/tomato_model.R
|
no_license
|
jkhta/Runcie_Lab
|
R
| false
| false
| 3,208
|
r
|
library(deSolve)
# Time grid for the ODE solver and the initial state vector.
t <- seq(0, 300, by = 0.01)
init <- c(0, 0.5, 0, 0, 1)
# Accumulates one row per inflorescence created during integration
# (filled via global assignment inside create_inf()).
inf_info <- data.frame()
# Protein Stuff
# X[1] = SFT
# X[2] = SP
# X[3] = FA
# X[4] = SFT/SP Ratio
# X[5] = Number of leaves
# X[6] = Change in number of leaves
# NOTE(review): `<-` inside list() produces an *unnamed* list; the later
# with(as.list(parms), ...) only finds k_1 etc. because `<-` also assigns
# them in the global environment. Should use `=` to name the elements.
parms_list <- list(
  k_1 <- 0.05,
  k_2 <- 0.075,
  k_3 <- 0.075,
  # k_SFT <- 0.1,
  # k_SP <- 0.1,
  # k_FA <- 0.1,
  K_13 <- 0.25,
  h_13 <- 2,
  beta <- 0.33,
  gamma <- 0.2,
  delta <- c(0.1, 0.1, 0.1, 0, 0),
  mutants <- c(0, 0, 0)
)
parms_list$init <- init
#Protein concentrations of SFT, SP, FA, change in leaves, leaf number
# Returns c(flower, vegetative): c(0, 1) below the 0.75 SFT threshold,
# c(1, 0) at or above it. `threshold` is an unused local.
SFT_threshold <- function(SFT_conc) {
  flower <- 0
  threshold <- 0
  if(SFT_conc < 0.75) {
    flower <- 0
    vegetative <- 1
  }
  else{
    flower <- 1
    vegetative <- 0
  }
  return(c(flower, vegetative))
}
#creates an inflorescence (row) in data frame whenever SFT exceeds the arbitrary threshold 0.3
# Side effect only: appends to the *global* `inf_info` via `<<-`.
create_inf <- function(SFT_conc) {
  if(SFT_conc > 0.3) {
    data <- c(0, "NOT FLOWERED")
    inf_info <<- rbind(inf_info, data)
  }
}
#not used right now but when the flowering meristem gets modeled (individual flowers), this will tell when a inflorescence meristem stop being a sink
# FIXME(review): `for(i in 1:inf_data[i,])` uses `i` before it is defined
# and iterates over a row, not row indices; the function also mutates only
# a local copy and returns NULL, so the update is never observable.
inflorescence_check <- function(inf_data) {
  for(i in 1:inf_data[i,]){
    if(inf_data[i,1] > 0.3) {
      inf_data[i,2] <- "FLOWERED"
    }
  }
}
# Derivative function for the tomato flowering ODE system (deSolve interface).
# NOTE(review): not a pure derivative -- create_inf() appends to the global
# inf_info on every solver evaluation, so results depend on call pattern.
tomato_model <- function(t, X, parms=NULL,...) {
  with(as.list(parms),{
    #state variables
    p_13 <- X[1]/(K_13^h_13 + X[1]^h_13)
    SFT <- X[1]
    SFT <- k_1*X[4] - beta*nrow(inf_info)*SFT #needs some work because it doesn't make physical sense with no cap; proportional
    # SFT_meristem <- k_SFT*SFT*(1 - SFT)
    SP <- k_2*X[4]*SFT_threshold(SFT)[2]
    # SP_meristem <- k_SP*SP*(1 - SP)
    FA <- k_3*X[4] + gamma*p_13 - beta*nrow(inf_info)
    # FA_cap <- k_FA*FA*(1 - FA)
    # flower_ratio <- SFT/SP
    create_inf(SFT)
    # inflorescence_check(inf_infO)
    derivatives <- rep(0, length(X))
    derivatives[1:3] <- c(SFT, SP, FA)#, flower_ratio)
    derivatives[1:3] <- derivatives[1:3] * (1 - mutants)
    derivatives[4] <- X[5]
    derivatives <- derivatives - delta*X
    return(list(
      Derivatives <- derivatives
    ))
  })
}
# Integrate the model with lsoda over the global time grid `t`.
fit_model = function(parms){
  s1 <- ode(y = c(parms$init),
            times = t,
            func = tomato_model,
            parms=parms,
            method='lsoda')
  # rootfun = root_fun,
  # events = list(func = eventsfun,root=T,terminalroot=terminalroot))
  return(s1)
}
# Run the model and overlay the first three state variables against time.
s1 <- fit_model(parms_list)
cols = c('red','blue','black')#'green','gray')
time_scale <- 1
x=t
x=x*time_scale
plot(NA,NA,xlim = range(x),ylim = c(0,5))#range(s1[,-1]))
for(i in 2:4){
  lines(s1[,1]*time_scale,s1[,i],col=cols[i-1])
}
|
# map_stats.R -- bar chart of total vs. uniquely mapped reads per sample.
# Usage: Rscript map_stats.R <stats.csv> <out.png>
library(ggplot2)
library(reshape2)
library(scales)
# args[1] = input CSV (metrics in rows, samples in columns); args[2] = output PNG.
args <- commandArgs( trailingOnly = TRUE )
data <- read.csv( args[1], sep=",", header=TRUE )
# Promote the first column (metric names) to row names.
rownames(data) <- data[,1]
data[,1] <- NULL
x <- data.frame( Sample=names(data), Total_Reads=as.numeric(as.matrix(data["Number_of_input_reads",])), Unique_Reads=as.numeric(as.matrix(data["Uniquely_mapped_reads_number",])))
# Long format: one row per (sample, metric) pair for ggplot.
x1 <- melt(x, id.var="Sample")
png( args[2], width = 8, height = 8, unit="in",res=300 )
upper_limit <- max(x$Total_Reads)
limits <- seq( 0, upper_limit, length.out=10)
colors <- c(Total_Reads="Grey", Unique_Reads="Blue")
# position="identity" overlays the two bars per sample (rather than
# stacking), so the grey total bar shows behind the blue unique bar.
ggplot(x1, aes(x=Sample, y=value, fill=variable)) + geom_bar( stat = "identity", position="identity") +
scale_y_continuous("",limits=c(0,upper_limit), labels=comma, breaks=limits) + scale_fill_manual(values=colors) +
labs( title="Read Alignment Report\n\n", x = "Sample Names", y="") + guides(fill=guide_legend(title=NULL)) +
theme_bw() + theme(axis.text.x = element_text(angle=90, hjust = 1, vjust=0.5, size=10))
dev.off()
|
/scripts/map_stats.R
|
no_license
|
vangalamaheshh/trim_and_align
|
R
| false
| false
| 1,023
|
r
|
# map_stats.R -- bar chart of total vs. uniquely mapped reads per sample.
# Usage: Rscript map_stats.R <stats.csv> <out.png>
library(ggplot2)
library(reshape2)
library(scales)
# args[1] = input CSV (metrics in rows, samples in columns); args[2] = output PNG.
args <- commandArgs( trailingOnly = TRUE )
data <- read.csv( args[1], sep=",", header=TRUE )
# Promote the first column (metric names) to row names.
rownames(data) <- data[,1]
data[,1] <- NULL
x <- data.frame( Sample=names(data), Total_Reads=as.numeric(as.matrix(data["Number_of_input_reads",])), Unique_Reads=as.numeric(as.matrix(data["Uniquely_mapped_reads_number",])))
# Long format: one row per (sample, metric) pair for ggplot.
x1 <- melt(x, id.var="Sample")
png( args[2], width = 8, height = 8, unit="in",res=300 )
upper_limit <- max(x$Total_Reads)
limits <- seq( 0, upper_limit, length.out=10)
colors <- c(Total_Reads="Grey", Unique_Reads="Blue")
# position="identity" overlays the two bars per sample (rather than
# stacking), so the grey total bar shows behind the blue unique bar.
ggplot(x1, aes(x=Sample, y=value, fill=variable)) + geom_bar( stat = "identity", position="identity") +
scale_y_continuous("",limits=c(0,upper_limit), labels=comma, breaks=limits) + scale_fill_manual(values=colors) +
labs( title="Read Alignment Report\n\n", x = "Sample Names", y="") + guides(fill=guide_legend(title=NULL)) +
theme_bw() + theme(axis.text.x = element_text(angle=90, hjust = 1, vjust=0.5, size=10))
dev.off()
|
#' Creates an object of class naRes (NA Result)
#'
#' This function takes in an omicsData object, and outputs a list of two data
#' frames, one containing the number of missing values by sample, and the other
#' containing the number of missing values by molecule
#'
#' @param omicsData an object of class "pepData", "proData", "metabData",
#'   "lipidData", "nmrData", or "seqData", created by \code{\link{as.pepData}},
#'   \code{\link{as.proData}}, \code{\link{as.metabData}},
#'   \code{\link{as.lipidData}}, \code{\link{as.nmrData}}, or
#'   \code{\link{as.seqData}}, respectively.
#'
#' @return S3 object of class naRes, which is a list of two data frames, one
#'   containing the number of missing values per sample, and the other
#'   containing the number of missing values per molecule. For count data,
#'   zeroes represent missing values; for abundance data, NA's represent missing
#'   values. This object can be used with 'plot' and 'summary' methods to
#'   examine the missing values in the dataset.
#'
#' @examples
#' library(pmartRdata)
#' result1 = missingval_result(omicsData = lipid_neg_object)
#' result2 = missingval_result(omicsData = metab_object)
#'
#' @export
#'
missingval_result <- function(omicsData) {
  # Check for a supported omicsData class.
  if (!inherits(omicsData, c("pepData", "proData", "lipidData",
                             "metabData", "nmrData", "seqData"))) {
    stop(paste("omicsData must have class of the following, 'pepData',",
               "'proData', 'lipidData', 'metabData', 'nmrData', 'seqData'",
               sep = " "))
  }
  # Pull the ID column names for e_data and f_data.
  edata_cname <- get_edata_cname(omicsData)
  edata_cname_id <- which(names(omicsData$e_data) == edata_cname)
  fdata_cname <- get_fdata_cname(omicsData)
  # Count missing values per sample (column). For seqData zeroes mark
  # missingness; otherwise NAs do.
  if (inherits(omicsData, "seqData")) {
    res_per_col <- colSums((omicsData$e_data[, -edata_cname_id]) == 0)
    res_by_sample <- data.frame(
      "sample_names" = names(omicsData$e_data[, -edata_cname_id]),
      "num_zeros" = as.numeric(res_per_col)
    )
  } else {
    res_per_col <- colSums(is.na(omicsData$e_data[, -edata_cname_id]))
    res_by_sample <- data.frame(
      "sample_names" = names(omicsData$e_data[, -edata_cname_id]),
      "num_NA" = as.numeric(res_per_col)
    )
  }
  names(res_by_sample)[1] <- fdata_cname
  # Merge res_by_sample with f_data to get additional columns of f_data. For
  # example, the Group and VizSampNames columns. Group is used to color the plot
  # and VizSampNames is used to display shorter sample names.
  res_by_sample <- merge(res_by_sample, omicsData$f_data, by = fdata_cname)
  # Check if the group designation function has been run. The group_DF info will
  # be used to add the "Group" column to res_by_sample. This column may contain
  # the same data as another column in f_data but it will have a different name
  # from the f_data column.
  if (!is.null(attr(omicsData, "group_DF"))) {
    res_by_sample <- merge(res_by_sample, attr(omicsData, "group_DF"))
  }
  # Count missing values per molecule (row) and assemble the result list,
  # using seqData-specific element names where appropriate.
  if (inherits(omicsData, "seqData")) {
    res_per_row <- rowSums(omicsData$e_data[, -edata_cname_id] == 0)
    res_by_molecule <- data.frame("molecule" = omicsData$e_data[, edata_cname_id],
                                  "num_zeros" = as.numeric(res_per_row))
    names(res_by_molecule)[1] <- edata_cname
    result <- list("zeros.by.sample" = res_by_sample,
                   "zeros.by.molecule" = res_by_molecule)
  } else {
    res_per_row <- rowSums(is.na(omicsData$e_data[, -edata_cname_id]))
    res_by_molecule <- data.frame("molecule" = omicsData$e_data[, edata_cname_id],
                                  "num_NA" = as.numeric(res_per_row))
    names(res_by_molecule)[1] <- edata_cname
    result <- list("na.by.sample" = res_by_sample,
                   "na.by.molecule" = res_by_molecule)
  }
  # Shared finalization (previously duplicated verbatim in both branches).
  class(result) <- "naRes"
  attr(result, "cnames") <- list("edata_cname" = edata_cname,
                                 "fdata_cname" = fdata_cname)
  return(result)
}
|
/R/missingval_result.R
|
permissive
|
clabornd/pmartR
|
R
| false
| false
| 4,411
|
r
|
#' Creates an object of class naRes (NA Result)
#'
#' This function takes in an omicsData object, and outputs a list of two data
#' frames, one containing the number of missing values by sample, and the other
#' containing the number of missing values by molecule
#'
#' @param omicsData an object of class "pepData", "proData", "metabData",
#' "lipidData", "nmrData", or "seqData", created by \code{\link{as.pepData}},
#' \code{\link{as.proData}}, \code{\link{as.metabData}},
#' \code{\link{as.lipidData}}, \code{\link{as.nmrData}}, or
#' \code{\link{as.seqData}}, respectively.
#'
#' @return S3 object of class naRes, which is a list of two data frames, one
#' containing the number of missing values per sample, and the other
#' containing the number of missing values per molecule. For count data,
#' zeroes represent missing values; for abundance data, NA's represent missing
#' values. This object can be used with 'plot' and 'summary' methods to
#' examine the missing values in the dataset.
#'
#' @examples
#' library(pmartRdata)
#' result1 = missingval_result(omicsData = lipid_neg_object)
#' result2 = missingval_result(omicsData = metab_object)
#'
#' @export
#'
missingval_result <- function(omicsData) {
  # Build an naRes object summarizing missing values per sample and per
  # molecule. For seqData (count data) a zero marks a missing value; for all
  # other (abundance) data types an NA does.
  #
  # omicsData: object of class pepData, proData, lipidData, metabData,
  #   nmrData, or seqData.
  # Returns: S3 object of class "naRes" -- a list of two data frames
  #   (by-sample and by-molecule counts), named with a "zeros." or "na."
  #   prefix depending on the data type.

  # Check for a supported omicsData class.
  if (!inherits(omicsData, c("pepData", "proData", "lipidData",
                             "metabData", "nmrData", "seqData"))) {
    stop(paste("omicsData must have class of the following, 'pepData',",
               "'proData', 'lipidData', 'metabData', 'nmrData', 'seqData'",
               sep = " "))
  }

  # Pull the ID-column names for e_data and f_data from the object.
  edata_cname <- get_edata_cname(omicsData)
  edata_cname_id <- which(names(omicsData$e_data) == edata_cname)
  fdata_cname <- get_fdata_cname(omicsData)

  # Compute the missingness indicator once; both the per-sample and
  # per-molecule tallies below are derived from this single mask, which
  # keeps the two branches of the original logic from drifting apart.
  is_seq <- inherits(omicsData, "seqData")
  values <- omicsData$e_data[, -edata_cname_id]
  missing_mask <- if (is_seq) values == 0 else is.na(values)
  count_label <- if (is_seq) "num_zeros" else "num_NA"

  # Missing values per sample (column).
  res_by_sample <- data.frame(names(values),
                              as.numeric(colSums(missing_mask)))
  names(res_by_sample) <- c(fdata_cname, count_label)

  # Merge res_by_sample with f_data to get additional columns of f_data. For
  # example, the Group and VizSampNames columns. Group is used to color the
  # plot and VizSampNames is used to display shorter sample names.
  res_by_sample <- merge(res_by_sample, omicsData$f_data, by = fdata_cname)

  # If the group designation function has been run, merge in group_DF to add
  # the "Group" column. This column may duplicate data in another f_data
  # column but carries a different name.
  if (!is.null(attr(omicsData, "group_DF"))) {
    res_by_sample <- merge(res_by_sample, attr(omicsData, "group_DF"))
  }

  # Missing values per molecule (row).
  res_by_molecule <- data.frame(omicsData$e_data[, edata_cname_id],
                                as.numeric(rowSums(missing_mask)))
  names(res_by_molecule) <- c(edata_cname, count_label)

  # List element names depend on the data type so downstream plot/summary
  # methods can label output as zeros or NAs.
  result <- if (is_seq) {
    list("zeros.by.sample" = res_by_sample,
         "zeros.by.molecule" = res_by_molecule)
  } else {
    list("na.by.sample" = res_by_sample,
         "na.by.molecule" = res_by_molecule)
  }
  class(result) <- "naRes"
  attr(result, "cnames") <- list("edata_cname" = edata_cname,
                                 "fdata_cname" = fdata_cname)
  return(result)
}
|
# Bar chart of 2013 freight volumes by transport type, sorted descending.
# Assumes 'vantazh' is loaded with columns 'type' and 'y2013'.
ggplot(vantazh, aes(y=y2013, x=reorder(type, -y2013)))+geom_bar(stat="identity", fill="#da2028", width=.5)+
xlab("Види транспорту")+
ylab("Одиниці")+
ggtitle("Заголовок")+
labs(subtitle="Підзаголовок", caption="Дані з data.gov.ua")+
# Custom theme: Roboto typeface, muted gray text, white background,
# thin warm-gray major gridlines only.
theme(
legend.title=element_blank(),
plot.title = element_text(size=20, family="Roboto", face="bold", colour = "#515151", margin=margin(0,0,10,0)),
plot.subtitle = element_text(size=16, family="Roboto", color= "#515151", margin=margin(0,0,25,0)),
plot.caption = element_text(size=12, family="Roboto", color="#515151", face="italic"),
panel.border=element_blank(),
panel.background = element_rect(fill = "#ffffff"),
plot.background = element_rect(fill = "#ffffff"),
axis.title.x=element_text(size=12, family="Roboto", colour = "#515151", margin=margin(15,0,0,0)),
axis.title.y=element_text(size=12, family="Roboto", colour = "#515151", margin=margin(0,10,0,0)),
axis.ticks=element_blank(),
axis.text=element_text(size=10, family="Roboto", colour = "#515151"),
panel.grid.major = element_line(colour = "#E8DCDC", size=.2),
panel.grid.minor = element_blank(),
plot.margin = unit(c(20, 20, 20, 20), "points")
)
# Histogram of deans' salaries in 500-UAH bins.
# Assumes 'salary' is loaded with a numeric column 'dek'.
ggplot(salary, aes(x=dek))+geom_histogram(binwidth=500, col="white", fill="#da2028")+theme_bw()+
ggtitle("Розподіл зарплат декані\nв в університетах")+
labs(caption="Дані від Стадного")+
theme(
plot.title = element_text(size=20, family="Roboto", face="bold", colour = "#515151", margin=margin(0,0,10,0)),
plot.subtitle = element_text(size=16, family="Roboto", color= "#515151", margin=margin(0,0,25,0)),
plot.caption = element_text(size=12, family="Roboto", color="#515151", face="italic", margin=margin(15,0,0,0)),
panel.border=element_blank(),
panel.background = element_rect(fill = "#ffffff"),
plot.background = element_rect(fill = "#ffffff"),
axis.title=element_blank(),
axis.ticks=element_blank(),
axis.text=element_text(size=10, family="Roboto", colour = "#515151"),
panel.grid.major = element_line(colour = "#E8DCDC", size=.2),
panel.grid.minor = element_blank(),
plot.margin = unit(c(20, 20, 20, 20), "points")
# Fixed x-axis breaks every 2000 UAH from 0 to 14000.
)+scale_x_continuous(breaks=seq(0, 14000, by = 2000))
#+coord_flip() - horizontal
|
/bar.R
|
no_license
|
n-mouse/R-templates
|
R
| false
| false
| 2,366
|
r
|
# Bar chart of 2013 freight volumes by transport type, sorted descending.
# Assumes 'vantazh' is loaded with columns 'type' and 'y2013'.
ggplot(vantazh, aes(y=y2013, x=reorder(type, -y2013)))+geom_bar(stat="identity", fill="#da2028", width=.5)+
xlab("Види транспорту")+
ylab("Одиниці")+
ggtitle("Заголовок")+
labs(subtitle="Підзаголовок", caption="Дані з data.gov.ua")+
# Custom theme: Roboto typeface, muted gray text, white background,
# thin warm-gray major gridlines only.
theme(
legend.title=element_blank(),
plot.title = element_text(size=20, family="Roboto", face="bold", colour = "#515151", margin=margin(0,0,10,0)),
plot.subtitle = element_text(size=16, family="Roboto", color= "#515151", margin=margin(0,0,25,0)),
plot.caption = element_text(size=12, family="Roboto", color="#515151", face="italic"),
panel.border=element_blank(),
panel.background = element_rect(fill = "#ffffff"),
plot.background = element_rect(fill = "#ffffff"),
axis.title.x=element_text(size=12, family="Roboto", colour = "#515151", margin=margin(15,0,0,0)),
axis.title.y=element_text(size=12, family="Roboto", colour = "#515151", margin=margin(0,10,0,0)),
axis.ticks=element_blank(),
axis.text=element_text(size=10, family="Roboto", colour = "#515151"),
panel.grid.major = element_line(colour = "#E8DCDC", size=.2),
panel.grid.minor = element_blank(),
plot.margin = unit(c(20, 20, 20, 20), "points")
)
# Histogram of deans' salaries in 500-UAH bins.
# Assumes 'salary' is loaded with a numeric column 'dek'.
ggplot(salary, aes(x=dek))+geom_histogram(binwidth=500, col="white", fill="#da2028")+theme_bw()+
ggtitle("Розподіл зарплат декані\nв в університетах")+
labs(caption="Дані від Стадного")+
theme(
plot.title = element_text(size=20, family="Roboto", face="bold", colour = "#515151", margin=margin(0,0,10,0)),
plot.subtitle = element_text(size=16, family="Roboto", color= "#515151", margin=margin(0,0,25,0)),
plot.caption = element_text(size=12, family="Roboto", color="#515151", face="italic", margin=margin(15,0,0,0)),
panel.border=element_blank(),
panel.background = element_rect(fill = "#ffffff"),
plot.background = element_rect(fill = "#ffffff"),
axis.title=element_blank(),
axis.ticks=element_blank(),
axis.text=element_text(size=10, family="Roboto", colour = "#515151"),
panel.grid.major = element_line(colour = "#E8DCDC", size=.2),
panel.grid.minor = element_blank(),
plot.margin = unit(c(20, 20, 20, 20), "points")
# Fixed x-axis breaks every 2000 UAH from 0 to 14000.
)+scale_x_continuous(breaks=seq(0, 14000, by = 2000))
#+coord_flip() - horizontal
|
#' Use Redundancy Rule to Simulate Ecological Diversification of a Biota.
#'
#' Implement Monte Carlo simulation of a biota undergoing ecological
#' diversification using the redundancy rule.
#'
#' @param nreps Vector of integers (such as a sequence) specifying sample number
#' produced. Only used when function is applied within \code{lapply} or
#' related function. Default \code{nreps = 1} or any other integer produces a
#' single sample.
#' @param Sseed Integer giving number of species (or other taxa) to use at start
#' of simulation.
#' @param ecospace An ecospace framework (functional trait space) of class
#' \code{ecospace}.
#' @param Smax Maximum number of species (or other taxa) to include in
#' simulation.
#' @param strength Strength parameter controlling probability that redundancy
#' rule is followed during simulation. Values must range between
#' \code{strength = 1} (default, rules always implemented) and \code{strength = 0}
#' (rules never implemented).
#'
#' @details Simulations are implemented as Monte Carlo processes in which
#' species are added iteratively to assemblages, with all added species having
#' their character states specified by the model rules, here the 'redundancy'
#' rule. Simulations begin with the seeding of \code{Sseed} number of species,
#' chosen at random (with replacement) from either the species pool (if
#' provided in the \code{weight.file} when building the ecospace framework
#' using \code{create_ecospace}) or following the neutral-rule algorithm (if a
#' pool is not provided). Once seeded, the simulations proceed iteratively
#' (character-by-character, species-by-species) by following the appropriate
#' algorithm, as explained below, until terminated at \code{Smax}.
#'
#' \strong{Redundancy rule algorithm:} Pick one existing species at random and
#' create a new species using that species' characters as a template. A
#' character is modified (using a random multinomial draw from the ecospace
#' framework) according to the \code{strength} parameter. Default
#' \code{strength = 1} always implements the redundancy rule, whereas
#' \code{strength = 0} never implements it (essentially making the simulation
#' follow the \code{\link{neutral}} rule.) Because new character states can be
#' any allowed by the ecospace framework, there is the possibility of
#' obtaining redundancy greater than that specified by a strength parameter
#' less than 1 (if, for example, the new randomly chosen character states are
#' identical to those of the template species).
#'
#' Redundancy rules tend to produce ecospaces with discrete clusters of
#' functionally similar species. Additional details on the redundancy
#' simulation are provided in Novack-Gottshall (2016a,b), including
#' sensitivity to ecospace framework (functional trait space) structure,
#' recommendations for model selection, and basis in ecological and
#' evolutionary theory.
#'
#' @return Returns a data frame with \code{Smax} rows (representing species) and
#' as many columns as specified by number of characters/states (functional
#' traits) in the ecospace framework. Columns will have the same data type
#' (numeric, factor, ordered numeric, or ordered factor) as specified in the
#' ecospace framework.
#'
#' @note The function has been written to allow usage (using
#' \code{\link{lapply}} or some other list-apply function) in 'embarrassingly
#' parallel' implementations in a high-performance computing environment.
#'
#' @author Phil Novack-Gottshall \email{pnovack-gottshall@@ben.edu}
#'
#' @references Bush, A. and P.M. Novack-Gottshall. 2012. Modelling the
#' ecological-functional diversification of marine Metazoa on geological time
#' scales. \emph{Biology Letters} 8: 151-155.
#' @references Novack-Gottshall, P.M. 2016a. General models of ecological
#' diversification. I. Conceptual synthesis. \emph{Paleobiology} 42: 185-208.
#' @references Novack-Gottshall, P.M. 2016b. General models of ecological
#' diversification. II. Simulations and empirical applications.
#' \emph{Paleobiology} 42: 209-239.
#'
#' @seealso \code{\link{create_ecospace}}, \code{\link{neutral}},
#' \code{\link{partitioning}}, \code{\link{expansion}}
#'
#' @examples
#' # Create an ecospace framework with 15 3-state factor characters
#' # Can also accept following character types: "numeric", "ord.num", "ord.fac"
#' nchar <- 15
#' ecospace <- create_ecospace(nchar = nchar, char.state = rep(3, nchar),
#' char.type = rep("factor", nchar))
#'
#' # Single (default) sample produced by redundancy function (with strength = 1):
#' Sseed <- 5
#' Smax <- 50
#' x <- redundancy(Sseed = Sseed, Smax = Smax, ecospace = ecospace)
#' head(x, 10)
#'
#' # Plot results, showing order of assembly
#' # (Seed species in red, next 5 in black, remainder in gray)
#' # Notice the redundancy model produces an ecospace with discrete clusters of life habits
#' seq <- seq(nchar)
#' types <- sapply(seq, function(seq) ecospace[[seq]]$type)
#' if(any(types == "ord.fac" | types == "factor")) pc <- prcomp(FD::gowdis(x)) else
#' pc <- prcomp(x)
#' plot(pc$x, type = "n", main = paste("Redundancy model,\n", Smax, "species"))
#' text(pc$x[,1], pc$x[,2], labels = seq(Smax), col = c(rep("red", Sseed), rep("black", 5),
#' rep("slategray", (Smax - Sseed - 5))), pch = c(rep(19, Sseed), rep(21, (Smax - Sseed))),
#' cex = .8)
#'
#' # Change strength parameter so new species are 95% identical:
#' x <- redundancy(Sseed = Sseed, Smax = Smax, ecospace = ecospace, strength = 0.95)
#' if(any(types == "ord.fac" | types == "factor")) pc <- prcomp(FD::gowdis(x)) else
#' pc <- prcomp(x)
#' plot(pc$x, type = "n", main = paste("Redundancy model,\n", Smax, "species"))
#' text(pc$x[,1], pc$x[,2], labels = seq(Smax), col = c(rep("red", Sseed), rep("black", 5),
#' rep("slategray", (Smax - Sseed - 5))), pch = c(rep(19, Sseed), rep(21, (Smax - Sseed))),
#' cex = .8)
#'
#' # Create 5 samples using multiple nreps and lapply (can be slow)
#' nreps <- 1:5
#' samples <- lapply(X = nreps, FUN = redundancy, Sseed = 5, Smax = 50, ecospace)
#' str(samples)
#'
#' @export
redundancy <- function(nreps = 1, Sseed, Smax, ecospace, strength = 1) {
  # Monte Carlo simulation of ecological diversification under the
  # redundancy rule: seed Sseed species, then iteratively add species that
  # copy a random template's characters, re-drawing each character with
  # probability (1 - strength). 'nreps' is only a dummy index enabling use
  # with lapply(); it does not affect the result.
  #
  # Returns a data frame with Smax rows (species) and one column per
  # character state in the ecospace framework.
  if (strength < 0 || strength > 1)  # scalar condition: use ||, not |
    stop("strength must have a value between 0 and 1\n")
  nchar <- length(ecospace) - 1  # last element of ecospace holds the pool
  seq <- seq_len(nchar)
  pool <- ecospace[[length(ecospace)]]$pool
  # Number of state columns per character (char.space carries 3 extra
  # bookkeeping columns at the end).
  cs <-
    sapply(seq, function(seq)
      ncol(ecospace[[seq]]$char.space) - 3)
  # Start/end column index of each character within the output data frame.
  # seq_len(nchar - 1) replaces the fragile '1:nchar - 1' (which relied on
  # ':' binding tighter than '-' and on zero-index dropping).
  c.start <- c(1, cumsum(cs)[seq_len(nchar - 1)] + 1)
  c.end <- cumsum(cs)
  data <- prep_data(ecospace, Smax)
  for (sp in seq_len(Smax)) {
    if (sp <= Sseed) {
      # Seed species: sample from the species pool when one was supplied to
      # create_ecospace; otherwise draw each character with the neutral-rule
      # multinomial draw from the character space.
      if (!is.logical(pool)) {
        data[sp, ] <- pool[sample2(seq_len(nrow(pool)), 1), ]
      } else {
        for (ch in seq_len(nchar)) {
          c.sp <- ecospace[[ch]]$char.space
          data[sp, c.start[ch]:c.end[ch]] <-
            c.sp[c.sp[(rmultinom(1, 1, prob = c.sp$pro) == 1), ncol(c.sp)], seq_len(cs[ch])]
        }
      }
    } else {
      # Redundancy rule: pick a random existing species as template; keep
      # each of its characters with probability 'strength', otherwise
      # re-draw that character from the ecospace framework.
      pick <- sample2(seq_len(sp - 1), 1)
      for (ch in seq_len(nchar)) {
        if (runif(1, 0, 1) <= strength) {
          data[sp, c.start[ch]:c.end[ch]] <- data[pick, c.start[ch]:c.end[ch]]
        } else {
          c.sp <- ecospace[[ch]]$char.space
          data[sp, c.start[ch]:c.end[ch]] <-
            c.sp[c.sp[(rmultinom(1, 1, prob = c.sp$pro) == 1), ncol(c.sp)], seq_len(cs[ch])]
        }
      }
    }
  }
  return(data)
}
|
/R/redundancy.R
|
no_license
|
cran/ecospace
|
R
| false
| false
| 7,755
|
r
|
#' Use Redundancy Rule to Simulate Ecological Diversification of a Biota.
#'
#' Implement Monte Carlo simulation of a biota undergoing ecological
#' diversification using the redundancy rule.
#'
#' @param nreps Vector of integers (such as a sequence) specifying sample number
#' produced. Only used when function is applied within \code{lapply} or
#' related function. Default \code{nreps = 1} or any other integer produces a
#' single sample.
#' @param Sseed Integer giving number of species (or other taxa) to use at start
#' of simulation.
#' @param ecospace An ecospace framework (functional trait space) of class
#' \code{ecospace}.
#' @param Smax Maximum number of species (or other taxa) to include in
#' simulation.
#' @param strength Strength parameter controlling probability that redundancy
#' rule is followed during simulation. Values must range between
#' \code{strength = 1} (default, rules always implemented) and \code{strength = 0}
#' (rules never implemented).
#'
#' @details Simulations are implemented as Monte Carlo processes in which
#' species are added iteratively to assemblages, with all added species having
#' their character states specified by the model rules, here the 'redundancy'
#' rule. Simulations begin with the seeding of \code{Sseed} number of species,
#' chosen at random (with replacement) from either the species pool (if
#' provided in the \code{weight.file} when building the ecospace framework
#' using \code{create_ecospace}) or following the neutral-rule algorithm (if a
#' pool is not provided). Once seeded, the simulations proceed iteratively
#' (character-by-character, species-by-species) by following the appropriate
#' algorithm, as explained below, until terminated at \code{Smax}.
#'
#' \strong{Redundancy rule algorithm:} Pick one existing species at random and
#' create a new species using that species' characters as a template. A
#' character is modified (using a random multinomial draw from the ecospace
#' framework) according to the \code{strength} parameter. Default
#' \code{strength = 1} always implements the redundancy rule, whereas
#' \code{strength = 0} never implements it (essentially making the simulation
#' follow the \code{\link{neutral}} rule.) Because new character states can be
#' any allowed by the ecospace framework, there is the possibility of
#' obtaining redundancy greater than that specified by a strength parameter
#' less than 1 (if, for example, the new randomly chosen character states are
#' identical to those of the template species).
#'
#' Redundancy rules tend to produce ecospaces with discrete clusters of
#' functionally similar species. Additional details on the redundancy
#' simulation are provided in Novack-Gottshall (2016a,b), including
#' sensitivity to ecospace framework (functional trait space) structure,
#' recommendations for model selection, and basis in ecological and
#' evolutionary theory.
#'
#' @return Returns a data frame with \code{Smax} rows (representing species) and
#' as many columns as specified by number of characters/states (functional
#' traits) in the ecospace framework. Columns will have the same data type
#' (numeric, factor, ordered numeric, or ordered factor) as specified in the
#' ecospace framework.
#'
#' @note The function has been written to allow usage (using
#' \code{\link{lapply}} or some other list-apply function) in 'embarrassingly
#' parallel' implementations in a high-performance computing environment.
#'
#' @author Phil Novack-Gottshall \email{pnovack-gottshall@@ben.edu}
#'
#' @references Bush, A. and P.M. Novack-Gottshall. 2012. Modelling the
#' ecological-functional diversification of marine Metazoa on geological time
#' scales. \emph{Biology Letters} 8: 151-155.
#' @references Novack-Gottshall, P.M. 2016a. General models of ecological
#' diversification. I. Conceptual synthesis. \emph{Paleobiology} 42: 185-208.
#' @references Novack-Gottshall, P.M. 2016b. General models of ecological
#' diversification. II. Simulations and empirical applications.
#' \emph{Paleobiology} 42: 209-239.
#'
#' @seealso \code{\link{create_ecospace}}, \code{\link{neutral}},
#' \code{\link{partitioning}}, \code{\link{expansion}}
#'
#' @examples
#' # Create an ecospace framework with 15 3-state factor characters
#' # Can also accept following character types: "numeric", "ord.num", "ord.fac"
#' nchar <- 15
#' ecospace <- create_ecospace(nchar = nchar, char.state = rep(3, nchar),
#' char.type = rep("factor", nchar))
#'
#' # Single (default) sample produced by redundancy function (with strength = 1):
#' Sseed <- 5
#' Smax <- 50
#' x <- redundancy(Sseed = Sseed, Smax = Smax, ecospace = ecospace)
#' head(x, 10)
#'
#' # Plot results, showing order of assembly
#' # (Seed species in red, next 5 in black, remainder in gray)
#' # Notice the redundancy model produces an ecospace with discrete clusters of life habits
#' seq <- seq(nchar)
#' types <- sapply(seq, function(seq) ecospace[[seq]]$type)
#' if(any(types == "ord.fac" | types == "factor")) pc <- prcomp(FD::gowdis(x)) else
#' pc <- prcomp(x)
#' plot(pc$x, type = "n", main = paste("Redundancy model,\n", Smax, "species"))
#' text(pc$x[,1], pc$x[,2], labels = seq(Smax), col = c(rep("red", Sseed), rep("black", 5),
#' rep("slategray", (Smax - Sseed - 5))), pch = c(rep(19, Sseed), rep(21, (Smax - Sseed))),
#' cex = .8)
#'
#' # Change strength parameter so new species are 95% identical:
#' x <- redundancy(Sseed = Sseed, Smax = Smax, ecospace = ecospace, strength = 0.95)
#' if(any(types == "ord.fac" | types == "factor")) pc <- prcomp(FD::gowdis(x)) else
#' pc <- prcomp(x)
#' plot(pc$x, type = "n", main = paste("Redundancy model,\n", Smax, "species"))
#' text(pc$x[,1], pc$x[,2], labels = seq(Smax), col = c(rep("red", Sseed), rep("black", 5),
#' rep("slategray", (Smax - Sseed - 5))), pch = c(rep(19, Sseed), rep(21, (Smax - Sseed))),
#' cex = .8)
#'
#' # Create 5 samples using multiple nreps and lapply (can be slow)
#' nreps <- 1:5
#' samples <- lapply(X = nreps, FUN = redundancy, Sseed = 5, Smax = 50, ecospace)
#' str(samples)
#'
#' @export
redundancy <- function(nreps = 1, Sseed, Smax, ecospace, strength = 1) {
  # Monte Carlo simulation of ecological diversification under the
  # redundancy rule: seed Sseed species, then iteratively add species that
  # copy a random template's characters, re-drawing each character with
  # probability (1 - strength). 'nreps' is only a dummy index enabling use
  # with lapply(); it does not affect the result.
  #
  # Returns a data frame with Smax rows (species) and one column per
  # character state in the ecospace framework.
  if (strength < 0 || strength > 1)  # scalar condition: use ||, not |
    stop("strength must have a value between 0 and 1\n")
  nchar <- length(ecospace) - 1  # last element of ecospace holds the pool
  seq <- seq_len(nchar)
  pool <- ecospace[[length(ecospace)]]$pool
  # Number of state columns per character (char.space carries 3 extra
  # bookkeeping columns at the end).
  cs <-
    sapply(seq, function(seq)
      ncol(ecospace[[seq]]$char.space) - 3)
  # Start/end column index of each character within the output data frame.
  # seq_len(nchar - 1) replaces the fragile '1:nchar - 1' (which relied on
  # ':' binding tighter than '-' and on zero-index dropping).
  c.start <- c(1, cumsum(cs)[seq_len(nchar - 1)] + 1)
  c.end <- cumsum(cs)
  data <- prep_data(ecospace, Smax)
  for (sp in seq_len(Smax)) {
    if (sp <= Sseed) {
      # Seed species: sample from the species pool when one was supplied to
      # create_ecospace; otherwise draw each character with the neutral-rule
      # multinomial draw from the character space.
      if (!is.logical(pool)) {
        data[sp, ] <- pool[sample2(seq_len(nrow(pool)), 1), ]
      } else {
        for (ch in seq_len(nchar)) {
          c.sp <- ecospace[[ch]]$char.space
          data[sp, c.start[ch]:c.end[ch]] <-
            c.sp[c.sp[(rmultinom(1, 1, prob = c.sp$pro) == 1), ncol(c.sp)], seq_len(cs[ch])]
        }
      }
    } else {
      # Redundancy rule: pick a random existing species as template; keep
      # each of its characters with probability 'strength', otherwise
      # re-draw that character from the ecospace framework.
      pick <- sample2(seq_len(sp - 1), 1)
      for (ch in seq_len(nchar)) {
        if (runif(1, 0, 1) <= strength) {
          data[sp, c.start[ch]:c.end[ch]] <- data[pick, c.start[ch]:c.end[ch]]
        } else {
          c.sp <- ecospace[[ch]]$char.space
          data[sp, c.start[ch]:c.end[ch]] <-
            c.sp[c.sp[(rmultinom(1, 1, prob = c.sp$pro) == 1), ncol(c.sp)], seq_len(cs[ch])]
        }
      }
    }
  }
  return(data)
}
|
# Overlap of two densities of circular distributions
# Use ants to test
# dat_common should be loaded
library(circular)
library(sfsmisc)
# test with two time datasets
# Subset to one site/species and two chambers; compare their activity times.
testdat <- dat_common %>%
filter(site == 'Duke', chamber %in% c(2,3), sp == 'apru')
x1 <- testdat$time[testdat$chamber == 2]
x2 <- testdat$time[testdat$chamber == 3]
# Quick look at the linear (non-circular) kernel densities.
plot(density(x1, bw = 1))
plot(density(x2, bw = 1))
# Convert x1 and x2 to circular objects
x1c <- circular(x1, units = 'hours', template = 'clock24')
x2c <- circular(x2, units = 'hours', template = 'clock24')
plot(x1c, stack = TRUE, shrink = 2)
plot(x2c, stack = TRUE, shrink = 2)
# Circular kernel densities with a wide bandwidth for smoothing.
bandwidth <- 12
plot(density(x1c, bw = bandwidth), shrink = 2)
plot(density(x2c, bw = bandwidth), shrink = 2)
x1d <- density(x1c, bw = bandwidth)
x2d <- density(x2c, bw = bandwidth)
# Overlap coefficient: pointwise minimum of the two density curves,
# integrated, scaled by the summed areas under both curves.
# NOTE(review): this takes pmin of y-values on x1d's x-grid -- assumes both
# density objects share the same evaluation grid; confirm before reuse.
w <- pmin(x1d$y, x2d$y)
total <- integrate.xy(x1d$x, x1d$y) + integrate.xy(x2d$x, x2d$y)
intersection <- integrate.xy(x1d$x, w)
( overlap <- 2 * intersection / total )
# Compare against the (externally defined) linear overlap function.
pairwise_overlap(x1, x2, bw = 1)
pairwise_overlap(x1, x2)
circular_overlap <- function(a, b, norm = TRUE, bw, n = NULL) {
  # Compute the overlap coefficient of two kernel density estimates.
  #
  # a, b:  numeric vectors of observations (times of day, in hours).
  # norm:  if FALSE, weight each density by its sample size so the overlap
  #        reflects abundance as well as shape.
  # bw:    kernel bandwidth (must be specified).
  # n:     number of density evaluation points (default 512).
  #
  # Returns a named numeric vector: the total overlap coefficient plus the
  # intersection area as a fraction of each curve's own area.

  # clean input: drop NAs and coerce to plain numeric
  a <- as.numeric(na.omit(a))
  b <- as.numeric(na.omit(b))
  # convert input to circular
  # BUG FIX: bcirc previously duplicated 'a' instead of converting 'b'.
  # NOTE(review): acirc/bcirc are not used below -- the densities are
  # computed on the raw linear values, so wrap-around at midnight is not
  # handled. TODO confirm whether density() was meant to use the circular
  # objects instead.
  acirc <- circular(a, units = 'hours', template = 'clock24')
  bcirc <- circular(b, units = 'hours', template = 'clock24')
  # generate kernel densities; bandwidth must be user-specified
  if (is.null(n)) n <- 512  # default grid size if not given
  da <- density(a, bw = bw, n = n)
  db <- density(b, bw = bw, n = n)
  # NOTE(review): da$x and db$x are separate grids; db$y is paired with
  # da$x here, matching the original behavior -- verify this is intended.
  d <- data.frame(x = da$x, a = da$y, b = db$y)
  # If not normalized, multiply each density entry by the sample size
  if (!norm) {
    d$a <- d$a * length(a)
    d$b <- d$b * length(b)
  }
  # intersection density: pointwise minimum of the two curves
  d$w <- pmin(d$a, d$b)
  # integrate areas under curves
  integral_a <- sfsmisc::integrate.xy(d$x, d$a)
  integral_b <- sfsmisc::integrate.xy(d$x, d$b)
  total <- integral_a + integral_b
  intersection <- sfsmisc::integrate.xy(d$x, d$w)
  # overlap coefficient: twice the intersection over the summed areas,
  # plus the per-curve fractions
  overlap <- 2 * intersection / total
  overlap_a <- intersection / integral_a
  overlap_b <- intersection / integral_b
  return(c(overlap = overlap, overlap_a = overlap_a, overlap_b = overlap_b))
}
# Try the overlap function on the two chamber time vectors.
circular_overlap(x1, x2, bw = 0.3)
# Watson two sample test --------------------------------------------------
# This is the analog of the K-S test for circular data. Are the two distributions the same?
# It is implemented in circular::watson.two
watson.two.test(x = x1c, y = x2c)
# If test stat is greater than the critical value, we reject H0 that they are from the same distribution.
|
/deprecated/testcircular.R
|
no_license
|
maryglover/anttime
|
R
| false
| false
| 2,664
|
r
|
# Overlap of two densities of circular distributions
# Use ants to test
# dat_common should be loaded
library(circular)
library(sfsmisc)
# test with two time datasets
# Subset to one site/species and two chambers; compare their activity times.
testdat <- dat_common %>%
filter(site == 'Duke', chamber %in% c(2,3), sp == 'apru')
x1 <- testdat$time[testdat$chamber == 2]
x2 <- testdat$time[testdat$chamber == 3]
# Quick look at the linear (non-circular) kernel densities.
plot(density(x1, bw = 1))
plot(density(x2, bw = 1))
# Convert x1 and x2 to circular objects
x1c <- circular(x1, units = 'hours', template = 'clock24')
x2c <- circular(x2, units = 'hours', template = 'clock24')
plot(x1c, stack = TRUE, shrink = 2)
plot(x2c, stack = TRUE, shrink = 2)
# Circular kernel densities with a wide bandwidth for smoothing.
bandwidth <- 12
plot(density(x1c, bw = bandwidth), shrink = 2)
plot(density(x2c, bw = bandwidth), shrink = 2)
x1d <- density(x1c, bw = bandwidth)
x2d <- density(x2c, bw = bandwidth)
# Overlap coefficient: pointwise minimum of the two density curves,
# integrated, scaled by the summed areas under both curves.
# NOTE(review): this takes pmin of y-values on x1d's x-grid -- assumes both
# density objects share the same evaluation grid; confirm before reuse.
w <- pmin(x1d$y, x2d$y)
total <- integrate.xy(x1d$x, x1d$y) + integrate.xy(x2d$x, x2d$y)
intersection <- integrate.xy(x1d$x, w)
( overlap <- 2 * intersection / total )
# Compare against the (externally defined) linear overlap function.
pairwise_overlap(x1, x2, bw = 1)
pairwise_overlap(x1, x2)
circular_overlap <- function(a, b, norm = TRUE, bw, n = NULL) {
  # Compute the overlap coefficient of two kernel density estimates.
  #
  # a, b:  numeric vectors of observations (times of day, in hours).
  # norm:  if FALSE, weight each density by its sample size so the overlap
  #        reflects abundance as well as shape.
  # bw:    kernel bandwidth (must be specified).
  # n:     number of density evaluation points (default 512).
  #
  # Returns a named numeric vector: the total overlap coefficient plus the
  # intersection area as a fraction of each curve's own area.

  # clean input: drop NAs and coerce to plain numeric
  a <- as.numeric(na.omit(a))
  b <- as.numeric(na.omit(b))
  # convert input to circular
  # BUG FIX: bcirc previously duplicated 'a' instead of converting 'b'.
  # NOTE(review): acirc/bcirc are not used below -- the densities are
  # computed on the raw linear values, so wrap-around at midnight is not
  # handled. TODO confirm whether density() was meant to use the circular
  # objects instead.
  acirc <- circular(a, units = 'hours', template = 'clock24')
  bcirc <- circular(b, units = 'hours', template = 'clock24')
  # generate kernel densities; bandwidth must be user-specified
  if (is.null(n)) n <- 512  # default grid size if not given
  da <- density(a, bw = bw, n = n)
  db <- density(b, bw = bw, n = n)
  # NOTE(review): da$x and db$x are separate grids; db$y is paired with
  # da$x here, matching the original behavior -- verify this is intended.
  d <- data.frame(x = da$x, a = da$y, b = db$y)
  # If not normalized, multiply each density entry by the sample size
  if (!norm) {
    d$a <- d$a * length(a)
    d$b <- d$b * length(b)
  }
  # intersection density: pointwise minimum of the two curves
  d$w <- pmin(d$a, d$b)
  # integrate areas under curves
  integral_a <- sfsmisc::integrate.xy(d$x, d$a)
  integral_b <- sfsmisc::integrate.xy(d$x, d$b)
  total <- integral_a + integral_b
  intersection <- sfsmisc::integrate.xy(d$x, d$w)
  # overlap coefficient: twice the intersection over the summed areas,
  # plus the per-curve fractions
  overlap <- 2 * intersection / total
  overlap_a <- intersection / integral_a
  overlap_b <- intersection / integral_b
  return(c(overlap = overlap, overlap_a = overlap_a, overlap_b = overlap_b))
}
# Try the overlap function on the two chamber time vectors.
circular_overlap(x1, x2, bw = 0.3)
# Watson two sample test --------------------------------------------------
# This is the analog of the K-S test for circular data. Are the two distributions the same?
# It is implemented in circular::watson.two
watson.two.test(x = x1c, y = x2c)
# If test stat is greater than the critical value, we reject H0 that they are from the same distribution.
|
# Compare mean expression vs. network degree and regulatory distance for the
# control (mock) and morphine single-cell neuron datasets, then save a 2x2
# scatter panel with Spearman correlations as subtitles.
library('Matrix')
library(igraph)
library(ggplot2)
library(patchwork)
# Control dataset: per-gene mean expression.
eX <- readMM('datasets/morphineNeurons/mock/control.mtx')
rownames(eX) <- readLines('datasets/morphineNeurons/mock/controlGenes.tsv')
eX <- rowMeans(eX)
# Morphine dataset: per-gene mean expression.
eY <- readMM('datasets/morphineNeurons/morphine/morphine.mtx')
rownames(eY) <- readLines('datasets/morphineNeurons/morphine/morphineGenes.tsv')
eY <- rowMeans(eY)
# Control network: adjacency from tensor output, reduced to per-gene degree.
nX <- readMM('results/tensorOutput/X_10X500morphineNeuron_Itensor.mtx')
rownames(nX) <- readLines('results/tensorOutput/genes_10X500morphineNeuron_Itensor.mtx')
colnames(nX) <- readLines('results/tensorOutput/genes_10X500morphineNeuron_Itensor.mtx')
nX <- graph_from_adjacency_matrix(nX!=0)
nX <- degree(nX)
# Morphine network: same reduction to per-gene degree.
nY <- readMM('results/tensorOutput/Y_10X500morphineNeuron_Itensor.mtx')
rownames(nY) <- readLines('results/tensorOutput/genes_10X500morphineNeuron_Itensor.mtx')
colnames(nY) <- readLines('results/tensorOutput/genes_10X500morphineNeuron_Itensor.mtx')
nY <- graph_from_adjacency_matrix(nY!=0)
nY <- degree(nY)
# Alignment distances per gene from the manifold-alignment step.
O <- read.csv('results/sym10X500morphineNeuron_Itensor_Dalignment.csv')
Z <- O$distance
names(Z) <- O$gene
# Keep only genes present in all five vectors, drop zero-degree genes,
# then log-transform everything for plotting.
sharedGenes <- intersect(intersect(intersect(names(eX), names(nX)), intersect(names(eY), names(nY))), names(Z))
DF <- data.frame(eX = eX[sharedGenes], eY = eY[sharedGenes], nX = nX[sharedGenes], nY = nY[sharedGenes], Z = Z[sharedGenes])
DF <- DF[(DF[,3] != 0) & (DF[,4] != 0),]
DF$eX <- log1p(DF$eX)
DF$eY <- log1p(DF$eY)
DF$nX <- log10(DF$nX)
DF$nY <- log10(DF$nY)
DF$Z <- log10(DF$Z)
# Panel A: control expression vs. degree (density-colored points,
# Spearman rho in the subtitle).
A <- ggplot(DF, mapping = aes(eX, nX)) +
geom_point(col = densCols(DF$eX, DF$nX, colramp = hcl.colors), cex = 0.5) +
theme_bw() +
xlab(expression(log(Average~Expression + 1))) +
ylab(expression(log[10](Degree))) +
labs(title = 'Control', subtitle = parse(text = paste0('rho == ', round(cor(DF$eX, DF$nX, method = 'sp'),3)))) +
theme(plot.title = element_text(face = 2))
# Panel B: morphine expression vs. degree.
B <- ggplot(DF, mapping = aes(eY, nY)) +
geom_point(col = densCols(DF$eY, DF$nY, colramp = hcl.colors), cex = 0.5) +
theme_bw() +
xlab(expression(log(Average~Expression + 1))) +
ylab(expression(log[10](Degree))) +
labs(title = 'Morphine', subtitle = parse(text = paste0('rho == ', round(cor(DF$eY, DF$nY, method = 'sp'),3)))) +
theme(plot.title = element_text(face = 2))
# Panel C: control expression vs. alignment distance.
C <- ggplot(DF, mapping = aes(eX, Z)) +
geom_point(col = densCols(DF$eX, DF$Z, colramp = hcl.colors), cex = 0.5) +
theme_bw() +
labs(subtitle = parse(text = paste0('rho == ', round(cor(DF$eX, DF$Z, method = 'sp'),3)))) +
xlab(expression(log(Average~Expression + 1))) +
ylab(expression(log[10](Distance)))
# Panel D: morphine expression vs. alignment distance.
D <- ggplot(DF, mapping = aes(eY, Z)) + geom_point(col = densCols(DF$eY, DF$Z, colramp = hcl.colors), cex = 0.5) +
theme_bw() +
labs(subtitle = parse(text = paste0('rho == ', round(cor(DF$eY, DF$Z, method = 'sp'),3)))) +
xlab(expression(log(Average~Expression + 1))) +
ylab(expression(log[10](Distance)))
# Assemble the 2x2 panel with patchwork and write to PNG.
png('EDComparison.png', width = 1500, height = 1500, res = 300)
A + B + C + D
dev.off()
|
/inst/manuscript/EDcomparison.R
|
no_license
|
Leonrunning/scTenifoldNet
|
R
| false
| false
| 3,004
|
r
|
# Compare mean expression vs. network degree and regulatory distance for the
# control (mock) and morphine single-cell neuron datasets, then save a 2x2
# scatter panel with Spearman correlations as subtitles.
library('Matrix')
library(igraph)
library(ggplot2)
library(patchwork)
# Control dataset: per-gene mean expression.
eX <- readMM('datasets/morphineNeurons/mock/control.mtx')
rownames(eX) <- readLines('datasets/morphineNeurons/mock/controlGenes.tsv')
eX <- rowMeans(eX)
# Morphine dataset: per-gene mean expression.
eY <- readMM('datasets/morphineNeurons/morphine/morphine.mtx')
rownames(eY) <- readLines('datasets/morphineNeurons/morphine/morphineGenes.tsv')
eY <- rowMeans(eY)
# Control network: adjacency from tensor output, reduced to per-gene degree.
nX <- readMM('results/tensorOutput/X_10X500morphineNeuron_Itensor.mtx')
rownames(nX) <- readLines('results/tensorOutput/genes_10X500morphineNeuron_Itensor.mtx')
colnames(nX) <- readLines('results/tensorOutput/genes_10X500morphineNeuron_Itensor.mtx')
nX <- graph_from_adjacency_matrix(nX!=0)
nX <- degree(nX)
# Morphine network: same reduction to per-gene degree.
nY <- readMM('results/tensorOutput/Y_10X500morphineNeuron_Itensor.mtx')
rownames(nY) <- readLines('results/tensorOutput/genes_10X500morphineNeuron_Itensor.mtx')
colnames(nY) <- readLines('results/tensorOutput/genes_10X500morphineNeuron_Itensor.mtx')
nY <- graph_from_adjacency_matrix(nY!=0)
nY <- degree(nY)
# Alignment distances per gene from the manifold-alignment step.
O <- read.csv('results/sym10X500morphineNeuron_Itensor_Dalignment.csv')
Z <- O$distance
names(Z) <- O$gene
# Keep only genes present in all five vectors, drop zero-degree genes,
# then log-transform everything for plotting.
sharedGenes <- intersect(intersect(intersect(names(eX), names(nX)), intersect(names(eY), names(nY))), names(Z))
DF <- data.frame(eX = eX[sharedGenes], eY = eY[sharedGenes], nX = nX[sharedGenes], nY = nY[sharedGenes], Z = Z[sharedGenes])
DF <- DF[(DF[,3] != 0) & (DF[,4] != 0),]
DF$eX <- log1p(DF$eX)
DF$eY <- log1p(DF$eY)
DF$nX <- log10(DF$nX)
DF$nY <- log10(DF$nY)
DF$Z <- log10(DF$Z)
# Panel A: control expression vs. degree (density-colored points,
# Spearman rho in the subtitle).
A <- ggplot(DF, mapping = aes(eX, nX)) +
geom_point(col = densCols(DF$eX, DF$nX, colramp = hcl.colors), cex = 0.5) +
theme_bw() +
xlab(expression(log(Average~Expression + 1))) +
ylab(expression(log[10](Degree))) +
labs(title = 'Control', subtitle = parse(text = paste0('rho == ', round(cor(DF$eX, DF$nX, method = 'sp'),3)))) +
theme(plot.title = element_text(face = 2))
# Panel B: morphine expression vs. degree.
B <- ggplot(DF, mapping = aes(eY, nY)) +
geom_point(col = densCols(DF$eY, DF$nY, colramp = hcl.colors), cex = 0.5) +
theme_bw() +
xlab(expression(log(Average~Expression + 1))) +
ylab(expression(log[10](Degree))) +
labs(title = 'Morphine', subtitle = parse(text = paste0('rho == ', round(cor(DF$eY, DF$nY, method = 'sp'),3)))) +
theme(plot.title = element_text(face = 2))
# Panel C: control expression vs. alignment distance.
C <- ggplot(DF, mapping = aes(eX, Z)) +
geom_point(col = densCols(DF$eX, DF$Z, colramp = hcl.colors), cex = 0.5) +
theme_bw() +
labs(subtitle = parse(text = paste0('rho == ', round(cor(DF$eX, DF$Z, method = 'sp'),3)))) +
xlab(expression(log(Average~Expression + 1))) +
ylab(expression(log[10](Distance)))
# Panel D: morphine expression vs. alignment distance.
D <- ggplot(DF, mapping = aes(eY, Z)) + geom_point(col = densCols(DF$eY, DF$Z, colramp = hcl.colors), cex = 0.5) +
theme_bw() +
labs(subtitle = parse(text = paste0('rho == ', round(cor(DF$eY, DF$Z, method = 'sp'),3)))) +
xlab(expression(log(Average~Expression + 1))) +
ylab(expression(log[10](Distance)))
# Assemble the 2x2 panel with patchwork and write to PNG.
png('EDComparison.png', width = 1500, height = 1500, res = 300)
A + B + C + D
dev.off()
|
#' calculate_primary_doctor
#'
#' Determine the primary doctor between two candidate NPIs by filtering the
#' `npi_full` table against optional group, class, and specialization
#' requirements. Requirement strings may contain several alternatives
#' separated by "|||".
#'
#' @param doctor_npi1 NPI of the first candidate doctor; a value of 0 means
#'   no doctor was specified.
#' @param doctor_npi2 NPI of the second candidate doctor, or `NA` when only
#'   one candidate exists.
#' @param group_reqs Optional group requirement(s), or `NA` for none.
#' @param class_reqs Optional "|||"-separated class requirement(s), or `NA`.
#' @param specialization_reqs Optional "|||"-separated specialization
#'   requirement(s), or `NA`.
#'
#' @return The matched doctor's `clean_name`, or one of the sentinel strings
#'   "NOT SPECIFIED", "NONE_FIT_SPEC_REQ", "TWO_FIT_ALL_SPECS", or
#'   "BOTH_DOC_FAIL_CRIT".
#' @export
#'
#' @examples
#' # calculate_primary_doctor("1437348943", "1306945308",
#' #                          class_reqs = "Internal Medicine",
#' #                          specialization_reqs = "Gastroenterology")
calculate_primary_doctor <- function(doctor_npi1,
                                     doctor_npi2,
                                     group_reqs=NA,
                                     class_reqs=NA,
                                     specialization_reqs=NA){
  if(doctor_npi1==0){
    return("NOT SPECIFIED")
  }
  # Only one candidate: return its name directly.
  # NOTE(review): `npi_full` looks like a data.table defined elsewhere in the
  # package -- confirm it is in scope wherever this is called.
  if(is.na(doctor_npi2)){
    return(npi_full[npi==doctor_npi1] %>% .[["clean_name"]])
  }
  # Split "|||"-delimited requirement strings into character vectors
  # (NA inputs pass through unchanged as a single NA element).
  class_reqs <- class_reqs %>% stri_split_regex("\\|\\|\\|") %>% unlist()
  specialization_reqs <- specialization_reqs %>% stri_split_regex("\\|\\|\\|") %>% unlist()
  npi_subset <- npi_full[npi %in% c(doctor_npi1, doctor_npi2)]
  # all(is.na(...)) keeps these conditions scalar even when a requirement
  # split into multiple alternatives; a length > 1 `if` condition is an
  # error in R >= 4.2 (and previously only tested the first element).
  if(!all(is.na(group_reqs))){
    npi_subset <- npi_subset[osa_group %in% group_reqs]
  }
  if(!all(is.na(class_reqs))){
    npi_subset <- npi_subset[osa_class %in% class_reqs]
  }
  primary_provider <- npi_subset[["clean_name"]]
  if(length(primary_provider) > 1){
    print("multiple meet class req")
    # More than one candidate passed the group/class filters: break the tie
    # on specialization, if any specialization requirement was supplied.
    if(!all(is.na(specialization_reqs))){
      npi_subset <- npi_subset[stri_trim_both(osa_specialization) %in% specialization_reqs]
    }
    primary_provider2 <- npi_subset[["clean_name"]]
    print(primary_provider2)
    if(is.null(primary_provider2) || length(primary_provider2)==0){
      return("NONE_FIT_SPEC_REQ")
    }else if(length(primary_provider2)==1){
      return(primary_provider2)
    }else{
      return("TWO_FIT_ALL_SPECS")
    }
  }else if(length(primary_provider)==1){
    return(primary_provider)
  }else{
    # Neither candidate survived the group/class filters.
    return("BOTH_DOC_FAIL_CRIT")
  }
}
# calculate_primary_doctor(doctor_npi1="1437348943",
# doctor_npi2="1306945308",
# class_reqs="Internal Medicine",
# specialization_reqs="Gastroenterology"
# ) %>% print()
# #
# calculate_primary_doctor(doctor_npi1="1437348943",
# doctor_npi2="1457493967",
# class_reqs="Internal Medicine",
# specialization_reqs="Gastroenterology"
# ) %>% print()
#
# calculate_primary_doctor(doctor_npi1="1275514499",
# doctor_npi2="1649374216",
# class_reqs="Internal Medicine",
# specialization_reqs="Gastroenterology"
# ) %>% print()
#
# calculate_primary_doctor(doctor_npi1="1215969837",
# doctor_npi2="1174539449",
# class_reqs="Internal Medicine",
# specialization_reqs="Gastroenterology"
# ) %>% print()
#
# calculate_primary_doctor(doctor_npi1="1073653036",
# doctor_npi2="1225473671",
# class_reqs="Internal Medicine",
# specialization_reqs="Gastroenterology"
# ) %>% print()
|
/R/calculate_primary_doctor.R
|
no_license
|
utah-osa/hcctools2
|
R
| false
| false
| 3,565
|
r
|
#' calculate_primary_doctor
#'
#' Determine the primary doctor between two candidate NPIs by filtering the
#' `npi_full` table against optional group, class, and specialization
#' requirements. Requirement strings may contain several alternatives
#' separated by "|||".
#'
#' @param doctor_npi1 NPI of the first candidate doctor; a value of 0 means
#'   no doctor was specified.
#' @param doctor_npi2 NPI of the second candidate doctor, or `NA` when only
#'   one candidate exists.
#' @param group_reqs Optional group requirement(s), or `NA` for none.
#' @param class_reqs Optional "|||"-separated class requirement(s), or `NA`.
#' @param specialization_reqs Optional "|||"-separated specialization
#'   requirement(s), or `NA`.
#'
#' @return The matched doctor's `clean_name`, or one of the sentinel strings
#'   "NOT SPECIFIED", "NONE_FIT_SPEC_REQ", "TWO_FIT_ALL_SPECS", or
#'   "BOTH_DOC_FAIL_CRIT".
#' @export
#'
#' @examples
#' # calculate_primary_doctor("1437348943", "1306945308",
#' #                          class_reqs = "Internal Medicine",
#' #                          specialization_reqs = "Gastroenterology")
calculate_primary_doctor <- function(doctor_npi1,
                                     doctor_npi2,
                                     group_reqs=NA,
                                     class_reqs=NA,
                                     specialization_reqs=NA){
  if(doctor_npi1==0){
    return("NOT SPECIFIED")
  }
  # Only one candidate: return its name directly.
  # NOTE(review): `npi_full` looks like a data.table defined elsewhere in the
  # package -- confirm it is in scope wherever this is called.
  if(is.na(doctor_npi2)){
    return(npi_full[npi==doctor_npi1] %>% .[["clean_name"]])
  }
  # Split "|||"-delimited requirement strings into character vectors
  # (NA inputs pass through unchanged as a single NA element).
  class_reqs <- class_reqs %>% stri_split_regex("\\|\\|\\|") %>% unlist()
  specialization_reqs <- specialization_reqs %>% stri_split_regex("\\|\\|\\|") %>% unlist()
  npi_subset <- npi_full[npi %in% c(doctor_npi1, doctor_npi2)]
  # all(is.na(...)) keeps these conditions scalar even when a requirement
  # split into multiple alternatives; a length > 1 `if` condition is an
  # error in R >= 4.2 (and previously only tested the first element).
  if(!all(is.na(group_reqs))){
    npi_subset <- npi_subset[osa_group %in% group_reqs]
  }
  if(!all(is.na(class_reqs))){
    npi_subset <- npi_subset[osa_class %in% class_reqs]
  }
  primary_provider <- npi_subset[["clean_name"]]
  if(length(primary_provider) > 1){
    print("multiple meet class req")
    # More than one candidate passed the group/class filters: break the tie
    # on specialization, if any specialization requirement was supplied.
    if(!all(is.na(specialization_reqs))){
      npi_subset <- npi_subset[stri_trim_both(osa_specialization) %in% specialization_reqs]
    }
    primary_provider2 <- npi_subset[["clean_name"]]
    print(primary_provider2)
    if(is.null(primary_provider2) || length(primary_provider2)==0){
      return("NONE_FIT_SPEC_REQ")
    }else if(length(primary_provider2)==1){
      return(primary_provider2)
    }else{
      return("TWO_FIT_ALL_SPECS")
    }
  }else if(length(primary_provider)==1){
    return(primary_provider)
  }else{
    # Neither candidate survived the group/class filters.
    return("BOTH_DOC_FAIL_CRIT")
  }
}
# calculate_primary_doctor(doctor_npi1="1437348943",
# doctor_npi2="1306945308",
# class_reqs="Internal Medicine",
# specialization_reqs="Gastroenterology"
# ) %>% print()
# #
# calculate_primary_doctor(doctor_npi1="1437348943",
# doctor_npi2="1457493967",
# class_reqs="Internal Medicine",
# specialization_reqs="Gastroenterology"
# ) %>% print()
#
# calculate_primary_doctor(doctor_npi1="1275514499",
# doctor_npi2="1649374216",
# class_reqs="Internal Medicine",
# specialization_reqs="Gastroenterology"
# ) %>% print()
#
# calculate_primary_doctor(doctor_npi1="1215969837",
# doctor_npi2="1174539449",
# class_reqs="Internal Medicine",
# specialization_reqs="Gastroenterology"
# ) %>% print()
#
# calculate_primary_doctor(doctor_npi1="1073653036",
# doctor_npi2="1225473671",
# class_reqs="Internal Medicine",
# specialization_reqs="Gastroenterology"
# ) %>% print()
|
# Plot absorbance spectra for MDH2 (0.5 Xan, Xox, pH 6.55): first CSV column
# is wavelength, remaining columns are spectra recorded every 10 s.
graphics.off()
remove(list = ls())
library(ggplot2)
library(cowplot)
library(reshape2)
valeurs <- data.frame(read.csv("MDH2 0.5Xan Xox ph6.55 ttes10s.csv", sep = ';', dec = ".", header = TRUE))
# Keep every 6th measurement column (one spectrum per minute at 10 s
# intervals). The index vector was previously named `c`, which shadows
# base::c() -- renamed to avoid that footgun.
col_idx <- seq_len(ncol(valeurs[-1]))
df <- valeurs[-1][ , col_idx %% 6 == 1]
# Take the first 31 minute-spectra and reattach the wavelength column.
df <- cbind(df[1:31], valeurs[1])
colnames(df) <- c(1:31, "wavelength")
# Long format: one row per (wavelength, timepoint) for ggplot.
df1 <- melt(df, id.vars = "wavelength")
g <- ggplot(df1, aes(x = wavelength, y = value, group = variable, color = variable))
g <- g + geom_line()
g <- g + theme(legend.position = "none")
g <- g + scale_x_continuous(name = "\nWavelength (nm)", limits = c(300,400))
g <- g + scale_y_continuous(name = "absorbance\n")
g
save_plot('MDH2 0.5Xan Xox ph6.55 1min.png', g, base_aspect_ratio = 1.3)
|
/air12/MDH2 0.5Xan Xox ph6.55.R
|
no_license
|
CathyCat88/thesis
|
R
| false
| false
| 715
|
r
|
# Plot absorbance spectra for MDH2 (0.5 Xan, Xox, pH 6.55): first CSV column
# is wavelength, remaining columns are spectra recorded every 10 s.
graphics.off()
remove(list = ls())
library(ggplot2)
library(cowplot)
library(reshape2)
valeurs <- data.frame(read.csv("MDH2 0.5Xan Xox ph6.55 ttes10s.csv", sep = ';', dec = ".", header = TRUE))
# Keep every 6th measurement column (one spectrum per minute at 10 s
# intervals). The index vector was previously named `c`, which shadows
# base::c() -- renamed to avoid that footgun.
col_idx <- seq_len(ncol(valeurs[-1]))
df <- valeurs[-1][ , col_idx %% 6 == 1]
# Take the first 31 minute-spectra and reattach the wavelength column.
df <- cbind(df[1:31], valeurs[1])
colnames(df) <- c(1:31, "wavelength")
# Long format: one row per (wavelength, timepoint) for ggplot.
df1 <- melt(df, id.vars = "wavelength")
g <- ggplot(df1, aes(x = wavelength, y = value, group = variable, color = variable))
g <- g + geom_line()
g <- g + theme(legend.position = "none")
g <- g + scale_x_continuous(name = "\nWavelength (nm)", limits = c(300,400))
g <- g + scale_y_continuous(name = "absorbance\n")
g
save_plot('MDH2 0.5Xan Xox ph6.55 1min.png', g, base_aspect_ratio = 1.3)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.