blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
75275e53e9ce7e88f54107dc36c95696030e43ba
cff7a73825a6405ecb2b667beb4c607ed3358508
/08_model_classic.R
6e21bfac5eca5d9bee3940ebad5bbc1fa5fd0f37
[]
no_license
kmatusz/mgr
cc308a362d19bf1855bd7b346f161ac9c486dec1
40fa62b1834ae9228e5919b953e30899dc43fad5
refs/heads/master
2023-07-07T01:44:45.197192
2021-08-12T21:07:40
2021-08-12T21:07:40
246,847,687
1
0
null
null
null
null
UTF-8
R
false
false
32,776
r
08_model_classic.R
# Script for modeling whether the customer bought second time in the shop # Data from first purchase is used TRAIN_MODELS <- TRUE library(readr) library(tidyverse) library("leaflet") library(psych) library(lubridate) library(cluster) library(factoextra) library(caret) library(rpart) # Data preparation ---- orders <- read_csv("data/olist_orders_dataset.csv") customers <- read_csv("data/olist_customers_dataset.csv") geolocation <- read_csv("data/olist_geolocation_dataset.csv") order_items <- read_csv("data/olist_order_items_dataset.csv") order_payments <- read_csv("data/olist_order_payments_dataset.csv") order_reviews <- read_csv("data/olist_order_reviews_dataset.csv") products <- read_csv("data/olist_products_dataset.csv") sellers <- read_csv("data/olist_sellers_dataset.csv") product_translation <- read_csv("data/product_category_name_translation.csv") order_reviews_topics <- read_csv("data/reviews_with_topic.csv") load(file = 'data/05_orders_enhanced.Rdata') first_orders geolocation %>% select(1,2,3) %>% group_by(geolocation_zip_code_prefix) %>% filter(row_number() == 1) %>% ungroup() -> geolocation2 geolocation2 %>% head(5) order_items %>% group_by(order_id) %>% summarise(no_items = max(order_item_id), sum_freight = sum(freight_value, na.rm=T) ) -> order_items2 order_items %>% select(order_id, product_id, price) %>% group_by(order_id) %>% filter(price == max(price)) %>% left_join(products %>% select(1,2)) %>% select(1,4) -> order_items3 first_orders %>% left_join(geolocation2, by = c('customer_zip_code_prefix' = 'geolocation_zip_code_prefix')) %>% left_join(order_items2) %>% left_join(order_items3) %>% select( payment_value, review_score, if_second_order, geolocation_lat, geolocation_lng, no_items, sum_freight # product_category_name ) %>% mutate(if_second_order = as.factor(if_second_order)) -> to_model to_model %>% head() # Prepare data for caret ---- to_model %>% mutate(no_items = ifelse(is.na(no_items), 1, no_items), if_second_order = 
ifelse(as.character(if_second_order) == '1', 'yes', 'no') ) %>% drop_na() -> to_model2 save(to_model2, file='08_to_model2.Rdata') # Train test split, create dataset with upsampling ----- set.seed(10) training_obs <- createDataPartition(to_model2$if_second_order, p = 0.7, list = FALSE) to_model_train <- to_model2[training_obs[,1],] to_model_test <- to_model2[-training_obs[,1],] up_train2 <- upSample(x = to_model_train %>% select(-if_second_order), y = as.factor(to_model_train$if_second_order)) %>% as_tibble() %>% mutate(if_second_order = as.character(Class)) %>% select(-Class) # Functions for bootstraping AUC on test set ----- bootstrap_auc <- function(model, test_set, no_resamples){ out_roc <- vector('numeric', no_resamples) len_test <- nrow(test_set) for (i in 1:no_resamples){ idxes <- sample(1:len_test, size = len_test, replace = T) temp_test <- test_set[idxes,] predictions_temp = predict(model, temp_test,type = 'prob') roc_temp <- pROC::roc(as.numeric(temp_test$if_second_order == "yes"), predictions_temp[, 1]) out_roc[i] <- roc_temp$auc i <- i+1 } out_roc } bootstrap_summary<- function(out_roc){ tibble(auc =out_roc) %>% summary %>% print mean_auc <- mean(out_roc) tibble(auc =out_roc, y=0) %>% ggplot(aes(x=auc)) + geom_density() + geom_jitter(aes(y=y), alpha=0.5) + geom_vline(xintercept = mean_auc, color = 'red') } calc_metrics <- function(model, to_model_test, run_confusion_matrix = F){ predictions1 <- predict(model, to_model_test,type = 'prob') roc_test1 <- pROC::roc(as.numeric(to_model_test$if_second_order == "yes"), predictions1[, 1]) plot(roc_test1) title('ROC curve on test set \n') print('Calc AUC on test set:') print(roc_test1) if(run_confusion_matrix){ confusionMatrix(data = as.factor(as.numeric(predictions1[, 2] > 0.5)), # probability of yes was more than reference = as.factor(as.numeric(to_model_test$if_second_order == "yes")), # definitions of the "success" label positive = '1') } # print('Quantiles of predicted responses:') # print('-----') # 
predictions1[,2] %>% # quantile(seq(0.8,1, 0.01)) } # Model 1 - GBM on standard hyperparameters with upsampling (AUC test 0.5921) ---- # Define train control fitControl <- trainControl(## 10-fold CV method = "repeatedcv", number = 3, classProbs = TRUE, summaryFunction = twoClassSummary, allowParallel = T, repeats = 1) # Train model set.seed(825) if (TRAIN_MODELS){ model1 <- train(if_second_order ~., up_train2, method = "gbm", # method="glm", metric="ROC", # family=binomial(), trControl = fitControl # tuneGrid = gbmGrid ) save(model1, file = 'models_cache/model1.Rdata') } else { load('models_cache/model1.Rdata') } model1 # Create metrics - roc and confusion matrix calc_metrics(model1, to_model_test) # Bootstrap for test set out_roc1 <- bootstrap_auc(model1, to_model_test, no_resamples = 100) bootstrap_summary(out_roc1) # Model 2 - GBM on standard hyperparameters without upsampling (Auc test 0.5899) ---- # Define train control fitControl <- trainControl(## 10-fold CV method = "repeatedcv", number = 3, classProbs = TRUE, summaryFunction = twoClassSummary, allowParallel = T, repeats = 1) # Train model set.seed(825) if (TRAIN_MODELS){ model2 <- train(if_second_order ~., to_model_train, method = "gbm", # method="glm", metric="ROC", # family=binomial(), trControl = fitControl # tuneGrid = gbmGrid ) save(model2, file = 'models_cache/model2.Rdata') } else { load('models_cache/model2.Rdata') } model2 # Create metrics - roc and confusion matrix calc_metrics(model2, to_model_test) # Bootstrap for test set out_roc2 <- bootstrap_auc(model2, to_model_test, no_resamples = 100) bootstrap_summary(out_roc2) # Model 3 - Logistic with upsampling (Auc test 0.5575) ---- # Define train control fitControl <- trainControl(## 10-fold CV method = "repeatedcv", number = 3, classProbs = TRUE, summaryFunction = twoClassSummary, allowParallel = T, repeats = 1) # Train model set.seed(825) if (TRAIN_MODELS){ model3 <- train(if_second_order ~., up_train2, method="glm", metric="ROC", 
family=binomial(), trControl = fitControl # tuneGrid = gbmGrid ) save(model3, file = 'models_cache/model3.Rdata') } else { load('models_cache/model3.Rdata') } model3 # Create metrics - roc and confusion matrix calc_metrics(model3, to_model_test) # Bootstrap for test set out_roc3 <- bootstrap_auc(model3, to_model_test, no_resamples = 100) bootstrap_summary(out_roc3) # Model 4 - Logistic without upsampling (Auc test 0.5563) ---- # Define train control fitControl <- trainControl(## 10-fold CV method = "repeatedcv", number = 3, classProbs = TRUE, summaryFunction = twoClassSummary, allowParallel = T, repeats = 1) # Train model set.seed(825) if (TRAIN_MODELS){ model4 <- train(if_second_order ~., to_model_train, method="glm", metric="ROC", family=binomial(), trControl = fitControl # tuneGrid = gbmGrid ) save(model4, file = 'models_cache/model4.Rdata') } else { load('models_cache/model4.Rdata') } model4 # Create metrics - roc and confusion matrix calc_metrics(model4, to_model_test) # Bootstrap for test set out_roc4 <- bootstrap_auc(model4, to_model_test, no_resamples = 100) bootstrap_summary(out_roc4) # Model 5 - XGB extensive hyperparameters search with upsampling (Auc test 0.6159) ---- # Define train control fitControl <- trainControl(## 10-fold CV method = "repeatedcv", number = 3, classProbs = TRUE, summaryFunction = twoClassSummary, allowParallel = T, repeats = 1) # Train model set.seed(825) if (TRAIN_MODELS){ # if (TRUE){ model5 <- train(if_second_order ~., up_train2, method="xgbTree", metric="ROC", trControl = fitControl, verbose = T # tuneGrid = gbmGrid ) save(model5, file = 'models_cache/model5.Rdata') } else { load('models_cache/model5.Rdata') } model5 # Create metrics - roc and confusion matrix calc_metrics(model5, to_model_test) # Bootstrap for test set out_roc5 <- bootstrap_auc(model5, to_model_test, no_resamples = 100) bootstrap_summary(out_roc5) # Data preparation with geographic info ---- load('data/preprocessed/spatial_all_by_zip.Rdata') spatial_all_by_zip 
first_orders %>% left_join(geolocation2, by = c('customer_zip_code_prefix' = 'geolocation_zip_code_prefix')) %>% left_join(spatial_all_by_zip, by = c('customer_zip_code_prefix' = 'geolocation_zip_code_prefix')) %>% left_join(order_items2) %>% left_join(order_items3) %>% mutate(if_second_order = as.numeric(if_second_order)) %>% select_if(is.numeric) %>% mutate(if_second_order = as.factor(if_second_order))%>% mutate(no_items = ifelse(is.na(no_items), 1, no_items), if_second_order = ifelse(as.character(if_second_order) == '1', 'yes', 'no') ) %>% # Replace NA with column mean mutate_all(~ifelse(is.na(.x), mean(.x, na.rm = TRUE), .x)) -> to_model_geo set.seed(10) training_obs_geo <- createDataPartition(to_model_geo$if_second_order, p = 0.7, list = FALSE) to_model_train_geo <- to_model_geo[training_obs_geo[,1],] to_model_test_geo <- to_model_geo[-training_obs_geo[,1],] up_train_geo <- upSample(x = to_model_train_geo %>% select(-if_second_order), y = as.factor(to_model_train_geo$if_second_order)) %>% as_tibble() %>% mutate(if_second_order = as.character(Class)) %>% select(-Class) # prcomp(up_train_geo %>% select(-if_second_order),scale. 
= T) -> pca_model # pca_model %>% summary() # Model geo 1- extensive XGB search with upsampling on geo data (Auc test 0.5546) ---- # Define train control fitControl <- trainControl(## 10-fold CV method = "repeatedcv", number = 3, classProbs = TRUE, summaryFunction = twoClassSummary, allowParallel = T, repeats = 1) # Train model set.seed(825) if (TRAIN_MODELS){ # if (TRUE){ model_geo1 <- train(if_second_order ~., up_train_geo, method="xgbTree", metric="ROC", trControl = fitControl, verbose = T # tuneGrid = gbmGrid ) save(model_geo1, file = 'models_cache/model_geo1.Rdata') } else { load('models_cache/model_geo1.Rdata') } model_geo1 # Create metrics - roc and confusion matrix calc_metrics(model_geo1, to_model_test_geo) # Bootstrap for test set out_roc_geo1 <- bootstrap_auc(model_geo1, to_model_test_geo, no_resamples = 100) bootstrap_summary(out_roc_geo1) # Data preparation - PCA on geo dataset ---- library(factoextra) to_model_train pca_model <- prcomp(to_model_train_geo %>% select(-if_second_order), scale = TRUE,center = TRUE) summary(pca_model) fviz_eig(pca_model) # Setting no of components to 8 - 90% of variability pca_model_8 <- prcomp(up_train_geo %>% select(-if_second_order), scale = TRUE,center = TRUE,rank. 
= 10) summary(pca_model_8) predict(pca_model_8, up_train_geo) %>% as_tibble() %>% mutate(if_second_order = up_train_geo$if_second_order) -> up_train_geo_pca predict(pca_model_8, to_model_test_geo) %>% as_tibble() %>% mutate(if_second_order = to_model_test_geo$if_second_order) -> to_model_test_geo_pca # Model geo 2 - XGB search with upsampling on geo data with PCA on 8 components (Auc test 0.5289) ---- # Define train control fitControl <- trainControl(## 10-fold CV method = "repeatedcv", number = 3, classProbs = TRUE, summaryFunction = twoClassSummary, allowParallel = T, repeats = 1) # Train model set.seed(825) if (TRAIN_MODELS){ # if (TRUE){ model_geo2 <- train(if_second_order ~., up_train_geo_pca, method="xgbTree", metric="ROC", trControl = fitControl, verbose = T # tuneGrid = gbmGrid ) save(model_geo2, file = 'models_cache/model_geo2.Rdata') #AAAAAA } else { load('models_cache/model_geo2.Rdata') } model_geo2 # Create metrics - roc and confusion matrix calc_metrics(model_geo2, to_model_test_geo_pca) # Bootstrap for test set out_roc_geo2 <- bootstrap_auc(model_geo2, to_model_test_geo_pca, no_resamples = 100) bootstrap_summary(out_roc_geo2) # Data preparation - dbscan on geo data ---- library(dbscan) to_model2 %>% select(geolocation_lat, geolocation_lng) %>% sample_n(10000) %>% ggplot(aes(geolocation_lng, geolocation_lat)) + geom_point(alpha = 0.5) ->p plotly::ggplotly(p) to_model2 %>% select(geolocation_lat, geolocation_lng) -> geo dbscan::kNNdistplot(geo, k =1000) abline(h = 5) # 2 is ok # dbscan_geo <- dbscan::dbscan(geo, 2, 100) dbscan_geo <- dbscan::dbscan(geo, 0.2, 100) # 23.3S # 23.75S - 50 km geo %>% mutate(cluster = dbscan_geo$cluster) %>% mutate(agglomeration = ifelse(cluster==0, 0,1)) %>% ggplot(aes(geolocation_lng, geolocation_lat, color = agglomeration)) + geom_point(alpha=0.5) library("factoextra") fviz_cluster(dbscan_geo, geo, stand = FALSE, frame = FALSE, geom = "point") to_model2 %>% mutate(cluster = dbscan_geo$cluster) %>% mutate(if_agglomeration = 
ifelse(cluster==0, 0,1)) %>% select(-cluster) -> to_model3 # Train test split, create dataset with upsampling set.seed(10) training_obs <- createDataPartition(to_model3$if_second_order, p = 0.7, list = FALSE) to_model_train <- to_model3[training_obs[,1],] to_model_test <- to_model3[-training_obs[,1],] up_train3 <- upSample(x = to_model_train %>% select(-if_second_order), y = as.factor(to_model_train$if_second_order)) %>% as_tibble() %>% mutate(if_second_order = as.character(Class)) %>% select(-Class) # prepare data with topic info ----- order_reviews_topics %>% select(order_id, review_score, starts_with('topic_')) first_orders %>% left_join(geolocation2, by = c('customer_zip_code_prefix' = 'geolocation_zip_code_prefix')) %>% left_join(order_items2) %>% left_join(order_items3) %>% left_join(order_reviews_topics %>% select(order_id, review_score, starts_with('topic_'))) %>% select( payment_value, review_score, if_second_order, geolocation_lat, geolocation_lng, no_items, sum_freight, starts_with('topic_') # product_category_name ) %>% mutate(if_second_order = as.factor(if_second_order)) -> to_model_topic to_model_topic %>% head() # Prepare data for caret ---- to_model_topic %>% mutate(no_items = ifelse(is.na(no_items), 1, no_items), if_second_order = ifelse(as.character(if_second_order) == '1', 'yes', 'no') ) %>% drop_na() -> to_model_topic2 save(to_model_topic2, file='08_to_model_topic2.Rdata') # load('08_to_model_topic2.Rdata') # Train test split, create dataset with upsampling ----- set.seed(10) training_obs <- createDataPartition(to_model_topic2$if_second_order, p = 0.7, list = FALSE) to_model_train <- to_model_topic2[training_obs[,1],] to_model_test <- to_model_topic2[-training_obs[,1],] up_train_topic <- upSample(x = to_model_train %>% select(-if_second_order), y = as.factor(to_model_train$if_second_order)) %>% as_tibble() %>% mutate(if_second_order = as.character(Class)) %>% select(-Class) # Model reviews topics 1 - XGB with upsampling on data from topics and 
the rest (Auc test 0.6431) ---- # Define train control fitControl <- trainControl(## 10-fold CV method = "repeatedcv", number = 3, classProbs = TRUE, summaryFunction = twoClassSummary, allowParallel = T,returnResamp = 'all', repeats = 1) # Train model set.seed(825) if (TRAIN_MODELS){ # if (TRUE){ model_topic1 <- train(if_second_order ~., up_train_topic, method="xgbTree", metric="ROC", trControl = fitControl, verbose = T # tuneGrid = gbmGrid ) save(model_topic1, file = 'models_cache/model_topic1.Rdata') } else { load('models_cache/model_topic1.Rdata') } # Create metrics - roc and confusion matrix calc_metrics(model_topic1, to_model_test) calc_metrics(model_topic1, to_model_train) # Bootstrap for test set out_roc_topic1 <- bootstrap_auc(model_topic1, to_model_test, no_resamples = 100) bootstrap_summary(out_roc_topic1) # Variable importance plot(varImp(model_topic1)) # cluster not valuable at all to_model3 %>% group_by(cluster) %>% summarise(how_many_second = sum(ifelse(if_second_order=='yes', 1, 0)), a = n() ) %>% mutate(b = how_many_second/a) # Join all types of information ----- order_items %>% group_by(order_id) %>% summarise(no_items = max(order_item_id), sum_freight = sum(freight_value, na.rm=T) ) -> order_items2 order_items %>% select(order_id, product_id, price) %>% group_by(order_id) %>% filter(price == max(price)) %>% left_join(products %>% select(1,2)) %>% select(1,4) -> order_items3 first_orders %>% left_join(geolocation2, by = c('customer_zip_code_prefix' = 'geolocation_zip_code_prefix')) %>% left_join(spatial_all_by_zip, by = c('customer_zip_code_prefix' = 'geolocation_zip_code_prefix')) %>% left_join(order_items2) %>% left_join(order_items3) %>% left_join(order_reviews_topics %>% select(order_id, starts_with('topic_'))%>%group_by(order_id)%>%head(1)) %>% mutate(if_second_order = as.numeric(if_second_order)) %>% select_if(is.numeric) %>% mutate(no_items = ifelse(is.na(no_items), 1, no_items), if_second_order = ifelse(as.character(if_second_order) == '1', 
'yes', 'no') ) %>% # Replace NA with column mean, no. NAs around 300 per 100 000 obs mutate_all(~ifelse(is.na(.x), mean(.x, na.rm = TRUE), .x)) -> to_model_all_0 dbscan_geo <- dbscan::dbscan(to_model_all_0%>%select(geolocation_lat,geolocation_lng), 0.2, 100) to_model_all_0 %>% mutate(cluster = dbscan_geo$cluster) %>% mutate(agglomeration = ifelse(cluster==0, 0,1)) %>% select(-cluster) -> to_model_all_1 to_model_all_1 %>% head() to_model_all_1 %>% summary() # Prepare data for caret ---- to_model_all_1 %>% drop_na() -> to_model_all save(to_model_all, file='08_to_model_all.Rdata') # Train test split, create dataset with upsampling ----- set.seed(10) training_obs <- createDataPartition(to_model_all$if_second_order, p = 0.7, list = FALSE) to_model_train <- to_model_all[training_obs[,1],] to_model_test <- to_model_all[-training_obs[,1],] up_train <- upSample(x = to_model_train %>% select(-if_second_order), y = as.factor(to_model_train$if_second_order)) %>% as_tibble() %>% mutate(if_second_order = as.character(Class)) %>% select(-Class) up_train$if_second_order %>% table # Model all1- extensive XGB search with upsampling on full dataset (Auc test 0.62, train 0.9999) ---- # Define train control fitControl <- trainControl(## 10-fold CV method = "repeatedcv", number = 3, classProbs = TRUE, summaryFunction = twoClassSummary, allowParallel = T, verboseIter = TRUE, repeats = 1) tune_grid <- expand.grid(nrounds=c(100,200,400), max_depth = c(3:7), eta = c(0.01, 0.1,0.2), gamma = c(0.01), colsample_bytree = c(0.75), subsample = c(0.50), min_child_weight = c(0,1)) # tune_grid <- expand.grid(nrounds = 200, # max_depth = 5, # eta = 0.05, # gamma = 0.01, # colsample_bytree = 0.75, # min_child_weight = 0, # subsample = 0.5) # Train model set.seed(825) if (TRAIN_MODELS){ # if (TRUE){ model_all1 <- train(if_second_order ~., up_train, method="xgbTree", metric="ROC", trControl = fitControl, verbose = T, tuneGrid = tune_grid, ) save(model_all1, file = 'models_cache/model_all1.Rdata') } else 
{ load('models_cache/model_all1.Rdata') } model_all1 plot(varImp(model_all1)) plot(model_all1$finalModel) # 9:40 # Create metrics - roc and confusion matrix calc_metrics(model_all1, to_model_test) calc_metrics(model_all1, to_model_train) # way too much overfitting - auc train 0.9999 (!!!), while test 0.62 # Bootstrap for test set out_roc_all1 <- bootstrap_auc(model_all1, to_model_test, no_resamples = 100) bootstrap_summary(out_roc_all1) # Model all2- extensive XGB search with upsampling on full dataset, (Auc test ) ---- # Define train control fitControl <- trainControl(## 10-fold CV method = "repeatedcv", number = 3, classProbs = TRUE, summaryFunction = twoClassSummary, allowParallel = T, verboseIter = TRUE, repeats = 1) tune_grid <- expand.grid(nrounds=c(400), max_depth = c(3,5), eta = c(0.001, 0.01, 0.1), gamma = c(0.01), colsample_bytree = c(0.1, 0.2, 0.5), subsample = c(0.50), min_child_weight = c(0,1)) # tune_grid <- expand.grid(nrounds=c(400), # max_depth = c(7), # eta = c(0.2), # gamma = c(0.01), # colsample_bytree = c(0.75), # subsample = c(0.50), # min_child_weight = c(0,1)) # Train model set.seed(825) if (TRAIN_MODELS){ # if (TRUE){ model_all2 <- train(if_second_order ~., up_train, method="xgbTree", metric="ROC", trControl = fitControl, verbose = T, tuneGrid = tune_grid, ) save(model_all2, file = 'models_cache/model_all2.Rdata') } else { load('models_cache/model_all2.Rdata') } model_all2 plot(model_all2$finalModel) # Create metrics - roc and confusion matrix calc_metrics(model_all2, to_model_test) calc_metrics(model_all2, to_model_train) # Bootstrap for test set out_roc_all2 <- bootstrap_auc(model_all2, to_model_test, no_resamples = 100) bootstrap_summary(out_roc_all2) # Model all3- extensive XGB search with upsampling on full dataset, fighting overfitting (Auc test ) ---- # Define train control fitControl <- trainControl(## 10-fold CV method = "repeatedcv", number = 3,returnResamp = 'all', classProbs = TRUE, summaryFunction = twoClassSummary, 
allowParallel = F, verboseIter = TRUE, repeats = 1) tune_grid <- expand.grid(nrounds=c(1:800), max_depth = c(5), eta = c(0.01), gamma = c(0.01), colsample_bytree = c(0.01), subsample = c(0.10), min_child_weight = c(0)) # tune_grid <- expand.grid(nrounds=c(400), # max_depth = c(7), # eta = c(0.2), # gamma = c(0.01), # colsample_bytree = c(0.75), # subsample = c(0.50), # min_child_weight = c(0,1)) # Train model set.seed(825) if (TRAIN_MODELS){ # if (TRUE){ model_all3 <- train(if_second_order ~., up_train, method="xgbTree", metric="ROC", trControl = fitControl, verbose = 1, tuneGrid = tune_grid, ) save(model_all3, file = 'models_cache/model_all3.Rdata') } else { load('models_cache/model_all3.Rdata') } model_all3$results %>% select(nrounds, ROC) %>% plot plot(model_all3$finalModel) # Create metrics - roc and confusion matrix calc_metrics(model_all3, to_model_test) calc_metrics(model_all3, to_model_train) # Bootstrap for test set out_roc_all2 <- bootstrap_auc(model_all2, to_model_test, no_resamples = 100) bootstrap_summary(out_roc_all2) # Save models for markdown viz ---- save( to_model_test, model1, out_roc1, model2, out_roc2, model3, out_roc3, model4, out_roc4, model5, out_roc5, to_model_test_geo, to_model_test_geo_pca, model_geo1, out_roc_geo1, model_geo2, out_roc_geo2, model_topic1, out_roc_topic1, file = 'models_cache/08_all_models.Rdata' ) break # Logistic regression with caret and without upsampling---- fitControl <- trainControl(## 10-fold CV method = "repeatedcv", number = 3, classProbs = TRUE, summaryFunction = twoClassSummary,allowParallel = T, ## repeated ten times repeats = 3) set.seed(825) model_glm <- train(if_second_order ~., to_model_train, # method = "gbm", method="glm", metric="ROC", family=binomial(), trControl = fitControl # tuneGrid = gbmGrid ## This last option is actually one ## for gbm() that passes through ) model_glm predictions_glm = predict(model_glm, to_model_test,type = 'prob') ROC_glm <- pROC::roc(as.numeric(to_model_test$if_second_order 
== "yes"), predictions_glm[, 1]) plot(ROC_glm) ROC_glm # 0.54 # Logistic regression with caret and with upsampling---- fitControl <- trainControl(## 10-fold CV method = "repeatedcv", number = 3, classProbs = TRUE, summaryFunction = twoClassSummary,allowParallel = T, ## repeated ten times repeats = 3) set.seed(825) model_glm2 <- train(if_second_order ~., up_train2, # method = "gbm", method="glm", metric="ROC", family=binomial(), trControl = fitControl # tuneGrid = gbmGrid ## This last option is actually one ## for gbm() that passes through ) model_glm2 predictions_glm2= predict(model_glm2, to_model_test,type = 'prob') ROC_glm2 <- pROC::roc(as.numeric(to_model_test$if_second_order == "yes"), predictions_glm2[, 1]) plot(ROC_glm2) ROC_glm2 # 0.54 # XGB ---- fitControl <- trainControl(## 10-fold CV method = "repeatedcv", number = 3, classProbs = TRUE, summaryFunction = twoClassSummary,allowParallel = T, verboseIter = TRUE, repeats = 2) # gbmGrid_long <- expand.grid(interaction.depth = c(1, 5, 10, 20), # n.trees = (1:6)*50, # shrinkage = c(0.01, 0.1, 0.5, 0.9), # n.minobsinnode = c(1,5,10,20, 50, 100)) set.seed(825) gbmFit4 <- train(if_second_order ~., up_train2, # method = "gbm", method="xgbTree", metric="ROC", # family=binomial(), trControl = fitControl # tuneGrid = gbmGrid_long ## This last option is actually one ## for gbm() that passes through ) # stopCluster(cl) gbmFit4 predictions4 = predict(gbmFit4, to_model_test,type = 'prob') roc4 <- pROC::roc(as.numeric(to_model_test$if_second_order == "yes"), predictions4[, 1]) plot(roc4) roc4 # 0.61 - not good, not bad varImp(gbmFit4) confusionMatrix(data = as.factor(as.numeric(predictions3[, 2] > 0.5)), # probability of yes was more than reference = as.factor(as.numeric(to_model_test$if_second_order == "yes")), # definitions of the "success" label positive = '1') predictions3[,2] %>% quantile(seq(0.8,1, 0.01)) # save.image() # Geo Model ---- set.seed(825) fitControl <- trainControl(## 10-fold CV method = "repeatedcv", 
number = 3, classProbs = TRUE, summaryFunction = twoClassSummary, verboseIter = TRUE, repeats = 1) gbmGrid_long <- expand.grid(interaction.depth = c(3), n.trees = (3)*50, shrinkage = c(0.1), n.minobsinnode = c(20)) xgb_geo1 <- train(if_second_order ~., to_model_train, method = "gbm", # method="xgbTree", metric="ROC", # family=binomial(), trControl = fitControl, tuneGrid = gbmGrid_long ) # stopCluster(cl) xgb_geo1 predictions4 = predict(xgb_geo1, to_model_test,type = 'prob') roc4 <- pROC::roc(as.numeric(to_model_test$if_second_order == "yes"), predictions4[, 1]) plot(roc4) roc4 # run PCA and scale prcomp(to_model_geo %>% select(-if_second_order),scale. = T) -> pca_model pca_model %>% summary() # first 8 components prcomp(to_model_geo %>% select(-if_second_order),scale. = T,rank. = 8) -> pca_model2 pca_model2$x %>% cbind(to_model_geo%>% select(if_second_order)) %>% as_tibble() -> to_model_pca to_model_pca$if_second_order %>% table() set.seed(10) training_obs <- createDataPartition(to_model_pca$if_second_order, p = 0.7, list = FALSE) to_model_train <- to_model_pca[training_obs[,1],] to_model_test <- to_model_pca[-training_obs[,1],] up_train4 <- upSample(x = to_model_train %>% select(-if_second_order), y = as.factor(to_model_train$if_second_order)) %>% as_tibble() %>% mutate(if_second_order = as.character(Class)) %>% select(-Class) # Geo Model ---- set.seed(825) fitControl <- trainControl(## 10-fold CV method = "repeatedcv", number = 3, classProbs = TRUE, summaryFunction = twoClassSummary, verboseIter = TRUE, repeats = 1) gbmGrid_long <- expand.grid(interaction.depth = c(3), n.trees = (3)*50, shrinkage = c(0.1), n.minobsinnode = c(20)) xgb_geo2 <- train(if_second_order ~., up_train4, method = "gbm", # method="xgbTree", metric="ROC", # family=binomial(), trControl = fitControl, tuneGrid = gbmGrid_long ) xgb_geo2 predictions5 = predict(xgb_geo2, to_model_test,type = 'prob') roc5 <- pROC::roc(as.numeric(to_model_test$if_second_order == "yes"), predictions5[, 1]) 
plot(roc5) roc5
c295799e43f7f48252c0e7c3a16bab1e36e7f4c5
90d74d03513e588f1f0161846dfd9657c78feae8
/R/action.R
b804f977f7ce4fa04584dbb8fcaf7af8b6b0c4a0
[ "MIT" ]
permissive
ropensci/unifir
859fe03f09e7f2a96cc0785d02f1153c465f24c1
e5c1df562b43751775e04777e204777646390c42
refs/heads/main
2023-05-23T12:33:33.104020
2022-12-04T15:15:46
2022-12-04T15:15:46
373,628,173
22
0
NOASSERTION
2022-12-02T17:12:52
2021-06-03T20:06:53
R
UTF-8
R
false
false
4,468
r
action.R
#' Build and execute a `unifir_script` #' #' @param script The `unifir_script` object (as generated by [make_script]) #' to build and execute. #' @param write Boolean: Write the generated script to a file? #' @param exec Boolean: Execute the script inside of the Unity project? Note #' that if `write = FALSE`, `exec` cannot be `TRUE`. #' @param quit Boolean: Quit Unity after execution? #' #' @examples #' # First, create a script object. #' # CRAN doesn't have Unity installed, so pass #' # a waiver object to skip the Unity-lookup stage: #' script <- make_script("example_script", #' unity = waiver() #' ) #' #' # Then add any number of props to it: #' script <- add_light(script) #' #' # Then call `action` to execute the script! #' \donttest{ #' if (interactive()) { #' action(script) #' } #' } #' #' @return If `exec = FALSE`, the original `unifir_script` object passed to #' `script`. If `exec = TRUE`, the same `unifir_script` object with its #' props replaced by the C# they generate. #' #' @export action <- function(script, write = TRUE, exec = TRUE, quit = TRUE) { debug <- check_debug() if (debug) write <- exec <- FALSE if (!write && exec) stop("Cannot execute script without writing it!") stopifnot(create_scene_folders(script, debug)) # Prep script by filling in NULLs with random values: script <- set_script_defaults(script, debug) # Execute prop build methods sequentially: for (i in seq_along(script$props)) { script$props[[i]] <- script$props[[i]]$build( script, script$props[[i]], debug ) } # Combine props into single vector, format for C#: script$props <- paste0(script$props, sep = "\n") beats <- paste0(script$beats[script$beats$exec, ]$name, "();", collapse = "\n " ) script$using <- unique(script$using) script$using <- paste0("using ", script$using, ";", collapse = "\n") if (write) { writeLines( c( paste(script$using, "\n"), paste("public class", script$script_name, "{"), script$props, " static void MainFunc() {", paste0(" ", beats), " }", "}" ), file.path( 
script$project, "Assets", "Editor", paste0(script$script_name, ".cs") ) ) } # nocov start # Skipping test coverage here because I can't install Unity on GH Actions # So unless I set up my own build box for CI this is a manual test job if (exec) { output <- system( paste0( shQuote(find_unity()), " -batchmode", if (quit) " -quit", " -projectPath ", script$project, " -executeMethod ", script$script_name, ".MainFunc" ) ) if (output != "0") stop(output) } # nocov end return(invisible(script)) } #' Fill in plot holes in a script #' #' @param script The unifir_script to fill elements of #' @param debug Boolean: run in debug mode? set_script_defaults <- function(script, debug) { scene_dir <- file.path(script$project, "Assets", "Scenes") if (is.null(script$scene_name)) { script$scene_name <- proceduralnames::make_english_names(1, 4, sep = "", case = "title" ) } if (is.null(script$script_name)) { script$script_name <- proceduralnames::make_english_names(1, 4, sep = "", case = "title" ) } if (file.exists(file.path(scene_dir, script$scene_name))) { script$scene_exists <- TRUE } script$clone() } create_scene_folders <- function(script, debug) { if ( !debug && ( # If initialize_project is NULL and the directory is missing: (is.null(script$initialize_project) && !dir.exists(script$project)) || # Or if initialize_project is TRUE: (!is.null(script$initialize_project) && script$initialize_project))) { # Create a unity project at that directory: create_unity_project(script$project, unity = script$unity) } if (!debug) create_if_not(file.path(script$project, "Assets", "Scenes"), TRUE) if (!debug) create_if_not(file.path(script$project, "Assets", "Editor")) return(invisible(TRUE)) }
8760352d4f27aeba72cc4dbe3ff4829eb12301f2
022b2058a1b86c9673de480c026b3772f8f38292
/data-raw/01-import-references.R
805f40ac5429387561b2fdd5502cb09816bd8770
[]
no_license
kamapu/biblio
316f98678270ac1db8419d516bca3c7bb56c7aea
a0a222c57ea322f114afc74903c0096b3acfabcc
refs/heads/master
2023-05-11T17:44:15.062504
2023-04-29T08:20:57
2023-04-29T08:20:57
237,954,340
0
2
null
2023-02-20T23:07:29
2020-02-03T11:56:46
TeX
UTF-8
R
false
false
356
r
01-import-references.R
# TODO: Importing references as example data # # Author: Miguel Alvarez ################################################################################ library(biblio) library(usethis) # Adding example data synopsis <- read_bib("inst/LuebertPliscoff.bib") use_data(synopsis, overwrite = TRUE) # TODO: new_lib() template from data-raw/fields_list.ods
3f71ba9d3f00d28431757fb851f86b15285dcb3b
360df3c6d013b7a9423b65d1fac0172bbbcf73ca
/FDA_Pesticide_Glossary/formaldehyde_methane.R
8bf2d76a77100641c77d91037ec6fdab27fea6a0
[ "MIT" ]
permissive
andrewdefries/andrewdefries.github.io
026aad7bd35d29d60d9746039dd7a516ad6c215f
d84f2c21f06c40b7ec49512a4fb13b4246f92209
refs/heads/master
2016-09-06T01:44:48.290950
2015-05-01T17:19:42
2015-05-01T17:19:42
17,783,203
0
1
null
null
null
null
UTF-8
R
false
false
276
r
formaldehyde_methane.R
library("knitr") library("rgl") #knit("formaldehyde_methane.Rmd") #markdownToHTML('formaldehyde_methane.md', 'formaldehyde_methane.html', options=c("use_xhml")) #system("pandoc -s formaldehyde_methane.html -o formaldehyde_methane.pdf") knit2html('formaldehyde_methane.Rmd')
8dd0884df0c1b030750c12390c602d6f7fd5313d
6464efbccd76256c3fb97fa4e50efb5d480b7c8c
/cran/paws.internet.of.things/man/iotanalytics_list_datastores.Rd
fe8402a21d16b2a5702f28bfe11bf1bf522201cf
[ "Apache-2.0" ]
permissive
johnnytommy/paws
019b410ad8d4218199eb7349eb1844864bd45119
a371a5f2207b534cf60735e693c809bd33ce3ccf
refs/heads/master
2020-09-14T23:09:23.848860
2020-04-06T21:49:17
2020-04-06T21:49:17
223,286,996
1
0
NOASSERTION
2019-11-22T00:29:10
2019-11-21T23:56:19
null
UTF-8
R
false
true
633
rd
iotanalytics_list_datastores.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/iotanalytics_operations.R \name{iotanalytics_list_datastores} \alias{iotanalytics_list_datastores} \title{Retrieves a list of data stores} \usage{ iotanalytics_list_datastores(nextToken, maxResults) } \arguments{ \item{nextToken}{The token for the next set of results.} \item{maxResults}{The maximum number of results to return in this request. The default value is 100.} } \description{ Retrieves a list of data stores. } \section{Request syntax}{ \preformatted{svc$list_datastores( nextToken = "string", maxResults = 123 ) } } \keyword{internal}
de340bc50641473c16ee387cbdf23288c9788f07
573a8b848c3ee979c4fb0852105519df58bcc827
/man/jgc.Rd
15eb22585ae89a58b14668c0e20c6e8c1314f662
[]
no_license
s-u/rJava
0228c0df9ab40a04568f4b93bdcf6d55299f9e9a
6d362c1e804d85f699e61742e103df0b8634c489
refs/heads/master
2023-04-18T19:46:31.708379
2022-12-30T05:07:29
2022-12-30T05:07:29
15,511,357
214
84
null
2022-04-27T22:47:26
2013-12-29T16:37:44
Java
UTF-8
R
false
false
634
rd
jgc.Rd
\name{.jgc} \alias{.jgc} \title{ Invoke Java Garbage Collection } \description{ \code{.jgc} invokes the R and Java garbage collectors. } \usage{ .jgc(R.gc = TRUE, ...) } \arguments{ \item{R.gc}{logical, if \code{TRUE} then \code{gc(\dots)} is called first, if \code{FALSE} only Java garbage collector is called} \item{\dots}{any additional parameters passed to \code{gc()}} } \details{ \code{.jgc} invokes the R garbage collector (unless \code{R.gc=FALSE}) which removes any unused Java references and then invokes the Java garbage collector to reclaim Java heap space. } \author{ Simon Urbanek } \keyword{interface}
7d97cf76194a680d369f9e7227904b3840eae9ee
f0861f49786faae3984d1d2c908b720f2c016e9e
/load.R
119e278f53480cfbd9c02215ee350c1da668bb31
[]
no_license
Blurry/ExData_Plotting1
483659660d0cb274057d0463c41ea04a339850f4
01a5e1de28bf2347dee1b2746590bf3f61a5a9fe
refs/heads/master
2021-01-19T07:37:22.468369
2016-01-10T23:18:32
2016-01-10T23:18:32
49,386,381
0
0
null
2016-01-10T21:31:08
2016-01-10T21:31:07
null
UTF-8
R
false
false
583
r
load.R
# Sys.setlocale("LC_TIME","C") #Get weekdays in English in RStudio require(dplyr) || install.packages("dplyr") library(dplyr) # Load data set data<- read.table("./data/household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?") # Load only for 2007-02-01 and 2007-02-01 data <- filter(power, Date == "1/2/2007" | Date == "2/2/2007") # Convert Date and Time data <- transform(power, dateAndTime = strptime(paste(as.Date(Date,"%d/%m/%Y"), Time),"%Y-%m-%d %H:%M:%S"))
d64a8972ce1801a4beb8e662999601057f3aa7d2
b988414b463f6601d27625e653fcb1680681b01e
/R/04_RW_cleanDHS_kids2.R
5df7371692068317e07126585c7e85683c572c0c
[ "MIT" ]
permissive
ccjolley/RwandaLAM
b4f60c2ac577633def0a6a8d7014d5d47f0f2062
6c8dca82b02b01361e142b6c7c948a05e3fc5033
refs/heads/master
2020-12-24T23:56:03.125874
2016-08-29T17:36:48
2016-08-29T17:36:48
60,806,784
0
0
null
2016-07-29T16:55:23
2016-06-09T21:00:19
R
UTF-8
R
false
false
13,692
r
04_RW_cleanDHS_kids2.R
# Cleanup kids dataset for Rwanda DHS data -------------------------- # Nada Petrovic, USAID PPL & GeoCenter, npetrovic@usaid.gov # 16 June 2016 # (c) 2016 via MIT License ## PULLING VARIABLES WE WANT ## Reads in excel spreadsheet that states which variables to keep ## from kids data set kids_labels_tokeep<-read.csv('Excel/kids_labels_tokeep.csv') ## Relabels "NA" values (ie variables that have not been decided on yet) as 0 ## so that they are not selected. From the Excel spreadsheet pulls the list ## of variables to keep and what they should be renamed. kids_labels_tokeep$Keep[is.na(kids_labels_tokeep$Keep)] <- 0 data_subset_vars <- as.character(kids_labels_tokeep$var[kids_labels_tokeep$Keep==1]) data_rename_vars <- as.character(kids_labels_tokeep$renamedVar[kids_labels_tokeep$Keep==1]) ## Creates new clean data frame that is a subset of the overall data frame, ## and renames the variables. kids_clean <- kids_all[data_subset_vars] names(kids_clean) <- data_rename_vars ## Creates functions that will be useful for pulling attributes/variable codes later source('R/VarCodeDescrip_Functions.R') ## Creates smaller data frame of labels to make it easier to query relevant info kids_clean_labels<-make_cleanLabelMat(kids_labels,data_subset_vars,data_rename_vars) ## Creates household id variable for merging with hh data set, note ## this is not the same as hhid due to weirdness in spacing of DHS ## paste job. It can only be merged with a homemade household id ## like this one. 
kids_clean$cluster_hh_num <- paste(kids_clean$cluster_num, kids_clean$hh_num) ##RECODING AND CLEANING VARIABLES ## Age in months ## ## Calculate from interview date & dob ## Note: no NA's in either interview date or dob, and values look normal kids_clean$age_calc_months <- (kids_clean$interview_date_cmc-kids_clean$dob_cmc) ## table(kids_clean$age_calc_months, exclude=NULL) #output looks good, no NAs, min=0 max=59 ## qplot(kids_clean$age_calc_months/12) #looks fairly flat, bump around 6 mo, fewer 4-5 ## Wealth Index ## #table(kids_clean$wealth_index,exclude=NULL) #output looks good, no NAs, values are: 1:5 #qplot(kids_clean$wealth_index, geom="bar") #1 has the most, somewhat flat otherwise ## Sex, 0=male, 1=female #table(kids_clean$sex,exclude=NULL)/nrow(kids_clean) #looks good, no NAs, M:50.6% F:49.4% kids_clean$sex<-kids_clean$sex-1 #recodes so that male is 0 and female is 1 ## Stunting ## ## Height for Age percentile #Replace all 9998 values with NA, total NAs:4332, total data=3524 kids_clean$height_age_percentile <-na_if(kids_clean$height_age_percentile,9998) #The percentile needs to be divided by 100 kids_clean$height_age_percentile <-kids_clean$height_age_percentile/100 #qplot(kids_clean$height_age_percentile) # looks like pw, huge spike near 0 ## Height for Age zscore, measured in std devs above/below mean ## Replace all 9998 values with NA, total NAs:4332, total data=3524 kids_clean$height_age_zscore <-na_if(kids_clean$height_age_zscore,9998) ## The zscore needs to be divided by 100 kids_clean$height_age_zscore <-kids_clean$height_age_zscore/100 #qplot(kids_clean$height_age_zscore) # looks like gaussian, centered between -1 & -2 ## Wasting ## ## Weight for Age percentile #Replace all 9998 values with NA, total NAs:4332, total data=3524 kids_clean$weight_age_percentile <-na_if(kids_clean$weight_age_percentile,9998) #The percentile needs to be divided by 100 kids_clean$weight_age_percentile <-kids_clean$weight_age_percentile/100 
#qplot(kids_clean$weight_age_percentile) # big spike near 0 ## Weight for Age zscore, measured in std devs above/below mean ## Replace all 9998 values with NA, total NAs:4332, total data=3524 kids_clean$weight_age_zscore <-na_if(kids_clean$weight_age_zscore,9998) ## The zscore needs to be divided by 100 kids_clean$weight_age_zscore <-kids_clean$weight_age_zscore/100 #qplot(kids_clean$weight_age_zscore) # looks like gaussian, centered between 0 & -1 ## Body Mass Index (weight for height) ## ## Weight for height percentile #Replace all 9998 values with NA, total NAs:4332, total data=3524 kids_clean$weight_height_percentile <-na_if(kids_clean$weight_height_percentile,9998) #The percentile needs to be divided by 100 kids_clean$weight_height_percentile <-kids_clean$weight_height_percentile/100 #qplot(kids_clean$weight_height_percentile) # plot looks odd, is actually linearly #increasing, possibly indication that children are more stunted than wasted? ## Weight for height zscore, measured in std devs above/below mean ## Replace all 9998 values with NA, total NAs:4332, total data=3524 kids_clean$weight_height_zscore <-na_if(kids_clean$weight_height_zscore,9998) ## The zscore needs to be divided by 100 kids_clean$weight_height_zscore <-kids_clean$weight_height_zscore/100 #qplot(kids_clean$weight_height_zscore) # looks like gaussian, centered around 0 ## Mother's Stunting ## ## Height for Age percentile #Replace all 9998 values with NA, total NAs:3981, total data=3875 kids_clean$mother_height_age_percentile <-na_if(kids_clean$mother_height_age_percentile,9998) #The percentile needs to be divided by 100 kids_clean$mother_height_age_percentile <-kids_clean$mother_height_age_percentile/100 #qplot(kids_clean$mother_height_age_percentile) ## Height for Age zscore, measured in std devs above/below mean ## Replace all 9998 values with NA, total NAs:4332, total data=3875 kids_clean$mother_height_age_zscore <-na_if(kids_clean$mother_height_age_zscore,9998) ## The zscore needs to be 
divided by 100 kids_clean$mother_height_age_zscore <-kids_clean$mother_height_age_zscore/100 #qplot(kids_clean$mother_height_age_zscore) ## Mother's Education ## # Highest educational level with 0=none, 1=primary, 2=secondary, 3=higher # table(kids_clean$mother_ed_level,exclude=NULL) #No NAs # qplot(kids_clean$mother_ed_level,geom="bar") # Mostly primary # Highest year of education 0-8 # table(kids_clean$mother_ed_year,exclude=NULL) #NAs=1147 -not sure why this is so many more # than levels # qplot(kids_clean$mother_ed_year,geom="bar") #mostly 4-6 years ## Child's diet # Note: Exact wording of the question is: "Now I would like to ask you about liquids or foods #that (NAME FROM 649) had yesterday during the day or at night. I am interested in whether your child had the item I mention even if it was combined with other foods. Did (NAME FROM 649) drink or eat:_____ [Where 649 refers to question about "Youngest child living with her born between 2013 and 2015] ## Checking if "gave child" variables vary by child within the household, by seeing if the sum total of all the food variables + caseid has more uniqueness than caseid alone #diet_food_tot <- rowSums(select(kids_clean, contains("diet")), na.rm=TRUE) #length(unique(kids_clean$caseid)) ## Output: [1] 5955 #length(unique(paste(kids_clean$caseid,diet_food_tot))) ## Output: [1] 5955 ## It does not seem to vary per child because both outputs are the same # make submatrix of diet div calculations kids_diet <- select(kids_clean,contains("diet"), -diet_other_food) <<<<<<< HEAD # Answers are 0=no, 1=yes, 8=don't know. Recode 8 to NA. 
kids_diet<-na_if(kids_diet,8) ======= ## The zscore needs to be divided by 100 kids_clean$height_age_zscore <- (kids_clean$height_age_zscore / 100) # Check z-score versus age library(ggthemes) library(viridis) # No noticable difference between boys and girls stunting, does peak around 20 months kids_clean %>% mutate(stunted = ifelse(height_age_zscore <= -2, 1, 0)) %>% ggplot(aes(x = age_calc_months, y = stunted, colour = factor(sex))) + #geom_jitter(width = 0.25, height = 0.25) + stat_smooth(method = "loess", se = TRUE, span = 0.75, size = 1.15, alpha = 0.1) + theme_fivethirtyeight() + ggtitle("stunting appears to peak near 20 months") # Steady downward trend for stunting and wealth, not surprising kids_clean %>% mutate(stunted = ifelse(height_age_zscore <= -2, 1, 0)) %>% ggplot(aes(x = wealth_index, y = stunted, colour = factor(sex))) + #geom_jitter(width = 0.25, height = 0.25) + stat_smooth(method = "loess", se = TRUE, span = 0.75, size = 1.15, alpha = 0.1) + theme_fivethirtyeight() + ggtitle("stunting declines steadily with wealth (asset accumulation)") # Finally, break it down by wealth and age category # TODO: figure out if 1 is boy or girl! 
kids_clean %>% mutate(stunted = ifelse(height_age_zscore <= -2, 1, 0), agegroup = cut(age_calc_months, seq(0, 60, by = 6))) %>% group_by(agegroup, wealth_index, sex) %>% summarise(stunting = mean(stunted, na.rm = TRUE)) %>% filter(stunting != 0) %>% ggplot(aes(x = agegroup, y = wealth_index, fill = stunting)) + geom_tile(colour = 'white',size = 0.25, stat = "identity") + scale_fill_viridis(option="D") + geom_text(aes(y = wealth_index, x = agegroup, label = sprintf("%1.0f%%", round(100*stunting, 2)), size = 1)) + theme_fivethirtyeight() + facet_wrap(~sex, nrow = 2) ## Note Max is 59 months, ie only children <5 ## Checking if "gave child" variables vary by child, by seeing if the sum total ## of all the food variables + caseid has more uniqueness than caseid alone child_food_tot <- rowSums(select(kids_clean, contains("child")), na.rm=TRUE) length(unique(kids_clean$caseid)) ## Output: [1] 5955 length(unique(paste(kids_clean$caseid,child_food_tot))) ## Output: [1] 5955 ## It does not seem to vary per child # Wording of question: "Now I would like to ask you about liquids or foods #that (NAME FROM 649) had yesterday during the day or at night. #I am interested in whether your child had the #item I mention even if it was #combined with other foods. #Did (NAME FROM 649) drink or eat: # Where 649 refers to question about "Youngest child living with her born #between 2013 and #2015" kids_diet <- select(kids_clean,contains("child"), -child_other_food) >>>>>>> Dev ## Checking if there are rows with NAs in some but not all entries table(rowSums(is.na(kids_diet))) #Output: # 0 1 3 13 #4534 48 1 3273 ##Indeces of the rows that are all NAs #rows_allNAs <- rowSums(is.na(kids_diet)) == 13 #summary(kids_diet) ## Calculate WDDS ## The categories are: 1. Starchy staples (WDDS_starch) ## 2. Dark green leafy vegetables (WDDS_veg_green) ## 3. Other Vitamin A rich fruit and veg (WDDS_vitA) ## 4. Other fruit and veg (WDDS_veg_other) ## 5. Organ meat (WDDS_organ) ## 6. 
Meat and fish (WDDS_meat_fish) ## 7. Eggs (WDDS_eggs) ## 8. Legumes, nuts, and seeds (WDDS_legumes) ## 9. Milk and milk products (WDDS_dairy) <<<<<<< HEAD ======= na0 <- function(x) { ifelse(!is.na(x),x,0) } >>>>>>> Dev ## Next action: Clean this up! # Put data frames back in, not sure how to make rowwise command work. # Note: using rowMeans instead of rowSums due to weird behavior of rowSums wrt na.rm=TRUE (ie NA+NA+NA=0) kids_diet = kids_diet %>% <<<<<<< HEAD mutate(WDDS_starch=(rowMeans(data.frame(kids_diet$diet_tubers,kids_diet$diet_cereals), na.rm=TRUE)>0)*1, WDDS_veg_green=diet_veg_dark_green, WDDS_vitA=(rowMeans(data.frame(kids_diet$diet_veg_yellow_orange,kids_diet$diet_fruit_vit_a), na.rm=TRUE)>0)*1, WDDS_veg_other=diet_fruit_other, WDDS_organ=diet_meat_organ, WDDS_meat_fish=(rowMeans(data.frame(kids_diet$diet_meat,kids_diet$diet_fish), na.rm=TRUE)>0)*1, WDDS_eggs=diet_eggs, WDDS_legumes=diet_legumes_nuts, WDDS_dairy=(rowMeans(data.frame(kids_diet$diet_milk,kids_diet$diet_milk_products), na.rm=TRUE)>0)*1) ##Checking how many NAs there are per row table(rowSums(is.na(select(kids_diet,contains("WDDS"))))) #Output: # 0 1 2 9 #4565 17 1 3273 ======= mutate(WDDS_starch=na0(child_tubers) + na0(child_cereals), WDDS_veg_green=na0(child_veg_dark_green), WDDS_vitA=na0(child_veg_yellow_orange)+na0(child_fruit_vit_a), WDDS_veg_other=na0(child_fruit_other), WDDS_organ=na0(child_meat_organ), WDDS_meat_fish=na0(child_meat)+na0(child_fish), WDDS_eggs=na0(child_eggs), WDDS_legumes=na0(child_legumes_nuts), WDDS_dairy=na0(child_milk)+na0(child_milk_products)) kids_diet$WDDS_DietDiv <- kids_diet %>% select(WDDS_starch:WDDS_dairy) %>% rowSums() kids_clean$DietDiv_WDDS <- kids_diet$WDDS_DietDiv >>>>>>> Dev kids_diet$WDDS_total<-rowSums(select(kids_diet,contains("WDDS"))) #table(kids_diet, exclude=NULL) # 0 1 2 3 4 5 6 7 8 <NA> #1446 317 607 883 676 398 179 44 15 3291 <<<<<<< HEAD kids_clean<-data.frame(kids_clean,select(kids_diet,contains("WDDS"))) ##Variable summary: ## 
interview_date_cmc: units=cmc ## dob_cmc: date of birth, units=cmc ## age_calc_monts: age of child, units=months ## wealth_index: cumulative wealth, 1=poorest,2=poorer,3=middle,4=richer,5=richest ## sex: 0=male, 1=female ## height_age_percentile: continuous, min=0, max=100 ## height_age_zscore: continuous, min=-5.9 max=5.4 ## weight_age_percentile: continuous, min=0, max=100 ## weight_age_zscore: continuous, min=-4.8 max=4.8 ## weight_height_percentile: continuous, min=0, max=100 ## weight_height_zscore: continuous, min=-4.0 max=5.8 ## mother_height_age_percentile: continuous, min=, max= ## mother_height_age_zscore: continuous, min=, max= ## mother_ed_level: highest level of mother's education, 0=none, 1=primary, ## 2=secondary, 3=higher ## mother_ed_year: highest year of education, 0-8, note: more NAs than level ## diet_xx: whether or not child was given ... ======= >>>>>>> Dev
7bf48e3fe50983fd06d6e75261ef5ddaf54c45d9
3714ad3bb1d62aa0fc7b4cebf1e926c7996ecfe1
/hypothesis_solver.R
a799fe39759599873c2576c8bfd94ef13c1fe44a
[]
no_license
hietalai/Automatic_Exam_Solutions
9aea8c51d119b8d435538cb5071a41a7d8b5fce5
431510039cc9d31ab3536a7883ddbaf351e2ac94
refs/heads/master
2021-08-27T18:52:47.987864
2017-11-23T11:02:42
2017-11-23T11:02:42
111,798,590
0
0
null
null
null
null
ISO-8859-1
R
false
false
9,650
r
hypothesis_solver.R
### Hypothesis testing require(xtable) # Function that produces results of test decision decision <- function(test, krit, side = "ne"){ if(side == "ne"){ if(test > -krit & test < krit){ cat(paste("Teststatistikan hamnar ej i det kritiska området.")) decision <- "ej" } else { cat(paste("Teststatistikan hamnar i det kritiska området.")) decision <- "" } } else if(side == "<"){ if(test > -krit){ cat(paste("Teststatistikan hamnar ej i det kritiska området.")) decision <- "ej" } else { cat(paste("Teststatistikan hamnar i det kritiska området.")) decision <- "" } } else { if(test < krit){ cat(paste("Teststatistikan hamnar ej i det kritiska området.")) decision <- "ej" } else { cat(paste("Teststatistikan hamnar i det kritiska området.")) decision <- "" } } return(decision) } ## Testing differences of proportions prop.dif.test <- function(n, x, d0 = 0, alpha = 0.05, side = "ne"){ if(length(n) != length(x)){ stop("Lengths of n and p are different.") } else { p <- x/n } if(any(n*p*(1-p) < 5)){ stop("Populations not deemed normally distributed.") } # State information known from the data cat(paste("$$ p_1 = ", round(p[1], 3), "$$")) cat(paste("$$ p_2 = ", round(p[2], 3), "$$")) cat(paste("$$ n_1 = ", n[1], "$$")) cat(paste("$$ n_2 = ", n[2], "$$")) # Calculate pooled proportions pp <- sum(x)/sum(n) cat(paste("$$ p_p = \\frac{", sum(x),"}{", sum(n) , "} = ", round(pp, 3), "$$")) # Present hypotheses cat(paste("$$ H_0: \\pi_1 - \\pi_2 =", d0, "$$")) if(nchar(side) > 1){ cat(paste("$$ H_a: \\pi_1 - \\pi_2", paste("\\", side, sep = ""), d0, "$$")) } else { cat(paste("$$ H_a: \\pi_1 - \\pi_2", side, d0, "$$")) } # Calculating the test statistic if(d0 == 0){ cat(paste("$$ z_{test} = \\frac{", round(p[1], 3), "-", round(p[2], 3), "}{ \\sqrt{", round(pp, 3), "*", round(1-pp, 3), "* \\left( \\frac{1}{", n[1], "} + \\frac{1}{", n[2], "} \\right)}} $$")) cat(paste("$$ z_{test} = \\frac{", round(p[1] - p[2], 3), "}{", round(sqrt(pp*(1-pp)*(sum(1/n))), 3), "} $$")) ztest <- (p[1] - 
p[2])/sqrt(pp*(1-pp)*(sum(1/n))) cat(paste("$$ z_{test} = ", round(ztest, 2), " $$")) } else { # hypothesis testing without pooled proportion cat("Nothing created here yet") } # Calculating the critical value if(nchar(side) > 1){ zkrit <- qnorm(1-(alpha/2)) cat(paste("$$ z_{krit} = z_{1 - \\alpha/2} = \\pm", round(zkrit, 2), "$$")) } else { zkrit <- qnorm(1-alpha) if(side == "<"){ cat(paste("$$ z_{krit} = z_{\\alpha} = -", round(zkrit, 2), "$$")) } else { cat(paste("$$ z_{krit} = z_{1 - \\alpha} = ", round(zkrit, 2), "$$")) } } # Decision beslut <- decision(test = ztest, krit = zkrit, side = side) cat(paste(" $H_0$ kan ", beslut, " förkastas.")) } ## Testing differences of two means mean.dif.test <- function(x.mean, x.sigma, x.n, d0 = 0, alpha = 0.05, side = "ne", approx = TRUE, var.equal = TRUE){ if(length(x.mean) != length(x.sigma)){ stop("Lengths of the means and standard deviations are different.") } # Present hypotheses cat(paste("$$ H_0: \\mu_1 - \\mu_2 =", d0, "$$")) if(nchar(side) > 1){ cat(paste("$$ H_a: \\mu_1 - \\mu_2", paste("\\", side, sep = ""), d0, "$$")) } else { cat(paste("$$ H_a: \\mu_1 - \\mu_2", side, d0, "$$")) } if(approx){ if(min(x.n) > 30){ cat("Eftersom $n^* > 30$ approximeras $t$ med $z$.") # Calculating the test statistic cat(paste("$$ z_{test} = \\frac{", round(x.mean[1], 3), "-", round(x.mean[2], 3), "}{ \\sqrt{ \\frac{", round(x.sigma[1], 3), "^2}{", x.n[1], "} + \\frac{", round(x.sigma[2], 3), "^2}{", x.n[2], "}}} $$")) cat(paste("$$ z_{test} = \\frac{", round(x.mean[1] - x.mean[2], 3), "}{", round(sqrt(sum(x.sigma^2/x.n)), 3), "} $$")) test <- (x.mean[1] - x.mean[2])/sqrt(sum(x.sigma^2/x.n)) cat(paste("$$ z_{test} = ", round(test, 2), " $$")) # Calculating the critical value if(nchar(side) > 1){ krit <- qnorm(1-(alpha/2)) cat(paste("$$ z_{krit} = z_{1 - \\alpha/2} = \\pm", round(krit, 2), "$$")) } else { krit <- qnorm(1-alpha) if(side == "<"){ cat(paste("$$ z_{krit} = z_{\\alpha} = -", round(krit, 2), "$$")) } else { cat(paste("$$ 
z_{krit} = z_{1 - \\alpha} = ", round(krit, 2), "$$")) } } } else { # T-test cat("Eftersom $n^* < 30$ måste $t$-fördelningen användas.") # Calculating the test statistic cat(paste("$$ t_{test} = \\frac{", round(x.mean[1], 3), "-", round(x.mean[2], 3), "}{ \\sqrt{ \\frac{", round(x.sigma[1], 3), "^2}{", x.n[1], "} + \\frac{", round(x.sigma[2], 3), "^2}{", x.n[2], "}}} $$")) cat(paste("$$ t_{test} = \\frac{", round(x.mean[1] - x.mean[2], 3), "}{", round(sqrt(sum(x.sigma^2/x.n)), 3), "} $$")) test <- (x.mean[1] - x.mean[2])/sqrt(sum(x.sigma^2/x.n)) cat(paste("$$ t_{test} = ", round(test, 2), " $$")) # Calculating the critical value if(nchar(side) > 1){ krit <- qt(1-(alpha/2), df = min(x.n) - 1) cat(paste("$$ t_{krit} = t_{1 - \\alpha/2} = \\pm", round(krit, 2), "$$")) } else { krit <- qt(1-alpha, df = min(x.n) - 1) if(side == "<"){ cat(paste("$$ t_{krit} = t_{\\alpha} = -", round(krit, 2), "$$")) } else { cat(paste("$$ t_{krit} = t_{1 - \\alpha} = ", round(krit, 2), "$$")) } } } } else { if(all(x.n > 30)){ cat("Eftersom båda grupperna har n > 30 approximeras $t$ med $z$.") # Calculating the test statistic cat(paste("$$ z_{test} = \\frac{", round(x.mean[1], 3), "-", round(x.mean[2], 3), "}{ \\sqrt{ \\frac{", round(x.sigma[1], 3), "^2}{", x.n[1], "} + \\frac{", round(x.sigma[2], 3), "^2}{", x.n[2], "}}} $$")) cat(paste("$$ z_{test} = \\frac{", round(x.mean[1] - x.mean[2], 3), "}{", round(sqrt(sum(x.sigma^2/x.n)), 3), "} $$")) test <- (x.mean[1] - x.mean[2])/sqrt(sum(x.sigma^2/x.n)) cat(paste("$$ z_{test} = ", round(test, 2), " $$")) # Calculating the critical value if(nchar(side) > 1){ krit <- qnorm(1-(alpha/2)) cat(paste("$$ z_{krit} = z_{1 - \\alpha/2} = \\pm", round(krit, 2), "$$")) } else { krit <- qnorm(1-alpha) if(side == "<"){ cat(paste("$$ z_{krit} = z_{\\alpha} = -", round(krit, 2), "$$")) } else { cat(paste("$$ z_{krit} = z_{1 - \\alpha} = ", round(krit, 2), "$$")) } } } else if(var.equal == TRUE){ # Calculating the test statistic cat(paste("Vi antar här att 
varianserna är lika för de två grupperna.")) pooled_var <- sum((x.n - 1)*x.sigma^2)/(sum(x.n) - 2) cat(paste("$$ s_p^2 = \\frac{(n_1 - 1)*s_1^2 + (n_2 - 1)*s_2^2}{(n_1 + n_2 - 2)} $$")) cat(paste("$$ = \\frac{", (x.n[1] - 1), "*", round(x.sigma[1], 3), "^2 + ", (x.n[2] - 1), "*", round(x.sigma[2], 3), "^2} {", (sum(x.n) - 2), "} = ", round(pooled_var, 3), " $$")) cat(paste("$$ t_{test} = \\frac{", round(x.mean[1], 3), "-", round(x.mean[2], 3), "}{ \\sqrt{", round(pooled_var, 3), " * \\left(\\frac{1}{", x.n[1], "} + \\frac{1}{", x.n[2], "}\\right)}} $$")) cat(paste("$$ t_{test} = \\frac{", round(x.mean[1] - x.mean[2], 3), "}{", round(sqrt(sum(pooled_var/x.n)), 3), "} $$")) test <- (x.mean[1] - x.mean[2])/sqrt(sum(pooled_var/x.n)) cat(paste("$$ t_{test} = ", round(test, 2), " $$")) # Calculating the critical value if(nchar(side) > 1){ krit <- qt(1-(alpha/2), df = sum(x.n) - 2) cat(paste("$$ t_{krit} = t_{(n_1 + n_2 -2), 1 - \\alpha/2} = \\pm", round(krit, 2), "$$")) } else { krit <- qt(1-alpha, df = sum(x.n) - 2) if(side == "<"){ cat(paste("$$ t_{krit} = t_{(n_1 + n_2 -2), \\alpha} = -", round(krit, 2), "$$")) } else { cat(paste("$$ t_{krit} = t_{(n_1 + n_2 -2), 1 - \\alpha} = ", round(krit, 2), "$$")) } } } } # Decision beslut <- decision(test = test, krit = krit, side = side) cat(paste(" $H_0$ kan ", beslut, " förkastas.")) } ## Chi-square test chi.square <- function(obs, alpha = 0.05){ exp <- obs for(i in 1:nrow(exp)){ exp[i,] <- colSums(obs)*sum(exp[i,])/sum(obs) } print(xtable(exp, digits = c(1, rep(2, ncol(exp))), caption = "Förväntade frekvenser (E)"), comment = FALSE) if(any(exp < 1, sum(exp < 5) > (ncol(exp)*nrow(exp)*0.2))){ cat("Kraven uppfylls inte och ett $\\chi^2$-test kan ej beräknas") } else { # Present hypotheses cat(paste("$$ H_0:~Det~finns~inga~skillnader $$")) cat(paste("$$ H_a:~Det~finns~skillnader $$")) # Calculate statistic chisqtest <- sum((obs-exp)^2/exp) cat("$$ \\chi_{test}^2 = \\sum{\\frac{(O-E)^2}{E}} = ", round(chisqtest, 3), "$$") # 
Critical value chisqcrit <- qchisq(1-alpha, (nrow(obs)-1)*(ncol(obs)-1)) cat("$$ \\chi_{krit}^2 = \\chi^2_{(", nrow(obs), "-1)(", ncol(obs), " -1);", alpha, "} = ", round(chisqcrit, 3), "$$") # Decision beslut <- decision(chisqtest, chisqcrit, side = "ge") cat(paste(" $H_0$ kan ", beslut, " förkastas.")) } }
fa00dc5e8da7cbf2d2586856454671d663a67be0
9872e08ae5789ab289f248955da479056935534b
/man/get_bw.Rd
1412c4c660c29517b4ffa581e49ef67346e14137
[]
no_license
mastoffel/partR2
f452bd5057d466fb97464c8e87b6a9031a5df895
48a0c105236d9effdb533768e8f9cb0a799c28c3
refs/heads/master
2021-09-19T11:03:28.148454
2021-08-05T13:29:17
2021-08-05T13:29:17
99,020,542
23
3
null
null
null
null
UTF-8
R
false
true
317
rd
get_bw.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stats_helpers.R \name{get_bw} \alias{get_bw} \title{Get beta weights} \usage{ get_bw(mod) } \arguments{ \item{mod}{merMod object.} } \value{ data.frame with bw instead of raw estimates } \description{ Get beta weights } \keyword{internal}
c58d903b00f790e53c84a3e9bd533862eecd7172
94bacf8ae33f625e602140d254c11b6fe9edfbbc
/man/cond1.mat.Rd
b8a61c63406a2ab55b057c9986cd621e15047682
[]
no_license
cran/varmixt
7deb71f4f40c3fccc8fbd6e84a1327e31cfcb6a2
3a4d2d30de189eab78e3cdcec090f91c955a7e08
refs/heads/master
2021-01-22T01:28:33.623587
2005-06-17T00:00:00
2005-06-17T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
452
rd
cond1.mat.Rd
\name{cond1.mat} \alias{cond1.mat} \title{Extraction of the matrix with normalized expression data in condition 1} \description{This function extracts the matrix with normalized expression data in condition 1 } \usage{ cond1.mat(data) } \arguments{ \item{data}{gene expression data object} } \details{ } \value{ } \references{} \author{Paul Delmar} \note{} \seealso{} \examples{ } \keyword{internal}
ac8f46d488d63f18ea1d1e5268c2af936e269eda
2deb84edc4c05b36a8b0c5a73a70a49d8ec48f07
/R/iv_factorial.R
83d19142b51694f25b0a2fe6e3e8d28a4f722c39
[]
no_license
cran/factiv
4407299595fd65138db93d58777297ea2b7caf14
97914277c51bc620687724c81a812161555a1f94
refs/heads/master
2023-05-01T12:16:00.059092
2021-05-21T07:00:05
2021-05-21T07:00:05
369,583,550
0
0
null
null
null
null
UTF-8
R
false
false
19,472
r
iv_factorial.R
##' Estimates principal stratum-specific effects and interactions in a
##' 2^K factorial experiment
##'
##' This function estimates treatment effects for 2^K factorial
##' experiments in the face of noncompliance on all factors. A
##' monotonicity assumption is assumed for both treatment-instrument
##' pairs, along with treatment exclusion. See Blackwell (2017) for
##' more details on those assumptions.
##'
##'
##' The procedure uses iterative generalized method of moments (GMM)
##' to estimate both the proportions of each compliance class (also
##' known as principal strata) and the average potential outcomes
##' within those classes. It also provides estimates of several
##' one-way, joint, and interactive treatment effects within these
##' classes.
##'
##' Under the above assumptions, the compliance classes are the
##' product of the compliance classes for each treatment-instrument
##' pair. For instance, \code{"cc"} is the class that would comply
##' with both treatments, \code{"ca"} is the class that would comply
##' with the first treatment and always take the second treatment, and
##' \code{"cn"} is the class that would comply with the first
##' treatment and never take the second treatment. Finally, note that
##' treatment effects are only well-defined for compliance classes for
##' which there is compliance on at least one treatment.
##'
##' @title IV Estimation of 2^K Factorial Design
##' @param formula formula specification of the factorial design with
##'   noncompliance. The right-hand side of the formula should have
##'   two components separated by the `|` symbol, with the first
##'   component containing the K binary treatment variables and the
##'   second component containing the K binary instruments associated
##'   with each treatment variable. The order of the variables in the
##'   formula must match.
##' @param data A data.frame on which to apply the `formula`.
##' @param subset subset of the data to pass to estimation.
##' @param method character indicating if the estimator should be
##'   `"lm"` using the least squares approach (default) or
##'   `"cmd"` to estimate via efficient minimum distance estimator.
##' @param level the confidence level required.
##' @return A list of class `iv_factorial` that contains the following
##' components:
##' \item{rho}{vector of estimated compliance class
##' probabilities.}
##' \item{psi}{vector of the estimated conditional mean of the outcome
##' within the compliance classes.}
##' \item{vcov}{estimated asymptotic variance matrix of the combined
##' `rho` and `psi` parameters.}
##' \item{pcafe_est}{vector of estimated main effects of each factor among
##' perfect compliers.}
##' \item{pcafe_se}{vector of estimated standard errors for the
##' estimated effects in `tau`.}
##' \item{pcafe_cis}{a matrix of confidence intervals for the PCAFE
##' estimates.}
##' \item{level}{the confidence level of the returned confidence
##' intervals.}
##' @author Matt Blackwell
##' @references Matthew Blackwell (2017) Instrumental Variable Methods
##'   for Conditional Effects and Causal Interaction in Voter
##'   Mobilization Experiments, Journal of the American Statistical
##'   Association, 112:518, 590-599,
##'   \doi{10.1080/01621459.2016.1246363}
##'
##' Matthew Blackwell and Nicole Pashley (2020) "Noncompliance in
##'   Factorial Experiments." Working paper.
##'
##' @examples
##' data(newhaven)
##'
##' out <- iv_factorial(turnout_98 ~ inperson + phone | inperson_rand
##'   + phone_rand, data = newhaven)
##'
##' summary(out)
##'
##' @export
##' @importFrom stats model.matrix model.response
iv_factorial <- function(formula, data, subset, method = "lm",
                         level = 0.95) {
  cl <- match.call(expand.dots = TRUE)
  mf <- match.call(expand.dots = FALSE)
  stopifnot(method %in% c("lm", "cmd"))
  m <- match(
    x = c("formula", "data", "subset"),
    table = names(mf),
    nomatch = 0L
  )
  mf <- mf[c(1L, m)]
  mf[[1L]] <- as.name("model.frame")
  mf$drop.unused.levels <- TRUE

  ## must be valid formula
  formula <- Formula::as.Formula(formula)
  stopifnot(length(formula)[1] == 1L, length(formula)[2] %in% 1:2)
  if (inherits(try(terms(formula), silent = TRUE), "try-error")) {
    stop("cannot use dot '.' in formulas")
  }

  ## drop intercepts so D and Z model matrices contain only the
  ## treatment/instrument indicators
  mt_d <- terms(formula, data = data, rhs = 1)
  attr(mt_d, "intercept") <- 0
  mt_z <- terms(formula, data = data, rhs = 2)
  attr(mt_z, "intercept") <- 0

  ## add to mf call
  mf$formula <- formula

  ## finally evaluate model.frame, create data matrix
  mf <- eval(mf, parent.frame())
  mt <- attr(mf, "terms")  # terms object

  Y <- model.response(mf, "numeric")
  D <- model.matrix(mt_d, mf)
  Z <- model.matrix(mt_z, mf)
  K <- dim(Z)[2]

  if (method == "lm") {
    out <- factiv_lm_fit(Y, D, Z)
  } else {
    out <- factiv_cmd_fit(Y, D, Z)
  }

  ## map (psi, rho) to complier effects (MCAFE/PCAFE) and their SEs
  effs <- psi_to_tau(out$psi, out$rho, K, out$vcov, colnames(D))
  out$pcafe_est <- effs$pcafe_est
  out$pcafe_se <- effs$pcafe_se
  out$mcafe_est <- effs$mcafe_est
  out$mcafe_se <- effs$mcafe_se
  out$level <- level
  alpha <- (1 - level) / 2
  qq <- abs(qnorm(alpha))
  out$pcafe_cis <- matrix(NA, nrow = length(out$pcafe_est), ncol = 2)
  out$pcafe_cis[, 1] <- out$pcafe_est - qq * out$pcafe_se
  out$pcafe_cis[, 2] <- out$pcafe_est + qq * out$pcafe_se
  out$mcafe_cis <- matrix(NA, nrow = length(out$mcafe_est), ncol = 2)
  out$mcafe_cis[, 1] <- out$mcafe_est - qq * out$mcafe_se
  out$mcafe_cis[, 2] <- out$mcafe_est + qq * out$mcafe_se
  rownames(out$pcafe_cis) <- names(out$pcafe_est)
  colnames(out$pcafe_cis) <- c("ci_lower", "ci_upper")
  rownames(out$mcafe_cis) <- names(out$mcafe_est)
  colnames(out$mcafe_cis) <- c("ci_lower", "ci_upper")
  ## zero SEs indicate degenerate estimates; report them as missing
  out$pcafe_se[out$pcafe_se == 0] <- NA
  out$mcafe_se[out$mcafe_se == 0] <- NA
  class(out) <- "iv_factorial"
  out$call <- cl
  out$df.residual <- nrow(D) - ncol(out$vcov)
  return(out)
}

## Least-squares GMM fit: recovers compliance-class probabilities (rho)
## and within-class outcome means (psi) from the instrument-arm means of
## the treatment-pattern indicators (R) and outcome-weighted indicators (H).
factiv_lm_fit <- function(y, d, z) {
  K <- dim(d)[2]
  N <- dim(d)[1]
  J <- 2 ^ K
  if (K != dim(z)[2]) stop("d/z dims do not match")

  ## enumerate all (d, z) patterns and principal strata ("a"/"n"/"c" per factor)
  dz_vals <- rep(list(c(1, 0)), 2 * K)
  ps_grid <- expand.grid(rep(list(c("a", "n", "c")), K))
  dz_d_grid <- expand.grid(dz_vals)[, 1:K]
  dz_z_grid <- expand.grid(dz_vals)[, (K + 1):(2 * K)]
  dz_z_grid_str <- do.call(paste0, dz_z_grid)
  dz_d_grid_str <- do.call(paste0, dz_d_grid)
  ## ps_type codes which strata are consistent with each (d, z) pair
  ps_type <- 2 + dz_z_grid - dz_d_grid + 2 * dz_d_grid * dz_z_grid
  ps_dict <- list("a", c("n", "c"), "n", c("a", "c"))

  ## A maps strata probabilities to Pr(D pattern | Z pattern)
  A <- matrix(1, nrow = nrow(dz_d_grid), ncol = nrow(ps_grid))
  for (k in 1:K) {
    k_mat <- sapply(ps_dict[ps_type[, k]],
                    function(x) 1 * (ps_grid[, k] %in% x))
    A <- A * t(k_mat)
  }
  colnames(A) <- do.call(paste0, ps_grid)
  rownames(A) <- paste0(dz_d_grid_str, "_", dz_z_grid_str)
  Aw <- solve(crossprod(A)) %*% t(A)

  ## B maps strata-specific outcome means to observed-arm outcome moments
  B <- matrix(0, nrow = nrow(dz_d_grid), ncol = nrow(dz_d_grid))
  rownames(B) <- rownames(A)
  hold <- list(c("n", "c"), c("a", "c"))
  for (j in 1:nrow(unique(dz_d_grid))) {
    this_str <- unique(dz_d_grid_str)[j]
    this_strata <- expand.grid(hold[unlist(unique(dz_d_grid)[j, ]) + 1])
    s_names <- do.call(paste0, this_strata)
    grab_rows <- dz_d_grid_str == this_str
    B[grab_rows, grab_rows] <- A[grab_rows, s_names]
    colnames(B)[grab_rows] <- paste0(this_str, "_", s_names)
  }
  Bw <- solve(crossprod(B)) %*% t(B)

  z_grid <- expand.grid(rep(list(c(1, 0)), times = K))
  z_grid_str <- do.call(paste0, z_grid)
  z_str <- apply(z, 1, paste0, collapse = "")
  d_grid <- expand.grid(rep(list(c(1, 0)), times = K))
  d_grid_str <- do.call(paste0, d_grid)
  d_str <- apply(d, 1, paste0, collapse = "")

  ## calculate H and R data: R indicates which D pattern each unit took,
  ## H = y * R carries the outcome into the matching column
  R <- matrix(0, nrow = N, ncol = J)
  for (j in 1:J) {
    R[d_str == d_grid_str[j], j] <- 1
  }
  H <- y * R

  s_z <- array(NA, dim = c(2 * J, 2 * J, J))
  Hbar <- matrix(NA, nrow = J, ncol = J)
  Rbar <- matrix(NA, nrow = J, ncol = J)
  tot_p <- ncol(A) + ncol(B)
  Q_z <- array(0, dim = c(tot_p, 2 * J, J))
  r_ind <- 1:ncol(A)
  h_ind <- (ncol(A) + 1):tot_p
  vcv <- matrix(0, ncol = tot_p, nrow = tot_p)
  theta <- rep(0, times = tot_p)
  ## accumulate moment conditions arm by arm
  for (j in 1:J) {
    this_z <- z_grid_str[j]
    jj <- which(z_str == this_z)
    Hbar[, j] <- colMeans(H[jj, ])
    Rbar[, j] <- colMeans(R[jj, ])
    jjj_A <- grep(paste0("_", this_z), colnames(Aw))
    jjj_B <- grep(paste0("_", this_z), colnames(Bw))
    Q_z[r_ind, 1:J, j] <- Aw[, jjj_A]
    Q_z[h_ind, (J + 1):(2 * J), j] <- Bw[, jjj_B]
    s_z[, , j] <- var(cbind(R[jj, ], H[jj, ]))
    theta <- theta + Q_z[, , j] %*% c(Rbar[, j], Hbar[, j])
    vcv <- vcv + (1 / length(jj)) *
      (Q_z[, , j] %*% s_z[, , j] %*% t(Q_z[, , j]))
  }
  rho <- theta[r_ind]
  names(rho) <- colnames(A)
  psi <- theta[h_ind]
  names(psi) <- colnames(B)
  rownames(vcv) <- c(colnames(A), colnames(B))
  colnames(vcv) <- c(colnames(A), colnames(B))
  return(list(A = A, B = B, Hbar = Hbar, Rbar = Rbar, rho = rho,
              psi = psi, vcov = vcv))
}

## Minimum-distance GMM fit: same moments as factiv_lm_fit but with
## redundant moments/parameters dropped and, when the moment variance
## matrix is invertible, an efficient (optimally weighted) second step.
factiv_cmd_fit <- function(y, d, z) {
  K <- dim(d)[2]
  N <- dim(d)[1]
  J <- 2 ^ K
  if (K != dim(z)[2]) stop("d/z dims do not match")

  dz_vals <- rep(list(c(1, 0)), 2 * K)
  ps_grid <- expand.grid(rep(list(c("a", "n", "c")), K))
  dz_d_grid <- expand.grid(dz_vals)[, 1:K]
  dz_z_grid <- expand.grid(dz_vals)[, (K + 1):(2 * K)]
  dz_z_grid_str <- do.call(paste0, dz_z_grid)
  dz_d_grid_str <- do.call(paste0, dz_d_grid)
  ps_type <- 2 + dz_z_grid - dz_d_grid + 2 * dz_d_grid * dz_z_grid
  ps_dict <- list("a", c("n", "c"), "n", c("a", "c"))

  A <- matrix(1, nrow = nrow(dz_d_grid), ncol = nrow(ps_grid))
  for (k in 1:K) {
    k_mat <- sapply(ps_dict[ps_type[, k]],
                    function(x) 1 * (ps_grid[, k] %in% x))
    A <- A * t(k_mat)
  }
  colnames(A) <- do.call(paste0, ps_grid)
  rownames(A) <- paste0(dz_d_grid_str, "_", dz_z_grid_str)
  ## drop the all-zero-D moments and the all-never-taker stratum, which
  ## are redundant given that probabilities sum to one
  drop_mom <- grep(paste0(c(rep(0, times = K), "_"), collapse = ""),
                   rownames(A))
  drop_par <- grep(paste0(rep("n", times = K), collapse = ""),
                   colnames(A))
  AA <- A[-drop_mom, -drop_par]
  Aw <- solve(crossprod(AA)) %*% t(AA)

  B <- matrix(0, nrow = nrow(dz_d_grid), ncol = nrow(dz_d_grid))
  rownames(B) <- rownames(A)
  hold <- list(c("n", "c"), c("a", "c"))
  for (j in 1:nrow(unique(dz_d_grid))) {
    this_str <- unique(dz_d_grid_str)[j]
    this_strata <- expand.grid(hold[unlist(unique(dz_d_grid)[j, ]) + 1])
    s_names <- do.call(paste0, this_strata)
    grab_rows <- dz_d_grid_str == this_str
    B[grab_rows, grab_rows] <- A[grab_rows, s_names]
    colnames(B)[grab_rows] <- paste0(this_str, "_", s_names)
  }
  Bw <- solve(crossprod(B)) %*% t(B)

  z_grid <- expand.grid(rep(list(c(1, 0)), times = K))
  z_grid_str <- do.call(paste0, z_grid)
  z_str <- apply(z, 1, paste0, collapse = "")
  d_grid <- expand.grid(rep(list(c(1, 0)), times = K))
  d_grid_str <- do.call(paste0, d_grid)
  d_str <- apply(d, 1, paste0, collapse = "")

  R <- matrix(0, nrow = N, ncol = J)
  for (j in 1:J) {
    R[d_str == d_grid_str[j], j] <- 1
  }
  H <- y * R
  R <- R[, -J]  # drop redundant last indicator (columns sum to 1)

  s_z <- array(NA, dim = c(2 * J - 1, 2 * J - 1, J))
  Hbar <- matrix(NA, nrow = J, ncol = J)
  Rbar <- matrix(NA, nrow = J - 1, ncol = J)
  tot_p <- ncol(AA) + ncol(B)
  Q_z <- array(0, dim = c(tot_p, 2 * J - 1, J))
  r_ind <- 1:ncol(AA)
  h_ind <- (ncol(AA) + 1):tot_p
  vcv <- matrix(0, ncol = tot_p, nrow = tot_p)
  theta <- rep(0, times = tot_p)
  HR_var <- matrix(0, nrow = ncol(R) * J + ncol(H) * J,
                   ncol = ncol(R) * J + ncol(H) * J)
  ## first step: identity-weighted estimates plus the moment variance
  for (j in 1:J) {
    this_z <- z_grid_str[j]
    jj <- which(z_str == this_z)
    Hbar[, j] <- colMeans(H[jj, ])
    Rbar[, j] <- colMeans(R[jj, ])
    jjj_A <- grep(paste0("_", this_z), colnames(Aw))
    jjj_B <- grep(paste0("_", this_z), colnames(Bw))
    Q_z[r_ind, 1:(J - 1), j] <- Aw[, jjj_A]
    Q_z[h_ind, J:(2 * J - 1), j] <- Bw[, jjj_B]
    s_z[, , j] <- var(cbind(R[jj, ], H[jj, ]))
    HR_var[c(jjj_A, jjj_B + ncol(Aw)), c(jjj_A, jjj_B + ncol(Aw))] <-
      s_z[, , j]
    theta <- theta + Q_z[, , j] %*% c(Rbar[, j], Hbar[, j])
    vcv <- vcv + (1 / length(jj)) *
      (Q_z[, , j] %*% s_z[, , j] %*% t(Q_z[, , j]))
  }
  rho <- theta[r_ind]
  names(rho) <- colnames(AA)
  psi <- theta[h_ind]
  names(psi) <- colnames(B)

  ## second step: efficient weighting, only if HR_var is invertible
  if (qr(HR_var)$rank == ncol(HR_var)) {
    Ar <- nrow(AA)
    Aw_opt <- solve(crossprod(AA, solve(HR_var[1:Ar, 1:Ar]) %*% AA)) %*%
      t(AA) %*% solve(HR_var[1:Ar, 1:Ar])
    Bw_opt <- solve(crossprod(B, solve(HR_var[-(1:Ar), -(1:Ar)]) %*% B)) %*%
      t(B) %*% solve(HR_var[-(1:Ar), -(1:Ar)])
    vcv2 <- matrix(0, ncol = tot_p, nrow = tot_p)
    theta2 <- rep(0, times = tot_p)
    for (j in 1:J) {
      this_z <- z_grid_str[j]
      jj <- which(z_str == this_z)
      jjj_A <- grep(paste0("_", this_z), colnames(Aw))
      jjj_B <- grep(paste0("_", this_z), colnames(Bw))
      Q_z[r_ind, 1:(J - 1), j] <- Aw_opt[, jjj_A]
      Q_z[h_ind, J:(2 * J - 1), j] <- Bw_opt[, jjj_B]
      theta2 <- theta2 + Q_z[, , j] %*% c(Rbar[, j], Hbar[, j])
      vcv2 <- vcv2 + (1 / length(jj)) *
        (Q_z[, , j] %*% s_z[, , j] %*% t(Q_z[, , j]))
    }
    rho2 <- theta2[r_ind]
    names(rho2) <- colnames(AA)
    psi2 <- theta2[h_ind]
    names(psi2) <- colnames(B)
  } else {
    warning("singular weight matrix with cmd, using lm...")
    rho2 <- rho
    psi2 <- psi
    vcv2 <- vcv
  }
  rownames(vcv2) <- c(colnames(AA), colnames(B))
  colnames(vcv2) <- c(colnames(AA), colnames(B))
  return(list(A = A, B = B, Hbar = Hbar, Rbar = Rbar, rho = rho2,
              psi = psi2, vcov = vcv2))
}

## Converts the (rho, psi) parameters into MCAFE/PCAFE effect estimates
## with delta-method standard errors. `var_names` labels the factors.
psi_to_tau <- function(psi, rho, K, vcv, var_names) {
  J <- 2 ^ K - 1

  ## reference grids
  ps_grid <- expand.grid(rep(list(c("a", "n", "c")), K))
  z_grid <- expand.grid(rep(list(c(1, 0)), times = K))
  z_grid_str <- do.call(paste0, z_grid)

  ## creating contrast matrices
  g <- expand.grid(rep(list(c(1, -1)), times = K))
  rownames(g) <- z_grid_str
  g_m_psi <- matrix(0, nrow = length(rho) + length(psi), ncol = J)
  rownames(g_m_psi) <- rownames(vcv)
  g_s_psi <- g_m_rho <- g_s_rho <- g_m_psi
  psi_d_grid <- sapply(strsplit(rownames(vcv), "_"), function(x) x[[1]])
  psi_ps_grid <- sapply(strsplit(names(psi), "_"), function(x) x[[2]])
  num_c <- rowSums(ps_grid == "c")
  names(num_c) <- do.call(paste0, ps_grid)
  psi_adj <- c(rep(0, times = length(rho)), num_c[psi_ps_grid])
  g_s_rho[paste0(rep("c", K), collapse = ""), ] <- 1
  scomps_psi <- grep(paste0(c("_", rep("c", K)), collapse = ""),
                     rownames(vcv))
  comb_list <- all_subsets(1:K)
  eff_labs <- character(J)
  for (j in 1:J) {
    this_comb <- comb_list[[j]]
    k <- length(this_comb)
    eff_labs[j] <- paste0(var_names[this_comb], collapse = ":")
    this_contr <- g[, this_comb, drop = FALSE]
    this_contr <- apply(this_contr, 1, prod)
    ## marginal compliers: "c" on every factor in this combination
    mcomps <- rowSums(ps_grid[, this_comb, drop = FALSE] ==
                        rep("c", k)) == k
    mcomps <- apply(ps_grid[mcomps, , drop = FALSE], 1, paste0,
                    collapse = "")
    mcomps_psi <- grep(paste0("_(", paste0(mcomps, collapse = "|"), ")"),
                       rownames(vcv))
    g_m_rho[mcomps, j] <- 1
    g_m_psi[mcomps_psi, j] <- this_contr[psi_d_grid[mcomps_psi]]
    g_s_psi[scomps_psi, j] <- this_contr[psi_d_grid[scomps_psi]]
  }
  colnames(g_m_psi) <- colnames(g_s_psi) <- eff_labs
  colnames(g_m_rho) <- colnames(g_s_rho) <- eff_labs
  g_m_psi <- g_m_psi / 2 ^ (psi_adj - 1)
  g_s_psi <- g_s_psi / 2 ^ (K - 1)
  theta <- c(rho, psi)
  m_num <- c(t(g_m_psi) %*% theta)
  m_den <- c(t(g_m_rho) %*% theta)
  s_num <- c(t(g_s_psi) %*% theta)
  s_den <- c(t(g_s_rho) %*% theta)
  mcafe_est <- m_num / m_den
  pcafe_est <- s_num / s_den
  ## delta-method variance for each ratio estimator
  m_num_var <- diag(t(g_m_psi) %*% vcv %*% g_m_psi)
  s_num_var <- diag(t(g_s_psi) %*% vcv %*% g_s_psi)
  m_den_var <- diag(t(g_m_rho) %*% vcv %*% g_m_rho)
  s_den_var <- diag(t(g_s_rho) %*% vcv %*% g_s_rho)
  m_cov <- diag(t(g_m_psi) %*% vcv %*% g_m_rho)
  s_cov <- diag(t(g_s_psi) %*% vcv %*% g_s_rho)
  mcafe_var <- m_den ^ (-2) * m_num_var +
    (m_num ^ 2 / m_den ^ 4) * m_den_var -
    2 * (m_num / m_den ^ 3) * m_cov
  pcafe_var <- s_den ^ (-2) * s_num_var +
    (s_num ^ 2 / s_den ^ 4) * s_den_var -
    2 * (s_num / s_den ^ 3) * s_cov
  mcafe_se <- sqrt(mcafe_var)
  pcafe_se <- sqrt(pcafe_var)
  names(pcafe_est) <- names(pcafe_se) <- eff_labs
  names(mcafe_est) <- names(mcafe_se) <- eff_labs
  ## last effect is really a PCAFE (all factors jointly), so drop it
  ## from the MCAFE output
  mcafe_est <- mcafe_est[-J]
  mcafe_se <- mcafe_se[-J]
  return(list(mcafe_est = mcafe_est, mcafe_se = mcafe_se,
              pcafe_est = pcafe_est, pcafe_se = pcafe_se))
}

## All non-empty subsets of x, ordered by subset size.
all_subsets <- function(x) {
  unlist(lapply(x, function(z) as.list(as.data.frame(combn(x, z)))),
         recursive = FALSE)
}

#' @export
print.iv_factorial <- function(x, ...) {
  cat("\nCall:\n")
  print(x$call)
  cat("\nMain effects:\n")
  ## bug fix: the fitted object stores the main-effect estimates in
  ## `pcafe_est`; there is no `tau` component, so `print(x$tau)`
  ## previously printed NULL.
  print(x$pcafe_est)
  cat("\n")
  invisible(x)
}

#' @export
summary.iv_factorial <- function(object, ...) {
  rdf <- object$df.residual
  tval <- object$pcafe_est / object$pcafe_se
  pval <- 2 * pt(abs(tval), rdf, lower.tail = FALSE)
  ## NOTE(review): the fitted object carries no "terms" component, so
  ## that entry of `out` is NULL -- confirm whether it should be kept.
  out <- object[c("call", "terms", "vcov")]
  out$coefficients <- cbind(object$pcafe_est, object$pcafe_se, tval, pval)
  out$c_prob <- object$rho[length(object$rho)]
  c_pos <- sum(!is.na(object$rho)) - 1
  out$c_prob_se <- sqrt(out$vcov[c_pos, c_pos])
  class(out) <- "summary.iv_factorial"
  out
}

#' @export
print.summary.iv_factorial <- function(x,
                                       digits = max(3L, getOption("digits") - 3L),
                                       ...) {
  cat("\n Main effects among perfect compliers:\n")
  stats::printCoefmat(x$coefficients, digits = digits)
  cat("\nEstimated prob. of perfect compliers: ",
      formatC(x$c_prob, digits), "\tSE = ", formatC(x$c_prob_se, digits))
  cat("\n")
  invisible(x)
}

##' Tidy summarizes information about the components of a model.
##'
##'
##' @title Tidy an iv_factorial object
##' @param x An `iv_factorial` object produced by a call to [factiv::iv_factorial()]
##' @param conf.int Logical indicating whether or not to include a
##'   confidence interval in the tidied output. Defaults to `FALSE`
##' @param conf.level The confidence level to use for the confidence
##'   interval if `conf.int = TRUE`. Must be strictly greater than 0
##'   and less than 1. Defaults to 0.95, which corresponds to a 95
##'   percent confidence interval.
##' @param ... Additional arguments. Not used. Needed to match generic
##'   signature only.
##' @return A [tibble::tibble()] with columns:
##' \item{term}{The name of the effect term.}
##' \item{estimand}{Which complier effect being estimated.}
##' \item{estimate}{The estimated value of the effect.}
##' \item{std.error}{The estimated standard error of the effect.}
##' \item{conf.low}{Lower bound of the confidence interval for the
##' estimate.}
##' \item{conf.high}{Upper bound of the confidence interval for the
##' estimate.}
##' @author Matt Blackwell
##' @export
tidy.iv_factorial <- function(x, conf.int = FALSE, conf.level = 0.95, ...) {
  tms <- c(names(x$mcafe_est), names(x$pcafe_est))
  estimands <- c(rep("MCAFE", length(x$mcafe_est)),
                 rep("PCAFE", length(x$pcafe_est)))
  ests <- c(x$mcafe_est, x$pcafe_est)
  ses <- c(x$mcafe_se, x$pcafe_se)
  ret <- tibble::tibble(term = tms, estimand = estimands,
                        estimate = ests, std.error = ses)
  if (conf.int) {
    alpha <- (1 - conf.level) / 2
    qq <- abs(qnorm(alpha))
    ret <- dplyr::mutate(ret,
                         conf.low = estimate - qq * std.error,
                         conf.high = estimate + qq * std.error)
  }
  return(ret)
}

#' @importFrom utils globalVariables
globalVariables(
  c(
    "estimate",
    "std.error"
  )
)
8df71c0c58d8158645ee2b054f3eb212c87bfd18
70341290df2fa49f2f03508f1421f85f9196e5ef
/R/fit_logit.R
b5953d8a7b5eb4d073378b1c6f27a1af3595e173
[]
no_license
jmsigner/amt
f74f5621513ccfec0593facc78fc3bfa667a5df1
8df534d5dee8fafb75b0aeb95717cf85141c8ddb
refs/heads/master
2023-07-23T15:49:56.907437
2023-07-17T16:19:36
2023-07-17T16:19:36
76,946,660
36
16
null
2022-07-30T07:07:56
2016-12-20T10:22:09
R
UTF-8
R
false
false
792
r
fit_logit.R
#' Fit logistic regression
#'
#' A thin wrapper around `stats::glm` for use in piped workflows.
#' @param data `[data.frame]` \cr The data used to fit a model.
#' @param formula `[formula]` \cr The model formula.
#' @param ... Further arguments passed to `stats::glm`.
#' @name fit_logit
#' @returns A list with the model output.
#' @export
fit_logit <- function(data, formula, ...) {
  fitted_model <- stats::glm(
    formula,
    data = data,
    family = stats::binomial(link = "logit"),
    ...
  )
  wrapper <- list(model = fitted_model)
  class(wrapper) <- c("fit_logit", "glm", class(wrapper))
  wrapper
}

#' @export
coef.fit_logit <- function(object, ...) {
  # Delegate to the underlying glm fit.
  stats::coef(object$model, ...)
}

#' @export
summary.fit_logit <- function(object, ...) {
  # Delegate to the underlying glm fit.
  base::summary(object$model, ...)
}

#' @rdname fit_logit
#' @export
fit_rsf <- fit_logit
f426f763eeee61d462ad51bb01ae7fce269fbaa7
0102f791bde704863c4822abf1aee441b0715a53
/risk_network/visNetwork-master/man/visExport.Rd
cbe91d4448b8eabff8891502bf115b85324f987d
[ "MIT" ]
permissive
WilliamTorsett/data_analytics
34ade6b3505a793332f91c2b0190f7509559c78b
01f98c5afaf652990897fd1f9d9df1ae2d3d2ee3
refs/heads/master
2022-12-06T14:40:42.492982
2020-08-30T14:40:30
2020-08-30T14:40:30
258,614,710
0
1
MIT
2020-08-30T14:40:31
2020-04-24T20:15:57
HTML
UTF-8
R
false
true
1,770
rd
visExport.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visExport.R
\name{visExport}
\alias{visExport}
\title{Network export configuration}
\usage{
visExport(
  graph,
  type = "png",
  name = "network",
  label = paste0("Export as ", type),
  background = "#fff",
  float = "right",
  style = NULL,
  loadDependencies = TRUE,
  ...
)
}
\arguments{
\item{graph}{: a visNetwork object}

\item{type}{: Type of export. One of "png" (default), "jpeg" or "pdf"}

\item{name}{: name of image, defaults to "network"}

\item{label}{: Label on button, defaults to "Export as png/jpeg/pdf"}

\item{background}{: background color, defaults to white (#fff). Works only if
network background is transparent.}

\item{float}{: button position, defaults to "right"}

\item{style}{: button CSS style.}

\item{loadDependencies}{: Boolean. TRUE by default. Load libraries for export
(fileSaver, Blob, canvastoBlob, html2canvas, jsPDF)}

\item{...}{: arguments for \link{addExport}}
}
\description{
Network export configuration. This function only works within shiny or a web browser.
}
\examples{

\dontrun{

nodes <- data.frame(id = 1:3, group = c("B", "A", "B"))
edges <- data.frame(from = c(1,2), to = c(2,3))

visNetwork(nodes, edges) \%>\%
 visGroups(groupname = "A", color = "red") \%>\%
 visGroups(groupname = "B", color = "lightblue") \%>\%
 visLegend() \%>\% visExport()

visNetwork(nodes, edges) \%>\%
 visGroups(groupname = "A", color = "red") \%>\%
 visGroups(groupname = "B", color = "lightblue") \%>\%
 visLegend() \%>\% visExport(type = "jpeg", name = "export-network",
   float = "left", label = "Save network", background = "purple", style= "")

}

}
\references{
See online documentation \url{http://datastorm-open.github.io/visNetwork/}
}
\seealso{
\link{visSave}
}
fa922499d68e5330add6b0b93c70c8dcffe44357
1874a54baed6d1d6500328b3a9e81cccf82e9c0b
/code-bill.r
0bec03e970a692367628d8c174d88aa38991a721
[]
no_license
anishpurohit/R-codes
7902334bb68bdfb05b0710eca1e786c77ec65e81
dcc6c6d5e8d2cd975b7fc84cdb564627ddfc3bee
refs/heads/master
2021-01-11T14:18:58.043801
2017-06-19T08:19:55
2017-06-19T08:19:55
81,337,570
0
0
null
null
null
null
UTF-8
R
false
false
3,373
r
code-bill.r
setwd("C:/Users/dell/Desktop/Data Science/R Code/Telecom-LR Bill Case (1)/Telecom-LR Bill Case") data <- read.csv("billdata18052014.csv",stringsAsFactors = FALSE) library(dplyr) glimpse(data) table(data$salary) data <- data %>% mutate(Payer_Company = as.numeric(Payer =="Company"), Payer_Self = as.numeric(Payer =="Self")) %>% select(-Payer) data <- data %>% mutate(Travel_high = as.numeric(Travel =="High"), Travel_medium = as.numeric(Travel =="Medium")) %>% select(-Travel) data <- data %>% mutate(Relatives_Abroad_Yes = as.numeric(RelativesAbroad =="Yes")) %>% select(-RelativesAbroad) data <- data %>% mutate(job_business = as.numeric(jobtype =="Business"), job_pvt = as.numeric(jobtype =="Private"), job_govt = as.numeric(jobtype =="Government")) %>% select(-jobtype) table(data$AvgBill) data.bk <- data colnames(data) #Setting the seed to review set.seed(3) #Splitting the data in train and test s=sample(1:nrow(data),0.7*nrow(data)) train.temp.set=data[s,] test.set=data[-s,] #Splitting the data in train and validation s1=sample(1:nrow(train.temp.set),0.7*nrow(train.temp.set)) train.set=train.temp.set[s1,] val.set=train.temp.set[-s1,] library(car) fit = (lm(AvgBill~., data=train.set)) vif(fit) dep.vars <-attributes(alias(fit)$Complete)$dimnames[[1]] formula.new <- as.formula( paste( paste(deparse(AvgBill~.), collapse=""), paste(dep.vars, collapse="-"), sep="-" ) ) fit_new = (lm(AvgBill ~ . - dv_jt_pvt - dv_payer_company - ClsRelativesCnt - dv_travel_low - dv_travel_medium - dv_travel_high - dv_Rel_yes - dv_Rel_No - Payer_Company - Payer_Self - Travel_high - Travel_medium - Relatives_Abroad_Yes - job_business - job_pvt - job_govt, data=train.set)) fit_new = (lm(AvgBill ~ . 
- dv_jt_pvt - dv_payer_company - ClsRelativesCnt - dv_travel_low - dv_travel_medium - dv_travel_high - dv_Rel_yes - dv_Rel_No - Payer_Company - Payer_Self - Travel_high - Travel_medium - Relatives_Abroad_Yes - job_business - job_pvt - job_govt -StdAvgBill, data=train.set)) sort(vif(fit_new), decreasing = T) library(sandwich) vcovHC(fit_new,omega=NULL, type="HC4") library("lmtest") coeftest(fit_new,df=Inf,vcov=vcovHC(fit_new,type="HC4")) ### for val data fit_val = (lm(AvgBill ~ . - dv_jt_pvt - dv_payer_company - ClsRelativesCnt - dv_travel_low - dv_travel_medium - dv_travel_high - dv_Rel_yes - dv_Rel_No - Payer_Company - Payer_Self - Travel_high - Travel_medium - Relatives_Abroad_Yes - job_business - job_pvt - job_govt -StdAvgBill, data=val.set)) sort(vif(fit_val), decreasing = T) summary(fit_val) fitted(fit_val) plot(fit_val) rmse=sqrt(mean((predict(fit_val,test.set)-test.set$AvgBill)**2)) rmse # visual agreement plot(test.set$AvgBill,predict(fit_val,test.set)) # Fit diagnostics # Error randomness plot(fit_val,which=1) # There doesnt seem to be any pattern , we need not worry about our model definition # Error Normality plot(fit_val,which=2) hist(fit_val$residuals,breaks = 20) # Error variance plot(fit_val,which=3) # Outliers detection : None found , cook's distance < 1 for all obs plot(fit_val,which=4)
763b0291def07b8e315472761dac885f2935275b
3a9f4106a5c6fac300371f78daab9164b099dd7f
/R Scripts/Calculations/Avg Cost.R
cd7a9cabb87addd6748ce0e2d553287fc306ac43
[]
no_license
orlandofreedom/FantasyFootballAnalyticsR
3a9eb19d8b151211784b8b9c1dbf13ce5b37bb62
a69f0f83e9ebd8d3953aaa5092132d71f9425bd1
refs/heads/master
2020-12-31T06:23:13.192077
2014-12-07T06:00:33
2014-12-07T06:00:33
null
0
0
null
null
null
null
UTF-8
R
false
false
12,136
r
Avg Cost.R
###########################
# File: Avg Cost.R
# Description: Downloads a Player's Avg Cost in Yahoo Auction Drafts
# Date: 3/3/2013
# Author: Isaac Petersen (isaac@fantasyfootballanalytics.net)
# Notes:
# To do:
###########################

#Library
library("stringr")
library("XML")
library("data.table")

#Functions
source(paste(getwd(),"/R Scripts/Functions/Functions.R", sep=""))
source(paste(getwd(),"/R Scripts/Functions/League Settings.R", sep=""))

# TRUE for odd integers; used to split the alternating cost/ADP pages below.
is.odd <- function(x) x %% 2 != 0

#Load data
load(paste(getwd(),"/Data/VOR.RData", sep=""))

###############
# Yahoo
###############

#Scrape data: for each position, one "AD" (auction cost) page and one
#"SD" (snake-draft ADP) page, so the list alternates cost/ADP.
yahoo_baseurl <- "http://football.fantasysports.yahoo.com/f1/draftanalysis?"
yahoo_pos <- list(QB="QB", RB="RB", WR="WR", TE="TE", K="K", DST="DEF")
yahoo_pages <- list(Cost="AD", ADP="SD")
yahoo_urls <- paste0(yahoo_baseurl, "tab=", yahoo_pages, "&pos=", rep(yahoo_pos, each=length(yahoo_pages)))
yahoo <- lapply(yahoo_urls, function(x) {data.table(readHTMLTable(x, stringsAsFactors = FALSE)[2]$draftanalysistable)})

yahooList <- yahoo
yahooListCost <- list()
yahooListADP <- list()

for(i in 1:length(yahooList)){
  #Add position to projection
  yahooList[[i]][,pos := rep(names(yahoo_pos), each=length(yahoo_pages))[i]]
  yahooList[[i]][,pos := as.factor(pos)]

  #Add variable names: odd entries are cost pages, even entries ADP pages
  if(is.odd(i)){
    setnames(yahooList[[i]], c("player","costProjected_yahoo","cost_yahoo","draftedPct_yahoo","pos"))
    yahooListCost[[(i+1)/2]] <- setDT(yahooList[[i]])
  } else{
    setnames(yahooList[[i]], c("player","adp_yahoo","avgRound_yahoo","draftedPct_yahoo","pos"))
    yahooListADP[[i/2]] <- setDT(yahooList[[i]])
  }
}

#Merge
avgCost_yahoo <- rbindlist(yahooListCost, use.names=TRUE, fill=TRUE)
adp_yahoo <- rbindlist(yahooListADP, use.names=TRUE, fill=TRUE)

#Player name, position, and team (parsed out of the scraped player cell)
avgCost_yahoo[,player := str_trim(sapply(str_split(player, "\n"), "[[", 2))]
avgCost_yahoo[,team_yahoo := toupper(str_trim(str_sub(player, start=str_locate(player, "-")[,1]-4, end=str_locate(player, "-")[,1]-2)))]
avgCost_yahoo[,name_yahoo := str_trim(str_sub(player, start=0, end=nchar(player)-8))]
avgCost_yahoo[which(pos == "DST"), name_yahoo := convertTeamName(team_yahoo)]
avgCost_yahoo[,name := nameMerge(name_yahoo)]

adp_yahoo[,player := str_trim(sapply(str_split(player, "\n"), "[[", 2))]
adp_yahoo[,team_yahoo := toupper(str_trim(str_sub(player, start=str_locate(player, "-")[,1]-4, end=str_locate(player, "-")[,1]-2)))]
adp_yahoo[,name_yahoo := str_trim(str_sub(player, start=0, end=nchar(player)-8))]
adp_yahoo[which(pos == "DST"), name_yahoo := convertTeamName(team_yahoo)]
adp_yahoo[,name := nameMerge(name_yahoo)]

#Merge ADP & avgCost
costADP_yahoo <- merge(avgCost_yahoo[,c("name","name_yahoo","pos","team_yahoo","costProjected_yahoo","cost_yahoo","draftedPct_yahoo"), with=FALSE],
                       adp_yahoo[,c("name","pos","adp_yahoo","avgRound_yahoo"), with=FALSE],
                       by=c("name","pos"), all=TRUE)

#Remove special characters ($ and %)
numericVars <- c("costProjected_yahoo","cost_yahoo","draftedPct_yahoo","adp_yahoo","avgRound_yahoo")
costADP_yahoo[, (numericVars) := lapply(.SD, function(x) gsub("\\%", "", gsub("\\$", "", x))), .SDcols = numericVars]

#Convert to numeric
costADP_yahoo[, (numericVars) := lapply(.SD, function(x) as.numeric(as.character(x))), .SDcols = numericVars]

#Calculations
costADP_yahoo[,costAvg_yahoo := rowMeans(costADP_yahoo[,c("costProjected_yahoo","cost_yahoo"), with=FALSE], na.rm=TRUE)]
costADP_yahoo[,costMax_yahoo := apply(costADP_yahoo[,c("costProjected_yahoo","cost_yahoo"), with=FALSE], 1, function(x) max(x, na.rm=TRUE))]

#Rename players
#projections_yahoo[projections_yahoo$name=="STEVIEJOHNSON", "name"] <- "STEVEJOHNSON"

#Subset
costADP_yahoo <- costADP_yahoo[,c("name","name_yahoo","pos","team_yahoo","cost_yahoo","costProjected_yahoo","costAvg_yahoo","costMax_yahoo","adp_yahoo"), with=FALSE]

###############
# ESPN
###############

#Scrape data
espn_baseurl <- "http://games.espn.go.com/ffl/livedraftresults?"
espn_pos <- list(QB="QB", RB="RB", WR="WR", TE="TE", K="K", DST="D/ST", DT="DT", DE="DE", LB="LB", CB="CB", S="S")
espn_urls <- paste0(espn_baseurl, "position=", espn_pos)
espn <- lapply(espn_urls, function(x) {data.table(readHTMLTable(x, stringsAsFactors = FALSE)$`NULL`)})

espnList <- espn
for(i in 1:length(espnList)){
  #Add variable names
  setnames(espnList[[i]], c("info","player","pos","adp_espn","adp7Day","cost_espn","costAvg7Day_espn","draftedPct_espn"))

  #Trim Dimensions (header rows and trailing footer row)
  espnList[[i]] <- espnList[[i]][-c(1:4, nrow(espnList[[i]])),]

  #Add position to projection
  espnList[[i]][,pos := names(espn_pos)[i]]
  espnList[[i]][,pos := as.factor(pos)]
}

#Merge players across positions
avgCost_espn <- rbindlist(espnList, use.names=TRUE, fill=TRUE)

#Player names
avgCost_espn[,name_espn := str_sub(player, end=str_locate(string=player, ",")[,1]-1)]
avgCost_espn[,name_espn := str_replace_all(name_espn, "\\*", "")]
avgCost_espn[,name := nameMerge(avgCost_espn$name_espn)]

#Player teams
avgCost_espn[,team_espn := str_sub(player, start=str_locate(string=player, ",")[,1]+2, end = str_locate(string=player, ",")[,1]+4)]
avgCost_espn[,team_espn := str_trim(avgCost_espn$team_espn, side="right")]
avgCost_espn[,team_espn := toupper(team_espn)]

#Remove special characters
numericVars <- c("adp_espn","adp7Day","cost_espn","costAvg7Day_espn","draftedPct_espn")
avgCost_espn[, (numericVars) := lapply(.SD, function(x) gsub("\\+", "", x)), .SDcols = numericVars]

#Convert to numeric
avgCost_espn[, (numericVars) := lapply(.SD, function(x) as.numeric(as.character(x))), .SDcols = numericVars]

#Subset
avgCost_espn <- avgCost_espn[,c("name","name_espn","pos","team_espn","cost_espn","adp_espn"), with=FALSE]

###############
# FantasyPros
###############

#Scrape data
avgCost_fp <- readHTMLTable("http://www.fantasypros.com/nfl/auction-values/overall.php", stringsAsFactors = FALSE)$data

#Clean data
avgCost_fp$name_fp <- str_sub(avgCost_fp[,c("Player (pos, team, bye)")], end=str_locate(avgCost_fp[,c("Player (pos, team, bye)")], ',')[,1]-1)
avgCost_fp$name <- nameMerge(avgCost_fp$name_fp)
avgCost_fp$cost_fp <- as.numeric(sub("\\$","", avgCost_fp$Ave))
avgCost_fp$team_fp <- nameMerge(str_sub(avgCost_fp[,c("Player (pos, team, bye)")], start=str_locate(avgCost_fp[,c("Player (pos, team, bye)")], "\\(")[,1]+1, end=str_locate(avgCost_fp[,c("Player (pos, team, bye)")], "\\(")[,1]+3))
avgCost_fp$pos_fp <- as.factor(nameMerge(str_sub(avgCost_fp[,c("Player (pos, team, bye)")], start=str_locate(avgCost_fp[,c("Player (pos, team, bye)")], "\\,")[,1]+2, end=str_locate(avgCost_fp[,c("Player (pos, team, bye)")], "\\,")[,1]+3)))
avgCost_fp$adp_fp <- as.numeric(avgCost_fp$ADP)
avgCost_fp$ecr_fp <- as.numeric(avgCost_fp$ECR)

#Rename Players
avgCost_fp[grep("CHRISTOPHERIVORY", avgCost_fp[,c("name")]),"name"] <- "CHRISIVORY"
#avgCost_fp[avgCost_fp$name=="DOMANIQUEDAVIS", "name"] <- "DOMINIQUEDAVIS"

#Subset
cost_fp <- avgCost_fp[,c("name","name_fp","pos_fp","team_fp","cost_fp","adp_fp","ecr_fp")]

###############
# FantasyFootballCalculator
###############

#Scrape data
adp_ffc <- readHTMLTable("http://fantasyfootballcalculator.com/adp.php?teams=10", stringsAsFactors = FALSE)$`NULL`

#Clean data
adp_ffc$adp_ffc <- as.numeric(adp_ffc$Overall)
adp_ffc$adpSD_ffc <- as.numeric(adp_ffc$Std.Dev)
adp_ffc$name_ffc <- adp_ffc$Name
adp_ffc$name <- nameMerge(adp_ffc$Name)
adp_ffc$pos_ffc <- as.factor(adp_ffc$Pos)
adp_ffc$team_ffc <- adp_ffc$Team

#Subset
adp_ffc <- adp_ffc[,c("name","name_ffc","pos_ffc","team_ffc","adp_ffc","adpSD_ffc")]

###############
# Merge
###############

# Bug fix: the list previously referenced undefined objects `cost_yahoo`
# and `cost_espn`; the tables built above are `costADP_yahoo` and
# `avgCost_espn`.
costList <- list(costADP_yahoo, avgCost_espn, cost_fp, adp_ffc) #projections,
avgCost <- merge_recurse(costList, by=c("name")) #,"pos"

# NOTE(review): the consensus steps below expect suffixed columns such as
# name_yahoo/pos_yahoo/team_yahoo from each source. The yahoo and espn
# tables carry unsuffixed "pos" columns, so merge() may emit pos.x/pos.y
# instead of pos_yahoo/pos_espn -- verify the merged column names.

#Set player name as most common instance across sources
nametable <- apply(avgCost[,paste("name", c("yahoo","espn","fp","ffc"), sep="_")], 1, table)
avgCost$player <- names(sapply(nametable,`[`,1))
avgCost$player[which(avgCost$player == "")] <- NA

#Set team name as most common instance across sources
teamtable <- apply(avgCost[,paste("team", c("yahoo","espn","fp","ffc"), sep="_")], 1, table)
avgCost$team <- names(sapply(teamtable,`[`,1))
avgCost$team[which(avgCost$team == "")] <- NA

#Set position as most common instance across sources
postable <- apply(avgCost[,paste("pos", c("yahoo","espn","fp","ffc"), sep="_")], 1, table)
avgCost$pos <- as.factor(names(sapply(postable,`[`,1)))
avgCost$pos[which(avgCost$pos == "")] <- NA

#Remove duplicate cases
avgCost[avgCost$name %in% avgCost$name[duplicated(avgCost$name)],]

dropNames <- nameMerge(c("Alex Smith","Ryan Griffin","Zach Miller","Steve Smith","Mike Williams")) #,"Chris Givens"
dropVariables <- c("pos_espn","pos_espn","team_espn","team_espn","team_espn") #,"team_espn"
dropLabels <- c("TE","QB","CHI","FA","FA") #,"NO"

avgCost2 <- ddply(avgCost, .(name), numcolwise(mean), na.rm=TRUE)

for(i in 1:length(dropNames)){
  if(dim(avgCost[-which(avgCost[,"name"] == dropNames[i] & avgCost[,dropVariables[i]] == dropLabels[i]),])[1] > 0){
    avgCost <- avgCost[-which(avgCost[,"name"] == dropNames[i] & avgCost[,dropVariables[i]] == dropLabels[i]),]
  }
}

avgCost <- merge(avgCost2, avgCost[,c("name","player","pos","team")], by="name")

drops <- c("player","pos","team")
avgCost <- avgCost[,!names(avgCost) %in% drops]

projections <- merge(projections, avgCost, by="name", all.x=TRUE)

#Remove duplicate cases
projections[projections$name %in% projections$name[duplicated(projections$name)],]

###############
# Calculate projected cost
###############

#Select which source of cost to use
projections$avgCost <- projections$costAvg_yahoo

#Calculate Overall Rank
projections$overallRank <- rank(-projections$projections, ties.method="min")

#Apply 10% price premium to 33 players with highest projected points,
#apply 10% price discount for players ranked below 66
projections$inflatedCost <- ceiling(projections$avgCost * (leagueCap/defaultCap) * 1.0)
projections$inflatedCost[projections$overallRank <= 33] <- ceiling(projections$avgCost[projections$overallRank <= 33] * (leagueCap/defaultCap) * 1.1)
projections$inflatedCost[projections$overallRank >= 34 & projections$overallRank <= 66] <- ceiling(projections$avgCost[projections$overallRank >= 34 & projections$overallRank <= 66] * (leagueCap/defaultCap) * 1.0)
projections$inflatedCost[projections$overallRank >= 67] <- ceiling(projections$avgCost[projections$overallRank >= 67] * (leagueCap/defaultCap) * 0.9)

#Fill missing/zero costs with the $1 minimum bid
#(the original script repeated the NA fill twice; the duplicate was removed)
projections$inflatedCost[is.na(projections$inflatedCost)==TRUE] <- 1
projections$inflatedCost[projections$inflatedCost==0] <- 1
projections$avgCost[is.na(projections$avgCost)==TRUE] <- 1
projections[,c("cost_yahoo","costProjected_yahoo","costAvg_yahoo","costMax_yahoo","cost_espn","cost_fp")][is.na(projections[,c("cost_yahoo","costProjected_yahoo","costAvg_yahoo","costMax_yahoo","cost_espn","cost_fp")])] <- 1

#Order data
projections <- projections[order(projections$overallRank),]
row.names(projections) <- 1:dim(projections)[1]

#Density Plot
ggplot(projections, aes(x=inflatedCost)) + geom_density(fill="green", alpha=.3) + xlab("Player's Intrinsic Value (Cost)") + ggtitle("Density Plot of Players' Intrinsic Value") + theme(legend.title=element_blank())
ggsave(paste(getwd(),"/Figures/Inflated Cost.jpg", sep=""), width=10, height=10)
dev.off()

#Save file
save(projections, file = paste(getwd(), "/Data/AvgCost.RData", sep=""))
write.csv(projections, file=paste(getwd(), "/Data/AvgCost.csv", sep=""), row.names=FALSE)

save(projections, file = paste(getwd(), "/Data/Historical Cost/AvgCost-", season, ".RData", sep=""))
write.csv(projections, file=paste(getwd(), "/Data/Historical Cost/AvgCost-", season, ".csv", sep=""), row.names=FALSE)

save(projections, file = paste(getwd(), "/Data/Rankings.RData", sep=""))
write.csv(projections, file=paste(getwd(), "/Data/Rankings.csv", sep=""), row.names=FALSE)

save(projections, file = paste(getwd(), "/Data/Historical Rankings/Rankings-", season, ".RData", sep=""))
write.csv(projections, file=paste(getwd(), "/Data/Historical Rankings/Rankings-", season, ".csv", sep=""), row.names=FALSE)
a18ab3e555fb15d65bf0c73b8caade247caec02c
92b26722872126c02235b0afbad858ad9a2ee3c3
/plot4.R
2c01a828a3c91e2b3bc47a6037b44dff3866da08
[]
no_license
drmallan/ExData_Plotting1
5098be3e7d3b5c27784b68ae836de8b23aae0ea7
e57757e163346920216695f62c7263192d1d31e2
refs/heads/master
2021-01-18T08:17:40.841354
2015-08-09T10:59:50
2015-08-09T11:05:57
40,405,538
0
0
null
2015-08-08T14:34:35
2015-08-08T14:34:35
null
UTF-8
R
false
false
993
r
plot4.R
x <- read.table("household_power_consumption.txt",sep=";",header = TRUE,stringsAsFactors=FALSE) # Subset the two dates of interest y <- subset(x,Date=="1/2/2007" | Date=="2/2/2007") # Combine date and time variables y$Combined <- strptime(paste(y$Date, y$Time),"%d/%m/%Y %H:%M:%S") par(mfrow=c(2,2)) # Plot four charts with(y, { plot(Combined,Global_active_power,type="l",ylab="Global Active Power",xlab="") plot(Combined, Voltage,type="l",ylab="Voltage",xlab="datetime") plot(Combined, Sub_metering_1,type="l",ylab="Energy sub metering",xlab="") points(Combined, Sub_metering_2,col="red",type="l") points(Combined, Sub_metering_3,col="blue",type="l") recordGraphics(legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col = c("black","red","blue"),lty=1), list(), getNamespace("graphics")) plot(Combined, Global_reactive_power,type="l",ylab="Global_reactive_power",xlab="datetime") }) # Send output to PNG file dev.copy(png,file="plot4.png") dev.off()
375f926b3bacc3b213cc57fa0d8f04a1904d01a6
2a6de67ff224d8f0be975a190cfd1415cecafe3e
/R/lubridate.R
12d9ddaeddd00dd7b6e69747df521e1ea8da4468
[]
no_license
RupeshMohan/R_Program
76c0ee5c8450c32de814139438da916d437596f8
aa7ba715e251b4b790a76fa27771d28d3932d85d
refs/heads/master
2020-04-02T12:43:11.119233
2018-10-25T05:04:40
2018-10-25T05:04:40
154,447,945
0
0
null
null
null
null
UTF-8
R
false
false
291
r
lubridate.R
# Demonstration of date-time arithmetic and component extraction with the
# lubridate package.

# Use library() rather than require(): library() errors immediately when the
# package is missing, while require() only warns and returns FALSE, letting
# the script fail later with confusing "could not find function" errors.
library(lubridate)

# Current system date-time.
now()

# Modify components of a date-time (here: set year and month).
update(now(), year = 2015, month = 5)

# Add exact durations (fixed spans of seconds) to the current time:
# days, years, minutes, weeks, hours, seconds.
now() + ddays(2)
now() + dyears(2)
now() + dminutes(45)
now() + dweeks(4)
now() + dhours(4)
now() + dseconds(47852)

# Extract individual components from a date-time.
date(now())
minute(now())
year(now())
second(now())
de486d5addf25e96ce436ce262872df646ba8ae2
88795fcefdd51b2228f8d59d3ac704516872732b
/man/getZifNetwork.Rd
26aadf36083f676f3b04d0b9bd37731575c4afd7
[]
no_license
amcdavid/SingleCellAnalysis
58eb070174dffa8a290556440894823ce9c78b08
54cb0bb77a219953d276ef6f3c18fe555c674440
refs/heads/master
2016-09-09T20:10:46.954250
2014-07-14T21:13:21
2014-07-14T21:13:21
13,047,205
0
0
null
null
null
null
UTF-8
R
false
false
827
rd
getZifNetwork.Rd
% Generated by roxygen2 (4.0.1): do not edit by hand \name{getZifNetwork} \alias{getZifNetwork} \title{Put coefficients from a set of network regressions into a matrix/array} \usage{ getZifNetwork(listOfFits, l.c, l.d, collapse = FALSE, union = TRUE, layers) } \arguments{ \item{listOfFits}{output from fitZifNetwork} \item{l.c}{continuous lambda value, if missing use lambda attribute from listofFits} \item{l.d}{discrete lambda value, see l.c} \item{collapse}{should the network be collapsed between layers?} \item{union}{currently ignored} \item{layers}{upon which layers in the listOfFits should we operate (eg, discrete, continuous or both)} } \value{ an array } \description{ The array is ngenes X ngenes X {2,3}, with the last dimension depending on whether zero-inflated or cg.regression2 predictors were used. }
9f6b37acf6d00d28a74d039474118d211e99e523
607c7b6b3f4f75734aa0254b4bb60aa88db64abe
/man/wea.Rd
a968f4575f644e14d7afb725838fc055340b1edc
[]
no_license
quantide/qdata
ef912b7839bcc987b4a57bb8ec7680a5e488a523
38b09e3feb735aec9d938d023595a8f3587a1b6e
refs/heads/master
2021-01-12T18:14:05.939900
2017-01-26T11:24:54
2017-01-26T11:24:54
71,345,165
2
1
null
null
null
null
UTF-8
R
false
true
1,434
rd
wea.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qdata-data.R
\name{wea}
\alias{wea}
\title{Weather Daily Data in Canberra}
\format{\code{wea} is a tbl data frame with 366 observations on 24 variables.}
\usage{
data(wea)
}
\description{
The \code{wea} data contains daily weather data (temperature, rain, wind, etc.) in Canberra, Australia, from 2007-11-01 to 2008-10-31.
}
\details{
The 24 variables of the \code{wea} tbl data frame are the following ones:

\itemize{
 \item \code{Date} (Date), format: "2007-11-01" "2007-11-02" "2007-11-03" "2007-11-04" ...
 \item \code{Location} Factor w/ 49 levels
 \item \code{MinTemp} (numeric)
 \item \code{MaxTemp} (numeric)
 \item \code{Rainfall} (numeric)
 \item \code{Evaporation} (numeric)
 \item \code{Sunshine} (numeric)
 \item \code{WindGustDir} Ord.factor w/ 16 levels
 \item \code{WindGustSpeed} (numeric)
 \item \code{WindDir9am} Ord.factor w/ 16 levels
 \item \code{WindDir3pm} Ord.factor w/ 16 levels
 \item \code{WindSpeed9am} (numeric)
 \item \code{WindSpeed3pm} (numeric)
 \item \code{Humidity9am} (numeric)
 \item \code{Humidity3pm} (numeric)
 \item \code{Pressure9am} (numeric)
 \item \code{Pressure3pm} (numeric)
 \item \code{Cloud9am} (numeric)
 \item \code{Cloud3pm} (numeric)
 \item \code{Temp9am} (numeric)
 \item \code{Temp3pm} (numeric)
 \item \code{RainToday} Factor w/ 2 levels
 \item \code{RISK_MM} (numeric)
 \item \code{RainTomorrow} Factor w/ 2 levels
}
}
93e53f363456bae21bd2706f850f8dcdd700494f
6c2174723cc610eeff9b6b060c240b2bad7c78e7
/ShinyLeaflet/ui.R
b9bd0715da6f3215c94b27e89f54549a36f41dfa
[]
no_license
tsdaemon/lviv_criminal_map
83ebd09b803793c6d2a597e442def03bd3da83f3
d95c2d03b871817ebd5a980bdcbf851dd37ffa83
refs/heads/master
2021-01-19T10:29:02.429742
2017-04-11T00:46:42
2017-04-11T00:46:42
87,871,001
2
0
null
null
null
null
UTF-8
R
false
false
1,762
r
ui.R
# Shiny UI for the "Criminal map of Lviv" application: a full-screen leaflet
# map with a draggable control panel for choosing the record type and the
# map layers (dots / heatmap / by-district), plus a per-district bar chart.

library(leaflet)
library(leaflet.extras)

# Presentation-layer choices (Ukrainian labels -> layer codes).
# NOTE(review): this vector is not referenced anywhere below -- presumably a
# leftover from a dropdown replaced by the three checkboxes; confirm before
# removing.
presentation_vals <- c("Точки" = "dots",
                       "Heat map" = "heatmap",
                       "По районах" = "bydistrict")

# Crime record types offered in the "type" dropdown
# (Ukrainian label -> value sent to the server as input$type).
type_vals <- c("Усі" = "all",
               "Крадіжки" = "theft",
               "Пограбування" = "robbery",
               "Шахрайства" = "fraud")

# Choices for drop-downs
navbarPage("Кримінальна карта Львова", id="nav",

  tabPanel("",
    div(class="outer",

      tags$head(
        # Include our custom CSS
        includeCSS("styles.css"),
        includeScript("gomap.js")
      ),

      # Map fills the whole browser window; the panel floats on top of it.
      leafletOutput("map", width="100%", height="100%"),

      # Shiny versions prior to 0.11 should use class="modal" instead.
      # Draggable settings panel: record-type selector, three layer toggles,
      # and the per-district bar chart.
      absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE,
        draggable = TRUE, top = 60, left = "auto", right = 20, bottom = "auto",
        width = 330, height = "auto",

        h2("Налаштування"),

        selectInput("type", "Тип запису", type_vals, selected = "all"),
        checkboxInput("dots", "Точки"),
        checkboxInput("heatmap", "Heatmap"),
        checkboxInput("bydistrict", "Райони", value = TRUE),

        plotOutput("bydistrictBarChart", height = 400)
      ),

      # Attribution footer (data source and author).
      tags$div(id="cite",
        'Підготовано з використанням ',
        tags$a(href='http://opendata.city-adm.lviv.ua/', "Портала відкритих даних Львова"),
        ' Анатолієм Стегнієм (2017).'
      )
    )
  )
)
e2c7cd256abd696d9cbcec084da28d9202ec1e9b
c842bcf306bccc2bb4f1b1aa1c6788a2f70f8be6
/hzarscript_loop.R
3dc88182bc44f4abd465c12cc4fa4d4672f03e93
[]
no_license
kiralong/HZAR_pipeline
243deb98b4bc746496dafdc462e0bad994aec59b
ff87417a2cfd98b92f68fe901f6e44312e9819e6
refs/heads/main
2023-05-02T15:53:44.644401
2021-05-25T17:52:56
2021-05-25T17:52:56
369,340,067
1
0
null
null
null
null
UTF-8
R
false
false
14,019
r
hzarscript_loop.R
#!/usr/bin/env Rscript
# May 6th, 2021
#
# Per-SNP geographic cline fitting with the `hzar` package.
# For every SNP listed in `snp_ID_file`, the loop below fits three cline
# models (I: scaling fixed / no tails, II: free / none, III: free / both),
# picks the best model by AICc, plots the fitted cline with its 95% credible
# region into a PDF, and collects the ML cline parameters plus their
# 2.5/50/97.5% quantiles into summary tables written out at the end.

## Load the packages
library(hzar)
library(scales)

# Set working directory
setwd("/your/working/directory")

# set the name of you SNP ID file
# This is a list of just the locus IDs and chromosome coordinates to tell hzar what to loop over.
# These are the first 4 columns of the `populations.sumstats.tsv`from Stacks. I am using a sample dataset looking at a single manakin chromosome to look at the clines around the gene RDH10
snp_ID_file <- "NW_021939396.1_RDH10_brum34_snps.tsv"

#This is the input file for hzar, which is the --hzar file output by Stacks. Don't forget to add your population distances and remove the stacks header from this file!
hzar_input_file <- "populations.rdh10.hzar.csv"

##Read in data to R
## Also note that R is adding an 'X' to the beginning of each locus ID because they are numerical and R wants a letter there
manakinRAD <- read.csv(hzar_input_file, comment.char = '#')

# Sort by the numerical population ID
# Make sure that the population IDs are numerical so they get sorted in the right order
manakinRAD <- manakinRAD[order(manakinRAD$Population, decreasing = TRUE),]

## Add your population distances to the population ID's
# Transect distances (km) for the 8 populations, in the sorted row order above.
cg <- 00.00
pr <- 20.75
ru <- 29.50
qp <- 42.50
ro <- 48.50
el <- 71.25
wl <- 79.00
ss <- 92.50

dists <- c(cg, pr, ru, qp, ro, el, wl, ss)

# Add these distances to the manakinRAD data
manakinRAD$Distance <- dists

#Get a list of just the locus IDs and chromosome coordinates
# These are the first 4 columns of the `populations.sumstats.tsv`from Stacks
lociID <- read.delim(snp_ID_file, header=FALSE, stringsAsFactors = FALSE)
# snp_id = "<chrom>_<bp>"; hzar_id = "X<snp_id>" to match the X-prefixed
# column names that read.csv created in manakinRAD.
lociID$V5 <- paste(lociID$V1,'_',lociID$V4,sep='')
lociID$V6 <- paste('X',lociID$V5,sep='')
colnames(lociID) <- c('locus','chromosome','basepair','column','snp_id','hzar_id')

# Vectors for final per-snp dataframe (one element appended per SNP below)
snps <- c()
center.km <- c()
width.km <- c()
p.min <- c()
p.max <- c()
best.model <- c()
scaffold <- c()
basepair <- c()
center.low <- c()
center.med <- c()
center.high <- c()
width.low <- c()
width.med <- c()
width.high <- c()

## Distance Offset, should be slightly larger than your max and min population distances
offset=20
minDist=0
maxDist=95

# Dataframe with all observed clines for graphing all clines later
## The loop will add the values for the other clines
kms <- (minDist-offset):(maxDist+offset)
all_clines <- data.frame(kms)

## A typical chain length. This value is the default setting from the HZAR package.
chainLength=1e5;

## Make each model run off a separate seed
mainSeed= list(A=c(596,528,124,978,544,99),
               B=c(528,124,978,544,99,596),
               C=c(124,978,544,99,596,528))

# Colors for the plots
# Re-add the observed data for the sites, with colors and legends
colors <- c('cornflowerblue', # CG
            'burlywood4',     # PR
            'deeppink2',      # RU
            'chartreuse3',    # QP
            'darkgoldenrod3', # RO
            'darkorchid3',    # EL
            'mediumseagreen', # WL
            'firebrick3')     # SS

pop_ids <- c('10','9','8','6','5','4','3','2')

# Save plots in a PDF (one page per SNP; closed after the loop)
pdf('./individual_snp_clines.pdf',width=4,height=4)

# Loop over all the loci
for (i in 1:nrow(lociID)) {
  #for (i in 1:2) {
  # i=1
  # Select the current SNP
  curr.snp <- lociID[i,]
  snp_id <- curr.snp$snp_id
  loc <- curr.snp$hzar_id
  chrom <- curr.snp$chromosome
  bp <- curr.snp$basepair

  ## If running HZAR in a cluster environment, this allows you to grep "PROCESSING" out of the output/error file to see which locus HZAR is on
  # NOTE(review): message() has no `sep` argument, so the `sep=' '` below is
  # treated as just another message fragment -- harmless but likely unintended.
  message('## ----- PROCESSING LOCUS ', i, ' - ', snp_id, ' ----- ##\n', sep=' ')

  ## Molecular Analysis
  ## Blank out space in memory to hold molecular analysis
  # (Re)create the container list `mkn` only if it does not already exist.
  if(length(apropos("^mkn$",ignore.case=FALSE)) == 0 ||
     !is.list(mkn) ) mkn <- list()

  ## We are doing just the one allele at one locus, but it is a
  ## good to stay organized.
  # Instead of using the literal string of the locus ID, we are using
  # the variable `loc` and indexing with it
  mkn[[loc]] <- list()
  ## Space to hold the observed data
  mkn[[loc]][['obs']] <- list()
  ## Space to hold the models to fit
  mkn[[loc]][['models']] <- list()
  ## Space to hold the compiled fit request
  mkn[[loc]][['fitRs']] <- list()
  ## Space to hold the output data chains
  mkn[[loc]][['runs']] <- list()
  ## Space to hold the analyzed data
  mkn[[loc]][['analysis']] <- list()

  ## Load the locus of interest
  # Column names for the allele-frequency (.A) and sample-size (.N) columns.
  locA <- paste(loc,'.A',sep='')
  locN <- paste(loc,'.N',sep='')

  mkn[[loc]][['obs']] <- hzar.doMolecularData1DPops(manakinRAD$Distance,
                                                    manakinRAD[,locA],
                                                    manakinRAD[,locN])

  ## Make a helper function
  # Builds one cline model for the current locus and stores it (note the
  # superassignment `<<-` writing into the enclosing `mkn`).
  mkn.loadLocAmodel <- function(scaling,tails,
                                id=paste(scaling,tails,sep="."))
    mkn[[loc]]$models[[id]] <<- hzar.makeCline1DFreq(mkn[[loc]]$obs, scaling, tails)

  mkn.loadLocAmodel("fixed","none","modelI");
  mkn.loadLocAmodel("free" ,"none","modelII");
  mkn.loadLocAmodel("free" ,"both","modelIII");

  ## Modify all models to focus on the region where the observed
  ## data were collected.
  ## Observations were between 0 and about 80 km.
  mkn[[loc]]$models <- sapply(mkn[[loc]]$models,
                              hzar.model.addBoxReq,
                              minDist-offset, maxDist+offset,
                              simplify=FALSE)

  ## Compile each of the models to prepare for fitting
  mkn[[loc]]$fitRs[['init']] <- sapply(mkn[[loc]]$models,
                                       hzar.first.fitRequest.old.ML,
                                       obsData=mkn[[loc]]$obs,
                                       verbose=FALSE,
                                       simplify=FALSE)

  ## Update the settings for the fitter if desired.
  ### Model I
  mkn[[loc]]$fitRs$init$modelI$mcmcParam$chainLength <- chainLength # 1e5 by default
  mkn[[loc]]$fitRs$init$modelI$mcmcParam$burnin <- chainLength %/% 10 # 1e4 by default
  mkn[[loc]]$fitRs$init$modelI$mcmcParam$seed[[1]] <- mainSeed$A

  ### Model II
  mkn[[loc]]$fitRs$init$modelII$mcmcParam$chainLength <- chainLength # 1e5 by default
  mkn[[loc]]$fitRs$init$modelII$mcmcParam$burnin <- chainLength %/% 10 # 1e4 by default
  mkn[[loc]]$fitRs$init$modelII$mcmcParam$seed[[1]] <- mainSeed$B

  ### Model III
  mkn[[loc]]$fitRs$init$modelIII$mcmcParam$chainLength <- chainLength # 1e5 by default
  mkn[[loc]]$fitRs$init$modelIII$mcmcParam$burnin <- chainLength %/% 10 # 1e4 by default
  mkn[[loc]]$fitRs$init$modelIII$mcmcParam$seed[[1]] <- mainSeed$C

  # RUN MODEL I
  ## Run just one of the models for an initial chain
  mkn[[loc]]$runs$init <- list()
  mkn[[loc]]$runs$init$modelI <- hzar.doFit(mkn[[loc]]$fitRs$init$modelI)

  ## Plot the trace
  # plot(hzar.mcmc.bindLL(mkn[[loc]]$runs$init$modelI))

  # RUN MODEL II
  ## Run another model for an initial chain
  mkn[[loc]]$runs$init$modelII <- hzar.doFit(mkn[[loc]]$fitRs$init$modelII)

  ## Plot the trace
  # plot(hzar.mcmc.bindLL(mkn[[loc]]$runs$init$modelII))

  # RUN MODEL III
  ## Run another model for an initial chain
  mkn[[loc]]$runs$init$modelIII <- hzar.doFit(mkn[[loc]]$fitRs$init$modelIII)

  ## Plot the trace
  # plot(hzar.mcmc.bindLL(mkn[[loc]]$runs$init$modelIII))

  ## Compile a new set of fit requests using the initial chains
  mkn[[loc]]$fitRs$chains <- lapply(mkn[[loc]]$runs$init, hzar.next.fitRequest)

  ## Replicate each fit request 3 times, keeping the original
  ## seeds while switching to a new seed channel.
  mkn[[loc]]$fitRs$chains <- hzar.multiFitRequest(mkn[[loc]]$fitRs$chains,
                                                  each=3,
                                                  baseSeed=NULL)

  ## Go ahead and run a chain of 3 runs for every fit requester
  ## ARC: This step can take quite a while to run because it looks for the models to
  ## converge. The code doesn't seem to behave properly when they do not. Might need
  ## to incorporate a check to stop the function after X amount of time when that happens.
  ## NOTE: This part is needed.
  mkn[[loc]]$runs$chains <- hzar.doChain.multi(mkn[[loc]]$fitRs$chains,
                                               doPar=FALSE,
                                               inOrder=FALSE,
                                               count=3)

  ## Start aggregation of data for analysis
  ## Create a model data group for the null model (expected allele
  ## frequency independent of distance along cline) to include in
  ## analysis.
  ## I think this is needed again.
  mkn[[loc]]$analysis$initDGs <- list(
    nullModel = hzar.dataGroup.null(mkn[[loc]]$obs))

  ## Create a model data group (hzar.dataGroup object) for each
  ## model from the initial runs.
  mkn[[loc]]$analysis$initDGs$modelI <- hzar.dataGroup.add(mkn[[loc]]$runs$init$modelI)
  mkn[[loc]]$analysis$initDGs$modelII <- hzar.dataGroup.add(mkn[[loc]]$runs$init$modelII)
  mkn[[loc]]$analysis$initDGs$modelIII <- hzar.dataGroup.add(mkn[[loc]]$runs$init$modelIII)

  ## Create a hzar.obsDataGroup object from the four hzar.dataGroup
  ## just created, copying the naming scheme (nullModel, modelI,
  ## modelII, modelIII).
  mkn[[loc]]$analysis$oDG <- hzar.make.obsDataGroup(mkn[[loc]]$analysis$initDGs)
  mkn[[loc]]$analysis$oDG <- hzar.copyModelLabels(mkn[[loc]]$analysis$initDGs,
                                                  mkn[[loc]]$analysis$oDG)

  ## Convert all 27 runs to hzar.dataGroup objects, adding them to
  ## the hzar.obsDataGroup object.
  mkn[[loc]]$analysis$oDG <- hzar.make.obsDataGroup(lapply(mkn[[loc]]$runs$chains,
                                                           hzar.dataGroup.add),
                                                    mkn[[loc]]$analysis$oDG)

  ## Do model selection based on the AICc scores
  mkn[[loc]]$analysis$AICcTable <- hzar.AICc.hzar.obsDataGroup(mkn[[loc]]$analysis$oDG)

  ## Print out the model with the minimum AICc score
  best_model <- print(mkn[[loc]]$analysis$model.name <-
                        rownames(mkn[[loc]]$analysis$AICcTable )[[ which.min(mkn[[loc]]$analysis$AICcTable$AICc )]])

  ## Extract the hzar.dataGroup object for the selected model
  mkn[[loc]]$analysis$model.selected <- mkn[[loc]]$analysis$oDG$data.groups[[mkn[[loc]]$analysis$model.name]]

  ## Print the maximum likelihood cline for the selected model
  ## Need to capture this maximum liklihood output
  max_likelihood_cline <- hzar.get.ML.cline(mkn[[loc]]$analysis$model.selected)
  center <- max_likelihood_cline$param.all$center
  width <- max_likelihood_cline$param.all$width
  pMin <- max_likelihood_cline$param.all$pMin
  pMax <- max_likelihood_cline$param.all$pMax
  # print(max_likelihood_cline)

  ## Obtain the confidence intervals
  # Rows 1/2/3 of `conf` hold the low / median / high quantiles respectively.
  conf <- hzar.qScores.dataGroup(mkn[[loc]]$analysis$model.selected)
  center_low <- conf[1,]$center
  center_med <- conf[2,]$center
  center_high <- conf[3,]$center
  width_low <- conf[1,]$width
  width_med <- conf[2,]$width
  width_high <- conf[3,]$width

  # Make the main cline plot
  # Widen the right margin and allow drawing outside the plot region so the
  # legend placed with a negative inset below is visible.
  par(mar=c(5.1, 4.1, 4.1, 4.5), xpd=TRUE)

  ## Plot the 95% credible cline region for the selected model
  hzar.plot.fzCline(mkn[[loc]]$analysis$model.selected,
                    ylab='Allele Frequency',
                    xlab='Distance from M. vitellinus (Km)',
                    main=paste('SNP location:\n',chrom,bp),
                    ylim=c(0,1),
                    xlim=c(0,100),
                    las=1
                    )
  # Re-add the main cline line, but thicker
  hzar.plot.cline(mkn[[loc]]$analysis$model.selected, add=TRUE, lwd=2)

  # @ARC: If you need to plot the invidivual clines, they are stored in
  #cline_line <- mkn[[loc]]$analysis$model.selected$obsData$frame
  # Evaluate the ML cline function on the same km grid as `all_clines$kms`.
  cline_line <- max_likelihood_cline$clineFunc((minDist-offset):(maxDist+offset))

  ## Store current cline in all clines table (one new column per SNP)
  all_clines[,snp_id] <- cline_line

  ## Look at a graph of the observed data
  hzar.plot.obsData(mkn[[loc]]$obs, add=TRUE, col='black', bg=colors, pch=21, cex=1.25)
  # Add legend
  legend('right', inset=c(-0.3,0), legend=pop_ids, pch=21, pt.bg=colors)

  ## Add data to DF
  center.km <- c(center.km, center)
  width.km <- c(width.km, width)
  best.model <- c(best.model, best_model)
  snps <- c(snps, snp_id)
  p.min <- c(p.min, pMin)
  p.max <- c(p.max, pMax)
  scaffold <- c(scaffold, chrom)
  basepair <- c(basepair,bp)
  center.low <- c(center.low, center_low)
  center.med <- c(center.med, center_med)
  center.high <- c(center.high, center_high)
  width.low <- c(width.low, width_low)
  width.med <- c(width.med, width_med)
  width.high <- c(width.high, width_high)
}

# Close plot
# NOTE(review): the return value of dev.off() is assigned to a variable
# literally named `.` just to suppress auto-printing of the device number.
.=dev.off()

# Create Final dataframe
cline_table <- data.frame(scaffold, basepair, snps, center.km, width.km,
                          p.min, p.max, best.model,
                          center.low, center.med, center.high,
                          width.low, width.med, width.high)

# Save the cline table dataframe into a file
write.table(cline_table, file='./snp_clines_parameters.tsv',
            quote=FALSE, sep='\t', eol='\n',
            row.names = FALSE, col.names = colnames(cline_table))

# Save the all clines dataframe into a file
write.table(all_clines, file='./all_clines.tsv',
            quote=FALSE, sep='\t', eol='\n',
            row.names = FALSE, col.names = colnames(all_clines))
1c85607e520e5b0d1b6f906211db28e72b718806
129735feaf576bdcdffbe5d65d6085473ca64eab
/man/threshold.Rd
20a89a59ac8a20a6e7a719f68f0de4ca85dffc05
[]
no_license
zhaoyizhuang/constructnet
ce3eef8a0bc482838557a92d8c3e6606ade703a7
d12faab50d7ea1a6c0a6844cd486fca7746138c7
refs/heads/master
2023-04-23T22:38:26.315714
2021-02-18T22:28:56
2021-02-18T22:28:56
null
0
0
null
null
null
null
UTF-8
R
false
true
448
rd
threshold.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/threshold.R \name{threshold} \alias{threshold} \title{Utilities for thresholding matrices based on different criteria} \usage{ threshold(mat, rule, ...) } \arguments{ \item{mat}{input matrix} \item{rule}{A string indicating which thresholding function to invoke.} \item{...}{Arguments} } \description{ Utilities for thresholding matrices based on different criteria }
3de635a27dd61b464e3d288e552ff67d1516eef6
802fdba8cad732ff1f8a899e6d018e8db51c4477
/Analysis/Climwin/Correlations/Autocorrelations.R
3d8a6312bca091bafcb3c9ec76c7ba3ebdad4726
[]
no_license
SanneE1/Climate-windows
f5083f6dadf2dbc2e130362f8776c169a822eab2
dc34ae591b8d971813b15439cf519ba2f5bc6c15
refs/heads/master
2023-03-14T10:33:25.991001
2021-02-26T10:29:12
2021-02-26T10:29:12
205,806,524
0
0
null
null
null
null
UTF-8
R
false
false
4,397
r
Autocorrelations.R
# Calculate climate (auto)correlation of the selected climate driver for each
# species (HEQU, FRSP, CRFL, OPIM) and vital rate (s = survival, g = growth,
# fp = flower probability, fn = flower numbers), then save one figure per
# combination.
library(lme4)
library(climwin)
library(plyr); library(dplyr)
library(lubridate)
library(patchwork)

# Centralised script to load the climwin rds objects and selected climate
# drivers (provides Hs/Fs/Cs/Os..., Hsurv/Fsurv/... etc. used below).
source("Results/Load_Climwin_results.R")
# Custom functions to calculate correlations (provides cor_wrap()).
source("Analysis/Climwin/Correlations/required_functions.R")

### Survival
AutoHs <- cor_wrap(species = "HEQU", vitalrate = "s",
                   Climate = "data/Climate data/PRISM_check/PRISM_HEQU_climate.csv",
                   SlidingObject = Hs, Winner = Hsurv)
AutoFs <- cor_wrap(species = "FRSP", vitalrate = "s",
                   Climate = 'data/Climate data/FRSP_NOAA_month.csv',
                   SlidingObject = Fs, Winner = Fsurv)
AutoCs <- cor_wrap(species = "CRFL", vitalrate = "s",
                   Climate = "data/Climate data/CRFL_NOAA_month.csv",
                   SlidingObject = Cs, Winner = Csurv)
AutoOs <- cor_wrap("OPIM", "s", "data/Climate data/OPIM_SEVLTER_month_imputed.csv", Os, Osurv)

### Growth
AutoHg <- cor_wrap("HEQU", "g", "data/Climate data/PRISM_check/PRISM_HEQU_climate.csv", Hg, Hgrowth)
AutoFg <- cor_wrap("FRSP", "g", "data/Climate data/FRSP_NOAA_month.csv", Fg, Fgrowth)
AutoCg <- cor_wrap("CRFL", "g", "data/Climate data/CRFL_NOAA_month.csv", Cg, Cgrowth)
AutoOg <- cor_wrap("OPIM", "g", "data/Climate data/OPIM_SEVLTER_month_imputed.csv", Og, Ogrowth)

### Flower Prob
AutoHfp <- cor_wrap("HEQU", "fp", "data/Climate data/PRISM_check/PRISM_HEQU_climate.csv", Hfp, HpFlwr)
AutoFfp <- cor_wrap("FRSP", "fp", "data/Climate data/FRSP_NOAA_month.csv", Ffp, FpFlwr)
AutoCfp <- cor_wrap("CRFL", "fp", "data/Climate data/CRFL_NOAA_month.csv", Cfp, CpFlwr)
AutoOfp <- cor_wrap("OPIM", "fp", "data/Climate data/OPIM_SEVLTER_month_imputed.csv", Ofp, OpFlwr)

### Flower Numbers
AutoHfn <- cor_wrap("HEQU", "fn", "data/Climate data/PRISM_check/PRISM_HEQU_climate.csv", Hfn, HnFlwr)
AutoFfn <- cor_wrap("FRSP", "fn", "data/Climate data/FRSP_NOAA_month.csv", Ffn, FnFlwr)
AutoCfn <- cor_wrap("CRFL", "fn", "data/Climate data/CRFL_NOAA_month.csv", Cfn, CnFlwr)
# BUGFIX: vitalrate was "fp" here, although this is the flower-number
# analysis and the fn objects (Ofn, OnFlwr) are passed -- corrected to "fn"
# to match the other three species in this section.
AutoOfn <- cor_wrap("OPIM", "fn", "data/Climate data/OPIM_SEVLTER_month_imputed.csv", Ofn, OnFlwr)

# Save one correlation figure per species x vital rate.
ggsave("Visual/Correlation_Hs.png", AutoHs)
ggsave("Visual/Correlation_Hg.png", AutoHg)
ggsave("Visual/Correlation_Hfp.png", AutoHfp)
ggsave("Visual/Correlation_Hfn.png", AutoHfn)

ggsave("Visual/Correlation_Fs.png", AutoFs)
ggsave("Visual/Correlation_Fg.png", AutoFg)
ggsave("Visual/Correlation_Ffp.png", AutoFfp)
ggsave("Visual/Correlation_Ffn.png", AutoFfn)

ggsave("Visual/Correlation_Os.png", AutoOs)
ggsave("Visual/Correlation_Og.png", AutoOg)
ggsave("Visual/Correlation_Ofp.png", AutoOfp)
ggsave("Visual/Correlation_Ofn.png", AutoOfn)

ggsave("Visual/Correlation_Cs.png", AutoCs)
ggsave("Visual/Correlation_Cg.png", AutoCg)
ggsave("Visual/Correlation_Cfp.png", AutoCfp)
ggsave("Visual/Correlation_Cfn.png", AutoCfn)
5c361ceeba4806e964eeb421718cd07a05bff137
b7fb7358b25dfb0e064a59fdd444e682cfb2361e
/R_Projects/ML_in_R_KaggleTut.R
43b8d881d404ff713f30505279b60e2aaa0a781f
[]
no_license
KMitzner9/Data_Science
bc5cb965a0f0550d64313454e8d5c4ad73fb86cc
3ab332a155a284e5cd5db12dea0b4768285c8adf
refs/heads/main
2023-02-07T18:16:45.289615
2020-12-17T22:30:37
2020-12-17T22:30:37
307,819,316
0
0
null
null
null
null
UTF-8
R
false
false
5,473
r
ML_in_R_KaggleTut.R
#############################
### MACHINE LEARNING IN R ###
#############################
# Tutorial workflow on the California housing data: explore, clean
# (impute / one-hot encode / scale), split 80/20, then compare a
# cross-validated GLM against a random forest on RMSE.
library(tidyverse)
library(reshape2)

housing = read.csv('housing.csv')
head(housing)

#check the summary to make sure #s are #s and categoricals are categoricals
summary(housing)

### from this we can see...
# NAs in total_bedrooms that need to be addressed
# Feature encoding for ocean_proximity (even though R can often handle this)
# make total_bedrooms and total_rooms into mean columns instead

par(mfrow=c(2,5))

colnames(housing)

#take a look at the variables
# melt() stacks all columns into (variable, value) pairs so one faceted
# histogram call covers every variable.
ggplot(data = melt(housing), mapping = aes(x = value)) +
  geom_histogram(bins = 30) + facet_wrap(~variable, scales = 'free_x')

### from this we can see...
# some housing blocks have old age homes
# there is a cap on the median house value that may affect the data
# we need to standardize numeric scales for an non-tree based methods

##### CLEANING THE DATA #####

#impute missing values
#fill with median bc less influenced by outliers
housing$total_bedrooms[is.na(housing$total_bedrooms)] = median(housing$total_bedrooms, na.rm = TRUE)

summary(housing)

#change total columns to mean columns (per-household averages)
housing$mean_bedrooms = housing$total_bedrooms/housing$households
housing$mean_rooms = housing$total_rooms/housing$households

#drop the old columns, create variables drops with the column names
drops = c('total_bedrooms', 'total_rooms')

#update housing such that it excludes those columns
housing = housing[ , !(names(housing) %in% drops)]

head(housing)

#turn categoricals into booleans (manual one-hot encoding)
#get a list of all the categories in ocean_proximity col
categories = unique(housing$ocean_proximity)
categories

#split the categories off
#create a new df which is a list of all the values in col ocean_prox
cat_housing = data.frame(ocean_proximity = housing$ocean_proximity)
cat_housing

#use for loop to populate the df with columns and 0 values
# NOTE(review): the loop variable `cat` shadows base::cat inside the loop
# body (harmless here since cat() is never called inside it).
for(cat in categories) {
  cat_housing[,cat] = rep(0, times = nrow(cat_housing))
}
head(cat_housing)

#use for loop to populate df with binary values
# Row-wise: set a 1 in the indicator column matching each row's category.
for(i in 1:length(cat_housing$ocean_proximity)) {
  cat = as.character(cat_housing$ocean_proximity[i])
  cat_housing[,cat][i] = 1
}
head(cat_housing)
tail(cat_housing)

#dropping the OG column
cat_columns = names(cat_housing)
keep_columns = cat_columns[cat_columns != 'ocean_proximity']
cat_housing = select(cat_housing, one_of(keep_columns))
tail(cat_housing)

### Followup tasks here:
# coded with a python accent, more effective route for R?
# can you make a function that could be used to split any categorical column?

#scale numerical values
#do not scale median_house_value as that will be our y variable
colnames(housing)
drops = c('ocean_proximity', 'median_house_value')
housing_num = housing[ , !(names(housing) %in% drops)]
head(housing_num)
# scale() centers and divides each column by its standard deviation.
scaled_housing_num = scale(housing_num)
head(scaled_housing_num)

#merge our altered dataframes: one-hot columns + scaled numerics + raw target
cleaned_housing = cbind(
  cat_housing,
  scaled_housing_num,
  median_house_value=housing$median_house_value)
head(cleaned_housing)

##### CREATE A TEST SET OF DATA #####
# Seed fixed so the 80/20 split is reproducible.
set.seed(1738)

sample = sample.int(
  n = nrow(cleaned_housing),
  size = floor(.8*nrow(cleaned_housing)),
  replace = F)
train = cleaned_housing[sample, ] #just the sample (80%)
test = cleaned_housing[-sample, ] #everything but sample set (20%)

#some quick checks to make sure we actually have what we want
head(train) #check that we have the right columns and indexes are jumbled

#this checks that the length of the two sets = length of dataset
nrow(train) + nrow(test) == nrow(cleaned_housing)

##### TEST SOME PREDICTIVE MODELS #####
# Generalized Linear Model with cross-validation, k-folds = 5
library(boot)
?cv.glm
?glm

glm_house = glm(median_house_value ~ median_income + mean_rooms + population, data = cleaned_housing)
# delta holds the raw and bias-adjusted CV prediction error (MSE).
k_fold_cv_error = cv.glm(cleaned_housing, glm_house, K=5)
k_fold_cv_error$delta
glm_cv_rmse = sqrt(k_fold_cv_error$delta)[1]
glm_cv_rmse
names(glm_house) #which parts of this model can we call on?
glm_house$coefficients #seems median income has the biggest effect

# Random Forest model
library(randomForest)
?randomForest
names(train)

# Seed fixed again so the forest is reproducible.
set.seed(1738)
train_y = train[, 'median_house_value']
train_x = train[, names(train) != 'median_house_value']
head(train_y)
head(train_x)

#create the model (2 ways)
rf_model = randomForest(train_x, y = train_y, ntree = 1000, ### HERE I CHANGED NTREE TO 1000 from 500
                        importance = TRUE)
#rf_model = randomForest(median_house_value ~ ., #data = train, ntree = 500, importance = TRUE)

names(rf_model)
rf_model$importance
# %IncMSE = percent increase mean squared error
# higher this number, the more important the variable

#get out of the bag error estimate
sqrt(mean(rf_model$mse))

# predict() on a randomForest with no newdata returns the OOB predictions.
oob_prediction = predict(rf_model)
train_mse = mean(as.numeric((oob_prediction - train_y)^2))
oob_rmse = sqrt(train_mse)
oob_rmse # = 49,126
#now $49,126 is our benchmark for predictions, can we get smaller error?
# new value = 49,010. Slightly lower not much of a meaningful change

#how well does the model do on the test set?
test_y = test[, 'median_house_value']
test_x = test[, names(test) != 'median_house_value']
y_pred = predict(rf_model, test_x)
test_mse = mean(((y_pred - test_y)^2))
test_rmse = sqrt(test_mse)
test_rmse #our model scored roughly the same on the test data
8edc23e9db24f55e4d50881a25e772996f94bd74
d94045abbe4f33599d543543c00ccd2c70a11d7d
/time_calculations.R
6f0ed5836f7decb26ef8b967eff88a0dec0554d1
[]
no_license
mrdevlar/ga_benchmark
ecc4d12470c1028adb8e6fb3debccb17b0a40a32
5d57fba75b90035b92b6ced05fc854d6e29ab183
refs/heads/master
2016-09-02T03:43:25.677044
2015-08-04T11:43:18
2015-08-04T11:43:18
40,181,206
0
0
null
null
null
null
UTF-8
R
false
false
1,212
r
time_calculations.R
# Summarise GA benchmark run times: for each result file in euro_tour/,
# parse the configuration label from the file name, extract the elapsed
# times recorded in the file, and tabulate mean/SD/median/MAD per
# configuration.
library(readr)
library(stringr)
library(stringi)

eurotours = list.files("euro_tour/")
length(eurotours)

# Elapsed times look like "12.34" (two digits, dot, two digits).
timePat = "(\\d\\d\\.\\d\\d)"

# Summarise one tour file into a single-row data frame.
summarise_tour = function(tour) {
  # Configuration label "<selection>_<crossover>_<mutation>", parsed from
  # the file name via lookbehind matches after an underscore.
  mat1 = regmatches(tour, regexpr("(?<=_)(rw|to)", tour, perl = TRUE))
  mat2 = regmatches(tour, regexpr("(?<=_)(cx|ox|pbx|pmx)", tour, perl = TRUE))
  mat3 = regmatches(tour, regexpr("(?<=_)(dm|ism|scr|sim|sw)", tour, perl = TRUE))
  type = paste(mat1, mat2, mat3, sep = "_")

  # Split the file into lines and pull the first time found on each line;
  # lines without a time yield NA and are dropped.
  lines = unlist(str_split(read_file(paste0("euro_tour/", tour)), "\r\n"))
  times = stri_extract_first(lines, regex = timePat)
  times = as.numeric(times[!is.na(times)])

  # BUGFIX: the original computed str_subset(lines, timePat) and then
  # immediately overwrote it (dead code); it also grew a data.frame with
  # rbind() inside the loop from an all-NA seed row, coercing every column
  # to character. Rows are now built as proper typed data frames.
  row = data.frame(Type = type,
                   Mean = mean(times),
                   SD = sd(times),
                   Median = median(times),
                   MAD = mad(times),
                   stringsAsFactors = FALSE)
  print(row)
  row
}

# Bind all per-file rows at once instead of growing a data.frame in a loop.
rows = lapply(eurotours, summarise_tour)
output = if (length(rows) > 0) {
  do.call(rbind, rows)
} else {
  # No result files: emit an empty, correctly-typed table.
  data.frame(Type = character(), Mean = numeric(), SD = numeric(),
             Median = numeric(), MAD = numeric())
}

# NOTE(review): "clipboard" is a Windows-only connection; use a file path
# on other platforms.
write.table(output, "clipboard", sep="\t", row.names=FALSE)
d333ab7dcf11f5a45586392b708f3c4e9480f0b7
c88e57b9d63063691748bf0085762e8f271bd473
/1_R_objects/3_Matrices/3_basic_matrix_operations.R
b1bf47a608bbdb6f68ede5e77c591d233deb1836
[]
no_license
officialPrasanta/R_basics
8f7f7c25ce9d326a19ca4f9df6455f0d7e115b34
95058eb1d16de0f1dc6f08d8a1a4e6eecf78f70f
refs/heads/master
2023-06-02T06:06:15.389065
2021-06-14T00:27:27
2021-06-14T00:27:27
369,820,636
0
0
null
null
null
null
UTF-8
R
false
false
673
r
3_basic_matrix_operations.R
# Demonstrates basic algebraic and element-wise operations on two 3x3 matrices
# filled row-wise. Uses `<-` for assignment per R convention.

# matrix 1: rows (1,2,3), (4,5,6), (7,8,9)
mat1 <- matrix(data = c(1:9), nrow = 3, byrow = TRUE)
# matrix 2: rows (5,6,7), (8,9,10), (11,12,13)
mat2 <- matrix(data = c(5:13), nrow = 3, byrow = TRUE)
print(mat1)
print(mat2)

# addition of two matrices
cat("\nAddition:\n")
add_mat <- mat1 + mat2
print(add_mat)

# subtraction of two matrices
sub_mat <- mat2 - mat1
cat("\nSubtraction:\n")
print(sub_mat)

# matrix multiplication (algebraic product) of two matrices
mmul_mat <- mat2 %*% mat1
cat("\nMatrix Multiplication:\n")
print(mmul_mat)

# element-wise multiplication of two matrices
emul_mat <- mat2 * mat1
cat("\nElement-wise Multiplication:\n")
print(emul_mat)

# element-wise division of two matrices
div_mat <- mat2 / mat1
cat("\nElement-wise Division:\n")
print(div_mat)
f5cdb4d44b24054cc3bde274fc7c477077537c4d
52586df6b1df22e19750306185ee69a7b09abf42
/ISAEM/pk-pd/pk100/isaem_pk_simulated.R
ddf2c3aa6ad6797da2388f5b19eae8e7935fd5f7
[]
no_license
BelhalK/AccelerationTrainingAlgorithms
5d1390f5a5cb6f24f59f2c06073040056014aa64
0cc5f4405ad103f704cd7c6259762a66fb6bf37f
refs/heads/master
2023-07-25T02:28:38.095277
2020-10-30T09:14:28
2020-10-30T09:14:28
94,530,148
0
0
null
2023-07-06T21:20:14
2017-06-16T09:46:26
Jupyter Notebook
UTF-8
R
false
false
7,478
r
isaem_pk_simulated.R
####20 CHAINS # load("warfa_isaem.RData") # load("Rdata/pk100_1chain.RData") # load("Rdata/pk100_10chains.RData") # load("Rdata/pk100_20chains.RData") source('R/aaa_generics.R') source('R/compute_LL.R') source('R/func_aux.R') source('R/func_distcond.R') source('R/func_FIM.R') source('R/func_plots.R') source('R/func_simulations.R') source('R/main.R') source('R/main_estep.R') source('R/main_initialiseMainAlgo.R') source('R/main_mstep.R') source('R/SaemixData.R') source('R/SaemixModel.R') source('R/SaemixRes.R') source('R/SaemixObject.R') source('R/zzz.R') source('R/main_incremental.R') source('R/main_estep_incremental.R') source('/Users/karimimohammedbelhal/Documents/GitHub/AccelerationTrainingAlgorithms/ISAEM/incremental/R/mixtureFunctions.R') source("/Users/karimimohammedbelhal/Documents/GitHub/AccelerationTrainingAlgorithms/ISAEM/incremental/plots.R") library("mlxR") library("rlist") library("psych") library("coda") library("Matrix") library(abind) require(ggplot2) require(gridExtra) require(reshape2) # warfa_data <- read.table("/Users/karimimohammedbelhal/Desktop/csda_new/data/warfarin_data.txt", header=T) model1cpt<-function(psi,id,xidep) { dose<-xidep[,1] time<-xidep[,2] ka<-psi[id,1] V<-psi[id,2] Cl<-psi[id,3] k <- Cl/V ypred<-dose*ka/(V*(ka-k))*(exp(-k*time)-exp(-ka*time)) return(ypred) } model <- inlineModel(" [INDIVIDUAL] input = {ka_pop, V_pop, Cl_pop, omega_ka, omega_V, omega_Cl} DEFINITION: ka = {distribution=lognormal, reference=ka_pop, sd=omega_ka} V = {distribution=lognormal, reference=V_pop, sd=omega_V } Cl = {distribution=lognormal, reference=Cl_pop, sd=omega_Cl} [LONGITUDINAL] input = {ka, V, Cl,a} EQUATION: C = pkmodel(ka,V,Cl) DEFINITION: y = {distribution=normal, prediction=C, sd=a} ") N=500 param <- c( ka_pop = 2, omega_ka = 0.3, V_pop = 10, omega_V = 0.2, Cl_pop = 1, omega_Cl = 0.3, a =1) res <- simulx(model = model, parameter = param, treatment = list(time=0, amount=100), group = list(size=N, level='individual'), output = list(name='y', 
time=seq(1,5,by=1))) warfarin.saemix <- res$y warfarin.saemix$amount <- 100 saemix.data<-saemixData(name.data=warfarin.saemix,header=TRUE,sep=" ",na=NA, name.group=c("id"), name.predictors=c("amount","time"),name.response=c("y"), name.X="time") # Default model, no covariate saemix.model<-saemixModel(model=model1cpt,description="warfarin",type="structural" ,psi0=matrix(c(3,3,0.1,0,0,0),ncol=3,byrow=TRUE, dimnames=list(NULL, c("ka","V","Cl"))),fixed.estim=c(1,1,1), transform.par=c(1,1,1),omega.init=matrix(c(1,0,0,0,1,0,0,0,1),ncol=3,byrow=TRUE),covariance.model=matrix(c(1,0,0,0,1,0,0,0,1),ncol=3, byrow=TRUE)) # warfa_data <- read.table("/Users/karimimohammedbelhal/Documents/GitHub/AccelerationTrainingAlgorithms/ISAEM/incremental/data/warfarin_data.txt", header=T) # saemix.data<-saemixData(name.data=warfa_data,header=TRUE,sep=" ",na=NA, name.group=c("id"), # name.predictors=c("amount","time"),name.response=c("y1"), name.X="time") # saemix.model<-saemixModel(model=model1cpt,description="warfarin",type="structural" # ,psi0=matrix(c(3,7,1,0,0,0),ncol=3,byrow=TRUE, dimnames=list(NULL, c("ka","V","k"))), # transform.par=c(1,1,1),omega.init=matrix(c(1,0,0,0,1,0,0,0,1),ncol=3,byrow=TRUE), # covariance.model=matrix(c(1,0,0,0,1,0,0,0,1),ncol=3, # byrow=TRUE)) K1 = 200 K2 = 30 iterations = 0:(K1+K2-1) end = K1+K2 batchsize25 = 25 batchsize50 = 50 seed0=3456 nchains = 50 gamma = 1 options<-list(seed=39546,map=F,fim=F,ll.is=F,save.graphs=FALSE,nb.chains = nchains,nbiter.mcmc = c(2,2,2,0), nbiter.saemix = c(K1,K2),nbiter.sa=0,displayProgress=FALSE,nbiter.burn =0, map.range=c(0), nb.replacement=100,sampling='randomiter',gamma=gamma, algo="full") theo_ref<-saemix_incremental(saemix.model,saemix.data,options) theo_ref <- data.frame(theo_ref$param) theo_ref <- cbind(iterations, theo_ref[-1,]) # graphConvMC_threekernels(theo_ref,theo_ref,theo_ref) options.incremental75<-list(seed=seed0,map=F,fim=F,ll.is=F,save.graphs=FALSE,nb.chains = nchains, nbiter.mcmc = c(2,2,2,0), nbiter.saemix = 
c(K1,K2),displayProgress=FALSE, map.range=c(0), nbiter.sa=0,nbiter.burn =0, nb.replacement=75,sampling='randomiter',gamma=gamma,algo="minibatch") theo_mix75<-saemix_incremental(saemix.model,saemix.data,options.incremental75) theo_mix75 <- data.frame(theo_mix75$param) theo_mix75 <- cbind(iterations, theo_mix75[-1,]) options.incremental50<-list(seed=seed0,map=F,fim=F,ll.is=F,save.graphs=FALSE,nb.chains = nchains, nbiter.mcmc = c(2,2,2,0), nbiter.saemix = c(K1,K2),displayProgress=FALSE, map.range=c(0),nbiter.sa=0, nbiter.burn =0, nb.replacement=50,sampling='seq',gamma=gamma,algo="minibatch") theo_mix50<-saemix_incremental(saemix.model,saemix.data,options.incremental50) theo_mix50 <- data.frame(theo_mix50$param) theo_mix50 <- cbind(iterations, theo_mix50[-1,]) options.incremental25<-list(seed=seed0,map=F,fim=F,ll.is=F,save.graphs=FALSE,nb.chains = nchains, nbiter.mcmc = c(2,2,2,0), nbiter.saemix = c(K1,K2),displayProgress=FALSE, map.range=c(0), nbiter.sa=0,nbiter.burn =0, nb.replacement=25,sampling='seq',gamma=gamma,algo="minibatch") theo_mix25<-saemix_incremental(saemix.model,saemix.data,options.incremental25) theo_mix25 <- data.frame(theo_mix25$param) theo_mix25 <- cbind(iterations, theo_mix25[-1,]) # graphConvMC_threekernels(theo_ref,theo_mix25,theo_mix50) theo_ref_scaled <- theo_ref theo_mix25_scaled <- theo_mix25 theo_mix50_scaled <- theo_mix50 theo_ref_scaled$iterations = theo_ref_scaled$iterations*1 theo_mix25_scaled$iterations = theo_mix25_scaled$iterations*0.25 theo_mix50_scaled$iterations = theo_mix50_scaled$iterations*0.5 # graphConvMC_threekernels(theo_ref_scaled,theo_mix25_scaled,theo_mix50_scaled) theo_mix75_scaled <- theo_mix75 theo_mix75_scaled$iterations = theo_mix75_scaled$iterations*0.75 graphConvMC_5(theo_ref_scaled,theo_mix25_scaled,theo_mix50_scaled,theo_mix50_scaled,theo_mix75_scaled) theo_ref_scaled$algo <- 'full' theo_mix25_scaled$algo <- 'quarter' theo_mix50_scaled$algo <- 'half' theo_mix75_scaled $algo<- 'three quarter' comparison <- 
rbind(theo_ref_scaled,theo_mix25_scaled,theo_mix50_scaled,theo_mix75_scaled) var <- melt(comparison, id.var = c('iterations','algo'), na.rm = TRUE) # write.csv(var, file = "../notebooks/data/pk100_1chain.csv") # write.csv(var, file = "../notebooks/data/pk100_5chains.csv") # write.csv(var, file = "../notebooks/data/pk100_10chains.csv") # write.csv(var, file = "../notebooks/data/pk100_20chains.csv") # save.image("Rdata/pk100_1chain.RData") # save.image("Rdata/pk100_5chains.RData") # save.image("Rdata/pk100_10chains.RData") # save.image("Rdata/pk100_20chains.RData") graphConvMC_threekernels <- function(df,df2,df3, title=NULL, ylim=NULL) { G <- (ncol(df)-2)/3 ylim <-rep(ylim,each=2) graf <- vector("list", ncol(df)-2) o <- c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9) for (j in (2:(ncol(df)-1))) { grafj <- ggplot(df)+geom_line(aes_string(df[,1],df[,j],by=df[,ncol(df)])) +geom_line(aes_string(df2[,1],df2[,j],by=df2[,ncol(df2)]),colour="blue")+geom_line(aes_string(df3[,1],df3[,j],by=df3[,ncol(df3)]),colour="red")+ xlab("iteration") + ylab(names(df[j])) if (!is.null(ylim)) grafj <- grafj + ylim(ylim[j-1]*c(-1,1)) graf[[o[j]]] <- grafj } do.call("grid.arrange", c(graf, ncol=3, top=title)) }
852df0ea3ff05e0a7aec4da37332a03fb35de33a
29585dff702209dd446c0ab52ceea046c58e384e
/Biograph/R/cmc_as_age.R
0129580637b2245e48aa645e9eebffc1a4060df8
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
1,183
r
cmc_as_age.R
cmc_as_age <- function(x, born, format.born) {
  # Convert dates given as CMC (Century Month Code) values to decimal years
  # and compute the age (in years) at each date.
  #
  # Args:
  #   x           numeric vector of CMC dates (character input is coerced)
  #   born        date of birth: a CMC value when format.born is "CMC"/"cmc",
  #               otherwise a Date or character date
  #   format.born format of `born`: "CMC"/"cmc" or a Date format string
  #
  # Returns: list(year = decimal year of each x, age = age at each x);
  # negative ages in the CMC branch are replaced by NA with a message.
  if (is.character(x)) x <- as.numeric(x)
  if (!is.numeric(x)) {
    print("WARNING in cmc.as.age: cmc is not numeric")
    # x cannot still be character here (it was coerced above), so the
    # original re-coercion branch was unreachable; report the error directly.
    print("ERROR in cmc.as.age: cmc not numeric and not character.")
  }
  if (missing(format.born)) stop("cmc_as_age: format.born is missing")
  if (missing(born)) stop("ERROR: cmc_as_age: birth date is missing")
  # Parse the birth date as a validation step (as in the original; the parsed
  # value itself is not used below). `inherits()` replaces the fragile
  # `class(born) == "Date"` comparison; `||` for scalar conditions.
  if (inherits(born, "Date") || is.character(born)) {
    b <- as.Date(born, format.born)
  }
  if (format.born == "CMC" || format.born == "cmc") {
    # Both dates are CMC month counts: age is the month difference / 12.
    age <- (x - born) / 12
    if (any(age < 0, na.rm = TRUE)) {
      age[age < 0] <- NA
      message("cmc_as_age: negative ages are replaced by NA")
    }
    year <- cmc_as_year(x)
    return(list(year = year, age = age))
  }
  # Non-CMC birth date: convert it to a decimal year, then difference.
  # (Leftover debug prints of the intermediate values have been removed.)
  d <- Date_as_year(born, format.in = "%Y-%m-%d")
  year <- cmc_as_year(x)
  age <- year - d
  if (any(age < 0, na.rm = TRUE)) {
    warning("At least one date (cmc) is before date of birth.")
  }
  return(list(year = year, age = age))
}
be169d3562d5a0b8e8145feb45bbb007c569e31b
8893ba18785558773985e5dcf2049ddc01375e6b
/R/PCscoresCorr.R
369634613385396059f1a2ac0b5cf1eed32e9d0d
[]
no_license
cran/Arothron
a122b82cdfc75dc2be94115917c24c228445c8cd
79c614f1b17f54a72648fbcd4d924907a768f755
refs/heads/master
2023-02-07T17:29:56.204584
2023-02-01T11:40:08
2023-02-01T11:40:08
127,958,255
0
1
null
null
null
null
UTF-8
R
false
false
986
r
PCscoresCorr.R
#' PCscoresCorr
#' Perform a correlation test between two matrices of PCscores
#' @param matrix1 matrix: first set of PC scores
#' @param matrix2 matrix: second set of PC scores
#' @param nPCs numeric vector: specify which PC scores will be selected in the correlation test
#' @return corr the correlation values associated to each pair of PC scores
#' @return p.values p-values associated to the correlation test
#' @author Antonio Profico, Costantino Buzi, Marina Melchionna, Paolo Piras, Pasquale Raia, Alessio Veneziano
#' @export
PCscoresCorr <- function(matrix1, matrix2, nPCs = 1:5) {
  Rcors <- NULL
  pvalues <- NULL
  for (i in nPCs) {
    # Run the test once per PC (the original recomputed cor.test up to three
    # times per iteration). PC scores are defined only up to a sign flip, so
    # the sign-flip branch is equivalent to taking the absolute correlation;
    # the two-sided p-value is invariant under the flip.
    test_i <- cor.test(matrix1[, i], matrix2[, i])
    Rcors[i] <- abs(test_i$estimate)
    pvalues[i] <- test_i$p.value
  }
  out <- list("corr" = Rcors, "p.values" = pvalues)
  return(out)
}
12ef7bcb7421bbf31eaa1f37abfa68798a210997
aa3c74e4fd4c3865dc102e8b1324d6d9aa43a26d
/man/get_friends.Rd
142c8cf69b2c5462c6cdeacc71b4a8c8b46352cb
[]
no_license
mkearney/rtw
9b38f4282b4f61e4347cbcdbe53ac20ae5fb63cc
2e113bd17a5fe100cac9b39b0b7d7fde4484807c
refs/heads/main
2023-08-25T14:40:24.091921
2021-11-05T18:06:02
2021-11-05T18:06:02
420,239,950
0
0
null
null
null
null
UTF-8
R
false
true
4,428
rd
get_friends.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/friends.R \name{get_friends} \alias{get_friends} \title{Get user IDs of accounts followed by target user(s).} \usage{ get_friends( users, n = 5000, retryonratelimit = FALSE, page = "-1", parse = TRUE, verbose = TRUE, token = NULL ) } \arguments{ \item{users}{Screen name or user ID of target user from which the user IDs of friends (accounts followed BY target user) will be retrieved.} \item{n}{Number of friends (user IDs) to return. Defaults to 5,000, which is the maximum returned by a single API call. Users are limited to 15 of these requests per 15 minutes. Twitter limits the number of friends a user can have to 5,000. To follow more than 5,000 accounts (to have more than 5 thousand "friends") accounts must meet certain requirements (e.g., a certain ratio of followers to friends). Consequently, the vast majority of users follow fewer than five thousand accounts. This function has been oriented accordingly (i.e., it assumes the maximum value of n is 5000). To return more than 5,000 friends for a single user, call this function multiple times with requests after the first using the \code{page} parameter.} \item{retryonratelimit}{If you'd like to retrieve 5,000 or fewer friends for more than 15 target users, then set \code{retryonratelimit = TRUE} and this function will use base \code{Sys.sleep} until rate limits reset and the desired number of friend networks is retrieved. This defaults to FALSE. See details for more info regarding possible issues with timing misfires.} \item{page}{Default \code{page = -1} specifies first page of JSON results. Other pages specified via cursor values supplied by Twitter API response object. This is only relevant if a user has over 5000 friends (follows more than 5000 accounts).} \item{parse}{Logical, indicating whether to return parsed vector or nested list object. 
By default, \code{parse = TRUE} saves you the time [and frustrations] associated with disentangling the Twitter API return objects.} \item{verbose}{Logical indicating whether or not to include output messages. Defaults to TRUE, which includes printing a success message for each inputted user.} \item{token}{Every user should have their own Oauth (Twitter API) token. By default \code{token = NULL} this function looks for the path to a saved Twitter token via environment variables (which is what `create_token()` sets up by default during initial token creation). For instruction on how to create a Twitter token see the tokens vignette, i.e., `vignettes("auth", "rtweet")` or see \code{?tokens}.} } \value{ A data frame with two columns, "user" for name or ID of target user and "user_id" for follower IDs. } \description{ Returns a list of user IDs for the accounts following BY one or more specified users. To return the friends of more than 15 users in a single call (the rate limit maximum), set "retryonratelimit" to TRUE. } \details{ When \code{retryonratelimit = TRUE} this function internally makes a rate limit API call to get information on (a) the number of requests remaining and (b) the amount of time until the rate limit resets. So, in theory, the sleep call should only be called once between waves of data collection. However, as a fail safe, if a system's time is calibrated such that it expires before the rate limit reset, or if, in another session, the user dips into the rate limit, then this function will wait (use Sys.sleep for a second time) until the next rate limit reset. Users should monitor and test this before making especially large calls as any systematic issues could create sizable inefficiencies. At this time, results are ordered with the most recent following first — however, this ordering is subject to unannounced change and eventual consistency issues. While this remains true it is possible to iteratively build friends lists for a user over time. 
} \examples{ \dontrun{ ## get user ids of accounts followed by Donald Trump (djt <- get_friends("realDonaldTrump")) ## get user ids of accounts followed by (friends) KFC, Trump, and Nate Silver. (fds <- get_friends(c("kfc", "jack", "NateSilver538"))) } } \seealso{ \url{https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-friends-ids} Other ids: \code{\link{get_followers}()}, \code{\link{next_cursor}()} } \concept{ids}
05b105af58c781e4db87f495b72e9b245c73e52f
08fca79285da60dd56b2ae7bea2cace36d3476ba
/Fig4_radarCharts.R
351687aa0e9e1e04fb5f2625e14f7391232dab87
[ "MIT" ]
permissive
artyomovlab/ImmGenOpenSource
f72503b7ef524566616955646f968621afae9b5d
91d6dbd4c2b14c482b7fcb00e80bbdfbe732a44c
refs/heads/master
2023-01-10T00:02:02.629669
2022-12-29T16:01:53
2022-12-29T16:01:53
227,374,383
1
0
MIT
2020-01-14T23:34:00
2019-12-11T13:38:44
R
UTF-8
R
false
false
2,871
r
Fig4_radarCharts.R
library(tidyverse) load("Data/eyegene.rda") anno <- read_csv("Data/anno.csv") df <- eyegene %>% as_tibble() # Glimpse on annotation anno %>% count(value) %>% arrange(desc(n)) # Calculate per-module statistics stats <- df %>% mutate(module = row_number()) %>% gather('sample', 'value', -module) %>% group_by(module) %>% summarize(max = max(value), min = min(value)) stats plot_radarchart <- function(long) { long %>% spread(module, value) %>% as.data.frame() %>% column_to_rownames('sample') %>% { # bind in following order: max, min, data, mean rbind(stats$max, # + 1, # max(stats$max) stats$min, # - 1, # min(stats$min) # 0, ., means) } %>% { .[, c(1, ncol(.):2)] } %>% fmsb::radarchart( pty = 32, # point symbol (16 filled circle, 32 nothing) pcol = line_color, plwd = line_width, plty = line_type, cglcol = 'gray22', # grid color cglwd = 1, # grid width cglty = 2, # grid type (1 solid, 2 dashed, 3 dotted) axistype = 0, # just 0 axislabcol = "black", # labels color vlcex = 1 # font size magnification for labels ) } plot_radarchart_pdf <- function(long, filename) { pdf(filename, width = 4, height = 4) plot_radarchart(long) dev.off() } plot_radarchart_png <- function(long, filename) { png(filename) plot_radarchart(long) dev.off() } # Choose specific meta-sample load("Data/annotation_colors.Rda") names(annotation_colors$metaSample) for(metasample in names(annotation_colors$metaSample)){ print(metasample) # Pick specific meta-sample filtered <- df %>% select(select_vars(names(df), anno %>% filter(value == metasample) %>% pull(samples))) # Convert to long format long <- filtered %>% # mutate(module = str_c('Mod.', row_number())) %>% mutate(module = row_number()) %>% gather('sample', 'value', -module) # Calculate means means <- long %>% group_by(module) %>% summarize(mean = mean(value)) %>% pull(mean) # Number of samples k <- ncol(filtered) %>% print() { # Setup plot line color color_all <- 'ivory4' # all samples color_mean <- unname(annotation_colors$metaSample[metasample]) # 
mean of sample # navy line_color <- c(rep(color_all, k), color_mean) } { # Setup plot line width width_all <- 2 width_mean <- 4 line_width <- c(rep(width_all, k), width_mean) } { # Setup plot line type (1 solid, 2 dashed, 3 dotted) type_all <- 1 type_mean <- 1 line_type <- c(rep(type_all, k), type_mean) } # plot_radarchart(long) plot_radarchart_pdf(long, str_interp('spiderplot-${metasample}.pdf')) # plot_radarchart_png(long, str_interp('spiderplot-${metasample}.png')) }
4010d44120ca09042f4fad02c4ea354d4d533359
257d3d8444e6994288e5c6cb29f9f5e00a4e7ac4
/GEMRegress.R
aab68be496c5f6243af1c7ffc61036ea08236560
[]
no_license
kennawayjic/Boleracea-AssociativeTranscriptomics
251d4bd95ac718cf8724853205d819795a2c47eb
8cd9347f770431a39d498b81fc23fe4681abb291
refs/heads/main
2023-02-27T21:48:25.925237
2021-01-27T18:50:44
2021-01-27T18:50:44
null
0
0
null
null
null
null
UTF-8
R
false
false
3,568
r
GEMRegress.R
require('data.table') ## Read in delimited file with sequence identifiers and trait values traitDataFile <- tcltk::tk_choose.files(default = "", caption = "Please select one trait data file") traitData <- read.delim(traitDataFile, header=TRUE, row.names=1, check.names = FALSE, na.strings = "-999") ## -999 is TASSEL output ## Read in delimited files with sequence names and rpkm values rpkmC.temp <- read.table ("Bol_RPKMTASSEL.txt", header=TRUE, sep= "\t", row.names=1) # can use the check.names=FALSE argument if there's "unusual" symbols in the column headers 2018-01-15 outputName="" outputName=readline(prompt="Please enter output identifier... ") print("reading files...") traitLabel = colnames(traitData) meansCutOff = 0.4 ## remove low rpkm means print(paste0("Removing markers with a mean RPKM < ", meansCutOff)) delrpkmC = rpkmC.temp[rowMeans(rpkmC.temp) >= meansCutOff, ] rpkmC <- t(delrpkmC) ## Delete missing varieties from rpkm files rpkmmergeC <- merge(traitData,rpkmC, by="row.names") rownames(rpkmmergeC) <- rpkmmergeC[,1] ## Function for apply()ing. Does a linear model between trait data and genes performLinearRegression <- function(exp_vector) { model <- lm(rpkmmergeC[,2] ~ exp_vector) return(as.numeric(anova(model)[1,])) } ## Do the linear models. Each gene is in a column hence apply() over ## columns. rpkmmergeA contains lines in column 1 and the trait data ## in column 2 hence these are dropped in the apply() call. The beauty ## of this way is that it makes the row names the genes in the final ## lmResults output. lmResults <- t(apply(rpkmmergeC[, -c(1,2)], 2, performLinearRegression)) colnames(lmResults) <- c("Df", "SumSq", "MeanSq", "Fvalue", "Pvalue") ## calculate log10P log10P <- -log10(lmResults[,"Pvalue"]) print("Calculated p-values") ## Read B. oleracea to Arabidopsis matches codesFile = read.csv("Marker_to_At_Bol.csv", header=TRUE, na.strings = ".", fileEncoding="UTF-8-BOM") ## Get everything together for writing to a file. 
Made an explicit ## rownames column so don't have to use row.names in next regress ## plotter step. Not a problem because I use row.names=FALSE in ## write.table. Note needed to use as.data.frame otherwise it all came ## out as a list of character vectors i.e. in quotes which obviously ## can't be order()ed. getGeneModels = function(results) { unigenes = unlist(strsplit(sub("_", "~#~", results$unigene), "~#~")) ArabidopsisHits = codesFile[ codesFile$unigene %in% unigenes, ] results$unigenes = unigenes r = data.table( results ) ah = data.table( ArabidopsisHits ) resultsWithAGI = merge(r, ah, by="unigene") ## likely in the future can just merge by="unigene" as identical(as.character(results$unigene), results$tmp) == TRUE return(resultsWithAGI) } BoleraceaUnigene = rownames(lmResults) traitAndUnigene = as.data.frame(cbind(trait = traitLabel, unigene = BoleraceaUnigene)) print("Producing final results") finalResults <- cbind(traitAndUnigene, log10P, lmResults) finalResults = getGeneModels(finalResults) finalResults <- finalResults[order(finalResults$log10P, decreasing = TRUE),] ## Write these columns for now. In future if the apply() is quick ## enough probably combine this script and grapher one together. finalResults = finalResults[,c("trait", "unigene", "C.Chr", "AGI", "log10P", "Df", "SumSq", "MeanSq", "Fvalue", "Pvalue")] resultsFile = paste("Reg-", paste(outputName, collapse="-"), ".txt", sep="") write.table(finalResults, resultsFile, quote=FALSE, sep="\t" ,row.names=FALSE, col.names=TRUE) print ("Phew! Complete...")
d6ef5e9446de7b3d77331d10ca9a82a32e7c0d6a
3f38270d8200ab51a3772dad33365801205bf20f
/scripts/S2-ejercicios.R
daafdc097041bf038dc0b13af9ce77ed3a96fd27
[]
no_license
Songeo/introduccion-r-material
18b1d0cca7a9c6aa2441ad13d25e3fd748bdb460
9835d62aeb0655cac11c02bbcf65345c32f845a5
refs/heads/master
2021-07-10T11:20:13.959415
2017-10-05T23:31:00
2017-10-05T23:31:00
103,583,018
1
0
null
null
null
null
UTF-8
R
false
false
8,918
r
S2-ejercicios.R
## Ejercicios ## # Tipo de datos ---- ### Ej: Hidden Figures IMDB ---- # Usando los siguientes objetos crea una lista # de tres elementos con nombres: director, stars y reviews. director_hf <- "Theodore Melfi" stars_hf <- c( "Taraji P. Henson", "Octavia Spencer", "Janelle Monáe", "Kirsten Dunst", "Kevin Costner", "Jim Parsons", "Mahershala Ali") reviews_hf <- data.frame( scores = c(9, 6, 5, 10), source = c( rep("IMDB", 4) ), comments = c("It made for an old-fashioned movie going experience...", "Evident Heroism, Hidden Doubts", "OK, but very disappointing", "Don't let Hidden Figures be a hidden treasure!") ) # La lista se llama `hidden_figures`: hidden_figures <- list( director = , stars = , reviews = ) str(hidden_figures) ### Ej: Calificación promedio ---- # Extrae los scores de la película `hidden_figures` y # con la función `mean()` calcula el promedio. # # 1. Primero deberás extraer el elemento que contiene los scores. Es un dataframe. # 2. Después deberás seleccionar la columna de *scores*. # 3. Por último calcular el promedio y asignarlo a `avg_reviews_hf`. # # # Tip: Usando la función `str()` sobre la lista ubica el nivel # en el que esta el valor *scores*. reviews_df <- hidden_figures reviews_vec <- avg_reviews_hf <- mean( ) avg_reviews_hf ### Ej: Pesos a dolares ---- # El siguiente vector presenta el precio de # la gasolina en diferentes localidades. gas_cdmx <- c(15.82, 15.77, 15.83, 15.23, 14.95, 15.42, 15.55) gas_cdmx # Usando la siguiente lista de tipo de cambio por mes: # - Julio: 17.3808 # - Agosto: 17.6084 # - Septiembre: 17.7659 # Crea un dataframe donde cada # variable/columna sea el precio en dolares # por cada mes. gas_usd_df <- data.frame( julio = gas_cdmx/ agosto = septiembre = ) print(gas_usd_df) # Funciones ---- ### Ej: Suma de valores absolutos # Crea una función que sume los valores # absolutos de dos números. Los argumentos # deben ser estos números. # # Tip: Usa la función `abs()` para obtener # el valor absoluto de la función. 
suma_abs_fun <- function(a, b){ } suma_abs_fun(-4, 2) ### Ej: Likes ---- # Considerando el siguiente vector de likes # de cada día de la semana. likes <- c(16, 7, 9, 20, 2, 17, 11) names(likes) <- c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun") likes # Crea una función en la # imprima *Hoy fuiste popular* si # los likes del día fueron mayores a 15 y # regrese el número de likes. # # Si el número de likes es menor a 15, entonces # imprime *:(* y regresa 0. # # Usa la función `print()`. likes_fun <- function(num) { if (num > ) { print() return() } else { print() return() } } # Prueba la función para el primer elemento # del vector `likes`. likes_fun(likes[1]) ### Ej: Grafica de gasolina ---- # El siguiente vector presenta el precio de # la gasolina en diferentes localidades. gas_cdmx <- c(15.82, 15.77, 15.83, 15.23, 14.95, 15.42, 15.55) gas_cdmx # Completa la siguiente función tal que # considerando el argumento tipo de cambio, # imprima una grafica del vector en dolares y regrese este vector. grafica_dolar_fun <- function(precio, tipo_cambio){ precio_en_dolar <- precio/ print(plot()) return() } # Considerando el tipo de cambio # de los siguientes meses obten # el vector y la grafica de cada mes. # # - Julio: 17.3808 # - Agosto: 17.6084 gas_dolar_julio <- grafica_dolar_fun(, 17.3808) gas_dolar_agosto <- grafica_dolar_fun(, 17.6084) ### Ej: Instala y carga ---- # Instala y carga en tu computadora los paquetes # en listados antes. install.packages(readr) install.packages(readxl) install.packages(tidyr) install.packages(dplyr) install.packages(stringr) install.packages(ggplot) install.packages(lubridate) library(readr) library(readxl) library(lubridate) library(stringr) library(tidyr) library(dplyr) library(ggplot) ### Ej: Search ---- # Después de cargar los paquetes # llama el comando `search()` # ¿Observas las nuevas librerías de la sesión? 
# Iteraciones ---- ### Ej: Ciudad de México ---- # Considerando la lista siguiente, cdmx_list <- list( pop = 8918653, delegaciones = c("Alvaro Obregón", "Azcapotzalco" ,"Benito Juárez" , "Coyoacán" ,"Cuajimalpa de Morelos" ,"Cuauhtémoc" , "Gustavo A. Madero" , "Iztacalco" ,"Iztapalapa" , "Magdalena Contreras" ,"Miguel Hidalgo" ,"Milpa Alta" , "Tláhuac" ,"Tlalpan" , "Venustiano Carranza" ,"Xochimilco"), capital = TRUE ) # obten la clase # de cada elemento con la función `lapply()`. lapply( , class) ### Ej: Mínimo y máximo ---- # La siguiente función extrae la letra de menor posicion # y mayor posicion en orden alfabético. min_max_fun <- function(nombre){ nombre_sinespacios <- gsub(" ", "", nombre) letras <- strsplit(nombre_sinespacios, split = "")[[1]] c(minimo = min(letras), maximo = max(letras)) } # Es decir, si incluímos las letras `abcz` la letra # *mínima* es a y la *máxima* es z. min_max_fun("abcz") # El siguiente vector incluye el nombre # de las 16 delegaciones de la Ciudad de México. delegaciones <- c("Alvaro Obregon", "Azcapotzalco" ,"Benito Juarez" , "Coyoacan" ,"Cuajimalpa de Morelos" ,"Cuauhtemoc" , "Gustavo Madero" , "Iztacalco" ,"Iztapalapa" , "Magdalena Contreras" ,"Miguel Hidalgo" ,"Milpa Alta" , "Tlahuac" ,"Tlalpan" , "Venustiano Carranza" ,"Xochimilco") # Aplica la función `sapply()` para obtener un # arreglo con la letra máxima y mínima de cada nombre. sapply(, ) ### Ej: Precio de la gasolina ---- # El siguiente vector incluye el precio de la # gasolina en diferentes estados del país en julio # de 2017. gas_cdmx <- c(15.82, 15.77, 15.83, 15.23, 14.95, 15.42, 15.55) gas_cdmx # 1. Crea una función que convierta el precio a dolares # suponiendo que un dolar equivale a 17.76 pesos. conv_fun <- function(precio){ /17.76 return() } # 2. Usando la función `lapply()` # convierte el precio de la gasolina a dolares. gas_cdmx_usd_lista <- lapply(, conv_fun) # 3. Usa la función `unlist()` para convertir la # lista a un vector. 
gas_cdmx_usd <- unlist() print(gas_cdmx_usd) ### Ej: Estadísticos importantes ---- estadisticos <- c("GAUSS:1777", "BAYES:1702", "FISHER:1890", "PEARSON:1857") split_estadisticos <- strsplit(estadisticos, split = ":") split_estadisticos # Usa la función predefinida `tolower()` y # `lapply()` para convertir a minúsculas # cada letra de la lista `split_estadisticos`. split_lower <- lapply( , ) print(split_lower) ### Ej: Nombres y fechas ---- # Usando el vector `split_estadísticos` del # ejercicio anterior. str(split_estadisticos) # 1. Crea una función que regrese la # primera posición. primera_pos_fun <- function(lista){ } # 2. Crea una función que # regrese la segunda posición. segunda_pos_fun <- function(lista){ } # 3. Usando `lapply()` crea una lista con los # nombres de los estadísticos # y otra con la fecha de nacimiento. nombres <- lapply() fechas <- lapply() ### Ej: Función anónima ---- # Usando una función anónima y el vector `split_estadísticos` # en un solo `lapply()` o `sapply()` obten # un vector compuesto de la primera posición, es decir el nombre, # en minúsculas. # # Tip: si usas `lapply()` recuerda usar la función `unlist()`. nombre_estadisticos <- (split_estadisticos, function(elemento){ tolower() }) nombre_estadisticos ### Ej: Tempraturas ---- # En la siguiente lista se presenta el registro # de temperatura de tres ciudades # a las 07:00 am, 10:00 am, 01:00 pm, # 04:00 pm y 07:00 pm. temp_lista <- list( cdmx = c(13, 15, 19, 22, 20), guadalajara = c(18, 18, 22, 26, 27), tuxtla_gtz = c(22, 24, 29, 32, 28) ) str(temp_lista) # Completa la siguiente función que obtiene el promedio entre # el valor mínimo y máximo registrados. promedio_extremos_fun <- function(x) { ( min() + max() ) / 2 } # Implementa la función a la lista y obten # la temperatura promedio de extremos para cada # ciudad usando `lapply()` y `sapply()`. lapply(,) sapply(,) ### Ej: ¡Demasiado Rápido! 
---- # Crea una función del tipo `while` en la que # mientras la velocidad sea mayor a 50 km/hr # se reduzca de la siguiente forma: # # - Si es mayor a 80 km/hr se reducen 20 km/hr e imprime # **¡Demasido rápido!**. # # - Si es menor o igual a 80km/hr se reducen únicamente # 5 km/hr. velocidad_act <- 140 while(velocidad_act > ){ if(velocidad_act > ){ print() velocidad_act <- } if(velocidad_act < ){ velocidad_act <- } velocidad_act }
faf47106b6d603ade250f04d2cc9f90524fd2451
28a0104f0da7b827d0244bdf9e515b9402691115
/R/TransToMax.R
1a56edecd69db4946b59d0506f8f4f074537b027
[ "MIT" ]
permissive
alastair-JL/StochasticCIB
12176530d810659f9501b16a00a1d0d0caef22e6
0e442d14f58e38dd7d66f3ee527f7c069c908bd5
refs/heads/master
2023-07-25T06:32:52.467223
2023-07-08T01:48:36
2023-07-08T01:48:36
43,457,903
1
0
null
null
null
null
UTF-8
R
false
false
1,956
r
TransToMax.R
#' TransToMax
#'
#' A transition matrix function, whereby given the current worldstate, we always transition directly to the world state with the highest possible score.
#' @keywords CIB
#' @export
#' @param TheList a list containing the CIB matrix, and a "shape" vector. The output of \code{\link{InputCibBanner}} is an appropriate input here.
#' @param TransRelAdj a list containing a blank transition matrix, a relative score matrix, and an adjacency matrix. The output of \code{\link{MakeScoreMatrix}} is appropriate here. If left as \code{NA}, it is computed from \code{TheList} via \code{\link{MakeScoreMatrix}}.
#' @param Deterministic if \code{TRUE}, ties between equally scored target states are broken deterministically (a tiny column-dependent offset is added to the scores, so the highest-indexed candidate wins).
#' @note In the case of a tie (and \code{Deterministic = FALSE}), transition probability is split evenly between the top candidates.
#' @note This transition procedure is one of the most likely to result in loops, with antagonistic and synergistic descriptors switching simultaneously, and thus never reaching a stable arrangement (for example Ab -> aB -> Ab->...)
#' @note Other transition functions can be found via \code{\link{TransitionCalculators}}
#' @return A matrix describing the transition probability from each world state (rows) to each other world state (columns).
#' @author Alastair Jamieson Lane. <aja107@@math.ubc.ca>
#' @examples
#' data(ExampleCIBdata)
#' Transitions<-TransToMax(ExampleCIBdata)
#'
#'
TransToMax <- function(TheList, TransRelAdj = NA, Deterministic = FALSE) {
  # BUG FIX: `is.na()` is vectorised, so the original `if (is.na(TransRelAdj))`
  # raised an error (condition of length > 1, hard error in R >= 4.2) whenever
  # the documented list argument was actually supplied. Only treat the argument
  # as "missing" when it is not a list and is entirely NA.
  if (!is.list(TransRelAdj) && all(is.na(TransRelAdj))) {
    TransRelAdj <- MakeScoreMatrix(TheList)
  }

  # Element 2 holds the relative score of every target state (columns) from
  # every current state (rows). Elements 1 (blank transitions) and 3
  # (adjacency) are not needed for this "always jump to the maximum" rule.
  RelativeScores <- TransRelAdj[[2]]

  if (Deterministic) {
    # Add a tiny strictly increasing per-column offset so that exact ties are
    # always resolved in favour of the highest-indexed column.
    epsilon <- 1e-5
    RelativeScores <- t(t(RelativeScores) + epsilon * seq_len(ncol(RelativeScores)))
  }

  # For each current state (row), mark the column(s) achieving the maximal
  # score. apply() returns its per-row results column-wise, hence the
  # transposes to keep rows as current states.
  Transitions <- t(apply(RelativeScores, 1, function(x) x == max(x)))
  # Normalise each row so tied maxima share the probability mass equally.
  t(apply(Transitions, 1, function(x) x / sum(x)))
}
67cbf68064d0b245e672f67398a76243995590ec
24fd48d0c97178697813037b986aad8987b48e55
/man/EBlassoNEG.BinomialCV.Rd
ca1321052896956523c755049c8f6b8db19ec202
[]
no_license
cran/EBEN
0e240e19e87af74e38e9b4d7010bb80a3b4c0d92
a43e993c25f14365e496e266f2b79e57d0dc3c9c
refs/heads/master
2023-06-07T12:51:46.551941
2023-05-29T15:40:02
2023-05-29T15:40:02
20,628,286
0
2
null
2015-11-17T04:19:07
2014-06-08T22:44:34
C
UTF-8
R
false
false
2,768
rd
EBlassoNEG.BinomialCV.Rd
\name{EBlassoNEG.BinomialCV} \alias{EBlassoNEG.BinomialCV} %- Also NEED an '\alias' for EACH other topic documented here. \title{Cross Validation (CV) Function to Determine Hyperparameters of the EBlasso Algorithm for Binomial Model with Normal-Exponential-Gamma (NEG) Prior Distribution} \description{Hyperparameters control degree of shrinkage, and are obtained via Cross Validation. This program performs three steps of CV.\cr 1st: a = b = 0.001, 0.01, 0.1, 1;\cr 2nd: fix b= b1; a=[-0.5, -0.4, -0.3, -0.2, -0.1, -0.01, 0.01, 0.05, 0.1, 0.5, 1];\cr 3rd: fix a = a2; b= 0.01 to 10 with a step size of one for b > 1 and a step size of one on the logarithmic scale for b < 1\cr In the 2nd step, a can take value from -1 and values in [-1, -0.5] can be added to the set in line 13 of this function (The smaller a is, the less shrinkage.) } \usage{ EBlassoNEG.BinomialCV(BASIS, Target, nFolds,foldId, Epis,verbose, group) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{BASIS}{sample matrix; rows correspond to samples, columns correspond to features} \item{Target}{Class label of each individual, TAKES VALUES OF 0 OR 1} \item{nFolds}{number of n-fold cv} \item{foldId}{random assign samples to different folds} \item{Epis}{TRUE or FALSE for including two-way interactions} \item{verbose}{from 0 to 5; larger verbose displays more messages} \item{group}{TRUE or FALSE; FALSE: No group effect; TRUE two-way interaction grouped. Only valid when Epis = TRUE } } \details{If Epis=TRUE, the program adds two-way interaction K*(K-1)/2 more columns to BASIS \cr Note: Given the fact that degree of shrinkage is a monotonic function of (a,b), \cr The function implemented a 3-step search as described in Huang, A. 
2014, for full \cr grid search, user needs to modify the function accordingly.} \value{ \item{CrossValidation}{col1: hyperparameters; col2: loglikelihood mean; standard ERROR of nfold mean log likelihood} \item{a_optimal}{the optimal hyperparameter as computed} \item{b_optimal}{the optimal hyperparameter as computed} } \references{Huang A, Xu S, Cai X: Empirical Bayesian LASSO-logistic regression for multiple binary trait locus mapping. BMC genetics 2013, 14(1):5. \cr Huang, A., S. Xu, et al. Whole-genome quantitative trait locus mapping reveals major role of epistasis on yield of rice. PLoS ONE 2014, 9(1): e87330.} \author{Anhui Huang; Dept of Electrical and Computer Engineering, Univ of Miami, Coral Gables, FL} \examples{ library(EBEN) data(BASISbinomial) data(yBinomial) #reduce sample size to speed up the running time n = 50; k = 100; BASIS = BASISbinomial[1:n,1:k]; y = yBinomial[1:n]; \dontrun{ CV = EBlassoNEG.BinomialCV(BASIS, y, nFolds = 3,Epis = FALSE, verbose = 0) } }
9752c00885af6114037e37e62d30043327e6009b
18d354c010e185fd8abc1ebaf8b5e2b8d2bbd49d
/script.R
ea1762bd22166479eba73cfe45e6e0576f5600ad
[]
no_license
shrutijalewar/tidyverse-workshop-alexander-poon-evanplancaster
2a94cf37f1b448d37ed919a00a9ef0979daf3e32
6b7721410a218b2dd5db5d8691f801cf47351de6
refs/heads/master
2020-03-22T13:27:05.464664
2018-01-08T15:14:57
2018-01-08T15:14:57
null
0
0
null
null
null
null
UTF-8
R
false
false
4,322
r
script.R
library(tidyverse)

# District-level achievement profile data (one row per district).
ach_profile <- read_csv("data/achievement_profile_data_with_CORE.csv")

## Exercise 1 ----
# Number of districts with a 100% Algebra I proficiency rate.
ach_profile %>%
  filter(AlgI == 100) %>%
  count()

## Exercise 2 ----
# Create `math_achievement`:
#   "High"    if Math proficiency >= 75
#   "Medium"  if 50 <= Math proficiency < 75
#   "Low"     if Math proficiency < 50
#   "No Data" otherwise (Math is NA)
# Done inside mutate() rather than with base `$<-` assignment.
ach_profile <- ach_profile %>%
  mutate(math_achievement = case_when(
    Math >= 75 ~ "High",
    Math >= 50 ~ "Medium",
    Math < 50 ~ "Low",
    TRUE ~ "No Data"
  ))

## Exercise 3 ----
# District 792 (Shelby County).
ach_profile %>%
  filter(system == 792) %>%
  View()

## Exercise 4 ----
# Read data/tvaas.csv, rename the TVAAS columns, drop `District Name`.
# NOTE(review): the exercise text says to rename `District Name` to `system`,
# but the original code renamed `District Number` -- kept as the code had it;
# confirm which is intended.
tvaas <- read_csv("data/tvaas.csv")

tvaas_data <- tvaas %>%
  rename(
    system = `District Number`,
    `TVAAS Composite` = `District-Wide: Composite`,
    `TVAAS Literacy` = `District-Wide: Literacy`,
    `TVAAS Numeracy` = `District-Wide: Numeracy`
  ) %>%
  select(-`District Name`)

## Exercise 5 ----
# Sort alphabetically by CORE region, then by Algebra I proficiency in
# descending order; keep name, Algebra I proficiency, and CORE region.
ach_profile %>%
  arrange(CORE_region, desc(AlgI)) %>%
  select(system_name, AlgI, CORE_region)

## Exercise 6 ----
# Mean, minimum, and maximum district graduation rate.
ach_profile %>%
  summarise(
    Mean_Grad = mean(Graduation, na.rm = TRUE),
    Min_Grad = min(Graduation, na.rm = TRUE),
    Max_Grad = max(Graduation, na.rm = TRUE)
  )

## Exercise 7 ----
# Districts above the median district on both Percent ED and Math proficiency.
ach_profile %>%
  mutate(
    Med_Pct_ED = median(Pct_ED, na.rm = TRUE),
    Med_Math = median(Math, na.rm = TRUE)
  ) %>%
  filter(Pct_ED > Med_Pct_ED, Math > Med_Math) %>%
  select(system_name, Pct_ED, Med_Pct_ED, Math, Med_Math)

## Exercise 8 ----
# Districts with a higher dropout rate than the average of districts in the
# same CORE region.
ach_profile %>%
  group_by(CORE_region) %>%
  mutate(avg_drop_out_by_CR = mean(Dropout, na.rm = TRUE)) %>%
  ungroup() %>%
  filter(Dropout > avg_drop_out_by_CR) %>%
  select(system_name, Dropout, avg_drop_out_by_CR)

## Exercise 9 ----
# Per-district average proficiency in math, English, and science subjects,
# with each average placed next to its component columns.
# rowwise() is needed because mean(c(...)) mixes several columns per row.
ach_profile %>%
  rowwise() %>%
  mutate(
    avg_math = mean(c(Math, AlgI, AlgII), na.rm = TRUE),
    avg_eng = mean(c(ELA, EngI, EngII, EngIII), na.rm = TRUE),
    avg_sci = mean(c(Science, BioI, Chemistry), na.rm = TRUE)
  ) %>%
  ungroup() %>%
  select(system_name,
         Math, AlgI, AlgII, avg_math,
         ELA, EngI, EngII, EngIII, avg_eng,
         Science, BioI, Chemistry, avg_sci)

## Exercise 10 ----
# Number of districts at each TVAAS level, by CORE region.
# count(a, b) is the idiomatic form of group_by(a, b) %>% count().
ach_profile %>%
  inner_join(tvaas_data, by = "system") %>%
  count(CORE_region, `TVAAS Composite`)

## Exercise 11 ----
# Reshape `tvaas_data` long by subject, then arrange by system.
# pivot_longer() supersedes gather().
tvaas_data %>%
  pivot_longer(`TVAAS Composite`:`TVAAS Numeracy`,
               names_to = "subject", values_to = "level") %>%
  arrange(system)
48a9c2ff08aa8180c213d0b3d5f10e838b657bf5
8403b972f889cea691634290304d86e073cc6af7
/SDM I - HW3/P2.R
c601ad416538cbd28ec2efcecb78c9880dfb3c1c
[]
no_license
rohithx/statistical-data-mining
6f1867ae3a7bb39ed52aa7d85251131c6a570339
378fdfe59b5c16bfa1b180bd43dcb7bab5cb2b7e
refs/heads/main
2023-05-13T12:48:52.897500
2021-06-07T21:24:05
2021-06-07T21:24:05
373,328,031
0
0
null
null
null
null
UTF-8
R
false
false
1,113
r
P2.R
# Written by Rohith
#
# Best-subset selection on simulated data: generate 20 Gaussian predictors,
# build a sparse linear response, fit best-subset regression on a training
# split, and compare training vs test MSE across subset sizes.
# NOTE(review): rm(list = ls()) and x11() in a script are generally
# discouraged (clears the caller's workspace; X11-only device) -- kept as-is.
rm(list =ls())
set.seed(1234)
library(tidyverse)
library(leaps)

# 1000 observations of 20 independent N(0, 1) predictors.
gen_data <- data.frame(replicate(20, rnorm(n = 1000)))

# Build the response by folding over the predictor columns. With no .init,
# purrr::reduce() seeds the fold with the FIRST column (so X1 effectively
# enters with coefficient 1); each later column enters with a coefficient
# drawn from N(2, 1) with probability 0.4 (otherwise 0), plus fresh N(0, 1)
# noise at every step.
# NOTE(review): because rnorm(1000) is added inside the fold, noise
# accumulates once per column (~19 noise terms), not once overall -- confirm
# this is intended.
gen_data %>% reduce(function(y, x) y + ifelse(runif(1) < 0.4, rnorm(1, mean = 2, sd = 1), 0)*x + rnorm(1000)) -> gen_data.Y

# 800/200 train/test split.
train_gen <- gen_data[1:800,]
test_gen <- gen_data[801:1000,]
train_resp <- as.data.frame(gen_data.Y)[1:800,]
test_resp <- as.data.frame(gen_data.Y)[801:1000,]

# Best-subset selection over all subset sizes 1..20.
bss_data <- regsubsets(x = train_gen, y = train_resp, nvmax = 20)
bss_summary <- summary(bss_data)

# Test design matrix with an explicit intercept column, named "(Intercept)"
# so its columns can be matched against names(coef(bss_data, id = i)).
test <- cbind(rep(1,length(test_gen[,1])), test_gen)
colnames(test) <- c("(Intercept)", colnames(test[-1]))

# Test MSE for each subset size i, predicting with only the selected columns.
test.bss.err = rep(NA, 20)
for (i in 1:20){
  coeff_bss = coef(bss_data, id = i)
  bss_pred = as.matrix(test[,names(coeff_bss)])%*%coeff_bss
  err_bss = (test_resp-bss_pred)^2
  test.bss.err[i] = mean(err_bss)
}

# Training MSE (RSS / n, blue) vs test MSE (red) by subset size.
x11()
plot((bss_summary$rss)/length(train_resp), col = "blue", type = "b", xlab = "Subset Size", ylab = "MSE")
lines(test.bss.err, col = 'red')
legend("topright", c("Training","Testing"),lty=c(1,1),lwd=c(2.5,2.5),col=c("blue","red"))
5b59b863ffd9d3a38088ef99ecf255c543f9a934
ecb273a343a5e98bc65673cbc48c32d9ecdb545d
/Exercises/Exerciseset1/dataSet1.R
48b2a1fcf56bdb7f3208499bdf776696b2565050
[ "MIT" ]
permissive
InseadDataAnalytics/INSEADAnalytics
703655246d9db4b6d9d08301b4be4d3a3d81cf46
0e602d5332e2b3fb7dc157473092315957b9bc3d
refs/heads/master
2023-05-25T06:53:48.650028
2023-01-28T16:57:01
2023-01-28T16:57:01
15,819,896
118
1,258
MIT
2021-02-01T14:53:11
2014-01-11T09:18:52
HTML
UTF-8
R
false
false
2,222
r
dataSet1.R
# rm(list=ls()) # Clean up the memory, if we want to rerun from scratch
# source("helpersSet1.R")

# Set to FALSE to load the previously saved "DataSet1.Rdata" instead of
# downloading the data again.
getdata.fromscratch <- TRUE

# Data source passed to getSymbols(); see help(getSymbols). Depending on the
# website we may need to change the stock tickers' representation.
website_used <- "yahoo"
# Tickers to download; other examples are "GOOG", "GS", "TSLA", "FB", "MSFT".
mytickers <- c("SPY", "AAPL")
startDate <- "2001-01-01"

if (getdata.fromscratch) {
  # Download SPY first: its trading days define the rows of every matrix.
  tmp <- as.matrix(try(getSymbols(Symbols = "SPY", from = startDate,
                                  src = website_used, auto.assign = FALSE)))

  # Preallocate prices/volumes: one row per SPY trading day, one column per
  # ticker. StockVolume is copied before prices are filled, so it starts at 0.
  StockPrices <- matrix(0, nrow = nrow(tmp), ncol = length(mytickers))
  colnames(StockPrices) <- mytickers
  rownames(StockPrices) <- rownames(tmp)
  StockVolume <- StockPrices
  StockPrices[, 1] <- tmp[, 6]  # adjusted close of SPY

  for (ticker_index in seq_along(mytickers)) {
    ticker_to_get <- mytickers[ticker_index]
    print(paste("\nDownloading ticker ", ticker_to_get, " ..."))
    # `src` added for consistency with the SPY download above (the original
    # loop silently fell back to the default source).
    tmpdata <- as.matrix(try(getSymbols(Symbols = ticker_to_get, from = startDate,
                                        src = website_used, auto.assign = FALSE)))
    if (!inherits(tmpdata, "try-error")) {
      # Align on the SPY trading days; days missing for this ticker stay 0.
      therownames <- intersect(rownames(tmpdata), rownames(StockPrices))
      tmpdata[is.na(tmpdata)] <- 0
      StockPrices[therownames, ticker_index] <- tmpdata[therownames, 6] # adjusted close price
      StockVolume[therownames, ticker_index] <- tmpdata[therownames, 5] # shares volume for now - need to convert to dollars later
    } else {
      cat(ticker_to_get, " NOT found")
    }
  }

  # Daily returns, using the simple percentage-difference approach. Note that
  # this removes the first day, as we have no way to get the returns then!
  StockReturns <- ifelse(head(StockPrices, -1) != 0,
                         (tail(StockPrices, -1) - head(StockPrices, -1)) / head(StockPrices, -1),
                         0)
  rownames(StockReturns) <- tail(rownames(StockPrices), -1)  # shift dates by 1 day

  # Now remove the first day from the other data too.
  StockPrices <- StockPrices[rownames(StockReturns), ]
  # BUG FIX: the original line read `StockVolume = StockPrices[...]`, which
  # silently overwrote every downloaded volume with the price series.
  StockVolume <- StockVolume[rownames(StockReturns), ]

  colnames(StockPrices) <- mytickers
  colnames(StockVolume) <- mytickers
  colnames(StockReturns) <- mytickers
  save(StockReturns, StockPrices, StockVolume, file = "DataSet1.Rdata")
} else {
  load("DataSet1.Rdata")
}
9d098293abb32ad1a237bc77541ffec608e38048
971c89f0dab0f9c37ca4167cc80db428b0e985d9
/tsHydro/man/get.TS.Rd
0a570e48c38b808aa6b0ea5c60eb4ef658883746
[ "BSD-2-Clause" ]
permissive
JiangNguyen/tshydro
a75fe50061fb7f014d3df4a7ddf9e8154e13998f
4da7123ed9c4b9dbbe073fe14b9bf860f674562c
refs/heads/master
2023-04-05T06:59:59.643516
2021-04-12T13:56:00
2021-04-12T13:56:00
null
0
0
null
null
null
null
UTF-8
R
false
false
1,112
rd
get.TS.Rd
\name{get.TS} \alias{get.TS} \title{ get.TS } \description{ Estimate the model parameters and return the estimated water levels } \usage{ get.TS(dat, init.logsigmarw = 0, init.logSigma = 10, init.logit = log(0.3/(1 - 0.3)), priorHeight=numeric(0), priorSd=numeric(0), estP = FALSE, weights=rep(1,nrow(dat))) } \arguments{ \item{dat}{The data.frame containing at least the columns \code{height}, \code{time}, and \code{track} } \item{init.logsigmarw}{Initial value for the log of the standard deviation of the random walk } \item{init.logSigma}{ Initial value for the log of the standard deviation of the observation noise } \item{init.logit}{ Initial value for the logit of the fraction of outliers } \item{priorHeight}{ ... } \item{priorSd}{ ... } \item{estP}{ A logic value \code{FALSE} if the outlier fraction is kept fixed at its initial value } \item{weights}{ Optional vector of weights. } } \details{ ... } \value{ An object of class tsHydro } \references{ my paper } \author{ Karina Nielsen } \note{ ... } \seealso{ ... } \examples{ data(lakelevels) get.TS(lakelevels) }
24de8f680ab4e0173c0fce5051c7d083d45cd2bd
1aabda4705950adef05004648a5212b3562fd9ee
/dataExploration.R
ca37ed42a5070b455912bfa2631d4fb62bb38375
[]
no_license
longyap/r_code_dumpster.tm
ffcc7081be2cdc4c440f509bacf663823c051d34
d791507b844e2fd2e05f61341f48ba91c61025bc
refs/heads/master
2023-04-24T14:40:45.008198
2021-05-14T16:05:17
2021-05-14T16:05:17
367,414,700
0
0
null
null
null
null
UTF-8
R
false
false
1,448
r
dataExploration.R
# Iris data exploration tutorial: reading, inspecting, and subsetting a
# data.frame with base R.

# Install ggplot2 only when it is missing, instead of reinstalling on every
# run of the script.
if (!requireNamespace("ggplot2", quietly = TRUE)) {
  install.packages("ggplot2")
}
library(ggplot2)

# Read the iris data set. header = FALSE: column names are assigned below.
# NOTE(review): if IRIS.csv actually contains a header row, it will be read
# in as a data row here -- verify against the file.
sample_data <- read.csv("C:\\Users\\long1\\Documents\\year2\\sem1\\pfda\\tutorial_note\\IRIS.csv", header = FALSE)
sample_data

# Assign column names.
names(sample_data) <- c("SEPAL_LENGTH", "SEPAL_WIDTH", "PETAL_LENGTH",
                        "PETAL_WIDTH", "SPECIES")
sample_data

# Different viewing methods.
head(sample_data)
head(sample_data, 10)
tail(sample_data)     # BUG FIX: was tail(saample_data) -- undefined object
tail(sample_data, 4)

# View all data / view in a table.
sample_data
View(sample_data)

# Column names, storage type, and dimensions.
names(sample_data)
class(sample_data)
length(sample_data)   # number of columns (a data.frame is a list of columns)
ncol(sample_data)
nrow(sample_data)

# Encode species as a factor.
iris_Species <- factor(sample_data$SPECIES)
iris_Species

sample_data$SEPAL_WIDTH
max(sample_data$SEPAL_WIDTH)
min(sample_data$SEPAL_WIDTH)
# Range (max - min) of sepal width. The original called this `avg`, but it is
# a range, not an average.
sepal_width_range <- max(sample_data$SEPAL_WIDTH) - min(sample_data$SEPAL_WIDTH)
sepal_width_range

sample_data[2, 3]    # 2nd row, 3rd column
sample_data[142, ]   # whole row 142

summary(sample_data$SEPAL_LENGTH)
summary(sample_data)

# Filter rows by a logical condition on SPECIES.
sample_data[sample_data$SPECIES == "Iris-versicolor", ]
sample_data[sample_data$SPECIES == "Iris-setosa", ]
# First rows of each species subset.
subset(sample_data[sample_data$SPECIES == "Iris-versicolor", ])[1:6, ]
subset(sample_data[sample_data$SPECIES == "Iris-setosa", ])[1:13, ]
# Rows with sepal length greater than 4.
sample_data[sample_data$SEPAL_LENGTH > 4, ]
0aaf76c23af929c0344c02bbc741fe7bddc05742
35e0fb388c1fab0bbf6579c15257672939470d97
/man/getPolyhedraRDSPath.Rd
9b9e5565ba7aa4130f3bda50e7df017d44a670eb
[ "MIT" ]
permissive
leobelen/Rpolyhedra
714ae1602a8f39fa3e0adaa9b7cf1926c056aae2
5b71cd56f0c4f5e6ba0c08e2823aa210e512c3b4
refs/heads/master
2021-07-18T00:20:43.627171
2017-10-26T15:41:35
2017-10-26T15:41:35
null
0
0
null
null
null
null
UTF-8
R
false
true
391
rd
getPolyhedraRDSPath.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/polyhedra-lib.R \name{getPolyhedraRDSPath} \alias{getPolyhedraRDSPath} \title{Get the path of Polyhedra RDS file} \usage{ getPolyhedraRDSPath(polyhedra.rds.filename = "polyhedra.RDS") } \arguments{ \item{polyhedra.rds.filename}{filename of polyhedra database} } \description{ Get the path of Polyhedra RDS file }
eab18bb7aa4905809a9f4b858588e155733ac859
d31dba836cff6c972ecc435a46d1878e0ece3b7e
/plot1.R
c8c0b6b87a3932aef6c3f299112ce1d01f7c3a0c
[]
no_license
lukasmeyeruk/ExData_Plotting1
968831ab7a09ae23a2349493c8998322cf8ce544
5a3cce5292c2f20faca37743bf8331523536032a
refs/heads/master
2021-01-19T23:41:17.864110
2017-04-21T19:52:44
2017-04-21T19:52:44
89,018,604
0
0
null
2017-04-21T19:49:52
2017-04-21T19:49:52
null
UTF-8
R
false
false
422
r
plot1.R
# Exploratory Data Analysis, plot 1: histogram of Global Active Power for
# 2007-02-01 and 2007-02-02.

# Read the semicolon-separated file; '?' encodes missing values.
data <- read.delim('./household_power_consumption.txt', header = TRUE,
                   sep = ";", na.strings = '?')

# Parse the Date column (dd/mm/yyyy).
data$Date <- as.Date(data$Date, "%d/%m/%Y")
# The original also converted Time with strptime(), storing an unused POSIXlt
# column inside the data.frame (an anti-pattern, and the column plays no part
# in this plot) -- removed.

# Keep only the two days of interest.
data_sub <- data[data$Date >= as.Date('2007-02-01') &
                 data$Date <= as.Date('2007-02-02'), ]

# Draw the histogram straight to a PNG device.
png('plot1.png')
hist(data_sub$Global_active_power, col = "red",
     main = "Global Active power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
0493d5909b316efefa3ce60cd2fd943584c965f0
9bca0860c54b3a3b444861b75f09eae6d8b1cbf7
/network_meta.R
bcb4205eec4d24547057749a09e06bc504edeb78
[]
no_license
lizhengxiao/GammaMDN
c38a1ce373824d1cce909806a8e88d0ef5b59242
f7eba6b6c9202bfa3d4db7459b05d5fc6662f6e6
refs/heads/main
2023-04-10T22:25:05.954562
2021-04-14T18:46:11
2021-04-14T18:46:11
null
0
0
null
null
null
null
ISO-8859-13
R
false
false
3,553
r
network_meta.R
#--------------------------------------------------------------------------------------------------------------------
# META MODEL FOR GAMMA MDN
# Construct NN to replicate the model achieved with EM_NB
#--------------------------------------------------------------------------------------------------------------------
# AUTHOR: ŁUKASZ DELONG (SGH WARSAW SCHOOL OF ECONOMICS)
# e-mail: lukasz.delong@sgh.waw.pl
# DATE: 14TH APRIL 2021 (VERSION 1)
# BASED ON THE PAPER:
# Ł. DELONG, M. LINDHOLM, M.W. WUTHRICH, 2021,
# GAMMA MIXTURE DENSITY NETWORKS AND THEIR APPLICATION TO MODELLING INSURANCE CLAIM AMOUNTS
# AVAILABLE ON https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3705225
#-------------------------------------------------------------------------------------------------------------------
# This script expects the following to be defined by the calling script (not
# visible here): `algorithm_method`, `inputs` / `inputs_nn` (keras input
# layers), the `neurons_*` layer sizes, `regularization_rate`, and
# `no_densities` (number of Gamma mixture components) -- confirm in the
# surrounding project code.

# algorithm_method == 1: a single shared network with three tanh hidden
# layers emits every distribution parameter through one linear output of
# width 1 + 2*no_densities (presumably no_densities mixture probabilities,
# no_densities shape outputs, and one scale output -- the exact split is
# fixed by the loss function elsewhere; TODO confirm).
if (algorithm_method==1){

  model_parameters_final_linear=inputs_nn%>%
    layer_dense(units=neurons_1,activation='tanh',
                kernel_regularizer = regularizer_l2(l = regularization_rate))%>%
    layer_dense(units=neurons_2,activation='tanh',
                kernel_regularizer = regularizer_l2(l = regularization_rate))%>%
    layer_dense(units=neurons_3,activation='tanh',
                kernel_regularizer = regularizer_l2(l = regularization_rate))%>%
    layer_dense(units=1+2*no_densities,activation='linear')

  model_parameters_final_linear<-keras_model(inputs=inputs,outputs=model_parameters_final_linear)
}

# algorithm_method == 2: three separate subnetworks -- mixture probabilities
# (width no_densities), alpha parameters (width no_densities), and a single
# beta output -- whose linear outputs are concatenated into one output layer.
if (algorithm_method==2){

  # Subnetwork for the mixture probabilities (shares layer sizes with alpha).
  model_probabilities_final_linear=inputs_nn%>%
    layer_dense(units=neurons_1_alpha_prob,activation='tanh',
                kernel_regularizer = regularizer_l2(l = regularization_rate))%>%
    layer_dense(units=neurons_2_alpha_prob,activation='tanh',
                kernel_regularizer = regularizer_l2(l = regularization_rate))%>%
    layer_dense(units=neurons_3_alpha_prob,activation='tanh',
                kernel_regularizer = regularizer_l2(l = regularization_rate))%>%
    layer_dense(units=no_densities,activation='linear')

  # Subnetwork for the alpha (shape-related) parameters.
  model_alpha_final_linear=inputs_nn%>%
    layer_dense(units=neurons_1_alpha_prob,activation='tanh',
                kernel_regularizer = regularizer_l2(l = regularization_rate))%>%
    layer_dense(units=neurons_2_alpha_prob,activation='tanh',
                kernel_regularizer = regularizer_l2(l = regularization_rate))%>%
    layer_dense(units=neurons_3_alpha_prob,activation='tanh',
                kernel_regularizer = regularizer_l2(l = regularization_rate))%>%
    layer_dense(units=no_densities,activation='linear')

  # Subnetwork for the single beta (scale-related) output, with its own
  # layer sizes.
  model_beta_final_linear=inputs_nn%>%
    layer_dense(units=neurons_1_beta,activation='tanh',
                kernel_regularizer = regularizer_l2(l = regularization_rate))%>%
    layer_dense(units=neurons_2_beta,activation='tanh',
                kernel_regularizer = regularizer_l2(l = regularization_rate))%>%
    layer_dense(units=neurons_3_beta,activation='tanh',
                kernel_regularizer = regularizer_l2(l = regularization_rate))%>%
    layer_dense(units=1,activation='linear')

  # Concatenate the three heads into a single output tensor.
  model_parameters_final_linear=list(model_probabilities_final_linear,
                                     model_alpha_final_linear,
                                     model_beta_final_linear)%>%layer_concatenate

  model_parameters_final_linear<-keras_model(inputs=inputs,outputs=model_parameters_final_linear)
}

print("Meta network built")
599456bf973ca4fd3d5fc46b111c667bdc40a469
d4d39f004037e46943a26e5a552be326257bbb04
/GeneNetworkModels/Dro-ASC-6d/Input/002/Initial.R
1416d8bf62bc7ecc007ddbe0cf43eb055ef6de57
[ "MIT" ]
permissive
CyclicDynamicalSystems/DynamicalSystemsPortraits
646a64774a41fb0dc7a35774eaa11db0f6ae42e5
e78b4ffad52dfb03ea5ab027491f73d027fff838
refs/heads/master
2021-01-12T19:52:59.939844
2014-06-22T15:09:42
2014-06-22T15:09:42
15,503,465
0
0
null
null
null
null
UTF-8
R
false
false
606
r
Initial.R
set.seed(1)

# Number of initial states (trajectories) to simulate.
start.count <- 6

# Initial conditions: one row per trajectory (t1..t6), one column per model
# variable. `var.count` and `var.names` are defined elsewhere in the project.
start <- matrix(
  c(0.10, 0.37, 0.57, 0.90, 0.20, 0.89,
    0.28, 0.50, 0.50, 0.06, 0.20, 0.17,
    0.30, 0.90, 1.50, 0.49, 0.71, 0.99,
    0.50, 0.77, 0.93, 0.21, 0.65, 0.12,
    0.80, 0.38, 0.01, 3.00, 0.86, 0.34,
    0.90, 0.69, 0.47, 3.50, 0.43, 0.24),
  byrow = TRUE,
  ncol = var.count,
  dimnames = list(paste0("t", seq_len(start.count)), var.names)
)

# One plotting colour per trajectory.
traj.cols <- c("red", "darkgreen", "blue", "maroon", "orange", "brown")

# Time grid, presumably for the ODE integration and trajectory plots.
times <- seq(0, 100, by = 0.1)

# Grid and per-variable label offsets for the composition plot.
composition.x <- seq(0, 2, by = 0.005)
composition.labeloffset.x <- c(-0.05, -0.07, -0.05, -0.08, 0)
composition.labeloffset.y <- c(0, 0, -0.005, -0.01, 0.03)
7586e8b41249776af461c4ccdd891c76c1982fdb
36abeb2414c800ade2ef75967b1344baca73f3b1
/man/utility.aggregate.bonusmalus.Rd
543baeea74a70deaa9abeb7f3e163ae85accdfa3
[]
no_license
cran/utility
e4461d22df7442932322dbf1070cc81ab3d028d1
cb7286d065e84de998b046670e22b0ec5d415c4d
refs/heads/master
2021-03-12T23:16:23.266301
2020-03-09T09:00:02
2020-03-09T09:00:02
17,700,709
0
0
null
null
null
null
UTF-8
R
false
false
6,452
rd
utility.aggregate.bonusmalus.Rd
\name{utility.aggregate.bonusmalus} \alias{utility.aggregate.bonusmalus} \title{Bonus-malus aggregation of values or utilities} \description{ Function to perform an aggregation of valus or utilities that considers some of the inputs only as bonus (only considered if value is larger then the aggregated value of the non bonus or malus input) or malus (only considered if value is smaller then the aggregated value of the non bonus or malus input). } \usage{ utility.aggregate.bonusmalus(u,par,def.agg="utility.aggregate.add") } \arguments{ \item{u}{ numeric vector of values or utilities to be aggregated. } \item{par}{ numeric vector combining the parameters of the default aggregation technique (see argument \code{def.agg}) with those specifying the bonus-malus behaviour. The arguments of \code{def.agg}) must match the number of arguments of this function for the number of inputs reduced to those that are not treated as bonus or malus. This parameter vector is then appended by the parameters characterizing the bonus-malus behavior. This is a parameter vector of the same length as the number of sub-objectives. Its elements must be NA for the sub-objectives onsidered for the default aggregation technique, the weights relative to the aggregated value of the non-bonus and non-malus sub-objectives for the sub-objectives to be considered as bonus objectives, and the weights with a negative sign for those to be considered as malus objectives. Note that the weights of the bonus or malus attributes are relative to the aggregated result of the non-bonus and non-malus inputs and the negative signs will only be used for identifying malus sub-objectives and will be eliminated when calculating the weighted mean. } \item{def.agg}{ (optional) character string specifying the name of the function used for aggregation of the non-bonus and non-malus sub-objectives. 
Note that for use of this aggregation technique in the function \code{\link{utility.aggregation.create}}, this argument has to be specified as the input argument \code{def.agg} (default aggregation) unless it should be additive (default). } } \value{ The function returns the aggregated value or utility. } \details{ The aggregation function is defined by \deqn{u = \frac{\displaystyle u^{\mathrm{agg}}_{i \notin b,i \notin m} + \sum_{\begin{array}{l}i \in b \wedge u_i > u^{\mathrm{agg}}_{i \notin b,i \notin m}\\i \in m \wedge u_i < u^{\mathrm{agg}}_{i \notin b,i \notin m}\end{array}}\mid w_i \mid u_i}{\displaystyle 1 + \sum_{\begin{array}{l}i \in b \wedge u_i > u^{\mathrm{agg}}_{i \notin b,i \notin m}\\i \in m \wedge u_i < u^{\mathrm{agg}}_{i \notin b,i \notin m}\end{array}}\mid w_i \mid}}{u = additive aggregation between the aggregation result of the values or utilities of the non-bonus and non-malus sub-objectives and the values of bonus sub-objectives that are larger than this aggregated value, and the values of malus sub-objectives that are smaller than the aggregated value} The following figure shows examples of the behaviour of this aggregation function for the two-dimensional case:\cr \if{html}{\figure{aggregationbonusmalus.png}{options: width=80\%}} \if{latex}{\figure{aggregationbonusmalus.pdf}{options: width=5in}} } \note{ This is the same function as \code{\link{utility.aggregate.cobbdouglas}} } \references{ Short description of the package: \cr\cr Reichert, P., Schuwirth, N. and Langhans, S., Constructing, evaluating and visualizing value and utility functions for decision support, Environmental Modelling & Software 46, 283-291, 2013. \cr\cr Description of aggregation techniques: \cr\cr Langhans, S.D., Reichert, P. and Schuwirth, N., The method matters: A guide for indicator aggregation in ecological assessments. Ecological Indicators 45, 494-507, 2014. \cr\cr Textbooks on the use of utility and value functions in decision analysis: \cr\cr Keeney, R. L. 
and Raiffa, H. Decisions with Multiple Objectives - Preferences and Value Tradeoffs. John Wiley & Sons, 1976. \cr\cr Eisenfuehr, F., Weber, M. and Langer, T., Rational Decision Making, Springer, Berlin, 2010. } \author{ Peter Reichert <peter.reichert@eawag.ch> } \seealso{ Constructor of aggregation node: \cr\cr \code{\link{utility.aggregation.create}} \cr\cr Aggregation techniques provided by uncsim: \cr\cr \code{\link{utility.aggregate.add}} for additive aggregation (weighted arithmetic mean), \cr \code{\link{utility.aggregate.min}} for minimum aggregation, \cr \code{\link{utility.aggregate.max}} for maximum aggregation, \cr \code{\link{utility.aggregate.geo}} or \code{\link{utility.aggregate.cobbdouglas}} for geometric or Cobb-Douglas aggregation (weighted geometric mean), \cr \code{\link{utility.aggregate.geooff}} for geometric aggregation with offset, \cr \code{\link{utility.aggregate.revgeo}} for reverse geometric aggregation, \cr \code{\link{utility.aggregate.revgeooff}} for reverse geometric aggregation with offset, \cr \code{\link{utility.aggregate.harmo}} for harmonic aggregation (weighted harmonic mean), \cr \code{\link{utility.aggregate.harmooff}} for harmonic aggregation with offset, \cr \code{\link{utility.aggregate.revharmo}} for reverse harmonic aggregation, \cr \code{\link{utility.aggregate.revharmooff}} for reverse harmonic aggregation with offset, \cr \code{\link{utility.aggregate.mult}} for multiplicative aggregation, \cr \code{\link{utility.aggregate.mix}} for a mixture of additive, minimum, and geometric aggregation, \cr \code{\link{utility.aggregate.addmin}} for a mixture of additive and minimum aggregation. 
\cr \code{\link{utility.aggregate.addpower}} for additive power aggregation (weighted power mean), \cr \code{\link{utility.aggregate.revaddpower}} for reverse additive power aggregation, \cr \code{\link{utility.aggregate.addsplitpower}} for splitted additive power aggregation, \cr \code{\link{utility.aggregate.revaddsplitpower}} for reverse splitted additive power aggregation, \cr \code{\link{utility.aggregate.bonusmalus}} for an aggregation technique that considers some of the values or utilities of sub-objectives only as bonus or malus. \cr } \examples{ utility.aggregate.bonusmalus(c(0.2,0.8), par=c(1,NA,1)) utility.aggregate.bonusmalus(c(0.2,0.8), par=c(1,1,NA)) utility.aggregate.bonusmalus(c(0.2,0.8), par=c(1,NA,-1)) utility.aggregate.bonusmalus(c(0.2,0.8), par=c(1,-1,NA)) }
999796bff5695e0e8e747ee82f92a59f1e8a0500
5567e79bdc5f5ae037fe310849e60f6b681e3092
/man/util_plotTheme.Rd
54c3fe4b67c057245b297b4e83d7f8adbd6ca2a9
[]
no_license
IanMadlenya/PortfolioEffectHFT
baf63cd36761d50ccbb74fdb1e7937054aadb355
4bf854cc3858a822bf75fa1c03ffe00aa47165d5
refs/heads/master
2020-12-30T15:55:13.066226
2017-03-24T18:54:25
2017-03-24T18:54:25
null
0
0
null
null
null
null
UTF-8
R
false
false
1,176
rd
util_plotTheme.Rd
\name{util_plotTheme} \alias{util_plotTheme} \title{Plot style settings for PortfolioEffect theme} \usage{util_plotTheme(base_size = 10, base_family = "sans", horizontal = TRUE, dkpanel = FALSE, bw = FALSE, axis.text.size=1.5, title.size=2, has.subtitle = FALSE) } \arguments{ \item{base_size}{Base font size.} \item{base_family}{Base font family.} \item{horizontal}{Horizontal alignment flag.} \item{dkpanel}{dkpanel flag.} \item{bw}{Black and white color scheme flag.} \item{axis.text.size}{Axis font size.} \item{title.size}{Title font size.} \item{has.subtitle}{Subtitle flag.} } \value{Void} \description{Customizable plot style for PortfolioEffect color theme.} \author{Kostin Andrey <andrey.kostin@portfolioeffect.com>} \examples{ \dontrun{ data(aapl.data) aapl.frame=data.frame(Data=aapl.data[,2],Time=as.POSIXct(aapl.data[,1]/1000, origin = "1970-01-01", tz = "America/New_York"),legend='AAPL') ggplot() + geom_line(data=aapl.frame, aes(x=Time,y=Data,col=legend))+ util_plotTheme()+util_colorScheme()+util_fillScheme() }} \keyword{PortfolioEffectHFT} %\concept{plot market data, plot} \keyword{util_plotTheme}
50dec6a4489444a466128336bca40bd16aa61e6b
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
/corpustools/R/search_contexts.r
b12e09d74ef1a789b1a6783b14d641e07f310fc4
[]
no_license
akhikolla/InformationHouse
4e45b11df18dee47519e917fcf0a869a77661fce
c0daab1e3f2827fd08aa5c31127fadae3f001948
refs/heads/master
2023-02-12T19:00:20.752555
2020-12-31T20:59:23
2020-12-31T20:59:23
325,589,503
9
2
null
null
null
null
UTF-8
R
false
false
9,261
r
search_contexts.r
#' Subset tCorpus token data using a query #' #' @description #' A convenience function that searches for contexts (documents, sentences), and uses the results to \link[=subset]{subset} the tCorpus token data. #' #' See the documentation for \link[=search_contexts]{search_contexts} for an explanation of the query language. #' #' \strong{Usage:} #' #' ## R6 method for class tCorpus. Use as tc$method (where tc is a tCorpus object). #' #' \preformatted{subset_query(query, feature = 'token', context_level = c('document','sentence','window'))} #' #' @param query A character string that is a query. See \link{search_contexts} for query syntax. #' @param feature The name of the feature columns on which the query is used. #' @param context_level Select whether the query and subset are performed at the document or sentence level. #' @param window If used, uses a word distance as the context (overrides context_level) #' @param copy If true, return modified copy of data instead of subsetting the input tcorpus by reference. #' #' @name tCorpus$subset_query #' @examples #' text = c('A B C', 'D E F. 
G H I', 'A D', 'GGG') #' tc = create_tcorpus(text, doc_id = c('a','b','c','d'), split_sentences = TRUE) #' #' ## subset by reference #' tc$subset_query('A') #' tc$meta #' #' ## using copy mechanic #' class(tc$tokens$doc_id) #' tc2 = tc$subset_query('A AND D', copy=TRUE) #' #' tc2$get_meta() #' #' tc$meta ## (unchanged) tCorpus$set('public', 'subset_query', function(query, feature='token', context_level=c('document','sentence'), window=NA, copy=F){ context_level = match.arg(context_level) if (!is.na(window)) { hits = self$search_features(query, feature=feature, context_level=context_level, mode='features') if (is.null(hits)) return(NULL) window = self$get_token_id(hits$hits$doc_id, hits$hits$token_id, window=window) out = self$subset(window, copy=copy) } else { hits = search_contexts(self, query, feature=feature, context_level=context_level) if (is.null(hits)) return(NULL) if (context_level == 'document'){ #self$select_meta_rows(self$get_meta('doc_id') %in% hits$hits$doc_id) .doc_ids = hits$hits$doc_id out = self$subset(subset_meta= doc_id %in% .doc_ids, copy=copy) } if (context_level == 'sentence'){ d = self$get(c('doc_id','sentence'), keep_df=T) d$i = 1:nrow(d) setkeyv(d, c('doc_id','sentence')) .rows = d[list(hits$hits$doc_id, hits$hits$sentence),]$i #self$select_rows(rows) out = self$subset(subset=.rows, copy=copy) } } invisible(out) }) ##################### ##################### #' Search for documents or sentences using Boolean queries #' #' @param tc a \code{\link{tCorpus}} #' @param query A character string that is a query. See details for available query operators and modifiers. Can be multiple queries (as a vector), in which case it is recommended to also specifiy the code argument, to label results. #' @param code If given, used as a label for the results of the query. Especially usefull if multiple queries are used. 
#' @param feature The name of the feature column #' @param context_level Select whether the queries should occur within while "documents" or specific "sentences". Returns results at the specified level. #' @param as_ascii if TRUE, perform search in ascii. #' @param verbose If TRUE, progress messages will be printed #' #' @details #' Brief summary of the query language #' #' The following operators and modifiers are supported: #' \itemize{ #' \item{The standaard Boolean operators: AND, OR and NOT. As a shorthand, an empty space can be used as an OR statement, so that "this that those" means "this OR that OR those". NOT statements stricly mean AND NOT, so should only be used between terms. If you want to find \emph{everything except} certain terms, you can use * (wildcard for \emph{anything}) like this: "* NOT (this that those)".} #' \item{For complex queries parentheses can (and should) be used. e.g. '(spam AND eggs) NOT (fish and (chips OR albatros))} #' \item{Wildcards ? and *. The questionmark can be used to match 1 unknown character or no character at all, e.g. "?at" would find "cat", "hat" and "at". The asterisk can be used to match any number of unknown characters. Both the asterisk and questionmark can be used at the start, end and within a term.} #' \item{Multitoken strings, or exact strings, can be specified using quotes. e.g. "united states"} #' \item{tokens within a given token distance can be found using quotes plus tilde and a number specifiying the token distance. e.g. "climate chang*"~10} #' \item{Alternatively, angle brackets (<>) can be used instead of quotes, which also enables nesting exact strings in proximity/window search} #' \item{Queries are not case sensitive, but can be made so by adding the ~s flag. e.g. COP~s only finds "COP" in uppercase. The ~s flag can also be used on quotes to make all terms within quotes case sensitive, and this can be combined with the token proximity flag. e.g. 
"Marco Polo"~s10} #' } #' #' @return A contextHits object, which is a list with $hits (data.frame with locations) and $queries (copy of queries for provenance) #' @export #' @examples #' text = c('A B C', 'D E F. G H I', 'A D', 'GGG') #' tc = create_tcorpus(text, doc_id = c('a','b','c','d'), split_sentences = TRUE) #' tc$tokens #' #' hits = search_contexts(tc, c('query label# A AND B', 'second query# (A AND Q) OR ("D E") OR I')) #' hits ## print shows number of hits #' hits$hits ## hits is a list, with hits$hits being a data.frame with specific contexts #' summary(hits) ## summary gives hits per query #' #' ## sentence level #' hits = search_contexts(tc, c('query label# A AND B', 'second query# (A AND Q) OR ("D E") OR I'), #' context_level = 'sentence') #' hits$hits ## hits is a list, with hits$hits being a data.frame with specific contexts #' #' \donttest{ #' #' ## query language examples #' #' ## single term #' search_contexts(tc, 'A')$hits #' #' search_contexts(tc, 'G*')$hits ## wildcard * #' search_contexts(tc, '*G')$hits ## wildcard * #' search_contexts(tc, 'G*G')$hits ## wildcard * #' #' search_contexts(tc, 'G?G')$hits ## wildcard ? #' search_contexts(tc, 'G?')$hits ## wildcard ? (no hits) #' #' ## boolean #' search_contexts(tc, 'A AND B')$hits #' search_contexts(tc, 'A AND D')$hits #' search_contexts(tc, 'A AND (B OR D)')$hits #' #' search_contexts(tc, 'A NOT B')$hits #' search_contexts(tc, 'A NOT (B OR D)')$hits #' #' #' ## sequence search (adjacent words) #' search_contexts(tc, '"A B"')$hits #' search_contexts(tc, '"A C"')$hits ## no hit, because not adjacent #' #' search_contexts(tc, '"A (B OR D)"')$hits ## can contain nested OR #' ## cannot contain nested AND or NOT!! #' #' search_contexts(tc, '<A B>')$hits ## can also use <> instead of "". 
#' #' ## proximity search (using ~ flag) #' search_contexts(tc, '"A C"~5')$hits ## A AND C within a 5 word window #' search_contexts(tc, '"A C"~1')$hits ## no hit, because A and C more than 1 word apart #' #' search_contexts(tc, '"A (B OR D)"~5')$hits ## can contain nested OR #' search_contexts(tc, '"A <B C>"~5')$hits ## can contain nested sequence (must use <>) #' search_contexts(tc, '<A <B C>>~5')$hits ## (<> is always OK, but cannot nest quotes in quotes) #' ## cannot contain nested AND or NOT!! #' #' #' ## case sensitive search #' search_contexts(tc, 'g')$hits ## normally case insensitive #' search_contexts(tc, 'g~s')$hits ## use ~s flag to make term case sensitive #' #' search_contexts(tc, '(a OR g)~s')$hits ## use ~s flag on everything between parentheses #' search_contexts(tc, '(a OR G)~s')$hits ## use ~s flag on everything between parentheses #' #' search_contexts(tc, '"a b"~s')$hits ## use ~s flag on everything between quotes #' search_contexts(tc, '"A B"~s')$hits ## use ~s flag on everything between quotes #' #' } search_contexts <- function(tc, query, code=NULL, feature='token', context_level=c('document','sentence'), verbose=F, as_ascii=F){ is_tcorpus(tc) context_level = match.arg(context_level) if (!feature %in% tc$names) stop(sprintf('Feature (%s) is not available. 
Current options are: %s', feature, paste(tc$feature_names, collapse=', '))) codelabel = get_query_code(query, code) query = remove_query_label(query) cols = if(context_level == 'sentence') c('doc_id','sentence') else c('doc_id') subcontext = if(context_level == 'sentence') 'sentence' else NULL hits = vector('list', length(query)) lookup_tables = list() for (i in 1:length(query)) { if (verbose) print(code[i]) q = parse_query_cpp(as.character(query[i])) lookup_tables = prepare_lookup_tables(tc, q, lookup_tables, feature = feature, as_ascii = as_ascii) h = recursive_search(tc, q, lookup_tables, subcontext=subcontext, feature=feature, mode = 'contexts', as_ascii=as_ascii) if (!is.null(h)) { h[, code := codelabel[i]] hits[[i]] = h } } hits = data.table::rbindlist(hits) if (nrow(hits) > 0) { setorderv(hits, cols) } else { hits = data.frame(code=factor(), doc_id=factor(), sentence=numeric()) } queries = data.frame(code=codelabel, query=query) contextHits(hits, queries) }
69f2b851d79c4a6d6bbc0af2e5d5e4b88d718176
dcad712069df1b69d480476ef394d951f113ef86
/man/drawBoxplot.Rd
07feca2e178644507fc8c9b066c3f05e3a140e52
[ "MIT" ]
permissive
KWB-R/kwb.plot
d0b174e827a8f0fbf13813aa0619b086aa37866d
6bb6137844431139c7574c4e9bec31be98aca1d1
refs/heads/master
2023-06-24T08:14:29.077416
2022-06-09T18:37:39
2022-06-09T18:37:39
100,889,917
0
0
MIT
2023-06-16T03:10:28
2017-08-20T21:33:31
R
UTF-8
R
false
true
1,200
rd
drawBoxplot.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/main.R \name{drawBoxplot} \alias{drawBoxplot} \title{Draw Boxplot Icon} \usage{ drawBoxplot( centre.x, centre.y, boxwidth.cm = 1, boxheight.cm = boxwidth.cm, whisker.cm = boxheight.cm ) } \arguments{ \item{centre.x}{x coordinate in user coordinates around which the box is to be drawn} \item{centre.y}{y coordinate in user coordinates around which the box is to be drawn} \item{boxwidth.cm}{width of the box in cm. Default: 1} \item{boxheight.cm}{height of the box in cm. Default: \emph{boxwidth.cm}} \item{whisker.cm}{length of the whiskers in cm. Default: \emph{boxheight.cm}} } \description{ draws a symmetric boxplot icon around a centre } \examples{ ### prepare a simple plot area plot(1:5) ### draw a box around the centre at (2, 2) with default proportions drawBoxplot(2, 2, boxwidth.cm = 1) ### draw a box around the centre at (3, 3) with differing width and height drawBoxplot(3, 3, boxwidth.cm = 2, boxheight.cm = 1) ### draw a box around the centre at (4, 4) with modified whisker lengths drawBoxplot(4, 4, boxwidth.cm = 0.5, boxheight.cm = 1.5, whisker.cm = 0.5) }
e5a6df31237caa804524d17ef1e22d4787842489
84af420719d4a303fc741025aa6b76a472ac32c7
/ExpressionGSEA/02_Run_ssGSEA_Metabolic_Pathways.R
7197b5ca4a1002191dac20eb6434a100161a5da5
[]
no_license
YingXu-FDU/MetabolicDependencies
50d17cde039d0ad5803cdc47831b46472b44119a
cce3f2aee333c1364df3c911577781bd158975da
refs/heads/main
2022-12-19T23:04:18.824889
2020-10-21T20:57:16
2020-10-21T20:57:16
null
0
0
null
null
null
null
UTF-8
R
false
false
16,794
r
02_Run_ssGSEA_Metabolic_Pathways.R
rm(list = ls()) setwd("/.../Data/") all_data_in <- data.table::fread(file = "CCLE_expression_19Q4.csv", header=T, sep = ",") cell_line_info <- read.csv(file = "sample_info.csv", header = T) all_pathways <- GSA::GSA.read.gmt("KEGG_metabolic_pathways.gmt") ## Change CCLE gene names to match Gene ID genes <- colnames(all_data_in) genes <- gsub("\\s*\\([^\\)]+\\)", "", genes) genes[1] <- "DepMap_ID" colnames(all_data_in) <- genes genes <- colnames(all_data_in) genes <- genes[-1] #Shorten culture media to get DMEM or RPMI cell_line_info$media <- substr(cell_line_info$culture_medium,1,4) mediums <- unique(cell_line_info$media) subset.cell.line.info <- dplyr::select(cell_line_info,matches("DepMap_ID"),matches("CCLE_Name"),matches("stripped_cell_line_name"), matches("culture_medium"),matches("media"), matches("culture_type")) all_data_in <- merge(subset.cell.line.info, all_data_in, by = "DepMap_ID") row.names(all_data_in) <- all_data_in$CCLE_Name #Tweak characters in culture type all_data_in$culture_type <- gsub("Suspensions","Suspension", all_data_in$culture_type) all_data_in$culture_type <- gsub("Semi-Adherent","Adherent", all_data_in$culture_type) all_data_in$culture_type <- gsub("Semi Adherent","Adherent", all_data_in$culture_type) all_data_in$culture_type <- gsub("semi-Adherent","Adherent", all_data_in$culture_type) all_data_in$culture_type <- gsub("Semi-adherent","Adherent", all_data_in$culture_type) mediums.filtered <- c("RPMI","DMEM") types.of.culture <- c("Adherent","Suspension") ssGSEA_custom <- function(input.df, gmt.list, num.permutations = 1000, stat.type = "Weighted"){ nperm = num.permutations #number of permutations if (stat.type == "Classic"){ score.weight = 0 } if (stat.type == "Weighted"){ score.weight = 1 } #Read in gene expression data #Genes should be first column, named "Gene" #Samples should be columns 2:N data_in <- input.df gmt.for.reformat <- gmt.list Gene.Sets <- t(plyr::ldply(gmt.for.reformat$genesets, rbind)) #reformat gmt list to desired 
format colnames(Gene.Sets) <- gmt.for.reformat$geneset.names Gene.Sets <- as.data.frame(Gene.Sets) testthat::expect_is(data_in, "data.frame") testthat::expect_is(Gene.Sets, "data.frame") GSEA.EnrichmentScore <- function(gene.list, gene.set, weighted.score.type = score.weight, correl.vector = NULL){ tag.indicators <- sign(match(gene.list, gene.set, nomatch = 0)) no.tag.indicator <- 1 - tag.indicators N <- length(gene.list) Nh <- numhits_pathway Nm <- N - Nh if (weighted.score.type == 0){ correl.vector <- rep(1,N) } alpha <- weighted.score.type correl.vector <- abs(correl.vector**alpha) sum.correl.tag <- sum(correl.vector[tag.indicators == 1]) norm.tag <- 1.0/sum.correl.tag norm.no.tag <- 1.0/Nm RES <- cumsum(tag.indicators * correl.vector * norm.tag - no.tag.indicator * norm.no.tag) max.ES <- max(RES) min.ES <- min(RES) if (max.ES > - min.ES) { # ES <- max.ES ES <- signif(max.ES, digits = 5) arg.ES <- which.max(RES) } else { # ES <- min.ES ES <- signif(min.ES, digits=5) arg.ES <- which.min(RES) } return(list(ES = ES, arg.ES = arg.ES, RES = RES, indicator = tag.indicators)) } #for real ES GSEA.EnrichmentScore2 <- function(gene.list, gene.set, weighted.score.type = score.weight, correl.vector = NULL) { N <- length(gene.list) Nh <- numhits_pathway Nm <- N - Nh loc.vector <- vector(length=N, mode="numeric") peak.res.vector <- vector(length=Nh, mode="numeric") valley.res.vector <- vector(length=Nh, mode="numeric") tag.correl.vector <- vector(length=Nh, mode="numeric") tag.diff.vector <- vector(length=Nh, mode="numeric") tag.loc.vector <- vector(length=Nh, mode="numeric") loc.vector[gene.list] <- seq(1, N) tag.loc.vector <- loc.vector[gene.set] tag.loc.vector <- sort(tag.loc.vector, decreasing = F) if (weighted.score.type == 0) { tag.correl.vector <- rep(1, Nh) } else if (weighted.score.type == 1) { tag.correl.vector <- correl.vector[tag.loc.vector] tag.correl.vector <- abs(tag.correl.vector) } else if (weighted.score.type == 2) { tag.correl.vector <- 
correl.vector[tag.loc.vector]*correl.vector[tag.loc.vector] tag.correl.vector <- abs(tag.correl.vector) } else { tag.correl.vector <- correl.vector[tag.loc.vector]**weighted.score.type tag.correl.vector <- abs(tag.correl.vector) } norm.tag <- 1.0/sum(tag.correl.vector) tag.correl.vector <- tag.correl.vector * norm.tag norm.no.tag <- 1.0/Nm tag.diff.vector[1] <- (tag.loc.vector[1] - 1) tag.diff.vector[2:Nh] <- tag.loc.vector[2:Nh] - tag.loc.vector[1:(Nh - 1)] - 1 tag.diff.vector <- tag.diff.vector * norm.no.tag peak.res.vector <- cumsum(tag.correl.vector - tag.diff.vector) valley.res.vector <- peak.res.vector - tag.correl.vector max.ES <- max(peak.res.vector) min.ES <- min(valley.res.vector) ES <- signif(ifelse(max.ES > - min.ES, max.ES, min.ES), digits=5) return(ES) } #for permutation ES Samples <- colnames(data_in) if (Samples[1] != "Gene"){ stop("Please ensure that your data frame is organized with the first column to be named 'Gene'") } Samples <- Samples[-1] Gene.Sets.All <- colnames(Gene.Sets) annotations <- matrix(data = 0, nrow = nrow(data_in), ncol = length(Gene.Sets.All)) colnames(annotations) <- Gene.Sets.All annotations <- as.data.frame(annotations) annotations <- cbind(data_in$Gene,annotations) colnames(annotations) <- c("Gene", Gene.Sets.All) annotations <- as.matrix(annotations) num.hits.pathways <- list() ### Annotate gene sets for (j in 1:length(Gene.Sets.All)){ temp.pathway <- Gene.Sets[,Gene.Sets.All[j]] for (i in 1:nrow(annotations)){ if (annotations[i,"Gene"] %in% temp.pathway){ annotations[i,j+1] = "X"; } } num.hits.pathways[[Gene.Sets.All[j]]] <- sum(annotations[,Gene.Sets.All[j]] == "X") } num.hits.pathways.df <- matrix(unlist(num.hits.pathways)) row.names(num.hits.pathways.df) = Gene.Sets.All num.gene.sets.under.5 <- which(num.hits.pathways.df < 5) if (length(num.gene.sets.under.5) > 1){ print("Warning: Removing gene sets with less than 5 genes observed in data set.") gene.sets.to.remove <- Gene.Sets.All[num.gene.sets.under.5] 
annotations[,which(colnames(annotations) %in% gene.sets.to.remove)] <- NULL } annotations <- as.data.frame(annotations) data_in <- merge(data_in, annotations, by = "Gene") data_in <- stats::na.omit(data_in) GSEA.Results.All.Samples <- matrix(data = NA, nrow = 0, ncol = 8) colnames(GSEA.Results.All.Samples) <- c("Sample","Gene.Set","KS","KS_Normalized", "p_value","Position_at_max", "FDR_q_value", "Leading_Edge_Genes") Mountain.Plot.Info.All.Samples <- list() rank_metric.All.Samples <- list() #Find out how many cores are available (if you don't already know) cores<-parallel::detectCores() #Create cluster with desired number of cores, leave one open for the machine #core processes cl <- snow::makeCluster(cores[1]-1) #Register cluster doSNOW::registerDoSNOW(cl) rm(annotations) data_in2 <- array(data = NA) for (u in 1:length(Samples)){ loop.time <- Sys.time() data_in2 <- cbind(dplyr::select(data_in, Samples[u], "Gene"),subset(data_in, select = Gene.Sets.All)) #select one Sample type and the genes and Gene.Sets.A.and.B data_in2[,Samples[u]] <- as.numeric(as.character(data_in2[,Samples[u]])) data_in2 <- data_in2[order(-data_in2[,Samples[u]]),] #sort by descending order for the rank metric rownames(data_in2) <- 1:nrow(data_in2) #reorder row indices for counting in for loop below ## Assuming first two columns in data table are Genes and Rank Metric (e.g. 
Foldchange, SNR) GSEA.Results <- matrix(data = NA, nrow = length(Gene.Sets.All), ncol = 8) colnames(GSEA.Results) <- c("Sample","Gene.Set","KS","KS_Normalized", "p_value","Position_at_max", "FDR_q_value", "Leading_Edge_Genes") GSEA.Results <- as.data.frame(GSEA.Results) GSEA.Results$Gene.Set <- Gene.Sets.All GSEA.Results$Sample <- Samples[u] ions <- nrow(data_in2) #for plotting ks_results_plot <- list() positions.of.hits <- list() #ks_results_plot <- as.data.frame(ks_results_plot) gene.list <- 1:ions rank_metric <- data_in2[,Samples[u]] #Save the rank metric pos_gene_set <- array(data = 0, dim = nrow(data_in2), dimnames = NULL); ## Calculate Real KS Statistic for (i in 1:length(Gene.Sets.All)){ data_in3 <- data_in2[,Gene.Sets.All[i]] numhits_pathway <- sum(data_in3 == "X"); #check to see if there is anything in the column (e.g. X) if (numhits_pathway > 1){ pos_gene_set <- which(data_in2[,Gene.Sets.All[i]] %in% c("X")) KS_real <- GSEA.EnrichmentScore(gene.list, pos_gene_set, weighted.score.type = score.weight, correl.vector = rank_metric) GSEA.Results[GSEA.Results$Gene.Set == Gene.Sets.All[i],]$KS <- KS_real$ES; GSEA.Results[GSEA.Results$Gene.Set == Gene.Sets.All[i],]$Position_at_max <- KS_real$arg.ES; ks_results_plot[[Gene.Sets.All[i]]] = KS_real$RES positions.of.hits[[Gene.Sets.All[i]]] = pos_gene_set ### Find leading edge genes (e.g. 
genes before position at max) if (KS_real$ES > 0){ leading.edge.positions <- pos_gene_set[which(pos_gene_set < KS_real$arg.ES)] leading.edge.genes <- data_in2[leading.edge.positions,"Gene"] GSEA.Results[GSEA.Results$Gene.Set == Gene.Sets.All[i],]$Leading_Edge_Genes <- paste(leading.edge.genes, collapse = ", ") } else if (KS_real$ES < 0){ leading.edge.positions <- pos_gene_set[which(pos_gene_set > KS_real$arg.ES)] leading.edge.genes <- data_in2[leading.edge.positions,"Gene"] GSEA.Results[GSEA.Results$Gene.Set == Gene.Sets.All[i],]$Leading_Edge_Genes <- paste(leading.edge.genes, collapse = ", ") } } } Mountain.Plot.Info <- list(MountainPlot = ks_results_plot, Position.of.hits = positions.of.hits) rm(pos_gene_set) rm(numhits_pathway) rm(data_in3) rm(KS_real) print("Calculating permutations...") pb <- utils::txtProgressBar(max = num.permutations, style = 3) progress <- function(n) utils::setTxtProgressBar(pb, n) opts <- list(progress = progress) KSRandomArray <- matrix(data = NA, nrow = nperm, ncol = length(Gene.Sets.All)) num.gene.sets.all <- length(Gene.Sets.All) `%dopar%` <- foreach::`%dopar%` KSRandomArray <- foreach::foreach(L = 1:nperm, .combine = "rbind",.options.snow = opts) %dopar% { temp.KSRandomArray <- matrix(data = NA, nrow = 1, ncol = num.gene.sets.all) for(i in 1:length(Gene.Sets.All)){ numhits_pathway <- length(positions.of.hits[[Gene.Sets.All[i]]]) pos_gene_set <- sample(1:ions,numhits_pathway) temp.KSRandomArray[,i] <- GSEA.EnrichmentScore2(gene.list, pos_gene_set, weighted.score.type = score.weight, correl.vector = rank_metric) } temp.KSRandomArray } colnames(KSRandomArray) <- Gene.Sets.All rm(opts) rm(pb) KSRandomArray <- data.frame(matrix(unlist(KSRandomArray), nrow = nperm, byrow = T)) colnames(KSRandomArray) <- Gene.Sets.All KSRandomArray <- stats::na.omit(KSRandomArray) print("Normalizing enrichment scores...") KSRandomArray <- as.data.frame(KSRandomArray) ###normalize the GSEA distribution KSRandomArray.Norm <- matrix(data = NA, nrow = 
nrow(KSRandomArray), ncol = ncol(KSRandomArray)) colnames(KSRandomArray.Norm) <- colnames(KSRandomArray) avg <- 0 KSRandomArray.temp <- 0 for (i in 1:ncol(KSRandomArray.Norm)){ avg <- 0 KSRandomArray.temp <- KSRandomArray[,i] pos.temp <- KSRandomArray.temp[which(KSRandomArray.temp >= 0)] neg.temp <- KSRandomArray.temp[which(KSRandomArray.temp < 0)] avg.pos <- mean(pos.temp) avg.neg <- mean(neg.temp) norm.pos.temp <- pos.temp / avg.pos norm.neg.temp <- neg.temp / avg.neg * -1 norm.perms <- c(norm.pos.temp,norm.neg.temp) KSRandomArray.Norm[,i] <- norm.perms } GSEA.NES.perms <- as.vector(KSRandomArray.Norm) rm(KSRandomArray.Norm) GSEA.NES.perms.pos <- GSEA.NES.perms[which(GSEA.NES.perms >= 0)] GSEA.NES.perms.neg <- GSEA.NES.perms[which(GSEA.NES.perms < 0)] rm(GSEA.NES.perms) percent.pos.GSEA <- sum(GSEA.Results$KS > 0) / length(GSEA.Results$KS) percent.neg.GSEA <- sum(GSEA.Results$KS < 0) / length(GSEA.Results$KS) # Calculate GSEA NES and p-value and FDR print("Calculating GSEA FDR...") for (i in 1:length(Gene.Sets.All)){ temp.gene.set <- Gene.Sets.All[i] temp.KS <- GSEA.Results[GSEA.Results$Gene.Set == temp.gene.set,]$KS if (temp.KS > 0){ pos.perms <- KSRandomArray[,temp.gene.set] pos.perms <- pos.perms[which(pos.perms > 0)] #p-val GSEA.Results[GSEA.Results$Gene.Set == temp.gene.set,]$p_value = signif(sum(pos.perms > temp.KS) / length(pos.perms),digits = 3) #NES GSEA.Results[GSEA.Results$Gene.Set == temp.gene.set,]$KS_Normalized = signif(temp.KS / mean(pos.perms), digits = 3) #FDR percent.temp <- sum(GSEA.NES.perms.pos > GSEA.Results[GSEA.Results$Gene.Set == temp.gene.set,]$KS_Normalized) / length(GSEA.NES.perms.pos) GSEA.Results[GSEA.Results$Gene.Set == temp.gene.set,]$FDR_q_value = ifelse(signif(percent.temp / percent.pos.GSEA, digits = 3) < 1, signif(percent.temp / percent.pos.GSEA, digits = 3), 1) } else if (temp.KS < 0){ neg.perms <- KSRandomArray[,temp.gene.set] neg.perms <- neg.perms[which(neg.perms < 0)] #p-val GSEA.Results[GSEA.Results$Gene.Set == 
temp.gene.set,]$p_value = signif(sum(neg.perms < temp.KS) / length(neg.perms),digits = 3) #NES GSEA.Results[GSEA.Results$Gene.Set == temp.gene.set,]$KS_Normalized = signif(temp.KS / mean(neg.perms) * -1, digits = 3) #FDR percent.temp <- sum(GSEA.NES.perms.neg < GSEA.Results[GSEA.Results$Gene.Set == temp.gene.set,]$KS_Normalized) / length(GSEA.NES.perms.neg) GSEA.Results[GSEA.Results$Gene.Set == temp.gene.set,]$FDR_q_value = ifelse(signif(percent.temp / percent.neg.GSEA, digits = 3) < 1, signif(percent.temp / percent.neg.GSEA, digits = 3), 1) } } GSEA.Results.All.Samples <- rbind(GSEA.Results.All.Samples,GSEA.Results) Mountain.Plot.Info.All.Samples <- c(Mountain.Plot.Info.All.Samples,Mountain.Plot.Info) rank_metric.All.Samples <- c(rank_metric.All.Samples,rank_metric) print(paste("Sample #: ", u)) end.loop.time <- Sys.time() total.loop.time <- signif(end.loop.time - loop.time, digits = 3) print(paste("Time per Sample:" , total.loop.time)) } snow::stopCluster(cl) rm(cl) return(list(GSEA.Results = GSEA.Results.All.Samples, Mountain.Plot.Info = Mountain.Plot.Info.All.Samples, ranking.metric = rank_metric.All.Samples)) } GSEA.res.all <- array() for (p in 1:length(types.of.culture)){ for (M in 1:length(mediums.filtered)){ data_in <- all_data_in[which(all_data_in$culture_type %in% types.of.culture[p]),] data_in <- data_in[which(data_in$media %in% mediums.filtered[M]),] #remove culture type, ccle name, media, depmap ID data_in <- dplyr::select(data_in, -matches("stripped_cell_line_name"), -matches("culture_medium"), -matches("media"), -matches("culture_type"), -matches("CCLE.Name"), -matches("DepMap_ID")) ## Center and scale data within culture type and media data_in <- scale(data_in, center = TRUE, scale = TRUE) #scale the data data_in <- as.data.frame(t(data_in)) data_in$Gene <- rownames(data_in) #reorder colnames for ssGSEA function data_in <- data_in[,c("Gene",colnames(data_in)[1:length(data_in)-1])] ssGSEA_temp <- ssGSEA_custom(data_in, all_pathways) GSEA.res.temp <- 
ssGSEA_temp$GSEA.Results GSEA.res.temp$Media <- mediums.filtered[M] GSEA.res.temp$Culture_type <- types.of.culture[p] GSEA.res.all <- rbind(GSEA.res.all, GSEA.res.temp) } } write.csv(GSEA.res.all, file = "CCLE_GSEA_Metabolic_Pathways.csv", row.names = FALSE)
cff4079c5bd80b26671eefd97d8c8b4bd268d2fc
ea31432bb67f7c13ea16b8f1e3e5f9c4c4f45f35
/maize.R
a95e91ebeb338c1bd4791d996b8e627152abe8b3
[]
no_license
kartikkwatra/profitabilityAnalysis
11ddae49031954b9186a40d7f9aa42c38987b797
c3f9f7cd58a0bdc6d2ebd3e406934c2a4bf5a510
refs/heads/master
2020-03-23T03:45:35.544838
2018-07-22T16:13:12
2018-07-22T16:13:12
141,047,388
0
0
null
null
null
null
UTF-8
R
false
false
5,642
r
maize.R
# maize Everything library(tidyverse) library(rvest) library(lubridate) # Get the data from file maizeCost <- readxl::read_xlsx("kharif.xlsx",sheet = "maize" ) unique(maizeCost$State) # Getting the prices from file maizePrices <- data.frame() cropName <- c("Maize") monthList <- c('September', 'October', 'November', 'December' ) yearList <- as.character(seq(2006,2014, by=1)) maizePrices <- combine_data(cropName,yearList,monthList,maizePrices,fmonth = 5) maizePrices <- clean_data(maizePrices) maizePrices <- maizePrices %>% rename(State = X1,Price = X2, Month = month) # No of months of Harvest for each crop monthMatrix <- readxl::read_xlsx("matrix.xlsx") harvestSummary <- monthMatrix %>% group_by(Crop,State) %>% summarise(num = n()) # Getting list of states which have atleast x price points per and with available costs maizePricesSubsetLarge <- maizePrices %>% group_by(fiscal,State) %>% filter( any( n() >= case_when( as.numeric(harvestSummary[ (harvestSummary$Crop == cropName) & (harvestSummary$State == State) , 3]) == 1 ~ 1, as.numeric(harvestSummary[ (harvestSummary$Crop == cropName) & (harvestSummary$State == State) , 3]) == 2 ~ 1, as.numeric(harvestSummary[ (harvestSummary$Crop == cropName) & (harvestSummary$State == State) , 3]) == 3 ~ 2, as.numeric(harvestSummary[ (harvestSummary$Crop == cropName) & (harvestSummary$State == State) , 3]) == 4 ~ 2, as.numeric(harvestSummary[ (harvestSummary$Crop == cropName) & (harvestSummary$State == State) , 3]) == 5 ~ 3, as.numeric(harvestSummary[ (harvestSummary$Crop == cropName) & (harvestSummary$State == State) , 3]) == 6 ~ 4, as.numeric(harvestSummary[ (harvestSummary$Crop == cropName) & (harvestSummary$State == State) , 3]) == 7 ~ 5, as.numeric(harvestSummary[ (harvestSummary$Crop == cropName) & (harvestSummary$State == State) , 3]) == 8 ~ 5 #TRUE ~ 100 ) ) ) # Checking for Min Max - Ideally should happen earlier: Just after creating the subset str(maizePricesSubsetLarge) maizePricesSubsetLarge$Price <- 
as.numeric(maizePricesSubsetLarge$Price) temp <- maizePricesSubsetLarge %>% group_by(State, fiscal) %>% summarise(min = min(Price), max = max(Price)) #maizeWSP <- maizePricesSubset %>% group_by(fiscal,State) #temp <- semi_join(maize,maizeCost, by = c("State" = "State", "fiscal" = "Year")) #unique(temp$State) # Now Cleaning for harvest maizePricesSubsetSmall <- filter_by_harvest(cropName,maizePricesSubsetLarge) # Imp to check or filter only those groups where the minimum number of data points is satisfied maizePricesSubsetSmaller <- maizePricesSubsetSmall %>% group_by(fiscal,State) %>% filter( any( n() >= case_when( as.numeric(harvestSummary[ (harvestSummary$Crop == cropName) & (harvestSummary$State == State) , 3]) == 1 ~ 1, as.numeric(harvestSummary[ (harvestSummary$Crop == cropName) & (harvestSummary$State == State) , 3]) == 2 ~ 1, as.numeric(harvestSummary[ (harvestSummary$Crop == cropName) & (harvestSummary$State == State) , 3]) == 3 ~ 2, as.numeric(harvestSummary[ (harvestSummary$Crop == cropName) & (harvestSummary$State == State) , 3]) == 4 ~ 2, as.numeric(harvestSummary[ (harvestSummary$Crop == cropName) & (harvestSummary$State == State) , 3]) == 5 ~ 3, as.numeric(harvestSummary[ (harvestSummary$Crop == cropName) & (harvestSummary$State == State) , 3]) == 6 ~ 4 ) ) ) # WSP for all # already grouped by fiscal year and state maizePricesSubsetSmaller$Price <- as.numeric(maizePricesSubsetSmaller$Price) maizePricesSubsetSmaller$fiscal <- as.integer(maizePricesSubsetSmaller$fiscal) maizeWSPSmaller <- maizePricesSubsetSmaller %>% summarise(avgWSP = mean(Price, na.rm = TRUE)) maizePricesSubsetLarge$Price <- as.numeric(maizePricesSubsetLarge$Price) maizePricesSubsetLarge$fiscal <- as.integer(maizePricesSubsetLarge$fiscal) maizeWSPLarge <- maizePricesSubsetLarge %>% summarise(avgWSP = mean(Price, na.rm = TRUE)) # Not joining temp <- anti_join(maizeWSPLarge,maizeWSPSmaller, by= c("State","fiscal")) maizeWSP <- maizeWSPSmaller # Now joining the Cost and WSP tables: Our 
Final Table maize <- inner_join(maizeCost,maizeWSP, by = c("State" = "State", "Year" = "fiscal" )) ###### ANALYSIS ######## # be careful of periods: should be comparable for both # Average annual growth rates of C2 #maizeCost$Year <- as.numeric(maizeCost$Year) #maizeCost <- maizeCost %>% group_by(State) %>% # arrange(State,Year) %>% # mutate(C2Growth = 100*((C2 - lag(C2,1))/lag(C2,1))/(Year - lag(Year,1) )) ## net margins maize <- maize %>% filter( !(State %in% c("Bihar","Maharashtra","Punjab")) ) %>% mutate( margin = avgWSP - C2, marginPercent = 100*margin/C2) mz <- maize %>% ggplot() + geom_bar( aes(x = State, y = marginPercent, fill = factor(Year), alpha = factor(Year), width = 0.6 ), stat = "identity", position = "dodge") + labs(x = "States", y = "Percentage Net Return in Rs/Quintal", title = "Net Returns in Maize growing states", fill = "Year", alpha = "Year") ggplotly(mz) # Average annual growth rates of avgWSP and C2 for the same period maize <- maize %>% group_by(State) %>% arrange(State,Year) %>% mutate(WSPGrowth = 100*((avgWSP - lag(avgWSP,1))/lag(avgWSP,1))/(Year - lag(Year,1) )) %>% mutate(C2Growth = 100*((C2 - lag(C2,1))/lag(C2,1))/(Year - lag(Year,1) )) maize %>% group_by(State) %>% summarise(C2AAGR = mean(C2Growth, na.rm = TRUE), WSPAAGR = mean(WSPGrowth, na.rm = TRUE), avgAnnualProfitMarginPercent = mean(marginPercent, na.rm = TRUE) )
81b3568203580df4fea736a44e4f3a909ef0d62a
31d2d467030565c44f4d28d42c0e4d225dececaa
/R/scoregpcm.R
aa59af45a814c4a4d5e532dc481a7ffc780c168d
[]
no_license
cran/ltm
84fd858915db9fe1506a40628f61e6500a21ed1c
dbbabfa99fa09ad94113856a6a5ae1535e7b817f
refs/heads/master
2022-02-25T01:10:01.747125
2022-02-18T08:40:02
2022-02-18T08:40:02
17,697,218
2
4
null
null
null
null
UTF-8
R
false
false
2,693
r
scoregpcm.R
## Score (gradient) vector of the log-likelihood for the Generalized Partial
## Credit Model, evaluated at the working parameter vector `thetas` under the
## given `constraint` ("gpcm", "1PL", or a fully constrained variant).
##
## NOTE(review): this function reads many objects from its enclosing
## environment rather than from its arguments: p (number of items), ncatg
## (categories per item), nfreqs, X (observed response patterns), Z (latent
## grid points), GHw (quadrature weights -- presumably Gauss-Hermite; confirm),
## obs (pattern frequencies), XX, IRT.param, and the helper betas.gpcm().
scoregpcm <-
function (thetas, constraint) {
    ## unpack the working parameter vector into per-item parameter vectors
    betas <- betas.gpcm(thetas, p, ncatg, constraint)
    ## per-item work spaces; note log.crf is allocated here but never used below
    eta <- eta1 <- log.crf <- num <- den <- vector("list", p)
    log.p.xz <- matrix(0, nfreqs, length(Z))
    na.ind <- is.na(X)
    ## Pass 1: for every item compute the category response probabilities on
    ## the latent grid and accumulate log p(x | z) across items.
    for (j in 1:p) {
        bt <- betas[[j]]
        nbt <- length(bt)
        ## linear predictors; when IRT.param is TRUE the last element of bt
        ## acts as a slope that scales the (Z - threshold) differences
        eta1[[j]] <- if (IRT.param) t(outer(Z, bt[-nbt], "-")) else matrix(Z, nbt - 1, length(Z), TRUE)
        eta[[j]] <- if (IRT.param) bt[nbt] * eta1[[j]] else outer(bt[-nbt], bt[nbt] * Z, "+")
        ## numerators of the GPCM category probabilities: exponentiated
        ## cumulative sums of the linear predictors over categories
        num[[j]] <- exp(apply(eta[[j]], 2, cumsum))
        if (!is.matrix(num[[j]]))
            num[[j]] <- t(num[[j]])
        den[[j]] <- 1 + colSums(num[[j]])
        ## category response function; first row is the baseline category
        crf <- rbind(1/den[[j]], num[[j]] / rep(den[[j]], each = ncatg[j] - 1))
        ## clamp probabilities away from 0 and 1 so log() stays finite
        eps <- .Machine$double.eps^(1/2)
        if (any(ind <- crf == 1))
            crf[ind] <- 1 - eps
        if (any(ind <- crf == 0))
            crf[ind] <- eps
        log.pr <- log(crf)
        xj <- X[, j]
        ## pick, for each response pattern, the log-probability of the
        ## category actually observed on item j
        log.pr <- log.pr[xj, ]
        ## missing responses contribute nothing to the pattern likelihood
        if (any(na.ind[, j]))
            log.pr[na.ind[, j], ] <- 0
        log.p.xz <- log.p.xz + log.pr
    }
    p.xz <- exp(log.p.xz)
    ## marginal pattern probabilities by quadrature, then posterior p(z | x)
    p.x <- c(p.xz %*% GHw)
    p.zx <- p.xz / p.x
    ## slope parameters are free (and need scores) only under these constraints
    check.alpha <- constraint == "gpcm" || constraint == "1PL"
    if (check.alpha)
        scores.alpha <- numeric(p)
    scores.deltas <- vector("list", p)
    ## Pass 2: per-item derivatives of the log-likelihood.
    for (j in 1:p) {
        if (check.alpha) {
            xj <- X[, j]
            etaj <- eta[[j]]
            eta1j <- apply(eta1[[j]], 2, cumsum)
            ## inner derivative with respect to the slope parameter of item j
            I1 <- rbind(0, eta1j) - rep(colSums(num[[j]] * eta1j) / den[[j]], each = ncatg[j])
            scores.alpha[j] <- - sum((p.zx * I1[xj, ] * obs) %*% GHw, na.rm = TRUE)
        }
        alpha <- betas[[j]][ncatg[j]]
        ii <- seq(1, ncatg[j] - 1)
        ## ind1/ind2 map each threshold parameter to all the categories it
        ## enters through the cumulative sums computed in pass 1
        ind1 <- unlist(sapply(ii, ":", b = ncatg[j] - 1))
        ind2 <- rep(ii, rev(ii))
        Part1 <- if (IRT.param) - alpha * XX[[j]] else XX[[j]]
        Part2 <- if (IRT.param) - alpha * rowsum(num[[j]][ind1, , drop = FALSE], ind2) else rowsum(num[[j]][ind1, , drop = FALSE], ind2)
        ## one score value per threshold parameter of item j
        scores.deltas[[j]] <- lapply(ii, function (i) {
            I2 <- outer(Part1[, i], Part2[i, ] / den[[j]], "-")
            if (any(na.ind[, j]))
                I2[na.ind[, j], ] <- 0
            - sum((p.zx * I2 * obs) %*% GHw)
        })
    }
    ## Assemble the score vector in the layout the optimizer expects:
    ## thresholds only, thresholds interleaved with per-item slopes ("gpcm"),
    ## or thresholds followed by the single pooled slope score ("1PL").
    if (!check.alpha) {
        unlist(scores.deltas)
    } else {
        if (constraint == "gpcm") {
            unlist(mapply(function (x, y) c(x, y), scores.deltas,
                scores.alpha, SIMPLIFY = FALSE, USE.NAMES = FALSE))
        } else {
            c(unlist(scores.deltas), sum(scores.alpha))
        }
    }
}
935c8123f983a67d42b3818278d3bf4fe6f6a73b
6e3587f9cbe1c5c963252f58c579c101a8037d88
/man/cacheBinomConfIntervals.Rd
dfe1053c97bc33d6d01817f984eaa31dafebe3aa
[]
no_license
databio/epihet
b628b87ac44d93df22890b61c5020237a962e9b1
e7c0e4d667ec6c31d2d26752b1168ed45aa259aa
refs/heads/master
2021-03-19T14:57:00.981488
2018-02-26T14:10:41
2018-02-26T14:10:41
84,873,955
1
1
null
null
null
null
UTF-8
R
false
true
931
rd
cacheBinomConfIntervals.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/intervals.R \name{cacheBinomConfIntervals} \alias{cacheBinomConfIntervals} \title{Cache binomial confidence intervals} \usage{ cacheBinomConfIntervals(maxHits, maxTotal, confLevel) } \arguments{ \item{maxHits}{Maximum methylation count} \item{maxTotal}{Maximum coverage} \item{confLevel}{A decimal indicating the level of confidence to be used while creating the cached binomial Bayes credibility interval; the default is .95 for 95 percent confidence} } \value{ A \code{data.table} object containing columns for upper and lower limits of a Bayesian binomial confidence interval for maximum methylation count and coverage; this serves as a cache that can replace the need to perform the computationally expensive probability estimation } \description{ Cache binomial confidence intervals } \examples{ cacheBinomConfIntervals(100,100, confLevel = .95) }
0b7ae10b23b08b7009941b9f6c2811f8c92754e8
3d563bfe96346c3cc46f0752ef3972bf0229a39d
/R/sequence_onehot.R
cd17cc81789a8b36ad1b4d48ff3c9604526f5195
[ "MIT" ]
permissive
jonthegeek/textrecipes
d65542344397c6392060919c7353ed4c2540df2c
a3e93d1aa528c77261a52aea7b88963e6ec5c90c
refs/heads/master
2021-08-22T21:27:46.664125
2021-06-12T18:55:17
2021-06-12T18:55:17
336,397,454
1
0
NOASSERTION
2021-02-05T21:51:39
2021-02-05T21:51:38
null
UTF-8
R
false
false
7,980
r
sequence_onehot.R
#' Generate the basic set of text features
#'
#' `step_sequence_onehot` creates a *specification* of a recipe step that
#'  will take a string and do one hot encoding for each character by position.
#'
#' @param recipe A recipe object. The step will be added to the
#'  sequence of operations for this recipe.
#' @param ... One or more selector functions to choose variables.
#'  For `step_sequence_onehot`, this indicates the variables to be encoded
#'  into a [tokenlist]. See [recipes::selections()] for more
#'  details. For the `tidy` method, these are not currently used.
#' @param role For model terms created by this step, what analysis
#'  role should they be assigned? By default, the function assumes
#'  that the new columns created by the original variables will be
#'  used as predictors in a model.
#' @param columns A list of tibble results that define the
#'  encoding. This is `NULL` until the step is trained by
#'  [recipes::prep.recipe()].
#' @param sequence_length A numeric, number of characters to keep before
#'  discarding. Defaults to 100.
#' @param padding 'pre' or 'post', pad either before or after each sequence.
#'  Defaults to 'pre'.
#' @param truncating 'pre' or 'post', remove values from sequences larger than
#'  sequence_length either in the beginning or in the end of the sequence.
#'  Defaults to 'pre'.
#' @param vocabulary A character vector, characters to be mapped to integers.
#'  Characters not in the vocabulary will be encoded as 0. Defaults to
#'  `letters`.
#' @param prefix A prefix for generated column names, default to "seq1hot".
#' @param skip A logical. Should the step be skipped when the
#'  recipe is baked by [recipes::bake.recipe()]? While all
#'  operations are baked when [recipes::prep.recipe()] is run, some
#'  operations may not be able to be conducted on new data (e.g.
#'  processing the outcome variable(s)). Care should be taken when
#'  using `skip = TRUE` as it may affect the computations for
#'  subsequent operations.
#' @param id A character string that is unique to this step to identify it
#' @param trained A logical to indicate if the recipe has been
#'  baked.
#' @return An updated version of `recipe` with the new step added
#'  to the sequence of existing steps (if any).
#' @examples
#' library(recipes)
#' library(modeldata)
#' data(okc_text)
#'
#' okc_rec <- recipe(~essay0, data = okc_text) %>%
#'   step_tokenize(essay0) %>%
#'   step_tokenfilter(essay0) %>%
#'   step_sequence_onehot(essay0)
#'
#' okc_obj <- okc_rec %>%
#'   prep()
#'
#' bake(okc_obj, new_data = NULL)
#'
#' tidy(okc_rec, number = 1)
#' tidy(okc_obj, number = 1)
#' @export
#' @details
#' The string will be capped by the sequence_length argument, strings shorter than
#' sequence_length will be padded with empty characters. The encoding will assign
#' an integer to each character in the vocabulary, and will encode accordingly.
#' Characters not in the vocabulary will be encoded as 0.
#'
#' @source \url{https://papers.nips.cc/paper/5782-character-level-convolutional-networks-for-text-classification.pdf}
#'
#' @family character to numeric steps
step_sequence_onehot <-
  function(recipe,
           ...,
           role = "predictor",
           trained = FALSE,
           columns = NULL,
           sequence_length = 100,
           padding = "pre",
           truncating = "pre",
           vocabulary = NULL,
           prefix = "seq1hot",
           skip = FALSE,
           id = rand_id("sequence_onehot")) {
    # validate the two direction arguments up front so users get a clear error
    if (length(padding) != 1 || !(padding %in% c("pre", "post"))) {
      rlang::abort("`padding` should be one of: 'pre', 'post'")
    }
    if (length(truncating) != 1 || !(truncating %in% c("pre", "post"))) {
      rlang::abort("`truncating` should be one of: 'pre', 'post'")
    }
    add_step(
      recipe,
      step_sequence_onehot_new(
        terms = ellipse_check(...),
        role = role,
        trained = trained,
        columns = columns,
        sequence_length = sequence_length,
        padding = padding,
        truncating = truncating,
        vocabulary = vocabulary,
        prefix = prefix,
        skip = skip,
        id = id
      )
    )
  }

# Internal constructor for the step object; called both by the user-facing
# specification above and by prep() when the step has been trained.
step_sequence_onehot_new <-
  function(terms, role, trained, columns, sequence_length, padding, truncating,
           vocabulary, prefix, skip, id) {
    step(
      subclass = "sequence_onehot",
      terms = terms,
      role = role,
      trained = trained,
      columns = columns,
      sequence_length = sequence_length,
      padding = padding,
      truncating = truncating,
      vocabulary = vocabulary,
      prefix = prefix,
      skip = skip,
      id = id
    )
  }

#' @export
prep.step_sequence_onehot <- function(x, training, info = NULL, ...) {
  col_names <- terms_select(x$terms, info = info)

  check_list(training[, col_names])

  # one vocabulary per selected column: either the user-supplied one or the
  # sorted set of tokens observed in the training data
  token_list <- list()
  for (i in seq_along(col_names)) {
    token_list[[i]] <- x$vocabulary %||%
      sort(get_unique_tokens(training[, col_names[i], drop = TRUE]))
  }

  step_sequence_onehot_new(
    terms = x$terms,
    role = x$role,
    trained = TRUE,
    columns = col_names,
    sequence_length = x$sequence_length,
    padding = x$padding,
    truncating = x$truncating,
    vocabulary = token_list,
    prefix = x$prefix,
    skip = x$skip,
    id = x$id
  )
}

#' @export
bake.step_sequence_onehot <- function(object, new_data, ...) {
  col_names <- object$columns
  # for backward compat

  for (i in seq_along(col_names)) {
    # encode the column as an n-by-sequence_length integer matrix
    out_text <- string2encoded_matrix(
      x = new_data[, col_names[i], drop = TRUE],
      vocabulary = object$vocabulary[[i]],
      sequence_length = object$sequence_length,
      padding = object$padding,
      truncating = object$truncating
    )

    colnames(out_text) <- paste(
      sep = "_",
      object$prefix,
      col_names[i],
      seq_len(ncol(out_text))
    )

    # drop the original column and append the encoded positions
    new_data <- new_data[, !(colnames(new_data) %in% col_names[i]), drop = FALSE]

    new_data <- vctrs::vec_cbind(new_data, as_tibble(out_text))
  }
  as_tibble(new_data)
}

#' @export
print.step_sequence_onehot <-
  function(x, width = max(20, options()$width - 30), ...) {
    cat("Sequence 1 hot encoding for ", sep = "")
    printer(x$columns, x$terms, x$trained, width = width)
    invisible(x)
  }

#' @rdname step_sequence_onehot
#' @param x A `step_sequence_onehot` object.
#' @export
tidy.step_sequence_onehot <- function(x, ...) {
  if (is_trained(x)) {
    term_names <- sel2char(x$terms)
    res <- tibble(
      # FIX: the previous `rep(term_names, each = lengths(x$vocabulary))`
      # passed a vector to `each`, so only the first vocabulary length was
      # used (with a warning) whenever more than one column was selected.
      # Repeat each term name by its own vocabulary length instead.
      terms = rep(term_names, lengths(x$vocabulary)),
      vocabulary = unlist(lapply(x$vocabulary, seq_along)),
      token = unlist(x$vocabulary)
    )
  } else {
    term_names <- sel2char(x$terms)
    # NOTE(review): in the untrained branch the placeholder types look
    # swapped relative to the trained branch (vocabulary is integer positions
    # there, token is character); preserved as-is for compatibility.
    res <- tibble(
      terms = term_names,
      vocabulary = NA_character_,
      token = NA_integer_
    )
  }
  res$id <- x$id
  res
}

# Map a character vector of tokens to a named integer vector 1..n, where the
# names are the tokens themselves (the inverse lookup used while encoding).
char_key <- function(x) {
  out <- seq_along(x)
  names(out) <- x
  out
}

# Turn a list-column of tokenized strings into an integer matrix with one row
# per observation and `sequence_length` columns. Sequences are padded or
# truncated at the chosen end; tokens outside the vocabulary become 0.
string2encoded_matrix <- function(x, vocabulary, sequence_length, padding,
                                  truncating) {
  vocabulary <- char_key(vocabulary)

  x <- get_tokens(x)

  res <- matrix(NA, nrow = length(x), ncol = sequence_length)

  for (i in seq_along(x)) {
    len_x <- length(x[[i]])
    values <- x[[i]]
    if (len_x == 0) next
    if (len_x == sequence_length) {
      res[i, ] <- values
    }
    if (len_x < sequence_length) {
      if (padding == "post") {
        res[i, seq_len(len_x)] <- values
      } else {
        res[i, seq(sequence_length - len_x + 1, sequence_length)] <- values
      }
    } else {
      if (truncating == "post") {
        res[i, ] <- values[seq_len(sequence_length)]
      } else {
        res[i, ] <- values[seq(len_x - sequence_length + 1, len_x)]
      }
    }
  }

  # translate tokens to their vocabulary indices; unknown tokens and padding
  # cells (still NA) are encoded as 0
  res <- matrix(
    vocabulary[match(res, names(vocabulary))],
    nrow = length(x),
    ncol = sequence_length
  )
  res[is.na(res)] <- 0
  res
}

#' @rdname required_pkgs.step
#' @export
required_pkgs.step_sequence_onehot <- function(x, ...) {
  c("textrecipes")
}
c4c9b7d6da8b023f8b51f7497b2fc581939175b7
e08424eb7743323f470775d2cedb893d9d2080db
/package/analogues/R/loadData.R
a7d7c06aec859e6430cae9b81f3047632fd704b8
[]
no_license
neojavan/ccafs-analogues
9ea474537aa5f7e351b163ca27920df9b3240634
5faed60c61c3d44d642c76e4ca0f15700175ae22
refs/heads/master
2020-05-30T14:53:52.302986
2012-04-26T12:47:19
2012-04-26T12:47:19
39,160,381
0
0
null
null
null
null
UTF-8
R
false
false
852
r
loadData.R
#' Load data for the dissimilarity calculations
#'
#' This function loads the dissimilarity data.
#'
#' @param params an object of the class AnalogueParameters
#' @keywords manip
#' @return an object of AnalogueTraining
#' @export
#' @examples
#' \dontrun{
#' training <- loadData(params)
#' }
loadData <- function(params) {
  # Build one raster-path prefix per scenario/variable combination, i.e.
  # "<env.data>/<scenario>_<var>_"; the transpose keeps variable order within
  # each scenario. (The previous `training <- list()` initialization was dead
  # code -- it was overwritten unconditionally below -- and has been removed.)
  paths <- as.vector(t(outer(str_c(params$env.data, "/", params$scenario, "_"),
                             str_c(params$vars, "_"),
                             FUN = "str_c")))

  # load the grids for every path, then pull all raster values into memory
  training <- lapply(paths, function(x) loadGridsFromFiles(x, params))
  training <- lapply(training, readAll)

  training
}
8aab2750e5fbb40e4bf85244090328ca0ef0925b
14bf52cde8bcee1d261e94576ddbbf4698677904
/Machine Learning/Methods/CV_RF.R
efc49ef4bb0bcfd848da805e4299d75d111f62b0
[]
no_license
K-Schubert/R
4b7e3a7d7b4587ec0e886a5bef9587aeebdb4dac
808679acdda177906b09ecf3d85c876cd0f19739
refs/heads/master
2020-04-15T11:25:00.828413
2019-07-01T08:15:06
2019-07-01T08:15:06
164,628,874
0
0
null
null
null
null
UTF-8
R
false
false
2,376
r
CV_RF.R
set.seed(42)
k <- 10

# k-fold cross-validation of random forests over the `mtry` grid
# (5, 10, ..., ncol(trn) - 1). Returns the CV-MSE curve, its standard errors,
# and the per-fold MSEs of the last grid point.
cv <- function(trn, k){
  # shuffle rows once, then assign each row to one of k folds
  trn_shuffled <- trn[sample(nrow(trn)), ]
  folds <- cut(seq_len(nrow(trn_shuffled)), breaks = k, labels = FALSE)

  # Size the result vectors from the grid itself. The previous hard-coded
  # length (22) only matched one particular predictor count and silently
  # broke for any other data set.
  mtry_grid <- seq(5, ncol(trn) - 1, by = 5)
  CV_MSE <- rep(0, length(mtry_grid))
  SE <- rep(0, length(mtry_grid))

  h <- 0
  for (m in mtry_grid){
    h <- h + 1
    MSE <- rep(0, k)
    for (i in seq_len(k)){
      test_ind <- which(folds == i, arr.ind = TRUE)
      testData <- trn_shuffled[test_ind, ]
      trainData <- trn_shuffled[-test_ind, ]

      # random forest with m candidate variables per split
      rf_mod <- randomForest(y ~ ., data = trainData, mtry = m,
                             importance = FALSE, na.action = na.omit)
      rf_pred <- predict(rf_mod, newdata = testData)

      MSE[i] <- (k / nrow(trn)) * sum((rf_pred - testData$y)^2)
    }
    CV_MSE[h] <- (1 / k) * sum(MSE)
    SE[h] <- (1 / sqrt(k)) * sqrt(sum((MSE - CV_MSE[h])^2) / (k - 1))
  }
  # NOTE: `mse` holds only the fold MSEs of the *last* grid value (unchanged
  # from the original behaviour).
  list(mse = MSE, cv_mse = CV_MSE, se = SE)
}

a <- cv(train_tr, k)
CV_MSE <- a$cv_mse
SE <- a$se

pdf("Pict/RF_CV_RMSE_m.pdf", height=4, width=8)
plot(seq(5,111,by=5), sqrt(CV_MSE), ylab="CV RMSE", xlab="m",
     main="CV RMSE of RF models as a function of m (ntree=500)", type="o")
points(70, min(sqrt(CV_MSE)), col="red", cex=2)
dev.off()

########################################

set.seed(42)
k <- 10

# Second sweep: fix mtry at 70 (the minimizer above) and cross-validate over
# the number of trees. NOTE: this intentionally redefines `cv` from above,
# mirroring the original script's structure.
cv <- function(trn, k){
  trn_shuffled <- trn[sample(nrow(trn)), ]
  folds <- cut(seq_len(nrow(trn_shuffled)), breaks = k, labels = FALSE)

  # sized from the grid instead of the previous hard-coded length (20)
  ntree_grid <- seq(50, 1000, by = 50)
  CV_MSE <- rep(0, length(ntree_grid))
  SE <- rep(0, length(ntree_grid))

  h <- 0
  for (n in ntree_grid){
    h <- h + 1
    MSE <- rep(0, k)
    for (i in seq_len(k)){
      test_ind <- which(folds == i, arr.ind = TRUE)
      testData <- trn_shuffled[test_ind, ]
      trainData <- trn_shuffled[-test_ind, ]

      # random forest with n trees and mtry fixed at 70
      rf_mod <- randomForest(y ~ ., data = trainData, mtry = 70, ntree = n,
                             importance = FALSE, na.action = na.omit)
      rf_pred <- predict(rf_mod, newdata = testData)

      MSE[i] <- (k / nrow(trn)) * sum((rf_pred - testData$y)^2)
    }
    CV_MSE[h] <- (1 / k) * sum(MSE)
    SE[h] <- (1 / sqrt(k)) * sqrt(sum((MSE - CV_MSE[h])^2) / (k - 1))
  }
  list(mse = MSE, cv_mse = CV_MSE, se = SE)
}

a <- cv(train_tr, k)
CV_MSE <- a$cv_mse
SE <- a$se

pdf("Pict/RF_CV_RMSE_ntree.pdf", height=4, width=8)
plot(seq(50, 1000, by=50), sqrt(CV_MSE), ylab="CV RMSE",
     xlab="ntree", main="CV RMSE of RF models as a function of ntree (m=70)", type="o")
points(550, min(sqrt(CV_MSE)), col="red", cex=2)
dev.off()
e9fb1af4db7c1a351224db9b64c7694dd2c953a8
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/BayesS5/examples/S5_parallel.Rd.R
ea4676aa893d8c03b8b632190c38fce7b9c0f606
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
2,032
r
S5_parallel.Rd.R
library(BayesS5)

### Name: S5_parallel
### Title: Parallel version of S5
### Aliases: S5_parallel
### Keywords: Shotgun Stochastic Search Bayesian variable selection g-prior
### nonlocal prior piMoM peMoM

### ** Examples

# Simulate a sparse high-dimensional regression problem:
# p = 5000 predictors, n = 100 observations, 5 true nonzero coefficients.
p=5000
n = 100
indx.beta = 1:5
xd0 = rep(0,p);xd0[indx.beta]=1   # true inclusion indicator
# true coefficients 1, 1.25, ..., 2 with randomly flipped signs
bt0 = rep(0,p); bt0[1:5]=c(1,1.25,1.5,1.75,2)*sample(c(1,-1),5,replace=TRUE)
xd=xd0
bt=bt0
X = matrix(rnorm(n*p),n,p)
y = crossprod(t(X),bt0) + rnorm(n)*sqrt(1.5)  # response = X %*% bt0 + noise (var 1.5)
X = scale(X)        # standardize the predictors
y = y-mean(y)       # centre the response
y = as.vector(y)

### parallel version of S5 (default)
#fit_parallel = S5_parallel(NC=2,X,y)
#fit_parallel$GAM # the searched models by S5
#fit_parallel$OBJ # the corresponding log (unnormalized) posterior probability
#res_parallel = result(fit_parallel)
#str(res_parallel)
#print(res_parallel$hppm) # the MAP model
#print(res_parallel$hppm.prob) # the posterior probability of the hppm
#plot(res_parallel$marg.prob,ylim=c(0,1),ylab="marginal inclusion probability") # the marginal inclusion probability

### parallel version of S5 (temperature rescheduling)
#library(snowfall)
#NC = 2 # the number of cores for the parallel computing
#C0 = 5 # the number of repetitions of S5 algorithms to explore the model space
#tuning = hyper_par(type="pimom",X,y,thre = p^-0.5) # tuning parameter selection for nonlocal priors
#print(tuning)
#ind_fun = ind_fun_pimom
#model = Bernoulli_Uniform # the log-marginal likelihood of models based on piMoM prior
#('Uniform' or 'Bernoulli_Uniform').
#tem = seq(0.4,1,length.out=20)^2 # the temperatures schedule
#fit_parallel = S5_parallel(NC=2,X,y,ind_fun,model,tuning,tem,C0=C0)
#fit_parallel$GAM # the searched models by S5
#fit_parallel$OBJ # the corresponding log (unnormalized) posterior probability
#res_parallel = result(fit_parallel)
#str(res_parallel)
#print(res_parallel$hppm) # the MAP model
#print(res_parallel$hppm.prob) # the posterior probability of the hppm
#plot(res_parallel$marg.prob,ylim=c(0,1),ylab="marginal inclusion probability") # the marginal inclusion probability
5ecde4ef3e500e7969fdb0b89e6cfc4b087abd1f
a2687d3d630f2705e4d70c6ffc0428899b2cdd5a
/cachematrix.R
eeed6ec024609bd3693cc4cfbf376a6d4f0d2aec
[]
no_license
DebraBowen/datasciencecoursera
5763e2cfe0d36ad868e0efd80edfdc2056c6d7c7
e5da46b91038cbcf042f39b4d8d26c15dab0fbf4
refs/heads/master
2021-01-13T09:14:53.781542
2016-11-27T17:10:16
2016-11-27T17:10:16
69,380,039
0
0
null
2016-09-27T19:43:46
2016-09-27T17:09:01
null
UTF-8
R
false
false
2,378
r
cachematrix.R
## makeCacheMatrix() wraps a matrix in a small closure-based object that can
## cache the matrix's inverse, avoiding repeated calls to solve().
## Usage:
##   m <- matrix(c(1:4), nrow = 2, ncol = 2)
##   matr1 <- makeCacheMatrix(m)
##   cacheSolve(matr1)

makeCacheMatrix <- function(x = matrix()) {
  ## cached inverse; NULL means "not computed yet" (or invalidated by set())
  inv <- NULL

  ## replace the stored matrix and drop any stale cached inverse
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }

  ## accessors for the stored matrix and the cached inverse
  get <- function() x
  setmatrix <- function(solve) inv <<- solve
  getmatrix <- function() inv

  ## expose the four closures by name so callers can use obj$get() etc.
  list(set = set,
       get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}

## cacheSolve() returns the inverse of the matrix held by a makeCacheMatrix()
## object, computing it with solve() on the first call and serving the cached
## copy (with a console message) on every subsequent call.
cacheSolve <- function(x, ...) {
  cached <- x$getmatrix()

  ## cache hit: announce it and return early
  if (!is.null(cached)) {
    message("getting cached matrix")
    return(cached)
  }

  ## cache miss: compute, store, and return the inverse
  result <- solve(x$get(), ...)
  x$setmatrix(result)
  result
}
b63f925ea5884bcf2a39377553cd397dc51cb117
f8b1c573b12a98d9bd299ad886ab0517f0db8f26
/analysis/matching/flowchart.R
55e284f42e5eeb76204c39dd5f133c364af8b811
[ "MIT" ]
permissive
opensafely/vaccine-effectiveness-3dose
ca1593238af710e4c2d7794d2a7b0344c33b8cc8
83a0873237cc51dc0d7dc9054a1b7587e10f9e85
refs/heads/main
2023-08-25T08:29:17.081953
2023-06-19T15:21:04
2023-06-19T15:21:04
539,359,537
0
0
MIT
2023-09-13T05:40:54
2022-09-21T07:25:39
R
UTF-8
R
false
false
5,456
r
flowchart.R
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# This script:
# calculates the counts for the flowchart in the manuscript
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

# # # # # # # # # # # # # # # # # # # #
# Preliminaries ----
# # # # # # # # # # # # # # # # # # # #

# import libraries
library(tidyverse)
library(glue)
library(here)

# load functions and parameters (defines study_dates, threshold, roundmid_any,
# fuzzy_join, among others, used below)
source(here("analysis", "design.R"))
source(here("lib", "functions", "utility.R"))
source(here("lib", "functions", "fuzzy_join.R"))

# import command-line arguments ----
args <- commandArgs(trailingOnly=TRUE)

if(length(args)==0){
  # use for interactive testing
  cohort <- "mrna"
} else {
  cohort <- args[[1]]
}

# create output directories
outdir <- here("output", cohort, "flowchart")
fs::dir_create(outdir)

# define all flow categories for sequential trial cohorts
# (single-letter codes combined into multi-letter "boxes" below)
flow_categories <- tribble(
  ~crit, ~criteria,
  # those who are boosted with the brand on day 1 of recruitment
  "A", "boosted and unmatched",
  "B", "boosted and matched",
  # those who are boosted with the brand during the recruitment period but not on day 1
  "C", "unboosted and unmatched then boosted and unmatched",
  "D", "unboosted and unmatched then boosted and matched",
  "E", "unboosted and matched then boosted and unmatched",
  "F", "unboosted and matched then boosted and matched",
  # those who remain unboosted at the end of the recruitment period
  "H", "unboosted and matched",
)

# define boxes for sequential trial flow; each box aggregates the categories
# named in box_crit (matched by str_detect in the fuzzy join below)
# NOTE(review): boxes "E" and "F" carry identical descriptions; given the
# category definitions above, "E" likely should read "unmatched as treated,
# matched as control" -- confirm against the manuscript flowchart.
flow_boxes <- tribble(
  ~box_crit, ~box_descr,
  "ABCDEF", "Boosted during recruitment period",
  "AC", "Boosted during recruitment period, unmatched as treated, unmatched as control",
  "BDF", "Boosted during recruitment period, matched as treated",
  "E", "Boosted during recruitment period, matched as treated, matched as control",
  "F", "Boosted during recruitment period, matched as treated, matched as control",
  "EFH", "Unboosted, matched as control",
  "H", "Unboosted up to recruitment end, matched as control",
)

# read data_treatedeligible_matchstatus to count unmatched treated individuals (we don't count unmatched controls)
data_treatedeligible_matchstatus <- readr::read_rds(here("output", cohort, "match", "data_treatedeligible_matchstatus.rds")) %>%
  select(patient_id, treated, matched, vax3_date)

# reshape so one row per patient, and logical columns to indicate if matched as treated, control or both
data_matched <- readr::read_rds(here("output", cohort, "match", "data_matched.rds")) %>%
  select(patient_id, treated) %>%
  mutate(matched = 1) %>%
  pivot_wider(
    names_from = treated,
    values_from = matched
  ) %>%
  rename("treated" = "1", "control" = "0") %>%
  full_join(data_treatedeligible_matchstatus, by = c("patient_id", "treated")) %>%
  # absent match indicators become FALSE after this coercion
  mutate(across(c(treated, control, matched), ~ replace_na(as.logical(.x), replace=FALSE))) %>%
  mutate(treated=matched) %>%
  select(-matched)

cat("Check there are the same number of treated and control:\n")
data_matched %>%
  summarise(
    treated = sum(treated, na.rm = TRUE),
    control = sum(control, na.rm = TRUE)
  ) %>%
  print()

# categorise individuals into the single-letter flow categories
data_match_flow <- data_matched %>%
  mutate(
    crit = case_when(
      # those who are vaccinated on day 1 of recruitment
      vax3_date == study_dates[[cohort]]$start_date & !control & !treated ~ "A",
      vax3_date == study_dates[[cohort]]$start_date & !control & treated ~ "B",
      # those who are vaccinated during the recruitment period but not on day 1
      vax3_date <= study_dates$studyend_date & !control & !treated ~ "C",
      vax3_date <= study_dates$studyend_date & !control & treated ~ "D",
      vax3_date <= study_dates$studyend_date & control & !treated ~ "E",
      vax3_date <= study_dates$studyend_date & control & treated ~ "F",
      # those who remain unvaccinated at end of recruitment period
      TRUE ~ "H"
    )
  )

# count number in each category (right_join keeps categories with zero counts)
flowchart_matching <- data_match_flow %>%
  group_by(crit) %>%
  count() %>%
  right_join(flow_categories, by = "crit") %>%
  arrange(crit) %>%
  mutate(across(n, ~if_else(is.na(.x), 0L, .x)))

# brand-specific
flow_match_final <- flow_boxes %>%
  # join to the counts for each criteria
  # (str_detect matches each single-letter crit against the multi-letter box_crit)
  fuzzy_join(
    flowchart_matching,
    by = c("box_crit" = "crit"),
    match_fun = str_detect,
    mode = "left"
  ) %>%
  # sum across all criteria in each box
  group_by(box_crit, box_descr) %>%
  summarise(n = sum(n), .groups = "keep") %>%
  ungroup() %>%
  # disclosure control: round counts before release
  mutate(across(n, roundmid_any, to=threshold)) %>%
  rename(
    # rename to match flowcharttreatedeligible
    criteria = box_descr,
    crit = box_crit
  )

flowchart_final_rounded <- bind_rows(
  # read unrounded as rounding different (using ceiling_any in process_data.R)
  read_rds(here("output", "treated", "eligible", "flowchart_treatedeligible.rds")) %>%
    # round to match flow_match_final
    transmute(
      criteria, crit,
      n = roundmid_any(n, to=threshold),
      n_exclude = lag(n) - n,
      pct_exclude = n_exclude/lag(n),
      pct_all = n / first(n),
      pct_step = n / lag(n),
    ) %>%
    mutate(across(starts_with("pct_"), round, 3)),
  flow_match_final
) %>%
  # for easy review, join back after release
  select(-criteria) %>%
  select(crit, everything())

# save flowchart_final
write_csv(
  flowchart_final_rounded,
  file.path(outdir, "flowchart_final_rounded.csv")
)
e4e81597e3e668ff8a8e19d91a79588232a79e6a
5f4e127bf2a52486df01088384d7c5926cfa277e
/man/tabela_soma_razao_beneficio_custo.Rd
ca37213882c7e8ea714a7c24da03f8af5ab3b269
[]
no_license
pedroliman/oshcba
0b4ac93c2135e85b71dde834b6eb313980b98db9
01ef42e96a7089fc6f4f35912e825a484b017208
refs/heads/master
2020-02-26T15:04:57.043923
2018-08-01T00:48:22
2018-08-01T00:48:22
94,784,015
0
1
null
2017-07-17T20:52:06
2017-06-19T14:12:00
R
UTF-8
R
false
true
498
rd
tabela_soma_razao_beneficio_custo.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analise.R \name{tabela_soma_razao_beneficio_custo} \alias{tabela_soma_razao_beneficio_custo} \title{tabela_soma_razao_beneficio_custo} \usage{ tabela_soma_razao_beneficio_custo(resultados_cbr) } \arguments{ \item{resultados_cbr}{dataframe com resultados formatados no modelo "CBR".} } \value{ dataframe com resumo do CBR por iniciativa e razão benefício custo total } \description{ tabela_soma_razao_beneficio_custo }
1e87e740b5938f486cef372dbe02aa0e56a64846
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
/fuzzedpackages/Rnmr1D/man/ggplotClusters.Rd
b4074e0ebae686291aee2b14ace881a60119a6c3
[]
no_license
akhikolla/testpackages
62ccaeed866e2194652b65e7360987b3b20df7e7
01259c3543febc89955ea5b79f3a08d3afe57e95
refs/heads/master
2023-02-18T03:50:28.288006
2021-01-18T13:23:32
2021-01-18T13:23:32
329,981,898
7
1
null
null
null
null
UTF-8
R
false
true
554
rd
ggplotClusters.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ggplotTools.R \name{ggplotClusters} \alias{ggplotClusters} \title{ggplotClusters} \usage{ ggplotClusters(data, clustObj) } \arguments{ \item{data}{the matrix including the integrations of the areas defined by the buckets (columns) on each spectrum (rows)} \item{clustObj}{a list generated by the \code{getClusters} function} } \description{ Plots the boxplot of all clusters allowing to have an insight on the clusters distribution. Plot based on ggplot2 }
7dfc1261f8fe78c9b2c6373fa6e95e716bea5742
162bf939b9a53541674aa55ce7601e0dbb273493
/code_ggvis.R
cacfcebac1bbbebfd0a4f46386438dd4b6f62649
[]
no_license
fatih-cakmak/R_snippets
d48c07cc3538331c260b21b2db6cc8941fbcb520
facdfadd1d0f733605ece1d29cd8642efb131734
refs/heads/master
2021-01-21T17:50:54.705942
2016-09-18T21:47:53
2016-09-18T21:47:53
68,547,340
0
0
null
null
null
null
UTF-8
R
false
false
134
r
code_ggvis.R
## similar to "ggplot2" but more extensive ## interactive like "shiny" ## can use the pipeline ## renders in a web browser (vega.js)
b86d0039cfb88f69237645227d283fbc82fc9805
46a104e2aa482820af0bd16126959aa734387de9
/Binding.R
20d3c73c17006ff689c8bb00a73ccbab30d3653a
[]
no_license
DuongNg0403/R_Harvard
c6ab1a1a69bfb4b4bb7f6e45ac373ea149589c2a
e3ab486a4394467f2f36e1dcfdfa2d8ca390f85b
refs/heads/master
2022-11-08T05:59:11.217012
2020-06-29T01:02:40
2020-06-29T01:02:40
244,889,886
0
0
null
null
null
null
UTF-8
R
false
false
360
r
Binding.R
# Demonstrates dplyr joins and the bind_cols()/bind_rows() combining verbs
# using the dslabs gun-murders and 2016 US election datasets.
library(tidyverse)
library(ggrepel)
library(dslabs)
ds_theme_set()
data(murders)
head(murders)

# join murder data with 2016 election results, matching rows on state name
tab <- left_join(murders, results_us_election_2016, by = "state")
head(tab)

# bind_cols() on named vectors returns a tibble with columns a and b
bind_cols(a=1:3, b=4:6)

# split the joined table into three column groups and glue them back together
tab1 <- tab[,1:3]
tab2 <- tab[,4:6]
tab3 <- tab[,7:9]
new_tab <- bind_cols(tab1,tab2,tab3)
head(new_tab)

# stack the first two rows of the table on top of rows 3-4
tab1 <- tab[1:2,]
tab2 <- tab[3:4,]
bind_rows(tab1, tab2)
893ccd8e9194dcd94ba1516273e32a943601f864
df0e9f804c7708481b021f20b3a9d372fc752254
/man/cobalt-package.Rd
c7727c2fd35f6cbe30f67efdd3ba285bf8d30961
[]
no_license
Zchristian955/cobalt
3a132bca1d6a7fe3286e9d0f7154e072766a2f79
92596ad30186a06f263db8b32c005989c720f345
refs/heads/master
2023-03-14T20:50:03.661026
2021-03-30T08:50:18
2021-03-30T08:50:18
436,739,071
1
0
null
null
null
null
UTF-8
R
false
false
2,149
rd
cobalt-package.Rd
\docType{package} \name{cobalt-package} \alias{cobalt-package} \alias{cobalt} \title{ cobalt: Covariate Balance Tables and Plots } \description{ A set of tools for assessing, displaying, and reporting covariate balance in observational studies before and after preprocessing through matching, weighting, or subclassification (e.g., using propensity scores). Compatible with many of the major preprocessing packages, including \pkg{MatchIt}, \pkg{twang}, \pkg{Matching}, \pkg{WeightIt}, and others, serving as a replacement or supplement to their balance assessment functions. The focus in \pkg{cobalt} is on flexible output, methodologically recommended practices, and smart defaults. It's critical that you read some of the documentation to understand what values are being produced. See the links below for the main functions and what output they create: \itemize{ \item \fun{bal.tab} - Generate balance tables for binary, multi-category, and continuous treatments, longitudinal/sequential treatments, and clustered/subgrouped and multiply imputed data. \item \fun{bal.plot} - Generate plots to assess distributional balance for a single covariate. \item \fun{love.plot} - Generate Love plots to display covariate balance graphically for publication. } In addition to the main functions, there are also some helper functions that users might find valuable: \itemize{ \item \fun{f.build} - Create a formula (e.g., \code{Z ~ X1 + X2}) from strings or data frames to reduce programming burden. \item \fun{splitfactor} - Split factors in data frame into dummy variables with flexibility in how categories are dropped. \item \fun{get.w} - Extract weights from the output of a preprocessing function from another package (e.g., \pkg{MatchIt}, \pkg{twang}, or \pkg{Matching}). } } \section{Citing}{ Please cite \pkg{cobalt} if you use it to produce balance tables or plots in your paper to ensure replicability by others performing the same analysis. 
Use \code{citation("cobalt")} to generate a current citation. } \author{ Noah Greifer \email{noah.greifer@gmail.com} If you have found \pkg{cobalt} helpful, please let me know! } \keyword{internal}
a14d11986a4067c1293a125189603b96e8b83b86
18bf8c7694a4dc1f967d41fbaeb530b9d8460f4d
/man/plot.for.ma.Rd
72aa66ab1dbd1bdb8946142468421340fa5dcf48
[]
no_license
juliusfoerstel/LaseR
cb9890e2032e9f0a057f1b728200a799c7d7cb7f
151a9f834d577963b1a19e2a86855a18709e19bb
refs/heads/master
2020-12-23T09:18:20.343937
2020-05-18T08:59:39
2020-05-18T08:59:39
63,343,462
0
1
null
null
null
null
UTF-8
R
false
true
1,058
rd
plot.for.ma.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/PlotFuerMA.R \name{plot.for.ma} \alias{plot.for.ma} \title{Opens the tikzDevice in the graphics folder of the MA} \usage{ \method{plot}{for.ma}(filename = "RPlot", width = 4, height = 3, ...) } \arguments{ \item{filename}{A string which is going to be the name of the output file. Default value is "RPlot".} \item{width}{numeric. Width of the plot in inches. Default is 4.} \item{height}{numeric. Height of the plot in inches. Default is 3.} \item{...}{Further arguments passed on to the tikz device.} } \description{ This function opens and loads the tikzDevice to create a tikz output for a plot. It uses therein the function create.graphics.path() also from the LaseR-package. } \examples{ plot.for.ma(filename = "Signalstärke", width = 4, height = 4) This creates a new file called Signalstärke.tex in the folder where the graphics for the Tex-file are stored. Afterwards the plotting has to be done and in the end the device has to be closed by dev.off(). } \keyword{device,} \keyword{file} \keyword{graphics,} \keyword{latex}
0b9b99aa76cf0eb028ee783f248df41eaee3405e
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
/fuzzedpackages/blockcluster/man/binarydata.Rd
aeb882f86398f03855f064229d2cb10e496dcbc4
[]
no_license
akhikolla/testpackages
62ccaeed866e2194652b65e7360987b3b20df7e7
01259c3543febc89955ea5b79f3a08d3afe57e95
refs/heads/master
2023-02-18T03:50:28.288006
2021-01-18T13:23:32
2021-01-18T13:23:32
329,981,898
7
1
null
null
null
null
UTF-8
R
false
true
415
rd
binarydata.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RCocluster.R \name{binarydata} \alias{binarydata} \title{Simulated Binary Data-set} \format{A data matrix with 1000 rows and 100 columns.} \description{ It is a binary data-set simulated using Bernoulli distribution. It consist of two clusters in rows and three clusters in columns. } \examples{ data(binarydata) } \keyword{datasets}
3954746068df8bedf60cf1b0791fcc771c8c8dda
f6b8bb9ed8ad5ab02f25f7a76a4c77beaeb7eb24
/ShinyApp/funct_0downloadFile.R
f0300d75a6779cdf8d5590f6eabc03fb346733cf
[]
no_license
DrRoad/ShinyAppVincent
b5966d20534ebd7055f23ca79b83b71edfe581ea
c565c01c7b087dd6888705e6fee022cf8aed1dfe
refs/heads/master
2020-07-22T11:27:18.255033
2019-08-23T22:18:30
2019-08-23T22:18:30
null
0
0
null
null
null
null
UTF-8
R
false
false
476
r
funct_0downloadFile.R
# Download tab costs ----

# Build a shiny download handler that serves the given costs table as a CSV
# file named "MydataDownload.csv".
#
# tabCosts: data frame of costs to export.
# Returns: a shiny downloadHandler object (to be assigned to an output slot).
function.downloadFile <- function(tabCosts) {
  make_filename <- function() {
    paste0("MydataDownload", ".", "csv")
  }
  write_contents <- function(file) {
    write.table(tabCosts, file, sep = ",", row.names = FALSE)
  }
  downloadHandler(filename = make_filename, content = write_contents)
}

# Save data ----

# Persist the data held by a rhandsontable widget to `file` as CSV, then read
# the file back so the caller gets exactly what was written to disk.
#
# costsTab: rhandsontable input object (converted via hot_to_r).
# file: path of the CSV file to write.
# Returns: the re-read table as a data frame.
function.saveDataInFile <- function(costsTab, file) {
  tab <- hot_to_r(costsTab)
  write.csv(tab, file, row.names = FALSE)
  as.data.frame(read.csv(file))
}
77dafd9d8d6eae35fae2efe2f1737560e0c6d8a9
e3b70a106252542597985a5df1cba35dad9bc27c
/kate_programs_v2.0/k.nhnwutil.R
b4786f2068abab121b2424571aecad053a2714c2
[]
no_license
tkangk/tdm
5749d5834264a68286c58a8532af517aec8442d1
5081bff9b7b5793a82d39141a4b0d0de7fd248ac
refs/heads/master
2020-03-24T21:39:12.691526
2018-04-09T21:36:39
2018-04-09T21:36:39
null
0
0
null
null
null
null
UTF-8
R
false
false
3,758
r
k.nhnwutil.R
# k.nhnwutil.R
# NHNW Destination Choice Logsums
#
# Flat script: computes mode-choice utilities and the destination-choice
# logsum for the NHNW purpose (presumably non-home-based non-work trips --
# confirm against the model documentation). All mf.* (matrices), ma.*
# (margin vectors) and NHNW.* / autocost / pkvot / opvot / toll inputs are
# expected to exist in the calling environment before this script is sourced.

# Auto Operating Cost
mf.Sopcost <- autocost * mf.tdist
mf.Hopcost <- mf.Sopcost

# Auto Out of Pocket Cost
# (mf.Sopcost * 0) keeps the matrix shape while zeroing it; sweep() then adds
# half the destination parking charge (ma.stpkg) to every column.
mf.Spocketcost <- sweep((mf.Sopcost * 0), 2, 0.5 * ma.stpkg, "+")
mf.Hpocketcost <- sweep((mf.Hopcost * 0), 2, 0.5 * ma.stpkg, "+")

if (toll == TRUE) {
  print("About to calculate toll costs")
  # Adjust Toll Time to reflect difference in elasticity between assignment and model (.25)
  mf.amstlTadj <- mf.amstl*(1.125)*(.25)
  mf.mdstlTadj <- mf.mdstl*(1.25)*(.25)
  # Convert adjusted toll minutes to dollars using peak / off-peak values of time.
  mf.amstlD <- (mf.amstlTadj/60)*pkvot
  mf.mdstlD <- (mf.mdstlTadj/60)*opvot
  mf.amhtlTadj <- mf.amhtl*(1.125)*(.25)
  mf.mdhtlTadj <- mf.mdhtl*(1.25)*(.25)
  mf.amhtlD <- (mf.amhtlTadj/60)*pkvot
  mf.mdhtlD <- (mf.mdhtlTadj/60)*opvot
  # Add Tolls: overwrite the toll-free pocket-cost matrices with the
  # period-weighted toll dollars plus half the parking charge.
  mf.Spocketcost <- sweep((NHNW.pkfact*mf.amstlD + NHNW.opfact*mf.mdstlD), 2, 0.5 * ma.stpkg, "+")
  rm (mf.amstl, mf.mdstl, mf.amstlTadj, mf.mdstlTadj, mf.amstlD, mf.mdstlD)
  mf.Hpocketcost <- sweep((NHNW.pkfact*mf.amhtlD + NHNW.opfact*mf.mdhtlD), 2, 0.5 * ma.stpkg, "+")
  rm (mf.amhtl, mf.mdhtl, mf.amhtlTadj, mf.mdhtlTadj, mf.amhtlD, mf.mdhtlD)
}

# Define cost coefficients
cc <- NHNW.all.cc # auto operating costs coeff
pktc <- NHNW.all.pktc # auto out of pocket costs: parking and tolls coeff
faresc <- NHNW.all.faresc # transit fares coeff
# NOTE(review): faresc is defined but never used below; the transit fare term
# uses cc instead (see mf.trutil). Possibly intentional if the coefficients
# are equal -- confirm against the model specification.

# NHNW Drive Alone Utility
# exp(utility): period-weighted in-vehicle time plus cost terms, with the
# zonal walk/access term (ma.auov) swept across columns.
mf.dautil <- exp(NHNW.ivCoeff * (NHNW.pkfact * mf.amstt + NHNW.opfact * mf.mdstt) + sweep(cc * mf.Sopcost + pktc * mf.Spocketcost, 2, NHNW.da.walkCoeff * ma.auov, "+"))

# NHNW Drive with Passenger Utility
mf.dputil <- exp(NHNW.LS.dpconst + NHNW.ivCoeff * (NHNW.pkfact * mf.amhtt + NHNW.opfact * mf.mdhtt) + sweep(NHNW.dp.autoCostFac * (cc * mf.Hopcost + pktc * mf.Hpocketcost), 2, NHNW.dp.walkCoeff * ma.auov, "+"))

# NHNW Passenger Utility
mf.pautil <- exp(NHNW.LS.paconst + NHNW.ivCoeff * (NHNW.pkfact * mf.amhtt + NHNW.opfact * mf.mdhtt) + sweep(NHNW.pa.autoCostFac * (cc * mf.Hopcost + pktc * mf.Hpocketcost), 2, NHNW.pa.walkCoeff * ma.auov, "+"))

# NHNW Transit Utility (weighted average)
# In-vehicle time summed over sub-modes (bus/local/school/rail/bus-rail --
# presumed from the b/l/sc/r/br suffixes, confirm).
mf.amtiv <- (mf.amtbiv + mf.amtliv + mf.amtsciv + mf.amtriv + mf.amtbriv)
mf.mdtiv <- (mf.mdtbiv + mf.mdtliv + mf.mdtsciv + mf.mdtriv + mf.mdtbriv)
mf.amtconst <- (NHNW.LS.tranconst + mf.amtvehc + mf.amtstpc)
mf.mdtconst <- (NHNW.LS.tranconst + mf.mdtvehc + mf.mdtstpc)
ivt <- NHNW.pkfact * NHNW.ivCoeff * mf.amtiv + NHNW.opfact * NHNW.ivCoeff * mf.mdtiv
trconst <- NHNW.pkfact * mf.amtconst + NHNW.opfact * mf.mdtconst
# Cap out-of-vehicle components at 30 minutes; values >= 9990 appear to be
# "unavailable" sentinels and are left untouched (TODO confirm sentinel range).
mf.trwait1 <- NHNW.pkfact * mf.amtwt1 + NHNW.opfact * mf.mdtwt1
mf.trwait1 [mf.trwait1[,]>30 & mf.trwait1[,]<9990] <- 30
mf.trwait2 <- NHNW.pkfact * mf.amtwt2 + NHNW.opfact * mf.mdtwt2
mf.trwait2 [mf.trwait2[,]>30 & mf.trwait2[,]<9990] <- 30
mf.trwalk <- NHNW.pkfact * mf.amtwalk + NHNW.opfact * mf.mdtwalk
mf.trwalk[mf.trwalk[,]>30 & mf.trwalk[,]<9990] <- 30
mf.transfers <- NHNW.pkfact * mf.amtxfr + NHNW.opfact * mf.mdtxfr
# Out-of-vehicle to in-vehicle time ratio term.
oviv <- NHNW.tr.trOVIVCoeff * ((mf.trwait1 + mf.trwait2 + mf.trwalk) / (NHNW.pkfact * mf.amtiv + NHNW.opfact * mf.mdtiv))
# NOTE(review): fare cost uses cc (auto operating cost coefficient), not faresc.
mf.trutil <- exp(trconst + ivt + oviv + NHNW.tr.wait1Coeff * mf.trwait1 + NHNW.tr.wait2Coeff * mf.trwait2 + NHNW.tr.walkCoeff * mf.trwalk + NHNW.tr.transCoeff * mf.transfers + cc * mf.trfare)
# Zero out transit utility where wait time carries the "unavailable" sentinel.
mf.trutil[mf.trwait1[,]>9990 & mf.trwait1[,]<=99999] <- 0

# NHNW Bike Utility
mf.bkutil <- exp(NHNW.LS.bikeconst + NHNW.bk.ubdistCoeff * mf.ubdist + NHNW.bk.nbcostCoeff * mf.nbcost)

# NHNW Walk Utility
mf.wkutil <- exp(NHNW.LS.walkconst + NHNW.wk.walkCoeff * mf.wtime)
# Walk unavailable beyond 100 minutes.
mf.wkutil[mf.wtime[,]>100] <- 0

# NHNW Logsum
# Final destination-choice logsum: log of the summed exponentiated utilities.
mf.nhnwls <- log(mf.dautil + mf.dputil + mf.pautil + mf.trutil + mf.bkutil + mf.wkutil)
c683b6ce7047f477d1b699df8457ed4318952640
16d9ea3f8badf9a52c6235ce8497caacdd61daa8
/ui.R
a27c9fecdb2ccb1a2b1d33125be752f3618e1e7f
[]
no_license
suryanshraghuvanshi/-CAPS-
73fc30228758f640907f1cf8bb8ec6428a3517a2
6235b4a286490933033ee4dca8553bcb58efea34
refs/heads/master
2021-01-01T04:04:06.842939
2017-07-13T12:05:42
2017-07-13T12:05:42
97,117,570
0
0
null
null
null
null
UTF-8
R
false
false
6,718
r
ui.R
library(shiny) library(shinydashboard) library(rCharts) library(leaflet) library(ggmap) library(ggplot2) library(dplyr) library(leaflet.extras) library(rMaps) d <- read.csv("del.csv") names <- as.character(unique(unlist(d$CITY))) ui <- dashboardPage( dashboardHeader(title = "Predicted Crime Rates", titleWidth = 300), dashboardSidebar(width = 300, sidebarMenu( #h5(strong("Choose Crime Type:")), selectInput("variable", label= "Choose Crime Type", choices = list("", "Violent Crime", "Property Crime"), selected=""), #h5(strong("Choose Time Period:")), selectInput("year", label= "Choose Time Period", choices = list("", "2017", "Change, 2015-2017"), selected=""), # h5(strong("Choose City:")), #selectInput("city", label= "Choose City", choices = names[order(names)], selected="New Delhi"), selectInput("city", label="Choose City", choices = list("Adarsh Nagar" = 1,"Ambedkar Nagar" = 2,"Babarpur" = 3,"Badarpur" = 4 ,"Badli" = 5,"Ballimaran" = 6,"Bawana" = 7,"Burari" = 8,"Chandni Chowk" = 9,"Chhatarpur" = 10 ,"Delhi Cantt" = 11,"Deoli" = 12,"Dwarka" = 13,"Gandhi Nagar" = 14,"Ghonda" = 15,"Gokalpur" = 16 ,"Greater Kailash" = 17,"Hari Nagar" = 18,"Janakpuri" = 19,"Jangpura" = 20,"Kondli" = 21,"Karawal Nagar" = 22 ,"Karol Bagh" = 23,"Kasturaba Nagar" = 24,"Kalkaji" = 25,"Madipur" = 26,"Malviya Nagar" = 27,"Matiala" = 28 ,"Matia Mahal" = 29,"Mehrauli" = 30,"Mangol Puri" = 31,"Model Town" = 32,"Moti Nagar" = 33,"Mundka" = 34 ,"Mustafabad" = 35,"Najafgarh" = 36 ,"Nangloi Jat" = 37,"Narela" = 38,"Okhla" = 39,"Palam" = 40 ,"Patel Nagar" = 41,"Patparganj" = 42,"Rajinder Nagar" = 43,"Rajouri Garden" = 44 ,"Rithala" = 45,"R.K. 
Puram" = 46,"Rohtas Nagar" = 47,"Rohini" = 48,"Sadar Bazar" = 49 ,"Sangam Vihar" = 50,"Shahdara" = 51,"Shakur Basti" = 52,"Shalimar Bagh" = 53 ,"Seelam Pur" = 54,"Seemapuri" = 55,"Sultan Pur Majra" = 56,"Timarpur" = 57 ,"Tilak Nagar" = 58,"Trilokpuri" = 59,"Tri Nagar" = 60,"Tughlakabad" = 61 ,"Uttam Nagar" = 62,"Vishwas Nagar" = 63,"Vikaspuri" = 64,"Wazirpur" = 65 ,"Kirari" = 66,"Bijwasan" = 67,"Noida" = 68,"Faridabad" = 69,"Gaziabad" = 70 ,"Shamli" = 71,"Gurgaon" = 72,"New Delhi" = 73), selected = 73), br(), div(style="display:center-align;",actionButton("go", label = "Analyize", icon = icon("paper-plane"))), br(), p(strong("Data Notes:")), #p(textOutput("var_desc")), br(), #textOutput("yr_desc"), br(), #a("NOTES :"), br(), a(" "), br(), a("",href="http://www.delhipolice.nic.in/"), br(), img(src= 'ilssc.png', height=100, width=100) )), dashboardBody( fluidRow( tabsetPanel( tabPanel("Introduction", textOutput("bhai ji "), tags$iframe(src = 'include.html', # put testdoc.html to /www width = '100%', height = '800px', frameborder = 0, scrolling = 'auto') #p(strong("Data Notes:")), #p(textOutput("var_desc")), #br(), #textOutput("yr_desc"), #br(), #a("NOTES :"), #br(), #a(" "), #br(), #a("",href="http://www.delhipolice.nic.in/"), #br(), #img(src="ilssc.png", height=100, width=100) ), tabPanel( title = "Histogram", h2("Cities in New Delhi- Histogram"), p(em("Predicted crime values of all cities in New Delhi are displayed below.")), plotOutput("hist", height = 400), br(), p(". Value for selected city:"), verbatimTextOutput("data1"), p(". 
Values for all cities:"), verbatimTextOutput("data2") ), tabPanel("Heatmap", h2("Cities in New Delhi- Heatmap"), p(em("Predicted crime location of all cities in New Delhi are displayed below.")), br(), plotOutput("del.csv"), chartOutput("my.map", "leaflet"), tags$style('.leaflet {height: calc(100vh -80px) !important; padding: 0; margin: 0; min-height: 500px}') # tags$head(tags$script(src="http://leaflet.github.io/Leaflet.heat/dist/leaflet-heat.js")) ), tabPanel("Linear Regression", textOutput("this wiredsoft"), h2("Cities in New Delhi- Linear Regression"), br(), p(em("Linear regression for Cities vs Voilent Crime in 2017 in New Delhi are displayed below.")), plotOutput("mod1", height = 400), p(em("Linear regression for Cities vs Property Crime in 2017 in New Delhi are displayed below.")), plotOutput("mod2", height = 400), p(em("Linear regression for Cities vs Voilent Crime in 2015 in New Delhi are displayed below.")), plotOutput("mod3", height = 400), p(em("Linear regression for Cities vs Property Crime in 2015 in New Delhi are displayed below.")), plotOutput("mod4", height = 400) ), tabPanel("Predictive Values", textOutput("this values"), h2("Cities in New Delhi- Predictive Data"), p(em("Predicted crimes values of all cities in New Delhi are displayed below.")), h3(textOutput("predict")), br(), h3(p("Sum of squared errors (SSE) for calcuating R squared :")), h3(textOutput("SSET")) ), tabPanel("About", textOutput("this") ) ) ) ) )
b605589a5126cfc3cc28ff01d138db9b2b3ad1b8
fdcd69bbf8fa90f1e5998068147cbd7a93886598
/man/similarityTree.Rd
2ed5f7771a0af8ffe8150f098d828505ed518077
[]
no_license
souhilabsl/Rchic
bf60ec437d5ba6caf49447692cda4fa229fc2d5c
252948b9e034b9a508938d94c976ef02533a4735
refs/heads/master
2021-01-21T18:38:25.225411
2014-04-17T13:58:54
2014-04-17T13:58:54
null
0
0
null
null
null
null
UTF-8
R
false
false
514
rd
similarityTree.Rd
\name{similarityTree} \alias{similarityTree} \title{Computes and Displays the Similarity Tree.} \usage{ similarityTree(list.variables, rules = NULL, Verbose = FALSE) } \arguments{ \item{list.variables}{list of variables to compute the similarity tree from} \item{rules}{dataframe of ASI rules.} \item{Verbose}{logical; if TRUE, print detailed progress information.} } \description{ (Reads the ASI rules) Computes the similarities and displays the Similarity Tree. } \author{ Rapha\"{e}l Couturier \email{raphael.couturier@univ-fcomte.fr} }
e1531a42d55c18c257dec4f93e71b3fe48397b63
e4ff3a5fc17302d8d4fd86b38072e67ffe1aedec
/R/lgama.R
ec3c3fa709992561969858c5b3b64f58e0159c06
[]
no_license
cran/robeth
5782cfcb86e6931152dc8a0b9b8f71e97068e449
5b60aabc7c1b21f15d73d1246ab40c1defdf5f7f
refs/heads/master
2023-08-31T11:44:10.694653
2023-08-22T08:00:05
2023-08-22T09:31:11
17,699,284
0
1
null
null
null
null
UTF-8
R
false
false
165
r
lgama.R
"lgama" <- function(x) { if (missing(x)) messagena("x") gl <- single(1) f.res <- .Fortran("lgamaz", x=to.single(x), gl=to.single(gl)) list(gl=f.res$gl) }
43ffaba675b3a8284a12c6a7f109d97c51f3b983
a60c3de2c40c3667f0d59beff1906a5198d1e044
/scripts/read-moistmix.R
1a2e05acfc6ede0ae188899f316f6ecdc28a5e0d
[]
no_license
schwilklab/trait-flam
c1ab4045294f110f84ab7cfdda0bde26d0f7f2cf
fccb5b255525848be72e9d9521681419234e793d
refs/heads/master
2023-04-27T10:41:08.969869
2023-04-13T19:43:55
2023-04-13T19:43:55
17,681,249
0
0
null
null
null
null
UTF-8
R
false
false
7,930
r
read-moistmix.R
# read-moistmix.R # 1. Reads in the data files for the moisture content and the flamambility of the mixtures, # both observed and predicted # 2. Creates summary statistics # 3. Investigate species differences in dry down intercepts and rates # 4. Investigate non-additivity #library(lme4) #library(plyr) #library (tidyr) #library(dplyr) #must come after plyr #library(stringr) ############################################################# # MOISTURE ############################################################# source("dry-down.R") # for dry.mod # Read in mixtures moisture data mmc <- read.csv("../data/moisture/dry_down_long_mix.csv") mmc$mixcode <- mmc$spcode mmc <- mmc %>% mutate(mixcode = str_replace(mixcode, "Ab", "Abco"), mixcode = str_replace(mixcode, "Ca", "Cade"), mixcode = str_replace(mixcode, "Pi", "Pije"), mixcode = str_replace(mixcode, "Qu", "Quke"), sp1 = str_sub(mixcode, 1,4), sp2 = str_sub(mixcode, 5,8), sp3 = str_sub(mixcode, 9,12)) mmc <- mmc %>% mutate(MC_dry_pred1 = exp(predict(dry.mod, allow.new.levels=TRUE, newdata=data.frame(spcode=sp1, tray = str_c(sp1,"_",rep,"NEW"), hour = hour))), MC_dry_pred2 = exp(predict(dry.mod, allow.new.levels=TRUE, newdata=data.frame(spcode=sp2, tray = str_c(sp2,"_",rep,"NEW"), hour = hour))), MC_dry_pred3 = exp(predict(dry.mod, allow.new.levels=TRUE, newdata=data.frame(spcode=sp3, tray = str_c(sp3,"_",rep,"NEW"), hour = hour))), MC_dry_pred = (MC_dry_pred1 + MC_dry_pred2 + MC_dry_pred3)/3 ) # end of DWS new code mmc$res_MC_dry <- mmc$MC_dry - mmc$MC_dry_pred mmc.sum <- mmc %>% group_by(spcode, hour) %>% summarise(MC_dry.mean = mean(MC_dry), MC_dry.sd = sd(MC_dry), res_MC_dry.mean = mean(res_MC_dry), res_MC_dry.sd = sd(res_MC_dry), bd.mean = mean(bd), bd.sd = sd(bd) ) mmcpred <-mmc[, c("spcode", "hour", "MC_dry_pred")] mmcpred.sum <- mmcpred %>% group_by(spcode, hour) %>% sample_n(1) mmc.sum <- mmc.sum %>% left_join(mmcpred.sum, by=c("spcode", "hour")) 
############################################################################### ## Investigate species differences in dry down intercepts and rates ############################################################################### mmc$logMC_dry <- log(mmc$MC_dry) mmc <- mmc %>% mutate(tray = str_c(spcode, "_", rep)) # Fit a nested model using lmer mdry.mod <- lmer(log(MC_dry) ~ hour*spcode + (1 | tray), data=mmc) summary(mdry.mod) anova(mdry.mod) # comparing the above model with one with bulk density added. Second model is better mdrybd.mod <- lmer(log(MC_dry) ~ hour*spcode + bd + (1 | tray), data=mmc) summary(mdrybd.mod) anova(mdry.mod, mdrybd.mod) ## # subset by species to get the coefficients (y0 and B) for each curve. ## coefuncm <- function(d){ ## mod <- lmer(log(MC_dry)~ hour + (1 | tray ), data=d) ## res <- coef(mod) ## return(data.frame(logmaxMC = res[1,1], logmaxMC.se = res[1,2], di= res[2,1], di.se = res[2,2])) ## } ## mmcdis <- mmc %>% group_by(spcode) %>% do(coefuncm(.)) %>% mutate(maxMC = exp(logmaxMC), maxMC.se=exp(logmaxMC.se)) ############################################################################### #FLAMMABILITY ############## # Read in mixtures flammability data mflam <- read.csv("../data/moisture/burn_moisture_trials_mix.csv") library(plantecophys) mflam$vpd <- RHtoVPD(mflam$rh, mflam$T_C) mflam$mixcode <- mflam$spcode mflam <- mflam %>% mutate(mixcode = str_replace(mixcode, "Ab", "Abco"), mixcode = str_replace(mixcode, "Ca", "Cade"), mixcode = str_replace(mixcode, "Pi", "Pije"), mixcode = str_replace(mixcode, "Qu", "Quke"), sp1 = str_sub(mixcode, 1,4), sp2 = str_sub(mixcode, 5,8), sp3 = str_sub(mixcode, 9,12)) source("burn-moist.R") modspreadsp <- lm(spread ~ actualMC_dry*spcode, data=burnt) summary(modspreadsp) modignitsp <- lm(t2ignit ~ actualMC_dry*spcode, data=burnt) mflam <- mflam %>% mutate(spread_pred1 = predict(modspreadsp, allow.new.levels=TRUE, newdata=data.frame(spcode=sp1, actualMC_dry = actualMC_dry)), spread_pred2 = 
predict(modspreadsp, allow.new.levels=TRUE, newdata=data.frame(spcode=sp2, actualMC_dry = actualMC_dry)), spread_pred3 = predict(modspreadsp, allow.new.levels=TRUE, newdata=data.frame(spcode=sp3, actualMC_dry = actualMC_dry)), spread_pred = (spread_pred1 + spread_pred2 + spread_pred3)/3) mflam <- mflam %>% mutate(ignit_pred1 = predict(modignitsp, allow.new.levels=TRUE, newdata=data.frame(spcode=sp1, actualMC_dry = actualMC_dry)), ignit_pred2 = predict(modignitsp, allow.new.levels=TRUE, newdata=data.frame(spcode=sp2, actualMC_dry = actualMC_dry)), ignit_pred3 = predict(modignitsp, allow.new.levels=TRUE, newdata=data.frame(spcode=sp3, actualMC_dry = actualMC_dry)), ignit_pred = (ignit_pred1 + ignit_pred2 + ignit_pred3)/3) mflam$res_spread <- mflam$spread - mflam$spread_pred mflam$res_ignit <- mflam$t2ignit - mflam$ignit_pred mflam.sum <- mflam %>% group_by(spcode, hour) %>% summarise(t2ignit.mean = mean(t2ignit), t2ignit.sd = sd(t2ignit), spread.mean = mean(spread), spread.sd = sd(spread), actualMC_dry.mean = mean(actualMC_dry), actualMC_dry.sd = sd(actualMC_dry) ) mflampred <-mflam[, c("spcode", "hour", "spread_pred", "ignit_pred")] mflampred.sum <- mflampred %>% group_by(spcode, hour) %>% sample_n(1) mflam.sum <- mflam.sum %>% left_join(mflampred.sum, by=c("spcode", "hour")) ############################################################################### ## Investigate non-additivity ############################################################################### # Residual analysis on moisture res.mod <- lmer(res_MC_dry ~ (1|spcode), data=mmc) summary(res.mod) res2.mod <- lmer(res_MC_dry ~ hour + (1|spcode), data=mmc) summary(res2.mod) anova(res.mod, res2.mod) # Residual analysis on flammability (spread rate and time to ignition) resspread.mod <- lmer(res_spread ~ (1|spcode), data=mflam) summary(resspread.mod) resspread2.mod <- lmer(res_spread ~ hour + (1|spcode), data=mflam) summary(resspread2.mod) anova(resspread.mod, resspread2.mod) resignit.mod <- lmer(res_ignit 
~ (1|spcode), data=mflam) summary(resignit.mod) resignit2.mod <- lmer(res_ignit ~ hour + (1|spcode), data=mflam) summary(resignit2.mod) anova(resignit.mod, resignit2.mod)
00641aa88e0d554b9f8ba8611990a0f63ff71220
543f0ec1fc03efdc13415086b85c03fee1b6f86c
/gtex_ips.r
ec039e76d5792777b63c303c8c34435a8778e56a
[]
no_license
YingChen-bio/myiPS
a1d0db5073ba998d637a3988a0ccb3938b7b552f
4c6eb70bf2cf74e44c78fc817de4809508187dfb
refs/heads/main
2023-06-07T10:41:20.194485
2021-07-15T22:37:07
2021-07-15T22:37:07
366,914,222
0
0
null
null
null
null
UTF-8
R
false
false
14,337
r
gtex_ips.r
#!/usr/local/package/r/3.6.0/bin/Rscript --vanilla library("zFPKM") library("biomaRt") library("ggplot2") library("pheatmap") library("ggfortify") library("M3C") library("Rtsne") setwd("/archive/data/hgc1074/yingchen/Hiseq_2rounds") source("./gtex_ips.function.r") if(FALSE){ ips_tpm <- read.table("all.counts.ips_tpm.txt",sep="\t",header=TRUE)[,-c(2:6)] rownames(ips_tpm) <-as.character(ips_tpm$Geneid) ips_tpm <- ips_tpm[,-1] #gtex_tpm <- readRDS("gtex_tpm.rds") #gtex_tpm <- as.data.frame(gtex_tpm) #get stable id of ENSG of gtex #rownames(gtex_tpm) <- make.names(gsub("\\..*", "", rownames(gtex_tpm)),unique=TRUE) #gtex_tmp_sample <- gtex_tpm[1:100,1:100] #write.csv(gtex_tmp_sample,"gtex_tmp_sample.csv") #write.csv(gtex_tpm,"gtex_tpm_noversion.csv") gtex_tpm <- read.csv("gtex_tpm_noversion.csv") rownames(gtex_tpm) <-as.character(gtex_tpm$X) gtex_tpm <- gtex_tpm[,-1] gtex_ips_tpm <- merge(gtex_tpm,ips_tpm,by=0,all=TRUE) #write.csv(gtex_ips_tpm[1:100,1:100],"gtex_ips_tpm_sample.csv") gtex_ips_tpm[is.na(gtex_ips_tpm)] <- 0 saveRDS(gtex_ips_tpm[1:100,17300:17450],"merged_gtex_ips.rds") gtex_ips_tpm <- readRDS("merged_17838samples.rds") gtex_ips_tpm$sum <- rowSums(gtex_ips_tpm) gtex_cut <- subset(gtex_ips_tpm,sum>0) gtex_clean <- gtex_cut[,-ncol(gtex_cut)] zTPM <- zFPKM(gtex_clean,assayName ="tpm") #here activeGenes <- which(rowMeans(zTPM) > -3) gtex_active <- gtex_clean[activeGenes,] saveRDS(gtex_active,"gtex_ips_active.rds") gtex_active <- readRDS("gtex_ips_active.rds") gtex_active$var <- apply(gtex_active,1,var) gtex_active_clean<- gtex_active[,-ncol(gtex_active)] saveRDS(gtex_active,"gtex_ips_var.rds") selectGenes <- rownames(gtex_active[order(gtex_active$var,decreasing = T),][1:1000,]) gtex_anno <- read.table("GTEx_Analysis_v8_Annotations_SampleAttributesDS.txt",sep="\t",head=TRUE,quote = "",fill=NA) gtex_anno$SAMPID <- gsub("-",".",gtex_anno$SAMPID) gtex_anno <- subset(gtex_anno,gtex_anno$SAMPID %in% colnames(gtex_active)) gtex_anno_tissue <- cbind(gtex_anno$SAMPID, 
as.character(gtex_anno$SMTS)) colnames(gtex_anno_tissue) <- c("SampleID","Tissue") gtex_anno_tissue <- as.data.frame(gtex_anno_tissue) gtex_anno_tissue$Tissue <- as.factor(gtex_anno_tissue$Tissue) ips_anno <- as.data.frame(colnames(gtex_ips_tpm)[17383:17838]) ips_anno$Tissue <- "ips" colnames(ips_anno) <- c("SampleID","Tissue") gtex_ips_anno <- rbind(gtex_anno_tissue,ips_anno) gtex_ips_anno$Tissue <- as.factor(gtex_ips_anno$Tissue) rownames(gtex_ips_anno) <- gtex_ips_anno[,1] saveRDS(gtex_ips_anno,"gtex_ips_anno.rds") pca_data <- t(gtex_active_clean[selectGenes,]) pca_data <- log2(pca_data+1) pcaResults <- prcomp(pca_data) saveRDS(pcaResults,"gtex_pca_1000.rds") #pcaResults <- readRDS("gtex_pca_1000.rds") pdf("gtex.tissue.pca.pdf",paper="a4r") autoplot(pcaResults,data=gtex_anno_tissue,colour="Tissue",label.show.legend = FALSE) dev.off() } #tsne set.seed(999) gtex_active <- readRDS("gtex_ips_active.rds") gtex_anno_tissue <- readRDS("gtex_ips_anno.rds") colnames(gtex_anno_tissue) <- c("ID","Tissue") if(FALSE){ gtex_active$var <- apply(gtex_active,1,var) gtex_active_clean<- gtex_active[,-ncol(gtex_active)] selectGenes <- rownames(gtex_active[order(gtex_active$var,decreasing = T),][1:1000,]) tsne_data <- gtex_active_clean[selectGenes,] tsne_data <- log2(tsne_data+1)saveRDS(tsne_data,"merged_tsne_data.rds") #res <- M3C(tsne_data, des=gtex_anno_tissue, removeplots=TRUE, iters=25,fsize=8,maxK=20,analysistype="chi") saveRDS(tsne_data,"merged_tsne_data.rds") #saveRDS(res,"M3Cresults.rds") #res <- M3C(tsne_data, des=gtex_anno_tissue,method=2,iters=25,maxK=20,analysistype="chi") #saveRDS(res,"M3Cresults_fast.rds") tsne <- Rtsne(t(tsne_data),dims=2,perplexity=30,verbose=TRUE,max_iter=500,num_threads=0) saveRDS(tsne,"rtsne_result.rds") embedding <- as.data.frame(tsne$Y) embedding$Tissue <- as.factor(gtex_anno_tissue$Tissue) #embedding$Tissue <- as.factor(ifelse(embedding$Tissue == "ips","ips","others")) palette_64 <- c("#000000", "#FFFF00", "#1CE6FF", "#FF34FF", "#FF4A46", 
"#008941", "#006FA6", "#A30059", "#FFDBE5", "#7A4900", "#0000A6", "#63FFAC", "#B79762", "#004D43", "#8FB0FF", "#997D87", "#5A0007", "#809693", "#FEFFE6", "#1B4400", "#4FC601", "#3B5DFF", "#4A3B53", "#FF2F80", "#61615A", "#BA0900", "#6B7900", "#00C2A0", "#FFAA92", "#FF90C9", "#B903AA", "#D16100", "#DDEFFF", "#000035", "#7B4F4B", "#A1C299", "#300018", "#0AA6D8", "#013349", "#00846F", "#372101", "#FFB500", "#C2FFED", "#A079BF", "#CC0744", "#C0B9B2", "#C2FF99", "#001E09", "#00489C", "#6F0062", "#0CBD66", "#EEC3FF", "#456D75", "#B77B68", "#7A87A1", "#788D66", "#885578", "#FAD09F", "#FF8A9A", "#D157A0", "#BEC459", "#456648", "#0086ED", "#886F4C", "#34362D", "#B4A8BD", "#00A6AA", "#452C2C", "#636375", "#A3C8C9", "#FF913F", "#938A81", "#575329", "#00FECF", "#B05B6F", "#8CD0FF", "#3B9700", "#04F757", "#C8A1A1", "#1E6E00", "#7900D7", "#A77500", "#6367A9", "#A05837", "#6B002C", "#772600", "#D790FF", "#9B9700", "#549E79", "#FFF69F", "#201625", "#72418F", "#BC23FF", "#99ADC0", "#3A2465", "#922329", "#5B4534", "#FDE8DC", "#404E55", "#0089A3", "#CB7E98", "#A4E804", "#324E72", "#6A3A4C") p <- ggplot(embedding, aes(x=V1, y=V2, color=Tissue)) + geom_point(size=1.25) + scale_colour_manual(values=palette_64)+ guides(colour = guide_legend(override.aes = list(size=6))) + xlab("") + ylab("") + ggtitle("t-SNE Gtex (17382) and iPS(456)") + theme_light(base_size=20) + theme(strip.background = element_blank(), strip.text.x = element_blank(), axis.text.x = element_blank(), axis.text.y = element_blank(), axis.ticks = element_blank(), axis.line = element_blank(), panel.border = element_blank()) ggsave("rtsne_ipsgtex_color.pdf", p, width=24, height=18, units="in") } #colors <- rainbow(length(unique(gtex_anno_tissue$Tissue))) #names(colors) <- unique(gtex_anno_tissue$Tissue) #pdf("gtex_rtsne.pdf",paper="a4r") #par(mgp=c(2.5,1,0)) #plot(tsne$Y, t='n', main="tSNE",xlab="tSNE dimension 1", ylab="tSNE dimension 2", "cex.main"=2,"cex.lab"=1.5) #text(tsne$Y, 
labels=gtex_anno_tissue$Tissue,col=colors[gtex_anno_tissue$Tissue]) #dev.off() #pdf("gtex.tissue.tsne.pdf",paper="a4r") #tsne(tsne_data) #dev.off() ## machine learning for establishing reference cohort data #library("e1071") #library("caret") #library("e1071") #library("glmnet") #library("LiblineaR") #library("HandTill2001") #sampling and fold = y is label data makefolds <- function(y, cv.fold = 5){ n <- length(y) nlvl <- table(y) idx <- numeric(n) folds <- list() for (i in 1:length(nlvl)) { idx[which(y == levels(y)[i])] <- sample(rep(1:cv.fold,length = nlvl[i])) } for (i in 1:cv.fold){ folds[[i]] <- list(train = which(idx!=i), test = which(idx==i)) } return(folds) } ## Inner/Nested 5-fold CV loops ==> predict ==> score 1-5 (nested folds) ==> train calibration model ==> PREDICT makenestedfolds <- function(y, cv.fold = 5){ nfolds <- list() folds <- makefolds(y,cv.fold) names(folds) <- paste0("outer",1:length(folds)) for(k in 1:length(folds)){ inner = makefolds(y[folds[[k]]$train],cv.fold) names(inner) <- paste0("inner",1:length(folds)) for(i in 1:length(inner)){ inner[[i]]$train <- folds[[k]]$train[inner[[i]]$train] inner[[i]]$test <- folds[[k]]$train[inner[[i]]$test] } nfolds[[k]] <- list(folds[k],inner) } names(nfolds) <- paste0("outer",1:length(nfolds)) return(nfolds) } nfolds <- makenestedfolds(as.factor(gtex_anno_tissue$Tissue)) saveRDS(nfolds,"./data/nfolds.rds") #function `subfunc_load_betashdf5_subset_filter_match_save_betasKk()` ------------------------------------------------------------------------------------------------------- # 1. Load betas_v11.h5 (betashdf5) # 2. Subset K.k..train # 3. Unsupervised variance filtering (p = 10 000) # 4. Subset K.k.test # 5. Match CpG probes of filtered K.k.train to K.k.test # 6. Save also the full betas (2801 * 10000) but 10k CpG (are based on the respective K.k train set) # 7. 
Save betas.p.filtered.K.k into a separate folder # Define utility / loader function ---------------------------------------------------------------------------------------------------------------- subfunc_load_betashdf5_subset_filter_match_save_betasKk <- function(K.start = 1, k.start = 0, n.cv.folds = 5, nfolds.. = NULL, fpath.betasv11.hdf5 = NULL, p.var.filtering = 5000, out.path = "gtex_ips.varfilt.5k", out.fname = "gtex_ips.K.k"){ # Check whether nfolds.. is provided nfolds.. <- nfolds # Check whether gtex_ips data existed # rows: 9333 genes # cols: 17838 cell types message("Dimensions of loaded `gtex_ips_data` data file nrows: ", nrow(gtex_active), "; ncols: ", ncol(gtex_active)) # Run CV scheme message("\nNested cross validation (CV) scheme starts ... ", Sys.time()) for(K in K.start:n.cv.folds){ # Nested loop for(k in k.start:n.cv.folds){ if(k > 0){ message("\n Subsetting & filtering inner/nested fold ", K,".", k," ... ",Sys.time()) fold <- nfolds..[[K]][[2]][[k]] ### [[]][[2]][[]] means inner loop # Inner CV loops 1.1-1.5 (Fig. 1.) } else{ message("\n \nSubsetting & filtering outer fold ", K,".0 ... ",Sys.time()) fold <- nfolds..[[K]][[1]][[1]] ### [[]][[1]][[]] means outer loop # Outer CV loops 1.0-5.0 (Fig. 1.) } # Subset K.k$train message(" Step 2. Subsetting cases/columns: " , K, ".", k, " training set @ ", Sys.time()) gtex.K.k.train <- gtex_active[ , fold$train] # rows genes # columns are cells types message(" Step 3. Unsupervised variance filtering of p = ", p.var.filtering, " CpG probes on " , K, ".", k, " training set @ ", Sys.time(), "\n It can take up to 1-2mins to finish.") # sd is calculated over all cols (i.e. cell_typpe) for each row (i.e. 
CpG probe) gtex.p.filtered.K.k.train <- gtex.K.k.train[order(apply(gtex.K.k.train, 1, sd), decreasing = T)[1:5000], ] message(" Dimension of `gtex.p.filtered.K.k.train` nrows: ", nrow(gtex.p.filtered.K.k.train), " ncols: ", ncol(gtex.p.filtered.K.k.train), "\n Variance filtering finished @ ", Sys.time()) # Duration @ single core ca. 1.25-1.5mins message(" \n Check whether there is NA in train set : ", sum(is.na(gtex.p.filtered.K.k.train) == T)) # Transposed afterwards! gtex.p.filtered.K.k.train <- t(gtex.p.filtered.K.k.train) # gtex.p.filtered.K.k.train # matrix # fold$train (ca. 1700-2204) rows cases/patients (sentrixIDs) # rows: patients; cols 10k most variable CpGs message(" Transposing `gtex.p.filtered.K.k.train` finished @ ", Sys.time()) # Garbage collector (note: gc is not absolutely necessary) message(" Clean up memory (garbage collector) @ ", Sys.time()) gc() # Subset genes of the corresping test set # Select only 5000 CpG (`p.var.filtering`) genes (i.e. rows of betasv11.h5) that are filtered based on # the training (sub)fold (i.e. columns of betas.p.varfilt.train) message(" Step 4. Subsetting `betas_v11.h5` cases/columns: " , K, ".", k, " test/calibration set @ ", Sys.time()) gtex.K.k.test <- gtex_active[ , fold$test] message(" Step 5. 
Matching variance filtered p = ", p.var.filtering, " CpG probes corresponding to the " , K, ".", k, " training set @ ", Sys.time(), "\n It can take up to 1-2mins to finish.") gtex.p.filtered.K.k.test <- gtex.K.k.test[match(colnames(gtex.p.filtered.K.k.train), rownames(gtex.K.k.test)), ] # Transpose $test # Note: wrappend in t() => rows: patients ; cols: CpG probenames gtex.p.filtered.K.k.test <- t(gtex.p.filtered.K.k.test) message(" Transposing `betas.p.filtered.K.k.test` finished @ ", Sys.time()) message(" Dimension of `betas.p.filtered.K.k.test` nrows: ", nrow(gtex.p.filtered.K.k.test), " ncols: ", ncol(gtex.p.filtered.K.k.test), "\n CpG matching finished @ ", Sys.time()) # Save also betas.K.k (2801 * 10k CpG selected on the training set) message(" Step 6. Matching variance filtered p = ", p.var.filtering, " CpG probes corresponding to the " , K, ".", k, " training set @ ", Sys.time(), "\n On the full `betas_v11.h5` data. It can take up to 1-2mins to finish.") gtex.K.k <- gtex_active[match(colnames(gtex.p.filtered.K.k.train), rownames(gtex_active)), ] # rows = gens # columns = cell types # no subsetting message(" Transposing `betas.K.k` finished @ ", Sys.time()) gtex.K.k <- t(gtex.K.k) # rows patients # cols CpGs #########here # Security check message("\nAre column names (CpG probes) of $train and $test and full betas identical? ", identical(colnames(gtex.p.filtered.K.k.train), colnames(gtex.p.filtered.K.k.test))) message("Are column names (CpG probes) of $train and full `betas.K.k`` identical? ", identical(colnames(gtex.p.filtered.K.k.train), colnames(gtex.K.k))) message("Are column names (CpG probes) of $test and full `betas.K.k`` identical? 
", identical(colnames(gtex.p.filtered.K.k.train), colnames(gtex.K.k))) # Create output directory folder.path <- file.path(getwd(), "data", out.path) dir.create(folder.path, showWarnings = F, recursive = T) #RData.path <- file.path(folder.path, paste(out.fname, K, k, "RData", sep = ".")) # Save unsupervised variance filtered $train and $test sets save(gtex.p.filtered.K.k.train, gtex.p.filtered.K.k.test, gtex.K.k, fold, file = file.path(folder.path, paste(out.fname, K, k, "RData", sep = ".")) ) } } } subfunc_load_betashdf5_subset_filter_match_save_betasKk()
e8f5772d938a5f47497506241365f199981e6e2c
6d592800320df3943a54761b9e60a21f8fe68d2d
/R/pull_troop_data.R
ce68b10904b1adf1f9f0d20bc13a1457849839a6
[ "MIT" ]
permissive
CenterForPeaceAndSecurityStudies/ISAF
ee2959dedf262a657eff3858a247fa5cb5bcd92f
d904b8d0ec9a2876c909fcd1c7a5ede517887677
refs/heads/master
2023-01-31T23:29:43.228361
2020-12-10T16:18:17
2020-12-10T16:18:17
144,044,148
1
0
null
null
null
null
UTF-8
R
false
false
716
r
pull_troop_data.R
#' Pull IISS troop data from a live Google Sheet.
#'
#' Registers the spreadsheet, downloads every worksheet (one worksheet per
#' year), and stacks them into a single data frame. A fixed pause between
#' worksheet reads avoids HTTP 429 rate limiting.
#'
#' @param ws  Worksheet registry name passed through to pull_live_googledoc().
#' @param key Google Sheets document key.
#' @return A data.frame containing the rows of all worksheets combined.
pull_troop_data <- function(ws = "IISS_troopdata", key = '1Q_PNTl6JAWYIxvKfHfXt5mPA-VrgDxdoCUkvQ4i_eQo'){
  troops <- pull_live_googledoc(ws = ws, key = key)
  subsheet_names <- googlesheets::gs_ws_ls(troops)
  # Preallocate the result list; seq_along() is also safe when the
  # spreadsheet has no worksheets (1:length() would yield c(1, 0)).
  sheets_out <- vector("list", length(subsheet_names))
  for (i in seq_along(subsheet_names)) {
    sheets_out[[i]] <- googlesheets::gs_read(troops, ws = i)
    # include a pause to avoid error: Too Many Requests (RFC 6585) (HTTP 429)
    Sys.sleep(8)
  }
  # rbindlist() accepts a list of data frames directly; the previous
  # structure(..., row.names = c(NA, -n), class = "data.frame") wrapper was
  # a no-op hack and has been removed.
  troops <- as.data.frame(data.table::rbindlist(sheets_out))
  # returns df with all years
  return(troops)
}
6dab8dadac5f8718204ec33deaeb2addeab52be1
981cbaf799599f6d23bf79cdeb4ef72a8f3eb8a5
/script/3_cleanse.R
ed953467e08fdf276d081e3e201a3859b5e281d2
[]
no_license
noahhhhhh/Santander_Customer_Satisfaction
51249cdc53ef6fcf545cd0e069e3b5e3458857af
2cce8e82ab12659445818f42316cdd8e7ae9d8b6
refs/heads/master
2021-01-17T17:14:28.761063
2016-05-10T02:38:32
2016-05-10T02:38:32
54,017,892
0
0
null
null
null
null
UTF-8
R
false
false
2,426
r
3_cleanse.R
## Script 3: cleanse the explored Santander data set.
## Drops zero-variance columns, then exact duplicate columns, and saves the
## result as `dt.cleansed`. Run standalone as part of a numbered pipeline;
## the setwd()/rm() workspace management is kept for that reason.
setwd("/Volumes/Data Science/Google Drive/data_science_competition/kaggle/Santander_Customer_Satisfaction/")
rm(list = ls()); gc();

# library() errors immediately if a dependency is missing (require() only warns)
library(data.table)
library(caret)
library(purrr)
source("utilities/preprocess.R")
load("../data/Santander_Customer_Satisfaction/RData/dt_explored.RData")

#######################################################################################
## 1.0 remove zero vars ###############################################################
#######################################################################################
dim(dt.explored)
# [1] 151838    371
# Zero-variance diagnostics on the full data and on the train/test partitions
# (TARGET == -1 marks test rows in this pipeline).
nzv <- nearZeroVar(dt.explored[, !c("ID", "TARGET"), with = FALSE], saveMetrics = TRUE)
nzv.train <- nearZeroVar(dt.explored[TARGET >= 0, ][, !c("ID", "TARGET"), with = FALSE], saveMetrics = TRUE)
nzv.test <- nearZeroVar(dt.explored[TARGET == -1, ][, !c("ID", "TARGET"), with = FALSE], saveMetrics = TRUE)
cols.zeroVar.all <- rownames(nzv[nzv$zeroVar == TRUE, ])
cols.zeroVar.train <- rownames(nzv.train[nzv.train$zeroVar == TRUE, ])
cols.zeroVar.test <- rownames(nzv.test[nzv.test$zeroVar == TRUE, ])
## all, train, and test difference on zeroVar
setdiff(cols.zeroVar.all, cols.zeroVar.train) # character(0)
setdiff(cols.zeroVar.train, cols.zeroVar.all) # character(0)
dt.explored <- dt.explored[, !cols.zeroVar.all, with = FALSE]
dim(dt.explored)
# [1] 151838    337

#######################################################################################
## 2.0 remove duplicates ##############################################################
#######################################################################################
features_pair <- combn(names(dt.explored), 2, simplify = FALSE)
toRemove <- c()
for (pair in features_pair) {
  f1 <- pair[1]
  f2 <- pair[2]
  if (!(f1 %in% toRemove) && !(f2 %in% toRemove)) {
    # isTRUE(all(...)) is NA-safe: the original `if (all(x == y))` raised an
    # error whenever a compared column contained NA, because all() returns NA.
    if (isTRUE(all(dt.explored[[f1]] == dt.explored[[f2]]))) {
      cat(f1, "and", f2, "are equals.\n")
      toRemove <- c(toRemove, f2)
    }
  }
}
dt.explored <- dt.explored[, !toRemove, with = FALSE]
dim(dt.explored)
# [1] 151838    310

#######################################################################################
## save ###############################################################################
#######################################################################################
dt.cleansed <- dt.explored
save(dt.cleansed, file = "../data/Santander_Customer_Satisfaction/RData/dt_cleansed.RData")
a710c3d44b0bf417e1104f4a4b8cd615489269ea
f543e2312931c21441e77b9c8a2713e8d0af299e
/plot3_txt.txt
0008b2560ace3142144dee36331db3923183298c
[]
no_license
askaikai/ExData_project2
a5d76287a173f94403424cf3a70ec82cf3aa2a84
c18558123992eb527bde299adb5c408f609ffe73
refs/heads/master
2021-01-17T05:55:14.880181
2014-05-23T18:19:23
2014-05-23T18:19:23
null
0
0
null
null
null
null
UTF-8
R
false
false
1,929
txt
plot3_txt.txt
# plot3.R — EPA National Emissions Inventory, course project 2, question 3.
# Produces plot3.png: PM2.5 emissions in Baltimore City by source type,
# 1999-2008, one facet per type with a linear trend line.
plot3 = function(){
  # Of the four types of sources indicated by the type (point, nonpoint,
  # onroad, nonroad) variable, which of these four sources have seen
  # decreases in emissions from 1999–2008 for Baltimore City?
  # Which have seen increases in emissions from 1999–2008?
  # Use the ggplot2 plotting system to make a plot answer this question.
  #
  # useful page: http://docs.ggplot2.org/0.9.3.1/facet_grid.html
  # first, let's make sure we are in the right directory
  # NOTE(review): setwd() here changes the working directory permanently for
  # the whole session (no on.exit() restore).
  setwd('~/Documents/Coursera//ExploratoryDataAnalysis//ExData_project2')
  library(ggplot2)
  library(reshape2)
  # read in data & code
  NEI <- readRDS("../data/summarySCC_PM25.rds")
  # NOTE(review): SCC is read but never used in this function.
  SCC <- readRDS("../data/Source_Classification_Code.rds")
  # select data just from Baltimore (fips 24510)
  Bmore=NEI[NEI$fips == "24510",]
  Bmore$type=as.factor(Bmore$type)
  Bmore$year=as.factor(Bmore$year)
  # Sum emissions in each year x type cell.
  # NOTE(review): c() around the factors may strip them to integer codes
  # depending on the R version — confirm behaviour on the target R version.
  BmoreSummary=tapply(Bmore$Emissions, INDEX=list(c(Bmore$year),c(Bmore$type)), sum,simplify = TRUE)
  BmoreSummary=as.data.frame(BmoreSummary)
  # Restore readable dimension names from the factor levels.
  colnames(BmoreSummary)=levels(Bmore$type)
  rownames(BmoreSummary)=levels(Bmore$year)
  BmoreSummary$year=row.names(BmoreSummary)
  # Long format: one row per (year, type) with its Emissions value.
  BmoreMelt=melt(BmoreSummary,variable.name='type',value.name='Emissions')
  BmoreMelt$year=as.numeric(BmoreMelt$year)
  # Rescale to thousands of tons for the y-axis label.
  BmoreMelt$Emissions=BmoreMelt$Emissions/(10^3)
  # now plot
  g = ggplot(BmoreMelt, aes(year, Emissions)) + geom_point(size=.8)
  # NOTE(review): labs(list(...)) is legacy ggplot2 usage — modern versions
  # expect labs(title = ..., x = ..., y = ...); confirm against the
  # installed ggplot2 version.
  g = g + facet_grid(.~type) + theme_grey(base_size = 4) +
    geom_smooth(method="lm", se=FALSE, col="steelblue", size=.3) +
    labs(list(title = expression(paste(PM[2.5]," Emission in Baltimore City by Type")),
              x = "year",
              y = expression(paste("Total Emission (", 10^3," tons)")))) +
    scale_x_continuous(limits=c(1998, 2009), breaks = seq(1999, 2008, by = 3), labels=c("1999","2002","2005","2008"))
  ggsave(filename="plot3.png",plot=g,width=8,height=6,units="cm")
}
c0a38bbf0c15212744af8ae8a61dad34c8621a59
387ed1d9fcaadb5744bcc298b6883f27b7303c59
/ColumbiaImages.R
ba129e511d7f59a91eb4f81f3986a6e87aaede5e
[]
no_license
bu-w/Image-Classification
1ca0de9febfa7b9c7cda1f0804dd94d6c074adf0
57bf84554860e5ac6e625e329facb12c8b8b9d0b
refs/heads/main
2023-04-12T04:06:52.827531
2021-04-23T06:34:50
2021-04-23T06:34:50
360,786,139
0
0
null
null
null
null
UTF-8
R
false
false
6,490
r
ColumbiaImages.R
# ColumbiaImages.R — classify photos as "outdoor-day" vs. other from JPEG
# color statistics, comparing logistic regression (GLM) and SVM, first with
# 3 median-intensity features and then with 303 per-channel quantile features.
#PACKAGES-----------------------------------------------------------------------
library(jpeg)
library(e1071)
library(scatterplot3d)
#IMPORT DATA SET----------------------------------------------------------------
pm <- read.csv("~/Desktop/2021WINTER/MATH3333/project/photoMetaData.csv")
n <- nrow(pm)
set.seed(3333)
# partitioning the dataset into 50% training set and 50% testing set.
# in logic form
trainFlag <- (runif(n) > 0.5)
y <- as.numeric(pm$category == "outdoor-day")
# read variable "name" for all images
X <- matrix(NA, ncol=3, nrow=n)
for (j in 1:n) {
  img <- readJPEG(paste0("~/Desktop/2021WINTER/MATH3333/project/columbiaImages/",pm$name[j]))
  X[j,] <- apply(img,3,median) #extract 3 median intensities of each color channel
  print(sprintf("%03d / %03d", j, n))
}
#OVERVIEW OF DATA---------------------------------------------------------------------------------------
scatterplot3d(x = X[,1], y = X[,2], z = X[,3], color = c("blue", "red")[as.factor(y)], pch = 19)
#BUILD GLM MODEL---------------------------------------------------------------------------------------
out <- glm(y ~ X, family=binomial, subset=trainFlag)
out$iter
summary(out)
#CLASSIFICATION---------------------------------------------------------------------------------------
# Manual inverse-logit of the linear predictor over ALL rows (train + test).
pred <- 1 / (1 + exp(-1 * cbind(1,X) %*% coef(out)))
# NOTE(review): these two lines show the labels sorted by predicted score,
# not thresholded predictions.
y[order(pred)] #prediction outcome in training data set
y[!trainFlag][order(pred[!trainFlag])] #prediction outcome in testing data set
mean((as.numeric(pred > 0.5) == y)[trainFlag]) #[1] 0.729798
# 77% of photo belongs to y in training data set
mean((as.numeric(pred > 0.5) == y)[!trainFlag]) #[1] 0.7747525
# 73% of photo belongs to y in testing data set
#ANALYSIS---------------------------------------------------------------------------------------
#misclassification table
# NOTE(review): the "predicted group" column here is the actual labels
# reordered by predicted score, not as.numeric(pred > 0.5) — confirm this
# is the intended confusion table.
tt<-table(y[!trainFlag],y[!trainFlag][order(pred[!trainFlag])],dnn=c("actual group","predicted group"))
tt
#misclassification rate
1-sum(diag(tt))/sum(tt) #[1] 0.3663366
## ROC curve (see lecture 12)
# Sweep a threshold over the score quantiles and record sensitivity and
# specificity at each cut-off.
roc <- function(y, pred) {
  alpha <- quantile(pred, seq(0,1,by=0.01))
  N <- length(alpha)
  sens <- rep(NA,N)
  spec <- rep(NA,N)
  for (i in 1:N) {
    predClass <- as.numeric(pred >= alpha[i])
    sens[i] <- sum(predClass == 1 & y == 1) / sum(y == 1)
    spec[i] <- sum(predClass == 0 & y == 0) / sum(y == 0)
  }
  return(list(fpr=1- spec, tpr=sens))
}
r <- roc(y[!trainFlag], pred[!trainFlag])
# y[!trainFlag] means the true value of y in testing data set
# pred[!trainFlag] means the predicted value of y in testing data set
plot(r$fpr, r$tpr, xlab="false positive rate", ylab="true positive rate", type="l")
abline(0,1,lty="dashed")
# auc — Riemann-sum approximation of the area under the ROC curve
auc <- function(r) { sum((r$fpr) * diff(c(0,r$tpr))) }
glmAuc <- auc(r)
glmAuc #[1] 0.817901
#SVM MODEL--------------------------------------------------------------------------------
svm_model <- svm(X,y,subset=trainFlag,type="C-classification",kernel = "radial")
summary(svm_model)
pred_svm <- predict(svm_model,X[!trainFlag,])
pred_svm
#ANALYSIS------------------------------------------------------------------------------------
tt_svm <- table(y[!trainFlag],pred_svm,dnn=c("actual group","predicted group"))
tt_svm
#misclassification rate
1-sum(diag(tt_svm))/sum(tt_svm) #[1] 0.220297
#roc
# NOTE(review): order(pred_svm) returns a permutation of indices, not scores;
# feeding it to roc() is almost certainly unintended — presumably
# as.numeric(as.character(pred_svm)) was meant. Verify.
r_svm <- roc(y[!trainFlag],order(pred_svm))
# y[!trainFlag] means the true value of y in testing data set
# pred[!trainFlag] means the predicted value of y in testing data set
plot(r_svm$fpr, r_svm$tpr, xlab="false positive rate", ylab="true positive rate", type="l")
abline(0,1,lty="dashed")
auc(r_svm) # [1] 0.5238525
#EXTRACT MORE INFORMATION FROM THE IMAGES----------------------------------------------------------------
# Replace the 3 medians with 101 quantiles per channel (303 features).
X <- matrix(NA, ncol=3*(length(seq(0,1,0.01))), nrow=n)
for (j in 1:n) {
  img <- readJPEG(paste0("~/Desktop/2021WINTER/MATH3333/project/columbiaImages/",pm$name[j]))
  X[j,] <- apply(img,3,quantile,probs=seq(0,1,0.01))
  print(sprintf("%03d / %03d", j, n))
}
#logistic regression
# NOTE(review): 303 predictors with ~400 training cases — perfect training
# accuracy below (#[1] 1) indicates overfitting.
out <- glm(y ~ X, family=binomial, subset=trainFlag)
pred <- 1 / (1 + exp(-1 * cbind(1,X) %*% coef(out)))
mean((as.numeric(pred > 0.5) == y)[trainFlag]) #[1] 1
mean((as.numeric(pred > 0.5) == y)[!trainFlag]) #[1] 0.6089109
#misclassification table
tt<-table(y[!trainFlag],y[!trainFlag][order(pred[!trainFlag])],dnn=c("actual group","predicted group"))
tt
#misclassification rate
1-sum(diag(tt))/sum(tt) #[1] 0.4059406
r <- roc(y[!trainFlag], pred[!trainFlag])
plot(r$fpr, r$tpr, xlab="false positive rate", ylab="true positive rate", type="l")
abline(0,1,lty="dashed")
glmAuc <- auc(r)
glmAuc #[1] 0.6105962
# svm
svm_model <- svm(X,y,subset=trainFlag,type="C-classification",kernel = "radial")
summary(svm_model)
pred_svm <- predict(svm_model,X[!trainFlag,])
pred_svm
tt_svm <- table(y[!trainFlag],pred_svm,dnn=c("actual group","predicted group"))
tt_svm
#misclassification rate
1-sum(diag(tt_svm))/sum(tt_svm) #[1] 0.1980198
#roc
r_svm <- roc(y[!trainFlag],order(pred_svm))
# y[!trainFlag] means the true value of y in testing data set
# pred[!trainFlag] means the predicted value of y in testing data set
plot(r_svm$fpr, r_svm$tpr, xlab="false positive rate", ylab="true positive rate", type="l")
abline(0,1,lty="dashed")
auc(r_svm) #[1] 0.492113
#CHANGE THE PROPORTION OF TRAINING SET FOR UPDATED VARIABLE X---------------------------------------------------------
# Repeat with an 80/20 train/test split.
trainFlag <- (runif(n) > 0.2)
out <- glm(y ~ X, family=binomial, subset=trainFlag)
pred <- 1 / (1 + exp(-1 * cbind(1,X) %*% coef(out)))
mean((as.numeric(pred > 0.5) == y)[trainFlag]) #[1] 1
mean((as.numeric(pred > 0.5) == y)[!trainFlag]) #[1] 0.6424242
#misclassification table
tt<-table(y[!trainFlag],y[!trainFlag][order(pred[!trainFlag])],dnn=c("actual group","predicted group"))
tt
#misclassification rate
1-sum(diag(tt))/sum(tt) #[1] 0.4242424
r <- roc(y[!trainFlag], pred[!trainFlag])
plot(r$fpr, r$tpr, xlab="false positive rate", ylab="true positive rate", type="l")
abline(0,1,lty="dashed")
glmAuc <- auc(r)
glmAuc #[1] 0.7479146
# svm
svm_model <- svm(X,y,subset=trainFlag,type="C-classification",kernel = "radial")
summary(svm_model)
pred_svm <- predict(svm_model,X[!trainFlag,])
pred_svm
tt_svm <- table(y[!trainFlag],pred_svm,dnn=c("actual group","predicted group"))
tt_svm
#misclassification rate
1-sum(diag(tt_svm))/sum(tt_svm) #[1] 0.2
#roc
r_svm <- roc(y[!trainFlag],order(pred_svm))
plot(r_svm$fpr, r_svm$tpr, xlab="false positive rate", ylab="true positive rate", type="l")
abline(0,1,lty="dashed")
auc(r_svm) #[1] 0.5523857
554621f3d5017383eafe75371b75b4489c31db73
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/woeBinning/examples/woe.binning.plot.Rd.R
fc3d3b5ba7ccd7773a35942c727062a4196943ad
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
656
r
woe.binning.plot.Rd.R
# Extracted example code for woeBinning::woe.binning.plot (generated from
# the package's Rd examples); demonstrates plotting binned variables.
library(woeBinning)

### Name: woe.binning.plot
### Title: Visualization of Binning
### Aliases: woe.binning.plot

### ** Examples

# Load German credit data
data(germancredit)
df <- germancredit

# Bin all variables of the data frame (apart from the target variable)
# with default parameter settings
binning <- woe.binning(df, 'creditability', df)

# Plot all binned variables as multiple plots
woe.binning.plot(binning)

# Plot only the first four binned variables with the highest IV value
# as multiple plots
woe.binning.plot(binning, plot.range='1:4')

# Plot the binned variables in single plots
woe.binning.plot(binning, multiple.plots=FALSE)
ebac4a446a8ab985d2e5522301a14aa119bdd534
3b2b5636282ae842def1c16265cccac19f9d125a
/man/plotACF.Rd
708ed2a615e09529b17abed72550a45fc28f9f50
[ "BSD-2-Clause" ]
permissive
ilkkavir/LPI.gdf
06cf2ccb0ed19b7a04df417fe93cef2f7e530115
088a53c3624c68406b87ccaf8d1451ef678c3b62
refs/heads/master
2023-05-28T09:56:00.948551
2023-05-15T13:23:37
2023-05-15T13:23:37
205,375,323
1
0
null
null
null
null
UTF-8
R
false
false
2,192
rd
plotACF.Rd
\name{plotACF}
\title{plot ACF}
\alias{plotACF}
\description{Plot incoherent scatter autocovariance function.}
\usage{
plotACF( data , part='real' , pdf=NULL , jpg=NULL , figNum=NULL , zlim=NULL ,
ylim=NULL , xlim=NULL , cex=1 , bg='white' , fg='black' , width=8.27 ,
height=11.69 , paper='a4' , res=300 , stdThrsh=Inf , yheight=FALSE ,
llhT=NULL , azelT=NULL , llhR=NULL , lags=NULL , SIunits=TRUE )
}
\arguments{
\item{ data }{ Data directory path(s) or an output list from readLPIdir or plotLagProfiles. The data path is a vector that may contain both full paths to files and directory names. }
\item{ part }{ Real part / imaginary part / standard deviation plot selection. Use string "real", "imaginary", or "error". (Only the first two characters are used, case-insensitive.) }
\item{ pdf }{ pdf output file name }
\item{ jpg }{ jpg output file name }
\item{ figNum }{ Device number to use for plotting }
\item{ zlim }{ z axis limits }
\item{ ylim }{ y axis limits }
\item{ xlim }{ x axis (lag) limits }
\item{ cex }{ Scaling factor for figure labels and titles. }
\item{ bg }{ Background color }
\item{ fg }{ Foreground color }
\item{ width }{ plot width }
\item{ height }{ plot height }
\item{ paper }{ Paper selection }
\item{ res }{ resolution for jpg images }
\item{ stdThrsh }{ Standard deviation threshold for readACF }
\item{ yheight }{ Logical, should the range be converted to height? Works only if the result files contain site positions and pointing directions. }
\item{ llhT }{ c( Latitude [deg], longitude [deg], height [m] ) of the transmitter site }
\item{ azelT }{ c(azimuth [deg] , elevation [deg]) of the transmitter beam }
\item{ llhR }{ c( Latitude [deg], longitude [deg], height [m] ) of the receiver site }
\item{ lags }{ Time lag selection. Effective only if data is a file path. }
\item{ SIunits }{ Logical, should range be expressed in km and lag in ms?
Works only if range.km and lag.us are stored in the data files} } \value{ A list similar with that returned by \link{readACF} } \author{Ilkka Virtanen (University of Oulu, Finland) \cr \email{ilkka.i.virtanen@oulu.fi}}
48c248bc34af521c1fd7a906912bd89830bd3f35
6f257dfac5625c2bc5cd0fa418c94b432bac472d
/man/gf4cimes.Rd
bd1bb15604f331481d5d366502ee115366ca3a54
[]
no_license
GastonMauroDiaz/caiman
591ac8fa2d46a291ff2692cd825021ec3970b650
c37d0e4a0af4774b67c30dc3c22c1b55cbe3f153
refs/heads/master
2022-01-25T21:34:30.218488
2022-01-21T18:52:43
2022-01-21T18:52:43
61,065,044
10
1
null
null
null
null
UTF-8
R
false
true
591
rd
gf4cimes.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ZZZ_group_untested.R \name{gf4cimes} \alias{gf4cimes} \alias{gf4cimes,BinImage-method} \alias{gf4cimes,character-method} \title{todo} \usage{ gf4cimes(x, ...) \S4method{gf4cimes}{BinImage}(x, z, a, m, angleWidth, file, txt = TRUE) \S4method{gf4cimes}{character}(x, z, a, m, subfolder = "out", ...) } \arguments{ \item{x}{todo} \item{...}{todo} \item{z}{todo} \item{a}{todo} \item{m}{todo} \item{angleWidth}{todo} \item{file}{todo} \item{txt}{todo} } \value{ todo } \description{ todo } \examples{ todo }
05ccdb975fdcf1ee70d64537403e2ac0d65d7c7d
f667644b730dc41b372355eeb50a61d4c3b37b74
/script.R
5d066bf904440acae464db6d6be4ef8a15e50ed4
[]
no_license
lukasvin/lukas-vinoelst.be
999c17d479f9aeac0c2bb079a5aa12077ce20541
27cc83a43782a3401a4a1bc5739859b9fa503542
refs/heads/master
2021-05-18T13:42:53.162562
2020-04-15T18:01:52
2020-04-15T18:01:52
251,267,169
0
1
null
null
null
null
UTF-8
R
false
false
7,585
r
script.R
##-------------------------------------------------------------------------
# Wine classification assignment: PCA exploration of 13 scaled chemical
# measurements, then grape-variety (V1) classification via LDA, QDA and
# k-nearest neighbours, compared on repeated train/validation/test splits.
#! change working directory before running !
# Load data
install.packages("ggfortify")
install.packages("ggplot2")
install.packages("Rcpp")
library(MASS)
library(class)
library(ggplot2)
library(ggfortify)
wijn <- read.csv("wijn.csv", header=FALSE, sep = ",")
# NOTE(review): attach() puts the columns on the search path (V1 is used
# bare below); generally discouraged, but the rest of the script relies on it.
attach(wijn)
summary(wijn)
# Scale the data (columns 2:14 are the measurements; V1 is the class label)
wijnScaled = scale(wijn[,2:14]);
# keep an extra column with the grape varieties
wijnScaledSp = wijn;wijnScaledSp[,2:14] = wijnScaled;
wijnScaledSp$V1 <- as.character(wijnScaledSp$V1)
wijnScaledSp$V1
##-------------------------------------------------------------------------
# PCA on the scaled data [probably best since no measurement units are given]
wijnScaled.pca = prcomp(wijnScaled,scale=TRUE);
# plots
screeplot(wijnScaled.pca)
plot(wijnScaled.pca)
# Pairwise PC biplots, coloured by grape variety.
autoplot(wijnScaled.pca, data = wijnScaledSp, x=1,y=2,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # interesting for PC1 and PC2
autoplot(wijnScaled.pca, data = wijnScaledSp, x=1,y=3,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # interesting for PC1
autoplot(wijnScaled.pca, data = wijnScaledSp, x=1,y=4,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # interesting for PC1
autoplot(wijnScaled.pca, data = wijnScaledSp, x=1,y=5,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # interesting for PC1
autoplot(wijnScaled.pca, data = wijnScaledSp, x=2,y=1,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # interesting for PC1 and PC2
autoplot(wijnScaled.pca, data = wijnScaledSp, x=2,y=3,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # interesting for PC2
autoplot(wijnScaled.pca, data = wijnScaledSp, x=2,y=4,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # interesting for PC2
autoplot(wijnScaled.pca, data = wijnScaledSp, x=2,y=5,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # interesting for PC2
autoplot(wijnScaled.pca, data = wijnScaledSp, x=3,y=1,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # interesting for PC1
autoplot(wijnScaled.pca, data = wijnScaledSp, x=3,y=2,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # interesting for PC2
autoplot(wijnScaled.pca, data = wijnScaledSp, x=3,y=4,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # not interesting
autoplot(wijnScaled.pca, data = wijnScaledSp, x=3,y=5,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # not interesting
autoplot(wijnScaled.pca, data = wijnScaledSp, x=4,y=1,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # interesting for PC1
autoplot(wijnScaled.pca, data = wijnScaledSp, x=4,y=2,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # interesting for PC2
autoplot(wijnScaled.pca, data = wijnScaledSp, x=4,y=3,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # not interesting
autoplot(wijnScaled.pca, data = wijnScaledSp, x=4,y=5,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # not interesting
autoplot(wijnScaled.pca, data = wijnScaledSp, x=5,y=1,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # interesting for PC1
autoplot(wijnScaled.pca, data = wijnScaledSp, x=5,y=2,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # interesting for PC2
autoplot(wijnScaled.pca, data = wijnScaledSp, x=5,y=3,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # not interesting
autoplot(wijnScaled.pca, data = wijnScaledSp, x=5,y=4,colour = "V1",loadings=TRUE, loadings.label=TRUE, loadings.colour = "blueviolet") # not interesting
# numerical info
summary(wijnScaled.pca)
scaledRot = wijnScaled.pca$rotation;
# PC1 explains 36.2% of the data; a large PC1 corresponds to a lower amount of
# flavonoids, phenols, proteins and proanthocyanidins relative to the other
# variables. [ interpretation: amount of flavour in the wine / how acidic the wine is ]
# PC2 explains 19.2% of the data; a large PC2 corresponds to a lower hue and
# protein concentration relative to a higher colour intensity and higher sugar
# content of the grape. [ interpretation: ? ]
# PC3 explains 11.1% of the data; a large PC3 corresponds to a lower sugar
# content of the grape relative to a higher ash content and higher alkalinity.
# [ interpretation: ? ]
scaledPred = predict(wijnScaled.pca);
##-------------------------------------------------------------------------
# Classification
# function for the apparent error rate (optionally prints the confusion table)
aer = function(y1,y2,conf=TRUE) {
  confusion = table(y1,y2)
  if (conf) {print(confusion)}
  observaties = sum(confusion)
  verkeerd = observaties-sum(diag(confusion))
  verkeerd/observaties
}
# Split into train, validation and test data (50 random repetitions)
lda.ervec.val = c(1:50);
lda.ervec.test = c(1:50);
qda.ervec = c(1:50);
for (i in 1:50) {
  indices = 1:178;
  test = sample(178,50);
  validate_train = indices[-test];
  validate = sample(validate_train,50);
  train = validate_train[-which(validate_train %in% validate)];
  ##-----------------------------------------------------------------------
  # discriminant-analysis method
  # linear
  wijn.lda <- lda(V1 ~.,wijnScaledSp ,subset = train);
  lda.pred.val <- predict(wijn.lda, wijnScaledSp[validate,]);
  lda.pred.test <- predict(wijn.lda, wijnScaledSp[test,]);
  lda.ervec.val[i] = aer(wijnScaledSp$V1[validate],lda.pred.val$class,conf=FALSE);
  lda.ervec.test[i] = aer(wijnScaledSp$V1[test],lda.pred.test$class,conf=FALSE);
  # quadratic
  wijn.qda <- qda(V1 ~.,wijnScaledSp ,subset = train);
  qda.pred.val <- predict(wijn.qda, wijnScaledSp[validate,]);
  qda.ervec[i] = aer(wijnScaledSp$V1[validate],qda.pred.val$class,conf=FALSE);
}
# average error rates over 50 random splits
lda.er.val = mean(lda.ervec.val);lda.er.val
lda.er.test = mean(lda.ervec.test);lda.er.test
qda.er = mean(qda.ervec);qda.er
##----------------------------------------------------------------------
# k-nearest-neighbours method
# find the k with the smallest error rate
# NOTE(review): `train` and `validate` here are leftovers from the LAST
# iteration of the 50-split loop above — only one split is evaluated.
max_k = 78;
resultaten = cbind(CV=NULL);
klijst = 1:max_k;
for (k in klijst) {
  resultaten = rbind(resultaten,cbind( CV=aer(V1[validate],knn(wijnScaledSp[train,],wijnScaledSp[validate,],V1[train],k),FALSE) ))
}
head(resultaten)
plot(c(0,78),c(0,1),main="Error rate in function of neighbours",xlab="Number of neigbours",ylab="Error rate",type="n");
# NOTE(review): `lda.er` is never defined (only lda.er.val / lda.er.test),
# so this abline() call errors at runtime — presumably lda.er.val was meant.
abline(h=lda.er, col = "blueviolet");
abline(h=qda.er, col="chartreuse")
matplot(1:78,resultaten,type='l',add=TRUE)
legend(x=0, y=0.8, legend=c('knn','lda','qda'), fill=c("black","blueviolet","chartreuse"))
# NOTE(review): `resultaten` has a single column ("CV"), so resultaten[,3]
# raises "subscript out of bounds" — presumably resultaten[,1] was meant.
best_k = which.min(resultaten[,3])
resultaten
# lda gives on average a smaller error rate than qda and is in the same order
# of magnitude as knn
##----------------------------------------------------------------------
# evaluate against the test set
lda.er.test # already computed in the previous section
CV.test.ervec = c(1:50)
for (j in 1:50) {
  CV.test.ervec[j]=aer(V1[test],knn(wijnScaledSp[train,],wijnScaledSp[test,],V1[train],best_k),FALSE);
}
# average error rate of 50 nearest-neighbour models with best_k
CV.er.test = mean(CV.test.ervec);
CV.er.test
# lda gives a better aer.
1afdf97124fff237a5f64a7daed5eef3fddf7dd0
49ff0bc7c07087584b907d08e68d398e7293d910
/mbg/mbg_core_code/mbg_central/LBDCore/R/load_populations_cov.R
8f9053b259995809bebb1309874ddb2410cab8f7
[]
no_license
The-Oxford-GBD-group/typhi_paratyphi_modelling_code
db7963836c9ce9cec3ca8da3a4645c4203bf1352
4219ee6b1fb122c9706078e03dd1831f24bdaa04
refs/heads/master
2023-07-30T07:05:28.802523
2021-09-27T12:11:17
2021-09-27T12:11:17
297,317,048
0
0
null
null
null
null
UTF-8
R
false
false
4,669
r
load_populations_cov.R
#' @title Load Populations For Covariates
#' @description This gets the population in each cell which you need to convert the rates to counts for fractional raking;
#' it can also get the covariate stackers if you would like.
#'
#' NOTE(review): besides its parameters, this function reads several objects
#' from the calling environment: `pop_release`, and (in the 'prevalence'
#' branch) `indicator_group`, `indicator`, `run_date`, `pathaddin`. It also
#' calls project helpers (`load_worldpop_covariate`, `fetch_from_rdata`,
#' `invlogit`) that must be in scope.
#'
#' @param reg modeling region over which you are operating
#' @param pop_measure the world pop covariate measure used in your model
#' @param measure should be deprecated; currently a flag that if set to 'prevalence' this function also grabs the covariate stackers
#' @param simple_polygon the simple polygon for your region
#' @param simple_raster the simple raster for your model
#' @param year_list The years you are modeling over, will be used to pull populations
#' @param interval_mo the number of months between time steps
#' @param outputdir currently not used, there is a commented out line of code which can save the population raster if you want
#' @param pixel_id integer cell indices of the simple raster to keep (rows of the output are these cells, repeated per year)
#'
#' @return a data table with one row per (pixel, year): one column per covariate stacker (if requested), `pop`, `pixel_id` and `year`,
#' so rates can be turned into counts for fractional raking and aggregation
#' @export
load_populations_cov <- function(reg, pop_measure, measure = "prevalence", simple_polygon, simple_raster, year_list, interval_mo, outputdir, pixel_id) {
  message("Loading the world pop rasters for this region")
  ## Pull 2000-2015 annual population brick using new covariates function
  pop_raster_annual <- load_worldpop_covariate(
    template_raster = simple_polygon,
    pop_measure = pop_measure,
    pop_release = pop_release,
    start_year = min(year_list),
    end_year = max(year_list),
    interval = as.numeric(interval_mo)
  )
  ## extend and crop pop raster to ensure it matches the simple raster
  #not convinced this is totally needed
  pop <- pop_raster_annual[[1]]
  pop <- extend(pop, simple_raster, values = NA)
  pop <- crop(pop, extent(simple_raster))
  pop <- setExtent(pop, simple_raster)
  pop <- raster::mask(pop, simple_raster)
  ## check to ensure the pop raster matches the simple raster in extent and resolution
  if (extent(pop) != extent(simple_raster)) {
    stop("population raster extent does not match simple raster")
  }
  if (any(res(pop) != res(simple_raster))) {
    stop("population raster resolution does not match simple raster")
  }
  # writeRaster(pop, paste0(outputdir, indicator,"_", reg, "_pop_rasters.tif"), format = "GTiff", overwrite = TRUE)
  message("loading the covariate stakers for this model")
  if (measure == "prevalence") {
    # Pull the stacker rasters and the fixed-effects formula saved with the
    # model image, then keep only the stackers named in the formula.
    covs <- fetch_from_rdata(paste0("/share/geospatial/mbg/", indicator_group, "/", indicator, "/model_image_history/", run_date, pathaddin, ".RData"), "cov_list")
    fes <- fetch_from_rdata(paste0("/share/geospatial/mbg/", indicator_group, "/", indicator, "/model_image_history/", run_date, pathaddin, ".RData"), "all_fixed_effects")
    submodels <- trimws(strsplit(fes, "+", fixed = T)[[1]])
    covs <- covs[submodels]
    # make sure spatial extent is the same
    covs <- lapply(covs, function(x) invlogit(crop(x, simple_raster)))
  } else {
    covs <- list()
  }
  # bring everything into one place
  covs$pop <- crop(pop, simple_raster)
  covnames <- names(covs)
  # ensure the dimensions are the same
  for (ccc in covs) {
    stopifnot(dim(ccc)[1:2] == dim(simple_raster)[1:2])
  }
  message("converting the covariate stackers and pop data in to a data table")
  # convert to datables, reshape and stuff
  # Helper: one raster brick -> long data.table with a single `value` column,
  # keeping only the cells listed in pixel_id (years stacked vertically).
  brick_to_dt <- function(bbb, pixel_id = pixel_id) {
    dt <- setDT(as.data.frame(bbb))
    dt[, pxid := .I] # probably uncessary
    # drop rows now in cellIdx
    dt <- dt[pixel_id, ]
    dt <- melt(dt, id.vars = "pxid", variable.factor = F)
    dt <- dt[, .(value)]
    return(dt)
  }
  # NOTE(review): covdt starts as the character vector `covnames` and its
  # elements are then replaced by data.tables (coercing it to a list). This
  # works but is fragile — a plain vector("list", length(covs)) would be
  # clearer.
  covdt <- covnames
  for (iii in 1:length(covs)) {
    dt <- brick_to_dt(bbb = covs[[iii]], pixel_id = pixel_id)
    covdt[[iii]] <- dt
  }
  # covdt = lapply(covs, brick_to_dt(bbb = covs, pixel_id = pixel_id))
  covdt <- do.call(what = cbind, covdt)
  setnames(covdt, names(covs))
  # Add pixel_id, but make sure that its recycled explicitly as per data.table 1.12.2 guidelines
  covdt[, pixel_id := rep(pixel_id, times = nrow(covdt) / length(pixel_id))]
  # set pop to 0 when pop is na
  covdt[is.na(pop), pop := 0]
  # add year to covdt
  yyy <- as.vector(unlist(lapply(min(year_list):max(year_list), function(x) rep.int(x, times = length(pixel_id)))))
  covdt[, year := yyy]
  # free up a bit of space
  rm(covs)
  return(covdt)
}
6d5d3ee67510d1c4555cca005538ff543a37fe59
d746fef241f9a0e06ae48cc3b1fe72693c43d808
/ark_87287/d7m59t/d7m59t-014/rotated.r
2ffe904d8bd499fb4e02d592ddc21daff893eb1f
[ "MIT" ]
permissive
ucd-library/wine-price-extraction
5abed5054a6e7704dcb401d728c1be2f53e05d78
c346e48b5cda8377335b66e4a1f57c013aa06f1f
refs/heads/master
2021-07-06T18:24:48.311848
2020-10-07T01:58:32
2020-10-07T01:58:32
144,317,559
5
0
null
2019-10-11T18:34:32
2018-08-10T18:00:02
JavaScript
UTF-8
R
false
false
195
r
rotated.r
r=0.23 https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7m59t/media/images/d7m59t-014/svc:tesseract/full/full/0.23/default.jpg Accept:application/hocr+xml
e9c44c834af8d442f457059d4195b928a7c7c773
5a5f43179fe5675d91ef0dd31b662fedf7eb9b11
/R/knit_print_dml.R
4b89edf7d550db7b62a1839bce1f599298fc9ebe
[ "MIT" ]
permissive
davidgohel/officedown
4e63a99cae4d6cb9f254d31ca6d5cf9d278f453c
a831d923b577bbf376070e155097d5f9dec2e9a0
refs/heads/master
2023-07-10T11:41:08.442432
2023-01-06T12:18:33
2023-01-06T12:18:33
126,241,290
333
36
NOASSERTION
2022-02-19T15:36:51
2018-03-21T21:12:48
R
UTF-8
R
false
false
3,488
r
knit_print_dml.R
#' @title Render a plot as a PowerPoint DrawingML object
#' @description Function used to render DrawingML in knitr/rmarkdown documents.
#' Only PowerPoint outputs currently supported.
#'
#' The plot is rendered to a temporary DrawingML file via rvg::dml_pptx(),
#' sized and positioned from the placeholder looked up in the reference
#' pptx; embedded raster references are then rewritten before the markup is
#' emitted as a raw ```{=openxml}``` block.
#'
#' @param x a `dml` object
#' @param ... further arguments, not used.
#' @author Noam Ross
#' @importFrom knitr knit_print asis_output opts_knit opts_current
#' @importFrom rmarkdown pandoc_version
#' @importFrom rvg dml dml_pptx
#' @importFrom grDevices dev.off
#' @importFrom rlang eval_tidy
#' @noRd
knit_print.dml <- function(x, ...) {
  if (pandoc_version() < 2.4) {
    stop("pandoc version >= 2.4 required for DrawingML output in pptx")
  }
  if (is.null(opts_knit$get("rmarkdown.pandoc.to")) || opts_knit$get("rmarkdown.pandoc.to") != "pptx") {
    stop("DrawingML currently only supported for pptx output")
  }
  # Chunk-level options select the slide layout/master and placeholder.
  layout <- knitr::opts_current$get("layout")
  master <- knitr::opts_current$get("master")
  doc <- get_reference_pptx()
  # Default placeholder: the layout's "body"; default background: transparent.
  if(is.null( ph <- knitr::opts_current$get("ph") )){
    ph <- officer::ph_location_type(type = "body")
  }
  if(is.null( bg <- knitr::opts_current$get("bg") )){
    bg <- "transparent"
  }
  if(!inherits(ph, "location_str")){
    stop("ph should be a placeholder location; ",
         "see officer::placeholder location for an example.",
         call. = FALSE)
  }
  # Memoised lookup of the placeholder's position/size on the slide.
  id_xfrm <- get_content_ph(ph, layout, master, doc)
  dml_file <- tempfile(fileext = ".dml")
  img_directory = get_img_dir()
  # Open the rvg DrawingML device sized to the placeholder.
  dml_pptx(file = dml_file, width = id_xfrm$width, height = id_xfrm$height,
           offx = id_xfrm$left, offy = id_xfrm$top, pointsize = x$pointsize,
           last_rel_id = 1L, bg = bg,
           editable = x$editable, standalone = FALSE,
           raster_prefix = img_directory)
  # finally = dev.off() guarantees the device is closed even if plotting fails.
  tryCatch({
    if (!is.null(x$ggobj) ) {
      stopifnot(inherits(x$ggobj, "ggplot"))
      print(x$ggobj)
    } else {
      rlang::eval_tidy(x$code)
    }
  }, finally = dev.off() )
  dml_xml <- read_xml(dml_file)
  raster_files <- list_raster_files(img_dir = img_directory )
  if (length(raster_files)) {
    # Point each <a:blip> embed at the raster file the device wrote.
    rast_element <- xml_find_all(dml_xml, "//p:pic/p:blipFill/a:blip")
    # NOTE(review): raster_files is recomputed here (redundant with the
    # assignment above) and raster_id is assigned but never used.
    raster_files <- list_raster_files(img_dir = img_directory )
    raster_id <- xml_attr(rast_element, "embed")
    for (i in seq_along(raster_files)) {
      xml_attr(rast_element[i], "r:embed") <- raster_files[i]
    }
  }
  dml_str <- paste(
    as.character(xml_find_first(dml_xml, "//p:grpSp")),
    collapse = "\n"
  )
  # Emit the DrawingML as a raw openxml block for pandoc.
  knit_print(asis_output(
    x = paste("```{=openxml}", dml_str, "```", sep = "\n")
  ))
}

#' If size is not provided, get the size of the main content area of the slide
#' @noRd
#' @importFrom officer fortify_location
get_ph_uncached <- function(ph, layout, master, doc) {
  ls <- layout_summary(doc)
  if(!master %in% ls$master){
    stop("could not find master ", master, call. = FALSE)
  }
  slide_index <- which(ls$layout %in% layout & ls$master %in% master)
  if(length(slide_index)<1){
    stop("could not find layout ", layout, " and master ", master, call. = FALSE)
  }
  doc <- on_slide(doc, index = slide_index)
  fortify_location(ph, doc)
}

# Memoised wrapper: placeholder geometry for a given (ph, layout, master, doc)
# is looked up in the reference pptx only once per session.
#' @importFrom memoise memoise
#' @noRd
get_content_ph <- memoise(get_ph_uncached)

# Return a fresh per-call directory prefix under tempdir() used as the
# raster_prefix for dml_pptx() output files.
get_img_dir <- function(){
  uid <- basename(tempfile(pattern = ""))
  img_directory = file.path(tempdir(), uid )
  img_directory
}

# List the PNG rasters the graphics device wrote with the given prefix
# (files named <uid>*.png in the prefix's parent directory).
list_raster_files <- function(img_dir){
  path_ <- dirname(img_dir)
  uid <- basename(img_dir)
  list.files(path = path_,
             pattern = paste0("^", uid, "(.*)\\.png$"),
             full.names = TRUE )
}
a9b84a8dd1025b623f83fdc855e7ff12e7b7d240
ddf87d7410f5f63747758b8beaf0a4fe7c297796
/analysis/drake/other.R
1c13930bdb664393e2a7aa921f2b84718321109e
[ "MIT" ]
permissive
ashiklom/fortebaseline
3142ff38f305906489cf6e012a8f55fa3efaa51e
513ea9353c133b47f67b3023a78e56abb6384847
refs/heads/master
2021-07-23T16:30:12.074804
2020-04-30T17:15:43
2020-04-30T17:15:43
157,924,482
3
3
NOASSERTION
2023-01-22T10:39:45
2018-11-16T21:42:24
R
UTF-8
R
false
false
1,411
r
other.R
plan <- bind_plans(plan, drake_plan( umbs_map_gg = { umbs <- sf::st_sfc(sf::st_point(c(-84.6975, 45.5625)), crs = 4326) states <- rnaturalearth::ne_states(returnclass = "sf") ggplot(states) + geom_sf() + geom_sf(data = umbs, size = 3) + coord_sf(xlim = c(-90, -80), ylim = c(40, 48)) + theme_bw() }, umbs_map_png = ggsave( file_out("analysis/figures/umbs-map.png"), umbs_map_gg, width = 6, height = 6.5, dpi = 300 ) )) plan <- bind_plans(plan, drake_plan( forte_inv_summary = { species_pft_map <- tribble( ~Species, ~PFT, "POGR", "Early", "FAGR", "Late", "ACSA", "Late", "QURU", "Mid", "ACPE", "Mid", "ACRU", "Mid", "BEPA", "Early", "????", "Other", "BEAL", "Other", "AMEL", "Other", "TSCA", "Other", "PIST", "Pine", "PIRE", "Other", "ADRU", "Other", "QUR", "Mid", "POTR", "Early" ) inv <- fortedata::fd_inventory() %>% inner_join(species_pft_map, "Species") inv %>% group_by(shortname = PFT) %>% summarize(basal_area = sum(DBH_cm ^ 2, na.rm = TRUE)) %>% ungroup() %>% mutate( f_area = basal_area / sum(basal_area), lai = obs_lai$mean * f_area, pft = factor(shortname, pfts("shortname"), pfts("pft")) ) %>% arrange(desc(f_area)) %>% filter(!is.na(pft)) } ))
aaa77bb4470ff73235749cbaa6d8da91b9613035
eef018cb89dbaf5d8a7678a451af2373ddb54bb7
/global.R
afa74ad46caf7dba12f7accdd8c18a6896ac31fe
[]
no_license
MrDAndersen/player_ids
2b2636b838708532fdc1f70529954250fbe63698
3c2b62985eb80d2c52380da4b66c1d036322f930
refs/heads/master
2022-11-28T14:26:31.812739
2020-07-28T17:14:28
2020-07-28T17:14:28
283,240,595
1
0
null
null
null
null
UTF-8
R
false
false
1,016
r
global.R
library(ffanalytics) library(shiny) library(shinyjs) library(RSQLite) library(dbplyr) library(DT) # Helper function to find missing IDs id_missing <- function(x)(is.na(x) | x == "") # Create table id_table <- player_table %>% select(id, first_name, last_name, team, position) %>% arrange(last_name) %>% unite("player", c(first_name, last_name), sep = " ") %>% left_join(ffanalytics:::player_ids, by = "id") # Initialize SQL Lite in-memory database to handle updates db_con <- RSQLite::dbConnect(RSQLite::SQLite(), ":memory:") # Copy tale into database copy_to(db_con, id_table) # Establish connection to DB table db_table <- tbl(db_con, "id_table") id_names <- names(ffanalytics:::player_ids)[-1] names(id_names) <- c("Yahoo", "CBS", "Fleaflicker", "NFL", "ESPN", "FFToday", "Numberfire", "FantasyPros", "FantasyData", "FantasyNerd", "RTS") # IDs for players no longer active in MFL. Will need those for historical purposes. inactive_ids <- ffanalytics:::player_ids %>% filter(!(id %in% id_table$id))
5f64ad16d34b7ca542f2dfc85bc46ae75ddfc754
fd6ce0742aa2f3e259f94a7267934838711338a8
/Figures_Statistics/Supp_Figure_1_2.R
b79f6dfd04bea8a64510ddc2f9c500e3095426dc
[]
no_license
FloMazel/Protist_Altitude_alps
0de8946c0316380c2f6810f0c7ebc0039f3c6304
4300cdd81b9649fd39d0d930731e082902b3a0a9
refs/heads/main
2023-03-06T01:31:03.518891
2021-02-18T16:10:43
2021-02-18T16:10:43
340,104,698
0
0
null
null
null
null
UTF-8
R
false
false
1,124
r
Supp_Figure_1_2.R
rm(list=ls()) #Load code library(tidyverse) library(ade4) library("PerformanceAnalytics") #Load data pooled_metadata = read_csv("Data/Final/Pooled_metadata.csv") %>% mutate(plotID=as.character(plotID)) # define variables edaphic <- c("soilTemp","bulkSoilWaterContent", "pH","C.N" ,"TOC","EC_1_5", "TotalP") climatic <- c("bio1","bio10","bio11","bio12","bio13" ,"bio14", "bio15","bio16","bio17","bio18","bio19","bio2","bio3","bio4","bio5","bio6","bio7","bio8","bio9") length(climatic) length(edaphic) # perform PCA factor_meta <- pooled_metadata %>% select(altitude,all_of(edaphic),all_of(climatic)) %>% drop_na() PCA <- dudi.pca(factor_meta,nf = 3, scannf = FALSE) pdf("Redaction/V4/Figures/Supp_Figure_1.pdf") s.corcircle(PCA$co, lab = names(PCA$tab), full = FALSE, box = TRUE) dev.off() # Final choice of variables edaphic <- c("bulkSoilWaterContent", "pH","C.N" ,"TOC") climatic <- c("bio1") my_data <- factor_meta [, c('altitude',edaphic,climatic)] pdf("Redaction/V4/Figures/Supp_Figure_2.pdf") chart.Correlation(my_data, histogram=TRUE, pch=19,method = "spearman") dev.off()
718855112a250f76c1cb217fdd995000a96967e3
0a906cf8b1b7da2aea87de958e3662870df49727
/breakfast/inst/testfiles/setBitNumber/libFuzzer_setBitNumber/setBitNumber_valgrind_files/1609963192-test.R
0c4321c8ec90c82e22088558f5962e01a1539e05
[]
no_license
akhikolla/updated-only-Issues
a85c887f0e1aae8a8dc358717d55b21678d04660
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
refs/heads/master
2023-04-13T08:22:15.699449
2021-04-21T16:25:35
2021-04-21T16:25:35
360,232,775
0
0
null
null
null
null
UTF-8
R
false
false
98
r
1609963192-test.R
testlist <- list(n = -905904128L) result <- do.call(breakfast:::setBitNumber,testlist) str(result)
33ffaaf0798c2fe7888a69651e14bc85245664e8
7d826931303fd4316dc870e95bea377bc270ce66
/man/Y.mvlm.Rd
e3087df544c9274094073700394582f352f7de13
[]
no_license
dmcartor/MVLM
163c90cc46efd124e836d3eb2ef330ff2c0953ed
9dc5945c7dd1a31f6c5ad59793b469611388dc7f
refs/heads/master
2021-01-16T21:21:41.536732
2017-03-26T14:08:23
2017-03-26T14:08:23
58,753,059
1
0
null
null
null
null
UTF-8
R
false
true
382
rd
Y.mvlm.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mvlm.R \docType{data} \name{Y.mvlm} \alias{Y.mvlm} \title{Simulated outcome data to illustrate the mvlm package.} \format{An object of class \code{matrix} with 200 rows and 5 columns.} \usage{ Y.mvlm } \description{ See package vignette by calling \code{vignette('mvlm-vignette')}. } \keyword{datasets}
5af2031be2de8063e84577267fe04118e511a4f3
5d19a22476086a97e7037555e2f51809c0d950e4
/SL.truffle.dynsem/all.r
e2f7ad8951d577269a87039a2e571e6fc458ca9b
[]
no_license
metaborg/benchmark-data
2d8ed1fc2ea31afb9d3cffcdf22ea37899485eca
7686cf7d4e316c32f3429762a35a72c91824f23e
refs/heads/master
2020-04-06T03:43:05.773707
2016-04-15T20:01:38
2016-04-15T20:01:38
44,169,081
0
0
null
null
null
null
UTF-8
R
false
false
7,918
r
all.r
source("common.r") source("library.r") source("measurements.r") # Re-run all measurements rerunall <- function() { initconfig() initrevs() fetchdependencies() measurements <- loadmeasurements() benchmarks <- loadbenchmarks() rmdatafiles(measurements) measurements <- truncatedata(measurements) writemeasurements(measurements) temp.file <- "temp.csv" for(i in seq(1,nrow(measurements))) { measurements[i,] = runexperiment(measurements[i,], temp.file) writemeasurements(measurements) } rmfile("temp.csv") } createoraclemeasurement <- function() { initconfig() initrevs() measurements <- loadmeasurements() benchmarks <- loadbenchmarks() for(i in seq(1, nrow(benchmarks))) { t <- nrow(measurements) newrow <- c("Oracle", sl.oracle.rev, dynsem.rev, graal.rev, benchmarks[i,], "", "") measurements <- rbind(measurements[1:nrow(measurements),], newrow) } writemeasurements(measurements) } createdynsemmeasurement <- function() { initconfig() initrevs() measurements <- loadmeasurements() benchmarks <- loadbenchmarks() for(i in seq(1, nrow(benchmarks))) { t <- nrow(measurements) newrow <- c("DynSem", sl.metaborg.rev, dynsem.rev, graal.rev, benchmarks[i,], "", "") measurements <- rbind(measurements[1:nrow(measurements),], newrow) } writemeasurements(measurements) } runpending <- function() { initconfig() initrevs() fetchdependencies() measurements <- loadmeasurements() benchmarks <- loadbenchmarks() temp.file <- "temp.csv" firstpass <- TRUE for(i in seq(1,nrow(measurements))) { if(unlist(measurements[i,"GRAALDATA"]) == "" && unlist(measurements[i,"JDKDATA"]) == ""){ measurements[i,] <- runexperiment(measurements[i,], temp.file, firstpass) writemeasurements(measurements) firstpass <- FALSE } } rmfile("temp.csv") } runexperiment <- function(datarow, temp.file, forcerebuild = FALSE) { time <- Sys.time() timestamp.datepart <- format(time, "%Y%m%d") timestamp.timepart <- format(time, "%H%M%s") datafile.graal.rel <- paste("data/data_graal_", timestamp.datepart, "_", timestamp.timepart, ".csv", 
sep="") datafile.jdk.rel <- paste("data/data_jdk_", timestamp.datepart, "_", timestamp.timepart, ".csv", sep="") datafile.graal <- paste(getwd(), "/", datafile.graal.rel, sep="") datafile.jdk <- paste(getwd(), "/", datafile.jdk.rel, sep="") inputarg <- paste("\"", benchmarks.path,"/", unlist(datarow["BENCHMARK"]), "\"", sep="") graaloutarg <- paste("\"", datafile.graal, "\"", sep="") jdkoutarg <- paste("\"", datafile.jdk, "\"", sep="") preparecodebases(datarow, forcerebuild) # switchrevisions(datarow) # initrevs() # compileimplementations(datarow) runres <- system2("./runner.sh", args=c(paste(getvariantpath(datarow["VARIANT"])), inputarg, graaloutarg, jdkoutarg, "1", "65")) datarow["GRAALDATA"] = datafile.graal.rel datarow["JDKDATA"] = datafile.jdk.rel write.table(datarow, file=temp.file, quote=FALSE, append=T, row.names=F, col.names=F, sep=",") return(datarow) } preparecodebases <- function(datarow, forcerebuild = FALSE) { ds.switch.required <- getgitrev(dynsem.repo) != unlist(datarow["DSREV"]) sl.switch.required <- getgitrev(sl.metaborg.repo) != unlist(datarow["VARIANTREV"]) ds.build.required <- forcerebuild || ds.switch.required sl.build.required <- ds.build.required || sl.switch.required # switch versions if needed # switch DS version if(ds.switch.required) { switchgitrev(dynsem.repo, unlist(datarow["DSREV"])) } # switch SL version if(sl.switch.required) { switchgitrev(sl.metaborg.repo, unlist(datarow["VARIANTREV"])) } # init revisions initrevs() # compile codebases if needed # compile DS if(ds.build.required) { compiledynsem() } # compile SL if(sl.build.required) { compilevariant(datarow["VARIANT"]) } } fetchdependencies <- function() { version = "2.0.0-SNAPSHOT" res = system2("./mvn-download.sh", args=c(".", "org.metaborg", "org.metaborg.sunshine2", version, "jar")) == 0 res = res && system2("./mvn-download.sh", args=c(".", "org.metaborg", "org.metaborg.meta.lang.template", version, "spoofax-language")) == 0 res = res && system2("./mvn-download.sh", 
args=c(".", "org.metaborg", "org.metaborg.meta.lib.analysis", version, "spoofax-language")) == 0 res = res && system2("./mvn-download.sh", args=c(".", "org.metaborg", "org.metaborg.meta.lang.esv", version, "spoofax-language")) == 0 quitonfail(ifelse(res, 0, 1), "Download dependencies failed") } compilegraal <- function() { res = system2("mx", args=c("-p", paste(graal.repo), "clean")) == 0 res = res && system2("mx", args=c("-p", paste(graal.repo), "build")) == 0 res = res && system2("mx", args=c("-p", paste(graal.repo), "maven-install")) == 0 res = res && system2("mx", args=c("-p", paste(graal.repo, "/../truffle/", sep=""), "maven-install")) == 0 quitonfail(ifelse(res, 0, 1), "Building graal failed") } compiledynsem <- function() { lang.dir = paste(dynsem.repo, "/dynsem", sep="") res = system2("./mvn-invoke.sh", args=c(lang.dir, "clean")) == 0 quitonfail(ifelse(res, 0, 1), "Clean failed") res = res && system2("./mvn-invoke.sh", args=c(lang.dir, "install")) == 0 quitonfail(ifelse(res, 0, 1), "Compilation of DynSem failed") framework.dir = paste(dynsem.repo, "/org.metaborg.meta.interpreter.framework", sep="") res = system2("./mvn-invoke.sh", args=c(framework.dir, "clean")) == 0 res = res && system2("./mvn-invoke.sh", args=c(framework.dir, "install")) == 0 quitonfail(ifelse(res, 0, 1), "Compilation of interpreter framework failed") metainterp.dir = paste(dynsem.repo, "/org.metaborg.meta.lang.dynsem.interpreter", sep="") res = system2("./mvn-invoke.sh", args=c(metainterp.dir, "clean")) == 0 res = res && system2("./mvn-invoke.sh", args=c(metainterp.dir, "install")) == 0 quitonfail(ifelse(res, 0, 1), "Compilation of meta-interpreter failed") } compilevariant <- function(variant) { if(variant == "Oracle") { compilesloracle() } else if(variant == "DynSem") { compilesldynsem() } else { quitonfail(-1, paste("Unknown implementation variant:", variant)) } } compilesloracle <- function() { res = system2("./mvn-invoke.sh", args=c(paste(sl.oracle.path), "clean")) == 0 res = res && 
system2("./mvn-invoke.sh", args=c(paste(sl.oracle.path), "compile")) == 0 quitonfail(ifelse(res, 0, 1), "Oracle SL compilation failed") } compilesldynsem <- function() { lang.dir = paste(sl.metaborg.repo, "/org.metaborg.lang.sl", sep="") # interp.dir = paste(sl.metaborg.repo, "/org.metaborg.lang.sl.interp", sep="") res = system2("./mvn-invoke.sh", args=c(paste(lang.dir), "clean")) == 0 res = res && system2("./mvn-invoke.sh", args=c(paste(lang.dir), "verify")) == 0 quitonfail(ifelse(res, 0, 1), "Metaborg SL (language) compilation failed") dynsem.proj = paste(dynsem.repo, "/dynsem", sep="") sl.metaborg.proj = paste(sl.metaborg.repo, "/org.metaborg.lang.sl/", sep="") templatelang = paste("zip://", getwd(), "/target/dependency/org.metaborg.meta.lang.template.spoofax-language", sep="") analysislang = paste("zip://", getwd(), "/target/dependency/org.metaborg.meta.lib.analysis.spoofax-language", sep="") esvlang = paste("zip://", getwd(), "/target/dependency/org.metaborg.meta.lang.esv.spoofax-language", sep="") sunshineargs = c("transform", "-l", dynsem.proj, "-l", templatelang, "-l", analysislang, "-l", esvlang, "-p", sl.metaborg.proj , "-n", "\"Generate interpretable\"", "-i", "trans/semantics/sl.ds") args = c("-jar", "target/dependency/org.metaborg.sunshine2.jar", sunshineargs) res = system2("java", args=args) == 0 res = res && system2("./mvn-invoke.sh", args=c(paste(sl.metaborg.repo, "/org.metaborg.lang.sl.interp/", sep=""), "compile")) == 0 quitonfail(ifelse(res, 0, 1), "Metaborg SL (interpreter) compilation failed") }
3bed37bccf6d576b5e971a21a7a8811e228c2a17
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
/cran/paws.database/man/simpledb.Rd
f25ccd912d971ba0bdf6b2cb1be6e3c8194c34e8
[ "Apache-2.0" ]
permissive
paws-r/paws
196d42a2b9aca0e551a51ea5e6f34daca739591b
a689da2aee079391e100060524f6b973130f4e40
refs/heads/main
2023-08-18T00:33:48.538539
2023-08-09T09:31:24
2023-08-09T09:31:24
154,419,943
293
45
NOASSERTION
2023-09-14T15:31:32
2018-10-24T01:28:47
R
UTF-8
R
false
true
5,681
rd
simpledb.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simpledb_service.R \name{simpledb} \alias{simpledb} \title{Amazon SimpleDB} \usage{ simpledb(config = list(), credentials = list(), endpoint = NULL, region = NULL) } \arguments{ \item{config}{Optional configuration of credentials, endpoint, and/or region. \itemize{ \item{\strong{credentials}:} {\itemize{ \item{\strong{creds}:} {\itemize{ \item{\strong{access_key_id}:} {AWS access key ID} \item{\strong{secret_access_key}:} {AWS secret access key} \item{\strong{session_token}:} {AWS temporary session token} }} \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.} \item{\strong{anonymous}:} {Set anonymous credentials.} \item{\strong{endpoint}:} {The complete URL to use for the constructed client.} \item{\strong{region}:} {The AWS Region used in instantiating the client.} }} \item{\strong{close_connection}:} {Immediately close all HTTP connections.} \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.} \item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e. \verb{http://s3.amazonaws.com/BUCKET/KEY}.} \item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}} }} \item{credentials}{Optional credentials shorthand for the config parameter \itemize{ \item{\strong{creds}:} {\itemize{ \item{\strong{access_key_id}:} {AWS access key ID} \item{\strong{secret_access_key}:} {AWS secret access key} \item{\strong{session_token}:} {AWS temporary session token} }} \item{\strong{profile}:} {The name of a profile to use. 
If not given, then the default profile is used.} \item{\strong{anonymous}:} {Set anonymous credentials.} }} \item{endpoint}{Optional shorthand for complete URL to use for the constructed client.} \item{region}{Optional shorthand for AWS Region used in instantiating the client.} } \value{ A client for the service. You can call the service's operations using syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned to the client. The available operations are listed in the Operations section. } \description{ Amazon SimpleDB is a web service providing the core database functions of data indexing and querying in the cloud. By offloading the time and effort associated with building and operating a web-scale database, SimpleDB provides developers the freedom to focus on application development. A traditional, clustered relational database requires a sizable upfront capital outlay, is complex to design, and often requires extensive and repetitive database administration. Amazon SimpleDB is dramatically simpler, requiring no schema, automatically indexing your data and providing a simple API for storage and access. This approach eliminates the administrative burden of data modeling, index maintenance, and performance tuning. Developers gain access to this functionality within Amazon's proven computing environment, are able to scale instantly, and pay only for what they use. Visit \href{https://aws.amazon.com/simpledb/}{http://aws.amazon.com/simpledb/} for more information. 
} \section{Service syntax}{ \if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- simpledb( config = list( credentials = list( creds = list( access_key_id = "string", secret_access_key = "string", session_token = "string" ), profile = "string", anonymous = "logical" ), endpoint = "string", region = "string", close_connection = "logical", timeout = "numeric", s3_force_path_style = "logical", sts_regional_endpoint = "string" ), credentials = list( creds = list( access_key_id = "string", secret_access_key = "string", session_token = "string" ), profile = "string", anonymous = "logical" ), endpoint = "string", region = "string" ) }\if{html}{\out{</div>}} } \section{Operations}{ \tabular{ll}{ \link[=simpledb_batch_delete_attributes]{batch_delete_attributes} \tab Performs multiple DeleteAttributes operations in a single call, which reduces round trips and latencies\cr \link[=simpledb_batch_put_attributes]{batch_put_attributes} \tab The BatchPutAttributes operation creates or replaces attributes within one or more items\cr \link[=simpledb_create_domain]{create_domain} \tab The CreateDomain operation creates a new domain\cr \link[=simpledb_delete_attributes]{delete_attributes} \tab Deletes one or more attributes associated with an item\cr \link[=simpledb_delete_domain]{delete_domain} \tab The DeleteDomain operation deletes a domain\cr \link[=simpledb_domain_metadata]{domain_metadata} \tab Returns information about the domain, including when the domain was created, the number of items and attributes in the domain, and the size of the attribute names and values\cr \link[=simpledb_get_attributes]{get_attributes} \tab Returns all of the attributes associated with the specified item\cr \link[=simpledb_list_domains]{list_domains} \tab The ListDomains operation lists all domains associated with the Access Key ID\cr \link[=simpledb_put_attributes]{put_attributes} \tab The PutAttributes operation creates or replaces attributes in an item\cr \link[=simpledb_select]{select} 
\tab The Select operation returns a set of attributes for ItemNames that match the select expression } } \examples{ \dontrun{ svc <- simpledb() svc$batch_delete_attributes( Foo = 123 ) } }
5215fe6ca9dafb0004341e6b2ef7ae49ed09f181
b891263728fc0108d3701ec9723b627a6d6f484a
/man/getDecYear.Rd
b592cb73327536e84431b9f4382df4c0676633d3
[ "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-public-domain-disclaimer" ]
permissive
limnoliver/GSqwsr
2218a40432f75107b31eb1a50168858032635b85
0a59fe050eb851de54b0567c317bd07a1b0b099a
refs/heads/master
2021-01-01T18:47:17.291369
2015-10-14T20:57:49
2015-10-14T20:57:49
null
0
0
null
null
null
null
UTF-8
R
false
false
524
rd
getDecYear.Rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/getDecYear.R \name{getDecYear} \alias{getDecYear} \title{Returns decimal year from POSIXct} \usage{ getDecYear(dateTime) } \arguments{ \item{dateTime}{POSIXct} } \value{ decYear numeric } \description{ Returns decimal year from POSIXct. } \examples{ date1 <- as.POSIXct("2012-01-02 12:38:00") decYear <- getDecYear(date1) date2 <- as.POSIXct("2012-07-02 12:38:00") decYear2 <- getDecYear(date2) } \keyword{conversion} \keyword{dateTime}
2dc92779ad1215c1d07bbffa61cf9de31fa57600
13d34ad3f9487eff6fedd772365ae85082dd5567
/ExtractAffy.R
c944d6f0ed56f511a2051ef3fa7e1103aca38cfa
[]
no_license
zhenyisong/CardioTF_Database
b318953f1b3e8f151b601afe49dac13b2ebfe444
f6a037506a58a4d414bdbd092920f6b496ceea99
refs/heads/master
2021-01-10T01:49:04.364748
2018-05-31T01:34:03
2018-05-31T01:34:03
51,047,428
0
1
null
null
null
null
UTF-8
R
false
false
1,017
r
ExtractAffy.R
# Author Yisong Zhen # Since 2013-12-31 # Original data is from NCBI GEO database: # http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE1479 # # To select genes which are expressed by MAS5 alorithm # verified in any one of stages # saved file: # cardiogenomics.data # any gene symbol on one line # # methods: # http://blog.csdn.net/hzs106/article/details/12016363 # http://www.biostars.org/p/52725/ # library(affy); library(annotate); library(mouse4302.db); raw.data <- ReadAffy(); mas5calls.data <- mas5calls(raw.data); mas5calls.exprs <- exprs(mas5calls.data); head(mas5calls.exprs); probeID.set <- apply(mas5calls.exprs, 1, function(x) any(x == "P")); present.probes.names <- names( probeID.set[probeID.set] ); gene.name.set <- c(); for( i in 1:length(present.probes.names)) { gene.name <-mget(present.probes.names[i],mouse4302SYMBOL); gene.name.set <-c(gene.name.set,gene.name); } write.table(gene.name.set,file="cardiogenomics.data",col.names = FALSE);
a7babeb69cd352f80ac1fea1cc8c79b6aeaa0ad8
7d0f25a190e97e0d2714c43a57a017eaf093ecd5
/R/objSize.R
c58cee0f939593513d68989ab8eaacb6f11b59a7
[ "MIT" ]
permissive
mikeniemant/nbs
f192f6d6ce16d725dc0985d6f505666c2c785020
3d82fd553c58ea5941d5dfa60cfaefa95df76121
refs/heads/master
2022-07-03T11:21:21.170515
2022-06-15T08:51:37
2022-06-15T08:51:37
142,566,362
0
0
null
null
null
null
UTF-8
R
false
false
471
r
objSize.R
#' Object Size #' #' Compute memory size of objects in the R environment. #' #' Version 1.0 2022-02-15 #' #' @param objs character vector with object name(s) #' @return A tibble containing all objects arranged on object size (mb). #' @export objSize <- function(objs) { res <- data.frame(name = objs) %>% dplyr::as_tibble() %>% dplyr::mutate(obj_size_mb = purrr::map_dbl(name, ~ object.size(get(.x))/1000)) %>% dplyr::arrange(-obj_size_mb) return(res) }
0e8a7f41f5a411285e8120bf4ca77a42d1f649ab
d15c254cab30b92a59b83264fb4b94c9ae8dd993
/man/Infilt.Rd
65621781d218a98e41f18e269cb922d0035e6a5d
[]
no_license
cran/HydroMe
282418dad34edb18d2493dfd00a5362fe767b1ae
28e932150d6283d04e4970f6cfd5ad264dc5ff0d
refs/heads/master
2021-05-19T03:08:49.552205
2021-01-10T14:40:18
2021-01-10T14:40:18
17,679,887
1
0
null
null
null
null
UTF-8
R
false
false
1,926
rd
Infilt.Rd
\name{infilt} \alias{infilt} \docType{data} \title{ Water infiltration characteristics data } \description{ This is part of a dataset from a PhD study which measured water infiltration characteristics from the Upper Athi River basin in Eastern Kenya. It contains rate of infiltration (y) at different levels of cummulative Time intervals (x) } \usage{data(infilt)} \format{ A data frame with 1105 observations on the following 6 variables. \describe{ \item{\code{Sample}}{ which is a numeric vector} \item{\code{PlotNo}}{which is a factor with levels such as: \code{10lP3} \code{11lP3} \code{12lP3} \code{13lP3} \code{14lP3} \code{15lP3} \code{16lP3} \code{17lP3} \code{18lP3} \code{19lP3} \code{1lP3} \code{20lP3} \code{21lP3} \code{22lP3} \code{23lP3} \code{24lP3} \code{25lP3} \code{26lP3} \code{27lP3} \code{28lP3} \code{29lP3} \code{2lP3} \code{30lP3} \code{3lP3} \code{4lP3} \code{5lP3} \code{6lP3} \code{7lP3} \code{8lP3} \code{9lP3}} \item{\code{Erosion}}{which is a factor with levels such as: \code{E0} \code{E1} \code{E2}} \item{\code{Time}}{which is a numeric vector of cumulative infiltration time} \item{\code{Rate}}{which is a numeric vector of instantaneous infiltration rate} \item{\code{Cumrate}}{which is a numeric vector of cumulative infiltration rate} } } \details{ The data is grouped according to plots (given the name PlotNo) from where the data were collected } \source{ Omuto CT. 2006. Large-area soil physical degradation assessment using gis, remote sensing, and infrared spectroscopy in arid and semi-arid Kenya. PhD Dissertation, University of Nairobi, Kenya } \references{ Omuto CT. 2006. Large-area soil physical degradation assessment using gis, remote sensing, and infrared spectroscopy in arid and semi-arid Kenya.PhD Dissertation, University of Nairobi, Kenya } \examples{ data(infilt) str(infilt) } \keyword{datasets}
8f31e10700eaa9eb7091a2a1b32da7125fde81cb
2f6d7a99ce3155d2c635c39013a0da1418208b40
/man/explicit.Rd
90190ce2f465415d60bf07a375b2ccc20e67a431
[ "MIT" ]
permissive
oganm/ogbox
c75eb1d8f4df00be214731e085e6c19e141992cc
ba99a46487836af5ab4fb5b013bc92cf35ad8e95
refs/heads/master
2020-04-04T07:07:27.383911
2019-07-29T23:00:12
2019-07-29T23:00:12
37,562,559
5
1
null
null
null
null
UTF-8
R
false
true
536
rd
explicit.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/explicit.R \name{explicit} \alias{explicit} \title{Make function calls explicit} \usage{ explicit(fileIn, fileOut = fileIn, ignore = "base") } \arguments{ \item{fileIn}{input file} \item{fileOut}{output file. default is to overwrite fileIn} \item{ignore}{which packages to ignore when translating} } \description{ This function turns function calls into explicit function calls (packageName::functionName) in a file The packages must be currently loaded }
3e09c389afc283ab5791f4d70b4de7890f32fae8
852c8140a3d1b1d081d3c8ee62af28f36d23833e
/COD_parameters.R
a0ae1945b24507fa2fb62ef060f9aa814d12db3f
[]
no_license
jessicaweb3/fmsy_m
fbc1d2f037238c97bda424e7d3d5621cb0d73937
dacdf75ecf287356a720416f2385f4250c138e0d
refs/heads/main
2023-04-17T10:47:14.476055
2022-12-09T09:44:08
2022-12-09T09:44:08
576,012,006
0
0
null
null
null
null
UTF-8
R
false
false
1,180
r
COD_parameters.R
########################------------ NEA COD Parameters -------------####################### #ICES asssessment based data Blim<- 220000 ### Blim tonnes (official advice sheet) recage <- 3 amax <- 15 ## 13 age classes 3-15 age <- recage:amax ac<-length(age) fbar<-c(2,7) # in reality age 5-10 mbar <- c(2,7) averageF <- 0.6 # ICES assessment tmax<- 250 # simulation time ### Natural mortality ### Nmort<-rep(.2,ac) ####### Ricker ###### start.rec <- 748681600 # reccap <- 3866920500 # max(codass$Rec.age3)*1.5 alphar <- 7.953965 betar <- -1.176552e-06 ricker.sd <- 0.6447752 AR1par <- 0.5190351 ###### B-H ###### alphabh <- 0.0004234719 #0.4234719 betabh <- 8.498407e-10 #8.498407e-07 #bh.sd <- 646770.4 #AR1par <- 0.5080555 ### maturity ### a50 <- 7.919142 env <- 1.184689 maturity <- round(1/(1+exp(-((age-a50)/env))),2) ### weights ### k <- 0.1513596 b <- 5.127882 Winf <- 28.04999 weights <- Winf * (1-exp(-k * age))^b ### f sel ### s50 <- 5.811967 ss <- 1.307185 Fsel <- 1/(1+exp(-((age-s50)/ss))) #Fsel <- c(0.5, 0.5, 0.5, rep(1, ac-3))
e4d16ebd59a84b99d63962ed9bfd1a143b922990
f281f08b82846459b3bfd53546e1abda60082a67
/rsrc/generate_bcp_madvision.R
d7fe0021ad7eca55c1c59a241646fc1133e4b823
[ "Apache-2.0" ]
permissive
MadFace/MadFace
07de49a31cb5e0a5b5c9a6c6c3a8fe3410545dee
dad6df197bc1ad330863f0f84da4d97dfb7a3b7d
refs/heads/master
2021-08-14T18:02:46.020078
2017-11-16T11:22:18
2017-11-16T11:22:18
108,241,990
0
0
null
null
null
null
UTF-8
R
false
false
3,540
r
generate_bcp_madvision.R
## generate_bcp_madvision.R
##
## Renders a "Mad Vision" annotated image for one capture link: when the
## latest Bayesian change-point (BCP) posterior probability meets the
## threshold, the newest capture image is overlaid with a dependency graph,
## probability/info-gain plots and status text; otherwise the raw image is
## symlinked into the output directory unchanged.
##
## Positional command-line arguments:
##   1. capture.dir     root directory holding per-date capture images
##   2. date.ids        comma-separated list of date identifiers (last = latest)
##   3. link.name       name of the capture link (image/robj basename)
##   4. robj.dir        directory with precomputed .robj inputs
##   5. bcp.threshold   numeric posterior-probability cutoff
##   6. mad.vision.dir  output directory for the rendered image

library(rimage)
source("rsrc/terminator_view.R")

##-----------------------------------------------------------
##
## Functions
##
##-----------------------------------------------------------

#' Collect the vertex names appearing in a two-column edge structure.
#'
#' @param graph.data.matrix Two-column edge list/matrix (from, to).
#'   (The original self-referential default `graph.data.matrix=graph.data.matrix`
#'   could never be evaluated, so it has been dropped.)
#' @return A one-column data.frame of unique character vertex names.
get.vers <- function(graph.data.matrix){
  uniq.src <- unique(graph.data.matrix[, 1])
  uniq.dst <- unique(graph.data.matrix[, 2])
  ## Deduplicate across BOTH columns: graph.data.frame() errors on duplicate
  ## vertex names, which would occur for any node that appears as both a
  ## source and a destination.
  vers <- unique(c(as.character(uniq.src), as.character(uniq.dst)))
  data.frame(vers, stringsAsFactors = FALSE)
}

#' Build a simplified directed igraph from a two-column edge matrix.
#'
#' @param graph.matrix Two-column edge list/matrix (from, to).
#' @return An igraph object with multi-edges collapsed (self-loops kept)
#'   and every vertex shaped as a rectangle.
generate.graph <- function(graph.matrix){
  Edges <- data.frame(from = graph.matrix[, 1], to = graph.matrix[, 2])
  vers <- get.vers(Edges)
  g <- graph.data.frame(Edges, directed = TRUE, vertices = vers)
  g <- simplify(g, remove.loops = FALSE)  # drop multi-edges, keep self-loops
  ## nrow(), not length(): length() of a data.frame is its column count (1),
  ## so the original relied on silent recycling of a length-1 vector.
  V(g)$shape <- rep("rectangle", nrow(vers))
  return(g)
}

##-----------------------------------------------------------
##
## Main
##
##-----------------------------------------------------------

## Command line analyzer
command.args <- commandArgs(trailingOnly = TRUE)
capture.dir <- command.args[1]
date.ids <- unlist(strsplit(command.args[2], ","))
link.name <- command.args[3]
robj.dir <- command.args[4]
## Coerce to numeric: commandArgs() yields character strings, and comparing a
## numeric posterior against a character threshold coerces the numeric side to
## character, i.e. a lexicographic (wrong) comparison.
bcp.threshold <- as.numeric(command.args[5])
mad.vision.dir <- command.args[6]

robj.file1 <- paste(c(robj.dir, "/", link.name, "__infogain.robj"), collapse="")
robj.file2 <- paste(c(robj.dir, "/", link.name, "__bcp.robj"), collapse="")
robj.file3 <- paste(c(robj.dir, "/", link.name, "__pathway.robj"), collapse="")

message("capture.dir = ", capture.dir)
message("date.ids = ", paste(date.ids, collapse=", "))
message("link.name = ", link.name)
message("robj.dir = ", robj.dir)
message("robj.file1 = ", robj.file1)
message("robj.file2 = ", robj.file2)
message("robj.file3 = ", robj.file3)
message("bcp.threshold = ", bcp.threshold)
message("mad.vision.dir = ", mad.vision.dir)

##----------------------------------------
## Generating plots
##----------------------------------------

## Loading data. NOTE(review): the objects used below (info.gain.df,
## sub.graph.matrix, bcp.posterior.prob, latest.bcp.pp) are assumed to come
## from these .robj files -- if a file is missing, the corresponding objects
## stay undefined and the code below errors. Confirm upstream always writes
## all three files.
if (file.exists(robj.file1)) load(file=robj.file1)
if (file.exists(robj.file2)) load(file=robj.file2)
if (file.exists(robj.file3)) load(file=robj.file3)

## Mad Vision plot: annotate the newest capture, or pass it through untouched.
latest.date.id <- date.ids[length(date.ids)]
latest.img.file <- paste(c(capture.dir, "/", latest.date.id, "/", link.name, ".jpg"), collapse="")
mad.vision.file <- paste(c(mad.vision.dir, "/", link.name, ".jpg"), collapse="")

if (file.exists(latest.img.file)){
  message("Latest BCP Posterior Probability of ", link.name, " = ", latest.bcp.pp)
  if (latest.bcp.pp >= bcp.threshold){
    message("Generating MadVision: ", link.name, " = ", latest.bcp.pp, " ...")
    img <- read.jpeg(latest.img.file)
    data1 <- as.list(NULL)

    ## Graph 1: dependency sub-graph around this link
    data1[["graph1"]] <- generate.graph(sub.graph.matrix)

    ## Plot 1: BCP posterior probability series
    data1[["plot1"]] <- bcp.posterior.prob
    data1[["plot1.title"]] <- "Bayesian Posterior Probability"

    ## Plot 2: info-gain series against the nearest image
    data1[["plot2"]] <- info.gain.df$info.gain
    data1[["plot2.title"]] <- "Nearest Image InfoGain"

    ## Text 1,2,3: most recent 9 values, newest first.
    ## NOTE(review): len:(len-8) assumes at least 9 observations; with fewer,
    ## the index range goes non-positive -- confirm upstream guarantees this.
    len <- length(bcp.posterior.prob)
    data1[["text1"]] <- bcp.posterior.prob[len:(len-8)]
    data1[["text2"]] <- "■ Status Changed"
    len <- length(info.gain.df$info.gain)
    data1[["text3"]] <- c("Mad Vision v0.21",
                          "-------------------------",
                          info.gain.df$info.gain[len:(len-8)])

    ## Plotting here
    generate.terminator.vision(img, mad.vision.file, data1)
  } else {
    ## Below threshold: expose the raw capture via symlink instead of rendering.
    if (file.symlink(latest.img.file, mad.vision.file))
      message("Symlink: ", latest.img.file, " -> ", mad.vision.file)
  }
}
b0de74291d85d2523c2ecbbb81c6c2c6c8d270bf
4067ff0db87ec536dd48b38dbb2de6e13536cd67
/Unit2/src/test.R
d40ab63270080f3aca856a940858a42592639502
[]
no_license
strgeon/rClassFiles
2b4267676e39878da7077716f508ae1aee0ea3c3
adeb452b82e4fac642f8e65b608d4843fee15e66
refs/heads/master
2020-12-02T19:41:00.846565
2017-08-22T03:00:02
2017-08-22T03:00:02
96,375,498
0
1
null
null
null
null
UTF-8
R
false
false
1,685
r
test.R
# Title     : Google Flu Trends exercise (The Analytics Edge, part 3)
# Objective : Fit log-linear model of ILI on search Queries; evaluate on test set
# Created by: Scott
# Created on: 7/1/2017

# part 3 - flu queries
FluTrain <- read.csv("FluTrain.csv")

# Wrong approach kept for reference: don't use $ notation inside lm() --
# pass bare column names together with a data= argument instead.
# FluTrend1 <- lm(log(FluTrain$ILI) ~ FluTrain$Queries, data = pisaTrain)
FluTrend1 <- lm(log(ILI) ~ Queries, data = FluTrain)

FluTest <- read.csv("FluTest.csv")
# The model predicts log(ILI), so exponentiate to return to the ILI scale.
predTest1 <- exp(predict(FluTrend1, newdata = FluTest))

# Locate the week of interest once instead of recomputing which() twice.
week_idx <- which(FluTest$Week == "2012-03-11 - 2012-03-17")  # answer: 11
predTest1[week_idx]

# Actual, estimated, then relative error (act - est) / act for that week.
FluTest$ILI[week_idx]
predTest1[week_idx]
(FluTest$ILI[week_idx] - predTest1[week_idx]) / FluTest$ILI[week_idx]

# Out-of-sample fit: SSE/SST relative to the training-set mean baseline
# (same pattern as the PISA exercise: SST uses mean(FluTrain$ILI)).
SSE <- sum((FluTest$ILI - predTest1)^2)
SST <- sum((FluTest$ILI - mean(FluTrain$ILI))^2)
RMSE <- sqrt(SSE / nrow(FluTest))

# MIT answer -- NOTE: the course solution names the prediction vector
# PredTest1, but this script defines predTest1; the original lines therefore
# failed with "object 'PredTest1' not found". Fixed to use predTest1:
#   SSE  <- sum((predTest1 - FluTest$ILI)^2)
#   RMSE <- sqrt(SSE / nrow(FluTest))
# Alternatively, in one step:
#   RMSE <- sqrt(mean((predTest1 - FluTest$ILI)^2))

# My analysis:
# - SSE is the sum of SQUARED differences between predicted and observed
#   values (the earlier note calling these "absolute values" was imprecise).
# - RMSE is the square root of the mean squared difference, i.e. the typical
#   magnitude of a prediction error in the units of the response.

# install.packages() belongs in the console, not a sourced script -- it hits
# the network and mutates the library on every run. Run it once manually.
# install.packages("zoo")
library(zoo)  # fixed: original line had a stray trailing "s" ("library(zoo)s")
b3e1dae1c40f9c0dcf8a7fa27867225c086b0392
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
/corpustools/R/data.r
dfd969e43f0e90db67f372a335cc587fcac9ae1c
[]
no_license
akhikolla/InformationHouse
4e45b11df18dee47519e917fcf0a869a77661fce
c0daab1e3f2827fd08aa5c31127fadae3f001948
refs/heads/master
2023-02-12T19:00:20.752555
2020-12-31T20:59:23
2020-12-31T20:59:23
325,589,503
9
2
null
null
null
null
UTF-8
R
false
false
630
r
data.r
#' State of the Union addresses
#'
#' @docType data
#' @usage data(sotu_texts)
#' @format data.frame
"sotu_texts"

#' coreNLP example sentences
#'
#' @docType data
#' @usage data(corenlp_tokens)
#' @format data.frame
"corenlp_tokens"

#' Basic stopword lists
#'
#' @docType data
#' @usage data(stopwords_list)
#' @format A named list, with names matching the languages used by SnowballC
"stopwords_list"

#' Dictionary with common ASCII emoticons
#'
#' Obtained from the Wikipedia List_of_emoticons page.
#'
#' @docType data
#' @usage data(emoticon_dict)
#' @format A data.frame with a "string" and "code" column.
"emoticon_dict"