blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
760fe9d4449facbae766153da9c2f78939559620
|
f910b89dfb4454e1ec2dbdd36fcebec21f68137e
|
/Thesis.R
|
6e48075d1fd32e2920638a15111f10365bc103da
|
[] |
no_license
|
hyunjin-nam/thesis-diabetes
|
92c70d31ee2a3e1b87ce9f50a1f343b66ea9439f
|
a3c7f302a9621177676dc53f2159a0f6b7b9306c
|
refs/heads/master
| 2020-07-16T18:45:08.947297
| 2019-09-02T11:54:30
| 2019-09-02T11:54:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,190
|
r
|
Thesis.R
|
install.packages('xgboost')
library(dplyr)
library(tidyverse)
library(data.table)
library(rpart.plot)
library(ggplot2)
library(randomForest)
library(caret) #confusionMatrix
library(reprtree)
library(randomForestExplainer)
library(xgboost)
library(ROCR)
library(skimr)
library(stargazer)
library(caret)
library(e1071)
library(corrplot)
# Load the MIMIC-III tables used to build the modelling frame.
# Paths are absolute to the author's machine -- NOTE(review): parameterize
# or use a project-relative path before sharing.
diagnoses <- read.csv('/Users/namhyunjin/Byon8/MIMIC/sql/DIAGNOSES_ICD.csv')
D_ICD_DIAGNOSES <- read.csv('/Users/namhyunjin/Byon8/MIMIC/sql/D_ICD_DIAGNOSES.csv')
D_LABITEMS <- read.csv('/Users/namhyunjin/Byon8/MIMIC/sql/D_LABITEMS.csv')
# LABEVENTS is large, so it is read with data.table::fread.
LABEVENTS <- fread('/Users/namhyunjin/Byon8/MIMIC/sql/LABEVENTS.csv', header = T, sep = ',')
ADMISSIONS <- read.csv('/Users/namhyunjin/Byon8/MIMIC/sql/ADMISSIONS.csv')
PATIENTS <- read.csv('/Users/namhyunjin/Byon8/MIMIC/sql/PATIENTS.csv')
weight <- read.csv('/Users/namhyunjin/Byon8/MIMIC/sql/weight.csv')
set.seed(1)
# Admissions with a diabetes ICD-9 code: codes of the form 250x0 / 250x2
# built via paste0 (lengths 40 and 20 recycle cleanly). NOTE(review):
# each=4 with rep(c(0,2),10) produces duplicate codes -- harmless for
# %in%, but confirm the intended code list.
diabetes.id <- diagnoses %>%
filter(ICD9_CODE %in% paste0(250,rep(seq(from=0,to=9),each=4),rep(c(0,2),10)) ) %>%
dplyr::select(HADM_ID)
# All remaining admissions (no diabetes code).
notdiabetes.id <- diagnoses %>%
filter(ICD9_CODE %in% setdiff(diagnoses$ICD9_CODE,paste0(250,rep(seq(from=0,to=9),each=4),rep(c(0,2),10)))) %>%
dplyr::select(HADM_ID)
##################################################################
######################## Making Data set #########################
##################################################################
#LABEVENTS
# Average each lab item per admission, then pivot wide so that every
# lab ITEMID becomes one column (NA where a lab was never measured).
# NOTE(review): funs() and spread() are deprecated in current dplyr/tidyr
# (use list(~ ...) and pivot_wider()); kept as-is here.
LABEVENTS.new <- LABEVENTS %>%
dplyr::select(HADM_ID,ITEMID,VALUE) %>%
group_by(HADM_ID, ITEMID) %>%
mutate(VALUE = as.numeric(as.character(VALUE))) %>%
summarise_at(vars(VALUE), funs(mean(., na.rm=TRUE))) %>%
ungroup() %>%
spread("ITEMID","VALUE",fill = NA)
# Look up a human-readable label and LOINC code for every lab column.
label.frame <- D_LABITEMS %>%
filter(ITEMID %in% colnames(LABEVENTS.new)[-1]) %>%
dplyr::select(ITEMID,LABEL,LOINC_CODE)
new.colnames <- data.frame(ITEMID = colnames(LABEVENTS.new)[-1]) %>%
mutate(ITEMID = as.numeric(as.character(ITEMID)))%>%
inner_join(label.frame, by='ITEMID')
# Rename lab columns to "LABEL ! ITEMID ! LOINC" -- the '!' separator is
# split on later to recover the parts for plot labels.
colnames(LABEVENTS.new) <- c('HADM_ID',paste(as.character(new.colnames$LABEL),'!',new.colnames$ITEMID,'!',new.colnames$LOINC_CODE))
#ADMISSIONS
# Keep only admissions that have at least one lab row.
admissions.new <- ADMISSIONS %>%
filter(HADM_ID %in% LABEVENTS.new$HADM_ID) %>%
dplyr::select(SUBJECT_ID,HADM_ID,ETHNICITY,ADMITTIME) #ADMITTIME,MARITAL_STATUS,RELIGION
#PATIENTS
# Join demographics and derive age at admission in whole years.
admissions.patients.new <- PATIENTS %>%
dplyr::select(SUBJECT_ID,GENDER,DOB) %>% #DOB
inner_join(admissions.new, by='SUBJECT_ID') %>%
mutate(DOB = as.Date(DOB,format='%Y-%m-%d'),
ADMITTIME = as.Date(ADMITTIME,format='%Y-%m-%d'),
Age = as.numeric(ADMITTIME - DOB )%/% (365)) %>%
# MIMIC shifts DOB for patients aged >89, which yields ages >200;
# those are zeroed here -- NOTE(review): 0 may be better coded as NA.
mutate(Age = ifelse(Age >200 , 0, Age)) %>%
dplyr::select(-DOB,-ADMITTIME)
BMI = function(height,weight){return((weight/((height/100)^2)))}
# Add BMI (rounded to 2 dp) from first recorded height/weight.
weight <- weight %>%
mutate(BMI = round(BMI(height_first,weight_first),2))
#Merge all the data: demographics + wide lab values + BMI, then label
#each admission 1/0 by membership in the diabetes cohort.
data <- admissions.patients.new %>%
left_join(LABEVENTS.new, by='HADM_ID') %>%
left_join(weight, by=c('HADM_ID'='hadm_id')) %>%
#left_join(complication, by='HADM_ID') %>%
mutate(Diabetes = ifelse(HADM_ID %in% diabetes.id$HADM_ID , 1, 0)) %>%
dplyr::select(-height_first,-weight_first)
#Discard columns where all the values are NA
data <- data %>%
dplyr::select(names(which(apply(is.na(data),2,sum) !=dim(data)[1])))
# Modelling frame: drop identifier columns.
D.data <- data %>% dplyr::select(-SUBJECT_ID,-HADM_ID)
##################################################################
#################### Description Statistics ######################
##################################################################
# Class balance of the outcome.
D.data %>% select(Diabetes) %>% table
# Hand-computed shares copied from the table output above --
# NOTE(review): these constants go stale if the data changes.
1-0.2144136
12933/(47385+12933)
#look over specific person (spot-check one random admission)
random.sample <- sample(data$HADM_ID, 1)
data %>%
filter( HADM_ID == random.sample)
diagnoses %>%
filter( HADM_ID == random.sample)
# Ethnicity distribution.
D.data$ETHNICITY %>% table
# Descriptive-statistics table of the modelling frame, rendered as LaTeX.
# Bug fix: `format=latex` passed the undefined symbol `latex` (would raise
# "object 'latex' not found"); the format argument must be the string "latex".
D.discriptive <- skim(D.data)
D.discriptive1 <- skimr::kable(D.discriptive, format="latex", digits = getOption("digits"),
row.names = NA, col.names = NA, align = NULL, caption = NULL,
format.args = list(), escape = TRUE)
##################################################################
######################## Desicion Tree ###########################
##################################################################
# Decision tree: impute missing labs with 0 (a value below any real lab
# measurement -- NOTE(review): confirm 0-imputation is intended rather
# than explicit NA handling).
tree.data <- D.data
tree.data[is.na(tree.data)] <- 0
# Sample: 80/20 split. NOTE(review): this masks base::sample for the
# rest of the script; the same index vector is deliberately reused for
# the RF and XGBoost splits so all models share one train/test split.
set.seed(1)
sample = sample.int(n = nrow(tree.data), size = floor(.8*nrow(tree.data)), replace = F)
tree.train = tree.data[sample, ]
tree.test = tree.data[-sample, ]
# Step1: Begin with a small cp (cp = 0 grows the fullest tree to depth 4).
rpart.fit <- rpart(Diabetes~., data=tree.train, method="class",control = rpart.control(cp = 0, maxdepth=4))
printcp(rpart.fit) %>% stargazer(summary=F)
plotcp(rpart.fit)
printcp(rpart.fit)
# Step2: Pick the tree size that minimizes misclassification rate (i.e. prediction error).
# Prediction error rate in training data = Root node error * rel error * 100%
# Prediction error rate in cross-validation = Root node error * xerror * 100%
# Hence we want the cp value (with a simpler tree) that minimizes the xerror.
bestcp <- rpart.fit$cptable[which.min(rpart.fit$cptable[,"xerror"]),"CP"]
rpart.fit$cptable
# Step3: Prune the tree using the best cp.
tree.pruned <- prune(rpart.fit, cp = bestcp)
#printcp(tree.pruned), plotcp(rpart.fit), printcp(rpart.fit)
print(Sys.time())
par(mfrow=c(1,2)) # two plots on one page
rsq.rpart(rpart.fit) # visualize cross-validation results
summary(tree.pruned)
only_count <- function(x, labs, digits, varlen){paste(x$frame$n)}
# Colour tree nodes by predicted class (yval: 1 = grey, 2 = red).
boxcols <- c("grey", "red")[tree.pruned$frame$yval]
par(mfrow=c(1,1))
prp(rpart.fit, faclen = 0, cex = 0.8, node.fun= only_count, box.col = boxcols)
printcp(rpart.fit)
par(mfrow=c(1,1))
prp(tree.pruned, faclen = 0, cex = 0.8, node.fun= only_count, box.col = boxcols)
printcp(tree.pruned)
# Prediction error rate in training data = Root node error * rel error * 100%
# Prediction error rate in cross-validation = Root node error * xerror * 100%
printcp(tree.pruned)
# Constants copied by hand from the printcp() output above --
# NOTE(review): recompute from the cptable rather than hard-coding.
tree.train.err <- 0.21459*0.83332
1-tree.train.err
tree.cross.err <- 0.21459*0.84462
1-tree.cross.err
# confusion matrix (test data); Diabetes is the last column of tree.test
tree.conf.matrix <- table(tree.test[,ncol(tree.test)],predict(tree.pruned,tree.test,type="class"))
tree.y <- tree.test[,ncol(tree.test)]
tree.hat <- predict(tree.pruned,tree.test,type="class")
tree.conf <- confusionMatrix(tree.hat, tree.y %>% as.factor, positive='1')
#Importance plot: top-10 variables; names are "LABEL ! ITEMID ! LOINC",
#split on '!' to show "LABEL LOINC" in the plot.
tree.imp.var.names <- names(tree.pruned$variable.importance)[1:10]
tree.imp.var.names.frame <- str_split(tree.imp.var.names,'!', simplify=T)
tree.imp.table <- data.frame(Feature= paste(tree.imp.var.names.frame[,1],tree.imp.var.names.frame[,3]),
Gain =(tree.pruned$variable.importance)[1:10])
# Reuses xgboost's importance plot for a consistent look across models.
xgb.plot.importance(tree.imp.table[1:10,] %>% data.table, rel_to_first = TRUE, xlab = "Relative importance")
#x <-tree.imp.table$Gain
#normalized <- (x-min(x))/(max(x)-min(x))
# mat : is a matrix of data
# ... : further arguments to pass to the native R cor.test function
# Pairwise correlation-test p-values for the columns of `mat`.
#
# Runs cor.test() on every unordered pair of columns and returns a
# symmetric n x n matrix of p-values (0 on the diagonal), carrying the
# input's column names on both dimensions. Extra arguments in `...` are
# forwarded to cor.test() (e.g. method, conf.level).
cor.mtest <- function(mat, ...) {
  m <- as.matrix(mat)
  k <- ncol(m)
  pvals <- matrix(NA, k, k)
  diag(pvals) <- 0
  for (col_a in 1:(k - 1)) {
    for (col_b in (col_a + 1):k) {
      test_result <- cor.test(m[, col_a], m[, col_b], ...)
      pvals[col_b, col_a] <- test_result$p.value
      pvals[col_a, col_b] <- pvals[col_b, col_a]
    }
  }
  dimnames(pvals) <- list(colnames(m), colnames(m))
  pvals
}
rownames(tree.imp.table[1:10,])
# matrix of the p-value of the correlation among the tree's top-10
# features; columns renamed to "LABEL LOINC" for readable axis labels.
cor.tree <- tree.train %>% dplyr::select(rownames(tree.imp.table[1:10,]))
cor.tree.names <- str_split(rownames(tree.imp.table[1:10,]),'!', simplify=T)
names(cor.tree) <- paste(cor.tree.names[1:10,1],cor.tree.names[1:10,3])
p.mat.tree <- cor.mtest(cor.tree)
M.tree<-cor(cor.tree)
# Diverging red-white-blue palette reused by all corrplot() calls below.
col <- colorRampPalette(c("#BB4444", "#EE9988", "#FFFFFF", "#77AADD", "#4477AA"))
corrplot(M.tree, method="color", col=col(200),
type="upper", #order="hclust",
addCoef.col = "black", # Add coefficient of correlation
tl.col="black", tl.srt=45,tl.cex=0.7,cl.cex=0.5, number.cex=0.5, #Text label color and rotation
# Combine with significance: blank out cells with p >= 0.01
p.mat = p.mat.tree, sig.level = 0.01, insig = "blank",
# hide correlation coefficient on the principal diagonal
diag=F
)
##################################################################
######################## Random Forest ###########################
##################################################################
#https://datascienceplus.com/random-forests-in-r/
# Random forest: same 0-imputation as the tree; make.names() sanitizes
# the "LABEL ! ITEMID ! LOINC" column names for the formula interface.
rf.data <- D.data
rf.data[is.na(rf.data)] <- 0
names(rf.data) <- make.names(names(rf.data))
# randomForest needs a factor outcome for classification.
rf.data <- rf.data %>%
mutate( Diabetes = as.factor(Diabetes))
# Sample: reuse the index vector from the decision-tree split so all
# models share the same train/test partition.
rf.train = rf.data[sample, ]
rf.test = rf.data[-sample, ]
100-16.04
set.seed(1)
#diabetes.rf.fit <- randomForest(Diabetes~. , data=rf.train, ntree=300 , mtry=sqrt(ncol(rf.train)), importance=T)
# Two fits: default-ish mtry (sqrt of feature count) vs. tuned mtry=25.
rf.fit <- randomForest(Diabetes~. , data=rf.train, ntree=300 , mtry=sqrt(ncol(rf.train)), importance=T)
rf.fit1 <- randomForest(Diabetes~. , data=rf.train, ntree=300 , mtry=25, importance=T)
#Plot importance for the tuned random forest (rf.fit1).
# Bug fixes: `var.names` was never defined anywhere in the script -- the
# feature names live in the rownames of the importance matrix; and the
# importance table referenced `diabetes.rf.imp`, a stale name from the
# commented-out `diabetes.rf.fit` (now `rf.fit1` / `rf.imp`).
rf.imp <- importance(rf.fit1) %>%
  as.data.frame %>%
  mutate(var = rownames(.)) %>%
  arrange(desc(MeanDecreaseGini))
rf.imp.var.names <- (rf.imp$var)[1:10]
# Hand-written readable labels (with LOINC codes) for the top-10
# features, in the same order as rf.imp.var.names -- NOTE(review):
# these go stale if the model is refit and the ranking changes.
rf.imp.var.names.frame <- c("Glucose 2345-7",
                            "Age",
                            "% Hemoglobin A1c 4548-4",
                            "Creatinine 2160-0",
                            "Urea Nitrogen 3094-0",
                            "Glucose 2339-0",
                            "Potassium 2823-3",
                            "MCH 785-6",
                            "BMI ",
                            "RDW 788-0")
rf.imp.table <- data.frame(Feature = rf.imp.var.names.frame,
                           Gain = rf.imp$MeanDecreaseGini[1:10])
xgb.plot.importance(rf.imp.table[1:10,] %>% data.table, rel_to_first = TRUE, xlab = "Relative importance")
#plot dbug d
# matrix of the p-value of the correlation among the RF's top-10 features
cor.rf <- rf.train %>% dplyr::select(rf.imp.var.names)
names(cor.rf) <-rf.imp.var.names.frame
p.mat.rf <- cor.mtest(cor.rf)
M.rf<-cor(cor.rf)
# `col` palette is defined in the decision-tree corrplot section above.
corrplot(M.rf, method="color", col=col(200),
type="upper",# order="hclust",
addCoef.col = "black", # Add coefficient of correlation
tl.col="black", tl.srt=45,tl.cex=0.7,cl.cex=0.5, number.cex=0.5, #Text label color and rotation
# Combine with significance: blank out cells with p >= 0.01
p.mat = p.mat.rf, sig.level = 0.01, insig = "blank",
# hide correlation coefficient on the principal diagonal
diag=FALSE
)
# confusion matrix (test data); Diabetes is the last column of rf.test
rf.y <- rf.test[,ncol(rf.test)]
rf.hat <- predict(rf.fit1,rf.test,type="class")
rf.conf <- confusionMatrix(rf.hat, rf.y %>% as.factor, positive='1')
#Tune mtry with caret: a grid around 17..26 vs. the sqrt(p) default,
#scored out-of-bag (no CV needed for random forests).
set.seed(1)
rf.tunegrid1 <- expand.grid(.mtry=c(17:26))
rf.tunegrid <- expand.grid(.mtry=sqrt(ncol(rf.train)))
rf_gridsearch <- train(Diabetes~.,
data=rf.train, method="rf", metric="Accuracy",
trControl = trainControl(method = "oob"),
tuneGrid=rf.tunegrid)
rf_gridsearch1 <- train(Diabetes~.,
data=rf.train, method="rf", metric="Accuracy",
trControl = trainControl(method = "oob"),
tuneGrid=rf.tunegrid1)
print(rf_gridsearch)
plot(rf_gridsearch)
##################################################################
########################### XGBoost ##############################
##################################################################
#https://www.kaggle.com/rtatman/machine-learning-with-xgboost-in-r
# One-hot encode the ETHNICITY and GENDER columns of `one.hot.data` and
# append the encoding to the input's numeric columns.
#
# Returns a data.frame: the dummy columns produced by model.matrix()
# (intercept dropped via [, -1]) followed by every numeric column.
# Fixes: the original ended on an assignment, which returns its value
# invisibly -- made the return explicit; and base subsetting replaces
# dplyr::select_if (same result, one less dependency for this helper).
one.hot <- function(one.hot.data){
  # keep only the numeric columns (vapply guards the column-type scan)
  numeric.data <- one.hot.data[vapply(one.hot.data, is.numeric, logical(1))]
  # dummy-code the two categorical predictors; drop the intercept column
  one.hot.matrix <- model.matrix(~ ETHNICITY + GENDER, one.hot.data)[, -1]
  cbind(one.hot.matrix, numeric.data)
}
# XGBoost needs a fully numeric matrix: one-hot the categoricals, then
# 0-impute as for the other models.
XG.data <- one.hot(D.data)
XG.data[is.na(XG.data)] <- 0
set.seed(1) # Set a random seed so that same sample can be reproduced in future runs
which.D <- (which(colnames(XG.data)== 'Diabetes'))
# Sample: reuse the shared 80/20 index vector from the tree section
train = XG.data[sample, ] #just the samples
test = XG.data[-sample, ] #everything but the samples
train_y = train[,which.D]
train_x = train[, -which.D]
test_y = test[,which.D]
test_x = test[, -which.D]
dtrain = xgb.DMatrix(data = as.matrix(train_x), label = train_y )
dtest = xgb.DMatrix(data = as.matrix(test_x), label = test_y)
# Class counts (note: "postive_cases" is a typo but unused elsewhere).
negative_cases <- sum(train_y == 0)
postive_cases <- sum(train_y == 1)
#Step1: Tune eta over 4 candidates with everything else fixed
#Tuning with Caret Packages
tune_grid <- expand.grid(
nrounds = 300,
eta = c(0.025, 0.05, 0.1, 0.3),
max_depth = c(4),
gamma = 0,
colsample_bytree = 1,
min_child_weight = 1,
subsample = 1
)
tune_control <- caret::trainControl(
method = "cv", # cross-validation
number = 10, # with n folds
#index = createFolds(tr_treated$Id_clean), # fix the folds
verboseIter = FALSE, # no training log
allowParallel = TRUE, # FALSE for reproducible results
# NOTE(review): trainControl expects `seeds` to be a list of integer
# vectors (one per resample); a scalar 1 here likely errors or is
# ignored -- confirm against caret::trainControl docs.
seeds=1
)
set.seed(1)
xgb_tune <- caret::train(
x = train_x,
y = train_y %>% as.factor,
trControl = tune_control,
tuneGrid = tune_grid,
method = "xgbTree",
verbose = TRUE
)
# helper function for the tuning plots: show a caret tuning result with
# the y-axis zoomed to the interesting accuracy range (from the `probs`
# quantile of Accuracy down to its minimum).
# Bug fix: the `probs` argument was silently ignored -- the quantile was
# hard-coded to probs = 1 (i.e. the maximum), so every call showed the
# full range regardless of the argument.
tuneplot <- function(x, probs = .90) {
  ggplot(x) +
    coord_cartesian(ylim = c(quantile(x$results$Accuracy, probs = probs),
                             min(x$results$Accuracy))) +
    theme_bw()
}
tuneplot(xgb_tune)
#Step2: Tune min_child_weight, carrying forward the best eta from Step1
tune_grid2 <- expand.grid(
nrounds = 300,
eta = xgb_tune$bestTune$eta,
max_depth = 4,
gamma = 0,
colsample_bytree = 1,
min_child_weight = c(1, 2, 3),
subsample = 1
)
set.seed(1)
xgb_tune2 <- caret::train(
x = train_x,
y = train_y %>% as.factor,
trControl = tune_control,
tuneGrid = tune_grid2,
method = "xgbTree",
verbose = TRUE
)#0.8469347
#Step3: Tune Gamma (NOTE(review): comment said "Step4"; also eta is
#hard-coded to 0.3 here instead of xgb_tune$bestTune$eta -- confirm)
tune_grid3 <- expand.grid(
nrounds = 300,
eta = 0.3,
max_depth = 4,
gamma = c(0, 0.05, 0.1, 0.5, 0.7, 0.9, 1.0),
colsample_bytree = 1,
min_child_weight = xgb_tune2$bestTune$min_child_weight,
subsample = 1
)
xgb_tune3 <- caret::train(
x = train_x,
y = train_y %>% as.factor,
trControl = tune_control,
tuneGrid = tune_grid3,
method = "xgbTree",
verbose = TRUE
)#0.8475771
tuneplot(xgb_tune3)
#Step4: Tune Max depth (shallow range), with gamma fixed at 0.7
tune_grid4 <- expand.grid(
nrounds = 300,
eta = 0.3,
max_depth = c(3,4,5,6),
gamma = 0.7,
colsample_bytree = 1,
min_child_weight = xgb_tune2$bestTune$min_child_weight,
subsample = 1
)
xgb_tune4 <- caret::train(
x = train_x,
y = train_y %>% as.factor,
trControl = tune_control,
tuneGrid = tune_grid4,
method = "xgbTree",
verbose = TRUE
)
tuneplot(xgb_tune4)
# Extend the max_depth search to deeper trees (7..10)
tune_grid5 <- expand.grid(
nrounds = 300,
eta = 0.3,
max_depth = c(7,8,9,10),
gamma = 0.7,
colsample_bytree = 1,
min_child_weight = xgb_tune2$bestTune$min_child_weight,
subsample = 1
)
xgb_tune5 <- caret::train(
x = train_x,
y = train_y %>% as.factor,
trControl = tune_control,
tuneGrid = tune_grid5,
method = "xgbTree",
verbose = TRUE
)
tuneplot(xgb_tune5)
xgb_tune5
# Stitch the two max_depth searches together for a single plot
xgb_tune4.5 <- xgb_tune4
xgb_tune4.5$results <- rbind(xgb_tune4$results, xgb_tune5$results)
tuneplot(xgb_tune4.5)
#Final model
# NOTE(review): every value here comes from xgb_tune2, so the gamma and
# max_depth found in steps 3-5 are NOT carried into the final model --
# confirm this is intentional.
final_grid <- expand.grid(
nrounds = xgb_tune2$bestTune$nrounds,
eta = xgb_tune2$bestTune$eta,
max_depth = xgb_tune2$bestTune$max_depth,
gamma = xgb_tune2$bestTune$gamma,
colsample_bytree = xgb_tune2$bestTune$colsample_bytree,
min_child_weight = xgb_tune2$bestTune$min_child_weight,
subsample = xgb_tune2$bestTune$subsample)
train_control <- caret::trainControl(
method = "none",
verboseIter = FALSE, # no training log
allowParallel = TRUE,# FALSE for reproducible results
seeds=1
)
set.seed(1)
xgb_model <- caret::train(
x = train_x,
y = train_y %>% as.factor,
trControl = train_control,
tuneGrid = final_grid,
method = "xgbTree",
verbose = TRUE
)
summary(xgb_model)
# Test-set accuracy of the caret-trained final model
xgb_predict <- predict(xgb_model, newdata = test_x)
xgb_confusion <- table(xgb_predict,test_y)
sum(diag(xgb_confusion))/sum(xgb_confusion)
xgb_tune3$bestTune
##XGBoost trained directly via the xgboost package (baseline parameters)
params <- list(nrounds = 300,
max_depth = 4,
eta = 0.3,
gamma = 0,
colsample_bytree = 1,
min_child_weight = 1,
subsample = 1)
# NOTE(review): nfold/metrics/seed are xgb.cv arguments; xgb.train
# ignores unknown parameters -- confirm the intended call.
D.xgb.trin <- xgb.train(params = params,
nfold=10,
nrounds = 300,
data = dtrain,
objective = "binary:logistic",
watchlist = list(val=dtest, train=dtrain),
metrics = list("error","auc"),
seed=1) # 300 0.154012 0.082459
1-0.154012 #0.845988
# 10-fold CV estimate of validation error/AUC for the same parameters
D.xgb.cv <- xgb.cv(params = params,
nfold=10,
nround = 300,
data = dtrain,
objective = "binary:logistic",
metrics = list("error","auc"),
seed=1)
# Recorded output from a previous run:
# iter train_error_mean train_error_std train_auc_mean train_auc_std test_error_mean test_error_std test_auc_mean test_auc_std
# 300 0.0801156 0.0005240775 0.9641491 0.0004920359 0.1539768 0.002393683 0.8734740 0.005456315
#0.8460232
# 1 0.8469347 0.4991396
# Hyperparameters with the tuned gamma (0.7, from the Step-3 grid search);
# everything else matches the baseline `params`.
# Bug fix: a stray `bbm` token followed this list in the original and
# would abort a clean run with "object 'bbm' not found" -- removed.
params1 <- list(nrounds = 300,
                max_depth = 4,
                eta = 0.3,
                gamma = 0.7,
                colsample_bytree = 1,
                min_child_weight = 1,
                subsample = 1)
# Model with tuned gamma (params1): train with a watchlist, then 10-fold CV
D.xgb.trin1 <- xgb.train(params = params1,
nfold=10,
nrounds = 300,
data = dtrain,
objective = "binary:logistic",
watchlist = list(val=dtest, train=dtrain),
metrics = list("error","auc"),
seed=1) # 300 0.150862 0.084304
#0.849138
D.xgb.cv1 <- xgb.cv(params = params1,
nfold=10,
nround = 300,
data = dtrain,
objective = "binary:logistic",
metrics = list("error","auc"),
seed=1)
# Recorded output from a previous run:
# iter train_error_mean train_error_std train_auc_mean train_auc_std test_error_mean test_error_std test_auc_mean test_auc_std
# 300 0.0803250 0.0009053397 0.9637885 0.0006506108 0.1524635 0.004403562 0.8730802 0.003304229
#0.8475365
#0.70 0.8475771 0.5018122
# Refit as an xgboost() model object (needed by xgb.importance below)
model1 <- xgboost(data = dtrain, # the data
params = params1,
seed=1,
nrounds=300,
objective = "binary:logistic"# the objective function
)
### Deeper variant: max_depth 6, same tuned gamma
params2 <- list(nrounds = 300,
max_depth = 6,
eta = 0.3,
gamma = 0.7,
colsample_bytree = 1,
min_child_weight = 1,
subsample = 1)
D.xgb.train2 <- xgb.train(params = params2,
nfold=10,
nrounds = 300,
data = dtrain,
objective = "binary:logistic",
watchlist = list(val=dtest, train=dtrain),
metrics = list("error","auc"),
seed=1) # 300 0.154095 0.017429
#0.849138
#0.845905
D.xgb.cv2 <- xgb.cv(params = params2,
nfold=10,
nround = 300,
data = dtrain,
objective = "binary:logistic",
metrics = list("error","auc"),
seed=1)
# Recorded output from previous runs (params1 row kept for comparison):
# iter train_error_mean train_error_std train_auc_mean train_auc_std test_error_mean test_error_std test_auc_mean test_auc_std
# 300 0.0803250 0.0009053397 0.9637885 0.0006506108 0.1524635 0.004403562 0.8730802 0.003304229
# 300 0.0132839 0.0009430603 0.9988901 0.0001444808 0.1521536 0.003514881 0.8734645 0.004000607
#0.8478464
#0.70 0.8475771 0.5018122
model2 <- xgboost(data = dtrain, # the data
params = params2,
seed=1,
nrounds=300,
objective = "binary:logistic"# the objective function
)
# Accuracy implied by the recorded validation error above (1 - error).
1-0.017429
# Predicted probabilities -> hard 0/1 labels at a 0.5 threshold.
model2.pre <- predict(model2, dtest,type="class")
model2.prediction <- as.numeric(model2.pre > 0.5)
# Bug fix: this sanity check originally ran BEFORE model2.prediction was
# created, which errors ("object not found") on a clean run -- moved
# after the assignment.
length(model2.prediction)
xg.conf <- confusionMatrix(model2.prediction%>% as.factor, test_y %>% as.factor, positive='1')
#Variable Importance for model1; feature names are "LABEL ! ITEMID ! LOINC",
#split on '!' to show "LABEL LOINC" in the plot
xg.importance_matrix <- xgb.importance(model = model1)
xg.imp.var.names <- xg.importance_matrix$Feature
xg.imp.var.names.frame <- str_split(xg.imp.var.names,'!', simplify=T)
xg.imp.table <- data.frame(Feature= paste(xg.imp.var.names.frame[,1],xg.imp.var.names.frame[,3]),
Gain =(xg.importance_matrix$Gain))
xgb.plot.importance(xg.imp.table[1:10,] %>% data.table, rel_to_first = TRUE, xlab = "Relative importance")
xg.imp.table[1:10,]
# matrix of the p-value of the correlation among the top-10 XGB features
cor.xg <- train_x %>% dplyr::select(xg.imp.var.names[1:10])
names(cor.xg) <- paste(xg.imp.var.names.frame[1:10,1],xg.imp.var.names.frame[1:10,3])
p.mat.xg <- cor.mtest(cor.xg)
M.xg<-cor(cor.xg)
dim(p.mat.xg)
par(mfrow=c(1,1))
corrplot(M.xg, method="color", col=col(200),
type="upper", #order="hclust",
addCoef.col = "black", # Add coefficient of correlation
tl.col="black", tl.srt=45,tl.cex=0.7,cl.cex=0.5, number.cex=0.5, #Text label color and rotation
# Combine with significance: blank out cells with p >= 0.01
p.mat = p.mat.xg, sig.level = 0.01, insig = "blank",
# hide correlation coefficient on the principal diagonal
diag=FALSE
)
#Plot the validation error rate of all three models against tree count:
#boosting CV error curve, RF OOB error curve, and the (flat) pruned-tree
#cross-validation error as a horizontal reference line.
plot(data.frame(x=seq(from=1,to=300,by=1), D.xgb.cv1$evaluation_log[,6]),
type='l',col=3, ylim = c(0.14,0.26), xlim= c(0,300),
ylab="Validation Error",xlab="Number of tree")
points(rf.fit$err.rate[,1], type='l')
abline(h=tree.cross.err, col=2)
legend("top", c('Random Forest','Decision Tree', 'Boosting'),
col=1:4,cex=0.8,fill=1:4,horiz=F)
tuneplot(xgb_tune3)
# NOTE(review): this opens a PNG device at the very end of the script,
# after all plotting, and is never closed with dev.off() -- looks like a
# leftover; confirm whether it should wrap one of the plots above.
png("image.png", width = 800, height = 600)
|
96f6ab5fa1a8f42ed87ec46e85f97ed0f84e3fa9
|
ac8f1f29dfa7b15a51b4e4df3bad1526f4c46640
|
/loaders/load_matches_with_win_ratios_prev_10_tie_as_half_win.R
|
8c8017a11eeb6b6ea88f65306dc350551ac1671d
|
[] |
no_license
|
martin-galajda/ML-soccer-data-project
|
8c6830e1d8cd53b79e503c3b77cc0d4749758bda
|
f18c7d2477d4ae3a38bd2a6db8bb194975f0e2ba
|
refs/heads/master
| 2020-03-09T17:08:53.931751
| 2018-06-20T19:51:30
| 2018-06-20T19:51:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 501
|
r
|
load_matches_with_win_ratios_prev_10_tie_as_half_win.R
|
# Load the match data enriched with each team's win ratio over its
# previous 10 matches, counting a tie as half a win.
#
# Sourcing the precompute script first guarantees the feature CSV exists
# (it is generated into the data/ directory if it has not been computed
# yet); the CSV is then read and returned as a data.frame.
load.matches.with.win.ratio.from.prev.10.matches.with.tie.as.half <- function() {
  precompute_script <- './feature_extraction/precompute_features/precompute_win_ratios_prev_10_with_tie_as_half.R'
  source(precompute_script)
  read.csv('./data/matches_with_win_ratio_from_prev_10_matches_with_tie_as_half_win.csv')
}
|
5acfd31561c9e5e49ce5c37cf2cb7399b3775ade
|
74f3121b84b8518aaae29d10fe0e69ed59fc145b
|
/plot4.R
|
5cb96de1c15b80190b79cbc685b044231e02d76c
|
[] |
no_license
|
dlarlick/ExData_Plotting1
|
c0e74a82a93f9f675f39aa8d044d3ac8e756c717
|
94251a5ab7b89b2fdabddeece09f130af250661b
|
refs/heads/master
| 2021-01-14T11:05:56.304931
| 2016-03-14T01:20:04
| 2016-03-14T01:20:04
| 53,793,893
| 0
| 0
| null | 2016-03-13T16:02:22
| 2016-03-13T16:02:22
| null |
UTF-8
|
R
| false
| false
| 2,445
|
r
|
plot4.R
|
##################
## Student DLARLICK
## COURSERA : Exploratory Data Analysis
## Week 1 Assignment Project 1
## File : plot4.R
## Due 03/13/2016
##################
##Needed for mutate function
library(plyr)
##Set the working directory and read in the relevant datasets
##NOTE(review): setwd() in a script is fragile -- consider project-relative paths
setwd("c:/rprogramming/ExpDataAnalysis/Proj1")
##read delimited files; "?" marks missing values in this dataset
hpowercon <- read.delim("C://RProgramming//ExpDataAnalysis//Proj1/household_power_consumption.txt", header=TRUE, sep=";", na.strings=c("?")
, colClasses = c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"))
##Concatenate date and time for DateTime field
hpowercon <- mutate(hpowercon, DateTime= strptime(paste(hpowercon$Date,hpowercon$Time), format= "%d/%m/%Y %H:%M:%S"))
##Subset the dataset for the two days that we are interested (2007-02-01 and 2007-02-02)
hpowercon <- subset(hpowercon, as.Date(Date, "%d/%m/%Y") >= as.Date('2007-02-01') & as.Date(Date, "%d/%m/%Y") < as.Date('2007-02-03'))
##Open a PNG device to direct the graph out too
png(filename="plot4.png")
##Define the 2x2 layout for the plots to be positioned within
par(mfrow=c(2,2))
##1st upper left quadrant
##Create a plot based on the assignment requirements
plot(hpowercon$DateTime,hpowercon$Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab="")
##Make a box around the plot
box()
##2nd upper right quadrant: voltage over time
##Bug fix: the y-axis label was copy-pasted from the first panel
##("Global Active Power (kilowatts)") but this panel plots Voltage;
##labels corrected to match the assignment's reference figure.
plot(hpowercon$DateTime,hpowercon$Voltage, type="l", ylab="Voltage", xlab="datetime")
##Make a box around the plot
box()
##3rd plot in the lower left
##Create a plot and add two additional lines for the remaining sub metering columns
plot(hpowercon$DateTime,hpowercon$Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
lines(hpowercon$DateTime,hpowercon$Sub_metering_2, col="red")
lines(hpowercon$DateTime,hpowercon$Sub_metering_3, col="blue")
##Add a legend (bty="n": no box, per the reference figure)
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"), lty=c(1,1,1), bty="n")
##4th plot in the lower right
##Create a plot for global reactive power
plot(hpowercon$DateTime,hpowercon$Global_reactive_power, type="l",ylab="Global_reactive_power", xlab="")
##Make a box around the plot
box()
##Close the device
dev.off()
##End of Script
|
e55dac7dc8836a0ac2bff84802d2777cc8749dec
|
b16154d020aca160dec32497b466d51909b1a812
|
/plot1.R
|
a15fbfb9c38a819442cc1c389188049ec906b30c
|
[] |
no_license
|
gjamnitz/ExData_Plotting1
|
2fd3dc5c2aecb8b4ca9a683ffceebce437833b87
|
ca418687a17eb20a0819df6ba38d907454797f31
|
refs/heads/master
| 2021-01-21T16:54:48.454372
| 2015-01-11T11:11:28
| 2015-01-11T11:11:28
| 28,880,864
| 0
| 0
| null | 2015-01-06T19:59:39
| 2015-01-06T19:59:39
| null |
UTF-8
|
R
| false
| false
| 629
|
r
|
plot1.R
|
# reading file - only necessary rows read
# NOTE(review): skip/nrows are hard-coded to a fixed row window of the
# raw file (nrows = 69518 - 66637 - 1 = 2880 = two days of minutes,
# presumably 2007-02-01..2007-02-02) -- this breaks silently if the
# source file changes; confirm the offsets. Also no na.strings="?" is
# set: if any "?" falls inside the window the column becomes character
# and hist() below would fail -- presumably the window is clean.
powerConsumption <- read.csv(file = "household_power_consumption.txt", sep = ";", header = FALSE, skip = 66637, nrows = 69518 - 66637 - 1)
# the header row was skipped, so re-apply the original column names
names(powerConsumption) <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
# selecting graphics device
png("plot1.png", width = 480, height = 480)
# drawing graph: red histogram of global active power
par(mfrow = c(1, 1))
with(powerConsumption, hist(Global_active_power, col="red", main="Global Active Power", xlab = "Global Active Power (kilowatts)"))
# closing device
dev.off()
|
6d7271f3a39604f2b96c209342ae029d419ec033
|
81a9486b9f7fef323e8446107272bbdeb7e1d5cf
|
/03-analysis/analysis.R
|
0627d5510f12952c9e33e40fb5e54bb1452f266a
|
[] |
no_license
|
sssspork/2020-hk-legco
|
65f979ede29fc86ec61e2cae12c782dd41bd4e2b
|
96eb8c47636ad8698e602cf4c6427384718b33c3
|
refs/heads/master
| 2022-11-25T05:04:01.353341
| 2020-07-22T05:29:02
| 2020-07-22T05:29:02
| 281,314,994
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,598
|
r
|
analysis.R
|
library(data.table)
# Voting records (one row per motion, one column per member) and the
# manually curated member metadata.
master <- fread("../01-extract-data-from-XML/legco-201610-202006.csv")
member <- fread("../02-manual-members-info/legcomembers.csv")
# remove youngspiration as they were unable to vote, due to the oath-taking controversy
# cf https://en.wikipedia.org/wiki/Hong_Kong_Legislative_Council_oath-taking_controversy
master$sixtusleung <- NULL
master$yauwaiching <- NULL
member <- subset(member, member$party_en != "youngspiration")
# note a special case in the motionresult
# NOTE(review): the row index 1589 is hard-coded -- it silently targets
# the wrong motion if the upstream CSV ever changes; confirm against a
# key (e.g. motion id/date) instead.
master[1589,]
# cf. https://www.legco.gov.hk/yr16-17/chinese/hc/minutes/hc20170428.pdf
# end result was that starrylee voted no to break the tie, thus "negatived"
master[1589,]$motionresult <- c("negatived")
# also subset records that belong to each "committee"
council <- master[committee=="council"]
finance <- master[committee=="finance"]
house <- master[committee=="house"]
####################
# 1 ABSENT RECORDS #
####################
# simple definition:
# if *all* votes on a voting day = absent, then member is absent that day
# to check if a member is present
# Was the member present on a voting day?
#
# Given all of a member's recorded votes for one day, returns FALSE when
# every vote is "absent", TRUE when at least one vote is anything else,
# and NA when presence cannot be determined: either there were no votes
# at all, or the vote vector itself contains NA (e.g. the member was not
# an eligible legislator for those motions).
is.present <- function(x) {
  absent_share <- sum(x == "absent") / length(x)
  if (is.na(absent_share)) {
    NA
  } else if (absent_share == 1) {
    FALSE
  } else {
    TRUE
  }
}
# since we will organize the "is present data" per motion date
# use unique(master$motiondate) as the base
master_present_days <- data.table(motiondate = master[, unique(motiondate)])
council_present_days <- data.table(motiondate = council[, unique(motiondate)])
finance_present_days <- data.table(motiondate = finance[, unique(motiondate)])
house_present_days <- data.table(motiondate = house[, unique(motiondate)])
# loop through each member, adding one TRUE/FALSE/NA presence column per
# member to each of the four *_present_days tables
# NOTE(review): 1:length(...) misbehaves if member is ever empty --
# seq_along() would be safer.
for (memberindex in 1:length(member$member_en)){
# get name of target member
targetmember <- member$member_en[memberindex]
# check if a certain member has voted on certain day
# (get() resolves the member-name string to the vote column inside
# the data.table scope; is.present() collapses the day's votes)
targetmembervotingdays_master <- master[, is.present(get(targetmember)), by=motiondate]
targetmembervotingdays_council <- council[, is.present(get(targetmember)), by=motiondate]
targetmembervotingdays_finance <- finance[, is.present(get(targetmember)), by=motiondate]
targetmembervotingdays_house <- house[, is.present(get(targetmember)), by=motiondate]
# rename columns (the computed column comes back as "V1")
names(targetmembervotingdays_master) <- c("motiondate", targetmember)
names(targetmembervotingdays_council) <- c("motiondate", targetmember)
names(targetmembervotingdays_finance) <- c("motiondate", targetmember)
names(targetmembervotingdays_house) <- c("motiondate", targetmember)
# cbind/merge with the master
master_present_days <- merge(master_present_days, targetmembervotingdays_master, by="motiondate")
council_present_days <- merge(council_present_days, targetmembervotingdays_council, by="motiondate")
finance_present_days <- merge(finance_present_days, targetmembervotingdays_finance, by="motiondate")
house_present_days <- merge(house_present_days, targetmembervotingdays_house, by="motiondate")
}
# set up a master frame to hold all calculations (one row per member,
# 17 columns: name + 4 counters for each of total/council/finance/house)
absent_records <- data.table(matrix(nrow=0, ncol=17, dimnames=list(c(),
c("member_en", "total_absentdays", "total_eligibledays", "total_absentvotes", "total_eligiblevotes",
"council_absentdays", "council_eligibledays", "council_absentvotes", "council_eligiblevotes",
"finance_absentdays", "finance_eligibledays", "finance_absentvotes", "finance_eligiblevotes",
"house_absentdays", "house_eligibledays", "house_absentvotes", "house_eligiblevotes"))))
# now loop through each member
# to calculate total of absent days and votes
for (memberindex in 1:length(member$member_en)){
# get name of target member
targetmember <- member$member_en[memberindex]
# calculate num of absent days in each committee (days where the
# presence flag computed above is FALSE; NA days are excluded)
total_absentdays <- nrow(master_present_days[get(targetmember)==F, ])
council_absentdays <- nrow(council_present_days[get(targetmember)==F, ])
finance_absentdays <- nrow(finance_present_days[get(targetmember)==F, ])
house_absentdays <- nrow(house_present_days[get(targetmember)==F, ])
# calculate num of days that member is
# a "qualified" legco member in each committee (non-NA presence flag)
total_eligibledays <- master_present_days[, sum(!is.na(get(targetmember)))]
council_eligibledays <- council_present_days[, sum(!is.na(get(targetmember)))]
finance_eligibledays <- finance_present_days[, sum(!is.na(get(targetmember)))]
house_eligibledays <- house_present_days[, sum(!is.na(get(targetmember)))]
# calculate num of absent votes (individual voting sessions)
total_absentvotes <- master[get(targetmember)=="absent", .N]
council_absentvotes <- council[get(targetmember)=="absent", .N]
finance_absentvotes <- finance[get(targetmember)=="absent", .N]
house_absentvotes <- house[get(targetmember)=="absent", .N]
# calculate num of voting sessions in which member is eligible to vote
total_eligiblevotes <- master[, sum(!is.na(get(targetmember)))]
council_eligiblevotes <- council[, sum(!is.na(get(targetmember)))]
finance_eligiblevotes <- finance[, sum(!is.na(get(targetmember)))]
house_eligiblevotes <- house[, sum(!is.na(get(targetmember)))]
# paste them into the master frame -- cbind() of mixed scalars coerces
# everything to character here, which is why the columns are converted
# back to integers right after this loop
tempframe <- as.data.table(cbind(targetmember, total_absentdays, total_eligibledays, total_absentvotes, total_eligiblevotes,
council_absentdays, council_eligibledays, council_absentvotes, council_eligiblevotes,
finance_absentdays, finance_eligibledays, finance_absentvotes, finance_eligiblevotes,
house_absentdays, house_eligibledays, house_absentvotes, house_eligiblevotes))
names(tempframe) <- c("member_en", "total_absentdays", "total_eligibledays", "total_absentvotes", "total_eligiblevotes",
"council_absentdays", "council_eligibledays", "council_absentvotes", "council_eligiblevotes",
"finance_absentdays", "finance_eligibledays", "finance_absentvotes", "finance_eligiblevotes",
"house_absentdays", "house_eligibledays", "house_absentvotes", "house_eligiblevotes")
# NOTE(review): growing a table with rbind() inside a loop is O(n^2);
# collecting tempframes in a list and rbindlist()-ing once would scale
# better, though it is harmless at ~70 members.
absent_records <- rbind(absent_records, tempframe)
}
# change columns to integers
# lazy way: loop through relevant columns (undoes the cbind coercion)
for (i in names(absent_records)[2:17]){
absent_records[, (i) := as.integer(get(i))]
}
# calculate percentage of absences, rounded to 3 decimal places
# NOTE(review): a member with zero eligible days/votes would produce NaN
# here (0 / 0) -- acceptable as long as every member served at least once.
absent_records[, total_percentageabsentdays := round(total_absentdays / total_eligibledays, 3)]
absent_records[, total_percentageabsentvotes := round(total_absentvotes / total_eligiblevotes, 3)]
absent_records[, council_percentageabsentdays := round(council_absentdays / council_eligibledays, 3)]
absent_records[, council_percentageabsentvotes := round(council_absentvotes / council_eligiblevotes, 3)]
absent_records[, finance_percentageabsentdays := round(finance_absentdays / finance_eligibledays, 3)]
absent_records[, finance_percentageabsentvotes := round(finance_absentvotes / finance_eligiblevotes, 3)]
absent_records[, house_percentageabsentdays := round(house_absentdays / house_eligibledays, 3)]
absent_records[, house_percentageabsentvotes := round(house_absentvotes / house_eligiblevotes, 3)]
# join the member info (party / caucus / constituency), some aesthetics
absent_records <- merge(absent_records, member, by="member_en")
names(absent_records)
# fixed, human-friendly column order for the CSV output
setcolorder(absent_records, c("member_en", "member_ch",
                              "constituency_en", "constituency_ch",
                              "party_en", "party_ch",
                              "caucus_en", "caucus_ch",
                              "total_absentdays", "total_eligibledays", "total_percentageabsentdays",
                              "total_absentvotes", "total_eligiblevotes", "total_percentageabsentvotes",
                              "council_absentdays", "council_eligibledays", "council_percentageabsentdays",
                              "council_absentvotes", "council_eligiblevotes", "council_percentageabsentvotes",
                              "finance_absentdays", "finance_eligibledays", "finance_percentageabsentdays",
                              "finance_absentvotes", "finance_eligiblevotes", "finance_percentageabsentvotes",
                              "house_absentdays", "house_eligibledays", "house_percentageabsentdays",
                              "house_absentvotes", "house_eligiblevotes", "house_percentageabsentvotes"))
# save
fwrite(absent_records, "legco-201610-202006-absences.csv")
####################
# 2 MDS SIMILARITY #
####################
# quick interactive sanity checks on the member table
dim(member)
member$member_en
# set up data.tables / similarity matrices:
# one square member-by-member matrix per committee, filled below
similarity_matrix_master <- matrix(nrow=length(member$member_en), ncol=length(member$member_en),
                                   dimnames=list(member$member_en, member$member_en))
similarity_matrix_council <- matrix(nrow=length(member$member_en), ncol=length(member$member_en),
                                    dimnames=list(member$member_en, member$member_en))
similarity_matrix_finance <- matrix(nrow=length(member$member_en), ncol=length(member$member_en),
                                    dimnames=list(member$member_en, member$member_en))
similarity_matrix_house <- matrix(nrow=length(member$member_en), ncol=length(member$member_en),
                                  dimnames=list(member$member_en, member$member_en))
# For each member
for (memberindexX in 1:length(member$member_en)){
# get his/her name
targetmemberX <- member$member_en[memberindexX]
# Compare against each member
# NOTE(review): the matrix is symmetric with a diagonal of 1, so starting
# memberindexY at memberindexX and mirroring would halve the work.
for (memberindexY in 1:length(member$member_en)){
# get his/her name
targetmemberY <- member$member_en[memberindexY]
# vote similarity = if they performed the same action for a particular motion
# (1 = same action, 0 = different; NA when either member was ineligible,
# because == on an NA vote yields NA and ifelse propagates it)
votesimilarity_master <- ifelse(master[, get(targetmemberX)] == master[, get(targetmemberY)], 1, 0)
votesimilarity_council <- ifelse(council[, get(targetmemberX)] == council[, get(targetmemberY)], 1, 0)
votesimilarity_finance <- ifelse(finance[, get(targetmemberX)] == finance[, get(targetmemberY)], 1, 0)
votesimilarity_house <- ifelse(house[, get(targetmemberX)] == house[, get(targetmemberY)], 1, 0)
# num of actions that are the same
identicalvotes_master <- sum(votesimilarity_master, na.rm=T)
identicalvotes_council <- sum(votesimilarity_council, na.rm=T)
identicalvotes_finance <- sum(votesimilarity_finance, na.rm=T)
identicalvotes_house <- sum(votesimilarity_house, na.rm=T)
# num of motions that they are eligible together (non-NA comparisons)
numvalidcases_master <- sum(!is.na(votesimilarity_master))
numvalidcases_council <- sum(!is.na(votesimilarity_council))
numvalidcases_finance <- sum(!is.na(votesimilarity_finance))
numvalidcases_house <- sum(!is.na(votesimilarity_house))
# calculate the percentage of similar actions / num of eligible motions
# save it to the master matrix
# (0 / 0 gives NaN when a pair never served together)
similarity_matrix_master[memberindexX, memberindexY] <- identicalvotes_master / numvalidcases_master
similarity_matrix_council[memberindexX, memberindexY] <- identicalvotes_council / numvalidcases_council
similarity_matrix_finance[memberindexX, memberindexY] <- identicalvotes_finance / numvalidcases_finance
similarity_matrix_house[memberindexX, memberindexY] <- identicalvotes_house / numvalidcases_house
}
}
# save similarity matrices if needed
# write.csv(similarity_matrix_master, "legco-201610-202006-all-voting-similarity.csv")
# write.csv(similarity_matrix_council, "legco-201610-202006-council-voting-similarity.csv")
# write.csv(similarity_matrix_finance, "legco-201610-202006-finance-voting-similarity.csv")
# write.csv(similarity_matrix_house, "legco-201610-202006-house-voting-similarity.csv")
# remove andrewleung from finance and house,
# since an NA row+column in a dist object would not work
# NOTE(review): the hard-coded index 4 assumes andrewleung's position in
# the member table; selecting by rowname would be more robust.
similarity_matrix_finance <- similarity_matrix_finance[-4,-4]
similarity_matrix_house <- similarity_matrix_house[-4,-4]
# convert to distance matrix (distance = 1 - similarity)
distance_matrix_master <- as.dist(1-similarity_matrix_master)
distance_matrix_council <- as.dist(1-similarity_matrix_council)
distance_matrix_finance <- as.dist(1-similarity_matrix_finance)
distance_matrix_house <- as.dist(1-similarity_matrix_house)
# since we have NA values in the distance matrix,
# transform the matrix with isomap first
# (k=1000 far exceeds the number of members, so presumably every neighbour
# is retained -- TODO confirm against vegan::isomapdist docs)
distance_ISOMAP_master <- vegan::isomapdist(distance_matrix_master, k=1000)
distance_ISOMAP_council <- vegan::isomapdist(distance_matrix_council, k=1000)
distance_ISOMAP_finance <- vegan::isomapdist(distance_matrix_finance, k=1000)
distance_ISOMAP_house <- vegan::isomapdist(distance_matrix_house, k=1000)
# non-metric MDS down to 2 dimensions for plotting
isomds_master <- MASS::isoMDS(distance_ISOMAP_master, k=2)
isomds_council <- MASS::isoMDS(distance_ISOMAP_council, k=2)
isomds_finance <- MASS::isoMDS(distance_ISOMAP_finance, k=2)
# isomds_house <- MASS::isoMDS(distance_ISOMAP_house, k=2) # doesn't work since we have folks with identical voting records
# workaround: replace exact-zero distances between distinct members with a
# tiny positive number so isoMDS accepts the matrix
distance_ISOMAP_house[distance_ISOMAP_house==0] <- 0.0000000000000000000000000000001
isomds_house <- MASS::isoMDS(distance_ISOMAP_house, k=2)
# test plots
# plot(isomds_master$points[,1], isomds_master$points[,2])
# plot(isomds_council$points[,1], isomds_council$points[,2])
# plot(isomds_finance$points[,1], isomds_finance$points[,2])
# plot(isomds_house$points[,1], isomds_house$points[,2])
## compile all MDS points, then save
# extract the 2-D coordinates
mds_master <- as.data.frame(isomds_master$points)
mds_council <- as.data.frame(isomds_council$points)
mds_finance <- as.data.frame(isomds_finance$points)
mds_house <- as.data.frame(isomds_house$points)
# rename columns
names(mds_master) <- c("all_dim1", "all_dim2")
names(mds_council) <- c("council_dim1", "council_dim2")
names(mds_finance) <- c("finance_dim1", "finance_dim2")
names(mds_house) <- c("house_dim1", "house_dim2")
# add member_en column for merging
mds_master$member_en <- row.names(mds_master)
mds_council$member_en <- row.names(mds_council)
mds_finance$member_en <- row.names(mds_finance)
mds_house$member_en <- row.names(mds_house)
# merge with member (left joins keep members missing from a committee,
# e.g. the one removed above, with NA coordinates)
mds_to_plot <- merge(x=member, y=mds_master, by="member_en", all.x=T)
mds_to_plot <- merge(x=mds_to_plot, y=mds_council, by="member_en", all.x=T)
mds_to_plot <- merge(x=mds_to_plot, y=mds_finance, by="member_en", all.x=T)
mds_to_plot <- merge(x=mds_to_plot, y=mds_house, by="member_en", all.x=T)
# save
fwrite(mds_to_plot, "legco-201610-202006-vote-mds.csv")
###################################################################
# 3 SUCCESSFUL MOTIONS, PER MOVERTYPE, PER MOVERCAUCUS, PER MOVER #
###################################################################
# there's also a case of a tie -- seems like the decision was "no immediate action required"
# cf. https://www.legco.gov.hk/yr16-17/chinese/fc/fc/minutes/fc20170719c.pdf
master[motionresult=="tied"]
# the only case where a gov officer motioned
# but failed, was one specific amendment
# https://www.legco.gov.hk/php/hansard/chinese/rundown.php?term=yr16-20&date=2018-02-08&lang=0
council[movertype=="publicofficer" & motionresult=="negatived"]
# cross-tab mover x motionresult, clean up, add the member info
# (dcast reshapes the long mover/result counts into one row per mover
# with "negatived" and "passed" columns)
council_motion_result <- as.data.table(table(council$mover, council$motionresult))
council_motion_result <- dcast(council_motion_result, V1~V2, value.var="N")
names(council_motion_result) <- c("member_en", "negatived", "passed")
council_motion_result <- merge(x=council_motion_result, y=member, all.x=T)
# steps to merge all bills from the government
# calculate the total # of negatived and passed, drop all government rows
# then add it back as one single row
# (rows with NA constituency_en are movers not found in the member table,
# i.e. government officials)
# manual work, i know. Too lazy to think of an automated solution for now
council_gov_negatived <- council_motion_result[is.na(constituency_en), sum(negatived)]
council_gov_passed <- council_motion_result[is.na(constituency_en), sum(passed)]
council_motion_result <- council_motion_result[!is.na(constituency_en)]
council_motion_result <- rbindlist(list(council_motion_result,
                                        data.table(member_en="government",
                                                   negatived=council_gov_negatived, passed=council_gov_passed,
                                                   member_ch="政府法案", constituency_en="government", constituency_ch="政府",
                                                   party_en="government", party_ch="政府", caucus_en="government", caucus_ch="政府")))
# aesthetics
setcolorder(council_motion_result, c("member_en", "member_ch", "constituency_en", "constituency_ch",
                                     "party_en", "party_ch", "caucus_en", "caucus_ch",
                                     "negatived", "passed"))
# save
fwrite(council_motion_result, "legco-201610-202006-council-motion-results.csv")
####################
# Successful votes #
####################
# A member's vote is "successful" when it lands on the winning side:
# "no" on a negatived motion, or "yes" on a passed motion.
#
# result: character vector of motion results ("passed" / "negatived" / "tied")
# vote:   character vector of the member's actions; NA marks motions for
#         which the member was not an eligible legco member
# returns: logical vector of the same length.
#
# FIX(review): the previous nested-ifelse version returned FALSE (instead
# of NA) for an NA vote on a "tied" motion, which made the eligibility
# counts below (sum(!is.na(...))) wrongly treat non-members as eligible
# for the one tied vote. NA votes now propagate as NA for every result.
is.successful <- function(result, vote){
  ifelse(is.na(vote), NA,
         (result == "negatived" & vote == "no") |
           (result == "passed" & vote == "yes"))
}
# copy master as a template
# (columns 1:7 -- presumably the motion metadata; the per-member logical
# columns are appended below -- TODO confirm layout against scraper output)
master_successful <- master[,1:7]
# loop through each member
for (memberindex in 1:length(member$member_en)){
# get name of target member
targetmember <- member$member_en[memberindex]
# T = "successful", F = "failed"
# overwrite column
# master[, get(targetmember) := is.successful(motionresult, get(targetmember))]
# not sure why the above doesn't work; i have a feeling that you can't do get() := get()
# (correct -- data.table does not accept get() on the LHS of :=; the
# supported spelling is (targetmember) := ...)
# let's use the roundabout way below
# get the comparison results
tempsuccess <- master[, is.successful(motionresult, get(targetmember))]
# add column as "abc", then rename that new (last) column to the member
# NOTE(review): master_successful[, (targetmember) := tempsuccess]
# would do this in one step.
master_successful[, ":=" (abc = tempsuccess)]
names(master_successful)[dim(master_successful)[2]] <- targetmember
}
# set up blank data.table for the per-member success summary
successful_summary <- data.table(matrix(nrow=0, ncol=9, dimnames=list(c(),
                                                                      c("member_en", "total_successful", "total_eligible",
                                                                        "council_successful", "council_eligible",
                                                                        "finance_successful", "finance_eligible",
                                                                        "house_successful", "house_eligible"))))
# loop through each member again
for (memberindex in 1:length(member$member_en)){
# get name of target member
targetmember <- member$member_en[memberindex]
# get the needed information
# - successful votes (sum of the logical column; NA = not a member)
# - eligible = motions for which member is an eligible member of legco
total_successful <- master_successful[, sum(get(targetmember), na.rm=T)]
total_eligible <- master_successful[, sum(!is.na(get(targetmember)), na.rm=T)]
council_successful <- master_successful[committee=="council", sum(get(targetmember), na.rm=T)]
council_eligible <- master_successful[committee=="council", sum(!is.na(get(targetmember)), na.rm=T)]
finance_successful <- master_successful[committee=="finance", sum(get(targetmember), na.rm=T)]
finance_eligible <- master_successful[committee=="finance", sum(!is.na(get(targetmember)), na.rm=T)]
house_successful <- master_successful[committee=="house", sum(get(targetmember), na.rm=T)]
house_eligible <- master_successful[committee=="house", sum(!is.na(get(targetmember)), na.rm=T)]
# mold into a temp row
# (cbind coerces to character; converted back to integer after the loop)
tempsummary <- data.table(cbind(targetmember, total_successful, total_eligible,
                                council_successful, council_eligible,
                                finance_successful, finance_eligible,
                                house_successful, house_eligible))
# rename just in case
names(tempsummary) <- c("member_en", "total_successful", "total_eligible",
                        "council_successful", "council_eligible",
                        "finance_successful", "finance_eligible",
                        "house_successful", "house_eligible")
# rbind with the summary data.table
# NOTE(review): O(n^2) growth again; a list + rbindlist() would scale better.
successful_summary <- rbind(successful_summary, tempsummary)
}
# change columns to integers
# lazy way: loop through relevant columns
for (i in names(successful_summary)[2:9]){
successful_summary[, (i) := as.integer(get(i))]
}
# calculate percentages of winning-side votes (3 d.p.)
successful_summary[, total_percentage := round(total_successful / total_eligible, 3)]
successful_summary[, council_percentage := round(council_successful / council_eligible, 3)]
successful_summary[, finance_percentage := round(finance_successful / finance_eligible, 3)]
successful_summary[, house_percentage := round(house_successful / house_eligible, 3)]
# left join the member info
successful_summary <- merge(x=successful_summary, y=member, by="member_en")
# aesthetics: fixed column order for the CSV output
setcolorder(successful_summary, c("member_en", "member_ch", "constituency_en", "constituency_ch",
                                  "party_en", "party_ch", "caucus_en", "caucus_ch",
                                  "total_successful", "total_eligible", "total_percentage",
                                  "council_successful", "council_eligible", "council_percentage",
                                  "finance_successful", "finance_eligible", "finance_percentage",
                                  "house_successful", "house_eligible", "house_percentage"))
# save
fwrite(successful_summary, "legco-201610-202006-vote-success.csv")
|
6cee4e60d8267d01f2fc099ee8700d7b82419309
|
7c3083e6eb57d38a8735b63d0f23620938d28669
|
/man/SURVFIT.Rd
|
3f58d4535c702fa760f55aa683cff63640926596
|
[] |
no_license
|
hamzameer/SURVFIT
|
530d45a929bdae2ca3825505a100e10129a8b7c1
|
a921c744a2da2661588dc8ae22563be519ab8419
|
refs/heads/master
| 2021-01-03T22:04:21.743495
| 2020-12-16T14:29:59
| 2020-12-16T14:29:59
| 240,253,274
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,630
|
rd
|
SURVFIT.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survfit.R
\name{SURVFIT}
\alias{SURVFIT}
\title{Doubly Sparse Survival Rule Extraction}
\usage{
SURVFIT(
formula = formula,
data = data,
rulelength = 3,
doubly.sparse = FALSE,
gamma = NULL,
lambda1 = NULL,
lambda2 = NULL,
crossvalidate = TRUE,
nfolds = 4,
num_toprules = 16,
num_totalrules = 2000,
input_rule_list = FALSE,
rule_list = NULL,
ntree = 200,
digit = 10,
seed = NULL,
nodesize = NULL,
trace = 1,
max.grid = 25,
...
)
}
\arguments{
\item{formula}{\code{formula}. The model formula specifying time, status and
dependent variables of the form \code{Surv(time, status)~ x1 + x2 + .. }}
\item{data}{\code{data.frame}. Training data.}
\item{rulelength}{\code{Integer}. Maximum length of the rule. (Default = 3)}
\item{doubly.sparse}{\code{Logical} for whether double sparsity required.
(Default = FALSE)}
\item{gamma}{\code{Numeric} or \code{list}. Hyperparameter (Default = NULL)}
\item{lambda1}{\code{Numeric} or \code{list}. Hyperparameter (Default = NULL)}
\item{lambda2}{\code{Numeric} or \code{list}. Hyperparameter (Default =
NULL)}
\item{crossvalidate}{\code{Logical}. Whether crossvalidation to be done to
find hyperparameters. (Default = TRUE)}
\item{nfolds}{\code{Integer}. Number of cross validation folds. (Default = 4)}
\item{num_toprules}{\code{Integer}. Number of rules extracted. (Default = 16)}
\item{num_totalrules}{\code{Integer}. Number of rules considered. (Default =
2000)}
\item{input_rule_list}{\code{Logical} Whether rule list supplied. (Default =
FALSE)}
\item{rule_list}{\code{List}. List of supplied rules. (Default = NULL)}
\item{ntree}{\code{Integer} .Number of trees built}
\item{digit}{\code{Integer}. Decimal points.}
\item{seed}{\code{Numeric}. Seed for reproducible experiments.}
\item{nodesize}{\code{Integer}. (Default = NULL)}
\item{trace}{\code{0 or 1}. : Turn CPLEX output on (1) or off(0). Default 1.}
\item{...}{Other inputs}
}
\value{
Object of class \code{list} with elements
\item{\code{rules}}{List of top \code{num_toprules} rules}
\item{\code{all_rules}}{List of all \code{num_totalrules} rules}
\item{\code{rule_data}}{\code{Data.frame} of rules evaluated over data}
\item{\code{beta}}{Coefficients of \code{all_rules} in the model}
}
\description{
\code{SURVFIT} extracts a "doubly sparse" (sparse in both number of rules and
in number of variables in the rules) survival rule ensemble from survival
data
}
\examples{
## For ovarian data from "survival" package.
SURVFIT(Surv(futime, fustat) ~ ., data = ovarian)
}
|
fbdf469c7a8f5b4cb42ad889fc8670459e3655d4
|
04a89a0089f50c2b98db688c6bbc899c2ea9f295
|
/man/add_watcher.Rd
|
0970ec7819ee74054b19f746cae21aa2097316ef
|
[] |
no_license
|
patsulda/Rjira
|
3f9e7eeb7b42ea72c85dfb7bc0ee05780193924d
|
4fdd287a61c6fae48ab3effba073f5ca47b12b6f
|
refs/heads/master
| 2021-01-22T21:07:37.824994
| 2017-04-01T19:07:12
| 2017-04-01T19:07:12
| 85,394,436
| 1
| 0
| null | 2017-03-18T10:53:18
| 2017-03-18T10:53:17
| null |
UTF-8
|
R
| false
| false
| 785
|
rd
|
add_watcher.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/watcher_functions.R
\name{add_watcher}
\alias{add_watcher}
\title{Add a watcher to an existing issue}
\usage{
add_watcher(issue, user, jira_url = getOption("jira_url"),
jira_user = getOption("jira_user"),
jira_password = getOption("jira_password"),
verbose = getOption("jira_verbose"))
}
\arguments{
\item{issue}{issue key to add the watcher to, e.g. \code{"BAS-1"}}
\item{user}{username of the watcher to add, e.g. \code{"admin"}}
\item{jira_url}{base url to JIRA. Defaults to 'jira/'}
\item{jira_user}{username for authentication}
\item{jira_password}{password for authentication}
\item{verbose}{FALSE}
}
\value{
POST results
}
\description{
Add a watcher to an existing issue.
}
\examples{
\dontrun{
add_watcher(issue = "BAS-1", user = "admin")
}
}
\seealso{
\code{\link{remove_watcher}}
\code{\link{get_watchers}}
}
|
03b904a06f3c3224de49e12c999a19c88c327a48
|
4e3d58e1e2165b2fd8a5c5141e9990a70914e8d9
|
/man/circos.genomicTrack.Rd
|
e1e84daae212ea59bdd582b5aef659876051b9c6
|
[
"MIT"
] |
permissive
|
jokergoo/circlize
|
d287b72d1e2cb3079068d9c36f253342fde29bf7
|
11ddb741620c44e9c566c992a4e28bb213fab19f
|
refs/heads/master
| 2023-08-23T02:27:35.268033
| 2022-12-09T16:16:41
| 2022-12-09T16:16:41
| 9,529,406
| 884
| 156
|
NOASSERTION
| 2022-02-03T11:26:44
| 2013-04-18T18:56:04
|
R
|
UTF-8
|
R
| false
| false
| 391
|
rd
|
circos.genomicTrack.Rd
|
\name{circos.genomicTrack}
\alias{circos.genomicTrack}
\title{
Create a track for genomic graphics
}
\description{
Create a track for genomic graphics
}
\usage{
circos.genomicTrack(...)
}
\arguments{
\item{...}{Pass to \code{\link{circos.genomicTrackPlotRegion}}.
}
}
\details{
A shortcut for \code{\link{circos.genomicTrackPlotRegion}}.
}
\examples{
# There is no example
NULL
}
|
2008a7ceaf8e4a2c6d85ee4dd6bf32f58cff22c4
|
82acfdef9ac6b3b0fcf0d04bb678b88fdf24e965
|
/lab8/assignment8_1.R
|
895cdf2745852bba3bd320c1284860b750daf145
|
[] |
no_license
|
NikhilMugganawar/Data_Analytics_Lab
|
1fe7093fc6b354c27e1c5c136d1a1d2b98b2e86b
|
0ffe8e6f66d0c2d6265732933bb8b2c8b509c077
|
refs/heads/main
| 2023-08-24T10:23:21.196816
| 2021-10-29T13:18:35
| 2021-10-29T13:18:35
| 394,239,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,296
|
r
|
assignment8_1.R
|
#OVERALL STRATEGY
#A SMALL TRAINING SET (here Iris dataset)
#TRAIN A SIMPLE MODEL ON SMALL TRAINING SET (here decision tree and knn)
#GATHER A LARGE SET OF UNLABELLED DATA (here using the test split set for convenience)
#IF MORE TRAINING DATA IS NEEDED MAKE PREDICTION ON UNLABELLED DATA (For convenience we treated test split as unlabelled data and made predictions on it)
#LABEL THE MOST UNCERTAIN EXAMPLE (We stop here after finding the most uncertain example)
#ADD LABELLED EXAMPLE TO TRAINING SET AND RETRAIN THE MODEL
#DATASET 1 -> IRIS DATASET
#-----------------------------------------------------------------------------------------#
# Load the Iris dataset straight from the UCI repository.
# NOTE(review): requires network access at run time; caching the file
# locally would make the script reproducible offline.
df <- read.csv("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data",
               header = FALSE)#IMPORT IRIS DATA
colnames(df) <- c('Sepal.Length','Sepal.Width','Petal.Length','Petal.Width','Species')#RENAMING COLUMNS
# encode the species label numerically via factor codes (1/2/3)
df$Species <- as.numeric(as.factor(df$Species))
################################################
######################################################
#DIVIDE INTO TRAIN AND TEST (75% / 25% random split)
# NOTE(review): no set.seed() call, so the split differs on every run.
sample <- sample.int(n = nrow(df), size = floor(.75*nrow(df)), replace = F)
train <- df[sample, ]
test  <- df[-sample, ]
####################################################
library(caTools)
library(ROCR)
library(stats4) #Load package stats
library(splines) #Load package splines
#To load package VGAM, need to load package stats4 and splines.
library(VGAM) #Load package VGAM
# multinomial logistic regression, left commented out in the original:
#logistic_model <- vglm(train$Species ~ train$Sepal.Length + train$Sepal.Width + train$Petal.Length + train$Petal.Width,
#data = train,
#family = "multinomial")
#Perform classification
#logistic_probabilities<- predict(logistic_model, data = test, type="response")
#logistic_predictions <- apply(logistic_probabilities, 1, which.max)
#logistic_predictions
###########################
#DECISION TREE MODEL: fit on the training split, predict class labels
#for the held-out split
# NOTE(review): with `train$Species ~ .` and data = train, the `.` expands
# to every column of train INCLUDING Species itself, so the tree can split
# on the label -- consider `Species ~ .` with the label excluded from the
# predictors.
library(rpart)
dt_fit <- rpart(train$Species~., data = train, method = 'class')
dt_predict <-list(predict(dt_fit, test, type = 'class'))
dt_predict
#########################
#K NEAREST NEIGHBOURS MODEL (k = 10)
# NOTE(review): `train`/`test` still contain the Species column here, so
# the label leaks into the kNN distance computation -- drop it before
# calling knn().
library(class)
knn_pred <- knn(train = train, test = test,cl = train$Species, k=10)
# committee of model names ("commitee" is the spelling used throughout)
commitee <- list("DecisionTree", "knn")
for (p in commitee) {
print(p)
}
# side-by-side predictions of the two committee members,
# one row per test example
dt_knn <- cbind(data.frame(dt_predict), data.frame(knn_pred))
colnames(dt_knn) <- c("DecisionTree","knn")
dt_knn
####################################################
# Entropy as a measure of committee disagreement.
library("entropy")
# Query-by-committee disagreement score: for each observation, collect the
# committee members' predicted labels (one column of `x` per model),
# tabulate them, and compute the entropy of that vote distribution.
# Higher entropy = stronger disagreement between the models.
# `type` is accepted for API compatibility but unused in the body.
vote_entropy <- function(x, type = 'class', entropy_method = 'ML') {
  per_obs_votes <- do.call(itertools2::izip, x)
  score_one <- function(votes) {
    entropy(table(unlist(votes)), method = entropy_method)
  }
  sapply(per_obs_votes, score_one)
}
## The function above implements the Query-By-Committee approach: we use a
## committee of models (here a decision tree and kNN), get predictions from
## every model for every unlabelled example, and look for the examples on
## which the models most disagree -- those are the ones worth labelling
## and adding to the training data.
###################################################
# per-example disagreement (entropy) scores for the committee's predictions
xy <-vote_entropy(dt_knn)
xy
# Inspect the result above: the data points with the highest disagreement
# values should be labelled and added to the training set.
|
001131a443a0ab38b2bc0263eebaf9737884fe86
|
ca4772ee47f2d3d2b1d9bc8d9450d633ad1b7c74
|
/R/assert-is-set.R
|
daa9ad30d21ca51d30258257c5f8dd736db49af1
|
[] |
no_license
|
cran/assertive.sets
|
400f82484be52e2147f197b259fa772ed28a4c19
|
f196701576ce5e490b2346ad6e4a66ade39b0a56
|
refs/heads/master
| 2021-05-04T11:23:03.322718
| 2016-12-30T18:35:49
| 2016-12-30T18:35:49
| 48,076,809
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,788
|
r
|
assert-is-set.R
|
#' @rdname are_set_equal
#' @export
assert_are_disjoint_sets <- function(x, y, 
                                     severity = getOption("assertive.severity", "stop"))
{
  # Delegate to the are_disjoint_sets predicate via the shared
  # assert_engine. get_name_in_parent() captures the callers' argument
  # expressions so failure messages name the actual inputs; `severity`
  # (option "assertive.severity", default "stop") is forwarded unchanged.
  assert_engine(
    are_disjoint_sets, 
    x, 
    y = y,
    .xname = get_name_in_parent(x),
    .yname = get_name_in_parent(y),
    severity = severity
  )  
}
#' @rdname are_set_equal
#' @export
assert_are_intersecting_sets <- function(x, y, 
                                         severity = getOption("assertive.severity", "stop"))
{
  # Same wrapper pattern as assert_are_disjoint_sets, but runs the
  # are_intersecting_sets predicate through assert_engine.
  assert_engine(
    are_intersecting_sets, 
    x, 
    y = y,
    .xname = get_name_in_parent(x),
    .yname = get_name_in_parent(y),
    severity = severity
  )  
}
#' @rdname are_set_equal
#' @export
assert_are_set_equal <- function(x, y, 
                                 severity = getOption("assertive.severity", "stop"))
{
  # Runs the are_set_equal predicate through assert_engine, with the
  # callers' expressions captured for the failure message.
  assert_engine(
    are_set_equal, 
    x, 
    y = y,
    .xname = get_name_in_parent(x),
    .yname = get_name_in_parent(y),
    severity = severity
  )  
}
#' @rdname are_set_equal
#' @export
assert_is_set_equal <- function(x, y, 
                                severity = getOption("assertive.severity", "stop"))
{
  # Deprecated alias kept for backwards compatibility: emits a
  # deprecation notice, then forwards to assert_are_set_equal.
  .Deprecated("assert_are_set_equal")
  assert_are_set_equal(x, y, severity = severity)
}
#' @rdname are_set_equal
#' @export
assert_is_subset <- function(x, y, strictly = FALSE,
                             severity = getOption("assertive.severity", "stop"))
{
  # Runs the is_subset predicate through assert_engine.
  # `strictly` is forwarded to is_subset (FALSE by default -- see
  # is_subset for the strict/non-strict semantics).
  assert_engine(
    is_subset, 
    x, 
    y = y,
    strictly = strictly,
    .xname = get_name_in_parent(x),
    .yname = get_name_in_parent(y),
    severity = severity
  )  
}
#' @rdname are_set_equal
#' @export
assert_is_superset <- function(x, y, strictly = FALSE,
                               severity = getOption("assertive.severity", "stop"))
{
  # Mirror of assert_is_subset: runs the is_superset predicate through
  # assert_engine, forwarding `strictly` and `severity`.
  assert_engine(
    is_superset, 
    x, 
    y = y,
    strictly = strictly,
    .xname = get_name_in_parent(x),
    .yname = get_name_in_parent(y),
    severity = severity
  )  
}
|
20d54e7db8126fe6ae538780c46c641f06c2fbcc
|
fd80a412dd304b8b9994e9aa897b41e42093bbca
|
/ui.R
|
a4131232d67c7af731822030d3a183e8ffab4dcb
|
[] |
no_license
|
Shekhar8777/WBAT
|
7379588363ced3f07577b19349662c3f803b09df
|
32bccd414c682c36a027ab29666132e7bc1ecfe9
|
refs/heads/master
| 2021-01-23T01:26:54.765195
| 2017-05-31T06:09:04
| 2017-05-31T06:09:04
| 92,874,993
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,516
|
r
|
ui.R
|
library(shiny)
library(shinydashboard)
library(ggplot2)
# Shiny dashboard UI for E-Analyzer.
# FIX(review): the original wrapped the "dashboard" tabItem and the
# "wordcount"/"analysis" tabItems in TWO separate tabItems() calls.
# shinydashboard expects exactly one tabItems() container holding every
# tabItem, otherwise sidebar tab switching does not render all tabs
# correctly. The three tabs are now inside a single tabItems().
ui <- dashboardPage(
  dashboardHeader(title = "E-Analyzer"),
  # sidebar: one menu entry per tab, linked by tabName
  dashboardSidebar(
    sidebarMenu(
      menuItem("DashBoard", icon = icon("fa fa-tachometer"), tabName = "dashboard"),
      menuItem("Word Count", icon = icon("fa fa-commenting-o"), tabName = "wordcount"),
      menuItem("Analysis", icon = icon("bar-chart-o"), tabName = "analysis")
    )
  ),
  dashboardBody(
    tags$head(
      tags$link(rel = "stylesheet", type = "text/css", href = "stylesheet.css")
    ),
    # single tabItems() container with all three tabs
    tabItems(
      # placeholder landing tab (intentionally empty, as in the original)
      tabItem(tabName = "dashboard"),
      # ---- Word count tab ----
      tabItem(tabName = "wordcount",
        fluidRow(
          column(4,
            sliderInput("topn", label = "Select top n highest frequency words to display", min = 1, max = 100, step = 1, value = 10)
          ),
          column(4,
            actionButton("example", label = "Example"),
            actionButton("clear", label = "Clear Text")
          )
        ),
        fluidRow(
          tags$p(" Text input:"),
          fluidRow(
            box(width = 11, tags$textarea(id = "text", rows = "5", style = "width:100%", ""))
          ),
          fluidRow(
            box(width = 11, textOutput("char_count"))
          ),
          fluidRow(
            box(width = 11, textOutput("total_count"))
          ),
          tags$p(" Seperate word count:"),
          fluidRow(
            box(width = 11, plotOutput("seperate_bar", width = "100%", height = "500px"))
          )
        )
      ),
      # ---- Analysis tab ----
      tabItem(tabName = "analysis",
        # dataset picker: built-in frames or a user upload
        fluidRow(
          column(4,
            selectInput("dataset", h5("Choose a dataset:"), choices = c("cars", "longley", "rock", "pressure", "Uploaded Data"))
          ),
          column(4,
            fileInput('file', 'Choose file to upload.')
          )
        ),
        fluidRow(
          tabBox(
            width = "100%",
            tabPanel("Data", DT::dataTableOutput('table')),
            tabPanel("Summary", tableOutput("sum")),
            # clustering: k-means and fuzzy c-means sub-tabs
            tabPanel("Clustering",
              HTML('<br>'),
              tabBox(
                width = "100%",
                tabPanel("Kmeans",
                  fluidRow(
                    column(4,
                      selectInput('xcol', 'X Variable', "")
                    ),
                    column(4,
                      selectInput('ycol', 'Y Variable', "")
                    ),
                    column(4,
                      numericInput('clusters', 'Cluster count', 3, min = 1, max = 9)
                    )
                  ),
                  fluidRow(
                    box(title = "ScatterPlot", width = "100%", background = "light-blue", plotOutput('plot1', width = "100%"))
                  )
                ),
                tabPanel("Fuzzy c-means",
                  fluidRow(
                    column(4,
                      selectInput('fc_xcol', 'X Variable', "")
                    ),
                    column(4,
                      selectInput('fc_ycol', 'Y Variable', "")
                    ),
                    column(4,
                      numericInput('fc_clusters', 'Cluster count', 3, min = 1, max = 9)
                    )
                  ),
                  fluidRow(
                    box(title = "ScatterPlot", width = "100%", background = "light-blue", plotOutput('fc_plot', width = "100%"))
                  )
                )
              )
            ),
            # regression: diagnostics and model output sub-tabs
            tabPanel("Regression",
              HTML('<br>'),
              fluidRow(
                column(4,
                  uiOutput('dv')
                ),
                column(4,
                  uiOutput('iv')
                )
              ),
              fluidRow(
                tabBox(width = "100%",
                  tabPanel("Histograms",
                    plotOutput("distPlot_dv"),
                    sliderInput("bins_dv", "Number of bins:", min = 1, max = 50, value = 7),
                    plotOutput("distPlot_iv"),
                    sliderInput("bins_iv", "Number of bins:", min = 1, max = 50, value = 7)
                  ),
                  tabPanel("Scatter Plot",
                    plotOutput("scatter")
                  ),
                  tabPanel("Correlations",
                    htmlOutput("corr"),
                    HTML('</br> </br>')
                  ),
                  tabPanel("Model",
                    verbatimTextOutput("model")
                  ),
                  tabPanel("Residuals",
                    plotOutput("residuals_hist"),
                    plotOutput("residuals_scatter")
                  )
                )
              )
            ),
            # classification: SVM sub-tab
            tabPanel("Classification",
              HTML('<br>'),
              tabBox(
                width = "100%",
                tabPanel("SVM(Support Vector Machine)",
                  fluidRow(
                    column(4,
                      uiOutput('svm_dv')
                    ),
                    column(4,
                      uiOutput('svm_iv')
                    ),
                    column(4,
                      selectInput("kernel", h5("Kernel"), choices = c("radial", "linear", "polynomial", "sigmoid"))
                    )
                  ),
                  fluidRow(
                    tabBox(width = "100%",
                      tabPanel("Scatter Plot",
                        box(title = "ScatterPlot", width = "100%", background = "light-blue", plotOutput('svm_plot', width = "100%"))
                      ),
                      tabPanel("Model",
                        verbatimTextOutput("svm_model")
                      ),
                      tabPanel("Residual",
                        verbatimTextOutput("residual_svm")
                      ),
                      tabPanel("Root Mean Square Error",
                        HTML("<p>Error</p>"),
                        verbatimTextOutput("error_svm")
                      )
                    )
                  )
                )
              )
            )
          )
        )
      )
    )
  )
)
|
900c294382e0077aa44689d627da4345a356bf71
|
8ff336d8fa0b4cee7c4359b1792ddf019a430baf
|
/R/x_data_deparser.R
|
8616af24cb1363e2c20c3e1b290d5bf4245417c9
|
[
"MIT"
] |
permissive
|
prestevez/victim
|
aab79dd897adf2539a88eeaa17e9d79a9923194e
|
9cac44d7a0a5d199fd7cb8db44a4b0a968471dfc
|
refs/heads/master
| 2022-04-03T20:37:29.162704
| 2020-01-27T16:47:45
| 2020-01-27T16:47:45
| 90,482,308
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 443
|
r
|
x_data_deparser.R
|
#' x_data_deparser function
#'
#' A helper function used inside other functions to be able
#' to call either x as an object or as a column name of data
#' @param x a vector or a name of a column in \code{data}
#' @param data a data frame.
#' @export
# Resolve `x` either as a column of `data` (when `data` is a data frame and
# `x` names a column) or as a stand-alone vector. Returns a two-element list:
# [[1]] the resolved vector, [[2]] its name (the column name, or the deparsed
# expression that was passed in).
x_data_deparser <- function(x, data = NULL) {
  if (is.data.frame(data)) {
    list(data[, x], x)
  } else {
    list(x, deparse(substitute(x)))
  }
}
|
87fc6cb523e8fc94203ce67eaef7bb61affed014
|
6990b119bcec4203db277be183b6cefd7ff46b9f
|
/Useful functions.R
|
c67f15e0d70e4ed77fe6bc97442fd42126cbc55f
|
[] |
no_license
|
visheshkochher/R_codes
|
30897f8b4277b3bfc0efbb320ee32c46b2491cf3
|
24e7b86a6c84a60f36e34e544fdb1377025aefb0
|
refs/heads/master
| 2021-01-01T18:46:33.707698
| 2017-07-26T14:47:36
| 2017-07-26T14:47:36
| 98,434,096
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 341
|
r
|
Useful functions.R
|
# Negation of %in%: TRUE for each element of `x` that does not occur in `table`.
`%not in%` <- function(x, table) {
  !(x %in% table)
}
# Shift a vector by `shift` positions, padding the vacated slots with NA.
# A positive shift moves values toward the end (NAs at the front); a negative
# shift moves them toward the start (NAs at the back). If |shift| >= length,
# an all-NA vector of the same length is returned.
shift.vec <- function(vec, shift) {
  n <- length(vec)
  if (n <= abs(shift)) {
    return(rep(NA, n))
  }
  if (shift >= 0) {
    c(rep(NA, shift), vec[seq_len(n - shift)])
  } else {
    k <- abs(shift)
    c(vec[(k + 1):n], rep(NA, k))
  }
}
|
f9c0f306f4c4fc0b6f43d4ad855d2f1bee2e5c3a
|
f14aa70c97272cc70360c733fc348026da93ec79
|
/man/titanic.Rd
|
99609260a2208ef13138c6c6bfb1e9a2b1234bd7
|
[] |
no_license
|
tklebel/crosstabr
|
410c2a229b23b56b90ed05ebd07026add609aae2
|
01c9e8d5b7c85723233f4b73c6e8ec9863450f16
|
refs/heads/master
| 2021-01-21T05:00:07.697291
| 2018-05-15T16:54:32
| 2018-05-15T16:54:32
| 46,113,923
| 1
| 0
| null | 2016-07-08T14:12:10
| 2015-11-13T09:52:34
|
R
|
UTF-8
|
R
| false
| true
| 489
|
rd
|
titanic.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{titanic}
\alias{titanic}
\title{Data on Titanic survival.}
\format{A data frame with 2201 rows and 4 columns.
The levels are as follows:
\describe{
\item{Class}{1st, 2nd, 3rd, Crew}
\item{Sex}{Male, Female}
\item{Age}{Child, Adult}
\item{Survived}{No, Yes}
}}
\usage{
titanic
}
\description{
This dataset is a reshaped form of \link[datasets]{Titanic}.
}
\keyword{datasets}
|
a0df4957b260bf135e367a9e326e808d56f87286
|
a29b0b2ec374909fe1087099c309989dcd9d0a8b
|
/R/deriveTotalPrecipitation.R
|
4a8e0edbdd50e9997f5ffa3e7d18b5e785471446
|
[] |
no_license
|
jbedia/loadeR.ECOMS
|
729bab9a41074234bc4b9d3e4473cc25bf411018
|
84528ab325dba57d2af34e1b71ce032830ee7ac9
|
refs/heads/master
| 2021-01-24T23:58:02.464171
| 2018-02-12T12:04:11
| 2018-02-12T12:04:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,756
|
r
|
deriveTotalPrecipitation.R
|
# Derive total precipitation as the elementwise sum of the snowfall grid
# (`grid`) and the rainfall grid looked up by name ("Rainf") in `gds`,
# returning the standard loadeR output structure
# (Variable / Data / xyCoords / Dates).
# NOTE(review): argument semantics are inferred from the loadeR.ECOMS helpers
# called here (getTimeDomain, getVerticalLevelPars, dictionaryTransformGrid,
# adjustDates, UDG.vocabulary) -- confirm against that package's docs.
#   gds     -- dataset handle; queried for the rainfall grid via findGridByName
#   grid    -- snowfall grid (prsn); subsets are read from it directly
#   dic     -- dictionary for variable harmonization (NULL = none)
#   level, season, years, time -- vertical/temporal selection parameters
#   latLon  -- spatial selection (llRanges chunks, pointXYindex, revLat flag)
#   aggr.d, aggr.m -- daily/monthly aggregation function names ("none" = raw)
deriveTotalPrecipitation <- function(gds, grid, dic, level, season, years, time, latLon, aggr.d, aggr.m) {
  timePars <- getTimeDomain(grid, dic, season, years, time, aggr.d, aggr.m)
  levelPars <- getVerticalLevelPars(grid, level)
  message("[", Sys.time(), "] Retrieving data subset ..." )
  # `grid` holds the snowfall flux (prsn); `grid1` the rainfall flux
  # ("Rainf"; in the case of WFDEI, this is the rainfall-flux variable).
  grid1 <- gds$findGridByName("Rainf")
  gcs <- grid$getCoordinateSystem()
  dimNames <- rev(names(scanVarDimensions(grid)))
  # One slot per requested time slice; each is filled below with the
  # spatially re-assembled chunk for that slice.
  aux.list <- rep(list(bquote()), length(timePars$tRanges))
  # NOTE(review): do.aggr and proj are computed but never used below.
  do.aggr <- ifelse((timePars$aggr.d != "none") | (timePars$aggr.m != "none"), TRUE, FALSE)
  proj <- gcs$getProjection()
  for (i in 1:length(aux.list)) {
    dimNamesRef <- dimNames
    # llRanges may contain several spatial chunks; read each and bind them
    # back together afterwards.
    aux.list2 <- rep(list(bquote()), length(latLon$llRanges))
    for (j in 1:length(aux.list2)) {
      subSet <- grid$makeSubset(levelPars$zRange, levelPars$zRange, timePars$tRanges[[i]], levelPars$zRange, latLon$llRanges[[j]]$get(0L), latLon$llRanges[[j]]$get(1L))
      subSet1 <- grid1$makeSubset(levelPars$zRange, levelPars$zRange, timePars$tRanges[[i]], levelPars$zRange, latLon$llRanges[[j]]$get(0L), latLon$llRanges[[j]]$get(1L))
      shapeArray <- rev(subSet$getShape()) # Reversed!!
      # Shape of the output depends on the spatial selection: a single-point
      # request drops the corresponding horizontal dimension.
      if (latLon$pointXYindex[1] >= 0) {
        rm.dim <- grep(gcs$getXHorizAxis()$getDimensionsString(), dimNamesRef, fixed = TRUE)
        shapeArray <- shapeArray[-rm.dim]
        dimNamesRef <- dimNamesRef[-rm.dim]
      }
      if (latLon$pointXYindex[2] >= 0) {
        rm.dim <- grep(gcs$getYHorizAxis()$getDimensionsString(), dimNamesRef, fixed = TRUE)
        shapeArray <- shapeArray[-rm.dim]
        dimNamesRef <- dimNamesRef[-rm.dim]
      }
      # Calculate total precipitation = snowfall + rainfall, elementwise.
      snow <- array(subSet$readDataSlice(-1L, -1L, latLon$pointXYindex[2], latLon$pointXYindex[1])$copyTo1DJavaArray(), dim = shapeArray)
      rain <- array(subSet1$readDataSlice(-1L, -1L, latLon$pointXYindex[2], latLon$pointXYindex[1])$copyTo1DJavaArray(), dim = shapeArray)
      prec <- snow + rain
      snow <- rain <- NULL  # release the large intermediates promptly
      aux.list2[[j]] <- array(prec, dim = shapeArray)
      prec <- NULL
    }
    # Bind the spatial chunks along the first (reversed) dimension.
    aux.list[[i]] <- do.call("abind", c(aux.list2, along = 1))
    aux.list2 <- NULL
    # Daily aggregator: collapse sub-daily steps into one value per "MM-DD"
    # group using the function named in aggr.d.
    if (timePars$aggr.d != "none") {
      aux.string <- paste(substr(timePars$dateSliceList[[i]],6,7),
                          substr(timePars$dateSliceList[[i]],9,10), sep = "-")
      aux.factor <- factor(aux.string, levels = unique(aux.string), ordered = TRUE)
      mar <- grep("^time", dimNamesRef, invert = TRUE)
      aux.list[[i]] <- apply(aux.list[[i]], MARGIN = mar, FUN = function(x) {
        tapply(x, INDEX = aux.factor, FUN = timePars$aggr.d, na.rm = TRUE)
      })
      dimNamesRef <- c("time", dimNamesRef[mar])
      # Convert dates to daily: keep the first timestamp of each day.
      nhours <- length(aux.factor) / nlevels(aux.factor)
      timePars$dateSliceList[[i]] <- timePars$dateSliceList[[i]][seq(1, by = nhours, length.out = nlevels(aux.factor))]
    }
    # Monthly aggregator: same idea, grouping by calendar month.
    if (timePars$aggr.m != "none") {
      mes <- as.numeric(substr(timePars$dateSliceList[[i]],6,7))
      mes <- factor(mes, levels = unique(mes), ordered = TRUE)
      day <- as.POSIXlt(timePars$dateSliceList[[i]])$mday
      mar <- grep("^time", dimNamesRef, invert = TRUE)
      aux.list[[i]] <- apply(aux.list[[i]], MARGIN = mar, FUN = function(x) {
        tapply(x, INDEX = mes, FUN = timePars$aggr.m)
      })
      # A single aggregated month collapses the time dimension entirely.
      dimNamesRef <- if (length(unique(mes)) > 1) {
        c("time", dimNamesRef[mar])
      } else {
        dimNamesRef[mar]
      }
      # Keep one date per month (the 1st) as the representative timestamp.
      timePars$dateSliceList[[i]] <- timePars$dateSliceList[[i]][which(day == 1)]
    }
  }
  # Re-assemble all time slices into one multidimensional array. The binding
  # axis depends on whether monthly aggregation kept a time dimension.
  # NOTE(review): `mes` here refers to the value left over from the last loop
  # iteration -- presumably all slices share the same month structure; verify.
  if (timePars$aggr.m != "none") {
    if (length(unique(mes)) > 1) {
      mdArray <- do.call("abind", c(aux.list, along = grep("^time", dimNamesRef)))
    } else {
      mdArray <- do.call("abind", c(aux.list, along = -1L))
      dimNamesRef <- c("time", dimNamesRef)
    }
  } else {
    mdArray <- do.call("abind", c(aux.list, along = grep("^time", dimNamesRef)))
  }
  aux.list <- timePars$tRanges <- NULL
  # Drop singleton dimensions together with their names.
  if (any(dim(mdArray) == 1)) {
    dimNames <- dimNamesRef[-which(dim(mdArray) == 1)]
    mdArray <- drop(mdArray)
  } else {
    dimNames <- dimNamesRef
  }
  mdArray <- unname(mdArray)
  attr(mdArray, "dimensions") <- dimNames
  timePars$dateSliceList <- as.POSIXct(do.call("c", timePars$dateSliceList), tz = "GMT")
  # Next steps are needed to match the structure returned by loadeR::loadGridDataset
  cube <- list("timePars" = timePars, "mdArray" = mdArray)
  # Apply the dictionary transformation (variable harmonization) when a
  # dictionary was supplied; track whether the output is "standard".
  if (!is.null(dic)) {
    isStandard <- TRUE
    cube$mdArray <- dictionaryTransformGrid(dic, cube$timePars, cube$mdArray)
  } else {
    isStandard <- FALSE
  }
  # Flip the latitude dimension if the source stores latitudes reversed.
  if (isTRUE(latLon$revLat)) {
    cube$mdArray <- revArrayLatDim(cube$mdArray)
  }
  Variable <- list("varName" = "pr", "level" = levelPars$level)
  attr(Variable, "use_dictionary") <- isStandard
  attr(Variable, "description") <- "total precipitation amount (rain + snow)"
  # Standard (dictionary) requests take units/longname from the UDG
  # vocabulary; otherwise fall back to the raw grid metadata.
  if (isStandard) {
    vocabulary <- UDG.vocabulary()
    attr(Variable, "units") <- as.character(vocabulary[grep("^pr$", vocabulary$identifier), 3])
    attr(Variable, "longname") <- as.character(vocabulary[grep("^pr$", vocabulary$identifier), 2])
  } else {
    attr(Variable, "units") <- grid$getUnitsString()
    attr(Variable, "longname") <- grid$getFullName()
  }
  attr(Variable, "daily_agg_cellfun") <- cube$timePars$aggr.d
  attr(Variable, "monthly_agg_cellfun") <- cube$timePars$aggr.m
  attr(Variable, "verification_time") <- time
  out <- list("Variable" = Variable, "Data" = cube$mdArray, "xyCoords" = latLon$xyCoords, "Dates" = adjustDates(cube$timePars))
  return(out)
}
# End
|
7fb81477f93d5b04d27afddd7c072193d3bbbfb2
|
e38f11fe73c4562db284e8dda6890f4037ebc3ae
|
/package/man/perform_qc_plots.Rd
|
f40abfa6dd1f851678b9fcfabd2faec2d7b9770f
|
[] |
no_license
|
genepi-freiburg/saige-pipeline
|
f3430dccf0d5d174f34e63dcd1d6ef3fb4951a9b
|
cd3145ea65e3204530308167bdc980b7f3ce8a97
|
refs/heads/master
| 2020-06-29T22:13:13.532977
| 2020-01-13T09:35:38
| 2020-01-13T09:35:38
| 200,639,008
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 613
|
rd
|
perform_qc_plots.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/perform_QC_plots.R
\name{perform_qc_plots}
\alias{perform_qc_plots}
\title{Performs box plots, histograms, Manhattan and QQ plots
(unfiltered, MAF >1\%/INF >0.4, and MAF >5\%/INF >0.8).}
\usage{
perform_qc_plots(file_pattern, out_prefix, file_sep = " ")
}
\arguments{
\item{file_pattern}{file name of results (\%CHR\% spaceholder)}
\item{out_prefix}{output file name prefix}
\item{file_sep}{field separator}
}
\description{
Performs box plots, histograms, Manhattan and QQ plots
(unfiltered, MAF >1\%/INF >0.4, and MAF >5\%/INF >0.8).
}
|
536c373d50d7a4e2f71f8f400236169b2b61e056
|
b526588e6e7a3febf842d961e27c4f3fcb059588
|
/not_shiny_app/Scripts/med_hh_income_2010_2019.R
|
f22c3d761fbbf5535f8757ed066959191080cc8b
|
[] |
no_license
|
DSPG-Young-Scholars-Program/dspg21rappk
|
9e0301e3b4201f1e960bc4cd15ecc340b52438ed
|
e725cc5e4f60b48fc5e43600b08a7b0afefaa473
|
refs/heads/main
| 2023-07-02T09:43:58.488965
| 2021-08-04T20:26:00
| 2021-08-04T20:26:00
| 392,816,062
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,232
|
r
|
med_hh_income_2010_2019.R
|
##### Necessary Libraries ######
library(tidyverse)
library(tidycensus)
library(ggplot2)
library(sp)
library(rgdal)
library(tigris)
library(raster)
library(maptools)
library(rnaturalearth)
library(rnaturalearthdata)
library(rgeos)
library(tmap)
library(tmaptools)
library(maps)
library(leaflet)
library(viridis)
library(mapview)
library(RColorBrewer)
library(stringr)
library(osmdata)
library(osrm)
library(sf)
library(ggpubr)
library(ggmap)
library(gridExtra)
library(xlsx)
library(scales)
library(fpp2)
library(spData)
library(spDataLarge)
library(GISTools)
devtools::install_github("rCarto/osrm")
options(tigris_use_cache = TRUE)
options(tigris_class="sf")
#################################################
# Rappahannock County, VA coordinates: Latitude = 38.6762° Longitude = -78.1564
# ACS variable code for total population (table B02001); reusable as the
# summary variable in any of the get_acs() wrapper functions below.
pop_total <- c(poptotal = "B02001_001")
# Shared ggplot2 theme appended to every plot for a consistent look:
# centered title, enlarged axis/legend text, blank panel background.
plot_theme <- theme(plot.title = element_text(hjust = 0.5),
                    axis.text=element_text(size=12),
                    legend.text = element_text(size=12),
                    axis.title.x=element_text(size =13),
                    axis.title.y=element_text(size =13),
                    panel.background = element_blank())
########## Necessary functions for working all variables ############
# Pull county-level ACS stats for Rappahannock County (FIPS 51-157) as a whole
# county (not subdivided), add a percent-of-summary column, and drop the
# COUNTYNS identifier column.
rapp_all <- function(varcode, summary_var, year = 2019) {
  acs <- get_acs(
    geography = "county",
    state = 51,
    county = 157,
    variables = varcode,
    summary_var = summary_var,
    year = year,
    geometry = TRUE,
    keep_geo_vars = TRUE,
    cache = TRUE
  )
  acs <- mutate(acs, percent = (estimate / summary_est) * 100)
  subset(acs, select = -c(COUNTYNS))
}
# Same county-level pull as rapp_all(), but keeps all columns and reprojects
# the geometry to WGS84 (the most commonly used coordinate system).
get_rapp <- function(varcode, summary_var, year = 2019) {
  acs <- get_acs(
    geography = "county",
    state = 51,
    county = 157,
    variables = varcode,
    summary_var = summary_var,
    year = year,
    geometry = TRUE,
    keep_geo_vars = TRUE,
    cache = TRUE
  )
  acs <- mutate(acs, percent = (estimate / summary_est) * 100)
  st_transform(acs, crs = "WGS84")
}
# County-level pull in wide format (one estimate/MOE column pair per
# variable), reprojected to WGS84. No percent column is added here.
rapp_wide <- function(varcode, summary_var, year = 2019) {
  wide <- get_acs(
    geography = "county",
    state = 51,
    county = 157,
    variables = varcode,
    summary_var = summary_var,
    year = year,
    geometry = TRUE,
    keep_geo_vars = TRUE,
    cache = TRUE,
    output = "wide"
  )
  st_transform(wide, crs = "WGS84")
}
# Pull county-subdivision (district) level ACS stats for Rappahannock County,
# add a percent-of-summary column, and drop the duplicate NAME.y column.
rapp_var <- function(varcode, summary_var, year = 2019) {
  acs <- get_acs(
    geography = "county subdivision",
    state = 51,
    county = 157,
    variables = varcode,
    summary_var = summary_var,
    year = year,
    geometry = TRUE,
    keep_geo_vars = TRUE,
    cache = TRUE
  )
  acs <- mutate(acs, percent = (estimate / summary_est) * 100)
  subset(acs, select = -c(NAME.y))
}
#################################################
# District-level (county subdivision) pull in wide format, reprojected to
# WGS84 with the duplicate NAME.y column dropped.
get_district_acs_wide <- function(varcode, summary_var, year = 2019) {
  wide <- get_acs(
    geography = "county subdivision",
    state = 51,
    county = 157,
    variables = varcode,
    summary_var = summary_var,
    geometry = TRUE,
    keep_geo_vars = TRUE,
    year = year,
    cache = TRUE,
    output = "wide"
  )
  wide <- st_transform(wide, crs = "WGS84")
  subset(wide, select = -c(NAME.y))
}
# BUG FIX: a call to get_district_acs_wide(medianincome_var, median_income_all)
# used to sit here, but medianincome_var is not defined until below, so the
# script errored at this point when run top-to-bottom. The identical call is
# repeated (correctly) after the variable definitions, so it has been removed.
################################################# Same as above but better
# ACS detailed table B19001 (household income in the past 12 months): each
# entry maps a readable bracket name to its detailed-table variable code.
medianincome_var <- c(
  med_hh_income_less10k = "B19001_002",
  med_hh_income_10_14k = "B19001_003",
  med_hh_income_15_19k = "B19001_004",
  med_hh_income_20_24k = "B19001_005",
  med_hh_income_25_29k = "B19001_006",
  med_hh_income_30_34k = "B19001_007",
  med_hh_income_35_39k = "B19001_008",
  med_hh_income_40_44k = "B19001_009",
  med_hh_income_45_49k = "B19001_010",
  med_hh_income_50_59k = "B19001_011",
  med_hh_income_60_74k = "B19001_012",
  med_hh_income_75_99k = "B19001_013",
  med_hh_income_100_124k = "B19001_014",
  med_hh_income_125_149k = "B19001_015",
  med_hh_income_150_199k = "B19001_016",
  med_hh_income_200kmore = "B19001_017")
# B19001_001 = total households; used as the summary (denominator) variable.
median_income_all = "B19001_001"
# Display-label -> column-name map for the four collapsed brackets. When used
# as factor levels, the VALUES (column names) are what factor() matches on;
# the names are the human-readable labels.
incomevector <- c("Under $25,000" = "medianunder25k", "$25,000 to $50,000" = "median25to50k", "$50,000 to $100,000" = "median50to100k", "Over $100,000" = "medianover100k")
# For every year, pull the district-level income table, collapse the 16
# detailed B19001 brackets into four broad brackets (as percentages), and tag
# each row with its ACS year.
#
# Collapse one year's wide ACS pull into the four broad brackets.
# NOTE(review): the denominator is sum(wide$summary_est) -- household totals
# summed across ALL county subdivisions -- exactly as in the original
# per-year code; confirm a county-wide (not per-subdivision) denominator is
# intended.
collapse_income_brackets <- function(wide, year_label) {
  county_total <- sum(wide$summary_est)
  collapsed <- wide %>%
    mutate(
      medianunder25k = ((med_hh_income_less10kE + med_hh_income_10_14kE +
                           med_hh_income_15_19kE + med_hh_income_20_24kE) /
                          county_total) * 100,
      median25to50k = ((med_hh_income_25_29kE + med_hh_income_30_34kE +
                          med_hh_income_35_39kE + med_hh_income_40_44kE +
                          med_hh_income_45_49kE) / county_total) * 100,
      median50to100k = ((med_hh_income_50_59kE + med_hh_income_60_74kE +
                           med_hh_income_75_99kE) / county_total) * 100,
      medianover100k = ((med_hh_income_100_124kE + med_hh_income_125_149kE +
                           med_hh_income_150_199kE + med_hh_income_200kmoreE) /
                          county_total) * 100
    ) %>%
    subset(select = c(GEOID, NAME.x, medianunder25k, median25to50k,
                      median50to100k, medianover100k, summary_est,
                      summary_moe, geometry))
  cbind(collapsed, year = year_label)
}
# One pull + collapse per year. Separate pulls are required because the
# underlying geography files differ between ACS vintages.
medianincome_wide <- get_district_acs_wide(medianincome_var, median_income_all)
medianincome2019 <- collapse_income_brackets(medianincome_wide, "2019")
medianincome_wide <- get_district_acs_wide(medianincome_var, median_income_all, 2018)
medianincome2018 <- collapse_income_brackets(medianincome_wide, "2018")
medianincome_wide <- get_district_acs_wide(medianincome_var, median_income_all, 2017)
medianincome2017 <- collapse_income_brackets(medianincome_wide, "2017")
medianincome_wide <- get_district_acs_wide(medianincome_var, median_income_all, 2016)
medianincome2016 <- collapse_income_brackets(medianincome_wide, "2016")
medianincome_wide <- get_district_acs_wide(medianincome_var, median_income_all, 2015)
medianincome2015 <- collapse_income_brackets(medianincome_wide, "2015")
medianincome_wide <- get_district_acs_wide(medianincome_var, median_income_all, 2014)
medianincome2014 <- collapse_income_brackets(medianincome_wide, "2014")
# 2014's shapefile carries an extra Z/M dimension incompatible with the other
# years, so drop it before row-binding.
medianincome2014 <- st_zm(medianincome2014, drop = TRUE, what = "ZM")
medianincome_wide <- get_district_acs_wide(medianincome_var, median_income_all, 2013)
medianincome2013 <- collapse_income_brackets(medianincome_wide, "2013")
medianincome_wide <- get_district_acs_wide(medianincome_var, median_income_all, 2012)
medianincome2012 <- collapse_income_brackets(medianincome_wide, "2012")
medianincome_wide <- get_district_acs_wide(medianincome_var, median_income_all, 2011)
medianincome2011 <- collapse_income_brackets(medianincome_wide, "2011")
medianincome_wide <- get_district_acs_wide(medianincome_var, median_income_all, 2010)
medianincome2010 <- collapse_income_brackets(medianincome_wide, "2010")
# The only justification for the per-year separation is incompatible geography;
# stack the ten yearly tables into one long table.
medianincome2010_2019 <- rbind(medianincome2019, medianincome2018,
                               medianincome2017, medianincome2016,
                               medianincome2015, medianincome2014,
                               medianincome2013, medianincome2012,
                               medianincome2011, medianincome2010)
#Converting the above wide format into a long format so that all the variables in a singular column and the estimates can be their own column
income2010_2019 <- medianincome2010_2019 %>%
pivot_longer(cols = c(medianunder25k, median25to50k, median50to100k, medianover100k), names_to = "incomebracket", values_to = "percent")
income2010_2019$incomebracket <- factor(income2010_2019$incomebracket, levels = incomevector)
income2010_2019 <- income2010_2019 %>% mutate(estimate = ((income2010_2019$percent / 100) * income2010_2019$summary_est) )
income2010_2019 <- st_as_sf(income2010_2019)
# Grouped bar chart: bracket shares (% of population) by district, one facet
# per ACS year.
ggplot(income2010_2019, aes(x = incomebracket, y = percent, fill = NAME.x, group = NAME.x)) +
  geom_col(position = "dodge") +
  ggtitle("Median Household Income (In US Dollars) by District 2010-2019") +
  xlab("Income Bracket") +
  ylab("Percentage of Population") +
  facet_wrap(~year) +
  scale_fill_viridis_d(name = "District") +
  coord_flip() +
  scale_x_discrete(labels = c("Under $25,000", "$25,000 to $50,000" , "$50,000 to $100,000" , "Over $100,000" )) +
  # BUG FIX: the chain previously ended after scale_x_discrete() (missing "+"),
  # leaving plot_theme as a stranded expression -- the shared theme was never
  # applied to this plot, unlike every sibling plot below.
  plot_theme
# Same faceted bar chart, but on absolute population counts rather than shares.
ggplot(income2010_2019, aes(x = incomebracket, y = estimate, fill = NAME.x, group = NAME.x)) +
  geom_col(position = "dodge") +
  ggtitle("Median Household Income (In US Dollars) by District 2010-2019") +
  xlab("Income Bracket") +
  ylab("Total Population") +
  facet_wrap(~year) +
  scale_fill_viridis_d(name = "District") +
  coord_flip() +
  scale_x_discrete(labels = c("Under $25,000", "$25,000 to $50,000" , "$50,000 to $100,000" , "Over $100,000" )) +
  plot_theme

# County-wide totals per bracket by year (districts pooled, no facet).
ggplot(income2010_2019, aes(x = year, y = estimate, fill = incomebracket, group = incomebracket)) +
  geom_col(position = "dodge") +
  ggtitle("Median Household Income (In US Dollars) 2010-2019") +
  xlab("Income Bracket") +
  ylab("Total Population") +
  scale_fill_viridis_d(name = "Income Bracket", labels = c("Under $25,000", "$25,000 to $50,000" , "$50,000 to $100,000" , "Over $100,000" )) +
  plot_theme

# Time-series lines per bracket, faceted by district: y = absolute estimate,
# line width = bracket share.  (The trailing comma inside aes() is tolerated
# by ggplot2, which ignores empty arguments.)
ggplot(income2010_2019, aes(x = year, y = estimate, group = incomebracket, color = incomebracket,)) +
  geom_line(aes(size = percent)) +
  ggtitle(label = "Median Household Income (In US Dollars) by District 2010-2019") +
  ylab("Total Population") +
  labs(size ="Percent of Population") +
  scale_color_viridis_d(name = "Income Brackets", labels = c("Under $25,000", "$25,000 to $50,000" , "$50,000 to $100,000" , "Over $100,000" )) +
  facet_wrap(~NAME.x) +
  plot_theme

# Mirror of the previous plot with the roles swapped: y = share, width = count.
ggplot(income2010_2019, aes(x = year, y = percent, group = incomebracket, color = incomebracket,)) +
  geom_line(aes(size = estimate)) +
  ggtitle(label = "Median Household Income (In US Dollars) by District 2010-2019") +
  ylab("Percentage of Population") +
  labs(size = "Total Population") +
  scale_color_viridis_d(name = "Income Brackets", labels = c("Under $25,000", "$25,000 to $50,000" , "$50,000 to $100,000" , "Over $100,000" )) +
  facet_wrap(~NAME.x) +
  plot_theme
####################### Median Income in raw Dollars #######################
# ACS subject-table variables: S1901_C01_012 = median household income ($),
# S1901_C01_013 = mean household income ($).
# NOTE(review): mean_income_dollars is defined but not used in this section.
median_income_dollars <- c(median_income_dollars = "S1901_C01_012")
mean_income_dollars <- c(mean_income_dollars = "S1901_C01_013")

# One table per ACS year, 2019 back to 2010.  rapp_var() is a project helper
# defined elsewhere; the 2019 call passes no year, so it presumably defaults
# to 2019 -- TODO confirm before refactoring these into a loop.
mediandollars2019 <- rapp_var(median_income_dollars, median_income_dollars) %>%
  subset(select = c(GEOID, NAME.x, variable, estimate, moe, summary_est, summary_moe, percent, geometry)) %>%
  rename(NAME = NAME.x) %>%
  add_column(year = "2019")
mediandollars2018 <- rapp_var(median_income_dollars, median_income_dollars, 2018) %>%
  subset(select = c(GEOID, NAME.x, variable, estimate, moe, summary_est, summary_moe, percent, geometry)) %>%
  rename(NAME = NAME.x) %>%
  add_column(year = "2018")
mediandollars2017 <- rapp_var(median_income_dollars, median_income_dollars, 2017) %>%
  subset(select = c(GEOID, NAME.x, variable, estimate, moe, summary_est, summary_moe, percent, geometry)) %>%
  rename(NAME = NAME.x) %>%
  add_column(year = "2017")
mediandollars2016 <- rapp_var(median_income_dollars, median_income_dollars, 2016) %>%
  subset(select = c(GEOID, NAME.x, variable, estimate, moe, summary_est, summary_moe, percent, geometry)) %>%
  rename(NAME = NAME.x) %>%
  add_column(year = "2016")
mediandollars2015 <- rapp_var(median_income_dollars, median_income_dollars, 2015) %>%
  subset(select = c(GEOID, NAME.x, variable, estimate, moe, summary_est, summary_moe, percent, geometry)) %>%
  rename(NAME = NAME.x) %>%
  add_column(year = "2015")
# The 2014 geometries carry Z/M dimensions; drop them so all years rbind
# cleanly later.
mediandollars2014 <- rapp_var(median_income_dollars, median_income_dollars, 2014) %>%
  subset(select = c(GEOID, NAME.x, variable, estimate, moe, summary_est, summary_moe, percent, geometry)) %>%
  rename(NAME = NAME.x) %>%
  add_column(year = "2014") %>%
  st_zm(drop = TRUE, what = "ZM")
mediandollars2013 <- rapp_var(median_income_dollars, median_income_dollars, 2013) %>%
  subset(select = c(GEOID, NAME.x, variable, estimate, moe, summary_est, summary_moe, percent, geometry)) %>%
  rename(NAME = NAME.x) %>%
  add_column(year = "2013")
mediandollars2012 <- rapp_var(median_income_dollars, median_income_dollars, 2012) %>%
  subset(select = c(GEOID, NAME.x, variable, estimate, moe, summary_est, summary_moe, percent, geometry)) %>%
  rename(NAME = NAME.x) %>%
  add_column(year = "2012")
mediandollars2011 <- rapp_var(median_income_dollars, median_income_dollars, 2011) %>%
  subset(select = c(GEOID, NAME.x, variable, estimate, moe, summary_est, summary_moe, percent, geometry)) %>%
  rename(NAME = NAME.x) %>%
  add_column(year = "2011")
# 2010 is fetched directly with tidycensus::get_acs() rather than the helper
# (state 51 = Virginia, county 157), reproducing the helper's percent column
# by hand.  NOTE(review): presumably the helper does not support this
# year/geography combination -- confirm.
mediandollars2010 <- get_acs(geography = "county subdivision",
             state = 51,
             county = 157,
             variables = median_income_dollars,
             summary_var = median_income_dollars,
             year = 2010,
             geometry = TRUE,
             keep_geo_vars = TRUE,
             cache = TRUE) %>%
  mutate(percent = (estimate/sum(summary_est))*100) %>%
  subset(select = c(GEOID, NAME.x, variable, estimate, moe, summary_est, summary_moe, percent, geometry)) %>%
  rename(NAME = NAME.x) %>%
  add_column(year = "2010")
# Stack the per-year median-income-in-dollars tables into one 2010-2019 table.
mediandollars2010_2019 <- rbind(
  mediandollars2019, mediandollars2018, mediandollars2017, mediandollars2016,
  mediandollars2015, mediandollars2014, mediandollars2013, mediandollars2012,
  mediandollars2011, mediandollars2010
)
# Line chart of the median-income dollar estimate per district over time.
# NOTE(review): aes(size = 3) maps a constant inside aes(), creating a
# spurious size legend that is then suppressed by guides(size = FALSE);
# moving size = 3 outside aes() would make the guides() call unnecessary.
ggplot(mediandollars2010_2019, aes(x = year, y = estimate, color = NAME, group = NAME)) +
  geom_line(aes(size = 3)) +
  labs(title = "Median Household Income by District (In US Dollars) 2010-2019") +
  xlab("Income in Dollars") +
  guides(size = FALSE) +
  ylim(30000,120000) +
  scale_color_viridis_d(name = "District") +
  plot_theme
# Choropleth of median household income by district, one facet per year.
ggplot(mediandollars2010_2019) +
  geom_sf(aes(fill = estimate)) +
  # BUG FIX: the chain previously broke after ggtitle() (missing "+"), so the
  # label/axis/coord/facet layers below were evaluated as a separate,
  # orphaned expression and never attached to the plot.
  ggtitle("Median Household Income by District (In US Dollars) 2010-2019") +
  geom_sf_label(aes(label = NAME)) +
  xlab("") +
  ylab("") +
  coord_sf(datum = NA) +
  facet_wrap(~year)
|
ba3baed8731eb5c231e769241ea3deebe954e7d4
|
409cdf1ef7d0009475c28b56d8fe009d869231c2
|
/plot3.R
|
40735f9a23b10ab7dcd0d939f9a3eac2c6603368
|
[] |
no_license
|
wlau0721/ExData_Plotting1
|
d1d81b8fdcf3b106ab94fc30eb592c1a646e96ba
|
3f6159c0682f5fd18d91266c0f9fd6152d46c94b
|
refs/heads/master
| 2020-05-29T11:57:42.098056
| 2015-01-10T17:32:50
| 2015-01-10T17:32:50
| 28,992,057
| 0
| 0
| null | 2015-01-08T23:50:33
| 2015-01-08T23:50:33
| null |
UTF-8
|
R
| false
| false
| 1,576
|
r
|
plot3.R
|
##### filename: plot3.R
##### goal: generate plot3.png for course project #1. The data file is
##### assumed to be stored in the data folder in the same directory as the
##### script.

## Read the entire electric power consumption data set.
## NOTE(review): creating ./data does not fetch the data file; the read below
## still requires household_power_consumption.txt to already be present.
if (!file.exists("./data")) {dir.create("./data")}
powerdata <- read.table("./data/household_power_consumption.txt", header = TRUE, sep = ";")

## Subset the data set to the dates 2007-02-01 and 2007-02-02.
d <- as.Date(as.character(powerdata$Date), format = "%d/%m/%Y")
subd <- d >= as.Date("2007-02-01") & d <= as.Date("2007-02-02")
subpowerdata <- powerdata[subd, ]

## Add a new column combining Date and Time into a POSIXct timestamp.
subpowerdata$DateTime <- as.POSIXct(paste(as.Date(subpowerdata$Date, "%d/%m/%Y"), subpowerdata$Time), "%Y-%m-%d %H:%M:%S")

## Convert the Sub_metering columns to numeric (read.table may have parsed
## them as factors/character because of the "?" missing-value marker).
subpowerdata$Sub_metering_1 <- as.numeric(as.character(subpowerdata$Sub_metering_1))
subpowerdata$Sub_metering_2 <- as.numeric(as.character(subpowerdata$Sub_metering_2))
subpowerdata$Sub_metering_3 <- as.numeric(as.character(subpowerdata$Sub_metering_3))

## Open the PNG graphics device to create the output file.
png(file = "plot3.png")

## Draw the three sub-metering series on one set of axes.
with(subpowerdata, plot(DateTime, Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "n"))
with(subpowerdata, lines(DateTime, Sub_metering_1, col = "black"))
with(subpowerdata, lines(DateTime, Sub_metering_2, col = "red"))
with(subpowerdata, lines(DateTime, Sub_metering_3, col = "blue"))
## BUG FIX: the legend() call contained a doubled comma (",,"), which passed
## an empty (missing) argument into legend().
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = c(1, 1, 1), col = c("black", "red", "blue"))

## Close the PNG device.
dev.off()
|
75af1dcf6005a942a9e18aae5ec3d547e4fc5f66
|
842d2e60d78bf98fbc801bf77c0cdecefdc743fb
|
/R/range_bag.R
|
34e6a698a4e01c6919f77637acf14fffb6c2389e
|
[] |
no_license
|
jscamac/edmaps
|
f3c2a588b1c23d64a7739d30fd8c1b21cbef4823
|
3b21b380d5de167c115fc867162eed23c58d96c1
|
refs/heads/master
| 2023-04-04T23:42:50.981957
| 2022-05-27T04:05:24
| 2022-05-27T04:05:24
| 220,372,372
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,132
|
r
|
range_bag.R
|
#' Fit and project range bag model
#'
#' Fit and project range bag model.
#'
#' @param occurrence_data `sf` object, `data.frame` or character
#' path to a csv file containing occurrence coordinates (must contain
#' columns named "Latitude" and "Longitude").
#' @param bioclim_dir Path. Path to directory containing WorldClim raster data.
#' @param n_dims Integer. The number of dimensions ranges to bag.
#' @param n_models Integer. The number of bootstrapped model ensembles to run.
#' @param p Numeric between 0 and 1. The proportion of occurrence records to
#' include in bootstrapping .
#' @param exclude_vars Character vector. A vector of bioclim variables to
#' exclude from analysis. Default is `NULL`.
#' @param outfile Character. Output raster file path. Parent directory will be
#' created recursively if required. If `NULL`, the `RasterLayer`
#' will be returned in R.
#' @return A `RasterLayer` of model predictions is written to
#' `outfile` if provided, and returned to R otherwise. The raster's
#' extent, resolution and CRS are taken from the raster data in
#' `bioclim_dir`. Cell values give the fraction of bootstrapped models
#' for which the cell's environment fell within the species' modelled
#' climate envelope.
#' @references This function is a modified version of the `rb` function
#' provided in Drake, J.M. & Richards, R.L. (2019)
#' [Data from: Estimating environmental suitability.](https://doi.org/10.5061/dryad.g5p7d1c]) Dryad, Dataset, doi:10.5061/dryad.g5p7d1c.
#'
#' See also: Drake, J.M. (2015)
#' [Range bagging: a new method for ecological niche modelling from presence-only data.](https://doi.org/10.1098/rsif.2015.0086)
#' _Journal of the Royal Society Interface_, 12(107), 20150086.
#' doi:https://doi.org/10.1098/rsif.2015.0086.
#' @importFrom geometry tsearchn convhulln delaunayn
#' @importFrom raster stack dropLayer crop rasterFromXYZ writeRaster as.data.frame
#' @importFrom utils read.csv
#' @importFrom sf as_Spatial st_transform
#' @importFrom sp coordinates proj4string CRS
#' @importFrom stats na.omit
#' @importFrom rnaturalearth ne_countries
#' @importFrom dplyr filter
#' @importFrom magrittr "%>%"
#' @export
range_bag <- function(occurrence_data, bioclim_dir, n_dims = 2, n_models = 100,
                      p = 0.5, exclude_vars = NULL, outfile = NULL) {
  # BUG FIX: `outfile` now defaults to NULL, matching the roxygen contract
  # ("If NULL, the RasterLayer will be returned"); the old code used
  # missing(), so an explicit outfile = NULL crashed in writeRaster().

  # SUB FUNCTIONS
  # Fit: draw n_models bootstrap ensembles.  Each ensemble picks `dimensions`
  # random climate variables and stores the convex hull (or min/max range in
  # the 1-D case) of a random proportion `p` of the occurrence records.
  range_bag_fit <- function(fit_data, n_models, dimensions, p) {
    models <- list()
    n <- dim(fit_data)
    for(i in 1:n_models){
      vars <- sample.int(n[2], size=dimensions, replace=FALSE)
      x0 <- fit_data[, vars]
      if(dimensions==1) {
        x1 <- x0[sample(n[1], ceiling(p*n[1]), replace=FALSE)]
        models[[i]] <- list(vars=vars, endpoints=c(min(x1), max(x1)), data=x1)
      }
      else{
        x1 <- x0[sample(n[1],ceiling(p*n[1]), replace=FALSE),]
        idx <- unique(as.vector(geometry::convhulln(x1, options='Pp')))
        endpoints <- x1[idx,]
        models[[i]] <- list(vars=vars, endpoints=endpoints, data=unique(x1))
      }
    }
    return(models)
  }

  # Predict: for each row of new_data, return the fraction of ensembles whose
  # hull/range contains that row's climate values.
  range_bag_pred <- function(models, new_data) {
    n_models <- length(models)
    dimensions <- ifelse(is.null(dim(models[[1]]$endpoints)), 1,
                         dim(models[[1]]$endpoints)[2])
    n <- dim(new_data)
    out <- numeric(n[1])
    for(i in 1:n_models){
      if(dimensions==1){
        test.pts <- (models[[i]]$endpoints[1] < new_data[,models[[i]]$vars]) &
          (new_data[,models[[i]]$vars] < models[[i]]$endpoints[2])
        out <- out + test.pts
      } else{
        # Point-in-hull test via Delaunay triangulation membership.
        test.dat <- as.matrix(new_data[,models[[i]]$vars])
        tri.pts <- geometry::tsearchn(
          as.matrix(models[[i]]$data),
          geometry::delaunayn(models[[i]]$data), test.dat)
        test.pts <- !is.na(tri.pts$p[,1])
        out <- out + test.pts
      }
    }
    return(out/n_models)
  }

  bioclim_vars <- c("bio01", "bio02", "bio03", "bio04",
                    "bio05", "bio06", "bio07", "bio08",
                    "bio09", "bio10", "bio11", "bio12",
                    "bio13", "bio14", "bio15", "bio16",
                    "bio17", "bio18", "bio19")
  if(!is.null(exclude_vars) && !all(exclude_vars %in% bioclim_vars)) {
    # BUG FIX: paste() was vectorised over bioclim_vars, yielding 19
    # concatenated copies of the message; collapse the variable list instead.
    stop(paste("Only the following variables can be excluded:",
               paste(bioclim_vars, collapse = ", ")))
  }
  # Load the climate layers and drop any the caller excluded.
  bioclim_stack <- raster::stack(list.files(bioclim_dir, full.names=TRUE,
                                            pattern="\\.tif$"))
  if(!is.null(exclude_vars)) {
    id <- which(bioclim_vars %in% exclude_vars)
    bioclim_stack <- raster::dropLayer(bioclim_stack, id)
  }
  # Accept a CSV path, a plain data.frame, or an sf object of occurrences.
  if(is.character(occurrence_data)) {
    locs <- utils::read.csv(occurrence_data)
  } else {
    locs <- occurrence_data
  }
  if(any(("sf") %in% class(locs))) {
    locs <- suppressMessages(
      sf::st_transform(
        locs, crs = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
    ) %>%
      sf::as_Spatial(.)
  } else {
    locs <- locs %>%
      {names(.) <- tolower(names(.)); .} %>%
      {sp::coordinates(.) <- c("longitude", "latitude"); .} %>%
      {sp::proj4string(.) <-
        sp::CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"); .}
  }
  # Reduces points to 1 per grid cell, then extracts each cell's climate.
  loc_env <- stats::na.omit(
    raster::as.data.frame(
      bioclim_stack[unique(raster::cellFromXY(bioclim_stack, locs))]
    )
  )
  models <- range_bag_fit(loc_env, n_models = n_models, dimensions = n_dims,
                          p = p)
  # Predict over all land cells (Antarctica excluded).
  world_map <- rnaturalearth::ne_countries(returnclass = "sf") %>%
    dplyr::filter(name != "Antarctica")
  pred_data <- stats::na.omit(raster::as.data.frame(
    bioclim_stack %>%
      raster::crop(., world_map), xy=TRUE))
  out <- suppressWarnings(
    raster::rasterFromXYZ(
      cbind(pred_data[, 1:2],
            range_bag_pred(models = models, new_data = pred_data[, -(1:2)])),
      crs = '+init=epsg:4326')
  )
  if(!is.null(outfile)) {
    # Create directory if it does not exist
    if(!dir.exists(dirname(outfile))) {
      dir.create(dirname(outfile), recursive = TRUE)
    }
    raster::writeRaster(out, outfile, overwrite=TRUE)
  } else {
    out
  }
}
|
cdde47bae06dd8d7d1b2c9b8c9d6c8a496428093
|
70b973af1466108afbb476e62e5672b1fb495f94
|
/seir-model/compare-models.R
|
bd7cec6567af78e3193d2ab04c7b128e0e2d9744
|
[] |
no_license
|
openmodels/coronaclimate
|
dbac851a4a7b4d51891f69c7e9e43d691e34ecf1
|
2909520af378cc9b38db09c295f4c451a3be2c00
|
refs/heads/master
| 2022-11-07T07:19:59.006186
| 2022-11-03T01:58:19
| 2022-11-03T01:58:19
| 249,415,990
| 0
| 0
| null | 2020-03-30T19:39:06
| 2020-03-23T11:46:24
|
Python
|
UTF-8
|
R
| false
| false
| 7,087
|
r
|
compare-models.R
|
# NOTE(review): a hard-coded absolute setwd() makes this script non-portable;
# prefer running it from the project root or using relative paths throughout.
setwd("~/research/coronavirus/code/seir-model")

library(lubridate)
## result.files <- c('epimodel-0604.csv', 'epimodel-0604-ww.csv', 'epimodel-0615.csv',
## 'epimodel-meta-0616.csv', 'epimodel-0803.csv', 'epimodel-meta-0806-x5.csv',
## 'epimodel-meta-0817.csv', 'epimodel-meta-0817-noprior.csv',
## 'epimodel-meta-0907-nobs.csv', 'epimodel-meta-0907-pop.csv', 'epimodel-meta-0907-region.csv',
## 'epimodel-meta-0921-pop.csv', 'epimodel-meta-1030-all-pop.csv', 'epimodel-meta-1111-mixed-all-pop.csv')
## result.names <- c('Kucharski et al.', 'Include Weather', 'Handle Deaths',
## 'Death & Not', 'Early/Late & Mobility', 'Const. Omega & Priors',
## 'New Weather', 'Drop Priors',
## 'Omega Effect, by Pop.', 'Omega Effect, by Obs.', 'Omega Effect, by Reg.',
## 'Smooth Omega', 'Estimated Testing', 'OLS Compare')
# Result files to compare (one model variant per file) and their display
# names; the two vectors are parallel and must stay the same length.
result.files <- c('epimodel-meta-0314-noweather-noomega-nodlogomega-nodeath-all-nobs-nodel-combo.csv',
                  'epimodel-meta-0314-noweather-noomega-nodeath-all-nobs-nodel.csv', ##
                  'epimodel-meta-0314-noweather-all-nobs-nodel.csv',
                  'epimodel-meta-0314-noprior-noomega-all-nobs-nodel-combo.csv',
                  'epimodel-meta-0314-noprior-all-nobs-nodel.csv', 'epimodel-meta-0314-full3-all-nobs-nodel.csv')
result.names <- c("Kucharski et al.", " + Variable reporting rate", " + Handle deaths",
                  " + Weather effects on transmission", " + Weather effects on detection",
                  " + OLS-based priors")

# Map of parameter codes (as stored in the result CSVs) to display names.
# BUG FIX: the list previously contained a stray unnamed 'e.ssrd' element
# immediately before the named 'e.ssrd' entry, leaving a junk unnamed slot.
coeff.names <- list('alpha'='Time Variance (α)',
                    'invsigma'='Incubation Period (1/σ)',
                    'invgamma'='Infectious Period (1/γ)',
                    'omega'='Reporting Rate (ω)',
                    'error'='Model Error (ε)',
                    'e.absh'='Beta[Abs. Humid.]',
                    'e.r'='Beta[Rel. Humid.]',
                    'e.t2m'='Beta[Surface Temp.]',
                    'e.tp'='Beta[Total Prec.]',
                    'o.absh'='Zeta[Abs. Humid.]',
                    'o.r'='Zeta[Rel. Humid.]',
                    'o.t2m'='Zeta[Surface Temp.]',
                    'o.tp'='Zeta[Total Prec.]',
                    'deathrate'='Death Rate (δ)',
                    'deathomegaplus'='Death Reporting (λ)',
                    'deathlearning'='Death rate learning (ν)',
                    'portion_early'='Portion Early',
                    'mobility_slope'='Mobility Slope',
                    'logbeta'='Log Transmission',
                    'logomega'='Log Reporting',
                    'eein'='Infected Imports',
                    'omega0'='Initial Reporting (ω₀)',
                    'domega'='Reporting Slope',
                    'invkappa'='Testing Delay (1/κ)',
                    'invtheta'='Reporting Delay (1/θ)',
                    'e.ssrd'='Beta[Solar Rad.]',
                    'e.utci'='Beta[Thermal Index]',
                    'o.ssrd'='Zeta[Solar Rad.]',
                    'o.utci'='Zeta[Thermal Index]')

# Display order of parameters in the comparison facets; parameters whose
# display name is not in this vector are skipped entirely.
coeff.order <- c('Time Variance (α)', 'Incubation Period (1/σ)', 'Infectious Period (1/γ)', 'Testing Delay (1/κ)', 'Reporting Delay (1/θ)', 'Reporting Rate (ω)', 'Initial Reporting (ω₀)', 'Reporting Slope', 'Death Rate (δ)', 'Death Reporting (λ)', 'Death rate learning (ν)', 'Beta[Abs. Humid.]', 'Beta[Rel. Humid.]', 'Beta[Surface Temp.]', 'Beta[Total Prec.]', 'Beta[Solar Rad.]', 'Beta[Thermal Index]', 'Zeta[Abs. Humid.]', 'Zeta[Rel. Humid.]', 'Zeta[Surface Temp.]', 'Zeta[Total Prec.]', 'Zeta[Solar Rad.]', 'Zeta[Thermal Index]', 'Portion Early', 'Log Transmission', 'Log Reporting', 'Model Error (ε)')
# Accumulators: `fits` gets one row per (model, parameter) with point
# estimate, CI and spread columns; `performs` gets one row per model with the
# region count and median R-hat convergence diagnostic.
fits <- data.frame()
performs <- data.frame()
for (rr in 1:length(result.files)) {
  filepath <- file.path("../../results", result.files[rr])
  print(filepath)
  df <- read.csv(filepath)
  for (param in unique(df$param)) {
    # Skip parameters that are not in the display ordering.
    if (!(coeff.names[[param]] %in% coeff.order))
      next
    # A regid of " " denotes the pooled (all-region) row when present;
    # otherwise fall back to averaging the per-region estimates.
    if (" " %in% df$regid[df$param == param]) {
      meanmu <- df$mu[df$param == param & df$regid == " "]
      mean25 <- df$ci25[df$param == param & df$regid == " "]
      mean75 <- df$ci75[df$param == param & df$regid == " "]
    } else {
      meanmu <- mean(df$mu[df$param == param], na.rm=T)
      mean25 <- mean(df$ci25[df$param == param], na.rm=T)
      mean75 <- mean(df$ci75[df$param == param], na.rm=T)
    }
    # Interquartile spread of per-region point estimates, for the "Raw" and
    # "Combined" groups separately (pooled rows excluded from the latter).
    mu25q.raw <- quantile(df$mu[df$param == param & df$group == "Raw"], .25, na.rm=T)
    mu75q.raw <- quantile(df$mu[df$param == param & df$group == "Raw"], .75, na.rm=T)
    mu25q.combined <- quantile(df$mu[df$param == param & df$group == "Combined" & df$regid != " "], .25, na.rm=T)
    mu75q.combined <- quantile(df$mu[df$param == param & df$group == "Combined" & df$regid != " "], .75, na.rm=T)
    if ('rhat' %in% names(df))
      rhat <- mean(df$rhat[df$param == param], na.rm=T)
    else
      rhat <- NA
    # Legacy relabelling: in the 0803..0806-x5 result files the weather
    # coefficients were stored under shifted names.  Only active when those
    # files appear in result.files (they do not in the current list).
    if (any(result.files == 'epimodel-0803.csv')) {
      if (rr >= which(result.files == 'epimodel-0803.csv') & rr <= which(result.files == 'epimodel-meta-0806-x5.csv')) {
        if (param == 'e.absh')
          param <- 'e.r'
        else if (param == 'e.r')
          param <- 'e.t2m'
      }
    }
    # Blank out transmission-weather (e.*) coefficients whose intervals spill
    # outside [-1, 1]; ymin/ymax record the clipped display range.
    ymin <- NA
    ymax <- NA
    if (substring(as.character(param), 1, 2) == 'e.')
      if (!is.na(mean25) && (mean75 > 1 || mean25 < -1 || mu75q.raw > 1 || mu25q.raw < -1)) {
        ymin <- -1
        ymax <- 1
        meanmu <- NA
        mean25 <- NA
        mean75 <- NA
        # NOTE(review): mu25q/mu75q are assigned but never used below -- the
        # data.frame uses mu25q.raw/.combined.  Possibly these were meant to
        # be mu25q.raw/mu75q.raw; confirm intent before changing.
        mu25q <- NA
        mu75q <- NA
      }
    # NOTE(review): rbind-in-a-loop is O(n^2); fine at this size, but consider
    # accumulating a list and binding once if result files grow.
    fits <- rbind(fits, data.frame(model=result.names[rr], param, param.name=coeff.names[[param]], meanmu, mean25, mean75, mu25q.raw, mu75q.raw, mu25q.combined, mu75q.combined, ymin, ymax, rhat=rhat))
  }
  if ('rhat' %in% names(df))
    rhat <- median(df$rhat, na.rm=T)
  else
    rhat <- NA
  regions <- length(unique(df$regid))
  performs <- rbind(performs, data.frame(model=result.names[rr], regions, rhat))
}
# Print the per-model summary (region count, median R-hat) to the console.
performs

library(ggplot2)
# Order models bottom-to-top on the (flipped) axis; parameters in the fixed
# display order defined above.
fits$model <- factor(fits$model, levels=rev(result.names))
fits$param.name <- factor(fits$param.name, coeff.order)

# Version 1: solid bars = pooled 25-75% interval; dashed red = spread of the
# combined per-region estimates.  logbeta/logomega are excluded from both.
gp <- ggplot(fits[!(fits$param %in% c('logbeta', 'logomega')),], aes(model, meanmu)) +
  facet_wrap(~ param.name, scales='free_x', ncol=6) +
  geom_errorbar(aes(ymin=mean25, ymax=mean75)) +
  geom_linerange(aes(ymin=mu25q.combined, ymax=mu75q.combined), col='red', linetype='dashed') +
  # geom_linerange(aes(ymin=ymin, ymax=ymax), col='blue') +
  coord_flip() + scale_y_continuous(expand=c(.01, .01)) + theme_bw() +
  xlab(NULL) + ylab("Parameter Estimate")
ggsave("../../figures/compare-models.pdf", gp, width=10, height=8)

# Version 2: bars = combined spread; dashed red = raw spread.
gp <- ggplot(fits[!(fits$param %in% c('logbeta', 'logomega')),], aes(model, meanmu)) +
  facet_wrap(~ param.name, scales='free_x', ncol=6) +
  geom_errorbar(aes(ymin=mu25q.combined, ymax=mu75q.combined)) +
  geom_linerange(aes(ymin=mu25q.raw, ymax=mu75q.raw), col='red', linetype='dashed') +
  # geom_linerange(aes(ymin=ymin, ymax=ymax), col='blue') +
  coord_flip() + scale_y_continuous(expand=c(.01, .01)) + theme_bw() +
  xlab(NULL) + ylab("Parameter Estimate")
ggsave("../../figures/compare-models-0314.pdf", gp, width=13, height=5)
# Re-render the same plot via cairo_pdf to the same path -- this overwrites
# the ggsave() output just above, presumably so the Greek/Unicode labels
# embed correctly; confirm whether the ggsave() call is still needed.
cairo_pdf("../../figures/compare-models-0314.pdf", width=13, height=5)
gp
dev.off()
|
bce6316ce7917ec5a78134bcd33f60a0f6a78539
|
cfe8b5466b9e1bd35ebb000a15e74e466f613603
|
/Analyses/Mapping.R
|
fbaba1a6db48e43d90664c43e08c80ce0e2b1359
|
[
"MIT"
] |
permissive
|
christiancjw/how-many-galeopterus
|
fb5d691a1ff37fed97162268f72bb83992a4dd75
|
7ffe1211a4411578d48544ded2afe62f7b91c0bf
|
refs/heads/main
| 2023-04-22T18:50:23.629628
| 2021-05-07T08:43:18
| 2021-05-07T08:43:18
| 353,683,466
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,744
|
r
|
Mapping.R
|
# Assuming your original data is called cologu. Note you don't need the PCs or anything, just the data for how you want to split/colour the points and the lat longs
## Load libraries
library(tidyverse)
library(sf)
library(rgeos)
library(rgdal)
library(Hmisc)
library(rnaturalearth)
# Helper functions for plotting ----
# Theme fragment that hides the y axis entirely (title, tick labels, ticks).
remove_y <- theme(
  axis.title.y = element_blank(),
  axis.text.y  = element_blank(),
  axis.ticks.y = element_blank()
)

# Theme fragment that hides the x axis entirely (title, tick labels, ticks).
remove_x <- theme(
  axis.title.x = element_blank(),
  axis.text.x  = element_blank(),
  axis.ticks.x = element_blank()
)
# Read in of data ----
mappingdata <- read.csv("Rawdata/dermopteradata.csv")

# Mapping ----
# Keep only records with a recorded extent.
# NOTE(review): the filter compares against the literal string "NA", which
# implies missing values were read as "NA" text rather than real NA -- confirm
# against the raw CSV (is.na() would be the usual idiom).
mapref <- mappingdata %>%
  filter(!Extent..m. == "NA")
# Convert the lat/long columns into sf point geometry (EPSG:4326 = WGS84).
dermdata <- mapref %>%
  st_as_sf(coords = c("Longitude", "Latitude"), crs = 4326)
dermdata

# Base map: all country polygons dissolved into a single land outline.
baseMap <-
  rnaturalearth::ne_countries(returnclass = 'sf') %>%
  st_union()

## Choose coordinates to limit to Asia (may need to modify to zoom in or out more) ----
# Bounding box as (xmin, ymin, xmax, ymax), split into the x/y limit pairs
# expected by coord_sf() below.
asia_bbox <- c(94, -10,
               130, 20)
xlim_as <- c(asia_bbox[1], asia_bbox[3])
ylim_as <- c(asia_bbox[2], asia_bbox[4])

# Make map - only the first bits are special ----
# changing colours etc is same as normal ggplot code
ggplot(baseMap) +
  geom_sf() +
  # Add points, coloured by Region.
  geom_sf(alpha = 0.9, aes(colour = Region, fill = Region),
          data = dermdata, show.legend = TRUE, size = 0.5) +
  # restrict map to just Asia
  coord_sf(xlim = xlim_as, ylim = ylim_as, expand = TRUE) +
  theme_bw() +
  remove_x +
  remove_y

# Save the most recent plot to disk.
ggsave("figures/dermoptera-map.png")
|
62e05d258f04f3ce63e712d1128eee700eed09a9
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/PopGenKit/R/freqPCA.R
|
2ba3f96ffdb6a4ed82dc949624b31bb60a253307
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,843
|
r
|
freqPCA.R
|
freqPCA <-
function(datafile, ndigit=3, default.pop=T, vecpops)
{
freq.overall=F
freq.by.pop=F
input=read.table(file=datafile, sep='\t', colClasses='character')
noext= gsub("[.].*$","",datafile)
nloci=ncol(input)-2
#transformer pour trouver npops et popsizes
whereNbSamples=grep('NbSamples',input[,1])
npops=gsub( "[^0123456789]", "", input[whereNbSamples,1])
npops=as.numeric(npops)
whereSampleSize =grep('SampleSize',input[,1])
popsizes=as.numeric(c(gsub('[^0123456789]', "", input[whereSampleSize,1])))
#reconnaître les noms des pops
whereSampleName =grep('SampleName',input[,1])
popnames=(c(gsub('SampleName=', "", input[whereSampleName,1])))
#créer une matrice nind x nloci seulement
matinput=input[,3:ncol(input)]
xsums=rep(NA,times=nrow(matinput))
for (i in 1:nrow(matinput)){ xsums[i]=sum(nchar(matinput[i,])) }
emptyrows=which(xsums==0)
matinput=matinput[-emptyrows,]
#déterminer le nombre dallèles/locus pour formatter output
kvec=vector(mode='numeric',nloci)
for (i in 1:nloci)
{
alleles=matinput[,i]
vec=unique(alleles)
vec=paste(vec,collapse='')
vec=gsub( "[^[:alnum:]]", "", vec)
k=nchar(vec)/ndigit
kvec[i]=k
}
MAX=max(kvec)
#créer le tableau de résultats
results=matrix(NA,2*nloci,MAX)
missing=rep(NA, times=nloci)
nbk=rep(NA, times=nloci)
n.alleles=rep(NA, times=nloci)
for (j in 1:nloci)
{
alleles=matinput[,j]
totaln=length(alleles)
vec=unique(alleles)
vec=paste(vec,collapse='')
vec=gsub( "[^[:alnum:]]", "", vec)
k=nchar(vec)/ndigit
sampsize=paste(alleles,collapse='')
sampsize=gsub( "[^[:alnum:]]", "", sampsize)
sampsize=(nchar(sampsize)/ndigit)
missingABS=length(grep('[?]',alleles))
missing[j]=round((100*(missingABS/totaln)),2)
nbk[j]=k
n.alleles[j]=sampsize/2
for (m in 1:k)
{
alleleID=substr(vec,(m*ndigit)-(ndigit-1),m*ndigit)
results[(2*j)-1,m]=alleleID
count=0
for (z in 1:length(alleles))
{
if (alleles[z]==alleleID) count=count+1
}
results[2*j,m]=round(count/sampsize,3)
}
}
#trier les allèles en ordre croissant dans le output
for (j in 1:nloci)
{
ordre=order(results[(2*j)-1,])
results[(2*j)-1,]=results[(2*j)-1,ordre]
results[(2*j),]=results[(2*j),ordre]
}
#ajouter une colonne au début avec le no de locus et le % de données manquantes
loc.col=NULL
missing.col=NULL
k.col=NULL
n.alleles.col=NULL
for (i in 1:nloci) {
loc.col=c(loc.col,i,NA)
missing.col=c(missing.col,missing[i],NA)
k.col=c(k.col,nbk[i],NA)
n.alleles.col=c(n.alleles.col,n.alleles[i],NA)
}
table.results=cbind(loc.col,n.alleles.col,missing.col, k.col, results)
#mettre les cellules NA vides pour lesthétisme !
for (r in 1:nrow(table.results))
{
for (c in 1:ncol(table.results))
{
if (is.na(table.results[r,c])==T) table.results[r,c]=''
}
}
col.name=rep('',times=ncol(table.results))
col.name[1]= 'Locus#'
col.name[2]= 'n'
col.name[3]= 'Miss.%'
col.name[4]= 'k'
col.name[5]= 'Allele frequencies'
colnames(table.results)=col.name
filename=paste(noext,'_Overall_freq.txt',sep='')
if (freq.overall==T) {
write.table(table.results, file=filename, quote=F, row.names=F, col.names=T, sep='\t') }
#ici les analyses par population commencent
allpopresults=NULL
matpopresults=NULL
matpopcounts=NULL
popsizes2=2*popsizes
PCA.table=NULL
for (v in 1:npops)
{
popmissing=rep(NA,times=nloci)
popnbk=rep(NA,times=nloci)
pop.n.alleles=rep(NA,times=nloci)
popresults=matrix(NA,2*nloci,MAX)
popcounts=matrix(NA,2*nloci,MAX)
popno=v
if (v==1) first=1
if (v>1) first=sum(popsizes2[1:(popno-1)])+1
last=sum(popsizes2[1:popno])
popdata=matinput[first:last,]
vecPCA=NULL
for (j in 1:nloci)
{
alleles=popdata[,j]
vecalleles=results[(2*j)-1,]
k=nbk[j]
sampsize=paste(alleles,collapse='')
sampsize=gsub( "[^0123456789]", "", sampsize)
sampsize=(nchar(sampsize)/ndigit)
missingABS=length(grep('[?]',alleles))
popmissing[j]=round((100*(missingABS/popsizes2[v])),2)
pop.n.alleles[j]=sampsize/2
for (m in 1:k)
{
alleleID=vecalleles[m]
count=0
for (z in 1:length(alleles))
{
if (alleles[z]==alleleID) count=count+1
}
popresults[(2*j),m]= round(count/sampsize,3)
popresults[(2*j)-1,]=vecalleles
popcounts[(2*j),m]= count
popcounts[(2*j)-1,]=vecalleles
if (popresults[2*j,m]=='NaN') popresults[2*j,m]='Miss!'
}
popnbk[j]= length(which(popresults[2*j,]>0))
vecPCA2=as.numeric(popresults[(2*j),(1:(k-1))])
vecPCA=c(vecPCA,vecPCA2)
}
pop.missing.col=NULL
pop.k.col=NULL
pop.n.alleles.col=NULL
for (i in 1:nloci) {
pop.missing.col=c(pop.missing.col,popmissing[i],NA)
pop.k.col=c(pop.k.col,popnbk[i],NA)
pop.n.alleles.col=c(pop.n.alleles.col,pop.n.alleles[i],NA)
}
table.popresults=cbind(loc.col,pop.n.alleles.col,pop.missing.col, pop.k.col, popresults)
blank.row=rep(NA,times=ncol(table.popresults))
blank.row[1]=popnames[v]
allpopresults=rbind(allpopresults,blank.row,table.popresults)
matpopresults=rbind(matpopresults,popresults)
matpopcounts=rbind(matpopcounts,popcounts)
PCA.table=rbind(PCA.table,vecPCA)
}
NAallpopresults=allpopresults
#mettre les cellules NA vides pour lesthétisme !
for (r in 1:nrow(allpopresults))
{
for (c in 1:ncol(allpopresults))
{
if (is.na(allpopresults[r,c])==T) allpopresults[r,c]=''
}
}
colnames(allpopresults)=col.name
if (default.pop==F) {rownames(PCA.table)=vecpops} else {rownames(PCA.table)=popnames}
filename2=paste(noext,'_Pops_freq.txt',sep='')
if (freq.by.pop==T) {
write.table(allpopresults, file=filename2, quote=F, row.names=F, col.names=T, sep='\t')
}
filename3=paste(noext,'_FreqPCA.txt',sep='')
write.table(PCA.table, file=filename3, quote=F, row.names=T, col.names=F, sep='\t')
#return(list('matinput'=matinput, 'popsizes'=popsizes, 'matpopresults'=matpopresults, 'PCA'=PCA.table))
return(PCA.table)
}
|
332ae16adfd8e81bf724dc7efdd7daa9ae882788
|
1f3f41868d8ac5afd1731bd6756a2a6bde855c27
|
/Analysis/Read_Files.R
|
a795978a8b98f66a82da838e74ab693c190a6269
|
[] |
no_license
|
milescorak/The-Canadian-Geography-of-Intergenerational-Income-Mobility
|
49c1d04f034b7d07f08680cfdc8eceb210ae2a61
|
7dd30b575011b94312d1dbadbc7f59cefa4ee1f4
|
refs/heads/master
| 2020-08-22T03:47:52.432308
| 2020-03-09T11:12:28
| 2020-03-09T11:12:28
| 216,311,236
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,342
|
r
|
Read_Files.R
|
# May 9 2016
# Miles Corak
# load libraries used in mapping and analysis
# of intergenerational inocme mobility
# of Census Divisions and Census Sub Divisions
# install libraries needed for mapping
# Mapping packages to attach in one pass.
# NOTE(review): rgeos, rgdal and maptools were retired from CRAN in 2023 and
# gpclib long before that; sf/sp cover the same functionality -- confirm
# these are still installable before re-running this script.
MapLibraries <- c("sp",
                  "maptools",
                  "gpclib",
                  "maps",
                  "mapdata",
                  "sfsmisc",
                  "mapproj",
                  "raster",
                  "rgeos",
                  "scales",
                  "mapplots",
                  "RgoogleMaps",
                  "plotGoogleMaps",
                  "ggmap",
                  "GEOmap",
                  "plotrix", "sf"
                  )
# Attach every mapping package listed above.
lapply(MapLibraries, library, character.only = TRUE)

library(xtable)
library(plyr) # allows use of the rename function to change variable names
library(dplyr)
library(sparcl) # colors the leaves of a dendrogram https://rpubs.com/gaston/dendrograms
library(Hmisc)
# this library is for colour templates discussed at
# http://stat545-ubc.github.io/block018_colors.html#rcolorbrewer
library(RColorBrewer)

# Download and unzip the 1986 Census boundary shapefiles, then read the
# Census Division, Census Subdivision and CMA/CA layers.
file_path = "https://github.com/milescorak/The-Canadian-Geography-of-Intergenerational-Income-Mobility/raw/master/Shapefile.zip"
file_name = "Shapefile.zip"
download.file(file_path,
              destfile = file_name , mode='wb')
unzip(file_name, exdir = ".")
CD = st_read("Shapefile/CD1986_SHAPE.shp")
CSD = st_read("ShapeFile/CSD1986.shp")
CMA = st_read("ShapeFile/CMACA1986.shp")

# Census Division mobility estimates: all cohorts, incomes above $500.
CD.data <- repmis::source_data("https://raw.githubusercontent.com/milescorak/The-Canadian-Geography-of-Intergenerational-Income-Mobility/master/Data/CD_mobility_All_cohorts_incomes_above_500.csv")
#CD.data <- read.csv(file="Data/CD_mobility_All_cohorts_incomes_above_500.csv",header=TRUE,sep=",")
# include province acronym with Census Division name to avoid ambiguities
CD.data$census.division.province.name <- paste(CD.data$census.division.name, CD.data$province.name)
# Same table restricted to boys.
CD.data.boys <- repmis::source_data("https://raw.githubusercontent.com/milescorak/The-Canadian-Geography-of-Intergenerational-Income-Mobility/master/Data/CD_mobility_All_cohorts_incomes_above_500_Boys.csv")
# include province acronym with Census Division name to avoid ambiguities
CD.data.boys$census.division.province.name <- paste(CD.data.boys$census.division.name, CD.data.boys$province.name)
# Same table restricted to girls.
CD.data.girls <- repmis::source_data("https://raw.githubusercontent.com/milescorak/The-Canadian-Geography-of-Intergenerational-Income-Mobility/master/Data/CD_mobility_All_cohorts_incomes_above_500_Girls.csv")
# include province acronym with Census Division name to avoid ambiguities
CD.data.girls$census.division.province.name <- paste(CD.data.girls$census.division.name, CD.data.girls$province.name)
# Census Subdivision, provincial (all/boys/girls) and national transition
# matrix data.
CSD.data <- repmis::source_data("https://github.com/milescorak/The-Canadian-Geography-of-Intergenerational-Income-Mobility/raw/master/Data/CSD_mobility_All_cohorts_incomes_above_500.csv")
PR.data <- repmis::source_data("https://raw.githubusercontent.com/milescorak/The-Canadian-Geography-of-Intergenerational-Income-Mobility/master/Data/PR_mobility_All_cohorts_incomes_above_500.csv")
PR.data.boys <- repmis::source_data("https://raw.githubusercontent.com/milescorak/The-Canadian-Geography-of-Intergenerational-Income-Mobility/master/Data/PR_mobility_All_cohorts_incomes_above_500_Boys.csv")
PR.data.girls <- repmis::source_data("https://raw.githubusercontent.com/milescorak/The-Canadian-Geography-of-Intergenerational-Income-Mobility/master/Data/PR_mobility_All_cohorts_incomes_above_500_Girls.csv")
All <- repmis::source_data("https://raw.githubusercontent.com/milescorak/The-Canadian-Geography-of-Intergenerational-Income-Mobility/master/Data/Percentile_transition_matrix_Canada_All_Cohorts_incomes_above_500.csv")
Boys <-repmis::source_data("https://raw.githubusercontent.com/milescorak/The-Canadian-Geography-of-Intergenerational-Income-Mobility/master/Data/Percentile_transition_matrix_Canada_All_Cohorts_incomes_above_500_Boys.csv")
Girls <-repmis::source_data("https://raw.githubusercontent.com/milescorak/The-Canadian-Geography-of-Intergenerational-Income-Mobility/master/Data/Percentile_transition_matrix_Canada_All_Cohorts_incomes_above_500_Girls.csv")
# create factor variable to represent membership in a group
# for the rags to riches movement
QuintileProbGroups <- cut(CD.data$p15, breaks= c(-0.01, 0.02499, 0.04999, 0.09999, 0.1499, 0.1999, 0.9),
labels = c("less than 0.025",
"0.025 up to 0.05",
"0.05 up to 0.10",
"0.10 up to 0.15",
"0.15 up to 0.20",
"0.20 or more"))
# for the cycle of poverty
QuintileProbGroupsP11 <- cut(CD.data$p11, breaks= c(-0.01, 0.1999, 0.2499, 0.2999, 0.3499, 0.3999, 0.9),
labels = c("less than 0.20",
"0.20 up to 0.25",
"0.25 up to 0.30",
"0.30 up to 0.35",
"0.35 up to 0.40",
"0.40 or more"))
|
d1cf0f2de84526f8359c06f31cb2c8d4b67f6bfb
|
d1976dc8700c0edaf02556d9a232c4893a722ec0
|
/man/fitModel.Rd
|
eb2e9c2f27cc2df594f6c30bbc474deaa3eebfc6
|
[] |
no_license
|
MichaelFolkes/forecastR_package
|
5276cbb897078efd8994b5267aa6ef227917e613
|
2f291afff2bde360e8dee00fc17cf9a3b4e40725
|
refs/heads/master
| 2021-06-11T16:09:48.104493
| 2021-04-03T18:22:58
| 2021-04-03T18:22:58
| 171,557,939
| 1
| 3
| null | 2021-04-03T18:22:59
| 2019-02-19T22:09:42
|
R
|
UTF-8
|
R
| false
| true
| 1,275
|
rd
|
fitModel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Module_fitModel.R
\name{fitModel}
\alias{fitModel}
\title{General Model Fitting Functon}
\usage{
fitModel(
model = c("Naive", "ReturnRate", "Mechanistic", "SibRegSimple", "SibRegKalman",
"SibRegLogPower", "TimeSeriesArima", "TimeSeriesExpSmooth"),
data = NULL,
settings = NULL,
tracing = FALSE
)
}
\arguments{
\item{model}{A character vector of length one. The name of a model, it has to
match one of the model names in the "estimation.functions" list objects.
which is generated from the script R/Module_Sub_EstimationFunctions.R.}
\item{data}{Same as the data element from the list object generated by
prepData()}
\item{settings}{A list. Model-specific list of settings, where applicable.}
\item{tracing}{A Boolean. Default is FALSE.}
}
\value{
A list.
}
\description{
This function applies the estimation step for the various
models. It sets up the data inputs (e.g. loop through age classes, ) and
calls the estimation subroutine then stores the output in a list object.
Model-specific sub-routines live in the file
"Module_Sub_EstimationFunctions.R".
}
\details{
FOR NOW: JUST MAKING THIS WORK WITH THE BASIC SIBLING REGRESSION AND
KALMAN FILTER SIBLING REGRESSION
}
|
ffc5a9276550b63d7bedc5454a1cbee01111c890
|
f0a43727416e8bee2dee9895b82ec0b7d5b9fa10
|
/man/files.Rd
|
4b56e2d9d39bde375d62be2b68e84b6addbf2ef3
|
[
"BSD-3-Clause"
] |
permissive
|
jtoll/fileTools
|
5e8e41b3b513efcc9a825bfe56a87b19c1655349
|
57b1a067e85668ad860f74563a28aa3b31068683
|
refs/heads/master
| 2016-09-05T22:18:22.468669
| 2015-06-24T15:13:51
| 2015-06-24T15:13:51
| 6,632,251
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 420
|
rd
|
files.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/files.R
\name{files}
\alias{files}
\title{files}
\usage{
files(path = ".")
}
\arguments{
\item{path}{a character vector containing a single path name.}
}
\value{
a vector of files
}
\description{
Return a vector of \strong{just} the files in a path.
}
\examples{
\dontrun{files()}
}
\author{
James C. Toll, \email{james@jtoll.com}
}
|
727a4a44ef48fb07931a18925ad7372c08f0622d
|
623c061e25faef39f517345413ceeb6cb44203f5
|
/man/sdMean.Rd
|
d61a2aa20ba1e3cb1de57180f829c0c36bba3c48
|
[] |
no_license
|
RostyslavMaiboroda/mixvconc
|
e0f70af37df7d9e916897e220b7e2403904da84f
|
cfd955ca0f3e3e68770f3cdde81e860aea0cb5ed
|
refs/heads/master
| 2020-06-03T03:53:10.405082
| 2019-06-11T18:30:08
| 2019-06-11T18:30:08
| 189,638,338
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,650
|
rd
|
sdMean.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/start2.R
\name{sdMean}
\alias{sdMean}
\title{Estimates for standard deviations and CI
for weighted means by observations from the mixture.}
\usage{
sdMean(x, p, comp = 1:ncol(p), means = FALSE, CI = FALSE,
alpha = 0.05)
}
\arguments{
\item{x}{numeric vector with the observed sample or a
\code{wtsamp} object.}
\item{p}{matrix (or data frame) of mixing probabilities
with rows corresponding to subjects
and columns coresponding to the mixture components.}
\item{comp}{a numeric vector with numbers of components
for which the standard deviations are estimated.}
\item{means}{logical, if \code{TRUE} then the estimates for
components' means are included in the function value.}
\item{CI}{logical, if \code{TRUE} then confidence bounds for
components' means are inculded in the function value.}
\item{alpha}{confidense level for the confidence interval.}
}
\value{
if \code{CI & means =FALSE} the function returns a vector
of the estimated standard deviations
with NA for the components which were not estimated.
Else a data frame is returned in which there can be variables:
\code{sd} are standard deviations of estimates;
\code{means} are the estimates of means;
\code{lower} and \code{upper} are lower and upper bounds
of the confidence intervals for means.
}
\description{
\code{sdMean} calculates estimates of standard deviatitions
and confidence intervals
for weighted means with minimax weights by observations
from the mixture with varying concentrations.
}
\details{
If \code{CI=TRUE} then the function calculates
confidence intervals for the components' means
with covering probability \code{1-alpha}.
If \code{x} is a vector then the weights for components' means and variances
are calculated as \code{lsweight(p)}. If \code{x} is a \code{wtsamp}
object than its own weights are used.
}
\examples{
set.seed(3)
M<-3 # number of mixture components
p <- genunifp(1000,M) # create mixing probabilities
m<-c(0,1,2) # true means of components
sd<-c(1,1,0.5) # true sd of components
x<-genormixt(p,m,sd) # sample generation
# Calculate sd only:
sdMean(x,p)
# the same:
sdMean(wtsamp(x,indiv=lsweight(p)),p)
# Calculate confidence intervals:
sdMean(x,p,means=TRUE,CI=TRUE)
# Plot confidence intervals:
CI<-sdMean(x,p,means=TRUE,CI=TRUE)
library(plotrix)
plotCI(1:M,CI$means,ui=CI$upper,li=CI$lower,
xlab=" ",ylab="means",xaxt="n")
axis(1,at=1:M,labels=row.names(CI))
}
\seealso{
Maiboroda R. and Kubaichuk O.
Asymptotic normality of improved weighted empirical distribution functions.
Theor. Probability and Math. Statist. 69 (2004), 95-102
}
|
0751e84d371f108a276613fa1c35142e5cba26d7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/datapack/examples/getSize.Rd.R
|
a81ac14b02e2a4fd931e6f7b1104fe7d2280efa7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 325
|
r
|
getSize.Rd.R
|
library(datapack)
### Name: getSize
### Title: Get the Count of Objects in the Package
### Aliases: getSize getSize,DataPackage-method
### ** Examples
dp <- new("DataPackage")
data <- charToRaw("1,2,3\n4,5,6")
do <- new("DataObject", dataobj=data, format="text/csv", user="jsmith")
dp <- addMember(dp, do)
getSize(dp)
|
3d6e5914b07419e617d3114c9a869f62bd57b51d
|
fe9c43c5eab08594756bf37eb7db5f7d0a7d8024
|
/man/slackr_tex.Rd
|
2879d1f1d50c4c5a42dfe97a1e945477a88becca
|
[
"MIT"
] |
permissive
|
mrkaye97/slackr
|
f3cf7c5e9a4170948d9b51c8414e9623e40aaccc
|
4dbc80db2fb89822b14879ab06dabbf9c2c5f7c7
|
refs/heads/master
| 2023-05-31T17:54:14.115413
| 2023-03-30T14:18:43
| 2023-03-30T14:18:43
| 23,662,956
| 49
| 14
|
NOASSERTION
| 2023-09-10T16:55:31
| 2014-09-04T13:16:47
|
R
|
UTF-8
|
R
| false
| true
| 2,043
|
rd
|
slackr_tex.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slackr_tex.R
\name{slackr_tex}
\alias{slackr_tex}
\title{Post a tex output to a Slack channel}
\usage{
slackr_tex(
obj,
channels = Sys.getenv("SLACK_CHANNEL"),
token = Sys.getenv("SLACK_TOKEN"),
ext = "png",
path = NULL,
title = NULL,
initial_comment = NULL,
thread_ts = NULL,
...
)
}
\arguments{
\item{obj}{character object containing tex to compile.}
\item{channels}{Comma-separated list of channel names or IDs where the file will be shared.}
\item{token}{Authentication token bearing required scopes.}
\item{ext}{character, type of format to return, can be tex, pdf, or any image device, Default: 'png'.}
\item{path}{character, path to save tex_preview outputs, if NULL then tempdir is used, Default: NULL.}
\item{title}{Title of file.}
\item{initial_comment}{The message text introducing the file in specified channels.}
\item{thread_ts}{Provide another message's ts value to upload this file as a reply. Never use a reply's ts value; use its parent instead.}
\item{...}{other arguments passed to \code{\link[texPreview:tex_preview]{texPreview::tex_preview()}}, see Details}
}
\value{
\code{httr} response object (invisibly)
}
\description{
Unlike the \code{\link[=slackr_dev]{slackr_dev()}} function, this one takes a \code{tex} object,
eliminating the need write to pdf and convert to png to pass to slack.
}
\details{
Please make sure \code{texPreview} package is installed before running this function.
For TeX setup refer to the
\href{https://github.com/mrkaye97/slackr#latex-for-slackr_tex}{Setup notes on \code{LaTeX}}.
}
\note{
You need to setup a full API token (i.e. not a webhook & not OAuth) for this to work
Also, you can pass in \code{add_user=TRUE} as part of the \code{...}
parameters and the Slack API will post the message as your logged-in user
account (this will override anything set in \code{username})
}
\seealso{
\code{\link[texPreview:tex_preview]{texPreview::tex_preview()}}
}
\author{
Jonathan Sidi (aut)
}
|
084f1ced541f12c439239c0da68f4148de6eb649
|
508c80c7ba3c5e6670d9122569aa8b144291d27c
|
/PA1_template.R
|
055e3fb0f86982ea2b8edda845176ed0d785d8df
|
[] |
no_license
|
DJMcClellan1966/reproducible-pa_1
|
573f139bc96acd748c77e557fb712e7e172cc48c
|
071e3b70348271f0a564e3eb2b61e90cafd90c35
|
refs/heads/master
| 2020-05-19T22:40:34.709835
| 2015-05-07T23:23:10
| 2015-05-07T23:23:10
| 33,887,892
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,507
|
r
|
PA1_template.R
|
**Introduction**
================
It is now possible to collect a large amount of data about personal movement using activity monitoring devices such as a Fitbit, Nike Fuelband, or Jawbone Up. These type of devices are part of the “quantified self” movement – a group of enthusiasts who take measurements about themselves regularly to improve their health, to find patterns in their behavior, or because they are tech geeks. But these data remain under-utilized both because the raw data are hard to obtain and there is a lack of statistical methods and software for processing and interpreting the data.
This assignment makes use of data from a personal activity monitoring device. This device collects data at 5 minute intervals through out the day. The data consists of two months of data from an anonymous individual collected during the months of October and November, 2012 and include the number of steps taken in 5 minute intervals each day.
*Data*
=======
The data for this assignment can be downloaded from the course web site:
Dataset: Activity monitoring data [52K]
The variables included in this dataset are:
steps: Number of steps taking in a 5-minute interval (missing values are coded as NA)
date: The date on which the measurement was taken in YYYY-MM-DD format
interval: Identifier for the 5-minute interval in which measurement was taken
The dataset is stored in a comma-separated-value (CSV) file and there are a total of 17,568 observations in this dataset.
**Assignment**
This assignment will be described in multiple parts. You will need to write a report that answers the questions detailed below. Ultimately, you will need to complete the entire assignment in a single R markdown document that can be processed by knitr and be transformed into an HTML file.
Throughout your report make sure you always include the code that you used to generate the output you present. When writing code chunks in the R markdown document, always use echo = TRUE so that someone else will be able to read the code. This assignment will be evaluated via peer assessment so it is essential that your peer evaluators be able to review the code for your analysis.
For the plotting aspects of this assignment, feel free to use any plotting system in R (i.e., base, lattice, ggplot2)
Fork/clone the GitHub repository created for this assignment. You will submit this assignment by pushing your completed files into your forked repository on GitHub. The assignment submission will consist of the URL to your GitHub repository and the SHA-1 commit ID for your repository state.
NOTE: The GitHub repository also contains the dataset for the assignment so you do not have to download the data separately.
Loading and preprocessing the data
Show any code that is needed to
Load the data (i.e. read.csv())
```{r, echo=TRUE}
setwd("~/Desktop/coursera ")
if(!file.exists("reproducible_project1")) dir.create("reproducible_project1")
rm(list=ls())
activity <- read.csv("./reproducible_project1/activity.csv",colClasses = c("numeric", "character","integer"))
```
Process/transform the data (if necessary) into a format suitable for your analysis
```{r,echo=TRUE}
dim(activity)
head(activity)
tail(activity)
summary(activity)
names(activity)
str(activity)
library(plyr)
library(dplyr)
library(lubridate)
library(ggplot2)
total.steps <- tapply(activity$steps, activity$date, FUN = sum, na.rm = TRUE)
activity$date <- ymd(activity$date)
```
**Part One**
============
**What is mean total number of steps taken per day?**
Calculate and report the mean and median of the total number of steps taken per day.
```{r,echo=TRUE}
mean(total.steps)
median(total.steps)
```
For this part of the assignment, you can ignore the missing values in the dataset.
**Calculate the total number of steps taken per day**
```{r,echo=TRUE}
steps <- activity %>%
filter(!is.na(steps)) %>%
group_by(date) %>%
summarize(steps = sum(steps)) %>%
print
```
**Make a histogram of the total number of steps taken each day**
```{r,echo=TRUE,fig.height=5,fig.width=10}
ggplot(steps, aes(x=date, y=steps))+geom_histogram(stat="identity")+ xlab("Dates")+ ylab("Steps")+ labs(title= "Total numbers of Steps per day")
```
**Part Two**
==============
**What is the average daily activity pattern?**
```{r,echo=TRUE}
daily <- activity %>%
filter(!is.na(steps)) %>%
group_by(interval) %>%
summarize(steps=mean(steps)) %>%
print
```
**Make a time series plot (i.e. type = "l") of the 5-minute interval (x-axis) and the average number of steps taken, averaged across all days (y-axis)**
```{r,echo=TRUE,fig.height=5,fig.width=10}
plot(daily, type = "l")
```
**Which 5-minute interval, on average across all the days in the dataset, contains the maximum number of steps?**
```{r,echo=TRUE}
daily[which.max(daily$steps), ]$interval
```
**Imputing missing values**
*Note that there are a number of days/intervals where there are missing values (coded as NA). The presence of missing days may introduce bias into some calculations or summaries of the data.*
Calculate and report the total number of missing values in the dataset (i.e. the total number of rows with NAs)
```{r,echo=TRUE}
missing <- sum(is.na(activity))
```
Devise a strategy for filling in all of the missing values in the dataset. The strategy does not need to be sophisticated. For example, you could use the mean/median for that day, or the mean for that 5-minute interval, etc.
**Create a new dataset that is equal to the original dataset but with the missing data filled in.**
```{r,echo=TRUE}
new <- activity %>%
group_by(interval) %>%
mutate(steps = ifelse(is.na(steps), mean(steps, na.rm=TRUE), steps))
summary(new)
```
**Make a histogram of the total number of steps taken each day**
```{r,echo=TRUE,fig.height=5,fig.width=10}
new.steps <- new %>%
group_by(date) %>%
summarize(steps = sum(steps)) %>%
print
ggplot(new.steps, aes(x=date, y=steps))+geom_histogram(stat="identity")+ xlab("Dates")+ ylab("Imputed Steps")+ labs(title= "Total numbers of Steps per day (missing data imputed)")
```
**Calculate and report the mean and median total number of steps taken per day.**
```{r,echo=TRUE}
imputed.steps <- tapply(new$steps, new$date, FUN = sum, na.rm = TRUE)
new$date <- ymd(new$date)
mean(imputed.steps)
median(imputed.steps)
```
**Do these values differ from the estimates from the first part of the assignment?**
```{r,echo=TRUE}
mean(total.steps)==mean(imputed.steps)
median(total.steps)==median(imputed.steps)
summary(total.steps)
summary(imputed.steps)
```
*What is the impact of imputing missing data on the estimates of the total daily number of steps?*
The estimates of the number of steps increased by `r summary(imputed.steps) - summary(total.steps)`.
```{r,echo=TRUE,fig.height=5,fig.width=10}
summary(imputed.steps) - summary(total.steps)
par(mfrow=c(2,1))
hist(imputed.steps,col="red")
hist(total.steps,col="blue")
```
##Part 3
**Are there differences in activity patterns between weekdays and weekends?**
*For this part the weekdays() function may be of some help here. Use the dataset with the filled-in missing values for this part.*
Create a new factor variable in the dataset with two levels – “weekday” and “weekend” indicating whether a given date is a weekday or weekend day.
```{r,echo=TRUE}
dayofweek <- function(date) {
if (weekdays(as.Date(date)) %in% c("Saturday", "Sunday")) {
"weekend"
} else {
"weekday"
}
}
new$daytype <- as.factor(sapply(new$date, dayofweek))
```
Make a panel plot containing a time series plot (i.e. type = "l") of the 5-minute interval (x-axis) and the average number of steps taken, averaged across all weekday days or weekend days (y-axis).
#See the README file in the GitHub repository to see an example of what this plot should look like using simulated data.#
```{r,echo=TRUE,fig.height=10,fig.width=10}
par(mfrow = c(2, 1))
for (type in c("weekend", "weekday")) {
steps.type <- aggregate(steps ~ interval, data = new, subset = new$daytype ==
type, FUN = mean)
plot(steps.type, type = "l", main = type)
}
```
```{r,echo=TRUE}
sessionInfo()
```
|
d57f90fbf0e38bde01e5897d078d140446794e19
|
1670b4a0eff54dfa917a25b5fee464477cc2c3cb
|
/CS7DS3-Applied Staticstical Modelling/Assignment-2(Main)/code/asm.R
|
6908fbd9d05ea56ee55aec6597e8259ddc495423
|
[] |
no_license
|
Mihirtcd/trinity
|
34dc0a1e64e562c299fc848696f8a30282c4b9cc
|
26ead3d713f4640f622822e24cc6acf50b40746a
|
refs/heads/master
| 2023-07-25T13:59:49.815092
| 2019-08-19T14:28:07
| 2019-08-19T14:28:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,506
|
r
|
asm.R
|
library(jsonlite)
library(ggplot2)
library(MCMCpack)
library(RColorBrewer)
#read the file
json_file <- stream_in(file("data/Business_Toronto_Restaurant.json"))
#select data
data = json_file[json_file$neighborhood == "Etobicoke"|json_file$neighborhood == "Scarborough",]
#x2 = json_file[,]
#data = rbind(x1,x2)
data = data[data$is_open == 1,]
data1 = data[grepl("Indian",data$categories)==TRUE,]
data_compare = data1[c("business_id","name","stars","review_count","neighborhood")]
data_compare$neighborhood[data_compare$neighborhood == "Etobicoke"] = 1
data_compare$neighborhood[data_compare$neighborhood == "Scarborough"] = 2
#First Part
#creating a box plot
ggplot(data_compare) + geom_boxplot(aes(neighborhood, stars, fill = neighborhood)) + geom_jitter(aes(neighborhood, stars, shape = data_compare$neighborhood))
# Checking the mean, median and sd of the data
mean = tapply(data_compare$stars, data_compare$neighborhood, mean)
tapply(data_compare$stars, data_compare$neighborhood, median)
sd = tapply(data_compare$stars, data_compare$neighborhood, sd)
#t test
t.test(stars ~ neighborhood, data=data_compare, var.equal = TRUE)
#Compare the means
b0 = (2.5 / (1.25^2))
a0 = b0*2.5
compare_2_gibbs <- function(y, ind, mu0 = 2.5, tau0 = 1/(1.25^2), del0 = 0, gamma0 = 1/(1.25^2), a0 = 4, b0 = 1.6, maxiter = 5000)
{
y1 <- y[ind == 1]
y2 <- y[ind == 2]
n1 <- length(y1)
n2 <- length(y2)
##### starting values
mu <- (mean(y1) + mean(y2)) / 2
del <- (mean(y1) - mean(y2)) / 2
mat_store <- matrix(0, nrow = maxiter, ncol = 3)
#####
##### Gibbs sampler
an <- a0 + (n1 + n2)/2
for(s in 1 : maxiter)
{
##update tau
bn <- b0 + 0.5 * (sum((y1 - mu - del) ^ 2) + sum((y2 - mu + del) ^ 2))
tau <- rgamma(1, an, bn)
##
##update mu
taun <- tau0 + tau * (n1 + n2)
mun <- (tau0 * mu0 + tau * (sum(y1 - del) + sum(y2 + del))) / taun
mu <- rnorm(1, mun, sqrt(1/taun))
##
##update del
gamman <- gamma0 + tau*(n1 + n2)
deln <- ( del0 * gamma0 + tau * (sum(y1 - mu) - sum(y2 - mu))) / gamman
del<-rnorm(1, deln, sqrt(1/gamman))
##
## store parameter values
mat_store[s, ] <- c(mu, del, tau)
}
colnames(mat_store) <- c("mu", "del", "tau")
return(mat_store)
}
# burn in and thinning
fit <- compare_2_gibbs(data_compare$stars, as.factor(data_compare$neighborhood))
plot(as.mcmc(fit))
raftery.diag(as.mcmc(fit))
#posterior mean and sd
apply(fit, 2, mean)
apply(fit, 2, sd)
# to interperate tau we convert it to sd
mean(1/sqrt(fit[, 3]))
sd(1/sqrt(fit[, 3]))
# calculating probablilty
y1_sim <- rnorm(5000, fit[, 1] + fit[, 2], sd = 1/sqrt(fit[, 3]))
y2_sim <- rnorm(5000, fit[, 1] - fit[, 2], sd = 1/sqrt(fit[, 3]))
ggplot(data.frame(y_sim_diff = y1_sim - y2_sim), aes(x=y_sim_diff)) + stat_bin(aes(y_sim_diff)) +geom_histogram(color="blue", fill="white")
mean(y1_sim > y2_sim)
ggplot(data.frame(y1_sim, y2_sim)) + geom_point(color='#8dd3c7',fill="white",aes(y1_sim, y2_sim), alpha = 0.3) + geom_abline(slope = 1, intercept = 0)
# how much better?
(mean(y2_sim) - mean(y1_sim))
#First Part end
#Second Part
#Data preparation
data_multi = json_file[json_file$is_open == 1,]
data_multi_test_1 = data_multi[!data_multi$neighborhood=="",]
data_multi_test_1 = data_multi_test_1[!row.names(data_multi_test_1)%in%c("219","2637"),]
#data_multi$neighborhood = factor(data_multi$neighborhood)
#nlevels(data_multi$neighborhood)
colorCount = 72
getPalette = getPalette = colorRampPalette(brewer.pal(12, "Accent"))
#plotting levels
ggplot(data_multi) + geom_boxplot(aes(x = reorder(neighborhood, stars, median), stars, fill = reorder(neighborhood, stars, median)), show.legend=FALSE) + scale_fill_manual(values = getPalette(colourCount))
ggplot(data_multi, aes(x = reorder(neighborhood, neighborhood, length))) + stat_count()
ggplot(data_multi, aes(stars)) + stat_bin(bins = 9)
ggplot(data.frame(size = tapply(data_multi$stars, data_multi$neighborhood, length), mean_score = tapply(data_multi$stars, data_multi$neighborhood, mean)), aes(size, mean_score)) + geom_point()
#gibbs sampler for second part
compare_m_gibbs <- function(y, ind, maxiter = 5000)
{
### weakly informative priors
a0 <- 4 ; b0 <- 1.6 ## tau_w hyperparameters
eta0 <-4 ; t0 <- 1.6 ## tau_b hyperparameters
mu0<-2.5 ; gamma0 <- 1/(1.25^2)
###
### starting values
m <- nlevels(ind)
ybar <- theta <- tapply(y, ind, mean)
tau_w <- mean(1 / tapply(y, ind, var)) ##within group precision
mu <- mean(theta)
tau_b <-var(theta) ##between group precision
n_m <- tapply(y, ind, length)
an <- a0 + sum(n_m)/2
###
### setup MCMC
theta_mat <- matrix(0, nrow=maxiter, ncol=m)
mat_store <- matrix(0, nrow=maxiter, ncol=3)
###
### MCMC algorithm
for(s in 1:maxiter)
{
# sample new values of the thetas
for(j in 1:m)
{
taun <- n_m[j] * tau_w + tau_b
thetan <- (ybar[j] * n_m[j] * tau_w + mu * tau_b) / taun
theta[j]<-rnorm(1, thetan, 1/sqrt(taun))
}
#sample new value of tau_w
ss <- 0
for(j in 1:m){
ss <- ss + sum((y[ind == j] - theta[j])^2)
}
bn <- b0 + ss/2
tau_w <- rgamma(1, an, bn)
#sample a new value of mu
gammam <- m * tau_b + gamma0
mum <- (mean(theta) * m * tau_b + mu0 * gamma0) / gammam
mu <- rnorm(1, mum, 1/ sqrt(gammam))
# sample a new value of tau_b
etam <- eta0 + m/2
tm <- t0 + sum((theta-mu)^2)/2
tau_b <- rgamma(1, etam, tm)
#store results
theta_mat[s,] <- theta
mat_store[s, ] <- c(mu, tau_w, tau_b)
}
colnames(mat_store) <- c("mu", "tau_w", "tau_b")
return(list(params = mat_store, theta = theta_mat))
}
#fitting the model
data_multi_test_1$ind = as.numeric(factor(data_multi_test_1$neighborhood))
fit2 <- compare_m_gibbs(data_multi_test_1$stars, as.factor(data_multi_test_1$ind))
#checking values
#test = tapply(data_multi$stars, data_multi$neighborhood, length)
apply(fit2$params, 2, mean)
apply(fit2$params, 2, sd)
mean(1/sqrt(fit2$params[, 3]))
sd(1/sqrt(fit2$params[, 3]))
theta_hat <- apply(fit2$theta, 2, mean)
ggplot(data.frame(size = tapply(data_multi_test_1$stars, as.factor(data_multi_test_1$ind), length), theta_hat = theta_hat), aes(size, theta_hat)) + geom_point()
#data_multi_test = data_multi[!data_multi$neighborhood=="Cooksville",]
#data_multi_test_1 = data_multi_test[!data_multi$neighborhood=="Meadowvale Village",]
#row.names(data_multi[data_multi$neighborhood=="Meadowvale Village",])
|
6ac9a3bfc4bfe0a75f439cfd4e638731a5fd6829
|
d8dd7a1e55fe0a636cff463ba59cc18ea337bab3
|
/R/yokkunsr.R
|
bba97ac57803d66db035b014e3ee52bf754af4bb
|
[] |
no_license
|
teramonagi/yokkunsr
|
ac2a969ad9ee204a04f52c3f8608cb28ae36a6fc
|
b06d77da72010a7618296f082abf1e0594ca3916
|
refs/heads/master
| 2020-06-15T17:14:16.490131
| 2016-12-01T09:22:18
| 2016-12-01T09:22:18
| 75,274,610
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 141
|
r
|
yokkunsr.R
|
#' yokkunsr
#'
#' Love Yohei Sato
#'
#' @name yokkunsr
#' @docType package
#' @import stringr lubridate dplyr tidyr httr jsonlite data.table
|
9e82b2321287d01cbc5885166b6add83a8dd05cf
|
e3dfb30d469eca5a4880b18214fa32595dd9c6f1
|
/process.data.R
|
6e742452bb2ceda47970c04db686c3df3b3b317f
|
[] |
no_license
|
badfroze/abelian-complexity
|
741268e0bd04247d5a10f0a6e75edddeeb53da76
|
d50fee18685b1dd7a26c79831bcc5c2526572ddb
|
refs/heads/master
| 2020-03-18T08:47:46.913578
| 2019-02-22T08:53:41
| 2019-02-22T08:53:41
| 134,528,441
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,204
|
r
|
process.data.R
|
process.data <- function(ratio = 1, out.file = "train.txt"){
  #' Standardise the combined positive/negative samples column-wise
  #' (zero mean, unit variance) and append the training fraction of each
  #' class to `out.file` in LIBSVM sparse format:
  #'   "<label> 1:<v1> 2:<v2> ... n:<vn>"
  #' Positive rows are written first with label 1, negative rows with -1.
  #'
  #' @param ratio    proportion of each class kept for training
  #'                 (previously hard-coded to 1).
  #' @param out.file path of the output file (previously hard-coded
  #'                 to "train.txt"); lines are appended.
  #'
  #' NOTE(review): `pos` and `neg` are looked up in the calling/global
  #' environment -- they are not defined in this file.

  temp <- rbind(pos, neg)
  n <- ncol(temp)
  # scale() already centres and scales every column of a matrix, so the
  # original per-column loop (with T/F shorthand) is unnecessary.
  tem <- scale(temp, center = TRUE, scale = TRUE)

  # Split the scaled matrix back into its positive and negative parts and
  # keep the training fraction of each class. seq_len() is safe when the
  # training share rounds down to zero rows (the original `1:floor(...)`
  # would iterate over c(1, 0) in that case).
  pos_scale <- tem[seq_len(nrow(pos)), , drop = FALSE]
  neg_scale <- tem[(nrow(pos) + 1):nrow(tem), , drop = FALSE]
  pos_train <- pos_scale[seq_len(floor(nrow(pos) * ratio)), , drop = FALSE]
  neg_train <- neg_scale[seq_len(floor(nrow(neg) * ratio)), , drop = FALSE]

  # Format every row of a matrix as "<label> 1:v1 2:v2 ... n:vn".
  format_rows <- function(m, label) {
    apply(m, 1, function(row) {
      paste(c(label, paste0(seq_len(n), ":", row)), collapse = " ")
    })
  }

  # Build all lines for a class first and append with a single write.table
  # call, instead of reopening the file once per row as before.
  if (nrow(pos_train) > 0) {
    write.table(format_rows(pos_train, 1), file = out.file, append = TRUE,
                quote = FALSE, row.names = FALSE, col.names = FALSE)
  }
  if (nrow(neg_train) > 0) {
    write.table(format_rows(neg_train, -1), file = out.file, append = TRUE,
                quote = FALSE, row.names = FALSE, col.names = FALSE)
  }
}
|
01da50d3d88cf7e64e731e0740e1278e0a891a80
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/B_analysts_sources_github/maelle/india_trains/complementing_timetable.R
|
814a4fbdb55e8b515d295ccb44cc5dc2b41923d2
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,674
|
r
|
complementing_timetable.R
|
# Build a per-train "edge list" timetable: each row links a station to the
# next station served by the same train, with latitude/longitude for both
# ends, and save the result for mapping.
#
# NOTE(review): depends on the `timetable` and `listNames` objects created
# by the two load() calls below; their schemas are not visible in this file.
load("train_data/train_timetable.RData")
load("geo_data/geoInfo.RData")
timetable <- timetable %>%
  # Remove 'JN', which represents junction, from station names
  mutate(sourceStationName = gsub(" JN", "", sourceStationName),
         destStationName = gsub(" JN", "", destStationName),
         stationName = gsub(" JN", "", stationName)) %>%
  # Append "railway India" to station names (presumably to disambiguate
  # them for geocoding -- TODO confirm; it is stripped again below).
  mutate(sourceStationName = paste0(sourceStationName, " railway India"),
         destStationName = paste0(destStationName, " railway India"),
         stationName = paste0(stationName, " railway India"))
# Add nextStationName: the next stop of the same train. lead() within each
# trainNo group means the final stop of every train gets NA.
timetableMap <- timetable %>%
  arrange(trainNo) %>%
  group_by(trainNo) %>%
  mutate(nextStationName = lead(stationName)) %>%
  select(trainNo, stationName, nextStationName, everything()) %>%
  ungroup() %>%
  mutate(stationName = tolower(gsub(" railway India", "", stationName)),
         nextStationName = tolower(gsub(" railway India", "", nextStationName)))
# Look up latitude/longitude for the current and the next station.
# NOTE(review): the lat/long vectors are re-attached positionally, which
# assumes each left_join returns exactly one row per input row -- i.e. that
# `name` is unique in listNames. Duplicate names would silently misalign
# the coordinates; verify uniqueness.
geo1 <- timetableMap %>% left_join(listNames, c("stationName" = "name")) %>%
  select(lat, long)
latA <- geo1$lat
longA <- geo1$long
geo2 <- timetableMap %>% left_join(listNames, c("nextStationName" = "name")) %>%
  select(lat, long)
latB <- geo2$lat
longB <- geo2$long
# (lat1, long1) = current station, (lat2, long2) = next station.
timetableMap <- timetableMap %>%
  mutate(lat1 = latA,
         long1 = longA,
         lat2 = latB,
         long2 = longB) %>%
  select(trainNo,
         stationName,
         lat1,
         long1,
         nextStationName,
         lat2,
         long2,
         everything())
save(timetableMap, file="train_data/complemented_timetable.RData")
|
df7194ce66596cc6af0bd628faace414fbe54822
|
43418fa45f03b4c68c9e6f6fcefdec462105f6ee
|
/man/epsAu.Rd
|
d869547998b58a3e8a66ff0e9db419d1d6e3a7d9
|
[] |
no_license
|
cran/dielectric
|
9e66a9271b93042cb0d31de23be8f496663ee687
|
6c0e1164e5ba3c1da87a9b014c701d1236cbb0de
|
refs/heads/master
| 2020-04-06T03:31:31.574027
| 2012-03-04T00:00:00
| 2012-03-04T00:00:00
| 17,695,498
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,314
|
rd
|
epsAu.Rd
|
\name{epsAu}
\alias{epsAu}
\title{epsAu}
\usage{
epsAu(wavelength, epsilon.infty = 1.54, lambda.p = 177.5,
mu.p = 14500, A1 = 1.27, phi1 = -pi/4, lambda1 = 470,
mu1 = 1900, A2 = 1.1, phi2 = -pi/4, lambda2 = 325,
mu2 = 1060)
}
\arguments{
\item{wavelength}{wavelength in nm}
\item{epsilon.infty}{background dielectric constant}
\item{lambda.p}{plasma wavelength}
\item{mu.p}{damping constant}
\item{A1}{A1}
\item{phi1}{phi1}
\item{lambda1}{lambda1}
\item{mu1}{mu1}
\item{A2}{A2}
\item{phi2}{phi2}
\item{lambda2}{lambda2}
\item{mu2}{mu2}
}
\value{
data.frame
}
\description{
permittivity gold
}
\details{
analytical dielectric function of Au (Drude model +
interband transitions)
}
\examples{
require(dielectric) ; data(AuJC)
wvl <- seq(300, 900)
gold <- epsAu(wvl)
matplot(gold$wavelength, cbind(Re(gold$epsilon), Im(gold$epsilon)),
t="l", lty=1, xlab = "wavelength / nm", ylab = "Dielectric function")
matpoints(AuJC$wavelength, cbind(Re(AuJC$epsilon), Im(AuJC$epsilon)), pch=1)
}
\author{
baptiste Auguie
}
\references{
Principles of surface-enhanced Raman spectroscopy and
related plasmonic effects Eric C. Le Ru and Pablo G.
Etchegoin, published by Elsevier, Amsterdam (2009).
}
\seealso{
Other user_level permittivity: \code{\link{epsAg}}
}
|
82edc7bf64c8309cb141f6a0ff812570ced6b853
|
02e7669a028e0920a73ea00dcf4a3ede616809cf
|
/man/MultiForecast.Rd
|
f79f83a5ed14d25a35a6309e663e219dea32cc38
|
[
"MIT"
] |
permissive
|
evandeilton/RTFC
|
7b13339fe9d44c9f20d7ffa60fe024e2990353c7
|
20d37faa182751a6f276ad80a73fd6bab7215688
|
refs/heads/master
| 2021-01-20T00:48:05.754274
| 2018-04-05T17:46:34
| 2018-04-05T17:46:34
| 89,193,443
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,998
|
rd
|
MultiForecast.Rd
|
\name{MultiForecast}
\alias{MultiForecast}
\title{
Faz forecast com diversos metodos
}
\description{
Em analise...
}
\usage{
MultiForecast(x, Control = cmisControl(), fcMethod=NULL)
}
\arguments{
\item{x}{
Série temporal
}
\item{fcMethod}{
Método de forecast definida pelo utilizador. Se NULL, a escolha é feita automaticamente.
}
\item{Control}{
Argumentos para a análise
}
}
\details{
Esta função utiliza vários métodos de forecast implementados no pacote \code{forecast} para fazer modelagem de séries temporais. Foi criado para cada método de forecast um wrapper que permite ser chamado pela função genérica \code{MultiForecast}. Estes wrappers são:
- stsForecast: Modelos estruturais
- hwForecast: Modelos HoltWinters
- tbatsForecast: Modelos TBATS
- auto.arimaForecast: Modelos ARIMA automáticos
- sesForecast: Modelos de suavização exponencial
- meanForecast: Modelos de médias simples
- holtForecast: Modelos Holt
- batsForecast: Modelos BATS
- etsForecast: Modelos de estados de espaço (State Space)
- arimaForecast: Modelos ARIMA fixos
- lmForecast: Modelos lineares
- thetaForecast: Modelos theta
- rwForecast: Modelos random walk (Passeio aleatório)
- snaiveForecast: Modelos naive sazonais
- naiveForecast: Modelos naive simples
- nnetarForecast: Modelos de redes neurais (Não possui limites de predição)
- HWsForecast: Modelos HoltWinters sazonais
- HWnsForecast: Modelos HoltWinters não sazonais
- HWesForecast: Modelos HoltWinters com alisamento exponencial
No caso dos modelos ETS e ARIMA automáticos, a escolha dos melhores ajustes é feita pela estatística AIC, para os outros tipos de modelos o ajuste é feito sobre os dados sem critério de bondade para a rodada inicial de ajustes. A escolha final dos melhores modelos é feita automaticamente utilizando a estatística definida no Controle. O padrão é MAPE (Erro Médio Absoluto Percentual).
Em complemento, o analista pode observar as estatísticas de acurácia e de qualidade dos resíduos utilizando funções especiais como \code{Acuracia} e \code{Mresid} no caso de uma análise fina individual, pois o objeto de saída contém os modelos escolhidos e permite acesso.
}
\value{
Lista com os melhores modelos de forecast escolhidos.
}
\references{
Hyndman, R.J. and Khandakar, Y. (2008) "Automatic time series forecasting: The forecast package for R", Journal of Statistical Software, 26(3).
}
\author{
LOPES, J. E.
}
\seealso{
\code{\link{switch.cvforecast}}
}
\examples{
## Dados
data(diario)
## Controle
Control <- cmisControl(
maxHorizon = 45,
level = 95,
onlyfc = FALSE,
cvMethod = "MAPE",
tsfrequency = "day",
outputFormat = "forecast"
)
fit <- MultiForecast(diario[,2], Control)
# Analise de residuos (p.valor dos testes)
sapply(fit, Mresid)
# Estatística dos modelos
sapply(fit, tsSummary)
}
|
e7290b499affe75f0b3a0441b12733d8312c3826
|
1f74a31dce7c679d3ef4507335e2f6e763987ff1
|
/stockassessment/man/corplot.Rd
|
9c53db455369fcc94ae691e0c514928537b898f1
|
[] |
no_license
|
fishfollower/SAM
|
5b684c0a54d6e69f05300ebb7629829b2a003692
|
a1f1c5b17505a7a73da28736f0077805a7606b30
|
refs/heads/master
| 2023-07-22T00:50:48.411745
| 2023-04-21T10:25:20
| 2023-04-21T10:25:20
| 67,597,583
| 55
| 35
| null | 2023-02-22T08:42:23
| 2016-09-07T10:39:25
|
R
|
UTF-8
|
R
| false
| true
| 593
|
rd
|
corplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{corplot}
\alias{corplot}
\alias{corplot.sam}
\alias{corplot.samres}
\title{Plots between-age correlations by fleet, either estimated or empirical using residuals.}
\usage{
corplot(x, ...)
\method{corplot}{sam}(x, ...)
\method{corplot}{samres}(x, ...)
}
\arguments{
\item{x}{Either a sam fit as returned by sam.fit OR the object returned from residuals.sam}
\item{...}{extra arguments to plot}
}
\description{
Plots between-age correlations by fleet, either estimated or empirical using residuals.
}
|
48ce9a0418a4d5384a6adf5604cf932a6dbbe63f
|
2b5d396d500ffc3523aa40c4bbc478cb08724986
|
/2.1_network_make_plots.R
|
61d1aa3a5577bde8593081971e8d260d4b914bfc
|
[] |
no_license
|
peterch405/pchic_network
|
2a67419f2c75e009a6156078f0cdb15ae8046e68
|
c618d614cf01b37da12aad85409db2ca17374c0a
|
refs/heads/master
| 2023-02-18T14:52:56.690645
| 2021-01-19T21:48:03
| 2021-01-19T21:48:03
| 190,488,260
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,627
|
r
|
2.1_network_make_plots.R
|
library(readr)
library(igraph)
library(ggplot2)
library(ggpubr)
source("network_dynamics_functions.R")
#Naive primed only -------------------------------------------------------------
net_all_s <- readRDS("2_network_make/net_all_s_20190829.rds")
net_all_naive <- isolate_cell_network(net_all_s, "naive")
net_all_primed <- isolate_cell_network(net_all_s, "primed")
#Make plots based on subnetwork id
# KRT cluster 1233
# Olfactory 749
# Hist 2609
# Pcdh 2387
#'Plot paired boxplot of the node degrees of subnetworks
#'
#'
degree_subnetwork <- function(net_all_naive, net_all_primed, net_number, gene_only=TRUE, gene_regex=NA){
degree_naive <- data.frame(name=V(net_all_naive)$name[V(net_all_naive)$subnet == net_number],
gene=V(net_all_naive)$prot_genes[V(net_all_naive)$subnet == net_number],
count=unname(degree(net_all_naive, V(net_all_naive)$subnet == net_number)))
degree_primed <- data.frame(name=V(net_all_primed)$name[V(net_all_primed)$subnet == net_number],
gene=V(net_all_primed)$prot_genes[V(net_all_primed)$subnet == net_number],
count=unname(degree(net_all_primed, V(net_all_primed)$subnet == net_number)))
stopifnot(nrow(degree_naive) > 0 & nrow(degree_primed) > 0)
degree_naive$origin <- "naive"
degree_primed$origin <- "primed"
if(gene_only){
degree_naive <- degree_naive[!is.na(degree_naive$gene),]
degree_primed <- degree_primed[!is.na(degree_primed$gene),]
}
if(!is.na(gene_regex)){
degree_naive <- degree_naive[grepl(gene_regex, degree_naive$gene, ignore.case = TRUE),]
degree_primed <- degree_primed[grepl(gene_regex, degree_primed$gene, ignore.case = TRUE),]
}
degree_all <- merge(degree_naive, degree_primed, by = "name", all=TRUE, suffixes = c("_naive","_primed"))
degree_all$count_naive[is.na(degree_all$count_naive)] <- 0
degree_all$count_primed[is.na(degree_all$count_primed)] <- 0
degree_all$origin_naive <- "naive"
degree_all$origin_primed <- "primed"
degree_all <- data.frame(name=c(as.character(degree_all$name), as.character(degree_all$name)),
count=c(degree_all$count_naive, degree_all$count_primed),
origin=c(degree_all$origin_naive, degree_all$origin_primed),
gene=c(as.character(degree_all$gene_naive), as.character(degree_all$gene_primed)))
ggpaired(degree_all, x = "origin", y = "count",
color = "origin", ylab="gene degree", xlab=NULL,
line.color = "gray", line.size = 0.2, point.size = 0.5) +
stat_compare_means(paired=TRUE)
}
degree_subnetwork(net_all_naive, net_all_primed, 2387, gene_only = FALSE, gene_regex = "PCDH")
ggsave("2_network_make/genes_degree_stat_pcdh_cluster.pdf", device="pdf",
width = 6, height = 8, units="cm", useDingbats=FALSE)
degree_subnetwork(net_all_naive, net_all_primed, 2609, gene_only = FALSE, gene_regex = "HIST")
ggsave("2_network_make/genes_degree_stat_hist_cluster.pdf", device="pdf",
width = 6, height = 8, units="cm", useDingbats=FALSE)
degree_subnetwork(net_all_naive, net_all_primed, 1233, gene_only = FALSE, gene_regex = "^KRT\\d")
ggsave("2_network_make/genes_degree_stat_krt_cluster.pdf", device="pdf",
width = 6, height = 8, units="cm", useDingbats=FALSE)
degree_subnetwork(net_all_naive, net_all_primed, 749, gene_only = FALSE, gene_regex = "^OR\\d")
ggsave("2_network_make/genes_degree_stat_or_cluster.pdf", device="pdf",
width = 6, height = 8, units="cm", useDingbats=FALSE)
|
178cf402d91bc65aa8e5210067c5d32ccfa9bb88
|
d060fad3c33325ba3e6ab2e42ac9df2ff2a5abf0
|
/R/check.variables.to.be.ordered.R
|
851b3552dff73c083591c8b612deaa20227d558f
|
[] |
no_license
|
cran/bnpa
|
0eb35f0d18850e4b3556c7b08c469e04aab97538
|
3f27ca031b7f6fbf30264d8582970f505f8b76ea
|
refs/heads/master
| 2021-01-11T21:51:55.114628
| 2019-08-01T22:20:02
| 2019-08-01T22:20:02
| 78,866,497
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,235
|
r
|
check.variables.to.be.ordered.R
|
#'Check if the variables need to be ordered
#'
#'This function receives a data set and checks the number of levels of each factor variable; if a variable has more than 2 levels, the function recommends checking whether it should be transformed into an ordered factor.
#'@param data.to.work is a data set with variables to check.
#'@return TRUE or FALSE indicating whether any variable needs to be transformed into an ordered factor.
#'@author Elias Carvalho
#'@references HAYES, A F; PREACHER, K J. Statistical mediation analysis with a multicategorical independent variable. British Journal of Mathematical and Statistical Psychology, v. 67, n. 3, p. 451-470, 2014.
#'@examples
#'# Clean environment
#'closeAllConnections()
#'rm(list=ls())
#'# Set enviroment
#'# setwd("to your working directory")
#'# Load packages
#'library(bnpa)
#'# Use working data sets from package
#'data(dataQualiN)
#'# Show first lines of data set
#'head(dataQualiN)
#'# Insert categorical variables with more than 2 levels
#'dataQualiN$test.variable[dataQualiN$A == "yes"] <- "low"
#'dataQualiN$test.variable[dataQualiN$B == "yes"] <- "medium"
#'dataQualiN$test.variable[dataQualiN$X == "yes"] <- "high"
#'# Transform it to factor variable
#'dataQualiN$test.variable <- as.factor(dataQualiN$test.variable)
#'# Check the necessity to transform in ordered variables
#'bnpa::check.variables.to.be.ordered(dataQualiN)
#'@export
check.variables.to.be.ordered <- function (data.to.work)
{
  # Count the number of levels of every variable up front, then warn about
  # each variable with more than two levels (a candidate for conversion to
  # an ordered factor). Returns TRUE when at least one variable qualifies.
  var.names <- names(data.to.work)
  level.counts <- vapply(
    seq_along(var.names),
    function(i) as.numeric(check.levels.one.variable(data.to.work, var.names[[i]])),
    numeric(1)
  )
  needs.ordering <- FALSE
  for (i in seq_along(var.names)) {
    if (level.counts[[i]] > 2) {
      # Same message, verbatim, as the original implementation.
      cat("\nVariable:", var.names[[i]], " probably would be categorical ordered because it has ", level.counts[[i]], " levels.\n Check if it is TRUE and in positive situation transform it before start the process !")
      needs.ordering <- TRUE
    }
  }
  return(needs.ordering)
}
|
5f9a07cfb3dfd1ca946bf925155cebceb520c976
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed_and_cleaned/10520_0/rinput.R
|
23379ab17691f43b7f7a2c98db65c1d669f77ecd
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Read the phylogenetic tree in 10520_0.txt (Newick format), remove its
# root with ape::unroot(), and write the unrooted tree to a new file.
library(ape)
testtree <- read.tree("10520_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10520_0_unrooted.txt")
|
26233ea8cfcd4e2bdcbdb181e58a19f253cabc97
|
e5ae96af6fcc5e8ad327412258be984912898985
|
/2015/12-December/2015-12-10BLM.R
|
d5574fa6fd0487f79e5ab2d6416a6dee1327da62
|
[
"MIT"
] |
permissive
|
AdolfoGrossoGamboa/Ninja
|
146c672523a7aaac61c3528266d0781f2380d4dd
|
b8e0dcb46b70bf635878ae9a91b29355ce4f6397
|
refs/heads/master
| 2022-04-17T10:24:31.137549
| 2020-04-11T18:55:22
| 2020-04-11T18:55:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,551
|
r
|
2015-12-10BLM.R
|
# Fatal Encounters
# ---------------------------------------------------------------------------
# Exploratory analysis of the Fatal Encounters database (police-involved
# deaths in the US), later merged with an IPUMS census extract by state.
#
# NOTE(review): rm(list=ls()) wipes the user's entire workspace -- this is
# discouraged in shared scripts.
rm(list=ls())
library(data.table)
library(plyr)    # NOTE(review): plyr before dplyr -- the reverse order masks dplyr verbs
library(stringr)
library(ggplot2)
library(scales)
library(dplyr)
library(tm)
library(foreign)
# Helper: split a (possibly multi-line) comma-separated string into a
# character vector with surrounding whitespace trimmed.
C <- function(x) x %>% strsplit(split=",|,\n") %>% unlist %>% gsub("^\\s+|\\s+$","",.)
fataldata <- fread('Z:/Data/FatalEncounters/FatalEncounters.csv', header = T, sep = ',')
fataldata$V22 <- NULL  # drop the trailing unnamed column produced by fread
# Assign readable column names (order must match the CSV's column order).
setnames(fataldata, names(fataldata),
C('SubmitTime,Name,Age,Gender,Race,URL,Date,Address,City,state,
Zip,County,Agency,Cause,Circumstance,OfficialDisposition,OfficialURL,
MentalIllness,SubmitUID,Email,DateDesc,Award,UID')
)
fataldata$age[grep('months',fataldata$Age)] <- 1
fataldata$age[grep('20s',fataldata$Age)] <- 25
fataldata$age[grep('30s',fataldata$Age)] <- 35
fataldata$age[grep('40s|40-50',fataldata$Age)] <- 45
fataldata$age[grep('50s',fataldata$Age)] <- 55
fataldata$age[grep('60s',fataldata$Age)] <- 65
fataldata$age[grep('45 or 49',fataldata$Age)] <- 47
fataldata$age[grep('25-30',fataldata$Age)] <- 27
fataldata$age[grep('24-25',fataldata$Age)] <- 24
fataldata$age[grep("20's-30's",fataldata$Age)] <- 27
fataldata$Age[fataldata$age %>% is.na] # 169 cases of missing or unknown
fataldata$decade <- ceiling((fataldata$age-1)/10)
fataldata$decade[fataldata$decade==0] <- 1
# Define new variable called state for state abbreviations
# Add DC to the List
state.abb2 <- state.abb %>% c('DC')
state.name2 <- state.name %>% c('District of Columbia')
census$state <- state.abb2[match(census$STATEFIP,state.name2)]
# Create a list of completed states
complete <- C("Alabama,Connecticut,Delaware,District of Columbia,Florida,Louisiana,Maine,
Massachusetts,Mississippi,Montana,Nevada,New Hampshire,New York,North Carolina,
North Dakota,Oregon,Rhode Island,South Dakota,Utah,Vermont,Wyoming")
fataldata$complete <- fataldata$state %in% state.abb2[match(complete,state.name2)]
# Date
fataldata$date <- fataldata$Date %>% as.Date("%d-%b-%y")
# Review financial award information
fataldata$award <- fataldata$Naward <-
fataldata$Award %>% gsub(",","", .) %>% as.numeric()
fataldata$award[fataldata$Naward<0] <- NA
fataldata$sued <- 1
fataldata$sued[is.na(fataldata$Naward) | fataldata$Naward==-2] <- 0
fataldata$awarded <- NA
fataldata$awarded[fataldata$Naward==0] <- 0
fataldata$awarded[fataldata$Naward>0 | fataldata$Naward==-1] <- 1
fataldata$race <- "NA"
fataldata$race[grep("(?i)white", fataldata$Race)] <- "white"
fataldata$race[grep("(?i)black", fataldata$Race)] <- "black"
fataldata$race[grep("(?i)latina|latino", fataldata$Race)] <- "latino"
fataldata$race[grep("(?i)native", fataldata$Race)] <- "native-american"
fataldata$race[grep("(?i)asian", fataldata$Race)] <- "asian"
fataldata$race[grep("(?i)unknown", fataldata$Race)] <- "unknown"
fataldata$race[grep("(?i)mixed", fataldata$Race)] <- "mixed"
fataldata$race[grep("(?i)islander", fataldata$Race)] <- "pacific islander"
fataldata$race[grep("(?i)Middle Eastern", fataldata$Race)] <- "middle eastern"
table(fataldata$race)
fataldata$race4 <- fataldata$race
fataldata$race4[
fataldata$race %in% C('middle eastern,mixed,native-american,pacific islander,asian')] <- 'other'
table(fataldata$race4)
fataldata$white <- 0
fataldata$white[fataldata$race == 'white'] <- 1
fataldata$whiteunknown <- 0
fataldata$whiteunknown[fataldata$race == 'white'|fataldata$race == 'unknown'] <- 1
fataldata$black <- 0
fataldata$black[fataldata$race == 'black'] <- 1
fataldata$DateDesc[grep("(?i)settle|wrongful",fataldata$DateDesc)]
grep("inconsistenc|account",fataldata$DateDesc)
fataldata$nude <- 0
fataldata$nude[grep("nude|Nude|naked|Naked",fataldata$Circumstance)] <- 1
# ---------------------------------------------------------------------------
# Exploratory text mining of the narrative field (DateDesc).
# The recurring pattern below computes a black/white incidence ratio:
#   (matches among black victims / n black) / (matches among white / n white)
# NOTE(review): the regex "[0-9]+{2}" stacks two quantifiers and is almost
# certainly not what was intended (probably "[0-9]{2}") -- verify.
# ---------------------------------------------------------------------------
# Number of shots fired
shots <- str_extract(fataldata$DateDesc, "[0-9]{2} shots")
sum(!is.na(shots))
shots[!is.na(shots)] <- substr(shots[!is.na(shots)],1,2) %>% as.numeric()
shots <- shots %>% as.numeric()
shocks <- str_extract(fataldata$DateDesc, "shocked him [0-9]+|tasered him [0-9]+")
sum(!is.na(shocks))
# NOTE(review): BUG -- the next three lines reset `shots` to a single NA
# (so the !is.na() assignment is a no-op) and the two means below are NA.
# They look like a copy/paste leftover that clobbers the `shots` vector
# extracted above; probably they were meant to operate on `shocks`.
shots <- NA
shots[!is.na(shots)] <- substr(shots[!is.na(shots)],1,2) %>% as.numeric()
shots <- shots %>% as.numeric()
mean(shots[fataldata$black==1], na.rm=TRUE)
mean(shots[fataldata$white==1], na.rm=TRUE)
# Tasered/shocked mentions and black/white ratio.
length(grep("shocked him [0-9]+{2}|tasered.+[0-9]+{2}.+seconds",fataldata$DateDesc))
length(grep("shocked him [0-9]+{2}|tasered.+[0-9]+{2}.+seconds",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep("shocked him [0-9]+{2}|tasered.+[0-9]+{2}.+seconds",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# Multiple shots/rounds/bullets mentions.
length(grep("[0-9]+{2} shots|[0-9]+{2} rounds|[0-9]+{2} bullets",fataldata$DateDesc))
length(grep("[0-9]+{2} shots|[0-9]+{2} rounds|[0-9]+{2} bullets",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep("[0-9]+{2} shots|[0-9]+{2} rounds|[0-9]+{2} bullets",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# Nude/naked mentions.
grep("nude|Nude|naked|Naked",fataldata$DateDesc) %>% length
length(grep("nude|Nude|naked|Naked",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep("nude|Nude|naked|Naked",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# Traffic-stop mentions.
grep("stop sign|traffic",fataldata$DateDesc) %>% length
length(grep("stop sign|traffic",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep("stop sign|traffic",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# Falsified report / cover-up mentions.
grep("falsifying report|false report|cover up|changed.+story|changed.+testimony",fataldata$DateDesc) %>% length
length(grep("falsifying report|false report|cover up|changed.+story|changed.+testimony",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep("falsifying report|false report|cover up|changed.+story|changed.+testimony",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# Police chase mentions.
grep("chase",fataldata$DateDesc) %>% length
length(grep("chase",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep("chase",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# Indictment / grand jury mentions.
grep("indict|grand jury",fataldata$DateDesc) %>% length
length(grep("indict|grand jury",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep("indict|grand jury",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# Robbery / stolen-property mentions.
grep("(?i)robb|stolen",fataldata$DateDesc) %>% length
length(grep("(?i)robb|stolen",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep("(?i)robb|stolen",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# Hostage mentions.
grep("hostage",fataldata$DateDesc) %>% length
length(grep("hostage",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep("hostage",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# Refusal-to-comply mentions.
grep("(?i)refus",fataldata$DateDesc) %>% length
length(grep("(?i)refus",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep("(?i)refus",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# Standoff mentions.
grep("(?i)standoff",fataldata$DateDesc) %>% length
length(grep("(?i)standoff",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep("(?i)standoff",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# NOTE(review): BUG -- the count below is for "spree" but the ratio that
# follows reuses the "standoff" pattern (copy/paste error).
grep("(?i)spree",fataldata$DateDesc) %>% length
length(grep("(?i)standoff",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep("(?i)standoff",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# Toy(-weapon) mentions.
grep("toy",fataldata$DateDesc) %>% length
length(grep("toy",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep("toy",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# Unarmed / hands-up mentions.
# NOTE(review): BUG -- `fataldata$vehi` is referenced here but is only
# created by the cause-of-death loop further down; run top to bottom this
# line errors on the missing column.
grep("unarmed|Unarmed|hands above.+head|hands up",fataldata$DateDesc[!fataldata$vehi]) %>% length
length(grep("unarmed|Unarmed|hands above.+head|hands up",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep("unarmed|Unarmed|hands above.+head|hands up",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# "cooperative" mentions (leading space avoids matching "uncooperative").
grep(" cooperative",fataldata$DateDesc) %>% length
length(grep(" cooperative",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep(" cooperative",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# Wrongful-death mentions.
grep("wrongful death|Wrongful death",fataldata$DateDesc) %>% length
length(grep("wrongful death|Wrongful death",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep("wrongful death|Wrongful death",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# "Reaching for a gun" mentions.
grep("(?i)Reach.+Gun",fataldata$DateDesc) %>% length
length(grep("(?i)Reach.+Gun",fataldata$DateDesc[fataldata$black==1]))/sum(fataldata$black==1)/
(length(grep("(?i)Reach.+Gun",fataldata$DateDesc[fataldata$white==1]))/sum(fataldata$white==1))
# Baseline black/white ratio in the data.
sum(fataldata$black==1)/sum(fataldata$white==1)
grep("unarmed|Unarmed|hands above.+head",fataldata$DateDesc)
grep("chase|Chase",fataldata$DateDesc)
grep("(?i)off-duty|off duty(?-i)",fataldata$DateDesc) %>% fataldata$DateDesc[.]
# Collapse OfficialDisposition into broad categories. Each regex in the
# list becomes the (lower-cased) category label itself, stored in the
# consistently misspelled column "dispotion" (kept for compatibility --
# the lm() below refers to it by that name).
# NOTE(review): "Unreporetd" in the pattern presumably matches a
# misspelling present in the raw data -- verify before "fixing" it.
for (i in C('Justified|Excusable|Cleared|Acquitted,
Unknown|?Unreported|Unreporetd,
investigation|Investigation|Pending|Indicted,
Criminal,
Suicide|suicide,
Accident|accident,
bill,
overdose'))
  fataldata$dispotion[grep(i, fataldata$OfficialDisposition)] <- tolower(i)
fataldata$OfficialDisposition[is.na(fataldata$dispotion)]
fataldata$Cause <- fataldata$Cause %>% tolower
sort(table(fataldata$OfficialDisposition))
# Cause-of-death flags. Each pattern becomes a logical column named after
# its first four characters: guns, tase, vehi, medi, tram, asph, unkn.
for (i in C('gunshot,taser,vehicle,
medical|overdose|drug,
trama|trauma|beaten|bludgeoned|stabbed|knifed|battered|fall|fell|restraint|fire|burns,
asphyx|breathe|pepper|drown|smoke,
unknown|investigation|undetermined|undisclosed|unreported|unclear'
)) {
  fataldata[[substr(i,1,4)]] <- FALSE
  fataldata[[substr(i,1,4)]][grep(i,fataldata$Cause)] <- TRUE
}
sum(fataldata$vehi)
# Black/white ratios of vehicle deaths and of various age bands.
with(fataldata, sum(black& vehi )/sum(black) / (sum(white& vehi )/sum(white)))
with(fataldata %>% subset(!is.na(age)), sum(age>9 & age<=15))
with(fataldata %>% subset(!is.na(age)), sum(black & age>9 & age<=15 )/sum(black) /
(sum(white & age>9 & age<=15 )/sum(white)))
with(fataldata %>% subset(!is.na(age)), sum(black & age>50)/sum(black) /
(sum(white & age>50)/sum(white)))
with(fataldata %>% subset(!is.na(age)), sum(age<=9))
with(fataldata %>% subset(!is.na(age)), sum(black & age<=9 )/sum(black) / (sum(white & age<=9 )/sum(white)))
# Convenience: rounded mean.
mr <- function(x) x %>% mean %>% round(2)
table(fataldata$Gender)
table(fataldata$state)
# Per-state summary of the Fatal Encounters records (counts, racial shares,
# cause-of-death counts, total awards in $m, completeness).
fatal_pop_density <-
  fataldata %>%
  group_by(state) %>%
  dplyr::summarise(kills=n(),
white_fatal=mean(white),
wfn=sum(white),
wun=mean(whiteunknown),
black_fatal=mean(black),
bfn=sum(black),
pkilledbw=mean(black)/mean(white),
whiteunknown_black_fatal=mean(whiteunknown)/mean(black),
count_black=sum(black),
guns=sum(guns),
tase=sum(tase),
vehi=sum(vehi),
medi=sum(medi),
tram=sum(tram),
asph=sum(asph),
unkn=sum(unkn),
award=(sum(awarded, na.rm = TRUE)*mean(award, na.rm = TRUE))/10^6,
complete=mean(complete))
summary(lm(award~race+age+dispotion+vehi+tram+tase, data=fataldata))
# Race distribution among males aged 16-30.
fataldata %>% subset(age>15 & age<=30 & Gender=="Male") %>% group_by(race4) %>%
  dplyr::summarise(n = n()) %>% mutate(freq = n / sum(n))
##############################################################################
# Bring In State Data
# Read the IPUMS census extract (SPSS format) and derive person-level flags.
census <- read.spss('Z:/Data/FatalEncounters/usa_00047.sav') %>% as.data.table
# Hispanic indicator.
census$hisp <- TRUE
census$hisp[census$HISPAN=="Not Hispanic"] <- FALSE
# Total family income, with the IPUMS missing code 9999999 left as NA.
census$totinc <- census$policeeduc <- NA
census$totinc[census$FTOTINC!=9999999] <- census$FTOTINC[census$FTOTINC!=9999999]
census$white <- census$black <- census$police <- census$highschool <- FALSE
census$highschool[grep("12|college", census$EDUC)] <- TRUE
# Ordinal education level: 0 = < HS, 1 = grade 12, 2 = some college,
# 3 = 4 years of college, 4 = 5+ years (later greps overwrite earlier).
census$educ <- 0
census$educ[grep("12", census$EDUC)] <- 1
census$educ[grep("college", census$EDUC)] <- 2
census$educ[grep("4 years of college", census$EDUC)] <- 3
census$educ[grep("5\\+ years of college", census$EDUC)] <- 4
# Below the poverty line (POVERTY is percent of the poverty threshold).
census$poor <- census$POVERTY<=100
# State abbreviation (state.abb2/state.name2 defined near the top).
census$state <- state.abb2[match(census$STATEFIP,state.name2)]
# Race flags: white = non-Hispanic white only.
census$white[census$RACE=="White" & !census$hisp] <- TRUE
census$black[census$RACE=="Black/Negro"] <- TRUE
# NOTE(review): BUG? -- AGE[1:100] takes only the first 100 values; the
# resulting short vector is assigned to the whole column (recycled by
# data.table semantics or an error). Looks like a debugging leftover;
# presumably census$AGE %>% sapply(toString) was intended -- verify.
census$agestring <- census$AGE[1:100] %>% sapply(toString)
census$age <- census$agestring %>% as.numeric
census$age[census$agestring == "Less than 1 year old"] <- 0
# Population shares (16-30 year olds) by race, weighted by person weight.
with(census %>% subset(age >15 & age<=30), weighted.mean(white, PERWT))
with(census %>% subset(age >15 & age<=30), weighted.mean(black, PERWT))
with(census %>% subset(age >15 & age<=30), weighted.mean(!black&hisp, PERWT))
with(census %>% subset(age >15 & age<=30), weighted.mean(!black&!hisp&!white, PERWT))
# Within-race versions of the HS / poverty flags (NA outside the race).
census$wHS[census$white] <- census$highschool[census$white]
census$bHS[census$black] <- census$highschool[census$black]
census$wpoor[census$white] <- census$poor[census$white]
census$bpoor[census$black] <- census$poor[census$black]
# Police / sheriff occupations (IPUMS OCC codes 3850, 3860).
census$police[census$OCC %in% c(3850, 3860)] <- TRUE
# How many police/sherifs are in the US? (extract is a 1% sample, hence *100)
sum(census$police)*100
4*20000/(sum(census$police)*100)
# Race-specific police flags (NA for non-police).
census$bpolice <- census$wpolice <- NA
census$bpolice[census$police] <- census$wpolice[census$police] <- FALSE
census$bpolice[census$police & census$black] <- TRUE
census$wpolice[census$police & census$white] <- TRUE
# Education / income restricted to police respondents.
census$policeeduc[census$police] <- census$educ[census$police]
census$policetotinc[census$police] <- census$totinc[census$police]
# Per-state census summary: racial population shares, education, poverty,
# police counts and composition (all person-weighted where appropriate).
census_pop_density <-
  census %>%
  group_by(state) %>%
  dplyr::summarise(
pw=weighted.mean(white, PERWT),
nw=weighted.mean(white, PERWT)*sum(PERWT),
pb=weighted.mean(black, PERWT),
nb=weighted.mean(black, PERWT)*sum(PERWT),
pwb=weighted.mean(white, PERWT)/weighted.mean(black, PERWT),
people=sum(PERWT),
wHS=weighted.mean(wHS, PERWT, na.rm=TRUE),
bHS=weighted.mean(bHS, PERWT, na.rm=TRUE),
wbHS=weighted.mean(wHS, PERWT, na.rm=TRUE)/
weighted.mean(bHS, PERWT, na.rm=TRUE),
wpoor=weighted.mean(wpoor, PERWT, na.rm=TRUE),
bpoor=weighted.mean(bpoor, PERWT, na.rm=TRUE),
bwpoor=weighted.mean(bpoor, PERWT, na.rm=TRUE)/
weighted.mean(wpoor, PERWT, na.rm=TRUE),
policeeduc=mean(policeeduc, na.rm=TRUE),
policetotinc=mean(policetotinc, na.rm=TRUE),
polwb=sum(wpolice, na.rm=TRUE)/sum(bpolice, na.rm=TRUE),
bpolic=mean(bpolice, na.rm=TRUE),
wpolic=mean(wpolice, na.rm=TRUE),
policebwprop=(weighted.mean(bpolice, PERWT, na.rm=TRUE)/weighted.mean(black, PERWT))/
(weighted.mean(wpolice, PERWT, na.rm=TRUE)/weighted.mean(white, PERWT)),
npolice=sum(police, na.rm = TRUE)*100,
polpercap=sum(police, na.rm = TRUE)/length(police)
)
#####################
# Merge Census and fatalencounters data summaries
merged <- merge(census_pop_density, fatal_pop_density, by='state')
# Bayes-rule derivation of the relative hazard used below:
# P(killed|black) = P(killed)P(black|killed)/P(black)
# P(killed|white) = P(killed)P(white|killed)/P(white)
# P(killed|black)/P(killed|white) = P(white)/P(black) * P(black|killed)/P(white|killed)
merged$bwhazard <- merged$pwb * merged$pkilledbw
# merged$policebwprop <- merged$pwb/merged$polwb
# Deaths per 100k residents; awards per capita and per officer.
merged$hazard <- merged$kills / merged$people * 100000
merged$awpercapita <- merged$award / (merged$people) * 10^6
merged$awperpolice <- merged$award / (merged$npolice) *10^6
# Likelihood of being killed by police per 100k people
for (i in C('guns,tase,vehi,medi,asph,tram,unkn'))
  merged[[paste0('h',i)]] <- merged[[i]]/ merged$people * 1000
# NOTE(review): `merged[,.(...)]` is data.table syntax, but merge() of two
# tibbles returns a plain data.frame here -- these lines likely error
# unless merged is converted with as.data.table() first; verify.
merged[,.(polwb, pwb, policebwprop, bpolic, wpolic, pw, pb)]
merged$bwhazard_unknown <- merged$pwb/merged$whiteunknown_black_fatal
merged[,.(state,count_black,bwhazard,wbHS,bwpoor,policetotinc,policebwprop)] %>%
  arrange(-bwhazard) %>% head(60)
merged[,.(state, count_black, bwhazard, bwhazard_unknown, wbHS, bwpoor, policetotinc)] %>%
  arrange(-bwhazard_unknown) %>% head(100)
merged[,.(state, count_black, bwhazard, bwhazard_unknown, wbHS, bwpoor)] %>%
  arrange(-bwhazard) %>% subset(count_black>10) %>% head(100)
merged[,.(state, count_black, bwhazard, bwhazard_unknown, wbHS, bwpoor)] %>%
  arrange(-bwhazard_unknown) %>% subset(count_black>10) %>% head(100)
# Output directory for figures.
# NOTE(review): hard-coded absolute setwd() makes the script machine-specific;
# consider relative paths or here::here() if this is ever re-run elsewhere.
setwd('C:/Users/fsmar/Dropbox/Econometrics by Simulation/2015-12-December')
# Scatter of relative poverty vs. relative police-killing hazard, by state,
# restricted to states with >10 recorded black fatalities; DC excluded as an outlier.
png('2015-12-03-PovertyViolence.png', width=1000, height=610)
merged %>%
  subset(count_black>10 & state!="DC") %>%
  ggplot(aes(x=bwpoor, y=bwhazard, label=state)) +
  geom_smooth(method='lm',formula=y~x) +
  geom_text(size=10) +
  theme_bw(base_size = 18) +
  labs(x='P(Poor|Black)/P(Poor|White)',
       y='P(Killed|Black)/P(Killed|White)',
       title="Relationship Between Poverty and Likelihood of Being Killed by Race")
dev.off()
# Same relationship as a regression, adding the HS-completion ratio.
summary(lm(bwhazard~bwpoor+wbHS, data=merged %>% subset(count_black>10 & state!="DC")))
# Relative HS completion vs. relative poverty, by state.
png('2015-12-03-PovertyHS.png', width=1000, height=610)
merged %>%
  subset(count_black>10) %>%
  ggplot(aes(x=1/wbHS, y=bwpoor, label=state)) +
  geom_smooth(method='lm',formula=y~x) +
  geom_text(size=10) +
  theme_bw(base_size = 18) +
  labs(x='P(HS|Black)/P(HS|White)',
       y='P(Poor|Black)/P(Poor|White)',
       title="Relationship Between Poverty and High School Completion by Race")
dev.off()
# Mapping the data
# Load state border maps
states <- map_data("state")
# Assign state abbreviations
# NOTE(review): state.abb2/state.name2 are presumably extended versions of the
# base state.abb/state.name vectors (e.g. including DC) defined earlier in the
# file — confirm they stay index-aligned.
states$state <- state.abb2[match(states$region, state.name2 %>% tolower)]
#####################
# Merge state and merged data together
choro <- merge(states, merged, sort = FALSE, by = "state")
# Re-sort by the map's polygon drawing order (merge() scrambles row order).
choro <- choro[order(choro$order), ]
# Choropleth of the relative hazard; states with complete data outlined orange.
png('2015-12-03-BLM.png', width=1000, height=610)
choro %>%
  ggplot(aes(x=long, y=lat, fill = bwhazard, group = group)) +
  geom_polygon(colour="black") +
  geom_polygon(data=choro %>% subset(complete==1), colour="orange", lwd=1) +
  scale_fill_gradient(low = "white", high = "violetred4",
                      name="P(Killed|Black)/\nP(Killed|White)",
                      limit=c(1,10)) +
  theme_bw(base_size = 18) +
  labs(x='Orange bordered states are those for which information is complete',
       y='',title="Relative Likelihood of Black Person Being Killed by Police to that of a White Person") +
  # State-abbreviation labels at the state centers (AK/HI not on this map).
  geom_text(data=state.center %>%
              as.data.frame %>%
              cbind(state.abb=state.abb) %>%
              subset(!(state.abb %in% C('AK,HI'))), aes(x=x,y=y,label=state.abb, fill=NULL, group=NULL))
dev.off()
# Number of people killed by police
choro %>%
  ggplot(aes(x=long, y=lat, fill = kills, group = group)) +
  geom_polygon(colour=gray(.2)) +
  scale_fill_gradient(high=rgb(.3,0,.3), low="white") +
  # NOTE(review): `high =` is left empty in this scale — likely leftover; the
  # scale is also unused for a fill-only map.  Confirm intent before reuse.
  scale_colour_gradient(low = gray(.5), high = , guide=FALSE) +
  theme_bw(base_size = 18) +
  labs(x='The minimum since 2000',y='',title="Number of People Killed by Police")
# People Killed by Police (per 100,000)
# Black fatality rate per 100k black residents (bfn/nb * 1e5).
png('2015-12-03-BHaz.png', width=1000, height=610)
choro %>%
  ggplot(aes(x=long, y=lat, fill = bfn/nb*10^5, group = group)) +
  geom_polygon(colour=gray(.2)) +
  geom_polygon(data=choro %>% subset(complete==1), colour="orange", lwd=1) +
  scale_fill_gradient(high="darkblue", low="white",
                      name="P(Killed|Black)") +
  scale_colour_gradient(low = gray(.5), high = , guide=FALSE) +
  theme_bw(base_size = 18)+
  # guides(fill=FALSE)+
  labs(x='Orange bordered states are those for which information is complete',
       y='',title="Black People Killed by Police (per 100,000)")+
  geom_text(data=state.center %>%
              as.data.frame %>%
              cbind(state.abb=state.abb) %>%
              subset(!(state.abb %in% C('AK,HI'))), aes(x=x,y=y,label=state.abb, fill=NULL, group=NULL))+
  # Re-draw NV/VT labels in white so they remain legible on dark fills.
  geom_text(data=state.center %>%
              as.data.frame %>%
              cbind(sabb=state.abb) %>%
              subset(sabb %in% c('NV','VT')),
            aes(x=x,y=y,label=sabb, fill=NULL, group=NULL), colour='white')
dev.off()
# White fatality rate per 100k white residents — mirror of the map above.
png('2015-12-03-WHaz.png', width=1000, height=610)
choro %>%
  ggplot(aes(x=long, y=lat, fill = wfn/nw*10^5, group = group)) +
  geom_polygon(colour=gray(.2)) +
  geom_polygon(data=choro %>% subset(complete==1), colour="orange", lwd=1) +
  scale_fill_gradient(high="turquoise4", low="white",
                      name="P(Killed|White)") +
  scale_colour_gradient(low = gray(.5), high = , guide=FALSE) +
  theme_bw(base_size = 18)+
  # guides(fill=FALSE)+
  labs(x='Orange bordered states are those for which information is complete',
       y='',title="White People Killed by Police (per 100,000)")+
  geom_text(data=state.center %>%
              as.data.frame %>%
              cbind(state.abb=state.abb) %>%
              subset(!(state.abb %in% C('AK,HI'))), aes(x=x,y=y,label=state.abb, fill=NULL, group=NULL))+
  geom_text(data=state.center %>%
              as.data.frame %>%
              cbind(sabb=state.abb) %>%
              subset(sabb %in% c('NV')),
            aes(x=x,y=y,label=sabb, fill=NULL, group=NULL), colour='white')
dev.off()
########################################
# NOTE(review): a bare `lm()` call stood here in the original; it errors
# ("argument 'formula' is missing") and would halt a sourced script, so the
# leftover scratch call has been removed.
names(merged)
# Cause-of-death breakdown by race.
# NOTE(review): `mr()` is presumably a project helper (mean rate?) defined
# elsewhere in the file — confirm; `guns`/`tase`/etc. are cause indicators.
fataldata %>%
  group_by(race) %>%
  dplyr::summarize(N=length(Cause),
            gunshot=mr(guns),
            taser=mr(tase),
            vehicle=mr(vehi),
            medical=mr(medi),
            trama=mr(tram),
            asphyxiation=mr(asph),
            unknown=mr(unkn)) %>%
  arrange(-N)
# Cause-of-death counts by decade.
fataldata %>%
  group_by(decade) %>%
  dplyr::summarize(N=length(Cause),
            gunshot=sum(guns),
            taser=sum(tase),
            vehicle=sum(vehi),
            medical=sum(medi),
            trama=sum(tram),
            asphyxiation=sum(asph),
            unknown=sum(unkn)) %>%
  arrange(decade)
# Histogram of settlement awards for black victims.
# NOTE(review): `theme_bw()` is applied twice (the second with base_size=18
# wins) — the first call is redundant.  `comma` comes from the scales package.
fataldata %>%
  subset(award>0 & Race=="African-American/Black") %>%
  ggplot(aes(x=award)) + geom_histogram() +
  scale_x_continuous(labels = comma)+
  theme_bw()+ theme_bw(base_size = 18)
# Award / lawsuit summary for the four largest race groups.
fataldata %>%
  subset(race %in% C('black,unknown,white,latino')) %>%
  group_by(race) %>%
  dplyr::summarize(meanAward = mean(award, na.rm=TRUE),
            medAward = median(award, na.rm=TRUE),
            suetcount = sum(!is.na(award)),
            psued = mean(sued),
            winSuet = mean(awarded, na.rm=TRUE))
# For those who received an award: award statistics by race.
# FIX(review): the original passed `rm.na=TRUE` — a misspelling of `na.rm` —
# which mean() silently swallows via `...`, so NA awards made the means NA.
# Corrected to `na.rm=TRUE` here and below, and median() is given na.rm too,
# matching the sibling summary earlier in the file.
fataldata %>%
  subset(award>0) %>%
  group_by(race) %>%
  dplyr::summarize(meanAward = mean(award, na.rm=TRUE),
            medAward = median(award, na.rm=TRUE),
            count = length(award))
# Same statistics over all cases (not just positive awards), plus the share
# of cases in which the family sued.
fataldata %>%
  group_by(race) %>%
  dplyr::summarize(meanAward = mean(award, na.rm=TRUE),
            medAward = median(award, na.rm=TRUE),
            count = length(award),
            psued = mean(sued))
# Text Mining
# Build a document-term matrix with qdap and merge it back onto metadata.
# NOTE(review): `df` and `df2` are not defined anywhere in view — this looks
# like an unfinished sketch copied from qdap examples; confirm before running.
library(qdap)
mycorpus <- with(df, as.Corpus(txt, ID))
# Keep terms appearing between 3 and 10 times, excluding English stopwords.
mydtm <- as.dtm(Filter(as.wfm(mycorpus,
                              col1 = "docs", col2 = "text",
                              stopwords = tm::stopwords("english")), 3, 10))
key_merge(matrix2df(mydtm, "ID"), df2, "ID")
|
6164418200e473b5d9a368aba6ff3d3b24e4cbf4
|
864b7c280da3729cdc9b122b47dcbbe8bb2a09a1
|
/R/Archive/Bmatch_Example2.R
|
df4b31d643a7fd01ee15446413978342bd3409e1
|
[] |
no_license
|
ejbreen/LOCOS
|
a431c0f514436cb5ae7cff8579a9cea1dca83148
|
f119079ae7a9b0950178ab48eb58215b74c9b185
|
refs/heads/master
| 2021-05-13T19:12:16.234931
| 2018-04-20T14:21:31
| 2018-04-20T14:21:31
| 116,886,231
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,393
|
r
|
Bmatch_Example2.R
|
# Required packages: Rglpk
#                    designmatch
#                    gurobi 'C:/gurobi752/win64/R/gurobi_7.5-2.zip'
# NOTE(review): installing from a hard-coded local zip makes this script
# machine-specific; the comment above documents where the installer lives.
install.packages('C:/gurobi752/win64/R/gurobi_7.5-2.zip', repos = NULL)
library(gurobi)
library(Rglpk)
library(designmatch)
# Load and attach data
data(lalonde)
attach(lalonde)
#################################
# Example 2: minimum distance matching
#################################
# The goal here is to minimize the total of distances between matched pairs.  In
# this example there are no covariate balance requirements.  Again, the solver
# used is glpk with the approximate option
# Treatment indicator
t_ind = treatment
# Matrix of covariates
X_mat = cbind(age, education, black, hispanic, married, nodegree, re74, re75)
# Distance matrix
dist_mat = distmat(t_ind, X_mat)
# Subset matching weight
subset_weight = NULL
# Total pairs to be matched
total_groups = sum(t_ind)
# Solver options: 5-minute time limit, approximate solve.
t_max = 60*5
solver = "gurobi"
approximate = 1
# Note: `solver` is deliberately re-bound from a string to the options list
# that bmatch() expects.
solver = list(name = solver, t_max = t_max, approximate = approximate,
              round_cplex = 0, trace_cplex = 0)
# Match
out = bmatch(t_ind = t_ind, dist_mat = dist_mat, total_groups = total_groups,
             solver = solver)
# Indices of the treated units and matched controls
t_id = out$t_id
c_id = out$c_id
# Total of distances between matched pairs
out$obj_total
# Assess mean balance
meantab(X_mat, t_ind, t_id, c_id)
|
bdb88028f5229a4b9260a35bdeece2f23eb661df
|
708ae4e7e59e6f67baa2faafc39c34d235ebcf8d
|
/8schools.R
|
19b018064df24c11ca58632d43c3141c518d9a52
|
[] |
no_license
|
albcab/Stan-OpenBugs-Examples
|
af791bc910ecaf56a01b709ec4e6f24c523f82c5
|
ba964c930a726247a865f56cf138b1cdf0d72c2e
|
refs/heads/master
| 2020-04-24T06:12:48.063443
| 2019-03-29T13:23:38
| 2019-03-29T13:23:38
| 171,757,361
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 661
|
r
|
8schools.R
|
# Classic "eight schools" hierarchical model (Rubin 1981) fit with Stan.
library(rstan)
# J schools; y = estimated treatment effects, sigma = their standard errors.
schools_dat <- list(J = 8,
                    y = c(28, 8, -3, 7, -1, 1, 18, 12),
                    sigma = c(15, 10, 16, 11, 9, 11, 10, 18))
# Compile and sample the model defined in 8schools.stan.
eight <- stan("8schools.stan",
              data = schools_dat)
print(eight) #customizable
plot(eight)
# Per-school effects plus the log-posterior.
plot(eight, pars = c("theta[1]", "theta[2]", "theta[3]", "theta[4]",
                     "theta[5]", "theta[6]", "theta[7]", "theta[8]",
                     "lp__"))
# Extract posterior draws; theta is (iterations x J).
samples <- extract(eight)
names(samples)
dim(samples$theta)
# Posterior mean effect per school.
apply(samples$theta, 2, mean)
summa <- summary(eight)
names(summa)
summa$summary
# Sampler diagnostics (one matrix per chain).
params <- get_sampler_params(eight)
length(params)
|
4a417242b43da7a7f4c5af326c30b65c9d27fda0
|
561beed458dfcf06de55c8b9145613adf3e3dad6
|
/myPairs.R
|
c6d5f4d4d9df00852acc41ca1d1c91ffcf18bbfd
|
[] |
no_license
|
deniseduma/respls
|
e75dafd68ecd0fa5905667b78f2a36f44d8947da
|
f326cf1a84ab4734a67a156917f8a2752597be68
|
refs/heads/master
| 2016-09-14T05:36:51.454059
| 2016-05-23T22:30:59
| 2016-05-23T22:30:59
| 59,517,165
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 674
|
r
|
myPairs.R
|
# PCA / SVD exploration of per-gene percent-mutation data.
# NOTE(review): columns 2:24 are 23 columns, though the file name says
# 24 genes — confirm whether column 25 was meant to be included.
# dplyr (tbl_df, inner_join) must already be attached; no library() call here.
twofour <- read.table("./percentMutation_24Genes.txt",header=TRUE)
names(twofour)[1] <- c('Subject')
twofour <- tbl_df(twofour)
train.data <- readRDS("./traindata.rds")
X <- as.matrix(twofour[,2:24])
# SVD: compare left singular vectors with the data projected on right ones.
b <- svd(X)
plot(b$u[,1],b$u[,2])
plot(X%*%b$v[,1],X%*%b$v[,2])
# perfrom pca on 24 genes
twofour.pca <- prcomp(as.matrix(twofour[,2:24]))
pairs(twofour.pca$x[,1:5])
# perform pca on 24 genes for training data
twofour.train <- inner_join(train.data,twofour)
# NOTE(review): columns 4195:4217 assume a fixed layout of train.data after
# the join — fragile; verify the gene columns land there.
twofour.train.pca <- prcomp(as.matrix(twofour.train[,4195:4217]))
# Color points by gender: male = blue, everything else = black.
cols <- character(nrow(twofour.train))
cols[] <- "black"
cols[twofour.train$PTGENDER == 'Male'] <- "blue"
pairs(twofour.train.pca$x[,1:5],col=cols)
|
a64f8e5e8f74022b99abfb4bee94d493477b202e
|
30fc18d8606b252d2b27e459fb33b457892a06ff
|
/R/April1st2020.R
|
5aedf7fbe401a06bf11d3b03c02034ef18cf0609
|
[] |
no_license
|
sbalci/MyRCodesForDataAnalysis
|
f76be3c7ef2163e67ea805b4e923c52cba5d52d0
|
3839c86b11bf4734fbd4949558f473fca63068f6
|
refs/heads/master
| 2021-09-08T11:39:19.117922
| 2021-09-06T19:40:45
| 2021-09-06T19:40:45
| 136,336,983
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,191
|
r
|
April1st2020.R
|
# Scratch / exploratory snippets (apparently debugging jamovi input validation).
# Each `controloutcome` re-binding tests how the 0/1-outcome check behaves on
# different inputs (numeric, character, with NA, with out-of-range values).
controloutcome=c(1,0,0,1,NA,1)
controloutcome=c("1","0","0","1",NA,"1")
myoutcome2 <- jmvcore::toNumeric(na.omit(controloutcome))
# TRUE when the vector is non-numeric or contains no 0/1 values.
!is.numeric(myoutcome2) || !any(myoutcome2 %in% c(0,1), na.rm = TRUE)
controloutcome=c(1,0,0,1,NA,2)
jmvcore::naOmit(controloutcome)
controloutcome=c(3, 33, 1, NA, 0,1,NA,2)
controloutcome=c(3, 33, 44, NA,NA,2)
# TRUE when any value other than 0/1 is present (NAs ignored).
any((controloutcome != 0 & controloutcome != 1), na.rm = TRUE)
jmvcore::toNumeric(controloutcome)
as.numeric(as.character(controloutcome))
controloutcome=c(1,0,0,1,NA,2)
controloutcome=c(3, 33, 1, NA, 0,1,NA,2)
controloutcome=c(3, 33, 44, NA,NA,2)
any((controloutcome != 0 & controloutcome != 1), na.rm = TRUE)
# Parse a comma-separated string of time points into a numeric vector.
# NOTE(review): eval() on a string is a no-op here (it returns the string);
# the real work is done by strsplit + as.numeric below.
utimes <- "12,36,60"
utimes <- eval(utimes)
quote(utimes)
utimes <- strsplit(utimes, ",")
utimes <- purrr::reduce(utimes, as.vector)
utimes <- as.numeric(utimes)
b <- c(12, 36, 60)
b
# Minimal Sankey-diagram example from the networkD3 docs.
library(networkD3)
nodes = data.frame("name" =
                     c("Node A", # Node 0
                       "Node B", # Node 1
                       "Node C", # Node 2
                       "Node D"))# Node 3
links = as.data.frame(matrix(c(
  0, 1, 10, # Each row represents a link. The first number
  0, 2, 20, # represents the node being conntected from.
  1, 3, 30, # the second number represents the node connected to.
  2, 3, 40),# The third number is the value of the node
  byrow = TRUE, ncol = 3))
names(links) = c("source", "target", "value")
plot <- sankeyNetwork(Links = links, Nodes = nodes,
                      Source = "source", Target = "target",
                      Value = "value", NodeID = "name",
                      fontSize= 12, nodeWidth = 30)
# Repeated-measures correlation example (Bland & Altman 1995 data).
my.rmc <- rmcorr::rmcorr(participant = Subject, measure1 = PacO2, measure2 = pH, dataset = rmcorr::bland1995)
plot(my.rmc, overall = TRUE)
# Same plot rebuilt in ggplot2: one fitted line per subject.
ggplot2::ggplot(rmcorr::bland1995,
                ggplot2::aes(x = PacO2,
                             y = pH,
                             group = factor(Subject),
                             color = factor(Subject)
                )
) +
  ggplot2::geom_point(ggplot2::aes(colour = factor(Subject))) +
  ggplot2::geom_line(ggplot2::aes(y = my.rmc$model$fitted.values), linetype = 1)
|
2a3cf59706d57e9f1ba521f1f88dab975cd83382
|
9b7888b0b9ecab83ac55e020d2c59917d6452f39
|
/man/bindist.Rd
|
fe3dfc9d53d8ba9bbee2e8d1ae77111b343b7158
|
[] |
no_license
|
jianhong/ChIPpeakAnno
|
703580b9ce6a7708f60d92a78a3714bc9d82a562
|
d2136538718c58881a420c9985c53c6e89e223f4
|
refs/heads/devel
| 2023-08-22T15:29:29.888828
| 2023-07-25T14:57:28
| 2023-07-25T14:57:28
| 186,652,664
| 10
| 6
| null | 2023-09-01T20:48:22
| 2019-05-14T15:41:28
|
R
|
UTF-8
|
R
| false
| true
| 729
|
rd
|
bindist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bindist.R
\docType{class}
\name{bindist-class}
\alias{bindist-class}
\alias{bindist}
\alias{bindist-method}
\alias{$,bindist-method}
\alias{$<-,bindist-method}
\title{Class \code{"bindist"}}
\description{
An object of class \code{"bindist"} represents the relevant fixed-width
range of binding site from the feature and number of possible binding site
in each range.
}
\section{Objects from the Class}{
Objects can be created by calls of the form
\code{new("bindist", counts="integer", mids="integer",
halfBinSize="integer", bindingType="character", featureType="character")}.
}
\seealso{
\link{preparePool}, \link{peakPermTest}
}
\keyword{classes}
|
5746dd8e6c7bced56a3842a2e66250f22736937f
|
3a28482227fed83be725bc4bf7f365328c70ba6d
|
/tests/testthat/test-share.R
|
ec9c40d8b4124059873e6ce9c5dde5484c759302
|
[] |
no_license
|
cran/crunch
|
2028c53d134c0d1b74cc2e680fbdb7c0ec80388f
|
5a855bbcaf3f8edadcb73989f435e299a726426b
|
refs/heads/master
| 2023-04-03T12:09:41.136011
| 2023-03-22T12:10:09
| 2023-03-22T12:10:09
| 33,396,864
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,030
|
r
|
test-share.R
|
# Tests for dataset sharing and the PermissionCatalog (crunch package).
# with_mock_crunch() replays recorded HTTP fixtures, so no live API is hit;
# expect_PATCH() asserts the exact request URL and JSON payload that would
# have been sent.
context("Sharing")
# Email of the mock "current user" used throughout these tests.
me <- "fake.user@example.com"
with_mock_crunch({
    ds <- cachedLoadDataset("test ds")
    ds2 <- cachedLoadDataset("ECON.sav")
    test_that("Dataset has permissions catalog", {
        expect_is(permissions(ds), "PermissionCatalog")
        expect_identical(
            urls(permissions(ds)),
            c("https://app.crunch.io/api/users/user1/", "https://app.crunch.io/api/users/user2/")
        )
        expect_identical(
            emails(permissions(ds)),
            c("fake.user@example.com", "nobody@crunch.io")
        )
    })
    test_that("Editing attributes", {
        expect_identical(
            is.editor(permissions(ds)),
            structure(c(TRUE, FALSE),
                .Names = c("fake.user@example.com", "nobody@crunch.io")
            )
        )
        # is.editor works on both catalog subsets ([) and single entries ([[).
        expect_true(is.editor(permissions(ds)[me]))
        expect_true(is.editor(permissions(ds)[[me]]))
        expect_false(is.editor(permissions(ds)["nobody@crunch.io"]))
    })
    test_that("Permissions with dataset shared with team", {
        # Team entries have no email, hence the NA_character_.
        expect_identical(
            emails(permissions(ds2)),
            c(NA_character_, "dos@example.io", "tres@example.com")
        )
        expect_identical(
            is.editor(permissions(ds2)),
            structure(c(TRUE, TRUE, TRUE),
                .Names = c(NA_character_, "dos@example.io", "tres@example.com")
            )
        )
    })
    # Pin the API host so the URLs embedded in the expected payloads are stable.
    with(temp.options(crunch = list(crunch.api = "https://fake.crunch.io/api/v2/")), {
        test_that("Share payload shape", {
            expect_identical(
                passwordSetURLTemplate(),
                "https://fake.crunch.io/password/change/${token}/"
            )
            expect_PATCH(
                share(ds, "lauren.ipsum@crunch.io",
                    edit = TRUE,
                    notify = FALSE
                ),
                "https://app.crunch.io/api/datasets/1/permissions/",
                '{"lauren.ipsum@crunch.io":{"dataset_permissions":',
                '{"edit":true,"view":true}},"send_notification":false}'
            )
            # With notify=TRUE the payload additionally carries the password
            # reset template and the dataset URL.
            expect_PATCH(
                share(ds, "lauren.ipsum@crunch.io",
                    edit = TRUE,
                    notify = TRUE
                ),
                "https://app.crunch.io/api/datasets/1/permissions/",
                '{"lauren.ipsum@crunch.io":{"dataset_permissions":',
                '{"edit":true,"view":true}},"send_notification":true,',
                '"url_base":"https://fake.crunch.io/password/change/${token}/",',
                '"dataset_url":"https://fake.crunch.io/dataset/1"}'
            )
            expect_PATCH(
                share(ds, "lauren.ipsum@crunch.io",
                    edit = TRUE,
                    notify = TRUE, message = "testing"
                ),
                "https://app.crunch.io/api/datasets/1/permissions/",
                '{"lauren.ipsum@crunch.io":{"dataset_permissions":',
                '{"edit":true,"view":true}},"send_notification":true,"message":"testing",',
                '"url_base":"https://fake.crunch.io/password/change/${token}/",',
                '"dataset_url":"https://fake.crunch.io/dataset/1"}'
            )
            # A message without a notification is a user error.
            expect_error(share(ds, "lauren.ipsum@crunch.io",
                edit = TRUE,
                notify = FALSE, message = "testing"
            ),
            "Cannot send message if not notifying",
            fixed = TRUE
            )
        })
    })
    # Stub the catalog constructor to return an empty index, simulating a
    # project-owned dataset whose permissions catalog is empty.
    with_mock(
        `crunch::PermissionCatalog` = function(...) {
            out <- new("PermissionCatalog", ...)
            out@index <- list()
            return(out)
        },
        test_that(
            paste0(
                "Sharing works even if the PermissionCatalog is empty (as with ",
                "a project-owned dataset)"
            ), {
                expect_length(permissions(ds), 0)
                expect_PATCH(
                    share(ds, "lauren.ipsum@crunch.io",
                        notify = FALSE
                    ),
                    "https://app.crunch.io/api/datasets/1/permissions/",
                    '{"lauren.ipsum@crunch.io":{"dataset_permissions":',
                    '{"edit":false,"view":true}},"send_notification":false}'
                )
            }
        )
    )
})
# Integration tests against a real Crunch server (only run when test
# authentication credentials are configured).
with_test_authentication({
    me <- me()@body$email
    # A uniquely-named throwaway dataset for this run.
    ds <- createDataset(name = now())
    test_that("PermissionsCatalog from real dataset", {
        expect_is(permissions(ds), "PermissionCatalog")
        expect_identical(urls(permissions(ds)), userURL())
        expect_identical(emails(permissions(ds)), me)
        expect_identical(is.editor(permissions(ds)), structure(TRUE, .Names = me))
    })
    test_that("share and unshare methods for dataset", {
        ds <- share(ds, "foo@crunch.io", notify = FALSE)
        expect_true(setequal(
            emails(permissions(ds)),
            c(me, "foo@crunch.io")
        ))
        ds <- unshare(ds, "foo@crunch.io")
        expect_identical(emails(permissions(ds)), me)
    })
    test_that("re-sharing doesn't change the state", {
        share(ds, "foo@crunch.io", notify = FALSE)
        expect_true(setequal(emails(permissions(ds)), c(me, "foo@crunch.io")))
    })
    others <- c("foo@crunch.io", "a@crunch.io", "b@crunch.io")
    test_that("can share dataset with multiple at same time", {
        share(ds, c("a@crunch.io", "b@crunch.io"), notify = FALSE)
        expect_true(setequal(emails(permissions(ds)), c(me, others)))
        # Only the creator is an editor; newly shared users are viewers.
        expect_true(is.editor(permissions(ds)[[me]]))
        for (user in others) {
            expect_false(is.editor(permissions(ds)[[user]]), info = user)
        }
    })
    test_that("Can make multiple people editors", {
        skip("TODO invite a and b as advanced users")
        ds <- share(ds, c("a@crunch.io", "b@crunch.io"),
            notify = FALSE, edit = TRUE
        )
        expect_true(is.editor(permissions(ds)[["a@crunch.io"]]))
        expect_true(is.editor(permissions(ds)[["b@crunch.io"]]))
    })
})
|
290313c9ce862495c47904365a49126fdeb62e87
|
92f10d4f9750d68f3a048669e3efd5e92d5afd18
|
/man/somatic_piu_mapping.Rd
|
cacb4992e566d189ea278999ddfcf072ab4cb622
|
[] |
no_license
|
ginnyintifa/GPD_v0
|
5af48f399bc855614cd1898c0dd7ea2b0297165a
|
e0bbbc1dafc1f545593d234535c8fab1a2e15053
|
refs/heads/master
| 2020-03-30T09:00:19.049829
| 2018-11-07T01:40:52
| 2018-11-07T01:40:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,772
|
rd
|
somatic_piu_mapping.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/somatic_extraction_mapping_wrap.R
\name{somatic_piu_mapping}
\alias{somatic_piu_mapping}
\title{Map somatic pc to piu and bpiu, summarise bpiu and npc per gene}
\usage{
somatic_piu_mapping(ptm_domain_filename, pc_data_name, npc_data_name,
cancer_barcode, locus_level_mut_min = 3, gene_level_mut_min = 1,
output_dir)
}
\arguments{
\item{ptm_domain_filename}{The file provided by the package where the piu information is recorded.}
\item{pc_data_name}{the file listing mutations that result in protein consequence. A file generated by function germline_extraction_annotation_pos}
\item{npc_data_name}{the file listing mutations that result in non-protein consequence. A file generated by function germline_extraction_annotation_pos}
\item{cancer_barcode}{TCGA barcodes for this cancer type cohort.}
\item{locus_level_mut_min}{The minimum mutation frequency required to select mutations at the locus level.}
\item{gene_level_mut_min}{The minimum mutation frequency required to select mutations at the gene level.}
\item{output_dir}{The directory you would like to have your output files in.}
}
\description{
This function generates three files as the final products. First, each piu mapped, second, bpiu summarised per gene, third,
npc summarised per gene.
}
\examples{
somatic_piu_mapping (ptm_domain_filename = "/data/ginny/tcga_pancan/important_files/ptm_domain_combine_df.tsv",
pc_data_name =
"/data/ginny/tcga_pancan/germline_raw_process/STAD_snpeff_annotation/snpeff_variant_anno_pc_pos.tsv",
npc_data_name =
"/data/ginny/tcga_pancan/germline_raw_process/STAD_snpeff_annotation/snpeff_variant_anno_npc.tsv",
cancer_barcode = stad_barcode,
output_dir = "somatic_output/")
}
|
390e5d2c95cfffb2b8749c92fd72e80a99ae6345
|
7625768e5b648788380cd89161f22805e187b1bc
|
/tests/testthat.R
|
e69d0a8e444cc6b005cb9141d786b245a84da7ac
|
[
"BSD-2-Clause"
] |
permissive
|
jsta/gamut
|
b33f2f05788874c955d0c7ea84330ba0a08940a7
|
0db7d240c06de1df4a3c6aba7fbd66a280d1279e
|
refs/heads/main
| 2023-06-05T08:49:05.075141
| 2021-05-05T19:42:53
| 2021-05-05T19:42:53
| 382,505,180
| 0
| 0
|
NOASSERTION
| 2021-07-03T02:14:23
| 2021-07-03T02:14:22
| null |
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# Standard testthat entry point: discover and run every test under
# tests/testthat/ for the gamut package.
library(testthat)
library(gamut)
test_check("gamut")
|
96dd81059a1f5fdcad44a934ff77c76f60664eab
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/colf/examples/fitted.colf_nlxb.Rd.R
|
0d225b8383ab6c409523154d3223e631aa284890
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 207
|
r
|
fitted.colf_nlxb.Rd.R
|
# Extracted example for fitted.colf_nlxb from the colf package docs.
library(colf)
### Name: fitted.colf_nlxb
### Title: Fitted values for colf_nlxb
### Aliases: fitted.colf_nlxb
### ** Examples

# Constrained least-squares fit of mpg on hp and cyl.
mymod <- colf_nlxb(mpg ~ hp + cyl, mtcars)
#fitted values
fitted(mymod)
|
03c239e9c6f861d6772ed3a0ccff7ab81c222fd7
|
1eefd329fd5432932e67dd0cc47e8140eb914265
|
/08_swissre-use-case/test_script.R
|
ef93e9f7347160273e20ef1411a5c5197f5dfe7d
|
[] |
no_license
|
shubamsharma/Data-Analytics
|
73ff7b0874eb5923a7dd7d853bc713b035011225
|
ec9aaaeae441232adf88629857ab9a130f2f0bec
|
refs/heads/master
| 2021-08-30T22:05:14.394388
| 2017-12-19T15:45:53
| 2017-12-19T15:45:53
| 113,478,992
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,435
|
r
|
test_script.R
|
# Load the training data and split it into design matrix X and response y.
# NOTE(review): hard-coded setwd() makes the script machine-specific.
setwd("D:/09_analytics_new_start/07_swiss_re")
train_File = "train_dataset.csv"
train_data = read.csv(train_File)
# Columns 2:5 are the regressors; column 1 is the response.
X = as.matrix(train_data[,2:5])
y = as.matrix(train_data[,1])
# Negative Gaussian log-likelihood for the linear model y = X beta + e.
#
# theta : parameter vector; theta[1] is the error variance sigma^2 and
#         theta[-1] are the regression coefficients beta.
# y     : n x 1 response matrix.
# X     : n x k design matrix.
#
# Returns the NEGATIVE log-likelihood (optim() minimises by default), or
# NA when sigma^2 <= 0, i.e. outside the parameter space.
# FIX(review): removed the leftover debug `print(beta)`, which fired on
# every objective evaluation inside optim().
ols.lf1 <- function(theta, y, X) {
  beta <- theta[-1]
  sigma2 <- theta[1]
  if (sigma2 <= 0) return(NA)
  n <- nrow(X)
  e <- y - X%*%beta                     # residual vector
  # t(e) %*% e is the residual sum of squares (1x1 matrix).
  logl <- ((-n/2)*log(2*pi)) - ((n/2)*log(sigma2)) - ((t(e)%*%e)/(2*sigma2))
  return(-logl)  # since optim() does minimisation by default.
}
# Spot-check the objective at a unit starting point.
ols.lf1(c(1,1,1,1,1),y,X)
# NOTE(review): the bare literal below appears to be the recorded value of
# the call above pasted into the script; it evaluates to itself and is
# harmless, but is not an assertion.
-20989185.585176494
# Analytical derivatives
# Gradient of the negative log-likelihood in ols.lf1.
# Element 1 is d(-logl)/d(sigma^2); the remaining elements are
# d(-logl)/d(beta).  Returned negated to pair with ols.lf1 under optim().
ols.gradient <- function(theta, y, X) {
  sig2 <- theta[1]
  coefs <- theta[-1]
  resid <- y - X %*% coefs
  n_obs <- nrow(X)
  grad <- numeric(length(theta))
  # d logl / d sigma^2
  grad[1] <- (t(resid) %*% resid) / (2 * sig2 * sig2) - n_obs / (2 * sig2)
  # d logl / d beta
  grad[-1] <- crossprod(X, resid) / sig2
  -grad
}
# Spot-check the analytical gradient at the same starting point.
ols.gradient(c(1,1,1,1,1),y,X)
# Maximise the likelihood (minimise the negative) with box constraints
# keeping sigma^2 (and the first two betas) positive.
cat("\nGradient-free (constrained optimisation) --\n")
m1 = optim(c(1,1,1,1,1), method="L-BFGS-B", fn=ols.lf1,
           lower=c(1e-6, 1e-6, 1e-6,-Inf,-Inf), upper=rep(Inf,5), y=y, X=X)
cat("\nUsing the gradient (constrained optimisation) --\n")
# NOTE(review): upper has length 3 here while par has length 5 (m1 uses 5);
# optim recycles bounds, and all entries are Inf so the result is the same,
# but rep(Inf,5) was probably intended — confirm.
m2 = optim(c(1,1,1, 1, 1), method="L-BFGS-B", fn=ols.lf1, gr=ols.gradient,
           lower=c(1e-6, 1e-6, 1e-6,-Inf,-Inf), upper=rep(Inf,3), y=y, X=X)
print (m1$par)
print (m2$par)
# Scratch exploration of R's negative-index assignment semantics.
g <- 1
g
g[1] = 2
g[-1] = 3
g
|
267d8bde36d25a4cfc7a34a86a7b8f1b9742807f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/clValid/examples/stability.Rd.R
|
9248cb3b8409335323ee9766f894a83ff351936b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 865
|
r
|
stability.Rd.R
|
# Extracted example for the clValid stability measures (APN, AD, ADM, FOM).
library(clValid)
### Name: stability
### Title: Stability Measures
### Aliases: stability APN AD ADM FOM
### Keywords: cluster

### ** Examples

data(mouse)
# 25 genes x 6 arrays (3 treatment, 3 control).
express <- mouse[1:25,c("M1","M2","M3","NC1","NC2","NC3")]
rownames(express) <- mouse$ID[1:25]
## hierarchical clustering
Dist <- dist(express,method="euclidean")
clusterObj <- hclust(Dist, method="average")
nc <- 4 ## number of clusters
cluster <- cutree(clusterObj,nc)
# One row of stability scores per left-out column.
stab <- matrix(0,nrow=ncol(express),ncol=4)
colnames(stab) <- c("APN","AD","ADM","FOM")
## Need loop over all removed samples
# Leave-one-column-out: re-cluster without each array and compare
# the resulting partition against the full-data clustering.
for (del in 1:ncol(express)) {
  matDel <- express[,-del]
  DistDel <- dist(matDel,method="euclidean")
  clusterObjDel <- hclust(DistDel, method="average")
  clusterDel <- cutree(clusterObjDel,nc)
  stab[del,] <- stability(express, Dist, del, cluster, clusterDel)
}
# Average stability over all left-out arrays.
colMeans(stab)
|
916698ced852ee64a9b0804dd581e59d5d6fd845
|
6c4464440bf42df3df8eb947b3a2798476dfac78
|
/PBSmodelling/man/plotBubbles.Rd
|
9015a4902d4e1c7ee9233a45331ff632ef013225
|
[] |
no_license
|
pbs-software/pbs-modelling
|
ad59ca19ced6536d2e44ff705e36a787341f60d7
|
44b14f20af33d5dee51401bad2ff3dce2dfd3cea
|
refs/heads/master
| 2023-01-11T16:18:06.846368
| 2023-01-06T22:45:05
| 2023-01-06T22:45:05
| 37,491,656
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,976
|
rd
|
plotBubbles.Rd
|
\name{plotBubbles}
\alias{plotBubbles}
\title{Construct a Bubble Plot from a Matrix}
\description{
Construct a bubble plot for a matrix \code{z}.
}
\usage{
plotBubbles(z, xval=FALSE, yval=FALSE, dnam=FALSE, rpro=FALSE,
cpro=FALSE, rres=FALSE, cres=FALSE, powr=0.5, size=0.2, lwd=1,
clrs=c("black","red","blue"), hide0=FALSE, frange=0.1, prettyaxis=FALSE, ...)
}
\arguments{
\item{z}{input matrix, array (2 dimensions) or data frame.}
\item{xval}{x-values and/or labels for the columns of \code{z}.
if \code{xval=TRUE}, the first row contains x-values for the columns.}
\item{yval}{y-values and/or labels for the rows of \code{z}.
If \code{yval=TRUE}, the first column contains y-values for the rows.}
\item{dnam}{logical: if \code{TRUE}, attempt to use \code{dimnames} of input
matrix \code{z} as \code{xval} and \code{yval}. The \code{dimnames} are
converted to numeric values and must be strictly increasing or decreasing.
If successful, these values will overwrite previously specified values
of \code{xval} and \code{yval} or any default indices.}
\item{rpro}{logical: if \code{TRUE}, convert rows to proportions.}
\item{cpro}{logical: if \code{TRUE}, convert columns to proportions.}
\item{rres}{logical: if \code{TRUE}, use row residuals (subtract row means).}
\item{cres}{logical: if \code{TRUE}, use column residuals (subtract column means).}
\item{powr}{power transform. Radii are proportional to \code{z^powr}.
Note: \code{powr=0.5} yields bubble areas proportional to \code{z}.}
\item{size}{size (inches) of the largest bubble.}
\item{lwd}{line width for drawing circles.}
\item{clrs}{colours (3-element vector) used for positive, negative,
and zero values, respectively.}
\item{hide0}{logical: if \code{TRUE}, hide zero-value bubbles.}
\item{frange}{number specifying the fraction by which the range of the axes should be extended.}
\item{prettyaxis}{logical: if \code{TRUE}, apply the pretty function to both axes.}
\item{...}{additional arguments for plotting functions.}
}
\details{
The function \code{plotBubbles} essentially flips the \code{z} matrix
visually. The columns of \code{z} become the x-values while the rows of
\code{z} become the y-values, where the first row is displayed as the
bottom y-value and the last row is displayed as the top y-value. The
function's original intention was to display proportions-at-age vs. year.
}
\author{
Jon T. Schnute, Pacific Biological Station, Fisheries and Oceans Canada, Nanaimo BC
}
\seealso{
\code{\link{genMatrix}}
}
\examples{
local(envir=.PBSmodEnv,expr={
oldpar = par(no.readonly=TRUE)
plotBubbles(round(genMatrix(40,20),0),clrs=c("green","grey","red"));
data(CCA.qbr,envir=.PBSmodEnv)
plotBubbles(CCA.qbr,cpro=TRUE,powr=.5,dnam=TRUE,size=.15,
ylim=c(0,70),xlab="Year",ylab="Quillback Rockfish Age")
par(oldpar)
})
}
\keyword{hplot}
|
57a671e5c0fde94531d095cb87e6b4cef7631f65
|
cd0bcba4c286d810674110f77538e78fb51c63d3
|
/man/cv_joint_shrinkage_est.Rd
|
2eb8896340901862a3d4f3cffee23dd61565a944
|
[] |
no_license
|
KenLi93/PPWLSE
|
cf1259c5d009ccb76412be8eb67ca0cd339c49cb
|
e91b821d97b0bdcdc9768439aedfa50fb969af09
|
refs/heads/main
| 2023-04-12T02:36:31.174043
| 2021-05-19T22:29:55
| 2021-05-19T22:29:55
| 349,157,536
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 334
|
rd
|
cv_joint_shrinkage_est.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/joint_cv_rlasso.R
\name{cv_joint_shrinkage_est}
\alias{cv_joint_shrinkage_est}
\title{Joint Cross-Validation}
\usage{
cv_joint_shrinkage_est(
X,
Y,
lambda = NULL,
nlambda = 100,
nfolds = 5,
nrep = 200
)
}
\description{
Joint Cross-Validation
}
|
b28f08c6a59b0d1e70857477399a33013a2a0ee4
|
c1846f5e7846306940875e597b94d1100b5244ef
|
/database.R
|
ed896bd3d9dc324d68f2ab1a7d787f5b976e7d04
|
[
"MIT"
] |
permissive
|
lusystemsbio/geneex
|
c75d8ea8034b8e5fb6cd48c1c5ec83de491966f6
|
c17dc2536807e20e828c60dfc72b8c0a587b2de3
|
refs/heads/master
| 2022-12-12T08:06:12.545785
| 2020-09-02T19:12:36
| 2020-09-02T19:12:36
| 267,390,831
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,688
|
r
|
database.R
|
# Shiny UI definition for the "Database" tab: three filter buttons
# (Biological / Synthetic / All), a table of database entries, and a
# preview pane (table + network plot) with download controls.
# NOTE(review): all three buttons share the tooltip "Add this interaction
# to the circuit." — that text matches an add-interaction action, not a
# filter; presumably copy-pasted.  Confirm intended titles.
database <-
  tabPanel("Database",
           useShinyjs(),
           fluidRow(
             column(1,offset = 1,
                    actionButton(
                      "biologicalDB", "Biological",
                      style="color: #fff; background-color: #337ab7;
                      border-color: #2e6da4", title = "Add this interaction
                      to the circuit.
                      ")),
             column(1,offset = 1,
                    actionButton(
                      "syntheticDB", "Synthetic",
                      style="color: #fff; background-color: #337ab7;
                      border-color: #2e6da4", title = "Add this interaction
                      to the circuit.
                      ")),
             column(1,offset = 1,
                    actionButton(
                      "allDB", "All",
                      style="color: #fff; background-color: #337ab7;
                      border-color: #2e6da4", title = "Add this interaction
                      to the circuit.
                      "))
           ),
           br(),
           fluidRow(
             DTOutput("databaseTable")
           ),
           hr(),
           bsAlert("dbAlert"),
           hr(),
           br(),
           # Selected circuit: data table on the left, network plot on the right.
           fluidRow(
             column(4,offset = 0,
                    DTOutput("tableDbNetwork")
             ),
             column(7,offset = 0,
                    # Hidden until a circuit is selected (toggled server-side).
                    hidden(actionButton("loadNetworkDatabase", "Load Circuit",
                                        style="color: #fff;background-color: #32CD32; border-color: #2e6da4")),
                    visNetworkOutput("plotDbNetwork")
             )
           ),
           # Download controls, also revealed server-side via shinyjs.
           shinyjs::hidden(downloadButton('downloadDbData', 'Download Data')),
           shinyjs::hidden(radioButtons("downloadDbDataType", "Format",
                                        c("RDS" = "RDS","CSV" = "csv") ,
                                        selected = "RDS",
                                        inline = TRUE)),
           hr(),
           hr()
  )
|
55f1aa3572e94ad86c76d09e5ea69ff3f4fb8bbc
|
bfbf3c22cab871231f35b8fd38ffce846a0571b2
|
/Energy_Optim_Project/Scripts_and_functions/LCOE_NUCLEAR.R
|
5a964f8c568e97a37af41cd6d8d0f93fe3ce9945
|
[
"MIT"
] |
permissive
|
malooney/Economic_Optimization
|
9df926b042bf1e460d2b1448e194abb979dbe2f0
|
e00f412e5ba14d69a683b8287c11d44fcc8fe6c9
|
refs/heads/master
| 2021-01-23T17:38:33.763265
| 2017-12-10T18:02:26
| 2017-12-10T18:02:26
| 102,771,460
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,645
|
r
|
LCOE_NUCLEAR.R
|
library(triangle)
# Monte Carlo simulation of the Levelized Cost of Electricity (LCOE) for a
# nuclear plant.
#
#   i     : interest rate on capital costs
#   Fc    : fuel cost ($/MMBtu)
#   terms : estimated lifetime of the power plant (years)
#   count : number of Monte Carlo replications
#
# Returns a count x 1 matrix of simulated LCOE values.
# Each input is drawn from a truncated distribution via rejection sampling:
# the while-loops draw one candidate at a time and keep it only if it falls
# inside the stated bounds, filling the column until the last cell is set.
# NOTE(review): the /0.001 divisions (i.e. x1000) look like unit conversions
# (e.g. $/kW -> $/MW); the stated units in the comments should be confirmed.
LCOE_NUCLEAR <- function(i=0.10, Fc=0.65, terms=40, count=10000){
  # Nuclear
  # Terms is the estimated lifetime of power plant
  # count is the number of Monte Carlo replications
  # i is the interest rate on capital costs
  # Fc is fuel cost ($/MMBtu) - convert in-situ
  ##############################################################################
  Cf <- matrix(nrow=count, ncol=1)  # Capacity Factor (%)
  C_c <- matrix(nrow=count, ncol=1) # Capital Cost ($/MWh)
  OMf <- matrix(nrow=count, ncol=1) # Fixed OM Costs ($/MW/Yr)
  Q <- matrix(nrow=count, ncol=1)   # Heat Rate (Btu/KWh) - convert in-situ
  OMv <- matrix(nrow=count, ncol=1) # Variable OM Costs ($/MWh)
  ##############################################################################
  ## model Capacity Factor (%) from Normal Distn.
  ii <- 1
  while( is.na(Cf[length(Cf)]) ){
    temp <- rnorm(n=1, mean=87.5, sd=sqrt(1.25))
    if(temp >= 85 && temp <= 90){
      Cf[ii,1] <- temp/100          # percent -> fraction
      ii <- ii+ 1
    } else{}
  }
  ##############################################################################
  ## model Capital Costs with Log Normal Distn.
  ii <- 1
  while( is.na(C_c[length(C_c)]) ){
    temp <- rlnorm(n=1, meanlog = 8.7, sdlog = 0.185)
    if(temp >= 4146 && temp <= 8691){
      C_c[ii,1] <- temp/0.001       # x1000 unit conversion (see NOTE above)
      ii <- ii+ 1
    } else{}
  }
  ##############################################################################
  ## model Fixed OM Costs from Normal Distn.
  ii <- 1
  while( is.na(OMf[length(OMf)]) ){
    temp <- rnorm(n=1, mean=87.69, sd=sqrt(16.75))
    if(temp >= 54.19 && temp <= 121.19){
      OMf[ii,1] <- temp/0.001       # x1000 unit conversion (see NOTE above)
      ii <- ii+ 1
    } else{}
  }
  ##############################################################################
  ## model Heat Rate from Normal Distn.
  ii <- 1
  while( is.na(Q[length(Q)]) ){
    temp <- rnorm(n=1, mean=10450, sd=sqrt(15))
    if(temp >= 10420 && temp <= 10480){
      Q[ii,1] <- temp
      ii <- ii+ 1
    } else{}
  }
  ##############################################################################
  ## model Variable OM Costs from Triangle Distn.
  ii <- 1
  while( is.na(OMv[length(OMv)]) ){
    temp <- rtriangle(1, 0.42, 2.14, 1.28)
    if(temp >= 0.42 && temp <= 2.14){
      OMv[ii,1] <- temp
      ii <- ii+ 1
    } else{}
  }
  ##############################################################################
  # Annualised capital cost via the capital recovery factor:
  # CRF = i + i / ((1+i)^terms - 1).
  P <- (C_c)* ( i+ (i/ ( ((i+ 1)^terms)- 1) ) )
  # LCOE = (annual capital + fixed O&M) per MWh generated (8760 h/yr * Cf)
  #        + fuel cost (Fc $/MMBtu * Q Btu/kWh * 0.001) + variable O&M.
  LCOE_nuclear= ( (P+ OMf )/ (8760* Cf) )+ (Fc* Q* 0.001)+ OMv
  ##############################################################################
  return(LCOE_nuclear)
}
|
2f1d8e14ebcc7a8f150d2d5518ba7bfa7dc9d848
|
a4c4893d319c93078075ab7549d23f927cf29ac5
|
/code/alfresco/alfExtract.R
|
72b0a0533d3c992f4baa643b5b562c53ffdbc63f
|
[] |
no_license
|
leonawicz/SNAPQAQC
|
4fecaef9b245e2f8b1c6a9c40d2ed86e6e6d5392
|
54eb9eca613202ffa166a2f3d417b506c5b78f8a
|
refs/heads/master
| 2020-04-12T01:48:01.178693
| 2017-03-10T19:00:31
| 2017-03-10T19:00:31
| 30,124,606
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,121
|
r
|
alfExtract.R
|
# @knitr setup
# Spatial/data-manipulation dependencies for ALFRESCO output extraction.
suppressMessages(library(rgdal))
suppressMessages(library(raster))
suppressMessages(library(data.table))
suppressMessages(library(dplyr))
# Raise raster's chunk/memory ceilings so large geotiffs are processed in memory.
rasterOptions(chunksize=10^12,maxmemory=10^11)
## Assemble matched lists of ALFRESCO output geotiff paths (Age, Veg, FireScar)
## for one iteration of an extraction loop.
##
## i       : loop index; indexes `years` when loopBy=="rep", replicates when loopBy=="year"
## loopBy  : "rep" (one year, all replicates) or "year" (one replicate, all years)
## mainDir : top-level directory holding ALFRESCO output geotiffs
## reps    : replicate indices to keep
## years   : calendar years (defaults to 2008:2100 when NULL)
##
## Returns a list with Age, Veg, FID (file-path vectors in consistent order)
## and iter (the replicate or year values represented by those files).
prep_alf_files <- function(i, loopBy, mainDir, reps, years){
if(is.null(years)) years <- 2008:2100
if(loopBy=="rep"){
iter <- reps
keep <- reps - 1
# match files for a single year across replicates
id <- paste0("_", years[i], ".tif$")
mainDir <- file.path(mainDir, years[i])
} else if(loopBy=="year") {
keep <- iter <- years
# match files for a single replicate across years
id <- paste0("_",c(0:199)[i],"_.*.tif$")
mainDir <- file.path(mainDir, reps[i])
}
# build one regex per variable prefix (Age "A", Veg "V", fire scar "FireS")
p <- lapply(c("A", "V", "FireS"), function(p, id) gsub("expression","",paste(bquote(expression("^",.(p),".*.",.(id))),collapse="")), id=id)
files <- lapply(1:length(p), function(i, dir, p) list.files(dir, pattern=p[[i]], full=TRUE, recur=TRUE), dir=mainDir, p=p)
names(files) <- c("Age", "Veg", "FID")
# derive the replicate (loopBy=="rep") or year (loopBy=="year") index from FireScar filenames
if(loopBy=="rep") files.idx <- as.numeric(gsub("FireScar_", "", gsub("_\\d+\\.tif", "", basename(files$FID)))) + 1
if(loopBy=="year") files.idx <- as.numeric(gsub("FireScar_\\d+_", "", gsub(".tif", "", basename(files$FID))))
# sort all three file lists into the same (replicate/year) order
ord <- order(files.idx)
files <- lapply(files, function(x, ord) x[ord], ord=ord)
# subset to the requested replicates/years
if(loopBy=="rep") files <- lapply(files, function(x, idx) x[idx], idx=reps)
if(loopBy=="year") files <- lapply(files, function(x, file.idx, keep) { k <- file.idx %in% keep; if(any(k)) x[which(k)] else x }, file.idx=files.idx, keep=keep)
files$iter <- if(is.null(iter)) files.idx[ord] else iter
# sanity checks: no missing paths, all three lists equal length
stopifnot(!any(unlist(sapply(files, is.na))))
stopifnot(all(diff(unlist(sapply(files, length)))==0))
files
}
# @knitr extract_data
## Dispatcher for ALFRESCO extraction: routes to extract_av ("av",
## age/vegetation) or extract_fsv ("fsv", fire size by vegetation).
## All other arguments are forwarded unchanged; errors on unknown type.
extract_data <- function(i, type, loopBy, mainDir, ageDir=NULL, reps=NULL, years=NULL, cells, readMethod="loop", ...){
  stopifnot(length(type) > 0 && type %in% c("av", "fsv"))
  switch(type,
    av  = extract_av(i, loopBy, mainDir, ageDir, reps, years, cells, readMethod, ...),
    fsv = extract_fsv(i, loopBy, mainDir, reps, years, cells, readMethod, ...))
}
# @knitr extract_fsv
## Extract fire size by vegetation class from ALFRESCO FireScar/Veg geotiffs.
##
## i, loopBy, mainDir, reps, years : see prep_alf_files
## cells      : data frame mapping raster cells to LocGroup/Location regions
## readMethod : "stack" reads all layers to memory at once; "loop" reads one
##              layer pair per iteration (lower memory)
## ...        : optional veg.labels (character vector of vegetation class names)
##
## Returns a data.table keyed/grouped by LocGroup, Location, Var, Vegetation,
## Year with one row per fire (FID) giving its size in cells (Val).
extract_fsv <- function(i, loopBy, mainDir, reps=NULL, years=NULL, cells, readMethod="loop", ...){
# default vegetation class labels unless supplied via ...
if(is.null(list(...)$veg.labels)) {
veg.labels <- c("Black Spruce", "White Spruce", "Deciduous", "Shrub Tundra", "Graminoid Tundra", "Wetland Tundra", "Barren lichen-moss", "Temperate Rainforest")
} else veg.labels <- list(...)$veg.labels
x <- prep_alf_files(i=i, loopBy=loopBy, mainDir=mainDir, reps=reps, years=years)
cells <- group_by(cells, LocGroup, Location)
d.fs <- vector("list", length(x$iter))  # preallocate one element per replicate/year
# fire size by vegetation class
if(readMethod=="stack"){
print("#### READING FULL REPLICATE STACK TO MEMORY ####")
# FID band 2 holds the fire scar IDs; NA = unburned
v <- list(FID=getValues(stack(x$FID, bands=2)), Veg=getValues(stack(x$Veg, quick=TRUE)))
gc()
print("#### READ COMPLETE ####")
for(j in 1:length(x$iter)){ # fire size by vegetation class
# count burned cells per region x vegetation x fire ID
d <- filter(cells, Cell %in% which(!is.na(v$FID[,j]))) %>% mutate(Vegetation=factor(veg.labels[v$Veg[Cell,j]], levels=veg.labels), FID=v$FID[Cell,j]) %>%
group_by(LocGroup, Location, Vegetation, FID) %>% summarise(Val=length(Cell), Var="Fire Size")
d.fs[[j]] <- if(loopBy=="rep") mutate(d, Replicate=x$iter[j]) else if(loopBy=="year") mutate(d, Year=x$iter[j])
print(switch(loopBy, "rep"=paste0("Year ", years[i], ": Replicate ", x$iter[j]), "year"=paste0("Replicate ", i, ": Year ", years[x$iter[j]])))
}
rm(v)
gc()
} else if(readMethod=="loop"){
# fire size by vegetation class
#fn <- function(j, x, v, cells, veg.labels, loopBy){
# d <- filter(cells, Cell %in% which(!is.na(v$FID))) %>% mutate(Vegetation=factor(veg.labels[v$Veg[Cell]], levels=veg.labels), FID=v$FID[Cell]) %>%
# group_by(LocGroup, Location, Vegetation, FID) %>% summarise(Val=length(Cell), Var="Fire Size")
# d <- if(loopBy=="rep") mutate(d, Replicate=x$iter[j]) else if(loopBy=="year") mutate(d, Year=x$iter[j])
# d
#}
#system.time({ d.fs <- lapply(1:10, fn, x, v, cells, veg.labels, loopBy) })
# fire size by vegetation class
#fn <- function(j, x, cells, veg.labels, loopBy){
# v <- list(FID=getValues(raster(x$FID[j], band=2)), Veg=getValues(raster(x$Veg[j])))
# d <- filter(cells, Cell %in% which(!is.na(v$FID))) %>% mutate(Vegetation=factor(veg.labels[v$Veg[Cell]], levels=veg.labels), FID=v$FID[Cell]) %>%
# group_by(LocGroup, Location, Vegetation, FID) %>% summarise(Val=length(Cell), Var="Fire Size")
# d <- if(loopBy=="rep") mutate(d, Replicate=x$iter[j]) else if(loopBy=="year") mutate(d, Year=x$iter[j])
# d
#}
#system.time({ d.fs <- lapply(1:length(x$iter), fn, x, cells, veg.labels, loopBy) })
for(j in 1:length(x$iter)){ # fire size by vegetation class
# read one FID/Veg layer pair at a time to limit memory use
v <- list(FID=getValues(raster(x$FID[j], band=2)), Veg=getValues(raster(x$Veg[j])))
d <- filter(cells, Cell %in% which(!is.na(v$FID))) %>% mutate(Vegetation=factor(veg.labels[v$Veg[Cell]], levels=veg.labels), FID=v$FID[Cell]) %>%
group_by(LocGroup, Location, Vegetation, FID) %>% summarise(Val=length(Cell), Var="Fire Size")
d.fs[[j]] <- if(loopBy=="rep") mutate(d, Replicate=x$iter[j]) else if(loopBy=="year") mutate(d, Year=x$iter[j])
print(switch(loopBy, "rep"=paste0("Year ", years[i], ": Replicate ", x$iter[j]), "year"=paste0("Replicate ", i, ": Year ", years[x$iter[j]])))
}
}
# attach the constant dimension (Year or Replicate), then order columns/rows
d.fs <- if(loopBy=="rep") rbindlist(d.fs)[, Year:=as.integer(years[i])] else if(loopBy=="year") rbindlist(d.fs)[, Replicate:=as.integer(i)]
d.fs <- setcolorder(d.fs, c("LocGroup", "Location", "Var", "Vegetation", "Year", "Val", "FID", "Replicate")) %>%
group_by(LocGroup, Location, Var, Vegetation, Year) %>% setorder(Replicate, LocGroup, Location, Var, Vegetation, Year, Val)
print(paste("Returning fire size by vegetation class data table."))
d.fs
}
# @knitr extract_av
## Extract vegetated area and vegetation age distributions from ALFRESCO
## Age/Veg geotiffs.
##
## i, loopBy, mainDir, reps, years : see prep_alf_files
## ageDir     : output directory for per-location age .RData files (loopBy=="year")
## cells      : data frame mapping raster cells (Cell_rmNA) to regions
## readMethod : accepted for interface parity; not branched on here
## ...        : optional veg.labels and n.samples
##
## loopBy=="rep":  returns list(d.area, d.age) data tables.
## loopBy=="year": saves per-location age tables to ageDir and returns list(d.area).
extract_av <- function(i, loopBy, mainDir, ageDir=NULL, reps=NULL, years=NULL, cells, readMethod="loop", ...){
# default vegetation class labels unless supplied via ...
if(is.null(list(...)$veg.labels)) {
veg.labels <- c("Black Spruce", "White Spruce", "Deciduous", "Shrub Tundra", "Graminoid Tundra", "Wetland Tundra", "Barren lichen-moss", "Temperate Rainforest")
} else veg.labels <- list(...)$veg.labels
# NOTE(review): n.samples is set here but not used below -- confirm intent
if(!is.numeric(list(...)$n.samples)) n.samples <- 1000 else n.samples <- list(...)$n.samples
x <- prep_alf_files(i=i, loopBy=loopBy, mainDir=mainDir, reps=reps, years=years)
cells <- group_by(cells, LocGroup, Location)
r <- getValues(raster(x$Age[1])) # use as a template
idx <- which(!is.na(r))          # non-NA cell positions
idx.rmNA <- which(idx %in% 1:length(r))
d.age <- vector("list", length(x$iter))
for(j in 1:length(x$iter)){
v <- list(Age=getValues(raster(x$Age[j]))[idx], Veg=getValues(raster(x$Veg[j]))[idx])
# negative ages indicate integer wraparound in the source rasters
v$Age[v$Age < 0] <- v$Age[ v$Age < 0] + 2147483647 # temporary hack
# frequency of each age per region x vegetation class
d <- filter(cells, Cell_rmNA %in% idx.rmNA) %>% mutate(Vegetation=factor(veg.labels[v$Veg[Cell_rmNA]], levels=veg.labels), Age=v$Age[Cell_rmNA]) %>%
group_by(LocGroup, Location, Vegetation, Age) %>% summarise(Freq=length(Cell_rmNA))
d.age[[j]] <- if(loopBy=="rep") mutate(d, Replicate=x$iter[j]) else if(loopBy=="year") mutate(d, Year=x$iter[j])
print(switch(loopBy, "rep"=paste0("Year ", years[i], ": Replicate ", x$iter[j]), "year"=paste0("Replicate ", i, ": Year ", years[x$iter[j]])))
}
# attach the constant dimension (Year or Replicate), then order rows
d.age <- if(loopBy=="rep") rbindlist(d.age)[, Year:=as.integer(years[i])] else if(loopBy=="year") rbindlist(d.age)[, Replicate:=as.integer(i)]
d.age <- group_by(d.age, LocGroup, Location, Year, Vegetation) %>% setorder(Replicate, LocGroup, Location, Year, Vegetation, Age, Freq)
# total vegetated cells per group = area summary
d.area <- group_by(d.age, Replicate, add=T) %>% summarise(Val=sum(Freq))
d.area <- mutate(d.area, Var="Vegetated Area")
d.area <- data.table(d.area)
setcolorder(d.area, c("LocGroup", "Location", "Var", "Vegetation", "Year", "Val", "Replicate"))
locs <- unique(d.age$Location)
if(loopBy=="rep"){
# collapse replicates: total age frequencies per group
d.age <- group_by(d.age, Age, add=T) %>% summarise(Freq=sum(Freq))
d.age <- mutate(d.age, Var="Vegetation Age")
d.age <- data.table(d.age)
setcolorder(d.age, c("LocGroup", "Location", "Var", "Vegetation", "Year", "Age", "Freq"))
return(list(d.area=d.area, d.age=d.age))
}
if(loopBy=="year"){
# age tables are large: save one .RData file per location instead of returning
setkey(d.age, Location)
for(j in 1:length(locs)){
obj.name.tmp <- paste0("age__", locs[j], "__rep", i)
assign(obj.name.tmp, d.age[locs[j]])
save(list=c("locs", obj.name.tmp), file=paste0(ageDir, "/", obj.name.tmp, ".RData"))
print(paste(obj.name.tmp, "object", j, "of", length(locs), "saved."))
rm(list=obj.name.tmp)
gc()
}
rm(d.age)
gc()
return(list(d.area=d.area))
}
}
|
451032cd913047e575ff4091411a0746de6c93a1
|
8b7630b35f68dccbbcd6a7656d4c8bd5ce3123a1
|
/zeta_boxplot.R
|
8c662c4841753ec6858694bedba2ce226bf3804b
|
[] |
no_license
|
omarchy/TMC
|
4722d24b89e7e173617a87928f261c73be9ce5a1
|
4ae552d92f80678ee830b2153c40a854de9b5a4c
|
refs/heads/main
| 2023-06-22T03:23:44.221828
| 2021-07-19T20:25:23
| 2021-07-19T20:25:23
| 344,884,815
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,295
|
r
|
zeta_boxplot.R
|
## Zeta potential analysis of microplastics in several source waters.
## Reads measurements from OmarZeta.xlsx; expects columns Water, Dose,
## Sample_type, Microplastic and ZP (zeta potential, mV).
library("readxl")
library(writexl)
library(dplyr)
library(ggplot2)
library(rstatix)
library(reshape2)
#######################################
zeta <- read_xlsx("OmarZeta.xlsx")  # NOTE(review): path relative to working dir
##Boxplots of microplastics initial zeta potentials in diluted jars##
zeta$Dose <- as.factor(zeta$Dose)   # treat alum dose as categorical for plotting
## undosed (Dose 0) samples in the four study waters
initial_zp <- zeta %>%
  filter(Water == "MilliQ" |
           Water == "Grand River" |
           Water == "Lake Ontario" |
           Water == "Salt water") %>%
  filter(Dose == "0" &
           Sample_type == "Dosed")
ggplot(initial_zp, aes(x = Water, y = ZP))+
  geom_boxplot()+
  facet_wrap(~Microplastic)+
  theme_bw()+
  geom_hline(yintercept = 0, color = "Blue", linetype = "dotted")+  # zero-charge reference
  coord_flip()+
  labs(y = "Zeta potential (mV)", title = "Zeta potentials before alum dosing (concentration = 500 #/mL)")+
  theme(axis.title.x = element_text(size = 12),
        axis.title.y = element_text(size = 12),
        axis.text = element_text(size = 12))
##Zeta potentials of stock solutions##
zeta %>%
  filter(Sample_type == "Stock") %>%
  filter(Water != "Calgary water")%>%
  filter(Microplastic != "AcrylicG") %>%
  filter(Microplastic != "None") %>%
  ggplot(aes(x = Water, y = ZP))+
  geom_boxplot()+
  facet_wrap(~Microplastic)+
  theme_bw()+
  coord_flip()+
  geom_hline(yintercept = 0, color = "blue", linetype = "dotted")+
  labs(y = "Zeta potential (mV)", title = "Zeta potentials of microplastic stock solutions") +
  theme(axis.title.x = element_text(size = 14),
        axis.title.y = element_text(size = 14),
        axis.text = element_text(size = 12))
####################################
##Zeta potential of the raw waters##
####################################
zeta_raw <- zeta %>%
  filter(Sample_type == "Raw")
ggplot(zeta_raw, aes(x = Water, y = ZP))+
  geom_boxplot()+
  theme_bw()+
  #coord_flip()+
  geom_hline(yintercept = 0, color = "blue", linetype = "dotted")+
  ylim(-40,0)+
  labs(y = "Zeta potential (mV)", title = "Zeta potentials of raw water") +
  theme(axis.title.x = element_text(size = 14),
        axis.title.y = element_text(size = 14),
        axis.text = element_text(size = 12))
##Optimum dose determination for Grand River water##
zeta$Dose <- as.factor(zeta$Dose)
zeta %>%
  filter(Sample_type == "Dosed" &
           Water == "Grand River" &
           Microplastic == "PS") %>%
  ggplot(aes(x = Dose, y = ZP), color = Water)+
  geom_boxplot()+
  #facet_wrap(~Microplastic)+
  theme_bw()+
  #coord_flip()+
  geom_hline(yintercept = 0, color = "blue", linetype = "dashed")+
  labs(x = "Dose (mg/L)",y = "Zeta potential (mV)",
       title = "Optimum alum dose determination for Grand River water")+
  theme(axis.title.x = element_text(size = 14),
        axis.title.y = element_text(size = 14),
        axis.text = element_text(size = 12))
###############################################
##Optimum dose determination for Lake Ontario##
###############################################
zeta %>%
  filter(Sample_type == "Dosed" &
           Water == "Lake Ontario" &
           Microplastic == "PS") %>%
  ggplot(aes(x = Dose, y = ZP), color = Water)+
  geom_boxplot()+
  #facet_wrap(~Microplastic)+
  theme_bw()+
  #coord_flip()+
  geom_hline(yintercept = 0, color = "blue", linetype = "dashed")+
  labs(x = "Dose (mg/L)",y = "Zeta potential (mV)",
       title = "Optimum alum dose determination for Lake Ontario water")+
  theme(axis.title.x = element_text(size = 14),
        axis.title.y = element_text(size = 14),
        axis.text = element_text(size = 12))
##Comparing raw water zeta and dose 0 zetas
## NOTE(review): `|` binds looser than `&`, so this keeps Raw samples OR
## (Dosed samples at dose 0) -- presumably intended; confirm.
zeta %>%
  filter(Sample_type == "Raw" |
           Sample_type == "Dosed" &
           Dose == 0) %>%
  ggplot(aes(x = Water, y = ZP))+
  geom_boxplot()+
  facet_wrap(Microplastic~Sample_type)+
  theme_bw()+
  coord_flip()+
  geom_hline(yintercept = 0, color = "blue", linetype = "dotted")+
  labs(y = "Zeta potential (mV)", title = "Zeta potentials of raw water and seeded sample solutions")
##Calculating mean and std. deviations of zeta potential for 0 and optimum dose (200 mg/L)
##GRAND RIVER##
Stats_GR <- zeta %>%
  filter(Water == "Grand River" &
           (Sample_type == "Dosed" |
              Sample_type == "Stock")) %>%
  group_by(Microplastic, Sample_type, Dose) %>%
  summarize(Mean = mean(ZP), Standard_deviation = sd(ZP))
## NOTE(review): hard-coded row removal -- fragile if the input data change
Stats_GR <- Stats_GR[-c(13:25,27),]
write_xlsx(Stats_GR,"Stats_GR.xlsx")
ggplot(Stats_GR, aes(x = Microplastic, y = Mean, shape = Dose, color = Sample_type))+
  geom_point(size = 2.5)+
  #facet_wrap(~Sample_type)+
  theme_bw()+
  scale_color_brewer(palette = "Dark2")+
  geom_hline(yintercept = 0, color = "blue", linetype = "dashed")+
  labs(shape = "Alum dose (mg/L)",y = "Mean zeta potential (mV)",
       title = "Mean zeta potentials of Grand River water samples")+
  theme(plot.title = element_text(size = 16),
        axis.title.x = element_text(size = 14),
        axis.title.y = element_text(size = 14),
        axis.text = element_text(size = 12),
        legend.text = element_text(size = 12),
        legend.title = element_text(size = 14))
##Mean zetas of both Grand River and Lake Ontario in one##
Stats_GR_LO <- zeta %>%
  filter((Water == "Grand River" |
            Water == "Lake Ontario") &
           (Sample_type == "Dosed" |
              Sample_type == "Stock"|
              Sample_type == "Raw")) %>%
  group_by(Water,Microplastic, Sample_type, Dose) %>%
  summarize(Mean = mean(ZP), Standard_deviation = sd(ZP))
## NOTE(review): hard-coded row removal -- fragile if the input data change
Stats_GR_LO <- Stats_GR_LO[-c(14:26,28,40,42,45:49),]
write_xlsx(Stats_GR_LO,"Stats_GR_LO.xlsx")
ggplot(Stats_GR_LO, aes(x = Microplastic, y = Mean, ymin = Mean - Standard_deviation,
                        ymax = Mean + Standard_deviation, shape = Dose, color = Sample_type))+
  geom_errorbar(width = 0.2)+
  geom_point(size = 2.5)+
  facet_wrap(~Water)+
  theme_bw()+
  scale_color_brewer(palette = "Dark2")+
  geom_hline(yintercept = 0, color = "blue", linetype = "dashed")+
  labs(shape = "Alum dose (mg/L)",y = "Mean zeta potential (mV)",
       title = "Mean zeta potentials of Grand River and Lake Ontario water samples")+
  theme(plot.title = element_text(size = 16),
        axis.title.x = element_text(size = 14),
        axis.title.y = element_text(size = 14),
        axis.text = element_text(size = 12),
        legend.text = element_text(size = 12),
        legend.title = element_text(size = 14),
        legend.position = "bottom")
########################################
##LO,GR,MilliQ and Salt water together##
########################################
Stats_GR_LO_MQ_SW <- zeta %>%
  filter((Water == "Grand River" |
            Water == "Lake Ontario" |
            Water == "MilliQ" |
            Water == "Salt water") &
           (Sample_type == "Dosed" |
              Sample_type == "Stock"|
              Sample_type == "Raw") &
           Microplastic != "AcrylicG" ) %>%
  group_by(Water,Microplastic, Sample_type, Dose) %>%
  summarize(Mean = mean(ZP), Standard_deviation = sd(ZP))
## NOTE(review): hard-coded row removal; index 84 appears twice (harmless
## for negative indexing, but likely a typo)
Stats_GR_LO_MQ_SW <- Stats_GR_LO_MQ_SW[-c(13:25,27,38,40,43:46,48,57,58,59,63,67,68,77,84,84),]
write_xlsx(Stats_GR_LO_MQ_SW,"Stats_all.xlsx")
## the plot below reads a separate (presumably hand-edited) file, not the
## one written above -- confirm the workflow
stat_all_new <- read_xlsx("Stats_all_new.xlsx")
ggplot(stat_all_new, aes(x = factor(Microplastic,levels = c("None", "Acrylic","PE","PEEK","PS")),
                         y = Mean, ymin = Mean - Standard_deviation,
                         ymax = Mean + Standard_deviation, shape = Sample_type))+
  geom_errorbar(width = 0.1)+
  geom_point(size = 2.5, alpha = 0.70)+
  facet_wrap(~Water)+
  theme_bw()+
  scale_color_brewer(palette = "Dark2")+
  geom_hline(yintercept = 0, color = "blue", linetype = "dashed")+
  labs(shape = "Sample type",y = "Mean zeta potential (mV)", x = "Microplastic")+
  #title = "Mean zeta potentials of Grand River, Lake Ontario, MilliQ and Saltwater samples")+
  theme(plot.title = element_text(size = 16),
        axis.title.x = element_text(size = 14),
        axis.title.y = element_text(size = 14),
        axis.text = element_text(size = 12),
        legend.text = element_text(size = 12),
        legend.title = element_text(size = 14),
        legend.position = "right",
        strip.text.x = element_text(size = 14))+
  scale_shape_manual(values = 15:18)
########################################
##Mean zetas for MilliQ and Salt water##
########################################
Stats_MQ_SW <- zeta %>%
  filter((Water == "MilliQ" |
            Water == "Salt water") &
           (Sample_type == "Dosed" |
              Sample_type == "Stock")) %>%
  group_by(Water,Microplastic, Sample_type, Dose) %>%
  summarize(Mean = mean(ZP), Standard_deviation = sd(ZP))
## NOTE(review): hard-coded row removal -- fragile if the input data change
Stats_MQ_SW <- Stats_MQ_SW[-c(4,6,7,11,15,16,20,28,29),]
write_xlsx(Stats_MQ_SW,"Stats_MQ_SW.xlsx")
ggplot(Stats_MQ_SW, aes(x = Microplastic, y = Mean, shape = Dose, color = Sample_type))+
  geom_point(size = 2.5)+
  facet_wrap(~Water)+
  theme_bw()+
  scale_color_brewer(palette = "Dark2")+
  geom_hline(yintercept = 0, color = "blue", linetype = "dashed")+
  labs(shape = "Alum dose (mg/L)",y = "Mean zeta potential (mV)",
       title = "Mean zeta potentials of MilliQ and Salt water samples")+
  theme(plot.title = element_text(size = 16),
        axis.title.x = element_text(size = 14),
        axis.title.y = element_text(size = 14),
        axis.text = element_text(size = 12),
        legend.text = element_text(size = 12),
        legend.title = element_text(size = 14),
        legend.position = "bottom")
##Calculating mean and std. deviations## Lake Ontario
Stats_LO <- zeta %>%
  filter(Water == "Lake Ontario" &
           (Sample_type == "Dosed" |
              Sample_type == "Stock")) %>%
  group_by(Microplastic, Sample_type, Dose) %>%
  summarize(Mean = mean(ZP), Standard_deviation = sd(ZP))
## NOTE(review): hard-coded row removal -- fragile if the input data change
Stats_LO <- Stats_LO[-c(15:19,10,12),]
write_xlsx(Stats_LO,"Stats_LO.xlsx")
ggplot(Stats_LO, aes(x = Microplastic, y = Mean, shape = Dose))+
  geom_point()+
  facet_wrap(~Sample_type)+
  theme_bw()+
  #scale_color_brewer(palette = "Dark2")+
  geom_hline(yintercept = 0, color = "blue", linetype = "dotted")+
  labs(shape = "Alum dose (mg/L)",y = "Mean zeta potential (mV)", title = "Mean zeta potentials for Lake Ontario")
##########
##T-test##
##########
##Raw water zeta and particle addition zeta comparison## PEEK
## NOTE(review): `|` binds looser than `&`, so this keeps Raw samples from
## all microplastics OR (dose-0 PEEK samples) -- confirm intent.
compare <- zeta %>%
  filter(Sample_type == "Raw" |
           Dose == 0 &
           Microplastic == "PEEK")
compare <- compare[,-c(1,2,6,7,9:13)]  # drop unneeded columns by position
compare <- compare %>%
  subset(Sample_type != "Stock")
Ttest_PEEK <- t.test(ZP ~ Sample_type, data = compare)
Ttest_PEEK$p.value
##Raw water zeta and particle addition zeta comparison## PE MilliQ
## NOTE(review): comment says PE but the filter selects PS -- confirm.
compare <- zeta %>%
  filter(Sample_type == "Raw" |
           Dose == 0 &
           Microplastic == "PS")%>%
  filter(Water == "MilliQ")
compare <- compare[,-c(1,2,6,7,9:13)]
compare <- compare %>%
  subset(Sample_type != "Stock")
Ttest_PE <- t.test(ZP ~ Sample_type, data = compare)
Ttest_PE$p.value
## t-test assumption checks: normality and equality of variances
with(compare, shapiro.test(ZP[Sample_type == "Dosed"]))
var.test(ZP ~ Sample_type, data = compare)
###############################
##Adding a column of p values##
###############################
## per-water Raw vs dose-0 t-test p-value for each microplastic type
compare_ps <- zeta %>%
  filter(Sample_type == "Raw" |
           Dose == 0 &
           Microplastic == "PS") %>%
  group_by(Water) %>%
  subset(Sample_type != "Stock") %>%
  subset(Water != "Calgary water") %>%
  mutate(p_value = t.test(ZP ~ Sample_type)$p.value)
compare_ps <- compare_ps[,-c(1,2,6,7,8,9:13)]
p_value_ps <- distinct(compare_ps)
##################################
compare_pe <- zeta %>%
  filter(Sample_type == "Raw" |
           Dose == 0 &
           Microplastic == "PE") %>%
  group_by(Water) %>%
  subset(Sample_type != "Stock") %>%
  subset(Water != "Calgary water") %>%
  mutate(p_value = t.test(ZP ~ Sample_type)$p.value)
compare_pe <- compare_pe[,-c(1,2,6,7,8,9:13)]
p_value_pe <- distinct(compare_pe)
#################################
compare_peek <- zeta %>%
  filter(Sample_type == "Raw" |
           Dose == 0 &
           Microplastic == "PEEK") %>%
  group_by(Water) %>%
  subset(Sample_type != "Stock") %>%
  subset(Water != "Calgary water") %>%
  mutate(p_value = t.test(ZP ~ Sample_type)$p.value)
compare_peek <- compare_peek[,-c(1,2,6,7,8,9:13)]
p_value_peek <- distinct(compare_peek)
#################################
compare_ac <- zeta %>%
  filter(Sample_type == "Raw" |
           Dose == 0 &
           Microplastic == "Acrylic") %>%
  group_by(Water) %>%
  subset(Sample_type != "Stock") %>%
  subset(Water != "Calgary water") %>%
  mutate(p_value = t.test(ZP ~ Sample_type)$p.value)
compare_ac <- compare_ac[,-c(1,2,6,7,8,9:13)]
p_value_ac <- distinct(compare_ac)
## combine all p-values and export
p_values <- rbind(p_value_pe,p_value_peek,p_value_ps,p_value_ac)
p_values <- subset(p_values, Microplastic != "None")
p_values <- p_values[,-2]
write_xlsx(p_values,"p_values.xlsx")
|
535816c804da7994b8b3d0bdde57ae7dc50a105f
|
903a04d3b2ad601eac56531a6b81692d96d40b2d
|
/R/homerange.R
|
acd9630091a7c2ad6fc7b2efcf8ba6a9fc73003f
|
[] |
no_license
|
cran/secr
|
a2b058587a4655e4688c912d3fecb5fa7838dca8
|
50b315739c723f61bcd82bbbb5c6770973fcb132
|
refs/heads/master
| 2023-07-20T04:45:01.190001
| 2023-07-10T23:40:02
| 2023-07-11T07:34:31
| 17,699,535
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,647
|
r
|
homerange.R
|
############################################################################################
## package 'secr'
## dbar.R, ARL.R, MMDM.R, RPSV.R
## all in homerange.R 2014-09-01
## 2014-09-01 modified for userdist
## 2014-09-10 does NOT work when userdist fn requires mask covariates..
## 2016-10-06 secr 3.0
## 2016-10-28 NOT YET userdist may be session-specific
## 2017-02-06 updated for telemetry (completing job started last month)
## 2020-08-31 ORL, centroids
## 2021-05-18 sortorder
############################################################################################
## Animal identifiers as a factor with one level per row of the capthist.
## Polygon-type detectors use "ksn" (trap-major) sort order; all other
## detector types use "snk".
getID <- function (det, capthist) {
    usepoly <- all(det %in% .localstuff$polydetectors)
    sortord <- if (usepoly) "ksn" else "snk"
    animal.names <- animalID(capthist, names = TRUE, sortorder = sortord)
    factor(animal.names, levels = rownames(capthist))
}
## Mean distance between successive detections (dbar).
##
## capthist : secr capthist object, or a list of them (handled recursively)
## userdist : optional user-supplied distance function or matrix
## mask     : habitat mask, passed to valid.userdist
##
## Returns the mean over animals of distances between successive detections
## (NA when not computable), or a list of such values for multi-session input.
dbar <- function (capthist, userdist = NULL, mask = NULL) {
    if (inherits (capthist, 'list')) {
        lapply(capthist, dbar, userdist, mask) ## recursive
    }
    else {
        ## distances between successive detectors of one animal, via distmat
        dbarx <- function (x) {
            x <- abs(unlist(x))
            ## assume that within animal, x is in order by occasion
            distmat[cbind(x[-length(x)], x[-1])] ## vector
        }
        ## distances between successive xy fixes of one animal
        dbarxy <- function (xy) {
            sqrt(diff(xy$x)^2 + diff(xy$y)^2)
        }
        if (nrow(capthist) < 1) return(NA)
        traps <- traps(capthist)
        det <- expanddet(capthist)
        IDfactor <- getID(det, capthist)
        ## 2014-09-01
        ## NOT USING PARAMETERS noneuc ETC
        distmat <- valid.userdist(userdist, det, traps, traps, mask )
        if (!all(det %in% .localstuff$individualdetectors))
            stop ("require individual detector type for dbar")
        if (all(det %in% 'telemetry')) {
            lxy <- telemetryxy(capthist)
            if (is.null(lxy))
                NA
            else {
                d <- try(lapply(lxy,dbarxy), silent = TRUE)
                if (inherits(d, 'try-error'))
                    d <- NA
                mean(unlist(d), na.rm=T)
            }
        }
        else if (all(det %in% .localstuff$polydetectors)) {
            if (is.null(xy(capthist)))
                NA
            else {
                lxy <- split (xy(capthist), IDfactor)
                d <- try(lapply(lxy,dbarxy), silent = TRUE)
                if (inherits(d, 'try-error'))
                    d <- NA
                mean(unlist(d), na.rm=T)
            }
        }
        else {
            ## order is essential 2016-10-07
            if (any(det %in% 'telemetry')) {
                ## drop telemetry occasions and the notional telemetry detector
                ## (last row of traps). Bug fix: the original computed
                ## 1:(nrow(traps(capthist)-1)); subtracting 1 from the traps
                ## data frame leaves nrow unchanged, so no detector was dropped.
                capthist <- subset(capthist,
                                   occasions = det != 'telemetry',
                                   traps = 1:(nrow(traps(capthist)) - 1))
                IDfactor <- factor(animalID(capthist, names = TRUE),
                                   levels = rownames(capthist))
            }
            w <- split(trap(capthist, names = FALSE), IDfactor)
            d <- try(unlist(lapply(w,dbarx)), silent = TRUE)
            if (inherits(d, 'try-error'))
                d <- NA
            mean(d, na.rm=T)
        }
    }
}
############################################################################################
## Distances between successive detections of each animal.
##
## capthist : secr capthist object, or a list of them (handled recursively)
## userdist : optional user-supplied distance function or matrix
## mask     : habitat mask, passed to valid.userdist
## names    : if FALSE, animal names on the result are replaced by serial numbers
##
## Returns a list of numeric vectors (one per animal) of move lengths;
## empty vectors for nonspatial input.
moves <- function (capthist, userdist = NULL, mask = NULL, names = FALSE) {
    if (inherits (capthist, 'list')) {
        ## bug fix: also forward `names` (previously dropped, so the default
        ## FALSE was always used in the recursive call)
        lapply(capthist, moves, userdist, mask, names) ## recursive
    }
    else {
        ## distances between successive detectors, looked up in distmat
        movex <- function (x) {
            x <- abs(unlist(x))
            distmat[cbind(x[-length(x)], x[-1])] ## vector
        }
        ## distances between successive xy fixes
        movexy <- function (xy) {
            sqrt(diff(xy$x)^2 + diff(xy$y)^2)
        }
        traps <- traps(capthist)
        if (is.null(traps)) {
            ## return empty vector if nonspatial 2019-04-04
            nam <- rownames(capthist)
            if (is.null(nam)) nam <- 1:nrow(capthist)
            sapply(nam, function(x) numeric(0), simplify = FALSE)
        }
        else {
            det <- expanddet(capthist)
            IDfactor <- getID(det, capthist)
            distmat <- valid.userdist(userdist, det, traps, traps, mask)
            if (!all(det %in% .localstuff$individualdetectors))
                stop ("require individual detector type for moves")
            if (all(det %in% 'telemetry')) {
                lxy <- telemetryxy(capthist)
                if (is.null(lxy))
                    out <- NA
                else {
                    out <- lapply (lxy, movexy)
                }
            }
            else if (all(det %in% .localstuff$polydetectors)) {
                if (is.null(xy(capthist)))
                    out <- NA
                else {
                    lxy <- split (xy(capthist), IDfactor)
                    out <- lapply (lxy, movexy)
                }
            }
            else {
                ## order is essential 2016-10-08
                if (any(det %in% 'telemetry')) {
                    ## drop telemetry occasions and the notional telemetry
                    ## detector (last row). Bug fix: 1:(nrow(traps(capthist)-1))
                    ## left the row count unchanged, so nothing was dropped.
                    capthist <- subset(capthist,
                                       occasions = det != 'telemetry',
                                       traps = 1:(nrow(traps(capthist)) - 1))
                    IDfactor <- factor(animalID(capthist, names = TRUE),
                                       levels = rownames(capthist))
                }
                ## 2020-08-27
                w <- split(trap(capthist, names = FALSE), IDfactor)
                out <- lapply(w,movex)
            }
            ## 2022-01-21
            # if (!names) names(out) <- 1:length(out)
            if (!names && length(out)>0) names(out) <- 1:length(out)
            out
        }
    }
}
############################################################################################
## Tally animals by the number of distinct detectors at which each was
## recorded. `capthist` is an animals x occasions x detectors array (or a
## list of them, handled recursively). Returns a named vector whose k-th
## element is the number of animals detected at exactly k detectors
## (a single named 0 when there are no animals).
trapsPerAnimal <- function (capthist) {
    if (inherits(capthist, 'list')) {
        return(lapply(capthist, trapsPerAnimal))   ## multi-session input
    }
    ## TRUE where animal i was ever detected at detector k (abs() ignores deads)
    detected <- apply(abs(capthist), c(1, 3), sum) > 0
    ndet <- apply(detected, 1, sum)                ## detectors per animal
    if (length(ndet) > 0)
        counts <- tabulate(ndet, nbins = max(ndet))
    else
        counts <- 0
    names(counts) <- seq_along(counts)
    counts
}
############################################################################################
## Asymptotic range length (ARL): fits maxd ~ aa * (1 - exp(bb * (n-1)))
## to per-animal maximum detection distances vs number of captures.
##
## capthist   : secr capthist object, or a list of them (handled recursively)
## min.recapt : animals with <= min.recapt captures are excluded from the fit
## plt        : if TRUE, plot data and fitted curve
## full       : if TRUE, return list(ARL, b, data); otherwise just ARL
## userdist   : optional user-supplied distance function or matrix
## mask       : habitat mask, passed to valid.userdist
ARL <- function (capthist, min.recapt = 1, plt = FALSE, full = FALSE, userdist = NULL,
                 mask = NULL) {
    if (inherits (capthist, 'list')) {
        ## bug fix: `mask` was previously passed positionally and so matched
        ## the `min.recapt` formal; min.recapt itself was not forwarded.
        lapply(capthist, ARL, min.recapt = min.recapt, plt = plt, full = full,
               userdist = userdist, mask = mask) ## recursive
    }
    else {
        ## maximum between-detector distance for one animal (NA if caught once)
        MMDMx <- function (cx) {
            cx <- abs(cx) # no special trt for deads
            if (sum(cx>0, na.rm=T) == 1) NA
            else {
                as.numeric(max(distmat[cx, cx]))
            }
        }
        ## maximum distance among raw xy fixes of one animal
        MMDMxy <- function (xy) {
            max(dist(cbind(xy$x, xy$y)))
        }
        if (nrow(capthist) < 1) return(NA)
        traps <- traps(capthist)
        det <- expanddet(capthist)
        IDfactor <- getID(det, capthist)
        if (!all(det %in% .localstuff$individualdetectors))
            stop ("require individual detector type for ARL")
        distmat <- valid.userdist(userdist, det, traps, traps, mask )
        ## (removed unused local `prox`)
        if (all(det %in% 'telemetry')) {
            lxy <- telemetryxy(capthist)
            if (is.null(lxy))
                stop("no telemetry coordinates")
            else {
                maxd <- unlist(lapply (lxy, MMDMxy))
                n <- unlist(lapply (lxy, nrow))
            }
        }
        else if (all(det %in% .localstuff$polydetectors)) {
            if (is.null(xy(capthist)))
                stop("no xy coordinates")
            else {
                lxy <- split (xy(capthist), IDfactor)
                maxd <- unlist(lapply (lxy, MMDMxy))
                n <- unlist(lapply (lxy, nrow))
            }
        }
        else {
            ## order is essential 2016-10-08
            if (any(det %in% 'telemetry')) {
                ## drop telemetry occasions and the notional telemetry
                ## detector (last row). Bug fix: 1:(nrow(traps(capthist)-1))
                ## left the row count unchanged, so nothing was dropped.
                capthist <- subset(capthist,
                                   occasions = det != 'telemetry',
                                   traps = 1:(nrow(traps(capthist)) - 1))
                IDfactor <- factor(animalID(capthist, names = TRUE),
                                   levels = rownames(capthist))
            }
            w <- split(trap(capthist, names = FALSE), IDfactor)
            maxd <- unlist(lapply(w, MMDMx))
            n <- unlist(lapply(w, length))
        }
        ## restrict to adequately recaptured animals, then fit asymptote
        maxd <- maxd[n>min.recapt]
        n <- n[n>min.recapt]
        temp <- try(coef(nls (maxd ~ aa * (1 - exp(bb * (n-1))),
                              start= list (aa = max(maxd)*1.2, bb = -0.4))))
        if (inherits(temp, "try-error")) {
            warning ("nls failure")
            aa <- NA
            bb <- NA
        }
        else {
            aa <- temp[1]
            bb <- temp[2]
            if (plt) {
                plot (jitter(n, amount=0.1), maxd,
                      xlim=c(0,max(c(n,ncol(capthist)))),
                      xlab='Number of captures', ylab='ARL')
                xv <- seq(2,max(n),0.01)
                lines(xv, aa * (1 - exp(bb * (xv-1))))
            }
        }
        attr(aa,'names') <- NULL
        attr(bb,'names') <- NULL
        if (!full) aa
        else list (ARL = aa, b = bb, data = data.frame(maxd = maxd, n=n))
    }
}
############################################################################################
## Mean maximum distance moved (MMDM).
##
## capthist   : secr capthist object, or a list of them (handled recursively)
## min.recapt : animals with <= min.recapt captures are excluded
## full       : if TRUE, return list(MMDM, data, summary); otherwise just MMDM
## userdist   : optional user-supplied distance function or matrix
## mask       : habitat mask, passed to valid.userdist
MMDM <- function (capthist, min.recapt = 1, full = FALSE, userdist = NULL, mask = NULL) {
    if (inherits (capthist, 'list')) {
        ## bug fix: forward min.recapt (previously the default was always used
        ## in the recursive call)
        lapply(capthist, MMDM, min.recapt = min.recapt, full = full,
               userdist = userdist, mask = mask) ## recursive
    }
    else {
        ## maximum between-detector distance for one animal (NA if caught once)
        MMDMx <- function (cx) {
            cx <- abs(cx) # no special trt for deads
            if (sum(cx>0, na.rm=T) == 1) NA
            else {
                as.numeric(max(distmat[cx, cx]))
            }
        }
        ## maximum distance among raw xy fixes of one animal
        MMDMxy <- function (xy) {
            max(dist(cbind(xy$x, xy$y)))
        }
        if (nrow(capthist) < 1) return(NA)
        traps <- traps(capthist)
        det <- expanddet(capthist)
        IDfactor <- getID(det, capthist)
        distmat <- valid.userdist(userdist, det, traps, traps, mask )
        if (!all(det %in% .localstuff$individualdetectors))
            stop ("require individual detector type for MMDM")
        if (all(det %in% 'telemetry')) {
            lxy <- telemetryxy(capthist)
            if (is.null(lxy))
                stop ("no telemetry coordinates")
            else {
                maxd <- unlist(lapply (lxy, MMDMxy))
                n <- unlist(lapply (lxy, nrow))
            }
        }
        else if (all(det %in% .localstuff$polydetectors)) {
            if (is.null(xy(capthist)))
                stop ("no xy coordinates")
            else {
                lxy <- split (xy(capthist), IDfactor)
                maxd <- unlist(lapply (lxy, MMDMxy))
                n <- unlist(lapply (lxy, nrow))
            }
        }
        else {
            if (any(det %in% 'telemetry')) {
                ## drop telemetry occasions and the notional telemetry
                ## detector (last row). Bug fix: 1:(nrow(traps(capthist)-1))
                ## left the row count unchanged, so nothing was dropped.
                capthist <- subset(capthist,
                                   occasions = det != 'telemetry',
                                   traps = 1:(nrow(traps(capthist)) - 1))
                IDfactor <- factor(animalID(capthist, names = TRUE),
                                   levels = rownames(capthist))
            }
            w <- split(trap(capthist, names = FALSE), IDfactor)
            maxd <- unlist(lapply( w, MMDMx))
            n <- unlist(lapply(w, length))
        }
        ## restrict to adequately recaptured animals, then average
        maxd <- maxd[n>min.recapt]
        n <- n[n>min.recapt]
        temp <- mean (maxd, na.rm = TRUE)
        if (!full) temp
        else {
            SE <- function(x) sqrt(var(x, na.rm=T)/sum(!is.na(x)))
            ## per-capture-frequency breakdown plus an overall Total row
            summaryd <- data.frame (Ncapt = names(table(n)),
                                    n = as.numeric(table(n)),
                                    mean = tapply(maxd, n, mean, na.rm=T),
                                    se = tapply(maxd, n, SE))
            summaryd$mean[is.na(summaryd$mean)] <- NA ## tidier
            summaryd <- rbind(summaryd, data.frame(Ncapt='Total', n=sum(!is.na(maxd)),
                                                   mean=temp, se=SE(maxd)))
            list (MMDM = temp, data = data.frame(maxd = maxd, n=n), summary = summaryd )
        }
    }
}
############################################################################################
## RPSV: Root Pooled Spatial Variance -- an index of the spatial scale of
## detections, computed from each animal's scatter of detection locations
## and pooled over animals.
##
## capthist : single-session capthist object, or a list of them (recursive)
## CC       : if TRUE use the Calhoun & Casby denominator 2*sum(n-1);
##            otherwise the secr convention sum(n-1) - 1
##
## Returns a scalar (or a list of scalars for multi-session input), or NA
## when the pooled sums of squares cannot be computed.
RPSV <- function (capthist, CC = FALSE)
{
    if (inherits (capthist, 'list')) {
        lapply(capthist, RPSV, CC)   ## recursive over sessions
    }
    else {
        ## per-animal (n-1, ssx, ssy) from trap indices; sign is ignored so
        ## dead animals are treated like live ones
        RPSVx <- function (cx) {
            cx <- abs(cx)
            x <- traps$x[cx]
            y <- traps$y[cx]
            n <- length(x)
            c(n = n-1, ssx = sum(x^2) - (sum(x))^2/n, ssy = sum(y^2) - (sum(y))^2/n)
        }
        ## per-animal (n-1, ssx, ssy) from raw xy coordinates
        RPSVxy <- function (xy) {
            x <- xy[,1]
            y <- xy[,2]
            n <- length(x)
            c(n = n-1, ssx = sum(x^2) - (sum(x))^2/n, ssy = sum(y^2) - (sum(y))^2/n)
        }
        if (nrow(capthist) < 1) return(NA)
        traps <- traps(capthist)
        det <- expanddet(capthist)
        if (!all(det %in% .localstuff$individualdetectors))
            stop ("require individual detector type for RPSV")
        IDfactor <- getID(det, capthist)
        if (all(det %in% 'telemetry')) {
            lxy <- telemetryxy(capthist)
            if (is.null(lxy))
                temp <- NA
            else {
                temp <- lapply (lxy, RPSVxy)
            }
        }
        else if (all(det %in% .localstuff$polydetectors)) {
            if (is.null(xy(capthist)))
                temp <- NA
            else {
                lxy <- split ( xy(capthist), IDfactor)
                temp <- lapply (lxy, RPSVxy)
            }
        }
        else {
            if (any(det %in% 'telemetry')) {
                ## drop telemetry occasions and the notional telemetry
                ## detector (assumed to be the last trap row).
                ## FIX: original read 1:(nrow(traps(capthist)-1)), a
                ## misplaced parenthesis that retained all trap rows.
                capthist <- subset(capthist,
                                   occasions = det != 'telemetry',
                                   traps = 1:(nrow(traps(capthist)) - 1))
                IDfactor <- factor(animalID(capthist, names = TRUE),
                                   levels = rownames(capthist))
            }
            w <- split(trap(capthist, names = FALSE), IDfactor)
            temp <- lapply(w, RPSVx)
        }
        ## pool (n-1, ssx, ssy) across animals
        temp <- matrix(unlist(temp), nrow = 3)
        temp <- apply(temp, 1, sum, na.rm = TRUE)
        if (any(is.na(temp) | temp < 0)) {
            temp <- NA ## protected 2021-03-31
        }
        else {
            if (CC)
                temp <- sqrt((temp[2]+temp[3]) / (2 * temp[1]))
            else
                temp <- sqrt((temp[2]+temp[3]) / (temp[1]-1))
        }
        attr(temp,'names') <- NULL
        temp
    }
}
############################################################################################
## source ('d:\\density secr 1.3\\secr\\r\\mmdm.R')
## data(Peromyscus)
## MMDM(Peromyscus.WSG, full=T)$summary
## Ncapt n mean se
## 1 1 9 NA NA
## 2 2 9 28.32839 9.434631
## 3 3 10 24.05921 9.335062
## 4 4 8 33.87949 5.471227
## 5 5 8 52.37655 15.470420
## 6 6 7 34.24929 5.961350
## 7 Total 42 33.93669 4.495646
## MMDM(Peromyscus.WSG, full=T)$summary[,3:4]/15.2
## mean se
## 1 NA NA
## 2 1.863710 0.6206994
## 3 1.582843 0.6141488
## 4 2.228914 0.3599492
## 5 3.445826 1.0177908
## 6 2.253243 0.3921941
## 7 2.232677 0.2957662 <<<< 0.575?
## cf Otis et al 1978 p 87 Fig 23a
##################################################
## ORL: Observed Range Length -- for each detected animal, the maximum
## distance between any two of its detections (0 for a single detection).
##
## capthist : single-session capthist, or a list of them (recursive)
## userdist, mask : optional non-Euclidean distance specification forwarded
##     to valid.userdist()
##
## Returns a data frame with one row per animal: ORL (max distance) and n
## (number of detections).
ORL <- function (capthist, userdist = NULL, mask = NULL) {
    if (inherits (capthist, 'list')) {
        lapply(capthist, ORL, userdist, mask)   ## recursive over sessions
    }
    else {
        ## max pairwise distance among an animal's traps (trap indices;
        ## sign ignored so dead animals are treated like live ones)
        ORLx <- function (cx) {
            cx <- abs(cx)
            as.numeric(max(distmat[cx, cx]))
        }
        ## max pairwise distance among raw xy detections
        ORLxy <- function (xy) {
            if (nrow(xy) == 1)
                0
            else
                max(dist(cbind(xy$x, xy$y)))
        }
        if (nrow(capthist) < 1) return(NA)
        traps <- traps(capthist)
        det <- expanddet(capthist)
        if (!all(det %in% .localstuff$individualdetectors))
            ## FIX: message previously said "ARL" (copy-paste slip)
            stop ("require individual detector type for ORL")
        distmat <- valid.userdist(userdist, det, traps, traps, mask )
        ## (removed unused local 'prox')
        IDfactor <- getID(det, capthist)
        if (all(det %in% 'telemetry')) {
            lxy <- telemetryxy(capthist)
            if (is.null(lxy))
                stop("no telemetry coordinates")
            else {
                maxd <- unlist(lapply (lxy, ORLxy))
                n <- unlist(lapply (lxy, nrow))
            }
        }
        else if (all(det %in% .localstuff$polydetectors)) {
            if (is.null(xy(capthist)))
                stop("no xy coordinates")
            else {
                lxy <- split (xy(capthist), IDfactor)
                maxd <- unlist(lapply (lxy, ORLxy))
                n <- unlist(lapply (lxy, nrow))
            }
        }
        else {
            if (any(det %in% 'telemetry')) {
                ## drop telemetry occasions and the notional telemetry
                ## detector (last trap row).
                ## FIX: original read 1:(nrow(traps(capthist)-1)), a
                ## misplaced parenthesis that retained all trap rows.
                capthist <- subset(capthist,
                                   occasions = det != 'telemetry',
                                   traps = 1:(nrow(traps(capthist)) - 1))
                IDfactor <- factor(animalID(capthist, names = TRUE),
                                   levels = rownames(capthist))
            }
            w <- split(trap(capthist, names = FALSE), IDfactor)
            maxd <- unlist(lapply(w, ORLx))
            n <- unlist(lapply(w, length))
        }
        data.frame(ORL = maxd, n = n)
    }
}
############################################################################################
## 2020-08-05, 2020-08-31
## centroids: mean detection location for each animal.
##
## Multi-session input: returns a 3-D array (animal x c(meanx, meany) x
## session) with the per-session detection counts in attribute
## 'Ndetections'. Single session: returns a 2-column matrix, one row per
## animal, with counts in attribute 'Ndetections'.
centroids <- function (capthist) {
    if (ms(capthist)) {
        nsess <- length(capthist)
        out <- lapply(capthist, centroids)   ## recursive, one per session
        ## all animal IDs seen in any session (lapply, not sapply: the
        ## per-session rowname vectors are ragged)
        ID <- unique(unlist(lapply(out, rownames)))
        IDxy <- array(
            dim = c(length(ID), 2, nsess),
            dimnames = list(ID, c('meanx','meany'), session(capthist))
        )
        IDn <- matrix(0,
                      nrow = length(ID),
                      ncol = nsess,
                      dimnames = list(ID, 1:nsess)
        )
        ## (removed dead code: an 'IDcov' covariate data frame was built
        ## here but never used or returned)
        for (sess in seq_len(nsess)) {
            IDxy[rownames(out[[sess]]), 1:2, sess] <- out[[sess]]
            IDn[rownames(out[[sess]]), sess] <- attr(out[[sess]], 'Ndetections')
        }
        attr(IDxy, 'Ndetections') <- IDn
        IDxy
    }
    else {
        ## per-animal count and mean trap coordinates (trap indices; sign
        ## ignored so dead animals are treated like live ones)
        centresx <- function (cx) {
            cx <- abs(cx)
            x <- traps$x[cx]
            y <- traps$y[cx]
            c(n = length(x), meanx = mean(x, na.rm = TRUE), meany = mean(y, na.rm = TRUE))
        }
        ## per-animal count and mean of raw xy coordinates
        centresxy <- function (xy) {
            x <- xy[,1]
            y <- xy[,2]
            c(n = length(x), meanx = mean(x, na.rm = TRUE), meany = mean(y, na.rm = TRUE))
        }
        if (nrow(capthist) < 1) return(NA)
        traps <- traps(capthist)
        det <- expanddet(capthist)
        IDfactor <- getID(det, capthist)
        if (!all(det %in% .localstuff$individualdetectors))
            stop ("require individual detector type for centres")
        if (all(det %in% 'telemetry')) {
            lxy <- telemetryxy(capthist) ## already list by animal
            if (is.null(lxy))
                temp <- NA
            else {
                temp <- lapply (lxy, centresxy)
            }
        }
        else if (all(det %in% .localstuff$polydetectors)) {
            if (is.null(xy(capthist)))
                temp <- NA
            else {
                lxy <- split ( xy(capthist), IDfactor)
                temp <- lapply (lxy, centresxy)
            }
        }
        else {
            if (any(det %in% 'telemetry')) {
                ## drop telemetry occasions and the notional telemetry
                ## detector (last trap row).
                ## FIX: original read 1:(nrow(traps(capthist)-1)), a
                ## misplaced parenthesis that retained all trap rows.
                capthist <- subset(capthist,
                                   occasions = det != 'telemetry',
                                   traps = 1:(nrow(traps(capthist)) - 1))
            }
            IDfactor <- factor(animalID(capthist, names = FALSE))
            w <- split(trap(capthist, names = FALSE), IDfactor)
            temp <- lapply(w, centresx)
        }
        out <- do.call(rbind, temp)
        rownames(out) <- rownames(capthist)
        ncapt <- out[,'n', drop = FALSE]
        out <- out[,c('meanx','meany')]
        attr(out, 'Ndetections') <- ncapt
        out
    }
}
############################################################################################
## 2017-02-06 not exported
## RPSVxy: spatial-spread statistic for one animal's coordinates (x in
## column 1, y in column 2 of a matrix or data frame). Not exported.
## CC = TRUE uses the Calhoun & Casby denominator 2*(n-1); otherwise the
## secr convention (n-1) - 1 = n - 2.
RPSVxy <- function (xy, CC = FALSE) {
    x <- xy[,1]
    y <- xy[,2]
    n <- length(x)
    ## components: degrees of freedom and corrected sums of squares
    temp <- c(n = n-1, ssx = sum(x^2) - (sum(x))^2/n, ssy = sum(y^2) - (sum(y))^2/n)
    if (CC)
        temp <- sqrt((temp[2]+temp[3]) / (2 * temp[1]))
    else
        temp <- sqrt((temp[2]+temp[3]) / (temp[1]-1))
    attr(temp,'names') <- NULL
    temp
}
##################################################
## not exported secr 4.4.5 2021-07-07
## plotmoves: plot between-session movements of per-animal centroids for a
## multi-session capthist, drawing arrows (or segments) between consecutive
## session centroids for every animal detected in at least two sessions.
##
## byanimal      : one plot per animal instead of a single combined plot
## withinsession : overlay within-session detection tracks
## label         : mark each centroid with its session number
## arrows        : draw arrowheads on non-trivial moves
## ...           : passed to plot.traps
##
## Returns (invisibly printable) the vector of consecutive centroid moves.
## GENERALIZED: the session count was hard-coded to 5 (loops 1:4, labels
## 1:5); it is now taken from the centroids array.
plotmoves <- function (capthist, byanimal = FALSE, withinsession = FALSE,
                       label = TRUE, arrows = TRUE, ...) {
    if (!ms(capthist)) {
        stop("plotmoves expects multi-session capthist")
    }
    cen <- centroids(capthist)
    nsess <- dim(cen)[3]                 # number of sessions
    ct <- attr(cen, 'Ndetections') > 0
    ok <- apply(ct, 1, sum) > 1          # detected in at least 2 sessions
    if (!byanimal) {
        plot(traps(capthist[[1]]), ...)
    }
    for (j in which(ok)) {
        ch <- suppressWarnings(subset(capthist, rownames(cen)[j]))
        if (byanimal) {
            plot(traps(capthist[[1]]), ...)
            mtext(side = 3, rownames(cen)[j], line = 0.4, cex = 0.7)
        }
        if (withinsession) {
            plot(ch, add = TRUE, tracks = TRUE, varycol = FALSE,
                 title = '', subtitle = '')
        }
        for (i in seq_len(nsess - 1)) {
            ## squared displacement between consecutive session centroids
            move2 <- (cen[j,1,i]-cen[j,1,i+1])^2 + (cen[j,2,i+1]-cen[j,2,i])^2
            if (!is.na(move2) && move2 > 0.001 && arrows) {
                arrows(cen[j,1,i], cen[j,2,i], cen[j,1,i+1], cen[j,2,i+1],
                       lwd = 1.5, angle = 15, length = 0.15)
            }
            else {
                segments(cen[j,1,i], cen[j,2,i], cen[j,1,i+1], cen[j,2,i+1],
                         lwd = 1.5)
            }
        }
        if (label) {
            points(cen[j,1,], cen[j,2,], pch = 16, col = 'yellow', cex = 2)
            text(cen[j,1,], cen[j,2,], seq_len(nsess), cex = 0.9)
        }
    }
    d <- apply(cen, 1, function (xy) (diff(xy[1,])^2 + diff(xy[2,])^2)^0.5)
    d[!is.na(d)] # vector of consecutive moves
}
# par(mfrow=c(3,8), mar=c(2,2,2,2))
# d <- plotmoves(ovenCHp, label=T, arrows=F)
# symbols(circles=80, )
|
c351ffc8df2ad320b9bcc6556d52b8e40a829cb1
|
8227fe55923635d2c02e2382d7d9696d7f4b3f35
|
/R/Rslippy-internal.R
|
aa169ea35882d4f7b88205f3463f07fda99e9f79
|
[
"BSD-2-Clause"
] |
permissive
|
ozjimbob/Rslippy
|
1ff80939e725803368113424a692cf59760fafb0
|
c4edc2ae0d91c535a59bc38bb96d917cc78757d5
|
refs/heads/master
| 2021-01-19T12:58:26.081208
| 2014-07-28T01:42:07
| 2014-07-28T01:42:07
| 22,045,519
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,285
|
r
|
Rslippy-internal.R
|
.Random.seed <-
c(403L, 10L, -1165760154L, 1549851888L, 1556875283L, -1157145167L,
1955985504L, -1209608706L, 1348755849L, 1441175227L, 630379146L,
-1373557396L, -1036059921L, 2084373797L, -1879837732L, -1824357966L,
-1892933459L, -1769082777L, 791238542L, -2140348312L, -438555541L,
108525065L, -1204430696L, 2045854662L, 1035032353L, -1767394381L,
-915784926L, -34869996L, -1363288681L, -44595827L, 1126466724L,
1944774282L, -822731531L, -1410268337L, 432594006L, 587620096L,
-652281277L, -104783391L, 522331472L, 120208302L, 1699101017L,
1435141995L, 1499944666L, 539293596L, -635810913L, 144077301L,
-1639988084L, 1054076258L, -1095851715L, 1915521463L, -600215970L,
-1368348776L, -1777187077L, -594989415L, -1351641304L, 271805910L,
-1511517615L, 243108099L, 132801682L, 366017060L, 1997776103L,
-46089923L, 1201360308L, 1529467418L, 262158693L, 1074481151L,
892157574L, -437754352L, -166896461L, -183732399L, -1919812864L,
2003984862L, -1040078295L, 842394075L, 33904362L, 1654641804L,
1886673871L, 665809797L, -513220548L, 369193234L, -1013754355L,
-1585830585L, 1767517998L, 1373512328L, 231441291L, 1666159465L,
-171553480L, -2062991386L, -28215423L, -881733869L, -1469663038L,
-306588812L, -173719689L, 1475324013L, 1072613124L, -1217128918L,
-639329387L, -1863032785L, -345548490L, 789461344L, 1286174627L,
-1199087423L, -276320080L, -1547436082L, 448877369L, -1115317L,
526061818L, -1367070212L, 915016255L, -1049575659L, -1691898836L,
622687490L, -457286179L, -1480213289L, 282470334L, -1162079752L,
-698096165L, 1747766329L, -1701495416L, 897816886L, -1692023951L,
1878778275L, -2081293070L, -1661878204L, -1024134393L, -1693048099L,
579251156L, -1802278150L, 520629829L, 856970911L, -1753084890L,
1393617072L, -1920781997L, -811021199L, 597135776L, 406039870L,
-1706038199L, -357930757L, 1517158602L, 416871980L, 486615599L,
507159269L, 1233901468L, 1985626610L, 870673517L, -1279296089L,
857188302L, -1350834904L, 1992742699L, 57531721L, 797000280L,
-936813818L, -799050015L, 1510728307L, 57101026L, 487732820L,
720535383L, 1473164109L, 1264039652L, -465602870L, 495089461L,
975928719L, 1497856662L, -312274368L, -19066109L, -430472415L,
1276011536L, 1509000174L, 1565143961L, 1810992171L, 1798617370L,
1606826972L, -1766359585L, 1539746229L, -1530632372L, 444358050L,
1241304317L, -2013146633L, 31399710L, -1428192808L, 423230267L,
-1349694247L, 1475379304L, 1264280982L, -307159791L, 1571958467L,
-1024938286L, -2125187228L, 2080418471L, 2108847101L, -2038168076L,
1579942618L, 1242461221L, -643417793L, -2001043770L, -180612272L,
-775951501L, 1336409489L, -1915347008L, -1957447266L, -28302487L,
1254619035L, -1590403414L, 376961228L, 1147292047L, -82100795L,
-673685124L, -846548526L, 268995917L, -1323208697L, 1630281710L,
-2030377272L, -1510958645L, -1528067031L, -425223560L, -991836250L,
-406826559L, -1441708461L, 594922754L, -702890956L, 912694711L,
-2166355L, -542469436L, -140720406L, 566962517L, -1623638289L,
128613622L, -1062917088L, -690557469L, 138059712L, 1451836988L,
-1317148272L, -907869150L, -546932472L, -1316850548L, -1789476068L,
1247705234L, -2050119296L, -2107851756L, 1230954280L, 1582769722L,
52359632L, -1231359636L, 1791965108L, -24306158L, -1251081248L,
-1708898868L, 1592616160L, 1412124706L, 804345640L, 1103607308L,
-521014468L, -1121012750L, 1100679920L, -1400083900L, 1319306840L,
-1205627078L, -810454032L, 552928956L, 417457428L, -1745530686L,
-612205472L, 1554692476L, 733124560L, -1773160158L, -308302168L,
677567372L, -1936606020L, 2015797042L, -401939904L, 1581614260L,
1494795336L, 1457920442L, 2057960976L, 300828908L, 2096486740L,
433321746L, 1989809568L, -1955366196L, 2092136032L, -1509253182L,
179733288L, 774671788L, 1116098876L, 1761492850L, -523304656L,
1641557796L, -587692744L, -1562895654L, -270071920L, 1565430524L,
-1991757932L, 1435019330L, -1313530624L, -1301058500L, 1070763344L,
-123135838L, 1350940488L, 2010322700L, 808438492L, 1692709714L,
-1698000256L, 549779540L, -1738732568L, 1445541626L, 1206667600L,
204686124L, 1650886516L, -2080832622L, -2105444768L, 1908526988L,
-782421152L, 1727186914L, 1723613096L, -1323688756L, 673156604L,
-590918734L, 938174960L, -1928582716L, 1183392088L, 484271482L,
-1077341776L, -823660612L, 1479364692L, -1313160254L, 2086052896L,
1974353084L, -1376970160L, -1586249758L, 553743976L, -321283508L,
-687537924L, -849719310L, -87507712L, -1420650636L, -1129725944L,
-2004417094L, 1218805200L, 1040147436L, 43163540L, -1152230446L,
-893724832L, -197868212L, 611855648L, 1543062402L, 1771444968L,
742335724L, 1838115388L, 1448512370L, 1944414832L, -1557448668L,
581535544L, -761746278L, -920218928L, -1119788868L, -1208133996L,
6563458L, 408091840L, 364327228L, 1619710096L, -1998575070L,
-705001976L, 493617420L, 1456091804L, -196588014L, -1633858176L,
345562644L, -39152088L, 1646945594L, -336197680L, 1159926380L,
-538368204L, -1024015726L, -538453664L, -2041482932L, -624193568L,
703971234L, -1921919064L, -857609332L, 1063364284L, 385846130L,
-396240656L, 1928460356L, -1962402472L, 1446597690L, -2082249872L,
-1905130436L, -983872748L, 534653378L, -280583968L, -486697092L,
1948589776L, -935721310L, -1745337304L, 2112669964L, -1217989700L,
353647922L, -1959125056L, 281367092L, 1404787656L, 433833146L,
551931792L, 308262380L, -282165164L, -1247445358L, -1309000928L,
-198267444L, -1304421408L, -1705213246L, -2140382680L, 733585068L,
587641532L, 1168462322L, 546785584L, -2073539420L, -629161288L,
2080942938L, 1741311376L, 975057276L, -1350693612L, -2125502142L,
232391424L, -364110916L, 782269264L, 1099514146L, 1667997000L,
-1968375796L, 1981118556L, 915186898L, 1458145280L, 737901396L,
-223944216L, -2022533126L, 642864848L, -964205268L, -1411233548L,
-711604334L, -239453088L, -768443124L, 212436704L, -475636382L,
563298984L, -1420404148L, 405845884L, -843846862L, -686899600L,
-1672757948L, -837878824L, -1006792582L, 277243440L, -516912452L,
-856889516L, -312387006L, -332354656L, -1767028548L, 2041455056L,
1130501155L, -278627036L, -1161004590L, 1630431783L, -1711704975L,
101657270L, -401492396L, -81569051L, -1265838353L, 1594794536L,
1329448582L, -1207373645L, 1285236245L, 2121631858L, 257152192L,
1579591977L, -1011454053L, 375449708L, 371195002L, -1715608145L,
1641063609L, -1496289586L, -545477604L, 353357709L, 199657975L,
1018469632L, 407157598L, 2015005739L, -771759315L, -354447462L,
296910168L, 1074081985L, -348876941L, -1169582956L, 1750245666L,
990855223L, -17600255L, -1000750074L, -1523482044L, -1772934283L,
-1527116065L, -1025656264L, 1129804310L, -1684314589L, 1468920293L,
-2109489150L, 1588583024L, -1749413863L, 1957136907L, 509886716L,
1720491306L, 1349387103L, 1855443689L, -961381890L, 1507575276L,
-826788547L, -858112761L, -1516790544L, -1930952562L, 1114372795L,
-1590635875L, -1443731894L, -864711576L, -1154743215L, -1633620157L,
1868085828L, 1021688050L, 1999668423L, 1321565841L, -484376554L,
1680681588L, -1313706747L, -83270833L, -684495544L, -1532845850L,
-262654445L, 897001845L, -548852398L, 1938652576L, 175713417L,
-808969029L, -505688308L, -1240029542L, -605766641L, 781252697L,
837219566L, 573714300L, 1602509805L, 836625559L, 406908768L,
98273086L, -2015512565L, -2083970931L, 671865274L, 329242616L,
506729377L, 555167059L, -431070092L, -712010238L, 203917463L,
161987169L, 1680339622L, -1797367132L, 1086795605L, -758238209L,
1138250392L, -2043201610L, 705739843L, 307410309L, -1860473310L,
1940673808L, 139343033L, -1417864853L, 767424604L, 1840887178L,
-1082125953L, 927354057L, -1570623138L, 1139204620L, -1044232739L,
1472732391L, 69675280L, 628332590L, 2009407195L, -1426769987L,
1896272170L, -480045624L, -688977167L, 542590051L, -1826119580L,
142928786L, 1452195943L, -1345298383L, -1407127178L, 1616879252L,
383767717L, -1600550481L, -429518232L, -1997338042L, -1727852557L,
-860663979L, -1551535822L, -1395174144L, 1023174121L, -1335685029L,
194110124L, -343507782L, -723747217L, -1612305415L, 1111454350L,
521986908L, -1454739507L, -551276745L, -1550135872L, -2116357474L,
-1611863189L, -1984081171L, 1358235482L, -927491048L, -2109126271L,
-848765133L, 1081619796L, 1489198946L, 911741943L, -1214161855L,
-518350650L, -1459935740L, -1494552139L, -1557098849L, 1300539015L
)
|
7bded04b18a01738ed9047b75c03034d7231addc
|
954d502ab9ed7442ecdd960880403d4ed6a2ec10
|
/01-Reads_Count/Reads_count.R
|
cd62c5e45eb9b19580ea3274085e30f9cc237e43
|
[] |
no_license
|
LuigiLopezIglesias/Autocheck_Run
|
c9de6412d3aa9664395bdd1a237685c7f5c2ac9c
|
b7ba61cab2bf0ab5a0083e2db9814a9974558be9
|
refs/heads/master
| 2021-10-24T23:16:37.124639
| 2018-11-28T16:22:38
| 2018-11-28T16:22:38
| 117,987,398
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,510
|
r
|
Reads_count.R
|
## Reads_count.R -- step 1 of the run-QC pipeline: count raw reads per
## sample (R1 fastq files) for one sequencing run and write per-amplicon
## CSV summaries.
library(dplyr)
library(optparse)
library(crayon)
#########################################################################################
###              Cargar argumentos//Load arguments                                    ###
option_list <- list(
  make_option(c('-d', '--date'), action='store',
              dest='date', type='character'),
  make_option(c('-w', '--warehouse'), action='store',
              dest='Warehouse', type='character',
              default="Hard Disk"),
  make_option(c('-p', '--path'), action='store',
              dest='Path', type='character',
              default="/home/yamishakka/Escritorio/Biomemakers/00-NP_Abundances/")
)
opt <- parse_args(OptionParser(option_list=option_list))
cat(blue("Run date to analyze is: "%+%green$bold(opt$date)%+%"\n"))
cat(blue("Location of run folder is on: "%+%green$bold(opt$Warehouse)%+%"\n"))
cat(blue("Location of pipeline storage files is on: "%+%green$bold(opt$Path)%+%"\n"))
#########################################################################################
## Resolve the run folder. Scalar condition, so plain if/else rather than
## the original scalar ifelse().
PATH <- if (opt$Warehouse == "Local") {
  paste0("/home/yamishakka/Escritorio/Runs_rename/", opt$date, "/")
} else {
  paste0("/media/yamishakka/Elements/Runs_Good/", opt$date, "/")
}
## Forward (R1) fastq files for this run.
Fastq <- list.files(path = PATH,
                    pattern = "_R1_",
                    all.files = FALSE,
                    full.names = FALSE,
                    recursive = FALSE,
                    ignore.case = FALSE,
                    include.dirs = FALSE,
                    no.. = FALSE)
cat(silver("\nReady to anlyze reads number by sample in run \n\n"))
#Fastq<-gsub("Undetermined.*","",Fastq)
## Count reads per file. Preallocate the result vectors instead of growing
## them with rbind()/cbind() inside the loop (the original was O(n^2) and
## coerced counts through character).
nfiles <- length(Fastq)
Samples <- character(nfiles)
Reads <- numeric(nfiles)
for (k in seq_len(nfiles)) {
  i <- Fastq[k]
  sample <- sub('_S.*', '', i)
  cat("the sample "%+%magenta(sample)%+%" have: ")
  Samples[k] <- sample
  ## Copy, decompress and line-count the fastq (4 lines per read).
  ## shQuote() protects against spaces/metacharacters in file names.
  Count <- system(paste0("cp ", shQuote(paste0(PATH, i)),
                         " /tmp/fastatmp.gz && gunzip /tmp/fastatmp.gz && wc -l /tmp/fastatmp && rm /tmp/fastatmp"),
                  intern = TRUE)
  Raw_Reads <- as.integer(sub(' /.*', '', Count))
  reads <- Raw_Reads/4
  Reads[k] <- reads
  cat(yellow(reads)%+%" reads\n")
}
ReadsNumber <- suppressWarnings(data.frame(Sample = Samples,
                                           Reads = as.integer(Reads),
                                           stringsAsFactors = FALSE))
ReadsNumber["Date"] <- opt$date
## Drop duplicates and the Undetermined pseudo-sample, then split by
## amplicon. NOTE(review): grepl matches 'b'/'c' anywhere in the sample
## name -- assumed to be a suffix naming convention ('b' = 16S, 'c' = 18S,
## otherwise ITS); confirm with the lab naming scheme.
Reads_file <- unique(ReadsNumber) %>%
  filter(!grepl("Undeter",Sample))
Reads_16S <- Reads_file %>%
  filter(grepl("b", Sample))
#print(Reads_16S)
Reads_18S <- Reads_file %>%
  filter(grepl("c", Sample))
#print(Reads_18S)
Reads_ITS <- Reads_file %>%
  filter(!grepl("b|c", Sample))
#print(Reads_ITS)
## Write one CSV per amplicon under <Path>/<date>/.
dir.create(paste0(opt$Path,opt$date,"/"), showWarnings = FALSE, recursive = TRUE)
write.table(Reads_ITS, file = paste0(opt$Path,opt$date,"/ITS_",opt$date,"_Reads_Raw.csv"), sep=",",row.names = FALSE, quote = FALSE)
write.table(Reads_16S, file = paste0(opt$Path,opt$date,"/16S_",opt$date,"_Reads_Raw.csv"), sep=",",row.names = FALSE, quote = FALSE)
write.table(Reads_18S, file = paste0(opt$Path,opt$date,"/18S_",opt$date,"_Reads_Raw.csv"), sep=",",row.names = FALSE, quote = FALSE)
cat(red$bold("O o O o O o O o O o O o\n"))
cat(red$bold("| O o | | O o | | O o | STEP | O o | | O o | | O o |\n"))
cat(red$bold("| | O | | | | O | | | | O | | 1 | | O | | | | O | | | | O | |\n"))
cat(red$bold("| o O | | o O | | o O | FINISHED | o O | | o O | | o O |\n"))
cat(red$bold("o O o O o O o O o O o O\n"))
|
6136ec08b46092cd8ef2f30b768df61d012643c4
|
4ae53c7a18aa4f789ad9f8709eaa33f46819478a
|
/ConnorCode/candidateSpecificLDA.R
|
87fd2f44bccdbf885f167ccc8c4d42e8d480a5cf
|
[] |
no_license
|
Denizhan-Yigitbas/Presidential-Candidate-Sentiment-Analysis
|
8bf2a04a59b1eac706615dca3ff4c55569b9ddd8
|
44bf9010981058dec570ef998c705704197026dc
|
refs/heads/master
| 2020-05-18T13:10:46.052390
| 2020-04-20T00:40:46
| 2020-04-20T00:40:46
| 184,428,631
| 2
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,627
|
r
|
candidateSpecificLDA.R
|
### TRASH? DO NOT USE
### STEP SIX: Topic Modeling by Candidate
## NOTE(review): this script relies on a 'tweet_words' data frame (with at
## least 'candidate', 'id' and 'word' columns) and on dplyr/ggplot2/
## tidytext/stm being attached by an earlier script -- confirm before
## running standalone.
## Per-candidate subsets of the tokenized tweets (only the Klobuchar subset
## is actually used below).
amyklobuchar <- subset(tweet_words, candidate=="Amy Klobuchar")
andrewyang <- subset(tweet_words, candidate=="Andrew Yang")
berniesanders <- subset(tweet_words, candidate=="Bernie Sanders")
betoorourke <- subset(tweet_words, candidate=="Beto O'Rourke")
corybooker <- subset(tweet_words, candidate=="Cory Booker")
elizabethwarren <- subset(tweet_words, candidate=="Elizabeth Warren")
joebiden <- subset(tweet_words, candidate=="Joe Biden")
juliancastro <- subset(tweet_words, candidate=="Julian Castro")
kamalaharris <- subset(tweet_words, candidate=="Kamala Harris")
kirstengillibrand <- subset(tweet_words, candidate=="Kirsten Gillibrand")
petebuttigieg <- subset(tweet_words, candidate=="Pete Buttigieg")
tulsigabbard <- subset(tweet_words, candidate=="Tulsi Gabbard")
## Word counts per tweet (id) with tf-idf weights.
klobuchar_lda <- amyklobuchar %>%
  count(id, word, sort = TRUE) %>%
  bind_tf_idf(word, id, n)
klobuchar_lda
## Bar chart of raw word frequencies.
klobuchar_relative_freq <- klobuchar_lda %>%
  ungroup %>%
  mutate(word=reorder(word,n)) %>%
  ggplot(aes(x=word, y=n)) +
  geom_col(show.legend = FALSE)
klobuchar_relative_freq
## Cast to a document-feature matrix for topic modeling.
## NOTE(review): 'candidate' is dropped by count(id, word) above, so this
## cast likely errors as written -- probably intended cast_dfm(id, word, n).
klobuchar_dfm <- klobuchar_lda %>%
  cast_dfm(candidate, word, n)
# topic modeling
## Fit a 6-topic structural topic model with spectral initialization.
klobuchar_topic_model <- stm(klobuchar_dfm, K = 6, init.type = "Spectral")
summary(klobuchar_topic_model)
## Plot the top 10 terms (by per-topic beta) for each topic.
klobuchar_tidy_tm <- tidy(klobuchar_topic_model)
klobuchar_tidy_tm %>%
  group_by(topic) %>%
  top_n(10) %>%
  ungroup %>%
  mutate(term = reorder(term, beta)) %>%
  ggplot(aes(x=term, y=beta, fill=topic)) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~topic, scales="free") +
  coord_flip()
|
3a8e9ef951cb223e06299fe9bdb6314e6fc2ce14
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/stringi/examples/stri_datetime_fields.Rd.R
|
a254d8a1870fab5739a4ce449060debafeb27da5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 400
|
r
|
stri_datetime_fields.Rd.R
|
## Auto-extracted help-page example for stringi::stri_datetime_fields.
library(stringi)
### Name: stri_datetime_fields
### Title: Get Values for Date and Time Fields
### Aliases: stri_datetime_fields
### ** Examples
## Break the current date-time into its calendar fields.
stri_datetime_fields(stri_datetime_now())
## Same, but under the Hebrew calendar.
stri_datetime_fields(stri_datetime_now(), locale="@calendar=hebrew")
## Look up the Hebrew month name for the current month.
stri_datetime_symbols(locale="@calendar=hebrew")$Month[
  stri_datetime_fields(stri_datetime_now(), locale="@calendar=hebrew")$Month
]
|
f073a351963ccc56c7798c3682878359a92a3831
|
50c3b08c5958d3b8f8f4c4d32692467100fec0cb
|
/R/SolrCore-class.R
|
a1e1c3b359df0358b2b28cd46b213e062a921cdd
|
[] |
no_license
|
lawremi/rsolr
|
1355ac70c51559bfdedccdda5ea8e156071d052d
|
f8a2d25864e06556000bd25f6430827580e7c653
|
refs/heads/master
| 2022-05-19T04:01:22.765793
| 2022-05-17T23:16:29
| 2022-05-17T23:16:29
| 49,519,372
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,996
|
r
|
SolrCore-class.R
|
### =========================================================================
### SolrCore objects
### -------------------------------------------------------------------------
### Represents a Solr core; responsible for obtaining the Solr schema
### and processing Solr queries.
### We assume that the schema is relatively static and thus cache it
### during initialization. In theory though, we could retrieve it from
### the URI dynamically.
## Class representing a single Solr core: the core's REST endpoint plus a
## schema and server version that are fetched once at construction and
## cached (the schema is assumed static for the object's lifetime; see the
## file header).
setClass("SolrCore",
         representation(uri="RestUri",
                        schema="SolrSchema",
                        version="package_version"))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Constructor
###
## SolrCore: construct from a RestUri, or from anything RestUri() accepts.
## Extra arguments in '...' are forwarded to RestUri(); they are ignored
## (with a warning) when 'uri' is already a RestUri.
SolrCore <- function(uri, ...) {
  if (!is(uri, "RestUri"))
    uri <- RestUri(uri, ...)
  else if (length(list(...)) > 0L)
    warning("arguments in '...' are ignored when uri is a RestUri")
  schema <- readSchema(uri)     # cached schema (XML file or REST endpoint)
  version <- readVersion(uri)   # server version; falls back to 5.0 on failure
  new("SolrCore", uri=uri, schema=schema, version=version)
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Accessors
###
## Name of the core, delegated to its schema.
setMethod("name", "SolrCore", function(x) name(x@schema))
## Total number of documents matching 'query': issue the query truncated to
## zero rows and read the hit count from the (list) response.
numFound <- function(x, query) {
  emptyQuery <- head(query, 0L)
  responseType(emptyQuery) <- "list"
  ndoc(eval(emptyQuery, x))
}
## Number of documents the query would actually return: the total match
## count capped by the query's translated row bounds.
setMethod("ndoc", "SolrCore", function(x, query = SolrQuery()) {
  numFound <- numFound(x, query)
  p <- translateBoundsParams(params(query), numFound)
  min(numFound, p$rows)
})
setGeneric("schema", function(x, ...) standardGeneric("schema"))
## Schema of the core; when a query is supplied the cached schema is
## augmented with fields implied by the query.
setMethod("schema", "SolrCore", function(x, query = NULL) {
  if (!is.null(query))
    augment(schema(x), query)
  else x@schema
})
## Raw response from the Luke admin request handler (nTerms=0 keeps the
## request cheap).
readLuke <- function(x) {
  processSolrResponse(read(x@uri$admin$luke, list(nTerms=0L, wt="json")))
}
## Order field names 'x' by the position of the first schema (glob) pattern
## each name matches.
sortFieldsBySchema <- function(x, schema) {
  schemaNames <- names(fields(schema))
  x[order(max.col(globMatchMatrix(schemaNames, x), ties.method="first"))]
}
## Field names actually present in the index, via the Luke handler;
## internal fields (leading '_' or containing '____') are dropped. Falls
## back to an empty vector (with a warning) when Luke is unavailable.
retrieveFieldNames <- function(x) {
  ans <- tryCatch(names(readLuke(x)$fields),
                  error = function(e) {
                    warning("Luke request handler ",
                            "unavailable --- ",
                            "try 'includeStatic=TRUE'")
                    character()
                  })
  internal <- grepl("^_|____", ans)
  ans[!internal]
}
## Resolve field names for (core, query) and return the corresponding field
## objects from the (possibly query-augmented) schema.
resolveFields <- function(x, query = NULL, ...) {
  schema <- schema(x, query)
  fn <- resolveFieldNames(x, query, ...)
  fields(schema)[fn]
}
## Sort field names first by the schema, then (when given) by the query's
## own field ordering.
sortFieldNames <- function(x, schema, query) {
  x <- sortFieldsBySchema(x, schema)
  if (!is.null(query)) {
    x <- sortFieldsByQuery(x, query)
  }
  x
}
## Expand the query's field specification to concrete field names. When the
## query names only concrete fields (no patterns) those are returned
## directly; otherwise the index is consulted, optionally adding static
## (non-dynamic, non-hidden) schema fields.
resolveFieldNames <- function(x, query = NULL, includeStatic = FALSE) {
  if (!is.null(query)) {
    isPattern <- flIsPattern(query)
    if (!any(isPattern)) {
      return(names(isPattern))
    }
  }
  ans <- retrieveFieldNames(x)
  if (includeStatic) {
    f <- fields(schema(x))
    ans <- union(ans, names(f)[!dynamic(f) & !hidden(f)])
  }
  sortFieldNames(ans, schema(x), query)
}
## User-facing field name listing with argument validation; optionally
## restricted to stored and/or indexed (or docValues-backed) fields.
## 'query' may be a character vector of field names/patterns or a SolrQuery.
setMethod("fieldNames", "SolrCore",
          function(x, query = NULL, onlyStored = FALSE, onlyIndexed = FALSE,
                   includeStatic = FALSE)
          {
            if (is.character(query)) {
              query <- subset(SolrQuery(), fields=query)
            }
            if (!is.null(query) && !is(query, "SolrQuery")) {
              stop("if non-NULL, 'query' must be a SolrQuery")
            }
            if (!isTRUEorFALSE(includeStatic)) {
              stop("'includeStatic' must be TRUE or FALSE")
            }
            if (!isTRUEorFALSE(onlyStored)) {
              stop("'onlyStored' must be TRUE or FALSE")
            }
            if (!isTRUEorFALSE(onlyIndexed)) {
              stop("'onlyIndexed' must be TRUE or FALSE")
            }
            if (onlyStored || onlyIndexed) {
              f <- resolveFields(x, query, includeStatic)
              keep <-
                (if (onlyStored) stored(f) else TRUE) &
                (if (onlyIndexed) indexed(f) | docValues(f)
                 else TRUE)
              names(f)[keep]
            } else {
              resolveFieldNames(x, query, includeStatic)
            }
          })
setGeneric("version", function(x) standardGeneric("version"))
## Solr server version captured at construction time.
setMethod("version", "SolrCore", function(x) {
  x@version
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### CREATE/UPDATE/DELETE
###
## updateParams: extra query parameters to attach to an update (POST)
## request, dispatched on the type of the value being uploaded.
setGeneric("updateParams", function(x) standardGeneric("updateParams"))
## Default: no extra parameters.
setMethod("updateParams", "ANY", function(x) {
  list()
})
## data.frame uploads: map the string "NA" to missing, and mark every
## list-column as a multi-valued field that Solr should split.
setMethod("updateParams", "data.frame", function(x) {
  splitFields <- names(x)[vapply(x, is.list, logical(1L))]
  splitParams <- rep("true", length(splitFields))
  ## FIX: the names vector must have exactly one element per parameter.
  ## The original rep()ed the already full-length paste0() result
  ## length(splitFields) times, which errors whenever there is more than
  ## one list-column.
  names(splitParams) <- paste0("f.", splitFields, ".split")
  c(list(map="NA:"), splitParams)
})
## update,SolrCore: upload 'value' to the core's update handler.
## value  : documents in any form toUpdate() accepts; wrap in I() to send
##          pre-encoded content verbatim.
## commit : if TRUE, ask Solr to commit after the upload; further commit
##          options are passed through '...'.
## atomic : if TRUE, encode documents as atomic ("set") updates that modify
##          existing documents field by field.
## Returns the core invisibly so calls can be chained.
setMethod("update", "SolrCore", function(object, value, commit=TRUE,
                                         atomic=FALSE, ...)
{
  if (!isTRUEorFALSE(commit)) {
    stop("'commit' must be TRUE or FALSE")
  }
  if (!isTRUEorFALSE(atomic)) {
    stop("'atomic' must be TRUE or FALSE")
  }
  if (is(value, "AsIs")) {
    ## unwrap I(): caller takes responsibility for the encoding
    class(value) <- setdiff(class(value), "AsIs")
  } else {
    value <- toUpdate(value, schema=schema(object), atomic=atomic)
  }
  media <- as(value, "Media")
  query.params <- updateParams(value)
  if (commit) {
    query.params <- c(query.params, commitQueryParams(...))
  }
  create(object@uri$update, media, query.params)
  invisible(object)
})
setGeneric("toUpdate", function(x, ...) standardGeneric("toUpdate"))
## Default conversion to the Solr update representation. For atomic updates
## each document is rewritten so every non-key field becomes a
## list(set = value) modifier (NA becomes NULL, which clears the field);
## this requires a uniqueKey in the schema so Solr can address documents.
setMethod("toUpdate", "ANY", function(x, schema, atomic=FALSE, ...) {
  x <- toSolr(x, schema, ...)
  if (atomic) {
    if (is.null(uniqueKey(schema))) {
      stop("modifying documents requires a 'uniqueKey' in the schema")
    }
    x <- unname(lapply(as(x, "DocList"), function(xi) {
      uk <- names(xi) == uniqueKey(schema)
      xi[is.na(xi)] <- list(NULL)    # NA means "unset this field"
      xi[!uk] <- lapply(xi[!uk], function(f) list(set = f))
      xi
    }))
  }
  x
})
## Remove length-one NA elements from a list; any non-list input is
## returned untouched.
dropNAs <- function(x) {
  if (!is.list(x)) {
    return(x)
  }
  x[is.na(x)] <- NULL
  x
}
## toUpdate,list: convert a list of documents. A NULL element requests
## deletion of the document named by that element; mixed add/delete batches
## (non-atomic) are encoded as explicit add/delete commands. Data frames
## (which are lists) fall through to the ANY method.
setMethod("toUpdate", "list", function(x, schema, atomic=FALSE, ...) {
  if (is.data.frame(x)) {
    return(callNextMethod())
  }
  delete <- vapply(x, is.null, logical(1))
  if (!atomic && any(delete)) {
    x[!delete] <- lapply(toSolr(x[!delete], schema, ...),
                         function(xi) list(doc=xi))
    x[delete] <- lapply(names(x)[delete], function(nm) list(id=nm))
    setNames(x, ifelse(delete, "delete", "add"))
  } else {
    if (!atomic) {
      ## in a full (non-atomic) add, NA fields are simply left unset
      x <- lapply(x, dropNAs)
    }
    callNextMethod()
  }
})
## TRUE when the query is just SolrQuery() plus at most one filter -- the
## only form delete() can faithfully translate to a delete-by-query.
isSimpleQuery <- function(x) {
  length(params(x)$fq) <= 1L &&
    {
      params(x)$fq <- NULL
      identical(x, SolrQuery())
    }
}
## delete,SolrCore: remove the documents matching 'which' (everything by
## default). Queries more complex than a single filter cannot be expressed
## as one delete-by-query, hence the warning. '...' goes to update()
## (e.g. commit options).
setMethod("delete", "SolrCore", function(x, which = SolrQuery(), ...) {
  if (!isSimpleQuery(which)) {
    warning("delete() cannot handle 'which' more complex than ",
            "'subset(SolrQuery(), [expr])'")
  }
  which <- translate(which, core=x)
  query <- params(which)$fq
  if (is.null(query)) {
    query <- params(which)$q
  }
  invisible(update(x, I(list(delete=list(query=as.character(query)))), ...))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### READ
###
## docs: retrieve documents (alias for read()).
setMethod("docs", "SolrCore", function(x, ...) read(x, ...))
## read,SolrCore: evaluate 'query' and return its documents as a list or
## data.frame. Grouped queries are always fetched as lists, then coerced
## (non-strictly) to the requested type.
setMethod("read", "SolrCore",
          function(x, query=SolrQuery(), as=c("list", "data.frame"))
          {
            if (!is(query, "SolrQuery")) {
              stop("'query' must be a SolrQuery")
            }
            as <- match.arg(as)
            responseType(query) <- if (grouped(query)) "list" else as
            as(docs(eval(query, x)), as, strict=FALSE)
          })
## Schema via the REST /schema endpoint.
readSchemaFromREST <- function(uri) {
  parseSchemaFromREST(processSolrResponse(read(uri$schema))$schema)
}
## Schema by downloading schema.xml through the admin file handler.
readSchemaXMLFile <- function(uri) {
  parseSchemaXML(read(uri$admin$file, file="schema.xml"))
}
## Try the schema.xml admin file first, then the REST endpoint; fail with a
## combined message when neither source is available.
readSchema <- function(uri) {
  tryCatch(readSchemaXMLFile(uri), error = function(e) {
    tryCatch(readSchemaFromREST(uri), error = function(e) {
      stop("Failed to retrieve schema.\n",
           "rsolr requires schema via REST or as XML from admin module.\n",
           paste("Error from REST:", e))
    })
  })
}
## System information from the admin handler (JSON).
readSystem <- function(uri) {
  processSolrResponse(read(uri$admin$system, wt="json"))
}
## Solr (lucene) spec version; assumes 5.0, with a warning, when the system
## handler cannot be reached.
readVersion <- function(uri) {
  as.package_version(tryCatch({
    readSystem(uri)$lucene$"solr-spec-version"
  }, error = function(e) {
    warning("Failed to retrieve version, assuming 5.0")
    "5.0"
  }))
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Summarizing
###
setGeneric("facets", function(x, ...) standardGeneric("facets"))
## Facet results for a core: truncate the query to zero rows (facet data
## does not need any documents), evaluate it, and delegate to the result's
## facets method.
setMethod("facets", "SolrCore", function(x, by, ...) {
  zeroRowQuery <- head(by, 0)
  result <- eval(zeroRowQuery, x)
  facets(result, ...)
})
setGeneric("groupings", function(x, ...) standardGeneric("groupings"))
## Groupings for a core: evaluate the query and delegate to the result.
setMethod("groupings", "SolrCore", function(x, by, ...) {
  result <- eval(by, x)
  groupings(result, ...)
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Query Evaluation
###
## Coerce a raw character Solr response into a parsed R object. Some Solr
## instances label responses (including errors) text/plain, so the payload
## arrives as character; wrap it in the Media class matching 'type' and
## convert it to that media's target representation. Anything that is not
## character is assumed already parsed and returned unchanged.
processSolrResponse <- function(response, type = "json") {
  if (!is.character(response)) {
    return(response)
  }
  mediaType <- switch(type,
                      json = "application/json",
                      csv = "text/csv",
                      xml = "application/xml")
  media <- new(mediaType, response)
  as(media, mediaTarget(media))
}
## Unfortunately Solr does not describe errors with CSV output (!)
## So we reissue the query with JSON when one occurs
## SolrErrorHandler: build a tryCatch() handler for a select request.
## 404 becomes "core does not exist"; a Bad_Request with a JSON body is
## unpacked into a readable [code] message (plus Java stack trace when
## present); a Bad_Request in any other format is retried as JSON to
## recover the error details; everything else is re-raised.
SolrErrorHandler <- function(core, query) {
  function(e) {
    if (is(e, "Not_Found")) {
      stop("Solr core '", name(core), "' does not exist", call.=FALSE)
    }
    if (!is(e, "Bad_Request")) {
      stop(e)
    }
    if (params(query)$wt == "json") {
      response <- processSolrResponse(attr(e, "body"), params(query)$wt)
      stop("[", response$error$code, "] ", response$error$msg,
           if (!is.null(response$error$trace))
             paste("\nJava stacktrace:\n", response$error$trace),
           call.=FALSE)
    } else {
      ## non-JSON formats carry no error details; reissue as JSON
      params(query)$wt <- "json"
      eval(query, core)
    }
  }
}
## eval,SolrQuery,SolrCore: execute the query against the core's select
## handler and convert the response to the query's requested representation.
setMethod("eval", c("SolrQuery", "SolrCore"),
          function (expr, envir, enclos)
          {
            if (is.null(responseType(expr)))
              responseType(expr) <- "list"       # default representation
            expr <- translate(expr, core=envir)  # resolve to Solr params
            query <- as.character(expr)
            response <- tryCatch(read(envir@uri$select, query),
                                 error = SolrErrorHandler(envir, expr))
            response <- processSolrResponse(response, params(expr)$wt)
            convertSolrQueryResponse(response, envir, expr)
          })
# Generic for the number of groups in a grouped result.
setGeneric("ngroup", function(x, ...) standardGeneric("ngroup"))
# group.limit=0 asks Solr for group counts only (no documents) before
# evaluating, keeping the round-trip cheap.
setMethod("ngroup", "SolrCore", function(x, query) {
  params(query)$group.limit <- 0L
  ngroup(eval(query, x))
})
# Length of the result `query` would produce against `x`: the group count
# for grouped queries, otherwise the document count.  Warns (but still
# returns the full vector) when multiple groupings make the length ambiguous.
resultLength <- function(x, query) {
  if (grouped(query)) {
    ans <- ngroup(x, query)
  } else {
    ans <- numFound(x, query)
  }
  if (length(ans) > 1L) {
    warning("ambiguous result length (multiple groupings)")
  }
  ans
}
# Convert a raw Solr response into the R representation requested by `query`.
setGeneric("convertSolrQueryResponse",
           function(x, core, query) standardGeneric("convertSolrQueryResponse"),
           signature=c("x"))
# Default conversion defers to fromSolr() with the core's schema.
convertSolrQueryResponse_default <- function(x, core, query) {
  fromSolr(x, schema(core), query)
}
setMethod("convertSolrQueryResponse", "ANY", convertSolrQueryResponse_default)
setMethod("convertSolrQueryResponse", "data.frame",
          convertSolrQueryResponse_default)
# Raw lists are wrapped in a ListSolrResult instead of converted eagerly.
setMethod("convertSolrQueryResponse", "list", function(x, core, query) {
  ListSolrResult(x, core, query)
})
# Evaluating a TranslationRequest performs the translation of its source
# into its target in the context of the core; `enclos` has no meaning here.
setMethod("eval", c("TranslationRequest", "SolrCore"),
          function (expr, envir, enclos) {
            if (!missing(enclos)) {
              warning("'enclos' is ignored")
            }
            translate(expr@src, expr@target, envir)
          })
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Other commands
###
# Generic for committing pending updates to an index.
setGeneric("commit", function(x, ...) standardGeneric("commit"))
# Build the query-string parameters for a commit request.  The formals of
# commit_SolrCore (minus `x`) serve as the authoritative argument list so
# defaults stay in sync; everything is lower-cased because Solr expects
# lowercase boolean literals ("true"/"false").
commitQueryParams <- function(...) {
  def <- as.function(c(formals(commit_SolrCore)[-1], list(NULL)))
  call.args <- as.list(match.call(def))[-1]
  args <- formals(def)
  args[names(call.args)] <- call.args
  args <- do.call(c, c(commit="true", lapply(args, eval, args)))
  tolower(args)
}
# Issue a commit (optionally optimizing the index) against the core's
# update endpoint and return the response-header status code, invisibly.
commit_SolrCore <- function(x, waitSearcher=TRUE, softCommit=FALSE,
                            expungeDeletes=FALSE,
                            optimize=TRUE, maxSegments=if (optimize) 1L)
{
  # Drop the call name and `x` so only commit options are forwarded.
  args <- tail(as.list(match.call()), -2)
  resp <- read(x@uri$update, do.call(commitQueryParams, args), wt="json")
  invisible(as.integer(processSolrResponse(resp)$responseHeader$status))
}
setMethod("commit", "SolrCore", commit_SolrCore)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Show
###
# Compact textual summary of a SolrCore: its name, document count, and the
# number of fields declared in its schema.
setMethod("show", "SolrCore", function(object) {
  cat("SolrCore object\n")
  cat("name:", name(object), "\n")
  cat("ndoc:", ndoc(object), "\n")
  cat("schema:", length(fields(schema(object))), "fields\n")
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Utilities
###
# Drop any cached responses for this core's URI; returns the core invisibly
# so the call can be chained.
setMethod("purgeCache", "SolrCore", function(x) {
  purgeCache(x@uri)
  invisible(x)
})
|
bf00ae6254838b88539ee3ebb3714b327ad50f1f
|
2d50491917be033214010ed349f0c6ab5d916271
|
/R/missCompare-package.r
|
9d580d7c3a08079c4da8fcc3894463eb30885c9f
|
[
"MIT"
] |
permissive
|
ZipZaap/missCompare
|
82aa38b388c7c2fd2dbfa9be55bc064fd33bd6b1
|
20ceaf2c16f5e62ef195760b77bb1a5d67e1a35b
|
refs/heads/master
| 2022-11-23T01:44:14.335078
| 2020-05-13T15:01:52
| 2020-05-13T15:01:52
| 282,241,971
| 0
| 0
|
NOASSERTION
| 2020-07-24T14:34:32
| 2020-07-24T14:34:31
| null |
UTF-8
|
R
| false
| false
| 1,628
|
r
|
missCompare-package.r
|
#' missCompare: Missing Data Imputation Comparison Framework
#'
#' The \strong{missCompare} package offers a convenient pipeline to test and compare various missing data
#' imputation algorithms on simulated data. The central assumption behind missCompare is that structurally
#' different datasets (e.g. larger datasets with a large number of correlated variables vs. smaller datasets
#' with non correlated variables and other combinations) will benefit differently from different missing data
#' imputation algorithms. \strong{missCompare} takes measurements of your dataset and sets up a sandbox to try
#' a curated list of standard and sophisticated missing data imputation algorithms and compares them assuming
#' custom set missingness patterns. \strong{missCompare} will give you a comparative analysis of missing data
#' imputation algorithms, offer a report with the best performing algorithms assuming various missing data patterns
#' and publication ready visualizations, impute your dataset for you, assess imputation performance using a validation
#' framework and help you better understand missing data in your dataset.
#'
#' @details
#' \tabular{ll}{
#' Package: \tab missCompare\cr
#' Depends: \tab R (>= 3.5.0)\cr
#' Type: \tab Package\cr
#' Version: \tab 1.0.1\cr
#' Date: \tab 2019-01-30\cr
#' License: \tab MIT\cr
#' LazyLoad: \tab Yes
#' }
#'
#' @author
#' \itemize{
#' \item Tibor V. Varga \email{tirgit@@hotmail.com}
#' \item David Westergaard \email{david.westergaard@@cpr.ku.dk}
#' }
#'
#' @seealso
#' \url{https://github.com/Tirgit/missCompare}
#'
#' @name missCompare
#' @docType package
NULL
|
55ac4bbdc86b481fdc8ce7bdb79acaa65ac7080e
|
454cd86f0669593707012f7038448e80fe940dac
|
/R/quickenrichmentscore.R
|
2dea3ff74f1bc3273246fc0a302fd4500e001b28
|
[] |
no_license
|
zhangxiner2/GeneExpressionSignature
|
a51148457c5d53f7d11f686cc495cee2e844414d
|
27c86a2cc5cbbf7a5ff41393b108b1442bfddc00
|
refs/heads/master
| 2023-03-19T15:33:35.174178
| 2020-12-17T05:19:59
| 2020-12-17T05:38:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,197
|
r
|
quickenrichmentscore.R
|
# Enrichment score for an up/down gene-set pair (GSEA-style KS statistic).
#
# For each set, a running-sum statistic is formed from the cumulative hit
# and miss fractions along the ranked list, and the signed deviation at the
# point of maximum absolute difference is taken.  The combined score is
# half the difference between the up-set and down-set statistics.
#
# Fixes over the original: the duplicated up/down computation is factored
# into one helper; the per-element marking loop is vectorized; the unused
# Rank/RS/Or_list2 intermediates are removed; the result is returned
# visibly instead of via a trailing assignment.
#
# @param S    Indices of the up-regulated set (index into `List`).
# @param S1   Indices of the down-regulated set (index into `List`).
# @param List Ranked list: a vector of integer positions.
# @return Numeric scalar: (ES_up - ES_down) / 2.
quickenrichmentscore <- function(S, S1, List) {
  # One-sided enrichment statistic for a single set against the ranked list.
  one_sided_es <- function(set, ranked) {
    n <- length(ranked)
    n_hits <- length(set)
    hits <- numeric(n)
    # NOTE(review): hits are marked at positions ranked[set], i.e. `set`
    # indexes into `ranked` and the *values* of `ranked` are positions.
    # Preserved exactly from the original implementation; confirm this
    # indexing convention against callers.
    hits[ranked[set]] <- 1
    p_hit <- cumsum(hits) / n_hits
    p_miss <- cumsum(1 - hits) / (n - n_hits)
    # which.max() takes the first maximum, matching the original
    # apply(..., which.max) tie-breaking on a single column.
    peak <- which.max(abs(p_hit - p_miss))
    p_hit[peak] - p_miss[peak]
  }
  es_up <- one_sided_es(S, List)
  es_down <- one_sided_es(S1, List)
  (es_up - es_down) / 2
}
|
2b92d530e4d86a94ecea303919b5b42eb4f5951d
|
3314a0e62f49ef830d604c14f5a3941701405860
|
/R CODE/Crime data.R
|
b8418329e5f27b696026d891bfbf4ff23ab7fed3
|
[] |
no_license
|
SaradaPrasadp/Hierarchical-Clustering
|
8a06c5e07cf355b61dc74ad8546b49e8af641c4a
|
9f5795eea25e180bae4a19cb13ff5425ae16105e
|
refs/heads/master
| 2023-06-16T06:15:36.925050
| 2021-07-06T15:05:18
| 2021-07-06T15:05:18
| 383,482,989
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,088
|
r
|
Crime data.R
|
# Crime_Data: hierarchical clustering of crime-rate records.
cd <- read.csv(file.choose())
summary(cd)  # overview: distributions, skewness, potential outliers
sum(is.na(cd))  # confirm there are no missing values

# Min-max normalization helper.  NOTE(review): defined but unused below;
# standardization is done with scale() instead.  Kept for reference.
norm <- function(x) {
  (x - min(x)) / (max(x) - min(x))
}

# Standardize the numeric columns (2:5) so all features share one scale.
std_cd <- scale(cd[, 2:5])
summary(std_cd)

# Distance matrix on the standardized data.
# BUG FIX: the original called dist(norm_cd, ...) but `norm_cd` was never
# defined anywhere; the standardized matrix is `std_cd`.
dist_cd <- dist(std_cd, method = "euclidean")
fit <- hclust(dist_cd, method = "complete")

# Display dendrogram
plot(fit)
plot(fit, hang = -2)

# Cut the tree into 3 clusters (e.g. high / moderate / low crime rate).
groups <- cutree(fit, k = 3)
rect.hclust(fit, k = 3, border = "red")

# Attach cluster labels and summarize each cluster by its column means.
Crime_category <- as.matrix(groups)
final <- data.frame(Crime_category, cd)
aggregate(cd, by = list(final$Crime_category), FUN = mean)

library(readr)
write_csv(final, "Crime data clust.csv")
getwd()
|
b5118e38d63736a2b084f1516bdd2cab9e87f2b0
|
f6faf7e2574bcf84319fc7c7e0d88ee8f9db2231
|
/R/subplex.R
|
9c9ca9f9c0864e61b83332968d039e3a7ef62756
|
[] |
no_license
|
Bhanditz/subplex
|
1c624b630f53ae40f35c2c4191fe57ba99899c30
|
a5e3a231148f3fc88453efc968f86df529063e71
|
refs/heads/master
| 2020-04-19T09:25:18.573965
| 2016-11-19T09:16:14
| 2016-11-19T09:16:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 563
|
r
|
subplex.R
|
# Minimize `fn` starting at `par` with the subplex algorithm (compiled via
# .Call).  Recognized `control` entries are `reltol`, `maxit`, and
# `parscale`; any other entries are silently ignored.  When `hessian` is
# TRUE the C routine also computes a Hessian at the optimum.  Extra
# arguments in `...` are forwarded to `fn`.
subplex <- function (par, fn, control = list(), hessian = FALSE, ...) {
  ## default settings, overridden by matching entries in `control`
  settings <- list(
    reltol = .Machine$double.eps,
    maxit = 10000,
    parscale = rep.int(1, length(par))
  )
  supplied <- names(control)[names(control) %in% names(settings)]
  settings[supplied] <- control[supplied]
  .Call(
    call_subplex,
    par,
    match.fun(fn),
    tol = settings$reltol,
    maxnfe = settings$maxit,
    scale = settings$parscale,
    hessian,
    environment(fn),
    pairlist(...)
  )
}
|
234729b576f0eb0c61d632beb4d3d8aefaa3cb14
|
69f9674002b1e12fed760f5f287c1b15971c46f5
|
/R/validate_textfile.R
|
2b9237fa4d75ce39cc50677b725ed75fad7b51d1
|
[
"MIT"
] |
permissive
|
SimonGoring/pb210dating
|
8cb4f3840a2893d7631975c7eddc8d4b5dd411e7
|
2ae5013ec10fac1628787a327a078dec56a5a5d1
|
refs/heads/master
| 2021-09-05T06:26:36.874643
| 2018-01-24T19:55:25
| 2018-01-24T19:55:25
| 117,278,200
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 191
|
r
|
validate_textfile.R
|
#' @title Validate user supplied metadata text
#' @description Users are expected to provide a set of parameters.
#'
#' @param filename Path to the metadata text file to validate.
#' @return Nothing yet; the validation logic is not implemented.
validate_textfile <- function(filename) {
  # Check the required fields
  # TODO(review): body is an unimplemented stub -- no validation is
  # performed and the function returns NULL.
}
|
0a4c69497611eba286a261815e4332a676ab7430
|
12dce397df2b04249ed08682ddbc63371d1e7469
|
/man/readMulti.rooted.Rd
|
d660b67b418852ebe11d7af30cdd25d18474399d
|
[] |
no_license
|
erenada/Rboretum
|
7e682f14f3c6d778bc4d63d453aed30961ad598c
|
4f58b563a51d55e29ce95797f48ae9712e423c79
|
refs/heads/master
| 2020-09-19T07:52:29.706537
| 2019-11-25T18:43:12
| 2019-11-25T18:43:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 820
|
rd
|
readMulti.rooted.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readMulti.rooted.R
\name{readMulti.rooted}
\alias{readMulti.rooted}
\title{Rboretum Multiphylo Rooted Tree Reader}
\usage{
readMulti.rooted(tree_paths, root_taxa)
}
\arguments{
\item{tree_paths}{Vector of paths to tree files that can be read by ape::read.tree() or ape::read.nexus()}
\item{root_taxa}{Character vector containing outgroup species IDs (Must be in all trees and always monophyletic)}
}
\value{
A multiPhylo object, with each tree rooted at specified taxa
}
\description{
This function is an ape wrapper, and returns a multiPhylo object, with each tree rooted as specified by the user.
}
\examples{
myTrees <- c('/path/to/tree1','/path/to/tree2')
myRootTaxa <- c('Spp1','Spp2')
myMulti <- readMulti.rooted(myTrees,myRootTaxa)
}
|
fac6e7f76a8c2ad7dd753819f986d3fab1ef4006
|
5355ce6341489f05dc5894a70cf5cff1f951a194
|
/man/dbGetCurrentuserEM.Rd
|
d7c8894fa2e46ac365865d99e9ce8c3aeabc90c6
|
[] |
no_license
|
AndreMikulec/econModel
|
5032565f1722275425f75b55811493d45bf87f8c
|
22b8507838116d3e33b6e40cf891988ad104ac7b
|
refs/heads/master
| 2023-06-03T19:23:00.544927
| 2021-06-26T07:38:20
| 2021-06-26T07:38:20
| 303,683,123
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 837
|
rd
|
dbGetCurrentuserEM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AmerAssocIndividInvestorsAAII.R
\name{dbGetCurrentUserEM}
\alias{dbGetCurrentUserEM}
\title{PostgreSQL Current User}
\usage{
dbGetCurrentUserEM(connName, env, exec = TRUE, display = TRUE)
}
\arguments{
\item{connName}{String. Default is "connEM". Contains the name of the variable that contains the name of the "connection" in the environment "env".}
\item{env}{Environment. Default is the .Global environment. This is the environment to return the connection object "connEM".}
\item{exec}{Logical. Whether to execute the query (defaults to \code{TRUE}).}
\item{display}{Logical. Whether to display the query (defaults to \code{TRUE}).}
}
\value{
Current user
}
\description{
Get PostgreSQL CURRENT_USER.
}
\examples{
\dontrun{
dbGetCurrentUserEM()
}
}
|
facace3928645e85545986d199d3b12c44e74a24
|
1287dc617038cb92d7b665b4b4ba91d74e83837f
|
/man/mean.Rd
|
fcf8407fb186cdb84be2efbcbab008953c163124
|
[] |
no_license
|
MiGraber/myTestPackage
|
037d2f41ef2e56a917be0b1a6ec0f86e9adfdce5
|
3c8e4bf6c132d52187e93176d5a679e34524184a
|
refs/heads/master
| 2021-08-14T22:01:44.096533
| 2017-11-16T22:10:49
| 2017-11-16T22:10:49
| 110,173,887
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 407
|
rd
|
mean.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{mean}
\alias{mean}
\title{Calculates the arithmetic mean}
\usage{
mean(x, digits = 3L)
}
\arguments{
\item{x}{a numeric vector}
\item{digits}{integer indicating the number of decimal places}
}
\value{
a numeric vector, rounded to \code{digits} decimal places
}
\description{
Calculates the arithmetic mean and round on the 3rd digit
}
\author{
Michael Graber
}
|
0b3bf58de4b722f64ac20352f7b0ba28aef91ed0
|
8b6d926eedbc58195f9158ea44097fd3b7f8ad12
|
/tests/testthat/test-misc.R
|
c72c4358bf3c0f0e7518231ed61b7ad24a91c4d2
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
mattwarkentin/R6methods
|
708fc4d3915c6fe4ccf3fb10defb662753ebdb17
|
65a07c850bd5414d7846425987f9b0c56676f1f6
|
refs/heads/main
| 2023-02-04T16:50:18.233728
| 2020-12-21T21:01:18
| 2020-12-21T21:01:18
| 320,459,004
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 577
|
r
|
test-misc.R
|
# `length(x)` on an R6 object should dispatch to its .__length__ method.
test_that("length() works", {
  X <- R6::R6Class(
    public = list(
      .__length__ = function() {
        nrow(mtcars)
      }
    )
  )
  x <- X$new()
  expect_equal(length(x), 32)  # mtcars has 32 rows
})
# `names(x)` on an R6 object should dispatch to its .__names__ method.
test_that("names() works", {
  X <- R6::R6Class(
    public = list(
      .__names__ = function() {
        names(mtcars)
      }
    )
  )
  x <- X$new()
  expect_equal(names(x), names(mtcars))
})
# `str(x)` on an R6 object should dispatch to its .__str__ method and print
# the same structure as str(mtcars).
test_that("str() works", {
  X <- R6::R6Class(
    public = list(
      .__str__ = function() {
        str(mtcars)
      }
    )
  )
  x <- X$new()
  # BUG FIX: str() returns NULL invisibly, so the original
  # expect_equal(str(x), str(mtcars)) compared NULL to NULL and passed
  # vacuously.  Compare the printed output instead.
  expect_equal(capture.output(str(x)), capture.output(str(mtcars)))
})
|
3199c59263a60955ff1355de13b8481d4888374c
|
ef977c68e12d7ccc216b17e0ffc37c6e8bc9d057
|
/dep.happy/perm.free.dep.happy/perm.free.dep.happy/readargs.R
|
791274ffba5d8f0092645d2d4e8183c72e61dadf
|
[] |
no_license
|
jflournoy/sea_np_models
|
52e0df1f14c650dcf060bfc4c86d0a7014c2fc35
|
51214f4977ed0a7b4d6adcbd562befe51e150801
|
refs/heads/main
| 2023-04-18T03:57:56.694352
| 2021-05-05T19:47:36
| 2021-05-05T19:47:36
| 189,055,017
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 765
|
r
|
readargs.R
|
# Command-line-style arguments for the permutation-free dep/happy analysis.
# BUG FIX: `debug.Rdata` was an unquoted bare symbol inside c() with no
# separating comma before "--slurmN", which is a syntax error; it is now a
# quoted element.  NOTE(review): it may instead have been intended as a
# suffix of the "--output" prefix (which ends in ".") -- confirm against
# the consuming script.
cmdargs <- c("-m", "/net/holynfs01/srv/export/mclaughlin/share_root/stressdevlab/stress_pipeline/Group/FaceReactivity/NewNeuropoint/dep.happy/perm.free.dep.happy/mask.nii.gz",
             "--set1", "/net/holynfs01/srv/export/mclaughlin/share_root/stressdevlab/stress_pipeline/Group/FaceReactivity/NewNeuropoint/datafiles/setfilenames_happyGTcalm.txt",
             "--setlabels1", "/net/holynfs01/srv/export/mclaughlin/share_root/stressdevlab/stress_pipeline/Group/FaceReactivity/NewNeuropoint/datafiles/depanxcov-midpoint5.csv",
             "--model", "/net/holynfs01/srv/export/mclaughlin/share_root/stressdevlab/stress_pipeline/Group/FaceReactivity/NewNeuropoint/dep.happy/perm.free.dep.happy/permute_free_model.R",
             "--output", "perm.free.dep.happy/perm.free.dep.happy.",
             "debug.Rdata",
             "--slurmN", "60"
)
|
5bb8a4e19b61301c07f3d7a8d8e603a802fa7b3e
|
5038954117333b59dda21d2d12a05fc6fc82061b
|
/man/write_matrix.Rd
|
0b5f7a407892f85d3a15e5c6f62fe31daeded062
|
[] |
no_license
|
fossbert/binilib
|
b4887c996d2fbbcdfb7c4f988822460a9e37e37e
|
173e5fe7e4fb16dec10e1c57fe30cd155a5b895a
|
refs/heads/master
| 2021-06-05T14:55:00.691039
| 2021-04-19T09:20:05
| 2021-04-19T09:20:05
| 157,078,242
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 468
|
rd
|
write_matrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions.R
\name{write_matrix}
\alias{write_matrix}
\title{Convenience function for writing gene expression to file}
\usage{
write_matrix(eset, path)
}
\arguments{
\item{eset}{Numeric gene expression matrix with gene identifiers as row names}
\item{path}{character string indicating path and name of file}
}
\description{
Convenience function for writing gene expression to file
}
|
b35aa7aceb08feac66b534a90d8b3f40461196be
|
253f82ba019b919889d54acea1a67a259e8014aa
|
/GeoCleanR/man/HHI.Rd
|
435511660c5ef6373911b1c195254babcd1734fd
|
[
"MIT"
] |
permissive
|
Jadamso/GeoCleanR
|
98a65a0d73f56357718d3461276931cdd1bdeee1
|
3e09874e375237264bd5b05c4e2bfde53d0f9f0f
|
refs/heads/master
| 2021-01-15T11:03:33.387095
| 2020-01-08T06:25:37
| 2020-01-08T06:25:37
| 99,609,004
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 647
|
rd
|
HHI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HHI.R
\name{HHI}
\alias{HHI}
\title{Calculate HHI for each raster cell}
\usage{
HHI(
rast,
wind,
FUN = hhi_in,
mask = NA,
writedir = NA,
HHIname = paste0("HHIrast_", wind[1], "_", wind[2])
)
}
\arguments{
\item{wind}{size of local windows to consider}
\item{FUN}{what to calculate}
\item{mask}{mask the values afterwards}
\item{writedir}{write the raster to hard disk}
\item{HHIname}{what to name the raster}
\item{rast}{raster from which to perform calculations}
}
\value{
a raster
}
\description{
Calculate HHI for each raster cell
}
\examples{
}
|
5fc1ff2aebf3ab53233714b3c20c4061201f0aa0
|
c8ce461387ababba10c5116dd047325d873a17a4
|
/diversification_analyses/PGLS/PGLS_diversity_and_rangeSize.R
|
d5f8d33e48e173c08aab199af351be9b6a7445e0
|
[] |
no_license
|
elsemikk/tyranni
|
8ff58e6c39e2f402766d13fda413b3504023ec78
|
859f162e55124c72603f25b21172ac0b63188203
|
refs/heads/master
| 2023-05-01T05:57:18.018181
| 2021-01-29T16:15:03
| 2021-01-29T16:15:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,333
|
r
|
PGLS_diversity_and_rangeSize.R
|
# PGLS of speciation rate (ES) against sympatric species richness and range
# area for New World taxa, plus AICc-based model comparison.
setwd("tyranni/diversification_analyses/PGLS")
getwd()
library(ape)
library(caper)
library(phytools)
library(MEDUSA)
library(BAMMtools)
library(scales)
library(phylotate)
# Get ES
es.table <- read.table("../SummaryStats/es/text_files/es_AOSHM.txt")
es <- es.table$x
names(es) <- rownames(es.table)
# Get tree/diversity data
tree <- read.tree('tyranni/species_trees/final_timetrees/T400F_AOS_HowardMoore.tre')
tree <- force.ultrametric(tree)
#overlap.table <- read.table("~/Documents/research/Tyranni/v2/div_analyses/range_maps/Range_overlap_data_allbirds.txt", sep=" ", header=TRUE, row.names=1)
#overlap.table <- read.table("~/Documents/research/Tyranni/v2/div_analyses/range_maps/Range_overlap_data_passerines.txt", sep=" ", header=TRUE, row.names=1)
overlap.table <- read.table("tyranni/other_data/Range_overlap_data.txt", sep=" ", header=TRUE, row.names=1)
# Species richness per taxon = row sum of the pairwise overlap table.
overlap <- rowSums(overlap.table, na.rm=TRUE)
# Need to convert overlap names from species names to tipnamecodes
name.map.file <- read.csv('../../Species_name_map_uids.csv')
name.map <- name.map.file$tipnamecodes
names(name.map) <- name.map.file$aos.howardmoore.species
name.map.used <- name.map[name.map %in% tree$tip.label]
name.map.used.unique <- name.map.used[!duplicated(name.map.used)]
length(name.map.used.unique)
newnames <- as.character(name.map.used.unique[names(overlap)])
names(overlap) <- newnames
# Diversity vs. speciation rate
region.data <- read.table("tyranni/other_data/Range_data_AOSHM_Olson_broad.txt")
areas <- cbind(region.data[,3:11])
rownames(areas) <- as.character(region.data$V1)
region.names <- c("WI", "AM", "AN", "NA", "OW", "PA", "DT", "AF", "CA")
colnames(areas) <- region.names
# Keep taxa whose largest regional area is NOT the Old World ("OW").
NewWorld.only <- areas[!colnames(areas)[apply(areas,1,which.max)] == "OW",]
tree <- drop.tip(tree, setdiff(tree$tip.label, rownames(NewWorld.only))) # Drop Old World
# Restrict overlap to tips on the pruned tree, in tip order, dropping NAs.
overlap <- overlap[names(overlap) %in% tree$tip.label]
overlap <- overlap[tree$tip.label]
overlap <- overlap[!is.na(overlap)]
subtree <- drop.tip(tree, setdiff(tree$tip.label, names(overlap)))
overlap <- overlap[subtree$tip.label]
# Range size
range.data <- read.table("tyranni/other_data/Range_data_AOSHM.txt", header=1)
areas <- range.data$areas
names(areas) <- range.data$V1
# ES vs richness + range area
dframe <- data.frame(subtree$tip.label, es[subtree$tip.label], overlap, areas[subtree$tip.label])
colnames(dframe) <- c("Species", "Speciation", "Species.Richness", "Range.Area")
data <- comparative.data(data=dframe, phy=subtree, names.col="Species")
full <- pgls(Speciation ~ Species.Richness + Range.Area, data=data)
sum <- summary(full)
sum
full.log <- pgls(Speciation ~ Species.Richness + log(Range.Area), data=data)
sum <- summary(full.log)
sum
# Model comparison (leave-one-out approach)
drop.area <- pgls(Speciation ~ Species.Richness, data=data)
drop.richness <- pgls(Speciation ~ Range.Area, data=data)
# Delta-AICc of each reduced model relative to the full model.
drop.area$aicc-full$aicc
drop.richness$aicc-full$aicc
# Akaike weights
full.wgt <- exp(-0.5 * full$aicc)
drop.area.wgt <- exp(-0.5 * drop.area$aicc)
drop.richness.wgt <- exp(-0.5 * drop.richness$aicc)
full.wgt/(full.wgt+drop.area.wgt+drop.richness.wgt)
drop.area.wgt/(full.wgt+drop.area.wgt+drop.richness.wgt)
drop.richness.wgt/(full.wgt+drop.area.wgt+drop.richness.wgt)
# Richness as a function of range area, for reference.
richness.area <- pgls(Species.Richness ~ Range.Area, data=data)
summary(richness.area)
|
0da3103d141c2fc114853d6b342f30ef025dfd5c
|
a27bc5f0552cb30d0de320785017c3557c135762
|
/R/utils.r
|
f85c23ebac7f2c80c7709033233929102f554939
|
[
"MIT"
] |
permissive
|
dimagor/pkgdown
|
bb39f90669d8c498f937014bdce726e66688283e
|
61c3999702966dd75267b98ce905df22521ebd11
|
refs/heads/master
| 2020-08-17T05:01:52.520310
| 2019-10-10T12:57:20
| 2019-10-10T12:57:20
| 215,612,007
| 1
| 0
|
NOASSERTION
| 2019-10-16T18:01:31
| 2019-10-16T18:01:31
| null |
UTF-8
|
R
| false
| false
| 2,973
|
r
|
utils.r
|
# TRUE when every element of `needles` occurs in `haystack`
# (trivially TRUE for an empty `needles`).
set_contains <- function(haystack, needles) {
  length(setdiff(needles, haystack)) == 0
}
# Split a single string into paragraphs at blank lines (a newline, optional
# whitespace, then another newline).  Empty input yields character(0).
# Only the first element of `text` is split, as before.
split_at_linebreaks <- function(text) {
  if (length(text) < 1) {
    return(character())
  }
  pieces <- strsplit(text, "\\n\\s*\\n")
  pieces[[1]]
}
# Relative path that climbs `depth` directory levels ("" for depth 0).
up_path <- function(depth) {
  segments <- rep.int("../", depth)
  paste(segments, collapse = "")
}
# Number of "/" separators in each path of `x`, as an integer vector.
# Rewritten with base vapply() so the helper no longer needs the
# magrittr pipe or purrr::map_int; behavior is unchanged.
dir_depth <- function(x) {
  vapply(
    strsplit(x, "", fixed = TRUE),
    function(chars) sum(chars == "/"),
    integer(1L)
  )
}
# Invert a named list-of-vectors index: for every value appearing in `x`,
# collect the names of the entries that contain it.  Uses base lengths()
# instead of purrr::map_int(x, length); behavior is unchanged.
invert_index <- function(x) {
  stopifnot(is.list(x))
  if (length(x) == 0)
    return(list())
  # Repeat each entry's name once per value it holds, then regroup by value.
  key <- rep(names(x), lengths(x))
  val <- unlist(x, use.names = FALSE)
  split(key, val)
}
# Save all open RStudio documents, when the running RStudio exposes the
# documentSaveAll API; a silent no-op otherwise.
rstudio_save_all <- function() {
  can_save <- rstudioapi::hasFun("documentSaveAll")
  if (can_save) {
    rstudioapi::documentSaveAll()
  }
}
# Elementwise: TRUE where `x` is already a syntactically valid R name.
is_syntactic <- function(x) {
  x == make.names(x)
}
# Strip leading and trailing whitespace runs from each element of `x`.
str_trim <- function(x) {
  sub("^\\s+", "", sub("\\s+$", "", x))
}
## For functions, we can just take their environment.
# Locate the package a re-exported `topic` originally comes from.  For a
# function, its enclosing namespace answers directly; for anything else,
# fall back to scanning the importing namespace's import list.
find_reexport_source <- function(obj, ns, topic) {
  if (!is.function(obj)) {
    return(find_reexport_source_from_imports(ns, topic))
  }
  ns_env_name(get_env(obj))
}
## For other objects, we need to check the import env of the package,
## to see where 'topic' is coming from. The import env has redundant
## information. It seems that we just need to find a named list
## entry that contains `topic`. We take the last match, in case imports
## have name clashes.
# Find which package `topic` was imported from, by scanning the (redundant)
# import list of namespace `ns`; the last matching import wins in case of
# name clashes.  Uses base vapply() instead of purrr::map_lgl; behavior is
# unchanged.
find_reexport_source_from_imports <- function(ns, topic) {
  imp <- getNamespaceImports(ns)
  imp <- imp[names(imp) != ""]
  wpkgs <- vapply(imp, function(syms) topic %in% syms, logical(1L))
  if (!any(wpkgs)) stop("Cannot find reexport source for `", topic, "`")
  pkgs <- names(wpkgs)[wpkgs]
  pkgs[[length(pkgs)]]
}
# devtools metadata -------------------------------------------------------
# TRUE when package `x` is loaded AND was loaded by devtools (devtools
# stamps the namespace with a .__DEVTOOLS__ marker).
devtools_loaded <- function(x) {
  if (!(x %in% loadedNamespaces())) {
    return(FALSE)
  }
  env_has(.getNamespace(x), ".__DEVTOOLS__")
}
# devtools metadata stashed on package `x`'s namespace, or NULL when the
# package was not loaded by devtools.
devtools_meta <- function(x) {
  .getNamespace(x)[[".__DEVTOOLS__"]]
}
# CLI ---------------------------------------------------------------------
# Format a destination path for console output: single-quoted, blue.
dst_path <- function(...) {
  quoted <- encodeString(path(...), quote = "'")
  crayon::blue(quoted)
}
# Format a source path for console output: single-quoted, green.
src_path <- function(...) {
  quoted <- encodeString(path(...), quote = "'")
  crayon::green(quoted)
}
# cat() the pasted arguments, terminating each element with a newline.
cat_line <- function(...) {
  lines <- paste0(..., "\n")
  cat(lines, sep = "")
}
# Print a horizontal rule with a bold left-hand label; extra arguments are
# forwarded to cli::cat_rule().
rule <- function(left, ...) {
  label <- crayon::bold(left)
  cli::cat_rule(left = label, ...)
}
# Collect `...` into a list tagged for YAML printing.
yaml_list <- function(...) {
  print_yaml(list(...))
}
# Tag `x` with the "print_yaml" class so print() renders it as YAML.
print_yaml <- function(x) {
  structure(x, class = "print_yaml")
}
#' @export
print.print_yaml <- function(x, ...) {
  # Render the object as YAML text followed by a trailing newline.
  rendered <- yaml::as.yaml(x)
  cat(rendered, "\n", sep = "")
}
# Skip the enclosing testthat test unless pandoc >= 1.12.3 is available.
skip_if_no_pandoc <- function() {
  pandoc_ok <- rmarkdown::pandoc_available("1.12.3")
  testthat::skip_if_not(pandoc_ok)
}
# Whether network access is permitted, controlled by the
# "pkgdown.internet" option (defaults to TRUE when unset).
has_internet <- function() {
  getOption("pkgdown.internet", default = TRUE)
}
# Evaluate `code` with the working directory temporarily set to `new`,
# restoring the previous directory afterwards (even on error).
with_dir <- function(new, code) {
  previous <- setwd(new)
  on.exit(setwd(previous), add = TRUE)
  force(code)
}
# Remove CRAN-style '' quoting: each lazily-matched 'span' is replaced by
# its unquoted content (e.g. 'title' becomes title).
cran_unquote <- function(string) {
  gsub("'(.*?)'", "\\1", string)
}
|
bbe6aa2d88a59a1d0d0413babd0415c398be1372
|
e189d2945876e7b372d3081f4c3b4195cf443982
|
/man/nn_loss.Rd
|
bc1b4e655fbce98a70b9ca643567e7ecc58a4b10
|
[
"Apache-2.0"
] |
permissive
|
Cdk29/fastai
|
1f7a50662ed6204846975395927fce750ff65198
|
974677ad9d63fd4fa642a62583a5ae8b1610947b
|
refs/heads/master
| 2023-04-14T09:00:08.682659
| 2021-04-30T12:18:58
| 2021-04-30T12:18:58
| 324,944,638
| 0
| 1
|
Apache-2.0
| 2021-04-21T08:59:47
| 2020-12-28T07:38:23
| null |
UTF-8
|
R
| false
| true
| 344
|
rd
|
nn_loss.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/experimental.R
\name{nn_loss}
\alias{nn_loss}
\title{Fastai custom loss}
\usage{
nn_loss(loss_fn, name = "Custom_Loss")
}
\arguments{
\item{loss_fn}{pass custom model function}
\item{name}{set name for nn_module}
}
\value{
None
}
\description{
Fastai custom loss
}
|
36c9e2b5ff13e434d45b3e08f8b9107e98568444
|
c4d6a4797a1b0be9b110d6af769e2844179551f6
|
/MDplot/man/rmsd_average.Rd
|
13094a3ecb824d45c4583f525b43917fd6bf1d24
|
[] |
no_license
|
MDplot/MDplot
|
52c1e1ba759ef03fc88e744607775741475507dd
|
3f4324a7e2fc884f8af75736ab3fdacf8052deea
|
refs/heads/master
| 2022-05-24T15:03:50.615525
| 2022-03-23T15:47:32
| 2022-03-23T15:47:32
| 51,455,487
| 19
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,404
|
rd
|
rmsd_average.Rd
|
% (C) 2017 Christian Margreitter
% last update: 2017-02-20
\name{rmsd_average}
\alias{rmsd_average}
\title{Root-mean-square-deviation (RMSD) average plot}
\description{Combines several RMSD index-value pairs and computes and plots the mean value and the spread (the respective minimum and maximum values) at every timepoint. This function is particularly useful, when multiple identical simulation runs (replicates) need to be analysed since it shows a 'corridor' which allows interpretation e.g. of the overall stability.}
\usage{
rmsd_average( rmsdInput,
levelFactor = NA,
snapshotsPerTimeInt = 1000,
timeUnit = "ns",
rmsdUnit = "nm",
maxYAxis = NA,
barePlot = FALSE,
... )
}
\arguments{
\item{rmsdInput}{List of input tables (which are provided by function \code{\link{load_rmsd}()}).}
\item{levelFactor}{If there are many datapoints, this parameter may be used to use only the \code{levelFactor}th datapoints to obtain a nicer graph.}
\item{snapshotsPerTimeInt}{Number, specifying how many snapshots are comprising one \code{timeUnit}.}
\item{timeUnit}{Specifies, which unit the x-axis is given in.}
\item{rmsdUnit}{Specifies, which unit the y-axis is given in.}
\item{maxYAxis}{Can be used to manually set the y-axis of the plot.}
\item{barePlot}{Boolean, indicating whether the plot is to be made without any additional information.}
\item{...}{Additional arguments (ellipsis).}
}
\value{
Returns a \code{n}x4-matrix, with the rows representing different snapshots and the columns the respective values as follows:
\itemize{\item{snapshot} Index of the snapshot.
\item{minimum} The minimum RMSD value over all input sources at a given time.
\item{mean} The mean RMSD value over all input sources at a given time.
\item{maximum} The maximum RMSD value over all input sources at a given time.
}}
\examples{
# GROMOS (see load_rmsd() for other input possibilities)
rmsd_average( list( load_rmsd( system.file( "extdata/rmsd_example_1.txt.gz",
package = "MDplot" ) ),
load_rmsd( system.file( "extdata/rmsd_example_2.txt.gz",
package = "MDplot" ) ) ),
snapshotsPerTimeInt = 2000, maxYAxis = 0.445 )
}
\author{Christian Margreitter}
\keyword{Root-mean-square-deviation}
|
72fc42fcf27ad93117127e93ff8fa31b4acad21b
|
04dc61178424b438c9c446262d85db8c165a166a
|
/man/do.in.envir.Rd
|
d49bb2828beb143710f8f7c05d4f42e4ee2b2cda
|
[] |
no_license
|
cran/mvbutils
|
0f259152bff14d668055e1924bde1ca91129a9f8
|
0de12969abaf828b5106290951318c79f0741a18
|
refs/heads/master
| 2021-01-17T05:46:40.652339
| 2018-12-12T14:30:03
| 2018-12-12T14:30:03
| 17,697,779
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,642
|
rd
|
do.in.envir.Rd
|
\name{do.in.envir}
\alias{do.in.envir}
\title{Modify a function's scope}
\description{\code{do.in.envir} lets you write a function whose scope (enclosing environment) is defined at runtime, rather than by the environment in which it was defined.
}
\usage{
# Use only as wrapper of function body, like this:
# my.fun <- function(...) do.in.envir( fbody, envir=)
# ... should be the arg list of "my.fun"
# fbody should be the code of "my.fun"
do.in.envir( fbody, envir=parent.frame(2)) # Don't use it like this!
}
\arguments{
\item{ fbody}{the code of the function, usually a braced expression}
\item{ envir}{the environment to become the function's enclosure}
}
\details{
By default, a \code{do.in.envir} function will have, as its enclosing environment, the environment in which it was \bold{called}, rather than \bold{defined}. It can therefore read variables in its caller's frame directly (i.e. without using \code{get}), and can assign to them via \code{<<-}. It's also possible to use \code{do.in.envir} to set a completely different enclosing environment; this is exemplified by some of the functions in \code{debug}, such as \code{go}.
Note the difference between \code{do.in.envir} and \code{\link{mlocal}}; \code{\link{mlocal}} functions evaluate in the frame of their caller (by default), whereas \code{do.in.envir} functions evaluate in their own frame, but have a non-standard enclosing environment defined by the \code{envir} argument.
Calls to e.g. \code{sys.nframe} won't work as expected inside \code{do.in.envir} functions. You need to offset the frame argument by 5, so that \code{sys.parent()} should be replaced by \code{sys.parent( 5)} and \code{sys.call} by \code{sys.call(-5)}.
\code{do.in.envir} functions are awkward inside namespaced packages, because the code in \code{fbody} will have "forgotten" its original environment when it is eventually executed. This means that objects in the namespace will not be found.
The \pkg{debug} package does not yet trace inside \code{do.in.envir} functions-- this will change.
}
\value{Whatever \code{fbody} returns.
}
\examples{
fff <- function( abcdef) ffdie( 3)
ffdie <- function( x) do.in.envir( \{ x+abcdef\} )
fff( 9) # 12; ffdie wouldn't know about abcdef without the do.in.envir call
# Show sys.call issues
# Note that the "envir" argument in this case makes the
# "do.in.envir" call completely superfluous!
ffe <- function(...) do.in.envir( envir=sys.frame( sys.nframe()), sys.call( -5))
ffe( 27, b=4) # ffe( 27, b=4)
}
\seealso{\code{\link{mlocal}}
}
\author{Mark Bravington}
\keyword{programming}
\keyword{utilities}
|
fd62f6d7964a319cb597299de21e2f5f28b54473
|
3371d7dce7d02905cfbcf52f28ec58c26dfbb222
|
/ui.R
|
10544824e7a0e10950ea18fc0cd78d45b88959ee
|
[
"MIT"
] |
permissive
|
thevaachandereng/LPWC-Shiny
|
254d11cfddf35116c39c1328522467a5b615cec0
|
db03aa9f046bac30e3944e4f5ac4548eb514c7bd
|
refs/heads/master
| 2021-08-29T10:56:55.069218
| 2017-12-13T19:54:04
| 2017-12-13T19:54:04
| 113,227,128
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 701
|
r
|
ui.R
|
library(shiny)
library(LPWC)

# Shiny UI for Lag Penalized Weighted Correlation.
# BUG FIX: in the original, tags$a(...) floated between sidebarPanel(...)
# and mainPanel(...) with no separating comma -- a syntax error, and
# pageWithSidebar() accepts exactly three panels.  The link now lives
# inside the sidebar.
pageWithSidebar(
  headerPanel('Lag Penalized Weighted Correlation'),
  sidebarPanel(
    fileInput('file1', 'Data (CSV file)',
              accept=c('text/csv', 'text/comma-separated-values,text/plain', '.csv')),
    radioButtons('sep', 'Separator',
                 c(Comma=',',
                   Semicolon=';',
                   Tab='\t'),
                 ','),
    radioButtons('pen', 'Penalty',
                 c(High = 'high',
                   Low = 'low'),
                 'high'),
    numericInput("k", "Number of clusters:", 10),
    tags$a(href="https://github.com/gitter-lab/LPWC", "Click here!")
  ),
  mainPanel(tableOutput(outputId = 'table.output'))
)
|
dd618547f6989b31202a8d4b739872545abe3f7f
|
43d4b9dc11ad7721b8d872dfd0781d468d2a26fe
|
/OECD files.R
|
89df769b40e3aee587e5e21acc5e9fedd1429b94
|
[] |
no_license
|
Paulrans/LST_Thesis
|
247bc2eb3698f5c3ba472220bdd2ce740bbb80f4
|
000183863dc4267b74b4d6cd8c88f6bfa87878ea
|
refs/heads/main
| 2023-02-26T21:47:27.603671
| 2021-02-10T11:13:57
| 2021-02-10T11:13:57
| 337,698,848
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
OECD files.R
|
# BUG FIX: get_datasets() and search_dataset() come from the OECD package,
# which the original script never loaded; running it standalone failed with
# "could not find function".  The dependency is now made explicit.
library(OECD)

# Fetch the catalogue of OECD datasets and search it for oil-related series.
set <- get_datasets()
search_dataset("oil", data = set)
|
2ddeeecaf4583a1fdfdd1e1e0b1cd8fed67edede
|
2d39f850aad64a1880359de6ce9808459be330e3
|
/last.R
|
5ee8e04895faa7d159615aed9b494d1593bc0f87
|
[] |
no_license
|
andreasose/music-mining
|
e78774046d1f9000f66980337bce54625140bb30
|
f89b7beebdc30eca6641af70af38a77c65536ef0
|
refs/heads/master
| 2016-08-12T05:25:53.185318
| 2016-03-01T22:25:32
| 2016-03-01T22:25:32
| 51,100,524
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,915
|
r
|
last.R
|
# Exploratory analysis of a last.fm listening-history export: top-played
# artists over time (all years, then 2015 only) and most-played songs.
library(plyr)
library(ggplot2)
library(plotly)
library(lubridate)
#Make sure your csv file is in the working directory
music <- read.csv("andreas.csv", header = FALSE)
#By default, the data comes without column headers, so they are added manually:
colnames(music) <- c("artist", "album", "song", "date")
#the date column is down to the second. We only need it by day, month and year:
music$date <- as.Date(music$date, format = "%d %B %Y")
# Keep scrobbles from June 2010 onward (the string is coerced to Date here).
music <- music[music$date >= "2010-06-01", ]
#first we look at top 12 played artists over time:
artist_freq <- count(music, "artist")
artist_freq <- artist_freq[order(-artist_freq$freq),]
top_12 <- artist_freq[1:12,]
top_12 <- as.character(top_12$artist)
# Restrict the data to those 12 artists and drop unused factor levels.
top_12 <- music[ music$artist %in% top_12, ]
top_12$artist <- factor(top_12$artist)
#table(factor(top_12$artist))
#This operation takes a little time to process.
# Daily play counts per artist.
artists <- ddply(top_12, c("date" ,"artist"), summarise, count= length(date))
artist_count <- tapply(artists$count, INDEX = artists$artist, sum)
p <- ggplot(data=artists, aes(x=date, y=count)) +
  geom_line() + geom_point() +
  facet_wrap(~artist, nrow = 4)
ggplotly(p)
#let's focus on the year 2015 instead so we can see things a bit more clearly:
music_2015 <- music[music$date >= "2015-01-01" & music$date <= "2015-12-31", ]
artist_freq <- count(music_2015, "artist")
artist_freq <- artist_freq[order(-artist_freq$freq),]
top_12 <- artist_freq[1:12,]
top_12 <- as.character(top_12$artist)
top_12 <- music_2015[ music_2015$artist %in% top_12, ]
top_12$artist <- factor(top_12$artist)
artists <- ddply(top_12, c("date" ,"artist"), summarise, count= length(date))
#artist_count <- tapply(artists$count, INDEX = artists$artist, sum)
p <- ggplot(data=artists, aes(x=date, y=count)) +
  geom_line() + geom_point() +
  facet_wrap(~artist, nrow = 4)
ggplotly(p)
#what about most played songs of all time??
song_freq <- count(music, "song")
song_freq <- song_freq[order(-song_freq$freq),]
top_12 <- song_freq[1:12,]
top_12 <- as.character(top_12$song)
top_12 <- music[ music$song %in% top_12, ]
top_12$song <- factor(top_12$song)
songs <- ddply(top_12, c("date" , "song"), summarise, count= length(date))
p <- ggplot(data=songs, aes(x=date, y=count)) +
geom_line() + geom_point() +
facet_wrap(~song, nrow = 4)
ggplotly(p)
#what about most played albums of 2015??
album_freq <- count(music_2015, "album")
#Consider removing all albums with no album label
album_freq <- album_freq[order(-album_freq$freq),]
top_12 <- album_freq[1:12,]
top_12 <- as.character(top_12$album)
top_12 <- music_2015[ music_2015$album %in% top_12, ]
top_12$album <- factor(top_12$album)
albums <- ddply(top_12, c("date" ,"artist", "album"), summarise, count= length(date))
#This command fucks with R since it has to overlay so many plays
p <- ggplot(data=albums, aes(x=date, y=count, color = artist)) +
geom_line() + geom_point() +
facet_wrap(~album, nrow = 4)
ggplotly(p)
qplot(music_2015$date)
geom_line(music_2015$date)
p <- ggplot(data=music_2015, aes(x=date)) +
geom_histogram()
p
#Compare trends in volume by year, starting from 2011:
## this is all still experimental code
music_played <- music[music$date >= "2011-01-01" & music$date <= "2015-12-31", ]
music_played <- ddply(music_played, c("date"), summarise, count= length(date))
music_played$year <- as.factor(format(music_played$date, format = "%Y"))
music_played$month <- as.factor(format(music_played$date, format = "%m"))
music_played$day <- as.factor(format(music_played$date, format = "%d"))
music_played$daymonth <- as.factor(format(music_played$date, format = "%m%d"))
ggplot(data=music_played, aes(x=daymonth, y=count)) +
geom_line(aes(group = year)) + facet_wrap(~year, nrow = 2)
ggplot(data=music_played, aes(x=daymonth, y=count)) +
geom_line(aes(group = year)) + facet_wrap(~day, nrow = 2)
facet_wrap(~artist, nrow = 4)
|
dd1c9fb989a026905c0508a51bc18ee304700122
|
4419dcaad86d41cca6ad026a6a6c72e408fa62eb
|
/tests/testthat/test-parameters.R
|
4edcb32f0c53ecdebfd9168bbe6a563399b7303d
|
[
"MIT"
] |
permissive
|
poissonconsulting/mcmcr
|
c122a92676e7b1228eedb7edaebe43df823fdeb8
|
ca88071369472483e7d73914493b99fd9bda9bd5
|
refs/heads/main
| 2023-06-24T09:43:04.640793
| 2023-06-13T00:19:29
| 2023-06-13T00:19:29
| 70,411,531
| 15
| 3
|
NOASSERTION
| 2022-06-21T03:07:27
| 2016-10-09T15:18:09
|
HTML
|
UTF-8
|
R
| false
| false
| 1,089
|
r
|
test-parameters.R
|
test_that("parameters.mcmcr", {
rlang::local_options(lifecycle_verbosity = "quiet")
lifecycle::expect_deprecated(parameters(mcmcr::mcmcr_example))
expect_identical(parameters(mcmcr::mcmcr_example), c("alpha", "beta", "sigma"))
expect_identical(parameters(mcmcr::mcmcr_example, scalar = TRUE), c("sigma"))
expect_identical(parameters(mcmcr::mcmcr_example, scalar = FALSE), c("alpha", "beta"))
parameters(mcmcr_example) <- c("alpha1", "alpha2", "alpha3")
expect_identical(parameters(mcmcr_example), c("alpha1", "alpha2", "alpha3"))
})
test_that("parameters.mcmcrs", {
rlang::local_options(lifecycle_verbosity = "quiet")
mcmcrs <- mcmcrs(mcmcr::mcmcr_example, mcmcr::mcmcr_example)
lifecycle::expect_deprecated(parameters(mcmcrs))
expect_identical(parameters(mcmcrs), c("alpha", "beta", "sigma"))
expect_identical(parameters(mcmcrs, scalar = TRUE), c("sigma"))
expect_identical(parameters(mcmcrs, scalar = FALSE), c("alpha", "beta"))
parameters(mcmcrs) <- c("alpha1", "alpha2", "alpha3")
expect_identical(parameters(mcmcrs), c("alpha1", "alpha2", "alpha3"))
})
|
b0f2b57a5aa2a579af2143d4771fe73d7b991bb5
|
1eb964100bbc7f4ef1ffebc762826da618045679
|
/단일.R
|
57c79f268f658f59a84b2c0eb74ec7b6caaefc01
|
[] |
no_license
|
kjihoon/AllRecover
|
2ba973d92c401a5d26363d34f7a5f9ce4329cde0
|
7cb9b743be2aa043f41cd3c6eff280e5e9fe20a4
|
refs/heads/master
| 2020-03-22T15:42:27.469081
| 2018-07-12T06:32:21
| 2018-07-12T06:32:21
| 140,271,409
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 999
|
r
|
단일.R
|
df<-read.csv("병원정보3.csv")
ykiho<-df[,2]
ykiho<-as.character(ykiho)
url<-"http://apis.data.go.kr/B551182/hospAsmRstInfoService/getHospWhlAsmRstList?ServiceKey=BVnt6h5rMtX52Vuu5ckoGKesnewYmnOTSYLd6GaQ4vjGnBMCgCG%2FEoMuP5StXjfQQZmrLeNU1luHjqbDDbu4cg%3D%3D&ykiho="
list<-c()
for(j in 1:length(ykiho)){
urlf<-paste0(url,ykiho[j])
html<-read_html(urlf)
nodes<-html_nodes(html,"item")
if (length(nodes)==0){
print("empty")
nodes<-html_node(html,"resultmsg")
}
list[[ykiho[j]]]<-nodes
if (j%%100==0){
print(j)
}
}
llist_final<-list
out<-function(node,name){
out<-c()
for (i in name){
nd<-html_nodes(node,i)
txt<-html_text(nd)
if (is.na(txt)||length(txt)==0){
txt<-"null"
}
out<-append(out,txt)
}
return(out)
}
df<-rbind(c("ykiho",name))
for (i in 1:length(list)){
row<-out(list[[i]],name)
row<-c(names(list[i]),row)
df<-rbind(df,row)
if (i%%100==0){
print(i)
}
}
|
261509323022e0a78c3dfe4769631f651b44a388
|
3116af50b95f348bbcb506adc4c733a2327b8234
|
/Script/scrape_BMC.R
|
fd47ca438fd4e1592de17fa84a14975c4d465f23
|
[
"CC0-1.0"
] |
permissive
|
andreaspacher/openeditors
|
2e2d228906775c89ecbc9e793b9d80266fcf93ef
|
909f2036727dea844e569cc6a96cce2eb7c69d09
|
refs/heads/main
| 2023-04-14T00:28:19.941845
| 2022-12-27T07:14:30
| 2022-12-27T07:14:30
| 340,627,338
| 45
| 9
|
CC0-1.0
| 2022-04-21T23:02:16
| 2021-02-20T10:32:47
|
R
|
UTF-8
|
R
| false
| false
| 5,742
|
r
|
scrape_BMC.R
|
library(tidyverse)
library(rvest)
# regex-function to clean strings from html codes
clean_html <- function(htmlString) {
return(gsub("<.*?>", "", htmlString))
}
# get journals
journals <- read.csv(url("https://github.com/andreaspacher/academic-publishers/blob/main/Output/allpublishers-PRELIMINARY-2021-12-09.csv?raw=true")) %>%
filter(publisher == "Springer" | publisher == "Springer Nature" | publisher == "BioMedCentral") %>%
distinct() %>%
select(journal, url)
# get links to editorial boards
JJ <- list.files(path = "Output\\2022-Springer-Links", pattern="springer.*.csv")
JJ <- lapply(paste0("Output\\2022-Springer-Links\\", JJ), read_csv)
JJ <- data.table::rbindlist(JJ)
JJ <- JJ %>%
filter(!is.na(editors_url) & editors_url != "https://www.springer.com/gp/authors-editors" & editors_url != "/authors-editors")
JJ <- JJ %>%
filter(grepl("Editorial ((B|b)oard|(S|s)taff)|About the (E|e)ditor", editors_linktext)
& !is.na(editors_linktext))
# merge the files together
journals <- left_join(JJ, journals)
journals <- journals %>%
mutate(url = ifelse(grepl("nature", url), "https://www.nature.com", url),
editors_url = ifelse(editors_url == "/about/editorial-board" | grepl("nature|biomedc", url), paste0(url, editors_url),
paste0("https://www.springer.com", editors_url)),
editors_url = stringr::str_extract(editors_url, "http(?![\\s\\S]*http).*")) %>%
filter(!grepl("volumesAndIssues", editors_url)
& editors_url != "https://www.springer.com/about-the-editors"
& editors_url != "https://bmcserieseditors.biomedcentral.com/"
& !grepl("javascript:|authors/editorial_policies", editors_url)
& !grepl("Join|recruiting|Call for|Introducing", editors_linktext)) %>%
distinct() %>%
select(-url) %>%
select(journal, "url" = editors_url, editors_linktext) %>%
distinct()
# only select BMC journals
journals <- journals %>%
filter(grepl("biomedc", journals$url) & !grepl("for-editorial-board", journals$url))
journals %>%
select(journal) %>%
distinct() %>% count()
# prepare the scraping process
EdList <- list()
for(i in 1:nrow(journals)) {
printtext <- paste(i, journals$url[i], sep=": ")
print(printtext)
# start scraping
wholepage <- try(xml2::read_html(journals$url[i]), silent = TRUE)
# did it work?
if (inherits(wholepage, "try-error")) {
print("--- 404 error?")
next
}
webpage <- html_node(wholepage, "main")
xmltext <- trimws(webpage)
#=====================
# (1) parse roles
#=====================
roles <- html_nodes(webpage, "strong") %>%
html_text()
#=====================
# (1b) count nr of editors per role
#=====================
groups <- strsplit(
xmltext
, '(<strong>)[^<]{8,}(?=</strong>|</div>|</p>)'
, perl=TRUE
)
groups <- lapply(groups, function(x){x[!x == ""]}) # remove empty string (that always occurs due to strsplit)
groups <- lapply(groups, function(x){x[grepl(",", x)]}) # remove empty string (that always occurs due to strsplit)
italicnumbers <- unlist(lapply(groups,
function(x)
str_count(x, '(?<=<i>|<em>)[\\s\\S]+?(?=</i>|</em>)')
)
)
italicnumbers <- italicnumbers[italicnumbers != 0]
roles <- try(rep(roles, times=italicnumbers),
silent = TRUE)
if (inherits(roles, "try-error")) {
roles <- NA
}
#=====================
# (2) parse editors
#=====================
editors <- unlist(stringr::str_extract_all(webpage, '(?<=<br>|<p>)(?:(?!<strong>)[\\s\\S])*?(?=<br>|</p>)'))
editors <- editors[nchar(editors) > 6]
#=====================
# (3) parse affiliations
#=====================
affiliations <- stringr::str_extract_all(editors, '(?<=<i>|<em>)(?:(?!</i>).)+?(?=</i>|</em>)')
affiliations <- stringr::str_squish(affiliations)
affiliations <- affiliations[nchar(affiliations) > 8]
editors <- stringr::str_extract(editors, ".*?(?=(<em>|<i>))")
#=====================
# (4) remove empty parts from list & clean data
#=====================
roles <- roles[!sapply(roles, identical, character(0))]
editors <- editors[!sapply(editors, identical, character(0))]
affiliations <- affiliations[!sapply(affiliations, identical, character(0))]
roles <- trimws(clean_html(roles))
editors <- trimws(clean_html(editors))
affiliations <- trimws(clean_html(affiliations))
editors <- gsub(",$", "", editors)
#=====================
# (5) CHECK
#=====================
# they should be of equal length:
len1 <- length(affiliations)
len2 <- length(editors)
len3 <- length(roles)
if(is_empty(roles)) { roles <- NA }
if(len1 == len2 & len3 != len1) {
roles <- NA
}
if((is.na(roles) & len1 == len2 & len1 != 0) | all(sapply(list(len1,len2,len3), function(x) x == len3) & len3 != 0)) {
EdB <- do.call(rbind, c(Map(data.frame,
editors = editors,
affiliations = affiliations,
roles = roles,
publisher = "BioMedCentral",
journal = journals$journal[i],
url = journals$url[i]
), make.row.names = FALSE))
EdB$date <- Sys.Date()
} else {
print("-- some error")
Sys.sleep(2.5)
next
}
EdList[[i]] <- EdB
print(paste0("--- found ", nrow(EdB), " editors!"))
Sys.sleep(3)
}
DF <- dplyr::bind_rows(EdList) %>%
select(publisher, journal, "role" = roles, "editor" = editors, "affiliation" = affiliations, url, date)
write_tsv(DF, paste0("Output\\2022-Scraping\\BioMedCentral-", Sys.Date(), ".tsv"))
|
c1a112fc73f5858deb59abc8c176970d535c198a
|
e3df4de339e023592659ef87dac9115abf85ac73
|
/hbolouri/oncoDev/Oncoscape/R/NanoStringExpressionData.R
|
f7c79fb91c8870110819d342526dcca8da28690a
|
[
"MIT"
] |
permissive
|
oncoscape/webapp-R-package
|
99919a2c227b3956a638d825c31d2c4ffdc090fd
|
a762d410a70b5eb12496802ae45680440ad0ebb7
|
refs/heads/master
| 2021-01-22T13:57:50.080131
| 2014-09-24T17:56:50
| 2014-09-24T17:56:50
| 24,355,025
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,337
|
r
|
NanoStringExpressionData.R
|
# incoming message function to call return.cmd
# ------------------- ---------------- -------------
addRMessageHandler("NanoStringExpresssionData.ping", "nanoStringPing") # handleNanoStringping
addRMessageHandler("requestTissueNames", "sendTissueNames") # handleTissueNames
addRMessageHandler("requestAverageExpression", "sendAverageExpression") # handleAverageExpression
addRMessageHandler("getNanoStringExpressionData", "getNanoStringExpressionData") # handleNanoStringExpressionData
addRMessageHandler("getGbmPathwaysCopyNumberData", "getGbmPathwaysCopyNumberData") # handleGbmPathwaysCopyNumberData
addRMessageHandler("getGbmPathwaysMutationData", "getGbmPathwaysMutationData") # handleGbmPathwaysMutationData
#----------------------------------------------------------------------------------------------------
nanoStringPing <- function(WS, msg)
{
return.cmd <- "handleNanoStringPing"
return.msg <- toJSON(list(cmd=return.cmd, status="success",
payload="ping!"))
sendOutput(DATA=return.msg, WS=WS);
} # nanoStringPing
#----------------------------------------------------------------------------------------------------
sendTissueNames <- function(WS, msg)
{
return.cmd <- "handleTissueNames"
return.msg <- toJSON(list(cmd=return.cmd, status="success",
payload=tbl.idLookup$specimen))
sendOutput(DATA=return.msg, WS=WS);
} # sendTissueNames
#----------------------------------------------------------------------------------------------------
sendAverageExpression <- function(WS, msg)
{
tissueIDs <- msg$payload
mtx.avg <- calculateAverageExpression(tissueIDs, mtx.nano)
return.cmd <- "handleAverageExpression"
if(!all(is.na(mtx.avg))) {
payload <- matrixToJSON(mtx.avg)
status <- "success"
}
else{
payload <- NA
status <- failure
}
return.msg <- toJSON(list(cmd=return.cmd, status=status, payload=payload))
sendOutput(DATA=return.msg, WS=WS)
} # sendAverageExpression
#---------------------------------------------------------------------------------------------------
cleanupNanoStringMatrix <- function(tbl.nano, tbl.idLookup, sampleIDs=NA, geneList=NA)
{
if(!all(is.na(sampleIDs)))
sampleIDs.known <- intersect(sampleIDs, tbl.idLookup$specimen)
else
sampleIDs.known <- tbl.idLookup$specimen
btcs.known <- tbl.idLookup[match(sampleIDs.known, tbl.idLookup$specimen),"btc"]
expression.rows <- match(btcs.known, tbl.nano$BTC_ID)
printf("found expression in tbl.nano for %d sampleIDs", length(expression.rows))
tbl.expr <- tbl.nano[expression.rows,]
tbl.expr$samples <- sampleIDs.known
mtx <- as.matrix(tbl.expr[, 4:154])
rownames(mtx) <- tbl.expr$samples
bad.columns <- c("LOC390940","KIAA0746")
for(bad.column in bad.columns){
indx <- grep(bad.column, colnames(mtx))
if(length(indx) > 0)
mtx <- mtx[, -indx]
} # for bad.column
mtx [which(is.na(mtx))] <- 0.0
empty.columns <- as.numeric(which(colSums(mtx) == 0))
empty.rows <- as.numeric(which(rowSums(mtx) == 0))
if(length(empty.columns) > 0)
mtx <- mtx[, -empty.columns]
if(length(empty.rows) > 0)
mtx <- mtx[-empty.rows, ]
if(!all(is.na(geneList))) {
columns.of.interest <- intersect(geneList, colnames(mtx))
mtx <- mtx[, columns.of.interest]
} # if geneList
mtx
} # cleanupNanoStringMatrix
#----------------------------------------------------------------------------------------------------
calculateAverageExpression <- function (tissueIDs, mtx.nano, rowname="average")
{
recognized.tissueIDs <- intersect(rownames(mtx.nano), tissueIDs)
if(length(recognized.tissueIDs) == 0)
return(NA)
mtx.sub <- mtx.nano[recognized.tissueIDs,]
result <- t(as.matrix(colSums(mtx.sub)/nrow(mtx.sub)))
rownames(result) <- rowname
result
} # calculateAverageExpression
#----------------------------------------------------------------------------------------------------
getNanoStringExpressionData <- function(WS, msg)
{
nodeNames.from.network <- fromJSON(msg$payload)
return.cmd <- "handleNanoStringExpressionData"
payload <- ""
status <- "failure"
if(ncol(mtx.nano) > 0) {
payload <- matrixToJSON(mtx.nano)
status <- "success"
}
printf("NanoStringExpressionData::getNanoStringExpressionData returning 'handleNanoStringExpressionData'");
return.msg <- toJSON(list(cmd=return.cmd, status=status, payload=payload))
sendOutput(DATA=return.msg, WS=WS)
} # getAgeAtDxAndSurvivalRanges
#---------------------------------------------------------------------------------------------------
getGbmPathwaysCopyNumberData <- function(WS, msg)
{
nodeNames.from.network <- fromJSON(msg$payload)
printf("getGbmPathwaysCopyNumberData: %s", paste(nodeNames.from.network, collapse=", "));
dp <- DataProvider("MSK_GBM_copyNumber")
tbl <- getData(dp)
# print(tbl)
genes.shared <- intersect(colnames(tbl), nodeNames.from.network)
printf("genes.shared: %s", paste(genes.shared, collapse=","));
tbl <- tbl[, genes.shared]
return.cmd <- "handleGbmPathwaysCopyNumberData"
payload <- ""
status <- "failure"
if(ncol(tbl) > 0) {
payload <- matrixToJSON(tbl)
status <- "success"
}
return.msg <- toJSON(list(cmd=return.cmd, status=status, payload=payload))
#print(return.msg)
sendOutput(DATA=return.msg, WS=WS)
} # getGbmPathwaysCopyNumberData
#---------------------------------------------------------------------------------------------------
getGbmPathwaysMutationData <- function(WS, msg)
{
return.cmd <- "handleGbmPathwaysMutationData"
return.payload <- ""
return.status <- "failure"
#browser()
payload <- as.list(msg$payload)
print(payload)
if(!"mode" %in% names(payload)) {
return.status <- "error"
return.payload <- "no mode field in payload"
return.msg <- toJSON(list(cmd=return.cmd, status=return.status, payload=return.payload))
sendOutput(DATA=return.msg, WS=WS)
return()
} # error: payload has no mode field
if(payload$mode == "ping"){
return.status <- "ping returned"
return.payload <- "nothing"
return.msg <- toJSON(list(cmd=return.cmd, status=return.status, payload=return.payload))
sendOutput(DATA=return.msg, WS=WS)
return()
} # ping
if(payload$mode == "getEntitiesAndFeatures"){
if(!exists("msk.gbm.dp"))
msk.gbm.dp <<- DataProvider("MSK_GBM_mutation")
tbl <- getData(msk.gbm.dp)
entities <- rownames(tbl) # tissues
features <- colnames(tbl) # genes
return.status <- "success"
return.payload <- list(entities=entities, features=features)
return.msg <- toJSON(list(cmd=return.cmd, status=return.status, payload=return.payload))
sendOutput(DATA=return.msg, WS=WS)
return()
} #
# optional extra payload fields: entities, features
if(payload$mode == "getData"){
features <- c()
entities <- c()
if("features" %in% names(payload))
features <- payload$features
if("entities" %in% names(payload))
entities <- payload$entities
if(!exists("msk.gbm.dp"))
msk.gbm.dp <<- DataProvider("MSK_GBM_mutation")
tbl <- getData(msk.gbm.dp)
printf("mutation tbl w/o filtering, rows: %d, columns: %d", nrow(tbl), ncol(tbl))
if(length(features) > 0)
tbl <- tbl[intersect(rownames(tbl), entities),]
if(length(entities) > 0)
tbl <- tbl[, intersect(colnames(tbl), features)]
return.status <- "success"
printf("mutation tbl rows: %d, columns: %d", nrow(tbl), ncol(tbl))
return.payload <- matrixToJSON(tbl)
return.msg <- toJSON(list(cmd=return.cmd, status=return.status, payload=return.payload))
sendOutput(DATA=return.msg, WS=WS)
return()
} #
} # getGbmPathwaysMutationData
#---------------------------------------------------------------------------------------------------
|
700c4b17d7b03e188facf2cb99c0fcb5967870d4
|
5d0ad197f94a53680dc4172ed3b8f1e8384a7d27
|
/uzhOS/tests/testthat/test-rorcid.R
|
b2e0eeea6f595ee0989bf6c4541384643705fb67
|
[
"MIT"
] |
permissive
|
markrobinsonuzh/os_monitor
|
3356cbc8fb2a826572a8f4d64d1a454a180ffe2b
|
a6acd4740c657b9ebae0a09945862666bf1345f0
|
refs/heads/master
| 2022-02-28T20:44:27.516655
| 2022-02-17T12:43:52
| 2022-02-17T12:43:52
| 243,106,445
| 2
| 1
|
MIT
| 2020-10-07T05:55:18
| 2020-02-25T21:29:29
|
R
|
UTF-8
|
R
| false
| false
| 1,084
|
r
|
test-rorcid.R
|
testthat::test_that("check_if_likely_orcid correct",{
expect_true(check_if_likely_orcid("0000-0002-3048-551X"))
expect_true(check_if_likely_orcid("0000-0002-3048-5511"))
expect_false(check_if_likely_orcid("0000-0002-3048-55X1"))
expect_false(check_if_likely_orcid("0000 0002 3048 5511"))
})
testthat::test_that("retrieve_from_orcid correct",{
mr_orcs <- retrieve_from_orcid("0000-0002-3048-5518")
expect_true(all(names(mr_orcs) %in% c("title","journal","type","doi","year","in_orcid")))
expect_equal(typeof(mr_orcs$year),"integer")
expect_equal(typeof(mr_orcs$in_orcid),"logical")
mr_orcs <- retrieve_from_orcid("noreport")
expect_true(all(names(mr_orcs) %in% c("title","journal","type","doi","year","in_orcid")))
expect_equal(typeof(mr_orcs$year),"integer")
expect_equal(typeof(mr_orcs$in_orcid),"logical")
mr_orcs <- retrieve_from_orcid("0000-0000-0000-0000")
expect_true(all(names(mr_orcs) %in% c("title","journal","type","doi","year","in_orcid")))
expect_equal(typeof(mr_orcs$year),"integer")
expect_equal(typeof(mr_orcs$in_orcid),"logical")
})
|
da16e871d7099f11ba6600877a60b8e80a617f97
|
3c4759629d61a33fc8c36aa21c537d691a53fa54
|
/barcodes_file/barcodes_check.R
|
75205c0ea8743369c67aee4599b609d05d13c8c4
|
[] |
no_license
|
jenessalemon/erisor
|
63264eae14ac0c35f7d85e92c87d84ba4a1d1bf6
|
468c5fdacce7b041e61c7d5f495cd3a1b4ba06a3
|
refs/heads/master
| 2020-12-03T04:18:32.461537
| 2018-01-02T23:07:55
| 2018-01-02T23:07:55
| 95,848,860
| 2
| 1
| null | 2017-07-12T02:34:39
| 2017-06-30T04:33:18
|
R
|
UTF-8
|
R
| false
| false
| 2,243
|
r
|
barcodes_check.R
|
## The objective here is to solve the "columns vs wells" problem. Barcodes are pulled
## from a different dataframe, and assigned to the appropriate spot in the dataframe
## containing samples and wells. Note that this must be done one plate at a time!
## The two input files are well_sample, which has two columns, "Well" and "Sample",
## And gomp_layout which also has two columns, "well" and "barcode."
setwd('/Users/jimblotter/Desktop/Grad_School/Data_Analysis/erisor/QC/')
diff_barcode_check <- function(gomp_layouts, well_samples){
gomp_layout <- read.csv(gomp_layouts, header = TRUE, stringsAsFactors = FALSE)
well_sample <- read.csv(well_samples, header = TRUE, stringsAsFactors = FALSE)
A1A2A3 <- c(gomp_layout$well)
for(i in A1A2A3){
samp <- well_sample[which(well_sample$Well==i),2] #bcode is a holder variable, which gets, from gompert_layout, the variable in the second column from the row where the well = value at ith iteration in the list.
gomp_layout[which(gomp_layout$well==i),2] <- samp #assign that variable (a barcode), to the (previously empty) cell in Barcode column of well_sample, on the row where that same variable is found.
}
#gomp_layout$well <- NULL #drop the well column
return(gomp_layout)
}
#Now, let's run it on all three
product1 <- diff_barcode_check("gompert_layout_p1.csv", "well_samp_p1.csv") #save output of the function to a variable
#product1
product2 <- diff_barcode_check("gompert_layout_p2.csv", "well_samp_p2.csv") #save output of the function to a variable
#product2
product3 <- diff_barcode_check("gompert_layout_p3.csv", "well_samp_p3.csv") #save output of the function to a variable
#product3
#And write to files
write_tsv(product1, path = '/Users/jimblotter/Desktop/Grad_School/Data_Analysis/erisor/QC/Ripy_barcodes1') #tab delineated
write_tsv(product2, path = '/Users/jimblotter/Desktop/Grad_School/Data_Analysis/erisor/QC/Ripy_barcodes2') #tab delineated
write_tsv(product3, path = '/Users/jimblotter/Desktop/Grad_School/Data_Analysis/erisor/QC/Ripy_barcodes3') #tab delineated
|
c5c2391aace9b5bb8eab0f0041e3822d27b34b0a
|
12499f173ba030d444d11e3884c0bb538f8a136c
|
/R/dynamictableTM.R
|
10749b49d4c0524d1e1b27ae1812aacf200d7a2a
|
[
"Apache-2.0"
] |
permissive
|
StanWijn/cemtool
|
7ff5b451fa689f44fb2f41e5aba2271793ad1fb5
|
8d7e277c7f5a263d28407a57c5fb81b16f4ce95a
|
refs/heads/master
| 2020-05-29T14:03:27.703471
| 2020-04-07T09:36:24
| 2020-04-07T09:36:24
| 189,181,805
| 1
| 0
| null | 2020-04-07T09:36:25
| 2019-05-29T08:18:40
|
HTML
|
UTF-8
|
R
| false
| false
| 4,666
|
r
|
dynamictableTM.R
|
# Dynamic table to alter treatment effect
#' @import rhandsontable shiny shinydashboard utils
#outdir=getwd()
editmatrix <- function(m.P, m.P_treatment){
DFtrans <- data.frame(m.P)
DFcost <- data.frame(m.P_treatment)
title <- tags$div(h2("Step 3: Transition probability matrix"))
header <- dashboardHeader(tags$li(class = "dropdown",
tags$style(".main-header {max-height: 100px}"),
tags$style(".main-header .logo {height: 100px}")),
title = title,
titleWidth = '100%')
sidebar <- dashboardSidebar(disable = TRUE)
body <- dashboardBody(
tags$head(tags$style(HTML('
.skin-blue .main-header .logo {
background-color: #3c8dbc;
}
.skin-blue .main-header .logo:hover {
background-color: #3c8dbc;
}
'))),
tags$style(HTML("hr {border-top: 1px solid #000000;}")),
tags$hr(),
wellPanel(
uiOutput("message", inline=TRUE),
div(class='row',
div(class="col-sm-6",
actionButton("save", "Save")))
),
tags$hr(),
fluidRow(
column(5, align = "left",
helpText("Make sure that the sum of each row is equal to 1!"),
br(),
helpText("Rows indicate the originating healthstate, colums indicate targeted healthstate.
Example: first row, second column is the probability to move from the first healthstate to the second."),
br(),
helpText("Transitionmatrix of usual care"),
rHandsontableOutput("hot"),
br(),
helpText("Transitionmatrix of treatment"),
rHandsontableOutput("cost"),
br()
),
column(7,
plotOutput("plotmodel")
)
),
fluidRow(column(4,
tags$hr(),
wellPanel(
div(class='row',
div(class="col-sm-6",
actionButton("save2", "Save")))
),
tags$hr())
)
)
server <- shinyServer(function(input, output, session) {
session$onSessionEnded(function() {
stopApp()
})
values <- reactiveValues()
# --- probability input
observe({
if (!is.null(input$hot)) {
values[["previous"]] <- isolate(values[["DFtrans"]])
DFtrans = hot_to_r(input$hot)
} else {
if (is.null(values[["DFtrans"]]))
DFtrans <- DFtrans
else
DFtrans <- values[["DFtrans"]]
}
values[["DFtrans"]] <- DFtrans
})
output$hot <- renderRHandsontable({
DFtrans <- values[["DFtrans"]]
if (!is.null(DFtrans))
rhandsontable(DFtrans, useTypes = as.logical(F), stretchH = "all") %>%
hot_context_menu(allowRowEdit = FALSE, allowColEdit = FALSE)
})
# --- cost input
observe({
if (!is.null(input$cost)) {
values[["previous"]] <- isolate(values[["DFcost"]])
DFcost = hot_to_r(input$cost)
} else {
if (is.null(values[["DFcost"]]))
DFcost <- DFcost
else
DFcost <- values[["DFcost"]]
}
values[["DFcost"]] <- DFcost
})
output$cost <- renderRHandsontable({
DFcost <- values[["DFcost"]]
if (!is.null(DFcost))
rhandsontable(DFcost, useTypes = as.logical(F), stretchH = "all") %>%
hot_context_menu(allowRowEdit = FALSE, allowColEdit = FALSE)
})
## Save
observeEvent(input$save | input$save2, {
finalDF <- isolate(values[["DFtrans"]])
finalDF2 <- isolate(values[["DFcost"]])
assign('m.P', as.matrix(finalDF), envir = cemtool.env)
assign('m.P_treatment', as.matrix(finalDF2), envir = cemtool.env)
}
)
output$plotmodel <- renderPlot({
second(cemtool.env$HS, cemtool.env$v.n)
}, width = 900, height = 600)
##-- Message
output$message <- renderUI({
if(input$save==0 & input$save2 == 0){
helpText(sprintf("When you are done editing the transition matrix, press Save and close this window.
To undo your change, press right-mouse button and reload the table"))
}else{
helpText(sprintf("Input saved. Please close this window to continue."))
}
})
})
## run app
runApp(shinyApp(ui= dashboardPage(header, sidebar, body), server=server))
return(invisible())
}
|
d004c6a6c467002191d5a8245fc72403b951eb2e
|
bae869ce879da3f7342a3fec33c8c227d00b2a27
|
/SES_model.R
|
50c3957f5b1b85bec897bd4163d07260015d3347
|
[] |
no_license
|
ThomasKraft/epidemicERGM
|
6cb2112c2d4d2addaee68adc68bbbb1ea389219e
|
74e83710c32090657fa1d261e674d6afb8bae140
|
refs/heads/master
| 2022-04-22T04:03:38.910642
| 2020-04-22T22:27:43
| 2020-04-22T22:27:43
| 256,597,370
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,339
|
r
|
SES_model.R
|
######## SES ##########
# load packages
library(statnet)
library(EpiModel)
library(ndtv)
######## SES ##########
# 1) Network with SES
# initialize empty network with desired attributes
net <- network.initialize(n = 200, directed = FALSE)
net <- set.vertex.attribute(net, "SES", rep(0:1, each = 100))
# Setting model parameters
# NOTE: this is where we can add attributes that will define structure in the way we like based on an ERGM formula
formation <- ~edges + nodematch("SES", diff=TRUE) + nodefactor("SES")
target.stats <- c(150, 30, 15, 20) # correspond to formation formula above
# because networks generated are dynamic, we also set parameters to determine the rate of dissolution of ties
coef.diss <- dissolution_coefs(dissolution = ~offset(edges) + offset(nodematch("SES", diff=TRUE)),
duration = c(1, 10, 10))
# Fitting model with desired parameters
mod1 <- netest(net, formation, target.stats, coef.diss, edapprox = T)
# Model diagnostics
dx <- netdx(mod1, nsims = 5, nsteps = 200, dynamic=F)
par(mar = c(3,3,1,1), mgp = c(2,1,0))
plot(dx)
# Simulate epidemic
param <- param.net(inf.prob = 0.3, act.rate = 1, rec.rate=.05)
status.vector <- rbinom(200, 1, 0.03) # seed initial infected individuals
status.vector <- ifelse(status.vector == 1, "i", "s")
init <- init.net(status.vector = status.vector)
# Structural model features
control <- control.net(type = "SIR", nsteps = 300, nsims = 10, epi.by = "SES", ncores=2)
# simulate!
sim1 <- netsim(mod1, param, init, control)
#### WHAT CAN WE EXTRACT FROM THE "sim1" OBJECT ?? ####
# # The rest is just plotting
# # Plots of how the epidemic spread
plot(sim1, mean.line = FALSE, qnts = FALSE, sim.lines = TRUE)
#
# # Plots of how the simulated dynamic networks looked at different timepoints
# par(mfrow = c(1,2), mar = c(0,0,1,0))
# plot(sim1, type = "network", at = 1, col.status = TRUE,
# main = "Prevalence at t1")
# plot(sim1, type = "network", at = 50, col.status = TRUE,
# main = "Prevalence at t100")
#
#
# # plot networks at specific points showing race
nw <- get_network(sim1, sim = 1)
out <- network.extract(nw, at=20)
plot(out, vertex.col="SES")
# Make an animated plot of the networks over a specific duration
slice.par<-list(start=1, end=30, interval=1, aggregate.dur=1,rule="latest")
compute.animation(nw,slice.par=slice.par)
render.d3movie(nw,
filename="SES_network.html",
edge.col="darkgray",displaylabels=TRUE,
label.cex=.6,label.col="blue",
output.mode = 'HTML',
vertex.col="SES")
render.animation(nw, vertex.cex=0.6, vertex.col="SES")
saveVideo(ani.replay(),video.name="SES_network.mp4",
other.opts="-b 5000k",clean=TRUE)
######## RACE ##########
# --- RACE network: build, fit, simulate, and visualize ---------------------
# Same workflow as the SES section above, but with a three-level "race"
# attribute and homophily (nodematch) in the formation model.
# 1) Network with RACE
# initialize empty network with desired attributes: 200 undirected nodes,
# race coded 0/1/2 with group sizes 70/70/60.
net <- network.initialize(n = 200, directed = FALSE)
net <- set.vertex.attribute(net, "race", c(rep(0:1, each = 70), rep(2, 60)))
# Setting model parameters
# NOTE: this is where we can add attributes that will define structure in the way we like based on an ERGM formula
# Formation: overall edge count plus same-race (homophilous) ties.
formation <- ~edges + nodematch("race")
target.stats <- c(200, 180) # correspond to formation formula above
# stats_convert <- function(n,mean.degree,n_race_1,mean_deg_race_1,prcnt_homophil) {
#   return(c(mean.degree*n/2,mean_deg_race_1*n_race_1/2, prcnt_homophil*mean.degree*n/200,n/10 ))
# }
# target.stats <- stats_convert (450, 1, 150, 1, 100)
# because networks generated are dynamic, we also set parameters to determine
# the rate of dissolution of ties (mean partnership duration of 5 steps)
coef.diss <- dissolution_coefs(dissolution = ~offset(edges), duration = 5)
# Fitting model with desired parameters (edapprox = TRUE uses the faster
# edges-dissolution approximation instead of a full STERGM fit)
mod1 <- netest(net, formation, target.stats, coef.diss, edapprox = TRUE)
# Simulate epidemic: per-act transmission probability 0.3, one act per step,
# very slow recovery.
param <- param.net(inf.prob = 0.3, act.rate = 1, rec.rate=0.001)
status.vector <- rbinom(200, 1, 0.03) # seed initial infected individuals (~3%)
status.vector <- ifelse(status.vector == 1, "i", "s")
init <- init.net(status.vector = status.vector)
# Structural model features: SIR, 200 steps, 10 simulations, prevalence by race
control <- control.net(type = "SIR", nsteps = 200, nsims = 10, epi.by = "race", ncores=2)
# simulate!
sim1 <- netsim(mod1, param, init, control)
# Plots of how the epidemic spread
plot(sim1, mean.line = FALSE, qnts = FALSE, sim.lines = TRUE)
# Plots of how the simulated dynamic networks looked at different timepoints
par(mfrow = c(1,2), mar = c(0,0,1,0))
plot(sim1, type = "network", at = 1, col.status = TRUE,
     main = "Prevalence at t1")
plot(sim1, type = "network", at = 50, col.status = TRUE,
     main = "Prevalence at t100")
# plot networks at specific points showing race
nw <- get_network(sim1, sim = 1)
out <- network.extract(nw, at=100)
plot(out, vertex.col="race")
# Make an animated plot of the networks over a specific duration
slice.par<-list(start=1, end=10, interval=1, aggregate.dur=1,rule="latest")
compute.animation(nw,slice.par=slice.par)
# FIX(review): the original call ended with a trailing comma
# (`output.mode = 'inline',` followed by `)`), which creates an empty
# argument and errors at run time. Also pass vertex.col="race" so the
# HTML animation is colored consistently with render.animation() below
# and with the SES section above.
render.d3movie(nw,
               filename="homophily_by_race_network.html",
               edge.col="darkgray", displaylabels=TRUE,
               label.cex=.6, label.col="blue",
               output.mode = 'inline',
               vertex.col="race")
render.animation(nw, vertex.col="race")
saveVideo(ani.replay(),video.name="homophily_by_race_network.mp4",
          other.opts="-b 5000k",clean=TRUE)
|
919c840d64362d9b8ca592de8ca85619f04ade39
|
71101f28de94b8c189ce9b7f7a14cb236dfcfd6e
|
/man/names.resATs.carb.sulf.Rd
|
fbac571e34de9c5caa06f9d0533c436077dc8029
|
[] |
no_license
|
Pawansit/vanddraabe
|
e5434b72fcf771cd28b9dfa6c88a328b140a40ef
|
5eccfec7ed511e4eb2e38a21d2218106d7fb2c42
|
refs/heads/master
| 2023-05-12T18:53:44.718393
| 2021-06-10T20:58:26
| 2021-06-10T20:58:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,197
|
rd
|
names.resATs.carb.sulf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Constants.R
\docType{data}
\name{names.resATs.carb.sulf}
\alias{names.resATs.carb.sulf}
\title{Carbon and Sulfur Residue-AtomType Names}
\format{An object of class \code{character} of length 109.}
\usage{
names.resATs.carb.sulf
}
\description{
Carbon and sulfur residue-atomtype names based on PDB atom
naming conventions.
}
\details{
These residue-atomtype names indicate carbon and sulfur atoms with a
neutral charge.
}
\examples{
names.resATs.carb.sulf
# [1] "ALA CA" "ALA C" "ALA CB" "ARG CA" "ARG C" "ARG CB" "ARG CG"
# "ARG CD" "ARG CZ" "ASN CA" "ASN C" "ASN CB" "ASN CG" "ASP CA" "ASP C"
# [16] "ASP CB" "ASP CG" "CYS CA" "CYS C" "CYS CB" "CYS SG" "GLN CA"
# "GLN C" "GLN CB" "GLN CG" "GLN CD" "GLU CA" "GLU C" "GLU CB" "GLU CG"
# [31] "GLU CD" "GLY CA" "GLY C" "HIS CA" "HIS C" "HIS CB" "HIS CG"
# "HIS CD2" "HIS CE1" "ILE CA" "ILE C" "ILE CB" "ILE CG1" "ILE CG2" "ILE CD1"
# [46] "LEU CA" "LEU C" "LEU CB" "LEU CG" "LEU CD1" "LEU CD2" "LYS CA"
# "LYS C" "LYS CB" "LYS CG" "LYS CD" "LYS CE" "MET CA" "MET C" "MET CB"
# [61] "MET CG" "MET SD" "MET CE" "PHE CA" "PHE C" "PHE CB" "PHE CG"
# "PHE CD1" "PHE CD2" "PHE CE1" "PHE CE2" "PHE CZ" "PRO CA" "PRO C" "PRO CB"
# [76] "PRO CG" "PRO CD" "SER CA" "SER C" "SER CB" "THR CA" "THR C"
# "THR CB" "THR CG2" "TRP CA" "TRP C" "TRP CB" "TRP CG" "TRP CD1" "TRP CD2"
# [91] "TRP CE2" "TRP CE3" "TRP CZ2" "TRP CZ3" "TRP CH2" "TYR CA" "TYR C"
# "TYR CB" "TYR CG" "TYR CD1" "TYR CD2" "TYR CE1" "TYR CE2" "TYR CZ" "VAL CA"
# [106] "VAL C" "VAL CB" "VAL CG1" "VAL CG2"
}
\seealso{
Other constants: \code{\link{names.backbone.atoms}},
\code{\link{names.polar.atoms}},
\code{\link{names.res.AtomTypes}},
\code{\link{names.resATs.nitro.neut}},
\code{\link{names.resATs.nitro.pos}},
\code{\link{names.resATs.oxy.neg}},
\code{\link{names.resATs.oxy.neut}},
\code{\link{names.residues}},
\code{\link{names.sidechain.atoms}},
\code{\link{names.waters}}
}
\author{
Emilio Xavier Esposito \email{emilio@exeResearch.com}
}
\concept{constants}
\keyword{datasets}
|
823290f01173fba3aee5d531437c5d92bb6df6c3
|
a61405b833f7122ace43db9de16b6044b9eb7ae9
|
/processing.R
|
f777ec9bbe85de8a35f168806260fbf513810305
|
[] |
no_license
|
hsrijay/COVID-19-Predictive-Modeling
|
5ad5c32069b7fa291e13f7501bac1a14b6cc0dee
|
4f88c82fe6b72610d191952527897b8051c0a31f
|
refs/heads/main
| 2023-02-08T06:25:37.453267
| 2021-01-04T18:57:59
| 2021-01-04T18:57:59
| 325,624,977
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,767
|
r
|
processing.R
|
# MESSI RNA Seq Analysis #
# NOTE(review): this file mixes R Markdown chunk fences (```{r} ... ```)
# into what appears to be a plain .R script; the fences are syntax errors
# if the file is source()'d directly. Presumably it is run interactively
# chunk-by-chunk — confirm with the author.
# Both batches
batch_type = 'all'
# Path for output #
# NOTE(review): `path` is a hard-coded Windows-specific absolute path; it is
# also re-assigned again below in the "Import Key" chunk.
```{r}
path <- "D:/Research/Summer-2020-+DS/data/"
save.image(file = "processing.RData")
```
# Derived output directories (readable/binary splits per batch type).
raw_data_path <- paste0(path, "data/raw/")
readable_data_path <- paste0(path, "data/processed/", batch_type, "/readable/")
binary_data_path <- paste0(path, "data/processed/", batch_type, "/binary/")
figure_path <- paste0(path, "figures/", batch_type, "/")
# Libraries #
# NOTE(review): ggplot2 is loaded twice (harmless, but redundant).
suppressPackageStartupMessages(library(limma))
suppressPackageStartupMessages(library(edgeR))
suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(EnsDb.Hsapiens.v79))
suppressPackageStartupMessages(library(sva))
suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(gplots))
suppressPackageStartupMessages(library(NMF))
# Number of NMF factors #
```{r}
num_factors = 10
```
# Import Key #
# Sample key / demographics table; keep only the columns used downstream and
# rename them to snake_case.
```{r}
path <- "D:/Research/Summer-2020-+DS/data/"
key <- read.csv(paste0(path,"raw_data/qry_covid_rnaseq_key_demog_analysis(cn 20200512).csv"), check.names=FALSE, stringsAsFactors = FALSE) #qry_20200424_covid_rnaseq_key_demog_sxsum_wFastq(cn 20200507).csv
key <- key[, c('Subject_ID', 'Timepoint', 'age_at_enroll', 'gender', 'race', 'RNA_ID', 'cohort', 'Pathogen', 'studyname')]
colnames(key) <- c('subject_id', 'timepoint', 'age', 'gender', 'race', 'rna_id', 'cohort', 'pathogen', 'studyname')
```
# --- Import and merge the two sequencing batches ---------------------------
# Counts matrices are genes x samples; first column holds gene IDs and is
# promoted to row names.
# Import batch 1 data #
```{r}
batch1 <- read.csv(paste0(path, "raw_data/covid_starprocessed_count_matrix_batch1.csv"), check.names=FALSE, stringsAsFactors = FALSE)
rownames(batch1) <- batch1[, 1]
batch1 <- batch1[, -1]
```
# Import batch 2 data #
```{r}
batch2 <- read.csv(paste0(path, "raw_data/covid_starprocessed_count_matrix_batch2.csv"), check.names=FALSE, stringsAsFactors = FALSE)
rownames(batch2) <- batch2[, 1]
batch2 <- batch2[, -1]
```
# samples in common across batches (sequenced twice)
```{r}
colnames(batch2)[which(colnames(batch2) %in% colnames(batch1))]
```
# key for all repeat samples
```{r}
key_all_repeats = key[which(key$rna_id %in% colnames(batch2)[which(colnames(batch2) %in% colnames(batch1))]), ]
```
# stratify repeat samples: healthy repeats are kept in both batches (for
# batch-effect assessment), non-healthy repeats are dropped from batch 1.
```{r}
healthy_repeats = key_all_repeats$rna_id[which(key_all_repeats$pathogen == 'healthy')]
other_repeats = key_all_repeats$rna_id[which(key_all_repeats$pathogen != 'healthy')]
```
# Remove non-healthy repeats from batch 1
# NOTE(review): `-which(...)` empties the data frame if there are no matches;
# safe here only if `other_repeats` is guaranteed non-empty.
```{r}
dim(batch1)
batch1 <- batch1[, -which(colnames(batch1) %in% other_repeats)]
dim(batch1)
```
# Put healthy repeats at end of batch 1
```{r}
batch1_h <- batch1[, which(colnames(batch1) %in% healthy_repeats)]
batch1 <- batch1[, -which(colnames(batch1) %in% healthy_repeats)]
batch1 <- cbind(batch1, batch1_h)
```
# Put healthy repeats at end of batch 2; suffix "_2" to make column names
# unique across the combined matrix.
```{r}
batch2_h <- batch2[, which(colnames(batch2) %in% healthy_repeats)]
colnames(batch2_h) <- paste0(colnames(batch2_h), "_2")
batch2 <- batch2[, -which(colnames(batch2) %in% healthy_repeats)]
```
# check gene names are identical
```{r}
identical(rownames(batch1), rownames(batch2)) # should be TRUE
```
# counts matrix
```{r}
counts <- cbind(batch1, batch2)
```
# re-order key by subject and time point
```{r}
key <- key[order(as.factor(key$subject_id), as.factor(key$timepoint)), ]
```
# remove samples in data that are not in key
```{r}
match_order <- match(key$rna_id, colnames(counts))
```
# re-organize remaining subjects in data matrix to match those in the key
```{r}
counts <- counts[, match_order]
identical(key$rna_id, colnames(counts))
```
# Add back in healthy repeat samples in batch2 to data and key: duplicate the
# key rows for the healthy repeats with the "_2" suffix, align them to the
# batch2_h column order, and record a batch indicator per sample.
```{r}
counts_sub <- cbind(counts, batch2_h)
key_h <- key[which(key$rna_id %in% healthy_repeats), ]
key_h$rna_id <- paste0(key_h$rna_id, "_2")
key_h <- key_h[match(colnames(batch2_h), paste0(healthy_repeats, "_2")), ]
key <- rbind(key, key_h)
rownames(key) <- 1:nrow(key)
key$batch <- rep(2, nrow(key))
key$batch[1:ncol(batch1)] <- 1
identical(key$rna_id, colnames(counts_sub))
```
# ## Formatting data ##
# Wrap the merged counts and the sample key in an edgeR DGEList.
# FIX(review): the original line read
#   xpr <- DGEList(counts = f, samples = gr_truth)
# but `f` and `gr_truth` are never defined anywhere in this file; this is a
# leftover from a different analysis. The objects built and order-verified
# immediately above are `counts_sub` and `key`
# (identical(key$rna_id, colnames(counts_sub)) was checked), matching the
# author's own commented-out version.
```{r}
library(edgeR)
xpr <- DGEList(counts = counts_sub, samples = key)
```
#
#
# # Transcript names # (Ensembl gene IDs from the counts matrix row names)
```{r}
ref_seq <- rownames(xpr)
```
#
#
# --- Map Ensembl gene IDs to gene symbols and collapse duplicates ----------
# # Annotation to map from transcripts to gene symbols #
```{r}
library(EnsDb.Hsapiens.v79)
gene_id <- AnnotationDbi::select(EnsDb.Hsapiens.v79,
key=ref_seq,
columns=c("SYMBOL"),
keytype="GENEID")
```
#
#
# # Not all transcripts have a gene symbol: different lengths #
# length(ref_seq)
# nrow(gene_id)
#
# # Transcripts that do not map to a gene symbol: keep them under their
# # original Ensembl ID (used as its own "symbol") #
```{r}
genes_nosymb = unique(ref_seq[which(!(ref_seq %in% gene_id$GENEID))])
gene_nosymb_mat = data.frame(GENEID = genes_nosymb, SYMBOL = genes_nosymb)
```
#
#
# # Add back in genes for which a symbol could not be found #
```{r}
nrow(gene_nosymb_mat) == length(ref_seq) - length(gene_id$GENEID) # should be TRUE
gene_id = rbind(gene_id, gene_nosymb_mat)
nrow(gene_id) == length(ref_seq) # should be TRUE
```
#
#
# # Re-order gene annotation to match order of transcripts in data #
```{r}
idx_match = match(rownames(xpr), gene_id$GENEID)
gene_id = gene_id[idx_match, ]
identical(rownames(xpr), gene_id$GENEID) # should be true
```
#
# # Re-name row names of data from transcripts to gene symbols #
```{r}
rownames(xpr) = gene_id$SYMBOL
identical(rownames(xpr), gene_id$SYMBOL)
```
#
#
# # Genes symbols that come up multiple times #
# Several Ensembl IDs can map to the same symbol; their counts are summed
# into a single row. NOTE(review): this loop grows/shrinks the counts matrix
# in place and is order-dependent — do not reorder its statements.
```{r}
dup_uniq_genes = unique(rownames(xpr)[which(duplicated(rownames(xpr)) == T)])
# # Keep track of gene symbols for row names of data #
gene_symbs = rownames(xpr)
# # Sum up reads for gene symbols that are the same #
for(i in 1:length(dup_uniq_genes)){
# # which rows correspond to gene symbol
dup_symb = which(gene_symbs == dup_uniq_genes[i])
# # sum up reads for rows that correspond to same gene symbol
new_row = data.frame(t(colSums(xpr$counts[dup_symb, ])))
rownames(new_row) = dup_uniq_genes[i]
colnames(new_row) = colnames(xpr)
# # delete rows with the same gene symbol
xpr$counts = xpr$counts[-dup_symb, ]
# # create new row that contains the summed reads for that gene symbol
xpr$counts = rbind(xpr$counts, new_row)
# # update row names for data
gene_symbs = c(gene_symbs[-dup_symb], dup_uniq_genes[i])
}
```
#
# # Assign row names for data with appropriate gene symbols #
```{r}
rownames(xpr) <- gene_symbs
length(unique(rownames(xpr))) == length(rownames(xpr)) # should be TRUE
```
#
#
# # Save / Read data # (checkpoint: serialize the annotated DGEList)
```{r}
saveRDS(xpr, file=paste0(path, "xpr.rds"))
xpr <- readRDS(paste0(path, "xpr.rds"))
dim(xpr)
```
# Exclude non-influenza samples from the "Viral" cohort, keeping only
# influenza A pathogens.
```{r}
non_flu_samps = key$rna_id[which(key$cohort == 'Viral' & !(key$pathogen %in% c("Influenza A 2009 H1N1", "Influenza A")))]
length(non_flu_samps)
xpr <- xpr[, -which(colnames(xpr) %in% non_flu_samps)]
key <- key[-which(key$rna_id %in% non_flu_samps), ]
```
# Replace 'Viral' category with 'Influenza'
```{r}
key$cohort[which(key$cohort == 'Viral')] <- 'Influenza'
dim(xpr)
dim(key)
```
### Quality Control ###
# Remove genes with no expression values #
```{r}
dim(xpr$counts)
xpr$counts <- xpr$counts[which(rowSums(xpr$counts) != 0), ]
dim(xpr$counts)
```
# Keep genes if they have higher than 1 count per million in at least half the samples #
# NOTE(review): `cpm` (the variable) shadows edgeR::cpm (the function) after
# this assignment — confusing but not a bug here since cpm() is called first.
```{r}
cpm <- cpm(xpr)
keep_xpr <- rowSums(cpm > 1) >= (ncol(xpr$counts) * (1/2)) # 1/2 the number of samples
xpr <- xpr[keep_xpr, , keep.lib.sizes=FALSE]
dim(xpr)
```
### Quality Control ###
# Number of observations for each subject #
```{r}
subj_table <- table(key$subject_id)
```
# Summary of counts (million reads per sample) #
```{r}
summary(colSums(xpr$counts)/1e6)
```
# Visualize number of reads per sample #
```{r}
library(tidyverse)
boxplot(colSums(xpr$counts)/1e6, xlab = "Samples", ylab = "Million reads per sample"); grid()
plotData <- data.frame(counts = (colSums(xpr$counts)/1e6))
p <- ggplot(plotData, aes(x="", y=counts))
p <- p + geom_boxplot()
p <- p + theme_bw(18)
p <- p + labs(title="Million Reads per Sample", x="samples", y="million reads per sample")
p <- p + theme(plot.title = element_text(size = 18, face="bold", hjust=0.5))
p
ggsave(paste0(path, "processing/exploratory/boxplot_counts.png"), width=10, height=6, dpi=300, units="in", device="png")
```
# Visualize number of reads per sample: sorted by number of reads #
```{r}
plot(sort(colSums(xpr$counts)/1e6), main = "Samples sorted by Number of Reads", xlab = "Samples", ylab = "Million reads per sample")
plotData <- data.frame(samps = seq(1, length(sort(colSums(xpr$counts)/1e6)), 1), counts = sort(colSums(xpr$counts)/1e6))
p <- ggplot(plotData, aes(x=samps, y=counts))
p <- p + geom_point()
p <- p + theme_bw(18)
p <- p + labs(title="Samples sorted by Number of Reads", x="samples", y="million reads per sample")
p <- p + theme(plot.title = element_text(size = 18, face="bold", hjust=0.5))
p
ggsave(paste0(figure_path, "processing/exploratory/boxplot_sort_counts.png"), width=10, height=6, dpi=300, units="in", device="png")
```
# Correlation between samples: boxplot #
# Computes log counts per million #
```{r}
xpr_lcpm <- cpm(xpr, log=TRUE)
```
# Find pairwise correlation of samples #
```{r}
cor_mat = cor(xpr_lcpm, method="spearman")
```
```{r}
library(tidyverse)
boxplot(apply(cor_mat, 1, mean), main = "Sample Correlation", ylab = "Correlation")
plotData <- data.frame(corr = apply(cor_mat, 1, mean))
p <- ggplot(plotData, aes(x="", y=corr))
p <- p + geom_boxplot()
p <- p + theme_bw(18)
p <- p + labs(title="Sample Correlation", x="samples", y="correlation")
p <- p + theme(plot.title = element_text(size = 18, face="bold", hjust=0.5))
p
```
ggsave(paste0(figure_path, "processing/exploratory/boxplot_samp_cor.png"), width=10, height=6, dpi=300, units="in", device="png")
# Samples with bad quality: mean spearman correlation less than 0.75 #
# NOTE(review): `cor_rm` is computed but never used afterwards.
```{r}
cor_rm <- colnames(cor_mat)[which(apply(cor_mat, 1, mean) < 0.75)]
```
# NOTE(review): this chunk looks broken/experimental and should be revisited:
# - `key <- keys; xpr <- xprs` immediately undoes the backups just taken;
# - `key_all_repeats` is a data frame, so `%in% key_all_repeats` compares
#   against its column names, not the sample IDs — presumably
#   `key_all_repeats$rna_id` was intended (TODO confirm);
# - `keyz` is not defined anywhere above (the key object is `key`);
# - `anti_join` without `by=` against a full data frame is fragile.
```{r}
keys <- key
xprs <- xpr
key <- keys
xpr <- xprs
xpr$samples <- xpr$samples[!rownames(xpr$samples) %in% key_all_repeats, ]
xpr$counts <- xpr$counts[ ,!colnames(xpr$counts) %in% key_all_repeats]
keyz <- keyz[which(!(keyz$rna_id %in% key_all_repeats)), ]
x <- keyz %>% anti_join(key_all_repeats)
dim(keyz)
dim(xpr)
```
# Correlation between samples: heatmap #
# Computes log counts per million #
```{r}
xpr_lcpm <- cpm(xpr, log=TRUE)
# Find pairwise correlation of samples #
corr <- cor(xpr_lcpm, method="spearman")
```
# Plot correlation #
```{r}
colfunc <- colorRampPalette(c("lightblue", "darkblue"))
heatmap.2(corr, trace="none", col=colfunc(10))
title(main = "Correlation", cex.main=1.5)
dev.off()
```
# Density plot: before sample removal #
# Overlays per-sample log-CPM densities and flags bimodal samples: any sample
# whose density mass below 0 exceeds 0.07 is collected in `rm_samp`.
# NOTE(review): the loop starts at i = 2, so sample 1 is plotted but never
# checked against the threshold — TODO confirm this is intentional.
```{r}
xpr_lcpm <- cpm(xpr, log=TRUE)
nsamples <- ncol(xpr_lcpm)
#png(paste0(figure_path, "processing/exploratory/density_before.png"), width=9, height=8, units="in", res=300) #width=10, height=6, dpi=300, units="in"
plot(density(xpr_lcpm[, 1]), lwd=2, las=2, main="", xlab="", col=1)#, xlim=c(-10, 15))
title(xlab="Log CPM")
rm_samp = c()
for (i in 2:nsamples)
{
den <- density(xpr_lcpm[,i])
lines(den$x, den$y,lwd=2,col=i)
if(max(den$y[den$x< (0)]) > 0.07) # determine threshold after looking at plot without if statement
{
print(colnames(xpr_lcpm)[i])
rm_samp = c(rm_samp, colnames(xpr_lcpm)[i])
}
}
dev.off()
```
# # Delete significantly bimodal samples from analysis #
```{r}
dim(xpr)
dim(key)
xpr$counts <- xpr$counts[ ,!colnames(xpr$counts) %in% rm_samp]
xpr$samples <- xpr$samples[!rownames(xpr$samples) %in% rm_samp, ]
key <- key[which(!(key$rna_id %in% rm_samp)), ]
dim(xpr)
dim(key)
```
#
# # Density plot: after sample removal #
# xpr_lcpm <- cpm(xpr, log=TRUE)
# nsamples <- ncol(xpr_lcpm)
# png(paste(figure_path, "processing/exploratory/density_after.png", sep=""), width=9, height=8, units="in", res=300) #width=10, height=6, dpi=300, units="in"
# plot(density(xpr_lcpm[,1]), lwd=2, las=2, main="", xlab="",col=1)
# title(xlab="Log CPM")
# for (i in 2:nsamples)
# {
# den <- density(xpr_lcpm[,i])
# lines(den$x, den$y,lwd=2,col=i)
# }
# dev.off()
# Apply trimmed mean normalization to gene expression distributions #
# `xpr_norm` carries the TMM normalization factors; `xpr_norm2` resets the
# factors to 1 to represent the UNnormalized distributions for comparison.
```{r}
xpr_norm <- calcNormFactors(xpr, method = "TMM")
xpr_norm2 <- xpr_norm
xpr_norm2$samples$norm.factors <- 1
```
# Unnormalized data #
```{r}
lcpm2 <- cpm(xpr_norm2, log=TRUE)
```
# samples to remove based on normalization plot: the three samples with the
# highest mean log-CPM.
```{r}
norm_rm <- colnames(lcpm2)[order(apply(lcpm2, 2, mean), decreasing=TRUE)][(length(colnames(lcpm2))-2):length(colnames(lcpm2))]
# plot
#png(paste0(figure_path, "processing/exploratory/unnormalized_data.png"), width=9, height=8, units="in", res=300) #width=10, height=6, dpi=300, units="in"
par(mar = c(5,4,4,1))
# FIX(review): the original called boxplot(x, ...), but `x` is not defined in
# this scope; the unnormalized log-CPM matrix `lcpm2` computed above is what
# this plot is meant to show.
# NOTE(review): the png() call is commented out, so dev.off() closes the
# interactive device — confirm whether the png output is still wanted.
boxplot(lcpm2, las=2, main="", xaxt = 'n')
title(main = "A. Unnormalised data", ylab = "Log CPM", xlab = NULL)
dev.off()
```
# Drop the flagged samples from the DGEList and the sample key.
# FIX(review): the original filtered `keyz`, which is never defined in this
# file; the pipeline's key object (filtered the same way everywhere else in
# this script) is `key`.
```{r}
xpr$samples <- xpr$samples[!rownames(xpr$samples) %in% norm_rm, ]
xpr$counts <- xpr$counts[ ,!colnames(xpr$counts) %in% norm_rm]
key <- key[which(!(key$rna_id %in% norm_rm)), ]
dim(key)
dim(xpr)
```
# Normalized data #
```{r}
lcpm <- cpm(xpr_norm, log=TRUE)
png(paste0(figure_path, "processing/exploratory/normalized_data.png"), width=9, height=8, units="in", res=300) #width=10, height=6, dpi=300, units="in"
boxplot(lcpm, las=2, main="")
title(main = "B. Normalised data", ylab = "Log CPM")
dev.off()
```
## PCA on RNA Data ##
# Perform PCA on log cpm of normalized counts data #
```{r}
xpr_norm <- calcNormFactors(xpr, method = "TMM")
xpr_nlcpm <- cpm(xpr_norm, log=TRUE)
PCA_data <- prcomp(t(xpr_nlcpm), center=TRUE, scale=TRUE)
```
# PCA colored by batch #
```{r}
groupLabel = as.factor(xpr$samples$batch)
plotData <- data.frame(pc1=PCA_data$x[, 1], pc2=PCA_data$x[, 2], group=groupLabel)
p <- ggplot(plotData, aes(x=pc1, y=pc2))
p <- p + geom_point(aes(colour=group), size=4)
p <- p + labs(title="PCA of RNA Data colored by Batch\nwith outliers")
p <- p + theme_bw(18)
p <- p + theme(plot.title = element_text(size = 18, face="bold", hjust=0.5))
p
#ggsave(paste0(figure_path, "processing/exploratory/pca_batch_with_outliers.png"), width=10, height=6, dpi=300, units="in", device="png")
```
# Outliers: samples with PC1 < -100 (threshold chosen by inspecting the
# previous plot).
```{r}
PCA_data$x[which(PCA_data$x[, 1] < -100), 1]
# from batch 1, batch 1, and batch 2 respectively (by pca_batch_with_outliers.png)
pca_rm = names(PCA_data$x[which(PCA_data$x[, 1] < -100), 1])
```
# Remove samples with bad quality from analysis
```{r}
dim(xpr)
dim(key)
xpr <- xpr[ ,!colnames(xpr) %in% pca_rm]
key <- key[which(!(key$rna_id %in% pca_rm)), ]
dim(xpr)
dim(key)
```
# Perform PCA on log cpm of normalized counts data (recomputed after removal) #
```{r}
xpr_norm <- calcNormFactors(xpr, method = "TMM")
xpr_nlcpm <- cpm(xpr_norm, log=TRUE)
PCA_data <- prcomp(t(xpr_nlcpm), center=TRUE, scale=TRUE)
```
# PCA colored by batch without outliers #
```{r}
groupLabel = as.factor(xpr$samples$batch)
plotData <- data.frame(pc1=PCA_data$x[, 1], pc2=PCA_data$x[, 2], group=groupLabel)
p <- ggplot(plotData, aes(x=pc1, y=pc2))
p <- p + geom_point(aes(colour=group), size=4)
p <- p + labs(title="PCA of RNA Data \n colored by Batch")
p <- p + theme_bw(18)
p <- p + theme(plot.title = element_text(size = 18, face="bold", hjust=0.5))
p
#ggsave(paste0(figure_path, "processing/exploratory/pca_batch_without_outliers.png"), width=10, height=6, dpi=300, units="in", device="png")
```
# PCA colored by batch and healthy re-reruns #
# Healthy repeats are recoded to -1 (batch 1 copy) / -2 (batch 2 "_2" copy)
# so the two copies of each repeated sample are visually distinguishable.
```{r}
groupLabel = xpr$samples$batch
groupLabel[which(xpr$samples$rna_id %in% healthy_repeats)] = -1
groupLabel[which(xpr$samples$rna_id %in% paste0(healthy_repeats, "_2"))] = -2
groupLabel = as.factor(groupLabel)
plotData <- data.frame(pc1=PCA_data$x[, 1], pc2=PCA_data$x[, 2], group=groupLabel)
p <- ggplot(plotData, aes(x=pc1, y=pc2))
p <- p + geom_point(aes(colour=group), size=4)
p <- p + scale_color_manual(values=c("red", "darkblue", "lightblue", "pink"))
p <- p + labs(title="PCA of RNA Data colored by Batch\nshowing repeated healthy samples")
p <- p + theme_bw(18)
p <- p + theme(plot.title = element_text(size = 18, face="bold", hjust=0.5))
p
#ggsave(paste0(figure_path, "processing/exploratory/pca_batch_with_healthy.png"), width=10, height=6, dpi=300, units="in", device="png")
```
# Remove repeat healthy samples from first batch; then drop one specific
# COVID sample and any rows with unusable gender values.
```{r}
dim(xpr)
dim(key)
xpr <- xpr[ ,!colnames(xpr) %in% healthy_repeats]
key <- key[which(!(key$rna_id %in% healthy_repeats)), ]
dim(xpr)
dim(key)
rm <- c("DU18-02S0011611")
#remove covid sample
key <- key[which(!(key$rna_id %in% rm)),]
xpr <- xpr[,!colnames(xpr) %in% rm]
dim(xpr)
dim(key)
#remove na genders (anything that is neither 'MALE' nor 'FEMALE')
na <- which(!(key$gender != 'MALE' & key$gender != 'FEMALE'))
b <- key[na,]
key <- key[which(!(key$gender != 'MALE' & key$gender != 'FEMALE')),]
xpr <- xpr[,!colnames(xpr) %in% b$rna_id]
dim(key)
dim(xpr)
```
# --- PCA scatter plots colored by sample metadata --------------------------
# NOTE(review): from here on the R Markdown chunk fences stop — these lines
# are plain script. Each section recodes a metadata column for nicer labels,
# then saves a PC1/PC2 scatter colored by that column.
# Perform PCA on log cpm of normalized counts data #
xpr_norm <- calcNormFactors(xpr, method = "TMM")
xpr_nlcpm <- cpm(xpr_norm, log=TRUE)
PCA_data <- prcomp(t(xpr_nlcpm), center=TRUE, scale=TRUE)
# PCA colored by time point (COVID-19 cohort only) #
xpr$samples$timepoint[which(xpr$samples$timepoint == 'DAY 7')] = 'Day 7'
xpr$samples$timepoint[which(xpr$samples$timepoint == 'DAY 14')] = 'Day 14'
groupLabel <- factor(xpr$samples$timepoint, levels = c('T=0', 'Day 1', 'Day 2', 'Day 3', 'Day 7', 'Day 14', 'Day 21'))
plotData <- data.frame(pc1=PCA_data$x[which(xpr$samples$cohort=='COVID-19'),1], pc2=PCA_data$x[which(xpr$samples$cohort=='COVID-19'),2], group=groupLabel[which(xpr$samples$cohort=='COVID-19')])
p <- ggplot(plotData, aes(x=pc1, y=pc2))
p <- p + geom_point(aes(colour=group), size=4)
p <- p + labs(title="PCA of COVID-19 Data \n colored by Time Point")
p <- p + theme_bw(18)
p <- p + theme(plot.title = element_text(size = 18, face="bold", hjust=0.5))
p
ggsave(paste0(figure_path, "processing/exploratory/pca_covid19_timepoints.png"), width=10, height=6, dpi=300, units="in", device="png")
# PCA colored by age (decade bins) #
groupLabel <- cut(xpr$samples$age, breaks = c(0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100))
plotData <- data.frame(pc1=PCA_data$x[,1], pc2=PCA_data$x[,2], group=groupLabel)
p <- ggplot(plotData, aes(x=pc1, y=pc2))
p <- p + geom_point(aes(colour=group), size=4)
p <- p + labs(title="PCA of RNA Data \n colored by Age")
p <- p + theme_bw(18)
p <- p + theme(plot.title = element_text(size = 18, face="bold", hjust=0.5))
p
ggsave(paste0(figure_path, "processing/exploratory/pca_age.png"), width=10, height=6, dpi=300, units="in", device="png")
# PCA colored by gender #
xpr$samples$gender[which(xpr$samples$gender == 'FEMALE')] = 'Female'
xpr$samples$gender[which(xpr$samples$gender == 'MALE')] = 'Male'
xpr$samples$gender[which(xpr$samples$gender == "")] = NA
groupLabel <- as.factor(xpr$samples$gender)
plotData <- data.frame(pc1=PCA_data$x[,1], pc2=PCA_data$x[,2], group=groupLabel)
p <- ggplot(plotData, aes(x=pc1, y=pc2))
p <- p + geom_point(aes(colour=group), size=4)
p <- p + labs(title="PCA of RNA Data \n colored by Gender")
p <- p + theme_bw(18)
p <- p + theme(plot.title = element_text(size = 18, face="bold", hjust=0.5))
p
ggsave(paste0(figure_path, "processing/exploratory/pca_gender.png"), width=10, height=6, dpi=300, units="in", device="png")
# PCA colored by race (normalize inconsistent raw labels first) #
xpr$samples$race[which(xpr$samples$race == '\tAsian')] = 'Asian'
xpr$samples$race[which(xpr$samples$race == 'WHITE')] = 'White'
xpr$samples$race[which(xpr$samples$race == 'BLACK/AFRICAN AMERICAN')] = 'Black/African American'
xpr$samples$race[which(xpr$samples$race == 'UNKNOWN')] = 'Unknown/Not reported'
xpr$samples$race[which(xpr$samples$race == "")] = NA
groupLabel <- as.factor(xpr$samples$race)
plotData <- data.frame(pc1=PCA_data$x[,1], pc2=PCA_data$x[,2], group=groupLabel)
p <- ggplot(plotData, aes(x=pc1, y=pc2))
p <- p + geom_point(aes(colour=group), size=4)
p <- p + labs(title="PCA of RNA Data \n colored by Race")
p <- p + theme_bw(18)
p <- p + theme(plot.title = element_text(size = 18, face="bold", hjust=0.5))
p
ggsave(paste0(figure_path, "processing/exploratory/pca_race.png"), width=10, height=6, dpi=300, units="in", device="png")
# PCA colored by cohort #
groupLabel <- as.factor(xpr$samples$cohort)
plotData <- data.frame(pc1=PCA_data$x[,1], pc2=PCA_data$x[,2], group=groupLabel)
p <- ggplot(plotData, aes(x=pc1, y=pc2))
p <- p + geom_point(aes(colour=group), size=4)
p <- p + labs(title="PCA of RNA Data \n colored by Cohort")
p <- p + theme_bw(18)
p <- p + theme(plot.title = element_text(size = 18, face="bold", hjust=0.5))
p
ggsave(paste0(figure_path, "processing/exploratory/pca_cohort.png"), width=10, height=6, dpi=300, units="in", device="png")
# PCA colored by pathogen #
groupLabel <- as.factor(xpr$samples$pathogen)
plotData <- data.frame(pc1=PCA_data$x[,1], pc2=PCA_data$x[,2], group=groupLabel)
p <- ggplot(plotData, aes(x=pc1, y=pc2))
p <- p + geom_point(aes(colour=group), size=4)
p <- p + labs(title="PCA of RNA Data \n colored by Pathogen")
p <- p + theme_bw(18)
p <- p + theme(plot.title = element_text(size = 18, face="bold", hjust=0.5))
p
ggsave(paste0(figure_path, "processing/exploratory/pca_pathogen.png"), width=10, height=6, dpi=300, units="in", device="png")
# PCA colored by study #
xpr$samples$studyname[which(xpr$samples$studyname == 'ACESO/ARLG (DU09-03 continuation)')] = 'ACESO/ARLG'
groupLabel <- as.factor(xpr$samples$studyname)
plotData <- data.frame(pc1=PCA_data$x[,1], pc2=PCA_data$x[,2], group=groupLabel)
p <- ggplot(plotData, aes(x=pc1, y=pc2))
p <- p + geom_point(aes(colour=group), size=4)
p <- p + labs(title="PCA of RNA Data \n colored by Study")
p <- p + theme_bw(18)
p <- p + theme(plot.title = element_text(size = 18, face="bold", hjust=0.5))
p
ggsave(paste0(figure_path, "processing/exploratory/pca_study.png"), width=10, height=6, dpi=300, units="in", device="png")
### Normalization ###
# Apply trimmed mean normalization to data #
```{r}
xpr_tmm <- calcNormFactors(xpr, method = "TMM")
xpr_nlcpm <- cpm(xpr_tmm, log = TRUE)
#write.csv(t(rna_nmf_nlcpm), file=paste0(path, "195/processed_data/nmf/rna_nmf_nlcpm_sex_33.csv"))
```
### Non-Negative Matrix Factorization ###
```{r fig.height = 6, fig.width = 15}
# # NMF on TMM data #
# Factorize the TMM-normalized counts at rank 33 (fixed seed; .options='t'
# tracks the residual at each update so convergence can be plotted below).
rna_nmf_tmm <- nmf(xpr_tmm$counts, rank=33, seed=123456, .options='t')
#Convert nmf data to log-cpm
rna_nmf_nlcpm_sex <- cpm(rna_nmf_tmm@fit@H, log=TRUE)
# rank estimation
estim.r <- nmf(xpr_tmm$counts, 31:40, nrun = 10, seed = 123456)
# NOTE(review): the next line immediately overwrites the estimate above and
# passes the whole xpr_tmm object rather than its $counts matrix — likely
# only one of these two calls was intended; confirm before using estim.r.
estim.r <- nmf(xpr_tmm, 31:40, nrun = 10, seed = 123456)
plot(estim.r)
consensusmap(estim.r, labCol = NA, labRow = NA)
# # Track loss #
figure_path <- "D:/Research/Summer-2020-+DS/output"
# NOTE(review): figure_path has no trailing slash here, so the PNG name is
# concatenated directly onto "output" — verify the intended file path.
png(paste0(figure_path, "rna_nmf_tmm_residuals_20.png"))
plot(rna_nmf_tmm)
dev.off()
# # Save NMF object
saveRDS(rna_nmf_tmm, paste0(path, '196/processed_data/rna_nmf_tmm_33.rds'))
saveRDS(estim.r, paste0(path, '/processed_data/rank_estimation_10-30_10run.rds'))
#plot visualizations
layout(cbind(1,3))
basismap(rna_nmf_tmm, subsetRow = TRUE)
coefmap(rna_nmf_tmm)
consensusmap(rna_nmf_tmm, labCol = NA, labRow = NA)
```
# Save data objects #
```{r}
dim(xpr_nlcpm)
dim(key)
# NOTE(review): output paths in this file mix "195" and "196" subfolders —
# confirm which dataset directory these artefacts belong to.
write.csv(t(xpr_nlcpm), file=paste0(path, "195/processed_data/processing/xpr_nlcpm.csv"))
write.csv(key, file=paste0(path, "196/processed_data/processing/key.csv"), row.names=F)
```
# Samples should be in same order
```{r}
# Sanity checks: sample columns of all matrices must line up with the key.
identical(colnames(xpr), colnames(xpr_tmm)) # should be TRUE
identical(colnames(xpr_nlcpm), colnames(xpr_tmm)) # should be TRUE
identical(colnames(xpr), key$rna_id) # should be TRUE
```
# Write to file
```{r}
write.csv(t(xpr), paste0(path, "195/processed_data/processing/xpr.csv"))
write.csv(t(xpr_tmm$counts), paste0(path, "195/processed_data/processing/sex/xpr_tmm_sex.csv"))
write.csv(t(rna_nmf_tmm@fit@H), paste0(path, "195/processed_data/nmf/sex/rna_nmf_tmm_sex_33.csv"))
key <- read.csv(file = "D:/Research/Summer-2020-+DS/data/196/processed_data/processing/key.csv")
key_subset <- read.csv(file = "D:/Research/Summer-2020-+DS/data/196/processed_data/processing/key_subset.csv")
# NOTE(review): xpr_nlcpm above is the matrix returned by cpm(), which has no
# "X" column, so dplyr::filter() would fail on it; this presumably targets a
# data frame re-read from CSV (where X is the rowname column) — confirm.
xpr_nlcpm <- xpr_nlcpm %>%
  filter(X != "DU18-02S0011611")
```
|
04a572bf01ea4bc13f3f948c7d43f0de38153bc5
|
a87a0af1931d6f0f2cf4512a0791dc7c0c20322b
|
/man/get_associations_points_tw.Rd
|
122579c917db9c8af99ca88b74054d969366e5fe
|
[] |
no_license
|
cran/asnipe
|
6ddc6d2a1d79601b9eb29980c161940b2a5f29dc
|
aab6c24eb867097b6602d886b6147dffe6d0b383
|
refs/heads/master
| 2021-08-07T11:18:58.825808
| 2021-06-08T09:30:02
| 2021-06-08T09:30:02
| 49,210,143
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,492
|
rd
|
get_associations_points_tw.Rd
|
\name{get_associations_points_tw}
\alias{get_associations_points_tw}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Calculate Group Membership using Time Window (please read warnings before using this method)
}
\description{
A time window approach to calculate group co-memberships.
}
\usage{
get_associations_points_tw(point_data, time_window = 180, which_days = NULL,
which_locations = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{point_data}{
dataframe of four columns: \code{Date Time ID Location}. This requirement is strict (see details).
}
\item{time_window}{
window around each individual for calculating associations
}
\item{which_days}{
subset of \code{Date} to include
}
\item{which_locations}{
subset of \code{Locations} to include
}
}
\details{
Calculates an ego-centric approach to group membership (see warning). For each detection, a group is created comprising the focal individual and all associates detected within the time window at the same location.
Input data must be of the following form: Date is an integer for day (usually starting at 1 on the first day). Time is the number of seconds elapsed from the start (continuous across all dates). ID is a unique character string for each individual. Location is a unique character string for each location.
}
\value{
Returns a list with four objects:
1. group by individual matrix (K rows by N columns)
2. a vector of times for each group
3. a vector of dates for each group
4. a vector of locations for each group
}
\author{
Damien R. Farine
}
\section{Warning }{
This method biases associations of dyads occurring in large groups because it creates one row in the association matrix for each detection of an individual. For this reason, this function should not be used (see also Psorakis et al. 2015 Behavioural Ecology & Sociobiology). One way to circumvent this is by including only groups centered around the focal individual when calculating associations. However, none of the functions in this package are implemented this way.
}
\examples{
data("identified_individuals")
## calculate group_by_individual for first day at one location
group_by_individual <- get_associations_points_tw(identified_individuals, time_window=180,
which_days=1,which_locations="1B")
## split the resulting list
times <- group_by_individual[[2]]
dates <- group_by_individual[[3]]
locations <- group_by_individual[[4]]
group_by_individual <- group_by_individual[[1]]
}
|
c8fc842a01d3a708200ca8c81090baaff1b97498
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/ET0_Makkink/libFuzzer_ET0_Makkink/ET0_Makkink_valgrind_files/1612737633-test.R
|
cf27b265348b46618815a5bc3de6ef830a871f6e
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 865
|
r
|
1612737633-test.R
|
# Auto-generated fuzzing regression case (libFuzzer/valgrind output, per the
# directory name): appears to replay a recorded input — empty vectors plus
# mostly-NaN and extreme doubles — against the compiled routine to confirm
# it no longer crashes. Do not hand-edit the recorded values.
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(NaN, NaN, NaN, NaN, NaN, NaN, -2.00416836000897e-291, 1.27319747452976e-313, NaN, 3.62604439982445e-217, -5.486124068794e+303, NaN, NaN, -1.16992835169468e+170, 4.65661287307728e-10, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 9.70519443299998e-101, 7.29025701937733e-304, 2.57125729134865e-100, 7.24307897740706e-313, -3.70473891655627e+304, 1.40049386857186e+167, NaN, 1.25986739689518e-321, 9.64395702940556e-101, 2.3498974497864e-310, 3.87069807005374e+233, 1.09588333398103e-99, 2.03489682271993e+174, NaN, 8.30996797931026e-246, NaN, 3.19808872162774e+227, 5.47184371138111e+169, NaN, 2.12199579047121e-314, 7.29037801048496e-304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = 1.23385850194239e-309)
# Invoke the internal routine with the recorded argument list.
result <- do.call(meteor:::ET0_Makkink,testlist)
# Print the structure so output-shape changes show up in test logs.
str(result)
|
4a6c6900d76ba7ca2089355595b02ecb9e63b48a
|
d8531244ced5f30c40a306389b80757d4852b944
|
/archive/linreg_QR.R
|
dfce789a4edaf5c1a902b4d68be4ae19b0cddcd3
|
[
"MIT"
] |
permissive
|
MiniDlicious/Lab4
|
b769bf514168e574c5c511cfa7f358f4a1522ab7
|
3246022a0ad7f12499a76d64a09520672c44fb8e
|
refs/heads/master
| 2020-07-27T12:21:54.454994
| 2019-10-14T19:56:38
| 2019-10-14T19:56:38
| 209,087,967
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,818
|
r
|
linreg_QR.R
|
#' Multiple Regression Model using QR decomposition
#'
#' Fits an ordinary least squares regression by QR-decomposing the model
#' matrix; back-substitution against the triangular factor is numerically
#' more stable than solving the normal equations directly.
#'
#' @param formula an object of class "\code{formula}" describing the model structure.
#' @param data an object of class "data frame" containing the variables in the model.
#'
#' @return A matrix with one row per coefficient and columns
#'   \code{beta_hat} (the OLS estimates) and \code{var_hat}
#'   (their estimated variances).
#'
#' @examples
#' linreg(formula=Petal.Length~Species, data=iris)
#'
#' @references Golub & Van Loan, Matrix Computations (QR least squares).
#' @importFrom stats model.frame model.matrix model.response pt
#'
#' @export
#'
linreg <- function(formula, data){
  ## 0. Check that the class of the formula argument is correct:
  stopifnot(inherits(formula, "formula"))

  ## 1. Initialization
  # Build the model frame once so the default NA handling is applied
  # consistently, keeping x and y row-aligned. (The previous version took
  # y straight from `data`, which could mismatch x if rows were dropped.)
  mf <- model.frame(formula, data)
  x <- model.matrix(formula, mf)  # X matrix (independent variables)
  y <- model.response(mf)         # y (dependent variable)

  ## 2. Calculation of Q and R
  qr_x <- qr(x)
  Q <- qr.Q(qr_x)  # orthogonal matrix
  R <- qr.R(qr_x)  # upper-triangular matrix

  ## 3. Estimations (computations using ordinary least squares).
  # Regression coefficients: solve R %*% beta = Q'y by back-substitution.
  beta_hat <- as.vector(backsolve(R, crossprod(Q, y)))
  # The residuals:
  e_hat <- y - (x %*% beta_hat)
  # The degrees of freedom: rows actually used minus parameters estimated.
  # (Uses nrow(x), not nrow(data), so dropped-NA rows are not counted.)
  df <- nrow(x) - ncol(x)
  # The residual variance:
  sigma2_hat <- as.numeric(crossprod(e_hat) / df)
  # The variance of the regression coefficients:
  # Var(beta_hat) = (R^T R)^(-1) * sigma_hat^2
  var_hat <- diag(solve(crossprod(R)) * sigma2_hat)
  std_error <- sqrt(var_hat)  # standard errors
  # The t-values for each coefficient:
  t_values <- beta_hat / std_error
  # Two-sided p-values. (Bug fix: the previous code computed
  # 2*abs(pt(t, df, log.p = TRUE)) — twice the absolute log-probability,
  # which is not a p-value; the correct upper-tail probability is used.)
  p_values <- 2 * pt(abs(t_values), df, lower.tail = FALSE)

  return(cbind(beta_hat, var_hat))
}
|
01d8267ab810547accc41952bdac79a21600128b
|
4cfa9af712c13f7769f3733bfed0dd1ba9d7549c
|
/tests/testthat.R
|
9750d7a05b445f8b77fb10b39c8b1702a329081a
|
[] |
no_license
|
ahvargas/ProgrammingAssignment2
|
f1b900d7281ca09341cc3b8211f5e50b8d224a06
|
bbda65ea3acefd256b7ffe72922793b4a6d1eb5a
|
refs/heads/master
| 2021-01-16T22:18:59.761164
| 2015-05-18T20:59:20
| 2015-05-18T20:59:20
| 35,838,995
| 0
| 0
| null | 2015-05-18T19:49:09
| 2015-05-18T19:49:09
| null |
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# Standard testthat entry point: R CMD check sources this file, which runs
# every test under tests/testthat/ for the "week3" package.
library(testthat)
library(week3)
test_check("week3")
|
43937556a951642b579be0a78916abe61784b2e7
|
b1120d87051412552fcbcad4acf520528a0f575b
|
/importdata.R
|
357794ca5429bce93ca87b5a0665f408a9dbe010
|
[
"MIT"
] |
permissive
|
webadam1/ReMo_data_analysis
|
bd0985970fc5310b3da99bec42320fcd63a1044a
|
21580767bd7818d1cdb76bf557eb1afdc715a8e2
|
refs/heads/master
| 2021-08-24T04:28:16.083778
| 2017-12-08T02:26:24
| 2017-12-08T02:26:24
| 113,520,227
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,437
|
r
|
importdata.R
|
# Import script for the ReMo analysis: exports Google Forms answers and test
# tables to CSV via project helper functions, then reads them back in.
# NOTE(review): the hard-coded setwd() calls make this machine-specific;
# relative paths (or here::here()) would be more portable.
#rewrite to your working directory
setwd("D:\\Adam\\Szakdolgozat\\_Gitre\\ReMo_data_analysis\\data_googleforms")
#Processing all form data
#it will take a while
# exportAllAnswersCsv / exportAllFormsCsv / addEmptyVariable are project
# helpers defined elsewhere in this repository.
exportAllAnswersCsv(14,"all_answers.csv")
exportAllFormsCsv(14,"all_forms.csv")
answers_all <- read_csv2("all_answers.csv")
answers_all <- addEmptyVariable(answers_all)
forms_all <- read_csv2("all_forms.csv")
#rewrite to your working directory
setwd("D:\\Adam\\Szakdolgozat\\_Gitre\\ReMo_data_analysis\\data_tests")
exportAllTestTables()
# as.data.frame(unclass(...)) — presumably to turn the readr tibbles into
# plain data frames (with character columns as factors under pre-4.0
# defaults); confirm that factor conversion is intended.
zh_1 <- as.data.frame(unclass(read_csv2("zh-1.csv")))
zh_2 <- as.data.frame(unclass(read_csv2("zh-2.csv")))
zh_3 <- as.data.frame(unclass(read_csv2("zh-3.csv")))
zh_4 <- as.data.frame(unclass(read_csv2("zh-4.csv")))
zh_all <- as.data.frame(unclass(read_csv2("zh-all.csv")))
zh_1_answers <- as.data.frame(unclass(read_csv2("zh-1-answers.csv")))
zh_2_answers <- as.data.frame(unclass(read_csv2("zh-2-answers.csv")))
zh_3_answers <- as.data.frame(unclass(read_csv2("zh-3-answers.csv")))
zh_4_answers <- as.data.frame(unclass(read_csv2("zh-4-answers.csv")))
zh_all_answers <- as.data.frame(unclass(read_csv2("zh-all-answers.csv")))
# Ensure the grouping columns are factors for downstream modeling/plots.
zh_all_answers$test_id <- as.factor(zh_all_answers$test_id)
zh_all_answers$type <- as.factor(zh_all_answers$type)
zh_all_answers$subtype <- as.factor(zh_all_answers$subtype)
#rewrite to your working directory
setwd("D:\\Adam\\Szakdolgozat\\_Gitre\\ReMo_data_analysis")
|
6d0dc736fa0dca4aabdd096e0e7755686af9b54b
|
843d76780ea67bec4087d8892f3a2fdbd4993893
|
/src/cec4.R
|
7a9655a936330ee4a4227d8b143dfacf375c49cb
|
[] |
no_license
|
mingless/ALHE
|
43d6cba0b0e4909adc85935b26d8039082b558fc
|
c20d8826e88e55145cb2bc61e5c0cc3f3d89198b
|
refs/heads/master
| 2021-01-21T11:15:58.664507
| 2017-06-11T19:27:24
| 2017-06-11T19:27:24
| 91,733,910
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,731
|
r
|
cec4.R
|
library("cec2017")
# Default population initializer for des(): draws each of the nvars
# coordinates of every individual uniformly from the corresponding
# [lowerBounds[i], upperBounds[i]] interval.
#
# fitnessFcn - unused here; kept so all init functions share the
#              (fitnessFcn, nvars, control) calling convention.
# nvars      - number of decision variables (matrix columns).
# control    - list providing popSize, lowerBounds and upperBounds.
#
# Returns a popSize x nvars numeric matrix (one row per individual).
desUniformInit <- function(fitnessFcn, nvars, control)
{
  # Preallocate instead of growing column-by-column with cbind(), which
  # copied the whole matrix on every iteration. RNG call order (one runif
  # per column, left to right) is unchanged, so results are identical
  # for a given seed.
  population <- matrix(0, nrow = control$popSize, ncol = nvars)
  for (i in seq_len(nvars)) {
    population[, i] <- runif(control$popSize,
                             control$lowerBounds[i],
                             control$upperBounds[i])
  }
  return(population)
}
#Main des function.
#Minimizess the given fitnessFcn, which should be defined to take
# an nvars-sized vector as its argument.
#initFcn - function used to initialize the population. It is called with
# parameters (fitnessFcn,nvars,control)
#The function checks for following parameters in the control list:
#scalingCoeff - scaling coefficient. Default = 1/sqrt(2)
#popSize - population size. Default = 4*nvars
#eliteSize - the number of individuals that take part in defining the
# reference point for the next population. Default = popSize/2
#migrationCoeff - coefficient in range <0.,1.> determining how fast
# the center of population moves. default = 0.5
#horizonSize - amount of the past populations that take part in creating
# the offspring. default = 10
#noiseCoeff - coefficient specifying amount of noise added to each new individual
# default = 1
#lowerBounds - lower bounds of the initial population if using the default initFcn.
# default = rep(-1000,nvars)
#upperBounds - upper bounds --||--. Default = rep(1000,nvars)
#maxIterations - number of iterations after which optimizations stops. Currently
# the only stop condition. default = 1000*nvars
#stopScore - if not NULL, stops further evaluations at given score. Default - NULL
#
#TODO: Better param checks. Currently assumes that the user knows what he's doing.
#TODO: Other stop conditions
#
# Main DES loop. Maintains a sliding window ("horizon") of past populations
# and an exponentially smoothed elite-vs-population displacement (Del) that
# biases new offspring toward the direction the elite has been moving.
# Parameter semantics are documented in the comment block above.
des <- function(fitnessFcn,nvars,initFcn=desUniformInit,control=list())
{
  # Fill in defaults for any control-list entries the caller omitted.
  if(length(control$scalingCoeff)==0)
    control$scalingCoeff = 0.70710678118
  if(length(control$popSize)==0)
    control$popSize = nvars*4
  if(length(control$eliteSize)==0)
    control$eliteSize = control$popSize / 2
  if(length(control$migrationCoeff)==0)
    control$migrationCoeff=0.5
  if(length(control$horizonSize)==0)
    control$horizonSize = 10
  if(length(control$noiseCoeff)==0)
    control$noiseCoeff = 1
  if(length(control$lowerBounds)==0)
    control$lowerBounds = rep(-1000,nvars)
  if(length(control$upperBounds)==0)
    control$upperBounds = rep(1000,nvars)
  if(length(control$maxIterations)==0)
    control$maxIterations = 1000*nvars
  #current iteration and work index
  # t counts real iterations; i indexes into the pseudo-queue below and
  # stops growing once the queue is full (old entries are dropped instead).
  t = 1
  i = 1
  #to save memory past scores and populations are saved as a psedo queue
  #of size up to con$horizonSize
  Del = list()
  score = list()
  pop = list()
  bestScore = Inf
  Del[[1]] = rep(0,nvars)
  # Per-dimension step scale; every dimension starts at sqrt(nvars).
  del = numeric()
  for(it in 1:nvars)
    del[it] = sqrt(nvars)
  pop[[1]] = initFcn(fitnessFcn,nvars,control)
  repeat
  {
    #apply the fitness function to each row of current population
    score[[i]] = apply(pop[[i]],1,fitnessFcn)
    #mean individual from a complete population
    meanInd = colMeans(pop[[i]])
    #get the indices of sorted scores and then reorder them and the population
    ord = order(score[[i]])
    score[[i]]=score[[i]][ord]
    pop[[i]]=pop[[i]][ord,]
    #mean individual from the elite
    eliteMeanInd = colMeans(pop[[i]][1:control$eliteSize,])
    # Smoothed drift of the elite mean away from the population mean;
    # used below to bias where offspring are placed.
    Del[[i+1]] = (1-control$migrationCoeff)*Del[[i]]+control$migrationCoeff*(eliteMeanInd-meanInd)
    #update best
    if(score[[i]][1]<bestScore)
    {
      bestScore = score[[i]][1]
      bestInd = pop[[i]][1,]
    }
    #break condition
    if(t >= control$maxIterations)
      break
    if(length(control$stopScore)!=0)
      if(bestScore <= control$stopScore)
        break
    #vectors of uniformly distributed random integer variables
    # h picks a historical population; j and k pick two elite members of it.
    h = sample(1:i,control$popSize,replace=TRUE)
    j = sample(1:control$eliteSize,control$popSize,replace=TRUE)
    k = sample(1:control$eliteSize,control$popSize,replace=TRUE)
    #new pop init
    pop[[i+1]] = matrix(0,control$popSize,nvars)
    #calculating the new individuals
    # offspring = elite mean + scaled elite difference + drift term + noise
    for(it in 1:control$popSize)
    {
      d <- control$scalingCoeff*(pop[[h[it]]][j[it],]-pop[[h[it]]][k[it],]) +
        Del[[i+1]]*del*rnorm(1,0,1)
      pop[[i+1]][it,] = eliteMeanInd + d + control$noiseCoeff*rnorm(nvars,0,1)
    }
    t = t+1
    #work index doesn't increase if we've reached horizonSize iterations
    #instead we remove old populations
    if(t>=control$horizonSize)
    {
      pop = pop[-1] #remove the oldest population from history
      Del = Del[-1]
      score = score[-1]
    }
    else
    {
      i = i+1;
    }
  }
  #prepare the result structure
  # x = best point found, score = its fitness, it = iterations used,
  # flag = 1 (only one stop path is currently implemented).
  result = list()
  result$x = bestInd
  result$score = bestScore
  result$it = t
  result$flag = 1
  return(result)
}
# Benchmark driver: run des() five times on CEC2017 problem nr `i` in 10
# dimensions and record best/worst/median/mean/sd of the error (the score
# minus the i*100 reference value the stop condition is built around).
results = data.frame(best=numeric(),worst=numeric(),median=numeric(),mean=numeric(),std=numeric())
results2 = data.frame(best=numeric(),worst=numeric(),median=numeric(),mean=numeric(),std=numeric())
# Control settings shared by every run.
con=list(lowerBounds=rep(-100,10),upperBounds=rep(100,10),migrationCoeff=4/14,
noiseCoeff=1e-8/sqrt(10),popSize=40,eliteSize=20,horizonSize=15,maxIterations=100000)
# NOTE(review): `problems`, `val2` and `results2` are defined but never used
# below — presumably leftovers from a multi-problem version of this script.
problems = c(2,4,7,17,23)
i = 4
cat(sprintf("Function nr %d:\n",i))
desfun <- function(x) cec2017::cec2017(i,x)
val <- numeric(5)
val2 <- numeric(5)
for(j in 1:5)
{
  cat(sprintf("%d..",j))
  # Stop as soon as the score is within 1e-8 of the i*100 reference value.
  con$stopScore <- i*100 + 1e-08
  res <- des(desfun,10,control=con)
  val[j] <- res$score - i*100
  #if you want to save all the results (other than score) add something here
}
cat("\n")
results <- rbind(results, data.frame(best=min(val),worst=max(val),median=median(val),mean=mean(val),std=sd(val)))
row.names(results)[nrow(results)] <- i
print(results)
|
e7fb935b1d5bef8a69211d70212a7e4c1eb271d9
|
219ec7af97064924129019af253e337a1d4e9dfb
|
/RHive/man/rhive-api.Rd
|
04bc1f6787547416bb78ecd9a958524bdb6def63
|
[] |
no_license
|
jiyulongxu/RHive
|
6bac37610c83a11e2d508182b7f99862286ef642
|
f161db301f64129a1af5692e7fdf7c27a9b47f28
|
refs/heads/master
| 2020-04-01T06:41:20.660847
| 2017-07-19T01:02:28
| 2017-07-19T01:02:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,264
|
rd
|
rhive-api.Rd
|
\name{rhive-api}
\alias{rhive.list.databases}
\alias{rhive.show.databases}
\alias{rhive.use.database}
\alias{rhive.list.tables}
\alias{rhive.show.tables}
\alias{rhive.desc.table}
\alias{rhive.load.table}
\alias{rhive.load.table2}
\alias{rhive.exist.table}
\alias{rhive.size.table}
\alias{rhive.drop.table}
\alias{rhive.set}
\alias{rhive.unset}
\title{R functions to get informations of table from HIVE}
\usage{
rhive.list.databases(pattern)
rhive.show.databases(pattern)
rhive.use.database(databaseName)
rhive.list.tables(pattern)
rhive.show.tables(pattern)
rhive.desc.table(tableName, detail=FALSE)
rhive.load.table(tableName, fetchSize=50, limit=-1)
rhive.load.table2(tableName, limit=-1, remote=TRUE)
rhive.exist.table(tableName)
rhive.size.table(tableName)
rhive.drop.table(tableName, list)
rhive.set(key, value)
rhive.unset(key)
}
\description{
R functions to get informations of table from HIVE
}
\arguments{
\item{databaseName}{hive database name.}
\item{tableName}{hive table name.}
\item{remote}{hiveserver mode.}
\item{detail}{a flag on whether to show detail of table info.}
\item{limit}{total fetch size. -1 means full fetch}
  \item{fetchSize}{the number of records to load at one time}
\item{pattern}{an optional regular expression. Only names
matching 'pattern' are returned. 'glob2rx' can be used to
convert wildcard patterns to regular expressions.}
  \item{list}{a character vector naming tables to be removed,
        or the result of rhive.list.tables.}
\item{key}{hive configuration key}
\item{value}{hive configuration value}
}
\author{
\email{rhive@nexr.com}
}
\examples{
## try to connect hive server
\dontrun{rhive.connect("hive-server-ip")}
## get list of databases in the Hive
\dontrun{rhive.list.databases()}
## set current database
\dontrun{rhive.use.database('default')}
## get list of tables in the Hive
\dontrun{rhive.list.tables()}
## get table info in the Hive
\dontrun{rhive.desc.table('emp')}
## get detail information of a table in the Hive
\dontrun{rhive.desc.table('emp', TRUE)}
## retrieve data from hive
\dontrun{emp <- rhive.load.table('emp')}
## display column names
\dontrun{colnames(emp)}
## display row count
\dontrun{length(rownames(emp))}
## close connection
\dontrun{rhive.close()}
}
\keyword{programming}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.