blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d58189d80f5f46aef3c8b31cc179e13fcb5c58d1
|
9cc7423f4a94698df5173188b63c313a7df99b0e
|
/man/format_p.Rd
|
ece2b92d2575eb19c8f2243d4ced01b6a9ec5662
|
[
"MIT"
] |
permissive
|
HugoNjb/psycho.R
|
71a16406654b11007f0d2f84b8d36587c5c8caec
|
601eef008ec463040c68bf72ac1ed8d4a8f7751f
|
refs/heads/master
| 2020-03-27T01:24:23.389884
| 2018-07-19T13:08:53
| 2018-07-19T13:08:53
| 145,707,311
| 1
| 0
| null | 2018-08-22T12:39:27
| 2018-08-22T12:39:27
| null |
UTF-8
|
R
| false
| true
| 384
|
rd
|
format_p.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formatting.R
\name{format_p}
\alias{format_p}
\title{Format p values.}
\usage{
format_p(pvalues, stars = TRUE)
}
\arguments{
\item{pvalues}{P values (scalar or vector).}
\item{stars}{Add stars.}
}
\description{
Format p values.
}
\author{
\href{https://dominiquemakowski.github.io/}{Dominique Makowski}
}
|
088588ce3835382aad6b5e5d4d480b27ba134f56
|
9f57e0ad44b78d809c262fa0ffb659232fdb8d5e
|
/exercises/concat_alternate.R
|
6d097fe2fa9643f4f70ef0cb5323d336b5556a85
|
[] |
no_license
|
abhi8893/Intensive-R
|
c3439c177776f63705546c6666960fbc020c47e8
|
e340ad775bf25d5a17435f8ea18300013195e2c7
|
refs/heads/master
| 2020-09-22T00:57:36.118504
| 2020-08-31T09:23:57
| 2020-08-31T09:23:57
| 224,994,015
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 147
|
r
|
concat_alternate.R
|
# Interleave the elements of two vectors (v1[1], v2[1], v1[2], v2[2], ...).
#
# rbind() stacks the two inputs as the rows of a 2-row matrix; reading
# that matrix in R's column-major order alternates the elements.
concat.alternate <- function(v1, v2){
  stacked <- rbind(v1, v2)
  as.vector(stacked)
}

v1 <- 1:10
v2 <- 11:20
concat.alternate(v1, v2)
|
13812d0fe67c4b92b47d7be28171c496e17f281b
|
75f49b6ae9d6262bad6545358eaa000b54dcc76e
|
/exemple.R
|
f8d308716b7acdc873e24f25971feae084f31e68
|
[] |
no_license
|
mateuscarbone/MOCK_R
|
ec1816f35dcf5bb865b912dc38ed9a6e8c700005
|
6fb90c4f5ab9687c8948d7833f8d62da7fb5a2c0
|
refs/heads/master
| 2020-03-20T19:08:04.428212
| 2019-07-01T17:21:29
| 2019-07-01T17:21:29
| 137,623,123
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 852
|
r
|
exemple.R
|
# Example driver for the MOCK (multi-objective clustering) implementation
# defined in codigos_MOCK.R.
# NOTE(review): MOCK() itself is not visible here; it appears to take a
# data frame and a numeric argument and return an object with a $group
# component -- confirm against codigos_MOCK.R.
source("codigos_MOCK.R")
library(plotly)
library(mclust)
# Fictitious data ---------------------------------------------------------
# Three noisy clusters (centred near 2, 1 and 0 on each axis) plus a few
# hand-placed off-cluster points appended to each coordinate vector.
am1<-c(rnorm(100,2,.2),0, rnorm(100,1,.2),rnorm(100,0,.2),2.5,1)
am2<-c(rnorm(100,2,.2),2.5,rnorm(100,1,.2),rnorm(100,0,.2),0,1.2)
am3<-c(rnorm(100,2,.2),2.5,rnorm(100,1,.2),rnorm(100,0,.2),0,1.2)
dados<-data.frame(cbind(am1,am2,am3))
# 3-D scatter of the raw simulated points.
plot_ly(dados,x=~am1,y=~am2,z=~am3,type = "scatter3d")
# True group labels for the simulated points (the extra hand-placed
# points are assigned to groups 1 and 2).
grupos<-c(rep(1,100),1,rep(2,100),rep(3,100),2,2)
plot_ly(dados,x=~am1,y=~am2,z=~am3,color=~factor(grupos),type = "scatter3d")
# MOCK --------------------------------------------------------------------
# Time the clustering run.
start.time <- Sys.time()
res.mock<-MOCK(dados,5)
end.time <- Sys.time()
end.time-start.time
# Scatter coloured by the clusters MOCK recovered.
plot_ly(dados,x=~am1,y=~am2,z=~am3,color=~factor(res.mock$group),type = "scatter3d")
# Agreement between recovered clusters and the true labels.
adjustedRandIndex(res.mock$group,grupos)
|
b33a15226b478511eb2d3ea3091027f9f486e772
|
c1e7158f1947a3135a033487482877b77b439072
|
/Swaziland roof type SL - no tuning.R
|
786b29f2a5a182bbcbb014635310516ec8774572
|
[] |
no_license
|
amandairish-zz/Roof-type-project
|
b9bad302d5d806d8865d34e790c73aa290e80e1b
|
d21e65fc9ac331226a9676442199e0f31f3383df
|
refs/heads/master
| 2022-02-21T23:58:56.026627
| 2019-09-16T20:32:17
| 2019-09-16T20:32:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,397
|
r
|
Swaziland roof type SL - no tuning.R
|
## multi-class classification for roof types in Swaziland using SL
## last updated 3/8/19 by Amanda Irish
# This script fits one binary SuperLearner ensemble per roof class
# (Tile / Metal / Thatch), combines the three predicted probabilities
# into a multi-class label via which.max, and evaluates with kappa,
# confusion matrices, and ROC/AUC.
library(RhpcBLASctl)
library(caret)
library(SuperLearner)
library(randomForest)
library(gbm)
library(xgboost)
library(arm)
library(gam)
library(glmnet)
library(LogicReg)
library(cvAUC)
library(irr)
library(tidyverse)
library(namespace)
# Load data
# NOTE(review): setwd() to an absolute user-specific path makes this
# script non-portable; consider project-relative paths.
setwd("/Users/amandairish/Desktop/Malaria project/Swaziland")
swazi <- read.csv("SWZ_OSM_Sentinel_112017.csv")
# Get rid of asphalt as outcome class
swazi <- swazi %>%
filter(LULC != "Asphalt")
# Change LULC so it is a factor variable w/ 3 levels instead of string variable
swazi$LULC <- as.factor(as.character(swazi$LULC))
levels(swazi$LULC)
# Exploratory analysis of data
head(swazi)
# Summary statistics
summary(swazi$B2)
summary(swazi$B3)
summary(swazi$B4)
summary(swazi$B8)
summary(swazi$NL)
summary(swazi$ndvi)
summary(swazi$ndwi)
# Histograms
ggplot(data = swazi, mapping = aes(x = B2)) + # R skew
geom_histogram(binwidth = 0.01)
ggplot(data = swazi, mapping = aes(x = B3)) + # R skew
geom_histogram(binwidth = 0.01)
ggplot(data = swazi, mapping = aes(x = B4)) + # mostly normal, sl R skew w/ 1(?) outlier >0.4
geom_histogram(binwidth = 0.01)
ggplot(data = swazi, mapping = aes(x = B8)) + # same as B4
geom_histogram(binwidth = 0.01)
ggplot(data = swazi, mapping = aes(x = NL)) + # very R skew
geom_histogram(binwidth = 1)
ggplot(data = swazi, mapping = aes(x = ndvi)) + # normal
geom_histogram(binwidth = 0.01)
ggplot(data = swazi, mapping = aes(x = ndwi)) + # slight R skew
geom_histogram(binwidth = 0.01)
# Calculate skewness
# NOTE(review): skewness() is not defined in this script -- presumably
# from a loaded package (e.g. e1071 via caret); confirm it is on the path.
skew.values <- apply(swazi[4:10], 2, skewness) # apply skewness function to columns 4-10
skew.values # should be close to 0 if normally distributed, >0 means R skew
# Process data to get rid of skewness, center & scale variables
swazi <- as.data.frame(swazi) # need to do this to get next step to work
swazi.pre <- preProcess(swazi[, 4:10],
method = c("BoxCox", "center", "scale"))
swazi.pre
# Apply the transformations
swazi.trans <- predict(swazi.pre, swazi)
# Convert back to tibble
swazi.trans <- as_tibble(swazi.trans)
# Subset columns for training/testing dataset - try with all 3 classes
swazi.testtrain <- select(swazi.trans, -(c("Latitude", "Longitude")))
head(swazi.testtrain)
# Set seed for reproducibility
set.seed(88)
# Partition data into training and test data
trainIndex <- createDataPartition(swazi.testtrain$LULC, p = 0.85,
list = FALSE,
times = 1)
train <- swazi.testtrain[trainIndex,]
test <- swazi.testtrain[-trainIndex,]
table(train$LULC)
table(test$LULC)
# outcome
Y <- train$LULC
Y_test <- test$LULC
# independent variables - B2, B3, B4, B8, NL, NDVI, NDWI
X <- train[, 2:8]
X_test <- test[, 2:8]
# create the 3 binary outcome variables
# One 0/1 indicator per class for the three one-vs-rest SL fits below.
Y_T <- as.numeric(Y == "Tile")
Y_M <- as.numeric(Y == "Metal")
Y_Th <- as.numeric(Y == "Thatch")
# Setup parallel computation - use all cores on our computer.
num_cores = RhpcBLASctl::get_num_cores()
# How many cores does this computer have?
num_cores
# Use 2 of those cores for parallel SuperLearner.
# Replace "2" with "num_cores" (without quotes) to use all cores.
# NOTE(review): the comment above says 2 cores, but 4 are requested here.
options(mc.cores = 4)
# Check how many parallel workers we are using (on macOS/Linux).
getOption("mc.cores")
# We need to set a different type of seed that works across cores.
# Otherwise the other cores will go rogue and we won't get repeatable results.
# This version is for the "multicore" parallel system in R.
set.seed(1, "L'Ecuyer-CMRG")
# SL library
SL.library <- c("SL.randomForest", "SL.gbm", "SL.ksvm",
"SL.glmnet", "SL.glm", "SL.gam", "SL.xgboost",
"SL.bayesglm", "SL.lda", "SL.mean")
# fit CV.SL using method.AUC
# NOTE(review): no method argument is passed below, so these fits use
# the CV.SuperLearner default (method.NNLS), not method.AUC as the
# comment above suggests.
# While this is running check CPU using in Activity Monitor / Task Manager.
system.time({
fit_T <- CV.SuperLearner(Y = Y_T,
X = X,
V = 10,
SL.library = SL.library,
verbose = FALSE,
family = binomial(),
parallel = "multicore",
cvControl = list(stratifyCV = TRUE))
})
system.time({
fit_M <- CV.SuperLearner(Y = Y_M,
X = X,
V = 10,
SL.library = SL.library,
verbose = FALSE,
family = binomial(),
parallel = "multicore",
cvControl = list(stratifyCV = TRUE))
})
system.time({
fit_Th <- CV.SuperLearner(Y = Y_Th,
X = X,
V = 10,
SL.library = SL.library,
verbose = FALSE,
family = binomial(),
parallel = "multicore",
cvControl = list(stratifyCV = TRUE))
})
# fit_A <- CV.SuperLearner(Y = Y_A, X = X, V = 10, SL.library = SL.library, verbose = FALSE, method = "method.NNLS", family = binomial(), cvControl = list(stratifyCV = TRUE))
# examine the fits of the CV.SLs
summary(fit_T)
fit_T$coef
table(simplify2array(fit_T$whichDiscreteSL))
plot(fit_T)
summary(fit_M)
fit_M$coef
table(simplify2array(fit_M$whichDiscreteSL))
plot(fit_M)
summary(fit_Th)
fit_Th$coef
table(simplify2array(fit_Th$whichDiscreteSL))
plot(fit_Th)
# Apply method.NNLS SL predictions of roof types
SL_pred <- data.frame(pred_T = fit_T$SL.predict, pred_M = fit_M$SL.predict, pred_Th = fit_Th$SL.predict)
SL_pred
# Apply method.NNLS discrete SL predictions of roof types - NOTE: need to add back in Ashpalt if decide to use in future
SL_discrete_pred <-data.frame(pred_T = fit_T$discreteSL.predict, pred_M = fit_M$discreteSL.predict, pred_Th = fit_Th$discreteSL.predict)
SL_discrete_pred
# Multi-class label = class with the highest of the three probabilities.
Classify <- apply(SL_pred, 1, function(xx) c("Tile", "Metal", "Thatch")[unname(which.max(xx))])
Classify_dp <- apply(SL_discrete_pred, 1, function(xx) c("Tile", "Metal", "Thatch")[unname(which.max(xx))])
SL_pred_table <- table(Classify, Y)
SL_pred_table
Discrete_SL_table <- table(Classify_dp, Y)
Discrete_SL_table
# Calculate kappa statistics
# SL prediction
SLP <- data.frame("pred" = Classify, "actual" = Y)
kappa2(SLP[,c(1,2)], "unweighted")
# Discrete prediction
DP <- data.frame("pred_dp" = Classify_dp, "actual" = Y)
kappa2(DP[,c(1,2)], "unweighted")
# Apply method.NNLS SL predictions of roof types
# NOTE(review): fit_T2 / fit_M2 / fit_Th2 are never created in this
# script (only fit_T / fit_M / fit_Th above) -- this section errors with
# "object not found" as written; it looks copied from a variant script.
SL_pred_nnls <- data.frame(pred_T = fit_T2$SL.predict, pred_M = fit_M2$SL.predict, pred_Th = fit_Th2$SL.predict)
SL_pred_nnls
# Apply method.NNLS discrete SL predictions of roof types - NOTE: need to add back in Ashpalt if decide to use in future
SL_discrete_pred_nnls <-data.frame(pred_T = fit_T2$discreteSL.predict, pred_M = fit_M2$discreteSL.predict, pred_Th = fit_Th2$discreteSL.predict)
SL_discrete_pred_nnls
Classify_nnls <- apply(SL_pred_nnls, 1, function(xx) c("Tile", "Metal", "Thatch")[unname(which.max(xx))])
Classify_dp_nnls <- apply(SL_discrete_pred_nnls, 1, function(xx) c("Tile", "Metal", "Thatch")[unname(which.max(xx))])
SL_pred_nnls_table <- table(Classify_nnls, Y)
SL_pred_nnls_table
Discrete_SL_nnls_table <- table(Classify_dp_nnls, Y)
Discrete_SL_nnls_table
# Calculate kappa statistics
# method.NNLS SL prediction
SLP_nnls <- data.frame("pred" = Classify_nnls, "actual" = Y)
kappa2(SLP_nnls[,c(1,2)], "unweighted")
# method.NNLS Discrete prediction
DP_nnls <- data.frame("pred_dp" = Classify_dp_nnls, "actual" = Y)
kappa2(DP_nnls[,c(1,2)], "unweighted")
# Do the same as the above but with method.AUC
# Apply method.auc SL predictions of roof types
# NOTE(review): fit_T3 / fit_M3 / fit_Th3 are likewise never defined in
# this script -- this section also errors as written.
SL_pred_auc <- data.frame(pred_T = fit_T3$SL.predict, pred_M = fit_M3$SL.predict, pred_Th = fit_Th3$SL.predict)
SL_pred_auc
# Apply method.auc discrete SL predictions of roof types - NOTE: need to add back in Ashpalt if decide to use in future
#SL_discrete_pred_auc <-data.frame(pred_T = fit_T3$discreteSL.predict, pred_M = fit_M3$discreteSL.predict, pred_Th = fit_Th3$discreteSL.predict)
#SL_discrete_pred_auc
Classify_auc <- apply(SL_pred_auc, 1, function(xx) c("Tile", "Metal", "Thatch")[unname(which.max(xx))])
#Classify_dp_auc <- apply(SL_discrete_pred_auc, 1, function(xx) c("Tile", "Metal", "Thatch")[unname(which.max(xx))])
SL_pred_auc_table <- table(Classify_auc, Y)
SL_pred_auc_table
#Discrete_SL_auc_table <- table(Classify_dp_auc, Y)
#Discrete_SL_auc_table
# Calculate kappa statistics
# method.auc SL prediction
SLP_auc <- data.frame("pred" = Classify_auc, "actual" = Y)
kappa2(SLP_auc[,c(1,2)], "unweighted")
# Based on kappa statistic performance, method.auc slightly outperforms method.NNLS
# & method.nnloglik so should will method.auc moving forward (although substantively no real difference).
# More difficult to us discrete SL if desired since method.auc does not select which is
# the best-performing algorithm (whichDiscreteSL returnes "NULL" for all)
# First need to call SL functions instead of CV.SuperLearner in order to be able to predict (???)
# Modify SL library used to get rid of those that didn't work well
SL.library2 <- c("SL.gbm", "SL.gam", "SL.ksvm", "SL.glmnet", "SL.randomForest")
set.seed(451)
# Fit SL functions w/ method.NNLS & new SL library
fit_T_SL <- SuperLearner(Y = Y_T, X = X, SL.library = SL.library2, verbose = FALSE, family = binomial())
fit_T_SL_auc <- SuperLearner(Y = Y_T, X = X, SL.library = SL.library2, verbose = FALSE, method = "method.AUC", family = binomial())
# For the above, got this Warning message:
#In method$computeCoef(Z = Z, Y = Y, libraryNames = libraryNames, :
#optim didn't converge when estimating the super learner coefficients, reason (see ?optim): 52 optim message: ERROR: ABNORMAL_TERMINATION_IN_LNSRCH
fit_M_SL <- SuperLearner(Y = Y_M, X = X, SL.library = SL.library2, verbose = FALSE, family = binomial())
fit_M_SL_auc <- SuperLearner(Y = Y_M, X = X, SL.library = SL.library2, verbose = FALSE, method = "method.AUC", family = binomial())
# no error message with this method.auc for metal
fit_Th_SL <- SuperLearner(Y = Y_Th, X = X, SL.library = SL.library2, verbose = FALSE, family = binomial())
fit_Th_SL_auc <- SuperLearner(Y = Y_Th, X = X, SL.library = SL.library2, verbose = FALSE, method = "method.AUC", family = binomial())
# same warning message as for tile
fit_T_SL
fit_T_SL_auc
fit_M_SL
fit_M_SL_auc
fit_Th_SL
fit_Th_SL_auc
#fit_A_SL
# predict back on known data (df) so have comparison maps
# keep getting error message "test vector does not match model" 6/13/18
#pred_T <- predict(fit_T_SL, train, onlySL=T) #tried changing df to train - still same error message
#pred_M <- predict(fit_M_SL, newdata = df)
#pred_Th <- predict(fit_Th_SL, newdata = df)
#pred_A <- predict(fit_A_SL, newdata = df_new)
#SL_pred_df <- data.frame(pred_T = pred_T$pred, pred_M = pred_M$pred, pred_Th = pred_Th$pred)
#Classify_df <- apply(SL_pred_df, 1, function(xx) c("Tile", "Metal", "Thatch")[unname(which.max(xx))])
#Classify_df
# Add prediction probabilities & classifications to df
#df$pred_T = pred_T$pred
#df$pred_M = pred_M$pred
#df$pred_Th = pred_Th$pred
#df$pred_roof_type = Classify_df
#head(df)
# Write out new dataset with roof predictions
#write.csv(df, file = "Swazi sentinel CVSL data known and pred.csv")
# 3/11/19 got this error message when trying to predict with next chunk of code:
#Error: Length of logical index vector for `[` must equal number of columns (or 1):
# * `.data` has 8 columns
#* Index vector has length 7
# Think it is b/c test has LULC class?? But why would this be a problem?
# Try removing LULC class
test2 <- test %>%
select(-"LULC")
# Then use 15% retained from original dataset
pred_T_test <- predict(fit_T_SL, newdata = test2)
pred_M_test <- predict(fit_M_SL, newdata = test2)
pred_Th_test <- predict(fit_Th_SL, newdata = test2)
# and for AUC
pred_T_test_auc <- predict(fit_T_SL_auc, newdata = test2)
pred_M_test_auc <- predict(fit_M_SL_auc, newdata = test2)
pred_Th_test_auc <- predict(fit_Th_SL_auc, newdata = test2)
# Attach LULC - this doesn't seem to be necessary (??)
LULC <- as.vector(test$LULC)
pred_T_2 <- as.data.frame(cbind(pred_T_test$pred, LULC))
pred_M_2 <- as.data.frame(cbind(pred_M_test$pred, LULC))
pred_Th_2 <- as.data.frame(cbind(pred_Th_test$pred, LULC))
# from online SL intro:
str(pred_T_test)
#summary(pred_T_vdf$library.predict)
#qplot(pred_T_vdf$pred) + theme_bw()
#qplot(Y_vdf, pred_T_vdf$pred) + theme_classic()
SL_pred_test <- data.frame(pred_T = pred_T_test$pred, pred_M = pred_M_test$pred, pred_Th = pred_Th_test$pred)
SL_pred_test
SL_pred_test_auc <- data.frame(pred_T = pred_T_test_auc$pred, pred_M = pred_M_test_auc$pred, pred_Th = pred_Th_test_auc$pred)
SL_pred_test_auc
# NOTE: if need to re-run kappas, make sure "Thatch" is spelled "Thach" to match original dataset.
Classify_test <- as.factor(apply(SL_pred_test, 1, function(xx) c("Tile", "Metal", "Thatch")[unname(which.max(xx))]))
Classify_test_auc <- as.factor(apply(SL_pred_test_auc, 1, function(xx) c("Tile", "Metal", "Thatch")[unname(which.max(xx))]))
confusionMatrix(Classify_test, test$LULC)
confusionMatrix(Classify_test_auc, test$LULC)
# Add prediction probabilities & classifications to vdf
# NOTE(review): vdf, pred_T_vdf, pred_M_vdf, pred_Th_vdf and
# Classify_vdf are not defined anywhere in this script -- this block
# errors as written (likely a leftover from a validation-set script).
vdf$pred_T = pred_T_vdf$pred
vdf$pred_M = pred_M_vdf$pred
vdf$pred_Th = pred_Th_vdf$pred
vdf$pred_roof_type = Classify_vdf
head(vdf)
# Write out new dataset with roof predictions
write.csv(vdf, file = "Validation sentinel data with roof type pred.csv")
# Next use entirely new dataset
df_new <- read.csv("SWA_OSM_Sentinel_113017.csv")
head(df_new)
pred_T <- predict(fit_T_SL, newdata = df_new)
pred_M <- predict(fit_M_SL, newdata = df_new)
pred_Th <- predict(fit_Th_SL, newdata = df_new)
#pred_A <- predict(fit_A_SL, newdata = df_new)
SL_pred_new <- data.frame(pred_T = pred_T$pred, pred_M = pred_M$pred, pred_Th = pred_Th$pred)
Classify_new <- apply(SL_pred_new, 1, function(xx) c("Tile", "Metal", "Thatch")[unname(which.max(xx))])
Classify_new
# Add prediction probabilities & classifications to df_new
df_new$pred_T <- pred_T$pred
df_new$pred_M <- pred_M$pred
df_new$pred_Th <- pred_Th$pred
df_new$pred_roof_type <- Classify_new
head(df_new)
# Write out new dataset with roof predictions
write.csv(df_new, file = "Swazi Sentinel data with roof type pred - no tuning.csv")
# Save all objects in global environment
save(list = ls(), file = "Swazi SL roof pred global env - no tuning.RData")
# Save workspace image
save.image(file = "Swazi SL roof pred image - no tuning.RData")
#######################
# For epi 264: predict back on known data (df) so have comparison maps
# NOTE(review): df is never defined in this script (the known data is
# named swazi / swazi.trans above) -- this section errors as written.
pred_T <- predict(fit_T_SL, newdata = df)
pred_M <- predict(fit_M_SL, newdata = df)
pred_Th <- predict(fit_Th_SL, newdata = df)
#pred_A <- predict(fit_A_SL, newdata = df_new)
SL_pred_df <- data.frame(pred_T = pred_T$pred, pred_M = pred_M$pred, pred_Th = pred_Th$pred)
Classify_df <- apply(SL_pred_df, 1, function(xx) c("Tile", "Metal", "Thatch")[unname(which.max(xx))])
Classify_df
# Add prediction probabilities & classifications to df
df$pred_T = pred_T$pred
df$pred_M = pred_M$pred
df$pred_Th = pred_Th$pred
df$pred_roof_type = Classify_df
head(df)
# Write out new dataset with roof predictions
write.csv(df, file = "Swazi sentinel CVSL data known and pred.csv")
#####################
# Evaluate with ROC curves
# CV SL predictions
# Tile
roc.t.cvsl <- as.data.frame(cbind(fit_T$SL.predict, fit_T$Y))
colnames(roc.t.cvsl) <- c("predictions", "labels")
AUC(roc.t.cvsl$predictions, roc.t.cvsl$labels)
# Metal
roc.m.cvsl <- as.data.frame(cbind(fit_M$SL.predict, fit_M$Y))
colnames(roc.m.cvsl) <- c("predictions", "labels")
AUC(roc.m.cvsl$predictions, roc.m.cvsl$labels)
# Thatch
roc.th.cvsl <- as.data.frame(cbind(fit_Th$SL.predict, fit_Th$Y))
colnames(roc.th.cvsl) <- c("predictions", "labels")
AUC(roc.th.cvsl$predictions, roc.th.cvsl$labels)
# Plot curves with pROC package
library(pROC)
roc1 <- roc(roc.m.cvsl$labels,
roc.m.cvsl$predictions, percent=TRUE,
# arguments for auc
auc=c(100, 90), auc.correct=TRUE,
# arguments for ci
ci=TRUE, boot.n=100, ci.alpha=0.9, stratified=FALSE,
# arguments for plot
plot=TRUE, grid=TRUE,
print.auc=TRUE, show.thres=TRUE)
|
f8a5e0eb369904eadd7eac5d625b9278ba0780bb
|
a85ee4fb743c662c7fd588ba662e6fa6352ca24b
|
/functions.R
|
899e6b548acd762fc0f70602f3746400475e57ef
|
[] |
no_license
|
DiveIT/AustraliaKidneyDisease
|
525e06510e142e3d1cc73ddeddd1d226aa7b1d8a
|
acb106e1a091507342972ceace9e258ded8246cc
|
refs/heads/master
| 2020-03-13T03:38:17.272224
| 2018-06-14T07:08:10
| 2018-06-14T07:08:10
| 130,947,783
| 0
| 1
| null | 2018-06-12T07:23:19
| 2018-04-25T03:49:22
|
HTML
|
UTF-8
|
R
| false
| false
| 9,994
|
r
|
functions.R
|
# Import one state's "Indigenous Status by SA2 Level" census extract.
#
# filePath: path to a CSV export whose first 9 lines are preamble
#           (skipped) and whose 7 data columns follow a header row.
# state:    state code (e.g. "NSW") recorded in a new State column.
#
# Returns a data.frame with standardised column names plus State, with
# the header row and trailing summary rows dropped for known census
# years (the count of trailing rows differs between 2011 and 2016).
importSA2byState <- function(filePath, state) {
  data <- read.csv(file = filePath, skip = 9, header = TRUE)
  data$X <- NULL  # drop the trailing empty column the export adds
  names(data) <- c("SA2", "Non-Indigenous", "Aboriginal", "Torres Strait Islander",
                   "Both Aboriginal and Torres Strait Islander", "Not stated", "Total")
  # Convert via character first so a factor column (pre-R-4.0 read.csv
  # default) yields its printed values rather than its level codes;
  # this matches the as.integer(as.character(...)) pattern used
  # elsewhere in this file.
  data$`Non-Indigenous` <- as.integer(as.character(data$`Non-Indigenous`))
  data$State <- state
  nrows <- nrow(data)
  if (grepl("2011", filePath)) {
    # Remove first row and last 4 rows (2011 export layout).
    data <- data[-c(1, nrows - 3, nrows - 2, nrows - 1, nrows), ]
  } else if (grepl("2016", filePath)) {
    # Remove first row and last 3 rows (2016 export layout).
    data <- data[-c(1, nrows - 2, nrows - 1, nrows), ]
  }
  return(data)
}
# Import: Indigenous Status by SA2 Level for each state
#
# Reads the per-state CSV extracts for the given census year, stacks
# them into a single data.frame (in the same fixed order as before),
# and converts State to a factor.
#
# census_year: census year as a string ("2011" or "2016"); used both in
#              the directory name and the file names.
importSA2Data <- function(census_year = "2011") {
  # State codes in import order; the residual category is coded "OTHER"
  # but its file is labelled "Other" (title case).
  state_codes <- c("ACT", "NSW", "NT", "OTHER", "QLD", "SA", "TAS", "VIC", "WA")
  per_state <- lapply(state_codes, function(st) {
    file_label <- if (st == "OTHER") "Other" else st
    path <- paste(census_year, "-Census-Analysis/data/Census ", census_year,
                  " - Indigenous Status by SA2 Level - ", file_label, ".csv",
                  sep = "")
    importSA2byState(path, st)
  })
  sa2Data <- do.call(rbind, per_state)
  sa2Data$State <- as.factor(sa2Data$State)
  return(sa2Data)
}
# Import Census data
importCensusData <- function(census_year = "2011",
importFilePath = "2011-Census-Analysis/data/2011_Census.csv",
exportFilePath = "D:/2011_Census.csv") {
if (file.exists(importFilePath)) {
census <- read.csv(file=importFilePath, header=TRUE)
rm(importFilePath)
} else
{
sa2Data <- importSA2Data(census_year)
data <- read.csv(file=paste(census_year, "-Census-Analysis/data/Census ", census_year, " - SA2 by IndigenousStatus Sex Age10 WeeklyIncome.csv", sep=""),
skip = 12, header = TRUE)
nrows <- nrow(data)
if (grepl("2011", census_year)) {
data <- data[-c(1, nrows-3, nrows-2, nrows-1, nrows), ] # Remove first row and last 4 rows of dataset
}
else if (grepl("2016", census_year)) {
data <- data[-c(1, nrows-2, nrows-1, nrows), ] # Remove first row and last 3 rows of dataset
}
#data <- data[-c(1, nrows-3, nrows-2, nrows-1, nrows), ]
data$Negative.income <- as.integer(as.character(data$Negative.income))
names(data)[1] <- "SA2"
data <- merge(sa2Data[c("SA2", "State")], data, by="SA2") # Add state SA2 region belongs to
head(data[c("SA2", "State")])
# Get rid of columns for "Total" status as this can be calculated from other data (
# last 308 columns for 2011 Census data and last 374 for 2016 Census data
ncols <- ncol(data)
if (grepl("2011", census_year)) {
data <- data[, -c((ncols-307):ncols)]
}
else if (grepl("2016", census_year)) {
data <- data[, -c((ncols-373):ncols)]
}
#data <- data[, -c((ncols-307):ncols)]
#data <- data[, -c((ncols-373):ncols)] # 374 vs 308
length(which(data == 0)) # number of zero counts in data
# Rename Columns headings
# New Columns: SA2
# State
# Indigenous Status
# Sex
# Age
# Weekly Income
nrows <- nrow(data)
census <- data.frame(matrix(ncol = 7, nrow = 0))
column_names <- c("SA2", "State", "Indigenous Status", "Sex", "Age", "Weekly Personal Income", "Count")
colnames(census) <- column_names
census$`Indigenous Status` <- census$Sex <- census$Age <- census$`Weekly Personal Income` <- factor()
census$SA2 <- factor(levels = unique(data$SA2))
census$State <- factor(levels = unique(data$State))
census$Count <- integer()
income_options <- c("Negative income", "Nil income", "$1-$199", "$200-$299", "$300-$399", "$400-$599",
"$600-$799", "$800-$999", "$1,000-$1,249", "$1,250-$1,499", "$1,500-$1,999",
"$2,000 or more", "Not stated", "Not applicable")
if (grepl("2016", census_year)) {
income_options <- c("Negative income", "Nil income", "$1-$149", "$150-$299", "$300-$399", "$400-$499",
"$500-$649", "$650-$799", "$800-$999", "$1,000-$1,249", "$1,250-$1,499", "$1,500-$1,749",
"$1,750-$1,999", "$2,000-$2,999", "$3,000 or more", "Not stated", "Not applicable")
}
row <- 1
col <- 3
start_time <- Sys.time()
while (row <= nrows)
{
for (status in c("Non-Indigenous", "Aboriginal", "Torres Strait Islander",
"Both Aboriginal and Torres Strait Islander", "Not stated"))
{
for (gender in c("Male", "Female"))
{
for (age in c("0-9 years", "10-19 years", "20-29 years", "30-39 years", "40-49 years", "50-59 years",
"60-69 years", "70-79 years", "80-89 years", "90-99 years", "100 years and over"))
{
for (weekly_income in income_options)
{
count <- data[row, col]
if (count == 0)
{
# Skipping counts of 0 speeds up the processing considerably
col <- col+1
next
}
new_row <- c(as.character(data[row, "SA2"]), as.character(data[row, "State"]), status, gender,
age, weekly_income, count)
new_row <- data.frame(t(new_row))
colnames(new_row) <- column_names
#census <- rbind(census, new_row) # 1.846581 min
census <- rbindlist(list(census, new_row)) #34.37751 sec
col <- col+1
}
}
}
}
row <- row+1
col <- 3
}
end_time <- Sys.time()
end_time - start_time
census$Count <- as.integer(as.character(census$Count))
rm(nrows, ncols, age, gender, status, weekly_income, column_names, row, col, new_row, count, start_time, end_time)
rm(income_options, data)
# Add new column that codes everyone as Non-Indigenous or Indigenous
census$Indigenous <- 1
census$Indigenous[census$`Indigenous Status`=='Non-Indigenous'] <- 0
census$Indigenous[census$`Indigenous Status`=='Not stated'] <- 0
# Import: SA2 Level by Remoteness Areas. This is used to determine whether a SA2 area is considered rural or not.
remoteness <- read.csv(file=paste(census_year, "-Census-Analysis/data/Census ", census_year, " - SA2 by Remoteness Area.csv", sep=""), skip=9, header=TRUE)
remoteness$Total <- remoteness$X <- NULL # Remove unrequired variables
nrows <- nrow(remoteness)
if (grepl("2011", census_year)) {
remoteness <- remoteness[-c(1, nrows-3, nrows-2, nrows-1, nrows), ] # Remove unrequired rows
}
else if (grepl("2016", census_year)) {
remoteness <- remoteness[-c(1, nrows-2, nrows-1, nrows), ] # Remove unrequired rows
}
colnames(remoteness)[1] <- "SA2"
remoteness$Major.Cities.of.Australia..NSW. <- as.integer(as.character(remoteness$Major.Cities.of.Australia..NSW.))
remoteness$SA2 <- as.character(remoteness$SA2)
remoteness <- merge(sa2Data[c("SA2", "State")], remoteness, by="SA2") # Add state SA2 region belongs to
# Create dummy categories
remoteness$nonzeros <- simplify2array(
apply(
remoteness[-1:-2], 1,
function(x) paste(names(remoteness[-1:-2])[x != 0], collapse = " ")
)
)
remoteness$City <- 0
remoteness$InnerRegional <- 0
remoteness$OuterRegional <- 0
remoteness$Remote <- 0
remoteness$VeryRemote <- 0
remoteness$MigratoryOffshoreShipping <- 0
remoteness$NoUsualAddress <- 0
remoteness$City[grepl("Cities", remoteness$nonzeros)] <- 1
remoteness$InnerRegional[grepl("Inner.Regional", remoteness$nonzeros)] <- 1
remoteness$OuterRegional[grepl("Outer.Regional", remoteness$nonzeros)] <- 1
remoteness$Remote[grepl("Remote.Australia", remoteness$nonzeros)] <- 1
remoteness$VeryRemote[grepl("Very.Remote", remoteness$nonzeros)] <- 1
remoteness$MigratoryOffshoreShipping[grepl("Migratory", remoteness$nonzeros)] <- 1
remoteness$NoUsualAddress[grepl("No.usual.address", remoteness$nonzeros)] <- 1
remoteness$nonzeros <- NULL
remoteness <- remoteness[c(1:2, 56:62)]
# Merge Remoteness dummy variables
census <- merge(remoteness, census, by=c("SA2", "State"))
rm(remoteness)
# 172 obs took 1.820187 hours to produce 264,880 x 7 data frame. Only takes ~2.5 minutes if skip 0 values
# 6.666625 hours to run for 2011 Census data
write.table(census, exportFilePath, sep=",", col.names = TRUE, row.names = FALSE)
}
return (census)
}
|
e629e8c4b0a417236c689d4e910ec1865c5721ee
|
0379b581948aa2bc3b924793e597adecba470800
|
/code/figures4-6.R
|
bd994943fa11c3663d8c97ded98718d5fba338dc
|
[] |
no_license
|
brooke-l-hawkins/salmon-simulations
|
84f877b946829b85471e5f8521d14ae3000e0a6a
|
1b696b0a1eaccc4120b9ecac4b2bf769111e01c4
|
refs/heads/master
| 2022-11-08T21:55:08.863623
| 2020-06-22T18:02:35
| 2020-06-22T18:02:35
| 261,578,848
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,217
|
r
|
figures4-6.R
|
#----- PREPARE DATA ------------------------------------------------------------
# load packages
library(lubridate)
library(ggplot2)
library(tidyr)
library(dplyr)

# Toggle whether the figures get written to disk.
save.figures <- TRUE
if (save.figures) {
  # Create the figure output directory if it is not already there.
  plot.directory <- paste0(getwd(), "/data.out/Figures")
  if (!dir.exists(plot.directory)) {
    dir.create(plot.directory)
  }
}

# Load one simulation scenario from disk and tag it with a scenario label.
# Each .RData file defines an object named `salmon.finalstep`.
read_scenario <- function(path, label) {
  load(file = path)  # brings `salmon.finalstep` into this function's scope
  cbind(scenario = label, as.data.frame(salmon.finalstep))
}

# Stack the four scenarios into one dataframe.
all.scenarios.data <- rbind(
  read_scenario("data.out/A.2014/salmon.finalstep.2014.1.RData", "Baseline"),
  read_scenario("data.out/A.2015/salmon.finalstep.2015.1.RData", "Warm"),
  read_scenario("data.out/P.2014/salmon.finalstep.2014.1.RData", "Predator"),
  read_scenario("data.out/P.2015/salmon.finalstep.2015.1.RData", "Warm-Predator")
)

# Convert raw columns into typed columns for ggplot2.
# Date columns arrive as numeric seconds since the Unix epoch.
all.scenarios.data <- all.scenarios.data %>%
  select(scenario, survive, weight, dateSp, dateEm, dateOm) %>%
  transmute(Scenario = as.factor(scenario),
            FinalState = as.factor(survive),
            Weight = as.numeric(weight),
            DateSpawn = date(as_datetime(dateSp, origin = "1970-01-01")),
            DateEmerge = date(as_datetime(dateEm, origin = "1970-01-01")),
            DateOutmigrate = date(as_datetime(dateOm, origin = "1970-01-01")))
# Replace the numeric survival codes with descriptive labels.
levels(all.scenarios.data$FinalState) <- c("Predation", "Stochastic", "Yearling", "Subyearling")
#--- FIGURE 6: FINAL STATE -----------------------------------------------------
# Figure 6: per-scenario counts of surviving simulated salmon, stacked by
# life-history outcome (yearling vs. subyearling).
# Final state barplot
all.scenarios.data %>%
  # filter data to surviving fish (drop "Predation" and "Stochastic" deaths)
  filter(FinalState == "Subyearling" | FinalState == "Yearling") %>%
  # plot final state vs. scenario
  ggplot(aes(x = Scenario, color = FinalState, fill = FinalState)) +
  # add barplot (stacked counts per scenario)
  geom_bar(alpha = 0.5) +
  # set theme
  theme_classic() +
  # remove legend title
  theme(legend.title = element_blank()) +
  # adjust y-axis label position (horizontal, vertically centered)
  theme(axis.title.y = element_text(angle = 0, vjust = 0.5)) +
  # manually set color (green/grey assigned to the two remaining levels)
  scale_color_manual(values=c("#008000", "#808080")) +
  # manually set fill (matches the outline colors above)
  scale_fill_manual(values=c("#008000", "#808080")) +
  # adjust y-axis label text
  labs(y = "Simulated\nsalmon\n(count)")
if (save.figures) {
  ggsave(path = plot.directory, filename = "Figure6.png", plot = last_plot(),
         width = 5.5, height = 4, units = "in", dpi = 300)
}
#--- FIGURE 5: MASS ------------------------------------------------------------
# Figure 5: mass distributions of surviving fish, one histogram panel per
# scenario, produced separately for subyearlings (grey) and yearlings (green).
# Subyearling weight histogram
all.scenarios.data %>%
  # filter data to surviving subyearlings
  filter(FinalState == "Subyearling") %>%
  # plot weight
  ggplot(aes(x = Weight, color = FinalState, fill = FinalState)) +
  # add histogram plot
  geom_histogram(alpha = 0.5) +
  # one panel per scenario, stacked vertically
  facet_wrap( ~ Scenario, nrow = 4, ncol = 1) +
  # set theme
  theme_classic() +
  # remove legend (single fill, legend adds nothing)
  theme(legend.position = "none") +
  # remove bottom axis line and ticks (re-added per panel via geom_hline below)
  theme(axis.line.x.bottom = element_blank(), axis.ticks.x.bottom = element_blank()) +
  # remove subplot label background
  theme(strip.background = element_blank()) +
  # adjust y-axis label position (horizontal, vertically centered)
  theme(axis.title.y = element_text(angle = 0, vjust = 0.5)) +
  # add axis and title text
  labs(x = "Subyearling mass (g)", y = "Simulated\nsalmon\n(count)") +
  # manually set color (grey = subyearling)
  scale_color_manual(values="#808080") +
  # manually set fill
  scale_fill_manual(values="#808080") +
  # add x-axis to each plot
  geom_hline(yintercept = 0)
if (save.figures) {
  ggsave(path = plot.directory, filename = "Figure5Subyearling.png", plot = last_plot(),
         width = 4, height = 6, units = "in", dpi = 300)
}
# Yearling weight histogram (same layout as above, green fill)
all.scenarios.data %>%
  # filter data to surviving yearlings
  filter(FinalState == "Yearling") %>%
  # plot weight
  ggplot(aes(x = Weight, color = FinalState, fill = FinalState)) +
  # add histogram plot
  geom_histogram(alpha = 0.5) +
  # one panel per scenario, stacked vertically
  facet_wrap( ~ Scenario, nrow = 4, ncol = 1) +
  # set theme
  theme_classic() +
  # remove legend
  theme(legend.position = "none") +
  # remove bottom axis line and ticks (re-added per panel via geom_hline below)
  theme(axis.line.x.bottom = element_blank(), axis.ticks.x.bottom = element_blank()) +
  # remove subplot label background
  theme(strip.background = element_blank()) +
  # adjust y-axis label position
  theme(axis.title.y = element_text(angle = 0, vjust = 0.5)) +
  # add axis and title text
  labs(x = "Yearling mass (g)", y = "Simulated\nsalmon\n(count)") +
  # manually set color (green = yearling)
  scale_color_manual(values="#008000") +
  # manually set fill
  scale_fill_manual(values="#008000") +
  # add x-axis to each plot
  geom_hline(yintercept = 0)
if (save.figures) {
  ggsave(path = plot.directory, filename = "Figure5Yearling.png", plot = last_plot(),
         width = 4, height = 6, units = "in", dpi = 300)
}
#--- FIGURE 4: PHENOLOGY -------------------------------------------------------
# Change year in warm scenarios to match the year in cool scenarios, so that
# day-month is comparable across scenarios in the histograms below.
phenology.data <- all.scenarios.data
scenario.column <- all.scenarios.data$Scenario
spawn.column <- all.scenarios.data$DateSpawn
emergence.column <- all.scenarios.data$DateEmerge
outmigration.column <- all.scenarios.data$DateOutmigrate
# Vectorized replacement for the original per-row loop: subtract one year from
# every date in the warm scenarios in a single pass per column. NA dates
# (fish that never reached the event) stay NA, matching the loop's behavior.
warm.rows <- scenario.column %in% c("Warm", "Warm-Predator")
year(spawn.column[warm.rows]) <- year(spawn.column[warm.rows]) - 1
year(emergence.column[warm.rows]) <- year(emergence.column[warm.rows]) - 1
year(outmigration.column[warm.rows]) <- year(outmigration.column[warm.rows]) - 1
# Figure 4: phenology of emergence (blue) and outmigration (orange) for
# surviving fish, one panel per scenario, with warm-scenario years already
# shifted above so the x-axis (day-month) lines up across panels.
# plot emergence and outmigration across scenarios
phenology.data %>%
  # attach the year-adjusted date columns computed above
  mutate(Emergence = emergence.column, Outmigration = outmigration.column) %>%
  # filter data to surviving fish
  filter(FinalState == "Subyearling" | FinalState == "Yearling") %>%
  # select relevant columns
  select(Scenario, Emergence, Outmigration) %>%
  # combine emergence and outmigration into a single Event/Date column pair
  gather(key = "Event", value = "Date", c(Emergence, Outmigration)) %>%
  # filter data to fish that experienced each event (NA = event never occurred)
  filter(!is.na(Date)) %>%
  # plot event vs. day-month
  ggplot(aes(x = Date, fill = Event, color = Event)) +
  # overlapping (not stacked) histograms for the two events
  geom_histogram(alpha = 0.5, position = "identity") +
  # split plot by scenario
  facet_wrap(~ Scenario, nrow = 4, ncol = 1) +
  # set theme
  theme_classic() +
  # adjust y-axis label position
  theme(axis.title.y = element_text(angle = 0, vjust = 0.5)) +
  # remove legend title
  theme(legend.title = element_blank()) +
  # remove bottom axis line and ticks (re-added per panel via geom_hline below)
  theme(axis.line.x.bottom = element_blank(), axis.ticks.x.bottom = element_blank()) +
  # remove subplot label background
  theme(strip.background = element_blank()) +
  # manually set color (dark blue = emergence, dark orange = outmigration)
  scale_color_manual(values=c("#00008B", "#FF8C00")) +
  # manually set fill
  scale_fill_manual(values=c("#00008B", "#FF8C00")) +
  # set x-axis labels to day-month (year intentionally omitted)
  scale_x_date(date_labels = "%d-%b") +
  # adjust y-axis label text
  labs(y = "Simulated\nsalmon\n(count)") +
  # add x-axis to each plot
  geom_hline(yintercept = 0)
if (save.figures) {
  ggsave(path = plot.directory, filename = "Figure4.png", plot = last_plot(),
         width = 6.5, height = 5.5, units = "in", dpi = 300)
}
|
817c283db10d4bcd1bfd48046444e19225bbabac
|
d8e37e99e9ff9f413b841eaba43f3a300e91ea49
|
/sandbox/shiny/ComparisonApp.R
|
6427b88ab6fab03b950004f746919e78637f8231
|
[] |
no_license
|
SWS-Methodology/faoswsFisheryStandardization
|
d65dee1dda8eda22ef4f788ac1f1ab5fddfbd13b
|
8b4e9c7ee78c039926f216724fd28fce0b1ffc7e
|
refs/heads/master
| 2022-07-13T23:20:22.412065
| 2022-06-30T09:48:53
| 2022-06-30T09:48:53
| 151,390,508
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,708
|
r
|
ComparisonApp.R
|
# Load required packages up front: fread() and melt() used immediately below
# come from data.table, so in the original script (libraries loaded at the
# bottom) a fresh session failed unless the packages were already attached.
library(shiny)
library(ggplot2)
library(dplyr)
library(tidyr)
library(rhandsontable)
library(readr)
library(readxl)
library(plotly)
library(data.table)
library(datasets)
library(utils)
library(DT)
library(shinydashboard)

# --- Old (standardized) SUA data: one cleaned CSV per pilot country ---
greece <- fread("data/SUA_standard/GreeceCleaned.csv", header = TRUE)
austria <- fread("data/SUA_standard/AustriaCleaned.csv", header = TRUE)
chechia <- fread("data/SUA_standard/CzechiaCleaned.csv", header = TRUE)
sua_old <- rbind(greece, austria, chechia)
# Reshape: wide year columns (4:20) into long (timePointYears, Value) pairs.
sua_old <- melt(sua_old,
                id.vars = colnames(sua_old)[c(1:3)],
                measure.vars = colnames(sua_old)[c(4:20)],
                variable.name = "timePointYears",
                value.name = "Value")
sua_old[, Status := "OLD"]

# --- New SUA data (NOTE(review): absolute paths from the original author's
# machine -- these must be adjusted before the app can run elsewhere) ---
greece <- fread("C:/Users/ROSA/Desktop/Fisheries/batch0_SUA_pilotCounties/300.csv", header = TRUE)
austria <- fread("C:/Users/ROSA/Desktop/Fisheries/batch0_SUA_pilotCounties/40.csv", header = TRUE)
chechia <- fread("C:/Users/ROSA/Desktop/Fisheries/batch0_SUA_pilotCounties/203.csv", header = TRUE)
sua_new <- rbind(greece, austria, chechia)
sua_new[, availability := NULL]
sua_new[, Status := "NEW"]

# Long table driving the four comparison charts (OLD + NEW stacked).
data <- rbind(sua_new, sua_old)
data[, timePointYears := as.numeric(as.character(timePointYears))]

# Harmonize sua_old key types to character so the merge keys line up with
# sua_new. (Kept after the rbind above, as in the original, so `data` is
# built from the unconverted columns -- identical behavior.)
sua_old[, geographicAreaM49_fi := as.character(geographicAreaM49_fi)]
sua_old[, ics := as.character(ics)]
sua_old[, measuredElement := as.character(measuredElement)]

# Side-by-side (Value_new vs Value_old) table shown on the "Table" tab.
tableData <- merge(sua_new[, .(geographicAreaM49_fi, ics, measuredElement, timePointYears, Value)],
                   sua_old[, .(geographicAreaM49_fi, ics, measuredElement, timePointYears, Value)],
                   by = c("timePointYears", "geographicAreaM49_fi", "ics", "measuredElement"),
                   suffixes = c("_new", "_old"))
# Shiny UI builder. Takes `request` so the app is compatible with Shiny
# bookmarking. Two tabs: "Charts" (four SUA element plots for a selected
# ICS code and country) and "Table" (new-vs-old values for a selected year).
# The navbar title is an HTML link whose JS strips the query string,
# effectively resetting any bookmarked state.
# NOTE(review): `tile = "XXX"` below looks like a typo for `title` -- confirm.
ui <- function(request){ navbarPage(HTML('<a href="javascript: window.location.assign(window.location.href.replace(/\\?.*/, \'\'))" tile = "XXX"><strong>SWS</strong></a>'),
                    id = 'main',
                    # Tab 1: selectors plus the four comparison charts.
                    tabPanel("Charts",
                             tags$head(tags$script(src = "http://mongeau.net/js.cookie.js")),
                             selectInput("ics","Choose an ICS code", unique(data$ics)),
                             selectInput("geo","Choose a country",unique(data$geographicAreaM49_fi)),
                             plotOutput("prod"),
                             plotOutput("foodProc"),
                             plotOutput("import"),
                             plotOutput("export")
                    ),
                    # Tab 2: year slider (bounded by the years present in
                    # the data) plus the new-vs-old comparison table.
                    tabPanel("Table",
                             sliderInput(inputId = "timePointYear",
                                         label="Choose the year",
                                         value=max(as.numeric(as.character(data$timePointYears))),
                                         min=min(as.numeric(as.character(data$timePointYears))),
                                         max=max(as.numeric(as.character(data$timePointYears)))),
                             dataTableOutput('dataComparison')
                    )
)
}
# Shiny server: four comparison line charts (OLD vs NEW SUA series) and a
# comparison table, all filtered by the user's country/ICS/year selections.
# Reactivity is automatic: input$* values are read inside render* expressions.
server <- function(input, output){

  # Build a renderPlot for one SUA element code (production = "51",
  # food processing = "131", import = "61", export = "91"). The original
  # script repeated this ~17-line ggplot block four times verbatim; the
  # helper keeps the four charts guaranteed-identical in styling.
  # input$geo / input$ics are read inside renderPlot, so each chart
  # re-renders whenever the user changes a selection.
  element_plot <- function(element_code, plot_title) {
    renderPlot(
      ggplot(data[geographicAreaM49_fi == input$geo &
                    measuredElement == element_code &
                    ics == input$ics],
             aes(x = timePointYears, y = Value)) +
        geom_line(aes(linetype = Status, color = Status, size = Status)) +
        ggtitle(plot_title) +
        scale_x_continuous(breaks = 2000:2016) +
        scale_linetype_manual(values = c("solid", "dotdash", "dotdash", "solid", "solid")) +
        scale_colour_manual(values = c("red", "blue")) +
        scale_size_manual(values = c(0.8, 0.8)) +
        theme(axis.title = element_text(size = 5),
              axis.text.y = element_text(size = 5),
              axis.text.x = element_text(size = 4, angle = 50, hjust = 1),
              legend.text = element_text(size = 6),
              strip.text.x = element_text(size = 7),
              legend.position = "top",
              panel.grid.major = element_line(colour = "grey80", linetype = "longdash", size = 0.2),
              panel.grid.minor = element_line(colour = "white", size = 0),
              panel.background = element_rect(fill = "white")))
  }

  output$prod     <- element_plot("51", "Production")
  output$foodProc <- element_plot("131", "Food processing")
  output$import   <- element_plot("61", "Import")
  output$export   <- element_plot("91", "Export")

  # "Table" tab: new vs old values for the selected country, year and ICS.
  output$dataComparison <- renderDT(
    tableData[geographicAreaM49_fi == input$geo &
                timePointYears %in% input$timePointYear &
                ics %in% input$ics, ])
}

shinyApp(ui = ui, server = server)
|
a011f2fbb9497fac77ef21bbc96fe89935bae413
|
5168cbb889fef2c50daf3eeb4743796145953d67
|
/mixed-effect model/Mixed_Effects_Regression_daily_approach.R
|
be2250863f818d319f3719cdcb17a73665af3735
|
[] |
no_license
|
pgonzaleze/cf_SST
|
ca2bb554819800dd9471d73cfdc32e22e3c6c5e0
|
1c3aa5e4bc25b6f4f3e59004a26f15f3532d80f6
|
refs/heads/master
| 2023-04-11T21:46:44.655658
| 2021-05-23T07:59:21
| 2021-05-23T07:59:21
| 232,674,962
| 0
| 0
| null | null | null | null |
ISO-8859-10
|
R
| false
| false
| 2,618
|
r
|
Mixed_Effects_Regression_daily_approach.R
|
"
Author: Pedro Gonzalez
Date: April 8th 2021
University of British Columbia
"
################# ================== ###################
###### Mixed Effects Regression | R Data Analysis ######
################# ================== ###################
require(ggplot2) # to plot
#require(lme4) # to perform mixed effect models
#library(stargazer) # to make table
library(MuMIn) # to compute r2
library(lmerTest) # to get the p-value
library(sjPlot) # to plot estimates
library(sjlabelled)
library(sjmisc)
library(ncf) # to compute MORAN's Index and Mantel test
# Load the database
hdp <- read.csv("df_sst_clouds.csv")
# perform a simple lm (to compare outputs)
basic.lm <- lm(SEVERITY_CODE ~ DHW * CF_a_runmean30, data = hdp)
summary(basic.lm)
#plot qq plot
plot(basic.lm, which = 2)
########## =============== ##########
### Perform a mixed effects model ###
########## =============== ##########
# lmerTESt::lmer
m <- lmer(SEVERITY_CODE ~ DHW * CF_a_runmean30 +
(1 | lat:YEAR), data = hdp)
summary(m)
# plot estimates
theme_set(theme_sjplot2())
plot_model(m, sort.est = TRUE,
show.values = TRUE, value.offset = .3)
# Reference: Kutner, et al. (2005). Applied Linear Statistical Models. McGraw-Hill. (Ch. 8)
#plot qq plot
library("car")
qqPlot(hdp$CF_a_runmean30, line = "none",
envelope = FALSE, grid = FALSE, id = FALSE)
qqnorm(resid(m))
qqline(resid(m))
plot(m, which = 2)
# plot results in a nice table
# Use this only with "lmer" and not with lmerTest
stargazer(m, type = "text",
digits = 3,
star.cutoffs = c(0.05, 0.01, 0.001),
digit.separator = "")
# to get the r2; will returns the marginal and the conditional Rē
r.squaredGLMM(m)
# To extract the residuals (errors) and summarize them,
# as well as plot them (they should be approximately normally distributed around a mean of zero)
residuals <- resid(m)
summary(residuals)
hist(residuals)
# Compute the profile confidence intervals
confint(m, oldNames = FALSE)
# Performa Anova
anova(m)
# alternative (it does not suppor 'stargazer' package)
# # lmerTest
# mm <- lmerTest::lmer(SEVERITY_CODE ~ DHW + CF_a_runmean7 +
# (1 | lat) +
# (1 | lon), data = hdp)
# summary(mm)
# Interaction plot
theme_set(theme_sjplot2())
plot_model(m, type = "int", show.p = TRUE,
terms = c("DHW_adj_date", 'CF_a_runmean30_adj_date'))
# Aiken and West (1991). Multiple Regression: Testing and Interpreting Interactions.
library(emmeans)
emmeans(m, pairwise ~ DHW|CF_a_runmean30, lmerTest.limit = 20978,
pbkrtest.limit = 20978)
|
0d4e51a636d3b66b89580cc89707cfe2a4d22a0f
|
4fbb006fb5bd86e7d40615715d340cee1580f307
|
/other.R
|
d62bb6db1f26ed10eee15246a406a0f36c0effa2
|
[] |
no_license
|
wilson2121/jiebaR_emotion
|
b2be55013157d5eb742b9417939a06fabe1250be
|
2d26afc945c667fbb0f52550d6534d3f505a95a5
|
refs/heads/master
| 2020-04-11T12:27:37.827681
| 2017-05-15T14:11:39
| 2017-05-15T14:11:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,198
|
r
|
other.R
|
setwd("G:/workcloud/bjtudrive/Rworkspace/emotion analysis")#!!!这只是我自己的根目录请记得按照自己的目录修改
library(jiebaRD)
library(jiebaR)
library(stringr)
#普通引擎
jieba<-worker("tag")
#网络词语引擎,自行选择修改
jieba_net<-worker(type="tag",user = "dict/net.dict.utf8")
#情感分析引擎,自行选择修改
jieba_porn<-worker(type="tag",user = "dict/emotion.dict.utf8")
#保留符号引擎,自行选择修改
jieba_sym<-worker(symbol = TRUE)
#祈使词引擎,自行选择修改
jieba_imp<-worker(type="tag",user = "dict/imperative.dict.utf8")
#逐行读取字符串信息,注意换行符
article_n<-readLines("这里填写你的文件路径.txt",encoding = "UTF-8",n=-1L)
####以上命令执行一次即可
#初始化输出向量
check_num<-c()#总词数
check_anum<-c()#总字数
check_noun<-c()#名词数
check_adj<-c()#形容词数
check_adv<-c()#副词数
check_numnum<-c()#数字数
check_fp<-c()#一人称代词
check_sp<-c()#二人称代词
check_tp<-c()#三人称代词
check_net<-c()#网络热词
check_url<-c()#链接
check_at<-c()#@数量
check_imp<-c()#祈使词
check_pos<-c()#积极词语
check_neg<-c()#消极词语
i<-1
#提取所有数字
numnum<-str_extract_all(article_n,"[0-9]+")
#提取链接中的数字
numnum_url<-str_extract_all(article_n,"[a-zA-Z][0-9]+[a-zA-z]")
#遍历所有记录,提取所需数据
for (i in 1:length(article_n)) {
#分词和词性标注处理
tagstr<-jieba<=article_n[i]
#根据标签提取数据
check_num[i]<-length(tagstr[names(tagstr)!='eng'])
check_anum[i]<-nchar(article_n[i])
check_noun[i]<-length(tagstr[names(tagstr)%in%c("n","nr","nr1","nr2","nrj","nrf","ns","nsf","nt","nz","nl","ng","nrfg")])
check_adj[i]<-length(tagstr[names(tagstr)%in%c('a','ad','an','ag','al')])
check_adv[i]<-length(tagstr[names(tagstr)=='d'])
check_numnum[i]<-lengths(numnum[i])-lengths(numnum_url[i])
check_fp[i]<-length(tagstr[tagstr%in%c("我","我们")])
check_sp[i]<-length(tagstr[tagstr%in%c("你","你们")])
check_tp[i]<-length(tagstr[tagstr%in%c("他","她","它","他们","她们","它们")])
tagstr<-jieba_net<=article_n[i]
check_net[i]<-length(tagstr[names(tagstr)=='net'])
check_url[i]<-length(tagstr[tagstr=='http'])
tagstr<-jieba_sym<=article_n[i]
check_at[i]<-length(tagstr[tagstr=="@"])
tagstr<-jieba_imp<=article_n[i]
check_imp[i]<-length(tagstr[names(tagstr)=='imp'])
tagstr<-jieba_porn<=article_n[i]
check_pos[i]<-length(tagstr[names(tagstr)=='positive'])
check_neg[i]<-length(tagstr[names(tagstr)=='negative'])
}
#拼合数据
result<-data.frame(内容=article_n,总词数=check_num,总字数=check_anum,名词=check_noun,
形容词=check_adj,副词=check_adv,数字=check_numnum,一人称代词=check_fp,
二人称代词=check_sp,三人称代词=check_tp,网络热词=check_net,
链接=check_url,所提及人=check_at,祈使词=check_imp,
积极情感词=check_pos,消极情感词=check_neg)
#生成数据文件,注意修改文件路径
file<-file("你的输出文件路径.csv","w")
write.table(result,file,sep=",",col.names = NA)
close(file)
|
a6ec75907b9768c34f6ad5ddbaa3bd811ba23a55
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/themetagenomics/R/prepare_data.R
|
851dea98cfaba4864bee3072c35596fc2a3a963e
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,253
|
r
|
prepare_data.R
|
#' @importFrom stats model.frame na.omit relevel
NULL
#' Prepare themetadata object from data for topic modeling pipeline
#'
#' Creates a themetadata class by preprocessing data from an OTU table,
#' taxonomic information, sample metadata, and a formula reflecting the preposed
#' relationship between sample metadata and the topics over samples
#' distribution.
#'
#' @param otu_table (required) Matrix or dataframe containing taxa abundances
#' (counts, non-negative integers) across samples. Rows and columns must be
#' uniquely named.
#' @param rows_are_taxa (required) Logical flag indicating whether otu_table
#' rows correspond to taxa (TRUE) or samples (FALSE).
#' @param tax_table Matrix or dataframe containing taxonomic information with row or
#' column names corresponding to the otu_table.
#' @param metadata Matrix or dataframe containing sample information with row or
#' column names corresponding to the otu_table.
#' @param formula Formula for covariates of interest found in metadata.
#' Interactions, transformations, splines, and polynomial expansions are
#' permitted.
#' @param refs Character vector of length equal to the number of factors or
#' binary covariates in formula, indicating the reference level.
#' @param cn_normalize Logical flag for performing 16S rRNA copy number
#' normalization. Defaults to TRUE.
#' @param drop Logical flag to drop empty rows and columns. Defaults to TRUE.
#' @param seed Seed for random number generation. This seed will be passed to
#' each function that uses this prepared data unless otherwise overridden.
#' Defaults to a random integer between 1 and the maximum integer supported by R.
#' @param verbose Logical flag to print progress information. Defaults to FALSE.
#'
#' @return An object of class themetadata containing
#' \describe{
#' \item{otu_table}{Matrix of taxa abundances, correctly overlapping with tax_table
#' and metadata. Will be copy number normalized, lacking empty rows and columns by
#' default.}
#' \item{tax_table}{Matrix, correctly overlapping with otu_table}
#' \item{metadata}{Dataframe, correctly overlapping with otu_table and formula. All
#' character covariates are converted to factors.}
#' \item{formula}{Unaltered, given by the user}
#' \item{splineinfo}{List containing the covariate, nonlinear function name, and
#' basis function expansion of all applicable covariates based on the formula.}
#' \item{modelframe}{Dataframe of metadata of only applicable covariates with factors
#' expanded as dummy variables}
#' }
#'
#' @seealso \code{\link[stm]{s}}
#'
#' @examples
#' formula <- ~DIAGNOSIS
#' refs <- 'Not IBD'
#'
#' dat <- prepare_data(otu_table=GEVERS$OTU,rows_are_taxa=FALSE,tax_table=GEVERS$TAX,
#' metadata=GEVERS$META,formula=formula,refs=refs,
#' cn_normalize=TRUE,drop=TRUE)
#' @export
prepare_data <- function(otu_table,rows_are_taxa,tax_table,metadata,formula,refs,
cn_normalize=TRUE,drop=TRUE,seed=sample.int(.Machine$integer.max,1),
verbose=FALSE){
set.seed(check_seed(seed))
next_seed <- sample.int(.Machine$integer.max,1)
if (max(otu_table) <= 1)
stop('Count table must contain counts (non-negative integers) and hence cannot be normalized.')
if (is.null(colnames(otu_table)) | is.null(rownames(otu_table)))
stop('otu_table must contain appropriate row and column names.')
if (!missing(tax_table)){
if (is.null(colnames(tax_table)) & is.null(rownames(tax_table)))
stop('tax_table must contain appropriate row and column names.')
if (!rows_are_taxa){
if (substr(colnames(otu_table)[1],1,1) == 'X' & substr(colnames(tax_table)[1],1,1) != 'X'){
warning('otu_table taxa names may have a leading X. Renaming.')
colnames(otu_table) <- gsub('^X','',colnames(otu_table))
}
}
}
if (!missing(metadata))
if (is.null(colnames(metadata)) & is.null(rownames(metadata)))
stop('metadata must contain appropriate row and column names.')
slots <- list(otu_table=NULL,
tax_table=NULL,
metadata=NULL,
formula=NULL,
refs=NULL,
splineinfo=NULL,
modelframe=NULL,
seeds=list(seed=seed,next_seed=next_seed))
refs_type <- NULL
splines <- NULL
class(slots) <- 'themetadata'
if (rows_are_taxa) {
otu_table <- t(otu_table)
}else{
if (!is.matrix(otu_table)) otu_table <- as.matrix(otu_table)
}
miss <- list()
if (missing(tax_table)) miss$tax_table <- TRUE else miss$tax_table <- FALSE
if (missing(metadata)) miss$metadata <- TRUE else miss$metadata <- FALSE
if (missing(formula)) miss$formula <- TRUE else miss$formula <- FALSE
if (missing(refs) || is.null(refs)){
miss$refs <- TRUE
refs <- NULL
}else{
miss$refs <- FALSE
}
if (!miss$formula)
if (miss$metadata)
stop('Must provide metadata if a formula is given.')
# reorient tax_table and metadata
if (!miss$metadata){
if (verbose) cat('Checking row and column names.\n')
if (sum(rownames(otu_table) %in% colnames(metadata)) > sum(rownames(otu_table) %in% rownames(metadata))){
if (verbose) cat('Transposing metadata to reorient sample IDs.\n')
metadata <- t(metadata)
}
}
if (!miss$tax_table & !miss$metadata)
if (sum(colnames(otu_table) %in% colnames(tax_table)) > sum(colnames(otu_table) %in% rownames(metadata))){
if (verbose) cat('Transposing tax_table to reorient taxa IDs.\n')
tax_table <- t(tax_table)
}
# make silva like greengenes for downstream functions
if (!miss$tax_table){
if (!all(grepl('^[a-z]__',tax_table))){
tax_table_dimnames <- dimnames(tax_table)
tax_table[is.na(tax_table)] <- ''
gg_prefix <- c('k','p','c','o','f','g','s')
tax_table <- sapply(seq_along(gg_prefix),function(g) paste(gg_prefix[g],tax_table[,g],sep='__'))
dimnames(tax_table) <- tax_table_dimnames
}
}
# if unsupervised
if (miss$formula){
if(is.null(dimnames(otu_table))){
rownames(otu_table) <- paste0('s',seq_len(nrow(otu_table)))
colnames(otu_table) <- paste0('f',seq_len(ncol(otu_table)))
cn_normalize <- FALSE
}
if (cn_normalize){
if (verbose) cat('Performing copy number normalization.\n')
otu_table <- cnn(otu_table,FALSE,drop=FALSE)
}
if (drop){
otu_table <- otu_table[,colSums(otu_table) > 0]
otu_table <- otu_table[rowSums(otu_table) > 0,]
}
if (!miss$metadata){
intersection <- intersect(rownames(otu_table),rownames(metadata))
otu_table <- otu_table[intersection,]
metadata <- metadata[intersection,,drop=FALSE]
slots$metadata <- metadata
}
if (!miss$tax_table){
intersection <- intersect(colnames(otu_table),rownames(tax_table))
otu_table <- otu_table[,intersection]
tax_table <- tax_table[intersection,]
slots$tax_table <- tax_table
}
if (verbose) cat('Forcing otu_table to integer mode.\n')
storage.mode(otu_table) <- 'integer'
slots$otu_table <- otu_table
attr(slots,'splines') <- splines
attr(slots,'refs') <- refs_type
attr(slots,'cnn') <- cn_normalize
attr(slots,'drop') <- drop
return(slots)
}
classes <- sapply(metadata,class)
classes_counts <- c('n'=sum(classes=='numeric' | classes=='integer'),
'c'=sum(classes=='character'),
'f'=sum(classes=='factor'))
if (verbose) cat(sprintf('\nStarting stats:
N otu_table samples: %s
N otu_table taxa: %s\n
N metadata numeric %s
N metadata character %s
N metadata factor %s\n',
nrow(otu_table),
ncol(otu_table),
classes_counts['n'],
classes_counts['c'],
classes_counts['f']))
if (!miss$tax_table)
if (verbose) cat(sprintf('
N phyla: %s
N classes: %s
N orders: %s
N families: %s
N genera: %s
N species: %s\n\n',
length(na.omit(unique(tax_table[,2]))),
length(na.omit(unique(tax_table[,3]))),
length(na.omit(unique(tax_table[,4]))),
length(na.omit(unique(tax_table[,5]))),
length(na.omit(unique(tax_table[,6]))),
length(na.omit(unique(tax_table[,7])))))
splines <- check_for_splines(formula,metadata)
if (splines) formula_tmp <- extract_spline_info(formula,metadata,remove_only=TRUE) else formula_tmp <- formula
if (verbose) if (any(is.na(metadata))) cat('Removing NA values in metadata.\n')
metadata <- model.frame(formula_tmp,data=metadata,na.action=na.omit)
intersection <- intersect(rownames(otu_table),rownames(metadata))
otu_table <- otu_table[intersection,]
metadata <- metadata[intersection,,drop=FALSE]
if (!miss$tax_table){
intersection <- intersect(colnames(otu_table),rownames(tax_table))
otu_table <- otu_table[,intersection]
tax_table <- tax_table[intersection,]
}
if (any(is.na(otu_table))) stop('NA values in otu_table. Please correct.\n')
if (cn_normalize){
if (verbose) cat('Performing copy number normalization.\n')
otu_table <- cnn(otu_table,FALSE,drop=FALSE)
}
if (drop){
otu_table <- otu_table[,colSums(otu_table) > 0]
otu_table <- otu_table[rowSums(otu_table) > 0,]
metadata <- metadata[rownames(otu_table),,drop=FALSE]
if (!miss$tax_table) tax_table <- tax_table[colnames(otu_table),]
}
classes <- sapply(metadata,class)
if(any(classes == 'character')){
if (verbose) cat('Converting character covariates to factors.\n')
rnames <- rownames(metadata)
metadata <- as.data.frame(unclass(metadata))
rownames(metadata) <- rnames
}
expanded <- expand_multiclass(metadata=metadata,refs=refs,verbose=verbose)
metadata <- expanded$metadata
refs <- expanded$refs
refs_type <- expanded$refs_type
if (splines){
splineinfo <- extract_spline_info(formula,metadata)
modelframe <- create_modelframe(splineinfo$formula,metadata,refs)
slots$splineinfo <- splineinfo
}else{
modelframe <- create_modelframe(formula,metadata,refs)
}
rownames(metadata) <- rownames(otu_table)
classes <- sapply(metadata,class)
classes_counts <- c('n'=sum(classes=='numeric' | classes=='integer'),
'c'=sum(classes=='character'),
'f'=sum(classes=='factor'))
if (verbose) cat(sprintf('\nFinal stats:
N otu_table samples: %s
N otu_table taxa: %s\n
N metadata numeric %s
N metadata character %s
N metadata factor %s\n',
nrow(otu_table),
ncol(otu_table),
classes_counts['n'],
classes_counts['c'],
classes_counts['f']))
if (!miss$tax_table)
if (verbose) cat(sprintf('
N phyla: %s
N classes: %s
N orders: %s
N families: %s
N genera: %s
N species: %s\n\n',
length(na.omit(unique(tax_table[,2]))),
length(na.omit(unique(tax_table[,3]))),
length(na.omit(unique(tax_table[,4]))),
length(na.omit(unique(tax_table[,5]))),
length(na.omit(unique(tax_table[,6]))),
length(na.omit(unique(tax_table[,7])))))
if (verbose) cat('Forcing otu_table to integer mode.\n')
storage.mode(otu_table) <- 'integer'
slots$otu_table <- otu_table
if (!miss$tax_table) slots$tax_table <- tax_table
slots$metadata <- data.frame(metadata,stringsAsFactors=TRUE)
slots$refs <- refs
slots$formula <- formula
slots$modelframe <- modelframe
attr(slots,'splines') <- splines
attr(slots,'refs') <- refs_type
attr(slots,'cnn') <- cn_normalize
attr(slots,'drop') <- drop
return(slots)
}
|
50c0dee94809ab18119ad5007899392559b76695
|
ca66ceda7681e3e9633eb81d51278192ccaec49c
|
/man/SanityCheck.Rd
|
de0f44fb03005297535404b121a6733a22c0c157
|
[] |
no_license
|
fcampelo/MetaTuner
|
3473c7d468c9f46f2071b9665f073538bd7c8825
|
ee93a9575199b6d70af2ce864991904f6e24bb38
|
refs/heads/master
| 2021-03-30T18:20:49.568522
| 2018-12-17T10:37:13
| 2018-12-17T10:37:13
| 114,928,299
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 420
|
rd
|
SanityCheck.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SanityCheck.R
\name{SanityCheck}
\alias{SanityCheck}
\title{Check sanity of inputs for metatuner}
\usage{
SanityCheck(myenv)
}
\arguments{
\item{myenv}{list containing input parameters used for \code{\link[=metatuner]{metatuner()}}.}
}
\description{
Check sanity of inputs for metatuner
}
\author{
Felipe Campelo (\email{fcampelo@ufmg.br})
}
|
c008dccc33010e0084ae1187c5e531bc0357311a
|
e9e3f02316db3a9b8b22bb7cd5ff643b8120e765
|
/R_code/plot4.R
|
01a0209f1ba91aa37a3e7eb1352b93afaecfaa40
|
[] |
no_license
|
mikersign/ExData_Plotting1
|
58c9f6f2fd71ec6a65e4730f36d02b212bf93423
|
73f3558da17d3c0c92203bca02be8d8092010c53
|
refs/heads/master
| 2021-01-17T07:43:52.849231
| 2014-11-09T09:04:52
| 2014-11-09T09:04:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,984
|
r
|
plot4.R
|
# plot4.R -- Exploratory Data Analysis course, plot 4.
# Reads the household power consumption data, keeps only 1-2 Feb 2007,
# and writes a 2x2 panel of time-series plots to plot4.png.
setwd("D:/data/mkl13350/Documents/R course/Exploratory Data Analysis/Week1")
## Read file "household_power_consumption.txt"
# Only the first 100 rows are read here, to guess per-column datatypes below.
initial = read.table("household_power_consumption.txt", sep=";", nrows = 100)
##Determine the datatype of the columns
# NOTE(review): read.table is called without header=TRUE, so if the file has a
# header row it is read as data here and the guessed classes may be character;
# that would explain the as.numeric() re-coercions further down -- confirm.
classes = sapply(initial,class)
## Read file "household_power_consumption.txt" with datatypes equal to classes
data = read.csv("household_power_consumption.txt", sep=";",colClasses=classes)
head(data)
str(data)
## For the assignment We will only be using data from the dates 2007-02-01 and 2007-02-02
dataset = data.frame(subset(data,data[,1] %in% c("1/2/2007","2/2/2007")))
head(dataset)
nrow(dataset)
# Coerce the measurement columns (3:5, 7:9) to numeric; entries that cannot be
# parsed become NA, and the coercion warnings are deliberately suppressed.
suppressWarnings(dataset[, 3] <- as.numeric(as.character(dataset[, 3])))
suppressWarnings(dataset[, 4] <- as.numeric(as.character(dataset[, 4])))
suppressWarnings(dataset[, 5] <- as.numeric(as.character(dataset[, 5])))
suppressWarnings(dataset[, 7] <- as.numeric(as.character(dataset[, 7])))
suppressWarnings(dataset[, 8] <- as.numeric(as.character(dataset[, 8])))
suppressWarnings(dataset[, 9] <- as.numeric(as.character(dataset[, 9])))
## Combine Date and Time to one field date_time (becomes column 10, POSIXlt)
dataset[,1] = as.Date(dataset[, 1],"%d/%m/%Y")
dataset$date_time = paste(dataset[,1],dataset[,2])
dataset$date_time = strptime(dataset$date_time,"%Y-%m-%d %H:%M:%S")
## Draw the 2x2 panel of line plots (column 10 = date_time on the x axis)
png(file="plot4.png")
par(mfrow = c(2,2))
with(dataset,{
plot(dataset[,10],dataset[, 3],type="l",ylab="Global Active Power",xlab="")
plot(dataset[,10],dataset[, 5],type="l",ylab="Voltage",xlab="datime")
plot(dataset[,10],dataset[, 7],type="l",ylab="Energy sub metering",xlab="",col="black")
points(dataset[,10],dataset[, 8],type="l",col="red")
points(dataset[,10],dataset[, 9],type="l",col="blue")
legend("topright",pch="_",col=c("black","red","blue"),bty="n",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
plot(dataset[,10],dataset[, 4],type="l",ylab="Global_reactive_power",xlab="datetime")
})
dev.off()
|
a010ca7c1b708d163d984b14f96726e52d3ce1cd
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/Quartet/man/SplitStatus.Rd
|
2efd070703fb9bcf9910cb673ce22cfdbf1c12a2
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,340
|
rd
|
SplitStatus.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PartitionDistance.R
\name{SplitStatus}
\alias{SplitStatus}
\alias{BipartitionStatus}
\alias{SharedSplitStatus}
\alias{SharedBipartitionStatus}
\title{Matching partitions}
\usage{
SplitStatus(trees, cf = trees[[1]])
SharedSplitStatus(trees, cf)
}
\arguments{
\item{trees}{A list of trees of class \code{\link[ape:read.tree]{phylo}},
with identically labelled tips.}
\item{cf}{Comparison tree of class \code{\link[ape:read.tree]{phylo}}. If unspecified,
each tree is compared to the first tree in \code{trees}.}
}
\value{
Returns a two dimensional array.
Rows correspond to the input trees, and are named if names were present.
Columns report:
\strong{N}: The total number of partitions present in the two trees,
i.e. \emph{P1} + \emph{P2}.
\strong{P1}: The number of partitions present in tree 1.
\strong{P2}: The number of partitions present in tree 2.
\strong{s}: The number of partitions present in both trees.
\strong{d1}: The number of partitions present in tree 1,
but contradicted by tree 2.
\strong{d2}: The number of partitions present in tree 2,
but contradicted by tree 1.
\strong{r1}: The number of partitions present in tree 1, and neither
present nor contradicted in tree 2.
\strong{r2}: The number of partitions present in tree 2, and neither
present nor contradicted in tree 1.
}
\description{
Calculates how many of the partitions present in tree 1 are also present in
tree 2 (\code{s}),
how many of the partitions in tree 1 are absent in tree 2 (\code{d1}),
and how many of the partitions in tree 2 are absent in tree 1 (\code{d2}).
The Robinson-Foulds (symmetric partition) distance is the sum of the
latter two quantities, i.e. \code{d1} + \code{d2}.
}
\section{Functions}{
\itemize{
\item \code{SharedSplitStatus}: Reports split statistics obtained after removing all
tips that do not occur in both trees being compared.
}}
\examples{
data('sq_trees')
# Calculate the status of each quartet
splitStatuses <- SplitStatus(sq_trees)
# Calculate the raw symmetric difference (i.e. Robinson–Foulds distance)
RawSymmetricDifference(splitStatuses)
# Normalize the Robinson Foulds distance by dividing by the number of
# splits present in the two trees:
RawSymmetricDifference(splitStatuses) / splitStatuses[, 'N']
# Normalize the Robinson Foulds distance by dividing by the total number of
# splits that it is possible to resolve for `n` tips:
nTip <- length(sq_trees[[1]]$tip.label)
nPartitions <- 2 * (nTip - 3L) # Does not include the nTip partitions that
# comprise but a single tip
RawSymmetricDifference(splitStatuses) / nPartitions
}
\references{
\itemize{
\item \insertRef{Robinson1981}{Quartet}
\item \insertRef{Penny1985}{Quartet}
}
}
\seealso{
Other element-by-element comparisons:
\code{\link{CompareQuartetsMulti}()},
\code{\link{CompareQuartets}()},
\code{\link{CompareSplits}()},
\code{\link{PairSharedQuartetStatus}()},
\code{\link{QuartetState}()},
\code{\link{SharedQuartetStatus}()}
}
\author{
\href{https://orcid.org/0000-0001-5660-1727}{Martin R. Smith}
(\href{mailto:martin.smith@durham.ac.uk}{martin.smith@durham.ac.uk})
}
\concept{element-by-element comparisons}
|
b11372a6faf88960e6a66f4fd24d91cd6fbb493a
|
cdb4f386359a571c8e919f00857af9b9c9232053
|
/ClusterCpGs.R
|
02086f5aa8a67f68519093d8b15ecb3cf92fe1de
|
[] |
no_license
|
GRSEB9S/Gaussian-mixture-modelling
|
ecc8ea32cd3dd509b97f254d33b17529b142882f
|
26387db1814a2bde24a098282cf3c0919ad43e12
|
refs/heads/master
| 2021-06-14T16:52:30.905632
| 2017-04-05T14:58:43
| 2017-04-05T14:58:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,182
|
r
|
ClusterCpGs.R
|
##################################################################################################
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##################################################################################################
# Start
#"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Clustering 450K methylation CpGs probes written by Dr Reza Rafiee
# Research Associate, Northern Institute for Cancer Research, Newcastle University
# This script loads 20,000 methylation probes (from 450K methylation profiling) and doing clustering analysis
# Pipeline: PCA on the beta-value matrix -> Gaussian mixture model (mclust) on the
# first 5 PCs -> bootstrap of the GMM -> 3D scatter / heatmap visualisation ->
# affinity propagation clustering as an independent confirmation.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
library(mclust) # Gaussian Mixture Modelling package for Model-Based Clustering, Classification, and Density Estimation
library(scatterplot3d)
library(pheatmap)
library(apcluster) # Affinity Propagation Clustering
load("~/20KBetaValues_51InfantSHH.RData") # 20,000 probes
length(colnames(BetaValues_51Samples_20K)) # n=51
# Performs a principal components analysis on the given data matrix and returns the results as an object of class prcomp
PCA_Comp_Scaled_Centered <- prcomp(t(BetaValues_51Samples_20K), center = TRUE, scale=T) # scale =T is appropriate for high-dimensional omic data
summary(PCA_Comp_Scaled_Centered)
# Recorded output from summary() on the original data set:
# Importance of components:
# PC1 PC2 PC3 PC4 PC5 PC6 PC7 PC8 PC9 PC10 PC11 PC12 PC13 PC14 PC15 PC16
# Standard deviation 56.400 45.3408 35.12111 32.24443 28.50901 27.18614 24.98240 24.3309 22.87971 21.83193 21.25324 20.75756 20.46585 18.90885 18.24397 17.89305
# Proportion of Variance 0.159 0.1028 0.06167 0.05199 0.04064 0.03695 0.03121 0.0296 0.02617 0.02383 0.02259 0.02154 0.02094 0.01788 0.01664 0.01601
# Cumulative Proportion 0.159 0.2618 0.32351 0.37550 0.41614 0.45309 0.48430 0.5139 0.54007 0.56390 0.58649 0.60803 0.62897 0.64685 0.66349 0.67950
# PC17 PC18 PC19 PC20 PC21 PC22 PC23 PC24 PC25 PC26 PC27 PC28 PC29 PC30 PC31
# Standard deviation 17.63880 17.53269 17.04100 16.66391 16.16575 15.92072 15.55023 15.35452 15.11828 14.7654 14.57411 14.51679 14.25707 14.21835 14.03399
# Proportion of Variance 0.01556 0.01537 0.01452 0.01388 0.01307 0.01267 0.01209 0.01179 0.01143 0.0109 0.01062 0.01054 0.01016 0.01011 0.00985
# Cumulative Proportion 0.69506 0.71043 0.72495 0.73883 0.75190 0.76457 0.77666 0.78845 0.79988 0.8108 0.82140 0.83194 0.84210 0.85221 0.86205
# PC32 PC33 PC34 PC35 PC36 PC37 PC38 PC39 PC40 PC41 PC42 PC43 PC44 PC45 PC46 PC47
# Standard deviation 13.89703 13.62558 13.54381 13.2693 13.12656 12.8847 12.78211 12.46655 12.22380 11.93376 11.72942 11.5741 11.41000 11.26610 10.89133 10.6728
# Proportion of Variance 0.00966 0.00928 0.00917 0.0088 0.00862 0.0083 0.00817 0.00777 0.00747 0.00712 0.00688 0.0067 0.00651 0.00635 0.00593 0.0057
# Cumulative Proportion 0.87171 0.88099 0.89017 0.8990 0.90758 0.9159 0.92405 0.93182 0.93930 0.94642 0.95330 0.9600 0.96650 0.97285 0.97878 0.9845
# PC48 PC49 PC50 PC51
# Standard deviation 10.44080 10.25791 9.81097 6.577e-14
# Proportion of Variance 0.00545 0.00526 0.00481 0.000e+00
# Cumulative Proportion 0.98993 0.99519 1.00000 1.000e+00
#
# How many eigenvectors/components we need to use in this analysis?
# Taking the first k eigenvectors that capture at least 99% of the total variance
# creating a graphic visualization of the relationship between eigenvalues and number of factors
par(mfrow=c(1,1))
par(mar=c(5,4,4,5) + 0.1)
par(cex.axis=0.8)
plot(PCA_Comp_Scaled_Centered,main = "Variances vs. number of components", type = "l")
# biplot(prcomp(PCA_Comp_Scaled_Centered$x, scale = TRUE))
pairs(PCA_Comp_Scaled_Centered$x[,1:5], pch=19, col="black",log = "")
# Fit Gaussian mixture models on the first 5 PCs, considering 1 to 4 clusters;
# Mclust picks the best model/cluster count by BIC.
GMM_object_PCA <- Mclust(as.matrix(PCA_Comp_Scaled_Centered$x[,1:5]), G=1:4)
Best_Num_of_Clusters <- dim(GMM_object_PCA$z)[2]
cat("Model-based optimal number of clusters:", Best_Num_of_Clusters, "\n")
# model-based optimal number of clusters: 2 clusters
# show a matrix whose [i,k]th entry is the probability that observation i in the test data belongs to the kth class.
GMM_object_PCA$z
# Resampling-based Inference for Gaussian finite mixture models
# Bootstrap or jackknife estimation of standard errors and percentile bootstrap confidence intervals
# for the parameters of a Gaussian mixture model.
bootClust <- MclustBootstrap(GMM_object_PCA)
bootClust$modelName #[1] "VII"
summary(bootClust, what = "se")
summary(bootClust, what = "ci")
# plot(GMM_object_PCA)
plot(GMM_object_PCA, what = c("BIC", "classification", "uncertainty", "density"),
     dimens = NULL, xlab = NULL, ylab = NULL, ylim = NULL, addEllipses = TRUE, main = TRUE)
#sort(GMM_object_PCA$z[,1])
#sort(GMM_object_PCA$z[,2])
# Posterior cluster-membership probabilities, exported to CSV further down.
Probability_Assignment <- GMM_object_PCA$z
#
# [,1] [,2]
# NMB113 1.161098e-18 1.0000000000
# NMB138 1.087764e-13 1.0000000000
# NMB143 3.095577e-15 1.0000000000
# NMB200 9.673815e-01 0.0326185108
# ...
bestmodelBIC <- mclustBIC(GMM_object_PCA$data)
# Top 3 models based on the BIC criterion:
# VII,2 VII,5 EII,5
# -2562.430 -2565.113 -2569.015
length(which(GMM_object_PCA$classification == 1)) # red colour, n=18
length(which(GMM_object_PCA$classification == 2)) # blue colour, n=33
which(GMM_object_PCA$classification == 1)
which(GMM_object_PCA$classification == 2)
# Group 1
# which(GMM_object_PCA$classification == 1)
# NMB200 NMB254 NMB272 NMB32 NMB324 NMB328 NMB363 NMB465 NMB471 NMB483 NMB497 NMB553 NMB594 NMB608 NMB621 NMB64 NMB676 NMB712
# 4 5 6 7 8 9 10 15 17 22 27 29 32 33 35 36 43 45
# Group 2
# which(GMM_object_PCA$classification == 2)
# NMB113 NMB138 NMB143 NMB364 NMB371 NMB379 NMB439 NMB466 NMB474 NMB477 NMB479 NMB482 NMB485 NMB486 NMB495 NMB496 NMB498 NMB554 NMB580 NMB612 NMB651 NMB667 NMB670
# 1 2 3 11 12 13 14 16 18 19 20 21 23 24 25 26 28 30 31 34 37 38 39
# NMB673 NMB674 NMB675 NMB690 NMB720 NMB726 NMB79 NMB798 NMB803 NMB873
# 40 41 42 44 46 47 48 49 50 51
#GMM_object_PCA$data[,1:3]
# NOTE(review): the comments above label cluster 1 as "red" and cluster 2 as
# "blue", but this line colours cluster 1 blue and cluster 2 red -- confirm
# which colour mapping is intended.
grpCols <- ifelse(GMM_object_PCA$classification == 1, "blue", "red")
# Add regression plane
shapes = c(16, 17, 18)
my.lm <- lm(GMM_object_PCA$data[,1] ~ GMM_object_PCA$data[,2] + GMM_object_PCA$data[,3])
s3d <- scatterplot3d(GMM_object_PCA$data[,1:3], pch = 17, type = "h", color = grpCols,angle=-225, scale.y=0.9, col.grid="black", grid=TRUE) # angle=-225, 280, 55, 75
s3d$plane3d(my.lm)
text(s3d$xyz.convert(GMM_object_PCA$data[,1:3]), labels = rownames(PCA_Comp_Scaled_Centered$x), cex= 0.8, col = "black")
# Visualising heatmap of PCA components
pheatmap(GMM_object_PCA$data[,1:5],color = colorRampPalette(c("navy", "white", "firebrick3"))(100),clustering_method = "ward.D2")
write.csv(Probability_Assignment, file="~/Probabilities_Assignment.csv")
################################################## AP clustering ###############################
# Affinity Propagation clustering is used to confirm an aggreement/consensus with GMM+EM results
# Affinity propagation (AP) is a relatively new clustering algorithm that has been introduced by
# Brendan J. Frey and Delbert Dueck. The authors themselves describe affinity propagation as
# follows:
# "An algorithm that identifies exemplars among data points and forms clusters of data
# points around these exemplars. It operates by simultaneously considering all data
# point as potential exemplars and exchanging messages between data points until a
# good set of exemplars and clusters emerges."
AP_object_PCA <- apcluster(negDistMat(r=2), PCA_Comp_Scaled_Centered$x[,1:5], q=0.0)
cat("affinity propogation optimal number of clusters:", length(AP_object_PCA@clusters), "\n")
plot(AP_object_PCA,GMM_object_PCA$data[,1:5])
heatmap(AP_object_PCA)
show(AP_object_PCA)
AP_object_PCA
# APResult object
#
# Number of samples = 51
# Number of iterations = 129
# Input preference = -73914.21
# Sum of similarities = -234507
# Sum of preferences = -221742.6
# Net similarity = -456249.6
# Number of clusters = 3
# Exemplars:
# NMB371 NMB498 NMB553
# Clusters:
# Cluster 1, exemplar NMB371:
# NMB143 NMB371 NMB439 NMB485 NMB486 NMB554 NMB612 NMB670 NMB673 NMB674 NMB675
# Cluster 2, exemplar NMB498:
# NMB113 NMB138 NMB364 NMB379 NMB466 NMB474 NMB477 NMB479 NMB482 NMB495 NMB496 NMB498 NMB580 NMB651 NMB667 NMB690 NMB720 NMB726 NMB79 NMB798 NMB803 NMB873
# Cluster 3, exemplar NMB553:
# NMB200 NMB254 NMB272 NMB32 NMB324 NMB328 NMB363 NMB465 NMB471 NMB483 NMB497 NMB553 NMB594 NMB608 NMB621 NMB64 NMB676 NMB712
length(AP_object_PCA@clusters[[1]]) # n1=11 (red colours)
length(AP_object_PCA@clusters[[2]]) # n2=22 (green colours)
# n1+n2 = 33 (Group 2)
length(AP_object_PCA@clusters[[3]]) # n3=18 (Group 1; blue colours)
# We could use all 20k probes in this clustering
# AP_object_20k <- apcluster(negDistMat(r=2), t(BetaValues_51Samples_20K), q=0.01)
# cat("affinity propogation optimal number of clusters:", length(AP_object_20k@clusters), "\n")
# affinity propogation optimal number of clusters: 2
#"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# End
##################################################################################################
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##################################################################################################
|
9f290320face41ce401783de847b6042bc70f151
|
ac771259d6e3469b75e0fdac251839ab1d070767
|
/man/vtlAddStatements.Rd
|
8dd99e576170fe3503af7d5caa7b8d8507ce3114
|
[] |
no_license
|
amattioc/RVTL
|
7a4e0259e21d52e8df1efe9a663ca20a7d130b15
|
630a41f27d0f5530d7c3df7266ecfaf25fe4803a
|
refs/heads/main
| 2023-04-27T17:52:39.093386
| 2021-05-14T09:22:24
| 2021-05-14T09:22:24
| 304,639,834
| 0
| 1
| null | 2020-10-19T19:19:41
| 2020-10-16T13:46:02
|
JavaScript
|
UTF-8
|
R
| false
| true
| 1,188
|
rd
|
vtlAddStatements.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vtl.R
\name{vtlAddStatements}
\alias{vtlAddStatements}
\title{Process VTL statements}
\usage{
vtlAddStatements(sessionID, statements, restartSession = F)
}
\arguments{
\item{sessionID}{The symbolic name of an active VTL session}
\item{statements}{The code to be added to the session}
\item{restartSession}{\code{TRUE} if session must be restarted (default \code{FALSE})}
}
\description{
Replaces or adds more statements to an existing VTL session.
}
\details{
If you are replacing one or more already defined rules,
you need to set \code{restartSession} to \code{TRUE} to avoid errors.
This function always returns \code{TRUE}.
}
\examples{
\dontrun{
vtlAddStatements(session = 'test', restartSession = T,
statements = 'a := r_input;
b := 3;
c := abs(sqrt(14 + a));
d := a + b + c;
e := c + d / a;
f := e - d;
g := -f;
test := a * b + c / a;')
}
}
|
0c404a705a6aa31a460a5fcf62a828fcb78d384e
|
51c641816641a17732c5ad8afe0f09cb2743a18c
|
/diseno_muestral.R
|
49a863127b1c22d2eb0087ab31bc40ae2cfb537e
|
[] |
no_license
|
jardanys/SmallAreaEstimation_HBF
|
5d2aec77d71aea5f77ea8008be922ce976a6c199
|
24c773df3fe4b9b54d3e59af0ce1a1efa60063d9
|
refs/heads/master
| 2021-04-15T18:24:57.168066
| 2018-04-02T16:42:52
| 2018-04-02T16:42:52
| 126,659,146
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,535
|
r
|
diseno_muestral.R
|
# Survey design and design-based estimation for the SABER tests sample.
# NOTE(review): rm(list = ls()) wipes the caller's workspace; kept as in
# the original script, but consider removing it.
rm(list = ls())
library(survey)
library(sae)
library(TeachingSampling)
library(dplyr)
# Adjust variance estimation for strata that contain a single PSU.
options(survey.lonely.psu="adjust")
#**************************************
#******** SABER TESTS SAMPLE **********
#**************************************
# Student-level census data and the three-stage sample drawn from it.
est <- readRDS("estudiantes.rds")
muestra3etapas <- readRDS("muestra3etapas.rds")
# Three-stage design: municipality -> school -> student, stratified at the
# first two stages, with finite population corrections NI / NII / N_i.
# (nest = T: cluster ids are re-used within strata; TRUE is preferred over T.)
diseno_muestral <- svydesign(ids = ~CODIGOMUNICIPIO + CODIGO_ICFES + ID_estud,
                             strata = ~estrato_mpio + EstratoColegio,
                             fpc = ~ NI + NII + N_i, data = muestra3etapas,
                             nest = T)
# English score: design-based mean, its CV (%), and the census mean for comparison.
svymean(~INGLES_PUNT, diseno_muestral)
100 * cv(svymean(~INGLES_PUNT, diseno_muestral))
mean(est$INGLES_PUNT)
# Number of examinees: design-based total, its CV (%), and the census total.
svytotal(~EVALUADOS, diseno_muestral)
100 * cv(svytotal(~EVALUADOS, diseno_muestral))
sum(est$EVALUADOS)
# Sum of the sampling weights vs. the census size.
sum(weights(diseno_muestral))
nrow(est)
# Domain: department (department code)
# y_est: mathematics score
# x1: score in one other subject
# x2: energy (utility) stratum
# x3: a school-level variable (e.g. shift, public/private, calendar)
# x4: any other variable of choice
# Estimates by department for MATEMATICAS_PUNT
svymean(~MATEMATICAS_PUNT, diseno_muestral)
100 * cv(svymean(~MATEMATICAS_PUNT, diseno_muestral))
mean(est$MATEMATICAS_PUNT)
svyby(~MATEMATICAS_PUNT, ~DEPARTAMENTO, diseno_muestral, svymean)
100 * cv(svyby(~MATEMATICAS_PUNT, ~DEPARTAMENTO, diseno_muestral, svymean))
mean(est$MATEMATICAS_PUNT)
# First stage is NII / nII
# Second stage is NI / nI
# Third stage is N_i / n_i
# Multiplying them gives the expansion factors (sampling weights)
|
40771556d59b3a17d765a70b3e190a71552c2943
|
dba39c615232885bf426d67ac0c21294005afc6b
|
/RegressionProject_obesity2013&2015.R
|
9770bf32559ff4de64d617d6a6a4742831a4f542
|
[] |
no_license
|
ajauregui11-stat6863/STAT-6509-Regression-Application
|
1d2851f12525a45996638e84049f8088a19f127c
|
dd7fde12eafbb7c52fc3b53e582e9b3091c5e688
|
refs/heads/master
| 2020-03-20T12:17:11.962093
| 2018-06-28T07:46:59
| 2018-06-28T07:46:59
| 137,425,821
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,445
|
r
|
RegressionProject_obesity2013&2015.R
|
# Regression analysis of YRBS obesity data: models fat2 (overweight measure)
# as a function of alcohol use, computer time and soda intake, with survey
# year as a factor.
#
# FIX: the script calls Boxplot/scatterplotMatrix/ncvTest/residualPlots (car)
# and corr.test (psych) without loading those packages; load them explicitly
# so the script is runnable on its own.
library(car)
library(psych)
obesity2<-read.csv("yrbs with new data.csv")
View(obesity2)
# Rename to the analysis variable names; the CSV has exactly these 6 columns.
colnames(obesity2)<-c("year2","state2","alcohol2",
                      "computer2","soda2","fat2")
# NOTE: attach() is kept so the bare variable names below keep working, but
# passing data=obesity2 to modeling calls is the preferred style.
attach(obesity2)
#study obesity2 based on alcohol2, computer2, etc.
par(mfrow=c(2,2))
hist(alcohol2)
Boxplot(alcohol2)
hist(computer2)
Boxplot(computer2)
hist(soda2)
Boxplot(soda2)
#scatterplot matrix (only look at fat2 vs predictors)
# FIX: ellipse takes a logical; the string "FALSE" was passed before.
scatterplotMatrix(~fat2+alcohol2+computer2+soda2,data=obesity2,
                  smooth=FALSE,ellipse=FALSE)
#correlation matrix of the numeric analysis columns
# FIX: the data frame only has 6 columns, so obesity2[,3:9] raised
# "undefined columns selected"; columns 3:6 are the numeric variables.
corr.test(x=obesity2[,3:6],y=NULL,use="pairwise",method="pearson",
          adjust="holm",alpha=.05)
#additive model for the 3 predictors
obesity2.lm<-lm(fat2~alcohol2+computer2+soda2,data=obesity2)
summary(obesity2.lm)
#test for normality of residuals
obesity2.res<-residuals(obesity2.lm)
shapiro.test(obesity2.res)
#test for whether the variance of the error terms is constant
ncvTest(obesity2.lm)
#plot of residuals
residualPlots(obesity2.lm)
#forward stepwise selection between the null and full additive models
obesity2.null<-lm(fat2~1,data=obesity2)
summary(obesity2.null)
obesity2.full=lm(fat2~alcohol2+computer2+soda2,
                 data=obesity2)
summary(obesity2.full)
step(obesity2.null,
     scope=list(lower=obesity2.null,upper=obesity2.full),
     direction="forward")
#reduced model with polynomial alcohol term and interactions
# FIX: the interaction term referenced factor(year); the column is year2.
obesity2.lm2<-lm(fat2~poly(alcohol2,2)+soda2+poly(alcohol2,2)*soda2+
                   soda2*factor(year2)+poly(alcohol2,2)*factor(year2)
                 +factor(year2)
                 ,data=obesity2)
summary(obesity2.lm2)
#test for normality of residuals for reduced additive model
obesity2.res2<-residuals(obesity2.lm2)
shapiro.test(obesity2.res2)
#test for whether the variance of the error terms is constant
ncvTest(obesity2.lm2)
#plot of residuals for reduced additive model and interactions
residualPlots(obesity2.lm2)
plot(alcohol2*computer2,obesity2.res)
abline(0,0)
plot(computer2*soda2,obesity2.res)
abline(0,0)
plot(alcohol2*soda2,obesity2.res)
abline(0,0)
#fitted model with polynomial for alcohol2 at order 2
obesity2.polylm<-lm(fat2~poly(alcohol2,2)+soda2+
                      factor(year2),
                    data=obesity2)
summary(obesity2.polylm)
#test for normality of residuals for polynomial model
obesity2.res<-residuals(obesity2.polylm)
shapiro.test(obesity2.res)
#test for whether the variance of the error terms is constant
ncvTest(obesity2.polylm)
#plot of residuals for polynomial model
residualPlots(obesity2.polylm)
|
bf2c419b27d8fd6c6515876b7f64c3bf2a6148de
|
34394c5936e15be90aa81898fe88d61ac6927de7
|
/CAGEtagSearch.FUN.R
|
3a878303580ee9fd455a9be34e5cc968abeeb81c
|
[] |
no_license
|
thomaspb/R-scripts
|
b5a779c1477d35229ad4a5dce81293354d1c2425
|
aff4290002f2b39fb4e5eddf03528a956a49ef73
|
refs/heads/master
| 2020-05-23T14:28:44.900897
| 2015-07-04T07:47:29
| 2015-07-04T07:47:29
| 38,526,112
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,337
|
r
|
CAGEtagSearch.FUN.R
|
# CAGEtagSearch.FUN.R
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Collects the rows of CAGEinputX whose CAGE-tag rowname (e.g.
# "chr17:10009524..10009534,-") falls inside the target region given by
# TargetChromosome / TargetStart / TargetStop / TargetStrand, all of which
# must exist in the calling environment. Matching rows are stored in the
# list CAGE.result, keyed by their row index (as in the original).

# Split a CAGE tag ID into chromosome / start / stop / strand.
# The strand component keeps its leading comma (",-" or ",+"), matching the
# TargetStrand convention shown in the original example (TargetStrand == ",-").
# FIX: the original pattern "^(([^:]+):)?([0-9]+))?(..([0-9]+))?(,-|+.*)" was
# not a valid regex (stray ")" and a bare "+" in the alternation), so the
# script could not run; the reconstruction below keeps the same capture-group
# layout so c(3L, 4L, 6L, 7L) still selects the four fields.
CAGEtag_parts <- function(x) {
  m <- regexec("^(([^:]+):)?([0-9]+)?(\\.\\.([0-9]+))?(,[-+].*)?", x)
  parts <- do.call(rbind, lapply(regmatches(x, m), `[`, c(3L, 4L, 6L, 7L)))
  colnames(parts) <- c("chromosome", "start", "stop", "strand")
  parts
}

# Progress bar settings
total <- nrow(CAGEinputX)
pb <- txtProgressBar(min = 0, max = total, style = 3)

# Fresh accumulator; replaces remove() of a possibly non-existent object.
CAGE.result <- list()
for (i in seq_len(nrow(CAGEinputX))) {
  CAGEtaginfo <- CAGEtag_parts(rownames(CAGEinputX)[i])
  # Skip IDs that do not parse into a complete chromosome/start/stop/strand.
  if (!any(is.na(CAGEtaginfo))) {
    # FIX: compare the coordinates numerically; the original compared the
    # captured strings, for which e.g. "9" > "10".
    if (CAGEtaginfo[, "chromosome"] == TargetChromosome &&
        as.numeric(CAGEtaginfo[, "start"]) >= TargetStart &&
        as.numeric(CAGEtaginfo[, "stop"]) <= TargetStop &&
        CAGEtaginfo[, "strand"] == TargetStrand) {
      CAGE.result[[i]] <- CAGEinputX[i, ]
    }
  }
  # update progress bar
  setTxtProgressBar(pb, i)
}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
5ac83591c35a091073369cc3663c020bd876c45a
|
dc3e1c83cf4525f6739df2b99d67a0f945e86099
|
/R/tdroc.R
|
1037c107a0dd9efff4997a4cd13e83d6cb8e887e
|
[] |
no_license
|
csetraynor/yardstic-dev
|
f1bb71d02bebf5285a7437275f26b6938316ba7d
|
9f873f373a423186b55f0a2e03e2a99d1daa2537
|
refs/heads/master
| 2020-03-18T07:51:16.310249
| 2018-05-22T21:13:27
| 2018-05-22T21:13:27
| 134,476,438
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,728
|
r
|
tdroc.R
|
#' Calculate tdROC
#'
#'
#' The time dependent ROC analysis (tdROC) allows
#' the user to visualise the sensitivity (true positive rate) and
#' specificity (false positive rate) for all possible cut-offs for the
#' predicted survival.
#' #'
#' @param data For the default functions, a datframe containing survival
#' (time), and status (0:censored/1:event), and the explanatory variables.
#' @param mod Coxph model object fitted with coxph (survival).
#' @return A tdROC object
#' @seealso [iROC]
#' @keywords tdroc
#' @examples
#' data("surv_example")
#'
#' # Given a sample
#' require(survival)
#' mod <- coxph(Surv(time, status)~ age, data = surv_example)
#'
#' tdroc(surv_example, mod)
#'
#' @export tdroc
#' @author Carlos S Traynor
#' @references
#'
#' Liang Li, Cai Wu Department of Biostatistics and
#' The University of Texas MD Anderson Cancer Center (2016).
#' tdROC: Nonparametric Estimation of Time-Dependent ROC Curve
#' from Right Censored Survival Data. R package version 1.0.
#' https://CRAN.R-project.org/package=tdROC
#' @export tdroc
#'
tdroc <- function(data, mod, ...) {
  # S3 generic: dispatch on the class of `data`.
  UseMethod("tdroc")
}
#' @export
#' @rdname tdroc
"tdroc.model.list" <-
  function(data, mod, ...) {
    # Score the held-out (assessment) part of the resample with the fitted
    # model, then estimate the time-dependent ROC curve from the linear
    # predictor against observed survival time / event status.
    holdout <- assessment(data)
    lin_pred <- predict(mod, newdata = holdout, type = "lp")
    tdROC::tdROC(
      X = lin_pred,
      Y = holdout$os_months,
      delta = holdout$os_deceased,
      tau = quantile(holdout$os_months, 0.9),
      n.grid = 1000
    )
  }
#' @export
#' @rdname tdroc
"tdroc.int.matrix" <-
  function(data, mod, ...) {
    # Delegate to the model.list method and extract the first element of
    # the AUC component as a plain (unlisted) value.
    roc_fit <- tdroc.model.list(data, mod)
    unlist(roc_fit$AUC[1])
  }
|
7fbabd7ac1a34fb792dbd0c1004784770059b6d2
|
e9f08e6597d447cd57df5647f109795d14c7c152
|
/R/miRNAcor.r
|
574648f950d90a7f322b7051a5763031b7559578
|
[] |
no_license
|
Liuy12/SomeUsefulScripts
|
81634b7a7f524d06bd41d1874109544d99505cc6
|
8b638e0ea8e267e18588021cf65499425b884f3c
|
refs/heads/master
| 2023-01-28T12:23:33.863612
| 2023-01-04T22:19:45
| 2023-01-04T22:19:45
| 27,837,879
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,101
|
r
|
miRNAcor.r
|
## Correlation test for miRNA and its target genes
# miRNA with enrichment score from ToppGene
#
# miRNAcandi: screen ToppGene-enriched miRNAs against an expression matrix.
# For each row of `miRNAenrich`, every row of `miRNAexpr` whose rowname
# matches the miRNA id (column 3 of `miRNAenrich`, used as a regex by grep)
# is tested with miRNAcor() against the cluster-centre profile.
# Returns a matrix with one row per passing (miRNA, expression-row) pair,
# or NULL when nothing passes (same as the original).
miRNAcandi<-function(clustercenter,miRNAenrich,miRNAexpr){
  results <- list()
  for (i in seq_len(nrow(miRNAenrich))) {
    # All expression rows whose name matches this enriched miRNA.
    hits <- grep(miRNAenrich[i, 3], rownames(miRNAexpr))
    # FIX: the original duplicated the call for length(temp) == 1 and == 2
    # and silently dropped miRNAs with 3+ matching expression rows; looping
    # over every hit handles any number of matches.
    for (rowindex in hits) {
      res <- miRNAcor(rowindex = rowindex, a = miRNAexpr,
                      b = clustercenter, c = miRNAenrich, i = i)
      if (!is.null(res)) {
        results[[length(results) + 1L]] <- res
      }
    }
  }
  # Bind once at the end instead of growing with rbind() inside the loop;
  # do.call(rbind, list()) returns NULL, matching the original's empty result.
  do.call(rbind, results)
}
# miRNAcor: Pearson-correlate one expression row against the cluster centre
# and report the pair when the correlation is strongly negative.
#
# rowindex: row of `a` to test
# a:        miRNA expression matrix (samples in columns)
# b:        numeric cluster-centre profile (same length as ncol(a))
# c:        enrichment table (col 3 = miRNA id, col 4 = annotation,
#           col 5 = enrichment score)
# i:        row of `c` being tested
# cor_cut / p_cut generalize the previously hard-coded -0.8 / 0.05 cut-offs
# (defaults keep the original behaviour).
#
# Returns a 1x4 matrix [score, estimate, p-value, annotation] with the miRNA
# id as rowname, or NULL when the pair does not pass the filter. Note the
# matrix is coerced to character by the annotation assignment, as before.
miRNAcor<-function(rowindex,a,b,c,i,cor_cut=-0.8,p_cut=0.05)
{
  cortest<-cor.test(as.numeric(a[rowindex,]),b)
  # FIX: scalar && instead of elementwise & -- short-circuits on NA estimate.
  if(!is.na(cortest$estimate) && cortest$estimate < cor_cut && cortest$p.value < p_cut)
  {
    temp2<-matrix(ncol=4)
    rownames(temp2)<-c[i,3]
    temp2[,1]<-c[i,5]
    temp2[,2]<-cortest$estimate
    temp2[,3]<-cortest$p.value
    temp2[,4]<-as.character(c[i,4])
    return(temp2)
  }
  # Explicit NULL for non-candidates (rbind() upstream ignores NULL rows).
  NULL
}
|
b1b9a6335de6962c94fb712dc909770c0c8406c4
|
d920b7c82b630bbd3cdf8db95d6e4dd598bab440
|
/Predictive Contracting Rate Models.R
|
3917d2c654aefc5fad5c3c2a1cf4402c7471b9fb
|
[] |
no_license
|
johnstrohecker/govt_contractor_rates
|
a1e132c7dde6e6252ab0ec351cb698bec9b9dcad
|
93eb8648429d0f36964c09d61d366c3ecbe267f8
|
refs/heads/master
| 2020-07-03T03:02:17.520977
| 2019-10-04T00:52:50
| 2019-10-04T00:52:50
| 201,764,410
| 0
| 0
| null | 2019-11-09T19:22:55
| 2019-08-11T12:53:12
|
R
|
UTF-8
|
R
| false
| false
| 14,204
|
r
|
Predictive Contracting Rate Models.R
|
# Script: predictive models of US government contracting labor rates.
# Libraries: imputeTS (NA replacement), caret (ML utilities), dplyr (wrangling),
# randomForest / rpart / MASS (models), Metrics (rmse), rattle + rpart.plot
# (tree visualisation).
library(imputeTS)
library(caret)
library(dplyr)
library(randomForest)
library(Metrics)
library(rattle)
library(rpart)
library(rpart.plot)
library(MASS)
# Read in data from GSA
# data source can be found at Calc.GSA.gov
# NOTE(review): assumes the CSV sits in the working directory — confirm.
dataframe <- read.csv("government_contracting_rates.csv")
# Carry forward prices into next year if there is no pricing data for year +1 or year +2
# (na_replace fills NA in the first argument with the matching value of the second).
dataframe$Next.Year.Labor.Price <- na_replace(dataframe$Next.Year.Labor.Price, dataframe$Current.Year.Labor.Price)
dataframe$Second.Year.Labor.Price <- na_replace(dataframe$Second.Year.Labor.Price, dataframe$Next.Year.Labor.Price)
# Convert data format for the dated elements
dataframe$Begin.Date <- as.Date(dataframe$Begin.Date)
dataframe$End.Date <- as.Date(dataframe$End.Date)
# omit rows with NA to simplify modeling (drops rows with any remaining NA)
dataframe <- na.exclude(dataframe)
#########################
## Feature Engineering ##
#########################
# Lower-case the labor category titles so keyword matching is case-insensitive.
dataframe$Labor.Category <- tolower(dataframe$Labor.Category)
# Map of dummy-column name -> keyword pattern searched for in the title.
# (Order matters: columns are appended in this order, matching the original.
# "Admistrative" spelling kept as-is so downstream column names are unchanged.)
title_keywords <- c(
  Technician = "technician", Admistrative = "administrative", Clerk = "clerk",
  Engineer = "engineer", Architect = "architect", Analyst = "analyst",
  Program = "program", Project = "project", Manager = "manager",
  Director = "director", Expert = "expert", Executive = "executive",
  Principal = "principal", Lead = "lead", Senior = "senior",
  Junior = "junior", Journeyman = "journeyman", Partner = "partner",
  Strategy = "strategy", Actuary = "actuary", President = "president",
  Clevel = "c-level", Coach = "coach", ERP = "erp", Strategic = "strategic",
  Producer = "producer", Sr = "sr.", Jr = "jr.", Consultant = "consultant",
  Interpreter = "interpreter"
)
# One logical dummy column per keyword.
for (col_name in names(title_keywords)) {
  dataframe[[col_name]] <- grepl(title_keywords[[col_name]], dataframe$Labor.Category)
}
# Drop identifier columns and future-price (leakage) columns before modelling.
dataframe <- subset(dataframe, select= -c(Contract.., SIN, Vendor.Name, Labor.Category, Next.Year.Labor.Price, Second.Year.Labor.Price))
# 75/25 train/test split keyed on a temporary row id.
dataframe <- dataframe %>% mutate(row_id = row_number())
set.seed(82)
train <- dataframe %>% sample_frac(.75)
test <- anti_join(dataframe, train, by = 'row_id')
# Remove the row id so models do not train on it.
train <- subset(train, select= -c(row_id))
test <- subset(test, select= -c(row_id))
####################
#  RANDOM FOREST   #
####################
# Train a random forest on all remaining predictors.
RF <- randomForest(Current.Year.Labor.Price ~ ., data = train, ntree = 200, mtry = 12, nodesize = 2, importance = TRUE, proximity = FALSE)
# Predict on the held-out test set.
RFPred <- predict(RF, test)
# Score the model using RMSE (Metrics::rmse).
rmse(test$Current.Year.Labor.Price, RFPred)
# Plot actual vs predicted rates with a y = x reference line.
# NOTE(review): assumes the CSV has a column literally named `education.Level`
# (lower-case e) — confirm against the source file.
plotframe <- data.frame( predicted = RFPred, actual = test$Current.Year.Labor.Price, ed = test$education.Level)
ggplot(data = plotframe, aes( x = actual, y = predicted)) + geom_point() + labs(x = "Actual Bill Rate (current year)", y = "Predicted Bill Rate", title = "Bi-variate analysis, Random Forest Model") + geom_abline(slope = 1, intercept = 0)
# View relative feature importance (table and plot).
importance(RF)
varImpPlot(RF)
########################
#  Linear Regression   #
########################
# Fit the full OLS model on all predictors.
LM <- lm(Current.Year.Labor.Price ~ ., data = train)
# Predict on the held-out test set.
LMPred <- predict(LM, test)
# Score the model using RMSE.
rmse(test$Current.Year.Labor.Price, LMPred)
# Plot actual vs predicted with a y = x reference line.
plotframe <- data.frame( predicted = LMPred, actual = test$Current.Year.Labor.Price, ed = test$education.Level)
ggplot(data = plotframe, aes( x = actual, y = predicted)) + geom_point() + labs(x = "Actual Bill Rate (current year)", y = "Predicted Bill Rate", title = "Bi-variate analysis, Linear Regression Model") + geom_abline(slope = 1, intercept = 0)
## Repeat prediction and scoring for Stepwise regression ##
# Stepwise regression model (MASS::stepAIC).
# NOTE(review): direction = "forward" starting FROM the full model cannot add
# terms, so this is effectively the full model — likely intended "backward".
step.model <- stepAIC(LM, direction = "forward", trace = FALSE)
# tried forward and backward stepping. No meaningful difference in model performance
summary(step.model)
# Predict on the held-out test set.
StepwisePred <- predict(step.model, test)
# Score the model using RMSE.
rmse(test$Current.Year.Labor.Price, StepwisePred)
# Plot actual vs predicted.
plotframe <- data.frame( predicted = StepwisePred, actual = test$Current.Year.Labor.Price, ed = test$education.Level)
ggplot(data = plotframe, aes( x = actual, y = predicted)) + geom_point() + labs(x = "Actual Bill Rate (current year)", y = "Predicted Bill Rate", title = "Bi-variate analysis, Stepwise Linear Regression Model") + geom_abline(slope = 1, intercept = 0)
## Repeat prediction and scoring for linear regression with 2 way interactions ##
# `~ .^2` adds all pairwise interaction terms.
LMinteractions <- lm(Current.Year.Labor.Price ~ . ^2, data = train)
# Stepwise regression model (see forward-direction caveat above).
interaction.model <- stepAIC(LMinteractions, direction = "forward", trace = FALSE)
# tried forward and backward stepping. No meaningful difference in model performance
summary(interaction.model)
# Predict on the held-out test set.
InteractionPred <- predict(interaction.model, test)
# Some very wonky outliers. Set upper bound at $600 and lower bound @ $11 to match original data set
InteractionPred <- ifelse(InteractionPred > 600, 600, InteractionPred)
InteractionPred <- ifelse(InteractionPred < 11, 11, InteractionPred)
# Score the model using RMSE.
rmse(test$Current.Year.Labor.Price, InteractionPred)
# Plot actual vs predicted.
plotframe <- data.frame( predicted = InteractionPred, actual = test$Current.Year.Labor.Price, ed = test$education.Level)
ggplot(data = plotframe, aes( x = actual, y = predicted)) + geom_point() + labs(x = "Actual Bill Rate (current year)", y = "Predicted Bill Rate", title = "Bi-variate analysis, Stepwise Linear Regression Model with two way interactions") + geom_abline(slope = 1, intercept = 0)
## Repeat prediction and scoring for linear regression using log transformation for the dependant variable ##
# Create log-transformed response; remove the raw price from the TRAIN set so
# it cannot leak into the predictors (test keeps it for scoring below).
train$LogX <- log(train$Current.Year.Labor.Price)
test$LogX <- log(test$Current.Year.Labor.Price)
train <- subset(train, select = -c(Current.Year.Labor.Price))
# Fit the model on the log response.
LMlog <- lm(LogX ~ ., data = train)
# Predict on the held-out test set.
LogPred <- predict(LMlog, test)
# Unwind the log transformation back to dollars.
LogPred <- exp(LogPred)
# Score the model using RMSE.
rmse(test$Current.Year.Labor.Price, LogPred)
# Plot actual vs predicted.
plotframe <- data.frame(predicted = LogPred, actual = test$Current.Year.Labor.Price, ed = test$education.Level)
ggplot(data = plotframe, aes( x = actual, y = predicted)) + geom_point() + labs(x = "Bill Rate (current year)", y = "Predicted Bill Rate", title = "Bi-variate analysis, Linear Regression Model using log transform for DV") + geom_abline(slope = 1, intercept = 0)
########################
#    Neural Network    #
########################
# Adapted from:
# https://www.analyticsvidhya.com/blog/2017/09/creating-visualizing-neural-network-in-r/
# Work on a fresh copy so the earlier train/test objects are untouched.
df <- dataframe
colnames(df)
# One-hot encode the categorical variables so the network sees numeric inputs.
# Guarded install: only runs when the package is genuinely missing.
if (!requireNamespace("dummies", quietly = TRUE)) install.packages("dummies")
library(dummies)
df.new <- dummy.data.frame(df, names = c("Business.Size", "Schedule", "education.Level"), sep = "_")
names(df.new)
# Columns fed to the network. The target (Current.Year.Labor.Price) is listed
# LAST on purpose: the prediction code below relies on the predictors being
# every column except the target.
keeps <- c("Schedule_36_Office_Imaging_Document", "Schedule_621i_Healthcare", "Schedule_71_Furniture", "Schedule_71_IIK",
           "Schedule_736TAPS", "Schedule_78_SPORTS", "Schedule_AIMS", "Schedule_Consolidated", "Schedule_Environmental",
           "Schedule_FABS", "Schedule_IT Schedule 70", "Schedule_Language Services", "Schedule_Logistics", "Schedule_PES",
           "Schedule_MOBIS", "education.Level_Associates", "education.Level_Bachelors", "education.Level_Masters",
           "education.Level_High School", "education.Level_Ph.D.", "Business.Size_other than small business", "Business.Size_small business", "Schedule_03FAC", "Minimum.Years.Experience", "Current.Year.Labor.Price")
df <- df.new[keeps]
colnames(df)
head(df)
# Make column names syntactically valid (no spaces) for the model formula.
names(df)[names(df) == "Schedule_IT Schedule 70"] <- "Schedule_IT_Schedule"
names(df)[names(df) == "Schedule_Language Services"] <- "Schedule_Language_Services"
names(df)[names(df) == "Business.Size_other than small business"] <- "Business.Size_other_than_small_business"
names(df)[names(df) == "Business.Size_small business"] <- "Business.Size_small_business"
names(df)[names(df) == "education.Level_High School"] <- "education.Level_High_School"
colnames(df)
df <- na.exclude(df)
# 60/40 random train/test split.
samplesize <- 0.60 * nrow(df)
set.seed(80)
index <- sample(seq_len(nrow(df)), size = samplesize)
datatrain <- df[index, ]
datatest <- df[-index, ]
# Min-max scale every column to [0, 1] for the network.
# (Renamed from max/min to stop shadowing the base functions used below.)
col_max <- apply(df, 2, max)
col_min <- apply(df, 2, min)
scaled <- as.data.frame(scale(df, center = col_min, scale = col_max - col_min))
# BUG FIX: the original called install.packages("neuralnet ") with a trailing
# space in the package name, which can never install the package.
if (!requireNamespace("neuralnet", quietly = TRUE)) install.packages("neuralnet")
library(neuralnet)
# Training and test sets on the scaled data.
trainNN <- scaled[index, ]
testNN <- scaled[-index, ]
# Predictor columns = every column except the target.
predictor_cols <- setdiff(colnames(scaled), "Current.Year.Labor.Price")
# Fit the network (3 hidden units). stepmax raised to help convergence, see
# https://stackoverflow.com/questions/19360835/neuralnet-overcoming-the-non-convergence-of-algorithm
set.seed(2)
NN <- neuralnet(Current.Year.Labor.Price ~ Schedule_36_Office_Imaging_Document + Schedule_621i_Healthcare + Schedule_71_Furniture + Schedule_71_IIK + Schedule_736TAPS + Schedule_78_SPORTS + Schedule_AIMS + Schedule_Consolidated + Schedule_Environmental + Schedule_FABS + Schedule_IT_Schedule + Schedule_Language_Services + Schedule_Logistics + Schedule_PES + Schedule_MOBIS + education.Level_Associates + education.Level_Bachelors + education.Level_Masters + education.Level_High_School + education.Level_Ph.D. + Business.Size_other_than_small_business + Business.Size_small_business + Schedule_03FAC + Minimum.Years.Experience, trainNN, hidden = 3, linear.output = TRUE, stepmax = 1e6)
# Plot the fitted network.
plot(NN)
# Prediction using the neural network.
colnames(df)
# BUG FIX: the original passed testNN[, c(1:25)], which includes the scaled
# TARGET column as a 25th input; the fitted network has only 24 inputs.
predict_testNN <- compute(NN, testNN[, predictor_cols])
# Undo the min-max scaling to get predictions back in dollars.
predict_testNN <- (predict_testNN$net.result * (max(df$Current.Year.Labor.Price) - min(df$Current.Year.Labor.Price))) + min(df$Current.Year.Labor.Price)
plot(datatest$Current.Year.Labor.Price, predict_testNN, col = 'blue', pch = 16, ylab = "predicted price NN", xlab = "real price")
# Root Mean Square Error (RMSE) and MSE on the test set.
RMSE.NN <- (sum((datatest$Current.Year.Labor.Price - predict_testNN)^2) / nrow(datatest))^0.5
MSE.NN <- sum((datatest$Current.Year.Labor.Price - predict_testNN)^2) / nrow(datatest)
# Overlay NN and log-LM predictions against actuals.
plot(datatest$Current.Year.Labor.Price, predict_testNN, col = 'red', main = 'Real vs predicted NN', pch = 18, cex = 0.7)
points(test$Current.Year.Labor.Price, LogPred, col = 'blue', pch = 18, cex = 0.7)
abline(0, 1, lwd = 2)
legend('bottomright', legend = c('NN', 'LM'), pch = 18, col = c('red', 'blue'))
# Cross validation of the network (k random 90/10 resamples), scored by MSE.
# https://www.r-bloggers.com/fitting-a-neural-network-in-r-neuralnet-package/
set.seed(450)
k <- 10
cv.error <- numeric(k)  # preallocate instead of growing inside the loop
library(plyr)
pbar <- create_progress_bar('text')
pbar$init(k)
for (i in 1:k) {
  index <- sample(1:nrow(df), round(0.9 * nrow(df)))
  trainNN <- scaled[index, ]
  testNN <- scaled[-index, ]
  NN <- neuralnet(Current.Year.Labor.Price ~ Schedule_36_Office_Imaging_Document + Schedule_621i_Healthcare + Schedule_71_Furniture + Schedule_71_IIK + Schedule_736TAPS + Schedule_78_SPORTS + Schedule_AIMS + Schedule_Consolidated + Schedule_Environmental + Schedule_FABS + Schedule_IT_Schedule + Schedule_Language_Services + Schedule_Logistics + Schedule_PES + Schedule_MOBIS + education.Level_Associates + education.Level_Bachelors + education.Level_Masters + education.Level_High_School + education.Level_Ph.D. + Business.Size_other_than_small_business + Business.Size_small_business + Schedule_03FAC + Minimum.Years.Experience, trainNN, hidden = 3, linear.output = TRUE, stepmax = 1e6)
  # BUG FIX: exclude the target column from the network inputs here as well.
  predict_testNN <- compute(NN, testNN[, predictor_cols])
  predict_testNN <- (predict_testNN$net.result * (max(df$Current.Year.Labor.Price) - min(df$Current.Year.Labor.Price))) + min(df$Current.Year.Labor.Price)
  # BUG FIX: the original compared the FULL df target vector against the
  # test-fold predictions (length mismatch -> silent recycling), and computed
  # an unused testNN$net.result (NULL on a data frame). Compare only the
  # held-out rows.
  cv.error[i] <- sum((df$Current.Year.Labor.Price[-index] - predict_testNN)^2) / nrow(testNN)
  pbar$step()
}
mean(cv.error)
cv.error
boxplot(cv.error, xlab = 'MSE CV', col = 'cyan',
        border = 'blue', names = 'CV error (MSE)',
        main = 'CV error (MSE) for NN', horizontal = TRUE)
|
af47e73676665e519836fbe8e4119d3887913bf5
|
d443605e74edd3b456ab80adc25ebec926541df4
|
/eda/ex1/Plot4.R
|
989a04132b1b93e46ae43871214f6c5da0b9d078
|
[] |
no_license
|
fagnersutel/coursera
|
8956fc248892c4ea143575445f1e87fdb47d3328
|
df964b387fea21d39fbe95bfbfb1854eec931485
|
refs/heads/master
| 2021-05-05T14:12:03.975463
| 2018-10-05T15:30:39
| 2018-10-05T15:30:39
| 118,434,589
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,731
|
r
|
Plot4.R
|
# Exploratory plots of PM2.5 emissions from coal-related sources (EPA NEI data).
# NOTE(review): setwd() with a machine-specific path only works on the author's
# machine; kept to preserve the script's behaviour.
setwd('~/OneDrive/Cursos/Coursera/DataScienceSpecialization/coursera/eda/ex1/')
list.files()
# BUG FIX: ggplot() is used throughout but ggplot2 was never attached.
library(ggplot2)
# Load the PM2.5 emissions summary data.
summarySCC_PM25 <- readRDS("FNEI_data//summarySCC_PM25.rds")
# Load the Source Classification Code table for PM2.5 source types.
Source_Classification_Code <- readRDS("FNEI_data//Source_Classification_Code.rds")
# Keep only classification rows whose short name contains "coal" (case-insensitive).
Source_Classification_CodeCoal <- Source_Classification_Code[grepl("coal", Source_Classification_Code$Short.Name, ignore.case = TRUE), ]
# Start from the full emissions table (6,497,651 rows)...
summarySCC_PM25Coal <- summarySCC_PM25
dim(summarySCC_PM25Coal)
# ...then keep only rows whose SCC matches a coal-related code (53,400 rows).
summarySCC_PM25Coal <- summarySCC_PM25[summarySCC_PM25$SCC %in% Source_Classification_CodeCoal$SCC, ]
dim(summarySCC_PM25Coal)
# Total emissions per year and source type.
ResultadorCarvao <- aggregate(Emissions ~ year + type, summarySCC_PM25Coal, sum)
# Line chart of emissions by year/type (auto-printed at top level on screen).
ggplot(ResultadorCarvao, aes(year, Emissions / 10^5, col = type)) +
  ggtitle(expression("Emissoes PM2.5 de Carvao por Ano nos EUA (10^5 Toneladas) 1999 ~ 1998")) +
  xlab("Ano") +
  ylab(expression("Emissao Norte Americana de PM2.5 (10^5 Toneladas)")) +
  scale_colour_discrete(name = "Tipo") +
  theme(legend.title = element_text(face = "bold")) +
  geom_line() +
  geom_point()
# Bar chart version (on screen).
ggplot(summarySCC_PM25Coal, aes(factor(year), Emissions / 10^5)) +
  geom_bar(stat = "identity", fill = "#48D1CC", width = 0.75) +
  theme_bw() + guides(fill = FALSE) +
  labs(x = "Ano", y = expression("Emissao Norte Americana de PM2.5 (10^5 Toneladas)")) +
  labs(title = expression("Emissoes PM2.5 de Carvao por Ano nos EUA (10^5 Toneladas) 1999 ~ 1998"))
# Write the line chart to Plot4.png.
png("Plot4.png", width = 480, height = 480)
# BUG FIX: a ggplot object is only rendered when explicitly print()ed when the
# file is source()d; without print() the PNG would come out empty.
print(
  ggplot(ResultadorCarvao, aes(year, Emissions, col = type)) +
    ggtitle(expression("Emissoes PM2.5 de Carvao por Ano nos EUA 1999 ~ 1998")) +
    xlab("Ano") +
    ylab(expression("Emissao Norte Americana de PM2.5 (10^5 Toneladas)")) +
    scale_colour_discrete(name = "Tipo") +
    theme(legend.title = element_text(face = "bold")) +
    geom_line() +
    geom_point()
)
# Close the device to flush the file.
dev.off()
# Write the bar chart to Plot4b.png.
png("Plot4b.png", width = 480, height = 480)
print(
  ggplot(summarySCC_PM25Coal, aes(factor(year), Emissions / 10^5)) +
    geom_bar(stat = "identity", fill = "#48D1CC", width = 0.75) +
    theme_bw() + guides(fill = FALSE) +
    labs(x = "Ano", y = expression("Emissao Norte Americana de PM2.5 (10^5 Toneladas)")) +
    labs(title = expression("Emissoes PM2.5 de Carvao por Ano nos EUA 1999 ~ 1998"))
)
dev.off()
|
c026c09e3c4d1682feef35916418b633982afaed
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.management/man/ssm_get_document.Rd
|
6d69755818421b67f1309ab802288a878fd973d3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 1,903
|
rd
|
ssm_get_document.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ssm_operations.R
\name{ssm_get_document}
\alias{ssm_get_document}
\title{Gets the contents of the specified Systems Manager document}
\usage{
ssm_get_document(Name, VersionName, DocumentVersion, DocumentFormat)
}
\arguments{
\item{Name}{[required] The name of the Systems Manager document.}
\item{VersionName}{An optional field specifying the version of the artifact associated with
the document. For example, "Release 12, Update 6". This value is unique
across all versions of a document and can't be changed.}
\item{DocumentVersion}{The document version for which you want information.}
\item{DocumentFormat}{Returns the document in the specified format. The document format can be
either JSON or YAML. JSON is the default format.}
}
\value{
A list with the following syntax:\preformatted{list(
Name = "string",
VersionName = "string",
DocumentVersion = "string",
Status = "Creating"|"Active"|"Updating"|"Deleting"|"Failed",
StatusInformation = "string",
Content = "string",
DocumentType = "Command"|"Policy"|"Automation"|"Session"|"Package"|"ApplicationConfiguration"|"ApplicationConfigurationSchema"|"DeploymentStrategy"|"ChangeCalendar"|"Automation.ChangeTemplate",
DocumentFormat = "YAML"|"JSON"|"TEXT",
Requires = list(
list(
Name = "string",
Version = "string"
)
),
AttachmentsContent = list(
list(
Name = "string",
Size = 123,
Hash = "string",
HashType = "Sha256",
Url = "string"
)
),
ReviewStatus = "APPROVED"|"NOT_REVIEWED"|"PENDING"|"REJECTED"
)
}
}
\description{
Gets the contents of the specified Systems Manager document.
}
\section{Request syntax}{
\preformatted{svc$get_document(
Name = "string",
VersionName = "string",
DocumentVersion = "string",
DocumentFormat = "YAML"|"JSON"|"TEXT"
)
}
}
\keyword{internal}
|
2d2544bdb11fa30cd24112224e5e8a61fed6a6e5
|
cd82b63e473697d97daf287df9681fb627109fb0
|
/Session2.R
|
4fa6af64b17e77193d104096d21f09d61c56ed6b
|
[] |
no_license
|
AlbertoCortes13/DSJHU01
|
01d8dd012ba09793c3ba725932e0bd220a13ff66
|
f8608c1c9ee2fd2c8be3fe4211e0a5456e49378e
|
refs/heads/master
| 2020-12-26T14:23:46.164548
| 2016-08-31T20:54:37
| 2016-08-31T20:54:37
| 63,817,294
| 0
| 0
| null | 2016-07-20T21:54:47
| 2016-07-20T21:54:47
| null |
UTF-8
|
R
| false
| false
| 2,488
|
r
|
Session2.R
|
# Session 2 exercises: part 1 works on the "specdata" pollution monitor CSVs,
# part 2 on the built-in iris dataset.
# BUG FIX: rbindlist() below comes from data.table, which was never loaded.
library(data.table)
##1) Create a single function to load the dataset of complete cases given the folder location: eg. load_complete('specdata')
load_complete <- function(directory) {
  # NOTE(review): hard-coded Windows paths; this only runs on the author's
  # machine. Kept to preserve the original behaviour.
  directory <- setwd(file.path("C:", "Users", "a.cortes.guerrero", "Documents", directory))
  # BUG FIX: the original wrapped this in `for (i in 1:332)`, re-listing,
  # re-reading and re-binding all 332 files once per iteration (332 identical
  # passes over the whole folder). A single pass is sufficient.
  files <- list.files(path = "C:/Users/a.cortes.guerrero/Documents/specdata", pattern = "*.csv")
  readfiles <- lapply(files, read.csv)
  bindlist <- rbindlist(readfiles, use.names = TRUE)
  # Keep only rows with no missing values.
  bindlist[complete.cases(bindlist), ]
}
# NOTE(review): the questions below expect `bindlist` in the global
# environment; run `bindlist <- load_complete("specdata")` first.
##2) What are the dimensions (number of rows and columns) of the dataset.
dim(bindlist)
##3) Specify the column name and the data type for each column of the dataset.
sapply(bindlist, class)
##4) What is the minimum and maximum of all the numeric columns of the dataset.
apply(bindlist, 2, min)
apply(bindlist, 2, max)
##5) What is the date range of the data included. (earliest and latest day that can be found) in the dataset
x <- bindlist[order(bindlist$Date), ]
# BUG FIX: after sorting ascending by Date the earliest observation is the
# FIRST row and the latest is the LAST row; the original had them swapped.
earliest <- head(x, 1)
latest <- tail(x, 1)
##6) Get the daily mean for the sulfate polutant levels in the dataset
aggregate(bindlist$sulfate ~ bindlist$Date, bindlist, mean)
##7) Get the mean nitrate levels for each monitor in the dataset.
aggregate(bindlist$nitrate ~ bindlist$ID, bindlist, mean)
########## PART 2 #########
##8) Load the iris dataset
data(iris)
##9) What are the dimensions of the iris dataset?
dim(iris)
##10) What are the column name and the data type for each column in the iris dataset?
sapply(iris, class)
##11) What is the minimum and maximum of all the numeric columns in the iris dataset?
apply(iris, 2, min)
apply(iris, 2, max)
##12) What are the different categories of species that exist in the iris dataset?
# BUG FIX: `subset(iris$Species, iris$Species != iris$Species)` compares the
# column to itself and therefore always selects nothing; unique() gives the
# distinct categories.
unique(iris$Species)
##13) What is the mean sepal length for the species versicolor in the iris dataset?
a <- subset(iris$Sepal.Length, iris$Species == "versicolor")
mean(a)
##14) Obtain a vector with the means of the sepal lenght, sepal width, petal length and petal width across all species from the iris dataset.
sapply(iris, mean)
##15) Obtain the mean petal length for each of the species
aggregate(iris$Petal.Length ~ iris$Species, iris, mean)
|
1f45f59be75c74299b9aeac1baecdce3dcacaf43
|
2b106b4488e294b561de4cdd8492d5341229d6d4
|
/man/data_collate.Rd
|
8476d493a1ff9e5d2e5f668c9b29828c4df3b217
|
[
"Apache-2.0"
] |
permissive
|
ysnghr/fastai
|
120067fcf5902b3e895b1db5cd72d3b53f886682
|
b3953ad3fd925347362d1c536777e935578e3dba
|
refs/heads/master
| 2022-12-15T17:04:53.154509
| 2020-09-09T18:39:31
| 2020-09-09T18:39:31
| 292,399,169
| 0
| 0
|
Apache-2.0
| 2020-09-09T18:34:06
| 2020-09-02T21:32:58
|
R
|
UTF-8
|
R
| false
| true
| 276
|
rd
|
data_collate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tabular.R
\name{data_collate}
\alias{data_collate}
\title{Data collate}
\usage{
data_collate(object, batch)
}
\arguments{
\item{batch}{batch}
}
\description{
Convert `batch` items to tensor data.
}
|
acba02893abacbd76cbd8d74e716bc60524b2118
|
9e08bd33cfcc12f94a1162e46b29da703ad702f1
|
/R/discretize_exprs_supervised.R
|
1031f4bbf295cd0d12510090a48c4c8e4183235f
|
[] |
no_license
|
AndiPauli/FCBF
|
237577dd4bd0453e23981ffa7fe123dbd81510bb
|
b851d25d1e311a40a15807edde985a47fa83cfde
|
refs/heads/master
| 2020-08-27T21:18:11.993204
| 2019-09-18T19:49:14
| 2019-09-18T19:49:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,847
|
r
|
discretize_exprs_supervised.R
|
#' @importMethodsFrom SummarizedExperiment assay colData
#' @import pbapply
#' @import parallel
NULL
#' supervised_disc_df
#'
#' Uses several discretizations and selects the one that is best for a given variable (gene)
#' in comparison to a target class by equivocation
#'
#' @param expression_table A previously normalized expression table
#' (genes in rows, cells/samples in columns)
#' @param target A series of labels matching each of the values in the gene vector
#' @param parallel Set calculations in parallel. May be worth it if the number of rows and columns is really large. Do watchout for memory overload.
#' @export
#' @return A data frame with the discretized features in the same order as previously
#' @examples
#' data(scDengue)
#' exprs <- as.data.frame(SummarizedExperiment::assay(scDengue, 'logcounts'))
#' exprs <- exprs [1:200, 1:120]
#' infection <- SummarizedExperiment::colData(scDengue)
#' target <- infection$infection
#' discrete_expression <- as.data.frame(discretize_exprs_supervised(exprs,target))
#' fcbf(discrete_expression,target, thresh = 0.05, verbose = TRUE)
discretize_exprs_supervised <-
  function(expression_table, target, parallel = FALSE) {
    if (parallel) {
      # BUG FIX: detectCores() - 2 can be zero or negative on small machines,
      # which makes makeCluster() fail; always request at least one worker.
      ncores <- max(1L, parallel::detectCores() - 2L)
      cl <- parallel::makeCluster(ncores)
      # Release the workers even if discretization errors midway.
      on.exit(parallel::stopCluster(cl), add = TRUE)
      # Discretize each gene (row) against the target labels, in parallel.
      discrete_expression <-
        parallel::parApply(cl,
                           expression_table,
                           1,
                           discretize_gene_supervised,
                           target)
    }
    else{
      # Serial fallback with a progress bar.
      discrete_expression <-
        pbapply::pbapply(expression_table, 1, discretize_gene_supervised, target)
    }
    # apply() returns genes in columns; transpose back to genes-in-rows and
    # restore the original row names.
    discrete_expression <- as.data.frame(t(discrete_expression))
    rownames(discrete_expression) <- rownames(expression_table)
    return(discrete_expression)
  }
|
b98e19c320e2b383b2e192c3805198e63dffc7f7
|
9b6eb608ffe210e5204d9b305594d62adbc98ce3
|
/footyr/man/ID.Rd
|
9ab058b66c1edf60db153e69007c0e13e8619f1b
|
[
"MIT"
] |
permissive
|
tyoung95/tyoung
|
7436310fc726328fe68b7c7dc69d470c260f33b2
|
4b3ae2bc1a08d7e2994b3313fd067d7b7edc4d3a
|
refs/heads/master
| 2020-07-23T02:16:09.425516
| 2019-12-13T19:51:54
| 2019-12-13T19:51:54
| 207,414,417
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,138
|
rd
|
ID.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/id.R
\name{ID}
\alias{ID}
\title{Get ID numbers for football leagues and teams.}
\usage{
ID(league_name, team_name = NA)
}
\arguments{
\item{league_name}{(string) The name of the league/cup/competition that you
are looking to retrieve the ID number for. Required. No default.}
\item{team_name}{(string) The name of the team in the league that you
want to pull the team ID number for. Optional. Defaults to NA.}
}
\value{
A dataframe with league and team (optional) ID numbers.
}
\description{
This is a basic function that allows you to retrieve the ID numbers of
leagues/cups and teams.
}
\details{
This function serves as the basis for the rest of the functions in the
package. Upon input of strings for league and team names, the function outputs
the ID numbers of the league and teams, that you will need to use in other
functions in this package, as the API uses these ID numbers for queries.
}
\examples{
ID("English Premier League", "Arsenal")
ID("Spanish La Liga", "FC Barcelona")
}
\keyword{ID,}
\keyword{football,}
\keyword{league,}
\keyword{team}
|
12550efb8185b50323132fd4acacd3490dd13963
|
10c2bc2f0ba9dacf702b373bc5f8b57d6f42a0f4
|
/bin/castle/xin.R
|
d1270a467fb8fb21c01586b1831d9b276ab12613
|
[] |
no_license
|
powellgenomicslab/SingleCell_Prediction
|
930a18575cae78282675d1be79844f529926b9d5
|
3935dee4cd1b811201a25c6403a6ae5be99f4ac4
|
refs/heads/master
| 2021-03-22T03:28:50.418324
| 2019-10-14T01:18:59
| 2019-10-14T01:18:59
| 88,580,986
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,111
|
r
|
xin.R
|
# CaSTLe-style cell-type classification: train on the Baron pancreas dataset,
# predict cell types on the Xin pancreas dataset, then cross-tabulate
# predictions against the published labels.
# tested on R 3.4.3
# setup required libraries and constants
library(scater) # tested on version 1.6.3, install from Bioconductor: source("https://bioconductor.org/biocLite.R"); biocLite("scater")
library(xgboost) # tested on version 0.6.4.1, install from CRAN: install.packages("xgboost")
library(igraph) # tested on version 1.2.1, install from CRAN: install.packages("igraph")
library(tidyverse)
# Bin edges used both for mutual-information scoring and dummy encoding.
BREAKS=c(-1, 0, 1, 6, Inf)
# Number of top features kept from each ranking (mean and mutual information).
nFeatures = 100
# 1. Load datasets in scater format: loaded files expected to contain "Large SingleCellExperiment" object
source = readRDS("data/2018-04-15_pancreas_baron/baron-human.rds")
target = readRDS("data/2018-04-15_pancreas_xin/xin.rds")
# Transpose so cells are rows and genes are columns.
ds1 = t(exprs(source))
ds2 = t(exprs(target))
# NOTE(review): this single line strips the "__<suffix>" from ds2's column
# names and, via the right-assign arrow, stores the stripped names into
# rownames(target) as well — confirm that double assignment is intended.
colnames(ds2) <- colnames(ds2) %>% str_remove("__.*$") -> rownames(target)
sourceCellTypes = colData(source)[,"cell_type1"]
# Collapse rare source cell types into an "other" class.
sourceCellTypes <- as.factor(ifelse(sourceCellTypes %in% c("alpha", "beta", "delta", "gamma"),
as.character(sourceCellTypes), "other"))
# 2. Unify sets, excluding low expressed genes
# Count, per gene, the number of cells with nonzero expression in each dataset.
source_n_cells_counts = apply(exprs(source), 1, function(x) { sum(x > 0) } )
target_n_cells_counts = apply(exprs(target), 2, function(x) { sum(x > 0) } )
# Keep genes expressed in more than 10 cells in BOTH datasets.
common_genes = intersect( rownames(source)[source_n_cells_counts>10],
rownames(target)[target_n_cells_counts>10]
)
remove(source_n_cells_counts, target_n_cells_counts)
ds1 = ds1[, colnames(ds1) %in% common_genes]
ds2 = ds2[, colnames(ds2) %in% common_genes]
# Stack source cells on top of target cells, same gene order in both.
ds = rbind(ds1[,common_genes], ds2[,common_genes])
# Logical mask: TRUE for source rows, FALSE for target rows.
isSource = c(rep(TRUE,nrow(ds1)), rep(FALSE,nrow(ds2)))
remove(ds1, ds2)
# 3. Highest mean in both source and target
topFeaturesAvg = colnames(ds)[order(apply(ds, 2, mean), decreasing = T)]
# 4. Highest mutual information in source
# (igraph::compare with method "nmi" between binned expression and labels)
topFeaturesMi = names(sort(apply(ds[isSource,],2,function(x) { compare(cut(x,breaks=BREAKS),sourceCellTypes,method = "nmi") }), decreasing = T))
# 5. Top n genes that appear in both mi and avg
selectedFeatures = union(head(topFeaturesAvg, nFeatures) , head(topFeaturesMi, nFeatures) )
# 6. remove correlated features
# Keep only the lower triangle so each pair is considered once.
tmp = cor(ds[,selectedFeatures], method = "pearson")
tmp[!lower.tri(tmp)] = 0
# Drop a feature when it correlates >= 0.9 with an earlier one.
selectedFeatures = selectedFeatures[apply(tmp,2,function(x) any(x < 0.9))]
remove(tmp)
# 7,8. Convert data from continous to binned dummy vars
# break datasets to bins
dsBins = apply(ds[, selectedFeatures], 2, cut, breaks= BREAKS)
# use only bins with more than one value
nUniq = apply(dsBins, 2, function(x) { length(unique(x)) })
# convert to dummy vars
ds = model.matrix(~ . , as.data.frame(dsBins[,nUniq>1]))
remove(dsBins, nUniq)
# 9. Classify
# 80/20 random train split over the SOURCE cells only.
train = runif(nrow(ds[isSource,]))<0.8
# slightly different setup for multiclass and binary classification
if (length(unique(sourceCellTypes)) > 2) {
xg=xgboost(data=ds[isSource,][train, ] ,
label=as.numeric(sourceCellTypes[train])-1,
objective="multi:softmax", num_class=length(unique(sourceCellTypes)),
eta=0.7 , nthread=5, nround=20, verbose=0,
gamma=0.001, max_depth=5, min_child_weight=10)
} else {
xg=xgboost(data=ds[isSource,][train, ] ,
label=as.numeric(sourceCellTypes[train])-1,
eta=0.7 , nthread=5, nround=20, verbose=0,
gamma=0.001, max_depth=5, min_child_weight=10)
}
# 10. Predict
# Predict classes for the TARGET cells and map class indices back to labels.
predictedClasses = predict(xg, ds[!isSource, ])
predictedClasses <- levels(sourceCellTypes)[predictedClasses + 1]
targetCellTypes = colData(target)[,"cell_type1"]
# Confusion matrix: predicted (rows) vs published (columns) cell types.
props <- table(predictedClasses, targetCellTypes) %>%
as.data.frame() %>%
spread(key = "targetCellTypes", value = "Freq") %>%
column_to_rownames("predictedClasses")
# Column-normalized proportions, rounded for display.
mapply(function(x,y) x/y, props, colSums(props)) %>%
`rownames<-`(rownames(props)) %>%
round(2)
alpha alpha.contaminated beta beta.contaminated delta delta.contaminated
alpha 0.97 0.30 0.51 0.19 0.71 0.22
beta 0.00 0.02 0.44 0.10 0.06 0.00
delta 0.00 0.00 0.00 0.00 0.10 0.11
gamma 0.00 0.00 0.00 0.00 0.00 0.00
other 0.03 0.68 0.05 0.71 0.12 0.67
gamma gamma.contaminated
alpha 0.61 0.38
beta 0.00 0.00
delta 0.01 0.00
gamma 0.00 0.00
other 0.38 0.62
alpha alpha.contaminated beta beta.contaminated delta delta.contaminated
alpha 855 18 239 6 35 2
beta 1 1 209 3 3 0
delta 0 0 0 0 5 1
gamma 0 0 0 0 0 0
other 30 41 24 22 6 6
gamma gamma.contaminated
alpha 52 3
beta 0 0
delta 1 0
gamma 0 0
other 32 5
|
de85ce28cd87b0e611b17c8ed678e85d983fd740
|
306a2f9b10c63884e56f3eddd77c83c3b96df55e
|
/Joanne/Old/Feed international SVAR into RF/listData2.R
|
019001b8b181a10f67c5d28cec5da80dae2f7cf1
|
[] |
no_license
|
Allisterh/VAR_Sims_rfvar
|
0f74bda155a1d2e0768dcacc08f7a5fa6de8eddd
|
befb94ec18da7a7c88c581249e2382e86a319ebc
|
refs/heads/master
| 2023-03-17T13:32:11.188302
| 2017-10-11T19:15:22
| 2017-10-11T19:15:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 658
|
r
|
listData2.R
|
# Stack the reduced-form VAR residuals of all countries into one panel.
#
# BUG FIXES vs the original: (1) the `for (ic in ...` header and the final
# `return(...)` were both missing a closing parenthesis (syntax errors);
# (2) the first loop iterated `iCountry` but indexed `Y[[i]]`/`dlength[i]`
# with an undefined `i`; (3) `dlength` was never initialized and was assigned
# `dim(datay)` (a length-2 vector) into a single slot — the per-country TIME
# length (nrow) is what the dummy construction below requires.
#
# Args:
#   rfvar7results: list with $countries (character vector) and $voutlst, a
#     per-country list whose $u element holds that country's residual matrix
#     (time in rows).
# Returns: list(Y = stacked residuals as a ts object, X = country dummy
#   matrix, Tsigbrk = cumulative break points between countries (first n-1),
#   countries = the country names).
listData2 <- function(rfvar7results) {
  countries <- rfvar7results$countries
  nCountries <- length(countries)
  Y <- vector("list", nCountries)
  dlength <- integer(nCountries)
  for (i in seq_len(nCountries)) {
    datay <- rfvar7results$voutlst[[countries[i]]]$u
    Y[[i]] <- datay
    dlength[i] <- nrow(datay)
  }
  Y <- do.call(rbind, Y)
  Y <- ts(Y, start = 1, frequency = 1)
  Tlength <- nrow(Y)
  # One 0/1 dummy column per country marking its rows in the stacked panel.
  X <- matrix(0, Tlength, nCountries)
  start <- 0
  for (ic in seq_len(nCountries)) {
    X[start + seq_len(dlength[ic]), ic] <- 1
    start <- start + dlength[ic]
  }
  # Break points fall between countries, so only the first n-1 lengths count.
  Tsigbrk <- cumsum(dlength[seq_len(nCountries - 1)])
  list(Y = Y, X = X, Tsigbrk = Tsigbrk, countries = countries)
}
|
0e0fd4d1d7a9bca96cf32eb54a439e6f9fcea7e1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/hyfo/examples/collectData.Rd.R
|
d3262b3adc67861114e9be590e7de30839294fbd
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 479
|
r
|
collectData.Rd.R
|
library(hyfo)
### Name: collectData
### Title: Collect data from different csv files.
### Aliases: collectData
### ** Examples
# Use the example data bundled with the hyfo package.
folder <- file.path(path.package("hyfo"), 'extdata')
# NOTE: the resolved folder may vary between installations; if this example
# fails, build the path to the example files locally instead.
a <- collectData(folder, fileType = 'csv', range = c(10, 20, 1,2))
# More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/
|
92c9e4042e78994b8958edd952745c397e8ee665
|
7b6da5dcf62921494da07794c2a02d5873df5f39
|
/data-raw/melbweather.R
|
15bb4ff5d591e1163ccf4fbd34b768b86fa6513b
|
[
"MIT"
] |
permissive
|
frycast/virgo
|
cacd86859673c73de4a3e731226ba406357e68f5
|
25b50ef94de7ebe19e1caff03e7713e69a182e20
|
refs/heads/master
| 2023-02-21T15:11:29.559606
| 2021-01-22T00:55:29
| 2021-01-22T00:55:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,487
|
r
|
melbweather.R
|
## code to prepare the `melbweather` package dataset
# update once rwalkr gets put up on cran
# remotes::install_github("earowang/rwalkr@d528e7b")
# Three months of Melbourne weather data for summer 2019/20.
library(rwalkr)
library(dplyr)
start <- as.Date("2019-12-01")
end <- as.Date("2020-02-29")
# Keep only regular (non-averaged) measurements for six sensor types:
# temperature, relative humidity, barometric pressure,
# particulate matter 2.5 and 10, and wind speed.
sensors <- c("TPH.TEMP", "TPH.RH", "TPH.PRESSURE", "PM2.5", "PM10", "WS")
# Human-readable column names, keyed by the raw sensor codes below.
sensors_clean <- c("ambient_temperature",
                   "relative_humidity",
                   "barometric_pressure",
                   "pm2.5",
                   "pm10",
                   "wind_speed")
names(sensors_clean) <- sensors
# Download the raw long-format feed and recode sensor names / values.
melbweather <- melb_weather(start, end)
melbweather <- melbweather %>%
  filter(sensor_type %in% sensors) %>%
  mutate(sensor_type = sensors_clean[sensor_type],
         value = as.numeric(value))
# Reshape long -> wide: one column per sensor, one row per site/time.
melbweather <- tidyr::pivot_wider(melbweather,
                                  id_cols = c("site", "date_time", "date"),
                                  names_from = sensor_type)
# Attach each site's longitude/latitude from the sensor metadata.
sites <- select(pull_weather_sensors(), site_id, longitude, latitude)
melbweather <- left_join(melbweather, sites, by = c("site" = "site_id"))
melbweather <- melbweather %>%
  select(site, longitude, latitude, date_time, date, everything())
usethis::use_data(melbweather, overwrite = TRUE)
|
e6e5890b0ba3b8107ad10ec1dd2551e50b66dae1
|
7be551356fbadf4f0c2eea4e06c1f6d1856f6288
|
/Case Studies/R/property_price_prediction/Problem 2/app.R
|
7a736d5ee09ce3e3f27f282f569d4481fbff0f23
|
[
"MIT"
] |
permissive
|
AnanduR32/Datascience
|
160c686f770adc086637b6467bbe6dd84508ce12
|
17a0b3056f558d917cec222b2d27a9d16920a9b6
|
refs/heads/master
| 2023-08-25T07:46:10.391445
| 2021-10-04T17:05:27
| 2021-10-04T17:05:27
| 252,497,031
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,153
|
r
|
app.R
|
library(caret)
library(stats)
library(shiny)

# Shiny app that predicts house prices from a model fitted offline and
# stored in ./models/final_model.rda. The user supplies plot area,
# construction year, subway proximity and district; the server shows the
# model's prediction.

ui <- fluidPage(
    titlePanel("House price prediction App"),
    sidebarLayout(
        sidebarPanel(
            sliderInput("square", "Square Area of plot", 0, 650, 1,step = 1),
            sliderInput("constructionTime", "Year constructed", 1950, 2020, 1,step = 1),
            radioButtons("subway", "Subway nearby:",
                         c("Yes" = "Has_Subway",
                           "No" = "No_Subway")
            ),
            selectInput("district", "Select district:",
                        c("Chao Yang" = "ChaoYang",
                          "Chang Ping" = "ChangPing",
                          "Dong Cheng" = "DongCheng",
                          "Men Tou Gou" = "MenTouGou",
                          "Xi Cheng" = "XiCheng",
                          "Feng Tai" = "FengTai",
                          "Hai Dian" = "HaiDian",
                          "Fa Xing" = "FaXing",
                          "Fang Shang" = "FangShang",
                          "Da Xing" = "DaXing",
                          "ShiJing Shan" = "ShiJingShan",
                          "Shun Yi" = "ShunYi",
                          "Tong Zhou" = "TongZhou"
                        )),
            # BUG FIX: trailing commas after submitButton(), htmlOutput() and
            # sidebarLayout() caused "argument is missing" errors; removed.
            submitButton("submit")
        ),
        mainPanel(
            # BUG FIX: UI used tableOutput() while the server used
            # renderPrint() emitting HTML, so nothing rendered. Use
            # htmlOutput()/renderUI(HTML(...)) so the markup is rendered.
            htmlOutput("print")
        )
    )
)

server <- function(input, output) {
    # Load the fitted model once at startup: load() returns the name of the
    # restored object and get() fetches the object itself.
    mdl = get(load(file ="./models/final_model.rda", .GlobalEnv))
    output$print = renderUI({
        # Coerce the reactive inputs to the types the model was trained on.
        square = as.numeric(input$square)
        constructionTime = as.numeric(input$constructionTime)
        subway = as.character(input$subway)
        district = as.character(input$district)
        data = data.frame(square,constructionTime,subway,district)
        pred = predict(mdl, newdata = data)
        HTML(paste0(
            "<h1>Predicted value:</h1>",
            "<h3>The price is estimated to be: ", pred, "</h3>",
            "<h5>Using the parameters you can find the predicted cost of house
            the selected region<h5>"
        ))
    })
}

shinyApp(ui = ui, server = server)
|
c550d9b520afca028cdd57c92c16ad663969a73b
|
7821c14e8b0de6b106830f2008f5508aa8ef83f8
|
/plot3.R
|
aae5307a0f0e8954db15091f174a0f02e9f8ccdd
|
[] |
no_license
|
amandavarella/DataExploratory_Project2
|
c0f504528debe23d1a4e07308727d5b54f9dd9db
|
f9406366b46fae715a7e6a475002a4c11e48ca62
|
refs/heads/master
| 2020-05-24T13:32:33.263710
| 2014-09-19T12:47:24
| 2014-09-19T12:47:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 921
|
r
|
plot3.R
|
##setwd("C:/Users/ur3s/Dados/coursera/DataExploratory/Project2")
library(ggplot2)

# Load the NEI emissions data and the source classification table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Question: of the four source types (point, nonpoint, onroad, nonroad),
# which saw decreases in emissions from 1999-2008 for Baltimore City, and
# which saw increases? Answered with a ggplot2 panel per source type.

# Treat year as a factor so each survey year gets its own category.
NEI <- transform(NEI, year = factor(year))

# Keep only Baltimore City records (fips 24510).
baltimore <- NEI[NEI$fips == "24510", ]

# Scatter of emissions by year, one facet per source type, with a linear
# trend line; y-axis clipped to 0-500 so outliers don't dominate the view.
emissions_plot <- ggplot(baltimore, aes(year, Emissions)) +
  geom_point() +
  facet_grid(. ~ type) +
  geom_smooth(aes(group = 1), method = "lm") +
  coord_cartesian(ylim = c(0, 500))
print(emissions_plot)

dev.copy(png, file = "plot3.png")
dev.off()
|
3e12866b7868f47f7738dc23db8805346789c9dd
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/ddalpha/man/depthf.HR.Rd
|
4ce85b4e9d51a6503f3ddb74657f5322b3255be6
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,914
|
rd
|
depthf.HR.Rd
|
\name{depthf.HR}
\alias{depthf.HR}
\title{Half-Region Depth for Functional Data}
\usage{
depthf.HR(datafA, datafB, range = NULL, d = 101)
}
\arguments{
\item{datafA}{Functions whose depth is computed, represented by a \code{dataf} object of their arguments
and functional values. \code{m} stands for the number of functions.}
\item{datafB}{Random sample functions with respect to which the depth of \code{datafA} is computed.
\code{datafB} is represented by a \code{dataf} object of their arguments
and functional values. \code{n} is the sample size. The grid of observation points for the
functions \code{datafA} and \code{datafB} may not be the same.}
\item{range}{The common range of the domain where the functions \code{datafA} and \code{datafB} are observed.
Vector of length 2 with the left and the right end of the interval. Must contain all arguments given in
\code{datafA} and \code{datafB}.}
\item{d}{Grid size to which all the functional data are transformed. For depth computation,
all functional observations are first transformed into vectors of their functional values of length \code{d}
corresponding to equi-spaced points in the domain given by the interval \code{range}. Functional values in these
points are reconstructed using linear interpolation, and extrapolation.}
}
\value{
A vector of length \code{m} of the half-region depth values.
}
\description{
The half-region depth
for functional real-valued data.
}
\details{
The function returns the vector of the sample half-region depth values.
}
\examples{
datafA = dataf.population()$dataf[1:20]
datafB = dataf.population()$dataf[21:50]
depthf.HR(datafA,datafB)
}
\references{
Lopez-Pintado, S. and Romo, J. (2011).
A half-region depth for functional data.
\emph{Computational Statistics & Data Analysis} \bold{55} (4), 1679--1695.
}
\author{
Stanislav Nagy, \email{nagy at karlin.mff.cuni.cz}
}
\keyword{depth}
\keyword{functional}
|
2566818e19ca1dd707d0af2dd0d5ec13d2f9f800
|
dc411dbbe7f51b8131d06e49ecaed8d7f85e89b8
|
/plot3.R
|
98ec9be93f61697a9ebb8a382e39cae099c88c99
|
[] |
no_license
|
lestarr/ExData_Plotting1
|
5dfc27c39df3166914bebe80b10e71e91da57af8
|
3f28c4615c16715374cd617daab5821897237050
|
refs/heads/master
| 2021-01-24T02:40:04.489823
| 2015-09-12T13:07:09
| 2015-09-12T13:07:09
| 42,356,170
| 0
| 0
| null | 2015-09-12T12:23:40
| 2015-09-12T12:23:39
| null |
UTF-8
|
R
| false
| false
| 672
|
r
|
plot3.R
|
## Plot 3: energy sub-metering over time, three series on one chart.
# getData() is defined in this project-local helper script.
source("C:/2Projects/DataAnalysis/Rprogs/wdir/getDataProject1.R")
filepath <- "plot3.png"
png(file = filepath)
f <- "data/household_power_consumption.txt"
data <- getData(f)
# Combine the date and time columns into one parseable timestamp string.
data[, "datetime"] <- paste(data$Date, data$Time)
# NOTE(review): assumes getData() returns Date formatted as %Y-%m-%d -- confirm.
posix <- strptime(data$datetime, format = "%Y-%m-%d %H:%M:%S")
# BUG FIX: the original plotted from an undefined object `dd`; the data
# frame returned by getData() is named `data`.
plot(posix, data$Sub_metering_1, type = "n", ylab = "Energy sub metering", xlab = "")
lines(posix, data$Sub_metering_1, col= "black")
lines(posix, data$Sub_metering_2, col = "red")
lines(posix, data$Sub_metering_3, col = "blue")
legend("topright", lty = c(1,1,1), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
e46cc751bd743a9d236f4d539fdcd35ba7d0b7b6
|
482eef7d15f1e98406793a8f74eb32dafcfed004
|
/cachematrix.R
|
fe4b864a2d4dc1afb7349e55d7943c8ff84707b6
|
[] |
no_license
|
elegantcoderM/ProgrammingAssignment2
|
948d1f25c73df63493a4372fb763c6c7e6b86465
|
8df1b0df6f77c0a21ec27e59f8ab088e5e8105b3
|
refs/heads/master
| 2021-01-14T09:54:26.402070
| 2015-05-22T08:00:23
| 2015-05-22T08:00:23
| 35,860,042
| 0
| 0
| null | 2015-05-19T04:55:01
| 2015-05-19T04:55:00
| null |
UTF-8
|
R
| false
| false
| 1,108
|
r
|
cachematrix.R
|
## makeCacheMatrix() builds a special "matrix" object: a list of four
## closures sharing one environment that holds a matrix `x` together with a
## cached copy of its inverse. Storing a new matrix via set() invalidates
## the cache; setInverse()/getInverse() write and read the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # stored matrix changed: drop the stale inverse
  }
  get <- function() {
    x
  }
  setInverse <- function(inv) {
    cached_inverse <<- inv
  }
  getInverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## cacheSolve() returns the inverse of the special "matrix" produced by
## makeCacheMatrix(). A previously computed inverse is served from the
## cache; otherwise the inverse is computed with solve() and stored back
## into the object before being returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    result <- solve(x$get(), ...)
    x$setInverse(result)
    return(result)
  }
  print("getting cached inverse")
  cached
}
|
83eb2c7370139729ba0f10b637e8f35b646f1c7c
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/Rfast/man/cova.Rd
|
ce5a50c306696d7151adea233b44ec46b393071c
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,697
|
rd
|
cova.Rd
|
\name{Covariance and correlation matrix}
\alias{cova}
\alias{cora}
\title{
Fast covariance and correlation matrix calculation
}
\description{
Fast covariance and correlation matrix calculation.
}
\usage{
cova(x, center = FALSE)
cora(x)
}
\arguments{
\item{x}{
A matrix with data. It has to be matrix, if it is data.frame for example the function does not turn it into a matrix.
}
\item{center}{
If you want to center the data prior to applying the cross product of the matrix, set this equal to TRUE; otherwise leave it FALSE (the default).
}
}
\details{
The correlation calculation is faster than the built-in function \code{\link{cor}}
as the number of variables increases; the gain becomes noticeable when the
number of variables is high, say from 500 and above. The "cova" on the other hand is always
faster. For the "cova" in specific, we have an option to center the data prior to the cross product. This can be more stable if you
have many tens of thousands of rows due to numerical issues that can arise. It is slightly slower.
For the correlation matrix we took the code from here
https://stackoverflow.com/questions/18964837/fast-correlation-in-r-using-c-and-parallelization/18965892#18965892
}
\value{
The covariance or the correlation matrix.
}
\author{
Michail Tsagris
R implementation and documentation: Michail Tsagris <mtsagris@yahoo.gr> and Manos Papadakis <papadakm95@gmail.com>.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{\link{colVars}, \link{cor}, \link{cov}
}
}
\examples{
x <- matrnorm(100, 40)
s1 <- cov(x)
s2 <- cova(x)
all.equal(s1, s2)
x <- NULL
}
\keyword{ Covariance matrix }
|
d6b3fa072e074a644470daf5d3fd2daf04ff0390
|
b28f74d681bb5dfbf34549c82a8c932f77c1b0a8
|
/man/mascot_refseq2uniprot.Rd
|
2d768ce04d3ba7acd60ea189ba581fabe295b3bf
|
[
"MIT"
] |
permissive
|
sailfish009/proteoQ
|
b07e179e9fe27a90fd76cde2ed7caa55e793e9d6
|
e6a4fe79a21f9a9106a35d78c2ce42d59e9d82e2
|
refs/heads/master
| 2022-12-25T20:06:40.340740
| 2020-10-15T20:18:14
| 2020-10-15T20:18:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 385
|
rd
|
mascot_refseq2uniprot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulData.R
\name{mascot_refseq2uniprot}
\alias{mascot_refseq2uniprot}
\title{Helper function}
\usage{
mascot_refseq2uniprot(dat_dir)
}
\arguments{
\item{dat_dir}{A character string to the working directory. The default is to
match the value under the global environment.}
}
\description{
Helper function
}
|
457c01b792cdfa4435be8c685c8f580a10f549b1
|
b1252f5d2c993d996fb7c06deac8bce8a632eb30
|
/R/density/main.R
|
f32a38269a0f2dc132298c312ecb56457ae1c363
|
[] |
no_license
|
504966078/utils
|
a221d3aed5030808432b11f4fb79a0224b5aa618
|
972c58aa53912d02455187758ff0904cb7bbdd8d
|
refs/heads/master
| 2020-03-23T18:22:33.774854
| 2018-07-23T11:27:46
| 2018-07-23T11:27:46
| 141,904,872
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 579
|
r
|
main.R
|
#!/usr/bin/R --vanilla -q -f
#
# Input file format: Value \t Quantity, no header.
# Plots the kernel density estimate of Value/Quantity against a normal
# curve with the sample mean and standard deviation, written to out.pdf.
raw <- read.csv("data.csv", header = FALSE, sep = "\t")
ratios <- raw$V1 / raw$V2

kernel_est <- density(ratios)
normal_curve <- dnorm(kernel_est$x, mean = mean(ratios), sd = sd(ratios))

pdf("out.pdf")
plot(kernel_est,
     col = "red",
     main = c("Kernel method vs Gaussian distribution",
              paste("data sz=", length(ratios),
                    "mean=", as.integer(mean(ratios)),
                    "variance=", as.integer(sd(ratios)))),
     xlab = "density",
     ylab = "probability")
lines(kernel_est$x, normal_curve, col = "blue")
legend("topright",
       c("Gaussian", "Observed"), text.col = c("blue", "red"))
dev.off()
|
863e713153cb7e3b5669fe62d4a7ae3bafb719df
|
a48797beca55474d7b39676389f77f8f1af76875
|
/man/f2_from_geno.Rd
|
8be2bb8b384dbde277da956d146f793244a6c8a1
|
[] |
no_license
|
uqrmaie1/admixtools
|
1efd48d8ad431f4a325a4ac5b160b2eea9411829
|
26759d87349a3b14495a7ef4ef3a593ee4d0e670
|
refs/heads/master
| 2023-09-04T02:56:48.052802
| 2023-08-21T21:15:27
| 2023-08-21T21:15:27
| 229,330,187
| 62
| 11
| null | 2023-01-23T12:19:57
| 2019-12-20T20:15:32
|
R
|
UTF-8
|
R
| false
| true
| 6,752
|
rd
|
f2_from_geno.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{f2_from_geno}
\alias{f2_from_geno}
\title{Compute blocked f2 statistics}
\usage{
f2_from_geno(
pref,
inds = NULL,
pops = NULL,
blgsize = 0.05,
maxmem = 8000,
maxmiss = 0,
minmaf = 0,
maxmaf = 0.5,
pops2 = NULL,
outpop = NULL,
outpop_scale = TRUE,
transitions = TRUE,
transversions = TRUE,
auto_only = TRUE,
keepsnps = NULL,
afprod = FALSE,
fst = FALSE,
poly_only = c("f2"),
format = NULL,
adjust_pseudohaploid = TRUE,
remove_na = TRUE,
apply_corr = TRUE,
qpfstats = FALSE,
verbose = TRUE,
...
)
}
\arguments{
\item{pref}{Prefix of \emph{PLINK/EIGENSTRAT/PACKEDANCESTRYMAP} files.
\emph{EIGENSTRAT/PACKEDANCESTRYMAP} have to end in \code{.geno}, \code{.snp}, \code{.ind}, \emph{PLINK} has to end in \code{.bed}, \code{.bim}, \code{.fam}}
\item{inds}{Individuals for which data should be extracted}
\item{pops}{Populations for which data should be extracted. If both \code{pops} and \code{inds} are provided, they should have the same length and will be matched by position. If only \code{pops} is provided, all individuals from the \code{.ind} or \code{.fam} file in those populations will be extracted. If only \code{inds} is provided, each indivdual will be assigned to its own population of the same name. If neither \code{pops} nor \code{inds} is provided, all individuals and populations in the \code{.ind} or \code{.fam} file will be extracted.}
\item{blgsize}{SNP block size in Morgan. Default is 0.05 (5 cM). If \code{blgsize} is 100 or greater, if will be interpreted as base pair distance rather than centimorgan distance.}
\item{maxmem}{Maximum amount of memory to be used. If the required amount of memory exceeds \code{maxmem}, allele frequency data will be split into blocks, and the computation will be performed separately on each block pair. This doesn't put a precise cap on the amount of memory used (it used to at some point). Set this parameter to lower values if you run out of memory while running this function. Set it to higher values if this function is too slow and you have lots of memory.}
\item{maxmiss}{Discard SNPs which are missing in a fraction of populations higher than \code{maxmiss}}
\item{minmaf}{Discard SNPs with minor allele frequency less than \code{minmaf}}
\item{maxmaf}{Discard SNPs with minor allele frequency greater than than \code{maxmaf}}
\item{pops2}{If specified, only pairs between \code{pops} and \code{pops2} will be computed}
\item{outpop}{Keep only SNPs which are heterozygous in this population}
\item{outpop_scale}{Scale f2-statistics by the inverse \code{outpop} heterozygosity (\code{1/(p*(1-p))}). Providing \code{outpop} and setting \code{outpop_scale} to \code{TRUE} will give the same results as the original \emph{qpGraph} when the \code{outpop} parameter has been set, but it has the disadvantage of treating one population different from the others. This may limit the use of these f2-statistics for other models.}
\item{transitions}{Set this to \code{FALSE} to exclude transition SNPs}
\item{transversions}{Set this to \code{FALSE} to exclude transversion SNPs}
\item{auto_only}{Keep only SNPs on chromosomes 1 to 22}
\item{keepsnps}{SNP IDs of SNPs to keep. Overrides other SNP filtering options}
\item{afprod}{Return negative average allele frequency products instead of f2-statistics.
Setting \code{afprod = TRUE} will result in more precise f4-statistics when the original data
had large amounts of missingness, and should be used in that case for \code{\link{qpdstat}}
and \code{\link{qpadm}}. It can also be used for outgroup f3-statistics with a fixed outgroup
(for example for \code{\link{qpgraph}}); values will be shifted by a constant amount compared
to regular f3-statistics. This shift affects the fit of a graph only by small amounts, possibly
less than bias in regular f3-statistics introduced by large amounts of missing data.}
\item{fst}{Write files with pairwise FST for every population pair. Setting this to FALSE can make \code{extract_f2} faster and will require less memory.}
\item{poly_only}{Specify whether SNPs with identical allele frequencies in every population should be discarded (\code{poly_only = TRUE}), or whether they should be used (\code{poly_only = FALSE}). By default (\code{poly_only = c("f2")}), these SNPs will be used to compute FST and allele frequency products, but not to compute f2 (this is the default option in the original ADMIXTOOLS).}
\item{format}{Supply this if the prefix can refer to genotype data in different formats
and you want to choose which one to read. Should be \code{plink} to read \code{.bed}, \code{.bim}, \code{.fam} files, or \code{eigenstrat}, or \code{packedancestrymap} to read \code{.geno}, \code{.snp}, \code{.ind} files.}
\item{adjust_pseudohaploid}{Genotypes of pseudohaploid samples are usually coded as \code{0} or \code{2}, even though only one allele is observed. \code{adjust_pseudohaploid} ensures that the observed allele count increases only by \code{1} for each pseudohaploid sample. If \code{TRUE} (default), samples that don't have any genotypes coded as \code{1} among the first 1000 SNPs are automatically identified as pseudohaploid. This leads to slightly more accurate estimates of f-statistics. Setting this parameter to \code{FALSE} treats all samples as diploid and is equivalent to the \emph{ADMIXTOOLS} \code{inbreed: NO} option. Setting \code{adjust_pseudohaploid} to an integer \code{n} will check the first \code{n} SNPs instead of the first 1000 SNPs.}
\item{apply_corr}{Apply small-sample-size correction when computing f2-statistics (default \code{TRUE})}
\item{qpfstats}{Compute smoothed f2-statistics (default \code{FALSE}). In the presence
of large amounts of missing data, this option can be used to retain information
from all SNPs while introducing less bias than setting \code{maxmiss} to values greater
than 0. When setting \code{qpfstats = TRUE}, most other options to \code{extract_f2} will
be ignored. See \code{\link{qpfstats}} for more information. Arguments to
\code{\link{qpfstats}} can be passed via \code{...}}
\item{verbose}{Print progress updates}
\item{...}{Pass arguments to \code{\link{qpfstats}}}
}
\value{
A 3d array of f2-statistics (or scaled allele frequency products if \code{afprod = TRUE})
}
\description{
This function prepares data for various other \emph{ADMIXTOOLS 2} functions. It reads data from genotype files,
computes allele frequencies and blocked f2-statistics for selected populations, and returns them as a 3d array.
}
\seealso{
\code{\link{f2_from_precomp}} for reading previously stored f2-statistics into R, \code{\link{extract_f2}} for storing f2-statistics on disk
}
|
2c47225a9b430476fd567aa428a02ac4cf28a642
|
5795af8f47e78443b25a679210a17bf4f08aed71
|
/answer_07_09/answer.R
|
85056073e68b2d9f99088ad7cd9cf9d959f0d2cd
|
[] |
no_license
|
Ylab-Shiny/projects2018
|
94edbbfc257aa48c7c54376bfb04194298a260c3
|
eb6c31da029cc740e417d4e1db8a0b85c099aab8
|
refs/heads/master
| 2020-03-18T03:49:20.844508
| 2018-11-09T02:26:43
| 2018-11-09T02:26:43
| 134,255,755
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,477
|
r
|
answer.R
|
############################################################################################################################
###### answer.R -- test whether the 2017 monthly data has a positive trend #################################################
############################################################################################################################
#### Load libraries ####
library(tidyverse)
#### Load data ####
# 2017 departmental dataset
df2017 <- read_rds("dep_df_2017.rds")
#### Reshape data ####
## 1. Slice the label with substr() (e.g. "2017-04") and append it as a new column with mutate() ##
x <- df2017 %>% mutate(`年-月` = substr(label, 1, 7))
## 2. Group by the year-month column with group_by(), then aggregate each group with summarise_each(funs(sum)) ##
## NOTE(review): summarise_each()/funs() are deprecated in modern dplyr -- consider summarise(across(...)).
x_group <- x %>% group_by(`年-月`) %>% summarise_each(funs(sum))
## 3. Select the needed columns (campus-wide electricity = Dep1) ##
x_select <- x_group %>% select(`年-月`, `電力消費量[kWh]` = Dep1)
## 4. Add a rank column with row_number() ##
x_rank <- x_select %>% mutate(`順位` = row_number(`電力消費量[kWh]`))
## 5. No algorithm came to mind, so this count vector was built by eye and appended ##
cnt <- c(10, 8, 4, 0, 2, 4, 2, 2, 1, 0, 0, 0)
# Total count
sum_cnt <- sum(cnt)
# Append the count column
x_cnt <- x_rank %>% mutate(`個数` = cnt)
## 6. Compute the test statistic and append it ##
x_t <- x_cnt %>% mutate(`検定統計量T` = 2 * sum_cnt - 個数 * (個数-1)/2)
############################################# Positive trend test ########################################################
# Null hypothesis: the time series has no positive trend
# Alternative hypothesis: the time series has a positive trend
# Significance level: 0.05
out <- t.test(x_t$検定統計量T, alternative="greater")
print(out)
## The p-value is below the significance level, so the null is rejected
## and the series is judged to have a positive trend
##########################################################################################################################
#### Write results ####
# Set the working directory
setwd("7月9日打ち合わせ 解答例/") # "/" works the same as "\\" here
# Save the dataset
write_excel_csv(x_t, "dataset.csv")
# Save the analysis results
save(list = ls(), file = "all.Rdata")
|
98e9244f17543e09f642efacbf7f368f0cf7dae7
|
705255987191f8df33b8c2a007374f8492634d03
|
/man/get-Samples-character-method.Rd
|
4f79511c33eebf1d6b285eb08a1af840fb3008d2
|
[] |
no_license
|
Roche/crmPack
|
be9fcd9d223194f8f0e211616c8b986c79245062
|
3d897fcbfa5c3bb8381da4e94eb5e4fbd7f573a4
|
refs/heads/main
| 2023-09-05T09:59:03.781661
| 2023-08-30T09:47:20
| 2023-08-30T09:47:20
| 140,841,087
| 24
| 9
| null | 2023-09-14T16:04:51
| 2018-07-13T11:51:52
|
HTML
|
UTF-8
|
R
| false
| true
| 1,702
|
rd
|
get-Samples-character-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Samples-methods.R
\name{get,Samples,character-method}
\alias{get,Samples,character-method}
\title{Get specific parameter samples and produce a data.frame}
\usage{
\S4method{get}{Samples,character}(x, pos = -1L, envir = NULL, mode = NULL, inherits = NULL)
}
\arguments{
\item{x}{the \code{\linkS4class{Samples}} object}
\item{pos}{the name of the parameter}
\item{envir}{for vectorial parameters, you can give the indices of the
elements you would like to extract. If \code{NULL}, the whole vector samples
will be returned}
\item{mode}{not used}
\item{inherits}{not used}
}
\value{
the data frame suitable for use with \code{\link[ggmcmc]{ggmcmc}}
}
\description{
Here you have to specify with \code{pos} which
parameter you would like to extract from the \code{\linkS4class{Samples}}
object
}
\examples{
# nolint start
# Create some data
data <- Data(x = c(0.1, 0.5, 1.5, 3, 6, 10, 10, 10),
y = c(0, 0, 0, 0, 0, 0, 1, 0),
cohort = c(0, 1, 2, 3, 4, 5, 5, 5),
doseGrid = c(0.1, 0.5, 1.5, 3, 6,
seq(from = 10, to = 80, by=2)))
# Initialize a model
model <- LogisticLogNormal(mean = c(-0.85, 1),
cov = matrix(c(1, -0.5, -0.5, 1), nrow = 2),
ref_dose = 56)
# Get posterior for all model parameters
options <- McmcOptions(burnin = 100,
step = 2,
samples = 2000)
set.seed(94)
samples <- mcmc(data, model, options)
# now extract the alpha0 samples (intercept of the regression model)
alpha0samples <- get(samples, "alpha0")
# nolint end
}
\keyword{methods}
|
50bc7574d8caef0d2536f1fb98a5201055f73f1a
|
a01bef83939991f1f45958a919c1a2cedf290304
|
/man/linearInterpolation.Rd
|
90454eedb98081b0e09d3ffb14e0ce4f3d55c7e1
|
[] |
no_license
|
JimGrange/trimr
|
73ab3e2568bc103f56209102e65191ffdfe23f9d
|
f35e44dbe7868d5111a48c8a679b9cbcb04c541e
|
refs/heads/master
| 2022-05-16T19:52:27.518171
| 2022-05-05T03:08:44
| 2022-05-05T03:08:44
| 28,407,568
| 12
| 3
| null | 2018-06-14T19:42:06
| 2014-12-23T16:19:25
|
R
|
UTF-8
|
R
| false
| true
| 796
|
rd
|
linearInterpolation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{linearInterpolation}
\alias{linearInterpolation}
\title{SDs used for the recursive / moving criterion trimming methods}
\format{
A data frame with 97 rows and 3 columns:
\describe{
\item{sampleSize}{Sample size of the data set being passed}
\item{nonRecursive}{The standard deviation to use as the criterion for
the nonRecursive function}
\item{modifiedRecursive}{The standard deviation to use as the criterion
for the modifiedRecursive function}
}
}
\usage{
linearInterpolation
}
\description{
A data frame containing the SDs used for each sample size as trimming
criterion for the nonRecursive function and the modifiedRecursive function
}
\keyword{datasets}
|
0315765b516bbfa4183e29c8736961282f484006
|
f041fcacb47f63214ec9e6c809ea86cddf1ef2dc
|
/R/00-setup.R
|
a16c36e2d5290c6c685b6e0fd4c32c55feef1deb
|
[] |
no_license
|
maurolepore/start-with-data
|
a955a7669581edf670683012f2e8d0fca1d1fe49
|
6c3f0bb76f6b84edc3bbb8d90fafef98baf337fc
|
refs/heads/main
| 2021-11-25T14:14:32.928996
| 2018-06-10T22:25:11
| 2018-06-10T22:25:11
| 136,827,041
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 407
|
r
|
00-setup.R
|
# install.packages("usethis")
# Configure this project to start R with a blank slate: never restore
# .RData on startup and never save the workspace on exit.
usethis::use_blank_slate("project")
# WARNING: This will affect all your RStudio projects (recommended)
# usethis::use_blank_slate("user")

# Challenge ---------------------------------------------------------------
# (The `surveys` object is created elsewhere in the course material.)
# What is the class of the object surveys?
# How many rows and how many columns are in this object?
# How many species have been recorded during these surveys?
|
e277994e121c73fae64aabc2122be5630d60e2b3
|
335eb7d0a695b9a7bc5e1f5ed9dc676272776703
|
/tests/testthat/test-1-nbits.R
|
ebafe7a377613a38642f05d005f1092eb58c7b82
|
[] |
no_license
|
ayazagan/amalgam
|
750298c89709814696642304b52b24d0fab9b4a7
|
00ca3804f031cc67ff104ce35c8522f82b444ec9
|
refs/heads/master
| 2022-04-16T11:59:22.642445
| 2020-02-25T03:40:22
| 2020-02-25T03:40:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 326
|
r
|
test-1-nbits.R
|
library(amalgam)
# 12 bits: six 2-bit groups (00 00 00 01 11 11) or three 4-bit groups
# (0000 0001 1111), used to exercise both decoding widths below.
bin <- c(0,0,0,0,0,0,0,1,1,1,1,1)
test_that("nbits function deparses binary input correctly", {
  # 2-bit groups decode to 0 0 0 1 3 3
  expect_equal(
    int.from2bit(bin),
    c(0,0,0,1,3,3)
  )
  # the specialised 2-bit routine should agree with the general N-bit one
  expect_equal(
    int.from2bit(bin),
    int.fromNbit(bin, nbits = 2)
  )
  # 4-bit groups decode to 0 1 15
  expect_equal(
    int.fromNbit(bin, nbits = 4),
    c(0,1,15)
  )
})
|
7af0b8733ce6a26f51f651be36d1959bbe8d7977
|
60fd1968c7fa8c8d5c794cdf583c3d47adbc3d45
|
/tests/testthat/test_hector.R
|
825f453161c06ffbc2b132c8a285643bb913ffdf
|
[] |
no_license
|
l5d1l5/hectorcal.rcmip.cmip6
|
91f59a906947eadd4e1a8bf482dcd9049722cc44
|
89d4db12be6781fc96b0884fde505422c32a656a
|
refs/heads/master
| 2022-04-06T00:15:32.637376
| 2019-12-05T18:40:15
| 2019-12-05T18:40:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,342
|
r
|
test_hector.R
|
context('Test the Hector support functions.')
library(dplyr)
# NOTE(review): hard-coded absolute path to a local Hector checkout; this
# only runs on the original author's machine -- replace with library(hector)
# or a relative path before sharing.
devtools::load_all('/Users/dorh012/Documents/2019/AS/hector')
testthat::test_that('correct version of Hector is being used', {
  years <- 1850:2100
  ini <- system.file('input/hector_rcp45_constrained.ini', package = 'hector')
  core <- newcore(inifile = ini, name = 'cmip5')
  run(core)
  default_rslt <- fetchvars(core = core, dates = years, vars = c(GLOBAL_TEMP(), ATMOSPHERIC_CH4()))
  # Scale the CH4 concentration down to one third of the default run.
  default_rslt %>%
    filter(variable == ATMOSPHERIC_CH4()) %>%
    mutate(value = value * 1/3) ->
    reduced_CH4_conc
  # Feed the reduced concentrations back into the core and rerun.
  setvar(core = core, dates = reduced_CH4_conc$year, values = reduced_CH4_conc$value,
         var = reduced_CH4_conc$variable, unit = reduced_CH4_conc$units)
  reset(core)
  run(core)
  fetchvars(core = core, dates = years, vars = c(GLOBAL_TEMP(), ATMOSPHERIC_CH4())) %>%
    rename(new_value = value) ->
    reduced_CH4_out
  # Every year should differ between the default and the reduced-CH4 run;
  # if not, the installed Hector ignored the setvar() call.
  comparison_df <- left_join(reduced_CH4_out, default_rslt, by = c("scenario", "year", "variable", "units"))
  testthat::expect_true(all(c(comparison_df$value - comparison_df$new_value) != 0))
})
testthat::test_that('newCMIP6core works',{
  # First check to make sure that the newCMIPcore will throw errors.
  testthat::expect_error(newCMIP6core('bogus_sceanrio'), 'cmip_scenario not recognized.')
  # Baseline: a CMIP5-era RCP4.5 constrained run.
  rcp45_core <- newcore(system.file('input/hector_rcp45_constrained.ini', package = 'hector'))
  run(rcp45_core)
  rcp45_results <- fetchvars(rcp45_core, 1850:2100,
                             c(hector::GLOBAL_TEMP(), hector::ATMOSPHERIC_CH4())) %>%
    dplyr::rename(cmip5_output = value)
  shutdown(rcp45_core)
  # Make a new core that uses the CMIP6 inputs
  cmip6_run <- 'concentration.ssp434'
  cmip6_core <- newCMIP6core(cmip6_run)
  reset(cmip6_core)
  run(cmip6_core)
  cmip6_results <-fetchvars(cmip6_core, 1850:2100,
                            c(hector::GLOBAL_TEMP(), hector::ATMOSPHERIC_CH4())) %>%
    dplyr::rename(cmip6_output = value)
  # The CMIP6 concentrations should differ from the CMIP5 baseline for every
  # non-temperature variable; identical values would mean the CMIP6 inputs
  # were not actually used.
  cmip6_results %>%
    left_join(rcp45_results, by = c('year', 'variable')) %>%
    filter(variable != 'Tgav') %>%
    mutate(dif = cmip6_output - cmip5_output) %>%
    pull(dif) ->
    difference
  testthat::expect_true(all(difference!= 0))
})
|
48763376f1f6121492e6437bee03e562ab6eea8f
|
0badc974bc713fdd8af80e17cc00d1778054418a
|
/SLIS434Project.R
|
82ffab0d3870770eb92711b37faa63a4cb8a75ce
|
[] |
no_license
|
whhall/SLIS434Project
|
317bcb59c075f6f6520eed9207856a6a8e237ad1
|
5db8d732e3da1e0804ffafb1629e9daf71bcb041
|
refs/heads/main
| 2023-01-14T04:05:00.059250
| 2020-11-20T14:26:55
| 2020-11-20T14:26:55
| 313,080,475
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,773
|
r
|
SLIS434Project.R
|
# Setup for the SLIS434 state-level COVID-19 analysis: load (and install)
# the plotting/mapping/correlation packages, then read the user-selected
# CSV of per-state COVID statistics into the global `df` that every
# function below operates on.
# NOTE(review): install.packages() at the top of a script re-installs on
# every run; consider guarding with requireNamespace() instead.
install.packages("usmap")
library(usmap)
library(learningr)
library(ggplot2)
library(janitor)
install.packages("ggpubr")
library(ggpubr)
install.packages("tidyverse")
library(tidyverse)
library(Hmisc)
install.packages("corrplot")
library(corrplot)
install.packages("PerformanceAnalytics")
library(PerformanceAnalytics)
# require() calls duplicate the library() calls above; redundant but harmless.
require(learningr)
require(ggplot2)
require(janitor)
require(ggpubr)
require(tidyverse)
require(Hmisc)
require(corrplot)
require(PerformanceAnalytics)
# Interactively pick the data file and load it as the global data frame.
COVID_data_file <- file.choose()
df <- read.csv(COVID_data_file)
View(df)
# Return a single randomly chosen row (state) from the global `df`.
f_random_state <- function(){
  picked <- sample(seq_len(nrow(df)), 1)
  df[picked, ]
}
f_random_state()
# Return a two-column data frame from the global `df`: State plus the
# requested variable `x` (a column name given as a string).
f_variable <- function(x){
  wanted <- c("State", x)
  df[wanted]
}
f_variable("COVID.19.Cases")
# Interactively prompt for a variable name, then report which states have
# the maximum and minimum values of that variable in `df`.
f_variable_rate <- function(){
  var <- readline(prompt = "Enter a variable:")
  # which() returns every matching row, so ties produce vectors of states.
  max_index <- which(df[var] == max(df[var]))
  min_index <- which(df[var] == min(df[var]))
  paste(df$State[max_index], " has the most ", var, ", and ", df$State[min_index], " has the least ", var, sep = "")
}
f_variable_rate()
# For every column of `df` except State, print the states holding the
# maximum and minimum value of that column.
f_variable_rate_max_min <- function(){
  for(i in names(df)){
    if(i != "State"){
      max_index <- which(df[i] == max(df[i]))
      min_index <- which(df[i] == min(df[i]))
      print(paste("Max rate for", i, "happened in", df$State[max_index], "and the Min rate of", i, "happened in", df$State[min_index]))
    }
  }
}
f_variable_rate_max_min()
# Prompt for a variable, print its national average, and list the states
# whose value exceeds that average.
f_high_variable_rate_states <- function(){
  var <- readline(prompt = "Enter a variable:")
  avg <- mean(df[,var])
  print(paste("The average of", var, "is", avg))
  print(paste("Below are states with", var, "that are greater than", avg))
  above_avg_df <- df[which(df[var] > avg),]
  print(above_avg_df[c("State", var)])
}
f_high_variable_rate_states()
# Scatter plot of column `v2` against column `v1` from the global `df`.
#
#   v1, v2 : column names (strings) mapped to the x and y aesthetics
#   c, s   : point colour and shape
#   x, y   : axis labels
# Returns a ggplot object.
f_plot_variable <- function(v1, v2, c, s, x, y){
  # NOTE: par() only affects base graphics, not ggplot2; kept for any base
  # plots drawn afterwards.
  par(mai=c(0.9,0.9,0.3,0.07))
  # aes_string() is deprecated in ggplot2; map string column names via the
  # .data pronoun instead (behavior unchanged -- xlab/ylab set the labels).
  ggplot(df, aes(x = .data[[v1]], y = .data[[v2]])) +
    geom_point(stat = "identity", colour = c, shape = s) +
    theme(axis.text.x=element_text(angle=90, hjust=1)) + xlab(x) + ylab(y)
}
f_plot_variable("State", "COVID.19.Cases", "red", 2, "State", "Number of Cases")
# Choropleth of variable `v` on a US state map (usmap::plot_usmap).
#   v      : column name in `df` used as the fill value
#   c      : state border colour
#   lc, hc : low/high colours of the continuous fill scale
f_US_Map_Variable <- function(v, c, lc, hc){
  df_2 <- df
  # plot_usmap() joins on FIPS codes, so derive them from the state names.
  df_2$fips <- fips(df_2$State)
  plot_usmap(data = df_2, values = v, color = c) +
    scale_fill_continuous(low = lc, high = hc, label = scales::comma) +
    theme(legend.position = "right")
}
f_US_Map_Variable("COVID.19.Cases", "red", "blue", "orange")
# Same choropleth as f_US_Map_Variable(), restricted to a census
# region/division (or explicit state list) via plot_usmap's `include`.
#   r : region selector passed to `include` (e.g. c(.south_region))
f_US_Map_Variable_Reg_Div <- function(v, r, c, lc, hc){
  df_2 <- df
  df_2$fips <- fips(df_2$State)
  plot_usmap(data = df_2, include = r, values = v, color = c) +
    scale_fill_continuous(low = lc, high = hc, label = scales::comma) +
    theme(legend.position = "right")
}
f_US_Map_Variable_Reg_Div("COVID.19.Cases", c(.south_region), "red", "blue", "orange")
# Scatter plot of `y` vs `x` (column names as strings) with a fitted
# regression line, confidence band, and Pearson correlation annotation
# (ggpubr::ggscatter).
f_US_Map_Variable_Correlation <- function(x, y){
  ggscatter(df, x = x, y = y,
            add = "reg.line",
            conf.int = TRUE,
            cor.coef = TRUE,
            cor.method = "pearson",
            xlab = x, ylab = y)
}
f_US_Map_Variable_Correlation("COVID.19.Death.Rate", "COVID.19.Cases")
# Bug fix: removed a stray top-level `cor_df` reference here. That object
# is only created inside f_US_Map_Variable_Correlogram(), so evaluating it
# at this point raised "object 'cor_df' not found" when sourcing the script.
# Correlogram of all numeric columns of `df` (columns 2..n), blanking the
# cells whose correlation is not significant at level `s`.
#   m : corrplot method (e.g. "color")
#   t : matrix portion to draw ("lower"/"upper"/"full")
#   s : significance cutoff, numeric
f_US_Map_Variable_Correlogram <- function(m, t, s) {
  # Hmisc::rcorr() returns both the correlation matrix ($r) and p-values ($P).
  cor_df <- rcorr(data.matrix(df[,2:ncol(df)]))
  M_df <- cor_df$r
  p_mat_df <- cor_df$P
  corrplot(M_df, method = m, type = t, order = "hclust",
           p.mat = p_mat_df, sig.level = s, insig = "blank")
}
# Bug fix: sig.level must be numeric. The original call passed the string
# "0.05", which coerces the p-value comparison to a fragile lexicographic
# (character) comparison.
f_US_Map_Variable_Correlogram("color", "lower", 0.05)
# Scatter-plot matrix of all numeric columns (2..n) with histograms on the
# diagonal and pairwise correlations
# (PerformanceAnalytics::chart.Correlation).
f_US_Map_Variable_Multi_Figs_corr <- function(){
  par(mar = c(0,0,0,0))
  chart.Correlation(df[,2:ncol(df)], histogram = TRUE, pch = 19)
}
f_US_Map_Variable_Multi_Figs_corr()
|
7933ebcd93d2b6bda1ad41710e35cc213328f7ad
|
f2ed5996cd8e1fa8527a09b00eb41796c1b57cb5
|
/scripts/make_graphs.R
|
30ffbff6cc61fe0eb0eeccec69f0fd945ee5e47f
|
[] |
no_license
|
UI-Research/la-bias-assessment-jpc
|
7b6e4ea91f792440f03e5274c6ff35cc11900fb4
|
4ea38bda0a8b813935ae923adc756bdb0107b90a
|
refs/heads/master
| 2022-11-13T08:37:53.545115
| 2019-10-01T18:46:06
| 2019-10-01T18:46:06
| 212,173,819
| 0
| 1
| null | 2022-11-11T18:30:46
| 2019-10-01T18:46:58
|
HTML
|
UTF-8
|
R
| false
| false
| 13,501
|
r
|
make_graphs.R
|
library(knitr)
library(tidyverse)
library(urbnthemes)
library(sf)
library(grid)
library(gridExtra)
library(leaflet)
library(extrafont)
library(mapview)
library(htmlwidgets)
library(patchwork)
### Helper functions
plotr <- function(p = NULL, title = NULL, subtitle = NULL,
source = "",
endnote = "") {
# Function that fills in logo, source, endnote, and title. Took out subtitle for now
# Input:
# p: a ggplot
# title: String, Your plot's title
# subtitle: String, Your plot's subtitle
# source: String, Your plot's source
# endnote: String, Your plot's endnote
titler <- function(title) {
textGrob(title,
x = unit(0, "npc"),
hjust = 0,
vjust = 0,
gp = gpar(fontsize = 14, fontface="bold", fontfamily = "Times New Roman"))
}
subtitler <- function(subtitle) {
textGrob(subtitle,
x = unit(0, "npc"),
hjust = 0,
vjust = 0,
gp = gpar(fontsize = 9.5, fontfamily = "Times New Roman"))
}
sourcer <- function(source) {
grobTree(
textGrob("Source: ",
name = "source1",
x = unit(0, "npc"),
hjust = 0,
vjust = 0,
gp = gpar(fontsize = 8, fontfamily = "Times New Roman", fontface = "bold")),
textGrob(source,
x = unit(0, "npc") + grobWidth("source1"),
hjust = 0,
vjust = 0,
gp = gpar(fontsize = 8, fontfamily = "Times New Roman"))
)
}
noter <- function(endnote) {
grobTree(
textGrob("Notes: ",
name = "note1",
x = unit(0, "npc"),
hjust = 0,
vjust = 0,
gp = gpar(fontsize = 8, fontfamily = "Times New Roman", fontface = "bold")),
textGrob(endnote,
x = unit(0, "npc") + grobWidth("note1"),
hjust = 0,
vjust = 0,
gp = gpar(fontsize = 8, fontfamily = "Times New Roman"))
)
}
caption <- grobTree(
gp = gpar(fontsize = 7, hjust = 1),
textGrob(label = "I N S T I T U T E",
name = "caption1",
x = unit(1, "npc"),
y = unit(0, "npc"),
hjust = 1,
vjust = 0),
textGrob(label = "U R B A N ",
x = unit(1, "npc") - grobWidth("caption1") - unit(0.01, "lines"),
y = unit(0, "npc"),
hjust = 1,
vjust = 0,
gp = gpar(col = "#1696d2")))
grid.arrange(titler(title),
#subtitler(subtitle),
p,
caption,
sourcer(source),
#noter(endnote),
heights = c(3, 27, 1, 1))
}
# Need to create custom annotation function to specify one facet to annotate in
annotation_custom2 <- function (grob, xmin = -Inf, xmax = Inf, ymin = -Inf,
ymax = Inf, dd) {
layer(data = dd, stat = StatIdentity, position = PositionIdentity,
geom = ggplot2:::GeomCustomAnn,
inherit.aes = TRUE, params = list(grob = grob,
xmin = xmin, xmax = xmax,
ymin = ymin, ymax = ymax))}
### Graph Functions
# Read a *_stats.csv summary file (two rows: citywide average and
# data-implied average per statistic) and build the demographic-bias bar
# chart: each bar is the gap between the data-implied and citywide
# averages, with +/-3 SD error bars, faceted into economic vs race
# variables and coloured by over/under-representation.
# Returns a ggplot object.
make_demo_diff_plot = function(stats_filepath, title){
  stats = suppressMessages(read_csv(stats_filepath))
  # Lookup table mapping statistic codes to human-readable labels.
  var_names = tibble(statistic = c('pct_bach','pct_pov','pct_unemp','pct_white','pct_black','pct_anai',
                                   'pct_asian','pct_nh_pi','pct_other_race', 'pct_two_race', 'pct_hisp',
                                   'pct_smaller_races'),
                     full_name = c("Bachelor's degree or higher", "Families in poverty (last 12 months)",
                                   "Unemployment rate","White non-Hispanic ", "Black non-Hispanic ",
                                   "Alaskan Native/American Indian non-Hispanic ", "Asian non-Hispanic ",
                                   "Native Hawaiian/Pacific Islander non-Hispanic ","Percent Other",
                                   "Two or more races", "Hispanic","Other race non-Hispanic"))
  var_names$full_name = str_wrap(var_names$full_name, width = 17)
  colors= c("#9d9d9d", "#ec008b")
  # Long table of the per-statistic standard deviations, one column per
  # average type (columns 2 and 3 after spread(), renamed positionally).
  sd = stats %>% select(ends_with("sd")) %>%
    mutate(mean = c("Citywide Average","Data Implied Average")) %>%
    gather(statistic, value_sd, -mean) %>%
    mutate(statistic = str_replace_all(statistic, "cdp_",""),
           statistic = str_replace_all(statistic, "_sd","")) %>%
    spread(mean, value_sd) %>%
    {colnames(.)[2] = "value_city_avg_sd"; .} %>%
    {colnames(.)[3] = "value_data_avg_sd"; .}
  # Main plotting table: per-statistic city vs data averages, their
  # difference, and the combined SD of the difference (sqrt of summed
  # variances); the smallest race categories are dropped/relabelled.
  s = stats %>%
    select(-starts_with("hh"), -ends_with("sd")) %>%
    mutate(mean = c("Citywide Average","Data Implied Average"))%>%
    gather(statistic, value, - mean) %>%
    spread(mean, value) %>%
    {colnames(.)[2] = "value_city_avg"; .} %>%
    {colnames(.)[3] = "value_data_avg"; .} %>%
    left_join(sd, by = "statistic") %>%
    mutate(vartype = factor(ifelse(statistic %in% c('pct_bach', 'pct_unemp',
                                                    'pct_pov'), "econ variables",
                                   "race variables")),
           diff_city_data = round(value_data_avg - value_city_avg,2),
           value_avg_diff_sd = sqrt(value_city_avg_sd^2 + value_data_avg_sd^2))%>%
    left_join(var_names)%>%
    #reorder factors for better plotting order
    {.$full_name <- factor(.$full_name, levels=c("Bachelor's degree\nor higher",
                                                 "Families in\npoverty (last 12\nmonths)", "Unemployment rate",
                                                 "White non-\nHispanic", "Black non-\nHispanic",
                                                 "Asian non-\nHispanic", "Alaskan Native/\nAmerican Indian\nnon-Hispanic",
                                                 "Native Hawaiian/\nPacific Islander\nnon-Hispanic","Two or more races",
                                                 "Other race non-\nHispanic", "Percent Other","Hispanic")); .}%>%
    filter(!statistic %in% c("pct_anai","pct_nh_pi","pct_other_race","pct_two_race")) %>%
    # gather(mean, value, ends_with("avg")) %>%
    mutate(statistic = ifelse(statistic =="pct_smaller_races",
                              "pct_other", statistic),
           positive_diff = ifelse(diff_city_data>0, "Overrepresented",
                                  "Underrepresented")) %>%
    arrange(desc(statistic, vartype))
  # Rotated axis-side labels, drawn only on the econ facet via
  # annotation_custom2() below.
  overrep_grob=grobTree(textGrob("Overrepresented",x=0.02, y=0.51,hjust=0, rot =90,
                                 gp=gpar(col="#55b748",fontsize=11,fontface="plain", alpha = 0.9)))
  underrep_grob=grobTree(textGrob("Underrepresented",x=0.02, y=0.18,hjust=0, rot =90,
                                  gp=gpar(col="#db2b27",fontsize=11,fontface="plain", alpha = 0.8)))
  demo = ggplot(s, aes(x=full_name, y = diff_city_data, fill = positive_diff))+
    # geom_point(show.legend = F) +
    geom_bar(show.legend = F, stat = "identity", width = 0.4, alpha = 0.7) +
    geom_hline(yintercept = 0, size = 1.2, color = colors[1], alpha = 0.6)+
    geom_errorbar(aes(ymin = diff_city_data-3*value_avg_diff_sd,
                      ymax = diff_city_data+3*value_avg_diff_sd),
                  alpha = 0.6, size = 1.05, width = 0.15,color = colors[1]) +
    # geom_segment(aes(xend = full_name, yend =0), show.legend = F)+
    #the data.frame needs to replicate an actual data value bc it overwrites the data
    annotation_custom2(overrep_grob, dd = data.frame(vartype = "econ variables",
                                                     full_name = s %>%
                                                       filter(statistic == "pct_bach") %>%
                                                       pull(full_name),
                                                     diff_city_data = s %>%
                                                       filter(statistic == "pct_bach") %>%
                                                       pull(diff_city_data),
                                                     positive_diff = "Overrepresented")) +
    annotation_custom2(underrep_grob, dd = data.frame(vartype = "econ variables",
                                                      full_name = s %>%
                                                        filter(statistic == "pct_bach") %>%
                                                        pull(full_name),
                                                      diff_city_data = s %>%
                                                        filter(statistic == "pct_bach") %>%
                                                        pull(diff_city_data),
                                                      positive_diff = "Overrepresented")) +
    scale_fill_manual(values=c("Overrepresented" = "#55b748",
                               "Underrepresented" = "#db2b27")) +
    labs(title = title,
         y="", x="")+
    # Symmetric y limits so zero sits in the middle of the panel.
    scale_y_continuous(labels = function(x) paste0(x, "%"),
                       limits = c(-max(abs(s$diff_city_data)) -
                                    3.25*max(abs(s$value_avg_diff_sd)),
                                  max(abs(s$diff_city_data)) +
                                    3.25*max(abs(s$value_avg_diff_sd)))) +
    facet_grid(.~vartype, scales = "free_x", space = "free") +
    # facet_wrap(vars(vartype), ncol=2, scales = "free_x",
    #            strip.position = "bottom", space = "free") +
    theme(strip.placement = "outside",
          strip.background = element_blank(),
          strip.text.x = element_blank(),
          legend.position = "top",
          axis.ticks.x = element_blank(),
          plot.margin=grid::unit(c(0.01,0.01,0.01,0.01), "mm"))
  return(demo)
}
# Build a leaflet map of tract-level reporting bias from a *_mapdata.csv
# file containing WKT geometries, a `sig_diff` flag, and a `diff_prop`
# bias value. Non-significant tracts are drawn grey; significant ones are
# coloured on a blue-white-pink diverging scale. Optionally adds legends,
# an HTML title, and saves a snapshot. Returns the leaflet widget.
make_tract_bias_map=function(map_filepath, outpath= NA, save = F, legend = T, title = ""){
  map_data = suppressMessages(read_csv(map_filepath))
  map_data = st_as_sf(map_data, wkt ="geometry")
  #getting boundary shape as multipolygon
  map_data_c = st_union(map_data)
  #setting up title for Leaflet in HTML
  # NOTE(review): the closing tag is '<b>' rather than '</b>' -- the bold
  # tag is never closed; confirm this renders as intended.
  g_title = paste0('<b><font face = "Lato" size ="3">', paste(title, " Geographic Bias"), '</font><b>')
  ### Note: We are setting the NA colors to a deeper pink than the color scale,
  #but only because we manually confirmed that all prop_diffs in the data not
  #between [-0.05, 0.05] are above 0.05.
  pal_tot = colorNumeric(palette = c("#12719e","#ececec","#af1f6b"),
                         domain=c(-0.05, 0.05), na.color = "#761548")
  # Three polygon layers: grey non-significant tracts, coloured significant
  # tracts, then the black outer boundary on top.
  map_plot =
    leaflet(options = leafletOptions(zoomControl = F, attributionControl=F)) %>%
    addProviderTiles("CartoDB.Positron") %>%
    addPolygons(data= map_data %>% filter(!sig_diff),
                color = "#eaeaea",
                smoothFactor = 0.5,
                opacity = 0.6,
                fillOpacity = 0.5,
                weight = 0.9,
                fillColor = "#8e8e8e"
    ) %>%
    addPolygons(data=(map_data%>% filter(sig_diff)),
                color ="#eaeaea",
                smoothFactor = 0.5,
                opacity = 0.5,
                fillOpacity = 0.5,
                weight = 0.9,
                fillColor = ~pal_tot((map_data%>% filter(sig_diff))$diff_prop)) %>%
    addPolygons(data = map_data_c,
                color = "black",
                smoothFactor = 0.5,
                opacity = 0.5,
                fillOpacity = 0,
                weight = 2)
  if (legend){
    # Legend palette is reversed relative to pal_tot so the legend reads
    # top-to-bottom with the transform below sorting values decreasing.
    map_plot = map_plot %>%
      addLegend("bottomright",
                colors =c("#8e8e8e"),
                labels= c("No significant bias"),
                title= "",
                opacity = 0.8) %>%
      addLegend(position ="bottomright",
                pal= colorNumeric(palette = c("#af1f6b","#ececec","#12719e"),
                                  domain=c(-.05, .05)),
                values = c(-0.05,.05),
                opacity=0.8,
                title = "Tract Reporting Bias",
                labFormat = labelFormat(
                  suffix = "%",
                  transform = function(x) 100 * sort(x, decreasing = TRUE)))
  }
  if(!(title == "")){
    map_plot = map_plot %>%
      addControl(g_title, position = "topleft")
  }
  if (save){
    mapshot(map_plot, file=paste0(outpath), remove_url = F) }
  return(map_plot)
}
# Build the four LAPD outputs -- demographic-bias bar charts and
# tract-level geographic-bias maps for arrests and crimes -- then save
# the charts as PNGs and the leaflet maps as HTML snapshots in output/.
lapd_arrests_demo = make_demo_diff_plot("data/output-data/arrests_2010_present.csv_stats.csv",
                                        title = "LAPD Arrests Demographic Bias")
lapd_crimes_demo = make_demo_diff_plot("data/output-data/crimes_2010_present.csv_stats.csv",
                                       title = "LAPD Crimes Demographic Bias")
lapd_arrests_geo = make_tract_bias_map("data/output-data/arrests_2010_present.csv_mapdata.csv",
                                       title = "LAPD Arrests")
lapd_crimes_geo = make_tract_bias_map("data/output-data/crimes_2010_present.csv_mapdata.csv",
                                      title = "LAPD Crimes")
ggsave("output/lapd_arrests_demo_bias.png", lapd_arrests_demo, width = 9, height = 5, units = "in")
ggsave("output/lapd_crimes_demo_bias.png", lapd_crimes_demo, width = 9, height = 5, units = "in")
mapshot(lapd_arrests_geo, url = "output/lapd_arrests_geo_bias.html", remove_url = F)
mapshot(lapd_crimes_geo, url = "output/lapd_crimes_geo_bias.html", remove_url = F)
# devtools::install_github("UrbanInstitute/urbnthemes@chartbook")
|
6c3b50d2f5ba7d91d11e5b66f685b460fc84c195
|
53cf23d7a0e1d3f1983171d119e6c3263e30995a
|
/R/PortfReturns.R
|
f37be81424c59c9780ca5861962c859e5d93c71e
|
[] |
no_license
|
redmode/blotter
|
54bcdb52c5a7e56d034e9b0dda57fdf8ae254a0a
|
0b32d38f3163f35102bf70cdf8b5e4af769606bc
|
refs/heads/master
| 2020-05-31T14:26:46.759479
| 2014-08-19T01:18:07
| 2014-08-19T01:18:07
| 23,091,843
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,175
|
r
|
PortfReturns.R
|
#' Calculate portfolio instrument returns
#'
#' This function (for now) calculates return on initial equity for each instrument
#' in the portfolio or portfolios that make up an account. These columns will be additive
#' to return on capital of each portfolio, or of the entire account.
#'
#' This function exists because of R/Finance community requests by Mark Breman and Thomas Bolton
#' @export
#' @param Account string name of the account to generate returns for
#' @param method for now, only 'contribution' is supported
#' @param \dots any other passthru parameters (like \code{native} for \code{.getBySymbol}
#' @param Dates xts style ISO 8601 date subset to retrieve, default NULL (all dates)
#' @param Portfolios concatenated string vector for portfolio names to retrieve returns on, default NULL (all portfolios)
#' @param period one of daily
#' @note
#' TODO handle portfolio and account in different currencies (not hard, just not done)
#'
#' TODO explicitly handle portfolio weights
#'
#' TODO provide additional methods of calculating returns
#'
#' TODO support additions and withdrawals to available capital
PortfReturns <- function (Account, method=c('contribution'),...,Dates=NULL,Portfolios=NULL,period=c('daily','none'))
{ # @author Brian Peterson
  period <- period[1] #use first
  aname <- Account
  # Resolve the account object from the .blotter environment, accepting
  # either a bare name ("acct") or an already-prefixed one ("account.acct").
  if(!grepl("account\\.",aname)) Account<-try(get(paste("account",aname,sep='.'),envir=.blotter), silent=TRUE)
  else Account<-try(get(aname,envir=.blotter), silent=TRUE)
  if(inherits(Account,"try-error"))
    stop(paste("Account ",aname," not found, use initAcct() to create a new account"))
  if(!inherits(Account,"account")) stop("Account ",aname," passed is not the name of an account object.")
  if(is.null(Portfolios)) Portfolios = names(Account$portfolios)
  table=NULL
  if(period=='daily'){
    # NOTE(review): this branch delegates to dailyEqPL() and does not pass
    # the Dates argument through -- confirm that is intended.
    table = dailyEqPL(Portfolios = Portfolios)
  } else {
    for(pname in Portfolios){
      Portfolio <- getPortfolio(pname)
      # Default to everything up to the last summary date; note Dates is
      # only computed once, from the first portfolio visited.
      if(is.null(Dates)) Dates <- paste("::",last(index(Portfolio$summary)),sep='')
      #extract per-symbol Net.Trading.PL columns for this portfolio
      ptable = .getBySymbol(Portfolio = Portfolio, Attribute = "Net.Trading.PL", Dates = Dates,...)
      ptable = PerformanceAnalytics:::zerofill(ptable)
      #combine columns across portfolios
      if(is.null(table)) table=ptable
      else table=cbind(table,ptable)
    }
  }
  #TODO check portfolio and account currencies and convert if necessary
  #TODO handle additions and withdrawals in equity
  # Divide P&L by the account's initial equity so each column is a
  # per-instrument return on initial capital.
  if(!is.null(attr(Account,'initEq'))){
    initEq<-as.numeric(attr(Account,'initEq'))
    if(initEq==0) stop("Initial equity of zero would produce div by zero NaN,Inf,-Inf returns, please fix in initAcct().")
    table = table/initEq
  }
  return(table)
}
###############################################################################
# Blotter: Tools for transaction-oriented trading systems development
# for R (see http://r-project.org/)
# Copyright (c) 2008-2014 Peter Carl and Brian G. Peterson
#
# This library is distributed under the terms of the GNU Public License (GPL)
# for full details see the file COPYING
#
# $Id: PortfReturns.R 1596 2014-03-30 17:21:55Z braverock $
#
###############################################################################
|
be23c9ee0a9811ad0343fb3412c280ab5067eb29
|
5ae46ded978297e5d78d838bd667bc0f3ec3e52d
|
/HF/rfiles/clean.r
|
aef1608617c0c79b27075da5e961599c495520e2
|
[] |
no_license
|
sykstan/soot
|
42588fb4ca87637cf15d3b166b59f257bd1b65c6
|
c581a73b4e46a03c50e7a189e07ba1ffc2565c59
|
refs/heads/master
| 2021-01-17T17:50:41.765238
| 2016-10-18T01:17:05
| 2016-10-18T01:17:05
| 70,653,246
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,106
|
r
|
clean.r
|
# script to clean data and create
# data objects to work with
# loads imported.data
# and saves to cleaned.data
require(data.table)
require(magrittr)
require(tidyr)
load(file = "~/GoogleDrive/scs-it/imported.data")

### ============== DATA MUNGING =============== ###
# extrapolation: general formula
# why SCS scaled for 2T, 2 (weird, uses 2S), SCF, frag1_2S,
# different numbers for frag1_2T and frag1_2
# not scaled for complex_2S??
# and no numbers for frag2_2T onwards (funny coeff for frag1_SCF and frag2_2S)
# >> SCF(intEn, VQZ) + (3^3 * 2(intEn, VTZ) - 4^4 * 2(intEn, VQZ)) / (-37)
# weird, "." wouldn't work as a separator so used "w"
# Reshape to long (one row per system/spin-component energy), convert the
# energies to kJ/mol (2625.5 kJ/mol per hartree), then split the packed
# "system_w_spincomponent" column names and spread back to wide.
mp2.dt <- gather(mp2.dt, f.g.e, en, complex_rw2S:frag2_gwSCF) %>%
  data.table()
mp2.dt[, en := en * 2625.5] # convert to kJ/mol
mp2.dt <- separate(mp2.dt, f.g.e, c("sys_g", "spen"), sep = "w") %>%
  spread(sys_g, en) %>%
  data.table()
# Same reshaping for the ionic-liquid dataset.
il_mp2.dt <- gather(il_mp2.dt, f.g.e, en, complex_rwSS:frag2_gwOS) %>%
  data.table()
il_mp2.dt[, en := en * 2625.5] # convert to kJ/mol
il_mp2.dt <- separate(il_mp2.dt, f.g.e, c("sys_g", "spen"), sep = "w") %>%
  spread(sys_g, en) %>%
  data.table()
# so that no numbers in future column names
mp2.dt[ , spen := as.factor(spen)]
il_mp2.dt[ , spen := as.factor(spen)]
levels(mp2.dt$spen) <- c("two", "OS", "SS", "SCF") # original 2, 2S, 2T, SCF
#write.table(il_spen.dt, file = "il_spen.dt", sep = " | ", row.names = FALSE)

### ============== SETTING KEYS AND FACTOR ORDERS =============== ###
# order by INCREASING basis set size
mp2.dt$basis <- factor(mp2.dt$basis, levels = basisList)
il_mp2.dt$basis <- factor(il_mp2.dt$basis, levels = basisList)
setkey(mp2.dt, basis, system, suite, spen)
setkey(il_mp2.dt, basis, system, chain, cation, anion, conf, spen)
setkey(bench.dt, system, suite, categ)
setkey(il_bench.dt, system, chain, cation, anion, conf)
setkey(il_sapt.dt, system, chain, cation, anion, conf)

### =========== ILs merge SAPT with CCSD(T) ============== ###
# Outer merge so benchmark rows without SAPT data (and vice versa) survive.
il_bench.dt <- merge(il_bench.dt, il_sapt.dt, all = TRUE)
### ======= ILs benchmark merging--outer merge =========== ###
### ============== CALCULATING NEW COLUMNS =============== ###
# only separated by spen, so that can calculate cp and ncp
# ncp uses real-basis fragment energies; cp uses ghost-basis fragments.
mp2.dt[, ncp := complex_r - frag1_r - frag2_r]
mp2.dt[, cp := complex_r - frag1_g - frag2_g]
il_mp2.dt[, ncp := complex_r - frag1_r - frag2_r]
il_mp2.dt[, cp := complex_r - frag1_g - frag2_g]
# calculate aVQZ SCF, to subtract from benchmark to obtain correlation energy
tmp.SCF <- mp2.dt[basis == "aVQZ" & spen == "SCF"][ ,c("system", "suite", "cp"), with = FALSE]
setkey(tmp.SCF, system, suite) # for merging
bench.dt <- tmp.SCF[bench.dt] # merge
setnames(bench.dt, "cp", "HF") # name change
bench.dt[, corrEn := benchmark - HF] # correlation correction
bench.dt[, santiagoCorr := 2625.5 * santiagoCorr] # convert to kJ/mol
# set up categories by 25% rule
# note that all systems have Disp contributing to more than 25% of Total
bench.dt[ Ind / Total > 0.25 & (CT < -4 | CT / Total > 0.25), form := "IDC"]
bench.dt[ Ind / Total > 0.25 & CT / Total < 0.25, form := "ID"]
bench.dt[ Ind / Total < 0.25 & CT / Total < 0.25, form := "D"]
## technically should be in idc.r, but this is needed for why_ncp.r
# # same for ILs
# # note however, CT is larger for ILs, not sure if we'll get sensible distributions
# il_bench.dt[ Ind / Total > 0.25 & CT < -4, form := "IDC"]
# il_bench.dt[ Ind / Total > 0.25 & CT / Total < 0.25, form := "ID"]
# il_bench.dt[ Ind / Total < 0.25 & CT / Total < 0.25, form := "D"]
################ kinda very bad #####################
### ================================================= ###
### ========= for S88 ======== ###
# Reshape the NCP energies into one wide row per (basis, system): OS/SS
# columns per component plus their OS/SS ratio, joined as "spen.component".
spen.dt <- gather(mp2.dt[spen == "OS" | spen == "SS", !c("frag1_g", "frag2_g", "cp"), with = FALSE] # omit unraw data cols
                  , comp, en, complex_r:ncp) %>%
  spread(spen, en) %>%
  data.table()
spen.dt[, ratio := OS / SS]
spen.dt <- gather(spen.dt, spen, en, OS:ratio) %>%
  unite(lol, spen, comp, sep = ".") %>%
  spread(lol, en) %>%
  data.table()
# shorter names
setnames(spen.dt
         , c("OS.complex_r", "OS.frag1_r", "OS.frag2_r"
             , "SS.complex_r", "SS.frag1_r", "SS.frag2_r"
             , "ratio.complex_r", "ratio.frag1_r", "ratio.frag2_r")
         , c("OS.c", "OS.1", "OS.2"
             , "SS.c", "SS.1", "SS.2"
             , "ratio.c", "ratio.1", "ratio.2"))
setkey(spen.dt, basis, system, suite)
### ======= for ILs ======= ###
il_spen.dt <- gather(il_mp2.dt[, !c("frag1_g", "frag2_g", "cp"), with = FALSE] # omit unraw data cols
                     , comp, en, complex_r:ncp) %>%
  spread(spen, en) %>%
  data.table()
il_spen.dt[, ratio := OS / SS]
il_spen.dt <- gather(il_spen.dt, spen, en, OS:ratio) %>%
  unite(lol, spen, comp, sep = ".") %>%
  spread(lol, en) %>%
  data.table()
# shorter names
setnames(il_spen.dt
         , c("OS.complex_r", "OS.frag1_r", "OS.frag2_r"
             , "SS.complex_r", "SS.frag1_r", "SS.frag2_r"
             , "ratio.complex_r", "ratio.frag1_r", "ratio.frag2_r")
         , c("OS.c", "OS.1", "OS.2"
             , "SS.c", "SS.1", "SS.2"
             , "ratio.c", "ratio.1", "ratio.2"))
setkey(il_spen.dt, basis, system, chain, cation, anion, conf)
# merge with benchmarks
spen.dt <- merge(bench.dt[, !c("HF", "categ", "benchmark", "santiagoCorr", "form"), with = FALSE], spen.dt)
il_spen.dt <- merge(il_bench.dt, il_spen.dt)
### ================================================= ###
### =========== COMBINE S88 and ILs NCP MP2 DATA ========== ###
# how about merge spen.dt with il_spen.dt; fill suite column with NA for ILs
all_spen.dt <- rbind(spen.dt[, !c("benchmark"), with = FALSE]
                     , il_spen.dt[, !c("chain", "cation", "anion", "conf"), with = FALSE]
                     , fill = TRUE)
# add factor in: ionic-liquid rows get suite = "IL"
all_spen.dt[is.na(suite), suite := as.factor("IL")]
setkey(all_spen.dt, system, suite, basis)
### ================================================= ###
### ================================================= ###
### ================ FOR CP BOOTSTRAP =================== ###
### ================================================= ###
# Same OS/SS/ratio reshaping as above, but built from the cp energies
# (ghost-basis fragment columns) instead of the ncp ones.
cp.spen.dt <- gather(mp2.dt[spen == "OS" | spen == "SS", !c("frag1_r", "frag2_r", "ncp"), with = FALSE] # omit unraw data cols
                     , comp, en, complex_r:cp) %>%
  spread(spen, en) %>%
  data.table()
cp.spen.dt[, ratio := OS / SS]
cp.spen.dt <- gather(cp.spen.dt, spen, en, OS:ratio) %>%
  unite(lol, spen, comp, sep = ".") %>%
  spread(lol, en) %>%
  data.table()
# ILs
cp.il_spen.dt <- gather(il_mp2.dt[, !c("frag1_r", "frag2_r", "ncp"), with = FALSE] # omit unraw data cols
                        , comp, en, complex_r:cp) %>%
  spread(spen, en) %>%
  data.table()
cp.il_spen.dt[, ratio := OS / SS]
cp.il_spen.dt <- gather(cp.il_spen.dt, spen, en, OS:ratio) %>%
  unite(lol, spen, comp, sep = ".") %>%
  spread(lol, en) %>%
  data.table()
# rename to shorter column names
setnames(cp.spen.dt
         , c("OS.complex_r", "OS.frag1_g", "OS.frag2_g"
             , "SS.complex_r", "SS.frag1_g", "SS.frag2_g"
             , "ratio.complex_r", "ratio.frag1_g", "ratio.frag2_g")
         , c("OS.c", "OS.1", "OS.2"
             , "SS.c", "SS.1", "SS.2"
             , "ratio.c", "ratio.1", "ratio.2"))
setkey(cp.spen.dt, basis, system, suite)
setnames(cp.il_spen.dt
         , c("OS.complex_r", "OS.frag1_g", "OS.frag2_g"
             , "SS.complex_r", "SS.frag1_g", "SS.frag2_g"
             , "ratio.complex_r", "ratio.frag1_g", "ratio.frag2_g")
         , c("OS.c", "OS.1", "OS.2"
             , "SS.c", "SS.1", "SS.2"
             , "ratio.c", "ratio.1", "ratio.2"))
setkey(cp.il_spen.dt, basis, system, chain, cation, anion, conf)
# merge with benchmarks
cp.spen.dt <- merge(bench.dt[, !c("HF", "categ", "benchmark", "santiagoCorr", "form"), with = FALSE], cp.spen.dt)
cp.il_spen.dt <- merge(il_bench.dt, cp.il_spen.dt)
### ================================================== ###
### ================ COMBINE ALL CP TOGETHER ========= ###
cp.all_spen.dt <- rbind(cp.spen.dt[, !c("benchmark"), with = FALSE]
                        , cp.il_spen.dt[, !c("chain", "cation", "anion", "conf"), with = FALSE]
                        , fill = TRUE)
# add factor in: ionic-liquid rows get suite = "IL"
cp.all_spen.dt[is.na(suite), suite := as.factor("IL")]
setkey(cp.all_spen.dt, system, suite, basis)
### ================================================== ###
### ================================================== ###
### ============= NUMBER OF BASIS FNs ================ ###
numbas.dt$basis <- factor(numbas.dt$basis, levels = basisList)
numbas.dt <- merge(numbas.dt, spen.dt, by = c("basis", "suite", "system"))
setkey(numbas.dt, basis, suite, system)
### ================================================== ###
### ================ SAVE STUFF ====================== ###
# Persist all cleaned objects for the downstream analysis scripts.
save(list = c("basisList", "mp2.dt", "il_mp2.dt", "bench.dt", "spen.dt", "il_spen.dt", "all_spen.dt"
              , "cp.all_spen.dt", "numbas.dt")
     , file = "~/GoogleDrive/scs-it/cleaned.data")
|
1e611fa4969972ffbaead06b2075172b8bf9c9c0
|
ee6434c66468957934d216d7839f595b4fc07bb9
|
/MungingCoursera.R
|
a7f4b72cfd05f00a269c72df3b33e41e8a16cf07
|
[] |
no_license
|
sdperez/RSampleCode
|
73f72e13b9ba918efdb5a49934f90eb3548f9ff7
|
75ec4694ad92138075fee52cdb1d6fc05ca52751
|
refs/heads/master
| 2021-01-01T19:42:09.028964
| 2015-08-21T00:41:52
| 2015-08-21T00:41:52
| 13,844,254
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,483
|
r
|
MungingCoursera.R
|
##########################################################
#Data Munging
#Operations for cleaning and seting datasets for analysis.
#From "Data Analysis" course on Coursera.
#
########################################################
# NOTE(review): setwd() with an absolute path makes this script
# non-portable; prefer project-relative paths.
setwd("C:/Users/sdperez.EMORYUNIVAD/Desktop/My Documents/EducationalMaterials/DataAnalysis-Coursera/week2/008dataMungingBasics")
#Let's get some data first
#fileUrl1 <- "https://dl.dropbox.com/u/7710864/data/reviews-apr29.csv"
#fileUrl2 <- "https://dl.dropbox.com/u/7710864/data/solutions-apr29.csv"
#download.file(fileUrl1,destfile="./data/reviews.csv",method="curl")
#download.file(fileUrl2,destfile="./data/solutions.csv",method="curl")
sampleData <- read.csv("./data/reviews.csv")
solutions <- read.csv("./data/solutions.csv")
head(sampleData,2) #look at names and first observations;
# Prints the lowercased names only; the result is NOT assigned back to
# names(sampleData).
tolower(names(sampleData)) #turn all column names into lowercase
splitnames<-strsplit(names(sampleData),"\\.") #splits the names of variables at a dot
#the \\ 'escapes' the character because . is a special character in R.
#Now you can select the first element in each of the lists created
#(everything before the . becomes the new name)
splitnames<-strsplit(names(sampleData),"_")
sapply(splitnames, function(x){x[1]})
sub("_", "",names(sampleData)) #substitutes the character'_' for no character "". Only the first instance.
# Bare `gsub` just echoes the function definition to the console; gsub()
# replaces all instances (vs sub(), which replaces only the first).
gsub #replaces all the instances.
sub("\\.", "",names(sampleData)) #substitutes the character'.'(notice the \\ escape characters)
|
2b2eed2752bf8e0859b05cdb7b5b6d9249db74a9
|
15d99902cd236f68cbe6ff1b64799a66a50fc9a0
|
/crosswalk_munge.R
|
9b8f3882bd816eb6ab46f576490b6ae412bb7300
|
[] |
no_license
|
DukeStatSci/thesis-sp18-driscoll-envjustice
|
a09fec63bcd30e10921a53afd18d59ebab9fa224
|
64086fedce0f2017c35d7be8ec0adfe2e8fa14bb
|
refs/heads/master
| 2021-09-12T18:23:25.632246
| 2018-04-19T21:05:22
| 2018-04-19T21:05:22
| 105,419,987
| 1
| 2
| null | 2018-04-13T12:48:43
| 2017-10-01T04:51:40
|
HTML
|
UTF-8
|
R
| false
| false
| 1,729
|
r
|
crosswalk_munge.R
|
library(readr)
library(data.table)
library(dplyr)
library(stringr)

## Build tract-level crosswalks (1990->2010 and 2000->2010) from NHGIS
## block-level crosswalk files, renormalising block weights so that each
## source tract's weights over 2010 tracts sum to 1.
## Raw census data not included in package due to github size constraints.
## Must get the nhgis_blk_1990_blk2010 files to be able to run.
#import the data
cw90 = as.data.frame(fread("data/census/raw/nhgis_blk1990_blk2010_ge.csv"))
names(cw90) = c("id90", "id10", "weight", "area")
#remove the rows that don't reference a 1990 block
cw90 = filter(cw90, id90 != "")
#keep the first 11 chars (state+county+tract GEOID) of the 1990 block id
#NOTE(review): id90 is not zero-padded first -- presumably the 1990 ids
#are already fixed-width strings; verify against the raw file.
cw90$id90 = substr(cw90$id90, 1, 11)
#convert to char, pad to the 15-char block GEOID with leading '0', and
#keep the first 11 chars (tract GEOID) of the 2010 block id
cw90$id10 = as.character(cw90$id10)
cw90$id10 = str_pad(cw90$id10, 15, "left", pad = "0")
cw90$id10 = substr(cw90$id10, 1, 11)
#repeat for 2000
cw00 = as.data.frame(fread("data/census/raw/nhgis_blk2000_blk2010_ge.csv"))
names(cw00) = c("id00", "id10", "weight", "area")
cw00 = filter(cw00, id00 != "")
cw00$id00 = str_pad(cw00$id00, 15, "left", pad = "0")
cw00$id00 = substr(cw00$id00, 1, 11)
cw00$id10 = as.character(cw00$id10)
cw00$id10 = str_pad(cw00$id10, 15, "left", pad = "0")
cw00$id10 = substr(cw00$id10, 1, 11)
# Total outgoing weight per 1990 tract, used to renormalise below.
w90 = cw90 %>%
  group_by(id90) %>%
  summarise(tot_weight = sum(weight))
w90 = as.data.frame(w90)
w90 = merge(cw90, w90, by = "id90")
# Collapse block rows to tract pairs; within a group tot_weight is
# constant, so min() merely extracts it.
cw90 = w90 %>%
  group_by(id90, id10) %>%
  summarize(weight = sum(weight)/min(tot_weight))
write.csv(cw90, file = "data/census/raw/crosswalk_90_tract.csv", row.names = FALSE)
# Same renormalisation for the 2000 crosswalk.
w00 = cw00 %>%
  group_by(id00) %>%
  summarise(tot_weight = sum(weight))
w00 = as.data.frame(w00)
w00 = merge(cw00, w00, by = "id00")
cw00 = w00 %>%
  group_by(id00, id10) %>%
  summarize(weight = sum(weight)/min(tot_weight))
write.csv(cw00, file = "data/census/raw/crosswalk_00_tract.csv", row.names = FALSE)
|
7a1ae07905b000271abace47703d55181f94bf22
|
c2726510193cc92441446ca6486da4f8ff6173c2
|
/man/nest_append_interval.Rd
|
3a35c4ba2f5514236797163ef503e869bc3c6e35
|
[] |
no_license
|
HSPS-DataScience/tsCogs
|
e8ae09e5918adc9a8b6ec57555f645f154b0e08b
|
5935701367e4ba6c8ffa07d0dbed73fa281a6105
|
refs/heads/master
| 2021-09-12T18:13:18.638805
| 2018-04-19T19:26:58
| 2018-04-19T19:26:58
| 120,468,450
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 793
|
rd
|
nest_append_interval.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nest_interval.R
\name{nest_append_interval}
\alias{nest_append_interval}
\title{nest_append_interval}
\usage{
nest_append_interval(nestTib, rawData, type, interval)
}
\arguments{
\item{nestTib}{Nested tibble with `AccountNumber` as identifier}
\item{rawData}{Tibble/Data Frame with the following columns:
\itemize{
\item Account Number (unique identifier)
\item Date
\item Count
}}
\item{type}{Must be one of following strings:
\itemize{
\item \code{years}
\item \code{months}
\item \code{weeks}
\item \code{days}
}}
\item{interval}{Must be positive integer excluding zero}
}
\value{
nested tibble
}
\description{
\strong{Designed to create cognostics by certain interval/ratio in the past}
}
\examples{
test
}
|
43e19292d02d126325ba7a74d361bf770eb86723
|
2d32305806855dc8793ab0348acef458b139f1be
|
/man/r4ss_obj_err.Rd
|
fa178fbe21291d4c4dae45f0950f5651cd9c3b35
|
[
"MIT"
] |
permissive
|
nmfs-fish-tools/SSMSE
|
07a9ed4defb370833864183b7f4a775425c53b3c
|
47445d973a537eaf9a7361f842d3f7a404bca247
|
refs/heads/main
| 2023-08-16T21:18:17.253400
| 2023-08-09T21:40:26
| 2023-08-10T12:20:30
| 197,069,801
| 16
| 5
|
MIT
| 2023-09-08T16:03:34
| 2019-07-15T20:44:06
|
R
|
UTF-8
|
R
| false
| true
| 476
|
rd
|
r4ss_obj_err.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkinput.R
\name{r4ss_obj_err}
\alias{r4ss_obj_err}
\title{Error if object is not an r4ss object}
\usage{
r4ss_obj_err(obj_name = "object ", type = "list")
}
\arguments{
\item{obj_name}{Object name that is not an r4ss object to print in the error}
\item{type}{Type that obj_name was expected to be, but is not.}
}
\description{
Error if object is not an r4ss object
}
\author{
Kathryn Doering
}
|
48dfa007c3982c12accf7c6f46d3c72b330f34c1
|
af2df8e81dba2ecf02a9172037aad2c5f3974344
|
/cachematrix.R
|
70a94848852b93768bc0877bc4781df74520c6aa
|
[] |
no_license
|
aneyrac/ProgrammingAssignment2
|
292ffc4b06c853d06c457e6db1f16c059aa235e4
|
2d2cc33a5d1d6d78c4034b69565c43aec01780fa
|
refs/heads/master
| 2022-07-11T18:08:15.659960
| 2020-05-09T02:37:42
| 2020-05-09T02:37:42
| 262,468,196
| 0
| 0
| null | 2020-05-09T02:03:08
| 2020-05-09T02:03:07
| null |
ISO-8859-10
|
R
| false
| false
| 1,278
|
r
|
cachematrix.R
|
##These functions calculate and cache the inverse of a matrix
##When the user wants to calculate the inverse of a matrix previously computed,
##the saved value is returned instead of doing the calculation again
## Create a "cache matrix": a list of closures that stores a matrix together
## with a memoised copy of its inverse.
##
## Returns a list with four accessor functions:
##   set(y)          -- store a new matrix and invalidate the cached inverse
##   get()           -- retrieve the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- retrieve the cached inverse (NULL if not yet set)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replace the matrix and drop any stale inverse (parent-env assignment).
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## If the inverse is already cached, the cached value is returned (after a
## message) instead of being recomputed; otherwise it is computed with
## solve(), stored back in the cache, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  # Fast path: a previously computed inverse is available.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Slow path: compute the inverse, memoise it, and return it.
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
d93bb5bfeabf17843efe268317e5f6312a1f7a98
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/MGDrivE/inst/testfiles/calcCos/libFuzzer_calcCos/calcCos_valgrind_files/1612727000-test.R
|
eec612c803b84fbfa38fe52315942bd890852b67
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 165
|
r
|
1612727000-test.R
|
# Auto-generated fuzzing regression test (libFuzzer/valgrind harness) for
# MGDrivE::calcCos. Replays one saved input and prints the structure of the
# result; there is no assertion -- a crash-free run is the "pass".
# NOTE(review): the inputs are extreme fuzzer values (a denormal 1x1 matrix
# entry and a huge negative r), not meaningful lat/long data.
testlist <- list(latLongs = structure(3.34845030589153e-315, .Dim = c(1L, 1L)), r = -8.36580345060678e+303)
result <- do.call(MGDrivE::calcCos,testlist)
str(result)
|
9d88edfb36226d6ec54fa615b0e97b8dddc611e2
|
ddb26b2d0dd87c03f6ee3eb94d478b401ad84bcb
|
/src/ClassTest2/include/ClassTestFunctions.R
|
eac081b6e67493de897c7c2a55e14f91d2357efd
|
[] |
no_license
|
jamesbriant/UQ
|
b428707d2513b7b8cc4e1aa1ce8b123bd921cb22
|
790563a9ef3922b623ee04a27592d5d6bb4b6198
|
refs/heads/main
| 2023-04-30T05:22:16.380459
| 2021-05-18T15:40:33
| 2021-05-18T15:40:33
| 303,977,737
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 459
|
r
|
ClassTestFunctions.R
|
# Superposition of four logistic sigmoids with alternating signs (+,-,+,-).
# Each term is 1 / (1 + exp(-2 * beta * (lambda - center))); vectorised in
# `lambda`, so a vector input yields a vector output.
u <- function(lambda){
  betas <- c(20, 20, 5, 5)
  centers <- c(-0.7, -0.3, 0.3, 0.7)
  signs <- c(1, -1, 1, -1)  # (-1)^(m+1) for m = 1..4
  terms <- lapply(seq_along(betas), function(idx) {
    signs[idx] / (1 + exp(-2 * betas[idx] * (lambda - centers[idx])))
  })
  # Accumulate in the same order as the original loop (m = 1..4).
  Reduce(`+`, terms)
}
# Wigner semicircle density with radius R evaluated at x:
#   f(x) = (2 / (pi * R^2)) * sqrt(R^2 - x^2)
# Vectorised in x; produces NaN (with a warning) for |x| > R, as before.
dwigner <- function(x, R=1){
  scale_const <- 2 / (pi * R^2)
  semicircle <- sqrt(R^2 - x^2)
  scale_const * semicircle
}
# Draw n samples from the Wigner semicircle distribution on [-R, R] by
# sampling Beta(3/2, 3/2) on [0, 1] and rescaling linearly to [-R, R].
rwigner <- function(n, R=1){
  beta_draws <- rbeta(n, 1.5, 1.5)
  R * (2 * beta_draws - 1)
}
|
8cf1698f7fbe9f46175bf4e66eee614f7583c9ae
|
1ff6f8fb7a02981c9a385a4bb4fd63ac1a688afd
|
/Homework/Homework 2/Problem 2.R
|
1b631b69c672fba7365d7116deb68456537b281a
|
[] |
no_license
|
yingxuan0806/ESA
|
bf75b8325d9fd8350ee15ddc599a29b2787debf4
|
0b010cc2e26187e1d1571a4c539f6dde216203d5
|
refs/heads/master
| 2021-05-17T15:59:40.785230
| 2020-04-30T16:49:20
| 2020-04-30T16:49:20
| 250,858,555
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,764
|
r
|
Problem 2.R
|
# Homework script: lexicographic linear goal programming (package goalprog)
# and a radar-chart comparison of goal deviations (package fmsb).
rm(list=ls())  # NOTE(review): wipes the whole workspace; avoid in shared code
setwd("~/Documents/SUTD/Term 5/ESA/Homework/Homework 2")  # NOTE(review): machine-specific path
# Install-on-demand pattern: require() returns FALSE (instead of erroring)
# when the package is missing, which triggers installation.
if(!require(goalprog)) {
  install.packages("goalprog")
  library(goalprog)
}
if(!require(fmsb)) {
  install.packages("fmsb")
  library(fmsb)
}
# PROBLEM 2
# Task 1: solve the goal programme with four goals, one priority level each.
# Rows of `coefficients` hold the goal-constraint coefficients, `targets`
# the right-hand sides; p/n weight the positive/negative deviations.
coefficients <- matrix(c(4, 2, 4, 2, 1, 0, 0, 1), nrow = 4, byrow = TRUE)
targets <- c(48, 32, 7, 10)
objective_index <- c(1:4)
priority <- c(1:4)
p <- c(0, 2, 0, 0)
n <- c(1, 1, 5, 5)
achievements <- data.frame(objective = objective_index, priority = priority, p = p, n = n)
solution <- llgp(coefficients = coefficients, targets = targets, achievements = achievements)
solution
# NOTE(review): `summary` shadows base::summary for the rest of the session.
summary <- llgpout(solution$tab, coefficients = coefficients, targets = targets)
summary
# Task 3: re-solve with goals 1 and 2 sharing the top priority level.
priority_new <- c(1, 1, 2, 3)
achievements_new <- data.frame(objective = objective_index, priority = priority_new, p = p, n = n)
solution_new <- llgp(coefficients = coefficients, targets = targets, achievements = achievements_new)
solution_new
summary_new <- llgpout(solution_new$tab, coefficients = coefficients, targets = targets)
summary_new
# Task 4: worst-case absolute deviation |target - achieved| per goal across
# the two prioritisations.
# NOTE(review): the second term in each c(...) wraps f in an extra abs()
# (abs(b - abs(f)) vs abs(b - f) in the first term); harmless if f >= 0,
# but the asymmetry looks accidental -- TODO confirm.
g1 <- c(abs(summary$b[1] - summary$f[1]), abs(summary_new$b[1] - abs(summary_new$f[1])))
max(g1) # 16
g2 <- c(abs(summary$b[2] - summary$f[2]), abs(summary_new$b[2] - abs(summary_new$f[2])))
max(g2) # 16
g3 <- c(abs(summary$b[3] - summary$f[3]), abs(summary_new$b[3] - abs(summary_new$f[3])))
max(g3) # 0
g4 <- c(abs(summary$b[4] - summary$f[4]), abs(summary_new$b[4] - abs(summary_new$f[4])))
max(g4) # 8
# radarchart() expects the max- and min-reference rows prepended to the data
df <- data.frame(G1 = g1, G2 = g2, G3 = g3, G4 = g4)
max_range <- c(20, 20, 4, 12)
min_range <- c(0, 0, 0, 0)
df <- rbind(max_range, min_range, df)
radarchart(df)
legend(x = 1, y = 1, lty = c(1, 1), lwd = c(2.5, 2.5), col = c("black", "red"), c("Task 1", "Task 3"), cex = 0.4)
|
8fa3d5a6aa4e79c2cfb292c408a4bfd8600acfcc
|
2b2aee3352f8a10c121fe74036eddec01b3ee595
|
/man/drawByStrength.Rd
|
c4ae09ab7f2ba1d96f45e617390ffcceed251afc
|
[
"MIT"
] |
permissive
|
rdinnager/slimr
|
56f1fef0a83198bce292dd92dc1014df87c2d686
|
e2fbb7115c7cca82dabd26dc6560e71a8cd0958b
|
refs/heads/master
| 2023-08-21T14:00:36.089104
| 2023-07-31T03:11:09
| 2023-07-31T03:11:09
| 226,999,099
| 8
| 1
|
NOASSERTION
| 2023-08-03T05:44:32
| 2019-12-10T01:04:16
|
R
|
UTF-8
|
R
| false
| true
| 3,732
|
rd
|
drawByStrength.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slim_lang.R
\name{drawByStrength}
\alias{drawByStrength}
\alias{InteractionType$drawByStrength}
\alias{.IT$drawByStrength}
\title{SLiM method drawByStrength}
\usage{
drawByStrength(receiver, count, exerterSubpop)
}
\arguments{
\item{receiver}{An object of type Individual object. Must be of length 1 (a
singleton). See details for description.}
\item{count}{An object of type integer. Must be of length 1 (a singleton). The
default value is \code{1}. See details for description.}
\item{exerterSubpop}{An object of type null or Subpopulation object. Must be
of length 1 (a singleton). The default value is \code{NULL}. See details for
description.}
}
\value{
An object of type Individual object.
}
\description{
Documentation for SLiM function \code{drawByStrength}, which is a method of the
SLiM class \code{\link{InteractionType}}.
Note that the R function is a stub, it does not do anything in R (except bring
up this documentation). It will only do
anything useful when used inside a \code{\link{slim_block}} function further
nested in a \code{\link{slim_script}}
function call, where it will be translated into valid SLiM code as part of a
full SLiM script.
}
\details{
Documentation for this function can be found in the official
\href{http://benhaller.com/slim/SLiM_Manual.pdf#page=660}{SLiM manual: page
660}.
Returns up to count individuals drawn from exerterSubpop, or if that
is NULL (the default), then from the subpopulation of receiver. The probability
of drawing particular individuals is proportional to the strength of interaction
they exert upon receiver. All exerters must belong to a single subpopulation
(but not necessarily the same subpopulation as receiver). The evaluate() method
must have been previously called for the receiver and exerter subpopulations,
and positions saved at evaluation time will be used. This method may be used
with either spatial or non-spatial interactions, but will be more efficient
with spatial interactions that set a short maximum interaction distance. Draws
are done with replacement, so the same individual may be drawn more than once;
sometimes using unique() on the result of this call is therefore desirable. If
more than one draw will be needed, it is much more efficient to use a single
call to drawByStrength(), rather than drawing individuals one at a time. Note
that if no individuals exert a non-zero interaction strength upon receiver,
the vector returned will be zero- length; it is important to consider this
possibility.
}
\section{Copyright}{
This is documentation for a function in the SLiM software, and has been
reproduced from the official manual,
which can be found here: \url{http://benhaller.com/slim/SLiM_Manual.pdf}. This
documentation is
Copyright © 2016-2020 Philipp Messer. All rights reserved. More information
about SLiM can be found
on the official website: \url{https://messerlab.org/slim/}
}
\seealso{
Other InteractionType:
\code{\link{InteractionType}},
\code{\link{clippedIntegral}()},
\code{\link{distanceFromPoint}()},
\code{\link{distance}()},
\code{\link{evaluate}()},
\code{\link{interactingNeighborCount}()},
\code{\link{interactionDistance}()},
\code{\link{localPopulationDensity}()},
\code{\link{nearestInteractingNeighbors}()},
\code{\link{nearestNeighborsOfPoint}()},
\code{\link{nearestNeighbors}()},
\code{\link{neighborCountOfPoint}()},
\code{\link{neighborCount}()},
\code{\link{setInteractionFunction}()},
\code{\link{strength}()},
\code{\link{totalOfNeighborStrengths}()},
\code{\link{unevaluate}()}
}
\author{
Benjamin C Haller (\email{bhaller@benhaller.com}) and Philipp W Messer
(\email{messer@cornell.edu})
}
\concept{InteractionType}
|
43c88b3f98674cab42b53b7b7a0e2ce8d505e7b6
|
7b729f9f4a1dfded7f7e5f9f349a836a72f72e39
|
/R/prediction.R
|
04ffef47f77ee18ebf7ff20f7f6345e3a3a4710e
|
[] |
no_license
|
cran/dMod
|
911209d72a4d837dd736cdb4b20de8e34727379e
|
5a567de82579b081c1f5cc643fa893a4dea3b398
|
refs/heads/master
| 2021-07-18T00:15:04.690663
| 2021-01-26T23:30:13
| 2021-01-26T23:30:13
| 37,273,446
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,798
|
r
|
prediction.R
|
#' Model prediction function for ODE models.
#' @description Interface to combine an ODE and its sensitivity equations
#' into one model function \code{x(times, pars, deriv = TRUE)} returning ODE output and sensitivities.
#' @param odemodel object of class \link{odemodel}
#' @param forcings data.frame with columns name (factor), time (numeric) and value (numeric).
#' The ODE forcings.
#' @param events data.frame of events with columns "var" (character, the name of the state to be
#' affected), "time" (numeric, time point), "value" (numeric, value), "method" (character, either
#' "replace", "add" or "multiply"). See \link[deSolve]{events}.
#' ATTENTION: Sensitivities for event states will only be correctly computed if defined within
#' \code{\link{odemodel}()}. Specify events within \code{Xs()} only for forward simulation.
#' @param names character vector with the states to be returned. If NULL, all states are returned.
#' @param condition either NULL (generic prediction for any condition) or a character, denoting
#' the condition for which the function makes a prediction.
#' @param optionsOde list with arguments to be passed to odeC() for the ODE integration.
#' @param optionsSens list with arguments to be passed to odeC() for integration of the extended system
#' @return Object of class \link{prdfn}. If the function is called with parameters that
#' result from a parameter transformation (see \link{P}), the Jacobian of the parameter transformation
#' and the sensitivities of the ODE are multiplied according to the chain rule for
#' differentiation. The result is saved in the attributed "deriv",
#' i.e. in this case the attibutes "deriv" and "sensitivities" do not coincide.
#' @export
#' @import deSolve
Xs <- function(odemodel, forcings=NULL, events=NULL, names = NULL, condition = NULL, optionsOde=list(method = "lsoda"), optionsSens=list(method = "lsodes")) {
  func <- odemodel$func          # compiled plain ODE right-hand side
  extended <- odemodel$extended  # ODE system extended by forward sensitivity equations
  if (is.null(extended)) warning("Element 'extended' empty. ODE model does not contain sensitivities.")
  myforcings <- forcings
  myevents <- events
  # Events defined both in odemodel() and here would conflict; odemodel() wins.
  if (!is.null(attr(func, "events")) & !is.null(myevents))
    warning("Events already defined in odemodel. Additional events in Xs() will be ignored. Events need to be defined in either odemodel() or Xs().")
  if (is.null(attr(func, "events")) & !is.null(myevents))
    message("Events should be definend in odemodel(). If defined in Xs(), events will be applied, but sensitivities will not be reset accordingly.")
  # Variable and parameter names of the plain ODE
  variables <- attr(func, "variables")
  parameters <- attr(func, "parameters")
  forcnames <- attr(func, "forcings")
  # Sensitivity states are the extra variables of the extended system; they
  # are named "<state>.<par>", so split at the (fixed, non-regex) dot.
  sensvar <- attr(extended, "variables")[!attr(extended, "variables")%in%variables]
  senssplit <- strsplit(sensvar, ".", fixed=TRUE)
  senssplit.1 <- unlist(lapply(senssplit, function(v) v[1]))  # state part
  senssplit.2 <- unlist(lapply(senssplit, function(v) v[2]))  # parameter part
  svariables <- intersect(senssplit.2, variables)  # sensitivities w.r.t. initial values
  sparameters <- setdiff(senssplit.2, variables)   # sensitivities w.r.t. dynamic parameters
  # Initial values for sensitivities: d(state)/d(state_ini) starts at 1,
  # every other sensitivity starts at 0.
  yiniSens <- as.numeric(senssplit.1 == senssplit.2)
  names(yiniSens) <- sensvar
  # Names for deriv output, one per (state, parameter) combination
  sensGrid <- expand.grid(variables, c(svariables, sparameters), stringsAsFactors=FALSE)
  sensNames <- paste(sensGrid[,1], sensGrid[,2], sep=".")
  # Only a subset of all variables/forcings is returned
  if (is.null(names)) names <- c(variables, forcnames)
  # Update sensNames when names are set
  select <- sensGrid[, 1] %in% names
  sensNames <- paste(sensGrid[,1][select], sensGrid[,2][select], sep = ".")
  # Controls to be modified from outside (read back inside P2X at call time)
  controls <- list(
    forcings = myforcings,
    events = myevents,
    names = names,
    optionsOde = optionsOde,
    optionsSens = optionsSens
  )
  # The actual prediction function: (times, pars) -> prdframe
  P2X <- function(times, pars, deriv=TRUE){
    yini <- unclass(pars)[variables]     # initial values are taken from pars
    mypars <- unclass(pars)[parameters]  # dynamic parameters are taken from pars
    forcings <- controls$forcings
    events <- controls$events
    optionsOde <- controls$optionsOde
    optionsSens <- controls$optionsSens
    names <- controls$names
    # Add event time points (required by integrator)
    event.times <- unique(events$time)
    times <- sort(union(event.times, times))
    # Sort event time points
    if (!is.null(events)) events <- events[order(events$time),]
    myderivs <- NULL
    mysensitivities <- NULL
    if (!deriv) {
      # Evaluate model without sensitivities
      # loadDLL(func)
      if (!is.null(forcings)) forc <- setForcings(func, forcings) else forc <- NULL
      out <- suppressWarnings(do.call(odeC, c(list(y = unclass(yini), times = times, func = func, parms = mypars, forcings = forc, events = list(data = events)), optionsOde)))
      out <- submatrix(out, cols = c("time", names))
      #out <- cbind(out, out.inputs)
    } else {
      # Evaluate extended model (states plus sensitivity equations)
      # loadDLL(extended)
      if (!is.null(forcings)) forc <- setForcings(extended, forcings) else forc <- NULL
      outSens <- suppressWarnings(do.call(odeC, c(list(y = c(unclass(yini), yiniSens), times = times, func = extended, parms = mypars,
                                                       forcings = forc,
                                                       events = list(data = events)), optionsSens)))
      #out <- cbind(outSens[,c("time", variables)], out.inputs)
      out <- submatrix(outSens, cols = c("time", names))
      mysensitivities <- submatrix(outSens, cols = !colnames(outSens) %in% c(variables, forcnames))
      # Apply parameter transformation to the derivatives (chain rule, see
      # roxygen header): reshape the sensitivities to a
      # (time x state, inner parameter) matrix and multiply by the Jacobian
      # that P() attached to pars as attribute "deriv".
      variables <- intersect(variables, names)
      sensLong <- matrix(outSens[,sensNames], nrow = dim(outSens)[1]*length(variables))
      dP <- attr(pars, "deriv")
      if (!is.null(dP)) {
        sensLong <- sensLong %*% submatrix(dP, rows = c(svariables, sparameters))
        sensGrid <- expand.grid.alt(variables, colnames(dP))
        sensNames <- paste(sensGrid[,1], sensGrid[,2], sep = ".")
      }
      myderivs <- matrix(0, nrow = nrow(outSens), ncol = 1 + length(sensNames), dimnames = list(NULL, c("time", sensNames)))
      myderivs[, 1] <- out[, 1]
      myderivs[, -1] <- sensLong
    }
    #prdframe(out, deriv = myderivs, sensitivities = mysensitivities, parameters = unique(sensGrid[,2]))
    prdframe(out, deriv = myderivs, sensitivities = mysensitivities, parameters = pars)
  }
  attr(P2X, "parameters") <- c(variables, parameters)
  attr(P2X, "equations") <- as.eqnvec(attr(func, "equations"))
  attr(P2X, "forcings") <- forcings
  attr(P2X, "events") <- events
  attr(P2X, "modelname") <- func[1]
  prdfn(P2X, c(variables, parameters), condition)
}
#' Model prediction function for ODE models without sensitivities.
#' @description Interface to get an ODE
#' into a model function \code{x(times, pars, forcings, events)} returning ODE output.
#' It is a reduced version of \link{Xs}, missing the sensitivities.
#' @param odemodel Object of class \link{odemodel}.
#' @param forcings, see \link{Xs}
#' @param events, see \link{Xs}
#' @param condition either NULL (generic prediction for any condition) or a character, denoting
#' the condition for which the function makes a prediction.
#' @param optionsOde list with arguments to be passed to odeC() for the ODE integration.
#' @details Can be used to integrate additional quantities, e.g. fluxes, by adding them to \code{f}.
#' All quantities that are not initialised by pars
#' in \code{x(..., forcings, events)} are initialized with 0. For more details and
#' the return value see \link{Xs}.
#' @export
Xf <- function(odemodel, forcings = NULL, events = NULL, condition = NULL, optionsOde=list(method = "lsoda")) {
  func <- odemodel$func  # compiled ODE right-hand side (no sensitivities)
  myforcings <- forcings
  myevents <- events
  variables <- attr(func, "variables")
  parameters <- attr(func, "parameters")
  # All states default to 0 unless initialised via pars (see roxygen details)
  yini <- rep(0,length(variables))
  names(yini) <- variables
  # Controls to be modified from outside (read back inside P2X at call time)
  controls <- list(
    forcings = myforcings,
    events = myevents,
    optionsOde = optionsOde
  )
  # The prediction function: (times, pars) -> prdframe.
  # NOTE(review): the `deriv` argument is accepted for interface
  # compatibility with Xs() but is never used -- no derivatives are returned.
  P2X <- function(times, pars, deriv = TRUE){
    events <- controls$events
    forcings <- controls$forcings
    optionsOde <- controls$optionsOde
    # Add event time points (required by integrator)
    event.times <- unique(events$time)
    times <- sort(union(event.times, times))
    # Overwrite the zero defaults for every state supplied in pars; the
    # remaining entries of pars are the dynamic parameters.
    yini[names(pars[names(pars) %in% variables])] <- pars[names(pars) %in% variables]
    mypars <- pars[parameters]
    #alltimes <- unique(sort(c(times, forctimes)))
    # loadDLL(func)
    if(!is.null(forcings)) forc <- setForcings(func, forcings) else forc <- NULL
    out <- suppressWarnings(do.call(odeC, c(list(y=yini, times=times, func=func, parms=mypars, forcings=forc,events = list(data = events)), optionsOde)))
    #out <- cbind(out, out.inputs)
    prdframe(out, deriv = NULL, parameters = pars)
  }
  attr(P2X, "parameters") <- c(variables, parameters)
  attr(P2X, "equations") <- as.eqnvec(attr(func, "equations"))
  attr(P2X, "forcings") <- forcings
  attr(P2X, "events") <- events
  attr(P2X, "modelname") <- func[1]
  prdfn(P2X, c(variables, parameters), condition)
}
#' Model prediction function from data.frame
#'
#' @param data data.frame with columns "name", "time", and row names that
#' are taken as parameter names. The data frame can contain a column "value"
#' to initialize the parameters.
#' @param condition either NULL (generic prediction for any condition) or a character, denoting
#' the condition for which the function makes a prediction.
#' @return Object of class \link{prdfn}, i.e.
#' a function \code{x(times pars, deriv = TRUE, conditions = NULL)},
#' see also \link{Xs}. Attributes are "parameters", the parameter names (row names of
#' the data frame), and possibly "pouter", a named numeric vector which is generated
#' from \code{data$value}.
#' @examples
#' # Generate a data.frame and corresponding prediction function
#' timesD <- seq(0, 2*pi, 0.5)
#' mydata <- data.frame(name = "A", time = timesD, value = sin(timesD),
#' row.names = paste0("par", 1:length(timesD)))
#' x <- Xd(mydata)
#'
#' # Evaluate the prediction function at different time points
#' times <- seq(0, 2*pi, 0.01)
#' pouter <- structure(mydata$value, names = rownames(mydata))
#' prediction <- x(times, pouter)
#' plot(prediction)
#'
#' @export
Xd <- function(data, condition = NULL) {
  states <- unique(as.character(data$name))
  # One interpolation function per state, with sensitivities: the prediction
  # at time t is a linear interpolation between the parameter values
  # (constant extrapolation outside the data range, rule = 2), so the
  # sensitivities d(prediction)/d(par) interpolate the columns of the
  # identity matrix M in the same way.
  predL <- lapply(states, function(s) {
    subdata <- subset(data, as.character(name) == s)
    M <- diag(1, nrow(subdata), nrow(subdata))
    parameters.specific <- rownames(subdata)
    # Fall back to generated names when the data frame has no row names
    if(is.null(parameters.specific)) parameters.specific <- paste("par", s, 1:nrow(subdata), sep = "_")
    sensnames <- paste(s, parameters.specific, sep = ".")
    # return function
    out <- function(times, pars) {
      value <- approx(x = subdata$time, y = pars[parameters.specific], xout = times, rule = 2)$y
      grad <- do.call(cbind, lapply(1:nrow(subdata), function(i) {
        approx(x = subdata$time, y = M[, i], xout = times, rule = 2)$y
      }))
      colnames(grad) <- sensnames
      attr(value, "sensitivities") <- grad
      attr(value, "sensnames") <- sensnames
      return(value)
    }
    attr(out, "parameters") <- parameters.specific
    return(out)
  }); names(predL) <- states
  # Collect parameters
  parameters <- unlist(lapply(predL, function(p) attr(p, "parameters")))
  # Initialize parameters if available (from the optional "value" column)
  pouter <- NULL
  if(any(colnames(data) == "value"))
    pouter <- structure(data$value[match(parameters, rownames(data))], names = parameters)
  sensGrid <- expand.grid(states, parameters, stringsAsFactors=FALSE)
  sensNames <- paste(sensGrid[,1], sensGrid[,2], sep=".")
  controls <- list()  # no tunable controls for data-based predictions
  # The prediction function: (times, pars) -> prdframe
  P2X <- function(times, pars, deriv=TRUE){
    predictions <- lapply(states, function(s) predL[[s]](times, pars)); names(predictions) <- states
    out <- cbind(times, do.call(cbind, predictions))
    colnames(out) <- c("time", states)
    mysensitivities <- NULL
    myderivs <- NULL
    if(deriv) {
      # Fill in sensitivities, column-matched by sensname
      outSens <- matrix(0, nrow = length(times), ncol = length(sensNames), dimnames = list(NULL, c(sensNames)))
      for(s in states) {
        mysens <- attr(predictions[[s]], "sensitivities")
        mynames <- attr(predictions[[s]], "sensnames")
        outSens[, mynames] <- mysens
      }
      mysensitivities <- cbind(time = times, outSens)
      # Apply parameter transformation to the derivatives (chain rule with
      # the Jacobian that P() attached to pars as attribute "deriv")
      sensLong <- matrix(outSens, nrow = nrow(outSens)*length(states))
      dP <- attr(pars, "deriv")
      if (!is.null(dP)) {
        sensLong <- sensLong %*% submatrix(dP, rows = parameters)
        sensGrid <- expand.grid.alt(states, colnames(dP))
        sensNames <- paste(sensGrid[,1], sensGrid[,2], sep = ".")
      }
      outSens <- cbind(times, matrix(sensLong, nrow = dim(outSens)[1]))
      colnames(outSens) <- c("time", sensNames)
      myderivs <- outSens
      #attr(out, "deriv") <- outSens
    }
    #attr(out, "parameters") <- unique(sensGrid[,2])
    prdframe(out, deriv = myderivs, sensitivities = mysensitivities, parameters = pars)
  }
  attr(P2X, "parameters") <- structure(parameters, names = NULL)
  attr(P2X, "pouter") <- pouter
  prdfn(P2X, attr(P2X, "parameters"), condition)
}
#' Observation functions.
#'
#' @description Creates an object of type \link{obsfn} that evaluates an observation function
#' and its derivatives based on the output of a model prediction function, see \link{prdfn},
#' as e.g. produced by \link{Xs}.
#' @param g Named character vector or equation vector defining the observation function
#' @param f Named character of equations or object that can be converted to eqnvec or object of class fn.
#' If f is provided, states and parameters are guessed from f.
#' @param states character vector, alternative definition of "states", usually the names of \code{f}. If both,
#' f and states are provided, the states argument overwrites the states derived from f.
#' @param parameters character vector, alternative definition of the "parameters",
#' usually the symbols contained in "g" and "f" except for \code{states} and the code word \code{time}. If both,
#' f and parameters are provided, the parameters argument overwrites the parameters derived from f and g.
#' @param condition either NULL (generic prediction for any condition) or a character, denoting
#' the condition for which the function makes a prediction.
#' @param attach.input logical, indicating whether the original input should be
#' returned with the output.
#' @param deriv logical, generate function to evaluate derivatives of observables. Necessary for parameter estimation.
#' @param compile Logical, compile the function (see \link{funC0})
#' @param modelname Character, used if \code{compile = TRUE}, sets a fixed filename for the
#' C file.
#' @param verbose Print compiler output to R command line.
#' @return Object of class \link{obsfn}, i.e.
#' a function \code{y(..., deriv = TRUE, conditions = NULL)} representing the evaluation of the
#' observation function. Arguments \code{out} (model prediction) and \code{pars} (parameter values)
#' should be passed by the \code{...} argument.
#' If \code{out} has the attribute "sensitivities", the result of
#' \code{y(out, pars)} will have an attribute "deriv" which reflects the sensitivities of
#' the observation with respect to the parameters.
#' If \code{pars} is the result of a parameter transformation \code{p(pars)} (see \link{P}),
#' the Jacobian
#' of the parameter transformation and the sensitivities of the observation function
#' are multiplied according to the chain rule for differentiation.
#' @details For \link{odemodel}s with forcings, it is best, to pass the prediction function \code{x} to the "f"-argument
#' instead of the equations themselves. If an eqnvec is passed to "f" in this case, the forcings and states
#' have to be specified manually via the "states"-argument.
#' @example inst/examples/prediction.R
#' @export
Y <- function(g, f = NULL, states = NULL, parameters = NULL, condition = NULL, attach.input = TRUE, deriv = TRUE, compile = FALSE, modelname = NULL, verbose = FALSE) {
  # Idea:
  # If replicate scaling is indispensable and different
  # observable names for different replicates is not an option, then
  # g could be a list of observables. For this case, the observation
  # function has to return a list of observations for each condition.
  # Not yet clear how this works with the "+" operator.
  myattach.input <- attach.input
  # Hard-wired switch: suppresses the "missing parameters in Jacobian"
  # warning emitted inside X2Y below
  warnings <- FALSE
  modelname_deriv <- NULL
  # At least one of f/states/parameters is needed to determine the symbols
  if (is.null(f) && is.null(states) && is.null(parameters))
    stop("Not all three arguments f, states and parameters can be NULL")
  # Modify modelname by condition
  if (!is.null(modelname) && !is.null(condition)) modelname <- paste(modelname, sanitizeConditions(condition), sep = "_")
  # Then add suffix(es) for derivative function
  if (!is.null(modelname)) modelname_deriv <- paste(modelname, "deriv", sep = "_")
  # Get potential parameters from g, forcings are treated as parameters because
  # sensitivities dx/dp with respect to forcings are zero.
  # Distinguish between
  # symbols    = any symbol that occurs in g
  # states     = states of the underlying prediction function
  # parameters = parameters of the underlying prediction function
  # estimate   = parameters p for which derivatives should be returned and states x assumed to provide derivatives, dx/dp
  symbols <- getSymbols(unclass(g))
  if (is.null(f)) {
    # No model equations: everything is derived from the explicit arguments
    states <- union(states, "time")
    estimate <- union(states, parameters)
    parameters <- union(parameters, setdiff(symbols, c(states, "time")))
  } else if (inherits(f, "fn")) {
    # f is a prediction function: extract forcings/states/parameters from its
    # condition-specific mappings (see Details in the roxygen block)
    myforcings <- Reduce(union, lapply(lapply(attr(f, "mappings"),
                                              function(mymapping) {attr(mymapping, "forcings")}),
                                       function(myforcing) {as.character(myforcing$name)}))
    mystates <- unique(c(do.call(c, lapply(getEquations(f), names)), "time"))
    if(length(intersect(myforcings, mystates)) > 0)
      stop("Forcings and states overlap in different conditions. Please run Y for each condition by supplying only the condition specific f.")
    mystates <- c(mystates, myforcings)
    myparameters <- setdiff(union(getParameters(f), getSymbols(unclass(g))), c(mystates, myforcings))
    # User-supplied states/parameters override the guessed ones for "estimate"
    estimate <- c(states, parameters)
    if (is.null(states)) estimate <- c(estimate, setdiff(mystates, myforcings))
    if (is.null(parameters)) estimate <- c(estimate, myparameters)
    states <- union(mystates, states)
    parameters <- union(myparameters, parameters)
  } else {
    # Get all states and parameters from f
    f <- as.eqnvec(f)
    mystates <- union(names(f), "time")
    myparameters <- getSymbols(c(unclass(g), unclass(f)), exclude = mystates)
    # Set states and parameters to be estimated according to arguments, and
    # take values from mystates and myparameters, if NULL
    estimate <- c(states, parameters)
    if (is.null(states)) estimate <- c(estimate, mystates)
    if (is.null(parameters)) estimate <- c(estimate, myparameters)
    # Return states and parameters according to what is found in the equations and what is supplied by the user (probably not needed)
    states <- union(mystates, states)
    parameters <- union(myparameters, parameters)
  }
  # cat("States:\n")
  # print(states)
  # cat("Parameters:\n")
  # print(parameters)
  # cat("Estimate:\n")
  # print(estimate)
  # Observables defined by g
  observables <- names(g)
  # Compiled (or plain R) evaluator of the observation equations
  gEval <- funC0(g, variables = states, parameters = parameters, compile = compile, modelname = modelname,
                 verbose = verbose, convenient = FALSE, warnings = FALSE)
  # Produce everything that is needed for derivatives
  if (deriv) {
    # Character matrices of derivatives:
    # dxdp = symbolic names of the state sensitivities (inputs),
    # dgdx = dg/dx, dgdp = dg/dp (symbolic Jacobians of g)
    dxdp <- dgdx <- dgdp <- NULL
    states.est <- intersect(states, estimate)
    pars.est <- intersect(parameters, estimate)
    variables.deriv <- c(
      states,
      as.vector(outer(states.est, c(states.est, pars.est), paste, sep = "."))
    )
    if (length(states.est) > 0 & length(pars.est) > 0) {
      dxdp <- apply(expand.grid.alt(states.est, c(states.est, pars.est)), 1, paste, collapse = ".")
      dxdp <- matrix(dxdp, nrow = length(states.est))
    }
    if (length(states.est) > 0)
      dgdx <- matrix(jacobianSymb(g, states.est), nrow = length(g))
    if (length(pars.est) > 0) {
      dgdp <- cbind(
        matrix("0", nrow = length(g), ncol = length(states.est)),
        matrix(jacobianSymb(g, pars.est), nrow = length(g))
      )
    }
    # Sensitivities of the observables: dy/dp = dg/dx * dx/dp + dg/dp (chain rule)
    derivs <- as.vector(sumSymb(prodSymb(dgdx, dxdp), dgdp))
    if (length(derivs) == 0) stop("Neither states nor parameters involved. Use Y() with argument 'deriv = FALSE' instead.")
    names(derivs) <- apply(expand.grid.alt(observables, c(states.est, pars.est)), 1, paste, collapse = ".")
    derivsEval <- funC0(derivs, variables = variables.deriv, parameters = parameters, compile = compile, modelname = modelname_deriv,
                        verbose = verbose, convenient = FALSE, warnings = FALSE)
  }
  # Vector with zeros for possibly missing derivatives
  # zeros <- rep(0, length(dxdp))
  # names(zeros) <- dxdp
  # Redundant -> missing values have been implemented in funC0
  controls <- list(attach.input = attach.input)
  # The actual observation function: maps a model prediction 'out' (matrix with
  # "time" column and, possibly, "sensitivities"/"deriv" attributes) plus
  # parameter values 'pars' to a prdframe of observable values
  X2Y <- function(out, pars) {
    attach.input <- controls$attach.input
    # Prepare list for with()
    nOut <- ncol(out)
    values <- gEval(M = out, p = pars)
    sensitivities.export <- NULL
    myderivs <- NULL
    dout <- attr(out, "sensitivities")
    if (!is.null(dout) & deriv) {
      # Evaluate the symbolic observable sensitivities on states + state sensitivities
      dvalues <- derivsEval(M = cbind(out, dout), p = pars)
      sensitivities.export <- cbind(time = out[, 1], dvalues)
    }
    # Parameter transformation
    dP <- attr(pars, "deriv")
    if (!is.null(dP) & !is.null(dout) & deriv) {
      parameters.all <- c(states.est, pars.est)
      parameters.missing <- parameters.all[!parameters.all %in% rownames(dP)]
      # NOTE(review): 'warnings' is always FALSE above, so this warning never
      # fires; also the message string is passed after 'collapse =' inside
      # paste(), which pastes it onto every element rather than appending once.
      if (length(parameters.missing) > 0 & warnings)
        warning("Parameters ", paste(parameters.missing, collapse = ", ", "are missing in the Jacobian of the parameter transformation. Zeros are introduced."))
      # Pad the Jacobian with zero rows for missing parameters
      dP.full <- matrix(0, nrow = length(parameters.all), ncol = ncol(dP), dimnames = list(parameters.all, colnames(dP)))
      dP.full[intersect(rownames(dP), parameters.all),] <- dP[intersect(rownames(dP), parameters.all),]
      # Multiplication with tangent map (chain rule through the transformation)
      sensLong <- matrix(dvalues, nrow = nrow(out)*length(observables))
      sensLong <- sensLong %*% dP.full
      dvalues <- matrix(sensLong, nrow = dim(out)[1])
      # Naming
      sensGrid <- expand.grid.alt(observables, colnames(dP.full))
      sensNames <- paste(sensGrid[,1], sensGrid[,2], sep = ".")
      colnames(dvalues) <- sensNames
    }
    # Format output
    values <- cbind(time = out[,"time"], values)
    if (attach.input)
      values <- cbind(values, submatrix(out, cols = -1))
    myderivs <- myparameters <- NULL
    if (!is.null(dout) & deriv & !attach.input) {
      myderivs <- cbind(time = out[,"time"], dvalues)
      if (is.null(dP)) myparameters <- names(pars) else myparameters <- colnames(dP)
    }
    if (!is.null(dout) & deriv & attach.input) {
      # When the input is attached, its derivatives are attached as well
      myderivs <- cbind(time = out[,"time"], dvalues, submatrix(attr(out, "deriv"), cols = -1))
      if (is.null(dP)) myparameters <- names(pars) else myparameters <- colnames(dP)
    }
    # Output
    prdframe(prediction = values, deriv = myderivs, sensitivities = sensitivities.export, parameters = pars)
  }
  attr(X2Y, "equations") <- g
  attr(X2Y, "parameters") <- parameters
  attr(X2Y, "states") <- states
  attr(X2Y, "modelname") <- modelname
  obsfn(X2Y, parameters, condition)
}
#' Generate a prediction function that returns times
#'
#' Function to deal with non-ODE models within the framework of dMod. See example.
#'
#' @param condition either NULL (generic prediction for any condition) or a character, denoting
#' the condition for which the function makes a prediction.
#' @return Object of class \link{prdfn}.
#' @examples
#' x <- Xt()
#' g <- Y(c(y = "a*time^2+b"), f = NULL, parameters = c("a", "b"))
#'
#' times <- seq(-1, 1, by = .05)
#' pars <- c(a = .1, b = 1)
#'
#' plot((g*x)(times, pars))
#' @export
Xt <- function(condition = NULL) {
  # Controls kept in this environment so they can be modified from outside
  controls <- list()
  # Prediction function: the "model" is simply the time axis itself, so the
  # prediction, its derivatives and its sensitivities all equal the time matrix.
  P2X <- function(times, pars, deriv = TRUE) {
    time_matrix <- matrix(times, ncol = 1, dimnames = list(NULL, "time"))
    prdframe(time_matrix, deriv = time_matrix, sensitivities = time_matrix,
             parameters = pars)
  }
  # No parameters, equations, forcings or events are associated with this fn
  for (field in c("parameters", "equations", "forcings", "events")) {
    attr(P2X, field) <- NULL
  }
  prdfn(P2X, NULL, condition)
}
#' An identity function which vanishes upon concatenation of fns
#'
#' @return fn of class idfn
#' @export
#'
#' @examples
#' x <- Xt()
#' id <- Id()
#'
#' (id*x)(1:10, pars = c(a = 1))
#' (x*id)(1:10, pars = c(a = 1))
#' str(id*x)
#' str(x*id)
Id <- function() {
  # The identity element among fns: a do-nothing function carrying class
  # "idfn" so that fn-composition operators can recognize and drop it.
  structure(function() NULL, class = c("idfn", "fn"))
}
|
15889ad5e74d7b17b92f27224399275b894844df
|
959a99ac6395d54b2ea01c4bdfee07b16a0c4f73
|
/ADdata/functions.R
|
bff0d36ac555b81b8aaced5c2fad1a3ba4e4a038
|
[] |
no_license
|
rheitkamp/microbiomePower
|
862b3c300a1203872ddd7b4732556c482273a05c
|
ecbbe4ca7d4950f9c2a9f91b1ec9e7592ec34d07
|
refs/heads/master
| 2016-09-06T08:23:01.598865
| 2014-06-25T20:14:21
| 2014-06-25T20:14:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 635
|
r
|
functions.R
|
###Broken stick with linear SD transformation
# Converts a two-column matrix x of percentages of the whole assemblage
# (column 1 presumably the mean %, column 2 the SD % -- confirm with callers)
# into proportions of the remaining, not-yet-allocated total at each row
# ("broken stick"): row 1 is divided by 100, each later row by what is left
# after subtracting the previous rows' column-1 values.
# Returns a matrix with the same shape and dimnames as x.
bMSD <- function(x){
  n_rows <- nrow(x)
  y <- matrix(nrow = n_rows, ncol = ncol(x),
              dimnames = list(rownames(x), colnames(x)))
  total <- 100
  # First row is relative to the full total of 100
  y[1, 2] <- x[1, 2] / total
  y[1, 1] <- x[1, 1] / total
  # Guard fixes the original 2:nrow loop, which errored on single-row input
  # (2:1 iterates over c(2, 1) and indexes out of bounds)
  if (n_rows >= 2) {
    for (i in 2:n_rows) {
      # Column 2 of row i is relative to the total remaining before row i
      y[i, 2] <- x[i, 2] / (total - x[i - 1, 1])
      total <- total - x[i - 1, 1]
      y[i, 1] <- x[i, 1] / total
    }
  }
  return(y)
}
###Beta distribution
# Method-of-moments conversion of (mean, sd) into the shape parameters of a
# Beta distribution. Returns a list with shapes a (alpha) and b (beta) plus
# fixed type/location/scale fields expected by downstream consumers.
getBetaParams <- function(mean, sd){
  variance <- sd^2
  # Common factor n = mean*(1-mean)/variance - 1 from the moment equations
  common <- mean * (1 - mean) / variance - 1
  list(type = 1,
       a = mean * common,
       b = (1 - mean) * common,
       location = 0,
       scale = 1)
}
|
3d5b8a8519f8eb5fe73c9b15e510674415655059
|
d26b1b446e18850cae39119828067d21a5079acd
|
/R/GetPropAligned.R
|
b038aecc317d191576be420db883dc36ef0c8c6a
|
[] |
no_license
|
ziyili20/TOAST
|
c623dcb2c64a89f00f84fddfab759da62d6434a5
|
102b6b1fa801561976b070adf1e15378bde43f76
|
refs/heads/master
| 2022-08-31T11:28:40.598741
| 2022-08-24T19:35:31
| 2022-08-24T19:35:31
| 145,612,563
| 12
| 4
| null | 2021-07-20T06:07:43
| 2018-08-21T19:53:08
|
R
|
UTF-8
|
R
| false
| false
| 781
|
r
|
GetPropAligned.R
|
# Aligns the columns of 'input' to those of 'reference' by greedily matching
# the most correlated column pairs (for L > 2), or by the sign of the single
# cross-correlation (for L == 2). Returns 'input' with its columns reordered
# to the reference order. NA correlations are replaced by draws near -1 so
# they are never selected as a best match.
GetPropAligned <- function(input,reference,L){
  colnames(input) <- colnames(reference) <- seq_len(ncol(input))
  corMat <- cor(input, reference, use = "pairwise.complete.obs")
  aligned_names <- rep(0, L)
  work <- corMat
  work[is.na(work)] <- rnorm(L, -1, 0.01)
  if (L > 2) {
    # Greedy assignment: repeatedly take the largest remaining correlation,
    # record the match, then knock out its row and column with -1
    for (k in 1:L) {
      hit <- which(work == max(work), arr.ind = TRUE)
      aligned_names[hit[1]] <- colnames(corMat)[hit[2]]
      work[hit[1], ] <- work[, hit[2]] <- rep(-1, L)
    }
  } else if (L == 2) {
    # With two columns the diagonal sign decides: keep order or swap
    aligned_names <- if (work[1, 1] > 0) c("1", "2") else c("2", "1")
  }
  colnames(input) <- aligned_names
  input[, colnames(reference)]
}
|
a4e60a0514cd6500a775cf4c3f9d72d9b58ceb9e
|
f5feacda6bcf986bf61cdfa57f5387ed7e651918
|
/man/points_sd.Rd
|
422bd5227ed79d3b5d40d0a18cb856b81e24b338
|
[] |
no_license
|
cran/functClust
|
3386c3179bdf9e255bfec00ed8f39b6c3c696da1
|
f7415612fbc0fd749a1da01e822b6217e2b8bb0e
|
refs/heads/master
| 2023-01-20T01:30:18.270906
| 2020-12-02T09:30:02
| 2020-12-02T09:30:02
| 318,755,202
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,088
|
rd
|
points_sd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test_fclust.R
\name{points_sd}
\alias{points_sd}
\title{Plot a point with x- and y-error bars}
\usage{
points_sd(x, y, opt = c("mean", "mean"), fig, col, bg, cex = 2, name = "")
}
\arguments{
\item{x, y}{two numeric vectors, x in abscissa and y in coordinate.}
\item{opt}{two strings, equal to \code{"mean"} or \code{"median"}.
Indicate if the point should be plotted at \code{mean(x), mean(y)},
\code{mean(x), median(y)}, \code{median(x), mean(y)}
or \code{median(x), median(y)}.}
\item{fig, col}{two integers, indicating the symbol and the colour to use.}
\item{bg}{a string, indicating the colour background to use.}
\item{cex}{a numeric, indicating the size of symbol to plot.}
\item{name}{a string, indicating the label to add near the point.}
}
\value{
Nothing. It is a procedure.
}
\description{
Take two vectors,
compute their mean (or median, depending on \code{opt}) and standard deviation,
then plot the resulting point with error bars along the x- and y-axes.
}
\details{
None.
}
\keyword{internal}
|
2b9059c3da958e694b3cee583f17b2f8f1fc25f9
|
14c45e24952fe246a557c49a7795da5f19af2d58
|
/talks/sibaura220615/fig/ex1.r
|
b27b55a75bceb71cf29d9b02c4c5afecce26190b
|
[] |
no_license
|
s-takato/s-takato.github.io
|
59e51d938c410fb1c886dfb32247e8b42dcdb5df
|
735f07b341990569171718291c336abf221ca08f
|
refs/heads/master
| 2023-03-08T07:38:52.870212
| 2023-02-26T00:39:43
| 2023-02-26T00:39:43
| 183,700,769
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,695
|
r
|
ex1.r
|
# date time=2022/6/15 08:18:00
# Auto-generated KeTCindy drawing script: produces a TeX picture (ex1.tex)
# containing a Bezier curve, a tangent-like line through P, and axes.
# Paths are machine-specific; regenerate from the .cdy source rather than
# editing by hand.
setwd('/githubio.git/talks/sibaura220615/fig')
source('/Applications/KeTTeX.app/texlive/texmf-dist/scripts/ketcindy/ketlib/ketpiccurrent.r')
source('/Applications/KeTTeX.app/texlive/texmf-dist/scripts/ketcindy/ketlib/ketpiccurrent_rep2e.r')
Ketinit()
cat(ThisVersion,'\n')
Fnametex='ex1.tex'
FnameR='ex1.r'
Fnameout='ex1.txt'
# Aliases and clamped/guarded math helpers used by KeTCindy expressions
arccos=acos; arcsin=asin; arctan=atan
Acos<- function(x){acos(max(-1,min(1,x)))}
Asin<- function(x){asin(max(-1,min(1,x)))}
Atan=atan
Sqr<- function(x){if(x>=0){sqrt(x)}else{0}}
Factorial=factorial
Norm<- function(x){norm(matrix(x,nrow=1),"2")}
# Drawing window: x in [-4.94, 5], y in [-4.69, 5]
Setwindow(c(-4.94,5), c(-4.69,5))
# Named control points exported to the KeTCindy environment
A=c(-4,1);Assignadd('A',A)
B=c(-1.0601153362,0.5864467817);Assignadd('B',B)
C=c(1.1277822726,1.2405604998);Assignadd('C',C)
D=c(3.0224564905,2.5036766451);Assignadd('D',D)
E=c(4.1051274722,4.3306839267);Assignadd('E',E)
P=c(1.7522974336,1.56);Assignadd('P',P)
# Piecewise cubic Bezier through A..E with the listed handle points
bzo1=Bezier(list(c(-4,1),c(-1.0601153362,0.5864467817),c(1.1277822726,1.2405604998),c(3.0224564905,2.5036766451),c(4.1051274722,4.3306839267)),list(c(c(-1.8774317797,0.5481038799)),c(c(-0.2427988928,0.6247896836),c(0.4770617645,0.9349735209)),c(c(1.8378411527,1.5740135651),c(2.5046234951,1.9662287635)),c(c(3.540289486,3.0411245268))),"Num=30")
# Line through P with slope 0.558192 (presumably the tangent slope at P)
gr1=Plotdata('(0.558192)*(x-P[1])+P[2]','x')
axx1=Listplot(c(c(-4.93969,0),c(5,0)))
axy1=Listplot(c(c(0,-4.69157),c(0,5)))
PtL=list()
GrL=list()
# Windisp(GrL)
if(1==1){
  Openfile('/githubio.git/talks/sibaura220615/fig/ex1.tex','1cm','Cdy=ex1.cdy')
  Drwline(bzo1)
  Texcom("{")
  Setcolor(c(0,1,1,0))
  Drwline(gr1)
  Texcom("}")
  Drwline(axx1)
  Drwline(axy1)
  Letter(c(5,0),"e","$x$")
  Letter(c(0,5),"n","$y$")
  Letter(c(0,0),"sw","O")
  Closefile("0")
}
quit()
|
3dea0bfda005460a89260fc73cc63bc8ac6e2183
|
31978b16ba68fce7ea8afcf932be6d11cda8df75
|
/seedlings_basiclevels_dataprep_pnas.R
|
5e5ccfd9d85e9a55e82af68b824ff6268d3ce4b0
|
[
"MIT"
] |
permissive
|
ebergelson/sixmonth_seedlings_paper
|
bfe96405f3b4e9421ec81994fd7c28811f0c9b52
|
86fe7f9ec2e64a2882312a8229b253462c9f044b
|
refs/heads/master
| 2021-08-17T06:29:59.623376
| 2017-11-20T21:29:24
| 2017-11-20T21:29:24
| 104,960,280
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,567
|
r
|
seedlings_basiclevels_dataprep_pnas.R
|
# this script aggregates the seedlings basiclevels at the recording-level, and adds some relevant statistics.
# it runs over the all_basiclevel_feather files that are made by concatenating
# each file's annotations lab internally
# for the pnas paper, we only care about month 6
# so at the end of this script we write out the subset of all_basiclevel that's month 6,
# and the aggregated version of just the month six subset (and the overall agg file for other purposes)
# you don't need to run this script to recreate our analysis since sixmonth_only_stats_pnas.R is self-contained
#first let's get our libraries and our functions
library(tidyverse)
library(feather)
library(forcats)
library(entropy)
options(tibble.width = Inf)
options(tibble.print_max = 200, tibble.print_min = 100)
all_basiclevel_home_data <- read_feather("data/all_basiclevel_new_09-26-17.feather")
all_basiclevel_home_data <- all_basiclevel_home_data%>%
  #adding in noun onset, i.e. when the child said their first noun
  # (earliest month with a CHI-produced token, joined back onto every row)
  filter(speaker == "CHI")%>%
  group_by(subj)%>%
  summarise(noun_chi_onset = min(as.numeric(as.character(month))))%>%
  right_join(all_basiclevel_home_data)
# Interactive sanity checks: inspect factor levels and look for malformed rows
summary(all_basiclevel_home_data, maxsum=50)
levels(all_basiclevel_home_data$speaker)
#should produce no rows.
all_basiclevel_home_data%>%
  filter(nchar(as.character(speaker))!=3)
subset(all_basiclevel_home_data, is.na(utterance_type))
# aggregating the home data -----------------------------------------------
#we make lots of little dataframes first
#the majority of these aggregate over subj, month, and audio_video
# num words in 6 month experiment -----------------------------------------
#note: established possible basic levels for other versions of these words
# #by crawling like so:
# all_basiclevel_home_data %>%
#   distinct(basic_level)%>%
#   filter(grepl("^[b]", x = basic_level))%>%
#   arrange(basic_level)%>%as.data.frame()
# Variants of the words tested in the 6-month experiment (hand-curated list)
basic_level_tested_words <- c("baby","babe","baby+doll",
                              "ball","bally",
                              "blanket","blankey","blanky",
                              "book","books",
                              "bottle","baba","ba",
                              "car", "car+car",
                              "diaper","diape","diapey","diapers","diatee","didey","diadey",
                              "foot","footsy","footy","feet","feetsie","footsie","feetsy","feety",
                              "hair","hairs",
                              "hand",
                              "juice","juices","juice+box","juice+boxes","juicey",
                              "milk","milkies","milky","milk+water","milk+jug","milks",
                              "mouth",
                              "nose","nosey",
                              "spoon","spoony",
                              "stroller")
# Token and type counts of experiment words per recording
num_experimentwords <- all_basiclevel_home_data %>%
  filter(basic_level %in% basic_level_tested_words) %>%
  group_by(subj, month, audio_video)%>%
  summarise(num_exp_tokens = n(),
            num_exp_types = n_distinct(basic_level))
# MOT and FAT count -------------------------------------------------------
# Per-recording token counts by mother (MOT) and father (FAT), one column each
six_to_seventeen_home_FAT_MOT_count <- all_basiclevel_home_data %>%
  filter(speaker %in%c("MOT","FAT"))%>%
  group_by(subj, month, audio_video, speaker)%>%
  tally()%>%
  spread(speaker, n)
# utterance type count ----------------------------------------------------
# Per-recording counts of the six utterance types, one column per type
six_to_seventeen_home_utt_count <- all_basiclevel_home_data %>%
  filter(utterance_type %in%c("d","i","q","r","s","n"))%>%
  group_by(subj, month, audio_video, utterance_type)%>%
  tally()%>%
  spread(utterance_type, n)
# object present count ----------------------------------------------------
# Counts and proportion of tokens whose referent object was present (y) vs not (n)
six_to_seventeen_home_op <- all_basiclevel_home_data %>%
  filter(object_present %in% c("n","y"))%>%
  group_by(subj, month, audio_video, object_present)%>%
  tally()%>%
  spread(object_present, n)%>%
  mutate(prop_op = y/(n+y))%>%
  rename(y_op = y,
         n_op = n)
# Same, restricted to the experiment words defined above
six_to_seventeen_home_op_exp <- all_basiclevel_home_data %>%
  filter(basic_level %in% basic_level_tested_words &
           object_present %in% c("n","y"))%>%
  group_by(subj, month, audio_video, object_present)%>%
  tally()%>%
  spread(object_present, n)%>%
  mutate(prop_op_exp = y/(n+y))%>%
  rename(y_op_exp = y,
         n_op_exp = n)
# device and toy use count ------------------------------------------------
# Tokens attributed to electronic/toy sources (TV* and TOY speaker codes)
six_to_seventeen_home_device_count <- all_basiclevel_home_data %>%
  filter(speaker %in% c("TOY","TVN","TVF", "TVM","TVS","TVB"))%>%
  group_by(subj, month, audio_video, speaker)%>%
  tally()%>%
  spread(speaker, n)
# few versions of kid talk info -------------------------------------------
#chi tokens
# Per-recording token count of child (CHI) productions
six_to_seventeen_home_chi_count <- all_basiclevel_home_data %>%
  filter(speaker %in% c("CHI"))%>%
  group_by(subj, month, audio_video, speaker)%>%
  tally()%>%
  spread(speaker, n)
#chi types
# Per-recording count of distinct basic levels produced by the child
six_to_seventeen_home_chi_type_count <- all_basiclevel_home_data %>%
  filter(speaker %in% c("CHI"))%>%
  group_by(subj, month,audio_video)%>%
  dplyr::select(subj, month, basic_level)%>%
  distinct(basic_level)%>%
  tally()%>%
  rename(CHItypes = n)
# noun production onset age
# One row per subject with the onset month computed at the top of the script
six_to_seventeen_home_noun_chi_onset <- all_basiclevel_home_data %>%
  dplyr::select(subj, noun_chi_onset) %>%
  distinct()
# finally, big aggregation of our little datasets -------------------------
# Joins all the per-recording summaries onto base counts, fills NA counts
# with 0, then derives proportions, ratios and the utterance-type entropy.
all_basiclevel_home_data_agg <- all_basiclevel_home_data %>%
  group_by(subj, month, SubjectNumber, audio_video)%>%
  summarise(numspeakers = n_distinct(speaker),
            numtokens = n(),
            numtypes = n_distinct(basic_level))%>%
  left_join(six_to_seventeen_home_FAT_MOT_count)%>%
  left_join(num_experimentwords)%>%
  left_join(six_to_seventeen_home_utt_count)%>%
  left_join(six_to_seventeen_home_device_count)%>%
  left_join(six_to_seventeen_home_op)%>%
  left_join(six_to_seventeen_home_op_exp)%>%
  left_join(six_to_seventeen_home_chi_count)%>%
  left_join(six_to_seventeen_home_chi_type_count)%>%
  # Recordings with no rows in a sub-table get NA from the joins; treat as 0
  mutate_each(funs(replace(., which(is.na(.)), 0)))%>%
  group_by(subj, month, SubjectNumber, audio_video)%>%
  mutate(prop_mom = MOT/numtokens,
         prop_dad = FAT/numtokens,
         prop_parent = prop_mom+prop_dad,
         prop_tech = (TVN+TVF+TVS+TVM+TOY+TVB)/numtokens,
         tech = (TVN+TVF+TVS+TVM+TOY+TVB),
         propd = d/numtokens,
         propi = i/numtokens,
         propn = n/numtokens,
         propq = q/numtokens,
         propr = r/numtokens,
         props = s/numtokens,
         type_token_ratio = numtypes/numtokens,
         exp_type_ratio = num_exp_types/numtypes,
         exp_token_ratio = num_exp_tokens/numtokens,
         # Shannon entropy (bits) over utterance-type proportions
         ent_subj_av = entropy(c(d/numtokens,
                                 q/numtokens,
                                 s/numtokens,
                                 r/numtokens,
                                 n/numtokens,
                                 i/numtokens),unit = "log2"),
         sum_prop_ut = round(sum(c(d/numtokens,
                                   q/numtokens,
                                   s/numtokens,
                                   r/numtokens,
                                   n/numtokens,
                                   i/numtokens)),2))%>%
  # NOTE(review): -TVF appears twice and TOY is never dropped — possibly the
  # second -TVF was meant to be -TOY; confirm intended kept columns.
  dplyr::select(-TVF, -TVM, -TVS, -TVF, -TVN, -TVB)%>%
  left_join(six_to_seventeen_home_noun_chi_onset)%>%
  # posttalk: has the child already started producing nouns by this month?
  mutate(posttalk = ifelse(as.numeric(as.character(month))<noun_chi_onset|
                             is.na(noun_chi_onset),F,T))
summary(all_basiclevel_home_data_agg, maxsum=50)
#overall agg feather
write_feather(all_basiclevel_home_data_agg, "data/all_basiclevel_home_data_agg_feather9-26-17")
# two feathers, agg, and not-agg, six month only, for pnas
write_feather(all_basiclevel_home_data_agg %>% filter(month=="06"), "data/sixmonth_basiclevel_home_data_agg_feather")
write_feather(all_basiclevel_home_data%>% filter(month=="06"), "data/sixmonth_basiclevel_home_data_feather")
|
4feb6005a92ab7c87d7bcd5f358cce804e677ba6
|
c61b367db07762465c749e85ed0c933a0d2e9f5d
|
/Code/calculate_immune_ave_gsva_singlecelldata_v2.R
|
12b96ad752d3502bffd6b304ec0aabbebd46229e
|
[] |
no_license
|
lizhu06/TILsComparison_PBTvsMET
|
08880f46b9d1a42e3f9b8e841a0edc35fd00386e
|
5adec0fc526a3025ccbcd99ea34d40b3c56055ba
|
refs/heads/master
| 2020-06-02T05:54:49.924270
| 2019-06-09T22:45:08
| 2019-06-09T22:45:08
| 191,060,883
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,591
|
r
|
calculate_immune_ave_gsva_singlecelldata_v2.R
|
# Setup: load single-cell TPM data (Chung et al.) and keep only "Pooled"
# samples, identified from the sample-name suffix after the underscore.
rm(list=ls())
options(stringsAsFactors = FALSE)
setwd("/net/wong05/home/liz86/Steffi/primary_vs_mets/")
# R 3.4.0
library(limma) #3.32.6
library(GSVA) # 1.26.0
#### load data
load("Data_v2/scRNA_Chung_tpm_noFiltering.RData")
tpm <- tpm_noFiltering
rm(tpm_noFiltering)
log2tpm <- log2(tpm+1)
## pick type
# Column names look like "<cell>_<type>", e.g. "BC01_Tumor"
cell_names <- sapply(1:ncol(log2tpm), function(x)
  strsplit(colnames(log2tpm)[x], split="_")[[1]][1])
type <- sapply(1:ncol(log2tpm), function(x)
  strsplit(colnames(log2tpm)[x], split="_")[[1]][2])
type[type=="Tumor" | type=="Pooled"]
cor(log2tpm[,"BC01_Tumor"], log2tpm[,"BC01_Pooled"]) #0.842
## only selected pooled samples
tpm <- tpm[, type=="Pooled"]
dim(tpm) #55860 12
log2tpm <- log2tpm[, type=="Pooled"]
dim(log2tpm) #55860 12
################################
### compute davoli signatures
################################
load("/net/wong05/home/liz86/Steffi/Kevin_IDC_ILC_DE/RawData/Immune_cell_signatures_Davoli2016.RData")
## check if any genes are not in the dataset
sapply(1:length(immune_cell_signatures_davoli), function(x)
  immune_cell_signatures_davoli[[x]][!(immune_cell_signatures_davoli[[x]]
                                       %in% rownames(log2tpm))])
# GSVA enrichment scores per Davoli signature (log2 TPM treated as microarray-like)
fit <- gsva(log2tpm, immune_cell_signatures_davoli, rnaseq=FALSE)
gsva_score_davoli <- fit
save(gsva_score_davoli, file="Results_v2/gsva_score_davoli_log2tpm_Chung_pooled.RData")
################################
# Tamborero signature
###############################
load("/net/wong05/home/liz86/Steffi/Kevin_IDC_ILC_DE/RawData/Immune_cell_signatures_Tamborero2017.RData")
sapply(1:length(immune_cell_signatures_tamborero2017), function(x)
  length(immune_cell_signatures_tamborero2017[[x]]))
## check if any genes are not in the dataset
sapply(1:length(immune_cell_signatures_tamborero2017), function(x)
  immune_cell_signatures_tamborero2017[[x]][!
    (immune_cell_signatures_tamborero2017[[x]] %in%
       rownames(log2tpm))])
# To alias
# For signature genes absent from the expression matrix, try to rescue them
# by mapping gene aliases to official symbols (limma::alias2SymbolTable)
for(i in 1:length(immune_cell_signatures_tamborero2017)){
  out_gene <- immune_cell_signatures_tamborero2017[[i]][!
    (immune_cell_signatures_tamborero2017[[i]] %in%
       rownames(log2tpm))]
  if(length(out_gene)>0){
    symbol2 <- alias2SymbolTable(out_gene, species="Hs")
    symbol2[is.na(symbol2)] <- "NA2"
    symbol2_in <- symbol2 %in% rownames(log2tpm)
    out_gene_hit <- out_gene[symbol2_in]
    out_gene_hit_symbol <- symbol2[symbol2_in]
    immune_cell_signatures_tamborero2017[[i]][match(out_gene_hit,
      immune_cell_signatures_tamborero2017[[i]])] <- out_gene_hit_symbol
  }
}
# Repair the Excel-style date mangling of gene symbol MARCH6
immune_cell_signatures_tamborero2017[[5]] <- gsub("6-Mar", "MARCH6",
  immune_cell_signatures_tamborero2017[[5]])
sapply(1:length(immune_cell_signatures_tamborero2017), function(x)
  immune_cell_signatures_tamborero2017[[x]][!(immune_cell_signatures_tamborero2017[[x]] %in%
    rownames(log2tpm))])
## GSVA (use log2 cpm)
# Restrict each signature to genes actually present, then score
tamborero_genes_common <- lapply(1:length(immune_cell_signatures_tamborero2017),
  function(x) intersect(immune_cell_signatures_tamborero2017[[x]],
    rownames(log2tpm)))
names(tamborero_genes_common) <- names(immune_cell_signatures_tamborero2017)
fit <- gsva(log2tpm, tamborero_genes_common,rnaseq=FALSE)
gsva_score_tamborero <- fit
save(gsva_score_tamborero, file="Results_v2/gsva_score_tamborero_tpm_Chung_pooled.RData")
#################################
# Prepare for TIMER
#################################
# Export TPM with gene names as the first column for the TIMER web tool,
# then reload and reshape the returned results (samples x cell types -> t())
tpm_timer <- cbind(rownames(tpm), tpm)
write.csv(tpm_timer, file="Data_v2/tpm_timer_singlecell_chuang.csv",
  quote=FALSE, row.names=FALSE) # file format doesn't work for cibersort
# load results
timer_raw <- read.csv("Results_v2/TIMER_res_singleCell_Chuang.csv")
timer <- timer_raw[, -1]
rownames(timer) <- timer_raw[,1]
timer <- t(timer)
save(timer,file="Results_v2/TIMER_res_singleCell_Chuang.RData")
#################################
# Prepare for CIBERSORT
#################################
# CIBERSORT requires a tab-delimited input instead of CSV
tpm_for_cibersort <- cbind(rownames(tpm), tpm)
write.table(tpm_for_cibersort, file="Data_v2/tpm_for_cibersort_singlecell_chuang.txt",
  quote=FALSE, sep="\t", row.names=FALSE, col.names=TRUE)
# load results
# Columns 2..23 hold the 22 immune cell-type fractions (relative mode)
ciber_raw <- read.csv("Results_v2/CIBERSORT_relative_singleCell_chuang.csv")
ciber_relative <- ciber_raw[,seq(2, 23)]
rownames(ciber_relative) <- ciber_raw[,1]
ciber_relative <- t(ciber_relative)
save(ciber_relative, file="Results_v2/cibersort_relative_singleCell_chuang.RData")
# load results
# Same layout for absolute-mode scores
ciber_raw <- read.csv("Results_v2/CIBERSORT_absolute_singleCell_chuang.csv")
ciber_absolute <- ciber_raw[,seq(2, 23)]
rownames(ciber_absolute) <- ciber_raw[,1]
ciber_absolute <- t(ciber_absolute)
save(ciber_absolute, file="Results_v2/cibersort_absolute_singleCell_chuang.RData")
|
dbfbfaffe7a94ba3d99c38f830d68c0eeece61c5
|
6dfe71a04d7ca18ef55b205f9b15523b934cc6b4
|
/flexible_copula.R
|
cced3d7057dfcd94f6fd5132129f85323ab6069b
|
[] |
no_license
|
siverskog/r_functions
|
e5d54938a836334dfb56d1822396dac447f71d6d
|
ab3579833f5eed6c062ca1fac4754514f85ebc18
|
refs/heads/master
| 2016-09-07T19:08:18.948748
| 2015-09-21T07:41:23
| 2015-09-21T07:41:23
| 40,779,009
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,671
|
r
|
flexible_copula.R
|
########################################################################################
##### FLEXIBLE COPULA ESTIMATION
##### By Jonathan Siverskog
##### 2015-09-06
#####
##### CODED FOLLOWING ZIMMER (2012) AND HO ET AL. (2015)
#####
########################################################################################
# Install (if missing) and attach each package named in 'packages'.
# Fixes the 1:length(packages) footgun of the original: for an empty input
# vector, 1:0 iterates over c(1, 0) and errored on packages[1] == NA;
# iterating over the vector directly does nothing for empty input.
install <- function(packages) {
  for (pkg in packages) {
    # install only when the package is absent from the local library
    if (!is.element(pkg, installed.packages()[, 1])) install.packages(pkg)
    library(pkg, character.only = TRUE)
  }
  # surface any warnings raised during installation/attachment
  warnings()
}
# Packages required by this script (installed/attached via install() above)
packages <- c("np", "boot", "copula", "rugarch", "fGarch", "foreach", "doParallel")
install(packages)
# Shared diagnostic helpers pulled from the author's function repository
source("https://raw.githubusercontent.com/siverskog/r_functions/master/diagnostics.R")
########################################################################################
##### SELECT BEST GARCH-SPECIFICATION ##################################################
########################################################################################
# For each series (column) of 'data', fit every combination of GARCH model,
# AR order and error distribution as an AR(p)-GARCH(1,1), in parallel, and
# return per series the admissible specification (both Ljung-Box tests on the
# standardized residuals and their squares pass at 'signif.lvl') with the
# highest log-likelihood. Returns a data frame with columns Model/AR/ErrorDist
# (NA rows where no specification is admissible).
garch.spec <- function(data, ar.order = 0:4, garch.model = c("sGARCH", "eGARCH", "gjrGARCH"), error.dist = c("norm", "std", "ged"), q.lag = 10, signif.lvl = 0.05) {
  ##### SETUP LIST OF ALL SPECIFICATIONS #####
  # Each row of X: (series index, model, AR order, error distribution)
  X <- list()
  count <- 0
  for (n in 1:ncol(data)) {
    for (i in 1:length(garch.model)) {
      for (j in 1:length(ar.order)) {
        for (k in 1:length(error.dist)) {
          count <- count + 1
          X[[count]] <- c(n, garch.model[i], ar.order[j], error.dist[k])
        }
      }
    }
  }
  X <- do.call(rbind, X)
  ##### FUNCTION TO RUN THROUGH FOREACH LOOP #####
  # Fits specification i and returns the Ljung-Box p-values of the
  # standardized residuals and their squares, plus the log-likelihood
  garch.test <- function(X, i) {
    spec <- ugarchspec(variance.model = list(model = X[i,2], garchOrder = c(1,1)),
                       mean.model = list(armaOrder = c(as.numeric(X[i,3]), 0)),
                       distribution.model = X[i,4])
    fit <- ugarchfit(spec, data[,as.numeric(X[i,1])], solver = "hybrid")
    resid <- fit@fit$residuals/fit@fit$sigma
    q <- Box.test(resid, type = "Ljung-Box", lag = q.lag, fitdf = as.numeric(X[i,3]))$p.value
    q2 <- Box.test(resid^2, type = "Ljung-Box", lag = q.lag, fitdf = as.numeric(X[i,3]))$p.value
    llh <- fit@fit$LLH
    return(c(q, q2, llh))
  }
  ##### FOREACH LOOP #####
  print("SETTING UP CLUSTER...")
  cluster <- makeCluster(detectCores()-1)
  # Ensure the cluster is released even if estimation fails part-way
  on.exit(stopCluster(cluster), add = TRUE)
  registerDoParallel(cluster)
  print("ESTIMATING ALL MODELS. THIS MAY TAKE SOME TIME...")
  test <- foreach(i = 1:nrow(X), .packages = c("rugarch", "stats")) %dopar% {
    garch.test(X = X, i = i)
  }
  test <- do.call(rbind, test)
  ##### SELECT BEST MODEL #####
  best <- as.data.frame(matrix(NA, ncol = 3, nrow = ncol(data)))
  colnames(best) <- c("Model", "AR", "ErrorDist")
  for (i in 1:ncol(data)) {
    # Admissible = belongs to series i and passes both Ljung-Box tests
    good <- X[,1]==i & test[,1]>=signif.lvl & test[,2]>=signif.lvl
    if (all(!good)) next
    # BUG FIX: the original selected X[test[,3]==max(test[good,3]), -1],
    # matching the max log-likelihood over ALL rows — a row from another
    # series with the same llh (or several tied rows) could be picked and
    # break the single-row assignment. Restrict to series i's good rows.
    best[i, ] <- X[which(good)[which.max(test[good, 3])], -1]
  }
  return(best)
}
########################################################################################
##### APPLY AR-GARCH-FILTER TO DATA ####################################################
########################################################################################
# Standardize series by an AR-GARCH filter: returns residuals / conditional
# sigma for each series (or the whole data.frame, column by column).
#
# Args:
#   x    numeric vector, or matrix/data.frame with one series per column
#   spec either a rugarch "uGARCHspec" object, or an fGarch formula passed
#        to garchFit()
# Returns: standardized residuals, same shape (and dim names) as `x`.
#   If the rugarch optimizer does not converge, the series is simply scaled
#   by its sample standard deviation as a fallback.
garch.filter <- function(x, spec) {
  # FIX: inherits() instead of class(spec)=="uGARCHspec" (S4 class test)
  if(inherits(spec, "uGARCHspec")) {
    if(is.vector(x)) {
      fit <- ugarchfit(spec, x)
      if(fit@fit$convergence==0) {
        res <- fit@fit$residuals/fit@fit$sigma
      } else {
        # optimizer failed: plain standardization fallback
        res <- x/sd(x)
      }
    } else {
      res <- list()
      for(i in seq_len(ncol(x))) {
        fit <- ugarchfit(spec, x[,i])
        if(fit@fit$convergence==0) {
          # NOTE(review): this branch uses the residuals()/sigma() extractors
          # while the vector branch reads the @fit slots directly -- the
          # extractors return xts objects; presumably equivalent values,
          # confirm before relying on the class of the result
          res[[i]] <- residuals(fit)/sigma(fit)
        } else {
          res[[i]] <- x[,i]/sd(x[,i])
        }
      }
    }
  } else {
    # fGarch formula interface
    if(is.vector(x)) {
      fit <- garchFit(formula = spec, data = x, trace = FALSE)
      res <- fit@residuals/fit@sigma.t
    } else {
      res <- list()
      for(i in seq_len(ncol(x))) {
        fit <- garchFit(formula = spec, data = x[,i], trace = FALSE)
        res[[i]] <- fit@residuals/fit@sigma.t
      }
    }
  }
  # re-assemble multi-column input into a data.frame with original dim names
  if(!is.vector(x)) {
    res <- data.frame(do.call(cbind, res))
    colnames(res) <- colnames(x)
    rownames(res) <- rownames(x)
  }
  return(res)
}
########################################################################################
##### DESCRIPTIVE STATISTICS ###########################################################
########################################################################################
# Descriptive-statistics table, one row per column of `df`.
#
# Columns with more than 50 non-missing observations get skewness, kurtosis
# and (starred) Jarque-Bera / Ljung-Box / ARCH test entries; shorter columns
# get only Obs = 0 and mean/sd.  The helpers .skew/.kurt/jb.test/q.test/
# arch.test and the star-formatter sign() (which masks base::sign) come from
# the sourced diagnostics.R.
#
# Args:
#   df         data.frame of series
#   dec        decimals for formatting
#   dlog       treat input as log returns: annualize mean/sd (obsperyear)
#              and express them in percent
#   obsperyear observations per year used for annualization
#   only.stars passed through to the significance formatter
#
# Returns: data.frame of formatted strings, rownames = colnames(df).
#
# FIX: the original renamed rownames(df)[2:3], which only touched the local
# copy of the input; the "(%)" labels clearly target the Mean/StdDev columns
# of the returned table when dlog = TRUE.
desc.stat <- function(df, dec = 2, dlog = TRUE, obsperyear = 260, only.stars = TRUE) {
  result <- as.data.frame(matrix(NA, nrow = ncol(df), ncol = 9))
  colnames(result) <- c("Obs", "Mean", "StdDev", "Skewness", "Kurtosis", "JB", "Q(10)", "$Q^2$(10)", "ARCH(10)")
  for(i in seq_len(ncol(df))) {
    x <- as.numeric(na.exclude(df[,i]))
    if(length(x)>50) {
      result[i,"Obs"] <- format(round(length(x), 0), nsmall = 0)
      result[i,"Skewness"] <- format(round(.skew(x), dec), nsmall = dec)
      result[i,"Kurtosis"] <- format(round(.kurt(x), dec), nsmall = dec)
      result[i,"JB"] <- sign(jb.test(x), digits = dec, only.stars = only.stars)
      result[i,"Q(10)"] <- sign(q.test(x, lag = 10), digits = dec, only.stars = only.stars)
      result[i,"$Q^2$(10)"] <- sign(q.test(x, lag = 10, sq = TRUE), digits = dec, only.stars = only.stars)
      result[i,"ARCH(10)"] <- sign(arch.test(x, lag = 10), digits = dec, only.stars = only.stars)
    } else {
      # too few observations for meaningful diagnostics
      result[i,] <- rep(NA, ncol(result))
      result[i,"Obs"] <- 0
    }
    if(dlog) {
      # annualized mean and volatility, in percent
      result[i,"Mean"] <- format(round(mean(x)*obsperyear*100, dec), nsmall = dec)
      result[i,"StdDev"] <- format(round(sd(x)*sqrt(obsperyear)*100, dec), nsmall = dec)
    } else{
      result[i,"Mean"] <- format(round(mean(x), dec), nsmall = dec)
      result[i,"StdDev"] <- format(round(sd(x), dec), nsmall = dec)
    }
  }
  if(dlog) {colnames(result)[2:3] <- c("Mean (%)", "StdDev (%)")}
  rownames(result) <- colnames(df)
  return(result)
}
########################################################################################
##### COMPUTE BANDWIDTH FOR CDF ########################################################
########################################################################################
# Cross-validated joint-CDF bandwidths for each variable pair (x[i], y[i]).
#
# Args:
#   data     data.frame of (standardized) series
#   x, y     parallel vectors of column indices defining the pairs
#   ckertype kernel type passed to np::npudistbw
#   nmulti   number of restarts for the bandwidth search
#   bwtype   bandwidth type (default adaptive nearest-neighbour)
#
# Returns: list of npudistbw objects, one per pair, in the order of `x`.
compute.bandwidth <- function(data, x, y, ckertype = "gaussian", nmulti = 30, bwtype = "adaptive_nn") {
  bw <- list()
  # seq_along() (not 1:length(x)) so an empty pair list yields an empty result
  for(i in seq_along(x)) {
    print(paste("COMPUTING CDF BANDWIDTH FOR PAIR", i, sep = " "))
    form <- as.formula(paste("~",colnames(data)[x[i]], "+", colnames(data)[y[i]], sep = " "))
    bw[[i]] <- npudistbw(form, data = data, ckertype = ckertype, bwmethod = "cv.cdf", nmulti = nmulti, bwtype = bwtype)
  }
  return(bw)
}
########################################################################################
##### COPULA FUNCTION ##################################################################
########################################################################################
# Kernel-copula conditional probabilities for each variable pair (x[i], y[i]).
#
# For pair i, marginal kernel CDFs u_x = F_x(g) and u_y = F_y(g) are
# evaluated on `grid` reusing the per-margin bandwidths stored in bw[[i]],
# and the joint copula C(u_x, u_y) is evaluated on the diagonal of the grid.
# The returned curve stacks two conditional probabilities:
#   lower half of grid :  C/u_y                   = P(X <= g | Y <= g)
#   upper half of grid : (1-u_x-u_y+C)/(1-u_y)    = P(X >  g | Y >  g)
# i.e. lower- and upper-tail co-movement probabilities.
#
# Args:
#   data      data.frame of (standardized) series
#   x, y      column indices defining the pairs
#   bw        list of joint-CDF bandwidth objects (see compute.bandwidth)
#   bwtype    np bandwidth type
#   grid      thresholds at which the probabilities are evaluated
#   as.vector if TRUE return one long concatenated vector (the form the
#             bootstrap statistic needs), else a matrix, one column per pair
nonparametric.copula <- function(data, x, y, bw, bwtype = "adaptive_nn", grid = seq(-2,2,0.1), as.vector = TRUE) {
  prob <- list()
  for(i in 1:length(bw)) {
    # marginal CDF bandwidth objects, one per margin, from bw[[i]]$bw
    ucdf.x <- npudistbw(as.formula(paste("~",colnames(data)[x[i]], sep = " ")), data = data, bws = bw[[i]]$bw[1], bandwidth.compute = FALSE, bwtype = bwtype)
    ucdf.y <- npudistbw(as.formula(paste("~",colnames(data)[y[i]], sep = " ")), data = data, bws = bw[[i]]$bw[2], bandwidth.compute = FALSE, bwtype = bwtype)
    # evaluate each marginal CDF on the grid (column renamed so the formula
    # in the bandwidth object finds the right variable)
    z <- data.frame(grid)
    colnames(z) <- colnames(data)[x[i]]
    ux <- fitted(npudist(bws = ucdf.x,newdata = data.frame(z)))
    colnames(z) <- colnames(data)[y[i]]
    uy <- fitted(npudist(bws = ucdf.y,newdata = data.frame(z)))
    uxy <- cbind(ux, uy)
    copula <- npcopula(bws = bw[[i]], data = data, u = uxy)
    # diagonal of the copula surface: C(F_x(g), F_y(g)) for each g in grid
    C <- diag(matrix(copula$copula,length(grid),length(grid)))
    prob[[i]] <- c((C/uy)[1:(ceiling(length(grid)/2))],((1-ux-uy+C)/(1-uy))[(ceiling(length(grid)/2)):length(grid)])
  }
  if(as.vector) {
    return(unlist(prob))
  } else {
    result <- do.call(cbind, prob)
    colnames(result) <- paste(colnames(data)[x], colnames(data)[y], sep = ".")
    return(result)
  }
}
########################################################################################
##### BOOTSTRAP ########################################################################
########################################################################################
# Block bootstrap of the nonparametric copula conditional probabilities.
#
# Each replicate resamples `data` in blocks of `block.size` rows
# (boot::tsboot), re-standardizes the resample through the AR-GARCH filter
# in `garchSpec`, and re-evaluates the copula conditional probabilities for
# every pair (x[i], y[i]).  Runs on a snow cluster with all cores but one.
#
# Args:
#   data       matrix/data.frame of (unfiltered) series
#   x, y       column indices defining the variable pairs
#   grid       grid of standardized-return thresholds
#   rep        bootstrap size; R = rep-1 replicates are drawn
#              (NOTE(review): presumably so the original sample counts as
#              the rep-th draw -- confirm)
#   block.size block length for the block bootstrap
#   sim        tsboot simulation type (e.g. "fixed" or "geom")
#   bw         list of np bandwidth objects (one per pair)
#   bwtype     np bandwidth type
#   garchSpec  specification forwarded to garch.filter()
#
# Returns: list(mu, sd, grid) where mu/sd are data.frames of the pointwise
#   bootstrap mean/sd -- one column per pair, rows = lower- then upper-tail
#   grid points (ceiling(length(grid)/2)*2 rows in total).
boot.copula <- function(data, x, y, grid, rep = 5, block.size = 20, sim = "fixed", bw, bwtype = "adaptive_nn", garchSpec) {

  ### STATISTIC PASSED TO TSBOOT: GARCH-FILTER THE RESAMPLE, THEN COPULA ###
  func <- function(data, x, y, grid, bw, bwtype, garchSpec) {
    res <- garch.filter(x = data, spec = garchSpec)
    result <- nonparametric.copula(data = res, x = x, y = y, bw = bw, bwtype = bwtype, grid = grid)
    return(result)
  }

  ### SET UP THE SNOW CLUSTER AND EXPORT EVERYTHING func() NEEDS ###
  print("CREATING CLUSTER...")
  cluster <- makeCluster(detectCores()-1)
  clusterExport(cluster, varlist = list("data", "func", "rep", "block.size", "sim", "x", "y", "grid", "bw", "bwtype", "garchSpec"), envir = environment())
  clusterExport(cluster, varlist = list("garch.filter", "nonparametric.copula"))
  clusterCall(cluster, function() library(np))
  clusterCall(cluster, function() library(rugarch))
  clusterCall(cluster, function() library(fGarch))
  print("STARTING BOOTSTRAP...")
  bc <- tsboot(data,
               func,
               R = rep-1,
               l = block.size,
               sim = sim,
               n.sim = nrow(data),
               x = x,
               y = y,
               grid = grid,
               bw = bw,
               bwtype = bwtype,
               garchSpec = garchSpec,
               parallel = "snow",
               ncpus = detectCores()-1,
               cl = cluster)
  print("BOOTSTRAP FINISHED")
  stopCluster(cluster)

  # fold the replicate matrix bc$t (replicates x statistics) back into
  # (grid point) x (pair) mean and sd tables
  mu <- as.data.frame(matrix(apply(bc$t, 2, mean), ncol = length(bw), nrow = ceiling(length(grid)/2)*2, dimnames = list(NULL, paste(colnames(data)[x], colnames(data)[y], sep = "."))))
  sd <- as.data.frame(matrix(apply(bc$t, 2, sd), ncol = length(bw), nrow = ceiling(length(grid)/2)*2, dimnames = list(NULL, paste(colnames(data)[x], colnames(data)[y], sep = "."))))
  return(list(mu = mu, sd = sd, grid = grid))
}
########################################################################################
##### EMPIRICAL COPULA #################################################################
########################################################################################
# Empirical-copula analogue of nonparametric.copula().
#
# Uses the empirical marginal CDFs (stats::ecdf) and the empirical copula
# (copula::C.n on pseudo-observations) to evaluate, for every pair
# (x[i], y[i]) and every threshold g in `grid`:
#   lower half of grid :  C/u_y                = P(X <= g | Y <= g)
#   upper half of grid : (1-u_x-u_y+C)/(1-u_y) = P(X >  g | Y >  g)
# `data` is expected to be standardized already (the original carried a
# commented-out garch.filter() call at this spot).
#
# Returns: list(prob = matrix, one column per pair; grid = grid)
empirical.copula <- function(data, x, y, grid) {
  prob <- list()
  for(i in seq_along(x)) {
    res <- data
    ucdf.x <- ecdf(res[,x[i]])
    ucdf.y <- ecdf(res[,y[i]])
    ux <- ucdf.x(grid)
    uy <- ucdf.y(grid)
    # pseudo-observations and the empirical copula on the (ux, uy) lattice
    U <- pobs(res[,c(x[i], y[i])])
    u <- as.matrix(expand.grid(ux,uy))
    Cn <- C.n(u = u, U = U)
    # diagonal of the copula surface: C(F_x(g), F_y(g)) for each g in grid
    C <- diag(matrix(Cn, length(grid), length(grid)))
    prob[[i]] <- c((C/uy)[1:(ceiling(length(grid)/2))],((1-ux-uy+C)/(1-uy))[(ceiling(length(grid)/2)):length(grid)])
  }
  prob <- do.call(cbind, prob)
  colnames(prob) <- paste(colnames(data)[x], colnames(data)[y], sep = ".")
  return(list(prob = prob, grid = grid))
}
########################################################################################
##### PLOT #############################################################################
########################################################################################
# Plot the (bootstrapped) copula conditional probabilities, one panel per
# pair, with a pointwise 95% normal confidence band (mean +/- 1.96 sd).
#
# NOTE(review): naming this plot.copula makes it an S3 `plot` method for
# objects of class "copula"; here it is only called directly -- consider
# renaming if the copula package is attached.
#
# Args:
#   c     optional matrix of point estimates overriding bc$mu
#   bc    result of boot.copula(): list(mu, sd, grid)
#   ec    optional result of empirical.copula(), drawn as "+" points
#   extra optional matrix of extra curves (rownames = grid) drawn in blue
#   mfrow panel layout passed to par()
#   w, h  device size in mm when print = TRUE
#   print if TRUE write the figure to `file` (PNG, 600 dpi)
plot.copula <- function(c = NULL, bc, ec = NULL, extra = NULL, mfrow, w = 200, h = 200, print = FALSE, file = "copula.png") {
  if(print) png(filename = file, width = w, height = h, units = "mm", res = 600)
  par(mfrow = mfrow, mar = c(2,3,2,1))
  if(!is.null(c)) bc$mu <- as.data.frame(c)
  # grid halves and the matching row ranges of bc$mu; these do not depend on
  # the panel, so compute them once (the original recomputed them inside
  # every loop iteration as well)
  neg.grid <- bc$grid[1:ceiling(length(bc$grid)/2)]
  pos.grid <- bc$grid[ceiling(length(bc$grid)/2):length(bc$grid)]
  neg <- 1:(nrow(bc$mu)/2)
  pos <- (nrow(bc$mu)/2 + 1):nrow(bc$mu)
  for(i in seq_len(ncol(bc$mu))) {
    # 95% confidence band from the bootstrap standard deviation
    hi.neg <- bc$mu[neg,i]+1.96*bc$sd[neg,i]
    lo.neg <- bc$mu[neg,i]-1.96*bc$sd[neg,i]
    hi.pos <- bc$mu[pos,i]+1.96*bc$sd[pos,i]
    lo.pos <- bc$mu[pos,i]-1.96*bc$sd[pos,i]
    plot(NULL,
         xlim = c(bc$grid[1], bc$grid[length(bc$grid)]),
         ylim = c(0, 1),
         xlab = NA,
         ylab = NA,
         main = colnames(bc$mu)[i],
         las = 1)
    polygon(c(neg.grid, rev(neg.grid)),c(lo.neg, rev(hi.neg)),col="lightgray",border=FALSE)
    polygon(c(rev(pos.grid), pos.grid),c(rev(hi.pos), lo.pos),col="lightgray",border=FALSE)
    points(neg.grid, bc$mu[neg,i], type = "l", lwd = 2, col = "red")
    points(pos.grid, bc$mu[pos,i], type = "l", lwd = 2, col = "red")
    abline(v=0)
    box()
    if(!is.null(ec)) {
      points(neg.grid, ec$prob[neg,i], pch = "+")
      points(pos.grid, ec$prob[pos,i], pch = "+")
    }
    if(!is.null(extra)) {
      points(rownames(extra), extra[,i], type = "l", col = "blue", lwd = 2)
    }
  }
  if(print) dev.off()
}
|
a7f111bfa9f5a57335340b05f01b990024f35b51
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/FeaLect/examples/FeaLect-package.Rd.R
|
dba4efd2685676393a9f4a79a7e873645e4f183e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 579
|
r
|
FeaLect-package.Rd.R
|
# Extracted example for the FeaLect package: score features of the mcl_sll
# data set for feature selection.
library(FeaLect)
### Name: FeaLect-package
### Title: Scores Features for Feature Selection
### Aliases: FeaLect-package
### Keywords: package regression multivariate classif models

### ** Examples

library(FeaLect)
data(mcl_sll)
# NOTE(review): binding `F` shadows the base FALSE shortcut -- harmless in
# this script, but a well-known R pitfall.
F <- as.matrix(mcl_sll[ ,-1])	# The Feature matrix
L <- as.numeric(mcl_sll[ ,1])	# The labels
names(L) <- rownames(F)
message(dim(F)[1], " samples and ",dim(F)[2], " features.")
## For this data, total.num.of.models is suggested to be at least 100.
FeaLect.result.1 <-FeaLect(F=F,L=L,maximum.features.num=10,total.num.of.models=20,talk=TRUE)
|
87d751a2a09b896154e21f38b588f9b1b30bb49a
|
8754297873779d402874ee1e52a5f9631ffb83b2
|
/preprocess_trials.R
|
cb102620bd7b8d3472eb80815a886e41c04eaa10
|
[] |
no_license
|
gitter-badger/safari-task-model-mshvarts
|
234827eb89e084db8f7eb01fa7f2ef12999b8cf4
|
49064cb968435ac8e3e1901aed0e364cd6781658
|
refs/heads/master
| 2021-01-18T00:03:28.077257
| 2015-08-06T19:26:38
| 2015-08-06T19:26:38
| 63,691,163
| 0
| 0
| null | 2016-07-19T12:13:15
| 2016-07-19T12:13:15
| null |
UTF-8
|
R
| false
| false
| 1,739
|
r
|
preprocess_trials.R
|
# Preprocess safari-task trial data: mark sequence (sector) boundaries,
# aggregate per-sector animal counts into one row per test, and write the
# result to trials_preprocessed.csv.
library(data.table)
library(plyr)
# NOTE(review): setwd() to a personal path makes this script non-portable;
# kept as written because the relative data paths below depend on it.
setwd('~/OneDrive/repos/safari-task-model/')
d <- fread('../safari-data/tours.csv')
d.trials <- fread('../safari-data/trials.csv')

# for each test we want the combined evidence (counts) and the response, for each sequence. but sequences are not marked :/
# NOTE(review): subject 2 is excluded here -- the reason is not recorded in
# this file; confirm against the study notes.
d.trials <- d.trials[subject!=2]
# lag trial/subject to detect boundaries: a sector restarts whenever the
# trial counter fails to increase; a new subject whenever the id changes
d.trials[,prevTrial:=shift(trial, 1L, type="lag")]
d.trials[,prevSubject:=shift(subject, 1L, type="lag")]
d.trials[,isNewSector:=prevTrial>=trial]
d.trials[,isNewSubject:=prevSubject!=subject]

# assign a running sector id per subject (increment on new sector, reset on
# new subject); row-by-row data.table assignment is why this is slow
# ugly ugly loop! takes ~5 seconds :(. make me less ugly!
pb <- txtProgressBar(min=1, max=nrow(d.trials), style=3)
s <- 1
d.trials[1,secId:=1]
for (i in 2:nrow(d.trials)){
  setTxtProgressBar(pb, i)
  if (d.trials[i,isNewSector]) s <- s + 1
  if (d.trials[i,isNewSubject]) s <- 1
  d.trials[i,secId:=s]
}
close(pb)

# for each test we want the combined evidence (counts) and the response, for each sequence. but sequences are not marked :/
trialSummary <- d.trials[,sum(.N),by="secId,subject,animal"]
# we want a column per animal
trialSummary <- dcast(trialSummary, secId+subject~animal, value.var="V1")
colnames(trialSummary)[3:7] <- paste("animal", 1:5, sep="")
nasTo0 <- colnames(trialSummary)[3:7]
# this is fast but obtuse. What it does is select from trialSummary the rows where a column in nasTo0 is NA, set that column for those rows to 0
for (nat0 in nasTo0){
  trialSummary[is.na(get(nat0, trialSummary)),nat0:=0,with=F]
}
head(trialSummary)
# keep one response row per sector (the first trial) and join the counts on
trialResponseSummary <- d.trials[trial==1]
setkey(trialResponseSummary, secId, subject)
trialSummary <- trialResponseSummary[trialSummary] # combine
# now trialSummary has one row per trial, just what we want.
write.csv(trialSummary, 'trials_preprocessed.csv')
|
23ea5ff2558a2d903d7e1d87dab7fd0c5819a318
|
c05abc348f38cf1dbcaecb97db05e6607ee17284
|
/tests/testthat/test-physical_parameters.R
|
75d6888f5a2247bc425654abd43ca319a1198d01
|
[] |
no_license
|
paleolimbot/pb210
|
79f443373a42c36fedbd160ad7a18f641fab8fce
|
12dec355dadcc8d7b3b5759718f33402308cd640
|
refs/heads/master
| 2022-05-13T11:31:10.110552
| 2022-05-05T12:37:42
| 2022-05-05T12:37:42
| 180,489,399
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,921
|
r
|
test-physical_parameters.R
|
context("test-physical_parameters")
test_that("cumulative_mass function works", {
masses <- withr::with_seed(39, runif(10))
expect_identical(pb210_cumulative_mass(masses, position = 1), cumsum(masses))
expect_identical(pb210_cumulative_mass(masses, position = 0), c(0, cumsum(masses[-1])))
})
test_that("density and porosity estimation are correct", {
# use all known parameters with a toy example
# 2 cm x 8 cm diameter
thickness <- 0.02
core_area <- pb210_core_area(0.08)
total_volume <- pb210_slice_volume(thickness, core_area)
porosity <- 0.75
# use reasonable but non-default densities
inorganic_density <- 2800
organic_density <- 1500
water_density <- 1100
organic_content <- 0.5
solid_density <- (inorganic_density + organic_density) / 2
volume_water <- total_volume * porosity
volume_solid <- total_volume - volume_water
mass_water <- water_density * volume_water
mass_solid <- solid_density * volume_solid
mass_total <- mass_water + mass_solid
water_content <- mass_water / mass_total
bulk_density <- mass_solid / total_volume
expect_equal(
pb210_bulk_density_estimate(
water_content = water_content,
organic_content = organic_content,
density_inorganic = inorganic_density,
density_organic = organic_density,
density_water = water_density
),
bulk_density
)
expect_equal(
pb210_porosity(
water_content = water_content,
organic_content = organic_content,
density_organic = organic_density,
density_inorganic = inorganic_density,
density_water = water_density
),
porosity
)
expect_equal(
pb210_slice_mass(bulk_density = bulk_density, thickness = thickness, core_area = core_area),
mass_solid
)
expect_equal(
pb210_bulk_density(
slice_mass = mass_solid,
thickness = thickness,
core_area = core_area
),
bulk_density
)
})
|
85178c660fb3447cc124a35f3b57d327e38aeb47
|
7a5f67a6fcc1f602224b350046eed0a1b486a41e
|
/sports.R
|
05121b9d71626c59928fed92daed4791b1a88632
|
[] |
no_license
|
cklamann/higherData-r
|
40d08166ba603e49247b4909ace0aea523bf6220
|
5283bc20c8499a161a0d9864cf53ecadb92edaef
|
refs/heads/master
| 2023-03-01T09:06:51.900005
| 2021-02-08T23:02:23
| 2021-02-08T23:02:23
| 133,805,408
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,266
|
r
|
sports.R
|
#https://ope.ed.gov/athletics/api/dataFiles/file?fileName=EADA_2016-2017.zip
#https://ope.ed.gov/athletics/api/dataFiles/file?fileName=EADA%202002-2003.zip
#each year is fiscal year
#note: the data is consistent only from 2006!
# Columns to keep from the Equity in Athletics (EADA) institution files
sportsReturnFields <- tolower(c("unitid","fiscal_year","STUDENTAID_MEN","STUDENTAID_WOMEN","HDCOACH_SAL_FTE_MEN",
                                "HDCOACH_SAL_FTE_WOMN","ASCOACH_SAL_FTE_MEN", "ASCOACH_SAL_FTE_WOMN","GRND_TOTAL_REVENUE",
                                "GRND_TOTAL_EXPENSE"))
#server is down for now: https://ope.ed.gov/athletics/#/datafile/list
library('xlsx')
# File-name convention changed: "EADA%20YYYY-YYYY.zip" through FY2015,
# "EADA_YYYY-YYYY.zip" from FY2017 (note FY2016 is absent from both ranges).
# fyToAyFull() is defined elsewhere; it appears to turn a fiscal year into a
# "YYYY-YYYY" string with the given separator -- confirm at its definition.
sportYears1 <- c(2006:2015)
sportYears2 <- c(2017:2020)
sportDownloadYears1 <- fyToAyFull(sportYears1,'-')
sportDownloadYears2 <- fyToAyFull(sportYears2,'-')
sportSourceFiles <- data.table(file = c(paste0("EADA%20",sportDownloadYears1,".zip")),fy = sportYears1)
sportSourceFiles <- rbind(sportSourceFiles, data.table(file = c(paste0("EADA_",sportDownloadYears2,".zip")),fy = sportYears2))
sportDownloadUrl <- "https://ope.ed.gov/athletics/api/dataFiles/file?fileName="
# Download every EADA zip listed in sportSourceFiles, extract the
# institution-level spreadsheet and save it as <targetDir><fiscal year>.csv.
#
# FIX: the original guard was `if(length(file > 1))`, which tests the length
# of a logical vector (truthy whenever any match exists); the intent is
# `length(file) > 1`.  The fallback match is now also reduced to one file
# before reading, so read.xlsx2 never receives a vector of paths.
#
# Args:
#   targetDir directory/prefix for the output CSV files.
# Side effects: network downloads, temp files, CSVs written to targetDir.
sportsDownloadTable<-function(targetDir){
  temp <- tempfile()
  for(n in seq_len(nrow(sportSourceFiles))){
    download_file<-sportSourceFiles[n,file]
    download.file(paste0(sportDownloadUrl,download_file),temp)
    unzipped_data<-unzip(temp)
    unlink(temp)
    # prefer the file whose name contains "inst...xls" (institution data)
    file<-grep("inst.+\\.xls",as.vector(unzipped_data),ignore.case=TRUE, perl=TRUE, value=FALSE)
    if(length(file) == 0){
      # fall back to any spreadsheet in the archive
      file<-grep(".xls",as.vector(unzipped_data),ignore.case=TRUE, perl=TRUE, value=FALSE)
    }
    if(length(file) > 1){
      file <- file[1]
    }
    table<-read.xlsx2(unzipped_data[file],1) #read.xlsx2 works better here
    write.csv(table, paste0(targetDir,sportSourceFiles[n,fy], '.csv'), row.names=FALSE)
  }
}
# Read the per-year CSVs produced by sportsDownloadTable() and stack the
# sportsReturnFields columns into one data.table, tagging each row with its
# fiscal year.  initializeDataTable()/cleanNumericTable() are helpers defined
# elsewhere in the project.
#
# Generalization: the year range is now a parameter (default unchanged, so
# existing callers behave identically).
#
# Args:
#   targetDir directory/prefix where the per-year CSVs live
#   years     fiscal years to read (default 2007:2019)
# Returns: data.table with the columns listed in sportsReturnFields.
transformSportsTable<-function(targetDir, years = c(2007:2019)){
  sportsTable<-initializeDataTable(sportsReturnFields)
  for(n in seq_along(years)){
    table<-fread(paste0(targetDir,years[n],".csv"))
    table<-cleanNumericTable(table)
    table[,fiscal_year:=years[n]]
    sportsTable <- rbind(sportsTable,table[,..sportsReturnFields])
  }
  sportsTable
}
|
e396e15786e8640989a59c52acee58613f5bddee
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/DGVM3D/examples/getCone.Rd.R
|
f6b19129e88f022fcdd4f886d7d1941ca7fb6043
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 288
|
r
|
getCone.Rd.R
|
# Extracted example for DGVM3D::getCone: build a cone mesh and render it
# with rgl when that optional package is available.
library(DGVM3D)
### Name: getCone
### Title: calculate a cone
### Aliases: getCone

### ** Examples

if (require(rgl)) {
  # 13-faced closed cone; vertices indexed by cone@id form the triangles
  cone=getCone(faces=13, close=TRUE)
  triangles3d(cone@vertices[cone@id, ], col="green")
} else {
  message("the library 'rgl' is required for this example!")
}
|
d950850a1d303770660d62a75132fb35edf4029a
|
b11a9a886f0809ab2e342134dc41da7b95e8b422
|
/R/ggexport.R
|
aeca4c3d9255bea7fc5b7cab9cece8f90f766a57
|
[] |
no_license
|
kassambara/ggpubr
|
dbf17d6a921efe5e39b87ab566f3c9fd4f4ef047
|
6aeb4f701399929b130917e797658819c71a2304
|
refs/heads/master
| 2023-09-01T19:43:28.585371
| 2023-02-13T18:28:59
| 2023-02-13T18:28:59
| 63,722,465
| 1,041
| 195
| null | 2023-08-06T16:55:18
| 2016-07-19T19:35:48
|
R
|
UTF-8
|
R
| false
| false
| 3,200
|
r
|
ggexport.R
|
#' @include utilities.R
NULL
#'Export ggplots
#'@description Export ggplots
#'@inheritParams ggarrange
#'@param ... list of plots to be arranged into the grid. The plots can be either
#' ggplot2 plot objects, arbitrary gtables or an object of class
#' \code{\link{ggarrange}}.
#' @param filename File name to create on disk.
#'@param width,height plot width and height, respectively (example, width = 800,
#' height = 800). Applied only to raster plots: "png", "jpeg", "jpg", "bmp" and
#' "tiff".
#'@param pointsize the default pointsize of plotted text (example, pointsize =
#' 8). Used only for raster plots.
#'@param res the resolution in ppi (example, res = 250). Used only for raster
#' plots.
#'@param verbose logical. If TRUE, show message.
#'@author Alboukadel Kassambara <alboukadel.kassambara@@gmail.com>
#' @examples
#' \dontrun{
#' require("magrittr")
#' # Load data
#' data("ToothGrowth")
#' df <- ToothGrowth
#' df$dose <- as.factor(df$dose)
#'
#' # Box plot
#' bxp <- ggboxplot(df, x = "dose", y = "len",
#' color = "dose", palette = "jco")
#' # Dot plot
#' dp <- ggdotplot(df, x = "dose", y = "len",
#' color = "dose", palette = "jco")
#' # Density plot
#' dens <- ggdensity(df, x = "len", fill = "dose", palette = "jco")
#'
#'# Export to pdf
#' ggarrange(bxp, dp, dens, ncol = 2) %>%
#' ggexport(filename = "test.pdf")
#'
#' # Export to png
#' ggarrange(bxp, dp, dens, ncol = 2) %>%
#' ggexport(filename = "test.png")
#' }
#'
#'@export
ggexport <- function(..., plotlist = NULL, filename = NULL, ncol = NULL, nrow = NULL,
                     width = 480, height = 480, pointsize = 12, res = NA, verbose = TRUE)
{
  # File name and extension: default to a random name with a .pdf extension
  # (.collapse/.random_string/.file_ext/.device/.add_item/.is_list are
  # package-internal helpers from utilities.R)
  if(is.null(filename))
    filename <- .collapse(.random_string(), ".pdf", sep = "")
  file.ext <- .file_ext(filename)
  # Device function chosen from the extension; assemble its argument list
  dev <- .device(filename)
  dev.opts <- list(file = filename)
  if(file.ext %in% c("ps", "eps"))
    dev.opts <- dev.opts %>%
      .add_item(onefile = FALSE, horizontal = FALSE)
  else if(file.ext %in% c("png", "jpeg", "jpg", "bmp", "tiff"))
    # raster devices always receive size/pointsize/resolution
    dev.opts <- dev.opts %>%
      .add_item(width = width, height = height, pointsize = pointsize, res = res)
  if(file.ext %in% c("pdf", "svg")){
    # pdf/svg: pass size options only when the caller supplied them, leaving
    # the device defaults in place otherwise
    if(!missing(width)) dev.opts <- dev.opts %>% .add_item(width = width)
    if(!missing(height)) dev.opts <- dev.opts %>% .add_item(height = height)
    if(!missing(pointsize)) dev.opts <- dev.opts %>% .add_item(pointsize = pointsize)
  }
  #width=800, height=800, pointsize=8, res=250
  # Collect plots from ... and plotlist; arrange into a grid when requested
  plots <- c(list(...), plotlist)
  nb.plots <- length(plots)
  if(nb.plots == 1)
    plots <- plots[[1]]
  else if(!is.null(ncol) | !is.null(nrow)){
    plots <- ggarrange(plotlist = plots, ncol = ncol, nrow = nrow)
  }
  # a multi-page ggarrange object is itself a list of pages
  if(inherits(plots, "ggarrange") & .is_list(plots))
    nb.plots <- length(plots)
  # single-page formats cannot hold several plots: write one file per plot
  # by injecting a %03d counter into the file name
  if(nb.plots > 1 & file.ext %in% c("eps", "ps", "png", "jpeg", "jpg", "tiff", "bmp", "svg")){
    filename <- gsub(paste0(".", file.ext), paste0("%03d.",file.ext), filename)
    dev.opts$file <- filename
    print(filename)  # NOTE(review): looks like leftover debug output -- confirm
  }
  do.call(dev, dev.opts)
  utils::capture.output(print(plots))
  utils::capture.output(grDevices::dev.off())
  if(verbose) message("file saved to ", filename)
}
|
bc266e351533ec9a9bfd6aa8e2f32b329425f563
|
57a607818308047a9c729a27afd112267556e5ce
|
/R/xyplot.R
|
eeff44a13268679f5a35574054005b1f2b820224
|
[] |
no_license
|
oscarperpinan/pdcluster
|
bf16799943a4e75bd6c4f7811b268e4e02cb0cf5
|
db2c47535a5807ef9dc12670368fe40216c8cdd9
|
refs/heads/master
| 2021-01-02T08:56:24.038707
| 2018-02-18T10:11:17
| 2018-02-18T10:11:17
| 11,253,765
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,071
|
r
|
xyplot.R
|
setGeneric('xyplot')##, function(x, data,...){standardGeneric('xyplot')})

# xyplot method for PD objects: plot `yvar` (default "energy") against the
# phase angle; when reflections were kept (x@refl.rm == FALSE) and
# plot.refl is TRUE, each reflection gets its own panel.
setMethod('xyplot',
          signature=c(x='PD', data='missing'),
          definition <- function(x, plot.refl=TRUE, yvar = 'energy', ...){
            dt=as.data.frame(x)
            # base lattice settings; anything in ... overrides them
            settings <- list(xlab='phase', ylab=yvar,
                             xscale.components=angleScale,
                             par.settings=pd.theme, alpha=0.2)
            call <- modifyList(settings, list(...))
            call$data <- dt
            if (x@refl.rm==FALSE & plot.refl==TRUE){ ## reflections in separate panels
              call$strip=strip.custom(strip.names=c(TRUE, TRUE),
                                      strip.levels=c(TRUE, TRUE), bg='gray')
              call$x <- as.formula(paste(yvar, '~ angle | refl'))
              p <- do.call(xyplot, call)
            } else { ## everything in one panel
              call$x <- as.formula(paste(yvar, '~ angle'))
              p <- do.call(xyplot, call)
            }
            # swap in the custom panel function and hand every panel the raw
            # yvar values it needs
            p$panel <- pdPanel
            for(i in seq_along(p$panel.args)) p$panel.args[[i]]$yvar <- dt[[yvar]]
            print(p)
          }
)
# xyplot method for PDCluster objects: plot `yvar` against the phase angle
# for a selection of clusters and distance-to-medoid groups.
#   panelClust = TRUE : one panel per cluster, grouped by distance to medoid
#   panelClust = FALSE: clusters as groups, optionally panelled by reflection
setMethod('xyplot',
          signature=c(x='PDCluster', data='missing'),
          definition <- function(x,
                                 distances, clusters,
                                 plot.refl=TRUE,
                                 panelClust=TRUE,
                                 yvar = 'energy',
                                 ...
                                 ){
            # default to every distance group / cluster present in x
            if (missing(distances)) distances <- seq_along(levels(factor(x@dist$distFactor)))
            if (missing(clusters)) clusters <- seq_along(levels(factor(x@cluster)))
            dt <- as.data.frame(x)
            # base lattice settings; anything in ... overrides them
            settings <- list(xlab='phase', ylab=yvar,
                             alpha=0.2,
                             xscale.components=angleScale,
                             auto.key=list(space='right',
                                           cex.title=0.8,
                                           lines=FALSE, points=TRUE, cex=1),
                             strip=strip.custom(strip.names=c(TRUE, TRUE),
                                                strip.levels=c(TRUE, TRUE), bg='gray'))
            call <- modifyList(settings, list(...))
            call$data=subset(dt, (distFactor %in% distances) & (cluster %in% clusters))
            if (panelClust){
              call$auto.key$title <- 'Distance\nto Medoid'
              call$groups=call$data$distFactor
              call$par.settings <- custom.theme.4
              if (x@refl.rm==FALSE & plot.refl==TRUE){ ## reflections in separate panels
                call$x <- as.formula(paste(yvar, '~ angle | cluster + refl'))
                px <- do.call(xyplot, call)
                # cluster strips on top, reflection strips on the left
                p <- useOuterStrips(px,
                                    strip=strip.custom(strip.names=c(TRUE, TRUE),
                                                       strip.levels=c(TRUE, TRUE), bg='gray'),
                                    strip.left=strip.custom(strip.levels=c(TRUE, TRUE),
                                                            strip.names=c(TRUE, TRUE), bg='gray')
                                    )
              } else { ## everything in one panel per cluster
                call$x <- as.formula(paste(yvar, '~ angle | cluster'))
                p <- do.call(xyplot, call)
              }
            } else { ## end of panelClust==TRUE
              call$auto.key$title <- 'Clusters'
              call$groups=call$data$cluster
              call$par.settings <- pd.theme
              if (plot.refl & !x@refl.rm){ ## reflections in separate panels
                call$x <- as.formula(paste(yvar, '~ angle | refl'))
                p <- do.call(xyplot, call)
              } else { ## everything in one panel
                call$x <- as.formula(paste(yvar, '~ angle'))
                p <- do.call(xyplot, call)
              }
            }
            # custom panel function plus the raw yvar values for each panel
            p$panel <- pdPanel
            for(i in seq_along(p$panel.args)) p$panel.args[[i]]$yvar <- dt[[yvar]]
            print(p)
          }
)
|
6870d957b7945f2307bb817d06c31756cd3181d4
|
0fdaaceecd7760548b25d95a3acd62a90168cf71
|
/Old code to make fuctions/dissimilarity_change.R
|
36c27a51ced031e9e07bba8335e15bce668b6d81
|
[] |
no_license
|
mavolio/RACs
|
4ba7253022569515431da1585e8e13bacea6433d
|
e93455e50ed7d957f2a997a9df70c6c3e39e4615
|
refs/heads/master
| 2020-06-24T02:09:41.534635
| 2019-06-17T18:11:20
| 2019-06-17T18:11:20
| 96,915,481
| 0
| 1
| null | 2018-04-13T21:09:29
| 2017-07-11T16:42:32
|
R
|
UTF-8
|
R
| false
| false
| 7,449
|
r
|
dissimilarity_change.R
|
#'@title Bray-Curtis dissimilarity of replicates between and within time periods
#' @description Calculates the average changes in Bray-Curtis dissimilarity of replicates between two consecutive time periods and within time periods. Between change is the average Bray-Curtis dissimilarity of all pairwise comparisons of replicates between two consecutive time periods. This is a measure of how dissimilar the community composition of two time periods is. Bray-Curtis dissimilarity ranges from 0-1, where 0 indicates identical communities and 1 indicates completely different communities. Within change is derived in two steps. First, the average Bray-Curtis dissimilarity of all replicates within a time period is calculated. This is a measure of how homogeneous a community is within a time step. Then, these averages are compared between two consecutive time periods.
#' @param df A data frame containing time, species, abundance and replicate columns and an optional column of treatment
#' @param time.var The name of the time column
#' @param species.var The name of the species column
#' @param abundance.var The name of the abundance column
#' @param replicate.var The name of the replicate column
#' @param treatment.var The name of the optional treatment column
#' @return The multivariate_change function returns a data frame with the following attributes:
#' \itemize{
#' \item{time.var: }{A character column that has the first of two time periods that are being compared.}
#' \item{time.var2: }{A character column that has the second of two time periods that are being compared.}
#' \item{BC_between_change: }{A numeric column that is the average pairwise Bray-Curtis dissimilarity of replicates in two consecutive time periods. 0 - The communities are similar over time. 1 - The communities are changing over time.}
#' \item{BC_within_change: }{A numeric column that is the change between two time periods in the average pairwise Bray-Curtis dissimilarity of replicates within a time period. A positive number indicates that time.var2 has greater variability in community composition than time.var}
#' \item{treatment.var: }{A column that has same name and type as the treatment.var column, if treatment.var is specified.}
#' }
#' @examples
#' data(pplots)
#' #With treatment
#' dissimilarity_change(pplots,
#' time.var="year",
#' replicate.var = "plot",
#' treatment.var = "treatment",
#' species.var = "species",
#' abundance.var = "relative_cover")
#'
#' #Without treatment
#' df <- subset(pplots, treatment == "N1P0")
#' dissimilarity_change(df,
#' time.var="year",
#' replicate.var = "plot",
#' species.var = "species",
#' abundance.var = "relative_cover")
#' @importFrom vegan vegdist
#' @importFrom stats aggregate as.formula
#' @references Avolio et al. 2015; Avolio et al. OUR PAPER, Mari Anderson?
#' @export
# Average Bray-Curtis dissimilarity change between and within consecutive
# time periods (see the roxygen block above for the full contract).
# Validates the input, then delegates to dissim_change() either on the whole
# data frame or once per treatment level.
dissimilarity_change <- function(df, time.var, species.var, abundance.var, replicate.var, treatment.var = NULL){
  # check no NAs in abundance column
  if(any(is.na(df[[abundance.var]]))) stop("Abundance column contains missing values")
  # check unique species x time x replicate combinations
  check_single(df, time.var, species.var, replicate.var)
  df <- as.data.frame(df)
  if (is.null(treatment.var)) {
    output <- dissim_change(df, time.var, species.var, abundance.var, replicate.var)
  } else {
    # calculate change separately within each treatment level
    splitvars <- treatment.var
    X <- split(df, df[splitvars])
    out <- lapply(X, FUN = dissim_change, time.var, species.var, abundance.var, replicate.var)
    # repeat each treatment label once per result row so the labels can be
    # re-attached after row-binding the per-treatment tables
    unsplit <- lapply(out, nrow)
    unsplit <- rep(names(unsplit), unsplit)
    output <- do.call(rbind, c(out, list(make.row.names = FALSE)))
    output[splitvars] <- do.call(rbind, as.list(unsplit))
  }
  # order the columns: time, time2, (treatment), between change, within change
  output_order <- c(
    time.var,
    paste0(time.var, "2"),
    treatment.var,
    'BC_between_change', 'BC_within_change')
  return(output[intersect(output_order, names(output))])
}
############################################################################
#
# Private functions: these are internal functions not intended for reuse.
# Future package releases may change these without notice. External callers
# should not use them.
#
############################################################################
# A function calculate the Bray-Curtis dissimilarity change between consequtive time periods and dispersion change (the difference in the average dispersion of the replicates around the centriod for the two consecutive time periods). For dispersion change a negative value indicates replicates are converging over time and a postive value indicates replicates are diverging over time.
# @param df a dataframe
# @param time.var the name of the time column
# @param species.var the name of the species column
# @param replicate.var the name of the replicate column
# Average Bray-Curtis dissimilarity between and within consecutive time
# periods, for one (already treatment-subset) community data frame.
# Returns one row per consecutive time-period pair with BC_between_change
# (mean pairwise dissimilarity of replicates across the two periods) and
# BC_within_change (difference in the mean within-period dissimilarity).
# @param df a dataframe
# @param time.var the name of the time column
# @param species.var the name of the species column
# @param replicate.var the name of the replicate column
dissim_change <- function(df, time.var, species.var, abundance.var, replicate.var) {
  df2<-subset(df, select = c(time.var, species.var, abundance.var, replicate.var))
  # one sample id per time x replicate; assumes "##" occurs in neither value
  df2$id <- paste(df2[[time.var]], df2[[replicate.var]], sep="##")
  species <- codyn:::transpose_community(df2, 'id', species.var, abundance.var)
  bc <- as.data.frame(as.matrix(vegdist(species, method="bray")))
  # extract the lower triangle as (row id, column id, dissimilarity) triples
  bc2 <- as.data.frame(cbind(rownames(bc)[which(lower.tri(bc), arr.ind=T)[,1]],
                             colnames(bc)[which(lower.tri(bc), arr.ind=T)[,2]],
                             bc[lower.tri(bc)]))
  # split the "time##replicate" ids back into their components
  c1 <- as.data.frame(do.call('rbind', strsplit(as.character(bc2$V1), "##", fixed = T)))
  c2 <- as.data.frame(do.call('rbind', strsplit(as.character(bc2$V2), "##", fixed = T)))
  bc3 <- cbind(bc2, c1, c2)
  bc3$bc_dissim <- as.numeric(as.character(bc3$V3))
  colnames(bc3)[4] <- paste(time.var, 2, sep="")
  colnames(bc3)[6] <- time.var
  # compare == 1: both replicates from the same period; == 2: different periods
  bc3$compare <- ifelse(bc3[[time.var]] == bc3[[paste(time.var, 2, sep="")]], 1, 2)
  # within-time differences: mean pairwise dissimilarity per period
  bc_within <- subset(bc3, compare == 1)
  myformula <- as.formula(paste("bc_dissim", "~", time.var))
  bc_within_ave <- aggregate(myformula, mean, data=bc_within)
  colnames(bc_within_ave)[2] <- "BC_dissim_within"
  # between-time differences: mean pairwise dissimilarity per period pair
  bc_between <- subset(bc3, compare == 2)
  myformula2 <- as.formula(paste("bc_dissim", "~", time.var, "+", paste(time.var, 2, sep = "")))
  bc_between_ave <- aggregate(myformula2, mean, data=bc_between)
  colnames(bc_between_ave)[3] <- "BC_between_change"
  # keep only consecutive time periods (difference of 1 in factor order)
  bc_between_ave$yr1 <- as.integer(as.factor(bc_between_ave[[time.var]]))
  bc_between_ave$yr2 <- as.integer(as.factor(bc_between_ave[[paste(time.var, 2, sep = "")]]))
  bc_between_ave$diff <- bc_between_ave$yr2 - bc_between_ave$yr1
  bc_between_ave2 <- subset(bc_between_ave, diff==1)
  bc_between_ave2$yr1 <- NULL
  bc_between_ave2$yr2 <- NULL
  bc_between_ave2$diff <- NULL
  # merge in the within-time averages for both periods of each comparison,
  # then take their difference
  bc_dis1 <- merge(bc_between_ave2, bc_within_ave, by = time.var)
  bc_dis <- merge(bc_dis1, bc_within_ave, by.x = paste(time.var, 2, sep = ""), by.y = time.var)
  bc_dis$BC_within_change <- bc_dis$BC_dissim_within.y - bc_dis$BC_dissim_within.x
  bc_dis$BC_dissim_within.x <- NULL
  bc_dis$BC_dissim_within.y <- NULL
  return(bc_dis)
}
|
516629825fd3ca15f90ffd077920fc62777ddcb8
|
731a24a20ad268418747f77d2f1ac596fead1b78
|
/Chapter3.R
|
f8230b1d6db1c4f0cc875f6b1a252c2a0c312d7c
|
[] |
no_license
|
zjardyn/RforDataScience
|
1a8558aef2387ae7c3593e23ae7d76da47a7e729
|
fc6909e506b2d2a09db05690be14db3a9de03489
|
refs/heads/main
| 2023-05-31T19:22:53.932789
| 2021-05-28T22:14:09
| 2021-05-28T22:14:09
| 366,436,451
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,664
|
r
|
Chapter3.R
|
# R for Data Science, Chapter 3 (data transformation with dplyr):
# worked examples and exercises on filter() and missing values.
# answers to exercises: https://jrnold.github.io/r4ds-exercise-solutions/transform.html#exercise-5.6.1
# install.packages("nycflights13")
library(nycflights13)
library(tidyverse)
# stats::filter()
# dplyr::filter()
# NOTE(review): attach() is discouraged (masks names in the search path);
# the dplyr verbs below reference columns via data masking anyway.
attach(flights)
flights
# int, dbl, chr, dttm, lgl, fctr, date
# dplyr basics: filter() observations, arrange() rows, select() variables, mutate() new variables, summarise() many values down, as well as group_by() (changes scope of each function)
# select all flights on January 1st
filter(flights, month == 1, day == 1)
# assign it
jan1 <- filter(flights, month == 1, day == 1)
# R either prints results or saves them as a variable, this does both
(dec25 <- filter(flights, month == 12, day == 25))
# comparisons — this call errors on purpose (`=` used instead of `==`)
filter(flights, month = 1)
# dbl (float) doesn't equal int
sqrt(2) ^ 2 == 2
1 / 49 * 49 == 1
near(sqrt(2) ^ 2, 2)
near(1 / 49 * 49, 1)
# booleans
# November or December
filter(flights, month == 11 | month == 12)
# equivalent
nov_dec <- filter(flights, month %in% c(11, 12))
# De Morgan's Law: !(x & y) is the same as !x | !y, and !(x | y) is the same as !x & !y.
filter(flights, !(arr_delay > 120 | dep_delay > 120))
filter(flights, arr_delay <= 120, dep_delay <= 120)
# missing values: NA propagates through comparisons and arithmetic
NA > 5
10 == NA
NA + 10
NA / 2
NA == NA
# x and ys age. we don't know them.
x <- NA
y <- NA
# therefore we don't know if their ages are equal!
x == y
is.na(x)
df <- tibble(x = c(1, NA, 3))
filter(df, x > 1)
filter(df, is.na(x) | x > 1)
# Exercises 1 ----
# flights
colnames(flights)
filter(flights, arr_delay >= 2)
filter(flights, dest %in% c("IAH", "HOU"))
unique(flights$carrier)
filter(flights, carrier %in% c("UA", "AA", "DL"))
filter(flights, month %in% 7:9)
# De morgan's law
(x <- filter(flights, arr_delay >= 120 & dep_delay <= 0))
(y <- filter(flights, !(arr_delay < 120 | dep_delay > 0)))
all.equal(x, y)
# checks out!
filter(flights, dep_delay >= 60, dep_delay > arr_delay + 30)
flights$dep_time
filter(flights, dep_time %in% 0:600)
?between
# between() gives the same result as %in% on a contiguous range
(x <- filter(flights, month %in% 7:9))
(y <- filter(flights, between(month, 7,9)))
all.equal(x,y)
(x <- filter(flights, dep_time %in% 0:600))
(y <- filter(flights, between(dep_time, 0, 600)))
all.equal(x,y)
sum(is.na(flights$dep_time))
apply(flights, 2, function(x) sum(is.na(x)))
# canceled flights?
# special cases where NA does NOT propagate (answer is known regardless of NA)
NA ^ 0
NA | TRUE
FALSE & NA
TRUE & NA
NA * 0
# ----
# Chapter 3 continued: arrange() (row ordering) and select() (column choice).
# sort rows with arrange
arrange(flights, year, month, day)
arrange(flights, desc(dep_delay))
df <- tibble(x = c(5, 2, NA))
# missing values at the end
arrange(df, x)
arrange(df, desc(x))
# Exercises 2 ----
# sort NAs to the top by ordering on is.na() first
arrange(df, desc(is.na(x)))
colnames(flights)
arrange(flights, desc(dep_delay))
arrange(flights, dep_delay)
# v = d/t (fastest flights first)
arrange(flights, desc(distance/air_time))
arrange(flights, desc(distance))
arrange(flights, distance)
# ----
# select columns
colnames(flights)
select(flights, year, month, day)
# slice
select(flights, year:day)
# everything but
select(flights, -(year:day))
# helper funcs
# starts_with
# ends_with
# contains
# matches
# num_range
# renaming
rename(flights, tail_num = tailnum)
# select a few first, then the rest
select(flights, time_hour, air_time, everything())
# Exercises 3 ----
select(flights, dep_time, dep_delay, arr_time, arr_delay)
select(flights, starts_with('dep'), starts_with('arr'))
# duplicate selections are silently collapsed to one column
select(flights, dep_time, dep_time)
vars <- c("year", "month", "day", "dep_delay", "arr_delay")
select(flights, any_of(vars))
# contains() is case-insensitive by default
select(flights, contains("TIME"))
?contains
select(flights, contains("TIME", ignore.case = F))
# ----
# Chapter 3 continued: mutate()/transmute() (derived variables) and
# vectorized helper functions (modular arithmetic, offsets, ranks).
# add new vars with mutate
flights_sml <- select(flights,
year:day,
ends_with("delay"),
distance,
air_time
)
flights_new <- mutate(flights_sml,
gain = dep_delay - arr_delay,
speed = distance / air_time * 60
)
View(flights_new)
# newly created columns can be referenced immediately (gain -> gain_per_hour)
mutate(flights_sml,
gain = dep_delay - arr_delay,
hours = air_time / 60,
gain_per_hour = gain / hours
)
# transmute() keeps only the newly created columns
transmute(flights,
gain = dep_delay - arr_delay,
hours = air_time / 60,
gain_per_hour = gain / hours
)
# must be vectorized: input is vector, output is vector of same length
# arithmetic operators: +, -, *, /, ^, are vectorized as they use "recycling"
# modular arithmetic: %/% (integer division) and %% (remainder)
transmute(flights,
dep_time,
hour = dep_time %/% 100,
minute = dep_time %% 100)
# logs: log(), log2(), log10(), transforming across magnitudes. log2 is most interpretable
# offsetting
(x <- 1:10)
lag(x)
lead(x)
# Cumulative and rolling aggregates:cumsum(), cumprod(), cummin(), cummax(), dplyr::cummean()
x
cumsum(x)
cummean(x)
y <- c(1, 2, 2, NA, 3, 4)
min_rank(y)
min_rank(desc(y))
# also try row_number(), dense_rank(), percent_rank(), cume_dist(), ntile()
y
row_number(y)
dense_rank(y)
percent_rank(y)
cume_dist(y)
# Exercises 4 ----
flights_new <- select(flights,
dep_time,
sched_dep_time,
air_time,
arr_time,
dep_delay)
# convert HHMM clock times to minutes since midnight
flights_new <- mutate(flights_new,
dep_min_midn = (dep_time %/% 100) * 60 + dep_time %% 100,
sched_dep_min_midn = (sched_dep_time %/% 100) * 60 + sched_dep_time %% 100)
# naive subtraction of clock times does NOT equal air_time
transmute(flights_new,
air_time,
air_time2 = arr_time - dep_time)
flights_new <- mutate(flights_new,
arr_min_midn = (arr_time %/% 100) * 60 + arr_time %% 100,
)
flights_new <- mutate(flights_new,
flight_time = arr_min_midn - dep_min_midn)
select(flights_new, air_time, flight_time)
sum(flights_new$air_time == flights_new$flight_time, na.rm = TRUE)
# clock format, difference is in dep_delay
select(flights_new, dep_time, sched_dep_time, dep_delay)
# min_rank() is equivalent to rank() method with the argument ties.method = 'min'
head(arrange(flights_new, min_rank(desc(dep_delay))), 10)
?min_rank
# recycling — warns because 3 does not divide 10
1:3 + 1:10
?sin
# ----
# Chapter 3 continued: summarise() with group_by(), the pipe, counts,
# and useful summary functions.
# group with summarise, collapse down to a single row
summarise(flights, delay = mean(dep_delay, na.rm = T))
# pair it with group by
by_day <- group_by(flights, year, month, day)
summarise(by_day, delay = mean(dep_delay, na.rm = TRUE))
by_dest <- group_by(flights, dest)
delay <- summarise(by_dest,
count = n(),
dist = mean(distance, na.rm = TRUE),
delay = mean(arr_delay, na.rm = TRUE)
)
delay <- filter(delay, count > 20, dest != "HNL")
ggplot(data = delay, mapping = aes(x = dist, y = delay)) +
geom_point(aes(size = count), alpha = 1/3) +
geom_smooth(se = FALSE)
# group by dest then summarise then filter noise, all in one with pipe
delays <- flights %>%
group_by(dest) %>%
summarise(
count = n(),
dist = mean(distance, na.rm = TRUE),
delay = mean(arr_delay, na.rm = TRUE)
) %>%
filter(count > 20, dest != "HNL")
# without na.rm, NA delays propagate into every group mean
flights %>%
group_by(year, month, day) %>%
summarise(mean = mean(dep_delay))
flights %>%
group_by(year, month, day) %>%
summarise(mean = mean(dep_delay, na.rm = TRUE))
# save new dataset
not_cancelled <- flights %>%
filter(!is.na(dep_delay), !is.na(arr_delay))
not_cancelled %>%
group_by(year, month, day) %>%
summarise(mean = mean(dep_delay))
# include counts in aggregates
delays <- not_cancelled %>%
group_by(tailnum) %>%
summarise(
delay = mean(arr_delay)
)
ggplot(data = delays, mapping = aes(x = delay)) +
geom_freqpoly(binwidth = 10)
# number of flights vs average delay
delays <- not_cancelled %>%
group_by(tailnum) %>%
summarise(
delay = mean(arr_delay, na.rm = TRUE),
n = n()
)
# variation decrease as n increases
ggplot(data = delays, mapping = aes(x = n, y = delay)) +
geom_point(alpha = 1/10)
delays %>%
filter(n > 25) %>%
ggplot(mapping = aes(x = n, y = delay)) +
geom_point(alpha = 1/10)
# same pattern
# Convert to a tibble so it prints nicely
# install.packages("Lahman")
library(Lahman)
batting <- as_tibble(Lahman::Batting)
batters <- batting %>%
group_by(playerID) %>%
summarise(
ba = sum(H, na.rm = TRUE) / sum(AB, na.rm = TRUE),
ab = sum(AB, na.rm = TRUE)
)
batters %>%
filter(ab > 100) %>%
ggplot(mapping = aes(x = ab, y = ba)) +
geom_point() +
geom_smooth(se = FALSE)
batters %>%
arrange(desc(ba))
# useful summary funcs
not_cancelled %>%
group_by(year, month, day) %>%
summarise(
avg_delay1 = mean(arr_delay),
avg_delay2 = mean(arr_delay[arr_delay > 0]) # the average positive delay
)
# measures of spread: sd(x), IQR(x), mad(x)
not_cancelled %>%
group_by(dest) %>%
summarise(distance_sd = sd(distance)) %>%
arrange(desc(distance_sd))
# rank measures: min(x), quantile(x, 0.25), max(x)
not_cancelled %>%
group_by(year, month, day) %>%
summarise(
first = min(dep_time),
last = max(dep_time)
)
# measures of position: first(x), nth(x, 2), last(x), similar to indexing
not_cancelled %>%
group_by(year, month, day) %>%
summarise(
first_dep = first(dep_time),
last_dep = last(dep_time)
)
not_cancelled %>%
group_by(year, month, day) %>%
mutate(r = min_rank(desc(dep_time))) %>%
filter(r %in% range(r))
# To count the number of non-missing values, use sum(!is.na(x))
not_cancelled %>%
group_by(dest) %>%
summarise(carriers = n_distinct(carrier)) %>%
arrange(desc(carriers))
not_cancelled %>%
count(dest)
# wt = distance turns the count into a sum of distances ("miles flown")
not_cancelled %>%
count(tailnum, wt = distance)
# How many flights left before 5am? (these usually indicate delayed
# flights from the previous day)
not_cancelled %>%
group_by(year, month, day) %>%
summarise(n_early = sum(dep_time < 500))
# What proportion of flights are delayed by more than an hour?
not_cancelled %>%
group_by(year, month, day) %>%
summarise(hour_prop = mean(arr_delay > 60))
# each summarise() peels off one level of grouping
daily <- group_by(flights, year, month, day)
(per_day <- summarise(daily, flights = n()))
(per_month <- summarise(per_day, flights = sum(flights)))
(per_year <- summarise(per_month, flights = sum(flights)))
daily %>%
ungroup() %>% # no longer grouped by date
summarise(flights = n()) # all flights
# Exercises 5 ----
# flight delay is costly to passengers, arrival delay is more costly because it can impact later stages of travel. if arrival delay doesnt impact departure delay then it wont matter.
# count() is shorthand for group_by() + summarise(n = n())
not_cancelled %>%
count(dest)
not_cancelled %>%
group_by(dest) %>%
summarise(n=n())
# count() counts number of instances within each group of vars
not_cancelled %>%
count(tailnum, wt=distance)
not_cancelled %>%
group_by(tailnum) %>%
summarise(n=sum(distance))
apply(flights, 2, function(x) sum(is.na(x)))
# air time, arr_delay
# cancelled flights per day vs total flights per day
cancelled_per_day <- flights %>%
mutate(cancelled = (is.na(arr_delay) | is.na(dep_delay))) %>%
group_by(year, month, day) %>%
summarise(
cancelled_num = sum(cancelled),
flights_num = n(),
)
ggplot(cancelled_per_day) +
geom_point(aes(x = flights_num, y = cancelled_num))
# cancellation proportion vs average delay per day
cancelled_and_delays <-
flights %>%
mutate(cancelled = (is.na(arr_delay) | is.na(dep_delay))) %>%
group_by(year, month, day) %>%
summarise(
cancelled_prop = mean(cancelled),
avg_dep_delay = mean(dep_delay, na.rm = TRUE),
avg_arr_delay = mean(arr_delay, na.rm = TRUE)
) %>%
ungroup()
ggplot(cancelled_and_delays) +
geom_point(aes(x = avg_dep_delay, y = cancelled_prop))
ggplot(cancelled_and_delays) +
geom_point(aes(x = avg_arr_delay, y = cancelled_prop))
# worst-delayed carrier overall
flights %>%
group_by(carrier) %>%
summarise(arr_delay = mean(arr_delay, na.rm = TRUE)) %>%
arrange(desc(arr_delay))
filter(airlines, carrier == "F9")
# disentangle carrier effect from airport/route effect
flights %>%
filter(!is.na(arr_delay)) %>%
# Total delay by carrier within each origin, dest
group_by(origin, dest, carrier) %>%
summarise(
arr_delay = sum(arr_delay),
flights = n()
) %>%
# Total delay within each origin dest
group_by(origin, dest) %>%
mutate(
arr_delay_total = sum(arr_delay),
flights_total = sum(flights)
) %>%
# average delay of each carrier - average delay of other carriers
ungroup() %>%
mutate(
arr_delay_others = (arr_delay_total - arr_delay) /
(flights_total - flights),
arr_delay_mean = arr_delay / flights,
arr_delay_diff = arr_delay_mean - arr_delay_others
) %>%
# remove NaN values (when there is only one carrier)
filter(is.finite(arr_delay_diff)) %>%
# average over all airports it flies to
group_by(carrier) %>%
summarise(arr_delay_diff = mean(arr_delay_diff)) %>%
arrange(desc(arr_delay_diff))
flights %>%
count(dest, sort = TRUE)
# ----
# Chapter 3 continued: grouped mutates/filters, and which vectorized
# functions are affected by group_by().
# grouped mutates
# worst members
flights_sml %>%
group_by(year, month, day) %>%
filter(rank(desc(arr_delay)) < 10)
# threshold
popular_dests <- flights %>%
group_by(dest) %>%
filter(n() > 365)
popular_dests
popular_dests %>%
filter(arr_delay > 0) %>%
mutate(prop_delay = arr_delay / sum(arr_delay)) %>%
select(year:day, dest, arr_delay, prop_delay)
# Exercises 6
# summary funcs: mean(), lead(), lag(), min_rank(), row_number(), in combination with group_by( ) in a mutate or filter
a_tibble <- tibble(x = 1:9,
group = rep(c("a", "b", "c"), each = 3))
# mean() IS affected by grouping: per-group mean differs from overall mean
a_tibble %>%
mutate(x_mean = mean(x)) %>%
group_by(group) %>%
mutate(x_mean_2 = mean(x))
# operators not affected by group_by()
a_tibble %>%
mutate(y = x + 2) %>%
group_by(group) %>%
mutate(z = x + 2)
a_tibble %>%
mutate(y = x %% 2) %>%
group_by(group) %>%
mutate(z = x %% 2)
a_tibble %>%
mutate(y = log(x)) %>%
group_by(group) %>%
mutate(z = log(x))
# NOTE(review): `y` is not a column of a_tibble — this picks up whatever
# `y` currently exists in the calling environment (data-masking fallback)
a_tibble %>%
mutate(x_lte_y = x <= y) %>%
group_by(group) %>%
mutate(x_lte_y_2 = x <= y)
tibble(x = runif(9),
group = rep(c("a", "b", "c"), each = 3)) %>%
group_by(group) %>%
arrange(x)
# are affected
a_tibble %>%
group_by(group) %>%
mutate(lag_x = lag(x),
lead_x = lead(x))
a_tibble %>%
mutate(x_cumsum = cumsum(x)) %>%
group_by(group) %>%
mutate(x_cumsum_2 = cumsum(x))
a_tibble %>%
mutate(rnk = min_rank(x)) %>%
group_by(group) %>%
mutate(rnk2 = min_rank(x))
tibble(group = rep(c("a", "b", "c"), each = 3),
x = runif(9)) %>%
group_by(group) %>%
arrange(x) %>%
mutate(lag_x = lag(x))
# Exercises 6 continued: per-plane records, best hour to fly, delay
# proportions, lagged delays, standardized air times, carrier ranking.
# worst on-time record
flights %>%
filter(!is.na(tailnum)) %>%
mutate(on_time = !is.na(arr_time) & (arr_delay <= 0)) %>%
group_by(tailnum) %>%
summarise(on_time = mean(on_time), n = n()) %>%
filter(min_rank(on_time) == 1)
quantile(count(flights, tailnum)$n)
# same question, restricted to planes with at least 20 flights
flights %>%
filter(!is.na(tailnum), is.na(arr_time) | !is.na(arr_delay)) %>%
mutate(on_time = !is.na(arr_time) & (arr_delay <= 0)) %>%
group_by(tailnum) %>%
summarise(on_time = mean(on_time), n = n()) %>%
filter(n >= 20) %>%
filter(min_rank(on_time) == 1)
flights %>%
filter(!is.na(arr_delay)) %>%
group_by(tailnum) %>%
summarise(arr_delay = mean(arr_delay), n = n()) %>%
filter(n >= 20) %>%
filter(min_rank(desc(arr_delay)) == 1)
# best hour to fly
flights %>%
group_by(hour) %>%
summarise(arr_delay = mean(arr_delay, na.rm = TRUE)) %>%
arrange(arr_delay)
# each flight's share of its destination's total delay
flights %>%
filter(arr_delay > 0) %>%
group_by(dest) %>%
mutate(
arr_delay_total = sum(arr_delay),
arr_delay_prop = arr_delay / arr_delay_total
) %>%
select(dest, month, day, dep_time, carrier, flight,
arr_delay, arr_delay_prop) %>%
arrange(dest, desc(arr_delay_prop))
# lag
lagged_delays <- flights %>%
arrange(origin, month, day, dep_time) %>%
group_by(origin) %>%
mutate(dep_delay_lag = lag(dep_delay)) %>%
filter(!is.na(dep_delay), !is.na(dep_delay_lag))
lagged_delays %>%
group_by(dep_delay_lag) %>%
summarise(dep_delay_mean = mean(dep_delay)) %>%
ggplot(aes(y = dep_delay_mean, x = dep_delay_lag)) +
geom_point() +
scale_x_continuous(breaks = seq(0, 1500, by = 120)) +
labs(y = "Departure Delay", x = "Previous Departure Delay")
lagged_delays %>%
group_by(origin, dep_delay_lag) %>%
summarise(dep_delay_mean = mean(dep_delay)) %>%
ggplot(aes(y = dep_delay_mean, x = dep_delay_lag)) +
geom_point() +
facet_wrap(~ origin, ncol=1) +
labs(y = "Departure Delay", x = "Previous Departure Delay")
# suspicious flights: standardize air_time within each route
# (+ 1 in the denominator guards against zero standard deviation)
standardized_flights <- flights %>%
filter(!is.na(air_time)) %>%
group_by(dest, origin) %>%
mutate(
air_time_mean = mean(air_time),
air_time_sd = sd(air_time),
n = n()
) %>%
ungroup() %>%
mutate(air_time_standard = (air_time - air_time_mean) / (air_time_sd + 1))
ggplot(standardized_flights, aes(x = air_time_standard)) +
geom_density()
standardized_flights %>%
arrange(air_time_standard) %>%
select(
carrier, flight, origin, dest, month, day,
air_time, air_time_mean, air_time_standard
) %>%
head(10) %>%
print(width = Inf)
# ranking carriers
flights %>%
# find all airports with > 1 carrier
group_by(dest) %>%
mutate( n_carriers = n_distinct(carrier)) %>%
filter(n_carriers > 1) %>%
# rank carriers by number of destinations
group_by(carrier) %>%
summarize(n_dest = n_distinct(dest)) %>%
arrange(desc(n_dest))
filter(airlines, carrier == "EV")
filter(airlines, carrier %in% c("AS", "F9", "HA"))
# For each plane, count the number of flights before the first delay of greater than 1 hour.
flights %>%
# sort in increasing order
select(tailnum, year, month,day, dep_delay) %>%
filter(!is.na(dep_delay)) %>%
arrange(tailnum, year, month, day) %>%
group_by(tailnum) %>%
# cumulative number of flights delayed over one hour
mutate(cumulative_hr_delays = cumsum(dep_delay > 60)) %>%
# count the number of flights == 0
summarise(total_flights = sum(cumulative_hr_delays < 1)) %>%
arrange(total_flights)
|
d73658803e74f846628c49c62a8e152b964dbf69
|
07d83acd9d6a3ddb8a2fe4bb811db4801c3f40de
|
/man/mutate_microsats.Rd
|
73d91c73412a3477ddcd63848b22fff1ff2f086d
|
[] |
no_license
|
andersgs/microsimr
|
da152a68a065419b4cc0b02d337f152227d07ee3
|
4def264efff266c0787a30b154f7f42d9c560808
|
refs/heads/master
| 2021-01-19T01:50:24.757285
| 2016-07-08T03:43:09
| 2016-07-08T03:43:09
| 30,562,400
| 1
| 1
| null | 2016-07-08T03:43:10
| 2015-02-09T22:34:32
|
R
|
UTF-8
|
R
| false
| true
| 2,172
|
rd
|
mutate_microsats.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mutate_microsats.R
\name{mutate_microsats}
\alias{mutate_microsats}
\title{A function to mutate microsatellites}
\usage{
mutate_microsats(n_mutations, mutation_model = "smm", p_single = 0.8,
sigma2 = 50)
}
\arguments{
\item{n_mutations}{A vector indicating the total number of mutations along
each branch of a tree}
\item{mutation_model}{A character string indicating the mutation model to use.
Currently, only the strict stepwise mutation model of Ohta and Kimura (1973) ('smm'),
and the DiRienzo et al. (1994) two-phase model ('tpm') are implemented.
Default is 'smm'}
\item{p_single}{Probability of a single-step mutation to be used in the 'tpm' model}
\item{sigma2}{Variance in allele size to be used in the 'tpm' model}
}
\description{
A function to mutate microsatellites
}
\details{
A tree is first simulated using 'ms'. Mutations are simulated along the
branches of the tree following a Poisson distribution with lambda proportional
to branch length times theta (4Nmu). This first part is done in \link{sim_microsats}.
Here, the number of mutations at each branch is transformed to either a loss
or gain in number of repeats. In the 'smm' model, each mutation represents either
a loss or gain of a single repeat unit with equal probability. In the 'tpm'
model, with probability \code{p_single} a mutation represents either a
gain or loss of a single repeat, and with probability (1 - \code{p_single})
the gain/loss is larger following a symmetric geometric distribution.
Please refer to the vignette to see a deeper explanation, and test of each
model.
}
\references{
Di Rienzo, A., Peterson, A. C., Garza, J. C., Valdes, A. M., Slatkin, M., & Freimer, N. B. (1994). Mutational processes of simple-sequence repeat loci in human populations. Proceedings of the National Academy of Sciences of the United States of America, 91(8), 3166–3170.
Ohta, T., & Kimura, M. (2007). A model of mutation appropriate to estimate the number of electrophoretically detectable alleles in a finite population. Genetical Research, 89(5-6), 367–370. http://doi.org/10.1017/S0016672308009531
}
|
f8896fccf4926f58ca8edba6ed3e3b411bdd580e
|
4970340c008543a88393b65601a4fab7f3d1d7db
|
/Linear model (Ap_density, APG, Ap_parasitized,SF, Biomass and PR)/Some linear models (Biomass) .R
|
72bc15d1188132b84380bcb1249f6078be9e9dba
|
[] |
no_license
|
Drissmiele/Group-project
|
33c44a8f3a466892628214188e53d70faf83ac60
|
753375e30f97d6760781e5146a91f77e24a6d89b
|
refs/heads/main
| 2023-02-27T06:55:34.394807
| 2021-02-05T14:44:29
| 2021-02-05T14:44:29
| 313,594,637
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,748
|
r
|
Some linear models (Biomass) .R
|
# Build DataOG from "Project data.csv" with derived variables (APG,
# parasitism_rate, syrphid_fraction), then compare simple linear models
# of final plant biomass by AIC.
# creation of DataOG with all variables
Data1 <- read.table("Project data.csv", header = TRUE, dec = ",", sep = ";")
Data1$APG <- NA
# APG = log-scale aphid population growth per day; Date codes 1/2/3
# correspond to divisors 10/20/30 (days elapsed, presumably — TODO confirm)
A <- Data1$Date == "1"
Data1[A, "APG"] <- ((log(Data1$aphid_live[A] + 1) - log(Data1$aphidsinoculated_init[A] + 1))/10)
A <- Data1$Date == "2"
Data1[A, "APG"] <- ((log(Data1$aphid_live[A] + 1) - log(Data1$aphidsinoculated_init[A] + 1))/20)
A <- Data1$Date == "3"
Data1[A, "APG"] <- ((log(Data1$aphid_live[A] + 1) - log(Data1$aphidsinoculated_init[A] + 1))/30)
DataOG <- Data1
# proportion of aphids that are parasitized
parasitism_rate <- (Data1$aphid_parasitized/(Data1$aphid_live + Data1$aphid_parasitized))
DataOG$parasitism_rate <- parasitism_rate
# fraction of syrphid larvae/pupae relative to live aphids + syrphids
syrphid_fraction <- (Data1$syrphidl_p / ((Data1$aphid_live) + (Data1$syrphidl_p)))
DataOG$syrphid_fraction <- syrphid_fraction
# conversion of character vectors into numeric vectors !
# NOTE(review): columns are selected by position (2, 3, 11, 13) and the
# factor -> numeric conversion replaces labels with integer level codes;
# verify this matches the intended encoding.
B <- lapply(DataOG[c(2,3,11,13)], as.factor)
B <- lapply(B, as.numeric)
DataOG[c(2,3,11,13)] <- B
# convert NaN to 0 (defines an is.nan method for data frames so that
# is.nan(DataOG) returns a logical matrix)
is.nan.data.frame <- function(x)
do.call(cbind, lapply(x, is.nan))
DataOG[is.nan(DataOG)] <- 0
# convert Inf to 1 (same S3-method trick for is.infinite)
is.infinite.data.frame <- function(y)
do.call(cbind, lapply(y, is.infinite))
DataOG[is.infinite(DataOG)] <- 1
# single-predictor linear models of final biomass, compared by AIC below
lmM1 <- lm(DataOG$Biomass_fin ~ DataOG$aphid_live)
lmM2 <- lm(DataOG$Biomass_fin ~ DataOG$APG)
lmM3 <- lm(DataOG$Biomass_fin ~ DataOG$syrphidl_p)
lmM4 <- lm(DataOG$Biomass_fin ~ DataOG$syrphid_fraction)
lmM5 <- lm(DataOG$Biomass_fin ~ DataOG$aphid_parasitized)
lmM6 <- lm(DataOG$Biomass_fin ~ DataOG$parasitism_rate)
AIC(lmM1, lmM2, lmM3, lmM4, lmM5, lmM6)
# recorded AIC results (lower = better fit):
# df AIC
# lmM1 3 163970.1 #rank 6
# lmM2 3 163957.9 #rank 4
# lmM3 3 163579.3 #rank 1
# lmM4 3 163790.5 #rank 2
# lmM5 3 163954.1 #rank 3
# lmM6 3 163969.3 #rank 5
|
63f6c1585d21810aebd6c3bda334943f997c138c
|
cfe15da6b9fdf5529f1312f3a7af32f6306736a2
|
/load_supervised_data_variableselection.R
|
37dbcec3162372f23de4aafb9602aa9b92570d09
|
[] |
no_license
|
R-Techy-work/GMM
|
c343d4d67723751776ef22e7451f0a87801790b5
|
459bba70e5e0fde0a3f7e456343c9b2f712a8fb0
|
refs/heads/main
| 2023-02-24T05:27:02.260887
| 2021-02-04T04:02:02
| 2021-02-04T04:02:02
| 335,833,175
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 199
|
r
|
load_supervised_data_variableselection.R
|
# Load supervised training data for variable selection.
#
# Input:  "data.csv" in the working directory. The first CSV column is used
#         as row names, the next column is the response, and all remaining
#         columns are predictors.
# Output: data - the full data frame read from data.csv
#         y    - one-column response matrix (Y of training data)
#         X    - predictor matrix (X of training data)
data <- read.csv("data.csv", row.names = 1)
y <- as.matrix(data[1])
# NOTE: assumes at least two data columns; with a single column,
# 2:ncol(data) would wrap around to c(2, 1).
X <- as.matrix(data[2:ncol(data)])
|
d339a28b64b20e9765573e5a90439f0fb2fb0c09
|
dea748ddba3f7052d788c0d235e2dcb0a491e1b1
|
/R/deprecated.R
|
4e5aa6259c69e5724c639263b4b0062b9b8b0100
|
[
"MIT"
] |
permissive
|
dernst/rparso
|
45bb0b7420f07304411fd14f25cb5dda5bb22bbe
|
56128d12895192bcd36f61831250a222a9fa7456
|
refs/heads/master
| 2020-04-07T23:27:54.482244
| 2018-11-23T10:04:15
| 2018-11-23T10:04:15
| 158,813,782
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,622
|
r
|
deprecated.R
|
# Deprecated experimental code for reading sas7bdat files through the
# parso Java library via rJava. The entire block is wrapped in if(FALSE)
# so nothing here ever executes; it is kept for reference only.
# NOTE(review): the j_* helpers below accept an `obj` argument but ignore
# it and call .jcall on the global `tst` instead — a bug frozen in this
# dead code. Hard-coded local file paths also make it non-portable.
if(FALSE) {
# thin wrappers around the Java BulkRead object's methods
j_read_chunk = function(obj)
.jcall(tst, "I", "read_chunk")
j_get_ints = function(obj, col)
.jcall(tst, "[I", "getInts", as.integer(col-1L))
j_get_strings = function(obj, col)
.jcall(tst, "[Ljava/lang/String;", "getStrings", as.integer(col-1L))
j_get_string = function(obj, col)
.jcall(tst, "Ljava/lang/String;", "getString", as.integer(col-1L))
# chunked read of a sas7bdat file into a list of column lists
read_parso = function(filename) {
options(java.parameters = "-Xmx2048m")
library(rparso)
library(rJava)
library(data.table)
# NOTE(review): ignores `filename` and uses a hard-coded path
tst = .jnew("de/misc/rparso/BulkRead", "/home/ernst/mnt/bvs/Daten/DBABZUG20180619/tsch.sas7bdat")
cn = .jcall(tst, "[Ljava/lang/String;", method="getColnames")
ct = .jcall(tst, "[Ljava/lang/String;", method="getColtypes")
num_rows = .jcall(tst, "J", method="getNumRows")
dataframes = list()
rows_total = 0L
#Rprof("test.prof")
print(system.time({
while(TRUE) {
rows_read = .jcall(tst, "I", "read_chunk")
if(rows_read <= 0)
break
rows_total = rows_read + rows_total
# progress output every 40960 rows
if(rows_total %% (4096*10) == 0)
cat(sprintf("%d\n", rows_total))
# pull each column of the chunk as ints or a string
lst = lapply(seq_along(ct), function(i) {
cti = ct[i]
if(cti == "java.lang.Number")
j_get_ints(tst, i)
else
j_get_string(tst, i)
#strsplit(j_get_string(tst, i), "\n", fixed=TRUE)[[1]]
})
names(lst) = cn
#dataframes[[length(dataframes)+1L]] = as.data.table(lst)
dataframes[[length(dataframes)+1L]] = lst
#xx = .jcall(tst, "[Ljava/lang/String;", "getStrings", 2L)
#xx = .jcall(tst, "[I", "getInts", 0L)
}
}))
#Rprof(NULL)
}
# scratch experiments: timing whole-file reads via the C entry points
if(FALSE) {
library(rJava)
library(rparso)
.jnew("de/misc/rparso/BulkRead", "s")
.jmethods("Ljava/io/FileInputStream;")
library(rparso)
rparso:::rparso_init()
print(system.time({
.Call("parso_read_sas", "/home/ernst/mnt/bvs/Daten/DBABZUG20180619/tsch.sas7bdat")
}))
library(rJava)
library(rparso)
rparso:::init_rparso()
tst = .jnew("de/misc/rparso/BulkRead", "/home/ernst/mnt/bvs/Daten/DBABZUG20180619/tsch.sas7bdat")
num_rows = .jcall(tst, "J", "getNumRows")
# preallocate R columns sized to the file, typed by the Java column types
xdf = lapply(.jcall(tst, "[Ljava/lang/String;", "getColtypes"), function(ct) {
if(ct == "java.lang.Number") {
integer(num_rows)
} else {
character(num_rows)
}
})
.Call("rparso_set_df", xdf)
print(system.time({
xx = .jcall(tst, "I", "read_all")
}))
x = as.data.frame(xdf)
xdf[[1]]
}
}
|
ac1e8021ff481ee525c94f8d55ded18519e30f05
|
499a61bf2c8e46b24e58818eb809117705f9c855
|
/examples/example.prob_wheel.R
|
11a765da4936979479aaea6c741d0ca961179fb0
|
[] |
no_license
|
vandenman/abtest
|
911ff5e05bac953de86b9782a967453e84eb9f79
|
2b6bbe936f1fe7983c75e569b5c1a2984d34fdc0
|
refs/heads/master
| 2023-03-01T17:25:29.481356
| 2021-11-22T07:45:35
| 2021-11-22T07:45:35
| 223,951,557
| 0
| 0
| null | 2019-11-25T13:10:17
| 2019-11-25T13:10:17
| null |
UTF-8
|
R
| false
| false
| 318
|
r
|
example.prob_wheel.R
|
# Example: probability wheels for a Bayesian A/B test.
# Requires the abtest package (ab_test(), prob_wheel() — TODO confirm
# package is attached before running).
# synthetic data: successes y and trials n for the two groups
data <- list(y1 = 10, n1 = 28, y2 = 14, n2 = 26)
# Bayesian A/B test with default settings
ab <- ab_test(data = data)
print(ab)
# visualize prior probabilities of the hypotheses
prob_wheel(ab, type = "prior")
# visualize posterior probabilities of the hypotheses
prob_wheel(ab, type = "posterior")
|
b3612ba255cafc33aba5b2d5d305f420d071d085
|
73bf596bac857c20ee91e8f79e1614b46740ccf8
|
/man/spawn_index_by_area_table.Rd
|
770b9b37705c40ad3867c8bd339d838a1ce2c63b
|
[] |
no_license
|
pbs-assess/herringutils
|
cd553ec05d1a8eb21006fc8f56b77a4b3ac40985
|
d13704a663baeaf90b23541435860c91e03f95f1
|
refs/heads/master
| 2023-09-03T02:00:38.050712
| 2023-09-01T15:31:29
| 2023-09-01T15:31:29
| 201,638,012
| 0
| 0
| null | 2023-02-16T17:28:12
| 2019-08-10T14:04:36
|
R
|
UTF-8
|
R
| false
| true
| 619
|
rd
|
spawn_index_by_area_table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tables.R
\name{spawn_index_by_area_table}
\alias{spawn_index_by_area_table}
\title{Table for spawn index by area}
\usage{
spawn_index_by_area_table(tab, cap = "", first_yr, translate = FALSE, ...)
}
\arguments{
\item{tab}{data.frame as read in by [readr::read_csv()]}
\item{cap}{caption for table}
\item{first_yr}{Earliest year to show in the table}
\item{translate}{Logical. Translate to french if TRUE}
\item{...}{arguments passed to [csas_table()]}
}
\value{
a [csasdown::csas_table()]
}
\description{
Table for spawn index by area
}
|
d154b59eb2cbb3565a0a3fd581aec4de882b62ff
|
ddf54049e171b9cf2d1fc8e70cace106c5ade846
|
/cachematrix.R
|
9ef40c634b48dc277ca5d377c25495c68bb2ee12
|
[] |
no_license
|
darioromero/Coursera
|
ae4910c01102c6149d527b8bcc3c5e3428a936b9
|
5f76890e0b3f4b19f91a6dd2dae79cc87fa495d5
|
refs/heads/master
| 2021-01-18T12:07:52.805248
| 2015-03-22T20:39:44
| 2015-03-22T20:39:44
| 32,663,931
| 0
| 0
| null | 2015-03-22T05:56:34
| 2015-03-22T05:56:34
| null |
UTF-8
|
R
| false
| false
| 1,246
|
r
|
cachematrix.R
|
## Create a "cache matrix": a list of closures around a matrix `m` and a
## cached copy of its inverse, for use with cacheSolve().
##
## author: Dario H. Romero (rprog-012, 2015-03-22)
##
## @param m a square invertible matrix (default: 1x1 NA matrix)
## @return a list of functions:
##   set(y)    - replace the stored matrix and invalidate the cached inverse
##   get()     - return the stored matrix
##   setInv(i) - store a computed inverse in the cache
##   getInv()  - return the cached inverse, or NULL if none cached yet
makeCacheMatrix <- function(m = matrix()) {
  # cached inverse; NULL until setInv() is called
  inv <- NULL
  set <- function(y) {
    m <<- y
    # the stored matrix changed, so any cached inverse is stale
    inv <<- NULL
  }
  get <- function() m
  # parameter renamed from `solve` to avoid shadowing base::solve()
  setInv <- function(inverse) inv <<- inverse
  getInv <- function() inv
  list(set = set, get = get, setInv = setInv, getInv = getInv)
}
## Return the inverse of the matrix held in `x` (a cache-matrix list as
## built by makeCacheMatrix), reusing the cached inverse when available.
##
## author: Dario H. Romero (rprog-012, 2015-03-22)
##
## @param x cache-matrix object exposing get()/getInv()/setInv()
## @param ... extra arguments forwarded to solve()
## @return the inverse of x$get()
cacheSolve <- function(x, ...) {
  cached <- x$getInv()
  if (is.null(cached)) {
    # no inverse cached yet: compute, store, and return it
    computed <- solve(x$get(), ...)
    x$setInv(computed)
    return(computed)
  }
  message("... getting cached data!!!")
  cached
}
|
be00f35e99c3f99670b8c28b9c23d85497c4b0ba
|
f6ca52564abacc865e3e0a3c8785cf347753cae1
|
/DATA_FRAMES/LAB2/L7.r
|
a82cbec9c8c20c23d6a1e34d88151ecdee318447
|
[] |
no_license
|
jabhij/DAT204x_R_DataScience
|
c7e98b4087e4a2cba08e30bdad9c0fd75a813cf0
|
a495072d3ffcdbb827c11d3032adb7e6512fa0b4
|
refs/heads/master
| 2021-01-17T08:44:18.817730
| 2016-08-11T06:41:13
| 2016-08-11T06:41:13
| 59,910,304
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 672
|
r
|
L7.r
|
# Instructions --
# The data for pluto is already there; you just have to add the appropriate
# names such that it matches the names of planets_df. You can choose how.
# Add the pluto data frame to planets_df and assign the result to
# planets_df_ext.
# Inspect the resulting data frame by printing it out.
# ----------------
# (The instruction text above was previously uncommented plain prose,
# which made this script unparseable as R; it is now commented out.)

# planets_df is pre-loaded (without the columns moon and mass)
planets_df
# Name pluto correctly
pluto <- data.frame(name = "Pluto", type = "Terrestrial planet", diameter = 0.18, rotation = -6.38, has_rings = FALSE)
# Bind planets_df and pluto together: planets_df_ext
planets_df_ext <- rbind(planets_df, pluto)
# Print out planets_df_ext
planets_df_ext
|
98443929373f5af93dd9c5734acfaed9a7addbf9
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkItemFactoryCreateItem.Rd
|
da0d78e48c8a067de87e4a779c03b7e0eb407cd7
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 896
|
rd
|
gtkItemFactoryCreateItem.Rd
|
\alias{gtkItemFactoryCreateItem}
\name{gtkItemFactoryCreateItem}
\title{gtkItemFactoryCreateItem}
\description{
Creates an item for \code{entry}.
\strong{WARNING: \code{gtk_item_factory_create_item} has been deprecated since version 2.4 and should not be used in newly-written code. Use \code{\link{GtkUIManager}} instead.}
}
\usage{gtkItemFactoryCreateItem(object, entry, callback.data = NULL,
callback.type)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkItemFactory}}}
\item{\verb{entry}}{the \code{\link{GtkItemFactoryEntry}} to create an item for}
\item{\verb{callback.data}}{data passed to the callback function of \code{entry}}
\item{\verb{callback.type}}{1 if the callback function of \code{entry} is of type
\code{\link{GtkItemFactoryCallback1}}, 2 if it is of type \code{\link{GtkItemFactoryCallback2}}}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
2c804012c0e9ad0903d0c364cf4e7250562b84ce
|
c5b0584a453517869b6c03166bb7d3b2729866fc
|
/man/retrieval.time.Rd
|
973addc2b35f607f696b8f1faa5d5b9fcb445fdd
|
[
"MIT"
] |
permissive
|
nealhaddaway/predicter
|
9fed1ff27e0b4636df7bdf5373ef95dba144727a
|
14b168f28f2378e4dd3e4a913f4d31b742460bf3
|
refs/heads/master
| 2022-09-26T08:52:22.056061
| 2022-07-29T11:56:19
| 2022-07-29T11:56:19
| 192,175,432
| 11
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,038
|
rd
|
retrieval.time.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/retrieval.time.R
\name{retrieval.time}
\alias{retrieval.time}
\title{Time needed to retrieve full texts of relevant records}
\usage{
retrieval.time(inclabstracts.number = 310.1662, retrieval.day = 171,
retrieval.checked = 0)
}
\description{
This function calculates the time needed to retrieve the full texts of
the abstracts deemed to be relevant at the previous stage in a systematic
review, based on the inputs of the number of relevant abstracts
('inclabstracts.number', see 'inclabstracts.number' function), the number of
records that can be retrieved per day ('retrieved.day'), and the
percentage of all retrievals that are double checked for consistency
('retrieval.checked'). Where full dual screening of all records is used,
this will equal a percentage of 100 abstracts being checked. Default
values are provided based on the empirical study of environmental
systematic reviews by Haddaway and Westgate (2018)
https://doi.org/10.1111/cobi.13231.
}
|
99112090512ee62f908f4d4b07c10087aa3dae13
|
44415fd86412a96b039d60a6ba83b8065bde6f1d
|
/man/useBIC.Rd
|
be632b5bc2aafa702786b53ce74ef1b21c97cbc5
|
[] |
no_license
|
cran/AICcmodavg
|
f9451566b4415350ff91d4e1fffc323ca6f6082e
|
69bf7930f2228ed6fb06683cd766a16b0bf5cdce
|
refs/heads/master
| 2023-04-08T21:23:38.333939
| 2023-03-20T15:20:02
| 2023-03-20T15:20:02
| 17,677,598
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,807
|
rd
|
useBIC.Rd
|
\name{useBIC}
\Rdversion{1.1}
\alias{useBIC}
\alias{useBIC.default}
\alias{useBIC.aov}
\alias{useBIC.betareg}
\alias{useBIC.clm}
\alias{useBIC.clmm}
\alias{useBIC.coxme}
\alias{useBIC.coxph}
\alias{useBIC.fitdist}
\alias{useBIC.fitdistr}
\alias{useBIC.glm}
\alias{useBIC.glmmTMB}
\alias{useBIC.gls}
\alias{useBIC.gnls}
\alias{useBIC.hurdle}
\alias{useBIC.lavaan}
\alias{useBIC.lm}
\alias{useBIC.lme}
\alias{useBIC.lmekin}
\alias{useBIC.maxlikeFit}
\alias{useBIC.mer}
\alias{useBIC.merMod}
\alias{useBIC.lmerModLmerTest}
\alias{useBIC.multinom}
\alias{useBIC.nlme}
\alias{useBIC.nls}
\alias{useBIC.polr}
\alias{useBIC.rlm}
\alias{useBIC.survreg}
\alias{useBIC.unmarkedFit}
\alias{useBIC.vglm}
\alias{useBIC.zeroinfl}
\title{
Computing BIC or QBIC
}
\description{
Functions to compute the Bayesian information criterion (BIC) or a
quasi-likelihood analogue (QBIC).
}
\usage{
useBIC(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{aov}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{betareg}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{clm}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{clmm}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{coxme}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{coxph}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{fitdist}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{fitdistr}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{glm}(mod, return.K = FALSE, nobs = NULL, c.hat = 1,
\dots)
\method{useBIC}{glmmTMB}(mod, return.K = FALSE, nobs = NULL, c.hat = 1,
\dots)
\method{useBIC}{gls}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{gnls}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{hurdle}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{lavaan}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{lm}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{lme}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{lmekin}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{maxlikeFit}(mod, return.K = FALSE, nobs = NULL, c.hat =
1, \dots)
\method{useBIC}{mer}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{merMod}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{lmerModLmerTest}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{multinom}(mod, return.K = FALSE, nobs = NULL, c.hat = 1,
\dots)
\method{useBIC}{nlme}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{nls}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{polr}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{rlm}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{survreg}(mod, return.K = FALSE, nobs = NULL, \dots)
\method{useBIC}{unmarkedFit}(mod, return.K = FALSE, nobs = NULL, c.hat =
1, \dots)
\method{useBIC}{vglm}(mod, return.K = FALSE, nobs = NULL, c.hat = 1,
\dots)
\method{useBIC}{zeroinfl}(mod, return.K = FALSE, nobs = NULL, \dots)
}
\arguments{
\item{mod}{
an object of class \code{aov}, \code{betareg}, \code{clm},
\code{clmm}, \code{clogit}, \code{coxme}, \code{coxph},
\code{fitdist}, \code{fitdistr}, \code{glm}, \code{glmmTMB},
\code{gls}, \code{gnls}, \code{hurdle}, \code{lavaan}, \code{lm},
\code{lme}, \code{lmekin}, \code{maxlikeFit}, \code{mer},
\code{merMod}, \code{lmerModLmerTest}, \code{multinom}, \code{nlme},
\code{nls}, \code{polr}, \code{rlm}, \code{survreg}, \code{vglm},
\code{zeroinfl}, and various \code{unmarkedFit} classes containing
the output of a model.
}
\item{return.K}{
logical. If \code{FALSE}, the function returns the information
criterion specified. If \code{TRUE}, the function returns K (number
of estimated parameters) for a given model.
}
\item{nobs}{
this argument allows to specify a numeric value other than total
sample size to compute the BIC (i.e., \code{nobs} defaults to total
number of observations). This is relevant only for mixed models or
various models of \code{unmarkedFit} classes where sample size is not
straightforward. In such cases, one might use total number of
observations or number of independent clusters (e.g., sites) as the
value of \code{nobs}.
}
\item{c.hat}{
value of overdispersion parameter (i.e., variance inflation factor)
such as that obtained from \code{c_hat}. Note that values of c.hat
different from 1 are only appropriate for binomial GLM's with trials
> 1 (i.e., success/trial or cbind(success, failure) syntax), with
Poisson GLM's, single-season occupancy models (MacKenzie et al. 2002),
dynamic occupancy models (MacKenzie et al. 2003), or \emph{N}-mixture
models (Royle 2004, Dail and Madsen 2011). If \code{c.hat} > 1,
\code{useBIC} will return the quasi-likelihood analogue of the
information criteria requested and multiply the variance-covariance
matrix of the estimates by this value (i.e., SE's are multiplied by
\code{sqrt(c.hat)}). This option is not supported for generalized
linear mixed models of the \code{mer} or \code{merMod} classes.
}
\item{\dots}{
additional arguments passed to the function.
}
}
\details{
\code{useBIC} computes the Bayesian information criterion (BIC,
Schwarz 1978): \deqn{BIC = -2 * log-likelihood + K * log(n),} where
the log-likelihood is the maximum log-likelihood of the model, \emph{K}
corresponds to the number of estimated parameters, and \emph{n}
corresponds to the sample size of the data set.
In the presence of overdispersion, a quasi-likelihood analogue of
the BIC (QBIC) will be computed, as \deqn{QBIC = \frac{-2 *
log-likelihood}{c-hat} + K * log(n),} where \emph{c-hat} is the
overdispersion parameter specified by the user with the argument
\code{c.hat}. Note that BIC or QBIC values are meaningful to select
among \code{gls} or \code{lme} models fit by maximum likelihood.
BIC or QBIC based on REML are valid to select among different models
that only differ in their random effects (Pinheiro and Bates 2000).
}
\value{
\code{useBIC} returns the BIC or the number of estimated parameters,
depending on the values of the arguments.
}
\note{
The actual (Q)BIC values are not really interesting in themselves, as
they depend directly on the data, parameters estimated, and likelihood
function. Furthermore, a single value does not tell much about model
fit. Information criteria become relevant when compared to one
another for a given data set and set of candidate models.
%
% Note that for robust regression models of class \code{rlm}, the
% AIC is computed based on Tharmaratnam and Claeskens (2013). The
% second-order AIC is not yet implemented for the \code{rlm} class.
}
\references{
Burnham, K. P., Anderson, D. R. (2002) \emph{Model Selection and
Multimodel Inference: a practical information-theoretic
approach}. Second edition. Springer: New York.
Dail, D., Madsen, L. (2011) Models for estimating abundance from
repeated counts of an open population. \emph{Biometrics} \bold{67},
577--587.
MacKenzie, D. I., Nichols, J. D., Lachman, G. B., Droege, S., Royle,
J. A., Langtimm, C. A. (2002) Estimating site occupancy rates when
detection probabilities are less than one. \emph{Ecology} \bold{83},
2248--2255.
MacKenzie, D. I., Nichols, J. D., Hines, J. E., Knutson, M. G.,
Franklin, A. B. (2003) Estimating site occupancy, colonization, and
local extinction when a species is detected imperfectly. \emph{Ecology}
\bold{84}, 2200--2207.
Pinheiro, J. C., Bates, D. M. (2000) \emph{Mixed-effect models in S and
S-PLUS}. Springer Verlag: New York.
Royle, J. A. (2004) \emph{N}-mixture models for estimating population
size from spatially replicated counts. \emph{Biometrics} \bold{60},
108--115.
Schwarz, G. (1978) Estimating the dimension of a model. \emph{Annals of
Statistics} \bold{6}, 461--464.
% Tharmaratnam, K., Claeskens, G. (2013) A comparison of robust
% versions of the AIC based on M-, S- and MM-estimators. \emph{Statistics}
% \bold{47}, 216--235.
}
\author{
Marc J. Mazerolle
}
\seealso{
\code{\link{AICc}}, \code{\link{bictab}},
\code{\link{bictabCustom}}, \code{\link{useBICCustom}}
}
\examples{
##cement data from Burnham and Anderson (2002, p. 101)
data(cement)
##run multiple regression - the global model in Table 3.2
glob.mod <- lm(y ~ x1 + x2 + x3 + x4, data = cement)
##compute BIC with full likelihood
useBIC(glob.mod, return.K = FALSE)
##compute BIC for mixed model on Orthodont data set in Pinheiro and
##Bates (2000)
\dontrun{
require(nlme)
m1 <- lme(distance ~ age, random = ~1 | Subject, data = Orthodont,
method= "ML")
useBIC(m1, return.K = FALSE)
}
}
\keyword{models}
|
5268980f3176698edeb9b11ebdb04f5d0cab55d0
|
80acc61dc3d4717f2b57a8fcd37420337038486d
|
/ui/cmsy/cmsyUI.R
|
7dbbac5dd8a53d1f289472744e1e5e34474429c0
|
[] |
no_license
|
pink-sh/StockMonitoringTool
|
97e378b0db835b01ec62334f357514c03b3607cf
|
dbcb20c4c4ef64c025742966b3885143952873a3
|
refs/heads/master
| 2023-04-03T22:45:23.310381
| 2020-10-26T12:22:10
| 2020-10-26T12:22:10
| 109,261,257
| 2
| 2
| null | 2021-03-29T02:01:57
| 2017-11-02T12:22:17
|
R
|
UTF-8
|
R
| false
| false
| 12,260
|
r
|
cmsyUI.R
|
# Static dashboard tab items for the CMSY introduction and sample-dataset
# pages; their HTML bodies are rendered server-side into the named output slots.
tabCmsyIntro <- tabItem("cmsyIntro",htmlOutput("cmsyIntroOut"))
tabCmsySampleDataset <- tabItem("cmsySampleDataset",htmlOutput("cmsySampleDataset"))
# Build the CMSY method tab as a Shiny module UI.
#
# id: module id; NS(id) namespaces every input/output so the matching module
#     server can address them without clashing with the rest of the app.
#
# Layout: a "Main Parameters" box (stock CSV upload + dynamically inserted
# stock selector), an "Optional Parameters" box (priors and reference points),
# the Run/Reset buttons, and a results box that the server fills with the
# CMSY report, log and charts.
tabCmsy <- function(id) {
  # Namespacing function for all module-scoped widget ids.
  ns <- NS(id)
  tabItem("cmsyWidget",
          htmlOutput(ns("cmsyMethodTitle")),
          # NOTE(review): this button id is NOT namespaced (no ns()) -- it is
          # apparently handled at app level together with the bsModal below.
          actionButton("cmsyDataConsiderations", "Data Considerations", class="topLevelInformationButton"),
          fluidRow(
            bsModal("modalExampleCMSY", "CMSY Data Considerations", "cmsyDataConsiderations", size = "large", htmlOutput(ns("cmsyDataConsiderationsText"))),
            # ---- Main parameters: catch time-series upload + stock selector ----
            box(title = "Main Parameters",
                width = NULL,
                collapsible = T,
                class = "collapsed-box",
                box(
                  fileInput(ns("fileCmsy"), "Choose Stock CSV File",
                            accept = c(
                              "text/csv",
                              "text/comma-separated-values,text/plain",
                              ".csv", id="fileCmsy")
                  )
                ),
                box(
                  # Placeholder div: the stock drop-down is inserted here by the
                  # server (see removeUI on "#stockSelectorContainerInner").
                  tags$div(id="stockSelectorContainer")
                )
            ),
            # ---- Optional parameters: priors, depletion ranges, reference points ----
            box(title = "Optional Parameters",
                width = NULL,
                collapsible = T,
                class = "collapsed-box",
                collapsed = T,
                box(
                  numericInput(ns("minOfYear"), p("Earliest year of the catch series (", withMathJax("\\(minOfYear\\)"), ")"), 1998, min = 1900, max = 2030, step=1),
                  numericInput(ns("maxOfYear"), p("Latest year of the catch series (", withMathJax("\\(maxOfYear\\)"), ")"), 2015, min = 1900, max = 2030, step=1),
                  selectInput(ns("resiliance"), p("Resilience, or intrinsic growth rate (", withMathJax("\\(r\\)"), ") as qualitative information (Use information from FishBase or SeaLifeBase)"), choices=c("Very low", "Low", "Medium", "High"), selected="Medium"),
                  textInput(ns("r.low"), "Lower limit of resilience (Both the high and low range of this parameter must be set by the user, otherwise, the range is calculated automatically from Resilience)", "NA"),
                  #numericInput(ns("r.low"), "Lowest resilience (automatically calculated if not set)", "NA", min = 10^-5, max = NA, step=NA),
                  textInput(ns("r.hi"), "Upper limit of resilience (Both the high and low range of this parameter must be set by the user, otherwise, the range is calculated automatically from Resilience)", "NA"),
                  p("**The user should take care when setting the prior estimates for depletion at the beginning and end of the time series. Depletion levels are assumptions about the initial and current state of the stock, and they have a strong influence on the results of CMSY, so careful evaluation of these parameters is recommended. These parameters are determined in CMSY using the relationship between current catch and maximum catch."),
                  #numericInput(ns("stb.low"), "**Starting depletion range: Lowest possible relative biomass at the beginning of the catch time series (automatically calculated if not set)", 0, min = 0, max = 10, step=0.1),
                  #numericInput(ns("stb.hi"), "**Starting depletion range: Highest possible relative biomass at the beginning of the catch time series (automatically calculated if not set)", 0, min = 0, max = 10, step=0.1),
                  sliderInput(ns("stb"), "**Starting depletion range: Lower and upper limits of relative biomass at the beginning of the catch time series (automatically calculated if not set)",min = 0, max = 10,step=0.1,value = c(0,0)),
                  textInput(ns("int.yr"), p("Intermediate year (", withMathJax("\\(int.yr\\)"), " automatically calculated if not set. Must be specified by user if intermediate biomass range is specified below)"), "NA"),
                  textInput(ns("intb.low"), "Lower limit of relative biomass at the intermediate year of the catch time series (intermediate year, low range and high range must all be set by user; otherwise leave all three fields blank)", "NA"),
                  textInput(ns("intb.hi"), "Upper limit of relative biomass at the intermediate year of the catch time series (intermediate year, low range and high range must all be set by user; otherwise leave all three fields blank)", "NA"),
                  #numericInput(ns("endb.low"), "**Ending depletion range: Lowest possible relative biomass at the end of the catch time series (automatically calculated if not set)", 0.01, min = 0, max = 10, step=0.01),
                  #numericInput(ns("endb.hi"), "**Ending depletion range: Highest possible relative biomass at the end of the catch time series (automatically calculated if not set)", 0.4, min = 0, max = 10, step=0.1),
                  sliderInput(ns("endb"), "**Ending depletion range: Lower and upper limits of relative biomass at the end of the catch time series (automatically calculated if not set)",min = 0, max = 10,step=0.1,value = c(0.01,0.4)),
                  textInput(ns("q.start"), p("Start year over which to calculate catchability (", withMathJax("\\(q\\)"), ") value at the beginning of a stable catch-biomass period (", withMathJax("\\(q.start\\)"), " automatically calculated if not set)"), "NA"),
                  textInput(ns("q.end"), p("End year over which to calculate catchability (", withMathJax("\\(q\\)"), ") at the end of a stable catch-biomass period (", withMathJax("\\(q.end\\)"), " automatically calculated if not set)"), "NA")
                ),
                box(
                  numericInput(ns("startYear"), "Start year to process the catch series from", 1998, min = 1900, max = 2030, step=1),
                  numericInput(ns("endYear"), "End year to process the catch series up to", 2015, min = 1900, max = 2030, step=1),
                  textInput(ns("blim"), p("Biomass biological limit (", withMathJax("\\(B_{lim}\\)"), ")"), "NA"),
                  textInput(ns("bpa"), p("Biomass precautionary value (",withMathJax("\\(B_{pa}\\)") , ")"), "NA"),
                  textInput(ns("bmsy"), p("Biomass maximum sustainable yield (", withMathJax("\\(B_{MSY}\\)"), ")"), "NA"),
                  textInput(ns("b40"), p("Biomass at 40% over the unfished level (", withMathJax("\\(B_{40\\%}\\)"), ")"), "NA"),
                  textInput(ns("fmsy"), p("Fishing mortality at Maximum Sustainable Yield (",withMathJax("\\(F_{MSY}\\)") , "). If"
                                          ,withMathJax("\\(F_{MSY}\\)") ,"is known, the resilience prior range (lowest and highest resilience estimates)
                                          could be defined to include estimate of", withMathJax("\\(F_{MSY}\\)") ,
                                          "assuming that r", withMathJax("\\(\\approx\\)"),withMathJax("\\(F_{MSY}\\)")), "NA"),
                  textInput(ns("flim"), p("Fishing mortality biological limit (", withMathJax("\\(F_{lim}\\)"), ")"), "NA"),
                  textInput(ns("fpa"), p("Fishing mortality precautionary value (", withMathJax("\\(F_{pa}\\)"), ")"), "NA"),
                  textInput(ns("fofl"), p("Fishing mortality at overfishing level (", withMathJax("\\(F_{ofl}\\)"),")"), "NA"),
                  textInput(ns("last_f"), "Last known exploitation rate", "NA"),
                  textInput(ns("msy"), p("Maximum Sustainable Yield (", withMathJax("\\(MSY\\)"), ")"), "NA"),
                  textInput(ns("msyBTrigger"), p("Spawning Stock Biomass at MSY (", withMathJax("\\(SSB_{MSY}\\)"), ")"), "NA"),
                  textInput(ns("m"), p("**Natural mortality (", withMathJax("\\(M\\)"), ")"), "NA"),
                  p("**If desired, the life history parameters pulled from FishBase.org in the Supporting Tools: 'Natural Mortality Estimators' tool could be used to provide estimates of M here."),
                  ##KK: it's not clear to me what the user input would be here if not "None". Suggest deleting (also for Comments.
                  #textInput("btype", "btype indicates if the catch file contains biomass, CPUE or no information associated with the catch time series", "None"),
                  #textInput("comments", "Comments on data and computation", "landings"),
                  checkboxInput(ns("force.cmsy"), "Check this if CMSY results are to be preferred over the Bayesian State Model results (only when biomass or CPUE is available)", FALSE)
                )
            ),
            # ---- Action buttons: Run starts disabled until a file/stock is chosen ----
            tags$div(disabled(actionButton(ns("go_cmsy"), "Run CMSY Method", class="topLevelInformationButton")),
                     actionButton(ns("reset_cmsy"), "Reset", class="topLevelInformationButton"), style="margin-left: 15px;")
            ,
            htmlOutput("cmsyWarning"),
            hr(),
            # ---- Results: report download, run log/info, and the two chart panels ----
            box( width= 100, id = "box_cmsy_results",
                 title = "Results of CMSY Method",
                 # Keep outputs fully opaque while Shiny recalculates them.
                 tags$style(type="text/css",
                            ".recalculating {opacity: 1.0;}"
                 ),
                 fluidRow(
                   box(
                     uiOutput(ns("downloadCmsyReportButton")),
                     uiOutput(ns("CmsyVREUpload"))
                   )
                 ),
                 fluidRow(
                   box(
                     "The upper left panel shows catches relative to the estimate of MSY, with indication of 95% confidence limits in grey. The upper right panel shows the development of relative total biomass (B/Bmsy), with the grey area indicating uncertainty. The lower left graph shows relative exploitation (F/Fmsy), with Fmsy corrected for reduced recruitment below 0.5 Bmsy. The lower-right panel shows the trajectory of relative stock size (B/Bmsy) over relative exploitation (F/Fmsy).",
                     htmlOutput(ns("renderCmsyLog")),
                     htmlOutput(ns("renderCmsyInfo"))
                   ),
                   box(id = "box_cmsy_results_charts",
                       htmlOutput(ns("titleCmsyManagementChart")),
                       "Panel A shows in black the time series of catches and in blue the three-years moving average with indication of highest and lowest catch, as used in the estimation of prior biomass by the default rules. Panel B shows the explored r-k log space and in dark grey the r-k pairs which were found by the CMSY model to be compatible with the catches and the prior information. Panel C shows the most probable r-k pair and its approximate 95% confidence limits in blue. Panel D shows in blue the biomass trajectory estimated by CMSY. Dotted lines indicate the 2.5th and 97.5th percentiles. Vertical blue lines indicate the prior biomass ranges. Panel E shows in blue the harvest rate from CMSY. Panel F shows the Schaefer equilibrium curve of catch/MSY relative to B/k, here indented at B/k < 0.25 to account for reduced recruitment at low stock sizes. The blue dots are scaled by CMSY estimates.",
                       imageOutput(ns("renderCmsyManagementChart")),
                       htmlOutput(ns("titleCmsyAnalisysChart")),
                       imageOutput(ns("renderCmsyAnalysisChart"))
                   )
                 )
            )
          )
  )
}
# Restore every CMSY input widget to its initial value, clear the dynamic
# stock selector, disable the Run button and empty the results box.
resetCmsyInputValues <- function() {
  # All resettable widget ids, in the same order they were previously reset.
  cmsy_input_ids <- c(
    "fileCmsy", "minOfYear", "maxOfYear", "resiliance",
    "r.low", "r.hi", "stb", "int.yr", "intb.low", "intb.hi",
    "endb", "q.start", "q.end", "startYear", "endYear",
    "blim", "bpa", "bmsy", "b40", "fmsy", "flim", "fpa",
    "fofl", "last_f", "msy", "msyBTrigger", "m", "force.cmsy"
  )
  for (widget_id in cmsy_input_ids) {
    shinyjs::reset(widget_id)
  }
  # careful removeUI conflict with event
  removeUI(selector="#stockSelectorContainerInner")
  shinyjs::disable("go_cmsy")
  clearResults("box_cmsy_results")
}
|
64993b8904ded62d9771e4021287f4e603bcc899
|
1413941e63cfa561c00618601f8ef4404171d7c7
|
/R/posterior_logspline.R
|
32988b7bb2ab848baf5a734f6c9db333b71121a0
|
[] |
no_license
|
cran/metaBMA
|
8c4699d0ae85e32bf7e5069b463de2fa14a240f9
|
75e3abeb55b2d8dc1ada46c7b918b3d899cad732
|
refs/heads/master
| 2021-07-12T20:47:18.831443
| 2021-03-17T05:50:02
| 2021-03-17T05:50:02
| 98,450,086
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 872
|
r
|
posterior_logspline.R
|
posterior_logspline <- function(stanfit, parameter, prior) {
  # Approximate the posterior density of `parameter` by fitting a logspline
  # to its MCMC samples, constrained to the support of the prior.
  #
  # stanfit:   a 'stanfit' object (samples pulled via extract()) or a numeric
  #            vector of posterior samples.
  # parameter: name of the parameter to extract from the stanfit object.
  # prior:     prior object; bounds_prior(prior) supplies the support used as
  #            lower/upper bounds for the logspline fit.
  #
  # Returns a density function of one argument.
  if (missing(stanfit) || is.null(stanfit)) {
    warning(
      "MCMC/Stan samples missing: To approximate the posterior density",
      "\n by MCMC samples, one of the available priors must be used (see ?prior)",
      "\n and the argument 'sample' must be larger than zero!"
    )
  }
  # FIX: class(stanfit) == "stanfit" is fragile -- it yields a length > 1
  # condition (an error in modern R) whenever the object carries more than one
  # class; inherits() is the robust idiom.
  if (inherits(stanfit, "stanfit")) {
    ss <- extract(stanfit, parameter)[[parameter]]
  } else {
    ss <- stanfit
  }
  bnd <- bounds_prior(prior)
  mini <- max(-Inf, bnd[1])
  maxi <- min(Inf, bnd[2])
  args <- list(
    "x" = ss,
    # Knots at the 20/50/80% sample quantiles keep the fit smooth and stable.
    "knots" = quantile(ss, probs = c(.20, .50, .80)),
    "maxknots" = 5
  )
  # Only pass bounds to logspline when the support is actually restricted.
  if (mini != -Inf) args$lbound <- mini
  if (maxi != Inf) args$ubound <- maxi
  lspline <- do.call("logspline", args)

  dens <- function(x) dlogspline(x, lspline)
  dens
}
|
2a9a7ce62190213caa6d60b6e930be2496e2688e
|
442f9770e53101c4461c9031dfd69d3dfa69a757
|
/R/labelPoints.R
|
b22df590c0d08989a3228a1a786fa3262f2bd4f5
|
[] |
no_license
|
cran/WGCNA
|
edaf87638c6cf0c9105dbb67637ebe059f598cb1
|
31f538c2f9d7d48f35f7098b4effe17331357d0d
|
refs/heads/master
| 2023-01-25T02:34:33.041279
| 2023-01-18T11:10:05
| 2023-01-18T11:10:05
| 17,694,095
| 48
| 54
| null | 2019-08-17T13:25:00
| 2014-03-13T03:47:40
|
R
|
UTF-8
|
R
| false
| false
| 5,617
|
r
|
labelPoints.R
|
#=================================================================================================
#
# labelPoints: label points in a scatterplot while trying to avoid labels overlapping with one another
# and with points.
#
#=================================================================================================
# Place text labels next to scatterplot points, pushing each label away from
# neighbouring points to reduce overlap.
#
# x, y:         point coordinates (respecting par("xlog")/par("ylog")).
# labels:       character vector of labels, one per point.
# cex:          character expansion for the labels.
# offs:         gap (in plot-device-scaled units) between a point and its label.
# xpd:          passed to text(); allow drawing outside the plot region.
# jiggle:       if > 0, random perturbation applied to the *working copies* of
#               the coordinates before computing repulsion directions.
# protectEdges: shift labels back inside the plot box when they would stick out.
# doPlot:       if TRUE, draw the labels; the label positions are returned
#               invisibly either way.
#
# Returns (invisibly) a data.frame with the final label coordinates,
# or 0 for the degenerate 0/1-point cases.
labelPoints = function(x, y, labels, cex = 0.7, offs = 0.01, xpd = TRUE, jiggle = 0,
                       protectEdges = TRUE,
                       doPlot = TRUE, ...)
{
  nPts = length(labels);
  # Current user-coordinate box and plot size in inches; used to convert user
  # coordinates into a common device scale so x/y distances are comparable.
  box = par("usr");
  dims = par("pin");
  scaleX = dims[1]/(box[2] - box[1]);
  scaleY = dims[2]/(box[4] - box[3]);
  #ish = charmatch(shape, .shapes);
  #if (is.na(ish))
  #   stop(paste("Unrecognized 'shape'. Recognized values are", paste(.shapes, collapse = ", ")));
  # On log axes par("usr") is in log10 units, so work in log10 space too.
  if (par("xlog"))
  {
    xx = log10(x);
  } else
    xx = x;
  if (par("ylog"))
  {
    yy = log10(y);
  } else
    yy = y;
  xx = xx * scaleX;
  yy = yy * scaleY;
  # Optional random jitter (on copies jx/jy) breaks ties between coincident
  # points when computing repulsion directions.
  if (jiggle > 0)
  {
    rangeX = max(xx, na.rm = TRUE) - min(xx, na.rm = TRUE)
    jx = xx + jiggle * rangeX * (runif(nPts) - 0.5);
    rangeY = max(yy, na.rm = TRUE) - min(yy, na.rm = TRUE)
    jy = yy + jiggle * rangeY * (runif(nPts) - 0.5);
  } else {
    jx = xx;
    jy = yy;
  }
  dx = offs;
  dy = offs;
  # Label extents in the same device-scaled units as xx/yy.
  labWidth = strwidth(labels, cex=cex) * scaleX;
  labHeight = strheight(labels, cex=cex) * scaleY;
  if (nPts==0) return(0);
  # Single point: place the label directly above, optionally nudged back
  # inside the plot box.
  if (nPts==1)
  {
    if (protectEdges)
    {
       shift = ifelse(x - labWidth/2/scaleX < box[1], box[1] - x + labWidth/2/scaleX,
                ifelse(x + labWidth/2/scaleX > box[2], box[2] - x - labWidth/2/scaleX, 0));
       x = x + shift;
       # Also check the top and bottom edges
       yShift = if (y + labHeight/scaleY + offs/scaleY > box[4]) -(labHeight + 2*offs)/scaleY else 0;
       y = y + yShift
    }
    text(x, y + labHeight/2/scaleY + offs/scaleY, labels, cex = cex, xpd = xpd, adj = c(0.5, 0.5), ...)
    return (0);
  }
  xMat = cbind(xx,yy);
  jxMat = cbind(jx, jy);
  distX = as.matrix(dist(jx));
  distY = as.matrix(dist(jy));
  dir = matrix(0, nPts, 2);
  # Squared "interaction scale" for the distance-weighting below.
  d0SqX = (labWidth+2*offs)^2
  d0SqY = (labHeight + 2*offs)^2;
  # For each point, compute a repulsion direction as the weighted average of
  # unit vectors pointing away from nearby points (plus a small upward bias
  # from the appended c(0,1) row).
  for (p in 1:nPts)
  {
    difs = matrix(jxMat[p, ], nPts, 2, byrow = TRUE) - jxMat;
    difSc = difs / sqrt(matrix(apply(difs^2, 1, sum, na.rm = TRUE), nPts, 2));
    difSx = rbind(difSc, c(0,1));
    difSx[p, ] = 0;
    # Weights decay sharply with distance; coincident points get weight 0.
    w = c(exp(-distX[,p]^4 / d0SqX[p]^2 - distY[,p]^4/d0SqY^2));
    w[distX[, p]==0 & distY[,p]==0] = 0;
    w = c(w, 0.01);
    dir[p, ] = apply(difSx * matrix(w, (nPts+1), 2), 2, sum, na.rm = TRUE) / sum(w, na.rm = TRUE)
    # Degenerate direction: fall back to a random one.
    if (sum(abs(dir[p, ]))==0) dir[p, ] = runif(2);
  }
  scDir = dir / sqrt(matrix(apply(dir^2, 1, sum, na.rm = TRUE), nPts, 2));
  offsMat = cbind(labWidth/2 + offs, labHeight/2 + offs)
  Rmat = abs(scDir / offsMat);
  ind = Rmat[, 1] > Rmat[, 2]; # This is an indicator of whether the labels touch the vertical (TRUE ) or
                               # horizontal (FALSE) edge of the square around the point
  # These are preliminary text coordinates relative to their points.
  dx = offsMat[, 1] * sign(scDir[, 1])
  dx[!ind] = scDir[!ind, 1] * offsMat[!ind, 2]/abs(scDir[!ind,2]);
  dy = offsMat[, 2] * sign(scDir[, 2]);
  dy[ind] = scDir[ind, 2] * offsMat[ind, 1]/abs(scDir[ind,1]);
  # Absolute coordinates
  xt = (xx + dx)/scaleX;
  yt = (yy + dy)/scaleY;
  # Check if any of the points overlap with a label (of a different point):
  # build pairwise bounding boxes for points (rows) vs labels (columns).
  pointMaxx = matrix(xx + offs, nPts, nPts);
  pointMinx = matrix(xx - offs, nPts, nPts);
  pointMiny = matrix(yy - offs, nPts, nPts);
  pointMaxy = matrix(yy + offs, nPts, nPts);
  labelMinx = matrix(xt - labWidth/2, nPts, nPts, byrow = TRUE);
  labelMaxx = matrix(xt + labWidth/2, nPts, nPts, byrow = TRUE);
  labelMiny = matrix(yt - labHeight/2, nPts, nPts, byrow = TRUE);
  labelMaxy = matrix(yt + labHeight/2, nPts, nPts, byrow = TRUE);
  # Classify 1-D interval overlap: 1/2/3 encode which side overlaps, 0 = none.
  overlapF = function(x1min, x1max, x2min, x2max)
  {
    overlap = matrix(0, nPts, nPts);
    overlap[ x1max > x2min & x1max < x2max & x1min < x2min ] = 1;
    overlap[ x1max > x2min & x1max < x2max & x1min > x2min ] = 2;
    overlap[ x1max > x2max & x1min > x2min ] = 3;
    overlap;
  }
  overlapX = overlapF(pointMinx, pointMaxx, labelMinx, labelMaxx);
  overlapY = overlapF(pointMiny, pointMaxy, labelMiny, labelMaxy);
  indOvr = overlapX > 0 & overlapY >0;
  overlap = matrix(0, nPts, nPts);
  overlap[indOvr] = (overlapY[indOvr] - 1) * 3 + overlapX[indOvr];
  # For now try to fix cases of a single overlap.
  # NOTE(review): nOvrPerLabel is computed but the single-overlap fix below
  # was never implemented (see the commented-out loop).
  nOvrPerLabel = apply(overlap>0, 1, sum);
  #for (p in 1:nPts) if (nOverPerLabel[p]==1)
  #{
  # Check if any of the labels extend past the left or right edge of the plot
  if (protectEdges)
  {
    shift = ifelse(xt - labWidth/2/scaleX < box[1], box[1] - xt + labWidth/2/scaleX,
             ifelse(xt + labWidth/2/scaleX > box[2], box[2] - xt - labWidth/2/scaleX, 0));
    xt = xt + shift;
    # Also check the top and bottom edges
    # Do labels overlap with points along the x coordinate?
    xOverlap = abs(xt-x) < (labWidth/2 + offs)/scaleX;
    yShift = ifelse(yt - labHeight/2/scaleY < box[3],
                    ifelse(xOverlap, (labHeight + 2*offs)/scaleY, box[3] - yt + labHeight/2/scaleY),
               ifelse(yt + labHeight/2/scaleY > box[4], -(labHeight + 2*offs)/scaleY, 0));
    yt = yt + yShift
  }
  # Convert back from log10 space before drawing/returning.
  if (par("xlog")) xt = 10^xt;
  if (par("ylog")) yt = 10^yt;
  if (doPlot)
    text(xt, yt, labels, cex = cex, xpd = xpd, adj = c(0.5, 0.5), ...)
  invisible(data.frame(x = xt, y= yt, label = labels));
}
|
ad6e5318e54b01d9f65a6387e1fb840603cbe6f8
|
e756bcd2b578b74c238dbca53ef88e2d026bd121
|
/man/h_substitute.Rd
|
870061c29199cb409cbbc6104ddecdebd49b3e47
|
[] |
no_license
|
HYDFKI7/htsr
|
d95abbdbafde547b726733524176bd08e55ccdee
|
d56e8ea98c99f0158b0465d279ae805728ec5adb
|
refs/heads/master
| 2023-02-02T23:56:09.548157
| 2020-12-16T07:50:02
| 2020-12-16T07:50:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 680
|
rd
|
h_substitute.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/h_substitute.R
\name{h_substitute}
\alias{h_substitute}
\title{Substitute the missing values in a series with existing values of another series}
\usage{
h_substitute(files)
}
\arguments{
\item{files}{List of two file names}
}
\description{
The series to process (first in the file list) contains missing values or gaps
to be replaced by those of the second series (second in file list).
The function only works on the common dates of both series.
}
\details{
The output file is named with a sb_ prefix.
}
\examples{
\dontrun{
f <- h_substitute(c(f1, f2))
}
}
\author{
P. Chevallier - Feb 2017 - Mar 2020
}
|
47d54598df30f08e662551fde0d7a126b6e10236
|
96f1959559ed420202158d9dc56d6af510c6879c
|
/tests/testthat/test_sstock.R
|
8fbbdb66d3b6e5b44949515b7b90e421570be671
|
[] |
no_license
|
AnthonyTedde/StockPriceSimulator
|
998044b2974ce3ee2898a4ae4a3caae52bea6e37
|
8f704f59bf0fe456da790808f489dff2af56058f
|
refs/heads/master
| 2018-08-30T07:07:53.447807
| 2018-08-25T15:31:54
| 2018-08-25T15:31:54
| 111,553,079
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,045
|
r
|
test_sstock.R
|
# Unit tests (testthat) for StockPriceSimulator::sstock(), which simulates a
# theoretical stock-price path and returns it as a two-column data frame.
library(testthat)
library(StockPriceSimulator)

test_that("sstock return a dataframe", {
  expect_is(sstock(), class(data.frame()))
})

test_that("sstock names are correct", {
  expect_equal(names(sstock()), c('time_periods', 'stock_price_path'))
})

# Test the class of an s3 object
test_that("sstock return a S3 object", {
  expect_s3_class(sstock(), 'theoretical_stock_price')
})

# The following test emerges from the theory of Stochastic Calculus for
# Finance II, page 107, formula 3.4.16: the realized variance of log returns,
#
#     1 / (T2 - T1) * sum_{j=0}^{m-1} ( log( S(t_{j+1}) / S(t_j) ) )^2,
#
# converges to sigma^2 as the sampling grid becomes fine.
test_that("Equality between log return formula and volatility (formula 3.4.16)", {
  si = 9
  S <- sstock(time_to_maturity = 4,
              scale = 1000,
              sigma = si)
  T2 <- 4
  T1 <- 0
  # Realized variance of the simulated path; should approximate sigma^2
  # within the (loose) tolerance below because the path is random.
  si_squared <- sum(sapply(1:(nrow(S) - 1),
                           function(j)
                             log(S[j + 1, 2] / S[j, 2]) ^ 2)) * (T2 - T1)^-1
  expect_equal(si ^ 2, si_squared, tolerance = 1e-1)
})
|
546e42cbe9216fb41e2c8704210a1706ddffbab2
|
1211c97e396c39cd4ceaadb44687451f3f93ef85
|
/set_column.R
|
d7aa71b6a16ac26518565a10b7b6a1c860805c08
|
[] |
no_license
|
gary917/scripts
|
81cbbdc61d548f5c19736a4ccb91053846727151
|
9abee2c4ae5d4bc90b37fbbc41ebdccf7e30c2fc
|
refs/heads/master
| 2020-03-26T23:54:40.193488
| 2018-08-21T14:40:25
| 2018-08-21T14:40:25
| 145,577,198
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,631
|
r
|
set_column.R
|
# Given a timeframe and date, count the rows of ANPR camera data in a CSV
# export that fall inside that window, after attaching the documented headers.
library(anytime)
# FIX: the previous version called rm(list = ls()) here; wiping the caller's
# global environment from inside a script is an anti-pattern and was removed.

# Camera export to analyse (semicolon-separated, no header row).
FILE_PATH_CSV <- "/Users/garychen/Desktop/ELEC4120/ssh_folder/csv/08/BARKER_ENTER_2018-08-10.CSV"

#ACTUAL_PLATE replaced field PLATE_COLOR, camera gives no info about that field
file_header <- c("EVENT_DESC","DATE","TIME","PLATE","PLATE_HASH","PLATE_NOT_READ","PLATE_STRING","PLATE_COUNTRY",
                 "PLATE_PROVINCE","PLATE_COUNTRY_CODE","PLATE_REGION","OCRSCORE","OCRSCORE_CHAR","CHAR_HEIGHT",
                 "CHAR_WIDTH","NREAD","SHUTTER","GAIN","STROBO","AI_LEVEL","SPEED","CLASS","CLASS_STRING","VEHICLE_TYPE",
                 "DIRECTION","POS","DEVICE_SN","PLATE_COLOR_STRING","ACTUAL_LICENSE","DIAG_STATUS","DIAG_MASK","DIAG_STRING",
                 "ACQUISITION_MODE","PLATE_MIN_X","PLATE_MIN_Y","PLATE_MAX_X","PLATE_MAX_Y","ORIG_PLATE_MIN_X","ORIG_PLATE_MIN_Y",
                 "ORIG_PLATE_MAX_X","ORIG_PLATE_MAX_Y","TRANSIT_ID","TRIGGER_COUNT","PLATE_DESC_A","PLATE_DESC_B","VEHICLE_TYPE_NUM",
                 "QUALIF_0","QUALIF_1","QUALIF_0_DESC","QUALIF_1_DESC","OCCUPANCY_TIME","GAP_TIME","GAIN_RED","GAIN_BLUE","PLATE_STD","PLATE_TRL","PLATE_ADR","GRAB_MODE")

df <- read.csv(FILE_PATH_CSV, header = F, sep=";")
colnames(df) <- file_header #adds the header name to the df

# Apply the time-window filter to count how many cars are left.
# NOTE(review): strptime() on a time-only field ("%H-%M-%S") attaches the
# *current* date, so comparing against the literal "2018-08-13 ..." strings is
# date-dependent (and the date differs from the 2018-08-10 file name) --
# confirm the intended window before reusing this script.
relevant_col <- df[strptime(df$TIME,"%H-%M-%S") > "2018-08-13 16:56:12 AEST",]
relevant_col <- relevant_col[strptime(relevant_col$TIME,"%H-%M-%S") < "2018-08-13 17:30:59 AEST", ]
print(nrow(relevant_col))

#enter 2018-08-10, first 50 image
|
12302e2f42b3527416775b697486be3eaa70c1e4
|
2c1c5e012e540409e34a999374e0ee893a6d6a71
|
/code/Convert_index.R
|
0e0f540615fb71033cc99346222ac45a9e78f23c
|
[] |
no_license
|
muschellij2/muschellij2.github.io
|
153051ccbae07ea8a0cd6da50ac5397d362e4ca9
|
950d841ced29857fa17fd698d922286db2d7f10d
|
refs/heads/master
| 2021-01-10T06:55:46.670314
| 2020-06-15T21:33:37
| 2020-06-15T21:33:37
| 49,293,062
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,214
|
r
|
Convert_index.R
|
# Convert an XTK viewer page written against the stable xtk.js API into the
# equivalent page for the edge build (xtk_edge.js), which replaced setter
# methods (setColor, setOpacity, ...) with plain property assignment and
# X.renderer with X.renderer3D.
#
# Args:
#   infile:  path of the original HTML file to read.
#   outfile: path the converted HTML is written to (overwritten if present).
#
# Returns: invisibly NULL; called for the side effect of writing `outfile`.
convert_index <- function(infile="index_original.html", outfile = "index.html"){
  # readLines()/writeLines() on a path open and close the connection
  # themselves, so no connection leaks if an error occurs mid-way.
  dat <- readLines(infile)

  # Rewrite the renderer construction.  The two lines following the marker
  # are overwritten, matching the original conversion's assumption about the
  # generated file's layout.
  where.scene <- which(dat == " r0 = new X.renderer('r0');")
  dat[where.scene] <- " renderer0 = new X.renderer3D();"
  dat[where.scene+1] <- " renderer0.container = 'r0';"
  dat[where.scene+2] <- " renderer0.init();"

  # Point the script tag at the edge build of XTK.
  dat <- sub(x=dat, pattern="http://get.goXTK.com/xtk.js", replacement = "http://get.goXTK.com/xtk_edge.js", fixed=TRUE)

  # Rename the renderer variable wherever it is used.
  dat <- gsub(x=dat, pattern=" r0.", replacement = " renderer0.", fixed=TRUE)

  # Setter calls -> property assignments; the captured group (.*) keeps the
  # original argument list verbatim (wrapped in [] for the vector setters).
  dat <- gsub(x=dat, pattern="\\.setColor\\((.*)\\)", replacement = "\\.color = \\[\\1\\]")
  dat <- gsub(x=dat, pattern="\\.setUp\\((.*)\\)", replacement = "\\.up = \\[\\1\\]")
  dat <- gsub(x=dat, pattern="\\.setPosition\\((.*)\\)", replacement = "\\.position = \\[\\1\\]")
  dat <- gsub(x=dat, pattern="\\.setOpacity\\((.*)\\)", replacement = "\\.opacity = \\1")
  dat <- gsub(x=dat, pattern="\\.load\\((.*)\\)", replacement = "\\.file = \\1")
  dat <- gsub(x=dat, pattern="\\.setVisible\\((.*)\\)", replacement = "\\.visible = \\1")
  dat <- gsub(x=dat, pattern="\\.setCaption\\((.*)\\)", replacement = "\\.caption = \\1")

  # Model nodes become X.mesh objects in the edge API.
  dat <- gsub(x=dat, pattern="vtkMRMLModelNode(.*) = new X\\.object\\(\\);", replacement = "vtkMRMLModelNode\\1 = new X\\.mesh\\(\\);")

  # children() and camera() are plain properties in the edge API.
  dat <- gsub(x=dat, pattern=".children().push", replacement = ".children.push", fixed=TRUE)
  dat <- gsub(x=dat, pattern=".camera()", replacement = ".camera", fixed=TRUE)

  writeLines(dat, outfile)
  invisible(NULL)
}
|
9ac6d1c2700abd2bc17f73719764740ed596e724
|
3fc12685acd8034eea0a08d946f49efb746fcf88
|
/man/get_samples.Rd
|
7cb132def9f4fba56911765fe756fbf66f662279
|
[
"BSD-3-Clause"
] |
permissive
|
surbut/mashr
|
16eb64d685085409aeba5785a65d0384d6843ea6
|
b66d2af16503bc46d785ac9c9ba447ecc29b6fae
|
refs/heads/master
| 2020-04-08T03:15:53.456268
| 2018-11-24T20:04:50
| 2018-11-24T20:04:50
| 158,968,895
| 0
| 0
|
BSD-3-Clause
| 2018-11-24T19:54:17
| 2018-11-24T19:54:17
| null |
UTF-8
|
R
| false
| true
| 288
|
rd
|
get_samples.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_functions.R
\name{get_samples}
\alias{get_samples}
\title{Return samples from a mash object}
\usage{
get_samples(m)
}
\arguments{
\item{m}{The mash fit.}
}
\description{
Return samples from a mash object
}
|
63cd23a3fd7403eab0f5c5ffad1e4ff372ee1598
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/qat/examples/qat_save_boot_distribution_1d.Rd.R
|
61328f1eeb63621b1e3968d3680d6afd7b27b2e9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 366
|
r
|
qat_save_boot_distribution_1d.Rd.R
|
# Auto-extracted example for qat::qat_save_boot_distribution_1d().
library(qat)
### Name: qat_save_boot_distribution_1d
### Title: Produce a savelist from a resultlist for a Boot Distribution
### Test
### Aliases: qat_save_boot_distribution_1d
### Keywords: utilities
### ** Examples
# Draw 1000 standard-normal samples as the input vector.
vec <- rnorm(1000)
# Run the 1-D bootstrap distribution analysis (second argument: 1000) and wrap
# it in the named-list structure the save function expects.
result <- list(result=qat_analyse_boot_distribution_1d(vec, 1000))
# Convert the analysis resultlist into a savelist.
savelist <- qat_save_boot_distribution_1d(result)
|
94a50ac13f422614e349dba36f741f638835b7cb
|
fecaed3dea0f640b38c43ea515a5fd615d127164
|
/OptimalControl/hamilSymTest4/hamilSym-f9f8ebef2d8bdfb201c0658dcec51ea75c2a0905/greedyApproach.R
|
a42eb6cb87b0425d84b45c4a6f250c5d003ee605
|
[] |
no_license
|
Dominik12345/R
|
57f272b1afb554090e4311a509c964f1bcc9b828
|
4843d80e9b3c4771df5dbefe32670a44773d7084
|
refs/heads/master
| 2018-09-03T08:35:56.354468
| 2018-06-04T05:32:39
| 2018-06-04T05:32:39
| 109,264,813
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,220
|
r
|
greedyApproach.R
|
# Greedy selection of hidden inputs for an ODE model, built on repeated calls
# to dynElasticNet() (dynamic elastic net estimation).
#
# Args (semantics partly inferred from usage — confirm upstream):
#   alphaStep      optimisation step size forwarded to dynElasticNet()
#                  (multiplied by 10 before the greedy phase).
#   Beta           Armijo line-search parameter (forwarded as armijoBeta).
#   alpha1, alpha2 elastic-net penalty weights; alpha2 = NULL triggers a
#                  cross-validation loop over 10^0 .. 10^-4.
#   x0             initial state of the ODE system.
#   optW           0/1 vector marking which knots/inputs may be optimised.
#   times          measurement time points.
#   measFunc       measurement function of the model.
#   measData       measured data (first column assumed to be time).
#   std            measurement standard deviations.
#   parameters     ODE model parameters.
#   modelFunc      the ODE model function.
#   greedyBoolean  FALSE: return the first estimate; TRUE: run the greedy
#                  input-selection loop and plot diagnostics.
#
# Returns: the result list of the selected dynElasticNet() run.
#
# NOTE(review): every dynElasticNet() call below passes the globals
# `testMessure`, `y` and `testModel` instead of the arguments `measFunc`,
# `measData` and `modelFunc` — this looks like a bug carried over from a test
# script; confirm before reusing this function.
# Side effects: closes all graphics devices, source()s helper files from the
# working directory, prints progress, and draws plots in the greedy branch.
greedyApproach <- function(alphaStep,Beta,alpha1, alpha2, x0, optW, times, measFunc, measData, std,
parameters, modelFunc, greedyBoolean) {
graphics.off()
source("classOdeEquation.R")
source('symbolicDiff.R')
source("createFunctions.R")
source("dynElasticNet.R")
#create the needed files
odeEq <- odeEq()
odeEq <- createModelEqClass(odeEq,modelFunc)
odeEq <- setMeassureFunc(odeEq,measFunc)
odeEq <- isDynElaNet(odeEq)
odeEq <- calculateCostate(odeEq)
createFunctions(odeEq)
#' optW if knots are specific left out of optimisation, get the maximal estimated inputs
iter <- (sum(optW))
#' cross validation alpha
#' tries to calculate the best value for alpha2 based on the best fit, without setting the value to low
crossRes <- list()
if(is.null(alpha2)) {
# Cross-validate alpha2 over 10^0 .. 10^(1-steps), keeping the run with the
# smallest mean RMSE.
steps <- 5
error <- matrix(rep(0,2),ncol=2)
colnames(error) <- c('alpha','MSE')
for (i in 1:steps) {
alpha1 = 0
alpha2 = 1*10^(1-i)
print('')
print(paste0('Alpha 1=',alpha1,' Alpha2=',alpha2))
crossRes[[i]] <- dynElasticNet(alphaStep = alphaStep,armijoBeta = Beta,x0 = x0, optW = optW,
times=times, measFunc= testMessure, measData = y, STD = std,
alpha1 = alpha1, alpha2 = alpha2,
parameters = parameters, modelFunc = testModel, odeObj = odeEq,maxIteration=100)
if (i==1){
error[i,] = c(alpha2,mean(crossRes[[i]]$rmse))
} else {
error = rbind(error,c(alpha2,mean(crossRes[[i]]$rmse)))
}
print(error)
}
# Reuse the best cross-validation run as the initial estimate to save time.
alpha2 = 1*10^(1-which.min(error[,2]))
results <- crossRes[[which.min(error[,2])]] #use the estimated results of the crosssvalidation for saving time
} else {
#' Get initial values for the aucs
#' start first esitmation
results <- dynElasticNet(alphaStep = alphaStep,armijoBeta = Beta, x0 = x0, optW = optW,
times=times, measFunc= testMessure, measData = y, STD = std,
alpha1 = alpha1, alpha2 = alpha2,
parameters = parameters, modelFunc = testModel, odeObj = odeEq)
}
if (!greedyBoolean) {
return(results)
}
else {
# Greedy phase: repeatedly re-estimate with the previous run's selected
# inputs, stopping as soon as the summed RMSE gets worse than the last
# iteration's (then the previous iteration is the answer).
orgOptW <- optW <- results$optW
orgAUC <- results$AUC
barplot(orgAUC)
print(results$rmse)
optWs <- list()
resAlg <- list()
costError <- cbind(rep(0,length(optW)),rep(0,length(optW)))
colnames(costError) <- c('sum(MSE)','cost')
alphaStep = alphaStep*10
for(i in 1:iter) {
print('-----------------------------------------')
print('selection done: starting new optimization')
print('selected inputs:')
print(which(optW > 0))
optWs[[i]] <- optW
resAlg[[i]] <- dynElasticNet(alphaStep = alphaStep,armijoBeta = Beta, alpha1 = alpha1, alpha2 = alpha2,x0 = x0, optW = optW,
times=times, measFunc= testMessure, measData = y, STD = std,
parameters = parameters, modelFunc = testModel, odeObj = odeEq, origAUC = orgAUC)
print(resAlg[[i]]$optW)
costError[i,] = c(sum(resAlg[[i]]$rmse),resAlg[[i]]$J)
# Stop once the fit degrades; the previously selected inputs are reported.
if(i > 1 && ( costError[i,1] > costError[i-1,1]) ) {
print('hidden inputs on knots:')
print(which(optWs[[i-1]] %in% 1))
break
}
optW <- resAlg[[i]]$optW
}
# Diagnostic plots for the last accepted iteration (i-1): estimated AUCs,
# measured vs. estimated trajectories, hidden inputs w, and cost history.
minY = min(min(measData[,-1]),min(resAlg[[i-1]]$y[,-1]))
maxY = max(max(measData[,-1]),max(resAlg[[i-1]]$y[,-1]))
par(mfrow=c(2,2))
barplot(resAlg[[i-1]]$AUC)
matplot(x = measData[,1], y = measData[,-1], type = 'l', col = 4, xlab = '', ylab = '', ylim = c(minY,maxY))
par(new=TRUE)
matplot(x = resAlg[[i-1]]$y[,1], y = resAlg[[i-1]]$y[,-1], type = 'l', col = 2, xlab = 't', ylab = 'y', ylim = c(minY,maxY))
matplot(x = resAlg[[i-1]]$y[,1], y = resAlg[[i-1]]$w, type = 'l', col = 2, xlab = 't', ylab = 'w')
plot(x= 1:length(unlist(resAlg[[i-1]]$totalJ)),y= unlist(resAlg[[i-1]]$totalJ), type = 'l', xlab = 'Iteration', ylab = 'J[w]')
return(resAlg[[i-1]])
}
}
|
67a99e90e08a492024dcb1c4156d9c08041046ad
|
7505f15b5e579e8d38b84e45ce55fb96f33b8ce2
|
/man/confidence-Rules.Rd
|
130bc06f8caced651feca78e3bafa0e1bc9f9679
|
[] |
no_license
|
abuchmueller/Rpriori
|
9eaf823234760a7353cbf8bca770da128ac4763c
|
209b23c4596d641d86513381c2ea2c61b4b10880
|
refs/heads/master
| 2020-03-27T20:59:08.084554
| 2019-05-03T21:51:28
| 2019-05-03T21:51:28
| 147,108,083
| 1
| 0
| null | 2018-09-02T17:44:29
| 2018-09-02T17:44:29
| null |
UTF-8
|
R
| false
| true
| 481
|
rd
|
confidence-Rules.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/allMethods.R
\docType{methods}
\name{confidence-Rules}
\alias{confidence-Rules}
\alias{confidence,Rules-method}
\title{Extract confidence of all rules within a Rules object.}
\usage{
\S4method{confidence}{Rules}(object)
}
\arguments{
\item{object}{Object of class Rules}
}
\value{
Vector of confidence values from all Rules in x.
}
\description{
Extract confidence of all rules within a Rules object.
}
|
5415a241ade996f638e506fc3762afe7eb1b27c7
|
e9fc5f1e721ae9f96fb4a9d12a452a4c8d4f35f9
|
/man/boundary2alpha.Rd
|
f83f4112d6d2ecfbc4c53cf1cc351921fe29f7c3
|
[] |
no_license
|
cran/triggerstrategy
|
8504f4b0c3a5cffa16025d290091fefc61f7edef
|
6d15e9f81567f76ecadcfc4db044238f2b408412
|
refs/heads/master
| 2023-07-25T04:19:55.456694
| 2023-07-04T13:40:02
| 2023-07-04T13:40:02
| 372,566,911
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 657
|
rd
|
boundary2alpha.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Rfun_boundary2alpha.R
\name{boundary2alpha}
\alias{boundary2alpha}
\title{Convert normal critical boundaries to cumulative alpha levels.}
\usage{
boundary2alpha(cvec, t)
}
\arguments{
\item{cvec}{a vector of critical boundaries}
\item{t}{a vector of information times}
}
\value{
alphas, a vector of cumulative errors
}
\description{
This function converts normal critical boundaries to cumulative alpha levels.
}
\examples{
t <- c(0.5,0.8,1)
iuse <- 4
result <- gbounds(t=t, iuse=iuse)
print(result)
boundary2alpha(cvec=result$bd, t=t)
}
\author{
Jiangtao Gou
Fengqing Zhang
}
|
6770a8724ea2faa7f7912ccc806a9527e7a97e62
|
0e28ca7ffd8665ccdb471c15f0978696fab43a88
|
/R/check_element2.R
|
13f40c1a94d6c7edd5abb968a7647f2bbe2673e0
|
[] |
no_license
|
jaspershen/MSannotator
|
fe5ee2ee56b7961b8bb3db2006068960a5c77de6
|
493e4a90c1eb34f9a49bbc7ca951e8236776f957
|
refs/heads/master
| 2021-01-12T04:08:10.470681
| 2016-12-28T06:19:39
| 2016-12-28T06:19:39
| 77,508,189
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 526
|
r
|
check_element2.R
|
# Count how many atoms of a given element a chemical formula contains.
#
# Args:
#   curformula:  a chemical formula, e.g. "C6H12O6" (coerced to character).
#   elementname: the element symbol to look for, e.g. "C", "H" or "Cl".
#
# Returns: the numeric count attached to the first occurrence of the element
#   symbol; 1 when the symbol appears without a trailing count; 0 when the
#   element is absent from the formula.
check_element2 <-
function(curformula, elementname) {
  curformula <- as.character(curformula)
  # (?![a-z]) rejects matches where the symbol is only the prefix of a
  # two-letter element, e.g. "C" no longer matches the "C" of "Cl"/"Ca"
  # (the original pattern paste(elementname, "[0-9]*") had that bug, so
  # check_element2("Cl2", "C") wrongly returned 1).
  pattern <- paste0(elementname, "(?![a-z])[0-9]*")
  hit <- regmatches(curformula, regexpr(pattern, curformula, perl = TRUE))
  if (length(hit) == 0) {
    # No (valid) occurrence of the element at all.
    return(0)
  }
  # Strip the symbol itself; whatever remains is the digit run (possibly "").
  digits <- substring(hit, nchar(elementname) + 1)
  if (nchar(digits) == 0) 1 else as.numeric(digits)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.