blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a86eaa02de22fd7910bacb609af40fb05aae82dc
|
f84a075495fd8ca5e799b82e5b40f1896fa4e901
|
/extra/encoderExample.r
|
774a9d344690ad29b0e8471375005ceb54aedd03
|
[] |
no_license
|
ABMI/RadiologyFeatureExtraction
|
d388212277af4609cad6ce6e4c7d08cae49163a7
|
898069202864331e84876395817c8818d7324023
|
refs/heads/master
| 2020-04-17T16:22:33.975421
| 2019-01-25T05:38:36
| 2019-01-25T05:38:36
| 166,737,629
| 2
| 1
| null | 2019-01-25T05:38:37
| 2019-01-21T02:44:02
|
R
|
UTF-8
|
R
| false
| false
| 24,351
|
r
|
encoderExample.r
|
####VAE####
library(dplyr)
library(keras)
# Build and train a variational auto-encoder (VAE) on 2-D radiology images.
#
# dataPaths          : matrix/data.frame of DICOM paths relative to dataFolder
#                      (one path per row)
# vaeValidationSplit : fraction of the data held out for validation
# vaeBatchSize       : training batch size
# vaeLatentDim       : size of the latent space
# vaeIntermediateDim : size of the intermediate dense layer
# vaeEpoch           : maximum number of training epochs
# vaeEpislonStd      : stddev of the reparameterization noise
# dimensionOfTarget  : only 2 (2-D images) is supported
# dataFolder         : root folder the paths in dataPaths are relative to
# originalDimension  : size images are resized to before the ROI crop
# ROI2D              : list of two index vectors defining the region of interest
# MaxLimitUnit       : intensity cap used to normalize pixel values into [0, 1]
# samplingGenerator  : if TRUE, stream batches from disk via fit_generator;
#                      otherwise load every image into memory and use fit
#
# Returns a list with the trained VAE, the encoder, and the key settings
# needed to apply the model later.
buildRadiologyVae <- function(dataPaths = imagePaths, vaeValidationSplit = 0.2, vaeBatchSize = 200L,
                              vaeLatentDim = 1000L, vaeIntermediateDim = 10000L,
                              vaeEpoch = 500L, vaeEpislonStd = 1.0, dimensionOfTarget = 2,
                              dataFolder,
                              originalDimension = c(128, 128),
                              ROI2D = list(c(10:119), c(10:119)),
                              MaxLimitUnit = 1500,
                              samplingGenerator = FALSE) {
  if (dimensionOfTarget != 2) {
    stop("Currently only dimension of Target = 2 is available")
  }
  # Flattened input size: one input unit per pixel inside the ROI crop
  originalDim <- length(ROI2D[[1]]) * length(ROI2D[[2]])
  # Touch the backend so keras is initialized before any layer is built
  K <- keras::backend()

  # --- Encoder -------------------------------------------------------------
  x <- keras::layer_input(shape = originalDim)
  h <- keras::layer_dense(x, vaeIntermediateDim, activation = "relu")
  z_mean <- keras::layer_dense(h, vaeLatentDim)
  z_log_var <- keras::layer_dense(h, vaeLatentDim)

  # Reparameterization trick: z = mean + exp(log_var / 2) * epsilon
  sampling <- function(arg) {
    z_mean <- arg[, 1:vaeLatentDim]
    z_log_var <- arg[, (vaeLatentDim + 1):(2 * vaeLatentDim)]
    epsilon <- keras::k_random_normal(
      shape = c(keras::k_shape(z_mean)[[1]]),
      mean = 0.,
      stddev = vaeEpislonStd
    )
    z_mean + keras::k_exp(z_log_var / 2) * epsilon
  }
  z <- keras::layer_concatenate(list(z_mean, z_log_var)) %>%
    keras::layer_lambda(sampling)

  # --- Decoder -------------------------------------------------------------
  # These layers are instantiated separately so they can be reused by the
  # stand-alone generator model below.
  decoder_h <- keras::layer_dense(units = vaeIntermediateDim, activation = "relu")
  decoder_mean <- keras::layer_dense(units = originalDim, activation = "sigmoid")
  h_decoded <- decoder_h(z)
  x_decoded_mean <- decoder_mean(h_decoded)

  # End-to-end autoencoder
  vae <- keras::keras_model(x, x_decoded_mean)
  # Encoder: from inputs to latent space
  encoder <- keras::keras_model(x, z_mean)
  # Generator: from latent space to reconstructed inputs
  decoder_input <- keras::layer_input(shape = vaeLatentDim)
  h_decoded_2 <- decoder_h(decoder_input)
  x_decoded_mean_2 <- decoder_mean(h_decoded_2)
  generator <- keras::keras_model(decoder_input, x_decoded_mean_2)

  # VAE loss = reconstruction term (binary cross-entropy, scaled by the input
  # size) + KL divergence. z_mean / z_log_var are captured from the closure.
  vae_loss <- function(x, x_decoded_mean) {
    xent_loss <- (originalDim / 1.0) * keras::loss_binary_crossentropy(x, x_decoded_mean)
    kl_loss <- -0.5 * keras::k_mean(1 + z_log_var - keras::k_square(z_mean) - keras::k_exp(z_log_var), axis = -1L)
    xent_loss + kl_loss
  }
  vaeEarlyStopping <- keras::callback_early_stopping(monitor = "val_loss", patience = 10, mode = "auto", min_delta = 1e-1)
  vae %>% keras::compile(optimizer = "rmsprop", loss = vae_loss)

  # Absolute paths for the image data.
  # Fixed: use the dataPaths argument instead of the global imagePaths.
  actualPaths <- apply(dataPaths, 1, function(x) file.path(dataFolder, x))

  # Read one DICOM file, resize it and crop it to the ROI. Unreadable files
  # yield a try-error object; those pixels become NA and are zeroed below.
  readRoiImage <- function(path) {
    try({
      img <- oro.dicom::dicom2nifti(oro.dicom::readDICOM(path, verbose = FALSE))
      EBImage::resize(img, w = originalDimension[1], h = originalDimension[2])[ROI2D[[1]], ROI2D[[2]]]
    }, silent = TRUE)
  }
  # Normalize a pixel matrix: NA -> 0, cap at MaxLimitUnit, rescale to [0, 1]
  normalizeImages <- function(m) {
    m[is.na(m)] <- 0
    m <- ifelse(m > MaxLimitUnit, MaxLimitUnit, m)
    m / MaxLimitUnit
  }

  if (samplingGenerator) {
    # Held-out validation set, loaded once into memory
    valIndex <- sample(seq(actualPaths), length(actualPaths) * vaeValidationSplit)
    valImages <- lapply(as.array(actualPaths[valIndex]), readRoiImage)
    # Fixed: images were cropped to the ROI, so the array dimensions are the
    # ROI lengths, not originalDimension.
    valImages <- array(unlist(valImages), dim = c(length(ROI2D[[1]]), length(ROI2D[[2]]), length(valImages)))
    valImages <- valImages %>% apply(3, as.numeric) %>% t()
    valImages <- normalizeImages(valImages)

    # Generator yielding (input, target) batches sampled from disk
    sampling_generator <- function(dataPath, batchSize, MaxLimitUnit) {
      function() {
        index <- sample(length(dataPath), batchSize, replace = FALSE)
        images <- lapply(as.array(dataPath[index]), readRoiImage)
        # Fixed: dim must be the ROI lengths — the previous code passed the
        # ROI index vectors themselves as the array dimensions.
        images <- array(unlist(images), dim = c(length(ROI2D[[1]]), length(ROI2D[[2]]), length(images)))
        images <- images %>% apply(3, as.numeric) %>% t()
        images <- normalizeImages(images)
        # A VAE reconstructs its input, so input == target
        list(images, images)
      }
    }
    vae %>% keras::fit_generator(
      sampling_generator(actualPaths[-valIndex], vaeBatchSize, MaxLimitUnit),
      steps_per_epoch = length(actualPaths[-valIndex]) / vaeBatchSize,
      epochs = vaeEpoch,
      validation_data = list(valImages, valImages),
      callbacks = list(vaeEarlyStopping)
    )
  } else {
    # Load every image into memory at once (may need > 2 GB for large sets)
    images <- lapply(as.array(actualPaths), readRoiImage)
    images <- array(unlist(images), dim = c(length(ROI2D[[1]]), length(ROI2D[[2]]), length(images)))
    images <- images %>% apply(3, as.numeric) %>% t()
    images <- normalizeImages(images)
    vae %>% keras::fit(
      images, images,
      shuffle = TRUE,
      epochs = vaeEpoch,
      batch_size = vaeBatchSize,
      validation_split = vaeValidationSplit,
      callbacks = list(vaeEarlyStopping)
    )
  }
  return(list(vae = vae, encoder = encoder, MaxLimitUnit = MaxLimitUnit, vaeBatchSize = vaeBatchSize, vaeLatentDim = vaeLatentDim))
}
# Visual sanity check of a trained VAE: runs the first samplingN images
# through the model, displays one original slice, and returns the
# reconstructed images stacked into a single 3-D array.
#
# vae : the trained keras VAE model (as returned in buildRadiologyVae()$vae)
# Remaining parameters mirror buildRadiologyVae and must match the values
# the model was trained with.
testVae <- function(dataPaths = imagePaths, vaeBatchSize = 200L,
                    vaeLatentDim = 1000L, vaeIntermediateDim = 10000L,
                    dimensionOfTarget = 2,
                    dataFolder,
                    originalDimension = c(128, 128),
                    ROI2D = list(c(10:119), c(10:119)),
                    MaxLimitUnit = 1500,
                    samplingN = 10,
                    vae) {
  if (dimensionOfTarget != 2) {
    stop("Currently only dimension of Target = 2 is available")
  }
  # Flattened input size (kept for reference; mirrors buildRadiologyVae)
  originalDim <- length(ROI2D[[1]]) * length(ROI2D[[2]])
  # Absolute paths for the data.
  # Fixed: use the dataPaths argument instead of the global imagePaths.
  actualPaths <- apply(dataPaths, 1, function(x) file.path(dataFolder, x))
  # Only the first samplingN images are tested
  actualPaths <- actualPaths[seq(samplingN)]
  data.mat <- as.array(actualPaths)
  # Read, resize and crop each DICOM image to the ROI
  images <- lapply(data.mat, function(x) {
    try({
      x <- oro.dicom::dicom2nifti(oro.dicom::readDICOM(x, verbose = FALSE))
      x <- EBImage::resize(x, w = originalDimension[1], h = originalDimension[2])[ROI2D[[1]], ROI2D[[2]]]
    }, silent = TRUE)
  })
  images <- array(unlist(images), dim = c(length(ROI2D[[1]]), length(ROI2D[[2]]), length(images)))
  images <- images %>% apply(3, as.numeric) %>% t()
  # Normalize exactly as at training time: NA -> 0, cap, rescale to [0, 1]
  images[is.na(images)] <- 0
  images <- ifelse(images > MaxLimitUnit, MaxLimitUnit, images)
  images <- images / MaxLimitUnit
  # Stack per-sample slices of a (samples x rows x cols) array along axis 3
  mergeImage <- function(nifs) {
    # Fixed: the previous version initialized resList with NA and tested
    # is.na(resList), which fails once resList holds a multi-element image
    # (a length > 1 condition is an error in modern R).
    resList <- NULL
    for (i in seq_len(dim(nifs)[1])) {
      nif <- oro.nifti::as.nifti(nifs[i, , ])
      if (is.null(resList)) {
        resList <- nif
      } else {
        resList <- abind::abind(resList, nif, along = 3)
      }
    }
    return(resList)
  }
  originalImages <- array(unlist(images), dim = c(samplingN, length(ROI2D[[1]]), length(ROI2D[[2]])))
  # Show one original image for visual comparison.
  # NOTE(review): the slice index 5 is hard-coded and fails if samplingN < 5
  # — confirm whether a parameter is wanted here.
  oro.nifti::image(oro.nifti::as.nifti(originalImages[5, , ]))
  # Fixed: predict with the vae passed as an argument, not the global VAE$vae.
  predicted <- predict(vae, images, batch_size = vaeBatchSize)
  predictedImages <- array(unlist(predicted), dim = c(samplingN, length(ROI2D[[1]]), length(ROI2D[[2]])))
  return(mergeImage(predictedImages))
}
####VQ-VAE####
#' This is the companion code to the post
#' "Discrete Representation Learning with VQ-VAE and TensorFlow Probability"
#' on the TensorFlow for R blog.
#'
#' https://blogs.rstudio.com/tensorflow/posts/2019-01-24-vq-vae/
#' https://github.com/rstudio/keras/pull/642/commits/b3ab58640702d0dd46e6f72b8056c0579bc0f9e2#diff-8f710ecd930d2604fb377340770cab03
# library(keras)
# use_implementation("tensorflow")
# library(tensorflow)
# tfe_enable_eager_execution(device_policy = "silent")
#
# use_session_with_seed(7778,
# disable_gpu = FALSE,
# disable_parallel_cpu = FALSE)
#
# tfp <- import("tensorflow_probability")
# tfd <- tfp$distributions
#
# library(tfdatasets)
# library(dplyr)
# library(glue)
# library(curry)
#
# moving_averages <- tf$python$training$moving_averages
#
#
# # Utilities --------------------------------------------------------
#
# visualize_images <-
# function(dataset,
# epoch,
# reconstructed_images,
# random_images) {
# write_png(dataset, epoch, "reconstruction", reconstructed_images)
# write_png(dataset, epoch, "random", random_images)
#
# }
#
# write_png <- function(dataset, epoch, desc, images) {
# png(paste0(dataset, "_epoch_", epoch, "_", desc, ".png"))
# par(mfcol = c(8, 8))
# par(mar = c(0.5, 0.5, 0.5, 0.5),
# xaxs = 'i',
# yaxs = 'i')
# for (i in 1:64) {
# img <- images[i, , , 1]
# img <- t(apply(img, 2, rev))
# image(
# 1:28,
# 1:28,
# img * 127.5 + 127.5,
# col = gray((0:255) / 255),
# xaxt = 'n',
# yaxt = 'n'
# )
# }
# dev.off()
#
# }
#
#
# # Setup and preprocessing -------------------------------------------------
#
# np <- import("numpy")
#
# # download from: https://github.com/rois-codh/kmnist
# kuzushiji <- np$load("kmnist-train-imgs.npz")
# kuzushiji <- kuzushiji$get("arr_0")
#
# train_images <- kuzushiji %>%
# k_expand_dims() %>%
# k_cast(dtype = "float32")
# train_images <- train_images %>% `/`(255)
#
# buffer_size <- 60000
# batch_size <- 64
# num_examples_to_generate <- batch_size
#
# batches_per_epoch <- buffer_size / batch_size
#
# train_dataset <- tensor_slices_dataset(train_images) %>%
# dataset_shuffle(buffer_size) %>%
# dataset_batch(batch_size, drop_remainder = TRUE)
#
# # test
# iter <- make_iterator_one_shot(train_dataset)
# batch <- iterator_get_next(iter)
# batch %>% dim()
#
# # Params ------------------------------------------------------------------
#
# learning_rate <- 0.001
# latent_size <- 1
# num_codes <- 64L
# code_size <- 16L
# base_depth <- 32
# activation <- "elu"
# beta <- 0.25
# decay <- 0.99
# input_shape <- c(28, 28, 1)
#
# # Models -------------------------------------------------------------------
#
# default_conv <-
# set_defaults(layer_conv_2d, list(padding = "same", activation = activation))
# default_deconv <-
# set_defaults(layer_conv_2d_transpose,
# list(padding = "same", activation = activation))
#
# # Encoder ------------------------------------------------------------------
#
# encoder_model <- function(name = NULL,
# code_size) {
#
# keras_model_custom(name = name, function(self) {
# self$conv1 <- default_conv(filters = base_depth, kernel_size = 5)
# self$conv2 <-
# default_conv(filters = base_depth,
# kernel_size = 5,
# strides = 2)
# self$conv3 <-
# default_conv(filters = 2 * base_depth, kernel_size = 5)
# self$conv4 <-
# default_conv(
# filters = 2 * base_depth,
# kernel_size = 5,
# strides = 2
# )
# self$conv5 <-
# default_conv(
# filters = 4 * latent_size,
# kernel_size = 7,
# padding = "valid"
# )
# self$flatten <- layer_flatten()
# self$dense <- layer_dense(units = latent_size * code_size)
# self$reshape <-
# layer_reshape(target_shape = c(latent_size, code_size))
#
# function (x, mask = NULL) {
# x %>%
# # output shape: 7 28 28 32
# self$conv1() %>%
# # output shape: 7 14 14 32
# self$conv2() %>%
# # output shape: 7 14 14 64
# self$conv3() %>%
# # output shape: 7 7 7 64
# self$conv4() %>%
# # output shape: 7 1 1 4
# self$conv5() %>%
# # output shape: 7 4
# self$flatten() %>%
# # output shape: 7 16
# self$dense() %>%
# # output shape: 7 1 16
# self$reshape()
# }
#
# })
# }
#
#
# # Decoder ------------------------------------------------------------------
#
# decoder_model <- function(name = NULL,
# input_size,
# output_shape) {
#
# keras_model_custom(name = name, function(self) {
# self$reshape1 <- layer_reshape(target_shape = c(1, 1, input_size))
# self$deconv1 <-
# default_deconv(
# filters = 2 * base_depth,
# kernel_size = 7,
# padding = "valid"
# )
# self$deconv2 <-
# default_deconv(filters = 2 * base_depth, kernel_size = 5)
# self$deconv3 <-
# default_deconv(
# filters = 2 * base_depth,
# kernel_size = 5,
# strides = 2
# )
# self$deconv4 <-
# default_deconv(filters = base_depth, kernel_size = 5)
# self$deconv5 <-
# default_deconv(filters = base_depth,
# kernel_size = 5,
# strides = 2)
# self$deconv6 <-
# default_deconv(filters = base_depth, kernel_size = 5)
# self$conv1 <-
# default_conv(filters = output_shape[3],
# kernel_size = 5,
# activation = "linear")
#
# function (x, mask = NULL) {
# x <- x %>%
# # output shape: 7 1 1 16
# self$reshape1() %>%
# # output shape: 7 7 7 64
# self$deconv1() %>%
# # output shape: 7 7 7 64
# self$deconv2() %>%
# # output shape: 7 14 14 64
# self$deconv3() %>%
# # output shape: 7 14 14 32
# self$deconv4() %>%
# # output shape: 7 28 28 32
# self$deconv5() %>%
# # output shape: 7 28 28 32
# self$deconv6() %>%
# # output shape: 7 28 28 1
# self$conv1()
# tfd$Independent(tfd$Bernoulli(logits = x),
# reinterpreted_batch_ndims = length(output_shape))
# }
# })
# }
#
# # Vector quantizer -------------------------------------------------------------------
#
# vector_quantizer_model <-
# function(name = NULL, num_codes, code_size) {
#
# keras_model_custom(name = name, function(self) {
# self$num_codes <- num_codes
# self$code_size <- code_size
# self$codebook <- tf$get_variable("codebook",
# shape = c(num_codes, code_size),
# dtype = tf$float32)
# self$ema_count <- tf$get_variable(
# name = "ema_count",
# shape = c(num_codes),
# initializer = tf$constant_initializer(0),
# trainable = FALSE
# )
# self$ema_means = tf$get_variable(
# name = "ema_means",
# initializer = self$codebook$initialized_value(),
# trainable = FALSE
# )
#
# function (x, mask = NULL) {
#
# # bs * 1 * num_codes
# distances <- tf$norm(tf$expand_dims(x, axis = 2L) -
# tf$reshape(self$codebook,
# c(
# 1L, 1L, self$num_codes, self$code_size
# )),
# axis = 3L)
#
# # bs * 1
# assignments <- tf$argmin(distances, axis = 2L)
#
# # bs * 1 * num_codes
# one_hot_assignments <-
# tf$one_hot(assignments, depth = self$num_codes)
#
# # bs * 1 * code_size
# nearest_codebook_entries <- tf$reduce_sum(
# tf$expand_dims(one_hot_assignments,-1L) * # bs, 1, 64, 1
# tf$reshape(self$codebook, c(
# 1L, 1L, self$num_codes, self$code_size
# )),
# axis = 2L # 1, 1, 64, 16
# )
#
# list(nearest_codebook_entries, one_hot_assignments)
# }
# })
# }
#
#
# # Update codebook ------------------------------------------------------
#
# update_ema <- function(vector_quantizer,
# one_hot_assignments,
# codes,
# decay) {
# # shape = 64
# updated_ema_count <- moving_averages$assign_moving_average(
# vector_quantizer$ema_count,
# tf$reduce_sum(one_hot_assignments, axis = c(0L, 1L)),
# decay,
# zero_debias = FALSE
# )
#
# # 64 * 16
# updated_ema_means <- moving_averages$assign_moving_average(
# vector_quantizer$ema_means,
# # selects all assigned values (masking out the others) and sums them up over the batch
# # (will be divided by count later)
# tf$reduce_sum(
# tf$expand_dims(codes, 2L) *
# tf$expand_dims(one_hot_assignments, 3L),
# axis = c(0L, 1L)
# ),
# decay,
# zero_debias = FALSE
# )
#
# # Add small value to avoid dividing by zero
# updated_ema_count <- updated_ema_count + 1e-5
# updated_ema_means <-
# updated_ema_means / tf$expand_dims(updated_ema_count, axis = -1L)
#
# tf$assign(vector_quantizer$codebook, updated_ema_means)
# }
#
#
# # Training setup -----------------------------------------------------------
#
# encoder <- encoder_model(code_size = code_size)
# decoder <- decoder_model(input_size = latent_size * code_size,
# output_shape = input_shape)
#
# vector_quantizer <-
# vector_quantizer_model(num_codes = num_codes, code_size = code_size)
#
# optimizer <- tf$train$AdamOptimizer(learning_rate = learning_rate)
#
# checkpoint_dir <- "./vq_vae_checkpoints"
#
# checkpoint_prefix <- file.path(checkpoint_dir, "ckpt")
# checkpoint <-
# tf$train$Checkpoint(
# optimizer = optimizer,
# encoder = encoder,
# decoder = decoder,
# vector_quantizer_model = vector_quantizer
# )
#
# checkpoint$save(file_prefix = checkpoint_prefix)
#
# # Training loop -----------------------------------------------------------
#
# num_epochs <- 20
#
# for (epoch in seq_len(num_epochs)) {
#
# iter <- make_iterator_one_shot(train_dataset)
#
# total_loss <- 0
# reconstruction_loss_total <- 0
# commitment_loss_total <- 0
# prior_loss_total <- 0
#
# until_out_of_range({
#
# x <- iterator_get_next(iter)
#
# with(tf$GradientTape(persistent = TRUE) %as% tape, {
#
# codes <- encoder(x)
# c(nearest_codebook_entries, one_hot_assignments) %<-% vector_quantizer(codes)
# codes_straight_through <- codes + tf$stop_gradient(nearest_codebook_entries - codes)
# decoder_distribution <- decoder(codes_straight_through)
#
# reconstruction_loss <-
# -tf$reduce_mean(decoder_distribution$log_prob(x))
#
# commitment_loss <- tf$reduce_mean(tf$square(codes - tf$stop_gradient(nearest_codebook_entries)))
#
# prior_dist <- tfd$Multinomial(total_count = 1,
# logits = tf$zeros(c(latent_size, num_codes)))
# prior_loss <- -tf$reduce_mean(tf$reduce_sum(prior_dist$log_prob(one_hot_assignments), 1L))
#
# loss <-
# reconstruction_loss + beta * commitment_loss + prior_loss
#
# })
#
# encoder_gradients <- tape$gradient(loss, encoder$variables)
# decoder_gradients <- tape$gradient(loss, decoder$variables)
#
# optimizer$apply_gradients(purrr::transpose(list(
# encoder_gradients, encoder$variables
# )),
# global_step = tf$train$get_or_create_global_step())
# optimizer$apply_gradients(purrr::transpose(list(
# decoder_gradients, decoder$variables
# )),
# global_step = tf$train$get_or_create_global_step())
#
# update_ema(vector_quantizer,
# one_hot_assignments,
# codes,
# decay)
#
# total_loss <- total_loss + loss
# reconstruction_loss_total <-
# reconstruction_loss_total + reconstruction_loss
# commitment_loss_total <- commitment_loss_total + commitment_loss
# prior_loss_total <- prior_loss_total + prior_loss
#
# })
#
# checkpoint$save(file_prefix = checkpoint_prefix)
#
# cat(
# glue(
# "Loss (epoch): {epoch}:",
# " {(as.numeric(total_loss)/trunc(buffer_size/batch_size)) %>% round(4)} loss",
# " {(as.numeric(reconstruction_loss_total)/trunc(buffer_size/batch_size)) %>% round(4)} reconstruction_loss",
# " {(as.numeric(commitment_loss_total)/trunc(buffer_size/batch_size)) %>% round(4)} commitment_loss",
# " {(as.numeric(prior_loss_total)/trunc(buffer_size/batch_size)) %>% round(4)} prior_loss",
#
# ),
# "\n"
# )
#
# # display example images (choose your frequency)
# if (TRUE) {
# reconstructed_images <- decoder_distribution$mean()
# # (64, 1, 16)
# prior_samples <- tf$reduce_sum(
# # selects one of the codes (masking out 63 of 64 codes)
# # (bs, 1, 64, 1)
# tf$expand_dims(prior_dist$sample(num_examples_to_generate),-1L) *
# # (1, 1, 64, 16)
# tf$reshape(vector_quantizer$codebook,
# c(1L, 1L, num_codes, code_size)),
# axis = 2L
# )
# decoded_distribution_given_random_prior <-
# decoder(prior_samples)
# random_images <- decoded_distribution_given_random_prior$mean()
# visualize_images("k", epoch, reconstructed_images, random_images)
# }
# }
|
fcfe10daed18a5df8736bc21c2dfa122ead649a0
|
2bcd0393bd4a98f415e3ed1353e8213d226fa6dc
|
/clustering.R
|
133e351961ce521bdacd5a5e2ef9604f2734f93c
|
[] |
no_license
|
kabalord/clustering
|
ae96e5ede932933942334232a43a1a50693a72a3
|
5e6b630277949a3b88dba108d41dc12e2545820c
|
refs/heads/master
| 2023-02-02T07:25:02.347324
| 2020-12-18T10:39:14
| 2020-12-18T10:39:14
| 319,901,731
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,734
|
r
|
clustering.R
|
#installation des packages avec une fonction pour la gestion des packages
# Install any package from the list that is missing, then attach all three.
list_packages_names <- c("fpc", "clusteval", "cluster")
for (packname in list_packages_names) {
  already_installed <- packname %in% rownames(installed.packages())
  if (!already_installed) {
    install.packages(packname)
  }
}
library(fpc)
library(clusteval)
library(cluster)
#1.1 Data
# Sizes of the four simulated groups (70 points in total)
n1<-20
n2<-30
n3<-15
n4<-5
set.seed(123) # important: fix the RNG seed so the sample is reproducible
# Simulated sample: four 2-D Gaussian clusters with different means,
# stacked row-wise into a single data frame
echantillon<-data.frame(rbind(matrix(rnorm(n1*2),ncol=2),
matrix(rnorm(n2*2,mean = 4),ncol=2),
matrix(c(rnorm(n3,mean = 4),rnorm(n3,mean=-4.5)),ncol=2),
matrix(c(rnorm(n4,mean = -2),rnorm(n4,mean=-2.5)),ncol=2)))
names(echantillon)<-c("Variable1","Variable2")
#1.2 Partition
# Random-partition helper (original author note: "maxence fonction")
# Assign each row of `data` a class label drawn uniformly at random from
# 1..x (labels may repeat and some classes can come out empty).
# data : data frame whose rows are to be labelled
# x    : number of classes
# Returns an integer vector of length nrow(data).
random_clust <- function(data, x) {
  # seq_len(x) and TRUE instead of 1:x / T (same behavior, safer idiom)
  sample(seq_len(x), nrow(data), replace = TRUE)
}
res <- random_clust(echantillon,3) # draw a random class label in 1..3 per row
# NOTE(review): the original trailing comment said "pour 4 classes" (for 4
# classes) but the call asks for 3 — confirm the intended class count.
print(res)
echantillon$Classe <- res
print(echantillon)
# Assign a quadrant-based class label to each row of df, using the medians
# of Variable1 and Variable2 as the quadrant boundaries:
#   1 = below both medians, 2 = above both,
#   3 = above med1 / below med2, 4 = below med1 / above med2.
# NOTE(review): points lying exactly on a median match no quadrant and keep
# their previous Classe value — confirm whether that is intended.
quad_clust <- function(df) {
  m1 <- median(df$Variable1)
  m2 <- median(df$Variable2)
  below1 <- df$Variable1 < m1
  below2 <- df$Variable2 < m2
  above1 <- df$Variable1 > m1
  above2 <- df$Variable2 > m2
  df[below1 & below2, "Classe"] <- 1
  df[above1 & above2, "Classe"] <- 2
  df[above1 & below2, "Classe"] <- 3
  df[below1 & above2, "Classe"] <- 4
  return(df)
}
# NOTE(review): the result of quad_clust() is printed but never assigned, so
# echantillon$Classe still holds the random labels when plotted below —
# confirm whether `echantillon <- quad_clust(echantillon)` was intended.
quad_clust(echantillon)
#1.3 Visualizing the obtained classes
# Scatter plot of the sample; point fill color (bg) encodes the class label
plot(x = echantillon$Variable1 ,
y = echantillon$Variable2 ,
pch = 21 ,
bg = echantillon$Classe ,
xlab = "Variable 1" ,
ylab = "Variable 2")
#1.4 Describing the obtained classes
# Per-class summary statistics. aggregate() returns matrix-valued columns,
# so do.call(data.frame, ...) flattens them into ordinary columns.
synthese<-do.call(data.frame,aggregate(echantillon,
by=list(echantillon$Classe),
FUN = function(x){
return(c(Effectif = length(x),
Moyenne = mean(x),
Mediane = median(x),
Minimum = min(x),
Maximum = max(x),
Ecart_type = sd(x)))
}))
# knitr::kable(synthese)
print(synthese)
#1.5 Evaluation / comparison
# Install the evaluation packages if they are missing
list_packages_names <- c("fpc", "clusteval")
for (packname in list_packages_names){
if(packname %in% rownames(installed.packages()) == FALSE){
install.packages(packname)
}
}
#1.5.1 Advanced visualization
x1 <- quad_clust(echantillon)
clusplot(pam(x1, 4))
plotcluster(x1, x1$Classe)
# NOTE(review): random_clust() returns a plain label vector, not a data
# frame, so pam(x2, 3) and plotcluster(x2, x2) below operate on the labels
# themselves — confirm whether echantillon was meant to be passed as the
# data argument here.
x2 <- random_clust(echantillon,3)
clusplot(pam(x2, 3))
plotcluster(x2, x2)
#1.6 Analysis of the silhouette coefficients.
si<-silhouette(echantillon$Classe,dist(echantillon[,1:2]))
plot(si)
print(si)
# Do the same with the quad_clust results
resqc<-silhouette(x1$Classe,dist(x1[,1:2]))
plot(resqc)
print(resqc)
#1.7 Inertia computation
# Total inertia of a point cloud: the sum over points of the squared
# Euclidean distance to the (weighted) centroid, each distance weighted
# by the point's weight.
# df : data frame holding the point cloud (numeric columns)
# p  : optional weight vector (defaults to weight 1 for every point)
inertie<-function(df,p=NULL){
  # If no weight vector is supplied, all weights are 1
  if (is.null(p)){
    p <- rep(1,nrow(df))
  }
  m <- as.matrix(df)
  # Weighted centroid: weighted mean of every column
  g <- ( p %*% m ) / sum(p)
  # Vectorized inertia (replaces the original per-row loop, same result):
  # row-wise squared distances to g, weighted by p, then summed
  centered <- sweep(m, 2, as.numeric(g))
  iner <- sum(rowSums(centered^2) * p)
  return(iner)
}
# Within-class (intra-cluster) inertia: the sum of the inertias of each
# cluster taken separately.
# df : data frame of points
# cl : vector of cluster labels (one per row of df)
inertie_intra<-function(df,cl){
  labels <- unique(cl)
  res<-rep(0,length(labels))
  # Fixed: index res by position, not by label value — the previous
  # res[k] assumed labels are exactly 1..K and broke (NA sums) for
  # non-contiguous or non-integer labels.
  for (i in seq_along(labels)){
    # drop = FALSE keeps a one-column df a data frame (nrow() stays valid)
    res[i] <- inertie(df[which(cl==labels[i]), , drop = FALSE])
  }
  return(sum(res))
}
# Between-class (inter-cluster) inertia: the inertia of the cluster
# centroids, each centroid weighted by its cluster size.
# df  : data frame of points
# clu : vector of cluster labels (one per row of df)
inertie_inter<-function(df,clu){
  # Fixed: iterate over sorted unique labels and index M by position —
  # the previous M[k,] assumed labels are exactly 1..K, and the sorted
  # order keeps rows of M aligned with the counts from table(clu).
  labels <- sort(unique(clu))
  M<-matrix(0, nrow = length(labels), ncol = ncol(df))
  for (i in seq_along(labels)){
    M[i,]<-unname(colMeans(df[which(clu==labels[i]), , drop = FALSE]))
  }
  return(inertie(data.frame(M),unname(table(clu))))
}
# -------
# MAIN
# ------
# Load the built-in cars data set
data(cars)
# Partition the data (here via agglomerative hierarchical clustering)
hca <- hclust(dist(cars))
clu <- cutree(hca,h=40)
# Display the clustering objects
print(hca)
print(clu)
# Display the computed inertias
print(inertie_intra(cars,clu))
print(inertie_inter(cars,clu))
print(inertie(cars))
print(inertie_intra(echantillon,random_clust(echantillon,3)))
print(inertie_inter(echantillon,random_clust(echantillon,3)))
# Fixed: 'inertie2' is not defined anywhere in this script; the total
# inertia helper is 'inertie'
print(inertie(echantillon))
#1.8 Similarity
#2 Clustering on "real" data with the k-means and HAC algorithms
#2.1 Package installation
install.packages("cluster.datasets")
#2.2 Loading the data
library(cluster.datasets)
data("acidosis.patients")
#2.3 First description
summary(acidosis.patients)
str(acidosis.patients)
head(acidosis.patients,n = 4)
#2.4 HAC
# Scatterplot matrix of the raw data
pairs(acidosis.patients)
# Center and scale the data
acidosis.patients.scale <- scale(acidosis.patients)
# Euclidean distance matrix between individuals.
# Fixed: compute distances on the scaled data — the unscaled data was used
# before, contradicting the scaling step just above.
acidosis.patients.dist <-dist(acidosis.patients.scale)
# HAC from this distance matrix
acidosis.patients.hclust <- hclust(acidosis.patients.dist)
plot(acidosis.patients.hclust)
# Cut into 4 classes and color the scatterplot matrix by class membership
dendrogram <- as.dendrogram(acidosis.patients.hclust)
plot(dendrogram)
rect.hclust(acidosis.patients.hclust, k=4)
# Fixed: an hclust object cannot be subset with [, 4]; the class labels come
# from cutree() and drive the point colors.
classes.hclust <- cutree(acidosis.patients.hclust, k = 4)
pairs(acidosis.patients, col = c("red", "cornflowerblue", "purple", "yellow")[classes.hclust])
#2.5 k-means
# Fixed: kmeans() takes the number of clusters via 'centers', not 'k'
kmeansmet <- kmeans(acidosis.patients.scale, centers = 4)
# Fixed: plot(kmeans) would try to plot the kmeans function itself; show the
# data colored by the fitted cluster assignment instead.
pairs(acidosis.patients, col = kmeansmet$cluster)
#3 Optimal number of classes
# NOTE(review): install.packages() inside an analysis script hits the
# network on every run — consider moving installation out of the script.
install.packages(c("factoextra","NbClust"))
library(factoextra)
library(NbClust)
# Elbow method
fviz_nbclust(acidosis.patients, kmeans, method = "wss") +
geom_vline(xintercept = 4, linetype = 2)+
labs(subtitle = "Elbow method")
# Silhouette method
fviz_nbclust(acidosis.patients, kmeans, method = "silhouette")+
labs(subtitle = "Silhouette method")
# Gap statistic
# nboot = 50 to keep the function speedy.
# recommended value: nboot= 500 for your analysis.
# Use verbose = FALSE to hide computing progression.
set.seed(123)
fviz_nbclust(acidosis.patients, kmeans, nstart = 25, method = "gap_stat", nboot = 50)+
labs(subtitle = "Gap statistic method")
# NOTE(review): NbClust and factoextra are already attached above; the two
# library() calls below are redundant (though harmless).
library("NbClust")
nb <- NbClust(acidosis.patients, distance = "euclidean", min.nc = 2,
max.nc = 10, method = "kmeans")
library("factoextra")
fviz_nbclust(nb)
#tests
# NOTE(review): a hard-coded absolute setwd() path makes this script
# non-portable — prefer project-relative paths.
setwd("/Users/walterroaserrano/Desktop/M22021/apprentissageNONsupervise")
getwd()
df <- read.csv("varied.csv")
# Elbow method
fviz_nbclust(df, kmeans, method = "wss") +
geom_vline(xintercept = 4, linetype = 2)+
labs(subtitle = "Elbow method")
# Silhouette method
fviz_nbclust(df, kmeans, method = "silhouette")+
labs(subtitle = "Silhouette method")
# Gap statistic
# nboot = 50 to keep the function speedy.
# recommended value: nboot= 500 for your analysis.
# Use verbose = FALSE to hide computing progression.
set.seed(123)
fviz_nbclust(df, kmeans, nstart = 25, method = "gap_stat", nboot = 50)+
labs(subtitle = "Gap statistic method")
# 7 Help, hints and complements
# 7.1 Descriptive and multivariate analyses
install.packages("ade4")
library(ade4)
varied <- read.csv(file = 'varied.csv',row.names=1, header=FALSE)
#PCA (principal component analysis)
dudi.pca(varied)
|
582ec2cf23bbcf2d94f18d0f87687f3def524643
|
2487dfa8bb23d3e1a9000dba265c416cccb69939
|
/man/df2matrix.Rd
|
82cbd967762586937cf7d7d0bdaec943d370b0cb
|
[] |
no_license
|
cran/R2MLwiN
|
f2c5694b60e3a392ad516ab63689c642f3fc72bb
|
593d94db244d3fc07538aedf83fc183859b9f5fd
|
refs/heads/master
| 2023-03-21T15:14:11.554599
| 2023-03-14T04:40:02
| 2023-03-14T04:40:02
| 17,681,793
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,208
|
rd
|
df2matrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/df2matrix.R
\name{df2matrix}
\alias{df2matrix}
\title{Translates a data.frame, formatted for use in multiple membership
modelling in MLwiN, to a matrix.}
\usage{
df2matrix(data, idcols, weightcols)
}
\arguments{
\item{data}{A \code{\link[base]{data.frame}} object.}
\item{idcols}{String vector of the identifier column names.}
\item{weightcols}{String vector of the weight column names.}
}
\value{
An adjacency matrix as returned by \code{\link[Matrix]{sparseMatrix}}.
}
\description{
Translates a \code{\link[base]{data.frame}}, in a form usable by MLwiN for multiple membership models,
into a \code{\link[base]{matrix}}. The data.frame needs to contain (a) columns with membership IDs
(e.g. first row of which might be \code{2, 3, 5, 6, 0, 0}) and (b) columns containing weights
(e.g. first row of which might be \code{0.25, 0.25, 0.25, 0.25, 0, 0}; in this example the first row of
resulting matrix would be \code{0, 1, 1, 0, 1, 1}).
}
\seealso{
\code{\link{matrix2df}}
}
\author{
Zhang, Z., Charlton, C.M.J., Parker, R.M.A., Leckie, G., and Browne,
W.J. (2016) Centre for Multilevel Modelling, University of Bristol, U.K.
}
|
59451e197d068e1d67ac59b737f525a2bfa558c2
|
d69ce1ab7f083da591cb8608f1ebd77588d3fa5a
|
/man/list_accession.Rd
|
c09aa3404a917896c71dd3de5eea2f602d8a1c66
|
[] |
no_license
|
YosefLab/ImpulseDE2
|
6da580082878c2a8c2913c9d378623e844388cb4
|
ab5282464d92ea7c06d211e42af1cbdfbe127b73
|
refs/heads/master
| 2022-10-04T18:12:18.080047
| 2022-09-14T09:27:18
| 2022-09-14T09:27:18
| 55,623,543
| 29
| 13
| null | 2022-09-14T09:27:19
| 2016-04-06T16:45:41
|
R
|
UTF-8
|
R
| false
| true
| 1,826
|
rd
|
list_accession.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/srcImpulseDE2_classImpulseDE2Object.R
\docType{methods}
\name{list_accession}
\alias{list_accession}
\alias{names.ImpulseDE2Object}
\alias{names,ImpulseDE2Object-method}
\alias{$,ImpulseDE2Object-method}
\alias{[[,ImpulseDE2Object,character,missing-method}
\title{List-like accessor methods for ImpulseDE2Object}
\usage{
\S4method{names}{ImpulseDE2Object}(x)
\S4method{[[}{ImpulseDE2Object,character,missing}(x, i, j, ...)
\S4method{$}{ImpulseDE2Object}(x, name)
}
\arguments{
\item{x}{(ImpulseDE2Object) ImpulseDE2 output object.}
\item{i, name}{(idx or str) Name or index of core output
element of ImpulseDE2Object.}
\item{j}{Not used, only vectors.}
\item{...}{Not used.}
}
\value{
Names of core output in ImpulseDE2Object.
Target element from ImpulseDE2Object.
Target element from ImpulseDE2Object.
}
\description{
Allow usage of ImpulseDE2 output object like a list with
respect to the core output:
dfImpulseDE2Results and vecDEGenes.
}
\examples{
lsSimulatedData <- simulateDataSetImpulseDE2(
vecTimePointsA = rep(seq(1,8),3),
vecTimePointsB = NULL,
vecBatchesA = NULL,
vecBatchesB = NULL,
scaNConst = 30,
scaNImp = 10,
scaNLin = 10,
scaNSig = 10)
objectImpulseDE2 <- runImpulseDE2(
matCountData = lsSimulatedData$matObservedCounts,
dfAnnotation = lsSimulatedData$dfAnnotation,
boolCaseCtrl = FALSE,
vecConfounders = NULL,
scaNProc = 1 )
names(objectImpulseDE2) # Display core output
# With respect to this core output, objectImpulseDE2
# can be treated like a list.
head(objectImpulseDE2[['dfImpulseDE2Results']])
head(objectImpulseDE2$dfImpulseDE2Results)
head(objectImpulseDE2[['vecDEGenes']])
head(objectImpulseDE2$vecDEGenes)
}
\author{
David Sebastian Fischer
}
|
85c280cab0e21c1b3da06999e098a6c5767330b6
|
c5f0075dbf7a226beca7d2cac63ef70bca921f8d
|
/Data-Mining/regression/one_useful_variable.r
|
0ea5cc57fe6768c12356c2cc7993a3508c344a18
|
[] |
no_license
|
jaeyoung-jane-choi/2019_Indiana_University
|
23a271ce5294f69e08775f06437d2125c789c419
|
c9a7604147d9efee5bc2386cc9ae8d5240eaf450
|
refs/heads/main
| 2023-06-07T07:56:20.466645
| 2021-07-02T03:49:19
| 2021-07-02T03:49:19
| 382,091,809
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,311
|
r
|
one_useful_variable.r
|
# Demo: a single useful variable plus 99 garbage variables as predictors.
# When we fit the model we see y = yhat, indicating very good prediction on
# the training data, but the model performs badly on new data FROM THE SAME
# MODEL (classic overfitting).
# Ridge regression adds a penalty on the "complexity" of the regression
# coefficients (a).
# NOTE(review): there is no set.seed() call, so results differ between runs
# — add one if reproducibility matters.
n = 100
d = 100 # note we have as many predictor variables as observations so likely overfitting
X = matrix(rnorm(n*d),nrow = n, ncol=d);
y = 5*X[,1] + rnorm(n); # the response depends only on the 1st predictor!
plot(X[,1],y); # the relation between the first predictor and y is obvious
# Ordinary least squares via the normal equations: a = (X'X)^{-1} X'y
a = solve(t(X) %*% X, t(X) %*% y);
#lambda = .5; # for ridge regression
#a = solve(t(X) %*% X + lambda*diag(d), t(X) %*% y); # ridge regression solves a different optimization
yhat = X %*% a;
plot(y,yhat); # perfect prediction on the training data. we should be happy ..... ?
X = matrix(rnorm(n*d),nrow = n, ncol=d); # fresh data from the same model
y = 5*X[,1] + rnorm(n);
yhat = X %*% a;
plot(y,yhat); # prediction not so good ... but improves with ridge regression
#cat("lambda = ", lambda, " test error = ", sum((y-yhat)^2), "normsq of a = ", sum(a*a), "\n") # for RR
cat(" test error = ", sum((y-yhat)^2), "normsq of a = ", sum(a*a), "\n")
|
9ea7a2b13201d885a41f1fe68aaf1c6b091db15f
|
3fbd8c5078d5ebb28e23558b158ab74ec0f2ed6b
|
/man/lutInfo.Rd
|
0632d88ef1644c29f04e486ad7a5b37b4d08417e
|
[
"MIT"
] |
permissive
|
envima/envimaR
|
161cf2e8a0569292ae18b0edfbb0f99900f97bd4
|
c8854cd06203a12cf923818728f9cff9a2e41a3d
|
refs/heads/master
| 2021-07-23T23:29:35.948757
| 2021-07-13T22:40:40
| 2021-07-13T22:40:40
| 156,347,896
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 427
|
rd
|
lutInfo.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpMa.R
\name{lutInfo}
\alias{lutInfo}
\title{Get values of default environment from internal look-up table}
\usage{
lutInfo()
}
\value{
List containing lut content.
}
\description{
Get values of default environment look-up table (not required
for the package but to cross-check from a user).
}
\details{
None
}
\examples{
\dontrun{
lutInfo()
}
}
|
eb6763b35ccd36a67e2646018a7fc0c711b2c7dd
|
a0d90bd3ecde3cbeab475095c889ac3f3c2afe05
|
/man/sk_utils.Rd
|
f63a05200cd0cc4bc8e89cae3dbf64ebe339e17b
|
[
"MIT"
] |
permissive
|
abresler/sklearn
|
2ad843933019ed69cd7b1e12df4a2fbcd3de57e0
|
e51874420bb75eebd8e49aa677a30b7ec8f5bd3d
|
refs/heads/master
| 2023-08-03T18:05:48.101569
| 2023-07-19T13:09:25
| 2023-07-19T13:09:25
| 174,732,352
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 416
|
rd
|
sk_utils.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sklearn.R
\name{sk_utils}
\alias{sk_utils}
\title{SK Utilities
\href{https://scikit-learn.org/stable/modules/classes.html#module-sklearn.utils}{utility functions}}
\usage{
sk_utils()
}
\value{
python object
}
\description{
SK Utilities
\href{https://scikit-learn.org/stable/modules/classes.html#module-sklearn.utils}{utility functions}
}
|
762d250f7a3d5675d1690c0adbe3c21125ae3a77
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/glycanr/examples/glyco.outliers.Rd.R
|
193fdee011d18dd930806ea02592cd0801dd84b4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 228
|
r
|
glyco.outliers.Rd.R
|
# Extracted example script for glycanr::glyco.outliers (outlier discovery in glycan data)
library(glycanr)
### Name: glyco.outliers
### Title: Discover outliers in glycan data
### Aliases: glyco.outliers
### ** Examples
# load the example dataset 'mpiu' shipped with glycanr
data(mpiu)
glyco.outliers(mpiu)
# outliers per plate
glyco.outliers(mpiu, group="Plate")
|
a585017b5526bf141a0a3f21112a21b163869955
|
6c616664e83c91416219efcf9050fa65e8c50a66
|
/distamp_tangara.R
|
50c65abcb4ae0c16eab86f0037cab9a8ad494134
|
[
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
biodiego88/Population-density-of-the-Multicolored-Tanager
|
16137aa1d0f5c399217c086a695c1b954e969614
|
ea5c78d79575c690c7b892c82b17fd7d4700c6f7
|
refs/heads/main
| 2023-03-30T18:11:54.801207
| 2021-04-01T16:41:35
| 2021-04-01T16:41:35
| 353,763,329
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 6,525
|
r
|
distamp_tangara.R
|
# TUTORIAL
# Distance-sampling analysis of Multicolored Tanager point counts with the
# 'unmarked' package (distsamp), followed by AICc model selection (AICcmodavg).
vignette("distsamp")
install.packages("unmarked")
install.packages("AICcmodavg")
library(unmarked)
library(AICcmodavg)
# set the working directory containing our data files
setwd("C:/RAnalysis/Unmarked/distsamp/tangara")
# read the file holding our distance data and name it "dists"
dists <- read.csv("tangara_correcto.csv", header = TRUE, sep=",")
# some tutorials require the sampling-unit label column to be coerced to a factor
dists$point <- as.factor(dists$point)
levels(dists$point) <- c(levels(dists$point))
levels(dists$point)
# convert the raw distance observations into distance bins, here every 10 metres
cp = c(0, 10, 20, 30, 40)
cp
# arrange the count data into the required format with formatDistData
yData = formatDistData(dists, "dist", "point", cp)
yData
# import the covariate data so it matches the layout of the counts
# per sampling unit and per distance bin
covs <- read.csv("tangaracovs.csv", header = TRUE, sep=",")
# unmarkedFrameDS organizes the data so it can be analysed with distsamp
umf <- unmarkedFrameDS(y=as.matrix(yData), siteCovs=covs, survey="point",
                       dist.breaks=c(0, 10, 20, 30, 40), unitsIn="m")
summary(umf)
hist(umf, xlab="distance (m)", main="", cex.lab=0.8, cex.axis=0.8)
# fit the models, starting with a null model under each detection function:
# half-normal, hazard-rate, exponential and uniform
hn_Null <- distsamp(~1~1, umf, keyfun="halfnorm", output="density", unitsOut="ha")
hz_Null <- distsamp(~1~1, umf, keyfun="hazard", output="density", unitsOut="ha")
exp_Null <- distsamp(~1~1, umf, keyfun="exp", output="density", unitsOut="ha")
unf_Null <- distsamp(~1~1, umf, keyfun="uniform", output="density", unitsOut="ha")
# models with constant detection probability and density explained by habitat,
# under each detection function
hn_Nullhab <- distsamp(~1~habitat, umf, keyfun="halfnorm", output="density", unitsOut="ha")
hz_Nullhab <- distsamp(~1~habitat, umf, keyfun="hazard", output="density", unitsOut="ha")
exp_Nullhab <- distsamp(~1~habitat, umf, keyfun="exp", output="density", unitsOut="ha")
unf_Nullhab <- distsamp(~1~habitat, umf, keyfun="uniform", output="density", unitsOut="ha")
# models with detection probability explained by habitat and constant (null)
# density, under each detection function
hn_habNull <- distsamp(~habitat~1, umf, keyfun="halfnorm", output="density", unitsOut="ha")
hz_habNull <- distsamp(~habitat~1, umf, keyfun="hazard", output="density", unitsOut="ha")
exp_habNull <- distsamp(~habitat~1, umf, keyfun="exp", output="density", unitsOut="ha")
unf_habNull <- distsamp(~habitat~1, umf, keyfun="uniform", output="density", unitsOut="ha")
# models with both detection probability and density explained by habitat,
# under each detection function
hn_habhab <- distsamp(~habitat~habitat, umf, keyfun="halfnorm", output="density", unitsOut="ha")
hz_habhab <- distsamp(~habitat~habitat, umf, keyfun="hazard", output="density", unitsOut="ha")
exp_habhab <- distsamp(~habitat~habitat, umf, keyfun="exp", output="density", unitsOut="ha")
unf_habhab <- distsamp(~habitat~habitat, umf, keyfun="uniform", output="density", unitsOut="ha")
# model fitting and selection
# NOTE(review): the second fitList assignment below overwrites the first
# (16-model) list with an 8-model subset before modSel is called — confirm
# which candidate set was intended.
fmList <- fitList(hn_Null=hn_Null, hz_Null=hz_Null, exp_Null=exp_Null, unf_Null=unf_Null,
                  hn_Nullhab=hn_Nullhab, hz_Nullhab=hz_Nullhab, exp_Nullhab=exp_Nullhab,
                  unf_Nullhab=unf_Nullhab, hn_habNull=hn_habNull, hz_habNull=hz_habNull,
                  exp_habNull=exp_habNull, unf_habNull=unf_habNull, hn_habhab=hn_habhab,
                  hz_habhab=hz_habhab, exp_habhab=exp_habhab, unf_habhab=unf_habhab)
fmList <- fitList(hn_Null=hn_Null, hz_Null=hz_Null, exp_Null=exp_Null, unf_Null=unf_Null,
                  hn_Nullhab=hn_Nullhab, hz_Nullhab=hz_Nullhab, exp_Nullhab=exp_Nullhab,
                  unf_Nullhab=unf_Nullhab)
modSel(fmList, nullmod="hn_Null")
Cand.models <- list(hn_Null, hz_Null, exp_Null, unf_Null,
                    hn_Nullhab, hz_Nullhab, exp_Nullhab,
                    unf_Nullhab, hn_habNull, hz_habNull,
                    exp_habNull, unf_habNull, hn_habhab,
                    hz_habhab, exp_habhab, unf_habhab)
# name the models
Modnames <- c("hn_Null", "hz_Null", "exp_Null", "unf_Null",
              "hn_Nullhab", "hz_Nullhab", "exp_Nullhab",
              "unf_Nullhab", "hn_habNull", "hz_habNull",
              "exp_habNull", "unf_habNull", "hn_habhab",
              "hz_habhab", "exp_habhab", "unf_habhab")
aictab(cand.set = Cand.models, modnames = Modnames,
       second.ord = TRUE, nobs = NULL, sort = TRUE)
summary(hz_habhab)
# after choosing the best model, back-transform the estimates (antilog) with
# backTransform, because these are Poisson-family GLMs fitted on the log scale
backTransform(hn_Null, type="state")
backTransform(hn_Null, type="det")
backTransform(hz_Null, type="scale")
# when the model includes covariates, back-transformation goes through
# linearComb; in this case, however, that was not possible
backTransform(linearComb(hz_Nullhab['state'], c('BM', 'BS', 'CB', 'R')))
# so instead obtain the transformed predictions with predict(): build a new
# data frame ("nuevo") of habitat levels, then predict at level 0.90 (90% CI)
nuevo = data.frame(habitat = c("R", "BS", "BM", "CB"))
phz_Nullhab= predict(hz_Nullhab, type = "state", newdata = nuevo, appendData = T, level = 0.90)
# confidence intervals can also be computed with confint: directly on the
# prediction object, or by exponentiating confint of the untransformed model
confint(phz_Nullhab, type='state', method='normal')
exp(confint(hz_Null, type='state', method='normal', level=0.90))
|
3d412e5d07ca817a45d669ab726d31013e33e581
|
81ff4f78af4de923a71b1ac05cfebc82fb01818d
|
/CRAN/contrib/spaMM/R/LR.R
|
a2eb290447fa5965d3eba65ff538084d36a10854
|
[] |
no_license
|
PRL-PRG/dyntrace-instrumented-packages
|
9d56a2a101c4bd9bee6bbe2ababe014d08cae1a0
|
6c586a26b8007dc6808865883e1dd7d2a1bf6a04
|
refs/heads/master
| 2020-03-08T15:45:59.296889
| 2018-04-06T09:41:52
| 2018-04-06T09:41:52
| 128,220,794
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,349
|
r
|
LR.R
|
## Check that two HLfit objects represent comparably-fitted nested models and
## determine: which is the full model, which the null, the likelihood component
## to test ('p_v' for ML, 'p_bv' for REML) and the df of the LRT.
## Returns list(fullfit, nullfit, test_obj, df).
## NOTE(review): relies on spaMM internals ($`X.pv`, $APHLs, $ZAlist,
## $lambda.object, $strucList, .get_compact_cov_mats) not visible here.
.compare_model_structures <- function(object,object2) {
  if (inherits(object,"HLfitlist") || inherits(object2,"HLfitlist")) {
    stop("This does not yet work on HLfitlist objects")
  }
  ## fixed-effect design-matrix column names of each fit
  X1 <- colnames(object$`X.pv`)
  X2 <- colnames(object2$`X.pv`)
  ## infer whether each fit is REML (p_v differs from p_bv) or ML
  if (length(X1)==0L) {
    REML1 <- NULL ## compatible with both ML or REML tests
  } else REML1 <- (object$APHLs$p_v != object$APHLs$p_bv)
  if (length(X2)==0L) {
    REML2 <- NULL ## idem
  } else REML2 <- (object2$APHLs$p_v != object2$APHLs$p_bv)
  REML <- unique(c(REML1,REML2)) # length>1 means one fit is ML and the other REML
  meth1 <- object$HL
  meth2 <- object2$HL
  if (! identical(object$family[c("family","link")],object2$family[c("family","link")] ) ) {
    stop("Models may not be nested (distinct families)") ## but COMpoisson vs poisson ?
  }
  if (! identical(meth1,meth2) || length(REML)>1 ) {
    stop("object fitted by different methods cannot be compared")
  }
  ## canonicalize interaction terms so a:b and b:a compare equal
  if ( ! is.null(X1)) X1 <- sapply(strsplit(X1,':'), function(x) paste(sort(x),collapse=':')) ## JBF 2015/02/23: sort variables in interaction terms before comparison
  if ( ! is.null(X2)) X2 <- sapply(strsplit(X2,':'), function(x) paste(sort(x),collapse=':'))
  dX12 <- setdiff(X1,X2)
  dX21 <- setdiff(X2,X1)
  ## fixed-effect nesting direction: "2in1" = model 2's terms are a subset of model 1's
  ## NOTE(review): in the both-differ branch only a warning is issued and
  ## 'Xnest' is never assigned, so c(Xnest,Rnest) below would error with
  ## "object 'Xnest' not found" — confirm whether this is intended.
  if (length(dX12)>0 && length(dX21)>0) {
    warning("Fixed-effect models may not be nested") # F I X M E : correct detection of non-nested models
  } else if (length(dX12)>0) {
    Xnest <- "2in1"
  } else if (length(dX21)>0) {
    Xnest <- "1in2"
  } else Xnest <- NULL
  ## random-effect term strings; the accessor attribute changed in spaMM 2.2.116
  if (object$spaMM.version < "2.2.116") {
    ranterms1 <- attr(object$ZAlist,"ranefs")
  } else ranterms1 <- attr(object$ZAlist,"exp_ranef_strings")
  if (object2$spaMM.version < "2.2.116") {
    ranterms2 <- attr(object2$ZAlist,"ranefs")
  } else ranterms2 <- attr(object2$ZAlist,"exp_ranef_strings")
  randist1 <- lapply(object$rand.families, function(v) paste(paste(v)[1:2],collapse="")) ## makes a string from each $family and $link
  randist2 <- lapply(object2$rand.families, function(v) paste(paste(v)[1:2],collapse="")) ## makes a string from each $family and $link
  ranterms1 <- paste(ranterms1,randist1) ## joins each term and its distrib
  ranterms2 <- paste(ranterms2,randist2) ## joins each term and its distrib
  dR12 <- setdiff(ranterms1,ranterms2)
  dR21 <- setdiff(ranterms2,ranterms1)
  ## random-effect nesting direction, same convention as Xnest
  if (length(dR12)>0 && length(dR21)>0) {
    stop("Non-nested random-effect models")
  } else if (length(dR12)>0) {
    Rnest <- "2in1"
  } else if (length(dR21)>0) {
    Rnest <- "1in2"
  } else Rnest <- NULL
  nest <- c(Xnest,Rnest)
  unest <- unique(nest)
  if (length(nest)==0L) { ## NULL,NULL
    stop("Fixed-effect specifications do not appear different from each other.")
  } else if (length(unest)==2) {
    stop("Models not nested (opposite nestings for fixed and random terms). ")
  } else {
    ## degrees of freedom: number of fixed-effect coefficients, plus (when the
    ## random terms differ) lambda coefficients and correlation parameters
    df1 <- length(X1)
    df2 <- length(X2)
    if (!is.null(Rnest)) {
      lambda.object <- object$lambda.object
      if (!is.null(lambda.object)) df1 <- df1+length(unlist(lambda.object$coefficients_lambdaS))
      cov.mats <- .get_compact_cov_mats(object$strucList)
      if (length(cov.mats)) {
        nrows <- unlist(lapply(cov.mats,NROW))
        df1 <- df1+sum(nrows*(nrows-1)/2) # one parameter per lower-triangle entry
      }
      lambda.object <- object2$lambda.object
      if (!is.null(lambda.object)) df2 <- df2+length(unlist(lambda.object$coefficients_lambdaS))
      cov.mats <- .get_compact_cov_mats(object2$strucList)
      if ( length(cov.mats)) {
        nrows <- unlist(lapply(cov.mats,NROW))
        df2 <- df2+sum(nrows*(nrows-1)/2)
      }
    }
    ## assign full/null roles from the nesting direction
    if (unest=="1in2") {
      fullm <- object2
      nullm <- object
      df <- df2-df1
    } else {
      fullm <- object
      nullm <- object2
      df <- df1-df2
    }
    if (length(nest)==2) {
      message("Nested models differing both by in their fixed and in their random terms. ")
      message("Tentatively using marginal likelihood to compare them... ")
      testlik <- "p_v"
    } else {
      if (is.null(Rnest)) { ## fixed effect test
        if (REML) {
          ## checking the comparability of REML fits
          if ( ! is.null(fullm$distinctX.Re) ) {
            df.f.Re <- ncol(fullm$distinctX.Re)
          } else df.f.Re <- ncol(fullm$`X.pv`)
          if ( ! is.null(nullm$distinctX.Re) ) {
            df.n.Re <- ncol(nullm$distinctX.Re)
          } else df.n.Re <- ncol(nullm$`X.pv`)
          if ( df.f.Re != df.n.Re ) {
            warning("LRT comparing REML fits with different designs is highly suspect")
          }
        }
        testlik <- "p_v"
      } else { ## random effect test
        if ( ! REML) warning("ML fits used to compare different random-effects models...")
        testlik <- "p_bv" ## used in both case, identical to p_v in the non-standard case
        stop("The two models have identical fixed-effect formulas\n and cannot yet be compared properly by this function.")
        ## need to take into account correlations in random slope models for example
      }
    }
  }
  return(list(fullfit=fullm,nullfit=nullm,test_obj=testlik,df=df))
}
# (fixme?) : create as.lm method for HLfit object?
## Likelihood-ratio test between two nested HLfit objects.
## Computes the basic chi-squared LRT and, when boot.repl > 0, a parametric
## bootstrap (raw bootstrap p-value plus Bartlett-corrected LRT).
## Returns an object of class "fixedLRT".
## NOTE(review): relies on spaMM internals (.compare_model_structures,
## .check_nb_cores, .check_binomial_formula, .get_BinomialDen,
## .eval_boot_replicates) defined elsewhere in the package.
LRT <- function(object,object2,boot.repl=0,nb_cores=NULL,...) { ## compare two HM objects
  if (nrow(object$data)!=nrow(object2$data)) {
    stop("models were not both fitted to the same size of dataset.")
  }
  ## identify full/null model, test statistic ('p_v' or 'p_bv') and df
  info <- .compare_model_structures(object,object2)
  nullfit <- info$nullfit
  fullfit <- info$fullfit
  test_obj <- info$test_obj
  df <- info$df
  ## observed LR statistic and its asymptotic chi-squared p-value
  LRTori <- 2*(logLik(fullfit,which=test_obj)-logLik(nullfit,which=test_obj))
  pvalue <- 1-pchisq(LRTori,df=df) ## but not valid for testing null components of variance
  resu <- list(nullfit=nullfit,fullfit=fullfit,basicLRT = data.frame(chi2_LR=LRTori,df=df,p_value=pvalue)) ## format appropriate for more tests
  if (boot.repl>0) {
    if (boot.repl<100) message("It is recommended to set boot.repl>=100 for Bartlett correction")
    nb_cores <- .check_nb_cores(nb_cores=nb_cores)
    ## rebuild the two fitting calls as lists so they can be re-evaluated on
    ## simulated data (possibly in parallel worker sessions)
    aslistfull <- as.list(getCall(fullfit))
    aslistfull$processed <- NULL ## may capture bugs
    if (nb_cores>1) for(st in names(aslistfull)[-1]) aslistfull[[st]] <- eval(aslistfull[[st]]) ## force evaluation before running in another R session
    aslistnull <- as.list(getCall(nullfit))
    aslistnull$processed <- NULL ## may capture bugs
    if (nb_cores>1) for(st in names(aslistnull)[-1]) aslistnull[[st]] <- eval(aslistnull[[st]])
    simbData <- nullfit$data
    ## binomial responses may use cbind(npos, nneg) syntax: locate the two
    ## column names so simulated responses can be written back into the data
    if (tolower(nullfit$family$family)=="binomial") {
      cbf <- .check_binomial_formula(nullfit=nullfit, data=fullfit$data, fullfit=fullfit)
      cbindTest <- cbf$cbindTest
      if (cbindTest) {
        nnegname <- cbf$nnegname
        nposname <- cbf$nposname
        aslistfull$formula <- cbf$full_formula
        aslistnull$formula <- cbf$null_formula
      }
    } else cbindTest <- FALSE
    ## refit both models on one simulated response vector; returns the pair of
    ## log-likelihoods, or c(NA,NA) (or the error object) on failure
    eval_replicate <- function(newy,only_vector=TRUE) { ## only_vector controls how to handle errors
      if (cbindTest) {
        simbData[[nposname]] <- newy
        simbData[[nnegname]] <- .get_BinomialDen(nullfit) - newy
      } else {simbData[[as.character(nullfit$predictor[[2L]])]] <- newy} ## allows y~x syntax for binary response
      ## analyze under both models
      aslistfull$data <- simbData
      aslistnull$data <- simbData
      ## NOTE(review): this eval is not wrapped in try(), unlike the null fit
      ## below, so the following inherits(.,"try-error") check can never be
      ## TRUE and fit errors propagate — confirm whether try() was intended.
      fullfit <- (eval(as.call(aslistfull)))
      if (inherits(fullfit,"try-error")) {
        if (only_vector) {
          return(c(NA,NA))
        } else return(fullfit)
      }
      nullfit <- try(eval(as.call(aslistnull)))
      if (inherits(nullfit,"try-error")) {
        if (only_vector) {
          return(c(NA,NA))
        } else return(nullfit)
      }
      ## return pair of likelihoods
      return(c(logLik(fullfit,which=test_obj),logLik(nullfit,which=test_obj)))
    }
    bootblob <- .eval_boot_replicates(eval_replicate=eval_replicate,boot.repl=boot.repl,nullfit=nullfit,nb_cores=nb_cores,
                                      aslistfull=aslistfull, aslistnull=aslistnull,simbData=simbData)
    bootreps <- bootblob$bootreps
    colnames(bootreps) <- paste(c("full.","null."),test_obj,sep="")
    ## bootstrap distribution of half the LR statistic
    bootdL <- bootreps[,1]-bootreps[,2]
    meanbootLRT <- 2*mean(bootdL)
    ## raw bootstrap p-value with the usual (1 + #exceedances)/(B + 1) correction
    resu <- c(resu,list(rawBootLRT = data.frame(chi2_LR=LRTori,df=df,p_value=(1+sum(bootdL>=LRTori/2))/(boot.repl+1)))) ## format appropriate for more tests
    ## Bartlett correction: rescale so the bootstrap mean matches the chi2 df
    LRTcorr <- LRTori*df/meanbootLRT
    resu <- c(resu,list(BartBootLRT = data.frame(chi2_LR=LRTcorr,df=df,p_value=1-pchisq(LRTcorr,df=df)))) ## format appropriate for more tests
    bootInfo <- list(meanbootLRT = meanbootLRT,bootreps = bootreps, RNGstates=bootblob$RNGstates)
    resu <- c(resu,list(bootInfo=bootInfo)) ## keeps the sublist structure, which is not compatible with hglmjob.R...
  }
  class(resu) <- c("fixedLRT",class(resu))
  return(resu)
}
## anova treated as alias for LRT
anova.HLfit <- function(object, object2=NULL, ..., method="") {
  ## 'anova' on HLfit objects is an alias for the likelihood-ratio test;
  ## 'method' is accepted for interface compatibility but currently unused,
  ## and any extra arguments are forwarded to LRT().
  LRT(object, object2, ...)
}
|
9e198fcff35c70b65a2ba1d77c7cc4a177cc9000
|
16cbcd4b55e9df1e91f2d69702790023c9cf6780
|
/252286816.r
|
4dbf7be97b366dc44af745c1094aec27dec09548
|
[] |
no_license
|
erex/MT3607-peer-review
|
3f65c9a168f34e947fe0e531e773029384c19314
|
bc0750e9a7fb5f2d0a7c7e35b34b3a80213d9fde
|
refs/heads/master
| 2020-06-03T06:12:34.093705
| 2014-10-10T09:49:51
| 2014-10-10T09:49:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,156
|
r
|
252286816.r
|
#I confirm that the attached is my own work, except where clearly indicated in the text.
# Normal RV generator
# purpose: my.rnorm is a random number generator to simulate random normal variables.
# Simulated by the uniform RV command "runif" then transformed by an algorithm into Normal random variables.
# inputs: n - number of values to return. positive numeric integer. default = 1
# mean - mean of returned values. numeric. default = 0
# sd - standard deviation of returned values. positive numeric. default = 1.
# outputs: n random normal variables with mean "mean" and standard deviation "sd"
# "invalid arguments" if n, mean or sd arguments are not acceptable forms.
# Normal RV generator: n draws with the given mean and sd, built from 16
# uniforms per draw via the central-limit transformation.
# Stops with "invalid arguments" on any invalid input.
my.rnorm <- function(n, mean=0, sd=1){
  # Validate types BEFORE using the values, so non-numeric input yields the
  # documented "invalid arguments" error instead of a raw comparison error.
  # Use TRUE/FALSE-valued predicates, never the reassignable T/F aliases.
  valid <- is.numeric(n) && length(n) == 1 && n >= 0 && n == as.integer(n) &&
    is.numeric(mean) && is.numeric(sd) && sd >= 0
  if (!valid) {
    stop("invalid arguments")
  }
  out <- numeric(n)            # preallocate; correctly length 0 when n == 0
  for (i in seq_len(n)) {      # seq_len avoids the 1:0 bug of `1:n` at n == 0
    # sum of 16 U(0,1) has mean 8 and variance 16/12; standardize then scale
    out[i] <- (((sum(runif(16)) - 8) * ((12/16)^0.5)) * sd) + mean
  }
  out
}
# Chi-Squared RV generator
# Purpose: my.rchisq function simulates random chi-squared variables.
# Chi-squared random variables are simulated by summing squared my.rnorm generated standard normal variables.
# Inputs: n - number of values to return. Must be positive numeric integer. No Default
# df - degrees of freedom ie: how many squared normal variables are summed. Positive numeric integer. Default = 1
# Outputs: n chi-squared random variables with df degrees of freedom
# "invalid arguments" if n or df are not acceptable forms.
# Chi-squared RV generator: n draws with df degrees of freedom, each the sum
# of df squared standard normals produced by my.rnorm.
# Stops with "invalid arguments" on any invalid input.
my.rchisq <- function( n, df=1){
  # Validate types first (see my.rnorm): non-numeric input must produce the
  # documented error, not a comparison failure; use TRUE/FALSE, not T/F.
  valid <- is.numeric(n) && length(n) == 1 && n >= 0 && n == as.integer(n) &&
    is.numeric(df) && length(df) == 1 && df >= 0 && df == as.integer(df)
  if (!valid) {
    stop("invalid arguments")
  }
  out <- numeric(n)            # preallocate; length 0 when n == 0
  for (i in seq_len(n)) {      # seq_len avoids the 1:0 bug of `1:n` at n == 0
    # a chi-squared(df) draw is the sum of df squared standard normals
    out[i] <- sum((my.rnorm(n = df, sd = 1, mean = 0))^2)
  }
  out
}
#F-Distribution RV generator
# Purpose: my.rf function simulates random F-Distribution variables.
# F-Distribution RVs simulated by simulating 2 chi-squared RVs, dividing by their degrees of freedom, then dividing one by the other
# Inputs: n - number of values to return. Must be positive numeric integer. No Default
# df1 - numerator degrees of freedom ie: no of squared normal RVs are summed in the chi-squared RV. Positive numeric integer. Default = 1
# df2 - denominator degrees of freedom ie: no of squared normal RVs are summed in the Chi-Squared RV. Positive numeric integer. Default = 1
# Outputs: n F-Distribution RVs with df1 and df2 degrees of freedom
# "invalid arguments" if n, df1 or df2 are not acceptable forms.
# F-distribution RV generator: n draws with (df1, df2) degrees of freedom,
# each the ratio of two independent df-scaled chi-squared draws (my.rchisq).
# Stops with "invalid arguments" on any invalid input.
my.rf <- function(n, df1=1, df2=1){
  # Validate types first (see my.rnorm); use TRUE/FALSE, not T/F.
  valid <- is.numeric(n) && length(n) == 1 && n >= 0 && n == as.integer(n) &&
    is.numeric(df1) && length(df1) == 1 && df1 >= 0 && df1 == as.integer(df1) &&
    is.numeric(df2) && length(df2) == 1 && df2 >= 0 && df2 == as.integer(df2)
  if (!valid) {
    stop("invalid arguments")
  }
  out <- numeric(n)            # preallocate; length 0 when n == 0
  for (i in seq_len(n)) {      # seq_len/seq_along instead of 1:length(out)
    # F(df1, df2) = (chisq(df1)/df1) / (chisq(df2)/df2)
    out[i] <- ((my.rchisq(1, df1)) / df1) / ((my.rchisq(1, df2)) / df2)
  }
  out
}
#Test 1
#purpose: Ask the functions for a random number of RVs then check that number of RVs are returned and if they are numeric.
#inputs: none
#output: "pass test" if the correct number of numeric values are returned
# "fail test" if the incorrect number of values are returned or values are not numeric
# Smoke test: request random counts of RVs from each generator and check that
# the right number of numeric values comes back.
# Returns "pass test" or "fail test".
test.1 <- function(){
  # random sizes so the check is not tied to fixed arguments
  a <- sample(1:100, 1)
  b <- sample(1:100, 1)
  c <- sample(1:100, 1)
  vals <- c(my.rnorm(a), my.rchisq(b), my.rf(c))
  # `&&` requires scalar operands (errors on length > 1 vectors in R >= 4.3),
  # so use the scalar predicate is.numeric() rather than vals == as.numeric(vals)
  if (length(vals) == (a + b + c) && is.numeric(vals)) {
    return("pass test")
  } else {
    return("fail test")
  }
}
#Test 2
# purpose: Creates a distribution of 5000 random variables from the my.rnorm function. Should be the Normal Distibution N(0.1)
# Uses a shapiro-wilks test to perform a hypothesis test on the distibution being Normal.
# Null hypothesis H0:if the distribution is normal. Alternative hypothesis H1: the distribution is not normal
#inputs: none
#outputs: "Accept H0: Distribution is Normal" if p-value > 0.05
# "Reject H0 in favour of H1:Distribution is not Normal" if p-value < 0.05
# Normality test: draw 5000 values from my.rnorm (shapiro.test's upper sample
# limit) and run a Shapiro-Wilk test. H0: the sample is normal.
# Returns the accept/reject message (alpha = 0.05).
test.2 <- function(){
  res <- shapiro.test(my.rnorm(5000))
  # use the named p.value component directly instead of indexing the htest
  # list by position and comparing against the reassignable F alias
  if (res$p.value > 0.05) {
    return("Accept H0: Distribution is Normal")
  } else {
    return("Reject H0 in favour of H1:Distribution is not Normal")
  }
}
|
dd575c47e9c9df6c6a6773e2e8cd7680ea96485e
|
8858836e281c2624d097fe62f73fa4a648f72f72
|
/empirical.R
|
a8abc7efa2aef44030712effce7a7d28ad94f5e8
|
[] |
no_license
|
aripurwantosp/psoks_fit_lognormal_wafm
|
19e2032fcc739fcb388bd25b3baee16254eb0644
|
e6412dfa8aef7129b28e2e8e0e7b7527460f6bdf
|
refs/heads/master
| 2021-07-08T18:28:23.657143
| 2020-07-24T13:11:52
| 2020-07-24T13:11:52
| 169,533,768
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,478
|
r
|
empirical.R
|
#PSO-KS Empirical Study Fromm IDHS 2017 Data
#Fitting Lognormal Distribution to Women Age at First Marriage Data
#
#Ari Purwanto Sarwo Prasojo & Puguh Prasetyoputra (2019)
#Research Center for Population, Indonesian Institute of Sciences
#________________________________________________________________
#Load package, source, data ----
source("macro/stat.R")
source("macro/pso.fitdist.R")
library(fitdistrplus)
library(readstata13)
library(dplyr)
library(ggplot2)
#/function to calculate lognormal properties (mode, mean, variance)
lnorm_prop <- function(theta) {
  # theta = c(meanlog, sdlog); return the distribution's mode, mean and
  # variance as a named numeric vector c(mode, mean, var)
  mu <- theta[[1]]
  s2 <- theta[[2]]^2
  c(
    mode = exp(mu - s2),
    mean = exp(mu + 0.5 * s2),
    var  = (exp(s2) - 1) * exp(2 * mu + s2)
  )
}
#/read data from Stata format (readstata13); IDHS 2017 ever-married women file
evwomen <- read.dta13("sdki17-evwomen.dta")
# View(evwomen)
# str(evwomen)
# National Aggregate ------------------------------------------------------
#/data sample ----
# V501 = marital status, V511 = age at first marriage (ever-married only)
all_afm <- filter(evwomen, V501 != "Never in union")
all_afm <- all_afm$V511
#/fit distribution ----
# Fit a lognormal by PSO minimizing the KS distance (macro/pso.fitdist.R)
psoks_all_afm <- pso.fitdist(dt = all_afm, dist = "lognormal", stat = "ks.stat",
                             limit = c(0,100), max.iter = 200, n.swarm = 20)
#//pso behaviour: KS distance of the best swarm position per iteration
pso_beh <- data.frame(iter = 0:200, ks = psoks_all_afm$stat.trace)
ggplot(pso_beh, aes(iter, ks)) + geom_line(color = "brown1") + geom_point(color = "brown1") +
  labs(title = "PSO-KS's Behaviour, Fitting Lognormal Distribution",
       subtitle = "Women's age at first marriage, national aggregation",
       x = "Iteration", y = "KS Distance") +
  theme_bw()
# Benchmark fits: maximum likelihood (mle) and method of moments (mme)
mle_all_afm <- fitdist(all_afm, distr = "lnorm", method = "mle")
mme_all_afm <- fitdist(all_afm, distr = "lnorm", method = "mme")
est_all_afm <- rbind(psoks_all_afm$solution, mle_all_afm$estimate, mme_all_afm$estimate)
colnames(est_all_afm) <- c("meanlog","sdlog")
rownames(est_all_afm) <- c("PSOKS","MLE","MME")
cat("Parameter estimation, national\n")
cat("---------------------------------------------\n")
print(est_all_afm)
#/properties ----
# Mode/mean/variance implied by each fitted parameter pair
all_prop <- rbind(lnorm_prop(psoks_all_afm$solution),
                  lnorm_prop(mle_all_afm$estimate),
                  lnorm_prop(mme_all_afm$estimate))
rownames(all_prop) <- c("PSOKS","MLE","MME")
cat("Properties, national\n")
cat("---------------------------------------------\n")
print(all_prop)
#/goodness of fit statistics ----
#//mse of distribution fitting
mse_psoks_all_afm <- mse.stat(all_afm,"lognormal", meanlog = psoks_all_afm$solution[1],
                              sdlog = psoks_all_afm$solution[2])
mse_mle_all_afm <- mse.stat(all_afm,"lognormal", meanlog = mle_all_afm$estimate[1],
                            sdlog = mle_all_afm$estimate[2])
mse_mme_all_afm <- mse.stat(all_afm,"lognormal", meanlog = mme_all_afm$estimate[1],
                            sdlog = mme_all_afm$estimate[2])
#//ks distance
ks_psoks_all_afm <- ks.stat(all_afm,"lognormal", meanlog = psoks_all_afm$solution[1],
                            sdlog = psoks_all_afm$solution[2])
ks_mle_all_afm <- ks.stat(all_afm,"lognormal", meanlog = mle_all_afm$estimate[1],
                          sdlog = mle_all_afm$estimate[2])
ks_mme_all_afm <- ks.stat(all_afm,"lognormal", meanlog = mme_all_afm$estimate[1],
                          sdlog = mme_all_afm$estimate[2])
#//log-likelihood
loglik_psoks_all_afm <- loglik(all_afm, "lognormal", psoks_all_afm$solution)
loglik_mle_all_afm <- loglik(all_afm, "lognormal", mle_all_afm$estimate)
loglik_mme_all_afm <- loglik(all_afm, "lognormal", mme_all_afm$estimate)
# Rows: fit statistics; columns: the three estimation methods
stat_all_afm <- rbind(c(mse_psoks_all_afm, mse_mle_all_afm, mse_mme_all_afm), #mse
                      c(ks_psoks_all_afm, ks_mle_all_afm, ks_mme_all_afm), #ks
                      c(loglik_psoks_all_afm, loglik_mle_all_afm, loglik_mme_all_afm)) #loglik
colnames(stat_all_afm) <- c("PSOKS","MLE","MME")
rownames(stat_all_afm) <- c("mse dist","ks distance","loglik")
cat("Goodness of fit measure, national aggregation\n")
cat("---------------------------------------------\n")
print(stat_all_afm)
#/graph fitting ----
#//histogram & pdf
len_all_afm <- length(all_afm)
den_all_psoks <- dlnorm(all_afm, meanlog = psoks_all_afm$solution[1],
                        sdlog = psoks_all_afm$solution[2])
den_all_mle <- dlnorm(all_afm, meanlog = mle_all_afm$estimate[1],
                      sdlog = mle_all_afm$estimate[2])
den_all_mme <- dlnorm(all_afm, meanlog = mme_all_afm$estimate[1],
                      sdlog = mme_all_afm$estimate[2])
# Long format: one block of rows per fitting method
den_all_afm <- data.frame(afm = rep(all_afm,3),
                          Fit = c(rep("PSO-KS", len_all_afm), rep("MLE", len_all_afm),
                                  rep("MME", len_all_afm)),
                          Density = c(den_all_psoks, den_all_mle, den_all_mme))
# NOTE(review): hist() both draws a base-graphics plot and supplies the break
# points reused by the ggplot histogram below.
all_hist <- hist(all_afm, freq = FALSE)
ggplot(den_all_afm, aes(x = afm)) +
  geom_histogram(aes(y = ..density..), breaks = all_hist$breaks,
                 fill = "gray70", color = "white") +
  xlim(min(all_afm),max(all_afm)) +
  geom_line(aes(x = afm, y = Density, color = Fit), size = .8) +
  labs(title = "Histogram & Lognormal PDF",
       subtitle = "Women's age at first marriage, national",
       x = "Age", y = "Density") +
  theme_bw()
#//ecdf & cdf
cumden_all_psoks <- plnorm(all_afm, meanlog = psoks_all_afm$solution[1],
                           sdlog = psoks_all_afm$solution[2])
cumden_all_mle <- plnorm(all_afm, meanlog = mle_all_afm$estimate[1],
                         sdlog = mle_all_afm$estimate[2])
cumden_all_mme <- plnorm(all_afm, meanlog = mme_all_afm$estimate[1],
                         sdlog = mme_all_afm$estimate[2])
cumden_all_afm <- data.frame(afm = rep(all_afm,3),
                             Fit = c(rep("PSO-KS", len_all_afm), rep("MLE", len_all_afm),
                                     rep("MME", len_all_afm)),
                             Cumden = c(cumden_all_psoks, cumden_all_mle, cumden_all_mme))
ggplot(data = cumden_all_afm, aes(afm))+
  stat_ecdf(geom = "step") +
  geom_line(aes(x = afm, y = Cumden, color = Fit), size = .7) +
  labs(title = "Empirical & Lognormal CDF",
       subtitle = "Women's age at first marriage, national",
       x = "Age", y = "Cumulative Density") +
  theme_bw()
# Disaggregation by Residence (Rural/Urban) ---------------------------------
#/data sample ----
# Same filter as the national run, split on residence (V025)
rural_afm <- filter(evwomen, V501 != "Never in union" & V025 == "Rural")$V511
urban_afm <- filter(evwomen, V501 != "Never in union" & V025 == "Urban")$V511
#/fit distribution ----
psoks_rural_afm <- pso.fitdist(dt = rural_afm, dist = "lognormal", stat = "ks.stat",
                               limit = c(0,100), max.iter = 200, n.swarm = 20)
psoks_urban_afm <- pso.fitdist(dt = urban_afm, dist = "lognormal", stat = "ks.stat",
                               limit = c(0,100), max.iter = 200, n.swarm = 20)
#//pso behaviour: one KS trace per residence group
pso_beh_reg <- data.frame(iter = rep(0:200,2), Residence = c(rep("Rural", 201),rep("Urban", 201)),
                          ks = c(psoks_rural_afm$stat.trace, psoks_urban_afm$stat.trace))
ggplot(pso_beh_reg, aes(iter, ks, group = Residence)) + geom_line(aes(color = Residence)) +
  geom_point(aes(color = Residence)) +
  labs(title = "PSO-KS's Behaviour, Fitting Lognormal Distribution",
       subtitle = "Women's age at first marriage, national by residence",
       x = "Iteration", y = "KS Distance") +
  theme_bw()
# Benchmark fits per residence group
mle_rural_afm <- fitdist(rural_afm, distr = "lnorm", method = "mle")
mle_urban_afm <- fitdist(urban_afm, distr = "lnorm", method = "mle")
mme_rural_afm <- fitdist(rural_afm, distr = "lnorm", method = "mme")
mme_urban_afm <- fitdist(urban_afm, distr = "lnorm", method = "mme")
est_rural_afm <- rbind(psoks_rural_afm$solution, mle_rural_afm$estimate, mme_rural_afm$estimate)
colnames(est_rural_afm) <- c("meanlog","sdlog")
rownames(est_rural_afm) <- c("PSOKS","MLE","MME")
cat("Parameter estimation, national-rural region\n")
cat("---------------------------------------------\n")
print(est_rural_afm)
est_urban_afm <- rbind(psoks_urban_afm$solution, mle_urban_afm$estimate, mme_urban_afm$estimate)
colnames(est_urban_afm) <- c("meanlog","sdlog")
rownames(est_urban_afm) <- c("PSOKS","MLE","MME")
cat("Parameter estimation, national-urban region\n")
cat("---------------------------------------------\n")
print(est_urban_afm)
#/properties ----
rural_prop <- rbind(lnorm_prop(psoks_rural_afm$solution),
                    lnorm_prop(mle_rural_afm$estimate),
                    lnorm_prop(mme_rural_afm$estimate))
rownames(rural_prop) <- c("PSOKS","MLE","MME")
cat("Properties, national-rural region\n")
cat("---------------------------------------------\n")
print(rural_prop)
urban_prop <- rbind(lnorm_prop(psoks_urban_afm$solution),
                    lnorm_prop(mle_urban_afm$estimate),
                    lnorm_prop(mme_urban_afm$estimate))
rownames(urban_prop) <- c("PSOKS","MLE","MME")
cat("Properties, national-urban region\n")
cat("---------------------------------------------\n")
print(urban_prop)
#/goodness of fit statistics ----
#//mse of distribution fitting
mse_psoks_rural_afm <- mse.stat(rural_afm,"lognormal", meanlog = psoks_rural_afm$solution[1],
                                sdlog = psoks_rural_afm$solution[2])
mse_mle_rural_afm <- mse.stat(rural_afm,"lognormal", meanlog = mle_rural_afm$estimate[1],
                              sdlog = mle_rural_afm$estimate[2])
mse_mme_rural_afm <- mse.stat(rural_afm,"lognormal", meanlog = mme_rural_afm$estimate[1],
                              sdlog = mme_rural_afm$estimate[2])
mse_psoks_urban_afm <- mse.stat(urban_afm,"lognormal", meanlog = psoks_urban_afm$solution[1],
                                sdlog = psoks_urban_afm$solution[2])
mse_mle_urban_afm <- mse.stat(urban_afm,"lognormal", meanlog = mle_urban_afm$estimate[1],
                              sdlog = mle_urban_afm$estimate[2])
mse_mme_urban_afm <- mse.stat(urban_afm,"lognormal", meanlog = mme_urban_afm$estimate[1],
                              sdlog = mme_urban_afm$estimate[2])
#//ks distance
ks_psoks_rural_afm <- ks.stat(rural_afm,"lognormal", meanlog = psoks_rural_afm$solution[1],
                              sdlog = psoks_rural_afm$solution[2])
ks_mle_rural_afm <- ks.stat(rural_afm,"lognormal", meanlog = mle_rural_afm$estimate[1],
                            sdlog = mle_rural_afm$estimate[2])
ks_mme_rural_afm <- ks.stat(rural_afm,"lognormal", meanlog = mme_rural_afm$estimate[1],
                            sdlog = mme_rural_afm$estimate[2])
ks_psoks_urban_afm <- ks.stat(urban_afm,"lognormal", meanlog = psoks_urban_afm$solution[1],
                              sdlog = psoks_urban_afm$solution[2])
ks_mle_urban_afm <- ks.stat(urban_afm,"lognormal", meanlog = mle_urban_afm$estimate[1],
                            sdlog = mle_urban_afm$estimate[2])
ks_mme_urban_afm <- ks.stat(urban_afm,"lognormal", meanlog = mme_urban_afm$estimate[1],
                            sdlog = mme_urban_afm$estimate[2])
#//log-likelihood
loglik_psoks_rural_afm <- loglik(rural_afm, "lognormal", psoks_rural_afm$solution)
loglik_mle_rural_afm <- loglik(rural_afm, "lognormal", mle_rural_afm$estimate)
loglik_mme_rural_afm <- loglik(rural_afm, "lognormal", mme_rural_afm$estimate)
loglik_psoks_urban_afm <- loglik(urban_afm, "lognormal", psoks_urban_afm$solution)
loglik_mle_urban_afm <- loglik(urban_afm, "lognormal", mle_urban_afm$estimate)
loglik_mme_urban_afm <- loglik(urban_afm, "lognormal", mme_urban_afm$estimate)
stat_rural_afm <- rbind(c(mse_psoks_rural_afm, mse_mle_rural_afm, mse_mme_rural_afm), #mse
                        c(ks_psoks_rural_afm, ks_mle_rural_afm, ks_mme_rural_afm), #ks
                        c(loglik_psoks_rural_afm, loglik_mle_rural_afm, loglik_mme_rural_afm)) #loglik
stat_urban_afm <- rbind(c(mse_psoks_urban_afm, mse_mle_urban_afm, mse_mme_urban_afm), #mse
                        c(ks_psoks_urban_afm, ks_mle_urban_afm, ks_mme_urban_afm), #ks
                        c(loglik_psoks_urban_afm, loglik_mle_urban_afm, loglik_mme_urban_afm)) #loglik
colnames(stat_rural_afm) <- colnames(stat_urban_afm) <- c("PSOKS","MLE","MME")
rownames(stat_rural_afm) <- rownames(stat_urban_afm) <- c("mse dist","ks distance","loglik")
cat("Goodness of fit measure, national-rural\n")
cat("---------------------------------------------\n")
print(stat_rural_afm)
cat("Goodness of fit measure, national-urban\n")
cat("---------------------------------------------\n")
print(stat_urban_afm)
#/graph fitting ----
#//histogram & pdf
# NOTE(review): the rural breaks are reused for both facet histograms below —
# confirm that is intended.
reg_hist <- hist(rural_afm)
len_rural_afm <- length(rural_afm)
len_urban_afm <- length(urban_afm)
den_rural_psoks <- dlnorm(rural_afm, meanlog = psoks_rural_afm$solution[1],
                          sdlog = psoks_rural_afm$solution[2])
den_rural_mle <- dlnorm(rural_afm, meanlog = mle_rural_afm$estimate[1],
                        sdlog = mle_rural_afm$estimate[2])
den_rural_mme <- dlnorm(rural_afm, meanlog = mme_rural_afm$estimate[1],
                        sdlog = mme_rural_afm$estimate[2])
den_urban_psoks <- dlnorm(urban_afm, meanlog = psoks_urban_afm$solution[1],
                          sdlog = psoks_urban_afm$solution[2])
den_urban_mle <- dlnorm(urban_afm, meanlog = mle_urban_afm$estimate[1],
                        sdlog = mle_urban_afm$estimate[2])
den_urban_mme <- dlnorm(urban_afm, meanlog = mme_urban_afm$estimate[1],
                        sdlog = mme_urban_afm$estimate[2])
# Long format: residence blocks, each with three fitting-method blocks
reg_df <- data.frame(afm = c(rep(rural_afm, 3), rep(urban_afm, 3)),
                     Residence = c(rep("Rural", 3*len_rural_afm), rep("Urban", 3*len_urban_afm)),
                     Fit = c(rep("PSO-KS",len_rural_afm), rep("MLE",len_rural_afm),
                             rep("MME",len_rural_afm),
                             rep("PSO-KS",len_urban_afm), rep("MLE",len_urban_afm),
                             rep("MME",len_urban_afm)),
                     Density = c(den_rural_psoks, den_rural_mle, den_rural_mme,
                                 den_urban_psoks, den_urban_mle, den_urban_mme))
ggplot(data = reg_df)+
  geom_histogram(aes(x = afm, y = ..density..),
                 fill = "gray70", color = "white", breaks = reg_hist$breaks) +
  xlim(min(all_afm),max(all_afm)) +
  geom_line(aes(x = afm, y = Density, color = Fit), size = .8) +
  labs(title = "Histogram & Lognormal PDF",
       subtitle = "Women's age at first marriage, national by residence",
       x = "Age", y = "Density") +
  theme_bw() +
  facet_grid(Residence ~.)
#//ecdf & cdf
cum_rural_psoks <- plnorm(rural_afm, meanlog = psoks_rural_afm$solution[1],
                          sdlog = psoks_rural_afm$solution[2])
cum_rural_mle <- plnorm(rural_afm, meanlog = mle_rural_afm$estimate[1],
                        sdlog = mle_rural_afm$estimate[2])
cum_rural_mme <- plnorm(rural_afm, meanlog = mme_rural_afm$estimate[1],
                        sdlog = mme_rural_afm$estimate[2])
cum_urban_psoks <- plnorm(urban_afm, meanlog = psoks_urban_afm$solution[1],
                          sdlog = psoks_urban_afm$solution[2])
cum_urban_mle <- plnorm(urban_afm, meanlog = mle_urban_afm$estimate[1],
                        sdlog = mle_urban_afm$estimate[2])
cum_urban_mme <- plnorm(urban_afm, meanlog = mme_urban_afm$estimate[1],
                        sdlog = mme_urban_afm$estimate[2])
reg_cumd <- data.frame(afm = c(rep(rural_afm, 3), rep(urban_afm, 3)),
                       Residence = c(rep("Rural", 3*len_rural_afm), rep("Urban", 3*len_urban_afm)),
                       Fit = c(rep("PSO-KS",len_rural_afm), rep("MLE",len_rural_afm),
                               rep("MME",len_rural_afm),
                               rep("PSO-KS",len_urban_afm), rep("MLE",len_urban_afm),
                               rep("MME",len_urban_afm)),
                       Cumden = c(cum_rural_psoks, cum_rural_mle, cum_rural_mme,
                                  cum_urban_psoks, cum_urban_mle, cum_urban_mme))
ggplot(data = reg_cumd, aes(afm))+
  stat_ecdf(geom = "step") +
  geom_line(aes(x = afm, y = Cumden, color = Fit), size = .7) +
  labs(title = "Empirical & Lognormal CDF",
       subtitle = "Women's age at first marriage, national by residence",
       x = "Age", y = "Cumulative Density") +
  theme_bw() +
  facet_grid(Residence ~.)
# Properties --------------------------------------------------------------
# Central-tendency properties of a lognormal distribution; same computation
# as lnorm_prop() defined earlier in this script, but the third element is
# labelled "variance" instead of "var".
# theta: numeric vector c(meanlog, sdlog).
prop <- function(theta) {
  mu <- theta[1]
  s2 <- theta[2]^2
  out <- c(exp(mu - s2), exp(mu + 0.5 * s2), (exp(s2) - 1) * exp(2 * mu + s2))
  names(out) <- c("mode", "mean", "variance")
  out
}
# Scratch check of prop() against simulated data (kept commented out):
# t <- rlnorm(100, meanlog = 1, sdlog = .5)
# Mode(t)
# mean(t)
# var(t)
# fit <- fitdist(t,"lnorm","mle")
# prop(fit$estimate)
# The parameter pairs below are hard-coded c(meanlog, sdlog) values —
# presumably copied from the fitted estimates printed above; TODO confirm.
#National
nat_psoks <- c(2.994, 0.213)
prop(nat_psoks)
nat_mle <- c(2.997, 0.221)
prop(nat_mle)
nat_mme <- c(2.997, 0.223)
prop(nat_mme)
#Residence
#/rural
rur_psoks <- c(2.949, 0.201)
prop(rur_psoks)
rur_mle <- c(2.956, 0.220)
prop(rur_mle)
rur_mme <- c(2.955, 0.224)
prop(rur_mme)
#/urban
ur_psoks <- c(3.037, 0.209)
prop(ur_psoks)
ur_mle <- c(3.038, 0.215)
prop(ur_mle)
ur_mme <- c(3.038, 0.215)
prop(ur_mme)
|
c91ba9d797599fc6d98d80b5862707a0d206a8f7
|
992b927610cfd0528def7a7a2a440058f22b6828
|
/man/ovun.sample.Rd
|
12247e2991912e2fab92a8df1339d00b7a0738d6
|
[] |
no_license
|
ramnathv/ROSE
|
1a15478e7a846fdca459e970595997efe311caa4
|
6e6e32cd79515887920d65857dfb929254500ee6
|
refs/heads/master
| 2020-04-01T21:52:57.067164
| 2018-10-18T19:57:54
| 2018-10-18T19:57:54
| 153,679,959
| 1
| 0
| null | 2018-10-18T19:57:06
| 2018-10-18T19:57:05
| null |
UTF-8
|
R
| false
| false
| 3,321
|
rd
|
ovun.sample.Rd
|
\name{ovun.sample}
\alias{ovun.sample}
\title{
Over-sampling, under-sampling, combination of over- and under-sampling.
}
\description{
Creates possibly balanced samples by random over-sampling minority examples, under-sampling majority examples or combination of over- and under-sampling.
}
\usage{
ovun.sample(formula, data, method="both", N, p=0.5,
subset=options("subset")$subset,
na.action=options("na.action")$na.action, seed)
}
\arguments{
\item{formula}{
An object of class \code{\link{formula}} (or one that can be coerced to that class).
See \code{\link{ROSE}} for information about interaction among predictors or
their transformations.
}
\item{data}{
An optional data frame, list or environment (or object
coercible to a data frame by \code{as.data.frame}) in which
to preferentially interpret ``formula''.
If not specified, the variables are taken from ``environment(formula)''.
}
\item{method}{
One among \code{c("over", "under", "both")} to perform over-sampling minority examples, under-sampling majority
examples or combination of over- and under-sampling, respectively.
}
\item{N}{
The desired sample size of the resulting data set.
If missing and \code{method} is either \code{"over"} or \code{"under"} the sample size is determined by oversampling or, respectively, undersampling examples so that the minority class occurs approximately in proportion \code{p}.
When \code{method = "both"} the default value is given by the length of vectors
specified in \code{formula}.
}
\item{p}{
The probability of resampling from the rare class.
If missing and \code{method} is either \code{"over"} or \code{"under"} this proportion is determined by oversampling
or, respectively, undersampling examples so that the sample size is equal to \code{N}.
When \code{method = "both"} the default value is 0.5.
}
\item{subset}{
An optional vector specifying a subset of observations to be used in the sampling process.
The default is set by the \code{\link{subset}} setting of \code{\link{options}}.
}
\item{na.action}{
A function which indicates what should happen when the data contain 'NA's.
The default is set by the \code{\link{na.action}} setting of \code{\link{options}}.
}
\item{seed}{
A single value, interpreted as an integer, recommended to specify seeds and keep trace of the
sample.
}
}
\value{
The value is an object of class \code{ovun.sample} which has components
\item{Call}{The matched call.}
\item{method}{The method used to balance the sample. Possible choices are \cr \code{c("over", "under", "both")}.}
\item{data}{ The resulting new data set.}
}
\seealso{
\code{\link{ROSE}}.
}
\examples{
# 2-dimensional example
# loading data
data(hacide)
# imbalance on training set
table(hacide.train$cls)
# balanced data set with both over and under sampling
data.balanced.ou <- ovun.sample(cls~., data=hacide.train,
N=nrow(hacide.train), p=0.5,
seed=1, method="both")$data
table(data.balanced.ou$cls)
# balanced data set with over-sampling
data.balanced.over <- ovun.sample(cls~., data=hacide.train,
p=0.5, seed=1,
method="over")$data
table(data.balanced.over$cls)
}
|
c8ad64e143f549d7b5ec1bcc63025cedf7ecbf67
|
be3adc68f32d8ab896996a7236325dea700ff416
|
/run_analysi.R
|
a4d148385fcb38034419f136652b354c768fcbc2
|
[] |
no_license
|
santiagoferriere/Coursera-Getting-and-Cleaning-Data-Course-Project
|
9e790ad7ae40b47c6eb0f07f0692bf94e73349af
|
68548c60c99f0f69c33de3eb470662c66d6d0f74
|
refs/heads/master
| 2022-11-25T23:20:31.615560
| 2020-07-27T03:58:06
| 2020-07-27T03:58:06
| 282,745,855
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,247
|
r
|
run_analysi.R
|
# Reads the UCI HAR feature/activity data, merges the test and train sets,
# keeps mean/std measurement columns and computes per-activity column means.
# NOTE: the "UCI HAR Dataset" folder must sit under the working directory.
library(dplyr)
features <- read.table("./UCI HAR Dataset/features.txt")
activities <- read.table("./UCI HAR Dataset/activity_labels.txt")
test <- read.table("./UCI HAR Dataset/test/X_test.txt")
train <- read.table("./UCI HAR Dataset/train/X_train.txt")
ytrain <- read.table("./UCI HAR Dataset/train/y_train.txt")
ytest <- read.table("./UCI HAR Dataset/test/y_test.txt")
subjecttrain <- read.table("./UCI HAR Dataset/train/subject_train.txt")
subjecttest <- read.table("./UCI HAR Dataset/test/subject_test.txt")
# (removed a redundant second read.table() of activity_labels.txt here)
####################
# Name the measurement columns after the features file; attach the
# activity and subject columns with fixed names.
feat <- as.character(features[, 2])
colnames(test) <- feat
colnames(train) <- feat
colnames(ytrain) <- "activity"
colnames(ytest) <- "activity"
colnames(subjecttest) <- "subject"
colnames(subjecttrain) <- "subject"
test <- cbind(test, ytest)
train <- cbind(train, ytrain)
test <- cbind(test, subjecttest)
train <- cbind(train, subjecttrain)
# Stack test and train into one data set
tate <- rbind(test, train)
# Keep only columns containing "mean()", "std()", "subject" or "activity"
mn_sd <- tate[, grep("mean()|std()|subject|activity", colnames(tate)) ]
# Replace the numeric activity codes with their descriptive labels
description <- factor(mn_sd$activity, levels = activities[, 1], labels = activities[, 2])
mn_sd <- mutate(mn_sd, description )
#####################################
# Split by activity, then average every column within each group.
for_act <- mn_sd
for_act <- split(for_act, for_act$activity)
# NOTE(review): rotate_df() comes from sjmisc and setattr() from data.table;
# neither package is loaded above — confirm they are attached elsewhere.
prev <- for_act[[1]]
prev <- sapply(prev, mean)
prev <- as.data.frame(prev)
prev <- rotate_df(prev)
for(i in 2:6) {
  # BUG FIX: the original indexed the undefined object `for_subj`;
  # the split list created above is `for_act`.
  fr <- for_act[[i]]
  mn <- sapply(fr, mean)
  dmn <- as.data.frame(dmn <- mn)
  dmn <- rotate_df(dmn)
  # append this activity's mean row to the accumulated result
  prev <- rbind( prev, dmn)
}
mean_act <- setattr(prev, "row.names", activities[ , 2])
|
cb774fe280c5c635be084e9ec693557cf6207f8d
|
a8f98c245c02fa7741cbdba001f5e71a0069e7a8
|
/total_jobs/plot.R
|
847308ae3053f79f1ae082ceedc51cce943ccd31
|
[] |
no_license
|
schelcj/biostat-cluster-reports
|
b4d6aa711921c8989ae7a0c9f7ac4880becae75f
|
0f6a03e4058df0f9380f13450acca3f65a96d241
|
refs/heads/master
| 2021-01-22T11:38:21.289928
| 2015-01-20T14:35:22
| 2015-01-20T14:35:22
| 2,601,256
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 420
|
r
|
plot.R
|
# Plot weekly total job counts from report.dat into report.pdf.
data <- read.table('report.dat', header=T, sep=",")
# anchor the y-axis at zero so the trend is not exaggerated
u_range <- range(0, data$total_jobs)
pdf('report.pdf')
plot(data$total_jobs, type="o", lwd=2, lty=1, pch=NA, col="blue", ann=F, axes=F, ylim=u_range)
# x axis: one tick per week of the year
axis(1, 1:52, lab=T)
# NOTE(review): `length(axTicks(2)-1)` looks like a misplaced parenthesis for
# `length(axTicks(2))-1`; also `labels[0:n]` simply yields labels[1:n]
# because a 0 index is dropped — confirm intended labelling.
labels <-seq(0, length(axTicks(2))^2, length(axTicks(2)))
axis(2, at=axTicks(2), labels=labels[0:length(axTicks(2)-1)], cex=0.8, tick=TRUE, line=NA, pos=NA)
title(ylab="Jobs")
box()
dev.off()
|
6c81baf712f977181d4e7ce98b3021ea03f9a605
|
aa06ef73c7eabdeb93b93bbb02d1f5e05758b728
|
/Multivariate_Analysis/Vegan_Demo3_20190208.R
|
83388a7a58d77a6b4f0288a2401e5a86a5b6671e
|
[
"BSD-3-Clause"
] |
permissive
|
lehostert/Stream_Ecology_Lab_Stats_Workshop
|
a0b82e6aa91c3a2e982b1136064261f2b3ed6c60
|
f1767f7096573f719b3722193b72bdbf2fe86bde
|
refs/heads/master
| 2021-07-21T14:57:43.822246
| 2020-05-12T04:08:18
| 2020-05-12T04:08:18
| 161,689,145
| 1
| 1
|
BSD-3-Clause
| 2020-05-12T03:59:26
| 2018-12-13T20:04:27
|
R
|
UTF-8
|
R
| false
| false
| 2,048
|
r
|
Vegan_Demo3_20190208.R
|
library(tidyverse)
library(vegan)
### Ordination Techniques Continued
# Input data sets (first column holds the row names).
fish <- read.csv("~/Documents/Github/Stats_Workshop_SEL/Multivariate_Analysis/five_assemblages.csv", header = T, na = ".", row.names = 1)
fish.evi <- read.delim2("~/Documents/Github/Stats_Workshop_SEL/Multivariate_Analysis/TV.txt", sep = ",", header = T, na = ".", row.names = 1)
LTEF <- read.csv("~/Documents/Github/Stats_Workshop_SEL/Multivariate_Analysis/LTEF_fish_data.csv", header = T, na = ".", row.names = 1)
Indic_Species <- read.csv("~/Documents/Github/Stats_Workshop_SEL/Multivariate_Analysis/indicator_species_data.csv", header = T, na = ".", row.names = 1)
# Separate the grouping column (Type) from the species abundance columns.
indic_spp.type <- Indic_Species %>% select(Type)
indic_spp.data <- Indic_Species %>% select(-c(Type))
indic_spp.dist2 <- vegdist(indic_spp.data)
# BUG FIX: the original passed `indic_spp.dist`, which is not defined until
# further below (and there is computed on the full table including the Type
# column). The dissimilarity matrix on species data only is `indic_spp.dist2`.
indic_spp.anov2 <- with(indic_spp.type, anosim(indic_spp.dist2, Type))
summary(indic_spp.anov2)
plot(indic_spp.anov2)
# NOTE(review): this second ANOSIM computes distances on Indic_Species
# *including* the Type column — probably unintended; kept as written.
indic_spp.dist <- vegdist(Indic_Species)
indic_spp.ano <- anosim(indic_spp.dist, Indic_Species$Type)
summary(indic_spp.ano)
plot(indic_spp.ano)
##Example: ANOSIM on the bundled dune data set
data(dune)
data(dune.env)
dune.dist <- vegdist(dune)
dune.ano <- with(dune.env, anosim(dune.dist, Management))
summary(dune.ano)
plot(dune.ano)
#### MRPP Multi-Response Permutation Procedure
data(dune)
data(dune.env)
dune.mrpp <- with(dune.env, mrpp(dune, Management))
dune.mrpp
indic.mrpp <- with(indic_spp.type, mrpp(indic_spp.data, Type))
indic.mrpp
# Save and change plotting parameters (restored after the MRPP figure)
def.par <- par(no.readonly = TRUE)
layout(matrix(1:2,nr=1))
plot(dune.ord <- metaMDS(dune), type="text", display="sites" )
with(dune.env, ordihull(dune.ord, Management))
with(dune.mrpp, {
  fig.dist <- hist(boot.deltas, xlim=range(c(delta,boot.deltas)),
                   main="Test of Differences Among Groups")
  abline(v=delta);
  text(delta, 2*mean(fig.dist$counts), adj = -0.5,
       expression(bold(delta)), cex=1.5 ) }
)
par(def.par)
## meandist: mean within/between-group dissimilarities
dune.md <- with(dune.env, meandist(vegdist(dune), Management))
dune.md
summary(dune.md)
plot(dune.md)
plot(dune.md, kind="histogram")
|
2698bb3022c9cf5f8e244cbc1af5480293591a9f
|
f25acfa44ded4602f844422ef93c8233ea400039
|
/plot2.R
|
9ce8d737f7ddbb9ded3cd76f43c40f00f5da859f
|
[] |
no_license
|
irving/ExData_Plotting1
|
d6f2d16c9f0d3384e9c1b01285d335a852483569
|
0095f0ac46b0b429dce2365e56afed835e818c72
|
refs/heads/master
| 2021-01-16T12:19:50.002003
| 2015-06-07T01:13:38
| 2015-06-07T01:13:38
| 36,740,862
| 0
| 0
| null | 2015-06-02T15:00:56
| 2015-06-02T15:00:56
| null |
UTF-8
|
R
| false
| false
| 1,291
|
r
|
plot2.R
|
# Project 1 for Coursera Exploratory Data Analysis course
# June 2015 section.
#
# This program reads a data file, selects the small portion of it to be used
# And generates an exploratory plot from it.
library(data.table)
library(dplyr)
library(lubridate)
library(sqldf)
# Check for file before reading. It should be in the same folder
if (!file.exists("household_power_consumption.txt")){
  stop("Unable to find data file. Please place \"household_power_consumption.txt\" in the same folder as this script to run.")
}
# open the output channel
png(file="plot2.png", width=480, height=480)
# read the data
# NOTE: Assumes data are in same folder.
# NOTE: I found info about a method to read just some rows of a file
# using sqldf here: http://r.789695.n4.nabble.com/How-to-set-a-filter-during-reading-tables-td893857.html
# So my reading code is adapted from that.
# sqldf reads only the rows for the two target dates (1-2 Feb 2007),
# avoiding loading the whole file into memory.
pow <- file("household_power_consumption.txt")
attr(pow, "file.format") <- list(sep = ";", header = TRUE)
p2 <- data.table(sqldf("select * from pow where Date == \"1/2/2007\" OR Date == \"2/2/2007\""))
# never forget to close a file when you're done with it
close(pow)
# combine the Date and Time text columns into a single timestamp column
p2[,When:= dmy_hms(paste(Date, Time))]
plot(p2$When, p2$Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab="")
dev.off()
|
8bd55f3977ba86aba894a75787b3b93a6c5ac44a
|
03f2c9c3e7d87c11400511a2994536efa8eba6de
|
/R/submissions/glm_0.58566_june_14_2014/avs_models_submission.R
|
7842f0bf037d940baeb02db188d5c5e3083ebcb6
|
[] |
no_license
|
lorenzol/kaggle_avs
|
43dc9b16ad8be1a2726ecae7a5ebdddf8031247e
|
c61d2d43562dad2411bc22994f84b968f68e4ab5
|
refs/heads/master
| 2021-01-10T22:09:55.147687
| 2014-09-18T00:46:23
| 2014-09-18T00:46:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,141
|
r
|
avs_models_submission.R
|
# Generate Kaggle submission CSVs from three previously-trained models.
load(file="test.RData")
# Create Submission File on Test Data (must have test data ready)
########################################################
# name of test file = test
# use the model to generate predictions
# NOTE(review): glm.tune, gbm.tune and glmnet.tune are not created in this
# script — they must already exist in the workspace (trained elsewhere).
repeater_glm <- predict(glm.tune, newdata = test, type="prob")
repeater_gbm <- predict(gbm.tune, newdata = test, type="prob")
repeater_glmnet <- predict(glmnet.tune, newdata = test, type="prob")
# Create dataset for export
#########################################################
# `$t` extracts the element named "t" from each prediction result
repeater_glm <- data.frame(cbind("id" = test$id, "repeatProbability" = repeater_glm$t))
repeater_gbm <- data.frame(cbind("id" = test$id, "repeatProbability" = repeater_gbm$t))
repeater_glmnet <- data.frame(cbind("id" = test$id, "repeatProbability" = repeater_glmnet$t))
# write predictions to csv file for submission to Kaggle
#########################################################
write.csv(repeater_glm, file="glm_tune_model1.csv", row.names=FALSE, quote=FALSE)
write.csv(repeater_gbm, file="gbm_tune_model1.csv", row.names=FALSE, quote=FALSE)
write.csv(repeater_glmnet, file="glmnet_tune_model1.csv", row.names=FALSE, quote=FALSE)
|
2b917081095186cf2998a923a34e54c6028a68d9
|
ca88f343307390ee7854779d5a6d3d7797eefbf2
|
/nomear_paises.R
|
681dea8c2515c0e1291e87a052a3141c4271392b
|
[] |
no_license
|
FlavioMarchi/Mapeamento-Tecnologico
|
570f84e34aaa210184ca650702ea1ee0f599d8c9
|
072d3356fc8ef663742b20ea946f1dd78a2493d2
|
refs/heads/master
| 2021-09-01T21:17:24.669599
| 2017-12-28T17:04:27
| 2017-12-28T17:04:27
| 115,642,464
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,022
|
r
|
nomear_paises.R
|
# Translate country identifiers to the short display names used downstream.
# country: character vector — ISO2 codes when flag is TRUE (converted via
#          countrycode first), otherwise full English country names.
# flag:    when TRUE, first convert ISO2 codes to English names, with custom
#          matches for patent-office / historical codes.
# Returns the vector with selected long names shortened (UK, USA, ...).
nomear_paises <- function(country, flag = TRUE){
  if (isTRUE(flag)) {
    custom_match01 <- c(WO = "World Office", EP = "European Union", SU = "Soviet Union",
                        AP = "NA", EA = "NA", OA = "NA",
                        UK = "UK")
    country <- countrycode(country, "iso2c",
                           "country.name.en", custom_match = custom_match01)
  }
  # Vectorized recode table replacing the original per-element if/else chain;
  # also tolerates NA entries, which the scalar `if` version errored on.
  renames <- c(
    "United Kingdom of Great Britain and Northern Ireland" = "UK",
    "United States of America"                             = "USA",
    "Republic of Korea"                                    = "South Korea",
    "Russian Federation"                                   = "Russia",
    "Taiwan, Province of China"                            = "Taiwan",
    "United Kingdom"                                       = "UK"
  )
  hit <- match(country, names(renames))
  found <- !is.na(hit)
  country[found] <- renames[hit[found]]
  return(country)
}
|
ffb723f4f8f283e0657ca4d226147e463d4f3a35
|
22e937ad20cc8b4609d7e4b883c2c1dc5c701a27
|
/man/plot.shotperformance.Rd
|
57c579617504e65351bc045f00b3569db0782378
|
[] |
no_license
|
sndmrc/BasketballAnalyzeR
|
becf8ac9f302b6fb02a6ee901e5656d90d2568f3
|
9e8a01f0bece63442c30348d24205244a1c6aa76
|
refs/heads/master
| 2023-06-10T14:13:01.684480
| 2023-05-31T20:17:42
| 2023-05-31T20:17:42
| 142,006,460
| 34
| 7
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,645
|
rd
|
plot.shotperformance.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.shotperformance.R
\name{plot.shotperformance}
\alias{plot.shotperformance}
\title{Plots a bubbleplot representing the data contained in the dataframe produced by the function 'shotperformance'}
\usage{
\method{plot}{shotperformance}(x, title = "Shooting performance", ...)
}
\arguments{
  \item{x}{an object of class \code{shotperformance} obtained using the shotperformance function}
\item{title}{character, plot title.}
\item{...}{other graphical parameters.}
}
\value{
A \code{ggplot2} object
}
\description{
Plots a bubbleplot representing the data contained in the dataframe produced by the function 'shotperformance'
}
\examples{
# Draw the plot for the performances on 2 point shots, when the high pressure situation is
# the one regarding shots taken when \code{shotclock} is between 0 and 2
PbP <- PbPmanipulation(PbP.BDB)
PbP <- scoredifference(PbP, team_name = "GSW", player_data=Pbox, team_data = Tadd)
PbP <- shotclock(PbP, sec_14_after_oreb = FALSE, team_data = Tadd)
players_perf <- shotperformance(PbP, shotclock_interval = c(0, 2),
player_data=Pbox, team_data = Tadd,
shot_type = "2P", teams = "GSW")
plot(players_perf)
}
\references{
P. Zuccolotto and M. Manisera (2020) Basketball Data Science: With Applications in R. CRC Press.
P. Zuccolotto, M. Manisera and M. Sandri (2018) Big data analytics for modeling scoring probability in basketball: The effect of shooting under high pressure conditions. International Journal of Sports Science & Coaching.
}
\author{
Andrea Fox
}
|
d8c0e8fa512249a0f1acc04d6b867ac782e5475d
|
63c5a1fcadd2d4a94fb81678bdc5f3e45df01855
|
/R/segment.R
|
44747c0d3253e2100826b6d6998637acbe6bd113
|
[] |
no_license
|
aoles/EBImage
|
884709162b60b93b75a0fdf6ec937d535a8ab7ef
|
d0f7973ac05a96914c63570e450730abd326aef5
|
refs/heads/devel
| 2023-06-23T11:26:44.695456
| 2023-04-09T20:52:13
| 2023-04-09T20:52:13
| 22,993,924
| 67
| 35
| null | 2023-06-15T13:36:39
| 2014-08-15T15:25:54
|
R
|
UTF-8
|
R
| false
| false
| 2,071
|
r
|
segment.R
|
## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
## Watershed transform: validates arguments, then delegates to compiled C
## code (C_watershed) on the cast image.
## tolerance: minimum object/background intensity difference (>= 0).
## ext:       neighbourhood radius used by the C routine (positive integer).
watershed <- function(x, tolerance = 1, ext = 1) {
  validImage(x)
  tol <- as.numeric(tolerance)
  if (tol < 0)
    stop("'tolerance' must be non-negative")
  radius <- as.integer(ext)
  if (radius < 1)
    stop("'ext' must be a positive integer")
  .Call(C_watershed, castImage(x), tol, radius)
}
## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
## Seeded region growing: validates inputs and delegates to compiled C code
## (C_propagate). `mask` is optional; `lambda` is a non-negative weight.
propagate <- function(x, seeds, mask = NULL, lambda = 1e-4) {
  validImage(x)
  checkCompatibleImages(x, seeds)
  # the C routine expects integer storage for label images
  as_int_storage <- function(img) {
    if (!is.integer(img)) storage.mode(img) <- "integer"
    img
  }
  seeds <- as_int_storage(seeds)
  if (!is.null(mask)) {
    checkCompatibleImages(x, mask)
    mask <- as_int_storage(mask)
  }
  weight <- as.numeric(lambda)
  if (weight < 0.0) stop("'lambda' must be positive")
  .Call(C_propagate, castImage(x), seeds, mask, weight)
}
## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
## Per-object contours computed in C (C_ocontour); presumably one entry of
## contour coordinates per object label -- confirm against the C source.
## Entries are named by their label index; labels that produced no contour
## data are dropped from the result.
ocontour = function(x) {
  validImage(x)
  if(!is.integer(x)) storage.mode(x) = 'integer'
  y = .Call(C_ocontour, x)
  names(y) = seq_along(y)
  # Fixes:
  #  * lengths() replaces sapply(y, length): with no objects sapply() returns
  #    an empty *list*, and list() > 0 is an error, whereas lengths() yields
  #    integer(0) and the subset is simply empty;
  #  * ending on a plain expression instead of the old trailing assignment
  #    `y = y[...]` makes the return value visible to the caller.
  y[lengths(y) > 0]
}
## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
## Connected-component labelling, computed entirely in C (C_bwlabel).
## x is validated with validImage() and handed to the C routine unchanged;
## the result is whatever C_bwlabel returns (presumably an image of the same
## size with integer object labels -- confirm against the C source).
bwlabel = function(x) {
  validImage(x)
  .Call(C_bwlabel, x)
}
## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
## Assign a random colour to every label of a labelled image.
## x:         labelled image/array; pixels > 0 are object labels, the rest is
##            background and is left untouched.
## normalize: if TRUE the coloured result is passed through normalize().
## NOTE(review): colours are drawn with sample(), so the output depends on
## the current RNG state/seed.
colorLabels = function(x, normalize = TRUE){
  len = length( (d = dim(x)) )
  res <- x
  # linearize image data for convenient processing
  dim(res) = c(prod(d[1:2]), if(len>2) prod(d[3:len]) else 1)
  # Remap each positive label to a randomly permuted value in 1..m;
  # background entries (y <= 0) keep their value.
  f = function(y, m) {
    idx <- y > 0
    y[idx] <- sample(m)[y[idx]]
    y
  }
  # For each frame (column of the linearized data) draw three independent
  # permutations -- one per colour channel -- so every label receives a
  # random (r, g, b) triple.
  tmp = apply(res, 2, function(y) {
    m = max(y)
    replicate(3, f(y, m))
  })
  # restore proper dimensions
  dim(tmp) = c(d[1:2], 3, if(len>2) d[3:len] else NULL)
  if ( is.Image(x) ) {
    imageData(res) <- tmp
    colorMode(res) <- Color
  }
  else {
    res = new("Image", .Data = tmp, colormode = Color)
  }
  # The `normalize` argument shadows the normalize() function name; the call
  # below still finds the function because R looks up call positions among
  # functions only.
  if (normalize) normalize(res) else res
}
|
dfd77d4126b2c4029e1c297e9aaccb86e7f3ffb8
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/RSDA/R/sym.cor.R
|
511176b9eea190ac6461d8efb00f5e694ad63112
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 878
|
r
|
sym.cor.R
|
## Correlation between two symbolic variables.
##
## sym.var.x, sym.var.y: symbolic variables; their $var.type field ('$C' for
##   continuous, '$I' for interval) selects the computation.
## method: one of 'centers', 'interval', 'billard', 'histogram' (match.arg).
## na.rm, ...: accepted for interface compatibility; not used below.
##
## Fixes relative to the previous version:
##  * the error message referred to the "Standard Deviation" (copy-pasted
##    from sym.sd); it now names the correlation;
##  * the 'billard' branch and the unimplemented 'interval'/'histogram'
##    methods used to fall through and return NULL invisibly; they now
##    stop() with an explicit error.
sym.cor <-
function(sym.var.x,sym.var.y,method=c('centers','interval','billard','histogram'),na.rm=FALSE, ...) {
  method <- match.arg(method)
  if (method == 'centers') {
    # Classic correlation on raw values ('$C') or on interval midpoints ('$I').
    if ((sym.var.x$var.type == '$C') && (sym.var.y$var.type == '$C'))
      return(cor(sym.var.x$var.data.vector, sym.var.y$var.data.vector))
    if ((sym.var.x$var.type == '$I') && (sym.var.y$var.type == '$I'))
      return(cor((sym.var.x$var.data.vector[, 1] + sym.var.x$var.data.vector[, 2]) / 2,
                 (sym.var.y$var.data.vector[, 1] + sym.var.y$var.data.vector[, 2]) / 2))
    stop("Impossible to compute the correlation for this type of variable with this method")
  }
  if (method == 'billard') {
    # Billard's interval correlation: covariance divided by the product of
    # the two standard deviations (all computed with method = 'billard').
    if ((sym.var.x$var.type == '$I') && (sym.var.y$var.type == '$I'))
      return(sym.cov(sym.var.x, sym.var.y, method = 'billard') /
             (sym.sd(sym.var.x, method = 'billard') * sym.sd(sym.var.y, method = 'billard')))
    stop("Impossible to compute the correlation for this type of variable with this method")
  }
  # 'interval' and 'histogram' are accepted by match.arg but not implemented.
  stop("Method not implemented for sym.cor")
}
|
b3ab6dfd31e5abaa64db58fcbed40e1c0656c742
|
c5138fb9dacb509d778881d2ad97ab056dbc16a3
|
/R/neural-nets.R
|
82fffa27321278bb5dff0aeecc289975cf431d2c
|
[] |
no_license
|
cran/deep
|
8f5c142e5e2aef91a14ded9b9b2f80af0eabd067
|
1a3c7676464430b245b9f092649c87202c606aad
|
refs/heads/master
| 2020-12-21T22:24:49.733218
| 2019-12-20T10:50:03
| 2019-12-20T10:50:03
| 236,581,866
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,740
|
r
|
neural-nets.R
|
#' deep: A Neural Networks Framework
#'
#' The deep package provides classes for layers, types of neurons and the
#' neural network as a whole.
#'
#' @docType package
#' @name deep
NULL
#' The main NeuralNetwork class, that holds the layers.
#'
#' @field eta The learning tax, representes the size of the weight adjustment
#' between each epoch of training.
#'
#' @field layers This field is a list of the layers of the network, you can use
#' subsetting to inspect them.
#'
#' @examples
#' # Create a dataset
#' dataset <- iris
#' dataset$Petal.Length <- NULL
#' dataset$Petal.Width <- NULL
#' dataset <- dataset[dataset$Species != "versicolor",]
#' dataset$Code <- as.integer(dataset$Species == "virginica")
#' dataset <- dataset[sample(20),]
#'
#' # Create the network
#' net <- neuralNet(2, perceptronLayer(1))
#'
#' # Train the network, takes a while
#' net$train(dataset[,c(1,2), drop=FALSE], dataset[,'Code', drop=FALSE], epochs = 10)
#'
#' # Check the output
#' net$compute(c(1,2))
#'
#' # See accuracy
#' net$validationScore(dataset[,c(1,2), drop=FALSE], dataset[,'Code', drop=FALSE])
#'
# Reference class holding the network state:
#   eta    -- declared learning rate.  NOTE(review): the methods defined on
#             this class take a `tax` argument instead and never read this
#             field; confirm whether it should be wired into train().
#   layers -- list of layer objects, populated in initialize().
neuralNet <- setRefClass(
  "NeuralNetwork",
  fields = list(eta = "numeric", layers = "vector")
)
#' @method initialize This runs when you instantiate the class, either with
#' \code{neuralNet$new()} or \code{neuralNet()}, the intent is to set up the
#' internal fields and layers passed in ... .
#'
#' @method compute Gives the output of passing data inside the network.
#' @param input The actual data to be fed into the network, this input's
#' dimentions vary with the first layer in your network.
#' @return Vector of the outputs of the last layer in your network.
#'
#' @method train Runs the backpropagation algorithm to train all layers of
#' the network
#' @param ins The list of vectors of inputs to the first layer in the network
#' @param outs The list of vectors of outputs of the last layer in the network
#' @param epochs How many rounds of training to run
#' @param tax This is the learning rate, aka eta
#' @param maxErr A contition to early stop the training process
#' @return Vector of computed values of the same size of the last layer
#'
# Methods of the NeuralNetwork reference class.
neuralNet$methods(
  # Build the layer objects.  `input` is the width of the network input; each
  # argument in ... must be a layer descriptor (PerceptronLayer or
  # McCullochPittsLayer) and is re-instantiated here with the correct number
  # of inputs.  NOTE(review): with zero layers 1:...length() yields c(1, 0),
  # so the loop body runs with an out-of-range index -- confirm callers
  # always pass at least one layer.
  initialize = function(input, ...) {
    # Initialize each layer
    layers <<- vector("list", ...length())
    for (i in 1:...length()) {
      l <- ...elt(i)
      switch (class(l),
        "PerceptronLayer" = {
          layers[[i]] <<- perceptronLayer(l$n, input)
          # This layer's output width becomes the next layer's input width.
          input <- l$n
        },
        "McCullochPittsLayer" = {
          layers[[i]] <<- mcCullochPittsLayer(l$n, input)
          input <- l$n
        },
        stop("argument in ... is not a layer!")
      )
    }
  },
  # Forward pass: feed `input` through every layer in order and return the
  # final layer's output.
  compute = function(input) {
    for (layer in layers) {
      input <- layer$output(input)
    }
    input
  },
  # Batch gradient-descent training.
  #   ins/outs : row-wise training inputs and targets (matrix-like).
  #   epochs   : number of passes over the data.
  #   tax      : learning rate (the class field `eta` is not used here).
  #   maxErr   : declared early-stop threshold.  NOTE(review): maxErr is
  #              never read, so no early stopping actually happens.
  train = function (ins, outs, epochs = 1, tax = .01, maxErr = 0) {
    nLayers <- length(layers)
    r <- nrow(ins)
    for (epoch in 1:epochs) {
      # Initialize changes vector
      # ch[[layer]][[neuron]] accumulates weight ("ws") and bias ("b") deltas
      # over the epoch; they are averaged over the r samples and applied at
      # the end of the epoch.
      ch <- vector("list", nLayers)
      ch[[1]] <- vector("list", layers[[1]]$n)
      for (neu in 1:layers[[1]]$n) {
        ch[[1]][[neu]] <- list(
          "ws" = vector("numeric", length(ins[1,])),
          "b" = vector("numeric", 1)
        )
      }
      if (nLayers > 1) {
        for (l in 2:nLayers) {
          ch[[l]] <- vector("list", layers[[l]]$n)
          for (ne in 1:layers[[l]]$n) {
            ch[[l]][[ne]] <- list(
              "ws" = vector("numeric", layers[[l-1]]$n),
              "b" = vector("numeric", 1)
            )
          }
        }
      }
      for (i in 1:r) {
        # Forward pass keeping every intermediate activation: inputs[[l]] is
        # the input of layer l, inputs[[nLayers+1]] the network output.
        inputs <- vector("list", nLayers + 1)
        inputs[[1]] <- ins[i,]
        # Record each output
        for (l in 1:nLayers) {
          inputs[[l+1]] <- layers[[l]]$output(inputs[[l]])
        }
        # NOTE(review): the per-output errors are collapsed to one scalar
        # here; together with the err[[ni]] indexing below this only behaves
        # as intended when the output layer has a single neuron -- confirm.
        cost <- sum(outs[i,] - inputs[[nLayers+1]])
        # Calculate weight changes
        # Backward pass: walk the layers in reverse, turning the error at
        # layer li into weight/bias deltas and into the error for layer li-1.
        li <- nLayers
        newErr <- cost
        for (l in rev(layers)) {
          err <- newErr
          newErr <- vector("numeric", length(inputs[[li]]))
          ni <- 1
          for (neu in l$neurons) {
            d <- neu$ws*inputs[[li]]*err[[ni]]*tax
            db <- err[[ni]]*tax
            ch[[li]][[ni]][["ws"]] <- ch[[li]][[ni]][["ws"]] + d
            ch[[li]][[ni]][["b"]] <- ch[[li]][[ni]][["b"]] + db
            # Propagate this neuron's share of the error backwards.
            newErr <- newErr + err[[ni]]*neu$ws
            ni <- ni + 1
          }
          li <- li - 1
        }
      }
      # Average changes and apply
      # NOTE(review): weights are moved by +delta while biases are moved by
      # -delta; the sign asymmetry looks suspicious -- confirm against the
      # layer classes' output() convention before changing it.
      li <- 1
      for (l in layers) {
        ni <- 1
        for (neu in l$neurons) {
          wsChange <- ch[[li]][[ni]]$ws/r
          bChange <- ch[[li]][[ni]]$b/r
          neu$ws <- neu$ws + unlist(wsChange, use.names = F)
          neu$bias <- neu$bias - unlist(bChange, use.names = F)
          ni <- ni + 1
        }
        li <- li + 1
      }
    }
  },
  # Fraction of rows of `ins` whose computed output exactly equals the
  # corresponding row of `outs`.  NOTE(review): an exact == comparison only
  # makes sense for discrete (e.g. 0/1) single outputs.
  validationScore = function(ins, outs) {
    corrects <- 0
    for (i in 1:nrow(ins)) {
      corrects <- corrects + as.integer(compute(ins[i,]) == outs[i,])
    }
    corrects/nrow(ins)
  }
)
|
6ec6971b11622ea65ed7f6fa14c32498360358c4
|
350f369998282044eeff0794540189c89ad8710c
|
/inst/doc/qle_with_R.R
|
413f60b26c7cfc7262c89e4fe04c68db935220fd
|
[] |
no_license
|
cran/qle
|
26b2edf6e372d4a966aa85754ba4c88377036290
|
857a96cfcf8dbbf116c944c23924f6cedb37abd8
|
refs/heads/master
| 2021-09-24T10:39:46.030022
| 2018-10-08T11:00:03
| 2018-10-08T11:00:03
| 110,973,979
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,022
|
r
|
qle_with_R.R
|
### R code from vignette source 'qle_with_R.Rnw'
### Encoding: UTF-8
###################################################
### code chunk number 1: qle_with_R.Rnw:1083-1085
###################################################
options(useFancyQuotes="UTF-8")
options(digits=4, prompt="R> ")
###################################################
### code chunk number 2: qle_with_R.Rnw:1168-1170
###################################################
library(qle)
data(mm1q)
###################################################
### code chunk number 3: qle_with_R.Rnw:1173-1177 (eval = FALSE)
###################################################
## options(mc.cores=8L)
## options(qle.multicore="mclapply")
## RNGkind("L'Ecuyer-CMRG")
## set.seed(1326)
###################################################
### code chunk number 4: qle_with_R.Rnw:1184-1188
###################################################
cond <- list("n"=100)
simfn <- function(tet,cond){
mean(rgeom(cond$n,prob=1-tet[1]))
}
###################################################
### code chunk number 5: qle_with_R.Rnw:1192-1194
###################################################
lb <- c("rho"=0.05)
ub <- c("rho"=0.95)
###################################################
### code chunk number 6: qle_with_R.Rnw:1200-1204 (eval = FALSE)
###################################################
## nsim <- 10
## X <- multiDimLHS(N=9,lb=lb,ub=ub,
## method="maximinLHS",type="matrix")
## sim <- simQLdata(sim=simfn,cond=cond,nsim=nsim,X=X)
###################################################
### code chunk number 7: qle_with_R.Rnw:1206-1209
###################################################
sim <- mm1q$sim
X <- attr(sim,"X")
nsim <- attr(sim,"nsim")
###################################################
### code chunk number 8: qle_with_R.Rnw:1215-1217 (eval = FALSE)
###################################################
## qsd <- getQLmodel(sim, lb, ub, obs=c("N"=1),
## var.type="wlogMean",verbose=TRUE)
###################################################
### code chunk number 9: qle_with_R.Rnw:1221-1222 (eval = FALSE)
###################################################
## S0 <- qscoring(qsd,x0=c("rho"=0.8))
###################################################
### code chunk number 10: qle_with_R.Rnw:1224-1227
###################################################
OPT <- mm1q$OPT
qsd <- mm1q$qsd
S0 <- mm1q$S0
###################################################
### code chunk number 11: qle_with_R.Rnw:1229-1230
###################################################
print(S0)
###################################################
### code chunk number 12: qle_with_R.Rnw:1244-1249 (eval = FALSE)
###################################################
## OPT <- qle(qsd,simfn,cond=cond,
## global.opts = list("maxeval"=5, "NmaxLam"=5),
## local.opts = list("nextSample"="score","weights"=0.5,
## "ftol_abs"=1e-4, "lam_max"=1e-5),
## method = c("qscoring","bobyqa","direct"), iseed=1326)
###################################################
### code chunk number 13: qle_with_R.Rnw:1254-1273
###################################################
library(graphics)
# statistics
op <- par(xaxs='i', yaxs='i')
rho <- as.matrix(seq(0.1,0.9,by=0.001))
y <- as.numeric(unlist(simQLdata(sim=simfn,cond=cond,nsim=nsim,X=rho,mode="mean")))
T <- qsd$qldata[grep("mean.",names(qsd$qldata))]
Y <- predictKM(qsd$covT,rho,X,T,krig.type="var")
# steady state values
y0 <- rho/(1-rho)
plot(NULL, type="n", xlab=expression(rho),
ylab="y",xlim=c(0,1), ylim=c(0,10))
lines(as.numeric(rho),y,col="black",lt=2,lwd=0.3)
lines(as.numeric(rho),Y,col="blue",lwd=0.3)
lines(as.numeric(rho),y0,col="red",lwd=0.3)
legend("topleft", c("Number of customers in the system",
"Expected number at steady state","Kriging approximation"),
lty=c(2,1,1),col=c("black","red","blue"),
xpd=TRUE,pt.cex=1,cex=1)
par(op)
###################################################
### code chunk number 14: qle_with_R.Rnw:1275-1297
###################################################
op <- par(xaxs='i', yaxs='i')
p <- seq(lb,ub,by=0.0001)
QD <- quasiDeviance(X,qsd,value.only=TRUE)
qd <- quasiDeviance(as.matrix(p),qsd)
y <- sapply(qd,"[[","value")
score <- sapply(qd,"[[","score")
## plot quasi-deviance and quasi-score function
plot(NULL, type="n", xlab=expression(rho),
ylab="",xlim=c(0,1), ylim=c(-10,50))
abline(h=0,col="gray")
points(X,QD,pch=3,cex=1)
lines(p,score, type='l',col="blue",lwd=1.5)
lines(p,y,col="black",lwd=0.8)
legend("topright", c("quasi-deviance","quasi-score","sample points", "approximate root","additional samples"),
lty=c(1,1),lwd=c(1.5,1.5,NA,NA,NA),pch=c(NA,NA,3,5,8),
col=c("black","blue","black","magenta","green"),pt.cex=1,cex=1)
points(S0$par,S0$val,col="magenta",pch=5,cex=1)
nmax <- OPT$ctls["maxeval","val"]
X <- as.matrix(qsd$qldata[,1])
Xnew <- OPT$qsd$qldata[(nrow(X)+1):(nrow(X)+nmax),1]
points(cbind(Xnew,0),pch=8,cex=2,col="green")
par(op)
###################################################
### code chunk number 15: qle_with_R.Rnw:1305-1306
###################################################
OPT
###################################################
### code chunk number 16: qle_with_R.Rnw:1311-1330
###################################################
op <-par(xaxs='i', yaxs='i')
qd <- quasiDeviance(as.matrix(p),OPT$qsd)
y <- sapply(qd,"[[","value")
score <- sapply(qd,"[[","score")
## plot quasi-deviance and quasi-score function
plot(NULL, type="n", xlab=expression(rho),
ylab="",xlim=c(0,1), ylim=c(-10,50))
abline(h=0,col="gray")
lines(p,score, type='l',col="blue",lwd=1.5)
lines(p,y,col="black",lwd=0.8)
legend("topright", c("quasi-deviance","quasi-score","sample points", "QL estimate"),
lty=c(1,1),lwd=c(1,1,NA,NA,NA),pch=c(NA,NA,3,5,8),
col=c("black","blue","black","magenta","green"),pt.cex=1,cex=1)
X <- as.matrix(OPT$qsd$qldata[,1])
QD <- quasiDeviance(X,OPT$qsd,value.only=TRUE)
points(X,QD,pch=3,cex=1)
points(OPT$par,OPT$val,col="magenta",pch=5)
par(op)
###################################################
### code chunk number 17: qle_with_R.Rnw:1340-1341
###################################################
checkMultRoot(OPT,verbose = TRUE)
###################################################
### code chunk number 18: qle_with_R.Rnw:1351-1354
###################################################
X <- as.matrix(OPT$qsd$qldata[,1])
Tstat <- OPT$qsd$qldata[grep("mean.",names(qsd$qldata))]
predictKM(OPT$qsd$covT,c("rho"=0.5),X,Tstat)
###################################################
### code chunk number 19: qle_with_R.Rnw:1393-1395 (eval = FALSE)
###################################################
## tet0 <- c("rho"=0.5)
## obs0 <- simQLdata(sim=simfn,cond=cond,nsim=100,X=tet0)
###################################################
### code chunk number 20: qle_with_R.Rnw:1397-1399
###################################################
tet0 <- mm1q$tet0
obs0 <- mm1q$obs0
###################################################
### code chunk number 21: qle_with_R.Rnw:1404-1411
###################################################
mle <- do.call(rbind,
lapply(obs0[[1]],function(y,n){
tet <- 1-1/(1+y[[1]])
c("mle.rho"=tet,"mle.var"=(tet*(1-tet)^2)/n)
}, n=cond$n))
x <- mle[,1]-tet0
mle.var <- c(sum(x^2)/length(x),mean(mle[,2]))
###################################################
### code chunk number 22: qle_with_R.Rnw:1415-1427 (eval = FALSE)
###################################################
## OPTS <- parLapply(cl,obs0[[1]],
## function(obs,...) {
## qle(...,obs=obs)
## },
## qsd=qsd,
## sim=simfn,
## cond=cond,
## global.opts=list("maxeval"=10,"NmaxLam"=10),
## local.opts=list("nextSample"="score","weights"=0.5,
## "ftol_abs"=1e-4,"lam_max"=1e-5,
## "useWeights"=TRUE),
## method=c("qscoring","bobyqa","direct"))
###################################################
### code chunk number 23: qle_with_R.Rnw:1429-1430
###################################################
OPTS <- mm1q$OPTS
###################################################
### code chunk number 24: qle_with_R.Rnw:1432-1441
###################################################
# get results
QLE <- do.call(rbind,
lapply(OPTS,
function(x) {
c("qle"=x$par,"qle.var"=1/as.numeric(x$final$I))
}))
y <- QLE[,1]-tet0
# MSE and average estimated variance of the parameters
qle.var <- c(sum(y^2)/length(y),mean(QLE[,2]))
###################################################
### code chunk number 25: qle_with_R.Rnw:1443-1444
###################################################
Stest0 <- mm1q$Stest0
###################################################
### code chunk number 26: qle_with_R.Rnw:1473-1474 (eval = FALSE)
###################################################
## Stest0 <- qleTest(OPT,sim=simfn,cond=cond,obs=obs0,cl=cl)
###################################################
### code chunk number 27: qle_with_R.Rnw:1476-1477
###################################################
print(Stest0)
###################################################
### code chunk number 28: qle_with_R.Rnw:1506-1510
###################################################
data(normal)
OPT <- qsd$OPT
QS <- qsd$QS
simfunc <- qsd$simfn
###################################################
### code chunk number 29: qle_with_R.Rnw:1512-1520 (eval = FALSE)
###################################################
## # use a local cluster
## cl <- makeCluster(8L)
## clusterSetRNGStream(cl,1234)
## # simulation function
## simfunc <- function(pars) {
## x <- rnorm(10,mean=pars["mu"],sd=pars["sigma"])
## c("T1"=median(x),"T2"=mad(x))
## }
###################################################
### code chunk number 30: qle_with_R.Rnw:1525-1527 (eval = FALSE)
###################################################
## lb <- c("mu"=0.5,"sigma"=0.1)
## ub <- c("mu"=8.0,"sigma"=5.0)
###################################################
### code chunk number 31: qle_with_R.Rnw:1533-1537 (eval = FALSE)
###################################################
## sim <- simQLdata(sim=simfunc,
## nsim=10,N=8,lb=lb,ub=ub,method="maximinLHS")
## # reset number of simulations (10 x 10)
## attr(sim,"nsim") <- 100
###################################################
### code chunk number 32: qle_with_R.Rnw:1543-1544 (eval = FALSE)
###################################################
## obs <- structure(c("T1"=2,"T2"=1),class="simQL")
###################################################
### code chunk number 33: qle_with_R.Rnw:1547-1548 (eval = FALSE)
###################################################
## qsd <- getQLmodel(sim,lb,ub,obs,var.type="wcholMean")
###################################################
### code chunk number 34: qle_with_R.Rnw:1552-1553 (eval = FALSE)
###################################################
## QS <- qscoring(qsd, x0=c("mu"=5,"sigma"=3.0))
###################################################
### code chunk number 35: qle_with_R.Rnw:1555-1556
###################################################
print(QS)
###################################################
### code chunk number 36: qle_with_R.Rnw:1565-1571 (eval = FALSE)
###################################################
## OPT <- qle(qsd,
## simfunc,
## nsim=20,
## global.opts=list("maxeval"=50),
## local.opts=list("lam_max"=1e-3,"weights"=0.5,
## "useWeights"=FALSE,"test"=TRUE),cl=cl)
###################################################
### code chunk number 37: qle_with_R.Rnw:1573-1574
###################################################
OPT
###################################################
### code chunk number 38: qle_with_R.Rnw:1578-1620
###################################################
op <- par(mfrow=c(1, 2), mar=c(5.1, 4.1, 1.1, 1.1),
oma=c(5,4,1,1),xaxs='i', yaxs='i',
cex=2.2, cex.axis=2.2, cex.lab=2.2)
# get points for plotting
theta0 <- c("T1"=2,"T2"=1)
x <- seq(qsd$lower[1],qsd$upper[1],by=0.05)
y <- seq(qsd$lower[2],qsd$upper[2],by=0.05)
p <- as.matrix(expand.grid(x,y))
X <- as.matrix(qsd$qldata[,1:2])
Tstat <- qsd$qldata[grep("mean.",names(qsd$qldata))]
Xp <- quasiDeviance(X,qsd,value.only=TRUE)
D <- quasiDeviance(p,qsd,value.only=TRUE)
z <- matrix(D,ncol=length(y))
Xnext <- as.matrix(OPT$qsd$qldata[,1:2])
Dnext <- quasiDeviance(p,OPT$qsd,value.only=TRUE)
znext <- matrix(Dnext,ncol=length(y))
nmax <- OPT$ctls["maxeval","val"]
Xnew <- OPT$qsd$qldata[(nrow(X)+1):(nrow(X)+nmax),c(1,2)]
# left
plot(x=0,y=0,type="n", xlim=range(x),ylim=range(y),
xlab=expression(mu),ylab=expression(sigma))
contour(x,y,z,col="black",lty="solid",nlevels=50,add=TRUE)
#
points(X,pch=23,cex=2,bg="black")
points(Xnew,pch=8,cex=2,col="green")
# right
plot(x=0,y=0,type="n", xlim=range(x),ylim=range(y),
xlab=expression(mu),ylab=expression(sigma))
contour(x,y,znext,col="black",lty="solid",nlevels=50,add=TRUE)
points(Xnext,pch=23,cex=2,bg="black")
points(rbind(OPT$par),pch=18,cex=2.5,col="magenta")
points(rbind(unlist(theta0)),pch=17,cex=2.5,col="red")
# legend
par(fig = c(0, 1, 0, 1), oma = c(0, 0, 0, 0), mar = c(0, 0, 0, 0), new = TRUE)
plot(0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n")
cols <- c("black","green","magenta","red")
legend("bottomleft",text.width=c(0.45,0.45,0.45,0.45),
legend=c("initial design points", "new sample points",
"estimated parameter","true parameter"),
pch=c(23,8,18,17),col=cols,pt.bg=cols,bty='n',
horiz=TRUE,xpd=TRUE,pt.cex=2.5,cex=2.5)
par(op)
###################################################
### code chunk number 39: qle_with_R.Rnw:1629-1630
###################################################
checkMultRoot(OPT,verbose=TRUE)
###################################################
### code chunk number 40: qle_with_R.Rnw:1638-1641
###################################################
obs0 <- simQLdata(simfunc,X=OPT$par,nsim=1000,mode="matrix")[[1]]
var(obs0)
attr(OPT$final,"Sigma")
###################################################
### code chunk number 41: qle_with_R.Rnw:1643-1644 (eval = FALSE)
###################################################
## stopCluster(cl)
###################################################
### code chunk number 42: qle_with_R.Rnw:1674-1680
###################################################
data(matclust)
OPT <- matclust$OPT
qsd <- matclust$qsd
cvm <- matclust$cvm
Stest <- matclust$Stest
library(spatstat)
###################################################
### code chunk number 43: qle_with_R.Rnw:1684-1686
###################################################
data(redwood)
fitMat <- kppm(redwood, ~1, "MatClust")
###################################################
### code chunk number 44: qle_with_R.Rnw:1689-1690
###################################################
fitMat$modelpar
###################################################
### code chunk number 45: qle_with_R.Rnw:1692-1694 (eval = FALSE)
###################################################
## RNGkind("L'Ecuyer-CMRG")
## set.seed(297)
###################################################
### code chunk number 46: qle_with_R.Rnw:1698-1707
###################################################
simStat <- function(X,cond){
x <- Kest(X,r=cond$rr,correction="best")
x <- x[[attr(x,"valu")]]
x <- x[x>0]
if(anyNA(x) || any(!is.finite(x))) {
warning(.makeMessage("`NA`, `NaN` or `Inf` detected.","\n"))
x <- x[!is.nan(x) & is.finite(x)]}
return(c(intensity(X),x))
}
###################################################
### code chunk number 47: qle_with_R.Rnw:1711-1715
###################################################
simClust <- function(theta,cond){
X <- rMatClust(theta["kappa"],theta["R"],theta["mu"],win=cond$win)
simStat(X,cond)
}
###################################################
### code chunk number 48: qle_with_R.Rnw:1720-1724
###################################################
nsim <- 50
Nsample <- 12
cond <- list(win=owin(c(0, 2),c(0, 2)),
rr=seq(0,0.3,by=0.05))
###################################################
### code chunk number 49: qle_with_R.Rnw:1727-1729
###################################################
lb <- c("kappa"=20,"R"=0.01,"mu"=1)
ub <- c("kappa"=30,"R"=0.25,"mu"=5)
###################################################
### code chunk number 50: qle_with_R.Rnw:1733-1737 (eval = FALSE)
###################################################
## cl <- makeCluster(8L)
## clusterSetRNGStream(cl)
## clusterCall(cl,fun=function(x) library("spatstat", character.only=TRUE))
## clusterExport(cl=cl,varlist=c("simStat"), envir=environment())
###################################################
### code chunk number 51: qle_with_R.Rnw:1741-1742 (eval = FALSE)
###################################################
## obs0 <- simStat(redwood,cond)
###################################################
### code chunk number 52: qle_with_R.Rnw:1745-1747 (eval = FALSE)
###################################################
## sim <- simQLdata(sim=simClust,cond=cond,nsim=nsim,
## method="randomLHS",lb=lb,ub=ub,N=Nsample,cl=cl)
###################################################
### code chunk number 53: qle_with_R.Rnw:1750-1752 (eval = FALSE)
###################################################
## qsd <- getQLmodel(sim,lb,ub,obs0,criterion="qle",
## var.type="kriging",verbose=TRUE)
###################################################
### code chunk number 54: qle_with_R.Rnw:1762-1763 (eval = FALSE)
###################################################
## cvm <- prefitCV(qsd, reduce=FALSE, verbose=TRUE)
###################################################
### code chunk number 55: qle_with_R.Rnw:1769-1770 (eval = FALSE)
###################################################
## crossValTx(qsd, cvm, type = "acve")
###################################################
### code chunk number 56: qle_with_R.Rnw:1772-1773
###################################################
matclust$ACVE
###################################################
### code chunk number 57: qle_with_R.Rnw:1778-1779 (eval = FALSE)
###################################################
## crossValTx(qsd, cvm, type = "mse")
###################################################
### code chunk number 58: qle_with_R.Rnw:1781-1782
###################################################
matclust$MSE
###################################################
### code chunk number 59: qle_with_R.Rnw:1793-1794 (eval = FALSE)
###################################################
## crossValTx(qsd, cvm, type = "ascve")
###################################################
### code chunk number 60: qle_with_R.Rnw:1796-1797
###################################################
matclust$ASCVE
###################################################
### code chunk number 61: qle_with_R.Rnw:1807-1808
###################################################
attr(cvm,"type") <- "max"
###################################################
### code chunk number 62: qle_with_R.Rnw:1812-1815
###################################################
x0 <- c("kappa"=24,"R"=0.08,"mu"=2.5)
searchMinimizer(x0,qsd,info=TRUE,
method="direct",cvm=cvm,verbose=TRUE)
###################################################
### code chunk number 63: qle_with_R.Rnw:1818-1821
###################################################
qscoring(qsd,x0,
opts=list("ftol_rel"=1e-6,"slope_tol"=1e-4),
cvm=cvm)
###################################################
### code chunk number 64: qle_with_R.Rnw:1837-1844 (eval = FALSE)
###################################################
## qs.opts <-
## list("xscale"=c(10,0.1,1),
## "xtol_rel"=1e-10,
## "ftol_stop"=1e-8,
## "ftol_rel"=1e-6,
## "ftol_abs"=1e-4,
## "score_tol"=1e-4)
###################################################
### code chunk number 65: qle_with_R.Rnw:1847-1863 (eval = FALSE)
###################################################
## OPT <- qle(qsd, simClust, cond=cond,
## qscore.opts = qs.opts,
## global.opts = list("maxiter"=10,"maxeval" = 20,
## "weights"=c(50,10,5,1,0.1),
## "NmaxQI"=5,"nstart"=100,
## "xscale"=c(10,0.1,1)),
## local.opts = list("lam_max"=1e-2,
## "nobs"=200, # number of (bootstrap) observations for testing
## "nextSample"="score", # sampling criterion
## "ftol_abs"=1e-2, # lower bound on criterion value, triggers testing
## "weights"=c(0.55), # constant weight factor
## "eta"=c(0.025,0.075), # ignored, automatic adjustment of weights
## "test"=TRUE), # testing approximate root is enabled
## method = c("qscoring","bobyqa","direct"), # restart methods
## errType="max", # use max of kriging and CV error
## iseed=297, cl=cl) # store seed and use given cluster object
###################################################
### code chunk number 66: qle_with_R.Rnw:1866-1867
###################################################
print(OPT)
###################################################
### code chunk number 67: qle_with_R.Rnw:1870-1871
###################################################
attr(OPT,"optInfo")
###################################################
### code chunk number 68: qle_with_R.Rnw:1876-1877
###################################################
OPT$final
###################################################
### code chunk number 69: qle_with_R.Rnw:1881-1883
###################################################
S0 <- searchMinimizer(OPT$par,OPT$qsd,
method="bobyqa",cvm=OPT$cvm,verbose=TRUE)
###################################################
### code chunk number 70: qle_with_R.Rnw:1888-1891
###################################################
QS <- qscoring(OPT$qsd,OPT$par,
opts=list("slope_tol"=1e-4,"score_tol"=1e-3),
cvm=OPT$cvm)
###################################################
### code chunk number 71: qle_with_R.Rnw:1895-1897
###################################################
par <- rbind("QS"=QS$par,"S0"=S0$par)
checkMultRoot(OPT,par=par)
###################################################
### code chunk number 72: qle_with_R.Rnw:1900-1901
###################################################
QS$par
###################################################
### code chunk number 73: qle_with_R.Rnw:1906-1917 (eval = FALSE)
###################################################
## par0 <- OPT$par
## obs0 <- OPT$qsd$obs
## # testing `par0` with observed statistics `obs0`
## # which can be replaced by the user and are obsolete below
## Stest <- qleTest(OPT, # estimation results
## par0=par0, # parameter to test
## obs0=obs0, # alternative observed statistics
## sim=simClust,cond=cond,nsim=100,
## method=c("qscoring","bobyqa","direct"), # restart methods
## opts=qs.opts,control=list("ftol_abs"=1e-8), # minimization options
## multi.start=1L,cl=cl,verbose=TRUE) # multi-start and parallel options
###################################################
### code chunk number 74: qle_with_R.Rnw:1919-1920
###################################################
print(Stest)
###################################################
### code chunk number 75: qle_with_R.Rnw:1923-1924 (eval = FALSE)
###################################################
## stopCluster(cl)
###################################################
### code chunk number 76: qle_with_R.Rnw:1933-1934
###################################################
diag(attr(Stest,"qi"))^0.5
###################################################
### code chunk number 77: qle_with_R.Rnw:1937-1938
###################################################
sqrt(diag(attr(Stest,"msem")))
###################################################
### code chunk number 78: qle_with_R.Rnw:1942-1943
###################################################
attr(Stest,"msem")
|
25d32961e0235ee365e936d70142d4bf007c4798
|
b7b87a80d60ae83d4789e9e2df1be3cb87676ba5
|
/other/crosstalk.R
|
8e4ea1518c0572e455abbc714be448c2145ba42f
|
[] |
no_license
|
malvikarajeev/misc_code
|
ff169c87586ba3583c38893cb7c5eccce03318eb
|
6e603a015cc5cdb0343f7a0518912bd7e8fe0239
|
refs/heads/master
| 2023-04-21T13:58:40.716294
| 2021-04-20T07:08:49
| 2021-04-20T07:08:49
| 270,853,898
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 994
|
r
|
crosstalk.R
|
#####experimenting with crosstalk
# Exploratory script: linked brushing between a leaflet map and a d3scatter
# plot via crosstalk SharedData objects.
#devtools::install_github("rstudio/crosstalk")
#devtools::install_github("jcheng5/d3scatter")
#devtools::install_github("rstudio/leaflet")
library(d3scatter)
library(crosstalk)
library(leaflet)
library(sf)
library(sp)
library(rgdal)
library(rgeos)
library(ggplot2)
library(ggthemes)
library(plotly)
# Merge the two tables and wrap the result in a crosstalk SharedData object.
# NOTE(review): q85 and q65 are not defined in this script -- they are assumed
# to exist in the calling environment; confirm where they come from.
# NOTE(review): 'state' appears twice in the `by` vector -- likely a typo.
shared_dat <- merge(q85, q65, by = c('city', 'state', 'lat','long','state','zip'))
shared_dat <- SharedData$new(shared_dat)
#####linked brushing using bootstrap
# Sample 100 rows of q85_long (also assumed defined elsewhere) into a
# SharedData object so the map and the scatterplot below share a selection.
shared_quakes <- SharedData$new(sample_n(q85_long, 100))
bscols(
  leaflet(shared_quakes, width = "100%", height = 300) %>%
    addTiles() %>%
    addMarkers(),
  d3scatter(shared_quakes, ~option, ~no_of_ppl, ~state, width = "100%", height = 300)
)
##ADDING FILTERS: what to filter by?
library(dplyr)
#########
# NOTE(review): the block below repeats the library() calls from the top of
# the file verbatim -- apparently a copy/paste leftover.
library(d3scatter)
library(crosstalk)
library(leaflet)
library(sf)
library(sp)
library(rgdal)
library(rgeos)
library(ggplot2)
library(ggthemes)
library(plotly)
|
62c3e4f7047553806eb015ad18d81890fe8c64fd
|
6b987d7b5663bd70e2cd902a99deb3feb5741c5d
|
/R/param.R
|
6dd84c4619b1a5d289938d707d01778f8cd99dde
|
[
"Apache-2.0"
] |
permissive
|
NicolasDurrande/gpflowr
|
081e67d52f5c863c04340ef4330177a2f428a6d0
|
c82aead1a82883af49c619962147a8e83c809a39
|
refs/heads/master
| 2021-01-12T18:16:27.999422
| 2016-11-22T16:10:57
| 2016-11-22T16:10:57
| 71,359,140
| 1
| 0
| null | 2016-10-19T13:17:46
| 2016-10-19T13:17:46
| null |
UTF-8
|
R
| false
| false
| 25,295
|
r
|
param.R
|
# define R6 classes: Parentable, Param, DataHolder, Parameterized and ParamList
# port of GPflow/GPflow/param.py
# ' @title Parentable class
# '
# ' @description A very simple class for objects in a tree, where each node contains a
# ' reference to '_parent'.
# '
# ' @details This class can figure out its own name (by seeing what it's called by the
# ' _parent's __dict__) and also recurse up to the highest_parent.
# '
# Parentable: base node class for a tree of parameter objects (an R port of
# GPflow's Parentable). A child finds its parent through the parent's
# enclosing environment (.parent_env); a parent indexes its children by
# unique hex ids stored in .hex_list. Names of nodes are derived from the
# field name under which the parent holds the child.
Parentable <- R6Class('Parentable',
                      public = list(
                        # enclosing env of parent
                        .parent_env = NULL,
                        # own hex in parent's .hex_list
                        .hex = NULL,
                        # named list of shas of children
                        .hex_list = list(),
                        # set the environment of the parent
                        # (stores the parent's R6 enclosing environment so
                        # `parent` below can dereference it lazily)
                        .set_parent_env = function (parent)
                          self$.parent_env <- parent$.__enclos_env__,
                        # bespoke assignment, to build tree structure
                        # NOTE(review): `$<-` defined inside `public` is not
                        # standard R6 operator dispatch -- confirm it is
                        # actually invoked on assignment.
                        `$<-` = function (x, i, value) {
                          # when setting values, if the new value is a parentable,
                          # link the child (value) and parent (self)
                          if (inherits(value, 'Parentable')){
                            # generate unique hex to index the child in the parent
                            # (get_hex is defined elsewhere in the package)
                            hex <- get_hex()
                            value[['.hex']] <- hex
                            self[['.hex_list']][[i]] <- hex
                            # and give the child the environment of the parent
                            value[['.set_parent_env']](self)
                          }
                          # either way, assign the value
                          self[[i]] <- value
                          self
                        },
                        # get the index to a child
                        # Returns a named integer: the child's position in
                        # .hex_list, named after the field it was assigned to.
                        which_child = function (child) {
                          if (!inherits(child, 'Parentable'))
                            stop ('Parentables can only have children that are also Parentables')
                          # return the index, retaining its name in the hex list
                          idx <- match(child$.hex, self$.hex_list)
                          names(idx) <- names(self$.hex_list)[idx]
                          idx
                        },
                        # Pretty-print the object's class and its inheritance
                        # chain (excluding the R6 base class).
                        print = function (...) {
                          # find the classes to which this object belongs and print them
                          classes <- class(self$clone())
                          main_class <- classes[1]
                          other_classes <- classes[-1]
                          other_classes <- other_classes[other_classes != 'R6']
                          if (length(other_classes) > 0) {
                            inheritance_msg <- sprintf('(inheriting from %s)\n',
                                                       paste(other_classes,
                                                             collapse = ' < '))
                          } else {
                            inheritance_msg <- ''
                          }
                          msg <- sprintf('%s object\n%s',
                                         main_class,
                                         inheritance_msg)
                          cat (msg)
                        }
                        # used for pickling, so ignore for now
                        # would need to write a dict class
                        # .getstate <- function () {
                        #   # get list of elements, remove parent, return list?
                        #   d <- self$.dict
                        #   d$pop('_parent')
                        #   return (d)
                        # },
                        # .setstate = function (d) {
                        #   # replace list of elements with new list of elements, then remove parent?
                        #   self$.dict$update(d)
                        #   self$parent <- NULL
                        # }
                      ),
                      active = list(
                        # make name and long_name properties
                        name = function (value) {
                          # let the user know they can't assign names in this way
                          if (!missing(value))
                            warning ('name assignment ignored')
                          # An automatically generated name, given by the
                          # reference of the _parent to this instance
                          if (is.null(self$parent))
                            return ('unnamed')
                          # get the index
                          idx <- self$parent$which_child(self)
                          if (inherits(self$parent, 'ParamList'))
                            return (sprintf('item%i', self$parent$.list$index(self)))
                          if (length(idx) == 0)
                            stop("mis-specified parent. This Param's .parent does not contain a reference to it.")
                          if (length(idx) > 1)
                            stop("This Param appears to be doubly referenced by a parent")
                          names(idx)
                        },
                        long_name = function (value) {
                          # This is a unique identifier for a param object
                          # within a structure, made by concatenating the names
                          # through the tree.
                          # let the user know they can't assign names in this way
                          if (!missing(value))
                            warning ('name assignment ignored')
                          if (is.null(self$parent))
                            return (self$name)
                          paste(self$parent$long_name,
                                self$name,
                                sep = '$')
                        },
                        parent = function (value) {
                          # get the parent object from its environment
                          if (missing(value))
                            self$.parent_env$self
                          else
                            self$.parent_env$self <- value
                        },
                        highest_parent = function (value) {
                          # A reference to the top of the tree, usually a Model
                          # instance
                          if (missing(value)) {
                            if (is.null(self$parent))
                              self
                            else
                              self$parent$highest_parent
                          } else {
                            if (is.null(self$parent))
                              self <- value
                            else
                              self$parent$highest_parent <- value
                          }
                        }
                      ))
# Param: a leaf node holding a numeric parameter array (port of GPflow's
# Param). It stores the constrained value in .array, and in "tf mode" exposes
# a tensorflow tensor (.tf_array) built from a free (unconstrained) vector
# via `transform`. `fixed` parameters contribute nothing to the free state.
# Relies on package-level objects defined elsewhere: `transforms`, `tf`,
# `float_type` and the `property` helper.
Param <- R6Class('Param',
                 inherit = Parentable,
                 public = list(
                   .array = NULL,
                   .tf_array = NULL,
                   .log_jacobian = NULL,
                   prior = NULL,
                   transform = NULL,
                   fixed = FALSE,
                   # array: initial (constrained) value; transform: bijection
                   # between free and constrained space, identity by default
                   initialize = function (array, transform = transforms$Identity) {
                     self$value <- as.array(array)
                     self$transform <- transform
                   },
                   # get_parameter_dict = function (d)
                   #   d[[self$long_name]] <- self$value,
                   #
                   # set_parameter_dict = function (d)
                   #   self$value <- d[[self$long_name]],
                   # get_samples_df = function (samples) {
                   #   # Given a numpy array where each row is a valid free-state
                   #   # vector, return a pandas.DataFrame which contains the
                   #   # parameter name and associated samples in the correct form
                   #   # (e.g. with positive constraints applied).
                   #   # if (self$fixed)
                   #   #   return (pd.Series([self.value for _ in range(samples.shape[0])], name=self.long_name))
                   #   start <- self$highest_parent()$get_param_index(self)[1]
                   #   end <- start + self$size - 1
                   #   samples <- samples[, start:end]
                   #   # samples <- samples.reshape((samples.shape[0],) + self.shape)
                   #   samples <- self$transform$forward(samples)
                   #   # return (pd.Series([v for v in samples], name=self.long_name))
                   # },
                   make_tf_array = function (free_array) {
                     # free_array is a tensorflow vector which will be the optimisation
                     # target, i.e. it will be free to take any value.
                     # Here we take that array, and transform and reshape it so that it can be
                     # used to represent this parameter
                     # Then we return the number of elements that we've used to construct the
                     # array, so that it can be sliced for the next Param.
                     # fixed parameters are treated by tf.placeholder
                     if (self$fixed)
                       return (0)
                     free_size <- self$size
                     # NOTE(review): `[` on a tf tensor -- confirm whether this
                     # slice is 0- or 1-based in the tensorflow binding used.
                     x_free <- free_array[1:free_size]
                     mapped_array <- self$transform$tf_forward(x_free)
                     self$.tf_array <- tf$reshape(mapped_array, self$shape)
                     self$.log_jacobian <- self$transform$tf_log_jacobian(x_free)
                     return (free_size)
                   },
                   get_free_state = function () {
                     # Take the current state of this variable, as stored in
                     # self.value, and transform it to the 'free' state. This is
                     # a numpy method.
                     if (self$fixed)
                       return (0)
                     return (self$transform$backward(self$value))
                   },
                   # get_feed_dict = function() {
                   #   # Return a dictionary matching up any fixed-placeholders to their values
                   #   d <- list()
                   #   if (self$fixed)
                   #     d[[self$.tf_array]] <- self$value
                   #   return (d)
                   # },
                   set_state = function (x) {
                     # Given a vector x representing the 'free' state of this Param, transform
                     # it 'forwards' and store the result in self._array. The values in
                     # self._array can be accessed using self.value
                     # This is a numpy method.
                     # Returns the number of elements consumed from x (0 when fixed).
                     if (self$fixed)
                       return (0)
                     new_x <- self$transform$forward(x)
                     new_array <- array(new_x, dim = self$shape)
                     stopifnot(all(dim(new_array) == dim(self$.array)))
                     self$.array <- new_array
                     return (self$size)
                   },
                   build_prior = function () {
                     # Build a tensorflow representation of the prior density.
                     # The log Jacobian is included.
                     # With no prior set, contributes a constant 0 so parents
                     # can sum child priors unconditionally.
                     if (is.null(self$prior))
                       return (tf$constant(0.0, float_type))
                     else if (is.null(self$.tf_array)) # pragma: no cover
                       stop ("tensorflow array has not been initialized")
                     else
                       return (self$prior$logp(self$.tf_array) + self$.log_jacobian)
                   }#,
                   #
                   # `$<-` = function (x, i, value) {
                   #   # When some attributes are set, we need to recompile the tf model before
                   #   # evaluation.
                   #   self[[i]] <- value
                   #   if (i %in% recompile_keys)
                   #     self$highest_parent$.needs_recompile <- TRUE
                   #
                   #   # when setting the fixed attribute, make or remove a placeholder appropraitely
                   #   if (i == 'fixed') {
                   #     if (value)
                   #       self$.tf_array <- tf$placeholder(dtype = float_type,
                   #                                        shape = self$.array$shape,
                   #                                        name = self$name)
                   #     else
                   #       self$.tf_array = NULL
                   #   }
                   # },
                   # def __str__(self, prepend=''):
                   #   return prepend + \
                   #     '\033[1m' + self.name + '\033[0m' + \
                   #     ' transform:' + str(self.transform) + \
                   #     ' prior:' + str(self.prior) + \
                   #     (' [FIXED]' if self.fixed else '') + \
                   #     '\n' + str(self.value)
                   # getstate = function (self) {
                   #   d <- super$getstate()
                   #   d$pop('_tf_array')
                   #   d$pop('_log_jacobian')
                   #   return (d)
                   # },
                   #
                   # setstate = function (self) {
                   #   super$setstate(d)
                   #   self$.log_jacobian <- NULL
                   #   self$fixed <- self$fixed
                   # }
                 ),
                 # point 'value' at the array
                 active = list(
                   value = property('.array'),
                   shape = function (value) dim(self$.array),
                   size = function (value) prod(self$shape)
                 )
)
# DataHolder <- R6Class('DataHolder',
# inherit = Parentable,
# public = list(
#
# ))
# Parameterized: a Parentable container whose fields may themselves be Param
# or Parameterized objects (port of GPflow's Parameterized). It recursively
# packs/unpacks the free states of its children and, while in "tf mode",
# exposes children's tensorflow arrays instead of the wrapper objects.
Parameterized <- R6Class('Parameterized',
                         inherit = Parentable,
                         public = list(
                           x = NULL,
                           # when TRUE, `$` returns a child's .tf_array (see below)
                           .tf_mode = FALSE,
                           # per-instance storage used by AutoFlow-style caching
                           .tf_mode_storage = list(),
                           initialize = function () {
                             self$.tf_mode <- FALSE
                           },
                           # collect all child parameter values into the list `d`
                           get_parameter_dict = function (d = NULL) {
                             if (is.null(d))
                               d <- list()
                             for (p in self$sorted_params)
                               p$get_parameter_dict(d)
                             d
                           },
                           # push values from `d` back into all child parameters
                           set_parameter_dict = function (d) {
                             for (p in self$sorted_params)
                               p$set_parameter_dict(d)
                           },
                           # return a tensorflow array if `x` is in tf_mode,
                           # and the object containing that array otherwise
                           # (equivalent to python's __getattribute__)
                           `$` = function (x, i) {
                             o <- x[[i]]
                             if (has(x, '.tf_mode') && x[['.tf_mode']] && has(o, '.tf_array'))
                               o <- o[['.tf_array']]
                             o
                           },
                           # remove all AutoFlow storage dicts recursively
                           .kill_autoflow = function () {
                             self$.tf_mode_storage <- list()
                             # BUGFIX: was seq_len(self$sorted_params) -- seq_len
                             # needs a length, not a list -- and inherits() was
                             # called without its class argument; both errored.
                             for (i in seq_along(self$sorted_params)) {
                               if (inherits(self$sorted_params[[i]], 'Parameterized'))
                                 self$sorted_params[[i]]$.kill_autoflow()
                             }
                           },
                           make_tf_array = function (X) {
                             # X is a tf placeholder. It gets passed to all the
                             # children of this class (that are Parameterized or
                             # Param objects), which then construct their
                             # tf_array variables from consecutive sections.
                             # Returns the total number of elements consumed.
                             # NOTE(review): X[count:nrow] relies on the tf `[`
                             # method's slicing semantics -- confirm base offset.
                             nrow <- X$get_shape()$as_list()[1]
                             count <- 0
                             for (i in seq_along(self$sorted_params))
                               count <- count + self$sorted_params[[i]]$make_tf_array(X[count:nrow])
                             count
                           },
                           get_free_state = function () {
                             # recurse get_free_state on all child parameters, and hstack them.
                             free_states <- lapply(self$sorted_params,
                                                   function(x) x$get_free_state())
                             array(do.call(c, free_states))
                           },
                           get_feed_dict = function () {
                             # Recursively fetch a dictionary matching up
                             # fixed-placeholders to associated values
                             lapply(c(self$sorted_params, self$data_holders),
                                    function(x) x$get_feed_dict())
                           },
                           set_state = function (x) {
                             # Set the values of all the parameters by recursion;
                             # returns the number of elements consumed from x.
                             nrow <- x$get_shape()$as_list()[1]
                             count <- 0
                             for (name in names(self$sorted_params))
                               count <- count + self$sorted_params[[name]]$set_state(x[count:nrow])
                             count
                           },
                           # switch tf mode on, returning a clone; the previous
                           # mode is restored when this call exits
                           tf_mode = function () {
                             on.exit(self$.end_tf_mode())
                             self$.begin_tf_mode()
                             return (self$clone())
                           },
                           .begin_tf_mode = function () {
                             self$.tf_mode <- TRUE
                           },
                           .end_tf_mode = function () {
                             self$.tf_mode <- FALSE
                           },
                           build_prior = function () {
                             # Build a tf expression for the prior by summing all
                             # child-node priors.
                             params <- self$sorted_params
                             nparam <- length(params)
                             if (nparam == 0)
                               stop ('no child parameters on which to build a prior')
                             pri <- params[[1]]$build_prior()
                             # BUGFIX: the original looped `2:nparam` whenever
                             # nparam > 0, which iterates 2:1 (backwards) for a
                             # single child, and never returned the sum.
                             if (nparam > 1) {
                               for (i in 2:nparam)
                                 pri <- pri + params[[i]]$build_prior()
                             }
                             pri
                           }
                         ),
                         active = list(
                           sorted_params = function (value) {
                             # Return a list of all the child parameters, sorted by id. This makes
                             # sure they're always in the same order.
                             if (!missing(value))
                               warning ('assignment ignored')
                             # find names of elements
                             names <- names(self)
                             names <- names[names != 'parent']
                             # pull out those that are Param-esque
                             params <- list()
                             for (name in names) {
                               if (inherits(self[[name]], c('Param', 'Parameterized')))
                                 params[[name]] <- self[[name]]
                             }
                             # BUGFIX: the ordered list is now the binding's value;
                             # the body previously ended on an assignment.
                             params[order(names(params))]
                           },
                           data_holders = function (value) {
                             # Return a list of all the child DataHolders
                             if (!missing(value))
                               warning ('assignment ignored')
                             params <- list()
                             for (name in names(self)) {
                               if (inherits(self[[name]], 'DataHolder'))
                                 params[[name]] <- self[[name]]
                             }
                             params
                           },
                           fixed = function (value) {
                             # get/set the `fixed` flag of every child at once
                             # NOTE(review): the setter assigns $fixed on every
                             # field of self, not only Param children -- confirm
                             # that is intended.
                             if (!missing(value)) {
                               for (name in names(self))
                                 self[[name]]$fixed <- value
                             } else {
                               ans <- vapply(self$sorted_params,
                                             function(x) x$fixed,
                                             FALSE)
                               return (ans)
                             }
                           }
                         ))
# ParamList: placeholder for an ordered collection of Params (GPflow's
# ParamList). The body is not yet implemented; it currently only inherits
# Parameterized's behaviour. Parentable$name special-cases this class.
ParamList <- R6Class('ParamList',
                     inherit = Parameterized,
                     public = list(
                     ))
|
97f25911f28a71d375aae3a23b8ccbbf64d7d73c
|
a15bafa2b845fff694d5428c27796ae02fc04b17
|
/man/distanceheatmaps.Rd
|
22e7257351207978fa6ad68a265d8ddb32150f14
|
[] |
no_license
|
cran/IntClust
|
741db502112c3407aefa5eb5d08b92e2d2a75f7a
|
235fe4a67855c96b48247aa2f02895604caf507e
|
refs/heads/master
| 2018-10-30T23:12:34.173124
| 2018-07-30T11:10:15
| 2018-07-30T11:10:15
| 55,162,571
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 610
|
rd
|
distanceheatmaps.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{distanceheatmaps}
\alias{distanceheatmaps}
\title{Determine the distance in a heatmap}
\usage{
distanceheatmaps(Data1, Data2, names = NULL, nrclusters = 7)
}
\arguments{
\item{Data1}{The resulting clustering of method 1.}
\item{Data2}{The resulting clustering of method 2.}
\item{names}{The names of the objects in the data sets. Default is NULL.}
\item{nrclusters}{The number of clusters to cut the dendrogram in. Default is 7.}
}
\description{
Internal function of \code{HeatmapPlot}
}
|
cb320cd5423587ea5c995ccc87df4664c417ef34
|
86061b2a5b230a1f30f8bf361b4a22b5e73623c4
|
/Green_Values.R
|
b6b9242a161a232717e0df0b15b865e9328f69f0
|
[] |
no_license
|
C-WWU/Seminar
|
f1e0f0ee10bdfbbe8f185bcc2239de63cd663c83
|
30d0f3f545a3b7117abf80f3e7b8dfd401d9f9a0
|
refs/heads/master
| 2023-03-06T15:42:34.040389
| 2021-02-17T15:00:42
| 2021-02-17T15:00:42
| 324,803,807
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,065
|
r
|
Green_Values.R
|
#----------------------FINAL CODE IN SOCIALLY IRRESPONSIBLE ALGORITHMS------------------------------
#install and load relevant packages
install.packages("cowplot")
install.packages("randomForest")
install.packages("pROC")
install.packages("readr")
install.packages("caret")
install.packages("e1071")
install.packages("stepPlr")
install.packages("mlbench")
install.packages("readxl")
install.packages("DMwR")
install.packages("ROSE")
install.packages("ranger")
install.packages("MASS")
install.packages("pdp")
install.packages("elasticnet")
install.packages("glmnet")
install.packages("Matrix")
install.packages("Hmisc")
library(ggplot2)
library(cowplot)
library(randomForest)
library(pROC)
library(readr)
library(caret)
library(e1071)
library(plyr)
library(dplyr)
library(stepPlr)
library(mlbench)
library(readxl)
library(DMwR)
library(ROSE)
library(ranger)
library(tidyverse)
library(MASS)
library(pdp)
library(elasticnet)
library(glmnet)
library(Matrix)
library(Hmisc)
options(max.print = 100000)
#######################
#Green Values: numeric
######################
#--------------------------------------DATA PRE-PROCESSING------------------------------------------
# load data
load("data_for_analysis.RData")
#data <- full #oder ändern zu data <- reduced_set
cols_names <- names(data)
cols_names
#define data for analysis
data_Green1 <- data[,c(305, 27:255)]
#Gibt es NAs in der DV?
sum(is.na(data_Green1$Green_Values)) #keine NAs
#ist die Variable unbalanced?
table(data_Green1$Green_Values) #Überhang zu höheren Werten, aber nicht zu stark (mean: 4,97)
max(table(data_Green1$Green_Values)/sum(table(data_Green1$Green_Values))) #no information rate 7,65%
#----------------------------------------DATA PARTITIONING------------------------------------
#Training und Test Dataset
set.seed(1997)
# Partitioning of the data: Create index matrix of selected values
index <- createDataPartition(data_Green1$Green_Values, p=.8, list= FALSE, times= 1)
# Create train_dfGeschlecht & test_dfGeschlecht
train_dfGreen1 <- data_Green1[index,]
test_dfGreen1 <- data_Green1[-index,]
#---------------------------------------------------RANDOM FOREST----------------------------------------------------
#--------------------------------------------BUILDING AND TRAINING THE MODEL---------------------------------------------
# Specify the type of training method used & number of folds --> bei uns 10-fold Cross-Validation
myControl = trainControl(
method = "cv",
number = 10,
verboseIter = TRUE,
allowParallel=TRUE,
search = "grid",
)
#set tuning grid
set.seed(1997)
myGrid = expand.grid(mtry = c(10:20),
splitrule = "extratrees",
min.node.size = c(5,10,15))
####-------tree 1: mtry, splitrule and min.node.size tunen --------------------------------------------------
# test of the ideal mtry, splitrule and min-node.size for 500 trees
set.seed(1997)
RFGreen1_1 <- train(Green_Values ~ .,
data=train_dfGreen1,
tuneGrid = myGrid,
method="ranger",
metric= "RMSE",
num.tree = 500,
trControl = myControl,
na.action = na.omit,
importance = 'impurity')
# Print model to console
RFGreen1_1
summary(RFGreen1_1)
plot(RFGreen1_1)
#best mtry:10
#splitrule: extratrees
#min.node.size used: 10
# predict outcome using model from train_df applied to the test_df
predictions <- predict(RFGreen1_1, newdata=test_dfGreen1)
MAE(predictions, test_dfGreen1$Green_Values)
RMSE(predictions, test_dfGreen1$Green_Values)
R2(predictions, test_dfGreen1$Green_Values)
#calculate Pearson coefficient for predictions and actual values
# Correlations with significance levels
pearsonGreen1_1 <- cor.test(predictions, test_dfGreen1$Green_Values, method = "pearson")
pearsonGreen1_1
spearmanGreen1_1 <- cor.test(predictions, test_dfGreen1$Green_Values, method = "spearman")
spearmanGreen1_1
####-------tree 2: num.tree prüfen --------------------------------------------------
#1000 num.tree ausprobieren --> ist mehr besser?
set.seed(1997)
RFGreen1_2 <- train(Green_Values ~ .,
data=train_dfGreen1,
tuneGrid = myGrid,
method="ranger",
metric= "RMSE",
num.tree = 1000,
trControl = myControl,
na.action = na.omit,
importance = 'impurity')
# Print model to console
RFGreen1_2
summary(RFGreen1_2)
plot(RFGreen1_2)
#mtry = 10, extratrees, min.node.size = 10
# predict outcome using model from train_df applied to the test_df
predictions <- predict(RFGreen1_2, newdata=test_dfGreen1)
MAE(predictions, test_dfGreen1$Green_Values)
RMSE(predictions, test_dfGreen1$Green_Values)
R2(predictions, test_dfGreen1$Green_Values)
#calculate Pearson coefficient for predictions and actual values
# Correlations with significance levels
pearsonGreen1_2 <- cor.test(predictions, test_dfGreen1$Green_Values, method = "pearson")
pearsonGreen1_2
spearmanGreen1_2 <- cor.test(predictions, test_dfGreen1$Green_Values, method = "spearman")
spearmanGreen1_2
#num.trees 1000 performs slightly better
####-------tree 3: Final --------------------------------------------------
#final getunte Werte einsetzen
set.seed(1997)
RFGreen1_fin <- RFGreen1_2
# Print model
RFGreen1_fin
summary(RFGreen1_fin)
#evaluate variable importance
# Mean Decrease Gini - Measure of variable importance based on the Gini impurity index used for the calculation of splits in trees.
varImp(RFGreen1_fin)
plot(varImp(RFGreen1_fin), 20, main = "Green_Values")
# predict outcome using model from train_df applied to the test_df
predictions <- predict(RFGreen1_fin, newdata=test_dfGreen1)
MAE(predictions, test_dfGreen1$Green_Values)
RMSE(predictions, test_dfGreen1$Green_Values)
R2(predictions, test_dfGreen1$Green_Values)
#calculate Pearson coefficient for predictions and actual values
# Correlations with significance levels
pearsonGreen1_fin <- cor.test(predictions, test_dfGreen1$Green_Values, method = "pearson")
pearsonGreen1_fin
spearmanGreen1_fin <- cor.test(predictions, test_dfGreen1$Green_Values, method = "spearman")
spearmanGreen1_fin
#--------------Variable Direction: Partial Plots-----------------------------------------
#checking direction of the 10 most important variables
imp <- importance(RFGreen1_fin$finalModel)
imp <- as.data.frame(imp)
impvar <- rownames(imp)[order(imp[1], decreasing=TRUE)]
impvar <- impvar[1:20]
PartialPlots <- RFGreen1_fin
PartialPlots %>% partial(pred.var = impvar[1]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[2]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[3]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[4]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[5]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[6]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[7]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[8]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[9]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[10]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[11]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[12]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[13]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[14]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[15]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[16]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[17]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[18]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[19]) %>%plotPartial
PartialPlots %>% partial(pred.var = impvar[20]) %>%plotPartial
#------------------------------------------------WHEN BEST MODEL IS FOUND-----------------------------------------------------
#save model to disk
besttree_Green1 <- RFGreen1_fin
saveRDS(besttree_Green1, "./tree_Green1.rds")
#load the model
super_model <- readRDS("./tree_Green1.rds")
print(super_model)
#######################
#Green Values 2: binary
#######################
#--------------------------------------DATA PRE-PROCESSING------------------------------------------
# load data
load("data_for_analysis.RData")
cols_names <- names(data)
cols_names
#define data for analysis
data_Green2 <- data[,c(306, 27:255)]
#Gibt es NAs in der DV?
sum(is.na(data_Green2$Green2)) #keine NAs
#ist die Variable unbalanced?
table(data_Green2$Green2) #Verteilung ca 1:6 --> in Tests mit beachten!
max(table(data_Green2$Green2)/sum(table(data_Green2$Green2))) #no information rate 84%
#----------------------------------------DATA PARTITIONING------------------------------------
#Training und Test Dataset
set.seed(1997)
# Partitioning of the data: Create index matrix of selected values
index <- createDataPartition(data_Green2$Green2, p=.8, list= FALSE, times= 1)
# Create train_dfGeschlecht & test_dfGeschlecht
train_dfGreen2 <- data_Green2[index,]
test_dfGreen2 <- data_Green2[-index,]
#---------------------------------------------------RANDOM FOREST----------------------------------------------------
#--------------------------------------------BUILDING AND TRAINING THE MODEL---------------------------------------------
# Specify the type of training method used & number of folds --> bei uns 10-fold Cross-Validation
set.seed(1997)
myControl1 = trainControl(
method = "cv",
number = 10,
verboseIter = TRUE,
summaryFunction = twoClassSummary,
classProbs = TRUE,
allowParallel=TRUE,
sampling = "smote",
search = "grid"
)
#set tuning grid
set.seed(1997)
myGrid = expand.grid(mtry = c(10:20),
splitrule = "extratrees",
min.node.size = c(5,10,15))
####-------tree 1: mtry, splitrule and min.node.size tunen --------------------------------------------------
# test of the ideal mtry, splitrule and min-node.size for 500 trees
set.seed(1997)
RFGreen2_1 <- train(Green2 ~ .,
data=train_dfGreen2,
tuneGrid = myGrid,
method="ranger",
metric= "ROC",
num.tree = 500,
na.action = na.omit,
trControl = myControl1,
importance = 'impurity')
# Print models to console
RFGreen2_1
summary(RFGreen2_1)
plot(RFGreen2_1)
#mtry = 13, extratrees, min.node.size = 10
# predict outcome using model from train_df applied to the test_df
predictions1 <- predict(RFGreen2_1, newdata=test_dfGreen2)
# Create confusion matrix
confusionMatrix(data=as.factor(predictions1), as.factor(test_dfGreen2$Green2))
#check for auc
test_roc <- function(model, data) {
roc(test_dfGreen2$Green2,
predict(model, data, type = "prob")[, "Ja"])
}
#model auc: 0,6346
RFGreen2_1 %>%
test_roc(data = test_dfGreen2) %>%
auc()
#check ROC plot
model_list <- list(Model1 = RFGreen2_1)
model_list_roc <- model_list %>%
map(test_roc, data = test_dfGreen2)
model_list_roc %>%
map(auc)
results_list_roc <- list(NA)
num_mod <- 1
for(the_roc in model_list_roc){
results_list_roc[[num_mod]] <-
tibble(tpr = the_roc$sensitivities,
fpr = 1 - the_roc$specificities,
model = names(model_list)[num_mod])
num_mod <- num_mod + 1
}
results_df_roc <- bind_rows(results_list_roc)
# Plot ROC curve
custom_col <- c("#000000", "#009E73", "#0072B2", "#D55E00", "#CC79A7")
ggplot(aes(x = fpr, y = tpr, group = model), data = results_df_roc) +
geom_line(aes(color = model), size = 1) +
scale_color_manual(values = custom_col) +
geom_abline(intercept = 0, slope = 1, color = "gray", size = 1) +
theme_bw(base_size = 18)
####-------tree 2: num.tree prüfen --------------------------------------------------
#1000 für num.tree ausprobieren --> ist mehr besser?
#set random seed again
set.seed(1997)
RFGreen2_2 <- train(Green2 ~ .,
data=train_dfGreen2,
tuneGrid = myGrid,
method="ranger",
metric= "ROC",
num.tree = 1000,
na.action = na.omit,
trControl = myControl1,
importance = 'impurity')
# Print models to console
RFGreen2_2
summary(RFGreen2_2)
plot(RFGreen2_2)
#mtry = 13, extratrees, min.node.size = 10
# predict outcome using model from train_df applied to the test_df
predictions2 <- predict(RFGreen2_2, newdata=test_dfGreen2)
# Create confusion matrix
confusionMatrix(data=as.factor(predictions2), as.factor(test_dfGreen2$Green2))
#check for auc
test_roc <- function(model, data) {
roc(test_dfGreen2$Green2,
predict(model, data, type = "prob")[, "Ja"])
}
#model auc: 0,6341
RFGreen2_2 %>%
test_roc(data = test_dfGreen2) %>%
auc()
#ROC plot
model_list <- list(Model1 = RFGreen2_2)
model_list_roc <- model_list %>%
map(test_roc, data = test_dfGreen2)
model_list_roc %>%
map(auc)
results_list_roc <- list(NA)
num_mod <- 1
for(the_roc in model_list_roc){
results_list_roc[[num_mod]] <-
tibble(tpr = the_roc$sensitivities,
fpr = 1 - the_roc$specificities,
model = names(model_list)[num_mod])
num_mod <- num_mod + 1
}
results_df_roc <- bind_rows(results_list_roc)
# Plot ROC curve
custom_col <- c("#000000", "#009E73", "#0072B2", "#D55E00", "#CC79A7")
ggplot(aes(x = fpr, y = tpr, group = model), data = results_df_roc) +
geom_line(aes(color = model), size = 1) +
scale_color_manual(values = custom_col) +
geom_abline(intercept = 0, slope = 1, color = "gray", size = 1) +
theme_bw(base_size = 18)
#better num.trees: 500 trees sorts 1 person more correctly
####-------tree 3: Final --------------------------------------------------
#final getunte Werte einsetzen
set.seed(1997)
RFGreen2_fin <- RFGreen2_1
# Print models
RFGreen2_fin
summary(RFGreen2_fin)
#evaluate variable importance
# Mean Decrease Gini - Measure of variable importance based on the Gini impurity index used for the calculation of splits in trees.
varImp(RFGreen2_fin)
plot(varImp(RFGreen2_fin), 20, main = "Green_Values")
# predict outcome using model from train_df applied to the test_df
predictions3 <- predict(RFGreen2_fin, newdata=test_dfGreen2)
# Create confusion matrix
confusionMatrix(data=as.factor(predictions3), as.factor(test_dfGreen2$Green2))
#check for auc
test_roc <- function(model, data) {
roc(test_dfGreen2$Green2,
predict(model, data, type = "prob")[, "Ja"])
}
#model auc: 0,6346
RFGreen2_fin %>%
test_roc(data = test_dfGreen2) %>%
auc()
#compare different ROC plots
model_list <- list(Model1 = RFGreen2_1,
Model2 = RFGreen2_2)
model_list_roc <- model_list %>%
map(test_roc, data = test_dfGreen2)
model_list_roc %>%
map(auc)
results_list_roc <- list(NA)
num_mod <- 1
for(the_roc in model_list_roc){
results_list_roc[[num_mod]] <-
tibble(tpr = the_roc$sensitivities,
fpr = 1 - the_roc$specificities,
model = names(model_list)[num_mod])
num_mod <- num_mod + 1
}
results_df_roc <- bind_rows(results_list_roc)
# Plot ROC curve
custom_col <- c("#000000", "#009E73", "#0072B2", "#D55E00", "#CC79A7")
ggplot(aes(x = fpr, y = tpr, group = model), data = results_df_roc) +
geom_line(aes(color = model), size = 1) +
scale_color_manual(values = custom_col) +
geom_abline(intercept = 0, slope = 1, color = "gray", size = 1) +
theme_bw(base_size = 18)
#--------------Variable Direction: Partial Plots-----------------------------------------
#checking direction of the 10 most important variables
imp <- importance(RFGreen2_fin$finalModel)
# Rank predictors by random-forest importance and draw a partial
# dependence plot (pdp) for each of the 20 most important variables,
# then persist / reload the selected model.
imp <- as.data.frame(imp)
# imp[[1]] extracts the importance vector; order() on the one-column
# data frame imp[1] is fragile (list input).
impvar <- rownames(imp)[order(imp[[1]], decreasing=TRUE)]
impvar <- impvar[1:20]
PartialPlots <- RFGreen2_fin
# One plot per top variable, replacing 20 copy-pasted
# `partial() %>% plotPartial` lines. The explicit print() is required
# because lattice/ggplot objects only auto-print at top level.
for (v in impvar) {
  print(plotPartial(partial(PartialPlots, pred.var = v, which.class = "Ja")))
}
#------------------------------------------------WHEN BEST MODEL IS FOUND-----------------------------------------------------
#save model to disk
besttree_Green2 <- RFGreen2_fin
saveRDS(besttree_Green2, "./tree_Green2.rds")
#load the model
besttree_Green2 <- readRDS("./tree_Green2.rds")
print(besttree_Green2)
|
f330389081bab83b63905ad911a2776e81e39112
|
92e597e4ffc9b52cfb6b512734fb10c255543d26
|
/man/firstElement.Rd
|
96af7264d5b2ce4ccdc15e8f00e52b3394770cb8
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.utils
|
3b978dba2a86a01d3c11fee1fbcb965dd15a710d
|
0930eaeb9303cd9359892c1403226a73060eed5b
|
refs/heads/master
| 2023-05-12T15:26:14.529039
| 2023-04-21T04:28:29
| 2023-04-21T04:28:29
| 60,531,844
| 9
| 1
|
MIT
| 2023-04-21T04:28:30
| 2016-06-06T13:52:43
|
R
|
UTF-8
|
R
| false
| true
| 303
|
rd
|
firstElement.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vector.R
\name{firstElement}
\alias{firstElement}
\title{First Element}
\usage{
firstElement(x)
}
\arguments{
\item{x}{object}
}
\value{
first element: x[1]
}
\description{
Returns the first element using the function head
}
|
3e2c04218020fefdf364e94a0105206f480c3dc9
|
c871ac402d8b98b7cfba55092fe7b0ae1480e74e
|
/Anc_char_state/ANS.R
|
8f24e84cdd0cc66f1950e5912fbc0a7022c04c1f
|
[] |
no_license
|
sergeitarasov/Course_Helsinki_March2019
|
6a7849119336ce0d371dd6f63a8b7541ddd26b9d
|
46b4b03e4841c0271c680b425ffda6d0ac8cf607
|
refs/heads/master
| 2023-01-12T01:30:45.583752
| 2023-01-03T17:42:54
| 2023-01-03T17:42:54
| 177,663,343
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,291
|
r
|
ANS.R
|
# Tutorial script: ancestral character-state estimation.
# Simulates a 4-state character on a birth-death tree (phytools), then
# compares traditional Mk-style models (2-rate, 1-rate, ARD) and hidden
# Markov models (HMM) via marginal reconstruction with corHMM::rayDISC.
library("corHMM")
library("phytools")
library("plyr")
#########
# Simulating data
#########
# simulate tree (200 tips, total depth rescaled to 100)
tree<-pbtree(n=200, scale=100)
plot(tree)
# simulate character history
# make Q matrix (4-state instantaneous rate matrix; each row sums to 0)
Q <- matrix(
  c(
    -0.03, 0.01, 0.01, 0.01,
    0.03, -0.09, 0.03, 0.03,
    0.01, 0.01, -0.03, 0.01,
    0.03, 0.03, 0.03, -0.09
  ), 4,4, byrow = T
)
# simulate character history on tree
# (NB: `hist` shadows base::hist for the rest of the script)
hist <- sim.history(tree, Q, nsim=1)
plot(hist)
#########
# Inference using ML and corHMM package (Traditional model)
#########
# make 2-rate Q matrix for inference
# (integer entries index shared rate classes; NA diagonal per corHMM)
Qinf <- matrix(
  c(
    0, 1, 1, 1,
    2, 0, 2, 2,
    1, 1, 0, 1,
    2, 2, 2, 0
  ), 4,4, byrow = T
)
diag(Qinf) <- NA
# Inference using 2 rates
taxa <- cbind(hist$tip.label, hist$states)
rate2 <- rayDISC(hist, taxa, rate.mat=Qinf, node.states="marginal",
  model="ARD", root.p="maddfitz")
# plot marginal ancestral-state reconstruction
plotRECON(tree, rate2$states,title="2-rate Matrix")
# make 1-rate Q matrix for inference (all transitions share one rate)
Qinf1 <- matrix(
  c(
    0, 1, 1, 1,
    1, 0, 1, 1,
    1, 1, 0, 1,
    1, 1, 1, 0
  ), 4,4, byrow = T
)
diag(Qinf1) <- NA
# Inference using 1 rate
taxa <- cbind(hist$tip.label, hist$states)
rate1 <- rayDISC(hist, taxa, rate.mat=Qinf1, node.states="marginal",
  model="ARD", root.p="maddfitz")
# plot
plotRECON(tree, rate1$states,title="1-rate Matrix")
# Compare AICs (printing the fit objects shows the AIC)
rate1
rate2
# make ARD Q matrix for inference (all 12 off-diagonal rates free)
QinfARD <- matrix(
  c(
    0, 1, 2, 3,
    4, 0, 5, 6,
    7, 8, 0, 9,
    10, 11, 12, 0
  ), 4,4, byrow = T
)
diag(QinfARD) <- NA
taxa <- cbind(hist$tip.label, hist$states)
# NOTE(review): this overwrites the 1-rate fit stored in `rate1` above
# with the ARD fit -- a distinct name (e.g. rateARD) would keep both.
rate1 <- rayDISC(hist, taxa, rate.mat=QinfARD, node.states="marginal",
  model="ARD", root.p="maddfitz")
#########
# Inference using ML and corHMM package (HMM)
#########
# Collapse the observed states into ambiguous pairs (1&2, 3&4) so the
# underlying rate classes become "hidden" and must be inferred.
char.recode<-mapvalues(hist$states, from = c(1, 2, 3,4),
  to = c('1&2', '1&2', '3&4','3&4') )
# Inference using 2 rate HMM
taxa.hmm <- cbind(hist$tip.label, char.recode)
rate2.hmm <- rayDISC(hist, taxa.hmm, rate.mat=Qinf, node.states="marginal",
  model="ARD", root.p="maddfitz")
# Inference using 1 rate HMM
rate1.hmm <- rayDISC(hist, taxa.hmm, rate.mat=Qinf1, node.states="marginal",
  model="ARD", root.p="maddfitz")
|
c0c3dec3eca78a641c79be5b7ab946cd7c4b2b2a
|
ccbdf07cc6a176b3337dd3e650cb8904f7ab80bc
|
/global.R
|
83011b3139efffd3ca1e381f417fb142803b29a0
|
[
"MIT"
] |
permissive
|
keegangeorge/shiny_app_dev
|
29f32a045d26376ea40e75c5318a4aa32eab9cb5
|
db7408070e15b66825d8ec32bd379753e3981038
|
refs/heads/main
| 2023-06-26T04:01:07.554448
| 2021-07-28T23:58:54
| 2021-07-28T23:58:54
| 390,544,542
| 0
| 0
|
MIT
| 2021-07-29T00:07:07
| 2021-07-29T00:07:07
| null |
UTF-8
|
R
| false
| false
| 703
|
r
|
global.R
|
# Load the libraries and import function files here.
# Global.R is run one time at app initialization.
library(bs4Dash)
library(shiny)
library(fresh)
# other imports -- the three package vectors are kept as globals (in
# case other files reference them), but loaded through a single loop
# instead of three identical copies. Load order is unchanged; the
# duplicates in packages12 (shinyWidgets/bs4Dash/shiny/fresh) are
# harmless because library() is idempotent.
packages1<-c("oligo","GEOquery","affy","limma","arrayQualityMetrics","sva","Biobase","affyPLM", "simpleaffy")#, "AffyBatch")
packages2<-c("ggplot2", "pheatmap")
packages12<-c("stringr","R.utils", "EnhancedVolcano", "shinyWidgets", "bs4Dash", "shiny", "fresh")
for (pkg in c(packages1, packages2, packages12)) {
  library(pkg, character.only = TRUE)
}
#Import Functions from External R scripts.
source("functions/testFunction.R", local = TRUE)$value
|
233712715d5bad4b256911efa980f27e7795beab
|
c5cdb6ddfad911d21273feddd05b082c75c29f27
|
/R/sim.index.R
|
67e9e2ade3e3e2b34eda6864f415f4562150a1be
|
[] |
no_license
|
cran/ONETr
|
6d313681e1a124c521fdb737e469f2af8867f310
|
d5f427e9f697bab27e634b120fb140de14fef9f2
|
refs/heads/master
| 2020-05-18T06:26:09.646327
| 2015-08-23T00:00:00
| 2015-08-23T00:00:00
| 21,559,611
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,154
|
r
|
sim.index.R
|
# Similarity between the outputs of FUN() applied to two job lists,
# based on shared ".attrs.id" identifiers.
#
# Args:
#   list1, list2: objects accepted by FUN.
#   FUN:          function returning a data frame (or named list) that
#                 carries an ".attrs.id" column of identifiers.
#   index:        which similarity index to report -- "sd"
#                 (Sorensen-Dice, default), "ji" (Jaccard), or "all"
#                 (prints both in a list).
#
# Returns: a named list with the requested rounded index value(s), or
# (via message) nothing when FUN's output lacks ".attrs.id".
sim.index <- function(list1, list2, FUN, index=c("sd","ji","all")){
  # match.arg() resolves the default vector to "sd"; previously the raw
  # length-3 default reached `if (index == "sd")`, which errors on
  # R >= 4.2 (length > 1 condition).
  index <- match.arg(index)
  job1 <- FUN(list1)
  job2 <- FUN(list2)
  if(".attrs.id" %in% names(job1) && ".attrs.id" %in% names(job2)){
    ids1 <- job1$.attrs.id
    ids2 <- job2$.attrs.id
    n_shared <- length(intersect(ids1, ids2))
    sd <- 2*n_shared/(length(unique(ids1))+length(unique(ids2))) # Sorensen-Dice index
    ji <- n_shared/length(union(ids1, ids2)) # Jaccard index
    if(index == "sd"){
      list("Sorensen-Dice index"=round(sd,2))
    }
    else if(index == "ji"){
      list("Jaccard index"=round(ji,2))
    }
    else{
      print(list("Sorensen-Dice index"=round(sd,2),"Jaccard index"=round(ji,2))) # print both in a list
    }
  }
  else{
    message("This function cannot yet handle this job data type. Please try another.")
  }
}
|
c44293d5ccef7774b28b4510afd5e12ecd16ac5c
|
63ef3937256d030123bcb2aac6c08d73c7a372bb
|
/tests/testthat/test-mcmc-nnls.R
|
e6d9dc7d8762e02e8b5e1633381e6143601e4f27
|
[] |
no_license
|
shackett/simmer
|
740c3c8caa2b6c0c6244ed9e073b6f5eab322183
|
7138a45fb9653ce50d8556676a9fd1cc00bf0395
|
refs/heads/master
| 2020-03-30T15:56:51.864765
| 2016-09-16T15:38:04
| 2016-09-16T15:38:04
| 58,556,604
| 7
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,443
|
r
|
test-mcmc-nnls.R
|
test_that("fit_reaction_equations applies the MCMC-NNLS algorithm to one reaction equation", {
  # Fixtures shipped with the simmer package: a summary table of
  # reaction mechanisms and the serialized reaction forms they index.
  options(stringsAsFactors = F)
  rMech_summary_table <- suppressMessages(readr::read_tsv(system.file("extdata", "reactionEqn_fitting", "rMech_summary_table.tsv", package = "simmer")))
  reactionForms <- suppressMessages(readr::read_rds(system.file("extdata", "reactionEqn_fitting", "reactionForms.Rds", package = "simmer")))
  # Sanity-check the fixtures agree with each other.
  expect_equal(colnames(rMech_summary_table), c("reaction", "rMech", "modelType"))
  expect_equal(rMech_summary_table$rMech, names(reactionForms))
  markov_pars <- list()
  markov_pars$sample_freq <- 20 #what fraction of markov samples are reported (this thinning of samples decreases sample autocorrelation)
  markov_pars$n_samples <- 200 #how many total markov samples are desired
  markov_pars$burn_in <- 0 #how many initial samples should be skipped
  # NOTE(review): markov_pars is never passed to fit_reaction_equations;
  # the final expectation presumably relies on matching defaults inside
  # the function -- verify.
  fitted_reaction_equations <- fit_reaction_equations(reactionForms[[1]])
  expect_equal(names(fitted_reaction_equations), names(reactionForms[1]))
  expect_equal(names(fitted_reaction_equations[[1]]), c("kineticPars", "all_species", "kineticParPrior", "markovChain",
    "logLikelihood", "occupancyEq", "metabolites", "enzymes", "flux",
    "specSD", "specCorr"))
  expect_equal(nrow(fitted_reaction_equations[[1]]$markovChain), markov_pars$n_samples)
})
|
6a7e42b1f364febd39937c369f87b078cc440d30
|
dda9309eb6ab192ad4e955a46e5620791a9f9d05
|
/inst/doc/FishPhyloMaker_vignette.R
|
b8aa09c989405ffef3e813bfaaec577e8c9109aa
|
[
"MIT"
] |
permissive
|
cran/FishPhyloMaker
|
a9fd0aa90f4c9433b80508fbead9885418b92ba6
|
e2e74e2bdaa9be715907b4f148cbdb8fad1c8686
|
refs/heads/master
| 2023-08-21T19:20:02.438998
| 2021-09-15T07:00:10
| 2021-09-15T07:00:10
| 406,826,654
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,734
|
r
|
FishPhyloMaker_vignette.R
|
## ---- include = FALSE---------------------------------------------------------
# knitr chunk defaults for this purled vignette; the chunks that follow
# in the file were extracted as comments (eval = FALSE in the Rmd).
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## ----install_pkg, echo=TRUE, eval=FALSE---------------------------------------
# devtools::install_github("GabrielNakamura/FishPhyloMaker", ref = "main")
## ----read_data, eval=FALSE, echo=TRUE-----------------------------------------
# library(FishPhyloMaker)
# data(neotropical_comm)
# data_comm <- neotropical_comm[, -c(1, 2)] # removing Latitude and Longitude
## ----taxon_data, echo=TRUE, eval=FALSE----------------------------------------
# taxon_data <- FishTaxaMaker(data_comm, allow.manual.insert = TRUE)
# Characidae
# Characiformes
# Characidae
# Characiformes
# Characidae
# Characiformes
# Loricariidae
# Siluriformes
# Characidae
# Characiformes
# Cichlidae
# Cichliformes
# Crenuchidae
# Characiformes
# Gymnotidae
# Gymnotiformes
# Loricariidae
# Siluriformes
# Loricariidae
# Siluriformes
# Loricariidae
# Siluriformes
# Loricariidae
# Siluriformes
# Heptapteridae
# Siluriformes
# Characidae
# Characiformes
# Loricariidae
# Siluriformes
# Characidae
# Characiformes
## ----phylo_make, eval=FALSE, echo=TRUE----------------------------------------
# phylo_fish_streams <- FishPhyloMaker(data = taxon_data,
# return.insertions = TRUE,
# insert.base.node = TRUE,
# progress.bar = TRUE)
## ----plot_phylo, eval=FALSE, echo=TRUE----------------------------------------
# library(ggtree)
# tree.PR<- phylo_fish_streams$Phylogeny
#
# tree.PR <- ape::makeNodeLabel(tree.PR)
# phylo <- tree.PR
#
# rm.famNames <- which(table(taxon_dataPR$f) == 1) # monotipic families
# names.fam <- setdiff(unique(taxon_dataPR$f), names(rm.famNames)) # removing monotipic families from the names
#
# for (i in 1:length(names.fam)) {
# set <- subset(taxon_dataPR, f == names.fam[i])
# phylo <- ape::makeNodeLabel(phylo, "u", nodeList = list(Fam_name = set$s))
#
# phylo$node.label[which(phylo$node.label ==
# "Fam_name") ] <- paste(set$f[1])
# }
#
# pos.node <- unlist(lapply(names.fam, function(x){
# which(phylo$node.label == x) + length(phylo$tip.label)
# }))
#
# df.phylo <- data.frame(Fam.names = names.fam,
# node.number = pos.node)
#
# plot.base <- ggtree(phylo) + theme_tree2()
# plot1 <- revts(plot.base) + scale_x_continuous(labels=abs)
#
#
# PR.PG <- plot1 + geom_hilight(data = df.phylo, aes(node = node.number, fill = Fam.names),
# alpha = .6) +
# scale_fill_viridis(discrete = T, name = "Family names")
|
b5bac5850f03ddab200e990ba1d64b3e374c5272
|
abb02adb369d8edf6499d048d903db1f30a3cedc
|
/run_analysis.R
|
d04428b93e6686f881086620120582961e665683
|
[] |
no_license
|
WajihaSaid/run_analysis
|
63bcd948e1b56761aea65b88bfe9f63cf8f653b4
|
583df5ccbf442409172efcefcd9ea680851d3fd7
|
refs/heads/master
| 2021-01-19T07:53:40.044636
| 2014-08-24T19:10:13
| 2014-08-24T19:10:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,119
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project: download the UCI HAR dataset,
# merge test/train sets, keep mean()/std() features, label activities,
# and write a tidy per-subject/per-activity average table.
#create folder for storing, reading and writing data
if(!file.exists("./run_analysis")) {dir.create("./run_analysis")}
#get wd to restore to at the end of the procedure
wd <- getwd()
#set wd to the directory where data is saved
setwd("./run_analysis")
#download and unzip data. Store paths to files in filehandle
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip","./temp",method="curl",quiet=TRUE)
filehandle <- unzip("./temp", exdir="./")
#load relevant files from filehandle
# NOTE(review): the numeric indices into filehandle assume a fixed file
# order inside the zip archive -- confirm against names(filehandle).
features <- read.table(filehandle[2],sep=" ", colClasses="character")
activity_labels <-read.table(filehandle[1],col.names=c("index","labels"))
testsubject <- read.table(filehandle[14],col.names='subject_name')
testx <- read.table(filehandle[15])
testy <- read.table(filehandle[16],col.names='activity_name')
trainsubject <- read.table(filehandle[26],col.names='subject_name')
trainx <- read.table(filehandle[27])
trainy <- read.table(filehandle[28],col.names='activity_name')
#renaming column names on test x and test y data with feature names, this is Step 4
library(data.table)
colnames(testx) <- features$V2
colnames(trainx) <- features$V2
#Consolidating & merging data, writing it out to file. This is Step 1
totaltest <- cbind(testsubject, testy, testx)
totaltrain <- cbind(trainsubject, trainy, trainx)
totaldata <- rbind(totaltest,totaltrain)
write.table(totaldata,file="totaldata.txt",sep=" ",row.names=FALSE)
#Extracting only the measures with mean and stdev, this is Step 2
# (escaped parentheses restrict the match to mean()/std() features)
extracteddata <-totaldata[,c(grepl("subject|activity|mean\\(\\)|std\\(\\)",colnames(totaldata),perl=TRUE))]
#Replacing activity numbers with descriptive activity names, Step 3
extracteddata$activity_name<-factor(extracteddata$activity_name,activity_labels$index,activity_labels$labels)
#Creating tidy data set and saving to file, Step 5
tidydata <- aggregate(extracteddata[,3:ncol(extracteddata)],by=list(subject_name=extracteddata$subject_name,activity_name=extracteddata$activity_name),mean)
write.table(tidydata,file="tidydata.txt",sep=" ",row.names=FALSE)
#reset working directory
setwd(wd)
|
65fd9229e446c897625a190493865bd285777a97
|
3646e6edc7b38488a562a99d34c4f764c7163b23
|
/WTCTools/man/leaderboard15.Rd
|
eb125144652fe62e3d5fd4cf090b298b8ebdbdeb
|
[] |
no_license
|
CSJCampbell/WTCTools
|
2d98f0659ffc79edaa10a0a949799e3a577a0d64
|
e97b1af05ad2003a5a7811f7f7acc72dfddb7773
|
refs/heads/master
| 2021-06-02T17:17:12.413625
| 2017-09-21T01:18:12
| 2017-09-21T01:18:12
| 41,179,428
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 705
|
rd
|
leaderboard15.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scoreSequence.R
\docType{data}
\name{leaderboard15}
\alias{leaderboard15}
\title{World Team Championships 2015 Team Leaderboard}
\format{Data frame with columns:\itemize{
\item Rank integer noting team position
\item Team character name of team
\item Rounds.won integer number of rounds won
\item Team.SoS integer strength of schedule
\item Games.won integer number of games won
\item Total.CP integer control points scored
\item Total.VP integer army points scored
}}
\description{
Team results for WTC 2015.
}
\examples{
hist(leaderboard15$Total.CP, col = "#99334455")
}
\keyword{datasets}
|
3bf7803e0aac09f4053ed8f0b95ded01f82944b1
|
93def2f4add7bbb43889435eb42fb6dda5eec619
|
/hw2/hw2_Adam.R
|
bcccdf48b1d11a0bfd96ccfd922979e0954eaad2
|
[] |
no_license
|
mikeasilva/data-621-group-projects
|
9e13c9df18956e05d6c47427ae8073204ba47cae
|
0a0c33e59322e382de3d0191703b157c48d4f072
|
refs/heads/master
| 2020-07-23T19:47:43.130227
| 2019-12-19T17:48:26
| 2019-12-19T17:48:26
| 207,687,536
| 1
| 6
| null | 2019-12-19T17:48:28
| 2019-09-11T00:24:24
|
HTML
|
UTF-8
|
R
| false
| false
| 1,988
|
r
|
hw2_Adam.R
|
##################################################
# Functions to compute classification measures.
##################################################
# I know you have these already, but I thought
# it may be good practice to do my own.
#
# Overall classification accuracy: share of rows whose predicted label
# equals the actual label. `df` is used only for its row count.
accuracy <- function(df, pred, act){
  n_correct <- sum(pred == act)
  n_correct / nrow(df)
}
# Misclassification rate: share of rows where the predicted and actual
# labels disagree (complement of accuracy).
errors <- function(df, pred, act){
  n_wrong <- sum(pred != act)
  n_wrong / nrow(df)
}
# Precision (positive predictive value): TP / (TP + FP),
# with positives coded as 1 and negatives as 0.
precision <- function(df, pred, act){
  tp <- sum(pred == 1 & act == 1)
  fp <- sum(pred == 1 & act == 0)
  tp / (tp + fp)
}
# Sensitivity (recall / true positive rate): TP / all actual positives.
sensitivity <- function(df, pred, act){
  tp <- sum(pred == 1 & act == 1)
  tp / sum(act == 1)
}
# Specificity (true negative rate): TN / (TN + FP).
specificity <- function(df, pred, act){
  tn <- sum(pred == 0 & act == 0)
  fp <- sum(pred == 1 & act == 0)
  tn / (tn + fp)
}
# False positive rate: FP / all actual negatives
# (equals 1 - specificity for binary 0/1 labels).
false_positive <- function(df, pred, act){
  fp <- sum(pred == 1 & act == 0)
  fp / sum(act == 0)
}
# F1 score: harmonic mean of precision and sensitivity.
# Relies on the sibling helpers precision() and sensitivity() above.
f1 <- function(df, pred, act){
  prec <- precision(df, pred, act)
  sens <- sensitivity(df, pred, act)
  2 * (prec * sens) / (prec + sens)
}
########################
# ROC Curve generation
########################
# Sweep the decision threshold over [0, 1] in steps of 0.01, compute
# the (FPR, TPR) pair at each cut-off, plot the ROC curve and annotate
# it with the AUC (rectangle approximation, as in the original).
#
# Fixes vs. the original:
#  * the accumulator was created with ncol = 4 but assigned only 3
#    column names, leaving an NA fourth name that breaks the rbind();
#  * dplyr is no longer required -- the one-step lag used for the AUC
#    is computed with base R;
#  * results are precomputed in vectors instead of growing a data
#    frame with rbind() inside the loop.
# Relies on the sibling helpers sensitivity() and false_positive().
ROC <- function(df, prob, act){
  thresholds <- seq(0, 1, 0.01)
  TPR <- vapply(
    thresholds,
    function(th) sensitivity(df, ifelse(prob >= th, 1, 0), act),
    numeric(1)
  )
  FPR <- vapply(
    thresholds,
    function(th) false_positive(df, ifelse(prob >= th, 1, 0), act),
    numeric(1)
  )
  outcome <- data.frame(prob = thresholds, TPR = TPR, FPR = FPR, area = 0)
  # Get AUC: width of each FPR step times the TPR at that step
  # (base-R replacement for dplyr::lag(FPR) - FPR).
  prev_FPR <- c(NA_real_, outcome$FPR[-nrow(outcome)])
  outcome$area <- (prev_FPR - outcome$FPR) * outcome$TPR
  plot.new()
  plot(x = outcome$FPR, y = outcome$TPR, type = "l", main = "ROC Curve",
       xlab = "False Positive Rate (FPR)",
       ylab = "True Positive Rate (TPR)")
  abline(a = 0, b = 1, lty = 2)
  text(0.6, 0.1, paste("AUC = ", round(sum(outcome$area, na.rm = T), 3)))
}
# Render the ROC curve for the fitted model; assumes `df` (defined
# earlier in the script) carries `scored.probability` and `class`
# columns -- TODO confirm against the data-loading step.
ROC(df,df$scored.probability,df$class)
|
2cf916aeec968f4a498df2aced90c6645ca1253c
|
adc2fd58b4326ecfaf0e743698246da8846c981d
|
/Data_Summarization/lab/Data_Summarization_Lab.R
|
fb6c892ef2e77b5b2ef89dca9ca9fd60031beb55
|
[
"MIT"
] |
permissive
|
andrewejaffe/summerR_2016
|
c26d8e709c26100e1e40df3d9c1ad213fa34d94a
|
8f47a6735f039499eba66e30e0b40c59a36c1c6b
|
refs/heads/gh-pages
| 2020-04-06T03:36:03.439029
| 2016-06-24T19:25:02
| 2016-06-24T19:25:02
| 60,783,896
| 0
| 4
| null | 2016-06-14T02:00:58
| 2016-06-09T15:08:11
|
HTML
|
UTF-8
|
R
| false
| false
| 1,199
|
r
|
Data_Summarization_Lab.R
|
####################
# Data Summarization - Lab
# 6/15/2016
####################
# Bike Lanes Dataset: BikeBaltimore is the Department of Transportation's bike program.
# https://data.baltimorecity.gov/Transportation/Bike-Lanes/xzfj-gyms
# Download as a CSV (like the Monuments dataset) in your current working directory
# Note its also available at:
# http://www.aejaffe.com/summerR_2016/data/Bike_Lanes.csv
# 1. How many bike "lanes" are currently in Baltimore?
# You can assume each observation/row is a different bike "lane"
# 2. How many (a) feet and (b) miles of bike "lanes" are currently in Baltimore?
# 3. How many types of bike lanes are there? Which type has
# (a) the most number of and (b) longest average bike lane length?
# 4. How many different projects do the "bike" lanes fall into?
# Which project category has the longest average bike lane?
# 5. What was the average bike lane length per year that they were installed?
# 6. (a) Numerically [hint: `quantile()`] and
# (b) graphically [hint: `hist()` or `plot(density())`]
# describe the distribution of bike "lane" lengths.
# 7. Then describe as above, after stratifying by i) type then ii) number of lanes
|
b612042dd5348ffb5dda8479b9610d11c87734c5
|
1ef6c2a474a51bb9cf01a8c523bae221e0c6665b
|
/Codes/1_GetData.R
|
a5afb9c9875396cc9fb6f7eda137a58487221a7c
|
[] |
no_license
|
acszczep/f-curve
|
0b72347f71fd19cea792417cd557b95925a478d4
|
70d72ef26dd69b627f64b9e0049b520128eb45c2
|
refs/heads/master
| 2022-06-17T22:02:07.532757
| 2020-05-16T10:16:28
| 2020-05-16T10:16:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,960
|
r
|
1_GetData.R
|
#-------------------------------------------------------------------------------------
# A daily fever curve for the Swiss economy
#-------------------------------------------------------------------------------------
# Feel free to copy, adapt, and use this code for your own purposes at
# your own risk.
#
# Please cite:
# Burri, Marc and Daniel Kaufmann (2020): "A daily fever curve for the
# Swiss economy", IRENE Working Paper No., University of Neuchâtel,
# https://github.com/dankaufmann/f-curve
#
#-------------------------------------------------------------------------------------
# V 1.0
#-------------------------------------------------------------------------------------
# Packages and settings
#rm(list = ls())
source("AllPackages.R")
endDate <- Sys.Date()
updateNews <- FALSE # Choose whether you want to update news (takes up to 20 min)
#-------------------------------------------------------------------------------------
# Download the data
#-------------------------------------------------------------------------------------
# Macro data (only GDP, rest loaded in separate file) and trendecon
download.file(url = "https://www.seco.admin.ch/dam/seco/en/dokumente/Wirtschaft/Wirtschaftslage/VIP%20Quartalssch%C3%A4tzungen/qna_p_csa.xls.download.xls/qna_p_csa.xls", destfile = "../Data/PIBSuisse.xls", mode="wb")
download.file(url = "https://raw.githubusercontent.com/trendecon/data/master/daily/trendecon_sa.csv", destfile = "../Data/TrendEcon.csv", mode="wb")
# Get news indicators
if (updateNews) {
  # Scrape News from Web and save in all.RData file
  # Note:
  # - can take up to 20 min.
  # - Depends on Python (including working selenium) and cURL
  updateNewsIndicator()
}
# Pre-scraped news sentiment series; these .RData files populate the
# data frames df_all_*, df_nzz_*, df_fuw_*, df_ta_* used below.
load(file="../Data/News/all.RData")
load(file="../Data/News/nzz.RData")
load(file="../Data/News/fuw.RData")
load(file="../Data/News/ta.RData")
# Convert each news data frame to an xts series truncated to the
# 2000-01-01 .. endDate window.
News.CH <- df_all_ch %>%
  select("time", "mean") %>%
  ts_xts() %>%
  ts_span(start = "2000-01-01", end = endDate)
News.FOR <- df_all_int %>%
  select("time", "mean") %>%
  ts_xts() %>%
  ts_span(start = "2000-01-01", end = endDate)
News.NZZ.CH <- df_nzz_ch %>%
  select("time", "mean") %>%
  ts_xts() %>%
  ts_span(start = "2000-01-01", end = endDate)
News.NZZ.FOR <- df_nzz_int %>%
  select("time", "mean") %>%
  ts_xts() %>%
  ts_span(start = "2000-01-01", end = endDate)
News.FUW.CH <- df_fuw_ch %>%
  select("time", "mean") %>%
  ts_xts() %>%
  ts_span(start = "2000-01-01", end = endDate)
News.FUW.FOR <- df_fuw_int %>%
  select("time", "mean") %>%
  ts_xts() %>%
  ts_span(start = "2000-01-01", end = endDate)
News.TA.CH <- df_ta_ch %>%
  select("time", "mean") %>%
  ts_xts() %>%
  ts_span(start = "2000-01-01", end = endDate)
News.TA.FOR <- df_ta_int %>%
  select("time", "mean") %>%
  ts_xts() %>%
  ts_span(start = "2000-01-01", end = endDate)
# Quarterly real GDP and deflator from the SECO workbook; dates are
# built from the year/quarter columns (quarter q -> month 3q-2).
GDP <- read.xlsx("../Data/PIBSuisse.xls", sheetName = "real_q", as.data.frame = TRUE, startRow = 11)
GDP <- (xts(GDP[,3], order.by = as.Date(paste(GDP[,1], GDP[,2]*3-2, "01", sep = "-"))))
GDPDefl <- read.xlsx("../Data/PIBSuisse.xls", sheetName = "defl_q", as.data.frame = TRUE, startRow = 11)
GDPDefl <- (xts(GDPDefl[,3], order.by = as.Date(paste(GDPDefl[,1], GDPDefl[,2]*3-2, "01", sep = "-"))))
NGDP <- GDP*GDPDefl
Tecon <- read.csv("../Data/TrendEcon.csv")
Tecon <- xts(Tecon[,2], order.by = as.Date(Tecon[,1]))
# Financial market variables
download.file(url = "https://www.six-group.com/exchanges/downloads/indexdata/hsb_maturity_gov_y.csv", destfile = "../Data/ObligationsConf.csv", mode="wb")
download.file(url = "https://www.six-group.com/exchanges/downloads/indexdata/hsb_maturity_dom_non_gov_rating_sbi_y.csv", destfile = "../Data/ObligationsEnt.csv", mode="wb")
download.file(url = "https://www.six-group.com/exchanges/downloads/indexdata/h_vsmi_30.csv", destfile = "../Data/VIX.csv", mode="wb")
download.file(url = "https://www.six-group.com/exchanges/downloads/indexdata/hsb_foreign_gov_y.csv", destfile = "../Data/ForGov.csv", mode="wb")
download.file(url = "https://www.six-group.com/exchanges/downloads/indexdata/hsb_foreign_cor_y.csv", destfile = "../Data/ForCorp.csv", mode="wb")
download.file(url = "https://www.six-group.com/exchanges/downloads/indexdata/hsb_maturity_for_rating_sbi_y.csv", destfile = "../Data/ForShort.csv", mode="wb")
download.file(url = "https://www.bundesbank.de/statistic-rmi/StatisticDownload?tsId=BBK01.WT1010&its_csvFormat=de&its_fileFormat=csv&mode=its", destfile = "../Data/GermanBondYield.csv", mode="wb")
download.file(url = "http://sdw.ecb.europa.eu/quickviewexport.do;jsessionid=62044D6532AB70D16C184E5A8FFADEEC?SERIES_KEY=165.YC.B.U2.EUR.4F.G_N_A.SV_C_YM.SR_1Y&type=csv", destfile = "../Data/EuroShortRate.csv", mode="wb")
# German government bond yields
# German-locale numbers: strip thousands dots, then convert the decimal
# comma to a dot before coercing to numeric.
Gov10.DEU <- read.csv("../Data/GermanBondYield.csv", sep = ";", skip = 5, na.strings = "Kein Wert vorhanden", stringsAsFactors = FALSE, )
Gov10.DEU <- Gov10.DEU[,1:2]
Gov10.DEU[,2] <- as.numeric(gsub(",", ".", gsub("\\.", "", Gov10.DEU[,2])))
Gov10.DEU <- Gov10.DEU[!is.na(Gov10.DEU[,2]),]
Gov10.DEU <- xts(Gov10.DEU[,2], order.by = as.Date(Gov10.DEU[,1]))
Gov1.EUR <- read.csv("../Data/EuroShortRate.csv", sep = ",", skip = 4, stringsAsFactors = FALSE, )
Gov1.EUR <- xts(Gov1.EUR[,2], order.by = as.Date(Gov1.EUR[,1]))
LIB1.EUR <- ts_ts(ts_fred('EUR12MD156N'))
# Volatility CH
VIX.CH <- read.csv("../Data/VIX.csv", sep = ";", skip = 1)
VIX.CH <- xts(VIX.CH[,3], order.by = dmy(VIX.CH[,1]))
# Bond yields history SNB
BondHistory <- read.xlsx("../Data/BondsHistory/SNBBondHistory.xlsx", startRow = 17, sheetName = "Report")
Gov1.CH.h <- xts(BondHistory$EID1, order.by = as.Date(BondHistory$Date))
Gov2.CH.h <- xts(BondHistory$EID2, order.by = as.Date(BondHistory$Date))
Gov8.CH.h <- xts(BondHistory$EID8, order.by = as.Date(BondHistory$Date))
# NOTE(review): Gov9.CH.h is built from EID8, not EID9 -- looks like a
# copy-paste slip; confirm against the SNB workbook columns.
Gov9.CH.h <- xts(BondHistory$EID8, order.by = as.Date(BondHistory$Date))
Gov10.CH.h <- xts(BondHistory$EID10, order.by = as.Date(BondHistory$Date))
Bank8.CH.h <- xts(BondHistory$BANK8, order.by = as.Date(BondHistory$Date))
Ind8.CH.h <- xts(BondHistory$IND8, order.by = as.Date(BondHistory$Date))
Gov10.DEU.h <- xts(BondHistory$DEU10, order.by = as.Date(BondHistory$Date))
ForA8.h <- xts(BondHistory$FORA, order.by = as.Date(BondHistory$Date))
ForAA8.h <- xts(BondHistory$FORAA, order.by = as.Date(BondHistory$Date))
ForAAA8.h <- xts(BondHistory$FORAAA, order.by = as.Date(BondHistory$Date))
# Bonds raw data SIX (data rows and, separately, the 3-row label header)
Gov <- read.csv("../Data/ObligationsConf.csv", sep = ";", skip = 4)
GovLab <- read.csv("../Data/ObligationsConf.csv", sep = ";", nrows = 3)
NonGov <- read.csv("../Data/ObligationsEnt.csv", sep = ";", skip = 4)
NonGovLab <- read.csv("../Data/ObligationsEnt.csv", sep = ";", nrows = 3)
ForGov <- read.csv("../Data/ForGov.csv", sep = ";", skip = 4)
ForGovLab <- read.csv("../Data/ForGov.csv", sep = ";", nrows = 3)
ForCorp <- read.csv("../Data/ForCorp.csv", sep = ";", skip = 4)
ForCorpLab <- read.csv("../Data/ForCorp.csv", sep = ";", nrows = 3)
ForShort <- read.csv("../Data/ForShort.csv", sep = ";", skip = 4)
ForShortLab <- read.csv("../Data/ForShort.csv", sep = ";", nrows = 3)
# More recent data from SIX: pick columns by matching the index label
# row, with dates parsed from the first column.
Gov2.CH <- xts(Gov[,GovLab[2,] == "SBI Dom Gov 1-3 Y"], order.by = dmy(Gov[,1]))
Gov8.CH <- xts(Gov[,GovLab[2,] == "SBI Dom Gov 7-10 Y"], order.by = dmy(Gov[,1]))
AAA_BBB2.CH <- xts(NonGov[,NonGovLab[2,] == "SBI Dom Non-Gov AAA-BBB 1-3 Y"], order.by = dmy(NonGov[,1]))
AAA_BBB8.CH <- xts(NonGov[,NonGovLab[2,] == "SBI Dom Non-Gov AAA-BBB 7-10 Y"], order.by = dmy(NonGov[,1]))
AAA_AA2.CH <- xts(NonGov[,NonGovLab[2,] == "SBI Dom Non-Gov AAA-AA 1-3 Y"], order.by = dmy(NonGov[,1]))
AAA_AA8.CH <- xts(NonGov[,NonGovLab[2,] == "SBI Dom Non-Gov AAA-AA 7-10 Y"], order.by = dmy(NonGov[,1]))
# NOTE(review): AAA_A8.CH selects the "AAA-AA 7-10 Y" label (same as
# AAA_AA8.CH) rather than "AAA-A 7-10 Y" -- verify the intended label.
AAA_A8.CH <- xts(NonGov[,NonGovLab[2,] == "SBI Dom Non-Gov AAA-AA 7-10 Y"], order.by = dmy(NonGov[,1]))
AAA_A2.CH <- xts(NonGov[,NonGovLab[2,] == "SBI Dom Non-Gov AAA-A 1-3 Y"], order.by = dmy(NonGov[,1]))
AAA_A10.CH <- xts(NonGov[,NonGovLab[2,] == "SBI Dom Non-Gov AAA-A 7-10 Y"], order.by = dmy(NonGov[,1]))
ForAAA_BBB <- xts(ForCorp[,ForCorpLab[2,] == "SBI For Corp AAA-BBB Y"], order.by = dmy(ForCorp[,1]))
ForAAA_A <- xts(ForCorp[,ForCorpLab[2,] == "SBI For Corp AAA-A Y"], order.by = dmy(ForCorp[,1]))
ForAAA_AA <- xts(ForCorp[,ForCorpLab[2,] == "SBI For Corp AAA-AA Y"], order.by = dmy(ForCorp[,1]))
ForAAA_BBB2 <- xts(ForShort[,ForShortLab[2,] == "SBI For AAA-BBB 1-3 Y"], order.by = dmy(ForShort[,1]))
# Link 2Y history with 1-3 year current data
# (euro short rate: splice the 12M LIBOR history onto the ECB 1Y series)
Gov1.EUR.l <- ts_bind(ts_span(LIB1.EUR, ts_summary(LIB1.EUR)$start, ts_summary(Gov1.EUR)$start), Gov1.EUR)
p <- ts_ggplot(
  `Linked data` = Gov1.EUR.l,
  `ECB data` = Gov1.EUR,
  `LIBOR data` = LIB1.EUR,
  title = "Short-term euro bond yields"
)
p <- ggLayout(p)
p
ggsave(filename = "../Results/LinkedData/Short.EUR.pdf", width = figwidth, height = figheight)
# Link 2Y history with 1-3 year current data
Gov2.CH.l <- ts_bind(ts_span(Gov2.CH.h, ts_summary(Gov2.CH.h)$start, ts_summary(Gov2.CH)$start), Gov2.CH)
p <- ts_ggplot(
  `Linked data` = Gov2.CH.l,
  `SIX Data (1-3Y)` = Gov2.CH,
  `SNB Data (2Y)` = Gov2.CH.h,
  title = "Short-term confederation bond yields"
)
p <- ggLayout(p)
ggsave(filename = "../Results/LinkedData/GovShort.CH.pdf", width = figwidth, height = figheight)
# Link 8Y history with 7-10 year current data
Gov8.CH.l <- ts_bind(ts_span(Gov8.CH.h, ts_summary(Gov8.CH.h)$start, ts_summary(Gov8.CH)$start), Gov8.CH)
p <- ts_ggplot(
  `Linked data` = Gov8.CH.l,
  `SIX Data (7-10Y)` = Gov8.CH,
  `SNB Data (8Y)` = Gov8.CH.h,
  title = "Long-term confederation bond yields"
)
p <- ggLayout(p)
ggsave(filename = "../Results/LinkedData/GovLong.CH.pdf", width = figwidth, height = figheight)
# Link 8Y history banks with average between AA-AAA and A-AAA corporate bonds
AAA8.CH.l <- ts_bind(ts_span(Bank8.CH.h, ts_summary(Bank8.CH.h)$start, ts_summary(AAA_AA8.CH)$start), (AAA_AA8.CH+AAA_A8.CH)/2)
p <- ts_ggplot(
  `Linked data` = AAA8.CH.l,
  `SIX Data (incl. A 7-10Y)` = AAA_A8.CH,
  `SIX Data (incl. AA 7-10Y)` = AAA_AA8.CH,
  `SNB Data Banks (8Y)` = Bank8.CH.h,
  title = "Long-term corporate bond yields"
)
p <- ggLayout(p)
ggsave(filename = "../Results/LinkedData/CorpLong1.CH.pdf", width = figwidth, height = figheight)
# Link 8Y history manufacturing with adjusted AAA-BBB yield index
# (rescale the history to the mean and volatility of the current index)
Ind8.CH.h.adj <- (Ind8.CH.h-mean(ts_span(Ind8.CH.h, ts_summary(AAA_BBB8.CH)$start), na.rm = TRUE)[1])/sqrt(var(ts_span(Ind8.CH.h, ts_summary(AAA_BBB8.CH)$start), na.rm=TRUE))[1]
Ind8.CH.h.adj <- Ind8.CH.h.adj*sqrt(var(AAA_BBB8.CH, na.rm=TRUE))[1] + mean(AAA_BBB8.CH, na.rm = TRUE)[1]
BBB8.CH.l <- ts_bind(ts_span(Ind8.CH.h.adj, ts_summary(Ind8.CH.h.adj)$start, ts_summary(AAA_BBB8.CH)$start), AAA_BBB8.CH)
p <- ts_ggplot(
  `Linked data` = BBB8.CH.l,
  `SIX Data (incl. BBB 7-10Y)` = AAA_BBB8.CH,
  `SNB Data Manufact (adj. 8Y)` = Ind8.CH.h.adj ,
  title = "Long-term corporate bond yields"
)
p <- ggLayout(p)
ggsave(filename = "../Results/LinkedData/CorpLong2.CH.pdf", width = figwidth, height = figheight)
# Link 8Y bonds of foreign corporations
# Take the average from SIX data and link with data of SNB
# Not clear that same credit rating and/or maturity
ForCorp.l <- ts_bind(ts_span(ForAAA8.h, ts_summary(ForAAA8.h)$start, ts_summary(ForAAA_A)$start), (ForAAA_A+ForAAA_AA+ForAAA_BBB)/3)
p <- ts_ggplot(
  `Linked data` = ForCorp.l,
  `SIX Data (incl. A 7-10Y)` = ForAAA_A,
  `SIX Data (incl. AA 7-10Y)` = ForAAA_AA,
  `SIX Data (incl. BBB 7-10Y)` = ForAAA_BBB,
  `SNB Data AAA (8Y)` = ForAAA8.h,
  title = "Long-term foreign corporate bond yields"
)
p <- ggLayout(p)
ggsave(filename = "../Results/LinkedData/ForCorpLong.CH.pdf", width = figwidth, height = figheight)
# Construct final data set (spreads and term structures)
IRDIFF.CH <- Gov2.CH.l - Gov1.EUR.l
TS.CH <- Gov8.CH.l - Gov2.CH.l
RP.CH <- AAA8.CH.l - Gov8.CH.l
#RP2.CH <- BBB8.CH.l - Gov8.CH.l
RPShort.CH <- AAA_BBB2.CH - Gov2.CH.l
RPShort.FOR <- ForAAA_AA - Gov2.CH.l
RP.FOR <- ForCorp.l - Gov8.CH.l
TS.EUR <- Gov10.DEU - Gov1.EUR.l
TS.US <- ts_xts(ts_fred("T10Y2Y"))
VIX.US <- ts_xts(ts_fred("VIXCLS"))
# Check start and end dates
ts_summary(IRDIFF.CH)
ts_summary(TS.CH)
ts_summary(RP.CH)
ts_summary(RPShort.CH)
ts_summary(RPShort.FOR)
ts_summary(RP.FOR)
ts_summary(TS.EUR)
ts_summary(TS.US)
ts_summary(VIX.US)
ts_summary(VIX.CH)
# Export the data (only those series that work well!)
Indicators <- ts_c(TS.CH, RP.CH, RPShort.CH, VIX.CH, IRDIFF.CH, News.CH,
  News.FOR, RPShort.FOR, RP.FOR, TS.US, VIX.US, TS.EUR,
  Tecon, News.NZZ.CH, News.FUW.CH, News.TA.CH, News.NZZ.FOR, News.FUW.FOR, News.TA.FOR)
# Save indicators for f-curve
save(list = c("GDP", "NGDP", "GDPDefl", "Indicators"), file = "../Data/IndicatorData.RData")
|
a8feb13b62e8e32bcc548ace2849c6e68061df6c
|
d97e908015d9fafe3adf3183738a3e9fc89ceef3
|
/Integrative_analysis/Integrative-FimoMotif_presence_in_Iri_MarkerGene_nearest_dynamicDMAR.r
|
7680180d0bec6ca32ae1f8aec8e5eab67f6eaa2e
|
[
"MIT"
] |
permissive
|
yjchen1201/zebrafish_pigment_cell_dev
|
ca7393fab7c6942c260c39fd9b7c2bc167d71e86
|
f4c0e3f81619778cf4e13955b23209c108ba9b94
|
refs/heads/main
| 2023-04-17T10:06:59.855315
| 2021-09-08T22:31:54
| 2021-09-08T22:31:54
| 399,949,907
| 0
| 2
|
MIT
| 2021-09-07T04:56:48
| 2021-08-25T20:31:54
|
R
|
UTF-8
|
R
| false
| false
| 10,552
|
r
|
Integrative-FimoMotif_presence_in_Iri_MarkerGene_nearest_dynamicDMAR.r
|
####### USE FIMO TO scan FOR IRI-SPECIFIC TF motifs ###########
# Read the DMAR master list generated by "Integrative-generate_DMAR_master_list_02.r".
DMAR <- read.table("All_DMAR_Combined_wINFO.bed",sep = "\t",header =T, stringsAsFactors =F)
# Keep DMARs that become hypomethylated / accessible in iridophores.
# FIX: the original indexed `DMAR` with conditions on `DMAR_new`, an object
# that is never created in this script (it would error at run time); all
# conditions now consistently use `DMAR`.
DMAR_Iri_accessible <- DMAR[DMAR$DMRs24vIri >0 | DMAR$DARs24vIri >0 | DMAR$DMRMelvIri >0 | DMAR$DARMelvIri <0,]
write.table(DMAR_Iri_accessible[,c(1,2,3,4)], "DMAR_Iri_Accesible_for_FIMO.bed", sep = "\t", quote = F, col.names = F, row.names =F)
#DMAR_Mel_accessible <- DMAR[DMAR$DMRs24vMel >0 | DMAR$DARs24vMel >0 | DMAR$DMRMelvIri <0 | DMAR$DARMelvIri >0,]
#write.table(DMAR_Mel_accessible[,c(1,2,3,4)], "DMAR_Mel_Accesible_for_FIMO.bed", sep = "\t", quote = F, col.names = F, row.names =F)
# Convert the BED regions to FASTA with UCSC twoBitToFa.
# NOTE(review): `module load` run via system() starts a subshell and does not
# change PATH for the *next* system() call -- verify the tools are on PATH.
system("module load kentUCSC")
system("twoBitToFa -bed=DMAR_Iri_Accesible_for_FIMO.bed danRer10.2bit DMAR_Iri_Accesible_for_FIMO.fasta")
# Scan the FASTA for iridophore TF motifs with FIMO (MEME suite).
# FIX: "module meme" is not a valid module command; it must be "module load meme".
system("module load meme")
system("fimo Iri_TFs_FIMO.meme DMAR_Iri_Accesible_for_FIMO.fasta")
#system("mast Iri_TFs_FIMO.meme DMAR_Iri_Accesible_for_FIMO.fasta")
#system("mcast Iri_TFs_FIMO.meme DMAR_Iri_Accesible_for_FIMO.fasta")
# Summarize motif hits per region with the Parse_fimo.py helper.
system("module load python3")
system("python3 Parse_fimo.py fimo.txt") #fimo.txt is the output from fimo step
# Read the per-region motif-count table produced by Parse_fimo.py.
FIMO <- read.table("fimo_parsed.txt",sep = "\t", header = T, quote = "", stringsAsFactors =F)
# Merge DMAR_Iri_accessible with the FIMO counts by the shared chrompos column;
# regions with no motif hit get NA, replaced by 0 below.
DMAR_Iri_FIMO <- merge(DMAR_Iri_accessible[,c(1:32)],FIMO, by = "chrompos", all.x = T)
DMAR_Iri_FIMO[is.na(DMAR_Iri_FIMO)] <- 0
# Save Iri accessible DMARs with FIMO-scanned motif counts (headerless copy
# is the one consumed by bedtools downstream).
#write.table(DMAR_Iri_FIMO[,c(2:4,1,5:52)], "accessibleDMAR_Iri_wFIMOmotif.txt", sep = "\t", col.names = T, row.names = F, quote =F)
write.table(DMAR_Iri_FIMO[,c(2:4,1,5:52)], "accessibleDMAR_Iri_wFIMOmotif_noheader.txt", sep = "\t", col.names = F, row.names = F, quote =F)
# Input file "Danio_rerio.GRCz10.85.GENE.PROMOTER.bed" is generated from "Integrative-promoter-centric.r"
# Import promoter information.
# NOTE(review): the file is read with header = F, so columns are named V1..Vn;
# `prom$gene` below is NULL unless names are assigned elsewhere -- verify.
prom <- read.table("Danio_rerio.GRCz10.85.GENE.PROMOTER.bed", sep = "\t", stringsAsFactors =F, header = F, quote ="")
# Extract promoters of the iridophore marker genes (listed by Ensembl ID).
# NOTE(review): ENSDARG00000003732 appears twice in this filter -- presumably
# a copy-paste slip; harmless for `|` but confirm no gene was dropped.
Iri_MarkerGenes <- prom[prom$gene == "ENSDARG00000003732"|prom$gene =="ENSDARG00000002933"|prom$gene == "ENSDARG00000016706" |prom$gene == "ENSDARG00000077467" | prom$gene == "ENSDARG00000098745" |prom$gene == "ENSDARG00000003732" |prom$gene == "ENSDARG00000059279" |prom$gene == "ENSDARG00000008861" | prom$gene == "ENSDARG00000021032"| prom$gene == "ENSDARG00000024431",]
# Read differential-expression results.
## "DEG_p0.01_ENSEMBL_to_Gene_Name.txt" and "DEGs_combined_samples_p0.01_wTFinfo.txt" are from RNA/RNA_02_DEGanalysis.r
# NOTE(review): the stray empty argument (", ,") in this call is tolerated by R
# but is almost certainly a typo.
DEG2_genenames<- read.table("DEG_p0.01_ENSEMBL_to_Gene_Name.txt",header = T, ,quote = "", sep = "\t",stringsAsFactors = F)
DEG2 <- read.table("DEGs_combined_samples_p0.01_wTFinfo.txt",header = T, ,quote = "", sep = "\t",stringsAsFactors = F)
colnames(DEG2_genenames) <- c("gene","genename")
DEG3 <- merge(DEG2,DEG2_genenames, by = "gene", all.x = T)
# Attach DEG statistics to the marker-gene promoters; fill missing with 0
# and hand-correct the name of foxd3.
Iri_MarkerGenes2 <- merge(Iri_MarkerGenes,DEG3[,c(1:15)],by = "gene", all.x = T)
Iri_MarkerGenes2[is.na(Iri_MarkerGenes2)] <- 0
Iri_MarkerGenes2[Iri_MarkerGenes2$gene == "ENSDARG00000021032",]$genename.x <- "foxd3"
# Extend each promoter window by 50 kb on both sides.
Iri_MarkerGenes2$start50kb <- Iri_MarkerGenes2$start-50000
Iri_MarkerGenes2$end50kb <- Iri_MarkerGenes2$end+50000
# Clamp negative starts (promoters near a chromosome start) to 0.
Iri_MarkerGenes2[Iri_MarkerGenes2$start50kb < 0,]$start50kb <-0
# Save extended promoter windows as BED for bedtools.
write.table(Iri_MarkerGenes2[,c(2,22,23,1,14)],"Iri_MarkerGenes_promoter_50kb_extended.bed", sep = "\t", col.names = F, row.names = F, quote =F)
# Intersect the extended promoter windows with the Iri-accessible DMARs
# carrying FIMO motif counts.
system("module load bedtools")
system("bedtools intersect -wao -a Iri_MarkerGenes_promoter_50kb_extended.bed -b accessibleDMAR_Iri_wFIMOmotif_noheader.txt > Iri_MarkerGenes_promoter_50kb_extended_wDMAR_FIMO.txt")
# Import the intersected file.
# NOTE(review): the bedtools command above writes "..._wDMAR_FIMO.txt" but this
# read.table() expects "..._wDMAR_FIMOMotif.txt" -- one of the two names is
# wrong; reconcile before running.
Iri_Marker_50kb_DMAR_FIMO <- read.table("Iri_MarkerGenes_promoter_50kb_extended_wDMAR_FIMOMotif.txt", header = F, stringsAsFactors = F, sep = "\t", quote = "")
colnames(Iri_Marker_50kb_DMAR_FIMO) <- c("chr","start50kb","end50kb","gene","genename",colnames(DMAR_Iri_FIMO),"overlap")
colnames(Iri_Marker_50kb_DMAR_FIMO)[6:9] <- c("chr","start","end","chrompos")
# bedtools reports "." for no overlap; convert to 0 and make counts numeric.
Iri_Marker_50kb_DMAR_FIMO[Iri_Marker_50kb_DMAR_FIMO == "."]<-0
Iri_Marker_50kb_DMAR_FIMO[,c(38:45)] <- sapply(Iri_Marker_50kb_DMAR_FIMO[,c(38:45)],as.numeric)
# NOTE(review): `Iri_DEG_50kb_DMAR_FIMO` used below is not created in this
# script -- it is presumably produced by a sibling DEG-centric script that must
# be run first; confirm the required workspace is loaded.
Iri_DEG_50kb_DMAR_FIMO[Iri_DEG_50kb_DMAR_FIMO$genename == "alx1" | Iri_DEG_50kb_DMAR_FIMO$genename == "alx3"| Iri_DEG_50kb_DMAR_FIMO$genename == "alx4a" | Iri_DEG_50kb_DMAR_FIMO$genename == "alx4b" | Iri_DEG_50kb_DMAR_FIMO$genename == "ltk"| Iri_DEG_50kb_DMAR_FIMO$genename == "ednrba" | Iri_DEG_50kb_DMAR_FIMO$genename == "pnp4a",]
# Append selected DEG-centric rows, then keep rows carrying at least one
# iridophore TF motif (or the tfap2a gene) and re-attach DEG statistics.
Iri_Marker_50kb_DMAR_FIMO_ALL <- rbind(Iri_Marker_50kb_DMAR_FIMO,Iri_DEG_50kb_DMAR_FIMO[Iri_DEG_50kb_DMAR_FIMO$genename == "gbx2" | Iri_DEG_50kb_DMAR_FIMO$genename == "alx1" | Iri_DEG_50kb_DMAR_FIMO$genename == "alx3"| Iri_DEG_50kb_DMAR_FIMO$genename == "alx4a" | Iri_DEG_50kb_DMAR_FIMO$genename == "alx4b" | Iri_DEG_50kb_DMAR_FIMO$genename == "ltk"| Iri_DEG_50kb_DMAR_FIMO$genename == "ednrba"| Iri_DEG_50kb_DMAR_FIMO$genename == "pnp4a",])
Iri_Marker_50kb_DMAR_FIMO_ALL <- merge(Iri_Marker_50kb_DMAR_FIMO_ALL[Iri_Marker_50kb_DMAR_FIMO_ALL$genename == "tfap2a" | Iri_Marker_50kb_DMAR_FIMO_ALL$GBX2 > 0 |Iri_Marker_50kb_DMAR_FIMO_ALL$ALX1 > 0 | Iri_Marker_50kb_DMAR_FIMO_ALL$ALX3 > 0 |Iri_Marker_50kb_DMAR_FIMO_ALL$ALX4 > 0 |Iri_Marker_50kb_DMAR_FIMO_ALL$SOX10 > 0 |Iri_Marker_50kb_DMAR_FIMO_ALL$ETS1 > 0 |Iri_Marker_50kb_DMAR_FIMO_ALL$TFEC > 0,],DEG3[,c(0:15)], by = "gene", all.x = T)
Iri_Marker_50kb_DMAR_FIMO_ALL[is.na(Iri_Marker_50kb_DMAR_FIMO_ALL)] <- 0
#Iri TF specific#
# Drop non-TF marker genes, order by gene name, and keep rows with at least
# one TF motif hit.
Iri_Marker_50kb_DMAR_FIMO_TF <- Iri_Marker_50kb_DMAR_FIMO_ALL[Iri_Marker_50kb_DMAR_FIMO_ALL$genename != "ltk" & Iri_Marker_50kb_DMAR_FIMO_ALL$genename != "atic" & Iri_Marker_50kb_DMAR_FIMO_ALL$genename != "ednrba",]
Iri_Marker_50kb_DMAR_FIMO_TF<- Iri_Marker_50kb_DMAR_FIMO_TF[order(Iri_Marker_50kb_DMAR_FIMO_TF$genename),]
Iri_Marker_50kb_DMAR_FIMO_TF <- Iri_Marker_50kb_DMAR_FIMO_TF[Iri_Marker_50kb_DMAR_FIMO_TF$ALX1 >0 |Iri_Marker_50kb_DMAR_FIMO_TF$ALX3 >0 |Iri_Marker_50kb_DMAR_FIMO_TF$ALX4 >0 |Iri_Marker_50kb_DMAR_FIMO_TF$GBX2>0 |Iri_Marker_50kb_DMAR_FIMO_TF$SOX10 >0 |Iri_Marker_50kb_DMAR_FIMO_TF$ETS1 >0 |Iri_Marker_50kb_DMAR_FIMO_TF$TFEC >0,]
######## Generate supFig7c heatmap plot ###########
# Heatmaps use ComplexHeatmap; colorRamp2() is from circlize and unit() from
# grid (all assumed loaded earlier in the session).
# Motif-occurrence heatmap: columns 38:44 hold the FIMO motif counts.
name <- make.unique(Iri_Marker_50kb_DMAR_FIMO_TF[,9])
df.OG2 <- Iri_Marker_50kb_DMAR_FIMO_TF[,c(38:44)]
row.names(df.OG2) <- name
# FIX: the original assigned colnames from columns 38:45 (8 names) to a
# 7-column data frame, which errors; use the matching 38:44 range.
colnames(df.OG2) <- colnames(Iri_Marker_50kb_DMAR_FIMO_TF[,c(38:44)])
ht2 = Heatmap(df.OG2, column_title = "Iri TFs DMARs\n(50kb from DEG promoter)",name= "Motif\nOccurrence",col = colorRamp2(c(0, 2, 4), c("#FFFEF2","#17C0F2","#0084A1")),row_names_side = "left",row_names_max_width = unit(20, "cm"),
        cluster_rows = F, cluster_columns = F,show_row_names = T,clustering_method_rows= "ward.D",row_dend_side = c("right"),)
ht2
# Genomic-annotation side heatmap (column 35).
# FIX: single-bracket extraction of one column drops to a vector, making the
# subsequent row.names()<- call error; keep a data frame with drop = FALSE
# (this also makes the redundant colnames reassignment unnecessary).
name <- make.unique(Iri_Marker_50kb_DMAR_FIMO_TF[,5])
df.OG2 <- Iri_Marker_50kb_DMAR_FIMO_TF[,c(35), drop = FALSE]
row.names(df.OG2) <- name
ht = Heatmap(df.OG2,name= "Annotation",col = c("#FFD4E5","#FFB3BA","#FFDFBA","#FFFFBA","#BAFFC9"),
        cluster_rows = F, cluster_columns = F)
ht
# DMAR-type side heatmap (column 37); same drop = FALSE fix.
name <- make.unique(Iri_Marker_50kb_DMAR_FIMO_TF[,5])
df.OG2 <- Iri_Marker_50kb_DMAR_FIMO_TF[,c(37), drop = FALSE]
row.names(df.OG2) <- name
ht1 = Heatmap(df.OG2,name= "Type",col = c("#D2A8DD","#A6FDFC","#FFE3D6"),
        cluster_rows = F, cluster_columns = F)
ht1
# Expression fold-change heatmap (column 64); the sign is flipped so that
# positive values mean up-regulation in iridophores.
name <- make.unique(Iri_Marker_50kb_DMAR_FIMO_TF[,5])
df.OG2 <- Iri_Marker_50kb_DMAR_FIMO_TF[,c(64), drop = FALSE]
df.OG2 <- df.OG2*-1
row.names(df.OG2) <- name
# FIX: the original appended ggplot2's scale_x_discrete() to a ComplexHeatmap
# object with "+", which is invalid ("+" only combines heatmaps); use the
# column_labels argument to relabel the column instead.
ht3= Heatmap(df.OG2,name= "Fold Change",col = colorRamp2(c(-5, 0, 5), c("#BAFFC9", "#FFFEF2","#C9C9FF")),
        cluster_rows = F, cluster_columns = FALSE, column_labels = "24hpf vs Iri")
ht3
# Expression-level (RPKM) heatmap, columns 70 and 72.
name <- make.unique(Iri_Marker_50kb_DMAR_FIMO_TF[,5])
df.OG2 <- Iri_Marker_50kb_DMAR_FIMO_TF[,c(70,72)]
row.names(df.OG2) <- name
colnames(df.OG2) <- colnames(Iri_Marker_50kb_DMAR_FIMO_TF[,c(70,72)])
ht4= Heatmap(df.OG2,name= "RPKM",col = colorRamp2(c(0,50,200,300,400), c("#BAE1FF","#FFFFBA","#FFB3BA","#E0301E","#800000")), cluster_rows = F, cluster_columns = FALSE)
ht4
# Draw the combined panel: motif counts + annotation + type + fold change + RPKM.
ht2+ht+ht1+ht3+ht4
######## Generate supFig8 stacked bar plot ###########
# Import table (can be found in "Input_files" folder)
data<-read.table(file="supFig8_table_Motif_presence_in_IriDMAR_nearest_IriDEGs.txt", header=TRUE,sep="\t")
# Order genes by the Order column and fix the motif-combination legend order.
data$genename <- reorder(data$genename, data$Order)
data$Motif_presence <- factor(data$Motif_presence, levels=c("ALX only","SOX10 only","TFEC only", "ALX & SOX10","ALX & TFEC","SOX10 & TFEC","ALX & SOX10 & TFEC","None"))
# Choose colors; brewer.pal() is from RColorBrewer -- assumed loaded earlier.
mypalette <-c(brewer.pal(8,"Set3"))
# One horizontal stacked bar per gene, filled by motif combination.
p3<-ggplot(data, aes(fill=Motif_presence, y=DMAR_count, x=genename))+
  geom_bar(position="stack", stat="identity")+
  labs(y= "DM/AR counts", x="Gene name")+
  coord_flip()+
  theme_bw() +
  theme(
    panel.border = element_rect(colour = "black",size = 0.5),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    axis.line = element_line(colour = "black",size = 0.25),
    axis.title=element_text(size=12,face = "bold"),
    axis.text.x = element_text(face = "bold",size = 12, color="black"),
    axis.text.y = element_text(face = "bold.italic",size = 8, color="black")
  )+
  theme(panel.background = element_rect(fill = 'white',colour = 'black'))+
  theme(legend.title=element_text(size=12,face = "bold"),legend.text=element_text(size=10,face = "bold"))+
  theme(legend.justification=c(0.95,0.95), legend.position=c(0.95,0.95))+
  # FIX: the original stacked scale_fill_discrete() and scale_fill_manual()
  # (plus a scale_color_manual() with no colour aesthetic mapped); ggplot2
  # keeps only the last fill scale, silently discarding the legend title and
  # breaks and warning "Scale for 'fill' is already present".  A single
  # scale_fill_manual() carries the name, breaks and palette together.
  scale_fill_manual(name = "Motif presence",
                    breaks = c("ALX only","SOX10 only","TFEC only", "ALX & SOX10","ALX & TFEC","SOX10 & TFEC","ALX & SOX10 & TFEC","None"),
                    values = mypalette)
p3
|
9b2969c238f5749e4643266450e8f885f45b99b2
|
a79da6845229920cc17f3b25cb0b667262715b88
|
/man/analyse_trscr_cnv.Rd
|
91847e11d64e8c53a9111faafbd9bd96d3d4d4ad
|
[] |
no_license
|
fchuffar/methmybeachup
|
58d55ec37d711b7fe3a6a8cdc9022105e7c6c514
|
f74cb52edac31eaf76efe174a2ae7d1c38a09750
|
refs/heads/master
| 2021-01-12T02:53:01.934598
| 2017-12-05T20:27:25
| 2017-12-05T20:27:25
| 78,121,581
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 875
|
rd
|
analyse_trscr_cnv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyse_meth.R
\name{analyse_trscr_cnv}
\alias{analyse_trscr_cnv}
\title{A Function Analyses CNV and Transcriptome data}
\usage{
analyse_trscr_cnv(gene, trscr_res, cnv_res, meth_idx, ctrl_idx, cols, idxes,
PLOT = TRUE)
}
\arguments{
\item{gene}{A vector describing the gene (line of a bed file).}
\item{trscr_res}{A vector of expression values.}
\item{cnv_res}{A vector of CN values.}
\item{meth_idx}{A vector of samples for which methylome analysis is available.}
\item{ctrl_idx}{A vector of samples used to define the threshold.}
\item{cols}{A vector of colors indexed by sample names.}
\item{idxes}{A list of indices defining sample groups.}
\item{PLOT}{A boolean defining whether graphical output must be displayed.}
}
\description{
This function analyses CNV and transcriptome data.
}
|
6b75fd2e159cc608b5556b1dc887ede4efb8b627
|
cd32d300faed890d520e27399e93662b47388926
|
/Projekt1/trzejMuszkieterowie_Projekt1/wykresMiecze.r
|
85878b565306d33658f7ca8918232583496db2e0
|
[] |
no_license
|
Siemashko/TechnikiWizualizacjiDanych2018
|
f01a7b3b513cf07d31261d176e56fb7b99583df6
|
c6072805f3a9b3c1f0d7db34c2728cb4e20d562f
|
refs/heads/master
| 2020-03-30T16:08:56.922922
| 2019-01-16T11:08:26
| 2019-01-16T11:08:26
| 151,395,331
| 0
| 1
| null | 2019-01-09T11:39:22
| 2018-10-03T10:24:06
|
HTML
|
UTF-8
|
R
| false
| false
| 4,216
|
r
|
wykresMiecze.r
|
# Star-Wars-themed lightsaber bar chart of saber-blade colors, drawn as
# layered geom_segment "blades" on a flag background.
library(ggplot2)
library(ggimage)
library(extrafont)
# TO CREATE THIS PLOT YOU MUST FIRST DOWNLOAD AND INSTALL THE "starjout"
# (Star Jedi Outline) FONT AMONG THE FONTS ON YOUR COMPUTER.
windowsFonts(k = windowsFont("starjout"))
loadfonts()
windowsFonts(sans="starjout")
loadfonts(device="win")
loadfonts(device="postscript")
# Saber color counts; +5 shifts every bar so the "hilt" segment (0..5) is
# drawn below the blade.
colors <- c("Blue", "Red", "Green", "Yellow", "Purple", "Violet", "White", "orange", "others")
numbers <- c(40,38,27,7,6,4,1,1,4)
numbers <- numbers +5
data <- data.frame(colors,numbers)
# NOTE(review): `order` is assigned but never used below -- verify whether a
# reordering step was intended.
order <- c(1,6,2,9,5,6,8,3,4)
data$colors <- factor(data$colors, levels = rev(data$colors))
# NOTE(review): the first positional argument inside aes() (`data`) is mapped
# to x and then immediately overridden by x=colors; it is a harmless typo.
# Each blade is built from several overlapping translucent segments to fake a
# glow effect; the gray segments at y in [0, 5] form the hilt.
p <- ggplot(data, aes(data,x=colors,y=numbers)) +
  geom_segment(aes(x=data$colors, xend = data$colors,
                   y=rep(5, 9), yend = 0), color = "gray", size = 15) +
  geom_segment(aes(x=data$colors, xend = data$colors,
                   y=data$numbers+0.5, yend = 5), color = c("blue", "red", "green", "yellow",
                                                            "purple", "violet", "white","orange", "gray"), size = 20, alpha = 0.1) +
  geom_segment(aes(x=data$colors, xend = data$colors,
                   y=data$numbers, yend = 0), color = c("blue", "red", "green", "yellow",
                                                        "purple", "violet", "white","orange", "gray"), size = 11, alpha = 0.65) +
  geom_segment(aes(x=data$colors, xend = data$colors,
                   y=data$numbers-0.5, yend = 0), color = "white", size = 8, alpha = 0.9) +
  geom_segment(aes(x=data$colors, xend = data$colors,
                   y=data$numbers+0.2, yend = 0), color = c("blue", "red", "green", "yellow",
                                                            "purple", "violet", "white","orange", "gray"), size = 15, alpha = 0.2) +
  geom_segment(aes(x=data$colors, xend = data$colors,
                   y=data$numbers+0.6, yend = 5), color = c("blue", "red", "green", "yellow",
                                                            "purple", "violet", "white","orange", "gray"), size = 13, alpha = 0.1) +
  geom_segment(aes(x=data$colors, xend = data$colors,
                   y=data$numbers-0.5, yend = 0), color = "white", size = 6, alpha = 0.8) +
  coord_flip()+xlab("") + ylab("") +
  # Shift axis labels back by 5 to undo the hilt offset added above.
  scale_y_continuous(breaks = seq(5,45,5),labels = c("0","5","10","15","20","25","30","35","40")) +
  geom_segment(aes(x=data$colors, xend = data$colors,
                   y=rep(5, 9), yend = 0), color = "gray17", size = 14) +
  geom_segment(aes(x=data$colors, xend = data$colors,
                   y=rep(5, 9), yend = 1), color = "gray", size = 5) +
  geom_segment(aes(x=data$colors, xend = data$colors,
                   y=rep(5, 9), yend = 4.5), color = "gray", size = 16) +
  geom_segment(aes(x=data$colors, xend = data$colors,
                   y=rep(-2, 9), yend = 0), color = "gray", size = 12) +
  geom_segment(aes(x=data$colors, xend = data$colors,
                   y=rep(-1.5, 9), yend = -1), color = "black", size = 12) +
  geom_segment(aes(x=data$colors, xend = data$colors,
                   y=data$numbers+0.15, yend = 5), color = c("blue", "red", "green", "yellow",
                                                             "purple", "violet", "white","orange", "gray"), size = 20, alpha = 0.1) +
  theme_dark() + theme(panel.grid.minor = element_line(colour="grey20", size=0.5),
                       panel.grid = element_line(colour = "darkgray", size = 0.4),
                       text = element_text(size = rel(5.5), colour = "yellow",family="Star Jedi Outline"),
                       axis.text.y = element_text(colour = "white",
                                                  family = "Star Jedi Outline", size = rel(4.5)),
                       axis.text.x = element_text(colour = "white",family = "Star Jedi Outline",size = rel(4.5)))
# Use a historical Afghan flag image as the plot background (via ggimage).
img = "https://upload.wikimedia.org/wikipedia/commons/thumb/a/a7/Flag_of_Afghanistan_%281880%E2%80%931901%29.svg/900px-Flag_of_Afghanistan_%281880%E2%80%931901%29.svg.png"
p <- ggbackground(p, img)
p
# Leftover experiments with a transparent background:
# panel.background = element_rect(fill = "transparent") # bg of the panel
# , plot.background = element_rect(fill = "transparent", color = NA),
# ggsave("plot.png",p)
# ggsave(p, filename = "tr_tst2.png", bg = "transparent")
|
3b1956909c340cd068aa0f783cb6e75ca22e6561
|
91b06d684a3c638d6053241f5599be3f0abcd1e4
|
/Part3_R기초/sec 6_1 (Vector, Factor).R
|
6ac60844fe4fd2fe31d04919cb267bbefad6db02
|
[] |
no_license
|
limwonki0619/R_Data_Analysis
|
2680b07e68e5f44afd5c167a7338487c4205da62
|
8efd42061be81996b0f3eb4ee6e2e1bd3ff3d8b0
|
refs/heads/master
| 2020-05-29T13:19:54.578617
| 2019-08-26T11:33:42
| 2019-08-26T11:33:42
| 189,156,993
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,999
|
r
|
sec 6_1 (Vector, Factor).R
|
#--------------------------------------------------------------#
# The two most important things in coding                      #
#                                                              #
# 1. Indentation                                               #
#                                                              #
# 2. Comments                                                  #
#--------------------------------------------------------------#
#--------------------------------------------------------------#
#---------- section 6 : Working with R vectors ----------------#
#--------------------------------------------------------------#
# 6.1 Vector: a data structure that gathers single (scalar) values ----------------------------
c(1:5)
c(1,2,3,4,"5") # numbers are silently coerced to character
# A vector can hold only one data type.
# Vector-creating operator: :
# Vector-creating functions: c(), seq(), rep()
c(1:5)
c(1,2,3,4,"5") # numbers are silently coerced to character
seq(1,5,2) # positional arguments
seq(by=2, from=2, to=6) # keyword (named) arguments
seq(-3,3, length.out = 61) # length.out: how many values to generate between from and to
seq(-3,3, 0.1) # step size
rep(1:3,each=2) # repeat each element
rep(1:3,time=2) # repeat the whole sequence ("time" partially matches the "times" argument)
# In any programming language, when accessing data you should know CRUD:
# C : create
# R : retrieve
# U : update
# D : delete
# 6.1.1 Accessing elements of a vector - slicing and filtering ----------------------
vec1 <- c(1:5); vec1
vec1[3] # show only the third element; R indexing starts at 1 (other languages: 0)
vec1[-3] # show everything except the third element
vec1[2:4] # show elements 2 through 4
length(vec1) # length: number of elements in the vector
vec1[1:(length(vec1)-2)]
vec1[1:length(vec1)]
vec1[-(1:3)] # show everything except elements 1-3
vec1[2] <- 6; vec1 # replace the second value of vec1 with 6
vec1 <- c(vec1,7); vec1 # new values can be appended to a vector
vec1[7] <- 8; vec1
vec1[9] <- 9; vec1 # gaps between elements are filled with NA
append(vec1,10,after=3) # insert 10 after the 3rd element of vec1
append(vec1,(-7:-1),after=7) # several values can be inserted at once
append(vec1,100,after=0) # after=0 means the very front
ex <- c(1,3,7,NA,12)
ex < 10
ex[ex %% 2 == 0] # extract values with remainder 0 when divided by 2 (even numbers)
ex[is.na(ex)] # extract the NA values of ex
# NOTE(review): this selects values that are even AND NA (none exist);
# the original comment described "even and not NA" -- !is.na(ex) was
# probably intended.
ex[ex %% 2 == 0 & is.na(ex)]
# 6.1.2 Vector arithmetic ------------------------------------------------------------------------
c(1,2,3)+c(4,5,6)
c(1,2,3)+1
var1 <- c(1,2,3)
var2 <- c(4,5,6)
var1 + var2
var3 <- c('3','4',5)
var1 + var3 # arithmetic between different types is not possible (this errors)
union(var1, var3) # for mixed types, use union() to combine the values
# union = set union (var1 U var3)
var4 <- c(1,2,3,4,5)
var1; var4 # 1,2,3,(1),(2)  with different lengths, recycling applies
var1+var4 #  + 1,2,3, 4 , 5
          #  = 2,4,6, 5 , 7
var2 <- c(3,4,5)
var1 - var2
intersect(var1,var2) # intersect = intersection (var1 n var2)
setdiff(var1,var2) # setdiff = set difference (var1 - var2)
setdiff(var2,var1)
# 6.1.3 Naming each element of a vector --------------------------------------------------------
fruits <- c(10,20,30); fruits
names(fruits) <- c('apple','banana','peach'); fruits # names(x) sets element names of a vector
# 6.1.4 Assigning sequential data to a vector -------------------------------------------------
var5 <- seq(1,6); var5
var6 <- seq(2,-2); var6
even <- seq(2,10,by=2); even # seq(from, to, by); by is the increment (step)
odd <- seq(1,10,2); odd
var8 <- rep(1:3, times=2); var8 # rep(x, times): repeat all of x
var9 <- rep(1:3, each=2); var9 # rep(x, each): repeat each element of x
# 6.1.5 Finding the length of a vector ----------------------------------------------------------
length(var1)
nrow(var1) # nrow (number of rows) works on matrices/arrays, not vectors (returns NULL)
# 6.1.6 Testing whether a vector contains a value -----------------------------------------------
5 %in% even # is 5 contained in even?
4 %in% even
# 6.1 Exercises p 323 --------------------------------------------------------------------------
# 1. Use seq() to fill date4 with daily dates from 2015-01-01 to 2015-01-31.
date4 <- seq(as.Date("2015-01-01"),as.Date("2015-01-31"),by=1); date4
# 2. Print vec1 without its third element.
vec1 <- c('사과','배','감','버섯','고구마'); vec1
vec1[-3]
# 3. Compute the union, set difference and intersection of vec1 and vec2.
vec1 <- c('봄','여름','가을','겨울')
vec2 <- c('봄','여름','늦여름','초가을')
union(vec1,vec2)
setdiff(vec1,vec2)
intersect(vec1,vec2)
# NOTE(review): rm(list=ls()) wipes the whole workspace -- fine in a tutorial,
# an anti-pattern in shared scripts.
rm(list=ls())
# *** Factor: a data structure for storing qualitative (categorical) data ------------------------------------------------------------------------
# factor(x = character(), levels, labels = levels, ordered=FALSE)
# x : the data to turn into a factor
# levels : vector of values to use as the factor's levels (values not listed become NA)
# labels : display names for the levels, e.g. relabel 1 as "M" for male
# ordered : TRUE for ordered (ordinal) factors; the levels keep the given order
x <- 1:5
factor(x, levels = c(1:4))
factor(x, levels = c(1:4), labels = c('a','b','c','d')) # change the labels
factor(x, levels = c(1:4), ordered = T) # ordered factor (ordered = T : ordinal data)
factor(x, levels = c(4,2,3,1,5)) # set the level order explicitly
weekends <- c("일","월","화","수","목","금","토")
factor(weekends, levels = c("월","화","수","목","금","토","일")) # reorder the levels
factor(1:7, levels = 1:7, labels = c("일","월","화","수","목","금","토"), ordered = T)
|
01a939bfb765c924f31f02a288a6d6b4005fe94c
|
2730ee0c85e25f0ec675f6b6060c801ca3f9642a
|
/tools/rpkg/man/rel_from_df.Rd
|
1a00698704a632ea9daf2f1cc91f10e5dbf30b56
|
[
"MIT"
] |
permissive
|
tiagokepe/duckdb
|
ff725a6771b940498cedd7a32981e6a3df54b9f5
|
5b2859f520fa5bd130bf5bdd14d7113d1db0e140
|
refs/heads/master
| 2022-07-21T23:54:19.442338
| 2022-07-12T16:21:06
| 2022-07-12T16:21:06
| 241,940,233
| 1
| 1
|
MIT
| 2020-02-20T16:59:50
| 2020-02-20T16:59:49
| null |
UTF-8
|
R
| false
| true
| 524
|
rd
|
rel_from_df.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/relational.R
\name{rel_from_df}
\alias{rel_from_df}
\title{Convert a R data.frame to a DuckDB relation object}
\usage{
rel_from_df(con, df)
}
\arguments{
\item{con}{a DuckDB DBI connection object}
\item{df}{the data.frame}
}
\value{
the \code{duckdb_relation} object wrapping the data.frame
}
\description{
Convert a R data.frame to a DuckDB relation object
}
\examples{
con <- DBI::dbConnect(duckdb::duckdb())
rel <- rel_from_df(con, mtcars)
}
|
7f8f4b58b0f5ca11853810b5346baa04ea1c1719
|
ade30db1a78b731ef3fe4720597390cddaa9c3c1
|
/Boston2.R
|
42a513d7173d4a9c6e2c160259188068ab7843d0
|
[] |
no_license
|
tiinasip/IODS-project
|
98ecafc0d8c7e957fc7a16fb925ad57862c149d3
|
c89019acd51ed3d1564505381dcd1d2d656214c6
|
refs/heads/master
| 2020-08-31T01:51:56.500785
| 2019-12-05T19:10:18
| 2019-12-05T19:10:18
| 218,550,450
| 0
| 0
| null | 2019-10-30T14:46:40
| 2019-10-30T14:46:34
| null |
UTF-8
|
R
| false
| false
| 1,073
|
r
|
Boston2.R
|
## Reload the Boston data under a new name (k-means clustering exercise).
library(MASS)
library(ggplot2)
library(tidyr)
library(cluster)
# load the data
data("Boston")
summary(Boston)
# Standardize every column (mean 0, sd 1) so distances are comparable.
boston_scaled2 <- scale(Boston)
summary(boston_scaled2)
# class of the boston_scaled object (scale() returns a matrix)
class(boston_scaled2)
# change the object to data frame
boston_scaled2<-as.data.frame(boston_scaled2)
# Pairwise Euclidean distances between observations.
dist_eu<- dist(boston_scaled2)
summary(dist_eu)
# k-means clustering, first try with 4 clusters.
# NOTE(review): this kmeans() call runs before set.seed(), so its result is
# not reproducible; the seeded runs below are.
km4 <-kmeans(boston_scaled2, centers = 4)
# plot the Boston dataset with clusters
pairs(boston_scaled2, col = km4$cluster)
# Find the optimal number of clusters k.
set.seed(123)
# determine the number of clusters
k_max <- 10
# calculate the total within-cluster sum of squares for k = 1..k_max
twcss <- sapply(1:k_max, function(k){kmeans(boston_scaled2, k)$tot.withinss})
# visualize the results; look for the "elbow" where WGSS drops sharply
qplot(x = 1:k_max, y = twcss, geom = 'line',main="WGSS and groups in K-means solution")
#optimal number of groups is 2
km <-kmeans(boston_scaled2, centers = 2)
str(km)
# plot the Boston dataset with clusters
pairs(boston_scaled2, col = km$cluster)
|
dfddbec54ab99a214d1143bd21000feb5295b74e
|
5555c8f3f24a2e571ed10d2c59e90f8477b5a1a4
|
/network_analysis.R
|
17abbb31cb884fbbd6c96d361c0da34350d12a1f
|
[] |
no_license
|
c2huc2hu/mcviz
|
409866e19ad7aaf9b5360783f897b9cfc5be720d
|
a41c00a45eef36c7df17a84855e34059b31c0b24
|
refs/heads/master
| 2020-05-18T13:46:43.205721
| 2015-02-28T05:01:01
| 2015-02-28T05:01:01
| 31,170,494
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 381
|
r
|
network_analysis.R
|
# Build and visualize a comment network from a Facebook post parsed in Python.
library(rPython)
library(igraph)
# Load the Python helper; it is expected to define `tree` (the parsed
# comment tree) in the Python session.
python_file <-file("fb.py", "r")
python.load(python_file)
# Pull the comment tree from Python into R.
post <-python.get("tree")
# NOTE(review): comment_network() and network_cluster() are not defined in
# this file nor provided by rPython/igraph -- presumably sourced elsewhere;
# confirm before running.
net_matrix <-comment_network(post)
net <-graph.edgelist(net_matrix)
# Hierarchically cluster vertices by shortest-path distance.
net.sp <- shortest.paths(net)
net.hc <- hclust(dist(net.sp))
plot(net.hc, labels=FALSE)
# Cut the tree into 7 clusters and draw the colored network.
net <-network_cluster(net, net.hc, 7)
plot(net, vertex.label=NA, vertex.size=3, edge.arrow.size=0.05)
|
c33d747f2aea1e8b7035f3c67864baaf5e5ef9d4
|
268c1997d0fb8a1e068288fe921023f548002ad3
|
/man/summary.Rd
|
c72fa59d68a90fca3fe6f59ff9bc366d696835f5
|
[] |
no_license
|
tom-gu/biosurvey
|
686269b345f87c57ca6f2ef9179ae524badbf23f
|
4f75a60de2ed42441be72ad258d54c8a3ccf3267
|
refs/heads/master
| 2022-12-26T03:19:35.372797
| 2020-10-03T21:43:46
| 2020-10-03T21:43:46
| 271,777,062
| 0
| 0
| null | 2020-06-12T11:00:13
| 2020-06-12T11:00:12
| null |
UTF-8
|
R
| false
| true
| 1,067
|
rd
|
summary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Methods.R
\name{summary}
\alias{summary}
\alias{summary.master_matrix}
\alias{summary,master_matrix-method}
\alias{summary,master_selection-method}
\alias{summary,base_PAM-method}
\alias{summary,PAM_subset-method}
\alias{summary.master_selection}
\alias{summary.base_PAM}
\alias{summary.PAM_subset}
\title{Summary of attributes and results}
\usage{
\method{summary}{master_matrix}(object, ...)
\method{summary}{master_selection}(object, nrow = 6, ncol = 2, ...)
\method{summary}{base_PAM}(object, ...)
\method{summary}{PAM_subset}(object, ...)
}
\arguments{
\item{object}{object of class master_matrix or master_selection.}
\item{...}{additional arguments affecting the summary produced. Ignored in
these functions.}
\item{nrow}{number of rows to be printed for selected_sites in a
master_selection object.}
\item{ncol}{number of columns to be printed for selected_sites in a
master_selection object.}
}
\value{
A printed summary.
}
\description{
Summary of attributes and results
}
|
f19eb12f18e643bddd047544edc7cc62ae33f96d
|
18bd07f679785ef493611720aebe702cfa9a28f5
|
/chapter2.R
|
63c134d8ec7f8e61a9de39880c74339afaf39d4a
|
[] |
no_license
|
Tomajo/StatisticalRethinkingObsolet
|
0aa82c4fcdf935250447dad379174ca6d2d1f970
|
2469cd0f922086e3fc33745bb04e14bb41acae91
|
refs/heads/master
| 2021-09-07T21:46:56.131398
| 2018-03-01T13:19:29
| 2018-03-01T13:19:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 572
|
r
|
chapter2.R
|
##### Page 40, Statistical Rethinking
# Grid approximation of the posterior for the globe-tossing model:
# 6 "water" observations out of 9 tosses, under a flat prior.
p_grid <- seq(0, 1, length.out = 20)                  # grid of candidate water proportions
prior <- rep(1, times = 20)                           # flat (uniform) prior over the grid
likelihood <- dbinom(x = 6, size = 9, prob = p_grid)  # binomial likelihood at each grid point
unstd.posterior <- prior * likelihood                 # unnormalized posterior (prior x likelihood)
posterior <- unstd.posterior / sum(unstd.posterior)   # normalize so the posterior sums to 1
plot(p_grid, posterior, type = "b", xlab = "probability of water", ylab = "posterior probability")
mtext("20 points")
# Alternative priors to experiment with:
# prior <- ifelse(p_grid < 0.5, 0, 1)
# prior <- exp(-5 * abs(p_grid - 0.5))
|
f6fa1406d1e60c2a4415257856c149a82776ea69
|
4b290a0098c237b5c8f2737f70a406eaf78c6b2b
|
/Problems/control_Structures-1.R
|
287670eb7e767f6c8b191f289bd06adda22affbc
|
[] |
no_license
|
Philip-Abraham/JohnsHopkins_R_Programming
|
71849251f51a7f9736f8abd86438c988aa98d1ae
|
1c1278890530beb378fd0707ef4c4c3c7ba8689d
|
refs/heads/master
| 2021-07-04T15:51:44.200953
| 2017-09-28T02:30:07
| 2017-09-28T02:30:07
| 105,092,754
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,122
|
r
|
control_Structures-1.R
|
#This is a valid if/else structure.
x<-100
if(x > 3) {
y <- 10
} else {
y <- 0
}
#So is this one.
x<-100
y <- if(x > 3) {
10
} else {
0
}
# for loops take an interator variable and assign it successive values from a sequence or vector. For
# loops are most commonly used for iterating over the elements of an object (list, vector, etc.)
# This loop takes the i variable and in each iteration of the loop gives it values 1, 2, 3, ..., 10, and then
# exits.
for(i in 1:10) {
print(i)
}
#These four loops have the same behavior.
x <- c("a", "b", "c", "d")
for(i in 1:4) {
print(x[i])
}
for(i in seq_along(x)) {
print(x[i])
}
for(letter in x) {
print(letter)
}
for(i in 1:4) print(x[i]) #if u only have one statement in the "for loop", then curly braces not needed.
#for loops can be nested.
#Be careful with nesting though. Nesting beyond 2-3 levels is often very difficult to read/understand.
x <- matrix(1:6, 2, 3)
for(i in seq_len(nrow(x))) {
for(j in seq_len(ncol(x))) {
print(x[i, j])
}
}
#While loops begin by testing a condition. If it is true, then they execute the loop body. Once the loop
# body is executed, the condition is tested again, and so forth.
# While loops can potentially result in infinite loops if not written properly. Use with care!
count <- 0
while(count < 10) {
print(count)
count <- count + 1}
#Sometimes there will be more than one condition in the test.
#Conditions are always evaluated from left to right.
z <- 5
while(z >= 3 && z <= 10) {
print(z)
coin <- rbinom(1, 1, 0.5)
print("coin=")
print(coin)
if(coin == 1) { ## random walk
z <- z + 1
} else {
z <- z - 1
}
}
#Repeat initiates an infinite loop;The only way to exit a repeat loop is to call break.
x0 <- 1
tol <- 1e-8
repeat {
x1 <- x0/2.22
if(abs(x1 - x0) < tol) {
break
} else {
x0 <- x1
}
}
# next is used to skip an iteration of a loop
x2<-1.5
for(i in 1:100) {
if(i <= 20) {
## Skip the first 20 iterations
next
}
## Do something here
x3<-(x2)^.3
}
|
23fcdaa0dbc2b75bcf107ee0069118a5cd0d2743
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/gRain/R/loadSaveHuginNet.R
|
2823d9f3d0df51f6c62f887c0124a01d8619372e
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,680
|
r
|
loadSaveHuginNet.R
|
##
## Reading / writing Bayesian networks from / to HUGIN net files
##
#' @title Load and save Hugin net files
#'
#' @description These functions can load a net file saved in the
#' 'Hugin format' into R and save a network in R as a file in the
#' 'Hugin format'.
#'
#' @name load-save-hugin
#'
#' @aliases loadHuginNet saveHuginNet
#' @param gin An independence network
#' @param file Name of HUGIN net file. Convenient to give the file the
#' extension '.net'
#' @param description A text describing the network, defaults to
#' \code{file}
#' @param details Debugging information
#' @return An object of class `grain`.
#' @author Søren Højsgaard, \email{sorenh@@math.aau.dk}
#' @seealso \code{\link{grain}}
#' @references Søren Højsgaard (2012). Graphical Independence
#' Networks with the gRain Package for R. Journal of Statistical
#' Software, 46(10), 1-26.
#' \url{http://www.jstatsoft.org/v46/i10/}.
#' @keywords utilities
#' @examples
#'
#'
#' ## Load HUGIN net file
#' tf <- system.file("huginex", "chest_clinic.net", package = "gRain")
#' chest <- loadHuginNet(tf, details=1)
#' chest
#'
#' ## Save a copy
#' td <- tempdir()
#' saveHuginNet(chest, paste(td,"/chest.net",sep=''))
#'
#' ## Load the copy
#' chest2 <- loadHuginNet(paste(td,"/chest.net",sep=''))
#'
#' tf <- system.file("huginex", "golf.net", package = "gRain")
#' golf <- loadHuginNet(tf, details=1)
#'
#' saveHuginNet(golf, paste(td,"/golf.net",sep=''))
#' golf2 <- loadHuginNet(paste(td,"/golf.net",sep=''))
#'
#' @export loadHuginNet
loadHuginNet <- function(file, description=NULL, details=0){
    ## Read a HUGIN net file and compile it into a grain network:
    ## raw sections -> parsed node/potential specs -> universe -> CPTs -> grain.
    ## Default the description to the last path component of 'file'.
    ## NOTE: 'description' is computed but not used further in this function.
    if (is.null(description)) {
        parts <- unlist(strsplit(file, "/"))
        description <- rev(parts)[1]
    }
    raw      <- .readHuginNet(file, details)      # raw net/node/potential sections
    spec     <- .transformHuginNet2internal(raw)  # parsed specifications
    universe <- .asUniverse(spec)                 # node names, levels, dimensions
    plist <- lapply(spec$potentialList, .hpot2cptable, universe)
    grain(compileCPT(plist))
}
#' @export
loadHuginNet2 <- function(file, description=NULL, details=0){
    ## Variant of loadHuginNet() that stops after building the list of
    ## conditional probability tables (no grain network is compiled).
    ## The roxygen tag above was "@export loadHuginNet" (copy-paste error):
    ## it re-exported loadHuginNet and left this function unexported.
    ##
    ## file:        name of the HUGIN net file.
    ## description: text describing the network; defaults to the file's base
    ##              name. (Computed but not used further — kept for symmetry
    ##              with loadHuginNet.)
    ## details:     debugging verbosity level.
    ## Returns a list of cptable objects, one per potential in the net file.
    if (is.null(description))
        description <- rev(unlist(strsplit(file, "/")))[1]
    xxx <-.readHuginNet(file,details)
    yyy <-.transformHuginNet2internal(xxx)
    universe <- .asUniverse(yyy)
    plist <- lapply(yyy$potentialList, .hpot2cptable, universe)
    plist
}
.transformHuginNet2internal <- function(x){
    ## Turn raw net-file sections into parsed node and potential specs,
    ## then force node labels to be unique (repeating until stable).
    nodes <- lapply(x$nodeList, .getNodeSpec)
    pots  <- lapply(x$potentialList, .getPotentialSpec)
    res <- .makeNodeNamesUnique(nodes)
    while (length(res$nonunique) > 0) {
        res <- .makeNodeNamesUnique(res$nodeList)
    }
    out <- structure(list(nodeList = res$nodeList, potentialList = pots))
    class(out) <- "huginnet"
    out
}
.readHuginNet <- function(file, details=0){
    ## Parse a HUGIN .net file into raw text chunks.
    ##
    ## Two passes over the file:
    ##   pass 1 counts lines whose first token is "node" to pre-size the
    ##          result lists;
    ##   pass 2 runs a small state machine
    ##          (start -> net -> run1 -> node|potential -> run1 -> ...)
    ##          collecting the raw lines of each "net", "node" and
    ##          "potential" section.
    ##
    ## file:    path to the HUGIN net file
    ## details: debugging verbosity (passed to .infoPrint)
    ## Returns list(nodeList=..., potentialList=...) where each element is
    ## the character vector of raw lines of one section.
    .infoPrint(details, 1, cat(".HUGIN netfile:", file,"\n"))
    nodeCount <- 0
    con <- file(file, "rb")
    ## Pass 1: count "node" sections (upper bound for list pre-allocation).
    repeat{
        cline <- .getLine(con); #print(cline)
        if (!length(cline))
            break()
        if (.hasToken("node", cline)) ## Fragile if 'node' is the name of a variable...
            nodeCount <- nodeCount + 1
    }
    close(con)
    .infoPrint(details, 3, cat("...there are around", nodeCount, "nodes \n"))
    ## Data structure for holding specification (possibly too long)
    ##
    nodeList <- potentialList <- as.list(rep(NA, nodeCount))
    con <- file(file, "rb")
    currNode <- currPotential <- 1
    state<-"start"
    ## Pass 2: state machine over the lines; 'wline' accumulates the lines
    ## of the section currently being read.
    repeat{
        cline <- .getLine(con); #print(cline)
        if (!length(cline))
            break()
        switch(state,
               "start"={
                   ## Wait for the opening "net" section.
                   if (.hasToken("net",cline)){
                       state="net"
                       .infoPrint(details, 2, cat("..NET action\n"))
                       wline <- cline
                   }
               },
               "net"={
                   ## Accumulate until the closing "}" of the net section.
                   wline <- c(wline, cline)
                   if (.hasToken("}",cline)){
                       state="run1"
                       .infoPrint(details,2,cat("..end NET action\n"))
                   }
               },
               "run1"={
                   ## Between sections: dispatch on "node" or "potential".
                   if (.hasToken("node", cline)){
                       state="node"
                       .infoPrint(details, 2, cat("..NODE action\n"))
                   } else {
                       if (.hasToken("potential", cline)){
                           state="potential";
                           .infoPrint(details,2, cat("..POTENTIAL action\n"))
                       }
                   }
                   wline <- cline
               },
               "node"={
                   ## Accumulate a node section; store it when "}" is seen.
                   wline <- c(wline, cline)
                   if (.hasToken("}",cline)){
                       state="run1";
                       .infoPrint(details,2,cat("..end NODE action\n"))
                       nodeList[[currNode]] <- wline;
                       currNode <- currNode + 1
                   }
               },
               "potential"={
                   ## Accumulate a potential section; store it when "}" is seen.
                   wline <- c(wline, cline)
                   if (.hasToken("}",cline)){
                       state="run1";
                       .infoPrint(details,2, cat("..end POTENTIAL action\n"))
                       potentialList[[currPotential]] <- wline;
                       currPotential <- currPotential + 1
                   }
               }
               )
    }
    close(con)
    ## Drop pre-allocated slots that were never filled (still all-NA).
    nodeList <- nodeList[!sapply(lapply(nodeList, is.na),all)]
    potentialList <- potentialList[!sapply(lapply(potentialList, is.na),all)]
    value <- structure(list(nodeList=nodeList, potentialList=potentialList))
    return(value)
}
.asUniverse <- function(from){
    ## Flatten a parsed net spec into a "universe": full node labels, short
    ## variable names, state labels per node, and the number of states.
    nl <- from$nodeList
    short  <- sapply(nl, function(n) n$nodeVar)
    labels <- sapply(nl, function(n) n$nodeLabel)
    states <- lapply(nl, function(n) n$nodeStates)
    names(states) <- labels
    ## Named integer vector of state counts (unlist keeps the names).
    nlev <- c(lapply(states, length), recursive = TRUE)
    list(nodes = labels, short = short, levels = states, nlev = nlev)
}
.hpot2cptable <- function(cpot, universe){
    ## Map short variable names (child first, then parents) to full node
    ## labels, then build a conditional probability table for the child.
    shortNames <- unlist(cpot[c("nodeVar", "parentVar")])
    fam <- universe$nodes[match(shortNames, universe$short)]
    cptable(fam, values = cpot$potential, levels = universe$levels[[fam[1]]])
}
.getLine <- function(con) {
    ## Read exactly one line from the connection; character(0) at EOF.
    readLines(con, n = 1)
}
.hasToken <- function(token, cline) {
    ## TRUE when the first space-separated word of 'cline' (after stripping
    ## leading spaces) equals 'token'; FALSE for empty or blank lines.
    trimmed <- gsub("^ +", "", cline)
    first <- unlist(strsplit(trimmed, " "))[1]
    ## isTRUE() maps the NA from an empty line to FALSE, matching the
    ## original's explicit is.na() guard.
    isTRUE(first == token)
}
.tokenIdx <- function(token, x){
    ## Indices of the elements of 'x' matching regular expression 'token'.
    ##
    ## Robust replacement for which(as.logical(lapply(x, grep, ...))):
    ## coercing a list that contains zero-length grep() results to logical
    ## is unreliable, whereas grepl() always yields a proper logical.
    ## any() also handles elements that are themselves multi-line vectors.
    which(vapply(x, function(d) any(grepl(token, d)), logical(1)))
}
.capWords <- function(s, strict = FALSE) {
    ## Capitalize the first letter of every space-separated word in each
    ## element of 's'; with strict = TRUE the rest of each word is
    ## lower-cased. Result names follow the input's names (if any).
    capitalize <- function(words) {
        heads <- toupper(substring(words, 1, 1))
        tails <- substring(words, 2)
        if (strict) tails <- tolower(tails)
        paste(heads, tails, sep = "", collapse = " ")
    }
    sapply(strsplit(s, split = " "), capitalize, USE.NAMES = !is.null(names(s)))
}
## .toCamel <- function(s){
## s<-gsub(" +"," ",s)
## s<-unlist(strsplit(s, " "))
## paste(sapply(s, .capWords),collapse='')
## }
.toCamel <- function(s){
    ## Collapse a (possibly multi-word) string to camelCase: the first word
    ## is kept as-is and every following word gets a capitalized first
    ## letter; runs of spaces are squeezed first.
    words <- unlist(strsplit(gsub(" +", " ", s), " "))
    rest <- sapply(words[-1], .capWords)
    paste(c(words[1], rest), collapse = '')
}
.getNodeSpec <- function(nodeSpec){
    ## Parse one raw "node" section into list(nodeVar, nodeLabel, nodeStates).
    ##
    ## nodeSpec: character vector of raw lines of a node section.
    ## nodeVar:  the identifier after the "node" keyword;
    ## nodeLabel: the "label = ..." value, camel-cased and stripped of
    ##            punctuation (falls back to nodeVar when empty);
    ## nodeStates: the "states = (...)" values, camel-cased, with spaces
    ##             replaced by dots.
    tmp <- nodeSpec[.tokenIdx("node", nodeSpec)]
    nodeVar <- gsub("node +","",tmp)[1]
    nodeVar <- gsub(" +","",nodeVar)
    ## Extract and clean the label: drop 'label =', trailing ';', quotes,
    ## and squeeze repeated spaces.
    tmp <- nodeSpec[.tokenIdx("label", nodeSpec)]
    nodeLabel <- gsub(" +label += +","",tmp);
    nodeLabel <- gsub(";", "", nodeLabel)
    nodeLabel <- gsub('"',"", nodeLabel)
    nodeLabel <- gsub(" +"," ",nodeLabel)
    if (length(nodeLabel) && nchar(nodeLabel)>0){
        nodeLabel <- .toCamel(nodeLabel)
        ## 'nl' keeps only alphanumerics to test whether the label is
        ## purely numeric; a purely numeric label is not a valid R name,
        ## so it gets an "X" prefix (as make.names would do).
        nl <- gsub("[^[:alnum:]]","",nodeLabel)
        nodeLabel <- gsub("[^[:alnum:]|\\.]","",nodeLabel)
        base<-as.character(0:9)
        if(subsetof(unlist(strsplit(nl,"")), base)){
            nodeLabel <- paste("X",nodeLabel,sep='')
        }
    } else {
        ##if (nchar(nodeLabel)==0)
        nodeLabel <- nodeVar
    }
    ## Extract the state names from 'states = ( "a" "b" ... );'.
    tmp <- nodeSpec[.tokenIdx("states", nodeSpec)]
    nodeStates <- gsub(" +states += +","",tmp);
    nodeStates <- gsub("[\\(,\\);]","",nodeStates);
    nodeStates <- unlist(strsplit(nodeStates, '\\"'))
    nodeStates <- sapply(nodeStates, function(d) gsub("^ +","",d))
    nodeStates <- nodeStates[sapply(nodeStates, nchar)>0]
    nodeStates <- sapply(nodeStates, .toCamel)
    nodeStates <- gsub(" +",".", nodeStates)
    names(nodeStates)<-NULL
    value <- list(nodeVar=nodeVar, nodeLabel=nodeLabel, nodeStates=nodeStates)
    value
}
.getPotentialSpec <- function(potSpec){
    ## Parse one raw "potential" section into
    ## list(nodeVar, parentVar, potential).
    ##
    ## potSpec: character vector of raw lines of a potential section, e.g.
    ##          'potential ( a | b c ) { data = ((...)); }'.
    ## The header line gives the child variable and its parents; the
    ## 'data = ...' payload is flattened into a numeric vector.
    tmp <- potSpec[.tokenIdx("potential", potSpec)]
    tmp <- gsub("potential +","", tmp)
    tmp <- gsub("[\\(,\\),|]","", tmp)
    tmp <- gsub(" +"," ", tmp)
    tmp <- unlist(strsplit(tmp," "))
    tmp <- tmp[sapply(tmp, nchar)>0]
    ## First token is the child; the rest are its parents.
    nodeVar <- tmp[1]
    parentVar <- tmp[-1]
    ## Collapse the whole section to one string and cut out 'data = ...;'.
    sss <- paste(potSpec,collapse="") ##; ss <<- sss
    sss2 <- gsub("^.*data[[:space:]]*=([^;]*);(.*)", "\\1", sss) ##; ss2<<-sss2
    ##sss3: ((( 0.5 1.2E-5 ) ( 3E3 0.5 )) ( 0.5 0.5 ) ( 0.5 0.5 )))
    sss3 <- gsub("\\)[^\\)]*\\(", ") (", sss2) ##; ss3<<-sss3
    ## sss4: " 0.5 1.2E-5 3E3 0.5 0.5 0.5 0.5 0.5 "s
    sss4 <- gsub("[\\(,\\),\\}]","", sss3)
    ## sss5: remove leading white space: "0.5 1.2E-5 3E3 0.5 0.5 0.5 0.5 0.5 "
    sss5 <- gsub("^[[:space:]]*","",sss4)
    ## sss6: remove trailing white space: "0.5 1.2E-5 3E3 0.5 0.5 0.5 0.5 0.5"
    sss6 <- gsub("[[:space:]]$*","",sss5)
    ## sss7: split to atoms
    sss7 <- strsplit(sss6, " +")[[1]]
    ###: Now create numerical values
    pot <- as.numeric( sss7 )
    ## Parents are reversed to match HUGIN's data-ordering convention.
    value <- list(nodeVar=nodeVar, parentVar=rev(parentVar), potential=pot)
    value
}
.makeNodeNamesUnique <- function(nodeList2){
    ## Ensure node labels are unique: any label shared by more than one
    ## node is replaced by that node's (unique) variable name.
    ##
    ## nodeList2: list of node specs, each with $nodeVar and $nodeLabel.
    ## Returns list(nodeList = possibly-modified specs,
    ##              nonunique = labels that were duplicated on entry) so the
    ## caller can iterate until no duplicates remain.
    ## Matrix with one row per node: column 1 = nodeVar, column 2 = nodeLabel.
    nl <- t(sapply(nodeList2, function(d) unlist(d[1:2])))
    nonunique <- names(which(table(nl[, 2]) > 1))
    if (length(nonunique) > 0) {
        ## (typo fix: was "appears mode than once")
        cat("Label(s): {", nonunique, "} appears more than once in NET file\n")
        for (cnu in nonunique) {
            idx <- which(cnu == nl[, 2])
            for (j in idx) {
                a <- nodeList2[[j]]$nodeVar
                cat("  Replacing label", cnu, " with node name", a, "\n")
                nodeList2[[j]]$nodeLabel <- a
            }
        }
    }
    return(list(nodeList = nodeList2, nonunique = nonunique))
}
#' @export
#' @rdname load-save-hugin
saveHuginNet <- function(gin, file, details=0){
    ## Write a grain network to 'file' in HUGIN .net format:
    ## a trivial "net" header, one "node" section per variable (with dummy
    ## positions on a circle), then one "potential" section per CPT.
    if (!inherits( gin, "grain"))
        stop("Not a grain object")
    if (is.null(gmd <- getgin(gin, "universe")))
        stop("Strange error: no universe in network")
    if (is.null(cptlist <- getgin(gin, "cptlist"))){
        cat("Object does not have 'cptlist' component; creating one for you...\n")
        cptlist <- make_cptlist(gin)
    }
    vlab <- gmd$levels
    vnam <- gmd$nodes
    nn <- length(vlab)
    ## Place the nn nodes evenly on a circle of radius 100 so HUGIN's GUI
    ## has some (arbitrary but valid) layout coordinates.
    th <- cumsum(c(0,rep(2*pi/nn, nn-1)))
    r <- 100
    coords <- lapply(th, function(d) round(r+r*c(cos(d), sin(d))))
    con <- file(file, "wb")
    ## Write (trivial) net specification
    ##
    writeLines("net\n{", con)
    writeLines(" node_size = (100 30);", con)
    writeLines("\n}\n\n", con)
    ## Write node specification
    ##
    for (ii in 1:length(vlab)){
        st <-paste("node ", vnam[ii],"\n","{","\n",sep='')
        writeLines(st, con, sep="")
        ## cat(st)
        st <- paste(" label = \"\";","\n")
        writeLines(st, con, sep="")
        ## cat(st)
        st <- paste(" position = (", paste(coords[[ii]], collapse=' '), ");\n")
        writeLines(st, con, sep="")
        ## cat(st)
        st2 <- sapply(vlab[[ii]], function(d) paste('"',d,'"',sep=''))
        st <- paste(" states = (", paste(st2, collapse=' '), ");\n")
        writeLines(st, con, sep="")
        ## cat(st)
        st <- paste("}\n")
        writeLines(st, con, sep="")
        ## cat(st)
    }
    ## Write one "potential" section per CPT.
    for (ii in 1:length(cptlist)){
        cpot <- cptlist[[ii]]
        nam <- varNames(cpot) ## BRIS
        lev <- valueLabels(cpot) ## BRIS
        val <- cpot ## BRIS
        v <- nam[1]
        pa <- nam[-1]
        lev <- rev(lev[-1])
        wval <- val
        ## Recursively split the flat value vector into nested lists, one
        ## level per parent, matching HUGIN's nested-parenthesis data layout.
        if (length(lev)>0){
            for (kk in 1:length(lev)){
                ##print("splitVec:"); print(wval); print(class(wval))
                wval<-splitVec(wval,length(lev[[kk]]))
            }
        }
        ##print(wval); print(class(wval))
        plx <- printlist(wval)
        if (length(pa)){
            ## Conditional potential: "potential ( v | parents )".
            st <- paste("potential (",v, "|", paste(rev(pa), collapse=' '),")\n")
            writeLines(st,con,sep="")
            ## cat(st)
            st <- "{\n";
            writeLines(st,con,sep="")
            ## cat(st)
            st <- paste(" data = \n")
            writeLines(st,con,sep="")
            ## cat(st)
            ##a<-lapply(plx, cat, "\n")
            a<-lapply(plx, writeLines, con, sep="\n")
            st <- paste(";\n}\n")
            writeLines(st,con,sep="")
            ## cat(st)
        } else {
            ## Marginal potential: "potential ( v )".
            st <- paste("potential (", v, ")\n")
            writeLines(st,con,sep="")
            ## cat(st)
            st <- "{\n";
            writeLines(st,con,sep="")
            ## cat(st)
            st <- paste(" data = ", plx, ";\n")
            writeLines(st,con,sep="")
            ## cat(st)
            st <- "}\n\n";
            writeLines(st,con,sep="")
            ## cat(st)
        }
    }
    close(con)
}
|
a76c4dc435b4ea57b70703f854d6b212a6260494
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/GGMncv/R/predict.R
|
9aa5ace38a5b984503eb102a57c1e2cfa618b643
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,219
|
r
|
predict.R
|
#' Predict method for \code{ggmncv} Objects
#'
#' @description Predicted values based on a \code{ggmncv} object
#'
#' @param object An object of class \code{ggmncv}
#'
#' @param train_data Data used for model fitting.
#'
#' @param newdata An optional data frame in which to look for variables with which to predict.
#' If omitted, the fitted values are used.
#'
#' @param ... Currently ignored
#'
#' @return A matrix of predicted values
#'
#' @examples
#' # data
#' Y <- scale(Sachs)
#'
#' # test data
#' Ytest <- Y[1:100,]
#'
#' # training data
#' Ytrain <- Y[101:nrow(Y),]
#'
#' fit <- ggmncv(cor(Ytrain), n = nrow(Ytrain))
#'
#' pred <- predict(fit, train_data = Y,
#' newdata = Ytest)
#'
#' round(apply((pred - Ytest)^2, 2, mean), 2)
#' @export
predict.ggmncv <- function(object, train_data, newdata = NULL, ...){
    ## Predicted values from the node-wise regressions implied by a fitted
    ## GGM: each column j is predicted from all other columns.
    ##
    ## object:     a fitted ggmncv object.
    ## train_data: the data used for model fitting (the object itself only
    ##             stores the correlation matrix).
    ## newdata:    optional data frame/matrix to predict for; if NULL the
    ##             scaled training data is used.
    ## Returns an n x p matrix of predicted values.
    object$x <- train_data
    ## A symmetric matrix here means a correlation/covariance matrix was
    ## passed instead of the raw data matrix.
    if (isSymmetric(as.matrix(object$x))) {
        stop("data matrix not found")
    }
    if (!is.null(newdata)) {
        x_scale <- as.matrix(newdata)
    } else {
        x_scale <- as.matrix(scale(object$x))
    }
    ## Row j of 'coefs' holds the regression coefficients for predicting
    ## column j from the remaining columns.
    coefs <- as.matrix(unclass(coef(object)))
    p <- ncol(x_scale)
    ## seq_len() is safe when p == 0, unlike 1:p.
    y_hat <- sapply(seq_len(p), function(j) x_scale[, -j] %*% coefs[j, ])
    return(y_hat)
}
|
2a0a185e33779f7c2ec2d521bbcf77df79d1e60a
|
51e54e1ce824a1953d69650b56dfb31ba4b1aa69
|
/ggplot_demo.R
|
6aec16ecba2f27ddfd1a2a6a2368b583a6c4c9b3
|
[] |
no_license
|
DHS-OEDA/spatial_workshop
|
4ba3a2aeae4da782ba61688f42f90fed029e1e41
|
7b0e686dde13c12df5f3c429d1c2fa9ff9759c3e
|
refs/heads/master
| 2020-03-12T11:14:44.700781
| 2018-04-24T20:25:19
| 2018-04-24T20:25:19
| 130,591,810
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,275
|
r
|
ggplot_demo.R
|
# static map demo in R
# this is just a quick demo of the ggplot2 development version's simple features compatibility
# To run this script you will need a US Census API key available here:
# https://api.census.gov/data/key_signup.html
# Then call this function to store your key.
# tidycensus::census_api_key("MY_KEY_HERE", install = TRUE, overwrite = TRUE)
library(tidyverse)
library(tidycensus)
library(sf)
library(viridis)
# Install the development ggplot2 (needed at the time for geom_sf support),
# then attach it; this overwrites any installed ggplot2 version.
devtools::install_github("tidyverse/ggplot2")
require(ggplot2)
# Decennial census variable codes mapped to readable race/ethnicity names.
# NOTE(review): presumably 2010 SF1 table codes — verify against the
# tidycensus variable listing before reuse.
racevars <- c(White = "P0050003",
              Black = "P0050004",
              Asian = "P0050006",
              Hispanic = "P0040003")
# Tract-level counts for Multnomah County, OR, with geometry attached;
# summary_var P0010001 is the total population used as the denominator.
multnomah <- get_decennial(geography="tract", variables = racevars, state="OR", county = "Multnomah County", geometry=TRUE, summary_var = "P0010001")
head(multnomah)
# Facet labels keyed by the raw variable codes.
labels <- c("P0050003" = "White", "P0050004" = "Black", "P0050006" = "Asian", "P0040003" = "Any Hispanic")
# Percent of total population per tract, faceted by group; CRS 26910 is
# UTM zone 10N (covers Oregon).
multnomah %>%
  mutate(pct = 100 * (value / summary_value)) %>%
  ggplot(aes(fill = pct, color = pct)) +
  facet_wrap(~variable, labeller=labeller(variable = labels)) +
  geom_sf() +
  coord_sf(crs = 26910) +
  scale_fill_viridis(option = "C") +
  scale_color_viridis(option = "C") +
  ggtitle('Select racial and ethnic demographics, Multnomah County')
|
960fff18b6ca1625802ca989387f1313321b7984
|
6490ecf3d1374ac7f4fc146cc4d467307e3870ed
|
/src/ndvi_over_time.R
|
718370632a444836dfcd5cf1df2dda75e5408620
|
[] |
no_license
|
danyellshub/Hayman_Fire_NDVI_Trace
|
1a0ca73962f09c93758efe583fef3c32ed141d71
|
5373ffc47cc09a7db74ba57e0469b63d8c05099d
|
refs/heads/master
| 2020-07-18T00:07:13.759696
| 2019-09-13T20:47:13
| 2019-09-13T20:47:13
| 206,130,645
| 0
| 0
| null | 2019-09-03T17:10:15
| 2019-09-03T17:10:14
| null |
UTF-8
|
R
| false
| false
| 1,962
|
r
|
ndvi_over_time.R
|
# NDVI traces for burned vs. unburned sites around the Hayman fire:
# full time series, annual summer means, monthly means, and a
# pre/post-burn monthly comparison.
library(tidyverse)
library(tidyr)
library(ggthemes)
library(lubridate)
#Reading in the data and removing na
ndvi <- read_csv('data/hayman_ndvi.csv') %>%
  rename(burned=2,unburned=3) %>%
  filter(!is.na(burned),
         !is.na(unburned))
# Converting from wide to long data (pivot_longer supersedes gather)
ndvi_long <- pivot_longer(ndvi,
                          cols = -DateTime,
                          names_to = 'site',
                          values_to = 'NDVI')
# Plotting all the data
ggplot(ndvi_long,aes(x=DateTime,y=NDVI,color=site)) +
  geom_point(shape=1) +
  geom_line() +
  theme_few() +
  scale_color_few() +
  theme(legend.position=c(0.3,0.3))
# Summarizing the data by year (growing-season months May-September only)
ndvi_annual <- ndvi_long %>%
  mutate(year=year(DateTime)) %>%
  mutate(month=month(DateTime)) %>%
  filter(month %in% c(5,6,7,8,9)) %>%
  group_by(site,year) %>%
  summarize(mean_NDVI=mean(NDVI)) %>%
  ungroup()
#Here making an annual plot
ggplot(ndvi_annual,aes(x=year,y=mean_NDVI,color=site)) +
  geom_point(shape=1) +
  geom_line() +
  theme_few() +
  scale_color_few() +
  theme(legend.position=c(0.3,0.3))
# Summarizing the data by month
ndvi_month <- ndvi_long %>%
  mutate(year=year(DateTime)) %>%
  mutate(month=month(DateTime)) %>%
  group_by(site,month)%>%
  summarize(mean_NDVI=mean(NDVI)) %>%
  ungroup()
#Here making a month plot
ggplot(ndvi_month,aes(x=month,y=mean_NDVI,color=site)) +
  geom_point(shape=1) +
  geom_line() +
  theme_few() +
  scale_color_few() +
  theme(legend.position=c(0.6,0.2))
#pre post analysis: monthly means split at the 2002 fire (<= 2003 = pre)
ndvi_month_pre_post <- ndvi_long %>%
  mutate(year = year(DateTime),
         month = month(DateTime),
         treatment = cut(year, breaks = c(0,2003,2020),
                         labels = c('pre-burn', 'post-burn'))) %>%
  group_by(month, site, treatment) %>%
  summarize(mean_ndvi = mean(NDVI)) %>%
  ungroup()
ggplot(ndvi_month_pre_post,aes(x=month,y=mean_ndvi,color=treatment)) +
  geom_point(shape=1) +
  geom_line() +
  theme_few() +
  scale_color_few() +
  theme(legend.position=c(0.6,0.2)) +
  facet_wrap(~site)
|
1671e1af79fee58f725069915a39ff4c37b0f64b
|
bc90e649b58c73662bc993b49daebf780ad68f7f
|
/beer_pca.r
|
c753835e06f82c267a5bea1a4dd9863b4528a760
|
[] |
no_license
|
YangJongWon/study
|
cd71d5423ce97ca49206f0b9573d5de57059d2b4
|
35170f8ffa3c95d765cffa35595ba53b543ba28b
|
refs/heads/master
| 2020-03-31T15:49:38.947966
| 2018-12-14T00:41:34
| 2018-12-14T00:41:34
| 152,352,994
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 507
|
r
|
beer_pca.r
|
# Principal component analysis of beer attribute data.
install.packages("readxl")
install.packages("HSAUR")
install.packages("corrplot")
library(readxl)
library(HSAUR)
library(corrplot)
# NOTE(review): hard-coded setwd() makes this script machine-specific.
setwd("F:/BigData3/R/beer")
beer<-read_xls("beer_pca.xls")
beer_cost<-beer
# Invert the cost scale so that "higher is better" for every variable.
beer_cost$cost<-max(beer_cost$cost)-beer_cost$cost
# NOTE(review): princomp() has no 'scale' argument (that is prcomp());
# 'scale=T' is silently absorbed by '...' — confirm whether standardized
# variables (cor = TRUE) were intended.
beer_cost.pca<-princomp(beer_cost,scale=T)
beer_cost.pca
summary(beer_cost.pca)
biplot(beer_cost.pca)
screeplot(beer_cost.pca,type = 'l',pch=19)
?predict()
str(beer_cost)
#pred_beer<-predict(beer_cost)
# Correlation matrix of the (cost-inverted) variables, shown numerically.
bc<-cor(beer_cost)
corrplot(bc,method='number')
|
75b925769b933596e5267516ce2cfce880ae3671
|
5c5915807ea728324875a615a1b9c5b919f2962f
|
/modules/amAnalysisMergeLandCover/amServer.R
|
7cfd944d3c62d61edff8803bc39b5b21f256e8fa
|
[] |
no_license
|
demel/AccessMod_shiny
|
3d969228ff6ca8a9076a30a75fbf94ed60a87d55
|
70ffe0ba8ea6c558466689fdb419e3061afb971e
|
refs/heads/master
| 2021-01-17T06:44:53.433833
| 2015-11-02T18:27:09
| 2015-11-02T18:27:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,059
|
r
|
amServer.R
|
# ___ __ ___ __ ______
# / | _____ _____ ___ _____ _____ / |/ /____ ____/ / / ____/
# / /| | / ___// ___// _ \ / ___// ___// /|_/ // __ \ / __ / /___ \
# / ___ |/ /__ / /__ / __/(__ )(__ )/ / / // /_/ // /_/ / ____/ /
# /_/ |_|\___/ \___/ \___//____//____//_/ /_/ \____/ \__,_/ /_____/
#
# Module 1 : Add road and barrier to an existing land cover in a given order.
#
# input : road, barrier, land cover
# output : merged land cover
observe({
amModEnabled<-listen$tabControl_module_selector
if(isTRUE(!is.null(amModEnabled) && amModEnabled)){
#------------------------------------------------------------------------------#
# MERGING NEW LANDCOVER
#------------------------------------------------------------------------------#
# populate selectize input
observe({
mapStack<-dataList$raster[grep('^stack_',dataList$raster)]
updateSelectInput(session,'mapStack',choices=mapStack,selected=mapStack)
})
# button to hide stack items
observe({
rmMerge<-input$btnRmMerge
if(!is.null(rmMerge)&& rmMerge>0){
updateSelectInput(session = session, inputId = "mapStack",selected='')
}
})
observe({
addMerge<-input$btnAddMerge
if(!is.null(addMerge)&& addMerge>0){
mapStack<-dataList$raster[grep('^stack_',dataList$raster)]
updateSelectInput(session = session, inputId = "mapStack",choices=mapStack,selected=mapStack)
}
})
observeEvent(input$btnDeleteStack,{
sel<-amNameCheck(dataList,input$mapStack,'raster')
if(length(sel)>0){
for(m in sel){
rmRastIfExists(m)
}
amUpdateDataList(listen)
}
})
stackConflictTable<-reactive({
#TODO: use only stack present in mapStack
amErrorAction(title='stack conflict table',{
#sel<-amNameCheck(dataList,dataList$raster[grep('^stack*',dataList$raster)],'raster')
sel<-amNameCheck(dataList,input$mapStack,'raster')
btnStack<-input$btnAddStackRoad
btnStack<-input$btnAddStackLcv
listen$updatedConflictTable
tbl<-data.frame(map="-",class="-",label="-")
if(!is.null(sel)){
tbl=data.frame()
sel <- sel[! sel %in% grep('^stack_barrier*',sel,value=T)]
for(m in sel){
t<-read.table(text=execGRASS('r.category',map=m,intern=T),
sep="\t",
stringsAsFactors=F
)
t$map=m
names(t)<-c('class','label','map')
tbl=rbind(t,tbl)
}
dupClass <- tbl$class[duplicated(tbl$class)]
tbl <- tbl[tbl$class %in% dupClass,]
tbl <- tbl[order(tbl$class),]
}
return(tbl)
})
})
# validation
observe({
tbl<-stackConflictTable()
stackList <- amNameCheck(dataList,input$mapStack,'raster')
stackTags <- input$stackTag
rmArtefact<-input$cleanArtefact
amErrorAction(title='stack merge validation',{
# conflict table update
if(!is.null(tbl)){
isolate({
nRowConflict <- nrow(tbl)
# test if nrow >0 and send a message to UI in case of conflict
if(nRowConflict>1){
tbl$newClass=tbl$class
}else{
tbl<-data.frame(map=as.character(NA),class=as.integer(NA),label=as.character(NA),newClass=as.integer(NA))
}
# render hotable with a possibly empty table
output$stackConflict<-renderHotable({tbl},
stretched='last',readOnly=c(1,2,3)
)
})
}
# validation process
if(TRUE){
err = character(0)
stackItemMissing <- isTRUE(any(sapply(stackList,is.null)))
hasConflict <- isTRUE(!is.null(tbl) && nrow(tbl) > 1)
hasTag <- isTRUE(!any(stackTags=='', is.null(stackTags), nchar(stackTags)<1))
stackLcvName <- config$dataClass[config$dataClass$id=="amStackLcv","class"]
stackNotOneLcv <- !isTRUE(length(grep(stackLcvName,stackList))==1)
if(stackItemMissing){
err <- c(err,"Stack listed not found, relaunch the application.")
}else{
if(hasConflict){
nConf <- duplicated(tbl$class)
nConf <- length(nConf[nConf])
confPlur <- ifelse(nConf>1,"conflicts","conflict")
err <- c(err,paste(nConf,confPlur,"of class found. See under \"Conflicting classes among items in the stack\""))
}else{
if(stackNotOneLcv){
err <- c(err,"At least one land cover stack item is required to proceed.")
}else{
if(!hasTag){
err <- c(err,"Please enter a least one tag")
}
}
}
}
if(length(err)>0){
err <- HTML(paste("<div>",icon('exclamation-triangle'),err,'</div>',collapse=""))
msgList <- tagList(tags$b('Validation issues:'),err)
disBtn <- TRUE
}else{
msgList <- tagList(p(''))
disBtn <- FALSE
}
}
# set outputname if no validation problem
if(!is.null(rmArtefact) && hasTag && !disBtn){
stackTag <- amGetUniqueTags(stackTags)
addTag<-function(base,tag=stackTag,sepT=config$sepTagFile,sepC=config$sepClass){
paste(c(base,paste(tag,collapse=sepT)),collapse=sepC)
}
# existing dataset (use local scoping)
outTxt <- function(x,condition=TRUE){
if(isTRUE(condition)){
e <- x %in% dataList$df$origName
y <- paste(amGetClass(x,config$sepClass),'[',paste(stackTag,collapse=" "),']')
if(e){
return(sprintf(" %s <b style=\"color:#FF9900\"> (overwrite warning)</b> ",y))
}else{
return(sprintf("%s <b style=\"color:#00CC00\">(ok)</b>",y))
}
}else{
NULL
}
}
# set names
merged <- addTag(amClassInfo('amLcvM')$class)
bridges <- addTag(amClassInfo('amLcvMB')$class)
# output lines
out <- c(outTxt(merged),outTxt(bridges))
# take ony merged name if not rm artefect
if(!rmArtefact)out=out[1]
outMap <- tagList(
tags$b('Output dataset:'),
HTML(paste("<div>",icon('sign-out'),out,"<div/>",collapse=""))
)
}else{
outMap=""
}
output$stackNameInfo<-renderUI(outMap)
output$stackWarning<-renderUI({msgList})
amActionButtonToggle(id='btnMerge',session,disable=disBtn)
})
})
# if btn correct stack is pressed
# reclassify raster.
# NOTE: we can't do a simple update using r.mapcalc or r.category : we need to keep cat label.
observeEvent(input$btnCorrectStack,{
# get input table with modified column
cTable<-hot.to.df(input$stackConflict)
nCtbl<-nrow(cTable)
# if number of row is greater than one
if(nCtbl>1){
# for each map in table
for(m in cTable$map){
# get tables orig and new classes
oClass = cTable[cTable$map==m,'class']
nClass = cTable[cTable$map==m,'newClass']
# if texts in classes are different
if(!identical(paste(oClass),paste(nClass))){
# read table from raster category
tbl<-read.csv(
text=execGRASS('r.category',
map=m,
intern=T),
sep='\t',
header=F,
stringsAsFactors=F
)
tbl[is.na(tbl$V2),'V2']<-"no label"
rulesFile<-tempfile()
# extract all old classes
clOrig=tbl$V1
clNew=tbl$V1
clNew[clNew==oClass]<-nClass
# compose a new rules file and
rules=paste(clOrig,"=",clNew," ",tbl$V2,collapse="\n")
write(x=rules,file=rulesFile)
execGRASS('g.copy',raster=c(m,'tmp_reclass'),flags='overwrite')
execGRASS('r.reclass', input='tmp_reclass',output='tmp_reclass_2', rules=rulesFile,flags='overwrite')
execGRASS('r.resample',input='tmp_reclass_2',output=m,flags='overwrite')
# signal change to reactive stack conflict table using a listener.
listen$updatedConflictTable<-runif(1)
}
}
}
})
# merge action
observeEvent(input$btnMerge,{
timeCheck<-system.time({
amActionButtonToggle(session=session,id='btnMerge',disable=TRUE)
stackTag<-input$stackTag
sel<-amNameCheck(dataList,input$mapStack,'raster')
if(!is.null(sel) && isTRUE(nchar(stackTag)>0)){
amErrorAction(title='Module 1: merge process',{
updateTextInput(session,'stackTag',value="")
selL<-length(sel)
cleanBridge<-input$cleanArtefact
inc<-1/(selL+1)*100
message('Merging land cover map requested.')
stackTag<-amSubPunct(stackTag,config$sepTagFile,rmTrailingSep=T,rmLeadingSep=T,rmDuplicateSep=T)
addTag<-function(base,tag=stackTag,sepT=config$sepTagFile,sepC=config$sepClass){
paste(c(base,paste(tag,collapse=sepT)),collapse=sepC)
}
# set names
merged<-addTag(amClassInfo('amLcvM')$class)
bridges<-addTag(amClassInfo('amLcvMB')$class)
mapPosition=1
tempBase<-'tmp__'
isFirstMap=TRUE
rmRastIfExists('tmp_*')
if(amRastExists('MASK'))execGRASS('r.mask',flags='r')
message(paste('stack will be merged in this order:',paste(sel,collapse=', ')))
amUpdateProgressBar(session,"stackProgress",1)
# Use barrier as mask for each stack element
# keep order in tempMap name. eg. tmp__12_stack_road_test
for(i in 1:length(sel)){
map<-sel[i]
message(paste('Proceding map',map,'MASK is',amRastExists('MASK')))
if(length(grep('stack_barrier__', map))>0){
if(amRastExists('MASK')){
execGRASS('r.mapcalc',expression=paste("MASK=isnull(",map,")?MASK:null()"),flags="overwrite")
}else{
execGRASS('r.mask',raster=map,flags=c('i'))
}
}else{
tempMap=paste0(tempBase,mapPosition,'_',map)
execGRASS('r.mapcalc',expression=paste(tempMap,"=",map),flags='overwrite')
}
mapPosition=mapPosition+1
amUpdateProgressBar(session,'stackProgress',i*inc)
}
#removing temp mask and active mask
rmRastIfExists('tmp_mask__*')
if(amRastExists('MASK'))execGRASS('r.mask',flags='r')
# get list of tmp__stack... maps.
tempMapList<-execGRASS('g.list',type='raster',pattern=paste0(tempBase,'*'),intern=TRUE)
if(length(tempMapList)>1){
execGRASS('r.patch',input=paste(tempMapList,collapse=','),output=merged,flags=c('overwrite'))
}else{
execGRASS('g.copy',raster=paste0(tempMapList,',',merged),flags='overwrite')
}
# In accessmod accessibility analysis, a null cell is a barrier, e.g. a river, mountain, militarized zone.
# When we patch road maps to land cover maps, small overlaps can appear on top of predefined barrier.
# Those overlaps act as briges when used in travel time analyis, thus, create shortcuts and wrong calculation.
# If we used densified lines during rasterization process of roads, we can safely set the "one cell diagonal
# bridge" as barrier without breaking road continuity.
#
# Example:
# X=non-null cell in <road_map>; N=null in <merged_map>; A=non-null cell in <merged_map>
# X will be set as null in fallowing cases:
#
# X N
# N A
#
# N X
# A N
if(cleanBridge){
message('Cleaning artefact/bridges of one sel')
fromRoad<-sel[grep('stack_road',sel)]
amBridgeFinder(fromRoad,merged,bridges)
amBridgeRemover(bridges,removeFromMap=merged)
}
execGRASS('r.colors',map=merged,color='random')
rmRastIfExists(paste0(tempBase,'*'))
message(paste(merged,'created'))
amUpdateProgressBar(session,'stackProgress',100)
amUpdateDataList(listen)
})
}
amActionButtonToggle(session=session,id='btnMerge',disable=FALSE)
})
print(timeCheck)
})
#------------------------------------------------------------------------------#
# ADD TO STACK LANDCOVER
#------------------------------------------------------------------------------#
observe({
lcv<-dataList$raster[grep('^land_cover__',dataList$raster)]
lcvTable<-dataList$table[grep('^table_land_cover__',dataList$table)]
if(length(lcv)<1)lcv=""
if(length(lcvTable)<1)lcvTable=""
updateSelectInput(session,'landCoverSelect',choices=lcv)
updateSelectInput(session,'landCoverSelectTable',choices=lcvTable)
})
# toggle buttons to merge lcv table and add to stack
observe({
lS<-amNameCheck(dataList,input$landCoverSelect,'raster')
# lT<-amNameCheck(dataList,input$landCoverSelectTable,'table',dbCon=isolate(grassSession$dbCon))
tbl <- hot.to.df(input$landCoverRasterTable)
if(TRUE){
err = character(0)
uTable = tolower(gsub("\\s","",unlist(tbl)))
hasEmptyCells <- isTRUE("-" %in% uTable || "" %in% uTable || NA %in% uTable)
hasDuplicate <- isTRUE(any(duplicated(uTable)))
lcvNotFound <- isTRUE(is.null(lS))
if(lcvNotFound){
err <- c(err,"Land cover layer not found")
}else{
if(hasEmptyCells){
err <- c(err,"The table has empty values")
}else{
if(hasDuplicate) err <- c(err,"The table has duplicated values")
}
}
if(length(err)>0){
err <- HTML(paste("<div>",icon('exclamation-triangle'),err,'</div>',collapse=""))
disBtn <- TRUE
}else{
disBtn <- FALSE
}
}
# send result to ui
if(length(err)>0){
msgList <- tagList(tags$b('Validation issues:'),err)
}else{
msgList <- tagList(p('Save labels and add land cover data to the stack:'))
}
output$stackLandcoverValidation <- renderUI(msgList)
# lab<-hot.to.df(input$landCoverRasterTable)$label
# disableMerge=any(is.null(lS),lS=='',is.null(lT),lT=="")
# disableStack=any(is.null(lS),lS=='',is.null(lab),"" %in% lab,NA %in% lab)
amActionButtonToggle(id='btnAddStackLcv',session,disable=disBtn)
#amActionButtonToggle(id='mergeLcvUndo',session,disable=!allow)
# amActionButtonToggle(id='mergeLcv',session,disable=disableMerge)
},label='observeBtnsLcv')
observe({
tblUpdated<-hot.to.df(input$landCoverRasterTable)
isolate({
tblOriginal<-isolate(landCoverRasterTable())
testNrow<-nrow(tblUpdated)==nrow(tblOriginal)
# rule 1 : if nrow doesnt match, return original
if(!is.null(tblUpdated) && !is.null(tblOriginal) && testNrow){
# rule 2: do not allow changing class
tblValidated<-data.frame(class=tblOriginal$class,label=amSubPunct(tblUpdated$label,'_'))
}else{
tblValidated<-tblOriginal
}
output$landCoverRasterTable<- renderHotable({tblValidated}, readOnly = FALSE, fixed=1, stretched='last')
})
})
# Get reactive land cover cat table from raster.
landCoverRasterTable<-reactive({
sel<-amNameCheck(dataList,input$landCoverSelect,'raster')
if(!is.null(sel)){
tbl<-read.csv(
text=execGRASS('r.category',
map=sel,
intern=T),
sep='\t',
header=F,
stringsAsFactors=F
)
names(tbl)<-config$tableColNames[['table_land_cover']]
tbl[,1]<-as.integer(tbl[,1])
tbl[,2]<-as.character(amSubPunct(tbl[,2],'_'))
}else{
tbl<-data.frame(as.integer(NA),as.character(NA))
names(tbl)<-config$tableColNames[['table_land_cover']]
}
tbl
})
landCoverSqliteTable<-reactive({
sel<-amNameCheck(dataList,input$landCoverSelectTable,'table',dbCon=isolate(grassSession$dbCon))
if(!is.null(sel)){
tbl<-dbGetQuery(isolate(grassSession$dbCon),paste('select * from',sel))
tbl[,1]<-as.integer(tbl[,1])
tbl[,2]<-amSubPunct(tbl[,2],'_')
}else{
tbl<-data.frame(as.integer(NA),as.character(NA))
names(tbl)<-config$tableColNames[['table_land_cover']]
}
tbl
})
# Save change in the lcv map.
landCoverRasterSave<-function(selLcv,tblLcv){
if(!is.null(selLcv) && !is.null(tblLcv)){
tblOut<-tempfile()
stackName<-paste0('stack_',selLcv)
amMsg(session,type="log",text=paste('Add to stack requested for: ',selLcv,'. Stack name is',stackName))
write.table(tblLcv,file=tblOut,row.names=F,col.names=F,sep='\t',quote=F)
execGRASS('r.category', map=selLcv, rules=tblOut)
execGRASS('g.copy',raster=paste0(selLcv,',',stackName),flags='overwrite')
colorSetting<-unlist(strsplit(config$dataClass[config$dataClass$class=='stack_land_cover','colors'],'&'))
execGRASS('r.colors',map=stackName,color=colorSetting[1])
}
}
# if select lcv map change or undo btn is pressed, update hotable with value from raster.
observe({
input$mergeLcvUndo # re evaluate if undo is pressed
tblSqlite<-landCoverSqliteTable()
tblRaster<-landCoverRasterTable()
output$landCoverRasterTable<-renderHotable(tblRaster,readOnly=F,fixedCols=1,stretched='last')
output$landCoverSqliteTable<-renderHotable(tblSqlite,readOnly=T,fixedCols=1,stretched='last')
})
# Observer: when the merge button is pressed, fill missing labels in the
# editable raster table with labels from the external SQLite table, while
# preserving classes and any manual edits already present.
observeEvent(input$mergeLcv,{
print('btn merge lcv pressed')
#if(!is.null(btn) && btn > 0){
tbl<-hot.to.df(isolate(input$landCoverRasterTable))
tbl[tbl==""]<-NA
#tblOrig$label<-NULL
tblExt<-hot.to.df(isolate(input$landCoverSqliteTable))
tblExt[tblExt==""]<-NA
# Merge strategy: only NA labels are replaced with the corresponding
# value from the external table; merge(y,x,all.x=T,by='class') would
# overwrite manual edits, so the replacement is done by hand.
# NOTE(review): the assignment below pairs rows positionally; it assumes
# tblExtSub rows come out in the same class order as the matching rows
# of tbl, and that classes are unique in both tables — TODO confirm.
naClass<-tbl[is.na(tbl$label),]$class # classes whose label is missing
tblExtSub<-na.omit(tblExt[tblExt$class %in% naClass,]) # external rows that can fill them
tbl[tbl$class %in% tblExtSub$class,]$label<-tblExtSub$label # fill NA labels from external table
tbl[,1]<-as.integer(tbl[,1])
tbl[,2]<-amSubPunct(tbl[,2],'_')
output$landCoverRasterTable<- renderHotable({tbl}, readOnly = FALSE, fixedCols=1, stretched='last')
#}
# })
})
# Observer: when the "add to stack" button is pressed, write the edited
# land-cover category table to GRASS via landCoverRasterSave() and refresh
# the data list. Everything except the button is isolate()d so only the
# button press triggers the save.
observe({
btn<-input$btnAddStackLcv
amErrorAction(title='Add to stack: lcv',{
isolate({
sel<-amNameCheck(dataList,input$landCoverSelect,'raster')
tbl<-hot.to.df(input$landCoverRasterTable)
if(!is.null(btn) && btn>0 && !is.null(sel)){
# Progress bar is set to 1% at start and 100% once the save completes.
amUpdateProgressBar(session,"lcvStackProgress",1)
landCoverRasterSave(sel,tbl)
amUpdateDataList(listen)
amUpdateProgressBar(session,"lcvStackProgress",100)
}
})
})
})
#------------------------------------------------------------------------------#
# ADD TO STACK ROAD
#------------------------------------------------------------------------------#
# Observer: populate the road layer select input from the current data list.
observe({
roadList <- amListData('amRoad', dataList)
# Bug fix: the empty-list fallback previously assigned to an unused
# variable `hfList`, leaving `roadList` as character(0) and the select
# input without the intended blank placeholder choice.
if (length(roadList) == 0) roadList <- character(1)
amDebugMsg('Road 1. update input. roadList=', roadList)
updateSelectInput(session, 'roadSelect', choices = roadList, selected = roadList[1])
})
# Observer: when the selected road vector changes, list its INTEGER columns
# (candidate class columns, excluding GRASS's 'cat' key) and CHARACTER
# columns (candidate label columns), and push them into the two selects.
observe({
sel<-amNameCheck(dataList,input$roadSelect,'vector')
amErrorAction(title='get road table columns',{
if(!is.null(sel)){
cla<-grassDbColType(sel,'INTEGER')
cla<-cla[!cla %in% c('cat')]
lab<-grassDbColType(sel,'CHARACTER')
}else{
# No valid layer: clear both selects with an empty choice.
cla=""
lab=""
}
updateSelectInput(session,'roadSelectClass',choices=cla,selected=cla[1])
updateSelectInput(session,'roadSelectLabel',choices=lab,selected=lab[1])
})
})
# Observer: build the road (class, label) preview table from the selected
# vector layer, render it, validate it (no empty cells, no duplicates,
# layer must exist) and enable/disable the "add to stack" button accordingly.
observe({
cla<-input$roadSelectClass
lab<-input$roadSelectLabel
amErrorAction(title='create road preview table',{
isolate({
sel<-amNameCheck(dataList,input$roadSelect,'vector')
if(!is.null(sel) && !is.null(cla) && !cla=="" && !is.null(lab) && !lab==""){
# Preview the distinct class/label pairs, capped at maxRowPreview rows.
q=paste('SELECT DISTINCT',cla,',',lab,' FROM',sel,'LIMIT',config$maxRowPreview)
tbl<-dbGetQuery(grassSession$dbCon,q)
names(tbl)<-config$tableColNames[['table_stack_road']]
tbl[,2]<-amSubPunct(tbl[,2],'_')
}else{
# Placeholder "-" cells double as the empty-cell marker for validation.
tbl<-data.frame("-","-")
names(tbl)<-config$tableColNames[['table_stack_road']]
tbl
}
output$roadPreviewTable<-renderHotable({tbl},readOnly=T,stretched='all',fixedCols=2)
if(TRUE){
err = character(0)
# Normalise all cells (lowercase, whitespace stripped) before checks.
uTable = tolower(gsub("\\s","",unlist(tbl)))
hasEmptyCells <- isTRUE("-" %in% uTable || "" %in% uTable || NA %in% uTable)
hasDuplicate <- isTRUE(any(duplicated(uTable)))
roadLayerNotFound <- isTRUE(is.null(sel))
if(roadLayerNotFound){
err <- c(err,"Road layer not found")
}else{
if(hasEmptyCells){
err <- c(err,"The table has empty values")
}else{
if(hasDuplicate) err <- c(err,"The table has duplicated values")
}
}
if(length(err)>0){
# Wrap each issue in a <div> with a warning icon for display.
err <- HTML(paste("<div>",icon('exclamation-triangle'),err,'</div>',collapse=""))
disBtn <- TRUE
}else{
disBtn <- FALSE
}
}
# Send the validation result to the UI and toggle the action button.
if(length(err)>0){
msgList <- tagList(tags$b('Validation issues:'),err)
}else{
msgList <- ""# tagList(tags$b('Ready to compute.'))
}
output$stackRoadValidation <- renderUI(msgList)
amActionButtonToggle(session=session,id='btnAddStackRoad',disable=disBtn)
})
})
})
# Observer: rasterise the selected road vector, one class at a time.
# For each (class, label) row of the preview table it extracts the matching
# features (v.extract), converts them to a raster stack layer (v.to.rast),
# applies colors and categories, then removes the temporary vector.
# The button is disabled for the duration of the run.
observeEvent(input$btnAddStackRoad,{
amErrorAction(title='Module 1: add stack road',{
amActionButtonToggle(session=session,id='btnAddStackRoad',disable=TRUE)
sel<-amNameCheck(dataList,input$roadSelect,'vector')
cla<-input$roadSelectClass
lab<-input$roadSelectLabel
if(!is.null(sel) && !is.null(cla) && !is.null(lab)){
tbl<-hot.to.df(input$roadPreviewTable)
message('Module 1: Spliting',sel)
tblN <- nrow(tbl)
amUpdateProgressBar(session,'roadStackProgress',1)
# Progress increment per processed class.
# NOTE(review): 1:tblN misbehaves if tbl is ever empty (tblN == 0);
# validation upstream appears to prevent that — TODO confirm.
inc <- 1/tblN*100
for(i in 1:tblN){
class <- tbl[i,'class']
label <- tolower(amSubPunct(vect = tbl[i,'label'],sep='_'))
#labelRule <- amSubPunct(vect = tbl[i,'label'],sep=' ')
labelRule <- amSubPunct(label,sep=' ')
# r.category rules file: "<class>\t<label with spaces>".
tmpFile<-tempfile()
tmpRules<-paste0(class,'\t',labelRule)
write(tmpRules,file=tmpFile)
outNameTmp<-paste0('tmp__',sel)
outNameStack<-paste0('stack_',sel,'_',label)
message(paste('Vector add to stack : extract class',class,' from ',sel))
execGRASS('v.extract',
input=sel,
output=outNameTmp,
where=paste0(cla,"=",class),
flags='overwrite'
)
message(paste('Vector add to stack : Vector to raster, class',class,' from',outNameTmp))
# 'd' flag densifies lines so diagonal roads stay connected in raster.
execGRASS('v.to.rast',
use='val',
type='line',
input=outNameTmp,
output=outNameStack,
value=class,
flags=c('d','overwrite')
)
colorSetting<-unlist(strsplit(config$dataClass[config$dataClass$class=='stack_road','colors'],'&'))
execGRASS('r.colors',map=outNameStack,color=colorSetting[1])
message(paste('Vector add to stack : setting categories. class',class,' for',outNameStack))
execGRASS('r.category',
map=outNameStack,
rules=tmpFile
)
rmVectIfExists(outNameTmp)
amUpdateProgressBar(session,'roadStackProgress',i*inc)
}
amUpdateDataList(listen)
}
amActionButtonToggle(session=session,id='btnAddStackRoad',disable=FALSE)
})
})
#------------------------------------------------------------------------------#
# ADD TO STACK BARRIER
#------------------------------------------------------------------------------#
# Observer: populate the barrier select input with vector layers whose
# names contain 'barrier__'; fall back to a single empty choice.
observe({
barrierList<-dataList$vector[grep('barrier__',dataList$vector)]
if(length(barrierList)<1)barrierList=""
updateSelectInput(session,'barrierSelect',choices=barrierList,selected=barrierList[1])
})
# Observer: disable the "add to stack barrier" button unless both a
# barrier type and a valid barrier layer are selected.
observe({
bT<-input$barrierType
bS<-amNameCheck(dataList,input$barrierSelect,'vector')
# any() of the four invalid conditions; NULL inputs collapse harmlessly
# since "NULL == ''" yields logical(0), which any() ignores.
disableBtn<-any(is.null(bT), bT=="", is.null(bS) , bS=="")
amActionButtonToggle(id="btnAddStackBarrier",session,disable=disableBtn)
})
# Reactive: feature-type/count summary of the selected barrier vector.
# Parses `v.info -t` output ("key=value" lines), keeps only the
# areas/lines/points rows, and renames 'areas' to the UI-facing 'polygons'.
# Returns an all-NA placeholder row when nothing valid is selected.
barrierPreview<-reactive({
sel<-amNameCheck(dataList,input$barrierSelect,'vector')
amErrorAction(title='Module 1: barrier preview',{
if(length(sel)>0 && !sel==""){
tbl<-read.table(text = execGRASS('v.info',map=sel,flags='t',intern=T),sep="=")
names(tbl)<-c('type','count')
tbl$type <- as.character(tbl$type)
tbl<-tbl[tbl$type %in% c('areas','lines','points'),]
tbl[tbl$type == "areas","type"] <- "polygons"
return(tbl)
}else{
tbl<-data.frame(as.character(NA),as.integer(NA))
names(tbl)<-c('type','count')
return(tbl)
}
})
})
# Observer: render the barrier preview summary as a read-only hotable.
observe({
tbl<-barrierPreview()
output$barrierPreviewTable <- renderHotable({tbl},readOnly=T,fixedCols=2,stretched='all')
})
# Observer: pre-select the barrier feature-type radio button based on which
# geometry type (points / lines / polygons) has the highest feature count.
observe({
tbl <- barrierPreview()
if (isTRUE(!any(is.na(tbl)))) {
sel <- tbl[which.max(tbl$count), 'type']
# Bug fix: the original used `sel == "areas"` — a comparison whose
# result was discarded — instead of an assignment, so the UI label
# "polygons" was never mapped back to the GRASS geometry name "areas"
# and the gsub below produced the invalid radio value "polygon".
if (sel == "polygons") {
sel <- "areas"
}
# Radio choices are singular ('area', 'line', 'point'); strip trailing 's'.
updateRadioButtons(session, 'barrierType', selected = gsub('s$', '', sel))
}
})
# Observer: rasterise each selected barrier vector into a stack layer with
# a single category (1 = 'barrier'), using the geometry type chosen in the
# radio buttons. Button is disabled while running.
observeEvent(input$btnAddStackBarrier,{
amErrorAction(title='Add to stack : barrier',{
amActionButtonToggle(session=session,id='btnAddStackBarrier',disable=TRUE)
sel<-amNameCheck(dataList,input$barrierSelect,'vector')
type <- input$barrierType
if(!is.null(sel) && !sel==''){
# Every barrier raster gets the same single category: 1 -> 'barrier'.
cl=1
la='barrier'
tmpFile<-tempfile()
write(paste0(cl,'\t',la),tmpFile)
inc=1/length(sel)*100
amUpdateProgressBar(session,'barrierProgress',1)
for(i in 1:length(sel)){
s<-sel[i]
outNameStack<-paste0('stack_',s)
message('Barrier add to stack : Vector to raster, class',cl,' from',outNameStack)
execGRASS('v.to.rast',use='val',
input=s,
output=outNameStack,
type=type,
value=cl,
flags=c('overwrite',if(type=='line')'d')# bug densified lines with area: not working.
)
execGRASS('r.category',map=outNameStack,rules=tmpFile)
rmVectIfExists('tmp__')
# NOTE(review): `1*inc` keeps the progress bar at the first increment;
# the road equivalent uses `i*inc` — likely intended here too. TODO confirm.
amUpdateProgressBar(session,'barrierProgress',1*inc)
}
amUpdateDataList(listen)
}
amActionButtonToggle(session=session,id='btnAddStackBarrier',disable=FALSE)
})
})
}
})
|
44edb52e9d52b2194a3f4701de6998e5e382f3b9
|
4459eb5432916b4ad6c5c5d911b50c9d2fec1ad5
|
/man/Selectivity.Rd
|
27683b995b099c779ce26b042895265c3ad6f94f
|
[] |
no_license
|
braverock/PerformanceAnalytics
|
057af55b0a4ddeb4befcc02e36a85f582406b95c
|
49a93f1ed6e2e159b63bf346672575f3634ed370
|
refs/heads/master
| 2023-08-03T10:18:27.115592
| 2023-03-29T09:23:17
| 2023-03-29T09:23:17
| 58,736,268
| 209
| 113
| null | 2023-05-23T17:46:08
| 2016-05-13T12:02:42
|
R
|
UTF-8
|
R
| false
| true
| 1,188
|
rd
|
Selectivity.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Selectivity.R
\name{Selectivity}
\alias{Selectivity}
\title{Selectivity of the return distribution}
\usage{
Selectivity(Ra, Rb, Rf = 0, ...)
}
\arguments{
\item{Ra}{an xts, vector, matrix, data frame, timeSeries or zoo object of
asset returns}
\item{Rb}{return vector of the benchmark asset}
\item{Rf}{risk free rate, in same period as your returns}
\item{\dots}{any other passthru parameters}
}
\description{
Selectivity is the same as Jensen's alpha
}
\details{
\deqn{Selectivity = r_p - r_f - \beta_p * (b - r_f)}{Selectivity = r_p - r_f - beta_p * (b - r_f)}
where \eqn{r_f} is the risk free rate, \eqn{\beta_r} is the regression beta,
\eqn{r_p} is the portfolio return and b is the benchmark return
}
\examples{
data(portfolio_bacon)
print(Selectivity(portfolio_bacon[,1], portfolio_bacon[,2])) #expected -0.0141
data(managers)
print(Selectivity(managers['2002',1], managers['2002',8]))
print(Selectivity(managers['2002',1:5], managers['2002',8]))
}
\references{
Carl Bacon, \emph{Practical portfolio performance measurement
and attribution}, second edition 2008 p.78
}
\author{
Matthieu Lestel
}
|
e52e4adc3b7f7306ce151d45b78e1de256b266a9
|
866de7bdc48ab2f44126a44b7790b41d6b11951b
|
/tests/testthat/test-superdiv.R
|
ddd767b4b6594d1c202af0fe01e8e9258874ff88
|
[
"BSD-2-Clause"
] |
permissive
|
claireh93/rdiversity
|
1ccc589b1b82125309fcfef776d9dff097c58c30
|
2826e891704b305ded35b1865d8dfd0f60a9ff76
|
refs/heads/master
| 2020-12-11T07:41:58.693995
| 2016-05-25T09:09:48
| 2016-05-25T09:09:48
| 59,580,987
| 0
| 0
| null | 2016-05-24T14:35:59
| 2016-05-24T14:35:58
| null |
UTF-8
|
R
| false
| false
| 827
|
r
|
test-superdiv.R
|
# testthat suite for rdiversity's superdiv(): checks supercommunity-level
# diversity measures at order q = 0 against hand-computed fractions for a
# small 3-species x 3-subcommunity relative-abundance matrix.
context('Testing the superdiv() function')
# Raw counts normalised so the whole matrix sums to 1 (relative abundances).
pop <- data.frame(a=c(1,1,0),b=c(2,0,0),c=c(3,1,0))
pop <- pop / sum(pop)
test_that("Supercommunity diversity across multiple populations", {
expect_equivalent(as.matrix(superdiv(alpha(supercommunity(pop)), 0)$diversity), t(5))
expect_equivalent(as.matrix(superdiv(alphabar(supercommunity(pop)), 0)$diversity), t(7/4))
expect_equivalent(as.matrix(superdiv(beta(supercommunity(pop)), 0)$diversity), t(19/48))
expect_equivalent(as.matrix(superdiv(betabar(supercommunity(pop)), 0)$diversity), t(13/12))
expect_equivalent(as.matrix(superdiv(rho(supercommunity(pop)), 0)$diversity), t(11/4))
expect_equivalent(as.matrix(superdiv(rhobar(supercommunity(pop)), 0)$diversity), t(15/16))
expect_equivalent(as.matrix(superdiv(gamma(supercommunity(pop)), 0)$diversity), t(2))
})
|
bb2947642a6885294b004b27019dc595fe1a718b
|
a18c2a7cf79b96fd50d45dab7493a482d37eddb0
|
/data/cellrangerRkit/man/write_cluster_specific_genes.Rd
|
c3962a83e4fc4c664c180d572889c06d087b2f06
|
[
"MIT"
] |
permissive
|
buenrostrolab/10x_scRNAseq
|
b6514c07873ae2a9c8959498234958fb833db568
|
8e65ceffd8a7186d0c81b159e6b316bc2bfdc6bf
|
refs/heads/master
| 2021-01-11T01:53:54.856901
| 2016-11-21T03:41:37
| 2016-11-21T03:41:37
| 70,646,869
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 884
|
rd
|
write_cluster_specific_genes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.r
\name{write_cluster_specific_genes}
\alias{write_cluster_specific_genes}
\title{Write cluster-specific genes to file}
\usage{
write_cluster_specific_genes(gene_clusters, gene_folder_path, n_genes = 10,
output_type = "symbol")
}
\arguments{
\item{gene_clusters}{A list of genes that are prioritized (output from function: prioritize_genes)}
\item{gene_folder_path}{Path to folder to store the cluster-specific genes}
\item{n_genes}{Number of genes to include for each cluster}
\item{output_type}{The format of the output gene list, "ensembl" or "symbol"}
}
\value{
matrix that includes all the genes written to a folder
}
\description{
Write cluster-specific genes to file
}
\examples{
\dontrun{
write_cluster_specific_genes(genes_to_plot,GENE_RES_FOLDER, n_genes=10,output_type='ensembl')
}
}
|
dad6c5cad2cda560e02cd7c253cc02ea8c2f84fc
|
12acaeb0d68e55bb2eab8c508fdd637b461f2717
|
/cachematrix.R
|
e004e13dbc59ec529009505d9f135fd87003e82c
|
[] |
no_license
|
Martin77PP/ProgrammingAssignment2
|
a0a838c1fc991e74b98cce6eaecf978a27b1398f
|
997969bbdccf815f49c81a68489dedb8ee8318b7
|
refs/heads/master
| 2021-01-17T20:20:06.451022
| 2014-08-24T12:44:57
| 2014-08-24T12:44:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,089
|
r
|
cachematrix.R
|
# makeCacheMatrix: build a special "matrix" object that can cache its
# inverse. Returns a list of four accessor closures that share the
# enclosed matrix `x` and its cached inverse:
#   set(y)          replace the stored matrix and invalidate the cache
#   get()           return the stored matrix
#   setSolve(solve) store a computed inverse in the cache
#   getSolve()      return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replacing the matrix makes any previously cached inverse stale.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setSolve = function(solve) cached_inverse <<- solve,
    getSolve = function() cached_inverse
  )
}
## cacheSolve: return the inverse of the matrix wrapped by a
## makeCacheMatrix object, computing it at most once.
##
## x   : list produced by makeCacheMatrix (provides get/getSolve/setSolve)
## ... : extra arguments forwarded to solve()
##
## On a cache hit it prints "getting cached data" and returns the stored
## inverse; otherwise it computes solve(x$get(), ...), stores the result
## via x$setSolve(), and returns it.
cacheSolve <- function(x, ...) {
  cached <- x$getSolve()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setSolve(cached)
  } else {
    message("getting cached data")
  }
  cached  ## the inverse of the matrix held by 'x'
}
mtrx1 <- matrix(1:4, 2) # make a square matrix
mCMx1 <- makeCacheMatrix(mtrx1) # wrap it in a caching matrix object
inv1 <- cacheSolve(mCMx1) # compute (and cache) its inverse
test1 <- mtrx1 %*% inv1 # should be the identity matrix if inv1 is correct
mtrx2 <- matrix(8:11, 2) # second example matrix
mCMx2 <- makeCacheMatrix(mtrx2)
inv2 <- cacheSolve(mCMx2)
test2 <- mtrx2 %*% inv2 # should also be the identity matrix
|
04658da1242caa089cef450c46a7d1e38462a639
|
6d340495ef5b1858b957b1e34ee7f53eb1b12bc7
|
/kpca-sim.R
|
6b5816f5cf0e0ed3e62b1dbf4cbf001e8c5fb867
|
[] |
no_license
|
chblw/stt7330-projet1
|
ba52ffa7c13be1c5b271cdb00140176b59991223
|
387dea97f8784b273cb56472e94ddff239df0815
|
refs/heads/master
| 2021-09-11T09:54:52.770364
| 2018-04-06T15:47:40
| 2018-04-06T15:47:40
| 125,749,392
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,608
|
r
|
kpca-sim.R
|
# Demonstration of kernel PCA on two concentric rings: linear PCA cannot
# separate the rings, while an explicit degree-2 polynomial feature map or
# kernel PCA (RBF / Bessel kernels) can.
library(kernlab)  # fixed: was `xlibrary(kernlab)`, which errors at load time
library(readr)
library(dplyr)
library(plotly)

# Simulate 200 points on two concentric rings (radii ~[0,2] and ~[7,9]),
# alternating class labels 2 and 4 (also used as plot colors).
angles <- runif(200, 0, 2 * pi)
radius <- runif(200, c(0, 7), c(2, 9))
circle_class <- rep(c(2, 4), 100)
x <- radius * cos(angles)
y <- radius * sin(angles)
circle_data <- cbind(x, y)
plot(circle_data, col = circle_class, asp = 1)

# Explicit polynomial feature map phi(x, y) = (x^2, sqrt(2)*x*y, y^2).
phi_data <- function(x, y) {
  return(c(x^2, sqrt(2) * x * y, y^2))
}

projected_data <- numeric(600)  # 200 points x 3 features, filled below
for (index_to_project in 1:200) {
  point_to_project <- circle_data[index_to_project, ]
  projected_point <- phi_data(point_to_project[1], point_to_project[2])
  projected_data[(3 * (index_to_project - 1) + 1):(3 * (index_to_project))] <- projected_point
}
projected_data <- matrix(projected_data, nrow = 200, ncol = 3, byrow = TRUE)
projected_data_as_dataframe <- as.data.frame(projected_data)
colnames(projected_data_as_dataframe) <- c("x", "y", "z")

# Linear PCA in the lifted feature space separates the two rings.
pca <- princomp(projected_data_as_dataframe)
# fixed: `pca$score` relied on `$` partial matching; the component is `scores`.
plot(pca$scores[, 1], pca$scores[, 2], col = circle_class,
     main = "Données transformées selon les composantes principales",
     xlab = "Première composante principale",
     ylab = "Deuxième composante principale")
plotly::plot_ly(projected_data_as_dataframe, x = ~x, y = ~y, z = ~z, color = circle_class) %>%
  add_markers() %>%
  layout(scene = list(xaxis = list(title = 'x'),
                      yaxis = list(title = 'y'),
                      zaxis = list(title = 'x*y')))

# One-dimensional view of the polynomial kernel value (x*y)^2 per point.
kerneled_data <- (circle_data[, 1] * circle_data[, 2])^2
plot(cbind(kerneled_data, rep(1, 200)), col = circle_class,
     xlab = "Log-valeur du noyau", yaxt = "n",
     main = "Valeur de la fonction noyau polynomial", log = "x")

# Kernel PCA with two retained components.
kpc_rbf <- kpca(circle_data, kernel = "rbfdot", kpar = list(sigma = 0.05), features = 2)
kpc_bessel <- kpca(circle_data, kernel = "besseldot", kpar = list(sigma = 0.6, order = 3), features = 2)
par(mfrow = c(1, 1))
plot(rotated(kpc_rbf), col = circle_class, pch = 19, main = "kpca rbf", asp = 1,
     xlab = "Première composante", ylab = "Deuxième composante")
plot(rotated(kpc_bessel), col = circle_class, pch = 19, main = "kpca bessel", asp = 1,
     xlab = "Première composante", ylab = "Deuxième composante")

# Single-component projections, each drawn along a horizontal line.
pc <- prcomp(circle_data, rank. = 1)
kpc_rbf <- kpca(circle_data, kernel = "rbfdot", kpar = list(sigma = 0.05), features = 1)
kpc_bessel <- kpca(circle_data, kernel = "besseldot", kpar = list(sigma = 0.6, order = 3), features = 1)
# fixed: rep(0, 300) did not match the 200 data points and made plot() fail
# with "'x' and 'y' lengths differ".
n_pts <- nrow(circle_data)
plot(pc$x, rep(0, n_pts), col = circle_class, pch = 19, main = "pca", xlab = "Première composante")
plot(rotated(kpc_rbf), rep(0, n_pts), col = circle_class, pch = 19, main = "kpca rbf", xlab = "Première composante")
plot(rotated(kpc_bessel), rep(0, n_pts), col = circle_class, pch = 19, main = "kpca bessel", xlab = "Première composante")
|
fbb1a460bc6e111f8cde546ddf4d540afa392234
|
6796f2593432f8ab8bc3f3db317f22e2022184ce
|
/Coronavirus.R
|
04d7ddb46d4f4413862328d4c843a25cf57d4fb9
|
[] |
no_license
|
aj-vy/coronavirus
|
bdd18e81c1938653d75286a83f22e9af63c4b27a
|
447165e60c6bf475a20854228a6a6dab3e195ecb
|
refs/heads/master
| 2022-04-11T06:31:40.340308
| 2020-03-25T14:13:18
| 2020-03-25T14:13:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,725
|
r
|
Coronavirus.R
|
# Scrape the worldometers coronavirus country table and write it to casos.csv.
library(tidyverse)
library(rvest)
# NOTE(review): rm(list = ls()) wipes the user's workspace; kept for
# compatibility with the original script but generally discouraged.
rm(list=ls())

# Page: https://www.worldometers.info/coronavirus/
url <- read_html('https://www.worldometers.info/coronavirus/')
df <- url %>% html_nodes('.main_table_countries_div tbody')
df_t <- df[[1]]
# Each country row exposes 10 <td> cells; derive the row count from the
# total cell count.
all_cells <- df_t %>% html_nodes('tr td') %>% html_text()
n_rows <- length(all_cells) / 10

# Extract each row node once, then pull column j of every row.
# This replaces the original per-column `c()` growth inside a loop
# (quadratic append over ten vectors) with a preallocated vapply().
row_nodes <- lapply(seq_len(n_rows), function(i) xml_child(df_t, i))
col_text <- function(j) {
  vapply(row_nodes, function(node) html_text(xml_child(node, j)), character(1))
}

# Create dataframe (same columns and order as the original script).
casos <- data.frame(
  Country          = col_text(1),
  TotalCases       = col_text(2),
  NewCases         = col_text(3),
  TotalDeaths      = col_text(4),
  NewDeaths        = col_text(5),
  TotalRecovered   = col_text(6),
  ActiveCases      = col_text(7),
  Serious_Critical = col_text(8),
  TotCases1Mpop    = col_text(9),
  TotDeaths1Mpop   = col_text(10),
  stringsAsFactors = FALSE
)

# Write file csv
write_csv(casos, 'casos.csv')
|
26e60a1b1564808f4f9a63ae6a617b4e6ad9e7e4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ArDec/examples/ardec.lm.Rd.R
|
f2f0755dbb152158fc6ff37823032fb0ad17b188
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 196
|
r
|
ardec.lm.Rd.R
|
# Extracted example for ArDec::ardec.lm — fits an autoregressive model to
# the tempEng series via linear regression.
library(ArDec)
### Name: ardec.lm
### Title: Fit an autoregressive model as a linear regression
### Aliases: ardec.lm
### Keywords: ts
### ** Examples
data(tempEng)
model=ardec.lm(tempEng)
|
9d1b0a9496b191bc785c21a74e8888f0e48d2312
|
bdef750a2d626c9d70ddcad057dfbd83dda66245
|
/tests/testthat/tests_calc_summaryStats.R
|
09a55695aaf9f80e65af74e8b5120b71c3b8c6c9
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
cpenn-usgs/sedReview
|
4f6cabd39f7d73f86efb0fb1c097c6f728c08526
|
505906643f56f4aec2e244e64900dba0a0729c7e
|
refs/heads/master
| 2021-01-11T04:33:16.981342
| 2020-08-24T19:09:50
| 2020-08-24T19:09:50
| 71,158,151
| 0
| 0
| null | 2016-10-17T16:22:56
| 2016-10-17T16:22:56
| null |
UTF-8
|
R
| false
| false
| 450
|
r
|
tests_calc_summaryStats.R
|
context("calc_summaryStats")
test_that("Return values check", {
data("exampleData", package = "sedReview")
# calc_summaryStatsOut <- calc_summaryStats(exampleData)
# saveRDS(calc_summaryStatsOut,"tests/testthat/data/calc_summaryStatsOut.rds")
calc_summaryStatsOut <- readRDS("data/calc_summaryStatsOut.rds")
calc_summaryStatsOutTest <- calc_summaryStats(exampleData)
expect_equal(calc_summaryStatsOut, calc_summaryStatsOutTest)
})
|
f049ae3b0657ecdce048877661d8686f7512e40a
|
51fdd67d355df9ed4378bc86e432386edd31d4ca
|
/man/tremmel_shape_pal.Rd
|
1f6d389f67f6be99f9dac500188c88dd901bf8e3
|
[] |
no_license
|
bobbbbbi/Data-Visualisation-with-ggplot2-ggthemes
|
f434c6dede486849236ddc26d6a31a3ee093ffe9
|
6ff7f1589b6199bf4c11ffde12b5fed9ceee4fce
|
refs/heads/master
| 2020-06-19T08:08:40.380067
| 2017-05-01T00:02:28
| 2017-05-01T00:02:28
| 94,182,518
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,499
|
rd
|
tremmel_shape_pal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shapes.R
\name{tremmel_shape_pal}
\alias{tremmel_shape_pal}
\title{Shape palette from Tremmel (1995) (discrete)}
\usage{
tremmel_shape_pal(overlap = FALSE, n3alt = TRUE)
}
\arguments{
\item{overlap}{use an empty circle instead of a solid circle when
\code{n == 2}.}
\item{n3alt}{If \code{TRUE} then use a solid circle, plus sign and
empty triangle, else use a solid circle, empty circle, and empty
triangle.}
}
\description{
Based on experiments Tremmel (1995) suggests the following shape palettes:
}
\details{
If two symbols, then use a solid circle and plus sign.
If three symbols, then use a solid circle, empty circle, and an
empty triangle. However, that set of symbols does not satisfy the
requirement that each symbol should differ from the other symbols
in the same feature dimension. A set of three symbols that
satisfies this is a circle (curvature), plus sign (number of
terminators), triangle (line orientation).
If more than three groups of data, then separate the groups into
different plots.
}
\references{
Tremmel, Lothar, (1995) "The Visual Separability of Plotting Symbols in Scatterplots"
Journal of Computational and Graphical Statistics,
\url{http://www.jstor.org/stable/1390760}
}
\seealso{
Other shapes: \code{\link{circlefill_shape_pal}},
\code{\link{cleveland_shape_pal}},
\code{\link{scale_shape_circlefill}},
\code{\link{scale_shape_cleveland}},
\code{\link{scale_shape_tremmel}}
}
|
df935f73d3c3301ca8086ac740067180a8135a0d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/minimaxdesign/examples/CtoB.Rd.R
|
8d389e88a9cae832d170c6a62d2bde72f3c90996
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 465
|
r
|
CtoB.Rd.R
|
# Extracted example for minimaxdesign::CtoB — inverse Rosenblatt transform
# mapping Sobol' sequence points from the unit hypercube onto the unit ball.
library(minimaxdesign)
### Name: CtoB
### Title: Inverse Rosenblatt transformation from the unit hypercube to the
###   unit ball.
### Aliases: CtoB
### ** Examples
## Not run:
##D # Map the first 100 points of the Sobol' sequence in 3D
##D # onto the unit ball in 3D
##D library(randtoolbox)
##D des <- sobol(100,3)
##D des_ball <- CtoB(des)
##D
##D pairs(des_ball,xlim=c(-1,1),ylim=c(-1,1),pch=16)
##D
##D
## End(Not run)
|
3dce69cd7ee476ec0b459e1679a2268d9d30fe24
|
767ef79477c8c9cd67906280ff11553e84386cfc
|
/server.r
|
df5e76942b26790dabf8efc06892770f861be0c2
|
[] |
no_license
|
RichardKen1/Health-insurance-comparer
|
6d8c6afa05db9bcfd54b4e1d6e69f8c0e34234d0
|
b454debb684e0b1787987a20994f90476bd1de95
|
refs/heads/master
| 2016-09-13T03:08:13.316164
| 2016-04-22T19:23:24
| 2016-04-22T19:23:24
| 56,879,789
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,478
|
r
|
server.r
|
# Shiny server setup: load packages and the five CSV datasets used by the
# health-insurance comparison app (cost by age/gender, procedure fillout
# templates, population counts, percentile data, probability fillout).
library(shiny)
library(Rcpp)
library(ggplot2)
cost<-read.csv("age.and.gender/age and gender.csv", header=FALSE)
fillout<- read.csv("fillout.data/Copy of fillout dataset.csv", header=TRUE)
pop<-read.csv("Polished.population/Polished Population Data.csv")
percentile<- read.csv("Polished.percentile/Polished Percentile Data.csv")
p_fillout<- read.csv("probability.fillout/probability_ fillout dataset.csv")
cppFunction('double find_max(DataFrame udf){
Rcpp::DoubleVector value = udf["Cost"];
double max=0;
for(int i=0; i<5; i++){
if(value[i]>=max)
max=value[i];
}
return max;
}')
# Compiled helper: build a (Medical.Procedure, Probability) data frame for
# a given age. For each of the 5 procedures it scans the 18 percentile
# rows, copying Medical.Condtions.POP where the age falls within
# [Age.Min, Age.Max]; the hard-coded n==3/7/9/13 breakpoints partition the
# 18 rows into the 5 procedure groups.
# NOTE(review): the C++ body is a string literal, so comments are kept
# outside it; `true_cost` is declared but never used — presumably copied
# from the *_munge helpers. TODO confirm the row-group boundaries match
# the percentile CSV layout.
cppFunction('DataFrame find_percentile(DataFrame percentile, DataFrame p_fillout, int age){
Rcpp::CharacterVector xplot = p_fillout["Medical.Procedure"];
Rcpp::DoubleVector yplot = p_fillout["Probability"];
Rcpp:: DoubleVector min_age = percentile["Age.Min"];
Rcpp:: DoubleVector max_age = percentile["Age.Max"];
Rcpp:: DoubleVector data_pop = percentile["Medical.Condtions.POP"];
double true_cost[5];
double true_per[5];
for(int i=0; i<5; i++){
true_cost[i]=0;
true_per[i]=0;
}
int m=0;
for(int n=0; n<18; n++)
{
if(age>=min_age[n] && age<=max_age[n])
true_per[m]=data_pop[n];
if(n==3)
m=1;
if(n==7)
m=2;
if(n==9)
m=3;
if(n==13)
m=4;
}
for(int i=0; i<5; i++){
yplot[i]=true_per[i];
}
return DataFrame::create(_["Medical.Procedure"]=xplot,_["Probability"]=yplot );
}')
# Compiled helper: per-procedure cost estimate for UNINSURED patients at a
# given age. For each of 5 procedures it picks a cost row from the `cost`
# table (base indices in med_start, offset 0-4 by age bracket), finds the
# matching population count from the 18-row `pop` table, and returns
# cost/population scaled by 1e6 as the per-person cost.
# NOTE(review): the med_start base indices (1028, 92, 524, 416, 920) encode
# row positions in "age and gender.csv" — TODO confirm against that file.
# The C++ body is a string literal, so comments stay outside it.
cppFunction('DataFrame uninsured_munge(DataFrame fillout, DataFrame cost, DataFrame pop, int age){
Rcpp::CharacterVector xplot = fillout["Medical.Procedure"];
Rcpp::DoubleVector yplot = fillout["Cost"];
Rcpp:: DoubleVector data_cost = cost["V9"];
Rcpp:: DoubleVector data_pop = pop["Medical.Condtions.POP"];
Rcpp:: DoubleVector min_age = pop["Age.Min"];
Rcpp:: DoubleVector max_age = pop["Age.Max"];
double true_cost[5];
double true_pop[5];
for(int i=0; i<5; i++){
true_cost[i]=0;
true_pop[i]=0;
}
int med_start[5];
med_start[0]=1028;
med_start[1]=92;
med_start[2]=524;
med_start[3]=416;
med_start[4]=920;
for(int j=0; j<5; j++)
{
if(age>=0 && age<=18)
true_cost[j]=data_cost[med_start[j]];
else if(age>= 19 && age<= 44){
med_start[j]=med_start[j]+1;
true_cost[j]=data_cost[med_start[j]];
}
else if(age>=45 && age<=64){
med_start[j]=med_start[j]+2;
true_cost[j]=data_cost[med_start[j]];
}
else if(age>=65 && age<=84){
med_start[j]=med_start[j]+3;
true_cost[j]=data_cost[med_start[j]];
}
else if(age>=85 && age<=100){
med_start[j]=med_start[j]+4;
true_cost[j]=data_cost[med_start[j]];
}
else
true_cost[j]=0;
}
int m=0;
for(int n=0; n<18; n++)
{
if(age>=min_age[n] && age<=max_age[n])
true_pop[m]=data_pop[n];
if(n==3)
m=1;
if(n==7)
m=2;
if(n==9)
m=3;
if(n==13)
m=4;
}
for( int z=0; z<5; z++){
if(true_pop[z]==0)
yplot[z]=0;
else
yplot[z]=((true_cost[z])/true_pop[z])*1000000;
}
return DataFrame::create(_["Medical.Procedure"]=xplot,_["Cost"]=yplot );
}')
# Compiled helper: per-procedure cost estimate for INSURED patients at a
# given age. Identical algorithm to uninsured_munge above; only the
# med_start base row indices differ (992, 56, 488, 380, 884), selecting
# the insured cost rows from the same cost table.
# NOTE(review): the two functions share ~60 lines; the only difference is
# the index table — a single parameterised function would remove the
# duplication. The C++ body is a string literal, so comments stay outside.
cppFunction('DataFrame insured_munge(DataFrame fillout, DataFrame cost, DataFrame pop, int age){
Rcpp::CharacterVector xplot = fillout["Medical.Procedure"];
Rcpp::DoubleVector yplot = fillout["Cost"];
Rcpp:: DoubleVector data_cost = cost["V9"];
Rcpp:: DoubleVector data_pop = pop["Medical.Condtions.POP"];
Rcpp:: DoubleVector min_age = pop["Age.Min"];
Rcpp:: DoubleVector max_age = pop["Age.Max"];
double true_cost[5];
double true_pop[5];
for(int i=0; i<5; i++){
true_cost[i]=0;
true_pop[i]=0;
}
int med_start[5];
med_start[0]=992;
med_start[1]=56;
med_start[2]=488;
med_start[3]=380;
med_start[4]=884;
for(int j=0; j<5; j++)
{
if(age>=0 && age<=18)
true_cost[j]=data_cost[med_start[j]];
else if(age>= 19 && age<= 44){
med_start[j]=med_start[j]+1;
true_cost[j]=data_cost[med_start[j]];
}
else if(age>=45 && age<=64){
med_start[j]=med_start[j]+2;
true_cost[j]=data_cost[med_start[j]];
}
else if(age>=65 && age<=84){
med_start[j]=med_start[j]+3;
true_cost[j]=data_cost[med_start[j]];
}
else if(age>=85 && age<=100){
med_start[j]=med_start[j]+4;
true_cost[j]=data_cost[med_start[j]];
}
else
true_cost[j]=0;
}
int m=0;
for(int n=0; n<18; n++)
{
if(age>=min_age[n] && age<=max_age[n])
true_pop[m]=data_pop[n];
if(n==3)
m=1;
if(n==7)
m=2;
if(n==9)
m=3;
if(n==13)
m=4;
}
for( int z=0; z<5; z++){
if(true_pop[z]==0)
yplot[z]=0;
else
yplot[z]=((true_cost[z])/true_pop[z])*1000000;
}
return DataFrame::create(_["Medical.Procedure"]=xplot,_["Cost"]=yplot );
}')
# Shiny backend: a percentile table plus paired uninsured/insured cost plots
# for the age chosen in `input$age`.
# Relies on objects built elsewhere in the app: `fillout`, `cost`, `pop`,
# `percentile`, `p_fillout`, and the helpers find_percentile, find_max,
# uninsured_munge, insured_munge.
shinyServer(
function(input, output){
output$percent_table <-renderTable({find_percentile(percentile, p_fillout, input$age)})
output$uninsured_plot <-renderPlot({
# Costs without insurance for this age; find_max fixes the shared y-limit.
udf<-uninsured_munge(fillout, cost, pop, input$age)
max<-find_max(udf)
ggplot(data=udf, aes(x=Medical.Procedure, y=Cost, fill=Cost)) +
geom_bar(colour="black", stat="identity") + ylim(0, max+50)+
ggtitle("Cost Without Insurance")+theme(plot.title=element_text(face="bold"))
})
output$insured_plot <-renderPlot({
# The uninsured table is recomputed here only to reuse its maximum, so both
# plots share the same y-axis scale and are visually comparable.
udf<-uninsured_munge(fillout, cost, pop, input$age)
idf<-insured_munge(fillout, cost, pop, input$age)
max<-find_max(udf)
ggplot(data=idf, aes(x=Medical.Procedure, y=Cost, fill=Cost))+
geom_bar(colour="black", stat="identity") + ylim(0, max+50)+
ggtitle("Cost With Insurance")+theme(plot.title=element_text(face="bold"))
})
}
)
|
38d03891985c79c2bd10edae22543c3118ca3064
|
903da089f3ac659f7295a2b1d351981394e8bcdc
|
/man/mc.bootstrap.Rd
|
08887bbe3a68986c1c7d177bfa7cd6446e25503d
|
[] |
no_license
|
cran/mcr
|
1f27b59cda2a87be199a8f6534bec6882154b042
|
069b879be631491ed07a54a0f348b1adbebf7867
|
refs/heads/master
| 2023-02-06T22:38:44.942104
| 2023-01-26T21:00:19
| 2023-01-26T21:00:19
| 17,697,375
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,856
|
rd
|
mc.bootstrap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mcBootstrap.r
\name{mc.bootstrap}
\alias{mc.bootstrap}
\title{Resampling estimation of regression parameters and standard errors.}
\usage{
mc.bootstrap(
method.reg = c("LinReg", "WLinReg", "Deming", "WDeming", "PaBa", "PaBaLarge", "TS",
"PBequi"),
jackknife = TRUE,
bootstrap = c("none", "bootstrap", "nestedbootstrap"),
X,
Y,
error.ratio,
nsamples = 1000,
nnested = 25,
iter.max = 30,
threshold = 1e-08,
NBins = 1e+06,
slope.measure = c("radian", "tangent")
)
}
\arguments{
\item{method.reg}{Regression method. It is possible to choose between the following regression types:
\code{"LinReg"} - ordinary least square regression,
\code{"WLinReg"} - weighted ordinary least square regression,\code{"Deming"} - Deming regression,
\code{"WDeming"} - weighted Deming regression, \code{"PaBa"} - Passing-Bablok regression.}
\item{jackknife}{Logical value. If TRUE - Jackknife based confidence interval estimation method.}
\item{bootstrap}{Bootstrap based confidence interval estimation method.}
\item{X}{Measurement values of reference method}
\item{Y}{Measurement values of test method}
\item{error.ratio}{Ratio between squared measurement errors of reference- and test method,
necessary for Deming regression. Default 1.}
\item{nsamples}{Number of bootstrap samples.}
\item{nnested}{Number of nested bootstrap samples.}
\item{iter.max}{maximum number of iterations for weighted Deming iterative algorithm.}
\item{threshold}{Numerical tolerance for weighted Deming iterative algorithm convergence.}
\item{NBins}{number of bins used when 'reg.method="PaBaLarge"' to classify each slope in one of 'NBins' bins of constant slope angle covering the range of all slopes.}
\item{slope.measure}{angular measure of pairwise slopes used for exact PaBa regression (see \code{\link{mcreg}} for details).\cr
\code{"radian"} - for data sets with even sample numbers median slope is calculated as average of two central slope angles.\cr
\code{"tangent"} - for data sets with even sample numbers median slope is calculated as average of two central slopes (tan(angle)).\cr}
}
\value{
a list consisting of
\item{glob.coef}{Numeric vector of length two with global point estimations of intercept and slope.}
\item{glob.sigma}{Numeric vector of length two with global estimations of standard errors of intercept and slope.}
\item{xmean}{Global (weighted-)average of reference method values.}
\item{B0jack}{Numeric vector with point estimations of intercept for jackknife samples.
The i-th element contains point estimation for data set without i-th observation}
\item{B1jack}{Numeric vector with point estimations of slope for jackknife samples.
The i-th element contains point estimation for data set without i-th observation}
\item{B0}{Numeric vector with point estimations of intercept for each bootstrap sample.
The i-th element contains point estimation for i-th bootstrap sample.}
\item{B1}{Numeric vector with point estimations of slope for each bootstrap sample.
The i-th element contains point estimation for i-th bootstrap sample.}
\item{MX}{Numeric vector with point estimations of (weighted-)average of reference method values for each bootstrap sample.
The i-th element contains point estimation for i-th bootstrap sample.}
\item{sigmaB0}{Numeric vector with estimation of standard error of intercept for each bootstrap sample.
The i-th element contains point estimation for i-th bootstrap sample.}
\item{sigmaB1}{Numeric vector with estimation of standard error of slope for each bootstrap sample.
The i-th element contains point estimation for i-th bootstrap sample.}
\item{nsamples}{Number of bootstrap samples.}
\item{nnested}{Number of nested bootstrap samples.}
\item{cimeth}{Method of confidence interval calculation (bootstrap).}
\item{npoints}{Number of observations.}
}
\description{
Generate jackknife or (nested-) bootstrap replicates of a statistic applied to data.
Only a nonparametric balanced design is possible. For each sample calculate
point estimations and standard errors for regression coefficients.
}
\references{
Efron, B., Tibshirani, R.J. (1993)
\emph{An Introduction to the Bootstrap}. Chapman and Hall.
Carpenter, J., Bithell, J. (2000)
Bootstrap confidence intervals: when, which, what? A practical guide for medical statisticians.
\emph{Stat Med}, \bold{19 (9)}, 1141--1164.
}
\author{
Ekaterina Manuilova \email{ekaterina.manuilova@roche.com}, Fabian Model \email{fabian.model@roche.com}, Sergej Potapov \email{sergej.potapov@roche.com}
}
|
81c2f92d7dbee97d2f4be12a03c0e47b80fff821
|
ff7141cb3fe8fe044dc3381a5672726d3733496b
|
/Chunk.R
|
12007162e01fe6184e518f61ff31aab88036e783
|
[] |
no_license
|
kenjimyzk/knitr-template
|
f5cfd87f69d891cd96151bb989ab021d7f373e92
|
184e75e43d812f44467638e2d189447abbe2ec58
|
refs/heads/master
| 2021-01-16T18:43:21.655575
| 2014-01-19T08:24:37
| 2014-01-19T08:24:37
| 15,764,462
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 244
|
r
|
Chunk.R
|
## ---- Chunk01 ----
# Simulate a predictor x and a linear response y = 10 + 2x + Gaussian noise.
x <- runif(100)
y <- 10 + 2 * x + rnorm(100)
## ---- Chunk02 ----
# Fit the simple linear regression of y on x.
result <- lm(y~x)
## ---- Chunk03 ----
# Render the coefficient table as LaTeX, without a floating environment.
library(xtable)
print(xtable(result, digits=3), floating=FALSE)
## ---- Chunk04 ----
# Scatter plot of the data with the fitted regression line overlaid.
plot(x,y)
abline(result)
|
6d3f9e7daeb2dd19566147de6da047fa26601ffd
|
3d084d7aba8db3cbcd9acbbcb9d7684d56645ed1
|
/Tournament.R
|
f4925c2f546729511b9229fba330a42183916613
|
[] |
no_license
|
profnote/MLB_Playoffs_Prediction
|
3d46b32e9be580978fc27727475cd15447bdc587
|
8dfe769e0d2f650dddd3f53b9974d8b4354616f4
|
refs/heads/master
| 2022-12-03T23:24:06.784802
| 2020-08-21T15:25:13
| 2020-08-21T15:25:13
| 226,808,965
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,376
|
r
|
Tournament.R
|
# Tournament.R
#
# Predicts the outcome of the entire tournament of 8 MLB teams
# The logistic regression model predictors selected by lasso: A.b.K and B.b.K (strikeouts per plate)
library(caret)
# import and filter data
# Keep only 2019 team stats; drop the season column and trailing bookkeeping columns.
team_stats <- read.csv("dataPPS.csv")
teams <- team_stats[team_stats$Season==2019,-c(1,21:27)]
columns <- colnames(teams)[3:19]
# train CV logistic model (based on 2001-2018 data)
# Keep only the two lasso-selected predictors plus the outcome.
AUBdata <- read.csv("AvsB.csv")
AUBdata <- subset(AUBdata, select = c(A.b.K, B.b.K, result))
# train a logistic regression model via 10 fold cross-validation
ctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 10)
# NOTE(review): the training call below is commented out, but AUB_las_LR is
# used by pred.matches() and playoff() further down -- uncomment it (or load
# a saved model) before running this script.
#AUB_las_LR <- train(result ~ A.b.K + B.b.K, data = AUBdata, method = 'glm', family = binomial(), trControl = ctrl)
# predict 2019 matches
# Initialize the prediction columns: win probability and a "win"/"lose" label.
results <- read.csv("AvsB2019.csv")
res2019 <- results[results$Year==2019,]
res2019 <- subset(res2019, select = c(win, Lose, A.b.K, B.b.K, result))
res2019$win_p <- 0
res2019$pred <- "lose"
# Predict each 2019 match and label it "win"/"lose".
#
# res2019: data frame with predictor columns A.b.K and B.b.K plus the
#          win_p / pred columns initialized above.
# Returns the data frame with win_p (probability of a win for team A, column 2
# of the model's probability output) and pred filled in; each row is printed
# as it is scored.
# NOTE(review): relies on the fitted model `AUB_las_LR` existing in the
# calling environment (its training call is commented out above).
pred.matches <- function(res2019){
  for (i in seq_len(nrow(res2019))) {
    win_prob <- predict(AUB_las_LR, newdata = res2019[i, ], type = "prob")
    res2019$win_p[i] <- win_prob[, 2]
    if (res2019$win_p[i] > 0.5) {
      res2019$pred[i] <- "win"
    }
    print(res2019[i, ])
  }
  # Fix: return the updated frame -- the original mutated a local copy and
  # discarded it, so callers never saw the predictions.
  res2019
}
# Assign teams by data row according to real brackets
# Seed the 8-team bracket in real playoff order; adjacent rows play each
# other (1 vs 2, 3 vs 4, 5 vs 6, 7 vs 8).
team1 <- teams[1,] #Astros
team2 <- teams[6,] #Rays
team3 <- teams[8,] #Yankees
team4 <- teams[7,] #Twins
team5 <- teams[5,] #Nationals
team6 <- teams[4,] #Dodgers
team7 <- teams[3,] #Cardinals
team8 <- teams[2,] #Braves
bracket <- rbind(team1, team2, team3, team4, team5, team6, team7, team8)
# predict the outcome of the whole tournament
winning_team <- ""
round_name <- c("Quarterfinals", "Semifinals", "Grand finals")
# Simulate the 8-team single-elimination bracket over three rounds.
# Each match compares the two teams' strikeouts-per-plate (b.K) via the
# fitted logistic model; the loser's row is dropped from the local copy of
# `bracket`. Every match is printed; the function returns the champion's name.
# NOTE(review): depends on globals `bracket` and `round_name` defined above,
# and on the model `AUB_las_LR` (whose training call is commented out above).
playoff <- function(){
  champion <- ""
  for (rnd in 1:3) {
    # Matches per round: 4, then 2, then 1; iterate high-to-low so row
    # removals do not disturb pairs not yet played.
    for (match in 2^(3 - rnd):1) {
      temp_stats <- data.frame(bracket[2 * match - 1, "b.K"], bracket[2 * match, "b.K"])
      colnames(temp_stats) <- c("A.b.K", "B.b.K")
      win_prob <- predict(AUB_las_LR, newdata = temp_stats, type = "prob")
      teamA <- bracket[2 * match - 1, 2]
      teamB <- bracket[2 * match, 2]
      if (win_prob["win"] > 0.5) {
        champion <- teamA
        bracket <- bracket[-(2 * match), ]
      } else {
        champion <- teamB
        bracket <- bracket[-(2 * match - 1), ]
      }
      print(paste(round_name[rnd], ": ", teamA, "vs", teamB, ", winner =", champion))
    }
  }
  # Fix: return the tournament winner -- the original returned only the last
  # print() value, so the result could not be used programmatically.
  champion
}
|
769e48edc76c9954fccd90f7bcf1cf344d8a62ff
|
06f0689d9b960bf8a86e96cea4a0308e24e1413b
|
/man/PERFect_sim.Rd
|
6958791c436ec51c49930e7038f2694c86ed568a
|
[] |
no_license
|
cxquy91/PERFect
|
4746fc51dcf45dd86b4765759ea08601b65b79b6
|
199ad444e10c81c472deefe5dff5e32dff977b41
|
refs/heads/master
| 2020-07-24T08:07:50.134566
| 2020-04-26T03:33:03
| 2020-04-26T03:33:03
| 207,858,306
| 3
| 0
| null | 2019-09-16T22:58:11
| 2019-09-11T16:31:48
|
R
|
UTF-8
|
R
| false
| true
| 5,881
|
rd
|
PERFect_sim.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PERFect_sim.R
\name{PERFect_sim}
\alias{PERFect_sim}
\title{Simulation PERFect filtering for microbiome data}
\usage{
PERFect_sim(X,infocol = NULL, Order = "NP", Order.user = NULL, normalize = "counts",
center = FALSE, quant = c(0.1, 0.25, 0.5), distr = "sn",
alpha = 0.1, rollmean = TRUE, direction = "left", pvals_sim = NULL,
nbins = 30, col = "red", fill = "green", hist_fill = 0.2,
linecol = "blue")
}
\arguments{
\item{X}{OTU table, where taxa are columns and samples are rows of the table.
It should be a in data frame format with columns corresponding to taxa names.
It could contains columns of metadata.}
\item{infocol}{Index vector of the metadata. We assume user only gives a taxa table,
but if the metadata of the samples are included in the columns of the input, this option
needs to be specified.}
\item{Order}{Taxa ordering. The default ordering is the number of occurrences (NP) of the taxa in all samples.
Other types of order are p-value ordering, number of connected taxa and weighted number of connected taxa,
denoted as \code{"pvals"}, \code{"NC"}, \code{"NCw"} respectively. More details about taxa ordering are described in Smirnova et al.
User can also specify their preference order with Order.user.}
\item{Order.user}{User's taxa ordering. This argument takes a character vector of ordered taxa names.}
\item{normalize}{Normalizing taxa count. The default option does not normalize taxa count,
but user can convert the OTU table into a proportion table using the option \code{"prop"}
or convert it into a presence/absence table using \code{"pres"}.}
\item{center}{Centering OTU table. The default option does not center the OTU table.}
\item{quant}{Quantile values used to fit the distribution to log DFL values.
The number of quantile values corresponds to the number of parameters in the distribution the data is fitted to.
Assuming that at least 50\% of taxa are not informative, we suggest fitting the log Skew-Normal distribution
by matching the 10\%, 25\% and 50\% percentiles of the log-transformed samples to the Skew-Normal distribution.}
\item{distr}{The type of distribution to fit log DFL values to. While we suggest using Skew-Normal distribution,
and set as the default distribution, other choices are available.
\describe{
\item{\code{"sn"}}{Skew-Normal distribution with 3 parameters: location xi, scale omega^2 and shape alpha}
\item{\code{"norm"}}{Normal distribution with 2 parameters: mean and standard deviation sd}
\item{\code{"t"}}{Student t-distribution with 2 parameters: n degrees of freedom and noncentrality ncp}
\item{\code{"cauchy"}}{Cauchy distribution with 2 parameters: location and scale}
}}
\item{alpha}{Test level alpha, set to 0.1 by default.}
\item{rollmean}{Binary TRUE/FALSE value. If TRUE, rolling average (moving mean) of p-values will be calculated,
with the lag window set to 3 by default.}
\item{direction}{Character specifying whether the index of the result should be left- or right-aligned
or centered compared to the rolling window of observations, set to "left" by default.}
\item{pvals_sim}{Object resulting from simultaneous PERFect with taxa abundance ordering,
allowing user to perform Simultaneous PERFect with p-values ordering.
Be aware that the choice of distribution for both methods must be the same.}
\item{nbins}{Number of bins used to visualize the histogram of log DFL values, set to 30 by default.}
\item{col}{Graphical parameter for color of histogram bars border, set to "red" by default.}
\item{fill}{Graphical parameter for color of histogram fill, set to "green" by default.}
\item{hist_fill}{Graphical parameter for intensity of histogram fill, set to 0.2 by default.}
\item{linecol}{Graphical parameter for the color of the fitted distribution density, set to "blue" by default.}
}
\value{
A list is returned containing:
\item{filtX}{Filtered OTU table.}
\item{info}{The metadata information.}
\item{pvals}{P-values of the test.}
\item{DFL}{Differences in filtering loss values.}
\item{fit}{Fitted values and further goodness of fit details passed from the \code{fitdistr()} function.}
\item{hist}{Histogram of log differences in filtering loss.}
\item{est}{Estimated distribution parameters.}
\item{pDFL}{Plot of differences in filtering loss values.}
}
\description{
Simultaneous filtering of the provided OTU table X at a test level alpha. One distribution is fit to taxa simultaneously.
}
\details{
Filtering is the process of identifying and removing a subset of taxa according to a particular criterion.
Function \code{PERFect_sim()} filters the provided OTU table X and outputs a filtered table
that contains signal taxa. \code{PERFect_sim()} calculates differences in filtering loss DFL
for each taxon according to the given taxa order. By default, the function fits Skew-Normal distribution
to the log-differences in filtering loss but Normal, t, or Cauchy distributions can be also used.
This is implementation of Algorithm 1 described in Smirnova et al.
}
\examples{
data(mock2)
# Proportion data matrix
Prop <- mock2$Prop
# Counts data matrix
Counts <- mock2$Counts
dim(Counts) # 240x46
# Perform simultaneous filtering of the data
res_sim <- PERFect_sim(X=Counts)
dim(res_sim$filtX) # 240x10, removing 36 taxa
colnames(res_sim$filtX) # signal taxa
#permutation perfect colored by FLu values
pvals_Plots(PERFect = res_sim, X = Counts, quantiles = c(0.25, 0.5, 0.8, 0.9), alpha=0.05)
}
\references{
Azzalini, A. (2005). The skew-normal distribution and related multivariate families. Scandinavian Journal of Statistics, 32(2), 159-188.
Smirnova, E., Huzurbazar, H., Jafari, F. ``PERFect: permutation filtration of microbiome data", to be submitted.
}
\seealso{
\code{\link{PERFect_perm}}
}
\author{
Ekaterina Smirnova
}
|
3ea434768bf909fb4c83f156c9f5433c376652d4
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/9491_0/rinput.R
|
7c6cc7bf3a9ab123fa17672882185e3c04c776a4
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Convert a rooted Newick tree to its unrooted form on disk.
library(ape)

# Read the rooted tree, strip the root, and write the result alongside it.
rooted_tree <- read.tree("9491_0.txt")
write.tree(unroot(rooted_tree), file = "9491_0_unrooted.txt")
|
5596ecff3b491ec02fd217f92e00ddef0ed8f09a
|
f5ecf7e7692092ddb3d77242a239d43f49d446f2
|
/man/FlowLiteral-class.Rd
|
4502b074515927ac0c32906e165daaa88ce677d8
|
[] |
no_license
|
andref1989/Flow
|
5c3aee2f206f2918d47d535b65132325f2047bd9
|
7526166ff94d6ae3fa9ee9627f830c4e0a25a0a7
|
refs/heads/new_master
| 2020-06-06T15:31:39.702865
| 2019-07-15T20:17:04
| 2019-07-15T20:17:04
| 192,778,870
| 0
| 0
| null | 2019-07-11T20:15:40
| 2019-06-19T17:45:07
|
R
|
UTF-8
|
R
| false
| true
| 633
|
rd
|
FlowLiteral-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Flow.R
\docType{class}
\name{FlowLiteral-class}
\alias{FlowLiteral-class}
\title{Class to represent a literal argument to a task (ie a static path or value), used for wiring modules into tasks}
\description{
S4 class for \code{FlowLiteral}
Class \code{FlowLiteral} is a simple container for storing arguments to Task that are to be interpreted literally
used when initializing a task config.
}
\section{Slots}{
\describe{
\item{arg}{scalar character argument that will be interpreted literally}
}
}
\author{
Marcin Imielinski
}
|
d0a6a43778c9f4b70fc24a3208e58ef12007ffcc
|
cebf0d6fbb54696c4a9209734cdc466bb623b46f
|
/Exercise_7.R
|
f647c550deb19ae6f76e2d9aa93558d711dc09c9
|
[] |
no_license
|
slynch5/ICB2019_Exercise07
|
d0649fbd13dee6954e51c16c6b3fd364099c6a66
|
6fed1d2992ecd3e30d12651f25a403cae910c513
|
refs/heads/master
| 2020-09-06T17:32:37.838291
| 2019-11-12T01:38:17
| 2019-11-12T01:38:17
| 220,495,645
| 0
| 0
| null | 2019-11-08T15:32:12
| 2019-11-08T15:32:11
| null |
UTF-8
|
R
| false
| false
| 1,824
|
r
|
Exercise_7.R
|
## Exercise_7: Custom Functions in R
## Write a function that returns the odd (1,3,5,etc.) rows of any
## dataframe passed as an argument
# NOTE(review): quote = "" leaves the quote characters in the header, so the
# column names come in as X.Sepal.Width., X.Species., etc. -- the functions
# below depend on those exact names.
iris = read.csv("iris.csv", header = TRUE, sep = ",", quote = "") # Read in .csv file (Test file)
# Return the odd-numbered rows (1, 3, 5, ...) of any data frame.
#
# dataFrame: the data frame to subset.
# Returns a data frame containing only the odd rows; an empty input yields
# an empty (0-row) data frame.
oddFunc <- function(dataFrame){
  # Fix: index by the argument's own row count -- the original hard-coded
  # nrow(iris), which broke for any other data frame (and seq(1, 0, by = 2)
  # would error on an empty one).
  odd_rows <- seq_len(nrow(dataFrame)) %% 2 == 1
  dataFrame[odd_rows, , drop = FALSE]
}
# Demonstration call on the loaded iris table.
oddFunc(iris) # Odd rows of the dataframe passed to the oddFunc function; example uses 'iris' dataframe
## Repeat a subset of last week's exercise, but write functions
## to accomplish these tasks.
# Return the number of observations for a given species included
# in the data set
# Count the observations matching a species name.
#
# species: species name as a string (matched as a regular expression against
#          the X.Species. column, as in the original).
# data:    data frame to search; defaults to the global `iris` table so
#          existing calls behave unchanged (the original hard-coded the
#          global, which made the function unusable on other data).
# Returns a string "<species> <count>", e.g. "setosa 50".
speciesFunc <- function(species, data = iris){
  output <- paste(species, nrow(data[grep(species, data$X.Species.), ]))
  return(output)
}
# Demonstration calls, one per species in the data set.
speciesFunc("setosa") # Input of species name needs quotes around it for function to work
speciesFunc("versicolor")
speciesFunc("virginica")
# Return a dataframe for flowers with Sepal.Width greater than
# a value specified by the function user
# Return the rows whose sepal width is at least `value`.
#
# value: numeric threshold for the X.Sepal.Width. column.
# data:  data frame to filter; defaults to the global `iris` table so
#        existing calls behave unchanged (the original hard-coded the global).
# NOTE(review): the comparison is >= as in the original, although the
# exercise text says "greater than" -- confirm which is intended.
greaterWidth <- function(value, data = iris){
  data[which(data$X.Sepal.Width. >= value), ]
}
# Demonstration call with a threshold of 3.5.
greaterWidth(3.5) # Example of calling function and returning desired data for value = 3.5
# Write the data for a given species to a comma-delimited file
# with the given species name as the file name; Hint: look at
# paste() to add the .csv extension to your file.
# Write the rows for a species to "<species>.csv" in the working directory.
#
# species: species name (regex-matched against the X.Species. column).
# data:    data frame to subset; defaults to the global `iris` table so
#          existing calls behave unchanged. Only the first five columns are
#          written, as in the original.
# Returns the file name invisibly.
createCSV <- function(species, data = iris){
  species_rows <- data[grep(species, data$X.Species.), 1:5]
  file_name <- paste(species, '.csv', sep="")
  # Fix: sep = "," so the output really is comma-delimited, as the exercise
  # and the .csv extension require (the original wrote sep = " ").
  write.table(species_rows, file = file_name, sep = ",", quote = FALSE)
  invisible(file_name)
}
# Write one file per species present in the data set.
createCSV("setosa") # Calls function to create a .csv file for "setosa" data
createCSV("versicolor") # Calls function to create a .csv file for "versicolor" data
createCSV("virginica") # Calls function to create a .csv file for "virginica" data
|
90fe2ebb511cd385cc6f4a3d76e0dea91f49bc13
|
50959bf31e007657fe5f5dcf65f86b276e6fb4c1
|
/man/strr_process_property.Rd
|
da0454f5ef74ddb9e3ac8e94621e06e4caf77a02
|
[] |
no_license
|
UPGo-McGill/strr
|
dc1aebc2f1d9eed9733058215b4cc75975ce8491
|
d960ca300d6efc8b865beeaa267819bf3f0d9717
|
refs/heads/master
| 2023-07-26T14:01:09.518232
| 2023-07-06T19:07:38
| 2023-07-06T19:07:38
| 183,255,784
| 1
| 0
| null | 2020-07-29T02:30:48
| 2019-04-24T15:22:28
|
R
|
UTF-8
|
R
| false
| true
| 2,666
|
rd
|
strr_process_property.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/strr_process_property.R
\name{strr_process_property}
\alias{strr_process_property}
\title{Function to process raw property STR tables into UPGo format}
\usage{
strr_process_property(property, keep_cols = FALSE, quiet = FALSE)
}
\arguments{
\item{property}{An unprocessed property data frame in the raw AirDNA format,
with either 37 or 56 fields.}
\item{keep_cols}{A logical scalar. If the `property` table has 56 fields,
should the superfluous 19 fields be kept, or should the table be trimmed to
the 37 fields which UPGo uses (default)?}
\item{quiet}{A logical scalar. Should the function execute quietly, or should
it return status updates throughout the function (default)?}
}
\value{
A list with three elements: 1) the processed `property` table; 2) an
`error` table identifying corrupt or otherwise invalid row entries; 3) a
`missing_geography` table identifying property_IDs with missing
latitude/longitude coordinates.
}
\description{
\code{strr_process_property} takes raw property tables from AirDNA and cleans
them for analysis or for upload in the UPGo database storage format.
}
\details{
A function for cleaning raw property tables from AirDNA and preparing them
for subsequent analysis or upload in the UPGo format. The function also
produces error files which identify possible corrupt or missing lines in the
input file.
The function expects the input property file to have either 56 fields (the
default for a raw table from AirDNA) or 37 fields (the default for UPGo,
after the `Zipcode`, `Average Daily Rate (USD)`,
`Average Daily Rate (Native)`, `Annual Revenue LTM (USD)`,
`Annual Revenue LTM (Native)`, `Occupancy Rate LTM`,
`Number of Bookings LTM`, `Count Reservation Days LTM`,
`Count Available Days LTM`, `Count Blocked Days LTM`,
`Calendar Last Updated`, `Security Deposit (Native)`,
`Cleaning Fee (Native)`, `Extra People Fee (Native)`,
`Published Nightly Rate (USD)`, `Published Monthly Rate (USD)`,
`Published Weekly Rate (USD)`, `Airbnb Listing URL`, and
`HomeAway Listing URL` fields are removed on import). Eventually the function
will be updated to support inputs from Inside Airbnb as well.
Because the expectation is that the input files will be very large, the
function uses updating by reference on the property input table. This saves a
considerable amount of memory by avoiding making an unnecessary copy of the
input table, but has the side effect of changing the initial input file even
if the output is being assigned to a new object. An `update_by_reference`
argument may be added to the function in the future to override this
behaviour.
}
|
09922be36faef4ff2b3282c081af17bf12c9b308
|
771bcf8c606a6b9313b1583e4dceaa15d0d44ad4
|
/inst/examples/sales/man/print-methods.Rd
|
a7ead2d559e8a4120801e401010a6b0b6cf27c43
|
[] |
no_license
|
MangoTheCat/testCoverage
|
5aa438f202636f9b464817a367f7d6d55911ddfa
|
339ffef39d851c79a0011fc3444e1128b687308c
|
refs/heads/master
| 2020-12-24T05:17:25.157657
| 2018-11-19T10:01:29
| 2018-11-19T10:01:29
| 23,995,468
| 18
| 4
| null | 2014-10-02T09:49:37
| 2014-09-13T13:53:25
|
R
|
UTF-8
|
R
| false
| true
| 570
|
rd
|
print-methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unique.R
\docType{methods}
\name{print,SalesData-method}
\alias{print,SalesData}
\alias{print,SalesData-method}
\title{Print Sales Data}
\usage{
\S4method{print}{SalesData}(x, ...)
}
\arguments{
\item{x}{SalesData object.}
\item{\dots}{further arguments passed to or from other methods.}
}
\description{
A method to print an object.
}
\examples{
x <- new(Class = "SalesData",
Date = as.Date(x = 1:3, origin = "2015-12-18"),
Daily.Total = 1:3,
Outlet = "CHIPPENHAM")
print(x)
}
|
957752c8fd4e1074f86036422b81c9d0056227f8
|
a8751ed8f4113510037204fb0f03964235fa2250
|
/man/plot.MSLT.S.Rd
|
ce62943f074ee5790031bd8b3eafc869bcbc7e08
|
[] |
no_license
|
al00014/Biograph
|
83ed141195adfec30634576e6a99c4ed54cbf0a3
|
15b46e3416f83964aab1baeaa58d1d92fa4e7e2b
|
refs/heads/master
| 2023-04-23T19:58:23.499789
| 2016-03-31T17:50:43
| 2016-03-31T17:50:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,647
|
rd
|
plot.MSLT.S.Rd
|
\name{plot.MSLT.S}
\alias{plot.MSLT.S}
\title{Plots survival function. }
\description{Plot method for object of class 'MSLT.S' using the ggplot2 package. It plots the multistate survival function as estimated by \code{\link{MSLT.S}}
}
\usage{
\method{plot}{MSLT.S}(x, e0, order,colours,title,area,xmin,xmax,...)
}
\arguments{
\item{x}{The multistate survival function. An object of class \code{MSLT.S} produced by the \code{MSLT.S} function. It is one of two components of the object produced by the \code{MSLT.S} function.}
\item{e0}{Life expectancy at reference age (e.g. at birth)}
\item{order}{Desired sequence of states in plot}
\item{colours}{Colours}
\item{title}{Title for multistate survival plot}
\item{area}{Graph type: area (area=TRUE) or bar (area=FALSE)}
\item{xmin}{Minimum value on x-axis}
\item{xmax}{Maximum value on x-axis}
\item{\dots}{Further arguments to plot}
}
\value{The function plot.MSLT.S returns the multistate survival function (S) and the plot. It returns an object with the following components:
\item{S }{The multistate survival function (values used in the plot) }
\item{plot }{The plot produced by the ggplot2 package. }
}
\author{Frans Willekens}
\seealso{MSLT.S}
\examples{
# The multistate life table based on occurrence-exposure rates
data (GLHS)
param <- Parameters (GLHS)
cr <- Cumrates (irate=3,Bdata=GLHS)
S <- MSLT.S(cr$oe)
radix <- c(10000,0)
mslt <- MSLT.e (S,radix)
# Plot the multistate survival function (object of class 'MSLT.S' )
z<- plot (x=S$S,e0=mslt$e0,title="Multistate survival function",area=TRUE,order=c("N","J"))
}
|
17383a1bc7f7d4925c65ee0958224584675f7f29
|
030d72630276cb91cc7be3647c25dda7942d8842
|
/6_3_Mtry_OOBerror_RF(spam)/code/Question_6_3.R
|
6602ee0fe7d6bf242a98ab0027b7158f56bf6d1d
|
[] |
no_license
|
WL0118/Supervised_Learning
|
a2ba36c84ad5aa41a2bb5a8264173c6e5c51fb54
|
9b0d3aefb2baf3fb8b257c2b9549048969fd89b0
|
refs/heads/master
| 2023-03-18T09:18:30.842612
| 2021-03-12T16:46:25
| 2021-03-12T16:46:25
| 327,725,976
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,966
|
r
|
Question_6_3.R
|
# Question 6.3: random-forest tuning on the SPAM data set.
# Sweeps mtry at fixed ntree, then ntree at fixed mtry, recording test-set
# accuracy and OOB error. Requires the randomForest and caret packages and
# Data/SPAM.csv (57 predictors, label in column V58).
rm(list = ls())
set.seed(777)
library(randomForest)
library(caret)  # provides trainControl and confusionMatrix; the original
                # library(trainControl) call was removed -- trainControl is a
                # caret function, not a package, so that call errors at runtime.

# 60/40 train/test split.
SPAM <- read.csv("Data/SPAM.csv", header = FALSE, sep = ' ')
train_ind <- sample(1:nrow(SPAM), nrow(SPAM) * 0.60)
train <- SPAM[train_ind, ]
test <- SPAM[-train_ind, ]

# --- Sweep mtry = 4..8 at fixed ntree = 500 ---------------------------------
error_store_test <- c()  # test-set accuracy per mtry (name kept for compatibility)
error_store <- c()       # mean OOB error per mtry
for (i in 4:8) {
  model <- randomForest(x = train[1:57], y = as.factor(train[, 58]),
                        ntree = 500, mtry = i, na.action = na.omit)
  y_hat <- predict(model, newdata = test[1:57], type = "response")
  yy <- as.factor(test$V58)
  conf_mat <- confusionMatrix(y_hat, yy)
  acc_RF <- conf_mat$overall[1]
  error_store_test <- c(error_store_test, unname(acc_RF))
  print(model$err.rate[, 1])
  error_store <- c(error_store, sum(model$err.rate[, 1]) / 500)
}

# A large mtry for comparison; importance = TRUE records variable importance.
model <- randomForest(x = train[1:57], y = as.factor(train[, 58]),
                      ntree = 500, mtry = 30, importance = TRUE)
print(sum(model$err.rate[, 1]) / 500)

# tuneRF search starting from the usual sqrt(p) heuristic.
metric <- "Accuracy"
mtry <- sqrt(57)
control <- trainControl(method = "boot", search = "random")
bestmtry <- tuneRF(train[1:57], as.factor(train[, 58]), stepFactor = 1.3,
                   mtryStart = mtry, improve = 1e-5, ntree = 500)

mtry_num <- 4:8
plot(x = mtry_num, y = error_store_test, main = "test error by mtry", ylab = "Accuracy")
plot(y = error_store, x = mtry_num, main = "train error by mtry", ylab = "Accuracy")

# --- Sweep ntree = 100..1000 at fixed mtry = 5 ------------------------------
error_store_test_ntree <- c()
error_store_ntree <- c()
tree_num <- 1:10 * 100
for (i in 1:10) {
  model <- randomForest(x = train[1:57], y = as.factor(train[, 58]),
                        ntree = i * 100, mtry = 5, na.action = na.omit)
  y_hat <- predict(model, newdata = test[1:57], type = "response")
  yy <- as.factor(test$V58)
  conf_mat <- confusionMatrix(y_hat, yy)
  acc_RF <- conf_mat$overall[1]
  # Fix: accumulate onto the ntree-specific vectors -- the original appended
  # onto the mtry-sweep vectors, so each iteration discarded the previous
  # ntree results instead of collecting them.
  error_store_test_ntree <- c(error_store_test_ntree, unname(acc_RF))
  print(model$err.rate[, 1])
  error_store_ntree <- c(error_store_ntree, sum(model$err.rate[, 1]))
}
plot(error_store_test_ntree, main = "test error by ntree", ylab = "Accuracy")
|
cefbdbf7b2cf2006cf4b1df8da181e0b53ab4da6
|
144b6d59fc15074f37713eff96eb0aefab6537a6
|
/client2UseCaseTest.R
|
05ad41701b7377d7337275218bca30d46d56e99b
|
[] |
no_license
|
dkdupuis/R
|
0aaab4d0183ebc7ff96f3261e326268c4cc8445a
|
95cf3dc4efd7048de7a33d1df19d2411066db89d
|
refs/heads/master
| 2021-01-20T18:20:13.436051
| 2020-02-04T15:09:34
| 2020-02-04T15:09:34
| 60,870,121
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,500
|
r
|
client2UseCaseTest.R
|
# Walk every watchlist file under ./c1/<subdir>/ and, for each detected topic
# date, report the top "bursting" influencer and run an external explanation
# script. Relies on helpers defined elsewhere in this project:
# read.timeseries, detectTopics, burstOutliers, edgeFilter, and explaininf.sh.
parDirLoc <- paste(getwd(),"/c1/", sep = "")
parDir <- dir(parDirLoc)
for(i in 1:length(parDir))
{
subDirLoc <- paste(parDirLoc, parDir[i], "/", sep = "")
subDir <- dir(subDirLoc)
for(r in 1:length(subDir))
{
file <- paste(subDirLoc, subDir[r], sep = "")
watchData <- read.timeseries(file)
# Build the "X<yyyy.mm.dd>" column name corresponding to the anchor date row.
anchorDate <- paste("X",gsub("-",".",as.character(watchData[watchData$category=="anchor_date",]$label)),sep = "")
dates <- detectTopics(file, daysToUse = 115, mainCatagoryLabel = "watchlist_score", useDC = FALSE) #+ 1
print(paste("Topic - ", as.character(watchData[watchData$category=="watchlist_score",]$label),sep = ""))
topicID <- watchData[watchData$category=="watchlist_topic",]$uuid
burstScores <- burstOutliers(file,filterToUse = edgeFilter(), daysToWeigh = 1, daysToUse = 116, burstParam = .5)
for(t in 1:length(dates))
{
# Find the burst-score column for this topic date and the influencer with
# the highest score in it.
colDate <- paste("X", gsub("-", ".", dates[t]), sep = "")
rowOfMax <- which.max(burstScores[,colnames(burstScores)==colDate])
infID <- as.character(burstScores$uuid[rowOfMax])
print(dates[t])
print(paste("Influencer - ",as.character(burstScores$label[rowOfMax]),sep = ""))
# Delegate the explanation to a shell helper; intern = TRUE captures stdout.
bashCom <- paste("bash explaininf.sh",topicID,dates[t],infID, sep = " ")
print(system(bashCom, intern = TRUE))
}
}
}
|
ecf5f89701ad2bab29c14bd5eac971bb7db19842
|
845a71cc97ffeef053d0ca142649d4f231126e5d
|
/R/shiny_2/server.R
|
d7b4407586ee6572dc0d3a687997004228633556
|
[] |
no_license
|
ewkerez/data_science
|
94b086c20c0ea30f89f0a6c33537c904a0e488ae
|
6095188091af2dc6e90983c7f28101b13fe40878
|
refs/heads/master
| 2021-04-27T03:41:24.735415
| 2018-06-13T15:06:56
| 2018-06-13T15:06:56
| 122,718,381
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,583
|
r
|
server.R
|
################################################################################
# A file including backend code (server)
################################################################################
# global.R is sourced only once, before ui.R and server.R are evaluated;
# it is a good place to load packages, keep constants,
# and define database connections.
shinyServer(function(input, output, session) {
  # Dynamic slider for the departure-delay window (minutes).
  # Fix: the original assigned to `out$delayRangeUI`, but the server argument
  # is named `output`, so the slider was never rendered.
  output$delayRangeUI <- renderUI({
    sliderInput(
      inputId = "delayRange",
      label = "Okresl przedzial opóżnienia:",
      min = 0,
      max = 1000,
      value = c(100, 1000)
    )
  })

  # Per carrier and departure hour, the share of flights whose delay falls in
  # the selected range and whose distance meets the chosen minimum.
  # Depends on the global data frame `modFlights` and the inputs carrierName,
  # delayRange, and distance_valuse (input id spelled as in the original --
  # TODO confirm it matches ui.R).
  agreg_data <- reactive({
    modFlights %>%
      filter(name %in% input$carrierName) %>%
      group_by(name, hour) %>%
      summarise(
        # Fix: divide the matching-flight count by the group size. The
        # original had `distance >= input$distance_valuse / n()`, i.e. the
        # misplaced `/ n()` rescaled the distance threshold instead of
        # turning the count into a fraction.
        delayed_flight_perc = sum(
          dep_delay > input$delayRange[1] &
          dep_delay < input$delayRange[2] &
          distance >= input$distance_valuse
        ) / n()
      )
  })

  # Grouped bar chart: one bar per carrier within each hour of the day.
  output$delay_plot <- renderPlot({
    ggplot(agreg_data(), aes(hour, delayed_flight_perc, fill = name)) +
      geom_col(position = 'dodge') +
      theme_hc(base_size = 18) +
      scale_fill_hc() +
      xlab("Hour") +
      ylab("Percentage of delayed flights") +
      scale_y_continuous(labels = scales::percent) +
      scale_x_continuous(limits = c(0, 24), breaks = seq(0, 24, 2))
  })

  # Same aggregate as a table, with the fraction formatted as a percentage.
  output$table_plot <- renderTable({
    agreg_data() %>%
      mutate(
        delayed_flight_perc = scales::percent(delayed_flight_perc)
      )
  })
})
|
c150b6470c04d58be091343b3c6ab5a7c724fe82
|
e205d4542b2f7d13bc3c1a3bba2eae4c16cfc743
|
/man/Qconduction_substrate.Rd
|
372c4ddb61319398766763c18b231ddd6ffb6142
|
[
"MIT"
] |
permissive
|
trenchproject/TrenchR
|
03afe917e19b5149eae8a76d4a8e12979c2b752f
|
7164ca324b67949044827b743c58196483e90360
|
refs/heads/main
| 2023-08-20T11:54:26.054952
| 2023-08-04T03:52:42
| 2023-08-04T03:52:42
| 78,060,371
| 8
| 8
|
NOASSERTION
| 2022-09-15T21:36:08
| 2017-01-04T23:09:28
|
R
|
UTF-8
|
R
| false
| true
| 2,934
|
rd
|
Qconduction_substrate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/energybalance_functions.R
\name{Qconduction_substrate}
\alias{Qconduction_substrate}
\title{Conductance Assuming Substrate Thermal Conductivity is Rate Limiting}
\usage{
Qconduction_substrate(T_g, T_b, D, K_g = 0.5, A, proportion)
}
\arguments{
\item{T_g}{\code{numeric} surface temperature (K).}
\item{T_b}{\code{numeric} body temperature (K).}
\item{D}{\code{numeric} characteristic dimension of the animal (m).}
\item{K_g}{\code{numeric} thermal conductivity of substrate (\ifelse{html}{\out{W K<sup>-1</sup> m<sup>-1</sup>}}{\eqn{W K^-1 m^-1}{ASCII}}).}
\item{A}{\code{numeric} surface area (\ifelse{html}{\out{m<sup>2</sup>}}{\eqn{m^2}{ASCII}}).}
\item{proportion}{\code{numeric} proportion in contact to the surface.}
}
\value{
\code{numeric} conductance (W).
}
\description{
The function calculates conductance (W) of an ectothermic animal to its substrate. The method assumes the major resistance to conduction is the substrate and that the interior of the animal is equal in temperature to its surface (thermally well mixed) \insertCite{Spotila1992}{TrenchR}.
}
\examples{
Qconduction_substrate(T_g = 293,
T_b = 303,
D = 0.01,
K_g = 0.3,
A = 10^-2,
proportion = 0.2)
}
\references{
\insertAllCited{}
}
\seealso{
Other biophysical models:
\code{\link{Grashof_number_Gates}()},
\code{\link{Grashof_number}()},
\code{\link{Nusselt_from_Grashof}()},
\code{\link{Nusselt_from_Reynolds}()},
\code{\link{Nusselt_number}()},
\code{\link{Prandtl_number}()},
\code{\link{Qconduction_animal}()},
\code{\link{Qconvection}()},
\code{\link{Qemitted_thermal_radiation}()},
\code{\link{Qevaporation}()},
\code{\link{Qmetabolism_from_mass_temp}()},
\code{\link{Qmetabolism_from_mass}()},
\code{\link{Qnet_Gates}()},
\code{\link{Qradiation_absorbed}()},
\code{\link{Qthermal_radiation_absorbed}()},
\code{\link{Reynolds_number}()},
\code{\link{Tb_CampbellNorman}()},
\code{\link{Tb_Gates2}()},
\code{\link{Tb_Gates}()},
\code{\link{Tb_butterfly}()},
\code{\link{Tb_grasshopper}()},
\code{\link{Tb_limpetBH}()},
\code{\link{Tb_limpet}()},
\code{\link{Tb_lizard_Fei}()},
\code{\link{Tb_lizard}()},
\code{\link{Tb_mussel}()},
\code{\link{Tb_salamander_humid}()},
\code{\link{Tb_snail}()},
\code{\link{Tbed_mussel}()},
\code{\link{Tsoil}()},
\code{\link{actual_vapor_pressure}()},
\code{\link{boundary_layer_resistance}()},
\code{\link{external_resistance_to_water_vapor_transfer}()},
\code{\link{free_or_forced_convection}()},
\code{\link{heat_transfer_coefficient_approximation}()},
\code{\link{heat_transfer_coefficient_simple}()},
\code{\link{heat_transfer_coefficient}()},
\code{\link{saturation_vapor_pressure}()},
\code{\link{saturation_water_vapor_pressure}()}
}
\concept{biophysical models}
|
d2d1050e1ac734deacdcbfd214df6d838f1caa90
|
c8fd30b59b962df19adff091bfa28fb766520d13
|
/R/archived_20181010/gp_archive_20180504/gp_univariate/stan_gp_test.R
|
0b40af4bc3cfec293e3ecd290233c7a6c9c1af61
|
[] |
no_license
|
tkmckenzie/Iterative_ML
|
bdde722ce8ffb90561e155339105ade24ee2f9b8
|
ee274ef242d4e73b0138cb39fdb323382654b2bf
|
refs/heads/master
| 2021-06-20T10:55:42.637141
| 2019-06-18T22:12:59
| 2019-06-18T22:12:59
| 136,051,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 614
|
r
|
stan_gp_test.R
|
# Fit a univariate Gaussian process in Stan with fixed hyperparameters and
# extract the posterior-predictive function values on a regular grid.
library(rstan)
# NOTE(review): setwd() and rm(list = ls()) in a script are side-effectful
# anti-patterns; kept as-is since this is the author's working script.
setwd("~/docs/Iterative_ML_paper/R/gp_univariate")
rm(list = ls())
# Load the observed data (assumed to provide `x` and `y` -- TODO confirm).
load("data.RData")
N = length(x)
N.pred = 100  # number of prediction points
# Evenly spaced prediction grid spanning the range of the observations.
x.pred = seq(min(x), max(x), length.out = N.pred)
# Fixed GP hyperparameters (not estimated; see algorithm = "Fixed_param").
alpha = 1
length.scale = 0.15
sigma = 0.32
stan.data = list(N = N, N_pred = N.pred,
                 x_pred = x.pred, y = y, x = x,
                 alpha = alpha, length_scale = length.scale, sigma = sigma)
# One chain, two iterations with one warmup: the model only *generates*
# draws given the fixed parameters, so no real sampling is needed.
stan.fit = stan("gp_univariate_genfit.stan", data = stan.data,
                algorithm = "Fixed_param",
                chains = 1, iter = 2, warmup = 1)
stan.extract = extract(stan.fit)
# First retained draw of f_pred, transposed; exact orientation depends on
# the dimensions of f_pred in the Stan model -- TODO confirm.
fit = t(stan.extract$f_pred[1,,])
|
f936824bf619ca4d8a617ad6bf457b800a0ab67b
|
785085d938a1178aac085aa1bc220c355c82692b
|
/plot4.R
|
867d5ca062c897c25734ddd97715ffbecade1d70
|
[] |
no_license
|
mandeldm/ExData_Plotting1
|
ee2a9af830d361c63f12713000fb33a8c63fdb26
|
4b9507c64063bf40409b3a7b332fc477baf87f9b
|
refs/heads/master
| 2020-12-25T10:14:01.102344
| 2015-04-12T22:45:31
| 2015-04-12T22:45:31
| 31,859,562
| 0
| 0
| null | 2015-03-08T18:02:42
| 2015-03-08T18:02:42
| null |
UTF-8
|
R
| false
| false
| 1,835
|
r
|
plot4.R
|
# Plot 4: a 2x2 panel of household power measurements for the two days
# 2007-02-01 and 2007-02-02, copied to plot4.png.

# Read the full data set; '?' marks missing values; first two columns are
# Date and Time strings, the remaining seven are numeric measurements.
powdata2 <- read.table("household_power_consumption.txt",
                       sep=";",
                       dec=".",
                       header=TRUE,
                       stringsAsFactors=FALSE,
                       na.strings="?",
                       colClasses=c(rep("character",2), rep("numeric",7)))
# Combine Date and Time into a single POSIX timestamp column.
powdata2 <- within(powdata2, datetime <- paste(Date, Time))
powdata2$datetime2 <- strptime(powdata2$datetime, format="%d/%m/%Y %H:%M:%S")
# Keep only the two target days (>= Feb 1, < Feb 3).
powdata3 <- subset(powdata2, datetime2>=strptime("2007-02-01", format="%Y-%m-%d")
                   & datetime2<strptime("2007-02-03", format="%Y-%m-%d"))
# Save current graphics parameters so they can be restored afterwards.
par_default <- par(no.readonly = TRUE)
par(mfrow = c(2, 2), mar = c(4, 5, 0.5, 0.5), oma = c(0, 0, 0, 0))
with(powdata3, {
  # Top-left: global active power over time.
  plot(datetime2, Global_active_power,
       type="l", ylab="Global Active Power", xlab="")
  # Top-right: voltage over time.
  plot(datetime2, Voltage,
       type="l", ylab="Voltage", xlab="datetime")
  # Bottom-left: the three sub-metering channels overlaid.
  plot(powdata3$datetime2, powdata3$Sub_metering_1, type="l",
       ylab="Energy sub metering", xlab="")
  points(powdata3$datetime2, powdata3$Sub_metering_2, type="l",
         ylab="Energy sub metering", xlab="", col="red")
  points(powdata3$datetime2, powdata3$Sub_metering_3, type="l",
         ylab="Energy sub metering", xlab="", col="blue")
  legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         col=c("black", "red", "blue"), lty=1, inset=0.15, bty="n")
  # Bottom-right: global reactive power over time.
  plot(datetime2, Global_reactive_power,
       type="l", ylab="Global_reactive_power", xlab="datetime")
})
# Copy the on-screen plot to a PNG file.
dev.copy(png, file="plot4.png")
dev.off()
par(par_default)
# Restoring previous parameters so that subsequent graphs are not placed in 2x2 grid etc
7490c661b57932e70ca98056a8bb13d43199c21c
|
20bb25b978b163ac11161d7c455d4ef42233d520
|
/basicR.R
|
7c6687a642e392a18eb97a798cc0774d463e0a4e
|
[] |
no_license
|
monicamurugesan/DS-Basics
|
4fecbcda0e811b240f6d6ae9ab31b7a1caf98ebe
|
50308cde07b08bdc58924af3b17c76be5a4377af
|
refs/heads/master
| 2022-12-03T12:24:25.051826
| 2020-08-27T15:17:23
| 2020-08-27T15:17:23
| 290,532,225
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 488
|
r
|
basicR.R
|
# R basics scratch script: arithmetic, vectors, and data frames.

1+1
x <- 1+1
x

a <- 50
b <- 20
a*b+x
a+b*x
(6*3)+5/2
exp(1)

# Numeric vectors are built with c().
db <- c(1,2,4.5)
db
# BUG FIX: the original `d <-(1,2,4)` is a syntax error -- a vector
# literal needs the c() constructor.
d <- c(1,2,4)
rep(1,10)
rep(2,3)

# Element-wise vector arithmetic.
vect_x <- c(2,0,0,4)
vect <- c(1,2,3,4)
vec1 <- vect_x+vect
vec1
vec1*4
vec1[1:4]
vec1[2] <- 3  # replace the second element

# Mixing types in c() coerces everything to character.
vect_char <- c('R','python','java',10,100.5)
vect2 <- c('R','python','java',1)
c <- c("a","b","c")  # note: this variable name shadows nothing harmful here
c

# Data frames and indexing: [row, column].
df <- data.frame(x=1:3, y=c("a","b","c"))
df[1,1]
df[1,]
df1 <- data.frame(height=c(102,120), width=c(50,50))
df1
df1[1,1]
df[c(1,3),2]
df[c(1,3),1]
df[c(1,3),1:2]
|
ab4af11f38dd5251af26ebb3229c1bc5e9b320aa
|
78132aee39db22e146b6fd42c525cc74d2004e26
|
/01GenerateData.R
|
96de13920bcfae74160d822df28d8692167b9ad1
|
[
"MIT"
] |
permissive
|
jamespaul007/GeospatialLineGraphs
|
fa2878cfc63738beae2e63926b82061debd6eda8
|
717569c9e70cf90e503242f6dc4f269b66655a4d
|
refs/heads/master
| 2021-01-18T06:14:09.689996
| 2014-08-04T15:16:48
| 2014-08-04T15:16:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,567
|
r
|
01GenerateData.R
|
# Aggregate world-population point data onto a regular lat/lng grid,
# summing values per cell in parallel across latitude rows.
library(foreach)
library(doParallel)
# Input columns used below: Y (latitude), X (longitude), DN (value).
all.data <- read.csv("./DataSets/WorldPopulation.csv", header=TRUE, stringsAsFactors=FALSE)
# The following are used to manipulate various data sets
# colnames(all.data) <- c("Name", "Mass", "Latitude", "Longitude") # Meteorites
# all.data$X <- as.numeric(all.data$X)
# all.data$Y <- as.numeric(all.data$Y)
# all.data$Mass <- as.numeric(all.data$Mass)
# Time the code
start <- proc.time()
startEnd <- function(lats, lngs) {
  # Bounding box of a set of coordinates.
  #
  # Args:
  #   lats: latitude values (coerced to numeric; NAs dropped)
  #   lngs: longitude values (coerced to numeric; NAs dropped)
  #
  # Returns:
  #   c(northwest lat, northwest lng, southeast lat, southeast lng)
  lat_clean <- na.omit(as.numeric(lats))
  lng_clean <- na.omit(as.numeric(lngs))
  c(max(lat_clean), min(lng_clean), min(lat_clean), max(lng_clean))
}
# Compute the bounding box of the data and derive the grid geometry.
startEndVals <- startEnd(all.data$Y, all.data$X)
remove(startEnd)

startLat <- startEndVals[1]
endLat <- startEndVals[3]
startLng <- startEndVals[2]
endLng <- startEndVals[4]
remove(startEndVals)

# Split the latitude span into 200 equal intervals (grid cell size).
num_intervals = 200.0
# BUG FIX: the original computed the interval from `startEndVals` AFTER
# `remove(startEndVals)` above, which errors at runtime ("object not
# found"). Use the scalars saved before the removal instead.
interval <- (startLat - endLat) / num_intervals
remove(num_intervals)

# Latitudes of the grid rows, walking from north to south.
lat.list <- seq(startLat, endLat + interval, -1*interval)

# testLng <- -66.6462379307115
# testLat <- 45.9581234392
# Prepare the data to be sent in
# If you have a value you want to sum, use this
data <- all.data[,c("Y", "X", "DN")]
# If you want to perform a count, use this
# data <- all.data[,c("Longitude", "Latitude")]
# data["Value"] <- 1
sumInsideSquare <- function(pointLat, pointLng, interval, data) {
  # Sum the `value` column of all points falling inside the square grid
  # cell whose top-left corner is (pointLat, pointLng) and whose side
  # length is `interval`. `data` has columns (lat, lng, value) in order.
  names(data) <- c("lat", "lng", "value")
  in_lng <- data$lng >= pointLng & data$lng < pointLng + interval
  in_lat <- data$lat >= pointLat - interval & data$lat < pointLat
  cell <- na.omit(data[in_lng & in_lat, ])
  sum(cell$value)
}
# Debugging
# squareSumTemp <- sumInsideSquare(testLat, testLng, interval, data)
# Given a start longitude and an end longitude, calculate an array of values
# corresponding to the sums for that latitude
calcSumLat <- function(startLng, endLng, lat, interval, data) {
  # One row of the output grid: cell sums for latitude `lat`, stepping
  # west-to-east from startLng (inclusive) up to endLng (exclusive).
  # A while-loop (rather than seq()) is kept deliberately so floating-point
  # rounding of the endpoint matches the original behavior exactly.
  sums <- c()
  cursor <- startLng
  while (cursor < endLng) {
    sums <- c(sums, sumInsideSquare(lat, cursor, interval, data))
    cursor <- cursor + interval
  }
  sums
}
# Debugging
# rowTemp <- calcSumLat(startLng, endLng, testLat, interval, data)
# write.csv(rowTemp, file = "Temp.csv", row.names = FALSE)
# Set up parallel computing with the number of cores you have
cl <- makeCluster(detectCores(), outfile = "./Progress.txt")
registerDoParallel(cl)
# Compute every latitude row of the grid in parallel; each worker returns
# a numeric vector of cell sums for its latitude.
all.sums <- foreach(lat=lat.list) %dopar% {
  lat.data <- calcSumLat(startLng, endLng, lat, interval, data)
  # Progress indicator that works on Mac/Windows
  print((startLat - lat)/(startLat - endLat)*100) # Prints to Progress.txt
  lat.data
}
# NOTE(review): stopCluster(cl = NULL) stops the implicit default cluster;
# this should probably be stopCluster(cl) to stop the one created above.
stopCluster(cl = NULL)
# Convert to data frame
all.sums.frame <- data.frame(all.sums)
# Save to disk so I don't have to run it again
write.csv(all.sums.frame, file = "./GeneratedData/WorldPopulation.csv", row.names = FALSE)
# End timer
totalTime <- proc.time() - start
print(totalTime)
# NOTE(review): `startEndVals` was already removed earlier in the script,
# so this remove() call will error on it -- confirm and drop that name.
remove(all.sums, data, cl, endLat, endLng, startLat, startLng, lat.list, start, startEndVals, totalTime, calcSumLat, sumInsideSquare, interval)
|
561223d8f72b369a61264c3d9aa72d889ca7de9e
|
da725622bc962b639e1eb6df535b433e4366bcc5
|
/skillPathways/skillsData.R
|
96d387f0fa1b59fff3019ea5cf5dbd2a61f24f31
|
[] |
no_license
|
bekahdevore/rKW
|
5649a24e803b88aa51a3e64020b232a23bd459fa
|
970dcf8dc93d4ec0e5e6a79552e27ddc0f850b91
|
refs/heads/master
| 2020-04-15T12:41:49.567456
| 2017-07-25T16:29:31
| 2017-07-25T16:29:31
| 63,880,311
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 363
|
r
|
skillsData.R
|
# Merge occupation-level skills with wage/education data on the SOC code
# and write the combined table to skillsData.csv.
library(dplyr)
library(stringr)

skills <- read.csv("skills.csv")
wagesEducation <- read.csv("skillsOccupationsWagesAndEducation.csv")

# EMSI SOC codes contain dashes; strip them so the codes match the
# format used in the skills file.
wagesEducation$SOC <- str_remove_all(wagesEducation$SOC, "-")

skillsData <- merge(wagesEducation, skills, by = "SOC")
write.csv(skillsData, file = "skillsData.csv")
|
361d95eaefb124b1940917e68e0ce577e758a982
|
55f449a5496f173f62fc037574c4d7fc5a1c72b3
|
/man/mbes.Rd
|
da337cd727cbcbe8e66df1d6762d14d2d1862bd9
|
[] |
no_license
|
davan690/samplingbook
|
3db105da3416cd48f824c9df37127c5d0d0683af
|
62cf8cc97d27be5bb9ef02bdedd82f21a1e69e4c
|
refs/heads/master
| 2023-04-06T16:24:40.942282
| 2021-04-02T21:13:07
| 2021-04-02T21:13:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,937
|
rd
|
mbes.Rd
|
\name{mbes}
\alias{mbes}
\title{Model Based Estimation}
\description{
mbes is used for model based estimation of population means using auxiliary variables. Difference, ratio and regression estimates are available.
}
\usage{
mbes(formula, data, aux, N = Inf, method = 'all', level = 0.95, ...)
}
\arguments{
\item{formula}{object of class \code{formula} (or one that can be coerced to that class): symbolic description for connection between primary and secondary information}
\item{data}{data frame containing variables in the model}
\item{aux}{known mean of auxiliary variable, which provides secondary information}
\item{N}{positive integer for population size. Default is \code{N=Inf}, which means that calculations are carried out without finite population correction.}
\item{method}{estimation method. Options are \code{'simple','diff','ratio','regr','all'}. Default is \code{method='all'}.}
\item{level}{coverage probability for confidence intervals. Default is \code{level=0.95}.}
\item{\dots}{further options for linear regression model}
}
\details{
The option \code{method='simple'} calculates the simple sample estimation without using the auxiliary variable.
The option \code{method='diff'} calculates the difference estimate, \code{method='ratio'} the ratio estimate, and \code{method='regr'} the regression estimate which is based on the selected model. The option \code{method='all'} calculates the simple and all model based estimates.
For methods \code{'diff'}, \code{'ratio'} and \code{'all'} the formula has to be \code{y~x} with \code{y} primary and \code{x} secondary information.
For method \code{'regr'}, it is the symbolic description of the linear regression model. In this case, it can be used more than one auxiliary variable. Thus, \code{aux} has to be a vector of the same length as the number of auxiliary variables in order as specified in the formula.
}
\value{
The function \code{mbes} returns an object, which is a list consisting of the components
\item{call}{is a list of call components: \code{formula} formula, \code{data} data frame, \code{aux} given value for mean of auxiliary variable, \code{N} population size, \code{type} type of model based estimation and \code{level} coverage probability for confidence intervals}
\item{info}{is a list of further information components: \code{N} population size, \code{n} sample size, \code{p} number of auxiliary variables, \code{aux} true mean of auxiliary variables in population and \code{x.mean} sample means of auxiliary variables}
\item{simple}{is a list of result components, if \code{method='simple'} or \code{method='all'} is selected: \code{mean} mean estimate of population mean for primary information, \code{se} standard error of the mean estimate, and \code{ci} vector of confidence interval boundaries}
\item{diff}{is a list of result components, if \code{method='diff'} or \code{method='all'} is selected: \code{mean} mean estimate of population mean for primary information, \code{se} standard error of the mean estimate, and \code{ci} vector of confidence interval boundaries}
\item{ratio}{is a list of result components, if \code{method='ratio'} or \code{method='all'} is selected: \code{mean} mean estimate of population mean for primary information, \code{se} standard error of the mean estimate, and \code{ci} vector of confidence interval boundaries}
\item{regr}{is a list of result components, if \code{type='regr'} or \code{type='all'} is selected: \code{mean} mean estimate of population mean for primary information, \code{se} standard error of mean estimate, \code{ci} vector of confidence interval boundaries, and \code{model} underlying linear regression model}
}
\references{
Kauermann, Goeran/Kuechenhoff, Helmut (2010): Stichproben. Methoden und praktische Umsetzung mit R. Springer.
}
\author{Juliane Manitz}
\seealso{\code{\link{Smean}}, \code{\link{Sprop}}}
\examples{
## 1) simple suppositious example
data(pop)
# Draw a random sample of size=3
set.seed(802016)
data <- pop[sample(1:5, size=3),]
names(data) <- c('id','x','y')
# difference estimator
mbes(formula=y~x, data=data, aux=15, N=5, method='diff', level=0.95)
# ratio estimator
mbes(formula=y~x, data=data, aux=15, N=5, method='ratio', level=0.95)
# regression estimator
mbes(formula=y~x, data=data, aux=15, N=5, method='regr', level=0.95)
## 2) Bundestag election
data(election)
# draw sample of size n = 20
N <- nrow(election)
set.seed(67396)
sample <- election[sort(sample(1:N, size=20)),]
# secondary information SPD in 2002
X.mean <- mean(election$SPD_02)
# forecast proportion of SPD in election of 2005
mbes(SPD_05 ~ SPD_02, data=sample, aux=X.mean, N=N, method='all')
# true value
Y.mean <- mean(election$SPD_05)
Y.mean
# Use a second predictor variable
X.mean2 <- c(mean(election$SPD_02),mean(election$GREEN_02))
# forecast proportion of SPD in election of 2005 with two predictors
mbes(SPD_05 ~ SPD_02+GREEN_02, data=sample, aux=X.mean2, N=N, method= 'regr')
## 3) money sample
data(money)
mu.X <- mean(money$X)
x <- money$X[which(!is.na(money$y))]
y <- na.omit(money$y)
# estimation
mbes(y~x, aux=mu.X, N=13, method='all')
## 4) model based two-phase sampling with mbes()
id <- 1:1000
x <- rep(c(1,0,1,0),times=c(10,90,70,830))
y <- rep(c(1,0,NA),times=c(15,85,900))
phase <- rep(c(2,1), times=c(100,900))
data <- data.frame(id,x,y,phase)
# mean of x out of first phase
mean.x <- mean(data$x)
mean.x
N1 <- length(data$x)
# calculation of estimation for y
est.y <- mbes(y~x, data=data, aux=mean.x, N=N1, method='ratio')
est.y
# correction of standard error with uncertaincy in first phase
v.y <- var(data$y, na.rm=TRUE)
se.y <- sqrt(est.y$ratio$se^2 + v.y/N1)
se.y
# corrected confidence interval
lower <- est.y$ratio$mean - qnorm(0.975)*se.y
upper <- est.y$ratio$mean + qnorm(0.975)*se.y
c(lower, upper)
}
|
d7d49e705160cfcca4dbd9e282dd93c4c8c0079c
|
95e509eb99116d4311c0b71c39a0863e15c2da86
|
/plot4.R
|
cc5356f6b22632cf99c03021eb35864e5d46477b
|
[] |
no_license
|
weiweigithub/ExData_Plotting1
|
8f5edb839f67131865e7a5ea4feaa53afa8ce554
|
764c72852a61fe9a201437ac84adeaa988661452
|
refs/heads/master
| 2021-01-17T16:22:18.121627
| 2015-03-08T22:40:47
| 2015-03-08T22:40:47
| 31,867,993
| 0
| 0
| null | 2015-03-08T22:18:25
| 2015-03-08T22:18:25
| null |
UTF-8
|
R
| false
| false
| 1,516
|
r
|
plot4.R
|
## Reading the downloaded data file from local folder
rawdata<-read.table("household_power_consumption.txt", header = TRUE, colClasses = "character", sep = ";")
## Extract the portion of the data within the specified time period
data<-filter(rawdata, Date=="1/2/2007" | Date=='2/2/2007')
## Extract the data for plotting
datetime<- strptime(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")
gap<-as.numeric(data$Global_active_power)
grp<-as.numeric(data$Global_reactive_power)
vol<-as.numeric(data$Voltage)
sm1<-as.numeric(data$Sub_metering_1)
sm2<-as.numeric(data$Sub_metering_2)
sm3<-as.numeric(data$Sub_metering_3)
## Plot all plots in one window
par(mfrow=c(2,2))
## The first plot, row 1 column 1
with(data, plot(datetime, gap, type="l", xlab = "", ylab="Global Active Power"))
## The second plot, row 1 column 2
with(data, plot(datetime, vol, type ="l", ylab = "Voltage"))
## The third plot, row 2 column 1 (3 lines in one plot)
with(data, plot(datetime, sm1, type = "n", xlab = "", ylab="Energy sub metering"))
with(data, lines(datetime, sm1, col="black"))
with(data, lines(datetime, sm2, col="red"))
with(data, lines(datetime, sm3, col="blue"))
legend("topright", lty = c(1,1,1), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## The fourth plot, row 2 column 2
with(data, plot(datetime, grp, type="l"))
## Copy the plot to specified format and close the PNG device
dev.copy(png, file = "plot4.png", width =480, height=480)
dev.off()
|
f75729e56d623ea273cf1329d87623f72edcbbf2
|
42f4fea527642694954067192d4c881f387f1264
|
/R/RcppExports.R
|
3a739d2838f62d4b6b0b37abb57b649123fd68f9
|
[] |
no_license
|
jonbramble/RScattnlay
|
34e520c75e58e1562d56655d846bde8e3ef2294b
|
f0e2e2e8d74f37084249aaa41c4e26cf98f300a9
|
refs/heads/master
| 2022-11-22T15:23:15.009484
| 2018-04-28T16:57:01
| 2018-04-28T16:57:01
| 30,366,821
| 1
| 1
| null | 2020-01-29T20:25:49
| 2015-02-05T16:54:04
|
C
|
UTF-8
|
R
| false
| false
| 345
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

# Thin R wrappers around the compiled C++ routines registered in the
# Rscattnlay shared library via .Call(). NOTE(review): this file is
# auto-generated -- any manual edits are overwritten the next time
# Rcpp::compileAttributes() runs.
S4_AMPL <- function(fullstack) {
  .Call('_Rscattnlay_S4_AMPL', PACKAGE = 'Rscattnlay', fullstack)
}

S4_SCATTNLAY <- function(fullstack) {
  .Call('_Rscattnlay_S4_SCATTNLAY', PACKAGE = 'Rscattnlay', fullstack)
}
|
c1774094b8aadb4e398687da45f40fd07ab5b17d
|
bb226480c35979bb7ec0ef70c53fa0a8c4fec5ed
|
/R/hello.R
|
0533166e7e367f75bfc191652fd08f80c3a3bba5
|
[] |
no_license
|
hulinhui-code/hulinhui
|
78b078bcb2eff2ab0622aa8dab3610cb01bb63ca
|
50226bd2a2fa410628d5967ddaf5ccb38163ef3e
|
refs/heads/main
| 2023-03-24T20:28:00.620345
| 2021-03-22T03:57:32
| 2021-03-22T03:57:32
| 332,137,455
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,210
|
r
|
hello.R
|
# Hello, world!
#' @export
hello <- function() {
  # Greet the user. print() returns its argument, so the greeting string
  # is also the (invisible) return value of this function.
  msg <- "Good luck"
  print(msg)
}
# Reinstall this package from GitHub into both local library paths.
# NOTE(review): `detach(package:mypkg)` hard-codes the name 'mypkg' while
# the installed repo is 'hulinhui' -- confirm the intended package name.
updateR <- function() {
  lib <- (.packages()) # names of currently attached packages
  if ('mypkg' %in% lib) {detach(package:mypkg)} # unload first so the fresh build can replace it
  library(devtools)
  install_github('hulinhui-code/hulinhui', lib = c('D:/R pkg lib', 'C:/Users/Jack/Documents/R/win-library/4.0')) # also install into C: (the default library)
  # library(mypkg) # loading here after install has no effect; re-attach manually after the update
}
mygithub_install <- function(package_name, github_name) {
  # Reinstall `package_name` from the GitHub repo `github_name`
  # ("user/repo") into the local D: library path.
  lib <- (.packages())  # names of currently attached packages
  # BUG FIX: the original called base::remove(package_name, lib = ...).
  # remove() deletes R *objects* and has no `lib` argument, so the call
  # errored. Uninstalling an installed package is remove.packages().
  if (package_name %in% lib) {
    remove.packages(package_name, lib = "D:/R pkg lib")
  }
  library(devtools)
  install_github(github_name, lib = c('D:/R pkg lib'))
}
# Install a CRAN package into the local (D:) library path.
myInstall <- function(package_name) {
  install.packages(package_name, lib="D:/R pkg lib")
}
myLibrary <- function(package_name) {
  # Attach one or more packages from the local (D:) library path.
  # `package_name` is a character vector of package names.
  # The original special-cased length-1 input with a duplicate library()
  # call; the loop covers that case identically, so the branch is removed.
  for (pkg in package_name) {
    library(pkg, lib = "D:/R pkg lib", character.only = TRUE)
  }
  invisible(NULL)
}
display_file <- function(pdf_path) {
  # Render a (PDF) file inside the Jupyter notebook via an <iframe>.
  iframe_html <- paste0(
    '<iframe src="', pdf_path,
    '" align="center" width="1111" height="900" frameBorder="0"></iframe>'
  )
  IRdisplay::display_html(iframe_html)
  # return(pdf_html)
}
display_panel <- function(address1, address2) {
  # Render two files side by side in the notebook as half-width iframes.
  panel_html <- paste0(
    '<iframe src="', address1,
    '" align="center" width="650" height="900" frameBorder="0"></iframe><iframe src="', address2,
    '" align="center" width="650" height="900" frameBorder="0"></iframe>'
  )
  IRdisplay::display_html(panel_html)
}
display_missing_data <- function(df) {
  # Rows of `df` that contain at least one NA value.
  has_na <- rowSums(is.na(df)) >= 1
  df[which(has_na), ]
}
yt <- function(short_code_url) {
  # Embed a YouTube video (identified by its short code) in the notebook.
  embed_html <- paste0(
    '<iframe src="https://www.youtube.com/embed/', short_code_url,
    '" align="center" width="900" height="506" frameBorder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>'
  )
  IRdisplay::display_html(embed_html)
}
# Open a directory in the OS file browser (Explorer on Windows, the
# configured R_BROWSER command elsewhere). Defaults to the working dir.
# NOTE(review): `.Platform['OS.type']` returns a one-element list; the
# `==` comparison still works, but `$OS.type` is the conventional form.
open_wd <- function(dir = getwd()){
  if (.Platform['OS.type'] == "windows"){
    shell.exec(dir)
  } else {
    system(paste(Sys.getenv("R_BROWSER"), dir))
  }
}
# Open the author's personal R snippets file (hard-coded Windows path).
open_r <- function(){
  shell.exec("D:/HonorFiles/备份文件夹/代码集/Jupyter/胡林辉/R/display_file.R")
}
# Launch the Lantern proxy client ("蓝灯" = "Lantern"; hard-coded path).
蓝灯<- function(){
  shell.exec("C:/Users/Jack/AppData/Roaming/Lantern/lantern.exe")
}
# Launch the author's Python OCR GUI application (hard-coded path).
OCR_app<- function(){
  shell.exec("D:/Python App/图形化界面应用_OCR.exe")
}
cal_metrics <- function(label, pred) {
  # Best Youden-index cutoff with its sensitivity and specificity, from a
  # binary gold standard (`label`, 0/1) and a continuous score (`pred`).
  # If AUC <= 0.5 the score is treated as a *negative* classifier and the
  # reported AUC/sensitivity/specificity are flipped (1 - value).
  roc_obj <- pROC::roc(label, pred)
  youden <- roc_obj$sensitivities + roc_obj$specificities
  if (roc_obj$auc > 0.5) {
    best <- which.max(youden)
    data.frame(type = 'positive classification',
               auc = round(roc_obj$auc, 3),
               cutoff = roc_obj$thresholds[best],
               sensitivity = roc_obj$sensitivities[best],
               specificity = roc_obj$specificities[best])
  } else {
    worst <- which.min(youden)
    data.frame(type = 'negative classification',
               auc = 1 - round(roc_obj$auc, 3),
               cutoff = roc_obj$thresholds[worst],
               sensitivity = 1 - roc_obj$sensitivities[worst],
               specificity = 1 - roc_obj$specificities[worst])
  }
}
# 95% CI for a binomial proportion k/n: Wilson score interval with
# continuity correction (the formula used throughout the original code).
.wilson_ci <- function(k, n, z = 1.95996) {
  p <- k / n
  q <- 1 - p
  zsq <- z^2
  denom <- 2 * (n + zsq)
  lower <- ((2 * n * p) + zsq - 1 - z * sqrt(zsq - 2 - (1 / n) + 4 * p * ((n * q) + 1))) / denom
  upper <- ((2 * n * p) + zsq + 1 + z * sqrt(zsq + 2 - (1 / n) + 4 * p * ((n * q) - 1))) / denom
  if (p == 0) lower <- 0  # degenerate proportions pin the interval edge
  if (p == 1) upper <- 1
  c(p, lower, upper)
}

cal_statistics_sspn <- function(fp, tp, tn, fn, decimal_digit = 3) {
  # Diagnostic-test summary from a 2x2 confusion table.
  #
  # Args:
  #   fp, tp, tn, fn: false/true positive and true/false negative counts
  #   decimal_digit:  passed to options(digits = ...) for printing
  #
  # Returns:
  #   data.frame with columns Metric, Estimated.value, Lower.95CI,
  #   Upper.95CI (10 rows: prevalence, accuracy, sensitivity, specificity,
  #   positive/negative rates, PPV, NPV, LR+ and LR-).
  z <- 1.95996
  nn <- fp + tp + tn + fn

  # Accuracy uses an exact (Clopper-Pearson) CI via binom.test, matching
  # the original; all other proportions use the Wilson interval.
  accu_ci <- binom.test(tp + tn, nn)[['conf.int']]
  accu <- c((tp + tn) / nn, accu_ci[1], accu_ci[2])

  prev <- .wilson_ci(tp + fn, nn)       # prevalence
  sens <- .wilson_ci(tp, tp + fn)       # sensitivity
  spec <- .wilson_ci(tn, fp + tn)       # specificity
  ppos <- .wilson_ci(fp + tp, nn)       # proportion test-positive
  pneg <- .wilson_ci(tn + fn, nn)       # proportion test-negative
  ppv  <- .wilson_ci(tp, fp + tp)       # positive predictive value
  npv  <- .wilson_ci(tn, tn + fn)       # negative predictive value

  # Likelihood ratios with lognormal confidence intervals.
  se_v <- sens[1]
  sp_v <- spec[1]
  pl <- se_v / (1 - sp_v)
  nl <- (1 - se_v) / sp_v
  se_log_pl <- sqrt(((1 - se_v) / tp) + (sp_v / fp))
  # BUG FIX: the original computed `sqrt(sens/d) + ((1-spec)/c)`, leaving
  # the second term OUTSIDE the square root. The standard error of
  # log(LR-) is sqrt(sens/fn + (1-spec)/tn), i.e.
  # sqrt(1/fn - 1/(tp+fn) + 1/tn - 1/(fp+tn)), mirroring the LR+ formula.
  se_log_nl <- sqrt((se_v / fn) + ((1 - sp_v) / tn))
  pl_ci <- exp(log(pl) + c(-1, 1) * z * se_log_pl)
  nl_ci <- exp(log(nl) + c(-1, 1) * z * se_log_nl)

  # Metric labels kept byte-identical to the original (including the
  # historical misspellings) so downstream consumers keep matching.
  statistics_df = data.frame(Metric=c('Prevalance','Accuracy', 'Senstivity','Specificity','Positive','Negative',
                                      'Positive Predictive Value','Negative Predictive Value',
                                      'Positive likelihood Ratios','Negative likelihood Ratios'),
                             Estimated.value = c(prev[1], accu[1], sens[1], spec[1], ppos[1],
                                                 pneg[1], ppv[1], npv[1], pl, nl),
                             Lower.95CI = c(prev[2], accu[2], sens[2], spec[2], ppos[2],
                                            pneg[2], ppv[2], npv[2], pl_ci[1], nl_ci[1]),
                             Upper.95CI = c(prev[3], accu[3], sens[3], spec[3], ppos[3],
                                            pneg[3], ppv[3], npv[3], pl_ci[2], nl_ci[2])
  )
  # Preserved from the original: note this changes the *global* print
  # precision as a side effect.
  options(digits = decimal_digit)
  return(statistics_df)
}
|
e25c4c77acc0c58017a8c261bc750c837f05b6f4
|
6ee5627b7d048d87dd4527badf7ca0443358382a
|
/mammals_simulated_bootstrap/templates/plot_bk.R
|
0295f36f3f55761035a8487ef22ca05de799b07a
|
[] |
no_license
|
evolbioinfo/booster-workflows
|
0bae01d5b9f41316c555cb985cfac8a1841a6499
|
341b012a99f706a5e363af83d6b85d8b7c3f089e
|
refs/heads/master
| 2021-01-23T12:32:13.905501
| 2018-04-03T07:52:03
| 2018-04-03T07:52:03
| 93,164,761
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,341
|
r
|
plot_bk.R
|
#!/bin/env Rscript
# Nextflow process template: every "!{name}" placeholder is substituted by
# the pipeline engine before this script is executed, so the file is not
# valid standalone R until rendered.
library(ggplot2)
library(plyr)
## FBP (Felsenstein bootstrap proportions): bootstrap vs simulated support.
fbp=read.table("!{fbp}",header=T,na.strings = "N/A")
colnames(fbp)=c("tree","brid","length","boot","depth","simu")
fbp=fbp[fbp$depth>1,]  # drop trivial branches (depth <= 1)
svg("fbp_simu_vs_boot.svg")
ggplot(fbp,aes(x=simu,y=boot))+geom_point(size=2,alpha=0.2)+ggtitle("FBP")
dev.off()
## TBE (transfer bootstrap expectation): same comparison.
tbe=read.table("!{tbe}",header=T,na.strings = "N/A")
colnames(tbe)=c("tree","brid","length","boot","depth","simu")
tbe=tbe[tbe$depth>1,]
svg("tbe_simu_vs_boot.svg")
ggplot(tbe,aes(x=simu,y=boot))+geom_point(size=2,alpha=0.2)+ggtitle("TBE")
dev.off()
# Correlations between bootstrap and simulated supports; top-level
# expressions auto-print, so their output lands in the sink()ed file.
sink("!{div}_!{seed}_correlations.txt")
print("FBP boot vs. simu Spearman")
cor(fbp$boot,fbp$simu,method = "spearman")
print("FBP boot vs. simu Pearson")
cor(fbp$boot,fbp$simu,method = "pearson")
print("TBE boot vs. simu Spearman")
cor(tbe$boot,tbe$simu,method = "spearman")
print("TBE boot vs. simu Pearson")
cor(tbe$boot,tbe$simu,method = "pearson")
sink()
## Bias Analysis: mean(boot) - mean(simu) over a sliding support window.
matbe=unlist(lapply(seq(0,1,0.01),function(v){
  mean(tbe$boot[tbe$simu>=v & tbe$simu<v+0.2])-mean(tbe$simu[tbe$simu>=v & tbe$simu<v+0.2])
}))
mafbp=unlist(lapply(seq(0,1,0.01),function(v){
  mean(fbp$boot[fbp$simu>=v & fbp$simu<v+0.2])-mean(fbp$simu[fbp$simu>=v & fbp$simu<v+0.2])
}))
svg("!{div}_!{seed}_bias_tbe.svg")
plot(seq(0,1,0.01),matbe,type='l',ylim=c(-0.25,0.25),lwd=2,xaxt='n')
abline(h=0,lwd=2)
axis(1, at=seq(0,1,0.25))
dev.off()
svg("!{div}_!{seed}_bias_fbp.svg")
plot(seq(0,1,0.01),mafbp,type='l',ylim=c(-0.25,0.25),lwd=2,xaxt='n')
abline(h=0,lwd=2)
axis(1, at=seq(0,1,0.25))
dev.off()
## Rogue analysis: compare per-taxon instability scores between the
## bootstrap-based and simulation-based runs against the reference rogues.
rogues=read.table(gzfile("!{refrogues}"),stringsAsFactors=F)
rogueboots=read.table(text=paste0(head(readLines("!{tbelogboot}"), -1)),stringsAsFactors=F,sep=":",skip = 8,header=F)
colnames(rogueboots)=c("name","index")
roguesimu=read.table(text=paste0(head(readLines("!{tbelogsimu}"), -1)),stringsAsFactors=F,sep=":",skip = 8,header=F)
colnames(roguesimu)=c("name","index")
roguesimu$name=gsub(" ","",roguesimu$name)
rogueboots$name=gsub(" ","",rogueboots$name)
svg("!{div}_!{seed}_rogues_simu_vs_boot.svg")
plot(roguesimu$index/10,rogueboots$index/10,
     pch=20,
     col=ifelse(rogueboots$name%in%rogues$V1,"orange","blue"),
     cex=ifelse(rogueboots$name%in%rogues$V1,2,0.5),
     xlab="Instablity score / Simulated",
     ylab="Instability score / Bootstrap")
legend("topleft", legend=c("Rogues", "Others"),col=c("orange", "blue"), pch=c(20,20),pt.cex=c(2,0.5))
abline(v=arrange(roguesimu,desc(index))[100,"index"]/10)
abline(h=arrange(rogueboots,desc(index))[100,"index"]/10)
dev.off()
sink("!{div}_!{seed}_correlations.txt",append=T)
print("Number of rogues in the first 100 instable taxa (simu)")
table(head(arrange(roguesimu,desc(index)), n = 100)[,"name"]%in%rogues$V1)
print("Number of rogues in the first 100 instable taxa (boot)")
table(head(arrange(rogueboots,desc(index)), n = 100)[,"name"]%in%rogues$V1)
sink()
# ROC curves
# NOTE(review): 1449 looks like a hard-coded taxon count for this data set
# -- confirm it matches the number of taxa in other runs.
nrogues=length(rogues$V1)
totaltaxa=length(rogueboots$name)
simucumu=unlist(lapply(1:1449,function(c){sum(head(arrange(roguesimu,desc(index)), n = c)[,"name"]%in%rogues$V1)}))/nrogues
bootcumu=unlist(lapply(1:1449,function(c){sum(head(arrange(rogueboots,desc(index)), n = c)[,"name"]%in%rogues$V1)}))/nrogues
svg("!{div}_!{seed}_rogues_cumu.svg")
plot(1:1449,simucumu,type='l',ylab ="% of total rogues found in first x instable taxa",xlab="First x instable taxa",lwd=2)
lines(1:1449,bootcumu,col="blue",lwd=2)
abline(v=100)
legend("bottomright", legend=c("Bootstrap trees", "Simulation-based trees"),col=c("blue", "black"),lty=1,lwd=2)
dev.off()
# Or in instability index: true/false positive rates as the instability
# threshold sweeps from 0 to 34.
simucumuvp=unlist(lapply(seq(0,34,0.01),function(c){sum(roguesimu$name[roguesimu$index>c]%in%rogues$V1)/nrogues}))
simucumufp=unlist(lapply(seq(0,34,0.01),function(c){sum(!(roguesimu$name[roguesimu$index>c]%in%rogues$V1))/(totaltaxa-nrogues)}))
bootcumuvp=unlist(lapply(seq(0,34,0.01),function(c){sum(rogueboots$name[rogueboots$index>c]%in%rogues$V1)/nrogues}))
bootcumufp=unlist(lapply(seq(0,34,0.01),function(c){sum(!(rogueboots$name[rogueboots$index>c]%in%rogues$V1))/(totaltaxa-nrogues)}))
svg("!{div}_!{seed}_rogues_roc.svg")
plot(simucumufp,simucumuvp,type='l',ylab ="VP",xlab="FP",lwd=2)
lines(bootcumufp,bootcumuvp,col="blue",lwd=2)
legend("bottomright", legend=c("Boot", "Simu"),col=c("blue", "black"),lty=1,lwd=2)
dev.off()
|
d75f0ac02cf216d3f4b98407c83b008c5f2090b5
|
a02204c9d3c1c5fac13696196431d64baaf4f835
|
/R/new_samples.R
|
27aa61d311be1fc7abe7c8261bffc8039296d5d2
|
[] |
no_license
|
haihaba/gcms
|
2e5de03ae8ca734bc972b79d116b6a36a6c77b17
|
8f9c7ec06f7ab27a3a51f7f4b4398d3cc9c02853
|
refs/heads/master
| 2020-04-15T06:04:38.335020
| 2018-05-26T14:31:01
| 2018-05-26T14:31:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 32,599
|
r
|
new_samples.R
|
##' Interactive menu for processing new (prediction) samples.
##'
##' Sets up a prediction directory (copying settings, edges and MCR results
##' from an existing project), then offers a menu to import, select, align,
##' outlier-check, compress, resolve and NIST-export the new samples.
##' @export
##' @param projectpath Path to the existing project directory whose
##'   settings, edges and H-MCR results are reused for prediction.
new_samples<-function(projectpath){
require(tcltk)
## Create prediction directory
startup <- 1
while(startup){
cat("\n\n===============================\n")
cat("===== Process new samples =====\n")
cat("===============================\n\n")
startup <- menu(c("Set path","Back"),title="")
if(startup == 1){
temp <- tk_choose.dir(caption="Select directory.")
if(!is.na(temp)){
predpath <- temp
# NOTE(review): the warning message refers to predpath but the check below
# inspects projectpath — presumably list.files(predpath) was intended; confirm.
if(length(list.files(projectpath))){
cat("¤¤¤¤\n¤¤¤¤\n")
cat("Warning! ",predpath," is not empty! Some files might be overwritten!\n")
cat("¤¤¤¤\n¤¤¤¤\n")
}
a <- 1
startup <- 0
# Seed the prediction directory with the project's settings and results.
file.copy(file.path(projectpath,"SETTINGS.Rdata"),file.path(predpath))
#file.copy(file.path(projectpath,"sampleinfo.Rdata"),file.path(predpath))
file.copy(file.path(projectpath,"maxMZ.Rdata"),file.path(predpath))
dir.create(file.path(predpath,"Edges"),showWarnings=FALSE)
#dir.create(file.path(predpath,"ALKANE_SERIE"),showWarnings=FALSE)
dir.create(file.path(predpath,"HMCR"),showWarnings=FALSE)
if(file.exists(file.path(projectpath,"HMCR","MCR.Rdata")))
file.copy(file.path(projectpath,"HMCR","MCR.Rdata"),file.path(predpath,"HMCR"))
#if(file.exists(file.path(projectpath,"ALKANE_SERIE","RT_INFO.Rdata")))
# file.copy(file.path(projectpath,"ALKANE_SERIE","RT_INFO.Rdata"),file.path(predpath,"ALKANE_SERIE"))
file.copy(file.path(projectpath,"Edges","edges.Rdata"),file.path(predpath,"Edges"))
# Restore a previously saved sample selection, if any.
if(file.exists(file.path(predpath,"newsamples.Rdata")))
load(file.path(predpath,"newsamples.Rdata"))
}else{
cat("\n\n Error! You must specify where to store prediction files to continue!\n")
a <- 0
}
}else if(startup == 2 | !startup){
a <- 0
startup <- 0
}
}
### Menu
# 'predvector' masks the menu entries so that alignment/resolution options
# only appear once samples have been selected.
while(a){
cat("\n\n===============================\n")
cat("===== Process new samples =====\n")
cat("===============================\n\n")
cat("Path:\t",predpath,"\n\n")
predmenu <- c("Import and smooth data","Select samples","Align samples","Check for outliers","Compress data","Resolve data","Export to NIST","Quit")
if(exists("newsamples")){
if(length(newsamples)){
predvector <- c(TRUE,TRUE,TRUE,TRUE,FALSE,TRUE,TRUE,TRUE)
cat("Number of prediction files selected: ",length(newsamples),"\n")
}else
predvector <- c(TRUE,TRUE,FALSE,FALSE,FALSE,FALSE,FALSE,TRUE)
}else
predvector <- c(TRUE,TRUE,FALSE,FALSE,FALSE,FALSE,FALSE,TRUE)
a <- menu(predmenu[predvector],title="")
if(!a | a == sum(predvector)){
a <- 0
}else if(a == 1){
#Import data
importmenu <- 1
while(importmenu){
importmenu <- menu(c("Leco CSV","Leco CSV (special)","Andi NetCDF","Done"),title = "Import files")
if(importmenu == 4 | !importmenu)
importmenu <- 0
if(importmenu == 1)
cat("\nSorry, not implemented yet\n")
else if(importmenu == 2)
cat("\nSorry, not implemented yet\n")
else if(importmenu == 3)
read_cdf(predpath)
}
}else if(a == 2){
# Select / remove / list the .Rdata files that make up the prediction set.
b <- 1
while(b){
b <- menu(c("Select samples", "Remove files","List files","Done"),title="Select samples")
if(b == 1){
temp <- tk_choose.files(caption="Select .Rdata files.", multi = TRUE,filters = matrix(c("Rdata files (*.Rdata", "*.Rdata"),1,2,byrow=T))
if(length(temp)){
if(exists("newsamples"))
newsamples <- unique(c(newsamples,temp))
else
newsamples <- temp
predvector <- c(TRUE,TRUE,TRUE,TRUE,TRUE,TRUE)
more <- 1
while(more == 1){
more <- menu(c("Yes","No"),title="Choose files from another directory?")
if(more == 1){
temp <- tk_choose.files(,caption="Select .Rdata files.", multi = TRUE,filters = matrix(c("Rdata files (*.Rdata", "*.Rdata"),1,2,byrow=T))
newsamples <- unique(c(newsamples,temp))
}
}
}
}else if(b == 2){
remfiles <- select.list(basename(newsamples),multiple=TRUE,title="Remove samples from prediction set")
newsamples <- newsamples[!(basename(newsamples) %in% remfiles)]
}else if(b == 3){
if(exists("newsamples"))
select.list(basename(newsamples))
else
cat("No samples selected!\n")
}else if(b == 4 | !b){
b <- 0
# Persist the selection so it survives a restart of the menu.
save(newsamples,file=file.path(predpath,"newsamples.Rdata"))
}
}
}else if(a == 3){
# Align against the target chromatogram saved by the original project.
if(file.exists(file.path(projectpath,"Aligned","target.Rdata"))){
load(file.path(projectpath,"Aligned","target.Rdata"))
cat("Aligning samples..\n")
aligndatapred(projectpath,predpath,newsamples,target,minshift,datasource,ion)
}else
cat("Error! No original target file found!")
}else if(a == 4){
check_data(predpath)
}else if(a == 5){
DATA <- sigma_unfold(predpath)
scoresAR(predpath,DATA)
}else if(a == 6){
cat("Resolving data.. \n")
results <- find_spectrum2(predpath,projectpath)
if(length(results)){
read_win2(predpath,results$type,results$windowlist)
cat("Exporting spectrum to NIST...\n")
spec2NIST2(predpath,results$type,all=TRUE)
}
}else if(a == 7){
#Export to NIST
# NOTE(review): 'results' only exists if option 6 ran earlier in this
# session — selecting 7 first would error; confirm intended flow.
spec2NIST2(predpath,results$type)
}else if(a == 8 | !a)
a <- 0
}
}
##' Resolve each retention-time window of the unfolded data with
##' alternating regression and assemble scores and spectra.
##'
##' Windows with fewer than three rows/columns are skipped. Objects
##' ROWID1, shift and files are loaded from info.Rdata (written by
##' sigma_unfold); SCAN_RANGE from the Aligned directory.
##' @param predpath Prediction directory containing info.Rdata, the
##'   Aligned subdirectory and HMCR/REG output locations.
##' @param DATA Unfolded sample x (window, m/z) matrix; loaded from
##'   HMCR/REG/MVA_DATA.Rdata when missing.
##' @return Writes MVA_DATA, SPECTRUM and EDGES_TIME files; no useful
##'   return value.
scoresAR <- function(predpath,DATA){
if(missing(DATA))
load(file.path(predpath,"HMCR","REG","MVA_DATA.Rdata"))
# info.Rdata supplies edges, shift, ROWID1 and files (saved by sigma_unfold).
load(file.path(predpath,"info.Rdata"))
load(file.path(predpath,"Aligned","SCAN_RANGE.Rdata"))
D <- numeric()
VARID1 <- character()
spectrum <- numeric()
# One pass per retention-time window (column 1 of ROWID1 is the window index).
for(i in 1:max(ROWID1[,1]))
{
textstr <- paste("Checking window number:", i)
cat(textstr,"\n")
#do_log(predpath,textstr)
int <- which(ROWID1[,1]==i)
XX <- DATA[,int] # DATA = X
SSX <- sum(XX^2)
if(min(dim(XX))>2)
{
# Alternating regression: C = concentration profiles, S = spectra.
CS <- ARmethod1(XX,predpath)
comp <- ncol(CS$C)
D <- cbind(D,CS$C)
# Embed the resolved spectra at their m/z positions (column 2 of ROWID1).
S_temp <- matrix(0,nrow=comp,ncol=max(SCAN_RANGE))
MZ <- ROWID1[int,2]
S_temp[,MZ] <- t(CS$S)
spectrum <- rbind(spectrum,S_temp)
# Build zero-padded variable names of the form W001_C01.
for(j in 1:comp)
{
if(i > 99)
{
if(j < 10)
{
VARID1 <- rbind(VARID1,paste("W",i,"_C0",j))
}else
VARID1 <- rbind(VARID1,paste("W",i,"_C",j))
}else
{
if(i < 10)
{
if(j < 10)
{
VARID1 <- rbind(VARID1,paste("W00",i,"_C0",j))
}else
VARID1 <- rbind(VARID1,paste("W00",i,"_C",j))
}else
if(j < 10)
{
VARID1 <- rbind(VARID1,paste("W0",i,"_C0",j))
}else
VARID1 <- rbind(VARID1,paste("W0",i,"_C",j))
}
}
}
}
#output <- list(D=D,VARID1=VARID1,spectrum=spectrum)
DATA <- D
load(file.path(predpath,"Aligned","COLUMNID1.Rdata"))
OBSID1 <- COLUMNID1
save(DATA,OBSID1,VARID1,file=file.path(predpath,"HMCR","REG","MVA_DATA.Rdata"))
write.table(data.frame(ID=OBSID1,DATA),file=file.path(predpath,"HMCR","REG","MVA_DATA.txt"),row.names=FALSE,col.names=c("PrimaryID",VARID1),sep="\t",quote=FALSE)
save(spectrum,VARID1,file=file.path(predpath,"HMCR","REG","SPECTRUM.Rdata"))
# Use the least-shifted sample's SCAN_INFO as the reference time axis.
load(files[which.min(shift)])
EDGES_TIME <- SCAN_INFO[,2]
save(EDGES_TIME,file=file.path(predpath,"EDGES_TIME.Rdata"))
}
##' Alternating regression (MCR-AR) for one retention-time window.
##'
##' Chooses an initial rank from a PCA of the column-centred data (enough
##' components to explain SETTINGS$R2Xc of the variance), then alternates
##' non-negative least-squares estimates of concentration profiles C and
##' unit-norm spectra S until the explained variance converges. If two
##' spectra become nearly collinear (correlation > 0.95) the rank is
##' reduced by one and the iteration restarts.
##' @param X Sample x variable data matrix for one window.
##' @param projectpath Directory containing SETTINGS.Rdata (for R2Xc).
##' @return A list with C (concentration profiles) and S (spectra).
ARmethod1 <- function(X,projectpath){
require(MASS)
X[X<0] <- 0
XX <- sweep(X,2,colMeans(X))
# PCA on centred data, capped at 15 components (or the data dimensions).
vec <- pca(XX,min(c((dim(X)-1),15)))$vec
SSX <- sum(XX^2)
load(file.path(projectpath,"SETTINGS.Rdata"))
limit <- SETTINGS$R2Xc
# Smallest rank whose cumulative explained variance exceeds the limit.
R <- sum(cumsum(diag(t(vec)%*%vec)/SSX)<limit)+1
# R must not be 0
p <- pca(X,R)$p
S <- abs(p)
Sstart <- S
diff <- 1
R2 <- 0
# z counts iterations; z == 100 is used as a sentinel for "restart with
# reduced rank" (set when spectra collapse or R2 becomes NA).
z <- 0
zz <- 100
C <- X%*%S%*%ginv(t(S)%*%S)
C[C<0] <- 0
Cstart <- C
while(diff > 1e-6)
{
z <- z+1
S <- t(X)%*%C%*%ginv(t(C)%*%C)
S[S<=0] <- 0
if(R == 1) # The apply() below only works for R > 1
{
S[,1]/sqrt(sum(S[,1]^2))
}else
S[,1:R] <- apply(S[,1:R],2,function(S) S/sqrt(sum(S^2)))
# Inter-spectrum correlations; near-duplicates trigger a rank reduction.
CC <- t(S)%*%S
if(sum(CC>0.95) > R)
{
zz <- z
z <- 100
}
C <- X%*%S%*%ginv(t(S)%*%S)
C[C<0] <- 0
R2new <- 1-ss(X-C%*%t(S))/ss(X)
diff <- abs(R2-R2new)
R2 <- R2new
if(any(is.na(R2)))
z <- 100
if(z == 100)
{
cat("Rank reduced after: ",zz, " iterations.\n")
z <- 0
R <- R-1
C <- Cstart[,1:R]
diff <- 1
R2 <- 0
zz <- 100
}
}
cat("Rank: ",R,"\n")
cat("R2X: ",R2,"\n")
cat("Iterations: ",z,"\n")
output <- list(C=C,S=S)
}
##' Sum of squares of all elements.
##'
##' @param X A numeric vector or matrix.
##' @return The scalar sum of the squared elements of \code{X}
##'   (0 for empty input).
ss <- function(X)
  # Return the value directly; the original assigned to a local (SSX),
  # which returned the same value but invisibly.
  sum(X^2)
##' Principal components via eigendecomposition of the smaller
##' cross-product matrix.
##'
##' When X has fewer rows than columns the eigenvectors of X X' are
##' computed and mapped back through X' (then normalised); otherwise the
##' eigenvectors of X'X are used directly. Either way the loadings have
##' unit norm.
##' @export
##' @param X Numeric matrix to decompose.
##' @param comp Number of components to extract.
##' @return A list with \code{vec} (scores, X %*% p) and \code{p}
##'   (unit-norm loadings), returned invisibly.
pca <- function(X, comp) {
  Xm <- as.matrix(X)
  if (nrow(Xm) < ncol(Xm)) {
    # "Wide" case: decompose the smaller n x n matrix X X'.
    Xt <- t(X)
    loadings <- eigen(X %*% Xt)$vectors[, 1:comp]
    if (comp > 1) {
      # Map each eigenvector back to variable space, then renormalise.
      loadings <- apply(loadings, 2, function(v) Xt %*% v)
      loadings <- apply(loadings, 2, function(v) v / sqrt(sum(v^2)))
    } else {
      loadings <- Xt %*% loadings
      loadings <- loadings / sqrt(sum(loadings^2))
    }
  } else {
    # "Tall" case: eigenvectors of X'X are already the loadings.
    loadings <- eigen(t(X) %*% X)$vectors[, 1:comp]
  }
  out <- list(vec = X %*% loadings, p = loadings)
}
##' Align prediction samples against the project's target chromatogram.
##'
##' Loads every selected sample, collects its TIC/basepeak/single-ion
##' traces, pads or truncates the m/z axis to the project's maxMZ, finds
##' the shift (within +/- SETTINGS$MS scans) that maximises the covariance
##' with the target trace, and saves the aligned traces and shifts into
##' predpath/Aligned.
##' @param projectpath Project directory (source of maxMZ.Rdata).
##' @param predpath Prediction directory to write Aligned output into.
##' @param newsamples Character vector of sample .Rdata file paths.
##' @param target Reference trace to align against.
##' @param minshift Shift offset of the original target sample.
##' @param datasource Which trace to align on: "TIC", "Basepeak" or "IC".
##' @param ion m/z value used when datasource == "IC".
aligndatapred <- function(projectpath,predpath,newsamples,target,minshift,datasource,ion){
load(file.path(projectpath,"maxMZ.Rdata"))
alignfiles <- newsamples[order(newsamples)]
A_DATA <- list()
#do_log(predpath,"Aligning data")
n <- length(alignfiles)
dir.create(file.path(predpath,"Aligned"),showWarnings=FALSE)
COLUMNID1 <- cbind(sub("[.][^.]*$", "", basename(alignfiles))) # Filenames without extensions
A_DATA$files <- files <- alignfiles
A_DATA$fsize <- matrix(0,n,2)
# First pass: load each sample (provides Xbc, SCAN_INFO, SCAN_RANGE) and
# collect summary traces, growing the matrices if a sample is longer/wider.
for(i in 1:n){
cat("Loading ", paste(basename(alignfiles[i])," (",i,"/",n,")\n",sep=""))
load(alignfiles[i])
A_DATA$fsize[i,] <- dim(Xbc)
num <- which(SCAN_RANGE == ion)
if(i == 1){
NUM_MZ <- ncol(Xbc)
NUM_scans <- nrow(Xbc)
A_DATA$tic <- matrix(0,n,NUM_scans)
A_DATA$bp <- matrix(0,n,NUM_scans)
A_DATA$ic <- matrix(0,n,NUM_scans)
SUM_MZ <- matrix(0,n,NUM_MZ)
}
if(nrow(Xbc) > NUM_scans){
A_DATA$tic <- cbind(A_DATA$tic,matrix(0,n,nrow(Xbc)-NUM_scans))
A_DATA$bp <- cbind(A_DATA$bp,matrix(0,n,nrow(Xbc)-NUM_scans))
A_DATA$ic <- cbind(A_DATA$ic,matrix(0,n,nrow(Xbc)-NUM_scans))
NUM_scans <- nrow(Xbc)
}
if(ncol(Xbc) > NUM_MZ)
SUM_MZ <- cbind(SUM_MZ,matrix(0,n,ncol(Xbc)-NUM_MZ))
A_DATA$tic[i,1:nrow(Xbc)] <- rowSums(Xbc)
A_DATA$bp[i,1:nrow(Xbc)] <- apply(Xbc,1,max)
A_DATA$ic[i,1:nrow(Xbc)] <- t(Xbc[,num])
SUM_MZ[i,1:ncol(Xbc)] <- colSums(Xbc)
# Normalise the sample's m/z axis to the project's maxMZ and re-save it
# in place (pads with zero columns or truncates).
if(ncol(Xbc) < maxMZ){
Xbc <- cbind(Xbc,matrix(0,nrow = nrow(Xbc),ncol = maxMZ-ncol(Xbc)))
SCAN_RANGE <- c(SCAN_RANGE,(max(SCAN_RANGE)+1):maxMZ)
save(Xbc,SCAN_INFO,SCAN_RANGE,file = alignfiles[i])
}else if(ncol(Xbc) >= maxMZ){
Xbc <- Xbc[,1:maxMZ]
SCAN_RANGE <- SCAN_RANGE[1:maxMZ]
save(Xbc,SCAN_INFO,SCAN_RANGE,file = alignfiles[i])
}
}
A_DATA$ion <- ion
A_DATA$ok <- 1
if(file.exists(file.path(predpath,"SETTINGS.Rdata"))){
load(file.path(predpath,"SETTINGS.Rdata"))
max_shift <- SETTINGS$MS
}else{
cat("No settings for maximum shift found! Using default value (500)")
max_shift <- 500
}
if(datasource == "TIC"){
DATA <- A_DATA$tic
}else if(datasource == "Basepeak"){
DATA <- A_DATA$bp
}else if(datasource == "IC")
DATA <- A_DATA$ic
if(length(target) < ncol(DATA))
target <- c(target,rep(0,ncol(DATA)-length(target)))
# Second pass: for each sample, find the lag in [-max_shift, max_shift]
# maximising the (unnormalised) cross-covariance with the target trace.
shift <- numeric()
for(i in 1:length(alignfiles)){
CO_VAR <- numeric()
A <- target[(max_shift+1):(ncol(DATA)-max_shift)]
for(j in -max_shift:max_shift){
B <- DATA[i,(max_shift+1+j):(ncol(DATA)-max_shift+j)]
CO_VAR <- c(CO_VAR,sum(A*B))
}
shift <- c(shift,(which.max(CO_VAR)-max_shift-1))
}
# Express shifts relative to the original target sample and make them
# non-negative so they can be used as start offsets.
shift <- shift-minshift
cat("Min shift: ", min(shift),"\n")
cat("Max shift: ", max(shift),"\n")
if(min(shift)<0)
cat("Adjusting negative shifts...\n")
shift <- shift-min(shift)
start <- 1+shift[1:nrow(A_DATA$tic)]
stop <- ncol(A_DATA$tic)-max(shift)+shift[1:nrow(A_DATA$tic)]
BASEPEAK <- matrix(0,nrow(A_DATA$bp),ncol(A_DATA$bp))
TIC <- matrix(0,nrow(A_DATA$tic),ncol(A_DATA$tic))
for(i in 1:nrow(A_DATA$bp)){
TIC[i,1:(stop[i]-start[i]+1)] <- A_DATA$tic[i,start[i]:stop[i]]
BASEPEAK[i,1:(stop[i]-start[i]+1)] <- A_DATA$bp[i,start[i]:stop[i]]
}
TIC_RAW <- A_DATA$tic
cat("Saving variables..\n")
save(TIC_RAW,file=file.path(predpath,"Aligned","TIC_RAW.Rdata"))
save(shift,file=file.path(predpath,"Aligned","shift.Rdata"))
save(TIC,file=file.path(predpath,"Aligned","TIC.Rdata"))
save(BASEPEAK,file=file.path(predpath,"Aligned","BASEPEAK.Rdata"))
save(SUM_MZ,file=file.path(predpath,"Aligned","SUM_MZ.Rdata"))
save(COLUMNID1,file=file.path(predpath,"Aligned","COLUMNID1.Rdata"))
save(files,file=file.path(predpath,"Aligned","files.Rdata"))
save(SCAN_RANGE,file=file.path(predpath,"Aligned","SCAN_RANGE.Rdata"))
save(NUM_scans,file=file.path(predpath,"Aligned","NUM_scans.Rdata"))
textstr <- paste(files," Shift: ", shift," Scans: ", A_DATA$fsize[,1]," MZ: ",A_DATA$fsize[,2],sep="\n")
#do_log(predpath,textstr)
}
##' Collect resolved window results into one multivariate data set.
##'
##' Lets the user pick processed window files (win###.Rdata), concatenates
##' their concentration profiles (DATA), spectra (SPECTRUM) and time axes
##' (VARID2), builds W###_C## style variable names, and writes the
##' combined MVA_DATA files into predpath/HMCR/<type>.
##' @param predpath Prediction directory.
##' @param type Result subdirectory name (e.g. "REG").
##' @param win Optional vector of window numbers already processed; the
##'   user is prompted if omitted or if extra windows exist on disk.
##' @return A list with DATA, SPECTRUM, VARID1, OBSID1 and VARID2
##'   (returned invisibly).
read_win2<-function(predpath,type,win){
#require(xlsReadWrite)
SPECTRUM <- DATA <- VARID1 <- VARID2 <- numeric()
load(file.path(predpath,"Edges","edges.Rdata"))
num_windows <- length(edges)-1
load(file.path(predpath,"Aligned","files.Rdata"))
# Window numbers are encoded in characters 4-6 of the file name (win042).
if(!missing(win)){
if(length(list.files(file.path(predpath,"HMCR",type,"win"))) > length(win)){
cat("\nRecently processed windows: ",win,"\n")
cat("Previously processed windows: ", setdiff(as.numeric(substr(basename(list.files(file.path(predpath,"HMCR",type,"win"))),4,6)),win),"\n")
if(1 == menu(c("Yes","No"),title="There are some processed windows that are not selected, do you wish to include some of them as well?"))
while(!length(win <- sort(as.numeric(substr(basename(tk_choose.files(caption="Select windows to process")),4,6)))))
cat("You must select at least one window!\n")
}
}else{
cat("Select windows.")
while(!length(win <- sort(as.numeric(substr(basename(tk_choose.files(caption="Select windows to process")),4,6)))))
cat("You must select at least one window!\n")
}
cat("\n")
for(i in win){
if(file.exists(file.path(predpath,"HMCR",type,"win",paste("win",ifelse(i<=99,ifelse(i<=9,paste("00",i,sep=""),paste("0",i,sep="")),i),".Rdata",sep="")))){
load(file.path(predpath,"HMCR",type,"win",paste("win",ifelse(i<=99,ifelse(i<=9,paste("00",i,sep=""),paste("0",i,sep="")),i),".Rdata",sep="")))
if(length(S)){
cat(nrow(as.matrix(C)),"\n")
# Only accept windows resolved for the full sample set.
if(nrow(as.matrix(C)) == length(files)){
cat("--------------------------------------------------------\n")
SPECTRUM <- cbind(SPECTRUM,S)
DATA <- cbind(DATA,C)
cat("Window num: ",i,"\nNumber of comps: ",ncol(S),"\nTotal number of comps: ", ncol(SPECTRUM),"\n")
VARID2 <- c(VARID2,TIME)
for(j in 1:ncol(S))
VARID1 <- rbind(VARID1,ifelse(i>99,ifelse(j<10,paste("Win",i,"_C0",j,sep=""),paste("Win ",i,"_C",j,sep="")),ifelse(i<10,ifelse(j<10,paste("Win00",i,"_C0",j,sep=""),paste("Win00",i,"_C",j,sep="")),ifelse(j<10,paste("Win0",i,"_C0",j,sep=""),paste("Win0",i,"_C",j,sep="")))))
}else
cat("Number of files and rows in data matrix does not match!\n")
}
}
}
load(file.path(predpath,"Aligned","COLUMNID1.Rdata"))
OBSID1 <- COLUMNID1
save(DATA,VARID1,VARID2,OBSID1,SPECTRUM,file=file.path(predpath,"HMCR",type,"MVA_DATA.Rdata"))
# NOTE(review): save() with named arguments (SPECTRUM = SPECTRUM) is
# unusual — save() expects bare object names; confirm these files load
# back with the expected object names.
save(SPECTRUM = SPECTRUM,file=file.path(predpath,"HMCR",type,"SPECTRUM.Rdata"))
save(VARID1 = VARID1,file=file.path(predpath,"HMCR",type,"VARID1.Rdata"))
save(VARID2 = VARID2,file=file.path(predpath,"HMCR",type,"VARID2.Rdata"))
write.table(data.frame(ID = OBSID1,DATA),file=file.path(predpath,"HMCR",type,"MVA_DATA.txt"),sep='\t',row.names = FALSE,col.names = c("Primary ID",VARID1),quote=FALSE)
out <- list(DATA = DATA,SPECTRUM = SPECTRUM,VARID1 = VARID1, OBSID1 = OBSID1,VARID2 = VARID2)
}
##' Unfold each aligned sample into one row of window-wise m/z sums.
##'
##' For every sample and every retention-time window (defined by 'edges',
##' offset by the sample's alignment shift), optionally subtracts a linear
##' baseline per m/z trace (SETTINGS$BC2), sums intensities over scans,
##' and concatenates the window blocks into a single row. Results are
##' saved to HMCR/REG/MVA_DATA.Rdata and info.Rdata.
##' @param predpath Prediction directory with Aligned data, SETTINGS and
##'   edges.
##' @return The unfolded sample x (window, m/z) matrix (invisibly), or
##'   NULL with a message when alignment or edges are missing.
sigma_unfold <- function(predpath){
if(!file.exists(file.path(predpath,"Aligned","shift.Rdata"))){
cat("Error! You need to align data first!\n")
return(NULL)
}else{
load(file.path(predpath,"Aligned","files.Rdata"))
load(file.path(predpath,"Aligned","COLUMNID1.Rdata"))
load(file.path(predpath,"Aligned","shift.Rdata"))
OBSID1 <- COLUMNID1
load(file.path(predpath,"SETTINGS.Rdata"))
DOBL <- SETTINGS$BC2
if(file.exists(file.path(predpath,"Edges","edges.Rdata"))){
load(file.path(predpath,"Edges","edges.Rdata"))
load(file.path(predpath,"Aligned","SCAN_RANGE.Rdata"))
# (window index, m/z) pair for every column of the unfolded matrix.
ROWID1 <- varnamegen(length(edges)-1,length(SCAN_RANGE),min(SCAN_RANGE))
load(file.path(predpath,"Aligned","NUM_scans.Rdata"))
for(i in 1:length(files)){
DATA_temp <- numeric()
cat("Loading ",basename(files[i]),paste("(",i,"/",length(files),")",sep=""),"\n")
load(files[i])
if(shift[i]+tail(edges,1)>nrow(Xbc)) # Avoids "subscript out of bounds" if the shift is too big (outliers)
Xbc <- rbind(Xbc,matrix(0,shift[i]+tail(edges,1)-nrow(Xbc),ncol(Xbc)))
for(k in 1:(length(edges)-1)){
x_temp <- NULL # (placeholder comment removed)
X_temp <- Xbc[edges[k]:edges[k+1]+shift[i],]
if(DOBL){
# Linear baseline through the window's first and last scan, per m/z.
BL <- apply(X_temp,2,function(X_temp) approx(c(1,length(X_temp)),X_temp[c(1,length(X_temp))],1:length(X_temp))$y) # method = 'linear' by default
X_temp <- X_temp-BL
X_temp[X_temp<0] <- 0
}
DATA_temp <- cbind(DATA_temp, matrix(colSums(X_temp),nrow=1))
}
if(i == 1)
DATA <- matrix(0,length(files),length(DATA_temp))
# Grow DATA if a later sample yields more columns than the first.
if(ncol(DATA) < ncol(DATA_temp))
DATA <- cbind(DATA,matrix(0,nrow=nrow(DATA),ncol=ncol(DATA_temp)-ncol(DATA)))
DATA[i,1:ncol(DATA_temp)] <- DATA_temp
}
dir.create(file.path(predpath,"HMCR","REG"),showWarnings=FALSE)
save(DATA,OBSID1,file=file.path(predpath,"HMCR","REG","MVA_DATA.Rdata"))
save(edges,shift,ROWID1,files,file=file.path(predpath,"info.Rdata"))
output <- DATA
}else{
cat("Error! You need to set edges first!\n")
return(NULL)
}
}
}
##' Generate (window, m/z) identifier pairs for unfolded data.
##'
##' @param intervals Number of retention-time windows.
##' @param peks Number of m/z channels per window.
##' @param start First m/z value.
##' @return A (peks * intervals) x 2 matrix, returned invisibly: column 1
##'   is the window index, column 2 the m/z value (m/z varies fastest).
varnamegen <- function(intervals, peks, start)
{
  # Window index repeated blockwise; m/z sequence repeated per window.
  window_col <- rep(seq_len(intervals), each = peks)
  mz_col <- rep(start:(start + peks - 1), times = intervals)
  A <- matrix(c(window_col, mz_col), peks * intervals, 2)
}
##' Resolve prediction samples against the project's known spectra.
##'
##' For each selected data set type and each window resolved in the
##' original project, loads every prediction sample, optionally removes a
##' linear baseline, regresses the window data onto the project's spectra
##' (do_AR_all_prediction) and accumulates the resulting concentration
##' profiles per window into predpath/HMCR/<type>/win/win###.Rdata.
##' Samples whose shifted window falls outside the chromatogram get NA
##' profiles.
##' @param predpath Prediction directory (aligned samples, shifts).
##' @param projectpath Project directory (resolved windows, settings).
##' @return A list with 'type' and 'windowlist', or character() when
##'   aborted / no data found.
find_spectrum2<-function(predpath,projectpath){
require(MASS)
datamenu <- character()
if(file.exists(file.path(projectpath,"HMCR","REG","MVA_DATA.Rdata")))
datamenu <- "REG H-MCR DATA"
if(length(datamenu)){
dataexport <- menu(c(datamenu,"Cancel"),title="Select data")
if(dataexport == 0 | dataexport == length(c(datamenu,"Cancel")))
datamenu <- character()
}
if(!length(datamenu)){
cat("Error! (Aborted by user or no data found)\n\n")
return(character())
}else{
type = ifelse(length(datamenu) == 1,strsplit(datamenu," ")[[1]][1],c("REG","CV")[dataexport])
load(file.path(predpath,"Aligned","files.Rdata"))
load(file.path(predpath,"Edges","edges.Rdata"))
load(file.path(predpath,"Aligned","shift.Rdata"))
load(file.path(predpath,"Aligned","SCAN_RANGE.Rdata"))
load(file.path(predpath,"maxMZ.Rdata"))
# Use the least-shifted sample's SCAN_INFO for the time axis.
load(files[which.min(shift)])
load(file.path(projectpath,"HMCR","MCR.Rdata"))
windowlist <- as.numeric(substr(basename(list.files(file.path(projectpath,"HMCR",type,"win"))),4,6))
EDGES_TIME <- SCAN_INFO[,2]
load(file.path(projectpath,"SETTINGS.Rdata"))
NL <- SETTINGS$NL
RT_LIMIT <- SETTINGS$MPS
DO_BL <- SETTINGS$BC2
#color <- cbind("red","green","blue","black","purple","grey","yellow4","red","green","blue","black","purple","grey","yellow4","red","green","blue","black","purple","grey","yellow4","red","green","blue","black","purple","grey","yellow4","red","green","blue","black","purple","grey","yellow4")
rm(Xbc,SETTINGS)
dir.create(file.path(predpath,"Edges","dat"),recursive = TRUE,showWarnings = FALSE)
temp <- list.files(file.path(projectpath,"Edges","dat"),full.names=TRUE)
dir.create(file.path(predpath,"Edges","dat","Model samples_bg_corr"),showWarnings = FALSE,recursive=TRUE)
dir.create(file.path(predpath,"HMCR",type,"win_png"),showWarnings = FALSE,recursive = TRUE)
dir.create(file.path(predpath,"HMCR",type,"win"),showWarnings = FALSE,recursive = TRUE)
gc()
Scores <- numeric()
### attpempt to store the background correction data. bg correction storage in "find_spectrum.R" is done in arrays per window
### scan * m/z * sample. Here processing is sample wise (in "find_spectrum.R", the whole sampleset is in memory at once),
### so the array has to be build step by step
### First array variables for each window has to be defined. Then in the two main loops "sample" and "window" the variables
### are filled sample by sample. After finishing the two loops, the arrays have to be stored in corresponding files
#for(win in windowlist){
# scans<-(edges[win+1]-edges[win]+1)
# mzs<-length(SCAN_RANGE)
# BL<-array(0,dim=c(scans,mzs,length(files)))
# #eval(parse(text=paste("BL_",win,"<-NULL",sep="")))
# save(BL,file=file.path(predpath,"Edges","dat","Model samples_bg_corr",paste("bg_win",ifelse(win<=99,ifelse(win<=9,paste("00",win,sep=""),paste("0",win,sep="")),win),".Rdata",sep="")))
# rm(BL)
#}
if(length(windowlist)){
# Outer loop: samples (one at a time to limit memory use).
for(i in 1:length(files)){
load(files[i])
cat("Resolving ",basename(files[i]),paste(" (",i,"/",length(files),")\n",sep=""))
# Inner loop: every window resolved in the original project.
for(win in windowlist){
if(edges[win]+shift[i] > 0 & edges[win+1]+shift[i] < nrow(Xbc)){
x <- Xbc[edges[win]:edges[win+1]+shift[i],]
if(ncol(x) < maxMZ)
x <- cbind(x,matrix(0,nrow=nrow(x),ncol= maxMZ-ncol(x)))
if(DO_BL){ # Removes baseline for prediction files
BL<-matrix(apply(x,2,function(x) approx(c(1,length(x)),x[c(1,length(x))],1:length(x))$y),nrow=length(edges[win]:edges[win+1]),ncol=maxMZ) # method = 'linear' by default
x <- x-BL
x[x<0] <- 0
###test code###
#BL_new<-BL
#load(file.path(predpath,"Edges","dat","Model\ samples","bg_corr",paste("bg_win",ifelse(win<=99,ifelse(win<=9,paste("00",win,sep=""),paste("0",win,sep="")),win),".Rdata",sep="")))
#BL[,,i]<-BL_new
#save(BL,file=file.path(predpath,"Edges","dat","Model samples","bg_corr",paste("bg_win",ifelse(win<=99,ifelse(win<=9,paste("00",win,sep=""),paste("0",win,sep="")),win),".Rdata",sep="")))
#rm(BL_new)
rm(BL)
}
load(file.path(projectpath,"HMCR", "REG","win",paste("win",ifelse(win<=99,ifelse(win<=9,paste("00",win,sep=""),paste("0",win,sep="")),win),".Rdata",sep="")))
# browser()
if(!length(S))
c2 <- numeric()
else
c2 <- do_AR_all_prediction(x,S,CP,RT_LIMIT)
if(exists("PCApara"))
# NOTE(review): '(colSums(x) <- PCApara$mean)' calls a non-existent
# replacement function 'colSums<-' and would error at runtime;
# presumably '(colSums(x) - PCApara$mean)' (mean-centring) was
# intended — confirm before relying on the PCApara branch.
scores <- (colSums(x) <- PCApara$mean)%*%PCApara$loadings[,1:2]
else
scores <- numeric()
ok <- 1
}else{
# Shifted window falls outside this chromatogram: emit zero/NA rows.
load(file.path(projectpath,"HMCR", "REG","win",paste("win",ifelse(win<=99,ifelse(win<=9,paste("00",win,sep=""),paste("0",win,sep="")),win),".Rdata",sep="")))
c2 <- matrix(0,length(edges[win]:edges[win+1]),ncol(as.matrix(C)))
ok <- 0
if(exists("PCApara"))
scores <- c(0,0)
else
scores <- numeric()
}
# Append this sample's window summary (C) and full profiles (CC) to the
# per-window result file; first sample creates it.
if(i == 1){
if(ok){
C<-matrix(colSums(as.matrix(c2,ncol=ncol(as.matrix(C)))),nrow=1)
CC <- c2
}else{
C<-matrix(colSums(as.matrix(c2,ncol=ncol(as.matrix(C)))),nrow=1)*NA
CC<-c2
}
Scores <- rbind(Scores,scores)
save(C,CC,S,TIME,Scores,file=file.path(predpath,"HMCR",type,"win",paste("win",ifelse(win<=99,ifelse(win<=9,paste("00",win,sep=""),paste("0",win,sep="")),win),".Rdata",sep="")))
}else{
load(file.path(predpath,"HMCR",type,"win",paste("win",ifelse(win<=99,ifelse(win<=9,paste("00",win,sep=""),paste("0",win,sep="")),win),".Rdata",sep="")))
Scores <- rbind(Scores,scores)
if(ok){
C<-rbind(C,colSums(as.matrix(c2,ncol=ncol(as.matrix(C)))))
CC<-rbind(CC,c2)
}else{
C<-rbind(C,colSums(as.matrix(c2,ncol=ncol(as.matrix(C))))*NA)
CC<-rbind(CC,c2)
}
save(C,CC,S,TIME,Scores,file=file.path(predpath,"HMCR",type,"win",paste("win",ifelse(win<=99,ifelse(win<=9,paste("00",win,sep=""),paste("0",win,sep="")),win),".Rdata",sep="")))
}
gc()
}
gc()
}
# Plot windows
# for(win in windowlist){
# if(win %in% as.numeric(substr(basename(list.files(file.path(projectpath,"HMCR",type,"win"))),4,6))){
# rm(Scores)
# load(file.path(predpath,"HMCR",type,"win",paste("win",ifelse(win<=99,ifelse(win<=9,paste("00",win,sep=""),paste("0",win,sep="")),win),".Rdata",sep="")))
# if(min(dim(CC)) > 0){
# if(length(Scores)){
# #Plot functions for Cross validation method
# }
# Hz <- 1/median(diff(SCAN_INFO[,2]))
# for(i in 1:ncol(CC)){
# c2 <- matrix(CC[,i],length(edges[win]:edges[win+1]),length(files))
# if(length(EDGES_TIME) < edges[win+1])
# EDGES_TIME <- c(EDGES_TIME,approxExtrap(1:length(EDGES_TIME),EDGES_TIME,xout=(length(EDGES_TIME)+1):edges[win+1])$y)
# for(j in 1:ncol(c2)){
# plot(EDGES_TIME[edges[win]:edges[win+1]]+median(shift)/Hz,c2[,j],col=color[i],xlab="Time",ylab="Intensity",type="l",ylim=cbind(0,max(CC)),xlim=range(EDGES_TIME[edges[win]:edges[win+1]]+median(shift)/Hz),main=paste("Window:",win),lwd=1)
# par(new=TRUE)
# }
# }
# par(new=FALSE)
# #savePlot(file.path(predpath,"HMCR", "REG","win_png",paste("win",ifelse(win<=99,ifelse(win<=9,paste("00",win,sep=""),paste("0",win,sep="")),win),sep="")),type="png")
# }else
# cat("No data found in window ", win,"!\n")
# }
# }
}else
cat("No windows processed.\n")
return(list(type=type,windowlist=windowlist))
}
}
##' Project prediction data onto known spectra with unimodality constraint.
##'
##' Least-squares regression of one window's raw data onto the resolved
##' spectral profiles S, followed by clipping at zero and a unimodality
##' constraint around each component's expected peak position.
##' @param x Scan x m/z data matrix for one window.
##' @param S m/z x component matrix of resolved spectra.
##' @param CP Expected peak position (scan index) per component.
##' @param RT_LIMIT Maximum allowed distance between the found and the
##'   expected peak before a component is zeroed out.
##' @return Scan x component matrix of constrained concentration profiles.
do_AR_all_prediction <- function(x, S, CP, RT_LIMIT) {
  # Least-squares concentration estimate via the pseudo-inverse, clipped
  # at zero. (The original also computed nrow(x)/ncol(x) into unused
  # locals and passed 'nrow=' to as.matrix(), which ignores it.)
  C <- x %*% S %*% ginv(t(S) %*% S)
  C[C < 0] <- 0
  Cnew <- C * 0
  for (i in seq_len(ncol(C))) {
    # unimodal_3() indexes its first argument with [, i], so pass a
    # one-column matrix.
    ci <- as.matrix(C[, i])
    Cnew[, i] <- unimodal_3(ci, 1, CP[i], RT_LIMIT)
  }
  Cnew
}
##' Force each concentration profile to be unimodal around an expected
##' peak position.
##'
##' Picks the detected peak closest to the expected position cp; if it is
##' within RT_LIMIT scans, the profile is made monotonically increasing
##' up to the peak and decreasing after it (by running minima), otherwise
##' the whole column is zeroed.
##' @param C Scan x column matrix of concentration profiles.
##' @param obs Number of columns to process (columns 1:obs).
##' @param cp Expected peak position (scan index).
##' @param RT_LIMIT Maximum allowed peak displacement.
##' @return The constrained matrix C.
unimodal_3<-function(C,obs,cp,RT_LIMIT){
for(i in 1:obs){
xpeak <- peak_pick(t(C[,i]))$xpeak
mp <- which(as.logical(xpeak))
if(!length(mp)){
C[,i] <- C[,i]*0
}else{
# NOTE(review): which.min() on a non-empty vector always returns an
# index >= 1, so this condition is always TRUE and the 'mp <- 1'
# branch below is dead code.
if(which.min(abs(mp-cp)))
mp <- min(mp[which.min(abs(mp-cp))])
else
mp <- 1
if(abs(mp-cp)<RT_LIMIT){
# Left of the peak: replace any descent with the running minimum so the
# profile rises monotonically toward mp.
D <- diff(C[1:mp,i])
if(length(which(D<0)))
poss <- max(which(D<0))
else
poss <- 1
for(j in poss:1)
C[j,i] <- min(C[j:mp,i])
# Right of the peak: same idea in the other direction. The dangling
# 'poss <-' below makes this parse as
#   poss <- if (...) poss <- min(...) else poss <- FALSE
# which works but is fragile; behavior is a single assignment to poss.
D <- diff(C[mp:nrow(C),i])
poss <-
if(length(which(D>0)))
poss <- min(which(D>0))
else
poss <- FALSE
if(poss){
for(j in (mp-1+poss):nrow(C))
C[j,i] <- min(C[mp:j,i])
}
# Correction of strange peaks
# Drop flat runs and re-centre the remaining shape on the peak index.
knew <- C[(diff(C[,i]) != 0),i]
mpnew <- which.max(knew)
k <- C[,i]*0
k[1:length(knew)+mp-mpnew] <- knew
C[,i] <- k
}else
C[,i] <- C[,i]*0
}
}
C
}
##' Find peaks in a signal via the sign pattern of its smoothed derivative.
##'
##' Computes a Savitzky-Golay first derivative, marks positions where the
##' derivative goes from positive (two preceding points) to negative (two
##' following points) and the raw value exceeds the median noise level,
##' then merges peaks closer than 3 positions, keeping the higher one.
##' @param x Signal; expected as a row vector / 1 x n matrix (callers pass
##'   t(C[, i])), since the loop runs over columns of as.matrix(x).
##' @return A list with xpeak (zero vector with peak heights at the peak
##'   positions) and xout (the input as a matrix).
peak_pick<-function(x){
xout <- x <- as.matrix(x)
# Smoothed first derivative (order-3 polynomial, 11-point window).
xd1 <- sgolayfilt(xout,3,11,1)
NOISE_LIMIT <- median(x)
N1 <- which(x>NOISE_LIMIT)
N2 <- matrix(0,nrow(x),ncol(x))
# A peak needs + + (here) - - in the derivative and an above-noise value.
for (i in 3:(ncol(x)-2))
if(xd1[i-2] > 0)
if(xd1[i-1] > 0)
if(xd1[i+1] < 0)
if(xd1[i+2] < 0)
if(sum(N1 == i) == 1)
N2[i] <- TRUE
N <- which(as.logical(N2))
# Merge peaks closer than 3 positions, discarding the lower of each pair.
if(length(N) > 1){
while(min(diff(N)) < 3){
p1 <- min(which(diff(N) < 3))
p2 <- p1+1
if(xout[N[p1]] < xout[N[p2]])
N <- N[-p1]
else
N <- N[-p2]
if(length(N) == 1)
break
}
}
xpeak <- matrix(0,nrow(x),ncol(x))
xpeak[N] <- xout[N]
out <- list(xpeak = xpeak, xout = xout)
}
##' Savitzky-Golay smoothing/differentiation filter matrix.
##'
##' Builds the n x n matrix of Savitzky-Golay coefficients for a
##' polynomial of order p: row k+1 (k = floor(n/2)) is the steady-state
##' central filter, rows 1..k handle the left edge, and the remaining
##' rows follow by (sign-flipped, for odd derivatives) symmetry.
##' @param p Polynomial order; must be smaller than n.
##' @param n Filter length; must be odd.
##' @param m Derivative order (0 = smoothing).
##' @param ts Sampling interval, used to scale derivative filters.
##' @return An n x n matrix of class "sgolayFilter".
sgolay <- function(p, n, m = 0, ts = 1) {
  library(MASS)
  if (n %% 2 != 1)
    stop("sgolay needs an odd filter length n")
  if (p >= n)
    stop("sgolay needs filter length n larger than polynomial order p")
  # 'fm' instead of the original 'F', which shadows the FALSE alias.
  fm <- matrix(0, n, n)
  k <- floor(n / 2)
  for (row in 1:(k + 1)) {
    # Vandermonde-style design matrix centred on 'row'; its pseudo-inverse
    # yields the least-squares polynomial-fit coefficients, of which row
    # 1+m gives the m-th derivative filter.
    C <- (((1:n) - row) %*% matrix(1, 1, p + 1))^(matrix(1, n) %*% (0:p))
    A <- ginv(C)
    fm[row, ] <- A[1 + m, ]
  }
  # Bottom rows by symmetry (mirrored and, for odd m, sign-flipped).
  fm[(k + 2):n, ] <- (-1)^m * fm[k:1, n:1]
  if (m > 0)
    fm <- fm * prod(1:m) / (ts^m)
  class(fm) <- "sgolayFilter"
  fm
}
##' Apply a Savitzky-Golay filter to a signal.
##'
##' Smooths (or, for m > 0, differentiates) x with a Savitzky-Golay
##' filter: the interior uses the steady-state central filter via a
##' one-sided convolution, the first and last k samples use the edge rows
##' of the filter matrix.
##' @param x Numeric signal vector.
##' @param p Polynomial order, or a precomputed filter matrix (class
##'   "sgolayFilter", or any matrix with both dimensions > 1).
##' @param n Filter length (odd); ignored when p is a matrix.
##' @param m Derivative order (0 = smoothing).
##' @param ts Sampling interval.
##' @return The filtered signal, same length as x.
sgolayfilt <- function(x, p = 3, n = p + 3 - p %% 2, m = 0, ts = 1) {
  len <- length(x)
  # inherits()/all() replace the original 'class(p) == "sgolayFilter"'
  # and 'dim(p) > 1' tests: a plain matrix has class c("matrix","array")
  # and a length-2 dim, so under R >= 4.3 the original || / && conditions
  # error on non-scalar input.
  if (inherits(p, "sgolayFilter") || (!is.null(dim(p)) && all(dim(p) > 1))) {
    F <- p
    n <- nrow(F)
  } else {
    F <- sgolay(p, n, m, ts)
  }
  k <- floor(n / 2)
  # Steady-state filter is the reversed central row; the zero padding and
  # na.omit() leave exactly 'len' convolution values, of which positions
  # n:len are the valid interior results. (stats::filter, not the
  # unexported-access ':::' form used originally.)
  filt <- F[k + 1, n:1]
  z <- na.omit(stats::filter(c(rep(0, length(filt) - 1), x), filt, sides = 1))
  # Edge rows handle the first k and last k samples.
  c(F[1:k, ] %*% x[1:n], z[n:len], F[(k + 2):n, ] %*% x[(len - n + 1):len])
}
|
504754e38750c4fb1d36d5134f3546a00a720a90
|
a17795f03e21930af5dcbac26d8e19f7500585c4
|
/lecture9/in_class_exercise.R
|
70ff9f1476c2564cf3e27687b1a0447f5f0ec30f
|
[] |
no_license
|
shrebirth1223/Seunghyun_park
|
7c6764f36404b9a374ca90b4f626041f4099106f
|
016776b2394f7dec12959a9c94b963e99926419e
|
refs/heads/master
| 2020-07-21T20:01:37.639932
| 2015-07-15T12:31:54
| 2015-07-15T12:31:54
| 38,341,928
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,921
|
r
|
in_class_exercise.R
|
#1. A store keeps track of purchase history. The manager wonders if there is an association between the amount of money a customer spends on their first visit to the store and their second visit. Below is data collected on 10 customers. Each column corresponds to one customer. For example, the first customer spend $20 on the first visit and $23 on the second visit. The second customer spend $32 on first visit and $34 on second, etc.
#Money spent on first visit (in dollars): 20,32,35,34,40,51,52,56,57,68 Money spent on second visit (in dollars): 23,34,36,44,42,51,54,57,54,62
first_visit <- c(20,32,35,34,40,51,52,56,57,68)
second_visit <- c(23,34,36,44,42,51,54,57,54,62)
data=cbind(first_visit,second_visit)
#a. Display the relationship between first and second visit dollar amounts?
plot(first_visit,second_visit)
#b. Describe the pattern in part (a) briefly. Is there a relationship? Is it positive or negative? Is it linear or non-linear? Is it weak or strong?
#It shows the positive linear relation ship betweeen first_visit and second_visit, It's quite strong.
#c. Calculate the correlation coefficient between the amount of money spent on the first visit and the second visit.
corelation <- cor(first_visit,second_visit)
standard_error <- sqrt((1-corelation^2)/(length(first_visit)-2))
#d. What does the standard error in part (c) refer to?
#e. Calculate an approximate 95% confidence interval for ρ.
interval_right <- cor(data)-2*standard_error#first answer
interval_left <- cor(data)+2*standard_error#first answer
rt<-cor.test(first_visit,second_visit)#Second answer
rt$conf.int#Second answer
#2. Answer the following question using the data from question (2).
#a. Adding $30 to each of the observations for the second visit. How is the correlation coefficient between first and second visits affected? What can you conclude about the effects on the correlation coefficient of adding a constant to one or both of the variables?
c1_B <- first_visit + 30
c2_B <- second_visit + 30
cor(first_visit,c2_B)
#b. Convert the first visit to cents (i.e., multiply by 100). How does this affect the correlation between the first and second visits? What can you conclude about the effects on the correlation coefficient of multiplying one or both of the variables by a constant?
c1_C <- first_visit*100
cor(c1_C,second_visit)
#3. Some species seem to thrive in captivity, whereas others are prone to health and behavior difficulties when caged. Maternal care problems in some captive species, for example, lead to high infant mortality. Can these differences be predicted? The following data are measurements of the infant mortality (percentage of births) of 20 carnivore species in captivity along with the log (based-10) of the minimal home range sizes (in km2) of the same species in the wild (Clubb and Mason 2003). For example, -1.3 is the home range and 4 is the captive infant mortality percentage.
#Log home range size: -1.3 (4),-0.5 (22),-0.3 (0),0.2 (0),0.1 (11),0.5 (13),1.0 (17),0.3 (25),0.4 (24),0.5 (27),0.1 (29),0.2 (33),0.4 (33),1.3 (42),1.2 (33),1.4 (20),1.6 (19),1.6 (19),1.8 (25),3.1 (65)
#a. Draw a scatter plot of these data, with log of home range size as the explanatory variable. Describe the association between the two variables in words.
b1 <- c(-1.3,-0.5,-0.3,0.2,0.1,0.5,1.0,0.3,0.4,0.5,0.1,0.2,0.4,1.3,1.2,1.4,1.6,1.6,1.8,3.1)
b2 <- c(4,22,0,0,11,13,17,25,24,27,29,33,33,42,33,20,19,19,25,65)
plot(b1,b2)
#b. Estimate the slope and intercept of the least squares regression line, with the log of home range size as the explanatory variable. Add this line to your plot.
m <- lm(b2~b1)
abline(m)
#c. Does home range size in the wild predict the mortality of captive carnivores? Carry out a formal test. Assume that the species data are independent.
# Ho: home range size does not predict infant mortality (beta = 0)
# Ha: home range size does predict infant mortality (beta != 0)
# answer: b = 9.955, a=16.280, SE=2.766, t=3.6, df=18, P=0.002
# reject null Ho
summary(m)
a <- m$coefficients[1]
b <- m$coefficients[2]
a = 16.28047
b = 9.955187
# predicted values vs actual values
b3 <- mat.or.vec(20,1)
for (i in 1:length(b3)) {
b3[i] = b*b1[i] + a
}
points(b1,b3,col="red")
#d. Outliers should be investigated because they might have a substantial effect on the estimate so of the slope and intercept. Recalculate the slope and intercept of the regression line from part (c) after excluding the outlier at large home range size (which correspond to the polar bear). Add the new line to your plot. By how much did it change the slope?
# Same data with the polar-bear point (3.1, 65) removed.
# NOTE(review): the original file mistyped the 18th mortality value as 25;
# per the data listed in part (a) the pairs are 1.6->19, 1.6->19, 1.8->25,
# so the vector must end ...,20,19,19,25. Answers below recomputed.
b1_p <- c(-1.3,-0.5,-0.3,0.2,0.1,0.5,1.0,0.3,0.4,0.5,0.1,0.2,0.4,1.3,1.2,1.4,1.6,1.6,1.8)
b2_p <- c(4,22,0,0,11,13,17,25,24,27,29,33,33,42,33,20,19,19,25)
m_p <- lm(b2_p~b1_p)
abline(m_p)
summary(m_p)
# answer: b ~= 6.06, a ~= 17.49 (computed from the corrected data)
# still reject Ho but not as strongly; the slope drops from 9.955 to ~6.06
|
2c46afbd9847c9d2f34d3c89e1017750b6f8821a
|
f508935a612e29d265e1501544f94fdaec9d4c47
|
/plot4.R
|
76a95b0ed20afc89ba8718284ce343a2952512c6
|
[] |
no_license
|
Folgs/ExData_Plotting1
|
54d31e638c9d1d1e4d4a14371fbcfdd0f01e239b
|
52494f332fba06b6543764ad5754a528877451cc
|
refs/heads/master
| 2020-12-28T21:06:42.344077
| 2016-07-08T10:22:46
| 2016-07-08T10:22:46
| 62,880,312
| 0
| 0
| null | 2016-07-08T10:19:03
| 2016-07-08T10:19:03
| null |
UTF-8
|
R
| false
| false
| 1,189
|
r
|
plot4.R
|
## plot4.R -- draws the four-panel figure (global active power, energy
## sub-metering, voltage, global reactive power) for 1-2 Feb 2007 of the
## UCI "Individual household electric power consumption" data set.
mydata<-read.table(file="household_power_consumption.txt",sep=";",header=TRUE,stringsAsFactors = FALSE,na.strings = "?")
##We pick the dates we want.
mydata<-mydata[(mydata$Date=="2/2/2007")|(mydata$Date=="1/2/2007"),]
##Then we convert the time column into a proper date-time.
mydata$Time<-paste(mydata$Date,mydata$Time,sep=" ")
mydata$Time<-strptime(mydata$Time,format="%d/%m/%Y %H:%M:%S")
## Open the PNG device BEFORE calling par(): graphical parameters are
## per-device, so the original par(mfrow=c(2,2)) call made before png()
## configured the wrong device and the 2x2 layout was lost in the file
## (each plot() started a new page, overwriting plot4.png).
png(filename = "plot4.png")
par(mfrow=c(2,2))
plot(x=mydata$Time,mydata$Global_active_power,type="l",xlab="Days",ylab="Global Active Power (kilowatts)")
plot(mydata$Time,mydata$Sub_metering_1,type="l",xlab="Days",ylab="Energy sub metering")
lines(x=mydata$Time,y=mydata$Sub_metering_2,col="red")
lines(x=mydata$Time,y=mydata$Sub_metering_3,col="blue")
## The series are lines, so show line samples (lty) in the legend instead of
## the letter "l" as a plotting character; also fix the "Sub_meterring_1" typo.
legend(x="topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,col=c("black","red","blue"))
plot(mydata$Time,mydata$Voltage,type="l",xlab="datetime",ylab="Voltage")
plot(mydata$Time,mydata$Global_reactive_power,type="l",xlab="datetime",ylab="Global_reactive_power")
dev.off()
|
457d901a108ca00a12f857e5aa6203301bf254d2
|
95799d6e1ddc4bbbbbbcfb3a8e61d90176fc01a4
|
/Chapter III/chapter_3_3.R
|
fa2e236ac1696eb69ca086ae01d1b0ded90dcfa1
|
[] |
no_license
|
drnmy/mlf
|
e8e7846a763e5dd89cc965806296b1f9b80142ed
|
092e3f8db633d4013c6f77022553b3246cb4f401
|
refs/heads/master
| 2022-12-04T01:42:18.313331
| 2020-08-17T15:36:44
| 2020-08-17T15:36:44
| 284,404,931
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,591
|
r
|
chapter_3_3.R
|
# Данните са https://archive.ics.uci.edu/ml/datasets/Polish+companies+bankruptcy+data#
library(foreign)
library(stats)
library(caret)
readData <- function(bAllData = FALSE)
{
  # Load the Polish companies bankruptcy data (UCI repository).
  # bAllData = TRUE stacks all five yearly snapshots into one frame;
  # otherwise only the first-year file is returned.
  yearly_files <- c(
    "c:/Stanio/Mono_S/Data/1year.arff",
    "c:/Stanio/Mono_S/Data/2year.arff",
    "c:/Stanio/Mono_S/Data/3year.arff",
    "c:/Stanio/Mono_S/Data/4year.arff",
    "c:/Stanio/Mono_S/Data/5year.arff"
  )
  if (TRUE == bAllData) {
    # Read every yearly file and row-bind them into a single data frame.
    do.call(rbind, lapply(yearly_files, read.arff))
  } else {
    read.arff(yearly_files[1])
  }
}
data <- readData(FALSE)
#data <- data[, which(names(data) %in% c("class","Attr3", "Attr6", "Attr7", "Attr8", "Attr9"))]
# Hold out 15% of the rows, stratified on the class label.
split <- createDataPartition(y = data$class,p = 0.85,list = FALSE)
# Shared cleanup applied identically to both partitions (the original code
# duplicated these four statements for train and test): drop rows with NAs
# and neutralize +/-Inf ratio values so the models don't fail on them.
cleanPartition <- function(d) {
  d <- na.omit(d)
  d[d == -Inf] <- 0
  d[d == Inf] <- 0
  d
}
train <- cleanPartition(data[split,])
test <- cleanPartition(data[-split,])
# Class balance in each split.
summary(train$class)
summary(test$class)
cutoffExtremes <- function(data)
{
  # Standardize (z-score via scale()) every predictor column, leaving the
  # "class" label column untouched. The hard +/-20 cutoffs that were tried
  # earlier were superseded by plain scaling.
  feature_cols <- !(names(data) %in% c("class"))
  data[, feature_cols] <- scale(data[, feature_cols])
  data
}
# Standardize both splits.
# NOTE(review): train and test are scaled independently here; the test set
# is not transformed with the training-set means/sds -- confirm intended.
train <- cutoffExtremes(train)
test <- cutoffExtremes(test)
# 25-fold cross-validated logistic regression via caret.
trCntl <- trainControl(method = "CV",number = 25)
logitModel <- train(class ~ .,data = train,trControl = trCntl,method="glm",family = "binomial")
summary(logitModel)
confusionMatrix(logitModel)
pdata <- predict(logitModel, test)
# generate confusion matrix for hold back data
confusionMatrix(pdata,reference=test$class)
library(ROCR)
library(Metrics)
# Refit a plain glm to obtain continuous scores for the ROC curve.
logitModel <- glm(class ~ ., family=binomial(link='logit'),data=train)
summary(logitModel)
# predict() on a glm defaults to the link scale (log-odds); that is fine for
# ROC/AUC because the mapping to probabilities is monotone.
pdata <- predict(logitModel, test)
prX <- pdata
prY <- test$class
pr <- prediction( prX, prY)
perf <- performance(pr,measure = "tpr",x.measure = "fpr")
plot(perf, colorize=TRUE)
# As a rule of thumb, a model with good predictive ability should have an AUC closer to 1 (1 is ideal) than to 0.5.
auc <- performance(pr, measure = "auc")
auc <- auc@y.values[[1]]
auc
### 2. MDA
library(mda)
# Mixture discriminant analysis on five selected financial ratios.
mdaModel <- mda(class ~ Attr3 + Attr6 + Attr7 + Attr8 + Attr9, train)
summary(mdaModel)
confusion(mdaModel)
plot(mdaModel, data=train)
pdata <- predict(mdaModel, test)
confusionMatrix(pdata,reference=test$class)
# NOTE(review): predict on an mda model returns class labels here, not
# posterior scores; feeding labels to ROCR::prediction yields a degenerate
# two-point ROC -- confirm posteriors were not intended instead.
pr <- prediction(pdata, test$class)
perf <- performance(pr,measure = "tpr",x.measure = "fpr")
plot(perf, colorize=TRUE)
auc <- performance(pr, measure="auc")
auc <- auc@y.values[[1]]
auc
### 3. FDA
# Flexible discriminant analysis (fit + training confusion matrix only).
fdaModel <- fda(class ~ Attr3 + Attr6 + Attr7 + Attr8 + Attr9, data=train)
summary(fdaModel)
confusion(fdaModel)
### 4. LDA
library(MASS)
ldaModel <- lda(class ~ Attr3 + Attr6 + Attr7 + Attr8 + Attr9, train)
summary(ldaModel)
confusion(ldaModel)
plot(ldaModel)
pdata <- predict(ldaModel, test)
# ROC score = posterior probability of the second class level.
pr <- prediction(pdata$posterior[,2], test$class)
perf <- performance(pr,"tpr","fpr")
plot(perf,colorize=TRUE)
# As a rule of thumb, a model with good predictive ability should have an AUC closer to 1 (1 is ideal) than to 0.5.
auc <- performance(pr, measure = "auc")
auc <- auc@y.values[[1]]
auc
### 5. Neural network
library(neuralnet)
#scaledtrain <- scale(train)
normalize <- function(x) {
  # Min-max rescaling of a numeric vector onto [0, 1].
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
#normTrain <- as.data.frame(lapply(train, normalize))
#normTest <- as.data.frame(lapply(test, normalize))
# Re-standardize the five selected ratios on each split.
normTrain <- train
normTrain$Attr3 <- scale(normTrain$Attr3)
normTrain$Attr6 <- scale(normTrain$Attr6)
normTrain$Attr7 <- scale(normTrain$Attr7)
normTrain$Attr8 <- scale(normTrain$Attr8)
normTrain$Attr9 <- scale(normTrain$Attr9)
normTest <- test
normTest$Attr3 <- scale(normTest$Attr3)
normTest$Attr6 <- scale(normTest$Attr6)
normTest$Attr7 <- scale(normTest$Attr7)
normTest$Attr8 <- scale(normTest$Attr8)
normTest$Attr9 <- scale(normTest$Attr9)
# No hidden layer (hidden=c(0)) -> essentially a logistic-style model.
nn <- neuralnet(class ~ .,
data=normTrain,
hidden=c(0),
algorithm="sag",
linear.output=FALSE)
nn$result.matrix
plot(nn)
# Predictors only (class column removed) for compute().
tempTest <- normTest[, !(names(normTest) %in% c("class"))]
nn.results <- compute(nn, tempTest)
pred.weights <- nn.results$net.result
# One output column per class: pick the most likely one (0-based labels).
idx <- apply(pred.weights, 1, which.max) - 1
table(idx, test$class)
cm <- confusionMatrix(as.factor(idx), normTest$class)
cm
cm$table / length(normTest$class)
prob = compute(nn, tempTest )
prob.result <- prob$net.result
# caret-managed single-hidden-layer net with repeated 10-fold CV.
train_params <- trainControl(method = "repeatedcv", number = 10, repeats=5)
# NOTE(review): train[,-6] drops the 6th column; with the full 65-column
# frame this leaves the class column among the predictors (leakage) --
# confirm the commented-out 6-column subset above was meant to be active.
nnet_model <- train(train[,-6], train$class,
method = "nnet",
trControl= train_params,
preProcess=c("scale","center"),
na.action = na.omit
)
prop.table(table(train$class)) #Baseline Accuracy
# Predictions on the training set
nnet_predictions_train <-predict(nnet_model, train)
table(train$class, nnet_predictions_train)
#Predictions on the test set
nnet_predictions_test <-predict(nnet_model, test)
table(test$class, nnet_predictions_test)
### 6. NNET
library(nnet)
# Re-standardize the five selected ratios (same per-split scaling as above).
normTrain <- train
normTrain$Attr3 <- scale(normTrain$Attr3)
normTrain$Attr6 <- scale(normTrain$Attr6)
normTrain$Attr7 <- scale(normTrain$Attr7)
normTrain$Attr8 <- scale(normTrain$Attr8)
normTrain$Attr9 <- scale(normTrain$Attr9)
normTest <- test
normTest$Attr3 <- scale(normTest$Attr3)
normTest$Attr6 <- scale(normTest$Attr6)
normTest$Attr7 <- scale(normTest$Attr7)
normTest$Attr8 <- scale(normTest$Attr8)
normTest$Attr9 <- scale(normTest$Attr9)
# Single hidden layer of 5 units.
nn <- nnet(class ~ ., data=normTrain, size=5, maxit=1000)
pdata <- predict(nn, normTest, type="class")
pdata <- as.factor(pdata)
confusionMatrix(pdata, normTest$class)
### 7. SVM
library(e1071)
normTrain <- train
normTrain$Attr3 <- scale(normTrain$Attr3)
normTrain$Attr6 <- scale(normTrain$Attr6)
normTrain$Attr7 <- scale(normTrain$Attr7)
normTrain$Attr8 <- scale(normTrain$Attr8)
normTrain$Attr9 <- scale(normTrain$Attr9)
normTest <- test
normTest$Attr3 <- scale(normTest$Attr3)
normTest$Attr6 <- scale(normTest$Attr6)
normTest$Attr7 <- scale(normTest$Attr7)
normTest$Attr8 <- scale(normTest$Attr8)
normTest$Attr9 <- scale(normTest$Attr9)
# C-classification SVM with a sigmoid kernel.
svmModel <- svm(formula = class ~ .,
data = normTrain,
type = 'C-classification',
kernel = 'sigmoid')
# Predictors only for predict().
tempTest <- normTest[, !(names(normTest) %in% c("class"))]
pdata <- predict(svmModel, newdata = tempTest)
cm <- confusionMatrix(pdata, normTest$class)
cm
cm$table / length(normTest$class)
# Sweep the positive-class weight from 1 to 150 and record the four
# confusion-matrix cells (as % of test cases) for each weight.
graph <- data.frame(matrix(ncol = 5, nrow = 0))
for(i in 1:150) {
svmModel <- svm(formula = class ~ .,
data = normTrain,
type = 'C-classification',
kernel = 'sigmoid',
class.weights= c("0" = 1, "1" = i))
tempTest <- normTest[, !(names(normTest) %in% c("class"))]
pdata <- predict(svmModel, newdata = tempTest)
cm <- confusionMatrix(pdata, normTest$class)
pct <- (cm$table / length(normTest$class))*100
# Growing a data.frame with rbind() in a loop is quadratic, but the 150
# SVM fits dominate the run time here anyway.
graph <- rbind(graph, c(i, pct[1,1], pct[1,2], pct[2,1], pct[2,2]))
}
x <- c("w", "(0,0)", "(0,1)", "(1,0)", "(1,1)")
colnames(graph) <- x
require(ggplot2)
require(reshape2)
# Long format: one row per (weight, matrix-cell) pair for ggplot.
g <- melt(graph, id.vars="w", variable.name='series')
ggplot(g, aes(w,value)) + geom_line(aes(colour = series))
|
d6728aedd0e7c0964a9b291c42fd34475209f112
|
e5c43a31a082bbfec5ebbc20b34d373896721579
|
/R/functions/round.extent.R
|
9434812c525fc251f314410e60918e70ae00ac10
|
[] |
no_license
|
geryan/rfst
|
3dde3a499651f3a1ccc736f8c6597c5972f0e17c
|
0aac1f0c3b17096af0c5b0b06e1ad80ac6d709ca
|
refs/heads/master
| 2023-05-02T12:32:51.743467
| 2021-04-27T01:26:47
| 2021-04-27T01:26:47
| 164,573,310
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 104
|
r
|
round.extent.R
|
round.extent <- function(x){
  # Snap a Raster* object's extent to whole-number coordinates.
  # library() (not requireNamespace) is kept so the raster generics
  # extent()/extent<- are attached, matching the original behavior.
  library(raster)
  snapped <- round(extent(x))
  extent(x) <- snapped
  return(x)
}
|
8f3c421e6a2985cefeae26cdf716500177b571e9
|
e26f9101996074842a8bc796aae6280572489aec
|
/man/copyMatricesToVirtualSdy.Rd
|
ef8f020b3b6824d6dfc68e9c36f3ee52cada07da
|
[] |
no_license
|
ehfhcrc/UpdateAnno
|
5c31ea9aac2ecf0f1b5e256cce3abd93f6c79da8
|
f8e561750e1d5cdee90d08753a34ff78a9813926
|
refs/heads/master
| 2021-01-19T08:03:01.670544
| 2018-10-10T16:30:29
| 2018-10-10T16:30:29
| 87,593,499
| 1
| 1
| null | 2021-01-11T18:32:19
| 2017-04-07T23:09:30
|
R
|
UTF-8
|
R
| false
| true
| 533
|
rd
|
copyMatricesToVirtualSdy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CopyMatricesToVirtualSdy.R
\name{copyMatricesToVirtualSdy}
\alias{copyMatricesToVirtualSdy}
\title{Function to migrate a copy of EMs from /Studies/ to a virtual study container}
\usage{
copyMatricesToVirtualSdy(ISserver, virtualSdy)
}
\arguments{
\item{ISserver}{immunespace server, either test or prod}
\item{virtualSdy}{subdirectory within HIPC container}
}
\description{
Function to migrate a copy of EMs from /Studies/ to a virtual study container
}
|
3e82818e1a312d77cbd0e0d3e89ad3aeba967f26
|
473dfd3f5c89fd2bf2087c524c52e484ecc823b6
|
/man/lineLength.Rd
|
8949706184a7cf2ac1709685fde996c2176c8908
|
[] |
no_license
|
cran/SDraw
|
038ec0a0f2d8a094f89d258d43edb15a003303b2
|
0b06c5ecbd424a0d9ba59fe5fd4f4bf30a1ce326
|
refs/heads/master
| 2021-01-17T19:20:33.351896
| 2020-07-03T15:20:09
| 2020-07-03T15:20:09
| 60,879,512
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,888
|
rd
|
lineLength.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lineLength.r
\name{lineLength}
\alias{lineLength}
\title{Line length}
\usage{
lineLength(x, byid = FALSE)
}
\arguments{
\item{x}{A spatial object inheriting from \code{SpatialLines}, \code{SpatialPolygons},
or \code{SpatialPoints}.}
\item{byid}{Whether to return lengths of individual spatial objects (TRUE)
or the sum of all length (FALSE).}
}
\value{
If \code{byid==TRUE}, a vector containing the lengths of individual
spatial objects
(the points, lines, or polygons) is returned. If \code{byid=FALSE},
the total length of all spatial objects is returned (a single number).
If \code{x} inherits from \code{SpatialPoints}, returned
value is 0. If \code{x} inherits from \code{SpatialLines}, returned
value contains line lengths or the sum of line lengths in \code{x}.
If \code{x} inherits from \code{SpatialPolygons}, returned
value contains lengths of the perimeter of all polygons, or
the sum of perimeters, in \code{x}. When \code{x} contains polygons with
holes, the perimeter of the holes is included (i.e., perimeter of holes
is positive, not negative).
Units of the returned value are same as units of coordinates
in \code{x}. E.g.,
meters if coordinates in \code{x} are UTM meters,
decimal degrees if coordinates in \code{x} are lat-long decimal
degrees.
}
\description{
An all-R routine that computes total length of all
lines in a \code{SpatialLines*} object.
}
\details{
Provides the same answer as \code{rgeos::gLength}, but is
all-R (does not require rgeos Java library) and does not
fire a warning if \code{x} is un-projected (i.e., lat-long).
}
\examples{
# Length of Hawaii coastline, in kilometers
l <- lineLength( HI.coast ) / 1000
}
\seealso{
\code{sp::SpatialLines-class}
}
\author{
Trent McDonald
}
|
f7515a79f77d5c8425a0f1e0eef9bd37d1d57bd6
|
92427118c4b0dd55689ce8e6cb57555743e54c6b
|
/tools/dockerfiles/scripts/sc_tools/sc_rna_cluster.R
|
1fc5c5846e3f4f61b0d73e4aba6b132c21914ee6
|
[] |
no_license
|
Barski-lab/workflows
|
c8edc08020a5fedcd4d49cd16ac6ea3bda8b9c15
|
839de7ba3d04bf73c5884b0e30e0b332fcc07830
|
refs/heads/master
| 2023-08-16T19:43:56.821249
| 2023-08-09T17:28:26
| 2023-08-09T17:28:26
| 152,797,483
| 5
| 11
| null | 2023-07-05T20:09:52
| 2018-10-12T19:17:32
|
Common Workflow Language
|
UTF-8
|
R
| false
| false
| 28,119
|
r
|
sc_rna_cluster.R
|
#!/usr/bin/env Rscript
# sc_rna_cluster.R -- single-cell RNA-Seq cluster analysis entry point.
options(warn=-1)
options("width"=200)
# Batch-mode failure policy: print a traceback and exit non-zero on any
# uncaught error.
options(error=function(){traceback(3); quit(save="no", status=1, runLast=FALSE)})
suppressMessages(library(dplyr))
suppressMessages(library(Seurat))
suppressMessages(library(Signac))
suppressMessages(library(modules))
suppressMessages(library(forcats))
suppressMessages(library(argparse))
suppressMessages(library(tidyselect))
# Directory containing this script, recovered from the --file= argument
# that Rscript places on the command line.
HERE <- (function() {return (dirname(sub("--file=", "", commandArgs(trailingOnly=FALSE)[grep("--file=", commandArgs(trailingOnly=FALSE))])))})()
# Project-local helper modules, loaded relative to the script location.
suppressMessages(analyses <- modules::use(file.path(HERE, "modules/analyses.R")))
suppressMessages(debug <- modules::use(file.path(HERE, "modules/debug.R")))
suppressMessages(graphics <- modules::use(file.path(HERE, "modules/graphics.R")))
suppressMessages(io <- modules::use(file.path(HERE, "modules/io.R")))
suppressMessages(prod <- modules::use(file.path(HERE, "modules/prod.R")))
suppressMessages(ucsc <- modules::use(file.path(HERE, "modules/ucsc.R")))
# Export every clustering-related plot (UMAPs, silhouettes, composition
# bar charts) for each requested resolution. Plots that compare datasets,
# grouping conditions or cell-cycle phases are emitted only when the
# corresponding metadata makes them meaningful.
export_all_clustering_plots <- function(seurat_data, args){
    Idents(seurat_data) <- "new.ident" # safety measure
    # Composition plots are built from data downsampled to the smallest
    # dataset so percentages are comparable across samples.
    downsampled_to <- analyses$get_min_ident_size(SplitObject(seurat_data, split.by="new.ident")) # need to split it for consistency
    print(paste("Downsampling datasets to", downsampled_to, "cells per sample"))
    downsampled_data <- subset(seurat_data, downsample=downsampled_to)
    if ("Phase" %in% colnames(seurat_data@meta.data)){
        if (length(unique(as.vector(as.character(Idents(seurat_data))))) > 1){
            graphics$composition_plot(
                data=downsampled_data,
                plot_title=paste(
                    "Composition plot,",
                    "colored by cell cycle phase,",
                    "split by dataset, downsampled"
                ),
                legend_title="Phase",
                group_by="Phase",
                split_by="new.ident",
                x_label="Dataset",
                y_label="Cells percentage",
                palette_colors=graphics$D40_COLORS,
                theme=args$theme,
                rootname=paste(args$output, "cmp_gr_ph_spl_idnt", sep="_"),
                pdf=args$pdf
            )
        }
    }
    # Iterate resolutions directly instead of `for (i in 1:length(...))`:
    # same behavior (the index was only used to fetch the value), idiomatic R.
    for (current_resolution in args$resolution) {
        graphics$dim_plot(
            data=seurat_data,
            reduction="rnaumap",
            plot_title=paste(
                "UMAP, colored by cluster,",
                "resolution", current_resolution
            ),
            legend_title="Cluster",
            group_by=paste("rna_res", current_resolution, sep="."),
            label=TRUE,
            label_color="black",
            palette_colors=graphics$D40_COLORS,
            theme=args$theme,
            rootname=paste(args$output, "umap_res", current_resolution, sep="_"),
            pdf=args$pdf
        )
        graphics$silhouette_plot(
            data=seurat_data,
            reduction="pca",
            dims=args$dimensions,
            downsample=500,
            plot_title=paste(
                "Silhouette scores, resolution",
                current_resolution
            ),
            legend_title="Cluster",
            group_by=paste("rna_res", current_resolution, sep="."),
            palette_colors=graphics$D40_COLORS,
            theme=args$theme,
            rootname=paste(args$output, "slh_res", current_resolution, sep="_"),
            pdf=args$pdf
        )
        # Per-dataset plots only make sense with more than one dataset.
        if (length(unique(as.vector(as.character(Idents(seurat_data))))) > 1){
            graphics$dim_plot(
                data=seurat_data,
                reduction="rnaumap",
                plot_title=paste(
                    "UMAP, colored by cluster,",
                    "split by dataset,",
                    "resolution", current_resolution
                ),
                legend_title="Cluster",
                group_by=paste("rna_res", current_resolution, sep="."),
                split_by="new.ident",
                label=TRUE,
                label_color="black",
                palette_colors=graphics$D40_COLORS,
                theme=args$theme,
                rootname=paste(args$output, "umap_spl_idnt_res", current_resolution, sep="_"),
                pdf=args$pdf
            )
            graphics$composition_plot(
                data=downsampled_data,
                plot_title=paste(
                    "Composition plot, colored by cluster,",
                    "split by dataset, downsampled,",
                    "resolution", current_resolution
                ),
                legend_title="Cluster",
                group_by=paste("rna_res", current_resolution, sep="."),
                split_by="new.ident",
                x_label="Dataset",
                y_label="Cells percentage",
                palette_colors=graphics$D40_COLORS,
                theme=args$theme,
                rootname=paste(args$output, "cmp_gr_clst_spl_idnt_res", current_resolution, sep="_"),
                pdf=args$pdf
            )
            graphics$composition_plot(
                data=downsampled_data,
                plot_title=paste(
                    "Composition plot, colored by dataset,",
                    "split by cluster, downsampled,",
                    "resolution", current_resolution
                ),
                legend_title="Dataset",
                group_by="new.ident",
                split_by=paste("rna_res", current_resolution, sep="."),
                x_label="Cluster",
                y_label="Cells percentage",
                palette_colors=graphics$D40_COLORS,
                theme=args$theme,
                rootname=paste(args$output, "cmp_gr_idnt_spl_clst_res", current_resolution, sep="_"),
                pdf=args$pdf
            )
        }
        # Condition-split plots require a grouping condition distinct from
        # the dataset identity and with more than one level.
        if (
            all(as.vector(as.character(seurat_data@meta.data$new.ident)) != as.vector(as.character(seurat_data@meta.data$condition))) &&
            length(unique(as.vector(as.character(seurat_data@meta.data$condition)))) > 1
        ){
            graphics$dim_plot(
                data=seurat_data,
                reduction="rnaumap",
                plot_title=paste(
                    "UMAP, colored by cluster,",
                    "split by grouping condition,",
                    "resolution", current_resolution
                ),
                legend_title="Cluster",
                group_by=paste("rna_res", current_resolution, sep="."),
                split_by="condition",
                label=TRUE,
                label_color="black",
                palette_colors=graphics$D40_COLORS,
                theme=args$theme,
                rootname=paste(args$output, "umap_spl_cnd_res", current_resolution, sep="_"),
                pdf=args$pdf
            )
            graphics$composition_plot(
                data=downsampled_data,
                plot_title=paste(
                    "Composition plot, colored by cluster,",
                    "split by grouping condition, downsampled,",
                    "resolution", current_resolution
                ),
                legend_title="Cluster",
                group_by=paste("rna_res", current_resolution, sep="."),
                split_by="condition",
                x_label="Condition",
                y_label="Cells percentage",
                palette_colors=graphics$D40_COLORS,
                theme=args$theme,
                rootname=paste(args$output, "cmp_gr_clst_spl_cnd_res", current_resolution, sep="_"),
                pdf=args$pdf
            )
            graphics$composition_plot(
                data=downsampled_data,
                plot_title=paste(
                    "Composition plot,",
                    "colored by grouping condition,",
                    "split by cluster, downsampled,",
                    "resolution", current_resolution
                ),
                legend_title="Condition",
                group_by="condition",
                split_by=paste("rna_res", current_resolution, sep="."),
                x_label="Cluster",
                y_label="Cells percentage",
                palette_colors=graphics$D40_COLORS,
                theme=args$theme,
                rootname=paste(args$output, "cmp_gr_cnd_spl_clst_res", current_resolution, sep="_"),
                pdf=args$pdf
            )
        }
        # Cell-cycle plots only when Phase metadata is present.
        if ("Phase" %in% colnames(seurat_data@meta.data)){
            graphics$dim_plot(
                data=seurat_data,
                reduction="rnaumap",
                plot_title=paste(
                    "UMAP, colored by cluster,",
                    "split by cell cycle phase,",
                    "resolution", current_resolution
                ),
                legend_title="Cluster",
                group_by=paste("rna_res", current_resolution, sep="."),
                split_by="Phase",
                label=TRUE,
                label_color="black",
                alpha=0.5,
                palette_colors=graphics$D40_COLORS,
                theme=args$theme,
                rootname=paste(args$output, "umap_spl_ph_res", current_resolution, sep="_"),
                pdf=args$pdf
            )
            graphics$composition_plot(
                data=downsampled_data,
                plot_title=paste(
                    "Composition plot,",
                    "colored by cell cycle phase,",
                    "split by cluster, downsampled,",
                    "resolution", current_resolution
                ),
                legend_title="Phase",
                group_by="Phase",
                split_by=paste("rna_res", current_resolution, sep="."),
                x_label="Cluster",
                y_label="Cells percentage",
                palette_colors=graphics$D40_COLORS,
                theme=args$theme,
                rootname=paste(args$output, "cmp_gr_ph_spl_clst_res", current_resolution, sep="_"),
                pdf=args$pdf
            )
        }
    }
    # Free the downsampled copy explicitly -- Seurat objects are large.
    rm(downsampled_data)
    gc(verbose=FALSE)
}
# Export gene-expression visualizations: per-cell UMAP feature/density plots
# for each gene of interest, plus per-resolution dot plots and violin plots.
export_all_expression_plots <- function(seurat_data, args) {
    DefaultAssay(seurat_data) <- "RNA" # safety measure
    Idents(seurat_data) <- "new.ident" # safety measure
    if (!is.null(args$genes) && length(args$genes) > 0){
        # Iterate genes directly instead of `for (i in 1:length(...))`; the
        # original also reused the index variable `i` in the nested loops
        # below, which direct iteration removes.
        for (current_gene in args$genes){
            graphics$feature_plot(
                data=seurat_data,
                features=current_gene,
                labels=current_gene,
                reduction="rnaumap",
                plot_title="UMAP, gene expression",
                label=FALSE,
                order=TRUE,
                max_cutoff="q99", # to prevent cells with overexpressed gene from distoring the color bar
                combine_guides="keep",
                width=800,
                height=800,
                theme=args$theme,
                rootname=paste(args$output, "xpr_per_cell", current_gene, sep="_"),
                pdf=args$pdf
            )
            graphics$expression_density_plot(
                data=seurat_data,
                features=current_gene,
                reduction="rnaumap",
                plot_title="UMAP, gene expression density",
                joint=FALSE,
                width=800,
                height=800,
                theme=args$theme,
                rootname=paste(args$output, "xpr_per_cell_sgnl", current_gene, sep="_"),
                pdf=args$pdf
            )
        }
    }
    for (current_resolution in args$resolution) {
        # Cluster identities for this resolution drive the dot/violin plots.
        Idents(seurat_data) <- paste("rna_res", current_resolution, sep=".")
        graphics$dot_plot(
            data=seurat_data,
            features=args$genes,
            plot_title=paste(
                "Gene expression dot plot,",
                "resolution", current_resolution
            ),
            x_label="Genes",
            y_label="Clusters",
            cluster_idents=FALSE,
            theme=args$theme,
            rootname=paste(args$output, "xpr_avg_res", current_resolution, sep="_"),
            pdf=args$pdf
        )
        if (!is.null(args$genes) && length(args$genes) > 0){
            for (current_gene in args$genes){
                graphics$vln_plot(
                    data=seurat_data,
                    features=current_gene,
                    labels=current_gene,
                    plot_title=paste(
                        "Gene expression violin plot,",
                        "resolution", current_resolution
                    ),
                    legend_title="Cluster",
                    log=TRUE,
                    pt_size=0,
                    combine_guides="collect",
                    width=800,
                    height=600,
                    palette_colors=graphics$D40_COLORS,
                    theme=args$theme,
                    rootname=paste(args$output, "xpr_dnst_res", current_resolution, current_gene, sep="_"),
                    pdf=args$pdf
                )
            }
        }
    }
    Idents(seurat_data) <- "new.ident" # safety measure
}
# Export a gene-markers expression heatmap per resolution: filters the
# markers table to significant, sufficiently-expressed genes, keeps up to
# 10 top genes per cluster, and annotates columns with whatever metadata
# (dataset, condition, custom_* fields) is informative.
export_heatmaps <- function(seurat_data, markers, args){
    DefaultAssay(seurat_data) <- "RNA" # safety measure
    Idents(seurat_data) <- "new.ident" # safety measure
    # Iterate resolutions directly instead of `for (i in 1:length(...))` --
    # same behavior, idiomatic R.
    for (current_resolution in args$resolution) {
        grouped_markers <- markers %>%
                           dplyr::filter(.$resolution==current_resolution) %>% # shouldn't fail even if resolution is not present
                           dplyr::select(-c("resolution")) %>%
                           dplyr::filter(.$p_val_adj <= 0.05) %>% # to have only significant gene markers
                           dplyr::filter(.$pct.1 >= 0.1) %>% # to have at least 10% of cells expressing this gene
                           dplyr::group_by(cluster) %>%
                           dplyr::top_frac(n=-0.25, wt=p_val_adj) %>% # get 25% of the most significant gene markers
                           dplyr::top_n(
                               n=10, # 10 genes per cluster
                               # n=tidyselect::all_of(floor(600/length(unique(markers$cluster)))),
                               wt=avg_log2FC
                           )
        if (nrow(grouped_markers) > 0){ # in case we don't have any markers for specific resolution
            column_annotations <- c(paste("rna_res", current_resolution, sep="."))
            if (length(unique(as.vector(as.character(seurat_data@meta.data$new.ident)))) > 1){
                column_annotations <- c(column_annotations, "new.ident") # several datasets found
            }
            if (
                all(as.vector(as.character(seurat_data@meta.data$new.ident)) != as.vector(as.character(seurat_data@meta.data$condition))) &&
                length(unique(as.vector(as.character(seurat_data@meta.data$condition)))) > 1
            ){
                column_annotations <- c(column_annotations, "condition") # several conditions found
            }
            custom_fields <- grep("^custom_", colnames(seurat_data@meta.data), value=TRUE, ignore.case=TRUE)
            if (length(custom_fields) > 0){
                column_annotations <- c(column_annotations, custom_fields) # adding all custom fields
            }
            graphics$feature_heatmap( # install.packages("magick") for better rasterization
                data=seurat_data,
                assay="RNA",
                slot="data",
                features=grouped_markers$feature,
                split_rows=forcats::fct_inorder(as.character(grouped_markers$cluster)), # fct_inorder fails with numeric
                show_rownames=TRUE,
                scale_to_max=TRUE,
                group_by=unique(column_annotations), # to not show duplicates
                palette_colors=graphics$D40_COLORS,
                heatmap_colors=c("black", "yellow"),
                plot_title=paste(
                    "Gene expression heatmap, resolution", current_resolution
                ),
                height=13*length(grouped_markers$feature),
                rootname=paste(args$output, "xpr_htmp_res", current_resolution, sep="_"),
                pdf=args$pdf
            )
        }
    }
    Idents(seurat_data) <- "new.ident" # safety measure
}
# Build the command-line interface and parse the arguments passed to the
# script. Returns a named list of parsed values (argparse defaults applied).
get_args <- function(){
parser <- ArgumentParser(description="Single-cell RNA-Seq Cluster Analysis")
parser$add_argument(
"--query",
help=paste(
"Path to the RDS file to load Seurat object from. This file should include genes",
"expression information stored in the RNA assay, as well as 'pca' and 'rnaumap'",
"dimensionality reductions applied to that assay."
),
# NOTE(review): required="True" (string) relies on argparse coercing a
# truthy string; required=TRUE is the documented form -- confirm.
type="character", required="True"
)
parser$add_argument(
"--dimensions",
help=paste(
"Dimensionality to use when constructing nearest-neighbor graph before clustering",
"(from 1 to 50). If single value N is provided, use from 1 to N dimensions. If",
"multiple values are provided, subset to only selected dimensions.",
"Default: from 1 to 10"
),
type="integer", default=10, nargs="*"
)
parser$add_argument(
"--ametric",
help=paste(
"Distance metric used when constructing nearest-neighbor graph before clustering.",
"Default: euclidean"
),
type="character", default="euclidean",
choices=c(
"euclidean", "cosine", "manhattan", "hamming"
)
)
parser$add_argument(
"--algorithm",
help=paste(
"Algorithm for modularity optimization when running clustering.",
"Default: louvain"
),
type="character", default="louvain",
choices=c(
"louvain", "mult-louvain", "slm", "leiden"
)
)
parser$add_argument(
"--resolution",
help=paste(
"Clustering resolution applied to the constructed nearest-neighbor graph.",
"Can be set as an array but only the first item from the list will be used",
"for cluster labels and gene markers in the UCSC Cell Browser when running",
"with --cbbuild and --diffgenes parameters.",
"Default: 0.3, 0.5, 1.0"
),
type="double", default=c(0.3, 0.5, 1.0), nargs="*"
)
parser$add_argument(
"--genes",
help=paste(
"Genes of interest to build genes expression plots.",
"Default: None"
),
type="character", nargs="*"
)
parser$add_argument(
"--diffgenes",
help=paste(
"Identify differentially expressed genes (putative gene markers) between each",
"pair of clusters for all resolutions.",
"Default: false"
),
action="store_true"
)
parser$add_argument(
"--logfc",
help=paste(
"For putative gene markers identification include only those genes that",
"on average have log fold change difference in expression between every",
"tested pair of clusters not lower than this value. Ignored if '--diffgenes'",
"is not set.",
"Default: 0.25"
),
type="double", default=0.25
)
parser$add_argument(
"--minpct",
help=paste(
"For putative gene markers identification include only those genes that",
"are detected in not lower than this fraction of cells in either of the",
"two tested clusters. Ignored if '--diffgenes' is not set.",
"Default: 0.1"
),
type="double", default=0.1
)
parser$add_argument(
"--onlypos",
help=paste(
"For putative gene markers identification return only positive markers.",
"Ignored if '--diffgenes' is not set.",
"Default: false"
),
action="store_true"
)
parser$add_argument(
"--testuse",
help=paste(
"Statistical test to use for putative gene markers identification.",
"Ignored if '--diffgenes' is not set.",
"Default: wilcox"
),
type="character", default="wilcox",
choices=c("wilcox", "bimod", "roc", "t", "negbinom", "poisson", "LR", "MAST", "DESeq2")
)
# Output / export toggles.
parser$add_argument(
"--pdf",
help="Export plots in PDF. Default: false",
action="store_true"
)
parser$add_argument(
"--verbose",
help="Print debug information. Default: false",
action="store_true"
)
parser$add_argument(
"--h5seurat",
help="Save Seurat data to h5seurat file. Default: false",
action="store_true"
)
parser$add_argument(
"--h5ad",
help="Save Seurat data to h5ad file. Default: false",
action="store_true"
)
parser$add_argument(
"--cbbuild",
help="Export results to UCSC Cell Browser. Default: false",
action="store_true"
)
parser$add_argument(
"--scope",
help=paste(
"Save Seurat data to SCope compatible loom file.",
"Default: false"
),
action="store_true"
)
parser$add_argument(
"--output",
help="Output prefix. Default: ./sc",
type="character", default="./sc"
)
parser$add_argument(
"--theme",
help=paste(
"Color theme for all generated plots.",
"Default: classic"
),
type="character", default="classic",
choices=c("gray", "bw", "linedraw", "light", "dark", "minimal", "classic", "void")
)
# Parallelization settings.
parser$add_argument(
"--cpus",
help="Number of cores/cpus to use. Default: 1",
type="integer", default=1
)
parser$add_argument(
"--memory",
help=paste(
"Maximum memory in GB allowed to be shared between the workers",
"when using multiple --cpus.",
"Default: 32"
),
type="integer", default=32
)
args <- parser$parse_args(commandArgs(trailingOnly = TRUE))
return (args)
}
args <- get_args()
print("Input parameters")
print(args)
if (length(args$dimensions) == 1) {
print("Adjusting --dimensions parameter as only a single value was provided")
args$dimensions <- c(1:args$dimensions[1])
print(paste("--dimensions was adjusted to", paste(args$dimensions, collapse=", ")))
}
print(
paste(
"Setting parallelization to", args$cpus, "cores, and", args$memory,
"GB of memory allowed to be shared between the processes"
)
)
prod$parallel(args)
print(paste("Loading Seurat data from", args$query))
seurat_data <- readRDS(args$query)
print("Setting default assay to RNA")
DefaultAssay(seurat_data) <- "RNA"
debug$print_info(seurat_data, args)
if (!all(c("pca", "rnaumap") %in% names(seurat_data@reductions))){
print("Loaded Seurat object doesn't have 'pca' and/or 'rnaumap' reduction(s). Exiting.")
quit(save="no", status=1, runLast=FALSE)
}
print(paste("Clustering RNA data using", paste(args$dimensions, collapse=", "), "principal components"))
seurat_data <- analyses$add_clusters(
seurat_data=seurat_data,
assay="RNA",
graph_name="rna", # will be used in all the plot generating functions
reduction="pca",
args=args
)
debug$print_info(seurat_data, args)
export_all_clustering_plots(
seurat_data=seurat_data,
args=args
)
# Keep only the genes of interest that actually exist in the loaded object;
# unknown names would make the expression plots fail downstream.
if (!is.null(args$genes)){
    print("Adjusting genes of interest to include only those that are present in the loaded Seurat object")
    args$genes <- unique(args$genes)
    args$genes <- args$genes[args$genes %in% as.vector(as.character(rownames(seurat_data)))] # with RNA assay set as default the rownames should be genes
    print(args$genes)
}
# Putative gene markers across all clustering resolutions; stays NULL unless
# --diffgenes was requested AND markers were actually found.
all_markers <- NULL
if (!is.null(args$genes) || args$diffgenes) {
    # Normalization must happen before either expression plotting or marker
    # detection, so it is done once here for both branches.
    print("Normalizing counts in RNA assay before evaluating genes expression or identifying putative gene markers")
    DefaultAssay(seurat_data) <- "RNA"
    seurat_data <- NormalizeData(seurat_data, verbose=FALSE)
    if (!is.null(args$genes)){
        print("Generating genes expression plots")
        export_all_expression_plots(seurat_data=seurat_data, args=args)
    }
    if(args$diffgenes){
        print("Identifying differentially expressed genes between each pair of clusters for all resolutions")
        all_markers <- analyses$get_markers_by_res( # either NULL or definitely not empty
            seurat_data=seurat_data,
            assay="RNA",
            resolution_prefix="rna_res",
            args=args
        )
        # Persist the marker table and render the per-resolution heatmaps only
        # when at least one marker was identified.
        if (!is.null(all_markers)){
            io$export_data(
                all_markers,
                paste(args$output, "_gene_markers.tsv", sep="")
            )
            export_heatmaps(
                seurat_data=seurat_data,
                markers=all_markers,
                args=args
            )
        }
    }
}
# Optionally build a UCSC Cellbrowser bundle from the RNA assay.
if(args$cbbuild){
    print("Exporting RNA assay to UCSC Cellbrowser")
    # Cellbrowser shows markers for a single clustering, so keep only the rows
    # for the first requested resolution and drop the now-constant column.
    if(!is.null(all_markers)){
        all_markers <- all_markers %>%
                       dplyr::filter(.$resolution==args$resolution[1]) %>% # won't fail even if resolution is not present
                       dplyr::select(-c("resolution"))
    }
    print("Reordering reductions to have rnaumap on the first place") # will be shown first in UCSC Cellbrowser
    reduc_names <- names(seurat_data@reductions)
    ordered_reduc_names <- c("rnaumap", reduc_names[reduc_names!="rnaumap"]) # we checked before that rnaumap is present
    seurat_data@reductions <- seurat_data@reductions[ordered_reduc_names]
    debug$print_info(seurat_data, args)
    ucsc$export_cellbrowser(
        seurat_data=seurat_data,
        assay="RNA",
        slot="counts",
        short_label="RNA",
        markers=all_markers, # can be NULL
        palette_colors=graphics$D40_COLORS, # to have colors correspond to the plots
        label_field=paste0("Clustering (rna ", args$resolution[1], ")"), # always use only the first resolution
        rootname=paste(args$output, "_cellbrowser", sep="")
    )
}
# Reset the default assay before serialization so downstream consumers open the
# object in a predictable state.
DefaultAssay(seurat_data) <- "RNA" # better to stick to RNA assay by default https://www.biostars.org/p/395951/#395954
print("Exporting results to RDS file")
io$export_rds(seurat_data, paste0(args$output, "_data.rds"))
# Optional export formats, each gated by its own command-line flag.
if(args$h5seurat){
    print("Exporting results to h5seurat file")
    io$export_h5seurat(seurat_data, paste0(args$output, "_data.h5seurat"))
}
if(args$h5ad){
    print("Exporting results to h5ad file")
    io$export_h5ad(seurat_data, paste0(args$output, "_data.h5ad"))
}
if(args$scope){
    print("Exporting results to SCope compatible loom file")
    io$export_scope_loom( # we save only counts slot from the RNA assay
        seurat_data,
        paste0(args$output, "_data.loom")
    )
}
|
b452902ede75d4c4a9e2968ef63637da4b90adcc
|
eb62b4e11c2fabee75b4dcfbe5ab2e11a64450b9
|
/man/get_variant_set.Rd
|
b87431f87be33f4cfd52a94addf32e05f5ca5817
|
[] |
no_license
|
cran/Rga4gh
|
af195a0de5a16298c01d9c0692715ec1e582bf5b
|
76cd4974e4a1d9fc6dcebe3c46c27f4f164b75e6
|
refs/heads/master
| 2020-12-24T10:57:01.533892
| 2016-11-07T21:07:40
| 2016-11-07T21:07:40
| 73,116,702
| 0
| 1
| null | 2017-02-08T20:45:22
| 2016-11-07T20:12:44
|
R
|
UTF-8
|
R
| false
| true
| 599
|
rd
|
get_variant_set.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get.R
\name{get_variant_set}
\alias{get_variant_set}
\title{GET a Variant Set}
\usage{
get_variant_set(client, variant_set_id)
}
\arguments{
\item{client}{A ga4gh_client object}
\item{variant_set_id}{The ID of the Variant Set}
}
\description{
GET a Variant Set
}
\examples{
## Create a client
ref_client <- ga4gh_client("http://1kgenomes.ga4gh.org", api_location = "")
\dontrun{
library(magrittr)
## Retrieve the object with id 'id'
variant_set <- ref_client \%>\% get_variant_set("id")
}
}
|
77ef98fdf4c2f87ece177ebc4507cc0aef08b928
|
67f31a9f56d85ede80920358fe40462c2cb710ed
|
/man/ctrlP.Rd
|
d5261e134412bd5a31436e68fb80821abe0485de
|
[] |
no_license
|
vh-d/VHtools
|
ff95b01424c210b3451f4ee63d5aaa016e553c2e
|
a7907e8ba370523ca92985fb73f734a3284896b8
|
refs/heads/master
| 2020-04-12T06:25:18.169942
| 2019-04-09T20:09:34
| 2019-04-09T20:09:34
| 60,918,606
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 526
|
rd
|
ctrlP.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{ctrlP}
\alias{ctrlP}
\title{read from clipboard for easy copy-paste from spreadsheet (Excel)
\code{ctrlP} reads clipboard (copied from Excel or other spreadsheet) and returns a \code{data.frame}.}
\usage{
ctrlP(dec = ",", sep = "\\t", rn = NULL, ...)
}
\description{
read from clipboard for easy copy-paste from spreadsheet (Excel)
\code{ctrlP} reads clipboard (copied from Excel or other spreadsheet) and returns a \code{data.frame}.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.