blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3c7e6c0554cfd32abe542bf1aa55457967ba9966
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/coxinterval/examples/linapprox.Rd.R
|
6a8517725072e47b55b87fa3c93b26499b3d76ca
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 392
|
r
|
linapprox.Rd.R
|
# Extracted example for coxinterval::linapprox (linear approximation of a
# step-function baseline hazard).
library(coxinterval)
### Name: linapprox
### Title: Linear approximation
### Aliases: linapprox
### ** Examples
# Fit a Cox model for doubly-censored data on the bundled dualrc data set.
fit <- coxdual(Surv(start, stop, status) ~ cluster(id)
+ trans(from, to) + z, data = dualrc, init.coxph = TRUE)
# NOTE(review): `basehaz` is not defined as a standalone object at this point;
# presumably this was meant to be head(fit$basehaz) -- confirm against the
# package's original example.
head(basehaz)
# Split the baseline hazard (time, hazard columns) by transition and keep the
# first stratum.
Haz01 <- with(fit, split(basehaz[, 1:2], basehaz[, 3]))[[1]]
# Evaluating the linear approximation at the original time points must
# reproduce the hazard values exactly.
all(Haz01$hazard == with(Haz01, linapprox(cbind(time, hazard), time)))
|
75a63f18bd261827bc7db52a837c51a2e54b3439
|
17cb64908b89c854c304e10d5a88e621728d9f37
|
/R/initialize_module.R
|
d6d3ab9fa0c4d92e9a0198826c3294811536265d
|
[] |
no_license
|
EvoNetHIV/Herbeck-et-al-Vaccine-2018
|
494387deba8f63902a03b4ba4de68eddce540c63
|
bf0ee209f8924c9ad4865a2dadfe11f7b256f845
|
refs/heads/master
| 2022-12-09T23:42:17.631572
| 2020-09-01T19:07:32
| 2020-09-01T19:07:32
| 69,389,080
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,055
|
r
|
initialize_module.R
|
#' Initialize the evonet simulation state
#'
#' Builds the EpiModel "dat" object via EpiModel::initialize.net() and then
#' layers evonet-specific setup on top: node attributes copied from the
#' network, agent attributes, viral load and CD4 values for the initially
#' infected, bookkeeping, and initial "popsumm" statistics.
#'
#' @param x network model object, passed through to EpiModel::initialize.net()
#' @param param parameter list; read here for model_sex,
#'   generic_nodal_att_values and VL_Function
#' @param init initial conditions, passed through to EpiModel::initialize.net()
#' @param control control settings, passed through to EpiModel::initialize.net()
#' @param s simulation/replicate number; stored in dat$simulation
#' @return the initialized "dat" object
#' @export
initialize_module <- function(x, param, init, control, s)
{
#Description:
# runs EpiModel function EpiModel::initialize.net()
# fills in viral load values for initial infected with initialize_infecteds_vl()
# fills in cd4 variable values for initial infected (initial CD4 value based on SPVL)
# with initialize_infecteds_cd4()
# does a few bookkeeping steps with initialize_evonet_misc()
# updates vl of initial infecteds (otw, vl jumps from initial value to spvl in one
# timestep) with viral_update_module_gamma()
# creates and inserts initial values for "popsumm" stats (calculated at end of
# each timestep_ with initialize_popsumm_dynamics()
#sets up basic EpiModel structure
dat <- EpiModel::initialize.net(x, param, init, control,s)
# need to ensure that sex attribute as been copied to dat$attr from the network.
# ideally for consistency we'd like to have all of the attributes
# included on the dat$attr list. However, when in network mode, only the
# attributes included in the formula (usually 'role') will be coppied
# so need to force copy it here until we figure a better system.
# status_evo: 1 for initially infected ("i"), 0 otherwise
dat$attr$status_evo <- rep(0,length(dat$attr$status))
dat$attr$status_evo[which(dat$attr$status=="i" )] <- 1
if(!is.null(dat[['nw']])){
dat$attr$sex <- get.vertex.attribute(dat$nw,'sex')
dat$attr$age <- get.vertex.attribute(dat$nw,'age')
dat$attr$id <- get.vertex.attribute(dat$nw,'id')
# att1 only exists when generic nodal attributes are configured
if(!is.logical(dat$param$generic_nodal_att_values))
dat$attr$att1 <- get.vertex.attribute(dat$nw,'att1')
}
# likewise, if there is going to be roles in the model, need to ensure they are copied in
if(dat$param$model_sex=="msm" &&
(is.null(dat$attr[['role']]) & !is.null(dat[['nw']]))){
dat$attr$role <- get.vertex.attribute(dat$nw,'role')
}
#if(is.null(dat$attr[['role']]) & !is.null(dat[['nw']])){
# dat$attr$role <- get.vertex.attribute(dat$nw,'role')
#}
#sets up agent attributes and initial values
dat <- initialize_agents(dat, 1)
#fills in vl variable values for initial infecteds
dat <- initialize_infecteds_vl(dat,1)
#fills in cd4 variable values for initial infecteds
#note, this must come after initialize_infecteds_vl because initial cd4 based on spvl
dat <- initialize_infecteds_cd4(dat,1)
#does a few random bookkeeping steps
dat <- initialize_evonet_misc(dat)
#updates vl of initial infecteds (otw, jumps from initial value to spvl)
#but shouldn't happen for aim3 runs
if(param$VL_Function != "aim3"){
dat <- viral_update_gamma(dat,1)
dat <- viral_update_cd4_intial_pop(dat)
}
#create list, if "save_vl_list" = TRUE to save
#individual agent's vl/cd4 values for each timestep
dat<- summary_vl_list(dat,1)
#creates and fills in initial values for "popsumm" stats (stats calculated at
#end of each timestep)
dat <- summary_popsumm(dat,1)
#keep track of current simulation/replicate
dat$simulation <- s
return(dat)
}
|
d6246fcaa98b7b59e4ab0f7741255be4aa4b010f
|
a99e5fbcc03fb66304b1a11a792f71fc7b5b7ab8
|
/R/RcppExports.R
|
522a3743024f98c47fa2a9ad07b51a79f84862f2
|
[] |
no_license
|
vh-d/RPortfolioSimilarity
|
0bb8dcc9ecb5c00c46d76c52c438c9f079f35fd3
|
ffcacb3ab54298bcf1a70eefe507936397bf90f1
|
refs/heads/master
| 2021-01-17T07:19:10.112152
| 2019-10-16T19:17:26
| 2019-10-16T19:17:26
| 21,853,502
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,839
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' Generate all possible pairs from a vector or a sequence of integer numbers
#'
#' @rdname allPairs
#' @param n maximum number of the sequence
#' @return \code{allPairsRcpp} returns an (n*(n-1)/2) x 2 integer matrix of all unique combinations (pairs) of numbers from a sequence of integers 1..n
allPairsRcpp <- function(n) {
.Call('_RPortfolioSimilarity_allPairsRcpp', PACKAGE = 'RPortfolioSimilarity', n)
}
#' Generate random variation of a matrix keeping rows and cols sums. Deprecated, use restRandMat().
#'
#'@param original_mat the original matrix that will be replicated
#'@param max_iter maximum number of iterations when converging to the rows and cols sums
#'@return numeric matrix with the same dimensions and (almost) the same rows' and columns' sums as the input matrix
restRandMatUnif <- function(original_mat, max_iter) {
.Call('_RPortfolioSimilarity_restRandMatUnif', PACKAGE = 'RPortfolioSimilarity', original_mat, max_iter)
}
#' Generate random variation of a matrix keeping rows and cols sums. Deprecated, use restRandMat().
#'
#'@param original_mat the original matrix that will be replicated
#'@param max_iter maximum number of iterations when converging to the rows and cols sums
#'@param meanl parameter of the log-normal distribution
#'@param sdl parameter of the log-normal distribution
#'@return numeric matrix with the same dimensions and (almost) the same rows' and columns' sums as the input matrix
restRandMatLNorm <- function(original_mat, max_iter, meanl = 0, sdl = 1) {
.Call('_RPortfolioSimilarity_restRandMatLNorm', PACKAGE = 'RPortfolioSimilarity', original_mat, max_iter, meanl, sdl)
}
#' Generate random variation of a matrix keeping rows and cols sums. Deprecated, use restRandMat().
#'
#'@param original_mat the original matrix that will be replicated
#'@param max_iter maximum number of iterations when converging to the rows and cols sums
#'@param shape parameter of the gamma distribution
#'@param rate parameter of the gamma distribution
#'@return numeric matrix with the same dimensions and (almost) the same rows' and columns' sums as the input matrix
restRandMatGamma <- function(original_mat, max_iter, shape, rate = 1.0) {
.Call('_RPortfolioSimilarity_restRandMatGamma', PACKAGE = 'RPortfolioSimilarity', original_mat, max_iter, shape, rate)
}
#' Generate random variation of a matrix keeping rows and cols sums
#'
#'@param original_mat the original matrix that will be replicated
#'@param max_iter maximum number of iterations when converging to the rows and cols sums
#'@param type selector of the sampling distribution used for the draws
#'  (see the underlying C++ implementation) -- TODO confirm allowed values
#'@param par1 first distribution parameter (default 1.0)
#'@param par2 second distribution parameter (default 1.0)
#'@return numeric matrix with the same dimensions and (almost) the same rows' and columns' sums as the input matrix
restRandMat <- function(original_mat, max_iter, type, par1 = 1.0, par2 = 1.0) {
.Call('_RPortfolioSimilarity_restRandMat', PACKAGE = 'RPortfolioSimilarity', original_mat, max_iter, type, par1, par2)
}
#' Compute cosine similarity of two numeric vectors
#'
#'@param a numeric vector
#'@param b numeric vector
#'@details
#'\code{a} and \code{b} are expected to have the same length.
#'\deqn{\sum(a*b)/\sqrt(\sum(a^2)\sum(b^2))}
#'@return Numeric value.
vCosSimilarity <- function(a, b) {
.Call('_RPortfolioSimilarity_vCosSimilarity', PACKAGE = 'RPortfolioSimilarity', a, b)
}
#' Compute weighted cosine similarity of two numeric vectors
#'
#'@param a numeric vector
#'@param b numeric vector
#'@param w numeric vector of weights
#'@details
#'\code{a}, \code{b} and \code{w} are expected to have the same length.
#'\deqn{\sum(a*b*w)/\sqrt\sum(w*a^2)\sqrt\sum(w*b^2)}
#'@return Numeric value of cosine similarity
wtVCosSimilarity <- function(a, b, w) {
.Call('_RPortfolioSimilarity_wtVCosSimilarity', PACKAGE = 'RPortfolioSimilarity', a, b, w)
}
#' Compute soft weighted cosine similarity of two numeric vectors
#'
#'@param a numeric vector
#'@param b numeric vector
#'@param weights numeric matrix of weights
#'
#'@details
#'\code{a} and \code{b} are expected to have the same length.
#'Soft cosine similarity allows accounting for (cor)relations between features.
#'@return Numeric value of cosine similarity
sftVCosSimilarity <- function(a, b, weights) {
.Call('_RPortfolioSimilarity_sftVCosSimilarity', PACKAGE = 'RPortfolioSimilarity', a, b, weights)
}
#' Compute cosine similarity for every pair of rows from given matrix
#'
#'@param input_mat numeric input matrix
#'@return Upper triangle matrix where \{i, j\} element is the cosine similarity of i-th and j-th row of the original matrix.
#'@seealso \code{\link{vCosSimilarity}}
mCosSimilarity <- function(input_mat) {
.Call('_RPortfolioSimilarity_mCosSimilarity', PACKAGE = 'RPortfolioSimilarity', input_mat)
}
#' Compute weighted cosine similarities for each pair of rows from given matrix and given weights
#'
#'@param input_mat numeric input matrix
#'@param weights numeric vector of weights
#'@details
#'\link{wtVCosSimilarity}
#'@return Upper triangle matrix where \{i, j\} element is the cosine similarity of i-th and j-th row of the original matrix.
#'@seealso \code{\link{wtVCosSimilarity}}
wtMCosSimilarity <- function(input_mat, weights) {
.Call('_RPortfolioSimilarity_wtMCosSimilarity', PACKAGE = 'RPortfolioSimilarity', input_mat, weights)
}
#' Compute soft cosine similarities for each pair of rows from given matrix and given weights
#'
#'@param input_mat numeric input matrix
#'@param weights numeric vector of weights
#'@details
#'Soft cosine similarity allows accounting for (cor)relations between features.
#'@return Upper triangle matrix where \{i, j\} element is the cosine similarity of i-th and j-th row of the original matrix.
sftMCosSimilarity <- function(input_mat, weights) {
.Call('_RPortfolioSimilarity_sftMCosSimilarity', PACKAGE = 'RPortfolioSimilarity', input_mat, weights)
}
|
a0ff535e047e188d455747ae52e3526e04b4ad71
|
b4d7a52769de93daae1ac56808a17716364be9fe
|
/run_analysis.R
|
356d3467dad1f986e98d86ae9d607807a73c07e7
|
[] |
no_license
|
zacariasyang/GettingDataCourseProject
|
5ccc1367779e38fda791609485926eb7a26ceb9a
|
b96e711b6c2a632e55952551aed5008b2931bb49
|
refs/heads/master
| 2021-01-10T01:42:57.042765
| 2015-10-25T13:11:35
| 2015-10-25T13:11:35
| 44,910,587
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,914
|
r
|
run_analysis.R
|
# run_analysis.R
# Reads the UCI HAR dataset, merges the train/test partitions, keeps only
# mean()/std() measurements, applies descriptive activity and variable names,
# and writes a tidy summary (mean of each variable per subject and activity)
# to step5.txt.
library(dplyr)

# read in datasets
features <- read.table("./UCI HAR Dataset/features.txt", stringsAsFactors = FALSE)
activityLabels <- read.table("./UCI HAR Dataset/activity_labels.txt")
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")

# combine measurements with subject number and activity label per partition,
# then stack train and test into one dataset
x_trainCombined <- cbind(x_train, subject_train, y_train)
x_testCombined <- cbind(x_test, subject_test, y_test)
x_total <- rbind(x_trainCombined, x_testCombined)

# column names: the feature names plus the two appended columns
colNames <- rbind(features, c(nrow(features) + 1, "subject"), c(nrow(features) + 2, "activity"))
names(x_total) <- colNames[, 2]

# extract only mean() and std() measurements together with subject and activity
tidyDataset <- x_total[, grep("std\\(|mean\\(|subject|activity", names(x_total))]

# replace numeric activity codes with descriptive labels
tidyDataset$activity <- activityLabels[tidyDataset$activity, 2]

# expand abbreviated variable names
names(tidyDataset) <- gsub("^f", "frequency", names(tidyDataset))
names(tidyDataset) <- gsub("^t", "time", names(tidyDataset))
names(tidyDataset) <- gsub("Acc", "Acceleration", names(tidyDataset))
names(tidyDataset) <- gsub("Gyro", "Gyroscope", names(tidyDataset))
names(tidyDataset) <- gsub("Mag", "Magnitude", names(tidyDataset))

# mean of each variable by subject and activity; across() replaces the
# long-deprecated summarise_each(funs(mean))
summarisedTidyDataset <- tidyDataset %>%
  group_by(subject, activity) %>%
  summarise(across(everything(), mean), .groups = "drop")

# generate text file
write.table(summarisedTidyDataset, file = "step5.txt", row.names = FALSE)
|
c7f03bf7101f073cbcb192d31a840bb192948bac
|
42cd821229e6d92091f0c9ce04433fd902a622b9
|
/man/sub-.bytefactor.Rd
|
1f18ec423ccad0d1ef051b3e1da01e8417ed2ff9
|
[
"MIT"
] |
permissive
|
coolbutuseless/smallfactor
|
7df372c36967437651bcc3d8adf3fc9ce93bac3a
|
c3b1e7d8d5c0faf9d7e947962e6d930375b7ad52
|
refs/heads/main
| 2023-07-25T20:10:51.233570
| 2021-09-06T09:37:09
| 2021-09-06T09:37:09
| 400,427,312
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 293
|
rd
|
sub-.bytefactor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bytefactor.R
\name{[.bytefactor}
\alias{[.bytefactor}
\title{Subset a bytefactor}
\usage{
\method{[}{bytefactor}(x, ...)
}
\arguments{
\item{x}{bytefactor}
\item{...}{ignored}
}
\description{
Subset a bytefactor
}
|
75fb6e57f1ff075d8bc11a17a0f991be3aa8ce4b
|
5aab0c7c5a35775f6278c1e307886df103c74e91
|
/data/Roptimize/basic-operations.R
|
ab410dacf9f53c1f94616de833025bf081edd492
|
[] |
no_license
|
ims-fhs/badhacker
|
070bdf5c8d7e8fc50d2c05812a85683abeb411aa
|
54cc42204aadcb239e6b39e265a9cc2a48ab722d
|
refs/heads/master
| 2022-03-13T14:12:53.800997
| 2022-03-03T08:30:00
| 2022-03-03T08:30:00
| 117,236,946
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,227
|
r
|
basic-operations.R
|
#' Move all vehicles from one base to another address
#'
#' Copies every vehicle at the old base to the new base (with fresh ids and
#' the new coordinates) and disables the originals: their name gets an "X "
#' prefix and their daily time window is set to 00:00--00:00 (not available).
#'
#' @param old_community A character, the name of the old community
#' @param old_address A character, the address of the old community
#' @param new_community A character, the name of the new community
#' @param new_address A character, the address of the new community
#' @param sim_vehicles sim_vehicles
#' @param new_bases A data.frame
#'
#' @return sim_vehicles with the moved copies appended and the originals
#'   disabled
move_base <- function(old_community, old_address,
new_community, new_address,
sim_vehicles, new_bases) {
# Move location of base or delete a base
# sanity checks: community and address must select the same rows in each table
cond1 <- old_community == sim_vehicles$community
cond2 <- old_address == sim_vehicles$address
assertthat::assert_that(all(cond1 == cond2))
cond3 <- new_community == new_bases$community
cond4 <- new_address == new_bases$address
assertthat::assert_that(all(cond3 == cond4))
# build vehicles at new base
cond_old_community <- sim_vehicles$community == old_community &
sim_vehicles$address == old_address
vehicles_temp <- sim_vehicles[cond_old_community, ]
old_ids <- vehicles_temp$id
nr_vehicles_temp <- nrow(vehicles_temp)
max_id <- max(sim_vehicles$id)
# new ids continue after the current maximum id
temp_id <- c((max_id + 1):(max_id + nr_vehicles_temp))
# Changes for new vehicles
vehicles_temp$id <- temp_id
vehicles_temp$community <- new_community
vehicles_temp$address <- new_address
# rbind old and new vehicles
sim_vehicles <- rbind(sim_vehicles, vehicles_temp)
cond_old <- sim_vehicles$id %in% old_ids
# mark originals with "X " and disable them: keep the seasonal part of the
# schedule (before "|") and replace the time window with 00:00--00:00
sim_vehicles$name[cond_old] <- paste0("X ", sim_vehicles$name[cond_old])
sim_vehicles$schedule[cond_old] <- paste0(
stringr::str_split_fixed(string = sim_vehicles$schedule[cond_old],
pattern = "\\|",
n = 2)[, 1],
"|00:00--00:00")
# NOTE(review): these resets apply to ALL rows, not just cond_old --
# presumably recomputed later by data911::update_vehicles(); confirm
sim_vehicles$shift_from_simtime <- 0L
sim_vehicles$shift_to_simtime <- 0L
# new base coordinates
cond_new_community <- new_bases$community == new_community &
new_bases$address == new_address
assertthat::assert_that(sum(cond_new_community) == 1)
new_base_lat <- new_bases$lat[cond_new_community]
new_base_lng <- new_bases$lng[cond_new_community]
# replacement of base_lat and lat (current position reset to the new base)
sim_vehicles$base_lat[sim_vehicles$id %in% temp_id] <- new_base_lat
sim_vehicles$base_lng[sim_vehicles$id %in% temp_id] <- new_base_lng
sim_vehicles$lat[sim_vehicles$id %in% temp_id] <- new_base_lat
sim_vehicles$lng[sim_vehicles$id %in% temp_id] <- new_base_lng
message(paste0("Move base from ",
paste(old_community, old_address, collapse = ", "),
" to ", paste(new_community, new_address, collapse = ", ")))
return(sim_vehicles)
}
#' Move one vehicle (id) from one address (base) to another (new base) by vehicle_id
#'
#' The correct coordinates are retrieved from look-up in "new_bases".
#' The vehicle at the old address is set to 00:00--00:00 (not available) and
#' a copy with a new id ("M " prefix) is appended at the new base.
#'
#' If you want to move a vehicle by name, first search all vehicle ID via
#' `vehicle_ids <- vehicle$id[which(v$name == "My great name")]`
#'
#' @param sim_vehicles sim_vehicles
#' @param vehicle_id An integer, the vehicle_id of the vehicle to be moved
#' @param new_community A character, the new community of the vehicle
#' @param new_address A character, the new address of the vehicle
#' @param new_bases A data.frame, the new bases
#'
#' @return sim_vehicles
move_vehicle_id <- function(sim_vehicles, vehicle_id,
new_community, new_address, new_bases) {
# sanity check: community and address must select the same base rows
cond1 <- new_community == new_bases$community
cond2 <- new_address == new_bases$address
assertthat::assert_that(all(cond1 == cond2))
assertthat::assert_that(length(vehicle_id) == 1)
cond <- sim_vehicles$id == vehicle_id
message(paste0("Move: ", sim_vehicles$name[cond], ", id ", vehicle_id, ": ",
paste(sim_vehicles[cond, c(5, 9, 10)], collapse = " ")))
max_id <- max(sim_vehicles$id)
# copy the vehicle row; the copy becomes the moved vehicle
vehicle <- sim_vehicles[cond,]
# replace parameters
cond_replace <- new_bases$community == new_community &
new_bases$address == new_address
assertthat::assert_that(sum(cond_replace) == 1)
vehicle$lat <- new_bases$lat[cond_replace]
vehicle$lng <- new_bases$lng[cond_replace]
vehicle$base_lat <- new_bases$lat[cond_replace]
vehicle$base_lng <- new_bases$lng[cond_replace]
vehicle$id <- max_id + 1
vehicle$community <- new_community
vehicle$address <- new_address
vehicle$name <- paste0("M ", vehicle$name)
# disable the original: keep the seasonal part of the schedule, replace the
# time window with 00:00--00:00
sim_vehicles$schedule[cond] <- paste0(
stringr::str_split_fixed(string = sim_vehicles$schedule[cond],
pattern = "\\|",
n = 2)[, 1],
"|00:00--00:00")
# NOTE(review): resets apply to ALL rows, not only the moved vehicle --
# presumably recomputed later by data911::update_vehicles(); confirm
sim_vehicles$shift_from_simtime <- 0L
sim_vehicles$shift_to_simtime <- 0L
sim_vehicles$name[cond] <- paste0("X ", sim_vehicles$name[cond])
# rbind
sim_vehicles <- rbind(sim_vehicles, vehicle)
message(paste0("To: ", vehicle$address, " in ", vehicle$community))
return(sim_vehicles)
}
#' Add new vehicle name to sim_vehicles
#'
#' A "N" for New is added to the name as prefix. new_bases is needed to look up
#' coordinates of the (new) base.
#' If the new_schedule corresponds to a night shift, the vehicle ID is
#' duplicated automatically (two rows, two new ids).
#'
#' @param sim_vehicles the sim_vehicles
#' @param origin_date A POSIXct, the origin date
#' @param new_name A character, the new name of the team
#' @param new_organisation A character, the new organisation
#' @param new_type A character, the new type, e.g. RTW, NEF, ...
#' @param new_schedule A character, the new teams' schedule
#' @param new_wdays A character, the weekdays for the new vehicle in the form
#'   "1,2,3,4,5,6,7" where 1 = Monday, 7 = Sunday.
#' @param new_community A character, the new community
#' @param new_zipcode A character, the zip code of the community
#' @param new_address A character, the new address
#' @param new_bases A data.frame, the lookup-table. Either an existing one or
#'   a list of new bases.
#'
#' @return sim_vehicles with the new vehicle row(s) appended
add_vehicle_name <- function(sim_vehicles, origin_date, new_name, new_organisation,
                             new_type, new_schedule, new_wdays, new_community,
                             new_zipcode, new_address, new_bases) {
  # warn if the name (plain or already "N "-prefixed) is taken
  dups_ind <- which(sim_vehicles$name == new_name |
                      sim_vehicles$name == paste0("N ", new_name))
  # fix: which() returns integer positions, so the original any(dups_ind)
  # only worked through implicit integer -> logical coercion; test the
  # length explicitly
  if (length(dups_ind) > 0) {
    warning("New sim_vehicle name already exists")
    message("Old names show ", length(sim_vehicles$name[dups_ind]),
            " duplicated names ", new_name)
    message(paste0("Old: \"", sim_vehicles$name[dups_ind[1]], "\", id ",
                   sim_vehicles$id[dups_ind[1]], ": ",
                   paste(sim_vehicles[dups_ind[1], c(5, 9, 10, 8, 6)],
                         collapse = " ")))
  }
  # start from an empty copy of the table to inherit column structure
  new_vehicle <- sim_vehicles[FALSE, ]
  new_vehicle[1, ] <- 0
  new_vehicle$id <- 1 # set in remove_24h_crossings_by_id_duplication() below
  new_vehicle$name <- paste0("N ", new_name)
  new_vehicle$organisation <- new_organisation
  new_vehicle$type <- new_type
  new_vehicle$schedule <- new_schedule
  new_vehicle$shift_weekday <- new_wdays
  # look up base coordinates; community/address must match exactly one base
  cond_replace <- new_bases$community == new_community &
    new_bases$address == new_address
  if (sum(cond_replace) != 1) {
    stop("community and address not found in new_bases.")
  }
  new_vehicle$lat <- new_bases$lat[cond_replace]
  new_vehicle$lng <- new_bases$lng[cond_replace]
  new_vehicle$base_lat <- new_bases$lat[cond_replace]
  new_vehicle$base_lng <- new_bases$lng[cond_replace]
  new_vehicle$community <- new_community
  new_vehicle$zipcode <- new_zipcode
  new_vehicle$address <- new_address
  # night shifts crossing midnight are split into two rows by data911
  new_vehicle <- data911::remove_24h_crossings_by_id_duplication(
    vehicles = new_vehicle)
  if (nrow(new_vehicle) == 1) {
    new_vehicle$id <- max(sim_vehicles$id) + 1
    message(paste0("New: \"", new_vehicle$name, "\", id ", new_vehicle$id, ": ",
                   paste(new_vehicle[1, c(5, 9, 10, 8, 6)], collapse = " ")))
  } else {
    new_vehicle$id <- max(sim_vehicles$id) + c(1:2)
    message(paste0("New: \"", new_vehicle$name[1], "\", id ", new_vehicle$id[1], ": ",
                   paste(new_vehicle[1, c(5, 9, 10, 8, 6)], collapse = " ")))
    message(paste0("New: \"", new_vehicle$name[2], "\", id ", new_vehicle$id[2], ": ",
                   paste(new_vehicle[2, c(5, 9, 10, 8, 6)], collapse = " ")))
  }
  sim_vehicles <- rbind(sim_vehicles, new_vehicle)
  # recompute simtime fields for the extended fleet
  sim_vehicles <- data911::update_vehicles(year = as.numeric(format(origin_date, "%Y")),
                                           origin_date = origin_date,
                                           vehicles = sim_vehicles)
  return(sim_vehicles)
}
#' Remove one vehicle ID
#'
#' The vehicle is disabled via its schedule (time window set to 00:00--00:00)
#' rather than deleting the row; its name gets an "X " prefix.
#'
#' @param sim_vehicles sim_vehicles
#' @param vehicle_id An integer, the vehicle ID
#' @param origin_date A POSIXct, the origin date of the scenario.
#'   NOTE(review): origin_date is not used in this function, although the
#'   original docs mention running data911::update_vehicles() -- confirm
#'   whether the caller is expected to run that update afterwards.
#'
#' @return sim_vehicles
remove_vehicle_id <- function(sim_vehicles, vehicle_id, origin_date) {
assertthat::assert_that(length(vehicle_id) == 1)
cond <- sim_vehicles$id == vehicle_id
message(paste0("Removed: ", sim_vehicles$name[cond], ", id ", vehicle_id, ": ",
paste(sim_vehicles[cond, c(5, 9, 10)], collapse = " ")))
# keep the seasonal part of the schedule (before "|"), disable the daily
# time window
sim_vehicles$schedule[cond] <- paste0(
stringr::str_split_fixed(string = sim_vehicles$schedule[cond],
pattern = "\\|",
n = 2)[, 1],
"|00:00--00:00")
# NOTE(review): resets apply to ALL rows, not only the removed vehicle --
# presumably recomputed later by data911::update_vehicles(); confirm
sim_vehicles$shift_from_simtime <- 0L
sim_vehicles$shift_to_simtime <- 0L
sim_vehicles$name[cond] <- paste0("X ", sim_vehicles$name[cond])
return(sim_vehicles)
}
#' Edit schedule of one vehicle ID
#'
#' Updates via data911::update_vehicles() due to changes in "schedule".
#' At least one of new_schedule / new_wdays must be supplied.
#'
#' @param sim_vehicles sim_vehicles
#' @param vehicle_id An integer, the vehicle ID
#' @param origin_date A POSIXct, the origin date of the scenario
#' @param new_schedule A character of the form "Jan-01--Dez-31|07:00--17:00"
#' @param new_wdays A character of the form "1,2,3,4,5,6,7".
#'   1 = Monday, ... 7 = Sunday.
#'
#' @return sim_vehicles
edit_vehicle_schedule <- function(sim_vehicles, vehicle_id, origin_date,
                                  new_schedule = NULL,
                                  new_wdays = NULL) {
  # at least one of the two optional arguments must be given
  assertthat::assert_that(is.null(new_schedule) + is.null(new_wdays) < 2)
  assertthat::assert_that(length(vehicle_id) == 1)
  # fix: validate weekdays only when supplied -- strsplit() errors on NULL
  # ("non-character argument"), so the original unconditional check broke
  # schedule-only edits even though new_wdays = NULL is the default
  if (!is.null(new_wdays)) {
    assertthat::assert_that(all(as.integer(unlist(
      strsplit(x = new_wdays, split = ","))) %in% c(1:7)))
  }
  cond <- sim_vehicles$id == vehicle_id
  message(paste0("Old: ", sim_vehicles$name[cond], ", id ", vehicle_id, ": ",
                 paste(sim_vehicles[cond, c(5, 9, 10)], collapse = " ")))
  if (!is.null(new_schedule)) {
    sim_vehicles$schedule[cond] <- new_schedule
  }
  if (!is.null(new_wdays)) {
    sim_vehicles$shift_weekday[cond] <- new_wdays
  }
  # recompute simtime fields from the edited schedule/weekdays
  sim_vehicles <- data911::update_vehicles(as.numeric(format(origin_date, "%Y")),
                                           origin_date, sim_vehicles)
  message(paste0("Changed to: ",
                 paste(sim_vehicles[cond, c(5, 9, 10)], collapse = " ")))
  return(sim_vehicles)
}
|
26dcc1648cffe172a75971cc952273f5741034fd
|
ebbe08d58a57ae2e9d308a12df500e1e0ef8d098
|
/wgk/Figures/Fig5_TF_net.R
|
aacc9ee551b8f5924cc6677bd3b99ab7b9abc30c
|
[] |
no_license
|
Drizzle-Zhang/bioinformatics
|
a20b8b01e3c6807a9b6b605394b400daf1a848a3
|
9a24fc1107d42ac4e2bc37b1c866324b766c4a86
|
refs/heads/master
| 2022-02-19T15:57:43.723344
| 2022-02-14T02:32:47
| 2022-02-14T02:32:47
| 171,384,799
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,278
|
r
|
Fig5_TF_net.R
|
################################
## 1. EXP/TF AUC trend plots (gene expression and regulon activity
##    along the chondrocyte lineage axis)
################################
library(Seurat)
library(ggplot2)
library(SCENIC)
# NOTE(review): require() only returns FALSE on failure; library() would
# fail loudly and is preferred for mandatory dependencies.
require("RColorBrewer")
library(maSigPro)
# Load the annotated chondrocyte Seurat object and keep only the child
# samples (batches C4, C6, M1, M2, M3); free the full object afterwards.
seurat.chon <- readRDS('/home/yzj/JingMA_NEW/res/Harmony/ALL/RDS/seurat_celltype_Chond.Rdata')
seurat.child <- subset(seurat.chon, subset = batch %in% c('C4', 'C6', 'M1', 'M2', 'M3'))
rm(seurat.chon)
# plot single gene
#sample.cells <- sample(colnames(seurat.child),10000)
#sample.seurat.child <- subset(seurat.child,cells=sample.cells)
# Harmony component 2 serves as the x-axis / ordering coordinate below.
Harmony2 <- seurat.child@reductions$harmony@cell.embeddings[, 'harmony_2']
mat.gene <- seurat.child@assays$RNA@data
# SCENIC regulon activity (AUC), de-duplicated and restricted to the
# cells of the child subset (same column order as mat.gene).
regulonAUC <- readRDS(file='/home/yzj/JingMA_NEW/res/SCENIC_main/int/3.4_regulonAUC.Rds')
regulonAUC <- regulonAUC[onlyNonDuplicatedExtended(rownames(regulonAUC)), colnames(mat.gene)]
mat.auc <- as.matrix(regulonAUC@assays@data@listData$AUC)
# One row per cell; columns are genes plus regulons, then the trajectory
# coordinate and annotations. Rows are sorted along Harmony2.
df.pc.gene <- data.frame(t(rbind(as.matrix(mat.gene), mat.auc)), check.names = F)
df.pc.gene$Harmony2 <- Harmony2
df.pc.gene$celltype <- seurat.child$celltype
df.pc.gene$status <- seurat.child$type
df.pc.gene <- df.pc.gene[order(Harmony2, decreasing = F),]
df.pc.gene$idx <- 1:nrow(df.pc.gene)
# Fixed colour per cell type.
colors <- c("#EE9572","#B2DF8A" ,"#A6CEE3","#9999FF")
names(colors) <- c('CSC', 'C0', 'C1', 'C2')
########################
# TF AUC: one panel per regulon -- AUC against Harmony2, one smoothed
# curve per disease status, panels stacked with the `/` operator
# (patchwork; NOTE(review) patchwork is only library()'d in section 2 --
# confirm it is attached before running this loop).
########################
# Alternative regulon set for the "down" figure:
# vec.TF <- c('SOX8 (158g)','SOX5 (218g)', 'DBP (45g)')
# type='down'
vec.TF <- c('ATF3 (83g)','EGR1 (264g)', 'EGR3 (53g)')
type='up'
for (i in 1:length(vec.TF)) {
  TF <- vec.TF[i]
  # Subset to the columns needed for this panel; rename the regulon
  # column to the fixed name 'TF' so aes() below can reference it.
  df.plot <- df.pc.gene[, c('idx', 'Harmony2', 'celltype', 'status', TF)]
  names(df.plot) <- c('idx', 'Harmony2', 'celltype', 'status', 'TF')
  p1 <- ggplot(data = df.plot, aes(x = Harmony2,
                                   linetype = status,
                                   y = TF)) +
    geom_point(aes(color = celltype), size = 0.0000001) +
    scale_color_manual(labels = c('CSC', 'C0', 'C1', 'C2'),values = colors) +
    xlim(-30, 10) +
    geom_smooth(color = '#696969',size=0.5) + theme_classic()+
    labs(x = '', y = '') +
    theme(panel.background=element_rect(fill='transparent', color='black',size = 0.5),
          axis.text = element_blank(),axis.ticks = element_blank(),plot.margin = unit(c(0.1,0.1,-0.5,-0.5), "cm"),
          legend.position = 'none') +
    # Label the panel with the regulon name in the top-right corner.
    annotate('text', label = TF, x = 9, y = max(df.plot$TF),
             hjust = 1, vjust = 1, size = 2)
  if (i == 1) {
    p <- p1
  } else {
    p <- p / p1
  }
}
# ggsave(paste('JingMA_NEW/res/compMicrotia/MicrotiavsNormal_inChildren/FIG/lineage_AUC/Fig5A_TF_AUC_',type,'.pdf',sep=''),p,
#        height = 6, width = 3.5, units = 'cm')
ggsave(paste('JingMA_NEW/res/compMicrotia/MicrotiavsNormal_inChildren/FIG/lineage_AUC/Fig5B_TF_AUC_',type,'.pdf',sep=''),p,
       height = 6, width = 3.5, units = 'cm')
########################
## Gene expression panels: same layout as the AUC panels above, but the
## y-axis is the expression of the TF gene itself.
########################
# Alternative gene set for the "down" figure:
# vec.TF.exp <- c('SOX8','SOX5', 'DBP')
# type='down'
vec.TF.exp <- c('ATF3','EGR1', 'EGR3')
type='up'
for (i in 1:length(vec.TF.exp)) {
  TF <- vec.TF.exp[i]
  df.plot <- df.pc.gene[, c('idx', 'Harmony2', 'celltype', 'status', TF)]
  names(df.plot) <- c('idx', 'Harmony2', 'celltype', 'status', 'TF')
  p1 <- ggplot(data = df.plot, aes(x = Harmony2,
                                   linetype = status,
                                   y = TF)) +
    geom_point(aes(color = celltype), size = 0.0000001) + theme_classic()+
    # NOTE(review): labels here say 'TC' where the AUC panels say 'C0' --
    # confirm which label set is intended.
    scale_color_manual(labels = c('CSC', 'TC', 'C1', 'C2'),values = colors) +
    xlim(-30, 10) +
    geom_smooth(color = '#696969',size=0.5) +
    labs(x = '', y = '') +
    theme(panel.background=element_rect(fill='transparent', color='black',size = 0.5),plot.margin = unit(c(0.1,0.1,-0.5,-0.5), "cm"),
          axis.text = element_blank(),axis.ticks = element_blank(),legend.position = 'none') +
    annotate('text', label = TF, x = 9, y = max(df.plot$TF),
             hjust = 1, vjust = 1, size = 2)
  if (i == 1) {
    p <- p1
  } else {
    p <- p / p1
  }
}
# ggsave(paste('JingMA_NEW/res/compMicrotia/MicrotiavsNormal_inChildren/FIG/lineage_EXP/Fig5A_TF_EXP_',type,'.pdf',sep=''),p,
#        height = 6, width = 3.5, units = 'cm')
ggsave(paste('JingMA_NEW/res/compMicrotia/MicrotiavsNormal_inChildren/FIG/lineage_EXP/Fig5B_TF_EXP_',type,'.pdf',sep=''),p,
       height = 6, width = 3.5, units = 'cm')
################################
## 2. Network plots (TF -> target networks, down-regulated in microtia)
################################
.libPaths('/home/zy/R/x86_64-pc-linux-gnu-library/4.0')
library(Seurat)
library(patchwork)
library(tidyverse)
library(SCENIC)
library(igraph)
library(ggraph)
library(tidygraph)
path.TF.net <- '/home/disk/drizzle/wgk/TF_net/'
fc.cutoff <- 0.4
path.M123 <- '/home/disk/drizzle/wgk/microtia_chon_child_M1M2M3/'
path.cutoff <- paste0(path.M123, 'cutoff_', fc.cutoff, '/')
# Precomputed GO enrichment results (BP and MF) per comparison.
file.go.BP <- paste0(path.cutoff, 'GO_BP_all.Rdata')
list.go.BP <- readRDS(file.go.BP)
file.go.MF <- paste0(path.cutoff, 'GO_MF_all.Rdata')
list.go.MF <- readRDS(file.go.MF)
# down
file_CSC_down <- paste0(path.TF.net, 'igraph_CSC_down.RDS')
igraph_down_2 <- readRDS(file_CSC_down)
# Selected GO MF / BP terms (decreased in microtia CSC) to be merged
# into five readable category labels below.
sel.MF_SCS_down <- c('antioxidant activity',
                     'S100 protein binding',
                     'extracellular matrix structural constituent')
sel.BP_SCS_down <- c('extracellular matrix organization',
                     'cellular response to zinc ion',
                     'skeletal system development',
                     'cartilage development')
BP_SCS_down <- list.go.BP[['CSC_Microtia_decrease']]
rownames(BP_SCS_down) <- BP_SCS_down$Description
MF_SCS_down <- list.go.MF[['CSC_Microtia_decrease']]
rownames(MF_SCS_down) <- MF_SCS_down$Description
df_GO_pre <- rbind(MF_SCS_down[sel.MF_SCS_down, c('Description', 'geneID')],
                   BP_SCS_down[sel.BP_SCS_down, c('Description', 'geneID')])
# Merge the seven selected terms into five categories; a few genes are
# appended manually (TXN, COL11A1, WWP2, SCRG1, FMOD, CTGF).
vec_desc <- c('Oxidoreductase activity',
              'S100 protein binding',
              'Extracellular matrix organization',
              'Cellular response to zinc/copper ion',
              'Cartilage development')
vec_genes <- c(paste0(paste(df_GO_pre[1, 'geneID'], collapse = '/'), '/TXN'),
               paste(df_GO_pre[2, 'geneID'], collapse = '/'),
               paste0(paste(df_GO_pre[3:4, 'geneID'], collapse = '/'), '/COL11A1/'),
               paste(df_GO_pre[5, 'geneID'], collapse = '/'),
               paste0(paste(df_GO_pre[6:7, 'geneID'], collapse = '/'), '/WWP2/SCRG1/FMOD/CTGF'))
df_GO <- data.frame(Description = vec_desc, geneID = vec_genes)
# Gene membership vectors per category ('/'-separated geneID strings).
Carti <- unlist(strsplit(df_GO[df_GO$Description == 'Cartilage development', 'geneID'], '/'))
ECM <- unlist(strsplit(df_GO[df_GO$Description == 'Extracellular matrix organization', 'geneID'], '/'))
Ion <- unlist(strsplit(df_GO[df_GO$Description == 'Cellular response to zinc/copper ion', 'geneID'], '/'))
ROS <- unlist(strsplit(df_GO[df_GO$Description == 'Oxidoreductase activity', 'geneID'], '/'))
S100 <- unlist(strsplit(df_GO[df_GO$Description == 'S100 protein binding', 'geneID'], '/'))
library(RColorBrewer)
mycolor=brewer.pal(10,"Set3")
col=mycolor[c(1,3:6)]
group <- c('Carti','Ion','ECM','ROS','S100')
names(col) <- group
# Node groups: '1'-'5' = functional categories, '6' = the TFs themselves,
# '7' = everything else.
node_type <- V(igraph_down_2)$name
node_group <- rep('7', length(node_type))
node_group[node_type %in% c(Carti)] <- '1'
node_group[node_type %in% c(Ion)] <- '2'
node_group[node_type %in% c(ECM)] <- '3'
node_group[node_type %in% c(ROS)] <- '4'
node_group[node_type %in% c(S100)] <- '5'
node_group[node_type %in% c('SOX5', 'SOX8', 'DBP', 'ARID5B')] <- '6'
V(igraph_down_2)$group <- node_group
# gene expression FC
# TF nodes get an alpha derived from their scaled fold change
# ((scaled FC + 1.5) / 3 maps roughly into [0, 1]); all other nodes are
# fully opaque (FC = 1).
FCs <- V(igraph_down_2)$FC
names(FCs) <- V(igraph_down_2)$name
FC_TF <- FCs[names(FCs) %in% c('SOX5', 'SOX8', 'DBP', 'ARID5B')]
FC_alpha <- (scale(FC_TF) + 1.5) / 3
FCs[rownames(FC_alpha)] <- FC_alpha
FCs[!(names(FCs) %in% c('SOX5', 'SOX8', 'DBP', 'ARID5B'))] <- 1
V(igraph_down_2)$FC <- as.numeric(FCs)
# Draw the down-regulated TF network: edge width/alpha encode regulon
# weight, node size encodes page rank, fill encodes category, and alpha
# encodes the TF fold change computed above.
ggraph_CSC_down <- as_tbl_graph(igraph_down_2)
plot_CSC_down <-
  ggraph(ggraph_CSC_down, layout = 'stress') +
  geom_edge_link(aes(edge_width=weight, alpha = weight),color="gray",
                 arrow = arrow(length = unit(2, 'mm')),
                 end_cap = circle(1, 'mm'),
                 start_cap = circle(0.3, 'mm')) +
  scale_edge_width(range=c(0.5,1)) +
  scale_edge_alpha(range=c(0.2,1)) +
  scale_size_continuous(range = c(2,10)) +
  geom_node_point(aes(size = page_rank, fill = group, alpha = FC),
                  shape=21, color = 'transparent') +
  scale_color_manual(values = c(col,'#4169E1', 'gray')) +
  scale_fill_manual(values = c(col,'#4169E1', 'gray')) +
  # scale_alpha_manual(values = c(1,1,1,1,1,1,1, 0.1)) +
  # Small labels for category genes (groups 1-5), larger for TFs (group 6).
  geom_node_text(aes(filter = (group %in% c(1, 2, 3, 4, 5)),label=name),size=2, repel = T) +
  geom_node_text(aes(filter = group == 6,label=name),size=3) +
  theme_void() +
  theme(legend.position = 'none') +
  guides(size = F, edge_width = F, alpha = F)
# ggsave(filename = '/home/yzj/JingMA_NEW/res/compMicrotia/MicrotiavsNormal_inChildren/FIG/Fig5A_TF_CSC_down.pdf',
#        plot_CSC_down,height = 10, width = 10, units = 'cm')
ggsave(plot = plot_CSC_down, path = path.TF.net,
       filename = 'TF_CSC_down.png',
       height = 10, width = 10, units = 'cm')
# Colour-bar legend mapping node transparency to the scaled per-TF fold
# change in the down-regulated network. Sentinel rows DOWN/UP pin the
# gradient to the full [-1.5, 1.5] range.
df_plot <- data.frame(FC = scale(FC_TF), TF = rownames(scale(FC_TF)))
df_plot <- rbind(df_plot, data.frame(FC = -1.5, TF = 'DOWN'))
df_plot <- rbind(df_plot, data.frame(FC = 1.5, TF = 'UP'))
df_plot$NUL <- rep('1', nrow(df_plot))
plot_bar <-
  ggplot(data = df_plot, aes(x = TF, y = NUL, fill = FC)) +
  geom_tile() +
  scale_fill_gradient(low = 'transparent', high = '#4169E1', breaks = c(-1.5, 0, 1.5)) +
  labs(fill = expression(paste("Scaled FC"['TF']))) +
  theme(legend.title = element_text(size = 6, color = "black"),
        legend.text = element_text(size = 6, color = "black"))
# BUG FIX: this ggsave() previously saved plot_CSC_down again instead of
# the colour bar constructed above.
ggsave(plot = plot_bar, path = path.TF.net,
       filename = 'TF_CSC_down_bar.png',
       height = 5, width = 5, units = 'cm')
## Report, for every gene shown in the down network, which functional
## category/categories it belongs to (a gene may appear in several).
show.genes <- intersect(union(union(union(union(Carti,ECM),Ion),ROS),S100),node_type)
# seq_along() is safe for an empty result, unlike 1:length() which would
# iterate over c(1, 0).
for (i in seq_along(show.genes)) {
  g <- show.genes[i]
  print(paste('!!!Gene: ', g))
  if (g %in% Carti) print('Carti')
  if (g %in% ECM) print('ECM')
  if (g %in% Ion) print('Ion')
  if (g %in% ROS) print('ROS')
  if (g %in% S100) print('S100')
}
### Pie-chart legend: equal slices for the selected categories, coloured
### with the network palette, saved as "<set joined by '_'>.pdf".
vec_desc <- c('Oxidoreductase activity',
              'S100 protein binding',
              'Extracellular matrix organization',
              'Cellular response to zinc/copper ion',
              'Cartilage development')
group <- c('Carti','Ion','ECM','ROS','S100')
# Categories shown in this particular pie.
set <- c('ECM','ROS')
# One equal-percentage row per category. (Removed a stray `ncol = 1`
# argument that data.frame() silently turned into a junk column "ncol".)
pct <- data.frame(group=set,pct=rep(100/length(set),length(set)))
p<- ggplot(pct,aes(x="",y=pct,fill=group)) +
  geom_bar(stat = "identity",color="white",size =0.1) +
  scale_fill_manual(values = col[set]) +
  coord_polar(theta = "y") +
  theme(axis.text.x = element_blank(),axis.title = element_blank(),
        axis.ticks = element_blank(),
        panel.grid = element_blank(),panel.background = element_blank())+guides(fill=FALSE)
p
ggsave(paste(paste(set,collapse = '_'),'.pdf',sep=''),p,width = 3,height = 3,units = 'cm')
#####################################################################################
#####################################################################################
# up: same pipeline as the down-regulated network, for the regulons that
# increase in microtia CSCs.
file_CSC_up <- paste0(path.TF.net, 'igraph_CSC_up.RDS')
igraph_up_2 <- readRDS(file_CSC_up)
# Selected GO BP terms (increased in microtia CSC), merged into six
# readable category labels below.
sel.BP_SCS_up <- c('cell cycle arrest',
                   'negative regulation of cell growth',
                   'negative regulation of stem cell differentiation',
                   'response to oxidative stress',
                   'p38MAPK cascade',
                   'I-kappaB kinase/NF-kappaB signaling',
                   'intrinsic apoptotic signaling pathway',
                   'extrinsic apoptotic signaling pathway',
                   'response to unfolded protein',
                   'regulation of RNA stability',
                   'activation of innate immune response',
                   'cellular response to tumor necrosis factor',
                   'regulation of inflammatory response',
                   'cellular response to interleukin-1')
BP_SCS_up <- list.go.BP[['CSC_Microtia_increase']]
rownames(BP_SCS_up) <- BP_SCS_up$Description
df_GO_pre <- BP_SCS_up[sel.BP_SCS_up, c('Description', 'geneID')]
vec_desc <- c('Reduction of stem cell ability',
              'Response to oxidative stress',
              'NF-kappaB signaling and p38MAPK cascade',
              'Apoptotic signaling pathway',
              'Stability of protein and RNA',
              'Immune and inflammatory response')
vec_genes <- c(paste(df_GO_pre[1:3, 'geneID'], collapse = '/'),
               paste(df_GO_pre[4, 'geneID'], collapse = '/'),
               paste(df_GO_pre[5:6, 'geneID'], collapse = '/'),
               paste(df_GO_pre[7:8, 'geneID'], collapse = '/'),
               paste(df_GO_pre[9:10, 'geneID'], collapse = '/'),
               paste(df_GO_pre[11:14, 'geneID'], collapse = '/'))
df_GO <- data.frame(Description = vec_desc, geneID = vec_genes)
# Gene membership vectors per merged category.
Stem <- unlist(strsplit(df_GO[df_GO$Description == 'Reduction of stem cell ability', 'geneID'], '/'))
ROS <- unlist(strsplit(df_GO[df_GO$Description == 'Response to oxidative stress', 'geneID'], '/'))
NFK <- unlist(strsplit(df_GO[df_GO$Description == 'NF-kappaB signaling and p38MAPK cascade', 'geneID'], '/'))
Apop <- unlist(strsplit(df_GO[df_GO$Description == 'Apoptotic signaling pathway', 'geneID'], '/'))
Stab <- unlist(strsplit(df_GO[df_GO$Description == 'Stability of protein and RNA', 'geneID'], '/'))
IL <- unlist(strsplit(df_GO[df_GO$Description == 'Immune and inflammatory response', 'geneID'], '/'))
library(RColorBrewer)
mycolor=brewer.pal(10,"Set3")
col=mycolor[c(1,3:7)]
group <- c('Apop','IL','NFK','Stem','ROS','Stab')
names(col) <- group
# Node groups: '1'-'6' = functional categories, '7' = TFs, '8' = other.
node_type <- V(igraph_up_2)$name
node_group <- rep('8', length(node_type))
node_group[node_type %in% c(Apop)] <- '1'
node_group[node_type %in% c(IL)] <- '2'
node_group[node_type %in% c(NFK)] <- '3'
node_group[node_type %in% c(Stem)] <- '4'
node_group[node_type %in% c(ROS)] <- '5'
node_group[node_type %in% c(Stab)] <- '6'
node_group[node_type %in% c('EGR1', 'KLF10', 'JUNB', 'REL', 'EGR3','ATF3', 'HIVEP3', 'IRX2', 'EGR2', 'KLF2', 'BCL3',
                            'CEBPB', 'CEBPD', 'STAT5A')] <- '7'
V(igraph_up_2)$group <- node_group
# gene expression FC
# TF nodes get an alpha derived from their scaled fold change; all other
# nodes are fully opaque (FC = 1).
FCs <- V(igraph_up_2)$FC
names(FCs) <- V(igraph_up_2)$name
FC_TF <- FCs[names(FCs) %in% c('EGR1', 'KLF10', 'JUNB', 'REL', 'EGR3','ATF3', 'HIVEP3', 'IRX2', 'EGR2', 'KLF2', 'BCL3',
                               'CEBPB', 'CEBPD', 'STAT5A')]
FC_alpha <- (scale(FC_TF) + 1.5) / 3
FCs[rownames(FC_alpha)] <- FC_alpha
FCs[!(names(FCs) %in% c('EGR1', 'KLF10', 'JUNB', 'REL', 'EGR3','ATF3', 'HIVEP3', 'IRX2', 'EGR2', 'KLF2', 'BCL3',
                        'CEBPB', 'CEBPD', 'STAT5A'))] <- 1
V(igraph_up_2)$FC <- as.numeric(FCs)
# Draw the up-regulated TF network (red TF nodes); same aesthetics as the
# down network above.
ggraph_CSC_up <- as_tbl_graph(igraph_up_2)
plot_CSC_up <-
  ggraph(ggraph_CSC_up, layout = 'stress') +
  geom_edge_link(aes(edge_width=weight, alpha = weight),color="gray",
                 arrow = arrow(length = unit(2, 'mm')),
                 end_cap = circle(1, 'mm'),
                 start_cap = circle(0.3, 'mm')) +
  scale_edge_width(range=c(0.5,1)) +
  scale_edge_alpha(range=c(0.2,1)) +
  scale_size_continuous(range = c(2,8)) +
  geom_node_point(aes(size = page_rank, fill = group, alpha = FC),shape=21, color = 'transparent') +
  scale_color_manual(values = c(col,'firebrick3', 'gray')) +
  scale_fill_manual(values = c(col,'firebrick3', 'gray')) +
  # Small labels for category genes (groups 1-6), larger for TFs (group 7).
  geom_node_text(aes(filter = (group %in% c(1, 2, 3, 4, 5, 6)),label=name),size=2, repel = T) +
  geom_node_text(aes(filter = group == 7,label=name),size=3) +
  theme_void() + theme(legend.position = 'none')
ggsave(filename = '/home/yzj/JingMA_NEW/res/compMicrotia/MicrotiavsNormal_inChildren/FIG/Fig5B_TF_CSC_up.pdf',
       plot_CSC_up,height = 10, width = 10, units = 'cm')
# Colour-bar legend for the up-regulated network (red gradient); sentinel
# rows DOWN/UP pin the gradient to the full [-1.5, 1.5] range.
df_plot <- data.frame(FC = scale(FC_TF), TF = rownames(scale(FC_TF)))
df_plot <- rbind(df_plot, data.frame(FC = -1.5, TF = 'DOWN'))
df_plot <- rbind(df_plot, data.frame(FC = 1.5, TF = 'UP'))
df_plot$NUL <- rep('1', nrow(df_plot))
plot_bar <-
  ggplot(data = df_plot, aes(x = TF, y = NUL, fill = FC)) +
  geom_tile() +
  scale_fill_gradient(low = 'transparent', high = 'firebrick3', breaks = c(-1.5, 0, 1.5)) +
  labs(fill = expression(paste("Scaled FC"['TF']))) +
  theme(legend.title = element_text(size = 6, color = "black"),
        legend.text = element_text(size = 6, color = "black"))
# BUG FIX: this previously saved plot_CSC_down (from the *down* section)
# under the *down* bar file name; save the up-network colour bar instead.
ggsave(plot = plot_bar, path = path.TF.net,
       filename = 'TF_CSC_up_bar.png',
       height = 5, width = 5, units = 'cm')
## Report, for every gene shown in the up network, which functional
## category/categories it belongs to (a gene may appear in several).
show.genes <- intersect(union(union(union(union(union(Apop,IL),NFK),ROS),Stab),Stem),node_type)
# seq_along() handles an empty gene set safely (1:length() would not).
for (i in seq_along(show.genes)) {
  g <- show.genes[i]
  print(paste('!!!Gene: ', g))
  if (g %in% Apop) print('Apop')
  if (g %in% IL) print('IL')
  if (g %in% NFK) print('NFK')
  if (g %in% ROS) print('ROS')
  if (g %in% Stab) print('Stab')
  if (g %in% Stem) print('Stem')
}
### Pie-chart legend for selected up-network categories.
vec_desc <- c('Reduction of stem cell ability',
              'Response to oxidative stress',
              'NF-kappaB signaling and p38MAPK cascade',
              'Apoptotic signaling pathway',
              'Stability of protein and RNA',
              'Immune and inflammatory response')
group <- c('Apop','IL','NFK','Stem','ROS','Stab')
# NOTE(review): 'ECM' is not one of the up-network groups above, so
# col['ECM'] is NA here -- this looks copied from the down-network pie
# section; confirm the intended category pair before use.
set <- c('ECM','ROS')
pct <- data.frame(group=set,pct=rep(100/length(set),length(set)),ncol = 1)
p<- ggplot(pct,aes(x="",y=pct,fill=group)) +
  geom_bar(stat = "identity",color="white",size =0.1) +
  scale_fill_manual(values = col[set]) +
  coord_polar(theta = "y") +
  theme(axis.text.x = element_blank(),axis.title = element_blank(),
        axis.ticks = element_blank(),
        panel.grid = element_blank(),panel.background = element_blank())+guides(fill=FALSE)
p
ggsave(paste(paste(set,collapse = '_'),'.pdf',sep=''),p,width = 3,height = 3,units = 'cm')
|
07b39023c93d0066add93f43a0d8a3327971b6fe
|
0b5d4f07fb7c6c1b574503877389f1ca4949a23d
|
/R/dex165.R
|
8559c9f23311c007cdfd33f1012e21f0be35bef6
|
[] |
no_license
|
joemckean/mathstat
|
0673c5f97064b8f4ac77d21ce3f10062e98b91f6
|
5e008df47b5549730c5e219b62c2cec55ecf3c3f
|
refs/heads/master
| 2020-04-08T14:34:52.117179
| 2019-04-22T17:58:40
| 2019-04-22T17:58:40
| 159,443,068
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 824
|
r
|
dex165.R
|
#' @title probability mass function for Exersize 1.6.5
#'
#' @description Suppose we have a lot of 100 items, 20 of which are defective.
#' Let X be the number of defective item in a random sample of size 5 drawn
#' without replacement. This function calulates the pmf of X.
#'
#' @examples pmf_matrix <- dex165()
#'
#' @return a matrix containing the table distribution of X.
#' The first row of the table consists of the range of X and the second row
#' the associated probabilities.
#'
#' @references Hogg, R. McKean, J. Craig, A. (2018) Introduction to
#' Mathematical Statistics, 8th Ed. Boston: Pearson.
#'
#' @details pmf for Exercise 1.6.5 part a
#'
#' @export dex165
#'
dex165 <- function() {
  # Support of X: number of defective items in a sample of 5.
  defectives <- 0:5
  # Hypergeometric probabilities: choose the defectives from the 20 bad
  # items and the remainder from the 80 good ones, over all size-5 samples.
  probs <- choose(20, defectives) * choose(80, 5 - defectives) /
    choose(100, 5)
  # Two-row table: first row the support, second row the probabilities.
  rbind(x = defectives, pmf = probs)
}
|
62e8aa751923aa9388922293487b73c7e5ed550c
|
1defdf06f288a7921676a299c0de5a224f9f749e
|
/2-Benchmark/3-ConcateRF-bench.R
|
259d2361eb03cd03427afac5e6ed9cb67d7c7668
|
[] |
no_license
|
yangziyi1990/MSPL
|
4ac28d6a94b5cfb4a6e4185dbbd6a5ee8cc8153f
|
41f1224ec450875c0f9736f74f19dff8e3ca2c56
|
refs/heads/master
| 2020-06-24T10:01:15.041955
| 2019-12-27T02:27:56
| 2019-12-27T02:27:56
| 198,934,601
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,762
|
r
|
3-ConcateRF-bench.R
|
##----------------------------------
## Code Version 1.0
## Real-data benchmark: Random Forest embedded in the
## concatenation-based framework (Concate_RF).
## Created by Zi-Yi Yang
## Modified by Zi-Yi Yang on July 23, 2019
## Contact: yangziyi091100@163.com
##----------------------------------
## load libraries
library(Matrix)
library(tseries)
library(randomForest)
library(glmnet)
library(ROCR)
# NOTE(review): absolute setwd() makes this script machine-specific.
setwd("D:/Ziyi/School/PMO/12.Multi_omics/7.MSPL-code/2-Benchmark")
# Provides evaluate.randforest.performance() used in section 3.
source('Function_performance.R')
## Load data -- creates `snf_data` and `snf_group` (per-cancer omics
## matrices and group labels; assumed from usage below, TODO confirm).
load("D:/Ziyi/School/PMO/12.Multi_omics/7.MSPL-code/2-Benchmark/1-data/SNFdatasets.RDATA")
#----------------
## 1. Colon dataset
#----------------
# Preparing data
data_colon <- snf_data$colon
label_colon <- snf_group$colon
# random select samples and Setting training and testing
# (70/30 random train/test split; NOTE(review): no set.seed(), so the
# split and all downstream results change between runs)
randidx_colon = sample(c(1:length(label_colon)),size=length(label_colon))
splits_colon = vector(mode = "numeric", length = length(label_colon))
splits_colon_trainidx = randidx_colon[1:(0.7*length(randidx_colon))]
splits_colon_testidx = randidx_colon[(0.7*length(randidx_colon)+1):length(randidx_colon)]
splits_colon[splits_colon_trainidx] = 0
splits_colon[splits_colon_testidx] = 1
splits_colon = as.matrix(splits_colon)
trainidx_colon = which(splits_colon[,1]==0)
testidx_colon = which(splits_colon[,1]==1)
# Split each omics layer (mRNA, miRNA, CpG methylation) by the same indices.
train_colon_mrna<-data_colon$mrna[trainidx_colon,]
train_colon_mirna<-data_colon$mirna[trainidx_colon,]
train_colon_cpg<-data_colon$cpg[trainidx_colon,]
test_colon_mrna<-data_colon$mrna[testidx_colon,]
test_colon_mirna<-data_colon$mirna[testidx_colon,]
test_colon_cpg<-data_colon$cpg[testidx_colon,]
# Recode labels: "high" -> 1, "low" -> 0.
label_colon[which(label_colon=="high")] <- 1
label_colon[which(label_colon=="low")] <- 0
train_colon_label <- label_colon[trainidx_colon]
test_colon_label <- label_colon[testidx_colon]
## Generate Colon data ("lable" spelling kept -- downstream code uses it)
colon_train_data = list(mrna=train_colon_mrna, mirna=train_colon_mirna, cpg=train_colon_cpg)
colon_train_lable = train_colon_label
colon_test_data = list(mrna=test_colon_mrna, mirna=test_colon_mirna, cpg=test_colon_cpg)
colon_test_lable = test_colon_label
#----------------
## 2. GBM dataset (same 70/30 split recipe as the colon section)
#----------------
# Preparing data
data_gbm <- snf_data$gbm
label_gbm <- snf_group$gbm
# random select samples and Setting training and testing
randidx_gbm = sample(c(1:length(label_gbm)),size=length(label_gbm))
splits_gbm = vector(mode = "numeric", length = length(label_gbm))
splits_gbm_trainidx = randidx_gbm[1:(0.7*length(randidx_gbm))]
splits_gbm_testidx = randidx_gbm[(0.7*length(randidx_gbm)+1):length(randidx_gbm)]
splits_gbm[splits_gbm_trainidx] = 0
splits_gbm[splits_gbm_testidx] = 1
splits_gbm = as.matrix(splits_gbm)
trainidx_gbm = which(splits_gbm[,1]==0)
testidx_gbm = which(splits_gbm[,1]==1)
train_gbm_mrna<-data_gbm$mrna[trainidx_gbm,]
train_gbm_mirna<-data_gbm$mirna[trainidx_gbm,]
train_gbm_cpg<-data_gbm$cpg[trainidx_gbm,]
test_gbm_mrna<-data_gbm$mrna[testidx_gbm,]
test_gbm_mirna<-data_gbm$mirna[testidx_gbm,]
test_gbm_cpg<-data_gbm$cpg[testidx_gbm,]
# Recode labels: "high" -> 1, "low" -> 0.
label_gbm[which(label_gbm=="high")] <- 1
label_gbm[which(label_gbm=="low")] <- 0
train_gbm_label <- label_gbm[trainidx_gbm]
test_gbm_label <- label_gbm[testidx_gbm]
## Generate gbm data
gbm_train_data = list(mrna=train_gbm_mrna, mirna=train_gbm_mirna, cpg=train_gbm_cpg)
gbm_train_lable = train_gbm_label
gbm_test_data = list(mrna=test_gbm_mrna, mirna=test_gbm_mirna, cpg=test_gbm_cpg)
gbm_test_lable = test_gbm_label
#----------------
## 3. Kidney dataset (same 70/30 split recipe as the colon section)
#----------------
# Preparing data
data_kidney <- snf_data$kidney
label_kidney <- snf_group$kidney
# random select samples and Setting training and testing
randidx_kidney = sample(c(1:length(label_kidney)),size=length(label_kidney))
splits_kidney = vector(mode = "numeric", length = length(label_kidney))
splits_kidney_trainidx = randidx_kidney[1:(0.7*length(randidx_kidney))]
splits_kidney_testidx = randidx_kidney[(0.7*length(randidx_kidney)+1):length(randidx_kidney)]
splits_kidney[splits_kidney_trainidx] = 0
splits_kidney[splits_kidney_testidx] = 1
splits_kidney = as.matrix(splits_kidney)
trainidx_kidney = which(splits_kidney[,1]==0)
testidx_kidney = which(splits_kidney[,1]==1)
train_kidney_mrna<-data_kidney$mrna[trainidx_kidney,]
train_kidney_mirna<-data_kidney$mirna[trainidx_kidney,]
train_kidney_cpg<-data_kidney$cpg[trainidx_kidney,]
test_kidney_mrna<-data_kidney$mrna[testidx_kidney,]
test_kidney_mirna<-data_kidney$mirna[testidx_kidney,]
test_kidney_cpg<-data_kidney$cpg[testidx_kidney,]
# Recode labels: "high" -> 1, "low" -> 0.
label_kidney[which(label_kidney=="high")] <- 1
label_kidney[which(label_kidney=="low")] <- 0
train_kidney_label <- label_kidney[trainidx_kidney]
test_kidney_label <- label_kidney[testidx_kidney]
## Generate kidney data
kidney_train_data = list(mrna=train_kidney_mrna, mirna=train_kidney_mirna, cpg=train_kidney_cpg)
kidney_train_lable = train_kidney_label
kidney_test_data = list(mrna=test_kidney_mrna, mirna=test_kidney_mirna, cpg=test_kidney_cpg)
kidney_test_lable = test_kidney_label
#----------------
## 4. Lung dataset (same 70/30 split recipe as the colon section)
#----------------
# Preparing data
data_lung <- snf_data$lung
label_lung <- snf_group$lung
# random select samples and Setting training and testing
randidx_lung = sample(c(1:length(label_lung)),size=length(label_lung))
splits_lung = vector(mode = "numeric", length = length(label_lung))
splits_lung_trainidx = randidx_lung[1:(0.7*length(randidx_lung))]
splits_lung_testidx = randidx_lung[(0.7*length(randidx_lung)+1):length(randidx_lung)]
splits_lung[splits_lung_trainidx] = 0
splits_lung[splits_lung_testidx] = 1
splits_lung = as.matrix(splits_lung)
trainidx_lung = which(splits_lung[,1]==0)
testidx_lung = which(splits_lung[,1]==1)
train_lung_mrna<-data_lung$mrna[trainidx_lung,]
train_lung_mirna<-data_lung$mirna[trainidx_lung,]
train_lung_cpg<-data_lung$cpg[trainidx_lung,]
test_lung_mrna<-data_lung$mrna[testidx_lung,]
test_lung_mirna<-data_lung$mirna[testidx_lung,]
test_lung_cpg<-data_lung$cpg[testidx_lung,]
# Recode labels: "high" -> 1, "low" -> 0.
label_lung[which(label_lung=="high")] <- 1
label_lung[which(label_lung=="low")] <- 0
train_lung_label <- label_lung[trainidx_lung]
test_lung_label <- label_lung[testidx_lung]
## Generate lung data
lung_train_data = list(mrna=train_lung_mrna, mirna=train_lung_mirna, cpg=train_lung_cpg)
lung_train_lable = train_lung_label
lung_test_data = list(mrna=test_lung_mrna, mirna=test_lung_mirna, cpg=test_lung_cpg)
lung_test_lable = test_lung_label
##---------------------
## 2. RandomForest
##---------------------
# For each cancer type: concatenate the three omics matrices column-wise,
# fit a random forest on the training split, and predict class labels for
# both splits. NOTE(review): ntree = 10 is very small for stable
# importance estimates -- confirm intended.
##-----2.1 colon dataset------
combined_train_colon <- do.call(cbind, colon_train_data)
combined_test_colon <- do.call(cbind, colon_test_data)
net.colon <- randomForest(x = combined_train_colon, y = factor(colon_train_lable),
                          importance = TRUE, ntree = 10)
pred_train_colon<- predict(net.colon, combined_train_colon)
pred_test_colon<- predict(net.colon, combined_test_colon)
##-----2.2 gbm dataset-----
combined_train_gbm <- do.call(cbind, gbm_train_data)
combined_test_gbm <- do.call(cbind, gbm_test_data)
net.gbm <- randomForest(x = combined_train_gbm, y = factor(gbm_train_lable),
                        importance = TRUE, ntree = 10)
pred_train_gbm<- predict(net.gbm, combined_train_gbm)
pred_test_gbm<- predict(net.gbm, combined_test_gbm)
##-----2.3 kidney dataset-----
combined_train_kidney <- do.call(cbind, kidney_train_data)
combined_test_kidney <- do.call(cbind, kidney_test_data)
net.kidney <- randomForest(x = combined_train_kidney, y = factor(kidney_train_lable),
                           importance = TRUE, ntree = 10)
pred_train_kidney<- predict(net.kidney, combined_train_kidney)
pred_test_kidney<- predict(net.kidney, combined_test_kidney)
##-----2.4 lung dataset-----
combined_train_lung <- do.call(cbind, lung_train_data)
combined_test_lung <- do.call(cbind, lung_test_data)
net.lung <- randomForest(x = combined_train_lung, y = factor(lung_train_lable),
                         importance = TRUE, ntree = 10)
pred_train_lung<- predict(net.lung, combined_train_lung)
pred_test_lung<- predict(net.lung, combined_test_lung)
##---------------------
## 3. Performance
##---------------------
# For each cancer type: train/test confusion matrices, the features the
# forest actually used (non-zero MeanDecreaseGini, importance column 4),
# and metrics from evaluate.randforest.performance() (Function_performance.R).
# Variable names keep the original "vaule"/"fearure"/"lable" spellings so
# nothing downstream breaks.
##-----3.1 colon dataset------
confusion.matrix.train.colon <- table(observed=colon_train_lable,predicted=pred_train_colon)
confusion.matrix.test.colon <- table(observed=colon_test_lable,predicted=pred_test_colon)
select.feature.colon.idx <- which(net.colon$importance[,4]!=0) ## MeanDecreaseGini
select.feature.colon.vaule <- net.colon$importance[which(net.colon$importance[,4]!=0),4] ## MeanDecreaseGini
select.fearure.colon.name <- colnames(combined_train_colon)[select.feature.colon.idx]
pref.train.colon <- evaluate.randforest.performance(confusion.mat = confusion.matrix.train.colon,
                                                    true_lable = colon_train_lable, predict_label = pred_train_colon)
pref.test.colon <- evaluate.randforest.performance(confusion.mat = confusion.matrix.test.colon,
                                                   true_lable = colon_test_lable, predict_label = pred_test_colon)
perf.colon <- list("pref.train" = pref.train.colon, "perf.test" = pref.test.colon)
results.colon <- list("net" = net.colon, "feature.idx" = select.feature.colon.idx,
                      "feature.value" = select.feature.colon.vaule,
                      "feature.name" = select.fearure.colon.name)
##-----3.2 gbm dataset------
confusion.matrix.train.gbm <- table(observed=gbm_train_lable,predicted=pred_train_gbm)
confusion.matrix.test.gbm <- table(observed=gbm_test_lable,predicted=pred_test_gbm)
select.feature.gbm.idx <- which(net.gbm$importance[,4]!=0) ## MeanDecreaseGini
select.feature.gbm.vaule <- net.gbm$importance[which(net.gbm$importance[,4]!=0),4] ## MeanDecreaseGini
select.fearure.gbm.name <- colnames(combined_train_gbm)[select.feature.gbm.idx]
pref.train.gbm <- evaluate.randforest.performance(confusion.mat = confusion.matrix.train.gbm,
                                                  true_lable = gbm_train_lable, predict_label = pred_train_gbm)
pref.test.gbm <- evaluate.randforest.performance(confusion.mat = confusion.matrix.test.gbm,
                                                 true_lable = gbm_test_lable, predict_label = pred_test_gbm)
perf.gbm <- list("pref.train" = pref.train.gbm, "perf.test" = pref.test.gbm)
results.gbm <- list("net" = net.gbm, "feature.idx" = select.feature.gbm.idx,
                    "feature.value" = select.feature.gbm.vaule,
                    "feature.name" = select.fearure.gbm.name)
##-----3.3 kidney dataset------
confusion.matrix.train.kidney <- table(observed=kidney_train_lable,predicted=pred_train_kidney)
confusion.matrix.test.kidney <- table(observed=kidney_test_lable,predicted=pred_test_kidney)
select.feature.kidney.idx <- which(net.kidney$importance[,4]!=0) ## MeanDecreaseGini
select.feature.kidney.vaule <- net.kidney$importance[which(net.kidney$importance[,4]!=0),4] ## MeanDecreaseGini
select.fearure.kidney.name <- colnames(combined_train_kidney)[select.feature.kidney.idx]
pref.train.kidney <- evaluate.randforest.performance(confusion.mat = confusion.matrix.train.kidney,
                                                     true_lable = kidney_train_lable, predict_label = pred_train_kidney)
pref.test.kidney <- evaluate.randforest.performance(confusion.mat = confusion.matrix.test.kidney,
                                                    true_lable = kidney_test_lable, predict_label = pred_test_kidney)
perf.kidney <- list("pref.train" = pref.train.kidney, "perf.test" = pref.test.kidney)
results.kidney <- list("net" = net.kidney, "feature.idx" = select.feature.kidney.idx,
                       "feature.value" = select.feature.kidney.vaule,
                       "feature.name" = select.fearure.kidney.name)
##-----3.4 lung dataset------
confusion.matrix.train.lung <- table(observed=lung_train_lable,predicted=pred_train_lung)
confusion.matrix.test.lung <- table(observed=lung_test_lable,predicted=pred_test_lung)
select.feature.lung.idx <- which(net.lung$importance[,4]!=0) ## MeanDecreaseGini
select.feature.lung.vaule <- net.lung$importance[which(net.lung$importance[,4]!=0),4] ## MeanDecreaseGini
select.fearure.lung.name <- colnames(combined_train_lung)[select.feature.lung.idx]
pref.train.lung <- evaluate.randforest.performance(confusion.mat = confusion.matrix.train.lung,
                                                   true_lable = lung_train_lable, predict_label = pred_train_lung)
pref.test.lung <- evaluate.randforest.performance(confusion.mat = confusion.matrix.test.lung,
                                                  true_lable = lung_test_lable, predict_label = pred_test_lung)
perf.lung <- list("pref.train" = pref.train.lung, "perf.test" = pref.test.lung)
results.lung <- list("net" = net.lung, "feature.idx" = select.feature.lung.idx,
                     "feature.value" = select.feature.lung.vaule,
                     "feature.name" = select.fearure.lung.name)
##---------------------
## 4. Output
##---------------------
# Bundle the per-dataset fitted models / selected features and the
# train/test performance summaries into two named lists.
results.benchmark <- list(
  "results.colon"  = results.colon,
  "results.gbm"    = results.gbm,
  "results.kidney" = results.kidney,
  "results.lung"   = results.lung
)
performance.benchmark <- list(
  "perf.colon"  = perf.colon,
  "perf.gbm"    = perf.gbm,
  "perf.kidney" = perf.kidney,
  "perf.lung"   = perf.lung
)
|
2cb6b1eb2d4a68bc7988386ac6622a961fe854b9
|
177119d6a0caf84a490b3843191bfdb112efb74a
|
/MovingAverageSimulation.R
|
8fdb894d26db8f372eda1fbbbc2da933c0c92a91
|
[] |
no_license
|
robruzzo/TimeSeriesAnalysis
|
655976082f3a3d7df3ca21f83b919d14d149ad96
|
7984d8ef4abec6e65dc8c6500bd8400e95c5dfc2
|
refs/heads/master
| 2021-04-03T23:51:31.667690
| 2020-03-27T23:39:30
| 2020-03-27T23:39:30
| 248,406,458
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 986
|
r
|
MovingAverageSimulation.R
|
"MOving Average Model Demo
Unline the Autoregressive Model, the moving average model uses more white
noise terms with Beta Coefficients to give the magnitude.
x(t)=w(t) +B1Wt-1 +...BqWt-q
The white noise will have a mean of 0 and the variance of the series
THe order is q
"
set.seed(1)
#generate values for W(t) white noise
w<-rnorm(1000)
#generate values for x(t) time series x(t=1)=0
x<-rep(0,1000)
#we simulate MA(1)with one coefficient(0.4)
for(t in 3:1000)x[t]<-w[t]+0.4*w[t-1]
"
In the autocorrelation function we expect there to be a non zero significant
autocorrelation value. So if a non zero value exists we can fit MA(q) model
"
plot(x,type="l")
acf(x)
"There is no ma() function in R so we use the ARIMA model without the
AR and Integrated part. We set them to both be 0 and MA(1)
"
x.ma<-arima(x,order=c(0,0,1))
x.ma
#The output shows that we have an MA coefficient of 0.37 with a std err of 0.0306
#Construct the confidence levels
0.37567096+c(-1.96,1.96)*0.0306
|
893284b190d487ea5e2be3bf8f8a82386328c433
|
946a3a8bfd917b1797cb4499f259b955f28e3de4
|
/thesis-data-analysis/scripts/raw-dataset-analysis/lucene_stats.R
|
d1b52f17663499232221629c40be32d14ef954bc
|
[] |
no_license
|
ansin218/master-thesis
|
2980089bc48901595e9f221baa96d76fa7dcb8a4
|
8e55a588be9f3c2784783f9233973c7ec79c12b5
|
refs/heads/master
| 2021-03-19T16:55:04.538157
| 2018-03-26T12:33:35
| 2018-03-26T12:33:35
| 100,115,495
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 777
|
r
|
lucene_stats.R
|
# Distribution of comment counts per issue in the Apache Lucene tracker.
library(ggplot2)
library(scales)

issue_file <- "lucene_issues.csv"
lucene_issues <- read.csv(issue_file, header = TRUE, sep = ",")

# Bin the raw comment counts into categories; cut() yields NA for counts
# outside the break range, and those issues are dropped.
count_breaks <- c(1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 100, 150)
lucene_issues$countCat <- cut(lucene_issues$count, breaks = count_breaks)
lucene_issues <- lucene_issues[!is.na(lucene_issues$countCat), ]

# Bar chart of issues per comment-count category, with each bar's count
# printed above it.
ggplot(lucene_issues, aes(x = factor(countCat))) +
  geom_bar(aes(y = (..count..))) +
  labs(x="Comments Count Category",y="Number of Issues") +
  ggtitle("Apache Lucene Issue Trackers Comments Distribution") +
  theme(plot.title = element_text(hjust = 0.5), axis.text.x = element_text(color = "black", size = 11), axis.text.y = element_text(color = "black", size = 11)) +
  geom_text(aes( label = (..count..), y= ..count.. ), stat="count", vjust = -.5)
|
2c8132e07fe48b820fd856d267e8752fd8d616a3
|
a2937d7268f41c5bb3c3dafc69252248a2ef34c7
|
/plot1.R
|
3154099650af540f63b42e1e4e980a4806d98bd1
|
[] |
no_license
|
JTBenson/ExData_Plotting1
|
df46ebb30a10d228fe4bd06cde58e932a1133ef3
|
ac32382a4856fb904ce482ab1afc6a8a56d52281
|
refs/heads/master
| 2021-01-16T01:02:46.367599
| 2014-08-08T20:04:03
| 2014-08-08T20:04:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 727
|
r
|
plot1.R
|
# Read in the household power data; nrows = 69516 is just enough rows to
# cover 1-Feb-2007 and 2-Feb-2007, and "?" marks missing values.
x=read.table("./exdata_data_household_power_consumption/household_power_consumption.txt",
             header=TRUE,sep=";",nrows=69516,na.strings="?")
## Keep only the two target dates (Date is stored as d/m/Y strings).
xSub = subset(x,as.character(x$Date) %in% c("1/2/2007","2/2/2007"))
summary(xSub$Date)
## Combine Date and Time into a single POSIXlt DateTime column.
xSub$DateTimeCat = paste(xSub$Date,xSub$Time,sep=",")
xSub$DateTime = strptime(xSub$DateTimeCat,format="%d/%m/%Y,%H:%M:%S")
# Histogram of global active power, then copy the screen device to PNG.
# NOTE(review): dev.copy() snapshots the current device; the usual
# pattern is png(...); hist(...); dev.off(), which renders directly at
# the target size — confirm the copied output looks as intended.
with(xSub, hist(Global_active_power,xlab="Global Active Power (kilowatts)",
                main="Global Active Power",col="red"))
dev.copy(png,file="plot1.png")
dev.off()
|
b38f9d8e6fb8926d961a2630d3e68185173bc4f1
|
2e8d89ca30be43dc27a9a3e369db24e7eee67ed3
|
/pbmc_prepro0518.R
|
0ff775d90a3d06e0529f186d1c9c843106235fef
|
[] |
no_license
|
DaigoOkada/kernel_deef_code
|
d62a75953c567566806118f3a003e8f64c116d04
|
4ec0c4c7b9cb7a761d078c683958af1dd1c5ec48
|
refs/heads/main
| 2023-06-05T06:21:33.696127
| 2021-06-21T10:10:30
| 2021-06-21T10:10:30
| 370,950,339
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,432
|
r
|
pbmc_prepro0518.R
|
# scRNA-seq preprocessing: per-sample QC + log-normalisation with Seurat,
# then export of gene-filtered expression matrices and true labels.
library(Seurat)
out_path <- "/home/dokada/work_dir/pbmc_prepro0518/"
out_path2 <- paste0(out_path,"prepro/")
dir.create(out_path)
dir.create(out_path2)
data_path <- "/home/dokada/work_dir/scrnaseq_bld0113/data_GSE125527/"
files <- sort(list.files(data_path))
# Processing: one expression table per sample file.
sample_names <- files
n <- length(sample_names)
data_list <- list()
for(i in 1:n){
    s <- sample_names[i]
    # Input tables are cells x genes, hence the t() before CreateSeuratObject.
    pbmc.data <- read.table(paste0(data_path, s),header=T,row.names=1)
    pbmc <- CreateSeuratObject(counts =t(pbmc.data))
    # Mitochondrial fraction per cell.
    # NOTE(review): in this regex "." matches ANY character, so "^MT."
    # also matches genes like "MTX1"; "^MT\\." or "^MT-" may be what was
    # intended — confirm against the gene naming in this dataset.
    pbmc[["percent.mt"]] <- PercentageFeatureSet(pbmc, pattern = "^MT.")
    #VlnPlot(pbmc, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
    # Cell-level QC thresholds, then log-normalisation (scale factor 1e4).
    pbmc_cellqc <- subset(pbmc, subset = nFeature_RNA > 200 & nFeature_RNA < 7000 & nCount_RNA < 70000 & percent.mt < 10)
    pbmc_cellqc_norm <- NormalizeData(pbmc_cellqc, normalization.method = "LogNormalize", scale.factor = 10000)
    mat_cellqc_norm = as.matrix(pbmc_cellqc_norm[["RNA"]]@data)
    data_list[[i]] <- mat_cellqc_norm
    cat(i,"\n")
}
# Gene-level QC: keep genes whose pooled reads across samples exceed 15000.
# NOTE(review): sapply(data_list, rowSums) assumes every sample matrix has
# the same genes in the same row order — verify this holds for the input.
pooled_reads_num <- rowSums(sapply(data_list,rowSums))
idx <- which(pooled_reads_num > 15000)
setwd(out_path2)
# Write one cells x filtered-genes CSV per sample.
for(i in 1:n){
    tab <- t(data_list[[i]][idx,])
    write.csv(tab, file=paste0(sample_names[i], ".csv"),row.names=F)
}
# True label: first character of the second "_"-separated token of each
# file name.
true_label <- sapply(files, function(x){substr(strsplit(x,"_")[[1]][2],1,1)})
write.csv(true_label,file=paste0(out_path,"true_lab.csv"))
|
6a91de07abfffaeb9d1eceb71371dcd80b6bb4a6
|
ff9429761f135642f36e2b366aa378eb71f2f686
|
/hw1YourNamePart1.r
|
43409894e863fbc583ed23b7f183ac0c3320fe8f
|
[] |
no_license
|
e-turk/RStudio-Project
|
7e92915c950523f5253530e6f4e111c95fecd609
|
0965e542e378c8251a148bf5780c958c9283b064
|
refs/heads/master
| 2021-04-27T14:37:01.153819
| 2018-05-10T15:37:51
| 2018-05-10T15:37:51
| 122,447,866
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 755
|
r
|
hw1YourNamePart1.r
|
install.packages("tidyverse")
library(tidyverse)
library(readr) #Reading xlsx files
library(data.table)
library(ggplot2) # visualization
library(scales) # visualization
library(dplyr) # data manipulation
library(knitr)
setwd("C:\\Users\\HACI-PC\\Desktop")
library(readr)
UNdata_Export_20171026_130851047 <- read_csv("C:\\Users\\HACI-PC\\Desktop/UNdata_Export_20171026_130851047.csv")
View(UNdata_Export_20171026_130851047)
dim(UNdata_Export_20171026_130851047)
glimpse(UNdata_Export_20171026_130851047)
summary(UNdata_Export_20171026_130851047)
ggplot(data = UNdata_Export_20171026_130851047, aes(x=Year, y=Rate))
p1 <- ggplot(data = UNdata_Export_20171026_130851047, aes(x=Year, y=Rate))
p1 + geom_bar(stat="identity")
|
13b770bbda0185fdee62d6dd70abd4aa10120f85
|
93dbdf2b98ec03d5ef604426b4f3552312a174c9
|
/old/multi_step_model.R
|
44fea405f17b7969e7a942560fd841fece5b4ddd
|
[] |
no_license
|
lebelinoz/ci-factor_model
|
6a00b89349b781525e27e1fe8a50cf512445d076
|
27b2afcab723d0e94c0115775e2a201c06b4e78e
|
refs/heads/master
| 2020-05-23T07:59:00.426675
| 2017-03-07T02:52:26
| 2017-03-07T02:52:26
| 80,478,651
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 611
|
r
|
multi_step_model.R
|
# Take the residuals of the asset vs bmark linear model, and see how much better a second step will do against it.
# Step 1 data and helpers come from the sourced scripts below
# (asset_and_bmark_and_factors, show_regression).
source('experiment_on_all_tickers.R')
source('show_regression.R')
# Step 1: regress the asset on its benchmark and keep the residuals.
asset_bmark.lm = lm(asset ~ bmark, data = asset_and_bmark_and_factors)
asset_and_bmark_and_factors$residuals = asset_bmark.lm$residuals
show_regression(asset_and_bmark_and_factors, "residuals", "yield")
# Step 2a: regress the step-1 residuals on the bond factor.
step2_bond.lm = lm(residuals ~ bond, asset_and_bmark_and_factors)
summary(step2_bond.lm)
anova(step2_bond.lm)
# Step 2b: regress the step-1 residuals on the yield factor.
step2_yield.lm = lm(residuals ~ yield, asset_and_bmark_and_factors)
summary(step2_yield.lm)
anova(step2_yield.lm)
|
e3f3588cf54233ec3211e0c6dcd8db0f47df1468
|
f6a580a7ab6afb88d3751e4131e72eeb4037b145
|
/candidatePack/R/show_candidate.R
|
a08d6886507b626f8cb9a08a2416cd95f0ee4e9f
|
[] |
no_license
|
ScottSolomon66/PS6
|
fbe2fbfdc5c51c1ed2e0adfe7606b563a6ca2ac4
|
8766f9222ac0eb951de7314197dfd1944dd194c0
|
refs/heads/master
| 2021-01-20T04:42:35.239995
| 2016-03-21T02:36:27
| 2016-03-21T02:36:27
| 53,739,339
| 0
| 0
| null | 2016-03-12T15:44:18
| 2016-03-12T15:44:18
| null |
UTF-8
|
R
| false
| false
| 1,326
|
r
|
show_candidate.R
|
#' Show method for objects of class "Candidate"
#'
#' Prints a two-row table — labels over values — of a candidate's name,
#' delegates won, party, and delegates still needed.
#'
#' @param object An object of class \code{"Candidate"}.
#'
#' @return Called for its side effect of printing the label/value table
#' (rows \code{labels} and \code{values}).
#' @author Scott Solomon
#' @note Make america gr8 again
#' @examples
#'
#' Obama<-new("Candidate", name = "Obama", delegates_won = 800, party = "Democrat", delegates_needed = 1200)
#' show(Obama)
#' @export
setMethod(f = "show",
          ## register show() for the S4 class "Candidate"
          signature = "Candidate",
          definition = function(object){
            ## slot values in display order
            vals <- list(object@name, object@delegates_won, object@party, object@delegates_needed)
            ## matching human-readable labels
            lbls <- c("name", "delegates won", "party", "delegates needed")
            ## Named rbind() arguments keep the row names "labels" and
            ## "values" in the printed matrix, matching the original output.
            value_table <- rbind(labels = lbls, values = vals)
            print(value_table)
          })
|
5c4338516dd011ad629c7cc7a01bde79c0414323
|
f61064bb7d0013f111123206b230482514141d9e
|
/man/sir_csmc.Rd
|
92b58dc3109b8c5386f393866bcf7133b241ca6e
|
[] |
no_license
|
nianqiaoju/agents
|
6e6cd331d36f0603b9442994e08797effae43fcc
|
bcdab14b85122a7a0d63838bf38f77666ce882d1
|
refs/heads/main
| 2023-08-17T05:10:49.800553
| 2021-02-18T23:01:47
| 2021-02-18T23:01:47
| 332,890,396
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 712
|
rd
|
sir_csmc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sir_csmc.R
\name{sir_csmc}
\alias{sir_csmc}
\title{controlled SMC for SIR model}
\usage{
sir_csmc(y, model_config, particle_config, policy = NULL)
}
\arguments{
\item{y}{, observations, length (T+1)}
\item{model_config}{a list containing model parameters, must pass the test of check_model_config;}
\item{particle_config}{a list specifying the particle filter, must pass the test of check_particle_config;}
}
\value{
the particle filter outcome, must contain estimate of log marginal likelihood and effective sample size.
}
\description{
marginal likelihood estimations and samples from the posterior distribution of agent states
}
|
44fe79f86a35b2403e7511bcb2e5db747123b41f
|
ab40dbb49718e426b5e75de554c36a339c143eaa
|
/bigGP/man/remoteGetIndices.Rd
|
e23e50797da0012904236cf2cb82ca4baa97654c
|
[] |
no_license
|
paciorek/bigGP
|
b4037380cfe23e3b28f543585a3d1052331057bb
|
c087c7712d67c55ba72ab93b544008c1f6dd2d41
|
refs/heads/master
| 2023-05-01T03:03:25.282229
| 2023-04-25T15:01:43
| 2023-04-25T15:01:43
| 38,216,446
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,917
|
rd
|
remoteGetIndices.Rd
|
\name{remoteGetIndices}
\alias{remoteGetIndices}
\title{
Determine Indices of Vector or Matrix Elements Stored on all Processes
}
\description{
\code{remoteGetIndices} determines the indices of the subset of a
matrix or vector that are stored on each process.
}
\usage{
remoteGetIndices(type = "vector", objName, objPos = ".GlobalEnv", n1,
n2 = NULL, h1 = 1, h2 = 1)
}
\arguments{
\item{type}{
a string, one of 'vector', 'symmetric', 'triangular', or 'rectangular'
giving the type of object for which one wants the indices. Note that
square and symmetric matrices are both stored as lower triangles, so
these options both return the same result. For square, non-symmetric
matrices, use 'rectangular'.
}
\item{objName}{
the name to use for the object containing the indices on the slave processes.
}
\item{objPos}{
where to do the assignment of the object, given as a character string (unlike
\code{assign}). This can indicate an environment or a ReferenceClass
object.
}
\item{n1}{
a positive integer, giving the length of the vector, number of rows
and columns of a symmetric or triangular matrix and number of rows of
a rectangular matrix, including square, non-symmetric matrices.
}
\item{n2}{
a positive integer, giving the number of columns of a
a rectangular matrix.
}
\item{h1}{
a positive integer, giving the block replication factor for a
vector, a symmetric or triangular matrix, or the rows of
a rectangular matrix.
}
\item{h2}{
a positive integer, giving the block replication factor for the
columns of the rectangular matrix.
}
}
\details{
\code{remoteGetIndices} calculates the indices as described in
\code{\link{localGetVectorIndices}},
\code{\link{localGetTriangularMatrixIndices}}, and
\code{\link{localGetRectangularMatrixIndices}}, and writes them to an
object named \code{objName}.
}
|
eeb9334367a9c5e2bc345a21c47af9d6a9a12826
|
88f2455bd16b29da868cfed650330de6a219f215
|
/seurat_120419.R
|
5bb3d9f26896d0380ae7bd030b48f7396c02cd8b
|
[] |
no_license
|
ggruenhagen3/brain_scripts
|
8b90902afe62d3a8c111ffb5f3fd203863330ef6
|
cc9f78e431cd05fdc0d7c43ed4d5ca44e715d5cc
|
refs/heads/master
| 2023-07-08T03:27:24.566197
| 2023-06-19T18:49:09
| 2023-06-19T18:49:09
| 225,917,508
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,756
|
r
|
seurat_120419.R
|
# Load 10x data for two behave (BHVE) samples and one control (CTRL),
# merge, QC-filter, and run the standard Seurat normalisation/PCA/UMAP
# clustering workflow.
# NOTE(review): install.packages() in a script is an anti-pattern; left
# as-is for this exploratory analysis.
install.packages('Seurat')
library(Seurat)
library(dplyr)
rna_path <-"C:/Users/zjohn/Desktop/single_nuc"
# NOTE(review): all three samples are read from the SAME directory —
# likely placeholder paths; confirm b1/b2/c1 should point at distinct
# filtered_feature_bc_matrix folders.
b1.data <- Read10X(data.dir = paste(rna_path, "/filtered_feature_bc_matrix/", sep=""))
b2.data <- Read10X(data.dir = paste(rna_path, "/filtered_feature_bc_matrix/", sep=""))
c1.data <- Read10X(data.dir = paste(rna_path, "/filtered_feature_bc_matrix/", sep=""))
b1 <- CreateSeuratObject(counts = b1.data, project = "behav")
b2 <- CreateSeuratObject(counts = b2.data, project = "behav2")
c1 <- CreateSeuratObject(counts = c1.data, project = "control")
# Condition metadata used later for split.by plots.
b1$cond <- "BHVE"
b2$cond <- "BHVE"
c1$cond <- "CTRL"
# Normalize Data without scale factor per https://satijalab.org/seurat/v3.1/pbmc3k_tutorial.html
# NOTE(review): this first normalisation is immediately overwritten by
# the scale.factor = 100000 calls below; only the second set takes effect.
b1 <- NormalizeData(b1)
b2 <- NormalizeData(b2)
c1 <- NormalizeData(c1)
# Normalize Data with scale factor per https://satijalab.org/seurat/v3.1/pbmc3k_tutorial.html
b1 <- NormalizeData(b1, normalization.method = "LogNormalize", scale.factor = 100000)
b2 <- NormalizeData(b2, normalization.method = "LogNormalize", scale.factor = 100000)
c1 <- NormalizeData(c1, normalization.method = "LogNormalize", scale.factor = 100000)
# Merge control + the two behave samples; cell ids are prefixed by condition.
combined <- merge(x=c1, y=c(b1,b2), merge.data = TRUE, add.cell.ids = c("CTRL", "BHVE", "BHVE"))
# Cell-level QC: keep cells with 200 < nFeature < 1500 and 200 < nCount < 1500.
combined <- subset(combined, subset = nFeature_RNA > 200)
combined <- subset(combined, subset = nFeature_RNA < 1500)
combined <- subset(combined, subset = nCount_RNA > 200)
combined <- subset(combined, subset = nCount_RNA < 1500)
# combined <- FindVariableFeatures(object = combined, mean.function = ExpMean, dispersion.function = LogVMR, nfeatures = 2000)
combined <- FindVariableFeatures(object = combined, selection.method = "vst", loess.span = 0.3, dispersion.function = "default", clip.max = "auto", num.bin = 50, binning.method = "equal_width", verbose = TRUE)
plot1 <- VariableFeaturePlot(combined)
plot1
# Scale all genes (not just variable features) before PCA.
all.genes <- rownames(combined)
length(all.genes)
combined <- ScaleData(combined, features = all.genes)
combined <- RunPCA(combined, features = VariableFeatures(object = combined))
# Clustering: graph on 25 PCs, Louvain at resolution 1.2, UMAP on 20 PCs.
combined <- FindNeighbors(combined, dims = 1:25)
combined <- FindClusters(combined, resolution = 1.2)
combined <- RunUMAP(combined, dims = 1:20)
DimPlot(combined, reduction = "umap", pt.size = 0.5, label = TRUE)
#from here onward still playing around, some commands may not work or be really time intensive
# NOTE(review): `nbt` is never defined in this file, and fetch.data /
# doHeatMap / set.all.ident / dot.plot are Seurat v2-era APIs — these
# lines will error under Seurat v3; kept as the author's scratch work.
my.data=fetch.data(nbt,c("ident","PC1","nGene","orig.ident","PAX6","DLX2","ACTB"))
#find the top 20 distinguishing genes for each cluster
combined.markers <- FindAllMarkers(combined, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
write.csv(head(combined.markers %>% group_by(cluster) %>% top_n(n = 20, wt = avg_logFC),400), "test.csv") #write them into a .csv file
top5 <- combined.markers %>% group_by(cluster) %>% top_n(n = 5, wt = avg_logFC)
top3 <- combined.markers %>% group_by(cluster) %>% top_n(n = 3, wt = avg_logFC)
top2 <- combined.markers %>% group_by(cluster) %>% top_n(n = 2, wt = avg_logFC)
my.data=FetchData(combined,)
doHeatMap(nbt,features = combined.markers,slim.col.label = TRUE,remove.key = TRUE,cexRow=0.1)
#another command for finding markers that distinguish all clusters
markers.all=FindAllMarkers(combined,test.use = "poisson", logfc.threshold=0.1, min.pct=.25, label = TRUE, verbose=TRUE)
?FindAllMarkers
#picking top marker genes by cluster
# NOTE(review): top5/top3 are reassigned twice below — only the LAST
# assignment (by p_val_adj) survives for the plots further down.
top5 <- subset(markers.all %>% group_by(cluster) %>% top_n(n = 5, wt = avg_logFC)) #this is how to get genes with biggest expression differences
top5 <- subset(markers.all %>% group_by(cluster) %>% top_n(n = -4, wt = p_val_adj)) #this is how to get most significant genes
top3 <- subset(markers.all %>% group_by(cluster) %>% top_n(n = 3, wt = avg_logFC))
top3 <- subset(markers.all %>% group_by(cluster) %>% top_n(n = -1, wt = p_val_adj))
?top_n
top3
top2 <- markers.all %>% group_by(cluster) %>% top_n(n = 2, wt = avg_logFC)
#all markers found by FindAllMarkers
#DoHeatmap(combined, features = markers.all$gene, cells = NULL, group.by = "ident",
#          group.bar = TRUE, group.colors = NULL, disp.min = -2.5,
#          disp.max = NULL, slot = "scale.data", assay = NULL, label = TRUE,
#          size = 5.5, hjust = 0, angle = 45, raster = TRUE,
#          draw.lines = TRUE, lines.width = NULL, group.bar.height = 0.02,
#          combine = TRUE)
#plot top 5 for each cluster
DoHeatmap(combined, features = top5$gene, cells = NULL, group.by = "ident",
          group.bar = TRUE, group.colors = NULL, disp.min = -2.5,
          disp.max = NULL, slot = "scale.data", assay = NULL, label = TRUE,
          size = 5.5, hjust = 0, angle = 45, raster = TRUE,
          draw.lines = TRUE, lines.width = NULL, group.bar.height = 0.02,
          combine = TRUE)
#plot top 3 for each cluster
DoHeatmap(combined, features = top3$gene, cells = NULL, group.by = "ident",
          group.bar = TRUE, group.colors = NULL, disp.min = -2.5,
          disp.max = NULL, slot = "scale.data", assay = NULL, label = TRUE,
          size = 5.5, hjust = 0, angle = 45, raster = TRUE,
          draw.lines = TRUE, lines.width = NULL, group.bar.height = 0.02,
          combine = TRUE)
# Dot plots over the (deduplicated) top marker genes.
top5_list <- unique(top5$gene)
top2_list <- unique(top2$gene)
DotPlot(combined,features = top5_list)
DotPlot(combined,features = top2_list, angle = 45)
?DotPlot
print(head(combined.markers,40))
nbt = set.all.ident(nbt, "orig.ident")
dot.plot(nbt,genes.plot = rownames(combined.markers)[1:200],cex.use=4)
# Run the standard workflow for visualization and clustering
combined <- ScaleData(object = combined, vars.to.regress = NULL)
combined <- RunPCA(combined, npcs = 30, verbose = FALSE)
# NOTE(review): combined.markers.test is not defined anywhere in this
# file — this line will error; presumably leftover scratch.
unique(combined.markers.test$gene)
# t-SNE and Clustering
# NOTE(review): FindNeighbors here is built on the 2-D UMAP embedding
# (not PCA) with a very low resolution — intentionally coarse clusters?
combined <- RunUMAP(combined, reduction = "pca", dims = 1:20)
combined <- FindNeighbors(combined, reduction = "umap", dims = 1:2)
combined <- FindClusters(combined, resolution = 0.005)
DimPlot(combined, reduction = "umap", split.by = "cond", label = TRUE)
FeaturePlot(combined, features = c("slc17a6b"), split.by = "cond", reduction = "umap", pt.size = 1.5, label=TRUE, order = TRUE)
# Find clusters: save the object, then export per-cluster DEG tables.
folder <- "Dim25"
saveRDS(combined, file = paste(rna_path, "/results/clusters/", folder, "/combined.RDS", sep=""))
print("Finding DEG between clusters")
Idents(object = combined) <- "seurat_clusters"
# Highest cluster label (clusters are numbered 0..num_clusters).
num_clusters <- as.numeric(tail(levels(combined@meta.data$seurat_clusters), n=1))
for (i in 0:num_clusters) {
	print(i)
	# Markers of cluster i vs all other cells.
	nk.markers <- FindMarkers(combined, ident.1 = i, verbose = FALSE)
	nk.markers$gene_name <- row.names(nk.markers)
	# Significant markers: adjusted p < 0.05 and |logFC| > 2.
	sig_nk.markers <- nk.markers[which(nk.markers$p_val_adj < 0.05 & abs(nk.markers$avg_logFC) > 2),]
	write.table(nk.markers, file = paste(rna_path, "/results/clusters/", folder, "/all_cluster_", i, ".tsv", sep=""), quote = FALSE, row.names = FALSE)
	write.table(sig_nk.markers, file = paste(rna_path, "/results/clusters/", folder, "/sig_cluster_", i, ".tsv", sep=""), quote = FALSE, row.names = FALSE)
	write.table(sig_nk.markers$gene_name, file = paste(rna_path, "/results/clusters/", folder, "/genes_sig_cluster_", i, ".tsv", sep=""), quote = FALSE, row.names = FALSE, col.names = FALSE)
	# Positive-only significant markers (up-regulated in cluster i).
	sig_nk_pos.markers <- nk.markers[which(nk.markers$p_val_adj < 0.05 & nk.markers$avg_logFC > 2),]
	write.table(sig_nk_pos.markers, file = paste(rna_path, "/results/clusters/", folder, "/sig_pos_cluster_", i, ".tsv", sep=""), quote = FALSE, row.names = FALSE)
	write.table(sig_nk_pos.markers$gene_name, file = paste(rna_path, "/results/clusters/", folder,"/genes_sig_pos_cluster_", i, ".tsv", sep=""), quote = FALSE, row.names = FALSE, col.names = FALSE)
}
print("Done finding clusters")
|
339edc3b51d62ace2a972dbcd0b14c07f95580ca
|
7df389c848049ebf2bc1ad8e56f0ed3bdad977f3
|
/src/hawkes.R
|
176cc1bde9df7f0ce8464546b628138f5b61593f
|
[] |
no_license
|
shaabhishek/hierarchical-pp-new
|
a64644d3b6d27d2974ae72ec481177800c0d0101
|
26c1a74bcda275fb99108d5e66d00a27571f5739
|
refs/heads/master
| 2022-11-13T09:35:15.859530
| 2020-05-04T03:42:42
| 2020-05-04T03:42:42
| 278,703,172
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,028
|
r
|
hawkes.R
|
# Synthetic point-process data generation (Hawkes / Poisson clusters).
# NOTE(review): hardcoded absolute setwd() path — breaks on any other
# machine; consider a relative data directory.
library(hawkes)
library(poisson)
setwd('/Users/abhisheksharma/Dev/hierarchichal_point_process/data/')
# One row of simulation parameters per cluster.
params_data <- read.delim("synthetic_data_params.txt", header = FALSE) #col.names = c('lambda0', 'alpha', 'beta', 'horizon'))
# n_cluster <- c(5, 10, 50, 100)
# Cluster counts to simulate datasets for.
n_cluster <- c(4)
# lambda0 <- c(0.1, 0.2, 0.3, 0.4, 0.5)
# n_cluster <- length(lambda0)
# alpha <- c(0.6, 0.7, 0.9, 0.5, 0.4)
# beta <- rep(1.0, n_cluster)
# horizon <- rep(100, n_cluster)
## Factory for a sinusoidal NHPP intensity: the returned function
## computes f(x) = sin(pi * coeff * x).
intensity_nhpp_wrapper <- function(coeff) {
  rate_fn <- function(x) {
    sinpi(coeff * x)
  }
  rate_fn
}
## TRUE when any two consecutive event times in x are closer than 0.001
## apart (used to reject degenerate simulated paths).
belowminthres <- function(x) {
  gaps <- diff(x)
  min(gaps) < 0.001
}
## Simulate a batch of point-process paths for one cluster.
## params: c(type, p1, p2, p3) — type "Poisson" uses an NHPP with
## sinusoidal intensity; anything else uses a Hawkes process with
## (lambda0, alpha, beta) = (p1, p2, p3) as read from the params file.
## splittype: "train" yields 500 paths, otherwise 50.
## Each path is re-simulated until it has >= 2 events and no pair of
## events closer than the belowminthres() gap.
generate_hawkes <- function(params, splittype) {
  print(params)
  sim_pp <- function(idx, params) {
    ready = FALSE
    attempts = 0
    ## Hawkes simulation horizon. Initialised ONCE, outside the retry
    ## loop: the original re-assigned horizon = 25 on every attempt,
    ## which made the `horizon <- 2*horizon` retry-doubling a no-op.
    horizon = 25
    while (!ready) {
      attempts <- attempts + 1
      if (params[1]=='Poisson'){
        res <- nhpp.sim(1, 50, intensity_nhpp_wrapper(as.double(params[2])), prepend.t0 = FALSE)
        if (!belowminthres(res)){
          ready <- TRUE
        }
      }
      else{
        lambda0 = as.double(params[2])
        alpha = as.double(params[3])
        beta = as.double(params[4])
        ## Clip beta to 1 and keep alpha strictly below beta
        ## (presumably to keep the Hawkes process stationary — confirm
        ## against the hawkes package's parameterisation).
        beta = min(c(beta, 1.0))
        alpha = min(c(alpha, beta-.01))
        res <- simulateHawkes(lambda0, alpha, beta, horizon)
        if ((length(unlist(res)) >= 2) && (!belowminthres(unlist(res)))){
          ready <- TRUE
        }
        else{
          ## Too few or too tightly spaced events: retry on a longer horizon.
          horizon <- 2*horizon
        }
      }
    }
    return (unlist(res))
  }
  ## 500 training paths per cluster; 50 for validation/test splits.
  if (splittype =='train'){
    n <- 500
  } else {
    n <- 50
  }
  return (lapply(1:n, function(x) sim_pp(x, params)))
}
## Delete the file at `fn` if it already exists; silent no-op otherwise.
deletepreviousdata <- function(fn) {
  already_there <- file.exists(fn)
  if (already_there) {
    file.remove(fn)
  }
}
## For each requested cluster count, simulate train/val/test path sets
## from the first `nclus` parameter rows, flatten them into one dataset
## per split, and write each path as one whitespace-separated line under
## dump/. Relies on the globals params_data / generate_hawkes /
## deletepreviousdata defined above.
simulate <- function(n_cluster) {
  for (nclus in n_cluster) {
    print(nclus)
    ## One list of paths per cluster (rows 1..nclus of the params table).
    train_data = apply(params_data[1:nclus,1:4], 1, function(x) generate_hawkes(x, 'train'))
    val_data = apply(params_data[1:nclus,1:4], 1, function(x) generate_hawkes(x, 'val'))
    test_data = apply(params_data[1:nclus,1:4], 1, function(x) generate_hawkes(x, 'test'))
    # Flatten clusters into one data set
    train_data <- unlist(train_data, recursive = FALSE, use.names = FALSE)
    val_data <- unlist(val_data, recursive = FALSE, use.names = FALSE)
    test_data <- unlist(test_data, recursive = FALSE, use.names = FALSE)
    # File names
    ## NOTE(review): assumes a dump/ subdirectory already exists in the
    ## working directory — confirm, or dir.create() it first.
    fn_base <- paste0('dump/syntheticdata_nclusters_', nclus)
    fn_train <- paste0(fn_base, '_train.txt')
    fn_val <- paste0(fn_base, '_val.txt')
    fn_test <- paste0(fn_base, '_test.txt')
    # Delete previous files
    sapply(c(fn_train, fn_val, fn_test), deletepreviousdata)
    # Save data: print min/max/mean path lengths, then append one path
    # per line to each split file.
    print(lapply(list(train_data, val_data, test_data), function(x) c(min(sapply(x, length)), max(sapply(x, length)), mean(sapply(x, length)))))
    lapply(train_data, function(x) write(x, file = fn_train, append = TRUE, ncolumns = length(x)))
    lapply(val_data, function(x) write(x, file = fn_val, append = TRUE, ncolumns = length(x)))
    lapply(test_data, function(x) write(x, file = fn_test, append = TRUE, ncolumns = length(x)))
  }
}
# simulate(n_cluster)
## Hawkes conditional intensity at time t given event history P.
## params = c(baseline, jump size, decay timescale):
##   lambda(t) = params[1] + params[2] * sum_{s in P, s < t} exp(-(t - s)/params[3])
intensity_hp <- function(t, P, params) {
  past <- P[P < t]
  decay_terms <- exp(-(t - past) / params[3])
  params[1] + params[2] * sum(decay_terms)
}
## Simulate a point process on [0, T] by Ogata-style thinning.
## `intensity_hp(t, P, params)` must return the conditional intensity at
## t given history P; proposals are dominated by that intensity plus 1.
genHP <- function (T, intensity_hp, params) {
  now <- 0
  events <- c()
  while (now < T) {
    ## dominating rate for the next proposal
    bound <- intensity_hp(now, events, params) + 1
    wait <- rexp(1, rate = bound)
    now <- now + wait
    thin_u <- runif(1, 0, bound)
    ## thinning step: accept with probability intensity/bound
    accepted <- (now < T) && (thin_u <= intensity_hp(now, events, params))
    if (accepted) {
      events <- c(events, now)
    }
  }
  return (events)
}
## Deterministic NHPP intensity: two sinusoids shifted above zero,
## lambda(t) = 0.5*sin(pi*a*t) + 0.5*cos(0.2*pi*a*t) + 1 with a = params[1].
intensity_nhpp <- function(t, params) {
  a <- params[1]
  sine_part <- 0.5 * sinpi(a * t)
  cosine_part <- 0.5 * cospi(0.2 * a * t)
  sine_part + cosine_part + 1
}
## Simulate a non-homogeneous Poisson process on [0, T] by thinning.
## Uses a fixed dominating rate of 5, so the supplied intensity function
## must never exceed 5 for the thinning to be exact — confirm for any
## new intensity passed in.
genNHPP <- function(T, intensity_nhpp, params) {
  bound <- 5
  now <- 0
  events <- c()
  while (now < T) {
    now <- now + rexp(1, rate = bound)
    thin_u <- runif(1, 0, bound)
    ## accept with probability intensity/bound
    if ((now < T) && (thin_u <= intensity_nhpp(now, params))) {
      events <- c(events, now)
    }
  }
  return (events)
}
## Self-correcting process intensity at time t given event history P:
##   lambda(t) = exp(a*t - b*N(t))
## where N(t) is the number of past events before t, a = params[1],
## b = params[2].
intensity_scp <- function(t, P, params) {
  P_ <- P[P < t]
  ## length() is the intended past-event count; the original computed
  ## the same number via a needless sum(table(P_)) tabulation.
  exp(params[1] * t - params[2] * length(P_))
}
## Simulate a self-correcting process on [0, T] by thinning.
## The proposal bound is the supplied intensity evaluated one time unit
## ahead (intended to dominate on the next interval — confirm this holds
## for any new intensity function passed in).
genSCP <- function(T, intensity_scp, params) {
  now <- 0
  events <- c()
  while (now < T) {
    bound <- intensity_scp(now + 1, events, params)
    now <- now + rexp(1, rate = bound)
    thin_u <- runif(1, 0, bound)
    ## accept with probability intensity/bound
    if ((now < T) && (thin_u <= intensity_scp(now, events, params))) {
      events <- c(events, now)
    }
  }
  return (events)
}
# sigmoid <- function(x) 1/(1+exp(x))
## Draw a binary mark (+1 or -1) for the SSCT model.
## P(mark = +1) = exp(s) / (exp(-s) + exp(s)), s = sum(params * r * y),
## where r and y are the recent covariate and mark histories.
markerSSCT <- function(r, y, params) {
  prob_plus <- function(x, r, y, a) {
    s <- sum(a * r * y)
    exp(x * s) / (exp(-s) + exp(s))
  }
  draw <- runif(1)
  if (draw < prob_plus(1, r, y, params)) {
    return (1)
  }
  return (-1)
}
intensity_ssct <- function(a,r,y) exp(sum(a*r*y))
## Waiting time to the next SSCT event given recent covariates r and
## marks y: exponential with rate intensity_ssct(params, r, y).
pointSSCT <- function(r, y, params) {
  lambda <- intensity_ssct(params,r,y)#/(exp(-1 * sum(a*r*y)) + exp(1 * sum(a*r*y)))
  ## NOTE(review): debug print left in — fires on every simulated event.
  print(c(params, r, y, lambda))
  d <- rexp(1, lambda)
  return (d)
}
## Simulate the SSCT marked process up to time T.
## Maintains three parallel histories: event times t_seq, binary
## covariates r_seq (1 when the event time mod 24 is below 12), and
## +/-1 marks y_seq. The next waiting time (pointSSCT) and mark
## (markerSSCT) depend on the last m = 3 (r, y) pairs.
## Returns list(t_seq, y_seq).
genSSCT <- function(T, params) {
  ## random initial 3-step history for covariates and marks
  r_seq <- as.integer(runif(3)*2)
  y_seq <- as.integer(runif(3)*2)*2-1
  ## history window length used by tail() below
  m <- 3
  t <- 0
  t_seq <- c()
  while (t < T) {
    ## waiting time to the next event given the current history
    tn <- pointSSCT(tail(r_seq, m), tail(y_seq, m), params)
    t <- t + tn
    # print(t)
    ## covariate: 1 when t mod 24 < 12 (a half-day indicator)
    rn <- as.integer(t%%24 < 12)
    ## NOTE(review): debug print left in — fires once per event.
    print(rn)
    yn <- markerSSCT(tail(r_seq, m), tail(y_seq, m), params)
    ## NOTE(review): the event is recorded even when t overshoots T, so
    ## the final entry of t_seq can exceed the horizon — confirm intended.
    t_seq <- c(t_seq, t)
    r_seq <- c(r_seq, rn)
    y_seq <- c(y_seq, yn)
  }
  return(list(t_seq, y_seq))
}
## Simulate a homogeneous Poisson process on [0, T] with rate params[1]
## by summing iid exponential inter-arrival times.
genHPP <- function(T, params) {
  t <- 0
  P <- c()
  while (t < T) {
    e <- rexp(1, rate = params[1])
    t <- t + e
    ## Record the arrival only if it falls inside the horizon. The
    ## original also appended the first arrival PAST T, unlike every
    ## other generator in this file (genHP/genNHPP/genSCP), which all
    ## guard on t < T before recording an event.
    if (t < T) {
      P <- c(P, t)
    }
  }
  return (P)
}
## Demo: simulate each process type on [0, 100], plot its intensity as a
## line, and overlay the simulated event times as points near the axis.
## NOTE(review): the bare par() calls below only RETURN the current
## graphics settings — they change nothing; presumably leftovers.
paramhpp <- c(.7)
datahpp <- genHPP(100, paramhpp)
plot(seq(1,100), rep(paramhpp[1], 100), type='l', ylim=c(0, paramhpp[1]+1))
par()
points(datahpp, rep_len(0.1, length(datahpp)))
## Non-homogeneous Poisson process with sinusoidal intensity.
paramnhpp <- c(.1)
datanhpp <- genNHPP(100, intensity_nhpp, paramnhpp)
plot(seq(1,100,.5), sapply(seq(1,100,.5), function(x) intensity_nhpp(x, paramnhpp)), type='l')
par()
points(datanhpp, rep_len(0.1, length(datanhpp)))
## Self-correcting process; intensity plotted given the final history.
paramscp <- c(.5, .2)
datascp <- genSCP(100, intensity_scp, paramscp)
plot(seq(1,100,.5), sapply(seq(1,100,.5), function(x) intensity_scp(x, datascp, paramscp)), type='l')
par()
points(datascp, rep_len(1, length(datascp)))
## Hawkes process; intensity plotted given the final history.
paramshp <- c(.9, .1, 1.0)
datahp <- genHP(100, intensity_hp, paramshp)
plot(seq(1,100,.5), sapply(seq(1,100,.5), function(x) intensity_hp(x, datahp, paramshp)), type='l')
par()
points(datahp, rep_len(1, length(datahp)))
## Marked SSCT process: plot inter-event gaps over time, points coloured
## by mark (+1 / -1 shifted by 2 into valid colour indices).
paramsssct <- c(-.2, 0.8, -.8)
datassct <- genSSCT(100, paramsssct)
plot(datassct[[1]][2:length(datassct[[1]])], diff(datassct[[1]]), type='l')
par()
points(datassct[[1]], rep_len(.1, length(datassct[[1]])), col=datassct[[2]]+2)
|
91814107e858a8cece55919e042f5188d5a693de
|
cf044807d8d2a5c75f5576b701778afa9c7243cb
|
/src/parse_config.R
|
f803e332e8d184ef68b9e04030d376a74312baed
|
[
"MIT"
] |
permissive
|
indigoblueraspberry/MuSTA
|
3b1e1a4f13b575055238334c7a9d62f93976bd53
|
92e909a08fbbce08444bccd235d14e2f83cb3901
|
refs/heads/master
| 2022-11-30T03:45:36.558244
| 2020-08-10T14:14:23
| 2020-08-10T14:14:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,417
|
r
|
parse_config.R
|
####general####
# Mutation/SV input format; only the Genomon layout is handled so far.
mut_sv_mode <- "genomon" #TODO VCF or Genomon
####software####
# External executables the pipeline shells out to.
softwares <-
  c("sqanti_filter.py", "sqanti_qc.py", "lordec-build-SR-graph", "lordec-correct",
    "minimap2", "samtools", "salmon", "seqkit")
# Names of the CLI arguments that hold each executable's path
# (parallel to `softwares`).
software_path_argnames <-
  c("sqanti_filter", "sqanti_qc", "lordec_build_SR_graph", "lordec_correct",
    "minimap2", "samtools", "salmon", "seqkit")
# Version-query flag for each tool ("" when the tool prints its version
# without a flag), parallel to `softwares`.
soft_ver_comm <-
  c("-v", "-v", "", "", "--version", "--version", "--version", "version")
# One parser per tool that strips the tool-name prefix from the version
# string, parallel to `softwares`.
soft_ver_parse_fun <-
  list(
    (function(x) str_remove(x, "^SQANTI ")),
    (function(x) str_remove(x, "^SQANTI ")),
    (function(x) str_remove(x, "^LoRDEC v")),
    (function(x) str_remove(x, "^LoRDEC v")),
    (function(x) x),
    (function(x) str_remove(x, "^samtools ")),
    (function(x) str_remove(x, "^salmon ")),
    (function(x) str_remove(x, "^seqkit v"))
  )
# Canonical lower-case, underscore-only tool identifiers.
soft_names <- softwares %>% str_to_lower() %>% str_replace_all("-", "_")
# Guard the parallel vectors/lists against drifting out of sync.
stopifnot(length(softwares) == length(soft_ver_comm))
stopifnot(length(softwares) == length(soft_ver_parse_fun))
# Map CLI-provided paths onto the canonical tool names; `args` and
# check_soft_path_and_get_ver() are defined elsewhere in the pipeline.
soft <- args[software_path_argnames] %>% setNames(soft_names)
soft_ver <- check_soft_path_and_get_ver(softwares, soft, soft_ver_comm, soft_ver_parse_fun)
####que_requirement####
# Per-process job requirements, read from config.yml — one config
# section per pipeline step.
process_names <-
  c("fasta2BSgenome", "lordec_build", "lordec", "minimap2", "qual_filter", "samtools", "intra_merge", "inter_merge",
    "gtf2fasta", "salmon_index", "salmon_quant", "merge_salmon", "sqanti",
    "filter_variant", "link_original_range", "fusion_bind", "fusion_parse", "fusion_sqanti", "fusion_summary", "report_number")
que_req <-
  map(process_names, ~ config::get(config = ..1, file = src$config.yml)) %>%
  setNames(process_names)
####input####
# Normalise user-supplied paths (strip trailing slash, expand ~, make
# absolute); `args` is parsed elsewhere.
output_dir <- args$output %>% str_remove("/$") %>% fs::path_expand() %>% fs::path_abs()
ref_gtf <- args$gtf %>% fs::path_expand() %>% fs::path_abs()
genome_fa <- args$genome %>% fs::path_expand() %>% fs::path_abs()
result_dir <- path(output_dir, "result")
merge_dir <- path(output_dir, "merge")
script_dir <- path(output_dir, "script")
####input files####
# Sample sheet: all columns read as character.
input_files <- read_csv(
  args$input,
  col_types = cols(.default = col_character())
)
# Recognised columns; any recognised column missing from the sheet is
# added as all-NA so downstream code can index it unconditionally.
possible_cols <- c("sample", "long_read_hq", "long_read_lq", "short_read_1", "short_read_2", "cluster_report", "SJ", "mutation", "sv")
selected_cols <- intersect(colnames(input_files), possible_cols)
allna_cols <- setdiff(possible_cols, selected_cols)
input_files <- input_files[selected_cols]
for (col in allna_cols) {
  input_files[[col]] <- rep(NA_character_, nrow(input_files))
}
samples <- input_files[["sample"]]
# check: required columns depend on the lordec / low-quality-read flags.
mandatory_cols <- c("sample", "long_read_hq")
if (args$no_lordec == FALSE) mandatory_cols <- c(mandatory_cols, "short_read_1", "short_read_2")
if (args$use_lq == TRUE) mandatory_cols <- c(mandatory_cols, "long_read_lq")
missing_cols <- setdiff(mandatory_cols, colnames(input_files))
if (length(missing_cols) > 0L) {
  abort(paste0("Some mandatory columns are missing in the input csv file: ", str_c(missing_cols, collapse = ", ")), "requirement error")
}
# Sample identifiers must be unique.
dup_sample_id <- samples[duplicated(samples)]
if (length(dup_sample_id) > 0L) {
  abort(paste0("All sample identifiers in the input csv file must be unique: ", str_c(dup_sample_id, collapse = ", ")), "requirement error")
}
# Column-wise NA checks (check_no_na defined elsewhere); TRUE marks
# columns where NA entries are allowed/handled differently.
walk2(c("long_read_hq", "short_read_1", "short_read_2", "cluster_report", "SJ"), c(FALSE, FALSE, FALSE, TRUE, TRUE), check_no_na)
if (args$use_lq) check_no_na("long_read_lq")
|
bb73ee8aaebea59d468ceaad0daf61061feb6c0d
|
244d83e95927048b93885d2b2b56f298e3a1b645
|
/qPCR_data_workup.R
|
dbda971b3833d1a940e5976a6895a63e245a1195
|
[] |
no_license
|
juliambrosman/Summer2014TSAnalysisTime
|
6c3f77eb14d4c7caec7c61f07a7b23d2ffba1f45
|
962064e9ebe4f9df07b6e7dd4d659b79a15b3884
|
refs/heads/master
| 2021-01-19T07:24:42.373698
| 2014-07-16T10:34:52
| 2014-07-16T10:34:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,813
|
r
|
qPCR_data_workup.R
|
## Import qPCR data.
qpcr.data=read.table("TimeSeriesqPCR_data.csv", sep=",",header=TRUE)
## Parse the date column as a Date (m/d/yy format).
qpcr.data$date <- as.Date(qpcr.data$date, "%m/%d/%y")
# Convert per-reaction copies to per-sample copies (scale by elution
# volume over filtered volume) and scale the SDs the same way.
qpcr.data$rlcp1.smpl<-(qpcr.data$rlcp1.rxn.copies*qpcr.data$elute.vol)/qpcr.data$filt.vol
qpcr.data$rlcp1.smpl.sd<-(qpcr.data$rlcp1.rxn.stdev*qpcr.data$elute.vol)/qpcr.data$filt.vol
qpcr.data$rlcp2a.smpl<-(qpcr.data$rlcp2a.rxn.copies*qpcr.data$elute.vol)/qpcr.data$filt.vol
qpcr.data$rlcp2a.smpl.sd<-(qpcr.data$rlcp2a.rxn.stdev*qpcr.data$elute.vol)/qpcr.data$filt.vol
qpcr.data$rlcp4.smpl<-(qpcr.data$rlcp4.rxn.copies*qpcr.data$elute.vol)/qpcr.data$filt.vol
qpcr.data$rlcp4.smpl.sd<-(qpcr.data$rlcp4.rxn.stdev*qpcr.data$elute.vol)/qpcr.data$filt.vol
## Load dplyr to start grouping data.
library("dplyr", lib.loc="/Library/Frameworks/R.framework/Versions/3.0/Resources/library")
# Group data by sampling date and location (lake).
quant.groups<-group_by(qpcr.data,date,lake)
# Calculating mean abundance per timepoint and location and pooled variance according to a calculation I found on the internet here:
# http://stackoverflow.com/questions/16974389/how-to-calculate-a-pooled-standard-deviation-in-r
# Example pooled standard deviation from which I'm working:
# df$df <- df$n-1
# pooledSD <- sqrt( sum(df$sd^2 * df$df) / sum(df$df) )
# NOTE(review): with 3 replicates per sample, the cited formula's
# denominator would be sum(df) = count*(3-1); the code uses
# (count*3)-1 instead — confirm which pooled-SD convention is intended.
per.locale<-summarise(quant.groups,count=n(), rlcp1=mean(rlcp1.smpl),
                      rlcp1.sd=sqrt(sum((rlcp1.smpl.sd^2)*(3-1))/((count*3)-1)),
                      rlcp2a=mean(rlcp2a.smpl),
                      rlcp2a.sd=sqrt(sum((rlcp2a.smpl.sd^2)*(3-1))/((count*3)-1)),
                      rlcp4=mean(rlcp4.smpl),
                      rlcp4.sd=sqrt(sum((rlcp4.smpl.sd^2)*(3-1))/((count*3)-1)))
View(per.locale)
# per.locale now holds all the summarised data.
|
d579d2c8ae77226443471ec0a85ddd0ecb259521
|
b9b42fdda0caa249802bf3e82755723981e02064
|
/cachematrix.R
|
e6583c6c0d219924a6ddbfb5905530a0e2c6187a
|
[] |
no_license
|
ganaidu/ProgrammingAssignment2
|
303c03e38f2d8b3a54bc0b7abcc927ab14df6257
|
d98dde92d8810b7150e3887f48aa59869e935487
|
refs/heads/master
| 2021-01-11T17:05:43.066481
| 2017-01-29T19:16:41
| 2017-01-29T19:16:41
| 79,716,649
| 0
| 0
| null | 2017-01-22T12:33:06
| 2017-01-22T12:33:05
| null |
UTF-8
|
R
| false
| false
| 1,561
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix: wrap a matrix together with a cache for its inverse.
##
## Returns a list of four closures that share state through the enclosing
## environment:
##   set(y)        -- replace the stored matrix and clear the cached inverse
##   get()         -- return the stored matrix
##   setinv(solve) -- store a computed inverse in the cache
##   getinv()      -- return the cached inverse; an all-NA matrix of the same
##                    shape as the data means "nothing cached yet"
makeCacheMatrix <- function(x = matrix( numeric() )) {
  # An NA-filled matrix matching x's shape marks an empty cache
  # (cacheSolve() detects this with all(is.na(...))).
  cached <- matrix(numeric(), nrow = nrow(x), ncol = ncol(x))

  list(
    # replace the stored matrix and invalidate the cached inverse
    set = function(y) {
      x <<- y
      cached <<- matrix(numeric(), nrow = nrow(x), ncol = ncol(x))
    },
    # read back the stored matrix
    get = function() x,
    # cache a freshly computed inverse
    setinv = function(solve) cached <<- solve,
    # read the cached inverse (all-NA when empty)
    getinv = function() cached
  )
}
## cacheSolve: return the inverse of the matrix held by a cache object.
##
## `x` must be a cache object as produced by makeCacheMatrix(): a list
## providing get()/getinv()/setinv() closures.  If an inverse has already
## been stored (the cached matrix is not all-NA), it is returned directly;
## otherwise the inverse is computed with solve(), stored back into the
## cache, and returned.
##
## @param x   cache object created by makeCacheMatrix()
## @param ... additional arguments forwarded to solve() (e.g. tol)
## @return the matrix inverse of x$get()
cacheSolve <- function(x, ...) {
  inv <- x$getinv()
  # An all-NA cache matrix means "not computed yet"; anything else is a hit.
  if (!all(is.na(inv))) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  # BUG FIX: the original called solve(data) and silently dropped `...`
  # even though the signature advertises forwarding; pass it through.
  inv <- solve(data, ...)
  x$setinv(inv)
  inv
}
|
d4831212738fe6acc28f7627adeebf3b98adbb04
|
3caf493c913baf4aa64ec49430ada88fde9bbabd
|
/misc/utils/PMS/prepare-LUT.R
|
0d789d0162e39ce7b35dc6e80e652d1e0d7bbcc6
|
[] |
no_license
|
Memo1986/aqp
|
7af94fe03909d926f689fefbbb3a17d49593615e
|
5d7a98823a0c03c53214efca3bf545dc9a297721
|
refs/heads/master
| 2023-07-17T17:06:37.286703
| 2021-08-27T15:21:32
| 2021-08-27T15:21:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,101
|
r
|
prepare-LUT.R
|
library(aqp)
## TODO: the following creates the Pantone -> Munsell LUT
## this isn't the same as the Munsell -> Pantone LUT (entries currently missing)
# https://github.com/ncss-tech/aqp/issues/124
# sourced from:
# https://raw.githubusercontent.com/ajesma/Pantoner/gh-pages/csv/pantone-coated.csv
# https://github.com/ajesma/Pantoner/raw/gh-pages/csv/pantone-uncoated.csv
x.coated <- read.csv('pantone-coated.csv', stringsAsFactors = FALSE)
x.uncoated <- read.csv('pantone-uncoated.csv', stringsAsFactors = FALSE)
# OK
head(x.coated)
head(x.uncoated)
# stack, names can be used
x <- rbind(x.coated, x.uncoated)
str(x)
head(x)
# convert
m <- rgb2munsell(t(col2rgb(x$hex)) / 255)
# merge to make the combined lookup table
z <- cbind(x, m)
str(z)
# re-name
names(z) <- c('code', 'hex', 'hue', 'value', 'chroma', 'dE00')
# condense Munsell notation
z$munsell <- sprintf("%s %s/%s", z$hue, z$value, z$chroma)
# subset
z <- z[, c('code', 'hex', 'munsell', 'dE00')]
str(z)
# re-name and save
pms.munsell.lut <- z
# save
save(pms.munsell.lut, file = '../../../data/pms.munsell.lut.rda')
|
ae35764f3438d4c024c1e9ee78043df748f92a51
|
64988d8ad2b3b8ef76649e919e9175523a68c2c6
|
/man/fitted.spARCH.Rd
|
ef5bbe0d75fd9e39540bdf74bc5700fdd1557015
|
[] |
no_license
|
cran/spGARCH
|
ba3278b6bcdb7f163963265f7417c157bbea0504
|
e929dc5d7f9377a8950ddb259d2ed799cd03a082
|
refs/heads/master
| 2021-05-25T11:01:21.926100
| 2020-09-02T06:10:03
| 2020-09-02T06:10:03
| 127,176,180
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 748
|
rd
|
fitted.spARCH.Rd
|
\name{fitted.spARCH}
\alias{fitted.spARCH}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Extract model fitted values
}
\description{
The function extracts the fitted values of a spatial ARCH model.
}
\usage{
## S3 method for class 'spARCH'
\method{fitted}{spARCH}(object, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{\code{spARCH} object generated by \code{\link{qml.spARCH}} or \code{\link{qml.SARspARCH}}.}
\item{...}{
Other arguments.
}
}
\details{
Fitted values extracted from the \code{object}.
}
\author{
Philipp Otto \email{potto@europa-uni.de}
}
\seealso{
\code{\link[spGARCH]{residuals.spARCH}}, \code{\link[spGARCH]{logLik.spARCH}}.
}
|
26d0911a80dbff3df568a2d4cb1a990cf0b67851
|
f1c5692b05ed04064d8f7a8c0890eedc8f0981e8
|
/R/jvaLast.R
|
10e3bfb9cc6f872ce8f894dd66c6461df8cb5fea
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
JVAdams/jvamisc
|
7caba076e1f756febd675ac40f9ed9640facba7b
|
522f50a3c8ff09899bfa321b260daf8c5e860e22
|
refs/heads/master
| 2021-08-28T12:54:39.344004
| 2021-08-04T18:55:27
| 2021-08-04T18:55:27
| 19,327,794
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 660
|
r
|
jvaLast.R
|
#' Shutdown
#'
#' Runs the commands typically wanted at the end of an interactive R
#' session: raises the history size, prints a timestamp, and saves the
#' command history. Does nothing in a non-interactive session.
#'
#' @param file
#'   Character scalar, file in which to save the commands history relative to
#'   current working directory, default ".Rhistory".
#' @param nlines
#'   Integer scalar, number of lines that saved to commands history,
#'   default 10,000.
#' @export
#' @seealso
#'   \code{\link{savehistory}}, \code{\link{Sys.setenv}}.
jvaLast <- function(file = ".Rhistory", nlines = 10000) {
  # Guard clause: history only exists in interactive sessions.
  if (!interactive()) {
    return(invisible(NULL))
  }
  Sys.setenv(R_HISTSIZE = nlines)
  cat("\nSession ended at ...............................", date(), "\n")
  # savehistory() can fail (e.g. unwritable path); don't abort shutdown.
  try(savehistory(file))
}
|
de051399a5fde01a33ba1a7766785285d7b88029
|
452042d9a5cb876a90a9cb1f4c802d0f4b1453c7
|
/R/flea_rosters.R
|
cfcda2d4ebb160f40d961d8dc636b1614d86369a
|
[
"MIT"
] |
permissive
|
jpiburn/ffscrapr
|
dc420370f6940275aaa8cb040c5ec001a25268b8
|
4e7bda862500c47d1452c84a83adce7ee1987088
|
refs/heads/main
| 2023-06-02T00:09:09.670168
| 2021-06-12T15:52:23
| 2021-06-12T15:52:23
| 377,976,824
| 1
| 0
|
NOASSERTION
| 2021-06-17T22:42:26
| 2021-06-17T22:42:26
| null |
UTF-8
|
R
| false
| false
| 1,431
|
r
|
flea_rosters.R
|
#### ff_rosters (Fleaflicker) ####

#' Get a dataframe of roster data
#'
#' @param conn a conn object created by `ff_connect()`
#' @param ... arguments passed to other methods (currently none)
#'
#' @return a tibble with one row per rostered player, carrying franchise and
#'   player identifiers (see the column selection at the end of the pipeline)
#'
#' @examples
#' \donttest{
#' joe_conn <- ff_connect(platform = "fleaflicker", league_id = 312861, season = 2020)
#' ff_rosters(joe_conn)
#' }
#' @describeIn ff_rosters Fleaflicker: Returns roster data (minus age as of right now)
#' @export
ff_rosters.flea_conn <- function(conn, ...) {
  # Fetch the raw roster payload for the connected league, then flatten the
  # nested list structure down to one row per rostered player.
  df_rosters <- fleaflicker_getendpoint("FetchLeagueRosters",
    sport = "NFL",
    external_id_type = "SPORTRADAR",
    league_id = conn$league_id
  ) %>%
    # drill into the rosters element of the response body
    purrr::pluck("content", "rosters") %>%
    tibble::tibble() %>%
    # one column per top-level roster field
    tidyr::unnest_wider(1) %>%
    # lift franchise id/name out of the nested `team` record
    tidyr::hoist("team", "franchise_id" = "id", "franchise_name" = "name") %>%
    dplyr::select(-"team") %>%
    # expand to one row per player on each roster
    tidyr::unnest_longer("players") %>%
    tidyr::hoist("players", "proPlayer") %>%
    # pull the player fields we keep out of the nested proPlayer record
    tidyr::hoist("proPlayer",
      "player_id" = "id",
      "player_name" = "nameFull",
      "pos" = "position",
      "team" = "proTeamAbbreviation",
      "externalIds"
    ) %>%
    # first entry of externalIds is taken as the SportRadar id -- presumably
    # safe because the endpoint was queried with external_id_type = "SPORTRADAR";
    # NA when a player has no external ids
    dplyr::mutate(sportradar_id = purrr::map_chr(.data$externalIds, purrr::pluck, 1, "id", .default = NA)) %>%
    # keep only the tidy output columns (any_of tolerates missing ones)
    dplyr::select(dplyr::any_of(c(
      "franchise_id",
      "franchise_name",
      "player_id",
      "player_name",
      "pos",
      "team",
      "sportradar_id"
    )))

  return(df_rosters)
}
|
c512bc4c363715ccbfcc410e83f38302a3f9e2be
|
d317f7e6a38bd252cfdf69e3846905c24e14f2fc
|
/man/make_qc_dt.Rd
|
455fb712ea6a23af4c72975d866aaf5784f289e5
|
[
"MIT"
] |
permissive
|
Yixf-Self/SampleQC
|
208bb170884c6c9dbc1596a277186abd5a43d548
|
82f4483eafdaac93c17710bf605e147ad4611f0c
|
refs/heads/master
| 2023-07-28T06:09:25.504772
| 2021-08-29T14:05:10
| 2021-08-29T14:05:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,427
|
rd
|
make_qc_dt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_qc_dt.R
\name{make_qc_dt}
\alias{make_qc_dt}
\title{Checks specified QC metrics and makes data.table for input to
\code{calc_pairwise_mmds}.}
\usage{
make_qc_dt(
qc_df,
sample_var = "sample_id",
qc_names = c("log_counts", "log_feats", "logit_mito"),
annot_vars = NULL
)
}
\arguments{
\item{qc_df}{data.frame object containing calculated QC metrics}
\item{sample_var}{which column of qc_df has sample labels? (e.g. sample, group,
batch, library)}
\item{qc_names}{list of qc_names that need to be extracted}
\item{annot_vars}{list of user-specified sample-level annotations}
}
\value{
qc_dt, a data.table containing the sample variable plus qc metrics
}
\description{
Takes a \code{data.frame} of raw QC metrics, and makes a nice neat
\code{data.table} output that can be used in \pkg{SampleQC}. For example,
users with a \pkg{SingleCellExperiment} object \code{sce} may first run
\code{scater::calculateQCMetrics}, then call \code{make_qc_dt(colData(sce))}.
We work with \code{data.frame}/\code{data.table} objects to have the most
flexible possible approach (and to save work on e.g. keeping up with changes
to dependencies like \pkg{SingleCellExperiment} and \pkg{Seurat}).
}
\details{
This code also calculates some sample-level statistics, e.g. median log
library size per sample, and adds columns with binned values for these.
}
|
6551bbd2d7a5119c49f8a9a5bceff2f03a96a3e9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lmom/examples/lmrdpoints.Rd.R
|
8782d167667a1bd0f3563888aea2b59684e25409
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 774
|
r
|
lmrdpoints.Rd.R
|
library(lmom)
### Name: lmrdpoints
### Title: Add points or lines to an L-moment ratio diagram
### Aliases: lmrdpoints lmrdlines
### Keywords: hplot
### ** Examples
# Plot L-moment ratio diagram of Wind from the airquality data set
data(airquality)
lmrd(samlmu(airquality$Wind), xlim=c(-0.2, 0.2))
# Sample L-moments of each month's data
( lmom.monthly <- with(airquality,
t(sapply(5:9, function(mo) samlmu(Wind[Month==mo])))) )
# Add the monthly values to the plot
lmrdpoints(lmom.monthly, pch=19, col="blue")
# Draw an L-moment ratio diagram and add a line for the
# Weibull distribution
lmrd(xaxs="i", yaxs="i", las=1)
weimom <- sapply( seq(0, 0.9, by=0.01),
function(tau3) lmrwei(pelwei(c(0,1,tau3)), nmom=4) )
lmrdlines(t(weimom), col='darkgreen', lwd=2)
|
6603d4e371c82092877b02c52617a702012a9572
|
f2ef99e52520a24fd08e0950937549df785481e4
|
/data-products/final/photo-voltaic-growth/ui.R
|
783c05397fb52b3977a019aa8427d60d5b62bfe9
|
[] |
no_license
|
acuariano/datasciencecoursera
|
6b3ac276f99ec2c2b129b56f1477216746364983
|
ec42a4e2943a5ed8ba82bc0398816d000672f831
|
refs/heads/master
| 2020-05-29T16:42:40.006407
| 2018-06-04T13:37:56
| 2018-06-04T13:37:56
| 40,692,817
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,828
|
r
|
ui.R
|
library(shiny)
library(shinyBS)
library(plotly)
shinyUI(fluidPage(
titlePanel("Solar photo-voltaic capacity growth"),
sidebarLayout(
sidebarPanel(
checkboxInput("use_scale_log", "Log Scale", value = TRUE),
strong("Exponential gowth bases"), span("(?)", id="basesHelp", style="color:blue;font-weight:bold;"), br(),
"China:", textOutput(inline = TRUE, outputId = "china_base"), br(),
"Germany:", textOutput(inline = TRUE, outputId = "germany_base"), br(),
"USA:", textOutput(inline = TRUE, outputId = "usa_base"), br(),
"Japan:", textOutput(inline = TRUE, outputId = "japan_base"), br(),
"World:", textOutput(inline = TRUE, outputId = "world_base"), br(),
br(),
"You can switch between log and linear scales. The linear scale gives you an idea of the stagering",
" growth we are experiencing. But for comparisons, the log scale is better.",
br(),br(),
"You can drag in the Plotly chart to select points for the bases calculation. I recommend you drag",
" in a horizontal line to select the segment of the graph.",
br(),br(),
"Try selecting the segments [2005, 2015] and [2010, 2015] to see the growth rate for the last 10",
" and 5 years respectively.",
br(),br(),
"Also try deselecting each country by clicking in their labels. Leave only 'World' and 'GTM Research',",
"so you can compare the best projection from ",
a("Wikipedia",href="https://en.wikipedia.org/wiki/Growth_of_photovoltaics"), " to the derived growth rate.",
br(),br(),
em("The gray area are projections (after 2015.)"),
br(),br(),
"You can access the presentation for this application ",
a("here", href="https://acuariano.github.io/datasciencecoursera/data-products/final/", target="_blank"),
".",
bsTooltip("basesHelp", "The base is calculated with the formula (final-initial)^(1/years). <br/> It uses the selection to calculate the base.",
"right", options = list(container = "body"))
),
mainPanel(
h3("PV Growth"),
plotlyOutput("plot1", height = 600)
# plotOutput("plot1", brush =brushOpts(id = "brush1"))
)
)
))
|
5ac08509be329c3b34d12849f38b5cfe8fade73f
|
308d520a92c181b7b37ba101de769b48507fad49
|
/run_analysis_imputeNA.R
|
40cf441c97c81ad5894509ad5550b1d077e14bd8
|
[] |
no_license
|
igilroy/RepData_PeerAssessment1
|
ac9f9a35d112ebab71d92aa915e7fef0d12bc8d8
|
f2661eefd43ee49a3cdbc19de3fca886464c3f6a
|
refs/heads/master
| 2020-12-30T23:20:56.127841
| 2016-01-11T06:58:17
| 2016-01-11T06:58:17
| 49,251,877
| 0
| 0
| null | 2016-01-08T05:33:20
| 2016-01-08T05:33:20
| null |
UTF-8
|
R
| false
| false
| 7,200
|
r
|
run_analysis_imputeNA.R
|
# Script: run_analysis_imputeNA.R
# The purpose of this script is to read a CSV data file of
# human activity as measured by the embedded gyroscope and accelerometer
# a personal activity monitoring device.
# This device collects data at 5 minute intervals through out the day.
# The data consists of two months of data from an anonymous individual collected
# during the months of October and November, 2012 and include the number of steps
# taken in 5 minute intervals each day.
#
# The variables included in this dataset are:
# - steps: Number of steps taking in a 5-min interval (missing values are coded as 𝙽𝙰)
# - date: The date on which the measurement was taken in YYYY-MM-DD format
# - interval: Identifier for the 5-min interval in which measurement was taken
# The dataset is stored in a comma-separated-value (CSV) file and there are a total of
# 17,568 observations in this dataset.
# This script:
# - reads the CSV file into a table
# - calculates and report the total number of missing values in the dataset
# (i.e. the total number of rows with 𝙽𝙰s)
# - creates a new dataset by filling in all of the missing values in the dataset.
# This is achieved by using the mean for that 5-minute interval.
# - produces a histogram of the total number of steps taken each day.
# - calculates and reports the mean and median total number of steps taken per day.
# - using the modified data set:
# - creates a new factor variable in the dataset with two
# levels: “weekday” and “weekend” indicating whether a given
# date is a weekday or weekend day.
# - produces a panel plot containing a time series plot of the
# 5-minute interval (x-axis) and the average number of steps taken,
# averaged across all weekday days or weekend days (y-axis).
## Load packages and libraries.
## Install only when missing -- unconditionally re-installing on every run is
## slow and needs network access.  (BUG FIX: the original called
## install.packages(lattice) with an unquoted name, which errors because the
## symbol `lattice` is undefined at that point; package names must be strings.)
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")
library(dplyr)
if (!requireNamespace("lattice", quietly = TRUE)) install.packages("lattice")
library(lattice)
## Read in the Activity data: steps (integer), date (Date), interval (integer).
## Path is relative to this script's working directory.
activity.data <- read.table("../data/activity.csv", sep = ",",
                            colClasses = c("integer", "Date", "integer"),
                            header = TRUE)
## Calculate and report the total number of missing values in the dataset
## (i.e. the total number of rows with 𝙽𝙰)
# missing.steps<-is.na(activity.data[,1])
print(paste("Number of missing values is:",sum(is.na(activity.data[,1])),sep=" "))
## Create a new dataset by filling in all of the missing values in the dataset.
## This is achieved by using the mean for that 5-minute interval.
### Calculate the mean values for each interval
### Group data based on interval
interval.activity<-select(activity.data,interval,steps) ## subset data to just the columns we need
interval.activity <- group_by(interval.activity,interval) ## Group data by 5-minute time interval
### Calculate the mean for each interval
interval.activity <- summarise_each(interval.activity,funs(mean(steps, na.rm=TRUE))) ## sum the data for each date
### Copy original data table to new table
activity.data.imputed <- activity.data
### For each entry where "steps" value is missing, assign the mean vale for that interval
## (There is probably a more elegant way to do this but this will work to start with)
for ( iptr in 1:nrow(activity.data.imputed)) { ## for each row in the data file
if (is.na(activity.data.imputed[iptr,1])) { ## if "steps" data is missing ...
index<-interval.activity[,1]==activity.data.imputed[iptr,3] ## get the interval as index
activity.data.imputed[iptr,1]<-interval.activity[index,2] ## substitute "NA" with mean for that interval
}
}
## Calculate the total number of steps taken each day
## Group by day and sum each group
daily.activity.imputed<-select(activity.data.imputed,date,steps) ## subset data to just the columns we need
daily.activity.imputed <- group_by(daily.activity.imputed,date) ## Group data by date
daily.activity.imputed <- summarise_each(daily.activity.imputed,funs(sum(steps))) ## sum the data for each date
## Create a histogram plot with minimal formatting
par(mfrow = c(1, 1)) ## Specify that plots will be arranged in 1 rows of 1 plots
png(filename="figure/Plot3-Hist-Total-Imputed.png") ## Open the output file
with(daily.activity.imputed,hist(steps,col="red",main="Histogram of Total Steps",
xlab="Steps", breaks=100 ))
with(daily.activity.imputed,rug(steps))
## Calculate mean and median
mean.steps.imputed <- colMeans(daily.activity.imputed[,2],na.rm=TRUE)
median.steps.imputed <- apply(X = daily.activity.imputed[,2], MARGIN=2, FUN = median, na.rm = TRUE)
## Add to plot
abline(v=mean.steps.imputed, col="navy",lwd=5)
abline(v=median.steps.imputed,col="purple",lwd=5,lty=2)
legend("topright", pch = "-", lwd=3, col = c("navy", "purple"), bty="n",
legend = c("Mean", "Median"))
dev.off() ## Close the output file
## Print the mean and median total steps taken
print(paste("Mean total daily steps taken: ",(as.numeric(mean.steps.imputed)),sep=" "))
print(paste("Median total daily steps taken: ",(as.numeric(median.steps.imputed)),sep=" "))
## Creates a new factor variable in the dataset with two
## levels: “weekday” and “weekend” indicating whether a given
## date is a weekday or weekend day.
### add a column called "day" based on "date" field
activity.data.imputed<-mutate(activity.data.imputed, day=(weekdays(date)))
### For each entry determine if it occurs on a weekend or week day and label accordingly
## (There is probably a more elegant way to do this but this will work to start with)
for ( iptr in 1:nrow(activity.data.imputed)) { ## for each row in the data file
if (grepl("^S",activity.data.imputed[iptr,4])) { ## is it a Saturday or Sunday?
activity.data.imputed[iptr,4]<- "weekend" ## if so label as "weekend"
} else {
activity.data.imputed[iptr,4]<- "weekday" ## if not label as "weekday"
}
}
### convert "day" column to a factor
activity.data.imputed<-mutate(activity.data.imputed, as.factor(day))
## Create a panel plot containing a time series plot of the 5-minute interval
## (x-axis) and the average number of steps taken, averaged across all weekday days
## or weekend days (y-axis).
### group each by 5 minute interval and calculate average number of steps per interval
interval.activity.imputed <- select(activity.data.imputed,interval,steps,day) ## subset data to just the columns we need
interval.activity.imputed <- group_by(interval.activity.imputed,day,interval) ## Group data by 5-minute time interval
### Calculate the mean for each interval
interval.activity.imputed <- summarise_each(interval.activity.imputed,funs(mean(steps, na.rm=TRUE))) ## sum the data for each date
### Create panel plot
xyplot(steps~interval|day,data=interval.activity.imputed,
layout=c(1,2),type="l",
xlab="Time Interval",ylab="Mean Steps")
|
deae6ac668bdf0f71fdb7e892ec1330807774268
|
1efb64b70a2acb08355702bc27e96a794d7c7697
|
/man/check_save_path.Rd
|
f5c1b40a00265356bbbb3f45c7627c530a51b595
|
[] |
no_license
|
daklab/MungeSumstats
|
cb84af95608b7867343beda9ef122821e02da3e6
|
6f8946b9967f7c770f5ed7297934d3dcd1498a85
|
refs/heads/master
| 2023-08-07T18:02:51.407318
| 2021-09-17T08:50:55
| 2021-09-17T08:50:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,132
|
rd
|
check_save_path.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check_save_path.R
\name{check_save_path}
\alias{check_save_path}
\title{Check if save path and log folder is appropriate}
\usage{
check_save_path(save_path, log_folder, log_folder_ind, write_vcf = FALSE)
}
\arguments{
\item{save_path}{File path to save formatted data. Defaults to
\code{tempfile(fileext=".tsv.gz")}.}
\item{log_folder}{Filepath to the directory for the log files and the log of
MungeSumstats messages to be stored. Default is a temporary directory.}
\item{log_folder_ind}{Binary Should log files be stored containing all
filtered out SNPs (separate file per filter). The data is outputted in the
same format specified for the resulting sumstats file. The only exception to
this rule is if output is vcf, then log file saved as .tsv.gz. Default is
FALSE.}
\item{write_vcf}{Whether to write as VCF (TRUE) or tabular file (FALSE).}
}
\value{
Corrected \code{save_path}, the file type, the separator, corrected
\code{log_folder},the log file extension
}
\description{
Check if save path and log folder is appropriate
}
\keyword{internal}
|
8a253b0f6cf9f6dcca003ae5f9c1160736c2310b
|
f6dcb066042632979fc5ccdd6aa7d796d3191003
|
/Problem Sets/Student Submissions/Problem Set 2/2416912/q1_template.R
|
d0fe3afc772812e757c688c6fbbcc46be68b3fc3
|
[] |
no_license
|
NikoStein/ADS19
|
45301bcd68d851053399621dd8a0be784e1cc899
|
90f2439c6de8569f8a69983e0a605fd94e2e9f0a
|
refs/heads/master
| 2020-05-19T19:10:12.411585
| 2020-03-12T00:02:14
| 2020-03-12T00:02:14
| 185,165,255
| 0
| 4
| null | 2019-08-06T06:16:30
| 2019-05-06T09:26:01
|
HTML
|
UTF-8
|
R
| false
| false
| 4,139
|
r
|
q1_template.R
|
# Problem Set 2
# Question 1
#1a
library(tidyverse)
library(rvest)
library(xml2)
library(purrr)
library(repurrrsive)
library(stringr)
# Extract the text of every <a> (tag link) inside a quote's tag container node.
getTagsCleaned <- function(x) {
  anchors <- html_nodes(x, "a")
  html_text(anchors)
}
## Aufgabe 1
getTenQuotes <- function() {
url <- read_html('http://quotes.toscrape.com/')
quotes <-
html_nodes(url, xpath = '//*[contains(concat( " ", @class, " " ), concat( " ", "text", " " ))]') %>%
html_text()
authors <-
html_nodes(url, xpath = '//*[contains(concat( " ", @class, " " ), concat( " ", "author", " " ))]') %>%
html_text()
tags <-
html_nodes(url, xpath = '//*[contains(concat( " ", @class, " " ), concat( " ", "tags", " " ))]')
cleanTags <- map(tags, getTagsCleaned)
together <- data.frame(authors, cbind(cleanTags), quotes)
x <- c("authors", "tags", "quotes")
colnames(together) <- x
together
}
## Aufgabe 2
getOneHundredQuotes <- function() {
x <- c("authors", "quotes", "tags")
map_df(1:10, function(i) {
cat(".")
pg <-
read_html(sprintf('http://quotes.toscrape.com/page/%d/', i))
quotes <-
html_nodes(pg, xpath = '//*[contains(concat( " ", @class, " " ), concat( " ", "text", " " ))]') %>%
html_text()
authors <-
html_nodes(pg, xpath = '//*[contains(concat( " ", @class, " " ), concat( " ", "author", " " ))]') %>%
html_text()
tags <-
html_nodes(pg, xpath = '//*[contains(concat( " ", @class, " " ), concat( " ", "tags", " " ))]')
cleanTags <- map(tags, getTagsCleaned)
rbind(
data.frame(
'authors' = authors,
'quotes' = quotes,
'tags' = cbind(cleanTags),
stringsAsFactors = F
)
)
}) -> allQuotes
colnames(allQuotes) <- x
allQuotes
}
# Aufgabe 3
getAllAuthorsUrl <- function() {
authorsWithUrl <- map_df(1:10, function(i) {
cat(".")
pg <-
read_html(sprintf('http://quotes.toscrape.com/page/%d/', i))
authors <-
html_nodes(pg, xpath = '//*[contains(concat( " ", @class, " " ), concat( " ", "author", " " ))]') %>%
html_text()
authorsUrl <-
html_nodes(pg, xpath = '//*[contains(concat( " ", @class, " " ), concat( " ", "quote", " " ))]') %>%
html_nodes('span a') %>%
html_attr('href')
rbind(data.frame(
'authors' = authors,
'authorsUrl' = authorsUrl,
stringsAsFactors = F
))
})
authorsWithUrl
}
# Aufgabe 4
authorsWithUrl <- getAllAuthorsUrl()
# Scrape one author's detail page from quotes.toscrape.com.
#
# @param url author page path as scraped from the site
#   (e.g. "/author/Albert-Einstein")
# @return a one-row data.frame with columns: authors (name), bornDate,
#   description
getInfoOnAuthor <- function(url) {
  base <- read_html(sprintf('http://quotes.toscrape.com%s', url))
  authorDetail <-
    data.frame(
      authors = html_node(base, 'h3') %>% html_text(),
      bornDate = html_node(base, xpath = '//*[contains(concat( " ", @class, " " ), concat( " ", "author-born-date", " " ))]') %>% html_text(),
      description = html_node(base, 'div.author-description') %>% html_text(),
      # FIX: use TRUE/FALSE, never T/F -- T and F are ordinary, reassignable
      # variables and can be shadowed.
      stringsAsFactors = FALSE
    )
  # Strip the leading newline + indentation whitespace carried by the <h3> text.
  authorDetail$authors <- gsub(pattern = "[\n] *", "", authorDetail$authors)
  authorDetail
}
# Aufgabe 5
authorsNotUnique = map_df(authorsWithUrl$authorsUrl, getInfoOnAuthor)
authorWithDetails = unique(authorsNotUnique)
authorWithDetails$day = str_sub(authorWithDetails$bornDate,-8,-7)
authorWithDetails$month = regmatches(authorWithDetails$bornDate,regexpr("[a-zA-Z]*",authorWithDetails$bornDate))
authorWithDetails$year = str_sub(authorWithDetails$bornDate,-4,-1)
between <- table(authorWithDetails$year)
between <- sum(between[names(between) >= 1800 & names(between) < 1900])
# Aufgabe 6
allQuotes <- getOneHundredQuotes()
allQuotes %>%
group_by(authors) %>%
count(authors, sort = T, name = "brian") -> maxQuote
maxQuote[which.max(maxQuote$brian),]
# Aufgabe 7
allQuotes %>%
group_by(authors) %>%
summarise(number = as.numeric(n())) %>%
summarise("Mean of Quotes" = mean(number))
# Aufgabe 8
allQuotes[grepl('life', allQuotes$tags),]
joined_data <- left_join(allQuotes, authorWithDetails)
joined_data
#max(countedQuotes$n)
## ausführen
#getTenQuotes()
#brian=getOneHundredQuotes()
|
6487815421b07eca1e94802930d269f7b84c7f38
|
4e2c87e93ffca05d39d9ce15a612c12911492364
|
/man/funnel_measure.Rd
|
7b833075a50cb0149303f8129ae5ea39a7358d31
|
[] |
no_license
|
ModelOriented/DALEXtra
|
bc731240c42b33c22a6f6442894c26bb751413a2
|
a8baf5791b8d9565ca857c670c3678c568c8b3d0
|
refs/heads/master
| 2023-06-09T23:38:36.365886
| 2023-05-25T22:53:20
| 2023-05-25T22:53:20
| 196,374,651
| 63
| 15
| null | 2023-03-17T20:43:39
| 2019-07-11T10:38:29
|
R
|
UTF-8
|
R
| false
| true
| 4,374
|
rd
|
funnel_measure.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funnel_measure.R
\name{funnel_measure}
\alias{funnel_measure}
\title{Caluculate difference in performance in models across different categories}
\usage{
funnel_measure(
champion,
challengers,
measure_function = NULL,
nbins = 5,
partition_data = champion$data,
cutoff = 0.01,
cutoff_name = "Other",
factor_conversion_threshold = 7,
show_info = TRUE,
categories = NULL
)
}
\arguments{
\item{champion}{- explainer of champion model.}
\item{challengers}{- explainer of challenger model or list of explainers.}
\item{measure_function}{- measure function that calculates performance of model based on true observation and prediction.
Order of parameters is important and should be (y, y_hat). The measure calculated by the function
should have the property that lower score value indicates better model. If NULL, RMSE will be used for regression,
one minus auc for classification and crossentropy for multiclass classification.}
\item{nbins}{- Number of quantiles (partition points) for numeric columns. In case when more than one quantile have the same value, there will be less partition points.}
\item{partition_data}{- Data by which test dataset will be partitioned for computation. Can be either data.frame or character vector.
When second is passed, it has to indicate names of columns that will be extracted from test data.
By default full test data. If data.frame, number of rows has to be equal to number of rows in test data.}
\item{cutoff}{- Threshold for categorical data. Entries less frequent than specified value will be merged into one category.}
\item{cutoff_name}{- Name for new category that arised after merging entries less frequent than \code{cutoff}}
\item{factor_conversion_threshold}{- Numeric columns with lower number of unique values than value of this parameter will be treated as factors}
\item{show_info}{- Logical value indicating if progress bar should be shown.}
\item{categories}{- a named list of variable names that will be plotted in a different colour. By default it is partitioned on Explanatory, External and Target.}
}
\value{
An object of the class \code{funnel_measure}
It is a named list containing following fields:
\itemize{
\item \code{data} data.frame that consists of columns:
\itemize{
\item \code{Variable} Variable according to which partitions were made
\item \code{Measure} Difference in measures. Positive value indicates that champion was better, while negative that challenger.
\item \code{Label} String that defines subset of \code{Variable} values (partition rule).
\item \code{Challenger} Label of challenger explainer that was used in \code{Measure}
\item \code{Category} a category of the variable passed to function
}
\item \code{models_info} data.frame containing information about models used in analysis
}
}
\description{
Function \code{funnel_measure} allows users to compare two models based on their explainers. It partitions dataset on which models were built
and creates categories according to quantiles of columns in \code{parition data}. \code{nbins} parameter determines number of quantiles.
For each category difference in provided measure is being calculated. Positive value of that difference means that Champion model
has better performance in specified category, while negative value means that one of the Challengers was better. Function allows
to compare multiple Challengers at once.
}
\examples{
\donttest{
library("mlr")
library("DALEXtra")
task <- mlr::makeRegrTask(
id = "R",
data = apartments,
target = "m2.price"
)
learner_lm <- mlr::makeLearner(
"regr.lm"
)
model_lm <- mlr::train(learner_lm, task)
explainer_lm <- explain_mlr(model_lm, apartmentsTest, apartmentsTest$m2.price, label = "LM")
learner_rf <- mlr::makeLearner(
"regr.ranger"
)
model_rf <- mlr::train(learner_rf, task)
explainer_rf <- explain_mlr(model_rf, apartmentsTest, apartmentsTest$m2.price, label = "RF")
learner_gbm <- mlr::makeLearner(
"regr.gbm"
)
model_gbm <- mlr::train(learner_gbm, task)
explainer_gbm <- explain_mlr(model_gbm, apartmentsTest, apartmentsTest$m2.price, label = "GBM")
plot_data <- funnel_measure(explainer_lm, list(explainer_rf, explainer_gbm),
nbins = 5, measure_function = DALEX::loss_root_mean_square)
plot(plot_data)
}
}
|
3da6242950507c9ca14e1100cf58f496e6e6cfc6
|
cb649b9f890d39ed47431b71ba8a89ed27037b5c
|
/R/02-open-saved-DI-flat-files.R
|
d651d8bfdd039e559b54bed5d9906c42f71c272a
|
[] |
no_license
|
magerton/drillinginfo-data-import
|
bf0a00f3b9ab092eb93edd97a2fdaa45e4919cba
|
ca4d4554e755095a117555d6007e3bcf90fdecd5
|
refs/heads/master
| 2021-03-24T11:55:26.562681
| 2017-05-25T14:19:32
| 2017-05-25T14:19:35
| 89,253,578
| 4
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,674
|
r
|
02-open-saved-DI-flat-files.R
|
# Code by Mark Agerton (2017-04)
#
# This file is simply a set of commands to open each table saved in the prior script

# NOTE(review): wipes the entire workspace -- fine for an interactive script,
# but destructive if this file is sourced from a live session.
rm(list=ls())
library(data.table)

# Date suffix appended to the .Rdata file names when the import script saved them.
DATE_DI_FILES <- "-2017-03-31"

# import column info
load("intermediate_data/column_info.Rdata")
column_info[,.N, keyby=table]                  # number of fields per table
column_info[is_factor == T, .(table, field)]   # which fields were stored as factors

# what import problems were there in all but PDEN_PROD table?
# Why did Windows find embedded nulls but Linux not?
load(paste0("intermediate_data/DI_import_problems", DATE_DI_FILES, ".Rdata"))
problems

# For each saved table below: load the .Rdata file (which creates the object of
# the same name in the workspace), print it, and show each column's class.
load(paste0("intermediate_data/nph_api_nos"   , DATE_DI_FILES, ".Rdata"))
nph_api_nos
sapply(nph_api_nos, class)

load(paste0("intermediate_data/nph_botmholes" , DATE_DI_FILES, ".Rdata"))
nph_botmholes
sapply(nph_botmholes, class)

load(paste0("intermediate_data/nph_oper_addr" , DATE_DI_FILES, ".Rdata"))
nph_oper_addr
sapply(nph_oper_addr, class)

load(paste0("intermediate_data/nph_pden_tops" , DATE_DI_FILES, ".Rdata"))
nph_pden_tops
sapply(nph_pden_tops, class)

load(paste0("intermediate_data/nph_wellspots" , DATE_DI_FILES, ".Rdata"))
nph_wellspots
sapply(nph_wellspots, class)

load(paste0("intermediate_data/pden_desc"     , DATE_DI_FILES, ".Rdata"))
pden_desc
sapply(pden_desc, class)

load(paste0("intermediate_data/pden_inj"      , DATE_DI_FILES, ".Rdata"))
pden_inj
sapply(pden_inj, class)

load(paste0("intermediate_data/pden_sale"     , DATE_DI_FILES, ".Rdata"))
pden_sale
sapply(pden_sale, class)

load(paste0("intermediate_data/pden_well_test", DATE_DI_FILES, ".Rdata"))
pden_well_test
sapply(pden_well_test, class)

load(paste0("intermediate_data/permits"       , DATE_DI_FILES, ".Rdata"))
permits
sapply(permits, class)
|
ff9a64f2dc5ca72394d815c75afc1cb4deec441d
|
f31050afb9299aeb68fe4f3bc3d4999bc3c01d3e
|
/pkg/tests/databases.R
|
35e84cc04858b93488d0e1478951fa224b796542
|
[] |
no_license
|
piccolbo/dplyr.spark.hive
|
4e574923b43ec7ff7ecf9dce13cea69eb792c5b3
|
002bc997feb26b722dc0fc448edbfebc27541d40
|
refs/heads/master
| 2020-12-25T03:41:08.160622
| 2016-01-22T18:15:57
| 2016-01-22T18:15:57
| 40,582,624
| 11
| 4
| null | 2016-01-08T22:35:35
| 2015-08-12T05:50:23
|
R
|
UTF-8
|
R
| false
| false
| 4,220
|
r
|
databases.R
|
# Copyright 2015 Revolution Analytics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# derivative of dplyr introductory material, http://github.com/hadley/dplyr
# presumably under MIT license
library(dplyr)
library(dplyr.spark.hive)

# Grab the unexported helper that uploads a local data frame into the backend.
copy_to_from_local = dplyr.spark.hive:::copy_to_from_local

# Open a dplyr source backed by SparkSQL.
my_db = src_SparkSQL()

library(nycflights13)

# Reuse the remote "flights" table if it already exists, otherwise upload the
# nycflights13 flights data frame and point at the new remote table.
flights = {
  if(db_has_table(my_db$con, "flights"))
    tbl(my_db, "flights")
  else{
    copy_to_from_local(my_db, flights, "flights")}}
flights
cache(flights)  # NOTE(review): presumably pins the table in cluster memory -- confirm

## ------------------------------------------------------------------------
# Basic single-table verbs; each builds a lazy query against the backend.
select(flights, year:day, dep_delay, arr_delay)
filter(flights, dep_delay > 240)
arrange(flights, year, month, day)
mutate(flights, speed = air_time / distance)
summarize(flights, delay = mean(dep_delay))

# Build a query step by step via named intermediates.
c1 = filter(flights, year == 2013, month == 1, day == 1)
c2 = select(c1, year, month, day, carrier, dep_delay, air_time, distance)
c3 = mutate(c2, speed = distance / air_time * 60)
c4 = arrange(c3, year, month, day, carrier)
c4

# collect() materializes the lazy query as a local data frame.
c4_mem = collect(c4)
class(c4_mem)
c4$query$sql  # the SQL generated for the lazy query
explain(c4)

# Same pipeline expressed as a single pipe chain.
flights %>%
  filter(year == 2013, month == 1, day == 1) %>%
  select(year, month, day, carrier, dep_delay, air_time, distance) %>%
  mutate(speed = distance / air_time * 60) %>%
  arrange(year, month, day, carrier)

# Grouped operations.
daily = group_by(flights, year, month, day)
summarize(daily, arr_delay = mean(arr_delay))

# Per day: flights with the minimum or maximum arrival delay.
bestworst =
  daily %>%
  select(flight, arr_delay) %>%
  filter(arr_delay == min(arr_delay) || arr_delay == max(arr_delay))
bestworst
bestworst$query$sql

# Per day: rank flights by decreasing arrival delay (window function).
ranked =
  daily %>%
  select(arr_delay) %>%
  mutate(rank = rank(desc(arr_delay)))
ranked

library(ggplot2)

#from the tutorial for data frames
dim(flights)
head(flights)
filter(flights, month == 1, day == 1)
arrange(flights, year, month, day)
arrange(flights, desc(arr_delay))
select(flights, year, month, day)
select(flights, year:day)
select(flights, -(year:day))
select(flights, tail_num = tailnum)
rename(flights, tail_num = tailnum)
distinct(select(flights, tailnum))
distinct(select(flights, origin, dest))
mutate(
  flights,
  gain = arr_delay - dep_delay,
  speed = distance / air_time * 60) %>%
  select(flight, gain, speed)
mutate(
  flights,
  gain = arr_delay - dep_delay,
  gain_per_hour = gain / (air_time / 60))%>%
  select(flight, gain, gain_per_hour)
transmute(
  flights,
  gain = arr_delay - dep_delay,
  gain_per_hour = gain / (air_time / 60))

#not in dplyr for sql
# slice(flights, 1:10)
# sample_n(flights, 10)
# sample_frac(flights, 0.01)

# Per-plane delay summary, restricted to planes with enough observations.
by_tailnum = group_by(flights, tailnum)
delay =
  summarize(
    by_tailnum,
    count = n(),
    dist = mean(distance),
    delay = mean(arr_delay))
delay = filter(delay, count > 20, dist < 2000)
delay_local = collect(delay)
delay_local

ggplot(
  collect(delay),
  aes(dist, delay)) +
  geom_point(aes(size = count), alpha = 1/2) +
  geom_smooth() +
  scale_size_area()

# Distinct planes and flight counts per destination.
destinations = group_by(flights, dest)
summarize(
  destinations,
  planes = n_distinct(tailnum),
  flights = n())

# Roll the grouped summary up: day -> month -> year.
(per_day   = summarize(daily, flights = n()))
(per_month = summarize(per_day, flights = sum(flights)))
(per_year  = summarize(per_month, flights = sum(flights)))

# Three equivalent formulations of the same query: intermediates, nesting, pipe.
a1 = group_by(flights, year, month, day)
a2 = select(a1, arr_delay, dep_delay)
a3 =
  summarize(
    a2,
    arr = mean(arr_delay),
    dep = mean(dep_delay))
a4 = filter(a3, arr > 30 | dep > 30)
a4
filter(
  summarize(
    select(
      daily,
      arr_delay, dep_delay),
    arr = mean(arr_delay),
    dep = mean(dep_delay)),
  arr > 30 | dep > 30)
flights %>%
  group_by(year, month, day) %>%
  select(arr_delay, dep_delay) %>%
  summarize(
    arr = mean(arr_delay),
    dep = mean(dep_delay)) %>%
  filter(arr > 30 | dep > 30)
|
62483f80f1b99b475d9eb6aec4899787b6d258f4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/spacesXYZ/examples/uvfromXYZ.Rd.R
|
6c86afb6703526151445dc173f16e55ad070ce16
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 426
|
r
|
uvfromXYZ.Rd.R
|
# Auto-extracted example from the spacesXYZ man page for uvfromXYZ().
library(spacesXYZ)
### Name: uvfrom
### Title: Convert from XYZ or xy to Uniform Chromaticity Spaces
### Aliases: uvfromXYZ uvfromxy
### Keywords: XYZ

### ** Examples

# locate some standard illuminants on the 1976 UCS diagram
# (returns one u', v' coordinate pair per illuminant; expected output below)
uvfromXYZ( standardXYZ( c('C','D50','D65','E') ) )
##              u'        v'
##   C    0.2008921 0.4608838
##   D50  0.2091601 0.4880734
##   D65  0.1978398 0.4683363
##   E    0.2105263 0.4736842
|
34d41ff7de31ab9808c7d51952d8ebc4fd4afa1d
|
69305b91a02d22b9a61eb344fc11cd22b9273c25
|
/plot2.R
|
0a8c20ae048f03fca9b5d3378bdd56bb27717194
|
[] |
no_license
|
lauraviluma/ExData_Plotting1
|
45f0f95b26e3d52c9be604a67c088b92a5c482a6
|
22d92aa61b684380540564d4bd00551e64a9f2cb
|
refs/heads/master
| 2022-11-25T03:24:31.381080
| 2020-06-23T15:05:00
| 2020-06-23T15:05:00
| 274,139,314
| 0
| 0
| null | 2020-06-22T13:06:55
| 2020-06-22T13:06:54
| null |
UTF-8
|
R
| false
| false
| 604
|
r
|
plot2.R
|
# Exploratory Data Analysis, plot 2: Global Active Power over 2007-02-01/02.
# NOTE(review): hard-coded working directory -- works only on the author's
# machine; kept for compatibility with how the script was run.
setwd("~/r_scripts/Exploratory analysis/ExData_Plotting1")

# The UCI household power consumption file marks missing values with "?".
# Without na.strings = "?" the measurement columns are read as character
# and the plot below fails; declare it explicitly.
power_data <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?")
library(dplyr)

# Keep only the two days of interest (Date is stored as d/m/Y text).
small_data <- filter(power_data, Date == "1/2/2007" | Date == "2/2/2007")

# Combine Date and Time into a single POSIXct timestamp column DT.
small_data1 <- mutate(small_data, DT = paste(Date, Time))
small_data1 <- mutate(small_data1, DT = as.POSIXct(DT, tz = "", format = "%d/%m/%Y %H:%M:%S"))

## Create plot2
png(file = "plot2.png")
## Create plot and send to a file (no plot appears on screen)
with(small_data1, plot(DT, Global_active_power, ylab = "Global Active Power (Kilowatts)", xlab = "", type = "l"))
## Close the PNG file device
dev.off()
|
642257c921bb2fad8c382052da4c02a4f7e7e3a1
|
ff88daac885a87325c699aeef429efffbb61c08a
|
/man/mtkReadFactors-methods.Rd
|
56e9316a293c2ed6616366a19dac4db935e7b37d
|
[] |
no_license
|
santoshpanda15/mtk
|
200fe859fefce13b1c317f0b7e26ee86b32c0494
|
cd69f4411837d656d80b462f9b5d431724a81041
|
refs/heads/master
| 2021-01-15T10:07:06.218323
| 2014-07-15T00:00:00
| 2014-07-15T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 497
|
rd
|
mtkReadFactors-methods.Rd
|
\name{mtkReadFactors-methods}
\alias{mtkReadFactors-methods}
\alias{mtkReadFactors}
\title{The \code{mtkReadFactors} method}
\description{a list of factors}
\usage{mtkReadFactors(file, path)}
\value{an object of the class \code{mtkDomain}}
\arguments{
\item{file}{the name of the file to read.}
\item{path}{the path to the file to read.}
}
\author{Hervé Richard, BioSP, INRA, Domaine Saint paul, 84914 Avignon Cedex 9}
\examples{
# see examples for the \code{\linkS4class{mtkExpFactors}} class.
}
|
bc95b4694ff37af474b6c5f5428cd154464ddc4f
|
1a17e00cca65b12329b68b33d4ad8e86d89b7a82
|
/components/text_abattements.R
|
86c39a474d65b97bf4379c06f019b0b7ef7eec2c
|
[
"CC-BY-4.0",
"CC-BY-3.0",
"etalab-2.0"
] |
permissive
|
marion-paclot/TH
|
247aa3f8a1b6ee7ea7fbf279a7814d69dca049ca
|
8d590da5b1b9e7a327cf12796009008a9172870c
|
refs/heads/master
| 2021-06-27T17:24:15.710463
| 2019-05-27T17:15:59
| 2019-05-27T17:15:59
| 142,594,902
| 20
| 3
| null | 2018-08-09T12:28:34
| 2018-07-27T15:33:37
|
R
|
UTF-8
|
R
| false
| false
| 1,636
|
r
|
text_abattements.R
|
# Presentation of the tax allowances ("abattements") shown in the UI.
# The value is a single HTML fragment (French copy, rendered as-is by Shiny);
# do not alter the string contents -- it is user-facing text.
text_abattements = "<h4>En fonction de votre situation personnelle, vous pouvez bénéficier d'abattements
appliqués à la valeur locative brute de votre bien.</h4>
<span><br>La valeur locative nette de votre bien est sa valeur après abattements et elle ne peut être négative.
Elle sert d'assiette au calcul des cotisations.</span>
<span><br>Il existe 5 types d'abattements, dont le montant est soit un montant forfaitaire (F%)
soit un pourcentage de la valeur locative moyenne de la collectivité :</span>
<ul>
<li>Général à la base : applicable à toutes les habitations principales,
sans condition de ressource.</li>
<li>Pour personne à charge de rang 1 et 2 : par personne à charge pour chacune des
deux premières personnes à charge.</li>
<li>Pour personne à charge à partir de 3 : par personne à charge à partir de la troisième.</li>
<li>Spécial à la base en faveur des personnes de condition modeste :
critère de revenu (le rfr ne doit pas dépasser un seuil dépendant
du département et du nombre de parts fiscales)
et de valeur locative brute, qui ne doit pas dépasser 130% (+10% par personne à charge) de la valeur locative moyenne.</li>
<li>Spécial à la base en faveur des personnes handicapées ou invalides :
applicable dès lors qu'une personne handicapée ou invalide réside dans l'habitation à titre principal.</li>
</ul>"
|
dd1827aacaf4b5efe1531f6e5b3c8899608f8d7f
|
e5b43b221e0d63f896a0b8bf0be9f1e3bcdb9076
|
/corpus_processing.R
|
bfea6aabea754c4a27e7a3fe7075d796e8fb5794
|
[] |
no_license
|
sztomczyk86/Data_science_capstone
|
14faa4b35ce46e44b24920068a7db0e4b032914c
|
97f61b4164e14670c6720a269f8d518d8777d9a6
|
refs/heads/master
| 2023-06-21T11:45:36.581962
| 2021-07-20T14:06:29
| 2021-07-20T14:06:29
| 307,304,066
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,813
|
r
|
corpus_processing.R
|
#' pre process the SwiftKey text into a corpus:
#' sample the three input files, keep English lines, tokenize, and build
#' 1- to 5-gram frequency tables for a Stupid Backoff language model.
library(tidyverse)
library(cld2) # detect language
library(quanteda)
library(quanteda.textplots)
library(quanteda.textstats)
set.seed(666)  # fixed seed so the sampling below is reproducible

# File connections to the three raw corpora plus a profanity word list.
blog <- file("Coursera-SwiftKey/final/en_US/en_US.blogs.txt")
news <- file("Coursera-SwiftKey/final/en_US/en_US.news.txt")
twitter <- file("Coursera-SwiftKey/final/en_US/en_US.twitter.txt")
profanity <- read_lines("bad-words.txt")

#' sample 70% of the blog text file (rm() frees the full file immediately)
blog.txt <- readLines(blog)
sample.blog <- sample(1:length(blog.txt),round(length(blog.txt)*0.7))
blog.txt.sub <- blog.txt[sample.blog]
rm(blog.txt)

#' sample 70% of the news text file
news.txt <- readLines(news)
sample.news <- sample(1:length(news.txt),round(length(news.txt)*0.7))
news.txt.sub <- news.txt[sample.news]
rm(news.txt)

#' sample 70% of the twitter text file
twitter.txt <- readLines(twitter)
sample.twitter <- sample(1:length(twitter.txt),round(length(twitter.txt)*0.7))
twitter.txt.sub <- twitter.txt[sample.twitter]
rm(twitter.txt)

#' detect the language of each line in the text file and remove the non-english
#' lines
lng1 <- detect_language(blog.txt.sub)
lng2 <- detect_language(news.txt.sub)
lng3 <- detect_language(twitter.txt.sub)

#' keep only lines marked as english
blog.txt.sub <- blog.txt.sub[lng1 =="en"]
news.txt.sub <- news.txt.sub[lng2 =="en"]
twitter.txt.sub <- twitter.txt.sub[lng3 =="en"]

#' combine the subsamples of different input files into one corpus for further
#' analysis (tag each line with its source)
blog.txt.sub <- data.frame(text = blog.txt.sub, source = "blog")
news.txt.sub <- data.frame(text = news.txt.sub, source = "news")
twitter.txt.sub <- data.frame(text = twitter.txt.sub, source = "twitter")
blog.txt.sub %>% bind_rows(news.txt.sub, twitter.txt.sub) -> all.txt.sub
rm(list=c("blog.txt.sub", "news.txt.sub", "twitter.txt.sub"))

# Build a quanteda corpus and split documents into sentences.
c.all <- corpus(all.txt.sub)
c.all <- corpus_reshape(c.all, to = "sentences")
rm(all.txt.sub)

#' create the corpus from the sampled text and tokenize it by words,
#' stripping punctuation, symbols, numbers, URLs and separators
tk1 <- tokens(c.all, what = "word", remove_punct = TRUE,
remove_symbols = TRUE,
remove_numbers = TRUE,
remove_url = TRUE,
remove_separators = TRUE)

#' remove profanity
tk1 <- tokens_remove(tk1, pattern = profanity)

#' remove stopwords (intentionally disabled: stopwords matter for prediction)
#tk1 <- tokens_select(tk1, pattern = stopwords("en"), selection = "remove")

#' create 2, 3, 4 and 5-ngrams and analyze their frequency;
#' keep only ngrams seen more than 4 times, and drop each dfm right away
#' to bound memory usage
tk1.dfm <- dfm(tk1)
tk1.freq <- as_tibble(textstat_frequency(tk1.dfm)) %>%
select(feature, frequency) %>% filter(frequency > 4)
rm(tk1.dfm)
tk2 <- tokens_ngrams(tk1, n = 2)
tk2.dfm <- dfm(tk2)
rm(tk2)
tk2.freq <- as_tibble(textstat_frequency(tk2.dfm)) %>%
select(feature, frequency) %>% filter(frequency > 4)
rm(tk2.dfm)
tk3 <- tokens_ngrams(tk1, n = 3)
tk3.dfm <- dfm(tk3)
rm(tk3)
tk3.freq <- as_tibble(textstat_frequency(tk3.dfm)) %>%
select(feature, frequency) %>% filter(frequency > 4)
rm(tk3.dfm)
tk4 <- tokens_ngrams(tk1, n = 4)
tk4.dfm <- dfm(tk4)
rm(tk4)
tk4.freq <- as_tibble(textstat_frequency(tk4.dfm)) %>%
select(feature, frequency) %>% filter(frequency > 4)
rm(tk4.dfm)
tk5 <- tokens_ngrams(tk1, n = 5)
tk5.dfm <- dfm(tk5)
rm(tk5)
tk5.freq <- as_tibble(textstat_frequency(tk5.dfm)) %>%
select(feature, frequency) %>% filter(frequency > 4)
rm(tk1)
rm(tk5.dfm)

# Stupid Backoff Model
# Keep only the frequency tables and the profanity list.
rm(list = setdiff(ls(), c("tk1.freq","tk2.freq", "tk3.freq", "tk4.freq",
"tk5.freq","profanity")))
gc()
# write.csv(tk1.freq, "tk1.freq.csv", row.names = F)
# write.csv(tk2.freq, "tk2.freq.csv", row.names = F)
# write.csv(tk3.freq, "tk3.freq.csv", row.names = F)
# write.csv(tk4.freq, "tk4.freq.csv", row.names = F)
# write.csv(tk5.freq, "tk5.freq.csv", row.names = F)
# NOTE(review): this final rm() also deletes the frequency tables just
# computed; nothing is persisted unless the write.csv lines above are
# uncommented -- confirm this is intentional.
rm(list=ls())
|
4b966ab2e331772c655023a73c66880068ea6595
|
58a13652e98ccac9772196c14c69e419ed20574c
|
/R/subOrigData_v4.R
|
7ccd339be7380fcdfbc2657713f960d191cd28cd
|
[] |
no_license
|
SPATIAL-Lab/isorig
|
41adfab5f1afcc452b16e0b303ee852832023c30
|
3c04ff08791fb3679d7cc294fe85abfce391169f
|
refs/heads/master
| 2021-06-27T07:14:20.359737
| 2019-08-13T02:56:24
| 2019-08-13T02:56:24
| 109,046,694
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,962
|
r
|
subOrigData_v4.R
|
#' Subset the knownOrig known-origin isotope dataset by taxon or group,
#' optionally clipped to a spatial mask, and plot the selected points.
#'
#' @param taxon character vector of values from knownOrig$Taxon (exclusive
#'   with `group`).
#' @param group character vector of values from knownOrig$Group (exclusive
#'   with `taxon`).
#' @param mask optional SpatialPolygonsDataFrame; when given, only points
#'   falling inside the mask are kept (mask is reprojected to WGS84).
#' @return the third column of the selected (spatial) subset of knownOrig.
#'   NOTE(review): returning column 3 by position is fragile -- presumably
#'   this is the isotope-value column; confirm against the knownOrig layout.
subOrigData <- function(taxon = NULL, group = NULL, mask = NULL) {
  data("knownOrig")
  # Exactly one of taxon/group must be supplied; fail fast otherwise
  # (previously a missing selection crashed later with an opaque error).
  if (!is.null(taxon) && !is.null(group))
    stop("Please either choose taxon or group")
  if (is.null(taxon) && is.null(group))
    stop("Please provide either taxon or group")
  result <- NULL
  if (!is.null(taxon)) {
    if (all(taxon %in% unique(knownOrig$Taxon))) {
      result <- knownOrig[knownOrig$Taxon %in% taxon, ]
    } else {
      stop("taxon should be string or string vector given from Taxon column in knownOrig. Please see knownOrig help page!")
    }
  }
  if (!is.null(group)) {
    if (all(group %in% unique(knownOrig$Group))) {
      result <- knownOrig[knownOrig$Group %in% group, ]
    } else {
      stop("group should be string or string vector given from Group column in knownOrig. Please see knownOrig help page!")
    }
  }
  if (!is.null(mask)) {
    # inherits() instead of class() == "..." so subclasses are accepted too.
    if (inherits(mask, "SpatialPolygonsDataFrame")) {
      if (is.na(proj4string(mask))) {
        stop("mask must have coord. ref.")
      } else {
        # Reproject the mask to WGS84 so it matches the point coordinates.
        mask <- spTransform(mask, "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
      }
      # Keep only points whose overlay with the mask is non-missing.
      s <- data.frame(result@coords, result@data)
      o <- over(result, mask)
      overlap <- s[!is.na(o), ]
    } else {
      stop("mask should be a SpatialPolygonsDataFrame")
    }
    if (length(overlap[, 1]) != 0) {
      # Rebuild a spatial object from the surviving rows and plot them.
      overlap <- SpatialPointsDataFrame(coords = cbind(overlap$Longitude,
        overlap$Latitude), data = overlap, proj4string = CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"))
      plot(mask, axes = T)
      plot(overlap, add = T, col = "red")
    } else {
      cat("No isotope data found in mask you choose!\n")
    }
    result <- overlap
  } else {
    # No mask: show all selected points on a world map.
    # NOTE(review): require() inside a function is discouraged; kept so a
    # missing maptools does not change the error behavior for callers.
    require(maptools)
    data(wrld_simpl)
    plot(wrld_simpl, axes = T)
    points(result$Longitude, result$Latitude, col = "red", cex = 0.5)
  }
  cat(length(result[,1]),"data points are found\n")
  return(result[,3])
}
|
4868fb73ec81bfd5643fe76c37fddfc630000c33
|
2ac8483d0feb540b59d362759f0ca6b010fd5fe3
|
/cachematrix.R
|
ef86b2581625133d649ca0f75e8da68fbf7098f9
|
[] |
no_license
|
Yuan-Hu-Pharma/ProgrammingAssignment2
|
e91fd5c83aa7a84c8b5487b4987ad2964e88e720
|
39e6231c708615bd177cfed3dbfcd989f7d4b419
|
refs/heads/master
| 2021-05-28T01:50:08.930968
| 2014-10-21T05:52:32
| 2014-10-21T05:52:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,362
|
r
|
cachematrix.R
|
## Solution to R Programming Assignment 2.
##
## makeCacheMatrix() wraps a matrix together with a cache slot for its
## inverse; cacheSolve() computes the inverse once and reuses the cached
## value on subsequent calls (until the matrix is replaced with $set()).
##
## Usage:
##   cm <- makeCacheMatrix(matrix(1:4, nrow = 2))
##   cacheSolve(cm)   # computes and caches the inverse
##   cacheSolve(cm)   # prints "getting cached data", returns cached value

## Build a cache-aware matrix wrapper. Returns a list of four accessors:
## set/get for the matrix itself, setinverse/getinverse for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setinverse <- function(solve) cached_inverse <<- solve
  getinverse <- function() cached_inverse
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## Return the inverse of the wrapped matrix, computing it only on a cache miss.
## `x` must be an object produced by makeCacheMatrix().
cacheSolve <- function(x, ...) {
  hit <- x$getinverse()
  if (!is.null(hit)) {
    # Cache hit: announce it and skip the computation entirely.
    message("getting cached data")
    return(hit)
  }
  # Cache miss: invert, store the result for next time, and return it.
  fresh <- solve(x$get(), ...)
  x$setinverse(fresh)
  fresh
}
|
92b0be1fe76df1e1bff96415c8905e3bc3d91b10
|
44598c891266cd295188326f2bb8d7755481e66b
|
/DbtTools/RetroMAT/R/nanmin.R
|
cce1f3fd5dfc568877389b664019f0041183003a
|
[] |
no_license
|
markus-flicke/KD_Projekt_1
|
09a66f5e2ef06447d4b0408f54487b146d21f1e9
|
1958c81a92711fb9cd4ccb0ea16ffc6b02a50fe4
|
refs/heads/master
| 2020-03-13T23:12:31.501130
| 2018-05-21T22:25:37
| 2018-05-21T22:25:37
| 131,330,787
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 547
|
r
|
nanmin.R
|
#' Column-wise minimum, ignoring missing values.
#'
#' For a 2-dimensional input (matrix / data-frame-like), returns the minimum
#' of each column with NA/NaN entries removed (na.rm drops both). For any
#' other input (e.g. a plain vector), returns the overall minimum.
#'
#' NOTE(review): a column containing only missing values yields Inf plus a
#' warning, matching base min(numeric(0), na.rm = TRUE) behavior.
nanmin <- function(Data) {
  # Cleanup vs. original: the index of the minimum (SpaltenInd) was computed
  # but never returned ("not implemented" in the original German comments),
  # so that dead code is removed; only the minima are returned, as before.
  if (length(dim(Data)) == 2) {
    apply(Data, 2, min, na.rm = TRUE)
  } else {
    min(Data, na.rm = TRUE)
  }
}
|
945ecb0be76ed773f13951f5d1a666eb3bc98e19
|
515e4237e5124fc0654949be4cf4f4149ae160c0
|
/R/coalesce_join.R
|
f481e375eef80d4ad270efa384fa46e53d090e73
|
[] |
no_license
|
pacific-hake/hakedataUSA
|
9104a97b7d3c9e8b0f7df24bce6d1df0a64bbbec
|
f4e7ea3fbcb2d13317e94d9b2990e19799221ac3
|
refs/heads/main
| 2023-02-07T22:06:29.045405
| 2023-02-02T22:12:27
| 2023-02-02T22:12:27
| 232,366,659
| 0
| 1
| null | 2023-02-02T22:08:37
| 2020-01-07T16:26:25
|
R
|
UTF-8
|
R
| false
| false
| 1,640
|
r
|
coalesce_join.R
|
#' Mutating join
#'
#' A join that adds information from matching columns from `y` to `x`. If the
#' value in `y` is `NA`, then the value from `x` will be used. Thus, all new
#' information can be used to overwrite the old information.
#'
#' @inheritParams dplyr::full_join
#' @param join The {dplyr} function, or function from any other package, that
#'   should be used to join `x` and `y`. The default is to perform a full join,
#'   i.e., [dplyr::full_join()] between the two data frames.
#' @param ... Any additional arguments you wish to supply to the function
#'   specified in `join`.
#' @return A data frame with the columns of `x` (in `x`'s order); shared
#'   columns hold `y`'s value where available, falling back to `x`'s.
#'
#' @author Edward Visel with some changes from Kelli F. Johnson.
#' @references \url{https://alistaire.rbind.io/blog/coalescing-joins/}
#'
#' @export
coalesce_join <- function(x,
                          y,
                          by = NULL,
                          suffix = c(".x", ".y"),
                          join = dplyr::full_join,
                          ...) {
  joined <- join(x, y, by = by, suffix = suffix, ...)
  # names of desired output
  cols <- union(names(x), names(y))
  # Columns present in both inputs pick up a suffix in the join result;
  # those are the ones that need coalescing.
  # (Cleanup vs. original: an unused `suffix_used` intermediate was removed.)
  to_coalesce <- names(joined)[!names(joined) %in% cols]
  # remove suffixes and deduplicate
  to_coalesce <- unique(
    gsub(paste0("\\", suffix, collapse = "|"), "", to_coalesce)
  )
  names(to_coalesce) <- to_coalesce
  # For each shared column prefer the value from y (suffix[2]), falling
  # back to x (suffix[1]) where y is NA.
  coalesced <- purrr::map_dfc(
    to_coalesce,
    ~ dplyr::coalesce(
      joined[[paste0(.x, suffix[2])]],
      joined[[paste0(.x, suffix[1])]]
    )
  )
  names(coalesced) <- to_coalesce
  # Keep only x's columns, in x's order (coalesced versions win by name).
  return(dplyr::bind_cols(joined, coalesced)[names(x)])
}
|
85aa849585ce47cb6eaa776335344c115b0e2f45
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/9799_0/rinput.R
|
d0b4ed86a994fa0f0f8a712e6295a8273a7ed15c
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
library(ape)
# Read a phylogenetic tree in Newick format, remove its root, and write the
# unrooted tree back out (prepared as input for codeml).
testtree <- read.tree("9799_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9799_0_unrooted.txt")
|
2ffaec72d420bc98ea6a3372284028447b46e1fe
|
d83b3df1b39b0f49b25e1c61937ce5396c9b10ef
|
/tests/testthat/utils.R
|
eeac345feefa70b98b9a768786da8e99ffe888fd
|
[] |
no_license
|
al2na/bamsignals
|
bcc55d3db6bb08eb8169dc72be9c5170b4b4aac1
|
5d69d4ff959f51cb373c9401986d74b7c8fc8f1e
|
refs/heads/master
| 2021-01-21T16:31:54.470762
| 2015-06-15T13:52:27
| 2015-06-15T13:52:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,891
|
r
|
utils.R
|
##CONVERT READS FROM A DATA-FRAME-LIKE FORMAT TO BAM
#to print integers as characters without weird stuff
# Render numbers as plain decimal strings: no scientific notation,
# no width padding, no justification.
int2chr <- function(nums) {
  as_text <- format(nums, scientific = FALSE, trim = TRUE, justify = "none")
  as_text
}
# TRUE when `reads` carries the three fields required for paired-end data.
isPEData <- function(reads) {
  required_fields <- c("isize", "read1", "pnext")
  all(required_fields %in% names(reads))
}
# Converts reads in R (data-frame) format to an indexed BAM file.
#
# The format for `reads` is similar to the one used by Rsamtools::scanBam,
# except that only the fields qname, rname*, strand*, pos*, qwidth*, mapq*
# are used ('*' means required), and for paired-end reads also the fields
# read1 and isize. The field read1 is not in the Rsamtools format; it means
# "is this read the read 1 in the pair?" and defines the pair orientation.
# `reads` must inherit from "data.frame". There is no flag column: every
# read is assumed properly mapped, as is its mate (if present).
# `refs` is an optional list with fields 'refname' and 'reflen'; when NULL,
# reference lengths are derived from the reads themselves.
readsToBam <- function(reads, bampath, refs=NULL){
  ## --- validate required fields -------------------------------------------
  if (!inherits(reads, "data.frame")) stop("reads must be a data.frame")
  reqFields <- c("rname", "strand", "pos", "qwidth", "mapq")
  if (any(!reqFields %in% names(reads))) stop("missing required fields")
  nreads <- nrow(reads)
  if (any(!reads$strand %in% c("+", "-"))) stop("strand can only be '-' or '+'")

  ## --- normalize paired-end fields ----------------------------------------
  pe <- isPEData(reads)
  if (!pe){
    if (any(c("isize", "read1", "pnext") %in% names(reads))){
      warning("incomplete paired-end specification, pair information will be ignored")
    }
    # Single-end: fill the PE columns with neutral values.
    reads$isize <- rep(0, nreads)
    reads$pnext <- reads$isize
    reads$read1 <- rep(TRUE, nreads)
  } else {
    if (!is.logical(reads$read1)) stop("the 'read1' field must be of type logical")
    if (any(reads$isize == 0)) stop("an 'isize' of 0 is not allowed")
    if (any((reads$strand == "+") != (reads$isize > 0))) {
      warning("the sign of 'isize' should be the same as the strand of the read for properly mapped read pairs. Fixing that.")
      # Force isize positive on '+' reads and negative on '-' reads.
      reads$isize <- abs(reads$isize) * (2 * as.integer(reads$strand == "+") - 1)
    }
  }

  ## --- complete missing fields --------------------------------------------
  if (!"qname" %in% names(reads)) reads$qname <- 1:nreads
  reads$mapq[is.na(reads$mapq)] <- 255  # 255 = "mapping quality unavailable"

  ## --- compute the SAM FLAG -----------------------------------------------
  # 0x10: read on reverse strand. For PE additionally: 0x1 paired, 0x2 proper
  # pair, 0x40/0x80 first/second in pair, 0x20 mate on reverse strand
  # (assumed opposite to this read, hence set when this read is '+').
  reads$flag <- 0x10 * (reads$strand == "-")
  if (pe) reads$flag <- reads$flag + 0x1 + 0x2 + 0x80 + (0x40 - 0x80) * reads$read1 + 0x20 * (reads$strand == "+")

  ## --- resolve the reference sequences ------------------------------------
  needRefs <- unique(reads$rname)
  if (!is.null(refs)){
    # BUGFIX: this check previously read names(regs) -- an undefined object --
    # so supplying `refs` always raised "object 'regs' not found".
    if (any(!c("refname", "reflen") %in% names(refs))) stop("missing required fields in 'refs'")
    if (length(refs$refname) != length(refs$reflen)) stop("all fields must have the same length in 'refs'")
    if (any(!needRefs %in% refs$refname)) stop("missing some chromosome names")
  } else {
    # Derive each reference length from the right-most read end plus one.
    refs <- list(
      refname = needRefs,
      reflen = sapply(needRefs, function(chr){
        v <- reads$rname == chr
        max(reads$pos[v] + reads$qwidth[v]) + 1
      }))
  }
  # Sort the references by name (SAM headers list them sorted here).
  refs <- data.frame(refs)
  o <- order(refs$refname)
  refs <- refs[o, ]

  ## --- assemble the SAM record table --------------------------------------
  df <- data.frame(
    qname = reads$qname,
    flag  = reads$flag,
    rname = reads$rname,
    pos   = reads$pos,
    mapq  = reads$mapq,
    cigar = paste0(reads$qwidth, "M"),  # simple full-match CIGAR
    rnext = "=",
    pnext = reads$pnext,
    tlen  = reads$isize,
    seq   = "*",
    qual  = "*")
  # Coordinate-sort (rname, pos) as declared by the SO:coordinate header.
  o <- order(df$rname, df$pos)
  df <- df[o, ]
  # Avoid scientific notation when the numbers are written as text.
  df$pos <- int2chr(df$pos)
  df$qname <- int2chr(df$qname)

  ## --- write SAM, convert to indexed BAM ----------------------------------
  tmpsam <- paste0(tempfile(), ".sam")
  header <- c(
    "@HD\tVN:1.0\tSO:coordinate",
    paste0("@SQ\tSN:", refs$refname, "\tLN:", int2chr(refs$reflen)))
  writeLines(header, tmpsam)
  write.table(x = df, file = tmpsam, sep = "\t", quote = FALSE,
              row.names = FALSE, col.names = FALSE, append = TRUE)
  bamsignals:::writeSamAsBamAndIndex(tmpsam, bampath)
  # Remove the temporary SAM file.
  file.remove(tmpsam)
}
##REWRITE BAMSIGNALS FUNCTIONS IN R
##READS ARE IN A DATA-FRAME-LIKE FORMAT
#convert the reads to a GRanges object
# Convert reads in data-frame format to a GRanges object, optionally
# collapsing paired-end reads.
#
# paired.end: one of "ignore" (treat reads independently), "filter"
#   (keep only read 1 of each pair), "midpoint" (reduce each fragment to
#   its midpoint), "extend" (extend read 1 to the full fragment length).
#   BUGFIX: the default used to be FALSE, which is not in the accepted set,
#   so calling df2gr() without paired.end always stopped with
#   "invalid paired.end option"; the default is now "ignore".
# paired.end.midpoint: retained for call compatibility; not used (the
#   midpoint behavior is selected via paired.end = "midpoint").
# shift: shift positive-strand ranges by +shift and negative-strand ranges
#   by -shift.
# mapqual: minimum mapping quality; lower-quality reads are dropped.
df2gr <- function(reads, paired.end = "ignore", paired.end.midpoint = FALSE,
                  shift = 0, mapqual = 0){
  if (!paired.end %in% c("ignore", "filter", "midpoint", "extend"))
    stop("invalid paired.end option")
  # Filter based on mapping quality.
  reads <- reads[reads$mapq >= mapqual, ]
  # If paired-end aware, keep only the first read of each pair.
  if (paired.end != "ignore") {
    reads <- reads[reads$read1, ]
  }
  # Treat the whole fragment as if it were a single read.
  if (paired.end %in% c("extend", "midpoint")){
    isNeg <- reads$strand == "-"
    # Shift negative-strand reads back to the fragment start.
    reads[isNeg, ]$pos <- reads[isNeg, ]$pos - abs(reads[isNeg, ]$isize) + reads[isNeg, ]$qwidth
    # Extend the width to the template (fragment) width.
    reads$qwidth <- abs(reads$isize)
  }
  # Convert to GRanges.
  gr <- GRanges(seqnames = reads$rname, strand = reads$strand,
                IRanges(start = reads$pos, width = reads$qwidth))
  # For "midpoint", collapse each range to its midpoint.
  if (paired.end == "midpoint"){
    mids <- (start(gr) + end(gr)) / 2
    # Round half-positions toward the range start, strand-dependently.
    signedMids <- mids * (2 * as.integer(strand(gr) == "+") - 1)
    mids <- abs(ceiling(signedMids))
    start(gr) <- mids
    end(gr) <- mids
  }
  # Apply the strand-aware shift.
  shifts <- rep(shift, length(gr))
  shifts[as.logical(strand(gr) == "-")] <- -shift
  GenomicRanges::shift(gr, shifts)
}
# Reference (pure-R) implementation of bamsignals' count: number of reads
# starting (for '+') or ending (for '-') inside each gene. With ss = TRUE a
# 2-row matrix (sense/antisense) per gene is returned, oriented by the
# gene's strand; otherwise strand counts are summed. `...` is forwarded to
# df2gr() (paired-end mode, shift, mapping-quality filter).
countR <- function(reads, genes, ss=FALSE, ...){
  reads <- df2gr(reads, ...)
  ov <- findOverlaps(genes, reads, select="all", type="any", ignore.strand=TRUE)
  ssCounts <- sapply(1:length(genes), function(g){
    gStart <- start(genes)[g]
    gEnd <- end(genes)[g]
    #get the reads overlapping with this gene
    ovReads <- reads[subjectHits(ov)[queryHits(ov)==g]]
    #sum up positive reads (counted by their 5' start position)
    preads <- sum(start(ovReads)>=gStart & start(ovReads)<=gEnd & strand(ovReads)=="+")
    #sum up negative reads (counted by their 5' end position)
    nreads <- sum(end(ovReads)>=gStart & end(ovReads)<=gEnd & strand(ovReads)=="-")
    c(preads, nreads)
  })
  #reverse columns of negative regions, so row 1 is always "sense"
  toRev <- as.logical(strand(genes)=="-")
  ssCounts[,toRev] <- ssCounts[c(2,1),toRev]
  rownames(ssCounts) <- c("sense", "antisense")
  if (!ss) return(colSums(ssCounts))
  ssCounts
}
# Reference (pure-R) implementation of bamsignals' profile: per-base read
# start counts along each gene. Returns a list with one element per gene:
# with ss = TRUE a 2 x width matrix (sense/antisense), otherwise the
# column sums. Output is reversed for '-' genes so position 1 is always
# the gene's 5' end. `...` is forwarded to df2gr().
profileR <- function(reads, genes, ss=FALSE, ...){
  reads <- df2gr(reads, ...)
  ov <- findOverlaps(genes, reads, select="all", type="any", ignore.strand=TRUE)
  isNegGene <- as.logical(strand(genes)=="-")
  lapply(seq_along(genes), function(g){
    gStart <- start(genes)[g]
    gEnd <- end(genes)[g]
    gLen <- gEnd-gStart+1
    #get the reads overlapping with this gene
    ovReads <- reads[subjectHits(ov)[queryHits(ov)==g]]
    isNeg <- as.logical(strand(ovReads)=="-")
    #sum up positive reads: tally 5' start positions relative to the gene
    pStarts <- start(ovReads)[!isNeg]
    preads <- table(factor(pStarts-gStart+1, levels=1:gLen))
    #sum up negative reads: tally 5' end positions relative to the gene
    nEnds <- end(ovReads)[isNeg]
    nreads <- table(factor(nEnds-gStart+1, levels=1:gLen))
    mat <- t(cbind(preads, nreads))
    if (isNegGene[g]) {
      #take into account the strand of the region
      # (rev() on the 2-row matrix flips both position order and strand rows)
      mat <- rev(mat)
    }
    mat <- as.integer(mat)
    dim(mat) <- c(2, length(preads))
    dimnames(mat) <- NULL
    if (ss){#make matrix with named strand rows
      rownames(mat) <- c("sense", "antisense")
      colnames(mat) <- NULL
      return(mat)
    } else return(colSums(mat))
  })
}
# Reference (pure-R) implementation of bamsignals' coverage: per-base read
# coverage along each gene, as a list of integer vectors (one per gene),
# reversed for '-' genes so position 1 is the gene's 5' end. `...` is
# forwarded to df2gr().
coverageR <- function(reads, genes, ...){
  reads <- df2gr(reads, ...)
  isNegGene <- as.logical(strand(genes)=="-")
  seqNames <- as.character(seqnames(genes))
  #use the already implemented coverage function
  #but first make sure that seqinfo of the reads is set properly
  # (seqlengths must span both reads and genes for subsetting below)
  rng <- range(c(genes, reads), ignore.strand=TRUE)
  seqlevels(reads) <- as.character(seqnames(rng))
  seqlengths(reads) <- end(rng)
  cvrg <- coverage(reads)
  lapply(seq_along(genes), function(g){
    sig <- cvrg[[seqNames[g]]][start(genes)[g]:end(genes)[g]]
    #take into account strand of the region
    if (isNegGene[g]) sig <- rev(sig)
    as.integer(sig)
  })
}
|
e170e0bbd9778fda8b54b434ba62871bc2e5b5f6
|
2203d1591c360ee124b2b0a798f3567ab2bce818
|
/shiny/ui/ui_aboutPage_ref_analy.R
|
4a9045bcf925d260906a36fa27cb4b65d294096e
|
[
"MIT"
] |
permissive
|
sbalci/clinicopathological
|
65a7e2956fd6257f144489f20a2d9601fc86219a
|
2f7ebbafb1a68c335e038f68cd48b3538b320c60
|
refs/heads/master
| 2022-11-20T02:10:13.633848
| 2021-04-03T14:48:10
| 2021-04-03T14:48:10
| 228,191,123
| 1
| 0
|
MIT
| 2022-11-10T19:35:30
| 2019-12-15T13:44:17
|
R
|
UTF-8
|
R
| false
| false
| 1,638
|
r
|
ui_aboutPage_ref_analy.R
|
## Page 7c: References for Analysis ----
# Shiny UI tab listing software citations, a downloadable bibliography,
# session info, and three package reports (two HTML renderers plus pacman).
# FIX: corrected the typo "envionment" -> "environment" in the R citation
# (the official citation reads "A language and environment for statistical
# computing").
tabPanel(
  "References for the Analysis",
  tags$h3("Package References Used in Analysis"),
  tags$hr(),
  tags$b("Download References bib"),
  tags$br(),
  tags$br(),
  downloadButton(outputId = "downloadbib", label = "Download Bibliography"),
  tags$br(),
  tags$br(),
  tags$hr(),
  tags$h3("Software & Libraries Used"),
  p(
    "The jamovi project (2019). jamovi. (Version 0.9) [Computer Software]. Retrieved from https://www.jamovi.org."
  ),
  p(
    "R Core Team (2018). R: A Language and environment for statistical computing. [Computer software]. Retrieved from https://cran.r-project.org/."
  ),
  p(
    "Fox, J., & Weisberg, S. (2018). car: Companion to Applied Regression. [R package]. Retrieved from https://cran.r-project.org/package=car."
  ),
  p(
    "Wickham et al., (2019). Welcome to the tidyverse. Journal of Open Source Software, 4(43), 1686, https://doi.org/10.21105/joss.01686"
  ),
  p(
    "Data processing was carried out with R (R Core Team, 2019) and the easystats ecosystem (Lüdecke, Waggoner, & Makowski, 2019; Makowski, Ben-Shachar, & Lüdecke, 2019)"
  ),
  tags$br(),
  tags$hr(),
  tags$h3("Session Info"),
  tags$br(),
  verbatimTextOutput("references"),
  tags$br(),
  tags$hr(),
  # NOTE(review): the next three sections all carry the heading "Packages" but
  # render three different outputs -- presumably alternative package reports;
  # consider distinct headings. Left unchanged to preserve displayed text.
  tags$h3("Packages"),
  tags$br(),
  htmlOutput("packagesreport"),
  tags$br(),
  tags$hr(),
  tags$h3("Packages"),
  tags$br(),
  htmlOutput("packagesreport2"),
  tags$br(),
  tags$hr(),
  tags$h3("Packages"),
  tags$br(),
  verbatimTextOutput("packagespacman"),
  tags$br(),
  tags$hr()
)
|
09b2f8ae3c6730d1da0d20cdbc7650a507b4c515
|
312bac9b0575c054208078d7dd7d117d96ca756e
|
/man/interleave.Rd
|
3a393fe6d1bf10ae2443fdec190c97f1f53d9cbd
|
[] |
no_license
|
fbc-studies/fbcutils
|
ba422fdf5eb1b978ca863cd576d41037fc209c9c
|
d0fba39e346a7a9356a738b06a94948237f37c14
|
refs/heads/master
| 2020-03-25T18:41:48.828357
| 2019-02-09T14:56:21
| 2019-02-09T14:56:21
| 144,044,770
| 0
| 0
| null | 2019-02-19T21:15:08
| 2018-08-08T17:11:22
|
R
|
UTF-8
|
R
| false
| true
| 505
|
rd
|
interleave.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interleave.R
\name{interleave}
\alias{interleave}
\title{Interleave elements of vectors}
\usage{
interleave(...)
}
\arguments{
\item{...}{vectors to be interleaved}
}
\value{
result of \code{c(...)} ordered to interleave elements of \code{...}
}
\description{
Use to interleave elements in multiple vectors to a single vector
}
\examples{
x <- paste0("x", 1:5)
y <- paste0("y", 1:2)
z <- paste0("z", 1:3)
interleave(x, y, z)
}
|
b020d2a5afdfd02d3682fba6302e1a2a23cf47c7
|
a82978f0e25412ac1d60a340009cbf3b5ab0c998
|
/R/timedom.trunc.R
|
a000bd62e63199cba785710c590d39307fc880c4
|
[] |
no_license
|
kidzik/freqdom
|
560d8b99f46bc7f35e987d9114e7dd3aabe4fa65
|
fcbfc6958cbb3e96fb02f63acb87dac56bad4dfc
|
refs/heads/master
| 2022-05-02T22:41:58.132430
| 2022-04-18T05:48:16
| 2022-04-18T05:48:16
| 73,332,290
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 750
|
r
|
timedom.trunc.R
|
#' Remove unwanted lags from a linear filter.
#'
#' @title Choose lags of an object of class \code{timedom}
#' @param A an object of class \code{timedom}.
#' @param lags a vector of lags to retain. These lags must be a subset of the
#'   lags defined for the timedom object \code{A}; all other lags are removed.
#' @return An object of class \code{timedom}.
#' @keywords time.domain
#' @export
timedom.trunc <- function(A, lags) {
  if (!is.timedom(A))
    stop("A must be an object of class timedom")
  # Logical mask over the filter's lag axis (third array dimension).
  keep <- A$lags %in% lags
  # drop = FALSE guarantees that a filter of length one stays a 3-d array
  # instead of collapsing to a matrix.
  kept.ops <- A$operators[, , keep, drop = FALSE]
  timedom(kept.ops, lags = as.vector(intersect(lags, A$lags)))
}
|
5a1da959b7bbf4a5a9214a03662c33bdbbbc7d38
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/USAboundaries/R/us_counties.R
|
9e4222eba84624e4abb9651c093a2e4e8044c13f
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,434
|
r
|
us_counties.R
|
#' County boundaries (contemporary and historical)
#'
#' Get the current (2014) boundaries for U.S states, or get historical county
#' boundaries for dates between 30 December 1636 and 31 December 2000.
#'
#' @param map_date The date of the boundaries as some object coercible to a date
#' with \code{as.Date()}; the easiest option is a character vector following
#' the \href{https://en.wikipedia.org/wiki/ISO_8601}{ISO 8601} data format. A
#' \code{NULL} value will return contemporary boundaries.
#' @param resolution The resolution of the map.
#' @param states A character vector of state or territory names. Only boundaries
#' inside these states/territories will be returned. If \code{NULL}, all
#' boundaries will be returned.
#'
#' @return A SpatialPolygonsDataFrame.
#'
#' @seealso For documentation of and citation to the underlying shapefiles for
#' contemporary data from the U.S. Census Bureau, see
#' \code{\link{census_boundaries}}. For documentation of
#' and citation to the underlying shapefiles for contemporary data from the
#' U.S. Census Bureau, see \code{\link{hist_us_counties}}.
#'
#' @examples
#' contemporary <- us_counties()
#' historical <- us_counties("1820-07-04")
#' new_england <- us_counties(states = c("Massachusetts", "Vermont", "Maine",
#' "New Hampshire", "Rhode Island",
#' "Connecticut"))
#' if (require(sp)) {
#' plot(contemporary)
#' plot(historical)
#' plot(new_england)
#' }
#'
#' @export
# County boundaries: contemporary (map_date = NULL) or historical.
# See the roxygen block above for the full parameter documentation.
us_counties <- function(map_date = NULL, resolution = c("low", "high"),
                        states = NULL) {
  # match.arg() restricts resolution to exactly "low" or "high"
  # ("low" is the default when nothing is supplied).
  resolution <- match.arg(resolution)

  if (is.null(map_date)) {
    # Contemporary (2014 Census Bureau) boundaries.
    if (resolution == "low") {
      boundaries <- cb_2014_us_county_20m
    } else {
      check_data_package()
      boundaries <- USAboundariesData::cb_2014_us_county_500k
    }
    boundaries <- filter_by_states(boundaries, states, "state_name")
  } else {
    # Historical boundaries: the date must fall inside the documented range.
    map_date <- as.Date(map_date)
    stopifnot(as.Date("1636-12-30") <= map_date,
              map_date <= as.Date("2000-12-31"))
    if (resolution == "low") {
      boundaries <- hist_us_counties
    } else {
      check_data_package()
      boundaries <- USAboundariesData::hist_us_counties_hires
    }
    boundaries <- filter_by_date(boundaries, map_date, "start_posix", "end_posix")
    boundaries <- filter_by_states(boundaries, states, "state_terr")
  }
  boundaries
}
|
7a1be839df153a53349df018933c422881ba5e9e
|
217251efc9afe3fce8bcc9f3bf553ebcdc67f280
|
/ChangedVersionMP.R
|
9d382bb48dffe1946cb47c99807ea8fb8ef16584
|
[] |
no_license
|
PTWaade/SocCult-Exam
|
f36cc8ddf40317aff83a399cd75d86253fe28c90
|
23fbbfda642dfeaecbbbcad76f27ddfcaf9126bd
|
refs/heads/master
| 2020-03-19T04:19:28.665045
| 2018-06-02T12:38:02
| 2018-06-02T12:38:02
| 135,816,236
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,169
|
r
|
ChangedVersionMP.R
|
library(modeest)
library(rethinking)
#==============================================================================
#=========================== parameters =======================================
#===============================================================================
# here we define values for the parameters for the multiPVL function,
# this function constitutes the simulation and is described below
set.seed(1)
#curve parameter for logistic functions for loyalty and performance transformation
# (globals read inside multiPVL when transforming reputation scores)
loyCurve = 0.5
perCurve = 1
# Simulation size: trials per game, agents per simulation, number of repeats
sim.params = data.frame(
ntrials = 100,
nagents = 100,
nsims = 100)
# agent parameters are sampled from distributions
# Each column holds a 2-element pair: (mean, sd) for the rnorm-drawn
# parameters (Ax, Wx, cx), and (shape, rate) for the rgamma-drawn ax
# -- see the draws at the top of multiPVL.
agent.params = data.frame(
Ax = c(.4,.1), #prospect theory shape parameter for deck choice
Wx = c(2.5,.8), #prospect theory loss aversion parameter for deck choice
ax = c(.2,3), #updating/memory parameter for deck choice
cx = c(4,.5)) #explore - exploit parameter
agent.params$Az = agent.params$Ax #....same for choice to collaborate
agent.params$Wz = agent.params$Wx
agent.params$az = agent.params$ax
agent.params$cz = agent.params$cx
#this controls how agents interact, and what kinds of feedback they receive
condition = data.frame(
interaction = "vote",
knowledge = "symmetrical",
reputation = "performance")
#===============================================================================
#=========================== simulation function ===============================
#===============================================================================
# here we specify the function that consitutes the simulation,
# outputs agent choice behaviour
# Multi-agent PVL-Delta simulation of the Iowa Gambling Task with optional
# group decision making.
#
# On every trial each agent (i) decides whether to join the group or defect,
# and (ii) picks a deck via a PVL-Delta rule (prospect-theory valuation +
# delta learning + softmax).  Joiners receive the payoff of the group's
# choice; defectors the payoff of their own choice.  Reputation (loyalty
# and/or performance, logistically transformed using the globals loyCurve and
# perCurve) can weight the group decision.
#
# sim.params:   data.frame(ntrials, nagents, nsims).
# agent.params: data.frame of 2-element columns: (mean, sd) for the
#               rnorm-drawn Ax/Wx/cx and Az/Wz/cz, (shape, rate) for the
#               rgamma-drawn ax/az.
# condition:    data.frame(interaction = "vote" | "confidence",
#                          knowledge   = "symmetrical" | "asymmetrical",
#                          reputation  = "performance" | "loyalty" | "both" | "baseline").
# Returns: a named list of per-simulation summaries (choice proportions in
# and out of the group, mean expectancies, balances, reputation trajectories,
# plus the raw deck choices, group choices and deck payoffs).
#
# FIX (vote branch): the original computed
#   sum(rep[x[,t]==1] & z[,t]==1, t-1)
# which sums a logical vector AND adds t-1 as an extra summand; the t-1
# subscript belongs inside the matrix index (see the group-choice section).
multiPVL <- function(sim.params, agent.params, condition) {
  #######################################################
  # Take simulation parameters from inputs
  ### trial/task parameters #########
  ntrials = sim.params$ntrials
  nagents = sim.params$nagents
  nsims = sim.params$nsims
  #######################################################
  # Create empty arrays to populate with simulated data
  propJoin=c()
  propA_in=c()
  propB_in=c()
  propC_in=c()
  propD_in=c()
  propA_out=c()
  propB_out=c()
  propC_out=c()
  propD_out=c()
  muExpA=c()
  muExpB=c()
  muExpC=c()
  muExpD=c()
  muBalanceOut=c()
  muBalanceIn=c()
  muBalance_cum=c()
  muExpOut=c()
  muExpIn=c()
  nswitch=c()
  group_choice = array(0,c(nsims,ntrials))
  group_reward = array(0,c(nsims,ntrials,4))
  group_loss = array(0,c(nsims,ntrials,4))
  muPerL = c()
  endPerL = c()
  muLoyL = c()
  endLoyL = c()
  #######################################################
  ################## Run simulation #####################
  #######################################################
  # Set initial (i.e. trial 1) values for all agents, for all simulations
  for (sim in 1:nsims) {
    #-------------------------------------create agent parameters----------------------------------
    ### card decision parameters ######
    Ax = rnorm(nagents,agent.params$Ax[1],agent.params$Ax[2])
    wx = rnorm(nagents,agent.params$Wx[1],agent.params$Wx[2])
    ax = rgamma(nagents,agent.params$ax[1],agent.params$ax[2])
    cx <- rnorm(nagents,agent.params$cx[1],agent.params$cx[2])
    thetax <- 2^cx - 1   # softmax inverse temperature for deck choice
    ### collaborate decision parameters ######
    Az = rnorm(nagents,agent.params$Az[1],agent.params$Az[2])
    wz = rnorm(nagents,agent.params$Wz[1],agent.params$Wz[2])
    az = rgamma(nagents,agent.params$az[1],agent.params$az[2])
    cz <- rnorm(nagents,agent.params$cz[1],agent.params$cz[2])
    thetaz <- 2^cz - 1   # softmax inverse temperature for the join choice
    #-------------------------------------set up decks for simulation----------------------------------
    # Standard IGT payoff schedule: A/B are high-reward "bad" decks, C/D are
    # low-reward "good" decks; losses differ in frequency and magnitude.
    Rewards <- cbind(rep(100,ntrials),
                     rep(100,ntrials),
                     rep(50,ntrials),
                     rep(50,ntrials))
    Losses <- cbind(sample(c(rep(-250,50),c(rep(0,50))),ntrials),
                    sample(c(rep(-1250,10),c(rep(0,90))),ntrials),
                    sample(c(rep(-50,50),c(rep(0,50))),ntrials),
                    sample(c(rep(-250,10),c(rep(0,90))),ntrials))
    group_reward[sim,,] <- Rewards
    group_loss[sim,,] <- Losses
    ################################################################################
    #---- initiate paramaters needed for decisions in first trial------
    # empty matrices for valence, expectancy, and probability (of card choice) parameters
    Vx = array(0,c(nagents,ntrials,4)) #card choice valence
    Ex = array(0,c(nagents,ntrials,4)) #card choice expectancies
    expEx = array(0,c(nagents,ntrials,4)) #transformed expectancies
    Px = array(0,c(nagents,ntrials,4)) #card choice probability
    x = array(0,c(nagents,ntrials)) #card choice
    #-set parameters needed to get started on trial 1 for agent n
    Px[,1,]=c(.25,.25,.25,.25) # set to non-zero, but don't get used for anything past t = 1
    x[,1] = sample(seq(1,4,1),1) #randomly select one of the decks for t = 1
    # empty matrices for valence, expectancy, and probability (of collaboration) parameters
    Vz = array(0,c(nagents,ntrials,2)) #valence of decision to collaborate
    Ez = array(0,c(nagents,ntrials,2)) #Expectancy
    expEz = array(0,c(nagents,ntrials,2))#transformed expectancy
    Pz = array(0,c(nagents,ntrials,2)) #probability of choice
    z = array(0,c(nagents,ntrials)) #decision to collaborate, or not at t = 1
    # set parameters to get started
    Pz[,1,]=.01 #p of collaborating at t = 1 for all agents
    G = array(0,c(ntrials,4)) #Group choice
    Vg = array(0,c(ntrials,4)) #Valence of group choice
    pdeck=array(0,c(nagents,100))
    pjoin=array(0,c(nagents,100))
    balance = array(0,c(nagents,ntrials)) #monetary balance of agent per trial
    balanceCum = array(0,c(ntrials)) #cumulative balance
    ### reputation parameters
    loy = array(0,c(nagents,ntrials)) #<- loyalty
    loyL = array(0,c(nagents,ntrials)) #<- logistically transformed
    perScore = array(0,c(nagents,ntrials)) #<- performance
    per = array(0,c(nagents,ntrials)) #<- performance
    perL = array(0,c(nagents,ntrials)) #<- logistically transformed
    rep = array(0,c(nagents,ntrials)) #<- reputation (shadows base::rep as a
    # data binding only; base::rep is still found in call position)
    Exweight = array(0,c(nagents,ntrials,4)) #<- weighted expectancies for deck choice
    ####################################################################
    ###################### Begin simulating choices ####################
    # run trial loop from trial 2 - because some parameters have t - 1 reference
    for (t in 2:ntrials) {
      #all agents choose to join collaboration, and choose deck
      for (n in 1:nagents) {# each agent decides which deck
        #---------------Make decision of whether to join collaboration------------------------
        # make choice weighted by probability representations for joining collaboration
        # translate probability of choosing to join collaboration into proportions within a 100 element array.....
        # (for each of the n agents) - i.e. Pz from previous trial
        pjoin[n,] = c(rep(0,round(Pz[n,t-1,1]*100)),
                      rep(1,round(Pz[n,t-1,2]*100)))
        #.... and then sample from that array (for each of the n agents)
        z[n,t] = sample(pjoin[n,],1)
        # make choice weighted by probability representations
        # translate probability of choosing each deck into proportions within a 100 element array.....
        A = c(rep(1,round(Px[n,t-1,1]*100)),
              rep(2,round(Px[n,t-1,2]*100)),
              rep(3,round(Px[n,t-1,3]*100)),
              rep(4,round(Px[n,t-1,4]*100)),
              1)
        pdeck[n,] = A[1:100]
        #.... and then sample from that array
        x[n,t] = sample(pdeck[n,],1)
        ### Reputation updating
        #Loyalty score goes up or down depending on participation
        loy[n,t] = loy[n, t-1] + (-0.05 + z[n,t]*0.1)
        #This is fed to a logistic function to create a transformed loyalty score
        loyL[n,t] = 1 / (1 + exp(-loyCurve*(loy[n,t])))
        #Performance score updates proportional to net reward
        per[n,t] = per[n, t-1] + (perScore[n, t-1])*0.0001
        #And is logistically transformed (rescaled to [-1, 1])
        perL[n,t] = ((1 / (1 + exp(-perCurve*(per[n,t])))) -0.5)*2
        #Decide which type of reputation
        if (condition$reputation == "performance"){
          rep[n,t] = perL[n,t]
        } else if (condition$reputation == "loyalty") {
          rep[n,t] = loyL[n,t]
        } else if (condition$reputation == "both") {
          rep[n,t] = loyL[n,t]*perL[n,t]
        } else if (condition$reputation == "baseline") {
          rep[n,t] = 1}
        #Calculate weighted expectancies for each agent
        Exweight[n,t-1,] = Ex[n,t-1,]*rep[n,t]
      }
      #Group choice can be decided on vote or by sharing confidence, depending on simulation condition
      #WEIGHTED BY REPUTATION
      if (condition$interaction == "vote") {
        # BUG FIX: sum the reputations (at t-1) of joining agents voting for
        # each deck.  The original placed t-1 as a second argument to sum(),
        # adding it to a logical count instead of indexing the rep matrix.
        G[t,] = c(sum(rep[x[,t]==1 & z[,t]==1, t-1]),
                  sum(rep[x[,t]==2 & z[,t]==1, t-1]),
                  sum(rep[x[,t]==3 & z[,t]==1, t-1]),
                  sum(rep[x[,t]==4 & z[,t]==1, t-1])
                  )/(sum(z[,t]==1))#to avoid nan if group empty
      }else if (condition$interaction == "confidence") { #interact with mean expectancy
        G[t,] = c(mean(Exweight[z[,t]==1,t-1,1]),
                  mean(Exweight[z[,t]==1,t-1,2]),
                  mean(Exweight[z[,t]==1,t-1,3]),
                  mean(Exweight[z[,t]==1,t-1,4])+.00000001)}
      # if nobody joined, the division/means above yield NaN: fall back to
      # a near-zero uniform preference
      if (is.nan(G[t,1])) {G[t,]=c(.0000001,.0000001,.0000001,.0000001)}
      # compute reward for group decision
      Gx = which.max(G[t,])
      Rg = c(0,0,0,0) # reset reward representation R for all decks on a trial
      Rg[Gx] = Rewards[t,Gx]
      Lg = c(0,0,0,0) # reset loss representation L for all decks on a trial
      Lg[Gx] = Losses[t,Gx]
      group_choice[sim,t] <- Gx
      for (n in 1:nagents) {# each agent decides which deck
        #----compute reward for what agent's choice would have been had they gone alone,
        #irrespective of whether she joined the group or not
        Rx = c(0,0,0,0) # reset reward representation R for all decks on a trial
        Rx[x[n,t]]=Rewards[t,x[n,t]] # and update on the basis of the chosen deck
        Lx = c(0,0,0,0) # reset loss representation L for all decks on a trial
        Lx[x[n,t]]=Losses[t,x[n,t]] # and update on the basis of the chosen deck
        #Result of choice if had not been in group - used for reputation only
        perScore[n,t] = sum(Rx-Lx)
        # calculate the actual outcome the agent gets depending on group affiliation
        R = ((1 - z[n,t]) * Rx) + (z[n,t]*Rg)
        L = ((1 - z[n,t]) * Lx) + (z[n,t]*Lg)
        #apply prospect theory to outcomes
        VxL = -wx[n]*abs(L)^Ax[n]
        VxR = R^Ax[n]
        Vx[n,t,] <- VxR + VxL
        #Remove errors (NaN from negative bases, runaway negative valences)
        Vx[n,t,][is.na(Vx[n,t,])] = 0
        Vx[n,t,][Vx[n,t,]< (-10000)] = -37
        # Update deck expected valence - apply delta learning rule
        Ex[n,t,] = Ex[n,t-1,] + (ax[n] * (Vx[n,t,] - Ex[n,t-1,]))
        # set maximum Ex = 5, to avoid inf values in conversion to Px
        Ex[n,t,][Ex[n,t,]>5]<-5
        # transform to proportional probabilities (softmax)
        expEx[n,t,] = exp(thetax[n]*Ex[n,t,])
        Px[n,t,] = expEx[n,t,]/sum(expEx[n,t,])
        # update expectancies for decision to collaborate. values are updated on the basis of valence Vz
        # on each trial, which tracks summed discrepency between own and group choice.
        # Calculate the objective reward discrepency between each individuals choice, and the group choice
        # Z[1] = objective reward of choosing alone, Z[2] = value of being in group
        Z = c(sum(Rx+Lx)-sum(Rg+Lg),sum(Rg+Lg)-sum(Rx+Lx)) #value in group only if not in group
        # Apply prospect theory
        Vz[n,t,Z>0] = Z[Z>0]^Az[n]
        Vz[n,t,Z<0] = -wz[n]*abs(Z[Z<0])^Az[n]
        #transformation to expectancy.
        #2 expectancies are coded, representing differences in Knowledge
        #Ein is the perspective from "inside" the group. Here, the agent is not told what their reward would
        #have been had they chosen themselves, and only receives feedback about the group choice of which they
        #are a part. They must use this knowledge, plus their past expectancies, to update their current expectancies
        #So this is the difference between previous expectancies and current group expectancies
        Ein = c(Ex[n,t-1,x[n,t]] - Ex[n,t,Gx], Ex[n,t,Gx] - Ex[n,t-1,x[n,t]])
        #Eout is the perspective from "outside" the group, here the expectancy is based on the difference
        #in value between joining the group or not, where the agent has access to objective value difference
        #in choosing alone or choosing in the group. They can update their expectancies objectively
        Eout = Ez[n,t-1,] + (az[n] * (Vz[n,t,] - Ez[n,t-1,]))
        #If the knowledge condition of the simulation is symmetric, then all agents get objective knowledge
        #of own and group choices
        if (condition$knowledge == "symmetrical"){
          Ez[n,t,] = Eout   # (1-z)*Eout + z*Eout simplifies to Eout
        } else if (condition$knowledge == "asymmetrical") {
          Ez[n,t,] = ((1 - z[n,t]) * Eout) + (z[n,t]*Ein)}
        # clamp to [-5, 5] to avoid inf values in conversion to Pz
        Ez[n,t,][Ez[n,t,]>5]= 5
        Ez[n,t,][Ez[n,t,]< (-5)]= -5
        #transform expectancies and convert to probabilities
        expEz[n,t,] = exp(thetaz[n]*Ez[n,t,])
        Pz[n,t,] = expEz[n,t,]/sum(expEz[n,t,])
        # ROBUSTNESS FIX: fall back to the previous trial's probabilities when
        # the softmax produced NaN. The original assigned the length-2 RHS
        # into only the NA slots, which errors when exactly one entry is NA.
        if (anyNA(Pz[n,t,])) Pz[n,t,] = Pz[n,t-1,]
        # track the balance of the agent
        balance[n,t]=sum(R)+sum(L)
      } # close agent loop
      balanceCum[t] = mean(balance[,t]) + balanceCum[t-1]
    } # close trial loop
    # Per-simulation summaries: one column appended per simulation
    propJoin = cbind(propJoin,colMeans(z[,2:ntrials]))
    propA_in= cbind(propA_in,colMeans(x[,2:ntrials]==1&z[,2:ntrials]==1)/colMeans(z[,2:ntrials]==1))
    propB_in= cbind(propB_in,colMeans(x[,2:ntrials]==2&z[,2:ntrials]==1)/colMeans(z[,2:ntrials]==1))
    propC_in= cbind(propC_in,colMeans(x[,2:ntrials]==3&z[,2:ntrials]==1)/colMeans(z[,2:ntrials]==1))
    propD_in= cbind(propD_in,colMeans(x[,2:ntrials]==4&z[,2:ntrials]==1)/colMeans(z[,2:ntrials]==1))
    propA_out= cbind(propA_out,colMeans(x[,2:ntrials]==1&z[,2:ntrials]==0)/colMeans(z[,2:ntrials]==0))
    propB_out= cbind(propB_out,colMeans(x[,2:ntrials]==2&z[,2:ntrials]==0)/colMeans(z[,2:ntrials]==0))
    propC_out= cbind(propC_out,colMeans(x[,2:ntrials]==3&z[,2:ntrials]==0)/colMeans(z[,2:ntrials]==0))
    propD_out= cbind(propD_out,colMeans(x[,2:ntrials]==4&z[,2:ntrials]==0)/colMeans(z[,2:ntrials]==0))
    muBalance_cum = cbind(muBalance_cum,balanceCum)
    muBalanceOut = cbind(muBalanceOut,mean(balance[z==0]))
    muBalanceIn = cbind(muBalanceIn,mean(balance[z==1]))
    muExpA <- cbind(muExpA,colMeans(Ex[,2:ntrials,1]))
    muExpB <- cbind(muExpB,colMeans(Ex[,2:ntrials,2]))
    muExpC <- cbind(muExpC,colMeans(Ex[,2:ntrials,3]))
    muExpD <- cbind(muExpD,colMeans(Ex[,2:ntrials,4]))
    muExpOut = cbind(muExpOut,colMeans(Ez[,2:ntrials,1]))
    muExpIn = cbind(muExpIn,colMeans(Ez[,2:ntrials,2]))
    # number of join/defect switches for agent 1
    nswitch=c(nswitch,sum(abs((z[1,1:(ntrials-1)]-z[1,2:ntrials]))))
    muPerL = cbind(muPerL, rowMeans(perL))
    endPerL = cbind(endPerL, perL[,ntrials])
    muLoyL = cbind(muLoyL, rowMeans(loyL))
    endLoyL = cbind(endLoyL, loyL[,ntrials])
  } # close simulation loop
  # (Removed three no-op self-assignments Pz=Pz, x=x, group_choice=group_choice;
  # note that Pz, x, Ez from the LAST simulation only are returned below.)
  result = list(propA_in=propA_in,
                propB_in=propB_in,
                propC_in=propC_in,
                propD_in=propD_in,
                propA_out=propA_out,
                propB_out=propB_out,
                propC_out=propC_out,
                propD_out=propD_out,
                propJoin=propJoin,
                muExpA=muExpA,
                muExpB=muExpB,
                muExpC=muExpC,
                muExpD=muExpD,
                muBalanceOut=muBalanceOut,
                muBalanceIn=muBalanceIn,
                muBalance_cum=muBalance_cum,
                muPerL = muPerL,
                endPerL = endPerL,
                muLoyL = muLoyL,
                endLoyL = endLoyL,
                muExpOut=muExpOut,
                muExpIn=muExpIn,
                nswitch=nswitch,
                Pz=Pz,
                x = x,
                group_choice=group_choice,
                group_reward=group_reward,
                group_loss=group_loss)
  return(result)
} # close function
#===============================================================================
#======================= Plotting function =====================================
#===============================================================================
# Here we take the output from the simulation and plot results figures for the paper
#===============================================================================
# Draw the results figure comparing two simulation conditions.
#
# condA, condB: lists returned by multiPVL() for the two conditions.
# condLabels:   character vector of length 3 --
#               [1] label for condA, [2] label for condB, [3] panel title.
# Colour code throughout: blue shades = condA, red shades = condB.
# Layout (4 rows): 1) proportion collaborating per trial (all sims + mean);
# 2) density of mean trial value for defectors vs joiners, one panel per
# condition; 3) cumulative balance mean and SD over trials; 4) per-deck
# choice proportions, inside vs outside the group.
# Returns the recorded plot object (recordPlot()); plotting itself is a
# side effect on the active graphics device.
# NOTE(review): mlv(...)$M assumes an older modeest API in which mlv()
# returns a list with component M -- confirm against the installed version.
#===============================================================================
plotResults <- function(condA, condB, condLabels) {
layout(matrix(c(1,1,1,1, 2,2, 3,3, 4,4, 5,5, 6,7,8,9),
nrow = 4, ncol = 4, byrow = TRUE))
#-----------------------------------------------------------
##### Plot proportion collaborating over trials ############
# Plots all simulations, with mean in bold
COL <- adjustcolor(c("blue", "red"), alpha = 0.1)
plot(rowMeans(condA$propJoin),
xlab = "Trial Number",
ylab = "Proportion Collaborating",
main=condLabels[3],
type='l',
ylim=c(0,1.1),
lwd=4,
col="dark blue",
axes=FALSE)
axis(1)
axis(2)
# one faint line per simulation (assumes 100 simulation columns)
for (i in 1:100) {
lines(condA$propJoin[,i],col=COL[1])
}
lines(rowMeans(condB$propJoin),type='l',ylim=c(0,1),lwd=4,col="dark red")
for (i in 1:100) {
lines(condB$propJoin[,i],col=COL[2])
}
#legend (x = 35, y = 1.2,
# legend = c(condLabels[1], condLabels[2]),
# col = c("dark blue","dark red"), bty = "n", lwd = 4)
#-----------------------------------------------------------
##### Plot distribution of mean value gained/lost for joining/defecting for condition A
COL <- adjustcolor(c("dark blue", "light blue"), alpha = 0.7)
plot(density(condA$muBalanceOut),xlim=c(-100,100),ylim=c(0,.1),
axes=FALSE,xlab="Mean Trial Value/Simulation",
ylab="",main=condLabels[1])
axis(1)
polygon(density(condA$muBalanceOut), col=COL[1])
lines(density(condA$muBalanceIn))
polygon(density(condA$muBalanceIn), col=COL[2])
# MLV = most likely value (mode) of the density, via modeest::mlv
legend (x = -120, y = 0.16,
legend = c(paste("Defect, MLV =", round(mlv(condA$muBalanceOut)$M,digits=2)),
paste("Join, MLV =", round(mlv(condA$muBalanceIn)$M,digits=2))),
col = c("dark blue","light blue"), bty = "n", lwd = 4, y.intersp = 0.15)
##### And condition B
COL <- adjustcolor(c("dark red", "red"), alpha = 0.7)
plot(density(condB$muBalanceOut),xlim=c(-100,100),ylim=c(0,.1),
axes=FALSE,xlab="Mean Trial Value/Simulation",
ylab="",main=condLabels[2])
axis(1)
polygon(density(condB$muBalanceOut), col=COL[1])
lines(density(condB$muBalanceIn))
polygon(density(condB$muBalanceIn), col=COL[2])
legend (x = -120, y = 0.16,
legend = c(paste("Defect, MLV =", round(mlv(condB$muBalanceOut)$M,digits=2)),
paste("Join, MLV =", round(mlv(condB$muBalanceIn)$M,digits=2))),
col = c("dark red","red"), bty = "n", lwd = 4, y.intersp = 0.15)
#-----------------------------------------------------------
#Plot cumulative balance
plot(rowMeans(condA$muBalance_cum),
xlab = "Trial Number",
ylab = "Cumulative Balance (Mean)",
type='l',
ylim=c(-700,700),
lwd=4,
col="dark blue",
axes=FALSE)
axis(1)
axis(2)
lines(rowMeans(condB$muBalance_cum,na.rm =TRUE),type='l',ylim=c(0,1),lwd=4,col="dark red")
#-----------------------------------------------------------
#Plot SD
# per-trial SD of cumulative balance across simulations, computed manually
plot(rowMeans(sqrt((condA$muBalance_cum-rowMeans(condA$muBalance_cum))^2)),
xlab = "Trial Number",
ylab = "Cumulative Balance (SD)",
type='l',
ylim=c(0,1200),
lwd=4,
col="dark blue",
axes=FALSE)
axis(1)
axis(2)
lines(rowMeans(sqrt((condB$muBalance_cum-rowMeans(condB$muBalance_cum,na.rm =TRUE))^2))
,type='l',ylim=c(0,1),lwd=4,col="dark red")
#-----------------------------------------------------------
#Plot Deck Choice Proportions
# light colour = inside the group, dark colour = outside (defectors)
plot(rowMeans(condA$propA_in,na.rm = TRUE),
xlab = "Trial Number",
ylab = "Proportion Choose A",
type='l',
ylim=c(0,.6),
lwd=4,
col="light blue",
axes=FALSE)
axis(1)
axis(2)
lines(rowMeans(condA$propA_out,na.rm = TRUE),type='l',ylim=c(0,1),lwd=4,col="dark blue")
lines(rowMeans(condB$propA_in,na.rm = TRUE),type='l',ylim=c(0,1),lwd=4,col="red")
lines(rowMeans(condB$propA_out,na.rm = TRUE),type='l',ylim=c(0,1),lwd=4,col="dark red")
plot(rowMeans(condA$propB_in,na.rm = TRUE),
xlab = "Trial Number",
ylab = "Proportion Choose B",
type='l',
ylim=c(0,.6),
lwd=4,
col="light blue",
axes=FALSE)
axis(1)
axis(2)
lines(rowMeans(condA$propB_out,na.rm = TRUE),type='l',ylim=c(0,1),lwd=4,col="dark blue")
lines(rowMeans(condB$propB_in,na.rm = TRUE),type='l',ylim=c(0,1),lwd=4,col="red")
lines(rowMeans(condB$propB_out,na.rm = TRUE),type='l',ylim=c(0,1),lwd=4,col="dark red")
plot(rowMeans(condA$propC_in,na.rm = TRUE),
xlab = "Trial Number",
ylab = "Proportion Choose C",
type='l',
ylim=c(0,.6),
lwd=4,
col="light blue",
axes=FALSE)
axis(1)
axis(2)
lines(rowMeans(condA$propC_out,na.rm = TRUE),type='l',ylim=c(0,1),lwd=4,col="dark blue")
lines(rowMeans(condB$propC_in,na.rm = TRUE),type='l',ylim=c(0,1),lwd=4,col="red")
lines(rowMeans(condB$propC_out,na.rm = TRUE),type='l',ylim=c(0,1),lwd=4,col="dark red")
plot(rowMeans(condA$propD_in,na.rm = TRUE),
xlab = "Trial Number",
ylab = "Proportion Choose D",
type='l',
ylim=c(0,.6),
lwd=4,
col="light blue",
axes=FALSE)
axis(1)
axis(2)
lines(rowMeans(condA$propD_out,na.rm = TRUE),type='l',ylim=c(0,1),lwd=4,col="dark blue")
lines(rowMeans(condB$propD_in,na.rm = TRUE),type='l',ylim=c(0,1),lwd=4,col="red")
lines(rowMeans(condB$propD_out,na.rm = TRUE),type='l',ylim=c(0,1),lwd=4,col="dark red")
# capture everything drawn above so the caller can replay/save the figure
simPlot <- recordPlot()
return(simPlot)
}
#===============================================================================
#======================= Call simulation and plotting functions ================
#===============================================================================

############## run simulations ############
loyCurve <- 0.5
perCurve <- 1

# Run one simulation for a given experimental condition.
# Works on a local copy of the global `condition` list, so every other
# setting it carries is passed through to multiPVL unchanged.
runCondition <- function(knowledge, interaction, reputation) {
  cond <- condition
  cond$knowledge <- knowledge
  cond$interaction <- interaction
  cond$reputation <- reputation
  multiPVL(sim.params, agent.params, cond)
}

### symmetrical knowledge ###
# vote
symvotebaseline <- runCondition("symmetrical", "vote", "baseline")
symvoteloy      <- runCondition("symmetrical", "vote", "loyalty")
symvoteper      <- runCondition("symmetrical", "vote", "performance")
symvoteboth     <- runCondition("symmetrical", "vote", "both")
# confidence
symconfbaseline <- runCondition("symmetrical", "confidence", "baseline")
symconfloy      <- runCondition("symmetrical", "confidence", "loyalty")
symconfper      <- runCondition("symmetrical", "confidence", "performance")
symconfboth     <- runCondition("symmetrical", "confidence", "both")

### asymmetrical knowledge ###
# vote
asymvotebaseline <- runCondition("asymmetrical", "vote", "baseline")
asymvoteloy      <- runCondition("asymmetrical", "vote", "loyalty")
asymvoteper      <- runCondition("asymmetrical", "vote", "performance")
asymvoteboth     <- runCondition("asymmetrical", "vote", "both")
# confidence
asymconfbaseline <- runCondition("asymmetrical", "confidence", "baseline")
asymconfloy      <- runCondition("asymmetrical", "confidence", "loyalty")
asymconfper      <- runCondition("asymmetrical", "confidence", "performance")
asymconfboth     <- runCondition("asymmetrical", "confidence", "both")

################## ----- plotting ---- ################
# Each call compares a weighting scheme against the equal-weight baseline;
# the third label element names the knowledge/interaction condition.
### - symmetrical - ###
symvote.baseline.loyalty.plot <-
  plotResults(symvotebaseline, symvoteloy, c("Equal", "Loyalty", "Symmetry, Vote"))
symvote.baseline.performance.plot <-
  plotResults(symvotebaseline, symvoteper, c("Equal", "Performance", "Symmetry, Vote"))
symvote.baseline.both.plot <-
  plotResults(symvotebaseline, symvoteboth, c("Equal", "Both", "Symmetry, Vote"))
symconf.baseline.loyalty.plot <-
  plotResults(symconfbaseline, symconfloy, c("Equal", "Loyalty", "Symmetry, Confidence"))
symconf.baseline.performance.plot <-
  plotResults(symconfbaseline, symconfper, c("Equal", "Performance", "Symmetry, Confidence"))
symconf.baseline.both.plot <-
  plotResults(symconfbaseline, symconfboth, c("Equal", "Both", "Symmetry, Confidence"))

### - asymmetrical - ###
Avote.baseline.loyalty.plot <-
  plotResults(asymvotebaseline, asymvoteloy, c("Equal", "Loyalty", "Asymmetry, Vote"))
Avote.baseline.performance.plot <-
  plotResults(asymvotebaseline, asymvoteper, c("Equal", "Performance", "Asymmetry, Vote"))
Avote.baseline.both.plot <-
  plotResults(asymvotebaseline, asymvoteboth, c("Equal", "Both", "Asymmetry, Vote"))
Aconfidence.baseline.loyalty.plot <-
  plotResults(asymconfbaseline, asymconfloy, c("Equal", "Loyalty", "Asymmetry, Confidence"))
Aconfidence.baseline.performance.plot <-
  plotResults(asymconfbaseline, asymconfper, c("Equal", "Performance", "Asymmetry, Confidence"))
Aconfidence.baseline.both.plot <-
  plotResults(asymconfbaseline, asymconfboth, c("Equal", "Both", "Asymmetry, Confidence"))

################## ----- Reputation distributions ---- ################
# Density of each agent's reputation rating on the final trial, per condition.
# (Typo "weightnigs" in the original plot titles fixed to "weightings".)
dens(symvoteper$endPerL, main = paste("Symmetry, voting, performance weighting: ","Performance rating on final trial"))
dens(symvoteloy$endLoyL, main = paste("Symmetry, voting, loyalty weighting: ","Loyalty rating on final trial"))
dens(symvoteboth$endPerL, main = paste("Symmetry, voting, both weightings: ","Performance rating on final trial"))
dens(symvoteboth$endLoyL, main = paste("Symmetry, voting, both weightings: ","Loyalty rating on final trial"))
dens(symconfper$endPerL, main = paste("Symmetry, confidence, performance weighting: ","Performance rating on final trial"))
dens(symconfloy$endLoyL, main = paste("Symmetry, confidence, loyalty weighting: ","Loyalty rating on final trial"))
dens(symconfboth$endPerL, main = paste("Symmetry, confidence, both weightings: ","Performance rating on final trial"))
dens(symconfboth$endLoyL, main = paste("Symmetry, confidence, both weightings: ","Loyalty rating on final trial"))
dens(asymvoteper$endPerL, main = paste("Asymmetry, voting, performance weighting: ","Performance rating on final trial"))
dens(asymvoteloy$endLoyL, main = paste("Asymmetry, voting, loyalty weighting: ","Loyalty rating on final trial"))
dens(asymvoteboth$endPerL, main = paste("Asymmetry, voting, both weightings: ","Performance rating on final trial"))
dens(asymvoteboth$endLoyL, main = paste("Asymmetry, voting, both weightings: ","Loyalty rating on final trial"))
dens(asymconfper$endPerL, main = paste("Asymmetry, confidence, performance weighting: ","Performance rating on final trial"))
dens(asymconfloy$endLoyL, main = paste("Asymmetry, confidence, loyalty weighting: ","Loyalty rating on final trial"))
dens(asymconfboth$endPerL, main = paste("Asymmetry, confidence, both weightings: ","Performance rating on final trial"))
dens(asymconfboth$endLoyL, main = paste("Asymmetry, confidence, both weightings: ","Loyalty rating on final trial"))
|
02491edf9bb7e04c6b1a0675a1539b51422f05d3
|
854967ae768996c755bc85162a0e2225c78fca74
|
/R/fit.R
|
c41bdaec8dc7ff21b901e2851e4be2534ddbbb16
|
[] |
no_license
|
hansenlab/mpra
|
9909bcf22f74315438db29d8c6b94dbbbd123c78
|
cb2f463699b40ff9e572428183f08fece71d026b
|
refs/heads/master
| 2021-07-18T18:10:06.007647
| 2021-02-26T16:15:09
| 2021-02-26T16:15:09
| 101,249,489
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,094
|
r
|
fit.R
|
mpralm <- function(object, design, aggregate = c("mean", "sum", "none"),
                   normalize = TRUE, block = NULL,
                   model_type = c("indep_groups", "corr_groups"),
                   plot = TRUE, ...) {
    ## Entry point for MPRA linear modelling: validates inputs and
    ## dispatches to the independent-groups or correlated-groups fitter.
    .is_mpra_or_stop(object)
    if (nrow(design) != ncol(object)) {
        stop("Rows of design must correspond to the columns of object")
    }
    model_type <- match.arg(model_type)
    aggregate <- match.arg(aggregate)
    ## The correlated model needs a blocking factor pairing the samples.
    if (model_type == "corr_groups" && is.null(block)) {
        stop("'block' must be supplied for the corr_groups model type")
    }
    fit <- switch(model_type,
                  indep_groups = .fit_standard(object = object, design = design,
                                               aggregate = aggregate,
                                               normalize = normalize,
                                               plot = plot, ...),
                  corr_groups = .fit_corr(object = object, design = design,
                                          aggregate = aggregate,
                                          normalize = normalize, block = block,
                                          plot = plot, ...))
    fit
}
get_precision_weights <- function(logr, design, log_dna, span = 0.4,
                                  plot = TRUE, ...) {
    ## Estimate a mean-variance trend and turn it into per-observation
    ## precision weights (voom-style) for a weighted limma fit.
    if (nrow(design) != ncol(logr)) {
        stop("Rows of design must correspond to the columns of logr")
    }
    ## Element-wise residual SDs from an initial unweighted fit.
    resid_sd <- lmFit(logr, design = design, ...)$sigma
    mean_log_dna <- rowMeans(log_dna, na.rm = TRUE)
    sqrt_sd <- sqrt(resid_sd)
    ## Smooth sqrt(SD) against mean log-DNA abundance.
    trend <- lowess(mean_log_dna, sqrt_sd, f = span)
    if (plot) {
        plot(mean_log_dna, sqrt_sd, pch = 16, col = alpha("black", 0.25),
             xlab = "Mean(log2(dna+1))", ylab = "sqrt(sd(log-ratio))")
        lines(trend, lwd = 3, col = "red")
    }
    trend_fun <- approxfun(trend, rule = 2)
    ## Interpolated sqrt(SD) at each observed log-DNA value; raising to the
    ## 4th power recovers the variance, so 1/(.)^4 is an inverse-variance
    ## weight.
    weights <- 1 / trend_fun(log_dna)^4
    dim(weights) <- dim(log_dna)
    rownames(weights) <- rownames(logr)
    colnames(weights) <- colnames(logr)
    weights
}
compute_logratio <- function(object, aggregate = c("mean", "sum", "none")) {
    ## Per-element log2(RNA/DNA) ratios, with counts offset by 1.
    .is_mpra_or_stop(object)
    aggregate <- match.arg(aggregate)
    if (aggregate == "mean") {
        ## Barcode-level ratios, then averaged within each element ID.
        dna <- getDNA(object, aggregate = FALSE)
        rna <- getRNA(object, aggregate = FALSE)
        eid <- getEid(object)
        ratios <- log2(rna + 1) - log2(dna + 1)
        grouped <- by(ratios, eid, colMeans, na.rm = TRUE)
        logr <- do.call("rbind", grouped)
        rownames(logr) <- names(grouped)
    } else {
        ## "sum" and "none": aggregate counts first (also keeps the element
        ## ID ordering of logr consistent with the aggregated DNA counts).
        dna <- getDNA(object, aggregate = TRUE)
        rna <- getRNA(object, aggregate = TRUE)
        logr <- log2(rna + 1) - log2(dna + 1)
    }
    logr
}
normalize_counts <- function(object, block = NULL) {
    ## Total-count (library size) normalization of the DNA and RNA assays.
    .is_mpra_or_stop(object)
    dna <- getDNA(object, aggregate = FALSE)
    rna <- getRNA(object, aggregate = FALSE)
    dna_totals <- colSums(dna, na.rm = TRUE)
    rna_totals <- colSums(rna, na.rm = TRUE)
    if (!is.null(block)) {
        ## Pool library sizes within blocks, then expand back to one
        ## total per column.
        dna_totals <- tapply(dna_totals, block, sum, na.rm = TRUE)[block]
        rna_totals <- tapply(rna_totals, block, sum, na.rm = TRUE)[block]
    }
    ## Rescale every column to a common library size of 10e6 (= 1e7).
    ## NOTE(review): presumably "counts per 10 million" is intended rather
    ## than the usual 1e6 — confirm with the package authors.
    assay(object, "DNA") <- round(sweep(dna, 2, dna_totals, FUN = "/") * 10e6)
    assay(object, "RNA") <- round(sweep(rna, 2, rna_totals, FUN = "/") * 10e6)
    object
}
.fit_standard <- function(object, design, aggregate = c("mean", "sum", "none"),
                          normalize = TRUE, return_elist = FALSE,
                          return_weights = FALSE, plot = TRUE, span = 0.4, ...) {
    ## Independent-groups model: a weighted limma fit on the log-ratios,
    ## with precision weights from the estimated mean-variance trend.
    .is_mpra_or_stop(object)
    if (nrow(design) != ncol(object)) {
        stop("Rows of design must correspond to the columns of object")
    }
    aggregate <- match.arg(aggregate)
    if (normalize) {
        object <- normalize_counts(object)
    }
    ratios <- compute_logratio(object, aggregate = aggregate)
    dna_log <- log2(getDNA(object, aggregate = TRUE) + 1)
    ## Precision weights from the mean-variance relationship.
    weights <- get_precision_weights(logr = ratios, design = design,
                                     log_dna = dna_log, span = span,
                                     plot = plot, ...)
    elist <- new("EList", list(E = ratios, weights = weights, design = design))
    if (return_weights) {
        return(weights)
    }
    if (return_elist) {
        return(elist)
    }
    eBayes(lmFit(elist, design))
}
.fit_corr <- function(object, design, aggregate = c("mean", "sum", "none"),
                      normalize = TRUE, block = NULL, return_elist = FALSE,
                      return_weights = FALSE, plot = TRUE, span = 0.4, ...) {
    ## Correlated-groups model: like .fit_standard, but additionally
    ## estimates the consensus correlation between paired element versions
    ## (given by `block`) and passes it to the limma fit.
    .is_mpra_or_stop(object)
    if (nrow(design) != ncol(object)) {
        stop("Rows of design must correspond to the columns of object")
    }
    aggregate <- match.arg(aggregate)
    if (normalize) {
        object <- normalize_counts(object)
    }
    ratios <- compute_logratio(object, aggregate = aggregate)
    dna_log <- log2(getDNA(object, aggregate = TRUE) + 1)
    ## Precision weights from the mean-variance relationship.
    weights <- get_precision_weights(logr = ratios, design = design,
                                     log_dna = dna_log, span = span,
                                     plot = plot, ...)
    ## Consensus correlation between paired element versions.
    dupcor <- duplicateCorrelation(ratios, design = design,
                                   ndups = 1, block = block)
    elist <- new("EList", list(E = ratios, weights = weights, design = design))
    if (return_weights) {
        return(weights)
    }
    if (return_elist) {
        return(elist)
    }
    eBayes(lmFit(elist, design, block = block,
                 correlation = dupcor$consensus))
}
|
adff5fa9a47e2cfdef65cc63fe096a3481870bbc
|
10f24c363ca7b7554bc4c2db539204c1ed15d6ca
|
/run_analysis.R
|
329043f4a177f2e0f6ca26d19315d71cd4e65f1d
|
[] |
no_license
|
fengwanwang/GettingandCleaningDataCourseProject
|
690f6446176288964a868ddf1539d0129593f1fb
|
b6c5e5d44ae84d4b1463952732b6980932ab25a2
|
refs/heads/master
| 2021-01-21T01:46:50.664402
| 2014-11-24T00:15:47
| 2014-11-24T00:15:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,432
|
r
|
run_analysis.R
|
## Getting & Cleaning Data course project: build a tidy data set of the
## mean of each mean/std feature per activity and subject (UCI HAR data).

# Read the training data sets
subject_train <- read.table("./train/subject_train.txt")
x_train <- read.table("./train/X_train.txt")
y_train <- read.table("./train/Y_train.txt")

# Read the testing data sets
subject_test <- read.table("./test/subject_test.txt")
x_test <- read.table("./test/X_test.txt")
y_test <- read.table("./test/Y_test.txt")

# Read in features and activity labels
features <- read.table("features.txt")
activity <- read.table("activity_labels.txt")

# Merge subject / activity / measurement columns, then stack train and test
data_train <- cbind(subject_train, y_train, x_train)
data_test <- cbind(subject_test, y_test, x_test)
data_full <- rbind(data_train, data_test)
names(data_full) <- cbind("Subject", "Activity", t(as.character((features$V2))))

# Keep only the mean/std measurement columns plus the two identifiers
data_meanstd <- data_full[, grep("mean()|std()|Subject|Activity", names(data_full))]

# Replace the numeric activity codes with their descriptive labels
data_labeled <- merge(activity, data_meanstd, by.y = "Activity", by.x = "V1", all = TRUE)[, -1]
colnames(data_labeled)[1] <- "Activity"

library(reshape2)
data_melt <- melt(data_labeled, id = c("Activity", "Subject"))
# Average of each feature per (Activity, Subject) pair
data_cast <- dcast(data_melt, Activity + Subject ~ variable, mean)

# BUG FIX: the tidy output must contain the averages (data_cast), not a
# re-melt of the raw labeled data — the column name "Mean" only makes
# sense for the averaged values, and data_cast was otherwise discarded.
data_final <- melt(data_cast, id = c("Activity", "Subject"))
names(data_final) <- c("Activity", "Subject", "Feature", "Mean")
data_final
|
e8458dde8b4ab712d6dc904354c97cba0f2f4e11
|
f993ad4382087bacfe215ce672777502ee869236
|
/figures_A1_A2/create_figure_A1.R
|
0e23799dacbec5208aac1074b09830528f6c91ba
|
[
"MIT"
] |
permissive
|
akubisch/smart_disp
|
3ffe5f25ed783a36d7a0e96877ab3a6cf9cba3b9
|
da96cf3f954f5bf1409427f0eedc002b15ba67d4
|
refs/heads/master
| 2021-01-18T21:08:38.637480
| 2016-09-01T08:13:01
| 2016-09-01T08:13:01
| 49,948,601
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,477
|
r
|
create_figure_A1.R
|
## Figure A1: 2x3 panel figure of the fraction of pairwise competitions won
## (dark grey) or drawn (light grey) by each decision rule, for three
## information levels I (columns) and the standard vs. modified sigmoid
## (rows).

x11(width = 10, height = 7)
axcex <- 2.25
par(mfrow = c(2, 3), oma = c(5, 5.5, 5, 5), mar = c(4, .5, .5, 2))

# Draw one panel.
#   dir        - simulation output directory (contains data/output.txt)
#   left_panel - draw the y axis? (TRUE for the leftmost column only)
#   s_label    - plotmath label for the fourth rule (S or S[M])
plot_panel <- function(dir, left_panel, s_label) {
  res <- read.table(file.path(dir, "data", "output.txt"), header = TRUE)
  x <- res$frac_s1    # fraction won by strategy 1 in each pairing
  y <- res$frac_draw  # fraction of draws in each pairing
  # Total fraction of won competitions per strategy; each strategy took
  # part in 3 pairwise competitions, hence the factor 1/3.
  wins_TL  <- 1/3 * (sum(x[1:3]))
  wins_TA  <- 1/3 * (1 - x[1] - y[1] + sum(x[4:5]))
  wins_TAE <- 1/3 * (1 - x[2] - y[2] + 1 - x[4] - y[4] + x[6])
  wins_S   <- 1/3 * (1 - x[3] - y[3] + 1 - x[5] - y[5] + 1 - x[6] - y[6])
  draws_TL  <- 1/3 * (y[1] + y[2] + y[3])
  draws_TA  <- 1/3 * (y[1] + y[4] + y[5])
  draws_TAE <- 1/3 * (y[2] + y[4] + y[6])
  draws_S   <- 1/3 * (y[3] + y[5] + y[6])
  # Bars are ordered TA, TAE, TL, S left to right.
  wins  <- c(wins_TA, wins_TAE, wins_TL, wins_S)
  draws <- c(draws_TA, draws_TAE, draws_TL, draws_S)
  at <- 1:4
  plot(1, 1, type = "n", bty = "l", ylim = c(0.038, 1),
       xlim = c(0.5, 4.5), xlab = "", ylab = "", xaxt = "n", xaxs = "i",
       cex.axis = axcex, yaxt = "n")
  if (left_panel) {
    axis(side = 2, at = c(0, 0.25, 0.5, 0.75, 1),
         labels = c("0", "", "0.5", "", "1"), cex.axis = axcex)
  }
  # Wins as dark bars, draws stacked on top in light grey.
  segments(at, rep(-1, 4), at, wins, lend = 1, lwd = 20, col = "grey25")
  segments(at, wins, at, wins + draws, lend = 1, lwd = 20, col = "grey75")
  text(1, -0.15, expression(paste(italic(T)[A])), srt = 90, xpd = TRUE, cex = 2.5)
  text(2, -0.15, expression(paste(italic(T)[AE])), srt = 90, xpd = TRUE, cex = 2.5)
  text(3, -0.15, expression(paste(italic(T)[L])), srt = 90, xpd = TRUE, cex = 2.5)
  text(4, -0.15, s_label, srt = 90, xpd = TRUE, cex = 2.5)
}

lab_S  <- expression(paste(italic(S)))
lab_Sm <- expression(paste(italic(S)[M]))

# Top row: standard sigmoid; bottom row: modified sigmoid.
plot_panel("sig25_i02_S",  TRUE,  lab_S)
plot_panel("sig25_i05_S",  FALSE, lab_S)
plot_panel("sig25_i08_S",  FALSE, lab_S)
plot_panel("sig25_i02_Sm", TRUE,  lab_Sm)
plot_panel("sig25_i05_Sm", FALSE, lab_Sm)
plot_panel("sig25_i08_Sm", FALSE, lab_Sm)

# Outer-margin annotation
mtext(at = 0.5, side = 1, line = 2, outer = TRUE, cex = 2, "Decision rule")
mtext(side = 4, outer = TRUE, line = 3, cex = 1.75, at = 0.8, "standard\n sigmoid")
mtext(side = 4, outer = TRUE, line = 3, cex = 1.75, at = 0.3, "modified\n sigmoid")
mtext(at = 0.55, side = 2, line = 3, outer = TRUE, cex = 2, "Fraction won/draw")
# Column headers; the I=0.2 header previously used italic(I==0.2), which
# italicizes the whole expression — made consistent with the other columns.
mtext(at = 0.15, side = 3, line = 2, outer = TRUE, expression(italic(I) == 0.2), cex = 2)
mtext(at = 0.485, side = 3, line = 2, outer = TRUE, expression(italic(I) == 0.5), cex = 2)
mtext(at = 0.825, side = 3, line = 2, outer = TRUE, expression(italic(I) == 0.8), cex = 2)

dev.copy2eps(file = "figure_A1.eps", title = "Poethke et al. | Figure A1")
|
5f0ff738c971c90be5b91d46ac18e51242eacff9
|
75c419f185ea06113b2ef838336741bdc32f5c29
|
/R/CombDmExPc.R
|
d34bb4a1e11db667ffb19c3ecee51f4c0fa553b0
|
[] |
no_license
|
cran/nmw
|
21069a6c43ec04d3c4a0de66b543b7489f01e78b
|
1be8ffa1e096a62032a8c8cc8ea7e8fa032e7ca9
|
refs/heads/master
| 2023-05-28T14:20:05.201173
| 2023-05-10T02:40:02
| 2023-05-10T02:40:02
| 84,930,485
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,556
|
r
|
CombDmExPc.R
|
# Combine demographics (dm), dosing (ex), and concentration (pc) tables
# into one NONMEM-style data set.
#
# dm must contain exactly one row per ID. Column names are matched
# case-insensitively (all are upper-cased). Dosing records at or after a
# subject's last observation are dropped. Dosing rows get MDV = 1,
# observation rows MDV = 0; demographic columns are broadcast to all rows.
CombDmExPc <- function(dm, ex, pc) {
  colnames(dm) <- toupper(colnames(dm))
  colnames(ex) <- toupper(colnames(ex))
  colnames(pc) <- toupper(colnames(pc))
  ColDm <- colnames(dm)
  ColEx <- colnames(ex)
  ColPc <- colnames(pc)
  if (nrow(dm) != length(unique(dm$ID))) {
    stop("The first table should have only one row for each ID!")
  }
  ColAll <- union(union(ColEx, ColPc), ColDm)
  ToAddEx <- setdiff(ColAll, ColEx)
  ToAddPc <- setdiff(ColAll, ColPc)
  IDs <- sort(unique(pc$ID))
  nID <- length(IDs)
  ## Build a sortable date-time column; without a DATE column, TIME is
  ## assumed to be directly comparable (e.g. numeric time after dose).
  if ("DATE" %in% ColEx) {
    ex$DT <- strptime(paste(ex$DATE, ex$TIME), "%Y-%m-%d %H:%M")
    pc$DT <- strptime(paste(pc$DATE, pc$TIME), "%Y-%m-%d %H:%M")
  } else {
    ex$DT <- ex$TIME
    pc$DT <- pc$TIME
  }
  ex <- ex[order(ex$ID, ex$DT), ]
  pc <- pc[order(pc$ID, pc$DT), ]
  ## Drop dosing records at/after each subject's last observation time.
  ## seq_len() (not 1:nID) so an empty pc table skips the loop cleanly.
  FLAG <- rep(TRUE, NROW(ex))
  for (i in seq_len(nID)) {
    cID <- IDs[i]
    cDAT <- pc[pc$ID == cID, , drop = FALSE]
    cLast <- cDAT[NROW(cDAT), "DT"]  # pc is sorted, so last row = last obs
    FLAG[ex$ID == cID & ex$DT >= cLast] <- FALSE
  }
  ex <- ex[FLAG, ]
  ex <- cbind(ex, MDV = 1)
  pc <- cbind(pc, MDV = 0)
  ## Pad each table with NA columns so ex and pc share the same columns.
  ToEx <- matrix(nrow = NROW(ex), ncol = length(ToAddEx))
  colnames(ToEx) <- ToAddEx
  ToPc <- matrix(nrow = NROW(pc), ncol = length(ToAddPc))
  colnames(ToPc) <- ToAddPc
  Res <- rbind(cbind(ex, ToEx), cbind(pc, ToPc)) # Ex first for MDV descending order
  Res <- Res[order(Res$ID, Res$DT, Res$MDV), ]
  ## Broadcast each subject's demographic values onto every one of its rows.
  for (i in seq_len(nID)) {
    cID <- IDs[i]
    Res[Res$ID == cID, setdiff(ColDm, "ID")] <- dm[dm$ID == cID, setdiff(ColDm, "ID")]
  }
  Res <- Res[, c(ColAll, "MDV")]
  rownames(Res) <- NULL
  return(Res)
}
|
85207a247b18f8a0f9424eab5c2e13e379593b7f
|
31628b963d51246f2a923b56597a4f26c772c819
|
/plot1.R
|
6250cb950ff99aff3858f9a1324bb253c718540f
|
[] |
no_license
|
Frankie5Angels/Tauyanashe-Chitsika-ExData_Plotting1
|
07e6bb7dd747763b0da68021b8325347145f14d6
|
46ab060a22d8ff44495b08b72b34654ffdd8bbac
|
refs/heads/master
| 2021-09-10T03:05:46.700115
| 2018-03-20T21:03:57
| 2018-03-20T21:03:57
| 126,079,382
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 509
|
r
|
plot1.R
|
## Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.

# Read the full data set; "?" marks missing values.
data_file0 <- read.table("household_power_consumption.txt", header = TRUE,
                         sep = ";", na.strings = "?",
                         stringsAsFactors = FALSE, dec = ".")

# Keep only the two days of interest (dates are day/month/year strings).
data_file1 <- subset(data_file0, Date %in% c("1/2/2007", "2/2/2007"))

# Exploratory checks
head(data_file1)
class(data_file1$Date)

# BUG FIX: the format string must include the "/" separators present in
# the data ("1/2/2007"); with "%d%m%Y" every date parsed to NA.
data_file1$Date <- as.Date(data_file1$Date, "%d/%m/%Y")

x0 <- data_file1$Global_active_power
png(file = "plot1.png", width = 480, height = 480)
hist(x0, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
cc97eb987602934ff306e18abd12654834df950d
|
04479a0945363db519854f8aeb02d864690fc47a
|
/R/summarizeMSE.R
|
2d7b1bfd09beb2dd5f33620d35f751677f6a82f0
|
[] |
no_license
|
nissandjac/PacifichakeMSE
|
f398521fd096f0e55b5634ebfe08232b8d9839ef
|
cf967413fa052c1a843bfaf6f1fc1542afd994ce
|
refs/heads/master
| 2022-03-08T14:45:01.953621
| 2022-03-01T09:21:06
| 2022-03-01T09:21:06
| 124,948,437
| 6
| 8
| null | 2020-05-01T06:34:23
| 2018-03-12T20:37:25
|
HTML
|
UTF-8
|
R
| false
| false
| 1,721
|
r
|
summarizeMSE.R
|
#' Summarize a set of MSE runs
#'
#' @param ls.save A saved list of MSE runs.
#' @param perc Length-2 numeric vector with the lower and upper percentiles
#'   used for the uncertainty envelopes.
#' @param runs Number of simulation runs contained in \code{ls.save}
#'   (previously hard-coded to 100).
#' @param nspace Number of spatial areas (previously hard-coded to 2,
#'   i.e. CAN and USA).
#'
#' @return A list with yearly median/percentile summaries of catch, SSB and
#'   AAV, plus the per-country catch and mid-year SSB data frames.
#' @export
#'
#' @examples
#' \dontrun{
#' summarizeMSE(ls.save) # Get a summary of the MSE
#' }
summarizeMSE <- function(ls.save, perc = c(0.1, 0.9), runs = 100, nspace = 2) {
  # Flatten the nested run list into the shape processMSE expects.
  df.MSE <- purrr::flatten(ls.save)
  spacenames <- c('CAN', 'USA')
  # Catch by country (idx 2:3) and total catch (idx 2).
  catchcdf <- processMSE(df.MSE, 'Catch', idx = c(2, 3), spacenames = spacenames,
                         runs = runs, nspace = nspace)
  catchdf <- processMSE(df.MSE, 'Catch', idx = 2, spacenames = spacenames,
                        runs = runs, nspace = nspace)
  # Mid-year SSB by country and total SSB.
  SSB_mid <- processMSE(df.MSE, id = 'SSB.mid', idx = c(1, 2), spacenames = spacenames,
                        runs = runs, nspace = nspace)
  SSB_tot <- processMSE(df.MSE, id = 'SSB', idx = 1, spacenames = spacenames,
                        runs = runs, nspace = nspace)
  # Calculate AAV (average annual variability in catch).
  # NOTE(review): AAVdf is computed but never used in the return value —
  # kept for behavioral parity; confirm whether it can be removed.
  AAVdf <- AAV(catchcdf)
  AAVtottmp <- AAV(catchdf)
  # Yearly median plus lower/upper percentile envelope.
  catch.tot.tmp <- catchdf %>%
    group_by(year) %>%
    summarise(catchmean = median(value),
              quantsmax = quantile(value, probs = perc[2]),
              quantsmin = quantile(value, probs = perc[1]))
  SSB.tot.tmp <- SSB_tot %>%
    group_by(year) %>%
    summarise(SSBmean = median(value),
              quantsmax = quantile(value, probs = perc[2]),
              quantsmin = quantile(value, probs = perc[1]))
  # Years <= 1966 are excluded — presumably the first model year, where
  # AAV has no previous year to compare against (confirm with the model).
  AAV.tot.tmp <- AAVtottmp[AAVtottmp$year > 1966, ] %>%
    group_by(year) %>%
    summarise(AAVmean = median(AAV, na.rm = TRUE),
              quantsmax = quantile(AAV, probs = perc[2]),
              quantsmin = quantile(AAV, probs = perc[1]))
  list(catch = catch.tot.tmp,
       SSB = SSB.tot.tmp,
       AAV = AAV.tot.tmp,
       catchcountry = catchcdf,
       SSB_mid = SSB_mid)
}
|
c61c9eed6481106942b1309e1456b98ff77a076f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rerf/examples/Predict.Rd.R
|
43899fe07c0816de3b62dc0778334d3f83b0d548
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 514
|
r
|
Predict.Rd.R
|
## Extracted example for rerf::Predict — train a RerF forest on part of the
## iris data and classify the held-out rows.
library(rerf)
### Name: Predict
### Title: Compute class predictions for each observation in X
### Aliases: Predict

### ** Examples

library(rerf)
## 40 training rows from each of the three iris species.
trainIdx <- c(seq_len(40), seq(51, 90), seq(101, 140))
## Features as a numeric matrix, species as numeric class labels.
X <- as.matrix(iris[, 1:4])
Y <- as.numeric(iris[, 5])
## Fit the forest on the training subset (single core, rank-transformed).
forest <- RerF(X[trainIdx, ], Y[trainIdx], num.cores = 1L, rank.transform = TRUE)
# Using a set of samples with unknown classification
predictions <- Predict(X[-trainIdx, ], forest, num.cores = 1L, Xtrain = X[trainIdx, ])
error.rate <- mean(predictions != Y[-trainIdx])
|
dd13d705d26a3ebb0d4ef421767d9be8361a9a42
|
ce8d13de6aa47617809c5fc4d83ccd961b310104
|
/man/xgb.opt.depth.Rd
|
f2f216e443bb81cf7052b30efcb2b4e107949da7
|
[] |
no_license
|
BruceZhaoR/Laurae
|
2c701c1ac4812406f09b50e1d80dd33a3ff35327
|
460ae3ad637f53fbde6d87b7b9b04ac05719a169
|
refs/heads/master
| 2021-01-22T12:24:50.084103
| 2017-03-24T19:35:47
| 2017-03-24T19:35:47
| 92,722,642
| 0
| 1
| null | 2017-05-29T08:51:26
| 2017-05-29T08:51:26
| null |
UTF-8
|
R
| false
| true
| 5,924
|
rd
|
xgb.opt.depth.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.opt.depth.R
\name{xgb.opt.depth}
\alias{xgb.opt.depth}
\title{xgboost depth automated optimizer}
\usage{
xgb.opt.depth(initial = 8, min_depth = 1, max_depth = 25, patience = 2,
sd_effect = 0.001, worst_score = 0, learner = NA, better = max_better)
}
\arguments{
\item{initial}{The initial starting search depth. This is the starting point, along with \code{initial - 2} and \code{initial + 2} depths. Defaults to \code{8}.}
\item{min_depth}{The minimum accepted depth. If it is reached, the computation stops. Defaults to \code{1}.}
\item{max_depth}{The maximum accepted depth. If it is reached, the computation stops. Defaults to \code{25}.}
\item{patience}{How many iterations are allowed without improvement, excluding the initialization (the three first computations). Larger means more patience before stopping due to no improvement of the scored metric. Defaults to \code{2}.}
\item{sd_effect}{How much the standard deviation accounts in the score to determine the best depth parameter. Default to \code{0.001}.}
\item{worst_score}{The worst possible score of the metric used, as a numeric (non NA / Infinite) value. Defaults to \code{0}.}
\item{learner}{The learner function. It fetches everything needed from the global environment. Defaults to \code{my_learner}, which is an example of using that function.}
\item{better}{Should we optimize for the minimum or the maximum value of the performance? Defaults to \code{max_better} for maximization of the scored metric. Use \code{min_better} for the minimization of the scored metric.}
}
\value{
Three elements forced in the global environment: \code{"Laurae.xgb.opt.depth.df"} for the dataframe with depth log (data.frame), \code{"Laurae.xgb.opt.depth.iter"} for the dataframe with iteration log (list), and \code{"Laurae.xgb.opt.depth.best"} for a length 1 vector with the best depth found (numeric).
}
\description{
This function allows you to optimize the depth of xgboost in gbtree/dart booster given the other parameters constant.
Output is intentionally pushed to the global environment, specifically in \code{Laurae.xgb.opt.depth.df}, \code{Laurae.xgb.opt.depth.iter}, and \code{Laurae.xgb.opt.depth.best} to allow manual interruption without losing data.
Verbosity is automatic and cannot be removed. In case you need this function without verbosity, please compile the package after removing verbose messages.
In addition, a sink is forced. Make sure to run \code{sink()} if you interrupt (or if xgboost interrupts) prematurely the execution of the function. Otherwise, you end up with no more messages printed to your R console.
The default call is \code{xgb.opt.depth(initial = 8, min_depth = 1, max_depth = 25, patience = 2, sd_effect = 0.001, worst_score = 0, learner = NA, better = max_better)}.
}
\examples{
#Please check xgb.opt.utils.R file in GitHub.
\dontrun{
max_better <- function(cp) {
return(max(cp, na.rm = TRUE))
}
my_learner <- function(depth) {
sink(file = "Laurae/log.txt", append = TRUE, split = FALSE)
cat("\\n\\n\\nDepth ", depth, "\\n\\n", sep = "")
global_depth <<- depth
gc()
set.seed(11111)
temp_model <- xgb.cv(data = dtrain,
nthread = 12,
folds = folded,
nrounds = 100000,
max_depth = depth,
eta = 0.05,
#gamma = 0.1,
subsample = 1.0,
colsample_bytree = 1.0,
booster = "gbtree",
#eval_metric = "auc",
eval_metric = mcc_eval_nofail_cv,
maximize = TRUE,
early_stopping_rounds = 25,
objective = "binary:logistic",
verbose = TRUE
#base_score = 0.005811208
)
sink()
i <<- 0
return(c(temp_model$evaluation_log[[4]][temp_model$best_iteration],
temp_model$evaluation_log[[5]][temp_model$best_iteration], temp_model$best_iteration))
}
xgb.opt.depth.callback <- function(i, learner, better, sd_effect) {
cat("\\nExploring depth ", sprintf("\%02d", Laurae.xgb.opt.depth.iter[i, "Depth"]), ": ")
Laurae.xgb.opt.depth.df[Laurae.xgb.opt.depth.iter[i, "Depth"],
c("mean", "sd", "nrounds")] <<- learner(Laurae.xgb.opt.depth.iter[i, "Depth"])
Laurae.xgb.opt.depth.df[Laurae.xgb.opt.depth.iter[i, "Depth"],
"score"] <<- Laurae.xgb.opt.depth.df[Laurae.xgb.opt.depth.iter[i, "Depth"], "mean"] +
(Laurae.xgb.opt.depth.df[Laurae.xgb.opt.depth.iter[i, "Depth"], "sd"] * sd_effect)
Laurae.xgb.opt.depth.iter[i,
"Score"] <<- Laurae.xgb.opt.depth.df[Laurae.xgb.opt.depth.iter[i, "Depth"], "score"]
Laurae.xgb.opt.depth.iter[i,
"Best"] <<- better(Laurae.xgb.opt.depth.df[, "score"])
Laurae.xgb.opt.depth.best <<- which(
Laurae.xgb.opt.depth.df[, "score"] == Laurae.xgb.opt.depth.iter[i, "Best"])[1]
cat("[",
sprintf("\%05d", Laurae.xgb.opt.depth.df[Laurae.xgb.opt.depth.iter[i, "Depth"], "nrounds"]),
"] ",
sprintf("\%.08f", Laurae.xgb.opt.depth.df[Laurae.xgb.opt.depth.iter[i, "Depth"], "mean"]),
ifelse(is.na(Laurae.xgb.opt.depth.df[Laurae.xgb.opt.depth.iter[i, "Depth"], "mean"]) == TRUE,
"",
paste("+",
sprintf("\%.08f", Laurae.xgb.opt.depth.df[Laurae.xgb.opt.depth.iter[i, "Depth"], "sd"]),
sep = "")),
" (Score: ",
sprintf("\%.08f", Laurae.xgb.opt.depth.df[Laurae.xgb.opt.depth.iter[i, "Depth"], "score"]),
ifelse(Laurae.xgb.opt.depth.iter[i, "Best"] == Laurae.xgb.opt.depth.iter[i, "Score"],
" <<<)",
" )"),
" - best is: ",
Laurae.xgb.opt.depth.best,
" - ",
format(Sys.time(), "\%a \%b \%d \%Y \%X"),
sep = "")
}
xgb.opt.depth(initial = 10, min_depth = 1, max_depth = 20, patience = 2, sd_effect = 0,
worst_score = 0, learner = my_learner, better = max_better)
}
}
|
0ddc0069422ad6b5d86fe186d4c2e2c0ad8b8eba
|
dd1fa9020beb9b0205a5d05e0026ccae1556d14b
|
/itwill/R-script/chap17_2_Naive Bayes.R
|
de0c71e0f815db01dd06b63e69d3e79a8d2528bb
|
[] |
no_license
|
kimjieun6307/itwill
|
5a10250b6c13e6be41290e37320b15681af9ad9a
|
71e427bccd82af9f19a2a032f3a08ff3e1f5911d
|
refs/heads/master
| 2022-11-13T11:55:12.502959
| 2020-07-15T08:14:21
| 2020-07-15T08:14:21
| 267,373,834
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,211
|
r
|
chap17_2_Naive Bayes.R
|
# chap17_2_Naive Bayes
##################################################
# Naive Bayes algorithm
##################################################
# Prediction based on conditional probability
# Relatively good performance
# Applies Bayes' theorem
# -> uses conditional probability
# -> works well for spam message classification
# Conditional probability: probability of event B given that event A occurred
# P(B|A) = P(A|B) * P(B) / P(A)
# ----------------------------------------------------------
# ex) probability that a message containing the words "viagra"/"stamina" is spam
# P(spam | viagra, stamina)
# Event A: viagra, stamina -> P(A) : 5/100 (5%)
# Event B: spam            -> P(B) : 20/100 (20%)
# P(A|B): viagra/stamina given spam -> 4/20 (20%)
A <- 5/100
B <- 20/100
A.B <- 4/20
P <- A.B*B/A
P # 0.8
##################################################
# Naive Bayes basic practice: iris
##################################################
# Install the package (one-time; provides naiveBayes())
install.packages('e1071')
library(e1071) # provides the naiveBayes() function
# 1. Create train and test data sets
data(iris)
set.seed(415) # fix the random seed so results are reproducible
idx <- sample(1:nrow(iris), 0.7*nrow(iris)) # 7:3 split ratio
train <- iris[idx, ]
test <- iris[-idx, ]
train; test
nrow(train) # 105
# 2. Build the classification model using the train data
# Form) naiveBayes(train_x, train_y)
model <- naiveBayes(train[-5], train$Species)
model # trains the 4 x variables against the y variable using the 105 training rows
# 3. Evaluate the classification model using the test data
# Form) predict(model, test, type='class')
p <- predict(model, test) # test: data set that includes the y variable
p
# 4. Evaluate the predictions (confusion table)
tab <- table(p, test$Species) # predictions vs. the y variable of the original test set
# p            setosa versicolor virginica
# setosa         13         0         0
# versicolor      0        16         3
# virginica       0         1        12
# Classification accuracy
acc <- (tab[1,1]+tab[2,2]+tab[3,3])/sum(tab)
acc # 0.9111111
##################################################
# Naive Bayes applied practice: weather data analysis
##################################################
# 1. Load the data
setwd("c:/ITWILL/2_Rwork/Part-IV")
weatherAUS <- read.csv('weatherAUS.csv')
weatherAUS <- weatherAUS[ ,c(-1,-2, -22, -23)] # drop unused columns
# 2. Create / preprocess the data (7:3 train/test split)
set.seed(415)
idx = sample(1:nrow(weatherAUS), 0.7*nrow(weatherAUS))
train_w = weatherAUS[idx, ]
test_w = weatherAUS[-idx, ]
head(train_w)
head(test_w)
dim(train_w) # [1] 25816    20
dim(test_w) # [1] 11065    20
# 3. Build the classifier using the train data
# Form 2) naiveBayes(y ~ x, data)
model = naiveBayes(RainTomorrow ~ ., data = train_w)
model
# 4. Evaluate the model using the test data
# Form) predict(model, test, type='class')
p<- predict(model, test_w)
tab<-table(p, test_w$RainTomorrow)
tab
# p       No  Yes
# No  7153  964
# Yes 1137 1615
# 5. Classification accuracy
acc <- (tab[1,1]+tab[2,2])/sum(tab)
acc # 0.8066979
# accuracy on "No"
tab[1,1]/sum(tab[1,]) # 0.8812369
# accuracy on "Yes"
tab[2,2]/sum(tab[2,]) # 0.5868459
table(test_w$RainTomorrow)
|
a784f5afe134474f4f893baaee12772b6feb57fb
|
2721957eb5c2b3e9afe2f726fee6aa4155adb8ca
|
/R_playtime/weight_updated.R
|
5106b064b9b72c2fcbe0a71aef864bd4ae1040fb
|
[] |
no_license
|
dianekaplan/playtime
|
2f97b82dcadc7d2e6c7eb471b4a0c6563d38fc64
|
2bbc2d7ca29148e4c7dc6b4dbfaaad0321d189dc
|
refs/heads/master
| 2021-01-15T09:20:10.037323
| 2016-10-10T01:08:53
| 2016-10-10T01:08:53
| 28,461,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 931
|
r
|
weight_updated.R
|
# -*- coding: utf-8 -*-
# weight.R
# Diane Kaplan
# messing around
# December 31, 2014
#
# Reads weekly weight data from an Excel workbook and plots the weekly
# average against the week date, with a least-squares trend line.
# load data saved from downloaded Excel file
# all_data <- read.csv("weight.csv")   # CSV alternative kept for reference
# Install the Excel-reading dependencies only when they are missing,
# instead of unconditionally reinstalling on every run.
if (!requireNamespace("rJava", quietly = TRUE)) install.packages("rJava")
if (!requireNamespace("xlsx", quietly = TRUE)) install.packages("xlsx")
library("xlsx")
# BUG FIX: the argument name is `sheetName` (lower-case s). The original
# `SheetName =` did not match any argument of xlsx::read.xlsx, so the
# call failed (the old "#Error: could not find function" note recorded a
# failure here).
all_data <- read.xlsx("weightxlsx.xlsx", sheetName = "2014")
Average <- all_data$Average
Week <- all_data$Week
# WeekNo <- all_data$WeekNo
# convert the values from strings to dates, then display to check
Week <- as.Date(Week, "%m/%d/%Y"); Week
plot(Week, Average)
par() # displays current graph settings/parameters
copy.par <- par() # make a copy of your settings (useful)
title(main="Weight chart", col.main="light blue")
par(col.lab="cornsilk4", font.lab = 2, col.axis = "cornsilk4")
par(pch=20, bg="white", col="cornsilk4")
# add a least-squares fit line
abline(lsfit(Week, Average), col = "light blue", lty = 5)
# clean up
rm(all_data)
|
dd9f1a1b26836cff7fb7a5a600fae5b012800602
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/compositions/examples/CompLinModCo.Rd.R
|
dc7960e4902d36408200412c41d1ce2bdc236164
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 538
|
r
|
CompLinModCo.Rd.R
|
# Auto-extracted example code for compositions::CompLinModCoReg.
# The example itself is inside an Rd "Not run" guard, so every ##D line
# stays commented out; only the library() call executes.
library(compositions)
### Name: CompLinModCoReg
### Title: Compositional Linear Model of Coregionalisation
### Aliases: CompLinModCoReg
### Keywords: multivariate
### ** Examples
## Not run:
##D data(juraset)
##D X <- with(juraset,cbind(X,Y))
##D comp <- acomp(juraset,c("Cd","Cu","Pb","Co","Cr"))
##D CompLinModCoReg(~nugget()+sph(0.5)+R1*exp(0.7),comp)
##D CompLinModCoReg(~nugget()+R1*sph(0.5)+R1*exp(0.7)+(0.3*diag(5))*gauss(0.3),comp)
##D CompLinModCoReg(~nugget()+R1*sph(0.5)+R1(c(1,2,3,4,5))*exp(0.7),comp)
## End(Not run)
|
673b0b7fd2b1b425db9a32d676580cd3315ea22f
|
6e5b5050ff779266b83be384e0372fd507044b4d
|
/setup/PAK_detail/create_params_set.R
|
ced2b81775b51885f85afc11a8d2f256fd1b69c8
|
[] |
no_license
|
cmmid/covidm_reports
|
b3968b096d40a830d775a0fa3eac7afee1e3e56e
|
b87f48cb81c4ef3bbbd221295ea47fad17c27b13
|
refs/heads/master
| 2022-12-05T23:36:42.314743
| 2020-08-10T06:38:15
| 2020-08-10T06:38:15
| 254,046,734
| 9
| 6
| null | 2020-08-05T20:14:54
| 2020-04-08T09:43:35
|
R
|
UTF-8
|
R
| false
| false
| 6,244
|
r
|
create_params_set.R
|
#' default simulation parameters
#'
#' Builds per-admin-unit covidm parameter sets for Pakistan (level-1
#' admin units); the rest of this script writes one params_set.rds per unit.
suppressPackageStartupMessages({
require(data.table)
})
# Script arguments: 1) worldpop 5-year population csv, 2) covidm checkout
# path, 3) output root. Interactive sessions use the hard-coded defaults.
.args <- if (interactive()) c(
"~/Dropbox/covidm_reports/hpc_detailed/worldpop5yr.lfs.csv",
"../covidm",
"~/Dropbox/covidm_reports/hpc_detailed" # going to be writing admin#/param_set.rds
) else commandArgs(trailingOnly = TRUE)
#' @examples
#' .args <- gsub("ZWE","guineabissau",.args)
#' .args <- gsub("ZWE","palestine",.args)
# Population reference rows: level-1 admin units of Pakistan only.
reference = fread(
.args[1], strip.white = FALSE)[
level == 1 & country == "PAK"
]
country <- matref <- "Pakistan"
cm_path = .args[2]
outfilefmt = file.path(tail(.args, 1),"%s","params_set.rds")
# covidm loading flags, read by covidm.R when sourced below.
cm_force_rebuild = F;
cm_build_verbose = F;
cm_force_shared = T;
cm_version = 1;
suppressPackageStartupMessages({
source(file.path(cm_path, "R", "covidm.R"))
})
# country <- cm_populations[
#   country_code == popcode,
#   unique(as.character(name))
# ]
# set up node-runs
# Severity probabilities by 10-year age band (9 bands: 0-9 ... 80+).
probs = fread(
"Age_low,Age_high,Prop_symptomatic,IFR,Prop_inf_hosp,Prop_inf_critical,Prop_critical_fatal,Prop_noncritical_fatal,Prop_symp_hospitalised,Prop_hospitalised_critical
0,9,0.66,8.59E-05,0.002361009,6.44E-05,0.5,0,0,0.3
10,19,0.66,0.000122561,0.003370421,9.19E-05,0.5,9.47E-04,0.007615301,0.3
20,29,0.66,0.000382331,0.010514103,0.000286748,0.5,0.001005803,0.008086654,0.3
30,39,0.66,0.000851765,0.023423527,0.000638823,0.5,0.001231579,0.009901895,0.3
40,49,0.66,0.001489873,0.0394717,0.001117404,0.5,0.002305449,0.018535807,0.3
50,59,0.66,0.006933589,0.098113786,0.005200192,0.5,0.006754596,0.054306954,0.3
60,69,0.66,0.022120421,0.224965092,0.016590316,0.5,0.018720727,0.150514645,0.3
70,79,0.66,0.059223786,0.362002579,0.04441784,0.5,0.041408882,0.332927412,0.3
80,100,0.66,0.087585558,0.437927788,0.065689168,0.5,0.076818182,0.617618182,0.3"
)
# increase CFR: scale both fatality proportions by a relative risk of 1.5,
# then cap at probability 1.
cfr_RR <- 1.5
probs[, Prop_critical_fatal := Prop_critical_fatal * cfr_RR]
probs[, Prop_noncritical_fatal := Prop_noncritical_fatal * cfr_RR]
#min(1, x) does not work for noncritical_fatal, for some reason
probs[Prop_critical_fatal > 1, Prop_critical_fatal := 1]
probs[Prop_noncritical_fatal > 1, Prop_noncritical_fatal := 1]
reformat <- function(P, lmic_adjust = TRUE) {
  # Expand per-10-year-band probabilities into the 16 five-year age
  # groups used by covidm.
  # NOTE(review): assumes P has 9 entries (the 9 bands of `probs`) —
  # confirm if the severity table ever changes shape.
  if (lmic_adjust) {
    # Drop the first band, then repeat each of the next 8 bands twice:
    # 10-19 stands in for 5-14, ..., and 80+ for 75+.
    shifted <- P[-1]
    rep(shifted[1:8], each = 2)
  } else {
    # Duplicate the first 7 bands (70-74 treated like 70-79) and keep
    # the last two bands as-is.
    c(rep(P[1:7], each = 2), P[8:9])
  }
}
# Reformat the 10-year-band severity probabilities into covidm's
# 16 five-year age groups.
P.icu_symp = reformat(probs[, Prop_symp_hospitalised * Prop_hospitalised_critical], lmic_adjust = TRUE);
P.nonicu_symp = reformat(probs[, Prop_symp_hospitalised * (1 - Prop_hospitalised_critical)], lmic_adjust = TRUE);
P.death = reformat(probs[, Prop_noncritical_fatal], lmic_adjust = TRUE);
# Discretised delay distributions from covidm helpers, evaluated out to
# `max_time` days at `tres` resolution (only the $p vector is kept).
max_time <- 60
tres <- 0.25
ponset2hosp <- cm_delay_gamma(7, 7, max_time, tres)$p # onset -> hospital admission
pignore <- cm_delay_skip(max_time, tres)$p # degenerate "skip" delay (currently unused below)
icustay <- cm_delay_gamma(10, 10, max_time, tres)$p # ICU length of stay
nonicustay <- cm_delay_gamma(8, 8, max_time, tres)$p # general-ward length of stay
ponset2death <- cm_delay_gamma(22, 22, max_time, tres)$p # onset -> death
cm_multinom_process <- function(
  src, outcomes, delays,
  report = ""
) {
  # Build a covidm "multinomial" burden-process definition.
  #
  # src:      name of the source flow/compartment (e.g. "Ip").
  # outcomes: data.frame/data.table of per-age outcome probabilities,
  #           one column per outcome.
  # delays:   data.frame/data.table of delay distributions, one column
  #           per outcome (column names matching `outcomes`).
  # report:   reporting flag(s) forwarded to covidm (e.g. "o");
  #           recycled to one flag per outcome column.
  #
  # Returns list(source, type, names, report, prob, delays) with the
  # probability and delay tables transposed so rows are outcomes.
  if ("null" %in% names(outcomes)) {
    # Caller supplied an explicit "null" (no-outcome) column: recycle the
    # report flags, never report the null outcome, and give it a
    # degenerate delay (all mass in the first step) if none was provided.
    if (length(report) != length(outcomes)) report <- rep(report, length(outcomes))
    report[which(names(outcomes)=="null")] <- ""
    if (!("null" %in% names(delays))) {
      delays$null <- c(1, rep(0, length(delays[[1]])-1))
    }
  } else if (!all(rowSums(outcomes)==1)) {
    # Probabilities do not sum to 1: append a residual "null" outcome so
    # every row forms a proper multinomial distribution.
    # NOTE(review): exact float comparison `rowSums(outcomes)==1` — rows
    # meant to sum to 1 can still fall into this branch; confirm intended.
    report <- c(rep(report, length(outcomes)), "")
    outcomes$null <- 1-rowSums(outcomes)
    delays$null <- c(1, rep(0, length(delays[[1]])-1))
  }
  # (Removed unused local `nrow`, which also shadowed base::nrow.)
  list(
    source = src, type="multinomial", names=names(outcomes), report = report,
    prob = t(as.matrix(outcomes)), delays = t(as.matrix(delays))
  )
}
cm_track_process <- function(src, name, delays, agecats = 16, report = "p") {
  # Build a single-outcome tracking process: every individual entering
  # `src` is counted (probability 1 for all `agecats` age groups) under
  # the outcome `name`, with occupancy given by the `delays` profile.
  prob_row <- matrix(1, nrow = 1, ncol = agecats)
  delay_row <- t(delays)
  list(
    source = src,
    type = "multinomial",
    names = name,
    report = report,
    prob = prob_row,
    delays = delay_row
  )
}
# Burden processes handed to covidm: hospital admission split (ICU vs
# ward), occupancy tracking, and deaths.
burden_processes = list(
# process of sending symptomatic cases to hospital icu or ward
cm_multinom_process(
"Ip",
outcomes = data.frame(to_icu = P.icu_symp, to_nonicu = P.nonicu_symp),
delays = data.frame(to_icu = ponset2hosp, to_nonicu = ponset2hosp)
),
# track icu prevalence
cm_track_process("to_icu", "icu", icustay),
# track ward prevalence
cm_track_process("to_nonicu", "nonicu", nonicustay),
# track infections - get from delta R prevalence
# cm_track_process("S", "infection", pignore, report="i"),
# send some cases to death, tracking outcidence (report = "o")
cm_multinom_process(
"Ip",
outcomes = data.table(death=P.death),
delays = data.table(death=ponset2death),
report = "o"
)
)
popnorm <- function(x, seed_cases = 100) {
  # Normalise one covidm population entry: blank contact matrices,
  # overwrite the symptomatic-fraction vector, restrict seeding to
  # non-empty age groups, and introduce `seed_cases` seeds at time 0.
  # Zero out all contact matrices — they are replaced later by
  # empirical matrices.
  zero_contacts <- x$matrices$other * 0
  x$matrices$home <- zero_contacts
  x$matrices$work <- zero_contacts
  x$matrices$school <- zero_contacts
  x$matrices$other <- zero_contacts
  # Age-specific probability of being symptomatic, 16 five-year bands.
  # (Earlier values were c(rep(0.056, 3), rep(0.49, 8), rep(0.74, 8));
  # the current set was proposed by Nick.)
  x$y <- rep(
    c(0.2973718, 0.2230287, 0.4191036, 0.4445867, 0.5635720, 0.8169443),
    times = c(2, 2, 2, 2, 2, 6)
  )
  # Only seed infections in age compartments that are populated.
  x$dist_seed_ages <- as.numeric(x$size != 0)
  # All seed cases arrive at time 0.
  x$seed_times <- rep(0, seed_cases)
  x
}
# Build and save one parameter set per level-1 admin unit.
for (admin_code in reference[, key]) {
params_set <- list()
# Base SEI3R parameter skeleton (population sizes are replaced below).
params1 <- cm_parameters_SEI3R(
country, matref,
deterministic=FALSE,
date_start = "2020-03-01",
date_end = "2022-03-01"
)
# Long-format population counts for this admin unit from the f_*/m_* columns.
extractpop <- melt(reference[
key == admin_code, .SD, .SDcols = grep("(f|m)_", colnames(reference), value = TRUE)
], measure.vars = grep("(f|m)_", colnames(reference), value = TRUE))
# Age lower bound parsed from the trailing digits of the column name.
extractpop[, agelb := as.integer(gsub(".+_(\\d+)","\\1", variable)) ]
# Sum males + females per age band; fold band 16 and everything above
# it into a single 16th (75+) band.
redage <- extractpop[,.(value = sum(value)), keyby=agelb ]
redage[16]$value <- sum(redage[16:.N]$value)
params1$pop[[1]]$size <- redage[1:16, value]
params1$processes = burden_processes
params1$pop <- lapply(params1$pop, popnorm)
#params1$time1 <- as.Date(params1$time1)
params_set[[1]] <- params1
# Write <out_root>/<admin_code>/params_set.rds (directory created first).
dir.create(dirname(sprintf(outfilefmt, admin_code)))
saveRDS(params_set, sprintf(outfilefmt, admin_code))
}
# general population parameters
|
f890e974aa82e1269fdbf751bf2881241268e995
|
dc960b86670393c9b66d4a3c3851a71ae53ef84b
|
/plot2.R
|
e94852bd957c5d65e293372b250c24c4faf9239f
|
[] |
no_license
|
nikly/ExData_Plotting1
|
bbd62a90a199b5f54fbea18131731b0e9cc43967
|
02664b997f5a863838b45cbe838d71c4b3f1be95
|
refs/heads/master
| 2021-01-15T20:19:01.594189
| 2014-06-08T19:42:25
| 2014-06-08T19:42:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 692
|
r
|
plot2.R
|
#plot2 - line graph of Global Active Power Data vs datetime
# Reads the UCI household power consumption file, keeps only the rows
# for 1-2 Feb 2007, and writes a line plot of Global Active Power over
# time to plot2.png.
library(sqldf) #load sqldf library for importing
#read in select data for required dates (filter applied in SQL while reading)
data <- read.csv.sql("household_power_consumption.txt", sql ='select * from file where Date in ("1/2/2007","2/2/2007")', header=TRUE, sep=";")
data$datetime <- paste(data[,1],data[,2]) #create datetime as char from Date + Time columns
data$datetime <- strptime(data[,10],"%d/%m/%Y %H:%M:%S") #datetime to datetime
# NOTE(review): column 10 is assumed to be the `datetime` column just
# appended (i.e. the raw file has 9 columns) — verify if the input changes.
#define x and y variables for target data
y <-data$Global_active_power
x <-data$datetime
png(filename="plot2.png") #call device
plot(x,y, type="l", ylab="Global Active Power (kilowatts)", xlab="") #plot line, y axis label
#closes graphic device
dev.off()
|
2e91d9a21bcbbe54278d36309c1c9036ab7df292
|
5c714607243a555a69eed41aba0b53f8747cd495
|
/papillary/main/deseq.R
|
20739bd7b3c93f1d45da581c6cac1b414e9dfd5c
|
[] |
no_license
|
xulijunji/Stage-Prediction-of-Cancer
|
5a7912d66f92e73deacedd12446490b79c78cea8
|
4924468fa35a504c991fdd0051174d0489d3ff21
|
refs/heads/master
| 2021-05-06T06:45:57.242407
| 2017-12-11T09:26:20
| 2017-12-11T09:26:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,333
|
r
|
deseq.R
|
# DESeq2-based differential-expression feature selection and stage
# classification for papillary tumor data. Relies on objects loaded from
# the RData files below and on helpers (create.Deseq2, do.Deseq2,
# get.genes, do.rf, ...) defined in the sourced scripts.
library(DESeq2)
load('environment/stages.level.comb.RData')
load('environment/dds_tumor_reported_normal_stage.RData')
load('environment/dds_tumor_reported.RData')
load('environment/first_trial_shrunken_classifier.RData')
load('environment/dds_object.RData')
load('environment/accuracy_feature/classifer_list.RData')
load('environment/accuracy_feature/net_features.RData')
source('across_tumors.R')
source('main/final.R')
# Collapse the four stages into two groups (column 4 of the tumor+normal
# sample sheet): i+ii -> "stage i", iii+iv -> "stage iv".
sample.df <- colData(dds_tumor_reported_normal_stage)
sample.df[,4] <- sapply(as.character(sample.df[,4]), function(stage)
{
if(stage == 'stage ii')
stage = 'stage i'
else if(stage == 'stage iii')
stage = 'stage iv'
else
stage = stage
})
# DE analysis of tumor+normal samples by stage.type; keep genes with
# more than 2 total counts, then run DESeq and persist the object.
dds_nor_tum_comb <-
DESeqDataSetFromMatrix(counts(dds_tumor_reported_normal_stage),
colData = sample.df, design = ~stage.type)
dds_nor_tum_comb <- dds_nor_tum_comb[rowSums(assay(dds_nor_tum_comb)) > 2]
dds_nor_tum_comb <- DESeq(dds_nor_tum_comb)
save(dds_nor_tum_comb, file = 'environment/dds_nor_tum_comb.RData')
# Same stage collapsing for the tumor-only sample sheet (column 3 here).
sample.df <- colData(dds_tumor_reported)
sample.df[,3] <- sapply(as.character(sample.df[,3]), function(stage)
{
if(stage == 'stage ii')
stage = 'stage i'
else if(stage == 'stage iii')
stage = 'stage iv'
else
stage = stage
})
# DE analysis on the tumor-only data using the groups from the first trial.
gr = first.trial$gr
dds_obj <- create.Deseq2(gr, counts(dds_tumor_reported),
colData = sample.df)
res.train.dds_obj <- do.Deseq2(dds_obj)
# Significant gene lists at three fold-change thresholds (1, 1.5, 2),
# each applied to every DESeq result with p/q cutoffs of 0.05.
deseq.genes.list <- lapply(c(1,1.5,2), function(fold)
{
lapply(res.train.dds_obj, function(x)
{
get.genes(x, fold, 0.05, 0.05)
})
})
names(deseq.genes.list) <- c("1fold", "1.5fold", "2fold")
# Store the DESeq feature sets; "atleast_N" keeps genes significant in
# at least N of the per-fold result lists.
net.features[['deseq']] <- list()
net.features$deseq[['genes.object']] <- res.train.dds_obj
net.features$deseq[['genes.list']] <- deseq.genes.list
net.features$deseq[['atleast_1']] <- sapply(net.features$deseq$genes.list, function(x)
{
get.genes.common(x, 1)
})
net.features$deseq[['atleast_3']] <- sapply(net.features$deseq$genes.list, function(x)
{
get.genes.common(x, 3)
})
net.features$deseq[['atleast_5']] <- sapply(net.features$deseq$genes.list, function(x)
{
get.genes.common(x, 5)
})
# Ad-hoc inspection of the resulting gene sets.
length(intersect(net.features$deseq$atleast_1, g2))
sapply(net.features$deseq$genes.list$`1.5fold`, length)
# Train/evaluate classifiers (random forest, kNN, SVM, naive Bayes) on
# each "at least N" DESeq gene set. Each do.* call overwrites the same
# list entry; elements [[1]] and [[2]] of the entry are threaded into
# the next classifier call (do.rf starts the chain with empty lists).
classifier.list[['deseq']] <- list()
# atleast_1 gene set: rf -> knn -> svm -> naive Bayes
classifier.list$deseq[['atleast_1']] <- do.rf(first.trial$gr,
vs_normal_comb_reported[tumor.ind.vs,],
net.features$deseq$atleast_1,
stages.levels.comb, list(),
list())
classifier.list$deseq[['atleast_1']] <- do.knn(first.trial$gr,
vs_normal_comb_reported[tumor.ind.vs,],
net.features$deseq$atleast_1,
stages.levels.comb, classifier.list$deseq$atleast_1[[1]],
classifier.list$deseq$atleast_1[[2]])
classifier.list$deseq[['atleast_1']] <- do.svm(first.trial$gr,
vs_normal_comb_reported[tumor.ind.vs,],
net.features$deseq$atleast_1,
stages.levels.comb, classifier.list$deseq$atleast_1[[1]],
classifier.list$deseq$atleast_1[[2]])
classifier.list$deseq[['atleast_1']] <- do.naive(first.trial$gr,
vs_normal_comb_reported[tumor.ind.vs,],
net.features$deseq$atleast_1,
stages.levels.comb, classifier.list$deseq$atleast_1[[1]],
classifier.list$deseq$atleast_1[[2]])
# atleast_3 gene set: rf -> naive Bayes -> knn -> svm
classifier.list$deseq[['atleast_3']] <- do.rf(first.trial$gr,
vs_normal_comb_reported[tumor.ind.vs,],
net.features$deseq$atleast_3,
stages.levels.comb, list(),
list())
classifier.list$deseq[['atleast_3']] <- do.naive(first.trial$gr,
vs_normal_comb_reported[tumor.ind.vs,],
net.features$deseq$atleast_3,
stages.levels.comb, classifier.list$deseq$atleast_3[[1]],
classifier.list$deseq$atleast_3[[2]])
classifier.list$deseq[['atleast_3']] <- do.knn(first.trial$gr,
vs_normal_comb_reported[tumor.ind.vs,],
net.features$deseq$atleast_3,
stages.levels.comb, classifier.list$deseq$atleast_3[[1]],
classifier.list$deseq$atleast_3[[2]])
classifier.list$deseq[['atleast_3']] <- do.svm(first.trial$gr,
vs_normal_comb_reported[tumor.ind.vs,],
net.features$deseq$atleast_3,
stages.levels.comb, classifier.list$deseq$atleast_3[[1]],
classifier.list$deseq$atleast_3[[2]])
# atleast_5 gene set: rf -> svm -> knn -> naive Bayes
classifier.list$deseq[['atleast_5']] <- do.rf(first.trial$gr,
vs_normal_comb_reported[tumor.ind.vs,],
net.features$deseq$atleast_5,
stages.levels.comb, list(),
list())
classifier.list$deseq[['atleast_5']] <- do.svm(first.trial$gr,
vs_normal_comb_reported[tumor.ind.vs,],
net.features$deseq$atleast_5,
stages.levels.comb, classifier.list$deseq$atleast_5[[1]],
classifier.list$deseq$atleast_5[[2]])
classifier.list$deseq[['atleast_5']] <- do.knn(first.trial$gr,
vs_normal_comb_reported[tumor.ind.vs,],
net.features$deseq$atleast_5,
stages.levels.comb, classifier.list$deseq$atleast_5[[1]],
classifier.list$deseq$atleast_5[[2]])
classifier.list$deseq[['atleast_5']] <- do.naive(first.trial$gr,
vs_normal_comb_reported[tumor.ind.vs,],
net.features$deseq$atleast_5,
stages.levels.comb, classifier.list$deseq$atleast_5[[1]],
classifier.list$deseq$atleast_5[[2]])
# Persist the feature sets and classifier results.
save(net.features, file = 'environment/accuracy_feature/net_features.RData')
save(classifier.list, file = 'environment/accuracy_feature/classifer_list.RData')
|
3751f0f2e62d6a7fc849218da95f6b49b9826956
|
f14fdd5a70e68a3e259da340167d90d5024fd653
|
/tests/testthat/test_get_essence_data.R
|
6db03f6d03fc0567296fb3925176a055812cf3e3
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"CC0-1.0",
"LicenseRef-scancode-us-govt-public-domain"
] |
permissive
|
CDCgov/Rnssp
|
5e4f5ef1f39824b067105ab14cd4503025fdb86f
|
afbcb807f4c413335f6c80aab5d4c6bcad742f70
|
refs/heads/master
| 2023-08-17T11:38:00.473337
| 2023-02-11T16:35:07
| 2023-02-11T16:35:07
| 369,309,753
| 41
| 10
|
Apache-2.0
| 2023-07-12T14:22:38
| 2021-05-20T18:57:21
|
R
|
UTF-8
|
R
| false
| false
| 363
|
r
|
test_get_essence_data.R
|
# Tests for get_essence_data(): both calls are expected to raise an
# error (empty credentials, and a URL that is not an ESSENCE endpoint).
context("test-get_essence_data")
test_that("get_essence_data() function works!", {
url <- "http://httpbin.org/json"
# profile with blank username/password
handle <- Credentials$new("", "")
# call with explicit start/end dates should error
expect_error(get_essence_data(url, start_date = "2021-02-15",
end_date = "2021-02-15", profile = handle))
# call without dates should also error
expect_error(get_essence_data(url, profile = handle))
})
|
4a8bbabe5aefacd733dce39d512be7faf5d60dc6
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/HWEBayes/R/DirichNormSat.R
|
488e2897d7e6f1cc6847055ee99ba9af3831b7cc
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 365
|
r
|
DirichNormSat.R
|
DirichNormSat <- function(nvec, bvec) {
  # Normalizing constant of the saturated Dirichlet-multinomial model:
  # the multinomial coefficient of the counts `nvec` times the ratio of
  # Dirichlet normalizers with parameters `bvec` and `nvec + bvec`.
  # Computed in log space for numerical stability, then exponentiated.
  if (length(nvec) != length(bvec)) stop("DirichNormSat: Dimension of nvec and bvec differ\n")
  n.total <- sum(nvec)
  b.total <- sum(bvec)
  log.const <- lfactorial(n.total) - sum(lfactorial(nvec)) +
    lgamma(b.total) - sum(lgamma(bvec)) +
    sum(lgamma(nvec + bvec)) - lgamma(n.total + b.total)
  exp(log.const)
}
|
053dac7939322f59b67e142d4eff37d0754dd2f6
|
2b2aad1308ce11fc70d7057cd32a74edc1a39af0
|
/man/hru_lc_ratio.Rd
|
94b2a23003bb4c4b0e4fae625077c389b5d2302e
|
[] |
no_license
|
ln1267/dWaSSIC
|
495ec35e17db1782cbf4689809f14849fb9d007d
|
a85007f8e596525074f7d0985b8db1141005468c
|
refs/heads/master
| 2022-03-31T18:47:00.652333
| 2019-11-27T22:49:15
| 2019-11-27T22:49:15
| 69,312,882
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 720
|
rd
|
hru_lc_ratio.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{hru_lc_ratio}
\alias{hru_lc_ratio}
\title{Zonal vegetation coverage for each Hru in dWaSSI-C}
\usage{
hru_lc_ratio(classname, shp, field = NULL)
}
\arguments{
\item{classname}{a raster of each hru it can be vegetation type or soil type}
\item{shp}{the zonal boundary}
\item{field}{the field of the shp boundary that will used for zonal}
\item{varname}{The name of the zonaled variable}
}
\description{
Compute the fractional (ratio) coverage of each HRU class (e.g. vegetation or soil type) within every zone of a boundary shapefile, for use in dWaSSI-C.
}
\details{
This is a function for zonal hru data
}
\examples{
\dontrun{
ha<-hru_lc_ratio(classname = "inputs/Landcover/LUCC_Sun_IGBP.nc",
shp = Basins,
field="Station")
}
}
|
2815191158fb58a88dbe9623e7f015b110cb44bd
|
08b6bd3f6e4501df7344b8621d6392456343c5fb
|
/R/globalVariables.R
|
db76bdf5825bd1880fbff5a57dba96bbb6d72b41
|
[] |
no_license
|
rabutler-usbr/RWcheck
|
1e68453f3f9ae6719a742b68201a2d48a7b56a31
|
3ab321a04ecf632aa40d49b3b87f6453b58ff8e8
|
refs/heads/master
| 2021-01-09T04:42:21.244935
| 2020-02-22T00:28:20
| 2020-02-22T00:28:20
| 242,249,320
| 0
| 0
| null | 2020-02-21T23:36:10
| 2020-02-21T23:36:09
| null |
UTF-8
|
R
| false
| false
| 392
|
r
|
globalVariables.R
|
# global variables added so that there are no notes when running R CMD check
# (suppresses "no visible binding for global variable" NOTEs caused by
# non-standard evaluation in createSysCondTable).
# utils::globalVariables() only exists from R 2.15.1 onward, hence the guard.
if(getRversion() >= "2.15.1"){
# global variables necessary because of createSysCondTable
from_createSysCondTable <- c('name','passes', 'fails', 'error', 'write.table',
'Timestep', 'TraceNumber', 'ObjectSlot', 'Value')
utils::globalVariables(c(from_createSysCondTable, "."))
}
|
d37746f072067db411df279dfafe5f528fcd5bbe
|
81f49f2828dd48350528bb5b17077b7af486461b
|
/man/vcov.gllvm.Rd
|
fbadba6bbc93c4db918f995d6a4c51379c76074c
|
[] |
no_license
|
JenniNiku/gllvm
|
497a9a0b6be94080a47803afcf7948a53d33125c
|
9eaf0f66605e32f70ffd050b2c0c3bb051866b81
|
refs/heads/master
| 2023-08-03T11:15:33.087171
| 2023-08-01T06:45:42
| 2023-08-01T06:45:42
| 91,061,910
| 39
| 16
| null | 2023-06-13T09:58:56
| 2017-05-12T07:06:17
|
R
|
UTF-8
|
R
| false
| true
| 1,089
|
rd
|
vcov.gllvm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vcov.gllvm.R
\name{vcov.gllvm}
\alias{vcov.gllvm}
\alias{vcov}
\title{Returns variance-covariance matrix of coefficients in a GLLVM.}
\usage{
\method{vcov}{gllvm}(object, ...)
}
\arguments{
\item{object}{an object of class 'gllvm'.}
\item{...}{not used.}
}
\description{
Returns the variance-covariance matrix of the parameters from a GLLVM. If the variance-covariance matrix was not calculated after model fitting, this function will have to calculate the variance-covariance matrix, which may be computationally intensive for a large number of species and/or sites.
}
\details{
Calculates the variance-covariance matrix of a GLLVM object using \code{\link{se.gllvm}}, which may be computationally intensive with many parameters. The parameters might have unintuitive names. Fixed-effects coefficients are labeled "b", and are ordered per species as: 1) intercepts 2) fixed-effects slopes. Coefficients of the latent variables are labeled "lambda" (linear coefficients) or "lambda2".
}
\author{
Bert van der Veen
}
|
7da9f943b96cfcb79968b4e9bf1a62217db0b120
|
26f1cb213312ad204072dadd6b1163bcc0fa1bba
|
/exemples/chap8/8.29.R
|
1e90ae53322f4bccdb266a61953eff5dfc43853e
|
[] |
no_license
|
fmigone/livreR
|
a9b6f61a0aab902fb3b07fc49ea7dd642b65bdc3
|
998df678da1559ee03438c439335db796a416f2f
|
refs/heads/master
| 2020-04-22T05:47:14.083087
| 2018-05-01T14:39:10
| 2018-05-01T14:39:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 395
|
r
|
8.29.R
|
library(ggplot2) # graphics functions
# Side-by-side bar chart by rental type and cancellation policy.
# NOTE(review): assumes a data frame `houses` with columns `room_type`
# and `cancellation_policy` exists in the calling environment (defined
# earlier in the book's examples).
ggplot(houses)+
aes(x = gsub(" ","\n",room_type),
fill = cancellation_policy) +
geom_bar(position = "dodge")+
labs(x="Type de bien loué")+
# a few additions to manage the colours
scale_fill_brewer(palette="Greys")+
theme_bw()
|
e8dc2508eeaccd63efa8030a53b86f5388217519
|
67aceca655f47d98d8e62e484cd4e606b72c8870
|
/Project Euler/005-Least_ComMultiple.R
|
26d2e4123eab700db7202f4ba945a0b054bada9b
|
[] |
no_license
|
QiliWu/leetcode-and-Project-Euler
|
130cf8e2d50707687b8a4c97426dbdaddeca902b
|
ade9a06861ec97ffb5ad33d83c9d9e93b043abcd
|
refs/heads/master
| 2021-09-21T16:49:31.539051
| 2018-08-29T05:32:48
| 2018-08-29T05:32:48
| 103,649,704
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,065
|
r
|
005-Least_ComMultiple.R
|
#2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
#What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
# 最小公倍数=两数的乘积/最大公约(因)数, 解题时要避免和最大公约(因)数问题混淆。 最小公倍数的适用范围:分数的加减法,中国剩余定理(正确的题在最小公倍数内有解,有唯一的解)。[2] 因为,素数是不能被1和自身数以外的其它数整除的数;素数X的N次方,是只能被X的N及以下次方,1和自身数整除.所以,给最小公倍数下一个定义:S个数的最小公倍数,为这S个数中所含素因子的最高次方之间的乘积。如:1,求756,4400,19845,9000的最小公倍数?因756=2*2*3*3*3*7,4400=2*2*2*2*5*5*11,19845=3*3*3*3*5*7*7,9000=2*2*2*3*3*5*5*5,这里有素数2,3,5,7,11.2最高为4次方16,3最高为4次方81,5最高为3次方125,7最高为2次方49,还有素数11.得最小公倍数为16*81*125*49*11=87318000.2,自然数1至50的最小公倍数,因为,√50≈7,所以,在50之内的数只有≤7的素数涉及N次方。在50之内,2的最高次方的数为32,3的最高次方的数为27,5的最高次方的数为25,7的最高次方的数为49,其余为50之内的素数。所以,1,2,3,4,5,6,…,50的最小公倍数为:32*27*25*49*11*13*17*19*23*29*31*37*41*43*47=3099044504245996706400
# Test whether a single positive integer is prime.
#
# Returns TRUE for primes (2, 3, 5, ...) and FALSE otherwise, including
# for 1, 0 and negative inputs. The original implementation relied on a
# descending `2:y` sequence for x = 2 and a special-case `x != 2` test;
# this version uses plain trial division by odd candidates up to sqrt(x),
# which is both clearer and does less work.
isprimenumber <- function(x){
  # Anything below 2 is not prime by definition.
  if (x < 2) {
    return(FALSE)
  }
  # 2 is the only even prime.
  if (x == 2) {
    return(TRUE)
  }
  if (x %% 2 == 0) {
    return(FALSE)
  }
  # Only odd divisors up to sqrt(x) need checking; i * i <= x avoids
  # floating-point sqrt entirely.
  i <- 3
  while (i * i <= x) {
    if (x %% i == 0) {
      return(FALSE)
    }
    i <- i + 2
  }
  TRUE
}
# Least common multiple of 1..num.
#
# LCM(1..num) is the product, over every prime p <= num, of the highest
# power of p that does not exceed num. The LCM is reported via message();
# the function returns the (named) list of primes <= num, matching the
# original interface.
#
# Fixes: the original computed the exponent as as.integer(log(num, i)),
# which can truncate to m-1 when the floating-point logarithm lands just
# below an exact integer; here the largest power is found with integer
# arithmetic instead. The original also ran `for (i in 2:num)` even when
# num < 2 (2:1 iterates over c(2, 1)); that case is now guarded.
Least_ComMultiple <- function(num){
  prime <- list()
  result <- 1
  if (num >= 2) {
    for (i in 2:num) {
      if (isprimenumber(i)) {
        prime[[toString(i)]] <- i
        # Largest power of i that does not exceed num, by repeated
        # multiplication (exact for integers stored in doubles).
        p <- i
        while (p * i <= num) {
          p <- p * i
        }
        result <- result * p
      }
    }
  }
  message('The smallest common multiple number of all the numbers in 1:', num, ' is ', result)
  return(prime)
}
|
51441de6725f7550639dfbc1fe60d08fc9c957da
|
bec379c045e3d0781f546fa486ef6244f8ec6cc6
|
/man/AcuityView.Rd
|
1cf3fab09095a3ca59d4cecd84d44b7d397cc491
|
[] |
no_license
|
joaoventuraoliveira/AcuityView
|
717cb5406b5f6b0770172403487ab3ec66b4456d
|
b0465ec9ff20eb5e1c2c6c15d13d08b680282f75
|
refs/heads/master
| 2021-07-06T12:05:08.139275
| 2017-09-28T18:03:45
| 2017-09-28T18:03:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,364
|
rd
|
AcuityView.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AcuityView.R
\name{AcuityView}
\alias{AcuityView}
\title{AcuityView}
\usage{
AcuityView(photo = NULL, distance = 2, realWidth = 2,
eyeResolutionX = 0.2, eyeResolutionY = NULL, plot = T,
output = "test.jpg")
}
\arguments{
\item{photo}{The photo you wish to alter; if NULL then a pop up window allows you to navigate to your photo, otherwise include the file path here}
\item{distance}{The distance from the viewer to the object of interest in the image; can be in any units so long as it is in the same units as RealWidth}
\item{realWidth}{The real width of the entire image; can be in any units as long as it is in the same units as distance}
\item{eyeResolutionX}{The resolution of the viewer in degrees}
\item{eyeResolutionY}{The resolution of the viewer in the Y direction, if different from eyeResolutionX; defaults to NULL, as it is uncommon for this to differ from eyeResolutionX}
\item{plot}{Whether to plot the final image; defaults to T, but if F, the final image will still be saved to your working directory}
\item{output}{The name of the output file, must be in the format of output="image_name.filetype"; acceptable filetypes are .bmp, .png, or .jpeg}
}
\value{
Returns an image in the specified format
}
\description{
This function provides a simple method for displaying a visual scene as it may appear to an animal with lower acuity.
}
\section{Image Format Requirements}{
Image must be in 3-channel format, either PNG, JPEG or BMP. Note: some PNG files have an alpha channel that makes them 4-channel images; this will not work with the code. The image must be 3-channel.
}
\section{Image size}{
Image must be square with each side a power of 2 pixels. Example: 512x512, 1024 x 1024, 2048 x 2048 pixels
}
\section{For Linux Users}{
You may need to install the fftw library in order for the R package "fftwtools" to install and perform correctly.
The FFTW website and install information can be found here: http://www.fftw.org/
This library can easily be installed on Ubuntu with: apt-get install fftw3-dev
}
\examples{
require(imager)
photo<-system.file('extdata/reef.bmp', package='AcuityView')
reef<-load.image(photo)
AcuityView(photo = reef, distance = 2, realWidth = 2, eyeResolutionX = 2,
eyeResolutionY = NULL, plot = TRUE, output="Example.jpeg")
}
|
5a1f6a35373059f3d551c883927bf449a2c3d646
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/borrowr/inst/testfiles/matchesToCor/libFuzzer_matchesToCor/matchesToCor_valgrind_files/1609958588-test.R
|
0ae8dd811f914c0d82a95b3d6b000b5e380f58de
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 256
|
r
|
1609958588-test.R
|
# Fuzzer-generated regression input for the internal borrowr:::matchesToCor.
# Per the file path this appears to be a valgrind test case -- TODO confirm.
# `x` is a 1x7 double matrix of extreme magnitudes (huge, tiny, subnormal
# and zero values) exercising numerical edge cases.
testlist <- list(x = structure(c(1.0096967731662e+175, 3.94604863549254e-114, 1.16674439868909e+224, 1.51741194958664e-152, 0, 9.18962101264719e-322, 5.14291266410837e+25), .Dim = c(1L, 7L)))
# Call the function with the list elements as arguments; running without a
# crash is the point of the test. str() displays whatever comes back.
result <- do.call(borrowr:::matchesToCor,testlist)
str(result)
|
36ec1af63ca8b5c0a08a6a21fd5e7165ff758c40
|
a71006b657906eb1e67f89edbb22c6a1e7db8dbe
|
/MFA/tests/testthat/test-Lg.R
|
94429b8e60f306b9b677bed0ccef35ec4d32a824
|
[] |
no_license
|
ucb243-fall16-mfa/alfa
|
dd83e6abf25462590c274e9cc79a8af0eb630a8f
|
eabf630c6cff11546e77d5fc75a448c56f1f97bc
|
refs/heads/master
| 2020-06-11T13:39:13.985078
| 2016-12-17T03:34:29
| 2016-12-17T03:34:29
| 75,654,759
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 239
|
r
|
test-Lg.R
|
# testthat unit tests for the Lg argument checker `check_numeric`.
context("Lg arguments")
test_that("check_numeric with ok vectors", {
  # A numeric matrix is valid input and should return TRUE.
  expect_true(check_numeric(matrix(1:9,3,3)))
})
test_that("check_numeric fails with invalid argument",{
  # Non-numeric (character) matrices must be rejected with an error.
  expect_error(check_numeric(matrix(c("a","b","c","d"),2,2)))
})
|
fdfa34478dff924b56586ac2fb21a22c45494423
|
180dd82b3c98dd23bab21ba2566277cd3a2a3d51
|
/R/transform_options.R
|
a867b7eba61faef2b0abcade21df07964e7215e9
|
[
"Apache-2.0"
] |
permissive
|
andrew-MET/harpIO
|
2e90861751a2b47ec8cdebde3de6d35414575cb7
|
976ddeca6337c31e7682d7a371552942cbb4cc66
|
refs/heads/master
| 2023-05-10T11:55:28.830541
| 2023-01-11T09:18:03
| 2023-01-11T09:18:03
| 275,838,990
| 0
| 3
|
NOASSERTION
| 2022-11-08T12:24:30
| 2020-06-29T14:27:58
|
R
|
UTF-8
|
R
| false
| false
| 8,198
|
r
|
transform_options.R
|
#' Generate Transformation Options
#'
#' When reading gridded data, three transformations are available: interpolate,
#' regrid and xsection. Each of these transformations requires their own set of
#' options, and these functions are used to generate those options.
#'
#' \code{interpolate_opts} generates options for interpolating gridded data to
#' points, or stations. If no stations are passed, a default list of stations,
#' accessed from the harpIO built in dataset stations_list is used. If 2m
#' temperature is to be read, the default behaviour is to attempt to height
#' correct for differences between the model elevation and the elevation at the
#' actual station. The default interpolation method is nearest-neighbour.
#'
#' \code{regrid_opts} generates options to regrid the data from its native grid
#' to a new grid. The new grid is specified either as a
#' \link[meteogrid]{geofield} or \link[meteogrid]{geodomain} object.
#' \link[harpIO]{read_grid} can be used to read a geofield, or
#' \link[meteogrid]{Make.domain} can be used to make a geodomain. The default
#' interpolation method is nearest-neighbour.
#'
#' \code{xsection_opts} generates options to extract a vertical cross section
#' from three dimensional gridded data. \code{a} and \code{b} denote the left
#' and right hand extents of the vertical cross section. By default the cross
#' sections are interpolated horizontally to a 2.5 km grid length. In the
#' vertical, if data are on model levels or pressure levels, these are converted
#' to log(pressure) and by default interpolated to log(10) hPa levels. The
#' default interpolation method is bilinear interpolation.
#'
#' @param stations A data frame of points to interpolate to. Must have the
#' columns 'SID', for a station ID, lat for latitude and lon for longitude.
#' @param method The interpolation method. Can be 'nearest' or 'closest' for
#' nearest neighbour interpolation, 'bilinear', or 'bicubic'. For
#' \code{regrid_opts} 'mean' is also available for upscaling.
#' @param correct_t2m Logical. Whether to make a height correction to 2m
#' temperature to account for differences between the model elevation and the
#' station elevation.
#' @param keep_model_t2m Logical. Whether to keep the uncorrected 2m temperature
#' if \code{correct_t2m = TRUE}.
#' @param lapse_rate The lapse rate in K/m to use for the 2m temperature height
#'   correction. The default is the standard moist adiabatic lapse rate of
#' 0.0065 K/m.
#' @param clim_file A file containing model orography or surface geopotential on
#' the same grid as the data to be read in so that height corrections to 2m
#' temperature can be made. It will also contain a land sea mask if masked
#' interpolation is to be done. For initialising the domain, any parameter,
#' passed in 'clim_param' can be used.
#' @param clim_file_format The format of the clim_file. If set to NULL, a guess
#' will be made.
#' @param clim_file_opts Options for reading the clim_file, depending on the
#' format of the clim_file.
#' @param clim_param The parameter to read from 'clim_file'. If
#' \code{correct_t2m = TRUE} then this should be either surface geopotential
#'   or terrain height in meters, otherwise for the purposes of initialising
#'   interpolation weights, any parameter that exists in 'clim_file' can be used.
#' @param use_mask Logical. Whether to use a mask in the interpolation. Requires
#' that 'stations' has an 'lsm' column and clim_file includes a land-sea mask.
#' @param weights Interpolation weights if they have already been calculated.
#' @param keep_raw_data Logical. Whether to keep the untransformed full gridded
#' data field. The default is FALSE.
#'
#' @return A list of options that will be used in the transformation.
#' @export
#'
#' @examples
#' interpolate_opts()
interpolate_opts <- function(
  stations,
  method = c("nearest", "bilinear", "bicubic", "closest"),
  correct_t2m = TRUE,
  keep_model_t2m = FALSE,
  lapse_rate = 0.0065,
  clim_file = NULL,
  clim_file_format = NULL,
  clim_file_opts = NULL,
  clim_param = "sfc_geo",
  use_mask = FALSE,
  weights = NULL,
  keep_raw_data = FALSE
) {
  # Build the option list controlling point interpolation of gridded data.
  # Fall back to the package's built-in station list when none is supplied.
  if (missing(stations)) {
    message("No stations specified. Using default stations: 'station_list'")
    stations <- get("station_list")
  }
  stopifnot(is.data.frame(stations))
  stations <- tibble::as_tibble(stations)
  # 'stations' must identify each point (SID) and locate it (lat, lon).
  stations_cols <- colnames(stations)
  missing_cols <- setdiff(c("SID", "lat", "lon"), stations_cols)
  if (length(missing_cols) > 0) {
    stop("'stations' is missing the columns: ", paste(missing_cols, collapse = ", "))
  }
  # The 2m-temperature height correction needs a station elevation column;
  # degrade gracefully with a warning rather than failing when it is absent.
  if (correct_t2m && !is.element("elev", stations_cols)) {
    warning(
      "No 'elev' column found in stations, and correct_t2m = TRUE. Setting correct_t2m = FALSE",
      call. = FALSE,
      immediate. = TRUE
    )
    correct_t2m <- FALSE
  }
  # Masked interpolation needs a land-sea mask value per station.
  if (use_mask && !is.element("lsm", stations_cols)) {
    stop("For interpolation with a mask, 'stations' must contain an 'lsm' column.")
  }
  # Validate 'method' against the allowed choices (defaults to "nearest").
  method <- match.arg(method)
  list(
    stations = stations,
    method = method,
    correct_t2m = correct_t2m,
    keep_model_t2m = keep_model_t2m,
    lapse_rate = lapse_rate,
    clim_file = clim_file,
    clim_file_format = clim_file_format,
    clim_file_opts = clim_file_opts,
    clim_param = clim_param,
    use_mask = use_mask,
    weights = weights,
    keep_raw_data = keep_raw_data
  )
}
#' @rdname interpolate_opts
#' @param new_domain A geofield or geodomain object on the grid to which the
#' data should be regridded.
#'
#' @export
#'
#' @examples
#' if (requireNamespace("Rgrib2", quietly = TRUE) & requireNamespace("harpData", quietly = TRUE)) {
#' new_domain = read_grid(
#' system.file("grib/HARMUK20171015T12Z+003.grib", package = "harpData"),
#' parameter = "T2m"
#' )
#' regrid_opts(new_domain = new_domain)
#' }
regrid_opts <- function(
  new_domain = NULL,
  method = "nearest",
  clim_file = NULL,
  clim_file_format = NULL,
  clim_file_opts = NULL,
  clim_param = "sfc_geo",
  weights = NULL,
  keep_raw_data = FALSE
) {
  # Build the option list controlling regridding to a new target grid.
  # The target grid must be defined one way or another: either directly
  # through 'new_domain' or indirectly through a 'clim_file'.
  if (is.null(new_domain) && is.null(clim_file)) {
    stop("Either 'new_domain' or 'clim_file' need to be passed.")
  }
  # When supplied, the target grid must be a meteogrid geofield/geodomain.
  if (!is.null(new_domain)) {
    stopifnot(inherits(new_domain, "geofield") || inherits(new_domain, "geodomain"))
  }
  list(
    new_domain       = new_domain,
    method           = method,
    clim_file        = clim_file,
    clim_file_format = clim_file_format,
    clim_file_opts   = clim_file_opts,
    clim_param       = clim_param,
    weights          = weights,
    keep_raw_data    = keep_raw_data
  )
}
#' @rdname interpolate_opts
#'
#' @param a A length 2 numeric vector with the longitude and latitude of the
#' left hand edge of the cross section.
#' @param b A length 2 numeric vector with the longitude and latitude of the
#' right hand edge of the cross section.
#' @param horizontal_res The horizontal grid length of the cross section in
#' meters. The default is 2500m.
#' @param vertical_res The vertical grid length of the cross section. For data
#' on pressure levels or model levels this should be in hPa. For data on
#' height levels this should be in meters. The default is log(10).
#' @export
#'
#' @examples
#' xsection_opts(a = c(5.3, 60.5), b = c(10.8, 59.9))
xsection_opts <- function(
  a,
  b,
  horizontal_res = 2500,
  vertical_res = log(10),
  clim_file = NULL,
  clim_file_format = NULL,
  clim_file_opts = NULL,
  clim_param = "sfc_geo",
  method = "bilinear",
  keep_raw_data = FALSE
) {
  # Build the option list for extracting a vertical cross section.
  # Both end points of the section must be numeric (lon, lat) pairs;
  # 'a' is checked first, matching the original validation order.
  stopifnot(
    is.numeric(a) && length(a) == 2,
    is.numeric(b) && length(b) == 2
  )
  # Bundle the validated settings into the option list consumed by the
  # cross-section transformation.
  opts <- list(
    a = a,
    b = b,
    horizontal_res = horizontal_res,
    vertical_res = vertical_res,
    clim_file = clim_file,
    clim_file_format = clim_file_format,
    clim_file_opts = clim_file_opts,
    clim_param = clim_param,
    method = method,
    keep_raw_data = keep_raw_data
  )
  opts
}
|
4686d8b75184259e7311a957efd1b8df922fb5d1
|
ddc2b096e681398f576a95e40c7fd366b65f50a2
|
/SDPSimulations/SubstitutionsMK.R
|
d68af7ceddf36fc1eb806d0d8440014cbbf0c451
|
[] |
no_license
|
sbellan61/SDPSimulations
|
f334d96743c90d657045a673fbff309106e45fce
|
cfc80b116beafabe3e3aed99429fb03d58dc85db
|
refs/heads/master
| 2021-03-27T20:48:25.857117
| 2017-09-19T20:13:37
| 2017-09-19T20:13:37
| 21,144,447
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,648
|
r
|
SubstitutionsMK.R
|
####################################################################################################
## Makes control files for each analysis within which each line giving one R CMD BATCH command line
## to run on a cluster.
####################################################################################################
## NOTE(review): rm(list=ls()) and a hard-coded setwd() in a script are
## discouraged (they destroy the caller's workspace and only work on one
## machine); kept as-is since this is a one-off cluster driver script.
rm(list=ls()) # clear workspace
setwd('/home1/02413/sbellan/SDPSimulations/SDPSimulations') # setwd
load('../DHSFitting/data files/ds.nm.all.Rdata') # load country names
load('../DHSFitting/data files/pars.arr.ac.Rdata') # load acute phase relative hazards used to fit (in.arr[,,2])
hazs <- c('bmb','bfb','bme','bfe','bmp','bfp') # transmission coefficient names, for convenience
nc <- 12 # core per simulation
load(file=file.path('../DHSFitting/data files/SubAlreadyDone.Rdata'))
## source('SubstitutionsMK.R')
####################################################################################################
####################################################################################################
## SUBSTITUTION ANALYSIS
####################################################################################################
## To investigate possible drivers of country-to-country variation in serodiscordant proportions we
## substituted population-level HIV prevalence, couple-formation patterns or HIV transmission rates
## estimated from a "donor" country into a simulation model fit for a "recipient" country, and
## measured the extent to which a substitution shifted the serodiscordant proportion from that of
## the recipient country to that of the donor country.
####################################################################################################
countries <- c(1:length(ds.nm)) ## countries to do
ncount <- length(ds.nm)
# acute phase relative hazards to do (must match those fitted previously, since it determines the
# transmission coefficients used)
acutes <- c(1,5,7,10)#,25,30,40,50)
each.val <- 200 # number of couples per couple formation (marital) cohort
counterf.betas <- F # change betas in counterfactuals? if not change beta_within & c's (so beta_within affects all routes)
sub.betas <- F # substitute betas? if not beta_within & c's
nrb <- 0 # initialize blocks data frame (rows = 0)
totn <- 0 # total # jobs (starts at 0)
num.doing <- 0
nn <- 200 # number of simulations per country-acute combination (must be bigger than max(sel) later but is fine to leave big
substitute <- TRUE # doing a substitution analysis?
####################################################################################################
sink("Ac1SubstitutionsControlFile.txt") # create a control file to send to the cluster
outdir <- file.path('results','SubstitutionAnalysis') # set up directory for output
if(!file.exists(outdir)) dir.create(outdir) # create directory if necessary
for(aa in acutes) { # loop through acute phase relative hazard
  acdirnm <- file.path(outdir,paste0('Acute', aa)) # set up directory for each acute phase relative hazard
  if(!file.exists(acdirnm)) dir.create(acdirnm) # create directory if necessary
  for(cc in countries) {
    batchdirnm <- file.path(acdirnm, ds.nm[cc]) # setup directory for country
    if(!file.exists(batchdirnm)) dir.create(batchdirnm) # created if necessary
    if(!file.exists(file.path(batchdirnm,'routs'))) dir.create(file.path(batchdirnm, 'routs')) # setup directory to store Rout files
    ######################################################################
    ######################################################################
    ## Set defaults for all parameters for each simulation, simulatin specific-values set later
    ######################################################################
    group <- rep(cc,nn) # country group.
    ## set substitution country (donor country) indices to country by default & change later:
    ## s.epic, s.demog, s.bmb...
    for(svar in c('epic','demog', hazs)) assign(paste0('s.',svar), rep(cc,nn))
    ## set all phases to have same infectivity
    for(ph in c('acute','late', 'aids')) assign(paste0(ph,'.sc'), rep(1,nn))
    death <- rep(T,nn) # include death
    ## all haz scalars (bmb.sc,etc...) set to 1
    for(hh in hazs) assign(paste0(hh,'.sc'), rep(1,nn))
    ## set heterogeneity defaults (i.e. het.b, het.b.sd, het.b.cor,etc...)
    for(ht in c('b','e','p','gen','beh')) {
      assign(paste0('het.',ht), rep(F,nn))
      assign(paste0('het.',ht,'.sd'), rep(0,nn))
      assign(paste0('het.',ht,'.cor'), rep(0,nn))
    }
    scale.by.sd <- rep(T,nn) # scale by standard deviation?
    scale.adj <- rep(1,nn) # arbitrary scalar if not doing that.
    infl.fac <- rep(200,nn) # inflation factor for non-parametric couple pseudo-population builder
    maxN <- rep(10^5,nn) # max pseudopopulation size
    sample.tmar <- rep(F,nn) # sample marital (couple formation) date from copulas?
    psNonPar <- rep(F,nn) # use non-parametric couple pseudo-population builder?
    each <- rep(each.val, nn) # how many couples per marital (couple formation) cohort
    ######################################################################
    ######################################################################
    ## ***set acute phase for within this aa-loop***
    acute.sc <- rep(aa,nn)
    ######################################################################
    ## Keep track of simulations we're doing in a data.frame called blocks
    ## (one row per contiguous run of jobs; start/end index into the job list)
    if(cc==1 & aa==1) { # initialize blocks for first aa-loop iteration
      blocks <- data.frame(start = 1, end = 1, acute = aa, cc = cc, country = ds.nm[cc], lab = 'as fitted', sub.country = NA)
    }else{ # add to blocks
      if(!(aa==1 & cc==1)) nrb <- nrow(blocks) # how long is blocks (so we know how to add appropriately
      blocks <- rbind(blocks, data.frame(start = 1+nrb, end = 1+nrb, acute = aa, cc = cc, country = ds.nm[cc], lab = 'as fitted', sub.country = NA))
    }
    ######################################################################
    ######################################################################
    ## Substitutions
    ######################################################################
    ## Epidemic curves (different for WA b/c that country-group has multiple epidemic curves)
    if(ds.nm[cc]!='WA') { # if not WA substitute all other countries epidemic curves in
      sel <- 2:(ncount-1) # selection index
      s.epic[sel] <- c(1:ncount)[-c(which(ds.nm=='WA'), cc)]
      blocks <- rbind(blocks, data.frame(start = min(sel)+nrb, end = max(sel)+nrb, acute = acute.sc[sel], cc = cc,
                                         country = ds.nm[cc], lab = 'epidemic curve',
                                         sub.country = c(1:ncount)[-c(which(ds.nm=='WA'), cc)]))
    }else{
      sel <- 2:ncount # selection index
      s.epic[sel] <- c(1:ncount)[-c(cc)] # if WA, do DRC at end intsead for WA
      blocks <- rbind(blocks, data.frame(start = min(sel)+nrb, end = max(sel)+nrb, acute = acute.sc[sel], cc = cc,
                                         country = ds.nm[cc], lab = 'epidemic curve', sub.country = c(1:ncount)[-cc]))
    }
    ## Pre-couple transmission
    sel <- (sel[length(sel)] + 1):(sel[length(sel)] + ncount - 1) # update selection index, next 9 simulations
    s.bmb[sel] <- c(1:ncount)[-cc]
    s.bfb[sel] <- c(1:ncount)[-cc]
    blocks <- rbind(blocks, data.frame(start = min(sel)+nrb, end = max(sel)+nrb, acute = acute.sc[sel], cc = cc,
                                       country = ds.nm[cc], lab = 'pre-couple', sub.country = c(1:ncount)[-cc]))
    ## Extra-couple transmission
    sel <- (sel[length(sel)] + 1):(sel[length(sel)] + ncount - 1) # update selection index, next 9 simulations
    s.bme[sel] <- c(1:ncount)[-cc]
    s.bfe[sel] <- c(1:ncount)[-cc]
    blocks <- rbind(blocks, data.frame(start = min(sel)+nrb, end = max(sel)+nrb, acute = acute.sc[sel], cc = cc,
                                       country = ds.nm[cc], lab = 'extra-couple', sub.country = c(1:ncount)[-cc]))
    ## Within-couple transmission
    sel <- (sel[length(sel)] + 1):(sel[length(sel)] + ncount - 1) # update selection index, next 9 simulations
    s.bmp[sel] <- c(1:ncount)[-cc]
    s.bfp[sel] <- c(1:ncount)[-cc]
    blocks <- rbind(blocks, data.frame(start = min(sel)+nrb, end = max(sel)+nrb, acute = acute.sc[sel], cc = cc,
                                       country = ds.nm[cc], lab = 'within-couple', sub.country = c(1:ncount)[-cc]))
    ## Relationship patterns (from copulas)
    sel <- (sel[length(sel)] + 1):(sel[length(sel)] + ncount - 1) # update selection index, next 9 simulations
    s.demog[sel] <- c(1:ncount)[-cc]
    blocks <- rbind(blocks, data.frame(start = min(sel)+nrb, end = max(sel)+nrb, acute = acute.sc[sel], cc = cc,
                                       country = ds.nm[cc], lab = 'relationship patterns', sub.country = c(1:ncount)[-cc]))
    ######################################################################
    ## Create control file text to send to cluster.
    ## Each iteration emits one "R CMD BATCH '--args ...'" line encoding every
    ## per-simulation parameter set above; the sink() captures them all.
    for(ii in 1:max(sel)) {
      jb <- ii # job num
      totn <- totn+1 # total jobs
      cmd <- paste("R CMD BATCH '--args jobnum=", totn, " batchdirnm=\"", batchdirnm, "\"", " nc=", nc,
                   " group.ind=", group[ii], " substitute=", substitute, " s.epic=", s.epic[ii], " s.demog=", s.demog[ii],
                   " sub.betas=", sub.betas, " counterf.betas=", counterf.betas,
                   " s.bmb=", s.bmb[ii], " s.bfb=", s.bfb[ii],
                   " s.bme=", s.bme[ii], " s.bfe=", s.bfe[ii],
                   " s.bmp=", s.bmp[ii], " s.bfp=", s.bfp[ii],
                   " death=", death[ii],
                   " acute.sc=", acute.sc[ii], " late.sc=", late.sc[ii]," aids.sc=", aids.sc[ii],
                   " bmb.sc=", bmb.sc[ii], " bfb.sc=", bfb.sc[ii],
                   " bme.sc=", bme.sc[ii], " bfe.sc=", bfe.sc[ii],
                   " bmp.sc=", bmp.sc[ii], " bfp.sc=", bfp.sc[ii],
                   " het.b=", het.b[ii], " het.b.sd=", het.b.sd[ii], " het.b.cor=", het.b.cor[ii],
                   " het.e=", het.e[ii], " het.e.sd=", het.e.sd[ii], " het.e.cor=", het.e.cor[ii],
                   " het.p=", het.p[ii], " het.p.sd=", het.p.sd[ii], " het.p.cor=", het.p.cor[ii],
                   " het.gen=", het.gen[ii], " het.gen.sd=", het.gen.sd[ii], " het.gen.cor=", het.gen.cor[ii],
                   " het.beh=", het.beh[ii], " het.beh.sd=", het.beh.sd[ii], " het.beh.cor=", het.beh.cor[ii],
                   " hilo=0 phihi=0 phi.m=0 phi.f=0 rrhi.m=0 rrhi.f=0",
                   " scale.by.sd=", scale.by.sd[ii], " scale.adj=", scale.adj[ii],
                   " infl.fac=", infl.fac[ii], " maxN=", maxN[ii], " sample.tmar=", sample.tmar[ii],
                   " psNonPar=", psNonPar[ii], " seed=1 tmar=(65*12):(113*12) each=", each[ii],
                   " tint=113*12' SimulationStarter.R ", file.path(batchdirnm, "routs", paste0(ds.nm[group[ii]], jb, ".Rout")), sep='')
      # if(ii > 0 & acute.sc[ii]==1 & !totn %in% already.done ) {
      num.doing <- num.doing + 1
      cat(cmd) # add command
      cat('\n') # add new line
      # }
    } } }
sink()
## Resolve donor-country indices to names and save the job bookkeeping table.
blocks$sub.country.nm <- ds.nm[blocks$sub.country]
head(blocks,50)
tail(blocks,50)
print(totn)
print(num.doing)
save(blocks, file=file.path(outdir,'blocks.Rdata'))
####################################################################################################
####################################################################################################
|
7b60d5ad1e20d02592a0988e2a69a1d0323fd435
|
0e7f8960beaaa7ce0403cbbb06089a436883d662
|
/Textmining/DemoR/Bigrams.R
|
f5dafcbb3484fc1c1be62a3b33a40b360ed680fa
|
[] |
no_license
|
urosgodnov/BigData
|
511bfb69d483974731838d705ade4c05c6f67f04
|
9c501dbad764dfe3f3ac264664f36486b9706587
|
refs/heads/master
| 2021-01-25T04:59:07.757782
| 2017-06-11T16:30:54
| 2017-06-11T16:30:54
| 93,498,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,493
|
r
|
Bigrams.R
|
# Bigram analysis of review text: which words follow "room" vs "food",
# visualised as a log-odds lollipop chart.
library(readxl)
library(dplyr)
library(tidyverse)
library(tidytext)
# NOTE(review): `readxl` appears unused -- the data is read with read.csv().
data<-read.csv(file="TA.csv",stringsAsFactors = FALSE, sep=";")
# Tokenize the Review column into word pairs (bigrams).
bigrams <- select(data,Review) %>%
  unnest_tokens(bigram, Review, token = "ngrams", n = 2)
words <- c("room", "food")
# Count bigrams whose first word is "room"/"food", keeping second words
# longer than 3 characters; the second count() produces the column `nn`.
bigram_counts <- bigrams %>%
  count(bigram, sort = TRUE) %>%
  separate(bigram, c("word1", "word2"), sep = " ") %>%
  filter(word1 %in% words & nchar(word2)>3 ) %>%
  count(word1, word2, wt = n, sort = TRUE) %>%
  rename(total = nn)
# Add-one smoothed relative frequencies and the room/food log2 ratio.
# NOTE(review): spread(), mutate_if() and funs() are superseded/deprecated
# (pivot_wider(), across(), list()); left unchanged to preserve behavior.
word_ratios <- bigram_counts %>%
  group_by(word2) %>%
  filter(sum(total) > 2) %>%
  ungroup() %>%
  spread(word1, total, fill = 0) %>%
  mutate_if(is.numeric, funs((. + 1) / sum(. + 1))) %>%
  mutate(logratio = log2(room / food)) %>%
  arrange(desc(logratio))
# Words used about equally with both terms (smallest |logratio|).
word_ratios %>%
  arrange(abs(logratio))
# Top 10 most distinctive second words per side, drawn as segments+points.
word_ratios %>%
  mutate(abslogratio = abs(logratio)) %>%
  group_by(logratio < 0) %>%
  top_n(10, abslogratio) %>%
  ungroup() %>%
  mutate(word = reorder(word2, logratio)) %>%
  ggplot(aes(word, logratio, color = logratio < 0)) +
  geom_segment(aes(x = word, xend = word,
                   y = 0, yend = logratio),
               size = 1.1, alpha = 0.6) +
  geom_point(size = 3.5) +
  coord_flip() +
  labs(x = NULL,
       y=NULL) +
  scale_color_discrete(name = "", labels = c("more with 'room'", "more with 'food'")) +
  scale_y_continuous(breaks = seq(-3, 3),
                     labels = c("0.125x", "0.25x", "0.5x",
                                "Equal", "2x", "4x", "8x"))
|
5efc000966cee2ac36e779f1cc3d8f21e6c92d22
|
c6fff18e936e11b5145112f20bdcd881832881a5
|
/R/methods_path.R
|
bac5b975a04d38d48d196fa8c7f5c26f3c740cf9
|
[] |
no_license
|
hugogogo/natural
|
01c738409fd9d51f803e74003ca68cf2b39a6fce
|
745ee2468ed255633cf23ac0937d5d517c39942e
|
refs/heads/master
| 2021-09-04T09:11:59.102798
| 2018-01-17T16:01:25
| 2018-01-17T16:01:25
| 115,482,957
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,773
|
r
|
methods_path.R
|
#' print a natural.path object
#'
#' This function is adapted from the \pkg{ggb} R package.
#'
#' @param x an object of class \code{natural.path}, as returned by
#' \code{\link{nlasso_path}} and \code{\link{olasso_path}}
#' @param ... additional argument(not used here, only for S3 generic/method consistency)
#'
#' @method print natural.path
#' @export
print.natural.path <- function(x, ...){
  # Print method for "natural.path" objects (adapted from the ggb package):
  # a one-line header (lasso type, number of lambda values) followed by a
  # table of the error-s.d. estimates along the regularization path.
  # `...` is unused; it exists only for S3 print() compatibility.
  cat(sprintf("%s lasso path with %s lambda values.", x$type,
              length(x$lambda)), fill = TRUE)
  # One row per lambda: objective-based, naive, and df-based estimates.
  tab <- data.frame(lambda = x$lambda,
                    sig_objective = x$sig_obj_path,
                    sig_naive = x$sig_naive_path,
                    sig_df = x$sig_df_path)
  print(tab, row.names = FALSE)
}
#' plot a natural.path object
#'
#' This function is adapted from the \pkg{ggb} R package.
#'
#' @param x an object of class \code{natural.path}, as returned by
#' \code{\link{nlasso_path}} and \code{\link{olasso_path}}
#' @param ... additional argument(not used here, only for S3 generic/method consistency)
#' @method plot natural.path
#' @export
plot.natural.path <- function(x, ...){
  # Plot method for "natural.path" objects: the naive, df-based and
  # objective-based error-s.d. estimates against lambda on one panel.
  # `...` is unused; it exists only for S3 plot() compatibility.
  #
  # Fix: the original called par() without restoring it, permanently
  # changing the caller's margins; save and restore on exit instead.
  old_par <- graphics::par(mar = c(5, 5, 5, 1))
  on.exit(graphics::par(old_par), add = TRUE)
  # Common y-range so all three curves fit on the same axes.
  yrange <- range(x$sig_obj_path, x$sig_naive_path, x$sig_df_path)
  graphics::plot(x$lambda, x$sig_naive_path, type = "l", col = "black",
                 lwd = 2,
                 main = paste("Path plot of ", x$type, " lasso", sep = ""),
                 ylim = yrange,
                 ylab = "estimated error s.d.", xlab = "lambda")
  graphics::lines(x$lambda, x$sig_df_path, col = "red", lwd = 2)
  graphics::lines(x$lambda, x$sig_obj_path, col = "blue", lwd = 2)
  graphics::legend("bottomright",
                   legend = c("naive", "df", "natural"),
                   lwd = c(2, 2, 2),
                   col = c("black", "red", "blue"))
}
|
e0f268f5930cc19fbed1e393efba6c23c40ff07d
|
8cf4416f7e4c9016d85a616aaae3fbf0d48cf9a4
|
/r/Old/SparrowAttenuation20100713.r
|
9bbd71e4167ab586d367d0b9a421be491eb60d54
|
[] |
no_license
|
willbmisled/MRB1
|
35f9bb4ef9279f55b1348b8b3fbda6543ddbc70d
|
af39fb697255df15ae41131d76c6fcf552a55a70
|
refs/heads/master
| 2020-07-20T08:43:00.460675
| 2017-06-14T14:09:38
| 2017-06-14T14:09:38
| 94,337,564
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,945
|
r
|
SparrowAttenuation20100713.r
|
# Exploratory nutrient-attenuation analysis for NE lakes.
# NOTE(review): assumes the loaded .rda provides data frames `NLA` and
# `MRB1` with the columns referenced below (Ninput, TN, Outflow, HL, ...)
# -- confirm against MRB120100630.rda. `NLA` is used before being
# redefined near the end of the script.
#Get the MRB1 data
load(file='M:/Net MyDocuments/EPA/Data/Sparrow/MRB1Sparrow/R/MRB120100630.rda')
######################
#Calculate N_NR(mg/l) = Nin-Nitrogen removed based on Harrison et al 2009
Rcal_N<-(NLA$Ninput-(NLA$TN*10**-3*NLA$Outflow))/NLA$Ninput #estimate % Nitrogen Removed
VfCal_N<--NLA$HL*log(1-Rcal_N)
summary(VfCal_N,na.rm=T) #median=8.748 #Harrison et al 1990 median value = 6.83
# Apply the median settling velocity to all MRB1 lakes via hydraulic load.
NRH<-round((1-exp(-median(VfCal_N,na.rm=T)/MRB1$HL)),2)
summary(NRH) #median=32%
#Nin adjusted for removal
N_NR<-MRB1$Nin*(1-NRH)
summary(N_NR)
#Total N removed by NE Lakes
sum(MRB1$Ninput*NRH)/10**6 # 19.2 M kilos
#Calculate P_PR(mg/l) = Pin-Phosphorus removed based on Harrison et al 2009
Rcal_P<-(NLA$Pinput-(NLA$TP*10**-3*NLA$Outflow))/NLA$Pinput #estimate % Phosphorus Removed
VfCal_P<--NLA$HL*log(1-Rcal_P)
summary(VfCal_P,na.rm=T) #median=13.0100
PRH<-round((1-exp(-median(VfCal_P,na.rm=T)/MRB1$HL)),2)
summary(PRH) #Median=40%
#Pin adjusted for removal
P_PR<-MRB1$Pin*(1-PRH)
#Total P removed by NE Lakes
sum(MRB1$Pinput*PRH)/10**6 # 1.7 M kilos
####################################
#Kellog et al estimate
NRQ<-.7924-(.3326*log10(MRB1$HL))
NRQ[NRQ<0]<-0 #adjust to min=0% and max=100%
NRQ[NRQ>1]<-1
summary(NRQ) #median=31%
#Total N removed by NE Lakes
sum(MRB1$Ninput*NRQ)/10**6 # 18.2 M kilos
# Compare the Harrison-based (NRH) and Kellog-based (NRQ) removal estimates.
par(mfrow=c(1,1))
plot(NRH,NRQ)
plot(density(log10(MRB1$Ninput*NRQ)),lwd=2,col='red',ylim=c(0,.8))
lines(density(log10(MRB1$Ninput*NRH)),lwd=2,col='green')
hist(log10(MRB1$Ninput*NRH))
# Map lakes colored by order of magnitude of N removed, for each estimate.
# NOTE(review): length(NLA) is the number of columns of a data frame, not
# rows -- Color may be the wrong length here; verify intent.
par(mfrow=c(1,2))
Color<-rep(NA,length(NLA))
Color[log10(MRB1$Ninput*NRH)>4]<-'red'
Color[log10(MRB1$Ninput*NRH)<=3]<-'orange'
Color[log10(MRB1$Ninput*NRH)<=2]<-'goldenrod'
Color[log10(MRB1$Ninput*NRH)<=1]<-'green'
table(Color)
plot(MRB1$AlbersX,MRB1$AlbersY,pch=19,cex=.6,col=Color)
Color<-rep(NA,length(NLA))
Color[log10(MRB1$Ninput*NRQ)>4]<-'red'
Color[log10(MRB1$Ninput*NRQ)<=3]<-'orange'
Color[log10(MRB1$Ninput*NRQ)<=2]<-'goldenrod'
Color[log10(MRB1$Ninput*NRQ)<=1]<-'green'
table(Color)
plot(MRB1$AlbersX,MRB1$AlbersY,pch=19,cex=.6,col=Color)
####################################
####################################
#Alexander et al 2006 method
NRA<-1-(1/(1+(9.9*(MRB1$HL**-1))))
summary(NRA)
#Total N removed by NE Lakes
sum(MRB1$Ninput*NRA)/10**6 # 18 M kilos
plot(log10(MRB1$HL),NRA)
####################################
####################################
#redefine NLA data.frame to include N_NR and P_PR
#NOTE: this includes both probability and reference lakes
NLA<-subset(MRB1,MRB1$Rank==1 & LAKE_SAMP=='Target_Sampled')
plot((MRB1$hrtGIS),(MRB1$Pin-MRB1$TP)/MRB1$Pin,xlim=c(0,5))
plot(log10(MRB1$hrtGIS),log10(MRB1$Pin-MRB1$TP))
plot(log10(MRB1$hrtGIS),(log10(MRB1$Pin)-log10(MRB1$TP))/log10(MRB1$Pin))
#Seitzinger et al 2002
#R=88.45(D/T)**-.3677
NRS<-88.45*((MRB1$Zmean/MRB1$hrt)**-.3677)
#Total N removed by NE Lakes
sum(MRB1$Ninput*NRS,na.rm=T)/10**6 # 19.2 M kilos
summary(NRS)
|
c775044f397bd8a7c30363e51238db55d5126520
|
156688b10a61b43f95a5bd2eefa2b11a3aee238f
|
/R/fviz_dend.R
|
fc6af2f44f0a241bd0fd33b7461e5dc5749d1acb
|
[] |
no_license
|
qfazille/factoextra
|
821526929302c7df88510f4ba50bd933af1fcf46
|
38bce51c8a20dfc345dafa85f13b65ef86663cb8
|
refs/heads/master
| 2021-09-26T12:32:44.605756
| 2018-10-30T08:32:11
| 2018-10-30T08:32:11
| 111,820,008
| 1
| 0
| null | 2017-11-23T14:38:52
| 2017-11-23T14:38:52
| null |
UTF-8
|
R
| false
| false
| 19,407
|
r
|
fviz_dend.R
|
#' Enhanced Visualization of Dendrogram
#'
#' @description Draws easily beautiful dendrograms using either R base plot or
#' ggplot2. Provides also an option for drawing a circular dendrogram and
#' phylogenic trees.
#' @param x an object of class dendrogram, hclust, agnes, diana, hcut,
#' hkmeans or HCPC (FactoMineR).
#' @param k the number of groups for cutting the tree.
#' @param h a numeric value. Cut the dendrogram by cutting at height h. (k
#' overrides h)
#' @param k_colors,palette a vector containing colors to be used for the groups.
#' It should contains k number of colors. Allowed values include also "grey"
#' for grey color palettes; brewer palettes e.g. "RdBu", "Blues", ...; and
#' scientific journal palettes from ggsci R package, e.g.: "npg", "aaas",
#' "lancet", "jco", "ucscgb", "uchicago", "simpsons" and "rickandmorty".
#' @param show_labels a logical value. If TRUE, leaf labels are shown. Default
#' value is TRUE.
#' @param color_labels_by_k logical value. If TRUE, labels are colored
#' automatically by group when k != NULL.
#' @param label_cols a vector containing the colors for labels.
#' @param labels_track_height a positive numeric value for adjusting the room for the
#' labels. Used only when type = "rectangle".
#' @param repel logical value. Use repel = TRUE to avoid label overplotting when
#' type = "phylogenic".
#' @param lwd a numeric value specifying branches and rectangle line width.
#' @param type type of plot. Allowed values are one of "rectangle", "triangle",
#' "circular", "phylogenic".
#' @param phylo_layout the layout to be used for phylogenic trees. Default value
#' is "layout.auto". Allowed values include:
#' \code{\link[igraph]{layout.auto}}, \code{\link[igraph]{layout_with_drl}},
#' \code{\link[igraph]{layout_as_tree}}, \code{\link[igraph]{layout.gem}},
#' \code{\link[igraph]{layout.mds}} and \code{\link[igraph]{layout_with_lgl}}.
#' @param rect logical value specifying whether to add a rectangle around
#' groups. Used only when k != NULL.
#' @param rect_border,rect_lty border color and line type for rectangles.
#' @param rect_fill a logical value. If TRUE, fill the rectangle.
#' @param lower_rect a value of how low should the lower part of the rectangle
#' around clusters. Ignored when rect = FALSE.
#' @param horiz a logical value. If TRUE, an horizontal dendrogram is drawn.
#' @param cex size of labels
#' @param main,xlab,ylab main and axis titles
#' @param sub Plot subtitle. If NULL, the method used hierarchical clustering is
#' shown. To remove the subtitle use sub = "".
#' @param ggtheme function, ggplot2 theme name. Default value is
#' theme_classic(). Allowed values include ggplot2 official themes:
#' theme_gray(), theme_bw(), theme_minimal(), theme_classic(), theme_void(),
#' ....
#' @param ... other arguments to be passed to the function plot.dendrogram()
#' @return an object of class fviz_dend which is a ggplot with the attributes
#' "dendrogram" accessible using attr(x, "dendrogram"), where x is the result
#' of fviz_dend().
#' @examples
#' \donttest{
#' # Load and scale the data
#' data(USArrests)
#' df <- scale(USArrests)
#'
#' # Hierarchical clustering
#' res.hc <- hclust(dist(df))
#'
#' # Default plot
#' fviz_dend(res.hc)
#'
#' # Cut the tree
#' fviz_dend(res.hc, cex = 0.5, k = 4, color_labels_by_k = TRUE)
#'
#' # Don't color labels, add rectangles
#' fviz_dend(res.hc, cex = 0.5, k = 4,
#' color_labels_by_k = FALSE, rect = TRUE)
#'
#' # Change the color of tree using black color for all groups
#' # Change rectangle border colors
#' fviz_dend(res.hc, rect = TRUE, k_colors ="black",
#' rect_border = 2:5, rect_lty = 1)
#'
#' # Customized color for groups
#' fviz_dend(res.hc, k = 4,
#' k_colors = c("#1B9E77", "#D95F02", "#7570B3", "#E7298A"))
#'
#'
#' # Color labels using k-means clusters
#' km.clust <- kmeans(df, 4)$cluster
#' fviz_dend(res.hc, k = 4,
#' k_colors = c("blue", "green3", "red", "black"),
#' label_cols = km.clust[res.hc$order], cex = 0.6)
#'
#' }
#' @export
fviz_dend <- function(x, k = NULL, h = NULL, k_colors = NULL, palette = NULL, show_labels = TRUE, color_labels_by_k = TRUE,
                      label_cols = NULL, labels_track_height = NULL, repel = FALSE, lwd = 0.7,
                      type = c("rectangle", "circular", "phylogenic"),
                      phylo_layout = "layout.auto",
                      rect = FALSE, rect_border = "gray", rect_lty = 2, rect_fill = FALSE, lower_rect,
                      horiz = FALSE, cex = 0.8, main = "Cluster Dendrogram", xlab = "", ylab = "Height",
                      sub = NULL, ggtheme = theme_classic(), ...)
{
  # Entry point. Normalizes the many supported clustering classes to a
  # `dendrogram`, applies cut/colors, then delegates drawing to
  # .ggplot_dend() (rectangle/circular) or .phylogenic_tree().
  # `palette` is accepted as a synonym for `k_colors` when only it is given.
  if(missing(k_colors) & !is.null(palette)) {
    k_colors <- palette
    palette <- NULL
  }
  if(!color_labels_by_k & is.null(label_cols)) label_cols <- "black"
  type <- match.arg(type)
  circular <- type == "circular"
  phylogenic <- type == "phylogenic"
  rectangle <- type == "rectangle"
  # --- Normalize input to a dendrogram + clustering method name -------------
  if(inherits(x, "HCPC")){
    k <- length(unique(x$data.clust$clust))
    x <- x$call$t$tree #hclust
  }
  if(inherits(x, "hcut")){
    k <- x$nbclust
    dend <- as.dendrogram(x)
    method <- x$method
  }
  else if(inherits(x, "hkmeans")){
    k <- length(unique(x$cluster))
    dend <- as.dendrogram(x$hclust)
    method <- x$hclust$method
  }
  else if(inherits(x, c("hclust", "agnes", "diana"))) {
    dend <- as.dendrogram(x)
    method <- x$method
  }
  else if(inherits(x, "dendrogram")) {
    dend <- x
    method <- ""
  }
  else stop("Can't handle an object of class ", paste(class(x), collapse =", ") )
  if(is.null(method)) method <- ""
  else if(is.na(method)) method <- ""
  # Default subtitle shows the linkage method.
  if(is.null(sub) & method!="") sub = paste0("Method: ", method)
  # A cex already stored on the dendrogram wins over the `cex` argument.
  if(!is.null(dendextend::labels_cex(dend))) cex <- dendextend::labels_cex(dend)
  dend <- dendextend::set(dend, "labels_cex", cex)
  dend <- dendextend::set(dend, "branches_lwd", lwd)
  # --- Cut the tree and color branches/labels by group ----------------------
  k <- .get_k(dend, k, h)
  if(!is.null(k)) {
    if(ggpubr:::.is_col_palette(k_colors)) k_colors <- ggpubr:::.get_pal(k_colors, k = k)
    else if(is.null(k_colors)) k_colors <- ggpubr:::.get_pal("default", k = k)
    dend <- dendextend::set(dend, what = "branches_k_color", k = k, value = k_colors)
    if(color_labels_by_k) dend <- dendextend::set(dend, "labels_col", k = k, value = k_colors)
  }
  if(!is.null(label_cols)){
    dend <- dendextend::set(dend, "labels_col", label_cols)
  }
  leaflab <- ifelse(show_labels, "perpendicular", "none")
  if(xlab =="") xlab <- NULL
  if(ylab=="") ylab <- NULL
  # Room reserved under the tree for leaf labels.
  max_height <- max(dendextend::get_branches_heights(dend))
  if(missing(labels_track_height))
    labels_track_height <- max_height/8
  if(max_height < 1) offset_labels <- -max_height/100
  else offset_labels <- -0.1
  # --- Draw -----------------------------------------------------------------
  if(rectangle | circular){
    p <- .ggplot_dend(dend, type = "rectangle", offset_labels = offset_labels, nodes = FALSE,
                      ggtheme = ggtheme, horiz = horiz, circular = circular, palette = palette,
                      labels = show_labels, label_cols = label_cols,
                      labels_track_height = labels_track_height, ...)
    if(!circular) p <- p + labs(title = main, x = xlab, y = ylab)
  }
  else if(phylogenic){
    p <- .phylogenic_tree(dend, labels = show_labels, label_cols = label_cols,
                          palette = palette, repel = repel,
                          ggtheme = ggtheme, phylo_layout = phylo_layout, ...)
  }
  # Rectangles around clusters (only meaningful for the rectangle type).
  if(circular | phylogenic | is.null(k)) rect <- FALSE
  if(rect_fill & missing(rect_lty)) rect_lty = "blank"
  if(missing(lower_rect)) lower_rect = -(labels_track_height+0.5)
  if(rect){
    p <- p + .rect_dendrogram(dend, k = k, palette = rect_border, rect_fill = rect_fill,
                              rect_lty = rect_lty, size = lwd,
                              lower_rect = lower_rect)
  }
  # Keep the dendrogram accessible via attr(p, "dendrogram").
  attr(p, "dendrogram") <- dend
  # BUGFIX: the original called structure() without assigning the result, so
  # the documented "fviz_dend" class was silently dropped from the return
  # value. Appending the class is backward compatible for ggplot consumers.
  p <- structure(p, class = c(class(p), "fviz_dend"))
  return(p)
}
# require igraph
.phylogenic_tree <- function(dend, labels = TRUE, label_cols = NULL,
                             palette = NULL, repel = FALSE,
                             ggtheme = theme_classic(),
                             phylo_layout = "layout.auto", ...){
  # Draw a phylogenic-style tree: converts the dendrogram to a 'phylo'
  # edge list, lays it out with an igraph layout algorithm, and plots
  # branches + leaf labels with ggplot2/ggpubr. Returns a ggplot object.
  if (!requireNamespace("igraph", quietly = TRUE)) {
    stop("igraph package needed for phylogenic tree. Please install it using install.packages('igraph').")
  }
  allowed_layouts <- c("layout.auto", "layout_with_drl", "layout_as_tree",
                       "layout.gem", "layout.mds", "layout_with_lgl")
  if(!(phylo_layout %in% allowed_layouts)) stop( phylo_layout, " is not supported as layout. ", "Allowed phylogenic layout are: ",
                                                 paste( allowed_layouts, collapse = ", "))
  # Map the layout name to the corresponding igraph function.
  layout_func <- switch(phylo_layout,
                        layout.auto = igraph::layout.auto,
                        layout_with_drl = igraph::layout_with_drl,
                        layout_as_tree = igraph::layout_as_tree,
                        layout.gem = igraph::layout.gem,
                        layout.mds = igraph::layout.mds,
                        layout_with_lgl = igraph::layout_with_lgl
  )
  # Convert to 'phylo' object
  hc <- stats::as.hclust(dend)
  phylo_tree <- .as.phylo(hc)
  graph_edges <- phylo_tree$edge
  # get graph from edge list
  graph_net <- igraph::graph.edgelist(graph_edges)
  # extract layout (x-y coords)
  # NOTE(review): set.seed() here mutates the caller's global RNG state so
  # the stochastic layouts are reproducible.
  set.seed(123)
  graph_layout = layout_func(graph_net)
  # number of observations (leaves); in the phylo edge matrix the first
  # `nobs` vertex ids are the tips.
  nobs <- length(hc$labels)
  # draw tree branches: one segment per edge, endpoints from the layout.
  data.segments <- data.frame(
    x = graph_layout[graph_edges[,1],1],
    y = graph_layout[graph_edges[,1],2],
    xend = graph_layout[graph_edges[,2],1],
    yend = graph_layout[graph_edges[,2],2]
  )
  data.labels <- data.frame(
    x = graph_layout[1:nobs,1],
    y = graph_layout[1:nobs,2],
    label = phylo_tree$tip.label
  )
  # Sort alphabetically so rows align with the dendextend label table below.
  data.labels <- data.labels[order(as.vector(data.labels$label)), ]
  # Support for dendextend: reuse its per-label color/size attributes.
  gdend <- dendextend::as.ggdend(dend)
  gdat <- dendextend::prepare.ggdend(gdend)
  gdat$labels <- gdat$labels[order(as.vector(gdat$labels$label)), ]
  data.labels <- cbind(data.labels, gdat$labels[, c("col", "cex")])
  if(!is.null(dendextend::labels_cex(dend))) font.label <- round(dendextend::labels_cex(dend)[1]*12)
  else font.label <- 12
  p <- ggplot() + geom_segment(data = data.segments,
                               aes_string(x = "x", y = "y", xend = "xend", yend = "yend"),
                               lineend = "square")
  if(is.null(label_cols)) label_cols <- "col"
  # ggscatter's `label` argument expects a column name or NULL.
  if(!labels) labels <- NULL
  else labels <- "label"
  p <- ggpubr::ggscatter(data.labels, "x", "y", label = labels,
                         color = label_cols,
                         ggp = p, repel = repel, font.label = font.label, ...)
  if(is.null(palette)) p <- p + scale_colour_identity()
  p <- ggpubr::ggpar(p, ggtheme = ggtheme, palette = palette, ...)
  # Hide axes entirely; coordinates are layout positions with no meaning.
  p <- p + theme(axis.title.x = element_blank(), axis.title.y = element_blank(),
                 axis.text= element_blank(),
                 axis.line = element_blank(), axis.ticks = element_blank(),
                 legend.position = "none")
  p
}
# Helper functions
#%%%%%%%%%%%%%%%%%%%%
# Plot dendrogram using ggplot
# .ggplot_dend derrived from dendextend::ggplot.ggdend
# data: a ggdend class object.
.ggplot_dend <- function (dend, segments = TRUE, labels = TRUE, nodes = TRUE,
                          horiz = FALSE, ggtheme = theme_classic(),
                          offset_labels = 0, circular = FALSE, type = "rectangle",
                          palette = NULL, label_cols = NULL, labels_track_height = 1,
                          ...) {
  # Render a dendrogram (rectangle or circular) with ggplot2.
  # Derived from dendextend::ggplot.ggdend; returns a ggplot object.
  # Cleanup vs original: removed a redundant nested `if (circular)` and the
  # unused local `n_rows`; behavior is unchanged.
  gdend <- dendextend::as.ggdend(dend, type = type)
  # Default label orientation: vertical labels for a vertical tree,
  # horizontal for a horizontal one.
  gdend$labels$angle <- ifelse(horiz, 0, 90)
  gdend$labels$hjust <- ifelse(horiz, 0, 1)
  gdend$labels$vjust <- 0.5
  if (circular) {
    # Rotate labels around the circle (6 o'clock vs 12 o'clock halves) so
    # they stay readable after coord_polar().
    pms <- .get_label_params(gdend$labels)
    gdend$labels$angle <- pms$angle
    gdend$labels$hjust <- pms$hjust
  }
  data <- dendextend::prepare.ggdend(gdend)
  # Nudge the first label to avoid overlap at the coord_polar start point.
  if (circular) {
    data$labels$x[1] <- 0.7
    data$labels$vjust[1] <- 1.7
  }
  p <- ggplot()
  if (segments) {
    # Tree branches; col/lty/lwd columns were filled in by prepare.ggdend().
    p <- p + geom_segment(data = data$segments,
                          aes_string(x = "x", y = "y", xend = "xend", yend = "yend",
                                     colour = "col", linetype = "lty", size = "lwd"), lineend = "square") +
      guides(linetype = FALSE, col = FALSE) +
      scale_size_identity() + scale_linetype_identity()
    if (is.null(palette)) p <- p + scale_colour_identity()
  }
  if (nodes) {
    # Optional node markers.
    p <- p + geom_point(data = data$nodes,
                        aes_string(x = "x", y = "y", colour = "col", shape = "pch", size = "cex")) +
      guides(shape = FALSE, col = FALSE, size = FALSE) +
      scale_shape_identity()
  }
  if (labels) {
    # Leaf labels, shifted by `offset_labels` so they sit clear of branches.
    data$labels$cex <- 5 * data$labels$cex
    data$labels$y <- data$labels$y + offset_labels
    if (is.null(label_cols)) label_cols <- "col"
    p <- p + ggpubr::geom_exec(geom_text, data = data$labels,
                               x = "x", y = "y", label = "label", color = label_cols, size = "cex",
                               angle = "angle", hjust = "hjust", vjust = "vjust")
  }
  p <- ggpubr::ggpar(p, ggtheme = ggtheme, palette = palette, ...) + theme(axis.line = element_blank())
  if (horiz & !circular) {
    p <- p + coord_flip() + scale_y_reverse()+
      theme(axis.text.y = element_blank(), axis.ticks.y = element_blank(),
            axis.text.x = element_text())
  }
  else p <- p + theme(axis.text.x = element_blank(), axis.ticks.x = element_blank())
  if (circular) {
    # Wrap the tree around a polar coordinate system; the reversed y limits
    # put the root at the center.
    p <- p + theme(plot.margin = margin(0, 0, 0, 0),
                   axis.title.x = element_blank(), axis.title.y = element_blank(),
                   axis.text= element_blank(),
                   axis.line = element_blank(), axis.ticks = element_blank())+
      ylim(max(dendextend::get_branches_heights(dend)), -1)+
      coord_polar(theta = 'x', direction = 1)
  }
  else {
    # Reserve room under (or left of) the tree for the label track.
    p <- p + expand_limits(y=-labels_track_height)
  }
  p
}
# Function used for circular dendrogram
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Create the angle and hjust vectors so that the labels
# rotation switches from 6 o'clock to 12 o'clock to improve readability.
# Compute per-label rotation for a circular dendrogram so that labels flip
# from the 6 o'clock to the 12 o'clock orientation halfway around the circle.
#
# @param labeldf data frame of labels; only the number of rows (length of $y)
#   is used.
# @return list with numeric vectors `angle` and `hjust`, one entry per label.
.get_label_params <- function(labeldf) {
  nn <- length(labeldf$y)
  halfn <- floor(nn/2)
  # Angles evenly spread over the circle, offset +/-90 degrees per half.
  firsthalf <- rev(90 + seq(0, 360, length.out = nn))
  secondhalf <- rev(-90 + seq(0, 360, length.out = nn))
  angle <- numeric(nn)
  # BUGFIX: the original used `angle[1:halfn]`, which errors when nn == 1
  # (halfn == 0 makes 1:halfn count backwards as c(1, 0)); seq_len() yields
  # an empty index instead and is a no-op for the first half.
  angle[seq_len(halfn)] <- firsthalf[seq_len(halfn)]
  angle[seq_len(nn - halfn) + halfn] <- secondhalf[seq_len(nn - halfn) + halfn]
  hjust <- numeric(nn)
  hjust[seq_len(halfn)] <- 0
  hjust[seq_len(nn - halfn) + halfn] <- 1
  return(list(angle = angle, hjust = hjust))
}
# Convert 'hclust' to 'phylo' object
# used for phylogenic tree
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# from ape::as.phylo.hclust
# x hclust
# Convert an 'hclust' object to an ape-style 'phylo' object (edge matrix,
# edge lengths, tip labels). Adapted from ape::as.phylo.hclust; kept local so
# ape is not a dependency.
# x: an hclust object (uses x$merge, x$height, x$labels).
.as.phylo <- function (x, ...)
{
  N <- dim(x$merge)[1]             # number of internal merges
  edge <- matrix(0L, 2 * N, 2)     # each merge contributes two edges
  edge.length <- numeric(2 * N)
  node <- integer(N)               # phylo vertex id assigned to each merge
  node[N] <- N + 2L                # root id (tips occupy ids 1..N+1)
  cur.nod <- N + 3L                # next free internal-node id
  j <- 1L                          # write cursor into `edge`
  # Walk merges from the root down; hclust's $merge uses negative numbers
  # for leaves and positive numbers for earlier merge rows.
  for (i in N:1) {
    edge[j:(j + 1), 1] <- node[i]
    for (l in 1:2) {
      k <- j + l - 1L
      y <- x$merge[i, l]
      if (y > 0) {
        # Child is an internal node: allocate its id and record the
        # height drop from parent to child as the edge length.
        edge[k, 2] <- node[y] <- cur.nod
        cur.nod <- cur.nod + 1L
        edge.length[k] <- x$height[i] - x$height[y]
      }
      else {
        # Child is a leaf (-y is the observation index).
        edge[k, 2] <- -y
        edge.length[k] <- x$height[i]
      }
    }
    j <- j + 2L
  }
  if (is.null(x$labels))
    x$labels <- as.character(1:(N + 1))
  # NOTE(review): edge lengths are halved here; presumably to match ape's
  # ultrametric convention -- confirm against ape::as.phylo.hclust.
  obj <- list(edge = edge, edge.length = edge.length/2, tip.label = x$labels,
              Nnode = N)
  class(obj) <- "phylo"
  obj
  #ape::reorder.phylo(obj)
}
# Get k value if h specified
# Make also some checking
# dend a dendrogram object
# h: tree height
# Resolve the number of clusters for a dendrogram cut.
# Returns `k` as supplied, derives it from the cut height `h` (smallest k
# whose cut height falls below h, floored at 2), or NULL when neither is
# given. Exactly one of k/h may be specified; both must be scalars.
.get_k <- function(dend, k = NULL, h = NULL){
  if (!dendextend::is.dendrogram(dend)) stop("x is not a dendrogram object.")
  # BUGFIX/idiom: a scalar `if` condition needs the short-circuit `||`; the
  # vectorized `|` errors on R >= 4.3 when either argument has length > 1.
  if (length(h) > 1L || length(k) > 1L)
    stop("'k' and 'h' must be a scalar(i.e.: of length 1)")
  # Heights are named by their k; drop the trivial first entry.
  tree_heights <- dendextend::heights_per_k.dendrogram(dend)[-1]
  if (!is.null(h)) {
    if (!is.null(k))
      stop("specify exactly one of 'k' and 'h'")
    ss_ks <- tree_heights < h
    k <- min(as.numeric(names(ss_ks))[ss_ks])
    k <- max(k, 2)
  }
  k
}
# Add rectangle to a dendrogram
# lower_rect: a (scalar) value of how low should the lower part of the rect be.
.rect_dendrogram <- function (dend, k = NULL, h = NULL,
                              k_colors = NULL, palette = NULL, rect_fill = FALSE, rect_lty = 2,
                              lower_rect=-1.5,
                              ...)
{
  # Build a geom_rect layer framing the k clusters of `dend`.
  # lower_rect: how far below 0 the rectangles extend (room for labels).
  if(missing(k_colors) & !is.null(palette)) k_colors <- palette
  # Rect tops sit this proportion of the way between the cut height for k
  # and the one for k + 1.
  prop_k_height <- 0.5
  if (!dendextend::is.dendrogram(dend))
    stop("x is not a dendrogram object.")
  k <- .get_k(dend, k, h)
  tree_heights <- dendextend::heights_per_k.dendrogram(dend)[-1]
  tree_order <- stats::order.dendrogram(dend)
  if (is.null(k)) stop("specify k")
  if (k < 2) {
    stop(gettextf("k must be between 2 and %d", length(tree_heights)),
         domain = NA)
  }
  # Cluster sizes in left-to-right display order; cumulative sums give the
  # x-boundaries between clusters.
  cluster <- dendextend::cutree(dend, k = k)
  clustab <- table(cluster)[unique(cluster[tree_order])]
  m <- c(0, cumsum(clustab))
  which <- 1L:k
  xleft <- ybottom <- xright <- ytop <- list()
  for (n in seq_along(which)) {
    next_k_height <- tree_heights[names(tree_heights) == k + 1]
    if (length(next_k_height) == 0) {
      # No finer cut exists: draw the rect all the way up to the k-cut.
      next_k_height <- 0
      prop_k_height <- 1
    }
    xleft[[n]] = m[which[n]] + 0.66
    ybottom[[n]] = lower_rect
    xright[[n]] = m[which[n] + 1] + 0.33
    ytop[[n]] <- tree_heights[names(tree_heights) == k] *
      prop_k_height + next_k_height * (1 - prop_k_height)
  }
  df <- data.frame(xmin = unlist(xleft), ymin = unlist(ybottom), xmax = unlist(xright), ymax = unlist(ytop))
  color <- k_colors
  # BUGFIX: the original `if (color == "cluster")` raised an error when
  # `color` was NULL (zero-length condition) or a vector of border colors
  # such as the documented rect_border = 2:5 (length > 1 condition, an error
  # on R >= 4.2). identical() always yields a single logical.
  if (identical(color, "cluster")) color <- "default"
  if(ggpubr:::.is_col_palette(color)) color <- ggpubr:::.get_pal(color, k = k)
  else if(length(color) > 1 & length(color) < k){
    # Recycle a short color vector to cover all k clusters.
    color <- rep(color, k)[1:k]
  }
  if(rect_fill){
    fill <- color
    alpha <- 0.2
  }
  else {
    fill <- "transparent"
    alpha <- 0
  }
  df$color <- color
  df$cluster <- as.factor(paste0("c", 1:k))
  ggpubr::geom_exec(geom_rect, data = df,
                    xmin = "xmin", ymin = "ymin", xmax = "xmax", ymax = "ymax",
                    fill = fill, color = color, linetype = rect_lty, alpha = alpha, ...)
}
|
3422fb3920a57771829ae5b3e13828647a36d9b9
|
9048f030f96788ccb46f0b50dda2daebad828a3a
|
/AprendiendoQD1.R
|
269bc7eb3aed4613153561c39289491af4421fe2
|
[] |
no_license
|
carlosrmngucuenca/RmethodsSearch
|
e15841f61c983329af2b3b28810f85d5c9941fe7
|
27182e30b62fdb31ae59a516a6c199dcbe508083
|
refs/heads/master
| 2020-09-04T16:17:08.783501
| 2019-11-17T15:42:33
| 2019-11-17T15:42:33
| 219,798,814
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,671
|
r
|
AprendiendoQD1.R
|
library("igraph")
# Global state for the menu-driven search demo below.
nodos=list()        # forward adjacency: Nodo objects keyed by name
nodos2ways=list()   # reversed adjacency (children point back to parents)
opcionmenu<- 0
# Edge list CSV: columns are (Inicio, fin, weight, heuristic).
arnod<-read.csv("ArchivoPrubas.csv",header = TRUE, sep = ",",stringsAsFactors = FALSE)
dataf=data.frame(arnod)
# Keep only rows that actually define an edge (non-empty `fin`).
datap<-dataf[!((dataf$fin)==""),c(1,2,3)]
node<-sort(union(unique(datap$Inicio),unique(datap$fin)),decreasing = FALSE)
# `Grafo` is a reference class defined elsewhere in the project -- it exposes
# a weighted adjacency matrix in $matriz (TODO confirm its definition).
Migrafo<-Grafo$new()
Migrafo$set_matriz_aristapeso(length(node),c(node))
Migrafo$get_matrix()
# Reset the traversal flags (visited/added/hasnivel) on every node in the
# global `nodos` list so a new search can run. Nodes are reference objects,
# so the setters mutate them in place.
limpiar <- function(){
  # BUGFIX: the original iterated 1:length(nodos), which evaluates to
  # c(1, 0) and fails when `nodos` is still empty; seq_along() is safe.
  for (variable in seq_along(nodos)) {
    nodos[[variable]]$set_visitado(FALSE)
    nodos[[variable]]$set_added(FALSE)
    nodos[[variable]]$set_hasnivel(FALSE)
  }
}
#Busquedas A ciegas
BFS<-function(Raiz,Meta){
  # Breadth-first (blind) search from node `Raiz` to node `Meta`.
  # Relies on project classes defined elsewhere: `Queue` (FIFO) and `Nodo`
  # (reference object with get_dato/get_lista/get_visited/get_added and the
  # matching setters). Prints each expanded node; always returns 0.
  # Commented-out lines below are the author's leftover debug traces.
  #print(str(Raiz))
  cola<- Queue$new()
  cola$add(Raiz)
  cat("Recorrido : -> ",Raiz$get_dato(),"\n")
  #cat("el nuevo valor e raiz es : -> ",Raiz$get_dato(),"\n")
  #cat("el valor de la cola al entrar es : -> ",Raiz$get_dato(),"\n")
  while (cola$vacia()!=TRUE) {
    #print("entras \n")
    if(Raiz$get_dato()==Meta$get_dato()){
      # Goal reached: drop it from the queue and stop.
      cola$remove()
      break
    }else{
      Raiz$set_visitado(TRUE)
      #cat("el valor visiatdo de la Raiz es : -> ",Raiz$get_visited(),"\n")
      cola$remove()
      #cat("el valor qque se remueve es es : -> ",r,"\n")
      if (length(Raiz$get_lista())!=0 ) {
        # Enqueue every unvisited, not-yet-queued child.
        #cat("el valor de el tam es : -> ",length(Raiz$get_lista()),"\n")
        for (variable in 1:length(Raiz$get_lista())) {
          #cat("el valor de el contador es : -> ",variable,"\n")
          Nodohijo=Raiz$get_lista()[[variable]]
          #cat("el valor de un hijo de un nodo es : -> ",Nodohijo$get_dato(),"\n")
          if (Nodohijo$get_visited() == FALSE && Nodohijo$get_added()==FALSE ){
            Nodohijo$set_added(TRUE)
            cola$add(Nodohijo)
            if(cola$vacia()!=TRUE){
              #cat("el valor en la cola es : -> ",cola$frente()$get_dato(),"\n")
            }
          }
        }
      }
      # Advance to the next queued node.
      if(cola$vacia()!=TRUE){
        Raiz<-cola$peek();
        cat("Recorrido : -> ",Raiz$get_dato(),"\n")
      }
    }
  }
  #print("sale del while")
  return(0)
}## end BFS
DFS<-function(Raiz,Meta){
  # Depth-first (blind) search from `Raiz` to `Meta`: identical to BFS()
  # above except the frontier is a `Pila` (stack, LIFO) instead of a queue.
  # Prints each expanded node; always returns 0.
  # Commented-out lines below are the author's leftover debug traces.
  #print(str(Raiz))
  pila<- Pila$new()
  pila$add(Raiz)
  #cat("el nuevo valor e raiz es : -> ",Raiz$get_dato(),"\n")
  #cat("el valor de la cola al entrar es : -> ",Raiz$get_dato(),"\n")
  cat("Recorrido : -> ",Raiz$get_dato(),"\n")
  while (pila$vacia()!=TRUE) {
    #print("entras \n")
    if(Raiz$get_dato()==Meta$get_dato()){
      # Goal reached: drop it from the stack and stop.
      pila$remove()
      break
    }else{
      Raiz$set_visitado(TRUE)
      #cat("el valor visiatdo de la Raiz es : -> ",Raiz$get_visited(),"\n")
      pila$remove()
      #cat("el valor qque se remueve es es : -> ",r,"\n")
      if (length(Raiz$get_lista())!=0 ) {
        # Push every unvisited, not-yet-stacked child.
        #cat("el valor de el contador es : -> ",variable,"\n")
        for (variable in 1:length(Raiz$get_lista())) {
          #cat("el valor de el contador es : -> ",variable,"\n")
          Nodohijo=Raiz$get_lista()[[variable]]
          #cat("el valor de un hijo de un nodo es : -> ",Nodohijo$get_dato(),"\n")
          if (Nodohijo$get_visited() == FALSE && Nodohijo$get_added()==FALSE ){
            Nodohijo$set_added(TRUE)
            pila$add(Nodohijo)
            if(pila$vacia()!=TRUE){
              #cat("el valor en la pila es : -> ",pila$frente()$get_dato(),"\n")
            }
          }
        }#fin for
      }
      # Advance to the node on top of the stack.
      if(pila$vacia()!=TRUE){
        Raiz<-pila$peek();
        cat("Recorrido : -> ",Raiz$get_dato(),"\n")
      }
    }
  }
  #print("sale del while")
  return(0)
}## end DFS
BCU<-function(Raiz,Meta){
  # Uniform-cost search over the global weighted adjacency matrix
  # `Migrafo$matriz` (rows/cols named by node). `Raiz` and `Meta` are node
  # NAMES (strings), unlike BFS/DFS which take Nodo objects.
  # The frontier `cola` is a sorted named numeric vector: names are node
  # labels, values are accumulated path costs; the cheapest entry is cola[1].
  # Extremely verbose: the Spanish cat()/print() calls are live trace output
  # and are part of the observable behavior, so they are kept verbatim.
  cola<-c()
  colavisiatados<-c()   # visited entries (named costs)
  root<-Migrafo$matriz[Raiz,Raiz]   # start with cost 0 (diagonal entry)
  cat("--------","\n")
  names(root)<-c(as.character(Raiz))
  print(root)
  cat("--------","\n")
  cat("--------","\n")
  cola<-c(cola,root)
  print(cola)
  cat("--------","\n")
  while (length(cola)!=0) {
    if(names(root)==Meta){
      print("encontrado")
      break
    }else{
      cat("--------Tiene hijos la raiz ","\n")
      cat("--------","\n")
      print(names(root))
      cat("--------","\n")
      # Row of outgoing edge weights for the current node.
      x<-Migrafo$matriz[names(root),]
      print(x)
      cat("-----fin ---","\n")
      cat("----Tiene hijos la raiz----","\n")
      cat("--------Inicia el valor de peso del padre root ","\n")
      dator<-as.numeric(cola[1])   # cost accumulated so far
      print(dator)
      cat("--------Fin el valor de peso del padre root ","\n")
      cat("--------Inicia el valor de cola visitados -----","\n")
      colavisiatados<-c(colavisiatados,cola[1])
      print(colavisiatados)
      cat("--------finaliza el valor de colas visitados----","\n")
      cola<-cola[-1]   # pop the cheapest node
      verx<-all(x==0)  # TRUE when the node has no outgoing edges
      if (verx!=TRUE) {
        cat("--Inicia ingresea a ordx-----" ,"\n")
        ordx<-c(sort(x[x>0]))   # positive-weight successors, cheapest first
        print(ordx)
        cat("-- finaliza ingresea a ordx-----","\n")
        # Push successors with cost = parent cost + edge weight.
        for (variable in 1:length(ordx)) {
          ordx[variable]<-ordx[variable]+dator
          cola<-c(cola,ordx[variable])
        }
        cat("--Inicia ingresea a cola-----" ,"\n")
        cola<-sort(cola)   # keep the frontier ordered by cost
        print(cola)
        cat("--fin ingresea a cola -----" ,"\n")
        cat("-- Inicia el valor nuevo de root-----" ,"\n")
        root<-cola[1]
        print(root)
        cat("-----finaliza el valor nuevo de root-----" ,"\n")
        # Skip frontier entries whose node was already expanded.
        while (any((names(cola[1])==names(colavisiatados)))==TRUE) {
          cat("--Inicia ingresea a cola cuando enceuntra visitados en la cola[1] -----" ,"\n")
          cat("--antes-----" ,"\n")
          print(cola)
          cat("--antes-----" ,"\n")
          cola<-cola[-1]
          cat("--dspues-----" ,"\n")
          print(root)
          cat("--despues-----" ,"\n")
          root<-cola[1]
          print(cola)
          cat("--finaliza ingresea a cola cuando enceuntra visitados en la cola[1] -----" ,"\n")
        }
      }else{
        # Dead end (all-zero row): just advance to the next frontier entry.
        cat("--Inicia ingresea a cola cuando la fila es 000000 -----" ,"\n")
        print(cola)
        #cola<-cola[-1]
        root<-cola[1]
        cat("nueva cola ","\n")
        print(cola)
        cat("--finaliza ingresea a cola cuando fila es 00000 -----" ,"\n")
      }# end if
    }# end if
    # end iteration
  }
}## end BCU
# Iterative Deepening Search from `Raiz` to `Meta` with maximum depth `Nivel`.
#
# BUGFIX: the original body did not parse (`while (Raiz$==tam)` is a syntax
# error that prevented the whole file from loading) and never used `Nivel`.
# Rewritten as the standard IDS: a depth-limited DFS repeated with limits
# 0..Nivel. The interface and conventions of BFS/DFS are preserved: same
# arguments, prints each expanded node via cat(), returns 0.
IDS <- function(Raiz, Meta, Nivel){
  # Depth-limited DFS helper; returns TRUE once the goal node is reached.
  dls <- function(nodo, limite){
    cat("Recorrido : -> ", nodo$get_dato(), "\n")
    if (nodo$get_dato() == Meta$get_dato()) return(TRUE)
    if (limite <= 0) return(FALSE)
    hijos <- nodo$get_lista()
    for (i in seq_along(hijos)) {
      if (dls(hijos[[i]], limite - 1)) return(TRUE)
    }
    FALSE
  }
  # Deepen one level at a time until the goal is found or Nivel is exhausted.
  for (limite in 0:Nivel) {
    cat("Limite de profundidad : -> ", limite, "\n")
    if (dls(Raiz, limite)) break
  }
  return(0)
}## end IDS
#Busquedas Heuristicas
HCS<-function(Raiz,Meta){
  # Hill-climbing search: same skeleton as BFS() but the frontier is a
  # `colapr` (priority queue, defined elsewhere -- presumably ordered by the
  # node heuristic; TODO confirm against the colapr class).
  # NOTE(review): `cola$vaciar` below is referenced without parentheses, so
  # the method is never actually called -- likely should be `cola$vaciar()`
  # to clear the frontier (pure hill climbing keeps only the best child).
  cola<- colapr$new()
  cola$add(Raiz)
  cat("Recorrido : -> ",Raiz$get_dato(),"\n")
  #cat("el nuevo valor e raiz es : -> ",Raiz$get_dato(),"\n")
  #cat("el valor de la cola al entrar es : -> ",Raiz$get_dato(),"\n")
  while (cola$vacia()!=TRUE) {
    #print("entras \n")
    if(Raiz$get_dato()==Meta$get_dato()){
      cola$remove()
      break
    }else{
      Raiz$set_visitado(TRUE)
      #cat("el valor visiatdo de la Raiz es : -> ",Raiz$get_visited(),"\n")
      #cola$remove()
      cola$vaciar
      #cat("el valor qque se remueve es es : -> ",r,"\n")
      if (length(Raiz$get_lista())!=0 ) {
        # Enqueue every unvisited, not-yet-queued child.
        #cat("el valor de el tam es : -> ",length(Raiz$get_lista()),"\n")
        for (variable in 1:length(Raiz$get_lista())) {
          #cat("el valor de el contador es : -> ",variable,"\n")
          Nodohijo=Raiz$get_lista()[[variable]]
          #cat("el valor de un hijo de un nodo es : -> ",Nodohijo$get_dato(),"\n")
          if (Nodohijo$get_visited() == FALSE && Nodohijo$get_added()==FALSE ){
            Nodohijo$set_added(TRUE)
            cola$add(Nodohijo)
            if(cola$vacia()!=TRUE){
              #cat("el valor en la cola es : -> ",cola$frente()$get_dato(),"\n")
            }
          }
        }
      }
      # Advance to the best node in the priority queue.
      if(cola$vacia()!=TRUE){
        Raiz<-cola$peek();
        cat("Recorrido : -> ",Raiz$get_dato(),"\n")
      }
    }
  }
  #print("sale del while")
  return(0)
}
FBS<-function(Raiz,Meta){
  # Best-first search: currently byte-identical in logic to HCS() above --
  # a BFS skeleton with a `colapr` priority-queue frontier.
  # NOTE(review): as in HCS(), `cola$vaciar` is referenced without
  # parentheses and therefore never called; best-first should NOT clear the
  # frontier, so here the stray line is harmless but should be removed.
  cola<- colapr$new()
  cola$add(Raiz)
  cat("Recorrido : -> ",Raiz$get_dato(),"\n")
  #cat("el nuevo valor e raiz es : -> ",Raiz$get_dato(),"\n")
  #cat("el valor de la cola al entrar es : -> ",Raiz$get_dato(),"\n")
  while (cola$vacia()!=TRUE) {
    #print("entras \n")
    if(Raiz$get_dato()==Meta$get_dato()){
      cola$remove()
      break
    }else{
      Raiz$set_visitado(TRUE)
      #cat("el valor visiatdo de la Raiz es : -> ",Raiz$get_visited(),"\n")
      #cola$remove()
      cola$vaciar
      #cat("el valor qque se remueve es es : -> ",r,"\n")
      if (length(Raiz$get_lista())!=0 ) {
        # Enqueue every unvisited, not-yet-queued child.
        #cat("el valor de el tam es : -> ",length(Raiz$get_lista()),"\n")
        for (variable in 1:length(Raiz$get_lista())) {
          #cat("el valor de el contador es : -> ",variable,"\n")
          Nodohijo=Raiz$get_lista()[[variable]]
          #cat("el valor de un hijo de un nodo es : -> ",Nodohijo$get_dato(),"\n")
          if (Nodohijo$get_visited() == FALSE && Nodohijo$get_added()==FALSE ){
            Nodohijo$set_added(TRUE)
            cola$add(Nodohijo)
            if(cola$vacia()!=TRUE){
              #cat("el valor en la cola es : -> ",cola$frente()$get_dato(),"\n")
            }
          }
        }
      }
      # Advance to the best node in the priority queue.
      if(cola$vacia()!=TRUE){
        Raiz<-cola$peek();
        cat("Recorrido : -> ",Raiz$get_dato(),"\n")
      }
    }
  }
  #print("sale del while")
  return(0)
}
# Bidirectional search between `Raiz` and `Meta`.
#
# BUGFIX: the original body referenced undefined variables (`algo`, `cola`),
# so it either crashed or looped forever. Rewritten as a level-by-level
# bidirectional frontier expansion; prints "encontrado" (same message as the
# original) when the two visited sets intersect. Interface preserved:
# BDD(Raiz, Meta), returns 0.
#
# NOTE(review): both frontiers are expanded through get_lista() (forward
# children). For the backward frontier to follow edges in reverse, `Meta`
# should be the matching node from the reversed graph `nodos2ways` -- confirm
# with the caller.
BDD <- function(Raiz, Meta){
  if (Raiz$get_dato() == Meta$get_dato()) {
    print("encontrado")
    return(0)
  }
  frontera_ini <- list(Raiz)
  frontera_fin <- list(Meta)
  vistos_ini <- c(Raiz$get_dato())
  vistos_fin <- c(Meta$get_dato())
  # Expand one frontier by one level, skipping already-seen node names.
  expandir <- function(frontera, vistos){
    nueva <- list()
    for (nodo in frontera) {
      hijos <- nodo$get_lista()
      for (i in seq_along(hijos)) {
        hijo <- hijos[[i]]
        if (!(hijo$get_dato() %in% vistos)) {
          vistos <- c(vistos, hijo$get_dato())
          nueva[[length(nueva) + 1]] <- hijo
        }
      }
    }
    list(frontera = nueva, vistos = vistos)
  }
  while (length(frontera_ini) > 0 || length(frontera_fin) > 0) {
    if (length(intersect(vistos_ini, vistos_fin)) > 0) {
      print("encontrado")
      return(0)
    }
    paso <- expandir(frontera_ini, vistos_ini)
    frontera_ini <- paso$frontera
    vistos_ini <- paso$vistos
    paso <- expandir(frontera_fin, vistos_fin)
    frontera_fin <- paso$frontera
    vistos_fin <- paso$vistos
  }
  # Final check in case the meeting point was reached on the last expansion.
  if (length(intersect(vistos_ini, vistos_fin)) > 0) print("encontrado")
  return(0)
}# end BDD
# Interactive menu: 1) load nodes from file, 2) load edges, 3) run the search
# methods, 4) quit. Spanish prompt strings are user-facing runtime text and
# are kept verbatim.
# NOTE(review): readline() returns a string, and `"3" != 3` coerces to FALSE,
# so choosing option 3 runs the searches and then also exits the loop --
# probably the condition was meant to be `!= 4`.
while (opcionmenu != 3 ) {
  cat("Menu de Ingreso \n")
  cat("1)cargar Nodos Desde Archivo \n")
  cat("2)cargar Aristas Desde Archivo \n")
  cat("3)Ejecutar Metodos De Busqueda \n")
  cat("4)Salir \n")
  opcionmenu<- readline("Cual elige ? ")
  opciones<-paste0("opcion",opcionmenu)
  switch(opciones,
         opcion1={
           # Build Nodo objects (name + heuristic) for the forward graph
           # `nodos` and an identical set for the reversed graph `nodos2ways`.
           data<-unique(dataf[,c(1,4)])
           n<-nrow(data)
           for (variable in 1:n) {
             nombre_nodo<-data[variable,1]
             heuristica_nodo<-data[variable,2]
             objNodo<-Nodo$new()
             objNodo$set_dato(nombre_nodo)
             objNodo$set_heuristica(heuristica_nodo)
             nodos[[nombre_nodo]]<-objNodo
             #nodos2ways[[variable]]<-objNodo
           }
           for (variable in 1:n) {
             nombre_nodo<-data[variable,1]
             heuristica_nodo<-data[variable,2]
             objNodo<-Nodo$new()
             objNodo$set_dato(nombre_nodo)
             objNodo$set_heuristica(heuristica_nodo)
             nodos2ways[[nombre_nodo]]<-objNodo
           }
         },
         opcion2={
           # Wire parent->child links in `nodos`, child->parent links in
           # `nodos2ways`, and fill the weighted adjacency matrix.
           dataemptyh<-dataf[!((dataf$fin)==""),c(1,2,4)]
           numerofilas<-nrow(dataemptyh)
           solonodos<-dataemptyh[,c(1,2)]
           solonodos
           for (variable in 1:numerofilas) {
             father<-solonodos[variable,1]
             son<-solonodos[variable,2]
             decendiente<-nodos[[son]]
             nodos[[father]]$set_hijos(decendiente)
             adecendiente<-nodos2ways[[father]]
             nodos2ways[[son]]$set_hijos(adecendiente)
           }
           dataemptyp<-dataf[!((dataf$fin)==""),c(1,2,3)]
           Migrafo$get_matrix()
           solonodos<-dataemptyp[,c(1,2,3)]
           for (variable in 1:numerofilas) {
             i<-solonodos[variable,1]
             j<-solonodos[variable,2]
             p<-solonodos[variable,3]
             Migrafo$matriz[i,j]<-p
           }
         },
         opcion3={
           # Plot the graph, prompt for root/goal, and run the searches.
           dataempty<-dataf[!((dataf$fin)==""),c(1,2)]
           vive <-matrix(c(dataempty$Inicio, dataempty$fin), ncol=2)
           head.matrix(vive)
           g<-graph_from_edgelist(vive)
           plot(g)
           cat("Ingrese la raiz -> ")
           R<- readline("Ingrese el nodo Raiz-> : ")
           root<-R
           cat("\n")
           cat("Ingrese la Meta -> ")
           M<- readline("Ingrese el nodo Meta-> : ")
           goal<-M
           NodoR<-nodos[[R]]
           NodoM<-nodos[[M]]
           cat("Busqueda En Amplitud -> \n")
           BFS(NodoR,NodoM)
           limpiar()
           cat("Busqueda En Profundidad -> \n")
           DFS(NodoR,NodoM)
           limpiar()
           cat("Busqueda en costo uniforme -> \n")
           BCU(NodoR$get_dato(),NodoM$get_dato())
         },opcion4={
           break()
         },
  )# end switch
}# end while
|
dd409f8041358c1e440b11df80a3ae5415819684
|
5e71c21eb28c13184810796e95b7a460ecc0d553
|
/code.r
|
dcb4e7f8d7945041d86956ab0980341a273ed9a1
|
[
"Apache-2.0"
] |
permissive
|
FrancescoTranquillo/MethComp
|
8b187bfb6f3fbe912fa228ecb12156dced17147f
|
75ce6f24705bcf1671d403279d618dd673f71970
|
refs/heads/master
| 2021-04-15T08:27:09.525960
| 2018-08-01T15:42:41
| 2018-08-01T15:42:41
| 126,165,859
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 849
|
r
|
code.r
|
# Method-comparison analysis with the MethComp package: Meth plots for a
# range of measurement columns, a Bland-Altman plot and a Passing-Bablok
# regression. Requires "gatti.csv" (semicolon-separated, comma decimals).
#load library
library(MethComp)
#import data
df<-read.csv("gatti.csv", header = TRUE, sep = ";", dec= ",", stringsAsFactors = FALSE)
#
# #ignore columns
# df<-df[,1:22]
#
# head(df)
# str(df)
# #delete record number 99
# df<-df[-c(393:396),]
#
# df_Meth<-Meth(df, 2,1,3,7)
#
# plot(df_Meth)
# Convert each measurement column (4..22) to a Meth object and save one
# plot per column into a single PDF. Meth() args: data, meth=2, item=1,
# repl=3, y=i (column index).
pdf()
for(i in c(4:22)){
  df_Meth<-Meth(df, 2,1,3,i)
  plot(df_Meth)
  title(main=list(colnames(df[i]), col="red"), line=3, adj=0.45)
  print(i)
}
dev.off()
# NOTE(review): the plots below reuse df_Meth from the LAST loop iteration
# (column 22) only -- confirm that is intended.
#Bland-Altman plot
BA.plot(df_Meth, repl.conn=FALSE, meth.names = TRUE)
#Passing-Bablok regression
reg<-PBreg(df_Meth)
print(reg)
plot(reg,subtype= 1, xlim = c(0,50), ylim= c(0,50))
#concordance correlation coefficient agreement = (target measurements, test measurements)
|
fdb9f76386a5a0957d8e76065938da509964b9bc
|
4476502e4fed662b9d761c83e352c4aed3f2a1c2
|
/GIT_NOTE/06_R_Quant/02_종목선택/step26_멀티팩터 포트폴리오.R
|
aa3ea883ced9af35ecb6b486bb26bffa0d090357
|
[] |
no_license
|
yeon4032/STUDY
|
7772ef57ed7f1d5ccc13e0a679dbfab9589982f3
|
d7ccfa509c68960f7b196705b172e267678ef593
|
refs/heads/main
| 2023-07-31T18:34:52.573979
| 2021-09-16T07:45:57
| 2021-09-16T07:45:57
| 407,009,836
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,063
|
r
|
step26_멀티팩터 포트폴리오.R
|
# step26: multi-factor portfolio construction (Korean market data).
#   Quality: return on equity, gross profit, operating cash flow
#   Value: PER, PBR, PSR, PCR
#   Momentum: 3-, 6- and 12-month returns
# (Factor definitions can be changed.)
# Korean column names below ('종목코드' = ticker code, '지배주주순이익' =
# controlling-interest net income, '자본' = equity, '매출총이익' = gross
# profit, '자산' = assets, '영업활동으로인한현금흐름' = operating cash flow)
# are data keys and must stay as-is.
library(xts)
library(stringr)
# Load pre-built data files (financial statements, value ratios, prices,
# ticker list). Assumes dplyr/ggplot2 etc. are attached elsewhere -- %>%,
# mutate_all and ggplot are used before library(dplyr) below; TODO confirm.
KOR_fs = readRDS('data/KOR_fs.Rds')
KOR_value = read.csv('data/KOR_value.csv', row.names = 1,
                     stringsAsFactors = FALSE)
KOR_price = read.csv('data/KOR_price.csv', row.names = 1,
                     stringsAsFactors = FALSE) %>% as.xts()
KOR_ticker = read.csv('data/KOR_ticker.csv', row.names = 1,
                      stringsAsFactors = FALSE)
# Zero-pad ticker codes to 6 digits.
KOR_ticker$'종목코드' =
  str_pad(KOR_ticker$'종목코드', 6, 'left', 0)
# Pick the fiscal-year column: before May, last year's statements may not be
# filed yet, so fall back to the year before.
if ( lubridate::month(Sys.Date()) %in% c(1,2,3,4) ) {
  num_col = str_which(colnames(KOR_fs[[1]]), as.character(lubridate::year(Sys.Date()) - 2))
} else {
  num_col = str_which(colnames(KOR_fs[[1]]), as.character(lubridate::year(Sys.Date()) - 1))
}
# Quality ratios: ROE, GPA, CFO-to-assets.
quality_roe = (KOR_fs$'지배주주순이익' / KOR_fs$'자본')[num_col]
quality_gpa = (KOR_fs$'매출총이익' / KOR_fs$'자산')[num_col]
quality_cfo =
  (KOR_fs$'영업활동으로인한현금흐름' / KOR_fs$'자산')[num_col]
## quality
quality_profit =
  cbind(quality_roe, quality_gpa, quality_cfo) %>%
  setNames(., c('ROE', 'GPA', 'CFO'))
# Combine quality metrics: rank (descending, higher is better), z-score,
# then sum across metrics per stock.
factor_quality = quality_profit %>%
  mutate_all(list(~min_rank(desc(.)))) %>%
  mutate_all(list(~scale(.))) %>%
  rowSums()
# Visualize the distribution.
factor_quality %>%
  data.frame() %>%
  ggplot(aes(x = `.`)) +
  geom_histogram()
## value
# Sum the per-stock Z-scores of the value ratios via rowSums()
# (ascending rank: lower multiples are better).
factor_value = KOR_value %>%
  mutate_all(list(~min_rank(.))) %>%
  mutate_all(list(~scale(.))) %>%
  rowSums()
# Visualize the distribution.
factor_value %>%
  data.frame() %>%
  ggplot(aes(x = `.`)) +
  geom_histogram()
## Return momentum: cumulative return over the trailing 60/120/252 days.
library(PerformanceAnalytics)
library(dplyr)
ret_3m = Return.calculate(KOR_price) %>% xts::last(60) %>%
  sapply(., function(x) {prod(1+x) - 1})
ret_6m = Return.calculate(KOR_price) %>% xts::last(120) %>%
  sapply(., function(x) {prod(1+x) - 1})
ret_12m = Return.calculate(KOR_price) %>% xts::last(252) %>%
  sapply(., function(x) {prod(1+x) - 1})
# Combine the horizons.
ret_bind = cbind(ret_3m, ret_6m, ret_12m) %>% data.frame()
# Normalize (rank descending, z-score) and sum per stock.
factor_mom = ret_bind %>%
  mutate_all(list(~min_rank(desc(.)))) %>%
  mutate_all(list(~scale(.))) %>%
  rowSums()
# Visualize the distribution.
factor_mom %>%
  data.frame() %>%
  ggplot(aes(x = `.`)) +
  geom_histogram()
#상관관계 확인
library(corrplot)
cbind(factor_quality, factor_value, factor_mom) %>%
data.frame() %>%
setNames(c('Quality', 'Value', 'Momentum')) %>%
cor(use = 'complete.obs') %>%
round(., 2) %>%
corrplot(method = 'color', type = 'lower',
addCoef.col = 'black', number.cex = 1,
tl.cex = 1, tl.srt = 0, tl.col = 'black',
col =
colorRampPalette(c('blue', 'white', 'red'))(200),
mar=c(0,0,0.5,0))
factor_qvm =
cbind(factor_quality, factor_value, factor_mom) %>%
data.frame() %>%
mutate_all(list(~scale(.))) %>%
mutate(factor_quality = factor_quality * 0.33, # 강도
factor_value = factor_value * 0.33, # 강도
factor_mom = factor_mom * 0.33) %>% # 강도
rowSums()
#랭킹 기준 상위 30종목
invest_qvm = rank(factor_qvm) <= 30
#포트폴리오 내 종목들을 대상으로 팩터별 대표적인 지표인 ROE, PBR, 12개월 수익률을 나타냈습니다.
KOR_ticker[invest_qvm, ] %>%
select('종목코드', '종목명') %>%
cbind(round(quality_roe[invest_qvm, ], 2)) %>%
cbind(round(KOR_value$PBR[invest_qvm], 2)) %>%
cbind(round(ret_12m[invest_qvm], 2)) %>%
setNames(c('종목코드', '종목명', 'ROE', 'PBR', '12M'))
#마지막으로 포트폴리오 내 종목들의 지표별 평균을 계산한 값입니다
cbind(quality_profit, KOR_value, ret_bind)[invest_qvm, ] %>%
apply(., 2, mean) %>% round(3) %>% t()
|
aed321aeaf3df630f207d0d2b86be097d8ada446
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/longROC/examples/roc.Rd.R
|
5c8bda65d0ece61a6edfa8fcb4937d54e803c309
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,031
|
r
|
roc.Rd.R
|
# Extracted example for longROC::roc(): simulate longitudinal marker data and
# survival times, then draw ROC curves for an informative and an uninformative
# marker.
library(longROC)
### Name: roc
### Title: ROC curve
### Aliases: roc
### ** Examples
# simulation parameters
n=100              # number of subjects
tt=3               # baseline/landmark time -- TODO confirm against ?roc
Tmax=10            # maximum follow-up time
u=1.5              # marker threshold -- TODO confirm against ?roc
s=2                # prediction horizon -- TODO confirm against ?roc
vtimes=c(0,1,2,5)  # visit times at which the marker is recorded
# generate data
ngrid=5000
ts=seq(0,Tmax,length=ngrid)
# X2: informative time-varying marker -- noise around a subject-specific level
# with one random jump of +/-2 at a random time per subject
X2=matrix(rnorm(n*ngrid,0,0.1),n,ngrid)
for(i in 1:n) {
  sa=sample(ngrid/6,1)
  vals=sample(3,1)-1
  X2[i,1:sa[1]]=vals[1]+X2[i,1:sa[1]]
  X2[i,(sa[1]+1):ngrid]=vals[1]+sample(c(-2,2),1)+X2[i,(sa[1]+1):ngrid]
}
# S1: unrelated (pure noise) marker at the visit times
# S2: the informative marker X2 sampled at the visit times
S1=matrix(sample(4,n,replace=TRUE),n,length(vtimes))
S2=matrix(NA,n,length(vtimes))
S2[,1]=X2[,1]
for(j in 2:length(vtimes)) {
  tm=which.min(abs(ts-vtimes[j]))
  S2[,j]=X2[,tm]}
# event times: invert the distribution function implied by X2 via a uniform draw
cens=runif(n)
ripart=1-exp(-0.01*apply(exp(X2),1,cumsum)*ts/1:ngrid)
Ti=rep(NA,n)
for(i in 1:n) {
  Ti[i]=ts[which.min(abs(ripart[,i]-cens[i]))]
}
# independent censoring: delta = 1 for observed events, 0 for censored
cens=runif(n,0,Tmax*2)
delta=ifelse(cens>Ti,1,0)
Ti[cens<Ti]=cens[cens<Ti]
##
## an important marker
ro=roc(S2,Ti,delta,u,tt,s,vtimes)
plot(ro,type="l",col="red")
abline(a=0,b=1)  # no-discrimination reference diagonal
## an unrelated marker
ro=roc(S1,Ti,delta,u,tt,s,vtimes)
plot(ro,type="l",col="red")
abline(a=0,b=1)
|
0d826c4873c16407a8810d61acf3c51109f74745
|
ee63641e44dde1eaa517a412dda43f9578317f0b
|
/geocode-failures/geocoder-misses.R
|
b38187a0b4bad137eba095273ce576d23e8ac513
|
[
"MIT"
] |
permissive
|
Allegheny-CountyStats/Airflow-Docker
|
d1ea9cc6d691c1680ad447f2b4032567a3d4d2cd
|
61623f2fe9080d09e157f8b97930fc33b5b412f3
|
refs/heads/master
| 2023-09-04T05:02:43.629827
| 2023-09-01T20:13:10
| 2023-09-01T20:13:10
| 230,480,811
| 0
| 0
|
MIT
| 2023-09-11T13:32:36
| 2019-12-27T16:57:33
|
R
|
UTF-8
|
R
| false
| false
| 1,241
|
r
|
geocoder-misses.R
|
#!/usr/bin/env Rscript
# Export "geocoder misses": rows of the geocoded companion table (<TABLE>_G)
# whose latitude is NULL, joined back to the source table for context, and
# written to CSV for manual review.
# FIX: use library() (errors on a missing package) instead of require(),
# and disconnect from the warehouse when done (connection was leaked).
library(DBI)
library(dplyr)
# dotenv::load_dot_env()

# Warehouse connection settings, taken from the environment.
wh_host <- Sys.getenv('WH_HOST')
wh_db <- Sys.getenv('WH_DB')
wh_user <- Sys.getenv('WH_USER')
wh_pass <- Sys.getenv('WH_PASS')
# NOTE: env var name is lower-case 'schema' unlike the others; kept as-is for
# backward compatibility. Defaults to 'Master'.
schema <- Sys.getenv('schema', 'Master')
source_table <- Sys.getenv('TABLE')

# Comma-separated column lists, prefixed with the table aliases used in the
# query below: t. = source table, g. = geocoded table.
t_cols <- Sys.getenv('T_COLS')
t_cols <- unlist(strsplit(t_cols, ","))
t_cols <- paste0("t.", paste(t_cols, collapse = ", t."))
g_cols <- Sys.getenv('G_COLS')
g_cols <- unlist(strsplit(g_cols, ","))
g_cols <- paste0("g.", paste(g_cols, collapse = ", g."))
cols <- paste(t_cols, g_cols, sep = ", ")

g_table <- paste0(source_table, "_G")  # geocoded companion table
id <- Sys.getenv('ID')                 # join key column
where <- Sys.getenv('WHERE')           # optional extra predicate
where_stmnt <- ifelse(where == "", "", paste(" AND ", where))

# DB Connection String
wh_con <- dbConnect(odbc::odbc(), driver = "{ODBC Driver 17 for SQL Server}", server = wh_host, database = wh_db, UID = wh_user, pwd = wh_pass)

# SECURITY NOTE: the query is assembled by string concatenation from
# environment variables (identifiers and the WHERE predicate are neither
# quoted nor parameterized), so only trusted values may be supplied.
g_query <- paste0("SELECT ", cols, " FROM ", schema, ".", g_table, " g
                  LEFT JOIN ", schema, ".", source_table, " t
                  ON g.", id, " = t.", id, "
                  WHERE g.latitude IS NULL", where_stmnt)

fails <- dbGetQuery(wh_con, g_query)

# release the warehouse connection before writing results
dbDisconnect(wh_con)

write.csv(fails, "FOOD_FACILITY_FAILURES.csv", row.names = F)
|
c754c9dde6b740a07288702de9b18f6ff2b0881f
|
eac541ea67c4a1dc859218e323be126e0639ff42
|
/R/update_article.R
|
8fa9c9f7a81727243885004c1499d6a418f1daab
|
[
"MIT"
] |
permissive
|
DaveParr/dev.to.ol
|
d1967075ec0febfe5f1643755b868c2568903f7d
|
85bbebfd8f62af6fa6acfec03f8c6151d361bce2
|
refs/heads/main
| 2022-12-03T06:51:03.387115
| 2020-08-05T13:34:13
| 2020-08-05T13:34:13
| 263,746,442
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,974
|
r
|
update_article.R
|
#' @title Update a post on dev.to
#' @description Updates an existing post on dev.to by id with content from an .Rmd
#' @param id The id of the post to update as a string
#' @param file The path to the file
#' @param key Your API key, Default: NA
#' @return A response object
#' @details The id of the post must be supplied, and can be returned by the \code{\link[dev.to.ol]{get_users_articles}} function
#' @examples
#' \dontrun{
#' if(interactive()){
#'  update_article(id = "369520", file = "./article.Rmd")
#'  }
#' }
#' @seealso
#'  \code{\link[rmarkdown]{yaml_front_matter}},\code{\link[rmarkdown]{render}}
#'  \code{\link[purrr]{map}}
#'  \code{\link[stringr]{str_remove}}
#'  \code{\link[readr]{read_file}}
#'  \code{\link[glue]{glue}}
#'  \code{\link[httr]{PUT}},\code{\link[httr]{add_headers}}
#' @rdname update_article
#' @export
#' @importFrom rmarkdown yaml_front_matter render
#' @importFrom purrr map
#' @importFrom stringr str_remove_all str_remove
#' @importFrom readr read_file
#' @importFrom glue glue
#' @importFrom httr PUT add_headers
update_article <- function(id,
                           file,
                           key = NA) {
  check_internet()
  # Validate the input file; called for its side effect only. (The return
  # value was previously bound to an unused local `check_file`.)
  is_postable_Rmd(file)
  # Split the .Rmd into YAML front matter and markdown body.
  article <- parse_article(file = file)
  # PUT the updated article fields to the dev.to API, authenticated via the
  # "api-key" header.
  response <- httr::PUT(
    url = glue::glue("https://dev.to/api/articles/{id}", id = id),
    httr::add_headers("api-key" = api_key(key = key)),
    user_agent,
    body = list(
      article = list(
        title = article$file_frontmatter$title,
        series = article$file_frontmatter$series,
        published = article$file_frontmatter$published,
        tags = article$file_frontmatter$tags,
        body_markdown = article$file_string,
        main_image = article$file_frontmatter$main_image,
        description = article$file_frontmatter$description
      )
    ),
    encode = 'json'
  )
  # Fail loudly on a non-JSON body or an unexpected HTTP status.
  check_json(response)
  check_status(response, operation = "updateArticle", expected = 200)
  response
}
|
4ae2449c409495d5501a098eb9778a58677cb11d
|
b6ea5b277d98f8accad9fe0ddaaa349168067a6b
|
/Code/legacy_code/lehd_sample_code.R
|
62e064207d7f22e2b3abc64d8e701ef5ac29bcdc
|
[] |
no_license
|
shiweipsu/Street-Improvement-Impact
|
55ef214d7ca94c698ef7aa3c2b2732e32e7927cb
|
4ad81501acb383a71ba0e7119d2675b2c5a25331
|
refs/heads/master
| 2021-01-21T22:34:56.614493
| 2019-06-06T23:20:16
| 2019-06-06T23:20:16
| 102,161,316
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 728
|
r
|
lehd_sample_code.R
|
# Pull Oregon LODES workplace-area-characteristic (WAC) data for 2012-2014
# via the lehdr package and attach census-block geometries for mapping.
if(!require(pacman)){install.packages("pacman");library(pacman)}
p_load(readr, stringr, sf, purrr, tigris, dplyr)
options(tigris_class = "sf", tigris_use_cache = TRUE)
devtools::install_github("jamgreen/lehdr")
library(lehdr)

years <- 2012:2014
# one WAC data frame per year (JT00 = all jobs, S000 = all workers)
or_lehd <- map(years, grab_lodes, state = "or", job_type = "JT00", lodes_type = "wac", segment = "S000", download_dir = "Data")
#or_lehd <-lapply(years, grab_wac,state="or")
names(or_lehd) <- years
or_lehd_201214 <- bind_rows(or_lehd)

library(tidycensus)
library(sf)
options(tigris_class = "sf")
or_blocks <- blocks(state = "or")

# attach block geometry to the jobs data by the workplace geocode
or_lehd_201214 <- or_lehd_201214 %>% left_join(or_blocks,by=c("w_geocode"="GEOID10"))
# BUG FIX: the original referenced an undefined object `or_lehd_2014` here;
# the combined 2012-2014 object built above is the one that exists.
or_lehd_201214 <- st_as_sf(or_lehd_201214)
plot(or_lehd_201214[2])
|
720096b2f0091893d7a3edb4cfc9e33b278c6393
|
a5c8aa1ed795d6b34b53ec33bc39a0670e1ca29f
|
/code/04-positioning/42_sequential-g.R
|
cc5ad0145e9504419385314a5ded886da3235765
|
[] |
no_license
|
mikedecr/dissertation
|
6a15f45647f1205f6f3fe901108a5917f231e473
|
06ec3d304821ee09a28e42ba0bc11318d774b5ed
|
refs/heads/main
| 2021-06-24T08:52:45.349588
| 2020-11-14T17:57:54
| 2020-11-14T17:57:54
| 282,011,623
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,313
|
r
|
42_sequential-g.R
|
# ----------------------------------------------------
# implement sequential-g estimator
# post APW this is on the to-do list.
# ----------------------------------------------------
library("here")
library("magrittr")
library("tidyverse")
library("boxr"); box_auth()
library("rstan")
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
library("lme4")
library("tidybayes")
library("broom")
library("scales")
library("latex2exp")
(home <- system("whoami", intern = TRUE) == "michaeldecrescenzo")
# if (home) {
# source(here::here("code", "helpers", "call-R-helpers.R"))
# }
box_mcmc_4 <- 120779787044
mcmc_dir <- file.path("data", "mcmc", "4-positioning")
data_dir <- 102977578033
# ----------------------------------------------------
# data and helpers
# ----------------------------------------------------
# data and ideal point hyperparam estimates
if (home) {
full_data_raw <-
here("data", "_clean", "candidates-x-irt.rds") %>%
read_rds() %>%
print()
theta_stats <-
here("data", "_clean", "ideal-point-priors.rds") %>%
read_rds()
} else {
full_data_raw <- box_read(664519538654) %>% print()
theta_stats <- box_read(706620258916)
}
# ---- inspect and clean data -----------------------
names(theta_stats)
# eventually move this into the merge file?
# small number of duplicate candidates?
full_data_raw %>%
count(bonica_rid_cycle) %>%
filter(n > 1) %>%
semi_join(x = full_data_raw, y = .) %>%
select(recipient_cfscore_dyn)
select(full_data_raw, group, party_num) %>%
distinct() %>%
count(group) %>%
arrange(desc(n))
ggplot(data = full_data_raw) +
aes(x = theta_mean, y = recipient_cfscore_dyn) +
facet_wrap(~ party, scales = "free_x") +
geom_point()
full_data_raw
names(full_data_raw)
full_data_raw %>%
count(cycle, state_abb, primary_rules) %>%
select(-n) %>%
count(cycle, primary_rules)
# ----------------------------------------------------
# compose Stan data
# ----------------------------------------------------
blip_value <- 50
full_data_raw %>% count(tpo)
full_data_raw %>% count(pf)
# check mediator_formula and direct_formula
# in naive-regression.R file
# Making matrices for compose_data
g_data <- full_data_raw %>%
transmute(
group, party_num,
primary_rules_cso, primary_rules_co, prim_rules = primary_rules_cso,
incumbency,
theta_mean,
y = recipient_cfscore_dyn,
mediator = ((rep_pres_vs*100) - blip_value) / 10,
Z_med = c(
out_theta_mean,
as.numeric(cycle == 2014), as.numeric(cycle == 2016)
) %>%
matrix(nrow = n()),
X_trt = c(
as.vector(scale(district_white)),
as.vector(scale(district_latino)),
as.vector(scale(district_college_educ)),
as.vector(scale(district_median_income)),
as.vector(scale(district_poverty)),
as.vector(scale(district_unemployment)),
as.vector(scale(district_service)),
as.vector(scale(district_blue_collar)),
as.vector(scale(district_age_18_to_24)),
as.vector(scale(district_over_65)),
as.vector(scale(district_pop_density)),
as.vector(scale(district_land_area)),
as.integer(tpo == 2),
as.integer(tpo == 3),
as.integer(tpo == 4),
as.integer(tpo == 5),
pf
)
%>%
matrix(nrow = n())
) %>%
na.omit() %>%
arrange(group) %>%
print()
g_data %>% select("X_trt")
# ---- by-party test data -----------------------
# - remember this is a thing you can do: x_at_y(x, y)
# - factoring d again in each party groups group-in-party index
# trying to create theta data using raw draws
testy <- g_data %>%
filter(party_num == 1) %>%
mutate(d = as.factor(group)) %>%
select(-c(starts_with("primary_rules"), party_num, incumbency)) %>%
compose_data(
ideal_means = theta_stats$mean_all$theta_mean[sort(unique(.$group))],
ideal_prec = theta_stats$prec_all[sort(unique(group)), sort(unique(group))]
)
names(testy)
lapply(testy, dim)
lapply(testy, length)
g_data_dem <- g_data %>%
filter(party_num == 1) %>%
mutate(d = as.factor(group)) %>%
select(-c(contains("_rules"), party_num, incumbency)) %>%
compose_data(
.n_name = toupper,
N = length(y),
K_med = ncol(Z_med),
K_trt = ncol(X_trt),
blip_value = 0,
ideal_means = theta_stats$mean_all$theta_mean[sort(unique(.$group))],
ideal_prec = theta_stats$prec_all[sort(unique(group)), sort(unique(group))],
joint_prior = 0,
lkj_value = 50
# group = NULL
)
g_data_rep <- g_data %>%
filter(party_num == 2) %>%
mutate(d = as.factor(group)) %>%
select(-c(contains("_rules"), party_num, incumbency)) %>%
compose_data(
.n_name = toupper,
N = length(y),
K_med = ncol(Z_med),
K_trt = ncol(X_trt),
blip_value = 0,
ideal_means = theta_stats$mean_all$theta_mean[sort(unique(.$group))],
ideal_prec = theta_stats$prec_all[sort(unique(group)), sort(unique(group))],
joint_prior = 0,
lkj_value = 50
)
sum(g_data_dem$group %% 2 != 1)
sum(g_data_rep$group %% 2 != 0)
names(g_data_dem)
names(g_data_rep)
g_data_dem$N
g_data_rep$N
g_data_dem$D
g_data_rep$D
lapply(g_data_dem, head)
lapply(g_data_dem, length)
lapply(g_data_dem, dim)
lapply(g_data_dem, n_distinct)
lapply(g_data_rep, head)
lapply(g_data_rep, length)
lapply(g_data_rep, dim)
lapply(g_data_rep, n_distinct)
# ---- create a data grid with all data and subsets -----------------------
g_grid_data <- g_data %$%
crossing(
party_num,
incumbency = c(incumbency, "All"),
prim_rules = c(prim_rules, "All")
) %>%
group_by_all() %>%
mutate(data = list(g_data)) %>%
mutate(
data = map2(
.x = data,
.y = party_num,
.f = ~ filter(.x, party_num == .y)
),
data = case_when(
incumbency == "All" ~ data,
TRUE ~ map2(
.x = data,
.y = incumbency,
.f = ~ filter(.x, incumbency == .y))
),
data = case_when(
prim_rules == "All" ~ data,
TRUE ~ map2(
.x = data,
.y = prim_rules,
.f = ~ filter(.x, prim_rules == .y))
)
) %>%
filter(incumbency == "All" | prim_rules == "All") %>%
mutate(
stan_data = map(
.x = data,
.f = ~ {
.x %>%
mutate(d = as.factor(group)) %>%
select(-c(starts_with("primary_rules"), party_num, incumbency)) %>%
compose_data(
.n_name = toupper,
N = length(y),
K_med = ncol(Z_med),
K_trt = ncol(X_trt),
blip_value = 0,
ideal_means =
theta_stats$mean_all$theta_mean[sort(unique(.x$group))],
ideal_prec =
theta_stats$prec_all[sort(unique(.x$group)), sort(unique(.x$group))],
joint_prior = 0,
lkj_value = 50
)
}
)
) %>%
print(n = nrow(.))
# save global party data
g_grid_data %>%
box_write(
"stan-data_all.rds",
dir_id = box_mcmc_4
)
# ----------------------------------------------------
# stan model
# ----------------------------------------------------
# ---- does woodbury work -----------------------
N <- g_data_rep$N
D <- g_data_rep$D
L <- matrix(rep(0, N * D), nrow = N)
for (i in 1:N) {
L[i, g_data_rep$d[i]] <- 1
}
L[1:20, 1:10]
(L %*% t(L))[1:20, 1:10]
(t(L) %*% L)[1:10, 1:10]
sig <- .5
tau <- .1
sig^(-2)
tau^(-2)
1 / tau^(2)
NI <- diag(N)
DI <- diag(D)
cov_woodbury <- (sig^2*diag(N)) + L %*% (tau^2*diag(D)) %*% t(L)
cov_woodbury[1:15, 1:15]
# --- original setup ---
big_inv <-
matlib::inv(
( tau^(-2) * diag(D) ) +
(sig^(-2) * t(L)) %*% L
)
prec_woodbury <- sig^(-2)*diag(N) - sig^(-2) * L %*% big_inv %*% t(L) * sig^(-2)
# --- factor inner sigma out (move trailing sigma to front) ---
big_inv <-
matlib::inv(
( (sig/tau)^(2) * diag(D) ) +
(t(L) %*% L)
)
prec_woodbury <- sig^(-2)*diag(N) - sig^(-2) * L %*% big_inv %*% t(L)
# --- big_inv is diag bc L'L is diag, so don't need the inv() ---
(t(L) %*% L)[1:15, 1:15]
colSums(L) # vector of column dot products with themselves
# (in our case, the number of 1s per column)
# diag of L'L
sig^(-2) / ((sig^4 / tau^2) + colSums(L)) # end-around inverting
# must be element division in stan
# (because colsums(L) will be row-vec)
prec_woodbury <- (sig^(-2) * diag(N) - L %*% diag(sig^(-2) / ((sig / tau)^2 + colSums(L))) %*% t(L))
# checking
big_inv[1:15, 1:15]
prec_woodbury[1:15, 1:15]
(cov_woodbury %*% prec_woodbury)[1:15, 1:15] %>% round(5)
# prec_woodbury <- sig^(-2)*NI - L %*% big_inv %*% t(L) * sig^(-2)
length(prec_woodbury[prec_woodbury != 0])
# prec_med =
# pow(sigma_med, -2) * NI -
# pow(sigma_med, -2) * L *
# inv(
# (pow(hypersigma_med, -2) * DI) +
# (pow(sigma_med, -2) * L' * L)
# ) *
# L' * pow(sigma_med, -2);
# ---- end woodbury -----------------------
# ---- compile model -----------------------
# original scale everything
g_FREE <-
stan_model(
here("code", "04-positioning", "stan", "seq-g.stan")
)
# theta and Y standardized
g_ID <-
stan_model(
here("code", "04-positioning", "stan", "seq-g-identified.stan")
)
g_FIX <-
stan_model(
here("code", "04-positioning", "stan", "seq-g-fixtheta.stan")
)
alarm()
# ranefs marginalized (slow slow slow)
# g_marginal <-
# stan_model(
# here("code", "04-positioning", "stan", "seq-g-marginal.stan")
# )
# g_marginal
# ---- sampler wrapper function -----------------------
n_iter <- 2000
n_warmup <- 500
n_chains <- min(parallel::detectCores() - 1, 5)
n_thin <- 1
nuts_adapt_delta <- 0.9
nuts_max_treedepth <- 15
# Wrapper around rstan::sampling() that applies the NUTS settings defined just
# above (n_iter, n_warmup, n_thin, n_chains, nuts_adapt_delta,
# nuts_max_treedepth -- read from the enclosing environment, not passed in)
# and writes a per-run diagnostic log named after the `data` argument.
#
# object: a compiled stanmodel (g_FREE / g_ID / g_FIX)
# data:   a Stan data list built with compose_data()
# ...:    forwarded to rstan::sampling()
# Returns a stanfit object with theta_raw excluded from the saved draws.
sample_g <- function(object = NULL, data = list(), ...) {
  # The log file name embeds the *expression* passed as `data` (via
  # deparse/substitute) plus a timestamp, so each dataset/run gets its own
  # diagnostics file.
  diagnostic_filepath <- here(
    "data", "mcmc", "4-positioning", "logs",
    str_glue("{deparse(substitute(data))}_{lubridate::now()}.txt")
  )
  sampling(
    object = object,
    data = data,
    iter = n_iter, warmup = n_warmup, thin = n_thin, chains = n_chains,
    control = list(
      adapt_delta = nuts_adapt_delta,
      max_treedepth = nuts_max_treedepth
    ),
    diagnostic_file = diagnostic_filepath,
    refresh = round(n_iter / 100),
    # drop the raw (pre-identification) ideal-point draws from the output
    pars = ("theta_raw"),
    include = FALSE,
    ...
  )
}
# ---- variational testing -----------------------
# vb args to consider...
# sample_file (where to save samples)
# importance_resampling (default = FALSE)
# iter
# inits via list: Set inital values by providing a list equal
# in length to the number of chains. The elements of this
# list should themselves be named lists, where each of
# these named lists has the name of a parameter and is used
# to specify the initial values for that parameter for the
# corresponding chain.
# runs democratic test twice to check the convergence stability
vb_dem <- vb(
object = g_ID,
data = g_data_dem,
algorithm = "meanfield",
pars = c("theta_raw", "prec_med", "prec_trt"),
include = FALSE
)
vb_dem_1 <- vb(
object = g_ID,
data = g_data_dem,
algorithm = "meanfield"
)
alarm()
# check stability, compare pt estimates
list(vb_dem, vb_dem_1) %>%
lapply(tidy) %>%
bind_rows(.id = "test") %>%
pivot_wider(
names_from = "test",
values_from = c("estimate", "std.error")
) %>%
pivot_longer(
cols = -term,
names_to = "param",
values_to = "value"
) %>%
mutate(
param = str_remove(param, "[.]"),
test = parse_number(param),
param = str_split(param, pattern = "_", simplify = TRUE)[,1]
) %>%
pivot_wider(
names_from = "test",
values_from = "value",
names_prefix = "test_"
) %>%
ggplot() +
aes(x = test_1, y = test_2) +
geom_point() +
facet_wrap(~ param)
# test republican fit
vb_rep <- vb(
object = g_FIX,
data = g_data_rep,
output_samples = 2000
)
alarm()
# tidy Rs and Ds
vb_tidy <-
list(vb_dem, vb_rep) %>%
lapply(tidy, conf.int = TRUE) %>%
bind_rows(.id = "party_num") %>%
print()
# plot treatment fx
vb_tidy %>%
filter(term %in% c("coef_theta_trt")) %>%
ggplot() +
aes(x = term, y = estimate, color = as.factor(party_num)) +
geom_pointrange(
aes(ymin = conf.low, ymax = conf.high),
position = position_dodge(width = -0.25)
) +
# scale_color_manual(values = party_factor_colors) +
coord_flip(ylim = c(-1, 1))
# plot all terms
vb_tidy %>%
filter(
str_detect(term, "coef") |
str_detect(term, "wt") |
str_detect(term, "sigma") |
str_detect(term, "const")
) %>%
mutate(
prefix = case_when(
str_detect(term, "coef") ~ "Coefs of Interest",
str_detect(term, "wt") ~ "Nuisance Coefs",
str_detect(term, "sigma") ~ "Variance Components",
str_detect(term, "const") ~ "Nuisance Coefs"
)
) %>%
ggplot() +
aes(x = term, y = estimate, color = party_num) +
geom_pointrange(
aes(ymin = conf.low, ymax = conf.high),
position = position_dodge(width = -0.25)
) +
facet_wrap(~ prefix, scales = "free") +
coord_flip() +
# scale_color_manual(values = party_factor_colors) +
NULL
# ---- run VB grid -----------------------
vb_fix <- g_grid_data %>%
mutate(
vb_fix = map(
.x = stan_data,
.f = ~ vb(
object = g_FIX,
data = .x,
pars = c("theta_raw", "prec_med", "prec_trt"),
include = FALSE,
output_samples = 3000
)
)
) %>%
print()
alarm()
vb_random <- g_grid_data %>%
mutate(
vb_random = map(
.x = stan_data,
.f = ~ vb(
object = g_FREE,
data = .x,
pars = c("theta_raw", "prec_med", "prec_trt"),
include = FALSE,
output_samples = 3000
)
)
) %>%
print()
alarm()
vb_id <- g_grid_data %>%
mutate(
vb_id = map(
.x = stan_data,
.f = ~ vb(
object = g_ID,
data = .x,
pars = c("theta_raw", "prec_med", "prec_trt"),
include = FALSE,
output_samples = 3000
)
)
) %>%
print()
alarm()
g_grid_vb <-
full_join(vb_fix, vb_random) %>%
full_join(vb_id) %>%
print()
write_rds(g_grid_vb, here(mcmc_dir, "local_g-grid-vb.rds"))
# box_write(g_grid_vb, "g-grid-vb.rds", dir_id = box_mcmc_4)
# g_grid_vb <-
# here("data", "mcmc", "4-positioning", "g-grid-vb.rds") %>%
# read_rds()
# g_grid_vb %>%
# mutate(
# tidy_vb = map(vbfit, tidy, conf.int = TRUE)
# ) %>%
# unnest(tidy_vb) %>%
# filter(str_detect(term, "coef")) %>%
# ggplot() +
# aes(x = term, y = estimate, color = as.factor(party_num)) +
# facet_grid(incumbency ~ prim_rules) +
# geom_hline(yintercept = 0) +
# geom_pointrange(
# aes(ymin = conf.low, ymax = conf.high),
# position = position_dodge(width = -0.25)
# ) +
# # scale_color_manual(values = party_factor_colors) +
# coord_flip()
# ---- sampling testing -----------------------
mcmc_dem <- sampling(
object = g_ID,
data = g_data_dem,
iter = 100,
refresh = 10L
)
alarm()
# test republican fit
mcmc_rep <- sampling(
object = g_ID,
data = g_data_rep,
iter = 100,
refresh = 10L
)
alarm()
# write_rds(mcmc_dem, here(mcmc_dir, "local_g-mcmc_dem.rds"))
# box_write(mcmc_dem, "g-mcmc_dem.rds", dir_id = box_mcmc_4)
# write_rds(mcmc_rep, here(mcmc_dir, "local_g-mcmc_rep.rds"))
# box_write(mcmc_rep, "g-mcmc_rep.rds", dir_id = box_mcmc_4)
# list(mcmc_dem, mcmc_rep) %>%
# lapply(tidy, conf.int = TRUE, rhat = TRUE, ess = TRUE) %>%
# bind_rows(.id = "party_num") %>%
# arrange((ess)) %>%
# print(n = 100)
# list(mcmc_dem, mcmc_rep) %>%
# lapply(check_hmc_diagnostics)
# ---- MCMC -----------------------
mcmc_party <- g_grid_data %>%
filter(prim_rules == "All" & incumbency == "All") %>%
mutate(
mcmcfit = map(
.x = stan_data,
.f = ~ sample_g(
object = g_ID,
data = .x
)
)
) %>%
print()
alarm()
write_rds(mcmc_party, here(mcmc_dir, "local_mcmc_party.rds"))
mcmc_dem_primary <- g_grid_data %>%
filter(prim_rules %in% c("closed", "semi", "open")) %>%
filter(party_num == 1) %>%
mutate(
mcmcfit = map(
.x = stan_data,
.f = ~ sample_g(
object = g_ID,
data = .x
)
)
) %>%
print()
alarm()
write_rds(mcmc_dem_primary, here(mcmc_dir, "local_mcmc_dem_primary.rds"))
mcmc_rep_primary <- g_grid_data %>%
filter(prim_rules %in% c("closed", "semi", "open")) %>%
filter(party_num == 2) %>%
mutate(
mcmcfit = map(
.x = stan_data,
.f = ~ sample_g(
object = g_ID,
data = .x
)
)
) %>%
print()
alarm()
write_rds(mcmc_rep_primary, here(mcmc_dir, "local_mcmc_rep_primary.rds"))
mcmc_dem_incumbency <- g_grid_data %>%
filter(incumbency %in% c("Incumbent", "Challenger", "Open Seat")) %>%
filter(party_num == 1) %>%
mutate(
mcmcfit = map(
.x = stan_data,
.f = ~ sample_g(
object = g_ID,
data = .x
)
)
) %>%
print()
alarm()
write_rds(mcmc_dem_incumbency, here(mcmc_dir, "local_mcmc_dem_incumbency.rds"))
mcmc_rep_incumbency <- g_grid_data %>%
filter(incumbency %in% c("Incumbent", "Challenger", "Open Seat")) %>%
filter(party_num == 2) %>%
mutate(
mcmcfit = map(
.x = stan_data,
.f = ~ sample_g(
object = g_ID,
data = .x
)
)
) %>%
print()
alarm()
write_rds(mcmc_rep_incumbency, here(mcmc_dir, "local_mcmc_rep_incumbency.rds"))
bind_rows(
mcmc_party,
mcmc_dem_primary,
mcmc_rep_primary,
mcmc_dem_incumbency,
mcmc_rep_incumbency
) %>%
write_rds(here(mcmc_dir, "local_sample_grid.rds"))
# this would be everything all at once
# g_grid_mcmc <- g_grid_data %>%
# mutate(
# mcmcfit = map(
# .x = stan_data,
# .f = ~ sample_g(
# object = g_ID,
# data = .x
# ))
# )
# ) %>%
# print()
# alarm()
# box_write(g_grid_mcmc, "g-grid-mcmc.rds", dir_id = box_mcmc_4)
# ----------------------------------------------------
# lookat mcmc
# ----------------------------------------------------
tidies <- mcmc_party %>%
mutate(
tidy = map(mcmcfit, tidy, conf.int = TRUE, conf.level = 0.9, rhat = TRUE, ess = TRUE)
) %>%
unnest(tidy) %>%
ungroup() %>%
print()
tidies %>%
filter(party_num == 2) %>%
filter(str_detect(term, "theta\\[")) %>%
arrange(term)
|
3f72823f0bb6884e5c1c9b93ce544975c6544078
|
36c629eb5f7829a7465975b06c6fc55f0b90dd0e
|
/R/qmval.R
|
0b733e34a2a53f37edee359184ddaafa0650860c
|
[] |
no_license
|
NickSpyrison/cranvas
|
3971869e5c5d19681b405b59bd6e1e4365ecb7cd
|
929f60dc75a8f34c053d5c3c08427cca170f7aa1
|
refs/heads/master
| 2020-11-30T11:53:47.617494
| 2012-03-30T22:11:05
| 2012-03-30T22:11:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,769
|
r
|
qmval.R
|
##' Draw a missing value plot
##'
##' A missing value plot shows the counts or proportions of missing
##' values in each variable. It is essentially a stacked bar plot,
##' i.e. a bar plot of variables split by the logical vectors of
##' missingness of observations.
##'
##' As usual, common interactions are defined in
##' \code{\link{common_key_press}}. Brushing on a missing value plot
##' has a slightly different meaning from brushing other types of
##' plots: if a rectangle is brushed in a missing value plot, all rows
##' in the original data in which the current variable is brushed
##' (i.e. either missing or non-missing) are brushed; on the other
##' hand, the brushed rows in the original data will also be reflected
##' in the missing value plot.
##'
##' This plot is built upon the bar plot \code{\link{qbar}}.
##' @param vars variables to show in the plot: a character vector of
##' variable names, or a numeric vector of column indices, or a
##' two-sided formula like \code{~ x1 + x2 + x3} (without the
##' left-hand side); see \code{\link{var_names}}
##' @inheritParams qbar
##' @param ... arguments passed to \code{\link{qbar}}
##' @return A missing value plot
##' @author Heike Hofmann and Yihui Xie
##' @export
##' @family plots
##' @example inst/examples/qmval-ex.R
qmval =
  function(vars, data = last_data(), horizontal = TRUE, standardize = TRUE, ...) {
    # the 'Shadow' attribute is the missingness indicator for `data`;
    # absent when the data has no missing values
    shadow = attr(data, 'Shadow')
    vars = var_names(vars, data)
    if (is.null(shadow)) stop('there are no missing values in the data!')
    ## reshape the shadow matrix to a new qdata(): one row per
    ## (observation, variable) pair, colored by its missingness status
    d =
      data.frame(variable = rep(vars, each = nrow(data)),
                 missing = factor(as.vector(shadow[, vars]), c(TRUE, FALSE)))
    nd = qdata(d, color = missing, copy = FALSE)
    ## link nd to data (code borrowed from link.R); change1/change2 act as
    ## re-entrancy guards so the two listeners don't trigger each other
    ## in an infinite loop
    change1 = change2 = FALSE
    add_listener(nd, function(i, j) {
      if (change1) return()
      change2 <<- TRUE
      if (j == '.brushed') {
        # a row of the original data is selected if ANY of its
        # per-variable copies in nd is selected
        selected(data) = apply(matrix(selected(nd), ncol = length(vars)), 1, any)
      } else if (j == '.visible') {
        # ... but visible only if ALL of its per-variable copies are visible
        visible(data) = apply(matrix(visible(nd), ncol = length(vars)), 1, all)
      }
      change2 <<- FALSE
    })
    add_listener(data, function(i, j) {
      if (change2) return()
      change1 <<- TRUE
      if (j == '.brushed') {
        # propagate selection from the original data to every variable copy
        selected(nd) = rep(selected(data), length(vars))
      } else if (j == '.visible') {
        visible(nd) = rep(visible(data), length(vars))
      }
      change1 <<- FALSE
    })
    # the missing value plot itself is a (possibly standardized) bar plot
    # of `variable`, stacked by the `missing` color
    qbar(variable, data = nd, horizontal = horizontal, standardize = standardize, ...)
  }
|
0dfb0e71b19f4d7094b9cd58ecefc36300df9abe
|
8877d31ec9ebe7a0868adfc3d94d57250101807f
|
/plot1.R
|
25eb137b4d5ac532aee8cd3ed6cea08e99f01cba
|
[] |
no_license
|
neelb84/ExData_Plotting1
|
d0ad10571df9eee611fa546f3e9e79a561dd5af1
|
619ca9893190df773a7f2ac12a778a28a22d403c
|
refs/heads/master
| 2021-01-22T12:02:17.728130
| 2015-04-12T18:51:20
| 2015-04-12T18:51:20
| 33,823,730
| 0
| 0
| null | 2015-04-12T16:10:22
| 2015-04-12T16:10:21
| null |
UTF-8
|
R
| false
| false
| 651
|
r
|
plot1.R
|
# plot1.R -- histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# Read the full dataset ("?" marks missing values), then keep the two days.
power_raw <- read.table("household_power_consumption.txt", sep = ";",
                        header = TRUE, na.strings = "?")
obs_dates <- as.Date(power_raw$Date, format = "%d/%m/%Y")
# which() drops NA comparisons, matching subset()'s NA-as-FALSE behavior
two_days <- power_raw[which(obs_dates >= "2007-02-01" &
                            obs_dates <= "2007-02-02"), ]

# Build a combined date-time column from the Date and Time fields.
two_days$DateTime <- strptime(paste(two_days$Date, two_days$Time, sep = " "),
                              format = "%d/%m/%Y %H:%M:%S")

# Write the histogram to a 480x480 PNG.
png(filename = "plot1.png", width = 480, height = 480)
hist(two_days$Global_active_power, col = "Red", main = 'Global Active Power',
     xlab = 'Global Active Power(kilowatts)', ylab = 'Frequency')
dev.off()
|
277de3959e49186ccb3694b39a0538735d9d8d45
|
a3ca886f83e3962f354e209baf11a8a470539de3
|
/ui.R
|
3ca2cf8b8027343dece8dd8c81bac506258af85b
|
[] |
no_license
|
DSCourse001/cars
|
2a7daacfbc605c895232d07f46ab3bbbaa6053d8
|
6ee3cb214714b75fa544c105ac8025359487b843
|
refs/heads/master
| 2021-01-10T17:57:07.315157
| 2015-09-27T22:32:07
| 2015-09-27T22:32:07
| 43,267,838
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 343
|
r
|
ui.R
|
library(shiny)

# UI definition for the cars stopping-distance app.  Built from named pieces
# instead of one deeply nested expression; the rendered page is identical.
controls <- sidebarPanel(
  selectInput("system", "System of Measurement:", c("Metric", "Imperial")),
  uiOutput("speed"),                        # speed input rendered server-side
  downloadButton("savePNG", "Save to PNG")
)

display <- mainPanel(
  plotOutput("displayPlot")
)

shinyUI(
  fluidPage(
    titlePanel("Cars Stopping Distance"),
    sidebarLayout(controls, display)
  )
)
|
c40a14edeec674b557204e0a0d8ebc73f09515ce
|
faa379ebd606e774d3511d09274bc766272a2614
|
/script.R
|
c2dbfd5cce8463fdf4f60b126189e57e30f16a4f
|
[] |
no_license
|
sivacharansrc/Mushroom-Classification
|
5e842c92ddc39e4b7928d5f7a93c402a488608f5
|
737d0f46c86d72a5ddd62b04c0102caa4a0d1ea1
|
refs/heads/master
| 2021-05-05T20:56:02.991069
| 2017-12-28T03:30:01
| 2017-12-28T03:30:01
| 115,401,346
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,347
|
r
|
script.R
|
####### SETTING UP THE ENVIRONMENT

# NOTE(review): rm(list = ls()) wipes the caller's workspace; kept for
# backward compatibility with the original script, but consider dropping it.
rm(list = ls())
options(scipen = 999)  # suppress scientific notation in printed output
library(dplyr)
library(randomForest)
library(caret)

### READING SOURCE FILE ####

# Load the raw mushroom data.  Fix: spell out FALSE -- `F` is an ordinary
# variable that can be reassigned, so relying on it is unsafe.
dataSource <- read.csv("~/R Projects/Mushroom-Classification/src/mushrooms.csv",
                       stringsAsFactors = FALSE)

# Rename the target column: `class` would shadow the base R function name.
df <- dataSource %>% rename(classification = class)
## ---------------------------------------------------------------------------
## Decode the single-letter codes of the raw mushroom data into readable
## labels.  Each `case.*` object is a list of two-sided formulas meant to be
## spliced into dplyr::case_when() via `!!!` in the mutate() call below; the
## final `TRUE ~ ...` entry is the fall-through label for any remaining code.
##
## NOTE(review): the `!!` prefix on each left-hand side is captured literally
## inside the formula (because `~` quotes its operands).  Whether case_when()
## later interprets it as an rlang unquote or evaluates it as a double logical
## negation (which would error on a character column) depends on the
## dplyr/rlang version -- verify against the versions this script targets
## before restructuring any of these lists.
## ---------------------------------------------------------------------------
case.class <- list(
  !! classification == "e" ~ "edible",
  TRUE ~ "poisonous")

case.cap.shape <- list(
  !! cap.shape == "b" ~ "bell",
  !! cap.shape == "c" ~ "conical",
  !! cap.shape == "x" ~ "convex",
  !! cap.shape == "f" ~ "flat",
  !! cap.shape == "k" ~ "knobbed",
  TRUE ~ "sunken")

case.cap.surface <- list(
  !! cap.surface == "f" ~ "fibrous",
  !! cap.surface == "g" ~ "grooves",
  !! cap.surface == "y" ~ "scaly",
  TRUE ~ "smooth")

case.cap.color <- list(
  !! cap.color == "n" ~ "brown",
  !! cap.color == "y" ~ "yellow",
  !! cap.color == "w" ~ "white",
  !! cap.color == "g" ~ "gray",
  !! cap.color == "e" ~ "red",
  !! cap.color == "p" ~ "pink",
  !! cap.color == "b" ~ "buff",
  !! cap.color == "u" ~ "purple",
  !! cap.color == "c" ~ "cinnamon",
  TRUE ~ "green")

case.bruises <- list(
  !! bruises == "t" ~ "bruises",
  TRUE ~ "no")

case.odor <- list(
  !! odor == "a" ~ "almond",
  !! odor == "l" ~ "anise",
  !! odor == "c" ~ "creosote",
  !! odor == "y" ~ "fishy",
  !! odor == "f" ~ "foul",
  !! odor == "m" ~ "musty",
  !! odor == "n" ~ "none",
  !! odor == "p" ~ "pungent",
  TRUE ~ "spicy")

case.gill.attachment <- list(
  !! gill.attachment == "a" ~ "attached",
  !! gill.attachment == "d" ~ "descending",
  !! gill.attachment == "f" ~ "free",
  TRUE ~ "notched")

case.gill.spacing <- list(
  !! gill.spacing == "c" ~ "close",
  !! gill.spacing == "w" ~ "crowded",
  TRUE ~ "distant")

case.gill.size <- list(
  !! gill.size == "b" ~ "broad",
  TRUE ~ "narrow")

case.gill.color <- list(
  !! gill.color == "n" ~ "brown",
  !! gill.color == "y" ~ "yellow",
  !! gill.color == "w" ~ "white",
  !! gill.color == "g" ~ "gray",
  !! gill.color == "e" ~ "red",
  !! gill.color == "p" ~ "pink",
  !! gill.color == "b" ~ "buff",
  !! gill.color == "u" ~ "purple",
  !! gill.color == "c" ~ "cinnamon",
  !! gill.color == "k" ~ "black",
  !! gill.color == "h" ~ "chocolate",
  !! gill.color == "o" ~ "orange",
  TRUE ~ "green")

case.stalk.shape <- list(
  !! stalk.shape == "e" ~ "enlarging",
  TRUE ~ "tapering")

case.stalk.root <- list(
  !! stalk.root == "b" ~ "bulbous",
  !! stalk.root == "c" ~ "club",
  !! stalk.root == "u" ~ "cup",
  !! stalk.root == "e" ~ "equal",
  !! stalk.root == "z" ~ "rhizomorphs",
  !! stalk.root == "r" ~ "rooted",
  TRUE ~ "missing")

case.stalk.surface.above.ring <- list(
  !! stalk.surface.above.ring == "f" ~ "fibrous",
  !! stalk.surface.above.ring == "y" ~ "scaly",
  !! stalk.surface.above.ring == "k" ~ "silky",
  TRUE ~ "smooth")

case.stalk.surface.below.ring <- list(
  !! stalk.surface.below.ring == "f" ~ "fibrous",
  !! stalk.surface.below.ring == "y" ~ "scaly",
  !! stalk.surface.below.ring == "k" ~ "silky",
  TRUE ~ "smooth")

case.stalk.color.above.ring <- list(
  !! stalk.color.above.ring == "n" ~ "brown",
  !! stalk.color.above.ring == "y" ~ "yellow",
  !! stalk.color.above.ring == "w" ~ "white",
  !! stalk.color.above.ring == "g" ~ "gray",
  !! stalk.color.above.ring == "e" ~ "red",
  !! stalk.color.above.ring == "p" ~ "pink",
  !! stalk.color.above.ring == "b" ~ "buff",
  !! stalk.color.above.ring == "c" ~ "cinnamon",
  TRUE ~ "orange")

case.stalk.color.below.ring <- list(
  !! stalk.color.below.ring == "n" ~ "brown",
  !! stalk.color.below.ring == "y" ~ "yellow",
  !! stalk.color.below.ring == "w" ~ "white",
  !! stalk.color.below.ring == "g" ~ "gray",
  !! stalk.color.below.ring == "e" ~ "red",
  !! stalk.color.below.ring == "p" ~ "pink",
  !! stalk.color.below.ring == "b" ~ "buff",
  !! stalk.color.below.ring == "c" ~ "cinnamon",
  TRUE ~ "orange")

case.veil.type <- list(
  !! veil.type == "p" ~ "partial",
  TRUE ~ "universal")

case.veil.color <- list(
  !! veil.color == "n" ~ "brown",
  !! veil.color == "o" ~ "orange",
  !! veil.color == "w" ~ "white",
  TRUE ~ "yellow")

case.ring.number <- list(
  !! ring.number == "n" ~ "none",
  !! ring.number == "o" ~ "one",
  TRUE ~ "two")

case.ring.type <- list(
  !! ring.type == "c" ~ "cobwebby",
  !! ring.type == "e" ~ "evanescent",
  !! ring.type == "f" ~ "flaring",
  !! ring.type == "l" ~ "large",
  !! ring.type == "n" ~ "none",
  !! ring.type == "p" ~ "pendant",
  !! ring.type == "s" ~ "sheathing",
  TRUE ~ "zone")

case.spore.print.color <- list(
  !! spore.print.color == "n" ~ "brown",
  !! spore.print.color == "y" ~ "yellow",
  !! spore.print.color == "w" ~ "white",
  !! spore.print.color == "h" ~ "chocolate",
  !! spore.print.color == "r" ~ "green",
  !! spore.print.color == "u" ~ "purple",
  !! spore.print.color == "b" ~ "buff",
  !! spore.print.color == "k" ~ "black",
  TRUE ~ "orange")

case.population <- list(
  !! population == "a" ~ "abundant",
  !! population == "c" ~ "clustered",
  !! population == "n" ~ "numerous",
  !! population == "s" ~ "scattered",
  !! population == "v" ~ "several",
  TRUE ~ "solitary")

case.habitat <- list(
  !! habitat == "g" ~ "grasses",
  !! habitat == "l" ~ "leaves",
  !! habitat == "m" ~ "meadows",
  !! habitat == "p" ~ "paths",
  !! habitat == "u" ~ "urban",
  !! habitat == "w" ~ "waste",
  TRUE ~ "woods")

## Apply all of the recodings in a single mutate(); `!!!` splices each list
## of formulas into its case_when() call.
df <- mutate(df,
             classification = case_when(!!! case.class),
             cap.shape = case_when(!!! case.cap.shape),
             cap.surface = case_when(!!! case.cap.surface),
             cap.color = case_when(!!! case.cap.color),
             bruises = case_when(!!! case.bruises),
             odor = case_when(!!! case.odor),
             gill.attachment = case_when(!!! case.gill.attachment),
             gill.spacing = case_when(!!! case.gill.spacing),
             gill.size = case_when(!!! case.gill.size),
             gill.color = case_when(!!! case.gill.color),
             stalk.shape = case_when(!!! case.stalk.shape),
             stalk.root = case_when(!!! case.stalk.root),
             stalk.surface.above.ring = case_when(!!! case.stalk.surface.above.ring),
             stalk.surface.below.ring = case_when(!!! case.stalk.surface.below.ring),
             stalk.color.above.ring = case_when(!!! case.stalk.color.above.ring),
             stalk.color.below.ring = case_when(!!! case.stalk.color.below.ring),
             veil.type = case_when(!!! case.veil.type),
             veil.color = case_when(!!! case.veil.color),
             ring.number = case_when(!!! case.ring.number),
             ring.type = case_when(!!! case.ring.type),
             spore.print.color = case_when(!!! case.spore.print.color),
             population = case_when(!!! case.population),
             habitat = case_when(!!! case.habitat))

## randomForest() below expects factor predictors, so convert every column.
df <- data.frame(lapply(df, FUN = as.factor))
#### RANDOM FOREST CLASSIFICATION #####

## SLICING TRAIN AND TEST DATA (~75% train / ~25% test)
set.seed(1)
splitSample <- sample(2, nrow(df), replace = TRUE, prob = c(0.75, 0.25))
train <- df[splitSample == 1, ]
test <- df[splitSample == 2, ]

### RUNNING RANDOM FOREST MODEL WITH DEFAULT PARAMETERS
# Fix: the original call passed a dangling `ntree` symbol with no value
# (`randomForest(..., ntree )`); no object named `ntree` exists, so the call
# errors when that argument is evaluated.  The package default (ntree = 500)
# is used instead, matching the "DEFAULT PARAMETERS" intent above.
set.seed(99)
rfModel <- randomForest(classification ~ ., data = train)
print(rfModel)

### PREDICTION AND CONFUSION MATRIX
# In-sample and held-out accuracy of the fitted forest.
pred1 <- predict(rfModel, train)
confusionMatrix(pred1, train$classification)
pred2 <- predict(rfModel, test)
confusionMatrix(pred2, test$classification)

### Plotting Error Rates
# The OOB-error curve suggests how many trees are needed before the error
# stabilises.
plot(rfModel)
## Tuning data
# The full model is already 100% accurate, so tuneRF() is kept here only as a
# commented-out template for data sets where tuning is actually needed.
#tuned <- tuneRF(train[,-1], train[,1],
#       stepFactor = 0.5,
#       plot = T,
#       ntreeTry = 50,
#       trace = T,
#       improve = 0.05)

# Distribution of tree sizes (terminal-node counts) across the forest.
hist(treesize(rfModel),
     main = "No. of nodes for the Trees",
     col = "blue")

# How often each predictor is used across the forest.
varUsed(rfModel)

#### IMPORTANT VARIABLES FOR PREDICTING THE MODEL
varImp(rfModel)
varImpPlot(rfModel)

# Top predictors chosen from the importance ranking above.  match() maps the
# names to column positions in one step, replacing ten repeated
# which(names(train) == ...) calls with identical results.
importantVars <- c("odor", "spore.print.color", "gill.color", "gill.size",
                   "stalk.surface.above.ring", "ring.type",
                   "stalk.surface.below.ring", "population", "habitat",
                   "stalk.root")
keepCols <- match(importantVars, names(train))

cols <- c("odor", "spore.print.color", "gill.color", "stalk.surface.above.ring")

# Refit on the reduced predictor set.  NOTE(review): ntree = 1 builds a single
# tree -- presumably intentional, to test how far the top variables alone can
# go; confirm before relying on it.
tunedModel <- randomForest(train[, keepCols], y = train[, 1], data = train,
                           ntree = 1)
print(tunedModel)

pred1 <- predict(tunedModel, train)
confusionMatrix(pred1, train$classification)
pred2 <- predict(tunedModel, test)
confusionMatrix(pred2, test$classification)

varImp(tunedModel)
varUsed(tunedModel)

## Internal consistency (Cronbach's alpha) of the predictors on an
## integer-coded copy of the training data.
library(psych)
intTrain <- data.frame(lapply(train, FUN = as.integer))
parameters <- intTrain[, -1]
cronAlpha <- psych::alpha(parameters)
|
1749ffb78fd8f40928209106bb789677857dd9af
|
58785a9764947a6b9d448d0816ede97b2c22063e
|
/plot1.R
|
704325aa9dab086f93825ea8f70a985cf96f7a15
|
[] |
no_license
|
ErazeX/ExData_Plotting1
|
cfe134bd9b0956dd2db45d46d91f83dc1c2bbde2
|
055c7d2f9bb8130af384bc0c3aa10f19d96bd3f2
|
refs/heads/master
| 2021-01-24T22:02:00.452206
| 2015-05-10T21:30:41
| 2015-05-10T21:30:41
| 26,224,135
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 519
|
r
|
plot1.R
|
# Histogram of Global Active Power over 2007-02-01 .. 2007-02-02, written to
# a 480x480 PNG.
power <- read.csv("~/R working/household_power_consumption.txt", sep = ";",
                  stringsAsFactors = FALSE)

# Parse the Date column and keep only the two target days.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
graphdata <- power[power$Date >= as.Date("2007/2/1") & power$Date <= as.Date("2007/2/2"), ]

# The column arrives as character (the raw file uses "?" for missing values),
# so coerce to numeric before plotting.
graphdata$Global_active_power <- as.numeric(graphdata$Global_active_power)

png(filename = "~/R working/plot1.png", width = 480, height = 480)
# Fix: dropped the trailing comma from the original hist() call
# (`col="red",)`), which passed an empty (missing) argument through `...`.
hist(graphdata$Global_active_power, main = "Global Active Power",
     ylab = "Frequency", xlab = "Global Active Power (Kilowatts)", col = "red")
dev.off()
|
2502b06c649caa2743252c4e6776e95991478d02
|
4906c11ac419e3c0d7d17471e8490468f6d62ebf
|
/man/extract_length.Rd
|
fe92fab763639a7e91f4574476c63e4a72c7c7a9
|
[] |
no_license
|
joelpick/measuRe
|
22648c95a38e1d95d5b28f9dce4d39d67227ef0b
|
a561071a24f908d557a4700993b57468fd031152
|
refs/heads/master
| 2022-12-22T05:14:32.099127
| 2020-09-24T14:47:56
| 2020-09-24T14:47:56
| 292,667,851
| 1
| 2
| null | 2020-09-06T18:44:11
| 2020-09-03T20:00:13
|
R
|
UTF-8
|
R
| false
| true
| 391
|
rd
|
extract_length.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/length_area_functions.R
\name{extract_length}
\alias{extract_length}
\title{extract_length Function}
\usage{
extract_length(data, metric = "pixels", pixels_per = NULL)
}
\arguments{
\item{data}{Data object containing the plotted line to be measured.}
\item{metric}{Unit of measurement for the returned length; default \code{"pixels"}.}
\item{pixels_per}{Optional pixels-per-unit conversion factor; default \code{NULL}.}
}
\description{
Extract total length of plotted line in pixels
}
\examples{
#extract_length()
}
|
0a017e18a1a629e715a9df6f221d358d081410d3
|
de2452990be8fa6f7b325f8777d520f61725384a
|
/predict-455/assignment3/assignment_3_jump_start_V003.r
|
7efc21d79d9fe0860e78ee1f48e44a3a3a638db7
|
[] |
no_license
|
retropean/mspa-apps
|
7be84f78567be03b2e369f441afb47f33da14faf
|
96732c38be3a2ef85893f19a9485bdcb91dfd781
|
refs/heads/master
| 2021-01-22T23:53:48.961671
| 2018-06-25T18:54:54
| 2018-06-25T18:54:54
| 33,203,196
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,759
|
r
|
assignment_3_jump_start_V003.r
|
# Week 6
# Individual Assignment 3
# Jump Start Code v3

# Set working directory.
# NOTE(review): hard-coded, machine-specific path -- anyone reusing this
# script must change it (or remove the setwd() call entirely).
setwd("C:/Users/00811289/Desktop/assignment3")

# Visualizing Time Jump-Start Code for Financial Time Series

# begin by installing the packages quantmod, lubridate, latticeExtra, and zoo
# install.packages("quantmod")
# install.packages("lubridate")
# install.packages("latticeExtra")
# install.packages("zoo")

library(quantmod) # use for gathering and charting economic data
library(lubridate) # date functions
library(latticeExtra) # package used for horizon plot
library(zoo) # utilities for working with time series

# ---------------------------------------
# here we demonstrate the wonders of FRED
# ---------------------------------------
# demonstration of R access to and display of financial data from FRED --
# requires a connection to the Internet.
# economic research data from the Federal Reserve Bank of St. Louis
# see documentation of tags at http://research.stlouisfed.org/fred2/
# if you choose a particular series and click on it a graph will be displayed;
# in parentheses in the title of the graph will be the symbol
# for the financial series.  Some time series are quarterly, some monthly,
# others weekly -- so make sure the time series match up in time.
# see the documentation for quantmod at
# <http://cran.r-project.org/web/packages/quantmod/quantmod.pdf>
# NOTE(review): getSymbols() assigns each series (e.g. UMCSENT) into the
# calling environment as a side effect -- the bare symbol names used below
# depend on that behavior.

# University of Michigan: Consumer Sentiment, not seasonally adjusted (monthly, 1966 = 100)
getSymbols("UMCSENT",src="FRED",return.class = "xts")
print(str(UMCSENT)) # show the structure of this xts time series object
# plot the series
chartSeries(UMCSENT,theme="white")

# Consumer Price Index for All Urban Consumers, per the CPIAUCNS FRED tag
getSymbols("CPIAUCNS",src="FRED",return.class = "xts")
print(str(CPIAUCNS)) # show the structure of this xts time series object
# plot the series
chartSeries(CPIAUCNS,theme="white")

# New Homes Sold in the US, not seasonally adjusted (monthly, thousands)
getSymbols("HSN1FNSA",src="FRED",return.class = "xts")
print(str(HSN1FNSA)) # show the structure of this xts time series object
# plot the series
chartSeries(HSN1FNSA,theme="white")

# ---------------------------------------
# here we demonstrate Yahoo! finance
# ---------------------------------------
# stock symbols for companies can be obtained from Yahoo! at
# <http://finance.yahoo.com/lookup>
# (default data source for getSymbols() when src is not given)

# get Apple stock price data
getSymbols("AAPL",return.class = "xts")
print(str(AAPL)) # show the structure of this xts time series object
# plot the series stock price
chartSeries(AAPL,theme="white")

# get IBM stock price data
getSymbols("IBM",return.class = "xts")
print(str(IBM)) # show the structure of this xts time series object
# plot the series stock price
chartSeries(IBM,theme="white")
|
924e9ee7fa86ab397096646dd474c6770e5c5392
|
1170116acf04e3e7d5baf8563fd36ee313917573
|
/man/bench_time.Rd
|
4e45a7bf4d7d53bbf983190b52e510b730d151d1
|
[
"MIT"
] |
permissive
|
r-lib/bench
|
9cbd5403ea2ac07c38066fd922edd0af756f064c
|
8d4ab5ea8219f00cc476a4702df91d2b18f47b12
|
refs/heads/main
| 2023-05-11T03:42:08.981583
| 2023-05-04T17:09:03
| 2023-05-04T17:09:03
| 128,975,118
| 218
| 33
|
NOASSERTION
| 2023-05-04T17:07:41
| 2018-04-10T18:01:13
|
R
|
UTF-8
|
R
| false
| true
| 1,013
|
rd
|
bench_time.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bench_time.R
\name{bench_time}
\alias{bench_time}
\alias{system_time}
\title{Measure Process CPU and real time that an expression used.}
\usage{
bench_time(expr)
}
\arguments{
\item{expr}{An expression to be timed.}
}
\value{
A \code{bench_time} object with two values.
\itemize{
\item \code{process} - The process CPU usage of the expression evaluation.
\item \code{real} - The wallclock time of the expression evaluation.
}
}
\description{
Measure Process CPU and real time that an expression used.
}
\details{
On some systems (such as macOS) the process clock has lower
precision than the realtime clock, as a result there may be cases where the
process time is larger than the real time for fast expressions.
}
\examples{
# This will use ~.5 seconds of real time, but very little process time.
bench_time(Sys.sleep(.5))
}
\seealso{
\code{\link[=bench_memory]{bench_memory()}} To measure memory allocations for a given expression.
}
|
2a7951c8a39580f591c97b425fa17bc166bc6dd4
|
c789f078b8eeef6219d088822a6c8f9ae091b3dd
|
/R/rmvhyper.R
|
3ced2cfcf471ad5aa47dcfee237811df92db8918
|
[] |
no_license
|
cran/tpsDesign
|
ba6077aae84e503f5dd82b19bf0a9c52885b59f9
|
a4c17859df70bcd96af4c51af20c773bf0b9c8f4
|
refs/heads/master
| 2016-09-06T08:36:16.915845
| 2011-06-09T00:00:00
| 2011-06-09T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 422
|
r
|
rmvhyper.R
|
rmvhyper <- function(Mk, m) {
  ## Draw one sample from a multivariate hypergeometric distribution:
  ## `Mk` gives the category sizes, `m` the number of items drawn without
  ## replacement.  Returns a vector of per-category counts summing to m
  ## (or to sum(Mk) when m exceeds the population size).
  ## Sampling is done category by category with rhyper(); the call sequence
  ## is identical to the original, so results match for a given RNG seed.
  K <- length(Mk)
  M <- sum(Mk)

  ## Asking for at least the whole population returns every item.
  if (m > M) {
    return(Mk)
  }
  ## With a single category all m draws come from it.
  if (K == 1) {
    return(m)
  }

  mk <- rep(0, K)
  ## First category: draws from Mk[1] "white" vs the remaining M - Mk[1].
  mk[1] <- rhyper(1, Mk[1], M - Mk[1], m)
  if (K > 2) {
    ## Middle categories, conditional on what earlier categories consumed.
    for (j in 2:(K - 1)) {
      mk[j] <- rhyper(1, Mk[j], M - sum(Mk[1:j]), m - sum(mk[1:(j - 1)]))
    }
  }
  ## Last category takes whatever remains of the m draws.
  mk[K] <- m - sum(mk[1:(K - 1)])
  mk
}
|
7915767b0a4fcdb44f40a3ff798dbd456bc11924
|
5febc1e3f2dd766ff664f8e0ae79002072359bde
|
/man/mcell_calc_one_batch_stats.Rd
|
6d7c80c65e85071897f45c83ca7efcb1c8c4d4fb
|
[
"MIT"
] |
permissive
|
tanaylab/metacell
|
0eff965982c9dcf27d545b4097e413c8f3ae051c
|
ff482b0827cc48e5a7ddfb9c48d6c6417f438031
|
refs/heads/master
| 2023-08-04T05:16:09.473351
| 2023-07-25T13:37:46
| 2023-07-25T13:37:46
| 196,806,305
| 89
| 30
|
NOASSERTION
| 2023-07-25T13:38:07
| 2019-07-14T07:20:34
|
R
|
UTF-8
|
R
| false
| true
| 475
|
rd
|
mcell_calc_one_batch_stats.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scmat_batch_stat.r
\name{mcell_calc_one_batch_stats}
\alias{mcell_calc_one_batch_stats}
\title{calc batch stats - essentially umi distribution}
\usage{
mcell_calc_one_batch_stats(scmat, batch_id)
}
\arguments{
\item{scmat}{- a matrix object}
\item{batch_id}{- the batch name to summarize}
}
\value{
one row data frame for merging
}
\description{
calc batch stats - essentially umi distribution
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.