blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4d8bd3308f192a8a14eb90929de6d93d0074edb4
|
7ec48368e8887b645d9274fe02e89d8d003624fb
|
/data/transforms/building_assignment.R
|
5cb366fb17ff8a43f70eb89fa278e48cf171ca7c
|
[] |
no_license
|
kurtis-s/ds_at_scale_capstone
|
5b5c65bfd237f0e23396906ababa454d263f02cf
|
56c9a8b792a64b2c0b948bbb02227d1757c12325
|
refs/heads/master
| 2020-09-28T04:17:30.434466
| 2016-10-09T20:40:03
| 2016-10-09T20:40:03
| 66,779,167
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 857
|
r
|
building_assignment.R
|
# building_assignment.R -- assign each observation to its nearest building.
# NOTE(review): rm(list = ls()) at the top of a script clobbers the caller's
# entire workspace (everything except `datenv`); generally discouraged in
# shared code.
rm(list=ls()[ls() != "datenv"])
library(sp)
library(rgdal)
library(RANN)
# NOTE(review): `%>%` and select() below come from dplyr, which is not loaded
# here -- presumably utilities.R (or an earlier script) attaches it; confirm.
source("utilities.R")
# Load the transformed data into a fresh environment only if a previous run
# has not already populated `datenv`.
if(!exists("datenv")) {
datenv <- new.env()
read_transformed_dat(datenv)
}
# Nearest neighbors -------------------------------------------------------
# x/y centre coordinates of every building parcel; used as the search set
# for the nearest-neighbour lookup below.
building_coords <- datenv$dat_parcels_transform %>% select(x, y)
# Assign each observation in `dat` to its nearest building parcel centre.
#
# Adds a `BuildID` column holding the ID of the closest parcel, or NA when
# the nearest centre is further than `max_dist` away (such an observation is
# considered "too far" to belong to any building).
#
# Args:
#   dat:      data frame with numeric `x` and `y` coordinate columns.
#   max_dist: distance cutoff in the same units as the coordinates; the
#             default 30 (30 meters ~ 100 feet, per the original comment)
#             preserves the previously hard-coded behavior.
#
# Returns `dat` with the `BuildID` column added.
assign_building_id <- function(dat, max_dist = 30) {
coords <- dat %>% select(x, y)
nearest <- nn2(building_coords, coords, k=1)
# The comparison already yields TRUE/FALSE (and propagates NA), so the
# former ifelse(..., TRUE, FALSE) wrapper was redundant.
too_far <- nearest$nn.dists > max_dist
nearest_building_IDs <- datenv$dat_parcels_transform$ID[nearest$nn.idx]
dat$BuildID <- ifelse(too_far, NA, nearest_building_IDs)
return(dat)
}
# NOTE(review): lapply() coerces the environment via as.list(), so this
# replaces `datenv` with a plain list (element order not guaranteed) of data
# frames, each gaining a BuildID column -- confirm downstream code expects a
# list rather than an environment from here on.
datenv <- lapply(datenv, assign_building_id)
|
361a1d27a36cd88941dadc80edd3fcdc166b5c39
|
b4c5cbf7ec7975aa5d5b4a7b5d7e9cfd0a9ca4ca
|
/process-data/03-regions-map.R
|
1fabbb28bbbd2b41add37b32d5ff91a70c18ca3b
|
[
"CC0-1.0"
] |
permissive
|
marioalbertod/chilemaps
|
162774c2aab7224fa036dadc677d43efd123c353
|
9fd26010eb557290e281cd73a168d2d56b2c748c
|
refs/heads/master
| 2020-07-01T23:40:20.036750
| 2019-05-17T19:11:47
| 2019-05-17T19:11:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,353
|
r
|
03-regions-map.R
|
# Packages and functions --------------------------------------------------
# The sourced scripts below presumably define the helpers (aggregate_communes,
# add_col, move_cols, save_as_geojson/save_as_topojson), the communes maps,
# the region_attributes_* vectors, and the *_dir output paths -- TODO confirm.
source("process-data/00-functions.R")
source("process-data/01-download-data.R")
source("process-data/02-communes-map.R")
# Merge comuna shapes -----------------------------------------------------
# Old regionalisation: aggregate commune polygons into regions, attach the
# region id/name columns, and cache the result as an .rda so later runs just
# load it instead of rebuilding.
old_simplified_regions_map_file <- sprintf("%s/old_simplified_regions_map.rda", data_dir)
if (!file.exists(old_simplified_regions_map_file)) {
old_simplified_regions_map <- map(old_simplified_communes_map, aggregate_communes)
old_simplified_regions_map <- map2(old_simplified_regions_map, region_attributes_id, ~add_col(.x, .y, col = "region_id"))
old_simplified_regions_map <- map2(old_simplified_regions_map, region_attributes_name, ~add_col(.x, .y, col = "region_name"))
old_simplified_regions_map <- map(old_simplified_regions_map, move_cols)
save(old_simplified_regions_map, file = old_simplified_regions_map_file, compress = "xz")
} else {
load(old_simplified_regions_map_file)
}
# New regionalisation: same pipeline with the new communes map and the
# *_new attribute vectors.
new_simplified_regions_map_file <- sprintf("%s/new_simplified_regions_map.rda", data_dir)
if (!file.exists(new_simplified_regions_map_file)) {
new_simplified_regions_map <- map(new_simplified_communes_map, aggregate_communes)
new_simplified_regions_map <- map2(new_simplified_regions_map, region_attributes_id_new, ~add_col(.x, .y, col = "region_id"))
new_simplified_regions_map <- map2(new_simplified_regions_map, region_attributes_name_new, ~add_col(.x, .y, col = "region_name"))
new_simplified_regions_map <- map(new_simplified_regions_map, move_cols)
save(new_simplified_regions_map, file = new_simplified_regions_map_file, compress = "xz")
} else {
load(new_simplified_regions_map_file)
}
# Save as geo/topo json ---------------------------------------------------
# One GeoJSON and one TopoJSON file per region, named r<region_id>.<ext>.
map2(
old_simplified_regions_map,
sprintf("%s/r%s.geojson", simplified_regions_geojson_old_dir, region_attributes_id),
save_as_geojson
)
map2(
old_simplified_regions_map,
sprintf("%s/r%s.topojson", simplified_regions_topojson_old_dir, region_attributes_id),
save_as_topojson
)
map2(
new_simplified_regions_map,
sprintf("%s/r%s.geojson", simplified_regions_geojson_new_dir, region_attributes_id_new),
save_as_geojson
)
map2(
new_simplified_regions_map,
sprintf("%s/r%s.topojson", simplified_regions_topojson_new_dir, region_attributes_id_new),
save_as_topojson
)
|
189478f478da3ca5b94c4bf0b4f905a9a9728ada
|
26a5d277a197cf17b024f0ed24f93635dc27e294
|
/R/write.R
|
ab6950b6142a0663041897c0bb5a4d1d9236403a
|
[
"MIT"
] |
permissive
|
ricoperdiz/NIRtools
|
152aacd154c7a13e3ecbe8a650438b697cb7b814
|
70ed6deea7dc9195bcebcdf3dd52e8bfa709d960
|
refs/heads/master
| 2023-06-07T19:12:23.666497
| 2022-12-15T18:44:28
| 2022-12-15T18:44:28
| 150,185,336
| 2
| 2
| null | 2021-02-17T21:45:45
| 2018-09-25T00:28:50
|
R
|
UTF-8
|
R
| false
| false
| 4,968
|
r
|
write.R
|
#' Write NIR parameters file to disk
#'
#' Validates the supplied options and writes a plain-text parameters file
#' named `<file>-NIRparams.txt` into `wd`. Empty options are written out as
#' a single blank so the file keeps one token per line.
#'
#' @param file Dataset name. Defaults to "NIR_dataset" when empty.
#' @param wd Location of working directory. If not provided, the current working directory will be used.
#' @param surface Which leaf surface (abaxial, adaxial, both).
#' @param reads Will all the reads be taken into account (all, mean)?
#' @param nir_variables Which NIR variables will be used (all, subset).
#' @param subset_file Location of NIR variable subset file, in case option selected in `nir_variables` was `subset`.
#' @param individual_id Which variable name corresponds to the individual?
#' @param individual_list Location of a file containing list of specimens to subset data when building datasets with `build_NIRdataset` function.
#' @param surface_id Which variable name corresponds to the leaf surface?
#' @param group_id Which variable name corresponds to the group category?
#' @param nir_id A string that can be used to grep column names containing NIR data. Default value is `X`, which precedes all columns with NIR data.
#'
#' @return A message that indicates the file has been saved to disk.
#' @export
#' @examples
#' \dontrun{
#' write_NIRparams(file = "teste", wd = ".",
#'   reads = "mean", surface = "abaxial",
#'   nir_variables = "all", surface_id = "face",
#'   individual_id = "especimenid",
#'   individual_list = NULL, group_id = "SP1",
#'   nir_id = "X")
#' read_NIRparams("teste-NIR.txt")
#' readLines("teste-NIR.txt")
#' }
write_NIRparams <- function(file = "", wd = ".", surface = "", reads = "", nir_variables = "", subset_file = "", individual_id = "", individual_list = NULL, surface_id = "", group_id = "", nir_id = "X") {
  # Allowed values for the enumerated options.
  surface_var <- c("abaxial", "adaxial", "both")
  reads_var <- c("all", "mean")
  nir_var <- c("all", "subset")
  #### file ####
  if (file == "") {
    file_name <- "NIR_dataset"
  } else {
    file_name <- file
  }
  #### wd ####
  if (wd == ".") {
    wd <- getwd()
  }
  #### surface ####
  # Empty -> single blank placeholder; anything else must be a known option.
  if (surface == "") {
    surface <- " "
  } else if (!surface %in% surface_var) {
    stop("Invalid `surface` value")
  }
  # A specific surface (abaxial/adaxial) requires knowing which column
  # identifies the surface.
  if (surface %in% surface_var[1:2]) {
    if (surface_id == "") {
      stop("Argument `surface_id` is empty. You must supply a value for `surface_id`!")
    }
  }
  #### reads ####
  if (reads == "") {
    reads <- " "
  } else if (!reads %in% reads_var) {
    stop("Invalid `reads` value")
  }
  #### nir_variables ####
  if (nir_variables == "") {
    nir_variables <- " "
  } else if (!nir_variables %in% nir_var) {
    stop("Invalid `nir_variables` value")
  }
  #### subset_file ####
  if (subset_file == "") {
    subset_file <- " "
  }
  #### individual_id ####
  if (individual_id == "") {
    individual_id <- " "
  }
  #### individual_list ####
  # When supplied, it must point to a readable file.
  if (!is.null(individual_list)) {
    if (file.access(individual_list) != 0) {
      stop("If supplied, `individual_list` must be a path to a file containing a specimen identifier, one per line.")
    }
  } else {
    individual_list <- " "
  }
  #### surface_id ####
  if (surface_id == "") {
    surface_id <- " "
  }
  #### group_id ####
  if (group_id == "") {
    group_id <- " "
  }
  params_file <- paste0(file_name, "-NIRparams.txt")
  file_full <- paste0(wd, "/", params_file)
  # Write the params file in one shot with writeLines() instead of sink():
  # the former sink() was not protected by on.exit(), so an error between
  # sink() and sink(NULL) left the diversion active, and closeAllConnections()
  # also closed connections the caller owned. File content is unchanged.
  params_lines <- c(
    "# NIR dataset description - Dataset name and variables for its construction",
    paste0(file_name, " ## [dataset_name]: Dataset name."),
    paste0(wd, " ## [working_dir]: Location of working directory. If not provided, current working directory will be used one"),
    paste0(surface, " ## [surface]: Which leaf surface (abaxial, adaxial, both)"),
    paste0(reads, " ## [reads]: Do all the reads will be taken into account (all, mean)?"),
    paste0(nir_variables, " ## [nir_variables]: Which NIR variables will be used (all, subset)"),
    paste0(subset_file, " ## [subset_file]: Location of NIR variable subset file, in case option selected in `nir_variables` was `subset`"),
    paste0(individual_id, " ## [individual_id]: Which variable name corresponds to the individual?"),
    paste0(individual_list, " ## [individual_list]: Location of a file containing list of specimens to subset data when building datasets with `build_NIRdataset` function."),
    paste0(surface_id, " ## [surface_id]: Which variable name corresponds to the leaf surface?"),
    paste0(group_id, " ## [group_id]: Which name corresponds to the group category?"),
    paste0(nir_id, " ## [nir_id]: A string that can be used to grep column names containing NIR data. Default value is '`X`, which precedes all columns with NIR data.")
  )
  writeLines(params_lines, file_full)
  message("New file '", params_file, "' written in directory ", wd)
}
|
ad8ab3f73d0eba36617750f4f48020e2123b43fa
|
68ccdf6931c377c3922dea0d2fcc15a660002e09
|
/R/seqFISH.R
|
4c9f96f8fa8d5a466781e47ba8f507da2e1728ec
|
[] |
no_license
|
waldronlab/SingleCellMultiModal
|
476a71b9df8bdce939f47d84a7c34d1fe1088750
|
409a6e0d8a152449fbb92c206d22c41465995849
|
refs/heads/devel
| 2023-08-08T17:00:36.498649
| 2023-07-11T21:44:48
| 2023-07-11T21:44:48
| 222,560,510
| 15
| 9
| null | 2023-07-11T21:36:00
| 2019-11-18T22:53:03
|
R
|
UTF-8
|
R
| false
| false
| 4,297
|
r
|
seqFISH.R
|
#' Single-cell spatial + Gene Expression
#'
#' @description The seqFISH function assembles a
#'     \linkS4class{MultiAssayExperiment} container on-the-fly from
#'     `ExperimentHub` resources. The `DataType` argument selects which of
#'     the datasets associated with the package is retrieved.
#' @details seq-FISH data are a combination of single cell spatial coordinates
#'     and transcriptomics for a few hundreds of genes.
#'     seq-FISH data can be combined for example with scRNA-seq data to unveil
#'     multiple aspects of cellular behaviour based on their spatial
#'     organization and transcription.
#'
#' Available datasets are:
#' \itemize{
#'     \item{mouse_visual_cortex: } combination of seq-FISH data as obtained
#'     from Zhu et al. (2018) and scRNA-seq data as obtained from
#'     Tasic et al. (2016).
#'     Version 1.0.0 returns the full scRNA-seq data matrix, while version
#'     2.0.0 returns the processed and subsetted scRNA-seq data matrix
#'     (produced for the Mathematical Frameworks for Integrative Analysis
#'     of Emerging Biological Data Types 2020 Workshop).
#'     The returned seqFISH data are always the processed ones for the same
#'     workshop.
#'     Additionally, cell type annotations are available in the `colData`
#'     through the `class` column in the seqFISH `assay`.
#'     \itemize{
#'         \item{scRNA_Counts} - Tasic scRNA-seq gene count matrix
#'         \item{scRNA_Labels} - Tasic scRNA-seq cell labels
#'         \item{seqFISH_Coordinates} - Zhu seq-FISH spatial coordinates
#'         \item{seqFISH_Counts} - Zhu seq-FISH gene counts matrix
#'         \item{seqFISH_Labels} - Zhu seq-FISH cell labels
#'     }
#' }
#'
#' @inheritParams scNMT
#'
#' @param DataType character(1) indicating the identifier of the dataset to
#'     retrieve. (default "mouse_visual_cortex")
#'
#' @param modes character( ) The assay types or modes of data to obtain these
#'     include seq-FISH and scRNA-seq data by default.
#'
#' @return A \linkS4class{MultiAssayExperiment} of seq-FISH data
#'
#' @author Dario Righelli <dario.righelli <at> gmail.com>
#'
#' @importFrom SpatialExperiment SpatialExperiment
#' @importFrom SingleCellExperiment SingleCellExperiment
#' @importFrom S4Vectors DataFrame
#'
#' @examples
#'
#' seqFISH(DataType = "mouse_visual_cortex", modes = "*", version = "2.0.0",
#'     dry.run = TRUE)
#'
#' @export
seqFISH <- function(DataType="mouse_visual_cortex", modes="*", version,
    dry.run=TRUE, verbose=TRUE, ...)
{
    ## Resolve the ExperimentHub resources matching the requested dataset,
    ## modes and version.
    resources <- .getResourcesList(prefix = "seqfish_", datatype = DataType,
        modes = modes, version = version, dry.run = dry.run,
        verbose = verbose, ...)
    ## A dry run only reports the resources that would be assembled.
    if (dry.run) {
        return(resources)
    }
    experiments <- resources[["experiments"]]
    ## Dispatch on the dataset identifier; add further seqFISH datasets as
    ## new switch branches here.
    switch(DataType,
        "mouse_visual_cortex" = .mouse_visual_cortex(modes_list = experiments,
            version = version),
        stop("Unrecognized seqFISH dataset name")
    )
}
## Assemble the mouse visual cortex MultiAssayExperiment from the list of
## downloaded modes. Version "1.0.0" uses the full scRNA matrices
## ("scRNA_Full_Counts"/"scRNA_Full_Labels"); any other version uses the
## processed "scRNA_Counts"/"scRNA_Labels" ones.
.mouse_visual_cortex <- function(modes_list, version)
{
res <- paste0("scRNA",
if (identical(version, "1.0.0")) "_Full" else "",
"_", c("Counts", "Labels")
)
## discrepancy between labels in counts and colData
counts <- as.matrix(modes_list[[res[1]]])
## rowData is duplicate of rownames [removed]
coldata <- modes_list[[res[2]]]
## Keep only the cells present in both the count matrix and the labels,
## aligned in the same order, before building the SingleCellExperiment.
vIDs <- intersect(rownames(coldata), colnames(counts))
counts <- counts[, vIDs]
coldata <- coldata[vIDs, ]
sce <- SingleCellExperiment::SingleCellExperiment(
colData=coldata,
assays=S4Vectors::SimpleList(counts=counts)
)
## seq-FISH modality: counts plus per-cell spatial x/y coordinates.
se <- SpatialExperiment::SpatialExperiment(
rowData=rownames(modes_list$seqFISH_Counts),
colData=modes_list$seqFISH_Labels,
assays=S4Vectors::SimpleList(
counts=as.matrix(modes_list$seqFISH_Counts)),
spatialData=DataFrame(modes_list$seqFISH_Coordinates),
spatialCoordsNames=c("x", "y"))
## Combine both modalities into a single container (returned implicitly).
MultiAssayExperiment(
experiments = list(seqFISH = se, scRNAseq = sce)
)
}
|
0436ed9b6221e4349fba88b04eff1c36c2e8570b
|
e96218d3634d77b11dabf9e115a47da93c022c03
|
/man/date_between.Rd
|
d21e340d157e00fcac3ca26361291c7123cfaeaa
|
[] |
no_license
|
cran/lazysql
|
6f5114081eb5f1b858f909a1adcf69b1013e7334
|
6629882dd75cb02971c059c07aa8c062df114e57
|
refs/heads/master
| 2016-08-11T15:21:06.248953
| 2016-03-12T06:16:37
| 2016-03-12T06:16:37
| 53,720,064
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,415
|
rd
|
date_between.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/date_between.R
\name{date_between}
\alias{date_between}
\title{Create SQL string to select date between two given dates}
\usage{
date_between(column_name, date_range)
}
\arguments{
\item{column_name}{[character(1)]\cr
Name of database column to select dates from.}
\item{date_range}{[Date(1:2)]\cr
One or two dates giving the date range in which the dates should be enclosed (closed interval).
If only one date is given, it is taken for both upper and lower limits.}
}
\value{
Character string to be used in SQL statement.
}
\description{
Create string with SQL \code{BETWEEN} expression for \code{WHERE} clause to select dates
within the given range.
}
\details{
\code{column_name} must be a valid SQL identifier. It is validated to conform to
the regular expression returned by \code{\link{valid_identifier_regex}}.
}
\examples{
date1 <- as.Date("2016-02-22")
date2 <- as.Date("2016-02-11")
# SQL expression for a date range
(sql_expr1 <- lazysql::date_between("STD_1", c(date1, date2)))
# SQL expression for a single date
(sql_expr2 <- lazysql::date_between("STD_1", date1))
# sample SQL statements
paste("select * from TEST_TABLE where", sql_expr1)
paste("select * from TEST_TABLE where", sql_expr2)
}
\author{
Uwe Block
}
\seealso{
\code{\link{valid_identifier_regex}}.
}
|
97c79437b9e7821691e6048906aef2363f252068
|
066d9f452eb3e95295e7d84f3f87736a63550419
|
/Models/Utils/R/poweRlaw.R
|
c7cf4ba8b756effee77e64725086fa02c063e173
|
[] |
no_license
|
JusteRaimbault/Organisation
|
c6d1bfdb833dc00b9208bf77672bf2b53bd085ca
|
3e0a9e756670d7f85d15ec1a7b4c244c658ed130
|
refs/heads/master
| 2023-08-16T22:35:23.056590
| 2023-08-11T16:14:19
| 2023-08-11T16:14:19
| 123,956,655
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 329
|
r
|
poweRlaw.R
|
library("poweRlaw")
data("moby", package = "poweRlaw")
# Discrete power law
pl_m <- displ$new(moby)
estimate_pars(pl_m)
est_pl <- estimate_xmin(pl_m)
pl_m$setXmin(est_pl)
dd <- plot(pl_m)
fitted <- lines(pl_m)
# parameter uncertainty
bs <- bootstrap(pl_m,xmins = seq(2, 20, 2), no_of_sims = 5000, threads = 4, seed = 1)
|
d6c39468ef37eaccf8a3a574ed2b5b2aef016c11
|
841e4586ca48ad1ffdcdd4e6091c550375000ef5
|
/rankall.R
|
b1f06a6744696b784e1f34bd960e2db7c1bcc6f0
|
[] |
no_license
|
akhilag24/PA3
|
44344532ab48769c4e81805112fb5f4f6112b2e3
|
4af19217a649eeeecedf0b71dc2b8a1cdeba1b5c
|
refs/heads/master
| 2021-01-19T17:31:20.319537
| 2017-04-15T05:26:12
| 2017-04-15T05:26:12
| 88,326,195
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,654
|
r
|
rankall.R
|
## Read the outcome data and, for each state, find the hospital at the given
## rank for the requested outcome. Returns a data frame with the hospital
## names and the (abbreviated) state names.
##
## Args:
##   outcome: outcome name in lower case with spaces, e.g. "heart attack".
##   num:     "best", "worst", or a numeric rank.
##
## Fixes over the original: seq_along() instead of 1:l, the result vector is
## preallocated instead of grown element-by-element, and the per-state count
## no longer shadows the loop-bound variable `l` (the original only worked
## because 1:l happened to be evaluated before the body reassigned l).
rankall <- function(outcome, num = "best") {
  ## Read outcome data (hard-coded file name, as before: the CSV must be in
  ## the current working directory).
  d <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Build the outcome column name dynamically: title-case each word, join
  ## with dots, e.g. "heart attack" ->
  ## "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack".
  s <- strsplit(outcome, " ")[[1]]
  u <- paste(toupper(substring(s, 1, 1)), substring(s, 2), sep = "", collapse = " ")
  a <- gsub(" ", ".", u)
  p <- paste("Hospital.30.Day.Death..Mortality..Rates.from", a, sep = ".")
  ## Validate the outcome by checking the derived column actually exists.
  if (!p %in% colnames(d)) {
    stop("invalid outcome")
  }
  state_arr <- sort(unique(d$State))
  hospital <- character(length(state_arr))
  for (i in seq_along(state_arr)) {
    ## Sub-group the data for this state.
    o <- d[d$State == state_arr[i], ]
    ## Outcome values for this state, coerced to numeric (non-numeric
    ## entries such as "Not Available" become NA).
    n <- as.numeric(o[[p]])
    ## Number of hospitals with data -- this is the rank of the "worst".
    n_ranked <- sum(!is.na(n))
    if (num == "best") {
      hospital[i] <- rank_hos(n, o, 1)
    } else if (num == "worst") {
      hospital[i] <- rank_hos(n, o, n_ranked)
    } else if (num > n_ranked) {
      hospital[i] <- NA
    } else {
      hospital[i] <- rank_hos(n, o, num)
    }
  }
  data.frame(hospital = hospital, state = state_arr)
}
## Return the name of the hospital at position `num` after ordering the
## hospitals by outcome value (ascending, NAs last), breaking ties
## alphabetically by hospital name.
rank_hos <- function(outcome_subset, state_subset, num) {
  hospital_names <- state_subset[, "Hospital.Name"]
  ranking <- order(outcome_subset, hospital_names, na.last = TRUE)
  hospital_names[ranking][num]
}
|
ed629a466823bb2628dead2d47a7541814b96a30
|
030c92064eee6308ff9efc985b6dd1aa750f9bb2
|
/R/random_dna.R
|
20162fdb9c5291cca97400db6c697a363a9aeb62
|
[] |
no_license
|
rforbiodatascience21/2021_group_04_rpackage
|
bf7aad22fd3e6d736d7d6a9f09fa4390cf041f94
|
8cdd7cc91b166660b3ac57c8daad5d1152b3e83f
|
refs/heads/main
| 2023-03-24T16:11:14.243531
| 2021-03-24T13:52:32
| 2021-03-24T13:52:32
| 350,354,203
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 274
|
r
|
random_dna.R
|
#' @title random_dna
#' @description Produce a random DNA sequence.
#' @param l Length of the sequence: a single non-negative number giving the
#'   number of nucleotides to draw.
#' @return A single character string of `l` letters sampled uniformly (with
#'   replacement) from A, T, G and C; the empty string when `l` is 0.
#' @examples
#' random_dna(10)
random_dna <- function(l){
  # Fail fast on unusable lengths instead of letting sample() error obscurely.
  stopifnot(is.numeric(l), length(l) == 1L, !is.na(l), l >= 0)
  nucleotides <- sample(c("A", "T", "G", "C"), size = l, replace = TRUE)
  # Collapse the individual letters into one string.
  paste0(nucleotides, collapse = "")
}
|
0d81c63bcde608d956854804c93494fcbffed3b3
|
305058652ab13e51510b20caf60b941889d45cbb
|
/ahp.R
|
010ce8cc5ad0f40f0c9ea23fcc63de1845b412e9
|
[] |
no_license
|
MateusMacul/miscelanea
|
728c2a0bca0b4e07caad2249ee18b0a5d4f07989
|
4d3fefb1f09ee30b4a19708227edb53649034815
|
refs/heads/master
| 2020-12-19T18:48:29.465869
| 2020-01-23T15:15:50
| 2020-01-23T15:15:50
| 235,818,641
| 0
| 0
| null | 2020-01-23T15:15:51
| 2020-01-23T15:02:27
| null |
ISO-8859-1
|
R
| false
| false
| 3,641
|
r
|
ahp.R
|
### installs ####
install.packages("xlsx")
### DEPENDENCIES ####
# NOTE(review): require() followed by library() for the same package is
# redundant; noted only (doc-only review).
require(xlsx)
library("xlsx")
#### AHP ####
# Drop columns that will not be used --------------------------------------
drop = c("V80")
# NOTE(review): `dados.ahp` (and `dados`) must already exist in the
# workspace -- dados.ahp is only assigned further below (dados.ahp = dados),
# so this section presumably assumes an earlier interactive run. Also note
# the filter uses names(dados), not names(dados.ahp) -- verify intended.
drop = c(names(dados.ahp@data[,c(80:length(names(dados.ahp)))]))
drop
dados.ahp <- dados.ahp[,!(names(dados) %in% drop)]
str(dados.ahp@data)
#### importing the table with simulation 25 ####
#simu <- read.xlsx2("C:/Users/Padrao/OneDrive - inpe.br/Tabelas/AHP 6.0 simulacao.xlsx", sheetIndex = 4)
simu <- read.csv2("C:/Users/Padrao/OneDrive - inpe.br/Tabelas/simulado50.csv" )
dados.ahp = dados
View(simu)
simu[c(1:5),c(3:ncol(simu))]
# For each simulated weight set (one row of `simu`), append a new column to
# dados.ahp@data holding the weighted sum of the indicator columns.
# The garbled column names (e.g. `Ã.rea.Aberta`) come from the CSV being read
# with a mismatched encoding; the trailing comments give the intended names.
s = 1
while (s <= length(simu$RC)){
p1 = simu$Ã.rea.Aberta[s] # "Área Aberta" (open area)
p2 = simu$VegetaÃ.Ã.o.SecundÃ.ria[s] # "Vegetação Secundária" (secondary vegetation)
p3 = simu$BR.163[s] # BR-163 highway
p4 = simu$Proximidade.a.Ã.rea.urbana[s] # proximity to urban area
p5 = simu$Estradas.vicinais[s] # local roads
p6 = simu$Tamanho.dos.imóveis[s] # property size
p7 = simu$Imóveis.certificados[s] # certified properties
p8 = simu$Declividade[s] # slope
p9 = simu$Rios[s] # rivers
p10 = simu$Assentamento[s] # settlement
p11 = simu$Embargo[s] # embargo
p12 = simu$Floresta[s] # forest
p13 = simu$Unidade.de.ConservaÃ.Ã.o[s] # conservation unit
p14 = simu$Terra.IndÃ.gena[s] # indigenous land
dados.ahp@data[,ncol(dados.ahp)+1] <- (dados.ahp$i_aberta * p1)+(dados.ahp$i_vegse * p2)+
(dados.ahp$i_br * p3)+(dados.ahp$i_au * p4)+(dados.ahp$i_est * p5)+
(dados.ahp$i_tamimov * p6)+(dados.ahp$i_cert * p7)+(dados.ahp$i_decliv * p8)+
(dados.ahp$i_rios5 * p9)+(dados.ahp$i_ass_p* p10) +(dados.ahp$i_emb* p11) +
(dados.ahp$i_flor13* p12)+(dados.ahp$i_uc* p13)+(dados.ahp$i_ti* p14)
s = s+1
}
str(dados.ahp@data[,c(80:954)])
# Per polygon: interquartile range of the simulated scores (columns 80:954)
# as a sensitivity measure.
for(i in 1:length(dados.ahp$id)){
dados.ahp$q1[i] = quantile(dados.ahp@data[i,c(80:954)], c(0.25))[,1] # ALWAYS CHECK THE COLUMNS
dados.ahp$q3[i] = quantile(dados.ahp@data[i,c(80:954)], c(0.75))[,1] # ALWAYS CHECK THE COLUMNS
}
dados.ahp$sense = dados.ahp$q3 - dados.ahp$q1
View(dados.ahp@data)
#### importing the table with simulation 50 into dados2 ####
simu <- read.csv2("C:/Users/Padrao/OneDrive - inpe.br/Tabelas/simulado50.csv" )
View(simu)
# Same weighted-sum loop as above, but applied to `dados2`. Note a few
# different indicator columns are used here (i_frag instead of i_tamimov,
# i_flor instead of i_flor13) -- verify this difference is intended.
s = 1
while (s <= length(simu$RC)){
p1 = simu$Ã.rea.Aberta[s] # "Área Aberta" (open area)
p2 = simu$VegetaÃ.Ã.o.SecundÃ.ria[s] # "Vegetação Secundária" (secondary vegetation)
p3 = simu$BR.163[s] # BR-163 highway
p4 = simu$Proximidade.a.Ã.rea.urbana[s] # proximity to urban area
p5 = simu$Estradas.vicinais[s] # local roads
p6 = simu$Tamanho.dos.imóveis[s] # property size
p7 = simu$Imóveis.certificados[s] # certified properties
p8 = simu$Declividade[s] # slope
p9 = simu$Rios[s] # rivers
p10 = simu$Assentamento[s] # settlement
p11 = simu$Embargo[s] # embargo
p12 = simu$Floresta[s] # forest
p13 = simu$Unidade.de.ConservaÃ.Ã.o[s] # conservation unit
p14 = simu$Terra.IndÃ.gena[s] # indigenous land
dados2@data[,ncol(dados2)+1] <- (dados2$i_aberta * p1)+(dados2$i_vegse * p2)+
(dados2$i_br * p3)+(dados2$i_au * p4)+(dados2$i_est * p5)+
(dados2$i_frag * p6)+(dados2$i_cert * p7)+(dados2$i_decliv * p8)+
(dados2$i_rios5 * p9)+(dados2$i_ass_p* p10) +(dados2$i_emb* p11) +
(dados2$i_flor* p12)+(dados2$i_uc* p13)+(dados2$i_ti* p14)
s = s+1
}
# Sensitivity (interquartile range) per polygon over the simulation columns
# 61:935 -- note the different column range from the dados.ahp block above.
for(i in 1:length(dados2$id)){
q1 = quantile(dados2@data[i,c(61:935)], c(0.25))
q3 = quantile(dados2@data[i,c(61:935)], c(0.75))
dados2$sense[i] = (q3[,1] - q1[,1])
}
|
c02aac1da930691666bf89ece7de308229975142
|
4fcff0d54bace980e71f9c39b6eb233712e60ccb
|
/plot1.R
|
c598862d0fdc5ff054bc5e7683aa87050e0bf9dd
|
[] |
no_license
|
Kinshuk-9/ExData_Plotting1
|
ddcacf92638881693efd94a0a7dedc93e31e79fd
|
ea1b4861659bdfb085b7acf0a6cd0da32ffc7544
|
refs/heads/master
| 2022-11-21T11:10:21.707559
| 2020-07-08T21:24:02
| 2020-07-08T21:24:02
| 278,063,762
| 0
| 0
| null | 2020-07-08T10:50:29
| 2020-07-08T10:50:28
| null |
UTF-8
|
R
| false
| false
| 462
|
r
|
plot1.R
|
# plot1.R -- histogram of Global Active Power for 2007-02-01 and 2007-02-02.
#
# Reads the UCI household power consumption file (';'-separated, '?' = NA),
# restricts it to the two target days, and writes a 480x480 PNG histogram.
full_data <- read.csv('household_power_consumption.txt',
                      header = TRUE,
                      sep = ';',
                      stringsAsFactors = FALSE,
                      na.strings = '?')
head(full_data)
# Keep only the two days of interest (dates are d/m/Y strings in the raw file).
data1 <- full_data[full_data$Date %in% c("1/2/2007", "2/2/2007"), ]
data1$Date <- as.Date(data1$Date, format = '%d/%m/%Y')
# Render the histogram straight to disk.
png("plot1.png", width = 480, height = 480)
hist(data1$Global_active_power,
     main = "Global active power",
     xlab = "Global Active Power(kilowatts)",
     ylab = "Frequency",
     col = 'Turquoise')
dev.off()
|
6e2c77839eb20e69edce8b8b1fce7e6cb998035c
|
7a56db93111edd77ad7911c67ef9852db13eefab
|
/Rprogramming/ProgrammingAssignment1/Pollutantmean.R
|
d6063b64ba5c378fd4016e6f52425b10ea5c6dbb
|
[] |
no_license
|
CarolinaBosch/DataScience
|
15b533d0e3664de507f6f4afe0d82b85c874b145
|
7de1d47c8d1028e7082c750e0f51be67ea0087dc
|
refs/heads/master
| 2022-11-27T21:35:59.607008
| 2016-06-21T16:21:08
| 2016-06-21T16:21:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 415
|
r
|
Pollutantmean.R
|
# Compute the mean of a pollutant across monitor CSV files.
#
# Args:
#   directory: path to a directory of monitor CSV files. The original
#              positional convention is kept: column 2 = sulfate,
#              column 3 = nitrate, with an ID column identifying the monitor.
#   pollutant: "sulfate" selects column 2; any other value selects column 3
#              (same behavior as the original if/else).
#   id:        monitor IDs to include.
#
# Prints and returns (invisibly, via print) the mean of the selected column
# over the chosen monitors, ignoring NA values.
pollutantmean <- function(directory,pollutant="sulfate",id=1:332){
  # full.names = TRUE fixes the original bug where bare file names were
  # passed to read.csv(), which only worked when getwd() == directory.
  # "\\.csv$" is the correct regex; the old "*.csv" was a glob, not a regex.
  files <- list.files(path = directory, pattern = "\\.csv$", full.names = TRUE)
  dat <- do.call(rbind, lapply(files, read.csv))
  col <- if (pollutant == "sulfate") 2L else 3L
  # Vectorized %in% replaces the original grow-by-c() accumulation loop.
  values <- as.numeric(dat[dat$ID %in% id, col])
  avg <- mean(values, na.rm = TRUE)
  print(avg)
}
|
efae240c8819cc9e793ae5bbf347ce8cb5985141
|
b664fd3502722bbb78c92a9fab3d51d8239545b4
|
/RCode/hist_scores.R
|
c6eecbb5834c1e2b16d94beca2aad804de10aa47
|
[] |
no_license
|
seninp-bioinfo/jKEGG
|
cb87fe598177895775660a80c11e90727666971e
|
655cf4e298bd0ea35594018cded89e9810e10f63
|
refs/heads/master
| 2016-09-05T10:15:03.206686
| 2015-09-02T06:49:46
| 2015-09-02T06:49:46
| 24,708,987
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,094
|
r
|
hist_scores.R
|
# hist_scores.R -- plot score histograms for the best hit of each aligner,
# pulled from the local MySQL results database.
require(VennDiagram)
require(RMySQL)
require(ggplot2)
require(Cairo)
require(grid)
library(gridExtra)
# "XXX" looks like a placeholder password, not a real credential -- confirm.
session <- dbConnect(MySQL(), host="localhost",
db="funnymat",user="funnymat", password="XXX")
# Raw best-hit scores, one query per aligner tag (BD/LAD/DSD/LSD/PSD map to
# BLAST/LAST/DIAMOND/LAMBDA/PAUDA per the plot titles below -- presumably).
blast_hits = dbGetQuery(session, "select score from aligners_score where tag=\"BD\" order by hit_id ASC")
last_hits = dbGetQuery(session, "select score from aligners_score where tag=\"LAD\" order by hit_id ASC")
diamond_hits = dbGetQuery(session, "select score from aligners_score where tag=\"DSD\" order by hit_id ASC")
lambda_hits = dbGetQuery(session, "select score from aligners_score where tag=\"LSD\" order by hit_id ASC")
pauda_hits = dbGetQuery(session, "select score from aligners_score where tag=\"PSD\" order by hit_id ASC")
# NOTE(review): `identity=T` is not a geom_histogram() parameter (a position
# would be `position = "identity"`); verify it does what was intended.
p1=ggplot(blast_hits,aes(x=score))+geom_histogram(identity=T,binwidth=5)+theme_classic()+
ggtitle("Score for best BLAST hits") + geom_density()
p2=ggplot(last_hits,aes(x=score))+geom_histogram(identity=T,binwidth=5)+theme_classic()+
ggtitle("Score for best LAST hits")
p3=ggplot(diamond_hits,aes(x=score))+geom_histogram(identity=T,binwidth=5)+theme_classic()+
ggtitle("Score for best DIAMOND hits")
p4=ggplot(lambda_hits,aes(x=score))+geom_histogram(identity=T,binwidth=5)+theme_classic()+
ggtitle("Score for best LAMBDA hits")
p5=ggplot(pauda_hits,aes(x=score))+geom_histogram(identity=T,binwidth=5)+theme_classic()+
ggtitle("Score for best PAUDA hits")
# Stack the five histograms vertically and save to disk.
grid.arrange(p1, p2, p3, p4, p5, ncol=1)
ggsave(arrangeGrob(p1, p2, p3, p4, p5, ncol=1), width=130, height=220, units="mm",
file="hist-score.png", dpi=120)
# Second pass: same plots on the normalized score column.
blast_hits = dbGetQuery(session, "select normalized_score from aligners_score where tag=\"BD\" order by hit_id ASC")
last_hits = dbGetQuery(session, "select normalized_score from aligners_score where tag=\"LAD\" order by hit_id ASC")
diamond_hits = dbGetQuery(session, "select normalized_score from aligners_score where tag=\"DSD\" order by hit_id ASC")
lambda_hits = dbGetQuery(session, "select normalized_score from aligners_score where tag=\"LSD\" order by hit_id ASC")
pauda_hits = dbGetQuery(session, "select normalized_score from aligners_score where tag=\"PSD\" order by hit_id ASC")
p1=ggplot(blast_hits,aes(x=normalized_score))+geom_histogram(identity=T,binwidth=0.05)+theme_classic()+
ggtitle("Score for best BLAST hits") + geom_density()
p2=ggplot(last_hits,aes(x=normalized_score))+geom_histogram(identity=T,binwidth=0.05)+theme_classic()+
ggtitle("Score for best LAST hits")
p3=ggplot(diamond_hits,aes(x=normalized_score))+geom_histogram(identity=T,binwidth=0.05)+theme_classic()+
ggtitle("Score for best DIAMOND hits")
p4=ggplot(lambda_hits,aes(x=normalized_score))+geom_histogram(identity=T,binwidth=0.05)+theme_classic()+
ggtitle("Score for best LAMBDA hits")
p5=ggplot(pauda_hits,aes(x=normalized_score))+geom_histogram(identity=T,binwidth=0.05)+theme_classic()+
ggtitle("Score for best PAUDA hits")
grid.arrange(p1, p2, p3, p4, p5, ncol=1)
ggsave(arrangeGrob(p1, p2, p3, p4, p5, ncol=1), width=130, height=220, units="mm",
file="hist-score-normalized.png", dpi=120)
|
3ea497d1e6e371047a55da9251bdf9aaecec5738
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/928_0/rinput.R
|
f07321a16b02ebd32a9e60f50b0ebef96280fa8e
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 133
|
r
|
rinput.R
|
# Read a Newick tree, remove its root, and write the unrooted tree back out
# (input/output file names are fixed per codeml run directory).
library(ape)
testtree <- read.tree("928_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="928_0_unrooted.txt")
|
913c9d4f0dcd80fe10eaa1ed708bd99bb047a75e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mvglmmRank/examples/game.pred.Rd.R
|
6ca2364a025ed3477f00a1c00b0d1884af4479fc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 421
|
r
|
game.pred.Rd.R
|
# Example script extracted from the game.pred Rd page of mvglmmRank.
library(mvglmmRank)
### Name: game.pred
### Title: Predict outcomes of games.
### Aliases: game.pred
### Keywords: regression
### ** Examples
data(nfl2012)
# Quick smoke-run capped at one EM iteration; the fitted object is discarded.
mvglmmRank(nfl2012,method="PB0",first.order=TRUE,verbose=TRUE,max.iter.EM=1)
## No test:
# Full fit, then predict a single matchup from the fitted ratings.
result <- mvglmmRank(nfl2012,method="PB0",first.order=TRUE,verbose=TRUE)
print(result)
game.pred(result,home="Denver Broncos",away="Green Bay Packers")
## End(No test)
|
7d6c1a1d7cb86189c0883e20b79a0f93169d62cb
|
8b61baaf434ac01887c7de451078d4d618db77e2
|
/R/rbind.smart.R
|
6e31cd84f82d60ca09bafb8d8a79fcb626628253
|
[] |
no_license
|
drmjc/mjcbase
|
d5c6100b6f2586f179ad3fc0acb07e2f26f5f517
|
96f707d07c0a473f97fd70ff1ff8053f34fa6488
|
refs/heads/master
| 2020-05-29T19:36:53.961692
| 2017-01-17T10:54:00
| 2017-01-17T10:54:00
| 12,447,080
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,561
|
r
|
rbind.smart.R
|
#' A smarter rbind
#' \code{rbind} 2 matrix-like-objects even if they have different numbers of
#' columns. It's a bit like \code{merge()} but via rowbindings, not
#' colbindings. It produces a result which as \code{union(colnames(a),
#' colnames(b))}, and fills in missing data with \code{NA}. See details.
#'
#' The resulting data.frame will have \code{nrow(x)} + \code{nrow(y)} rows, and
#' \code{length(union(colnames(x), colnames(y)))} columns.
#'
#' If x and y contain the same colnames, then \code{rbind.smart} ==
#' \code{rbind}.
#'
#' If x and y contain partially overlapping colnames, then the result will be
#' the union of all colnames, with NA's filled in where appropriate.
#'
#' If x and y contain no overlapping colnames, then the result will have x in
#' top left and y in bottom right, filled in with NA's. as in: \preformatted{ x
#' : X; y: Y rbind.smart(x, y) -> X NA NA Y } Naming rules: column classes from
#' \code{x} take precedence over those from \code{y}, and the colnames of
#' result will be all of the colnames from x, then the colnames from y that
#' were not also in x at the end.
#'
#' @param x,y matrix-like objects to be merged
#' @param sort.col Which column would you like the resulting data to be sorted
#' on? Set to NULL to disable, in which case, rows corresponding to \code{x}
#' will appear before those from \code{y}.
#' @return A data.frame with \code{nrow(x)} + \code{nrow(y)} rows, and
#' \code{length(union(colnames(x), colnames(y)))} columns.
#' @author Mark Cowley, 11 April 2006
#' @seealso \code{\link{rbind}}
#' @keywords manip
#' @examples
#'
#' a <- matrix(rnorm(25), 5, 5)
#' colnames(a) <- letters[1:5]
#' b <- matrix(rnorm(25), 5, 5)
#' colnames(b) <- letters[3:7]
#' rbind.smart(a, b)
#'
#' @export
rbind.smart <- function(x, y, sort.col=NULL) {
if(ncol(x) == ncol(y) && identical(colnames(x), colnames(y)))
return( rbind(x, y) )
else {
#
# what are the possible colnames from both matrices?
# usually, y has a subset of the columns of x.
#
COLNAMES <- union(colnames(x), colnames(y))
#
# keep colnames in the order that they were in "x".
#
tmp.order <- match(colnames(x), COLNAMES)
COLNAMES <- COLNAMES[c(tmp.order, setdiff(1:length(COLNAMES), tmp.order))]
#
# in case the colclasses in x and y for same cols differ,
# do y then x so that the classes in x take priority over the classes in y.
#
COLCLASSES <- rep("character", length(COLNAMES))
names(COLCLASSES) <- COLNAMES
COLCLASSES[colnames(y)] <- colclasses(y)
COLCLASSES[colnames(x)] <- colclasses(x)
# tmp function to resize a matrix and fill in with NA's
resize <- function(x, COLNAMES, COLCLASSES) {
tmp <- as.data.frame( matrix(NA, nrow(x), length(COLNAMES), dimnames=list(rownames(x), COLNAMES)) )
tmp[,colnames(x)] <- x
colclasses(tmp) <- COLCLASSES
return(tmp)
}
if( length(setdiff(COLNAMES, colnames(x))) > 0 ) {
x <- resize(x, COLNAMES, COLCLASSES)
}
if( length(setdiff(COLNAMES, colnames(y))) > 0 ) {
y <- resize(y, COLNAMES, COLCLASSES)
}
#
# now that x and y have same ncols:
#
res <- rbind(x, y)
if( !is.null(sort.col) )
res <- res[order(res[,sort.col]),]
return( res )
}
}
# CHANGELOG
# 2013-08-28: added explicit check for same colnames if ncol's are equal.
|
9bc4ae732ad8359954411a5ce02e715ac7bdfe82
|
c6f8b268cdd35377a81c3e95953a8ea6aed2b3ae
|
/R/normalize_design.R
|
790f2ef4a519c49ea9c36aa666a6d72208714269
|
[] |
no_license
|
cran/skpr
|
66afcf4fb51a07d95a72e5ecf28db60c9de53937
|
e34638d13253b7f82de9fcfee87f2c17f2619c37
|
refs/heads/master
| 2023-06-26T22:02:24.856196
| 2023-06-16T15:10:02
| 2023-06-16T15:10:02
| 100,655,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,924
|
r
|
normalize_design.R
|
#'@title Normalize Design
#'
#'@description Normalizes the numeric columns in the design to -1 to 1. This is important to do if your model has interaction or polynomial terms,
#'as these terms can introduce multi-collinearity and standardizing the numeric columns can reduce this problem.
#'
#'@param design The design matrix.
#'@param augmented Default `NULL`. If augmenting an existing design, this should be the pre-existing design. The column types must match design
#'
#'@return Normalized design matrix
#'@export
#'@examples
#'#Normalize a design
#'if(skpr:::run_documentation()) {
#'cand_set = expand.grid(temp = c(100,300,500),
#' altitude = c(10000,20000),
#' offset = seq(-10,-5,by=1),
#' type = c("A","B", "C"))
#'design = gen_design(cand_set, ~., 24)
#'
#'#Un-normalized design
#'design
#'}
#'if(skpr:::run_documentation()) {
#'#Normalized design
#'normalize_design(design)
#'}
normalize_design = function(design, augmented = NULL) {
if(!is.null(augmented)) {
all_equal_classes = all(identical(lapply(design,class), unlist(lapply(augmented,class))))
if(!all_equal_classes) {
stop("Design to be augmented and new design must have identical column classes")
}
for (column in 1:ncol(design)) {
if (is.numeric(design[, column])) {
midvalue = mean(c(max(c(design[, column],augmented[,column])), min(c(design[, column],augmented[,column]))))
design[, column] = (design[, column] - midvalue) / (max(c(design[, column],augmented[,column])) - midvalue)
}
}
} else {
for (column in 1:ncol(design)) {
if (is.numeric(design[, column])) {
midvalue = mean(c(max(design[, column]), min(design[, column])))
design[, column] = (design[, column] - midvalue) / (max(design[, column]) - midvalue)
}
}
}
return(design)
}
|
0af827bf3146b4295f46de17e5b0324a4e481022
|
3d75826038af085d23c523d0c6fd9e643e8631c6
|
/R/sample-exposures.R
|
67425cdcb090999ab2692fba4c52d78883f96b49
|
[] |
no_license
|
bryanmayer/multdr
|
6c5f669157941c55b4a01fd4f1c0978a6dd15a67
|
49e847663631b8587375920b517ba75f87c0f6c6
|
refs/heads/master
| 2021-01-23T14:05:30.332362
| 2015-08-26T19:12:52
| 2015-08-26T19:12:52
| 41,111,872
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 838
|
r
|
sample-exposures.R
|
#' Takes a series of exposures and times and samples from them based on an exposure rate
#'
#' Effectively maps a continuum of exposure to a discrete set of actual exposures that could cause infection.
#'
#' @param raw_exposure_data The subsetted data.frame containing time (days) and exposure (count) for one subject
#' @param exposure_rate The rate of sampling from the raw_exposure_data
#' @return A data.frame containing only the sampled exposure times
#' @export
#this does it by rate
sample_exposure_data = function(raw_exposure_data, exposure_rate){
if(exposure_rate > 5 & exposure_rate != 10) stop("Exposure rate must be an integer between 1-5 or 10")
days = raw_exposure_data$days
exposure_times = round(seq(min(days), max(days), by = 1/exposure_rate), 1)
subset(raw_exposure_data, round(days, 1) %in% exposure_times)
}
|
9c467c2a4f3f61dc05d0674d8414367b3d124629
|
c267b09529ad20ba29c23b9922cbb94ce5b2e2d8
|
/R/events.R
|
2fc58418ad03bb272b212a5a22a8eced59c46dc2
|
[] |
no_license
|
IlyaFinkelshteyn/psichomics
|
7f004cffcdabf028ef3094b3a6a6b290fb377cd4
|
a7f2db7954f6962ff7a487369c7b367a1d999daa
|
refs/heads/master
| 2021-01-11T06:08:04.688256
| 2016-09-24T01:40:53
| 2016-09-24T01:40:53
| 69,070,120
| 0
| 0
| null | 2016-09-24T00:37:37
| 2016-09-24T00:37:36
| null |
UTF-8
|
R
| false
| false
| 19,786
|
r
|
events.R
|
#' @include events_mats.R
#' @include events_miso.R
#' @include events_vastTools.R
#' @include events_suppa.R
NULL
#' Creates a template of alternative splicing junctions
#'
#' @param nrow Integer: Number of rows
#' @param program Character: Program used to get the junctions
#' @param event.type Character: Event type of the respective events
#' @param chromosome Character: Chromosome of the junctions
#' @param strand Character: positive ("+") or negative ("-") strand of the event
#' @param id Character: events' ID
#'
#' @return A data frame with the junctions coordinate names pre-filled with NAs
#'
#' @examples
#' psichomics:::createJunctionsTemplate(nrow = 8)
createJunctionsTemplate <- function(nrow, program = character(0),
event.type = character(0),
chromosome = character(0),
strand = character(0),
id = character(0)) {
## TODO(NunoA): only accept a "+" or a "-" strand
parsed <- as.data.frame(matrix(NA, nrow = nrow, ncol = 8),
stringsAsFactors = FALSE)
names(parsed) <- c("C1.start", "C1.end",
"A1.start", "A1.end",
"A2.start", "A2.end",
"C2.start", "C2.end")
if (length(program) > 0) parsed[["Program"]] <- "MISO"
if (length(event.type) > 0) parsed[["Event.type"]] <- event.type
if (length(chromosome) > 0) parsed[["Chromosome"]] <- chromosome
if (length(strand) > 0) parsed[["Strand"]] <- strand
if (length(id) > 0) parsed[["Event.ID"]] <- id
return(parsed)
}
#' Get MISO alternative splicing annotation
#' @importFrom utils read.delim
#' @return Retrieve annotation from MISO
getMisoAnnotation <- function() {
types <- c("SE", "AFE", "ALE", "MXE", "A5SS", "A3SS", "RI", "TandemUTR")
typesFile <- paste0("/genedata/Resources/Annotations/MISO/hg19/", types,
".hg19.gff3")
annot <- lapply(typesFile, read.delim, stringsAsFactors = FALSE,
comment.char="#", header=FALSE)
## TODO: ALE events are baldy formatted, they have two consecutive gene
## lines... remove them for now
annot[[3]] <- annot[[3]][-c(49507, 49508), ]
return(annot)
}
#' @rdname parseMatsAnnotation
#' @importFrom plyr rbind.fill
parseMisoAnnotation <- function(annot) {
events <- lapply(annot, parseMisoEvent)
events <- rbind.fill(events)
return(events)
}
#' Get SUPPA alternative splicing annotation
#' @importFrom utils read.delim
#' @return Retrieve annotation from SUPPA
getSuppaAnnotation <- function() {
types <- c("SE", "AF", "AL", "MX", "A5", "A3", "RI")
typesFile <- paste0("~/Documents/psi_calculation/suppa/suppaEvents/hg19_",
types, ".ioe")
annot <- lapply(typesFile, read.delim, stringsAsFactors = FALSE,
comment.char="#", header=TRUE)
return(annot)
}
#' @rdname parseMatsAnnotation
#' @importFrom plyr rbind.fill
parseSuppaAnnotation <- function(annot) {
eventsID <- lapply(annot, "[[", "event_id")
events <- lapply(eventsID, parseSuppaEvent)
events <- rbind.fill(events)
return(events)
}
#' Get MATS alternative splicing annotation
#' @importFrom utils read.delim
#' @return Retrieve annotation from MATS
getMatsAnnotation <- function() {
types <- c("SE", "AFE", "ALE", "MXE", "A5SS", "A3SS", "RI")
typesFile <- paste("~/Documents/psi_calculation/mats_out/ASEvents/fromGTF",
c(types, paste0("novelEvents.", types)), "txt",
sep = ".")
names(typesFile) <- rep(types, 2)
annot <- lapply(typesFile, read.delim, stringsAsFactors = FALSE,
comment.char="#", header=TRUE)
return(annot)
}
#' Parse alternative splicing annotation
#' @param annot Data frame or matrix: alternative splicing annotation
#' @importFrom plyr rbind.fill
#' @return Parsed annotation
parseMatsAnnotation <- function(annot) {
types <- names(annot)
events <- lapply(seq_along(annot), function(i)
if (nrow(annot[[i]]) > 0)
return(parseMatsEvent(annot[[i]], types[[i]])))
events <- rbind.fill(events)
# Sum 1 position to the start/end of MATS events (depending on the strand)
matsNames <- names(events)
plus <- events$Strand == "+"
# Plus
start <- matsNames[grep(".start", matsNames)]
events[plus, start] <- events[plus, start] + 1
# Minus
end <- matsNames[grep(".end", matsNames)]
events[!plus, end] <- events[!plus, end] + 1
return(events)
}
#' Get VAST-TOOLS alternative splicing annotation
#' @importFrom utils read.delim
#' @return Retrieve annotation from VAST-TOOLS
getVastToolsAnnotation <- function() {
types <- c("ALT3", "ALT5", "COMBI", "IR", "MERGE3m", "MIC",
rep(c("EXSK", "MULTI"), 1))
typesFile <- sprintf(
"/genedata/Resources/Software/vast-tools/VASTDB/Hsa/TEMPLATES/Hsa.%s.Template%s.txt",
types, c(rep("", 6), rep(".2", 2))#, rep(".2", 2))
)
names(typesFile) <- types
annot <- lapply(typesFile, read.delim, stringsAsFactors = FALSE,
comment.char="#", header=TRUE)
return(annot)
}
#' @rdname parseMatsAnnotation
#' @importFrom plyr rbind.fill
parseVastToolsAnnotation <- function(annot) {
types <- names(annot)
events <- lapply(seq_along(annot),
function(i) {
cat(types[i], fill=TRUE)
a <- annot[[i]]
if (nrow(a) > 0)
return(parseVastToolsEvent(a))
})
events <- rbind.fill(events)
events <- unique(events)
return(events)
}
#' Returns the coordinates of interest for a given event type
#' @param type Character: alternative splicing event type
#' @return Coordinates of interest according to the alternative splicing event
#' type
getSplicingEventCoordinates <- function(type) {
switch(type,
"SE" = c("C1.end", "A1.start", "A1.end", "C2.start"),
"A3SS" = c("C1.end", "C2.start", "A1.start"),
"A5SS" = c("C1.end", "C2.start", "A1.end"),
"AFE" = c("C1.start", "C1.end", "A1.start", "A1.end"),
"ALE" = c("A1.start", "A1.end", "C2.start", "C2.end"),
"RI" = c("C1.start", "C1.end", "C2.start", "C2.end"),
"MXE" = c("C1.end", "A1.start", "A1.end",
"A2.start", "A2.end", "C2.start"),
"TandemUTR" = c("C1.start", "C1.end", "A1.end"))
}
#' Get the annotation for all event types
#' @return Parsed annotation
getParsedAnnotation <- function() {
cat("Retrieving MISO annotation...", fill=TRUE)
annot <- getMisoAnnotation()
cat("Parsing MISO annotation...", fill=TRUE)
miso <- parseMisoAnnotation(annot)
cat("Retrieving SUPPA annotation...", fill=TRUE)
annot <- getSuppaAnnotation()
cat("Parsing SUPPA annotation...", fill=TRUE)
suppa <- parseSuppaAnnotation(annot)
cat("Retrieving VAST-TOOLS annotation...", fill=TRUE)
annot <- getVastToolsAnnotation()
cat("Parsing VAST-TOOLS annotation...", fill=TRUE)
vast <- parseVastToolsAnnotation(annot)
cat("Retrieving MATS annotation...", fill=TRUE)
annot <- getMatsAnnotation()
cat("Parsing MATS annotation...", fill=TRUE)
mats <- parseMatsAnnotation(annot)
events <- list(
"miso" = miso, "mats" = mats, "vast-tools" = vast, "suppa" = suppa)
# Remove the "chr" prefix from the chromosome field
cat("Standarising chromosome field", fill=TRUE)
for (each in seq_along(events)) {
chr <- grepl("chr", events[[each]]$Chromosome)
events[[each]]$Chromosome[chr] <-
gsub("chr", "", events[[each]]$Chromosome[chr])
}
events <- rbind.fill(events)
events <- dlply(events, .(Event.type))
events <- lapply(events, dlply, .(Program))
return(events)
}
#' Convert a column to numeric if possible and ignore given columns composed
#' of lists
#'
#' @param table Data matrix: table
#' @param by Character: column names of interest
#' @param toNumeric Boolean: which columns to convert to numeric (FALSE by
#' default)
#'
#' @return Processed data matrix
#' @examples
#' event <- read.table(text = "ABC123 + 250 300 350
#' DEF456 - 900 800 700")
#' names(event) <- c("Event ID", "Strand", "C1.end", "A1.end", "A1.start")
#'
#' # Let's change one column to character
#' event[ , "C1.end"] <- as.character(event[ , "C1.end"])
#' is.character(event[ , "C1.end"])
#'
#' event <- psichomics:::getNumerics(event, by = c("Strand", "C1.end", "A1.end",
#' "A1.start"),
#' toNumeric = c(FALSE, TRUE, TRUE, TRUE))
#' # Let's check if the same column is now integer
#' is.numeric(event[ , "C1.end"])
getNumerics <- function(table, by = NULL, toNumeric = FALSE) {
# Check which elements are lists of specified length
bool <- TRUE
for (each in by)
bool <- bool & vapply(table[[each]], length, integer(1)) == 1
table <- table[bool, ]
# Convert elements to numeric
conv <- by[toNumeric]
table[conv] <- as.numeric(as.character(unlist(table[conv])))
return(table)
}
#' Full outer join all given annotation based on select columns
#' @param annotation Data frame or matrix: alternative splicing annotation
#' @param types Character: alternative splicing types
#' @return List of annotation joined by alternative splicing event type
joinAnnotation <- function(annotation, types) {
if (missing(types)) types <- names(annotation)
joint <- lapply(types, function(type, annotation) {
cat(type, fill=TRUE)
# Create vector with comparable columns
id <- c("Strand", "Chromosome", "Event.type")
by <- c(id, getSplicingEventCoordinates(type))
toNumeric <- !by %in% id
# Convert given columns to numeric if possible
tables <- lapply(annotation[[type]], getNumerics, by, toNumeric)
# Make the names of non-comparable columns distinct
cols <- lapply(names(tables), function(k) {
ns <- names(tables[[k]])
inBy <- ns %in% by
ifelse(inBy, ns, paste(k, ns, sep="."))
})
# Full join all the tables
res <- Reduce(function(x, y) dplyr::full_join(x, y, by), tables)
names(res) <- unique(unlist(cols))
# Remove equal rows
return(unique(res))
}, annotation)
names(joint) <- types
return(joint)
}
#' Write the annotation of an event type to a file
#'
#' @param jointEvents List of lists of data frame
#' @param eventType Character: type of event
#' @param filename Character: path to the annotation file
#' @param showID Boolean: show the events' ID? FALSE by default
#' @param rds Boolean: write to a RDS file? TRUE by default; otherwise, write to
#' TXT
#'
#' @importFrom utils write.table
#'
#' @return Invisible TRUE if everything's okay
writeAnnotation <- function(jointEvents, eventType,
filename = paste0("data/annotation_",
eventType, ".txt"),
showID = FALSE, rds = TRUE) {
res <- jointEvents[[eventType]]
# Show the columns Chromosome, Strand and coordinates of interest
by <- c("Chromosome", "Strand", getSplicingEventCoordinates(eventType))
ord <- 0
# Show the events' ID if desired
if (showID) {
cols <- grep("Event.ID", names(res), value = TRUE)
by <- c(cols, by)
ord <- length(cols)
}
res <- subset(res, select = by)
## TODO(NunoA): clean this mess
# Order by chromosome and coordinates
orderBy <- lapply(c(1 + ord, (3 + ord):ncol(res)),
function(x) return(res[[x]]))
res <- res[do.call(order, orderBy), ]
res <- unique(res)
if (rds)
saveRDS(res, file = filename)
else
write.table(res, file = filename, quote = FALSE, row.names = FALSE,
sep = "\t")
return(invisible(TRUE))
}
#' Read the annotation of an event type from a file
#'
#' @inheritParams writeAnnotation
#' @param rds Boolean: read from a RDS file? TRUE by default; otherwise, read
#' from table format
#' @importFrom utils read.table
#'
#' @return Data frame with the annotation
readAnnotation <- function(eventType, filename, rds = TRUE) {
if (missing(filename)) {
filename <- file.path("data", paste0("annotation_", eventType))
filename <- paste0(filename, ifelse(rds, ".RDS", ".txt"))
}
if (!file.exists(filename))
stop("Missing file.")
if (rds)
read <- readRDS(filename)
else
read <- read.table(filename, header = TRUE, stringsAsFactors = FALSE)
return(read)
}
#' Compare the number of events from the different programs in a Venn diagram
#'
#' @param join List of lists of data frame
#' @param eventType Character: type of event
#'
#' @return Venn diagram
vennEvents <- function(join, eventType) {
join <- join[[eventType]]
programs <- join[grep("Program", names(join))]
nas <- !is.na(programs)
nas <- ifelse(nas, row(nas), NA)
p <- lapply(1:ncol(nas), function(col) nas[!is.na(nas[ , col]), col])
names(p) <- sapply(programs, function(x) unique(x[!is.na(x)]))
gplots::venn(p)
}
#' String used to search for matches in a junction quantification file
#' @param chr Character: chromosome
#' @param strand Character: strand
#' @param junc5 Integer: 5' end junction
#' @param junc3 Integer: 3' end junction
#'
#' @return Formatted character string
junctionString <- function(chr, strand, junc5, junc3) {
plus <- strand == "+"
first <- ifelse(plus, junc5, junc3)
last <- ifelse(plus, junc3, junc5)
res <- sprintf("chr%s:%s:%s,chr%s:%s:%s",
chr, first, strand, chr, last, strand)
return(res)
}
#' Calculate inclusion levels using alternative splicing event annotation and
#' junction quantification for many samples
#'
#' @param eventType Character: type of the alternative event to calculate
#' @param junctionQuant Data.frame: junction quantification with samples as
#' columns and junctions as rows
#' @param annotation Data.frame: alternative splicing annotation related to
#' event type
#' @param minReads Integer: minimum of total reads required to consider the
#' quantification as valid (10 by default)
#'
#' @importFrom fastmatch fmatch
#' @return Matrix with inclusion levels
calculateInclusionLevels <- function(eventType, junctionQuant, annotation,
minReads = 10) {
chr <- annotation$Chromosome
strand <- annotation$Strand
if (eventType == "SE") {
# Create searchable strings for junctions
incAstr <- junctionString(chr, strand,
annotation$C1.end, annotation$A1.start)
incBstr <- junctionString(chr, strand,
annotation$A1.end, annotation$C2.start)
exclstr <- junctionString(chr, strand,
annotation$C1.end, annotation$C2.start)
# Get specific junction quantification
coords <- rownames(junctionQuant)
incA <- junctionQuant[fmatch(incAstr, coords), ]
incB <- junctionQuant[fmatch(incBstr, coords), ]
excl <- junctionQuant[fmatch(exclstr, coords), ]
rm(incAstr, incBstr, exclstr)
# Calculate inclusion levels
inc <- (incA + incB) / 2
rm(incA, incB)
tot <- excl + inc
rm(excl)
# Ignore PSI values when total reads are below the threshold
less <- tot < minReads | is.na(tot)
psi <- as.data.frame(matrix(ncol=ncol(tot), nrow=nrow(tot)))
psi[!less] <- inc[!less]/tot[!less]
colnames(psi) <- colnames(inc)
rm(inc)
rownames(psi) <- paste(eventType, chr, strand, annotation$C1.end,
annotation$A1.start, annotation$A1.end,
annotation$C2.start, annotation$Gene, sep="_")
} else if (eventType == "MXE") {
# Create searchable strings for junctions
incAstr <- junctionString(chr, strand,
annotation$C1.end, annotation$A1.start)
incBstr <- junctionString(chr, strand,
annotation$A1.end, annotation$C2.start)
excAstr <- junctionString(chr, strand,
annotation$C1.end, annotation$A2.start)
excBstr <- junctionString(chr, strand,
annotation$A2.end, annotation$C2.start)
# Get specific junction quantification
coords <- rownames(junctionQuant)
incA <- junctionQuant[fmatch(incAstr, coords), ]
incB <- junctionQuant[fmatch(incBstr, coords), ]
excA <- junctionQuant[fmatch(excAstr, coords), ]
excB <- junctionQuant[fmatch(excBstr, coords), ]
# Calculate inclusion levels
inc <- (incA + incB)
exc <- (excA + excB)
tot <- inc + exc
psi <- inc/tot
# Ignore PSI where total reads are below the threshold
psi[tot < minReads] <- NA
rownames(psi) <- paste(eventType, chr, strand, annotation$C1.end,
annotation$A1.start, annotation$A1.end,
annotation$A2.start, annotation$A2.end,
annotation$C2.start, annotation$Gene, sep="_")
} else if (eventType == "A5SS" || eventType == "AFE") {
# Create searchable strings for junctions
incStr <- junctionString(chr, strand,
annotation$A1.end, annotation$C2.start)
excStr <- junctionString(chr, strand,
annotation$C1.end, annotation$C2.start)
# Get specific junction quantification
coords <- rownames(junctionQuant)
inc <- junctionQuant[fmatch(incStr, coords), ]
exc <- junctionQuant[fmatch(excStr, coords), ]
tot <- inc + exc
# Calculate inclusion levels
psi <- inc/tot
# Ignore PSI where total reads are below the threshold
psi[tot < minReads] <- NA
rownames(psi) <- paste(eventType, chr, strand, annotation$C1.end,
annotation$A1.end, annotation$C2.start,
annotation$Gene, sep="_")
} else if (eventType == "A3SS" || eventType == "ALE") {
# Create searchable strings for junctions
incStr <- junctionString(chr, strand,
annotation$C1.end, annotation$A1.start)
excStr <- junctionString(chr, strand,
annotation$C1.end, annotation$C2.start)
# Get specific junction quantification
coords <- rownames(junctionQuant)
inc <- junctionQuant[fmatch(incStr, coords), ]
exc <- junctionQuant[fmatch(excStr, coords), ]
tot <- inc + exc
# Calculate inclusion levels
psi <- inc/tot
# Ignore PSI where total reads are below the threshold
psi[tot < minReads] <- NA
rownames(psi) <- paste(eventType, chr, strand, annotation$C1.end,
annotation$A1.start, annotation$C2.start,
annotation$Gene, sep = "_")
}
# Clear rows with nothing but NAs
naRows <- rowSums(!is.na(psi)) == 0
return(psi[!naRows, ])
}
|
aa1c14a0770d6bd41c795bc6500717108232dd22
|
6a4593ac8bb196d85d58b2042aa3cca0fbde0eb7
|
/R/NEWimputation_tsai_kNN_v1_doParallel and search_beta.R
|
1c8a993746bcd592e2c59a6867db3e7bb8988044
|
[] |
no_license
|
whiteaegis/Imputation
|
59883addc98378a736d96fea1f2ea6c3d3c9eb77
|
f85be4a1f8f94db1e6c6bb847caeb1246e0998f8
|
refs/heads/master
| 2023-02-25T01:00:06.548131
| 2021-02-02T07:56:28
| 2021-02-02T07:56:28
| 335,208,709
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,990
|
r
|
NEWimputation_tsai_kNN_v1_doParallel and search_beta.R
|
rm(list=ls())
xx=read.delim(file.choose(),header=T,sep=" ")
score.f <- function(target,train,match.score=1, miss.score=0.5, mismatch.score=-5){
m=length(target)
#x.tab=table(target,train)
#id.n = (row.names(x.tab)=="N")
n.allmiss = sum((train=="N") & (target=="N"))
n.match = sum(target==train) - n.allmiss
#n.match = sum(diag(x.tab)[!id.n])
n.miss = sum(target=="N")+sum(train=="N")- n.allmiss
n.mismatch = m - n.match -n.miss
score = (match.score*n.match + miss.score*n.miss + mismatch.score*n.mismatch)/m
return(score)
}
data = as.matrix(xx[1:240,])
result<-matrix(NA,1,20)
use<-real.simulate(data,1000)
y<-use.data$zz
ww<-use$ww
tt<-use$tt
###################
library(doParallel)
cl <- makeCluster(3)
registerDoParallel(cl)
time=Sys.time()
final1=c()
w=20 #window size
k=5 #k for KNN algorithm
n.step=floor(nrow(y)/w)
w.lis<-list()
for(L in 1:n.step){
if (L <= (n.step-1)){
w.lis[[L]]=as.matrix(y[(w*(L-1)+1):(w*L),])
} else{
w.lis[[L]]=as.matrix(y[(w*(L-1)+1):nrow(y),])
}
}
x.impute<-foreach(L=1:n.step,.combine=rbind) %dopar% {
x<-as.matrix((data.frame(w.lis[L])))
sr<-order(apply(x,1,is.N<-function(x){sum(x=="N")}))
for(i in sr){
N.id = which(x[i,]=="N")
x.new = x[i, -N.id]
if (length(N.id) > 0){
for (j in 1:length(N.id)){
x.target=x[-i, N.id[j]]
x.train=x[-i, -N.id]
s=apply(x.train,2,function(x){score.f(x.target,x)})
ss=sort(s,decreasing=TRUE,index.return=TRUE)
x.tab=table(as.character(x.new[ss$ix[1:k]]))
genotype=row.names(x.tab)
#ge<-genotype[which.max(x.tab)]
#ge<-sml(ge)
#x[i, N.id[j]]<-ge
x[i, N.id[j]]<-genotype[which.max(x.tab)]
}
}
}
x
}
final1 <- rbind(final1,x.impute)
stopCluster(cl)
Sys.time()-time
fix(final1)
jj<-use.data[,1:11]
result<-cbind(jj,final1)
dim(result)
names(result)<-names(use.data)
write.table(result, file = "missing_data_finsh_sm.txt", sep = " ", row.names = FALSE,quote=F)
|
e5ab50e97518e18b3c540506529a18851bcde773
|
6bea005b164d6a5d5bb5d30efa700cf8976dd83e
|
/R_scripts/fct_analysis_2way_noNA.R
|
e858c39baf838c2680aabb9de76fc752cc5ee372
|
[] |
no_license
|
MagDub/MFNADA-analysis
|
e24c335dbdb2ea94d9c5766759737c535caf1a39
|
6a5bdd3c407308c45772eed994c2eada8fab7717
|
refs/heads/master
| 2022-12-31T06:15:13.924699
| 2020-10-23T16:11:41
| 2020-10-23T16:11:41
| 288,498,373
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,335
|
r
|
fct_analysis_2way_noNA.R
|
# source and then on terminal ex:
# s<-rm_anova_MF('freq_D_picked_shortH', 'freq_D_picked_longH')
# From example: https://www.datanovia.com/en/lessons/repeated-measures-anova-in-r/
rm_anova_MF_noNA <- function(x1, x2) {
library(car)
library(tidyverse)
library(ggpubr)
library(rstatix)
library(readxl)
#x1 <- 'high_SH'
#x2 <- 'high_LH'
dataMF <- read_excel("~/GoogleDrive/UCL/MF/analysis/stats/data_for_R/thomp_3_params_like_param_recovery_Q0norm_no506_noNA.xlsx")
# Take only subset: concatenate the ones we want
data_tmp <- dataMF
# Change from wide to long format
data_tmp <- data_tmp %>%
gather(key = "hor", value = "freq", x1, x2) %>%
convert_as_factor(Participant, hor)
# Display
head(data_tmp)
# Summary statistics
data_tmp %>%
group_by(hor, Drug) %>%
get_summary_stats(freq, type = "mean_sd")
# Visualisation
bxp <- ggboxplot(
data_tmp, x = "hor", y = "freq",
color="Drug", palette = "jco"
)
bxp
# Anova computation
res.aov <- anova_test(
data = data_tmp, dv = freq, wid = Participant,
within = hor,
between = Drug,
covariate = c(matrix_score, PANASpost_NA),
effect.size = "pes"
)
tab<-get_anova_table(res.aov)
sentence=paste(
"(horizon main effect: F(",
tab$DFn[4],",",tab$DFd[4],")=",round(tab$F[4],3),", p=", round(tab$p[4],3), ", pes=", round(tab$pes[4],3),
"; drug main effect: F(",
tab$DFn[3],",",tab$DFd[3],")=",round(tab$F[3],3),", p=", round(tab$p[3],3), ", pes=", round(tab$pes[3],3),
"; drug-by-horizon interaction: F(",
tab$DFn[7],",",tab$DFd[7],")=",round(tab$F[7],3),", p=", round(tab$p[7],3), ", pes=", round(tab$pes[7],3),
"; WASI main effect: F(",
tab$DFn[1],",",tab$DFd[1],")=",round(tab$F[1],3),", p=", round(tab$p[1],3), ", pes=", round(tab$pes[1],3),
"; WASI-by-horizon interaction: F(",
tab$DFn[5],",",tab$DFd[5],") =",round(tab$F[5],3),", p=", round(tab$p[5],3), ", pes=", round(tab$pes[5],3),
"; PANAS_NA main effect: F(",
tab$DFn[2],",",tab$DFd[2],") =",round(tab$F[2],3),", p=", round(tab$p[2],3), ", pes=", round(tab$pes[2],3),
"; PANAS_NA-by-horizon interaction: F(",
tab$DFn[6],",",tab$DFd[6],") =",round(tab$F[6],3),", p=", round(tab$p[6],3), ", pes=", round(tab$pes[6],3),
")")
return(sentence)
}
|
e2821a6369bdcb130a822a9b590e070483855ecc
|
496cc1b9d32d20ad125a2926bcc2c2c8bf7c8ad2
|
/code/extinction.r
|
8c22b57b8ffc7f97b0afd01b3941ef1c4280d6a9
|
[] |
no_license
|
guanjiahui/nested_bipartite_network
|
e60792d2a6a44a07cf64fb055b380a67452f8bac
|
2563691964d66c9e458577e4b790476b33f33553
|
refs/heads/master
| 2020-04-18T14:53:11.499485
| 2019-01-25T22:47:51
| 2019-01-25T22:47:51
| 167,600,271
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 121
|
r
|
extinction.r
|
rowdegree=apply(EZ,1,sum)
coldegree=apply(EZ,2,sum)
deg=c(rowdegree,coldegree)
target=min(deg)
remove=which(deg==target)
|
d8f4a8a618c0be1230176d60e56b708d3a522580
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/qad/examples/summary.qad.Rd.R
|
4893bee5e786b31dac667056a4a93221be60441d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 361
|
r
|
summary.qad.Rd.R
|
library(qad)
### Name: summary.qad
### Title: Summarize a qad object
### Aliases: summary.qad coef.qad
### ** Examples
n <- 1000
x <- runif(n, 0, 1)
y <- runif(n, 0, 1)
sample <- data.frame(x, y)
##(Not Run)
# mod <- qad(sample, permutation = TRUE, nperm = 100, print = FALSE)
# summary(mod)
# coef(mod)
# coef(mod, select = c('q(x1,x2)','p.q(x1,x2)'))
|
46d034dcc836aa5954e76dbdb1c92abc17237222
|
68d99b83ba7b84348761004ca060f6f6fba90468
|
/CCP India Create Network Matrices Men.R
|
2151add7ded6857d65e5fc359558408f2708f9f7
|
[] |
no_license
|
LiamMcCafferty/IndiaCleanCooking
|
8b642f6476a6b21c1eefa8f395f4a038e189985b
|
3fe2a51b6722821f22f587b38049e2a867a83c45
|
refs/heads/master
| 2021-07-15T05:51:36.646913
| 2020-07-10T21:49:40
| 2020-07-10T21:49:40
| 181,076,843
| 0
| 0
| null | 2019-08-22T22:40:59
| 2019-04-12T20:10:06
|
R
|
UTF-8
|
R
| false
| false
| 8,076
|
r
|
CCP India Create Network Matrices Men.R
|
# Empties Global Environment cache
rm(list=ls())
#Set working directory to current file location
#To set to own working directory
# select "Session->Set Working Directory->To Source File Location"
# then copy result in console into current "setwd("")".
#Importing packages. If not yet installed, packages can be installed by going to:
#Tools -> Install Packages, then enter their exact names from within each
#library()
library(tidyverse) # For data management
library(igraph) # To transform and analyze network data
library(ggnetwork) # To make good-looking network graphs
library(scales) # To add percentages
library(gridExtra) # For montage of networks
library(grid) # For montage of networks
#Although not supposed to load here, the functions below auto-loads the
#following. If not already, make sure to install these packages as well.
# egonet
# sna
# statnet.common
# network
#Imports data and assigns it to variable "dataset", make all strings into
# non-factors to preserve names.
setwd("~/Desktop/India Clean Cooking Project")
dat_men <- read.csv("SNA data_men.csv", stringsAsFactors = FALSE)
#Selecting the names of the alters, and changing all blank entries into NA's
name_select <- dat_men %>% select(snaw_g7_name1_m:snaw_g7_name20_m)
name_select[name_select == ""] <- NA
#Calculating the network size by counting how many values are not NA's (aka, have names)
sum(!is.na(name_select[1,]))
network_size <- apply(name_select, 1, function(x){return(sum(!is.na(x)))})
dat_men$network_size <- network_size
#These lines where to investigate our large number of men with network size of 0
# net_zeros <- dat_men[dat_men$network_size == 0,]
# name_select_zero <- net_zeros %>% select(snaw_g7_name1_m:snaw_g7_name20_m)
dat_men <- dat_men[!dat_men$network_size == 0,]
# x <- dat_men[1,]
make_base_mat <- function(x){
  ##########
  # Function: Builds a symmetric ego-network adjacency matrix from a single
  #           survey row (one respondent), with unused alter slots stripped.
  # Inputs:   x = one-row data frame holding the snaw_g7 (alter names),
  #           snaw_g9 (ego-alter ties) and snaw_g10 (alter-alter ties) columns.
  # Outputs:  square matrix named "EGO" + alter names; cells hold tie strength
  #           after recoding (1 = weak, 2 = strong); diagonal is 0; slots with
  #           no tie data (duplicates / beyond the named limit) are dropped.
  ##########
  # Ego-alter (g9) and alter-alter (g10) tie answers for all 20 name slots.
  shape <- select(x, "snaw_g9_name1_m":"snaw_g10_name19name20_m")
  # Alter-alter (g10) answers are coded one higher than the g9 answers, so
  # shift them down by 1 to put both question blocks on the same scale.
  # NOTE(review): offset inferred from this code only - confirm with codebook.
  shape_alter <- shape %>% select(-snaw_g9_name1_m:-snaw_g9_name20_m)
  shape_alter <- shape_alter - 1
  shape <- cbind(select(shape, snaw_g9_name1_m:snaw_g9_name20_m), shape_alter)
  # Swap codes 1 and 2 (via temporary sentinel value 9) so that in the final
  # matrix 2 = strong tie and 1 = weak tie.
  shape[shape %in% 1] <- 9
  shape[shape %in% 2] <- 1
  shape[shape %in% 9] <- 2
  # Flatten tie values into one integer vector (order matches the
  # lower-triangle fill below).
  ties <- as.integer(shape)
  # 21 x 21 = EGO plus up to 20 named alters.
  mat <- matrix(NA, 21, 21)
  # Fill the lower triangle with the tie vector...
  mat[lower.tri(mat)] <- ties
  # ...mirror it into the upper triangle by transposing...
  mat <- t(mat)
  # ...and refill the lower triangle so the matrix ends up symmetric.
  mat[lower.tri(mat)] <- ties
  # Row/column 1 is the ego; rows/columns 2:21 are alter slots 1:20.
  colnames(mat) <- rownames(mat) <- c("EGO", "1", "2", "3", "4", "5", "6", "7",
                                      "8", "9", "10", "11", "12", "13", "14", "15",
                                      "16", "17", "18", "19", "20")
  # Drop slots whose entire row/column sums to zero: duplicates or unnamed
  # alters that received no tie values. Note: the second positional argument
  # of colSums() is na.rm, so colSums(mat,1) means colSums(mat, na.rm = TRUE).
  mat <- mat[(!colSums(mat,1) == 0), (!colSums(mat,1) == 0)]
  # No self-ties: zero the diagonal.
  diag(mat) <- 0
  # Alter names recorded in the survey (g7); the usable name is the text in
  # parentheses, so keep only the "(...)" part and trim whitespace.
  name_ties <- x %>% select(snaw_g7_name1_m:snaw_g7_name20_m)
  name_ties <- apply(name_ties,2,function(x){return(trimws(gsub("[)]","",unlist(strsplit(x, "[(]"))[2]), "both"))})
  # Build a lookup table mapping numeric slot labels ("1".."20") to names.
  name_ties <- data.frame(Row = name_ties)
  name_ties$Replacement <- c("1", "2", "3", "4", "5", "6", "7",
                             "8", "9", "10", "11", "12", "13", "14",
                             "15", "16", "17", "18", "19", "20")
  colnames(name_ties) <- c("Name", "Current")
  # Prepend a row mapping the "EGO" label to itself so it survives the rename.
  ego_df <- c("EGO", "EGO")
  ego_df <- as.data.frame(t(ego_df))
  colnames(ego_df) <- c("Name", "Current")
  name_ties <- rbind(ego_df, name_ties)
  # Replace the numeric slot labels on the matrix with the actual alter names.
  names <- match(colnames(mat), name_ties$Current)
  colnames(mat) <- rownames(mat) <- name_ties$Name[names]
  return(mat)
}
for(i in 1:nrow(dat_men)){
print(i)
print(make_base_mat(dat_men[i,]))
}
x <- dat_men[15,]
# Builds the social-network image for one respondent.
make_image <- function(x) {
  ##########
  # Function: Creates a network graph for a single survey row, with nodes
  #           labelled by alter name and edges coloured/styled by tie strength.
  # Inputs:   x = one-row dataset (same columns make_base_mat() expects).
  # Outputs:  plot1, a single ggplot network graph; if any tie strength is
  #           missing, a blank plot carrying an error title is returned instead.
  ##########
  # Coerce to a tibble for downstream dplyr verbs.
  x <- tbl_df(x)
  # Adjacency matrix for this respondent's ego network.
  mat <- make_base_mat(x)
  # Undirected weighted igraph object; weights are the recoded tie strengths.
  ego4.g <- graph.adjacency(mat, mode = "undirected", weighted = TRUE)
  colors <- c("blue", "red") # NOTE(review): defined but unused below; the
                             # scales use explicit c("red","blue") instead.
  # Ego node is dark grey, alters are white.
  ego_col <- ifelse(V(ego4.g)$name == "EGO", "grey17", "white")
  # Map numeric weights to labels: NA -> "Unknown", 1 -> weak, else strong.
  weight.ego <- sapply(E(ego4.g)$weight, function(yk){
    if(is.na(yk)){
      return("Unknown")
    }else if(yk == 1){
      return("Weak Tie")
    }else{
      return("Strong Tie")
    }
  })
  if ("Unknown" %in% weight.ego ){
    # Insufficient tie data: emit a blank graph with an error message title.
    print("Error: Some networks ties are unknown ")
    plot1 <- ggplot(ego4.g, aes(x = x, y = y, xend = xend, yend = yend, na.rm = FALSE)) +
      geom_blank() + ggtitle("Data doesn't work: some network ties are unknown")
  }else{
    # Replace numeric weights with their labels for legend display.
    E(ego4.g)$weight <- weight.ego
    # Build the network graph.
    plot1 <- ggplot(ego4.g, aes(x = x, y = y, xend = xend, yend = yend, na.rm = FALSE)) +
      # Edge colour and line type both encode tie strength.
      geom_edges(aes(linetype = as.factor(weight), color = (weight)), curvature = 0.1) +
      # Nodes filled with the ego/alter palette computed above.
      geom_nodes(fill = ego_col, size = 14, shape = 21) +
      # Label each node with the alter name from the adjacency matrix.
      geom_nodelabel(label = rownames(mat))+
      theme_blank() +
      # Legend describing edge weight, placed below the plot.
      theme(legend.position = "bottom", #format the legend
            legend.title = element_text(face = "bold", size = 15),
            legend.text = element_text(size = 10)) +
      theme(legend.title.align = 0.5) +
      theme(plot.title = element_text(size = 18, face = "bold")) +
      scale_colour_manual(name = "Tie Strength", values = c("red", "blue"))+
      scale_shape_manual(name = "Tie Strength", values = c(22, 21)) +
      scale_linetype_manual(name = "Tie Strength", values = c("solid", "dashed")) +
      # Margins around the plot.
      theme(plot.margin = unit(c(1.5, 1.5, 1.5, 1.5), "cm")) +
      # Formatting for the legend's keys.
      theme(legend.direction = 'vertical',
            legend.key.height = unit(1, "line"),
            legend.key = element_rect(colour = NA, fill = NA))
  }
  return(plot1)
}
# Build one ego-network adjacency matrix per respondent: row i of dat_men is
# passed to make_base_mat() and its result stored at position i of the list.
matrix_list <- lapply(
  seq_len(nrow(dat_men)),
  function(row_idx) make_base_mat(dat_men[row_idx, ])
)
|
3514aaf44d7d656113b6dc174294671fc4dad245
|
1e569b29b041afcd4acd7a6d6b97687cf4410f81
|
/app.R
|
bb6c55fee13c78e2f31bfe5e3d31a25a6402a89c
|
[
"MIT"
] |
permissive
|
ivelasq/ganttrrr
|
c96dfcc597378f59a7fb99f10f007c0a03bb8df9
|
e5ef2f0c53dccb38eea7b39fd69ad142e9ecceb5
|
refs/heads/master
| 2020-06-30T22:12:42.199742
| 2020-03-08T01:52:30
| 2020-03-08T01:52:30
| 200,964,526
| 1
| 0
| null | 2020-03-08T01:52:31
| 2019-08-07T03:24:08
|
JavaScript
|
UTF-8
|
R
| false
| false
| 12,771
|
r
|
app.R
|
#########################
# Creating ganttrrr app #
#########################
# resources:
# https://stackoverflow.com/questions/22272571/data-input-via-shinytable-in-r-shiny-application
# https://rdrr.io/cran/DiagrammeR/man/grVizOutput.html
# https://shiny.rstudio.com/articles/action-buttons.html
# libraries ---------------------------------------------------------------
library(tidyverse)
library(shiny)
library(shinythemes)
library(shinydashboard)
library(rhandsontable)
library(htmlwidgets)
library(DiagrammeR)
library(glue)
library(here)
# app ---------------------------------------------------------------------
# App-level store for the most recent handsontable contents. setHot() is
# called from inside reactive observers and writes into this list with `<<-`
# (global assignment), so the chart builder can read the latest table later.
values <- list()
setHot <- function(x)
  values[["hot"]] <<- x
df <-
data.frame(
Category = c(rep(NA_character_, 7)),
Task = c(rep(NA_character_, 7)),
Status = c(rep(NA_character_, 7)),
Critical = c(rep(FALSE, 7)),
Start = c(rep(NA_character_, 7)),
Duration = c(rep(NA_integer_, 7)),
stringsAsFactors = FALSE
)
# UI: custom CSS, a title row (app name, subtitle, download-code badge), and a
# sidebar (instructions + action buttons) next to the editable table and the
# rendered gantt chart.
# Fix: the original had a trailing comma inside column(1, ..., ), which
# creates an empty argument ("argument is empty" error when evaluated); the
# comma and the redundant parentheses around tags$a() are removed.
ui <- fluidPage(
  # Custom stylesheet from www/style.css.
  tags$head(tags$link(rel = "stylesheet", type = "text/css", href = "style.css")),
  # Header: app name, subtitle, and a badge linking to the underlying code.
  titlePanel(windowTitle = "ganttrrrrrrrrrrr",
             fluidRow(
               column(3,
                      h1("ganttrrr")),
               column(
                 8,
                 h2(
                   "A Shiny App for Creating Gantt Charts Using DiagrammeR::mermaid()"
                 )
               ),
               column(1,
                      tags$a(
                        img(src = "Download Code.png",
                            align = "right",
                            style = "width:150px;height:150px;"),
                        href = "https://raw.githubusercontent.com/ivelasq/ganttrrr/master/code/agenda_gantt.R"
                      )
               )
             ) # end fluidRow
  ), # end titlePanel
  # Sidebar: usage instructions and the action buttons.
  sidebarLayout(
    sidebarPanel(
      helpText("Right-click on the table to delete/insert rows.",
               tags$br(),
               "Double-click on a cell to edit.",
               tags$br(),
               "Once edited, save table and create chart."
      ),
      wellPanel(
        actionButton("load1", "Load Example 1: Creating an R Package"),
        actionButton("load2", "Load Example 2: rstudio::conf(2020) Agenda"),
        actionButton("save", "Save Table & Create Chart"),
        actionButton("clear", "Clear Table")
        # tags$br(),
        # tags$br(),
        # downloadButton("export", "Export PDF")
      )
    ), # sidebarPanel
    # Main area: the editable table above the rendered gantt chart.
    mainPanel(rHandsontableOutput("hot"),
              DiagrammeROutput("gantt_render"))
  ) # sidebarLayout
) # fluidPage
server <- function(input, output) {

  # Render `tbl` as the app's editable handsontable: Status is a dropdown,
  # Critical is centred, Start is a date picker, and the right-click context
  # menu gains a "Download to CSV" entry. This replaces four byte-identical
  # copy-pasted render blocks in the original.
  render_hot <- function(tbl) {
    renderRHandsontable({
      rhandsontable(tbl, stretchH = "all") %>%
        hot_col(
          col = "Status",
          type = "dropdown",
          source = c("To Do", "In Progress", "Done")
        ) %>%
        hot_col(col = "Critical", halign = "htCenter") %>%
        hot_col(col = "Start",
                type = "date",
                dateFormat = "YYYY-MM-DD") %>%
        hot_context_menu(customOpts = list(csv = list(
          name = "Download to CSV",
          callback = htmlwidgets::JS(
            "function (key, options) {
                         var csv = csvString(this);
                         var link = document.createElement('a');
                         link.setAttribute('href', 'data:text/plain;charset=utf-8,' +
                           encodeURIComponent(csv));
                         link.setAttribute('download', 'data.csv');
                         document.body.appendChild(link);
                         link.click();
                         document.body.removeChild(link);
                       }"
          )
        )))
    })
  }

  # Load Example 1: creating an R package.
  observeEvent(input$load1, {
    output$hot <- render_hot(readRDS(here::here("data", "example1.rds")))
  })

  # Load Example 2: rstudio::conf(2020) agenda.
  observeEvent(input$load2, {
    output$hot <- render_hot(readRDS(here::here("data", "example2.rds")))
  })

  # Clear Table: reset to the blank 7-row template.
  observeEvent(input$clear, {
    blank_df <-
      data.frame(
        Category = c(rep(NA_character_, 7)),
        Task = c(rep(NA_character_, 7)),
        Status = c(rep(NA_character_, 7)),
        Critical = c(rep(FALSE, 7)),
        Start = c(rep(NA_character_, 7)),
        Duration = c(rep(NA_integer_, 7)),
        stringsAsFactors = FALSE
      )
    output$hot <- render_hot(blank_df)
  })

  # Mirror every user edit of the table into the app-level `values` store via
  # setHot(), so the chart builder can read the latest contents.
  observe({
    if (!is.null(input$hot)) {
      df <- (hot_to_r(input$hot))
      setHot(df)
    }
  })

  # Initial (blank) table shown on startup.
  output$hot <- render_hot(df)

  # Build the mermaid gantt definition when "Save Table & Create Chart" is
  # clicked.
  diagram <-
    eventReactive(input$save, {
      if (!is.null(values[["hot"]])) { # prefer the user-edited table, if any
        df <- values$hot
      }

      # Reshape into mermaid's "task:status,id,start,duration" record format.
      df <-
        df %>%
        data.frame %>%
        mutate(status = case_when(Status == "To Do" & Critical == TRUE ~ "crit",
                                  Status == "To Do" & Critical == FALSE ~ "",
                                  Status == "In Progress" & Critical == TRUE ~ "active, crit",
                                  Status == "In Progress" & Critical == FALSE ~ "active",
                                  Status == "Done" & Critical == TRUE ~ "done, crit",
                                  Status == "Done" & Critical == FALSE ~ "done"
        ),
        start = as.Date(Start, "%Y-%m-%d"),
        end = paste0(Duration, "d")) %>%
        select(-Status, -Critical, -Start, -Duration) %>%
        rename(task = Task,
               pos = Category)

      # One mermaid "section" per category prefix (one..sev). This replaces
      # seven near-identical copy-pasted blocks in the original; the generated
      # text is character-for-character the same.
      prefixes <- c("one", "two", "thr", "fou", "fiv", "six", "sev")
      sections <- vapply(seq_along(prefixes), function(k) {
        rows <- df %>% filter(pos %in% str_subset(df$pos, paste0("^", prefixes[k])))
        paste0(
          "section Category ", k, "\n",
          paste(rows %>%
                  unite(i, task, status, sep = ":") %>%
                  unite(j, i, pos, start, end, sep = ",") %>%
                  .$j,
                collapse = "\n"),
          "\n"
        )
      }, character(1))

      gantt <-
        DiagrammeR::mermaid(
          paste0(
            "gantt", "\n",
            "dateFormat YYYY-MM-DD", "\n",
            paste0(sections, collapse = "")
          ), width = 1000
        )

      # Weekly (Monday) tick marks on the time axis, formatted as %d%b%y.
      gantt$x$config = list(ganttConfig = list(
        axisFormatter = list(list(
          "%d%b%y"
          ,htmlwidgets::JS(
            'function(d){ return d.getDay() == 1 }'
          )
        ))
      ))

      gantt
    })

  output$gantt_render <- renderDiagrammeR({
    req(diagram())
    diagram()
  })

  # output$export = downloadHandler(
  #   filename = function() {"gantt_chart.pdf"},
  #   content = function(file) {
  #     pdf(file, onefile = TRUE)
  #     values$gantt %>%
  #       htmltools::html_print() %>%
  #       webshot::webshot(file = "gantt_chart.pdf")
  #     dev.off()
  #   }
  # )

  # output$Save_diagrammeR_plot <- downloadHandler(
  #   filename = "gantt_chart.html",
  #   content = function(file) {
  #     save_html(renderDiagrammeR(req(diagram())))
  #   }
  # )
}
## run app
shinyApp(ui = ui, server = server)
|
d7d68d726f85e303e48c6b0ec917935cc9f20010
|
d9c865793a7585b64358337e8dc5c94d24f0d312
|
/data_evictions_download.R
|
fddb4e3de6f824ac46ddb33fcad76100e9f64e4c
|
[] |
no_license
|
a-aliseda/Evictions_moratorium_impact_AAA
|
7b48cbb951c3852dccc543d2f2f144f129c46b62
|
fd811693e48838db43059797ac64185bebb1eeff
|
refs/heads/master
| 2022-12-03T22:03:39.868167
| 2020-08-08T02:04:31
| 2020-08-08T02:04:31
| 285,948,695
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,546
|
r
|
data_evictions_download.R
|
### Coronavirus Resource Center Excercise
## Part II: Data cleaning
## Angel Aliseda Alonso
#install.packages("aws.s3")
# Libraries used to clean the data
library(aws.s3)
library(tidyverse)
library(readxl)
# Download and merge data from The Eviction Lab
# Matthew Desmond, Ashley Gromis, Lavar Edmonds, James Hendrickson, Katie
# Krywokulski, Lillian Leung, and Adam Porton. Eviction Lab National Database: Version
# 1.0. Princeton: Princeton University, 2018, www.evictionlab.org.
# The Eviction Lab has its data uploaded in an AWS S3 bucket named viction-lab-data-downloads
# Download data from bucket using library aws.s3
# https://cran.r-project.org/web/packages/aws.s3/readme/README.html
# All 50 state abbreviations plus DC.
states_abb <- c(state.abb, "DC")
# One S3 path per state inside the eviction-lab-data-downloads bucket.
states_path_file <- str_c("eviction-lab-data-downloads/", states_abb)
# Download each state's "states.csv" into a list, then combine once.
# Collecting pieces in a list and rbind-ing a single time avoids the O(n^2)
# cost of growing a data frame with rbind() inside the loop.
evictions_list <- lapply(states_path_file, function(bucket_path) {
  s3read_using(FUN = read.csv,
               bucket = bucket_path,
               object = "states.csv")
})
# Combine all state files into one large dataset.
evictions_states <- do.call(rbind, evictions_list)
# Variables that were removed from the cleaned dataset because they do not provide additional information
summary(evictions_states$parent.location)
summary(evictions_states$low.flag)
summary(evictions_states$imputed)
summary(evictions_states$subbed)
evictions_states_clean <- evictions_states %>%
select(-parent.location, -low.flag, -imputed, -subbed)
# Subset of data for the 2016 eviction rate as it is the most recent available value
evictions_states_2016 <- evictions_states_clean %>%
filter(year == 2016)
# With this dataset, we can uniderstand characteristics of states with high eviction rates
# AK, AR, ND, SD do not have number of evictions nor eviction rate
# States with the highest eviction rate
evictions_states_2016 %>%
arrange(desc(eviction.rate)) %>%
select(name, eviction.rate) %>%
head()
# States with the lowest eviction rate without states that do not have number of evictions
evictions_states_2016 %>%
arrange(desc(eviction.rate)) %>%
select(name, eviction.rate) %>%
filter(!(is.na(eviction.rate))) %>%
tail()
# Merge with rental assistance programs database
# The rental assistant programs database was retreived directly from the
# National Low-Income Housing Coalition website at:
# https://docs.google.com/spreadsheets/d/1hLfybfo9NydIptQu5wghUpKXecimh3gaoqT7LU1JGc8/edit#gid=79194074
# Read the Excel file into R
# First three rows were deleted because they were not part of the database
rental_assistance_df <- read_excel("NLIHC COVID-19 Rental Assistance Database.xlsx",
skip = 3) %>%
group_by(State) %>%
summarize(total_programs = n())
# Check State variable in rental assistance programs dataset to verify it can be used as a key to join it with evictions dataset
table(rental_assistance_df$State)
# Recode "Washington DC" to "District of Columbia" in rental assistance programs dataset
# Rename variable "State" to "name" so that it matches to the evictions dataset
rental_assistance_df <- rental_assistance_df %>%
mutate(State = recode(State,
`Washington DC` = "District of Columbia")) %>%
rename("name" = "State")
# Join the 2016 evictions dataset and the rental assitance programs dataset and
# create a new variable that standardizes the total number of rental assistance programs
# by total number of renter occupied households
evictions_rental_assistance_states_2016 <- left_join(evictions_states_2016, rental_assistance_df) %>%
mutate(total_programs = replace_na(total_programs, 0),
programs_by_renter_hh = (total_programs/renter.occupied.households)*100000)
# Using this dataset we can start to look at which States have the most and the least
# rental assistance programs but also states that have the most and the least rental assistant
# programs per 100,000 renter households
# States with highest number of programs
evictions_rental_assistance_states_2016 %>%
arrange(desc(total_programs)) %>%
select(name, total_programs) %>%
head()
# States with lowest number of programs
evictions_rental_assistance_states_2016 %>%
arrange(desc(total_programs)) %>%
select(name, total_programs) %>%
tail()
# States with highest proportion of programs per 100,000 renter households
evictions_rental_assistance_states_2016 %>%
arrange(desc(programs_by_renter_hh)) %>%
select(name, programs_by_renter_hh) %>%
head()
# States with lowest proportion of programs per 100,000 renter households
evictions_rental_assistance_states_2016 %>%
arrange(desc(programs_by_renter_hh)) %>%
select(name, programs_by_renter_hh) %>%
tail()
# Export the final cleaned datasets to csv
# evictions_2016_clean: includes all variables from Eviction Lab and data from rental assistance programs for 2016
# evictions_2000_2016_clean: only includes variables from Eviction Lab from 2000 to 2016
write.csv(evictions_rental_assistance_states_2016, file = "evictions_2016_clean.csv")
write.csv(evictions_states_clean, file = "evictions_2000_2016_clean.csv")
|
e2b81b949ef9e260ac6f7e3e2eb91b5eae89b003
|
86e3513f8b3acb1427d0faa0ce41051c1e1c99da
|
/Retail-V2/LinReg2-red.R
|
c095fad475b6f719a15f0a0d022e336e239d1485
|
[] |
no_license
|
prithwis/BigRe
|
476c0998c0414dc74363e03fd8bd8d4f3a8a9f14
|
4eff29a66bb75910239b7c00ae4cfce2f37f11e5
|
refs/heads/master
| 2023-06-11T03:35:31.033973
| 2023-05-29T05:07:32
| 2023-05-29T05:07:32
| 41,527,639
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,952
|
r
|
LinReg2-red.R
|
#! /usr/bin/env Rscript
# Linear Regression Reducer Script - written by Prithwis Mukerjee
# Used in the Retail Sales Application
# EstValue is function called to implement the linear regression function of R
#
# Parameters are as follows
# pSKU - string - name of SKU
# pdays - list of days for which data is available
# psale - list of values of sale data
# Nth - integer - N-th day for which the estimate will be made
#
# Other variables are as follows
# Est - estimated sale for a particular day
# Fit a simple linear regression of sales on days and estimate the sale value
# for day N. Returns a formatted record string of the form:
#   "Est[ <N> <estimate> ] Dat: <day1> <sale1> ... <dayK> <saleK> \n"
#
# pdays - space-separated day numbers, as a single string
# psale - space-separated sale values, as a single string
# N     - the day for which the estimate is made
EstValue <- function(pdays, psale, N) {
  # Parse the space-separated day/sale strings into numeric vectors.
  day_vals <- as.numeric(scan(text = pdays, sep = " "))
  sale_vals <- as.numeric(scan(text = psale, sep = " "))
  # Ordinary least squares fit: sale ~ day (the all-important lm() call).
  fit <- lm(sale_vals ~ day_vals)
  est <- predict(fit, data.frame(day_vals = N))
  # Assemble the output record: estimate header followed by the raw data
  # pairs, everything joined by single spaces as in the original format.
  paste("Est[", N, est, "] Dat:",
        paste(day_vals, sale_vals, collapse = " "),
        "\n")
}
# -- the Reducer (Hadoop-streaming style: reads mapper output from stdin) --
#
# mapOut columns: mapkey = SKU name, mapval = "day$sale" string.
# NOTE(review): this reducer assumes rows arrive grouped/sorted by SKU, as
# Hadoop streaming guarantees - confirm before running outside that framework.
mapOut <- read.table("stdin",col.names=c("mapkey","mapval"))
# State for the SKU group currently being accumulated.
CurrSKU <- as.character(mapOut[1,]$mapkey)
CurrVal <- ""
FIRSTROW = TRUE
for(i in 1:nrow(mapOut)){
  SKU <- as.character(mapOut[i,]$mapkey)
  Val <- as.character(mapOut[i,]$mapval)
  # Split "day$sale" into its two components ($ is regex-escaped).
  DataVal <- unlist(strsplit(Val,"\\$"))
  if (identical(SKU,CurrSKU)){
    # Same SKU as before: append this row's day/sale to the running strings.
    # NOTE(review): FIRSTROW is never reset when a new SKU starts; this is
    # harmless only because the else-branch below seeds days/sale directly.
    CurrVal = paste(CurrVal, Val)
    if (FIRSTROW) {
      days <- DataVal[1]
      sale <- DataVal[2]
      FIRSTROW = FALSE
    } else {
      days = paste(days,DataVal[1])
      sale = paste(sale,DataVal[2])
    }
  }
  else {
    # New SKU encountered: emit the day-9 estimate for the finished group,
    # then start accumulating the new group from this row.
    cat(CurrSKU,EstValue(days,sale,9))
    CurrSKU <- SKU
    CurrVal <- Val
    days <- DataVal[1]
    sale <- DataVal[2]
  }
}
# Emit the final SKU group (the loop only emits on group change).
cat(CurrSKU,EstValue(days,sale,9))
|
a6eb63a7ff7e610046d79b3ec8d8e39a9fbea971
|
95db9afbf74cc1513f665e1e92565d514093af52
|
/src/R/evs_romopomics_pipeline.R
|
a17601cfc7d5e4d8216fdaed839032f99dda387c
|
[
"MIT"
] |
permissive
|
ngiangre/GrOMOP
|
cb3c04f6ed0428bbcd07f5ee87ed1b526fab35e0
|
0eeb9aa7505949302553e44837389e85eaaccd9f
|
refs/heads/main
| 2023-02-23T12:17:46.007405
| 2021-01-25T14:05:41
| 2021-01-25T14:05:41
| 332,530,238
| 0
| 0
| null | 2021-01-24T18:55:35
| 2021-01-24T18:55:35
| null |
UTF-8
|
R
| false
| false
| 835
|
r
|
evs_romopomics_pipeline.R
|
# Build an OMOP-CDM SQLite database from the EVS sample sheet via ROMOPOmics.
#install.packages("pacman")
library(pacman)
p_load(tidyverse,devtools,data.table,DBI,here)
#install_github("AndrewC160/ROMOPOmics",force=T)
library(ROMOPOmics)
# Load the customised OMOP CDM v6 data model shipped with ROMOPOmics.
dm_file <- system.file("extdata","OMOP_CDM_v6_0_custom.csv",package="ROMOPOmics",mustWork = TRUE)
dm <- loadDataModel(master_table_file = dm_file)
# Mask file: maps the input sheet's column names onto data-model fields.
msk_file <- here("data/evs_mask.tsv")
msks <- loadModelMasks(msk_file)
# Sample/input sheet.
in_file <- here("data/evs.tsv")
# Parse the input sheet against the data model using the masks.
omop_inputs <- readInputFile(input_file=in_file,data_model=dm,mask_table=msks,transpose_input_table = T)
# Merge the parsed inputs into the per-table OMOP structures.
db_inputs <- combineInputTables(input_table_list = omop_inputs)
# Write everything into an SQLite database, then close the connection.
omop_db <- buildSQLDBR(omop_tables = db_inputs, sql_db_file=here("data/evs.sqlite"))
DBI::dbDisconnect(omop_db)
|
efa2c16c49ab67de99118f4fba2e0b02e88a8a44
|
ee1af63213eaf268bf38a51e52883e43ca811937
|
/create_amp_plot.r
|
3c48824aba6117635e2463fb4249cd1c76ab16c8
|
[] |
no_license
|
geocarvalho/r-bioinfo-ds
|
06ce4ae515981989274ade8f582988ea6fef6ffa
|
596daf835f2d8c64055e96906e6f3bda7fa3d42b
|
refs/heads/master
| 2023-05-11T14:45:41.841356
| 2023-04-28T08:02:47
| 2023-04-28T08:02:47
| 92,194,815
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 843
|
r
|
create_amp_plot.r
|
# Plot per-amplicon mean coverage across samples from a BED-like table.
library(ggplot2)
library(dplyr)
# amplicon_regions.bed columns: transcript, start, end, source, mean
# coverage, sample ID (tab-separated, with header).
cov_bed <- read.table(file='amplicon_regions.bed', sep='\t', header=TRUE,
                      col.names=c("transcript", "start", "end", "source", "mean_cov", "samples"))
head(cov_bed)
# Take one sample as example 02MI214440581A
# NOTE(review): one_df is computed but never used below.
one_df <- filter(cov_bed, samples=="02MI214440581A")
# Dot plot of mean coverage per sample; dotsize/stackratio tuned for density.
p <- ggplot(cov_bed, aes(x=samples, y=mean_cov)) +
  geom_dotplot(binaxis='y', stackdir='center',
               stackratio=0.35, dotsize=0.1)
# Coverage threshold drawn as a dashed red reference line.
t <- 500
# Flip coordinates so sample names read horizontally on the y axis, and
# annotate the threshold value next to its line.
p + ggtitle("Amplicon mean coverage across samples") + xlab("Samples") + ylab("Amplicon mean coverage") +
  coord_flip() + geom_hline(yintercept=t, linetype="dashed", color = "red") +
  geom_text(aes(0,t,label = t, vjust = -1), color="red")
ggsave("amplicon_regions_plot.png")
# Bar plot for amplicon coverage and snp
|
74a69ff0810ee015027a3122c0ebcada60648a77
|
0dbd60b634c090f2153f21f945fb306495a67df6
|
/R/call_force_spinup.R
|
455404e8d26d373e8a9221a3b57e26e49aed55e8
|
[] |
no_license
|
wechuli/large-pr
|
afa8ec8535dd3917f4f05476aa54ddac7a5e9741
|
5dea2a26fb71e9f996fd0b3ab6b069a31a44a43f
|
refs/heads/master
| 2022-12-11T14:00:18.003421
| 2020-09-14T14:17:54
| 2020-09-14T14:17:54
| 295,407,336
| 0
| 0
| null | 2020-09-14T14:17:55
| 2020-09-14T12:20:52
|
TypeScript
|
UTF-8
|
R
| false
| false
| 1,876
|
r
|
call_force_spinup.R
|
# Generate forcing files for the spin-up gap (years before the observational
# hindcast) by reusing reference-year forcing through make_force_spinup().
# Fixes: `1:length(years)` replaced by seq_along() (safe for empty vectors),
# and the loop-invariant out.dir assignment hoisted out of each loop.
source('C:/Users/joseph.caracappa/Documents/GitHub/neus-atlantis/R/make_force_spinup.R')

start.year <- 1964

# --- Pass 1: transport + state-variable forcing for 1964-1992,
#     based on the 1993 reference files. ---
years <- 1964:1992
out.dir <- 'C:/Users/joseph.caracappa/Documents/Atlantis/Obs_Hindcast/'
for (i in seq_along(years)) {
  make_force_spinup(
    out.dir = out.dir,
    trans.prefix = 'GLORYS_Atlantis_Transport_',
    statevar.prefix = 'Obs_Hindcast_statevars_',
    phyto.prefix = NA,
    transport.file = paste0(out.dir, 'transport/GLORYS_Atlantis_Transport_1993.nc'),
    statevar.file = paste0(out.dir, 'statevars/Obs_Hindcast_statevars_1993.nc'),
    phyto.file = NA,
    force.dir = paste0(out.dir, 'Forcing_Files/'),
    start.year = start.year,
    new.year = years[i],
    param.temp = 'C:/Users/joseph.caracappa/Documents/Atlantis/Obs_Hindcast/Forcing_Files/obs_hindcast_hydroconstruct_template.prm',
    bat.temp = 'C:/Users/joseph.caracappa/Documents/Atlantis/Obs_Hindcast/Forcing_Files/hydroconstruct_run_template.bat'
  )
  # Progress marker (index within `years`), matching the original run log.
  print(i)
}

# --- Pass 2: phytoplankton forcing for 1964-1997,
#     based on the 1998 reference files. ---
years <- 1964:1997
out.dir <- 'C:/Users/joseph.caracappa/Documents/Atlantis/Obs_Hindcast/Forcing_Files/Annual_Output/'
for (i in seq_along(years)) {
  make_force_spinup(
    out.dir = out.dir,
    trans.prefix = NA,
    statevar.prefix = NA,
    phyto.prefix = 'Phyto_Forcing_',
    transport.file = NA,
    statevar.file = NA,
    phyto.file = paste0(out.dir, 'phyto_statevars/Phyto_Forcing_1998.nc'),
    force.dir = out.dir,
    start.year = start.year,
    new.year = years[i],
    param.temp = 'C:/Users/joseph.caracappa/Documents/Atlantis/Obs_Hindcast/Forcing_Files/obs_hindcast_hydroconstruct_template.prm',
    bat.temp = 'C:/Users/joseph.caracappa/Documents/Atlantis/Obs_Hindcast/Forcing_Files/hydroconstruct_run_template.bat'
  )
}
|
966949f940d1b80049a807c2b83de7183b9992da
|
f8b561f2fd65d7aa14be4da0a4594020639ec3bd
|
/man/massage_data.Rd
|
b8cc69e88e1325df554e4f461072317096f2395b
|
[
"MIT"
] |
permissive
|
JonasMoss/straussR
|
eeba4bec3b886684673f2dc8c5c7aa71d20dd072
|
175eeea2c1ea39f9100e54ac9b995aa17db80092
|
refs/heads/master
| 2020-03-09T13:31:52.410069
| 2018-06-25T08:33:36
| 2018-06-25T08:33:36
| 127,399,219
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 781
|
rd
|
massage_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prior_massage.R
\name{massage_data}
\alias{massage_data}
\title{Massages \code{formula}, \code{priors}, and \code{data} for \code{straussR}.}
\usage{
massage_data(formula, priors, data = NULL)
}
\arguments{
\item{formula}{A formula of the form given to \code{straussR}.}
\item{priors}{A list of priors. Should match the parameters of
\code{formula}.}
\item{data}{An optional data frame.}
}
\value{
A list containing all the data needed for \code{straussR}-
}
\description{
Converts the information contained in \code{formula}, \code{priors},
and \code{data} for \code{straussR} to a form useable by \code{straussR}.
Checks for compatibility between priors and formulas and does some error
checking.
}
|
b88e483b3533a516113fdb39f0a8627b291f69c1
|
072a16fb4a4222c9d84cd1f2cb270a40fc29bca7
|
/man/listBenchAnalysis.Rd
|
3bb077176fe096d7bdd59f9b2a2884a3cf2ba220
|
[] |
no_license
|
venelin/benchtable
|
cc20302b916b5f6f910497ac351c1a0e3a2c8178
|
0f666b63a0c9a874cceb9527ca0e26ccd17bb390
|
refs/heads/master
| 2020-04-09T17:24:00.472251
| 2018-12-05T12:55:05
| 2018-12-05T12:55:05
| 160,479,895
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 774
|
rd
|
listBenchAnalysis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyse.R
\name{listBenchAnalysis}
\alias{listBenchAnalysis}
\title{Create a list of matrices with results from analyseBenchData as columns}
\usage{
listBenchAnalysis(benchdata, filter, what, levels)
}
\arguments{
\item{benchdata}{a data.table with a key}
\item{filter}{a list of elements with modes corresponding to the first key-columns of
benchdata}
\item{what}{a function(row) returning a list of statistics for a row of benchdata}
\item{levels}{a vector with elements corresponding to the columns of the matrices. The
mode of this vector should correspond to the mode of a key-column in benchdata}
}
\description{
Create a list of matrices with results from analyseBenchData as columns
}
|
baec3fa4195afc07c9a9a31d65d7d9dd21017678
|
cf30dd1ddd002cc1475995a0e3bd00d0a5152b2b
|
/IBD/IBD_prediction/antibody_prediction_v10.R
|
2f44d305f3c248e00167f0cdd684d29a3aaade61
|
[
"MIT"
] |
permissive
|
abourgonje/Phip-Seq_LLD-IBD
|
5c4f226433a72adaa5bb030fd2a772325a59b761
|
00a6eb4c5fe8b0fa5d8d3b280f52794b8d2b8402
|
refs/heads/main
| 2023-05-10T02:51:24.648784
| 2023-04-29T08:06:27
| 2023-04-29T08:06:27
| 434,927,784
| 0
| 0
|
MIT
| 2021-12-04T14:46:23
| 2021-12-04T14:46:22
| null |
UTF-8
|
R
| false
| false
| 27,401
|
r
|
antibody_prediction_v10.R
|
# =========================================
# By: R.Gacesa (UMCG, 2021)
#
# prediction of UC, CD or CD vs UC
# using antibody panels
# =========================================
#
# load libraries
# =========================================
library(caret)
library(pROC)
library(plotROC)
library(foreach)
library(plyr)
library(gbm)
library(doSNOW)
# set WD and load helper functions
# =========================================
setwd('D:/UMCG/Arno_Antibody_prediction/')
source('R_ML_helperscripts.R')
source('R_ML_scripts_v4.R')
# prep paralelization
registerDoSNOW(makeCluster(12, type = "SOCK"))
# PREDICTIVE MODELLING RUN FOR ANTIBODIES
# =========================================================
# PART I: training (on training set), test set validation,
# segata data external (negative) validation
# =========================================================
# RUN PARAMETERS
# =========================================================
# runNameMain has to be set to one of:
# - "CD" for Crohn's disease prediction
# - "UC" for Ulcerative colitis prediction
# - "CD_vs_UC" for separation between CD and UC
runNameMain <- "CD"
pC <- "Y"; if (runNameMain == "CD_vs_UC") {pC <- "CD"}
# mlMethods: caret-implemented ML algorithms to use
# NOTE: code is implemented & tested for glmnet, gbm, avNNet, svmRadial
# and might not work with other algorithms
mlMethods <- c("glmnet", "gbm", "avNNet", "svmRadial")
# datasets to consider (all = all antibodies, agilent & twist are sub-sets)
dataSubsets <- c("all","agilent","twist")
# training set parameters
trPerc <- 0.80 # proportion of data used for training
# feature pre-selection parameters (will be calculated from training set)
pVF <- 0.005 # feature selection p-value cutoff
minP <- 0.01 # minimal presence below which features are removed
maxP <- 0.99 # maximal presence above which features are removed
# Plot colours keyed to the phenotype being modelled:
# red = CD, blue = UC, purple = CD vs UC.
run_palette <- switch(runNameMain,
                      CD       = c("red3", "red3", "red1", "red4"),
                      UC       = c("blue3", "blue3", "blue1", "blue4"),
                      CD_vs_UC = c("purple3", "purple3", "purple1", "purple4"))
myCol  <- run_palette[1]
myCol2 <- run_palette[2]
myCol3 <- run_palette[3]
myCol4 <- run_palette[4]
# MODEL TRAINING AND TESTING
# ==========================================
# loop iterates over algorithms and data-subsets
for (dType in dataSubsets) {
for (mlMethod in mlMethods) {
# set run name
runName <- paste0(runNameMain,'_base_',dType,'_',mlMethod)
print(paste0(' >> STARTING ML RUN FOR ',runName))
# LOAD DATA
# =====================
# main data
inDF <- read.table(paste0('Datasets/',runNameMain,'_matched_antibodies.csv'),sep=',',header=T,row.names = 1,stringsAsFactors = F)
# negative external test-set
inDFs <- read.table('Datasets/Israel_cohort_filtered_prevalences.csv',sep=',',header=T)
# DATA PREP
# ==============================================================================
# i) basic prep (variable types, subset antibody panel if needed)
inDFpr <- prepAbData(inDF,subSet=dType)
# ii) prep training & test sets
# ==============================================================================
set.seed(123897) # fixed seed ensures that all training / test splits are identical
inTrain <- createDataPartition(y=inDFpr[["Cohort"]],p=trPerc,list=F)
inDFtr <- inDFpr[inTrain,]
inDFtst <- inDFpr[-inTrain,]
# iii) pre-process (using only training set to avoid data leakage)
# ==============================================================================
pt <- prepProcessAbData(inDFtr=inDFtr,pVF = pVF,maxP = maxP,minP=minP)
inDFtrpp <- pt[[2]]; inDFprep <- pt[[1]]
# > save datasets
write.table(inDFtrpp,paste0('Model_data_',runNameMain,'/',runName,'_training.csv'),sep=',',row.names = F)
write.table(inDFtst,paste0('Model_data_',runNameMain,'/',runName,'_test.csv'),sep=',',row.names = F)
saveRDS(inDFprep,paste0('Model_data_',runNameMain,'/',runName,'_preprocessor.RDS'))
# MODEL TRAINING
# ==============================================================================
# iv) model training
# ==============================================================================
# > set model training scheme
trainC <- trainControl(method="repeatedcv",number=5,repeats = 5,savePredictions = T,classProbs = T,allowParallel = T,
verboseIter = F,returnData = F,preProcOptions = NULL,trim = T)
set.seed(123899) # seed is fixed for reproducibility / consistency [this is not necessary]
mdlFit <- caret::train(Cohort ~ ., data=inDFtrpp, method = mlMethod, metric = "Kappa", trControl = trainC,
tuneLength=10) # tune length size defines size of optimization grid
# v) report performance on training set / optimization using cross-validation
# =============================================================================
# > confusion matrix
mdlMetrics <- getMdlFitXVresults(mdlFit=mdlFit,posClass = pC,mdName = runName)
# > ROC
mdlROC <- compareModelsTrainingCV(fittedMdls = list(mdlFit),
modelNames = c(runName),roc.conf.boot = 100,
posClass = pC,annotateAUConly = T,roc.conf = 0.95,
tit = paste0(runName, " model"),roc.smooth = F,
annotS = 5,diagonalLine = F,textOffSetY = +0.05,textOffSetX = -0.195)
# - re-style ROC plot for publication
mdlRocS <- mdlROC + scale_color_manual(values=c(myCol)) + theme_classic() +
scale_fill_manual(values = c(myCol2) ) + theme(legend.position = "none") + ggtitle("") +
geom_abline(intercept = c(1), slope = 1,col="darkblue",linetype="longdash",size=1.05) +
coord_cartesian(xlim=c(1.005,-0.00),ylim=c(0,1.02),expand = F) +
theme(axis.line = element_line(size = 1.05),axis.ticks = element_line(size = 0.9))
#print(mdlRocS) # output on screen for debugging
# > extract model betas (GLM) / variable importance (non-GLM models)
varImpTable <- getVarImpTbl(mdlFit)
# >> save model, metrics, ROC, variable importance
saveRDS(mdlFit,paste0('Model_data_',runNameMain,'/',runName,'_ModelFit.RDS'))
write.table(mdlMetrics,paste0('Model_metrics_',runNameMain,'/',runName,'_resultsXV_metrics.csv'),sep=',',row.names = F)
ggsave(plot = mdlRocS,filename = paste0('Model_metrics_',runNameMain,'/',runName,'_resultsXV_ROC.png'),width = 6,height = 6,units = "in",dpi = 320)
write.table(varImpTable,paste0('Model_metrics_',runNameMain,'/',runName,'_variables.csv'),sep=',',row.names = F)
# MODEL TEST (using Test set and external negative control)
# =============================================================================
# v) model test on test set
# =============================================================================
# > preprocess test set (using pre-processing scheme from training set)
inDFtstpp <- predict(inDFprep,inDFtst)
# > predict test set and report prediction metrics
mdlMetricsTest <- getMdlTestResults(mdlFit=mdlFit,mdName = runName,testSet=inDFtstpp,posClass = pC)
# > generate ROC curve
mdlRocTest <- compareMdlsDatasets(mdls = list(mdlFit),
dataSets = list(inDFtstpp),
posClass = pC,
mdNames = c(runName),
response = "Cohort",
removeLegend = T,
roc.conf.boot = 100,roc.conf = 0.95,roc.smooth = F,
tit = paste0(runName, " model"),
annotateROC = T,
annotateROCbaseName = F,
annotS = 5,
diagonalLine = F,
textOffSetX = -0.195,
textOffSetY = +0.05)[[1]]
#print(mdlRocTest) # debug
# - ROC styling for publication
mdlRocTestS <- mdlRocTest + scale_color_manual(values=c(myCol)) + theme_classic() +
scale_fill_manual(values = c(myCol2) ) + theme(legend.position = "none") + ggtitle("") +
geom_abline(intercept = c(1), slope = 1,col="darkblue",linetype="longdash",size=1.05) +
coord_cartesian(xlim=c(1.005,-0.00),ylim=c(0,1.02),expand = F) +
theme(axis.line = element_line(size = 1.05),axis.ticks = element_line(size = 0.9))
#print(mdlRocTestS) # debug
# >> save results
write.table(mdlMetricsTest,paste0('Model_metrics_',runNameMain,'/',runName,'_resultsTest_metrics.csv'),sep=',',row.names = F)
ggsave(plot = mdlRocTestS,filename = paste0('Model_metrics_',runNameMain,'/',runName,'_resultsTest_ROC.png'),width = 6,height = 6,units = "in",dpi = 320)
# vi) model test on external negative dataset
# ================================================
# - note: it includes ONLY NEGATIVES, so we cannot make ROC curves
# and not all prediction metrics can be calculated
if (runNameMain != "CD_vs_UC") {
inDFs$filename <- NULL
inDFs$Cohort <- "N"
# - merge with our test set positive cases
inDFstst <- rbind.fill(inDFs,inDFtst[inDFtst$Cohort=="Y",])
inDFstst[is.na(inDFstst)] <- 0
inDFstst$Cohort <- as.factor(inDFstst$Cohort)
inDFststneg <- inDFstst[inDFstst$Cohort == "N",]
# - apply pre-processing scheme
inDFststnegpp <- predict(inDFprep,inDFststneg)
# - predict
mdlMetricsTestExt <- getMdlTestResults(mdlFit=mdlFit,mdName = runName,testSet=inDFststnegpp,dataSetName="Test.set.externalneg")
# > save results of testing on external set
write.table(mdlMetricsTestExt,paste0('Model_metrics_',runNameMain,'/',runName,'_resultsTestExt_metrics.csv'),sep=',',row.names = F)
}
print(paste0(' >> DONE WITH ML RUN FOR ',runName))
}
}
# ==============================================================================
# DATA COLLECTION
# ==============================================================================
# - code collects results of individual models and puts it into one table and
# moves main results to separate folder
# ==============================================================================
runs <- c("CD","UC","CD_vs_UC")
# make folder for results if it does not exist
if (!dir.exists('Results_ROCs_main')) {dir.create('Results_ROCs_main')}
# collect data from each run
# NOTE(review): output goes to 'Results_merged/'; that folder is assumed to
# already exist (only 'Results_ROCs_main' is created above) -- confirm.
for (run in runs) {
fldr <- paste0('Model_metrics_',run)
# one merged table per (run, dataset) combination
for (dt in c("all","agilent","twist")) {
res <- NULL
# second positional argument of list.files() is matched to 'path', so this
# lists the metric CSVs inside fldr that match the dataset pattern
toMerge <- list.files(pattern = paste0('.*base_',dt,'_.*_metrics.csv'), fldr)
# rbind growth in a loop is fine here: only a handful of files per run
for (f in toMerge) {
res <- rbind.data.frame(res,read.table(paste0(fldr,'/',f),sep=',',header=T))
}
write.table(res,paste0('Results_merged/Model_results_',run,'_',dt,'.csv'),sep=',',row.names = F)
}
}
# copy the headline glmnet ROC figures (cross-validation + test set) for each
# comparison into the main results folder
file.copy(from = 'Model_metrics_CD/CD_base_all_glmnet_resultsTest_ROC.png',to = 'Results_ROCs_main/',copy.mode = TRUE,overwrite = T)
file.copy(from = 'Model_metrics_CD/CD_base_all_glmnet_resultsXV_ROC.png',to = 'Results_ROCs_main/',copy.mode = TRUE,overwrite = T)
file.copy(from = 'Model_metrics_UC/UC_base_all_glmnet_resultsTest_ROC.png',to = 'Results_ROCs_main/',copy.mode = TRUE,overwrite = T)
file.copy(from = 'Model_metrics_UC/UC_base_all_glmnet_resultsXV_ROC.png',to = 'Results_ROCs_main/',copy.mode = TRUE,overwrite = T)
file.copy(from = 'Model_metrics_CD_vs_UC/CD_vs_UC_base_all_glmnet_resultsTest_ROC.png',to = 'Results_ROCs_main/',copy.mode = TRUE,overwrite = T)
file.copy(from = 'Model_metrics_CD_vs_UC/CD_vs_UC_base_all_glmnet_resultsXV_ROC.png',to = 'Results_ROCs_main/',copy.mode = TRUE,overwrite = T)
# ==============================================================================
# MODEL OPTIMIZATION using recursive feature selection (RFE)
# ==============================================================================
# For each disease comparison: load the base-model datasets and preprocessor,
# run (or reload a cached) RFE profile, refit top-5 and top-10 variable models,
# report XV / test / external-negative metrics, and compare ROCs (DeLong).
# NOTE(review): assumes the base-model run above already produced the
# 'Model_data_<run>' folders and that 'Model_RFE_<run>' and 'Results_merged'
# folders exist -- confirm before running stand-alone.
# which models to optimize?
dType <- "all" # dataset (all, twist or agilent)
mlMethod <- "glmnet" # algorithm
for (runNameMain in c("CD","UC","CD_vs_UC")) {
  # set colors for plots (and the positive class label for this comparison)
  if (runNameMain == "CD") {
    myCol = "red3"; myCol2 = "red3"; myCol3 = "red1"; myCol4="red4"; pC = "Y"
  } else if (runNameMain == "UC") {
    myCol = "blue3"; myCol2 = "blue3"; myCol3 = "blue1"; myCol4 = "blue4"; pC = "Y"
  } else if (runNameMain == "CD_vs_UC") {
    myCol = "purple3"; myCol2 = "purple3"; myCol3 = "purple1"; myCol4 = "purple4"; pC = "CD"
  }
  # set run names: L = base (load inputs from), S = optimized (save outputs to)
  runNameL <- paste0(runNameMain,'_base_',dType,'_',mlMethod)
  runNameS <- paste0(runNameMain,'_opt_',dType,'_',mlMethod)
  print(paste0(' >> STARTING OPTIMIZATION RUN FOR ',runNameL))
  # LOAD DATASETS (Tr, Test, Ext, pre-processor)
  # ===========================================
  # training (already pre-processed by the base run)
  inDFtrpp <- read.table(paste0('Model_data_',runNameMain,'/',runNameL,'_training.csv'),sep=',',header = T)
  inDFtrpp$Cohort <- as.factor(inDFtrpp$Cohort)
  # test (raw; pre-processed below with the saved preprocessor)
  inDFtst <- read.table(paste0('Model_data_',runNameMain,'/',runNameL,'_test.csv'),sep=',',header=T)
  inDFtst$Cohort <- as.factor(inDFtst$Cohort)
  inDFprep <- readRDS(paste0('Model_data_',runNameMain,'/',runNameL,'_preprocessor.RDS'))
  inDFtstpp <- predict(inDFprep,inDFtst)
  # external negatives-only cohort (not applicable to the CD-vs-UC comparison)
  if (runNameMain != "CD_vs_UC") {
    inDFs <- read.table('Datasets/Israel_cohort_filtered_prevalences.csv',sep=',',header=T)
    #inDFs$Cohort <- as.factor(inDFs$Cohort)
    inDFs$filename <- NULL
    inDFs$Cohort <- "N"
    # > merge with our test set positive cases so factor levels line up,
    #   then keep only the negatives; missing peptides are filled with 0
    inDFstst <- rbind.fill(inDFs,inDFtst[inDFtst$Cohort=="Y",])
    inDFstst[is.na(inDFstst)] <- 0
    inDFstst$Cohort <- as.factor(inDFstst$Cohort)
    inDFststneg <- inDFstst[inDFstst$Cohort == "N",]
    # - apply pre-processing scheme
    inDFststnegpp <- predict(inDFprep,inDFststneg)
  }
  # run RFE if not already done (the profile is cached as an RDS)
  # ==========================================
  if (!file.exists(paste0('Model_RFE_',runNameMain,'/',runNameS,'_RFE.RDS'))) {
    # define RFE setup
    rfeMethod <- mlMethod
    # train controller for the inner model fit at each RFE step
    trC <- trainControl(method="repeatedcv",
                        repeats=1,
                        number=5,
                        savePredictions = T,
                        classProbs = T,
                        allowParallel = T)
    # train controller for the RFE algorithm itself
    rfeCtrl <- rfeControl(functions = rfFuncs,
                          method = "repeatedcv",
                          repeats = 5,
                          number=50,
                          verbose = F,
                          allowParallel = T)
    # set subset sizes to test: dense grid for small panels, coarser steps for
    # larger ones; unique() drops the duplicated seq() boundary values
    if (ncol(inDFtrpp) <= 100) {szs=c(seq(1,ncol(inDFtrpp)-1))
    } else if (ncol(inDFtrpp) <= 210) {szs=c(seq(1,50,1),seq(50,100,1),seq(105,ncol(inDFtrpp)-1,5))
    } else if (ncol(inDFtrpp) > 210) {szs=c(seq(1,50,1),seq(50,100,1),seq(105,200,5), seq(200,ncol(inDFtrpp)-1,10)) }
    szs <- unique(szs)
    # run RFE (long-running; wall time is printed)
    print (' >> doing RFE profile'); time1 <- Sys.time()
    rfeProfile <- rfe(x=inDFtrpp[,-grep("Cohort",colnames(inDFtrpp))],
                      y=inDFtrpp[["Cohort"]],
                      sizes=szs,
                      rfeControl = rfeCtrl,
                      metric="Kappa",
                      method=rfeMethod,
                      maximize = T,
                      trControl = trC)
    time2 <- Sys.time(); print (' >>> DONE!'); print(time2 - time1)
    # cache it so re-runs skip the expensive profile
    saveRDS(rfeProfile,file=paste0('Model_RFE_',runNameMain,'/',runNameS,'_RFE.RDS'))
  }
  # (re)load RFE profile
  rfeProfile <- readRDS(paste0('Model_RFE_',runNameMain,'/',runNameS,'_RFE.RDS'))
  # plot the Kappa-vs-panel-size profile
  # NOTE(review): nMax/nT5/nT10/nT20 are computed but not used below -- kept as
  # reference values for choosing panel sizes at different Kappa tolerances.
  nMax <- pickSizeTolerance(rfeProfile$results, metric="Kappa",maximize = T,tol=0)
  nT5 <- pickSizeTolerance(rfeProfile$results, metric="Kappa",maximize = T,tol=5)
  nT10 <- pickSizeTolerance(rfeProfile$results, metric="Kappa",maximize = T,tol=10)
  nT20 <- pickSizeTolerance(rfeProfile$results, metric="Kappa",maximize = T,tol=20)
  rfeplot <- ggplot(rfeProfile$results) +
    aes(x=Variables,y=Kappa) +
    geom_line(col=myCol,size=1.05) + geom_point(col=myCol2) +
    geom_errorbar(aes(ymax = Kappa+KappaSD,ymin=Kappa-KappaSD),alpha=0.15,col=myCol2) +
    ggtitle(paste0("Recursive feature elimination (",runNameMain,")")) +
    theme_classic() + xlab("Number of Variables") +
    theme(axis.line = element_line(size = 1.05),axis.ticks = element_line(size = 0.9)) + ggtitle("") +
    xlab('Number of antibody-bound peptides') + ylab("Cohen's Kappa")
  #print(rfeplot) # debug
  # save it
  ggsave(plot = rfeplot,filename = paste0('Model_RFE_',runNameMain,'/',runNameS,'_RFEplot.png'),dpi = 600,width = 6,height = 4,scale = 1)
  # # REFIT OPTIMIZED MODELS (top-5 and top-10 RFE variables)
  # # ===========================================
  # variables, ranked by the RFE profile
  varT <- rfeProfile$optVariables
  # - keep only the response plus the top-ranked variables
  inDFtrppp5 <- inDFtrpp[,colnames(inDFtrpp) %in% c("Cohort",varT[1:5])]
  inDFtrppp10 <- inDFtrpp[,colnames(inDFtrpp) %in% c("Cohort",varT[1:10])]
  # # - train both reduced models with the same repeated-CV scheme
  trainC <- trainControl(method="repeatedcv",number=5,repeats = 10,savePredictions = T,classProbs = T,allowParallel = T,
                         verboseIter = F,returnData = F,preProcOptions = NULL,trim = T)
  set.seed(123899) # fixed seed for reproducibility
  mdlFitOpt5 <- train(Cohort ~ ., data=inDFtrppp5, method = mlMethod, metric = "Kappa", trControl = trainC,
                      tuneLength=20)
  mdlFitOpt10 <- train(Cohort ~ ., data=inDFtrppp10, method = mlMethod, metric = "Kappa", trControl = trainC,
                       tuneLength=20)
  # > save variable betas
  varImpTable5 <- getVarImpTbl(mdlFitOpt5)
  varImpTable10 <- getVarImpTbl(mdlFitOpt10)
  write.table(varImpTable5,paste0('Model_RFE_',runNameMain,'/',runNameS,'_model5_betas.csv'),sep=',',row.names = F)
  write.table(varImpTable10,paste0('Model_RFE_',runNameMain,'/',runNameS,'_model10_betas.csv'),sep=',',row.names = F)
  # > report performance on XV set
  mdlMetricsXV5 <- getMdlFitXVresults(mdlFit=mdlFitOpt5,mdName = runNameS,posClass = pC)
  mdlMetricsXV5$Model <- paste0(mdlMetricsXV5$Model,'_top5')
  mdlMetricsXV10 <- getMdlFitXVresults(mdlFit=mdlFitOpt10,mdName = runNameS,posClass = pC)
  mdlMetricsXV10$Model <- paste0(mdlMetricsXV10$Model,'_top10')
  # > report performance on test set
  mdlMetricsTest5 <- getMdlTestResults(mdlFit=mdlFitOpt5,mdName = runNameS,testSet=inDFtstpp,posClass = pC)
  mdlMetricsTest5$Model <- paste0(mdlMetricsTest5$Model,'_top5')
  mdlMetricsTest10 <- getMdlTestResults(mdlFit=mdlFitOpt10,mdName = runNameS,testSet=inDFtstpp,posClass = pC)
  mdlMetricsTest10$Model <- paste0(mdlMetricsTest10$Model,'_top10')
  # > report performance on external test set (if not doing CD vs UC)
  if (runNameMain != "CD_vs_UC") {
    mdlMetricsTestExt5 <- getMdlTestResults(mdlFit=mdlFitOpt5,mdName = runNameS,testSet=inDFststnegpp,posClass = pC,dataSetName = "Test.set.externalneg")
    # BUG FIX: previously suffixed mdlMetricsTest5$Model (already '_top5'),
    # yielding '..._top5_top5' in the external-negative rows; suffix the
    # external tables' own Model column instead (same for top10 below).
    mdlMetricsTestExt5$Model <- paste0(mdlMetricsTestExt5$Model,'_top5')
    mdlMetricsTestExt10 <- getMdlTestResults(mdlFit=mdlFitOpt10,mdName = runNameS,testSet=inDFststnegpp,posClass = pC,dataSetName = "Test.set.externalneg")
    mdlMetricsTestExt10$Model <- paste0(mdlMetricsTestExt10$Model,'_top10')
  }
  # - merge tables
  mdlMetricsMrg <- rbind.data.frame(mdlMetricsXV5,mdlMetricsXV10,mdlMetricsTest5,mdlMetricsTest10)
  if (runNameMain != "CD_vs_UC") {
    mdlMetricsMrg <- rbind.data.frame(mdlMetricsMrg,mdlMetricsTestExt5,mdlMetricsTestExt10)
  }
  row.names(mdlMetricsMrg) <- NULL
  # - save
  write.table(mdlMetricsMrg,paste0('Model_RFE_',runNameMain,'/',runNameS,'_resultsTest_metrics.csv'),sep=',',row.names = F)
  # > ROC (XV)
  mdlROCxv <- compareModelsTrainingCV(fittedMdls = list(mdlFitOpt5,mdlFitOpt10),
                                      modelNames = c("M1(5 abp)","M2(10 abp)"),roc.conf.boot = 100,
                                      posClass = pC,annotateAUConly = T,roc.conf = 0.95,
                                      tit = paste0(runNameS, " model"),
                                      annotS = 5, diagonalLine = F, textOffSetY = +0.05,textOffSetX = +0.005)
  # - style it for publication
  mdlRocTestXVS <- mdlROCxv + scale_color_manual(values=c(myCol3,myCol4)) + theme_classic() +
    scale_fill_manual(values = c(myCol3,myCol4) ) + theme(legend.position = "bottom") + ggtitle("") +
    geom_abline(intercept = c(1), slope = 1,col="darkblue",linetype="longdash",size=1.05) +
    coord_cartesian(xlim=c(1.005,-0.00),ylim=c(0,1.02),expand = F) +
    theme(axis.line = element_line(size = 1.05),axis.ticks = element_line(size = 0.9))
  #print(mdlRocTestXVS) # debug
  ggsave(plot = mdlRocTestXVS,filename = paste0('Model_RFE_',runNameMain,'/',runNameS,'_ROC_XV.png'),dpi = 600,width = 6,height = 6,scale = 1.0)
  # > ROC (test sets)
  mdlRocTest <- compareMdlsDatasets(mdls = list(mdlFitOpt5,mdlFitOpt10),
                                    dataSets = list(inDFtstpp),
                                    posClass = pC,
                                    mdNames = c("M1(5 abp)","M2(10 abp)"),
                                    response = "Cohort",
                                    removeLegend = T,
                                    roc.conf.boot = 100,roc.conf = 0.95,roc.smooth = F,
                                    tit = paste0(runNameS, " model"),
                                    annotateROC = T,
                                    diagonalLine = F,
                                    annotateROCbaseName = T,
                                    annotS = 5,
                                    textOffSetY = +0.05,textOffSetX = +0.005)[[1]]
  mdlRocTestS <- mdlRocTest + scale_color_manual(values=c(myCol3,myCol4)) + theme_classic() +
    scale_fill_manual(values = c(myCol3,myCol4) ) + theme(legend.position = "bottom") + ggtitle("") +
    geom_abline(intercept = c(1), slope = 1,col="darkblue",linetype="longdash",size=1.05) +
    coord_cartesian(xlim=c(1.005,-0.00),ylim=c(0,1.02),expand = F) +
    theme(axis.line = element_line(size = 1.05),axis.ticks = element_line(size = 0.9))
  print(mdlRocTestS)
  ggsave(plot = mdlRocTestS,filename = paste0('Model_RFE_',runNameMain,'/',runNameS,'_ROC_TestSet.png'),dpi = 600,width = 6,height = 6,scale = 1.0)
  # DeLong tests
  # - compare ROC of optimized models to original model, compare top-5 and top-10 optimizations
  resDeLong <- NULL
  # prep ROCs on the (pre-processed) test set
  mdlFitOrig <- readRDS(file = paste0('Model_data_',runNameMain,'/',runNameMain,'_base_all_glmnet_ModelFit.RDS'))
  roc5 <- roc(inDFtstpp$Cohort,predict(mdlFitOpt5,newdata = inDFtstpp,type="prob")[[pC]],auc=T,percent=F)
  roc10 <- roc(inDFtstpp$Cohort,predict(mdlFitOpt10,newdata = inDFtstpp,type="prob")[[pC]],auc=T,percent=F)
  rocOrig <- roc(inDFtstpp$Cohort,predict(mdlFitOrig,newdata = inDFtstpp,type="prob")[[pC]],auc=T,percent=F)
  # compare ROCs pairwise
  # > orig vs top5
  t <- roc.test(rocOrig,roc5,method="delong")
  resDeLong <- rbind.data.frame(resDeLong,
                                data.frame(dataset=runNameMain,
                                           method="glmnet",
                                           ROC1=paste0("no_opt"),
                                           ROC1_AUC=t$roc1$auc,
                                           ROC2=paste0("top5"),
                                           ROC2_AUC=t$roc2$auc,
                                           Zstat=t$statistic,
                                           pvalue=t$p.value))
  # > orig vs top10
  t <- roc.test(rocOrig,roc10,method="delong")
  resDeLong <- rbind.data.frame(resDeLong,
                                data.frame(dataset=runNameMain,
                                           method="glmnet",
                                           ROC1=paste0("no_opt"),
                                           ROC1_AUC=t$roc1$auc,
                                           ROC2=paste0("top10"),
                                           ROC2_AUC=t$roc2$auc,
                                           Zstat=t$statistic,
                                           pvalue=t$p.value))
  # > top5 vs top10
  t <- roc.test(roc5,roc10,method="delong")
  resDeLong <- rbind.data.frame(resDeLong,
                                data.frame(dataset=runNameMain,
                                           method="glmnet",
                                           ROC1=paste0("top5"),
                                           ROC1_AUC=t$roc1$auc,
                                           ROC2=paste0("top10"),
                                           ROC2_AUC=t$roc2$auc,
                                           Zstat=t$statistic,
                                           pvalue=t$p.value))
  # # > ROC curve with all 3 models (test sets)
  mdlRocTest <- compareMdlsDatasets(mdls = list(mdlFitOpt5,mdlFitOpt10,mdlFitOrig),
                                    dataSets = list(inDFtstpp),
                                    posClass = pC,
                                    mdNames = c("M1(5 abp)","M2(10 abp)","M3(all)"),
                                    response = "Cohort",
                                    removeLegend = T,
                                    roc.conf.boot = 100,roc.conf = 0.95,roc.smooth = F,
                                    tit = paste0(runNameS, " model"),
                                    annotateROC = T,
                                    diagonalLine = F,
                                    annotateROCbaseName = T,
                                    annotS = 5,
                                    textOffSetY = +0.05,textOffSetX = +0.005)[[1]]
  # - style it
  mdlRocTestS <- mdlRocTest + scale_color_manual(values=c(myCol3,myCol4,"orange")) + theme_classic() +
    scale_fill_manual(values = c(myCol3,myCol4,"orange") ) + theme(legend.position = "bottom") + ggtitle("") +
    geom_abline(intercept = c(1), slope = 1,col="darkblue",linetype="longdash",size=1.05) +
    coord_cartesian(xlim=c(1.005,-0.00),ylim=c(0,1.02),expand = F) +
    theme(axis.line = element_line(size = 1.05),axis.ticks = element_line(size = 0.9))
  #print(mdlRocTestS) # debug
  write.table(resDeLong,paste0('Results_merged/','test_DeLong_',runNameMain,'.csv'),sep=',',row.names = F)
}
# merge delong tests and do FDR correction
inDL <- read.table('Results_merged/test_DeLong_CD.csv',sep=',',header=T)
inDL <- rbind.data.frame(inDL,read.table('Results_merged/test_DeLong_UC.csv',sep=',',header=T))
inDL <- rbind.data.frame(inDL,read.table('Results_merged/test_DeLong_CD_vs_UC.csv',sep=',',header=T))
# BUG FIX: p.adjust() defaults to method = "holm" (FWER control), but the
# column name, the comment above and the output filename all say FDR -- use
# the Benjamini-Hochberg FDR procedure explicitly.
inDL$FDR <- p.adjust(inDL$pvalue, method = "fdr")
write.table(inDL,paste0('Results_merged/tests_DeLong_merged_FDRcorr.csv'),sep=',',row.names = F)
|
212105c26a0fc8bac4caf9a93eb738b6981b1b9a
|
72c51dcbda3930b9a865c892161cd3f0b8b44b26
|
/S4 L7 Forecasting With Predict.R
|
4881fdc2006d327d487a2e31fe3f98c616f717fb
|
[] |
no_license
|
richiedlon/Time-Series-Data-Analysis-R
|
eb37a406e7811a6b4c3e1bda54cab03225e13341
|
c7ee42df81bb17b1f521a7456a4e9bf00b80d130
|
refs/heads/main
| 2023-06-02T05:08:40.767888
| 2021-06-18T15:13:41
| 2021-06-18T15:13:41
| 372,722,409
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 700
|
r
|
S4 L7 Forecasting With Predict.R
|
# Forecast Microsoft closing prices for one year ahead with prophet.
library(prophet)
library(dplyr)
library(plotly)
# NOTE(review): absolute Windows path -- script only runs on this machine
setwd("F:\\Backup\\Youtube\\Videos\\Time series analysis")
StockValue=read.csv("S4 L7 Microsoft stock.csv")
head(StockValue)
tail(StockValue)
# keep only the date and closing price columns
Microstock = StockValue[c("Date","Close.Last")]
colnames(Microstock) = c("ds","y") # Rename columns to prophet's expected ds/y
head(Microstock)
plot(Microstock$y)
# log-transformed copy, plotted for comparison
# NOTE(review): df1 is never passed to prophet() below -- the model is fit on
# the raw prices; confirm whether the log series was meant to be modeled
df1 = Microstock
df1$y = log(Microstock$y)
plot(df1$y)
# fit the prophet model on the raw closing prices
m= prophet(Microstock)
# extend the date range 365 days beyond the observed data
forcastcycle = make_future_dataframe(m , periods = 365)
head(forcastcycle)
tail(forcastcycle)
# predicted values (yhat) over history + forecast horizon
prediction = predict(m, forcastcycle)
tail(prediction$yhat)
plot(m, prediction)
ggplotly(plot(m, prediction))
# trend / seasonality component plots
prophet_plot_components(m, prediction)
|
514eeb297160be457714839a3865c95cca7b1881
|
9931e6439b5beae2e4c9d77c28850fce7d628780
|
/man/pStars.Rd
|
c2deaa457caa65d19ee44e72208fbbbc6df4b696
|
[] |
no_license
|
PNorvaisas/PFun
|
b2703a2b75294e63b2dc506c82d6013981668902
|
c65faa1d0fe17d239af9c9f678eaa6463434109c
|
refs/heads/master
| 2020-12-30T14:33:37.679090
| 2018-10-02T18:35:53
| 2018-10-02T18:35:53
| 91,320,995
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 317
|
rd
|
pStars.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pStars.R
\name{pStars}
\alias{pStars}
\title{Get clean significance stars}
\usage{
pStars(x)
}
\arguments{
\item{x}{Vector with p values}
}
\description{
Get clean significance stars
}
\examples{
pStars(x)
}
\keyword{Stars}
\keyword{p,}
|
174d1628030262d6ab21f080d3b2e3c30836cfc1
|
ef5a4a769dadaa61823d068b734c4073d59812b6
|
/man/OR.Rd
|
326f3fad2a3771e212db5f63cd624a1c1bcaf355
|
[] |
no_license
|
bbbruce/aepi
|
da795b0b0e5d04879e18cbc2ca5242a90806df84
|
931d23cd3c9e9ba3f4ce02db95931a781030eeae
|
refs/heads/master
| 2016-09-05T22:10:28.170866
| 2015-09-09T17:49:40
| 2015-09-09T17:49:40
| 42,145,289
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 621
|
rd
|
OR.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/OR.R
\name{OR}
\alias{OR}
\alias{OR.formula}
\alias{OR.table}
\title{Calculate OR}
\usage{
OR(x, ...)
\method{OR}{table}(table)
\method{OR}{formula}(formula, data)
}
\arguments{
\item{table}{a 2 x 2 table}
\item{formula}{a formula, see details}
\item{data}{data.frame containing variables formula refers to}
}
\description{
Calculate OR
}
\details{
Formula should be specified as outcome ~ exposure | strata1 + strata2...
}
\examples{
OR(prevhosp ~ methicse | agecat, nilton)
OR(prevhosp ~ methicse | agecat + preantbu, nilton)
}
|
d55b395c45a63cbac1b78252118a0f5fe291fcc2
|
28030b5c66d124dd974b5398bf169c7fc13c74b8
|
/man/SVM_predict.Rd
|
65cbca06ae73d4d00b4c87182ecb8979c40d3b67
|
[] |
no_license
|
cran/DMTL
|
96b1dbfd9bc066e804c16a0477b56ae754ab8ce0
|
17d4b002a96cf8100b141ef53b8d36bec0300536
|
refs/heads/master
| 2023-03-11T07:22:31.883182
| 2021-02-18T09:50:02
| 2021-02-18T09:50:02
| 340,095,052
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,401
|
rd
|
SVM_predict.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predictive_modeling.R
\name{SVM_predict}
\alias{SVM_predict}
\title{Predictive Modeling using Support Vector Machine}
\usage{
SVM_predict(
x_train,
y_train,
x_test,
lims,
kernel = "rbf",
optimize = FALSE,
C = 2,
kpar = list(sigma = 0.1),
eps = 0.01,
seed = NULL,
verbose = FALSE,
parallel = FALSE
)
}
\arguments{
\item{x_train}{Training features for designing the SVM regressor.}
\item{y_train}{Training response for designing the SVM regressor.}
\item{x_test}{Test features for which response values are to be predicted.
If \code{x_test} is not given, the function will return the trained model.}
\item{lims}{Vector providing the range of the response values for modeling.
If missing, these values are estimated from the training response.}
\item{kernel}{Kernel function for SVM implementation. The available options
are \code{linear}, \code{poly}, \code{rbf}, and \code{tanh}. Defaults to \code{rbf}.}
\item{optimize}{Flag for model tuning. If \code{TRUE}, performs a grid search for
parameters. If \code{FALSE}, uses the parameters provided. Defaults to \code{FALSE}.}
\item{C}{Cost of constraints violation. This is the constant "C" of the
regularization term in the Lagrange formulation. Defaults to \code{2}. Valid only
when \code{optimize = FALSE}.}
\item{kpar}{List of kernel parameters. This is a named list that contains
the parameters to be used with the specified kernel. The valid parameters
for the existing kernels are -
\itemize{
\item \code{sigma} for the radial basis (rbf) kernel. Note that this is the
\strong{inverse} kernel width.
\item \code{degree}, \code{scale}, \code{offset} for the polynomial kernel.
\item \code{scale}, \code{offset} for the hyperbolic tangent kernel.
}
Valid only when \code{optimize = FALSE}. Defaults to \code{list(sigma = 0.1)}.}
\item{eps}{The insensitive-loss function used for epsilon-SVR. Defaults to
\code{0.01}.}
\item{seed}{Seed for random number generator (for reproducible outcomes).
Defaults to \code{NULL}.}
\item{verbose}{Flag for printing the tuning progress when \code{optimize = TRUE}.
Defaults to \code{FALSE}.}
\item{parallel}{Flag for allowing parallel processing when performing grid
search \emph{i.e.}, \code{optimize = TRUE}. Defaults to \code{FALSE}.}
}
\value{
If \code{x_test} is missing, the trained SVM regressor.
If \code{x_test} is provided, the predicted values using the model.
}
\description{
This function trains a Support Vector Machine regressor using the training
data provided and predict response for the test features. This implementation
depends on the \code{kernlab} package.
}
\note{
The response values are filtered to be bound by range in \code{lims}.
}
\examples{
set.seed(86420)
x <- matrix(rnorm(3000, 0.2, 1.2), ncol = 3); colnames(x) <- paste0("x", 1:3)
y <- 0.3*x[, 1] + 0.1*x[, 2] - x[, 3] + rnorm(1000, 0, 0.05)
## Get the model only...
model <- SVM_predict(x_train = x[1:800, ], y_train = y[1:800], kernel = "rbf")
## Get predictive performance...
y_pred <- SVM_predict(x_train = x[1:800, ], y_train = y[1:800], x_test = x[801:1000, ])
y_test <- y[801:1000]
print(performance(y_test, y_pred, measures = "RSQ"))
}
\keyword{support-vector-machine}
\keyword{support-vector-regression}
|
142a4cc945cd35eb8022a14a470fed2c678589aa
|
de882c7604b62c5975274bf0e3027da96a2f7b4d
|
/R/sign.star.R
|
113ede81cd7356624bf6df0162eae3641a431092
|
[] |
no_license
|
vincentgarin/mppR
|
bc061f2d0284adc6468619e162e8ffd45b641fb3
|
2fb0286257697f796c2c1b2590e9284ad281a939
|
refs/heads/master
| 2023-01-28T18:07:19.180068
| 2023-01-02T13:27:41
| 2023-01-02T13:27:41
| 75,842,226
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 499
|
r
|
sign.star.R
|
#############
# sign.star #
#############
# function that convert significance of results in stars
# Convert a single p-value into a significance-star label.
#
# Args:
#   x: one numeric p-value (may be NA).
# Returns (visibly, for every input):
#   NA for NA input, otherwise "" (x >= 0.1), "." (0.05 <= x < 0.1),
#   "*" (0.01 <= x < 0.05), "**" (0.001 <= x < 0.01), "***" (x < 0.001).
#
# Fix: the original ended the NA branch on an assignment, so NA was returned
# *invisibly* while every other input returned visibly; now all paths return
# visibly. The redundant double-bounded conditions (e.g. (0.1>x)&(0.05<=x))
# are replaced by a plain else-if ladder with identical boundary behavior.
sign.star <- function(x){
  if (is.na(x)) {
    return(NA)
  }
  if (x >= 0.1) {
    ""
  } else if (x >= 0.05) {
    "."
  } else if (x >= 0.01) {
    "*"
  } else if (x >= 0.001) {
    "**"
  } else {
    "***"
  }
}
|
962b5daa85e4e996b4a41f59f3b587aede1bbbb3
|
44cccc95ebf2b0dd822bd0b8c0eb6fb867bc3470
|
/KNN_CleanData_Evaluation.R
|
a3593a979f85b7f3f910fdfdf51fdd5256abf46d
|
[] |
no_license
|
kkoska/softwareprojekt
|
f2f8da3414fe10ae181f49e883f2a2fc8c48ab75
|
2befe3f31fdcd6c6cfa7d2ebffe1ec0b5499a981
|
refs/heads/master
| 2020-06-05T18:46:42.693991
| 2019-06-18T10:03:40
| 2019-06-18T10:03:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,383
|
r
|
KNN_CleanData_Evaluation.R
|
#Librarys---------------------------------------------------------------
library(NLP)
library(tm)
library(sylly)
library(koRpus)
library(koRpus.lang.de)
library(textstem)
library(dplyr)
library(rebus)
library(stringr)
library(qdapTools)
library(qdapDictionaries)
library(qdapRegex)
library(RColorBrewer)
library(qdap)
library(class)
library(SnowballC)
#Initialisation--------------------------------------------------------
# GermEval 2019 tweets: column 1 = text, column 2 = class label
tweets <- read.csv("germeval2019training.csv", header = FALSE, sep = ";", encoding = "UTF-8")
tweetsVec <- tweets[,1]
labelVec <- tweets[,2]
tweetsVec <- as.character(tweetsVec)
# stop word list (German defaults plus corpus-specific tokens)
stopwordlist <- c(stopwords("de"), "lbr", "ja", "dass", "usw", "\"")
# swear word list, lower-cased to match the lower-cased tweets
Schimpfwort_liste <- read.csv("Schimpfwort_list.csv", header = F, sep = ";")
Schimpfwort_liste_Vec <- as.character(Schimpfwort_liste$V1)
Schimpfwort_liste_Vec <- tolower(Schimpfwort_liste_Vec)
# Counts of # and @ per tweet (taken from the raw text, before cleaning)-------------------
Hash_2019 <- str_count(tweets$V1, pattern = fixed("#"))
Ats_2019 <- str_count(tweets$V1, pattern = fixed("@"))
#Preprocessing----------------------------------------------------------------------
#remove chars
tweetsVec <- removePunctuation(tweetsVec)
# remove non-ASCII (UTF-8 special) characters
tweetsVec <- gsub("[^\x01-\x7F]", "", tweetsVec)
#to lowercase
tweetsVec <- tolower(tweetsVec)
#remove stopwords
tweetsVec <- removeWords(tweetsVec, stopwordlist)
#remove spaces
tweetsVec <- stripWhitespace(tweetsVec)
# build lemma dictionary with TreeTagger
lemma_dictionary <- make_lemma_dictionary(tweetsVec, engine = "treetagger",path = NULL, lang = "de")
# lemmatize
tweetsVec <- lemmatize_strings(tweetsVec, dictionary = lemma_dictionary)
tweetsPreprocessed <- data.frame(tweetsVec, labelVec)
# Extract data
# written as csv with "," as separator and row numbers in the first column
# (so when importing, best to drop the first column)
#write.csv(tweetsPreprocessed, file = "tweetsPreprocessed.csv", fileEncoding = "UTF-8")
# read preprocessed tweets and drop the row-number column
#tweetsPreprocessed <- read.csv("tweetsPreprocessed.csv", header = TRUE, sep = ",", encoding = "UTF-8")
#tweetsPreprocessed <- tweetsPreprocessed[,2-3]
# Match against swear word list------------------------------------------------------------
k <- 1
# count matrix; dimensions are hard-coded to 12387 tweets x 11304 swear words --
# adjust when using other lists!!!
Vor_Matrix <- matrix(nrow = 12387, byrow = F, ncol =11304)
while (k <= 11304) {
V <- c(Schimpfwort_liste_Vec[k:k])
# NOTE(review): fixed() counts literal substring occurrences, so matches
# inside longer words are counted too -- presumably intended; confirm
abgleich <- str_count(tweetsPreprocessed$tweetsVec, pattern = fixed(V))
Vor_Matrix[,k] <- abgleich
k <- k + 1
}
# total swear word count per tweet
Schimpfwort_Zahl <- rowSums(Vor_Matrix)
#Document Term Matrix--------------------------------------------------------------------------
tweetsCorpus <- VectorSource(tweetsPreprocessed$tweetsVec)
tweetsSource <- VCorpus(tweetsCorpus)
# drop terms absent from >97.5% of documents
TweetDTM <- removeSparseTerms(DocumentTermMatrix(tweetsSource), sparse = 0.975)
TweetDTM <- as.matrix(TweetDTM)
# feature data frame: text + label + hashtag/@ counts + swear count + DTM terms
tweets_Features <- data.frame(tweetsPreprocessed, Ats_2019, Hash_2019, Schimpfwort_Zahl, TweetDTM)
# initialise knn-----------------------------------------------------------------------------------
n <- nrow(tweets_Features)
# split into training and test 70/30 (after shuffling rows)
shuffled <- tweets_Features[sample(n),]
train <- shuffled[1:round(0.7 * n),]
test <- shuffled[(round(0.7 * n) + 1):n,]
# extract labels
train_labels <- train$labelVec
test_labels <- test$labelVec
# working datasets without labels
knn_train <- train
knn_test <- test
knn_train$labelVec <- NULL
knn_test$labelVec <- NULL
# text column as number
# NOTE(review): meaningful only if tweetsVec is a factor (pre-R 4.0 default),
# where as.numeric yields level codes; on a character column this produces
# NAs with a warning -- confirm the R version this was written for
knn_test$tweetsVec <- as.numeric(knn_test$tweetsVec)
knn_train$tweetsVec <- as.numeric(knn_train$tweetsVec)
# Normalize (min-max scaling using the full dataset's range)----------------------------------
#knn_train$Ats_2019 <- (knn_train$Ats_2019-min(tweet_Featur$Ats_2019))/(max(tweet_Featur$Ats_2019)-min(tweet_Featur$Ats_2019))
#knn_test$Ats_2019 <- (knn_test$Ats_2019-min(tweet_Featur$Ats_2019))/(max(tweet_Featur$Ats_2019)-min(tweet_Featur$Ats_2019))
#knn_train$Hash_2019 <- (knn_train$Hash_2019-min(tweet_Featur$Hash_2019))/(max(tweet_Featur$Hash_2019)-min(tweet_Featur$Hash_2019))
#knn_test$Hash_2019 <- (knn_test$Hash_2019-min(tweet_Featur$Hash_2019))/(max(tweet_Featur$Hash_2019)-min(tweet_Featur$Hash_2019))
knn_train$Schimpfwort_Zahl <- (knn_train$Schimpfwort_Zahl-min(tweets_Features$Schimpfwort_Zahl))/(max(tweets_Features$Schimpfwort_Zahl)-min(tweets_Features$Schimpfwort_Zahl))
knn_test$Schimpfwort_Zahl <- (knn_test$Schimpfwort_Zahl-min(tweets_Features$Schimpfwort_Zahl))/(max(tweets_Features$Schimpfwort_Zahl)-min(tweets_Features$Schimpfwort_Zahl))
# Train, predict and build confusion matrix -> SO FAR K=7 BEST--------------------------------
pred <- knn(train = knn_train, test = knn_test, cl = train_labels, k = 7)
# rows = true labels, columns = predicted labels
conf <- table(test = test_labels, pred)
conf
# overall accuracy = sum of the diagonal over all predictions
acc <- (conf[1,1]+conf[2,2]+conf[3,3]+conf[4,4])/sum(conf)
acc
#Four-Class-Evaluation
# Four-class evaluation of a 4x4 confusion matrix.
#
# Args:
#   matrix: 4x4 confusion matrix with rows = true labels and columns =
#     predicted labels, classes in the order ABUSE, INSULT, OTHER, PROFANITY
#     (matching `table(test = test_labels, pred)` above).
# Prints per-class precision/recall, the macro-averages and the F1 score,
# and invisibly returns the computed metrics as a list (new, backward-
# compatible: existing callers use the function for its printed report).
four_class_evaluation <- function(matrix){
  # true positives are on the diagonal
  tp_abuse <- matrix[1,1]
  tp_insult <- matrix[2,2]
  tp_other <- matrix[3,3]
  tp_profanity <- matrix[4,4]
  # row sums = tp + fn (actual counts per class)
  tpfn_abuse <- sum(matrix[1,])
  tpfn_insult <- sum(matrix[2,])
  tpfn_other <- sum(matrix[3,])
  tpfn_profanity <- sum(matrix[4,])
  # column sums = tp + fp (predicted counts per class)
  tpfp_abuse <- sum(matrix[,1])
  tpfp_insult <- sum(matrix[,2])
  tpfp_other <- sum(matrix[,3])
  tpfp_profanity <- sum(matrix[,4])
  #precision = tp/(tp+fp)
  #recall = tp/(tp+fn)
  abuse_precision <- tp_abuse/tpfp_abuse
  abuse_recall <- tp_abuse/tpfn_abuse
  insult_precision <- tp_insult/tpfp_insult
  insult_recall <- tp_insult/tpfn_insult
  other_precision <- tp_other/tpfp_other
  other_recall <- tp_other/tpfn_other
  profanity_precision <- tp_profanity/tpfp_profanity
  profanity_recall <- tp_profanity/tpfn_profanity
  # BUG FIX: the original called mean(a, b, c, d); mean() averages only its
  # first argument (the rest are matched to trim/na.rm/...), so the "macro
  # averages" were just the ABUSE-class values. Averaging needs one vector.
  average_precision <- mean(c(abuse_precision, insult_precision, other_precision, profanity_precision))
  average_recall <- mean(c(abuse_recall, insult_recall, other_recall, profanity_recall))
  # F1 = harmonic mean of the macro-averaged precision and recall
  f1_score <- 2*(average_precision * average_recall)/(average_precision + average_recall)
  #Output:
  print(matrix)
  print("===================================")
  print(paste(paste("ABUSE[1]: Precision", abuse_precision, sep = ": ", collapse = NULL), paste("Recall", abuse_recall, sep = ": ", collapse = NULL), sep = " ", collapse = NULL))
  print(paste(paste("INSULT[2]: Precision", insult_precision, sep = ": ", collapse = NULL), paste("Recall", insult_recall, sep = ": ", collapse = NULL), sep = " ", collapse = NULL))
  print(paste(paste("OTHER[3]: Precision", other_precision, sep = ": ", collapse = NULL), paste("Recall", other_recall, sep = ": ", collapse = NULL), sep = " ", collapse = NULL))
  print(paste(paste("PROFANITY[4]: Precision", profanity_precision, sep = ": ", collapse = NULL), paste("Recall", profanity_recall, sep = ": ", collapse = NULL), sep = " ", collapse = NULL))
  print(paste("Average-Precision:", average_precision))
  print(paste("Average-Recall:", average_recall))
  print(paste("F1-Score:", f1_score))
  # invisibly expose the metrics for programmatic use
  invisible(list(
    precision = c(abuse = abuse_precision, insult = insult_precision,
                  other = other_precision, profanity = profanity_precision),
    recall = c(abuse = abuse_recall, insult = insult_recall,
               other = other_recall, profanity = profanity_recall),
    average_precision = average_precision,
    average_recall = average_recall,
    f1_score = f1_score))
}
four_class_evaluation(conf)
|
d7aae2bc36498b14ed6660cc203bf64295fbfc98
|
e541a1a88fbcaa4ef67cb0864de2931f44693fba
|
/lect15_2.R
|
fff4da892c5acd0b07240d43099473631b264be9
|
[] |
no_license
|
rinkanrohitjena/rstudiocodes
|
8e0346e59537259d1c7269938723c0419f3d8209
|
871e9bcb1a78a777dca4438550438e0aa49b9ca1
|
refs/heads/master
| 2020-07-06T01:07:28.492082
| 2019-08-17T05:48:34
| 2019-08-17T05:48:34
| 202,841,007
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 332
|
r
|
lect15_2.R
|
# Fit a linear model for plasma glucose concentration on the Pima.tr diabetes
# data, visualise model fit (observed vs fitted), then run stepwise AIC
# selection for a reduced model.
library(MASS)
data(Pima.tr)
head(Pima.tr)
library(RColorBrewer)
pima = Pima.tr
# Regress glucose on all remaining covariates.
lm3 <- lm(glu ~ bmi+npreg+bp+skin+ped+age+type, data=pima)
# Observed glucose against fitted values.
# NOTE(review): brewer.pal() requires n >= 3; brewer.pal(2, "Set1") warns and
# returns 3 colours anyway -- confirm the intended palette size.
plot(lm3$fitted.values, pima$glu,pch=20,col=brewer.pal(2,"Set1"))
# BUG FIX: abline(lm3) is wrong on a fitted-vs-observed plot -- for a multiple
# regression abline() draws a line from the intercept and the *first*
# coefficient (bmi here), which is meaningless on these axes.  The correct
# reference for fitted-vs-observed is the identity line y = x.
abline(0, 1, col="red")
predict(lm3) #Compare to predicted value
# Stepwise (AIC) model selection starting from the full model.
lm.finalmodel= step(lm(glu~.,data = pima))
summary(lm.finalmodel)
|
6039ad6c7eafb412fad26794042ed94b9d6c82a9
|
4e6f18e6feb01502f2cd15772b6c0cc2f7c0629d
|
/tests/testthat/test_treatment.R
|
a1a7751052cd9ce4ad53facfccc61421f9024b49
|
[] |
no_license
|
cancerpolicy/bcimodel
|
8f76e9a64c79773c4e7058f1a71eb619a6496d8e
|
219df7c778ce8388b661a83de45845b2b0f57815
|
refs/heads/master
| 2021-01-13T02:55:45.874682
| 2019-06-27T19:50:48
| 2019-06-27T19:50:48
| 77,089,332
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,338
|
r
|
test_treatment.R
|
################################################################################
# Tests for treatment.R
context('Assigning treatment')
################################################################################
# shifttreatment_indicator() should return NA for a scenario whose `type`
# (pairnum) entry is NA, and otherwise flag the advanced-stage cases selected
# for a stage shift at approximately the expected rate.
test_that('shifttreatment_indicator works',
          {
              # First has no shift, second has 15% shift (0.85 HR)
              s <- list(matrix(0, nrow=1000, ncol=2),
                        stageshift_indicator(0.85, 1000, 2))
              # Base case has equally distributed groups 1 to 4
              b <- matrix(sample.int(4, size=2000, replace=TRUE),
                          nrow=1000, ncol=2)
              # Map of 1:4 onto stage and ER status
              m <- matrix(1:4, nrow=2, dimnames=list(c('Early', 'Advanced'),
                                                     c('ER+', 'ER-')))
              # If type[x]=NA, should return all NA's
              expect_equal(sum(is.na(shifttreatment_indicator(x=1, type=c(NA, 2),
                                                              s, b, m))), 2000)
              ind <- shifttreatment_indicator(x=2, type=c(NA, 2), s, b, m)
              # Half the cases are advanced-stage and 15% of those shift, so the
              # indicator mean should be near 0.5*0.15 (tolerance: 2 points).
              expect_true(abs(round(mean(ind)-(0.5*0.15),2))<=0.02)
          }
)
# Smoke test: the full pipeline from stage-shift indicators through
# sim_treatment_by_subgroup() should run without error on a small population.
# NOTE(review): this test contains no expect_*() assertions; it only verifies
# that the pipeline executes.
test_that('sim_treatment_by_subgroup works',
          {
              library(bcimodel)
              set.seed(98103)
              data(ex1)
              # Small example
              popsize <- 1000
              sims <- 100
              # Base case has equally distributed groups 1 to 4
              b <- matrix(sample.int(4, size=popsize*sims, replace=TRUE),
                          nrow=popsize, ncol=sims)
              # Map of 1:4 onto stage and ER status
              m <- ex1[[3]]
              # Shifts
              s <- lapply(ex1[[1]]$earlydetHR,
                          stageshift_indicator, pop_size=popsize, nsim=sims)
              # Get new stages (advanced cases only)
              n <- lapply(s, shift_stages, original=b, map=m)
              # Create indicator for shifting treatment (advanced cases only)
              st <- lapply(ex1[[1]]$num, shifttreatment_indicator,
                           type=ex1[[1]]$pairnum, shifts=s, basecase=b, map=m)
              # Simulate treatment (for early detection scenarios, candidate
              # early-stage treatments for shifted cases)
              # BUG FIX: the final argument was `nsim`, which is not defined in
              # this test's scope (the simulation count is held in `sims`), so
              # this call errored with "object 'nsim' not found".
              t <- sim_treatment_by_subgroup(ex1[[4]], n[[1]], 'base', popsize, sims)
          }
)
# End-to-end checks that treatments_by_policy() assigns treatments consistent
# with the stage-shift machinery, and that update_treat_stageshift() pairs
# non-shifted cases across scenarios.  The numbered tests below pin specific
# invariants of the assignment; tests one and two are placeholders (TO DO).
test_that('treatments_by_policy and update_treat_stageshift work',
          {
              library(bcimodel)
              set.seed(98103)
              data(ex1)
              # Small example
              popsize <- 10
              sims <- 5
              # Base case has equally distributed groups 1 to 4
              b <- matrix(sample.int(4, size=popsize*sims, replace=TRUE),
                          nrow=popsize, ncol=sims)
              # Map of 1:4 onto stage and ER status
              m <- ex1$map
              # Shifts
              s <- lapply(ex1$pol$earlydetHR,
                          stageshift_indicator, pop_size=popsize, nsim=sims)
              # Get new stages (advanced cases only)
              n <- lapply(s, shift_stages, original=b, map=m)
              # Create indicator for shifting treatment (advanced cases only)
              st <- lapply(ex1$pol$num, shifttreatment_indicator,
                           type=ex1$pol$pairnum, shifts=s, basecase=b, map=m)
              # Simulate treatment (for early detection scenarios, candidate
              # early-stage treatments for shifted cases)
              t <- treatments_by_policy(policies=ex1[[1]], treat_chars=ex1[[4]],
                                        stagegroups=n, map=m, popsize, sims)
              ####### TEST ONE - TO DO
              # Scenarios with early detection should only have early-stage
              # treatments
              ####### TEST TWO - TO DO
              # Scenarios with early detection should only have early-stage
              ####### TEST THREE
              # Shift treatment indicator is TRUE only for cases that were
              # advanced-stage in the base case
              expect_equal(sum(!b[st[[3]]]%in%m['Advanced',]),0)
              ####### TEST FOUR
              # In paired non-earlydet scenario, shift treatment indicator
              # is TRUE only for advanced-stage treatments
              expect_equal(sum(!t[[2]][st[[3]]]%in%
                                   subset(ex1[[4]], SSno%in%m['Advanced',])$txSSno), 0)
              ####### TEST FIVE
              # New stages for the shifted cases are early stages
              expect_equal(
                  sum(!n[[3]][st[[3]]]%in%m['Early',]), 0
              )
              ####### TEST SIX (similar to TEST FOUR)
              # In paired non-earlydet scenario, shift treatment indicator
              # is TRUE only for advanced-stage treatments
              expect_equal(
                  sum(!t[[3]][st[[3]]]%in%
                          subset(ex1[[4]], SSno%in%m['Early',])$txSSno), 0
              )
              ####### TEST SEVEN (similar to TEST FOUR)
              # Scenario 3 is where txSSno 1 and 4 have prop=0, so we
              # should only see 2 and 3
              expect_equal(
                  sum(!t[[3]][st[[3]]]%in%c(2,3)), 0
              )
              # Finalize treatments by pairing the non-stage shifted scenario
              # with the stage-shifted cases for the early detection scenario
              tfinal <- update_treat_stageshift(ex1$pol, shifts=s, treats=t)
              ####### TEST EIGHT
              # Treatments are the same between scenario 2 and 3 for
              # non-shifted cases
              expect_equal(tfinal[[2]][!s[[3]]], tfinal[[3]][!s[[3]]])
              ####### TEST NINE
              # Treatments are only early-stage for shifted cases in final
              expect_equal(
                  sum(!tfinal[[3]][s[[3]]]%in%c(2,3)), 0
              )
          }
)
# Regression smoke test: the pipeline should still run when the treatment
# table is collapsed to a single treatment ('None') per stage/ER subgroup.
# NOTE(review): this test contains no expect_*() assertions -- per the final
# comment, the checks from the test above were rerun manually; consider
# porting them here so the single-treatment path stays covered automatically.
test_that('sim_treatment_by_subgroup works if only 1 treatment', {
    # Set up 1-treatment scenario
    library(bcimodel)
    data(ex1)
    ex1$tx <- subset(ex1$tx, txSSid=='None')
    # With one treatment per subgroup, every policy assigns it probability 1.
    ex1$tx <- transform(ex1$tx, txSSno=1:4, base=1, tam=1, tamandshift=1)
    # Small example
    popsize <- 10
    sims <- 5
    # Base case has equally distributed groups 1 to 4
    b <- matrix(sample.int(4, size=popsize*sims, replace=TRUE),
                nrow=popsize, ncol=sims)
    # Map of 1:4 onto stage and ER status
    m <- ex1[[3]]
    # Shifts
    s <- lapply(ex1[[1]]$earlydetHR,
                stageshift_indicator, pop_size=popsize, nsim=sims)
    # Get new stages (advanced cases only)
    n <- lapply(s, shift_stages, original=b, map=m)
    # Create indicator for shifting treatment (advanced cases only)
    st <- lapply(ex1[[1]]$num, shifttreatment_indicator,
                 type=ex1[[1]]$pairnum, shifts=s, basecase=b, map=m)
    # Simulate treatment (for early detection scenarios, candidate
    # early-stage treatments for shifted cases)
    t <- treatments_by_policy(policies=ex1[[1]], treat_chars=ex1[[4]],
                              stagegroups=n, map=m, popsize, sims)
    # MANUALLY RERAN TESTS FROM ABOVE...
}
|
706a13b04f9ed4c3bf09438cc961d61fb522d455
|
7522f7268a18e834d0ea3fe203ccf6205c875b8e
|
/server.R
|
015b5d29c38abc15cb9623267b8af2dedbbdc765
|
[] |
no_license
|
erowe/MoCo
|
13114ad69b7a023096c8a0d3b4f1d9a59e4b2c94
|
f0bcbdcf0e3af40165f07bc610679e60512db91f
|
refs/heads/master
| 2020-04-14T18:52:54.448639
| 2014-12-18T03:53:57
| 2014-12-18T03:53:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,229
|
r
|
server.R
|
# This application prints the traffic stops from Montgomery County Maryland USA
# and can optionally highlight the stops that were alcohol related.
# The data is sourced from the Montgomery County Open Data Website 12/5/2014
# https://data.montgomerycountymd.gov/Public-Safety/Traffic-Violations/4mse-ku6q
#
# This is the server logic for the application
library(shiny)
library(shinyIncubator)
library(MASS)
library(RgoogleMaps)
library(RColorBrewer)
library(stringr)
library(devtools)
library(lubridate)
#library(shinyapps)
# Shiny server for the Montgomery County traffic-stop map.  On "draw", loads
# the truncated violations CSV (once), filters stops by the selected hour
# range, plots all stops on a static Google map, and optionally highlights
# alcohol-related stops in red.
shinyServer(function(input, output, session) {
    output$time <- renderText({
        # Take slider input and set variables
        # NOTE(review): the hour bounds are written to the global environment
        # with <<- so the drawMe handler below can read them; reactive values
        # would be the idiomatic alternative -- confirm this coupling is
        # intended.
        startTime <<- input$timeRange[1]
        endTime <<- input$timeRange[2]
        # Do not print to screen
        a <- NULL
    })
    output$drawMe <- renderText({
        if(input$drawMe > 0) {
            # Long-running work: show a progress dialog while drawing.
            progress <- Progress$new(session)
            Sys.sleep(1)
            progress$set(message = 'Drawing Map...')
            Sys.sleep(1)
            # Load the CSV only once per session.
            # NOTE(review): `df` is not defined anywhere in this file before
            # this check -- presumably it is created in global.R; if not,
            # nrow(df) errors on first use.  Verify against the app's global.R.
            if(is.null(nrow(df))) {
                progress$set(detail = 'Please Wait. Loading Data...')
                df <- read.csv(file = 'Traffic_Violations_Truncated.csv',
                               colClasses = c("character", "character", "numeric", "numeric", "factor"))
            }
            # Remove NA
            progress$set(detail = 'Crunching Numbers...')
            dataset <<- na.omit(df)
            # Grab the coordinates and add to a new data frame
            rawdata <- data.frame(as.numeric(dataset$Longitude), as.numeric(dataset$Latitude))
            names(rawdata) <- c("lon", "lat")
            # Create a matrix from the long/lat
            data <- as.matrix(rawdata)
            # Find the center of the map using the mean of the coordinates
            # Grab maps using RgoogleMaps
            center <- rev(sapply(rawdata, mean))
            map <- GetMap(center=center, zoom=11)
            # Translate original data to pixel coordinates on the static map.
            coords <- LatLon2XY.centered(map, rawdata$lat, rawdata$lon, 11)
            modCoords <- data.frame(coords)
            # Numeric Stop Time (hour of day, 0-23)
            numericStopTime <- hour(hms(dataset$Time.Of.Stop))
            # Bind the coordinate data with the application modifiers
            # Alcohol Stops
            # Times
            modCoords <- cbind (modCoords, dataset$Alcohol, numericStopTime)
            names(modCoords) <- c("lat", "lon", "Alcohol", "StopTime")
            # Adjust for time: keep stops within the selected hour range
            # (startTime/endTime set globally by the slider handler above).
            modCoords <- modCoords[modCoords$StopTime >= startTime,]
            modCoords <- modCoords[modCoords$StopTime <= endTime,]
            alcoholCoords <- modCoords[modCoords$Alcohol == 'Yes',]
            # Lay down the background google map
            output$mainPlot <- renderPlot({
                PlotOnStaticMap(map)
                # Plot all points
                points(modCoords$lat, modCoords$lon, pch=16, cex=.5, col="black")
                if (input$alcoholInclude) {
                    # Turn on Alcohol Differentiater
                    points(alcoholCoords$lat, alcoholCoords$lon, pch=16, cex=.65, col="red")
                } else {
                    # Turn off Alcohol Differentiater (replot in black so the
                    # red layer from a previous draw is covered)
                    points(alcoholCoords$lat, alcoholCoords$lon, pch=16, cex=.65, col="black")
                }
            }, height = 600, width = 800 )
            progress$set(detail = '')
            progress$close()
        } else {
            return()
        }
    })
}
)
|
bdfcfb8fa385465e15476129ccefc36c49830fd7
|
8cd2d5cd7bdcf74a72c04e98946189d2146a8ac2
|
/NERP/02.script2run.r
|
58ef0977c6e125b9cef6abec08f18860c38c0f0c
|
[] |
no_license
|
LaurenHodgson/Projects
|
fe7383c9d054fccb964ee54fc8a495b3de0812db
|
8935bc6f81bd173ed679552282cc7b32568c5fea
|
refs/heads/master
| 2016-09-06T02:38:54.899728
| 2012-12-13T05:00:13
| 2012-12-13T05:00:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,506
|
r
|
02.script2run.r
|
#drafted by Jeremy VanDerWal ( jjvanderwal@gmail.com ... www.jjvanderwal.com )
#GNU General Public License .. feel free to use / distribute ... no warranties
################################################################################
##get the command line arguements
args=(commandArgs(TRUE))
#evaluate the arguments
for(i in 1:length(args)) {
eval(parse(text=args[[i]]))
}
#need to have read in
#
# spp='MTHORN'
################################################################################
library(SDMTools)
#function to plot min mean and max maps for a single emmission scenario & year
# Write a PNG (tfile) showing three suitability maps -- the minimum, mean and
# maximum surfaces across GCMs (tmin/tmean/tmax, asc grids) -- plus a time bar
# marking tyear (1990-2080) labelled with the emission scenario tes.
# Relies on globals `cols` (colour ramp) and `legend.pnts` (legend polygon),
# both defined earlier in this script.
plot.minmeanmax = function(tmin,tmean,tmax,tfile,tyear,tes) { #tex is the emission scenario, tyear is the year of interest
    png(tfile,width=17,height=12.75,units='cm',res=150,pointsize=6) #start the png file
    par(oma=c(3,3,0.5,0.5),mar=c(0,0,0,0),cex=1,cex.axis=1.5) #se the parameters of hte plot
    # Layout: panels 1-3 are the maps, panel 4 is the time bar.
    mat = matrix(c(1,1,1,1,2,2,2,2,3,3,4,4,4,4,
                   1,1,1,1,2,2,2,2,3,3,3,3,3,3,
                   1,1,1,1,1,2,2,2,2,3,3,3,3,3,
                   1,1,1,1,1,1,2,2,2,2,3,3,3,3,
                   1,1,1,1,1,1,2,2,2,2,3,3,3,3),nr=5,byrow=TRUE)
    layout(mat) #define and setup the layout of the plot... 3 maps and one time bar
    #plot the maps
    image(tmin,axes=FALSE,ann=FALSE,zlim=c(0,1),col=cols) #image the first map and add associated text & lat/lon info & legend
    text(145.35,-15.5,labels ='Minimum',cex=3,adj=0)
    axis(1,at=seq(145,147,0.5),labels=c(145,NA,146,NA,147),lwd=0,lwd.ticks=1); axis(2,at=seq(-15.5,-19.5,-0.5),labels=c(NA,-16,NA,-17,NA,-18,NA,-19,NA),lwd=0,lwd.ticks=1)
    legend.gradient(legend.pnts,col=cols, c(0,1), title='suitability',cex=2)
    image(tmean,axes=FALSE,ann=FALSE,zlim=c(0,1),col=cols) #image the second map and add associated text
    text(145.35,-15.5,labels ='Mean',cex=3,adj=0)
    image(tmax,axes=FALSE,ann=FALSE,zlim=c(0,1),col=cols) #image the third map and add associated text
    text(145.35,-15.5,labels ='Maximum',cex=3,adj=0)
    #add the time bar
    par(mar=c(3,2,0,2)) #change the plot parameters for the time plot
    plot(1,1,xlim=c(0,10),ylim=c(0,1),type='n',axes=FALSE,ann=FALSE) #create an empty plot
    tval=10-((2080-tyear)/10) #work out what year should be as a bar
    polygon(c(0,0,tval,tval),c(0,0.25,0.25,0),col='black') #add the polygon to represent the year
    text(4.5,0.5,labels=tes,cex=2,font=2) #add text describing the emmision scenario
    axis(1,at=seq(1,10,3),labels=c(1990,2020,2050,2080),cex.axis=1.75) #add axes labels
    axis(1,at=seq(0,10,1),labels=F)
    dev.off()
}
#function to plot means of the different emmission scenarios & year
# Write a PNG (tfile) comparing the mean suitability surfaces of the three
# emission scenarios (a1b, a2, b1 asc grids) for a given year, with a time bar
# marking tyear.  Same layout and globals (`cols`, `legend.pnts`) as
# plot.minmeanmax above, minus the scenario label on the time bar.
plot.es.means = function(a1b,a2,b1,tfile,tyear) { #tex is the emission scenario, tyear is the year of interest
    png(tfile,width=17,height=12.75,units='cm',res=150,pointsize=6) #start the png file
    par(oma=c(3,3,0.5,0.5),mar=c(0,0,0,0),cex=1,cex.axis=1.5) #se the parameters of hte plot
    mat = matrix(c(1,1,1,1,2,2,2,2,3,3,4,4,4,4,
                   1,1,1,1,2,2,2,2,3,3,3,3,3,3,
                   1,1,1,1,1,2,2,2,2,3,3,3,3,3,
                   1,1,1,1,1,1,2,2,2,2,3,3,3,3,
                   1,1,1,1,1,1,2,2,2,2,3,3,3,3),nr=5,byrow=TRUE)
    layout(mat) #define and setup the layout of the plot... 3 maps and one time bar
    #plot the maps
    image(b1,axes=FALSE,ann=FALSE,zlim=c(0,1),col=cols) #image the first map and add associated text & lat/lon info & legend
    text(145.35,-15.5,labels ='SRES B1',cex=3,adj=0)
    axis(1,at=seq(145,147,0.5),labels=c(145,NA,146,NA,147),lwd=0,lwd.ticks=1); axis(2,at=seq(-15.5,-19.5,-0.5),labels=c(NA,-16,NA,-17,NA,-18,NA,-19,NA),lwd=0,lwd.ticks=1)
    legend.gradient(legend.pnts,col=cols, c(0,1), title='suitability',cex=2)
    image(a1b,axes=FALSE,ann=FALSE,zlim=c(0,1),col=cols) #image the second map and add associated text
    text(145.35,-15.5,labels ='SRES A1B',cex=3,adj=0)
    image(a2,axes=FALSE,ann=FALSE,zlim=c(0,1),col=cols) #image the third map and add associated text
    text(145.35,-15.5,labels ='SRES A2',cex=3,adj=0)
    #add the time bar
    par(mar=c(3,2,0,2)) #change the plot parameters for the time plot
    plot(1,1,xlim=c(0,10),ylim=c(0,1),type='n',axes=FALSE,ann=FALSE) #create an empty plot
    tval=10-((2080-tyear)/10) #work out what year should be as a bar
    polygon(c(0,0,tval,tval),c(0,0.25,0.25,0),col='black') #add the polygon to represent the year
    axis(1,at=seq(1,10,3),labels=c(1990,2020,2050,2080),cex.axis=1.75) #add axes labels
    axis(1,at=seq(0,10,1),labels=F)
    dev.off()
}
################################################################################
#define & set the working directory
# NOTE(review): hard-coded absolute path rooted at spp (set via the command
# line args parsed at the top of this script); breaks outside that filesystem.
work.dir = paste('/data/jc165798/AWT.future.sdm/models/',spp,'/',sep=''); setwd(work.dir)
out.dir=paste(work.dir,'summary/images/',sep=''); dir.create(out.dir)
#get the threshold (MaxEnt balanced training-omission logistic threshold)
threshold = read.csv("output/maxentResults.csv",as.is=TRUE)$Balance.training.omission..predicted.area.and.threshold.value.logistic.threshold[1]
#setup some plot parameters: grey below the threshold bin, tan-to-green above
bins = seq(0,1,length=101); bins = cut(threshold,bins,labels=FALSE) # get the threshold bin for cols
cols = c(rep('#E5E5E5',bins),colorRampPalette(c("tan","forestgreen"))(100)[bins:100])
# Corner points for the suitability legend polygon used by the map functions.
legend.pnts = cbind(c(144.9,145.1,145.1,144.9),c(-19.5,-19.5,-18.75,-18.75))
## first plot the current
# for (type in c('potential','realized')) {
# tasc = read.asc.gz(paste('summary/1990.',type,'.asc.gz',sep=''))
## create the min mean max plots
# plot.minmeanmax(tasc,tasc,tasc,paste(out.dir,type,'.sresa1b.1990.png',sep=''),1990,'SRES A1B')
# plot.minmeanmax(tasc,tasc,tasc,paste(out.dir,type,'.sresa2.1990.png',sep=''),1990,'SRES A2')
# plot.minmeanmax(tasc,tasc,tasc,paste(out.dir,type,'.sresb1.1990.png',sep=''),1990,'SRES B1')
## plot the ES means
# plot.es.means(tasc,tasc,tasc,paste(out.dir,type,'.ES.means.1990.png',sep=''),1990)
# }
# rm(tasc) ###free up memory
## cycle through the years
# for (YEAR in seq(2000,2080,10)) {
# for (type in c('potential','realized')) {
## read in the data
# a1b.min = read.asc.gz(paste('summary/sresa1b.',YEAR,'.min.',type,'.asc.gz',sep=''))
# a1b.mean = read.asc.gz(paste('summary/sresa1b.',YEAR,'.mean.',type,'.asc.gz',sep=''))
# a1b.max = read.asc.gz(paste('summary/sresa1b.',YEAR,'.max.',type,'.asc.gz',sep=''))
# a2.min = read.asc.gz(paste('summary/sresa2.',YEAR,'.min.',type,'.asc.gz',sep=''))
# a2.mean = read.asc.gz(paste('summary/sresa2.',YEAR,'.mean.',type,'.asc.gz',sep=''))
# a2.max = read.asc.gz(paste('summary/sresa2.',YEAR,'.max.',type,'.asc.gz',sep=''))
# b1.min = read.asc.gz(paste('summary/sresb1.',YEAR,'.min.',type,'.asc.gz',sep=''))
# b1.mean = read.asc.gz(paste('summary/sresb1.',YEAR,'.mean.',type,'.asc.gz',sep=''))
# b1.max = read.asc.gz(paste('summary/sresb1.',YEAR,'.max.',type,'.asc.gz',sep=''))
## create the min mean max plots
# plot.minmeanmax(a1b.min,a1b.mean,a1b.max,paste(out.dir,type,'.sresa1b.',YEAR,'.png',sep=''),YEAR,'SRES A1B')
# plot.minmeanmax(a2.min,a2.mean,a2.max,paste(out.dir,type,'.sresa2.',YEAR,'.png',sep=''),YEAR,'SRES A2')
# plot.minmeanmax(b1.min,b1.mean,b1.max,paste(out.dir,type,'.sresb1.',YEAR,'.png',sep=''),YEAR,'SRES B1')
## plot the ES means
# plot.es.means(a1b.mean,a2.mean,b1.mean,paste(out.dir,type,'.ES.means.',YEAR,'.png',sep=''),YEAR)
# }
# }
#now create summary images based on abundance, area, Istat, num patches, mean perimeter area ratio, aggregation index for each ES
#define plot generics
cols = c('#FF0000','#228B22','#000080') #define the line colors -- colors align with IPCC fig 5 ...
cols.fill = paste(cols,'50',sep='') #define the polygon fill colors (same colors, 50/255 alpha)
vois = c('prop.area','prop.abund','Istat','n.patches','mean.perim.area.ratio','aggregation.index') #define the variables of interest
############lauren addition
# Derive the emission scenarios / GCMs / years present from the output
# filenames instead of hard-coding them.
# NOTE(review): `base.dir` is not defined anywhere in this script as shown
# (only `work.dir` is, above) -- confirm it is supplied by the caller,
# otherwise this setwd() fails.
setwd(paste(base.dir,spp,sep=''))
futs = list.files('output/',pattern='\\.asc\\.gz',recursive=TRUE,full.names=TRUE); futs=gsub('//','/',futs)
varnames = gsub('output/','',futs); varnames = gsub('\\.asc\\.gz','',varnames)
#extract ES, GCM, year information (filenames are ES_GCM_YEAR; a name with no
#underscores is the current-climate layer)
ESs = GCMs = YEARs = current = NULL
for (ii in 1:length(varnames)) {
    tt = strsplit(varnames[ii],'\\_')[[1]]
    if (length(tt)==1) { current = tt[1] } else { ESs = c(ESs,tt[1]); GCMs = c(GCMs,tt[2]); YEARs = c(YEARs,tt[3]) }
}
ESs = unique(ESs); GCMs = unique(GCMs); YEARs = unique(YEARs)
#################replaces:
#ESs = c('sresa2','sresa1b','sresb1') #define the emission scenarios
tdata = NULL #setup the temporary dataset
for (ES in ESs) { tdata = rbind(tdata, read.csv(paste('summary/',ES,'.summary.data.csv',sep=''),as.is=TRUE)) } #read in and append the data
tdata.realized = tdata[which(tdata$dist.type=='realized'),] #keep realized data for summarizing Istat
tdata = tdata[which(tdata$dist.type=='realized.NO.small.patches'),] #only need to summarize realized data ... only for realized with small patches removed
# Only copy Istat across if the two subsets line up row-for-row on the key
# columns (2:6); otherwise Istat is left as-is.
if (length(which(tdata.realized[,2:6] != tdata[,2:6]))<1) { tdata$Istat=tdata.realized$Istat } #append the Istat informaiton
diffs = 1-(tdata[1,7:46] / tdata.realized[1,7:46]) #get the difference for removing small patches
tdata$prop.area = tdata$total.area/tdata$total.area[1] #define the proportion change in area
es.gcm.yr = aggregate(tdata[,vois],by=list(ES=tdata$ES,GCM=tdata$GCM,year=tdata$year),function(x) { return(mean(x,na.rm=TRUE)) } ) #get the means for es, gcm & year (to avoid bias for inidividual realizations)
es.yr.mean = aggregate(es.gcm.yr[,vois],by=list(ES=es.gcm.yr$ES,year=es.gcm.yr$year),function(x) { return(mean(x,na.rm=TRUE)) } ) #get the means for ES & year
es.yr.sd = aggregate(es.gcm.yr[,vois],by=list(ES=es.gcm.yr$ES,year=es.gcm.yr$year),function(x) { return(sd(x,na.rm=TRUE)) } ) #get the SD for the ES & year
es.yr.min = es.yr.max = es.yr.mean; #get the min & max data associated with +- 1 SD
es.yr.min[,vois] = es.yr.min[,vois] - es.yr.sd[,vois] #get the min & max data associated with +- 1 SD
es.yr.max[,vois] = es.yr.max[,vois] + es.yr.sd[,vois] #get the min & max data associated with +- 1 SD
es.yr.mean = rbind(tdata[which(tdata$year==1990)[1],names(es.yr.mean)],es.yr.mean) #append the 1990 current data
#define a plot function for summary information
# Plot one summary variable (voi, a column name in the es.yr.* tables) through
# time: a +/- 1 SD envelope polygon per emission scenario plus its mean line.
# y.limits = y-axis range; tmain/tylab/tsub = title, y label, optional
# subtitle.  Relies on globals ESs, cols, cols.fill, es.yr.mean/min/max
# defined above.
tplot = function(y.limits,tmain,tylab,voi,tsub=NULL) {
    plot(c(1990,2080),y.limits,ylab=tylab,xlab='year',type='n',axes=F,main=tmain,sub=tsub) #create base plot
    axis(2); axis(1,at=seq(1990,2080,10),labels=c(1990,NA,NA,2020,NA,NA,2050,NA,NA,2080)) #add the axes
    # Iterate in reverse so the first scenario's envelope is drawn on top.
    for (ii in length(ESs):1) { #add the polygons for each ES
        tpos = which(es.yr.min$ES==ESs[ii])
        polygon(c(1990,es.yr.min$year[tpos],es.yr.min$year[tpos[length(tpos):1]],1990),c(es.yr.mean[1,voi],es.yr.min[tpos,voi],es.yr.max[tpos[length(tpos):1],voi],es.yr.mean[1,voi]),col=cols.fill[ii],border=NA)
    }
    for (ii in length(ESs):1) { #add the mean lines for each ES
        tpos = which(es.yr.mean$ES==ESs[ii])
        # Prepend row 1 (the 1990 current-climate value) so every line starts
        # from the shared baseline.
        lines(es.yr.mean$year[c(1,tpos)],es.yr.mean[c(1,tpos),voi],col=cols[ii],lwd=1)
    }
}
# Create the 2x3 summary figure of the six variables of interest, then write
# the abundance-based status table for downstream IUCN-style assessment.
png(paste(out.dir,"basic.summaries.png",sep=''), width=12, height=8, units="cm", pointsize=4, res=300)
par(mfrow=c(2,3),cex=1,cex.main=1.3)
# BUG FIX: the subtitle paste() calls used `se=''` instead of `sep=''`.
# Arguments after ... in paste() are matched *exactly*, so `se=''` was
# silently absorbed into ... as an extra empty-string element while the
# default " " separator stayed in effect, yielding e.g.
# "12.3 % area removed ... " instead of "12.3% area removed ...".
# (Also: na.rm=T replaced with na.rm=TRUE, since T is reassignable.)
tplot(c(0,max(c(es.yr.max$prop.area,es.yr.max$prop.abund,1),na.rm=TRUE)),'Distribution Area','Proportion','prop.area',paste(round(diffs$total.area*100,1),'% area removed in small patches',sep='')) #plot the change in distribution area
legend('bottomleft',legend=toupper(gsub('sres','sres ',ESs)),col=cols,lty=1,lwd=2,bty='n',cex=1.1)
tplot(c(0,max(c(es.yr.max$prop.abund,es.yr.max$prop.area,1),na.rm=TRUE)),'Abundance','Proportion','prop.abund',paste(round(diffs$sum.suitability*100,1),'% abundance removed in small patches',sep='')) #plot the change in abundance
tplot(c(0,1),'I similarity statistic','I value','Istat') #plot the change in I statistic
tplot(range(c(es.yr.max$n.patches,es.yr.min$n.patches),na.rm=TRUE),'Number of Patches','Count','n.patches',paste(round(diffs$n.patches*100,1),'% small patches removed',sep='')) #plot the change in number of patches
tplot(range(c(es.yr.max$mean.perim.area.ratio,es.yr.min$mean.perim.area.ratio),na.rm=TRUE),'Perimeter Area Ratio','Mean ratio','mean.perim.area.ratio') #plot the change in perimeter area ratio
tplot(range(c(es.yr.max$aggregation.index,es.yr.min$aggregation.index),na.rm=TRUE),'Aggregation Index','Per cent','aggregation.index') #plot the change in aggregation index
dev.off()
#create a dataframe and write it out summarizing this species status through time based on abundance
out=data.frame(spp=spp,ES=es.yr.mean$ES,year=es.yr.mean$year,prop.abund.mean=es.yr.mean$prop.abund,prop.abund.minus1SD=c(1,es.yr.min$prop.abund),prop.abund.plus1SD=c(1,es.yr.max$prop.abund))
write.csv(out,paste(work.dir,'summary/IUCN.summary.data.csv',sep=''),row.names=FALSE)
|
49cae7835e2475a8ae5deb2af10815fbf2340eef
|
50066dae4216d17bd6f0dcb9a11d872e73246eb6
|
/man/print.provenance.Rd
|
0f7a4c8881ace603f46f15c582efbe26dbfb863d
|
[] |
no_license
|
cran/PKNCA
|
11de9db2cb98279c79d06022415b8772e7c1f5ea
|
8f580da3e3c594e4e1be747cb2d8e35216784ed2
|
refs/heads/master
| 2023-05-10T16:54:19.131987
| 2023-04-29T18:30:02
| 2023-04-29T18:30:02
| 48,085,829
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 432
|
rd
|
print.provenance.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/provenance.R
\name{print.provenance}
\alias{print.provenance}
\title{Print the summary of a provenance object}
\usage{
\method{print}{provenance}(x, ...)
}
\arguments{
\item{x}{The object to be printed}
\item{...}{Ignored}
}
\value{
invisible text of the printed information
}
\description{
Print the summary of a provenance object
}
|
84193cb4fffebbae5b3ce7179b09b6ddaf920177
|
52494e8f969efb89d40afe477f9a9a29df22fff8
|
/ChicagoCitySalary/ui.R
|
835c5babedc69c4ce2230ff056ce7f2583f2debf
|
[] |
no_license
|
manocodes/ChicagoCitySalary
|
347c1696bf9922c0171fe21d30d168452fbc53ab
|
ff3846326847c947cd06557eca251a7b87c71860
|
refs/heads/master
| 2020-07-29T15:18:57.675231
| 2016-11-14T23:44:07
| 2016-11-14T23:44:57
| 73,663,011
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,307
|
r
|
ui.R
|
library(shiny)
# Shiny UI: navbar app for Chicago City salary data.  The Salary tab pairs a
# salary-year range slider and a server-rendered select control with a table
# and plot; submitButton() defers all reactivity until the user clicks Submit.
#shinyUI(fluidPage(
#  tabsetPanel(
navbarPage("Chicago City",
           tabPanel( "Salary",(
               # Sidebar with a slider input for number of bins
               sidebarLayout(
                   sidebarPanel( h3("Chicago City Salary Info"),
                                 sliderInput("slider1","Select the Salary Year",
                                             2010, 2013, value=c(2011, 2012)),
                                 # Populated by the server (uiOutput), so the
                                 # choice list can depend on the data.
                                 uiOutput("select"),
                                 submitButton("Submit"),
                                 tags$br(),
                                 tags$a(href="mailto:mano.net@gmail.com?Subject=Feedback%20From%20DataScience%20Students..",
                                        "Email Mano with your feedback..")
                   ),
                   # Outputs text1/text2/data/plot are rendered in server.R.
                   mainPanel( h2(textOutput("text1")),
                              h3(textOutput("text2")),
                              tableOutput("data"),
                              plotOutput("plot")
                   )
               )
           )),
           tabPanel( "Other Details", "Coming soon.."),
           tabPanel( "Misc", "Coming soon..")
)
#))
|
d5ea13c813cbdf7d77aec4840c579ae1a25c9944
|
ebc0ee7cee315dfc59553162411691a185496d62
|
/rankAll.R
|
694b6fccb634e9dd0fe201cb213368333f92c976
|
[] |
no_license
|
kmenzies/datasciencecoursera
|
57ed1dc9cd8120ba6f12fd3f0edd4538bb9b29b7
|
b2e43b225caa3d80a845581604cea9061c483652
|
refs/heads/master
| 2021-01-10T08:32:19.839314
| 2016-01-21T16:44:52
| 2016-01-21T16:44:52
| 49,586,132
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,497
|
r
|
rankAll.R
|
# Map from outcome CSV column names to the user-facing outcome strings
# accepted by rankall() (used both for validation and to look up the column).
validOptions <- list(Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack="heart attack", Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure="heart failure", Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia="pneumonia")
# Path to the Hospital Compare outcomes CSV.
# NOTE(review): hard-coded absolute home-directory path; breaks elsewhere.
toOpen <- "~/Documents/Coursera/rprog-data-ProgAssignment3-data/outcome-of-care-measures.csv"
# For every state, return the hospital holding rank `num` for the 30-day
# mortality `outcome` ("heart attack", "heart failure" or "pneumonia").
# `num` may be "best", "worst" or an integer rank; a rank beyond the number
# of rated hospitals in a state yields NA for that state.
# Returns a matrix (via t(sapply)) with one row per state: hospital name and
# state.  Stops with "invalid outcome" for unrecognised outcomes.
rankall <- function(outcome, num = "best") {
    ## Read outcome data
    cvsData <- readFile()
    ## Check that outcome is valid
    if(!(outcome %in% validOptions)) {
        stop("invalid outcome")
    }
    ## Return hospital name in that state with the given rank
    ## 30-day death rate
    toOrder<-selectOutcome(cvsData, outcome)
    # Split by state, then rank each state's hospitals independently.
    byState<-split(toOrder, toOrder$State)
    t(sapply(byState, function(foo) orderAndRank(foo, rank=num)))
}
orderAndRank <- function(data, rank) {
  # Rank the rows of `data` by outcome value (column 2) ascending, breaking
  # ties alphabetically by hospital name (column 1), then return the hospital
  # name and state (columns 1 and 3) at the requested rank.
  # `rank` is "best", "worst", or a numeric position.
  ranking <- order(data[, 2], data[, 1], decreasing = FALSE)
  idx <- if (rank == "best") {
    ranking[1]
  } else if (rank == "worst") {
    ranking[length(ranking)]
  } else {
    # Numeric rank; out-of-range positions index as NA.
    ranking[rank]
  }
  data[idx, c(1, 3)]
}
selectOutcome <- function(cvsData, outcome) {
  # Reduce the raw Hospital Compare table to the three columns needed for
  # ranking: hospital name, the requested outcome's mortality rate, and state.
  # Relies on the file-level `validOptions` map to translate the user-facing
  # outcome string into its CSV column name.
  outcome_col <- names(which(validOptions == outcome))
  res <- cvsData[, c("Hospital.Name", outcome_col, "State")]
  # Rates arrive as character; "Not Available" coerces to NA (with a warning)
  # and those rows are dropped.
  res[, 2] <- as.numeric(res[, 2])
  res[!is.na(res[, 2]), ]
}
# Load the outcomes CSV (path in the file-level constant `toOpen`) with every
# column as character; numeric coercion happens later in selectOutcome().
readFile <- function() {
    read.csv(file = toOpen, colClasses = "character")
}
|
0b65e68d58be36a23b2d38eeea2305688f8a25c7
|
9a76ce640ceb90d3a1e496f434600444090edce5
|
/Server.R
|
1857274266fe632172479a64981f0f869a08239f
|
[] |
no_license
|
blockee/DataProducts
|
9fb82c6d3504262173f9814109dc5495af1f3b12
|
e69039c7455a05e1078e0d2cc9d2553b76a48a8f
|
refs/heads/master
| 2021-01-13T01:27:33.674200
| 2014-10-26T23:15:45
| 2014-10-26T23:15:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 406
|
r
|
Server.R
|
# Shiny server for 2013 NFL offense stats: reports the selected team's raw
# value for the chosen stat and its z-score relative to the whole league.
nfl <- read.csv("nfloffense2013.csv")
shinyServer(
    function(input, output){
        # Raw value of the selected stat for the selected team.
        output$score <- renderPrint({nfl[nfl$TEAM == input$team, names(nfl) == input$stat ]})
        # Standard score: (team value - league mean) / league sd.
        output$ss <- renderPrint({(nfl[nfl$TEAM == input$team, names(nfl) == input$stat ] -
                                       mean(nfl[,names(nfl) == input$stat]))/sd(nfl[,names(nfl) == input$stat])})
    }
)
|
e0e5aa239d2e5e65c6a46d9f79500f53707f6809
|
4115c98348bf0e7fe944272d91fe351d58d22a96
|
/tests/testthat/test-get_SDA_cosurfmorph.R
|
c9526f6094221f7161022ab6054a8ff33539442e
|
[] |
no_license
|
ncss-tech/soilDB
|
a933bf98a674fd54b5e1073a4497ee38a177bdf2
|
380440fc7b804b495aa711c130ab914c673a54be
|
refs/heads/master
| 2023-09-02T14:19:17.348412
| 2023-09-02T00:56:16
| 2023-09-02T00:56:16
| 54,595,470
| 68
| 20
| null | 2023-09-01T19:00:48
| 2016-03-23T21:51:10
|
R
|
UTF-8
|
R
| false
| false
| 934
|
r
|
test-get_SDA_cosurfmorph.R
|
# Exercise get_SDA_cosurfmorph() against the live Soil Data Access service for
# survey area CA630, covering the default grouping plus the `by` and `table`
# variants.  Each call is network-gated: skip_if(is.null(x)) treats a failed
# query as a skip rather than a failure.
test_that("get_SDA_cosurfmorph works", {
  skip_on_cran()
  skip_if_offline()
  # Summarize by 3D geomorphic components by component name (default `by='compname'`)
  x <- get_SDA_cosurfmorph(WHERE = "areasymbol = 'CA630'")
  skip_if(is.null(x))
  expect_true(inherits(x, 'data.frame'))
  # Whole Soil Survey Area summary (using `by = 'areasymbol'`)
  x <- get_SDA_cosurfmorph(WHERE = "areasymbol = 'CA630'", by = 'areasymbol')
  skip_if(is.null(x))
  expect_true(inherits(x, 'data.frame'))
  # 2D Hillslope Position summary(using `table = 'cosurfmorphhpp'`)
  x <- get_SDA_cosurfmorph(WHERE = "areasymbol = 'CA630'", table = 'cosurfmorphhpp')
  skip_if(is.null(x))
  expect_true(inherits(x, 'data.frame'))
  # Surface Shape summary (using `table = 'cosurfmorphss'`)
  x <- get_SDA_cosurfmorph(WHERE = "areasymbol = 'CA630'", table = 'cosurfmorphss')
  skip_if(is.null(x))
  expect_true(inherits(x, 'data.frame'))
})
|
3e2892948356446bca35c57a9688c36d91488b38
|
ad67aec0b4bbc865465e37368b7660c0488c86a1
|
/tests/testthat/test_c_or_1st.R
|
d8389da6a4d86d6f8bf45a88f2b49e7194a53717
|
[] |
no_license
|
fazetu/htable
|
b6dadc033fb2081a14236d18166aacb067347c7d
|
8924560ba8ec6fa0e364db30ae2897b69ae6ef16
|
refs/heads/master
| 2021-07-19T09:42:55.501605
| 2020-02-07T21:48:50
| 2020-02-07T21:48:50
| 195,873,455
| 0
| 0
| null | 2019-11-12T15:36:30
| 2019-07-08T19:24:42
|
R
|
UTF-8
|
R
| false
| false
| 756
|
r
|
test_c_or_1st.R
|
# Contract pinned by these expectations: c_or_1st(a, b) returns NULL when both
# arguments are NULL, the non-NULL argument when exactly one is NULL, and
# otherwise a type-dependent result -- logicals and characters return the
# first argument, while numerics are concatenated with duplicates between the
# two vectors collapsed.
# NOTE(review): the numeric vs character behaviour is asymmetric
# (c_or_1st(1, 2) == c(1, 2) but c_or_1st("a", "b") == "a"); confirm against
# c_or_1st()'s documentation that this asymmetry is intended.
test_that("test c_or_1st()", {
  # both NULLs
  expect_equal(c_or_1st(NULL, NULL), NULL)
  # one is NULL
  expect_equal(c_or_1st(NULL, 1), 1)
  expect_equal(c_or_1st(1, NULL), 1)
  expect_equal(c_or_1st(NULL, "a"), "a")
  expect_equal(c_or_1st("a", NULL), "a")
  expect_equal(c_or_1st(NULL, TRUE), TRUE)
  expect_equal(c_or_1st(TRUE, NULL), TRUE)
  # logicals
  expect_equal(c_or_1st(TRUE, TRUE), TRUE)
  expect_equal(c_or_1st(TRUE, FALSE), TRUE)
  expect_equal(c_or_1st(FALSE, TRUE), FALSE)
  expect_equal(c_or_1st(FALSE, FALSE), FALSE)
  # numerics
  expect_equal(c_or_1st(1, 2), c(1, 2))
  expect_equal(c_or_1st(1:2, 1:3), 1:3)
  expect_equal(c_or_1st(c(1:2, 10), 11), c(1:2, 10:11))
  # character
  expect_equal(c_or_1st("a", "b"), "a")
})
|
276a85e074e8daf4b96fa3d189bdc92664dd31a2
|
bf07e7f42e25dca0d84c70ca3b068c82bad196aa
|
/functions.r
|
7453c5cd358fd7264b5fe03c3ced73eb2257b584
|
[] |
no_license
|
wesslen/social-media-workshop
|
bb773dde319a6183e969e639109cbc91b0cc801b
|
dfc051a721a719ed9e01fdbadc2fff91e88b0b7d
|
refs/heads/master
| 2021-01-18T15:21:10.533924
| 2017-02-28T00:32:05
| 2017-02-28T00:32:05
| 55,739,408
| 1
| 1
| null | 2016-04-08T01:13:04
| 2016-04-08T01:13:04
| null |
UTF-8
|
R
| false
| false
| 8,315
|
r
|
functions.r
|
################################################################################
# Extract hashtags from a character vector and return frequency table
################################################################################
getCommonHashtags <- function(text, n = 20) {
  # Extract every "#word" occurrence from a character vector and return a
  # frequency table of the `n` most common hashtags, most frequent first.
  hashtags <- unlist(regmatches(text, gregexpr("#(\\d|\\w)+", text)))
  tab <- table(hashtags)
  # Sort descending by count and keep the top n entries.
  head(sort(tab, decreasing = TRUE), n = n)
}
################################################################################
# Download tweets sent by a given user
################################################################################
#' @rdname getTimeline
#' @export
#'
#' @title
#' Returns up to 3,200 recent tweets from a given user
#'
#' @description
#' \code{getTimeline} connects to the REST API of Twitter and returns up to
#' 3,200 recent tweets sent by these user. Tweets are appended to
#' \code{filename}, one JSON object per line.
#'
#' @author
#' Pablo Barbera \email{pablo.barbera@@nyu.edu}
#'
#' @param filename file where tweets will be stored (in json format)
#'
#' @param n number of tweets to be downloaded (maximum is 3,200)
#'
#' @param screen_name user name of the Twitter user for which his/her tweets
#' will be downloaded
#'
#' @param id id of Twitter user for which his/her tweets will be downloaded
#' (Use either of these two arguments)
#'
#' @param oauth OAuth token
#'
#' @param since_id id of the oldest tweet to be downloaded. Useful if, for
#' example, we're only interested in getting tweets sent after a certain
#' date.
#'
#' @param trim_user if "true", downloaded tweets will include user object
#' embedded. If "false", only tweet information will be downloaded.
#'
#' @param sleep numeric, number of seconds between API calls. Higher number
#' will increase reliability of API calls; lower number will increase speed.
#'
#' @examples \dontrun{
#' ## Download recent tweets by user "p_barbera"
#' friends <- getTimeline(screen_name="p_barbera", oauth=my_oauth)
#' }
#'
getTimeline <- function(filename, n=3200, oauth, screen_name=NULL,
    id=NULL, since_id=NULL, trim_user="true", sleep=.5){
    require(rjson); require(ROAuth)
    ## BUG FIX: the body previously ignored the `oauth` argument and used a
    ## global `my_oauth` in every request; it now uses the supplied token.
    ## Helper: report remaining hits and block until the rate limit allows
    ## at least one more user_timeline call.
    waitForLimit <- function(limit){
        cat(limit, " hits left\n")
        while (limit==0){
            Sys.sleep(sleep)
            # sleep for 5 minutes if limit rate is less than 100
            if (getLimitRate(oauth)<100){
                Sys.sleep(300)
            }
            limit <- getLimitTimeline(oauth)
            cat(limit, " hits left\n")
        }
        limit
    }
    limit <- waitForLimit(getLimitTimeline(oauth))
    ## url to call
    url <- "https://api.twitter.com/1.1/statuses/user_timeline.json"
    ## first API call: identify the user by screen_name or id
    if (!is.null(screen_name)){
        params <- list(screen_name = screen_name, count=200, trim_user=trim_user)
    }
    if (!is.null(id)){
        params <- list(id=id, count=200, trim_user=trim_user)
    }
    if (!is.null(since_id)){
        params[["since_id"]] <- since_id
    }
    url.data <- oauth$OAuthRequest(URL=url, params=params, method="GET",
        cainfo=system.file("CurlSSL", "cacert.pem", package = "RCurl"))
    Sys.sleep(sleep)
    ## one API call less; wait if we just exhausted the window
    limit <- waitForLimit(limit - 1)
    ## parse JSON; on an API error show the raw response and stop
    json.data <- RJSONIO::fromJSON(url.data)
    if (length(json.data$error)!=0){
        cat(url.data)
        # BUG FIX: previous message referenced an undefined `cursor` variable
        stop("error in Twitter API request")
    }
    ## append tweets to disk, one JSON object per line
    conn <- file(filename, "a")
    invisible(lapply(json.data, function(x) writeLines(rjson::toJSON(x), con=conn)))
    close(conn)
    ## nothing returned? then there is nothing to paginate over
    tweets <- length(json.data)
    if (tweets == 0){
        cat("0 tweets downloaded\n")
        return(invisible(NULL))
    }
    ## max_id = id of the oldest tweet downloaded so far (pagination cursor)
    max_id <- json.data[[tweets]]$id_str
    cat(tweets, "tweets. Max id: ", max_id, "\n")
    max_id_old <- "none"
    if (is.null(since_id)) {since_id <- 1}
    ## keep paging backwards until we have n tweets, the cursor stops moving,
    ## or we reach tweets older than since_id
    while (tweets < n & max_id != max_id_old &
           as.numeric(max_id) > as.numeric(since_id)){
        max_id_old <- max_id
        if (!is.null(screen_name)){
            params <- list(screen_name = screen_name, count=200, max_id=max_id,
                trim_user=trim_user)
        }
        if (!is.null(id)){
            params <- list(id=id, count=200, max_id=max_id, trim_user=trim_user)
        }
        if (!is.null(since_id)){
            params[['since_id']] <- since_id
        }
        url.data <- oauth$OAuthRequest(URL=url, params=params, method="GET",
            cainfo=system.file("CurlSSL", "cacert.pem", package = "RCurl"))
        Sys.sleep(sleep)
        limit <- waitForLimit(limit - 1)
        json.data <- RJSONIO::fromJSON(url.data)
        if (length(json.data$error)!=0){
            cat(url.data)
            stop("error in Twitter API request")
        }
        ## ROBUSTNESS: stop if the API returns an empty page (previously this
        ## crashed indexing json.data[[0]])
        if (length(json.data) == 0) break
        conn <- file(filename, "a")
        invisible(lapply(json.data, function(x) writeLines(rjson::toJSON(x), con=conn)))
        close(conn)
        tweets <- tweets + length(json.data)
        max_id <- json.data[[length(json.data)]]$id_str
        cat(tweets, "tweets. Max id: ", max_id, "\n")
    }
}
# Query Twitter's rate-limit endpoint and return how many
# statuses/user_timeline calls remain in the current window.
getLimitTimeline <- function(my_oauth){
    require(rjson); require(ROAuth)
    response <- my_oauth$OAuthRequest(
        URL = "https://api.twitter.com/1.1/application/rate_limit_status.json",
        params = list(resources = "statuses,application"),
        method = "GET",
        cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl"))
    parsed <- fromJSON(response)
    unlist(parsed$resources$statuses$`/statuses/user_timeline`[['remaining']])
}
# Query Twitter's rate-limit endpoint and return how many calls to the
# rate_limit_status resource itself remain in the current window.
getLimitRate <- function(my_oauth){
    require(rjson); require(ROAuth)
    response <- my_oauth$OAuthRequest(
        URL = "https://api.twitter.com/1.1/application/rate_limit_status.json",
        params = list(resources = "followers,application"),
        method = "GET",
        cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl"))
    parsed <- fromJSON(response)
    unlist(parsed$resources$application$`/application/rate_limit_status`[['remaining']])
}
################################################################################
# Scrape NYT Congress API for information on Members of 114th Congress
################################################################################
# Download the member roster of the 114th Congress for one chamber
# ("house" or "senate") from the NYT Congress API and return a data frame
# with one row per member and one column per API field.
scrape_nytimes_congress_api <- function(api_key, chamber){
    require(RCurl)
    require(RJSONIO)
    # build the query URL for the requested chamber
    url <- paste0("http://api.nytimes.com/svc/politics/v3/us/legislative/",
        "congress/114/", chamber, "/members.json?",
        "api-key=", api_key)
    # fetch and parse the JSON payload
    data <- fromJSON(getURL(url))
    members <- data[[3]][[1]]$members
    # one column per field of the first member record
    fields <- names(members[[1]])
    df <- matrix(NA, nrow = length(members), ncol = length(fields))
    for (i in seq_along(fields)){
        df[, i] <- unlistWithNA(members, fields[i])
    }
    df <- data.frame(df, stringsAsFactors = FALSE)
    names(df) <- fields
    # senators have no district; add the column so both chambers align
    if (chamber == "senate"){
        df$district <- NA
    }
    df$chamber <- chamber
    return(df)
}
# Extract `field` from every element of a list, returning NA where the
# field is absent (or where subsetting the element errors).
unlistWithNA <- function(lst, field){
    # Does each element carry the field? try() guards elements for which
    # `[[` itself fails; those yield an "Error..." string.
    has_field <- unlist(lapply(lst, function(x) try(!is.null(x[[field]]), silent = TRUE)))
    # Treat try() error strings as "missing", then coerce back to logical.
    has_field[grep('Error', has_field)] <- FALSE
    has_field <- ifelse(has_field == "TRUE", TRUE, FALSE)
    # Start from all-NA and fill in only the elements that have the field.
    out <- rep(NA, length(lst))
    out[has_field] <- unlist(lapply(lst[has_field], '[[', field))
    return(out)
}
|
fa24aaf62e3cd23cdb303f1a5e6389029c825535
|
ee1205428b0d18231b1af3603a96cbdedcfa6c8f
|
/R/THINSp.R
|
2d3fa1255f6bd044a04157e918befbea3669e12f
|
[] |
no_license
|
cicibauer/CM
|
9eefc2506a9a0717bc9789866a9896a3a848a576
|
d3947fa3c4b49fd3a700f6ced5ec258e7feab9ff
|
refs/heads/master
| 2023-06-25T06:28:09.778915
| 2021-07-07T16:33:27
| 2021-07-07T16:33:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 960
|
r
|
THINSp.R
|
#' Thin-Plate Spline
#'
#' Computes the thin-plate spline basis f(d) = d^2 * log(d + 0.01) with
#' d = |knot - location| for 1-dimensional spatial locations. The 0.01
#' offset keeps the logarithm finite when a location coincides with a knot.
#' @param locs n-dimensional vector (n x 1 matrix) of locations
#' @param knots r-dimensional vector (r x 1 matrix) of knots
#' @param tol thresholds small values of the elements of psi to be zero. Default is no threshold.
#' @examples
#' #example two dimensional separable thin-plate spline
#' points1 = seq(0,1,length.out=1001)
#' points1=points1[2:1001]
#' r = 10
#' knots = seq(0,1,length.out=r)
#' G1 = THINSp(as.matrix(points1,m,1),as.matrix(knots,r,1))
#'
#' G=G1 %x% G1
#'
#' @return psi nxr matrix of basis functions
#' @export
THINSp<-function(locs,knots,tol=0){
  r = dim(knots)[1]
  n = dim(locs)[1]
  # Vectorized pairwise distances: d[i, j] = |knots[j] - locs[i]|.
  # Replaces the original O(n*r) scalar double loop with one outer() call;
  # values are identical.
  d = abs(outer(locs[seq_len(n)], knots[seq_len(r)], "-"))
  psi = d^2 * log(d + 0.01)
  # Threshold small basis values to exactly zero (no-op for tol = 0).
  psi[abs(psi) < tol] = 0
  return(psi)
}
|
ba75ff26009ae33751f203284d45be87502097e9
|
344614177666758a75a235966d5adbc2232987b6
|
/tests/testthat/test-cfb_ratings_sp.R
|
054bd67e20a8fb60e6c6078d9b324970518dfd96
|
[
"MIT"
] |
permissive
|
saiemgilani/cfbscrapR
|
61d677496bac5053c07fac708f34092ce70e141d
|
0a33650fb6e7e6768b2cc1318c8960dd291c73f4
|
refs/heads/master
| 2023-03-31T06:19:47.722785
| 2021-04-03T23:53:23
| 2021-04-03T23:53:23
| 274,903,057
| 28
| 8
|
NOASSERTION
| 2021-02-20T19:36:07
| 2020-06-25T11:49:29
|
R
|
UTF-8
|
R
| false
| false
| 1,040
|
r
|
test-cfb_ratings_sp.R
|
# Tests for cfb_ratings_sp(): the returned data frame must expose the full
# expected SP+ column set for year-only, team-only, and year+team queries.
# NOTE(review): the three fixtures below are fetched at file-source time and
# hit the CollegeFootballData API -- these tests require network access.
context("CFB Ratings - Bill C.'s SP+")
x <- cfb_ratings_sp(year = 2019)
y <- cfb_ratings_sp(team = 'Texas A&M')
z <- cfb_ratings_sp(year = 2019, team = 'LSU')
# Expected column names, in order, for every query form.
cols <- c('year','team',
'conference','rating','ranking','second_order_wins', 'sos','offense_ranking',
'offense_rating','offense_success', 'offense_explosiveness',
'offense_rushing','offense_passing','offense_standard_downs',
'offense_passing_downs','offense_run_rate',
'offense_pace','defense_ranking','defense_rating','defense_success','defense_explosiveness',
'defense_rushing','defense_passing','defense_standard_downs',
'defense_passing_downs','defense_havoc_total','defense_havoc_front_seven',
'defense_havoc_db','special_teams_rating')
test_that("CFB Ratings - Bill C.'s SP+", {
expect_equal(colnames(x), cols)
expect_equal(colnames(y), cols)
expect_equal(colnames(z), cols)
expect_s3_class(x, "data.frame")
expect_s3_class(y, "data.frame")
expect_s3_class(z, "data.frame")
})
|
1c976ea4754ee355bd8960501aea746c1b43243b
|
4504e2c8fc4aefdaccaac740a9b82f29cd49e817
|
/man/Rosling.bubbles.Rd
|
43e7653ac5273a8c4103a418b219f5f0601311b7
|
[] |
no_license
|
dgrtwo/animation
|
37daaf114583e548c2e569fa1f3b12183b12c15f
|
c7a3155a3544d80b94b5378f6ace8811774632de
|
refs/heads/master
| 2021-01-09T05:25:47.116366
| 2016-02-14T02:57:17
| 2016-02-14T02:57:17
| 51,805,168
| 2
| 1
| null | 2016-02-16T03:29:58
| 2016-02-16T03:29:56
|
R
|
UTF-8
|
R
| false
| false
| 3,204
|
rd
|
Rosling.bubbles.Rd
|
% Please edit documentation in R/Rosling.bubbles.R
\name{Rosling.bubbles}
\alias{Rosling.bubbles}
\title{The bubbles animation in Hans Rosling's Talk}
\usage{
Rosling.bubbles(x, y, data, type = c("circles", "squares", "rectangles", "stars",
"thermometers", "boxplots"), bg, xlim = range(x), ylim = range(y), main = NULL,
xlab = "x", ylab = "y", ..., grid = TRUE, text = 1:ani.options("nmax"),
text.col = rgb(0, 0, 0, 0.5), text.cex = 5)
}
\arguments{
\item{x, y}{the x and y co-ordinates for the centres of the bubbles (symbols).
Default to be 10 uniform random numbers in [0, 1] for each single image
frame (so the length should be 10 * \code{ani.options('nmax')})}
\item{type, data}{the type and data for symbols; see \code{\link{symbols}}.
The default type is \code{circles}.}
\item{bg, main, xlim, ylim, xlab, ylab, ...}{see \code{\link{symbols}}. Note that
\code{bg} has default values taking semi-transparent colors.}
\item{grid}{logical; add a grid to the plot?}
\item{text}{a character vector to be added to the plot one by one (e.g. the
year in Rosling's talk)}
\item{text.col, text.cex}{color and magnification of the background text}
}
\value{
\code{NULL}.
}
\description{
In Hans Rosling's attractive talk ``Debunking third-world myths with the best
stats you've ever seen'', he used a lot of bubble plots to illustrate trends
behind the data over time. This function gives an imitation of those moving
bubbles, besides, as this function is based on \code{\link{symbols}}, we can
also make use of other symbols such as squares, rectangles, thermometers,
etc.
}
\details{
Suppose we have observations of \eqn{n} individuals over
\code{ani.options('nmax')} years. In this animation, the data of each year
will be shown in the bubbles (symbols) plot; as time goes on, certain trends
will be revealed (like those in Rosling's talk). Please note that the
arrangement of the data for bubbles (symbols) should be a matrix like
\eqn{A_{ijk}} in which \eqn{i} is the individual id (from 1 to n), \eqn{j}
denotes the \eqn{j}-th variable (from 1 to p) and \eqn{k} indicates the time
from 1 to \code{ani.options('nmax')}.
And the length of \code{x} and \code{y} should be equal to the number of rows
of this matrix.
}
\examples{
oopt = ani.options(interval = 0.1, nmax = ifelse(interactive(), 50, 2))
## use default arguments (random numbers); you may try to find the real
## data
par(mar = c(4, 4, 0.2, 0.2))
Rosling.bubbles()
## rectangles
Rosling.bubbles(type = "rectangles", data = matrix(abs(rnorm(50 * 10 *
2)), ncol = 2))
## save the animation in HTML pages
saveHTML({
par(mar = c(4, 4, 0.2, 0.2))
ani.options(interval = 0.1, nmax = ifelse(interactive(), 50, 2))
Rosling.bubbles(text = 1951:2000)
}, img.name = "Rosling.bubbles", htmlfile = "Rosling.bubbles.html", ani.height = 450,
ani.width = 600, title = "The Bubbles Animation in Hans Rosling's Talk",
description = c("An imitation of Hans Rosling's moving bubbles.",
"(with 'years' as the background)"))
ani.options(oopt)
}
\author{
Yihui Xie
}
\references{
\url{http://www.ted.com/talks/hans_rosling_shows_the_best_stats_you_ve_ever_seen.html}
}
\seealso{
\code{\link{symbols}}
}
|
6f20038cdd18f56e4878b3a41a5c65c306fac112
|
8d657b14c43472f23bd8faaf0ed3aba9e151cea6
|
/R/vertical_3party_logistic.R
|
3d9ddfafb929028231f36a6cb3165f2b9a2e97f6
|
[] |
no_license
|
cran/vdra
|
42a73738350a38c6c95af0a3156ec877829ae694
|
54c44c79831ea3407fc5cbf6fbc0301ce8c2cda2
|
refs/heads/master
| 2023-07-16T08:14:03.433005
| 2021-09-09T05:20:02
| 2021-09-09T05:20:02
| 404,778,003
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 46,481
|
r
|
vertical_3party_logistic.R
|
################### DISTRIBUTED LOGISTIC REGRESSION FUNCTIONS ##################
# Party T: greedily drop colinear columns of the pooled X'X (a column is
# kept only if adding it leaves the principal submatrix well conditioned),
# shrink all column-aligned metadata in `params` accordingly, publish the
# kept indices for parties A and B, and validate that enough usable
# covariates survive on each side.
CheckColinearityLogistic.T3 = function(params) {
if (params$trace) cat(as.character(Sys.time()), "CheckColinearityLogistic.T3\n\n")
xtx = params$xtx
xty = params$xty
nrow = nrow(xtx)
# Always keep column 1; add each further column only if the reciprocal
# condition number of the grown submatrix stays above the tolerance.
indicies = c(1)
for (i in 2:nrow) {
tempIndicies = c(indicies, i)
if (rcond(xtx[tempIndicies, tempIndicies]) > 10^8 * .Machine$double.eps) {
indicies = c(indicies, i)
}
}
# Restrict X'X and X'y to the retained columns.
xtx = xtx[indicies, indicies, drop = FALSE]
xty = xty[indicies, drop = FALSE]
# Columns up to length(Anames) belong to party A; the remainder to party
# B (B's indices are re-based to start at 1).
Anames = params$colnamesA
Bnames = params$colnamesB
Aindex = which(indicies <= length(Anames))
params$IndiciesKeep = indicies
params$AIndiciesKeep = indicies[Aindex]
params$BIndiciesKeep = indicies[-Aindex] - length(Anames)
AnamesKeep = Anames[params$AIndiciesKeep]
BnamesKeep = Bnames[params$BIndiciesKeep]
# Preserve the pre-reduction metadata (*.old) and overwrite the live
# copies with the reduced versions.
params$colnamesA.old = params$colnamesA
params$colnamesB.old = params$colnamesB
params$colnamesA = AnamesKeep
params$colnamesB = BnamesKeep
params$p1.old = params$p1
params$p2.old = params$p2
params$p1 = length(AnamesKeep)
params$p2 = length(BnamesKeep)
params$p.old = params$p1.old + params$p2.old
params$p = params$p1 + params$p2
params$meansA = params$meansA[params$AIndiciesKeep]
params$meansB = params$meansB[params$BIndiciesKeep]
params$sdA = params$sdA[params$AIndiciesKeep]
params$sdB = params$sdB[params$BIndiciesKeep]
params$xtx = xtx
params$xty = xty
# Publish the kept indices so A and B can prune their local data.
Aindicies = params$AIndiciesKeep
Bindicies = params$BIndiciesKeep
writeTime = proc.time()[3]
save(Aindicies, file = file.path(params$writePath, "Aindicies.rdata"))
save(Bindicies, file = file.path(params$writePath, "Bindicies.rdata"))
writeSize = sum(file.size(file.path(params$writePath, c("Aindicies.rdata",
"Bindicies.rdata"))))
writeTime = proc.time()[3] - writeTime
# Validate covariate mix; Atags drops the intercept ([-1]) first.
Btags = params$Btags[params$BIndiciesKeep]
Atags = params$Atags[params$AIndiciesKeep][-1]
if ((length(unique(Atags)) == 1) | (length(unique(Atags)) >= 2 & !("numeric" %in% names(Atags)))) {
params$failed = TRUE
params$errorMessage = "A must have no covariates or at least 2 covariates at least one of which is continuous."
} else if (length(unique(Btags)) < 2) {
params$failed = TRUE
params$errorMessage = "After removing colinear covariates, Party B has 1 or fewer covariates."
} else if (!("numeric" %in% names(Btags))) {
params$failed = TRUE
params$errorMessage = "After removing colinear covariates, Party B has no continuous covariates."
}
# if (params$p2 == 0) {
#   params$failed = TRUE
#   params$errorMessage = "All of party B's covariates are either linear or are colinear with Party A's covariates."
# }
params = AddToLog(params, "CheckColinearityLogistic.T3", 0, 0, writeTime, writeSize)
return(params)
}
# Party T: seed the iterative fit with betas = 4 * (X'X)^-1 X'y, split the
# coefficient and X'y vectors into A's and B's shares, initialize the
# iteration bookkeeping, and write each party's share (plus the initial
# convergence flags) to disk.
ComputeInitialBetasLogistic.T3 = function(params) {
if (params$trace) cat(as.character(Sys.time()), "ComputeInitialBetasLogistic.T3\n\n")
# de-standardize xty
p1 = params$p1
p2 = params$p2
xty = params$xty
xtx = params$xtx
# Starting values: scaled least-squares solution.
betas = 4 * solve(xtx) %*% xty
# First p1 entries belong to party A, the remaining p2 to party B.
Abetas = betas[1:p1]
Bbetas = betas[(p1 + 1):(p1 + p2)]
Axty = xty[1:p1]
Bxty = xty[(p1 + 1):(p1 + p2)]
params$Axty = Axty
params$Bxty = Bxty
params$betas = betas
params$betasA = Abetas
params$betasAold = matrix(0, p1, 1)
params$betasB = Bbetas
# Iteration state: counter, last step size, convergence flags.
params$algIterationCounter = 1
params$deltabeta = Inf
params$converged = FALSE
converged = FALSE
maxIterExceeded = FALSE
writeTime = proc.time()[3]
save(Abetas, file = file.path(params$writePath, "betasA.rdata"))
# Axty.rdata also carries p2 so party A learns B's covariate count.
save(p2, Axty, file = file.path(params$writePath, "Axty.rdata"))
save(Bbetas, file = file.path(params$writePath, "betasB.rdata"))
save(Bxty, file = file.path(params$writePath, "Bxty.rdata"))
save(converged, maxIterExceeded,
file = file.path(params$writePath, "converged.rdata"))
writeSize = sum(file.size(file.path(params$writePath, c("betasA.rdata",
"betasB.rdata",
"Axty.rdata",
"Bxty.rdata",
"converged.rdata"))))
writeTime = proc.time()[3] - writeTime
params = AddToLog(params, "ComputeInitialBetasLogistic.T3", 0, 0, writeTime, writeSize)
return(params)}
# Party A: read the kept-column indices and A's share of X'y written by
# party T, then shrink A's column-aligned metadata (names, counts, means,
# sds) to the retained, non-colinear covariates.
UpdateParamsLogistic.A3 = function(params) {
if (params$trace) cat(as.character(Sys.time()), "UpdateParamsLogistic.A3\n\n")
# Placeholders; load() below fills them in (Axty.rdata also carries p2).
Aindicies = NULL
Axty = NULL
p2 = NULL
readTime = proc.time()[3]
load(file.path(params$readPath[["T"]], "Aindicies.rdata"))
load(file.path(params$readPath[["T"]], "Axty.rdata"))
readSize = sum(file.size(file.path(params$readPath[["T"]], c("Aindicies.rdata",
"Axty.rdata"))))
readTime = proc.time()[3] - readTime
# Keep the old names/counts and overwrite with the reduced versions.
params$colnamesA.old = params$colnamesA
params$colnamesA = params$colnamesA.old[Aindicies]
params$p.old = params$p
params$p = length(Aindicies)
params$p2 = p2
params$AIndiciesKeep = Aindicies
params$means = params$means[Aindicies]
params$sd = params$sd[Aindicies]
params$Axty = Axty
params = AddToLog(params, "UpdateParamsLogistic.A3, UpdateDataLogistic.A3", readTime, readSize, 0, 0)
return(params)
}
# Party B: read the kept-column indices and B's share of X'y written by
# party T, then shrink B's column-aligned metadata (names, counts, means,
# sds) to the retained, non-colinear covariates.
UpdateParamsLogistic.B3 = function(params) {
if (params$trace) cat(as.character(Sys.time()), "UpdateParamsLogistic.B3\n\n")
# Placeholders; load() below fills them in.
Bindicies = NULL
Bxty = NULL
readTime = proc.time()[3]
load(file.path(params$readPath[["T"]], "Bindicies.rdata"))
load(file.path(params$readPath[["T"]], "Bxty.rdata"))
readSize = sum(file.size(file.path(params$readPath[["T"]], c("Bindicies.rdata",
"Bxty.rdata"))))
readTime = proc.time()[3] - readTime
# Keep the old names/counts and overwrite with the reduced versions.
params$colnamesB.old = params$colnamesB
params$colnamesB = params$colnamesB.old[Bindicies]
params$p.old = params$p
params$p = length(Bindicies)
params$BIndiciesKeep = Bindicies
params$means = params$means[Bindicies]
params$sd = params$sd[Bindicies]
params$Bxty = Bxty
params = AddToLog(params, "UpdateParamsLogistic.B3, UpdateDataLogistic.B3", readTime, readSize, 0, 0)
return(params)
}
# Restrict party A's local design matrix to the covariate columns that
# party T retained after the colinearity check.
UpdateDataLogistic.A3 = function(params, data) {
  if (params$trace) cat(as.character(Sys.time()), "UpdateDataLogistic.A3\n\n")
  retained = data$X[, params$AIndiciesKeep, drop = FALSE]
  data$X = as.matrix(retained)
  data
}
# Restrict party B's local design matrix to the covariate columns that
# party T retained after the colinearity check.
UpdateDataLogistic.B3 = function(params, data) {
  if (params$trace) cat(as.character(Sys.time()), "UpdateDataLogistic.B3\n\n")
  retained = data$X[, params$BIndiciesKeep, drop = FALSE]
  data$X = as.matrix(retained)
  data
}
# Party A: fetch the convergence flags and A's current beta estimates
# written by party T, storing them in params.
GetBetaALogistic.A3 = function(params) {
if (params$trace) cat(as.character(Sys.time()), "GetBetaLogistic.A3\n\n")
# Placeholders; load() below fills them in.
converged = NULL
maxIterExceeded = NULL
Abetas = NULL
readTime = proc.time()[3]
load(file.path(params$readPath[["T"]], "converged.rdata"))
load(file.path(params$readPath[["T"]], "betasA.rdata"))
readSize = sum(file.size(file.path(params$readPath[["T"]], c("converged.rdata",
"betasA.rdata"))))
readTime = proc.time()[3] - readTime
params$converged = converged
params$maxIterExceeded = maxIterExceeded
params$betas = Abetas
params = AddToLog(params, "GetBetaALogistic.A3", readTime, readSize, 0, 0)
return(params)
}
# Party B: fetch the convergence flags and B's current beta estimates
# written by party T, storing them in params.
GetBetaBLogistic.B3 = function(params) {
if (params$trace) cat(as.character(Sys.time()), "GetBetaLogistic.B3\n\n")
# Placeholders; load() below fills them in.
converged = NULL
maxIterExceeded = NULL
Bbetas = NULL
readTime = proc.time()[3]
load(file.path(params$readPath[["T"]], "converged.rdata"))
load(file.path(params$readPath[["T"]], "betasB.rdata"))
readSize = sum(file.size(file.path(params$readPath[["T"]], c("converged.rdata",
"betasB.rdata"))))
readTime = proc.time()[3] - readTime
params$converged = converged
params$maxIterExceeded = maxIterExceeded
params$betas = Bbetas
params = AddToLog(params, "GetBetaBLogistic.B3", readTime, readSize, 0, 0)
return(params)
}
# Party A: compute A's share of the linear predictor, X_A %*% betas, and
# write it out for party T to combine with B's share.
GetXAbetaALogistic.A3 = function(params, data) {
  # BUG FIX: the trace line previously printed "GetXBetaALogistic.A3",
  # which does not match this function's name (or its AddToLog label).
  if (params$trace) cat(as.character(Sys.time()), "GetXAbetaALogistic.A3\n\n")
  XAbeta = data$X %*% params$betas
  writeTime = proc.time()[3]
  save(XAbeta, file = file.path(params$writePath, "xabeta.rdata"))
  writeSize = file.size(file.path(params$writePath, "xabeta.rdata"))
  writeTime = proc.time()[3] - writeTime
  params = AddToLog(params, "GetXAbetaALogistic.A3", 0, 0, writeTime, writeSize)
  return(params)
}
# Party B: compute B's share of the linear predictor, X_B %*% betas, and
# write it out for party T to combine with A's share.
GetXBbetaBLogistic.B3 = function(params, data) {
  # BUG FIX: the trace line previously printed "GetXBetaBLogistic.B3",
  # which does not match this function's name (or its AddToLog label).
  if (params$trace) cat(as.character(Sys.time()), "GetXBbetaBLogistic.B3\n\n")
  XBbeta = data$X %*% params$betas
  writeTime = proc.time()[3]
  save(XBbeta, file = file.path(params$writePath, "xbbeta.rdata"))
  writeSize = file.size(file.path(params$writePath, "xbbeta.rdata"))
  writeTime = proc.time()[3] - writeTime
  params = AddToLog(params, "GetXBbetaBLogistic.B3", 0, 0, writeTime, writeSize)
  return(params)
}
# Party T: assemble the full linear predictor from A's and B's shares,
# convert it to logistic probabilities, cache them in params, and publish
# them for the other parties.
GetWeightsLogistic.T3 = function(params) {
  if (params$trace) cat(as.character(Sys.time()), "GetWeightsLogistic.T3\n\n")
  XAbeta = NULL  # populated by load() below
  XBbeta = NULL
  fileA = file.path(params$readPath[["A"]], "xabeta.rdata")
  fileB = file.path(params$readPath[["B"]], "xbbeta.rdata")
  readTime = proc.time()[3]
  load(fileA)
  load(fileB)
  readSize = file.size(fileA) + file.size(fileB)
  readTime = proc.time()[3] - readTime
  # pi = 1 / (1 + exp(-X*beta)) with X*beta = XA*betaA + XB*betaB
  Xbeta = XAbeta + XBbeta
  pi_ = (1 + exp(-Xbeta))^(-1)
  params$pi_ = pi_
  outFile = file.path(params$writePath, "pi.rdata")
  writeTime = proc.time()[3]
  save(pi_, file = outFile)
  writeSize = file.size(outFile)
  writeTime = proc.time()[3] - writeTime
  params = AddToLog(params, "GetWeightsLogistic.T3", readTime, readSize, writeTime, writeSize)
  return(params)
}
# Party B: read the probabilities pi from party T, then stream the local
# design matrix block-by-block, multiplying each "crz_*" matrix from T
# against the weighted block W*XB and writing the products into "crv_*"
# container files for T. Also accumulates XB' W XB along the way.
# Container boundaries: "crz_*" files break at filebreak.RZ; "crv_*" files
# break at filebreak.RV.
GetRVLogistic.B3 = function(params, data) {
if (params$trace) cat(as.character(Sys.time()), "GetRVLogistic.B3\n\n")
pi_ = NULL  # populated by load() below
writeTime = 0
writeSize = 0
readTime = proc.time()[3]
load(file.path(params$readPath[["T"]], "pi.rdata"))
readSize = file.size(file.path(params$readPath[["T"]], "pi.rdata"))
readTime = proc.time()[3] - readTime
params$pi_ = pi_
# IRLS weights w_i = pi_i * (1 - pi_i); params$pi_ was just set to pi_,
# so both factors refer to the same vector.
W = pi_ * (1 - params$pi_)
XBTWXB = 0
pbar = MakeProgressBar1(params$blocks$numBlocks, "R(I-Z*Z')W*XB", params$verbose)
containerCt.RZ = 0
containerCt.RV = 0
for (i in 1:params$blocks$numBlocks) {
# Open the next "crz_*" container at each RZ boundary.
if (i %in% params$container$filebreak.RZ) {
containerCt.RZ = containerCt.RZ + 1
filename1 = paste0("crz_", containerCt.RZ, ".rdata")
toRead = file(file.path(params$readPath[["T"]], filename1), "rb")
}
# Open the next "crv_*" output container at each RV boundary.
if (i %in% params$container$filebreak.RV) {
containerCt.RV = containerCt.RV + 1
filename2 = paste0("crv_", containerCt.RV, ".rdata")
toWrite = file(file.path(params$writePath, filename2), "wb")
}
# Row range of this block.
strt = params$blocks$starts[i]
stp = params$blocks$stops[i]
n = stp - strt + 1
Xblock = data$X[strt:stp, , drop = FALSE]
Wblock = W[strt:stp]
WXblock = MultiplyDiagonalWTimesX(Wblock, Xblock)
# readTime/writeTime use the subtract-then-add pattern to accumulate
# only the elapsed time of the I/O calls between them.
readTime = readTime - proc.time()[3]
RZ = matrix(readBin(con = toRead, what = numeric(), n = n * n,
endian = "little"), nrow = n, ncol = n)
readTime = readTime + proc.time()[3]
RV = RZ %*% WXblock
writeTime = writeTime - proc.time()[3]
writeBin(as.vector(RV), con = toWrite, endian = "little")
writeTime = writeTime + proc.time()[3]
XBTWXB = XBTWXB + t(Xblock) %*% WXblock
# Close containers when the next block starts a new file (or at the end).
if ((i + 1) %in% params$container$filebreak.RZ || i == params$blocks$numBlocks) {
close(toRead)
readSize = readSize + file.size(file.path(params$readPath[["T"]], filename1))
}
if ((i + 1) %in% params$container$filebreak.RV || i == params$blocks$numBlocks) {
close(toWrite)
writeSize = writeSize + file.size(file.path(params$writePath, filename2))
}
pbar = MakeProgressBar2(i, pbar, params$verbose)
}
writeTime = writeTime - proc.time()[3]
save(XBTWXB, file = file.path(params$writePath, "xbtwxb.rdata"))
writeSize = writeSize + sum(file.size(c(file.path(params$writePath, "xbtwxb.rdata"))))
writeTime = writeTime + proc.time()[3]
params = AddToLog(params, "GetRVLogistic.B3", readTime, readSize, writeTime, writeSize)
return(params)
}
# Party T: for each block, apply t(R2) (from the local "r1_*" files) to B's
# product read from the "crv_*" containers, then multiply on the right by a
# fresh random orthonormal matrix R3, saving each R3 locally ("r3_*") so it
# can be undone later, and writing the results into "cvr_*" containers for
# party A. "crv_*" files break at filebreak.RV; "cvr_*" at filebreak.VR.
ProcessVLogistic.T3 = function(params) {
  if (params$trace) cat(as.character(Sys.time()), "ProcessVLogistic.T3\n\n")
  XBTWXB = NULL  # populated by load() below
  writeTime = 0
  writeSize = 0
  p2 = params$p2
  readTime = proc.time()[3]
  load(file.path(params$readPath[["B"]], "xbtwxb.rdata"))
  readSize = file.size(file.path(params$readPath[["B"]], "xbtwxb.rdata"))
  readTime = proc.time()[3] - readTime
  params$xbtwxb = XBTWXB
  numBlocks = params$blocks$numBlocks
  pbar = MakeProgressBar1(numBlocks, "(I-Z*Z')W*XB*R", params$verbose)
  containerCt.RV = 0
  containerCt.VR = 0
  for (i in 1:numBlocks) {
    # Open the next "crv_*" input container at each RV boundary.
    if (i %in% params$container$filebreak.RV) {
      containerCt.RV = containerCt.RV + 1
      filename2 = paste0("crv_", containerCt.RV, ".rdata")
      toRead2 = file(file.path(params$readPath[["B"]], filename2), "rb")
    }
    # Open the next "cvr_*" output container at each VR boundary.
    if (i %in% params$container$filebreak.VR) {
      containerCt.VR = containerCt.VR + 1
      filename3 = paste0("cvr_", containerCt.VR, ".rdata")
      toWrite3 = file(file.path(params$writePath, filename3), "wb")
    }
    strt = params$blocks$starts[i]
    stp = params$blocks$stops[i]
    n = stp - strt + 1
    filename1 = paste0("r1_", i, ".rdata")
    filename4 = paste0("r3_", i, ".rdata")
    readTime = readTime - proc.time()[3]
    toRead1 = file(file.path(params$dplocalPath, filename1), "rb")
    R2 = matrix(readBin(con = toRead1, what = numeric(), n = n * n,
                        endian = "little"), nrow = n, ncol = n)
    readSize = readSize + file.size(file.path(params$dplocalPath, filename1))
    close(toRead1)
    RV = matrix(readBin(con = toRead2, what = numeric(), n = n * p2,
                        endian = "little"), nrow = n, ncol = p2)
    readTime = readTime + proc.time()[3]
    # Apply t(R2) on the left, then a fresh random orthonormal R3 on the right.
    V = t(R2) %*% RV
    R3 = RandomOrthonomalMatrix(p2)
    VR = V %*% R3
    writeTime = writeTime - proc.time()[3]
    # Save this block's R3 locally so ProcessXtWXLogistic.T3 can remove it.
    toWrite4 = file(file.path(params$dplocalPath, filename4), "wb")
    writeBin(as.vector(R3), con = toWrite4, endian = "little")
    close(toWrite4)
    writeSize = writeSize + file.size(file.path(params$dplocalPath, filename4))
    writeBin(as.vector(VR), con = toWrite3, endian = "little")
    writeTime = writeTime + proc.time()[3]
    if ((i + 1) %in% params$container$filebreak.RV || i == numBlocks) {
      close(toRead2)
      # BUG FIX: account for the "crv_*" container just closed. This
      # previously re-added the size of the local "r1_*" file (already
      # counted above), mis-stating the transfer log.
      readSize = readSize + file.size(file.path(params$readPath[["B"]], filename2))
    }
    if ((i + 1) %in% params$container$filebreak.VR || i == numBlocks) {
      close(toWrite3)
      writeSize = writeSize + file.size(file.path(params$writePath, filename3))
    }
    pbar = MakeProgressBar2(i, pbar, params$verbose)
  }
  params = AddToLog(params, "ProcessVLogistic.T3", readTime, readSize, writeTime, writeSize)
  return(params)
}
# Party A: read the re-masked products V*R3 from party T's "cvr_*"
# containers, accumulate X_A' * (V*R3) into "cxr_*" containers for T, and
# compute XA' W XA as a by-product.
GetXRLogistic.A3 = function(params, data) {
  if (params$trace) cat(as.character(Sys.time()), "GetXRLogistic.A3\n\n")
  pi_ = NULL  # populated by load() below
  p2 = params$p2
  writeTime = 0
  writeSize = 0
  readTime = proc.time()[3]
  load(file.path(params$readPath[["T"]], "pi.rdata"))
  readSize = file.size(file.path(params$readPath[["T"]], "pi.rdata"))
  readTime = proc.time()[3] - readTime
  params$pi_ = pi_
  # IRLS weights w_i = pi_i * (1 - pi_i).
  W = pi_ * (1 - params$pi_)
  XATWXA = 0
  pbar = MakeProgressBar1(params$blocks$numBlocks, "XA'(I-Z*Z')W*XB*R", params$verbose)
  containerCt.VR = 0
  containerCt.XR = 0
  for (i in 1:params$blocks$numBlocks) {
    # BUG FIX: the "cvr_*" containers are written by ProcessVLogistic.T3 at
    # the filebreak.VR boundaries (and closed below on filebreak.VR), so they
    # must also be OPENED on filebreak.VR; this previously tested
    # filebreak.RV, mismatching the file boundaries.
    if (i %in% params$container$filebreak.VR) {
      containerCt.VR = containerCt.VR + 1
      filename1 = paste0("cvr_", containerCt.VR, ".rdata")
      toRead = file(file.path(params$readPath[["T"]], filename1), "rb")
    }
    # Open the next "cxr_*" output container at each XR boundary.
    if (i %in% params$container$filebreak.XR) {
      containerCt.XR = containerCt.XR + 1
      filename2 = paste0("cxr_", containerCt.XR, ".rdata")
      toWrite = file(file.path(params$writePath, filename2), "wb")
    }
    strt = params$blocks$starts[i]
    stp = params$blocks$stops[i]
    n = stp - strt + 1
    Xblock = data$X[strt:stp, , drop = FALSE]
    Wblock = W[strt:stp]
    WXblock = MultiplyDiagonalWTimesX(Wblock, Xblock)
    readTime = readTime - proc.time()[3]
    VR = matrix(readBin(con = toRead, what = numeric(), n = n * p2,
                        endian = "little"), nrow = n, ncol = p2)
    readTime = readTime + proc.time()[3]
    XR = t(Xblock) %*% VR
    writeTime = writeTime - proc.time()[3]
    writeBin(as.vector(XR), con = toWrite, endian = "little")
    writeTime = writeTime + proc.time()[3]
    XATWXA = XATWXA + t(Xblock) %*% WXblock
    # Close containers when the next block starts a new file (or at the end).
    if ((i + 1) %in% params$container$filebreak.VR || i == params$blocks$numBlocks) {
      close(toRead)
      readSize = readSize + file.size(file.path(params$readPath[["T"]], filename1))
    }
    if ((i + 1) %in% params$container$filebreak.XR || i == params$blocks$numBlocks) {
      close(toWrite)
      writeSize = writeSize + file.size(file.path(params$writePath, filename2))
    }
    pbar = MakeProgressBar2(i, pbar, params$verbose)
  }
  writeTime = writeTime - proc.time()[3]
  save(XATWXA, file = file.path(params$writePath, "xatwxa.rdata"))
  writeSize = writeSize + sum(file.size(c(file.path(params$writePath, "xatwxa.rdata"))))
  writeTime = writeTime + proc.time()[3]
  params = AddToLog(params, "GetXRLogistic.A3", readTime, readSize, writeTime, writeSize)
  return(params)
}
# Party T: reconstruct XA' W XB by removing each block's R3 factor from the
# "cxr_*" products, assemble the full information matrix X'W X from the
# A-A, A-B, and B-B pieces, invert it, and publish the A-columns and
# B-columns of the inverse for the respective parties. Fails gracefully
# (with a user-facing message) when the matrix is singular.
ProcessXtWXLogistic.T3 = function(params) {
if (params$trace) cat(as.character(Sys.time()), "ProcessXtWXLogistic.T3\n\n")
XATWXA = NULL  # populated by load() below
p1 = params$p1
p2 = params$p2
readTime = proc.time()[3]
load(file.path(params$readPath[["A"]], "xatwxa.rdata"))
readSize = file.size(file.path(params$readPath[["A"]], "xatwxa.rdata"))
readTime = proc.time()[3] - readTime
params$xatwxa = XATWXA
pbar = MakeProgressBar1(params$blocks$numBlocks, "X'W*X", params$verbose)
containerCt.XR = 0
XATWXB = 0
for (i in 1:params$blocks$numBlocks) {
# Open the next "cxr_*" container from party A at each XR boundary.
if (i %in% params$container$filebreak.XR) {
containerCt.XR = containerCt.XR + 1
filename1 = paste0("cxr_", containerCt.XR, ".rdata")
toRead = file(file.path(params$readPath[["A"]], filename1), "rb")
}
# This block's locally stored R3 factor (written in ProcessVLogistic.T3).
filename2 = paste0("r3_", i, ".rdata")
readTime = readTime - proc.time()[3]
toRead1 = file(file.path(params$dplocalPath, filename2), "rb")
R = matrix(readBin(con = toRead1, what = numeric(), n = p2 * p2,
endian = "little"), nrow = p2, ncol = p2)
close(toRead1)
XR = matrix(readBin(con = toRead, what = numeric(), n = p1 * p2,
endian = "little"), nrow = p1, ncol = p2)
readSize = readSize + file.size(file.path(params$dplocalPath, filename2))
readTime = readTime + proc.time()[3]
# Undo the right-multiplication by R3 and accumulate XA' W XB.
XATWXB = XATWXB + XR %*% t(R)
if ((i + 1) %in% params$container$filebreak.XR || i == params$blocks$numBlocks) {
close(toRead)
readSize = readSize + file.size(file.path(params$readPath[["A"]], filename1))
}
pbar = MakeProgressBar2(i, pbar, params$verbose)
}
# Assemble the full (p1 + p2) x (p1 + p2) information matrix.
xtwx = rbind(cbind(params$xatwxa, XATWXB), cbind(t(XATWXB), params$xbtwxb))
params$xtwx = xtwx
# II stays NULL if solve() throws; the handler's assignment is local and
# has no effect, which is fine since II was initialized to NULL above.
II = NULL
tryCatch({II = solve(xtwx)},
error = function(err) { II = NULL }
)
if (is.null(II)) {
params$failed = TRUE
params$singularMatrix = TRUE
params$errorMessage =
paste0("The matrix t(X)*W*X is not invertible.\n",
"       This may be due to one of two possible problems.\n",
"       1. Poor random initialization of the security vector.\n",
"       2. Near multicollinearity in the data\n",
"SOLUTIONS: \n",
"       1. Rerun the data analysis.\n",
"       2. If the problem persists, check the variables for\n",
"          duplicates for both parties and / or reduce the\n",
"          number of variables used.  Once this is done,\n",
"          rerun the data analysis.")
return(params)
}
params$II = II
# Split the inverse column-wise: first p1 columns for A, rest for B.
IIA = II[, 1:p1, drop = FALSE]
IIB = II[, (p1 + 1):(p1 + p2), drop = FALSE]
writeTime = proc.time()[3]
save(IIA, file = file.path(params$writePath, "IIA.rdata"))
save(IIB, file = file.path(params$writePath, "IIB.rdata"))
writeSize = sum(file.size(file.path(params$writePath, c("IIA.rdata", "IIB.rdata"))))
writeTime = proc.time()[3] - writeTime
params = AddToLog(params, "ProcessXtWXLogistic.T3", readTime, readSize, writeTime, writeSize)
return(params)
}
UpdateBetaLogistic.A3 = function(params, data) {
    # Party A's half of one Newton-Raphson update: read A's slice of the
    # inverse information matrix (IIA) from the trusted party T, form A's
    # score contribution X'y - X'pi, and write the resulting coefficient
    # update component (AI) back for T to combine with B's.
    if (params$trace) cat(as.character(Sys.time()), "UpdateBetaLogistic.A3\n\n")
    IIA = NULL  # populated by load() below
    inFile = file.path(params$readPath[["T"]], "IIA.rdata")
    tRead = proc.time()[3]
    load(inFile)
    inBytes = file.size(inFile)
    tRead = proc.time()[3] - tRead
    # Score contribution premultiplied by A's block of the inverse information.
    AI = IIA %*% (params$Axty - t(data$X) %*% params$pi_)
    outFile = file.path(params$writePath, "AI.rdata")
    tWrite = proc.time()[3]
    save(AI, file = outFile)
    outBytes = file.size(outFile)
    tWrite = proc.time()[3] - tWrite
    params = AddToLog(params, "UpdateBetaLogistic.A3", tRead, inBytes, tWrite, outBytes)
    return(params)
}
UpdateBetaLogistic.B3 = function(params, data) {
    # Party B's half of one Newton-Raphson update, mirroring
    # UpdateBetaLogistic.A3: read B's slice of the inverse information
    # matrix (IIB) from T, form B's score contribution, and write the
    # update component (BI) for T to combine.
    if (params$trace) cat(as.character(Sys.time()), "UpdateBetaLogistic.B3\n\n")
    IIB = NULL  # populated by load() below
    inFile = file.path(params$readPath[["T"]], "IIB.rdata")
    tRead = proc.time()[3]
    load(inFile)
    inBytes = file.size(inFile)
    tRead = proc.time()[3] - tRead
    # Score contribution premultiplied by B's block of the inverse information.
    BI = IIB %*% (params$Bxty - t(data$X) %*% params$pi_)
    outFile = file.path(params$writePath, "BI.rdata")
    tWrite = proc.time()[3]
    save(BI, file = outFile)
    outBytes = file.size(outFile)
    tWrite = proc.time()[3] - tWrite
    params = AddToLog(params, "UpdateBetaLogistic.B3", tRead, inBytes, tWrite, outBytes)
    return(params)
}
# T's step of one IRLS iteration: sum the update components received from
# parties A and B into a full Newton-Raphson step, update the pooled
# coefficient vector, test convergence, and send each party only its own
# coefficient slice (neither party ever sees the other's coefficients).
UpdateBetaLogistic.T3 = function(params) {
    if (params$trace) cat(as.character(Sys.time()), "UpdateBetaLogistic.T3\n\n")
    AI = NULL  # both populated by the load() calls below
    BI = NULL
    readTime = proc.time()[3]
    load(file.path(params$readPath[["A"]], "AI.rdata"))
    load(file.path(params$readPath[["B"]], "BI.rdata"))
    readSize = file.size(file.path(params$readPath[["A"]], "AI.rdata")) +
        file.size(file.path(params$readPath[["B"]], "BI.rdata"))
    readTime = proc.time()[3] - readTime
    # The full Newton step is the sum of the two parties' partial steps.
    delta = AI + BI
    betas = params$betas + delta
    params$betas = betas
    # Relative-change criterion; the +.1 guards against division by
    # near-zero coefficients.
    converged = all(abs(delta) / (abs(betas) + .1) < params$cutoff)
    maxIterExceeded = (params$algIterationCounter >= params$maxIterations) && !converged
    params$converged = converged
    params$maxIterExceeded = maxIterExceeded
    # Split the pooled vector: first p1 entries belong to A, rest to B.
    Abetas = betas[1:params$p1]
    Bbetas = betas[(params$p1 + 1):(params$p1 + params$p2)]
    writeTime = proc.time()[3]
    save(converged, maxIterExceeded,
         file = file.path(params$writePath, "converged.rdata"))
    save(Abetas, file = file.path(params$writePath, "betasA.rdata"))
    save(Bbetas, file = file.path(params$writePath, "betasB.rdata"))
    writeSize = sum(file.size(file.path(params$writePath, c("betasA.rdata",
                                                            "betasB.rdata",
                                                            "converged.rdata"))))
    writeTime = proc.time()[3] - writeTime
    params = AddToLog(params, "UpdateBetaLogistic.T3", readTime, readSize, writeTime, writeSize)
    return(params)
}
# Party A's finalization: back-transform its coefficients to the original
# (unstandardized) covariate scale and compute A's additive contribution to
# the linear predictor.  The first column's term is subtracted out because
# betas[1] is handled separately as the intercept by T in
# GetFinalFittedLogistic.T3 (see L6418-L6419).
GetFinalBetaLogistic.A3 = function(params, data) {
    if (params$trace) cat(as.character(Sys.time()), "getFinalBetaLogistic.A3\n\n")
    # Undo the standardization of X: beta_orig = beta_std / sd.
    betas = params$betas / params$sd
    # Offset induced by centering the covariates; [-1] skips the intercept slot.
    offsetA = sum(betas[-1] * params$means[-1])
    # Un-standardize X (sd * X + mean) and apply the coefficients, then remove
    # the first-column term so the intercept is not double counted by T.
    AFinalFitted = t(params$sd * t(data$X) + params$means) %*% betas -
        t(params$sd[1] * t(data$X[, 1]) + params$means[1]) %*% betas[1]
    writeTime = proc.time()[3]
    save(offsetA, AFinalFitted, file = file.path(params$writePath, "Afinalfitted.rdata"))
    writeSize = file.size(file.path(params$writePath, "Afinalfitted.rdata"))
    writeTime = proc.time()[3] - writeTime
    params = AddToLog(params, "GetFinalBetaLogistic.A3", 0, 0, writeTime, writeSize)
    return(params)
}
GetFinalBetaLogistic.B3 = function(params, data) {
    # Party B's finalization, mirroring GetFinalBetaLogistic.A3: rescale B's
    # coefficients to the unstandardized covariate scale, compute the
    # centering offset and B's contribution to the linear predictor, and
    # write both for T to combine.  B's block has no intercept column.
    if (params$trace) cat(as.character(Sys.time()), "getFinalBetaLogistic.B3\n\n")
    # beta_orig = beta_std / sd; offset absorbs the centering of X.
    betas = params$betas / params$sd
    offsetB = sum(betas * params$means)
    # Un-standardize X (sd * X + mean) before applying the coefficients.
    unscaledX = t(params$sd * t(data$X) + params$means)
    BFinalFitted = unscaledX %*% betas
    outFile = file.path(params$writePath, "Bfinalfitted.rdata")
    tWrite = proc.time()[3]
    save(offsetB, BFinalFitted, file = outFile)
    outBytes = file.size(outFile)
    tWrite = proc.time()[3] - tWrite
    params = AddToLog(params, "GetFinalBetaLogistic.B3", 0, 0, tWrite, outBytes)
    return(params)
}
# T assembles the final linear predictor: add both parties' fitted-value
# contributions, fold the two centering offsets into the intercept, and
# store both the rescaled coefficient vector and the fitted values.
GetFinalFittedLogistic.T3 = function(params) {
    if (params$trace) cat(as.character(Sys.time()), "GetFinalFittedLogistic.T3\n\n")
    offsetA = NULL  # all four populated by the load() calls below
    offsetB = NULL
    AFinalFitted = NULL
    BFinalFitted = NULL
    readTime = proc.time()[3]
    load(file.path(params$readPath[["A"]], "Afinalfitted.rdata"))
    load(file.path(params$readPath[["B"]], "Bfinalfitted.rdata"))
    readSize = file.size(file.path(params$readPath[["A"]], "Afinalfitted.rdata")) +
        file.size(file.path(params$readPath[["B"]], "Bfinalfitted.rdata"))
    readTime = proc.time()[3] - readTime
    # Back-transform the pooled coefficients to the unstandardized scale.
    betas = params$betas / c(params$sdA, params$sdB)
    # Absorb both parties' centering offsets into the intercept (slot 1).
    betas[1] = betas[1] - offsetA - offsetB
    # Both parties excluded the intercept term from their contributions,
    # so it is added exactly once here.
    finalFitted = AFinalFitted + BFinalFitted + betas[1]
    params$betas = betas
    params$finalFitted = finalFitted
    writeTime = proc.time()[3]
    save(finalFitted, file = file.path(params$writePath, "finalFitted.rdata"))
    writeSize = file.size(file.path(params$writePath, "finalFitted.rdata"))
    writeTime = proc.time()[3] - writeTime
    params = AddToLog(params, "GetFinalBetaLogistic.T3", readTime, readSize, writeTime, writeSize)
    return(params)
}
# Party A computes the goodness-of-fit statistics that require the raw
# outcome Y (which only A holds): residual and null deviance, the
# Hosmer-Lemeshow test, and the ROC curve, all from the final linear
# predictor received from T.  Results are written for T to merge into the
# shared stats object.
ComputeResultsLogistic.A3 = function(params, data) {
    if (params$trace) cat(as.character(Sys.time()), "ComputeResultsLogistic.A3\n\n")
    finalFitted = NULL  # populated by load() below
    readTime = proc.time()[3]
    load(file.path(params$readPath[["T"]], "finalFitted.rdata"))
    readSize = file.size(file.path(params$readPath[["T"]], "finalFitted.rdata"))
    readTime = proc.time()[3] - readTime
    n = params$n
    ct = sum(data$Y)  # number of observed events
    params$FinalFitted = finalFitted
    # Binomial deviance on the logit scale: -2 * log-likelihood of the fit.
    resdev = -2 * (sum(data$Y * finalFitted) - sum(log(1 + exp(finalFitted))))
    # Null deviance from the intercept-only model (event rate ct / n).
    nulldev = -2 * (ct * log(ct / n) + (n - ct) * log(1 - ct / n))
    hoslem = HoslemInternal(params, data)
    ROC = RocInternal(params, data)
    writeTime = proc.time()[3]
    save(resdev, nulldev, hoslem, ROC, file = file.path(params$writePath, "logisticstats.rdata"))
    writeSize = file.size(file.path(params$writePath, "logisticstats.rdata"))
    writeTime = proc.time()[3] - writeTime
    params = AddToLog(params, "ComputeResultsLogistic.A3", readTime, readSize, writeTime, writeSize)
    return(params)
}
# T builds the final results object: merge A's fit statistics with the
# coefficient estimates, compute standard errors / z-statistics / p-values
# from the inverse of t(X)WX, and map everything back onto the original
# (pre-collinearity-drop) column layout of both parties.
ComputeResultsLogistic.T3 = function(params) {
    if (params$trace) cat(as.character(Sys.time()), "ComputeResultsLogistic.T3\n\n")
    nulldev = NULL  # all four populated by load() below
    resdev = NULL
    hoslem = NULL
    ROC = NULL
    readTime = proc.time()[3]
    load(file.path(params$readPath[["A"]], "logisticstats.rdata"))
    readSize = file.size(file.path(params$readPath[["A"]], "logisticstats.rdata"))
    readTime = proc.time()[3] - readTime
    stats = params$stats
    stats$failed = FALSE
    stats$converged = params$converged
    n = params$n
    p1 = params$p1              # retained column counts per party
    p2 = params$p2
    sdA = params$sdA            # standardization constants per party
    sdB = params$sdB
    meansA = params$meansA
    meansB = params$meansB
    Anames = params$colnamesA.old
    Bnames = params$colnamesB.old
    p1.old = params$p1.old      # original column counts before dropping
    p2.old = params$p2.old      # collinear columns
    p.old = params$p.old
    indicies = params$IndiciesKeep  # positions of retained columns in the
                                    # original layout
    # If xtwx were singular, it would have been caught in GetII.A2(), so we may
    # assume that xtwx is NOT singular and so we do not have to do a check.
    cov1 = solve(params$xtwx)
    # SEs on the unstandardized scale: divide by the per-column sd.
    secoef = sqrt(diag(cov1)) / c(sdA, sdB)
    # Intercept SE via the delta method: the unstandardized intercept is a
    # linear combination of all standardized coefficients (vector tmp).
    tmp = matrix(c(1, (-meansA / sdA)[-1], -meansB / sdB), ncol = 1)
    secoef[1] = sqrt(t(tmp) %*% cov1 %*% tmp)
    # Results are laid out over ALL original columns; dropped (collinear)
    # columns keep NA in every slot.
    stats$party = c(rep("dp1", p1.old), rep("dp2", p2.old))
    stats$coefficients = rep(NA, p.old)
    stats$secoef = rep(NA, p.old)
    stats$tvals = rep(NA, p.old)
    stats$pvals = rep(NA, p.old)
    stats$n = n
    stats$nulldev = nulldev
    stats$resdev = resdev
    stats$aic = resdev + 2 * (p1 + p2)
    stats$bic = resdev + (p1 + p2) * log(n)
    stats$nulldev_df = n - 1
    stats$resdev_df = n - (p1 + p2)
    stats$coefficients[indicies] = params$betas
    stats$secoef[indicies] = secoef
    # Wald z-statistics and two-sided normal p-values.
    tvals = params$betas / secoef
    pvals = 2 * pnorm(abs(tvals), lower.tail = FALSE)
    stats$tvals[indicies] = tvals
    stats$pvals[indicies] = pvals
    stats$hoslem = hoslem
    stats$ROC = ROC
    # Counter was incremented once past the last completed iteration.
    stats$iter = params$algIterationCounter - 1
    names.old = c(Anames, Bnames)
    names(stats$coefficients) = names.old
    names(stats$party) = names.old
    names(stats$secoef) = names.old
    names(stats$tvals) = names.old
    names(stats$pvals) = names.old
    writeTime = proc.time()[3]
    save(stats, file = file.path(params$writePath, "stats.rdata"))
    writeSize = file.size(file.path(params$writePath, "stats.rdata"))
    writeTime = proc.time()[3] - writeTime
    params$stats = stats
    params = AddToLog(params, "ComputeResultsLogistic.T3", readTime, readSize, writeTime, writeSize)
    return(params)
}
GetResultsLogistic.A3 = function(params, data) {
    # Final step for party A: pull the shared results object prepared by T
    # into params$stats, augmenting it with A's local outcome vector and
    # fitted values (needed for the Hosmer-Lemeshow and ROC output).
    if (params$trace) cat(as.character(Sys.time()), "GetResultsLogistic.A3\n\n")
    stats = NULL  # populated by load() below
    statsFile = file.path(params$readPath[["T"]], "stats.rdata")
    elapsed = proc.time()[3]
    load(statsFile)
    nBytes = file.size(statsFile)
    elapsed = proc.time()[3] - elapsed
    # For Hoslem and ROC
    stats$Y = data$Y
    stats$FinalFitted = params$FinalFitted
    params$stats = stats
    params = AddToLog(params, "GetResultsLogistic.A3", elapsed, nBytes, 0, 0)
    return(params)
}
GetResultsLogistic.B3 = function(params) {
    # Final step for party B: pull the shared results object prepared by T
    # into params$stats.  Unlike A, B adds nothing local to it.
    if (params$trace) cat(as.character(Sys.time()), "GetResultsLogistic.B3\n\n")
    stats = NULL  # populated by load() below
    statsFile = file.path(params$readPath[["T"]], "stats.rdata")
    elapsed = proc.time()[3]
    load(statsFile)
    nBytes = file.size(statsFile)
    elapsed = proc.time()[3] - elapsed
    params$stats = stats
    params = AddToLog(params, "GetResultsLogistic.B3", elapsed, nBytes, 0, 0)
    return(params)
}
############################### PARENT FUNCTIONS ###############################
# Driver for party A in the 3-party (A, B, T) distributed logistic
# regression.  A holds the outcome column `yname` plus its covariates and
# exchanges files with the trusted third party T through `monitorFolder`.
# Returns the stats object (or NULL on setup failure).
# FIX: the per-iteration transfer list previously named "ai.rdata", but
# UpdateBetaLogistic.A3 saves the file as "AI.rdata" and T loads "AI.rdata";
# the lowercase name fails on case-sensitive file systems.
PartyAProcess3Logistic = function(data,
                                  yname = NULL,
                                  monitorFolder = NULL,
                                  sleepTime = 10,
                                  maxWaitingTime = 24 * 60 * 60,
                                  popmednet = TRUE,
                                  trace = FALSE,
                                  verbose = TRUE) {
    params = PrepareParams.3p("logistic", "A",
                              popmednet = popmednet, trace = trace, verbose = verbose)
    params = InitializeLog.3p(params)
    params = InitializeStamps.3p(params)
    params = InitializeTrackingTable.3p(params)
    Header(params)
    params = PrepareFolderLinear.A3(params, monitorFolder)
    if (params$failed) {
        warning(params$errorMessage)
        return(invisible(NULL))
    }
    data = PrepareDataLogistic.A23(params, data, yname)
    params = AddToLog(params, "PrepareDataLogistic.A23", 0, 0, 0, 0)
    if (data$failed) {
        # Tell T the job is dead before quitting.
        message = "Error in processing the data for Party A."
        MakeErrorMessage(params$writePath, message)
        files = c("errorMessage.rdata")
        params = SendPauseQuit.3p(params, filesT = files, sleepTime = sleepTime, job_failed = TRUE, waitForTurn = TRUE)
        return(params$stats)
    }
    params = PrepareParamsLinear.A3(params, data)
    files = "pa.rdata"
    params = SendPauseContinue.3p(params, filesT = files, from = "T",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime, waitForTurn = TRUE)
    if (file.exists(file.path(params$readPath[["T"]], "errorMessage.rdata"))) {
        warning(ReadErrorMessage(params$readPath[["T"]]))
        params = SendPauseQuit.3p(params, sleepTime = sleepTime, job_failed = TRUE, waitForTurn = TRUE)
        return(params$stats)
    }
    params$algIterationCounter = 1
    # Setup phase: share the (encrypted, blocked) design-matrix products
    # with T, exactly as in the linear-model protocol.
    params = PrepareBlocksLinear.A3(params)
    params = GetZLinear.A3(params, data)
    files = SeqZW("cz_", length(params$container$filebreak.Z))
    params = SendPauseContinue.3p(params, filesT = files, from = "T",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
    params = GetWRLinear.A3(params, data)
    files = c("xatxa.rdata", SeqZW("cpr_", length(params$container$filebreak.PR)))
    params = SendPauseContinue.3p(params, filesT = files, from = "T",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
    if (file.exists(file.path(params$readPath[["T"]], "errorMessage.rdata"))) {
        warning(ReadErrorMessage(params$readPath[["T"]]))
        params = SendPauseQuit.3p(params, sleepTime = sleepTime, job_failed = TRUE, waitForTurn = TRUE)
        return(params$stats)
    }
    params = UpdateParamsLogistic.A3(params)
    data = UpdateDataLogistic.A3(params, data)
    params = GetBetaALogistic.A3(params)
    params$algIterationCounter = 1
    # IRLS loop: each pass exchanges weighted cross-products and the
    # coefficient update component with T until T declares convergence.
    while (!params$converged && !params$maxIterExceeded) {
        BeginningIteration(params)
        params = GetXAbetaALogistic.A3(params, data)
        files = c("xabeta.rdata")
        params = SendPauseContinue.3p(params, filesT = files, from = "T",
                                      sleepTime = sleepTime, maxWaitingTime = maxWaitingTime, waitForTurn = TRUE)
        params = GetXRLogistic.A3(params, data)
        files = c("xatwxa.rdata", SeqZW("cxr_", length(params$container$filebreak.XR)))
        params = SendPauseContinue.3p(params, filesT = files, from = "T",
                                      sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
        if (file.exists(file.path(params$readPath[["T"]], "errorMessage.rdata"))) {
            warning(ReadErrorMessage(params$readPath[["T"]]))
            params = SendPauseQuit.3p(params, sleepTime = sleepTime, job_failed = TRUE, waitForTurn = TRUE)
            return(params$stats)
        }
        params = UpdateBetaLogistic.A3(params, data)
        # Must match save(AI, file = ..."AI.rdata") in UpdateBetaLogistic.A3
        # (was "ai.rdata", which breaks on case-sensitive file systems).
        files = c("AI.rdata")
        params = SendPauseContinue.3p(params, filesT = files, from = "T",
                                      sleepTime = sleepTime, maxWaitingTime = maxWaitingTime, waitForTurn = TRUE)
        params = GetBetaALogistic.A3(params)
        EndingIteration(params)
        params$algIterationCounter = params$algIterationCounter + 1
    }
    # Finalization: send A's fitted-value contribution, compute the Y-based
    # fit statistics, and collect the merged results from T.
    params = GetFinalBetaLogistic.A3(params, data)
    files = "Afinalfitted.rdata"
    params = SendPauseContinue.3p(params, filesT = files, from = "T",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime, waitForTurn = TRUE)
    params = ComputeResultsLogistic.A3(params, data)
    files = c("logisticstats.rdata")
    params = SendPauseContinue.3p(params, filesT = files, from = "T",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
    params = GetResultsLogistic.A3(params, data)
    params = SendPauseQuit.3p(params, sleepTime = sleepTime, waitForTurn = TRUE)
    return(params$stats)
}
# Driver for party B in the 3-party (A, B, T) distributed logistic
# regression.  B holds only covariates (no outcome) and exchanges files
# with the trusted third party T through `monitorFolder`.  Returns the
# stats object (or NULL on setup failure).
# FIX: the per-iteration transfer list previously named "bi.rdata", but
# UpdateBetaLogistic.B3 saves the file as "BI.rdata" and T loads "BI.rdata";
# the lowercase name fails on case-sensitive file systems.
PartyBProcess3Logistic = function(data,
                                  monitorFolder = NULL,
                                  sleepTime = 10,
                                  maxWaitingTime = 24 * 60 * 60,
                                  popmednet = TRUE,
                                  trace = FALSE,
                                  verbose = TRUE) {
    params = PrepareParams.3p("logistic", "B",
                              popmednet = popmednet, trace = trace, verbose = verbose)
    params = InitializeLog.3p(params)
    params = InitializeStamps.3p(params)
    params = InitializeTrackingTable.3p(params)
    Header(params)
    params = PrepareFolderLinear.B3(params, monitorFolder)
    if (params$failed) {
        warning(params$errorMessage)
        return(invisible(NULL))
    }
    data = PrepareDataLogistic.B23(params, data)
    params = AddToLog(params, "PrepareDataLogistic.B23", 0, 0, 0, 0)
    if (data$failed) {
        # Tell T the job is dead before quitting.
        message = "Error in processing the data for Party B."
        MakeErrorMessage(params$writePath, message)
        files = c("errorMessage.rdata")
        params = SendPauseQuit.3p(params, filesT = files, sleepTime = sleepTime, job_failed = TRUE, waitForTurn = TRUE)
        return(params$stats)
    }
    params = PrepareParamsLinear.B3(params, data)
    files = "pb.rdata"
    params = SendPauseContinue.3p(params, filesT = files, from = "T",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime, waitForTurn = TRUE)
    if (file.exists(file.path(params$readPath[["T"]], "errorMessage.rdata"))) {
        warning(ReadErrorMessage(params$readPath[["T"]]))
        params = SendPauseQuit.3p(params, sleepTime = sleepTime, job_failed = TRUE, waitForTurn = TRUE)
        return(params$stats)
    }
    params$algIterationCounter = 1
    # Setup phase: share the (encrypted, blocked) design-matrix products
    # with T, exactly as in the linear-model protocol.
    params = PrepareBlocksLinear.B3(params)
    params = GetRWLinear.B3(params, data)
    files = c("xbtxb.rdata", SeqZW("crw_", length(params$container$filebreak.RW)))
    params = SendPauseContinue.3p(params, filesT = files, from = "T",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
    if (file.exists(file.path(params$readPath[["T"]], "errorMessage.rdata"))) {
        warning(ReadErrorMessage(params$readPath[["T"]]))
        params = SendPauseQuit.3p(params, sleepTime = sleepTime, job_failed = TRUE, waitForTurn = TRUE)
        return(params$stats)
    }
    params = UpdateParamsLogistic.B3(params)
    data = UpdateDataLogistic.B3(params, data)
    params = GetBetaBLogistic.B3(params)
    params$algIterationCounter = 1
    # IRLS loop: each pass exchanges weighted cross-products and the
    # coefficient update component with T until T declares convergence.
    while (!params$converged && !params$maxIterExceeded) {
        BeginningIteration(params)
        params = GetXBbetaBLogistic.B3(params, data)
        files = c("xbbeta.rdata")
        params = SendPauseContinue.3p(params, filesT = files, from = "T",
                                      sleepTime = sleepTime, maxWaitingTime = maxWaitingTime, waitForTurn = TRUE)
        params = GetRVLogistic.B3(params, data)
        files = c("xbtwxb.rdata", SeqZW("crv_", length(params$container$filebreak.RV)))
        params = SendPauseContinue.3p(params, filesT = files, from = "T",
                                      sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
        if (file.exists(file.path(params$readPath[["T"]], "errorMessage.rdata"))) {
            warning(ReadErrorMessage(params$readPath[["T"]]))
            params = SendPauseQuit.3p(params, sleepTime = sleepTime, job_failed = TRUE, waitForTurn = TRUE)
            return(params$stats)
        }
        params = UpdateBetaLogistic.B3(params, data)
        # Must match save(BI, file = ..."BI.rdata") in UpdateBetaLogistic.B3
        # (was "bi.rdata", which breaks on case-sensitive file systems).
        files = c("BI.rdata")
        params = SendPauseContinue.3p(params, filesT = files, from = "T",
                                      sleepTime = sleepTime, maxWaitingTime = maxWaitingTime, waitForTurn = TRUE)
        params = GetBetaBLogistic.B3(params)
        EndingIteration(params)
        params$algIterationCounter = params$algIterationCounter + 1
    }
    # Finalization: send B's fitted-value contribution, then collect the
    # merged results from T.
    params = GetFinalBetaLogistic.B3(params, data)
    files = "Bfinalfitted.rdata"
    params = SendPauseContinue.3p(params, filesT = files, from = "T",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime, waitForTurn = TRUE)
    params = GetResultsLogistic.B3(params)
    params = SendPauseQuit.3p(params, sleepTime = sleepTime, waitForTurn = TRUE)
    return(params$stats)
}
# Driver for the trusted third party T in the 3-party distributed logistic
# regression.  T coordinates the protocol: it merges both parties' blocked
# products, runs the pooled IRLS updates, and distributes results.
# Returns the stats object (or NULL on setup failure).
# FIX: the final transfer list previously named "finalfitted.rdata", but
# GetFinalFittedLogistic.T3 saves "finalFitted.rdata" and party A loads
# "finalFitted.rdata"; the all-lowercase name fails on case-sensitive
# file systems.
PartyTProcess3Logistic = function(monitorFolder = NULL,
                                  msreqid = "v_default_0_000",
                                  blocksize = 500,
                                  cutoff = 1e-8,
                                  maxIterations = 25,
                                  sleepTime = 10,
                                  maxWaitingTime = 24 * 60 * 60,
                                  popmednet = TRUE,
                                  trace = FALSE,
                                  verbose = TRUE) {
    params = PrepareParams.3p("logistic", "T", msreqid = msreqid,
                              popmednet = popmednet, trace = trace, verbose = verbose)
    params = InitializeLog.3p(params)
    params = InitializeStamps.3p(params)
    params = InitializeTrackingTable.3p(params)
    Header(params)
    params = PrepareFolderLinear.T3(params, monitorFolder)
    if (params$failed) {
        warning(params$errorMessage)
        return(invisible(NULL))
    }
    params = PauseContinue.3p(params, from = c("A", "B"), maxWaitingTime = maxWaitingTime)
    # Error triage: either party (or both) may have failed during data
    # preparation; forward the error to the surviving party before quitting.
    if (file.exists(file.path(params$readPath[["A"]], "errorMessage.rdata")) &&
        file.exists(file.path(params$readPath[["B"]], "errorMessage.rdata"))) {
        warning(ReadErrorMessage(params$readPath[["A"]]))
        warning(ReadErrorMessage(params$readPath[["B"]]))
        params = SendPauseQuit.3p(params, sleepTime = sleepTime, job_failed = TRUE)
        SummarizeLog.3p(params)
        return(params$stats)
    }
    if (file.exists(file.path(params$readPath[["A"]], "errorMessage.rdata"))) {
        warning(ReadErrorMessage(params$readPath[["A"]]))
        file.copy(file.path(params$readPath[["A"]], "errorMessage.rdata"),
                  file.path(params$writePath, "errorMessage.rdata"))
        files = "errorMessage.rdata"
        params = SendPauseContinue.3p(params, filesB = files, from = "B",
                                      sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
        params = SendPauseQuit.3p(params, sleepTime = sleepTime, job_failed = TRUE)
        SummarizeLog.3p(params)
        return(params$stats)
    }
    if (file.exists(file.path(params$readPath[["B"]], "errorMessage.rdata"))) {
        warning(ReadErrorMessage(params$readPath[["B"]]))
        file.copy(file.path(params$readPath[["B"]], "errorMessage.rdata"),
                  file.path(params$writePath, "errorMessage.rdata"))
        files = "errorMessage.rdata"
        params = SendPauseContinue.3p(params, filesA = files, from = "A",
                                      sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
        params = SendPauseQuit.3p(params, sleepTime = sleepTime, job_failed = TRUE)
        SummarizeLog.3p(params)
        return(params$stats)
    }
    params = PrepareParamsLinear.T3(params, cutoff, maxIterations)
    if (!params$failed) params = PrepareBlocksLinear.T3(params, blocksize)
    if (params$failed) {
        warning(params$errorMessage)
        MakeErrorMessage(params$writePath, params$errorMessage)
        files = "errorMessage.rdata"
        params = SendPauseContinue.3p(params, filesA = files, filesB = files,
                                      from = c("A", "B"),
                                      sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
        params = SendPauseQuit.3p(params, sleepTime = sleepTime)
        SummarizeLog.3p(params)
        return(params$stats)
    }
    # Setup phase: relay the blocked, re-encrypted products between the
    # parties, following the linear-model protocol.
    files = "blocks.rdata"
    params = SendPauseContinue.3p(params, filesA = files, from = "A",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
    params$algIterationCounter = 1
    params = ProcessZLinear.T3(params)
    files = c("blocks.rdata", SeqZW("crz_", length(params$container$filebreak.RZ)))
    params = SendPauseContinue.3p(params, filesB = files, from = "B",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
    params = ProcessWLinear.T3(params)
    files = c("p2.rdata", SeqZW("cwr_", length(params$container$filebreak.WR)))
    params = SendPauseContinue.3p(params, filesA = files, from = "A",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
    params = GetProductsLinear.T3(params)
    params = CheckColinearityLogistic.T3(params)
    if (params$failed) {
        warning(params$errorMessage)
        MakeErrorMessage(params$writePath, params$errorMessage)
        files = "errorMessage.rdata"
        params = SendPauseContinue.3p(params, filesA = files, filesB = files,
                                      from = c("A", "B"),
                                      sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
        params = SendPauseQuit.3p(params, sleepTime = sleepTime)
        SummarizeLog.3p(params)
        return(params$stats)
    }
    params = ComputeInitialBetasLogistic.T3(params)
    filesA = c("Aindicies.rdata", "betasA.rdata", "Axty.rdata", "converged.rdata")
    filesB = c("Bindicies.rdata", "betasB.rdata", "Bxty.rdata", "converged.rdata")
    params = SendPauseContinue.3p(params, filesA = filesA, filesB = filesB, from = c("A", "B"),
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
    params$algIterationCounter = 1
    # IRLS loop: per iteration T computes the weights, relays the weighted
    # products, inverts t(X)WX, and distributes the coefficient updates.
    while (!params$converged && !params$maxIterExceeded) {
        BeginningIteration(params)
        params = GetWeightsLogistic.T3(params)
        files = "pi.rdata"
        params = SendPauseContinue.3p(params, filesB = files, from = "B",
                                      sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
        params = ProcessVLogistic.T3(params)
        files = c("pi.rdata", SeqZW("cvr_", length(params$container$filebreak.RV)))
        params = SendPauseContinue.3p(params, filesA = files, from = "A",
                                      sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
        params = ProcessXtWXLogistic.T3(params)
        if (params$failed) {
            # t(X)WX was singular: inform both parties, then quit.
            warning(params$errorMessage)
            MakeErrorMessage(params$writePath, params$errorMessage)
            files = "errorMessage.rdata"
            params = SendPauseContinue.3p(params, filesA = files, filesB = files,
                                          from = c("A", "B"),
                                          sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
            params = SendPauseQuit.3p(params, sleepTime = sleepTime)
            SummarizeLog.3p(params)
            return(params$stats)
        }
        filesA = c("IIA.rdata")
        filesB = c("IIB.rdata")
        params = SendPauseContinue.3p(params, filesA = filesA, filesB = filesB, from = c("A", "B"),
                                      sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
        params = UpdateBetaLogistic.T3(params)
        filesA = c("betasA.rdata", "converged.rdata")
        filesB = c("betasB.rdata", "converged.rdata")
        params = SendPauseContinue.3p(params, filesA = filesA, filesB = filesB, from = c("A", "B"),
                                      sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
        EndingIteration(params)
        params$algIterationCounter = params$algIterationCounter + 1
    }
    # Finalization: send the assembled linear predictor to A (which holds Y),
    # collect A's fit statistics, and distribute the merged results.
    params = GetFinalFittedLogistic.T3(params)
    # Must match save(finalFitted, file = ..."finalFitted.rdata") in
    # GetFinalFittedLogistic.T3 (was "finalfitted.rdata", which breaks on
    # case-sensitive file systems).
    filesA = "finalFitted.rdata"
    params = SendPauseContinue.3p(params, filesA = filesA, from = "A",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
    params = ComputeResultsLogistic.T3(params)
    files = "stats.rdata"
    params = SendPauseContinue.3p(params, filesA = files, filesB = files, from = c("A", "B"),
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
    params = SendPauseQuit.3p(params, sleepTime = sleepTime)
    SummarizeLog.3p(params)
    return(params$stats)
}
|
93498cbdf7f99b265ddc050133c6f161329af22c
|
e532a61e499e1e97740f1d0c642bc33e8e2426dc
|
/Measures comparison/Evaluation/ggplot_functions.R
|
07ebdcb94739c2d763b29f70eab3dd4a2a8d27ce
|
[
"MIT"
] |
permissive
|
mllg/stability-bioinf
|
37d4cebdb10b03e2b21999db252c832e0f68f2e5
|
bc282b31b09a211244880f095bd127c6438012f4
|
refs/heads/master
| 2020-12-24T21:11:37.046377
| 2017-06-07T12:46:16
| 2017-06-07T12:46:16
| 76,960,923
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,218
|
r
|
ggplot_functions.R
|
####################################################
# Functions for creating the correlation heatmaps
# and the plot of Pareto optimal models
# for the descriptive experiments using ggplot2
####################################################
# Draw a correlation heatmap of stability measures with both axes ordered
# by single-linkage hierarchical clustering of (1 - correlation) distances.
# `cor` is assumed to be a square, symmetric correlation matrix with row
# names -- TODO confirm with callers; `names` supplies pretty axis labels
# in the same order as rownames(cor).
# NOTE(review): heatmap.2() is used only to obtain the clustering order,
# but it also draws its own plot as a side effect.
heatmap_ggplot <- function(cor, names, title = "")
{
    library(gplots)      # heatmap.2 (clustering order)
    library(ggplot2)
    library(data.table)  # melt()
    # Cluster so that highly correlated measures end up adjacent.
    hm <- heatmap.2(cor, dendrogram = "column", distfun = function(x) as.dist(1 - x), scale = "none",
                    density.info = "none", trace = "none",
                    hclustfun = function(x) hclust(x, method = "single"))
    cluster_order <- hm$rowInd
    # Long format: one row per (measure, variable, correlation) cell.
    cor_data <- cbind(measure = rownames(cor), as.data.frame(cor))
    cor_data <- melt(cor_data, id.vars = "measure")
    # Impose the clustering order on both axes via factor levels.
    cor_data$measure <- factor(cor_data$measure, levels = rownames(cor)[cluster_order])
    cor_data$variable <- factor(cor_data$variable, levels = rownames(cor)[cluster_order])
    gg_heatmap <- ggplot(cor_data, aes(measure, variable)) +
        geom_tile(aes(fill = value), colour = "white") +
        # Diverging palette centered at 0 over the full correlation range.
        scale_fill_gradient2(low = "darkblue", mid = "white", high = "darkred", limits = c(-1, 1), name = "Correlation") +
        # scale_fill_gradient(low = "white", high = "black", limits = c(-0.25, 1), name = "Correlation\n") +
        theme_grey() +
        labs(x = "", y = "", title = title) +
        scale_x_discrete(expand = c(0, 0), labels = names[cluster_order]) +
        scale_y_discrete(expand = c(0, 0), labels = names[cluster_order]) +
        theme(axis.ticks = element_blank(),
              legend.title = element_text(size = 12),
              legend.text = element_text(size = 12),
              axis.title = element_text(size = 12),
              axis.text = element_text(size = 12),
              title = element_text(size = 12)) +
        coord_equal(ratio = 1)  # square tiles
    return(gg_heatmap)
}
# Draw a binary heatmap of Pareto-optimality indicators: rows are stability
# measures, columns are model configurations, and each cell is 0/1 for
# "Pareto optimal".  `cor` is assumed to be a 0/1 matrix with configurations
# in rows and measures in columns -- TODO confirm with callers; `names`
# supplies pretty labels for the measures.
# NOTE(review): as above, heatmap.2() is used only for the ordering but
# draws its own plot as a side effect.
binary_heatmap_ggplot <- function(cor, names, title = "")
{
    library(gplots)      # heatmap.2 (clustering order)
    library(ggplot2)
    library(data.table)  # melt()
    hm <- heatmap.2(cor, dendrogram = "column", scale = "none",
                    density.info = "none", trace = "none",
                    hclustfun = function(x) hclust(x, method = "single"))
    cluster_order <- hm$colInd  # ordering for the measures (columns of cor)
    # Convert every cell to a 0/1 factor so ggplot uses a discrete scale.
    cor_factor <- apply(cor, 1, function(x) factor(x, levels = 0:1))
    cor_data <- cbind(measure = colnames(cor), as.data.frame(cor_factor))
    cor_data <- melt(cor_data, id.vars = "measure")
    # Order measures by the column clustering, configurations by the rows'.
    cor_data$measure <- factor(cor_data$measure, levels = colnames(cor)[cluster_order])
    cor_data$variable <- factor(cor_data$variable, levels = rownames(cor)[hm$rowInd])
    gg_heatmap <- ggplot(cor_data, aes(variable, measure)) +
        geom_tile(aes(fill = value), colour = "white") +
        scale_fill_grey(name = "Pareto\noptimal", labels = c("No", "Yes"), start = 0.8, end = 0.2) +
        theme_grey() +
        labs(y = "Stability measures", x = "Configurations", title = title) +
        scale_y_discrete(expand = c(0, 0), labels = names[cluster_order]) +
        # Configuration labels are suppressed (too many to print).
        scale_x_discrete(expand = c(0, 0), labels = element_blank()) +
        theme(axis.ticks = element_blank(),
              legend.title = element_text(size = 10),
              legend.text = element_text(size = 10),
              axis.title = element_text(size = 10),
              axis.text = element_text(size = 10))
    return(gg_heatmap)
}
|
e59446627364d709a138d2703c7675f76b1e2884
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.analytics/man/elasticsearchservice_delete_outbound_cross_cluster_search_connection.Rd
|
c3be3e8974b5ca7946582a8cc09bc16e9dcf86c3
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 1,504
|
rd
|
elasticsearchservice_delete_outbound_cross_cluster_search_connection.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elasticsearchservice_operations.R
\name{elasticsearchservice_delete_outbound_cross_cluster_search_connection}
\alias{elasticsearchservice_delete_outbound_cross_cluster_search_connection}
\title{Allows the source domain owner to delete an existing outbound
cross-cluster search connection}
\usage{
elasticsearchservice_delete_outbound_cross_cluster_search_connection(
CrossClusterSearchConnectionId)
}
\arguments{
\item{CrossClusterSearchConnectionId}{[required] The id of the outbound connection that you want to permanently delete.}
}
\value{
A list with the following syntax:\preformatted{list(
CrossClusterSearchConnection = list(
SourceDomainInfo = list(
OwnerId = "string",
DomainName = "string",
Region = "string"
),
DestinationDomainInfo = list(
OwnerId = "string",
DomainName = "string",
Region = "string"
),
CrossClusterSearchConnectionId = "string",
ConnectionAlias = "string",
ConnectionStatus = list(
StatusCode = "PENDING_ACCEPTANCE"|"VALIDATING"|"VALIDATION_FAILED"|"PROVISIONING"|"ACTIVE"|"REJECTED"|"DELETING"|"DELETED",
Message = "string"
)
)
)
}
}
\description{
Allows the source domain owner to delete an existing outbound
cross-cluster search connection.
}
\section{Request syntax}{
\preformatted{svc$delete_outbound_cross_cluster_search_connection(
CrossClusterSearchConnectionId = "string"
)
}
}
\keyword{internal}
|
84a9a5b505c4f32b74258e5d369b5131353b0111
|
a160305f979ade5ae5a0a8722d6de2e80ff3c666
|
/R/fr_hollingsII.R
|
20a8ca2fe05f3cbea88a412c82cd94e52102ae27
|
[] |
no_license
|
dpritchard/frair
|
5e4249cf103891aad5943beaae5c8b8c3e45078a
|
a66a1733bc77eeb8372abeac4f2c218cd5a3c4df
|
refs/heads/master
| 2020-04-12T01:36:52.817826
| 2017-03-25T21:24:31
| 2017-03-25T21:24:31
| 10,156,720
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,909
|
r
|
fr_hollingsII.R
|
## Holling's Orginal Type II pre-prey function.
# hollingsII: The guiding function...
hollingsII <- function(X, a, h, T) {
    # Holling's original type-II functional response: expected number of
    # prey attacked at prey density X, given attack rate a, handling time h
    # and total experimental time T.  For convenience `a` may instead be a
    # named list (or coef vector) carrying all three of a, h and T.
    if (is.list(a)) {
        h <- a[['h']]
        T <- a[['T']]
        a <- a[['a']]  # extract last: overwrites the list itself
    }
    a * X * T / (1 + a * X * h)  # Juliano 2001, p. 181
}
# hollingsII_fit: Does the heavy lifting
hollingsII_fit <- function(data, samp, start, fixed, boot=FALSE, windows=FALSE) {
    # Fit Holling's type-II response to data[samp, ] by maximum likelihood
    # (Nelder-Mead via bbmle::mle2 on hollingsII_nll).
    #
    # data  : data frame with columns X (initial density) and Y (consumed).
    # samp  : row indices to use (a bootstrap resample or the full set).
    # start : named list of starting values for the free parameters.
    # fixed : named list of parameters held fixed.
    # boot  : if TRUE, return the coefficient vector even when the fit
    #         fails (all-NA template), so bootstrap loops keep running.
    # Returns the filled coefficient/variance vector, or (boot = FALSE)
    # a list with that vector and the mle2 fit object.
    # Setup windows parallel processing
    fr_setpara(boot, windows)
    samp <- sort(samp)
    dat <- data[samp,]
    out <- fr_setupout(start, fixed, samp)
    try_hollingsII <- try(
        bbmle::mle2(hollingsII_nll, start=start, fixed=fixed,
                    data=list('X'=dat$X, 'Y'=dat$Y),
                    optimizer='optim', method='Nelder-Mead',
                    control=list(maxit=5000)),
        silent=TRUE)  # was silent=T: T is reassignable, TRUE is not
    if (inherits(try_hollingsII, "try-error")) {
        # The fit failed...
        if(boot){
            return(out)
        } else {
            stop(try_hollingsII[1])
        }
    } else {
        # The fit 'worked': harvest estimates and variances for the free
        # parameters.  Iterating over names() directly (rather than
        # 1:length(names(...))) is safe when start or fixed is empty.
        for (cname in names(start)){
            vname <- paste(cname, 'var', sep='')
            out[cname] <- coef(try_hollingsII)[cname]
            out[vname] <- vcov(try_hollingsII)[cname, cname]
        }
        for (cname in names(fixed)){
            # Add fixed variables to the output
            out[cname] <- as.numeric(fixed[cname])
        }
        if(boot){
            return(out)
        } else {
            return(list(out=out, fit=try_hollingsII))
        }
    }
}
# hollingsII_nll: Provides negative log-likelihood for estimations via bbmle::mle2()
hollingsII_nll <- function(a, h, T, X, Y) {
    # Negative log-likelihood for Holling's type-II response, for use with
    # bbmle::mle2.  Consumption Y out of X is modelled as binomial with
    # success probability hollingsII(X, a, h, T) / X.  Parameter values
    # outside the valid region return NA so the optimizer steps away.
    if (a <= 0 || h <= 0) {
        return(NA)  # estimates must be strictly positive
    }
    pexp <- hollingsII(X, a, h, T) / X
    # The expected proportion must be a defined value in [0, 1]; otherwise
    # a and h are a bad combination and the likelihood is undefined.
    if (any(is.na(pexp)) || any(pexp < 0) || any(pexp > 1)) {
        return(NA)
    }
    -sum(dbinom(Y, prob = pexp, size = X, log = TRUE))
}
# The diff function
hollingsII_diff <- function(X, grp, a, h, T, Da, Dh) {
    # Two-group parameterization of the type-II response: for grp == 1 the
    # attack rate and handling time are shifted by Da and Dh, so testing
    # Da/Dh against zero tests for a between-group difference.
    aGrp <- a - Da * grp
    hGrp <- h - Dh * grp
    aGrp * X * T / (1 + aGrp * X * hGrp)
}
# The diff_nll function
hollingsII_nll_diff <- function(a, h, T, Da, Dh, X, Y, grp) {
    # Negative log-likelihood for the two-group difference model
    # (hollingsII_diff), for use with bbmle::mle2.  Invalid parameter
    # regions return NA so the optimizer steps away.
    if (a <= 0 || h <= 0) {
        return(NA)  # baseline estimates must be strictly positive
    }
    pexp <- hollingsII_diff(X, grp, a, h, T, Da, Dh) / X
    # The expected proportion must be a defined value in [0, 1].
    if (any(is.na(pexp)) || any(pexp < 0) || any(pexp > 1)) {
        return(NA)
    }
    -sum(dbinom(Y, prob = pexp, size = X, log = TRUE))
}
|
ed2d3a195a58e5e919e786b7488e548009cb9127
|
bb903df45d84290980ca7fd18833893b2a1c1dbe
|
/src/data/gapminder/analysis1.R
|
d11655ef5e11b08d4ef5aaf21905d0dfbbea878e
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
billcook1951/2019-04-02-carpentries-mmc
|
0f5b71e47f852b615980d5ee88f8347c739db12c
|
65eeaec6157ab5647d619676a72cfcdda0ae219d
|
refs/heads/master
| 2020-05-04T10:06:52.488949
| 2019-04-02T17:12:27
| 2019-04-02T17:12:27
| 179,081,948
| 0
| 0
| null | 2019-04-02T13:18:48
| 2019-04-02T13:18:48
| null |
UTF-8
|
R
| false
| false
| 1,151
|
r
|
analysis1.R
|
# Carpentries gapminder lesson script: demonstrates base-R subsetting,
# dplyr verbs, the magrittr pipe, and ggplot2 faceting side by side.
library(here)
library(tidyverse)
gapminder <- readr::read_csv(here("data/gapminder/raw/gapminder_data.csv"))
# oldschool: base-R logical subsetting of a single column by continent
mean(gapminder$gdpPercap[gapminder$continent == "Africa"])
mean(gapminder$gdpPercap[gapminder$continent == "Americas"])
# select() without a pipe, for contrast with the piped version below
year_country_gdp <- select(gapminder,year, country, gdpPercap)
head(year_country_gdp)
# using pipes: the same selection, restricted to one continent
year_country_gdp <- gapminder %>%
  select(year, country, gdpPercap, continent) %>%
  filter(continent == "Europe")
head(year_country_gdp)
#challenge: life expectancy per African country and year
af_values <- gapminder %>%
  filter(continent == "Africa") %>%
  select(year, country, lifeExp)
head(af_values)
# next challenge to use group_by: per-group summary statistics
gapminder %>%
  group_by(continent) %>%
  summarize(mean_val = mean(gdpPercap))
gapminder %>%
  group_by(country) %>%
  summarize(mean_lifeExp = mean(lifeExp), sd_gdpPercap = sd(gdpPercap))
# pipe into a plot: one life-expectancy time series panel per country
ggplot(gapminder, aes(x = year, y = lifeExp, color = continent)) +
  geom_line() +
  facet_wrap( ~ country)
# same plot restricted to Africa by piping a filtered frame into ggplot()
gapminder %>%
  filter(continent == "Africa") %>%
  ggplot ( aes(x = year, y = lifeExp, color = continent)) +
  geom_line() +
  facet_wrap( ~ country)
4bb5a9bde36f933ff294dc440cb065727c50ffc6
|
d1708275acd33034cee5cee27dc3dc7189e70f47
|
/inst/examples/ex_test.R
|
2e798f761e7f7250a8a2b7e8f9c92a095b130a34
|
[] |
no_license
|
stc04003/tranSurv
|
ae0ef34ea789a8f5bb03aa718749ea39b5e449fb
|
e9f23bf1ac11f12e745c32a91f23e1fcda336897
|
refs/heads/master
| 2021-07-19T17:05:00.055305
| 2021-07-17T17:33:17
| 2021-07-17T17:33:17
| 135,850,835
| 1
| 0
| null | 2019-10-29T21:01:53
| 2018-06-02T20:41:51
|
R
|
UTF-8
|
R
| false
| false
| 2,045
|
r
|
ex_test.R
|
## ------------------------------------------------------------------------------------------
## Library and data
## ------------------------------------------------------------------------------------------
library(tranSurv)
data(channing, package = "boot")
chan <- subset(channing, entry < exit)
## ------------------------------------------------------------------------------------------
trReg(Surv(entry, exit, cens) ~ sex, data = chan)
trReg(Surv(entry, exit, cens) ~ sex, data = chan, method = "adjust", control = list(G = 10))
(fit <- trReg(Surv(entry, exit, cens) ~ 1, data = chan))
plot(fit)
(fit <- with(chan, trSurvfit(entry, exit, cens)))
plot(fit)
gof(with(chan, Surv(entry, exit, cens)), B = 10)
(fit0 <- with(chan, trSurvfit(entry, exit, cens)))
gof(fit0, B = 20)
(fit <- trReg(Surv(entry, exit, cens) ~ sex, data = chan, B = 10))
gof(fit, B = 20)
(fit <- trReg(Surv(entry, exit, cens) ~ sex, data = chan, B = 10))
(fit <- trReg(Surv(entry, exit, cens) ~ sex, data = chan, B = 10, control = list(P = 2)))
(fit <- trReg(Surv(entry, exit, cens) ~ sex, data = chan, B = 10, control = list(P = 3)))
(fit <- trReg(Surv(entry, exit, cens) ~ sex, data = chan, B = 10, method = "adjust"))
(fit <- trReg(Surv(entry, exit, cens) ~ sex, data = chan, B = 10, method = "adjust", control = list(P = 1)))
(fit <- trReg(Surv(entry, exit, cens) ~ sex, data = chan, B = 10, method = "adjust", control = list(P = 2)))
## errored because of tiny intervals
## (fit <- trReg(Surv(entry, exit, cens) ~ sex, data = chan, B = 10, method = "adjust", control = list(P = 3)))
(fit <- trReg(Surv(entry, exit, cens) ~ sex, data = chan, B = 10, method = "adjust", control = list(Q = 1)))
(fit <- trReg(Surv(entry, exit, cens) ~ sex, data = chan, B = 10, method = "adjust", control = list(Q = 2)))
names(fit)
fit$PEta
(trReg(Surv(entry, exit, cens) ~ sex, data = chan, B = 10, method = "adjust", control = list(Q = 2, a = -0)))
(trReg(Surv(entry, exit, cens) ~ sex, data = chan, B = 10, method = "adjust", control = list(Q = 2, a = -0.7977801)))
|
96469f90fac41374dfbcb3567dc84ccec8c28913
|
328fb6a9f41ebc505de13af7900d4c965d0636b8
|
/test.R
|
f0e3bb5d74ea9147c39401132c39fce57dd1f084
|
[] |
no_license
|
davidgremminger/dspm_git_intro
|
8ce282f75e80f4f00e0e1dd7fe7b6e3e0ef1902e
|
38cdb229fbc6a20961d4ab14609be39a579d004d
|
refs/heads/master
| 2020-04-15T10:51:47.696914
| 2020-01-06T18:05:17
| 2020-01-06T18:05:17
| 164,602,151
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 350
|
r
|
test.R
|
# Loading relevant package
library(ggplot2)
# Creating plot
scatterplot <- ggplot(diamonds, aes(x = carat, y = price)) +
geom_point(size = 4) +
theme_minimal() +
ggtitle("Diamonds Scatterplot")
# Saving plot
ggsave("./plots/plot.jpg")
# My colleague adds a comment.
# I make a change on Jan 6
|
b882a47513fe35bf5e0a391cade9a4e516d22da2
|
17464ae92bc82d5aa65320c47d98b2ed0028e977
|
/dominance/dominance_switch_model_selection.R
|
608bf3ee5d333f1e5d4aa815a42e31aba5ceec3b
|
[] |
no_license
|
SallyKoerner/GEx_SEV2019
|
348ed1989061e47c7a2dba88063e5676d36e3de7
|
6570f850ee80e9987eaa0b5ddbdfa6cab45c627e
|
refs/heads/master
| 2021-08-07T21:33:45.131718
| 2021-07-21T19:41:31
| 2021-07-21T19:41:31
| 182,826,141
| 1
| 1
| null | 2020-06-12T17:43:43
| 2019-04-22T16:29:23
|
R
|
UTF-8
|
R
| false
| false
| 1,346
|
r
|
dominance_switch_model_selection.R
|
# This script performs model selection on variables that affect dominant species switching
#
###########################################################################################
###########################################################################################
# Load libraries
library(tidyverse)
library(glmnet)
library(ggplot2)
## -- REPOSITORY DIR -- ##
setwd("/Users/avahoffman/Dropbox/Research/Grazing_consortium/2020/GEx_SEV2019")
dat <-
read.csv("community_difference_allmetrics_siteavg_12June2020b.csv")
dat_sub <-
dat %>%
select(c(diff_sp,
precip,
CAM,
bio1,
NMDS1,
N.deposition1993,
PhotoMix,
ALLC3)) %>%
drop_na()
dat_y <-
as.numeric((dat_sub %>%
select(diff_sp))[, 1])
dat_x <- dat_sub %>%
select(-c(diff_sp))
#find mean, std in dat_x
mean_x <- sapply(dat_x, mean)
sd_x <- sapply(dat_x, sd)
x_scaled <- scale(dat_x)
lambda_seq <- 10 ^ seq(2,-2, by = -.1)
cv_output <- cv.glmnet(x_scaled,
dat_y,
alpha = 1,
lambda = lambda_seq,
nfolds = 5)
best_lam <- cv_output$lambda.min
best_lam
lasso_out <-
glmnet(x_scaled,
dat_y,
family = "gaussian",
alpha = 1,
lambda = best_lam)
|
52b149eb793b44d7ae97ccdb4d318937ffb92b76
|
30f69e17253e3ccba7654766f790430a23dfd608
|
/R/leontief-matrix.R
|
e77b3470271d6f432e5bbdc0a248190af86b87dc
|
[] |
no_license
|
amrofi/GVCs-LICs
|
bd535ef059e70bd9e9fc9d20934937e303ffe871
|
47ac9e2ee56854d9163746bef4c7bf6b58d1331d
|
refs/heads/master
| 2020-07-24T21:17:08.647680
| 2017-02-21T08:28:24
| 2017-02-21T08:28:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,257
|
r
|
leontief-matrix.R
|
# leontief matrix form
# Bastiaan Quast
# bquast@gmail.com
library(decompr)
lm1995 <- load_tables_vectors(wid1995,
wfd1995,
countries,
sectors,
output1995)
lm2000 <- load_tables_vectors(wid2000,
wfd2000,
countries,
sectors,
output2000)
lm2005 <- load_tables_vectors(wid2005,
wfd2005,
countries,
sectors,
output2005)
lm2008 <- load_tables_vectors(wid2008,
wfd2008,
countries,
sectors,
output2008)
lmf1995 <- leontief(lm1995, long=FALSE)
lmf2000 <- leontief(lm2000, long=FALSE)
lmf2005 <- leontief(lm2005, long=FALSE)
lmf2008 <- leontief(lm2008, long=FALSE)
# run nrca's
library(gvc)
nrca1995 <- nrca(lmf1995)
nrca2000 <- nrca(lmf2000)
nrca2005 <- nrca(lmf2005)
nrca2008 <- nrca(lmf2008)
# save nrca's
save(nrca1995, nrca2000, nrca2005, nrca2008, file ="data/nrca.RData")
|
1984fd2ebab6531fd6dc84ce9e62d7fa23486b3d
|
f61339934b58e78decd99fd2f57533cd1389cde8
|
/OxD_E_Params_LaTeX/Digitiziation/Analysis.R
|
266542ee74e3a77ad0e2165ec0d778861f60ffd8
|
[] |
no_license
|
julianstanley/Ratiometric_Microscopy
|
9010d90c943f90cc5700bda813017ff25389277a
|
5eaa7c595552c652af56e99b78cd340fb349e29a
|
refs/heads/master
| 2021-06-21T19:54:59.755633
| 2019-09-29T18:12:47
| 2019-09-29T18:12:47
| 149,372,411
| 0
| 1
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 3,427
|
r
|
Analysis.R
|
# ----------- Spectra class definition
# Ox: Matrix of [X, Y] spectrum values in oxidized state
# Red: Matrix of [X, Y] spectrum values in reduced state
setClass("Spectra",
slots = list(ox = "matrix", red = "matrix"))
# ----------- Spectra constructors
# Main constructor
# ARGS: Two, 2-by-N matricies
# -- oxMatrix: matrix of x and y values in oxidized state
# -- redMatrix: matrix of x and y values in reduced state
Spectra <- function(oxMatrix, redMatrix) {
return(new("Spectra", ox = oxMatrix,
red = redMatrix));
}
# Additional constructor for seperate x and y arrays
SpectraPoint <- function(xOx, yOx, xRed, yRed) {
return(Spectra(createPos(xOx, yOx), createPos(xRed, yRed)));
}
# Helper to create a 2-column matrix from two vectors
createPos <- function(X, Y) {
return(
cbind(matrix(na.omit(X)), na.omit(Y))
);
}
# ----------- Plotting functions
plotOxRed <- function(spectra, main) {
plot(spectra@ox[,2] ~ spectra@ox[,1], type = "l", main = main, ylim = c(0, max(spectra@ox[,2],
spectra@red[,2])))
points(spectra@red[,2] ~ spectra@red[,1], type = "l", col = "red")
}
# Requirement: R = lambda_2/lambda_1
plotROxD <- function(spectra, lambda_1_low, lambda_1_high, lambda_2_low, lambda_2_high) {
# Get the average value from each emission range
lambda_1_ox <- mean(subset(spectra@ox, spectra@ox[,1] > lambda_1_low & spectra@ox[,1] < lambda_1_high)[,2])
lambda_2_ox <- mean(subset(spectra@ox, spectra@ox[,1] > lambda_2_low & spectra@ox[,1] < lambda_2_high)[,2])
lambda_1_red <- mean(subset(spectra@red, spectra@red[,1] > lambda_1_low & spectra@red[,1] < lambda_1_high)[,2])
lambda_2_red <- mean(subset(spectra@red, spectra@red[,1] > lambda_2_low & spectra@red[,1] < lambda_2_high)[,2])
# Define minimum, maximum, and delta
Rmax <- max(lambda_1_ox/lambda_2_ox, lambda_1_red/lambda_2_red)
Rmin <- min(lambda_1_ox/lambda_2_ox, lambda_1_red/lambda_2_red)
delta <- lambda_2_ox/lambda_2_red
print(Rmax)
print(Rmin)
print(delta)
# Define the funtion oxidized
OXD <- function(R, Rmin, Rmax, delta) {
return (
(R - Rmin)/((R - Rmin) + (delta*(Rmax - R)))
)
}
# Generate inital values of R
R <- seq(Rmin, Rmax, by = 0.001)
magR <- length(R)
# Generate inital values of oxD
yOXD = OXD(R, rep(Rmin, each = magR),
rep(Rmax, each = magR),
rep(delta, each = magR))
# Set size
par(pty = 's')
# Plotvalue
plot(R, yOXD,
type = 'l', main = "
Fraction oxidized \n at measured ratio",
ylab = "OxD", xlab = "R",
xlim = c(0, Rmax))
}
# -----------
# ----------- Use case
# Create GFP1
GFP1 <- SpectraPoint(dig$Oxidized.X.GFP.1,
dig$Oxidized.Y.GFP.1,
dig$Reduced.X.GFP.1,
dig$Reduced.Y.GFP.1)
# Create GFP 2
GFP2 <- SpectraPoint(dig$ï..Oxidized.X.GFP2,
dig$Oxidized.Y.GFP.2,
dig$Reduced.X.GFP2,
dig$Reduced.Y.GFP2)
# Plot GFP 1 and 2
dev.off()
par(mfrow = c(1, 2), pty = 's')
plotOxRed(GFP1, main = "GFP1")
plotOxRed(GFP2, main = "GFP2")
# Plot OxD vs R
dev.off()
plotROxD(GFP2, 485, 495, 395, 405)
|
7fe97fb91755409543dd4378c9432310bf9d08ed
|
753b32b49bfd7c9e3bb9ed7370e97a5bfbb62860
|
/plot1.R
|
3337ab62039986a6802c89f5f22aa877454098b5
|
[] |
no_license
|
rohitravishankar/ExploratoryDataAnalysis-CourseProject2
|
985e1dfabb2e24d52469457f77a9f318e7beb03c
|
40d1b665a7da7c7b45cfbd5e3d0e88ade5af239d
|
refs/heads/master
| 2021-01-15T11:12:08.529565
| 2015-07-21T10:50:18
| 2015-07-21T10:50:18
| 39,437,544
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 398
|
r
|
plot1.R
|
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
aggregatedTotal <- aggregate(Emissions ~ year, NEI, sum)
png("plot1.png")
barplot(height=aggregatedTotal$Emissions, names.arg=aggregatedTotal$year, xlab="years", ylab="total PM2.5 emission",main="Total PM2.5 emissions at various years")
dev.off()
|
6a8414e28c2996d0d1190bc91c03e97894345cbd
|
95c158d8727c1877d95e8eac0daa08754c1c6f04
|
/data/read_local_data.R
|
c428595ac12be538b7ced195eeffda51faa41f67
|
[] |
no_license
|
asRodelgo/shinyTCMN
|
bcbec537466b03dd46fea45ab7baa61318668beb
|
e938dd403735ccf7261819d443d397a8618a3c04
|
refs/heads/master
| 2021-05-04T11:14:13.651748
| 2017-01-24T01:55:47
| 2017-01-24T01:55:47
| 60,372,337
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,932
|
r
|
read_local_data.R
|
# This scripts reads data for developing version of the app where data is stored in a different place
#
TCMN_data <- read.csv("/Users/asanchez3/srv/shiny-server/shinyTCMN-data/data/TCMN_data.csv", colClasses = c(rep("character",4),rep("numeric",2),rep("character",2)))
# country table ----------------------------
countries <- read.csv("/Users/asanchez3/srv/shiny-server/shinyTCMN-data/data/CountryClassification.csv", stringsAsFactors = FALSE)
# Avoid ISO2 code for Namibia be confused by NA
countries[countries$CountryCodeISO3=="NAM",]$CountryCodeISO2 <- "NA"
countries <- arrange(countries, Country)
# list of only countries (useful for selectors and others)
countryNames <- filter(countries, !(CountryCodeISO2==""))
countryNames <- select(countryNames, CountryCodeISO3, Country)# remove CountryISO2
# list of country departments
countryDeps <- filter(countries, !(CMU==""))
countryDeps <- arrange(select(countryDeps, CountryCodeISO3, RegionCodeALL, Region ,CMU), CMU)
# country Coordinates --------------
#countryCoords <- read.csv("/Users/asanchez3/srv/shiny-server/shinyTCMN-data/data/countryCoords.csv", stringsAsFactors = FALSE)
# indicator table ----------------------------
indicators <- read.csv("/Users/asanchez3/srv/shiny-server/shinyTCMN-data/data/IndicatorClassification.csv", stringsAsFactors = FALSE)
# TCMN specific source ----------------------------
TCMN_sources <- read.csv("/Users/asanchez3/srv/shiny-server/shinyTCMN-data/data/TCMN_sources.csv", stringsAsFactors = FALSE)
# TCMN specific indicators ----------------------------
TCMN_indic <- read.csv("/Users/asanchez3/srv/shiny-server/shinyTCMN-data/data/TCMN_Indicators.csv", stringsAsFactors = FALSE)
# TCMN specific datasets ----------------------------
TCMN_datasets <- read.csv("/Users/asanchez3/srv/shiny-server/shinyTCMN-data/data/TCMN_datasets.csv", stringsAsFactors = FALSE)
# WITS Imports ----------------------------
mWits <- read.csv("/Users/asanchez3/srv/shiny-server/shinyTCMN-data/data/mWits.csv", colClasses = c(rep("character",3),rep("numeric",2),rep("character",2)))
# WITS Exports ----------------------------
xWits <- read.csv("/Users/asanchez3/srv/shiny-server/shinyTCMN-data/data/xWits.csv", colClasses = c(rep("character",3),rep("numeric",2),rep("character",2)))
# IBRD T&C projects portfolio --------------
TCprojects <- read.csv("/Users/asanchez3/srv/shiny-server/shinyTCMN-data/data/TCprojects.csv", stringsAsFactors = FALSE)
# IFC projects portfolio --------------
IFCprojects <- read.csv("/Users/asanchez3/srv/shiny-server/shinyTCMN-data/data/IFCprojects.csv", stringsAsFactors = FALSE)
# SCD/CPF most recent --------------
mostRecentDocs <- read.csv("/Users/asanchez3/srv/shiny-server/shinyTCMN-data/data/SCDCPFdocuments.csv", stringsAsFactors = FALSE)
# SCD/CPF planned --------------
plannedDocs <- read.csv("/Users/asanchez3/srv/shiny-server/shinyTCMN-data/data/Planneddocuments.csv", stringsAsFactors = FALSE)
|
cd4ab7a71761ca6a21b15563196475a505f786e8
|
c56e17d02b242eb75a87725d34e870f11cd36e3a
|
/man/linebreak.Rd
|
32cb1c56598dfa88106b82e611ba3b25c36bd74f
|
[] |
no_license
|
isteves/kableExtra
|
689fb470151249fd87d6e5dca0ae0bba4e4197e6
|
b1caa27840a86eb17071dc8973b690650aee78f1
|
refs/heads/master
| 2020-03-11T11:27:52.385123
| 2018-04-14T18:19:46
| 2018-04-14T18:19:46
| 129,970,302
| 1
| 0
| null | 2018-04-17T22:03:56
| 2018-04-17T22:03:56
| null |
UTF-8
|
R
| false
| true
| 534
|
rd
|
linebreak.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linebreak.R
\name{linebreak}
\alias{linebreak}
\title{Make linebreak in LaTeX Table cells}
\usage{
linebreak(x, align = c("l", "c", "r"), double_escape = F)
}
\arguments{
\item{x}{A character vector}
\item{align}{Choose from "l", "c" or "r"}
\item{double_escape}{Whether special character should be double escaped.
Default is FALSE.}
}
\description{
This function generate LaTeX code of \code{makecell} so that users
can have linebreaks in their table
}
|
b19ab427d404ff7c81e2384d32f37f09724dae0c
|
efb67b529095add05d77312f981305690655b45a
|
/ggplot2/Programming with ggplot2/print.ggplot/example1.R
|
ebb13d66b6a3cbf86bb67eed80698e91df27c11c
|
[] |
no_license
|
plotly/ssim_baselines
|
6d705b8346604004ae16efdf94e425a2989b2401
|
9d7bec64fc286fb69c76d8be5dc0899f6070773b
|
refs/heads/main
| 2023-08-14T23:31:06.802931
| 2021-09-17T07:19:01
| 2021-09-17T07:19:01
| 396,965,062
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 176
|
r
|
example1.R
|
colours <- list(~class, ~drv, ~fl)
p <-
# Doesn't seem to do anything!
for (colour in colours) {
ggplot(mpg, aes_(~ displ, ~ hwy, colour = colour)) +
geom_point()
}
|
9bb69a0adb561249e12c11b9f8699b13ce6e9b10
|
7472fc8576e1932c7d87e8a5792b11d82103f9ef
|
/marginal_mrp.R
|
80e125e793d5233e36d14e91cd57ae87f00692c0
|
[] |
no_license
|
gabgilling/Bayesian-Blogs
|
1b034f1dfcd1f9ec9823cbd9d5dd240915c747a6
|
fde423fb5dd00eb799aafc835c99bf5223f8baac
|
refs/heads/main
| 2023-07-22T01:27:51.155649
| 2021-09-07T14:18:24
| 2021-09-07T14:18:24
| 372,865,024
| 0
| 1
| null | 2021-07-13T13:44:34
| 2021-06-01T14:47:09
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 8,386
|
r
|
marginal_mrp.R
|
library(readr)
IFOP_21_04_2017 <- read_csv("Downloads/IFOP_21-04-2017.csv")
nuts_eb_dictionary <- read_csv("https://raw.githubusercontent.com/gabgilling/Thesis/master/Data/nuts_eb_dictionary.csv")
reg_preds <- read.csv("https://raw.githubusercontent.com/gabgilling/Thesis/master/Data/regional_predictors.csv")
census_age_gender <- read_csv("Downloads/csvoutput_HC55_2021_09_02_19_10.csv")
census_age_gender$GEO <- ifelse(census_age_gender$GEO == "FR3", "FR30", census_age_gender$GEO)
census_age_gender$NUTS2_old <- census_age_gender$GEO
census_age_gender <- merge(census_age_gender, reg_preds, by = "NUTS2_old")
census_age_gender$CAS <- NULL
census_age_gender$TIME <- NULL
census_age_gender$SIE <- NULL
census_age_gender$LOC <- NULL
census_age_gender$FLAGS <- NULL
census_age_gender$FOOTNOTES <- NULL
census_age_gender$COC<- NULL
census_age_gender$age_cat <- with(census_age_gender, ifelse(AGE %in% c("Y18", "Y19", "Y20-24"), "18-24",
ifelse(AGE %in% c("Y25-29", "Y30-34"), "25-34",
ifelse(AGE %in% c("Y35-39", "Y40-44", "Y45-49"), "35-49",
ifelse(AGE %in% c("Y50-64"), "50-64", "65+")))))
census_age_gender$region_new <- with(census_age_gender, ifelse(Region.Name %in% c("Basse-Normandie", "Haute-Normandie"), "Normandie", Region.Name))
census_age_gender$region_new <- with(census_age_gender, ifelse(Region.Name %in% c("Poitou-Charentes", "Aquitaine", "Limousin"), "Nouvelle-Aquitaine", region_new))
census_age_gender$region_new <- with(census_age_gender, ifelse(Region.Name %in% c("Picardie", "Nord-Pas-de-Calais"), "Hauts-de-France", region_new))
census_age_gender$region_new <- with(census_age_gender, ifelse(Region.Name %in% c("Bourgogne", "Franche-Comté"), "Bourgogne-Franche-Comté", region_new))
census_age_gender$region_new <- with(census_age_gender, ifelse(Region.Name %in% c("Languedoc-Roussillon", "Midi-Pyrénées"), "Occitanie", region_new))
census_age_gender$region_new <- with(census_age_gender, ifelse(Region.Name %in% c("Alsace", "Lorraine", "Champagne-Ardenne"), "Grand-Est", region_new))
census_age_gender$region_new <- with(census_age_gender, ifelse(Region.Name %in% c("Auvergne", "Rhône-Alpes"), "Auvergne-Rhône-Alpes", region_new))
# should I just use voting age records?
census_age_gender <- census_age_gender %>% group_by(region_new) %>% mutate(pop_region = sum(VALUE))
marginal_age <- census_age_gender %>% group_by(age_cat, region_new) %>% summarise(freq_a = sum(VALUE))
marginal_gender <- census_age_gender %>% group_by(SEX, region_new) %>% summarise(freq_g = sum(VALUE))
IFOP_21_04_2017 <- as.data.frame(apply(IFOP_21_04_2017, MARGIN = 2, FUN = function(x) ifelse(x == "-", 0, x)))
marginal_intentions_share_gender <- as.data.frame(cbind(c("M", "F"), as.numeric(IFOP_21_04_2017$`Emmanuel Macron`[3:4]), as.numeric(IFOP_21_04_2017$`Marine Le Pen`[3:4]),
as.numeric(IFOP_21_04_2017$`François Fillon`[3:4]), as.numeric(IFOP_21_04_2017$`Benoît Hamon`[3:4]),
as.numeric(IFOP_21_04_2017$`Jean-Luc Mélenchon`[3:4]),
as.numeric(IFOP_21_04_2017$`Nathalie Arthaud`[3:4]) +
as.numeric(IFOP_21_04_2017$`Philippe Poutou`[3:4]) +
as.numeric(IFOP_21_04_2017$`Jean Lassalle`[3:4]) +
as.numeric(IFOP_21_04_2017$`Nicolas Dupont-Aignan`[3:4]) +
as.numeric(IFOP_21_04_2017$`François Asselineau`[3:4]) +
as.numeric(IFOP_21_04_2017$`Jacques Cheminade`[3:4])))
# col.names = c("macron", "lepen"))
marginal_intentions_share_gender[, 2:ncol(marginal_intentions_share_gender)] <- apply(marginal_intentions_share_gender[, 2:ncol(marginal_intentions_share_gender)], MARGIN = 2, as.numeric)/100
colnames(marginal_intentions_share_gender) <- c("SEX", "macron_pct_g", "lepen_pct_g", "lr_pct_g", "ps_pct_g", "melenchon_pct_g", "other_pct_g")
marginal_intentions_share_age <- as.data.frame(cbind(c("18-24", "25-34", "35-49", "50-64", "65+"),
c(as.numeric(IFOP_21_04_2017$`Emmanuel Macron`[7:8]), as.numeric(IFOP_21_04_2017$`Emmanuel Macron`[10:12])),
c(as.numeric(IFOP_21_04_2017$`Marine Le Pen`[7:8]),as.numeric(IFOP_21_04_2017$`Marine Le Pen`[10:12])),
c(as.numeric(IFOP_21_04_2017$`François Fillon`[7:8]), as.numeric(IFOP_21_04_2017$`François Fillon`[10:12])),
c(as.numeric(IFOP_21_04_2017$`Benoît Hamon`[7:8]), as.numeric(IFOP_21_04_2017$`Benoît Hamon`[10:12])),
c(as.numeric(IFOP_21_04_2017$`Jean-Luc Mélenchon`[7:8]),as.numeric(IFOP_21_04_2017$`Jean-Luc Mélenchon`[10:12])),
c(as.numeric(IFOP_21_04_2017$`Nathalie Arthaud`[7:8]), as.numeric(IFOP_21_04_2017$`Nathalie Arthaud`[10:12])) +
c(as.numeric(IFOP_21_04_2017$`Philippe Poutou`[7:8]), as.numeric(IFOP_21_04_2017$`Philippe Poutou`[10:12])) +
c(as.numeric(IFOP_21_04_2017$`Jean Lassalle`[7:8]), as.numeric(IFOP_21_04_2017$`Jean Lassalle`[10:12])) +
c(as.numeric(IFOP_21_04_2017$`Nicolas Dupont-Aignan`[7:8]), as.numeric(IFOP_21_04_2017$`Nicolas Dupont-Aignan`[10:12])) +
c(as.numeric(IFOP_21_04_2017$`François Asselineau`[7:8]), as.numeric(IFOP_21_04_2017$`François Asselineau`[10:12])) +
c(as.numeric(IFOP_21_04_2017$`Jacques Cheminade`[7:8]), as.numeric(IFOP_21_04_2017$`Jacques Cheminade`[10:12]))))
marginal_intentions_share_age[, 2:ncol(marginal_intentions_share_age)] <- apply(marginal_intentions_share_age[, 2:ncol(marginal_intentions_share_age)], MARGIN = 2, as.numeric)/100
colnames(marginal_intentions_share_age) <- c("age_cat", "macron_pct_a", "lepen_pct_a", "lr_pct_a", "ps_pct_a", "melenchon_pct_a", "other_pct_a")
marginals <- merge(marginal_age, marginal_intentions_share_age, by = "age_cat")
marginals <- merge(marginals, marginal_gender, by ="region_new")
marginals <- merge(marginals, marginal_intentions_share_gender, by = "SEX")
marginals <- merge(marginals, census_age_gender %>% select(region_new, pop_region), by = "region_new")
marginals <- distinct(marginals)
marginals$macron_freq_a <- with(marginals, freq_a * macron_pct_a / pop_region)
marginals$macron_freq_g <- with(marginals, freq_g * macron_pct_g / pop_region)
marginals$macron_raw_a <- with(marginals, freq_a * macron_pct_a)
marginals$macron_raw_g <- with(marginals, freq_g * macron_pct_g)
## first round results
french_elections_2017_first_round <- read_csv("Documents/french_elections_2017_first_round.csv")
french_elections_2017_first_round <- french_elections_2017_first_round[c(1,grep("_pct", names(french_elections_2017_first_round)))]
fr2017 <- french_elections_2017_first_round %>% mutate(other_pct = dupontaignan_pct +
lasalle_pct + pouton_pct + asselineau_pct+ arthaud_pct + cheminade_pct)
fr2017 <- fr2017 %>% select(region_new, macron_pct ,lepen_pct, fillon_pct ,melenchon_pct, hamon_pct,other_pct)
fr2017 <- fr2017 %>% rename(lr_pct = fillon_pct, ps_pct = hamon_pct)
fr2017[, 2:ncol(fr2017)] <- apply(fr2017[, 2:ncol(fr2017)], MARGIN = 2, as.numeric)/100
marginals_results <- merge(marginals, fr2017, by = "region_new")
fit_macron2017 <- stan_glm(data = marginals_results, macron_pct ~ macron_raw_a + macron_raw_g)
summary(lm(data = marginals_results, macron_pct ~ macron_pct_a + macron_pct_g))
plot(fit_macron2017)
fit_macron2017_glmer <- stan_glmer(data = marginals_results, macron_pct ~ macron_freq_a + macron_freq_g + (1|region_new))
plot(fit_macron2017_glmer, digits = 3)
|
0bf5d8a6d84a704ab7f1856d89908c14dc7840b5
|
b9ebd05de9d31efc8097c1336f75aa4e14b1656b
|
/NoteBook_R/Book_Note_R语言编程艺术/002_unit_2_example_01_扩展案例_寻找连续出现1的流程.r
|
e85ea6b77c8d3258eea6a404d786ae1c4becac52
|
[] |
no_license
|
moss1225/R_practice_program_1
|
056b659f3e56c6cd7f532ded839ec665dc0591be
|
5bbf9abcccd0e30655fc56080c687c2e807d7da1
|
refs/heads/master
| 2021-01-23T05:14:20.901857
| 2018-06-25T08:09:01
| 2018-06-25T08:09:01
| 86,286,838
| 1
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 611
|
r
|
002_unit_2_example_01_扩展案例_寻找连续出现1的流程.r
|
#假设一个向量由若干0和1构成,我们想找出其中连续出现1的游程。例如,对于向量
#(1,0,0,1,1,10,1,1),从它第4索引处开始又称为3的游程,而长度为2的游程分别开始于4,5,8处
#因此,用函数FindRuns(c(1,0,0,1,1,10,1,1),2)返回又成为2的开始索引
run_data=c(1,0,0,1,1,10,1,1)
#↑输入原始数据
FindRuns<-function(x,k){
#↑创建函数,x为检测数据,k为游程的长度
n<-length(x)
runs<-NULL
for(i in 1:n-k+1){
if(all(x[i:(i+k-1)]==1))runs<-c(runs,i)
}
return(runs)
}
FindRuns(run_data,2)
#↑应用数据
|
44520a71d20b9da8279b59b716a67f26fd174c38
|
7f489d03d1eec0cf181b2c50d18253ff0ad9e61a
|
/R/straineff_support.R
|
9535254379cd9fefba862cdc14a69bab33d98468
|
[] |
no_license
|
gkeele/Diploffect.INLA
|
4fa4b8bfc75b864f9b1b417ca0e95a25acf1ea9e
|
8019dd58957d77f6445575d74978bf3c3bbf8411
|
refs/heads/master
| 2023-05-29T08:09:51.154878
| 2023-05-16T01:22:11
| 2023-05-16T01:22:11
| 86,735,774
| 3
| 1
| null | 2019-10-25T19:04:38
| 2017-03-30T18:32:47
|
R
|
UTF-8
|
R
| false
| false
| 9,316
|
r
|
straineff_support.R
|
## Return the nearest marker name to the position at chromosome chr.
## position is in bp
get.nearest.marker <- function(h, chr, position) {
chromosomes = h$genotype$genome$chromosome
bps = h$genotype$genome$bp
markers = h$genotype$genome$marker
chr.idx = which(chromosomes == chr)
idx = which.min(abs(bps[chr.idx] - position))
if (abs(bps[chr.idx[idx]] - position) > 1000000) {
print("The nearest marker is still very far from the specified position.")
}
return(list(idx=idx, name=markers[chr.idx[idx]]))
}
incidence.matrix <- function(fact)
{
m=diag(nlevels(fact))[fact,]
colnames(m)=levels(fact)
return(m)
}
shannon.entropy <- function(p){
if (min(p) < 0 || sum(p) <= 0) {
p[p<=0]=0
}
p.norm <- p[p>0]/sum(p)
return(-sum(log2(p.norm)*p.norm))
}
straineff.mapping.matrix <- function(M=8, matrixtype){
T=M*(M+1)/2
TT=M*(M+1)/2
mapping<-matrix(rep(0,T*M),M,T)
if( matrixtype %in% c("hmm")){
## GAIN Matrix
idx<-1;
for (i in 1:M){
for (j in 1:(i)){
mapping[i,idx]<-mapping[i,idx]+1;
mapping[j,idx]<-mapping[j,idx]+1;
idx<-idx+1;
}
}
}
else {
## HAPPY matrix
idx<-1;
for (i in 1:M){
mapping[i,idx]<- mapping[i,idx]+2
idx<-idx+1;
}
for (i in 2:M){
for (j in 1:(i-1)){
mapping[i,idx]<-mapping[i,idx]+1;
mapping[j,idx]<-mapping[j,idx]+1;
idx<-idx+1;
}
}
}
return(mapping)
}
straineff.mapping.matrix.happy <- function(M=8){
T=M*(M+1)/2
TT=M*(M+1)/2
mapping<-matrix(rep(0,T*M),M,T)
## HAPPY matrix
idx<-1;
for (i in 1:M){
mapping[i,idx]<- mapping[i,idx]+2
idx<-idx+1;
}
for (i in 2:M){
for (j in 1:(i-1)){
mapping[i,idx]<-mapping[i,idx]+1;
mapping[j,idx]<-mapping[j,idx]+1;
idx<-idx+1;
}
}
return(mapping)
}
straineff.mapping.pair <- function(M, matrixtype){
T = M * (M + 1) / 2
mapping <- matrix(0, 2, T)
if( matrixtype %in% c("happy")){
## HAPPY matrix
idx<-1;
for (i in 1:M){
mapping[, idx] <- c(i, i)
idx<-idx+1;
}
for (i in 2:M){
for (j in 1:(i-1)){
mapping[1, idx] <- i
mapping[2, idx] <- j
idx <- idx + 1;
}
}
}
else {
stop("straineff.mapping.pair does not support other matrix type yet.")
}
mapping
}
straineff.smooth.probability.matrix <- function(N, data){
p <- data[1:N,]
for (i in 1:N){
total = sum(p[i,]) + 0.0000036
for (j in 1:36){
p[i,j] = (p[i,j] + 0.0000001) / total
}
}
p <- t(matrix(unlist(p), ncol=N, byrow=TRUE))
p
}
straineff.get.posterior.matrix <- function(M, N, mcmc.matrix){
TT=M*(M+1)/2
data <- mat.or.vec(N,M)
x <- mat.or.vec(N,TT)
lmapping<-mat.or.vec(TT,1)
rmapping<-mat.or.vec(TT,1)
idx<-1;
for (i in 1:M){
lmapping[idx]<- i
rmapping[idx]<- i
idx<-idx+1;
}
for (i in 2:M){
for (j in 1:(i-1)){
lmapping[idx]<- i
rmapping[idx]<- j
idx<-idx+1;
}
}
ss=mcmc.matrix[[1]]
for (i in 1:N){
r=table(ss[[1]][,paste('idx[',i,']',sep="")])
x[i,as.numeric(names(r))]=r/(sum(r))
}
x
}
straineff.prior.entropy <- function(N,data){
ret <- apply(data,1,shannon.entropy)
ret <- sum(unlist(ret))
ret
}
sic <- function(p) {
if (min(p) < 0 || sum(p) <= 0) {
p[p <= 0] = 0
}
p.norm <- p[p > 0] / sum(p)
N <- length(p.norm)
p.norm <- p.norm[which(p.norm >= 0.00000001)]
sum(p.norm*log(p.norm/rep(1/N, length(p.norm))))
}
straineff.prior.sic <- function(N, data) {
ret <- apply(data, 1, sic)
sum(unlist(ret)) / N
}
straineff.prior.sic.vector <- function(N, data) {
ret <- apply(data, 1, sic)
unlist(ret)
}
straineff.posterior.entropy <- function(N,mcmc.matrix){
straineff.prior.entropy(N,straineff.get.posterior.matrix(8,mcmc.matrix))
}
straineff.true.founder.prior <- function(Y,N,M,phenotype.name,Y1,data){
Z1=Y1
idx<-1;
happymap <- mat.or.vec(M,M)
for (i in 1:M){
happymap[i,i]<-idx
idx <- idx+1
}
for (i in 2:M){
for (j in 1:(i-1)){
happymap[i,j]<-idx
idx<-idx+1;
}
}
mapping=as.numeric(phenotype.name)
p <- mat.or.vec(N,M)
for ( i in 1:N){
Z1[i,]=Y1[mapping[i],]
}
ret=0
for (i in 1:N){
x=as.numeric(Z1[i,3])
y=as.numeric(Z1[i,5])
if (x>y)
ret=ret+data[i,happymap[x,y]]
else
ret=ret+data[i,happymap[y,x]]
}
ret
}
straineff.true.founder.posterior <- function(Y,N,M,phenotype.name,Y1,mcmc.matrix){
straineff.true.founder.prior(Y,N,M,phenotype.name,Y1,straineff.get.posterior.matrix(8,mcmc.matrix))
}
straineff.extra <- function(H, h) {
extra <- NULL
if (H > 1) extra <- paste(h, ',', sep="")
extra
}
get.extra <- function(H=1, h=1) {
extra <- NULL
if (H > 1) extra <- paste(h, ',', sep="")
return(extra)
}
summarize.beta <- function(M, dat, H=1, h=1){
beta = mat.or.vec(M,niter(dat))
mbeta = mat.or.vec(M,1)
extra <- get.extra(H, h)
for (i in 1:M){
beta[i,] = dat[,paste('beta[',extra, i,']',sep="")]
}
for (i in 1:niter(dat)){
beta[,i] = beta[,i] - mean(beta[,i])
}
for (i in 1:M){
mbeta[i] = mean(beta[i,])
}
return (mbeta)
}
summarize.weighted.beta <- function(M, N, mcmc.mat, haplotype.prior, H=1, h=1){
beta = mat.or.vec(M, niter(mcmc.mat))
w = mat.or.vec(niter(mcmc.mat), 1)
mbeta = mat.or.vec(M, 1)
extra <- get.extra(H, h)
for (i in 1:M){
beta[i,] = mcmc.mat[, paste('beta[', extra, i, ']', sep="")]
}
## mcmc.mat[, 'deviance'] is the deviance defined in JAGS (not proper
## deviance though)
## defined as -2*logDensity(model)
## convert it back to log likelihood
w = mcmc.mat[, 'deviance'] * (-0.5)
lprior = log.haplotype.prior(N, haplotype.prior, mcmc.mat)
w = exp(w)
for (i in 1:niter(mcmc.mat)) {
beta[,i] = beta[,i] - mean(beta[,i])
}
for (i in 1:M) {
mbeta[i] = weighted.mean(beta[i,], w)
}
print(mbeta)
print(summarize.beta(M, mcmc.mat))
return (mbeta)
}
traces.deviated.effects <- function(M, dat, H=1, h=1) {
total = M * (M + 1) / 2
gamma = mat.or.vec(total, niter(dat))
extra <- get.extra(H, h)
for (j in (M + 1):total) {
gamma[j, ] = dat[, paste('gamma[', extra, j ,']', sep="")]
}
return (gamma)
}
traces.diplotype.effects <- function(M, dat, deviated, H=1, h=1) {
total = M * (M + 1) / 2
beta = mat.or.vec(M, niter(dat))
gamma = mat.or.vec(total, niter(dat))
diplotype.effects = mat.or.vec(total, niter(dat))
extra <- get.extra(H, h)
for (j in 1:M){
beta[j, ] = dat[, paste('beta[', extra, j, ']', sep="")]
}
if (deviated) {
for (j in (M + 1):total){
gamma[j, ] = dat[, paste('gamma[', extra, j ,']', sep="")]
}
}
mapping.matrix <- straineff.mapping.matrix(M, "happy")
for (i in 1:niter(dat)){
for (j in 1:total) {
diplotype.effects[j, i] = t(mapping.matrix[, j]) %*% beta[, i]
if (deviated) {
diplotype.effects[j, i] = diplotype.effects[j, i] + gamma[j, i]
}
}
diplotype.effects[, i] = diplotype.effects[, i] - mean(diplotype.effects[, i])
}
return(diplotype.effects)
}
summarize.diplotype.effects <- function(M, dat, deviated, H=1, h=1) {
total = M * (M + 1) / 2
diplotype.effects <- traces.diplotype.effects(M, dat, deviated, H, h)
mean.diplotype.effects = mat.or.vec(1, total)
for (j in 1:total){
mean.diplotype.effects[j] = mean(diplotype.effects[j, ])
}
return(mean.diplotype.effects)
}
#' Average calibration error of the posterior haplotype assignment.
#'
#' For individual i, data[i, ] holds prior probabilities over haplotypes
#' and mcmc.matrix[, 'idx[i]'] holds the sampled index at each iteration;
#' indices appear to refer to the ascending sort order of the row, hence
#' the map2 remapping (TODO confirm against the sampler's encoding).
#' The posterior frequency with which the sampled haplotype equals
#' true.haplotype[1] is compared with its prior probability, and the mean
#' difference over the N individuals is returned.
summarize.haplotype <- function(N, data, true.haplotype, mcmc.matrix){
  n.iter = niter(mcmc.matrix)
  map2 = data  # row i is replaced by the sort permutation of data[i, ]
  delta = 0
  for ( i in 1:N){
    ret = sort.int(data[i,], index.return=T)
    map2[i,] = ret$ix
    s = mcmc.matrix[,paste('idx[', i, ']', sep="")]  # sampled index per iteration
    estimate = (sum(as.numeric(map2[i, s]) == true.haplotype[1])/n.iter)
    real = data[i, true.haplotype[1]]
    delta = delta + (estimate - real)
  }
  return (delta/N)
}
#' Per-iteration log prior of the sampled haplotype configuration.
#'
#' Returns a vector of length niter(mcmc.matrix): for each iteration, the
#' sum over individuals of the log prior probability of the haplotype
#' index sampled for that individual. Sampled indices appear to be stored
#' relative to the ascending sort order of each row of `data`, hence the
#' map2 lookup (TODO confirm encoding matches summarize.haplotype()).
log.haplotype.prior <- function(N, data, mcmc.matrix){
  n.iter = niter(mcmc.matrix)
  map2 = data
  r = rep(0, n.iter)
  for ( i in 1:N) {
    ret = sort.int(data[i,], index.return=T)
    map2[i,] = ret$ix
    s = mcmc.matrix[,paste('idx[', i, ']', sep="")]
    r = r + log(data[i, map2[i, s]])  # vectorised over the n.iter samples
  }
  return (r)
}
#' Combine additive haplotype effects and dominance deviations into one
#' effect per diplotype.
#'
#' @param beta additive effects, one per haplotype (length M).
#' @param deviation.effects dominance deviations, one per diplotype
#'   (length M*(M+1)/2).
#' @return a numeric vector of M*(M+1)/2 diplotype effects.
calculate.diplotype.effects <- function(beta, deviation.effects) {
  n.strains <- length(beta)
  design <- straineff.mapping.matrix(n.strains, "happy")
  n.diplo <- n.strains * (n.strains + 1) / 2
  effects <- mat.or.vec(n.diplo, 1)
  for (d in seq_len(n.diplo)) {
    effects[d] <- t(design[, d]) %*% beta + deviation.effects[d]
  }
  return(effects)
}
#' Keep only the `numprop` largest entries of each row, zeroing the rest,
#' then renormalise each row to sum to 1.
#'
#' Bug fix: the original zeroed `data[i, which(x$ix <= MM - numprop)]`,
#' i.e. it indexed the data row with positions taken from the *sorted*
#' order, which zeroes an essentially arbitrary set of columns. The
#' sorted-order positions must be mapped back to original columns via
#' `x$ix` so that the MM - numprop *smallest* values are removed.
#'
#' @param data numeric matrix; one probability distribution per row.
#' @param numprop number of largest entries to keep in each row.
#' @return the matrix with small entries zeroed and rows renormalised.
remove.small.probability <- function(data, numprop=36){
  MM = dim(data)[2]
  n.drop = MM - numprop
  for (i in 1:dim(data)[1]) {
    if (n.drop > 0) {
      x = sort(data[i, ], index.return=TRUE)
      # x$ix[1:n.drop] are the original columns of the n.drop smallest values
      data[i, x$ix[seq_len(n.drop)]] = 0
    }
    data[i, ] = data[i, ]/sum(data[i, ])
  }
  return(data)
}
#' Returns the rank-based inverse normal transformation
#'
#' Maps a phenotype vector to normal quantiles of its offset ranks,
#' leaving NA entries as NA.
#'
#' @param phenotype A vector of phenotype values to transform.
#' @param prop DEFAULT: 0.5. Rank offset that keeps the maximum of
#'   phenotype from mapping to qnorm(1) = Inf.
#' @export
#' @examples rint(rnorm(10))
rint <- function(phenotype, prop=0.5){
  n.obs <- sum(!is.na(phenotype))
  shifted.ranks <- rank(phenotype, na.last="keep") - prop
  qnorm(shifted.ranks / n.obs)
}
|
052c3591ffa47dd116e91d94b5fc11e430f1bae2
|
64f45090c6858ba1d3c8a1cdd52078f22df04d9b
|
/CSE41097_Introduction_to_R/Assignment2_W5-7/assignment2_soln.R
|
3a87094ce10b2cea548ac5f67909e6e8e88db1a7
|
[] |
no_license
|
anhnguyendepocen/UCSD_Data_Mining_Certificate
|
556ccdcacb454c0ac48e44dda08d79fa127f6802
|
423a11cc4bead8696330683f1700243e5e52bd73
|
refs/heads/master
| 2021-06-07T15:30:38.072421
| 2016-11-15T22:02:26
| 2016-11-15T22:02:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,236
|
r
|
assignment2_soln.R
|
##
## Assignment 2 - Solution
##
##
## -----------------------------------------------------------
## Problem 1
## -----------------------------------------------------------
##
# Report how many values of x lie more than 3 standard deviations from
# the mean; prints (and invisibly returns) a one-line summary message.
extreme <- function(x) {
  is.extreme <- abs(x - mean(x)) > 3 * sd(x)
  n.extreme <- sum(is.extreme)
  if (n.extreme > 0) {
    print(paste("There are", n.extreme, "extreme values found."))
  } else {
    print("There are no extreme values.")
  }
}
# Smoke test on 1000 standard-normal draws (roughly 0.3% of values are
# expected to fall beyond 3 SD).
test=rnorm(1000)
extreme(test)
##
## -----------------------------------------------------------
## Problem 2
## -----------------------------------------------------------
##
# Circle/sphere calculator dispatching on a case-insensitive method code:
#   AC = area of circle, CC = circumference of circle,
#   VS = volume of sphere, AS = surface area of sphere.
# Results are rounded to 3 decimals; unknown codes raise an error.
calCS <- function(cal, r) {
  value <- switch(toupper(cal),
                  AC = pi * r^2,
                  CC = 2 * pi * r,
                  VS = 4 * pi * r^3 / 3,
                  AS = 4 * pi * r^2,
                  stop("Your method is not supported"))
  round(value, 3)
}
calCS('ac',4)  # example: area of a circle with radius 4
##
## -----------------------------------------------------------
## Problem 3
## -----------------------------------------------------------
##
# Print the circle area for each radius 5, 10, 15, 20, 25.
radii=seq(5,25,5)
for (i in radii) {
  print(calCS('AC',i))
}
##
## -----------------------------------------------------------
## Problem 4
## -----------------------------------------------------------
##
library(MASS)  # provides the `painters` data set used below
## Create a data set which contains observations with Colour >=17 and School equals "D"
d1=painters[painters$Colour>=17 & painters$School=='D',]
## Create a data set that contains only Da Udine and Barocci.
d2=painters[is.element(row.names(painters), c('Da Udine','Barocci')),]
## Create a data set which contains observations with Colour >=17 and School equals "D",
## but only with the Composition and Drawing variables.
d3=painters[painters$Colour>=17 & painters$School=='D',
c('Composition','Drawing')]
## Create a categorical variable Comp.cat with three approximate equal levels based on Composition.
# Tertile boundaries (0, 1/3, 2/3, 1 quantiles) of Composition.
boundry=quantile(painters$Composition,seq(0,1,by=1/3))
painters$Comp.cat=cut(painters$Composition,boundry,labels=c(1,2,3),
include.lowest=T,right=F)
##
## -----------------------------------------------------------
## Problem 5
## -----------------------------------------------------------
##
data.wide = data.frame(Program = c("CONT", "RI", "WI"), s1 = c(85, 79, 84),
s2 = c(85, 79, 85), s3 = c(86, 79, 84), s4 = c(85, 80, 83))
## transform it into the long form.
long=reshape(data.wide,varying=list(c('s1','s2','s3','s4')),
v.names='score',timevar='time',direction='long')
## Then transform the long form back to the wide form.
wide=reshape(long,varying=list(c('s1','s2','s3','s4')),idvar='id',
v.names='score',timevar='time',direction='wide')
##
## -----------------------------------------------------------
## Problem 6
## -----------------------------------------------------------
##
# NOTE(review): machine-specific working directory; datList.RData must
# live in this folder for the load() below to succeed.
setwd("C:\\Documents and Settings\\xueli\\Desktop\\UCSD R Homework\\Assignment2")
load("datList.RData")
#' Stack (row-bind) all data frames in a list into one data frame.
#'
#' Improvement: the original grew the result with rbind() inside a loop,
#' copying the accumulated data frame on every iteration (O(n^2) work);
#' a single do.call(rbind, ...) binds all elements at once and returns
#' the same result, including the single-element case.
#'
#' @param alist a non-empty list of data frames with identical columns.
#' @return one data frame containing all rows of the list elements.
stackDataInList = function(alist){
  do.call(rbind, alist)
}
# Exercise the stacking helper on sub-lists of increasing size.
stackDataInList(datList[1])
stackDataInList(datList[c(1,3,4)])
stackDataInList(datList)
|
1e5e645ab1109bc2e65ffafb3c69f635e0fe1089
|
44eb2a73ee6ef5d7704d2676093d214bd27e989e
|
/2_spider_2_stream.r
|
329d3fc88303f7ab7beaf45a7a05841ee01e570c
|
[] |
no_license
|
DarrenTsungjenWu/Scrapping
|
4db316b1037a718ad32d2b88080eda0f444c08f9
|
62bcbb9448d00a868eccc25b30b62eed0a9ac58d
|
refs/heads/master
| 2023-01-31T10:50:59.958756
| 2020-12-13T14:59:44
| 2020-12-13T14:59:44
| 320,774,636
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 614
|
r
|
2_spider_2_stream.r
|
# Scrape video stream ids from a miaopai.com user page and download each
# stream as an .mp4 into d:/download.
library(tidyverse)
library(rvest)
library(stringr)
setwd("d:/download")
url <- "http://www.miaopai.com/u/paike_8o7ugjvf5c"
pages <- read_html(url)
# CAN NOT WORK
# links <- pages %>% html_nodes("div.video-player") %>%
# html_nodes("video") %>% html_attr("src")
# THIS WORKS
# Each player div carries the stream id in its data-scid attribute.
links <- pages %>% html_nodes("div.MIAOPAI_player") %>% html_attr("data-scid")
links <- paste0("http://gslb.miaopai.com/stream/", links, ".mp4")
ct = length(links)
# NOTE(review): `names<-c()` is dead -- overwritten on the next line.
names<-c()
# NOTE(review): the literal below is mojibake (originally a Chinese
# file-name prefix, likely GBK-encoded); kept byte-for-byte since it only
# affects the names of the saved files.
names=paste0(rep("��ʳ��Ƶ",ct),1:ct,".mp4")
# mode="wb" is required for binary downloads on Windows.
for(i in 1:ct){
  download.file(links[i],names[i],mode="wb")
}
|
cae0fde0e11e5684e204fed39f3bb49931e206d2
|
49e5c20f6542bebcb82a24e5326943a2314526be
|
/02. Code/Codes for Performance Measures.r
|
c8e164875cfbfb412d4768553aeb574f4284962f
|
[] |
no_license
|
jzhao0802/jassen
|
e11ab7f0967725fe945199403818f3580b0790b7
|
35144966476e80e2b1996f6981cda85842d5fac9
|
refs/heads/master
| 2021-05-10T15:20:07.394894
| 2018-01-23T02:55:01
| 2018-01-23T02:55:01
| 118,548,721
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,118
|
r
|
Codes for Performance Measures.r
|
# Codes for Calculating performance measure, i.e.TPR, FPR, TNR, FNR, Sensitivity, Precision(PPV), rate_accuracy
# Developed by Hui, Jin
# Feb,9,2015
# Take a LR model as an example to show how to calculate performance measure
options(digits = 7)
library(glmnet)
#1. set up working folder and read in model data
# NOTE(review): machine-specific paths; adjust before running elsewhere.
data_path <- 'D:\\Project_2014\\BRACE Relapse project\\004 Output'
data_file <- 'final_model_data_comb.csv';
raw_data <- read.table(paste(data_path,'\\',data_file,sep=''), header=T, sep=',')
all_variables_list <- names(raw_data)
treatment_variables <- c('idx_IFN','idx_GA','pre_idx_IFN','pre_idx_GA','pre_idx_NOMS')
switching_flag <- c('flag_NT_IFN','flag_NT_GA','flag_IFN_GA','flag_IFN_IFN', 'flag_GA_IFN')
# NOTE(review): the trailing '' entry below looks accidental; it is
# harmless in setdiff() but should be confirmed with the author.
reference_variables <- c('idx_GA','pre_idx_NOMS','der_sexF','pat_regionMW','idx_paytypeSMRU','idx_prodtypeP','idx_spec3',
'pre_non_ms_total_allowed4','pre_ms_total_allowed4','pre_ms_pharmacy_allowed2','pre_non_ms_pharmacy_allowed4',
'pre_non_ms_medical_allowed4','pre_ms_medical_allowed4','num_pre_meds4','num_pre_op_dx4','age4','num_pre_mri_any4',
'pchrlson3','num_pre_cort_oral3','num_pre_cort_iv3','num_pre_relapse_1yr1','')
# Keep only modelling variables: drop treatment, switching and reference columns.
variable_list_v1 <- setdiff(all_variables_list,c(treatment_variables,switching_flag,reference_variables))
# 2. Divided whole data into 2 parts, 75% traning sample and 25% test sample
source('D:\\Project_2014\\BRACE Relapse project\\003 R Code\\Sampling_ims.r')
model_data <- raw_data[,variable_list_v1]
datasets <- sampling_ims(model_data,0.75,'response',setseed=T,10)
training_data <- datasets[[1]]
test_data <- datasets[[2]]
# 3. Run LR on training sample
fit_std <- glm(response~., data=training_data, family=binomial)
# Get the predicted value on training sample and test sample
training_obs <- predict(fit_std, training_data, type="response")
test_obs <- predict(fit_std, test_data, type="response")
# 4. Calculate Performance Measures
# 1) The threshold, could change
pred_thresh <- mean(training_data$response)
# 2) Sort the predicted value on test sample, in case top PPV is required. (Optional)
# (the positional T is sort()'s `decreasing` argument)
pred_data <- sort(test_obs,T)
# 3) rename actual_data, could be test sample or training sample, depends on which measures you want to compute
actual_data <- test_data
# 4) Compute performance measures
num_actual_positive <- sum(actual_data$response) # Number of actual positive cases
num_actual_negative <- sum(1 - actual_data$response) # Number of actual negative cases
num_pred_positive <- length(which(pred_data >= pred_thresh)) # Number of positive predictions
num_pred_negative <- length(which(pred_data < pred_thresh)) # Number of negative predictions
# positive cases in predicted value, corresponding rows in actual data
# (matching is done by row names carried through predict()/sort())
pred_pos_in_actual <-actual_data[rownames(actual_data) %in% names(which(pred_data>=pred_thresh)),]
# Negative cases in predicted value, corresponding rows in actual data
pred_neg_in_actual <-actual_data[rownames(actual_data) %in% names(which(pred_data<pred_thresh)),]
true_post_rate <- sum(pred_pos_in_actual$response) / num_actual_positive # True positive rate
false_post_rate <- sum(1 - pred_pos_in_actual$response) / num_actual_negative # False positive rate
true_neg_rate <- sum(1 - pred_neg_in_actual$response) / num_actual_negative # True negative rate
false_neg_rate <- sum(pred_neg_in_actual$response) / num_actual_positive # False negative rate
rate_post <- num_pred_positive/nrow(actual_data) # Proportion of cases predicted as positive
sensitivity <- sum(pred_pos_in_actual$response) / num_actual_positive # Recall / sensitivity
precision <- sum(pred_pos_in_actual$response) / num_pred_positive # Precision / PPV
rate_accuracy <- (sum(pred_pos_in_actual$response) +sum(1 - pred_neg_in_actual$response)) / nrow(actual_data) # Classification accuracy (proportion of cases predicted correctly)
# Ends
|
7e46fb900f5424dd49852bead4054f7dede4fc75
|
0d49152a631649b54d78324b1fc4c32bc912ee42
|
/R/0_get_file_info.R
|
5ff4d3c287950966fb52b3d50fc53dc256e26e79
|
[] |
no_license
|
dfe-analytical-services/automated-data-qa
|
5900e22e8db564e4315a1f2bd716c2f5d23b9626
|
ad84a5dd977cff19e13129f5ae2ae6fdbc8a4522
|
refs/heads/main
| 2023-06-16T09:25:04.531317
| 2021-07-06T09:28:45
| 2021-07-06T09:28:45
| 365,194,193
| 1
| 0
| null | 2021-06-21T11:06:08
| 2021-05-07T10:14:44
|
HTML
|
UTF-8
|
R
| false
| false
| 1,028
|
r
|
0_get_file_info.R
|
# RECOMMENDED BASIC AUTOMATED QA CHECKS -
library(dplyr)
library(data.table)
library(tidyr)
library(janitor)
library(plotly)
library(DT)
library(kableExtra)
# STEP 1: read in the data ------------------------------------------------
data <- data.table::fread("data/testing2.csv") #Your file path here
metadata <- data.table::fread("data/testing2.meta.csv") #Your metadata file path here
# STEP 2: Use metadata to get list of filters and indicators --------------
#Get list of indicators (columns flagged as "Indicator" in the metadata)
indicators <- metadata %>%
  dplyr::filter(col_type == "Indicator") %>%
  dplyr::pull(col_name)
#Get list of filters: every data column that is not an indicator.
#Fix: select() with a bare external character vector is deprecated by
#tidyselect and ambiguous if a column were ever named "indicators";
#all_of() makes the intent explicit and errors on missing columns.
filters<- data %>%
  dplyr::select(-dplyr::all_of(indicators)) %>%
  names()
#Get list of publication-specific filters
publication_filters <- metadata %>%
  dplyr::filter(col_type == "Filter") %>%
  dplyr::select(col_name) %>%
  dplyr::pull(col_name)
#Get filter group combos for publication-specific filters
distinct_filter_groups <- data %>%
  dplyr::select(dplyr::all_of(publication_filters)) %>%
  dplyr::distinct()
|
4b74401c66bb60e405b0d16d98b4132ad37efbde
|
2e9fa21af79da31fc12d2867d2f06ad528327a37
|
/Frontiers_ALR_supplementary.R
|
de4223f6751743267dfdffc151acf2a0f3e186d7
|
[] |
no_license
|
michaelgreenacre/CODAinPractice
|
2872afe84e9a8c2c58e43bb0998abf0edaef30d4
|
b12a80f316fe8b0b6e4e7c3594cbd55862ac817e
|
refs/heads/master
| 2023-08-14T10:12:52.973238
| 2023-07-27T15:08:01
| 2023-07-27T15:08:01
| 146,437,698
| 26
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,394
|
r
|
Frontiers_ALR_supplementary.R
|
### Cancer data set
### read data from github site
cancer <- read.table("https://raw.githubusercontent.com/michaelgreenacre/CODAinPractice/master/Baxter_OTU_table.txt",
header=TRUE, check.names=FALSE)
dim(cancer)
# [1] 490 338
### remove first three columns to get the OTU dataset
cancer <- cancer[,-c(1:3)]
cancer.no0 <- cancer+1
# remove strong outlier, possibly an error
cancer.pro <- cancer.no0[,-265]/rowSums(cancer.no0[,-265])
### load easyCODA package
require(easyCODA)
### unweighted (i.e. equally weighted) option
starttime <- Sys.time()
cancer.alr <- FINDALR(cancer.pro, weight=FALSE)
endtime <- Sys.time()
difftime(endtime, starttime, units="secs")
# Time difference of 182.8436 secs on Toshiba latop
plot(cancer.alr$var.log,cancer.alr$procrust.cor)
cancer.alr$tot.var # [1] 1.530197
cancer.alr$procrust.max # [1] 0.9355615
cancer.alr$procrust.ref # [1] 269
cancer.alr$var.min # [1] 0.3082664
cancer.alr$var.ref # [1] 320
### weighted option
starttime <- Sys.time()
cancer.alrw <- FINDALR(cancer.pro, weight=TRUE)
endtime <- Sys.time()
difftime(endtime, starttime, units="secs")
# Time difference of 190.9042 secs on Toshiba laptop
cancer.alrw$tot.var # [1] 2.709184
cancer.alrw$procrust.max # [1] 0.9525636
cancer.alrw$procrust.ref # [1] 269
cancer.alrw$var.min # [1] 0.3082664
cancer.alrw$var.ref # [1] 320
### -----------------------------------------------------------------------------------------
### read meta data of Baxter microbiome study
meta <- read.delim("https://raw.githubusercontent.com/michaelgreenacre/CODAinPractice/master/Baxter_Metadata.txt",
header=TRUE, check.names=FALSE, sep="\t")
dim(meta)
# [1] 490 27
colnames(meta)
# [1] "sample" "fit_result" "Site" "Dx_Bin" "dx" "Hx_Prev" "Hx_of_Polyps"
# [8] "Age" "Gender" "Smoke" "Diabetic" "Hx_Fam_CRC" "Height" "Weight"
# [15] "BMI" "White" "Native" "Black" "Pacific" "Asian" "Other"
# [22] "Ethnic" "NSAID" "Abx" "Diabetes_Med" "stage" "Location"
# the group labels, also convert to numbers
dx <- meta[,"dx"]
table(dx)
# adenoma cancer normal
# 198 120 172
dx.num <- as.numeric(as.factor(dx))
table(dx.num)
# 1 2 3
# 198 120 172
### perform RDA of CLRs of the OTUS on the categorical variable dx (groups)
(cancer.rda <- rda(CLR(cancer.pro, weight=FALSE)$LR ~ factor(dx)))
# Inertia Proportion Rank
# Total 5.121e+02 1.000e+00
# Constrained 4.194e+00 8.189e-03 2 <- 0.82% of variance due to group differences
# Unconstrained 5.079e+02 9.918e-01 333 <- 99.18% unrelated to group differences
### permutation test of significance (9999 permutations)
set.seed(123)
anova(cancer.rda, permutations=9999)
# Df Variance F Pr(>F)
# Model 2 4.19 2.0106 1e-04 *** <- nevertheless, highly significant, p<0.0001
### variances explained in two-dimsneions in constrained and full spaces
100*cancer.rda$CCA$eig/cancer.rda$CCA$tot.chi
# RDA1 RDA2
# 74.62057 25.37943
100*cancer.rda$CCA$eig/cancer.rda$tot.chi
# RDA1 RDA2
# 0.6110919 0.2078403
### row coordinates in exact geometry in restricted 2-d space of group differences
cancer.rda.wa <- cancer.rda$CCA$wa
cancer.procrust.cor <- rep(0,nrow=ncol(cancer.pro))
### loop through the reference components but fit in the reduced space
starttime <- Sys.time()
for(j in 1:ncol(cancer.pro)) {
foo.alr <- ALR(cancer.pro, denom=j, weight=FALSE)$LR
foo.rda <- rda(foo.alr ~ factor(dx))
cancer.procrust.cor[j] <- protest(foo.rda$CCA$wa,cancer.rda.wa, permutations=0)$t0
}
endtime <- Sys.time()
difftime(endtime, starttime, units="secs")
# Time difference of 149.3339 secs on Toshiba laptop
max(cancer.procrust.cor)
# [1] 0.9996624
which(cancer.procrust.cor==max(cancer.procrust.cor))
# [1] 312
colnames(cancer.pro)[312]
# [1] "Otu000363"
### compute ALRs with this reference
cancer.alr312 <- ALR(cancer.pro, denom=312, weight=FALSE)$LR
(cancer.alr312.rda <- rda(cancer.alr312 ~ factor(dx)))
# Inertia Proportion Rank
# Total 6.514e+02 1.000e+00
# Constrained 4.194e+00 6.439e-03 2
# Unconstrained 6.472e+02 9.936e-01 333
cancer.alr312.wa <- cancer.alr312.rda$CCA$wa
### variances explained in constrained and full spaces
100*cancer.alr312.rda$CCA$eig/cancer.alr312.rda$CCA$tot.chi
# RDA1 RDA2
# 74.61953 25.38047
100*cancer.alr312.rda$CCA$eig/cancer.alr312.rda$tot.chi
# RDA1 RDA2
# 0.4804437 0.1634142
### plot 2-D configuration using all logratios
cancer.cols <- c("blue","red","forestgreen")
par(mar=c(4.2,4,3,1), mgp=c(2,0.7,0), font.lab=2)
plot(cancer.rda.wa, type="n", asp=1, main="Constrained LRA of OTUs",
xlab="LRA dimension 1 (74.6% / 0.61%)", ylab="LRA dimension 2 (25.4% / 0.21%)")
abline(v=0, h=0, col="gray", lty=2)
text(cancer.rda.wa, labels=substr(dx,1,1), col=cancer.cols[as.numeric(factor(dx))], cex=0.6)
set.seed(123)
CIplot_biv(cancer.rda.wa[,1],cancer.rda.wa[,2],
group=factor(dx), shade=TRUE, add=TRUE, groupcols=cancer.cols,
groupnames=c("A","C","N"))
set.seed(123)
CIplot_biv(cancer.rda.wa[,1],cancer.rda.wa[,2],
group=factor(dx), add=TRUE, groupcols=cancer.cols,
shownames=FALSE)
### plot 2-D configurations using best set of ALRs
par(mar=c(4.2,4,3,1), mgp=c(2,0.7,0), font.lab=2)
plot(cancer.alr312.wa, type="n", asp=1, main="RDA of ALRs w.r.t. 312",
xlab="RDA dimension 1 (74.6% / 0.48%)", ylab="RDA dimension 2 (25.4% / 0.16%)")
abline(v=0, h=0, col="gray", lty=2)
text(cancer.alr312.wa, labels=substr(dx,1,1), col=cancer.cols[as.numeric(factor(dx))], cex=0.6)
set.seed(123)
CIplot_biv(cancer.alr312.wa[,1],cancer.alr312.wa[,2],
group=factor(dx), shade=TRUE, add=TRUE, groupcols=cancer.cols,
groupnames=c("A","C","N"))
set.seed(123)
CIplot_biv(cancer.alr312.wa[,1],cancer.alr312.wa[,2],
group=factor(dx), add=TRUE, groupcols=cancer.cols,
shownames=FALSE)
### ----------------------------------------------------------------------------
### do all of above again with weighted components
### perform RDA of CLRs of the OTUS on the categorical variable dx (groups)
### average composition serves as default weights
c <- colMeans(cancer.pro)
(cancer.rdaw <- rda(CLR(cancer.pro)$LR%*%diag(sqrt(c)) ~ factor(dx)))
# Inertia Proportion Rank
# Total 2.714724 1.000000
# Constrained 0.017192 0.006333 2 <- 0.63% explained by group differences
# Unconstrained 2.697533 0.993667 333 <- 99.37% not related to group differences
### permutation test of significance (9999 permutations)
set.seed(123)
anova(cancer.rdaw, permutations=9999)
# Df Variance F Pr(>F)
# Model 2 0.01719 1.5519 0.0156 * <- still significant
### variances explained in constrained and full spaces
100*cancer.rdaw$CCA$eig/cancer.rdaw$CCA$tot.chi
# RDA1 RDA2
# 66.42767 33.57233
100*cancer.rdaw$CCA$eig/cancer.rdaw$tot.chi
# RDA1 RDA2
# 0.4206694 0.2126049
### row coordinates in exact geometry in restricted 2-d space of group differences
cancer.rdaw.wa <- cancer.rdaw$CCA$wa
cancer.procrustw.cor <- rep(0,nrow=ncol(cancer.pro))
### loop through the reference components but fit in the reduced space
starttime <- Sys.time()
for(j in 1:ncol(cancer.pro)) {
cc <- c*c[j]
cc <- cc[-j]
foo.alr <- ALR(cancer.pro, denom=j, weight=FALSE)$LR
foo.rda <- rda(foo.alr%*%diag(sqrt(cc)) ~ factor(dx))
cancer.procrustw.cor[j] <- protest(foo.rda$CCA$wa,cancer.rdaw.wa, permutations=0)$t0
}
endtime <- Sys.time()
difftime(endtime, starttime, units="secs")
# Time difference of 161.5026 secs on Toshiba laptop
max(cancer.procrustw.cor)
# [1] 0.9982797
which(cancer.procrustw.cor==max(cancer.procrustw.cor))
# [1] 241
colnames(cancer.pro)[241]
# [1] "Otu000262"
### compute ALRs with this reference
### compute the weights of the ALRs (later it is shown how to extract them from ALR object)
cancer.alr241 <- ALR(cancer.pro, denom=241)$LR
cc <- c*c[241]
cc <- cc[-241]
(cancer.alr241.rda <- rda(cancer.alr241 %*% diag(sqrt(cc)) ~ factor(dx)))
# Inertia Proportion Rank
# Total 1.461e-03 1.000e+00
# Constrained 6.740e-06 4.613e-03 2
# Unconstrained 1.454e-03 9.951e-01 279
cancer.alr241.wa <- cancer.alr241.rda$CCA$wa
### variances explained in constrained and full spaces
100*cancer.alr241.rda$CCA$eig/cancer.alr241.rda$CCA$tot.chi
# RDA1 RDA2
# 66.43916 33.56084
100*cancer.alr241.rda$CCA$eig/cancer.alr241.rda$tot.chi
# RDA1 RDA2
# 0.3064660 0.1548071
### plot 2-D configuration using all logratios
cancer.cols <- c("blue","red","forestgreen")
# invert second dimension to agree with previous plots
cancer.rdaw.wa[,2] <- -cancer.rdaw.wa[,2]
par(mar=c(4.2,4,3,1), mgp=c(2,0.7,0), font.lab=2)
plot(cancer.rdaw.wa, type="n", asp=1, main="Constrained weighted LRA of OTUs",
xlab="LRA dimension 1 (66.4% / 0.42%)", ylab="LRA dimension 2 (33.6% / 0.21%)")
abline(v=0, h=0, col="gray", lty=2)
text(cancer.rdaw.wa, labels=substr(dx,1,1), col=cancer.cols[as.numeric(factor(dx))], cex=0.6)
set.seed(123)
CIplot_biv(cancer.rdaw.wa[,1],cancer.rdaw.wa[,2],
group=factor(dx), shade=TRUE, add=TRUE, groupcols=cancer.cols,
groupnames=c("A","C","N"))
set.seed(123)
CIplot_biv(cancer.rdaw.wa[,1],cancer.rdaw.wa[,2],
group=factor(dx), add=TRUE, groupcols=cancer.cols,
shownames=FALSE)
### plot 2-D configurations using best set of ALRs
# invert second dimension to agree with previous plots
cancer.alr241.wa[,2] <- -cancer.alr241.wa[,2]
par(mar=c(4.2,4,3,1), mgp=c(2,0.7,0), font.lab=2)
plot(cancer.alr241.wa, type="n", asp=1, main="RDA of weighted ALRs w.r.t. 241",
xlab="RDA dimension 1 (66.4% / 0.31%)", ylab="RDA dimension 2 (33.6% / 0.15%)")
abline(v=0, h=0, col="gray", lty=2)
text(cancer.alr241.wa, labels=substr(dx,1,1), col=cancer.cols[as.numeric(factor(dx))], cex=0.6)
set.seed(123)
CIplot_biv(cancer.alr241.wa[,1],cancer.alr241.wa[,2],
group=factor(dx), shade=TRUE, add=TRUE, groupcols=cancer.cols,
groupnames=c("A","C","N"))
set.seed(123)
CIplot_biv(cancer.alr241.wa[,1],cancer.alr241.wa[,2],
group=factor(dx), add=TRUE, groupcols=cancer.cols,
shownames=FALSE)
### -----------------------------------------------------------------------------------------
### vaginal microbiome data set by Deng et al. (2018), cited and analysed by Wu et al. (2021)
### copy the data file Deng_vaginal_microbiome.txt on GitHub and read from clipboard (PC users)
vagina <- read.table("clipboard", check.names=FALSE)
### or read data from GitHub site
vagina <- read.table("https://raw.githubusercontent.com/michaelgreenacre/CODAinPractice/master/Deng_vaginal_microbiome.txt",
header=TRUE, check.names=FALSE)
vagina <- t(vagina)
dim(vagina)
# [1] 40 103
sum(vagina==0)/(nrow(vagina)*ncol(vagina)) #13% zeros
vagina1 <- vagina+1
vagina.pro <- vagina1/rowSums(vagina1)
require(easyCODA)
starttime <- Sys.time()
vagina.alr <- FINDALR(vagina.pro)
endtime <- Sys.time()
difftime(endtime, starttime, units="secs")
# Time difference of 0.7315788 secs on Toshiba.laptop
# Fix: the FINDALR result element is named 'tot.var' (see the weighted
# run below and the cancer analyses above); '$totvar' does not
# partial-match a name containing a dot and silently returned NULL.
vagina.alr$tot.var
# [1] 3.257595
### Best unweighted ALR reference for the vaginal data.
vagina.alr$procrust.max
# [1] 0.968543
vagina.alr$procrust.ref
# [1] 51
starttime <- Sys.time()
vagina.alrw <- FINDALR(vagina.pro, weight=TRUE)
endtime <- Sys.time()
difftime(endtime, starttime, units="secs")
# Time difference of 0.8090138 secs on Toshiba laptop
vagina.alrw$tot.var
# [1] 7.710076
vagina.alrw$procrust.max
# [1] 0.9825666
vagina.alrw$procrust.ref
# [1] 12
### this illustrates getting ALR weights from function ALR
vagina.alr12 <- ALR(vagina.pro, denom=12)
vagina.alr12.LR <- vagina.alr12$LR
vagina.alr12.LRwt <- vagina.alr12$LR.wt
### exact weighted logratio geometry
vagina.lra <- LRA(vagina.pro)
vagina.lra.rpc <- vagina.lra$rowpcoord
# percentage of total variance on the first two dimensions
100*vagina.lra$sv[1:2]^2/sum(vagina.lra$sv^2)
# [1] 63.39400 24.44925
par(mar=c(4.2,4,3,1), mgp=c(2,0.7,0), font.lab=2)
plot(vagina.lra.rpc, type="n", asp=1, main="LRA of vaginal microbiome",
xlab="LRA dimension 1 (63.4%)", ylab="LRA dimension 2 (24.4%)")
abline(v=0, h=0, col="gray", lty=2)
text(vagina.lra.rpc, labels=rownames(vagina), col="blue", cex=0.6)
### plot 2-D configuration using best set of ALRs
### note that weights in the ALR object are automatically used in the PCA
vagina.pca <- PCA(vagina.alr12)
vagina.pca.rpc <- vagina.pca$rowpcoord
vagina.lra.rpc <- vagina.lra$rowpcoord
100*vagina.pca$sv[1:2]^2/sum(vagina.pca$sv^2)
# [1] 76.67587 14.67200
par(mar=c(4.2,4,3,1), mgp=c(2,0.7,0), font.lab=2)
# NOTE(review): the x-axis label says 74.7% while the computed value
# above is 76.7% -- confirm which figure the label should carry.
plot(vagina.pca.rpc, type="n", asp=1, main="PCA of ALRs w.r.t. 12",
xlab="PCA dimension 1 (74.7%)", ylab="RDA dimension 2 (14.7%)")
abline(v=0, h=0, col="gray", lty=2)
text(vagina.pca.rpc, labels=rownames(vagina), col="blue", cex=0.6)
|
e924f55d9f28a1e9cbdf33ea5f2f2eb125d23e54
|
68d2ba3f519597e2146f71080712c78d9f34647f
|
/R/run_app.R
|
450bd96e3942bb75a7c74af70e48a8a9de6b3bc9
|
[
"MIT"
] |
permissive
|
igemsoftware2020/ClusteRsy-Linkoping
|
5f040e282dbd2b163cef7be3902060f271121081
|
7399466a2e11e27087ce531357708b983fb365ec
|
refs/heads/master
| 2023-01-02T13:03:44.125587
| 2020-10-27T22:29:01
| 2020-10-27T22:29:01
| 264,438,493
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 575
|
r
|
run_app.R
|
#' Run the Shiny Application
#'
#' @param db Optional path to the SQLite database; when NULL the
#'   'igem.db' file bundled with the ClusteRsy package is used.
#' @param ... A series of options to be used inside the app.
#'
#' @export
#' @importFrom shiny shinyApp
#' @importFrom golem with_golem_options
run_app <- function(db = NULL,...){
  if (is.null(db)){
    db <- system.file("database", "igem.db", package = "ClusteRsy")
  }
  # NOTE(review): global assignment -- presumably app_ui/app_server read
  # `db` from the global environment; confirm before replacing `<<-`.
  db <<- db
  with_golem_options(
    app = shinyApp(
      ui = app_ui,
      server = app_server,
      # options = (list(
      # host = "192.168.50.55",
      # port = 80,
      # launch.browser = F
      # )
      # )
    ),
    golem_opts = list(...)
  )
}
|
c3f4aba2cd17ac530dabf21e7d3dab560c42a077
|
f47188d23c3fb491b42d80dcc1d9b7cbca8f146d
|
/DTL.R
|
6328927ea737413631fbf59c743a18fff5b4d2fe
|
[] |
no_license
|
nistara/Pablo
|
cd848ff5581ffa3b1e7b9f48f62f0f6b86724498
|
ef515a9f8728ace801af078907e4ef1ac81b3cc8
|
refs/heads/master
| 2021-01-20T15:54:40.365848
| 2017-05-08T20:32:16
| 2017-05-08T20:32:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70,745
|
r
|
DTL.R
|
### PRRS spread model set-up: load farm and movement data, build the
### contact (rho) matrices, define epidemiological parameters, and seed
### one initially infected farm.
#Add colClasses for these to specify the types.
# NOTE(review): `matrix` shadows base::matrix for the rest of the script.
matrix <- read.csv("matrix_to_D.csv" )
Farms <- read.csv("Farms_to_D.csv" )
#library(deSolve)
library(ggplot2)
library(dplyr)
library(plyr)
library(reshape2)
library(base)
library(MASS)
library(FME)
library(lhs)
library(zoo)
library(igraph)
library(EpiContactTrace)
library(RColorBrewer)
library(maps)
library(doBy)
library(grid)
library(gridExtra)
library(nlme)
library(multcomp)
# Rho as matrices ------------------------------------------------
# One destination-by-origin contact matrix per rho scenario.
rho1 <- acast(matrix, destination ~ origin, value.var = 'rho1')
rho2 <- acast(matrix, destination ~ origin, value.var = 'rho2')
rho3 <- acast(matrix, destination ~ origin, value.var = 'rho3')
rho4 <- acast(matrix, destination ~ origin, value.var = 'rho4')
rho5 <- acast(matrix, destination ~ origin, value.var = 'rho5')
rho6 <- acast(matrix, destination ~ origin, value.var = 'rho6')
rho7 <- acast(matrix, destination ~ origin, value.var = 'rho7')
rho8 <- acast(matrix, destination ~ origin, value.var = 'rho8')
rho9 <- acast(matrix, destination ~ origin, value.var = 'rho9')
diag(rho1) <- 1;diag(rho2) <- 1;diag(rho3) <- 1;diag(rho4) <- 1;diag(rho5) <- 1;diag(rho6) <- 1;diag(rho7) <- 1;diag(rho8) <- 1;diag(rho9) <- 1 # Fill diagonal with 1, due to rho_ii=1
# PARAMETERS -------------------------------------------------------------
# Besic reproduction number (R0)
# Jeong (2014)
R0_l_s <- 0.14 # (R0 of low virulence in sows. min)
R0_m_s <- 3 # (R0 of medium virulence in sows. mean)
R0_h_s <- 3.22 # (R0 of high virulence in sows. max)
R0_l_p <- 7.26 # (R0 of low virulence in piglets. min)
R0_m_p <- 9.26 # (R0 of medium virulence in piglets. mean)
R0_h_p <- 13.13 # (R0 of high virulence in piglets. max)
# Charpin (2012)
R0_l_p2 <- 1.8 # (R0 of low virulence in piglets. min)
R0_m_p2 <- 2.6 # (R0 of medium virulence in piglets. mean)
R0_h_p2 <- 3.3 # (R0 of high virulence in piglets. max)
# Days of infection (D)
# Jeong (2014)
D_us <- 4 # Days unvaccinated sows
D_vs <- 2.8 # Days vaccinated sows
D_up <- 8 # Days unvaccinated pigs
# Linhares (2012)
D_us2 <- 10 # Days unvaccinated sows
D_vs2 <- 6.4 # Days vaccinated sows
# Weeks of active immunity
D_imm <- 36 # (min=26, mean=36, max=52)
# Beta (without using vaccine) in sow farms: beta = R0 / infectious period
beta_l_s <- R0_l_s/D_us
beta_m_s <- R0_m_s/D_us
beta_h_s <- R0_h_s/D_us
# Beta (using vaccine) in sow farms
betaV_l_s <- R0_l_s/D_vs
betaV_m_s <- R0_m_s/D_vs
betaV_h_s <- R0_h_s/D_vs
# Beta (without using vaccine) in pigs farms
beta_l_p <- R0_l_p2/D_up
beta_m_p <- R0_m_p2/D_up
beta_h_p <- R0_h_p2/D_up
# Mortality in sow farms
v_s = 0.001 #(min=0, max=0.002)
# Mortality in pigs farms
v_p = 0.07 #(min=0.02, max=0.18)
# INFECT ONE FARM AT TIME=0 -----------------------------------------------
# Farms without vaccination or other action to control PRRS (low virulance)
# Assign posistive animals (~1% of the population infeceted) in one farm
set.seed(0)
Farms$X <- NA; Farms$Y <- NA; Farms$Z <- 0 # Create new columns for number of susceptible (X), infected (Y) and recovered (Z) within each farm
which(Farms$within3k>5 & Farms$links>10) # Criteria to seed the first infected (10% of inventory)
#which(Farms$type=="Fa" & Farms$county=="Stevens")
# Randomly pick exactly one of the farms meeting the criteria as the seed.
Farms$Y[which(Farms$within3k>5 & Farms$links>10)] <- sample(c(rep(0,nrow(Farms[which(Farms$within3k>5 &
Farms$links>10),])-1),1),
nrow(Farms[which(Farms$within3k>5 & Farms$links>10),])) # Fill with 0 and 1 farms under criteria of above
#Farms$Y[which(Farms$type=="Fa" & Farms$county=="Stevens")] <- sample(c(rep(0,nrow(Farms[which(Farms$type=="Fa" & Farms$county=="Stevens"),])-1),1),nrow(Farms[which(Farms$type=="Fa" & Farms$county=="Stevens"),])) # Fill with 0 and 1 farms under criteria of above
Farms$Y[is.na(Farms$Y)] <- 0 # Fill with 0 other farms under infected vector
Farms$Y <- ifelse(Farms$Y==1 , Farms$inventory*0.01, 0) # selected farm will have 1% of inventory infected
Farms$X <- ifelse(Farms$Y == 0, Farms$inventory, Farms$inventory-Farms$Y)
Farms$S <- NA; Farms$I <- NA; Farms$R <- NA # Create new columns with proportion of susceptible (S), infected (I) and recovered (R) within each farm
Farms$S <- Farms$X/Farms$inventory
Farms$I <- Farms$Y/Farms$inventory
Farms$R <- Farms$Z/Farms$inventory
summary(Farms)
# END Farms
# BETA parameter ----------------------------------------------------------
# Sow farms use the Jeong (2014) sow betas; pig farms the Charpin (2012)
# piglet betas.
Farms$beta_l <- ifelse(Farms$sow==1, beta_l_s,beta_l_p)
Farms$beta_m <- ifelse(Farms$sow==1, beta_m_s,beta_m_p)
Farms$beta_h <- ifelse(Farms$sow==1, beta_h_s,beta_h_p)
Farms$beta_l_N <- Farms$beta_l/Farms$inventory #Beta divided by N from each farm
Farms$beta_m_N <- Farms$beta_m/Farms$inventory
Farms$beta_h_N <- Farms$beta_h/Farms$inventory
# List all possibilities of BETA and RHO ----------------------------------
beta_list = list(Farms$beta_l_N, Farms$beta_m_N, Farms$beta_h_N)
rho_list = list(rho1, rho2, rho3, rho4, rho5, rho6, rho7, rho8, rho9)
# Function SIR dissagregated model ----------------------------------------
start <- 0; finish <- 26; step <- .05 # When I increase the step time, increase the speed of my analyses. Unfortunately, if I increase step beyond than 0.05, the number of Susceptibles, Infected and Recovered goes to + or - Inf. Or I get NA values.
time <- seq(start, finish, step) #frame time period (1/2 year)
# Metapopulation SIRS derivative function (deSolve-style signature):
# given the stacked state vector of per-farm proportions S, I, R and the
# parameter list `parms`, returns d(stocks)/dt.
#
# parms is expected to supply (TODO confirm against the caller):
#   beta    - farm-to-farm transmission matrix
#   delays  - infectious period per farm
#   mort    - disease mortality rate per farm
#   returns - duration of immunity per farm (R flows back to S)
model <- function(time, stocks, parms){
  #states <- Farms[,c("X", "Y", "Z")]
  # Split the stacked state vector by name; assumes each element's name
  # contains exactly one of the letters S/I/R ("R" must not appear in the
  # S or I names -- TODO confirm how `stocks` is named by the caller).
  S <- stocks[grep("S",names(stocks))]
  I <- stocks[grep("I",names(stocks))]
  R <- stocks[grep("R",names(stocks))]
  with(c(list("S"=S,"I"=I,"R"=R), parms), {
    lambda = beta %*% I   # force of infection on each farm
    IR <- lambda*S        # infection rate (S -> I)
    RR <- I/delays        # recovery rate (I -> R)
    MR <- I*mort          # disease mortality (leaves I)
    SR <- R/returns       # loss of immunity (R -> S)
    dS_dt <- SR - IR
    dI_dt <- IR - RR - MR
    dR_dt <- RR - SR
    return(list(c(dS_dt, dI_dt, dR_dt)))
  })
}
# CONTROL STRATEGIES 1 ----------------------------------------------------
###########################################################################
# Set number of simulation (to save time I have reduced to 10 instead 20)
nsims = 10 # (27 * 10 = 270 simulations total)
# Range of parameters (days of immunity [D_imm], increase in mortality [v] and days of infection [D])
# Suffix convention (assumed from usage below — confirm): "_s" = sow farms,
# "_p" = all other (non-sow) farms; each pair is (lower, upper) bound.
D_imm_r = c(26, 52)
v_s_r = c(0, 0.002)
v_p_r = c(0.02, 0.18)
D_s_r = c(1, 6)
D_p_r = c(4, 12)
get_D_imm = function(k, D_imm_r, Farms){
  # One draw of the immunity-duration parameter D_imm for every farm.
  #   k == 1 -> the lower bound D_imm_r[1] for all farms
  #   k == 2 -> the upper bound D_imm_r[2] for all farms
  #   else   -> an independent Uniform(D_imm_r[1], D_imm_r[2]) draw per farm
  # Returns a numeric vector of length nrow(Farms).
  n_farms <- nrow(Farms)
  if (k == 1) {
    rep(D_imm_r[1], n_farms)
  } else if (k == 2) {
    rep(D_imm_r[2], n_farms)
  } else {
    runif(n_farms, min = D_imm_r[1], max = D_imm_r[2])
  }
}
# One immunity-duration vector per replicate (list of length nsims;
# replicates 1 and 2 are the bounds, the rest are random draws).
D_imm = lapply(1:nsims, get_D_imm, D_imm_r, Farms)
get_D = function(k, D_s_r, D_p_r, Farms){
  # One draw of the infectious-period parameter D, by farm type:
  # sow farms (Farms$sow == 1) use the D_s_r range, all others D_p_r.
  #   k == 1 -> lower bounds; k == 2 -> upper bounds;
  #   else   -> one uniform draw per farm type, shared by every farm of
  #             that type (two RNG calls total: sow first, then others).
  # Returns a numeric vector of length nrow(Farms).
  if (k == 1) {
    sow_val <- D_s_r[1]
    oth_val <- D_p_r[1]
  } else if (k == 2) {
    sow_val <- D_s_r[2]
    oth_val <- D_p_r[2]
  } else {
    sow_val <- runif(1, min = D_s_r[1], max = D_s_r[2])
    oth_val <- runif(1, min = D_p_r[1], max = D_p_r[2])
  }
  ifelse(Farms$sow == 1, sow_val, oth_val)
}
# One infectious-period vector per replicate (baseline, no vaccination).
D = lapply(1:nsims, get_D, D_s_r, D_p_r, Farms)
get_v = function(k, v_s_r, v_p_r, Farms){
  # One draw of the disease-induced extra mortality rate v, by farm type:
  # sow farms (Farms$sow == 1) use the v_s_r range, all others v_p_r.
  #   k == 1 -> lower bounds; k == 2 -> upper bounds;
  #   else   -> one uniform draw per farm type, shared by every farm of
  #             that type (two RNG calls total: sow first, then others).
  # Returns a numeric vector of length nrow(Farms).
  if (k == 1) {
    sow_val <- v_s_r[1]
    oth_val <- v_p_r[1]
  } else if (k == 2) {
    sow_val <- v_s_r[2]
    oth_val <- v_p_r[2]
  } else {
    sow_val <- runif(1, min = v_s_r[1], max = v_s_r[2])
    oth_val <- runif(1, min = v_p_r[1], max = v_p_r[2])
  }
  ifelse(Farms$sow == 1, sow_val, oth_val)
}
# One extra-mortality vector per replicate.
v = lapply(1:nsims, get_v, v_s_r, v_p_r, Farms)
# --- Assign vaccination coverage groups among sow farms -------------------
# Control strategies can only be applied to sow farms, so random subsets of
# them (25/50/75/100% regional coverage) are flagged in indicator columns
# sowV25..sowV100.
# NOTE(review): set.seed(1) is re-issued before every sample(), so the 25%
# group is a prefix of the 50% group, etc. — the coverage sets are nested.
# Presumably intentional (comparable scenarios); confirm.
# NOTE(review): base merge() re-orders the result rows by the join key "id"
# by default — confirm no downstream code relies on the original Farms
# row order established before this point.
set.seed(1) # I only can impose any strategy of control in some farms (sow farms), so I am teasting the disease dyamics by implementing 2 control strategies in those farms with different level of regional coverage (25%, 50%, 75% and 100%)
Farms3 <- subset(Farms, sow == 1)[sample(nrow(subset(Farms, sow == 1)), round(sum(Farms$sow)*0.25)), ]
Farms3$sowV25 <- NA
Farms3$sowV25 <- 1
Farms <- merge(Farms, subset(Farms3[ , c("id", "sowV25")]), by.x = "id", by.y = "id", all.x = T)
Farms$sowV25[is.na(Farms$sowV25)] <- 0
set.seed(1)
Farms3 <- subset(Farms, sow == 1)[sample(nrow(subset(Farms, sow == 1)), round(sum(Farms$sow)*0.5)), ]
Farms3$sowV50 <- NA
Farms3$sowV50 <- 1
Farms <- merge(Farms, subset(Farms3[ , c("id", "sowV50")]), by.x = "id", by.y = "id", all.x = T)
Farms$sowV50[is.na(Farms$sowV50)] <- 0
set.seed(1)
Farms3 <- subset(Farms, sow == 1)[sample(nrow(subset(Farms, sow == 1)), round(sum(Farms$sow)*0.75)), ]
Farms3$sowV75 <- NA
Farms3$sowV75 <- 1
Farms <- merge(Farms, subset(Farms3[ , c("id", "sowV75")]), by.x = "id", by.y = "id", all.x = T)
Farms$sowV75[is.na(Farms$sowV75)] <- 0
set.seed(1)
Farms3 <- subset(Farms, sow == 1)[sample(nrow(subset(Farms, sow == 1)), round(sum(Farms$sow)*1)), ]
Farms3$sowV100<- NA
Farms3$sowV100 <- 1
Farms <- merge(Farms, subset(Farms3[ , c("id", "sowV100")]), by.x = "id", by.y = "id", all.x = T)
Farms$sowV100[is.na(Farms$sowV100)] <- 0
# Vaccine efficacy / Beta*(1-E)
# NOTE(review): only E[2] = 0.5 is ever used below; E[1] = 0.1 is unused.
E = c(0.1, 0.5)
# --- Within-herd beta under vaccination -----------------------------------
# On covered sow farms, beta is reduced by the vaccine efficacy E[2] = 0.5;
# uncovered farms keep their baseline beta/N. One list of (low, medium,
# high) beta vectors per coverage level.
# 25% coverage
Farms$beta_l_NV25 <- ifelse(Farms$sowV25 ==1, Farms$beta_l_N*(1-E[2]), Farms$beta_l_N)
Farms$beta_m_NV25 <- ifelse(Farms$sowV25 ==1, Farms$beta_m_N*(1-E[2]), Farms$beta_m_N)
Farms$beta_h_NV25 <- ifelse(Farms$sowV25 ==1, Farms$beta_h_N*(1-E[2]), Farms$beta_h_N)
beta_listV25 = list(Farms$beta_l_NV25, Farms$beta_m_NV25, Farms$beta_h_NV25)
# 50% coverage
Farms$beta_l_NV50 <- ifelse(Farms$sowV50 ==1, Farms$beta_l_N*(1-E[2]), Farms$beta_l_N)
Farms$beta_m_NV50 <- ifelse(Farms$sowV50 ==1, Farms$beta_m_N*(1-E[2]), Farms$beta_m_N)
Farms$beta_h_NV50 <- ifelse(Farms$sowV50 ==1, Farms$beta_h_N*(1-E[2]), Farms$beta_h_N)
beta_listV50 = list(Farms$beta_l_NV50, Farms$beta_m_NV50, Farms$beta_h_NV50)
# 75%
Farms$beta_l_NV75 <- ifelse(Farms$sowV75 ==1, Farms$beta_l_N*(1-E[2]), Farms$beta_l_N)
Farms$beta_m_NV75 <- ifelse(Farms$sowV75 ==1, Farms$beta_m_N*(1-E[2]), Farms$beta_m_N)
Farms$beta_h_NV75 <- ifelse(Farms$sowV75 ==1, Farms$beta_h_N*(1-E[2]), Farms$beta_h_N)
beta_listV75 = list(Farms$beta_l_NV75, Farms$beta_m_NV75, Farms$beta_h_NV75)
# 100% coverage
Farms$beta_l_NV100 <- ifelse(Farms$sowV100 ==1, Farms$beta_l_N*(1-E[2]), Farms$beta_l_N)
Farms$beta_m_NV100 <- ifelse(Farms$sowV100 ==1, Farms$beta_m_N*(1-E[2]), Farms$beta_m_N)
Farms$beta_h_NV100 <- ifelse(Farms$sowV100 ==1, Farms$beta_h_N*(1-E[2]), Farms$beta_h_N)
beta_listV100 = list(Farms$beta_l_NV100, Farms$beta_m_NV100, Farms$beta_h_NV100)
# Set Rhos for filters
# NOTE(review): F_s values (0.6, 0.9) are labelled "40%" and "80% protection"
# below, but a factor (1 - F_s) corresponds to 60% and 90% reductions —
# the labels and values disagree; confirm which is intended.
F_s = c(.6,.9)
B_s = c(.6)
# --- Reduce between-farm transmission routes under control ---------------
# Tag every origin-destination pair in the movement/contact table with the
# coverage indicators of BOTH endpoints; a pair counts as covered (sowVxx
# > 0) if either endpoint is covered.
# NOTE(review): the table is named `matrix`, shadowing base::matrix —
# consider renaming if this file is ever refactored.
matrix <- merge(matrix, Farms[,c("id","sowV25","sowV50","sowV75","sowV100")],
                by.x = "destination", by.y = "id", all.x = T )
matrix <- merge(matrix, Farms[,c("id","sowV25","sowV50","sowV75","sowV100")],
                by.x = "origin", by.y = "id", all.x = T )
matrix$sowV25 = matrix$sowV25.x+matrix$sowV25.y
matrix$sowV50 = matrix$sowV50.x+matrix$sowV50.y
matrix$sowV75 = matrix$sowV75.x+matrix$sowV75.y
matrix$sowV100 = matrix$sowV100.x+matrix$sowV100.y
matrix[,c("sowV25.x","sowV25.y","sowV50.x","sowV50.y","sowV75.x","sowV75.y","sowV100.x","sowV100.y")] <- list(NULL)
# Reduce K_ij by X% ramndlnly using filtering at a given protection
# Air-filtration scenarios: scale the local-spread kernel k_ij (and its
# upper/lower variants) by (1 - F_s) on covered pairs.
# NOTE(review): "80% protection" uses F_s[2] = 0.9 and "40%" uses
# F_s[1] = 0.6 — see the note at the F_s definition.
# 80% protection
matrix$k_ij_F80_25 <- ifelse(matrix$sowV25 >0, matrix$k_ij*(1-F_s[2]),matrix$k_ij)
matrix$k_ij_F80_50 <- ifelse(matrix$sowV50 >0, matrix$k_ij*(1-F_s[2]),matrix$k_ij)
matrix$k_ij_F80_75 <- ifelse(matrix$sowV75 >0, matrix$k_ij*(1-F_s[2]),matrix$k_ij)
matrix$k_ij_F80_100 <- ifelse(matrix$sowV100 >0, matrix$k_ij*(1-F_s[2]),matrix$k_ij)
matrix$k_ij_up_F80_25 <- ifelse(matrix$sowV25 >0, matrix$k_ij_up*(1-F_s[2]),matrix$k_ij_up)
matrix$k_ij_up_F80_50 <- ifelse(matrix$sowV50 >0, matrix$k_ij_up*(1-F_s[2]),matrix$k_ij_up)
matrix$k_ij_up_F80_75 <- ifelse(matrix$sowV75 >0, matrix$k_ij_up*(1-F_s[2]),matrix$k_ij_up)
matrix$k_ij_up_F80_100 <- ifelse(matrix$sowV100 >0, matrix$k_ij_up*(1-F_s[2]),matrix$k_ij_up)
matrix$k_ij_low_F80_25 <- ifelse(matrix$sowV25 >0, matrix$k_ij_low*(1-F_s[2]),matrix$k_ij_low)
matrix$k_ij_low_F80_50 <- ifelse(matrix$sowV50 >0, matrix$k_ij_low*(1-F_s[2]),matrix$k_ij_low)
matrix$k_ij_low_F80_75 <- ifelse(matrix$sowV75 >0, matrix$k_ij_low*(1-F_s[2]),matrix$k_ij_low)
matrix$k_ij_low_F80_100 <- ifelse(matrix$sowV100 >0, matrix$k_ij_low*(1-F_s[2]),matrix$k_ij_low)
# 40% protection
matrix$k_ij_F40_25 <- ifelse(matrix$sowV25 >0, matrix$k_ij*(1-F_s[1]),matrix$k_ij)
matrix$k_ij_F40_50 <- ifelse(matrix$sowV50 >0, matrix$k_ij*(1-F_s[1]),matrix$k_ij)
matrix$k_ij_F40_75 <- ifelse(matrix$sowV75 >0, matrix$k_ij*(1-F_s[1]),matrix$k_ij)
matrix$k_ij_F40_100 <- ifelse(matrix$sowV100 >0, matrix$k_ij*(1-F_s[1]),matrix$k_ij)
matrix$k_ij_up_F40_25 <- ifelse(matrix$sowV25 >0, matrix$k_ij_up*(1-F_s[1]),matrix$k_ij_up)
matrix$k_ij_up_F40_50 <- ifelse(matrix$sowV50 >0, matrix$k_ij_up*(1-F_s[1]),matrix$k_ij_up)
matrix$k_ij_up_F40_75 <- ifelse(matrix$sowV75 >0, matrix$k_ij_up*(1-F_s[1]),matrix$k_ij_up)
matrix$k_ij_up_F40_100 <- ifelse(matrix$sowV100 >0, matrix$k_ij_up*(1-F_s[1]),matrix$k_ij_up)
matrix$k_ij_low_F40_25 <- ifelse(matrix$sowV25 >0, matrix$k_ij_low*(1-F_s[1]),matrix$k_ij_low)
matrix$k_ij_low_F40_50 <- ifelse(matrix$sowV50 >0, matrix$k_ij_low*(1-F_s[1]),matrix$k_ij_low)
matrix$k_ij_low_F40_75 <- ifelse(matrix$sowV75 >0, matrix$k_ij_low*(1-F_s[1]),matrix$k_ij_low)
matrix$k_ij_low_F40_100 <- ifelse(matrix$sowV100 >0, matrix$k_ij_low*(1-F_s[1]),matrix$k_ij_low)
# Increase in biosecurity
# Biosecurity scenarios: scale the movement-based probability p_ij* by
# (1 - B_s[1]) = 0.4 on covered pairs.
matrix$p_ij1_25 <- ifelse(matrix$sowV25 >0, matrix$p_ij1*(1-B_s[1]),matrix$p_ij1)
matrix$p_ij1_50 <- ifelse(matrix$sowV50 >0, matrix$p_ij1*(1-B_s[1]),matrix$p_ij1)
matrix$p_ij1_75 <- ifelse(matrix$sowV75 >0, matrix$p_ij1*(1-B_s[1]),matrix$p_ij1)
matrix$p_ij1_100 <- ifelse(matrix$sowV100 >0, matrix$p_ij1*(1-B_s[1]),matrix$p_ij1)
matrix$p_ij.6_25 <- ifelse(matrix$sowV25 >0, matrix$p_ij.6*(1-B_s[1]),matrix$p_ij.6)
matrix$p_ij.6_50 <- ifelse(matrix$sowV50 >0, matrix$p_ij.6*(1-B_s[1]),matrix$p_ij.6)
matrix$p_ij.6_75 <- ifelse(matrix$sowV75 >0, matrix$p_ij.6*(1-B_s[1]),matrix$p_ij.6)
matrix$p_ij.6_100 <- ifelse(matrix$sowV100 >0, matrix$p_ij.6*(1-B_s[1]),matrix$p_ij.6)
matrix$p_ij.3_25 <- ifelse(matrix$sowV25 >0, matrix$p_ij.3*(1-B_s[1]),matrix$p_ij.3)
matrix$p_ij.3_50 <- ifelse(matrix$sowV50 >0, matrix$p_ij.3*(1-B_s[1]),matrix$p_ij.3)
matrix$p_ij.3_75 <- ifelse(matrix$sowV75 >0, matrix$p_ij.3*(1-B_s[1]),matrix$p_ij.3)
matrix$p_ij.3_100 <- ifelse(matrix$sowV100 >0, matrix$p_ij.3*(1-B_s[1]),matrix$p_ij.3)
# --- Between-farm contact matrices under 80%-label filtering --------------
# Each rho combines the movement probability p and the local-spread kernel
# k by inclusion-exclusion (rho = p + k - p*k), for the 3 p variants x 3 k
# variants = 9 matrices per scenario; rho_ii is forced to 1 afterwards.
# acast() is from reshape2 (presumably loaded elsewhere — confirm).
# List Rho -80- 25%
matrix$rho1_F80_25 <- matrix$p_ij1_25 + matrix$k_ij_F80_25 - matrix$p_ij1_25*matrix$k_ij_F80_25
matrix$rho2_F80_25 <- matrix$p_ij1_25 + matrix$k_ij_up_F80_25 - matrix$p_ij1_25*matrix$k_ij_up_F80_25
matrix$rho3_F80_25 <- matrix$p_ij1_25 + matrix$k_ij_low_F80_25 - matrix$p_ij1_25*matrix$k_ij_low_F80_25
matrix$rho4_F80_25 <- matrix$p_ij.3_25 + matrix$k_ij_F80_25 - matrix$p_ij.3_25*matrix$k_ij_F80_25
matrix$rho5_F80_25 <- matrix$p_ij.3_25 + matrix$k_ij_up_F80_25 - matrix$p_ij.3_25*matrix$k_ij_up_F80_25
matrix$rho6_F80_25 <- matrix$p_ij.3_25 + matrix$k_ij_low_F80_25 - matrix$p_ij.3_25*matrix$k_ij_low_F80_25
matrix$rho7_F80_25 <- matrix$p_ij.6_25 + matrix$k_ij_F80_25 - matrix$p_ij.6_25*matrix$k_ij_F80_25
matrix$rho8_F80_25 <- matrix$p_ij.6_25 + matrix$k_ij_up_F80_25 - matrix$p_ij.6_25*matrix$k_ij_up_F80_25
matrix$rho9_F80_25 <- matrix$p_ij.6_25 + matrix$k_ij_low_F80_25 - matrix$p_ij.6_25*matrix$k_ij_low_F80_25
rho1_F80_25 <- acast(matrix, destination ~ origin, value.var = 'rho1_F80_25')
rho2_F80_25 <- acast(matrix, destination ~ origin, value.var = 'rho2_F80_25')
rho3_F80_25 <- acast(matrix, destination ~ origin, value.var = 'rho3_F80_25')
rho4_F80_25 <- acast(matrix, destination ~ origin, value.var = 'rho4_F80_25')
rho5_F80_25 <- acast(matrix, destination ~ origin, value.var = 'rho5_F80_25')
rho6_F80_25 <- acast(matrix, destination ~ origin, value.var = 'rho6_F80_25')
rho7_F80_25 <- acast(matrix, destination ~ origin, value.var = 'rho7_F80_25')
rho8_F80_25 <- acast(matrix, destination ~ origin, value.var = 'rho8_F80_25')
rho9_F80_25 <- acast(matrix, destination ~ origin, value.var = 'rho9_F80_25')
diag(rho1_F80_25) <- 1;diag(rho2_F80_25) <- 1;diag(rho3_F80_25) <- 1;diag(rho4_F80_25) <- 1;diag(rho5_F80_25) <- 1;diag(rho6_F80_25) <- 1;diag(rho7_F80_25) <- 1;diag(rho8_F80_25) <- 1;diag(rho9_F80_25) <- 1
rho_list_F80_25 = list(rho1_F80_25, rho2_F80_25, rho3_F80_25, rho4_F80_25, rho5_F80_25,
                       rho6_F80_25, rho7_F80_25, rho8_F80_25, rho9_F80_25)
# List Rho -80- 50%
matrix$rho1_F80_50 <- matrix$p_ij1_50 + matrix$k_ij_F80_50 - matrix$p_ij1_50*matrix$k_ij_F80_50
matrix$rho2_F80_50 <- matrix$p_ij1_50 + matrix$k_ij_up_F80_50 - matrix$p_ij1_50*matrix$k_ij_up_F80_50 # rho max
matrix$rho3_F80_50 <- matrix$p_ij1_50 + matrix$k_ij_low_F80_50 - matrix$p_ij1_50*matrix$k_ij_low_F80_50
matrix$rho4_F80_50 <- matrix$p_ij.3_50 + matrix$k_ij_F80_50 - matrix$p_ij.3_50*matrix$k_ij_F80_50
matrix$rho5_F80_50 <- matrix$p_ij.3_50 + matrix$k_ij_up_F80_50 - matrix$p_ij.3_50*matrix$k_ij_up_F80_50
matrix$rho6_F80_50 <- matrix$p_ij.3_50 + matrix$k_ij_low_F80_50 - matrix$p_ij.3_50*matrix$k_ij_low_F80_50 # rho min
matrix$rho7_F80_50 <- matrix$p_ij.6_50 + matrix$k_ij_F80_50 - matrix$p_ij.6_50*matrix$k_ij_F80_50
matrix$rho8_F80_50 <- matrix$p_ij.6_50 + matrix$k_ij_up_F80_50 - matrix$p_ij.6_50*matrix$k_ij_up_F80_50
matrix$rho9_F80_50 <- matrix$p_ij.6_50 + matrix$k_ij_low_F80_50 - matrix$p_ij.6_50*matrix$k_ij_low_F80_50
rho1_F80_50 <- acast(matrix, destination ~ origin, value.var = 'rho1_F80_50')
rho2_F80_50 <- acast(matrix, destination ~ origin, value.var = 'rho2_F80_50')
rho3_F80_50 <- acast(matrix, destination ~ origin, value.var = 'rho3_F80_50')
rho4_F80_50 <- acast(matrix, destination ~ origin, value.var = 'rho4_F80_50')
rho5_F80_50 <- acast(matrix, destination ~ origin, value.var = 'rho5_F80_50')
rho6_F80_50 <- acast(matrix, destination ~ origin, value.var = 'rho6_F80_50')
rho7_F80_50 <- acast(matrix, destination ~ origin, value.var = 'rho7_F80_50')
rho8_F80_50 <- acast(matrix, destination ~ origin, value.var = 'rho8_F80_50')
rho9_F80_50 <- acast(matrix, destination ~ origin, value.var = 'rho9_F80_50')
diag(rho1_F80_50) <- 1;diag(rho2_F80_50) <- 1;diag(rho3_F80_50) <- 1;diag(rho4_F80_50) <- 1;
diag(rho5_F80_50) <- 1;diag(rho6_F80_50) <- 1;diag(rho7_F80_50) <- 1;diag(rho8_F80_50) <- 1;
diag(rho9_F80_50) <- 1 # Fill diagonal with 1, due to rho_ii=1
rho_list_F80_50 = list(rho1_F80_50, rho2_F80_50, rho3_F80_50, rho4_F80_50, rho5_F80_50,
                       rho6_F80_50, rho7_F80_50, rho8_F80_50, rho9_F80_50)
# --- rho matrices, 80%-label filtering, 75% and 100% coverage -------------
# Same rho = p + k - p*k construction as the 25/50% coverage blocks.
# List Rho -80- 75%
matrix$rho1_F80_75 <- matrix$p_ij1_75 + matrix$k_ij_F80_75 - matrix$p_ij1_75*matrix$k_ij_F80_75
matrix$rho2_F80_75 <- matrix$p_ij1_75 + matrix$k_ij_up_F80_75 - matrix$p_ij1_75*matrix$k_ij_up_F80_75 # rho max
matrix$rho3_F80_75 <- matrix$p_ij1_75 + matrix$k_ij_low_F80_75 - matrix$p_ij1_75*matrix$k_ij_low_F80_75
matrix$rho4_F80_75 <- matrix$p_ij.3_75 + matrix$k_ij_F80_75 - matrix$p_ij.3_75*matrix$k_ij_F80_75
matrix$rho5_F80_75 <- matrix$p_ij.3_75 + matrix$k_ij_up_F80_75 - matrix$p_ij.3_75*matrix$k_ij_up_F80_75
matrix$rho6_F80_75 <- matrix$p_ij.3_75 + matrix$k_ij_low_F80_75 - matrix$p_ij.3_75*matrix$k_ij_low_F80_75 # rho min
matrix$rho7_F80_75 <- matrix$p_ij.6_75 + matrix$k_ij_F80_75 - matrix$p_ij.6_75*matrix$k_ij_F80_75
matrix$rho8_F80_75 <- matrix$p_ij.6_75 + matrix$k_ij_up_F80_75 - matrix$p_ij.6_75*matrix$k_ij_up_F80_75
matrix$rho9_F80_75 <- matrix$p_ij.6_75 + matrix$k_ij_low_F80_75 - matrix$p_ij.6_75*matrix$k_ij_low_F80_75
rho1_F80_75 <- acast(matrix, destination ~ origin, value.var = 'rho1_F80_75')
rho2_F80_75 <- acast(matrix, destination ~ origin, value.var = 'rho2_F80_75')
rho3_F80_75 <- acast(matrix, destination ~ origin, value.var = 'rho3_F80_75')
rho4_F80_75 <- acast(matrix, destination ~ origin, value.var = 'rho4_F80_75')
rho5_F80_75 <- acast(matrix, destination ~ origin, value.var = 'rho5_F80_75')
rho6_F80_75 <- acast(matrix, destination ~ origin, value.var = 'rho6_F80_75')
rho7_F80_75 <- acast(matrix, destination ~ origin, value.var = 'rho7_F80_75')
rho8_F80_75 <- acast(matrix, destination ~ origin, value.var = 'rho8_F80_75')
rho9_F80_75 <- acast(matrix, destination ~ origin, value.var = 'rho9_F80_75')
diag(rho1_F80_75) <- 1;diag(rho2_F80_75) <- 1;diag(rho3_F80_75) <- 1;diag(rho4_F80_75) <- 1;
diag(rho5_F80_75) <- 1;diag(rho6_F80_75) <- 1;diag(rho7_F80_75) <- 1;diag(rho8_F80_75) <- 1;
diag(rho9_F80_75) <- 1 # Fill diagonal with 1, due to rho_ii=1
rho_list_F80_75 = list(rho1_F80_75, rho2_F80_75, rho3_F80_75, rho4_F80_75, rho5_F80_75,
                       rho6_F80_75, rho7_F80_75, rho8_F80_75, rho9_F80_75)
# List Rho -80- 100%
matrix$rho1_F80_100 <- matrix$p_ij1_100 + matrix$k_ij_F80_100 - matrix$p_ij1_100*matrix$k_ij_F80_100
matrix$rho2_F80_100 <- matrix$p_ij1_100 + matrix$k_ij_up_F80_100 - matrix$p_ij1_100*matrix$k_ij_up_F80_100 # rho max
matrix$rho3_F80_100 <- matrix$p_ij1_100 + matrix$k_ij_low_F80_100 - matrix$p_ij1_100*matrix$k_ij_low_F80_100
matrix$rho4_F80_100 <- matrix$p_ij.3_100 + matrix$k_ij_F80_100 - matrix$p_ij.3_100*matrix$k_ij_F80_100
matrix$rho5_F80_100 <- matrix$p_ij.3_100 + matrix$k_ij_up_F80_100 - matrix$p_ij.3_100*matrix$k_ij_up_F80_100
matrix$rho6_F80_100 <- matrix$p_ij.3_100 + matrix$k_ij_low_F80_100 - matrix$p_ij.3_100*matrix$k_ij_low_F80_100 # rho min
matrix$rho7_F80_100 <- matrix$p_ij.6_100 + matrix$k_ij_F80_100 - matrix$p_ij.6_100*matrix$k_ij_F80_100
matrix$rho8_F80_100 <- matrix$p_ij.6_100 + matrix$k_ij_up_F80_100 - matrix$p_ij.6_100*matrix$k_ij_up_F80_100
matrix$rho9_F80_100 <- matrix$p_ij.6_100 + matrix$k_ij_low_F80_100 - matrix$p_ij.6_100*matrix$k_ij_low_F80_100
rho1_F80_100 <- acast(matrix, destination ~ origin, value.var = 'rho1_F80_100')
rho2_F80_100 <- acast(matrix, destination ~ origin, value.var = 'rho2_F80_100')
rho3_F80_100 <- acast(matrix, destination ~ origin, value.var = 'rho3_F80_100')
rho4_F80_100 <- acast(matrix, destination ~ origin, value.var = 'rho4_F80_100')
rho5_F80_100 <- acast(matrix, destination ~ origin, value.var = 'rho5_F80_100')
rho6_F80_100 <- acast(matrix, destination ~ origin, value.var = 'rho6_F80_100')
rho7_F80_100 <- acast(matrix, destination ~ origin, value.var = 'rho7_F80_100')
rho8_F80_100 <- acast(matrix, destination ~ origin, value.var = 'rho8_F80_100')
rho9_F80_100 <- acast(matrix, destination ~ origin, value.var = 'rho9_F80_100')
diag(rho1_F80_100) <- 1;diag(rho2_F80_100) <- 1;diag(rho3_F80_100) <- 1;diag(rho4_F80_100) <- 1;
diag(rho5_F80_100) <- 1;diag(rho6_F80_100) <- 1;diag(rho7_F80_100) <- 1;diag(rho8_F80_100) <- 1;
diag(rho9_F80_100) <- 1 # Fill diagonal with 1, due to rho_ii=1
rho_list_F80_100 = list(rho1_F80_100, rho2_F80_100, rho3_F80_100, rho4_F80_100, rho5_F80_100,
                        rho6_F80_100, rho7_F80_100, rho8_F80_100, rho9_F80_100)
# --- rho matrices, 40%-label filtering, 25% and 50% coverage --------------
# Same rho = p + k - p*k construction as the F80 blocks, but with the
# weaker kernel reduction (F_s[1] = 0.6).
# List Rho -40- 25%
matrix$rho1_F40_25 <- matrix$p_ij1_25 + matrix$k_ij_F40_25 - matrix$p_ij1_25*matrix$k_ij_F40_25
matrix$rho2_F40_25 <- matrix$p_ij1_25 + matrix$k_ij_up_F40_25 - matrix$p_ij1_25*matrix$k_ij_up_F40_25
matrix$rho3_F40_25 <- matrix$p_ij1_25 + matrix$k_ij_low_F40_25 - matrix$p_ij1_25*matrix$k_ij_low_F40_25
matrix$rho4_F40_25 <- matrix$p_ij.3_25 + matrix$k_ij_F40_25 - matrix$p_ij.3_25*matrix$k_ij_F40_25
matrix$rho5_F40_25 <- matrix$p_ij.3_25 + matrix$k_ij_up_F40_25 - matrix$p_ij.3_25*matrix$k_ij_up_F40_25
matrix$rho6_F40_25 <- matrix$p_ij.3_25 + matrix$k_ij_low_F40_25 - matrix$p_ij.3_25*matrix$k_ij_low_F40_25
matrix$rho7_F40_25 <- matrix$p_ij.6_25 + matrix$k_ij_F40_25 - matrix$p_ij.6_25*matrix$k_ij_F40_25
matrix$rho8_F40_25 <- matrix$p_ij.6_25 + matrix$k_ij_up_F40_25 - matrix$p_ij.6_25*matrix$k_ij_up_F40_25
matrix$rho9_F40_25 <- matrix$p_ij.6_25 + matrix$k_ij_low_F40_25 - matrix$p_ij.6_25*matrix$k_ij_low_F40_25
rho1_F40_25 <- acast(matrix, destination ~ origin, value.var = 'rho1_F40_25')
rho2_F40_25 <- acast(matrix, destination ~ origin, value.var = 'rho2_F40_25')
rho3_F40_25 <- acast(matrix, destination ~ origin, value.var = 'rho3_F40_25')
rho4_F40_25 <- acast(matrix, destination ~ origin, value.var = 'rho4_F40_25')
rho5_F40_25 <- acast(matrix, destination ~ origin, value.var = 'rho5_F40_25')
rho6_F40_25 <- acast(matrix, destination ~ origin, value.var = 'rho6_F40_25')
rho7_F40_25 <- acast(matrix, destination ~ origin, value.var = 'rho7_F40_25')
rho8_F40_25 <- acast(matrix, destination ~ origin, value.var = 'rho8_F40_25')
rho9_F40_25 <- acast(matrix, destination ~ origin, value.var = 'rho9_F40_25')
diag(rho1_F40_25) <- 1;diag(rho2_F40_25) <- 1;diag(rho3_F40_25) <- 1;diag(rho4_F40_25) <- 1;diag(rho5_F40_25) <- 1;diag(rho6_F40_25) <- 1;diag(rho7_F40_25) <- 1;diag(rho8_F40_25) <- 1;diag(rho9_F40_25) <- 1
rho_list_F40_25 = list(rho1_F40_25, rho2_F40_25, rho3_F40_25, rho4_F40_25, rho5_F40_25,
                       rho6_F40_25, rho7_F40_25, rho8_F40_25, rho9_F40_25)
# List Rho -40- 50%
matrix$rho1_F40_50 <- matrix$p_ij1_50 + matrix$k_ij_F40_50 - matrix$p_ij1_50*matrix$k_ij_F40_50
matrix$rho2_F40_50 <- matrix$p_ij1_50 + matrix$k_ij_up_F40_50 - matrix$p_ij1_50*matrix$k_ij_up_F40_50 # rho max
matrix$rho3_F40_50 <- matrix$p_ij1_50 + matrix$k_ij_low_F40_50 - matrix$p_ij1_50*matrix$k_ij_low_F40_50
matrix$rho4_F40_50 <- matrix$p_ij.3_50 + matrix$k_ij_F40_50 - matrix$p_ij.3_50*matrix$k_ij_F40_50
matrix$rho5_F40_50 <- matrix$p_ij.3_50 + matrix$k_ij_up_F40_50 - matrix$p_ij.3_50*matrix$k_ij_up_F40_50
matrix$rho6_F40_50 <- matrix$p_ij.3_50 + matrix$k_ij_low_F40_50 - matrix$p_ij.3_50*matrix$k_ij_low_F40_50 # rho min
matrix$rho7_F40_50 <- matrix$p_ij.6_50 + matrix$k_ij_F40_50 - matrix$p_ij.6_50*matrix$k_ij_F40_50
matrix$rho8_F40_50 <- matrix$p_ij.6_50 + matrix$k_ij_up_F40_50 - matrix$p_ij.6_50*matrix$k_ij_up_F40_50
matrix$rho9_F40_50 <- matrix$p_ij.6_50 + matrix$k_ij_low_F40_50 - matrix$p_ij.6_50*matrix$k_ij_low_F40_50
rho1_F40_50 <- acast(matrix, destination ~ origin, value.var = 'rho1_F40_50')
rho2_F40_50 <- acast(matrix, destination ~ origin, value.var = 'rho2_F40_50')
rho3_F40_50 <- acast(matrix, destination ~ origin, value.var = 'rho3_F40_50')
rho4_F40_50 <- acast(matrix, destination ~ origin, value.var = 'rho4_F40_50')
rho5_F40_50 <- acast(matrix, destination ~ origin, value.var = 'rho5_F40_50')
rho6_F40_50 <- acast(matrix, destination ~ origin, value.var = 'rho6_F40_50')
rho7_F40_50 <- acast(matrix, destination ~ origin, value.var = 'rho7_F40_50')
rho8_F40_50 <- acast(matrix, destination ~ origin, value.var = 'rho8_F40_50')
rho9_F40_50 <- acast(matrix, destination ~ origin, value.var = 'rho9_F40_50')
diag(rho1_F40_50) <- 1;diag(rho2_F40_50) <- 1;diag(rho3_F40_50) <- 1;diag(rho4_F40_50) <- 1;
diag(rho5_F40_50) <- 1;diag(rho6_F40_50) <- 1;diag(rho7_F40_50) <- 1;diag(rho8_F40_50) <- 1;
diag(rho9_F40_50) <- 1
rho_list_F40_50 = list(rho1_F40_50, rho2_F40_50, rho3_F40_50, rho4_F40_50, rho5_F40_50,
                       rho6_F40_50, rho7_F40_50, rho8_F40_50, rho9_F40_50)
# --- rho matrices, 40%-label filtering, 75% and 100% coverage -------------
# Same rho = p + k - p*k construction as the preceding coverage blocks.
# List Rho -40- 75%
matrix$rho1_F40_75 <- matrix$p_ij1_75 + matrix$k_ij_F40_75 - matrix$p_ij1_75*matrix$k_ij_F40_75
matrix$rho2_F40_75 <- matrix$p_ij1_75 + matrix$k_ij_up_F40_75 - matrix$p_ij1_75*matrix$k_ij_up_F40_75 # rho max
matrix$rho3_F40_75 <- matrix$p_ij1_75 + matrix$k_ij_low_F40_75 - matrix$p_ij1_75*matrix$k_ij_low_F40_75
matrix$rho4_F40_75 <- matrix$p_ij.3_75 + matrix$k_ij_F40_75 - matrix$p_ij.3_75*matrix$k_ij_F40_75
matrix$rho5_F40_75 <- matrix$p_ij.3_75 + matrix$k_ij_up_F40_75 - matrix$p_ij.3_75*matrix$k_ij_up_F40_75
matrix$rho6_F40_75 <- matrix$p_ij.3_75 + matrix$k_ij_low_F40_75 - matrix$p_ij.3_75*matrix$k_ij_low_F40_75 # rho min
matrix$rho7_F40_75 <- matrix$p_ij.6_75 + matrix$k_ij_F40_75 - matrix$p_ij.6_75*matrix$k_ij_F40_75
matrix$rho8_F40_75 <- matrix$p_ij.6_75 + matrix$k_ij_up_F40_75 - matrix$p_ij.6_75*matrix$k_ij_up_F40_75
matrix$rho9_F40_75 <- matrix$p_ij.6_75 + matrix$k_ij_low_F40_75 - matrix$p_ij.6_75*matrix$k_ij_low_F40_75
rho1_F40_75 <- acast(matrix, destination ~ origin, value.var = 'rho1_F40_75')
rho2_F40_75 <- acast(matrix, destination ~ origin, value.var = 'rho2_F40_75')
rho3_F40_75 <- acast(matrix, destination ~ origin, value.var = 'rho3_F40_75')
rho4_F40_75 <- acast(matrix, destination ~ origin, value.var = 'rho4_F40_75')
rho5_F40_75 <- acast(matrix, destination ~ origin, value.var = 'rho5_F40_75')
rho6_F40_75 <- acast(matrix, destination ~ origin, value.var = 'rho6_F40_75')
rho7_F40_75 <- acast(matrix, destination ~ origin, value.var = 'rho7_F40_75')
rho8_F40_75 <- acast(matrix, destination ~ origin, value.var = 'rho8_F40_75')
rho9_F40_75 <- acast(matrix, destination ~ origin, value.var = 'rho9_F40_75')
diag(rho1_F40_75) <- 1;diag(rho2_F40_75) <- 1;diag(rho3_F40_75) <- 1;diag(rho4_F40_75) <- 1;
diag(rho5_F40_75) <- 1;diag(rho6_F40_75) <- 1;diag(rho7_F40_75) <- 1;diag(rho8_F40_75) <- 1;
diag(rho9_F40_75) <- 1 # Fill diagonal with 1, due to rho_ii=1
rho_list_F40_75 = list(rho1_F40_75, rho2_F40_75, rho3_F40_75, rho4_F40_75, rho5_F40_75,
                       rho6_F40_75, rho7_F40_75, rho8_F40_75, rho9_F40_75)
# List Rho -40- 100%
matrix$rho1_F40_100 <- matrix$p_ij1_100 + matrix$k_ij_F40_100 - matrix$p_ij1_100*matrix$k_ij_F40_100
matrix$rho2_F40_100 <- matrix$p_ij1_100 + matrix$k_ij_up_F40_100 - matrix$p_ij1_100*matrix$k_ij_up_F40_100 # rho max
matrix$rho3_F40_100 <- matrix$p_ij1_100 + matrix$k_ij_low_F40_100 - matrix$p_ij1_100*matrix$k_ij_low_F40_100
matrix$rho4_F40_100 <- matrix$p_ij.3_100 + matrix$k_ij_F40_100 - matrix$p_ij.3_100*matrix$k_ij_F40_100
matrix$rho5_F40_100 <- matrix$p_ij.3_100 + matrix$k_ij_up_F40_100 - matrix$p_ij.3_100*matrix$k_ij_up_F40_100
matrix$rho6_F40_100 <- matrix$p_ij.3_100 + matrix$k_ij_low_F40_100 - matrix$p_ij.3_100*matrix$k_ij_low_F40_100 # rho min
matrix$rho7_F40_100 <- matrix$p_ij.6_100 + matrix$k_ij_F40_100 - matrix$p_ij.6_100*matrix$k_ij_F40_100
matrix$rho8_F40_100 <- matrix$p_ij.6_100 + matrix$k_ij_up_F40_100 - matrix$p_ij.6_100*matrix$k_ij_up_F40_100
matrix$rho9_F40_100 <- matrix$p_ij.6_100 + matrix$k_ij_low_F40_100 - matrix$p_ij.6_100*matrix$k_ij_low_F40_100
rho1_F40_100 <- acast(matrix, destination ~ origin, value.var = 'rho1_F40_100')
rho2_F40_100 <- acast(matrix, destination ~ origin, value.var = 'rho2_F40_100')
rho3_F40_100 <- acast(matrix, destination ~ origin, value.var = 'rho3_F40_100')
rho4_F40_100 <- acast(matrix, destination ~ origin, value.var = 'rho4_F40_100')
rho5_F40_100 <- acast(matrix, destination ~ origin, value.var = 'rho5_F40_100')
rho6_F40_100 <- acast(matrix, destination ~ origin, value.var = 'rho6_F40_100')
rho7_F40_100 <- acast(matrix, destination ~ origin, value.var = 'rho7_F40_100')
rho8_F40_100 <- acast(matrix, destination ~ origin, value.var = 'rho8_F40_100')
rho9_F40_100 <- acast(matrix, destination ~ origin, value.var = 'rho9_F40_100')
diag(rho1_F40_100) <- 1;diag(rho2_F40_100) <- 1;diag(rho3_F40_100) <- 1;diag(rho4_F40_100) <- 1;
diag(rho5_F40_100) <- 1;diag(rho6_F40_100) <- 1;diag(rho7_F40_100) <- 1;diag(rho8_F40_100) <- 1;
diag(rho9_F40_100) <- 1 # Fill diagonal with 1, due to rho_ii=1
rho_list_F40_100 = list(rho1_F40_100, rho2_F40_100, rho3_F40_100, rho4_F40_100, rho5_F40_100,
                        rho6_F40_100, rho7_F40_100, rho8_F40_100, rho9_F40_100)
# Set new parameters for Dv (Days of infection) in vaccinated animals
# (lower, upper) bounds of the shortened infectious period on vaccinated
# sow farms.
Dv_s_r = c(0.7, 2.8)
# D- 25% Coverage
get_D25 = function(k, D_s_r, D_p_r, Dv_s_r, Farms){
  # Infectious-period draw under 25% sow-farm vaccination coverage:
  # vaccinated sow farms (Farms$sowV25 == 1) use the shortened range
  # Dv_s_r, unvaccinated sow farms D_s_r, and all other farms D_p_r.
  #   k == 1 -> lower bounds; k == 2 -> upper bounds;
  #   else   -> one uniform draw per category, shared by every farm in it
  #             (three RNG calls: vaccinated, then sow, then others).
  # Returns a numeric vector of length nrow(Farms).
  if (k == 1) {
    vac_val <- Dv_s_r[1]; sow_val <- D_s_r[1]; oth_val <- D_p_r[1]
  } else if (k == 2) {
    vac_val <- Dv_s_r[2]; sow_val <- D_s_r[2]; oth_val <- D_p_r[2]
  } else {
    vac_val <- runif(1, min = Dv_s_r[1], max = Dv_s_r[2])
    sow_val <- runif(1, min = D_s_r[1], max = D_s_r[2])
    oth_val <- runif(1, min = D_p_r[1], max = D_p_r[2])
  }
  ifelse(Farms$sowV25 == 1, vac_val, ifelse(Farms$sow == 1, sow_val, oth_val))
}
# One infectious-period vector per replicate under 25% coverage.
D25 = lapply(1:nsims, get_D25, D_s_r, D_p_r, Dv_s_r, Farms)
# D- 50% Coverage
get_D50 = function(k, D_s_r, D_p_r, Dv_s_r, Farms){
  # Infectious-period draw under 50% sow-farm vaccination coverage:
  # vaccinated sow farms (Farms$sowV50 == 1) use the shortened range
  # Dv_s_r, unvaccinated sow farms D_s_r, and all other farms D_p_r.
  #   k == 1 -> lower bounds; k == 2 -> upper bounds;
  #   else   -> one uniform draw per category, shared by every farm in it
  #             (three RNG calls: vaccinated, then sow, then others).
  # Returns a numeric vector of length nrow(Farms).
  if (k == 1) {
    vac_val <- Dv_s_r[1]; sow_val <- D_s_r[1]; oth_val <- D_p_r[1]
  } else if (k == 2) {
    vac_val <- Dv_s_r[2]; sow_val <- D_s_r[2]; oth_val <- D_p_r[2]
  } else {
    vac_val <- runif(1, min = Dv_s_r[1], max = Dv_s_r[2])
    sow_val <- runif(1, min = D_s_r[1], max = D_s_r[2])
    oth_val <- runif(1, min = D_p_r[1], max = D_p_r[2])
  }
  ifelse(Farms$sowV50 == 1, vac_val, ifelse(Farms$sow == 1, sow_val, oth_val))
}
# One infectious-period vector per replicate under 50% coverage.
D50 = lapply(1:nsims, get_D50, D_s_r, D_p_r, Dv_s_r, Farms)
# D- 75% Coverage
get_D75 = function(k, D_s_r, D_p_r, Dv_s_r, Farms){
  # Infectious-period draw under 75% sow-farm vaccination coverage:
  # vaccinated sow farms (Farms$sowV75 == 1) use the shortened range
  # Dv_s_r, unvaccinated sow farms D_s_r, and all other farms D_p_r.
  #   k == 1 -> lower bounds; k == 2 -> upper bounds;
  #   else   -> one uniform draw per category, shared by every farm in it
  #             (three RNG calls: vaccinated, then sow, then others).
  # Returns a numeric vector of length nrow(Farms).
  if (k == 1) {
    vac_val <- Dv_s_r[1]; sow_val <- D_s_r[1]; oth_val <- D_p_r[1]
  } else if (k == 2) {
    vac_val <- Dv_s_r[2]; sow_val <- D_s_r[2]; oth_val <- D_p_r[2]
  } else {
    vac_val <- runif(1, min = Dv_s_r[1], max = Dv_s_r[2])
    sow_val <- runif(1, min = D_s_r[1], max = D_s_r[2])
    oth_val <- runif(1, min = D_p_r[1], max = D_p_r[2])
  }
  ifelse(Farms$sowV75 == 1, vac_val, ifelse(Farms$sow == 1, sow_val, oth_val))
}
# One infectious-period vector per replicate under 75% coverage.
D75 = lapply(1:nsims, get_D75, D_s_r, D_p_r, Dv_s_r, Farms)
# D- 100% Coverage
get_D100 = function(k, D_s_r, D_p_r, Dv_s_r, Farms){
  # Infectious-period draw under 100% sow-farm vaccination coverage:
  # vaccinated sow farms (Farms$sowV100 == 1) use the shortened range
  # Dv_s_r, unvaccinated sow farms D_s_r, and all other farms D_p_r.
  #   k == 1 -> lower bounds; k == 2 -> upper bounds;
  #   else   -> one uniform draw per category, shared by every farm in it
  #             (three RNG calls: vaccinated, then sow, then others).
  # Returns a numeric vector of length nrow(Farms).
  if (k == 1) {
    vac_val <- Dv_s_r[1]; sow_val <- D_s_r[1]; oth_val <- D_p_r[1]
  } else if (k == 2) {
    vac_val <- Dv_s_r[2]; sow_val <- D_s_r[2]; oth_val <- D_p_r[2]
  } else {
    vac_val <- runif(1, min = Dv_s_r[1], max = Dv_s_r[2])
    sow_val <- runif(1, min = D_s_r[1], max = D_s_r[2])
    oth_val <- runif(1, min = D_p_r[1], max = D_p_r[2])
  }
  ifelse(Farms$sowV100 == 1, vac_val, ifelse(Farms$sow == 1, sow_val, oth_val))
}
# One infectious-period vector per replicate under 100% coverage.
D100 = lapply(1:nsims, get_D100, D_s_r, D_p_r, Dv_s_r, Farms)
# NOTE(review): the scenario loops below are near-verbatim copies varying
# only in beta_list*, rho_list* and D* — the author notes that attempts to
# factor them into one function failed; a helper taking those three inputs
# would remove the duplication without changing results.
# Only 25% vaccine ------------------------------------------------------
# Scenario: 25% sow-farm vaccination only (reduced beta on covered farms;
# baseline movement/contact rho). 27 combinations (3 beta levels x 9 rho
# matrices) per replicate, nsims replicates.
# NOTE(review): ode() comes from deSolve and is not loaded in this file's
# visible header — presumably attached elsewhere; confirm.
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
  loop=0
  for(i in 1:3){
    beta = beta_listV25[[i]]
    for(j in 1:9){
      print(paste0("k = ", k, "; i = ", i, "; j = ", j))
      loop = loop+1
      rho = rho_list[[j]]
      beta_matrix = rho * beta # column-major recycling: row i of rho scaled by beta[i]
      stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z) # stacked state, names S1.., I1.., R1..
      parameters <- list("beta"=beta_matrix, "delays"=c(D25[[k]]),
                         "mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
      out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
                            method = "euler"))
      out <- out[out$time %in% seq(0,26,1), ] # keep whole-unit time snapshots only
      o <-data.frame(time=out$time)
      # "time" contains none of S/I/R, so these grabs pick up only state columns.
      o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
      o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
      o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
      o$animals <- o$S+o$R+o$I
      o$prop <- o$I/o$animals # regional prevalence over time
      result_1[[loop]] = out
      result_2[[loop]] = o
    }
  }
  sim_res1[[k]] = result_1
  sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1V25.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2V25.rds")
# Deliberate halt so the script is run one scenario at a time.
stop("Run first simulation")
# 25% vaccine 25 filter (80) -----------------------------------------------
# Scenario: 25% vaccination plus air filtration (F_s[2] = 0.9) at the same
# coverage; uses rho_list_F80_25 instead of the baseline rho_list.
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
  loop=0
  for(i in 1:3){
    beta = beta_listV25[[i]]
    for(j in 1:9){
      print(paste0("k = ", k, "; i = ", i, "; j = ", j))
      loop = loop+1
      rho = rho_list_F80_25[[j]]
      beta_matrix = rho * beta
      stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
      parameters <- list("beta"=beta_matrix, "delays"=c(D25[[k]]),
                         "mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
      out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
                            method = "euler"))
      out <- out[out$time %in% seq(0,26,1), ]
      o <-data.frame(time=out$time)
      o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
      o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
      o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
      o$animals <- o$S+o$R+o$I
      o$prop <- o$I/o$animals
      result_1[[loop]] = out
      result_2[[loop]] = o
    }
  }
  sim_res1[[k]] = result_1
  sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1V25_F80_25.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2V25_F80_25.rds")
# 25% vaccine 25 filter (40) -----------------------------------------------
# Scenario: 25% vaccination plus the weaker filtration (F_s[1] = 0.6);
# uses rho_list_F40_25.
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
  loop=0
  for(i in 1:3){
    beta = beta_listV25[[i]]
    for(j in 1:9){
      print(paste0("k = ", k, "; i = ", i, "; j = ", j))
      loop = loop+1
      rho = rho_list_F40_25[[j]]
      beta_matrix = rho * beta
      stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
      parameters <- list("beta"=beta_matrix, "delays"=c(D25[[k]]),
                         "mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
      out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
                            method = "euler"))
      out <- out[out$time %in% seq(0,26,1), ]
      o <-data.frame(time=out$time)
      o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
      o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
      o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
      o$animals <- o$S+o$R+o$I
      o$prop <- o$I/o$animals
      result_1[[loop]] = out
      result_2[[loop]] = o
    }
  }
  sim_res1[[k]] = result_1
  sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1V25_F40_25.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2V25_F40_25.rds")
# Only 50% vaccine --------------------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_listV50[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D50[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1V50.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2V50.rds")
# 50% vaccine 50% filter (80) ---------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_listV50[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list_F80_50[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D50[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1V50_F80_50.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2V50_F80_50.rds")
# 50% vaccine 50% filter (40) ---------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_listV50[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list_F40_50[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D50[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1V50_F40_50.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2V50_F40_50.rds")
# Only 75% vaccine --------------------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_listV75[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D75[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1V75.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2V75.rds")
# 75% vaccine 75% filter (80) ---------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_listV75[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list_F80_75[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D75[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1V75_F80_75.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2V75_F80_75.rds")
# 75% vaccine 75% filter (40) ---------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_listV75[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list_F40_75[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D75[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1V75_F40_75.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2V75_F40_75.rds")
# Only 100% vaccine --------------------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_listV100[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D100[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1V100.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2V100.rds")
# 100% vaccine 100% filter (80) ---------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_listV100[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list_F80_100[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D100[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1V100_F80_100.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2V100_F80_100.rds")
# 100% vaccine 100% filter (40) ---------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_listV100[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list_F40_100[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D100[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1V100_F40_100.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2V100_F40_100.rds")
# 25% filter (80) ---------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_list[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list_F80_25[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1_F80_25.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2_F80_25.rds")
# 50% filter (80) ---------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_list[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list_F80_50[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1_F80_50.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2_F80_50.rds")
# 75% filter (80) ---------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_list[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list_F80_75[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1_F80_75.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2_F80_75.rds")
# 100% filter (80) ---------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_list[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list_F80_100[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1_F80_100.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2_F80_100.rds")
# 25% filter (40) ---------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_list[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list_F40_25[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1_F40_25.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2_F40_25.rds")
# 50% filter (80) ---------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_list[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list_F40_50[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1_F40_50.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2_F40_50.rds")
# 75% filter (80) ---------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_list[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list_F40_75[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1_F40_75.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2_F40_75.rds")
# 100% filter (80) ---------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_list[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list_F40_100[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1_F40_100.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2_F40_100.rds")
# Baseline ---------------------------------------------
sim_res1 = vector("list", nsims)
sim_res2 = vector("list", nsims)
names(sim_res1) = 1:nsims
names(sim_res2) = 1:nsims
result_1 = vector("list",27)
result_2 = vector("list",27)
names(result_1) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
names(result_2) = c(paste0("beta_l_",1:9), paste0("beta_m_",1:9),paste0("beta_h_",1:9) )
for( k in 1:(nsims)) {
loop=0
for(i in 1:3){
beta = beta_list[[i]]
for(j in 1:9){
print(paste0("k = ", k, "; i = ", i, "; j = ", j))
loop = loop+1
rho = rho_list[[j]]
beta_matrix = rho * beta
stocks <- c(S=Farms$X, I=Farms$Y, R=Farms$Z)
parameters <- list("beta"=beta_matrix, "delays"=c(D[[k]]),
"mort"=c(v[[k]]), "returns"=c(D_imm[[k]])) #remove farms.
out <- data.frame(ode(y=stocks, times=time, func=model, parms=parameters,
method = "euler"))
out <- out[out$time %in% seq(0,26,1), ]
o <-data.frame(time=out$time)
o$S <- apply(out[,c(grep('S', names(out), value=TRUE))], 1, sum)
o$I <- apply(out[,c(grep('I', names(out), value=TRUE))], 1, sum)
o$R <- apply(out[,c(grep('R', names(out), value=TRUE))], 1, sum)
o$animals <- o$S+o$R+o$I
o$prop <- o$I/o$animals
result_1[[loop]] = out
result_2[[loop]] = o
}
}
sim_res1[[k]] = result_1
sim_res2[[k]] = result_2
}
#saveRDS(sim_res1, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res1.rds")
#saveRDS(sim_res2, file="/Volumes/PVD2/Davis/PhD/DISSERTATION/Chapter4/R4/PRRSTM/newset3/sim_res2.rds")
|
51bbe52625b4cf40448e62824959bb9b36080500
|
5e15a024c441bc9d5ee39c26c4c8ddf174a79a88
|
/man/source_bigquery.Rd
|
c07f4fde25aee97a7e2ec38941b2a4acddd15354
|
[] |
no_license
|
bsletten/bigrquery
|
75007cd2efd3b9274ffea8d4d44937e9f52ff132
|
4e3d91d64bf543c3ebe59037c40a710f48775166
|
refs/heads/master
| 2021-01-21T00:36:55.145972
| 2013-08-14T19:15:33
| 2013-08-14T19:15:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,413
|
rd
|
source_bigquery.Rd
|
\name{source_bigquery}
\alias{arrange.source_bigquery}
\alias{filter.source_bigquery}
\alias{mutate.source_bigquery}
\alias{select.source_bigquery}
\alias{source_bigquery}
\alias{summarise.source_bigquery}
\title{A bigquery data source.}
\usage{
source_bigquery(project, dataset, table,
billing = project)
\method{filter}{source_bigquery} (.data, ...)
\method{arrange}{source_bigquery} (.data, ...)
\method{select}{source_bigquery} (.data, ...)
\method{summarise}{source_bigquery} (.data, ...,
.max_pages = 10L, .page_size = 10000L)
\method{mutate}{source_bigquery} (.data, ...,
.max_pages = 10L, .page_size = 10000L)
}
\description{
A bigquery data source.
}
\section{Caching}{
The variable names and number of rows are cached on
source creation, on the assumption that you're probably
doing analysis on a table that's not changing as you run
queries. If this is not correct, then the values of
\code{dim} and \code{dimnames} may be out of date, but it
shouldn't otherwise affect operation.
}
\examples{
library(dplyr)
billing <- "341409650721" # put your project number here
births <- source_bigquery("publicdata", "samples", "natality", billing)
dim(births)
colnames(births)
head(births)
summarise(births, first_year = min(year), last_year = max(year))
date_info <- select(births, year:wday)
head(date_info)
head(filter(select(births, year:wday), year > 2000))
}
|
88fbf7a00890e8104f25591d1ac06e0bb0b160d5
|
7552fbd72df5278b75a50ff23c717fc0f991fbf9
|
/𝚙𝚕𝚘𝚝3.R
|
552937a063846e94172754fc9d99e3be1da3a821
|
[] |
no_license
|
ldhandley/ExData_Plotting1
|
ed86b3278c7707384ba17d054d3cdeccc6003385
|
d2d99cb88de6d440348e09bb5fae6ab1c387ce50
|
refs/heads/master
| 2021-05-11T00:07:36.220558
| 2018-01-21T05:48:14
| 2018-01-21T05:48:14
| 118,297,962
| 0
| 0
| null | 2018-01-21T02:28:53
| 2018-01-21T02:28:53
| null |
UTF-8
|
R
| false
| false
| 959
|
r
|
𝚙𝚕𝚘𝚝3.R
|
library(dplyr)

## Load the raw power data; "?" marks missing values in this file.
power <- read.table("household_power_consumption.txt", header = TRUE,
                    sep = ";", na.strings = "?")

## Keep only the observations from 1-2 February 2007.
feb_first  <- power[as.character(power$Date) == "1/2/2007", ]
feb_second <- power[as.character(power$Date) == "2/2/2007", ]
two_days <- rbind(feb_first, feb_second)

## Combine the Date and Time columns into a single POSIXct datetime,
## then drop the now-redundant originals.
two_days$datetime <- as.POSIXct(paste(two_days$Date, two_days$Time),
                                format = "%d/%m/%Y %H:%M:%S")
two_days <- two_days[, !(names(two_days) %in% c("Date", "Time"))]

## Draw the three sub-metering series and write the plot to a PNG.
png(filename = "plot3.png", width = 480, height = 480)
plot(two_days$datetime, two_days$Sub_metering_1, type = "n",
     xlab = "", ylab = "Energy sub metering")
lines(two_days$datetime, two_days$Sub_metering_1, col = "black")
lines(two_days$datetime, two_days$Sub_metering_2, col = "red")
lines(two_days$datetime, two_days$Sub_metering_3, col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = 1)
dev.off()
|
9e907c86f0671363de0b09fd63ce8c2b8b4cc2fc
|
0d35749c4c44b101afc124d26380574d650fec3a
|
/man/make_grattan_pal_discrete.Rd
|
3e7171d98b1003f70108e43a556f9ec5356fda36
|
[
"MIT"
] |
permissive
|
MattCowgill/grattantheme
|
3b27ce0488907d46dc52eff65622aef0235d4965
|
0b1dfac4e19a38c8894c0556cc1ebd3f1ee991de
|
refs/heads/master
| 2023-01-09T16:58:04.454028
| 2022-12-29T22:15:58
| 2022-12-29T22:15:58
| 351,668,681
| 0
| 0
|
NOASSERTION
| 2021-03-26T05:08:54
| 2021-03-26T05:08:54
| null |
UTF-8
|
R
| false
| true
| 684
|
rd
|
make_grattan_pal_discrete.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grattan_pal.R
\name{make_grattan_pal_discrete}
\alias{make_grattan_pal_discrete}
\title{Create a grattan colour palette}
\usage{
make_grattan_pal_discrete(n)
}
\arguments{
\item{n}{how many colours to return}
}
\description{
This function takes the grattan graph colour palette and returns a vector of n colours.
It is used in \code{\link{scale_colour_grattan}} and \code{\link{scale_fill_grattan}} to build the discrete
colour scale, as the order of colours is specified in the grattan branding guides and so an interpolated scale
does not work.
}
\seealso{
\code{\link{grattan_palettes}}
}
|
1064273cd1b2008ded9be0ca1dbe77db9bb1f758
|
87b9fdfeb907d47f378750675c89471473850ab9
|
/man/Tab.Rd
|
5793bc37a09fb6ab08c30b3c8123539d1562dcad
|
[] |
no_license
|
ayotoasset/LongMemoryTS
|
d9a77e49a0be7f1f91e61f244c39c4ffc5bf5d53
|
0ffefadd5ed237fdfa8d3adc93b79661b5827aa4
|
refs/heads/master
| 2020-04-30T01:45:05.850873
| 2019-02-09T12:46:03
| 2019-02-09T12:46:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 519
|
rd
|
Tab.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cointegrating_rank_estimation.R
\name{Tab}
\alias{Tab}
\title{Test for equality of two elements d_a and d_b of estimated d vector.
This function should not be called directly. It is called as a helper by T.rho.}
\usage{
Tab(d.hat, G.est, m1, a, b, h_n)
}
\description{
Test for equality of two elements d_a and d_b of estimated d vector.
This function should not be called directly. It is called as a helper by T.rho.
}
\keyword{internal}
|
169c5dcc5d9238d95ddc846edceadeacb273bbf2
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googlecivicinfov2.auto/man/Office.Rd
|
293787bd8eeaa1877a1bb91db33783e17a6b583b
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 889
|
rd
|
Office.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/civicinfo_objects.R
\name{Office}
\alias{Office}
\title{Office Object}
\usage{
Office(divisionId = NULL, levels = NULL, name = NULL,
officialIndices = NULL, roles = NULL, sources = NULL)
}
\arguments{
\item{divisionId}{The OCD ID of the division with which this office is associated}
\item{levels}{The levels of government of which this office is part}
\item{name}{The human-readable name of the office}
\item{officialIndices}{List of indices in the officials array of people who presently hold this office}
\item{roles}{The roles which this office fulfills}
\item{sources}{A list of sources for this office}
}
\value{
Office object
}
\description{
Office Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Information about an Office held by one or more Officials.
}
|
060afbc13987a7d76e376a0f2344b73d670c5199
|
c56cb5069a959a5e4d555c4eae307ac73198add9
|
/man/design.cyclic.Rd
|
6231adf68126c60fd6edc302e73e7fdd334fd1ec
|
[] |
no_license
|
cran/agricolae
|
f7b6864aa681ed1304a07405392b73c2182f8950
|
cad9b43db1fcd0f528fe60b8a416d65767c74607
|
refs/heads/master
| 2023-07-26T22:33:36.776659
| 2023-06-30T20:30:06
| 2023-06-30T20:30:06
| 17,694,321
| 7
| 9
| null | 2015-12-31T20:15:37
| 2014-03-13T03:53:58
|
R
|
UTF-8
|
R
| false
| false
| 1,961
|
rd
|
design.cyclic.Rd
|
\name{design.cyclic}
\alias{design.cyclic}
\title{ Cyclic designs }
\description{
A cyclic design is an incomplete block design generated from an
initial incomplete block of size k; the resulting plan is then
randomized. Efficient and robust cyclic designs are available for
6 to 30 treatments with up to 10 replications.
}
\usage{
design.cyclic(trt, k, r, serie = 2, rowcol = FALSE, seed = 0, kinds = "Super-Duper"
,randomization=TRUE)
}
\arguments{
\item{trt}{ vector treatments }
\item{k}{ block size}
\item{r}{ Replications }
\item{serie}{ number plot, 1: 11,12; 2: 101,102; 3: 1001,1002 }
\item{rowcol}{ TRUE: row-column design }
\item{seed}{ init seed random }
\item{kinds}{ random method }
\item{randomization}{ TRUE or FALSE - randomize}
}
\details{
Number of treatments: 6 to 30.
(r) Replications: 2 to 10.
(k) Block size: 2 to 10.
The number of replications must satisfy r = i*k, where "i" is an integer.
}
\value{
\item{parameters}{Design parameters}
\item{sketch}{Design sketch}
\item{book}{Fieldbook}
}
\references{ Kuehl, Robert(2000), Design of Experiments. 2nd ed., Duxbury.
John, J.A. (1981) Efficient Cyclic Design. J. R. Statist. Soc. B, 43, No. 1, pp, 76-80.
}
\author{ Felipe de Mendiburu }
\seealso{\code{\link{design.ab}}, \code{\link{design.alpha}},\code{\link{design.bib}},
\code{\link{design.crd} }, \code{\link{design.split} }, \code{\link{design.dau} },
\code{\link{design.graeco}}, \code{\link{design.lattice}}, \code{\link{design.lsd}},
\code{\link{design.rcbd}}, \code{\link{design.strip}} }
\examples{
library(agricolae)
trt<-letters[1:8]
# block size = 2, replication = 6
outdesign1 <- design.cyclic(trt,k=2, r=6,serie=2)
names(outdesign1)
# groups 1,2,3
outdesign1$sketch[[1]]
outdesign1$sketch[[2]]
outdesign1$sketch[[3]]
outdesign1$book
# row-column design
outdesign2<- design.cyclic(trt,k=2, r=6, serie=2, rowcol=TRUE)
outdesign2$sketch
}
\keyword{ design }
|
d72176baafc6e9ec3305150b26e5f806fb81988c
|
0944064ca4edcce0132789f5fe1464e3a14dc069
|
/cachematrix.R
|
97b59814e3246d4e44f7653d1f721ba63bec5f3e
|
[] |
no_license
|
soorajviraat/ProgrammingAssignment2
|
0c61cd6524736fb47d82ee93bde181f66c72f880
|
4bee308e083f03ef64e7642df710954199a3aa9a
|
refs/heads/master
| 2021-01-17T21:55:41.375325
| 2016-01-26T10:51:32
| 2016-01-26T10:51:32
| 50,215,416
| 0
| 0
| null | 2016-01-23T00:09:06
| 2016-01-23T00:09:05
| null |
UTF-8
|
R
| false
| false
| 1,048
|
r
|
cachematrix.R
|
## makeCacheMatrix: wrap a matrix in a small mutable container that can also
## cache the matrix's inverse.
##
## Returns a list of four closures sharing one enclosing environment:
##   set(y)          -- store a new matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinv(inverse) -- store a computed inverse in the cache
##   getinv()        -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y = matrix()) {
    # Replacing the matrix invalidates whatever inverse was cached before.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve returns the inverse of the matrix wrapped by a
## makeCacheMatrix() object, computing it at most once.
##
## x   : a list created by makeCacheMatrix (set/get/setinv/getinv closures)
## ... : additional arguments forwarded to solve()
##
## If the inverse is already cached it is returned directly (with a message);
## otherwise it is computed with solve(), stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  ## BUG FIX: the original indexed the closure list positionally with single
  ## brackets (x[4], x[2], x[3]), which returns length-1 sub-lists rather than
  ## the functions themselves -- so the NULL check always "hit" the cache and
  ## x[3](inv) would error. Use the named accessor functions instead.
  inv <- x$getinv()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setinv(inv)
  inv
}
|
8a7125bd6ddf2374cc1e38c5d10322aaa9f9d692
|
cc646c432a28fe246ae2cdea8ece6afa7f0b3723
|
/R/get_bench.r
|
4f48f4f6094510845a0fafb8129e33d316b85149
|
[] |
no_license
|
propellerpdx/bambooR
|
17ab2bf1c21b4a4285fdbbf4a4572e5d590de3b6
|
b97586679d603e37babd2755a80328ef984a6268
|
refs/heads/master
| 2023-06-23T03:30:51.203103
| 2023-06-15T22:19:01
| 2023-06-15T22:19:01
| 160,996,612
| 0
| 2
| null | 2020-08-03T15:33:16
| 2018-12-09T02:21:50
|
R
|
UTF-8
|
R
| false
| false
| 3,074
|
r
|
get_bench.r
|
#' Bamboo API get request wrapper
#'
#' Submits a get request to retrieve the custom bench table for all employees
#'
#' @param user Bamboo api user id, register in Bamboo "API Keys"
#' @param password Bamboo login password
#' @param employee_ids an optional list; specifies the employees for which bench
#' time is requested; defaults to c('all') which gets all employee bench time
#' @param year integer; the year for which bench records are desired; optional,
#' defaults to NULL
#' @param verbose a logical; indicates if detailed output from httr calls should
#' be provided; default FALSE
#' @return tbl_df
#'
#' @examples
#' \dontrun{
#' user <- 'your_api_user'
#' password <- 'your_password'
#' bench <- get_bench(user=user,password=password)
#'}
#'
#' @author Mark Druffel, \email{mdruffel@propellerpdx.com}
#' @references \url{https://www.bamboohr.com/api/documentation/}, \url{https://github.com/r-lib/httr}
#'
#' @export
get_bench <-
  function(user=NULL,
           password=NULL,
           employee_ids=c('all'),
           year=NULL,
           verbose=FALSE){
    # Build one API URL per requested employee id, fetch each bench table as
    # JSON, and row-bind everything into a single tibble.
    df <-
      employee_ids %>%
      purrr::map(~ paste0(
        'https://api.bamboohr.com/api/gateway.php/propellerpdx/v1/employees/',
        .x,
        '/tables/customBenchTime')) %>%
      purrr::map(~ httr::GET(
        .x,
        httr::add_headers(Accept = "application/json"),
        httr::authenticate(user = paste0(user), password = paste0(password)),
        # BUG FIX: config() was not namespaced; this file never attaches httr,
        # so the call failed unless the caller happened to library(httr).
        config = httr::config(verbose = verbose))) %>%
      purrr::map(~ httr::content(.x, as = 'text', type = 'json', encoding = 'UTF-8')) %>%
      purrr::map(~ jsonlite::fromJSON(.x, simplifyDataFrame = TRUE)) %>%
      purrr::flatten_df() %>%
      dplyr::select(-id)
    # BUG FIX: the original pipeline referenced colnames(df) while `df` was
    # still being defined, which errors with "object 'df' not found".
    # Materialise df first, then locate the date columns.
    date_cols <- colnames(df)[stringr::str_detect(colnames(df), 'date')]
    df <-
      df %>%
      # mutate_at() accepts a character vector of column names directly, and
      # dplyr::funs() is defunct, so pass the functions themselves.
      dplyr::mutate_at(date_cols, lubridate::ymd) %>%
      dplyr::mutate_at('customHours', as.numeric) %>%
      dplyr::rename(
        'Employee_bambooID' = 'employeeId',
        'Bench_startDate' = 'customStartdate',
        'Bench_endDate' = 'customEnddate1',
        'Bench_hoursCap' = 'customHours'
      )
    # Filter to only include records that touch the requested year if a year was
    # specified. Open-ended records (no end date) that started on or before the
    # requested year are also kept.
    # The only scenario the below doesn't cover is a situation where the bench
    # record covers the entire year, which seems unrealistic
    if(!is.null(year)){
      df <-
        dplyr::filter(
          df,
          lubridate::year(Bench_startDate) == year |
            lubridate::year(Bench_endDate) == year |
            (lubridate::year(Bench_startDate) <= year &
               is.na(Bench_endDate)))
    }
    return(df)
  }
|
12f1c98152b1b836ca340a8101c4c27227bb863e
|
9bd1486e9e0de0159cd21df9534a9f7ed220f398
|
/run_analysis.R
|
d25368c21c8bc31370ac4081b2175cb800a958ed
|
[] |
no_license
|
andrew7brown/Coursera-Getting-and-Cleaning-Wk4
|
4864539e3c724268bbcf2694fd2b5f721664176d
|
90602f1fee6121793f717a76c04d94433d339544
|
refs/heads/master
| 2020-03-11T01:00:39.144662
| 2018-04-16T04:34:07
| 2018-04-16T04:34:07
| 129,678,565
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,725
|
r
|
run_analysis.R
|
library(dplyr)
## Getting & Cleaning Data course project: read the UCI HAR accelerometer
## data, merge train/test, keep only mean/std measurements, attach descriptive
## activity names, and write a tidy per-subject/per-activity summary.
# reading data from UCI HAR Dataset
# First test data
xtest <- read.table("./UCI HAR Dataset/test/X_test.txt")
ytest <- read.table("./UCI HAR Dataset/test/Y_test.txt")
subjecttest <- read.table("./UCI HAR Dataset/test/subject_test.txt")
# Second train data
xtrain <- read.table("./UCI HAR Dataset/train/X_train.txt")
ytrain <- read.table("./UCI HAR Dataset/train/Y_train.txt")
subjecttrain <- read.table("./UCI HAR Dataset/train/subject_train.txt")
# Activity labels
activitylabels <- read.table("./UCI HAR Dataset/activity_labels.txt")
# Features data
features <- read.table("./UCI HAR Dataset/features.txt")
# Merges the training and the test sets to create one data set.
xtotal <- rbind(xtrain, xtest)
ytotal <- rbind(ytrain, ytest)
subjecttotal <- rbind(subjecttrain, subjecttest)
# Extracts only the measurements on the mean and standard deviation for each
# measurement. (Renamed `var` -> `meanstd_features`: `var` masks base::var.)
meanstd_features <- features[grep("mean\\(\\)|std\\(\\)",features[,2]),]
xtotal <- xtotal[,meanstd_features[,1]]
# Uses descriptive activity names to name the activities in the data set
colnames(ytotal) <- "activity"
ytotal$activitylabel <- factor(ytotal$activity, labels = as.character(activitylabels[,2]))
activitylabel <- ytotal[,-1]
# Appropriately labels the data set with descriptive variable names.
colnames(xtotal) <- features[meanstd_features[,1],2]
# From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
colnames(subjecttotal) <- "subject"
total <- cbind(xtotal, activitylabel, subjecttotal)
# BUG FIX: dplyr::funs() is defunct; summarize_all() accepts the function
# directly.
totalmean <- total %>% group_by(activitylabel, subject) %>% summarize_all(mean)
write.table(totalmean, file = "./tidydata.txt", row.names = FALSE, col.names = TRUE)
|
cc27979ea4708e0e1f050cd5c59f84c5b854b7da
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/kernDeepStackNet/tests/KDSNfineTuneTests.R
|
d6f266a7ae5b2f0f8842dc200eda0865328df95e
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,591
|
r
|
KDSNfineTuneTests.R
|
# Regression tests for kernDeepStackNet: a 3-level KDSN (and its fine-tuned
# variant) should beat both a plain linear model and the un-tuned KDSN on the
# built-in `trees` data, measured by out-of-sample RMSE over 10 random splits.
library(caret)
library(kernDeepStackNet)
# Construct test cases with linear model
set.seed(0)
# 10 independent 80/20 train/test index splits of trees$Volume.
treeTestInd <- createDataPartition(y=trees$Volume, p = 0.8, list = TRUE, times=10)
# Fit linear model
# Baseline: OLS out-of-sample RMSE for each split.
err <- vector("numeric", 10)
for(i in 1:10) {
lmPart <- lm(Volume~Height+Girth, data=trees[treeTestInd[[i]], ])
preds <- predict(lmPart, newdata=trees[-treeTestInd[[i]], ])
err[i] <- sqrt(mean((trees[-treeTestInd[[i]], "Volume"]-preds)^2))
}
mean(err)
# Fit with KDSN and three levels
# Kernel bandwidth range: 25%/75% quantiles of squared Euclidean distances of
# the (robust-standardised) first training split.
tempMat <- robustStandard(as.matrix(trees[treeTestInd[[1]], ]))
tempMat <- dist(tempMat)
tempVec <- c(tempMat^2)
quantEuklid <- quantile(tempVec, probs = c(0.25, 0.75))
# NOTE(review): preallocated with length 1 but indexed up to 10 below; R grows
# the vector silently, so this works but looks like a typo for length 10.
errKDSN <- vector("numeric", 1)
Level <- 3
for(i in 1:10) {
KDSNpart <- fitKDSN(y=trees[treeTestInd[[i]], "Volume"], X=as.matrix(trees[treeTestInd[[i]], -3]),
levels=Level, Dim=round(seq(dim(trees)[1], sqrt(dim(trees)[1]), length.out=Level)),
sigma=seq(quantEuklid[1], quantEuklid[2], length.out=Level), lambda=seq(10^-1, 10^-10, length.out=Level),
alpha=rep(0, Level),
info=FALSE, seedW=1:Level, standX=TRUE)
preds <- predict(KDSNpart, newx=as.matrix(trees[-treeTestInd[[i]], -3]))
errKDSN[i] <- sqrt(mean((trees[-treeTestInd[[i]], "Volume"]-preds)^2))
}
errKDSN
mean((errKDSN - err))
# Fine tuning
# Refit the un-tuned KDSN per split (same settings as above) so the fine-tune
# comparison below uses fresh errKDSN values.
Level <- 3
for(i in 1:10) {
KDSNpart <- fitKDSN(y=trees[treeTestInd[[i]], "Volume"], X=as.matrix(trees[treeTestInd[[i]], -3]),
levels=Level, Dim=round(seq(dim(trees)[1], sqrt(dim(trees)[1]), length.out=Level)),
sigma=seq(quantEuklid[1], quantEuklid[2], length.out=Level), lambda=seq(10^-1, 10^-10, length.out=Level),
alpha=rep(0, Level),
info=FALSE, seedW=1:Level, standX=TRUE)
preds <- predict(KDSNpart, newx=as.matrix(trees[-treeTestInd[[i]], -3]))
errKDSN[i] <- sqrt(mean((trees[-treeTestInd[[i]], "Volume"]-preds)^2))
}
mean(errKDSN)
errKDSN1 <- vector("numeric", 10)
# NOTE(review): KDSNpart is carried over between iterations here -- iteration i
# fine-tunes the model produced by the PREVIOUS iteration (the first iteration
# starts from split 10's model above). Confirm this chaining is intentional.
for(i in 1:10) {
KDSNpart <- fineTuneKDSN(KDSNpart,
y=matrix(trees[treeTestInd[[i]], "Volume"], ncol=1),
X=as.matrix(trees[treeTestInd[[i]], -3]),
fineTuneIt=100, info=FALSE, seedInit = 0)
preds <- predict(KDSNpart, newx=as.matrix(trees[-treeTestInd[[i]], -3]))
errKDSN1[i] <- sqrt(mean((trees[-treeTestInd[[i]], "Volume"]-preds)^2))
cat("iter = ", i, "\n")
}
errKDSN1
# The fine-tuned networks must improve on the un-tuned ones on average.
stopifnot(mean(errKDSN1) < mean(errKDSN))
|
79415a2bfc9e9899a94e82b87cdc5625da2b983f
|
091518481f7e2d022c98ea16a45eb3491953671e
|
/R/getDetailedWeatherData.R
|
b17078b270bfbfed0c7602ba74afb0ecf19532d4
|
[
"MIT"
] |
permissive
|
stevenjoelbrey/SmokeInTheCity
|
87625532edf1721a7deb5ab183d159b44d964e36
|
481a4d2b70fe380e14fe75e8e4fdecbf6b41001f
|
refs/heads/master
| 2021-01-10T03:28:58.049008
| 2017-01-24T18:48:40
| 2017-01-24T18:48:40
| 50,605,479
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,673
|
r
|
getDetailedWeatherData.R
|
source("R/getWeatherData.R")
################################################################################
# function takes daily weather conditions and determines the ozone
# relevent weather type for a given monitors time series
################################################################################
# dailySkyType(ASOS): load the station's detailed hourly observations from
# wundergroundDetailed/<ASOS>.RData, keep the photochemistry-relevant hours
# (12:00-17:00), and write a per-day summary of the percentage of "Clear" and
# "Scattered Clouds" observations to wundergroundSkySummary/<ASOS>.RData.
#
# ASOS : station code (e.g. "KGMJ") naming both the input and output files.
# Side effects only: the skySummary data frame is saved to disk, not returned.
dailySkyType <- function(ASOS="KGMJ"){
# Load the ASOS station's detailed weather data
detailedData <- get(load(paste0("wundergroundDetailed/", ASOS,".RData")))
# First subset the temp data by photochemistry relevent hours
# NOTE: times are supplied as local
# NOTE(review): the comment above says times are local, but they are parsed
# with tz="UTC"; the 12-17h filter therefore treats the stored strings as
# wall-clock local times -- TODO confirm against the download code.
time <- as.POSIXlt(detailedData[,1], tz="UTC")
photoChemistryMask <- time$hour >=12 & time$hour <= 17
# Subset by relevant hours
detailedDataSubset <- detailedData[photoChemistryMask,]
time <- time[photoChemistryMask]
# What are the unique days in this series?
# The first 10 characters of the timestamp string are the YYYY-MM-DD date.
timeString <- as.character(time)
dateString <- str_sub(timeString, start = 1L, end = 10)
uniqueDateString <- unique(dateString)
dates <- as.POSIXlt(uniqueDateString, tz="UTC")
# you now know how many rows will be in the summary dataframe, save this
# information and create the dataframe
summary_df <- data.frame(date=dates,
percentClear=rep(NA,length(dates)),
percentScattered=rep(NA,length(dates))
)
# Loop through these days and assess the sky condition
for (i in 1:length(dates)){
# Create the days mask: match on Julian day AND year so multi-year series
# do not mix days.
day <- dates[i]
jDay <- day$yday
year <- day$year
dayMask <- jDay == time$yday & year == time$year
# What are the sky conditions?
conditions <- detailedDataSubset[dayMask,]$Conditions
# Observed values of Conditions in these files include:
# [1] "Clear"                        "Scattered Clouds"             "Rain"
# [4] "Light Rain"                   "Light Drizzle"                "Drizzle"
# [7] "Mostly Cloudy"                "Unknown"                      "Overcast"
# [10] "Light Thunderstorms and Rain" "Heavy Thunderstorms and Rain" "Thunderstorms and Rain"
# [13] "Mist"                         "Fog"                          "Haze"
# [16] "Thunderstorm"                 "Heavy Rain"                   "Heavy Drizzle"
# What % of the observations are "clear" | or Scattered Clouds? | haze?
percentClear <- sum(conditions == "Clear") / length(conditions) * 100
percentScattered <- sum(conditions == "Scattered Clouds") / length(conditions) * 100
#percentScatted <- sum(conditions == "Scattered Clouds") / length(conditions) * 100
# relevant daily value
summary_df[i,2] <- percentClear
summary_df[i,3] <- percentScattered
} # end of day loop
# Persist the per-day summary under the same station code.
skySummary <- summary_df
save(skySummary, file=paste0("wundergroundSkySummary/",ASOS,".RData") )
}
# Run dailySkyType for every ASOS station.
# Best-effort batch driver: the station codes are derived from the detailed
# data files already on disk; each station is summarised independently and
# individual failures are silently skipped so one bad station cannot stop
# the batch.
makeDailySkyTypes <- function(){
  station_files <- list.files("wundergroundDetailed/")
  station_codes <- str_replace(station_files, ".RData", "")
  for (station in station_codes) {
    try(dailySkyType(ASOS = station), silent = TRUE)
  }
}
# assignDetailedWeatherData: for every ozone monitor in the chosen analysis
# data packet, find the nearest ASOS station (within maximumDistance km) that
# has a sky summary on disk, and attach that station's daily
# percentClear + percentScattered series as skyCondition_df.
#
# dataSource      : file name of the packet inside analysisData/ to load;
#                   the packet is an RData file containing `workSpaceData`.
# maximumDistance : great-circle cutoff in km for monitor->station matching.
# Side effects only: the augmented workSpaceData is saved back to
# analysisData/skyMask_<dataSource>.
assignDetailedWeatherData <- function(dataSource="smokeMask_HMS+PM_Gravimentric.RData",
maximumDistance=45){
# Load the chosen data packet, these are the monitors we want to assign detailed weather
# data to (load() injects `workSpaceData` into this function's frame)
load(paste0("analysisData/",dataSource))
# create mask
ozone_df <- workSpaceData[["Ozone_df"]]
workSpaceDates <- as.POSIXct(rownames(ozone_df), tz="UTC")
# Copy this dataframe as a skycondition masking product
skyCondition_df <- ozone_df
monitorLon <- workSpaceData[["lon"]]
monitorLat <- workSpaceData[["lat"]]
# Pull out the ozone_df to copy for size of sky data
sky_df <- workSpaceData[["Ozone_df"]]
nMonitor <- dim(sky_df)[2]
# Define the dats for these data
monitorTime <- as.POSIXct(rownames(sky_df), tz="UTC")
# get metadata for ASOS monitors
mdf <- getWundergroundDataMeta()
ASOSNames <- mdf$airportCode
# Which ones of these do we have detailed data for?
files <- list.files("wundergroundSkySummary/")
downloaded <- str_replace(files, ".RData", "")
# where overlap is NA, these are monitors we dont have yet
overlap <- match(downloaded, ASOSNames)
# Subset by the ASOS monitors where we have downloaded data
mdf_subset <- mdf[overlap,]
ASOSLon <- mdf_subset$Lon
ASOSLat <- mdf_subset$Lat
ASOSNames <- mdf_subset$airportCode
# Make nice points (lon/lat matrices for distHaversine)
monitorLocation <- cbind(monitorLon, monitorLat)
ASOSLocation <- cbind(ASOSLon, ASOSLat)
# Start looping through the monitors looking for sky data, and making sky
# assements! noSky counts monitors with no station inside the cutoff.
noSky <- 0
for (i in 1:nMonitor){
# Calculate distance to all ASOS
distance <- distHaversine( monitorLocation[i,], ASOSLocation) / 1000 # km great circle distance
nearestOrder <- order(distance)
# Arrange relavent variables by distance to the monitor
sortedDistance <- distance[nearestOrder] # for sanity,
distanceMask <- sortedDistance <= maximumDistance
sortedASOSNames <- ASOSNames[nearestOrder][distanceMask]
sortedASOSLocation <- ASOSLocation[nearestOrder,][distanceMask]
# TODO: implement while() functionality to ensure the ASOS station with the
# TODO: best data is used
# identify the closest ASOS station (NA when no station is within range)
closestASOS <- sortedASOSNames[1]
# Open the detailed datafile of the nearby ASOS station
if(is.na(closestASOS)){
noSky <- noSky + 1
skyCondition_df[,i] <- NA
} else{
# These is an ASOS station close enough
skySummary <- get(load(paste0("wundergroundSkySummary/",closestASOS,".RData")))
skyDates <- skySummary$date
# Subset by the workspace data dates; match() leaves NA rows for dates the
# station did not report, so the column stays aligned with workSpaceDates.
subsetDates <- match(workSpaceDates, skyDates)
skySummary_subset <- skySummary[subsetDates, ]
skyCondition_df[,i] <- skySummary_subset$percentClear + skySummary_subset$percentScattered
}
} # End ozone monitor loop
print(noSky)
# Once we have found and assigned sky data fort all ozone monitors save this
# to the working packet
workSpaceData[["skyCondition_df"]] <- skyCondition_df
saveDir <- "analysisData/"
dataPacket <- paste0("skyMask_",dataSource)
saveFile <- paste0(saveDir, dataPacket)
save(workSpaceData,file=saveFile)
}
# Script entry point: run the assignment for the default smoke-mask packet.
assignDetailedWeatherData(dataSource="smokeMask_HMS+PM_Gravimentric.RData",
maximumDistance=45)
|
b1df2607e85e6fd76876ae10d98bcdad4329bf8a
|
6c476d63f50b62a226667e6f7128f4ecab2cc657
|
/R/RobustRegression.R
|
2465b4358750f7dccbbaa4e26b1389dd504ee04d
|
[] |
no_license
|
guhjy/RobustRegression
|
5f9e317b18413543b0a91fa45e90828ce818fc38
|
069d1e54c46303cce0320d16cc709ba4c017ed3c
|
refs/heads/master
| 2021-06-10T11:49:53.556288
| 2017-01-25T22:03:51
| 2017-01-25T22:03:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,084
|
r
|
RobustRegression.R
|
#' Runs robust regression
#'
#' \code{RobustRegression} runs a robust regression with
#' heteroskedastic robust standard errors
#' @param model a linear regression object of the form lm(a ~ b)
#' @param dat the data frame object used in the linear regression model
#' @param cluster_var a character indicating variables to cluster errors by;
#'   'None' (the default) uses plain HC1 heteroskedasticity-robust errors
#' @param make_plot a logical; if TRUE a ggplot of the marginal effects with
#'   robust error bars is returned in the RegPlot component (NA otherwise)
#' @return A list object with components: model - the regression object (with
#' VCOV and RobustSEs attached), TidyModel - a cleaned up version of the model,
#' GlanceModel - tidy summary of the model, AugModel - the original data with
#' some augmented things, RegPlot - the coefficient plot or NA
#' @examples
#' data('iris')
#' regression <- RobustRegression(model = lm(Sepal.Length ~ Sepal.Width + Petal.Length + Species, data = iris),
#' dat = iris, cluster_var = 'Species')
#' @export
RobustRegression<- function(model,dat,cluster_var = 'None', make_plot = FALSE)
{
  # HC1 heteroskedasticity-robust covariance; replaced by a clustered
  # covariance when a clustering variable is supplied.
  model$VCOV<- vcovHC(model,type='HC1')
  if (cluster_var !='None')
  {
    model$VCOV<- ClusteredVCOV(model,dat = dat, cluster = cluster_var)
  }
  # Robust SEs are the square roots of the VCOV diagonal. data.frame()
  # mangles "(Intercept)" into "X.Intercept.", so restore the original name
  # to keep the join keys consistent with broom::tidy() output.
  SEs<- data.frame(t(sqrt(diag(model$VCOV))),stringsAsFactors=FALSE) %>% gather('variable','RobustSE')
  SEs$variable<- as.character(SEs$variable)
  SEs$variable[SEs$variable=='X.Intercept.']<- '(Intercept)'
  model$RobustSEs<- SEs
  # Re-test the coefficients against the robust covariance and collect the
  # robust p-values into a joinable data frame.
  RobustTest<- (coeftest(model,vcov.=model$VCOV))
  VarNames<- rownames(RobustTest)
  RobustMat<- as.data.frame(matrix(NA,nrow=length(VarNames),ncol=2))
  colnames(RobustMat)<- c('variable','RobustPvalue')
  for (i in 1:length(VarNames))
  {
    RobustMat[i,]<- data.frame(as.character(VarNames[i]),RobustTest[i,'Pr(>|t|)'],stringsAsFactors=FALSE)
  }
  TidyModel<- tidy(model) %>%
    dplyr::rename(variable=term) %>%
    left_join(SEs,by='variable') %>%
    left_join(RobustMat,by='variable')
  AugModel<- augment(model)
  GlanceModel<- glance(model)
  TidyModel$variable<- as.factor(TidyModel$variable)
  # BUG FIX: the column is named RobustPvalue; the original wrote
  # TidyModel$RobustPval, which only worked via data.frame partial matching.
  # broom returns tibbles, where partial matching is disabled and `$RobustPval`
  # is NULL, breaking reorder() and pmin().
  TidyModel$variable <- reorder(TidyModel$variable, TidyModel$RobustPvalue)
  # ShortPval caps p-values at 0.2 purely for the plot's colour scale.
  TidyModel$ShortPval<- pmin(TidyModel$RobustPvalue,0.2)
  if (make_plot == TRUE){
    RegPlot<- (ggplot(data=TidyModel,aes(x=variable,y=estimate,fill=ShortPval))+
      geom_bar(position='dodge',stat='identity',color='black')+
      scale_fill_gradient2(high='black',mid='gray99',low='red',midpoint=0.1,
        breaks=c(0.05,0.1,0.15,0.2),labels=c('0.05','0.10','0.15','>0.20')
        ,name='P-Value',guide=guide_colorbar(reverse=TRUE))
      +theme(axis.text.x=element_text(angle=45,hjust=0.9,vjust=0.9))+
      geom_errorbar(mapping=aes(ymin=estimate-1.96*RobustSE,ymax=estimate+1.96*RobustSE))+
      xlab('Variable')+
      ylab(paste('Marginal Effect on ',names(model$model)[1],sep='')))
  } else{
    RegPlot <- NA
  }
  TidyModel$ShortPval<- NULL
  # 95% confidence bounds from the t distribution at the model's residual df.
  TCrit<-(qt(c(0.025,0.975),df=model$df.residual)[2])
  TidyModel$LCI95<- TidyModel$estimate-TCrit*TidyModel$RobustSE
  TidyModel$UCI95<- TidyModel$estimate+TCrit*TidyModel$RobustSE
  return(list(model=model,TidyModel=TidyModel,AugModel=AugModel,GlanceModel=GlanceModel,RegPlot=RegPlot))
}
|
710ae1c01e57c18b5d8d029c862a0c86bb34d035
|
9096176d4a3a6305e08250fa1a7c9b5c2930f20f
|
/scripts/burkina.R
|
fdc95cead92a40d33fe937b0c2f8a239a2ae6618
|
[] |
no_license
|
cg0lden/subnational_distributions_BFA
|
6c9ad7ff1be1ce39a34a6e60724ae3a659c54364
|
4a8e3fb69691ee9b85be3ab9a064df5dc998bd30
|
refs/heads/master
| 2023-05-05T11:33:42.087553
| 2021-05-25T19:39:35
| 2021-05-25T19:39:35
| 300,038,711
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,045
|
r
|
burkina.R
|
# Clean burkina data for spade
#
# Reads the Stata export (already coded with food groups), sums nutrient
# intakes per person per recall day, attaches demographics, harmonises
# duplicated age/sex records, and saves the result for SPADE.
library(tidyverse)
library(haven)
library(here)
library(janitor)
# Load the burkina data from the Stata file (coded with the food groups)
burkina <- read_dta(here( "data", "raw", "Burkina", "Burkina_omega.dta")) %>%
  clean_names() %>%
  select(id_subj, sample_weight, age_mother, age_child_4c, nb_r24h, sex,
         wgt_food, calc, iron, zinc, vita, vitb12, omega_3, code_grp, id_mother, id_child)
summary(burkina)
table(burkina$nb_r24h)
# Make an indicator variable for mother vs child
# Need to add snail nutrients
# NOTE(review): the original comment says "Red meat=9, processed meat=10" but
# the code below uses code_grp==13 for red meat -- confirm the group coding.
burkina_nut <- burkina %>%
  # Rows with a non-empty id_mother belong to mothers; otherwise children.
  mutate(mother = case_when(id_mother != "" ~ 1,
                            TRUE ~ 0)) %>%
  mutate(id = case_when(mother == 1 ~ id_mother,
                        mother == 0 ~ id_child,
                        TRUE ~ NA_character_)) %>%
  mutate(id = as.integer(id)) %>%
  rename(mday = nb_r24h, b12=vitb12) %>%
  group_by(id, mday) %>%
  mutate(red_meat = case_when(
    code_grp==13 ~ wgt_food, TRUE ~ 0)) %>%
  # One row per person per recall day, with total nutrient intakes.
  summarize(b12 = sum(b12),
            iron = sum(iron),
            zinc = sum(zinc),
            vita = sum(vita),
            calc = sum(calc),
            red_meat = sum(red_meat),
            omega_3 = sum(omega_3)) %>% distinct()
# Identifying info: one row per person with age, sex and sampling weight.
burkina_merge <- burkina %>%
  mutate(mother = case_when(id_mother != "" ~ 1,
                            TRUE ~ 0)) %>%
  mutate(id = case_when(mother == 1 ~ id_mother,
                        mother == 0 ~ id_child,
                        TRUE ~ NA_character_)) %>%
  mutate(age = case_when(mother == 1 ~ age_mother,
                         mother == 0 ~ age_child_4c,
                         TRUE ~ NA_real_)) %>%
  mutate(age = as.integer(age)) %>%
  mutate(id = as.integer(id)) %>%
  select(id, age, sex, sample_weight) %>% distinct(id, .keep_all=TRUE)
# Rename and format variables for spade
burkina_spade <- burkina_nut %>%
  left_join(burkina_merge, by=c("id")) %>%
  group_by(id, mday) %>%
  dplyr::select(id, age, sex, mday, b12, iron, zinc, vita, calc, red_meat, omega_3, sample_weight) %>%
  distinct()
# Check for missing or different ages
burkina_missings <- burkina_spade[is.na(burkina_spade$age), ] # shows you the missings
burkina_missings
#No missing ages
#Replace any cases where the ages are different for the same individual
#(keep the minimum age across that individual's rows)
ids_data <- unique(burkina_spade$id)
for (idid in ids_data){
  data.id <- burkina_spade[burkina_spade$id == idid, ]
  if(nrow(data.id) > 1){
    burkina_spade[burkina_spade$id == idid,"age"] <-
      min(burkina_spade[burkina_spade$id == idid,"age"])
  }
}
# Replace any cases where the sex is different for the same individual
# (keep the minimum sex code across that individual's rows)
ids_data <- unique(burkina_spade$id)
for (idid in ids_data){
  data.id <- burkina_spade[burkina_spade$id == idid, ]
  if(nrow(data.id) > 1){
    burkina_spade[burkina_spade$id == idid,"sex"] <-
      min(burkina_spade[burkina_spade$id == idid,"sex"])
  }
}
# BUG FIX: the original call was save(..., replace) -- `replace` is Stata
# syntax; in R it resolved to base::replace and silently serialised that
# function into the output file as a second object.
save(burkina_spade, file = here("data", "processed", "burkina"))
|
332f099221369fa35a5306f1c052aa03aaa39bdb
|
f70307f5abfd6d2cb7075d61e41bfdb242fde4eb
|
/run_analysis.R
|
802084f2ea02cb81b8b4e6ae3e218900b66ae6ab
|
[] |
no_license
|
yyzzll/getdata_project
|
ba3e06dc68bf320ed648186f7ee008f34e49483a
|
c355543c2af0f56484a81915643e735cdd2c31e3
|
refs/heads/master
| 2021-01-18T16:36:49.752947
| 2014-12-18T18:27:15
| 2014-12-18T18:27:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,488
|
r
|
run_analysis.R
|
##############################
#STEP 0 Preparation
############################
##set working directory
# NOTE(review): setwd() in a script is brittle; kept for reproducibility of
# the original assignment workflow.
setwd("E:/temporary/coursera_videos/John Hopkins Data Science specialization/003 getting and cleaning data-v-ongoing/course project")
##verify working directory
getwd()
###download and extract zip files from the course website
library(utils)
fileUrl<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
locFile<-"./dataset.zip"
download.file(fileUrl,locFile,method="curl")
unzip(locFile)
###load data into R
#library(dplyr)
#library(data.table)
activity_labels<-read.table("./UCI HAR Dataset/activity_labels.txt",stringsAsFactors=FALSE)
features<-read.table("./UCI HAR Dataset/features.txt",stringsAsFactors=FALSE)
X_train<-read.table("./UCI HAR Dataset/train/X_train.txt")
y_train<-read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train<-read.table("./UCI HAR Dataset/train/subject_train.txt")
X_test<-read.table("./UCI HAR Dataset/test/X_test.txt")
y_test<-read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test<-read.table("./UCI HAR Dataset/test/subject_test.txt")
###############################################################
#Step 1. Merges the training and the test sets to create one data set.
##############################################################
train<-cbind(subject_train,y_train, X_train)
test<-cbind(subject_test,y_test, X_test)
all_data<-rbind(train,test)
###########################################################
#Step 2. Extracts only the measurements on the mean and standard deviation for each measurement.
###########################################################
colnames(all_data)<-c("id","activity",make.names(features[,2])) #add column names to the dataset
extracted_data<-all_data[,c(1,2, grep("mean|std", colnames(all_data),ignore.case=TRUE))] #select columns whose names contains "mean" or "std" ignoring case, and the y column
############################################################
#Step 3. Uses descriptive activity names to name the activities in the data set
############################################################
extracted_data$activity <- factor(extracted_data$activity,
                                  levels = activity_labels[,1],
                                  labels = activity_labels[,2])
###########################################################
#Step 4. Appropriately labels the data set with descriptive variable names.
##########################################################
#The variable names were generated from "features", which is already descriptive
#e.g., in "tBodyAcc.mean...X", "t" refers to "time", "Body" refers to "Body movement"
# "ACC" refers to the equipment for measurement, which is accelorater
# "X" means the measurement is in X-axis
# the extra "." in the variable names will be removed next to make it easier to read
colnames(extracted_data)<-gsub("\\.\\.", "", colnames(extracted_data))
##########################################################
#Step 5. From the data set in step 4, creates a second, independent tidy data set
#with the average of each variable for each activity and each subject.
##########################################################
library(dplyr)
# BUG FIX: summarise_each()/funs() and tbl_df() are defunct in modern dplyr;
# use as_tibble() and summarise_all() with the function passed directly.
tidy_data<-
  extracted_data %>%
  as_tibble %>%
  group_by(id,activity) %>%
  summarise_all(mean)
#output tidy data to tidy_data.txt with row.names=FALSE
# BUG FIX: the argument is row.names (the original `row.name=` relied on
# partial argument matching).
write.table(tidy_data, file = "tidy_data.txt", row.names=FALSE, append = FALSE, sep = "\t")
|
2e024c6d2795e6d9d8bdbcce3e63c0fdebd0395a
|
c81ff5da3b2645e7d8a19671703042c7b4c45455
|
/tests/a.R
|
eb0ea6d90b7ba7fd3e4d4f482939bf8cae526a7b
|
[] |
no_license
|
apoliakov/SciDBR
|
f4a193dc661bc5bf98e0dd3419091801cc5ecae2
|
2fb508ee32bb61aee24883f52143ac4fb8864d7c
|
refs/heads/master
| 2021-01-13T06:38:36.283935
| 2017-03-30T22:24:54
| 2017-03-30T22:24:54
| 78,246,920
| 0
| 0
| null | 2017-03-30T22:24:54
| 2017-01-06T23:37:08
|
R
|
UTF-8
|
R
| false
| false
| 2,421
|
r
|
a.R
|
# Assert that `a` and `b` are (numerically) equal, ignoring attributes and
# names.  The invocation is printed first so test logs show what was compared;
# a mismatch raises an error via stopifnot().
check <- function(a, b) {
  print(match.call())
  stopifnot(all.equal(a, b, check.attributes = FALSE, check.names = FALSE))
}
# Integration tests for the scidb package: data movement (upload/download
# round trips), AFL operator wrappers and type handling against a live SciDB
# server.  The whole body is skipped unless SCIDB_TEST_HOST is set.
library("scidb")
host = Sys.getenv("SCIDB_TEST_HOST")
if (nchar(host) > 0)
{
  db = scidbconnect(host)
# 1 Data movement tests
# upload data frame
  x = as.scidb(db, iris)
  a = schema(x, "attributes")$name
# binary download (compare only the four numeric columns of iris)
  check(iris[, 1:4], as.R(x)[, a][, 1:4])
# iquery binary download
  check(iris[, 1:4], iquery(db, x, return=TRUE)[, a][, 1:4])
# iquery CSV download
  check(iris[, 1:4], iquery(db, x, return=TRUE, binary=FALSE)[, a][, 1:4])
# as.R only attributes
  check(iris[, 1], as.R(x, only_attributes=TRUE)[, 1])
# only attributes and optional skipping of metadata query by supplying schema in full and abbreviated forms
  check(nrow(x), nrow(as.R(x)))
  check(nrow(x), nrow(as.R(x, only_attributes=TRUE)))
  a = scidb(db, x@name, schema=gsub("\\[.*", "", schema(x)))
  check(nrow(x), nrow(as.R(a)))
# upload vector
  check(1:5, as.R(as.scidb(db, 1:5))[, 2])
# upload matrix
  x = matrix(rnorm(100), 10)
  check(x, matrix(as.R(as.scidb(db, x))[, 3], 10, byrow=TRUE))
# upload csparse matrix
# also check shorthand projection syntax
  x = Matrix::sparseMatrix(i=sample(10, 10), j=sample(10, 10), x=runif(10))
  y = as.R(as.scidb(db, x))
# indices come back shifted (hence the +1 when rebuilding the sparse matrix)
  check(x, Matrix::sparseMatrix(i=y$i + 1, j=y$j + 1, x=y$val))
# issue #126 (wide data frame round trip)
  df = as.data.frame(matrix(runif(10*100), 10, 100))
  sdf = as.scidb(db, df)
  check(df, as.R(sdf, only_attributes=TRUE))
# issue #130 (NA handling across character, numeric and logical columns)
  df = data.frame(x1 = c("NA", NA), x2 = c(0.13, NA), x3 = c(TRUE, NA), stringsAsFactors=FALSE)
  x = as.scidb(db, df)
  check(df, as.R(x, only_attributes=TRUE))
# upload n-d array
# XXX WRITE ME, not implemented yet
# garbage collection
  gc()
# 2 AFL tests
# Issue #128
  i = 4
  j = 6
# inside build() the bare i, j refer to SciDB dimensions, not the R variables
  x = db$build("<v:double>[i=1:2,2,0, j=1:3,1,0]", i * j)
  check(as.R(x)$v, c(1, 2, 2, 4, 3, 6))
# R(i) / R(j) splice the R values (4 and 6) into the AFL expression
  x = db$apply(x, w, R(i) * R(j))
# Need as.integer() for integer64 conversion below
  check(as.integer(as.R(x)$w), rep(24, 6))
# 3 Miscellaneous tests
# issue #156 type checks
# int64 option: round-trip int64 data with and without 64-bit integer support
  db = scidbconnect(host, int64=TRUE)
  x = db$build("<v:int64>[i=1:2,2,0]", i)
  check(as.R(x), as.R(as.scidb(db, as.R(x, TRUE))))
  db = scidbconnect(host, int64=FALSE)
  x = db$build("<v:int64>[i=1:2,2,0]", i)
  check(as.R(x), as.R(as.scidb(db, as.R(x, TRUE))))
# Issue #157 (non-binary download of a float-typed array)
  x = as.R(scidb(db, "build(<v:float>[i=1:5], sin(i))"), binary = FALSE)
}
|
a07c2a2b5755ea570b87ddca51b4c913809ffa0b
|
e0e807bc9a20bc37cca0a542d90b0cd7b30044e5
|
/Club de lectura 2021/Clubdelectura_sesion 3.R
|
cee52ed721984dbdd82540864812d7c15384a0cf
|
[] |
no_license
|
alexisrangelcalvo/meetup-presentations_barranquilla
|
de00741109b54c541a73b68196251d30b2332b38
|
2de764857c6940fe4cc5df70d1950ea60b65e8e5
|
refs/heads/master
| 2023-08-16T21:49:09.005970
| 2021-09-30T17:17:13
| 2021-09-30T17:17:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,094
|
r
|
Clubdelectura_sesion 3.R
|
# Club de lectura
# Sesion 1 - Capitulo 19
# "R for Data Science" de Hadley Wickham y Garrett Grolemund
#Organiza: R-Ladies Galapagos, R-Ladies Barranquilla, R-Ladies Milagro, R-Ladies Guayaquil
# Expositora: Isabel Vasquez Alvarez (R-Ladies Barranquilla)
#----Ejemplo función----
#Construya una función llamada salario que le ingrese el salario
#por hora y el número de horas trabajadas durante una semana por
#un trabajador. La función debe calcular el salario neto semanal.
# Weekly net pay: hourly rate (`sporhoras`) times hours worked (`horas`).
# Returns a formatted message string, e.g. "El salario neto es: $4800".
salario <- function(sporhoras, horas) {
  neto <- sporhoras * horas
  paste0("El salario neto es: $", neto)
}
# Sample payroll table: 10 workers with random weekly hours in [10, 48).
tabla_salarios <- data.frame(Id = 1:10, horas = trunc(runif(10, 10, 48)))

# Append a pay column to `datos`: flat rate of $120 per hour in `horas`.
salario2 <- function(datos) {
  datos[, NCOL(datos) + 1] <- 120 * datos$horas
  datos
}

salario2(tabla_salarios)
#----Ejemplo condicional----
#Construya una función llamada salario que le ingrese el salario
#por hora y el número de horas trabajadas durante una semana por
#un trabajador. La función debe calcular el salario neto semanal,
#teniendo en cuenta que si el número de horas trabajadas durante
#la semana es mayor de 48, esas horas de demás se consideran horas
#extras y tienen un 35% de recargo. Imprima el salario neto.
# Sample payroll table: 10 workers with random weekly hours in [10, 55),
# so some rows exceed the 48-hour regular-time limit.
tabla_salarios <- data.frame(Id = 1:10, horas = trunc(runif(10, 10, 55)))

# Append a pay column: $120/hour up to 48 hours; hours beyond 48 count as
# overtime with a 35% premium (rate * 1.35).
salario2 <- function(datos) {
  for (fila in seq_len(NROW(datos))) {
    horas <- datos[fila, 2]
    if (horas < 48) {
      datos[fila, 3] <- 120 * horas
    } else {
      datos[fila, 3] <- 120 * 48 + 120 * 1.35 * (horas - 48)
    }
  }
  datos
}

salario2(tabla_salarios)
### Conditional on console input
# Console survey: asks whether the user likes R and keeps re-asking
# (recursively) until an affirmative answer ("s" or "S") is given.
encuesta <- function() {
  respuesta <- readline("¿Te gusta R? (s/n) : ")
  afirmativo <- respuesta == "s" || respuesta == "S"
  if (afirmativo) {
    cat("¡Estaba seguro de eso!\n")
    return(invisible(0))
  }
  cat("¿Estás seguro/a? Creo que te has equivocado al responder.\nVuelve a intentarlo.\n\n")
  encuesta()
}
#Construya una función llamada nota que calcule la nota obtenida
#por un alumno en una evaluación de tres puntos cuya ponderación o
#importancia son 20%, 30% y 50% para los puntos I, II y III
#respectivamente. Adicionalmente la función debe generar un mensaje
#sobre si el estudiante aprobó la evaluación o no. El usuario debe
#ingresar las notas individuales de los tres puntos y la función debe
#entregar la nota final de la evaluación.
# Weighted final grade for a three-part exam: 20% / 30% / 50% for parts
# I, II and III.  Interactively asks whether the student earned a bonus and,
# if so, adds it to the grade.  Returns a pass/fail message string
# (passing threshold: 3).
# Refactored: the original duplicated the pass/fail branches and recomputed
# the weighted sum inside the bonus branch; behavior is unchanged.
nota <- function(p1, p2, p3) {
  nota <- p1 * 0.2 + p2 * 0.3 + p3 * 0.5
  msj <- readline("El estudiante ha obtenidos bonos para la calificación (s/n): ")
  if (msj == "s" || msj == "S") {
    msj2 <- readline("digite el valor del bono: ")
    # Adding the bonus to the precomputed sum equals the original's
    # recomputation of (p1*0.2 + p2*0.3 + p3*0.5) + bono.
    nota <- nota + as.numeric(msj2)
  }
  if (nota < 3) {
    paste0("El estudiante reprobó con ", nota)
  } else {
    paste0("El estudiante aprobó con ", nota)
  }
}
nota(4.3, 2, 2)
#### additional arguments via `...`
# Plot cos(w * x) over two full periods of x; any extra arguments are
# forwarded unchanged to plot() (colour, line type, labels, ...).
coseno <- function(w, ...) {
  x <- seq(-2 * pi, 2 * pi, length.out = 200)
  plot(x, cos(w * x), ...)
}

coseno(1)
coseno(w = 2, col = "red", type = "l", lwd = 2)
coseno(w = 2, ylab = "", xlab = "")
# Survey data, semicolon-separated (assumes "encuestas.csv" is in the
# working directory -- TODO confirm).
datos<- read.csv("encuestas.csv", sep = ";")

# Bar chart of the frequency of each distinct value in column `columna` of
# `dataset`, with the count printed inside each bar.  Arguments in `...`
# are forwarded to ggplot2's theme(), e.g. legend.position = "none".
graf_frecuencias<- function(dataset,columna,...){
  library(ggplot2)
  variable<- dataset[,columna]
  # Frequency table of the selected column, as a data frame for ggplot.
  dat<-data.frame(table(variable))
  ggplot(dat,aes( x = variable, y= Freq, fill= variable))+
    geom_bar(position="dodge", stat="identity")+
    theme_classic() +
    labs(x = "Valoración",
         y = "Frecuencia")+
    # Label each bar with its count (second column of the frequency table).
    geom_text(aes(label = paste0(dat[,2]) , y = dat[,2]),
              vjust = 1.2, size = 5, color = "white" )+
    theme(...)
}
graf_frecuencias(datos,1,legend.position="none")
graf1<-graf_frecuencias(datos,1,legend.position="none")
#### Environment values
# Hourly rate read from the enclosing environment rather than a hard-coded
# constant, so it can be changed without editing the function.
sporhora <- 120

# Append a pay column to `datos` using the global rate `sporhora`.
salario2 <- function(datos) {
  datos[, NCOL(datos) + 1] <- sporhora * datos$horas
  datos
}

salario2(tabla_salarios)
|
fc4c1789ec1fce54cbf39bbcc5476188c8951b6f
|
3269b421d5d9f87d42cadbf515bdab47f3307890
|
/man/trainSupv.Rd
|
f769e929d576d27c50dfd9bcdaae0d88b09dd199
|
[] |
no_license
|
cran/RecordLinkage
|
b544f1ddfe31a62d1900c8e81643ac266d52893a
|
b32452149857b15849e9c82fb81e812df6e921fc
|
refs/heads/master
| 2023-04-09T12:36:45.116990
| 2022-11-08T13:10:15
| 2022-11-08T13:10:15
| 17,693,280
| 6
| 16
| null | 2020-04-21T13:09:49
| 2014-03-13T03:28:30
|
C
|
UTF-8
|
R
| false
| false
| 4,414
|
rd
|
trainSupv.Rd
|
\name{trainSupv}
\alias{trainSupv}
\title{Train a Classifier}
\description{
Trains a classifier for supervised classification of record pairs.
}
\usage{
trainSupv(rpairs, method, use.pred = FALSE, omit.possible = TRUE,
convert.na = TRUE, include.data = FALSE, ...)
}
\arguments{
\item{rpairs}{Object of class \code{\link{RecLinkData}}. Training data.}
\item{method}{A character vector. The classification method to use.}
\item{use.pred}{Logical. Whether to use results of an unsupervised classification
instead of true matching status.}
\item{omit.possible}{Logical. Whether to remove pairs labeled as possible
links or with unknown status.}
\item{convert.na}{Logical. Whether to convert \code{NA}s to 0 in the
comparison patterns.}
\item{include.data}{Logical. Whether to include training data in the result object.}
\item{\dots}{Further arguments to the training method.}
}
\details{
The given dataset is used as training data for a supervised classification.
Either the true matching status has to be known for a sufficient number of
data pairs or the data must have been classified previously, e.g. by using
\code{\link{emClassify}} or \code{\link{classifyUnsup}}. In the latter case,
argument \code{use.pred} has to be set to \code{TRUE}.
A classifying method has to be provided as a character string (factors are
converted to character) through argument \code{method}.
The supported classifiers are:
\describe{
\item{\code{"svm"}}{Support vector machine, see \code{\link[e1071]{svm}}.}
\item{\code{"rpart"}}{Recursive partitioning tree, see \code{\link{rpart}}.}
\item{\code{"ada"}}{Stochastic boosting model, see \code{\link[ada]{ada}}.}
\item{\code{"bagging"}}{Bagging with classification trees, see \code{\link[ipred]{bagging}}.}
\item{\code{"nnet"}}{Single-hidden-layer neural network, see \code{\link[nnet]{nnet}}.}
\item{\code{"bumping"}}{A bootstrap based method using classification trees, see details.}
}
Arguments in \code{...} are passed to the corresponding function.
Most classifiers cannot handle \code{NA}s in the data, so by default these
are converted to 0 before training.
By \code{omit.possible = TRUE}, possible links or pairs with unknown status
are excluded from the training set. Setting this argument to \code{FALSE}
allows three-class-classification (links, non-links and possible links), but
the results tend to be poor.
Leaving \code{include.data=FALSE} saves memory; setting it to \code{TRUE} can be useful for saving the classifier while keeping track of the underlying training data.
\acronym{Bumping}, (acronym for \dQuote{Bootstrap umbrella of model
parameters}), is an ensemble method described by \cite{Tibshirani and Knight,
1999}. Such as in bagging, multiple classifiers are trained on bootstrap
samples of the training set. The key difference is that not the aggregated
decision of all classifiers (e.g. by majority vote) is used to classify new
data, but only the single model that performs best on the whole training set.
In combination with classification trees as underlying classifiers this
approach allows good interpretability of the trained model while being more
stable against outliers than traditionally induced decision trees. The number
of bootstrap samples to use can be controlled by supplying the argument
\code{n.bootstrap}, which defaults to 25.
}
\value{
An object of class \code{RecLinkClassif} with the following components:
\item{train}{If \code{include.data} is \code{TRUE}, a copy of \code{rpairs},
otherwise an empty data frame with the same column names.}
\item{model}{The model returned by the underlying training function.}
\item{method}{A copy of the argument \code{method}.}
}
\author{Andreas Borg, Murat Sariyar}
\seealso{\code{\link{classifySupv}} for classifying with the trained model,
\code{\link{classifyUnsup}} for unsupervised classification}
\references{
Tibshirani R, Knight K: Model search by bootstrap \dQuote{bumping}.
Journal of Computational and Graphical Statistics 8(1999):671--686.
}
\examples{
# Train a rpart decision tree with additional parameter minsplit
data(RLdata500)
pairs=compare.dedup(RLdata500, identity=identity.RLdata500,
blockfld=list(1,3,5,6,7))
model=trainSupv(pairs, method="rpart", minsplit=5)
summary(model)
}
\keyword{classif}
|
1cb2c682e295101f9ec8a95719ab248b65d13362
|
03f2e7c67dfde0131c241e71f847e25f10dba1c6
|
/man/hmHClust.Rd
|
23b46a4fd4552cb08d7f9d0ee920fc526354c8a7
|
[
"MIT"
] |
permissive
|
jacobkimmel/hmR
|
6e6384b3e8795b326b2cf287c1e189f409bfdb83
|
71bc2ceb3c980c014dfa82f1a024855efb6ea702
|
refs/heads/master
| 2020-03-11T06:09:58.676935
| 2020-01-05T00:24:28
| 2020-01-05T00:24:28
| 129,823,056
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 866
|
rd
|
hmHClust.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hm_data.R
\name{hmHClust}
\alias{hmHClust}
\title{Performs hierarchical clustering and sets `meta.data$clust` in `hm`}
\usage{
hmHClust(hm, k = 2, linkage = "ward.D2", dist_method = "euclidean",
scalar = "data")
}
\arguments{
\item{hm}{: `heteromtility` data object.}
\item{k}{: integer. number of clusters to set with `cutree()`.}
\item{linkage}{: character. method for hierarchical clustering, compatible with `hclust()`.}
\item{dist_method}{: character. method for distance matrix calculation, compatible with `dist()`.}
\item{scalar}{: character. scalar data space to use. ['data', 'unscaled.data', 'pcs'].}
}
\value{
hm : `heteromtility` data object.
adds `clust` variable to `@meta.data`
}
\description{
Performs hierarchical clustering and sets `meta.data$clust` in `hm`
}
|
808e69b62f0b8a0308208c38927424eb4a2cb8b1
|
bc7c4ea7e7c8c70e3bbac2e4fd8fbcb8863fe055
|
/plot4.R
|
068f3ce0e5876774aa14eb9a47144af2528ea57b
|
[] |
no_license
|
chrisbob12/ExData_Plotting1
|
fed2396a7fba818b171a928a650a215473f24eea
|
86c8712c96d85c642dafb6b0775f6a53f97e024b
|
refs/heads/master
| 2021-01-18T05:47:10.510007
| 2015-01-11T00:55:22
| 2015-01-11T00:55:22
| 24,867,893
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,097
|
r
|
plot4.R
|
## Exploratory plot 4: a 2x2 panel of household power consumption measurements
## for 2007-02-01 and 2007-02-02, written to plot4.png.
## NOTE: the data folder is called "NEI data" for historical reasons, but the
## file actually holds the UCI household power consumption data.

## Download the zip from the web source and unzip it into "NEI data".
## file.path() is used instead of the original "\\"-separated paths so the
## script also works outside Windows.
if (!file.exists("NEI data")) {
    dir.create("NEI data")
}
URL <- "http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zipfile <- file.path("NEI data", "eleccons.zip")
download.file(URL, zipfile)
unzip(zipfile, exdir = "NEI data")

## Read the raw semicolon-separated data.
electric1 <- read.table(file.path("NEI data", "household_power_consumption.txt"),
                        sep = ";", quote = "\"", header = TRUE, stringsAsFactors = FALSE)

## dplyr is used for the data tidying; install it only when it is missing
## (unconditionally installing from a script was the original behavior).
if (!requireNamespace("dplyr", quietly = TRUE)) {
    install.packages("dplyr")
}
library(dplyr)

## Reformat the dates and keep only the two days of interest.
electric2 <- mutate(electric1, Date = as.Date(Date, "%d/%m/%Y"))
electric2 <- filter(electric2, Date == "2007-02-01" | Date == "2007-02-02")
## We can lose the full data set and speed up operations.
rm(electric1)
electric3 <- mutate(electric2, Global_active_power = as.numeric(Global_active_power))
rm(electric2)
## Combine date and time into a POSIXct timestamp and coerce the sub-metering
## columns to numeric for plotting.
electric4 <- mutate(electric3, datetime = as.POSIXct(paste(Date, Time), format = "%Y-%m-%d %H:%M:%S"))
electric4 <- mutate(electric4, Sub_metering_1 = as.numeric(Sub_metering_1))
electric4 <- mutate(electric4, Sub_metering_2 = as.numeric(Sub_metering_2))
electric4 <- mutate(electric4, Sub_metering_3 = as.numeric(Sub_metering_3))

## Plot 4: four panels in a 2x2 grid.
png("plot4.png", width = 480, height = 480)
with(electric4, {
    par(mfrow = c(2, 2))
    plot(datetime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
    plot(datetime, Voltage, type = "l", xlab = "datetime")
    plot(datetime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
    points(datetime, Sub_metering_2, type = "l", col = "red")
    points(datetime, Sub_metering_3, type = "l", col = "blue")
    legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
           lwd = 1, col = c("black", "red", "blue"), bty = "n")
    plot(datetime, Global_reactive_power, type = "l", xlab = "datetime")
})
dev.off()
|
81f5caba349de339badbfa4ebe6c522871eb69a0
|
86d8bea37018403148fe417644202dbac06a4c15
|
/man/get.matchedsets.Rd
|
b5586f1b53ee50aa31b0c7e3f1ebbb8d96ac6af1
|
[] |
no_license
|
insongkim/PanelMatch
|
a8872cf283a360d845cc1331bc33b6d655525a01
|
6ff6a3cf5af105100b951a835fd478a3f73f280e
|
refs/heads/master
| 2023-09-03T23:32:54.654820
| 2022-06-06T01:40:31
| 2022-06-06T01:40:31
| 124,099,147
| 96
| 34
| null | 2023-04-11T21:02:14
| 2018-03-06T15:33:56
|
R
|
UTF-8
|
R
| false
| true
| 2,704
|
rd
|
get.matchedsets.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matched_set_R.r
\name{get.matchedsets}
\alias{get.matchedsets}
\title{get.matchedsets}
\usage{
get.matchedsets(
t,
id,
data,
L,
t.column,
id.column,
treatedvar,
hasbeensorted = FALSE,
match.on.missingness = TRUE,
matching = TRUE
)
}
\arguments{
\item{t}{integer vector specifying the times of treated units for which matched sets should be found. This vector should be the same length as the following \code{id} parameter -- the entries at corresponding indices in each vector should form the t,id pair of a specified treatment unit.}
\item{id}{integer vector specifying the unit ids of treated units for which matched sets should be found. note that both \code{t} and \code{id} can be of length 1}
\item{data}{data frame containing the data to be used for finding matched sets.}
\item{L}{An integer value indicating the length of treatment history to be matched}
\item{t.column}{Character string that identifies the name of the column in \code{data} that contains data about the time variable. Each specified entry in \code{t} should be somewhere in this column in the data. The values must be integers that increase in steps of one.}
\item{id.column}{Character string that identifies the name of the column in \code{data} that contains data about the unit id variable. Each specified entry in \code{id} should be somewhere in this column in the data. This data must be integer.}
\item{treatedvar}{Character string that identifies the name of the column in \code{data} that contains data about the binary treatment variable.}
\item{hasbeensorted}{variable that only has internal usage for optimization purposes. There should be no need for a user to toggle this}
\item{match.on.missingness}{TRUE/FALSE indicating whether or not the user wants to "match on missingness." That is, should units with NAs in their treatment history windows be matched with control units that have NA's in corresponding places?}
\item{matching}{logical indicating whether or not the treatment history should be used for matching. This should almost always be set to TRUE, except for specific situations where the user is interested in particular diagnostic questions.}
}
\value{
\code{get.matchedsets} returns a "matched set" object, which primarily contains a named list of vectors. Each vector is a "matched set" containing the unit ids included in a matched set. The list names will indicate an i,t pair (formatted as "<i variable>.<t variable>") to which the vector/matched set corresponds.
}
\description{
\code{get.matchedsets} is used to identify matched sets for a given unit with a specified i, t.
}
\keyword{internal}
|
5e445140f983c5ce46ec0a32a9b8b58a17337ff5
|
2ba18c32e93ec1313f00776a6036ac197aa9d471
|
/Level1/library/PerformDatabaseOperations.R
|
e4a7457958b5f436d4bde2084d40b2f580554714
|
[] |
no_license
|
accomer/Data-Quality-Analysis
|
19ca44927718358ca857937d897deb0102cf0472
|
3b4dc46d60ca922aafcb52aaf521d5d1aef0dff5
|
refs/heads/master
| 2021-06-28T02:29:27.232166
| 2017-09-11T16:54:23
| 2017-09-11T16:54:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,229
|
r
|
PerformDatabaseOperations.R
|
# Open a database connection using the settings in `config$db`
# (driver, dbname, dbuser, dbpass, dbhost, dbport).  The matching client
# library is loaded on demand.  Returns an RODBC channel for ODBC drivers
# and a DBI connection object otherwise.
establish_database_connection <- function(config)
{
  #initialize database connection parameters
  driver <- config$db$driver;
  # load the appropriate DB library
  switch(driver,
         PostgreSQL = library(RPostgreSQL),
         Oracle = library(ROracle),
         MySQL = library(RMySQL),
         SQLite = library(RSQLite),
         ODBC = library(RODBC)
  )
  dbname <- config$db$dbname;
  dbuser <- config$db$dbuser;
  dbpass <- config$db$dbpass;
  dbhost <- config$db$dbhost;
  dbport <- config$db$dbport;
  # Special handling for ODBC drivers.
  # FIX: grepl() takes the pattern first; the original call had the arguments
  # swapped, which only worked by accident for exact driver names (and would
  # also have matched an empty driver string).
  if (grepl("ODBC", driver, ignore.case = TRUE))
  {
    con <- odbcConnect(dbname, uid = dbuser, pwd = dbpass)
  }
  else
  {
    if (grepl("Oracle", driver, ignore.case = TRUE)) # special handling for Oracle drivers
      con <- dbConnect(dbDriver(driver), host = dbhost, port = dbport, dbname = dbname, user = dbuser, password = dbpass)
    else
      con <- dbConnect(driver, host = dbhost, port = dbport, dbname = dbname, user = dbuser, password = dbpass)
  }
  return(con)
}
# Open a database connection via OHDSI's DatabaseConnector / RJDBC stack
# instead of the native drivers used by establish_database_connection().
# NOTE(review): for "sql server" the database name is passed as the schema
# and the port-less host as the server -- confirm this matches the deployment.
establish_database_connection_OHDSI<-function(config)
{
  library(DatabaseConnector);
  library(RJDBC);
  #jdbcDrivers<<-new.env();
  #initialize database connection parameters
  driver <- config$db$driver;
  dbname <- config$db$dbname;
  dbuser <- config$db$dbuser;
  dbpass <- config$db$dbpass;
  dbhost <- config$db$dbhost;
  dbport <- config$db$dbport;
  dbschema <- config$db$schema;
  if (driver == "sql server") #special handling for sql server
  {
    connectionDetails <- createConnectionDetails(dbms=tolower(driver), server=dbhost,user=dbuser,password=dbpass,schema=dbname,port=dbport)
  }
  else
  {
    # Non-SQL-Server back ends expect "host/dbname" as the server string.
    connectionDetails <- createConnectionDetails(dbms=tolower(driver), server=paste(dbhost,"/",dbname,sep=""),user=dbuser,password=dbpass,schema=dbschema,port=dbport)
  }
  # flog.info(connectionDetails)
  con <- connect(connectionDetails)
  # flog.info(con)
  return(con)
}
# Close a connection opened by establish_database_connection().
# RODBC channels have no dbDisconnect() method, so close() is substituted
# for ODBC drivers.
close_database_connection <- function(con, config)
{
  # Special handling for ODBC drivers.
  # FIX: grepl() takes the pattern first; the original call had the
  # arguments swapped (worked only by accident for exact driver names).
  if (grepl("ODBC", config$db$driver, ignore.case = TRUE))
  {
    dbDisconnect <- close
  }
  # close connection
  dbDisconnect(con)
  # dbUnloadDriver() is intentionally not called: the original code noted
  # that the call fails.
}
# Close a connection opened by establish_database_connection_OHDSI().
# `config` is accepted for signature symmetry with close_database_connection()
# but is not used.
close_database_connection_OHDSI <- function(con,config)
{
  # DatabaseConnector connections are closed with dbDisconnect() directly.
  dbDisconnect(con)
}
# Read an entire table into a data frame, handling driver quirks:
#   - ODBC:         sqlFetch() on "schema.TABLE" with an upper-cased name
#   - Oracle (DBI): dbReadTable() with an upper-cased name and schema=
#   - otherwise:    dbReadTable() with a (schema, table) identifier
# Column names are lower-cased for consistency across back ends.
retrieve_dataframe <- function(con, config, table_name)
{
  # FIX: grepl() takes the pattern first; the original calls had the
  # arguments swapped (worked only by accident for exact driver names).
  if (grepl("ODBC", config$db$driver, ignore.case = TRUE))
  {
    table_name <- toupper(table_name)
    df <- sqlFetch(con, paste(config$db$schema, table_name, sep = "."))
  }
  else if (grepl("Oracle", config$db$driver, ignore.case = TRUE))
  {
    table_name <- toupper(table_name)
    df <- dbReadTable(con, table_name, schema = config$db$schema)
  }
  else
  {
    df <- dbReadTable(con, c(config$db$schema, table_name))
  }
  #converting all names to lower case for consistency
  names(df) <- tolower(names(df))
  return(df)
}
# retrieve counts
# Runs "select count(<column_list>) from <schema>.<table_name>" and returns
# the one-cell result (names lower-cased).  ODBC/Oracle branches upper-case
# the identifiers first.
# NOTE(review): grepl() is called as grepl(x, pattern) -- arguments swapped;
# works only because the driver names match exactly.
# NOTE(review): SQL is built by string concatenation; inputs must be trusted.
retrieve_dataframe_count<-function(con,config,table_name,column_list)
{
  #special handling for ODBC drivers
  if (grepl(config$db$driver,"ODBC",ignore.case=TRUE))
  {
    table_name<-toupper(table_name)
    column_list<-toupper(column_list)
    query<-paste("select count(",column_list,") from ",config$db$schema,".",table_name,sep="");
    df<-sqlQuery(con, query)
  }
  else
  {
    if (grepl(config$db$driver,"Oracle",ignore.case=TRUE))
    {
      table_name<-toupper(table_name)
      column_list<-toupper(column_list)
      query<-paste("select count(",column_list,") from ",config$db$schema,".",table_name,sep="");
      df<-dbGetQuery(con, query)
    }
    else
    {
      query<-paste("select count(",column_list,") from ",config$db$schema,".",table_name,sep="");
      df<-dbGetQuery(con, query)
    }
  }
  #converting all names to lower case for consistency
  names(df) <- tolower(names(df))
  return(df);
}
# Distinct count of <column_list> grouped by <field_name>.
# NOTE(review): unlike table/column names, field_name is NOT upper-cased in
# the ODBC/Oracle branches -- confirm that is intended.
# NOTE(review): grepl() arguments appear swapped (pattern should be first);
# SQL is built by string concatenation, so inputs must be trusted.
retrieve_dataframe_count_group<-function(con,config,table_name,column_list, field_name)
{
  #special handling for ODBC drivers
  if (grepl(config$db$driver,"ODBC",ignore.case=TRUE))
  {
    table_name<-toupper(table_name)
    column_list<-toupper(column_list)
    query<-paste("select ",field_name,", count(distinct ",column_list,") from ",config$db$schema,".",table_name," group by ",field_name,sep="");
    df<-sqlQuery(con, query)
  }
  else
  {
    if (grepl(config$db$driver,"Oracle",ignore.case=TRUE))
    {
      table_name<-toupper(table_name)
      column_list<-toupper(column_list)
      query<-paste("select ",field_name,", count(distinct ",column_list,") from ",config$db$schema,".",table_name," group by ",field_name,sep="");
      df<-dbGetQuery(con, query)
    }
    else
    {
      query<-paste("select ",field_name,", count(distinct ",column_list,") from ",config$db$schema,".",table_name," group by ",field_name,sep="");
      df<-dbGetQuery(con, query)
    }
  }
  #converting all names to lower case for consistency
  names(df) <- tolower(names(df))
  return(df);
}
# printing top 5 values
# Returns the five most frequent non-null values of <field_name> with their
# counts.  The ODBC and Oracle branches use Oracle's "rownum<=5" idiom (so
# the ODBC path presumably targets an Oracle back end -- confirm); other
# drivers use "limit 5".
# NOTE(review): grepl() arguments appear swapped (pattern should be first);
# SQL is built by string concatenation, so inputs must be trusted.
retrieve_dataframe_top_5<-function(con,config,table_name, field_name)
{
  #special handling for ODBC drivers
  if (grepl(config$db$driver,"ODBC",ignore.case=TRUE))
  {
    table_name<-toupper(table_name)
    query<-paste("select * from (select ",field_name,", count(*) as count from ",
                 config$db$schema,".",table_name, " where ",
                 field_name," is not null group by ",
                 field_name ," order by 2 desc) where rownum<=5"
                 ,sep="");
    df<-sqlQuery(con, query)
  }
  else
  {
    if (grepl(config$db$driver,"Oracle",ignore.case=TRUE))
    {
      table_name<-toupper(table_name)
      query<-paste("select * from (select ",field_name,", count(*) as count from ",
                   config$db$schema,".",table_name, " where ",
                   field_name," is not null group by ",
                   field_name ," order by 2 desc) where rownum<=5"
                   ,sep="");
      df<-dbGetQuery(con, query)
    }
    else
    {
      query<-paste("select ",field_name,", count(*) as count from ",config$db$schema,".",table_name," where ",field_name," is not null group by ",field_name
                   ," order by 2 desc limit 5"
                   ,sep="");
      df<-dbGetQuery(con, query)
    }
  }
  #converting all names to lower case for consistency
  names(df) <- tolower(names(df))
  return(df);
}
# Like retrieve_dataframe_top_5, but returns the 20 most frequent non-null
# values of <field_name> among rows matching the extra WHERE <clause>.
# NOTE(review): grepl() arguments appear swapped (pattern should be first);
# SQL is built by string concatenation, so inputs must be trusted.
retrieve_dataframe_top_20_clause<-function(con,config,table_name, field_name,clause)
{
  #special handling for ODBC drivers
  if (grepl(config$db$driver,"ODBC",ignore.case=TRUE))
  {
    table_name<-toupper(table_name)
    query<-paste("select * from (select ",field_name,", count(*) as count from ",
                 config$db$schema,".",table_name, " where ",
                 clause," and ",field_name," is not null group by ",
                 field_name ," order by 2 desc) where rownum<=20"
                 ,sep="");
    df<-sqlQuery(con, query)
  }
  else
  {
    if (grepl(config$db$driver,"Oracle",ignore.case=TRUE))
    {
      table_name<-toupper(table_name)
      query<-paste("select * from (select ",field_name,", count(*) as count from ",
                   config$db$schema,".",table_name, " where ",
                   clause," and ",field_name," is not null group by ",
                   field_name ," order by 2 desc) where rownum<=20"
                   ,sep="");
      df<-dbGetQuery(con, query)
    }
    else
    {
      query<-paste("select ",field_name,", count(*) as count from ",config$db$schema,".",table_name,
                   " where ",clause," and ",field_name," is not null group by ",field_name
                   ," order by 2 desc limit 20"
                   ,sep="");
      df<-dbGetQuery(con, query)
    }
  }
  #converting all names to lower case for consistency
  names(df) <- tolower(names(df))
  return(df);
}
# "select <column_list> from <schema>.<table_name> where <clauses>".
# The schema is an explicit argument here (unlike most helpers, which read
# it from config).  Column names of the result are lower-cased.
# NOTE(review): grepl() arguments appear swapped (pattern should be first);
# SQL is built by string concatenation, so inputs must be trusted.
retrieve_dataframe_clause<-function(con,config,schema,table_name,column_list,clauses)
{
  #special handling for ODBC drivers
  if (grepl(config$db$driver,"ODBC",ignore.case=TRUE))
  {
    table_name<-toupper(table_name)
    column_list<-toupper(column_list)
    query<-paste("select ",column_list," from ",schema,".",table_name," where ",clauses,sep="");
    df<-sqlQuery(con, query)
  }
  else
  {
    if (grepl(config$db$driver,"Oracle",ignore.case=TRUE))
    {
      table_name<-toupper(table_name)
      column_list<-toupper(column_list)
      query<-paste("select ",column_list," from ",schema,".",table_name," where ",clauses,sep="");
      df<-dbGetQuery(con, query)
    }
    else
    {
      query<-paste("select ",column_list," from ",schema,".",table_name," where ",clauses,sep="");
      # flog.info(query)
      #print(query)
      df<-dbGetQuery(con, query)
    }
  }
  #converting all names to lower case for consistency
  names(df) <- tolower(names(df))
  return(df);
}
# Distinct rows of <column_list> from an implicit (comma) join of two tables,
# filtered by <clauses> (which must contain the join condition).
# NOTE(review): the ODBC/Oracle branches also upper-case the whole clause
# string, including any literals inside it -- confirm that is safe.
# NOTE(review): grepl() arguments appear swapped (pattern should be first);
# SQL is built by string concatenation, so inputs must be trusted.
retrieve_dataframe_join_clause<-function(con,config,schema1,table_name1, schema2,table_name2,column_list,clauses)
{
  #special handling for ODBC drivers
  if (grepl(config$db$driver,"ODBC",ignore.case=TRUE))
  {
    table_name1<-toupper(table_name1)
    table_name2<-toupper(table_name2)
    column_list<-toupper(column_list)
    clauses<-toupper(clauses)
    query<-paste("select distinct ",column_list," from ",schema1,".",table_name1
                 ,",",schema2,".",table_name2
                 ," where ",clauses,sep="");
    df<-sqlQuery(con, query)
  }
  else
  {
    if (grepl(config$db$driver,"Oracle",ignore.case=TRUE))
    {
      table_name1<-toupper(table_name1)
      table_name2<-toupper(table_name2)
      column_list<-toupper(column_list)
      clauses<-toupper(clauses)
      query<-paste("select distinct ",column_list," from ",schema1,".",table_name1
                   ,",",schema2,".",table_name2
                   ," where ",clauses,sep="");
      df<-dbGetQuery(con, query)
    }
    else
    {
      query<-paste("select distinct ",column_list," from ",schema1,".",table_name1
                   ,",",schema2,".",table_name2
                   ," where ",clauses,sep="");
      # flog.info(query)
      df<-dbGetQuery(con, query)
    }
  }
  #converting all names to lower case for consistency
  names(df) <- tolower(names(df))
  return(df);
}
# Row counts of an implicit (comma) join of two tables, grouped by
# <column_list> and filtered by <clauses>.
# NOTE(review): only the default branch adds "order by 2 desc"; the ODBC and
# Oracle branches return unordered results -- confirm that is intended.
# NOTE(review): grepl() arguments appear swapped (pattern should be first);
# SQL is built by string concatenation, so inputs must be trusted.
retrieve_dataframe_join_clause_group<-function(con,config,schema1,table_name1, schema2,table_name2,column_list,clauses)
{
  #special handling for ODBC drivers
  if (grepl(config$db$driver,"ODBC",ignore.case=TRUE))
  {
    table_name1<-toupper(table_name1)
    table_name2<-toupper(table_name2)
    column_list<-toupper(column_list)
    #clauses<-toupper(clauses)
    query<-paste("select ",column_list,", count(*) as count from ",schema1,".",table_name1
                 ,",",schema2,".",table_name2
                 ," where ",clauses
                 ," group by ",column_list
                 ,sep="");
    df<-sqlQuery(con, query)
  }
  else
  {
    if (grepl(config$db$driver,"Oracle",ignore.case=TRUE))
    {
      table_name1<-toupper(table_name1)
      table_name2<-toupper(table_name2)
      column_list<-toupper(column_list)
      #clauses<-toupper(clauses)
      query<-paste("select ",column_list,", count(*) as count from ",schema1,".",table_name1
                   ,",",schema2,".",table_name2
                   ," where ",clauses
                   ," group by ",column_list
                   ,sep="");
      df<-dbGetQuery(con, query)
    }
    else
    {
      query<-paste("select ",column_list,", count(*) as count from ",schema1,".",table_name1
                   ,",",schema2,".",table_name2
                   ," where ",clauses
                   ," group by ",column_list
                   ," order by 2 desc"
                   ,sep="");
      # flog.info(query)
      df<-dbGetQuery(con, query)
    }
  }
  #converting all names to lower case for consistency
  names(df) <- tolower(names(df))
  return(df);
}
# Frequency table: "select <field_name>, count(*) as Freq from
# <schema>.<table_name> group by <field_name>".
# NOTE(review): grepl() arguments appear swapped (pattern should be first);
# SQL is built by string concatenation, so inputs must be trusted.
retrieve_dataframe_group<-function(con,config,table_name,field_name)
{
  #special handling for ODBC drivers
  if (grepl(config$db$driver,"ODBC",ignore.case=TRUE))
  {
    table_name<-toupper(table_name)
    field_name<-toupper(field_name)
    query<-paste("select ",field_name,", count(*) as Freq from ",config$db$schema,".",table_name," group by ",field_name,sep="");
    df<-sqlQuery(con, query)
  }
  else
  {
    if (grepl(config$db$driver,"Oracle",ignore.case=TRUE))
    {
      table_name<-toupper(table_name)
      field_name<-toupper(field_name)
      query<-paste("select ",field_name,", count(*) as Freq from ",config$db$schema,".",table_name," group by ",field_name,sep="");
      df<-dbGetQuery(con, query)
    }
    else
    {
      query<-paste("select ",field_name,", count(*) as Freq from ",config$db$schema,".",table_name," group by ",field_name,sep="");
      #print(query)
      #print(con)
      df<-dbGetQuery(con, query)
      #print(query)
    }
  }
  #converting all names to lower case for consistency
  names(df) <- tolower(names(df))
  return(df);
}
# Like retrieve_dataframe_group, but restricted to rows matching the extra
# WHERE <clauses> string.
# NOTE(review): grepl() arguments appear swapped (pattern should be first);
# SQL is built by string concatenation, so inputs must be trusted.
retrieve_dataframe_group_clause<-function(con,config,table_name,field_name, clauses)
{
  #special handling for ODBC drivers
  if (grepl(config$db$driver,"ODBC",ignore.case=TRUE))
  {
    table_name<-toupper(table_name)
    field_name<-toupper(field_name)
    query<-paste("select ",field_name,", count(*) as Freq from ",config$db$schema,".",table_name," where ",clauses," group by ",field_name,sep="");
    df<-sqlQuery(con, query)
  }
  else
  {
    if (grepl(config$db$driver,"Oracle",ignore.case=TRUE))
    {
      table_name<-toupper(table_name)
      field_name<-toupper(field_name)
      query<-paste("select ",field_name,", count(*) as Freq from ",config$db$schema,".",table_name," where ",clauses," group by ",field_name,sep="");
      df<-dbGetQuery(con, query)
    }
    else
    {
      query<-paste("select ",field_name,", count(*) as Freq from ",config$db$schema,".",table_name," where ",clauses," group by ",field_name,sep="");
      df<-dbGetQuery(con, query)
    }
  }
  #converting all names to lower case for consistency
  names(df) <- tolower(names(df))
  return(df);
}
# Evaluates the aggregate expression(s) in <column_list> (e.g. a ratio
# formula) grouped by <field_name>.
# NOTE(review): grepl() arguments appear swapped (pattern should be first);
# SQL is built by string concatenation, so inputs must be trusted.
retrieve_dataframe_ratio_group<-function(con,config,table_name,column_list, field_name)
{
  #special handling for ODBC drivers
  if (grepl(config$db$driver,"ODBC",ignore.case=TRUE))
  {
    table_name<-toupper(table_name)
    column_list<-toupper(column_list)
    query<-paste("select ",field_name,", ",column_list," from ",config$db$schema,".",table_name," group by ",field_name,sep="");
    df<-sqlQuery(con, query)
  }
  else
  {
    if (grepl(config$db$driver,"Oracle",ignore.case=TRUE))
    {
      table_name<-toupper(table_name)
      column_list<-toupper(column_list)
      query<-paste("select ",field_name,", ",column_list," from ",config$db$schema,".",table_name," group by ",field_name,sep="");
      df<-dbGetQuery(con, query)
    }
    else
    {
      query<-paste("select ",field_name,",",column_list," from ",config$db$schema,".",table_name," group by ",field_name,sep="");
      df<-dbGetQuery(con, query)
    }
  }
  #converting all names to lower case for consistency
  names(df) <- tolower(names(df))
  return(df);
}
retrieve_dataframe_ratio_group_join<-function(con,config,table_name_1, table_name_2,ratio_formula, group_by_field,join_field)
{
  # Join two tables in config$db$schema on join_field and compute
  # ratio_formula grouped by group_by_field. Returns a data frame with
  # lower-cased column names.
  #
  # NOTE(review): query assembled by concatenation — inputs must be trusted
  # (SQL-injection risk).
  driver <- config$db$driver
  # BUG FIX: the original grepl() calls had pattern and text swapped
  # (grepl(driver, "ODBC")), so real-world driver strings containing "ODBC"
  # were never detected.
  is_odbc <- grepl("ODBC", driver, ignore.case = TRUE)
  is_oracle <- grepl("Oracle", driver, ignore.case = TRUE)
  # ODBC and Oracle back ends expect upper-case identifiers.
  if (is_odbc || is_oracle)
  {
    table_name_1 <- toupper(table_name_1)
    table_name_2 <- toupper(table_name_2)
    ratio_formula <- toupper(ratio_formula)
    group_by_field <- toupper(group_by_field)
    join_field <- toupper(join_field)
  }
  query <- paste("select ", group_by_field, ", ", ratio_formula, " from ",
                 config$db$schema, ".", table_name_1, ",",
                 config$db$schema, ".", table_name_2,
                 " where ", table_name_1, ".", join_field, "=",
                 table_name_2, ".", join_field,
                 " group by ", group_by_field, sep = "")
  # ODBC connections go through RODBC::sqlQuery; everything else uses DBI.
  if (is_odbc)
  {
    df <- sqlQuery(con, query)
  }
  else
  {
    df <- dbGetQuery(con, query)
  }
  # converting all names to lower case for consistency
  names(df) <- tolower(names(df))
  return(df);
}
retrieve_dataframe_OHDSI <- function(con, config, table_name)
{
  # Fetch every row of config$db$schema.<table_name> through OHDSI's
  # querySql() helper and normalise the column names to lower case.
  sql <- paste0("SELECT * FROM ", config$db$schema, ".", table_name)
  result <- querySql(con, sql)
  names(result) <- tolower(names(result))
  result
}
# for cases where all values in a field belong to one vocab.
get_vocabulary_name_by_concept_code <- function (concept_code,con, config)
{
  # Look up the vocabulary_id(s) for a single concept code in the vocabulary
  # schema's concept table. When the input contains several codes separated
  # by "|", only the first one is used.
  concept_code<-trim(unlist(strsplit(concept_code,"\\|"))[1])
  flog.info(concept_code)
  df_vocabulary_name<-retrieve_dataframe_clause(con,config,config$db$vocab_schema,"concept","vocabulary_id",paste("CONCEPT_CODE in ('",concept_code,"')",sep=""))
  final_vocabulary_name<-""
  # BUG FIX: the original iterated 1:nrow(df); with zero matching rows that
  # produces the index sequence c(1, 0) and corrupts the result. seq_len()
  # handles the empty case and simply returns "".
  for (row_num in seq_len(nrow(df_vocabulary_name)))
  {
    final_vocabulary_name<-paste(final_vocabulary_name,df_vocabulary_name[row_num,1],sep="")
  }
  # NOTE(review): unlike get_vocabulary_name_by_concept_codes(), no "|"
  # separator is inserted between multiple vocabulary names — confirm this
  # asymmetry is intended.
  return(final_vocabulary_name)
}
# for cases where values in a field may be drawn from multiple vocabularies, e.g. procedure source value
get_vocabulary_name_by_concept_codes <- function (con,config, schema1,table_name, field_name, schema2,domain)
{
  # For a field whose values may be drawn from multiple vocabularies
  # (e.g. procedure source value), join <schema1>.<table_name>.<field_name>
  # to <schema2>.concept on concept_code, restricted to the given domain,
  # and return all matching vocabulary_id values concatenated, each
  # followed by "|".
  df_vocabulary_name<-retrieve_dataframe_join_clause(con,config,schema1,table_name,schema2,"concept","vocabulary_id",
                                                     paste(field_name,"= concept_code and upper(domain_id) =upper('",domain,"')",sep="")
  )
  final_vocabulary_name<-""
  # BUG FIX: seq_len() instead of 1:nrow() so an empty result set yields ""
  # rather than iterating over the bogus index sequence c(1, 0).
  for (row_num in seq_len(nrow(df_vocabulary_name)))
  {
    final_vocabulary_name<-paste(final_vocabulary_name,df_vocabulary_name[row_num,1],"|",sep="")
  }
  return(final_vocabulary_name)
}
#returns a list
get_vocabulary_name_by_concept_ids <- function (con, config, table_name, field_name, domain)
{
  # Join <table_name>.<field_name> to the vocabulary schema's concept table
  # on concept_id, restricted to the given domain, and return the matching
  # vocabulary_id values as a character vector.
  join_clause <- paste(field_name,"= concept_id and upper(domain_id) =upper('",domain,"')",sep="")
  vocab_df <- retrieve_dataframe_join_clause(con, config, config$db$schema, table_name,
                                             config$db$vocab_schema, "concept",
                                             "vocabulary_id", join_clause)
  vocab_df$vocabulary_id
}
get_vocabulary_name <- function (concept_id,con, config)
{
  # Return the vocabulary_id column (as a one-column data frame slice) for
  # the concept(s) with the given concept id(s).
  where_clause <- paste("CONCEPT_ID in (", concept_id, ")")
  lookup <- retrieve_dataframe_clause(con, config, config$db$vocab_schema,
                                      "concept", "vocabulary_id", where_clause)
  lookup[1][1]
}
get_concept_name <- function (concept_id,con, config)
{
  # Return the concept_name column (as a one-column data frame slice) for
  # the concept(s) with the given concept id(s).
  where_clause <- paste("CONCEPT_ID in (", concept_id, ")")
  lookup <- retrieve_dataframe_clause(con, config, config$db$vocab_schema,
                                      "concept", "concept_name", where_clause)
  lookup[1][1]
}
get_concept_name_by_concept_code <- function (concept_code,con, config)
{
  # Resolve a concept code to its concept name(s). Several concepts may
  # share the same code, so all matching names are concatenated, each
  # followed by "|".
  concept_code<-gsub("^\\s+|\\s+$", "",concept_code)  # trim surrounding whitespace
  df_concept_name<-retrieve_dataframe_clause(con,config,config$db$vocab_schema,"concept","concept_name",paste("CONCEPT_CODE in ('",concept_code,"')",sep=""))
  final_concept_name<-""
  # BUG FIX: the original iterated 1:nrow(df); with zero matching rows that
  # yields the index sequence c(1, 0), producing "NA|" followed by a
  # zero-length paste() result. seq_len() returns "" for the empty case.
  for (row_num in seq_len(nrow(df_concept_name)))
  {
    final_concept_name<-paste(final_concept_name,df_concept_name[row_num,1],"|",sep="")
  }
  return(final_concept_name)
}
|
975a7d375d6ad4b0661a3ce6bab6b20cc4ab3660
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/shadow/examples/shadowFootprint.Rd.R
|
c55d515d70c444c7f66f3bb1bd55678f0fb795f0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 863
|
r
|
shadowFootprint.Rd.R
|
# Example script for shadow::shadowFootprint — computes a building's shadow
# footprint on the ground, once from a precomputed solar position and once
# from a timestamp, and checks both give the same result.
# NOTE(review): relies on the 'build' SpatialPolygonsDataFrame shipped with
# the shadow package being in scope — confirm it is loaded by library(shadow).
library(shadow)
### Name: shadowFootprint
### Title: Shadow footprint on the ground
### Aliases: shadowFootprint
### shadowFootprint,SpatialPolygonsDataFrame-method
### ** Examples
# Centroid of the building layer (used only to pick a representative point).
location = rgeos::gCentroid(build)
# Winter-afternoon timestamp in the local (Israel) time zone.
time = as.POSIXct("2004-12-24 13:30:00", tz = "Asia/Jerusalem")
# Sun azimuth/elevation at the given lon/lat and time.
solar_pos = maptools::solarpos(
matrix(c(34.7767978098526, 31.9665936050395), ncol = 2),
time
)
footprint1 = ## Using 'solar_pos'
shadowFootprint(
obstacles = build,
obstacles_height_field = "BLDG_HT",
solar_pos = solar_pos
)
footprint2 = ## Using 'time'
shadowFootprint(
obstacles = build,
obstacles_height_field = "BLDG_HT",
time = time
)
# Both construction paths should produce identical footprints.
all.equal(footprint1, footprint2)
footprint = footprint1
# Draw the footprint semi-transparently with the buildings on top.
plot(footprint, col = adjustcolor("lightgrey", alpha.f = 0.5))
plot(build, add = TRUE, col = "darkgrey")
|
078dc607d83bc5d59ed504c5afa1622ee35b9be5
|
625c4159be5b9b4cc2d57f438228b5424423e38a
|
/R/piece.R
|
c749891b91fe8eea0559b73106ce248398ce56f8
|
[] |
no_license
|
tintinthong/chessR
|
e6d936e6cd51b2159a9d28c8b6683602367fd7bb
|
60f8e254f30e1cce77d177a558ae26467144841a
|
refs/heads/master
| 2020-04-20T21:52:30.082501
| 2019-02-08T06:32:01
| 2019-02-08T06:32:01
| 169,121,594
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 241
|
r
|
piece.R
|
#
# piece <- setClass(
#
# "pawn",
#
# slots = c(
# colour="character"
# ),
#
# prototype=list(
# colour="white"
# )
# )
#
# setValidity("piece",
# function(object){
# NULL
# }
# )
#
#
# #,
# #contains="game"
|
6ee077f756fcefc11614b6a52785a4f430158410
|
053f4568fbcf754a01b083ed264803a606ced7fc
|
/07-R-Tutorials/R-Kush/Case Studies/Internet Poll/AnonymityPoll.R
|
371fdea48093396b7887e2c6645d58c51ecfbc8d
|
[] |
no_license
|
akiran1234/analyst-project
|
0a3a1b713a8e170aca8e379f3274fcf89d1504b2
|
b9adb769316139401aba99e3916f4a92d0f89b31
|
refs/heads/master
| 2020-04-10T20:53:11.438022
| 2019-01-14T06:20:24
| 2019-01-14T06:20:24
| 161,280,906
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,863
|
r
|
AnonymityPoll.R
|
# Analysis script for the Pew AnonymityPoll dataset (internet privacy survey).
# Each problem's answer is recorded as an "Ans" comment next to the code that
# produced it.
getwd()
#Problem 1.1 - Loading and Summarizing the Dataset
poll=read.csv("https://storage.googleapis.com/dimensionless/Analytics/AnonymityPoll.csv")
str(poll)
summary(poll)
#How many people participated in the poll?
#Ans:- 1002
# Problem 1.2 - Loading and Summarizing the Dataset
#How many interviewees responded that they use a smartphone?
table(poll$Smartphone)
#Ans:- 487
#How many interviewees responded that they don't use a smartphone?
# Ans:- 472
#How many interviewees have a missing (NA) value for smartphone use?
# Ans:- 43
#Problem 1.3 - Loading and Summarizing the Dataset
#Which of the following are states in the Midwest census region?
table(poll$State,poll$Region)
#or
MidwestInterviewees = subset(poll, Region=="Midwest")
table(MidwestInterviewees$State)
#Ans: Kansas, Missouri, Ohio
#To find the number of interviewees from each South region state we could have used:
SouthInterviewees = subset(poll, Region=="South")
table(SouthInterviewees$State)
# Ans:- Texas
# Problem 2.1 - Internet and Smartphone Users
#How many interviewees reported not having used the Internet and not having used a smartphone?
table(poll$Internet.Use,poll$Smartphone)
#Ans:- 186
#How many interviewees reported having used the Internet and having used a smartphone?
#Ans:-470
#How many interviewees reported having used the Internet but not having used a smartphone?
#Ans :- 285
#How many interviewees reported having used a smartphone but not having used the Internet?
# Ans :- 17
#Problem 2.2 - Internet and Smartphone Users
#How many interviewees have a missing value for their Internet use?
summary(poll$Internet.Use)
#Ans :- 1
#How many interviewees have a missing value for their smartphone use?
summary(poll$Smartphone)
# Ans:- 43
#Problem 2.3 - Internet and Smartphone Users
#How many interviewees are in the new data frame?
# Keep only interviewees who used the Internet OR a smartphone.
limited<-subset(poll,poll$Internet.Use==1|poll$Smartphone==1)
nrow(limited)
#Ans:- 792
#Problem 3.1 - Summarizing Opinions about Internet Privacy
#Which variables have missing values in the limited data frame?
summary(limited)
# Ans:- Smartphone, Age, Conservativeness, Worry.About.Info, Privacy.Importance,
#Anonymity.Possible, Tried.Masking.Identity , Privacy.Laws.Effective
# Problem 3.2 - Summarizing Opinions about Internet Privacy
#What is the average number of pieces of personal information on the Internet, according to the Info.On.Internet variable?
# NOTE(review): this uses the full 'poll' data frame; the problem likely
# refers to 'limited' — verify which mean the answer expects.
mean(poll$Info.On.Internet,na.rm = TRUE)
#Ans:-3.795
#Problem 3.3 - Summarizing Opinions about Internet Privacy
#How many interviewees reported a value of 0 for Info.On.Internet?
# NOTE(review): again tabulates 'poll' rather than 'limited' — confirm.
table(poll$Info.On.Internet)
#Ans:-105
#How many interviewees reported the maximum value of 11 for Info.On.Internet?
#Ans:-8
#Problem 3.4 - Summarizing Opinions about Internet Privacy
#What proportion of interviewees who answered the Worry.About.Info question worry about how much information is available about them on the Internet?
table(limited$Worry.About.Info)
#Ans:- 0.4886
#Problem 3.5 - Summarizing Opinions about Internet Privacy
#What proportion of interviewees who answered the Anonymity.Possible question think it is possible to be completely anonymous on the Internet?
table(limited$Anonymity.Possible)
#Ans:- 0.3692
#Problem 3.6 - Summarizing Opinions about Internet Privacy
#What proportion of interviewees who answered the Tried.Masking.Identity question have tried masking their identity on the Internet?
table(limited$Tried.Masking.Identity)
#Ans:- 0.163
#Problem 3.7 - Summarizing Opinions about Internet Privacy
#What proportion of interviewees who answered the Privacy.Laws.Effective question find United States privacy laws effective?
table(limited$Privacy.Laws.Effective)
#Ans:- 0.256
#Problem 4.1 - Relating Demographics to Polling Results
#Build a histogram of the age of interviewees. What is the best represented age group in the population?
hist(poll$Age)
#Ans:- People aged about 60 years old
#Problem 4.2 - Relating Demographics to Polling Results
#What is the largest number of interviewees that have exactly the same value in their Age variable AND the same value in their Info.On.Internet variable?
# NOTE(review): first table() uses 'poll' but max() uses 'limited' — the
# recorded answer (6) comes from the 'limited' version.
table(poll$Age,poll$Info.On.Internet)
max(table(limited$Age, limited$Info.On.Internet))
plot(limited$Age, limited$Info.On.Internet)
#Ans 6
#Problem 4.3 - Relating Demographics to Polling Results
#To avoid points covering each other up, we can use the jitter() function on the values we pass to the plot function. Experimenting with the command jitter(c(1, 2, 3)), what appears to be the functionality of the jitter command?
jitter(c(1,2,3))
#Ans:- D
#Problem 4.4 - Relating Demographics to Polling Results
#Now, plot Age against Info.On.Internet with plot(jitter(limited$Age), jitter(limited$Info.On.Internet)). What relationship to you observe between Age and Info.On.Internet?
# NOTE(review): xlim expects a length-2 range; the 6-element vector below is
# invalid usage and the extra values are ignored — probably meant c(20, 45).
plot(jitter(limited$Age), jitter(limited$Info.On.Internet),type="h",xlim=c(20,25,30,35,40,45))
#Ans C
#Problem 4.5 - Relating Demographics to Polling Results
#Use the tapply() function to obtain the summary of the Info.On.Internet value, broken down by whether an interviewee is a smartphone user.
#What is the average Info.On.Internet value for smartphone users?
tapply(limited$Info.On.Internet, limited$Smartphone, summary)
#Ans 4.368
#What is the average Info.On.Internet value for non-smartphone users?
# Ans :- 2.923
#Problem 4.6 - Relating Demographics to Polling Results
#Similarly use tapply to break down the Tried.Masking.Identity variable for smartphone and non-smartphone users.
#What proportion of smartphone users who answered the Tried.Masking.Identity question have tried masking their identity when using the Internet?
tapply(limited$Tried.Masking.Identity, limited$Smartphone, table)
#Ans:- 0.1925
#What proportion of non-smartphone users who answered the Tried.Masking.Identity question have tried masking their identity when using the Internet?
#Ans:- 0.1174
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.