blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
090107f77fef69f93558ddde69faaf6d04d4abd7 | 201a8a213e2993159f3ee1e2923af2d54b0546d2 | /R/pull.R | 20b6be97d4c88c67cd48d65bc8fda95f5704e41e | [
"MIT"
] | permissive | djnavarro/workbch | b66bd43b39e3c42bd0eef0d089fa4ec9d3698cb7 | 6cc7a28e92a24acee1f61ad60c124f701108a96a | refs/heads/master | 2020-06-12T23:38:07.160985 | 2020-04-23T02:24:58 | 2020-04-23T02:24:58 | 194,461,800 | 41 | 3 | NOASSERTION | 2019-08-09T04:47:23 | 2019-06-30T01:05:15 | R | UTF-8 | R | false | false | 1,141 | r | pull.R |
# helper functions that take a list of jobs as input
# and "pull" handy information as the output
# Returns a character vector with the jobname of every job in the list.
# A NULL job list yields a zero-length character vector.
pull_jobnames <- function(jobs) {
  if (is.null(jobs)) {
    return(character(0))
  }
  vapply(jobs, function(job) job$jobname, character(1))
}
# Returns a character vector with the normalised filesystem path of every
# job in the list. A NULL job list yields a zero-length character vector.
pull_jobpaths <- function(jobs) {
  if (is.null(jobs)) {
    return(character(0))
  }
  vapply(
    jobs,
    function(job) normalizePath(job$path, mustWork = FALSE),
    character(1)
  )
}
# Returns a tibble with jobname, path and idstring, only
# for those jobs that contain a path. Technically this
# isn't analogous to dplyr::pull, but the name keeps it
# grouped with the other pull_* helpers above.
pull_jobinfo <- function(jobs) {
# One row per job that has a path; jobs without a path contribute a
# zero-row tibble so map_df can still bind everything together.
job_tbl <- purrr::map_df(jobs, function(x){
if(!is.null(x$path)) {
return(tibble::as_tibble(
x[c("jobname", "path", "idstring")]
))
} else {
return(tibble::tibble(
jobname = character(0),
path = character(0),
idstring = character(0)
))
}
})
# Sort alphabetically by job name, then drop rows with missing paths.
job_tbl <- dplyr::arrange(job_tbl, jobname)
job_tbl <- dplyr::filter(job_tbl, !is.na(path))
# as_wkbch_tbl() is defined elsewhere in the package.
return(as_wkbch_tbl(job_tbl))
}
|
47ba8df1eaf99578075556c9a4e245f4f28d0bd2 | b2ef7a87ed5912e24c5b87f9be4bf4052be6c421 | /man/layoutTimestamp.Rd | a051bf91167d25602047087f80fc96272f500abf | [
"Apache-2.0"
] | permissive | parkdongsu/OhdsiRTools | 08f558e2b037a59cd5a3cf9c785d7fe3e9303b30 | 8626e57e7402ca2d7b16ab5efc4730a43c2c225f | refs/heads/master | 2021-01-06T09:32:45.526469 | 2019-07-16T04:04:24 | 2019-07-16T04:04:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 427 | rd | layoutTimestamp.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Logging.R
\name{layoutTimestamp}
\alias{layoutTimestamp}
\title{Logging layout with timestamp}
\usage{
layoutTimestamp(level, message)
}
\arguments{
\item{level}{The level of the message (e.g. "INFO")}
\item{message}{The message to lay out.}
}
\description{
A layout function to be used with an appender. This layout adds the time to the message.
}
|
ec3265b142996398e1eb3bdd43452ed4c101f917 | d5364db161f73c70ee8dec6f9c2229488dbb8649 | /7일차/ex06.R | 8de2316b1e4d7232b7972a773a077d791b92fbae | [] | no_license | dksktjdrhks2/R_programming | 26ac74f3044d2267f9e5f0b1a90b694b83b16322 | e2a73997fdccecdd4246bd9ed2e0990d913b0ad6 | refs/heads/master | 2020-12-13T19:43:37.929777 | 2020-02-13T07:52:26 | 2020-02-13T07:52:26 | 234,513,120 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 275 | r | ex06.R | # %>% 함수를 연결하는 연결 연산자 (파이프)
# Count rows per cylinder group; the piped form is equivalent to the
# nested call shown in the trailing comment.
group_by(x, cyl) %>% summarise(n()) # summarise(group_by(x, cyl), n())
# Add a column ranking cars by mpg, print it, then sort by that rank.
x1 <- mutate(x, xx = rank(mpg))
x1
arrange(x1, xx)
# Same result in a single piped expression (equivalent nested call in comment).
mutate(x, xx = rank(mpg)) %>% arrange(xx) # arrange(mutate(x, xx = rank(mpg)), xx)
|
b31b168fd7ed9714e4cba96000981e8c34dc0ed8 | 78f82b410910e2ec0a3cd2f743ffac4ca0a14af7 | /workflow/scripts/pcacall.R | 581287a411a78c3c2569074550eb4f33a68df2a1 | [
"MIT"
] | permissive | skchronicles/RNA-seek | 22ecc5b8608b55940d1e2f190ea2612e73972550 | 295e0b36dfccfbf8efca9d5e98c3d6dbd1dab0e0 | refs/heads/main | 2023-08-05T02:21:57.445664 | 2023-07-21T19:21:51 | 2023-07-21T19:22:35 | 305,525,443 | 12 | 10 | MIT | 2022-03-04T15:35:50 | 2020-10-19T22:13:53 | Python | UTF-8 | R | false | false | 663 | r | pcacall.R | # Example Usage: Rscript pcacall.R 'DEG_cntrl-test_0.5_2' 'outfilename.html' 'STAR_files/sampletable.txt' 'DEG_cntrl-test_0.5_2/RawCountFile_RSEM_genes_filtered.txt' 'hg19seDEG' 'Enter CCBR Project Description and Notes here.' '/path/to/workingDir/Scripts/PcaReport.Rmd'
## grab args
# Expected positional arguments (see example usage in the header comment):
#   1 = working directory, 2 = output html name, 3 = sample table file,
#   4 = count data file, 5 = project id, 6 = project description,
#   7 = path to the PcaReport.Rmd template
args <- commandArgs(trailingOnly = TRUE)
DIR <- args[1]
outHtml <- args[2]
pcaRmd <- args[7]
# NOTE(review): pandoc location is hard-coded for one cluster installation --
# confirm it matches the deployment environment.
Sys.setenv(RSTUDIO_PANDOC="/usr/local/apps/rstudio/rstudio-1.1.447/bin/pandoc/")
setwd(DIR) # new
# Render the R Markdown PCA report into DIR, forwarding the remaining
# command-line arguments as report parameters.
rmarkdown::render(pcaRmd,output_file=outHtml, params = list(
folder = args[1],
sampleinfo = args[3],
data = args[4],
projectId = args[5],
projectDesc = args[6]
)
)
|
8e77d6aa32ad18cf47574acc360465c9f5a8d640 | abd9bee93e827aa1fb27630b94b6fcb949893f01 | /functions/extract_data/ocde_xls_to_zoo_v3.R | dbe438e3ac55eaccf5ac0cd5361d048b0e566507 | [] | no_license | Allisterh/gdp_forecasts-nsaleille | 0e224f66ead315dcf68a2b654f853069f2be8282 | 798f11602b6152a1df61a58d3f2035ecb6575276 | refs/heads/master | 2023-03-18T04:08:05.558171 | 2020-04-17T13:34:15 | 2020-04-17T13:34:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 668 | r | ocde_xls_to_zoo_v3.R | ocde_xls_to_zoo <- function(file){
require(xlsx)
require(zoo)
raw <- read.xlsx(file, sheetIndex = 1, header = TRUE, startRow = 1)
data <- as.numeric(as.character(raw[7:dim(raw)[1] , 3:dim(raw)[2]]))
data <- na.omit(data)
freq <- as.character(raw[3,3])
name <- as.character(raw[2,3])
if (freq == 'Monthly'){
index <- as.character(raw[7:length(data)+6, 1])
index <- as.yearmon(index, format = '%b-%Y')
}
ts <- zoo(x = data, order.by = index)
names <- as.character(raw[1,1])
id <- constructSeriesId(name)
return(list(list(zoo = ts, names = names, freq = freq, id = id, filepath = file, source = 'ocde', release = 2)))
} |
7dff58b69a0310e427ea53c8b2f795115bad63fb | e29162039c1e5daa9861cebf0ce33d8cd7f7d5cb | /neon_lidar/create_chm_model.R | 486f3ea14d2e82f1d9642a53ac8c3cd420970ec2 | [] | no_license | chennywang8/R | 33ce279802c34a736bacf38d48ec35fbea2091c3 | f9abf2f460f829fc0418b346c686f75057ee6ca4 | refs/heads/master | 2021-02-17T07:12:18.801148 | 2020-09-10T03:17:08 | 2020-09-10T03:17:08 | 245,079,424 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 759 | r | create_chm_model.R | library(raster)
library(rgdal)
# ======== Read Raw Data ===========
# Digital Surface Model (DSM) and Digital Terrain Model (DTM) GeoTIFFs
# for the SJER (California) NEON field site.
fdir <- "NEON-DS-Field-Site-Spatial-Data/SJER"
dsm_file <- "DigitalSurfaceModel/SJER2013_DSM.tif"
dtm_file <- "DigitalTerrainModel/SJER2013_DTM.tif"
dsm <- raster(file.path(fdir, dsm_file))
dtm <- raster(file.path(fdir, dtm_file))
GDALinfo(file.path(fdir, dsm_file))
# ============= CREATE CHM ====================
# Canopy Height Model = surface minus terrain elevation.
chm <- dsm - dtm
plot(chm, main="Lidar Canopy Height Model \n SJER, California")
# specs
print(chm)
# resolution
res(chm)
ncol(chm)
ncell(chm)
# coordinate reference system (CRS)
projection(chm)
# other API
hasValues(chm)
inMemory(chm)
# ============= SAVE GTiff ===================
writeRaster(chm, file.path(fdir, "chm_sjer_output.tif"), "GTiff", overwrite = TRUE)
|
6809c848efb8a69f0726833391743388094a89d9 | 8e45cebd1960b6ef4dd1bd1513d17177648a0c56 | /R/GCcattle.R | d301507ca1004022cc942d3017c410431fd099f5 | [] | no_license | QGresources/GCA | d172195ac41362a63a794034a758e25b10091f3a | ab09ac211257e65b06beb6084d13e655f0113656 | refs/heads/master | 2023-03-09T15:37:31.163165 | 2021-02-23T04:40:37 | 2021-02-23T04:40:37 | 267,158,905 | 6 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,069 | r | GCcattle.R | #' Cattle dataset
#'
#' The cattle dataset is simulated with QMSim software (Sargolzaei and Schenkel, 2009).
#' This dataset includes 2,500 individuals across six generations (from founder to generation 5),
#' each with 10,000 single nucleotide polymorphisms spread over 29 autosomes. A single phenotype
#' with a heritability of 0.6 was simulated.
#' Two fixed covariates, sex and unit, are available.
#'
#' @docType data
#' @name GCcattle
#' @keywords datasets
#' @usage
#' data(GCcattle)
#'
#' @format
#' \itemize{
#' \item cattle.pheno: A matrix with a dimension of 2500 x 6, which includes one phenotype, two fixed covariates and pedigree information.
#' \item cattle.W: A 2500 by 10,000 matrix, which contains marker information.
#' }
#'
#' @author Haipeng Yu and Gota Morota
#'
#' Maintainer: Haipeng Yu \email{haipengyu@@vt.edu}
#'
#' @example man/examples/GCcattle.R
#' @references Sargolzaei, M., and F. S. Schenkel. 2009. Qmsim: a large-scale
#' genome simulator for livestock. Bioinformatics 25:680–681. doi:10.1093/bioinformatics/btp045
NULL
|
34ce69eaf91d0e40c1db1a0dd9622f83efe22f1b | 91c7fa871afb111981a68ba412a216f6a0abe23d | /DM_project.R | 47ee91816e832abcaceb287a199faf7c0c5ea4c2 | [] | no_license | asula/DM_project | 44ca4ddf1d6a19efe7f4fed16a42277499a62eff | d5fd15edd414258a3c6f3852d88e49b42381d5e9 | refs/heads/master | 2021-01-01T05:25:49.650670 | 2016-05-25T22:08:30 | 2016-05-25T22:08:30 | 59,700,899 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,668 | r | DM_project.R | aa_keywords <- read.delim("Datasets/aa_keywords.txt", header=FALSE)
# Load keyword, genre, and winner tables for Academy Award (aa) and
# Razzie (rz) movies (aa_keywords is read just above), then give the
# raw header-less tables meaningful column names.
rz_keywords <- read.delim("Datasets/rz_keywords.txt", header=FALSE)
rz_genres <- read.delim("Datasets/rz_genres.txt", header=FALSE)
aa_genres <- read.delim("Datasets/aa_genres.txt", header=FALSE)
rz_movies <- read.delim("Datasets/rz_movie_wins.txt", header=FALSE)
aa_movies <- read.delim("Datasets/aa_movie_wins.txt", header=FALSE)
test_keywords <- read.delim("Datasets/test_keywords.txt", header=FALSE)
colnames(test_keywords) <- c("id","keyword")
colnames(aa_keywords) <- c("id","keyword")
colnames(rz_keywords) <- c("id","keyword")
colnames(aa_genres) <- c("id","genre")
colnames(rz_genres) <- c("id","genre")
colnames(aa_movies) <- c("id","name","year")
colnames(rz_movies) <- c("id","name","year")
# Count the distinct values in `freqs` that occur with frequency of at
# least `min_freq` (used below to size wordcloud colour palettes).
n.elems <- function(freqs, min_freq) {
  kept <- freqs[freqs >= min_freq]
  length(unique(kept))
}
# ------------------------------------------------------------ #
# Find keyword and genre frequencies
# ------------------------------------------------------------ #
# Both table() and sort(unique()) order character values the same way,
# so each frequency vector lines up with its sorted name vector.
# 1. Academy Award keywords and corresponding frequencies
aa_kw_freqs <- as.numeric(table(aa_keywords$keyword))
aa_kws <- sort(unique(aa_keywords$keyword))
# 2. Academy Award genres and corresponding frequencies
aa_gnr_freqs <- as.numeric(table(aa_genres$genre))
aa_gnrs <- sort(unique(aa_genres$genre))
# 3. Razzie keywords and corresponding frequencies
rz_kw_freqs <- as.numeric(table(rz_keywords$keyword))
rz_kws <- sort(unique(rz_keywords$keyword))
# Limited Razzie list: drop the label that trivially identifies winners.
rz_lim_indx = match(setdiff(rz_kws,c("worst picture razzie winner")),rz_kws)
rz_lim_kws = rz_kws[rz_lim_indx]
rz_lim_kw_freqs = rz_kw_freqs[rz_lim_indx]
# 4. Razzie genres and corresponding frequencies
rz_gnr_freqs <- as.numeric(table(rz_genres$genre))
rz_gnrs <- sort(unique(rz_genres$genre))
# Keywords only present in AA-s:
aa_un_kw_indx = match(setdiff(aa_kws,rz_kws),aa_kws)
aa_un_kws = aa_kws[aa_un_kw_indx]
aa_un_kw_freqs = aa_kw_freqs[aa_un_kw_indx]
# Keywords only present in RZ-s:
#"box office flop","critically bashed",
rz_un_kw_indx = match(setdiff(setdiff(rz_kws,aa_kws),c("worst picture razzie winner")),rz_kws)
rz_un_kws = rz_kws[rz_un_kw_indx]
rz_un_kw_freqs = rz_kw_freqs[rz_un_kw_indx]
# ------------------------------------------------------------ #
# Prediction model :s
# ------------------------------------------------------------ #
# Keep only reasonably frequent keywords from each award list.
aa_kws_lim <- aa_kws[aa_kw_freqs>10]
aa_kw_freqs_lim <- aa_kw_freqs[aa_kw_freqs>10]
rz_kws_lim <- rz_kws[rz_kw_freqs>5]
rz_kw_freqs_lim <- rz_kw_freqs[rz_kw_freqs>5]
all_kws <- union(rz_kws_lim,aa_kws_lim)
# Remove keywords that leak the outcome or encode box-office success.
wrds_to_remove <- c("box office flop","box office hit","blockbuster","worst picture razzie winner")
all_kws <- setdiff(all_kws,wrds_to_remove)
# Binary movie-by-keyword feature matrix: rows = movies, cols = keywords.
nr = nrow(aa_movies) + nrow(rz_movies)
nc = length(all_kws)
# NOTE(review): movie counts (89 AA, 40 RZ) are hard-coded here and below --
# confirm they match the loaded tables.
aa_movies$id.2 <- c(1:89)
rz_movies$id.2 <- c(90:129)
pred.data <- matrix(data = rep(0,nr*nc),ncol= nc,nrow =nr)
i = 1
for (id in aa_movies$id){
present_kws <- match(intersect(aa_keywords$keyword[which(aa_keywords$id == id)],all_kws),all_kws)
pred.data[i,present_kws] <- rep(1,length(present_kws))
i = i+1
}
for (id in rz_movies$id){
present_kws <- match(intersect(rz_keywords$keyword[which(rz_keywords$id == id)],all_kws),all_kws)
pred.data[i,present_kws] <- rep(1,length(present_kws))
i = i+1
}
# Feature vector for one held-out test movie (id 7).
# NOTE(review): length 175 is hard-coded -- presumably should equal length(all_kws).
testv <- rep(0,175)
present_kws <- match(intersect(test_keywords$keyword[which(test_keywords$id == 7)],all_kws),all_kws)
testv[present_kws] <- rep(1,length(present_kws))
# Labels: 1 = Academy Award winner (first 89 rows), 0 = Razzie winner.
# NOTE(review): `class` shadows base::class() within this script.
class <- c(rep(1,89),rep(0,40))
df = data.frame(pred.data)
colnames(df) = all_kws
rownames(df) = c(aa_movies$id.2,rz_movies$id.2)
df$class = class
# Random 100/29 train/test split.
# NOTE(review): no set.seed(), so the split differs between runs.
train_indx <- sample(c(1:129),100)
test_indx <- setdiff(c(1:129),train_indx)
train <- df[train_indx,]
test <- df[test_indx,]
mt <- matrix(data=testv,nrow=1)
df2 <- data.frame(mt)
colnames(df2) <- all_kws
library(e1071)
# Naive Bayes classifier fit on all rows, scored on the held-out rows.
model <- naiveBayes(class ~ ., data = df)
# NOTE(review): the column count 179 is hard-coded -- confirm against ncol(df).
predictions <- predict(model,test[,c(1:179)])
# Logistic-regression alternative, scored on the single test movie.
mod <- glm(class ~ ., family = "binomial",data = df)
preds <- predict(mod,df2,type="response")
#test[,c(1:176)]
match(intersect(rz_keywords$keyword[which(rz_keywords$id == 1)],all_kws),all_kws)
# ------------------------------------------------------------ #
# Visualizing with word cloud
# ------------------------------------------------------------ #
library(wordcloud)
# Each call draws one cloud; min.freq hides rare terms, and n.elems()
# (defined above) sizes the colour palette to the number of shown terms.
# AA keywords
wordcloud(aa_kws,aa_kw_freqs, scale = c(3.5,.000005),min.freq = 16,
colors = rev(rainbow(n.elems(aa_kw_freqs,16),start=0,end=0.3)))
# AA genres
wordcloud(aa_gnrs, aa_gnr_freqs, scale = c(6,1),min.freq = 1,
colors = rev(rainbow(n.elems(aa_gnr_freqs,1),start=0,end=0.3)))
# RZ keywords
wordcloud(rz_kws, rz_kw_freqs, scale = c(4,.005),min.freq = 6,
colors = (rainbow(n.elems(rz_kw_freqs,6),start=0.7,end=1)))
# RZ keywords vol 2 (without the winner-label keyword)
wordcloud(rz_lim_kws, rz_lim_kw_freqs, scale = c(4,.05),min.freq = 6,
colors = (rainbow(n.elems(rz_lim_kw_freqs,6),start=0.7,end=1)))
# RZ genres
wordcloud(rz_gnrs, rz_gnr_freqs, scale = c(5,1),min.freq = 2,
colors = (rainbow(n.elems(rz_gnr_freqs,2),start=0.7,end=1)))
# AA unique keywords
#length(aa_un_kw_freqs[aa_un_kw_freqs > 8])
wordcloud(aa_un_kws, aa_un_kw_freqs, scale = c(3.3,0.0000005),min.freq = 8,
colors = rev(rainbow(n.elems(aa_un_kw_freqs,8),start=0,end=0.3)))
# RZ unique keywords
wordcloud(rz_un_kws, rz_un_kw_freqs, scale = c(5,1),min.freq = 3
,colors = (rainbow(n.elems(rz_un_kw_freqs,3),start=0.7,end=1)))
# Movie with the largest row sum of the feature matrix in each group
# (i.e. the most matched keywords).
most_oscar_film = aa_movies[which((as.numeric(rowSums(df))[c(1:89)])==max((rowSums(df))[c(1:89)])),][2]
most_razzie_film = rz_movies[which((as.numeric(rowSums(df))[c(90:129)])==max((rowSums(df))[c(90:129)])),][2]
|
b4691af4a556dffe78e4310991960cd4946ab613 | bbe474eaf11c4013e6f3f287d71af0ce5475dfae | /scripts/do_sample.R | 0c530572ea31a506b9aee35fe5d8d1bb2106499b | [] | no_license | valentinitnelav/Hawaii_plot_resample | 3f808fb9204d90376cc888288d327bd5c36d0397 | 6bf3d9e988670b8478c5d2c3ee65bd0deb319966 | refs/heads/master | 2020-03-06T16:59:36.699539 | 2018-03-27T13:04:01 | 2018-03-27T13:04:01 | 126,982,537 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,197 | r | do_sample.R | # *************************************************************************
# Function to run the sampling procedure
# *************************************************************************
# Parameters:
# sp_code - species codes column
# min_area - minimum sample area value
# plot_area - plot area column
# sum_abund - total plot abundance
# repl - number of replications; defaults to 1000 if not specified
# Returns:
# a 3 element list with sampled abundance mean and SD for each species
do_sample <- function(sp_code, min_area, plot_area, sum_abund, repl = 1000) {
# Compute sample size
sample_size <- round(min_area / plot_area[1] * sum_abund[1])
# Sample n times the species names. Will get a matrix of species names.
# Note: function `replicate()` will simplify its output to matrix or vector.
# It simplifies to vector if there is one row matrix!
# This happens for one-species plots.
# Therefore, the solution with `simplify = FALSE` fallowed by a `do.call`
# So, operating on lists is safer - https://stackoverflow.com/a/14490315/5193830
set.seed(666)
tbl_sp <- replicate(
n = repl,
expr = sample(x = sp_code,
size = sample_size,
replace = TRUE),
simplify = FALSE
)
tbl_sp <- do.call(cbind, tbl_sp)
# Get counts by species from the matrix of species names
sp_unq <- sort(unique(sp_code))
tbl_counts <- apply(X = tbl_sp,
MARGIN = 2,
FUN = function(col) table(factor(col, levels = sp_unq)))
# table(factor(col, levels...)) inspired from
# https://stat.ethz.ch/pipermail/r-help/2010-June/242381.html
# Note: function `apply` suffers of the same simplify issue, fix with an if;
# if vector (does not have dimensions) then convert to one row matrix.
tbl_counts <- if (is.null(dim(tbl_counts))) matrix(tbl_counts, nrow = 1) else tbl_counts
# Compute sampled mean abundance and SD.
# Wrap the results with list() so that they pass as columns in the data.table
return(
list(sp_code = sp_unq,
sampled_abund_mean = matrixStats::rowMeans2(tbl_counts),
sampled_abund_sd = matrixStats::rowSds(tbl_counts))
)
}
|
44f90a234d568476c5cfa9675df0594995366a64 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Dowd/examples/LogNormalESDFPerc.Rd.R | bec62c73876da401a42d9d92c1f47e3707c5f812 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 555 | r | LogNormalESDFPerc.Rd.R | library(Dowd)
### Name: LogNormalESDFPerc
### Title: Percentiles of ES distribution function for normally distributed
### geometric returns
### Aliases: LogNormalESDFPerc
### ** Examples
# Estimates percentiles of the ES distribution from observed return data
data <- runif(5, min = 0, max = .2)
LogNormalESDFPerc(returns = data, investment = 5, perc = .7, cl = .95, hp = 60)
# Estimates percentiles given mean, standard deviation and number of samples of return data
LogNormalESDFPerc(mu = .012, sigma = .03, n= 10, investment = 5, perc = .8, cl = .99, hp = 40)
|
530682ba065e05402e6fd2d82a9845e9f14272cc | f209b0dd46cac6e125bea6e4edb20e199c1bd264 | /man/set_hclust_default.Rd | 65c5c97cb43b5b6bcae03aaf9aa871d630e799e6 | [] | no_license | hzongyao/confuns | 6618c8936c5673eb06130549865446ffd3c960ab | 79ea2e376029bbd976191a136c4247cde119c7d3 | refs/heads/master | 2023-05-14T19:46:24.487730 | 2021-06-04T05:07:53 | 2021-06-04T05:07:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 349 | rd | set_hclust_default.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hclust-wrapper.R
\name{set_hclust_default}
\alias{set_hclust_default}
\title{Title}
\usage{
set_hclust_default(
hcl.obj,
method.aggl = NA,
method.dist = NA,
directory = NA,
verbose = FALSE
)
}
\arguments{
\item{method.dist}{}
}
\value{
}
\description{
Title
}
|
989308227bf89d169788b30a6493834f9c66c72a | 3d797dd853e3a93a23cbdb2e0577298c856fbc39 | /R/statar.R | 9e4dede2e47e6f43d86fc95f908ff9f88a8578cf | [] | no_license | guhjy/statar | d414d032844a013ee8931f9dfb43bada44cf93b1 | bf7f6215c18de7269675da0ee604e400ebbe2b86 | refs/heads/master | 2021-04-28T04:01:43.909754 | 2018-02-19T22:06:21 | 2018-02-19T22:06:21 | 122,152,280 | 1 | 0 | null | 2018-02-20T03:47:09 | 2018-02-20T03:47:09 | null | UTF-8 | R | false | false | 2,986 | r | statar.R | #' A package for applied research
#'
#' @docType package
#' @name statar
#' @import ggplot2
#' @importFrom data.table is.data.table
#' @importFrom data.table key
#' @importFrom data.table setnames
#' @importFrom data.table setattr
#' @importFrom data.table setkeyv
#' @importFrom data.table setDF
#' @importFrom data.table setDT
#' @importFrom data.table as.data.table
#' @importFrom data.table dcast.data.table
#' @importFrom data.table :=
#' @importFrom dplyr "%>%"
#' @importFrom dplyr count
#' @importFrom dplyr desc
#' @importFrom dplyr filter
#' @importFrom dplyr distinct_
#' @importFrom dplyr everything
#' @importFrom dplyr ungroup
#' @importFrom dplyr group_indices_
#' @importFrom dplyr funs_
#' @importFrom dplyr with_order
#' @importFrom dplyr mutate_each_
#' @importFrom dplyr slice
#' @importFrom dplyr count_
#' @importFrom dplyr select_vars_
#' @importFrom dplyr select_
#' @importFrom dplyr tbl_vars
#' @importFrom dplyr filter_
#' @importFrom dplyr group_by
#' @importFrom dplyr n_distinct
#' @importFrom dplyr distinct
#' @importFrom dplyr sample_n
#' @importFrom dplyr arrange
#' @importFrom dplyr arrange_
#' @importFrom dplyr group_by_
#' @importFrom dplyr summarize
#' @importFrom dplyr n
#' @importFrom dplyr groups
#' @importFrom dplyr do
#' @importFrom dplyr do_
#' @importFrom dplyr funs
#' @importFrom dplyr summarize_
#' @importFrom dplyr mutate
#' @importFrom dplyr mutate_
#' @importFrom dplyr rename
#' @importFrom dplyr rename_
#' @importFrom dplyr select
#' @importFrom dplyr select_
#' @importFrom dplyr left_join
#' @importFrom dplyr right_join
#' @importFrom dplyr full_join
#' @importFrom dplyr inner_join
#' @importFrom dplyr semi_join
#' @importFrom dplyr anti_join
#' @importFrom dplyr data_frame
#' @importFrom dplyr bind_cols
#' @importFrom dplyr bind_rows
#' @importFrom dplyr as_data_frame
#' @importFrom dplyr one_of
#' @importFrom lazyeval as.lazy
#' @importFrom lazyeval lazy_dots
#' @importFrom lazyeval lazy_eval
#' @importFrom lazyeval all_dots
#' @importFrom lazyeval common_env
#' @importFrom lazyeval interp
#' @importFrom lazyeval lazy
#' @importFrom matrixStats weightedMean
#' @importFrom matrixStats colWeightedMeans
#' @importFrom matrixStats colRanges
#' @importFrom parallel mclapply
#' @importFrom stringr str_replace
#' @importFrom stringr str_match
#' @importFrom stringr str_detect
#' @importFrom stringr str_split
#' @importFrom stringr str_pad
#' @importFrom stringr str_sub
#' @importFrom stringr str_replace_na
#' @importFrom stringr fixed
#' @importFrom tidyr gather_
#' @importFrom tidyr spread
#' @importFrom tidyr spread_
#' @importFrom methods is
#' @importFrom stats as.formula
#' @importFrom stats complete.cases
#' @importFrom stats na.omit
#' @importFrom stats quantile
#' @importFrom stats sd
#' @importFrom stats setNames
#' @importFrom utils capture.output
#' @importFrom utils head
#' @importFrom utils tail
#' @importFrom utils type.convert
NULL
# Register non-standard-evaluation symbols so that R CMD check does not
# flag them as undefined global variables.
globalVariables(".SD")
globalVariables("Statbinmean")
|
32d0e496ec18ff5197e622a0fb5dd89a1ce9358d | aa86e1e3608ecb23e7bdf998770cdfe18d907a5d | /3-Model/model_analyzer.R | c5151560b2c992dadfb541fae6c5c1087a2e9f3f | [] | no_license | khakhalin/Ca-Imaging-and-Model-2018 | 2a843c29fa18d6c3b11253c6994d56d467b6bf41 | 29c7802a40c8da838bbc28602544b4f756d17769 | refs/heads/master | 2020-03-29T12:19:54.839839 | 2019-07-10T17:47:06 | 2019-07-10T17:47:06 | 149,894,036 | 2 | 0 | null | 2018-10-04T20:12:38 | 2018-09-22T16:18:15 | Matlab | UTF-8 | R | false | false | 8,231 | r | model_analyzer.R | # ========================
# Script to analyze model outputs.
# ========================
require(tidyr)
require(dplyr)
require(ggplot2)
rm(list = ls()) # Clear workspace
# Absolute address, replace as necessary
localFolder = 'C:/Users/Arseny/Documents/7_Ca imaging/git - CaImaging Paper/3-Model/'
### Combine several model outputs into one large dataframe
# 1st one creates the dataframe:
d <- read.table(paste(localFolder,"modelAnalysis 1 190322 slide looming.csv",sep=''),sep=",",header=T)
d$type = 'Base'
# All others concatenate to it:
t <- read.table(paste(localFolder,"modelAnalysis 2 190322 slide vis.csv",sep=''),sep=",",header=T)
t$type = 'Visual'
d <- rbind(d,t)
t <- read.table(paste(localFolder,"modelAnalysis 3 190322 slide rand.csv",sep=''),sep=",",header=T)
t$type = 'Random'
d <- rbind(d,t)
t <- read.table(paste(localFolder,"modelAnalysis 4 190322 slide looming nointrinsic.csv",sep=''),sep=",",header=T)
t$type = 'no Intrinsic'
d <- rbind(d,t)
t <- read.table(paste(localFolder,"modelAnalysis 5 190611 slide looming Hebb.csv",sep=''),sep=",",header=T)
t$type = 'no STDP'
d <- rbind(d,t)
t <- read.table(paste(localFolder,"modelAnalysis 6 190322 decay looming.csv",sep=''),sep=",",header=T)
t$type = 'no Competition'
d <- rbind(d,t)
names(d)
summary(d)
nExp <- nrow(d)/5
d$exp <- rep(seq(1,nExp),each=5) # Label experiments
# Remove columns that are still calculated by the scripts, but that we don't like anymore:
d <- d %>% dplyr::select (-c(nPCAto80,competition,sel90perc,sel90perc_SC,rCluSpk,rSelRnt,rSelGth,
shESelGrow,selEGrowth,nRichTo80F,clustPrefPval,revFlow,cycl,gammaIn,
rDirWei,rSelfcSelfs,rSelfcSelsc,deg0,deg12,deg5p,
clusterCompactness))
# -- Correct weird values
# For some reason for "no competition" the starting point (the very first one) is weird.
# something is wrong with renormalization of synapses for this very first snapshot.
# So tucking these values in.
d$clust <- pmin(d$clust,0.02)
ggplot(d,aes(stage,clust,color=type,group=file)) + theme_minimal() + geom_line()
d$eff[d$stage==1] <- pmin(d$eff[d$stage==1],0.04)
ggplot(d,aes(stage,eff,color=type,group=file)) + theme_minimal() + geom_line()
d$gammaOu <- abs(d$gammaOu) # from negative to positive values
# ---------- Gather, to prepare for summary statistics bellow
dg <- gather(d,var,value,-file,-type,-stage,-rewire,-exp)
dg$var <- factor(dg$var)
levels(dg$var) # List of levels (different measures we have)
# Let's try to put measures in a somewhat meaningful order
levelSequence <- c(
"file","type","stage",
"fullBrainSel","meanSel","shareSelCells","sel90m50","bestPredict",
"fullBrainSel_SC","meanSel_SC","shareSelCells_SC","sel90m50_SC",
"rSelfcSelfs","rSelfcSelsc",
"rPosSel","mDistWei",
"rSelSpk","rSelClu","rSelNet","rSelIns","selAssort",
"nRichTo80C","nClusters","gammaOu","recip",
"synfire","synHelp",
"clusterPreference","clusterCompactness",
"eff","modul","clust","flow",
"clusterPreference") # This row for those that weren't actually included in the figure
existingLevels <- levels(dg$var)
dg$var <- factor(dg$var,levels=intersect(levelSequence,existingLevels))
levels(dg$var)
# Summary data frame
dgs = dg %>% group_by(type,stage,var,rewire) %>% summarize(
m = mean(value),
n = n(),
s = sd(value),
ci = -s/sqrt(n)*qt(0.025,df=n-1)) # Averages and cis
head(dgs)
dgs$type = factor(dgs$type)
dgs$type <- factor(dgs$type,levels=rev(levels(dgs$type))) # Reverse, to make Base top, for better plotting
levels(dgs$type) # Check
# Plot Averages only, several MODEL TYPES in each plot <---- Main plot
ggplot(dgs,aes(stage,m,color=type,size=type)) +
geom_point(size=1,shape=1) +
geom_line(aes(group=type)) +
facet_wrap(~var,scales = "free_y", ncol=5) +
theme_bw() +
theme(axis.text.y=element_text(size=6),
strip.background=element_rect(linetype='blank',fill='white'),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
#scale_linetype_manual(values=rev(c("solid","solid","solid","solid","dashed","dashed")))+
# Line style shouldn't be used before incscape, as it just creates a bunch of segments
scale_color_manual(values=rev(c('black','red','palegreen3','dodgerblue','violetred','gray')))+
scale_size_manual(values=rev(c(1, 0.5, 0.5, 0.5, 0.5, 0.5)))+
NULL
# To get a decent figure (close to Fig8), export as vector with 700x600 dimensions.
# Values by the end of simulation
names(dgs)
levels(dgs$var)
dgs %>% dplyr::filter(stage==5,var=="meanSel")
dgs %>% dplyr::filter(stage==5,var=="shareSelCells")
dgs %>% dplyr::filter(stage==5,var=="meanSel_SC")
dgs %>% dplyr::filter(stage==5,var=="shareSelCells_SC")
dgs %>% dplyr::filter(stage==5,var=="rSelfcSelsc")
dgs %>% dplyr::filter(stage==5,var=="clusterPreference")
# Same, but only global clustering
ggplot(subset(dgs,var=="clust"),aes(stage,m,color=type)) +
geom_point(size=1,shape=1) +
geom_line(aes(group=type)) +
theme_bw() +
theme(axis.text.y=element_text(size=6),
strip.background=element_rect(linetype='blank',fill='white'),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
NULL
# Same, but only global efficiency
ggplot(subset(dgs,var=="eff"),aes(stage,m,color=type)) +
geom_point(size=1,shape=1) +
geom_line(aes(group=type)) +
theme_bw() +
theme(axis.text.y=element_text(size=6),
strip.background=element_rect(linetype='blank',fill='white'),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
NULL
# --- Synfire analysis
# All curves for one type of experiments, and one var
ggplot(subset(dg,type=="Base" & var %in% c("synfire")),aes(stage,value,group=exp)) +
geom_point(size=1,shape=1) +
geom_line() +
theme_bw() +
theme(axis.text.y=element_text(size=6),
strip.background=element_rect(linetype='blank',fill='white'),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
NULL
# Across types
ggplot(subset(dgs,var %in% c("synfire","synHelp")),aes(stage,m,color=type)) +
geom_point(size=1,shape=1) +
geom_line(aes(group=type)) +
theme_bw() +
theme(axis.text.y=element_text(size=6),
strip.background=element_rect(linetype='blank',fill='white'),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
facet_wrap(~var,scales = "free_y") +
NULL
# ----------- end of meaningful analyses -------------
# Everything below are old attempts when I tried to compare "real" values to values on a rewired
# graph. It's really hard to visualize, and even harder to interpret, so these attempts are
# abandoned, at least for now.
# Plot Averages only, one type, but with REWIRE
ggplot(dgs,aes(stage,m,color=rewire)) + theme_bw() +
geom_point() + geom_line(aes(group=rewire)) +
facet_wrap(~var,scales = "free_y") +
theme(axis.text.y=element_text(size=6),
strip.background=element_rect(linetype='blank',fill='white'),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
NULL
# All values and averages for one TYPE, but with rewires. Take a while to render
ggplot(data=dg,aes(stage,value,color=rewire)) + theme_bw() +
geom_point(alpha=0.5) +
geom_line(data=subset(dg,rewire=="original"),aes(group=file),alpha=0.3) +
facet_wrap(~var,scales = "free_y") +
geom_point(data=dgs,aes(stage,m),color="black") +
geom_line(data=dgs,aes(stage,m,group=rewire),color="black") +
theme(axis.text.y=element_text(size=6),
strip.background=element_rect(linetype='blank',fill='white'),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
NULL
# It's impossible to connect rewired points; rewires are random, so we have to subset geom_line(),
# to only connect "proper" (original) points.
# Zoom in on cyclicity in particular
ggplot(data=d,aes(stage,cycl,color=rewire)) + theme_bw() +
geom_point(alpha=0.5) +
geom_line(data=subset(d,rewire=="original"),aes(group=file),alpha=0.3) +
scale_y_log10() +
NULL
names(dgs)
dgs2 <- subset(dgs,stage==5)
data.frame(sprintf('%s - %4.2f pm %4.2f',dgs2$var,dgs2$m,dgs2$s)) # means and sd by the end of training
|
747fba7c0a286f15beb1b9af3842bd31705cdd8d | e1bca973182c5570ed602b554839ac037d42fad7 | /run_analysis.R | 730280214e02a8f63f9b6e34b780b20712a00ceb | [] | no_license | martin-schulz/getting-cleaning-data | 6c68b5fbce20563fe879a0619ee413ac6e70c0f7 | bca9ce66b9a8f5fe6ababa459b1baf9497fe5bcb | refs/heads/master | 2020-04-09T05:00:50.341157 | 2018-12-03T06:57:13 | 2018-12-03T06:57:13 | 160,047,234 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,596 | r | run_analysis.R | library(dplyr)
library(data.table)
# Read training and test data files, activity and subject identifiers,
# activity and feature labels
data_train <- fread("./UCI HAR Dataset/train/X_train.txt")
data_test <- fread("./UCI HAR Dataset/test/X_test.txt")
activity_test <- fread("./UCI HAR Dataset/test/y_test.txt")
activity_train <- fread("./UCI HAR Dataset/train/y_train.txt")
subject_test <- fread("./UCI HAR Dataset/test/subject_test.txt")
subject_train <- fread("./UCI HAR Dataset/train/subject_train.txt")
activity_labels <- fread("./UCI HAR Dataset/activity_labels.txt")
feature_labels <- fread("./UCI HAR Dataset/features.txt")
# Merge training and test files and attach activity labels
data <- rbind(data_train,data_test)
subject <- rbind(subject_train,subject_test)
# NOTE(review): merge() does not preserve row order by default; verify the
# merged activity rows still align row-for-row with `data`/`subject` before
# the cbind below, or the label columns may be misaligned.
activity <- rbind(activity_train,activity_test) %>%
        merge(y = activity_labels,all.x = TRUE)
# Attach subject and activity identifiers and labels to data set and set
# column names
data <- cbind(subject,activity,data)
colnames(data) <- c("SubjectId","ActivityId","ActivityLabel",feature_labels$V2)
# Reduce data set to only mean and standard deviation features
# NOTE(review): this pattern also keeps meanFreq() features — confirm intended.
columns <- grep("Subject|Activity|mean|std",colnames(data))
data <- data[,..columns]
# Create aggregated data set with average measurements for each subject
# and activity
data_summary <- data[,lapply(.SD,mean),
                     keyby = .(SubjectId,ActivityId,ActivityLabel)]
# Write tidy data set and summary to files
write.csv(data,"./UCI HAR Dataset/tidydata.csv")
write.csv(data_summary,"./UCI HAR Dataset/tidydata_summary.csv")
ff1747217e04556ce306ef1b2d6075160d5e2f4e | f2acfcdcf63922fd1b7e2a1de00284b630d839be | /run_analysis.R | cb9bd163dd681a0f925d7b62a4915ee0d16e7c78 | [] | no_license | jurassicduck/course3project | c362d220735553cd5cb9bbfa122e8387b4f13dcb | 7c62833fbfb3ebae36f8599ad8b0de5f5812982d | refs/heads/master | 2021-01-09T20:41:02.033416 | 2016-06-19T01:10:23 | 2016-06-19T01:10:23 | 61,452,035 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,557 | r | run_analysis.R | # This script will perform the following steps:
# 1. Merge the training and the test sets to create one data set.
# 2. Extract only the measurements on the mean and standard deviation for each measurement.
# 3. Use descriptive activity names to name the activities in the data set.
# 4. Appropriately label the data set with descriptive activity names.
# 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
#----------------------------------Step 1------------------------------------------------------------
## Merge the training and the test sets to create one data set.
# Set working directory
# NOTE(review): hard-coded setwd() makes the script machine-specific; consider
# running from the project root instead.
setwd('C:/Users/Jurassicduck/Documents/Coursera/Data_Science/Course3')
# Read in training data
features <- read.table('./features.txt', header=FALSE)
activity_labels <- read.table('./activity_labels.txt', header=FALSE)
subject_train <- read.table('./train/subject_train.txt', header=FALSE)
x_train <- read.table('./train/x_train.txt', header=FALSE)
y_train <- read.table('./train/y_train.txt', header=FALSE)
# Rename columns
colnames(activity_labels) <- c('activity_id','activity_type')
colnames(subject_train) <- "subject_id"
colnames(x_train) <- features[,2]
colnames(y_train) <- "activity_id"
# Create the training data set
train <- cbind(x_train, subject_train, y_train)
# Read in the test data
subject_test <- read.table('./test/subject_test.txt', header=FALSE)
x_test <- read.table('./test/x_test.txt', header=FALSE)
y_test <- read.table('./test/y_test.txt', header=FALSE)
# Rename columns
colnames(subject_test) <- "subject_id"
colnames(x_test) <- features[,2]
colnames(y_test) <- "activity_id"
# Create the test data set
test <- cbind(x_test, subject_test, y_test)
# Combine training and test data to create a final data set
final_data <- rbind(train, test)
#--------------------------------Step 2----------------------------------------
## Extract only the measurements on the mean and standard deviation for each measurement.
# Get all column names and store them in a list
col_names <- colnames(final_data)
# Keep only the columns we want.
# NOTE(review): `&` binds tighter than `|`, so this reads as
#   id | (mean & !meanFreq & !mean-) | (std & !std-); also the "." in these
# patterns is a regex wildcard, not a literal dot — confirm both are intended.
logical_vec <- grepl("_id$", col_names) |
  grepl("-mean..",col_names) &
  !grepl("-meanFreq..",col_names) &
  !grepl("mean..-",col_names) |
  grepl("-std..",col_names) &
  !grepl("-std()..-",col_names)
# NOTE(review): `data1` is never used again — steps 3-5 operate on
# `final_data`, so the mean/std reduction is effectively discarded.
data1 <- final_data[logical_vec==TRUE]
#--------------------------------Step 3----------------------------------------
## Use descriptive activity names to name the activities in the data set.
# Add activity_labels info to the final data set
final_data <- merge(final_data, activity_labels, by='activity_id',all.x=TRUE);
#--------------------------------Step 4----------------------------------------
## Appropriately label the data set with descriptive variable names.
# gsub() is vectorised over character vectors, so each substitution can be
# applied to the whole name vector at once — no element-wise for loop needed.
# The substitutions are applied in the same order as before, so results are
# identical.
col_names <- colnames(final_data)
col_names <- gsub("\\()", "", col_names)      # drop "()" from feature names
col_names <- gsub("^(t)", "time", col_names)  # leading t -> time (time domain)
col_names <- gsub("^(f)", "freq", col_names)  # leading f -> freq (frequency domain)
col_names <- gsub("([Gg]ravity)", "Gravity", col_names)
col_names <- gsub("([Bb]ody)", "Body", col_names)
col_names <- gsub("[Gg]yro", "Gyroscope", col_names)
col_names <- gsub("AccMag", "AccMagnitude", col_names)
col_names <- gsub("([Bb]odyaccjerkmag)", "BodyAccJerkMagnitude", col_names)
col_names <- gsub("JerkMag", "JerkMagnitude", col_names)
col_names <- gsub("GyroMag", "GyroscopeMagnitude", col_names)
# Update column names for the final data set
colnames(final_data) <- col_names
#--------------------------------Step 5----------------------------------------
## Create a second, independent tidy data set with the average of each variable
## for each activity and each subject.
# BUG FIX: the grouping list previously referenced `data2$subject_id` /
# `data2$activity_id` while `data2` was still being created (object not found).
# The grouping columns must come from `final_data`, the object being aggregated.
data2 <- aggregate(final_data[, !colnames(final_data) %in% c("activity_id", "subject_id", "activity_type")],
                   by=list(subject_id=final_data$subject_id, activity_id=final_data$activity_id),
                   mean)
# Merge activity type back into the data
data3 <- merge(data2, activity_labels, by="activity_id", all.x=TRUE)
# Export data3
write.table(data3, file='tidy_data.txt', row.names=FALSE)
2afb94c262485a088d6ee17ce95e3b303da07e2f | 5a7b13193bd2a52d165f826e53e7e6c7ead34962 | /Build_Parcel_Data.R | f05f15297d2ebe05e66c7fa60a5f33ad3c2d099e | [] | no_license | phildwalker/gso_data | f2879ae9b06f5df7cb68929573893a7a94dc018e | d299e5b1f0a19f5d87ff6002f00c2558b81437cd | refs/heads/master | 2022-11-20T02:48:24.942361 | 2020-07-06T22:38:23 | 2020-07-06T22:38:23 | 272,247,611 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,032 | r | Build_Parcel_Data.R | # Pulling in parcel information to capture what places are available
# Sun Jun 14 13:20:00 2020 ------------------------------
# NC onemap
#
# master address data
# https://www.nconemap.gov/datasets/nc-master-address-dataset-2014?geometry=-79.808%2C36.088%2C-79.786%2C36.091&showData=true
# GSO oarcgis
# http://www.arcgis.com/home/webmap/viewer.html?url=https%3A%2F%2Fgis.greensboro-nc.gov%2Farcgis%2Frest%2Fservices%2FPlanning%2FParcels_Ownership_Census_MyGov%2FMapServer&source=sd
# GSO building inspection information
# https://gis.greensboro-nc.gov/arcgis/rest/services/EngineeringInspections/BImap_MS/MapServer/10
# https://gis.greensboro-nc.gov/arcgis/rest/services/GISDivision/CityCountyBldgs_MS/MapServer
# BImap MS - Parcels (Features: 213726, Selected: 1)
library(tidyverse)
rowsAMT <- 213726
binsize <- 500
bincount <- ceiling(rowsAMT/ binsize)
buckets <- tibble::tibble(
order = 0:bincount,
START = order*binsize,
END = START+binsize,
path = glue::glue("https://gis.greensboro-nc.gov/arcgis/rest/services/EngineeringInspections/BImap_MS/MapServer/10/query?where=OBJECTID%3E={START}%20AND%20OBJECTID%3C{END}&outFields=*&f=json")
)
buckets$path[1]
parcel_dat <-
buckets$path %>% #takes the paths individually
map_dfr(., ~{
print(.)
out <- jsonlite::fromJSON(readLines(., warn=F))
out2 <- out$features$attributes
}) #reads in the data, and _dfr combines them together
save(parcel_dat, file = here::here("data", "parcels", "parcel_info.rda"))
# current_time <- format(Sys.time(), "%Y%m%d%H%M")
# readr::write_csv(parcel_dat, here::here("data", "parcels", paste0(current_time,"_parcel_info.csv")))
# out1 <- jsonlite::fromJSON(readLines("https://gis.greensboro-nc.gov/arcgis/rest/services/EngineeringInspections/BImap_MS/MapServer/10/query?where=OBJECTID%3E=1000%20AND%20OBJECTID%3C1500&outFields=*&f=json", warn=F))
# &geometryType=esriGeometryPoint
# esriGeometryPoint
# esriGeometryPolygon
# out2 <- out1$features$attributes
# out_geo <- out1$features$geometry
|
5abdf90c9d888a988d3f9325ecd67c13f264cb11 | 95db66b93ca693f562206561d00db5555ce8aaa4 | /bomb_plan.R | d823c1e44c8b836544a35dc37afc4a7335c484c4 | [] | no_license | huberf/stat3105-proj2 | 66cd14f8c368cd434c9b27736d9bb0d28bcf60f6 | 01c1ae0ef0063845590c90a9307f932d05ba16c8 | refs/heads/master | 2023-01-08T16:03:05.125740 | 2020-11-12T16:53:36 | 2020-11-12T16:53:36 | 312,103,488 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,302 | r | bomb_plan.R | # GET DEPENDENCIES
library(tidyverse)
library(jsonlite)
library(ggplot2)
library(sp)
library(dlm)
# PREPARES TRAINING DATA
# Build the training data frame used by predict():
#  - reads every GPS JSON trace in ./gps (one file per day) plus
#    historical_weather.json,
#  - stacks all days into one data frame with UTM coordinates,
#  - interpolates temperature at each GPS fix,
#  - flags "stationary"/"bombable" fixes from inter-fix time gaps,
#  - Kalman-smooths position and speed per day (dlm package).
# Returns: a data frame with columns index, time, elapsed, longitude,
# latitude, day, lat_m, long_m, temperature, stationary, bombable,
# smooth_lat_m, smooth_long_m, speed.
prepare_train_data <- function() {
  # Extract files: one JSON trace per day, keyed by file name.
  gps_raw <- list()
  for (file in list.files('gps')) {
    gps_raw[[file]] <- fromJSON(paste('gps/', file, sep=''), simplifyVector=TRUE)
  }
  # Pull out the coordinate matrix (lon in column 1, lat in column 2) per day.
  coords <- map(gps_raw, function (data) {t(simplify2array(data$features$geometry$coordinates))})
  # Make a data frame (time, longitude, latitude) for each day.
  days_dfs <- map(names(gps_raw), function(name) {
    data.frame(time =strptime(gps_raw[[name]]$features$properties$time,format = "%Y-%m-%dT%H:%M:%S", tz="UTC"),
               longitude = coords[[name]][,1],
               latitude = coords[[name]][,2])
  })
  names(days_dfs) <- names(gps_raw)
  # Process weather data: keep (epoch time, temperature) observation pairs.
  weather_raw <- fromJSON('historical_weather.json', simplifyVector=TRUE)
  weather_obs <- weather_raw$observations
  temperature_all <- weather_obs[, c('valid_time_gmt', 'temp')]
  # PUT TOGETHER IN BIG DF
  # Stack all days' coordinates together with a factor flag for their day.
  num_total <- sum(unlist(lapply(days_dfs, function(df) {return(dim(df)[1])})))
  num_total
  reduced_df <- data.frame(
    index=unlist(lapply(days_dfs, function(df) {return(1:length(df$time))})),
    time=unlist(lapply(days_dfs, function(df) {return(df$time)})),
    elapsed=unlist(lapply(days_dfs, function(df) {return(df$time - df$time[1])})),
    longitude=unlist(lapply(days_dfs, function(df) {return(df$longitude)})),
    latitude=unlist(lapply(days_dfs, function(df) {return(df$latitude)})),
    day=factor(unlist(lapply(1:length(days_dfs),
                             function(idx) {return(rep(idx, length(days_dfs[[idx]]$longitude)))}
    )))
  )
  # Project lon/lat (WGS84) to UTM zone 12 so distances are in metres.
  spat_df <-SpatialPointsDataFrame(coords=reduced_df[,c("longitude", "latitude")],
                                   data=reduced_df["time"],
                                   proj4string=CRS("+proj=longlat +datum=WGS84 +units=m"))# This step converts the longitude/latitude -> UTM
  utm_df <-spTransform(spat_df, CRSobj = "+proj=utm +zone=12 +datum=WGS84 +units=m")
  utm_coords <- coordinates(utm_df)
  reduced_df$lat_m <- utm_coords[, "latitude"]
  reduced_df$long_m <- utm_coords[, "longitude"]
  # Pair each GPS fix with a temperature via linear interpolation between the
  # two bracketing weather observations.
  indices <- unlist(lapply(reduced_df$time, function(time) {which(temperature_all[, 1] > time)[1]}))
  first_temp <- temperature_all[indices-1, 2]
  second_temp <- temperature_all[indices, 2]
  # Interpolation weight = fraction of the interval elapsed at the fix time.
  diff <- temperature_all[indices, 1] - temperature_all[indices-1, 1]
  weight <- (reduced_df$time - temperature_all[indices-1, 1])/diff
  final_temps <- first_temp*(1-weight) + second_temp*weight
  # Attach interpolated temperature to the data frame.
  reduced_df$temperature <- final_temps
  # Map "bombable" times: a fix is "stationary" when more than 2 minutes
  # passed since the previous fix (the tracker stops logging while still).
  # NOTE(review): the gap across day boundaries also looks like a long pause
  # here — presumably acceptable, but worth confirming.
  diff_time <- reduced_df$time[2:length(reduced_df$time)] - reduced_df$time[1:(length(reduced_df$time)-1)]
  stationary <- c(TRUE, diff_time > 2*60)
  reduced_df$stationary <- stationary
  # Time since the most recent stationary fix; fixes more than 5 minutes past
  # a stop are flagged bombable.
  time_to_stationary <- unlist(map(reduced_df$time, function(time) {
    stat_diff <- time - reduced_df$time[reduced_df$stationary]
    ifelse(length(stat_diff[stat_diff >= 0]) != 0, min(stat_diff[stat_diff >= 0]), 0)
  }))
  reduced_df$bombable <- time_to_stationary > 5*60
  # Kalman smoother: constant-velocity model, observed position only.
  gps_variance <- 20^2
  v_mat <- diag(rep(gps_variance, 2))
  avg_walk_speed_m_per_sec <- 1.4 # https://en.wikipedia.org/wiki/Walking
  # Smooth each day independently.
  reduced_df$smooth_lat_m <- NA
  reduced_df$smooth_long_m <- NA
  reduced_df$speed <- NA
  for (day in levels(factor(reduced_df$day))) {
    # Estimate the average sampling interval dt from the first leg (<4000 s).
    data <- reduced_df[reduced_df$day == day, ]
    dt <- max(data[data$elapsed < 4000, ]$elapsed)/length(data[data$elapsed < 4000, ]$elapsed)
    # State = (x, y, vx, vy); position advances by velocity * dt per step.
    g_mat <- matrix(c(1, 0, dt, 0,
                      0, 1, 0, dt,
                      0, 0, 1, 0,
                      0, 0, 0, 1), byrow=TRUE, ncol=4)
    dlm_spec <- dlm(
      FF= matrix(c(1, 0, 0, 0, 0, 1, 0, 0), byrow=T, nrow=2),
      GG= g_mat,
      V = v_mat,
      W = diag(c(5, 5, 1, 1)^2),
      m0 = matrix(c(data$long_m[1], data$lat_m[1], rep(avg_walk_speed_m_per_sec / dt, 2)),
                  ncol=1), # A vector by R defaults is a k by 1 matrix
      C0 = diag(rep(10^2, 4)))
    dlm_filter_mod <- dlmFilter(cbind(data$long_m, data$lat_m), dlm_spec)
    dlm_smooth_mod <- dlmSmooth(dlm_filter_mod)
    smoothed <- dlm_smooth_mod$s
    # dlmSmooth prepends the prior state, so drop the first row when writing back.
    reduced_df$smooth_lat_m[reduced_df$day == day] <- smoothed[, 2][2:length(smoothed[, 2])]
    reduced_df$smooth_long_m[reduced_df$day == day] <- smoothed[, 1][2:length(smoothed[, 2])]
    reduced_df$speed[reduced_df$day == day] <- sqrt(dlm_smooth_mod$s[, 3]^2 + dlm_smooth_mod$s[, 4]^2)[2:length(smoothed[, 2])]
  }
  # Return the assembled training data frame.
  return(reduced_df)
}
# MAIN PREDICTION FUNCTION
# Choose the best latitude line and the two bomb longitudes along it.
#
# For every candidate latitude on a 2 m grid, the closest "bombable"
# observation of each training day is found; every pair of days is tried as
# candidate bomb longitudes, days are assigned to the nearer candidate, and
# the assignment with the lowest spatial uncertainty wins. The latitude with
# the best combined position/time criterion is returned.
#
# Args:
#   start_time: walk start in epoch seconds; only used to convert the
#               predicted elapsed times into absolute times.
#   start_long, start_lat: kept for interface compatibility — the current
#               implementation does not use them.
#   train_data: data frame from prepare_train_data() (needs columns lat_m,
#               long_m, elapsed, day, bombable).
# Returns: list(bomb1=..., bomb2=...), each with utm_latitude, utm_longitude,
#   elapsed (seconds since start) and time (absolute, epoch seconds).
predict <- function(start_time, start_long, start_lat, train_data) {
  GRID_RESOLUTION = 2 # 2 meters
  days <- as.integer(levels(factor(train_data$day)))
  # Candidate latitudes (UTM metres) on a regular grid.
  lats <- seq(5193900, 5196700, by=GRID_RESOLUTION)
  train_data <- train_data[train_data$elapsed < 4000, ] # only keep data from first leg
  # only keep bombable points
  train_data <- train_data[train_data$bombable == TRUE, ] # don't train on points that can't be used
  DAYS <- 11
  # Cache per-day column views so the per-latitude loop below stays cheap.
  cache_longitudes <- list()
  cache_elapsed <- list()
  for (day in days) {
    cache_longitudes[[day]] <- train_data$long_m[train_data$day == day]
    cache_elapsed[[day]] <- train_data$elapsed[train_data$day == day]
  }
  # For each candidate latitude: (uncertainty, time_uncertainty,
  # longitude1, longitude2, time1, time2).
  all_uncertainties <- unlist(map(1:length(lats), function(idx) {
    lat <- lats[idx]
    dists <- abs(train_data$lat_m - lat)
    day_idxes <- rep(NA, DAYS)
    latitude_diff <- rep(NA, DAYS)
    longitudes <- rep(NA, DAYS)
    elapsed <- rep(NA, DAYS)
    # Closest bombable observation of each day to this latitude.
    for (day in days) {
      day_idxes[day] <- which.min(dists[train_data$day == day])
      latitude_diff[day] <- dists[train_data$day == day][day_idxes[day]]
      longitudes[day] <- cache_longitudes[[day]][day_idxes[day]]
      elapsed[day] <- cache_elapsed[[day]][day_idxes[day]]
    }
    # Try every pair of days' longitudes as the two bomb candidates and keep
    # the day-to-bomb assignment with minimum total spatial uncertainty.
    best_uncertainty <- Inf
    final_pred1 <- NA
    final_pred2 <- NA
    best_groupmask <- NA
    for (i in 1:(length(days)-1)) {
      for (j in (i+1):length(days)) {
        point1 <- longitudes[days[i]]
        point2 <- longitudes[days[j]]
        pair_dists <- matrix(NA, ncol=2, nrow=DAYS)
        pair_dists[, 1] <- abs(longitudes - point1)
        pair_dists[, 2] <- abs(longitudes - point2)
        parent <- apply(pair_dists, 1, which.min)
        # Rows that are all NA yield integer(0) from which.min; map them to
        # group 0 so they drop out of both groups.
        parent <- unlist(replace(parent, !sapply(parent, length), 0))
        # Candidate bomb longitudes = group means.
        pred1 <- mean(longitudes[parent == 1])
        pred2 <- mean(longitudes[parent == 2])
        # Spatial uncertainty = summed Euclidean distance of each day's point
        # to its group's predicted bomb position, averaged over days.
        uncertainty1 <- sum(sqrt((longitudes[parent == 1] - pred1)^2 + latitude_diff[parent == 1]^2))
        uncertainty2 <- sum(sqrt((longitudes[parent == 2] - pred2)^2 + latitude_diff[parent == 2]^2))
        uncertainty <- (uncertainty1 + uncertainty2)/length(days)
        if (uncertainty < best_uncertainty) {
          final_pred1 <- pred1
          final_pred2 <- pred2
          best_uncertainty <- uncertainty
          best_groupmask <- parent
        }
      }
    }
    # BUG FIX: the time estimates previously used `parent`, which after the
    # loops holds the assignment from the *last* (i, j) pairing tried, not the
    # winning one. Use the saved best_groupmask instead.
    final_time1 <- mean(elapsed[best_groupmask == 1], na.rm=TRUE)
    time_uncertainty <- sum(abs(elapsed[best_groupmask == 1] - final_time1), na.rm=TRUE)
    final_time2 <- mean(elapsed[best_groupmask == 2], na.rm=TRUE)
    time_uncertainty <- time_uncertainty +
      sum(abs(elapsed[best_groupmask == 2] - final_time2), na.rm=TRUE)
    time_uncertainty <- time_uncertainty/length(days)
    return(c(best_uncertainty, time_uncertainty, final_pred1, final_pred2, final_time1, final_time2))
  }))
  all_uncertainties <- matrix(all_uncertainties, ncol=6, byrow=TRUE)
  lat_uncertainties <- all_uncertainties[, 1]
  time_uncertainties <- all_uncertainties[, 2]
  # Pick the latitude minimising a weighted position/time criterion
  # (position uncertainty weighted twice as heavily as time uncertainty).
  best_metric <- Inf
  best_lat <- NA
  best_longs <- NA
  best_times <- NA
  for (i in 1:length(lats)) {
    metric <- time_uncertainties[i] + lat_uncertainties[i]*2
    if (metric < best_metric) {
      best_metric <- metric
      best_lat <- lats[i]
      best_longs <- all_uncertainties[i, 3:4]
      best_times <- all_uncertainties[i, 5:6]
    }
  }
  return(list(bomb1=list(utm_latitude=best_lat, utm_longitude=best_longs[1],
                         elapsed=best_times[1], time=start_time+best_times[1]),
              bomb2=list(utm_latitude=best_lat, utm_longitude=best_longs[2],
                         elapsed=best_times[2], time=start_time+best_times[2])))
}
## FUNCTIONS TO RUN PREDICTIONS
to_utm <- function(long, lat) {
  # Project WGS84 lon/lat degrees to UTM zone 12 metres.
  # Returns list(long=..., lat=...) of projected coordinate vectors.
  lonlat_pts <- SpatialPoints(coords=cbind(long, lat),
                              proj4string=CRS("+proj=longlat +datum=WGS84 +units=m"))# This step converts the longitude/latitude -> UTM
  projected <- spTransform(lonlat_pts, CRSobj = "+proj=utm +zone=12 +datum=WGS84 +units=m")
  xy <- coordinates(projected)
  return(list(long=xy[, "long"], lat=xy[, "lat"]))
}
to_latlong <- function(utm_long, utm_lat) {
  # Inverse of to_utm: UTM zone 12 metres back to WGS84 lon/lat degrees.
  # Note: returns scalars (only the first point), matching the original.
  utm_pts <- SpatialPoints(coords=cbind(utm_long, utm_lat),
                           proj4string=CRS("+proj=utm +zone=12 +datum=WGS84"))
  unprojected <- spTransform(utm_pts, CRS("+proj=longlat +datum=WGS84"))
  xy <- coordinates(unprojected)
  return(list(long=xy[1, "utm_long"], lat=xy[1, "utm_lat"]))
}
# Convenience wrapper: prepare the training data, pick the historical day
# whose recorded route start best matches one of the supplied candidate
# starts, and run predict() for that day.
#
# Args:
#   start_time: character vector of ISO timestamps ("%Y-%m-%dT%H:%M:%S", UTC),
#               one per candidate start.
#   start_longitude, start_latitude: numeric vectors (WGS84 degrees) parallel
#               to start_time.
# Returns: the predict() result augmented with WGS84 lon/lat for both bombs
#   and day_idx, the index of the matched candidate.
easy_predict <- function(start_time, start_longitude, start_latitude) {
  train_data <- prepare_train_data()
  valid_days <- c(1:8,11)
  # keep only relatively clean days
  train_data <- train_data[train_data$day %in% valid_days, ]
  utm_coors <- to_utm(start_longitude, start_latitude)
  # Score each candidate start by its mean distance to the first recorded
  # fix of every valid training day; keep the closest candidate.
  # NOTE(review): assumes rows within each day are in chronological order so
  # row 1 is the day's start — prepare_train_data() appears to preserve file
  # order, but confirm.
  dists_to_start <- unlist(map(1:length(start_time), function(idx) {
    mean(unlist(map(valid_days, function(day) {
      row <- train_data[train_data$day == day, ]
      return(sqrt((row[1, ]$lat_m - utm_coors$lat[idx])^2 + (row[1, ]$long_m - utm_coors$long[idx])^2))
    })))
  }))
  day <- which.min(dists_to_start)
  # Convert the chosen start time to epoch seconds.
  st_time <- as.numeric(strptime(start_time[day], format = "%Y-%m-%dT%H:%M:%S", tz="UTC"))
  # BUG FIX: the arguments were previously passed as (lat, long) although
  # predict()'s signature is (start_time, start_long, start_lat, train_data).
  # predict() currently ignores both values, so behaviour is unchanged, but
  # the call now matches the declared interface.
  prediction <- predict(st_time, start_longitude[day],
                        start_latitude[day], train_data)
  # Attach WGS84 lon/lat alongside the UTM coordinates for both bombs.
  latlong1 <- to_latlong(prediction$bomb1$utm_longitude, prediction$bomb1$utm_latitude)
  prediction$bomb1$longitude <- latlong1$long
  prediction$bomb1$latitude <- latlong1$lat
  latlong2 <- to_latlong(prediction$bomb2$utm_longitude, prediction$bomb2$utm_latitude)
  prediction$bomb2$longitude <- latlong2$long
  prediction$bomb2$latitude <- latlong2$lat
  prediction$day_idx <- day
  return(prediction)
}
|
f72a414dada182274aa2201bf5111d1bab86a6a3 | a3a5278be6e3e20c034c303cd664b45c76c3def4 | /BMAStage.R | ce9ec5722c3025271cdce8a0e1c8b30d213f892c | [] | no_license | ilvitorio/FED | 7ccd603416feddc89554c2c6ba406ec38765c1bc | 541abc98f5921af1979b25fe4f1bdc062ceb4b28 | refs/heads/master | 2020-12-08T11:40:39.730535 | 2016-09-05T12:03:47 | 2016-09-05T12:03:47 | 66,012,115 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,013 | r | BMAStage.R | #Function Definitions and library requirements
#Libraries required
require("BMA")
require("PerformanceAnalytics")
require("ggplot2")
require("quantmod")
require("ROCR")
#User defined Functions
# Initial raw-data transformation: first column character -> Date
# (dd/mm/YYYY), remaining columns character -> numeric.
#
# Fix: the original used apply(data[,-1], 2, as.numeric), which first coerces
# the selection to a matrix. With exactly one non-date column, data[,-1]
# drops to a bare vector and apply() errors ("dim(X) must have positive
# length"). lapply() converts column-by-column, keeps the data.frame
# structure, and handles the single-column case.
initialTransform <- function(data){
  data[,1] <- as.Date(data[,1],format="%d/%m/%Y")
  data[-1] <- lapply(data[-1], as.numeric)
  return(data)
}
percentileRank <- function(vectorData){
  # Fraction of observations at or below each value (rank / n),
  # rounded to one decimal place.
  n_obs <- length(vectorData)
  round(rank(vectorData) / n_obs, 1)
}
# Expanding-window percentile ranking of an xts data frame.
# Rows up to the burn-in date are ranked using only the burn-in window; each
# later row is re-ranked using all data up to (and including) that date, so
# ranks never use future information.
DFRank <- function(DFxts){
  #The function ranks the data frame and returns a DF of Ranking
  # Hard-coded burn-in cutoff (xts date-string subsetting: "/1999-12" = up to
  # Dec 1999, "1999-12/" = from Dec 1999 on).
  burn_date<-'1999-12'
  initial_index<-paste("/",burn_date,sep="")
  indexer<-paste(burn_date,"/",sep="")
  # Dates to roll over, skipping the first (it belongs to the burn-in block).
  index_dates<- index(DFxts[indexer])[-1]
  DFRanked<-DFxts
  # Rank the whole burn-in window column-by-column in one shot.
  DFRanked[initial_index,] <- apply(DFxts[initial_index],2,percentileRank)
  for (i in index_dates) {
    #Computes the roll over in the XTS object for taking into account the
    #date up to the time
    # NOTE(review): `for` over a Date vector strips the Date class, so `i` is
    # numeric here; as.Date(i) on a numeric needs an explicit `origin` on
    # R < 4.3 — verify against the target R version.
    newIndex<-paste("/",as.Date(i),sep="")
    DFtemp<-DFxts[newIndex]
    # Only the last row (the current date) of the expanding-window ranks is kept.
    newDecile<-tail(apply(DFtemp,2,percentileRank),n=1)
    DFRanked[as.Date(i),]<-newDecile
  }
  return(DFRanked)
}
#Begins the coding
## Load the raw CSVs and apply the initial character -> Date/numeric transformation.
Data_Month_Level_R <- initialTransform(read.csv("~/Upwork/Betasmartz - Black Litterman/FED's Model/Data_Month_Level_R.csv",stringsAsFactors=FALSE))
Data_Month_Perc_R <- initialTransform(read.csv("~/Upwork/Betasmartz - Black Litterman/FED's Model/Data_Month_Perc_R.csv",stringsAsFactors=FALSE))
Data_Month_PercCh_R <- initialTransform(read.csv("~/Upwork/Betasmartz - Black Litterman/FED's Model/Data_Month_PercCh_R.csv",stringsAsFactors=FALSE))
# BUG FIX: the quarterly series previously re-read the monthly percent-change
# file (copy-paste error); point it at the quarterly level file instead.
Data_Quarter_Level_R <- initialTransform(read.csv("~/Upwork/Betasmartz - Black Litterman/FED's Model/Data_Quarter_Level_R.csv",stringsAsFactors=FALSE))
## Transform the variables into xts time series indexed by the first (date) column.
Data_Month_Level_ts <- xts(Data_Month_Level_R[,-1] , order.by=Data_Month_Level_R[,1])
Data_Month_Perc_ts <- xts(Data_Month_Perc_R[,-1] , order.by=Data_Month_Perc_R[,1])
Data_Month_PercCh_ts <- xts(Data_Month_PercCh_R[,-1] , order.by=Data_Month_PercCh_R[,1])
Data_Quarter_Level_ts <- xts(Data_Quarter_Level_R[,-1] , order.by=Data_Quarter_Level_R[,1])
## Filter all the cases to work with (drop rows with any missing value).
Data_Month_Level_clear <- Data_Month_Level_ts[complete.cases(Data_Month_Level_ts)]
Data_Month_Perc_clear <- Data_Month_Perc_ts[complete.cases(Data_Month_Perc_ts)]
Data_Month_PercCh_clear <- Data_Month_PercCh_ts[complete.cases(Data_Month_PercCh_ts)]
Data_Quarter_Level_clear <- Data_Quarter_Level_ts[complete.cases(Data_Quarter_Level_ts)]
## Apply logarithms (so k-period diffs below are log returns).
Data_Month_Level_log <- log(Data_Month_Level_clear)
#Data_Quarter_Level_log <- log(Data_Quarter_Level_clear)
## Returns of the data 1-Month, 3-Month ,6-Month and 12-Month from Log
Data_Month_Level_R1 <- diff(Data_Month_Level_log,1)
Data_Month_Level_R3 <- diff(Data_Month_Level_log,3)
Data_Month_Level_R6 <- diff(Data_Month_Level_log,6)
Data_Month_Level_R12 <- diff(Data_Month_Level_log,12)
## Changes of the percentage series over 1, 3, 6 and 12 months (simple diffs,
## not logs, since these are already percentages).
Data_Month_Perc_R1 <- diff(Data_Month_Perc_clear,1)
Data_Month_Perc_R3 <- diff(Data_Month_Perc_clear,3)
Data_Month_Perc_R6 <- diff(Data_Month_Perc_clear,6)
Data_Month_Perc_R12 <- diff(Data_Month_Perc_clear,12)
## Filter again the cases to work with (diff() introduces leading NAs).
Data_Month_Level_R1 <-Data_Month_Level_R1[complete.cases(Data_Month_Level_R1)]
Data_Month_Level_R3 <-Data_Month_Level_R3[complete.cases(Data_Month_Level_R3)]
Data_Month_Level_R6 <-Data_Month_Level_R6[complete.cases(Data_Month_Level_R6)]
Data_Month_Level_R12 <-Data_Month_Level_R12[complete.cases(Data_Month_Level_R12)]
Data_Month_Perc_R1 <-Data_Month_Perc_R1[complete.cases(Data_Month_Perc_R1)]
Data_Month_Perc_R3 <-Data_Month_Perc_R3[complete.cases(Data_Month_Perc_R3)]
Data_Month_Perc_R6 <-Data_Month_Perc_R6[complete.cases(Data_Month_Perc_R6)]
Data_Month_Perc_R12 <-Data_Month_Perc_R12[complete.cases(Data_Month_Perc_R12)]
## Rank the data by deciles using the expanding-window ranking in DFRank().
#Returns by Deciles
Data_Month_Level_R1_Dec <- DFRank(Data_Month_Level_R1)
Data_Month_Level_R3_Dec <- DFRank(Data_Month_Level_R3)
Data_Month_Level_R6_Dec <- DFRank(Data_Month_Level_R6)
Data_Month_Level_R12_Dec<- DFRank(Data_Month_Level_R12)
Data_Month_Perc_R1_Dec <- DFRank(Data_Month_Perc_R1)
Data_Month_Perc_R3_Dec <- DFRank(Data_Month_Perc_R3)
Data_Month_Perc_R6_Dec <- DFRank(Data_Month_Perc_R6)
Data_Month_Perc_R12_Dec <- DFRank(Data_Month_Perc_R12)
#Percentage Variables by Deciles
Data_Month_Perc_Dec <- DFRank(Data_Month_Perc_clear)
Data_Month_PercCh_Dec <- DFRank(Data_Month_PercCh_clear)
5b7888182e158fb1bc842f394da03b8572be7730 | 9e46af7fc30aafa7a02afaadcda3bca5e8f85769 | /Time_Attendance_Employee_Report_Generation.R | 1105b81bb97307899dea016433ea799e2ae48a65 | [] | no_license | shikha34/Workforce_Perfromance_Analysis | 2484094e4fdaba4badd7a783071823a7da3b00b1 | 824956d70903f8d6b5c388de707b6ee31766e407 | refs/heads/master | 2022-12-04T11:22:09.699309 | 2020-08-18T15:08:08 | 2020-08-18T15:08:08 | 288,485,825 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 32,633 | r | Time_Attendance_Employee_Report_Generation.R |
Data_Cleaning_Pipeline<-function(x){
rm(list=ls())
library(readxl)
library(dummies)
library(stringr)
library(reshape)
library(data.table)
library(sqldf)
library("tidyr")
library("sqldf")
library("dplyr")
setwd("C:/D Drive Data/activity_split/rawdata")
myDir <- "C:/D Drive Data/activity_split/rawdata"
dep<-c("apr2019.xlsx","may2019.xlsx","jun2019.xlsx","jul2019.xlsx","aug2019.xlsx","sep2019.xlsx","oct2019.xlsx","nov2019.xlsx","dec2019.xlsx",
"jan2020.xlsx","feb2020.xlsx")
dep1 <- gsub("[.]xlsx", "", dep)
cleaning<-function(x)
{
x=sapply(x,toupper)
x=gsub("$","",x,fixed=TRUE)
x=gsub("=","",x,fixed=TRUE)
x=gsub("+","",x,fixed=TRUE)
x=gsub("-","",x,fixed=TRUE)
x=gsub("&","",x,fixed=TRUE)
x=gsub("[[:punct:]]","",x)
x=gsub("\\","",x,fixed=TRUE)
x=gsub("~","",x,fixed=TRUE)
x=gsub("^","",x,fixed=TRUE)
x=gsub("\\s+"," ",x)
x=gsub(" ","",x,fixed=TRUE)
x=gsub(" ","",x,fixed=TRUE)
x=gsub("_","",x,fixed=TRUE)
}
cleaning1<-function(x)
{
x=sapply(x,toupper)
x=gsub("$","",x,fixed=TRUE)
x=gsub("=","",x,fixed=TRUE)
x=gsub("+","",x,fixed=TRUE)
x=gsub("-","",x,fixed=TRUE)
x=gsub("&","",x,fixed=TRUE)
x=gsub("\\","",x,fixed=TRUE)
x=gsub("~","",x,fixed=TRUE)
x=gsub("^","",x,fixed=TRUE)
x=gsub("\\s+"," ",x)
x=gsub(" ","",x,fixed=TRUE)
x=gsub(" ","",x,fixed=TRUE)
x=gsub("_","",x,fixed=TRUE)
}
data<-list()
dat1<-list()
for( i in 1:NROW(dep))
{
data[i]<-list( read_excel(file.path(myDir, dep[i])) )
data[[i]]<-subset(data[[i]],select = c(EmployeeCode,EmploymentStatus,AttendanceType,Gender,DOJ,EmployeeLevelText,
WorkingCity,SubDept,Date, Shift,ShiftStartTime,ShiftEndTime,ActualInTime,ActualOutTime,AttendanceStatus,
LeaveType,LeaveStatusYN,RequestStatus,RegularizedYN,LeaveStatusYN,ApprovedRejectedOn,RosterNonRosterType
))
data[[i]]<-data[[i]][!duplicated(data[[i]][,c(1,9,10)]),]
data[[i]]$DOJ<-as.Date(data[[i]]$DOJ,format="%d-%b-%Y")
data[[i]]$ShiftStartTime<-strptime(data[[i]]$ShiftStartTime,format="%H:%M")
data[[i]]$ShiftStartTime<-strptime(data[[i]]$ShiftStartTime,format="%H:%M")
data[[i]]$ShiftEndTime<-strptime(data[[i]]$ShiftEndTime,format="%H:%M")
data[[i]]$ActualInTime<-strptime(data[[i]]$ActualInTime,format="%H:%M")
data[[i]]$ActualOutTime<-strptime(data[[i]]$ActualOutTime,format="%H:%M")
data[[i]]$Working_Hours<-ifelse(data[[i]]$ActualInTime>data[[i]]$ActualOutTime,(24-abs(difftime(data[[i]]$ActualOutTime,data[[i]]$ActualInTime,units="hours"))),abs(difftime(data[[i]]$ActualOutTime,data[[i]]$ActualInTime,units="hours")))
data[[i]]$Working_Hours1<-data[[i]]$Working_Hours
data[[i]][,c("AttendanceStatus")]<-sapply(data[[i]][,c("AttendanceStatus")],cleaning)
###PRESENT
##taking mispunch cases
data[[i]]$Working_Hours<-ifelse(data[[i]]$AttendanceStatus=="MISSPUNCH",substring(data[[i]]$Shift,1,3),data[[i]]$Working_Hours)
data[[i]]$Working_Hours<-ifelse(data[[i]]$AttendanceStatus=="MISSPUNCH" &
grepl("WOFF|HOL|ABSENT",data[[i]]$Shift) &
(data[[i]]$WorkingCity=="DEL"|data[[i]]$WorkingCity=="BOM"|data[[i]]$WorkingCity=="HYD"|data[[i]]$WorkingCity=="MAA"|data[[i]]$WorkingCity=="CCU"|
data[[i]]$WorkingCity=="BLR"),10,data[[i]]$Working_Hours)
data[[i]]$Working_Hours<-ifelse(data[[i]]$AttendanceStatus=="MISSPUNCH" &
grepl("WOFF|HOL|ABSENT",data[[i]]$Shift) &
(data[[i]]$WorkingCity!="DEL"&data[[i]]$WorkingCity!="BOM"&data[[i]]$WorkingCity!="HYD"&data[[i]]$WorkingCity!="MAA"&data[[i]]$WorkingCity!="CCU"&
data[[i]]$WorkingCity!="BLR"),8.5,data[[i]]$Working_Hours)
data[[i]]$Working_Hours<-ifelse(data[[i]]$Working_Hours==0 &
data[[i]]$AttendanceStatus=="PRESENT",substring(data[[i]]$Shift,1,3),data[[i]]$Working_Hours)
data[[i]]$Working_Hours<-ifelse(data[[i]]$Working_Hours==0 & grepl("WOFF|HOL|ABSENT",data[[i]]$Shift) &
(data[[i]]$WorkingCity=="DEL"|data[[i]]$WorkingCity=="BOM"|data[[i]]$WorkingCity=="HYD"|data[[i]]$WorkingCity=="MAA"|data[[i]]$WorkingCity=="CCU"|
data[[i]]$WorkingCity=="BLR") &
data[[i]]$AttendanceStatus=="PRESENT",10,data[[i]]$Working_Hours)
data[[i]]$Working_Hours<-ifelse(data[[i]]$Working_Hours==0 & grepl("WOFF|HOL|ABSENT",data[[i]]$Shift) &
(data[[i]]$WorkingCity!="DEL"&data[[i]]$WorkingCity!="BOM"&data[[i]]$WorkingCity!="HYD"&data[[i]]$WorkingCity!="MAA"& data[[i]]$WorkingCity!="CCU"&
data[[i]]$WorkingCity!="BLR") &
data[[i]]$AttendanceStatus=="PRESENT",8.5,data[[i]]$Working_Hours)
data[[i]]$Working_Hours<-as.numeric(data[[i]]$Working_Hours)
data[[i]]$Working_Hours<-ifelse(is.na(data[[i]]$Working_Hours),0,data[[i]]$Working_Hours)
data[[i]]$Working_Hours1<-ifelse(data[[i]]$Working_Hours>0 & (data[[i]]$AttendanceStatus=="PRESENT"|
data[[i]]$AttendanceStatus=="MISSPUNCH"|data[[i]]$AttendanceStatus=="HALFDAY" ),data[[i]]$Working_Hours,0)
##ABSENT
data[[i]]$Working_Hours1<-ifelse(data[[i]]$Working_Hours==0 & data[[i]]$AttendanceStatus=="ABSENT","ABSENT",data[[i]]$Working_Hours1)
##LEAVE
data[[i]]$Working_Hours1<-ifelse(data[[i]]$Working_Hours==0 & data[[i]]$AttendanceStatus=="LEAVE" & (data[[i]]$RequestStatus=="APPROVED"
|data[[i]]$RequestStatus=="PARTIALAPPROVED"),data[[i]]$LeaveType,data[[i]]$Working_Hours1)
#HOLIDAY
data[[i]]$Working_Hours1<-ifelse(data[[i]]$Working_Hours==0 &
(data[[i]]$AttendanceStatus=="HOLIDAYLEAVE"|data[[i]]$AttendanceStatus=="HOLIDAY"),"HOLIDAY",data[[i]]$Working_Hours1)
##WEEKOFF
data[[i]]$Working_Hours1<-ifelse(data[[i]]$Working_Hours==0 &
data[[i]]$AttendanceStatus=="WEEKOFF","WEEKOFF",data[[i]]$Working_Hours1)
data[[i]]$Date<-as.Date(data[[i]]$Date,format="%d-%b-%Y")
data[[i]]<-data[[i]][order(data[[i]]$Date),]
# data[[i]]$repeated<-lapply(1:NROW(data[[i]]),function(x){ifelse(identical(data[[i]][x,1],data[[i]][x+1,1]),0,1)})
x1<-as.data.frame( data[[i]])
x1[c( "Gender","EmployeeLevelText","WorkingCity","SubDept","RosterNonRosterType","RequestStatus","RegularizedYN","AttendanceStatus")]<-sapply(x1[c( "Gender","EmployeeLevelText","WorkingCity","SubDept","RosterNonRosterType","RequestStatus","RegularizedYN",
"AttendanceStatus")],cleaning)
x1[c( "Shift")]<-sapply(x1[c( "Shift")],cleaning1)
x1<-subset(x1,RosterNonRosterType=="ROSTEREMPLOYEES")
x1<-subset(x1,WorkingCity!="CORP")
x1$SubDept[x1$SubDept=="RSA"]<-"RAMP"
x1$SubDept[x1$SubDept=="SAFETYMARSHAL"]<-"RAMP"
x1$SubDept[x1$SubDept=="RAMPSAFETY"]<-"RAMP"
x1$SubDept[x1$SubDept=="AIRPORTOPERATIONSCUSTOMERSERVICES"]<-"CUSTOMERSERVICES"
x1<-subset(x1,SubDept=="RAMP"|SubDept=="SECURITY"|SubDept=="CUSTOMERSERVICES")
x1<-subset(x1,EmployeeLevelText=="EXECUTIVE"|EmployeeLevelText=="OFFICER"|EmployeeLevelText=="RSA"
|EmployeeLevelText=="SENIOREXECUTIVE"|EmployeeLevelText=="EXECUTIVE")
# x1<-subset(x1,EmployeeLevelText=="ASSISTANTMANAGER"|EmployeeLevelText=="MANAGER"|EmployeeLevelText=="SENIORMANAGER")
#
x1$Metro<-ifelse(x1$WorkingCity=="DEL" | x1$WorkingCity=="CCU" |
x1$WorkingCity=="BLR" | x1$WorkingCity=="HYD" | x1$WorkingCity=="BOM" | x1$WorkingCity=="MAA"|x1$WorkingCity=="PNQ",1,0)
x1$NonMetro<-ifelse(x1$WorkingCity!="DEL" & x1$WorkingCity!="CCU" &
x1$WorkingCity!="BLR" & x1$WorkingCity!="HYD" & x1$WorkingCity!="BOM" & x1$WorkingCity!="MAA"|x1$WorkingCity!="PNQ",1,0)
x1$Shift_hours<-substr(x1$Shift,start=1,stop=3)
# x1$Shift_hours<-ifelse( grepl("WOFF|HOL|ABSENT",x1$Shift) &
# (x1$WorkingCity=="DEL"|x1$WorkingCity=="BOM"|x1$WorkingCity=="HYD"|x1$WorkingCity=="MAA"|x1$WorkingCity=="CCU"|
# x1$WorkingCity=="BLR"),10,x1$Shift_hours)
#
# x1$Shift_hours<-ifelse( grepl("WOFF|HOL|ABSENT",x1$Shift) &
# (x1$WorkingCity=="DEL"&x1$WorkingCity=="BOM"&x1$WorkingCity=="HYD"&x1$WorkingCity=="MAA"& x1$WorkingCity=="CCU"&
# x1$WorkingCity=="BLR"),8.5,x1$Shift_hours)
#
x1$Shift_hours<-as.numeric(x1$Shift_hours)
x1$WEEKOFF_shift<-ifelse(x1$Shift_hours=="WOF"|x1$Shift_hours=="WOFf",1,0)
x1$WEEKOFF_actual<-ifelse(x1$AttendanceStatus=="WEEKOFF",1,0)
x1$HOLIDAY_shift<-ifelse(x1$Shift_hours=="HOL",1,0)
x1$ABSENT<-ifelse(x1$AttendanceStatus=="ABSENT",1,0)
x1$HALFDAY<-ifelse(x1$AttendanceStatus=="HALFDAY",1,0)
x1$HOLIDAY_actual<-ifelse(x1$AttendanceStatus=="HOLIDAY"|x1$AttendanceStatus=="HOLIDAYLEAVE"|x1$AttendanceStatus=="LEAVE",1,0)
x1$MISSPUNCH<-ifelse(x1$AttendanceStatus=="MISSPUNCH",1,0)
x1$BR_freq<-str_count(x1$Shift,"BR")
x1$Shift1<-ifelse(grepl("^10|^12|^11|^10.5|^11.5|^12.5", x1$Shift),
substring(x1$Shift,5),substring(x1$Shift,4))
x1$Shift1<-gsub("^G|^F|^BR|ENT","",x1$Shift1)
x1$Shift1<-ifelse(x1$Shift1=="0000TO0000",NA,x1$Shift1)
x1$ActualInTime<-as.POSIXct(x1$ActualInTime,format="%Y-%m-%d %H:%M:%S")
x1$ActualOutTime<-as.POSIXct(x1$ActualOutTime,format="%Y-%m-%d %H:%M:%S")
#making start and end shift columns
x1<-setDT(x1)[,c("Shift_Start_Time","Shift_End_Time"):=tstrsplit(Shift1,"TO")]
x1$Shift_Start_Time1<-format(strptime(x1$Shift_Start_Time,"%H%M"),"%H:%M")
x1$Shift_End_Time1<-format(strptime(x1$Shift_End_Time,"%H%M"),"%H:%M")
x1$Shift_Start_Time1<-ifelse(is.na(x1$Shift_Start_Time1),0,x1$Shift_Start_Time1)
x1$Shift_End_Time1<-ifelse(is.na(x1$Shift_End_Time1),0,x1$Shift_End_Time1)
x1$Shift_Start_Time1<-ifelse(grepl("NA",x1$Shift_Start_Time1),0,x1$Shift_Start_Time1)
x1$Shift_End_Time1<-ifelse(grepl("NA",x1$Shift_End_Time1),0,x1$Shift_End_Time1)
x1$Shift_Start_Time1<-as.POSIXct(x1$Shift_Start_Time1,"%H:%M",tz = "GMT")
x1$Shift_End_Time1<-as.POSIXct(x1$Shift_End_Time1,"%H:%M",tz = "GMT")
x1$Overtime_hrs<-x1$Shift_hours-x1$Working_Hours
# x1$EarlyTime<-difftime(x1$Shift_Start_Time1,x1$ActualInTime,units = "mins",format ="%Y-%m-%d %H:%M:%S")
# x1$LateTime<-difftime(x1$Shift_End_Time1,x1$ActualOutTime,units = "mins",format ="%Y-%m-%d %H:%M:%S")
#
x1$EarlyTime<-difftime(strptime(format(x1$Shift_Start_Time1,"%H:%M:%S"),"%H:%M:%S"),strptime(format(x1$ActualInTime,"%H:%M:%S"),"%H:%M:%S"),units="mins")
x1$LateTime<-difftime(strptime(format(x1$Shift_End_Time1,"%H:%M:%S"),"%H:%M:%S"),strptime(format(x1$ActualOutTime,"%H:%M:%S"),"%H:%M:%S"),units="mins")
x1$miss<-ifelse((strptime(format(x1$ActualInTime,"%H:%M:%S"),"%H:%M:%S")==strptime("00:00:00","%H:%M:%S") |
strptime(format(x1$ActualOutTime,"%H:%M:%S"),"%H:%M:%S")==strptime("00:00:00","%H:%M:%S") )
&
!grepl("WOF|HOL|ABSENT|LEAVE",x1$Shift) & !grepl("WEEKOFF|HOLIDAY|ABSENT|LEAVE|HOLIDAYLEAVE",x1$AttendanceStatus) ,1,0)
#remove 1 in miss as they are misspunch cases
#remove attendance with holiday ,leave, absent, weekoff , but if there is login greater than 0 then they should not be removed.
x1$removed<-ifelse((x1$HOLIDAY_actual==1 |x1$MISSPUNCH==1|x1$ABSENT==1|x1$WEEKOFF_actual==1|
x1$HALFDAY==1)& x1$Working_Hours1==0,1,0)
##having above scenarios and then removing cases which have been approved and reguarise for being present
x1$take<-ifelse((x1$HOLIDAY_actual!=1 & x1$MISSPUNCH!=1& x1$ABSENT!=1& x1$WEEKOFF_actual!=1
)& x1$Working_Hours1>0 & x1$RegularizedYN=="Y" & x1$LeaveStatusYN=="N" & (x1$RequestStatus=="APPROVED"|
x1$RequestStatus=="PARTIALAPPROVED"),1,0)
# x1$EarlyTime<-difftime(x1$Shift_Start_Time1,x1$ActualInTime,units = "mins")
# x1$LateTime<-difftime(x1$Shift_End_Time1,x1$ActualOutTime,units = "mins")
#
x1$Date<-as.character(x1$Date)
x1$ActualInTime<-as.character(x1$ActualInTime)
x1$ActualOutTime<-as.character(x1$ActualOutTime)
x1$Working_Hours<-as.character(x1$Working_Hours)
x1$Working_Hours1<-as.character(x1$Working_Hours1)
x1$ShiftStartTime<-as.character(x1$ShiftStartTime)
x1$ShiftEndTime<-as.character(x1$ShiftEndTime)
x1$DOJ<-as.character(x1$DOJ)
dat1[i]<-list(x1)
}
final<-do.call(rbind,dat1)
final$Date<-as.Date(final$Date,"%Y-%m-%d")
final$Month<-format(final$Date,"%b")
final$Year<-format(final$Date,"%Y")
final<-data.frame(final)
final$ActualInTime<-as.POSIXct(final$ActualInTime,format="%Y-%m-%d %H:%M:%S",tz="GMT")
final$ActualOutTime<-as.POSIXct(final$ActualOutTime,format="%Y-%m-%d %H:%M:%S",tz="GMT")
# final<-subset(final,select = -c(repeated))
#################late login aand early logout parameters################
final$Start_early_late<-NULL
final$End_early_late<-NULL
final$EarlyTime1<-NULL
final$LateTime1<-NULL
final$Start_early_late<- ifelse(
(
(strptime(format(final$Shift_Start_Time1,"%H:%M:%S"),"%H:%M:%S")>=strptime("18:00:0","%H:%M:%S")
& (strptime(format(final$Shift_Start_Time1,"%H:%M:%S"),"%H:%M:%S")<=strptime("23:59:00","%H:%M:%S"))
)
& ( strptime(format(final$ActualInTime,"%H:%M:%S"),"%H:%M:%S")>=strptime("00:00:00","%H:%M:%S")
&
strptime(format(final$ActualInTime,"%H:%M:%S"),"%H:%M:%S")<=strptime("12:00:00","%H:%M:%S")
)
)
|
((strptime(format(final$Shift_Start_Time1,"%H:%M:%S"),"%H:%M:%S")>=strptime("00:00:0","%H:%M:%S")
& (strptime(format(final$Shift_Start_Time1,"%H:%M:%S"),"%H:%M:%S")<=strptime("02:00:00","%H:%M:%S"))
)
&
( strptime(format(final$ActualInTime,"%H:%M:%S"),"%H:%M:%S")>=strptime("18:00:00","%H:%M:%S")
&
strptime(format(final$ActualInTime,"%H:%M:%S"),"%H:%M:%S")<=strptime("23:59:00","%H:%M:%S")
)
)
|
((strptime(format(final$Shift_Start_Time1,"%H:%M:%S"),"%H:%M:%S")>=strptime("23:00:00","%H:%M:%S")
& (strptime(format(final$Shift_Start_Time1,"%H:%M:%S"),"%H:%M:%S")<=strptime("23:59:00","%H:%M:%S"))
)
&
( strptime(format(final$ActualInTime,"%H:%M:%S"),"%H:%M:%S")>=strptime("00:00:00","%H:%M:%S")
&
strptime(format(final$ActualInTime,"%H:%M:%S"),"%H:%M:%S")<=strptime("12:00:00","%H:%M:%S")
) )
|
((strptime(format(final$Shift_Start_Time1,"%H:%M:%S"),"%H:%M:%S")>=strptime("00:00:0","%H:%M:%S")
& (strptime(format(final$Shift_Start_Time1,"%H:%M:%S"),"%H:%M:%S")<=strptime("12:00:00","%H:%M:%S"))
) &
( strptime(format(final$ActualInTime,"%H:%M:%S"),"%H:%M:%S")>=strptime("18:00:00","%H:%M:%S")
&
strptime(format(final$ActualInTime,"%H:%M:%S"),"%H:%M:%S")<=strptime("23:59:00","%H:%M:%S")
)
)
,
1,0)
final$End_early_late<- ifelse(
(
(strptime(format(final$Shift_End_Time1,"%H:%M:%S"),"%H:%M:%S")>=strptime("18:00:0","%H:%M:%S")
& (strptime(format(final$Shift_End_Time1,"%H:%M:%S"),"%H:%M:%S")<=strptime("23:59:00","%H:%M:%S"))
)
& ( strptime(format(final$ActualOutTime,"%H:%M:%S"),"%H:%M:%S")>=strptime("00:00:00","%H:%M:%S")
&
strptime(format(final$ActualOutTime,"%H:%M:%S"),"%H:%M:%S")<=strptime("12:00:00","%H:%M:%S")
)
)
|
((strptime(format(final$Shift_End_Time1,"%H:%M:%S"),"%H:%M:%S")>=strptime("00:00:0","%H:%M:%S")
& (strptime(format(final$Shift_End_Time1,"%H:%M:%S"),"%H:%M:%S")<=strptime("02:00:00","%H:%M:%S"))
)
&
( strptime(format(final$ActualOutTime,"%H:%M:%S"),"%H:%M:%S")>=strptime("18:00:00","%H:%M:%S")
&
strptime(format(final$ActualOutTime,"%H:%M:%S"),"%H:%M:%S")<=strptime("23:59:00","%H:%M:%S")
)
)
|
((strptime(format(final$Shift_End_Time1,"%H:%M:%S"),"%H:%M:%S")>=strptime("23:00:00","%H:%M:%S")
& (strptime(format(final$Shift_End_Time1,"%H:%M:%S"),"%H:%M:%S")<=strptime("23:59:00","%H:%M:%S"))
)
&
( strptime(format(final$ActualOutTime,"%H:%M:%S"),"%H:%M:%S")>=strptime("00:00:00","%H:%M:%S")
&
strptime(format(final$ActualOutTime,"%H:%M:%S"),"%H:%M:%S")<=strptime("12:00:00","%H:%M:%S")
) )
|
((strptime(format(final$Shift_End_Time1,"%H:%M:%S"),"%H:%M:%S")>=strptime("00:00:0","%H:%M:%S")
& (strptime(format(final$Shift_End_Time1,"%H:%M:%S"),"%H:%M:%S")<=strptime("12:00:00","%H:%M:%S"))
) &
( strptime(format(final$ActualOutTime,"%H:%M:%S"),"%H:%M:%S")>=strptime("18:00:00","%H:%M:%S")
&
strptime(format(final$ActualOutTime,"%H:%M:%S"),"%H:%M:%S")<=strptime("23:59:00","%H:%M:%S")
)
)
,
1,0)
##early time is for shift start and actual start
## late time is for shift end and actual end
final$LateTime1<-ifelse(final$End_early_late==1,(1440-abs(final$LateTime)),final$LateTime)
final$EarlyTime1<-ifelse(final$Start_early_late==1,(1440-abs(final$EarlyTime)),final$EarlyTime)
longg<-final
rm(final)
longg$Date<-as.Date(longg$Date,"%Y-%m-%d")
longg$Year<-format(longg$Date,"%Y")
longg$Month<-format(longg$Date,"%b")
longg$Day<-format(longg$Date,"%d")
longg$Week<-weekdays(longg$Date)
longg$Quarters<-quarters(longg$Date)
# longg<-subset(longg,select = -c(repeated))
longg<-data.frame(longg)
longg[c("EmployeeLevelText","WorkingCity","SubDept")]<-sapply(longg[c("EmployeeLevelText","WorkingCity","SubDept")],cleaning)
longg<-subset(longg,WorkingCity!="CORP")
longg<-subset(longg,EmployeeLevelText!="ASSOCIATEVP")
longg$Metro<-ifelse(longg$WorkingCity=="DEL" | longg$WorkingCity=="CCU" |
longg$WorkingCity=="BLR" | longg$WorkingCity=="HYD" | longg$WorkingCity=="BOM" | longg$WorkingCity=="MAA",1,0)
longg$NonMetro<-ifelse(longg$WorkingCity!="DEL" & longg$WorkingCity!="CCU" &
longg$WorkingCity!="BLR" & longg$WorkingCity!="HYD" & longg$WorkingCity!="BOM" & longg$WorkingCity!="MAA",1,0)
longg$LeaveType<-sapply(longg$LeaveType,cleaning)
longg$AttendanceStatus<-ifelse(longg$AttendanceStatus=="LEAVE",longg$LeaveType,longg$AttendanceStatus)
longg$WEEKOFF<-ifelse(longg$AttendanceStatus=="WEEKOFF",1,0)
longg$ABSENT<-ifelse(longg$AttendanceStatus=="ABSENT",1,0)
longg$ACCIDENTLEAVE<-ifelse(longg$AttendanceStatus=="ACCIDENTLEAVE",1,0)
longg$CASUALLEAVE<-ifelse(longg$AttendanceStatus=="CASUALLEAVE",1,0)
longg$COMPENSATORYOFF<-ifelse(longg$AttendanceStatus=="COMPENSATORYOFF",1,0)
longg$EMERGENCYLEAVE<-ifelse(longg$AttendanceStatus=="EMERGENCYLEAVE",1,0)
longg$HOLIDAY<-ifelse(longg$AttendanceStatus=="HOLIDAY"|longg$AttendanceStatus=="HOLIDAYLEAVE",1,0)
longg$LEAVEWITHOUTPAY<-ifelse(longg$AttendanceStatus=="LEAVEWITHOUTPAY",1,0)
longg$MATERNITYLEAVE<-ifelse(longg$AttendanceStatus=="MATERNITYLEAVE",1,0)
longg$OTHERLEAVE<-ifelse(longg$AttendanceStatus=="OTHERLEAVE",1,0)
longg$PATERNITYLEAVE<-ifelse(longg$AttendanceStatus=="PATERNITYLEAVE",1,0)
longg$PRESENT<-ifelse(longg$AttendanceStatus=="PRESENT",1,0)
longg$PRIVILEGELEAVE<-ifelse(longg$AttendanceStatus=="PRIVILEGELEAVE",1,0)
longg$RELOCATIONLEAVE<-ifelse(longg$AttendanceStatus=="RELOCATIONLEAVE",1,0)
longg$SICKLEAVE<-ifelse(longg$AttendanceStatus=="SICKLEAVE",1,0)
longg$UNAPPROVEDABSENCE<-ifelse(longg$AttendanceStatus=="UNAPPROVEDABSENCE",1,0)
longg$ADOPTIONLEAVE<-ifelse(longg$AttendanceStatus=="ADOPTIONLEAVE",1,0)
longg$HALFDAY<-ifelse(longg$AttendanceStatus=="HALFDAY",1,0)
longg$MISSPUNCH<-ifelse(longg$AttendanceStatus=="MISSPUNCH",1,0)
longg$SL_CL<-ifelse(longg$SICKLEAVE==1 | longg$CASUALLEAVE ==1,1,0)
longg$LONG_NAS<-ifelse(longg$MATERNITYLEAVE==1 | longg$ACCIDENTLEAVE==1|longg$OTHERLEAVE ==1| longg$PATERNITYLEAVE ==1|
longg$RELOCATIONLEAVE==1|longg$ADOPTIONLEAVE==1 | longg$EMERGENCYLEAVE==1,1,0)
longg$WEEKOFF1<-ifelse(longg$WEEKOFF==1 | longg$COMPENSATORYOFF ==1,1,0)
longg$ABSENT1<-ifelse(longg$UNAPPROVEDABSENCE==1 | longg$ABSENT ==1,1,0)
longg$PRESENT1<-ifelse(longg$PRESENT==1 | longg$HALFDAY==1|longg$MISSPUNCH==1,1,0)
longg$ctr<-1
longg<-subset(longg,!is.na(AttendanceStatus))
longg$Present_WHA<-ifelse((longg$AttendanceStatus=="PRESENT"|longg$AttendanceStatus=="MISSPUNCH"
|longg$AttendanceStatus=="HALFDAY") &
(longg$Shift=="WOFF0000TO0000"|longg$Shift=="ABSENT0000TO0000"|
longg$Shift=="HOL0000TO0000"),1,0)
longg$Date<-as.character(longg$Date)
longg$EmployeeCode<-as.character(longg$EmployeeCode)
longg<-data.table(longg)
##repaceing attendance staus with leaves if its approved or partial approved as partial approved cases wont fall in
### leave status with "Y" it will have leave status as "N
longg$AttendanceStatus<-ifelse(longg$LeaveStatusYN=="Y"&(longg$RequestStatus=="APPROVED"|
longg$RequestStatus=="PARTIALAPPROVED"),longg$LeaveType,longg$AttendanceStatus)
long1<-unique(longg,by=c("EmployeeCode","Date"))
library(reshape)
table(long1$AttendanceStatus)
long1$AttendanceStatus[long1$AttendanceStatus=="COMPENSATORYOFF"]<-"WEEKOFF"
long1$AttendanceStatus[long1$AttendanceStatus=="PRIVILEGELEAVE"]<-"PL"
long1$AttendanceStatus[long1$AttendanceStatus=="LEAVEWITHOUTPAY"]<-"LWP"
long1$AttendanceStatus[long1$AttendanceStatus=="ABSENT"|
long1$AttendanceStatus=="UNAPPROVEDABSENCE"]<-"ABSENT"
long1$AttendanceStatus[long1$AttendanceStatus=="HALFDAY"|
long1$AttendanceStatus=="MISSPUNCH"]<-"PRESENT"
long1$AttendanceStatus[long1$AttendanceStatus=="CASUALLEAVE"|
long1$AttendanceStatus=="SICKLEAVE"]<-"SL_CL"
long1$AttendanceStatus[long1$AttendanceStatus=="ADOPTIONLEAVE"|long1$AttendanceStatus=="ACCIDENTLEAVE"|
long1$AttendanceStatus=="MATERNITYLEAVE"|long1$AttendanceStatus=="EMERGENCYLEAVE"|
long1$AttendanceStatus=="OTHERLEAVE"|long1$AttendanceStatus=="PATERNITYLEAVE"|
long1$AttendanceStatus=="RELOCATIONLEAVE"]<-"LONG_NAS"
long1$AttendanceStatus[long1$AttendanceStatus=="HOLIDAYLEAVE"|
long1$AttendanceStatus=="HOLIDAY"]<-"HOL"
#monhtly weekly overall
dd1<-dummy(long1$Week)
long1<-cbind(long1,dd1)
gg11<-long1%>% filter(AttendanceStatus %in% c("PRESENT" ,"WEEKOFF","ABSENT","SL_CL"
,"PL" ,"LWP","HOL" , "LONG_NAS")) %>%
select(c("AttendanceStatus","ctr", "WeekMonday","WeekTuesday" , "WeekWednesday" ,"WeekThursday","WeekFriday","WeekSaturday", "WeekSunday",
"Year","Month","WorkingCity","SubDept","EmployeeLevelText"))
gg11<-aggregate(gg11[,c("WeekFriday", "WeekMonday","WeekSaturday", "WeekSunday" , "WeekThursday",
"WeekTuesday" , "WeekWednesday")],by=list(gg11$AttendanceStatus,gg11$Year,gg11$Month,
gg11$WorkingCity, gg11$SubDept, gg11$EmployeeLevelText),sum)
names(gg11)[1]<-"Leave_Type"
names(gg11)[2]<-"Year"
names(gg11)[3]<-"Month"
names(gg11)[4]<-"Base_Code"
names(gg11)[5]<-"Department"
names(gg11)[6]<-"Designation"
names(gg11)
gg11<-gg11[c(1:6,8,12,13,11,7,9,10)]
write.csv(gg11,"C:/D Drive Data/RosterMatrix/Jan2020/Raw1.csv")
d1<-subset(longg)
d1$Shift_Hrs<-substr(d1$Shift,1,3)
brakshift<-d1
brakshift<-brakshift[order(brakshift$EmployeeCode,brakshift$Date),]
brakshift$Shift_Hrs<-lapply(1:NROW(brakshift),function(x){
ifelse(brakshift$EmployeeCode[x]==brakshift$EmployeeCode[x+1] &
brakshift$Date[x]==brakshift$Date[x+1] &
brakshift$BR_freq[x]==1 ,8.5, brakshift$Shift_Hrs[x])})
brakshift$Shift_Hrs<-as.numeric(brakshift$Shift_Hrs)
brakshift$Date<-as.character(brakshift$Date)
## will consider two rows for breakshift cases needs to be taken for login analysis and deficit hours.
brakshift1<-d1
brakshift1<-brakshift1 %>% distinct(EmployeeCode,Date,Shift, .keep_all = TRUE)
##for shifts compliance need to take one row for each of the breakshift cases.
brakshift2<-brakshift %>% distinct(EmployeeCode,Date, .keep_all = TRUE)
brakshift1<-data.frame(brakshift1)
brakshift1$ActualInTime<-as.character(brakshift1$ActualInTime)
brakshift1$ActualOutTime<-as.character(brakshift1$ActualOutTime)
brakshift2<-data.frame(brakshift2)
brakshift2$ActualInTime<-as.character(brakshift2$ActualInTime)
brakshift2$ActualOutTime<-as.character(brakshift2$ActualOutTime)
brakshift2$Planned_weekoff<-ifelse(grepl("^WOFF",brakshift2$Shift),1,0)
brakshift2$Actual_weekoff<-ifelse(brakshift2$AttendanceStatus=="WEEKOFF"|
brakshift2$AttendanceStatus=="COMPENSATORYOFF",1,0)
brakshift2$Shift_Hrs<-ifelse(brakshift2$Actual_weekoff==1,"Actual_weekoff",brakshift2$Shift_Hrs)
###################Shift Complinace Raw Data#& Report########################################################
d3<-sqldf("select EmployeeCode,WorkingCity,SubDept,EmployeeLevelText,Month,Date,Shift, Shift_Hrs,
count(Shift_Hrs) as Freq from brakshift2 group by EmployeeCode,WorkingCity,SubDept,EmployeeLevelText,Month,Date,Shift, Shift_Hrs
")
write.csv(d3,"Shifts_comp_raw.csv")
d3_<-sqldf("select EmployeeCode,WorkingCity,SubDept,EmployeeLevelText,Month,Shift, Shift_Hrs,
count(Shift_Hrs) as Freq
from brakshift2 group by EmployeeCode,WorkingCity,SubDept,EmployeeLevelText,Month,Shift, Shift_Hrs
")
write.csv(d3_,"Shifts_comp_summary.csv")
###################################Deficit hours######################################################
d1<-brakshift1[,c(1,7,8:15,27,24,51,52,47,34,39,40,74)]
d1$Nologin<-ifelse(as.character(d1$ActualInTime)=="2020-04-01 00:00:00" & as.character(d1$ActualOutTime)=="2020-04-01 00:00:00",
1,0)
#d1<-subset(d1,AttendanceStatus=="PRESENT" & MISSPUNCH==0)
#taking all data
d1$Date<-as.Date(d1$Date,"%Y-%m-%d")
d1$Year<-format(d1$Date,"%Y")
d1$Month<-format(d1$Date,"%b")
dd<-d1
dd$StartShift<-ifelse(dd$EarlyTime1<0,"Late",ifelse(dd$EarlyTime1>0,"Early","Ontime"))
dd$EndShift<-ifelse(dd$LateTime1<0,"Late",ifelse(dd$LateTime1>0,"Early","Ontime"))
dd$Start_Shift_Change<-ifelse(dd$EarlyTime1>=180 |dd$EarlyTime1<=-180,"Shift_Change","NoShift_Change" )
dd$End_Shift_Change<-ifelse(dd$LateTime1>=180 |dd$LateTime1<=-180,"Shift_Change","NoShift_Change" )
###calcuting deficit hours column based on following conditions:
dd$Shift_hours<-ifelse(dd$AttendanceStatus=="PRIVILEGELEAVE"|dd$AttendanceStatus=="COMPENSATORYOFF"|
dd$AttendanceStatus=="CASUALLEAVE"|dd$AttendanceStatus=="HOLIDAYLEAVE"|dd$AttendanceStatus=="RELOCATIONLEAVE"|
dd$AttendanceStatus=="MATERNITYLEAVE"|dd$AttendanceStatus=="HOLIDAY",0,dd$Shift_hours)
dd$Working_Hours1<-ifelse(dd$AttendanceStatus=="WEEKOFF"|dd$AttendanceStatus=="PRIVILEGELEAVE"|dd$AttendanceStatus=="COMPENSATORYOFF"|
dd$AttendanceStatus=="CASUALLEAVE"|dd$AttendanceStatus=="HOLIDAYLEAVE"|dd$AttendanceStatus=="RELOCATIONLEAVE"|
dd$AttendanceStatus=="MATERNITYLEAVE"|dd$AttendanceStatus=="HOLIDAY",0,dd$Working_Hours1)
df_hrs2<-sqldf("select Month,EmployeeCode,WorkingCity, SubDept,AttendanceStatus,sum(Shift_hours) as
Planned_Hrs, sum(Working_Hours1) as Actual_Hours,sum(WEEKOFF1) as WEEKOFF from dd group by
Month,EmployeeCode,WorkingCity, SubDept,AttendanceStatus")
# names(dd)
##removing misspunch and leaves and taking only present cases.
dd1<-subset(dd,AttendanceStatus=="PRESENT")
df_hrs4<-sqldf("select Month,EmployeeCode,WorkingCity, SubDept,StartShift,sum(EarlyTime1) as
Mins_S from dd1 group by
Month,EmployeeCode,WorkingCity, SubDept,StartShift")
df_hrs4$Mins_S<-abs(df_hrs4$Mins_S)
df_hrs4_<-sqldf("select Month,EmployeeCode,WorkingCity, SubDept,EndShift,sum(LateTime1) as
Mins_E from dd1 group by
Month,EmployeeCode,WorkingCity, SubDept,EndShift")
df_hrs4_$Mins_E<-abs(df_hrs4_$Mins_E)
######daily Deficit Report#############################################################################
df_hrs4_daily_s<-sqldf("select Month,Date,Shift,ActualInTime,ActualOutTime,EmployeeCode,WorkingCity, SubDept,AttendanceStatus,
Start_Shift_Change,StartShift,MISSPUNCH,sum(EarlyTime1) as
Mins_S from dd1 group by
Month,Date,Shift,ActualInTime,ActualOutTime,EmployeeCode,WorkingCity, SubDept,AttendanceStatus,Start_Shift_Change,StartShift,MISSPUNCH")
df_hrs4_daily_s$Mins_S<-abs(df_hrs4_daily_s$Mins_S)
df_hrs4_daily_e<-sqldf("select Month,Date,Shift,ActualInTime,ActualOutTime,EmployeeCode,WorkingCity, SubDept,End_Shift_Change,EndShift,MISSPUNCH,sum(LateTime1) as
Mins_E from dd1 group by
Month,Date,Shift,ActualInTime,ActualOutTime,EmployeeCode,WorkingCity, SubDept,End_Shift_Change,EndShift,MISSPUNCH")
df_hrs4_daily_e$Mins_E<-abs(df_hrs4_daily_e$Mins_E)
write.csv(df_hrs2,"Deficit_HOurs.csv")
write.csv(df_hrs4,"LoginAnalysis_Start.csv")
write.csv(df_hrs4_,"LoginAnalysis_End.csv")
write.csv(df_hrs4_daily_s,"LoginAnalysis_Start_raw.csv")
write.csv(df_hrs4_daily_e,"LoginAnalysis_End_raw.csv")
dd1$LateTime1<-abs(dd1$LateTime1)
dd1$EarlyTime1<-abs(dd1$EarlyTime1)
#########################Late Login Early Logout#############################################
#d2<-subset(dd,dd$LateTime1_afterbuffer>=10|dd$EarlyTime1_afterbuffer>=10)
d2<-dd1
d3<-subset(d2,d2$StartShift=="Late"&d2$EndShift=="Early")
d4<-subset(d2,d2$StartShift=="Early"&d2$EndShift=="Late")
##summary of d3
d3_summary<-sqldf("select WorkingCity,EmployeeCode,Date,Shift,ActualInTime,ActualOutTime,EmployeeCode,WorkingCity, SubDept,Start_Shift_Change,StartShift,MISSPUNCH,sum(EarlyTime1) as
Mins_S from dd1 group by
Month,Date,Shift,ActualInTime,ActualOutTime,EmployeeCode,WorkingCity, SubDept,Start_Shift_Change,StartShift,MISSPUNCH")
d3_summary$Total_LostHours<-d3_summa
write.csv(d3,"LateLogin_EarlyLogout.csv")
write.csv(d4,"EarlyLogin_LateLogout.csv")
## Raw Attendance data Report###########################################################################
##daily
attendance<-sqldf("select Month,Date,Shift,ActualInTime,ActualOutTime,EmployeeCode,WorkingCity, SubDept,AttendanceStatus
from brakshift1 group by
Month,Date,Shift,ActualInTime,ActualOutTime,EmployeeCode,WorkingCity, SubDept,AttendanceStatus")
write.csv(attendance,"Raw_AttendanceData.csv")
}
# Run the attendance-data cleaning pipeline defined above; all reports
# (deficit hours, login analyses, raw attendance) are written to CSV as
# side effects of this call.
Data_Cleaning_Pipeline()
|
3ed7ca4f677ac801a076e1a169e9bf0c9a0efc43 | 7f7600fc0d91061475f0afb2b8abd8e9b4ae8c39 | /man/simulate.mm.Rd | 74c15a85a8d6821b60175c8b7412ec965b9b6dc0 | [] | no_license | cran/smmR | 4b7ed330c8246984e04efebee84b6208c52e12bb | fb6d7eeb0cde8f340b71154e9f964ce413d1414f | refs/heads/master | 2023-07-03T11:11:15.496180 | 2021-08-03T11:00:06 | 2021-08-03T11:00:06 | 344,525,315 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,322 | rd | simulate.mm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mm.R
\name{simulate.mm}
\alias{simulate.mm}
\title{Simulates k-th order Markov chains}
\usage{
\method{simulate}{mm}(object, nsim = 1, seed = NULL, ...)
}
\arguments{
\item{object}{An object of class \link{mm}.}
\item{nsim}{An integer or vector of integers (for multiple sequences)
specifying the length of the sequence(s).}
\item{seed}{Optional. \code{seed} for the random number generator.
If no \code{seed} is given, then seed is set by using the command
\verb{set.seed(round(as.numeric(Sys.time()))}.}
\item{...}{further arguments passed to or from other methods.}
}
\value{
A list of vectors representing the sequences.
}
\description{
Simulates k-th order Markov chains.
}
\details{
If \code{nsim} is a single integer then a chain of that length is
produced. If \code{nsim} is a vector of integers, then \code{length(nsim)}
sequences are generated with respective lengths.
}
\examples{
states <- c("a", "c", "g", "t")
s <- length(states)
k <- 2
init <- rep.int(1 / s ^ k, s ^ k)
p <- matrix(0.25, nrow = s ^ k, ncol = s)
# Specify a Markov model of order 1
markov <- mm(states = states, init = init, ptrans = p, k = k)
seqs <- simulate(object = markov, nsim = c(1000, 10000, 2000), seed = 150)
}
\seealso{
\link{mm}, \link{fitmm}
}
|
aaa426d2fa14585c328eed276a190949bf876b78 | 6855ac1106597ae48483e129fda6510354efa2bd | /tests/testthat/test-primary_input_get.R | 75233a08748be10139626bfa7ee6f1892791cb3d | [
"MIT"
] | permissive | rOpenGov/iotables | ad73aae57b410396995635d1c432744c06db32db | 91cfdbc1d29ac6fe606d3a0deecdb4c90e7016b9 | refs/heads/master | 2022-10-02T13:03:54.563374 | 2022-09-24T11:47:20 | 2022-09-24T11:47:20 | 108,267,715 | 19 | 8 | NOASSERTION | 2021-12-17T15:09:35 | 2017-10-25T12:35:47 | R | UTF-8 | R | false | false | 418 | r | test-primary_input_get.R | siot <- iotable_get(source = "germany_1995",
geo = 'DE', year = 1990,
unit = "MIO_EUR",
labelling = "short")
test_that("Retreieving a primary input vector with primary_input_get()", {
  # Pull the D1 primary-input row from the German 1995 demo IO table and
  # spot-check that its third entry matches the known reference value.
  d1_row <- primary_input_get(data_table = siot, primary_input = "D1")
  third_value <- as.character(unlist(d1_row))[3]
  expect_equal(third_value, "296464" )
})
|
a8b43b8637c8509ae2c888bbd851f9d2e462621a | 9ee98d1c28eb9b75623b01194a006ff0d099703a | /hgRNA-invivo/030_run_traceQC.R | a159450e8c3274463150be7b45c9caaf61d54147 | [] | no_license | LiuzLab/TraceQC-manuscript | e5ea5ecdf12fceb498b55a38501c1d8d74edb28e | 04f493715ca451adba5ba108bf68e0b22120ee68 | refs/heads/main | 2023-07-29T05:47:08.266514 | 2021-09-15T00:20:50 | 2021-09-15T00:20:50 | 406,525,559 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,958 | r | 030_run_traceQC.R | library(readr)
library(dplyr)
library(stringr)
library(tools)    # file_path_sans_ext()
library(TraceQC)
library(fastqcr)
library(readr)    # NOTE(review): readr is attached twice in this script; redundant but harmless
# Map each SRA run accession to its library name, used to pair sequencing
# runs with hgRNA identifiers.
sra <- read_csv("./data/000_SraRunTable.txt") %>%
  select(Run,`Library Name`)
# Run FastQC over every directory in the demultiplexed-FASTQ tree, writing
# all reports into a common output folder. list.dirs() is recursive and
# includes fastq_dir itself.
qc_dir <- "./fastqc/"
fastq_dir <- "./data/020_fastq_by_identifier"
fastq_subdirs <- list.dirs(fastq_dir)
for (fastq_subdir in fastq_subdirs) {
  fastqc(fastq_subdir, qc.dir = qc_dir)
}
# Build per-identifier reference files for each supported spacer length.
#
# For each length l, ./data/000_ref/L<l>.txt is a template:
#   line 1:  the reference construct sequence
#   lines 2+: region annotations, each "name start end" (space separated).
# For every hgRNA identifier whose spacer length is within +/-1 of l, the
# template's spacer region is replaced by that identifier's own spacer and
# the result is written to ./data/000_ref/L<l>_<identifier>.txt.
identifiers <- read_csv("./data/000_ref/hgRNA_identifiers.csv")
all_length <- c(21,25,30,35)
for (l in all_length) {
  ref <- read_lines(sprintf("./data/000_ref/L%s.txt",l))
  refseq <- ref[1]
  # Parse the "region start end" annotation lines into a data frame.
  regions_str <- strsplit(ref[2:length(ref)],split=" ")
  regions <- do.call(rbind,regions_str) %>%
    as.data.frame() %>%
    setNames(c("region","start","end")) %>%
    mutate(start=strtoi(.data$start),
           end=strtoi(.data$end)) %>%
    mutate(region=as.character(.data$region))
  # tmp <- list(refseq=refseq,regions=regions)
  # plot_construct(tmp)
  # Identifiers whose spacer length is within one base of this template.
  L_identifiers <- filter(identifiers,Length>=(l-1)&Length<=(l+1))
  # FIX: seq_len() instead of 1:nrow() -- if the filter returns zero rows,
  # 1:0 would iterate with bogus indices; seq_len(0) skips the loop cleanly.
  for (i in seq_len(nrow(L_identifiers))) {
    identifier <- as.character(L_identifiers[i,"Identifier (ID)"])
    spacer <- as.character(L_identifiers[i,"Spacer regions (TSS to PAM)"])
    spacer_start <- regions[regions$region=="spacer","start"]
    spacer_end <- regions[regions$region=="spacer","end"]
    # Splice the identifier-specific spacer into the template sequence.
    ref_id_seq <- paste(substr(refseq,start=1,stop=spacer_start-1),spacer,
                        substr(refseq,start=spacer_end+1,stop=nchar(refseq)),sep="")
    out_file <- sprintf("./data/000_ref/L%s_%s.txt",l,identifier)
    # FIX: the original write(paste(..., sep = "\n"), ...) relied on paste()'s
    # sep being a no-op for a single vector argument; writeLines() writes one
    # element per line, which is the intended (and identical) output.
    # NOTE(review): ref[2:3] assumes the template has exactly two annotation
    # lines -- TODO confirm for all L*.txt templates.
    writeLines(c(ref_id_seq, ref[2:3]), out_file)
  }
}
# Align every demultiplexed FASTQ against its identifier-specific reference.
# Relative paths under fastq_dir are expected to look like
# "<subdir>/<SRA-run>_<identifier>.<ext>".
for (f in list.files(fastq_dir,recursive=TRUE)) {
  # Second path component is the bare file name; split "<run>_<identifier>".
  tmp <- strsplit(f,"/")[[1]][2]
  tmp <- strsplit(file_path_sans_ext(tmp),split="_")[[1]]
  # NOTE(review): this overwrites the `sra` run table loaded at the top of
  # the script -- rename one of them if the table is needed later.
  sra <- tmp[1]
  identifier <- tmp[2]
  input_file <- sprintf("%s/%s",fastq_dir,f)
  # NOTE(review): assumes exactly one reference file matches the identifier;
  # multiple matches would make ref_file a vector -- TODO confirm.
  ref_file <- list.files("./data/000_ref/",pattern=identifier)
  ref_file <- sprintf("./data/000_ref/%s",ref_file)
  output_file <- sprintf("./data/030.1_alignment/%s_%s.txt",sra,identifier)
  # TraceQC alignment; writes the aligned-read table to output_file.
  sequence_alignment(input_file=input_file,ref_file=ref_file,
                     output_file=output_file)}
# Convert each alignment table into TraceQC mutation-event records.
# Alignment files with zero rows (no surviving reads) are skipped.
for (input_file in list.files("./data/030.1_alignment/")) {
  print(input_file)
  aligned_reads <- read_tsv(sprintf("./data/030.1_alignment/%s",input_file))
  if (nrow(aligned_reads)>0) {
    traceQC_input <- list(aligned_reads=aligned_reads)
    # Permissive settings: raw counts (use_CPM=FALSE) and no score/abundance
    # cutoffs, so every aligned read contributes a mutation-event record.
    mutation_event <- seq_to_character(traceQC_input,ncores=1,
                                      use_CPM=FALSE,alignment_score_cutoff=-Inf,
                                      abundance_cutoff=0)
    write_tsv(mutation_event,sprintf("./data/030.2_traceQC_obj/%s",input_file))}
}
# Compute a permutation-based alignment-score threshold for every
# per-identifier reference file and save one TSV per reference.
ref_files <- list.files("./data/000_ref/",pattern="L[0-9][0-9]_")
for (current_ref in ref_files) {
  ref_path <- sprintf("./data/000_ref/%s", current_ref)
  threshold_tbl <- sequence_permutation(ref_file = ref_path)
  out_path <- sprintf("./data/030.3_alignment_threshold/alignment_threshold_%s", current_ref)
  write_tsv(threshold_tbl, out_path)
}
|
7edbca63f259bc63c8d5467ecafe1c6fb4ff5dc4 | a6c21b44f52a54259a62c273606dbf987b587de6 | /man/civet.getAllFilenames.Rd | 918c1d63b005faade2a053fa45757d4a2d61b906 | [] | no_license | bcdarwin/RMINC | c020787ed115745bf7838070612dcb9a7f9a9d91 | 5af39882f3a3a5b9cbe9af5ca3a845472b703cc3 | refs/heads/master | 2021-01-22T04:09:39.095013 | 2014-08-25T16:21:00 | 2014-08-25T16:21:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,172 | rd | civet.getAllFilenames.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{civet.getAllFilenames}
\alias{civet.getAllFilenames}
\alias{civet.organizeCivetDatFilesMidWhiteGrey}
\title{civet.getAllFilenames}
\usage{
civet.getAllFilenames(gf, idvar, prefix, basedir, append = TRUE, civetVersion = "1.1.9")
}
\arguments{
\item{gf}{Data Frame with subject information}

\item{idvar}{column name in gf with subject IDs}

\item{prefix}{Prefix specified when CIVET was run}

\item{basedir}{directory where all CIVET output was stored}

\item{append}{Whether to append the results to the input gf}

\item{civetVersion}{Version of CIVET}
}
\value{
gf is returned with CIVET filenames
}
\description{
Generates list of filenames output by CIVET
}
\details{
Prior to running, read.csv may be called to generate the input argument gf.
The results will be stored under the column name CIVETFILES either in the input gf (if append = TRUE) or in a new gf.
Currently only CIVET versions 1.1.9 and 1.1.12 are supported.
}
\examples{
gf = read.csv("~/SubjectTable.csv")
civet.getAllFilenames(gf,"ID","ABC123","~/CIVET","TRUE","1.1.12")
gf = civet.readAllCivetFiles("~/Atlases/AAL/AAL.csv",gf)
}
\seealso{
civet.readAllCivetFiles
}
|
832880127aa382421e75256481983661b1ebfe84 | f3b59c0819434138fa5d3838423c3c87a19616bc | /Projects/NCSC_Workspace/NCSC.R | 7ef136524bebfc1cfeae779a22ef3f543de38c7a | [
"MIT"
] | permissive | beemyfriend/beemyfriend.github.io | 2dca0a1c1270f81b7ed93dd8f863eaff3737eba1 | 3f82b78b8682550fad603666a06fb421f4319200 | refs/heads/master | 2021-03-22T05:23:18.822953 | 2019-01-22T02:54:25 | 2019-01-22T02:54:25 | 81,542,060 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,217 | r | NCSC.R | setwd('NCSC_Workspace/')
library(tidyverse)
library(readxl)
# Read every sheet of the vizathon workbook into a named list of tibbles,
# keyed by sheet name.
hackSheets <- readxl::excel_sheets('Vizathon-Data.xlsx')
hackData <- map(hackSheets, function(x){
  read_excel('Vizathon-Data.xlsx', sheet = x)
})
names(hackData) <- hackSheets
# Interactive check: list the column names of every sheet.
map(hackData, names)
# Build one nested record per US state: the state's courts, each court's
# parent court(s) in the appeal process, and the case types each court
# hears. Court IDs are prefixed with "CourtID_" so they can serve as
# identifiers in the JSON consumed by the visualization.
nestedCourtData <- hackData$USState %>%
  select(USStateID, USStateName) %>%
  left_join(hackData$USStateCourt) %>%
  left_join(select(hackData$Court, -DisplayOrder)) %>%
  left_join(select(hackData$Funding, FundingID, FundingDescription)) %>%
  select(-FundingID) %>%
  mutate(CourtID = str_c("CourtID_", CourtID)) %>%
  # Attach, per court, a nested table of its parent courts in the appeal chain.
  left_join(select(hackData$AppealProcess, -AppealProcessID) %>%
              mutate_all(function(x) str_c("CourtID_", x)) %>%
              mutate(Child = ChildCourtID,
                     Parent = ParentCourtID) %>%
              group_by(ChildCourtID) %>%
              nest(.key = 'ParentCourts'),
            by=c('CourtID' = 'ChildCourtID')) %>%
  # Attach, per court, a nested table of the case types it handles.
  left_join(
    hackData$CourtCaseType %>%
      left_join(select(hackData$CaseType, -DisplayOrder)) %>%
      select(-CourtCaseTypeID, -CaseTypeID, -DisplayOrder) %>%
      mutate(CourtID = str_c("CourtID_", CourtID)) %>%
      group_by(CourtID) %>%
      nest(.key = 'CaseTypes')) %>%
  filter(!is.na(CourtLevelID)) %>%
  # Split court names into word vectors (become arrays in the emitted JSON).
  mutate(CourtName = str_split(CourtName, ' ')) %>%
  # Finally nest everything under one row per state.
  group_by(USStateID, USStateName) %>%
  nest(.key = 'Courts') %>%
  mutate(ChildParent = map(Courts, function(x) select(bind_rows(x$ParentCourts), -ParentCourtID)))
nestedCourtDataJSON <- jsonlite::toJSON(nestedCourtData)
write(nestedCourtDataJSON, "nestedCourtData.json")
write(str_c('data = ', nestedCourtDataJSON), 'nestedCourtData.js')
courtTypes <- nestedCourtData %>% unnest() %>% .$CourtLevelID %>% unique
courtTypesJSON <- jsonlite::toJSON(courtTypes)
write(courtTypesJSON, 'courtTypes.json')
state_info <- read.delim('state.psv', sep='|') %>%
rename(id = STATE) %>%
mutate(id = sprintf('%02d', id)) %>%
mutate(STATE_NAME = STATE_NAME %>% as.character() %>% if_else(. == "Hawaii", "Hawai'i", .)) %>%
left_join(hackData$USState, by = c("STATE_NAME" = 'USStateName')) %>%
left_join(select(hackData$TrialStructure, -DisplayOrder)) %>%
left_join(select(hackData$PopulationCategory, -DisplayOrder)) %>%
left_join(select(hackData$Rural, -DisplayOrder)) %>%
left_join(select(hackData$TrialCriminalProcessing, -DisplayOrder)) %>%
left_join(select(hackData$DeathPenalty, -DisplayOrder))
select(-DisplayOrder, -PopulationCategoryID, -RuralID, -TrialStructureID, -TrialCriminalProcessingID, -DeathPenalty)
us_json <- read_lines('us-10m.v1.json')
write(str_c('us_topo =', us_json), 'us-10m.v1.js')
write_tsv(state_info, 'state.tsv')
state_info_json <- jsonlite::toJSON(state_info)
write(state_info_json, 'state_info.json')
write(str_c('state_data = ', state_info_json), 'state_data.js')
nestedCourtData %>% filter(is.na(CourtName))
nestedCourtData %>% filter(USStateName %in% c("Oklahoma", "Texas"))
hackData$Court %>%
left_join(hackData$CourtCourtName) %>%
filter(CourtID == 93) %>%
select(CourtID, CourtName, CourtCourtNameID, CourtNameID ) %>%
left_join(hackData$CourtName) %>%
select(-PanelDecisionID, -NumberPanels, -NumberOfIndividualCourts, -CaseManagementID)
|
0819c7cbf4e3829fa64d81a54468cd05faca4a2b | ad3eb6c0b4238950ec766f819c2216dc547f7c8a | /R/tidyHtmlTable.R | 014a543dacb68e944dabc065811f6329d31d033d | [] | no_license | Karagul/htmlTable | 33dd12a5bba4c6a41ab1ca3b7edfbe0ffdb67351 | 6e16c47172d6091fa825fed8aee66ae579f0d60d | refs/heads/master | 2021-01-30T08:04:53.428045 | 2019-12-04T21:20:15 | 2019-12-04T21:20:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,225 | r | tidyHtmlTable.R | #' Generate an htmlTable using a ggplot2-like interface
#'
#' Builds an \code{htmlTable} by mapping columns from the input data, \code{x},
#' to elements of an output \code{htmlTable} (e.g. rnames, header, etc.)
#'
#' @section Column-mapping parameters:
#' The \code{tidyHtmlTable} function is designed to work like ggplot2 in that
#' columns from \code{x} are mapped to specific parameters from the
#' \code{htmlTable} function. At minimum, \code{x} must contain the names
#' of columns mapping to \code{rnames}, \code{header}, and \code{rnames}.
#' \code{header} and \code{rnames} retain the same meaning as in the
#' htmlTable function. \code{value} contains the individual values that will
#' be used to fill each cell within the output \code{htmlTable}.
#'
#' A full list of parameters from \code{htmlTable} which may be mapped to
#' columns within \code{x} include:
#'
#' \itemize{
#' \item \code{value}
#' \item \code{header}
#' \item \code{rnames}
#' \item \code{rgroup}
#' \item \code{cgroup1}
#' \item \code{cgroup2}
#' \item \code{tspanner}
#' }
#'
#' Note that unlike in \code{htmlTable} which contains \code{cgroup},
#' and which may specify a variable number of column groups,
#' \code{tidyhtmlTable} contains the parameters \code{cgroup1} and
#' \code{cgroup2}. These parameters correspond to the inward most and outward
#' most column groups respectively.
#'
#' Also note that the coordinates of each \code{value} within \code{x} must be
#' unambiguously mapped to a position within the output \code{htmlTable}.
#' Therefore, the each row-wise combination the variables specified above
#' contained in \code{x} must be unique.
#'
#' @section Hidden values:
#' \code{htmlTable} Allows for some values within \code{rgroup},
#' \code{cgroup}, etc. to be specified as \code{""}. The following parameters
#' allow for specific values to be treated as if they were a string of length
#' zero in the \code{htmlTable} function.
#'
#' \itemize{
#' \item \code{hidden_rgroup}
#' \item \code{hidden_tspanner}
#' }
#' @section Additional dependencies:
#' In order to run this function you also must have \code{\link[dplyr]{dplyr}} and
#' \code{\link[tidyr]{tidyr}} packages installed. These have been removed due to
#' the additional 20 Mb that these dependencies added (issue #47). The particular
#' functions required are:
#'
#' \itemize{
#' \item \code{\link[dplyr]{dplyr}}:
#' \code{mutate_at},
#' \code{select},
#' \code{pull},
#' \code{slice},
#' \code{filter},
#' \code{arrange_at},
#' \code{mutate_if},
#' \code{is.grouped_df},
#' \code{left_join}
#' \item \code{\link[tidyr]{tidyr}}: \code{spread}
#' }
#'
#' @param x Tidy data used to build the \code{htmlTable}
#' @param value The column containing values filling individual cells of the
#' output \code{htmlTable}
#' @param header The column in \code{x} specifying column headings
#' @param rnames The column in \code{x} specifying row names
#' @param rgroup The column in \code{x} specifying row groups
#' @param hidden_rgroup rgroup values that will be hidden.
#' @param cgroup1 The column in \code{x} specifying the inner most column
#' groups
#' @param cgroup2 The column in \code{x} specifying the outer most column
#' groups
#' @param tspanner The column in \code{x} specifying tspanner groups
#' @param hidden_tspanner tspanner values that will be hidden.
#' @param ... Additional arguments that will be passed to the inner
#' \code{htmlTable} function
#' @return Returns html code that will build a pretty table
#' @export
#' @seealso \code{\link{htmlTable}}
#' @examples
#' \dontrun{
#' library(tidyverse)
#' mtcars %>%
#' rownames_to_column %>%
#' select(rowname, cyl, gear, hp, mpg, qsec) %>%
#' gather(per_metric, value, hp, mpg, qsec) %>%
#' group_by(cyl, gear, per_metric) %>%
#' summarise(Mean = round(mean(value), 1),
#' SD = round(sd(value), 1),
#' Min = round(min(value), 1),
#' Max = round(max(value), 1)) %>%
#' gather(summary_stat, value, Mean, SD, Min, Max) %>%
#' ungroup %>%
#' mutate(gear = paste(gear, "Gears"),
#' cyl = paste(cyl, "Cylinders")) %>%
#' tidyHtmlTable(header = "gear",
#' cgroup1 = "cyl",
#'                   value = "value",
#' rnames = "summary_stat",
#' rgroup = "per_metric")
#' }
tidyHtmlTable <- function(x,
                          value = "value",
                          header = "header",
                          rnames = "rnames",
                          rgroup = NULL,
                          hidden_rgroup = NULL,
                          cgroup1 = NULL,
                          cgroup2 = NULL,
                          tspanner = NULL,
                          hidden_tspanner = NULL,
                          ...) {
  # S3 generic: dispatches on the class of `x`; see the data.frame method for
  # the actual implementation and the default method for the error fallback.
  UseMethod("tidyHtmlTable")
}
#' @export
tidyHtmlTable.default <- function(x, ...) {
  # Fallback method: tidyHtmlTable only supports data.frame-like input.
  # `call. = FALSE` keeps the uninformative dispatch call out of the message.
  stop("x must be of class data.frame", call. = FALSE)
}
#' @export
tidyHtmlTable.data.frame <- function(x,
                                     value = "value",
                                     header = "header",
                                     rnames = "rnames",
                                     rgroup = NULL,
                                     hidden_rgroup = NULL,
                                     cgroup1 = NULL,
                                     cgroup2 = NULL,
                                     tspanner = NULL,
                                     hidden_tspanner = NULL,
                                     ...) {
  # data.frame method: validates the column mappings, pivots the tidy data
  # into a wide cell matrix, then assembles and executes the htmlTable() call.

  # You need the suggested packages for this function
  safeLoadPkg("dplyr")
  safeLoadPkg("tidyr")
  argument_checker(x,
                   value = value,
                   header = header,
                   rnames = rnames,
                   rgroup = rgroup,
                   # BUG FIX: forward the user-supplied hidden_* values; they
                   # were previously hard-coded to NULL and never validated.
                   hidden_rgroup = hidden_rgroup,
                   cgroup1 = cgroup1,
                   cgroup2 = cgroup2,
                   tspanner = tspanner,
                   hidden_tspanner = hidden_tspanner)
  check_uniqueness(x,
                   header = header,
                   rnames = rnames,
                   rgroup = rgroup,
                   cgroup1 = cgroup1,
                   cgroup2 = cgroup2,
                   tspanner = tspanner)
  x <- remove_na_rows(x,
                      header = header,
                      rnames = rnames,
                      rgroup = rgroup,
                      cgroup1 = cgroup1,
                      cgroup2 = cgroup2,
                      tspanner = tspanner)

  # Create tables from which to gather row, column, and tspanner names
  # and indices
  row_ref_tbl <- x %>%
    get_row_tbl(rnames = rnames,
                rgroup = rgroup,
                tspanner = tspanner)

  # Hide row groups specified in hidden_rgroup (blank label, rows kept).
  if (!(is.null(hidden_rgroup))) {
    row_ref_tbl <- row_ref_tbl %>%
      dplyr::mutate_at(rgroup,
                       function(x){ifelse(x %in% hidden_rgroup, "", x)})
  }

  # Hide tspanners specified in hidden_tspanner
  if (!(is.null(hidden_tspanner))) {
    row_ref_tbl <- row_ref_tbl %>%
      dplyr::mutate_at(tspanner,
                       function(x){ifelse(x %in% hidden_tspanner, "", x)})
  }

  col_ref_tbl <- x %>%
    get_col_tbl(header = header,
                cgroup1 = cgroup1,
                cgroup2 = cgroup2)

  # Format the values for display: pivot to one column per c_idx.
  to_select <- c("r_idx", "c_idx", value)
  formatted_df <- x %>%
    add_col_idx(header = header,
                cgroup1 = cgroup1,
                cgroup2 = cgroup2) %>%
    add_row_idx(rnames = rnames,
                rgroup = rgroup,
                tspanner = tspanner) %>%
    dplyr::select(to_select) %>%
    dplyr::mutate_at(value, as.character) %>%
    # Spread will fill missing values (both explict and implicit) with the
    # same value, so we need to convert these values to a character if we want
    # them to show up correctly in the final table
    tidyr::spread(key = "c_idx",
                  value = value,
                  fill = "")
  formatted_df$r_idx <- NULL

  # Get names and indices for row groups and tspanners
  htmlTable_args <- list(x = formatted_df,
                         rnames = row_ref_tbl %>% dplyr::pull(rnames),
                         header = col_ref_tbl %>% dplyr::pull(header),
                         ...)

  if (!is.null(rgroup)) {
    # This will take care of a problem in which adjacent row groups
    # with the same value will cause rgroup and tspanner collision
    comp_val <- row_ref_tbl %>% dplyr::pull(rgroup)
    if (!is.null(tspanner)) {
      comp_val <- paste0(comp_val,
                         row_ref_tbl %>% dplyr::pull(tspanner))
    }
    lens <- rle(comp_val)$lengths
    idx <- cumsum(lens)
    htmlTable_args$rgroup <- row_ref_tbl %>%
      dplyr::slice(idx) %>%
      dplyr::pull(rgroup)
    htmlTable_args$n.rgroup <- lens
  }

  if (!is.null(tspanner)) {
    htmlTable_args$tspanner <-
      rle(row_ref_tbl %>% dplyr::pull(tspanner))$value
    htmlTable_args$n.tspanner <-
      rle(row_ref_tbl %>% dplyr::pull(tspanner))$lengths
  }

  # Get names and indices for column groups
  if(!is.null(cgroup1)) {
    cgroup1_out <- rle(col_ref_tbl %>% dplyr::pull(cgroup1))$value
    n.cgroup1 <- rle(col_ref_tbl %>% dplyr::pull(cgroup1))$lengths
    if(!is.null(cgroup2)) {
      cgroup2_out <- rle(col_ref_tbl %>% dplyr::pull(cgroup2))$value
      n.cgroup2 <- rle(col_ref_tbl %>% dplyr::pull(cgroup2))$lengths
      len_diff <- length(cgroup1_out) - length(cgroup2_out)
      if (len_diff < 0) {
        stop("cgroup2 cannot contain more categories than cgroup1")
      } else if (len_diff > 0) {
        # BUG FIX: pad the cgroup2 *values* with NA. Previously the column
        # name string `cgroup2` was concatenated here instead of
        # `cgroup2_out`.
        cgroup2_out <- c(cgroup2_out, rep(NA, len_diff))
        n.cgroup2 <- c(n.cgroup2, rep(NA, len_diff))
      }
      # BUG FIX: stack the outer group values above the inner ones.
      # Previously rbind() was applied to the column-name strings
      # `cgroup2`/`cgroup1` instead of the value vectors.
      cgroup1_out <- rbind(cgroup2_out, cgroup1_out)
      n.cgroup1 <- rbind(n.cgroup2, n.cgroup1)
    }
    htmlTable_args$cgroup <- cgroup1_out
    htmlTable_args$n.cgroup <- n.cgroup1
  }
  do.call(htmlTable, htmlTable_args)
}
# You need the suggested package for this function
safeLoadPkg <- function(pkg) {
  # Abort with an install-this-package error when a suggested dependency is
  # not available; otherwise do nothing (returns invisible NULL).
  pkg_is_installed <- requireNamespace(pkg, quietly = TRUE)
  if (!pkg_is_installed) {
    msg <- paste0("The package ", pkg,
                  " is needed for this function to work. Please install it.")
    stop(msg, call. = FALSE)
  }
  invisible(NULL)
}
# Removes rows containing NA values in any mapped columns from the tidy dataset
remove_na_rows <- function(x, ...) {
  # Drop every row of the tidy dataset with an NA in any mapped column,
  # warning about which columns were affected and how many rows were removed.
  mapped_cols <- as.character(get_col_vars(...))
  na_flags <- x %>%
    dplyr::select(mapped_cols) %>%
    is.na
  bad_row <- rowSums(na_flags) > 0
  n_removed <- sum(bad_row)
  if (n_removed != 0) {
    affected_cols <- colnames(na_flags)[colSums(na_flags) > 0]
    warning(paste0("NA values were detected in the following columns of ",
                   "the tidy dataset: ",
                   paste(affected_cols, collapse = ", "), ". ",
                   n_removed, " row(s) in the tidy dataset were removed."))
  }
  x %>% dplyr::filter(!bad_row)
}
# This checks to make sure that the mapping columns of the tidy dataset
# uniquely specify a given value
check_uniqueness <- function(x, ...) {
  # Abort when the mapping columns fail to identify each row of `x`
  # uniquely, naming both the parameters and the duplicated row numbers.
  mapping_args <- simplify_arg_list(...)
  mapping_cols <- as.character(mapping_args)
  dup_rows <- x %>%
    dplyr::select(mapping_cols) %>%
    duplicated
  if (any(dup_rows)) {
    stop(paste0("The input parameters ",
                paste(paste0("\"", names(mapping_args), "\""), collapse = ", "),
                " do not specify unique rows. The following rows ",
                "are duplicated: ",
                paste(which(dup_rows), collapse = ", ")))
  }
}
# Converts arguments from ... into a list and removes those that have been set
# to NULL
simplify_arg_list <- function(...) {
  # Collect ... into a list and drop any argument explicitly set to NULL.
  # vapply() (rather than sapply()) keeps the selector type-stable, so an
  # empty argument list no longer depends on sapply's list() corner case.
  args <- list(...)
  args[!vapply(args, is.null, logical(1))]
}
# This function gets arguments from ..., removes those that are NULL,
# and then subsets those that should map tidy data columns to htmlTable
# parameters
get_col_vars <- function(...) {
  # Restrict the supplied (non-NULL) arguments to the ones that map
  # tidy-data columns onto htmlTable parameters.
  htmltable_params <- c("value", "header", "rnames", "rgroup",
                        "cgroup1", "cgroup2", "tspanner")
  supplied <- simplify_arg_list(...)
  supplied[names(supplied) %in% htmltable_params]
}
# Checks a variety of assumptions about input arguments and prepares an
# appropriate error message if those assumptions are violated
argument_checker <- function(x, ...) {
  # Validate tidyHtmlTable's user-facing arguments, failing fast with a
  # message that names every offending parameter.

  # A grouped tibble would interact badly with the dplyr verbs used later.
  if(dplyr::is.grouped_df(x)) {
    stop("x cannot be a grouped_df")
  }
  # All mapping parameters must be character (column names / values).
  all_args <- simplify_arg_list(...)
  # vapply() instead of sapply(): type-stable even for empty argument lists.
  idx <- which(!vapply(all_args, is.character, logical(1)))
  if (length(idx) > 0) {
    stop("The following parameters must be of type character: ",
         paste(names(all_args)[idx], collapse = ", "))
  }
  # Column-mapping parameters must each name exactly one column.
  col_vars <- get_col_vars(...)
  idx <- which(vapply(col_vars, length, integer(1)) > 1)
  if (length(idx) > 0) {
    stop("The following parameters must be of length 1: ",
         paste(names(col_vars)[idx], collapse = ", "))
  }
  # Every mapped name must actually be a column of x.
  idx <- which(!(as.character(col_vars) %in% colnames(x)))
  if (length(idx) > 0) {
    stop("The following arguments need values that correspond to column ",
         "names in x: ",
         paste0(names(col_vars), " = ",
                as.character(col_vars),
                collapse = ", "))
  }
}
get_col_tbl <- function(x,
                        header,
                        cgroup1 = NULL,
                        cgroup2 = NULL) {
  # One row per output column: unique (cgroup2, cgroup1, header)
  # combinations in display order, with a sequential column index `c_idx`.
  cols <- c(cgroup2, cgroup1, header)
  out <- x %>%
    dplyr::select(cols) %>%
    unique %>%
    dplyr::arrange_at(cols) %>%
    # This is necessary in order to not generate NA values when setting
    # hidden elements to ""
    dplyr::mutate_if(is.factor, as.character)
  # seq_len() (not 1:nrow) stays correct if the reference table is empty.
  out$c_idx <- seq_len(nrow(out))
  return(out)
}
get_row_tbl <- function(x,
                        rnames,
                        rgroup = NULL,
                        tspanner = NULL) {
  # One row per output table row: unique (tspanner, rgroup, rnames)
  # combinations in display order, with a sequential row index `r_idx`.
  cols <- c(tspanner, rgroup, rnames)
  out <- x %>%
    dplyr::select(cols) %>%
    unique %>%
    dplyr::arrange_at(cols) %>%
    # This is necessary in order to not generate NA values when setting
    # hidden elements to ""
    dplyr::mutate_if(is.factor, as.character)
  # seq_len() (not 1:nrow) stays correct if the reference table is empty.
  out$r_idx <- seq_len(nrow(out))
  return(out)
}
add_col_idx <- function(x,
                        header,
                        cgroup1 = NULL,
                        cgroup2 = NULL) {
  # Attach the column index `c_idx` to every row of x by joining against the
  # column reference table on the mapped columns.
  cols <- c(cgroup2, cgroup1, header)
  col_idx_df <- x %>%
    get_col_tbl(header = header,
                cgroup1 = cgroup1,
                cgroup2 = cgroup2)
  # CONSISTENCY: `by` is now passed by name (was positional), matching
  # add_row_idx. suppressWarnings(): presumably silences join coercion
  # warnings -- TODO confirm which warning this was added for.
  out <- suppressWarnings(
    x %>%
      dplyr::left_join(col_idx_df, by = cols)
  )
  return(out)
}
add_row_idx <- function(x,
                        rnames,
                        rgroup = NULL,
                        tspanner = NULL) {
  # Attach the row index `r_idx` to every row of x via a join against the
  # row reference table built from the same mapping columns.
  join_cols <- c(tspanner, rgroup, rnames)
  row_lookup <- x %>%
    get_row_tbl(rnames = rnames,
                rgroup = rgroup,
                tspanner = tspanner)
  suppressWarnings(
    x %>%
      dplyr::left_join(row_lookup, by = join_cols)
  )
}
|
433ce2c3fe2801d1b8b6012e6f0c3425cb556b00 | 68a1e6493c59c0a7b1129e0b2bbde1bca685586f | /EDA/analysis.R | 6eec0ab48d47d6aa28853fc165f0d509ef24732f | [] | no_license | chashizume/project-1-gamma | f828d659218aa4f1854f33260c9293e55b3d004c | cf8c34110e10fccffe4fcc19a484c4fdadb63e09 | refs/heads/main | 2023-08-20T06:58:15.330655 | 2021-10-21T03:59:44 | 2021-10-21T03:59:44 | 417,209,532 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 283 | r | analysis.R | library(tidyverse)
library(ggplot2)
library(dplyr)
# Raw inputs, read from the working directory: state-level COVID data and a
# second table that, judging by its file name, holds 2020-2021 search-trend
# data for conspiracy-related terms -- TODO confirm.
covid_data <- read.csv('covid_short_data_states.csv')
trend_search <- read.csv('Conspiracy - 2020_2021.csv')
# Sketch of a weekly aggregation, kept for reference but not yet enabled.
# NOTE(review): cut(date, "week") needs `date` to be a Date, not the
# character column read.csv produces -- confirm before uncommenting.
# weekly_covid_cases <- covid_data %>%
#   group_by(week = cut(date, "week")) %>%
#   summarise(value = mean(cases))
1432ac828288e465746ad2b6d4fbcafc36e3c5c6 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /LBSPR/man/initialize-LB_obj-method.Rd | f9cece18abb5fe08102ef263da346f02083acc47 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 540 | rd | initialize-LB_obj-method.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defineMeths.r
\docType{methods}
\name{initialize,LB_obj-method}
\alias{initialize,LB_obj-method}
\title{Create a new LB_obj object}
\usage{
\S4method{initialize}{LB_obj}(.Object, defaults = FALSE,
verbose = FALSE)
}
\arguments{
\item{.Object}{class of object to be created}
\item{defaults}{use defaults?}
\item{verbose}{display a message?}
}
\value{
an object of class \code{'LB_obj'}
}
\description{
Creates a new object of class \code{'LB_obj'}.
}
\author{
A. Hordyk
}
|
22591415f03e69d62164fc8c4f066fc7f5d1147e | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/2058_2/rinput.R | 43667fe9dfc81c3eb0e95b6a95a3da13efbe787d | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
# Unroot the Newick tree for this gene family before downstream analysis.
# NOTE(review): the repo layout (codeml_files/) suggests the unrooted tree
# feeds PAML's codeml -- confirm.
testtree <- read.tree("2058_2.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="2058_2_unrooted.txt")
af6f391d91dab5403c914262e2c701d61d5bf0a6 | 625fe4ecff15cbadf6ea68a5a904973e1a173c6d | /plot4.R | ae8c6f2ae4494fbea34407f1477026e76cffd08d | [] | no_license | Fredfav/ExData_Plotting1 | 09e71fe0690fc5fddef4c84f784245c3f1e7577a | 41f587f1d6b998a9255436f1047cdf99dd7a93fc | refs/heads/master | 2021-01-21T09:20:53.615777 | 2016-04-20T07:41:53 | 2016-04-20T07:41:53 | 56,608,690 | 0 | 0 | null | 2016-04-19T15:26:05 | 2016-04-19T15:26:04 | null | UTF-8 | R | false | false | 1,781 | r | plot4.R | ###############################
#
# Create Plot 4
#
###############################
# Load the data
#source("load_data.R")
# Setup the environment
plot4 <- paste(getwd(), "/pics/plot4.png", sep = "")
# Create the plot
if(!file.exists(plot4)){
png("pics/plot4.png", width = 480, height = 480)
par(mfrow=c(2,2))
plot(inputData$Time, inputData$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
plot(inputData$Time, inputData$Voltage, type = "l", xlab = "datetime", ylab = "Global Active Power")
plot(inputData$Time, inputData$Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Energy sub metering")
lines(inputData$Time, inputData$Sub_metering_2, type="l", col="red")
lines(inputData$Time, inputData$Sub_metering_3, type="l", col="blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd=2, col=c("black", "red", "blue"))
plot(inputData$Time, inputData$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global Active Power")
dev.off()
} else {
par(mfrow=c(2,2))
plot(inputData$Time, inputData$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
plot(inputData$Time, inputData$Voltage, type = "l", xlab = "datetime", ylab = "Global Active Power")
plot(inputData$Time, inputData$Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Energy sub metering")
lines(inputData$Time, inputData$Sub_metering_2, type="l", col="red")
lines(inputData$Time, inputData$Sub_metering_3, type="l", col="blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd=2, col=c("black", "red", "blue"))
plot(inputData$Time, inputData$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global Active Power")
} |
a9736006c6bfb8fd7f01512edcf713b57f8d2bb8 | fce4ab47efad8ed048d7b400f162bb466caacd1e | /visualisation/meta_feature_importance_pipeline_specific.R | fad0bb068f017217c22cbac587cc9149bce7e30f | [] | no_license | TMKarrer/domain_ranking_scz | 622d8226dc8a525da293a3589bb6a14eaa594b2c | 805ebc184f8b684caf795e9f7e49883aa5303150 | refs/heads/master | 2020-03-20T05:46:42.944498 | 2018-07-25T13:11:45 | 2018-07-25T13:11:45 | 137,225,914 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,059 | r | meta_feature_importance_pipeline_specific.R | # Visualization of the pipeline-specific feature importance of the meta-priors.
# libraries
library(ggplot2)
library(dplyr)
library(magrittr)
library(viridis)
library(ggjoy)
# Analysis configuration ----
# Hard-coded project root; all model outputs are read relative to this path.
project_dir = '/Users/Teresa/sciebo/Meta_Priors_teresa'
# Imaging modalities analysed (and their combination).
modality_list = c('vbm', 'rs', 'vbm_rs')
# Prior taxonomies: BD is plotted as "Mental domains", PC as
# "Experimental tasks" (see the if/else below).
taxonomy = c('BD', 'PC')
methods_list = c('all_methods')
# Brain-sampling strategies and their plot labels (kept in parallel order).
methods <- c('ha_pc', 'md_pc', 'ts_pc', 'kmeans', 'ward', 'spectral', 'pca', 'ica', 'sparse_pca')
methods_labels <- c('ha_pc', 'md_pc', 'ts_pc', 'K-means', 'ward', 'spectral', 'PCA', 'ICA', 'sparse PCA')
# Per-method k values used in the result file names (parallel to `methods`).
k_list <- c(5000, 5000, 5000, 100, 100, 100, 100, 100, 100)
# Build and save one pipeline-specific rank plot per (modality, taxonomy).
for (modality in modality_list){
  print(modality)
  for (i in taxonomy) {
    print(i)
    # Taxonomy-specific plot settings: axis title, x breaks, figure size.
    if (i == 'BD'){
      name = 'Mental domains'
      domain_breaks = c(1.0, 10.0, 20.0, 30.0)
      w = 9
      h = 9
    } else if (i == 'PC'){
      name = 'Experimental tasks'
      domain_breaks = c(1.0, 10.0, 20.0, 30.0, 40.0, 50.0)
      w = 11
      h = 15
    }
    # Meta (across-methods) feature importances and their ranks.
    setwd(paste(project_dir, "models/LogReg_RF", modality, "_meta_ranking", "all_methods", .Platform$file.sep, sep='/'))
    data_mean <- read.csv(file=paste(i, '_', modality, '_all_methods_BT_data_mean.csv', sep='')) %>% select(-X)
    data_mean <- data_mean[order(data_mean$means),]
    rank_data_mean <- read.csv(file=paste(i, '_', modality, '_all_methods_rank_BT_data_mean.csv', sep='')) %>% select(-X)
    rank_data_mean <- rank_data_mean[order(rank_data_mean$means),]
    # NOTE(review): these inner loops reuse the outer loop variables
    # `modality` and `i`. R for-loops share one environment, so after they
    # finish `modality`/`i` are stuck at the *last* list entries and only the
    # final iteration's `data`/`rank_data` survive -- this looks like a
    # refactoring leftover; confirm whether the two inner `for` headers
    # should simply be removed.
    for (modality in modality_list){
      for (i in taxonomy) {
        k_idx <- 0
        data <- data.frame()
        rank_data <- data.frame()
        # One result file per brain-sampling method; `k_list` is parallel to
        # `methods`, so k_idx tracks the current method's k value.
        for (method in methods) {
          k_idx <- k_idx + 1
          setwd(paste(project_dir, "/models/LogReg_RF/", modality, '/', method, .Platform$file.sep, sep=''))
          pipeline_data <- read.csv(file=paste(i, '_', method, '_', k_list[k_idx], '_BT_data_mean.csv', sep=''))
          pipeline_data <- pipeline_data %>% dplyr:: select(-X)
          pipeline_data$method <- rep(method, nrow(pipeline_data))
          data <- rbind(data, pipeline_data)
          # Convert importances to within-method ranks (rank 1 = largest mean).
          pipeline_rank_data <- data.frame(pipeline_data$priors, rank(-pipeline_data$means))
          colnames(pipeline_rank_data) <- c("priors", "means")
          pipeline_rank_data$method <- rep(method, nrow(pipeline_rank_data))
          rank_data <- rbind(rank_data, pipeline_rank_data)
        }
      }
    }
    setwd(paste(project_dir, "models/LogReg_RF", modality, "_meta_ranking", "all_methods", .Platform$file.sep, sep='/'))
    # Order priors on the y axis by the meta ranking.
    # NOTE(review): `mean(rank_data)` takes the mean of the whole data frame
    # (there is no `rank_data` column), not of the per-prior ranks --
    # `mean(means)` was probably intended; confirm before relying on the
    # `arrange(m)` ordering.
    rank_data <- group_by(rank_data, priors) %>%
      mutate(m=mean(rank_data)) %>%
      arrange(m) %>%
      ungroup() %>%
      mutate(priors=factor(priors, levels = rank_data_mean$priors[order(rank_data_mean$means, decreasing=TRUE)] %>% unique))
    # Dot plot: per-method ranks (circles) plus the meta mean (diamonds),
    # saved straight to PNG via the chained ggsave().
    ggplot() +
      geom_point(data=rank_data, aes(y=priors, x=means, color=method), shape=16, size=2.5, alpha=0.7) +
      geom_point(data=rank_data_mean, aes(y=priors, x=means, color='Mean'), shape=18, size=3) +
      ylab(name) + xlab("Discriminabilty rank") +
      theme(panel.background=element_rect(fill='white'),
            axis.ticks = element_blank(), axis.line = element_line(color="grey", size = 0.1),
            text = element_text(size=17), axis.text=element_text(size=14)) +
      scale_x_continuous(breaks=domain_breaks) +
      scale_color_manual(name="Brain sampling\nstrategy",
                         breaks=c('ha_pc', 'md_pc', 'ts_pc', 'kmeans', 'ward', 'spectral', 'pca', 'ica', 'sparse_pca', 'Mean'),
                         labels=c('ha_pc', 'md_pc', 'ts_pc', 'K-means', 'ward', 'spectral', 'PCA', 'ICA', 'sparse PCA', 'Mean'),
                         values=c('ha_pc' = '#FFFF00', 'md_pc' = '#FFCC00', 'ts_pc' = '#FF9900', 'kmeans'= '#00CC33', 'ward' = '#009933', 'spectral' = '#006633',
                                  'pca' = '#66CCCC', 'ica'= '#339999', 'sparse_pca' = '#003399', 'Mean' = 'black')) +
      ggsave(file=paste(i, '_', modality, '_all_methods_rank_BT_feature_importance_joyplot_pipeline_specific.png', sep=''), width = w, height = h)
  }
}
|
a82fa69c3388d708274a15a8e2734e0c641e3a27 | 5ed5201af3d033274e557266e6a4bc97343e7f0a | /cachematrix.R | 64318b942938e485823e3987afac4766c97198d1 | [] | no_license | ChandraChandrasekar/ProgrammingAssignment2 | 472db86329154ddc577ad0252235eb687fbc6e84 | 9f1614913567d136fe1969c2512be6d3549aeb0b | refs/heads/master | 2021-01-14T14:01:57.767208 | 2015-05-24T08:37:04 | 2015-05-24T08:37:04 | 35,717,132 | 0 | 0 | null | 2015-05-16T09:09:57 | 2015-05-16T09:09:57 | null | UTF-8 | R | false | false | 1,796 | r | cachematrix.R | ## The functions in this file help define a special matrix object with associated operations (somewhat like an abstract data type),
## which allows for its inverse to be cached (or 'memoized', in LISP lingo); if the inverse function
## is invoked more than once, the cached value is returned if the matrix is not changed. These functions
## assume that any matrix provided as input is invertible.
## Define the special matrix object and associated functions to set/get values, set/get inverses and
## to keep track of if the matric was modified after the inverse was computed
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix in a list of closures exposing get/set access to the
  # matrix itself plus a cache slot for its inverse. The cache is cleared
  # whenever the underlying matrix is replaced.
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # invalidate the cache on every change
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## Function to get inverse of special matrix object; if the cached value of the inverse is available, that is returned;
## else the value is computed using 'solve' and cached
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x' (a makeCacheMatrix object).
  ## The cached value is reused when present; otherwise the inverse is
  ## computed with solve() and stored back into the cache.
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("Getting the cached inverse value")
    return(cached)
  }
  computed <- solve(x$get(), ...)
  x$setInverse(computed)
  computed
}
|
b2cc8756d90e7a7ccd8c9043a0ad2cebe96da2fb | 1cc473a79cb15e60c1ff3f6b28cbbcfa812b8bbb | /physical_network/server.R | bc0cb11d0ae8dd07789d2eb6b519fd294e1a8d2e | [] | no_license | robert-cope/physical_network | 5abbe6392cff1faac24114069c78c4fc871bfac4 | 9bfae34e713b247ae3134064c9be13599e1a4d56 | refs/heads/master | 2021-01-10T04:21:42.134346 | 2016-01-27T03:03:15 | 2016-01-27T03:03:15 | 50,477,476 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,565 | r | server.R | ####################################################
### Please see:
### Cope RC, Ross JV, Wittmann TA, Prowse TAA, Cassey P. (2016) Integrative Analysis of the Physical Transport Network into Australia. PLoS One. In Press.
### Robert C. Cope (c) 2016
### robert.cope@adelaide.edu.eu
####################################################
Sys.setlocale('LC_ALL','C')
library(ggplot2)
library(maps)
# State-level source tables (space-separated). statesSrcY is used when the
# "last" port option is selected, statesSrcYm ("-m" variant) when "all" is
# selected -- see the lastOrAll() branch in the renderPlot below.
statesSrcY<-read.csv('statesSrcY3.csv',header=T,stringsAsFactors=F,sep=' ')
statesSrcYm<-read.csv('statesSrcY3-m.csv',header=T,stringsAsFactors=F,sep=' ')
shinyServer(function(input, output) {
  # Reactive: selected analysis year (UI string -> numeric year).
  yearInput <- reactive({
    switch(input$year,
           '1999' = 1999,
           '2000' = 2000,
           '2001' = 2001,
           '2002' = 2002,
           '2003' = 2003,
           '2004' = 2004,
           '2005' = 2005,
           '2006' = 2006,
           '2007' = 2007,
           '2008' = 2008,
           '2009' = 2009,
           '2010' = 2010,
           '2011' = 2011,
           '2012'=2012)
  })
  # Reactive: optional baseline year for comparison; 0 encodes "N/A"
  # (no baseline), which the plotting code tests for explicitly.
  yearBase <- reactive({
    switch(input$yearB,
           'N/A' = 0,
           '1999' = 1999,
           '2000' = 2000,
           '2001' = 2001,
           '2002' = 2002,
           '2003' = 2003,
           '2004' = 2004,
           '2005' = 2005,
           '2006' = 2006,
           '2007' = 2007,
           '2008' = 2008,
           '2009' = 2009,
           '2010' = 2010,
           '2011' = 2011,
           '2012'=2012)
  })
  # Reactive flag: 1 = 'vol', 0 = 'pass'. Downstream, 1 weights passenger
  # flights by flight frequency and 0 by total passengers (flightsPFreq vs
  # flightsPTotal).
  volPassR<-reactive({
    switch(input$volPass,
           'vol' = 1,
           'pass' = 0)
  })
  # Reactive flag: 1 = 'last', 0 = 'all'. Downstream, 1 selects the
  # statesSrcY table and 0 the statesSrcYm variant -- presumably last port
  # of call vs all ports; confirm against the data prep.
  lastOrAll<-reactive({
    switch(input$lastOrAll,
           'last' = 1,
           'all' = 0)
  })
output$distPlot <- renderPlot({
######
if(lastOrAll() == 1){
if(yearBase() == 0){
yr = yearInput()
map.world <- map_data(map = "world")
p1<-ggplot(map.world, aes(x = long, y = lat,group=group))
p1<-p1+geom_polygon()#+coord_cartesian(xlim = c(100, 180),ylim=c(-50,50))
p1 <- p1 + theme(legend.position="none") # remove legend with fill colours
statesSrcYY<-subset(statesSrcY,Year==yr)
statesSrcYY$flightsPFreq <- ifelse(is.na(statesSrcYY$flightsPFreq),0,statesSrcYY$flightsPFreq)
statesSrcYY$flightsCFreq <- ifelse(is.na(statesSrcYY$flightsCFreq),0,statesSrcYY$flightsCFreq)
statesSrcYY$shipsFreq <- ifelse(is.na(statesSrcYY$shipsFreq),0,statesSrcYY$shipsFreq)
statesSrcYY$flightsPTotal <- ifelse(is.na(statesSrcYY$flightsPTotal),0,statesSrcYY$flightsPTotal)
if (volPassR() == 1){
statesSrcYY$weightedTot<-statesSrcYY$flightsPFreq*input$wPass + input$wCargo*statesSrcYY$flightsCFreq+input$wShips*statesSrcYY$shipsFreq
} else {
statesSrcYY$weightedTot<-statesSrcYY$flightsPTotal/100*input$wPass + input$wCargo*statesSrcYY$flightsCFreq+input$wShips*statesSrcYY$shipsFreq
}
p1<-p1+geom_point(data=statesSrcYY,aes(x=x_coord_centroid,y=y_coord_centroid,size=weightedTot,group=StateID),col='red')
print(p1)
} else {
yrB <- yearBase()
yr = yearInput()
map.world <- map_data(map = "world")
p1<-ggplot(map.world, aes(x = long, y = lat,group=group))
p1<-p1+geom_polygon()#+coord_cartesian(xlim = c(100, 180),ylim=c(-50,50))
p1 <- p1 + theme(legend.position="none") # remove legend with fill colours
statesSrcYY<-subset(statesSrcY,Year==yr)
statesSrcYY$flightsPFreq <- ifelse(is.na(statesSrcYY$flightsPFreq),0,statesSrcYY$flightsPFreq)
statesSrcYY$flightsCFreq <- ifelse(is.na(statesSrcYY$flightsCFreq),0,statesSrcYY$flightsCFreq)
statesSrcYY$shipsFreq <- ifelse(is.na(statesSrcYY$shipsFreq),0,statesSrcYY$shipsFreq)
statesSrcYY$flightsPTotal <- ifelse(is.na(statesSrcYY$flightsPTotal),0,statesSrcYY$flightsPTotal)
statesSrcYB<-subset(statesSrcY,Year==yrB)
statesSrcYB$flightsPFreq <- ifelse(is.na(statesSrcYB$flightsPFreq),0,statesSrcYB$flightsPFreq)
statesSrcYB$flightsCFreq <- ifelse(is.na(statesSrcYB$flightsCFreq),0,statesSrcYB$flightsCFreq)
statesSrcYB$shipsFreq <- ifelse(is.na(statesSrcYB$shipsFreq),0,statesSrcYB$shipsFreq)
statesSrcYB$flightsPTotal <- ifelse(is.na(statesSrcYB$flightsPTotal),0,statesSrcYB$flightsPTotal)
if (volPassR() == 1){
statesSrcYY$weightedTot<-statesSrcYY$flightsPFreq*input$wPass + input$wCargo*statesSrcYY$flightsCFreq+input$wShips*statesSrcYY$shipsFreq
statesSrcYB$weightedTot<-statesSrcYB$flightsPFreq*input$wPass + input$wCargo*statesSrcYB$flightsCFreq+input$wShips*statesSrcYB$shipsFreq
#statesSrcYY$propW <- statesSrcYY$weightedTot / sum(statesSrcYY$weightedTot)
#statesSrcYB$propW <- statesSrcYB$weightedTot / sum(statesSrcYB$weightedTot)
} else {
statesSrcYY$weightedTot<-statesSrcYY$flightsPTotal/100*input$wPass + input$wCargo*statesSrcYY$flightsCFreq+input$wShips*statesSrcYY$shipsFreq
statesSrcYB$weightedTot<-statesSrcYB$flightsPTotal/100*input$wPass + input$wCargo*statesSrcYB$flightsCFreq+input$wShips*statesSrcYB$shipsFreq
#statesSrcYY$propW <- statesSrcYY$weightedTot / sum(statesSrcYY$weightedTot)
#statesSrcYB$propW <- statesSrcYB$weightedTot / sum(statesSrcYB$weightedTot)
}
statesSrcYY$propChange<- NA
#print(statesSrcYB$propW)
for(i in 1:length(statesSrcYY$propChange)){
statesSrcYY[i,]$propChange <- abs(statesSrcYY[i,]$weightedTot - statesSrcYB[statesSrcYB$StateID ==statesSrcYY[i,]$StateID,]$weightedTot)
#if(statesSrcYB[statesSrcYB$StateID ==statesSrcYY[i,]$StateID,]$propW > 0){
#statesSrcYY[i,]$isMore<- statesSrcYY[i,]$propW / statesSrcYB[statesSrcYB$StateID ==statesSrcYY[i,]$StateID,]$propW
#} else {
# statesSrcYY[i,]$isMore<-1
#}
}
statesSrcYY$propChange <- statesSrcYY$propChange / max(statesSrcYY$propChange)
p1<-p1+geom_point(data=statesSrcYY,aes(x=x_coord_centroid,y=y_coord_centroid,size=weightedTot,group=StateID,col=propChange))+scale_colour_gradientn(colours=c('lightblue','red'))
print(p1)
}
#########
} else {
#########
if(yearBase() == 0){
yr = yearInput()
map.world <- map_data(map = "world")
p1<-ggplot(map.world, aes(x = long, y = lat,group=group))
p1<-p1+geom_polygon()#+coord_cartesian(xlim = c(100, 180),ylim=c(-50,50))
p1 <- p1 + theme(legend.position="none") # remove legend with fill colours
statesSrcYY<-subset(statesSrcYm,Year==yr)
statesSrcYY$flightsPFreqm <- ifelse(is.na(statesSrcYY$flightsPFreqm),0,statesSrcYY$flightsPFreqm)
statesSrcYY$flightsCFreqm <- ifelse(is.na(statesSrcYY$flightsCFreqm),0,statesSrcYY$flightsCFreqm)
statesSrcYY$shipsFreq <- ifelse(is.na(statesSrcYY$shipsFreq),0,statesSrcYY$shipsFreq)
statesSrcYY$flightsPTotalm <- ifelse(is.na(statesSrcYY$flightsPTotalm),0,statesSrcYY$flightsPTotalm)
if (volPassR() == 1){
statesSrcYY$weightedTot<-statesSrcYY$flightsPFreqm*input$wPass + input$wCargo*statesSrcYY$flightsCFreqm+input$wShips*statesSrcYY$shipsFreq
} else {
statesSrcYY$weightedTot<-statesSrcYY$flightsPTotalm/100*input$wPass + input$wCargo*statesSrcYY$flightsCFreqm+input$wShips*statesSrcYY$shipsFreq
}
p1<-p1+geom_point(data=statesSrcYY,aes(x=x_coord_centroid,y=y_coord_centroid,size=weightedTot,group=StateID),col='red')
print(p1)
} else {
yrB <- yearBase()
yr = yearInput()
map.world <- map_data(map = "world")
p1<-ggplot(map.world, aes(x = long, y = lat,group=group))
p1<-p1+geom_polygon()#+coord_cartesian(xlim = c(100, 180),ylim=c(-50,50))
p1 <- p1 + theme(legend.position="none") # remove legend with fill colours
statesSrcYY<-subset(statesSrcYm,Year==yr)
statesSrcYY$flightsPFreqm <- ifelse(is.na(statesSrcYY$flightsPFreqm),0,statesSrcYY$flightsPFreqm)
statesSrcYY$flightsCFreqm <- ifelse(is.na(statesSrcYY$flightsCFreqm),0,statesSrcYY$flightsCFreqm)
statesSrcYY$shipsFreq <- ifelse(is.na(statesSrcYY$shipsFreq),0,statesSrcYY$shipsFreq)
statesSrcYY$flightsPTotalm <- ifelse(is.na(statesSrcYY$flightsPTotalm),0,statesSrcYY$flightsPTotalm)
statesSrcYB<-subset(statesSrcYm,Year==yrB)
statesSrcYB$flightsPFreqm <- ifelse(is.na(statesSrcYB$flightsPFreqm),0,statesSrcYB$flightsPFreqm)
statesSrcYB$flightsCFreqm <- ifelse(is.na(statesSrcYB$flightsCFreqm),0,statesSrcYB$flightsCFreqm)
statesSrcYB$shipsFreq <- ifelse(is.na(statesSrcYB$shipsFreq),0,statesSrcYB$shipsFreq)
statesSrcYB$flightsPTotalm <- ifelse(is.na(statesSrcYB$flightsPTotalm),0,statesSrcYB$flightsPTotalm)
if (volPassR() == 1){
statesSrcYY$weightedTot<-statesSrcYY$flightsPFreqm*input$wPass + input$wCargo*statesSrcYY$flightsCFreqm+input$wShips*statesSrcYY$shipsFreq
statesSrcYB$weightedTot<-statesSrcYB$flightsPFreqm*input$wPass + input$wCargo*statesSrcYB$flightsCFreqm+input$wShips*statesSrcYB$shipsFreq
#statesSrcYY$propW <- statesSrcYY$weightedTot / sum(statesSrcYY$weightedTot)
#statesSrcYB$propW <- statesSrcYB$weightedTot / sum(statesSrcYB$weightedTot)
} else {
statesSrcYY$weightedTot<-statesSrcYY$flightsPTotalm/100*input$wPass + input$wCargo*statesSrcYY$flightsCFreqm+input$wShips*statesSrcYY$shipsFreq
statesSrcYB$weightedTot<-statesSrcYB$flightsPTotalm/100*input$wPass + input$wCargo*statesSrcYB$flightsCFreqm+input$wShips*statesSrcYB$shipsFreq
#statesSrcYY$propW <- statesSrcYY$weightedTot / sum(statesSrcYY$weightedTot)
#statesSrcYB$propW <- statesSrcYB$weightedTot / sum(statesSrcYB$weightedTot)
}
statesSrcYY$propChange<- NA
#print(statesSrcYB$propW)
for(i in 1:length(statesSrcYY$propChange)){
statesSrcYY[i,]$propChange <- abs(statesSrcYY[i,]$weightedTot - statesSrcYB[statesSrcYB$StateID ==statesSrcYY[i,]$StateID,]$weightedTot)
#if(statesSrcYB[statesSrcYB$StateID ==statesSrcYY[i,]$StateID,]$propW > 0){
#statesSrcYY[i,]$isMore<- statesSrcYY[i,]$propW / statesSrcYB[statesSrcYB$StateID ==statesSrcYY[i,]$StateID,]$propW
#} else {
# statesSrcYY[i,]$isMore<-1
#}
}
statesSrcYY$propChange <- statesSrcYY$propChange / max(statesSrcYY$propChange)
p1<-p1+geom_point(data=statesSrcYY,aes(x=x_coord_centroid,y=y_coord_centroid,size=weightedTot,group=StateID,col=propChange))+scale_colour_gradientn(colours=c('lightblue','red'))
print(p1)
}
#########
}
})
  # Rank-trajectory plot: pick the 10 source regions with the highest weighted
  # impact in the selected year, then draw each region's rank across the
  # hard-coded year range 1999-2012.
  output$distPlot2 <- renderPlot({
    yr = yearInput()
    # Replace missing traffic counts with 0 so the weighted sum is defined.
    statesSrcY$flightsPFreq <- ifelse(is.na(statesSrcY$flightsPFreq),0,statesSrcY$flightsPFreq)
    statesSrcY$flightsCFreq <- ifelse(is.na(statesSrcY$flightsCFreq),0,statesSrcY$flightsCFreq)
    statesSrcY$shipsFreq <- ifelse(is.na(statesSrcY$shipsFreq),0,statesSrcY$shipsFreq)
    statesSrcY$flightsPTotal <- ifelse(is.na(statesSrcY$flightsPTotal),0,statesSrcY$flightsPTotal)
    # Weighted impact: either passenger-flight frequency (volPassR() == 1) or
    # passenger volume scaled by 1/100, combined with cargo flights and ships
    # using the user-supplied weights from the UI.
    if (volPassR() == 1){
      statesSrcY$weightedTot<-statesSrcY$flightsPFreq*input$wPass + input$wCargo*statesSrcY$flightsCFreq+input$wShips*statesSrcY$shipsFreq
    } else {
      statesSrcY$weightedTot<-statesSrcY$flightsPTotal/100*input$wPass + input$wCargo*statesSrcY$flightsCFreq+input$wShips*statesSrcY$shipsFreq
    }
    # Top 10 regions for the selected year.
    # NOTE(review): assumes every year has at least 10 rows -- fewer rows
    # would put NA StateIDs into keyStates; confirm against the data.
    t1<-subset(statesSrcY,Year==yr)
    t1<-t1[order(-t1$weightedTot),]
    keyStates<-t1[1:10,]$StateID
    # pos[j, i] = rank of key region j in year 1998 + i (i = 1..14, i.e.
    # 1999..2012; these constants are repeated in the axis limits below).
    pos<-matrix(nrow=10,ncol=14)
    for(i in 1:14){
      t2<-subset(statesSrcY,Year==1998+i)
      t2<-t2[order(-t2$weightedTot),]
      for (j in 1:10){
        # NOTE(review): which() returns integer(0) when a key region is
        # missing in some year, which would make this assignment fail --
        # verify every region appears in every year of the data.
        pos[j,i] <- which(t2$StateID == keyStates[j])
      }
    }
    # One line per key region; the dashed red line marks the selected year.
    plot(1,xlim=c(1999,2012),ylim=c(1,10),xlab='Year',ylab='')
    for (j in 1:10){
      points(1999:2012,pos[j,],col=j,lwd=2,type='l')
    }
    abline(v=yr,col='red',lty=2)
  })
  # Top-10 table: the ten source regions with the highest weighted impact in
  # the selected year. First branch uses the yearly tables (statesSrcY), the
  # second the alternative tables (statesSrcYm, columns suffixed "m").
  output$view <- renderTable({
    if(lastOrAll()==1){
      #######
      yr = yearInput()
      statesSrcYY<-subset(statesSrcY,Year==yr)
      # Missing traffic counts are treated as zero before weighting.
      statesSrcYY$flightsPFreq <- ifelse(is.na(statesSrcYY$flightsPFreq),0,statesSrcYY$flightsPFreq)
      statesSrcYY$flightsCFreq <- ifelse(is.na(statesSrcYY$flightsCFreq),0,statesSrcYY$flightsCFreq)
      statesSrcYY$shipsFreq <- ifelse(is.na(statesSrcYY$shipsFreq),0,statesSrcYY$shipsFreq)
      statesSrcYY$flightsPTotal <- ifelse(is.na(statesSrcYY$flightsPTotal),0,statesSrcYY$flightsPTotal)
      # Weighted impact: flight frequency or passenger volume (/100),
      # depending on the volPassR toggle, combined with the user weights.
      if (volPassR() == 1){
        statesSrcYY$weightedTot<-statesSrcYY$flightsPFreq*input$wPass + input$wCargo*statesSrcYY$flightsCFreq+input$wShips*statesSrcYY$shipsFreq
      } else {
        statesSrcYY$weightedTot<-statesSrcYY$flightsPTotal/100*input$wPass + input$wCargo*statesSrcYY$flightsCFreq+input$wShips*statesSrcYY$shipsFreq
      }
      statesSrcYY<-statesSrcYY[order(-statesSrcYY$weightedTot),]
      # NOTE(review): columns are selected by hard-coded position (1, 26-30);
      # this silently breaks if the table layout changes -- confirm indices.
      tM<-head(statesSrcYY[,c(1,26,27,28,29,30)],n=10)
      colnames(tM)<- c("Source_region", "Passenger_Flights", "Cargo_Flights","Ships","#Passengers_flying", "Weighted_cumulative_impact_(I)")
      # Former xtable/MathJax rendering, kept commented out for reference:
      #M <- print(xtable(tM,digits=c(0,0,0,0,0,0,0)),
      #           floating=FALSE, tabular.environment="array", comment=FALSE, print.results=FALSE,include.rownames=F)
      #html <- paste0("$$", M, "$$")
      #list(
      #  withMathJax(HTML(html))
      #)
      tM
      ########
    } else {
      ########
      # Same computation on the "m"-suffixed tables.
      yr = yearInput()
      statesSrcYY<-subset(statesSrcYm,Year==yr)
      statesSrcYY$flightsPFreqm <- ifelse(is.na(statesSrcYY$flightsPFreqm),0,statesSrcYY$flightsPFreqm)
      statesSrcYY$flightsCFreqm <- ifelse(is.na(statesSrcYY$flightsCFreqm),0,statesSrcYY$flightsCFreqm)
      statesSrcYY$shipsFreq <- ifelse(is.na(statesSrcYY$shipsFreq),0,statesSrcYY$shipsFreq)
      statesSrcYY$flightsPTotalm <- ifelse(is.na(statesSrcYY$flightsPTotalm),0,statesSrcYY$flightsPTotalm)
      if (volPassR() == 1){
        statesSrcYY$weightedTot<-statesSrcYY$flightsPFreqm*input$wPass + input$wCargo*statesSrcYY$flightsCFreqm+input$wShips*statesSrcYY$shipsFreq
      } else {
        statesSrcYY$weightedTot<-statesSrcYY$flightsPTotalm/100*input$wPass + input$wCargo*statesSrcYY$flightsCFreqm+input$wShips*statesSrcYY$shipsFreq
      }
      statesSrcYY<-statesSrcYY[order(-statesSrcYY$weightedTot),]
      tM<-head(statesSrcYY[,c(1,26,27,28,29,30)],n=10)
      colnames(tM)<- c("Source_region", "Passenger_Flights", "Cargo_Flights","Ships","#Passengers_flying", "Weighted_cumulative_impact_(I)")
      #M <- print(xtable(tM,digits=c(0,0,0,0,0,0,0)),
      #           floating=FALSE, tabular.environment="array", comment=FALSE, print.results=FALSE,include.rownames=F)
      #html <- paste0("$$", M, "$$")
      #list(
      #  withMathJax(HTML(html))
      #)
      tM
      ########
    }
  },include.rownames=F,digits=c(0,0,0,0,0,0,0))
})
|
cdb1cb3d7119422c68f0a4a531484a449bc39847 | 05fc3ee8d1dcf3c5a003752926fa9259d595667c | /cachematrix.R | 27c9606f8aeccc8ce24bb8e7dcb6b5c43e2fe56c | [] | no_license | ecocarlisle/ProgrammingAssignment2 | ab84d3d5ebad1d35e65e58693b306aec4a66e987 | d88b7ce698ac3b4ac1a45d3538324e79738e0911 | refs/heads/master | 2020-12-27T09:37:58.800084 | 2014-12-21T04:22:41 | 2014-12-21T04:22:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,161 | r | cachematrix.R | ## matrix is passed in and a list is created with getters and setters.
## Create a caching wrapper around a matrix.
## Returns a list of closures sharing one environment: set/get replace and
## read the wrapped matrix, setcache/getcache store and read its inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  ## replace the stored matrix and drop any stale cached inverse
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  ## read back the stored matrix
  get <- function() x
  ## stash a computed inverse in the shared environment
  setcache <- function(solve) cached_inverse <<- solve
  ## read back the cached inverse (NULL until setcache() is called)
  getcache <- function() cached_inverse
  ## expose the four accessors by name
  list(set = set, get = get,
       setcache = setcache,
       getcache = getcache)
}
## Return the inverse of the special "matrix" built by makeCacheMatrix,
## computing it only on the first call and serving the cached copy afterwards.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getcache()
  ## early exit: an inverse was already computed and cached
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## first call: invert the stored matrix and cache the result
  original <- x$get()
  inverse <- solve(original, ...)
  x$setcache(inverse)
  inverse
}
658a658819c48c145a170f5ace073a0382610ee0 | 654f100a3a03778d53f5e26c3cc1418cb22a780d | /binomial/R/check-binomial.R | 29b4993e21be1791685a8b0ff0772ea60b195376 | [] | no_license | caroluscha/workout3 | e0303b1066f740435918c8329eeaee90e8f003a6 | f0f80ecddc1f266014121a8d4cb67a5fde9467cd | refs/heads/master | 2020-05-18T15:15:38.779771 | 2019-05-04T02:34:12 | 2019-05-04T02:34:12 | 184,493,636 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 737 | r | check-binomial.R | #private checker functions for inputs
#this function checks whether prob is a single number strictly between 0 and 1
check_prob <- function(prob) {
  # Test length first with && (scalar, short-circuit) so a vector input
  # reaches stop() instead of raising an "invalid condition" error from if();
  # is.numeric() guards against character input, which the original would
  # have compared lexically and could wrongly accept.
  if (length(prob) == 1 && is.numeric(prob) && prob > 0 && prob < 1) {
    return(TRUE)
  }
  # (also fixes the "betwen" typo in the original error message)
  stop("p has to be a number between 0 and 1")
}
#checks if trials (number of trials) is a single non-negative integer
check_trials <- function(trials) {
  # Use && and test length/type first so vector or non-numeric input fails
  # with the message below rather than a low-level if() error.
  # trials >= 0 accepts 0, matching the documented "non-negative" contract
  # (the original rejected 0 while its message claimed to allow it).
  if (length(trials) == 1 && is.numeric(trials) && trials >= 0 && trials %% 1 == 0) {
    return(TRUE)
  }
  stop("trials must be a non-negative integer")
}
#checks whether success is a vector of non-negative integers, each less than or equal to trials
check_success <- function(success, trials) {
  # all() collapses the elementwise tests so success may be a vector
  # (the original used elementwise `&` inside if(), which errors for
  # vectors in modern R); empty vectors and NAs are rejected explicitly.
  if (length(success) >= 1 && is.numeric(success) && !anyNA(success) &&
      all(success >= 0) && all(success %% 1 == 0) && all(success <= trials)) {
    return(TRUE)
  }
  stop("invalid success value")
}
|
716cd8e694c9b02845e9b141abea330bc3a81f49 | b4403540cfba3929f0d844454b282ea02dd5e743 | /pospkg/tests/testthat/testthat.R | 9a658638f7b891f9c89acbd315df4b9044010e4f | [] | no_license | AhmedRashid4/Capstone_2 | 5654dd0656d75e38dbeae1cf7bbf30bedc3a563e | fe2ad8cf227fbc6f1a6ee4eb0c1a557bb330eb92 | refs/heads/main | 2023-02-27T00:13:05.298931 | 2021-02-02T15:17:22 | 2021-02-02T15:17:22 | 330,974,561 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 56 | r | testthat.R | library(testthat)
# Standard tests/testthat.R entry point: load the package under test and
# run its test suite (test_check discovers tests/testthat/test-*.R files).
library(pospkg)
test_check("pospkg")
|
3b5d40e23a07a00f7f9e478f2f0dffa50b3a168c | f59f1e7d62af7b612fc2c7f31c6a7c0362efdb87 | /cachematrix.R | b8ee1889a70a0398b379d401919e1f79a9a4762a | [] | no_license | painkillerz/ProgrammingAssignment2 | 5b46c44436e1b5944ee7f5b4c6137a2a2a2fe826 | c2ff2679c8672ef18de60b47aae344abce60e917 | refs/heads/master | 2021-01-16T22:55:44.335680 | 2014-08-24T17:09:59 | 2014-08-24T17:09:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,988 | r | cachematrix.R | ## These functions are useful in the inversion of square matrixes through the use of caching
## makeCacheMatrix builds a caching container around a matrix: it returns a
## list of closures sharing one environment, so a computed inverse can be
## stored once (setmat) and read back later (getmat) by cacheSolve.
## solve() itself will raise an error later if the wrapped matrix is not a
## square, invertible matrix -- no size check is done here.
makeCacheMatrix <- function(x = numeric()) {
  ## holds the cached inverse; NULL until setmat() stores a value,
  ## and reset to NULL on every fresh call of makeCacheMatrix
  inverse_cache <- NULL
  ## read back the wrapped matrix
  get <- function() x
  ## store a computed inverse via superassignment into the shared environment
  setmat <- function(solve) inverse_cache <<- solve
  ## read back the cached inverse (NULL when nothing is cached yet)
  getmat <- function() inverse_cache
  ## expose the three accessors by name
  list(get = get, setmat = setmat, getmat = getmat)
}
## cacheSolve returns the inverse of the matrix wrapped by makeCacheMatrix,
## serving the cached copy when one exists and computing + caching otherwise.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function (x, ...){
  mat <- x$getmat()    ## previously cached inverse, or NULL
  if(!is.null(mat)) {
    message("Retrieving cached matrix data:...")
    ## Bug fix: the original had `mat` followed by a bare `return` symbol,
    ## which merely evaluates the return function object and returns
    ## nothing -- control fell through and the inverse was recomputed and
    ## re-cached on every call. return(mat) performs the early exit.
    return(mat)
  }
  solver <- x$get()          ## no cache yet: fetch the original matrix
  mat <- solve(solver, ...)  ## compute the inverse
  x$setmat(mat)              ## cache it for subsequent calls
  mat                        ## return the freshly computed inverse
}
|
73e7b96546c4695b9ba2058dc7ecbe9573261052 | 0c3c0b5c20b0d72a7333eafe085241fa02ed77f8 | /man/adductDB.Rd | 4e6185281dd27f526a31228541a86f30da80c1a9 | [] | no_license | tianyabeef/RFQI | 1c4bc40f90bc619a50c91105670d1007f9012763 | a3a736714d15ca8f91ef20d5f9359af22b528a40 | refs/heads/master | 2022-02-28T12:10:42.862374 | 2019-11-15T08:59:40 | 2019-11-15T08:59:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 723 | rd | adductDB.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{adductDB}
\alias{adductDB}
\title{possible adduct when ionization}
\format{A list containing four matrix, each matrix is an adduct table
\describe{
\item{HILIC_pos}{chromatographic column is HILIC, and ionization mode is "positive"}
\item{HILIC_neg}{chromatographic column is HILIC, and ionization mode is "negative"}
\item{RP_pos}{chromatographic column is RP, and ionization mode is "positive"}
\item{RP_neg}{chromatographic column is RP, and ionization mode is "negative"}
}}
\source{
data("adductDB", package="RFQI")
}
\usage{
adductDB
}
\description{
possible adduct when ionization
}
\keyword{datasets}
|
a10555f7afd25058b02819850bda088291d434e7 | 2d8acfab5bcf43e8f77f17e2b5943e3a741b1321 | /man/print.Rd | 2ad3455f4621188920de0e5167187f4ace62d0cf | [] | no_license | shabbychef/SharpeR | 347b2e181ac3bafe83723df4f01b110cfd85b116 | df46933d5e0b7576ffb5a46b9fb70607f89ed0e0 | refs/heads/master | 2022-06-04T00:27:23.216085 | 2021-08-18T17:50:04 | 2021-08-18T17:50:04 | 7,334,041 | 20 | 5 | null | 2013-09-13T22:04:43 | 2012-12-27T00:52:58 | R | UTF-8 | R | false | true | 1,880 | rd | print.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sr.r
\name{print.sr}
\alias{print.sr}
\alias{print.sropt}
\alias{print.del_sropt}
\title{Print values.}
\usage{
\method{print}{sr}(x, ...)
\method{print}{sropt}(x, ...)
\method{print}{del_sropt}(x, ...)
}
\arguments{
\item{x}{an object of class \code{sr} or \code{sropt}.}
\item{...}{further arguments to be passed to or from methods.}
}
\value{
the object, wrapped in \code{invisible}.
}
\description{
Displays an object, returning it \emph{invisibly},
(via \code{invisible(x)}.)
}
\examples{
# compute a 'daily' Sharpe
mysr <- as.sr(rnorm(253*8),ope=1,epoch="day")
print(mysr)
# roll your own.
ope <- 253
zeta <- 1.0
n <- 6 * ope
rvs <- rsr(1,n,zeta,ope=ope)
roll.own <- sr(sr=rvs,df=n-1,ope=ope,rescal=sqrt(1/n))
print(roll.own)
# put a bunch in. naming becomes a problem.
rvs <- rsr(5,n,zeta,ope=ope)
roll.own <- sr(sr=rvs,df=n-1,ope=ope,rescal=sqrt(1/n))
print(roll.own)
# for sropt objects:
nfac <- 5
nyr <- 10
ope <- 253
# simulations with no covariance structure.
# under the null:
set.seed(as.integer(charToRaw("be determinstic")))
Returns <- matrix(rnorm(ope*nyr*nfac,mean=0,sd=0.0125),ncol=nfac)
asro <- as.sropt(Returns,drag=0,ope=ope)
print(asro)
}
\references{
Sharpe, William F. "Mutual fund performance." Journal of business (1966): 119-138.
\url{https://ideas.repec.org/a/ucp/jnlbus/v39y1965p119.html}
}
\seealso{
Other sr:
\code{\link{as.sr}()},
\code{\link{confint.sr}()},
\code{\link{dsr}()},
\code{\link{is.sr}()},
\code{\link{plambdap}()},
\code{\link{power.sr_test}()},
\code{\link{predint}()},
\code{\link{reannualize}()},
\code{\link{se}()},
\code{\link{sr_equality_test}()},
\code{\link{sr_test}()},
\code{\link{sr_unpaired_test}()},
\code{\link{sr_vcov}()},
\code{\link{sr}},
\code{\link{summary.sr}}
}
\author{
Steven E. Pav \email{shabbychef@gmail.com}
}
\concept{sr}
|
7470f4053f1f6ccde70fea12cce3ddd77714f27e | ab1ec946ff07ca10d48bbacccf42b3f382d7180f | /app/server.R | d32786280d5114bc23997170324b1fab49c5c69e | [] | no_license | ucb243-fall16-mfa/juliet | 27f4d3d644a17c56045517341757df5298ed45bd | d32c7ce686489615fd875b00718ba2893d3a0b8a | refs/heads/master | 2020-06-12T20:09:32.154456 | 2016-12-15T23:05:41 | 2016-12-15T23:05:41 | 75,756,107 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 560 | r | server.R |
# Shiny server: render the plot selected by the plot_ob input. The if/else
# ladder is expressed as a switch on the choice code; the trailing unnamed
# argument is the default branch, matching the original final else.
function(input, output, session) {
  output$plot <- renderPlot({
    switch(input$plot_ob,
      a = plot_eigen(review),
      b = plot_common_factor(review),
      c = plot_partial_factor(review, dim_plot = c(input$dim_1, input$dim_2)),
      d = plot_partial_factor_loading(review, dim_plot = c(input$dim_1, input$dim_2)),
      e = plot.boostrap_ratio(review, 10),
      plot.boostrap_conf(review, 10)
    )
  })
}
4e87c1cdc77d6f983605663a5222e5f73717ae2c | 3b35ece106cb520f8e7d96c11e8f34ab25e21659 | /script.R | 8edc162921e0b39c76f9dff9314382c526316799 | [] | no_license | inteligentni/lab02_R_intro | eb53697c90dfd54d42d57d58db8876e5d79f2c52 | ac6dcb57cb362c05eeb401e3d41336641aae5d3d | refs/heads/master | 2023-03-17T07:56:39.677680 | 2023-03-10T16:19:25 | 2023-03-10T16:19:25 | 167,940,704 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,484 | r | script.R | ###############################
# Data sampling
###############################
# create a vector with values from 1 to 10
x <- 1:10
# create a sample of size 5 from the vector
sample(x, size = 5)
# create a sample of size 20 from the vector, where duplicates are allowed
sample(x, size = 20, replace = TRUE)
# set seed and create two sample of size 20 from the vector, where duplicates are allowed
set.seed(10)
sample(x, size = 20, replace = TRUE)
set.seed(10)
sample(x, size = 20, replace = TRUE)
###############################
# Matrices
###############################
# create a 2 x 4 matrix with values from 8 to 1, filled by rows
a <- matrix(8:1, nrow = 2, ncol = 4, byrow = TRUE)
a
# get the first row
a[1, ]
# get the element from row 1, column 2
a[1,2]
# get number of rows
nrow(a)
# get number of columns
ncol(a)
# create two matrices of the same dimension
matrix1 <- matrix(c(3, 9, -1, 4), nrow = 2)
matrix1
matrix2 <- matrix(c(5, 2, 0, 9), nrow = 2)
matrix2
# add matrix2 to matrix1
matrix1 + matrix2
# transpose a matrix
t(matrix1)
###############################
# Lists
###############################
# create a new list with attributes: passport, age, diplomatic
traveler1 <- list(passport = "P123123", age = 34, diplomatic=TRUE)
traveler1
# get the 2nd element
traveler1[2]
# get the value of the 2nd element
traveler1[[2]]
# get the value of the age element
traveler1$age
# get the list length
length(traveler1)
# add new list after the 2nd element
traveler1 <- append(traveler1, list(country = "AUS"), after=2)
length(traveler1)
traveler1
# delete 3rd element
traveler1[[3]] <- NULL
length(traveler1)
traveler1
# concatinate two lists
traveler2 <- list(passport = "P456456", age = 14, diplomatic = FALSE)
travelers <- c(traveler1, traveler2)
travelers
# check if travelers is a list
is.list(travelers)
# get names of all list elements
names(travelers)
# get elements with 'age' in their name
travelers[grepl('age', names(travelers))]
###############################
# Foreach loop
###############################
# print all odd numbers from 1 to 10 using for each loop
for (i in 1:10) {
if (i %% 2 == 1) {
print(paste(i,"is odd number"))
}
}
###############################
# While loop
###############################
# print all odd numbers from 1 to 10 using while loop
i <- 1
while (i <= 10) {
if (i %% 2 == 1) {
print(paste(i,"is odd number"))
}
i <- i + 1
}
###############################
# Task 1
###############################
#Create a 2 x 3 matrix with the following elements: 3, 9, -1, 4, 2, 6 (by row). Print only the positive values from the first row.
# Answer:
matrix1 <- matrix(c(3, 9, -1, 4, 2, 6), nrow = 2)
for (i in matrix1[1,]) {
if (i > 0) {
print(i)
}
}
###############################
## if-else
###############################
# use ifelse function to create a new attribute called 'request' with the value 'assistance required' if a traveler is younger than 10 years, and the value 'no special requests' otherwise
traveler1$request <- ifelse(test = traveler1$age < 10,
yes = "assistance required",
no = "no special requests")
traveler1
########################################
# User-defined functions and apply
########################################
# create a function that adds two numbers. The default value for the second argument is 1
add <- function(x, y = 1) x + y
add(2)
add(2, 3)
# create a function returning an absolute value of x. Return the result using the return() function
my_abs <- function(x) {
  if (x < 0) {
    return(-x)
  }
  return(x)
}
my_abs(5)
my_abs(-5)
##############################################################
# Applying a function over rows and columns in data frame
##############################################################
# load the data "data/beatles_v2.csv"
beatles <- read.csv("data/beatles_v2.csv")
# get the number of characters in the song title "Yellow Submarine"
nchar("Yellow Submarine")
# get the number of characters of the first 10 songs
sapply(beatles$Title[1:10], nchar)
# calculate the mean value of the duration and Top.50.Billboard values of all songs from 1963
apply(beatles[beatles$Year == 1963, c(4,9)], 2, mean)
# calculate the mean value of the duration and Top.50.Billboard values that are not NAs of all songs from 1963
# mean that ignores NA values (handy as an apply() callback)
mean.with.na <- function(x) {
  mean(x[!is.na(x)])
}
apply(beatles[beatles$Year == 1963, c(4,9)], 2, mean.with.na)
###############################
# Working with tables
###############################
# create a contingency table of column Year values
year.counts <- table(beatles$Year)
year.counts
# get the 4th element from the table
year.counts[4]
# store the 4th element from the table in a variable
x <- year.counts[4]
x
# convert the variable to numeric
y <- as.numeric(x)
y
# sort the table in the descending order
sort(year.counts, decreasing = T)
# get the proportions table for the values of the Year column
year.counts.prop <- prop.table(year.counts)
year.counts.prop
# sort the proportions table in the descending order
sort(year.counts.prop, decreasing = T)
# get the proportions table for the values of the Year column, but limiting number of digits to 2
round(year.counts.prop, digits = 2)
# create a contingency table Top.50.Billboard vs. Year
xtabs(~Top.50.Billboard + Year, beatles)
###############################
# Manipulating data frames
###############################
###############################
## Adding new rows and columns
###############################
# create a new column On.album and set FALSE for all songs
beatles$On.album <- FALSE
head(beatles)
# create a new data frame with two columns (with sample data)
additional.columns <- data.frame(
Platinum = sample(c(TRUE, FALSE), 310, replace = TRUE),
Score = sample(5:10, 310, replace = TRUE)
)
# combine two data frames
beatles <- cbind(beatles, additional.columns)
head(beatles)
# get the first song
new.song <- beatles[1, ]
# add the song to the end of the data frame
beatles <- rbind(beatles, new.song)
tail(beatles)
# add the song after the 3rd song in the data frame
beatles <- rbind(beatles[1:3, ],
new.song,
beatles[4:nrow(beatles), ])
head(beatles)
###############################
## Removing columns and rows
###############################
# remove the attribute On.album
beatles$On.album <- NULL
names(beatles)
# remove columns Platinum (at index 10) and Score (at index 11)
beatles <- beatles[,-c(10, 11)]
names(beatles)
# create a subset of the data frame without songs in rows 2, 4 and 6
beatles1 <- beatles[-c(2, 4, 6), ]
head(beatles1)
# create a subset of the data frame without songs in rows from 1 to 8
beatles2 <- beatles[-(1:8), ]
head(beatles2)
##################################
## Updating column and row names
#################################
# get column names
colnames(beatles)
# change name of the column that starts with 'Genre' to 'Song.genre'
genreIndex <- which(startsWith(colnames(beatles), "Genre"))
colnames(beatles)[genreIndex] <- "Song.genre"
colnames(beatles)
# change name of the column at the index 6 to 'Genre'
colnames(beatles)[6] <- "Genre"
colnames(beatles)
# change row names to a string containing word 'song' and a song order number
rownames(beatles) <- paste("song", 1:nrow(beatles))
head(beatles)
# change row names to a string containing order number
rownames(beatles) <- c(1:nrow(beatles))
head(beatles)
##################################
## Retrieving and changing values
##################################
# get songs in rows from 1 to 5, but only attributes Title and Album.debut
first.songs <- beatles[1:5, c("Title", "Album.debut")]
first.songs
# get the songs from year 1964 not having McCartney as a lead vocal
indexes <- which((beatles$Year == "1964") & (!grepl('McCartney', beatles$Lead.vocal)))
selected.songs <- beatles[indexes, ]
head(selected.songs)
# get the songs from year 1958, but only attributes Title and Album.debut
songs.1958 <- subset(beatles, Year == 1958, c("Title", "Album.debut"))
head(songs.1958)
# create a vector of logical values denoting whether the attribute Album.debut has a value or not
empty.album.debut <- beatles$Album.debut == ""
# compute how many songs lack the data about the debut album
sum(empty.album.debut)
# for songs without debut album data, set the value of the Album.debut attribute to 'empty'
beatles$Album.debut[empty.album.debut] <- "empty"
# set the value back to empty string
beatles$Album.debut[empty.album.debut] <- ""
###############################
## Saving dataset
###############################
# save dataset to a CSV file, but without the row names (row numbers) column
write.csv(beatles, "data/beatles_v3.csv", row.names = F)
# save R object for the next session into file "data/beatles_v3.RData"
saveRDS(beatles, "data/beatles_v3.RData")
# restore R object from the file "data/beatles_v3.RData" in the next session
b3 <- readRDS("data/beatles_v3.RData")
###############################
# Task 2
###############################
# Create a new column in the *beatles* data frame called *Billboard.hit* having TRUE for all songs that were in the Top 50 Billboard (songs that have the Top.50.Billboard defined), and FALSE for all other songs (not having this value set).
# Answer:
beatles$Billboard.hit <- FALSE
beatles$Billboard.hit[!is.na(beatles$Top.50.Billboard)] <- TRUE
head(beatles)
|
c02f0664cb2b0d77cb37703f02fc943c4cd9d541 | 30975dc286e36555ae827345d8df7b4cc22ac345 | /man/combineKinrespLists.Rd | c5fb6ed264ba21136a89d3e8ca808605d0a6c28b | [] | no_license | bgctw/twKinresp | c70eec323028176b340681f6c307103e7d397bbc | 94234c83ba34d9a69203f162586028a78f9a33c7 | refs/heads/master | 2020-05-15T18:59:22.958201 | 2019-04-20T21:14:19 | 2019-04-20T21:14:19 | 182,444,867 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 293 | rd | combineKinrespLists.Rd | \name{combineKinrespLists}
\alias{combineKinrespLists}
\title{combineKinrespLists}
\description{Combine several kinrespList to one bigger kinrespList.}
\usage{combineKinrespLists(x)}
\arguments{
\item{x}{List of kinrespList entries}
}
\author{Thomas Wutzler <thomas.wutzler@web.de>}
|
c62aa185502d3bd110c79e5b28922d20387961af | 8ea63fc989af442de911427da7174f31be972d96 | /2020/06_analyze_for_mv_paper/06e_grep_mv_snps_from_mv_results.R | 629d86a359cb3512c0cfd1e9273ff0c36876cda5 | [] | no_license | Defrag1236/GWAS_on_russian_population_of_ovines | 5b958feb9d643623a3ea2bac6ac332bb0e596252 | aba270efa7b9b0e48a4ea0594c6d32bdb835e2ca | refs/heads/master | 2021-09-14T08:02:16.659273 | 2021-08-18T11:25:26 | 2021-08-18T11:25:26 | 210,596,347 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,745 | r | 06e_grep_mv_snps_from_mv_results.R | ### grep mv snps from mv results ###
# load data
# NOTE(review): this script relies on absolute setwd() paths, so it only
# runs on the original analysis server; relative paths would be portable.
setwd("/home/common/projects/ovine_selection/GWAS_on_russian_population_of_ovines/2020/results/Rdata")
# MultiABEL multivariate scan results for the four trait groups
# (6 days / 42 days / 3 months / all).
load("multi_6d.Rdata")
load("multi_42d.Rdata")
load("multi_3m.Rdata")
load("multi_all.Rdata")
library(data.table)
# Table of candidate multivariate SNPs (SNP ID expected in column 1).
setwd("/home/common/projects/ovine_selection/ovines_multivariate_analysis/results")
mv_snps <- fread("general_table_for_paper_new_threshold.txt", head=T, stringsAsFactors=F, data.table=F)
# Per-time-point multivariate p-value tables ("stephens" method per the
# file names); headerless, ID in column 1.
setwd("/home/common/projects/ovine_selection/GWAS_on_russian_population_of_ovines/2020/results/mv_stephens")
p_6d <- fread("p_value_6d_multi_stephens.txt", head=F, stringsAsFactors=F, data.table=F)
p_42d <- fread("p_value_42d_multi_stephens.txt", head=F, stringsAsFactors=F, data.table=F)
p_3m <- fread("p_value_3m_multi_stephens.txt", head=F, stringsAsFactors=F, data.table=F)
p_all <- fread("p_value_all_multi_stephens.txt", head=F, stringsAsFactors=F, data.table=F)
# MAF-filtered p-values for the seven index traits and body mass.
setwd("/home/common/projects/ovine_selection/GWAS_on_russian_population_of_ovines/2020/results/mv_stephens/filtered")
index_1 <- read.table("p_value_index_1_multi_stephens_maf_0.05_filtered.txt", head=F, stringsAsFactors=F)
index_2 <- read.table("p_value_index_2_multi_stephens_maf_0.05_filtered.txt", head=F, stringsAsFactors=F)
index_3 <- read.table("p_value_index_3_multi_stephens_maf_0.05_filtered.txt", head=F, stringsAsFactors=F)
index_4 <- read.table("p_value_index_4_multi_stephens_maf_0.05_filtered.txt", head=F, stringsAsFactors=F)
index_5 <- read.table("p_value_index_5_multi_stephens_maf_0.05_filtered.txt", head=F, stringsAsFactors=F)
index_6 <- read.table("p_value_index_6_multi_stephens_maf_0.05_filtered.txt", head=F, stringsAsFactors=F)
index_7 <- read.table("p_value_index_7_multi_stephens_maf_0.05_filtered.txt", head=F, stringsAsFactors=F)
mass <- read.table("p_value_mass_multi_stephens_maf_0.05_filtered.txt", head=F, stringsAsFactors=F)
# count lambda for mv multiabel
# Genomic-control lambda: median observed 1-df chi-square (from p-values)
# divided by the expected median of a 1-df chi-square. Values are only
# printed to the console, not stored.
median (qchisq(multi_6d$scan$p, df=1, lower.tail=F)/qchisq(0.5,df=1))
median (qchisq(multi_42d$scan$p, df=1, lower.tail=F)/qchisq(0.5,df=1))
median (qchisq(multi_3m$scan$p, df=1, lower.tail=F)/qchisq(0.5,df=1))
median (qchisq(multi_all$scan$p, df=1, lower.tail=F)/qchisq(0.5,df=1))
# grep mv snps from multiabel
# For each candidate SNP, pull its result row (columns 1:6) from each of the
# four MultiABEL scans, then collapse the per-SNP lists into data frames.
mv_6d <- list()
mv_42d <- list()
mv_3m <- list()
mv_all <- list()
for (n in seq_len(nrow(mv_snps))) {
	mv_6d[[n]] <- multi_6d$scan[grepl(mv_snps[n,1], multi_6d$scan$marker), 1:6]
	mv_42d[[n]] <- multi_42d$scan[grepl(mv_snps[n,1], multi_42d$scan$marker), 1:6]
	mv_3m[[n]] <- multi_3m$scan[grepl(mv_snps[n,1], multi_3m$scan$marker), 1:6]
	mv_all[[n]] <- multi_all$scan[grepl(mv_snps[n,1], multi_all$scan$marker), 1:6]
}
mv_6d <- do.call(rbind, mv_6d)
# Bug fix: the original bound the 42d table from the mv_3m list
# (copy-paste error), so the saved "42d" results were actually the
# 3-month results.
mv_42d <- do.call(rbind, mv_42d)
mv_3m <- do.call(rbind, mv_3m)
mv_all <- do.call(rbind, mv_all)
# grep mv snps from stephens
## for time-point traits (6d / 42d / 3m / all); "rime" in the original comment was a typo
mv_6d_s <- list()
mv_42d_s <- list()
mv_3m_s <- list()
mv_all_s <- list()
# Pull each candidate SNP's row (ID + p-value) from every table.
# NOTE(review): grepl() does substring matching, so a SNP ID that is a
# prefix of another ID could match several rows; exact matching via `==`
# or match() would be safer -- confirm IDs are never nested.
for (n in(1:nrow(mv_snps))) {
	mv_6d_s[[n]] <- p_6d[grepl(mv_snps[n,1], p_6d[,1]),]
	mv_42d_s[[n]] <- p_42d[grepl(mv_snps[n,1], p_42d[,1]),]
	mv_3m_s[[n]] <- p_3m[grepl(mv_snps[n,1], p_3m[,1]),]
	mv_all_s[[n]] <- p_all[grepl(mv_snps[n,1], p_all[,1]),]
}
# Collapse the per-SNP lists into one data frame per trait group.
mv_6d_s <- do.call(rbind, mv_6d_s)
mv_42d_s <- do.call(rbind, mv_3m_s)
mv_3m_s <- do.call(rbind, mv_3m_s)
mv_all_s <- do.call(rbind, mv_all_s)
## for index traits
mv_index_1 <- list()
mv_index_2 <- list()
mv_index_3 <- list()
mv_index_4 <- list()
mv_index_5 <- list()
mv_index_6 <- list()
mv_index_7 <- list()
mv_mass <- list()
# Same per-SNP extraction for the seven index traits and body mass;
# print(n) is a simple progress indicator.
for (n in(1:nrow(mv_snps))) {
	mv_index_1[[n]] <- index_1[grepl(mv_snps[n,1], index_1[,1]),]
	mv_index_2[[n]] <- index_2[grepl(mv_snps[n,1], index_2[,1]),]
	mv_index_3[[n]] <- index_3[grepl(mv_snps[n,1], index_3[,1]),]
	mv_index_4[[n]] <- index_4[grepl(mv_snps[n,1], index_4[,1]),]
	mv_index_5[[n]] <- index_5[grepl(mv_snps[n,1], index_5[,1]),]
	mv_index_6[[n]] <- index_6[grepl(mv_snps[n,1], index_6[,1]),]
	mv_index_7[[n]] <- index_7[grepl(mv_snps[n,1], index_7[,1]),]
	mv_mass[[n]] <- mass[grepl(mv_snps[n,1], mass[,1]),]
	print (n)
}
mv_index_1 <- do.call(rbind, mv_index_1)
mv_index_2 <- do.call(rbind, mv_index_2)
mv_index_3 <- do.call(rbind, mv_index_3)
mv_index_4 <- do.call(rbind, mv_index_4)
mv_index_5 <- do.call(rbind, mv_index_5)
mv_index_6 <- do.call(rbind, mv_index_6)
mv_index_7 <- do.call(rbind, mv_index_7)
mv_mass <- do.call(rbind, mv_mass)
# One combined table: SNP ID plus the p-value (column 2) of each index trait
# and mass; rows are assumed aligned because each list was built in the same
# mv_snps order -- TODO confirm every SNP matched exactly one row above.
mv_index <- cbind(SNP=mv_index_1[,1], index_1=mv_index_1[,2], index_2=mv_index_2[,2], index_3=mv_index_3[,2], index_4=mv_index_4[,2], index_5=mv_index_5[,2], index_6=mv_index_6[,2], index_7=mv_index_7[,2], mass=mv_mass[,2])
# Save results. Both the MultiABEL and the Stephens outputs go to the same
# mv_results directory, so one setwd() suffices (a second setwd() to the
# identical path was removed as redundant).
setwd("/home/common/projects/ovine_selection/GWAS_on_russian_population_of_ovines/2020/results/for_mv_paper/mv_results")
# MultiABEL per-trait tables (with header row).
fwrite(mv_6d, "mv_6d_mv_snps_grep.txt", col.names=TRUE, row.names=FALSE, quote=FALSE)
fwrite(mv_42d, "mv_42d_mv_snps_grep.txt", col.names=TRUE, row.names=FALSE, quote=FALSE)
fwrite(mv_3m, "mv_3m_mv_snps_grep.txt", col.names=TRUE, row.names=FALSE, quote=FALSE)
fwrite(mv_all, "mv_all_mv_snps_grep.txt", col.names=TRUE, row.names=FALSE, quote=FALSE)
# Stephens p-value tables (headerless, matching the headerless inputs).
fwrite(mv_6d_s, "mv_6d_stephens_mv_snps_grep_p.txt", col.names=FALSE, row.names=FALSE, quote=FALSE)
fwrite(mv_42d_s, "mv_42d_stephens_mv_snps_grep_p.txt", col.names=FALSE, row.names=FALSE, quote=FALSE)
fwrite(mv_3m_s, "mv_3m_stephens_mv_snps_grep_p.txt", col.names=FALSE, row.names=FALSE, quote=FALSE)
fwrite(mv_all_s, "mv_all_stephens_mv_snps_grep_p.txt", col.names=FALSE, row.names=FALSE, quote=FALSE)
fwrite(mv_index, "mv_index_mv_snps_grep_p.txt", col.names=TRUE, row.names=FALSE, quote=FALSE)
36b16c943b01ebbaecdb37ee2d0137ab93a8efb0 | 63474c298112a98b027b38d9246aa7347610f3c2 | /makeCacheMatrix.R | 8613fd520e2a7c81151e32eb417fa5bfd9aa5794 | [] | no_license | bplus426/ProgrammingAssignment2 | e6a86b4b0b043a959aa89a40325bf04ab915996f | 5a4555bbe6fcd07601dedea0a716065dddfcd9fb | refs/heads/master | 2020-03-07T13:47:18.019576 | 2018-03-31T08:58:07 | 2018-03-31T08:58:07 | 127,509,944 | 0 | 0 | null | 2018-03-31T07:33:44 | 2018-03-31T07:33:43 | null | UTF-8 | R | false | false | 568 | r | makeCacheMatrix.R | ## t1
## Constructor for a matrix wrapper that can memoise its inverse.
## Returns a list of accessor closures: `set`/`get` manage the matrix,
## `sett1erse`/`gett1erse` manage the cached inverse (NULL = not computed).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Swapping the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  sett1erse <- function(value) {
    cached_inverse <<- value
  }
  gett1erse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    sett1erse = sett1erse,
    gett1erse = gett1erse
  )
}
## Return the inverse of the matrix held by a makeCacheMatrix() object.
## Computes it with solve() on the first call, stores it in the object,
## and serves the cached copy (announced via message()) on later calls.
cacheSolve <- function(x, ...) {
  cached <- x$gett1erse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$sett1erse(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
|
a0ffcbe93620357fdb50cb5e865ef94eac683a65 | b74be7cd1a0e017d9b27d4253c93455d89c451e7 | /sea-level-predictor.r | 18191bbeda4228c98f4e7c83fb59c29a95a25166 | [] | no_license | jgeng98/fcc-data-analysis-python-sea-level-predictor | 7ef0fe115b2f12f5dbf5acf9d17904a8da5b5ea1 | 30d1b5b2391ad6e95ab0349df7589f79edb8a9dd | refs/heads/master | 2023-03-10T21:34:25.193980 | 2021-02-16T03:31:38 | 2021-02-16T03:31:38 | 338,459,025 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,274 | r | sea-level-predictor.r | library(tidyverse)
# Sea Level Predictor: scatter EPA sea-level data, overlay two linear fits
# (all years vs years >= 2000), extrapolated to 2050.
df <- read_csv("epa-sea-level.csv")
# Create a scatterplot with x="Year" and y="CSIRO Adjusted Sea Level"
# Add the line of best fit, predicting up to year 2050
# fullrange = TRUE makes stat_smooth draw the fitted line across the whole
# xlim (1880-2050), not just the span of the observed data.
p <- df %>%
  ggplot(aes(x = Year, y = `CSIRO Adjusted Sea Level`)) +
  geom_point() +
  xlim(1880, 2050) +
  ylim(0.0, 15.0) +
  stat_smooth(
    method = "lm",
    size = 1,
    color = "red",
    se = FALSE,
    fullrange = TRUE
  )
# Recalculate the line of best fit using just data from 2000-2013
df_subset <- df[df$Year >= 2000, ]
fit_subset <- lm(`CSIRO Adjusted Sea Level` ~ Year, data = df_subset)
int <- coef(fit_subset)[1]
slope <- coef(fit_subset)[2]
# Add second best fit line to the scatter plot, predicting up to year 2050
# (geom_abline spans the full plot by construction).
p + geom_abline(slope = slope, intercept = int, color = "blue", size = 1) +
  labs(x = "Year", y = "Sea Level (inches)", title = "Rise in Sea Level") +
  theme(plot.title = element_text(hjust = 0.5)) +
  annotate(
    geom = "text",
    label = "Fitted line using only the years after 2000",
    x = 2025,
    y = 15,
    color = "blue"
  ) +
  annotate(
    geom = "text",
    label = "Fitted line using all years",
    x = 2045,
    y = 10.5,
    color = "red"
  )
50498627f9e0f1c5b1e8a907d4d255c79314bef9 | 5af786899f99e1bc8b33fad883382f1ee9920a24 | /tbspapp/man/tbsignatureprofilerapp.Rd | e0fee087ca360301e0fee5b14c98362fc99dc491 | [] | no_license | cjlove1130/tbspapp | 3a5a9c30a8a74cf157830ffa925e0d47693f4be5 | df7882c9aad125f8b3dea301377272bbb9caeec8 | refs/heads/master | 2020-12-08T05:07:17.737973 | 2020-03-16T20:39:23 | 2020-03-16T20:39:23 | 232,892,880 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 520 | rd | tbsignatureprofilerapp.Rd | \name{tbsignatureprofilerapp}
\alias{tbsignatureprofilerapp}
\title{
Run the TBSignatureProfiler app
}
\description{
This function opens the TBSignatureProfiler R Shiny web application
}
\usage{
tbsignatureprofilerapp()
}
\value{
The R Shiny app will open
}
\author{
Christian Love
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
#Open the application
if(interactive()){
tbsignatureprofilerapp()
}
}
|
4265db6f1564648344483d0139bd5f751612ad9b | a621515a38621c9a36666984af5a4dc9944ba035 | /optimisation.R | 1d89f9a21a27d77978933aa9172408b6001fe6fe | [
"MIT"
] | permissive | robertkck/voter_rep | cb714794537e404ee785cb1033d6658b7f60fc38 | 8714bb38341b394d3bd6a6f17274946e4261be4f | refs/heads/master | 2021-01-12T01:05:04.420639 | 2019-06-25T15:42:11 | 2019-06-25T15:42:11 | 78,334,894 | 1 | 0 | MIT | 2019-06-25T15:21:35 | 2017-01-08T10:02:36 | R | UTF-8 | R | false | false | 3,400 | r | optimisation.R | # Optimisation exercise
# author: Robert Kalcik
# This script calculates the Cambridge Compromise allocation of seats for
# Parliament sizes between 600 and 800 seats. The allocation is subsequently
# evaluated using a number of measures of inequality. The results are plotted.
library(ggplot2)
library(countrycode)
library(SciencesPo)
library(tidyverse)
library(IC2)
source('funk/voting_gini.R')
source('funk/camcom.R')
source('funk/malapportionment.R')
# Population-weighted Theil index of representation ratios (rep/pop).
# Zero when every unit has the same seats-per-person ratio; grows with
# inequality of representation.
theil <- function(pop, rep){
  w <- pop / sum(pop)   # population shares (weights, sum to 1)
  ratio <- rep / pop    # per-capita representation
  mu <- sum(w * ratio)  # weighted mean ratio
  rel <- ratio / mu     # ratios relative to the weighted mean
  sum(w * rel * log(rel))
}
# Load population data and set treaty limits
eu <- read.csv("data/eu.csv")
eu <- eu[eu$GEO != "United Kingdom", ]  # EU-27: drop the UK
m <- 6   # minimum seats per member state
M <- 96  # maximum seats per member state
# Calculate CamCom allocation for every Parliament size between 600 and 800
rep_opt <- purrr::map(600:800, ~ alloc.camcom(eu$pop, m, M, .x)$rep)
# One row per member state, one column per Parliament size (H600..H800).
rep_opt_df <- data.frame(matrix(unlist(rep_opt), nrow = 27), row.names = eu$GEO)
colnames(rep_opt_df) <- paste0('H', 600:800)
# write.csv(rep_opt_df, 'output/CamCom_allocations.csv')
# Compute Gini, Malapportionment and other inequality metrics from the Science Po package
ginis <- map_dbl(rep_opt, ~ voting_gini(eu$pop, .x))
mals <- map_dbl(rep_opt, ~ mal(eu$pop, .x))
# NOTE(review): spo1 (Loosemore-Hanby), spo7 (Lijphart) and spo9 (DHondt)
# are computed but never used in `results` below.
spo1 <- map_dbl(rep_opt, ~ SciencesPo::Proportionality(eu$pop, .x, index = "Loosemore-Hanby"))
spo2 <- map_dbl(rep_opt, ~ 10 * Proportionality(eu$pop, .x, index = "Rae"))
spo3 <- map_dbl(rep_opt, ~ 1 - Proportionality(eu$pop, .x, index = "Cox-Shugart"))
spo4 <- map_dbl(rep_opt, ~ Proportionality(eu$pop, .x, index = "Farina"))
spo5 <- map_dbl(rep_opt, ~ Proportionality(eu$pop, .x, index = "Gallagher"))
spo6 <- map_dbl(rep_opt, ~ 10 * Proportionality(eu$pop, .x, index = "Grofman"))
spo7 <- map_dbl(rep_opt, ~ Proportionality(eu$pop, .x, index = "Lijphart"))
spo8 <- map_dbl(rep_opt, ~ 1 - Proportionality(eu$pop, .x, index = "Rose"))
spo9 <- map_dbl(rep_opt, ~ Proportionality(eu$pop, .x, index = "DHondt"))
# theils <- map_dbl(rep_opt, ~ theil(eu$pop, .x))
# Population-weighted generalised entropy of seats-per-person:
# alpha = 1 gives the Theil index, alpha = 0 the mean log deviation.
theils <- map_dbl(rep_opt, ~ calcGEI(.x / eu$pop, w = eu$pop, alpha = 1)$ineq$index)
entropies <- map_dbl(rep_opt, ~ calcGEI(.x / eu$pop, w = eu$pop, alpha = 0)$ineq$index)
# Collect and demean results
results <- data.frame(Gini = ginis, "LoosemoreHanby" = mals, Rae = spo2, Cox = spo3, Farina = spo4, Gallagher = spo5, Grofman = spo6, Rose = spo8, Theil = theils, Entropy = entropies)
results <- sweep(results, 2, apply(results, 2, mean))  # subtract column means
minima <- apply(results, 2, which.min)  # row index (1 = H600) of each minimum
results$H <- 600:800
# Export the long format
results_long <- results %>%
  gather("methods", "t", -H) %>%
  transform(methods = factor(methods,levels = c("Gini", "Cox", "Farina", "Gallagher", "LoosemoreHanby", "Grofman", "Rae", "Rose", "Theil", "Entropy")))
# write.csv(results_long, 'optimisation.csv')
# Prepare facet plot: one panel per metric, labelled with the Parliament
# size that minimises it.
# BUG FIX: which.min() returns an index into 600:800, so index 1 corresponds
# to H = 600; the label offset is therefore 599, not 600 (the old "600 +"
# reported every minimum one seat too high).
min_labels <- data.frame(x = 650, y = 0.04, min_text = paste0("Min: ", 599 + minima), methods = names(minima))
p <- ggplot(results_long, aes(x = H, y = t, color = methods)) +
  geom_point() +
  facet_wrap( ~ methods, ncol = 4) +
  scale_x_continuous(breaks = c(600, 700, 800)) +
  geom_text(data = min_labels, aes(x,y,label = min_text, color = methods), color = "black", inherit.aes = FALSE) +
  xlab("Parliament size") +
  ylab("Demeaned inequality coefficients") +
  theme(legend.position = "none")
p
|
02ac2f4ad63e36c2c3b18753f0d7650482f565e0 | 27d9b2e4188698bbbb039328cb01961ca8f99e08 | /run_analysis.R | f1d0a3e680209c8c3a3ace6a14287da393975a2f | [] | no_license | dbaggett16/getcleandataproject | 2148d5b0adf3776cca680774d3357e75efb9e230 | 2bfa75bab79481d761aae0d166a7a4b61a734c73 | refs/heads/master | 2020-02-26T17:24:50.464946 | 2016-11-10T23:21:53 | 2016-11-10T23:21:53 | 71,700,037 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,750 | r | run_analysis.R | # R Script written by David S. Baggett for the Getting and Cleaning Data Course
# Download (when needed), merge and tidy the UCI "Human Activity Recognition
# Using Smartphones" dataset (Getting and Cleaning Data course project).
#
# Args:
#   directory  Directory to download/extract into when "UCI HAR Dataset" is
#              not already in the working directory (0 = not supplied).
#   overwrite  Must be TRUE to extract into an already-existing `directory`.
#   dlmethod   Optional download.file() method; 0 = auto-select ("curl" on
#              macOS, "auto" elsewhere).
#
# Side effects: may download and extract the dataset ZIP, writes
# <rootdir>/tidydata.txt, and reports progress via message().
run_analysis <- function(directory = 0, overwrite = FALSE, dlmethod = 0)
{
  # Checking for required packages and arguments
  if (!requireNamespace("dplyr", quietly = TRUE)) {stop("Please install 'dplyr' package before running this script")}
  library(dplyr)
  if (!dir.exists("UCI HAR Dataset")) {
    if (directory == 0) {stop("If 'UCI HAR Dataset' does not exist in your working directory, you must call run_analysis(directory, overwrite) with a valid directory name.")}
    if (dir.exists(directory) && overwrite != TRUE) {stop("Warning: Directory exists! Choose a different directory or call run_analysis(directory, overwrite) with overwrite = TRUE.")}
    rootdir <- file.path(directory, "UCI HAR Dataset")
    datafile <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
    temp <- tempfile()
    # Set file download method (default: curl for mac, auto for others).
    # (The old `if (dlmethod != 0) dlmethod <- dlmethod` no-op was dropped.)
    if (dlmethod == 0) {
      dlmethod <- if (Sys.info()["sysname"] == "Darwin") "curl" else "auto"
    }
    # Download and extract files from ZIP archive.
    message("Downloading data ZIP archive...")
    download.file(url = datafile, destfile = temp, method = dlmethod, quiet = TRUE)
    message("Extracting files to ", directory)
    unzip(temp, exdir = directory)
    unlink(temp)
  }
  else if (dir.exists("UCI HAR Dataset")) {rootdir <- "UCI HAR Dataset"}
  # Merge training and test data to create one data set.
  traindir <- file.path(rootdir, "train")
  testdir <- file.path(rootdir, "test")
  message("Getting extracted data...")
  X_test <- read.table(file.path(testdir, "X_test.txt"))
  Y_test <- read.table(file.path(testdir, "y_test.txt"))
  SubjectTest <- read.table(file.path(testdir, "subject_test.txt"))
  X_train <- read.table(file.path(traindir, "X_train.txt"))
  Y_train <- read.table(file.path(traindir, "y_train.txt"))
  SubjectTrain <- read.table(file.path(traindir, "subject_train.txt"))
  features <- read.table(file.path(rootdir, "features.txt"))
  message("Merging extracted data...")
  ctrain <- cbind(SubjectTrain, Y_train, X_train)
  ctest <- cbind(SubjectTest, Y_test, X_test)
  traintest <- rbind(ctrain, ctest)
  # Descriptive variable names: subject, activity, then the feature names
  # with "-", "(", ")" and "," stripped out. (The unused `featurenames`
  # variable from the original was removed.)
  message("Creating descriptive variable names...")
  cnames <- c("subject", "activity", as.character(features[, 2]))
  cnames <- gsub("[-(),]", "", cnames)
  names(traintest) <- cnames
  # Rename activity codes 1..6 with descriptive labels (vectorised lookup
  # replaces the original per-code loop; behaviour is identical).
  message("Renaming activities...")
  activities <- c("walking", "walkingup", "walkingdown", "sitting", "standing", "laying")
  traintest$activity <- activities[traintest$activity]
  # Keep subject, activity and every mean/std measurement. The data.frame()
  # pass is deliberate: the default check.names = TRUE uniquifies the
  # duplicated feature names, which select() needs.
  message("Extracting mean and standard deviation for each measurement.")
  extractdata <- data.frame(traintest)
  tidydata <- select(extractdata, 1:2, contains("std"), contains("mean"))
  # Write the tidy data set.
  # NOTE(review): the course assignment also asks for a second data set with
  # the *average* of each variable per activity/subject; that aggregation is
  # not implemented here -- TODO confirm intent.
  message("Creating tidy data file.")
  write.table(tidydata, file = file.path(rootdir, "tidydata.txt"))
  # Output ending message and tidydata file location
  endingmessage <- paste("Completed Successfully: Type mytidydata <- read.table('", rootdir, "/tidydata.txt') to import tidy data file.", sep = "")
  message(endingmessage)
}
0eacfdd8eea939c6c0dceffd61d57cae1ab83857 | ee025a1ae0f2b4dd3a6993657cf023bb2477bbb3 | /Ch 4 Lab 4 KNN.R | 4bd8e5ba48cf6d8bed7ba5641f36143eb585d337 | [] | no_license | Rakeshsuku/Statistical-Learning-Practice-Codes | 1ed19c7b7575dc8c0a83a627130b33d6175b7314 | 733e07285b1137cc6d0a67340c5b591e99aa49fd | refs/heads/master | 2020-03-31T08:31:55.430253 | 2018-10-10T08:39:16 | 2018-10-10T08:39:16 | 152,061,185 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,441 | r | Ch 4 Lab 4 KNN.R | #################### K-Nearest Neighbour #####################
## Ch 4 Lab: K-Nearest Neighbours on the ISLR Smarket data.
library (ISLR)
library (MASS)
library (class)
## We use knn () function in class library to fit a KNN model. The knn () function takes 4 inputs. see text page 163.
## NOTE: attach() places the Smarket columns on the search path, so the bare
## names below (Year, Direction, Lag1, Lag2) resolve through it. attach() is
## generally discouraged outside of lab/transcript code.
attach (Smarket)
##We will now split the data to training set and test set and fit the model on training set and estimate the prediction accuracy on the test set.
train = (Year<2005) ##Year contains the Year for all 1250 entries in the Smarket data set.
##train now is logical vector.
Smarket.2005 = Smarket[!train,] ##Test data set.
dim (Smarket.2005) ##Dimensions of the test data set
Direction.2005 = Direction [!train]
train.X = cbind (Lag1, Lag2)[train,] ##Joins Lag1 and Lag2 variables for training data and assigns it to train.X
test.X = cbind (Lag1, Lag2)[!train,]
train.Direction = Direction[train]
## Now we must set seed to ensure reproducibility when R randomly tries to break ties as nearest neighbours.
set.seed (1)
knn.pred = knn (train.X, test.X, train.Direction, k=1)
table (knn.pred, Direction.2005) ## Confusion matrix for K=1.
mean (knn.pred == Direction.2005) ##The result for K=1 is not good as only 50% observations are correctly classified.
## Below is an analysis using K = 3.
knn.pred = knn (train.X, test.X, train.Direction, k=3)
table (knn.pred, Direction.2005)
mean (knn.pred == Direction.2005)
## Further increasing k (not done here) is found to avail no additional improvement.
d4a668aed5b71a1ba741c32c12395dd7423ec968 | c3ed88a4aabbb507a059734a49e259f97867abe3 | /man/RT.spell.Rd | 8128895963cef6bee1a34c191d3f6833447d4fac | [] | no_license | RECETOX/Retip | 53ce7efa2eba237a4b1c49b6e5179ad804290fd0 | 0835edd0858c85da362d839dbcea0315eb47e0d5 | refs/heads/master | 2023-03-24T09:44:36.210640 | 2021-03-18T12:33:40 | 2021-03-18T12:33:40 | 296,605,098 | 0 | 0 | null | 2021-03-18T12:33:41 | 2020-09-18T11:44:53 | R | UTF-8 | R | false | true | 903 | rd | RT.spell.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function_RT_spell.R
\name{RT.spell}
\alias{RT.spell}
\title{Predict Retention Time}
\usage{
RT.spell(training, target, model = model, cesc = cesc)
}
\arguments{
\item{training}{A training dataset with calculated Chemical Descriptors used in the model}
\item{target}{A target dataset with calculated Chemical Descriptors where you want to predict Retention Time}
\item{model}{A previously computed model, like Xgboost, Keras etc.}
\item{cesc}{A model for center and scale data calculated with cesc function}
}
\value{
Returns target dataframe with Retention time calculated
}
\description{
Predict Retention Time
}
\examples{
\donttest{
# target dataframe needs to have 3 mandatory columns Name, InChIKey and SMILES
# and the whole descriptors calculated and not filtered
target_rtp <- RT.spell(training,target,model=xgb)}
}
|
cf881d087c6dbca33c1615f24e12cb7c90de46c2 | 7540976892a2f76e39236d453129f0bef4b90244 | /ggTimeSeries.R | fbc841de2fb9424fc795bfb94506460dd47d8277 | [] | no_license | irichgreen/Viz_Practice | e4afa03d7e5bb8a3f6d9d29a02948918764e1f54 | 5be3cce3eb7a6437d9c7084c773d85d8b84647e1 | refs/heads/master | 2021-01-24T09:46:54.271650 | 2017-01-13T05:54:16 | 2017-01-13T05:54:16 | 70,122,181 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,083 | r | ggTimeSeries.R | # Example from https://github.com/Ather-Energy/ggTimeSeries
# Steamgraph demo from the ggTimeSeries package, followed by a short
# ggExtra marginal-plot demo.
library(ggplot2)
library(ggthemes)
library(data.table)
library(ggTimeSeries)
# creating some data
set.seed(10)
# Four absolute random walks of 1000 points each. Time (length 1000) is
# recycled across the 4000 Signal values by data.frame().
dfData = data.frame(
  Time = 1:1000,
  Signal = abs(
    c(
      cumsum(rnorm(1000, 0, 3)),
      cumsum(rnorm(1000, 0, 4)),
      cumsum(rnorm(1000, 0, 1)),
      cumsum(rnorm(1000, 0, 2))
    )
  ),
  VariableLabel = c(rep('Class A', 1000), rep('Class B', 1000), rep('Class C', 1000), rep('Class D', 1000))
)
# base plot
p1 = ggplot(dfData, aes(x = Time, y = Signal, group = VariableLabel, fill = VariableLabel)) +
  stat_steamgraph()
# adding some formatting
p1 +
  xlab('') +
  ylab('') +
  coord_fixed( 0.2 * diff(range(dfData$Time)) / diff(range(dfData$Signal)))
# Interactive version of the last printed ggplot.
library(plotly)
ggplotly()
library("ggplot2")
library("ggExtra")
# Basic usage
set.seed(30)
df <- data.frame(x = rnorm(500, 50, 10), y = runif(500, 0, 50))
p <- ggplot(df, aes(x, y)) + geom_point()
ggMarginal(p)
ggMarginal(p, colour = "red")
ggMarginal(p, type = "histogram")
f0c7bdba6fbd6df740ca225fab1805578f8d7c2d | db9d910712d7df47de99739484e177be12bf6474 | /Petchey_etal_figures/figure8.R | 77ee61948bf4049213991dc33950311e8c9c9f9d | [] | no_license | Dusty-Gannon/ecopredtools | b10d7f35a70ca3db47ffc9fbcd6b7439c223e724 | 33b11db6d601a263d24a162222658627acd3813f | refs/heads/master | 2021-06-01T04:27:05.260950 | 2015-12-04T11:08:18 | 2015-12-04T11:08:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,915 | r | figure8.R | ## Code for box figure of Petchey et al, explaining the Lyapunov exponent
## Owen Petchey 1.12.14
## Plot three graphs
## 1. Two time series.
## 2. The absolute difference between these two time series.
## 3. Function relating forecast horizon with Lyapunov exponent.
## Use the logistic map
logistic <- function(r,n) r*n*(1-n)
## Iterate the logistic map for `tt` steps starting from density `n0`.
## Returns the full trajectory as a numeric vector of length `tt`.
get.iterate.logistic <- function(r,n0,tt){
  # BUG FIX: the result used to be allocated with the *global* `its`
  # (rez <- numeric(length=its)), which silently breaks whenever tt != its.
  rez <- numeric(length=tt)
  rez[1] <- n0
  ### iterate tt times; seq_len avoids the descending 2:1 trap when tt == 1
  for(i in seq_len(tt - 1) + 1)
    rez[i] <- logistic(r,rez[i-1])
  rez
}
## Two trajectories from nearly identical starting densities (differ by e).
r <- 3.6 ## intrinsic growth rate
n0 <- 0.01 ## starting density of time series 1
e <- 1e-5 ## difference to starting density of time series 2
its <- 50 ## number of iterations
ts1 <- get.iterate.logistic(r,n0,its)
ts2 <- get.iterate.logistic(r,n0+e,its)
## Forecast horizon: the time at which an initial error `delta`, growing
## exponentially at rate `le` (the Lyapunov exponent), reaches the accuracy
## threshold `Delta`.
Tp <- function(le, Delta, delta) {
  growth <- log(Delta / delta)
  1 / le * growth
}
le <- seq(0, 1, length=1000) ## the Lyapunov exponent
Delta <- 0.01 ## the accuracy threshold
delta <- 1e-10 ## the accuracy of the initial conditions
layout(matrix(1:3, 1, 3)) ## three panels side by side
## Plot 1: the two trajectories on a log y-axis
plot(1:its, ts1, type="l", xlab="Time", ylab="Population size", log="y")
lines(1:its, ts2, type="l", col="blue")
mtext(3, line=1,text="(a)", adj=0, font=2, lwd=0.1)
## Plot 2: divergence of the two trajectories over time
plot(1:its, log10(abs(ts1-ts2)), type="l", ylab="Log of absolute difference", xlab="Time")
mtext(3, line=1,text="(b)", adj=0, font=2)
## Plot 3: forecast horizon vs Lyapunov exponent, for two values of the
## initial-condition accuracy delta
plot(le, log10(Tp(le, Delta, delta)), type="l",
     ylim=c(0.8, 4.5),
     ylab="Log10(Forecast horizon)", xlab="Lyapunov exponent")
delta <- 1e-5 ## coarser initial-condition accuracy for the second curve
lines(le, log10(Tp(le, Delta, delta)), type="l")
mtext(3, line=1,text="(c)", adj=0, font=2)
|
cee71da8a875d24138da676b676c4b11b81c576a | 3fdb12a1fe34aca6b96aa9047df4593404a5fc52 | /scan-all.R | e50fa442877eab329c857db69f72f71999cda211 | [] | no_license | carnegie-dpb/bartonlab-modeling | 06c90e10df8fc37973a02db41f2c882bc8ceedfd | 7d875f16f675bf94fc04a360ae8f6855d4642619 | refs/heads/master | 2021-01-22T03:48:47.674881 | 2018-04-18T22:29:04 | 2018-04-18T22:29:04 | 81,460,423 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,616 | r | scan-all.R | ##
## scan ALL genes to assemble a dataframe of fit parameters
##
source("transmodel.fit.R")
source("Rsquared.R")
source("rhop.R")
source("~/R/getAllIDs.R")
## Fit the translation model to every gene in `schema` under `condition`,
## keeping only fits whose R-squared exceeds R2threshold. Builds a data frame
## of fitted parameters, one row per retained gene, with gene ids as rownames.
## Depends on helpers sourced above (transmodel.fit, Rsquared, rhop, getAllIDs).
scanall = function(schema,condition) {
    ## Fixed model parameters shared by every fit.
    rhon0=1
    rhoc0=20
    nu=10
    gamman=0.760
    R2threshold = 0.60
    ## get an array of all gene ids
    ids = getAllIDs(schema)
    ## Accumulator for accepted fits (grown row-by-row in the loop below).
    fits = data.frame(id=character(),rhop0=numeric(),etap=numeric(),gammap=numeric(),minimum=numeric(),R2=numeric(), check.rows=T)
    dataTimes = getTimes(schema=schema, condition=condition)/60
    ## loop over all, doing fit
    for (i in 1:length(ids)) {
        dataValues = getExpression(schema=schema, condition=condition, gene=ids[i])
        fit = transmodel.fit(doPlot=F,schema=schema,condition=condition,rhon0=rhon0,rhoc0=rhoc0,nu=nu,gamman=gamman, dataTimes=dataTimes, dataValues=dataValues)
        ## Status codes 4 and 5 flag optimiser failure (the messages match
        ## nlm()'s convergence codes -- presumably transmodel.fit wraps nlm).
        if (fit$code==4) {
            print(paste(ids[i],"ITERATION LIMIT EXCEEDED"))
        } else if (fit$code==5) {
            print(paste(ids[i],"MAXIMUM STEP SIZE EXCEEDED FIVE CONSECUTIVE TIMES"))
        } else {
            rhop0 = fit$estimate[1]
            etap = fit$estimate[2]
            gammap = fit$estimate[3]
            ## get R-squared and error metric
            ## Evaluate the fitted model at each observed time point.
            fitValues = dataTimes
            for (j in 1:length(fitValues)) {
                fitValues[j] = rhop(rhoc0, rhon0, nu, gamman, rhop0, etap, gammap, dataTimes[j])
            }
            R2 = Rsquared(fitValues,dataValues)
            if (R2>R2threshold) {
                print(paste(ids[i],fit$minimum,R2))
                fits = rbind(fits, data.frame(id=ids[i],rhop0=rhop0,etap=etap,gammap=gammap,minimum=fit$minimum,R2=R2))
            }
        }
    }
    ## move id to rowname
    rownames(fits)=fits$id
    fits$id = NULL
    ## NOTE(review): this copy of the function appears truncated -- there is
    ## no closing brace and `fits` is never returned. Confirm against the
    ## original source before relying on it.
9810381a29b4ad94aa25aaad966d9e483e04c816 | ca2f465cc9686a89b7de9e5386b0ca5b56b63336 | /man/HomomorphicEncryption-package.Rd | 4669d4f43f92ba711d95a3a777316faa0cf3d283 | [] | no_license | iamtrask/R-Homomorphic-Encryption-Package | 48d9a71217a2cda00e6db599ea879f1c254d72e4 | 5bcafed256857fb2de01c10cb2dd3fc82f08f1f5 | refs/heads/master | 2021-01-01T04:14:34.765683 | 2017-07-13T17:36:07 | 2017-07-13T17:36:07 | 97,148,953 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,304 | rd | HomomorphicEncryption-package.Rd | \name{HomomorphicEncryption-package}
\alias{HomomorphicEncryption-package}
\alias{HomomorphicEncryption}
\docType{package}
\title{
Homomorphic Encryption
}
\description{
This package provides easy to use implementations of some homomorphic encryption
schemes. An encryption scheme is said to be homomorphic when certain functions
can be applied directly to the cipher text in such a way that decrypting the
result renders the same answer as if the function had been applied to the
unencrypted data.
}
\details{
\tabular{ll}{
Package: \tab HomomorphicEncryption\cr
Type: \tab Package\cr
Version: \tab 0.2\cr
Date: \tab 2015-08-19\cr
License: \tab GPL-2\cr
}
}
\author{
Louis Aslett
Maintainer: Louis Aslett <aslett@stats.ox.ac.uk>
}
\references{
Aslett, L. J. M., Esperança, P. M. and Holmes, C. C. (2015), A review of homomorphic encryption and software tools for encrypted statistical machine learning. Technical report, University of Oxford.
}
\keyword{ package }
\examples{
# Generate cryptographic parameters
p <- pars("FandV")
# Create public/private keypair
keys <- keygen(p)
# Encrypt the values 2 and 3
ct1 <- enc(keys$pk, 2)
ct2 <- enc(keys$pk, 3)
# Homomorphically add the ciphertexts together
ct <- ct1 + ct2
# Decrypt to 5, the result of applying + to plain messages
dec(keys$sk, ct)
}
|
c1e031df868a388ebda4286794c981c82cb8388d | fab4e7ad290309d0028e8de349d262519b705ef0 | /mutation_rate/plot/cerevisiae/mutation.R | 7ad3079918f8185b178b13357378f6e1b79e225a | [] | no_license | michaelbarton/michael-barton-thesis-figures | f4f3eb5937983b13c0b081cbc1b69d8228f436fd | 7e0ad7de773cd5156209458b56ba3e2bc5915100 | refs/heads/master | 2016-09-11T04:09:10.956144 | 2009-08-17T15:42:57 | 2009-08-17T15:42:57 | 86,110 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 991 | r | mutation.R | rm(list=ls())
# Lattice plot of mean relative substitution rate vs amino-acid cost, one
# panel per cost measure, with a Spearman correlation printed in each panel.
require(lattice)
source('helper/find_replace.R')
source('helper/panel.confidence.R')
data <- read.csv('data/acid_mutation_rates.csv')
# Keep only the three cost measures of interest.
data <- subset(data, (cost_type == "weight" | cost_type == "glu-abs" | cost_type == "glu-rel"))
# Relabel the cost types for the panel strips.
data$cost_type <- find.replace(data$cost_type,
  c("weight","glu-rel","glu-abs"),
  c("Molecular weight","Glucose relative","Glucose absolute")
)
plot <- xyplot(
  mutation ~ cost | cost_type,
  data=data,
  scale=list(relation="free"),
  xlab="Amino acid cost",
  ylab="Mean relative substitution rate",
  ylim=c(0.85,1.2),
  panel = function(x,y,...){
    panel.confidence(x,y)
    panel.xyplot(x,y)
    # "spear" partial-matches method = "spearman" in cor.test().
    cor <- cor.test(x,y,method="spear")
    panel.text(min(x), 1.2, paste("R = ",round(cor$estimate,3)),pos=4)
    panel.text(min(x), 1.17, paste("p = ",round(cor$p.value,3)),pos=4)
  }
)
# Write the plot as a 4x10 inch EPS file.
postscript("results/mutation.eps",width=4,height=10,onefile=FALSE,horizontal=FALSE, paper = "special",colormodel="rgb")
print(plot)
graphics.off()
8cfffc0fd5dcb0240347cc3141865311e0844b0c | 906b0c0e9ed26b83384d5b84d5945dd6f8ec14b6 | /man/CCAGFA-package.Rd | 2f496654a557daf8203182523bca08dd5ff4b7cb | [] | no_license | cran/CCAGFA | b836817921defdccd368b644ab5ba6d044972a3c | 238222647882845383f0eb836923e6b15db5eb5f | refs/heads/master | 2021-01-24T16:10:29.274695 | 2015-12-17T16:08:55 | 2015-12-17T16:08:55 | 17,678,232 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,738 | rd | CCAGFA-package.Rd | \name{CCAGFA-package}
\alias{CCAGFA-package}
\alias{CCGFA}
\docType{package}
\title{CCAGFA: Bayesian canonical correlation analysis (BCCA), inter-battery
factor analysis (BIBFA), and group factor analysis (GFA)}
\description{Variational Bayesian solution for canonical
correlation analysis, inter-battery factor analysis and group factor
analysis. The package contains code for learning the model and
some supporting functionality for interpretation.
The Bayesian CCA model as implemented here was originally presented by Virtanen et al. (2011),
but a more comprehensive treatment is found in Klami et al. (2013). The latter also explains
the BIBFA model. The GFA extends CCA to multiple data sources (or groups of variables), providing interpretable linear
factorizations that describe variation shared by all possible subsets of sources. It was originally presented by Virtanen et al. (2012). Later Klami et al. (2014) provide a more extensive literature review and present a novel hierarchical low-rank ARD prior for the factor loadings to better account for inter-source relationships.
We recommend that scientific publications using the code for CCA or BIBFA cite Klami et al. (2013),
and publications using the code for GFA cite Virtanen et al. (2012), until Klami et al. (2014) has been published.
The package is based on the research done in the SMLB group, Helsinki Institute for Information Technology HIIT, Department of Information and Computer Science, Aalto University, http://research.ics.aalto.fi/mi/.}
\details{
\tabular{ll}{
Package: \tab CCAGFA\cr
Type: \tab Package\cr
Version: \tab 1.0.4\cr
Date: \tab 2013-04-23\cr
License: \tab GPL (>= 2)\cr
}
}
\author{Seppo Virtanen, Eemeli Leppaaho and Arto Klami.
Maintainer: Seppo Virtanen <seppo.j.virtanen@aalto.fi>}
\references{
Virtanen, S. and Klami, A., and Kaski, S.: Bayesian CCA via group-wise
sparsity. In \emph{Proceedings of the 28th International Conference on Machine
Learning (ICML)}, pages 457-464, 2011.
Virtanen, S. and Klami, A., and Khan, S.A. and Kaski, S.: Bayesian
group factor analysis. In \emph{Proceedings of the 15th International
Conference on Artificial Intelligence and Statistics (AISTATS)}, volume 22 of JMLR W&CP, pages 1269-1277, 2012.
Klami, A. and Virtanen, S., and Kaski, S.: Bayesian Canonical Correlation
Analysis. \emph{Journal of Machine Learning Research},14:965-1003, 2013.
Klami, A. and Virtanen, S., Leppaaho, E., and Kaski, S.: Group Factor Analysis. \emph{IEEE Transactions on Neural Networks and Learning Systems}, to appear.
}
\keyword{package}
%\seealso{%%~~ \code{\link[somepkg:somepkg-package]{somepkg}} ~~}}
\examples{# Load the package
# require(CCAGFA)
# demo(CCAGFAexample)
}
|
ee344b7c8821b84a1e3af6e2786e77508e9b8a1e | 29585dff702209dd446c0ab52ceea046c58e384e | /oai/R/list_identifiers.R | 2d368c00c4e35211f186ac2d06d1337980b83ef4 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,662 | r | list_identifiers.R | #' List OAI-PMH identifiers
#'
#' @export
#' @template url_ddd
#' @template as
#' @param prefix Specifies the metadata format that the records will be
#' returned in.
#' @param from specifies that records returned must have been created/update/deleted
#' on or after this date.
#' @param until specifies that records returned must have been created/update/deleted
#' on or before this date.
#' @param set specifies the set that returned records must belong to.
#' @param token a token previously provided by the server to resume a request
#' where it last left off.
#' @examples \dontrun{
#' # from
#' today <- format(Sys.Date(), "%Y-%m-%d")
#' list_identifiers(from = today)
#'
#' # from and until
#' list_identifiers(from = '2011-06-01T', until = '2011-07-01T')
#'
#' # longer time span
#' list_identifiers(from = '2011-06-01T', until = '2011-09-01T')
#'
#' # set parameter - here, using ANDS - Australian National Data Service
#' list_identifiers(from = '2011-09-01T', until = '2012-09-01T', set = "ANDS")
#'
#' # Get a list
#' list_identifiers(from = today, as = "list")
#'
#' # Get raw text
#' list_identifiers(from = today, as = "raw")
#' }
list_identifiers <- function(url = "http://oai.datacite.org/oai", prefix = "oai_dc", from = NULL,
until = NULL, set = NULL, token = NULL, as = "df", ...) {
check_url(url)
if (!is.null(token)) from <- until <- set <- prefix <- NULL
args <- sc(list(verb = "ListIdentifiers", metadataPrefix = prefix, from = from,
until = until, set = set, resumptionToken = token))
out <- while_oai(url, args, token, as, ...)
oai_give(out, as, "ListRecords")
}
|
544ec725132f98cd73de25378e58e3aaac99cef6 | 2f41983e3c7788201ffb7722f982a4cf446e8910 | /cachematrix.R | 86e7bced4aa268ff2b07482afb002453cedf0d5a | [] | no_license | oespinga2/ProgrammingAssignment2 | f78ae86e36f71f0d9b3326dc1db5b4653bb3eedc | 65b24c476b07a7192561db91d4dd23e3afbf9005 | refs/heads/master | 2021-01-18T12:07:38.704127 | 2015-02-18T05:02:16 | 2015-02-18T05:02:16 | 30,951,631 | 0 | 0 | null | 2015-02-18T04:12:30 | 2015-02-18T04:12:30 | null | UTF-8 | R | false | false | 1,073 | r | cachematrix.R | ## These functions create functions that define a 'CacheMatrix' object which allocates space for a matrix and its inverse. A second function computes the inverse of the function or returns the cached matrix
## These functions set and get a matrix and its inverse. Note that they do not check dimensions or whether the stored matrix is invertible.
## Build a matrix wrapper whose inverse can be memoised. The returned list
## exposes set/get for the matrix itself and setinverse/getinverse for the
## cached inverse; storing a new matrix clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## The function returns the cached inverse if this exists or calculates the inverse and stores it in a 'makeCacheMatrix' object if necessary.
## Return the inverse of the cache-matrix object `x`, computing it with
## solve() on a cache miss and storing it for subsequent calls.
cacheSolve <- function(x, ...) {
  result <- x$getinverse()

  if (is.null(result)) {
    # Cache miss: invert the stored matrix and remember the answer.
    result <- solve(x$get(), ...)
    x$setinverse(result)
  } else {
    message("getting cached inverse")
  }

  result
}
|
1be801361523765200430494e6b585d807f751cf | a1e3d7b9928dbc0112fa6d866f9295b0b3109f04 | /plot2.R | 7b9bec3ceaecd9f5ac8c9991c1cbbb18bd79f3c8 | [] | no_license | nerijusuza/Exp-Data-Analysis-Project-2 | b9995faeb362596ffa4841be62977e72ffa5beb4 | fcfaba1e5a504a709c9dbbbc9e11b2a1abd98b50 | refs/heads/master | 2021-01-10T07:05:23.536794 | 2016-01-24T02:06:21 | 2016-01-24T02:06:21 | 48,629,579 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,160 | r | plot2.R | library(utils)
# plot2: total PM2.5 emissions per year for Baltimore City (fips "24510"),
# written as a base-graphics bar plot to ./Class_4.3/plot2.png.
library(plyr)     # kept from the original script (not used by this plot)
library(ggplot2)  # kept from the original script (base barplot is used)
# Create the data directory on first run.
if(!file.exists("./Class_4.3")) {
  dir.create("./Class_4.3")
}
# Download the zipped NEI/SCC data set once.
if(!file.exists("./Class_4.3/exdata-data-NEI_data.zip")) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip", destfile = "./Class_4.3/exdata-data-NEI_data.zip")
}
# Unzip once. Fix: the extracted .rds files live in ./Class_4.3, so check for
# them there (the original looked in the current working directory, so the
# archive was re-unzipped on every run).
if(!(file.exists("./Class_4.3/summarySCC_PM25.rds") &&
     file.exists("./Class_4.3/Source_Classification_Code.rds"))){
  unzip("./Class_4.3/exdata-data-NEI_data.zip", list = FALSE, overwrite = TRUE, exdir = "./Class_4.3")
}
# Read the (large) data sets only if they are not already in the session.
if(!exists("NEI")){
  NEI <- readRDS("./Class_4.3/summarySCC_PM25.rds")
}
if(!exists("SCC")){
  SCC <- readRDS("./Class_4.3/Source_Classification_Code.rds")
}
# Baltimore City, Maryland has fips code "24510".
SubsetNEI <- NEI[NEI$fips == "24510", ]
# Sum emissions by year and render the totals to the PNG device.
png(filename = "./Class_4.3/plot2.png", width = 480, height = 480, bg = "transparent", units = 'px')
AggregatedTotalByYear <- aggregate(Emissions ~ year, SubsetNEI, sum)
barplot(
  height = AggregatedTotalByYear$Emissions,
  names.arg = AggregatedTotalByYear$year,
  xlab = "Year",
  ylab = "PM2.5 Emissions (Tons)",
  main = "Total PM2.5 Emissions In Baltimore City"
)
dev.off()
06c83d9a43d6fb36be34c977084be6b7ad03c5b3 | 9cc7423f4a94698df5173188b63c313a7df99b0e | /R/as.data.frame.density.R | d950efd412ec9190edb0eeadf3cc07a74b5a7dbb | [
"MIT"
] | permissive | HugoNjb/psycho.R | 71a16406654b11007f0d2f84b8d36587c5c8caec | 601eef008ec463040c68bf72ac1ed8d4a8f7751f | refs/heads/master | 2020-03-27T01:24:23.389884 | 2018-07-19T13:08:53 | 2018-07-19T13:08:53 | 145,707,311 | 1 | 0 | null | 2018-08-22T12:39:27 | 2018-08-22T12:39:27 | null | UTF-8 | R | false | false | 427 | r | as.data.frame.density.R | #' Coerce to a Data Frame.
#'
#' Functions to check if an object is a data frame, or coerce it if possible.
#'
#' @param x any R object.
#' @param ... additional arguments to be passed to or from methods.
#'
#' @author \href{https://dominiquemakowski.github.io/}{Dominique Makowski}
#'
#' @method as.data.frame density
#' @export
as.data.frame.density <- function(x, ...) {
  # A stats::density object carries the evaluation grid in x$x and the
  # estimated density values in x$y; expose them as two columns.
  data.frame(x = x[["x"]], y = x[["y"]])
}
|
4c5028f3fa1c3fdff4ae466e0444613cb49e3a7b | fce449f97dd37a42fc59b93c40f3b584e0692109 | /man/RAPIN.Rd | 602f6389b571d4d8bf1a1a771239875a5adb9144 | [] | no_license | sidiropoulos/PSigA | 374e3f43344c8974639b7b7828f529ed86b088aa | 5d7d995e015b762bd0c030722852f42b643eb94c | refs/heads/master | 2020-04-13T18:01:20.383214 | 2016-04-14T11:04:53 | 2016-04-14T11:04:53 | 41,098,356 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 572 | rd | RAPIN.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PsigA.R
\docType{data}
\name{RAPIN}
\alias{RAPIN}
\title{Rapin et al. signatures}
\format{An object of type list with 21 entries, each one representing a
gene signature. The name of each list entry represents the signature's name.}
\source{
\url{http://www.bloodjournal.org/content/123/6/894}
}
\usage{
data(RAPIN)
}
\value{
A list of 21 character vectors, each one containing gene names in
HGNC format.
}
\description{
21 gene pathways obtained from Rapin et al. (2014).
}
\keyword{datasets}
|
0d2754538e5b7c7fb8003d8def1283b791652336 | 619c0ba0282a4c2cb9a1b20a14536ef82dc46e8f | /man/eigenCentrality.Rd | 2caf2e1c976696e5f41d25d8bffa289afe339eff | [] | no_license | SEELab/enaR | 796b51159ca43d2338ef441022e2077db516bc7f | 281a0c71f83fb4659c9300801e41d09729dbd261 | refs/heads/develop | 2023-04-26T01:58:20.788858 | 2023-04-22T20:24:54 | 2023-04-22T20:24:54 | 12,623,293 | 14 | 8 | null | 2018-05-17T22:34:51 | 2013-09-05T16:52:53 | R | UTF-8 | R | false | true | 589 | rd | eigenCentrality.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eigenCentrality.R
\name{eigenCentrality}
\alias{eigenCentrality}
\title{the Eigen Centrality of a Network}
\usage{
eigenCentrality(x = "matrix")
}
\arguments{
\item{x}{A matrix defining a network graph.}
}
\value{
Returns the eigenvector-based centrality of the network.
}
\description{
Calculates the centrality of a network using eigenvectors.
}
\references{
Bonacich, P., 1987. Power and centrality: a family of measures.
American Journal of Sociology 92: 1170-1182.
}
\author{
Stuart R. Borrett Matthew K. Lau
}
|
8c280c90a0c0d88b4e4c57139e38205db3d0de01 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/NISTunits/examples/NISTpoundPerInchTOkgPerMeter.Rd.R | 1c2ac64d1b4e3b97ae7752278c67188561b785a0 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 238 | r | NISTpoundPerInchTOkgPerMeter.Rd.R | library(NISTunits)
### Name: NISTpoundPerInchTOkgPerMeter
### Title: Convert pound per inch to kilogram per meter
### Aliases: NISTpoundPerInchTOkgPerMeter
### Keywords: programming
### ** Examples
# Example extracted from the NISTunits help page: converts a linear density
# of 10 lb/in to kg/m (conversion factor is defined by the package).
NISTpoundPerInchTOkgPerMeter(10)
|
9211d88f6b5ad4cb6f4fa18a446321f2d26844bb | b21fddcfd1727e9c4e6c587122ff7c9ed78d0557 | /update-docs.R | 42cf51b4994720e2ff0ba295c678a6d4365dc741 | [] | no_license | dylanbeaudette/process-kssl-snapshot | bae4ddfb64c1c6f4f6555c4d70c7fe792c5495cf | f873b50823336a61725e21c727353e23edc2d6e9 | refs/heads/master | 2021-01-09T21:46:20.968821 | 2020-03-18T21:51:07 | 2020-03-18T21:51:07 | 47,850,591 | 7 | 2 | null | null | null | null | UTF-8 | R | false | false | 146 | r | update-docs.R |
# Refresh the documentation listing for the cached KSSL snapshot: load the
# cached object and print its horizon- and site-level variable names as
# markdown bullet lists.
load(file='S:/NRCS/Lab_Data/cached-data/kssl-SPC.Rda')
# `lab` is the object restored by load() above. horizonNames()/siteNames()
# are not defined here -- presumably aqp accessors; assumes that package is
# already attached (TODO confirm).
cat(paste0(' * ', horizonNames(lab), '\n'))
cat(paste0(' * ', siteNames(lab), '\n'))
|
e04e7ffa41f97dd773ebbce2e445da49779ddd4c | 7266285230fb1c7490b7bd161fab1180e4ac735d | /cachematrix.R | bdbc4547cfc67404a2626d089204e2942761ad9f | [] | no_license | rjonczy/ProgrammingAssignment2 | 40e2d9a0d5c8a6a102870a16cf613b2e4f8c39e7 | 066737a7f207a9aeb70a18510ccc25d96b18a08f | refs/heads/master | 2021-01-18T06:09:33.559157 | 2015-03-17T09:44:44 | 2015-03-17T09:44:44 | 32,326,115 | 0 | 0 | null | 2015-03-16T13:19:23 | 2015-03-16T13:19:23 | null | UTF-8 | R | false | false | 2,389 | r | cachematrix.R | #
# Example:
#
# 1.) create a matrix
# > A <- matrix(c(-1, -2, 1, 1), 2, 2)
#
# 2.) create a inverse cacheable matrix object
# > Ac <- makeCacheMatrix(A)
#
# 3.) calculate inverse for a 1st time
# > cacheSolve(Ac)
# [,1] [,2]
# [1,] 1 -1
# [2,] 2 -1
#
# 4.) calcute inverse (for the same matrix) 2nd time
# > cacheSolve(Ac)
# getting cached data
# [,1] [,2]
# [1,] 1 -1
# [2,] 2 -1
# 5.) check if we got correct matrix inverse
# > Ac$get() %*% cacheSolve(Ac)
# getting cached data
# [,1] [,2]
# [1,] 1 0
# [2,] 0 1
#
# function makeCacheMatrix - creates a special "matrix" object that can cache its inverse,
# as input it takes matrix and returns special "matrix" object
#
# Wrap a matrix together with a memoised copy of its inverse. The returned
# list offers set/get for the matrix and setInverse/getInverse for the cache;
# replacing the matrix invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL

  list(
    set = function(y) {
      x <<- y
      inv_cache <<- NULL
    },
    get = function() x,
    setInverse = function(i) inv_cache <<- i,
    getInverse = function() inv_cache
  )
}
#
# function cacheSolve - computes the inverse of special "matrix" object returned by makeCacheMatrix function
# - if inverse has been already calculated (and matrix has not chnaged) than function returns inverse from cache
# - if inverse has not been calculated before (or matrix changed), calculate it and return
#
# Return the inverse of the special "matrix" object produced by
# makeCacheMatrix: reuse the cached inverse when present, otherwise compute
# it with solve() and store it for later calls.
cacheSolve <- function(x, ...) {
  result <- x$getInverse()

  if (is.null(result)) {
    # Cache miss: invert the stored matrix and remember the answer.
    result <- solve(x$get(), ...)
    x$setInverse(result)
  } else {
    message("getting cached data")
  }

  result
}
|
752076b20237ca6575f02cead4cc0bf3f513f71e | 1b49c5d2e3d0f7e9310891ddb7bdc01876d7983d | /man/ch.globalFilterByQuantile.Rd | 4bf515ba09ad1328d47faca676dfbfb1265b11c1 | [] | no_license | ccpluncw/ccpl_R_chMorals | 1eb2442104e72d0df4e0ed7a3384c14b78a52a82 | ffc69bafe71b791c452787e267a15a532b0fde00 | refs/heads/master | 2023-05-04T15:02:05.404269 | 2023-04-16T00:35:25 | 2023-04-16T00:35:25 | 139,591,832 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,678 | rd | ch.globalFilterByQuantile.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ch.globalFilterByQuantile.r
\name{ch.globalFilterByQuantile}
\alias{ch.globalFilterByQuantile}
\title{A function to filter RT data based on the coefficient of variation.}
\usage{
ch.globalFilterByQuantile(
data,
snCol,
RTcol,
CVlowQuantileThreshold = 0,
CVhighQuantileThreshold = 1,
statsOutputFile = NULL
)
}
\arguments{
\item{data}{A dataframe containing the choice RT data. (typically after running through ch.moralsDataPrep()).}
\item{snCol}{a string that specifies the name of the column in "data" that contains the subject number.}
\item{RTcol}{a string that specifies the name of the column in "data" that contains the RT for each trial.}
\item{CVlowQuantileThreshold}{A number specifying the quantile that all subjects whose coefficient of variation falls below it will be removed from the dataset. DEFAULT = 0 (none removed)}
\item{CVhighQuantileThreshold}{A number specifying the quantile that all subjects whose coefficient of variation falls above it will be removed from the dataset. DEFAULT = 1 (none removed)}
\item{statsOutputFile}{the filename that you want the statistic summary output written to. DEFAULT = NULL (no file written)}
}
\value{
a dataframe of filtered data.
}
\description{
This function filters RT data based on the coefficient of variation. Specifically, it removes subjects based on their variability of RTs relative to the subjects in the dataset (by quantile).
}
\examples{
ch.globalFilterByQuantile (data=moralsData, "sn", "RT", CVlowQuantileThreshold = 0, CVhighQuantileThreshold = 0.95)
}
\keyword{filter}
\keyword{global}
\keyword{quantile}
|
ee0e3494ef8a64cc55d658756d99f23f9daf6b50 | 137e06395bbd61fccbe0e6ce307cb87479343822 | /man/Fixpoint.test.Rd | 0c517b49da400aebae285934e5d46a8cb0c05dd5 | [] | no_license | cran/ComparisonSurv | fd87f9f97533b6fac23b17db79f393bd55b43479 | 5dbc3611c5fd4a74059100ede980cdf0b8e7784d | refs/heads/master | 2022-09-01T14:17:46.377625 | 2022-08-25T09:42:39 | 2022-08-25T09:42:39 | 236,574,717 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,123 | rd | Fixpoint.test.Rd | \name{Fixpoint.test}
\alias{Fixpoint.test}
\title{Statistical inference methods for testing at a fixed time point}
\description{
A function used to produce the results of various statistical inference methods for testing at a fixed time point.
}
\usage{
Fixpoint.test(time, status, group, t0)
}
\arguments{
\item{time}{The follow up time for right censored data.}
\item{status}{The status indicator, normally 1=event, 0=alive or right censored.}
\item{group}{The group indicator for comparison, and the elements of this vector must take either 0 or 1. Normally, 0= control group, 1= treatment group.}
\item{t0}{The fixed time point for testing.}
}
\value{
A list containing the following components:
\item{est.g0}{The estimation of survival rates at the fixed timepoint for control group.}
\item{est.g1}{The estimation of survival rates at the fixed timepoint for treatment group.}
\item{test}{The results of statistical inference at the fixed timepoint.}
\item{method}{Containing 5 types of statistical inference methods: naive, log, cloglog, arcsin, and logist. See more details in references.}
\item{t0}{The prespecified fixed time point.}
\item{est}{The survival rate at the prespecified timepoint.}
\item{lower.95.CI}{The lower 95 percent pointwise confidence interval for the survival function.}
\item{upper.95.CI}{The upper 95 percent pointwise confidence interval for the survival function.}
\item{statistic}{The statistics of corresponding methods.}
\item{pvalue}{The test P value of corresponding methods.}
}
\references{
[1]Klein JP, Logan B, Harhoff M, et al. Analyzing survival curves at a fixed point in time. Statistics in Medicine, 2007, 26(24):4505-4519.
[2]Anderson JR, Pike LBC. Approximate Confidence Intervals for Probabilities of Survival and Quantiles in Life-Table Analysis. Biometrics, 1982, 38(2):407-416.
}
\examples{
#get 'Crossdata' from package
data(Crossdata)
data1<-Crossdata
#
# test whether group differences exist at 6 months (t0 = 0.5) and 12 months (t0 = 1)
Fixpoint.test(data1$time,data1$status,data1$group,t0=0.5)
Fixpoint.test(data1$time,data1$status,data1$group,t0=1)
}
|
1e927d5c3a2a5b21b941d87cfb7106b0c11b6b77 | ab21be7a9e8245f8dd05057d93620ae7b206df66 | /man/random_OHLC.Rd | 72533c739e128761e560afc109e3db3ab8966e7d | [] | no_license | hedgefair/HighFreq | 1ac99967821eec3ffd862154768ac1f68223e5a6 | 0db7ba03a339be44ada225c63cba466f9b657d29 | refs/heads/master | 2021-01-22T06:59:09.901372 | 2016-08-27T13:06:55 | 2016-08-27T13:06:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,877 | rd | random_OHLC.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HighFreq.R
\name{random_OHLC}
\alias{random_OHLC}
\title{Calculate a random \code{OHLC} time series of prices and trading volumes, in
\code{xts} format.}
\usage{
random_OHLC(oh_lc = NULL, re_duce = TRUE, ...)
}
\arguments{
\item{oh_lc}{\code{OHLC} time series of prices and trading volumes, in
\code{xts} format.}
\item{re_duce}{\code{Boolean} should \code{oh_lc} time series be transformed
to reduced form? (default is \code{TRUE})}
}
\value{
an \code{xts} time series with the same dimensions and the same time
index as the input \code{oh_lc} time series.
}
\description{
Calculate a random \code{OHLC} time series of prices and trading volumes,
either by generating random log-normal prices, or by randomly sampling from
an input time series.
}
\details{
If the input \code{oh_lc} time series is \code{NULL} (the default),
then a synthetic minutely \code{OHLC} time series of random log-normal
prices is calculated, over the two previous calendar days.
If the input \code{oh_lc} time series is not \code{NULL}, then the rows of
\code{oh_lc} are randomly sampled, to produce a random time series.
If \code{re_duce} is \code{TRUE} (the default), then the \code{oh_lc} time
series is first transformed to reduced form, then randomly sampled, and
finally converted to standard form.
Note: randomly sampling from an intraday time series over multiple days
will cause the overnight price jumps to be re-arranged into intraday price
jumps. This will cause moment estimates to become inflated compared to the
original time series.
}
\examples{
# create minutely synthetic OHLC time series of random prices
oh_lc <- HighFreq::random_OHLC()
# create random time series from SPY by randomly sampling it
oh_lc <- HighFreq::random_OHLC(oh_lc=SPY["2012-02-13/2012-02-15"])
}
|
4111318660a7e80a5bacac0531ad5c616268ffd2 | 28d121adf069bb7ac8578d673236364672e3aade | /man/prettyRadioButtonsFieldSet.Rd | 9f6bbf2710339119942ce5b81e5318548852c9bc | [] | no_license | cusom/CUSOM.ShinyHelpers | e8d1a3d31925137d1033a63ebd12ec8b2150993f | c562d0762d739bd8fc983c8eb37208105d4e3060 | refs/heads/master | 2023-09-03T08:01:26.224068 | 2023-08-18T14:52:39 | 2023-08-18T14:52:39 | 299,706,304 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,075 | rd | prettyRadioButtonsFieldSet.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prettyRadioButtonsFieldSet.R
\name{prettyRadioButtonsFieldSet}
\alias{prettyRadioButtonsFieldSet}
\title{Create a group of radio buttons with common input}
\usage{
prettyRadioButtonsFieldSet(
inputId,
fieldSetData,
label,
selected = NULL,
status = "primary",
shape = c("round", "square", "curve"),
outline = FALSE,
fill = FALSE,
thick = FALSE,
animation = NULL,
icon = NULL,
plain = FALSE,
bigger = FALSE,
inline = FALSE,
width = NULL
)
}
\arguments{
\item{inputId}{The \code{input} slot that will be used to access the value.}
\item{fieldSetData}{A \code{tibble} of values used to build group of radio buttons.}
\item{label}{Display label for the control.}
\item{selected}{The values that should be initially selected,
(if not specified then defaults to the first value).}
\item{status}{Add a class to the radio,
you can use Bootstrap status like 'info', 'primary', 'danger', 'warning' or 'success'.}
\item{shape}{Shape of the radio between \code{square}, \code{curve} and \code{round}.}
\item{outline}{Color also the border of the radio (\code{TRUE} or \code{FALSE}).}
\item{fill}{Fill the radio with color (\code{TRUE} or \code{FALSE}).}
\item{thick}{Make the content inside radio smaller (\code{TRUE} or \code{FALSE}).}
\item{animation}{Add an animation when radio is checked, a value between
\code{smooth}, \code{jelly}, \code{tada}, \code{rotate}, \code{pulse}.}
\item{icon}{Optional, display an icon on the radio, must be an icon created with \code{icon}.}
\item{plain}{Remove the border when radio is checked (\code{TRUE} or \code{FALSE}).}
\item{bigger}{Scale the radio a bit bigger (\code{TRUE} or \code{FALSE}).}
\item{inline}{If \code{TRUE}, render the choices inline (i.e. horizontally).}
\item{width}{The width of the input, e.g. `400px`, or `100\%`.}
}
\value{
A Shiny UI element containing the group of radio buttons built from \code{fieldSetData}.
}
\description{
Create a group of radio buttons with common input
}
|
9cd4859cc7c00be87051b881094a931b3ce23a08 | 6e0e933f3db3419a454e2e0c1ed58482235aa5a9 | /jpsurv/JPSurvWrapper.R | 8196e88d380bfbebc9297d5ebb0bf2858091d62f | [] | no_license | danielwu5960/nci-webtools-dccps-seer | 500f9ee885e3fc96dcb85063096dd945fddd339f | cd5088333aea69f0325ef1554fb3aaf749ac8d8a | refs/heads/master | 2021-01-22T16:21:16.363599 | 2015-06-23T17:09:03 | 2015-06-23T17:09:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,011 | r | JPSurvWrapper.R | library('rjson')
library('JPSurv')
VERBOSE=TRUE
# Write a JSON overview of a SEER dictionary file to form-<tokenId>.json
# inside `path`, then hand the token back to the caller.
getDictionary <- function (inputFile, path, tokenId) {
  dictionaryPath = file.path(path, inputFile)
  overviewPath = file.path(path, paste0("form-", tokenId, ".json"))
  overview = dictionary.overview(dictionaryPath)
  cat(toJSON(overview), file = overviewPath)
  tokenId
}
# Build the string form of a subset expression: one equality term per cohort
# variable plus the year-of-diagnosis range, all joined with "&". Variable
# names are normalised via getCorrectFormat() and wrapped in backticks.
getSubsetStr <- function (yearOfDiagnosisVarName, yearOfDiagnosisRange, cohortVars, cohortValues) {
  year_var = paste0("`", getCorrectFormat(yearOfDiagnosisVarName), "`")
  year_clause = paste(
    paste(year_var, ">=", yearOfDiagnosisRange[1]),
    paste(year_var, "<=", yearOfDiagnosisRange[2]),
    sep = "&"
  )

  cohort_terms = paste0("`", getCorrectFormat(cohortVars), "`")
  cohort_clause = paste(paste(cohort_terms, cohortValues, sep = "=="), collapse = "&")

  paste(cohort_clause, year_clause, sep = "&")
}
# Build a one-sided model-formula string treating each covariate as a factor,
# e.g. "~-1+factor(`Breast_stage`)". Returns NULL when covariateVars is "".
getFactorStr <- function (covariateVars) {
  if (nchar(covariateVars) == 0) {
    return(NULL)
  }
  backticked = paste0("`", getCorrectFormat(covariateVars), "`")
  factor_terms = paste0("factor(", backticked, ")")
  paste0("~-1+", paste(factor_terms, collapse = "+"))
}
# Normalise a SEER variable label: spaces become underscores, then every
# character other than letters, digits, "_" and "/" is stripped.
getCorrectFormat <- function(variable) {
  underscored <- gsub(" ", "_", variable, fixed = TRUE)
  gsub("[^[:alnum:]_/]", "", underscored)
}
# Translate a jpsurv request (JSON string) into a call to getGraph():
# resolves the cached fit file, the interval/covariate selections, and the
# destination PNG path for this token/image id.
getGraphWrapper <- function (filePath, jpsurvDataString) {
  jpsurvData = fromJSON(jpsurvDataString)

  fittedResultFile = paste0("output-", jpsurvData$tokenId, ".rds")
  # Read selections straight from the parsed request. (The original first
  # assigned hard-coded placeholders and immediately overwrote them; that
  # dead code is removed.)
  intervals = jpsurvData$plot$form$intervals
  covariateValues = jpsurvData$plot$form$covariateVars

  outputGraphFile = paste0("plot-", jpsurvData$tokenId, "-",
                           jpsurvData$plot$static$imageId, ".png")
  outputGraphFile = file.path(filePath, outputGraphFile)

  getGraph(filePath, fittedResultFile, intervals, covariateValues, outputGraphFile)
}
#filePath="."
#fittedResultFile="Breast_RelativeSurvival.output"
#intervals=c(5,10)
#covariateValues=c("Localized", "Distant")
#outputGraphFile="./Breast_RelativeSurvival123.png"
# Render the survival plot for a previously fitted model.
#
# filePath/fittedResultFile locate the RDS written by getFittedResult()
# (a list with elements seerdata and fit.result); intervals/covariateValues
# are forwarded to the JPSurv plot method; outputGraphFile is the full path
# of the PNG to produce.
getGraph <- function (filePath, fittedResultFile, intervals, covariateValues, outputGraphFile) {
  outputData = readRDS(file.path(filePath, fittedResultFile))
  fit.result = outputData$fit.result
  # One NA per selected covariate: every covariate is passed as categorical.
  continuousVector = rep(NA, length(covariateValues))
  png(file = outputGraphFile)
  # Close the device even if plot() errors, so a failed request does not
  # leak an open graphics device (the original called dev.off() directly).
  on.exit(dev.off(), add = TRUE)
  plot(fit.result, Intervals = intervals,
       covar.continuous = continuousVector,
       covar.cat = covariateValues)
}
# Unpack a jpsurv request (JSON string) and run the joinpoint fit.
# Returns the APC table JSON produced by getFittedResult().
getFittedResultWrapper <- function (filePath, jpsurvDataString) {
  jpsurvData = fromJSON(jpsurvDataString)

  seerFilePrefix = jpsurvData$calculate$static$seerFilePrefix
  yearOfDiagnosisVarName = jpsurvData$calculate$static$yearOfDiagnosisVarName
  yearOfDiagnosisRange = jpsurvData$calculate$form$yearOfDiagnosisRange
  allVars = jpsurvData$calculate$static$allVars
  cohortVars = jpsurvData$calculate$form$cohortVars
  cohortValues = jpsurvData$calculate$form$cohortValues
  covariateVars = jpsurvData$calculate$form$covariateVars
  numJP = jpsurvData$calculate$form$joinPoints

  # Cached-fit destination: <filePath>/output-<tokenId>.rds. (The original
  # built this path twice; the first computation was dead code.)
  outputFileName = file.path(filePath, paste0("output-", jpsurvData$tokenId, ".rds"))

  return (getFittedResult(filePath, seerFilePrefix, yearOfDiagnosisVarName,
                          yearOfDiagnosisRange, allVars, cohortVars, cohortValues,
                          covariateVars, numJP, outputFileName))
}
#filePath="C:/devel/R"
#seerFilePrefix="SEER9_Survival_6CancerSitesByStage_1975_2007"
#yearOfDiagnosisVarName="Year of diagnosis (75-07 individual)"
#yearOfDiagnosisRange=c(1975, 2007)
#allVars=c("Sites: CR LB B O P T","Sex Male or Female","SEER historic stage A (All/loc/reg/dist)", "Year of diagnosis (75-07 individual)")
#cohortVars=c("Sites: CR LB B O P T")
#cohortValues=c("\"Colon and Rectum\"")
#covariateVars=c("SEER historic stage A (All/loc/reg/dist)")
#numJP=1
#outputFileName="SEER9_Survival_6CancerSitesByStage_1975_2007.output"
#filePath="C:/devel/R"
#seerFilePrefix="Breast_RelativeSurvival"
#yearOfDiagnosisVarName="Year of diagnosis 1975"
#yearOfDiagnosisRange=c(1975, 2011)
#allVars=c("Age groups","Breast stage","Year of diagnosis 1975")
#cohortVars=c("Age groups")
#cohortValues=c("\"65+\"")
#covariateVars=c("Breast stage")
#numJP=1
#outputFileName="Breast_RelativeSurvival.output"
# Fit a joinpoint relative-survival model for one cohort selection and cache
# the result.
#
# Loads the SEER text data at <filePath>/<seerFilePrefix>, restricts it to the
# requested cohort values and year-of-diagnosis range, fits a joinpoint model
# with at most `numJP` joinpoints, saves list(seerdata, fit.result) as an RDS
# at `outputFileName`, and returns the fit's apc component serialised as JSON.
getFittedResult <- function (filePath, seerFilePrefix, yearOfDiagnosisVarName, yearOfDiagnosisRange, allVars, cohortVars, cohortValues, covariateVars, numJP, outputFileName) {
  # Debug echo of every input, gated by the module-level VERBOSE flag.
  if(VERBOSE) {
    cat("*filePath\n")
    cat(filePath)
    cat("\n")
    cat("*seerFilePrefix\n")
    cat(seerFilePrefix)
    cat("\n")
    cat("*yearOfDiagnosisVarName\n")
    cat(yearOfDiagnosisVarName)
    cat("\n")
    cat("*yearOfDiagnosisRange")
    cat("\n")
    print(yearOfDiagnosisRange, row.names=FALSE)
    cat("\n")
    cat("*allVars\n")
    print(allVars, row.names=FALSE)
    cat("*cohortVars\n")
    print(cohortVars, row.names=FALSE)
    cat("*cohortValues\n")
    print(cohortValues, row.names=FALSE)
    cat("*covariateVars\n")
    print(covariateVars, row.names=FALSE)
    cat("*numJP\n")
    print(numJP, row.names=FALSE)
    cat("*outputFileName\n")
    print(outputFileName, row.names=FALSE)
    cat("\n****\n")
  }
  file=paste(filePath, seerFilePrefix, sep="/" )
  # Variable labels must match the normalised names used in the subset and
  # formula strings built below.
  varLabels=getCorrectFormat(allVars)
  # Load the SEER data. NoFit=T: presumably parse only, no fitting at load
  # time -- TODO confirm against the JPSurv documentation.
  seerdata = joinpoint.seerdata(seerfilename=file,
                newvarnames=varLabels,
                NoFit=T,
                UseVarLabelsInData=varLabels)
  subsetStr=getSubsetStr(yearOfDiagnosisVarName, yearOfDiagnosisRange, cohortVars, cohortValues)
  #assign subsetStr in the global in order for eval(parse(text=)) to work
  assign("subsetStr", subsetStr, envir = .GlobalEnv)
  # Same global-environment trick for the model-formula string (may be NULL
  # when no covariates were selected; see getFactorStr()).
  factorStr=getFactorStr(covariateVars)
  assign("factorStr", factorStr, envir= .GlobalEnv)
  # Fit the joinpoint model on the selected subset; both the subset and the
  # model formula are evaluated from their string representations.
  fit.result=joinpoint(seerdata,
           subset = eval(parse(text=subsetStr)),
           year=getCorrectFormat(yearOfDiagnosisVarName),
           observedrelsurv="Relative_Survival_Cum",
           model.form = eval(parse(text=factorStr)),
           maxnum.jp=numJP);
  #save seerdata and fit.result as RData
  #cat("***outputFileName")
  #cat(outputFileName)
  #cat("\n")
  # Cache the data + fit so later graph/download requests can reuse them.
  outputData=list("seerdata"=seerdata, "fit.result"=fit.result)
  saveRDS(outputData, outputFileName)
  #cat("\n\nOutput file has been written: ")
  #cat(outputFileName)
  #cat("\n")
  # Return the annual-percent-change table to the web layer as JSON.
  apcJson=paste(toJSON(fit.result$apc))
  return (apcJson)
}
# Build the CSV download for a previously fitted model: reconstructs the
# subset expression from the request, regenerates the full output table via
# getDownloadOutput(), and returns the (relative) CSV file name.
getDownloadOutputWrapper <- function (filePath, jpsurvDataString) {
  jpsurvData = fromJSON(jpsurvDataString)

  yearOfDiagnosisVarName = jpsurvData$calculate$static$yearOfDiagnosisVarName
  yearOfDiagnosisRange = jpsurvData$calculate$form$yearOfDiagnosisRange
  cohortVars = jpsurvData$calculate$form$cohortVars
  cohortValues = jpsurvData$calculate$form$cohortValues

  subsetStr = getSubsetStr(yearOfDiagnosisVarName, yearOfDiagnosisRange,
                           cohortVars, cohortValues)
  # Assigned into the global environment because downstream JPSurv calls
  # evaluate the expression via eval(parse(text=...)) (see getFittedResult).
  assign("subsetStr", subsetStr, envir = .GlobalEnv)

  # (The original also built an unused "link-<token>.rds" path -- twice;
  # that dead code is removed.)
  fittedResultFile = paste0("output-", jpsurvData$tokenId, ".rds")
  downloadFile = paste0("link-", jpsurvData$tokenId, ".csv")
  getDownloadOutput(filePath, fittedResultFile, subsetStr, downloadFile)
  return (downloadFile)
}
# Read the cached seerdata + fit from <filePath>/<fittedResultFile> and write
# the full output table to <filePath>/<downloadFile> as CSV.
getDownloadOutput <- function(filePath, fittedResultFile, subsetStr, downloadFile) {
  fitted = readRDS(file.path(filePath, fittedResultFile))
  overview = output.overview(fitted$seerdata, fitted$fit.result, subsetStr)
  write.csv(overview, file.path(filePath, downloadFile))
}
|
2a31687fec9f51e50ab8dc8044d96e2ec9803dba | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/lmomRFA/examples/regquant.Rd.R | 2e8362dc91c6d9ac5117d496a8c04b852b61a5cf | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 558 | r | regquant.Rd.R | library(lmomRFA)
### Name: regquant
### Title: Quantiles and quantile function of a regional frequency
###   distribution
### Aliases: regqfunc regquant
### Keywords: misc
### ** Examples
# Fit the regional distribution once and reuse the fitted object below
# (the original example redundantly refitted it inside the regquant() call).
rfit <- regfit(Cascades,"gno") # Fit regional distribution
# Compute some quantiles
regquant(seq(0.1, 0.9, by=0.1), rfit)
# Get the quantile function (regional growth curve)
rgc <- regqfunc(rfit)
# Compute quantiles by evaluating the regional growth curve
rgc(seq(0.1, 0.9, by=0.1))
# Plot the regional growth curve
curve(rgc, 0.01, 0.99)
|
94f415ea8e2cc581418289635e7eb66924045f35 | 4b5d28e6a031fc0fae5f3da40ffb1b593c18cd48 | /R/summary.q3.R | 66384cb6a7526221a4a7bf26ab3af744128edd2e | [] | no_license | cran/pairwise | 446b603f99c237bda8dcc291eeeb916b226eaccc | 1f355868d5fb52777e39d5aced27c934984b527b | refs/heads/master | 2023-04-28T04:42:27.164640 | 2023-04-17T20:10:02 | 2023-04-17T20:10:02 | 17,698,183 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,251 | r | summary.q3.R | #' @title S3 Summary for Q3 Fit Statistic
#' @exportS3Method summary q3
#' @keywords methods
#' @method summary q3
#' @description S3 summary method for object of class\code{"q3"}
#' @param object object of class\code{"q3"}
#' @param maxrc numerical with default \code{maxrc=3} to specify the output of the maximum number of highest residual correlations in terms of absolute value.
#' @param ... other parameters passed trough
########################### hier die summary method fuer pair #############################
summary.q3<-function(object, maxrc=3, ...){
  # Header: which correlation type / missing-data treatment / residual type
  # were used when the Q3 object was computed.
  cat("Yens (1984) Q3 statistic based on", object$resid_cor$type, "correlation: \n")
  cat("- missing treatment:", object$resid_cor$use, " \n")
  cat("- type of residuals used:", object$residuals$type, " \n")
  print(object$statistic$Q3)
  #object$resid_cor$cor
  # Flatten the residual-correlation matrix to a vector of unique,
  # non-missing values ...
  cors <- unique(na.omit(c(object$resid_cor$cor)))
  # ... and label each with "item1~item2" from all pairwise column
  # combinations. NOTE(review): this assumes length(cors) == choose(ncol, 2);
  # if two item pairs share an identical correlation value, unique() shortens
  # `cors` and the names misalign -- verify upstream.
  names(cors) <- apply(X = (combn(colnames(object$resid_cor$cor),m = 2)),MARGIN = 2,FUN = function(x){paste(x,collapse = "~")})
  # Names of the maxrc largest |correlations|. (Style: `T` should be TRUE;
  # also, 1:maxrc yields NA entries when fewer than maxrc pairs exist.)
  index <- names(sort(abs(cors),decreasing = T)[1:maxrc])
  cat("Results for the",maxrc,"highest residual correlations in terms of absolute value: \n")
  print(cors[index])
  # Return the Q3 statistic and the top correlations invisibly so the summary
  # can be captured without re-printing.
  invisible((list(statistic=object$statistic$Q3, maxrc=cors[index])))
}
|
ab525f9edc034af6204ea9cf8d1a68cfbdfb62db | 82d6329ef1162e6ae4e8481015abfa218c556026 | /data-raw/load_data.R | de87874f5142ccc7795cc843c311f0763925fc8b | [] | no_license | rsait/ORdensity | 8d33fa3a4eb5d03b79766171e1f0915abd6115ac | 4536aefc1c654abba67342cc81256cedfb72fb18 | refs/heads/master | 2020-05-20T03:14:11.816580 | 2019-09-06T07:54:38 | 2019-09-06T07:54:38 | 185,353,939 | 1 | 0 | null | 2019-09-05T12:25:44 | 2019-05-07T08:12:22 | R | UTF-8 | R | false | false | 77 | r | load_data.R | simexpr <- read.table("simexpr.dat", header=TRUE)
usethis::use_data(simexpr)
|
45a86a21c9cdf87d8bcbb39b84b8fda8a94415b8 | 25a1006af4e8d97d223058a97044b974a1deafab | /man/crossoverCDFvector.Rd | 432e3b5615925258563fe048505413396688b74a | [] | no_license | adimitromanolakis/sim1000G | 8eb932f1b7ea8457d3124879f0d19cc369d6b2a5 | a07bd8f4cf69b7d3f5a24138e8f799d61025f04b | refs/heads/master | 2021-10-10T07:59:55.510857 | 2021-09-24T14:25:28 | 2021-09-24T14:25:28 | 93,431,651 | 16 | 1 | null | null | null | null | UTF-8 | R | false | true | 433 | rd | crossoverCDFvector.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recombination.R
\docType{data}
\name{crossoverCDFvector}
\alias{crossoverCDFvector}
\title{Contains recombination model information.}
\format{An object of class \code{logical} of length 1.}
\usage{
crossoverCDFvector
}
\description{
This vector contains the density between two recombination events, as a cumulative density function.
}
\keyword{datasets}
|
cdbeaadd8176da1dcd33a72ea61c5c63f4bb1719 | a58436d809eb1715d6d1b35ca1ba2564ce6082c0 | /Statistical Inference/project2.R | 85e3bf37391b6b36c9cb7d3749c2079597708d63 | [] | no_license | donelianc/coursera-data-science | 7801066849b7cf16f96d554139e07c70433228f7 | 62cb135167e9d67ffa900c1c6af15cf7bc814365 | refs/heads/master | 2022-09-08T17:34:40.703864 | 2020-06-02T00:33:20 | 2020-06-02T00:33:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 342 | r | project2.R | # Ian Castillo Rosales
# Project - Statistical Inference - Data Science Specialization
# 10102014
# Set my working directory
setwd("~/Desktop/repos/datasciencecoursera/Statistical Inference")
library(datasets)
data(ToothGrowth)
data <- ToothGrowth
str(data)
summary(data$len)
hist(data$len)
table(data$supp)
table(data$dose)
t.test(data) |
dcd97d5d3281b541ba6e688fc2d4a68bc18a32e1 | c55569483b114d2345b4be85e8e5b0472e84fe2d | /tests/testthat.R | edcc4e480f9200e69eba28acd015d235c8dfa9b3 | [] | no_license | fate-ewi/bayesdfa | e54a367cbeaad95ddd4c810dff56cdafa630164f | 6a6bf9d71e962434b17045286c42cd14a74062ce | refs/heads/main | 2023-06-24T22:10:43.064535 | 2023-06-15T18:40:00 | 2023-06-15T18:40:00 | 80,677,345 | 22 | 9 | null | 2023-09-08T15:27:28 | 2017-02-02T00:00:18 | C++ | UTF-8 | R | false | false | 60 | r | testthat.R | library(testthat)
library(bayesdfa)
test_check("bayesdfa")
|
ac43aceebe2cbe8096a7be457e2713130621b7e7 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/jomo/examples/jomo.glmer.MCMCchain.Rd.R | e0fcc310ce753d0760baebfa1d3a90446077ba3a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 920 | r | jomo.glmer.MCMCchain.Rd.R | library(jomo)
### Name: jomo.glmer.MCMCchain
### Title: glmer Compatible JM Imputation - A tool to check convergence of
### the MCMC
### Aliases: jomo.glmer.MCMCchain
### ** Examples
attach(cldata)
# make sure sex is a factor:
sex<-factor(sex)
# we define the data frame with all the variables
data<-data.frame(measure,age, sex, city)
# And the formula of the substantive lm model
# sex as an outcome only because it is the only binary variable in the dataset...
formula<-as.formula(sex~age+measure+(1|city))
#And finally we run the imputation function:
# imp<-jomo.glmer.MCMCchain(formula,data, nburn=100)
# Note the example is commented out to avoid time consuming examples,
# which go against CRAN policies.
# We can check, for example, the convergence of the first element of beta:
# plot(c(1:100),imp$collectbeta[1,1,1:100],type="l")
|
d4d2dd596d5565f1b45ae0b3ee4ad268bef6b87a | fa880ff9252106dce0d51d250fd6455d44988b6d | /user-crawler.R | 9cd9a9cd15cf9b430aeb7dbaffbf3a9225033cb1 | [
"Apache-2.0"
] | permissive | vuw-sim-stia/twitter-crawler | 51459bbb29a3d0c79435eaa1e03ab2e3deccbb6f | ba376da1661aeeca66adc11ffc9fbf14e235a1f2 | refs/heads/master | 2021-07-10T19:33:05.954273 | 2017-10-13T01:29:18 | 2017-10-13T01:29:18 | 106,761,669 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,385 | r | user-crawler.R | library(twitteR)
consumerKey <- "[your consumer key]"
consumerSecret <- "[your consumer secret]"
accessToken <- "[your access token]"
accessTokenSecret <- "[your token secret]"
setup_twitter_oauth(consumerKey, consumerSecret, accessToken, accessTokenSecret)
users <- c("BarackObama") #put seed list of users
all_tags <- c("#science") #put seed list of tags
all_users <- c() #this will be all users referred to by users from the seed list, we crawl their content as well
tweetIds <- c()
for(user in users){
tweets <- userTimeline(user, n = 3200)
for(tweet in tweets){
tags <- unlist(regmatches(tweet$text,gregexpr("#(\\d|\\w)+",tweet$text)))
usrs <- unlist(regmatches(tweet$text,gregexpr("@(\\d|\\w)+",tweet$text)))
all_tags <- unique(c(all_tags,tags))
all_users <- unique(c(all_users,usrs))
if(length(which(tweetIds==tweet$id)==0)){
tweetIds <- c(tweetIds,tweet$id)
write(paste(tweet$id,tweet$screenName,tweet$text,tweet$replyToSID,tweet$replyToSN,tweet$replyToUID,tweet$isRetweet,tweet$retweetCount,tags,usrs,sep=";"), "collected-tweets.csv" ,append=TRUE)
}
}
}
for(user in all_users){
tweets <- userTimeline(user, n = 3200)
for(tweet in tweets){
tags <- unlist(regmatches(tweet$text,gregexpr("#(\\d|\\w)+",tweet$text)))
usrs <- unlist(regmatches(tweet$text,gregexpr("@(\\d|\\w)+",tweet$text)))
all_tags <- unique(c(all_tags,tags))
all_users <- unique(c(all_users,usrs))
if(length(which(tweetIds==tweet$id)==0)){
tweetIds <- c(tweetIds,tweet$id)
write(paste(tweet$id,tweet$screenName,tweet$text,tweet$replyToSID,tweet$replyToSN,tweet$replyToUID,tweet$isRetweet,tweet$retweetCount,tags,usrs,sep=";"), "collected-tweets.csv" ,append=TRUE)
}
}
}
for(tag in all_tags){
tweets <- searchTwitter(tag, n=3200, resultType = "recent")
for(tweet in tweets){
tags <- unlist(regmatches(tweet$text,gregexpr("#(\\d|\\w)+",tweet$text)))
usrs <- unlist(regmatches(tweet$text,gregexpr("@(\\d|\\w)+",tweet$text)))
all_tags <- unique(c(all_tags,tags))
all_users <- unique(c(all_users,usrs))
if(length(which(tweetIds==tweet$id)==0)){
tweetIds <- c(tweetIds,tweet$id)
write(paste(tweet$id,tweet$screenName,tweet$text,tweet$replyToSID,tweet$replyToSN,tweet$replyToUID,tweet$isRetweet,tweet$retweetCount,tags,usrs,sep=";"), "collected-tweets.csv" ,append=TRUE)
}
}
} |
4309d382ffaa5421667c6e2cba43809da316f7de | 0dbcb6f66ada1c90aa35b3b0ec98a5778a80bbb2 | /PainDev.R | 9418602cb1489a45e4dc5a918b80bcca83f8e447 | [] | no_license | KeelanMc/Chrnoic-Pain-CRT | 354615fc6f726b798a118172cff214065015c7a2 | 25a09254a5617e6bceccdc386bbcaf47d9027473 | refs/heads/main | 2023-03-31T16:36:47.310777 | 2021-03-23T15:02:14 | 2021-03-23T15:02:14 | 350,337,224 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,447 | r | PainDev.R | library(Hmisc)
#'hc114_','hc115_','depsoc','depmat','depsev','ac012_'
cpaindev = phbr54[c('mergeid','ph088_',"ph010d1",'initact','initialage','initialyear',
'country','gender','pasthstay','Loweducation','cciw_w4',
'fdistress','dn010_','ep018_','ep027_','ep028_','ac016_','ac012_'
,initialillvar,futureillvar,variablemixed)]
#Isolating people who developed the pain
cpaindev = cpaindev[cpaindev$ph010d1=='Not selected'&!is.na(cpaindev$ph088_),]
cpaindev = cpaindev[!is.na(cpaindev$cciw_w4),]
population = sum(cpaindev$cciw_w4,na.rm = TRUE)
cpaindev$initialage =as.numeric(levels(cpaindev$initialage)[cpaindev$initialage])
cpaindev$bmi = factor(cpaindev$bmi,levels = c("Normal","Underweight" ,"Overweight","Obese" ))
cpaindev$parent = cpaindev$nchild!='None'
#Removing na's from waiting list question
#cpaindev$hc115_[cpaindev$hc115_ %in% c("Don't know",'Refusal')]=NA
# cpaindev = cpaindev[!is.na(cpaindev$hc115_),]
# cpaindev$hc115_=droplevels(cpaindev$hc115_)
#Removing na's from isolation question
cpaindev$ac016_[cpaindev$ac016_ %in% c("Don't know",'Refusal')]=NA
cpaindev = cpaindev[!is.na(cpaindev$ac016_),]
cpaindev$ac016_=droplevels(cpaindev$ac016_)
cpaindev$ac016_ = factor(cpaindev$ac016_,levels = c("Never","Rarely","Sometimes","Often"))
cpaindev$Isolation = ifelse(cpaindev$ac016_=='Often',1,0)
#cpaindev$hc115_ = factor(cpaindev$hc115_, levels= c('No','Yes'))
#cpaindev$hc114_ = factor(cpaindev$hc114_, levels= c('No','Yes'))
#Removing na's from life satisfaction question
cpaindev$ac012_[cpaindev$ac012_ %in% c("Don't know",'Refusal')]=NA
cpaindev = cpaindev[!is.na(cpaindev$ac012_),]
cpaindev$ac012_ = droplevels(cpaindev$ac012_)
cpaindev$ac012_ = as.numeric(levels(cpaindev$ac012_)[cpaindev$ac012_])
names(cpaindev)[names(cpaindev) == 'ac012_'] <- 'Life_satisfaction'
names(cpaindev)[names(cpaindev) == 'eurod'] <- 'Depression'
names(cpaindev)[names(cpaindev) == 'ph006d8'] <- 'Arthritis'
names(cpaindev)[names(cpaindev) == 'ph006d12initial'] <- 'Parkinsons'
names(cpaindev)[names(cpaindev) == 'ph006d1initial'] <- 'Heart_Problems'
names(cpaindev)[names(cpaindev) == 'mh007_'] <- 'Sleep_Problems'
names(cpaindev)[names(cpaindev) == 'ep027_'] <- 'PhysicalLabour'
names(cpaindev)[names(cpaindev) == 'sphus'] <- 'Self_reported_health'
cpaindev$PhysicalLabour[cpaindev$PhysicalLabour %in% c("Don't know",'Refusal')]=NA
cpaindev$ep018_[cpaindev$ep018_ %in% c("Don't know",'Refusal')]=NA
cpaindevtest = cpaindev[!is.na(cpaindev$PhysicalLabour),]
cpaindevtest$PhysicalLabour=droplevels(cpaindevtest$PhysicalLabour)
cpaindevtest$PhysicalLabour = factor(cpaindevtest$PhysicalLabour,levels = c("No",levels(cpaindevtest$PhysicalLabour),"Yes"))
cpaindevtest$PhysicalLabour[cpaindevtest$PhysicalLabour %in% c("Agree","Strongly agree")] = 'Yes'
cpaindevtest$PhysicalLabour[cpaindevtest$PhysicalLabour %in% c("Disagree","Strongly disagree")] = 'No'
cpaindevtest$PhysicalLabour=droplevels(cpaindevtest$PhysicalLabour)
cpaindev$fdistress2 = cpaindev$fdistress %in% c('With great difficulty','With some difficulty')
andrewlogit <- glm(ph088_ ~ initialage + country +gender+ Depression+bmi
+Loweducation+Self_reported_health+Sleep_Problems+phactiv+Arthritis + Parkinsons+Heart_Problems
,data = cpaindev, family = "binomial")
summary(andrewlogit)
AIC(andrewlogit)
BIC(andrewlogit)
andrewlogit <- glm(ph088_ ~ initialage + country +gender+ Depression+bmi+mobility+parent
+Loweducation+Self_reported_health+Sleep_Problems+phactiv+Arthritis + Parkinsons+Heart_Problems
,data = cpaindev, family = "binomial")
Keelanlogit <- glm(ph088_ ~ initialage + country +gender+ Depression+bmi+mobility+parent
+Loweducation+Self-reported_health+Sleep_Problems+phactiv+Arthritis + Parkinsons+Heart_Problems+
Life_satisfaction+PhysicalLabour+Isolation
,data = cpaindev, family = "binomial")
BIC(Keelanlogit)-BIC(andrewlogit)
#Extracting the coefficients for each country for comparison
countrycoeff = as.data.frame(andrewlogit$coefficients[3:14])
rownames(countrycoeff) = sub("country","",rownames(countrycoeff))
countrycoeff['Austria',]= 0
countrycoeff=cbind(Country = rownames(countrycoeff), countrycoeff)
rownames(countrycoeff) <- 1:nrow(countrycoeff)
|
845e5fbe4d4382cef7bb8c4324e15e847c9ccc2b | 6278ae94cd7fdca1846b0dbacd93f62cbde4b278 | /src/financial/04_getcpb.R | d5c325602e2f843e761724d8d37a56925edc62ba | [
"MIT"
] | permissive | uva-bi-sdad/capitals | 2a0cc445231558534b18a40c05a17a46b155f34f | 4913d8fc79a362016bdb258d04e2a22029e93593 | refs/heads/master | 2023-05-04T20:05:11.266666 | 2021-05-29T16:48:19 | 2021-05-29T16:48:19 | 290,484,318 | 1 | 4 | null | 2020-11-10T19:59:54 | 2020-08-26T12:01:20 | R | UTF-8 | R | false | false | 5,581 | r | 04_getcpb.R | library(readr)
library(dplyr)
library(tidycensus)
library(sf)
library(stringr)
#
# Read in --------------------------------------------------------------------------------
#
# Note: Original file too large for version control. Start with data file for all counties from
# https://www.census.gov/data/datasets/2018/econ/cbp/2018-cbp.html and
# https://www.census.gov/data/datasets/2014/econ/cbp/2014-cbp.html if replicating entire workflow.
# Layout explanations at
# https://www2.census.gov/programs-surveys/cbp/technical-documentation/records-layouts/2018_record_layouts/county-layout-2018.txt
cbpdata14 <- read_csv("./rivanna_data/financial/fin_cbp_2014_orig.txt")
cbpdata18 <- read_csv("./rivanna_data/financial/fin_cbp_2018_orig.txt")
#
# Get ACS --------------------------------------------------------------------------------
#
# Key
readRenviron("~/.Renviron")
Sys.getenv("CENSUS_API_KEY")
# Pull and transform
acsvars <- c("B01003_001")
acsdata14 <- get_acs(geography = "county", state = c(19, 41, 51),
variables = acsvars,
year = 2014, survey = "acs5",
cache_table = TRUE, output = "wide", geometry = TRUE,
keep_geo_vars = TRUE)
acsdata18 <- get_acs(geography = "county", state = c(19, 41, 51),
variables = acsvars,
year = 2018, survey = "acs5",
cache_table = TRUE, output = "wide", geometry = TRUE,
keep_geo_vars = TRUE)
acsdata14 <- acsdata14 %>% transmute(
STATEFP = STATEFP,
COUNTYFP = COUNTYFP,
COUNTYNS = COUNTYNS,
AFFGEOID = AFFGEOID,
GEOID = GEOID,
LSAD = LSAD,
NAME.x = NAME.x,
NAME.y = NAME.y,
geometry = geometry,
totalpop14 = B01003_001E
)
acsdata18 <- acsdata18 %>% transmute(
STATEFP = STATEFP,
COUNTYFP = COUNTYFP,
COUNTYNS = COUNTYNS,
AFFGEOID = AFFGEOID,
GEOID = GEOID,
LSAD = LSAD,
NAME.x = NAME.x,
NAME.y = NAME.y,
geometry = geometry,
totalpop18 = B01003_001E
)
#
# Calculate businesses per 10k --------------------------------------------------------------------------------
#
# Prepare CBP
# NAICS = industry code, ------ is top level
# est = Total Number of Establishments
cbpdata18$GEOID <- paste0(cbpdata18$fipstate, cbpdata18$fipscty)
cbpdata18 <- cbpdata18 %>% filter(fipstate == 41 | fipstate == 51 | fipstate == 19)
cbpdata18 <- cbpdata18 %>% filter(naics == "------")
cbpdata14$GEOID <- paste0(cbpdata14$fipstate, cbpdata14$fipscty)
cbpdata14 <- cbpdata14 %>% filter(fipstate == 41 | fipstate == 51 | fipstate == 19)
cbpdata14 <- cbpdata14 %>% filter(naics == "------")
cbpdata14 <- cbpdata14 %>% rename(est14 = est)
cbpdata14 <- cbpdata14 %>% select(GEOID, est14)
# Join
data14 <- left_join(acsdata14, cbpdata14, by = "GEOID")
data18 <- left_join(acsdata18, cbpdata18, by = "GEOID")
data14 <- data14 %>% select(GEOID, totalpop14, est14) %>% st_set_geometry(NULL)
data <- left_join(data18, data14, by = "GEOID")
#
# Calculate --------------------------------------------------------------------------------
#
# Number of businesses per 10,000 people
data <- data %>% mutate(fin_estper10k = est/totalpop18 * 10000)
# Number of new businesses 2014-18 per 10,000 people
data <- data %>% mutate(fin_newestper10k = fin_estper10k - (est14/totalpop14 * 10000))
#
# Calculate HHIs --------------------------------------------------------------------------------
#
# HHI of employment within that county
# fin_emphhi: Square the share of employment for each industry (naics: ----) within a county, then sum those squared values to receive the HHI for that county.
# HHI of payroll within that county
# fin_aphhi: Square the share of payroll (ap_share) for each industry within a county, then sum those squared values to receive the HHI for that county.
# Prepare
cbpforindex <- read_csv("./rivanna_data/financial/fin_cbp_2018_orig.txt")
cbpforindex <- cbpforindex %>% filter(fipstate == 41 | fipstate == 51 | fipstate == 19)
cbpforindex$GEOID <- paste0(cbpforindex$fipstate, cbpforindex$fipscty)
# Filter to major industries
cbpforindex <- cbpforindex %>% filter(naics != "------")
cbpforindex <- cbpforindex %>% filter(str_detect(naics, "----"))
cbpforindex$naics <- as.factor(cbpforindex$naics)
cbpforindex <- data.frame(cbpforindex)
# Prepare totals dataframe (denominator)
cbpforindex_totals <- data.frame(cbpdata18)
cbpforindex_totals <- cbpforindex_totals %>% select(GEOID, emp, ap)
cbpforindex_totals <- cbpforindex_totals %>% rename(emp_total = emp, ap_total = ap)
# Calculate industry shares, then total index
cbpforindex <- left_join(cbpforindex, cbpforindex_totals, by = "GEOID")
cbpforindex <- cbpforindex %>% mutate(share_emp = emp/emp_total,
share_ap = ap/ap_total,
share_emp_sq = share_emp^2,
share_ap_sq = share_ap^2)
cbpforindex <- cbpforindex %>% group_by(GEOID) %>%
mutate(fin_emphhi = sum(share_emp_sq),
fin_aphhi = sum(share_ap_sq)) %>%
ungroup()
# Get one row
cbpforindex <- cbpforindex %>% select(GEOID, fin_emphhi, fin_aphhi)
cbpforindex <- cbpforindex %>% group_by(GEOID) %>% slice(1) %>% ungroup()
cbpforindex <- data.frame(cbpforindex)
#
# Join to other data --------------------------------------------------------------------------------
#
data <- left_join(data, cbpforindex, by = "GEOID")
#
# Write out --------------------------------------------------------------------------------
#
write_rds(data, "./rivanna_data/financial/fin_cbp_2018.Rds")
|
2b644e6af6281f29119f46972a44af8372052c6a | 6cc8a8bdfca3c0efc56a4899ebeb3d68489b1a91 | /step_Logistic_exp.R | ecc8d123726731457beb17899efa05417c0693bd | [] | no_license | effat/HPC_2 | 8bfe1cc78b24c58c955a13f816a12e3633ff79c3 | 74256e562956f8dd252c5f0d9f754683fa0f063c | refs/heads/master | 2021-05-05T23:14:07.335308 | 2018-01-19T05:45:26 | 2018-01-19T05:45:26 | 116,530,144 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,095 | r | step_Logistic_exp.R |
library('MASS')
library('dplyr')
a<-read.table("data1_clean_2.csv",sep=",",header=T)
##arguments to stepwise regression
classIndex <- 20
remove_atts<-c(20, 1, 4) # classindex, stdID, MCAS
y <-a[,classIndex]
df_subset <-as.data.frame(cbind(a[-remove_atts]))
full<-glm(y ~ ., family = binomial,data = df_subset)
full_summary<-summary(full)
##apply stepwise regression, default direction is both
step<-stepAIC(full, trace = FALSE)
##apply stepwise backward elimination
backward<-step<-stepAIC(full,direction="backward", trace = FALSE)
### final model chosen by stepwise logistic regression
final_default<-names(step$coefficients)
final_back<-names(backward$coefficients)
###print indices of final_back
final_back_ind<-c()
feature_name<-names(a)
for (i in 1:ncol(a)){
if(feature_name[i]%in% final_back)
final_back_ind<-c(final_back_ind, i-1)###python index 0 based
}
cat("att selected ", final_back,"\n")
cat("att selected indices ", final_back_ind,"\n")
### Callable variant of the stepwise-selection script above, so it can be
### invoked from another function/script on a freshly loaded dataset.
###
### Args:
###   a: data frame holding the full dataset; column 20 is taken as the binary
###      outcome, and columns 20 (class), 1 (student ID) and 4 (MCAS) are
###      excluded from the predictor set (positions are hard-coded for this
###      dataset -- TODO confirm they match the current CSV layout).
###
### Side effects: fits a full logistic model, runs MASS::stepAIC, and prints
### the names and 0-based (Python-style) column indices of the predictors
### retained by backward elimination. No meaningful return value.
do_step<-function(a){
  ##arguments to stepwise regression: hard-coded column positions for this dataset
  classIndex <- 20
  remove_atts<-c(20, 1, 4) # classindex, stdID, MCAS
  y <-a[,classIndex]
  # predictor frame: everything except the outcome/ID/MCAS columns
  df_subset <-as.data.frame(cbind(a[-remove_atts]))
  # full model: logistic regression of the outcome on all remaining predictors
  full<-glm(y ~ ., family = binomial,data = df_subset)
  full_summary<-summary(full)
  ##apply stepwise regression, default direction is both
  step<-stepAIC(full, trace = FALSE)
  ##apply stepwise backward elimination
  # NOTE(review): the double assignment below overwrites `step` from the line
  # above, so `final_default` ends up identical to `final_back`; this looks
  # unintentional -- confirm before relying on `final_default`.
  backward<-step<-stepAIC(full,direction="backward", trace = FALSE)
  ### final model chosen by stepwise logistic regression
  final_default<-names(step$coefficients)
  final_back<-names(backward$coefficients)
  ###print indices of final_back
  final_back_ind<-c()
  feature_name<-names(a)
  # map each retained coefficient name back to its column position in `a`
  for (i in 1:ncol(a)){
    if(feature_name[i]%in% final_back)
      final_back_ind<-c(final_back_ind, i-1)###python index 0 based
  }
  cat("att selected ", final_back,"\n")
  cat("att selected indices ", final_back_ind,"\n")
}
|
d0eb8bebe4847aea687979f505e0d1f4d46a0f26 | a22be77ec187759412eb82debf250adb038e7939 | /R HW/Stat Group Assignment 1.R | f2fac99cae62d4dd35933a61bb492390eaadd62a | [] | no_license | meganstiles/Stat_6021 | 47ce8d0fac3414025d3ca8560ee8880ef395dd85 | 79b45e8fe7b6dc37a96f1fd04c8922861f5e4b61 | refs/heads/master | 2021-01-11T15:09:10.465593 | 2017-01-28T18:14:57 | 2017-01-28T18:14:57 | 80,301,097 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 883 | r | Stat Group Assignment 1.R |
## Start with given x-values
x <- read.table("teamassign01data.txt")[,1]
x
## Generate corresponding y-values according to the model y ~ 25 + 4x + e, where e~N(0,var=12^2)
y <- 25 + 4*x + rnorm(100, mean=0, sd = 12)
write.table(y, file = "yvalues.txt")
## Plot the relationship
plot(x,y, pch=20, cex=0.3)
data.set<- cbind(x,y)
data.set<- as.data.frame(data.set)
colnames<- c("x_values", "y_values")
names(data.set)<- colnames
write.csv(data.set, "data.csv")
my.lm<- lm((y_values)~(x_values), data = data.set)
my.lm
#Coefficients = 26.481 (intercept) slope = 3.926
new_prediction<- data.frame(x_values =18)
predict(my.lm, new_prediction, interval = "none")# 99.70545
#Report MSRes
anova(my.lm) #MSRes = 192,528
#Part 2
#b if the linear relationship is strong, then the slope should not vary significantly between models, thus the variance of B1
#should be close to zero
|
8af860f429aed5db9a9d165f1df88a65e347f315 | 277489838022577e592e48513160c13c6c80cb50 | /plot3.R | 488a56abf23c1e5067c63edb0c21f5f322f45482 | [] | no_license | DanOswald/ExData_Plotting1 | e1b5068935b791fb50bf1dbfa3679e26bb78dc30 | e265b2c3a69d0c50abf75af5b8fa7de171be733c | refs/heads/master | 2021-01-15T21:49:01.151988 | 2014-08-10T23:03:45 | 2014-08-10T23:03:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 663 | r | plot3.R | c4w1 <- read.table("../household_power_consumption.txt",
header = TRUE, na.strings = "?", sep = ";")
c4w1[, 1] <- as.Date(c4w1[, 1], format = "%d/%m/%Y")
c4w1 <- c4w1[c4w1[, 1] == "2007-02-01" | c4w1[, 1] == "2007-02-02", ]
z <- strptime(paste(c4w1[, 1], c4w1[, 2]), format = "%Y-%m-%d %H:%M:%S")
png(file = "plot3.png")
plot(z, c4w1[, 7],
type = "n", xlab = "", ylab = "Energy sub metering")
lines(z, c4w1[, 7], col = "black")
lines(z, c4w1[, 8], col = "red")
lines(z, c4w1[, 9], col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off() |
0ca23c28ac1eac587b9a2c0386061044acf9d8a8 | 2defb970de80008d3a5f77728bf3f896832fe2e1 | /data/Data Aggregation Scripts/Count_Waiting_Time_WA.R | 195cc3b12e638326e440ebd6d86a0e9b88fba715 | [] | no_license | FredHutch/COVID_modeling_schools | bd211844ebd00c5977ac6ad0ef8b298aa7d6a8f2 | 98c1a879e1685b78a21427780f0f34941c309034 | refs/heads/master | 2023-08-31T14:37:46.365370 | 2021-10-06T16:36:47 | 2021-10-06T16:36:47 | 413,988,503 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,476 | r | Count_Waiting_Time_WA.R | ####Count Waiting Time Washington####
#Author: Mia Moore
#Date: 4/11/2021
#Purpose: Use aggregated data to count waiting time for alternative variables
#Input:
# REQD_DATA.Rdata
#Output:
# REQD_Data_ALT1.Rdata
# REQD_Data_ALT2.Rdata
library(plyr)
library(lubridate)
load("REQD_DATA_AGGR.Rdata")
REQD_DATA_ALT1.var = c("DATE",
"WEEK",
"AGEGR",
"DEATHS",
"HOSPITALIZATIONS",
"NEGATIVE_TESTS",
"MILD_SYMPTOMATIC_DIAGNOSIS",
"SEVERE_SYMPTOMATIC_DIAGNOSIS",
"SEVERE_SYMPTOMATIC_DIAGNOSIS_EARLY",
"SEVERE_SYMPTOMATIC_DIAGNOSIS_LATE",
"WAITING.TO.BE.DIAGNOSED.MILD",
"WAITING.TO.BE.DIAGNOSED.EARLY",
"WAITING.TO.BE.DIAGNOSED.LATE",
"WAITING.TO.BE.DIAGNOSED.SEVERE")
REQD_DATA_ALT2.var = c("DATE",
"WEEK",
"AGEGR",
"NEGATIVE_TESTS",
"MILD_SYMPTOMATIC_DIAGNOSIS",
"WAITING.TO.BE.DIAGNOSED.MILD",
"WAITING.TO.DIE.HOSPITAL",
"WAITING.TO.DIE.NOT.HOSPITAL",
"HOSPITAL_DEATHS",
"NON_HOSPITAL_DEATHS",
"HOSPITALIZATIONS")
REQD_DATA_BASIC.var = c("DATE",
"WEEK",
"AGEGR",
"CASES",
"DEATHS",
"HOSPITAL_DEATHS",
"NON_HOSPITAL_DEATHS",
"HOSPITALIZATIONS")
# Per-date backlog ("waiting") counts for one age group: for every tracked
# transition, the cumulative number that have entered a state minus the
# cumulative number that have left it, evaluated at each DATE in X.
get.waiting = function(X) {
  # Running backlog: cumulative inflow minus cumulative outflow.
  backlog <- function(entered, resolved) {
    cumsum(entered) - cumsum(resolved)
  }
  out <- data.frame(DATE = X$DATE)
  out$WAITING.TO.BE.DIAGNOSED.MILD   <- backlog(X$MILD_SYMPTOMS_PRE_DIAGNOSIS, X$MILD_SYMPTOMATIC_DIAGNOSIS)
  out$WAITING.TO.BE.DIAGNOSED.SEVERE <- backlog(X$SEVERE_SYMPTOMS_PRE_DIAGNOSIS, X$SEVERE_SYMPTOMATIC_DIAGNOSIS)
  out$WAITING.TO.BE.DIAGNOSED.EARLY  <- backlog(X$SEVERE_SYMPTOMS_PRE_DIAGNOSIS_EARLY, X$SEVERE_SYMPTOMATIC_DIAGNOSIS_EARLY)
  out$WAITING.TO.BE.DIAGNOSED.LATE   <- backlog(X$SEVERE_SYMPTOMS_PRE_DIAGNOSIS_LATE, X$SEVERE_SYMPTOMATIC_DIAGNOSIS_LATE)
  out$WAITING.TO.DIE.HOSPITAL        <- backlog(X$PRE_DEATH_HOSPITALIZATION, X$HOSPITAL_DEATHS)
  out$WAITING.TO.DIE.NOT.HOSPITAL    <- backlog(X$NON_HOSPITAL_PREDEATH_DIAGNOSIS, X$NON_HOSPITAL_DEATHS)
  # Hospitalizations have no matching "resolved" column here, so this is a
  # plain cumulative count.
  out$WAITING.FOR.HOSPITALIZATION    <- cumsum(X$CASES_NO_DEATH_NO_HOSPITALIZATION)
  out
}
REQD_DATA_AUG = merge(ddply(REQD_DATA_AGGR, "AGEGR", .fun = get.waiting), REQD_DATA_AGGR)
REQD_DATA_AUG$WEEK = pmin(week(REQD_DATA_AUG$DATE), 52) + 52 * (year(REQD_DATA_AUG$DATE) - 2020)
REQD_DATA_BASIC = REQD_DATA_AUG[, REQD_DATA_BASIC.var]
REQD_DATA_ALT1 = REQD_DATA_AUG[, REQD_DATA_ALT1.var]
REQD_DATA_ALT2 = REQD_DATA_AUG[, REQD_DATA_ALT2.var]
# Collapse one (WEEK, AGEGR) slice of daily records into a single weekly row:
# every column except the grouping keys and DATE is summed, and the covered
# date range is recorded in STARTDT/ENDDT.
#
# Args:
#   x: data frame for a single week/age-group combination; must contain a
#      DATE column plus numeric count columns.
# Returns: a one-row data frame of column sums with STARTDT/ENDDT appended.
weekly.count = function(x){
  .n = setdiff(names(x), c("WEEK", "AGEGR", "DATE"))
  # drop = FALSE keeps a data frame even when only one count column remains;
  # the original x[, .n] collapsed to a vector there and colSums() failed.
  .x = as.data.frame(t(colSums(x[, .n, drop = FALSE])))
  .x$STARTDT = min(x$DATE)
  .x$ENDDT = max(x$DATE)
  .x
}
write.csv(REQD_DATA_ALT1, file = "REQD_DATA_ALT1.csv")
write.csv(REQD_DATA_ALT2, file = "REQD_DATA_ALT2.csv")
write.csv(REQD_DATA_BASIC, file = "REQD_DATA_BASIC.csv")
save(REQD_DATA_ALT1, file = "REQD_DATA_ALT1.Rdata")
save(REQD_DATA_ALT2, file = "REQD_DATA_ALT2.Rdata")
save(REQD_DATA_BASIC, file = "REQD_DATA_BASIC.Rdata")
REQD_DATA_BASIC_WEEKLY = ddply(REQD_DATA_BASIC, c("WEEK", "AGEGR"), .fun = weekly.count)
REQD_DATA_ALT1_WEEKLY = ddply(REQD_DATA_ALT1, c("WEEK", "AGEGR"), .fun = weekly.count)
REQD_DATA_ALT2_WEEKLY = ddply(REQD_DATA_ALT2, c("WEEK", "AGEGR"), .fun = weekly.count)
#write.csv(REQD_DATA_ALT1_WEEKLY, file = "REQD_DATA_ALT1_WEEKLY.csv")
#write.csv(REQD_DATA_ALT2_WEEKLY, file = "REQD_DATA_ALT2_WEEKLY.csv")
#write.csv(REQD_DATA_BASIC_WEEKLY, file = "REQD_DATA_BASIC_WEEKLY.csv")
#save(REQD_DATA_ALT1_WEEKLY, file = "REQD_DATA_ALT1_WEEKLY.Rdata")
#save(REQD_DATA_ALT2_WEEKLY, file = "REQD_DATA_ALT2_WEEKLY.Rdata")
#save(REQD_DATA_BASIC_WEEKLY, file = "REQD_DATA_BASIC_WEEKLY.Rdata")
WA_DATA_AUG_WEEKLY = ddply(REQD_DATA_AUG, c("WEEK", "AGEGR"), .fun = weekly.count)
write.csv(WA_DATA_AUG_WEEKLY, file = "WA_DATA_AUG_WEEKLY.csv")
save(WA_DATA_AUG_WEEKLY, file = "WA_DATA_AUG_WEEKLY.Rdata") |
e307d4e297ce8d22800919092fd916b1f6d12fbc | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/vsn/examples/vsnLikelihood.Rd.R | e46379444ee1d969b2d15ac4910411a286545a1b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 612 | r | vsnLikelihood.Rd.R | library(vsn)
### Name: logLik-methods
### Title: Calculate the log likelihood and its gradient for the vsn model
### Aliases: logLik-methods logLik,vsnInput-method plotVsnLogLik
### ** Examples
data("kidney")
v = new("vsnInput", x=exprs(kidney),
pstart=array(as.numeric(NA), dim=c(1, ncol(kidney), 2)))
fit = vsn2(kidney)
print(coef(fit))
p = sapply(seq(-1, 1, length=31), function(f) coef(fit)+c(0,0,f,0))
ll = logLik(v, p)
plot(p[3, ], ll[1, ], type="l", xlab=expression(b[1]), ylab=expression(-log(L)))
abline(v=coef(fit)[3], col="red")
plotVsnLogLik(v, coef(fit), whichp=c(1,3), expand=0.2)
|
70570687a99acbcb24c4e8d8421cc2a2d7511eaa | 1df008e2c10fd55064fe9939b441ccf8a6a17f1e | /optparse_template.R | c33fe2331a7e9fdffeeffbb97be8e2b493330849 | [] | no_license | pablo-gar/Rmods | 7a0ede60f449574e236d78e3b47909d5e3c7b1e5 | 10f7407995ffc97413aa77587ff02333e4d10187 | refs/heads/master | 2021-05-23T05:31:56.177761 | 2020-09-10T19:46:35 | 2020-09-10T19:46:35 | 95,050,189 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,326 | r | optparse_template.R | # This a template for the use of optparse
suppressMessages(suppressWarnings({
library("optparse")
}))
# Entry point: parse command-line options and run the analysis.
# Template body -- replace the placeholder section with the real work.
main <- function() {
  opts <- getOpts(positional_arguments = FALSE)
  # Use full element names: `opts$i` / `opts$o` relied on `$` partial
  # matching, which silently returns NULL if another option shares the
  # prefix (and `$o` actually matched the misspelled '--outoput' flag).
  input_file <- opts$input
  out_file <- opts$output
  # DO SOMETHING
}
# Parse and validate the script's command-line options.
#
# Args:
#   positional_arguments: forwarded to optparse::parse_args(); when TRUE the
#     returned object is a list with $options and $args components.
#
# Returns: the object produced by parse_args().
# Stops with an informative error when a required option is missing, an input
# file does not exist, an output file's parent directory does not exist, or a
# required directory does not exist.
getOpts <- function(positional_arguments = FALSE) {

    # Specify the type of options, this will aid for checking the type of opts
    required <- c('input', 'output') # Will throw an error if these are not given
    files <- c('input') # Will throw an error if the path to these options does not exist
    out_files <- c('output') # Will throw an error if the parent dir to these files doesn't exist
    dirs <- c() # Will check that these folders exist

    # Create arg list
    # BUG FIX: the long flag was misspelled '--outoput', so parse_args stored
    # the value under that name and the 'output' checks below always failed.
    option_list <- list(
        make_option(c('-o', '--output'), type = 'character', help = 'Path to output file'),
        make_option(c('-i', '--input'), type = 'character', help = 'Path to input file')
    )

    # DO NOT MODIFY AFTER THIS

    opt_parser <- OptionParser(usage = 'usage: %prog [options]',
                               option_list = option_list,
                               description = 'Calculate median gene expression across regions')
    opt <- parse_args(opt_parser, positional_arguments = positional_arguments)

    # parse_args() nests the named options under $options when positional
    # arguments are requested.
    if (positional_arguments) {
        opt_check <- opt$options
    } else {
        opt_check <- opt
    }

    # Checking for essential arguments ([[ ]] uses exact matching and
    # returns NULL for absent names)
    for (i in required) {
        if (is.null(opt_check[[i]])) {
            stop ('"--', i, '" is a required argument, run with "-h" for help')
        }
    }

    # Checking files exist
    for (i in files) {
        if (!file.exists(opt_check[[i]])) {
            stop ('"--', i, '" "', opt_check[[i]], '" file does not exist')
        }
    }

    # Checking that we can write out files
    for (i in out_files) {
        if (!dir.exists(dirname(opt_check[[i]]))) {
            stop ('"--', i, '" "', opt_check[[i]], '" parent folder does not exist')
        }
    }

    # Checking dirs exist
    for (i in dirs) {
        if (!dir.exists(opt_check[[i]])) {
            stop ('"--', i, '" "', opt_check[[i]], '" folder does not exist')
        }
    }

    return(opt)
}
main()
|
f9e4c0a0c402be37b153a68cfceecc1cd5efc714 | 0f832b9304728f9fa428373846a1a81ff1047869 | /tests/test-all.R | 3b09961de1fa8dfc25740c57ac25849f6a6b07ff | [] | no_license | hrbrmstr/webhose | 237ce249c064ca92216b1cd2431cf15d9a0db26f | 543e24f2aa023bf562e6caf925a62ea51f80a436 | refs/heads/master | 2021-08-08T19:01:02.264913 | 2017-11-10T22:53:17 | 2017-11-10T22:53:17 | 105,010,813 | 13 | 5 | null | 2017-11-03T10:50:22 | 2017-09-27T11:39:29 | R | UTF-8 | R | false | false | 40 | r | test-all.R | library(testthat)
test_check("webhose")
|
7c867d3f9f2b5ccabef29cf3070f39f901d25831 | b482c98ff5065b055887827fb8dc86e3055c6438 | /man/fit_trawl_intersection_LM.Rd | 3a7d17935a44d634e612e2fdb57dc03c338bff3f | [] | no_license | cran/trawl | bdb7d749804b73f5170d348e0614aff4d3d253fd | 4013b1fa141f3ae3c0c13447488e45125a675b9c | refs/heads/master | 2021-06-15T02:31:33.763015 | 2021-02-22T16:30:02 | 2021-02-22T16:30:02 | 145,911,822 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 860 | rd | fit_trawl_intersection_LM.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FitCrossTerms.R
\name{fit_trawl_intersection_LM}
\alias{fit_trawl_intersection_LM}
\title{Finds the intersection of two long memory (LM) trawl sets}
\usage{
fit_trawl_intersection_LM(alpha1, H1, alpha2, H2, LM1, LM2, plotdiag = FALSE)
}
\arguments{
\item{alpha1, H1, alpha2, H2}{parameters of the two long memory trawls}
\item{LM1}{Lebesgue measure of the first trawl}
\item{LM2}{Lebesgue measure of the second trawl}
\item{plotdiag}{binary variable specifying whether or not diagnostic plots
should be provided}
}
\value{
the Lebesgue measure of the intersection of the two trawl sets
}
\description{
Finds the intersection of two long memory (LM) trawl sets
}
\details{
Computes \eqn{R_{12}(0)=\mbox{Leb}(A_1 \cap A_2)} based on two trawl
functions \eqn{g_1} and \eqn{g_2}.
}
|
52d5d8b5c0b5b92c7a22b7a5902071b6bae07aff | dbc2af76893a0b669f2d9a032980c2111bfbc4d5 | /man/bottom_average.Rd | 6aebae4902b36be92a0699d1e9b4753f839cd851 | [
"MIT"
] | permissive | thomasblanchet/gpinter | e974de36c0efd4c8070fb9b8cc0311bb10c356df | 0ce91dd088f2e066c7021b297f0ec3cecade2072 | refs/heads/master | 2022-11-28T11:18:10.537146 | 2022-11-22T16:22:40 | 2022-11-22T16:22:40 | 72,655,645 | 19 | 5 | null | 2017-04-19T08:25:44 | 2016-11-02T15:51:21 | R | UTF-8 | R | false | true | 725 | rd | bottom_average.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distribution-functions.R
\name{bottom_average}
\alias{bottom_average}
\title{Bottom average for generalized Pareto interpolation}
\usage{
bottom_average(dist, p, ...)
}
\arguments{
\item{dist}{An object of class \code{gpinter_dist_orig}, \code{gpinter_dist_indiv},
\code{gpinter_dist_addup} or \code{gpinter_dist_merge}.}
\item{p}{A vector of probabilities in [0, 1].}
\item{...}{Ignored.}
}
\value{
The value of the average of the bottom 100*p%.
}
\description{
Compute the average below the \code{p}-th quantile for a distribution
estimated via generalized Pareto interpolation.
}
\author{
Thomas Blanchet, Juliette Fournier, Thomas Piketty
}
|
02bc54b091a5b814421bea00d67c3a938113d437 | 26c7549bde293f118fe01cb9d8159f68237de17b | /R/fct05_regressionBacktest.R | 11c254a425397e8c3c8c0a4e02c22a7df8acfc8c | [] | no_license | raphael210/RFactorModel | 6c5445dd75970940e082f1393d30ada204f1fbc7 | 29f471a7410b9a34ba6cab89a7dc5a4f24ee17b2 | refs/heads/master | 2020-05-21T22:20:56.345447 | 2019-02-01T07:43:47 | 2019-02-01T07:43:47 | 64,194,730 | 1 | 6 | null | 2017-03-15T05:40:55 | 2016-07-26T06:09:39 | R | UTF-8 | R | false | false | 40,739 | r | fct05_regressionBacktest.R |
# ===================== xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ==============
# --------------------- backtesting with 'regression' method -------------
# ===================== xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ==============
# --------------------- ~~ database update --------------
#' lcdb_regtables
#'
#' build and update local database's regression result tables
#' @name lcdb_regtables
#' @rdname lcdb_regtables
#' @param begT is begin date
#' @param endT is end date
#' @param FactorLists see example in \code{\link{buildFactorLists}}.
#' @examples
#' begT <- as.Date('2005-01-04')
#' endT <- as.Date('2009-12-31')
#' FactorLists <- buildFactorLists(
#' buildFactorList(factorFun="gf.SIZE"),
#' buildFactorList(factorFun="gf.GROWTH"),
#' buildFactorList(factorFun="gf.TRADING"),
#' buildFactorList(factorFun="gf.EARNINGYIELD"),
#' buildFactorList(factorFun="gf.VALUE"),
#' buildFactorList(factorFun="gf.OTHER"))
#' lcdb.build.RegTables(begT,endT,FactorLists)
#' begT <- as.Date('2010-01-04')
#' endT <- as.Date('2014-12-31')
#' lcdb.update.RegTables(begT,endT,FactorLists)
#' @export
lcdb.build.RegTables <- function(begT,endT,FactorLists){
  # Rebuild the three local regression-result tables from scratch:
  #   Reg_FactorRtn -- per-date factor returns / t-stats for 4 holding periods
  #   Reg_Residual  -- per-date per-stock regression residuals
  #   Reg_RSquare   -- per-date regression R-squared
  # Existing tables are dropped first, then recreated with unique indices.
  con <- db.local()
  if(RSQLite::dbExistsTable(con, "Reg_FactorRtn")) RSQLite::dbRemoveTable(con,'Reg_FactorRtn')
  if(RSQLite::dbExistsTable(con, "Reg_Residual")) RSQLite::dbRemoveTable(con,'Reg_Residual')
  if(RSQLite::dbExistsTable(con, "Reg_RSquare")) RSQLite::dbRemoveTable(con,'Reg_RSquare')
  # dbExecute() is the DBI verb for DDL/DML; dbGetQuery() is only for
  # statements that return a result set.
  RSQLite::dbExecute(con,"CREATE TABLE Reg_FactorRtn (
                     date int NOT NULL,
                     fname TEXT NOT NULL,
                     frtn_d1 decimal(10,6) NULL,
                     tstat_d1 decimal(10,4) NULL,
                     frtn_w1 decimal(10,6) NULL,
                     tstat_w1 decimal(10,4) NULL,
                     frtn_w2 decimal(10,6) NULL,
                     tstat_w2 decimal(10,4) NULL,
                     frtn_m1 decimal(10,6) NULL,
                     tstat_m1 decimal(10,4) NULL)")
  RSQLite::dbExecute(con,"CREATE UNIQUE INDEX IX_Reg_FactorRtn ON Reg_FactorRtn(date,fname)")
  RSQLite::dbExecute(con,"CREATE TABLE Reg_Residual (
                     date int NOT NULL,
                     stockID TEXT NOT NULL,
                     res_d1 decimal(10,8) NULL,
                     res_w1 decimal(10,8) NULL,
                     res_w2 decimal(10,8) NULL,
                     res_m1 decimal(10,8) NULL)")
  RSQLite::dbExecute(con,"CREATE UNIQUE INDEX IX_Reg_Residual ON Reg_Residual(date,stockID)")
  RSQLite::dbExecute(con,"CREATE TABLE Reg_RSquare (
                     date int NOT NULL,
                     rsquare_d1 decimal(10,4) NULL,
                     rsquare_w1 decimal(10,4) NULL,
                     rsquare_w2 decimal(10,4) NULL,
                     rsquare_m1 decimal(10,4) NULL)")
  RSQLite::dbExecute(con,"CREATE UNIQUE INDEX IX_Reg_RSquare ON Reg_RSquare(date)")
  # Default time span: from 2005-01-04 up to one month before the last
  # available factor-score day (the last month has no complete forward return).
  if(missing(begT)) begT <- as.Date('2005-01-04')
  if(missing(endT)){
    endT <- RSQLite::dbGetQuery(con,"select max(TradingDay) from QT_FactorScore")[[1]]
    endT <- trday.offset(intdate2r(endT),by = months(-1))
  }
  RSQLite::dbDisconnect(con)
  # Process one calendar month at a time to bound memory usage.
  dates <- getRebDates(begT,endT,rebFreq = 'day')
  dates <- split(dates,cut(dates,'month'))
  plyr::l_ply(dates,lcdb.subfun.regtables,FactorLists,.progress = plyr::progress_text(style=3))
  return('Done!')
}
# Inner helper: for one month of daily rebalance dates, run the cross-sectional
# regression for each of the four holding periods (1d/1w/2w/1m), join the
# results column-wise, and append them to the three local Reg_* tables.
lcdb.subfun.regtables <- function(dates,FactorLists){
  message(paste(min(rdate2int(dates)),' to ',max(rdate2int(dates))),'...')
  # Universe is fixed to the CSI All Share index (EI000985).
  TS <- getTS(dates,indexID = 'EI000985')
  TSF <- getMultiFactor(TS,FactorLists)
  # Holding periods; list names become the column suffixes (e.g. frtn_d1).
  prd_lists <- list(d1=lubridate::days(1),
                    w1=lubridate::weeks(1),
                    w2=lubridate::weeks(2),
                    m1=months(1))
  for(j in 1:length(prd_lists)){
    TSFR <- getTSR(TSF,dure = prd_lists[[j]])
    re <- reg.TSFR(TSFR,regType='glm')
    if(j==1){
      # first period initialises the accumulators
      fRtn <- re$fRtn
      res <- re$res
      RSquare <- re$RSquare
    }else{
      # subsequent periods are joined on as extra columns
      fRtn <- dplyr::left_join(fRtn,re$fRtn,by=c('date','fname'))
      res <- dplyr::left_join(res,re$res,by=c('date','stockID'))
      RSquare <- dplyr::left_join(RSquare,re$RSquare,by='date')
    }
  }
  # Rename joined columns to the <measure>_<period> schema used by the tables.
  colnames(fRtn) <- c('date','fname',paste(c("frtn","tstat"),rep(names(prd_lists),each = 2),sep = '_'))
  colnames(res) <- c('date','stockID',paste('res',names(prd_lists),sep = '_'))
  colnames(RSquare) <- c('date',paste("rsquare",names(prd_lists),sep = '_'))
  # Dates are stored as integers (yyyymmdd) in the local SQLite db.
  con <- db.local()
  RSQLite::dbWriteTable(con,'Reg_FactorRtn',transform(fRtn,date=rdate2int(date)),overwrite=FALSE,append=TRUE,row.names=FALSE)
  RSQLite::dbWriteTable(con,'Reg_Residual',transform(res,date=rdate2int(date)),overwrite=FALSE,append=TRUE,row.names=FALSE)
  RSQLite::dbWriteTable(con,'Reg_RSquare',transform(RSquare,date=rdate2int(date)),overwrite=FALSE,append=TRUE,row.names=FALSE)
  RSQLite::dbDisconnect(con)
}
#' @rdname lcdb_regtables
#'
#' @export
lcdb.update.RegTables <- function(begT,endT,FactorLists){
  # Incrementally update the Reg_* tables. Defaults: begT = day after the
  # last stored date; endT = one month before the last factor-score day.
  # Any overlap with already-stored dates is deleted first so the month-wise
  # rebuild cannot create duplicate rows.
  con <- db.local()
  if(missing(begT)){
    begT <- RSQLite::dbGetQuery(con,"select max(date) from Reg_RSquare")[[1]]
    begT <- trday.offset(intdate2r(begT),lubridate::days(1))
  }
  if(missing(endT)){
    endT <- RSQLite::dbGetQuery(con,"select max(TradingDay) from QT_FactorScore")[[1]]
    endT <- trday.offset(intdate2r(endT),by = months(-1))
  }
  if(begT>endT) return('Done!')  # nothing new to compute
  tmp.dates <- RSQLite::dbGetQuery(con,"select min(date) 'mindate',max(date) 'maxdate' from Reg_RSquare")
  tmp.dates <- transform(tmp.dates,mindate=intdate2r(mindate),maxdate=intdate2r(maxdate))
  if(begT<= tmp.dates$maxdate && endT>= tmp.dates$mindate){
    # dbExecute() is the DBI verb for DML statements (DELETE returns no rows).
    RSQLite::dbExecute(con, paste("delete from Reg_FactorRtn WHERE date>=",rdate2int(begT),
                                  " and date<=",rdate2int(endT)))
    RSQLite::dbExecute(con, paste("delete from Reg_RSquare WHERE date>=",rdate2int(begT),
                                  " and date<=",rdate2int(endT)))
    RSQLite::dbExecute(con, paste("delete from Reg_Residual WHERE date>=",rdate2int(begT),
                                  " and date<=",rdate2int(endT)))
  }
  RSQLite::dbDisconnect(con)
  # Recompute and append, one calendar month at a time.
  dates <- getRebDates(begT,endT,rebFreq = 'day')
  dates <- split(dates,cut(dates,'month'))
  plyr::l_ply(dates,lcdb.subfun.regtables,FactorLists,.progress = plyr::progress_text(style=3))
  return('Done!')
}
# --------------------- ~~ Backtesting --------------
#' regression_result
#'
#' Regression to the TSFR data, calculate factor return, residuals, and R squrare, etc.
#' @name regression_result
#' @rdname regression_result
#' @aliases reg.TSFR
#' @param TS a \bold{TS} object.
#' @param dure see example in \code{\link{getTSR}}.
#' @param TSFR a \bold{TSFR} object.
#' @param regType the regress type,the default type is "glm".
#' @param glm_wgt glm's weight data, default value is sqrt of floating market value.
#' @param sectorAttr sector attribute.
#' @param secRtnOut whether output sector's return,default value is \code{FALSE}.
#' @return return a list, contains dataframes of TSFR,frtn, residual and Rsquare.
#' @export
#' @author Ruifei.yin
#' @examples
#' RebDates <- getRebDates(as.Date('2014-01-31'),as.Date('2016-09-30'))
#' TS <- getTS(RebDates,indexID = 'EI000985')
#' FactorLists <- buildFactorLists(
#' buildFactorList(factorFun="gf.SIZE"),
#' buildFactorList(factorFun="gf.GROWTH"),
#' buildFactorList(factorFun="gf.TRADING"),
#' buildFactorList(factorFun="gf.EARNINGYIELD"),
#' buildFactorList(factorFun="gf.VALUE"),
#' buildFactorList(factorFun="gf.OTHER"))
#' reg_results <- reg.TS(TS)
#' reg_results <- reg.TS(TS,FactorLists)
#' ----------------------------------------------------------
#' TSF <- getMultiFactor(TS,FactorLists)
#' TSFR <- getTSR(TSF)
#' reg_results <- reg.TSFR(TSFR)
reg.TSFR <- function(TSFR,regType=c('glm','lm'),glm_wgt=c("sqrtFV","res"),
                     sectorAttr=defaultSectorAttr(),secRtnOut=FALSE){
  # Cross-sectional (per-date) regression of period returns on factor scores,
  # optionally with sector dummies. Returns a list of:
  #   TSFR    - the input data, untouched
  #   fRtn    - per-date factor returns and t-stats (intercept dropped)
  #   res     - per-date per-stock residuals (NA residuals dropped)
  #   RSquare - per-date R-squared
  regType <- match.arg(regType)
  glm_wgt <- match.arg(glm_wgt)
  TSFRraw <- TSFR  # keep an unmodified copy for the return value
  factorNames <- guess_factorNames(TSFR,no_factorname = c('glm_wgt','sector'),is_factorname = 'factorscore',silence=TRUE)
  if(!is.null(sectorAttr)){
    TSFR <- getSectorID(TS = TSFR,sectorAttr = sectorAttr,fillNA = TRUE)
  }
  if(regType=='glm'){ #get glm_wgt data
    # A pre-supplied 'glm_wgt' column takes precedence; otherwise build it.
    if(!('glm_wgt' %in% colnames(TSFR))){
      if(glm_wgt=="sqrtFV"){
        # WLS weight = sqrt of floating market cap (NA-filled).
        TSw <- gf_cap(TSFR[,c('date','stockID')],log = FALSE,var="float_cap",na_fill=TRUE)
        TSw <- transform(TSw,factorscore=sqrt(factorscore))
        TSw <- dplyr::rename(TSw,glm_wgt=factorscore)
        TSFR <- merge.x(TSFR,TSw,by =c("date","stockID"))
      }else if(glm_wgt=="res"){
        # NOTE(review): the "res" weighting scheme is not implemented yet --
        # this branch is intentionally empty, so glm falls through without a
        # glm_wgt column. Confirm intended behavior before relying on it.
      }
    }
  }
  # Run the per-period regression, with sector dummies when sectorAttr given.
  if(is.null(sectorAttr)){
    re <- lm_NPeriod(TSFR,y='periodrtn',x=factorNames,lmtype = regType)
  }else{
    re <- lm_NPeriod(TSFR,y='periodrtn',x=factorNames,lmtype = regType,secIN =TRUE)
  }
  # Tidy coefficients: drop intercept; rename sector dummy terms
  # 'sectorESxxx' back to their plain sector IDs 'ESxxx'.
  fRtn <- re$coef %>% dplyr::select(date,term,estimate,statistic) %>%
    dplyr::rename(fname=term,frtn=estimate,Tstat=statistic) %>%
    dplyr::filter(fname!='(Intercept)') %>%
    dplyr::mutate(fname=ifelse(substr(fname,1,8)=='sectorES',stringr::str_replace(fname,'sectorES','ES'),fname))
  if(secRtnOut==FALSE){
    # drop sector-dummy returns unless explicitly requested
    fRtn <- dplyr::filter(fRtn,substr(fname,1,2)!='ES')
  }
  res <- re$resd %>% dplyr::select(date,stockID,res) %>% dplyr::filter(!is.na(res))
  RSquare <- re$rsq %>% dplyr::rename(rsquare=rsq)
  #   # pure-factor-port wgt
  #   tmp.x <- as.matrix(tmp.tsfr[,c(factorNames)])
  #   tmp.w <- as.matrix(tmp.tsfr[,"glm_wgt"])
  #   tmp.w <- diag(c(tmp.w),length(tmp.w))
  #   tmp.f <- solve(crossprod(tmp.x,tmp.w) %*% tmp.x) %*% crossprod(tmp.x,tmp.w)
  #   pfpwgt <- rbind(pfpwgt,data.frame(date=dates$date[i],stockID=tmp.tsfr$stockID,t(tmp.f)))
  result <- list(TSFR=TSFRraw,fRtn=fRtn,res=res,RSquare=RSquare)
  return(result)
}
#' @rdname regression_result
#' @aliases reg.TS
#' @export
reg.TS <- function(TS,FactorLists,dure=months(1),regType=c('glm','lm'),glm_wgt=c("sqrtFV","res"),
                   sectorAttr=defaultSectorAttr(),secRtnOut=FALSE){
  # Convenience wrapper around reg.TSFR: builds the TSFR from a plain TS
  # (computing factors first when FactorLists is supplied) and regresses it.
  regType <- match.arg(regType)
  glm_wgt <- match.arg(glm_wgt)
  # With FactorLists the factor scores are computed here; without it the
  # TS is assumed to already carry the factor columns.
  if(missing(FactorLists)){
    tsfr <- getTSR(TS,dure)
  }else{
    tsfr <- getTSR(getMultiFactor(TS,FactorLists),dure)
  }
  reg <- reg.TSFR(tsfr,regType = regType,glm_wgt = glm_wgt,
                  sectorAttr = sectorAttr,secRtnOut = secRtnOut)
  list(TSFR=tsfr,fRtn=reg$fRtn,res=reg$res,RSquare=reg$RSquare)
}
#' factor_select
#'
#' \bold{reg.factor_select} select alpha or risk factors using regression method.
#' \bold{factor_VIF} caculate factor's VIF.
#' @name factor_select
#' @rdname factor_select
#' @param TSFR a \bold{TSFR} object.
#' @param forder self defined factor importance order,can be missing,can be set of character or number,length of \code{forder} can be shorter than factors.
#' @export
#' @examples
#' RebDates <- getRebDates(as.Date('2014-01-31'),as.Date('2016-09-30'))
#' TS <- getTS(RebDates,indexID = 'EI000905')
#' factorIDs <- c("F000006","F000008","F000012","F000015",
#' "F000016")
#' tmp <- buildFactorLists_lcfs(factorIDs,factorRefine=refinePar_default("scale"))
#' factorLists <- buildFactorLists(
#' buildFactorList(factorFun="gf.NP_YOY",
#' factorPar=list(),
#' factorDir=1),
#' buildFactorList(factorFun="gf.ln_mkt_cap",
#' factorPar=list(),
#' factorDir=-1),
#' buildFactorList(factorFun="gf.G_MLL_Q",
#' factorPar=list(),
#' factorDir=1),
#' factorRefine=refinePar_default("scale"))
#' factorLists <- c(tmp,factorLists)
#' TSF <- getMultiFactor(TS,FactorLists = factorLists)
#' ----------------------VIF----------------------
#' VIF <- factor_VIF(TSF)
#'
#' TSFR <- getTSR(TSF)
#' re <- reg.factor_select(TSFR)
#' re <- reg.factor_select(TSFR,sectorAttr=NULL)
#' nstock <- length(factorLists)
#' re <- reg.factor_select(TSFR,forder=sample(1:nstock,nstock))
reg.factor_select <- function(TSFR,sectorAttr=defaultSectorAttr(),forder){
  # Greedy forward factor selection by incremental regression R-squared.
  # At each step the candidate factor is orthogonalised against the already
  # selected ones, the factor with the highest mean R-squared is selected,
  # and a t-test on its return series tags it 'alpha' (|t|>2) or 'risk'.
  # Returns list(result = selection table, TSFR = input with original columns).
  cols <- colnames(TSFR)
  fname <- guess_factorNames(TSFR)
  #sector only
  result <- data.frame()
  if(!is.null(sectorAttr)){
    TSFR <- getSectorID(TSFR,sectorAttr = sectorAttr,fillNA = TRUE)
    secNames <- unique(TSFR$sector)
    # Baseline: sector dummies alone. reg.TSFR()'s 4th element is the
    # RSquare data.frame with columns (date, rsquare).
    secrs <- reg.TSFR(TSFR[,c("date","date_end","stockID",secNames,'sector',"periodrtn")],sectorAttr = 'existing')[[4]]
    # BUGFIX: was mean(secrs$RSquare, ...) -- the column is 'rsquare', so the
    # old code averaged NULL and always reported NaN for the sector row.
    result <- data.frame(fname='sector',rsquare=mean(secrs$rsquare,na.rm = TRUE),
                         frtn=NA,fttest=NA,pttest=NA,tag='risk')
    TSF <- TSFR[,c('date','stockID',fname,secNames,'sector')]
  } else {
    TSF <- TSFR[,c('date','stockID',fname)]
  }
  # Optional user-imposed importance order: character names or integer
  # positions; may cover only a leading subset of the factors.
  if(!missing(forder)){
    if(typeof(forder)=='character'){
      if(length(forder)==length(fname)){
        fname <- forder
      }else{
        fname <- c(forder,setdiff(fname,forder))
      }
    }else{
      if(length(forder)==length(fname)){
        fname <- fname[forder]
      }else{
        fname <- c(fname[forder],fname[setdiff(seq(1:length(fname)),forder)])
      }
    }
  }
  selectf <- NULL
  while(length(setdiff(fname,selectf))>0){
    rsquare <- data.frame()
    frtn <- data.frame()
    res <- data.frame()
    # Candidate set: all remaining factors, unless forder forces the next one.
    if(missing(forder)){
      fnameset <- setdiff(fname,selectf)
    }else{
      if(length(forder)==length(fname)){
        fnameset <- setdiff(fname,selectf)[1]
      }else{
        if(length(selectf)<length(forder)){
          fnameset <- setdiff(fname,selectf)[1]
        }else{
          fnameset <- setdiff(fname,selectf)
        }
      }
    }
    for(i in fnameset){
      # Orthogonalise candidate i against selected factors (and sectors),
      # then regress returns on the augmented factor set.
      if(is.null(sectorAttr)){
        tmp.TSF <- TSF[,c("date","stockID",union(selectf,i))]
        if(ncol(tmp.TSF)>3){
          tmp.TSF <- factor_orthogon_single(tmp.TSF,y=i,sectorAttr = NULL)
        }
        tmp.TSFR <- dplyr::left_join(tmp.TSF,TSFR[,c("date","date_end","stockID","periodrtn")],
                                     by=c("date","stockID"))
        frs <- reg.TSFR(tmp.TSFR,sectorAttr = NULL)
      }else{
        tmp.TSF <- TSF[,c("date","stockID",union(selectf,i),secNames,'sector')]
        tmp.TSF <- factor_orthogon_single(tmp.TSF,y=i,sectorAttr = 'existing')
        tmp.TSFR <- dplyr::left_join(tmp.TSF,TSFR[,c("date","date_end","stockID","periodrtn")],
                                     by=c("date","stockID"))
        frs <- reg.TSFR(tmp.TSFR,sectorAttr = 'existing')
      }
      tmp.res <- data.frame(tmp.TSF[,c('date','stockID')],fname=i,res=tmp.TSF[,i])
      tmp <- data.frame(frs$RSquare,fname=i)
      rsquare <- rbind(rsquare,tmp)
      res <- rbind(res,tmp.res)
      frtn <- rbind(frtn,data.frame(frs$fRtn))
    }
    # Pick the candidate with the highest (trimmed) mean R-squared.
    rsquare <- rsquare %>% dplyr::group_by(fname) %>%
      dplyr::summarise(rsquare = mean(rsquare,trim = 0.025,na.rm = TRUE)) %>%
      dplyr::arrange(desc(rsquare)) %>% dplyr::slice(1)
    tmp.selectf <- as.character(rsquare$fname)
    tmp.frtn <- frtn[frtn$fname==tmp.selectf,'frtn']
    testres <- t.test(tmp.frtn)
    rsquare <- transform(rsquare,frtn=mean(tmp.frtn,trim = 0.025,na.rm = TRUE),
                         fttest=testres$statistic,
                         pttest=testres$p.value,
                         tag=ifelse(testres$statistic>2,'alpha','risk'))
    result <- rbind(result,rsquare)
    selectf <- c(selectf,tmp.selectf)
    # Replace the selected factor's column by its orthogonalised residual so
    # later candidates are measured against what it cannot explain.
    res <- res[res$fname==tmp.selectf,c('date','stockID','res')]
    TSFR[,tmp.selectf] <- dplyr::left_join(TSFR[,c("date","stockID")],
                                           res,by=c("date","stockID"))[,3]
  }
  rownames(result) <- NULL
  # rsqPct: step-over-step percentage gain in R-squared.
  result <- transform(result,fname=as.character(fname),
                      rsquare=round(rsquare,digits = 3),
                      frtn=round(frtn,digits = 4),
                      fttest=round(fttest,digits = 2),
                      pttest=round(pttest,digits = 3),
                      tag=as.character(tag),
                      rsqPct=round((rsquare/dplyr::lag(rsquare)-1)*100,digits = 1))
  TSFR <- TSFR[,cols]
  return(list(result=result,TSFR=TSFR))
}
#' @rdname factor_select
#' @param TSF is a \bold{TSF} object.
#' @param testf is test factor name, can be missing.
#' @param sectorAttr a sector-attribute list or NULL or 'existing'. If a list, regress with the sectors specified by sectorAttr;if "existing", use the existing sector data in TSF(Make sure they are already exist!); if null, not regress with sectors.
#' @return data frame of VIF and residual.
#' @export
factor_VIF <- function(TSF,sectorAttr=defaultSectorAttr()){
  # Per-date variance inflation factor of each factor: regress it on all the
  # other factors (plus sector dummies when sectorAttr is given) and compute
  # VIF = 1 / (1 - R^2). Returns a data.frame with columns fname, date, vif.
  fnames <- guess_factorNames(TSF,is_factorname = "factorscore",silence=TRUE)
  if(!is.null(sectorAttr)){
    TSF <- getSectorID(TSF,sectorAttr = sectorAttr,fillNA = TRUE)
  }
  # VIF series for a single factor.
  vif_one <- function(f){
    others <- setdiff(fnames,f)
    if(is.null(sectorAttr)){
      fit <- lm_NPeriod(TSF,f,x=others)
    }else{
      fit <- lm_NPeriod(TSF,f,x=others,secIN = TRUE)
    }
    out <- fit$rsq
    out$vif <- 1/(1-out$rsq)
    out$fname <- f
    out
  }
  result <- do.call(rbind,lapply(fnames,vif_one))
  result[,c("fname","date","vif")]
}
# --------------------- ~~ Backtesting results --------------
#' regression_result_summary
#'
#' summary of regression result, such as chart of rsquare and factor return,etc.
#' @param reg_results is regression_result
#' @param facet whether to plot the wealth index of each factor's return in a single combined graph (\code{FALSE}, the default) or as per-factor facets.
#' @name regression_result_summary
#' @seealso \link{reg.TSFR}
NULL
#' @rdname regression_result_summary
#'
#' @export
table.reg.rsquare <- function(reg_results){
  # Summary table for the regression R-squared series: sample span, number
  # of periods, and the six quantile/mean statistics rounded to 3 digits.
  rs <- reg_results$RSquare
  stats <- round(summary(rs$rsquare),3)
  out <- data.frame(begT=min(rs$date),
                    endT=max(rs$date),
                    NPeriod=nrow(rs),
                    t(as.numeric(stats)))
  colnames(out) <- c("begT","endT","NPeriod","Min","Qu.1st","Median","Mean","Qu.3rd","Max")
  out
}
#' @rdname regression_result_summary
#'
#' @export
table.reg.fRtn <- function(reg_results,includeVIF=FALSE){
  # Per-factor summary of the factor-return series: annualised return/vol,
  # Sharpe, hit ratio, max drawdown, plus t-stat diagnostics (mean |t| and
  # share of periods with t > 2). Optionally appends each factor's mean VIF.
  fRtn <- reg_results$fRtn
  tstat <- fRtn %>% dplyr::group_by(fname) %>% dplyr::summarise(avgT=mean(abs(Tstat)),
                                                                TPer=sum(Tstat>2)/length(Tstat))
  colnames(tstat) <- c("fname","mean(abs(T))","percent T>2")
  tstat$fname <- as.character(tstat$fname)
  # Wide date-by-factor return matrix as xts for rtn.summary().
  fRtn <- reshape2::dcast(fRtn,date~fname,value.var = 'frtn')
  fRtn <- xts::xts(fRtn[,-1,drop=FALSE],fRtn[,1])
  rtnsum <- t(rtn.summary(fRtn))
  rtnsum <- data.frame(fname=rownames(rtnsum),rtnsum,stringsAsFactors = FALSE)
  rownames(rtnsum) <- NULL
  colnames(rtnsum) <- c("fname","ann_rtn","ann_sd","ann_Sharpe","hit_ratio","max_drawdown")
  re <- dplyr::left_join(rtnsum,tstat,by='fname')
  if(includeVIF){
    # VIF computed across all factors jointly (no sector dummies), then
    # averaged over time per factor.
    TSF <- reg_results$TSFR %>% dplyr::select(-date_end,-periodrtn)
    VIF <- factor_VIF(TSF,sectorAttr = NULL)
    VIF <- VIF %>% dplyr::group_by(fname) %>% dplyr::summarise(vif=mean(vif)) %>% dplyr::ungroup()
    re <- dplyr::left_join(re,VIF,by='fname')
  }
  # re <- dplyr::arrange(re,dplyr::desc(ann_Sharpe))
  return(re)
}
#' @rdname regression_result_summary
#'
#' @export
chart.reg.fRtnWealthIndex <- function(reg_results,facet=FALSE){
  # Plot the cumulative wealth index of each factor-return series, either in
  # one combined chart (facet = FALSE) or as per-factor facets.
  fRtn <- reg_results$fRtn
  fRtn <- reshape2::dcast(fRtn,date~fname,value.var = 'frtn')
  # drop=FALSE keeps a 1-column data.frame (and its factor name) when there
  # is only one factor; consistent with table.reg.fRtn().
  fRtn <- xts::xts(fRtn[,-1,drop=FALSE],fRtn[,1])
  if(facet==FALSE){
    ggplot.WealthIndex(fRtn,size=1)
  }else{
    # roughly square facet grid
    N <- floor(sqrt(ncol(fRtn)))
    fRtn <- WealthIndex(fRtn)
    fRtn <- melt.ts(fRtn)
    ggplot(fRtn, aes(x=time, y=value)) +ggtitle('wealth index')+
      geom_line(size=1,colour = "red")+facet_wrap( ~ variable,scales = 'free',ncol = N)
  }
}
#' @rdname regression_result_summary
#'
#' @export
chart.reg.fRtnBar <- function(reg_results){
  # Bar chart of per-period factor returns, one facet per factor, laid out
  # in a roughly square grid.
  frtn_df <- reg_results$fRtn
  ncols <- floor(sqrt(length(unique(frtn_df$fname))))
  ggplot(frtn_df, aes(x = date, y = frtn)) +
    ggtitle('factor return') +
    geom_bar(position = "dodge", stat = "identity") +
    facet_wrap(~ fname, scales = 'free', ncol = ncols)
}
#' @rdname regression_result_summary
#'
#' @export
chart.reg.rsquare <- function(reg_results){
  # Time-series plot of the regression R-squared; when more than 12 periods
  # are available a 12-period moving average is overlaid.
  RSquare <- reg_results$RSquare
  Nperiod <- nrow(RSquare)
  if(Nperiod>12){
    RSquare <- xts::xts(RSquare[,-1],RSquare[,1])
    colnames(RSquare) <- c('rsquare')
    tmp <- zoo::rollmean(RSquare,12,align='right')
    # BUGFIX: the MA layer maps y = rsquare, but the overlay data.frame used
    # to name the column 'RSquareMA', which made ggplot fail with
    # "object 'rsquare' not found" at plot time.
    tmp <- data.frame(date=zoo::index(tmp),rsquare=as.numeric(tmp))
    RSquare <- data.frame(time=time(RSquare),zoo::coredata(RSquare))
    ggplot(RSquare, aes(x=time, y=rsquare))+geom_line(color="#D55E00") +
      ggtitle('RSquare(with MA series)') +geom_line(data=tmp,aes(x=date,y=rsquare),size=1,color="#56B4E9")
  }else{
    ggplot(RSquare, aes(x=date, y=rsquare))+geom_line(color="#D55E00") + ggtitle('RSquare')
  }
}
#' @rdname regression_result_summary
#'
#' @export
MC.chart.reg.corr <- function(reg_results){
  # Correlation heat-map of the factor-return series.
  rtn_wide <- reshape2::dcast(reg_results$fRtn, date ~ fname, value.var = 'frtn')
  corr_mat <- cor(as.matrix(rtn_wide[, -1]))
  ggplot.corr(corr_mat)
}
#' factor return,covariance and delta
#'
#' calculate factor return, factor covariance and residual variance.
#' @name f_rtn_cov_delta
#' @rdname f_rtn_cov_delta
#' @param RebDates is date set, can be missing.
#' @param dure a period object from package \code{lubridate}. (ie. \code{months(1),weeks(2)}. See example in \code{\link{trday.offset}}.) If null, then get periodrtn between \code{date} and the next \code{date}, else get periodrtn of '\code{dure}' starting from \code{date}.
#' @param rolling default value is \code{FALSE}, if value is \code{TRUE} means the data period is \code{nwin} forward.
#' @param rtntype method used to calculate the factor return: \bold{mean} takes the average of historical data; \bold{forcast} forecasts the factor return from historical data (this may take a while; the forecasting method comes from package \code{\link[forecast]{ets}}).
#' @param covtype method used to calculate the covariance: for \bold{shrink} see \code{\link[nlshrink]{nlshrink_cov}}; for \bold{simple} see \code{\link{cov}}.
#' @param nwin is rolling windows forward.
#' @param reg_results see examples in \code{\link{reg.TSFR}}
#' @return a data frame of factors' return .
#' @examples
#' fRtn <- getfRtn(reg_results=reg_results)
#' fCov <- getfCov(reg_results=reg_results)
#' Delta <- getDelta(dure=months(1),rolling=FALSE,nwin=24,reg_results)
#' rtn_cov_delta <- f_rtn_cov_delta(reg_results=reg_results)
#' @export
f_rtn_cov_delta <- function(dure=months(1),rolling=FALSE,rtntype=c('mean','forcast'),
                            covtype=c('shrink','simple'),nwin=24,reg_results) {
  # Convenience wrapper returning the three risk-model inputs at once:
  # expected factor returns (fRtn), factor covariance (fCov), and
  # stock-specific residual variance (Delta).
  rtntype <- match.arg(rtntype)
  covtype <- match.arg(covtype)
  # reg_results is forwarded as-is; when missing, the getters fall back to
  # the local database tables.
  list(
    fRtn  = getfRtn(dure=dure,rolling=rolling,rtntype=rtntype,nwin=nwin,reg_results),
    fCov  = getfCov(dure=dure,rolling=rolling,covtype=covtype,nwin=nwin,reg_results),
    Delta = getDelta(dure=dure,rolling=rolling,nwin=nwin,reg_results)
  )
}
# inner function
# Inner helper: fetch factor returns ('frtn') or residuals ('res') either
# from the local Reg_* tables (reg_results missing; 'dure' selects the
# d1/w1/w2/m1 column) or from an in-memory reg_results list.
get_frtn_res <- function(begT,endT,dure,reg_results,outtype=c('frtn','res')){
  outtype <- match.arg(outtype)
  if(missing(begT)) begT <- as.Date('1990-01-01')
  if(missing(endT)) endT <- as.Date('2100-01-01')
  if(missing(reg_results)){
    # Map the holding period to the column suffix used in the tables.
    if(dure==lubridate::days(1)){
      dbname <- 'd1'
    }else if(dure==lubridate::weeks(1)){
      dbname <- 'w1'
    }else if(dure==lubridate::weeks(2)){
      dbname <- 'w2'
    }else if(dure==months(1)){
      dbname <- 'm1'
    }else{
      # was: silent fall-through leaving 'dbname' undefined
      stop("unsupported 'dure': must be 1 day, 1 week, 2 weeks or 1 month")
    }
    dbname <- paste(outtype,dbname,sep = '_')
    if(outtype=='frtn'){
      qr <- paste("SELECT date,fname,",dbname," 'frtn'
                  FROM Reg_FactorRtn where date>=",rdate2int(begT),
                  " and date<=",rdate2int(endT))
    }else if(outtype=='res'){
      qr <- paste("SELECT date,stockID,",dbname," 'res'
                  FROM Reg_Residual where date>=",rdate2int(begT),
                  " and date<=",rdate2int(endT))
    }
    con <- db.local()
    # qualified with RSQLite:: for consistency with the rest of the file
    re <- RSQLite::dbGetQuery(con,qr)
    RSQLite::dbDisconnect(con)
    re <- re %>% dplyr::mutate(date=intdate2r(date),date_end=trday.offset(date,dure))
  }else{
    if(outtype=='frtn'){
      re <- reg_results$fRtn %>% dplyr::select(-Tstat) %>% dplyr::filter(date>=begT,date<=endT)
    }else{
      re <- reg_results$res %>% dplyr::filter(date>=begT,date<=endT)
    }
    # attach period end dates from the TSFR carried in reg_results
    dates <- reg_results$TSFR %>% dplyr::select(date,date_end) %>% dplyr::distinct()
    re <- re %>% dplyr::left_join(dates,by='date')
  }
  return(re)
}
#' @rdname f_rtn_cov_delta
#'
#' @export
getfRtn <- function(dure=months(1),rolling=FALSE,rtntype=c('mean','forcast'),nwin=24,reg_results){
  # Expected factor returns, either as a historical mean ('mean') or a
  # one-step exponential-smoothing forecast ('forcast'); optionally on a
  # rolling window of nwin observations.
  rtntype <- match.arg(rtntype)
  if(missing(reg_results)){
    rtndata <- get_frtn_res(dure=dure)
  }else{
    rtndata <- get_frtn_res(reg_results=reg_results)
  }
  if(rtntype=='mean'){
    if(rolling){
      # rolling mean of the last nwin observations, stamped at date_end
      result <- rtndata %>% dplyr::arrange(fname,date_end) %>% dplyr::group_by(fname) %>%
        dplyr::mutate(frtnroll=zoo::rollmean(frtn,nwin,na.pad = TRUE,align='right')) %>% dplyr::ungroup() %>%
        dplyr::select(date_end,fname,frtnroll) %>% dplyr::rename(date=date_end,frtn=frtnroll) %>%
        dplyr::arrange(date,fname) %>% dplyr::filter(!is.na(frtn))
      # NOTE: dplyr::filter must be qualified here -- a bare 'filter' can
      # resolve to stats::filter when dplyr is not attached.
    }else{
      result <- rtndata %>% dplyr::group_by(fname) %>% dplyr::summarise(frtn=mean(frtn,na.rm = TRUE)) %>% dplyr::ungroup()
    }
    result <- as.data.frame(result)
  }else if(rtntype=='forcast'){
    rtndata <- reshape2::dcast(rtndata,date_end~fname,value.var = 'frtn')
    if(rolling){
      RebDates <- rtndata$date_end
    }else{
      RebDates <- max(rtndata$date_end)
    }
    result <- data.frame()
    for(i in 1:length(RebDates)){
      # expanding window up to each rebalance date
      rtndata_ <- rtndata %>% dplyr::filter(date_end<=RebDates[i]) %>% dplyr::select(-date_end)
      if(rolling && nrow(rtndata_)<nwin){
        next
      }
      for(j in 1:ncol(rtndata_)){
        # one-step-ahead ETS forecast per factor
        fit <- forecast::ets(rtndata_[,j])
        fit.forcast <- forecast::forecast(fit, 1)
        result_ <- data.frame(date=RebDates[i],fname=colnames(rtndata_)[j],
                              frtn=as.numeric(fit.forcast$mean),stringsAsFactors = FALSE)
        result <- rbind(result,result_)
      }
    }
    result <- dplyr::arrange(result,date,fname)
    if(!rolling){
      result <- transform(result,date=NULL)
    }
  }
  return(result)
}
#' @rdname f_rtn_cov_delta
#'
#' @export
getfCov <- function(dure=months(1),rolling=FALSE,covtype=c('shrink','simple'),
                    nwin=24,reg_results){
  # Factor-return covariance matrix, either over the full sample
  # (rolling = FALSE) or on rolling nwin-observation windows stamped by
  # date. 'shrink' uses nonlinear shrinkage (nlshrink), 'simple' uses the
  # sample covariance.
  covtype <- match.arg(covtype)
  if(missing(reg_results)){
    rtndata <- get_frtn_res(dure=dure)
  }else{
    rtndata <- get_frtn_res(reg_results=reg_results)
  }
  # wide date-by-factor matrix of factor returns
  rtndata <- reshape2::dcast(rtndata,date_end~fname,value.var = 'frtn')
  if(rolling){
    RebDates <- rtndata$date_end
    result <- data.frame()
    for(i in 1:length(RebDates)){
      # last nwin observations up to each date; skip until window is full
      rtnmat <- rtndata %>% dplyr::filter(date_end<=RebDates[i]) %>% dplyr::select(-date_end)
      rtnmat <- tail(rtnmat,nwin)
      rtnmat <- as.matrix(rtnmat)
      if(nrow(rtnmat)<nwin){
        next
      }
      if(covtype=='simple'){
        result_ <- as.data.frame(cov(rtnmat))
      }else{
        # nlshrink_cov drops dimnames, so restore the column names
        result_ <- as.data.frame(nlshrink::nlshrink_cov(rtnmat))
        colnames(result_) <- colnames(rtnmat)
      }
      # one covariance block per date, stacked long with a 'date' column
      result_ <- data.frame(date=RebDates[i],result_)
      result <- rbind(result,result_)
    }
  }else{
    rtnmat <- as.matrix(rtndata[,-1])
    if(covtype=='simple'){
      result <- as.data.frame(cov(rtnmat))
    }else{
      result <- as.data.frame(nlshrink::nlshrink_cov(rtnmat))
      colnames(result) <- colnames(rtnmat)
      rownames(result) <- colnames(rtnmat)
    }
  }
  return(result)
}
#' @rdname f_rtn_cov_delta
#'
#' @export
getDelta <- function(dure=months(1),rolling=FALSE,nwin=24,reg_results){
  # Stock-specific risk: variance of the regression residuals per stock,
  # either over the full sample (requires >= 3 observations) or on rolling
  # nwin-observation windows (requires the stock to be present in at least
  # half of the window).
  if(missing(reg_results)){
    resdata <- get_frtn_res(dure=dure,outtype = 'res')
  }else{
    resdata <- get_frtn_res(reg_results=reg_results,outtype = 'res')
  }
  if(rolling){
    # widen so each window can be cut by date, then re-melt for the stats
    resdata <- reshape2::dcast(resdata,date_end~stockID,value.var = 'res')
    RebDates <- resdata$date_end
    result <- data.frame()
    for(i in 1:length(RebDates)){
      resdata_ <- resdata %>% dplyr::filter(date_end<=RebDates[i])
      resdata_ <- tail(resdata_,nwin)
      if(nrow(resdata_)<nwin){
        next  # not enough history yet
      }
      resdata_ <- reshape2::melt(resdata_,id.vars='date_end',variable.name = "stockID", na.rm = TRUE,value.name = "res")
      # keep stocks observed in at least half of the window
      result_ <- resdata_ %>% dplyr::group_by(stockID) %>% dplyr::summarise(n =n(),var = var(res)) %>%
        dplyr::ungroup() %>% dplyr::filter(n>=nwin/2) %>% dplyr::select(-n)
      result_ <- data.frame(date=RebDates[i],result_)
      result <- rbind(result,result_)
    }
  }else{
    result <- resdata %>% dplyr::group_by(stockID) %>% dplyr::summarise(n =n(),var = var(res)) %>%
      dplyr::ungroup() %>% dplyr::filter(n>=3) %>% dplyr::select(-n)
  }
  result <- as.data.frame(result)
  return(result)
}
#' biasTest
#'
#' Bias test of the risk model on an index portfolio: compares realised
#' portfolio returns with the model-predicted volatility
#' (w'(X F X' + Delta) w) and plots the rolling standard deviation of the
#' standardised returns b = rtn / predicted_vol. A well-calibrated model
#' keeps the series near 1.
#'
#' @param reg_results a regression-result list, see \code{\link{reg.TSFR}}.
#' @param portID index ID used as the test portfolio, default 'EI000300'.
#' @param nwin rolling window length for both the risk-model inputs and the
#'   rolling standard deviation of b.
#' @export
#' @examples
#' biasTest(reg_results)
biasTest <- function(reg_results,portID='EI000300',nwin=12){
  # rolling factor covariance and residual variance from the risk model
  rtn_cov_delta <- f_rtn_cov_delta(rolling = TRUE,nwin = nwin,reg_results=reg_results)
  fcov <- rtn_cov_delta$fCov
  fnames <- setdiff(colnames(fcov),'date')
  delta <- rtn_cov_delta$Delta
  # calculate factor return
  TSFR_total <- reg_results$TSFR
  dates <- unique(TSFR_total$date_end)
  port <- getIndexCompWgt(portID,dates)
  TSWF <- dplyr::left_join(port,TSFR_total,by=c('date','stockID'))
  biasdf <- data.frame()
  for(i in 1:length(dates)){
    TSWF_ <- TSWF %>% dplyr::filter(date==dates[i])
    TSWF_ <- na.omit(TSWF_)
    # realised weighted portfolio return for the period
    portrtn_ <- sum(TSWF_$wgt*TSWF_$periodrtn,na.rm = TRUE)
    wgt_ <- as.matrix(TSWF_$wgt,ncol=1)
    Xmat_ <- as.matrix(TSWF_[,fnames])
    # covariance matrix for this date (rolling) or the single static one
    if('date' %in% colnames(fcov)){
      Fmat_ <- as.matrix(fcov[fcov$date==dates[i],-1])
    }else{
      Fmat_ <- as.matrix(fcov)
    }
    if(nrow(Fmat_)==0) next  # window not yet full at this date
    if('date' %in% colnames(delta)){
      delta_ <- delta[delta$date==dates[i],-1]
    }else{
      delta_ <- delta
    }
    if(nrow(delta_)==0) next
    # missing stock-specific variances are filled with the cross-sectional median
    deltamat_ <- dplyr::left_join(TSWF_[,'stockID',drop=FALSE],delta_,by='stockID')
    deltamat_[is.na(deltamat_$var),'var'] <- median(deltamat_$var,na.rm = TRUE)
    deltamat_ <- diag(deltamat_$var)
    # predicted portfolio volatility: sqrt(w'(X F X' + Delta) w)
    portvar_ <- sqrt(as.numeric(t(wgt_) %*% (Xmat_ %*% Fmat_ %*% t(Xmat_)+deltamat_) %*% wgt_))
    biasdf <- rbind(biasdf,data.frame(date=dates[i],rtn=portrtn_,var=portvar_))
  }
  # b = realised return / predicted vol; plot its rolling sd
  biasdf <- transform(biasdf,b=rtn/var)
  biasdf <- xts::xts(biasdf[,'b'],order.by = biasdf[,'date'])
  names(biasdf) <- 'b'
  biasdf <- zoo::rollapply(biasdf,nwin,sd,align='right')
  biasdf <- na.omit(biasdf)
  ggplot.ts.line(biasdf)
}
# ===================== xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ==============
# --------------------- Performance & Risk Attribution -------------
# ===================== xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ==============
#' calculate factor exposure
#'
#' @export
exposure.TSWF <- function(TSWF) {
  # Weighted factor exposure of a portfolio: for each date and factor,
  # sum(wgt * factorscore). Returns a wide data.frame (date x factors).
  factorNames <- guess_factorNames(TSWF,silence = TRUE)
  TSWF <- TSWF %>% dplyr::select(one_of(c("date","wgt",factorNames)))
  # group_by is now namespace-qualified: a bare 'group_by' fails whenever
  # dplyr is not attached (the rest of the file uses dplyr:: throughout).
  factorexp <- TSWF %>% tidyr::gather(key='fname',value='fexp',-date,-wgt) %>%
    dplyr::group_by(date,fname) %>%
    dplyr::summarise(fexptot=sum(wgt*fexp,na.rm = TRUE)) %>% dplyr::ungroup()
  factorexp <- factorexp %>% tidyr::spread(fname,fexptot) %>% dplyr::select(one_of(c("date",factorNames)))
  factorexp <- as.data.frame(factorexp)
  return(factorexp)
}
#' calculate port exposure
#'
#' @export exposure.port
exposure.port <- function(port,factorLists,bmk=NULL,univ=NULL,
                          sectorAttr = defaultSectorAttr()){
  # Factor (and sector) exposures of a portfolio. With a benchmark 'bmk'
  # the result is long with port / bmk / active exposures per factor and
  # date; without one it is a long (date, fName, exposure) table.
  check.Port(port)
  # get active wgt if bmk is provided.
  if(!is.null(bmk)){
    port <- getActivewgt(port = port,bmk = bmk,res = "all")
  }
  # univ is nessecary when any of factorStd is not 'none'.
  if(!is.null(univ)){ # get factorscore in univ
    # scores are computed on the whole universe so standardisation is
    # consistent, then joined back onto the portfolio holdings
    dates <- unique(port$date)
    TS <- getTS(dates,indexID = univ)
    TSF <- getMultiFactor(TS,factorLists)
    TSWF <- merge.x(port,TSF,by=c('date','stockID'))
  } else { # get factorscore only in port
    # warn when a standardised factor is computed on portfolio stocks only
    factorSTD <- sapply(factorLists, function(x){x$factorRefine$std$method})
    if(any(factorSTD != "none")){
      warning("univ is nessecary when any of factorStd is not 'none'!")
    }
    TSWF <- getMultiFactor(port,factorLists)
  }
  if(!is.null(sectorAttr)){
    TSWF <- gf_sector(TSWF,sectorAttr = sectorAttr)
  }
  # arrange exposure
  if(!is.null(bmk)){
    # bmk: exposures under benchmark weights
    TSWF_bmk <- subset(TSWF, select = -c(portwgt,actwgt))
    TSWF_bmk <- dplyr::rename(TSWF_bmk, wgt = benchwgt)
    fexp_bmk <- exposure.TSWF(TSWF_bmk)
    fexp_bmk <- reshape2::melt(fexp_bmk, id = "date")
    fexp_bmk <- dplyr::rename(fexp_bmk, bmk_exposure = value)
    # port: exposures under portfolio weights
    TSWF_port <- subset(TSWF, select = -c(benchwgt,actwgt))
    TSWF_port <- dplyr::rename(TSWF_port, wgt = portwgt)
    fexp_port <- exposure.TSWF(TSWF_port)
    fexp_port <- reshape2::melt(fexp_port, id = "date")
    fexp_port <- dplyr::rename(fexp_port, port_exposure = value)
    # merge and compute active exposure = port - bmk
    fexp <- merge(fexp_bmk, fexp_port, by = c("date", "variable"))
    fexp <- dplyr::rename(fexp, fName = variable)
    fexp$act_exposure <- fexp$port_exposure - fexp$bmk_exposure
    fexp <- dplyr::arrange(fexp, fName, date)
  }else{
    fexp <- exposure.TSWF(TSWF)
    fexp <- dplyr::arrange(fexp,date)
    fexp <- reshape2::melt(fexp, id.vars="date", variable.name="fName", value.name="exposure")
  }
  return(fexp)
}
# --------------------- ~~ Performance attribution --------------
#' PA_RA_Analysis
#'
#' performance attribution and risk attribution analysis.
#' @name PA_RA_Analysis
NULL
#' getPAData
#'
#' @rdname PA_RA_Analysis
#' @export
#' @examples
#' FactorLists <- buildFactorLists(
#' buildFactorList(factorFun="gf.SIZE"),
#' buildFactorList(factorFun="gf.GROWTH"),
#' buildFactorList(factorFun="gf.TRADING"),
#' buildFactorList(factorFun="gf.FORECAST"),
#' buildFactorList(factorFun="gf.EARNINGYIELD"),
#' buildFactorList(factorFun="gf.VALUE"),
#' buildFactorList(factorFun="gf.QUALITY"))
#' PA_tables <- getPAData(port,FactorLists)
#' PA_tables <- getPAData(port,FactorLists,bmk='EI000905')
getPAData <- function(port,FactorLists,bmk=NULL,univ="EI000985",sectorAttr = defaultSectorAttr()){
  # Performance and risk attribution of a (possibly benchmark-relative)
  # portfolio. Returns a list: frtn (factor returns), fexp (portfolio factor
  # exposures), perfattr (per-factor return attribution + residual),
  # portrtn (portfolio return), riskattr (per-factor risk attribution).
  # get active wgt, if necessary
  if(!is.null(bmk)){
    port <- getActivewgt(port = port,bmk = bmk,res = "active")
    port <- dplyr::rename(port,wgt=actwgt)
  }
  # calculate factor return
  TS <- getTS(unique(port$date),indexID = univ) # get TSFR within rebDates==dates & univ==univ
  TSF <- getMultiFactor(TS,FactorLists)
  fnames <- guess_factorNames(TSF,silence = TRUE)
  TSFR <- getTSR(TSF)
  regdata <- (reg.TSFR(TSFR,sectorAttr = sectorAttr,secRtnOut = TRUE))[['fRtn']]
  frtn <- reshape2::dcast(regdata,date~fname,value.var = 'frtn')
  # calculate factor covariance (nonlinear shrinkage estimator)
  fcov <- nlshrink::nlshrink_cov(as.matrix(frtn[,fnames]))
  colnames(fcov) <- fnames
  rownames(fcov) <- fnames
  # calculate factor exposure of the (active) portfolio
  TSWF <- merge.x(port,TSFR,by=c('date','stockID'))
  TSWF <- na.omit(TSWF)
  if(!is.null(sectorAttr)){
    TSWF <- gf_sector(TSWF,sectorAttr = sectorAttr)
  }
  fexp <- exposure.TSWF(TSWF)
  fexp <- dplyr::arrange(fexp,date)
  # calculate performance attribution
  # BUGFIX: was 'if(!missing(bmk))' -- with the bmk=NULL default, an explicit
  # bmk=NULL call entered this branch and crashed; the benchmark-relative
  # adjustment must be driven by is.null(), consistent with the check above.
  if(!is.null(bmk)){
    # switch to benchmark-relative stock returns
    rtn.short <- unique(TSWF[,c('date','date_end')])
    rtn.short <- getPeriodrtn_EI(stockID=bmk,begT=rtn.short$date, endT=rtn.short$date_end)
    rtn.short <- dplyr::rename(rtn.short,date=begT,date_end=endT,bmkrtn=periodrtn)
    TSWF <- merge.x(TSWF,rtn.short[,c( "date","date_end","bmkrtn")])
    TSWF <- transform(TSWF,periodrtn=periodrtn-bmkrtn)
  }
  portrtn <- TSWF %>% dplyr::group_by(date) %>% dplyr::summarise(rtn=sum(wgt*periodrtn, na.rm = TRUE)) %>%
    dplyr::ungroup() %>% dplyr::arrange(date)
  portrtn <- as.data.frame(portrtn)
  frtn <- dplyr::select(frtn,one_of(colnames(fexp))) # make the order of cols same with fexp
  # per-factor contribution = factor return * exposure; residual = the rest
  fattr_m <- as.matrix(frtn[, -1])*as.matrix(fexp[, -1])
  res_m <- data.frame(res=portrtn[,-1]-rowSums(fattr_m))
  perfattr <- data.frame(date=portrtn$date,fattr_m,res_m)
  # calculate risk attribution: per-factor share of w' X F X' w, obtained by
  # zeroing all other factor columns on one side of the quadratic form
  riskattr <- data.frame()
  dates <- unique(TSWF$date)
  for(i in 1:length(dates)){
    TSWF_ <- TSWF %>% dplyr::filter(date==dates[i])
    wgtmat <- matrix(TSWF_$wgt,ncol = 1)
    Xmat <- as.matrix(TSWF_[,fnames])
    for(j in fnames){
      Xmatk <- Xmat
      Xmatk[,setdiff(fnames,j)] <- 0
      # Xmatk[,j] <- 1
      riskattr_ <- data.frame(date=dates[i],
                              fname=j,
                              frisk=t(wgtmat) %*% Xmat %*% fcov %*% t(t(wgtmat) %*% Xmatk),stringsAsFactors = FALSE)
      riskattr <- rbind(riskattr,riskattr_)
    }
  }
  riskattr <- reshape2::dcast(riskattr,date~fname,value.var = 'frisk')
  riskattr <- riskattr[,c('date',fnames)]
  return(list(frtn=frtn,fexp=fexp,perfattr=perfattr,portrtn=portrtn,riskattr=riskattr))
}
#' chart.PA.exposure
#'
#' @rdname PA_RA_Analysis
#' @export
#' @examples
#' chart.PA.exposure(PA_tables)
#' chart.PA.exposure(PA_tables,plotInd=TRUE)
chart.PA.exposure <- function(PA_tables, plotInd = FALSE) {
  ## Bar chart of the time-averaged factor exposures in PA_tables$fexp,
  ## faceted into style and industry factors.  Industry factors are the
  ## columns whose names match "^ES\\d"; they are shown only when
  ## plotInd = TRUE.
  exposure_tbl <- PA_tables$fexp
  all_names <- guess_factorNames(exposure_tbl, silence = TRUE)
  ind_names <- all_names[stringr::str_detect(all_names, '^ES\\d')]
  style_names <- setdiff(all_names, ind_names)
  avg_exposure <- colMeans(exposure_tbl[, c(style_names, ind_names)])
  plot_dat <- data.frame(factorName = names(avg_exposure),
                         factorExposure = unname(avg_exposure),
                         stringsAsFactors = FALSE)
  ## tag must be derived from the ORIGINAL names, before industry IDs are
  ## replaced by their human-readable sector names
  is_industry <- plot_dat$factorName %in% ind_names
  plot_dat$tag <- ifelse(is_industry, 'industry', 'style')
  plot_dat$factorName <- ifelse(is_industry,
                                sectorID2name(plot_dat$factorName),
                                plot_dat$factorName)
  if (!plotInd) {
    plot_dat <- dplyr::filter(plot_dat, tag == 'style')
  }
  ggplot(plot_dat, aes(x = reorder(factorName, -factorExposure),
                       y = factorExposure, fill = tag)) +
    geom_bar(stat = "identity") +
    labs(title = 'Factor Exposure', x = '', y = '') +
    facet_wrap(~tag, scales = "free", ncol = 1) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1))
}
#' chart.PA.attr
#'
#' @rdname PA_RA_Analysis
#' @export
#' @examples
#' chart.PA.attr(PA_tables)
#' chart.PA.attr(PA_tables,plotInd=TRUE)
chart.PA.attr <- function(PA_tables, plotInd = FALSE, attributeAnn = TRUE) {
  ## Bar chart of per-factor performance attribution from
  ## PA_tables$perfattr, faceted into style and industry factors.
  ## attributeAnn = TRUE plots annualized returns (rtn.summary), otherwise
  ## cumulative returns over the whole period (rtn.periods).  Industry
  ## factors ("^ES\\d" columns) are included only when plotInd = TRUE.
  attr_tbl <- PA_tables$perfattr
  all_names <- guess_factorNames(attr_tbl, no_factorname = 'res', silence = TRUE)
  ind_names <- all_names[stringr::str_detect(all_names, '^ES\\d')]
  style_names <- setdiff(all_names, ind_names)
  if (!plotInd) {
    attr_tbl <- attr_tbl[, c('date', style_names, 'res')]
  }
  attr_xts <- xts::xts(attr_tbl[, -1], order.by = attr_tbl[, 1])
  if (attributeAnn) {
    summ <- rtn.summary(attr_xts)['ann_rtn', ]
  } else {
    summ <- rtn.periods(attr_xts)["Cumulative Return", ]
  }
  plot_dat <- data.frame(factorName = names(summ),
                         factorAttribution = unname(summ),
                         stringsAsFactors = FALSE)
  ## tag must be derived from the ORIGINAL names, before industry IDs are
  ## replaced by their human-readable sector names
  is_industry <- plot_dat$factorName %in% ind_names
  plot_dat$tag <- ifelse(plot_dat$factorName %in% c(style_names, 'res'),
                         'style', 'industry')
  plot_dat$factorName <- ifelse(is_industry,
                                sectorID2name(plot_dat$factorName),
                                plot_dat$factorName)
  p1 <- ggplot(plot_dat, aes(x = reorder(factorName, -factorAttribution),
                             y = factorAttribution, fill = tag)) +
    geom_bar(stat = "identity") +
    facet_wrap(~tag, scales = "free", ncol = 1) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1))
  if (attributeAnn == TRUE) {
    p1 + labs(title = 'Factor Attribution(Annulized)', x = '', y = '')
  } else {
    p1 + labs(title = 'Factor Attribution', x = '', y = '')
  }
}
# --------------------- ~~ Risk attribution --------------
#' chart.RA.attr
#'
#' @rdname PA_RA_Analysis
#' @export
#' @examples
#' chart.RA.attr(PA_tables)
chart.RA.attr <- function(PA_tables) {
  ## Bar chart of total risk attribution per factor: PA_tables$riskattr is
  ## reshaped to long form and the per-date contributions are summed over
  ## the whole period before plotting.
  risk_tbl <- PA_tables$riskattr
  factor_cols <- guess_factorNames(risk_tbl, silence = TRUE)
  long_tbl <- tidyr::gather(risk_tbl, 'fname', 'frisk', factor_cols)
  totals <- summarise(group_by(long_tbl, fname), risk = sum(frisk))
  ggplot(totals, aes(x = reorder(fname, -risk), y = risk)) +
    geom_bar(stat = "identity") +
    theme(axis.text.x = element_text(angle = 90, hjust = 1))
}
|
55734801544eb1068063017cdfd43af6de69d7de | cc1e956097f25984011863d9b10b13e957e01160 | /cachematrix.R | 360e66799216d1d6a1995aae271065650e90ce0b | [] | no_license | stalebi/ProgrammingAssignment2 | 42f0bbf33131ff8ffee366d8fecbe9baf51f478a | 59826cf977b3bde77ec9567543ef61bae8e85648 | refs/heads/master | 2021-01-18T00:34:24.371540 | 2015-02-22T16:11:25 | 2015-02-22T16:11:25 | 31,167,376 | 0 | 0 | null | 2015-02-22T15:28:10 | 2015-02-22T15:28:10 | null | UTF-8 | R | false | false | 1,438 | r | cachematrix.R | # As the calculation of inversion of a matrix is costly, the following two
# functions are developed to cache the inverse of a matrix.
# makeCacheMatrix creates a list of functions to
# set the value of matrix x
# get the value of matrix x
# set the value of inverse of matrix x
# get the value of inverse of matrix x
makeCacheMatrix <- function(x = matrix()){
  ## Wrap a matrix together with a cache slot for its inverse.
  ## Returns a list of four closures sharing this call's environment:
  ##   set(y)       store a new matrix and invalidate the cached inverse
  ##   get()        return the stored matrix
  ##   setInver(v)  store a computed inverse in the cache
  ##   getInver()   return the cached inverse, or NULL if not yet computed
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix -> any old inverse is stale
    },
    get = function() x,
    setInver = function(solve) cached_inverse <<- solve,
    getInver = function() cached_inverse
  )
}
# The cacheSolve function returns the inverse of the matrix. If
# the inverse has already been computed, it returns its value.
# Otherwise, it computes the inverse.
# cacheSolve assumes that x is invertible
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped by `x` (a list produced by
  ## makeCacheMatrix).  If an inverse is already cached it is returned
  ## directly (with a "getting cached data" message); otherwise it is
  ## computed with solve(), stored in the cache, and returned.
  ## Assumes the wrapped matrix is invertible; `...` is passed to solve().
  cached <- x$getInver()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInver(cached)
  } else {
    message("getting cached data")
  }
  cached
}
# Running the code
# A <- matrix(1:4,2,2)
# A
# [,1] [,2]
# [1,] 1 3
# [2,] 2 4
# AA <- makeCacheMatrix(A)
# AA$get()
# [,1] [,2]
# [1,] 1 3
# [2,] 2 4
# cacheSolve(AA)
# [,1] [,2]
# [1,] -2 1.5
# [2,] 1 -0.5
# cacheSolve(AA)
#getting cached data
# [,1] [,2]
# [1,] -2 1.5
# [2,] 1 -0.5
|
de506f6c2ba2c75d8baf63901bd2f6f1ac812ebc | 62ec89b6d425b18ff1ff5be2b9649785282d7865 | /inst/snippets/colors.R | 53304fd3cf97705da8645d26f1b8d51d34299581 | [] | no_license | klaassenj/Lock5withR | 8cf9bab45ba1d87db77713b8c9b6826b663363c3 | f2773d9f828b72882ed1c4d6b3b2e539a3b3b24a | refs/heads/master | 2023-08-02T15:20:09.975771 | 2018-03-14T22:48:09 | 2018-03-14T22:48:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13 | r | colors.R | ## colors()
|
59ac24aa9c6ec113feb2971538d994fb310d6519 | c891f0ea2909dbee394ed07b194c51a165050773 | /man/Subset-Predict.Rd | 7a406275ba282b759787e637d1f960d3c838feff | [] | no_license | cran/gapfill | ee127e4a2dc4e38e4c80da01feecb9257c8d8b81 | 3696e9f19b95c7995a08ca8e3c54c92ea631c250 | refs/heads/master | 2021-07-19T11:59:55.964257 | 2021-02-12T09:10:05 | 2021-02-12T09:10:05 | 58,034,772 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 6,634 | rd | Subset-Predict.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gapfill.R
\name{Subset-Predict}
\alias{Subset-Predict}
\alias{Subset}
\alias{Predict}
\alias{fnSubset}
\alias{fnPredict}
\title{Subset and Predict Functions}
\usage{
Subset(data, mp, i, initialSize = c(10L, 10L, 1L, 5L))
Predict(
a,
i,
nTargetImage = 5,
nImages = 4,
nQuant = 2,
predictionInterval = FALSE,
qrErrorToNA = TRUE
)
}
\arguments{
\item{data}{Numeric array with four dimensions. The input (satellite) data to be gap-filled.
Missing values should be encoded as \code{NA}.
The data should have the dimensions: x coordinate, y coordinate, seasonal index (e.g., day of the year), and year.
See the \code{ndvi} dataset for an example.}
\item{mp}{Integer vector of length 4 encoding the position of the missing value in \code{data} to predict.}
\item{i}{Integer vector of length 1. The number of tried subsets that lead to a \code{NA} return value from \code{Predict}.}
\item{initialSize}{Integer vector of length 4, that provides the size of the subset for \code{i = 0}.}
\item{a}{Return value of \code{Subset()}.}
\item{nTargetImage}{Integer vector of length 1. Minimum number of non-NA values in the image containing the missing value.
If the criterion is not met, \code{NA} is returned.}
\item{nImages}{Integer vector of length 1. Minimum number of non-empty images.
If the criterion is not met, \code{NA} is returned.}
\item{nQuant}{Integer vector of length 1. Parameter passed to \code{\link{EstimateQuantile}}.}
\item{predictionInterval}{Logical vector of length 1.
If \code{FALSE} (default), no prediction interval is returned.
If \code{TRUE}, the predicted value together with the lower and upper bounds
of an approximated 90\% prediction interval are returned.
In that case, the function returns 3 values, and hence,
the argument \code{nPredict} of \code{\link{gapfill}} has to be set to 3 in order to store all returned values.}
\item{qrErrorToNA}{Logical vector of length 1.
If \code{TRUE} (default), an error in the quantile regression fitting leads to a \code{NA} return value.
If \code{FALSE}, an error in the quantile regression fitting leads to an error and stops the prediction.}
}
\value{
\code{Subset} returns an array with 4 dimensions containing the missing value
at the position indicated by the attribute \code{mp}.
\code{Predict} returns a numeric vector containing the predicted value
(and if \code{predictionInterval} is \code{TRUE}, the lower and upper bounds of the prediction interval),
or \code{NA}, if no prediction was feasible.
}
\description{
The \code{Subset} and \code{Predict} function used in the default configuration of \code{\link{Gapfill}}.
To predict a missing value, the two function are called sequentially as described the help page of \code{\link{Gapfill}}.
}
\details{
The \code{Subset} function defines the search strategy to find a
relevant subset by calling the function \code{\link{ArrayAround}}.
The size of the initial subset is given by the argument \code{initialSize}.
Its default value is \code{c(5L, 5L, 1L, 5L)}, which corresponds to a spatial extent of 5 pixels
in each direction from the missing value and includes time points having the previous, the same or the next seasonal index and
are not further apart than 5 years.
With an increase of the argument \code{i}, the spatial extent of the subset increases.
The \code{Predict} function decides whether the subset \code{a} is suitable and
calculates the prediction (fill value) when a suitable subset is provided.
To formulate the conditions that are used to decide if a subset is suitable,
consider the subset \code{a} as a collection of images.
More precisely, if \code{dim(a)} \code{=} \code{c(d1, d2, d3, d4)},
it can be seen as a collection of \code{d3*d4} images with an extent of \code{d1} by \code{d2} pixels.
Using this terminology, we require the following conditions to be fulfilled
in order to predict the missing value:
\itemize{
\item \code{a} contains at least \code{nTargetImage} non-NA values in the image containing the missing value,
\item \code{a} contains at least \code{nImages} non-empty images.
}
The prediction itself is based on sorting procedures (see \code{\link{Score}} and
\code{\link{EstimateQuantile}}) and the quantile regression function \code{\link[quantreg]{rq}}.
If the argument \code{predictionInterval} is \code{TRUE} the \code{Predict} functions returns
the predicted value together with the lower and upper bounds of an approximated 90\% prediction interval.
The interval combines the uncertainties introduced by \code{\link{Score}}
and \code{\link{EstimateQuantile}}.
}
\note{
The current implementation of \code{Subset} does not take into account
that locations at the boundary of \code{data} can be neighboring to each other.
For example, if global data (entire sphere) are considered, the location
\code{data[1,1,,]} is a neighbor of \code{data[dim(data)[1], dim(data)[2],,]}.
Similar considerations apply when data are available for an entire year.
To take this into account, the \code{Subset} function can be redefined accordingly or
the data can be augmented.
}
\examples{
## Assume we choose c(5, 5, 1, 5) as initalSize of the subset
iS <- c(5, 5, 1, 5)
## case 1: initial subset leads to prediction -------
i <- 0
a <- Subset(data = ndvi, mp = c(1, 3, 1, 2), i = i, initialSize = iS)
p <- Predict(a = a, i = i)
p
stopifnot(identical(a, ArrayAround(data = ndvi, mp = c(1, 3, 1, 2),
size = c(5 + i, 5 + i, 1, 5))))
stopifnot(identical(p, Gapfill(data = ndvi, subset = 1807,
initialSize = iS, verbose = FALSE)$fill[1807]))
## case 2: two tries are necessary ------------------
i <- 0
a <- Subset(data = ndvi, mp = c(20, 1, 1, 2), i = i, initialSize = iS)
p <- Predict(a = a, i = i)
p
## Increase i and try again.
i <- i + 1
a <- Subset(data = ndvi, mp = c(20, 1, 1, 2), i = i, initialSize = iS)
p <- Predict(a = a, i = i)
p
stopifnot(identical(a, ArrayAround(data = ndvi, mp = c(20, 1, 1, 2),
size = c(5 + i, 5 + i, 1, 6))))
stopifnot(identical(p, Gapfill(data = ndvi, subset = 1784,
initialSize = iS, verbose = FALSE)$fill[1784]))
}
\references{
F. Gerber, R. de Jong, M. E. Schaepman, G. Schaepman-Strub, and R. Furrer (2018)
in IEEE Transactions on Geoscience and Remote Sensing, pp. 1-13, \doi{10.1109/TGRS.2017.2785240}.
}
\seealso{
\code{\link{Gapfill}}, \code{\link{Extend}},
\code{\link{EstimateQuantile}}, \code{\link{Score}}, \code{\link{ndvi}}.
}
\author{
Florian Gerber, \email{flora.fauna.gerber@gmail.com}.
}
|
058f2783db586a595f4360d8bb7a57d284c2b238 | 777dc7c074e8595f5bfc6cc88a74edb651d3a616 | /files/functions.R | c452219fbd7527ce5e86d86e9a053617b101c9d4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bumjoon/bumjoon.github.io | f4797567cf87b8805064c1925e95e92eb60dde50 | aefcba37a9698ac6f6dff7225b4bca300a530974 | refs/heads/master | 2023-07-06T11:19:00.671847 | 2023-06-27T13:51:51 | 2023-06-27T13:51:51 | 85,373,648 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 28,101 | r | functions.R | ###=============================================================================
### TRAC PHASE 2
### FUNCTIONS AND SOURCE FOR DESCRIPTIVE STATISTICS OF LIFELOG
### BUMJOON KANG, UFL
###=============================================================================
# Data read and output ----------------------------------------------------------------
# fix column names
fix.colnames.1 <- function(x) {
if (!is.data.frame(x)) {
stop("x is not a data frame")
}
command.setname <- paste("colnames(", x, ") <- gsub('_','.',tolower(colnames(", x, ")))", sep="")
cat (command.setname)
eval(parse(text=command.setname))
#colnames(x) <- gsub("_",".",tolower(colnames(x)))
#colnames(x)<-gsub("..", ".", colnames(x), fixed=T)
#colnames(x)<-gsub(" ", ".", colnames(x))
#colnames(x)<-gsub("/", ".", colnames(x))
#olnames(x)<-gsub("#", "num", colnames(x))
#return(x)
}
fix.colnames <- function(x) {
  ## Normalize the column names of a data frame to lower-case,
  ## dot-separated form: "_", " ", "/" and "-" all become ".", a literal
  ## ".." collapses to ".", and "#" becomes "num".
  ## Returns the cleaned name vector; `x` itself is not modified.
  if (!is.data.frame(x)) {
    stop("x is not a data frame")
  }
  nm <- tolower(colnames(x))
  nm <- gsub("_", ".", nm)
  nm <- gsub("..", ".", nm, fixed = TRUE)  # collapse doubled dots
  nm <- gsub(" ", ".", nm)
  nm <- gsub("/", ".", nm)
  nm <- gsub("-", ".", nm)
  nm <- gsub("#", "num", nm)
  nm
}
unfix.colnames <- function(x) {
  ## Normalize the column names of a data frame to lower-case,
  ## underscore-separated form (the underscore counterpart of
  ## fix.colnames): ".", " ", "/" and "-" become "_", a doubled "__"
  ## collapses to "_", and "#" becomes "num".
  ## Returns the cleaned name vector; `x` itself is not modified.
  if (!is.data.frame(x)) {
    stop("x is not a data frame")
  }
  nm <- tolower(colnames(x))
  nm <- gsub(".", "_", nm, fixed = TRUE)
  # BUGFIX: was gsub("..", "_", ..., fixed = TRUE) — dead code, because the
  # line above has already turned every "." into "_".  The separator that
  # can actually be doubled at this point is "__"; collapse that instead.
  nm <- gsub("__", "_", nm, fixed = TRUE)
  nm <- gsub(" ", "_", nm)
  nm <- gsub("/", "_", nm)
  # added for consistency with fix.colnames, which also maps "-"
  nm <- gsub("-", "_", nm)
  nm <- gsub("#", "num", nm)
  nm
}
# Graphics ----------------------------------------------------------------
resetPar <- function() {
  ## Return the full set of default graphical parameters by reading them
  ## from a freshly opened device, which is closed again immediately.
  ## Usage: par(resetPar()) to restore a device's settings.
  dev.new()
  defaults <- par(no.readonly = TRUE)
  dev.off()
  defaults
}
# Easy tips--------------------------------------------------------------
"%w/o%" <- function(x, y) x[!x %in% y] #-- x without y
"%not in%" <- function (x, y) is.na(match(x, y, nomatch=NA_integer_))
#(1:10) %w/o% c(3,7,12)
###=============================================================================
### Make variable names
###=============================================================================
make.var.prefix.suffix <- function(prefix,suffix) {
  ## Build all "prefix_suffix" name combinations in suffix-major order:
  ## for each suffix (outer), every prefix (inner).
  ## Returns NULL when either input is empty, matching the historical
  ## behavior of the loop version (which additionally misbehaved on empty
  ## input because of its 1:length(...) indexing — fixed here).
  ## Replaces the original O(n^2) grow-by-c() nested loop with outer().
  out <- as.vector(outer(prefix, suffix, paste, sep = "_"))
  if (length(out) == 0) {
    return(NULL)
  }
  out
}
make.var.suffix.prefix <- function(prefix,suffix) {
  ## Build all "prefix_suffix" name combinations in prefix-major order:
  ## for each prefix (outer), every suffix (inner).
  ## Returns NULL when either input is empty, matching the historical
  ## behavior of the loop version (which additionally misbehaved on empty
  ## input because of its 1:length(...) indexing — fixed here).
  ## Replaces the original O(n^2) grow-by-c() nested loop with rep()/paste().
  if (length(prefix) == 0 || length(suffix) == 0) {
    return(NULL)
  }
  paste(rep(prefix, each = length(suffix)), suffix, sep = "_")
}
###-----------------------------------------------------------------------------
### variables for generating other variables
###-----------------------------------------------------------------------------
var.dist <- c("min","25qtl","median","mean","75qtl","max","sd")
var.tm <- c("trip","walk","bike","car","transit")
var.rec <- c("acc","gps","place","trip")
###-----------------------------------------------------------------------------
### variables for data collection
###-----------------------------------------------------------------------------
var.valid <- make.var.prefix.suffix(c("vd"),"n")
var.raw <- make.var.prefix.suffix(var.rec, "raw_n")
var.num <- make.var.prefix.suffix(c(var.rec,"bout_500x5x7"), "n")
var.data.collection <- c(var.valid, var.raw, var.num)
###-----------------------------------------------------------------------------
### variables for person-day level variables
###-----------------------------------------------------------------------------
### accelerometer data
var.acc <- c("acc_d_hour",
"nonwear_0x20_d_hour",
make.var.prefix.suffix(c("mvpa_500","mpa_2020_5999","vpa_5999p"),
"d_minute"),
"count_d_sum")
### count distribution
var.count.dist <- make.var.prefix.suffix("count", var.dist)
### gps data
var.gps <- c("gps_d_minute")
### speed distribution
var.speed.dist <- make.var.prefix.suffix("speed", var.dist)
### place data
var.place <- c("place_d_n",
make.var.prefix.suffix(c("place","home","work","fitness"),
"d_minute"))
### trip data
var.trip <- make.var.prefix.suffix(var.tm, c("d_n","d_minute"))
### accelerometer + GPS data
var.acc.gps <- "acc_to_gps_d_ratio"
### accelerometer + place data
var.acc.place <- make.var.prefix.suffix(c("place","home","work","fitness"),
"count_d_sum")
### accelerometer + trip data
var.acc.trip <- make.var.prefix.suffix(var.tm, "count_d_sum")
### GPS + place data
var.gps.place <- "gps_to_place_d_ratio"
### GPS + trip data
var.gps.trip <- "gps_to_trip_d_ratio"
### bout
var.bout <- make.var.suffix.prefix(c("bout","bout_500x5x7","bout_500x4x5"),
c("d_n","d_minute","count_d_sum"))
### all of above
var.person.day <- c(var.acc, var.count.dist, var.gps, var.speed.dist, var.place, var.trip,
var.acc.gps, var.acc.place, var.acc.trip, var.gps.place, var.gps.trip, var.bout)
###-----------------------------------------------------------------------------
### variables for person-average-day level
###-----------------------------------------------------------------------------
var.person.avg.day <- var.person.day
var.cs.dist <- c(var.count.dist, var.speed.dist)
################################################################################
### ###
### Data collection level ###
### ###
################################################################################
###=============================================================================
### Get stats at subject level
###=============================================================================
## Build a one-row data frame of data-collection statistics (record counts
## per sensor stream) for the current subject.
## NOTE(review): reads the globals `dat`, `dat.raw` and `id.` from the
## calling environment instead of taking them as arguments — the driver
## script must set them up before each call; confirm against the caller.
## The eval(parse(...)) machinery creates one local variable per name in
## var.data.collection and later collects them by name into the result row.
get.stat.data.collection <- function() {
  ##----------------------------------------------------------------------
  ## result to be stored here
  ##----------------------------------------------------------------------
  stat.data.collection.i <- data.frame(matrix(data=NA, nrow=1, ncol=length(var.data.collection)+1))
  colnames(stat.data.collection.i) <- c("id", var.data.collection)
  ## reset all values as NA
  for(j in 1:length(var.data.collection)) {
    eval(parse(text=sprintf("%s <- NA", var.data.collection[j])))
  }
  ##----------------------------------------------------------------------
  ## calc new stats
  ##----------------------------------------------------------------------
  ###vdl_n <- length(unique(subset(dat.raw, vdl==1)$jday_trac) %w/o% NA)
  vd_n <- length(unique(dat$jdaya_trac) %w/o% NA) # generic valid days
  ## raw data (pre-cleaning record counts, from dat.raw)
  acc_raw_n <- nrow(subset(dat.raw, !is.na(recnum_acc)))
  gps_raw_n <- nrow(subset(dat.raw, !is.na(recnum_gps)))
  place_raw_n <- length(unique(dat.raw$recnum_place) %w/o% NA)
  trip_raw_n <- length(unique(dat.raw$recnum_trip) %w/o% NA)
  ## number of records in total (post-cleaning, from dat)
  acc_n <- nrow(subset(dat, !is.na(recnum_acc)))
  gps_n <- nrow(subset(dat, !is.na(recnum_gps))) # number of GPS records
  place_n <- length(unique(dat$recnum_place) %w/o% NA)
  trip_n <- length(unique(dat$recnum_trip) %w/o% NA)
  bout_500x5x7_n <- length(unique(dat$bout_num_500x5x7) %w/o% NA)
  ##----------------------------------------------------------------------
  ## store results
  ##----------------------------------------------------------------------
  ## copy each named local computed above into the matching result column
  for (j in 1:length(var.data.collection)) {
    stat.data.collection.i[1, var.data.collection[j]] <- eval(parse(text=sprintf("%s", var.data.collection[j])))
  }
  stat.data.collection.i[1,"id"] <- id.
  ## output result
  stat.data.collection.i
}
################################################################################
### ###
### Person-day level ###
### ###
################################################################################
###=============================================================================
### Get stats at splitted day level
###=============================================================================
## Compute all person-day statistics (var.person.day) for one split day and
## store them into row j of the accumulator data frame.
## NOTE(review): operates entirely on globals set up by the calling loop —
## `dasj` (records of the j-th day), `j`, `jday.names`, `stat.person.day.i`,
## `id.` and `var.person.day`; confirm the driver script defines these
## before each call.  Records appear to be 30-second epochs, hence the
## recurring "/ 2" (minutes) and "/ (2*60)" (hours) conversions — verify.
get.stat.person.day <- function() {
  ##----------------------------------------------------------------------
  ## reset all values as NA
  ##----------------------------------------------------------------------
  ## one local variable per output column; stats not computable for this
  ## day simply stay NA and are stored as such below
  for(k in 1:length(var.person.day)) {
    eval(parse(text=sprintf("%s <- NA", var.person.day[k])))
  }
  ##----------------------------------------------------------------------
  ## calc new stats for j-th day
  ##----------------------------------------------------------------------
  ## accelerometer data
  acc_d_hour <- nrow(subset(dasj, !is.na(recnum_acc))) / (2*60)
  ## missing non-wear flags are counted as non-wear time
  nonwear_0x20_d_hour <- (sum(dasj$nonwear_0x20, na.rm=TRUE) + sum(is.na(dasj$nonwear_0x20)))/(2*60)
  ## NOTE(review): nonwear_0x60_2_d_hour is not listed in var.person.day,
  ## so this value is computed but never stored — confirm whether intended.
  nonwear_0x60_2_d_hour <- (sum(dasj$nonwear_0x60_2, na.rm=TRUE) + sum(is.na(dasj$nonwear_0x60_2)))/(2*60)
  mvpa_500_d_minute <- sum(dasj$mvpa_500==1, na.rm=TRUE) / 2
  ## per-minute cutpoints (2020 / 5999) halved for 30-second epochs
  mpa_2020_5999_d_minute <- sum(dasj$counts >= 2020/2 & dasj$counts < 5999/2, na.rm=TRUE) / 2
  vpa_5999p_d_minute <- sum(dasj$counts >= 5999/2, na.rm=TRUE) / 2
  count_d_sum <- sum(subset(dasj, !is.na(recnum_acc) & vd==1)$counts, na.rm=T)
  ## count distribution
  x <- subset(dasj, !is.na(recnum_acc))$counts
  if(length(x)>0) { # with only available ACC data
    count_min <- min(x)
    count_25qtl <- quantile(x, probs=0.25)
    count_median <- median(x)
    count_mean <- mean(x)
    count_75qtl <- quantile(x, probs=0.75)
    count_max <- max(x)
    count_sd <- sd(x)
  }
  ## gps data
  gps_d_minute <- nrow(subset(dasj, !is.na(recnum_gps))) / 2
  ## speed distribution
  x <- subset(dasj, !is.na(recnum_gps))$speed_kmh
  if(length(x)>0) { # with only available GPS data
    speed_min <- min(x)
    speed_25qtl <- quantile(x, probs=0.25)
    speed_median <- median(x)
    speed_mean <- mean(x)
    speed_75qtl <- quantile(x, probs=0.75)
    speed_max <- max(x)
    speed_sd <- sd(x)
  }
  ## place data
  place_d_n <- length(unique(dasj$seq_id) %w/o% NA)
  place_d_minute <- sum(!is.na(dasj$seq_id)) / 2
  ## home detected by place-name keyword; work/fitness by activity code
  home_d_minute <- length(grep("HOME|HOUSE", toupper(dasj$placename))) / 2
  work_d_minute <- sum(dasj$activity==10,na.rm=T) / 2
  fitness_d_minute <- sum(dasj$activity==5,na.rm=T) / 2
  ## trip data (travel_mode codes: 1/2 car, 3-6 transit, 12 bike, 13 walk)
  ## trip frequency
  trip_d_n <- length(unique(dasj$tripnum) %w/o% NA)
  walk_d_n <- length(unique(subset(dasj, travel_mode==13)$tripnum) %w/o% NA)
  bike_d_n <- length(unique(subset(dasj, travel_mode==12)$tripnum) %w/o% NA)
  car_d_n <- length(unique(subset(dasj, travel_mode==1 | travel_mode==2)$tripnum) %w/o% NA)
  transit_d_n <- length(unique(subset(dasj, travel_mode==3 |
                                        travel_mode==4 |
                                        travel_mode==5 |
                                        travel_mode==6)$tripnum) %w/o% NA)
  ## trip duration
  trip_d_minute <- sum(!is.na(dasj$tripnum)) / 2
  walk_d_minute <- sum(dasj$travel_mode==13, na.rm=T) / 2
  bike_d_minute <- sum(dasj$travel_mode==12, na.rm=T) / 2
  car_d_minute <- sum(dasj$travel_mode==1 | dasj$travel_mode==2 , na.rm=T) / 2
  transit_d_minute <- sum(dasj$travel_mode==3 |
                            dasj$travel_mode==4 |
                            dasj$travel_mode==5 |
                            dasj$travel_mode==6, na.rm=T) / 2
  ## accelerometer + GPS data: share of ACC epochs that also have a GPS fix
  acc_to_gps_d_ratio <- mean(!is.na(subset(dasj, !is.na(recnum_acc))$recnum_gps))
  ## accelerometer + place data: counts accumulated while at each place type
  place_count_d_sum <- sum(subset(dasj, !is.na(seq_id))$counts, na.rm=T)
  home_count_d_sum <- sum(dasj[grep("HOME|HOUSE", toupper(dasj$placename)),]$counts, na.rm=T)
  work_count_d_sum <- sum(subset(dasj, activity==10)$counts,na.rm=T)
  fitness_count_d_sum <- sum(subset(dasj, activity==5)$counts,na.rm=T)
  ## accelerometer + trip data: counts accumulated while travelling
  trip_count_d_sum <- sum(subset(dasj, !is.na(tripnum))$counts,na.rm=T)
  walk_count_d_sum <- sum(subset(dasj, travel_mode==13)$counts,na.rm=T)
  bike_count_d_sum <- sum(subset(dasj, travel_mode==12)$counts,na.rm=T)
  car_count_d_sum <- sum(subset(dasj, travel_mode==1 | travel_mode==2)$counts,na.rm=T)
  transit_count_d_sum <- sum(subset(dasj, travel_mode==3 |
                                      travel_mode==4 |
                                      travel_mode==5 |
                                      travel_mode==6)$counts,na.rm=T)
  ## GPS + place data: share of GPS epochs that fall inside a known place
  gps_to_place_d_ratio <- ifelse(gps_d_minute > 0, mean(!is.na(subset(dasj, !is.na(recnum_gps))$seq_id)), NA)
  ## GPS + trip data: share of GPS epochs that fall inside a trip
  gps_to_trip_d_ratio <- ifelse(gps_d_minute > 0, mean(!is.na(subset(dasj, !is.na(recnum_gps))$tripnum)), NA)
  ## bout (two bout definitions, plus their combined totals)
  bout_500x5x7_d_n <- length(unique(dasj$bout_num_500x5x7) %w/o% NA)
  bout_500x5x7_d_minute <- nrow(subset(dasj, bout_500x5x7==1)) / 2
  bout_500x5x7_count_d_sum <- sum(subset(dasj, bout_500x5x7==1)$counts, na.rm=T)
  bout_500x4x5_d_n <- length(unique(dasj$bout_num_500x4x5) %w/o% NA)
  bout_500x4x5_d_minute <- nrow(subset(dasj, bout_500x4x5==1)) / 2
  bout_500x4x5_count_d_sum <- sum(subset(dasj, bout_500x4x5==1)$counts, na.rm=T)
  bout_d_n <- bout_500x5x7_d_n + bout_500x4x5_d_n
  bout_d_minute <- bout_500x5x7_d_minute + bout_500x4x5_d_minute
  bout_count_d_sum <- bout_500x5x7_count_d_sum + bout_500x4x5_count_d_sum
  ## identifier columns for row j of the accumulator
  stat.person.day.i[j,"id"] <- id.
  stat.person.day.i[j,"id_jday_trac"] <- jday.names[j]
  stat.person.day.i[j,"id_day_trac"] <- dasj$id_day_trac[1]
  ##----------------------------------------------------------------------
  ## store stats for j-th day
  ##----------------------------------------------------------------------
  ## copy each named local computed above into the matching result column
  for (k in 1:length(var.person.day)) {
    stat.person.day.i[j, var.person.day[k]] <- eval(parse(text=sprintf("%s", var.person.day[k])))
  }
  stat.person.day.i
}
################################################################################
### ###
### Person-average-day level ###
### ###
################################################################################
###=============================================================================
### Get stats at subject level from dat
###=============================================================================
## Build a one-row data frame of subject-level count and speed distribution
## statistics (columns given by var.cs.dist) over all of the subject's data.
## NOTE(review): reads the globals `dat` and `id.` from the calling
## environment instead of taking them as arguments; the driver script must
## set them before each call.  The eval(parse(...)) machinery creates one
## local per name in var.cs.dist and later collects them into the result.
get.stat.cs.dist <- function() {
  ##----------------------------------------------------------------------
  ## result to be stored here
  ##----------------------------------------------------------------------
  stat.cs.dist.i <- data.frame(matrix(data=NA, nrow=1, ncol=length(var.cs.dist)+1))
  colnames(stat.cs.dist.i) <- c("id", var.cs.dist)
  ## reset all values as NA
  for(j in 1:length(var.cs.dist)) {
    eval(parse(text=sprintf("%s <- NA", var.cs.dist[j])))
  }
  ##----------------------------------------------------------------------
  ## calc new stats
  ##----------------------------------------------------------------------
  ## count distribution
  x <- subset(dat, !is.na(recnum_acc))$counts
  if(length(x)>0) { # with only available ACC data
    count_min <- min(x)
    count_25qtl <- quantile(x, probs=0.25)
    count_median <- median(x)
    count_mean <- mean(x)
    count_75qtl <- quantile(x, probs=0.75)
    count_max <- max(x)
    count_sd <- sd(x)
  }
  ## speed distribution
  x <- subset(dat, !is.na(recnum_gps))$speed_kmh
  if(length(x)>0) { # with only available GPS data
    speed_min <- min(x)
    speed_25qtl <- quantile(x, probs=0.25)
    speed_median <- median(x)
    speed_mean <- mean(x)
    speed_75qtl <- quantile(x, probs=0.75)
    speed_max <- max(x)
    speed_sd <- sd(x)
  }
  ##----------------------------------------------------------------------
  ## store results
  ##----------------------------------------------------------------------
  ## copy each named local computed above into the matching result column
  for (j in 1:length(var.cs.dist)) {
    stat.cs.dist.i[1, var.cs.dist[j]] <- eval(parse(text=sprintf("%s", var.cs.dist[j])))
  }
  stat.cs.dist.i[1,"id"] <- id.
  ## output result
  stat.cs.dist.i
}
################################################################################
### ###
### Bout Level summary of summaries ###
### ###
################################################################################
###=============================================================================
### Get stats of records of bout subset
###=============================================================================
get.stat.bout <- function(dat) {
  ## One-row summary of a set of bout records.
  ##
  ## Args:
  ##   dat: data frame of bout records; expected columns: id, id_bout_num,
  ##        recnum_gps, counts, speed_kmh.
  ## Returns:
  ##   1-row data.frame with subject/bout/record counts plus the
  ##   distribution statistics (var.dist order) of accelerometer counts and
  ##   of GPS speed.  Statistics that cannot be computed (no ACC or no GPS
  ##   records) are NA.
  ##
  ## BUGFIX: the original left e.g. speed_bout_min undefined when there was
  ## no GPS record, so the eval(parse(...)) collection loop failed with
  ## "object not found".  All statistics are now pre-computed NA-safe.
  dist.stats <- function(x) {
    ## Seven distribution statistics of x, named in var.dist order; all NA
    ## when x is empty so the output schema is always complete.
    if (length(x) == 0) {
      stats <- rep(NA_real_, length(var.dist))
    } else {
      stats <- c(min(x), quantile(x, probs = 0.25), median(x), mean(x),
                 quantile(x, probs = 0.75), max(x), sd(x))
    }
    names(stats) <- var.dist
    stats
  }
  gps.rec <- subset(dat, !is.na(recnum_gps))
  vals <- c(length(unique(dat$id)),          # subject_bout_n
            length(unique(dat$id_bout_num)), # bout_n
            nrow(dat),                       # rec_bout_n
            nrow(gps.rec),                   # rec_bout_gps_n
            dist.stats(dat$counts),          # count_bout_*
            dist.stats(gps.rec$speed_kmh))   # speed_bout_*
  names(vals) <- c("subject_bout_n", "bout_n", "rec_bout_n", "rec_bout_gps_n",
                   make.var.suffix.prefix(c("count_bout", "speed_bout"), var.dist))
  as.data.frame(as.list(vals))
}
###=============================================================================
### Get stats of bouts at bout level
###=============================================================================
### general information
###-----------------------------------------------------------------------------
get.stat.per.bout <- function(dat) {
  ## Summarise bout-level statistics across all bouts in `dat`.
  ##
  ## Args:
  ##   dat: data frame of bout records; expected columns: id_bout_num,
  ##        mvpa500_minute, recnum_gps, counts, speed.
  ## Returns:
  ##   1-row data.frame with min/25qtl/median/mean/75qtl/max/sd (across
  ##   bouts) of each per-bout statistic: duration in minutes, MVPA
  ##   minutes, GPS coverage ratio, mean count, and mean GPS speed.
  ##
  ## BUGFIXES relative to the original:
  ##   - the mvpa500 column names were assigned to bout.duration (a
  ##     copy/paste slip), clobbering its already-correct names;
  ##   - bout.gps.cov's columns were never named, although later code
  ##     refers to bout_gps_cov_ratio;
  ##   - bout.count.mean was used in the merge but never defined;
  ##   - mvpa500 was computed and then silently dropped; it is now merged.
  by.bout <- list(dat$id_bout_num)
  ## bout duration in minutes (records are 30-second epochs, hence /2)
  bout.duration <- aggregate(dat$id_bout_num, by = by.bout, function(x) length(x) / 2)
  colnames(bout.duration) <- c("id_bout_num", "bout_duration_minute")
  ## NOTE(review): the original counted records (length(x)/2) rather than
  ## summing the MVPA flag; kept as-is pending confirmation of intent.
  mvpa500 <- aggregate(dat$mvpa500_minute, by = by.bout, function(x) length(x) / 2)
  colnames(mvpa500) <- c("id_bout_num", "mvpa500_minute")
  ## share of bout records that carry a GPS fix
  bout.gps.cov <- aggregate(!is.na(dat$recnum_gps), by = by.bout, mean)
  colnames(bout.gps.cov) <- c("id_bout_num", "bout_gps_cov_ratio")
  bout.count.mean <- aggregate(dat$counts, by = by.bout, function(x) mean(x, na.rm = TRUE))
  colnames(bout.count.mean) <- c("id_bout_num", "bout_count_mean")
  bout.speed.mean <- aggregate(dat$speed, by = by.bout, function(x) mean(x, na.rm = TRUE))
  colnames(bout.speed.mean) <- c("id_bout_num", "bout_speed_kmh_mean")
  stat.bout <- data.frame(id_bout_num = unique(dat$id_bout_num))
  stat.bout <- merge(stat.bout, bout.duration, by = "id_bout_num", all = TRUE)
  stat.bout <- merge(stat.bout, mvpa500, by = "id_bout_num", all = TRUE)
  stat.bout <- merge(stat.bout, bout.gps.cov, by = "id_bout_num", all = TRUE)
  stat.bout <- merge(stat.bout, bout.count.mean, by = "id_bout_num", all = TRUE)
  stat.bout <- merge(stat.bout, bout.speed.mean, by = "id_bout_num", all = TRUE)
  ## speed is meaningless for bouts with no GPS record at all
  stat.bout <- within(stat.bout, bout_speed_kmh_mean <- ifelse(bout_gps_cov_ratio == 0, NA, bout_speed_kmh_mean))
  ## fold each per-bout statistic into its seven distribution columns
  stat.name.bout <- setdiff(colnames(stat.bout), "id_bout_num")
  per.bout <- list()
  for (stat in stat.name.bout) {
    x <- stat.bout[[stat]]
    vals <- c(min(x, na.rm = TRUE),
              quantile(x, probs = 0.25, na.rm = TRUE),
              median(x, na.rm = TRUE),
              mean(x, na.rm = TRUE),
              quantile(x, probs = 0.75, na.rm = TRUE),
              max(x, na.rm = TRUE),
              sd(x, na.rm = TRUE))
    names(vals) <- paste(stat, c("min", "25qtl", "median", "mean", "75qtl", "max", "sd"), sep = "_")
    per.bout[[stat]] <- vals
  }
  as.data.frame(as.list(unlist(unname(per.bout))))
}
### bout X trip/place data
###-----------------------------------------------------------------------------
## all trips
## sprintf() template (five "%s" slots, all filled with the same label) that,
## when eval(parse())'d, builds a 5x2 data frame summarising the bouts that
## overlap with ANY trip: number of bouts, then mean/sd over bouts of the
## trip-overlap proportion, accelerometer counts, GPS coverage and speed.
## Expects `id_bout_num.trip` and `bout.ovlp.trip` in the calling environment.
## NOTE(review): "gpv_cov" looks like a typo for "gps_cov", but renaming it
## would change the generated variable names downstream, so it is kept as-is.
string.tm <- 'data.frame(rbind(
bout_overlap_%s_n=c(length(id_bout_num.trip), NA),
bout_overlap_%s_proportion = c(mean(with(bout.ovlp.trip, aggregate(!is.na(tripnum), by=list(id_bout_num), function(x) mean(x, na.rm=T)))$x, na.rm=T),
sd(with(bout.ovlp.trip, aggregate(!is.na(tripnum), by=list(id_bout_num), function(x) mean(x, na.rm=T)))$x, na.rm=T)),
bout_overlap_%s_count = c(mean(with(bout.ovlp.trip, aggregate(counts, by=list(id_bout_num), function(x) mean(x, na.rm=T)))$x, na.rm=T),
sd(with(bout.ovlp.trip, aggregate(counts, by=list(id_bout_num), function(x) mean(x, na.rm=T)))$x, na.rm=T)),
bout_overlap_%s_gpv_cov = c(mean(with(bout.ovlp.trip, aggregate(!is.na(recnum_gps), by=list(id_bout_num), function(x) mean(x, na.rm=T)))$x, na.rm=T),
sd(with(bout.ovlp.trip, aggregate(!is.na(recnum_gps), by=list(id_bout_num), function(x) mean(x, na.rm=T)))$x, na.rm=T)),
bout_overlap_%s_speed = c(mean(with(bout.ovlp.trip, aggregate(speed_kmh, by=list(id_bout_num), function(x) mean(x, na.rm=T)))$x, na.rm=T),
sd(with(bout.ovlp.trip, aggregate(speed_kmh, by=list(id_bout_num), function(x) mean(x, na.rm=T)))$x, na.rm=T))
))'
## by travel modes
get.stat.bout.travel.mode <- function(bout.all, tm, var.tm) {
  ## Summarise bouts that overlap with the given travel mode(s).
  ##
  ## bout.all: epoch-level data with columns id_bout_num, travel_mode,
  ##           counts, recnum_gps, speed_kmh
  ## tm:       character vector of travel modes to match
  ## var.tm:   label used in the generated row names
  ##
  ## Returns a 5x2 data frame (columns X1/X2) whose rows are
  ## bout_overlap_<var.tm>_{n,proportion,count,gpv_cov,speed}; row "n" holds
  ## (number of overlapping bouts, NA), the others hold (mean, sd) over bouts.
  ## NOTE(review): "gpv_cov" is kept verbatim to preserve downstream names.
  ##
  ## FIX: the previous eval(parse(text = sprintf(...))) construction is
  ## replaced by direct computation with dynamically pasted row names.

  # ids of bouts that overlap with the given travel modes
  id_bout_num.x <- unique(subset(bout.all, travel_mode %in% tm)$id_bout_num)
  # all epochs belonging to those bouts
  bout.ovlp.x <- subset(bout.all, id_bout_num %in% id_bout_num.x)

  # per-bout mean of v, then (mean, sd) of those per-bout means
  mean.sd <- function(v) {
    per.bout <- aggregate(v, by = list(bout.ovlp.x$id_bout_num),
                          function(x) mean(x, na.rm = TRUE))$x
    c(mean(per.bout, na.rm = TRUE), sd(per.bout, na.rm = TRUE))
  }

  stats <- rbind(
    n          = c(length(id_bout_num.x), NA),
    proportion = mean.sd(bout.ovlp.x$travel_mode %in% tm),
    count      = mean.sd(bout.ovlp.x$counts),
    gpv_cov    = mean.sd(!is.na(bout.ovlp.x$recnum_gps)),
    speed      = mean.sd(bout.ovlp.x$speed_kmh)
  )
  rownames(stats) <- paste0("bout_overlap_", var.tm, "_", rownames(stats))
  data.frame(stats)
}
################################################################################
### ###
### Read table from gist server ###
### ###
################################################################################
## Column list used when SELECTing lifelog records from the gist server.
## The order must stay in sync with `lifelog.schema` below (same columns,
## same sequence); edit both together.
lifelog.columns <- "
id
, recnum_acc
, recnum_gps
, recnum_place
, recnum_trip
, daygroup
, axis1
, axis2
, axis3
, counts
, time_acc_utc
, time_acc_local
, jdaya
, jdaya_trac
, mvpa_500x5x7
, bout_500x5x7
, bout_num_500x5x7
, acc_source
, wear_troiano
, nonwear_0x20
, vda_0x20
, latitude
, longitude
, speed_kmh
, altitude_m
, time_gps_utc
, time_gps_utc_std
, x_waspn
, y_waspn
, jdayg
, jdayg_trac
, gps_source
, placenum_from
, t_time_start_utc
, t_time_end_utc
, t_time_start_local
, t_time_end_local
, placename_from
, placenum_to
, placename_to
, travel_mode
, duration_minutes
, tour
, daynum
, placenum
, placename
, address
, city
, zip
, activity
, comment
, calendar_date
, p_time_arrived_utc
, p_time_left_utc
, p_time_arrived_local
, p_time_left_local
, vd
"
###=============================================================================
### for pushing into gist server
###=============================================================================
## CREATE TABLE column definitions used when pushing lifelog data into the
## gist server. Must stay in sync with `lifelog.columns` above.
## FIX: the first entry read "id" and ", text" on separate lines, i.e. a stray
## comma split the intended definition "id text" (column id of SQL type text)
## into two entries — lifelog.columns has no separate "text" column.
lifelog.schema <- "
(
id text
, recnum_acc integer
, recnum_gps bigint
, recnum_place bigint
, recnum_trip bigint
, daygroup bigint
, axis1 integer
, axis2 integer
, axis3 integer
, counts integer
, time_acc_utc timestamp without time zone
, time_acc_local timestamp without time zone
, jdaya double precision
, jdaya_trac double precision
, mvpa_500x5x7 integer
, bout_500x5x7 integer
, bout_num_500x5x7 integer
, acc_source character varying
, wear_troiano integer
, nonwear_0x20 integer
, vda_0x20 integer
, latitude double precision
, longitude double precision
, speed_kmh double precision
, altitude_m double precision
, time_gps_utc timestamp without time zone
, time_gps_utc_std timestamp without time zone
, x_waspn double precision
, y_waspn double precision
, jdayg integer
, jdayg_trac integer
, gps_source character varying
--, the_geom_4326 geometry(PointZM, 4326) -------------------- geom
--, the_geom_2926 geometry(PointZM, 2926) -------------------- geom
, placenum_from bigint
, t_time_start_utc timestamp without time zone
, t_time_end_utc timestamp without time zone
, t_time_start_local timestamp without time zone
, t_time_end_local timestamp without time zone
, placename_from text
, placenum_to bigint
, placename_to text
, travel_mode text
, duration_minutes bigint
, tour integer
, daynum bigint
, placenum bigint
, placename text
, address text
, city text
, zip text
, activity text
, comment character varying
, calendar_date date
, p_time_arrived_utc timestamp without time zone
, p_time_left_utc timestamp without time zone
, p_time_arrived_local timestamp without time zone
, p_time_left_local timestamp without time zone
, vd integer
)"
################################################################################
### ###
### Lifelog ###
### ###
################################################################################
### functions
### considering spring forward and fall back, calc jday_trac
# US daylight-saving-time transition dates, 2004-2014. Used by
# get.jday.trac()/get.day.trac() below to choose the hour offset applied to a
# timestamp before deriving its tracking day (2 h on spring-forward days,
# 4 h on fall-back days, 3 h otherwise).
spring.forward <- c("2004-04-04","2005-04-03","2006-04-02","2007-03-11","2008-03-09","2009-03-08","2010-03-14","2011-03-13","2012-03-11","2013-03-10","2014-03-09")
fall.back <- c("2004-10-31","2005-10-30","2006-10-29","2007-11-04","2008-11-02","2009-11-01","2010-11-07","2011-11-06","2012-11-04","2013-11-03","2014-11-02")
get.jday.trac <- function(x) {
  ## Day-of-year (0-based, POSIXlt$yday) of the "tracking day" each timestamp
  ## belongs to: shift every timestamp back by 3 hours (so records before
  ## 03:00 count toward the previous day), using 2 h on spring-forward days
  ## and 4 h on fall-back days (see the DST date vectors above).
  day.str <- substr(x, 1, 10)
  shift.hours <- rep(3, length(x))
  shift.hours[day.str %in% spring.forward] <- 2
  shift.hours[day.str %in% fall.back] <- 4
  as.POSIXlt(x - shift.hours * 60 * 60)$yday
}
get.day.trac <- function(x) {
  ## Calendar date (as "YYYY-MM-DD" character) of the "tracking day" each
  ## timestamp belongs to: shift back by 3 hours (2 h on spring-forward days,
  ## 4 h on fall-back days), then truncate the time-of-day to midnight.
  day.str <- substr(x, 1, 10)
  shift.hours <- rep(3, length(x))
  shift.hours[day.str %in% spring.forward] <- 2
  shift.hours[day.str %in% fall.back] <- 4
  shifted <- as.POSIXlt(x - shift.hours * 60 * 60)
  shifted$hour <- 0
  shifted$min <- 0
  shifted$sec <- 0
  as.character(shifted)
}
################################################################################
### ###
### Misc. ###
### ###
################################################################################
tm.code <- data.frame(code=c(1:14,97),tm=c("car","carpool","bus","light_rail","monorail","heavy_rail","dial-a-ride","school_bus","ferry","taxi","motocycle","bicycle","walk","airplane","other"))
###=============================================================================
### END OF CODE
###=============================================================================
|
3b5ef3de881c0c6fcf671358278826f06b1cf350 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /MMVBVS/man/plot_beta.Rd | 5270444f008eb06dd11dbdc0737e89f67e442543 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 484 | rd | plot_beta.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyze_results.R
\name{plot_beta}
\alias{plot_beta}
\title{Plot the coefficients for each variable for each iteration of MCMC}
\usage{
plot_beta(result, title = "")
}
\arguments{
\item{result}{Output object from mmvbvs function}
\item{title}{A string object for the title of the resulting plot}
}
\value{
ggplot object
}
\description{
Plot the coefficients for each variable for each iteration of MCMC
}
|
0b14cd60528227a97ea5150920cee0be0e949574 | 592a15eb25edb66fff4a938d3a20969aa8d2e3aa | /R/NIRFigAndeler.R | 09723219f968eed7f2d6e2514f6808a39fc98474 | [] | no_license | Rapporteket/intensiv | a6accf24600e47cded5065a988f1192e5ba62c14 | 544b6b120efa17da041eba1f53a986712f648ec3 | refs/heads/rel | 2023-07-19T03:01:43.656550 | 2023-07-12T09:38:42 | 2023-07-12T09:38:42 | 60,067,715 | 0 | 0 | null | 2023-07-06T05:53:31 | 2016-05-31T07:30:48 | R | UTF-8 | R | false | false | 17,315 | r | NIRFigAndeler.R | #' Funksjon som beregner aggregerte verdier (andeler) for ulike variabler/variabelkombinasjoner
#'
#' Denne funksjonen beregner AggVerdier (fordeling) av valgt variabel
#' filtrert på de utvalg som er gjort. Kan trenge funksjonerne:
#' NIRUtvalgEnh (skal endre navn til NIRUtvalg når ferdig)
#' NIRFigSoyler
#'
#' Funksjonen benytter funksjonene: NIRRegDataSQL, NIRPreprosess, NIRVarTilrettelegg, NIRUtvalgEnh
#' og NIRFigSoyler
#'
#' Argumentet \emph{valgtVar} har følgende valgmuligheter:
#' \itemize{
#' \item alder: Aldersfordeling, 10-årige grupper
#' \item ExtendedHemodynamicMonitoring: Hemodynamisk overvåkn.
#' \item inklKrit: Andeler for de 5 inklusjonskriteriene
#' \item InnMaate: Hastegrad inn på intensiv (Elektivt, Akutt medisinsk, Akutt kirurgisk)
#' \item isolering: Isolasjon, type
#' \item isoleringDogn Isolasjon, varighet
#' \item liggetid: Liggetid
#' \item NEMS: Skår for ressursbruk. (Nine Equivalents of Nursing Manpower Use Score)
#' \item Nas: Skår for sykepleieraktiviteter. (Nursing Activities Score)
#' \item nyreBeh: Nyrebeh., type
#' \item nyreBehTid: Nyrebeh., varighet
#' \item PrimaryReasonAdmitted: Hovedårsak til intensivopphold
#' \item respiratortid: Tid tilbrakt i respirator
#' \item respiratortidNonInv: Respiratortid, ikke-invasiv
#' \item respiratortidInvMoverf: Respiratortid, invasiv m/overf.
#' \item respiratortidInvUoverf: Respiratortid, invasiv u/overf.
#' \item SAPSII: Skår for alvorlighetsgrad av sykdom. (Simplified Acute Physiology Score II)
#' \item spesTiltak: Spesielle tiltak
#' }
#' Argumentet \emph{enhetsUtvalg} har følgende valgmuligheter:
#' \itemize{
#' \item 0: Hele landet
#' \item 1: Egen enhet mot resten av landet (Standard)
#' \item 2: Egen enhet
#' \item 3: Egen enhet mot egen sykehustype
#' \item 4: Egen sykehustype
#' \item 5: Egen sykehustype mot resten av landet
#' \item 6: Egen enhet mot egen region [NB: Intensivregisteret mangler pt. variabel for region]
#' \item 7: Egen region [NB: Mangler pt. variabel for region]
#' \item 8: Egen region mot resten [NB: Mangler pt. variabel for region]
#' }
#'
#' @param RegData En dataramme med alle nødvendige variabler fra registeret
#' @inheritParams NIRUtvalgEnh
#' @inheritParams NIRVarTilrettelegg
#' @param figurtype Hvilken figurtype som ønskes ut:
#' andel (fordelingsfigurer),
#' andelGrVar (andel i hver kategori av grupperingsvariabel, eks. sykehus),
#' andelTid (andel per tidsenhet, eks. år, måned),
#' andelPP (andel før og etter),
#' gjsnGrVar (sentralmål i hver kategori av grupperingsvariabel, eks. sykehus),
#' gjsnTid (sentralmål per tidsenhet, eks. år, måned)
#' @param preprosess Preprosesser data
#' FALSE: Nei
#' TRUE: Ja (Standard)
#' @param hentData Gjør spørring mot database
#' 0: Nei, RegData gis som input til funksjonen (Standard)
#' 1: Ja
#' @param lagFig Angir om figur skal lages eller ikke 0-ikke lag, 1-lag
#'
#' @return Søylediagram (fordeling) av valgt variabel. De enkelte verdiene kan også sendes med.
#'
#' @export
NIRFigAndeler <- function(RegData=0, valgtVar='alder', datoFra='2011-01-01', datoTil='3000-12-31', aar=0,
                          overfPas=0, minald=0, maxald=110, erMann='', InnMaate='', dodInt='', velgDiag=0, outfile='',
                          grType=99, preprosess=1, hentData=0, reshID=0, velgAvd=0, enhetsUtvalg=0, lagFig=1, ...) {

  # Log the request when called from a shiny session (session passed via ...)
  if ("session" %in% names(list(...))) {
    rapbase::repLogger(session = list(...)[["session"]], msg = paste0('Fordelingsfigur: ', valgtVar))
  }

  # Optionally fetch raw data from the registry database
  if (hentData == 1) {
    RegData <- NIRRegDataSQL(datoFra, datoTil)
  }

  # Preprocess unless already done by the caller (e.g. in samledokument)
  if (preprosess) {
    RegData <- NIRPreprosess(RegData = RegData)
  }

  #--------------- Variable definitions ------------------------------
  # Variables from the next-of-kin (paarorende) questionnaire are prepared by
  # their own tilrettelegging function.
  Totalskaarer <- c('SumScoreSatisfactionCare', 'SumScoreSatisfactionDecision', 'SumScoreAllQuestions')
  Del1 <- c('BehandlingHoeflighetRespektMedfoelelse', 'SymptomSmerte', 'SymptomPustebesvaer',
            'SymptomUro', 'BehandlingBesvarerBehov', 'BehandlingBesvarerStoette',
            'BehandlingSamarbeid', 'BehandlingBesvarerHoeflighetRespektMedfoelelse',
            'SykepleierOmsorg', 'SykepleierKommunikasjon', 'LegeBehandling',
            'AtmosfaerenIntensivAvd', 'AtmosfaerenPaaroerenderom', 'OmfangetAvBehandlingen')
  Del2 <- c('LegeInformasjonFrekvens', 'SvarPaaSpoersmaal', 'ForklaringForstaaelse',
            'InformasjonsAerlighet', 'InformasjonOmForloep', 'InformasjonsOverensstemmelse',
            'BeslutningsInvolvering', 'BeslutningsStoette', 'BeslutningsKontroll',
            'BeslutningsTid', 'LivsLengde', 'LivssluttKomfor', 'LivssluttStoette')
  PaarorVar <- c(Del1, Del2, Totalskaarer)

  if (valgtVar %in% PaarorVar) {
    NIRVarSpes <- NIRVarTilretteleggPaaror(RegData = RegData, valgtVar = valgtVar, figurtype = 'andeler')
  } else {
    NIRVarSpes <- NIRVarTilrettelegg(RegData = RegData, valgtVar = valgtVar, figurtype = 'andeler')
  }
  RegData <- NIRVarSpes$RegData
  flerevar <- NIRVarSpes$flerevar

  NIRUtvalg <- NIRUtvalgEnh(RegData = RegData, datoFra = datoFra, datoTil = datoTil, aar = aar,
                            minald = minald, maxald = maxald, velgDiag = velgDiag,
                            erMann = erMann, InnMaate = InnMaate, dodInt = dodInt,
                            reshID = reshID, grType = grType, enhetsUtvalg = enhetsUtvalg,
                            velgAvd = velgAvd) # NB: overfPas is accepted but (as before) not forwarded
  RegData <- NIRUtvalg$RegData
  utvalgTxt <- NIRUtvalg$utvalgTxt

  #--------------- Aggregation ------------------------------
  # Computed even when no figure is drawn, since FigDataParam is always returned.
  AggVerdier <- list(Hoved = NA, Rest = NULL)
  N <- list(Hoved = NULL, Rest = NULL)
  Nfig <- list(Hoved = NULL, Rest = NULL) # N shown in the figure legend
  Ngr <- list(Hoved = NULL, Rest = NULL)
  ind <- NIRUtvalg$ind
  variable <- NIRVarSpes$variable

  # flerevar == 1: valgtVar expands to several 0/1 indicator columns
  Ngr$Hoved <- switch(as.character(flerevar),
                      '0' = table(RegData$VariabelGr[ind$Hoved]),
                      '1' = apply(RegData[ind$Hoved, variable], MARGIN = 2,
                                  FUN = function(x) sum(x == 1, na.rm = T)))
  # Denominator: per-column count of valid 0/1 values (may differ per column)
  N$Hoved <- switch(as.character(flerevar),
                    '0' = sum(Ngr$Hoved),
                    '1' = apply(RegData[ind$Hoved, variable], MARGIN = 2,
                                FUN = function(x) sum(x %in% 0:1, na.rm = T)))
  AggVerdier$Hoved <- 100 * Ngr$Hoved / N$Hoved

  if (NIRUtvalg$medSml == 1) {
    Ngr$Rest <- switch(as.character(flerevar),
                       '0' = table(RegData$VariabelGr[ind$Rest]),
                       '1' = apply(RegData[ind$Rest, variable], MARGIN = 2,
                                   FUN = function(x) sum(x == 1, na.rm = T)))
    N$Rest <- switch(as.character(flerevar),
                     '0' = sum(Ngr$Rest),
                     '1' = apply(RegData[ind$Rest, variable], MARGIN = 2,
                                 FUN = function(x) sum(x %in% 0:1, na.rm = T)))
    AggVerdier$Rest <- 100 * Ngr$Rest / N$Rest
  }

  # Legend-N: shown as a range when the denominators differ between columns
  if (flerevar == 1) {
    Nfig$Hoved <- ifelse(min(N$Hoved) == max(N$Hoved),
                         min(N$Hoved[1]),
                         paste0(min(N$Hoved), '-', max(N$Hoved)))
    if (NIRUtvalg$medSml == 1) {
      Nfig$Rest <- ifelse(min(N$Rest) == max(N$Rest),
                          min(N$Rest[1]),
                          paste0(min(N$Rest), '-', max(N$Rest)))
    }
  } else {
    Nfig <- N
  }

  grtxt2 <- paste0(sprintf('%.1f', AggVerdier$Hoved), '%')

  xAkseTxt <- NIRVarSpes$xAkseTxt
  yAkseTxt <- 'Andel opphold (%)'
  retn <- NIRVarSpes$retn
  tittel <- NIRVarSpes$tittel
  hovedgrTxt <- NIRUtvalg$hovedgrTxt
  medSml <- NIRUtvalg$medSml
  grtxt <- NIRVarSpes$grtxt
  cexgr <- NIRVarSpes$cexgr
  grTypeTxt <- NIRUtvalg$grTypeTxt
  smltxt <- NIRUtvalg$smltxt
  KImaal <- NIRVarSpes$KImaal
  fargepalett <- NIRUtvalg$fargepalett

  # FIX: KImaal was previously added with "<-" inside list(), which produced an
  # UNNAMED list element; it is now a properly named element.
  FigDataParam <- list(AggVerdier = AggVerdier,
                       Nfig = Nfig,
                       N = N,
                       Ngr = Ngr,
                       KImaal = KImaal,
                       grtxt2 = grtxt2,
                       grtxt = grtxt,
                       grTypeTxt = grTypeTxt,
                       tittel = tittel,
                       retn = retn,
                       xAkseTxt = xAkseTxt,
                       yAkseTxt = yAkseTxt,
                       utvalgTxt = utvalgTxt,
                       fargepalett = NIRUtvalg$fargepalett,
                       medSml = medSml,
                       hovedgrTxt = hovedgrTxt,
                       smltxt = smltxt)

  if (lagFig == 1) {
    # Too few observations: draw an empty figure with an explanatory message
    if ((Nfig$Hoved < 5) | (dim(RegData)[1] < 5)) {
      FigTypUt <- rapFigurer::figtype(outfile)
      farger <- FigTypUt$farger
      plot.new()
      title(tittel)
      legend('topleft', legend = utvalgTxt, bty = 'n', cex = 0.9, text.col = farger[1])
      tekst <- 'For få registreringer i egen eller sammenligningsgruppe'
      text(0.5, 0.6, tekst, cex = 1.2)
      if (outfile != '') {dev.off()}
    } else {
      # Figure height depends on the number of groups
      hoyde <- ifelse(length(AggVerdier$Hoved) > 20, 3*800, 3*600)
      FigTypUt <- rapFigurer::figtype(outfile, height = hoyde, fargepalett = fargepalett)
      # Widen margins so the selection (utvalg) text fits above the plot
      NutvTxt <- length(utvalgTxt)
      vmarg <- switch(retn, V = 0.05, H = min(1, max(0, strwidth(grtxt, units = 'figure', cex = cexgr)*0.75)))
      par('fig' = c(vmarg, 1, 0, 1 - 0.02*(NutvTxt - 1))) # date selection is always present
      farger <- FigTypUt$farger
      fargeHoved <- farger[1]
      fargeRest <- farger[3]
      graa <- c('#4D4D4D', '#737373', '#A6A6A6', '#DADADA') # four grey tones, dark to light
      lwdRest <- 3   # line width representing the comparison group
      cexleg <- 0.9  # legend text size

      if (retn == 'H') {
        # Horizontal bars
        xmax <- min(max(c(AggVerdier$Hoved, AggVerdier$Rest), na.rm = T)*1.2, 100)
        ymin <- 0.3
        ymax <- 0.4 + 1.25*length(AggVerdier$Hoved)
        # rev() so the first category ends up at the top of the plot
        pos <- rev(barplot(rev(as.numeric(AggVerdier$Hoved)), xlim = c(0, xmax), ylim = c(ymin, ymax),
                           xlab = xAkseTxt, horiz = T, border = NA, col = fargeHoved))
        posOver <- max(pos) + 0.35*log(max(pos))
        posDiff <- 1.2*(pos[1] - pos[2])
        if (medSml == 1) {
          legend(xmax/4, posOver + 0.6*posDiff,
                 c(paste0(hovedgrTxt, ' (N=', Nfig$Hoved, ')'), paste0(smltxt, ' (N=', Nfig$Rest, ')')),
                 border = c(fargeHoved, NA), col = c(fargeHoved, fargeRest), bty = 'n', pch = c(15, 18),
                 pt.cex = 2, lwd = lwdRest, lty = NA, ncol = 1)
        } else {
          legend(xmax/4, posOver + 0.6*posDiff, paste0(hovedgrTxt, ' (N=', Nfig$Hoved, ')'),
                 border = NA, fill = fargeHoved, bty = 'n', ncol = 1)
        }
        # Category labels with their percentages underneath
        grtxt <- paste(grtxt, grtxt2, sep = '\n')
        mtext(at = pos + 0.05, text = grtxt, side = 2, las = 1, cex = cexgr, adj = 1, line = 0.25)
        # Comparison group shown as points on top of the bars
        if (medSml == 1) {
          points(as.numeric(AggVerdier$Rest), pos, col = fargeRest, cex = 2, pch = 18)
        }
      }

      if (retn == 'V') {
        # Vertical bars (only percentage figures use this orientation)
        ymax <- min(max(c(AggVerdier$Hoved, AggVerdier$Rest), na.rm = T)*1.25, 115)
        pos <- barplot(as.numeric(AggVerdier$Hoved), beside = TRUE, las = 1, ylab = yAkseTxt,
                       sub = xAkseTxt, col = fargeHoved, border = 'white', ylim = c(0, ymax))
        mtext(at = pos, grtxt, side = 1, las = 1, cex = 0.95*cexgr, adj = 0.5, line = 0.5)
        mtext(at = pos, grtxt2, side = 1, las = 1, cex = 0.8*cexgr, adj = 0.5, line = 1.5, col = graa[2])
        mtext(at = 0, paste0(hovedgrTxt, ': '), side = 1, cex = 0.8*cexgr, adj = 0.9, line = 1.5, col = graa[2])
        if (medSml == 1) {
          grtxt3 <- paste0(sprintf('%.1f', AggVerdier$Rest), '%')
          mtext(at = pos, grtxt3, side = 1, las = 1, cex = 0.8*cexgr, adj = 0.5, line = 2.5, col = graa[2])
          mtext(at = 0, paste0(smltxt, ': '), side = 1, cex = 0.8*cexgr, adj = 0.9, line = 2.5, col = graa[2])
          points(pos, as.numeric(AggVerdier$Rest), col = fargeRest, cex = 2, pch = 18)
          legend('top', legend = c(paste0(hovedgrTxt, ' (N=', Nfig$Hoved, ')'), paste0(smltxt, ' (N=', Nfig$Rest, ')')),
                 border = c(fargeHoved, NA), col = c(fargeHoved, fargeRest), bty = 'n', pch = c(15, 18), pt.cex = 2, lty = c(NA, NA),
                 lwd = lwdRest, ncol = 2, cex = cexleg)
        } else {
          legend('top', legend = paste0(hovedgrTxt, ' (N=', Nfig$Hoved, ')'),
                 border = NA, fill = fargeHoved, bty = 'n', ncol = 1, cex = cexleg)
        }
      }

      title(tittel, line = 1.5)
      # Text describing the selection that was made
      mtext(utvalgTxt, side = 3, las = 1, cex = 0.9, adj = 0, col = farger[1], line = c(3 + 0.8*((NutvTxt - 1):0)))
      par('fig' = c(0, 1, 0, 1))
      if (outfile != '') {dev.off()}
    }
  }

  return(invisible(FigDataParam))
}
|
0d2660dc37bd712aa57252287100cca5f09dccaf | a626a03701b0edb42fcd7cb88fbf3c3f6097056e | /RDM/Ch07/02_시험.R | 151e381cc85175af65ff78f37b8263a252759c17 | [] | no_license | HongSun2da/R | 6361d00016de1144e34edb1b5055e778e041d1c8 | cb0f609a5e8733a62fc0d5f0aa52e8f2df0c8154 | refs/heads/master | 2023-04-22T09:25:11.845781 | 2021-05-03T09:05:26 | 2021-05-03T09:05:26 | 354,025,046 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,676 | r | 02_시험.R |
# 01. 데이터 불러 오기 ------------------------------------
data_df = read.csv("FlightDelays.csv",
header = TRUE,
na.strings = ".")
data_df
str(data_df)
summary(data_df)
# 02. 데이터 전처리 하기 ------------------------------------
# - 1. DAY_WEEK
data_df$DAY_WEEK = factor(data_df$DAY_WEEK,
levels = c(1:7),
labels = c("mon","tue","wed","thu","fri","sat","sun"))
data_df$CRS_DEP_TIME = factor(round(data_df$CRS_DEP_TIME/100))
data_df$ORIGIN = factor(data_df$ORIGIN)
data_df$DEST = factor(data_df$DEST)
data_df$CARRIER = factor(data_df$CARRIER)
data_df$TAIL_NUM = factor(data_df$TAIL_NUM)
data_df$Flight.Status = factor(data_df$Flight.Status)
data_df$ORIGIN = relevel(data_df$ORIGIN, ref = "IAD")
data_df$DEST = relevel(data_df$DEST, ref = "LGA")
data_df$CARRIER = relevel(data_df$CARRIER, ref = "US")
data_df$DAY_WEEK = relevel(data_df$DAY_WEEK, ref = "wed")
data_df$isDelay = 1 * (data_df$Flight.Status == "delayed")
# 03. 기술통계 확인 하기 ------------------------------------
library(psych)
describe(data_df) # - 1. describe 확인
pairs.panels(data_df) # - 2. 상관관계 확인
# 04. 데이터 분리 하기 ------------------------------------
set.seed(2)
selected.var = c(10, 1, 8, 4, 2, 9, 14)
train_index = sample(c(1:dim(data_df)[1]),
dim(data_df)[1]*0.6)
train_df = data_df[train_index, selected.var]
test_df = data_df[-train_index, selected.var]
describe(train_df) # 3000 obs. of 12 variables
describe(test_df) # 2000 obs. of 12 variables
# 05. 로지스틱 분석 ------------------------------------
model = glm(isDelay ~ .,
data = train_df,
family = "binomial")
summary(model)
# - 1. Odds 값 확인 하기
odds = data.frame(summary(model)$coefficients,
odds = exp(coef(model)))
round(odds, 8)
# 06. 예측 모델 화인 [test_df] ------------------------------------
pred = predict(model,
test_df[, -7],
type = "response")
head(pred)
# - 1. 실제 = 예측 값 비교 하기
data.frame(actual = test_df$isDelay[1:30],
predicted = round(pred[1:30], 5))
# 07. 모델 평가 ------------------------------------
library(caret)
confusionMatrix(as.factor(ifelse(pred > 0.5, 1, 0)),
as.factor(test_df$isDelay))
# Reference
# Prediction 0 1
# 0 692 173
# 1 1 15
#
# Accuracy : 0.8025
# 95% CI : (0.7746, 0.8283)
# No Information Rate : 0.7866
# P-Value [Acc > NIR] : 0.133
# 08. gain chart 만들기 ------------------------------------
# install.packages("gains")
library(gains)
gain = gains(test_df$isDelay,
pred,
groups = 10)
gain
# - 1. lift chart
plot(c(0, gain$cume.pct.of.total * sum(test_df$isDelay)) ~ c(0, gain$cume.obs),
xlab="cases",
ylab="Cumulative",
main="",
type="l")
# - 2. 10분위 차트
heights = gain$mean.resp/mean(test_df$isDelay)
midpoints = barplot(heights,
names.arg = gain$depth,
ylim = c(0, 9),
xlab = "Percentile",
ylab = "Mean Response",
main = "Decile-wise lift chart")
# 09. 중요한 변수 선택 ------------------------------------
data_val = step(model, direction = "backward")
summary(data_val)
|
f65e675b9c8b7fa40ed0e854a899dbded71d597d | 858147a293aa34c4e30035b8314d5e8fdb8653c6 | /new_graph.R | 2fa50af144c31cf52d27adce2cded4132f30a597 | [] | no_license | caseyiannone/College_Scorecard | 493b8900cdc921e4c544a5ede80d99bbe1083942 | 6720898bc68e34c697de6e777850dcc8f296d1e3 | refs/heads/master | 2016-09-12T22:45:44.192196 | 2016-05-27T00:30:14 | 2016-05-27T00:30:14 | 59,791,124 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,219 | r | new_graph.R | library(data.table)
library(dplyr)
library(tidyr)
library(plyr)
library(sqldf)
library(ggvis)
library(ggplot2)
library(googleVis)
library(ggthemes)
library(googlesheets)
library(portfolio)
library(purrr)
library(tidyr)
library(lubridate)
library(scales)
library(gridExtra)
library(viridis)
library(knitr)
library(ggmap)
library(reshape2)
library(choroplethr)
library(choroplethrMaps)
library(gridExtra)
library(RColorBrewer)
#PC Working directory
setwd("C:/Users/ciannone/Google Drive/Graduate_Coursework/Florida State University/Practicum/Analysis_Plots")
FD <- read.csv("Final_Data.csv",header=TRUE,sep=",")
Earnings <- read.csv("Book1.csv",header=TRUE,sep=",")
#Combine IPEDS & Scorecard data
Full_DS <- inner_join(FD, Earnings, by="UNITID")
Full_DS <- data.table(Full_DS)
names(Full_DS)
#change all nulls to NA, as there is a combination of Nulls and NAs used in the files
Full_DS[Full_DS == "NULL"] <- NA
Full_DS[Full_DS == "PrivacySuppressed"] <- NA
ggplot(FD, aes(x=md_earn_wne_p10, color=factor(CONTROL), fill=factor(CONTROL), group=factor(CONTROL))) +
geom_density(alpha=0.3) +
theme_light(base_size=16) +
xlab("Median Earnings 10 Years after Matriculation") + ylab("")
setnames(Full_DS,"md_earn_wne_p10", "e50")
setnames(Full_DS,"pct10_earn_wne_p10", "e10")
setnames(Full_DS,"pct25_earn_wne_p10", "e25")
setnames(Full_DS,"pct75_earn_wne_p10" , "e75")
setnames(Full_DS, "pct90_earn_wne_p10" , "e90")
ORDER BY s11.pct75_earn_wne_p10 DESC")
earnings <- cbind(Rank=1:nrow(earnings), earnings)
earnings$College <- paste(earnings$Rank, earnings$College, sep=". ")
earnings$College <- factor(earnings$College, levels=rev(earnings$College))
ggplot(Full_DS[1:20,], aes(x=INSTNM, ymin=e10, lower=e25, middle=e50, upper=e75, ymax=e90)) +
geom_boxplot(stat="identity", fill="#53cfff") +
geom_text(aes(x=INSTNM, y=e75-2000, ymax=e75, hjust=0.95, label=paste0("$", e75)), size=4) +
#theme_light(base_size=16) +
theme(axis.text.y = element_text(hjust=0, color="black")) +
coord_flip() +
xlab("") + ylab("") +
ggtitle("Top Quartile Earnings 10 Years After Matriculation ($)")
|
df22623db4fecf88acab812aa10ba46698d85843 | 152c16bd35fe3536441db22f5c1634cf7660fe36 | /scripts/neural_net_bt.R | 82abba01118786e873ece0fe44b4594d02c7f43e | [] | no_license | computbiolgeek/kcnq1_predictive_modeling | 970e58747fffd862ae1c6056708b8c0b36f15a42 | 9d3d78c6be164b9c0a083e77720dcd66f48d03d7 | refs/heads/master | 2020-06-25T08:28:18.840223 | 2017-10-26T23:00:38 | 2017-10-26T23:00:38 | 94,237,357 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,935 | r | neural_net_bt.R | #########################################################
# @brief Fit a neural network to the KCNQ1 data set and estimate an
#        optimism-corrected AUC via the bootstrap
# @details For each bootstrap resample, a network with the same
#          architecture is refit; the optimism of one iteration is the
#          drop in AUC when that network is applied back to the
#          original data (Harrell's optimism bootstrap).
# @version 1.2
# @author Bian Li
# @date 10/15/2015
#########################################################
# load required libraries
library(nnet)   # single-hidden-layer feed-forward networks
library(ROCR)   # ROC curves and AUC

# Both the bootstrap resampling and the nnet weight initialization are
# stochastic; fix the seed so results are reproducible.
set.seed(42)

# load data set
datafile <- "training_dataset.csv"
kcnq_df <- read.csv(file = datafile, header = TRUE, stringsAsFactors = FALSE)
# rename the last column (the outcome) to a generic label
colnames(kcnq_df)[ncol(kcnq_df)] <- "label"

# helper: AUC of raw prediction scores against the true labels via ROCR
compute_auc <- function(predictions, labels) {
  rocr <- prediction(predictions = predictions, labels = labels)
  unlist(performance(rocr, measure = "auc")@y.values)
}

#############################################################################
# Fit a neural net to the original data set
#############################################################################
hidden.layer <- 3  # number of hidden units
# fit a neural net to the whole data set
orig_net <- nnet(label ~ pssm + erate, data = kcnq_df,
                 size = hidden.layer, decay = 0.02, maxit = 1000)
# apparent AUC: performance of the model on the data it was trained on
orig_predictions <- predict(orig_net, newdata = kcnq_df, type = "raw")
AUC_app <- compute_auc(orig_predictions, kcnq_df$label)
# save the model for reuse
save(orig_net, file = "kcnq1_nnet.rda")

########################
# Bootstrap AUC
########################
itrs <- 200                    # number of bootstrap iterations
AUC_bts <- numeric(itrs)       # AUC of each bootstrap model on its own resample
AUC_bt_origs <- numeric(itrs)  # AUC of each bootstrap model on the original data
for (i in seq_len(itrs)) {
  # sample a bootstrap data set (rows drawn with replacement)
  bt_indices <- sample(nrow(kcnq_df), nrow(kcnq_df), replace = TRUE)
  bt_set <- kcnq_df[bt_indices, ]
  # refit a neural net with the same architecture on the bootstrap sample
  bt_net <- nnet(label ~ pssm + erate, data = bt_set,
                 size = hidden.layer, decay = 0.02, maxit = 1000)
  # AUC on the bootstrap sample itself (optimistic)
  AUC_bts[i] <- compute_auc(predict(bt_net, newdata = bt_set, type = "raw"),
                            bt_set$label)
  # AUC of the same model evaluated on the original data
  AUC_bt_origs[i] <- compute_auc(predict(bt_net, newdata = kcnq_df, type = "raw"),
                                 kcnq_df$label)
}

#################################################
# Compute the optimism adjusted AUC
#################################################
AUC_optimisms <- AUC_bts - AUC_bt_origs  # per-iteration optimism (vectorized)
AUC_mean_optimism <- mean(AUC_optimisms)
AUC_adj <- AUC_app - AUC_mean_optimism
c437375be2969bdf82372c1c3445e483690bd040 | 0f9804c76cce7663449a9c7c7ce3e366a7b04ed5 | /plot4.R | 6a153ed47a5f4dfe31088dcc2c5a7e9041113474 | [] | no_license | JessicaRoelands/ExData_Plotting1 | 32d7ee34651b06401354bb8824f18782acd3c846 | 64901af22c65dda438f7bbd1c762d0fbffb8a2a7 | refs/heads/master | 2020-12-25T21:01:30.907987 | 2015-11-08T18:49:03 | 2015-11-08T18:49:03 | 45,618,705 | 0 | 0 | null | 2015-11-05T15:05:41 | 2015-11-05T15:05:41 | null | UTF-8 | R | false | false | 1,403 | r | plot4.R | ##read in data
## plot4.R — draws a 2x2 panel of household power-consumption plots for
## 2007-02-01 and 2007-02-02 and saves it as plot4.png.

## read in data; "?" marks missing values in the raw file
power <- read.csv("household_power_consumption.txt", sep = ";", dec = ".", na.strings="?")
## create one date-time variable combining both date and time.
## Use as.POSIXct, not strptime: strptime returns POSIXlt, which should
## not be stored inside a data frame (it is a 9-component list object).
power$Date <- as.POSIXct(paste(power$Date, power$Time), format = "%d/%m/%Y %H:%M:%S")
## subset data to only get data from the 1st and 2nd of Feb 2007
power2 <- subset(power, Date >= as.POSIXct("2007-02-01") & Date < as.POSIXct("2007-02-03"))
## ensure the measurement columns are numeric for plotting
power2$Global_active_power <- as.numeric(power2$Global_active_power)
power2$Sub_metering_1 <- as.numeric(power2$Sub_metering_1)
power2$Sub_metering_2 <- as.numeric(power2$Sub_metering_2)
power2$Sub_metering_3 <- as.numeric(power2$Sub_metering_3)
power2$Voltage <- as.numeric(power2$Voltage)
## 2x2 grid: global active power, voltage, sub-metering, reactive power
par(mfrow = c(2,2))
with(power2, {
    plot(Date, Global_active_power, type="l", ylab="Global Active Power", xlab="")
    plot(Date, Voltage, type="l", ylab="Voltage", xlab="datetime")
    plot(Date, Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
    lines(Date, Sub_metering_2, col = "red")
    lines(Date, Sub_metering_3, col = "blue")
    legend("topright", bty = "n", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1)
    plot(Date, Global_reactive_power, type="l", ylab="Global_reactive_power",xlab="datetime")
})
## copy the screen device to a 480x480 PNG and close it
## NOTE(review): dev.copy can render text/legends slightly differently
## than plotting directly into png(); acceptable for this assignment
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
c9ee6fd312e879f4774d3333172bb19020b75e17 | c0389789f70c37c3c76ed175f1b467c8b355ba3a | /README.R | 4dd24389a7a12eaf3c7a53364afcd604b27781e7 | [] | no_license | aprevatte/COVID19-API | e1ca90d37891ae5431a6624a446f64957d885866 | e9eb9c9cf9f91f172c9ddf2d3522f303a471309e | refs/heads/main | 2023-08-11T11:16:14.966241 | 2021-10-07T05:46:48 | 2021-10-07T05:46:48 | 412,850,988 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 426 | r | README.R | # author: Alex Prevatte
# date: 10/5/2021
# purpose: Render README.Rmd to a .md file called README.md for my repo.

# options forwarded to the github_document output format
render_opts <- list(
  toc = TRUE,           # include a table of contents
  html_preview = FALSE  # do not generate a local HTML preview file
)

rmarkdown::render(
  input = "~/NCSU/ST558/Repos/ST558-Project1/README.Rmd",
  output_format = "github_document",
  output_file = "README.md",
  output_options = render_opts
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.